diff --git a/.dict-speechbrain.txt b/.dict-speechbrain.txt new file mode 100644 index 0000000000..88541bd245 --- /dev/null +++ b/.dict-speechbrain.txt @@ -0,0 +1,1199 @@ +## Contents ## + +### Common Words for use in Compound Words ### +### Compound Words With 1 or 2 letter Words ### +### Jargon ### +### Names ### +Houlsby +### British ### +### Non-English ### + +####### Common Words for use in Compound Words ####### +acoustics +bar +csv +dummy +estimator +file +for +form +forms +function +hat +image +inter +intra +labels +max +min +mini +mix +path +range +speech +test +writer +your + +####### Compound Words With 1 or 2 letter Words ####### +aMax +aRange +asArray +asType +atleast +BackW +bFloat +bname +bNorm +bSampler +bSize +byLoss +bySource +checkIs +cleanID +cMap +distP +distQ +dLoss +dModel +dScore +dSet +dType +eMax +fBank +fBanks +fileID +fOut +fPath +fStream +gNoise +gOpen +hcand +hCat +hHat +hStack +idword +inProceedings +isEmpty +isGenerator +isreal +istop +itTop +Kmax +kMeans +kNeighbors +kWeight +lLoss +MANHATTANINJANUARY +maskL +maskR +mDay +modelFt +mvNorm +mychannel +myFile +myFormat +myRecipe +nArray +nChannels +nCols +nElement +nEvent +nFolds +nMap +nMax +nMixtures +nNodes +nodeID +noHash +noProgress +noRecurseDirs +noSignatures +nRef +nRows +nSamples +nSub +oClock +oneOf +pBar +pName +pRange +pushTo +qWeight +rArrow +rFilename +rTend +RUNNERUP +sBatch +sControl +skipIf +sLonger +sourceL +sourceR +sprintF +sType +textid +tGap +tMax +tMin +toArray +toList +transformerLM +tSeg +tSet +tSize +tStart +tStop +tZip +uRandom +vBias +vDim +vHead +vWeight +wDay +wGap +whamr +whats +Wmax +xAxis +xHat +xLabel +xLim +xMix +xScores +Xspec +xTick +xTicks +xVect +xVector +xVectors +xVects +yAxis +yDay +yLabel +yLim +yMat +yTickLabels +yTicks +Zpad + +####### Jargon ####### +aabbbb +accum +accumarray +accumulatable +acorr +activ +activlev +adsp +adspvqe +AEIOUÁÉÍÓÚ +ÁÉÍÓÚáéíóú +aiox +alffa +alived +annot +ans +arpa +arpack +arxiv +asind +attns +audiomnist +averager 
+awgn +bbcc +biquad +biquads +bleu +blmf +blstm +blunkett +bmbf +bmlf +bmlh +brir +cafile +cand +cant +catl +catr +cbak +cbaks +certifi +cffinit +cfgs +chans +cheby +cheybyshev +childs +chkarada +chnl +chnls +chrs +cipic +CKPT +ckpts +clsname +clstm +clust +cnrst +coef +coefs +coer +colab +complexlstm +complexrnn +concated +consideree +convblocks +convenc +convolutional +convs +convtasnet +convtranspose +couldnt +covl +covls +cpulm +CRDNN +crnn +csgraph +csgu +csig +csigs +csvf +csvs +cudatoolkit +cudnn +cver +cycliclrloader +cycliclrsaver +datafreqs +dataio +datio +dawalatn +dblp +dbstop +dcconv +dclassifier +dcnn +ddpm +ddwkim +dels +demixing +denoised +denoises +denoising +denorm +denormalize +denormalized +dependee +depod +dequantized +dereverb +dereverberation +ders +determinize +detokenization +detokenize +detokenized +detokenizer +detokenizes +dets +devel +dfcn +dffn +dfilters +diar +diarization +diarize +diarizes +Diarizing +dictify +diDataset +didatasets +didnt +disambig +discretized +discretizes +dnns +dnsmos +dnsmsos +doas +docherty +dprnn +drawio +dualpathrnn +durs +dvoice +dwfst +dynbatch +dynchunktrain +eder +eend +eess +Eigenvoice +eigh +eigsh +elbo +elems +ellip +elra +embs +emiss +emoid +emovdb +encodec +enhc +epaca +estoi +ests +etal +evals +evaluatable +evecs +falarm +fftby +fftconvolve +ffts +filt +finfo +finv +fitzooth +flac +fltp +fo +foos +fpr +freqs +fro +fromx +fsa's +fsas +fstaddselfloops +funcs +funct +fwhm +fwhms +fzero +galc +gelu +genbmm +gevd +ginv +gloo +glorot +gndr +gptmodel +gpulm +groakat +hann +hhpf +hibs +hifi +hinne +hlpf +hparam +hparams +hpfit +hpopt +hrtf +hyperparam +hyperparams +hyperpyyaml +hyps +icassp +icml +idcs +idxs +iemocap +iemocapie +ifft +iloc +impr +imshow +inclusivity +indcs +indi +inds +indx +indxs +initialising +inpfid +inpricey +inpt +insig +iowait +ipdb +iref +irfft +irit +isdst +isft +issn +isspmatrix +isstruct +issubseq +ISTFT +isys +iterrows +ivar +iwbeg +iwend +iwslt +jasonfu +jitify +jlcorpus +jsonl 
+jsonlines +jspeech +kaldi +Kaldi's +kaldilm +karpathy's +kbit +kbps +kdim +keepmodidx +keepsegidx +keepsegs +keyfuncs +kldiv +klen +kmean +Kpad +kspon +kwags +kwonlyargs +labse +langengullís +lbrain +ldir +lemma +lemmatize +lerp +levdb +libeigen +libnvvm +LIBRI +libritts +Libry +libsndfile +licenced +ljspeech +lmctc +lmnt +lmplz +logit +logprobs +lpcoeff +lpparams +lrec +lrelu +lstm +madda +mathjax +matvec +matvecmul +maxfilt +maxvecmul +mbart +mcgregor +mels +metafname +mfcc +mfccs +mhaxl +mhsa +mimo +miniters +misversioned +mlps +mnist +modelize +modelizes +modernisation +msed +mseg +mstacotron +mulaw +mult +multiwoz +mvdr +mvec +mwoz +myrir +nans +nargin +nargout +nbest +nbin +nccl +ncor +nd +ndarray +ndim +ndims +negs +nelems +nerr +nesterov +Neuro +nfft +ngram +ngramlm +ngrams +nhead +nikvaessen +nllb +nmfbrain +nmfdictionary +nmfencoder +nnet +noisifier +nonl +noqa +nproc +nprocs +nsamp +nspk +nspks +nsys +ntasks +numbapro +numel +numlayers +numpy +nums +numspks +nvvm +nwerr +NYU's +oclock +oemax +oen +Omniglot +onnxruntime +onwsj +openfst +openrir +optim +osama +ot +ovrl +paedophiles +parametrizations +pcen +pcolormesh +pdns +peft +perc +percactive +perturbator +pesq +pesqs +pfxuc +phix +phns +plda +pmul +pooler +preds +prelu +probs +procs +puml +punc +pval +pyctcdecode +pydoclint +pydub +pygtrie +pyin +pyln +pyloudnorm +pymodule +pypa +pyplot +pyRoom +pysndfx +pystoi +pythonic +Pytorch +Pytorch's +qcnn +qlen +qlstm +qrnn +quantisers +quantizer +quantizers +quaternionli +quaternionlstm +quaternionrnn +randn +ravdess +rcoeff +recommonmark +reducelronplateau +refactorings +refcoeff +reimplementation +reinit +Reinitializes +relis +relu +renorm +renormalize +renormalized +repar +reparameterization +reparameterize +resamplers +resepf +resepformer +resynth +resynthesized +resynthesizing +revb +reverbed +reverberance +rfft +rgen +rirs +rnnlm +rnnlmrescorer +rnnn +rnnp +rnnt +rtbeg +rtdur +rtmid +rttm +rttms +rtxa +rtype +ruamel +rwbeg +rwdur +rwend +samu +sasx 
+sbrnn +sbtf +scalarize +scipy +sconv +sdrs +segan +seglist +seglstm +segs +segset +segsets +segsnr +sents +septillionths +seqlm +seqs +ser +sers +sess +setu +sextillionths +sidx +sigm +sils +silu +simu +sincconv +sinr +sisnr +sisnrs +slaney +sligru +slogdet +snr +snrlevels +snrs +snts +soxi +spacy +specif +spectr +sphs +spkid +spkr +spkrdata +spkrec +spkrs +spks +splitted +srate +srmr +srmrpy +srnn +srpphat +srun +sseg +ssegs +ssim +ssnr +stds +STFT +stnorm +stoi +stois +strt +subakany +subseg +subsegs +subt +subtokenization +subtokenizations +subtrs +svdl +swbd +syss +targ +taslp +tbeg +TDNN +tdoa +tdoas +tdur +texthvc +thats +theyre +tids +timit +tjoint +TLDR +tmid +tocoo +tocsr +todia +toeplitz +tokenizable +tokenizes +topk +topo +tovl +tqdm +trac +transformerlmrescorer +triu +trnpath +trous +txts +Ukranian +uncond +uncondition +underdogliu +undoc +unet +unflatten +unflattened +unlex +unmixing +unnormalized +unorm +unpadded +unquantized +unscale +unsq +unsqueeze +unsqueezed +unsqueezes +upalign +updown +uttid +uttr +utts +vals +vctk +vecs +vect +vectorize +veri +ville +vocav +voceleb +vocoding +vocos +vorbis +vqgan +Vtrans +wagnerdo +wandb +wav +wavlm +wavs +wavscp +wavxk +wbeg +wdur +webrtcvad +wfst +wids +winit +wlen +wnormandskip +wordemb +wordid +wpsb +wrds +wsjmix +wtyp +xargs +xlsr +xmls +xponent +yamls +youre +ӿéæœâçèàûî + +####### Names ####### +Abdel +Abdelmoumene +Abdelwahab +Abdou +Abous +Adel +adiyoss +Aichner +Alaa +Algayres +Algazi +Alghisi +Alumäe +Alya +Andreas +Aravind +Aris +Arjun +ARNIE +Arseniy +Artem +ASRU +Avendano +Awni +Bahdanau +Bain +Bengio +Benoit +Bernd +Bonafonte +Borra +Bougares +Boumadane +Brakel +Bronzi +Bulut +Busso +Cámbara +Caubriere +Chaabani +Chebyshev +Cheng +Chieh +Chien +Choi +Chorowski +Chun +Coeff +Comberts +Concordia +Cuda +Darija +Davide +Dawalatabad +Dhivehi +Dimas +Diola +Dominik +Dubey +Duchêne +Duda +Duret +Ebrahim +Ecapa +Eddine +Efthymios +Eigen +Émile +Emov +Emre +Eskimez +Essid +Estève +Fance +Farrens +Feng 
+Fethi +Firas +Florentin +Fongbe +Fosler +Francesco +Fujita +Gabor +Gaëlle +Gahbiche +Gaudet +Gaussianly +Gdrive +Genabith +Georgios +Getreuer +Ghannay +Gopal +Gorin +Gradio +Grondin +Guimarães +Guoguo +Hakha +Hannes +Hanning +Hanzi +Harishchandra +Heba +Heitor +Hifigan +hnguyen +Hsieh +Hsuan +Hwidong +Hyun +INSEA +Ivana +Iwhmdeo +Jabaian +Jacoby +Jarod +Jenie +Jenthe +Jeong +Jeongkyu +Jiang +Jianyuan +Junkai +Kandarkar +Kappenman +Karakasidis +Kazemzadeh +Kenlm +Khudanpur +Kiefer +Kinyarwanda +Klatt's +Kleit +Klocmax +Korbayová +Kruijff +Kuang +Kullback +Kürzinger +Langevin +Laperrière +Leibler +Ligru +Limame +Linv +Luca +Ludwigsfelde +Lussier +Luxembourgish +Makuhari +Mangolian +Matusevych +Maurizio +Mdhaffar +Mesgarani +Messe +Mickael +Mila +Mirco +Mirko +Mohonk +Montréal +Mori +Motlicek +Moumen +Mousavi +München +Musan +Narayanan +Nauman +Ndel +Nfissi +Nima +Nins +Noam +Numba +Nyquist +Occitan +Omologo +OpenAI +Oríon +Paissan +Paltz +Panayotov +Papreja +Parcollt +Pascual +Pavlo +Pelloin +Petr +Piyush +Plantinga +Ploujnikov +Popen +Povey +Pradnya +Pular +Qilin +Quattro +Raby +Ralf +Rastorgueva +Ravanelli +REBECCA +Renato +Rescu +Rigoll +Riguidel +Rjeili +rocheng +Rouhe +Rouvier +Ruban +Ryant +Safaya +Sagar +Sahar +SAIT +Salah +Salima +Samuele +Sangeet +Sanjeev +Sarthak +Sathvik +Saurous +Sefik +Sergiy +Serr +Seung +Shona +Shou +Shrikanth +Shubham +Shucong +Sinc +Sinica +Smaragdis +Sobule +Soninke +Souhir +Spinor +Sreeramadas +Sridharan +Sungbok +Sylvain +Tagliasacchi +Takuya +Tamasheq +Tanel +Tasnet +Teboul +Technische +Tedlium +Thakker +Thienpondt +Titouan +Toks +Trabelsi +Tsao +Tsun +Tzinis +Udupa +Universität +Vaessen +Valk +Vassil +Vaswani +Vishak +Viterbi +Vogt +Wahab +Waray +Watzel +Waytowich +Whipps +Winkelbauer +Wlocmax +Xilin +Xuechen +Xugang +Yacoubi +Yadav +Yanni +Yannick +Yeol +Yingzhi +Yoshioka +Yoshua +Yusuke +Yuxuan +Zaiem +Zaion +Zanon +Zeghidour +Zenodo +Zeyu +Zhang +Zhao +Zhepei +Zhong +Zijian + +####### British ####### +behaviour +finalised 
+harmonise +initialised +Initialises +neighbours +normalise +optimisation +optimisations +optimise +optimised +optimiser +organised +quantised +realise +semantizer +stabilised +stabilises +stabilising +traveller +travellers +utilises +visualisation + +####### Non-English ####### +AUJOURD +AUJOURD'HUI +collés +delle +encadre +noviembre +Politecnica +quelques +Université +Università +vie diff --git a/.flake8 b/.flake8 deleted file mode 100644 index cd5622dd34..0000000000 --- a/.flake8 +++ /dev/null @@ -1,8 +0,0 @@ -[flake8] -ignore = E203, E266, E501, W503 -# line length is intentionally set to 80 here because black uses Bugbear -# See https://github.com/psf/black/blob/master/README.md#line-length for more details -max-line-length = 80 -max-complexity = 18 -select = B,C,E,F,W,T4,B9 -exclude = tools/kaldi_decoder diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 10732f4473..4c7798012f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -1,27 +1,29 @@ -name: 🪲Bug Report +name: 🪲 Bug Report description: Something went wrong? Let us know! 🐣 -title: "[Bug]: " labels: ["bug"] body: - type: markdown attributes: value: | - Before submitting a bug, please make sure the issue hasn't been already addressed by searching through the existing and past issues. + **Before submitting a bug report, please read the following instructions:** + + - Make sure the issue hasn't already been addressed by searching through existing and past issues. + - Use a clear and concise title for your bug report. + - Fill out all relevant sections below to help us understand and reproduce the issue. - type: textarea id: describe-the-bug attributes: label: Describe the bug - description: Short and clear description of what the bug is. + description: Provide a clear and concise description of the bug. 
validations: required: True - - type: textarea id: expected-behaviour attributes: label: Expected behaviour - description: A description of what you expected to happen. + description: Describe what you expected to happen. validations: required: True @@ -30,7 +32,15 @@ body: attributes: label: To Reproduce description: | - If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as minimal as possible. We will copy-paste your code, and we expect to get the same result as you did: avoid any external data, and include the relevant imports. + If relevant, add a minimal example or detailed steps to reproduce the error. You can share code directly using Google Colab: + 1. Visit [Google Colab](https://colab.research.google.com/). + 2. Create a new notebook. + 3. Paste your code into the notebook. + 4. Share the notebook by clicking on "Share" in the top-right corner. + 5. Share the notebook's link here. + + In the worst case, provide detailed steps to reproduce the behavior. + placeholder: "```python #your code ``` \n ```yaml #your yaml code ```" validations: required: False @@ -38,27 +48,29 @@ body: - type: textarea id: versions attributes: - label: Versions - description: "Please tell us more about your current SpeechBrain version and/or git hash (if installed via cloning+editable install). You can also add other setup information that might be relevant." + label: Environment Details + description: Provide information about your SpeechBrain version, setup, and any other relevant environment details. validations: required: False - type: textarea id: logs attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. + label: Relevant Log Output + description: Copy and paste any relevant log output here. 
render: shell + validations: + required: False - type: textarea id: add-context attributes: - label: Additional context - description: "Add any other context about the problem here." + label: Additional Context + description: Share any other context about the problem or your environment that may help in troubleshooting. validations: required: False - type: markdown attributes: value: | - Thanks for contributing to SpeechBrain! + **Thank you for contributing to SpeechBrain!** Your bug report helps us improve the project's reliability. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml deleted file mode 100644 index 806f1b7d18..0000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ /dev/null @@ -1,35 +0,0 @@ -name: 🚀Feature Request -description: New missing features? Inform us! 🎉 -title: "[Feature Request]:" -labels: ["enhancement, feature_request"] -body: - - - type: textarea - id: proposal - attributes: - label: 🚀The feature - description: "What are you missing in SpeechBrain and what would you like to do with it?" - validations: - required: True - - - type: textarea - id: solution - attributes: - label: Solution outline - description: "How would you like to use it, and see it being used by others?" - validations: - required: True - - - - type: textarea - id: context - attributes: - label: Additional context - description: "Add any other context or screenshots about the feature request." - validations: - required: False - - - type: markdown - attributes: - value: | - Thanks for contributing to SpeechBrain! diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..c04d6d1613 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,45 @@ +## What does this PR do? + + + +Fixes # + + + +
+ Before submitting + +- [ ] Did you read the [contributor guideline](https://speechbrain.readthedocs.io/en/latest/contributing.html)? +- [ ] Did you make sure your **PR does only one thing**, instead of bundling different changes together? +- [ ] Did you make sure to **update the documentation** with your changes? (if necessary) +- [ ] Did you write any **new necessary tests**? (not for typos and docs) +- [ ] Did you verify new and **existing [tests](https://github.com/speechbrain/speechbrain/tree/develop/tests) pass** locally with your changes? +- [ ] Did you list all the **breaking changes** introduced by this pull request? +- [ ] Does your code adhere to project-specific code style and conventions? + +
+ +## PR review + +
+ Reviewer checklist + +- [ ] Is this pull request ready for review? (if not, please submit in draft mode) +- [ ] Check that all items from **Before submitting** are resolved +- [ ] Make sure the title is self-explanatory and the description concisely explains the PR +- [ ] Add labels and milestones (and optionally projects) to the PR so it can be classified +- [ ] Confirm that the changes adhere to compatibility requirements (e.g., Python version, platform) +- [ ] Review the self-review checklist to ensure the code is ready for review + +
+ + diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 377fca5deb..b7fd9a0293 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -7,10 +7,11 @@ on: # yamllint disable-line rule:truthy jobs: pre-commit: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: '3.8' - - uses: pre-commit/action@v2.0.3 + python-version: '3.12' + - uses: pre-commit/action@v3.0.1 diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 40c8602c1c..481793148d 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -10,27 +10,40 @@ on: # yamllint disable-line rule:truthy jobs: tests: + if: github.event.pull_request.draft == false name: Tests runs-on: ubuntu-latest strategy: matrix: - python-version: [3.7, 3.8, 3.9] + python-version: ["3.10", 3.13] steps: - uses: actions/checkout@v2 + - uses: actions/cache@v4 + id: cache-uv + with: + path: ~/.cache/uv + key: ${{ runner.os }}-python-${{ matrix.python-version }}-uv - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install libsndfile + - name: Full dependencies + run: | + pip install uv + uv pip install --system -r requirements.txt torch==2.6.0+cpu torchaudio==2.6.0+cpu --extra-index-url https://download.pytorch.org/whl/cpu + uv pip install --system --editable . --no-deps # already installed pinned deps from requirements.txt, we're good + - name: Install sox run: | - sudo apt-get install -y libsndfile1 + sudo apt-get update + sudo apt install sox libsox-dev + # Installing only SoX for now due to FFmpeg issues on the CI server with Torchaudio 2.1. + # FFmpeg works fine on all other machines. We'll switch back when the CI server is fixed. 
+ #- name: Install ffmpeg + # run: | + # sudo apt-get update + # sudo apt-get install -y ffmpeg - name: Display Python version run: python -c "import sys; print(sys.version)" - - name: Full dependencies - run: | - pip install -r requirements.txt - pip install --editable . - pip install ctc-segmentation - name: Consistency tests with pytest run: | pytest tests/consistency diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b48b4c032f..cf4214796b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -17,14 +17,14 @@ jobs: ref: main - uses: actions/setup-python@v2 with: - python-version: 3.8 + python-version: 3.12 - name: Install pypa/build run: python -m pip install build --user - name: Build binary wheel and source tarball run: python -m build --sdist --wheel --outdir dist/ - name: Publish to PyPI if: startsWith(github.ref, 'refs/tags') - uses: pypa/gh-action-pypi-publish@master + uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_API_KEY }} diff --git a/.github/workflows/verify-docs-gen.yml b/.github/workflows/verify-docs-gen.yml index ac279a9cd8..53c300363e 100644 --- a/.github/workflows/verify-docs-gen.yml +++ b/.github/workflows/verify-docs-gen.yml @@ -8,19 +8,26 @@ on: # yamllint disable-line rule:truthy jobs: docs: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - - name: Setup Python 3.8 - uses: actions/setup-python@v2 + - uses: actions/cache@v4 + id: cache-uv with: - python-version: '3.8' + path: ~/.cache/uv + key: ${{ runner.os }}-python-docs-uv + - name: Setup Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: '3.12' - name: Full dependencies run: | - pip install -r requirements.txt - pip install --editable . 
- pip install -r docs/docs-requirements.txt + pip install uv + uv pip install --system "sphinx>=7.4.1,<9.0" + uv pip install --system -r requirements.txt -r docs/docs-requirements.txt torch==2.6.0+cpu torchaudio==2.6.0+cpu --extra-index-url https://download.pytorch.org/whl/cpu + uv pip install --system --editable . --no-deps # already installed pinned deps from requirements.txt, we're good - name: Generate docs run: | cd docs - make html + SPHINXOPTS="-j=auto" make html diff --git a/.gitignore b/.gitignore index 3ac4dd0ad0..6fdb0ab7bc 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ eggs/ .eggs/ lib/ lib64/ +node_modules/ parts/ sdist/ var/ @@ -52,6 +53,7 @@ coverage.xml .pytest_cache/ cover/ tests/tmp/ +tests/download/ # Translations *.mo @@ -71,11 +73,8 @@ instance/ .scrapy # Sphinx documentation -docs/_build/ -docs/source/*.rst -!docs/source/index.rst -!docs/source/_templates -!docs/source/_static +docs/build/ +docs/API/*.rst # PyBuilder target/ @@ -142,6 +141,9 @@ dmypy.json # pytype static type analyzer .pytype/ +# Audio folders +**/audio_cache/ + # Pretrained & models folders **/model_checkpoints/ **/pretrained_model_checkpoints/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e9fd8af1ad..67ecad97f9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,19 +13,32 @@ repos: - id: check-added-large-files args: [--maxkb=1024] - - repo: https://github.com/psf/black - rev: 19.10b0 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.12.4 hooks: - - id: black + - id: ruff types: [python] - additional_dependencies: ['click==8.0.4'] - - repo: https://gitlab.com/pycqa/flake8.git - rev: 3.7.9 - hooks: - - id: flake8 + args: [--fix] + - id: ruff-format types: [python] - repo: https://github.com/adrienverge/yamllint - rev: v1.23.0 + rev: v1.35.1 hooks: - id: yamllint + + - repo: https://github.com/codespell-project/codespell + rev: v2.3.0 + hooks: + - id: codespell + args: + - 
"--ignore-words=.dict-speechbrain.txt" + # skip jupyter notebook as there isn't a good way to only match inputs + # at the moment. manually fixing up outputs would be a pain and we + # cannot always expect to regex them out. + - "--skip=*.ipynb" + # for ipynb inline base64 -- although this isn't very useful since we + # are disabling support for ipynb for now + - "--ignore-regex='base64,.*?=='" + additional_dependencies: + - tomli diff --git a/.readthedocs.yaml b/.readthedocs.yaml index ed8451a3c0..2741a3ed09 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,13 +1,20 @@ # .readthedocs.yaml +version: 2 + build: - image: latest + os: ubuntu-24.04 + tools: + python: "3.12" python: - version: 3.8 - pip_install: True + install: + - requirements: docs/readthedocs-requirements.txt # Don't build any extra formats formats: [] -requirements_file: docs/docs-requirements.txt +# Path to sphinx config file, as per the change outlined in +# https://about.readthedocs.com/blog/2024/12/deprecate-config-files-without-sphinx-or-mkdocs-config/ +sphinx: + configuration: docs/conf.py diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000..8b48809676 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,116 @@ +# This CITATION.cff file was generated with cffinit. +# Visit https://bit.ly/cffinit to generate yours today! 
+ +cff-version: 1.2.0 +title: SpeechBrain +message: A PyTorch-based Speech Toolkit +type: software +authors: + - given-names: Mirco + family-names: Ravanelli + affiliation: 'Mila - Quebec AI Institute, Université de Montréal' + - given-names: Titouan + family-names: Parcollet + affiliation: >- + LIA - Avignon Université, CaMLSys - University of + Cambridge + - given-names: Peter + family-names: Plantinga + affiliation: Ohio State University + - given-names: Aku + family-names: Rouhe + affiliation: Aalto University + - given-names: Samuele + family-names: Cornell + affiliation: Università Politecnica delle Marche + - given-names: Loren + family-names: Lugosch + affiliation: 'Mila - Quebec AI Institute, McGill University' + - given-names: Cem + family-names: Subakan + affiliation: Mila - Quebec AI Institute + - given-names: Nauman + family-names: Dawalatabad + affiliation: Indian Institute of Technology Madras + - given-names: Abdelwahab + family-names: Heba + affiliation: IRIT - Université Paul Sabatier + - given-names: Jianyuan + family-names: Zhong + affiliation: Mila - Quebec AI Institute + - given-names: Ju-Chieh + family-names: Chou + affiliation: Toyota Technological Institute at Chicago + - given-names: Sung-Lin + family-names: Yeh + affiliation: University of Edinburgh + - given-names: Szu-Wei + family-names: Fu + affiliation: 'Academia Sinica, Taiwan' + - given-names: Chien-Feng + family-names: Liao + affiliation: 'Academia Sinica, Taiwan' + - given-names: Elena + family-names: Rastorgueva + affiliation: NVIDIA + - given-names: François + family-names: Grondin + affiliation: Université de Sherbrooke + - given-names: William + family-names: Aris + affiliation: Université de Sherbrooke + - given-names: Hwidong + family-names: Na + affiliation: Samsung-SAIT + - given-names: Yan + family-names: Gao + affiliation: CaMLSys - University of Cambridge + - given-names: Renato + name-particle: De + family-names: Mori + affiliation: 'LIA - Avignon Université, McGill 
University' + - given-names: Yoshua + family-names: Bengio + affiliation: 'Mila - Quebec AI Institute, Université de Montréal' +identifiers: + - type: doi + value: 10.48550/arXiv.2106.04624 + description: 'SpeechBrain: A General-Purpose Speech Toolkit' +repository-code: 'https://github.com/speechbrain/speechbrain/' +url: 'https://speechbrain.github.io/' +abstract: >- + SpeechBrain is an open-source and all-in-one speech + toolkit. It is designed to facilitate the research and + development of neural speech processing technologies by + being simple, flexible, user-friendly, and + well-documented. This paper describes the core + architecture designed to support several tasks of common + interest, allowing users to naturally conceive, compare + and share novel speech processing pipelines. SpeechBrain + achieves competitive or state-of-the-art performance in a + wide range of speech benchmarks. It also provides training + recipes, pretrained models, and inference scripts for + popular speech datasets, as well as tutorials which allow + anyone with basic Python proficiency to familiarize + themselves with speech technologies. +keywords: + - speech toolkit + - audio + - deep learning + - PyTorch + - transformers + - voice recognition + - speech recognition + - speech-to-text + - language model + - speaker recognition + - speaker verification + - speech processing + - audio processing + - ASR + - speaker diarization + - speech separation + - speech enhancement + - spoken language understanding + - HuggingFace +license: Apache-2.0 diff --git a/PERFORMANCE.md b/PERFORMANCE.md new file mode 100644 index 0000000000..cb80af4328 --- /dev/null +++ b/PERFORMANCE.md @@ -0,0 +1,454 @@ +# SpeechBrain Performance Report +This document provides an overview of the performance achieved on key datasets and tasks supported by SpeechBrain. 
+ +## AISHELL-1 Dataset + +### ASR + +| Model | Checkpoints | HuggingFace | Test-CER | +| --------| --------| --------| --------| + | [`recipes/AISHELL-1/ASR/CTC/hparams/train_with_wav2vec.yaml`](recipes/AISHELL-1/ASR/CTC/hparams/train_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/e4bth1bylk7c6h8/AADFq3cWzBBKxuDv09qjvUMta?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-ctc-aishell) | 5.06 | + | [`recipes/AISHELL-1/ASR/seq2seq/hparams/train.yaml`](recipes/AISHELL-1/ASR/seq2seq/hparams/train.yaml) | [here](https://www.dropbox.com/sh/kefuzzf6jaljqbr/AADBRWRzHz74GCMDqJY9BES4a?dl=0) | - | 7.51 | + | [`recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer.yaml`](recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer.yaml) | [here](https://www.dropbox.com/sh/tp6tjmysorgvsr4/AAD7KNqi1ot0gR4N406JbKM6a?dl=0) | [here](https://huggingface.co/speechbrain/asr-transformer-aishell) | 6.04 | + | [`recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer_with_wav2vect.yaml`](recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer_with_wav2vect.yaml) | [here](https://www.dropbox.com/sh/tp6tjmysorgvsr4/AAD7KNqi1ot0gR4N406JbKM6a?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-transformer-aishell) | 5.58 | + + +## Aishell1Mix Dataset + +### Separation + +| Model | Checkpoints | HuggingFace | SI-SNRi | +| --------| --------| --------| --------| + | [`recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2.yaml`](recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2.yaml) | [here](https://www.dropbox.com/sh/6x9356yuybj8lue/AABPlpS03Vcci_E3jA69oKoXa?dl=0) | - | 13.4dB | + | [`recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3.yaml`](recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3.yaml) | [here](https://www.dropbox.com/sh/6x9356yuybj8lue/AABPlpS03Vcci_E3jA69oKoXa?dl=0) | - | 11.2dB | + + +## BinauralWSJ0Mix Dataset + +### Separation + +| Model | Checkpoints | HuggingFace | SI-SNRi 
| +| --------| --------| --------| --------| + | [`recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-cross.yaml`](recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-cross.yaml) | [here](https://www.dropbox.com/sh/i7fhu7qswjb84gw/AABsX1zP-GOTmyl86PtU8GGua?dl=0) | - | 12.39dB | + | [`recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-independent.yaml`](recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-independent.yaml) | [here](https://www.dropbox.com/sh/i7fhu7qswjb84gw/AABsX1zP-GOTmyl86PtU8GGua?dl=0) | - | 11.90dB | + | [`recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-noise.yaml`](recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-noise.yaml) | [here](https://www.dropbox.com/sh/i7fhu7qswjb84gw/AABsX1zP-GOTmyl86PtU8GGua?dl=0) | - | 18.25dB | + | [`recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-reverb.yaml`](recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-reverb.yaml) | [here](https://www.dropbox.com/sh/i7fhu7qswjb84gw/AABsX1zP-GOTmyl86PtU8GGua?dl=0) | - | 6.95dB | + | [`recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel.yaml`](recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel.yaml) | [here](https://www.dropbox.com/sh/i7fhu7qswjb84gw/AABsX1zP-GOTmyl86PtU8GGua?dl=0) | - | 16.93dB | + + +## CVSS Dataset + +### S2ST + +| Model | Checkpoints | HuggingFace | Test-sacrebleu | +| --------| --------| --------| --------| + | [`recipes/CVSS/S2ST/hparams/train_fr-en.yaml`](recipes/CVSS/S2ST/hparams/train_fr-en.yaml) | [here]( https://www.dropbox.com/sh/woz4i1p8pkfkqhf/AACmOvr3sS7p95iXl3twCj_xa?dl=0) | - | 24.47 | + + +## CommonLanguage Dataset + +### Language-id + +| Model | Checkpoints | HuggingFace | Error | +| --------| --------| --------| --------| + | [`recipes/CommonLanguage/lang_id/hparams/train_ecapa_tdnn.yaml`](recipes/CommonLanguage/lang_id/hparams/train_ecapa_tdnn.yaml) | [here](https://www.dropbox.com/sh/1fxpzyv67ouwd2c/AAAeMUWYP2f1ycpE1Lp1CwEla?dl=0) | 
[here](https://huggingface.co/speechbrain/lang-id-commonlanguage_ecapa) | 15.1% | + + +## CommonVoice Dataset + +### ASR-seq2seq + +| Model | Checkpoints | HuggingFace | Test-WER | +| --------| --------| --------| --------| + | [`recipes/CommonVoice/ASR/seq2seq/hparams/train_de.yaml`](recipes/CommonVoice/ASR/seq2seq/hparams/train_de.yaml) | [here](https://www.dropbox.com/sh/zgatirb118f79ef/AACmjh-D94nNDWcnVI4Ef5K7a?dl=0) | [here](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-de) | 12.25% | + | [`recipes/CommonVoice/ASR/seq2seq/hparams/train_en.yaml`](recipes/CommonVoice/ASR/seq2seq/hparams/train_en.yaml) | [here](https://www.dropbox.com/sh/h8ged0yu3ztypkh/AAAu-12k_Ceg-tTjuZnrg7dza?dl=0) | [here](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-en) | 23.88% | + | [`recipes/CommonVoice/ASR/seq2seq/hparams/train_fr.yaml`](recipes/CommonVoice/ASR/seq2seq/hparams/train_fr.yaml) | [here](https://www.dropbox.com/sh/07a5lt21wxp98x5/AABhNwmWFaNFyA734bNZUO03a?dl=0) | [here](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-fr) | 14.88% | + | [`recipes/CommonVoice/ASR/seq2seq/hparams/train_it.yaml`](recipes/CommonVoice/ASR/seq2seq/hparams/train_it.yaml) | [here](https://www.dropbox.com/sh/ss59uu0j5boscvp/AAASsiFhlB1nDWPkFX410bzna?dl=0) | [here](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-it) | 17.02% | + | [`recipes/CommonVoice/ASR/seq2seq/hparams/train_rw.yaml`](recipes/CommonVoice/ASR/seq2seq/hparams/train_rw.yaml) | [here](https://www.dropbox.com/sh/i1fv4f8miilqgii/AAB3gE97kmFDA0ISkIDSUW_La?dl=0) | [here](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-rw) | 29.22% | + | [`recipes/CommonVoice/ASR/seq2seq/hparams/train_es.yaml`](recipes/CommonVoice/ASR/seq2seq/hparams/train_es.yaml) | [here](https://www.dropbox.com/sh/r3w0b2tm1p73vft/AADCxdhUwDN6j4PVT9TYe-d5a?dl=0) | [here](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-es) | 14.77% | + + +### ASR-CTC + +| Model | Checkpoints | HuggingFace | 
Test-WER | +| --------| --------| --------| --------| + | [`recipes/CommonVoice/ASR/CTC/hparams/train_en_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_en_with_wav2vec.yaml) | [here](https://www.dropbox.com/scl/fo/gx0szpbectig2r6r6p9vk/APdoN_wWWq_wP4My7w6SvMo?rlkey=v8fhd887bn947yjb45i99wm8p&st=6muft51b&dl=0) | - | 16.16% | + | [`recipes/CommonVoice/ASR/CTC/hparams/train_fr_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_fr_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/0i7esfa8jp3rxpp/AAArdi8IuCRmob2WAS7lg6M4a?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-fr) | 9.71% | + | [`recipes/CommonVoice/ASR/CTC/hparams/train_it_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_it_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/hthxqzh5boq15rn/AACftSab_FM6EFWWPgHpKw82a?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-it) | 7.99% | + | [`recipes/CommonVoice/ASR/CTC/hparams/train_rw_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_rw_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/4iax0l4yfry37gn/AABuQ31JY-Sbyi1VlOJfV7haa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-rw) | 22.52% | + | [`recipes/CommonVoice/ASR/CTC/hparams/train_de_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_de_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/dn7plq4wfsujsi1/AABS1kqB_uqLJVkg-bFkyPpVa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-de) | 8.39% | + | [`recipes/CommonVoice/ASR/CTC/hparams/train_ar_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_ar_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/7tnuqqbr4vy96cc/AAA_5_R0RmqFIiyR0o1nVS4Ia?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-ar) | 28.53% | + | 
[`recipes/CommonVoice/ASR/CTC/hparams/train_es_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_es_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/ejvzgl3d3g8g9su/AACYtbSWbDHvBr06lAb7A4mVa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-es) | 12.67% | + | [`recipes/CommonVoice/ASR/CTC/hparams/train_pt_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_pt_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/80wucrvijdvao2a/AAD6-SZ2_ZZXmlAjOTw6fVloa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-pt) | 21.69% | + | [`recipes/CommonVoice/ASR/CTC/hparams/train_zh-CN_with_wav2vec.yaml`](recipes/CommonVoice/ASR/CTC/hparams/train_zh-CN_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/2bikr81vgufoglf/AABMpD0rLIaZBxjtwBHgrNpga?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-zh-CN) | 23.17% | + + +### ASR-transformer + +| Model | Checkpoints | HuggingFace | Test-WER | +| --------| --------| --------| --------| + | [`recipes/CommonVoice/ASR/transformer/hparams/train_hf_whisper.yaml`](recipes/CommonVoice/ASR/transformer/hparams/train_hf_whisper.yaml) | - | - | 16.96% | + + +## DNS Dataset + +### Enhancement + +| Model | Checkpoints | HuggingFace | valid-PESQ | test-SIG | test-BAK | test-OVRL | +| --------| --------| --------| --------| --------| --------| --------| + | [`recipes/DNS/enhancement/hparams/sepformer-dns-16k.yaml`](recipes/DNS/enhancement/hparams/sepformer-dns-16k.yaml) | [here](https://www.dropbox.com/sh/d3rp5d3gjysvy7c/AACmwcEkm_IFvaW1lt2GdtQka?dl=0) | [here](https://huggingface.co/speechbrain/sepformer-dns4-16k-enhancement) | 2.06 | 2.999 | 3.076 | 2.437 | + + +## DVoice Dataset + +### ASR-CTC + +| Model | Checkpoints | HuggingFace | Test-WER | +| --------| --------| --------| --------| + | [`recipes/DVoice/ASR/CTC/hparams/train_amh_with_wav2vec.yaml`](recipes/DVoice/ASR/CTC/hparams/train_amh_with_wav2vec.yaml) | 
[here](https://www.dropbox.com/sh/pyu40jq1ebv6hcc/AADQO_lAD-F9Q0vlVq8KoXHqa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-dvoice-amharic) | 24.92% | + | [`recipes/DVoice/ASR/CTC/hparams/train_dar_with_wav2vec.yaml`](recipes/DVoice/ASR/CTC/hparams/train_dar_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/pyu40jq1ebv6hcc/AADQO_lAD-F9Q0vlVq8KoXHqa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-dvoice-darija) | 18.28% | + | [`recipes/DVoice/ASR/CTC/hparams/train_fon_with_wav2vec.yaml`](recipes/DVoice/ASR/CTC/hparams/train_fon_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/pyu40jq1ebv6hcc/AADQO_lAD-F9Q0vlVq8KoXHqa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-dvoice-fongbe) | 9.00% | + | [`recipes/DVoice/ASR/CTC/hparams/train_sw_with_wav2vec.yaml`](recipes/DVoice/ASR/CTC/hparams/train_sw_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/pyu40jq1ebv6hcc/AADQO_lAD-F9Q0vlVq8KoXHqa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-dvoice-swahili) | 23.16% | + | [`recipes/DVoice/ASR/CTC/hparams/train_wol_with_wav2vec.yaml`](recipes/DVoice/ASR/CTC/hparams/train_wol_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/pyu40jq1ebv6hcc/AADQO_lAD-F9Q0vlVq8KoXHqa?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-dvoice-wolof) | 16.05% | + + +### Multilingual-ASR-CTC + +| Model | Checkpoints | HuggingFace | WER-Darija | WER-Swahili | WER-Fongbe | WER-Wolof | WER-Amharic | +| --------| --------| --------| --------| --------| --------| --------| --------| + | [`recipes/DVoice/ASR/CTC/hparams/train_multi_with_wav2vec.yaml`](recipes/DVoice/ASR/CTC/hparams/train_multi_with_wav2vec.yaml) | [here](https://www.dropbox.com/sh/pyu40jq1ebv6hcc/AADQO_lAD-F9Q0vlVq8KoXHqa?dl=0) | - | 13.27% | 29.31% | 10.26% | 21.54% | 31.15% | + + +## ESC50 Dataset + +### SoundClassification + +| Model | Checkpoints | HuggingFace | Accuracy | +| --------| --------| --------| --------| + | 
[`recipes/ESC50/classification/hparams/cnn14.yaml`](recipes/ESC50/classification/hparams/cnn14.yaml) | [here](https://www.dropbox.com/sh/fbe7l14o3n8f5rw/AACABE1BQGBbX4j6A1dIhBcSa?dl=0) | - | 82% | + | [`recipes/ESC50/classification/hparams/conv2d.yaml`](recipes/ESC50/classification/hparams/conv2d.yaml) | [here](https://www.dropbox.com/sh/tl2pbfkreov3z7e/AADwwhxBLw1sKvlSWzp6DMEia?dl=0) | - | 75% | + + +## Fisher-Callhome-Spanish Dataset + +### Speech_Translation + +| Model | Checkpoints | HuggingFace | Test-sacrebleu | +| --------| --------| --------| --------| + | [`recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/transformer.yaml`](recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/transformer.yaml) | [here](https://www.dropbox.com/sh/tmh7op8xwthdta0/AACuU9xHDHPs8ToxIIwoTLB0a?dl=0) | - | 47.31 | + | [`recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/conformer.yaml`](recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/conformer.yaml) | [here](https://www.dropbox.com/sh/tmh7op8xwthdta0/AACuU9xHDHPs8ToxIIwoTLB0a?dl=0) | - | 48.04 | + + +## Google-speech-commands Dataset + +### Command_recognition + +| Model | Checkpoints | HuggingFace | Test-accuracy | +| --------| --------| --------| --------| + | [`recipes/Google-speech-commands/hparams/xvect.yaml`](recipes/Google-speech-commands/hparams/xvect.yaml) | [here](https://www.dropbox.com/sh/9n9q42pugbx0g7a/AADihpfGKuWf6gkwQznEFINDa?dl=0) | [here](https://huggingface.co/speechbrain/google_speech_command_xvector) | 97.43% | + | [`recipes/Google-speech-commands/hparams/xvect_leaf.yaml`](recipes/Google-speech-commands/hparams/xvect_leaf.yaml) | [here](https://www.dropbox.com/sh/r63w4gytft4s1x6/AAApP8-pp179QKGCZHV_OuD8a?dl=0) | - | 96.79% | + + +## IEMOCAP Dataset + +### Emotion_recognition + +| Model | Checkpoints | HuggingFace | Test-Accuracy | +| --------| --------| --------| --------| + | 
[`recipes/IEMOCAP/emotion_recognition/hparams/train_with_wav2vec2.yaml`](recipes/IEMOCAP/emotion_recognition/hparams/train_with_wav2vec2.yaml) | [here](https://www.dropbox.com/sh/lmebg4li83sgkhg/AACooPKbNlwd-7n5qSJMbc7ya?dl=0) | [here](https://huggingface.co/speechbrain/emotion-recognition-wav2vec2-IEMOCAP/) | 65.7% | + | [`recipes/IEMOCAP/emotion_recognition/hparams/train.yaml`](recipes/IEMOCAP/emotion_recognition/hparams/train.yaml) | [here](https://www.dropbox.com/sh/ke4fxiry97z58m8/AACPEOM5bIyxo9HxG2mT9v_aa?dl=0) | - | 77.0% | + + +## IWSLT22_lowresource Dataset + +### Speech_Translation + +| Model | Checkpoints | HuggingFace | Test-BLEU | +| --------| --------| --------| --------| + | [`recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_mbart_st.yaml`](recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_mbart_st.yaml) | [here](https://www.dropbox.com/sh/xjo0ou739oksnus/AAAgyrCwywmDRRuUiDnUva2za?dl=0) | - | 7.73 | + | [`recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_nllb_st.yaml`](recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_nllb_st.yaml) | [here](https://www.dropbox.com/sh/spp2ijgfdbzuz26/AABkJ97e72D7aKzNLTm1qmWEa?dl=0) | - | 8.70 | + | [`recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_mbart_st.yaml`](recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_mbart_st.yaml) | [here](https://www.dropbox.com/sh/98s1xyc3chreaw6/AABom3FnwY5SsIvg4en9tWC2a?dl=0) | - | 10.28 | + | [`recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_nllb_st.yaml`](recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_nllb_st.yaml) | [here](https://www.dropbox.com/sh/ekkpl9c3kxsgllj/AABa0q2LrJe_o7JF-TTbfxZ-a?dl=0) | - | 11.32 | + + +## LibriMix Dataset + +### Separation + +| Model | Checkpoints | HuggingFace | SI-SNR | +| --------| --------| --------| --------| + | [`recipes/LibriMix/separation/hparams/sepformer-libri2mix.yaml`](recipes/LibriMix/separation/hparams/sepformer-libri2mix.yaml) 
| [here](https://www.dropbox.com/sh/skkiozml92xtgdo/AAD0eJxgbCTK03kAaILytGtVa?dl=0) | - | 20.4dB | + | [`recipes/LibriMix/separation/hparams/sepformer-libri3mix.yaml`](recipes/LibriMix/separation/hparams/sepformer-libri3mix.yaml) | [here](https://www.dropbox.com/sh/kmyz7tts9tyg198/AACsDcRwKvelXxEB-k5q1OaIa?dl=0) | - | 19.0dB | + + +## LibriParty Dataset + +### VAD + +| Model | Checkpoints | HuggingFace | Test-Precision | Recall | F-Score | +| --------| --------| --------| --------| --------| --------| + | [`recipes/LibriParty/VAD/hparams/train.yaml`](recipes/LibriParty/VAD/hparams/train.yaml) | [here](https://www.dropbox.com/sh/6yguuzn4pybjasd/AABpUF8LAQ8d2TJyC8aK2OBga?dl=0 ) | [here](https://huggingface.co/speechbrain/vad-crdnn-libriparty) | 0.9518 | 0.9437 | 0.9477 | + + +## LibriSpeech Dataset + +### ASR-Transformers + +| Model | Checkpoints | HuggingFace | Test_clean-WER | Test_other-WER | +| --------| --------| --------| --------| --------| + | [`recipes/LibriSpeech/ASR/transformer/hparams/conformer_small.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/conformer_small.yaml) | [here](https://www.dropbox.com/sh/s0x6ni124858b8i/AAALaCH6sGTMRUVTjh8Tm8Jwa?dl=0) | [here](https://huggingface.co/speechbrain/asr-conformersmall-transformerlm-librispeech) | 2.49% | 6.10% | + | [`recipes/LibriSpeech/ASR/transformer/hparams/transformer.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/transformer.yaml) | [here](https://www.dropbox.com/sh/653kq8h2k87md4p/AAByAaAryXtQKpRzYtzV9ih5a?dl=0) | [here](https://huggingface.co/speechbrain/asr-transformer-transformerlm-librispeech) | 2.27% | 5.53% | + | [`recipes/LibriSpeech/ASR/transformer/hparams/conformer_large.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/conformer_large.yaml) | [here](https://www.dropbox.com/scl/fo/9we244tgdf47ay20hrdoz/AKnoqQ13nLwSv1ITeJEQ3wY?rlkey=05o5jiszr8rhj6dlprw87t2x4&st=u2odesyk&dl=0) | - | 2.01% | 4.52% | + | 
[`recipes/LibriSpeech/ASR/transformer/hparams/branchformer_large.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/branchformer_large.yaml) | [here](https://www.dropbox.com/scl/fo/qhtds5rrdvhhhjywa7ovw/AMiIL5YvQENw5JKVpzXlP5o?rlkey=hz8vlpy3qf9kcyfx0cox089e6&st=ufckv6tb&dl=0) | - | 2.04% | 4.12% | + | [`recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_22M.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_22M.yaml) | [here](https://www.dropbox.com/sh/30xsmqj13jexzoh/AACvZNtX1Fsr0Wa1Z3C9rHLXa?dl=0) | - | 2.23% | 4.54% | + | [`recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_8M.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_8M.yaml) | [here](https://www.dropbox.com/sh/8jc96avmivr8fke/AABrFEhtWy_3-Q7BHhkh0enwa?dl=0) | - | 2.55% | 6.61% | + | [`recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_25M.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_25M.yaml) | - | - | 2.36% | 6.89% | + | [`recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_13M.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_13M.yaml) | - | - | 2.54% | 6.58% | + | [`recipes/LibriSpeech/ASR/transformer/hparams/train_hf_whisper.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/train_hf_whisper.yaml) | - | - | | + | [`recipes/LibriSpeech/ASR/transformer/hparams/bayesspeech.yaml`](recipes/LibriSpeech/ASR/transformer/hparams/bayesspeech.yaml) | [here](https://www.dropbox.com/scl/fo/cdken4jqfj96ev1v84jxm/h?rlkey=25eu1ytgm5ac51zqj8p65zwxd&dl=0) | - | 2.84% | 6.27% | + + +### ASR-Transducers + +| Model | Checkpoints | HuggingFace | Test_clean-WER | Test_other-WER | +| --------| --------| --------| --------| --------| + | [`recipes/LibriSpeech/ASR/transducer/hparams/conformer_transducer.yaml`](recipes/LibriSpeech/ASR/transducer/hparams/conformer_transducer.yaml) | [here](https://www.dropbox.com/scl/fo/kl1eikmoauygwqcx8ok4r/AMkreKLzHtxPtqnoXzUerko?rlkey=juk374k210b76lbnblh7or95d&st=1ugwe9e3&dl=0) | 
- | 2.72% | 6.47% | + + +### ASR-CTC + +| Model | Checkpoints | HuggingFace | Test_clean-WER | Test_other-WER | +| --------| --------| --------| --------| --------| + | [`recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec.yaml`](recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec.yaml) | [here](https://www.dropbox.com/sh/qj2ps85g8oiicrj/AAAxlkQw5Pfo0M9EyHMi8iAra?dl=0) | [here](https://huggingface.co/speechbrain/asr-wav2vec2-librispeech) | 1.65% | 3.67% | + | [`recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_transformer_rescoring.yaml`](recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_transformer_rescoring.yaml) | [here](https://www.dropbox.com/sh/ijqalvre7mm08ng/AAD_hsN-8dBneUMMkELsOOxga?dl=0) | - | 1.57% | 3.37% | + + +### G2P + +| Model | Checkpoints | HuggingFace | PER-Test | +| --------| --------| --------| --------| + | [`recipes/LibriSpeech/G2P/hparams/hparams_g2p_rnn.yaml`](recipes/LibriSpeech/G2P/hparams/hparams_g2p_rnn.yaml) | [here](https://www.dropbox.com/sh/qmcl1obp8pxqaap/AAC3yXvjkfJ3mL-RKyAUxPdNa?dl=0) | - | 2.72% | + | [`recipes/LibriSpeech/G2P/hparams/hparams_g2p_transformer.yaml`](recipes/LibriSpeech/G2P/hparams/hparams_g2p_transformer.yaml) | [here](https://www.dropbox.com/sh/zhrxg7anuhje7e8/AADTeJtdsja_wClkE2DsF9Ewa?dl=0) | [here](https://huggingface.co/speechbrain/soundchoice-g2p) | 2.89% | + + +### ASR-Seq2Seq + +| Model | Checkpoints | HuggingFace | Test_clean-WER | Test_other-WER | +| --------| --------| --------| --------| --------| + | [`recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_5000.yaml`](recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_5000.yaml) | [here](https://www.dropbox.com/sh/1ycv07gyxdq8hdl/AABUDYzza4SLYtY45RcGf2_0a?dl=0) | [here](https://huggingface.co/speechbrain/asr-crdnn-transformerlm-librispeech) | 2.89% | 8.09% | + + +## MEDIA Dataset + +### ASR + +| Model | Checkpoints | HuggingFace | Test-ChER | Test-CER | +| --------| --------| --------| --------| --------| + | 
[`recipes/MEDIA/ASR/CTC/hparams/train_hf_wav2vec.yaml`](recipes/MEDIA/ASR/CTC/hparams/train_hf_wav2vec.yaml) | - | [here](https://huggingface.co/speechbrain/asr-wav2vec2-ctc-MEDIA) | 7.78% | 4.78% | + + +### SLU + +| Model | Checkpoints | HuggingFace | Test-ChER | Test-CER | Test-CVER | +| --------| --------| --------| --------| --------| --------| + | [`recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_full.yaml`](recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_full.yaml) | - | [here](https://huggingface.co/speechbrain/slu-wav2vec2-ctc-MEDIA-full) | 7.46% | 20.10% | 31.41% | + | [`recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_relax.yaml`](recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_relax.yaml) | - | [here](https://huggingface.co/speechbrain/slu-wav2vec2-ctc-MEDIA-relax) | 7.78% | 24.88% | 35.77% | + + +## MultiWOZ Dataset + +### Response-Generation + +| Model | Checkpoints | HuggingFace | Test-PPL | Test_BLEU-4 | +| --------| --------| --------| --------| --------| + | [`recipes/MultiWOZ/response_generation/gpt/hparams/train_gpt.yaml`](recipes/MultiWOZ/response_generation/gpt/hparams/train_gpt.yaml) | [here](https://www.dropbox.com/sh/vm8f5iavohr4zz9/AACrkOxXuxsrvJy4Cjpih9bQa?dl=0) | [here](https://huggingface.co/speechbrain/MultiWOZ-GPT-Response_Generation) | 4.01 | 2.54e-04 | + | [`recipes/MultiWOZ/response_generation/llama2/hparams/train_llama2.yaml`](recipes/MultiWOZ/response_generation/llama2/hparams/train_llama2.yaml) | [here](https://www.dropbox.com/sh/d093vsje1d7ijj9/AAA-nHEd_MwNEFJfBGLmXxJra?dl=0) | [here](https://huggingface.co/speechbrain/MultiWOZ-Llama2-Response_Generation) | 2.90 | 7.45e-04 | + + +## REAL-M Dataset + +### Sisnr-estimation + +| Model | Checkpoints | HuggingFace | L1-Error | +| --------| --------| --------| --------| + | [`recipes/REAL-M/sisnr-estimation/hparams/pool_sisnrestimator.yaml`](recipes/REAL-M/sisnr-estimation/hparams/pool_sisnrestimator.yaml) | [here](https://www.dropbox.com/sh/n55lm8i5z51pbm1/AABHfByOEy__UP_bmT4GJvSba?dl=0) 
| [here](https://huggingface.co/speechbrain/REAL-M-sisnr-estimator) | 1.71dB | + + +## RescueSpeech Dataset + +### ASR+enhancement + +| Model | Checkpoints | HuggingFace | SISNRi | SDRi | PESQ | STOI | WER | +| --------| --------| --------| --------| --------| --------| --------| --------| + | [`recipes/RescueSpeech/ASR/noise-robust/hparams/robust_asr_16k.yaml`](recipes/RescueSpeech/ASR/noise-robust/hparams/robust_asr_16k.yaml) | [here](https://www.dropbox.com/sh/kqs2ld14fm20cxl/AACiobSLdNtXhm-4Y3IIbTeia?dl=0) | [here](https://huggingface.co/sangeet2020/noisy-whisper-resucespeech) | 7.482 | 8.011 | 2.083 | 0.854 | 45.29% | + + +## SLURP Dataset + +### SLU + +| Model | Checkpoints | HuggingFace | scenario-accuracy | action-accuracy | intent-accuracy | +| --------| --------| --------| --------| --------| --------| + | [`recipes/SLURP/NLU/hparams/train.yaml`](recipes/SLURP/NLU/hparams/train.yaml) | [here](https://www.dropbox.com/scl/fo/c0rm2ja8oxus8q27om8ve/h?rlkey=irxzl1ea8g7e6ipk0vuc288zh&dl=0 ) | - | 90.81% | 88.29% | 87.28% | + | [`recipes/SLURP/direct/hparams/train.yaml`](recipes/SLURP/direct/hparams/train.yaml) | [here](https://www.dropbox.com/scl/fo/c0rm2ja8oxus8q27om8ve/h?rlkey=irxzl1ea8g7e6ipk0vuc288zh&dl=0 ) | - | 81.73% | 77.11% | 75.05% | + | [`recipes/SLURP/direct/hparams/train_with_wav2vec2.yaml`](recipes/SLURP/direct/hparams/train_with_wav2vec2.yaml) | [here](https://www.dropbox.com/scl/fo/c0rm2ja8oxus8q27om8ve/h?rlkey=irxzl1ea8g7e6ipk0vuc288zh&dl=0 ) | [here](https://huggingface.co/speechbrain/SLU-direct-SLURP-hubert-enc) | 91.24% | 88.47% | 87.55% | + + +## Switchboard Dataset + +### ASR + +| Model | Checkpoints | HuggingFace | Swbd-WER | Callhome-WER | Eval2000-WER | +| --------| --------| --------| --------| --------| --------| + | [`recipes/Switchboard/ASR/CTC/hparams/train_with_wav2vec.yaml`](recipes/Switchboard/ASR/CTC/hparams/train_with_wav2vec.yaml) | - | [here](https://huggingface.co/speechbrain/asr-wav2vec2-switchboard) | 8.76% | 14.67% | 
11.78% | + | [`recipes/Switchboard/ASR/seq2seq/hparams/train_BPE_2000.yaml`](recipes/Switchboard/ASR/seq2seq/hparams/train_BPE_2000.yaml) | - | [here](https://huggingface.co/speechbrain/asr-crdnn-switchboard) | 16.90% | 25.12% | 20.71% | + | [`recipes/Switchboard/ASR/transformer/hparams/transformer.yaml`](recipes/Switchboard/ASR/transformer/hparams/transformer.yaml) | - | [here](https://huggingface.co/speechbrain/asr-transformer-switchboard) | 9.80% | 17.89% | 13.94% | + + +## TIMIT Dataset + +### ASR + +| Model | Checkpoints | HuggingFace | Test-PER | +| --------| --------| --------| --------| + | [`recipes/TIMIT/ASR/CTC/hparams/train.yaml`](recipes/TIMIT/ASR/CTC/hparams/train.yaml) | [here](https://www.dropbox.com/sh/059jnwdass8v45u/AADTjh5DYdYKuZsgH9HXGx0Sa?dl=0) | - | 14.78% | + | [`recipes/TIMIT/ASR/seq2seq/hparams/train.yaml`](recipes/TIMIT/ASR/seq2seq/hparams/train.yaml) | [here](https://www.dropbox.com/sh/059jnwdass8v45u/AADTjh5DYdYKuZsgH9HXGx0Sa?dl=0) | - | 14.07% | + | [`recipes/TIMIT/ASR/seq2seq/hparams/train_with_wav2vec2.yaml`](recipes/TIMIT/ASR/seq2seq/hparams/train_with_wav2vec2.yaml) | [here](https://www.dropbox.com/sh/059jnwdass8v45u/AADTjh5DYdYKuZsgH9HXGx0Sa?dl=0) | - | 8.04% | + | [`recipes/TIMIT/ASR/transducer/hparams/train.yaml`](recipes/TIMIT/ASR/transducer/hparams/train.yaml) | [here](https://www.dropbox.com/sh/059jnwdass8v45u/AADTjh5DYdYKuZsgH9HXGx0Sa?dl=0) | - | 14.12% | + | [`recipes/TIMIT/ASR/transducer/hparams/train_wav2vec.yaml`](recipes/TIMIT/ASR/transducer/hparams/train_wav2vec.yaml) | [here](https://www.dropbox.com/sh/059jnwdass8v45u/AADTjh5DYdYKuZsgH9HXGx0Sa?dl=0) | - | 8.91% | + + +## Tedlium2 Dataset + +### ASR + +| Model | Checkpoints | HuggingFace | Test-WER_No_LM | +| --------| --------| --------| --------| + | [`recipes/Tedlium2/ASR/transformer/hparams/branchformer_large.yaml`](recipes/Tedlium2/ASR/transformer/hparams/branchformer_large.yaml) | [here](https://www.dropbox.com/sh/el523uofs96czfi/AADgTd838pKo2aR8fhqVOh-Oa?dl=0) | 
[here](https://huggingface.co/speechbrain/asr-branchformer-large-tedlium2) | 8.11% | + + +## UrbanSound8k Dataset + +### SoundClassification + +| Model | Checkpoints | HuggingFace | Accuracy | +| --------| --------| --------| --------| + | [`recipes/UrbanSound8k/SoundClassification/hparams/train_ecapa_tdnn.yaml`](recipes/UrbanSound8k/SoundClassification/hparams/train_ecapa_tdnn.yaml) | [here](https://www.dropbox.com/sh/f61325e3w8h5yy2/AADm3E3PXFi1NYA7-QW3H-Ata?dl=0 ) | [here](https://huggingface.co/speechbrain/urbansound8k_ecapa) | 75.4% | + + +## Voicebank Dataset + +### Dereverberation + +| Model | Checkpoints | HuggingFace | PESQ | +| --------| --------| --------| --------| + | [`recipes/Voicebank/dereverb/MetricGAN-U/hparams/train_dereverb.yaml`](recipes/Voicebank/dereverb/MetricGAN-U/hparams/train_dereverb.yaml) | [here](https://www.dropbox.com/sh/r94qn1f5lq9r3p7/AAAZfisBhhkS8cwpzy1O5ADUa?dl=0 ) | - | 2.07 | + | [`recipes/Voicebank/dereverb/spectral_mask/hparams/train.yaml`](recipes/Voicebank/dereverb/spectral_mask/hparams/train.yaml) | [here](https://www.dropbox.com/sh/pw8aer8gcsrdbx7/AADknh7plHF5GBeTRK9VkIKga?dl=0 ) | - | 2.35 | + + +### ASR + +| Model | Checkpoints | HuggingFace | Test-PER | +| --------| --------| --------| --------| + | [`recipes/Voicebank/ASR/CTC/hparams/train.yaml`](recipes/Voicebank/ASR/CTC/hparams/train.yaml) | [here](https://www.dropbox.com/sh/w4j0auezgmmo005/AAAjKcoJMdLDp0Pqe3m7CLVaa?dl=0) | - | 10.12% | + + +### ASR+enhancement + +| Model | Checkpoints | HuggingFace | PESQ | COVL | test-WER | +| --------| --------| --------| --------| --------| --------| + | [`recipes/Voicebank/MTL/ASR_enhance/hparams/robust_asr.yaml`](recipes/Voicebank/MTL/ASR_enhance/hparams/robust_asr.yaml) | [here](https://www.dropbox.com/sh/azvcbvu8g5hpgm1/AACDc6QxtNMGZ3IoZLrDiU0Va?dl=0) | [here](https://huggingface.co/speechbrain/mtl-mimic-voicebank) | 3.05 | 3.74 | 2.80 | + + +### Enhancement + +| Model | Checkpoints | HuggingFace | PESQ | +| --------| 
--------| --------| --------| + | [`recipes/Voicebank/enhance/MetricGAN/hparams/train.yaml`](recipes/Voicebank/enhance/MetricGAN/hparams/train.yaml) | [here](https://www.dropbox.com/sh/n5q9vjn0yn1qvk6/AAB-S7i2-XzVm6ux0MrXCvqya?dl=0 ) | [here](https://huggingface.co/speechbrain/metricgan-plus-voicebank) | 3.15 | + | [`recipes/Voicebank/enhance/SEGAN/hparams/train.yaml`](recipes/Voicebank/enhance/SEGAN/hparams/train.yaml) | [here](https://www.dropbox.com/sh/ez0folswdbqiad4/AADDasepeoCkneyiczjCcvaOa?dl=0 ) | - | 2.38 | + | [`recipes/Voicebank/enhance/spectral_mask/hparams/train.yaml`](recipes/Voicebank/enhance/spectral_mask/hparams/train.yaml) | [here](https://www.dropbox.com/sh/n5q9vjn0yn1qvk6/AAB-S7i2-XzVm6ux0MrXCvqya?dl=0 ) | - | 2.65 | + + +## VoxCeleb Dataset + +### Speaker_recognition + +| Model | Checkpoints | HuggingFace | EER | +| --------| --------| --------| --------| + | [`recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn.yaml`](recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn.yaml) | [here](https://www.dropbox.com/sh/ab1ma1lnmskedo8/AADsmgOLPdEjSF6wV3KyhNG1a?dl=0) | [here](https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb) | 0.80% | + | [`recipes/VoxCeleb/SpeakerRec/hparams/train_x_vectors.yaml`](recipes/VoxCeleb/SpeakerRec/hparams/train_x_vectors.yaml) | [here](https://www.dropbox.com/sh/ab1ma1lnmskedo8/AADsmgOLPdEjSF6wV3KyhNG1a?dl=0) | [here](https://huggingface.co/speechbrain/spkrec-xvect-voxceleb) | 3.23% | + | [`recipes/VoxCeleb/SpeakerRec/hparams/train_resnet.yaml`](recipes/VoxCeleb/SpeakerRec/hparams/train_resnet.yaml) | [here](https://www.dropbox.com/sh/ab1ma1lnmskedo8/AADsmgOLPdEjSF6wV3KyhNG1a?dl=0) | [here](https://huggingface.co/speechbrain/spkrec-resnet-voxceleb) | 0.95% | + + +## VoxLingua107 Dataset + +### Language-id + +| Model | Checkpoints | HuggingFace | Accuracy | +| --------| --------| --------| --------| + | [`recipes/VoxLingua107/lang_id/hparams/train_ecapa.yaml`](recipes/VoxLingua107/lang_id/hparams/train_ecapa.yaml) | 
[here](https://www.dropbox.com/sh/72gpuic5m4x8ztz/AAB5R-RVIEsXJtRH8SGkb_oCa?dl=0 ) | [here](https://huggingface.co/speechbrain/lang-id-voxlingua107-ecapa) | 93.3% | + + +## VoxPopuli Dataset + +## WHAMandWHAMR Dataset + +### Separation + +| Model | Checkpoints | HuggingFace | SI-SNR | +| --------| --------| --------| --------| + | [`recipes/WHAMandWHAMR/separation/hparams/sepformer-wham.yaml`](recipes/WHAMandWHAMR/separation/hparams/sepformer-wham.yaml) | [here](https://www.dropbox.com/sh/sfrgb3xivri432e/AACQodNmiDIKrB9vCeCFUDWUa?dl=0) | [here](https://huggingface.co/speechbrain/sepformer-wham) | 16.5 | + | [`recipes/WHAMandWHAMR/separation/hparams/sepformer-whamr.yaml`](recipes/WHAMandWHAMR/separation/hparams/sepformer-whamr.yaml) | [here](https://www.dropbox.com/sh/1sia32z01xbfgvu/AADditsqaTyfN3N6tzfEFPica?dl=0) | [here](https://huggingface.co/speechbrain/sepformer-whamr) | 14.0 | + + +### Enhancement + +| Model | Checkpoints | HuggingFace | SI-SNR | PESQ | +| --------| --------| --------| --------| --------| + | [`recipes/WHAMandWHAMR/enhancement/hparams/sepformer-wham.yaml`](recipes/WHAMandWHAMR/enhancement/hparams/sepformer-wham.yaml) | [here](https://www.dropbox.com/sh/pxz2xbj76ijd5ci/AAD3c3dHyszk4oHJaa26K1_ha?dl=0) | [here](https://huggingface.co/speechbrain/sepformer-wham-enhancement) | 14.4 | 3.05 | + | [`recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr.yaml`](recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr.yaml) | [here](https://www.dropbox.com/sh/kb0xrvi5k168ou2/AAAPB2U6HyyUT1gMoUH8gxQCa?dl=0) | [here](https://huggingface.co/speechbrain/sepformer-whamr-enhancement) | 10.6 | 2.84 | + + +## WSJ0Mix Dataset + +### Separation (2mix) + +| Model | Checkpoints | HuggingFace | SI-SNRi | +| --------| --------| --------| --------| + | [`recipes/WSJ0Mix/separation/hparams/convtasnet.yaml`](recipes/WSJ0Mix/separation/hparams/convtasnet.yaml) | [here](https://www.dropbox.com/sh/hdpxj47signsay7/AABbDjGoyQesnFxjg0APxl7qa?dl=0) | - | 14.8dB | + | 
[`recipes/WSJ0Mix/separation/hparams/dprnn.yaml`](recipes/WSJ0Mix/separation/hparams/dprnn.yaml) | [here](https://www.dropbox.com/sh/o8fohu5s07h4bnw/AADPNyR1E3Q4aRobg3FtXTwVa?dl=0) | - | 18.5dB | + | [`recipes/WSJ0Mix/separation/hparams/resepformer.yaml`](recipes/WSJ0Mix/separation/hparams/resepformer.yaml) | [here](https://www.dropbox.com/sh/obnu87zhubn1iia/AAAbn_jzqzIfeqaE9YQ7ujyQa?dl=0) | [here](https://huggingface.co/speechbrain/resepformer-wsj02mix) | 18.6dB | + | [`recipes/WSJ0Mix/separation/hparams/sepformer.yaml`](recipes/WSJ0Mix/separation/hparams/sepformer.yaml) | [here](https://www.dropbox.com/sh/9klsqadkhin6fw1/AADEqGdT98rcqxVgFlfki7Gva?dl=0 ) | [here](https://huggingface.co/speechbrain/sepformer-wsj02mix) | 22.4dB | + | [`recipes/WSJ0Mix/separation/hparams/skim.yaml`](recipes/WSJ0Mix/separation/hparams/skim.yaml) | [here](https://www.dropbox.com/sh/zy0l5rc8abxdfp3/AAA2ngB74fugqpWXmjZo5v3wa?dl=0) | [here](https://huggingface.co/speechbrain/resepformer-wsj02mix ) | 18.1dB | + + +## ZaionEmotionDataset Dataset + +### Emotion_Diarization + +| Model | Checkpoints | HuggingFace | EDER | +| --------| --------| --------| --------| + | [`recipes/ZaionEmotionDataset/emotion_diarization/hparams/train.yaml`](recipes/ZaionEmotionDataset/emotion_diarization/hparams/train.yaml) | [here](https://www.dropbox.com/sh/woudm1v31a7vyp5/AADAMxpQOXaxf8E_1hX202GJa?dl=0) | [here](https://huggingface.co/speechbrain/emotion-diarization-wavlm-large) | 30.2% | + + +## fluent-speech-commands Dataset + +### SLU + +| Model | Checkpoints | HuggingFace | Test-accuracy | +| --------| --------| --------| --------| + | [`recipes/fluent-speech-commands/direct/hparams/train.yaml`](recipes/fluent-speech-commands/direct/hparams/train.yaml) | [here](https://www.dropbox.com/sh/wal9ap0go9f66qw/AADBVlGs_E2pEU4vYJgEe3Fba?dl=0) | - | 99.60% | + + +## timers-and-such Dataset + +### SLU + +| Model | Checkpoints | HuggingFace | Accuracy-Test_real | +| --------| --------| --------| --------| + | 
[`recipes/timers-and-such/decoupled/hparams/train_TAS_LM.yaml`](recipes/timers-and-such/decoupled/hparams/train_TAS_LM.yaml) | [here](https://www.dropbox.com/sh/gmmum179ig9wz0x/AAAOSOi11yVymGXHp9LzYNrqa?dl=0) | - | 46.8% | + | [`recipes/timers-and-such/direct/hparams/train.yaml`](recipes/timers-and-such/direct/hparams/train.yaml) | [here](https://www.dropbox.com/sh/gmmum179ig9wz0x/AAAOSOi11yVymGXHp9LzYNrqa?dl=0) | [here](https://huggingface.co/speechbrain/slu-timers-and-such-direct-librispeech-asr) | 77.5% | + | [`recipes/timers-and-such/direct/hparams/train_with_wav2vec2.yaml`](recipes/timers-and-such/direct/hparams/train_with_wav2vec2.yaml) | [here](https://www.dropbox.com/sh/gmmum179ig9wz0x/AAAOSOi11yVymGXHp9LzYNrqa?dl=0) | - | 94.0% | + | [`recipes/timers-and-such/multistage/hparams/train_TAS_LM.yaml`](recipes/timers-and-such/multistage/hparams/train_TAS_LM.yaml) | [here](https://www.dropbox.com/sh/gmmum179ig9wz0x/AAAOSOi11yVymGXHp9LzYNrqa?dl=0) | - | 72.6% | + + diff --git a/README.md b/README.md index e13764fb53..9b636e267c 100644 --- a/README.md +++ b/README.md @@ -1,238 +1,289 @@

- SpeechBrain Logo + SpeechBrain Logo

-[![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/SpeechBrain1/) +[![Typing SVG](https://readme-typing-svg.demolab.com?font=Fira+Code&size=40&duration=7000&pause=1000&random=false&width=1200&height=100&lines=Simplify+Conversational+AI+Development)](https://git.io/typing-svg) -SpeechBrain is an **open-source** and **all-in-one** conversational AI toolkit based on PyTorch. - -The goal is to create a **single**, **flexible**, and **user-friendly** toolkit that can be used to easily develop **state-of-the-art speech technologies**, including systems for **speech recognition**, **speaker recognition**, **speech enhancement**, **speech separation**, **language identification**, **multi-microphone signal processing**, and many others. - -drawing **Please, star our project on github (see top-right corner) if you appreciate our contribution to the community!** - -*SpeechBrain is currently in beta*. - -| **[Discourse](https://speechbrain.discourse.group)** | **[Tutorials](https://speechbrain.github.io/tutorial_basics.html)** | **[Website](https://speechbrain.github.io/)** | **[Documentation](https://speechbrain.readthedocs.io/en/latest/index.html)** | **[Contributing](https://speechbrain.readthedocs.io/en/latest/contributing.html)** | **[HuggingFace](https://huggingface.co/speechbrain)** | - -# Key features - -SpeechBrain provides various useful tools to speed up and facilitate research on speech and language technologies: -- Various pretrained models nicely integrated with drawing (HuggingFace) in our official [organization account](https://huggingface.co/speechbrain). These models are coupled with easy-inference interfaces that facilitate their use. To help everyone replicate our results, we also provide all the experimental results and folders (including logs, training curves, etc.) in a shared Google Drive folder. -- The `Brain` class is a fully-customizable tool for managing training and evaluation loops over data. 
The annoying details of training loops are handled for you while retaining complete flexibility to override any part of the process when needed. -- A YAML-based hyperparameter file that specifies all the hyperparameters, from individual numbers (e.g., learning rate) to complete objects (e.g., custom models). This elegant solution dramatically simplifies the training script. -- Multi-GPU training and inference with PyTorch Data-Parallel or Distributed Data-Parallel. -- Mixed-precision for faster training. -- A transparent and entirely customizable data input and output pipeline. SpeechBrain follows the PyTorch data loading style and enables users to customize the I/O pipelines (e.g., adding on-the-fly downsampling, BPE tokenization, sorting, threshold ...). -- On-the-fly dynamic batching -- Efficient reading of large datasets from a shared Network File System (NFS) via [WebDataset](https://github.com/webdataset/webdataset). -- Interface with [HuggingFace](https://huggingface.co/speechbrain) for popular models such as wav2vec2 and Hubert. -- Interface with [Orion](https://github.com/Epistimio/orion) for hyperparameter tuning. - - -### Speech recognition - -SpeechBrain supports state-of-the-art methods for end-to-end speech recognition: -- Support of wav2vec 2.0 pretrained model with finetuning. -- State-of-the-art performance or comparable with other existing toolkits in several ASR benchmarks. -- Easily customizable neural language models, including RNNLM and TransformerLM. We also share several pre-trained models that you can easily use (more to come!). We support the Hugging Face `dataset` to facilitate the training over a large text dataset. -- Hybrid CTC/Attention end-to-end ASR: - - Many available encoders: CRDNN (VGG + {LSTM,GRU,LiGRU} + DNN), ResNet, SincNet, vanilla transformers, context net-based transformers or conformers. 
Thanks to the flexibility of SpeechBrain, any fully customized encoder could be connected to the CTC/attention decoder and trained in a few hours of work. The decoder is fully customizable: LSTM, GRU, LiGRU, transformer, or your neural network! - - Optimised and fast beam search on both CPUs and GPUs. -- Transducer end-to-end ASR with both a custom Numba loss and the torchaudio one. Any encoder or decoder can be plugged into the transducer ranging from VGG+RNN+DNN to conformers. -- Pre-trained ASR models for transcribing an audio file or extracting features for a downstream task. - -### Feature extraction and augmentation - -SpeechBrain provides efficient (GPU-friendly) speech augmentation and feature extraction pipelines: -- On-the-fly and fully-differentiable acoustic feature extraction: filter banks can be learned. This strategy simplifies the training pipeline (you don't have to dump features on disk). -- On-the-fly feature normalization (global, sentence, batch, or speaker level). -- On-the-fly environmental corruptions based on noise, reverberation, and babble for robust model training. -- On-the-fly frequency and time domain SpecAugment with speed augmentation. -- We support both SinConv and LEAF convolutional frontends. - -### Speech enhancement and separation -- Recipes for spectral masking, spectral mapping, and time-domain speech enhancement. -- Multiple sophisticated enhancement losses, including differentiable STOI loss, MetricGAN, and mimic loss. -- State-of-the-art performance on speech separation with Conv-TasNet, DualPath RNN, SepFormer, and RE-SepFormer. - -### Speaker recognition, identification and diarization -SpeechBrain provides different models for speaker recognition, identification, and diarization on different datasets: -- State-of-the-art performance on speaker recognition and diarization based on ECAPA-TDNN models. -- Original Xvectors implementation (inspired by Kaldi) with PLDA. 
-- Spectral clustering for speaker diarization (combined with speakers embeddings). -- Libraries to extract speaker embeddings with a pre-trained model on your data. - -### Text-to-Speech (TTS) and Vocoders -- Recipes for training TTS systems such as [Tacotron2](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LJSpeech) with LJSpeech. -- Recipes for training Vocoders such as [HiFIGAN](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LJSpeech). - -### Grapheme-to-Phoneme (G2P) -We have models for converting characters into a sequence of phonemes. In particular, we have Transformer- and RNN-based models operating at the sentence level (i.e, converting a full sentence into a corresponding sequence of phonemes). The models are trained with both data from Wikipedia and LibriSpeech. - -### Language Identification -SpeechBrain provides different models for language identification. -In particular, our best model is based on an ECAPA-TDNN trained with the [voxlingua107 dataset](http://bark.phon.ioc.ee/voxlingua107/). - -### Speech Translation -- Recipes for transformer and conformer-based end-to-end speech translation. -- Possibility to choose between normal training (Attention), multi-objectives (CTC+Attention), and multitasks (ST + ASR). - -### Self-Supervised Learning of Speech Representations -- Recipes for wav2vec 2.0 pre-training with multiple GPUs compatible with HuggingFace models. - -### Multi-microphone processing -Combining multiple microphones is a powerful approach to achieving robustness in adverse acoustic environments: -- Delay-and-sum, MVDR, and GeV beamforming. -- Speaker localization. - - - -### Performance -The recipes released with speechbrain implement speech processing systems with competitive or state-of-the-art performance. 
In the following, we report the best performance achieved on some popular benchmarks: - -| Dataset | Task | System | Performance | -| ------------- |:-------------:| -----:|-----:| -| LibriSpeech | Speech Recognition | wav2vec2 | WER=1.90% (test-clean) | -| LibriSpeech | Speech Recognition | CNN + Transformer | WER=2.26% (test-clean) | -| TIMIT | Speech Recognition | CRDNN + distillation | PER=13.1% (test) | -| TIMIT | Speech Recognition | wav2vec2 + CTC/Att. | PER=8.04% (test) | -| CommonVoice (English) | Speech Recognition | wav2vec2 + CTC | WER=15.69% (test) | -| CommonVoice (French) | Speech Recognition | wav2vec2 + CTC | WER=9.96% (test) | -| CommonVoice (Italian) | Speech Recognition | wav2vec2 + seq2seq | WER=9.86% (test) | -| CommonVoice (Kinyarwanda) | Speech Recognition | wav2vec2 + seq2seq | WER=18.91% (test) | -| AISHELL (Mandarin) | Speech Recognition | wav2vec2 + seq2seq | CER=5.58% (test) | -| Fisher-callhome (spanish) | Speech translation | conformer (ST + ASR) | BLEU=48.04 (test) | -| VoxCeleb2 | Speaker Verification | ECAPA-TDNN | EER=0.80% (vox1-test) | -| AMI | Speaker Diarization | ECAPA-TDNN | DER=3.01% (eval)| -| VoiceBank | Speech Enhancement | MetricGAN+| PESQ=3.08 (test)| -| WSJ2MIX | Speech Separation | SepFormer| SDRi=22.6 dB (test)| -| WSJ3MIX | Speech Separation | SepFormer| SDRi=20.0 dB (test)| -| WHAM! | Speech Separation | SepFormer| SDRi= 16.4 dB (test)| -| WHAMR! 
| Speech Separation | SepFormer| SDRi= 14.0 dB (test)| -| Libri2Mix | Speech Separation | SepFormer| SDRi= 20.6 dB (test-clean)| -| Libri3Mix | Speech Separation | SepFormer| SDRi= 18.7 dB (test-clean)| -| LibryParty | Voice Activity Detection | CRDNN | F-score=0.9477 (test) | -| IEMOCAP | Emotion Recognition | wav2vec | Accuracy=79.8% (test) | -| CommonLanguage | Language Recognition | ECAPA-TDNN | Accuracy=84.9% (test) | -| Timers and Such | Spoken Language Understanding | CRDNN | Intent Accuracy=89.2% (test) | -| SLURP | Spoken Language Understanding | CRDNN | Intent Accuracy=87.54% (test) | -| VoxLingua 107 | Identification | ECAPA-TDNN | Sentence Accuracy=93.3% (test) | - -For more details, take a look at the corresponding implementation in recipes/dataset/. - -### Pretrained Models - -Beyond providing recipes for training the models from scratch, SpeechBrain shares several pre-trained models (coupled with easy-inference functions) on [HuggingFace](https://huggingface.co/speechbrain). 
In the following, we report some of them: - -| Task | Dataset | Model | -| ------------- |:-------------:| -----:| -| Speech Recognition | LibriSpeech | [CNN + Transformer](https://huggingface.co/speechbrain/asr-transformer-transformerlm-librispeech) | -| Speech Recognition | LibriSpeech | [CRDNN](https://huggingface.co/speechbrain/asr-crdnn-transformerlm-librispeech) | -| Speech Recognition | CommonVoice(English) | [wav2vec + CTC](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-en) | -| Speech Recognition | CommonVoice(French) | [wav2vec + CTC](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-fr) | -| Speech Recognition | CommonVoice(Italian) | [wav2vec + CTC](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-it) | -| Speech Recognition | CommonVoice(Kinyarwanda) | [wav2vec + CTC](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-rw) | -| Speech Recognition | AISHELL(Mandarin) | [wav2vec + CTC](https://huggingface.co/speechbrain/asr-wav2vec2-transformer-aishell) | -| Text-to-Speech | LJSpeech | [Tacotron2](https://huggingface.co/speechbrain/tts-tacotron2-ljspeech) | -| Speaker Recognition | Voxceleb | [ECAPA-TDNN](https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb) | -| Speech Separation | WHAMR! | [SepFormer](https://huggingface.co/speechbrain/sepformer-whamr) | -| Speech Enhancement | Voicebank | [MetricGAN+](https://huggingface.co/speechbrain/metricgan-plus-voicebank) | -| Speech Enhancement | WHAMR! 
| [SepFormer](https://huggingface.co/speechbrain/sepformer-whamr-enhancement) | -| Spoken Language Understanding | Timers and Such | [CRDNN](https://huggingface.co/speechbrain/slu-timers-and-such-direct-librispeech-asr) | -| Language Identification | CommonLanguage | [ECAPA-TDNN](https://huggingface.co/speechbrain/lang-id-commonlanguage_ecapa) | - -The full list of pre-trained models can be found on [HuggingFace](https://huggingface.co/speechbrain) - -### Documentation & Tutorials -SpeechBrain is designed to speed up the research and development of speech technologies. Hence, our code is backed-up with different levels of documentation: -- **Educational-level:** we provide various Google Colab (i.e., interactive) tutorials describing all the building blocks of SpeechBrain ranging from the core of the toolkit to a specific model designed for a particular task. The tutorials are designed not only to help people familiarize themselves with SpeechBrain but, more in general, to help them familiarize themselves with speech and language technologies. -- **Functional-level:** all classes in SpeechBrain contains a detailed docstring. It describes the input and output formats, the different arguments, the usage of the function, the potentially associated bibliography, and a function example used for test integration during pull requests. -- **Low-level:** The code also uses a lot of in-line comments to describe nontrivial parts of the code. - -### Under development -We are currently implementing speech synthesis pipelines and real-time speech processing pipelines. An interface with the Finite State Transducers (FST) implemented by the [Kaldi 2 team](https://github.com/k2-fsa/k2) is under development. - -# Conference Tutorials -SpeechBrain has been presented at Interspeech 2021 and 2022 as well as ASRU 2021. 
When possible, we will provide some ressources here: -- [Interspeech 2022 slides.](https://drive.google.com/drive/folders/1d6GAquxw6rZBI-7JvfUQ_-upeiKstJEo?usp=sharing) -- [Interspeech 2021 YouTube recordings.](https://www.youtube.com/results?search_query=Interspeech+speechbrain+) - -# Quick installation -SpeechBrain is constantly evolving. New features, tutorials, and documentation will appear over time. -SpeechBrain can be installed via PyPI. Moreover, a local installation can be used by those users that what to run experiments and modify/customize the toolkit. SpeechBrain supports both CPU and GPU computations. For most all the recipes, however, a GPU is necessary during training. Please note that CUDA must be properly installed to use GPUs. - - -## Install via PyPI - -Once you have created your Python environment (Python 3.7+) you can simply type: -``` -pip install speechbrain -``` +| 📘 [Tutorials](https://speechbrain.readthedocs.io) | 🌐 [Website](https://speechbrain.github.io/) | 📚 [Documentation](https://speechbrain.readthedocs.io/en/latest/index.html) | 🤝 [Contributing](https://speechbrain.readthedocs.io/en/latest/contributing.html) | 🤗 [HuggingFace](https://huggingface.co/speechbrain) | ▶️ [YouTube](https://www.youtube.com/@SpeechBrainProject) | 🐦 [X](https://twitter.com/SpeechBrain1) | -Then you can access SpeechBrain with: +![GitHub Repo stars](https://img.shields.io/github/stars/speechbrain/speechbrain?style=social) *Please, help our community project. Star on GitHub!* -``` -import speechbrain as sb -``` +**Exciting News (January, 2024):** Discover what is new in SpeechBrain 1.0 [here](https://colab.research.google.com/drive/1IEPfKRuvJRSjoxu22GZhb3czfVHsAy0s?usp=sharing)! +# +# 🗣️💬 What SpeechBrain Offers -## Install with GitHub +- SpeechBrain is an **open-source** [PyTorch](https://pytorch.org/) toolkit that accelerates **Conversational AI** development, i.e., the technology behind *speech assistants*, *chatbots*, and *large language models*. 
-Once you have created your Python environment (Python 3.7+) you can simply type: +- It is crafted for fast and easy creation of advanced technologies for **Speech** and **Text** Processing. -``` -git clone https://github.com/speechbrain/speechbrain.git -cd speechbrain -pip install -r requirements.txt -pip install --editable . -``` -Then you can access SpeechBrain with: +## 🌐 Vision +- With the rise of [deep learning](https://www.deeplearningbook.org/), once-distant domains like speech processing and NLP are now very close. A well-designed neural network and large datasets are all you need. +- We think it is now time for a **holistic toolkit** that, mimicking the human brain, jointly supports diverse technologies for complex Conversational AI systems. + +- This spans *speech recognition*, *speaker recognition*, *speech enhancement*, *speech separation*, *language modeling*, *dialogue*, and beyond. + +- Aligned with our long-term goal of natural human-machine conversation, including for non-verbal individuals, we have recently added support for the [EEG modality](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB). + + + +## 📚 Training Recipes +- We share over 200 competitive training [recipes](recipes) on more than 40 datasets supporting 20 speech and text processing tasks (see below). + +- We support both training from scratch and fine-tuning pretrained models such as [Whisper](https://huggingface.co/openai/whisper-large), [Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2), [WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm), [Hubert](https://huggingface.co/docs/transformers/model_doc/hubert), [GPT2](https://huggingface.co/gpt2), [Llama2](https://huggingface.co/docs/transformers/model_doc/llama2), and beyond. The models on [HuggingFace](https://huggingface.co/) can be easily plugged in and fine-tuned. 
+ +- For any task, you train the model using these commands: +```bash +python train.py hparams/train.yaml ``` -import speechbrain as sb -``` -Any modification made to the `speechbrain` package will be automatically interpreted as we installed it with the `--editable` flag. +- The hyperparameters are encapsulated in a YAML file, while the training process is orchestrated through a Python script. + +- We maintain a consistent code structure across different tasks. + +- For better replicability, training logs and checkpoints are hosted on Dropbox. + +## drawing Pretrained Models and Inference -## Test Installation -Please, run the following script to make sure your installation is working: +- Access over 100 pretrained models hosted on [HuggingFace](https://huggingface.co/speechbrain). +- Each model comes with a user-friendly interface for seamless inference. For example, transcribing speech using a pretrained model requires just three lines of code: + +```python + +from speechbrain.inference import EncoderDecoderASR + +asr_model = EncoderDecoderASR.from_hparams(source="speechbrain/asr-conformer-transformerlm-librispeech", savedir="pretrained_models/asr-transformer-transformerlm-librispeech") +asr_model.transcribe_file("speechbrain/asr-conformer-transformerlm-librispeech/example.wav") ``` + +## drawing Documentation +- We are deeply dedicated to promoting inclusivity and education. +- We have authored over 30 [tutorials](https://speechbrain.readthedocs.io) that not only describe how SpeechBrain works but also help users familiarize themselves with Conversational AI. +- Every class or function has clear explanations and examples that you can run. Check out the [documentation](https://speechbrain.readthedocs.io/en/latest/index.html) for more details 📚. + + + +## 🎯 Use Cases +- 🚀 **Research Acceleration**: Speeding up academic and industrial research. You can develop and integrate new models effortlessly, comparing their performance against our baselines.
+ +- ⚡️ **Rapid Prototyping**: Ideal for quick prototyping in time-sensitive projects. + +- 🎓 **Educational Tool**: SpeechBrain's simplicity makes it a valuable educational resource. It is used by institutions like [Mila](https://mila.quebec/en/), [Concordia University](https://www.concordia.ca/), [Avignon University](https://univ-avignon.fr/en/), and many others for student training. + +# +# 🚀 Quick Start + +To get started with SpeechBrain, follow these simple steps: + +## 🛠️ Installation + +### Install via PyPI + +1. Install SpeechBrain using PyPI: + + ```bash + pip install speechbrain + ``` + +2. Access SpeechBrain in your Python code: + + ```python + import speechbrain as sb + ``` + +### Install from GitHub +This installation is recommended for users who wish to conduct experiments and customize the toolkit according to their needs. + +1. Clone the GitHub repository and install the requirements: + + ```bash + git clone https://github.com/speechbrain/speechbrain.git + cd speechbrain + pip install -r requirements.txt + pip install --editable . + ``` + +2. Access SpeechBrain in your Python code: + + ```python + import speechbrain as sb + ``` + +Any modifications made to the `speechbrain` package will be automatically reflected, thanks to the `--editable` flag. + +## ✔️ Test Installation + +Ensure your installation is correct by running the following commands: + +```bash pytest tests pytest --doctest-modules speechbrain ``` -# Running an experiment -In SpeechBrain, you can run experiments in this way: +## 🏃‍♂️ Running an Experiment -``` -> cd recipes/// -> python experiment.py params.yaml +In SpeechBrain, you can train a model for any task using the following steps: + +```bash +cd recipes/// +python experiment.py params.yaml ``` -The results will be saved in the `output_folder` specified in the yaml file. The folder is created by calling `sb.core.create_experiment_directory()` in `experiment.py`. Both detailed logs and experiment outputs are saved there.
Furthermore, less verbose logs are output to stdout. +The results will be saved in the `output_folder` specified in the YAML file. -# SpeechBrain Roadmap +## 📘 Learning SpeechBrain -As a community-based and open-source project, SpeechBrain needs the help of its community to grow in the right direction. Opening the roadmap to our users enables the toolkit to benefit from new ideas, new research axes, or even new technologies. The roadmap, available on our [Discourse](https://speechbrain.discourse.group/t/speechbrain-a-community-roadmap/179) lists all the changes and updates that need to be done in the current version of SpeechBrain. Users are more than welcome to propose new items via new Discourse topics! +- **Website:** Explore general information on the [official website](https://speechbrain.github.io). -# Learning SpeechBrain +- **Tutorials:** Start with [basic tutorials](https://speechbrain.readthedocs.io/en/latest/tutorials/basics.html) covering fundamental functionalities. Find advanced tutorials and topics in the Tutorial notebooks category in the [SpeechBrain documentation](https://speechbrain.readthedocs.io). -We provide users with different resources to learn how to use SpeechBrain: -- General information can be found on the [website](https://speechbrain.github.io). -- We offer many tutorials, you can start from the [basic ones](https://speechbrain.github.io/tutorial_basics.html) about SpeechBrain's basic functionalities and building blocks. We provide also more advanced tutorials (e.g SpeechBrain advanced, signal processing ...). You can browse them via the Tutorials drop-down menu on [SpeechBrain website](https://speechbrain.github.io) in the upper right. -- Details on the SpeechBrain API, how to contribute, and the code are given in the [documentation](https://speechbrain.readthedocs.io/en/latest/index.html). 
+- **Documentation:** Detailed information on the SpeechBrain API, contribution guidelines, and code is available in the [documentation](https://speechbrain.readthedocs.io/en/latest/index.html). -# License -SpeechBrain is released under the Apache License, version 2.0. The Apache license is a popular BSD-like license. SpeechBrain can be redistributed for free, even for commercial purposes, although you can not take off the license headers (and under some circumstances, you may have to distribute a license document). Apache is not a viral license like the GPL, which forces you to release your modifications to the source code. Note that this project has no connection to the Apache Foundation, other than that we use the same license terms. +# +# 🔧 Supported Technologies +- SpeechBrain is a versatile framework designed for implementing a wide range of technologies within the field of Conversational AI. +- It excels not only in individual task implementations but also in combining various technologies into complex pipelines. -# Social Media -We constantly update the community using Twitter. 
[Feel free to follow us](https://twitter.com/speechbrain1) +## 🎙️ Speech/Audio Processing +| Tasks | Datasets | Technologies/Models | +| ------------- |-------------| -----| +| Speech Recognition | [AISHELL-1](recipes/AISHELL-1), [CommonVoice](recipes/CommonVoice), [DVoice](recipes/DVoice), [LibriSpeech](recipes/LibriSpeech), [MEDIA](recipes/MEDIA), [RescueSpeech](recipes/RescueSpeech), [Switchboard](recipes/Switchboard), [TIMIT](recipes/TIMIT), [Tedlium2](recipes/Tedlium2), [Voicebank](recipes/Voicebank) | [CTC](https://www.cs.toronto.edu/~graves/icml_2006.pdf), [Transducers](https://arxiv.org/pdf/1211.3711.pdf?origin=publication_detail), [Transformers](https://arxiv.org/abs/1706.03762), [Seq2Seq](http://zhaoshuaijiang.com/file/Hybrid_CTC_Attention_Architecture_for_End-to-End_Speech_Recognition.pdf), [Beamsearch techniques for CTC](https://arxiv.org/pdf/1911.01629.pdf),[seq2seq](https://arxiv.org/abs/1904.02619.pdf),[transducers](https://www.merl.com/publications/docs/TR2017-190.pdf)), [Rescoring](https://arxiv.org/pdf/1612.02695.pdf), [Conformer](https://arxiv.org/abs/2005.08100), [Branchformer](https://arxiv.org/abs/2207.02971), [Hyperconformer](https://arxiv.org/abs/2305.18281), [Kaldi2-FST](https://github.com/k2-fsa/k2) | +| Speaker Recognition | [VoxCeleb](recipes/VoxCeleb) | [ECAPA-TDNN](https://arxiv.org/abs/2005.07143), [ResNET](https://arxiv.org/pdf/1910.12592.pdf), [Xvectors](https://www.danielpovey.com/files/2018_icassp_xvectors.pdf), [PLDA](https://ieeexplore.ieee.org/document/6639151), [Score Normalization](https://www.sciencedirect.com/science/article/abs/pii/S1051200499903603) | +| Speech Separation | [WSJ0Mix](recipes/WSJ0Mix), [LibriMix](recipes/LibriMix), [WHAM!](recipes/WHAMandWHAMR), [WHAMR!](recipes/WHAMandWHAMR), [Aishell1Mix](recipes/Aishell1Mix), [BinauralWSJ0Mix](recipes/BinauralWSJ0Mix) | [SepFormer](https://arxiv.org/abs/2010.13154), [RESepFormer](https://arxiv.org/abs/2206.09507), [SkiM](https://arxiv.org/abs/2201.10800), [DualPath 
RNN](https://arxiv.org/abs/1910.06379), [ConvTasNET](https://arxiv.org/abs/1809.07454) | +| Speech Enhancement | [DNS](recipes/DNS), [Voicebank](recipes/Voicebank) | [SepFormer](https://arxiv.org/abs/2010.13154), [MetricGAN](https://arxiv.org/abs/1905.04874), [MetricGAN-U](https://arxiv.org/abs/2110.05866), [SEGAN](https://arxiv.org/abs/1703.09452), [spectral masking](http://staff.ustc.edu.cn/~jundu/Publications/publications/Trans2015_Xu.pdf), [time masking](http://staff.ustc.edu.cn/~jundu/Publications/publications/Trans2015_Xu.pdf) | +| Interpretability | [ESC50](recipes/ESC50) | [Listenable Maps for Audio Classifiers (L-MAC)](https://arxiv.org/abs/2403.13086), [Learning-to-Interpret (L2I)](https://proceedings.neurips.cc/paper_files/paper/2022/file/e53280d73dd5389e820f4a6250365b0e-Paper-Conference.pdf), [Non-Negative Matrix Factorization (NMF)](https://proceedings.neurips.cc/paper_files/paper/2022/file/e53280d73dd5389e820f4a6250365b0e-Paper-Conference.pdf), [PIQ](https://arxiv.org/abs/2303.12659) | +| Speech Generation | [AudioMNIST](recipes/AudioMNIST) | [Diffusion](https://arxiv.org/abs/2006.11239), [Latent Diffusion](https://arxiv.org/abs/2112.10752) | +| Text-to-Speech | [LJSpeech](recipes/LJSpeech), [LibriTTS](recipes/LibriTTS) | [Tacotron2](https://arxiv.org/abs/1712.05884), [Zero-Shot Multi-Speaker Tacotron2](https://arxiv.org/abs/2112.02418), [FastSpeech2](https://arxiv.org/abs/2006.04558) | +| Vocoding | [LJSpeech](recipes/LJSpeech), [LibriTTS](recipes/LibriTTS) | [HiFiGAN](https://arxiv.org/abs/2010.05646), [DiffWave](https://arxiv.org/abs/2009.09761) +| Spoken Language Understanding | [MEDIA](recipes/MEDIA), [SLURP](recipes/SLURP), [Fluent Speech Commands](recipes/fluent-speech-commands), [Timers-and-Such](recipes/timers-and-such) | [Direct SLU](https://arxiv.org/abs/2104.01604), [Decoupled SLU](https://arxiv.org/abs/2104.01604), [Multistage SLU](https://arxiv.org/abs/2104.01604) | +| Speech-to-Speech Translation | [CVSS](recipes/CVSS) | [Discrete 
Hubert](https://arxiv.org/pdf/2106.07447.pdf), [HiFiGAN](https://arxiv.org/abs/2010.05646), [wav2vec2](https://arxiv.org/abs/2006.11477) | +| Speech Translation | [Fisher CallHome (Spanish)](recipes/Fisher-Callhome-Spanish), [IWSLT22(lowresource)](recipes/IWSLT22_lowresource) | [wav2vec2](https://arxiv.org/abs/2006.11477) | +| Emotion Classification | [IEMOCAP](recipes/IEMOCAP), [ZaionEmotionDataset](recipes/ZaionEmotionDataset) | [ECAPA-TDNN](https://arxiv.org/abs/2005.07143), [wav2vec2](https://arxiv.org/abs/2006.11477), [Emotion Diarization](https://arxiv.org/abs/2306.12991) | +| Language Identification | [VoxLingua107](recipes/VoxLingua107), [CommonLanguage](recipes/CommonLanguage)| [ECAPA-TDNN](https://arxiv.org/abs/2005.07143) | +| Voice Activity Detection | [LibriParty](recipes/LibriParty) | [CRDNN](https://arxiv.org/abs/2106.04624) | +| Sound Classification | [ESC50](recipes/ESC50), [UrbanSound](recipes/UrbanSound8k) | [CNN14](https://github.com/ranchlai/sound_classification), [ECAPA-TDNN](https://arxiv.org/abs/2005.07143) | +| Self-Supervised Learning | [CommonVoice](recipes/CommonVoice), [LibriSpeech](recipes/LibriSpeech) | [wav2vec2](https://arxiv.org/abs/2006.11477) | +| Metric Learning | [REAL-M](recipes/REAL-M/sisnr-estimation), [Voicebank](recipes/Voicebank) | [Blind SNR-Estimation](https://arxiv.org/abs/2002.08909), [PESQ Learning](https://arxiv.org/abs/2110.05866) | +| Alignment | [TIMIT](recipes/TIMIT) | [CTC](https://www.cs.toronto.edu/~graves/icml_2006.pdf), [Viterbi](https://www.cs.cmu.edu/~cga/behavior/rabiner1.pdf), [Forward Forward](https://www.cs.cmu.edu/~cga/behavior/rabiner1.pdf) | +| Diarization | [AMI](recipes/AMI) | [ECAPA-TDNN](https://arxiv.org/abs/2005.07143), [X-vectors](https://www.danielpovey.com/files/2018_icassp_xvectors.pdf), [Spectral Clustering](https://web.archive.org/web/20240305184559/http://www.ifp.illinois.edu/~hning2/papers/Ning_spectral.pdf) | -# Citing SpeechBrain -Please, cite SpeechBrain if you use it for your 
research or business. +## 📝 Text Processing +| Tasks | Datasets | Technologies/Models | +| ------------- |-------------| -----| +| Language Modeling | [CommonVoice](recipes/CommonVoice), [LibriSpeech](recipes/LibriSpeech)| [n-grams](https://web.stanford.edu/~jurafsky/slp3/3.pdf), [RNNLM](https://www.fit.vutbr.cz/research/groups/speech/publi/2010/mikolov_interspeech2010_IS100722.pdf), [TransformerLM](https://arxiv.org/abs/1706.03762) | +| Response Generation | [MultiWOZ](recipes/MultiWOZ/response_generation)| [GPT2](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf), [Llama2](https://arxiv.org/abs/2307.09288) | +| Grapheme-to-Phoneme | [LibriSpeech](recipes/LibriSpeech) | [RNN](https://arxiv.org/abs/2207.13703), [Transformer](https://arxiv.org/abs/2207.13703), [Curriculum Learning](https://arxiv.org/abs/2207.13703), [Homograph loss](https://arxiv.org/abs/2207.13703) | + +## 🧠 EEG Processing +| Tasks | Datasets | Technologies/Models | +| ------------- |-------------| -----| +| Motor Imagery | [BNCI2014001](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/MotorImagery/BNCI2014001), [BNCI2014004](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/MotorImagery/BNCI2014004), [BNCI2015001](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/MotorImagery/BNCI2015001), [Lee2019_MI](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/MotorImagery/Lee2019_MI), [Zhou201](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/MotorImagery/Zhou2016) | [EEGNet](https://github.com/speechbrain/benchmarks/blob/main/benchmarks/MOABB/models/EEGNet.py), [ShallowConvNet](https://github.com/speechbrain/benchmarks/blob/main/benchmarks/MOABB/models/ShallowConvNet.py), [EEGConformer](https://github.com/speechbrain/benchmarks/blob/main/benchmarks/MOABB/models/EEGConformer.py) | +| P300 | 
[BNCI2014009](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/P300/BNCI2014009), [EPFLP300](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/P300/EPFLP300), [bi2015a](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/P300/bi2015a), | [EEGNet](https://github.com/speechbrain/benchmarks/blob/main/benchmarks/MOABB/models/EEGNet.py) | +| SSVEP | [Lee2019_SSVEP](https://github.com/speechbrain/benchmarks/tree/main/benchmarks/MOABB/hparams/SSVEP/Lee2019_SSVEP) | [EEGNet](https://github.com/speechbrain/benchmarks/blob/main/benchmarks/MOABB/models/EEGNet.py) | + + + + +## 🔍 Additional Features + +SpeechBrain includes a range of native functionalities that enhance the development of Conversational AI technologies. Here are some examples: + +- **Training Orchestration:** The `Brain` class serves as a fully customizable tool for managing training and evaluation loops over data. It simplifies training loops while providing the flexibility to override any part of the process. + +- **Hyperparameter Management:** A YAML-based hyperparameter file specifies all hyperparameters, from individual numbers (e.g., learning rate) to complete objects (e.g., custom models). This elegant solution drastically simplifies the training script. + +- **Dynamic Dataloader:** Enables flexible and efficient data reading. + +- **GPU Training:** Supports single and multi-GPU training, including distributed training. + +- **Dynamic Batching:** On-the-fly dynamic batching enhances the efficient processing of variable-length signals. + +- **Mixed-Precision Training:** Accelerates training through mixed-precision techniques. + +- **Efficient Data Reading:** Reads large datasets efficiently from a shared Network File System (NFS) via [WebDataset](https://github.com/webdataset/webdataset). 
+ +- **Hugging Face Integration:** Interfaces seamlessly with [HuggingFace](https://huggingface.co/speechbrain) for popular models such as wav2vec2 and Hubert. + +- **Orion Integration:** Interfaces with [Orion](https://github.com/Epistimio/orion) for hyperparameter tuning. + +- **Speech Augmentation Techniques:** Includes SpecAugment, Noise, Reverberation, and more. + +- **Data Preparation Scripts:** Includes scripts for preparing data for supported datasets. + +SpeechBrain is rapidly evolving, with ongoing efforts to support a growing array of technologies in the future. + + +## 📊 Performance + +- SpeechBrain integrates a variety of technologies, including those that achieve competitive or state-of-the-art performance. + +- For a comprehensive overview of the achieved performance across different tasks, datasets, and technologies, please visit [here](PERFORMANCE.md). + +# +# 📜 License + +- SpeechBrain is released under the [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0), a popular BSD-like license. +- You are free to redistribute SpeechBrain for both free and commercial purposes, with the condition of retaining license headers. Unlike the GPL, the Apache License is not viral, meaning you are not obligated to release modifications to the source code. + +# +# 🔮 Future Plans + +We have ambitious plans for the future, with a focus on the following priorities: + +- **Scale Up:** We aim to provide comprehensive recipes and technologies for training massive models on extensive datasets. + +- **Scale Down:** While scaling up delivers unprecedented performance, we recognize the challenges of deploying large models in production scenarios. We are focusing on real-time, streamable, and small-footprint Conversational AI. + +- **Multimodal Large Language Models**: We envision a future where a single foundation model can handle a wide range of text, speech, and audio tasks.
Our core team is focused on enabling the training of advanced multimodal LLMs. + +# +# 🤝 Contributing + +- SpeechBrain is a community-driven project, led by a core team with the support of numerous international collaborators. +- We welcome contributions and ideas from the community. For more information, check [here](https://speechbrain.github.io/contributing.html). + +# +# 🙏 Sponsors + +- SpeechBrain is an academically driven project and relies on the passion and enthusiasm of its contributors. +- As we cannot rely on the resources of a large company, we deeply appreciate any form of support, including donations or collaboration with the core team. +- If you're interested in sponsoring SpeechBrain, please reach out to us at speechbrainproject@gmail.com. +- A heartfelt thank you to all our sponsors, including the current ones: + + + +[Image 1](https://speechbrain.github.io/img/hf.ico)     +[Image 3](https://viadialog.com/en/)     +[Image 4](https://europe.naverlabs.com/) + +

+ +[Image 5](https://www.ovhcloud.com/en-ca/)     +[Image 2](https://usa.baidu.com/)     +[Image 6](https://research.samsung.com/aicenter_cambridge) + +

+ +[Image 7](https://mila.quebec/en/)     +[Image 9](https://www.concordia.ca/)     +[Image 8](https://lia.univ-avignon.fr/)     +# +# 📖 Citing SpeechBrain + +If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry: ```bibtex +@article{speechbrain_v1, + author = {Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Ha Nguyen and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Ga{{\"e}}lle Laperri{{\`e}}re and Mickael Rouvier and Renato De Mori and Yannick Est{{\`e}}ve}, + title = {Open-Source Conversational AI with SpeechBrain 1.0}, + journal = {Journal of Machine Learning Research}, + year = {2024}, + volume = {25}, + number = {333}, + url = {http://jmlr.org/papers/v25/24-0991.html} +} + @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..f128bffe79 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,10 @@ +# Security Policy + +## Supported Versions + +Since SpeechBrain is a beta release research-oriented toolkit, it aims to support the latest major version (at x.y level, e.g. 
0.5 until 0.6 is released) with security updates, but unfortunately cannot promise long-term security updates for old versions. + +## Reporting a Vulnerability + +Vulnerabilities may be reported confidentially to speechbrainproject@gmail.com + diff --git a/conftest.py b/conftest.py index 74b6f501ab..2de1abca7e 100644 --- a/conftest.py +++ b/conftest.py @@ -10,20 +10,14 @@ def pytest_generate_tests(metafunc): metafunc.parametrize("device", [option_value]) -collect_ignore = ["setup.py"] -try: - import numba # noqa: F401 -except ModuleNotFoundError: - collect_ignore.append("speechbrain/nnet/loss/transducer_loss.py") -try: - import fairseq # noqa: F401 -except ModuleNotFoundError: - collect_ignore.append("speechbrain/lobes/models/fairseq_wav2vec.py") -try: - from transformers import Wav2Vec2Model # noqa: F401 -except ModuleNotFoundError: - collect_ignore.append("speechbrain/lobes/models/huggingface_wav2vec.py") -try: - import sacrebleu # noqa: F401 -except ModuleNotFoundError: - collect_ignore.append("speechbrain/utils/bleu.py") +collect_ignore = [ + "speechbrain/integrations/", + # These can be removed once the modules are fully deprecated + "speechbrain/utils/bleu.py", + "speechbrain/utils/kmeans.py", + "speechbrain/processing/diarization.py", + "speechbrain/decoders/language_model.py", + "speechbrain/alignment/ctc_segmentation.py", + "speechbrain/lobes/models/fairseq_wav2vec.py", + "speechbrain/lobes/models/kmeans.py", +] diff --git a/docs/README.md b/docs/README.md index cea1048f6a..7119baa82f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -20,10 +20,34 @@ docstrings. Sphinx natively supports reStructuredText directives. Automatically generating documentation based on docstrings is not the core of Sphinx. For this, after much searching, we use better-apidoc. 
-It seems better-apidoc doesn't use autodoc\_mock\_imports so we currently just -add all extra dependencies to docs-requirements.txt +## Tutorial integration -## Future work +Tutorials are now inside of the main SpeechBrain repository. -Besides automatic API documentation, Sphinx will facilitate manual prose -documentation. +### Contributor guidelines for tutorials + +The `docs/tutorials` directory exclusively contains tutorials in Jupyter Notebook format. These tutorials are integrated into the doc semi-automatically. You should ensure that the following steps are respected so that they render correctly and so that we can keep consistent quality. + +#### Relatively important notices + +- Create your new notebook, preferably with the same structure as existing tutorials. +- Keep the file size low! Limit images and audio. + - Ideally it should be a few hundred KiB total, avoid anything larger than 1MiB unless you really have to. + - It's OK if the user has to run the notebook to get some of the heavier outputs. +- Preferably use Jupyter Notebook for final editing of your notebook. + - Jupyter Notebook tends to have somewhat sane `.ipynb` output. This avoids Git diffs from being excessively large. +- **Images can be put in the `docs/tutorials/assets` directory,** rather than embedded as base64. You can then refer to them in Markdown like `![alt text](../assets/myimage.png)`. These will work correctly when imported on Colab. + - Pick descriptive names. + +#### Integration in documentation + +- Add your notebook to the relevant category `.rst`, paying attention to keep the same structure and appearance as existing tutorials. + - (Create a category if _really_ necessary, but this bloats the table of contents/sidebar.) +- **The Colab header/citation footer** are generated automatically and should not be manually inserted or edited. See `tools/tutorial-cell-update.py`. You should run this for your notebook. 
+- Add your notebook to the hidden `toctree` of the same document. +- Make sure that your headings are consistent! + - Please use a single top-level heading for the title of your notebook. + - That title should match the name in the summary. + - Please use level-2 or deeper headings for everything else (`##`, `###`, etc. in markdown). Notebook headings **are** used as part of the document tree! +- Make sure that your tutorial renders at least correctly with the in-documentation view. + - You can check this by either generating docs normally, or use the readthedocs PR integration that lets you preview docs for your PR (assuming it succeeds). This takes time, though! You should preferably have a functional documentation environment when contributing. \ No newline at end of file diff --git a/docs/audioloading.rst b/docs/audioloading.rst new file mode 100644 index 0000000000..def3a73c89 --- /dev/null +++ b/docs/audioloading.rst @@ -0,0 +1,75 @@ +============================= +Audio loading troubleshooting +============================= + +This page is intended to document how to install audio backends and +provides troubleshooting steps for your audio loading troubles. + +Introduction +============ + +SpeechBrain now uses `soundfile <https://github.com/bastibe/python-soundfile>`_ as the +sole supported audio I/O backend through the :mod:`speechbrain.dataio.audio_io` module. + +The soundfile backend supports most common audio formats including: +``wav``, ``flac``, and ``mp3``. For advanced format support or issues, +please refer to the sections below. + +.. note:: + **Legacy torchaudio backends**: SpeechBrain previously used torchaudio for + audio I/O, which supported three backends: ``ffmpeg``, ``sox`` and ``soundfile``. + However, torchaudio 2.9 deprecated all audio I/O support so SpeechBrain + now relies on ``soundfile`` directly for audio I/O. 
+ +Recommended install steps +========================= + +The pip package `soundfile` is a dependency of SpeechBrain and should be automatically +installed when you install SpeechBrain. +Starting with SoundFile 0.12.0, the pip package bundles a prebuilt ``libsndfile`` +for most platforms (Windows, macOS, Linux), so it typically works out of the box +when installed via pip. + +If you encounter issues with audio loading: + +- **Update soundfile**: Try running ``pip install --upgrade soundfile`` to get + the latest version with updated ``libsndfile`` binaries. + +- **On Linux with superuser rights**: Install ``libsndfile`` through your + distribution's package manager (e.g., ``sudo apt install libsndfile1`` on + Ubuntu/Debian). + +- **For advanced codec support**: If you need to work with formats not supported + by soundfile (e.g., AAC/M4A), you may need to convert your audio files + to a supported format like WAV or FLAC using external tools such as ``ffmpeg``. + +- **Check installation**: You can verify soundfile is working by running: + + .. code-block:: python + + import soundfile as sf + print(sf.__version__) + print(sf.available_formats()) + +SpeechBrain Audio I/O API +========================== + +SpeechBrain provides its own audio I/O interface through the +:mod:`speechbrain.dataio.audio_io` module. Usage example: + +.. code-block:: python + + from speechbrain.dataio import audio_io + + # Load audio file + audio, sample_rate = audio_io.load("path/to/audio.wav") + + # Get audio metadata + info = audio_io.info("path/to/audio.wav") + print(info.sample_rate, info.duration, info.channels) + + # Save audio file + audio_io.save("output.wav", audio, sample_rate) + +This API is compatible with the previous torchaudio-based interface, making +migration straightforward. 
diff --git a/docs/codereview.md b/docs/codereview.md new file mode 100644 index 0000000000..5eab437803 --- /dev/null +++ b/docs/codereview.md @@ -0,0 +1,24 @@ +## Reviewing code + +This is not a comprehensive code review guide, but some rough guidelines to unify the general review practices across this project. + +Firstly, let the review take some time. Try to read every line that was added, +if possible. Try also to run some tests. Read the surrounding context of the code if needed to understand +the changes introduced. Possibly ask for clarifications if you don't understand. +If the pull request changes are hard to understand, maybe that's a sign that +the code is not clear enough yet. However, don't nitpick every detail. + +Secondly, focus on the major things first, and only then move on to smaller +things. Level of importance: +- Immediate deal breakers (code does the wrong thing, or feature shouldn't be added etc.) +- Things to fix before merging (Add more documentation, reduce complexity, etc.) +- More subjective things could be changed if the author also agrees with you. + +Thirdly, approve the pull request only once you believe the changes "improve overall code health" as attested to [here](https://google.github.io/eng-practices/review/reviewer/standard.html). +However, this also means the pull request does not have to be perfect. Some features are best implemented incrementally over many pull requests, and you should be more concerned with making sure that the changes introduced lend themselves to painless further improvements. + +Fourthly, use the tools that GitHub has: comment on specific code lines, suggest edits, and once everyone involved has agreed that the PR is ready to merge, merge the request and delete the feature branch. + +Fifthly, the code review is a place for professional constructive criticism; +a nice strategy to show (and validate) that you understand what the PR is really +doing is to provide some affirmative comments on its strengths. 
\ No newline at end of file diff --git a/docs/compilation.md b/docs/compilation.md new file mode 100644 index 0000000000..12aa7b2788 --- /dev/null +++ b/docs/compilation.md @@ -0,0 +1,53 @@ +# Compilation + +Compilation of your models in SpeechBrain can potentially improve their speed and reduce memory demand. SpeechBrain inherits the compilation methods supported by PyTorch, including the just-in-time compiler (JIT) and the `torch.compile` method introduced in PyTorch version >=2.0. + +## Compile with `torch.compile` +The `torch.compile` feature was introduced with PyTorch version >=2.0 to gradually replace JIT. Although this feature is valuable, it is still in the beta phase, and improvements are ongoing. Please have a look at the [PyTorch documentation](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) for more information. + +### How to use `torch.compile` +Compiling all modules in SpeechBrain is straightforward. You can enable compilation by using the `--compile` flag in the command line when running a training recipe. For example: + +```bash +python train.py train.yaml --data_folder=your/data/folder --compile +``` + +This will automatically compile all the modules declared in the YAML file under the `modules` section. + +Note that you might need to configure additional compilation flags correctly (e.g., `--compile_mode`, `--compile_using_fullgraph`, `--compile_using_dynamic_shape_tracing`) to ensure successful model compilation or achieve the best performance. For a deeper understanding of their roles, refer to the documentation in the [PyTorch documentation](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html). + +In some cases, you may want to compile only specific modules. To achieve this, add a list of the module keys you want to compile in the YAML file using `compile_module_keys`. 
For instance: + +```yaml +compile_module_keys: [encoder, decoder] +``` + +This will compile only the encoder and decoder models, which should be declared in the YAML file before using the respective keys. + +Remember to call the training script with the `--compile` flag. + +**Note of caution**: Compiling a model can be a complex process and may take some time. Additionally, it may fail in certain cases. The speed-up achieved through compilation is highly dependent on the system and GPU being used. For example, higher-end GPUs like the A100 tend to yield better speed-ups, while you may not observe significant improvements with V100 GPUs. We support this feature with the hope that `torch.compile` will constantly improve over time. + +## Compile with JIT +JIT was the first compilation method supported by PyTorch. It is important to note that JIT is expected to be replaced soon by `torch.compile`. Please have a look at the [PyTorch documentation](https://pytorch.org/docs/stable/jit.html) for more information. + +### How to use JIT +To compile all modules in SpeechBrain using JIT, use the `--jit` flag in the command line when running a training recipe: + +```bash +python train.py train.yaml --data_folder=your/data/folder --jit +``` + +This will automatically compile all the modules declared in the YAML file under the `modules` section. + +If you only want to compile specific modules, add a list of the module keys you want to compile in the YAML file using `jit_module_keys`. For example: + +```yaml +jit_module_keys: [encoder, decoder] +``` +This will compile only the encoder and decoder models, provided they are declared in the YAML file using the specified keys. + +Remember to call the training script with the `--jit` flag. + +**Note of caution**: JIT has specific requirements for supported syntax, and many popular Python syntaxes are not supported. 
Therefore, when designing a model with JIT in mind, ensure that it meets the necessary syntax requirements for successful compilation. Additionally, the speed-up achieved through JIT compilation varies depending on the model type. We found it most beneficial for custom RNNs, such as the Li-GRU used in SpeechBrain's TIMIT/ASR/CTC. Custom RNNs often require "for loops," which can be slow in Python. The compilation with JIT provides a significant speed-up in such cases. + diff --git a/docs/conf.py b/docs/conf.py index 4b7e2d0699..778d9a5007 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,7 +12,10 @@ # import os import sys + +import better_apidoc import hyperpyyaml +from sphinx.ext.autodoc.mock import mock sys.path.insert(-1, os.path.abspath("../")) @@ -39,7 +42,13 @@ "sphinx.ext.viewcode", "sphinx.ext.autosummary", "sphinx.ext.napoleon", + "sphinx_copybutton", + "sphinx_design", + "sphinx_markdown_tables", "recommonmark", + # chose myst-nb over nbsphinx is annoying because of the pandoc dependency + # of the latter, which needs to be installed system-wide or through conda + "myst_nb", ] @@ -59,16 +68,41 @@ # Intersphinx mapping: intersphinx_mapping = { "python": ("https://docs.python.org/", None), - "numpy": ("http://docs.scipy.org/doc/numpy/", None), + "numpy": ("https://numpy.org/doc/stable/", None), "torch": ("https://pytorch.org/docs/master/", None), + "torchaudio": ("https://pytorch.org/audio/stable/", None), } +# Myst-NB documentation + +jupyter_execute_notebooks = "off" + +myst_enable_extensions = [ + "amsmath", + "colon_fence", + "deflist", + "dollarmath", + "html_image", +] + # AUTODOC: autodoc_default_options = {} -# Autodoc mock extra dependencies: -autodoc_mock_imports = [] +# Autodoc mock extra dependencies -- doesn't work out of the box, because of better_apidoc. +# +# So, let's reuse the autodoc mock... 
+# +# We would also like to mock more imports than this but this is shockingly prone +# to randomly breaking, so let's keep a small-ish set of dependencies that tend +# to be more annoying to install and to nuke our CI on update +autodoc_mock_imports = [ + "k2", + "flair", + "fairseq", + "spacy", + "ctc_segmentation", +] # Order of API items: autodoc_member_order = "bysource" @@ -83,43 +117,52 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ["_apidoc_templates"] +exclude_patterns = ["_apidoc_templates", "build"] + +# Make backticks behave as inline code blocks rather than italics +default_role = "code" # -- Better apidoc ----------------------------------------------------------- def run_apidoc(app): - """Generage API documentation""" - import better_apidoc - - better_apidoc.APP = app - - better_apidoc.main( - [ - "better-apidoc", - "-t", - "_apidoc_templates", - "--force", - "--no-toc", - "--separate", - "-o", - "API", - os.path.dirname(hyperpyyaml.__file__), - ] - ) - better_apidoc.main( - [ - "better-apidoc", - "-t", - "_apidoc_templates", - "--force", - "--no-toc", - "--separate", - "-o", - "API", - os.path.join("../", "speechbrain"), - ] - ) + """Generate API documentation""" + + with mock(autodoc_mock_imports): + try: + better_apidoc.APP = app + better_apidoc.main( + [ + "better-apidoc", + "-t", + "_apidoc_templates", + "--force", + "--no-toc", + "--separate", + "-o", + "API", + os.path.join("../", "speechbrain"), + ] + ) + better_apidoc.main( + [ + "better-apidoc", + "-t", + "_apidoc_templates", + "--force", + "--no-toc", + "--separate", + "-o", + "API", + os.path.dirname(hyperpyyaml.__file__), + ] + ) + except Exception: + # because otherwise sphinx very helpfully eats the backtrace + import traceback + + print(traceback.format_exc(), file=sys.stderr) + raise # -- Options for HTML output 
------------------------------------------------- @@ -131,6 +174,7 @@ def run_apidoc(app): # See https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html # for rtd theme options html_theme_options = { + "logo_only": True, # Toc options "collapse_navigation": False, "sticky_navigation": True, @@ -138,6 +182,8 @@ def run_apidoc(app): "includehidden": True, } +html_logo = "images/speechbrain-logo.svg" + # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff --git a/docs/contributing.md b/docs/contributing.md index fd4dac64f5..b568a4d777 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -1,23 +1,19 @@ # Contributing -The goal is to write a set of libraries that process audio and speech in several ways. It is crucial to write a set of homogeneous libraries that are all compliant with the guidelines described in the following sub-sections. - ## Zen of Speechbrain -SpeechBrain could be used for *research*, *academic*, *commercial*, *non-commercial* purposes. Ideally, the code should have the following features: - -- **Simple:** the code must be easy to understand even by students or by users that are not professional programmers or speech researchers. Try to design your code such that it can be easily read. Given alternatives with the same level of performance, code the simplest one. (the most explicit and straightforward manner is preferred) +SpeechBrain is used for *research*, *academic*, *commercial*, *non-commercial* purposes, thus the code should be: -- **Readable:** SpeechBrain mostly adopts the code style conventions in PEP8. The code written by the users must be compliant with that. We test code style with `flake8` +- **Simple:** Straightforward and easy to understand even by students, academics and non-professional programmers. Complex code, when it _must_ exist, should be especially well explained. 
-- **Efficient**: The code should be as efficient as possible. When possible, users should maximize the use of pytorch native operations. Remember that in generally very convenient to process in parallel multiple signals rather than processing them one by one (e.g try to use *batch_size > 1* when possible). Test the code carefully with your favorite profiler (e.g, torch.utils.bottleneck https://pytorch.org/docs/stable/bottleneck.html ) to make sure there are no bottlenecks in your code. Since we are not working in *c++* directly, the speed can be an issue. Despite that, our goal is to make SpeechBrain as fast as possible. +- **Readable:** Avoid abstract naming. Link to resources and references to help understand complex topics or implementations. Code style and formatting are automatically enforced. -- **Modular:** Write your code such that it is very modular and fits well with the other functionalities of the toolkit. The idea is to develop a bunch of models that can be naturally interconnected with each other. +- **Efficient**: Not _everything_ must be fast, but for what _should_ be, [profile and optimize it](https://speechbrain.readthedocs.io/en/develop/tutorials/advanced/profiling-and-benchmark.html). Operate on batches. Prefer tensor operations over Python-heavy constructs. Avoid CPU/GPU syncs. -- **Well documented:** Given the goals of SpeechBrain, writing rich and good documentation is a crucial step. +- **Modular:** It should be easy to use any of the functionality from the toolkit. Break up functions/classes when it helps. Group functionality logically. Avoid unnecessary coupling. -## How to get your code in SpeechBrain +- **Well documented:** Docs should be complete, easy to navigate and easy to discover. Consider [writing a tutorial](https://github.com/speechbrain/speechbrain/tree/develop/docs#tutorial-integration). -Practically, development goes as follows: +## Creating Pull Requests on GitHub 0. We use git and GitHub. 1. 
Fork the speechbrain repository (https://github.com/speechbrain/speechbrain) @@ -34,7 +30,9 @@ on GitHub under your own account. `git add files-you-changed ...` `git commit -m "Short message about what you did"` 5. Push the branch to your GitHub repository. - `git push origin your-branch-name` + `git push -u origin your-branch-name` + (This uploads your branch to your GitHub repository and sets the upstream tracking reference, +so future `git push` or `git pull` commands will automatically know which remote branch to sync with.) 6. Navigate to GitHub, and create a pull request from your branch to the upstream repository speechbrain/speechbrain, to the "develop" branch. 7. The Pull Request (PR) appears on the upstream repository. Discuss your contribution @@ -48,41 +46,51 @@ See the section on pre-commit. These will automatically check the code when you commit and when you push. -## Python -### Version -SpeechBrain targets Python >= 3.7. +## Important code guidelines -### Formatting -To settle code formatting, SpeechBrain adopts the [black](https://black.readthedocs.io/en/stable/) code formatter. Before submitting pull requests, please run the black formatter on your code. +We target a specific range of supported Python versions, which are tested via CI. -In addition, we use [flake8](https://flake8.pycqa.org/en/latest/) to test code -style. Black as a tool does not enforce everything that flake8 tests. +### Formatting & linting -You can run the formatter with: `black `. Similarly the -flake8 tests can be run with `flake8 `. +Use `pre-commit run -a` to run formatting and linting, using tools like `ruff` +under the hood (see [`.pre-commit-config.yaml`](../.pre-commit-config.yaml)). +Some passes automatically fix your code, and some may require your intervention. -### Adding dependencies -In general, we strive to have as few dependencies as possible. However, we will -debate dependencies on a case-by-case basis. We value easy installability via -pip. 
+These checks are run and enforced on the CI. -In case the dependency is only needed for a specific recipe or specific niche -module, we suggest the extra tools pattern: don't add the dependency to general -requirements, but add it in the extra-requirement.txt file of the specific recipe. +### Running tests -### Testing -We are adopting unit tests using -[pytest](https://docs.pytest.org/en/latest/contents.html). -Run unit tests with `pytest tests` +We use [pytest](https://docs.pytest.org/en/latest/contents.html). Run unit tests +with `pytest tests` Additionally, we have runnable doctests, though primarily these serve as examples of the documented code. Run doctests with `pytest --doctest-modules ` -## Documentation +These checks are run and enforced on the CI. + +### Adding dependencies + +In general, we strive to have as few dependencies as possible. However, we will +debate new dependencies on a case-by-case basis. We value keeping the toolkit +lightweight to preserve easy installability via pip and to reduce the work +needed to maintain the tool in the face of conflicting dependencies. + +In case the dependency is only needed for a specific recipe, we suggest using +the extra tools pattern: don't add the dependency to general +requirements, but add it in the `extra-requirements.txt` file of that specific +recipe. + +Finally, if you want to add to the speechbrain core tools but a dependency +cannot be added to the full toolkit dependency list, you can add the tool to +the `speechbrain/integrations` folder, which requires more tests and will +only be verified to work before each release, not on every CI run. + +## Important documentation guidelines + In SpeechBrain, we plan to provide documentation at different levels: -- **Docstrings**: For each class/function in the repository, there should be a header that properly describes its functionality, inputs, and outputs. It is also crucial to provide an example that shows how it can be used as a stand-alone function. 
We use [Numpy-style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html) docstrings. Consistent docstring style enables automatic API documentation. Also note the automatic doctests (see [here](#testing). +- **Docstrings**: For each class/function in the repository, there should be a header that properly describes its functionality, inputs, and outputs. It is also crucial to provide an example that shows how it can be used as a stand-alone function. We use [Numpy-style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html) docstrings. Consistent docstring style enables automatic API documentation. Also note the automatic doctests (see [here](#testing)). - **Comments**: We encourage developers to write self-documenting code, and use proper comments where the implementation is surprising (to a Python-literate audience) @@ -92,124 +100,13 @@ and where the implemented algorithm needs clarification. - **Tutorials**: Tutorials are a good way to familiarize yourself with SpeechBrain with interactive codes and explanations. -## Development tools - -### flake8 -- A bit like pycodestyle: make sure the codestyle is according to guidelines. -- Compatible with black, in fact, current flake8 config directly taken from black -- Code compliance can be tested simply with: `flake8 ` -- You can bypass flake8 for a line with `# noqa: E.G. # noqa: E731 to allow lambda assignment` - -### pre-commit -- Python tool which takes a configuration file (.pre-commit-config.yaml) and installs the git commit hooks specified in it. -- Git commit hooks are local so all who want to use them need to install them separately. This is done by: `pre-commit install` -- The tool can also install pre-push hooks. 
This is done separately with: `pre-commit install --hook-type pre-push --config .pre-push-config.yaml` - -### the git pre-commit hooks -- Automatically run black -- Automatically fix trailing whitespace, end of file, sort requirements.txt -- Check that no large (>512kb) files are added by accident -- Automatically run flake8 -- NOTE: If the hooks fix something (e.g. trailing whitespace or reformat with black), these changes are not automatically added and committed. You’ll have to add the fixed files again and run the commit again. I guess this is a safeguard: don’t blindly accept changes from git hooks. -- NOTE2: The hooks are only run on the files you git added to the commit. This is in contrast to the CI pipeline, which always tests everything. - -### the git pre-push hooks -- Black and flake8 as checks on the whole repo -- Unit-tests and doctests run on the whole repo -- These hooks can only be run in the full environment, so if you install these, you’ll need to e.g. activate virtualenv before pushing. - -### pytest doctests -- This is not an additional dependency, but just that doctests are now run with pytest. Use: `pytest --doctest-modules ` -- Thus you may use some pytest features in docstring examples. Most notably IMO: `tmpdir = getfixture('tmpdir')` which makes a temp dir and gives you a path to it, without needing a `with tempfile.TemporaryDirectory() as tmpdir:` - -## Continuous integration - -### What is CI? -- loose term for a tight merge schedule -- typically assisted by automated testing and code review tools + practices - -### CI / CD Pipelines -- GitHub Actions (and also available as a third-party solution) feature, which automatically runs basically anything in reaction to git events. -- The CI pipeline is triggered by pull requests. -- Runs in a Ubuntu environment provided by GitHub -- GitHub offers a limited amount of CI pipeline minutes for free. -- CD stands for continuous deployment, check out the "Releasing a new version" section. 
- -### Our test suite -- Code linters are run. This means black and flake8. These are run on everything in speechbrain (the library directory), everything in recipes and everything in tests. -- Note that black will only error out if it would change a file here, but won’t reformat anything at this stage. You’ll have to run black on your code and push a new commit. The black commit hook helps avoid these errors. -- All unit-tests and doctests are run. You can check that these pass by running them yourself before pushing, with `pytest tests` and `pytest --doctest-modules speechbrain` -- Integration tests (minimal examples). The minimal examples serve both to - illustrate basic tasks and experiment running, but also as integration tests - for the toolkit. For this purpose, any file which is prefixed with - `example_` gets collected by pytest, and we add a short `test_` function at - the end of the minimal examples. -- Currently, these are not run: docstring format tests (this should be added once the docstring conversion is done). -- If all tests pass, the whole pipeline takes a couple of minutes. - -## Pull Request review guide - -This is not a comprehensive code review guide, but some rough guidelines to unify the general review practices across this project. - -Firstly, let the review take some time. Try to read every line that was added, -if possible. Try also to run some tests. Read the surrounding context of the code if needed to understand -the changes introduced. Possibly ask for clarifications if you don't understand. -If the pull request changes are hard to understand, maybe that's a sign that -the code is not clear enough yet. However, don't nitpick every detail. - -Secondly, focus on the major things first, and only then move on to smaller, -things. Level of importance: -- Immediate deal breakers (code does the wrong thing, or feature shouldn't be added etc.) -- Things to fix before merging (Add more documentation, reduce complexity, etc.) 
-- More subjective things could be changed if the author also agrees with you. - -Thirdly, approve the pull request only once you believe the changes "improve overall code health" as attested to [here](https://google.github.io/eng-practices/review/reviewer/standard.html). -However, this also means the pull request does not have to be perfect. Some features are best implemented incrementally over many pull requests, and you should be more concerned with making sure that the changes introduced lend themselves to painless further improvements. - -Fourthly, use the tools that GitHub has: comment on specific code lines, suggest edits, and once everyone involved has agreed that the PR is ready to merge, merge the request and delete the feature branch. - -Fifthly, the code review is a place for professional constructive criticism, -a nice strategy to show (and validate) that you understand what the PR is really -doing is to provide some affirmative comments on its strengths. - -## Releasing a new version - -Here are a few guidelines for when and how to release a new version. -To begin with, as hinted in the "Continuous Integration" section, we would like to follow a -pretty tight release schedule, known as "Continuous Deployment". For us, this means a new -version should be released roughly once a week. - -As for how to name the released version, we try to follow semantic versioning for this. More details -can be found at [semver.org](http://semver.org). As it applies to SpeechBrain, some examples -of what this would likely mean: - * Changes to the Brain class or other core elements often warrant a major version bump (e.g. 1.5.3 -> 2.0.0) - * Added classes or features warrant a minor version bump. Most weekly updates should fall into this. - * Patch version bumps should happen only for bug fixes. - -When releasing a new version, there are a few user-initiated action that need to occur. - 1. 
On the `develop` branch, update `speechbrain/version.txt` to say the new version: - X.Y.Z - 2. Merge the `develop` branch into the `main` branch: - git checkout main - git merge develop - 3. Push the `main` branch to github: - git push - 4. Tag the `main` branch with the new version: - git tag vX.Y.Z - 5. Push the new tag to github: - git push --tags - -This kicks off an automatic action that creates a draft release with release notes. -Review the notes to make sure they make sense and remove commits that aren't important. -You can then publish the release to make it public. -Publishing a new release kicks off a series of automatic tools, listed below: - - * The `main` branch is checked out and used for building a python package. - * The built package is uploaded to PyPI and the release is published there. - * Read the Docs uses Webhooks to get notified when a new version is published. - Read the Docs then builds the documentation and publishes the new version. - -Maintainers of relevant accounts: - * Mirco Ravanelli maintains the GitHub and PyPI accounts - * Titouan Parcollet maintains the website at [speechbrain.github.io](speechbrain.github.io) - as well as accounts at Read the Docs and Discourse + +## Additional reading + +- [Development tools](devtools.md) +- [What testing coverage approaches are needed?](coverage.md) + +### Internal contributors + +- [Releasing a new version](newversion.md) +- [Reviewing code](codereview.md) diff --git a/docs/coverage.md b/docs/coverage.md new file mode 100644 index 0000000000..f857021525 --- /dev/null +++ b/docs/coverage.md @@ -0,0 +1,265 @@ +# What testing coverage approaches are needed? + +1. Dependencies: version control (check commit ID dates) +
see: [requirements.txt](https://github.com/speechbrain/speechbrain/blob/develop/requirements.txt) +
run: `find *txt . | grep extra` +2. Docstring tests: commented function signatures
_(of functions intended for outer calls)_ +3. [Unittests](https://github.com/speechbrain/speechbrain/tree/develop/tests/unittests) per function-critical code block +4. [Integration tests](https://github.com/speechbrain/speechbrain/tree/develop/tests/integration) for vanilla experiments to cover use-cases on a generic task basis +5. Regression testing: standing interfaces & their refactoring +6. Linters for automated style checks & corrections of python & yaml code + +## Where to get things done? + +1. Raise your questions & engage in [Discussions](https://github.com/speechbrain/speechbrain/discussions) +2. Report a bug or request a feature, open [Issues](https://github.com/speechbrain/speechbrain/issues/new/choose) +3. Contribute [Pull requests](https://github.com/speechbrain/speechbrain/pulls) +4. Release pretrained models through SpeechBrain +
e.g. linking your HuggingFace account to SpeechBrain for hosting your model card
_(all creating a one-time ubuntu-latest environment)_ + +--- + +Info: although our PyTorch requirements are +``` +torch>=1.9.0 +torchaudio>=0.9.0 +``` +our tests cover one PyTorch version only, _the latest_. + + +### [pre-commit.yml](https://github.com/speechbrain/speechbrain/blob/develop/.github/workflows/pre-commit.yml) + > SpeechBrain pre-commit / pre-commit (pull_request) +* python-version: '3.12' +* run pre-commit action, configured in [.pre-commit-config.yaml](https://github.com/speechbrain/speechbrain/blob/develop/.pre-commit-config.yaml) + * hook: https://github.com/pre-commit/pre-commit-hooks +
trailing-whitespace +
end-of-file-fixer +
requirements-txt-fixer +
mixed-line-ending +
check-added-large-files + * hook: https://github.com/psf/black +
black +
click + * hook: https://github.com/astral-sh/ruff-pre-commit +
ruff; see: [pyproject.toml](https://github.com/speechbrain/speechbrain/blob/develop/pyproject.toml) for configuration + * hook: https://github.com/adrienverge/yamllint +
yamllint; see: [.yamllint.yaml](https://github.com/speechbrain/speechbrain/blob/develop/.yamllint.yaml) + +### [pythonapp.yml](https://github.com/speechbrain/speechbrain/blob/develop/.github/workflows/pythonapp.yml) + > SpeechBrain toolkit CI / Tests (3.10) (pull_request)
+ > SpeechBrain toolkit CI / Tests (3.13) (pull_request) +* python-version: ["3.10", 3.13] +* create fresh environment + ```shell + sudo apt-get install -y libsndfile1 + pip install -r requirements.txt + pip install --editable . + pip install ctc-segmentation + ``` +* run PyTest checks +
see: [pytest.ini](https://github.com/speechbrain/speechbrain/blob/develop/pytest.ini) - files: `test_*.py`; `check_*.py`; `example_*.py` & norecursedirs +
see: [conftest.py](https://github.com/speechbrain/speechbrain/blob/develop/conftest.py) - prepare test item collection & direct discovery + ``` + # excerpts + parser.addoption("--device", action="store", default="cpu") + ... + ``` + * a. hook: Consistency tests with pytest +
`pytest tests/consistency` + * b. hook: Unittests with pytest +
`pytest tests/unittests` + * c. hook: Doctests with pytest +
`pytest --doctest-modules speechbrain` + * d. hook: Integration tests with pytest +
`pytest tests/integration` + +### [verify-docs-gen.yml](https://github.com/speechbrain/speechbrain/blob/develop/.github/workflows/verify-docs-gen.yml) [I.2.a] + > Verify docs generation / docs (pull_request) +* python-version: '3.12' +* create fresh environment + ```shell + pip install -r requirements.txt + pip install --editable . + pip install -r docs/docs-requirements.txt + ``` +* generates docs + ```shell + cd docs + make html + ``` +* compare: [.readthedocs.yaml](https://github.com/speechbrain/speechbrain/blob/develop/.readthedocs.yaml) - python version: 3.8 + +### [newtag.yml](https://github.com/speechbrain/speechbrain/blob/develop/.github/workflows/newtag.yml) + > Draft release when pushing new tag +* tagging of `develop` branch commit ID +* before + * follow through [tests/PRE-RELEASE-TESTS.md](https://github.com/speechbrain/speechbrain/blob/develop/tests/PRE-RELEASE-TESTS.md) + * set-up fresh environment + * run `pytest` + * a. hook: [tests/.run-load-yaml-tests.sh](https://github.com/speechbrain/speechbrain/blob/develop/tests/.run-load-yaml-tests.sh) + * b. hook: [tests/.run-recipe-tests.sh](https://github.com/speechbrain/speechbrain/blob/develop/tests/.run-recipe-tests.sh) + * c. hook: [tests/.run-HF-checks.sh](https://github.com/speechbrain/speechbrain/blob/develop/tests/.run-HF-checks.sh) + * d. hook: [ests/.run-url-checks.sh](https://github.com/speechbrain/speechbrain/blob/develop/tests/.run-url-checks.sh) + * update of [speechbrain/version.txt](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/version.txt) to the next +* action: draft push to `main` branch +
implies pre-push hook, see: [.pre-push-config.yaml](https://github.com/speechbrain/speechbrain/blob/develop/.pre-push-config.yaml) with hooks to: + * e. [tests/.run-linters.sh](https://github.com/speechbrain/speechbrain/blob/develop/tests/.run-linters.sh) + * f. [tests/.run-unittests.sh](https://github.com/speechbrain/speechbrain/blob/develop/tests/.run-unittests.sh) + * g. [tests/.run-doctests.sh](https://github.com/speechbrain/speechbrain/blob/develop/tests/.run-doctests.sh) + +### [release.yml](https://github.com/speechbrain/speechbrain/blob/develop/.github/workflows/release.yml) + > Publish to PyPI +* python-version: 3.12 +* action: checkout to `main` branch +* creates: `pypa/build` for binary wheel and source tarball +* action: Publish to PyPI via `pypa/gh-action-pypi-publish@master` +
implies use of + * [LICENSE](https://github.com/speechbrain/speechbrain/blob/develop/LICENSE) + * [README.md](https://github.com/speechbrain/speechbrain/blob/develop/README.md) + * [pyproject.toml](https://github.com/speechbrain/speechbrain/blob/develop/pyproject.toml) - target-version = ['py38'] + * python_requires=">=3.8.1", + * uses: [speechbrain/version.txt](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/version.txt) + * requires: + ``` + "hyperpyyaml", + "joblib", + "numpy", + "packaging", + "scipy", + "sentencepiece", + "torch>=1.9", + "torchaudio", + "tqdm", + "huggingface_hub", + ``` + * points to https://speechbrain.github.io/ + +The versions of tools used/hooked in these checks are controlled via [lint-requirements.txt](https://github.com/speechbrain/speechbrain/blob/develop/lint-requirements.txt), a nested dependency in [requirements.txt](https://github.com/speechbrain/speechbrain/blob/develop/requirements.txt). +With major version releases of SpeechBrain, the versions of each hook should be updated—alongside requirement consistency in source, testing & builds incl. running spell-checking. + + +## PyTest for reporting code coverage rates + +How to know test coverage changes of Open PRs to be merged? +
_(snippet for cpu-only)_ +``` +# Example: install more dependencies to avoid ignoring modules +sudo apt install -y libsndfile1 +pip install ctc_segmentation + +# install coverage +pip install pytest-cov + +# run the test (w/ duration reporting) +pytest --durations=0 --cov=speechbrain --cov-context=test --doctest-modules speechbrain tests +``` +Example: _After collecting 459 testing items, 4481/16782 statements are reported "missing" (73% coverage)._ + +YET—python code of the core modules is not all to be covered; thus far, only, consistency is ensured.. + +--- + +Further reading: +
pytest & coverage - https://breadcrumbscollector.tech/how-to-use-code-coverage-in-python-with-pytest/ (pointer by @Adel-Moumen) + +--- + +``` +pytest --durations=0 --cov=speechbrain --cov-context=test --doctest-modules speechbrain tests + +---------- coverage: platform linux, python 3.9.12-final-0 ----------- +Name Stmts Miss Cover +----------------------------------------------------------------------------- +speechbrain/alignment/aligner.py 380 61 84% +speechbrain/alignment/ctc_segmentation.py 189 10 95% +speechbrain/core.py 424 155 63% <== < 80% +speechbrain/dataio/batch.py 99 8 92% +speechbrain/dataio/dataio.py 279 50 82% +speechbrain/dataio/dataloader.py 140 25 82% +speechbrain/dataio/dataset.py 100 8 92% +speechbrain/dataio/encoder.py 328 46 86% +speechbrain/dataio/iterators.py 80 62 22% <== < 80% +speechbrain/dataio/legacy.py 121 41 66% <== < 80% +speechbrain/dataio/preprocess.py 22 4 82% +speechbrain/dataio/sampler.py 224 61 73% <== < 80% +speechbrain/dataio/wer.py 63 54 14% <== < 80% +speechbrain/decoders/ctc.py 111 89 20% <== < 80% +speechbrain/decoders/seq2seq.py 370 46 88% +speechbrain/decoders/transducer.py 133 64 52% <== < 80% +speechbrain/lm/arpa.py 77 3 96% +speechbrain/lm/counting.py 37 4 89% +speechbrain/lm/ngram.py 36 1 97% +speechbrain/lobes/augment.py 154 55 64% <== < 80% +speechbrain/lobes/beamform_multimic.py 20 14 30% <== < 80% +speechbrain/lobes/features.py 96 9 91% +speechbrain/lobes/models/CRDNN.py 52 12 77% <== < 80% Ruff #29 +speechbrain/lobes/models/transformer/Transformer.py 180 22 88% +speechbrain/lobes/models/transformer/TransformerASR.py 92 28 70% <== < 80% +speechbrain/lobes/models/transformer/TransformerLM.py 47 5 89% +speechbrain/lobes/models/transformer/TransformerSE.py 20 2 90% +speechbrain/lobes/models/transformer/TransformerST.py 81 60 26% <== < 80% +speechbrain/lobes/models/wav2vec.py 123 55 55% <== < 80% +speechbrain/nnet/CNN.py 417 56 87% +speechbrain/nnet/RNN.py 471 51 89% +speechbrain/nnet/activations.py 39 1 97% 
+speechbrain/nnet/attention.py 234 44 81% +speechbrain/nnet/complex_networks/c_CNN.py 130 23 82% +speechbrain/nnet/complex_networks/c_RNN.py 374 67 82% +speechbrain/nnet/complex_networks/c_normalization.py 277 68 75% <== < 80% +speechbrain/nnet/complex_networks/c_ops.py 108 40 63% <== < 80% +speechbrain/nnet/containers.py 139 14 90% +speechbrain/nnet/linear.py 27 1 96% +speechbrain/nnet/loss/si_snr_loss.py 20 16 20% <== < 80% +speechbrain/nnet/loss/stoi_loss.py 81 1 99% +speechbrain/nnet/losses.py 323 112 65% <== < 80% +speechbrain/nnet/normalization.py 142 6 96% +speechbrain/nnet/pooling.py 156 31 80% +speechbrain/nnet/quantisers.py 47 2 96% +speechbrain/nnet/quaternion_networks/q_CNN.py 150 25 83% +speechbrain/nnet/quaternion_networks/q_RNN.py 370 59 84% +speechbrain/nnet/quaternion_networks/q_linear.py 50 11 78% <== < 80% +speechbrain/nnet/quaternion_networks/q_normalization.py 44 4 91% +speechbrain/nnet/quaternion_networks/q_ops.py 229 122 47% <== < 80% +speechbrain/nnet/schedulers.py 363 103 72% <== < 80% +speechbrain/nnet/transducer/transducer_joint.py 33 5 85% +speechbrain/pretrained/fetching.py 48 6 88% +speechbrain/pretrained/interfaces.py 786 338 57% <== < 80% +speechbrain/pretrained/training.py 33 28 15% <== < 80% +speechbrain/processing/PLDA_LDA.py 345 96 72% <== < 80% +speechbrain/processing/decomposition.py 102 8 92% +speechbrain/processing/diarization.py 319 157 51% <== < 80% +speechbrain/processing/features.py 359 75 79% <== < 80% +speechbrain/processing/multi_mic.py 345 2 99% +speechbrain/processing/signal_processing.py 166 39 77% <== < 80% +speechbrain/processing/speech_augmentation.py 386 34 91% +speechbrain/tokenizers/SentencePiece.py 181 74 59% <== < 80% +speechbrain/utils/Accuracy.py 24 17 29% <== < 80% +speechbrain/utils/DER.py 44 33 25% <== < 80% +speechbrain/utils/bleu.py 50 43 14% <== < 80% +speechbrain/utils/callchains.py 28 5 82% +speechbrain/utils/checkpoints.py 294 52 82% +speechbrain/utils/data_pipeline.py 181 15 92% 
+speechbrain/utils/data_utils.py 197 77 61% <== < 80% +speechbrain/utils/depgraph.py 82 1 99% +speechbrain/utils/distributed.py 61 37 39% <== < 80% +speechbrain/utils/edit_distance.py 180 50 72% <== < 80% +speechbrain/utils/epoch_loop.py 55 22 60% <== < 80% +speechbrain/utils/hparams.py 2 1 50% <== < 80% +speechbrain/utils/hpopt.py 134 41 69% <== < 80% +speechbrain/utils/logger.py 73 45 38% <== < 80% +speechbrain/utils/metric_stats.py 285 48 83% +speechbrain/utils/parameter_transfer.py 87 17 80% +speechbrain/utils/profiling.py 191 54 72% <== < 80% +speechbrain/utils/superpowers.py 20 6 70% <== < 80% +speechbrain/utils/text_to_sequence.py 77 22 71% <== < 80% +speechbrain/utils/torch_audio_backend.py 9 2 78% <== < 80% +speechbrain/utils/train_logger.py 150 113 25% <== < 80% +speechbrain/wordemb/transformer.py 90 67 26% <== < 80% +----------------------------------------------------------------------------- +TOTAL 16782 4481 73% +``` diff --git a/docs/devtools.md b/docs/devtools.md new file mode 100644 index 0000000000..fd69f22ee1 --- /dev/null +++ b/docs/devtools.md @@ -0,0 +1,58 @@ +# Development tools + +## Linting/formatting/testing + +### ruff +- A fast Python linter and formatter that replaces multiple tools like flake8, isort, and pycodestyle +- Compatible with black and provides similar functionality to flake8 +- Code compliance can be tested simply with: `ruff check ` +- You can bypass ruff for a line with `# noqa: ` E.G. `# noqa: E731` to allow lambda assignment +- Can also format code with: `ruff format ` + +### pre-commit +- Python tool which takes a configuration file (.pre-commit-config.yaml) and installs the git commit hooks specified in it. +- Git commit hooks are local so all who want to use them need to install them separately. This is done by: `pre-commit install` +- The tool can also install pre-push hooks. 
This is done separately with: `pre-commit install --hook-type pre-push --config .pre-push-config.yaml` + +### the git pre-commit hooks +- Automatically run ruff (linting and formatting) +- Automatically fix trailing whitespace, end of file, sort requirements.txt +- Check that no large (>512kb) files are added by accident +- Automatically run cspell +- NOTE: If the hooks fix something (e.g. trailing whitespace or reformat with ruff), these changes are not automatically added and committed. You'll have to add the fixed files again and run the commit again. I guess this is a safeguard: don't blindly accept changes from git hooks. +- NOTE2: The hooks are only run on the files you git added to the commit. This is in contrast to the CI pipeline, which always tests everything. +- NOTE3: If a word is flagged as a spelling error but it should be kept, you can add the word to `.dict-speechbrain.txt` + +### the git pre-push hooks +- Ruff as checks on the whole repo +- Unit-tests and doctests run on the whole repo +- These hooks can only be run in the full environment, so if you install these, you'll need to e.g. activate virtualenv before pushing. + +### pytest doctests +- This is not an additional dependency, but just that doctests are now run with pytest. Use: `pytest --doctest-modules ` +- Thus you may use some pytest features in docstring examples. Most notably IMO: `tmpdir = getfixture('tmpdir')` which makes a temp dir and gives you a path to it, without needing a `with tempfile.TemporaryDirectory() as tmpdir:` + +## Continuous integration + +### What is CI? +- loose term for a tight merge schedule +- typically assisted by automated testing and code review tools + practices + +### CI / CD Pipelines +- GitHub Actions (and also available as a third-party solution) feature, which automatically runs basically anything in reaction to git events. +- The CI pipeline is triggered by pull requests. 
+- Runs in a Ubuntu environment provided by GitHub +- GitHub offers a limited amount of CI pipeline minutes for free. +- CD stands for continuous deployment, check out the "Releasing a new version" section. + +### Our test suite +- Code linters are run. This means ruff. These are run on everything in speechbrain (the library directory), everything in recipes and everything in tests. +- Note that ruff will only error out if it would change a file here, but won't reformat anything at this stage. You'll have to run ruff on your code and push a new commit. The ruff commit hook helps avoid these errors. +- All unit-tests and doctests are run. You can check that these pass by running them yourself before pushing, with `pytest tests` and `pytest --doctest-modules speechbrain` +- Integration tests (minimal examples). The minimal examples serve both to + illustrate basic tasks and experiment running, but also as integration tests + for the toolkit. For this purpose, any file which is prefixed with + `example_` gets collected by pytest, and we add a short `test_` function at + the end of the minimal examples. +- Currently, these are not run: docstring format tests (this should be added once the docstring conversion is done). +- If all tests pass, the whole pipeline takes a couple of minutes. 
\ No newline at end of file diff --git a/docs/docs-requirements.txt b/docs/docs-requirements.txt index 36e81e254d..f5dd106467 100644 --- a/docs/docs-requirements.txt +++ b/docs/docs-requirements.txt @@ -1,10 +1,14 @@ better-apidoc>=0.3.1 -ctc-segmentation>=1.7.0 -fairseq +https://github.com/kpu/kenlm/archive/master.zip +myst_nb numba>=0.54.1 +pyctcdecode recommonmark>=0.7.1 scikit-learn six +sphinx-copybutton +sphinx-design +sphinx-markdown-tables sphinx-rtd-theme>=0.4.3 -Sphinx>=3.4.3 -transformers==4.13 +Sphinx>=7.4.1,<9.0 +transformers diff --git a/docs/experiment.md b/docs/experiment.md index ffd0eb3328..5c5b872698 100644 --- a/docs/experiment.md +++ b/docs/experiment.md @@ -1,21 +1,33 @@ # Running an experiment -In SpeechBrain, you can run experiments in this way: +In SpeechBrain, you can train most models in recipes like this: ``` -> cd recipes/// -> python experiment.py params.yaml +> cd recipes/// +> python train.py hparams/hyperparams.yaml ``` +Follow the steps in the README of each recipe for more details. + The results will be saved in the `output_folder` specified in the yaml file. -The folder is created by calling `sb.core.create_experiment_directory()` in `experiment.py`. Both detailed logs and experiment outputs are saved there. Furthermore, less verbose logs are output to stdout. +The folder is created by calling `sb.core.create_experiment_directory()` in `train.py`. Both detailed logs and experiment outputs are saved there. Furthermore, less verbose logs are output to stdout. ## YAML basics -The YAML syntax offers an elegant way to specify the hyperparameters of a recipe. +SpeechBrain uses an extended variant of YAML named HyperPyYAML. It offers an elegant way to specify the hyperparameters of a recipe. + In SpeechBrain, the YAML file is not a plain list of parameters, but for each parameter, we specify the function (or class) that is using it. 
-This not only makes the specification of the parameters more transparent but also allows us to properly initialize all the entries by simply calling the `load_extended_yaml` (in `speechbrain.utils.data_utils`). +This not only makes the specification of the parameters more transparent but also allows us to properly initialize all the entries by simply calling `load_hyperpyyaml` (from HyperPyYAML). + +### Security warning + +Loading HyperPyYAML allows **arbitrary code execution**. +This is a feature: HyperPyYAML allows you to construct *anything* and *everything* +you need in your experiment. +However, take care to verify any untrusted recipes' YAML files just as you would verify the Python code. + +### Features -Let's now take a quick look at the extended YAML features, using an example: +Let's now take a quick tour of the extended YAML features, using an example: ``` seed: !PLACEHOLDER @@ -35,24 +47,22 @@ model: !new:speechbrain.lobes.models.CRDNN.CRDNN - `!ref /save` evaluates the part in angle brackets, referencing the YAML itself. - `!PLACEHOLDER` simply errors out when loaded; it should be replaced by - every user either by editing the yaml, or with an override (passed to - `load_extended_yaml`). + every user either by using the commandline (which passes an override to + `load_hyperpyyaml`), or by manually editing the `.yaml` if necessary. -For more details on YAML and our extensions, please see our dedicated [tutorial](https://colab.research.google.com/drive/1Pg9by4b6-8QD2iC0U7Ic3Vxq4GEwEdDz?usp=sharing). +[**Learn more with the dedicated HyperPyYAML tutorial!**](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html) ## Running arguments -SpeechBrain defines a set of running arguments that can be set from the command line args (or within the YAML file). + +SpeechBrain defines a set of running arguments that can be set from the command line args (or within the YAML file), e.g.: + - `device`: set the device to be used for computation. 
- `debug`: a flag that enables debug mode, only running a few iterations to verify that program won't crash. -- `data_parallel_backend`: a flag that enables `data_parallel` for multigpu training on a single machine. -- `data_parallel_count`: default "-1" (use all gpus), if > 0, use a subset of gpus available `[0, 1, ..., data_parallel_count]`. -- `distributed_launch`: A flag that enables training with `ddp` for multiGPU training. Assumes `torch.distributed.launch` was used to start script. the `local_rank` and `rank` UNIX arguments are parsed. -- `distributed_backend`: default "nccl", options: `["nccl", "gloo", "mpi"]`, this backend will be used as a DDP communication protocol. See PyTorch documentation for more details. - Additional runtime arguments are documented in the Brain class. -Please note that we provide a dedicated [tutorial](https://colab.research.google.com/drive/13pBUacPiotw1IvyffvGZ-HrtBr9T6l15?usp=sharing) to document the different multi-gpu training strategies: +If you want to train using multiple GPUs, please follow the [**multi-GPU training guide**](https://speechbrain.readthedocs.io/en/latest/multigpu.html). -You can also override parameters in YAML in this way: +You can also override parameters from the YAML in this way: ``` > python experiment.py params.yaml --seed 1234 --data_folder /path/to/folder --num_layers 5 @@ -64,26 +74,84 @@ This call would override hyperparameters `seed` and `data_folder` and `num_layer - The command line args will always override the hparams file args. ## Tensor format -All the tensors within SpeechBrain are formatted using the following convention: + +Tensors in SpeechBrain follow a batch-time-channels convention: + +- **The batch dimension is always the first dimension (even if it is `1`).** +- **The time step dimension is always the second one.** +- **The remaining optional dimensions are channels (however many dimensions you need)**. 
+ +In other words, a tensor will look like any of these: + ``` -tensor=(batch, time_steps, channels[optional]) +(batch_size, time_steps) +(batch_size, time_steps, channel0) +(batch_size, time_steps, channel0, channel1, ...) ``` -**The batch is always the first element, and time_steps is always the second one. The remaining optional dimensions are channels. (there might be as many channels as you need)**. -*Why do we need all tensors to have the same format?* It is crucial to have a shared format for all the classes and functions. This makes model combination easier. Many formats are possible. For SpeechBrain we selected this one because it is commonly used in recurrent neural networks. -The adopted format is very flexible and allows users to read different types of data. For instance, with single-channel raw waveform signals, the tensor will be tensor=(batch, time_steps), while for multi-channel raw waveform it will be tensor=(batch, time_steps, n_channel). Beyond waveforms, this format is used for any tensor in the computation pipeline. For instance, fbank features that are formatted in this way: +For waveforms, we generally choose to squeeze the final dimension (i.e. it there is _no_ channel dimension for mono audio). + +Simple waveform examples: + +- A waveform of 3 seconds sampled at 16kHz in mono: `(1, 3*16000)` +- A waveform of 3 seconds sampled at 16kHz in stereo: `(1, 3*16000, 2)` + +Beyond waveforms, this format is used for any tensor in the computation pipeline. For instance... + +- The [Short-Time Fourier Transform (STFT)](https://speechbrain.readthedocs.io/en/develop/tutorials/preprocessing/fourier-transform-and-spectrograms.html) for mono audio would follow this shape, where `2` corresponds to the real and imaginary parts of the STFT (complex number): + ``` -(batch, time_step, n_filters) +(batch_size, time_steps, n_fft, 2) +``` + +- If we were to process the STFT of multi-channel audio (e.g. 
stereo), it would look like this: + ``` -The Short-Time Fourier Transform (STFT) tensor, instead, will be: +(batch_size, time_steps, n_fft, 2, n_audio_channels) ``` -(batch, time_step, n_fft, 2) + +- For [Filter Banks (FBanks)](https://speechbrain.readthedocs.io/en/develop/tutorials/preprocessing/speech-features.html), the shape would be: + ``` -where the “2” corresponds to the real and imaginary parts of the STFT. -We can also read multi-channel SFT data, that will be formatted in this way: +(batch_size, time_steps, n_filters) ``` -(batch, time_step, n_fft, 2, n_audio_channels) + +## Modified PyTorch globals and GPU quirks + +For various reasons, SpeechBrain modifies some PyTorch global configuration to work around issues or improve execution speed, sometimes depending on GPU configuration. +We do so when we consider that some modified defaults make more sense given our usecases than PyTorch's defaults. For instance, we very commonly encounter dynamic tensor shapes, which comes at odds with certain auto-tuning methods. + +These changes are applied in a standardized location, [`quirks.py`](https://github.com/speechbrain/speechbrain/tree/develop/speechbrain/utils/quirks.py). They are logged when starting an experiment. + +The `SB_DISABLE_QUIRKS` environment variable lets you disable quirks easily. For instance, to disable TensorFloat32 and re-enable JIT profiling, you would use `SB_DISABLE_QUIRKS=allow_tf32,disable_jit_profiling`. + +## Parallel processing and SLURM + +SpeechBrain's data preparation scripts often use `parallel_map` from `speechbrain.utils.parallel` to speed up processing. By default, this function automatically detects the number of available CPUs by checking CPU affinity (via `os.sched_getaffinity` on Unix systems), which respects SLURM allocations and cgroup limits. 
+ +The `SB_NUM_PROC` environment variable allows you to manually override the number of parallel processes: + +```bash +# Limit parallel processing to 4 processes +SB_NUM_PROC=4 python prepare_data.py params.yaml ``` + +This is useful when: +- You want to limit CPU usage on shared systems +- The automatic detection doesn't work correctly in your environment +- You need reproducible behavior across different machines + +## Reproducibility + +To improve reproducibility across experiments, SpeechBrain supports its own seeding function located in `speechbrain.utils.seed.seed_everything`. This function sets the seed for various generators such as NumPy, PyTorch, and Python, following the [PyTorch recommendations](https://pytorch.org/docs/stable/notes/randomness.html). + +However, due to the differences in how GPU and CPU executions work, results may not be fully reproducible even with identical seeds, especially when training models. This issue primarily affects training experiments. + +On the other hand, when preparing data using data preparation scripts, the output of these scripts is independent of the global seeds. This ensures that you will get identical outputs on different setups, even if different seeds are used. + +In distributed experiments, reproducibility becomes more complex as different seeds (offset by the rank) will be set on different machines or processes. This primarily impacts operations that rely on randomness, such as data augmentations. Since each process in a distributed setup is assigned its own seed, the randomness applied to data (e.g., augmentations) can differ between processes, even though the global seed is the same across machines. + +It’s important to note that this variance in seeding does not affect certain elements of the experiment. For instance, initial model parameters are broadcast to all processes from the main process in distributed training. 
Similarly, components like data loaders, which shuffle data, will be affected by per-process seeds, but the underlying data pipeline remains synchronized across processes. diff --git a/docs/guidance.md b/docs/guidance.md new file mode 100644 index 0000000000..14b8f85df1 --- /dev/null +++ b/docs/guidance.md @@ -0,0 +1,54 @@ +# Project Structure & Ecosystem + +"SpeechBrain" refers to both the software and recipes here on GitHub, and to a wider ecosystem spanning various platforms (PyPI, readthedocs, HuggingFace, DropBox). + +This document hopes to untangle the general structure of the project and its ecosystem, for contributors and regular users. + +## Directory Structure + +This is not quite a complete list, but it gives a broad outline. + +| Directory | Contents | +|-|-| +| **Core** [(API doc)](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.html) | | +| **[`speechbrain/`](https://github.com/speechbrain/speechbrain/tree/develop/speechbrain/)** | Source code for the core | +| **[`speechbrain/inference/`](https://github.com/speechbrain/speechbrain/tree/develop/speechbrain/inference/)** | Easy-to-use inference code with HuggingFace integration | +| **[`speechbrain/utils/`](https://github.com/speechbrain/speechbrain/tree/develop/speechbrain/utils/)** | Miscellaneous utilities that don't really fit elsewhere | +| **Documentation** | | +| **[`docs/`](https://github.com/speechbrain/speechbrain/tree/develop/docs/)** | Documentation pages and configuration | +| **[`docs/tutorials/`](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/)** | Jupyter Notebook tutorials | +| **Recipes** | | +| **[`recipes/`](https://github.com/speechbrain/speechbrain/tree/develop/recipes/)** | Ready-to-use recipes under the form `dataset/task/model/` | +| **[`templates/`](https://github.com/speechbrain/speechbrain/tree/develop/templates/)** | Reference implementation for tasks to (optionally) use for new recipes | +| **Testing/linting/meta** | | +| 
**[`.github/`](https://github.com/speechbrain/speechbrain/tree/develop/.github/)** | GitHub issue/PR templates and Actions workflows for testing | +| **[`tests/`](https://github.com/speechbrain/speechbrain/tree/develop/tests/)** | Automated tests, some run under CI, some manually | +| **[`tools/`](https://github.com/speechbrain/speechbrain/tree/develop/tools/)** | One-off complete scripts and tools for specific tasks | +| **[`.pre-commit-config.yaml`](`https://github.com/speechbrain/speechbrain/tree/develop/.pre-commit-config.yaml`)** | Linter configuration (style check, formatting) | + +## External Platforms + +| URL | Contents | +|-|-| +|****| Official SpeechBrain repository | +|| Landing page (deployed from [here](https://github.com/speechbrain/speechbrain.github.io>)) | +|| Standardized benchmarks based on SpeechBrain | +|| Official HyperPyYAML repository | +|| Documentation and tutorials (deployed from [`docs/`](docs/)) | +|| Pre-trained models ready for inference | +| DropBox links in repository | Data, training logs and checkpoints | + +## Testing Infrastructure + +| Scope | Description | +|-|-| +| **CI-automated** | Tests that are verified continuously through Actions | +| Linting | Enforcing good practice, formatting, etc., see [`.pre-commit-config.yaml`](`https://github.com/speechbrain/speechbrain/tree/develop/.pre-commit-config.yaml`) | +| Consistency | Enforcing rules on YAMLs, presence of tests, among others | +| Doctests | Testing simple usecases at class/function level, and providing examples | +| Unit tests | Tests for specific components. Deeper testing than doctests | +| Integration tests | Testing for regressions at a larger scale (e.g. 
mini-recipes) | +| **Semi-manual** | Tests that are manually run by you or the Core Team at a varying frequency | +| [URL checks](https://github.com/speechbrain/speechbrain/tree/develop/tests/.run-url-checks.sh) | Checking for dead links in documentation, code and tutorials | +| [Recipe tests](https://github.com/speechbrain/speechbrain/tree/develop/tests/recipes/) | Test model training for all recipe `.csv` on sample data | +| [HuggingFace checks](https://github.com/speechbrain/speechbrain/tree/develop/tests/.run-HF-checks.sh) | Check if known models on HF seem to execute fine | diff --git a/docs/index.rst b/docs/index.rst index bf7e23253b..fb97e1c2c1 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,56 +3,87 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -.. image:: images/speechbrain-logo.svg - :width: 400 - :align: center +========== +User guide +========== SpeechBrain is an open-source and all-in-one speech toolkit based on PyTorch. -This documentation is intended to give SpeechBrain users all the API -information necessary to develop their projects. For tutorials, -please refer to the official `Github `_ -or the official `Website ` +This documentation provides install steps, tutorials and API documentation +necessary to help users develop their projects. +.. dropdown:: License considerations (Apache 2.0) -License --------- + SpeechBrain is released under the `Apache License, version 2.0 `_. The Apache license is a popular BSD-like license. + SpeechBrain can be redistributed for free, even for commercial purposes, although you can not take off the license headers (and under some circumstances you may have to distribute a license document). + Apache is not a viral license like the GPL, which forces you to release your modifications to the source code. Also note that this project has no connection to the Apache Foundation, other than that we use the same license terms. 
-SpeechBrain is released under the Apache license, version 2.0. The Apache license is a popular BSD-like license. -SpeechBrain can be redistributed for free, even for commercial purposes, although you can not take off the license headers (and under some circumstances you may have to distribute a license document). -Apache is not a viral license like the GPL, which forces you to release your modifications to the source code. Also note that this project has no connection to the Apache Foundation, other than that we use the same license terms. + It is a community project, which means that discussions are engaged community-wide while decisions are taken by Dr. Ravanelli and Dr. Parcollet with respect to the community views. + There is no legal institution associated as an owner of SpeechBrain. Furthermore, and due to the Apache License, anyone that would disagree with the way the project is being run can fork it and start a new toolkit. -It is a community project, which means that discussions are engaged community-wide while decisions are taken by Dr. Ravanelli and Dr. Parcollet with respect to the community views. -There is no legal institution associated as an owner of SpeechBrain. Furthermore, and due to the Apache Licence, anyone that would disagree with the way the project is being run can fork it and start a new toolkit. +.. dropdown:: Referencing SpeechBrain (BibTeX) -Referencing SpeechBrain --------- -.. 
code-block:: txt + If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry: - @misc{speechbrain, - title={SpeechBrain: A General-Purpose Speech Toolkit}, - author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, - year={2021}, - eprint={2106.04624}, - archivePrefix={arXiv}, - primaryClass={eess.AS} - } + .. code-block:: bibtex + @misc{speechbrainV1, + title={Open-Source Conversational AI with {SpeechBrain} 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, + } + @misc{speechbrain, + title={SpeechBrain: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori 
and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS} + } + + +We provide complete **Jupyter Notebook tutorials below** for beginners and +advanced users alike! You can view them in documentation, run them in Google +Colab or run them locally with +`Jupyter Notebook `_. .. toctree:: :maxdepth: 1 - :caption: Getting started: + :caption: Getting started installation.md experiment.md + guidance.md + Contributing + + +.. toctree:: + :maxdepth: 1 + :caption: Tutorial notebooks + + tutorials/basics.rst + tutorials/advanced.rst + tutorials/preprocessing.rst + tutorials/tasks.rst + tutorials/nn.rst + + +.. toctree:: + :maxdepth: 1 + :caption: Tips & tricks + + audioloading.rst multigpu.md - tutorials.md - contributing.md -API Documentation --------- + +API +--- .. toctree:: - :caption: API Documentation: + :caption: API :hidden: :maxdepth: 3 @@ -63,11 +94,16 @@ API Documentation speechbrain speechbrain.alignment + speechbrain.augment speechbrain.dataio speechbrain.decoders + speechbrain.inference + speechbrain.integrations speechbrain.lm speechbrain.lobes speechbrain.nnet speechbrain.processing speechbrain.tokenizers speechbrain.utils + + hyperpyyaml.core diff --git a/docs/installation.md b/docs/installation.md index 13295dfa15..5b428d29db 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -1,11 +1,12 @@ # Quick installation -SpeechBrain is constantly evolving. New features, tutorials, and documentation will appear over time. SpeechBrain can be installed via PyPI to rapidly use the standard library. Moreover, a local installation can be used to run experiments and modify/customize the toolkit. +SpeechBrain is constantly evolving. New features, tutorials, and documentation will appear over time. SpeechBrain can be installed via PyPI to rapidly use the standard library. Moreover, a local installation can be used to run experiments and modify/customize the toolkit and its recipes. 
-SpeechBrain supports both CPU and GPU computations. For most recipes, however, a GPU is necessary during training. Please note that CUDA must be properly installed to use GPUs. +SpeechBrain supports both CPU and GPU computation. For most recipes, however, a GPU is necessary during training. Please note that CUDA must be properly installed to use GPUs. -We support pytorch >= 1.7 (https://pytorch.org/) and Python >= 3.7. +We support [PyTorch](https://pytorch.org/get-started/locally/) 1.9+ and Python 3.8.1-3.12 (newer Python versions than advertised may work if supported by PyTorch). +We recommend you upgrade to at least 3.9+ as support for 3.8 will be removed eventually. ## Install via PyPI @@ -15,6 +16,8 @@ Once you have created your Python environment (see instructions below) you can s pip install speechbrain ``` +Depending on your OS, audio loading may require the install of optional torchaudio dependencies to work. If it does not work out-of-the box for you, please visit [audio troubleshooting](audioloading.html). + Then you can then access SpeechBrain with: ``` @@ -54,25 +57,37 @@ You can run doctests with: tests/.run-doctests.sh ``` +## Recipe installation +If you're trying to run a specific recipe, first follow the "Install locally" instructions above. + +Next, install any recipe-specific dependencies (if any) by executing: + +```bash +cd recipes/// +pip install -r extra_requirements.txt +``` + +Any recipe-specific instructions should be clearly spelled out in the associated `README.md`. + ## Operating Systems SpeechBrain supports Linux-based distributions and macOS. A solution for windows users can be found in this [GitHub issue](https://github.com/speechbrain/speechbrain/issues/512). -## Anaconda and venv +## Setting up a Conda environment/virtualenv A good practice is to have different python environments for your different tools and toolkits, so they do not interfere with each other. 
This can be done either with [Anaconda](https://www.anaconda.com/products/distribution) or [venv](https://docs.python.org/3.8/library/venv.html). -Anaconda can be installed by simply following [this tutorial](https://docs.anaconda.com/anaconda/install/linux/). In practice, it is a matter of downloading the installation script and executing it. +Anaconda can be installed by simply following [this tutorial](https://docs.anaconda.com/free/anaconda/install/linux/). In practice, it is a matter of downloading the installation script and executing it. -## Anaconda setup +### Conda -Once Anaconda is installed, you can create a new environment with: +Once Conda is installed, you can create a new environment with: ``` -conda create --name speechbrain python=3.9 +conda create --name speechbrain python=3.11 ``` Then, activate it with: diff --git a/docs/multigpu.md b/docs/multigpu.md index 590f295f8e..dbcd77663e 100644 --- a/docs/multigpu.md +++ b/docs/multigpu.md @@ -1,65 +1,95 @@ # Basics of multi-GPU -SpeechBrain provides two different ways of using multiple gpus while training or inferring. For further information, please see our multi-gpu tutorial: amazing multi-gpu tutorial +Training speed can greatly benefit from being distributed across multiple GPUs. However, even on a single machine, this is **NOT** the default. To enable multi-GPU training, we strongly recommend you use **Distributed Data Parallel** (DDP). -## Multi-GPU training using Data Parallel -The common pattern for using multi-GPU training over a single machine with Data Parallel is: +## Multi-GPU training using Distributed Data Parallel (DDP) -``` -> cd recipes/// -> python experiment.py params.yaml --data_parallel_backend -``` -If you want to use a specific set of GPU devices, condiser using `CUDA_VISIBLE_DEVICES` as follow: -``` -> cd recipes/// -> CUDA_VISIBLE_DEVICES=1,5 python experiment.py params.yaml --data_parallel_backend -``` +DDP implements data parallelism by spawning **one process per GPU**. 
DDP allows you to distribute work across GPUs **on the same machine _or_ across several machines on a network** if wanted. -Important: the batch size for each GPU process will be: `batch_size / Number of GPUs`. So you should consider changing the batch_size value according to you need. +When using CUDA (which we will assume in this document), PyTorch uses [NCCL](https://developer.nvidia.com/nccl) behind the scenes to synchronize everything. PyTorch documentation [further details](https://pytorch.org/docs/stable/distributed.html) distributed backends. -## Multi-GPU training using Distributed Data Parallel (DDP) +### Writing DDP-safe code in SpeechBrain -DDP implements data parallelism on different processes. This way, the GPUs do not necessarily have to be in the same server. This solution is much more flexible. However, the training routines must be written considering multi-threading. +DDP requires your training routines to be written to be DDP-safe, because your script will be run several times concurrently (potentially across multiple machines). Stock SpeechBrain recipes will work with DDP. We also provide functionality to assist with writing DDP-safe scripts. -With SpeechBrain, we put several efforts to make sure the code is compliant with DDP. For instance, to avoid conflicts across processes we develop the `run_on_main` function. It is called when critical operations such as writing a file on disk are performed. It ensures that these operations are run in a single process only. The other processes are waiting until this operation is completed. +`run_on_main` ensures that a specific function is executed only once, in only one process, forcing other processes to wait. It is frequently used to run a dataset preparation step in recipes. -Using DDP in speechbrain with a single server (node) is quite easy: +Many functions like `Brain.fit` are written to be DDP-aware. 
In practice, there is not a lot you need to do to make your code DDP-safe, but it is something you should keep in mind. -``` +> **NOTE:** +> With DDP, batch size is defined for a single process/GPU. This is different from Data Parallel (DP), where batches are split according to the number of GPUs. For example, with DDP, if you specify a batch size of 16, each GPU/process will use batches of 16 regardless of how many GPUs you have. + +### Single-node setup + +_This covers the case where you want to split training across **multiple GPUs** on **a single machine** (node)._ + +Using SpeechBrain, this would look like: + +```bash cd recipes/// -python -m torch.distributed.launch --nproc_per_node=4 experiment.py hyperparams.yaml --distributed_launch --distributed_backend='nccl' +torchrun --standalone --nproc_per_node=4 experiment.py hyperparams.yaml ``` -Where: -- nproc_per_node must be equal to the number of GPUs. -- distributed_backend is the type of backend managing multiple processes synchronizations (e.g, 'nccl', 'gloo'). Try to switch the DDP backend if you have issues with nccl. +... where `nproc_per_node` is the the number of processes to spawn/GPUs to use. + +### Multi-node setup + +_This covers the case where you want to split training across **multiple machines** on a network, with any amount of GPUs per machine._ + +Note that using DDP across multiple machines introduces a **communication overhead** that might slow down training significantly, sometimes more than if you were to train on a single node! This largely depends on the network speed between the nodes. +Make sure you are actually observing any benefits from distributing the work across machines. + +While DDP is more efficient than `DataParallel`, it is somewhat prone to exhibit unexpected bugs. DDP is quite server-dependent, so some setups may face issues. If you are encountering problems, make sure PyTorch is well up to date. 
+ +#### Basics & manual multi-node setup + +Let's start with a simple example where a user is able to connect to each node directly. Consider that we have 2 nodes with 2 GPUs each (for a total of 4 GPUs). + +We use `torchrun` once on each machine, with the following parameters: + +- `--nproc_per_node=2` means we will spawn 2 processes per node, which equates to 2 GPUs per nodes. +- `--nnodes=2` means we will be using two nodes in total. +- `--node_rank=0` and `--node_rank=1` refer to the rank/"index" we are attributing to the node/machine. +- `--master_addr`/`--master_port` define the IP address and the port of the "master" machine. In this case, we're arbitrarily choosing the first machine to be the "master" of everyone else (the 2nd machine in our case). Note that `5555` might be taken by a different process if you are unlucky or if you would run multiple different training scripts on that node, so you may need to choose a different free port. -Running DDP over multiple servers (nodes) is quite system dependent. Let's start with a simple example where a user is able to connect to each node directly. 
If we want to run 2 GPUs on 2 different nodes (i.e total of 4 GPUs), we must do: +Hence, we get: -```shell +```bash # Machine 1 cd recipes/// -python -m torch.distributed.launch --nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr machine_1_adress --master_port 5555 experiment.py hyperparams.yaml --distributed_launch --distributed_backend='nccl' +torchrun --nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr machine_1_address --master_port 5555 experiment.py hyperparams.yaml +``` +```bash # Machine 2 cd recipes/// -python -m torch.distributed.launch --nproc_per_node=2 --nnodes=2 --node_rank=1 --master_addr machine_1_adress --master_port 5555 experiment.py hyperparams.yaml --distributed_launch --distributed_backend='nccl' +torchrun --nproc_per_node=2 --nnodes=2 --node_rank=1 --master_addr machine_1_address --master_port 5555 experiment.py hyperparams.yaml ``` -In this case, Machine 1 will have 2 subprocesses (subprocess1: with local_rank=0, rank=0, and subprocess2: with local_rank=1, rank=1). Machine 2 will have 2 subprocess (subprocess1: with local_rank=0, rank=2, and subprocess2: with local_rank=1, rank=3). +In this setup: + +- Machine 1 will have 2 subprocesses: + - Subprocess #1: `local_rank`=0, `rank`=0 + - Subprocess #2: `local_rank`=1, `rank`=1 +- Machine 2 will have 2 subprocess: + - Subprocess #1: `local_rank`=0, `rank`=2 + - Subprocess #2: `local_rank`=1, `rank`=3 + +In practice, using `torchrun` ensures that the right environment variables are set (`LOCAL_RANK` and `RANK`), so you don't have to bother with it. -In practice, using `torch.distributed.launch` ensures that the right environment variables are set (`local_rank` and `rank`), so you don't have to bother about it. +#### Multi-node setup with Slurm + +If you have access to a compute cluster using Slurm, you can automate this process. We will create two scripts: -Now, let's try to scale this up a bit with a resource manager like SLURM. 
Here, we will create two scripts: - a SBATCH script that will request the node configuration and call the second script. - a SRUN script that will call the training on each node. -```shell -## sbatch.sh +`sbatch.sh`: +```bash #SBATCH --nodes=2 # We want two nodes (servers) #SBATCH --ntasks-per-node=1 # we will run once the next srun per node -#SBATCH --gres=gpu:4 # we want 4 GPUs per node +#SBATCH --gres=gpu:4 # we want 4 GPUs per node #cspell:ignore gres #SBATCH --job-name=SBisSOcool #SBATCH --cpus-per-task=10 # the only task will request 10 cores #SBATCH --time=20:00:00 # Everything will run for 20H. @@ -71,9 +101,9 @@ cd ${SLURM_SUBMIT_DIR} srun srun_script.sh ``` -```shell -## srun_script.sh +`srun_script.sh`: +```bash #!/bin/bash # We jump into the submission dir @@ -86,10 +116,37 @@ conda activate super_cool_sb_env LISTNODES=`scontrol show hostname $SLURM_JOB_NODELIST` MASTER=`echo $LISTNODES | cut -d" " -f1` -# here --nproc_per_node=4 because we want torch.distributed to spawn 4 processes (4 GPUs). Then we give the total amount of nodes requested (--nnodes) and then --node_rank that is necessary to dissociate the node that we are calling this from. -python -m torch.distributed.launch --nproc_per_node=4 --nnodes=${SLURM_JOB_NUM_NODES} --node_rank=${SLURM_NODEID} --master_addr=${MASTER} --master_port=5555 train.py hparams/myrecipe.yaml +# here --nproc_per_node=4 because we want torchrun to spawn 4 processes (4 GPUs). Then we give the total amount of nodes requested (--nnodes) and then --node_rank that is necessary to dissociate the node that we are calling this from. +torchrun --nproc_per_node=4 --nnodes=${SLURM_JOB_NUM_NODES} --node_rank=${SLURM_NODEID} --master_addr=${MASTER} --master_port=5555 train.py hparams/myrecipe.yaml ``` -Note that using DDP on different machines introduces a **communication overhead** that might slow down training (depending on how fast is the connection across the different machines). 
+#### Multi-node with separate filesystems + +In addition to our `run_on_main` function, we have a parallel function `run_once_per_node` that runs on `LOCAL_RANK=0`, for setups where different nodes do not have access to the same filesystem so that checkpointing can be saved on all of the separate filesystems. + +To apply this to checkpointing, we provide the convenience function: + +`speechbrain.utils.checkpoints.convert_torch_save_hooks_to_once_per_node()` + +If you call this, the saves should happen once on every node rather than only on a single process. + + +## (DEPRECATED) Single-node multi-GPU training using Data Parallel + +[**We strongly recommend AGAINST using `DataParallel`, even for single-node setups**](https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html)! Use `DistributedDataParallel` instead. We no longer provide support for `DataParallel`. Future PyTorch versions may even remove `DataParallel` altogether. -We would like to advise our users that despite being more efficient, DDP is also more prone to exhibit unexpected bugs. Indeed, DDP is quite server-dependent and some setups might generate errors with the PyTorch implementation of DDP. The future version of pytorch will improve the stability of DDP. +The common pattern for using multi-GPU training over a single machine with Data Parallel is: + +```bash +cd recipes/// +python experiment.py params.yaml --data_parallel_backend +``` + +If you want to use a specific set of GPU devices, consider using `CUDA_VISIBLE_DEVICES` as follow: + +```bash +cd recipes/// +CUDA_VISIBLE_DEVICES=1,5 python experiment.py params.yaml --data_parallel_backend +``` + +Important: the batch size for each GPU process will be: `batch_size / Number of GPUs`. So you should consider changing the batch_size value according to you need. 
diff --git a/docs/newversion.md b/docs/newversion.md new file mode 100644 index 0000000000..4cdc8b1c7f --- /dev/null +++ b/docs/newversion.md @@ -0,0 +1,43 @@ +# Releasing a new version + +Here are a few guidelines for when and how to release a new version. +To begin with, as hinted in the "Continuous Integration" document, we would like to follow a +pretty tight release schedule, known as "Continuous Deployment". For us, this means a new +version should be released roughly once a week. + +As for how to name the released version, we try to follow semantic versioning for this. More details +can be found at [semver.org](http://semver.org). As it applies to SpeechBrain, some examples +of what this would likely mean: + * Changes to the Brain class or other core elements often warrant a major version bump (e.g. 1.5.3 -> 2.0.0) + * Added classes or features warrant a minor version bump. Most weekly updates should fall into this. + * Patch version bumps should happen only for bug fixes. + +**[Final pre-release tests](../tests/PRE-RELEASE-TESTS.md) should be performed!** Some of these checks aren't run by the CI. + +When releasing a new version, there are a few user-initiated action that need to occur. + 1. On the `develop` branch, update `speechbrain/version.txt` to say the new version: + X.Y.Z + 2. Merge the `develop` branch into the `main` branch: + git checkout main + git merge develop + 3. Push the `main` branch to github: + git push + 4. Tag the `main` branch with the new version: + git tag vX.Y.Z + 5. Push the new tag to github: + git push --tags + +This kicks off an automatic action that creates a draft release with release notes. +Review the notes to make sure they make sense and remove commits that aren't important. +You can then publish the release to make it public. +Publishing a new release kicks off a series of automatic tools, listed below: + + * The `main` branch is checked out and used for building a python package. 
+ * The built package is uploaded to PyPI and the release is published there. + * Read the Docs uses Webhooks to get notified when a new version is published. + Read the Docs then builds the documentation and publishes the new version. + +Maintainers of relevant accounts: + * Mirco Ravanelli maintains the GitHub and PyPI accounts + * Titouan Parcollet maintains the website at [speechbrain.github.io](speechbrain.github.io) + as well as accounts at Read the Docs diff --git a/docs/readthedocs-requirements.txt b/docs/readthedocs-requirements.txt new file mode 100644 index 0000000000..3a950abe70 --- /dev/null +++ b/docs/readthedocs-requirements.txt @@ -0,0 +1,7 @@ +# readthedocs only lets us define a single requirements file in the yaml +# this file merges both the usual and the docs requirements so that everything +# gets installed correctly. + +-r ../requirements.txt +-r docs-requirements.txt +torch==2.9.0 diff --git a/docs/tutorials.md b/docs/tutorials.md deleted file mode 100644 index 7f8a961ff1..0000000000 --- a/docs/tutorials.md +++ /dev/null @@ -1,5 +0,0 @@ -# Tutorials - -A good way to familiarize yourself with SpeechBrain is to take a look at the Colab tutorials that we made available. More tutorials will be made available as the project will progress. - -The full list of tutorials can be found on the official [website](https://speechbrain.github.io). All the tutorials are developed on the [Google Colab platform](https://colab.research.google.com). This allows users to directly try SpeechBrain on GPUs without the need to set up an environment. diff --git a/docs/tutorials/advanced.rst b/docs/tutorials/advanced.rst new file mode 100644 index 0000000000..8c400c6044 --- /dev/null +++ b/docs/tutorials/advanced.rst @@ -0,0 +1,177 @@ +SpeechBrain Advanced +==================== + +.. + Originally generated with https://gist.github.com/asumagic/19f9809480b62bfd16094fb5c844a564 but OK to edit in repo now. 
+ Please ensure for each tutorial that you are adding it to the hidden toctree at the end of the file! + +.. toctree:: + :hidden: + + advanced/profiling-and-benchmark.ipynb + advanced/dynamic-batching.ipynb + advanced/hyperparameter-optimization.ipynb + advanced/federated-speech-model-training-via-speechbrain-and-flower.ipynb + advanced/inferring-on-your-own-speechbrain-models.ipynb + advanced/pre-trained-models-and-fine-tuning-with-huggingface.ipynb + advanced/data-loading-for-big-datasets-and-shared-filesystems.ipynb + advanced/text-tokenizer.ipynb + advanced/model-quantization.ipynb + + +.. rubric:: `🔗 Performance Profiling `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Nautsch A. + - June. 2022 + - Difficulty: medium + - Time: 45min + - `🔗 Google Colab `__ + + +Profiling and benchmark of SpeechBrain models can serve different purposes and look at different angles. Performance requirements are highly particular to the use case with that one desires to use SpeechBrain. This provides means to comprehensive self-learning as a starting point to individual growth beyond the provided. + +.. rubric:: `🔗 Dynamic Batching: What is it and why it is necessary sometimes `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Nautsch A. and Cornell S. + - Nov. 2021 + - Difficulty: medium + - Time: 25min + - `🔗 Google Colab `__ + + +Do you want to speed up training or make it less memory-demanding? One possible solution could be dynamic batching. With this approach, you can dynamically sample batches composed of a variable number of sentences. In this tutorial, we show how to use this technique within SpeechBrain. + +.. rubric:: `🔗 Hyperparameter Optimization `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ploujnikov A. + - Dec. 
2021 + - Difficulty: medium + - Time: 25min + - `🔗 Google Colab `__ + + +Do you want to optimize the hyperparameters of your model? Are you tired of doing it by hand? This tutorial will describe how you can optimize the hyperparameter of your SpeechBrain model using the Orion toolkit. + +.. rubric:: `🔗 Federated Speech Model Training via SpeechBrain and Flower `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Gao Y. & Parcollet T. + - Nov. 2021 + - Difficulty: high + - Time: 45min + - `🔗 Google Colab `__ + + +Are you interested in both federated learning (FL) and speech, but worried about the proper tools to run experiments? Today you will get the answer. +This tutorial introduces how to integrate Flower and SpeechBrain to achieve federated speech model training. + +.. rubric:: `🔗 Inferring on your trained SpeechBrain model `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Parcollet T. + - Sept.. 2021 + - Difficulty: medium + - Time: 30min + - `🔗 Google Colab `__ + + +In this tutorial, we will learn the three different ways of inferring on a trained model. +This is particularly useful to debug your pipeline or to deploy a model in a production context. + +.. rubric:: `🔗 Pre-trained Models and Fine-Tuning with HuggingFace `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Cornell S. & Parcollet T. + - Mar. 2021 + - Difficulty: medium + - Time: 30min + - `🔗 Google Colab `__ + + +Training DNN models is often very time-consuming and expensive. +For this reason, whenever it is possible, using off-the-shelf pretrained +models can be convenient in various scenarios. 
+We provide a simple and straightforward way to download and instantiate a +state-of-the-art pretrained-model from HuggingFace HuggingFace HuggingFace and use it either for direct inference or +or fine-tuning/knowledge distillation or whatever new fancy technique you can come up with! + +.. rubric:: `🔗 Data Loading for Big Datasets and Shared Filesystems `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Rouhe A. + - Feb. 2021 + - Difficulty: medium + - Time: 15min + - `🔗 Google Colab `__ + + +Do you have a huge dataset stored in a shared file system? This tutorial will show you how to load large datasets from the shared file system and use them for training a neural network with SpeechBrain. +In particular, we describe a solution based on the WebDataset library, that is easy to integrate within the SpeechBrain toolkit. + +.. rubric:: `🔗 Text Tokenization `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Heba A. & Parcollet T. + - Feb. 2021 + - Difficulty: easy + - Time: 20min + - `🔗 Google Colab `__ + + +Machine Learning tasks that process text may contain thousands of vocabulary +words which leads to models dealing with huge embeddings as input/output +(e.g. for one-hot-vectors and ndim=vocabulary_size). This causes an important consumption of memory, +complexe computations, and more importantly, sub-optimal learning due to extremely sparse and cumbersome +one-hot vectors. In this tutorial, we provide all the basics needed to correctly use the SpeechBrain Tokenizer relying +on SentencePiece (BPE and unigram). + +.. rubric:: `🔗 Applying Quantization to a Speech Recognition Model `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Lam J. + - Apr. 
2024 + - Difficulty: medium + - Time: 30min + - `🔗 Google Colab `__ + + +Quantization is a necessary step for many deep neural networks, particularly for tasks requiring low latency and efficient memory usage like real-time automatic speech recognition. This tutorial will introduce the problem of quantization and explain how to perform quantization using SpeechBrain. diff --git a/docs/tutorials/advanced/data-loading-for-big-datasets-and-shared-filesystems.ipynb b/docs/tutorials/advanced/data-loading-for-big-datasets-and-shared-filesystems.ipynb new file mode 100644 index 0000000000..32f6fe3e26 --- /dev/null +++ b/docs/tutorials/advanced/data-loading-for-big-datasets-and-shared-filesystems.ipynb @@ -0,0 +1,526 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/data-loading-for-big-datasets-and-shared-filesystems.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/data-loading-for-big-datasets-and-shared-filesystems.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "geevgOW2-M9Y" + }, + "source": [ + "# Data Loading for Big Datasets and Shared Filesystems\n", + "\n", + "Do you have a large dataset stored in a shared filesystem, and you want to use it for training a neural network? Is this dataset so large that it doesn't even fit into the local SSD of your computation nodes? If so, this tutorial will walk you through all the needed steps to manage reading large files from a shared filesystem.\n", + "\n", + "In many compute clusters, the main data storage is a network filesystem (NFS), for example [Lustre](https://en.wikipedia.org/wiki/Lustre_(file_system)). 
The NFS can serve many users concurrently and provide high data throughput from a single file. However, opening or listing many different files is slow - and doing so may slow the whole system down for everyone, not just the offending user. Speech datasets usually consist of very many small recordings. Reading every file again and again is exactly the kind of data IO that can slow down an NFS.\n", + "\n", + "One solution is to copy the dataset into the **local SSD** of the computing node. This can be done relatively efficiently by compressing the dataset into a single file (e.g. `dataset.tar.gz`), copying it into the local node, and finally, uncompressing (untarring) the file. Reading files from the local SSD is very efficient and does not harm the performance of the shared filesystem.\n", + "The standard SpeechBrain data IO works well in this case, see [this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html).\n", + "However, there might be huge datasets that exceed the size of your local SSD. \n", + "\n", + "A possible workaround is to keep the data in the shared filesystem and bundle the small recordings into larger archives, which are usually called **shards**. Loading data off shards avoids opening too many files, so it is fast.\n", + "\n", + "When reading data from shards, random access across the dataset is no longer possible. Data is read sequentially, from a **stream**. This requires a bit of care in preparing the experiment.\n", + "\n", + "The case for sharded IO laid out above is typical in an academic compute-cluster setup. Streaming data IO can also be used in even larger scales with dedicated data servers.\n", + "\n", + "In this tutorial we will use the **WebDataset** library. 
Alternatives and the case for WebDataset are laid out by the WebDataset developer in [this PyTorch proposal](https://github.com/pytorch/pytorch/issues/38419).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Xgyxbji2bivF" + }, + "source": [ + "\n", + "## What is WebDataset?\n", + "\n", + "[WebDataset](https://github.com/webdataset/webdataset) is a sharded (streaming) data IO library that works well with PyTorch. WebDataset uses standard TAR archives as the shard format, with the simple convention that all consequtive files that have the same basename belong to the same example. So listing `data-archive/shard-0000.tar`s contents can look like:\n", + "\n", + "```\n", + "> tar -t data-archives/shard-0000.tar\n", + "spk1-utt1.wav\n", + "spk1-utt1.txt\n", + "spk1-utt1.json\n", + "spk1-utt2.wav\n", + "spk1-utt2.txt\n", + "spk1-utt2.json\n", + "spk2-utt1wav\n", + "spk2-utt1.txt\n", + "spk2-utt1.json\n", + "...\n", + "```\n", + "\n", + "On the Python side, the dataset interface is an [IterableDataset](https://pytorch.org/docs/stable/data.html#iterable-style-datasets), and it has a set of methods which can be chained to build a data pipeline, like:\n", + "\n", + "```python\n", + "import webdataset as wds # Note the typical import shorthand\n", + "dataset = (\n", + " wds.WebDataset(\"data-archives/shard-00{00...24}.tar\") # 25 shards\n", + " .decode() # Automagically decode files\n", + " .shuffle(size=1000) # Shuffle on-the-fly in a buffer\n", + " .batch(batchsize=10) # Create batches\n", + ")\n", + "```\n", + "\n", + "Note that WebDataset is (at least at the time of writing) a fast moving library. It is also being considered for inclusion in the PyTorch core. 
Again, read more [here](https://github.com/pytorch/pytorch/issues/38419) and [here](https://pytorch.org/blog/efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus/).\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6RBQlaRvTSN4" + }, + "source": [ + "## Installing dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jrilWKHNVUiK" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DPX-4BBbX5L9" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install \"webdataset<0.2\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "iaKvk0eKZ_iv" + }, + "outputs": [], + "source": [ + "import speechbrain as sb\n", + "import webdataset as wds\n", + "import torch\n", + "import glob\n", + "import pathlib\n", + "import random" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "G2FZpMS9xprl" + }, + "source": [ + "## Creating TAR shards\n", + "\n", + "The data-prep process in WebDataset is to iterate over each example in the dataset and split them into TAR shards. TAR files are a standard format, so you could create them with any standard tools. WebDataset has a couple of helpers, which can make this process a bit easier.\n", + "\n", + "- Tarp, a Go-based tool which can split a TAR stream into shards, and do a couple of other streaming processing tasks. See [the GitHub page](https://github.com/webdataset/tarp). This is a separate tool, and requires a separate installation, but Go could theoretically be faster than Python.\n", + "- `wds.ShardWriter`, a Python class that can write WebDataset style `dict`s into TAR archives, splitting into multiple shards of a given size. 
This is the approach we will take here.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "guxT9hrC9-tX" + }, + "source": [ + "### Download some data\n", + "\n", + "In this tutorial we will work with the development set of Mini Librispeech (but we will treat it like any normal training data)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9L2__cR89yn7" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.openslr.org/resources/31/dev-clean-2.tar.gz\n", + "!tar -xvzf dev-clean-2.tar.gz\n", + "!rm dev-clean-2.tar.gz" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-MjocQqL-IOe" + }, + "source": [ + "### Iterate over the data\n", + "\n", + "This step will of course vary dataset to dataset. In Mini Librispeech, the data is organized by speaker and by document. We will first read all transcripts in, then shuffle those, so that consecutive examples are not from the speaker and document.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Gxq6Al2hCu0n" + }, + "outputs": [], + "source": [ + "DATAROOT = pathlib.Path(\"LibriSpeech/dev-clean-2\")\n", + "SHARDSDIR = pathlib.Path(\"DATA-SHARDS\")\n", + "SHARDSDIR.mkdir(exist_ok=True, parents=True)\n", + "\n", + "# 1. Gather texts\n", + "# Note that here uttid encodes speaker and document IDs, so we don't need to\n", + "# keep track of them separately\n", + "texts = {}\n", + "for textf in DATAROOT.glob(\"*/*/*.trans.txt\"):\n", + " with open(textf) as fi:\n", + " for line in fi:\n", + " uttid, text = line.split(\" \", maxsplit=1)\n", + " texts[uttid] = text\n", + " print(uttid, text)\n", + "\n", + "# 2. 
Shuffle uttids\n", + "uttids = list(texts.keys())\n", + "random.shuffle(uttids)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "40ntrNKFSnnY" + }, + "outputs": [], + "source": [ + "print(uttids)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Vqfo-MOVE9TZ" + }, + "outputs": [], + "source": [ + "# 3. Create TARs\n", + "# In this example, we are only storing 100 examples / shard, because the full\n", + "# development set could probably fit in a normal shard. In practical setups\n", + "# use bigger values.\n", + "# maxcount sets the max number of examples, and maxsize\n", + "# sets the maximum size in bytes.\n", + "\n", + "# 3A. Iterate over the shuffled uttids\n", + "# 3B. For each uttid, create an example dict\n", + "# The example dict is written into a TAR stream. The special __key__\n", + "# entry becomes the basename for this example's files, and the other\n", + "# entries in the dict become files with different extensions.\n", + "# E.G. with uttid \"3536-23268-0007\" this will write the files:\n", + "# 3536-23268-0007.audio.pth, 3536-23268-0007.text\n", + "# There are default handlers for many extensions\n", + "# See https://github.com/webdataset/webdataset/blob/6ee2279795b3f667bb7a5868af596990cc6efee3/webdataset/writer.py#L97\n", + "\n", + "with wds.ShardWriter(f\"{SHARDSDIR}/shard-%06d.tar\", maxcount = 100) as writer:\n", + " for uttid in uttids:\n", + " spk, doc, _ = uttid.split(\"-\")\n", + " audio_fpath = (DATAROOT / spk / doc / uttid).with_suffix(\".flac\")\n", + " audio_tensor = sb.dataio.dataio.read_audio(str(audio_fpath))\n", + " example = {\n", + " \"__key__\": uttid,\n", + " \"audio.pth\": audio_tensor,\n", + " \"text\": texts[uttid]\n", + " }\n", + " writer.write(example)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cln8mfljQI7A" + }, + "outputs": [], + "source": [ + "! 
cd DATA-SHARDS/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "s0sKuFuTMMYy" + }, + "outputs": [], + "source": [ + "# Now we can load these shards.\n", + "# This uses the SpeechBrain batch class, but batching itself is done by\n", + "# WebDataset\n", + "dataset = (\n", + " wds.WebDataset(str(SHARDSDIR)+\"/shard-0000{00..10}.tar\")\n", + " .decode()\n", + " .shuffle(100)\n", + " .batched(batchsize=10,\n", + " collation_fn=sb.dataio.batch.PaddedBatch)\n", + ")\n", + "batch = next(iter(dataset))\n", + "print(batch.text)\n", + "print(batch[\"audio.pth\"]) # Because of the audio.pth name, attribute access doesn't work\n", + "print(\"How much of batch is padding [%]:\",\n", + " sb.dataio.iterators.padding_ratio(batch[\"audio.pth\"].lengths).item()*100)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Z9SzZesQekBY" + }, + "source": [ + "## WebDataset with SpeechBrain\n", + "\n", + "SpeechBrain is compatible with any PyTorch data loading, so WebDataset can be used without any extensions (as we have done so far). However, three problems remain:\n", + "\n", + "1. Data in shards is typically not sorted (or is even deliberately shuffled). Consecutive utterances will be of very different lengths, and need a lot of padding.\n", + "2. The intra-epoch checkpointing in the SaveableDataLoader does not work with IterableDatasets.\n", + "3. Exact epochs are very difficult to acheive with Distributed Data Parallel. (This problem is not specific to WebDataset or SpeechBrain.)\n", + "\n", + "These problems are solved with the following strategies and extensions:\n", + "\n", + "1. SpeechBrain implements an on-the-fly dynamic batching and bucketing iterator. 
This works together with `webdataset.WebDataset`.\n", + " - Bucketing puts similar length utterances in the same batch, reducing the amount of padding.\n", + " - Dynamic batching is natural to implement at the same time as bucketing, and aims to yield batches that have similar total number of elements. Batches with short utterances have larger batch size and batches with long utterances have a smaller batch size.\n", + " - On the fly operation is needed for streaming data loading.\n", + "2. Don't care about exact epochs. Instead measure the number of updates and set a nominal epoch length (e.g. an epoch = 2500 updates).\n", + "3. Don't care about exact restarts: when an experiment is restarted, data loading will not continue from the examples where it left off, but instead just begin again from randomly assigned shards.\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ps8qIWEWRyay" + }, + "source": [ + "### Some changes in the train data loading pipeline\n", + "\n", + "- First of all, use `.rename` in the loading pipeline to get more sensibly named batch elements. This will also solve the issue (above) where `audio.pth` couldn't be accessed with typical attribute style.\n", + "- Then add a `.repeat` so that an infinite stream of data is used.\n", + "- Finally, the main change is to use `sb.dataio.iterators.dynamic_bucketed_batch` as the batching method\n", + " - A generic iterator can be used with the `.then` method\n", + " - See [the documentation](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.dataio.iterators.html) for arguments.\n", + " - Since this also involves a shuffling operation, don't use the WebDataset shuffle anymore." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wwReAbgB7uEP" + }, + "outputs": [], + "source": [ + "dataset = (\n", + " wds.WebDataset(str(SHARDSDIR)+\"/shard-0000{00..10}.tar\")\n", + " .decode()\n", + " .rename(id=\"__key__\", signal=\"audio.pth\", text=\"text\") # Mention all, even text.\n", + " .repeat()\n", + " .then(sb.dataio.iterators.dynamic_bucketed_batch,\n", + " len_key = \"signal\", # Which batch element's length to consider\n", + " sampler_kwargs={\n", + " \"target_batch_numel\":16000*45., # Add examples till they total 45 seconds\n", + " \"max_batch_numel\": 16000*60. # ... but so that they don't go over 60 seconds\n", + " }\n", + " )\n", + ")\n", + "\n", + "batch = next(iter(dataset))\n", + "print(\"Batch size:\", len(batch))\n", + "print(\"How much of batch is padding [%]:\",\n", + " sb.dataio.iterators.padding_ratio(batch.signal.lengths).item()*100)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kxjSmuoPe2BS" + }, + "source": [ + "### More complex data loading pipelines\n", + "\n", + "- You can use `.map()` to implement arbitrary processing.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "TeVMdOvmg1Rg" + }, + "outputs": [], + "source": [ + "text_mapping = {\"\": 0}\n", + "index = 1\n", + "for example in wds.WebDataset(str(SHARDSDIR)+\"/shard-0000{00..10}.tar\").decode():\n", + " for word in example[\"text\"].split():\n", + " if word not in text_mapping:\n", + " text_mapping[word] = index\n", + " index += 1\n", + "\n", + "def text_to_index(sample):\n", + " \"\"\"Adds text_vec entry, a LongTensor for text\"\"\"\n", + " sample[\"text_vec\"] = torch.LongTensor(\n", + " [text_mapping[word] for word in sample[\"text\"].split()]\n", + " )\n", + " return sample\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JHoAI8AffdAP" + }, + "outputs": [], + "source": [ + "dataset = (\n", + " 
wds.WebDataset(str(SHARDSDIR)+\"/shard-0000{00..10}.tar\")\n", + " .decode()\n", + " .rename(id=\"__key__\", signal=\"audio.pth\", text=\"text\")\n", + " .map(text_to_index)\n", + " .repeat()\n", + " .then(sb.dataio.iterators.dynamic_bucketed_batch,\n", + " len_key = \"signal\", # Which batch element's length to consider\n", + " sampler_kwargs={\n", + " \"target_batch_numel\":16000*45., # Add examples till they total 45 seconds\n", + " \"max_batch_numel\": 16000*60. # ... but so that they don't go over 60 seconds\n", + " }\n", + " )\n", + ")\n", + "batch = next(iter(dataset))\n", + "print(batch.text[0])\n", + "print(batch.text_vec.data[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ljh8IoKIb9LY" + }, + "source": [ + "### How to handle the DataLoader\n", + "\n", + "- Since we have a dataset that returns batches (as opposed to single examples), the DataLoader should set `batch_size=None`\n", + " - The `Brain` class (and the underlying `sb.dataio.dataloader.make_dataloader`) will set this automatically if your Dataset is from WebDataset).\n", + "- To acheive the nominal epochs, SpeechBrain has [`sb.dataio.dataloader.LoopedLoader`](https://github.com/speechbrain/speechbrain/blob/4022f5307ae23f1415e44a9c8b8b9cc5994a945b/speechbrain/dataio/dataloader.py#L258)\n", + " - The `Brain` class (and the underlying `sb.dataio.dataloader.make_dataloader`) will use this if you specify `looped_nominal_epoch` in `train_loader_kwargs` (when calling `.fit()`).\n", + " - The `Brain` class will also automatically add this to the checkpointer, so that it is saved in checkpoints (and it also works for intra-epoch checkpoints)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XbTOwpa_iFil" + }, + "outputs": [], + "source": [ + "dataloader = sb.dataio.dataloader.make_dataloader(dataset, looped_nominal_epoch=5)\n", + "for epoch in range(1,6):\n", + " print(\"Epoch\", epoch)\n", + " for ind, batch in enumerate(dataloader, start=1):\n", + " print(\"\\tBatch\", ind, \": batch size\", len(batch))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and 
François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/advanced/dynamic-batching.ipynb b/docs/tutorials/advanced/dynamic-batching.ipynb new file mode 100644 index 0000000000..73fa5ec342 --- /dev/null +++ b/docs/tutorials/advanced/dynamic-batching.ipynb @@ -0,0 +1,2132 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/dynamic-batching.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/dynamic-batching.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1lMT9knfou6A" + }, + "source": [ + "# Dynamic Batching: What is it and why it is necessary sometimes\n", + "\n", + "Batching examples together is a crucial optimization that significantly accelerates training processes. 
This, combined with distributed training across multiple GPUs, enables the training of models with large parameter counts on extensive datasets in a matter of days instead of months.\n", + "\n", + "The conventional approach involves using a fixed batch size to group examples together. However, when each input has a different size, as is often the case in audio or natural language processing (NLP) applications, it necessitates padding each example in a batch to match the size of the largest one in that batch.\n", + "\n", + "While this is a common practice, it introduces a potential inefficiency when the lengths of examples exhibit significant variance. In scenarios like audio and NLP applications, a substantial portion of computation is performed on padded values, leading to computational waste. To address this issue, dynamic batching becomes essential, allowing for more efficient and resource-conscious processing of variable-length sequences in the context of diverse machine learning tasks.\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tILFmgtVDaJK" + }, + "source": [ + "To illustrate this point, let's look, for example, at **MiniLibriSpeech** which is a subset of LibriSpeech. Let's download this dataset and other tools from the [data-io tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html) which uses this same data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "executionInfo": { + "elapsed": 23640, + "status": "ok", + "timestamp": 1718826449038, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "-Xb6KaE6DkvI" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# here we download the material needed for this tutorial: images and an example based on mini-librispeech\n", + "!wget https://www.dropbox.com/s/b61lo6gkpuplanq/MiniLibriSpeechTutorial.tar.gz?dl=0\n", + "!tar -xvzf MiniLibriSpeechTutorial.tar.gz?dl=0\n", + "# downloading mini_librispeech dev data\n", + "!wget https://www.openslr.org/resources/31/train-clean-5.tar.gz\n", + "!tar -xvzf train-clean-5.tar.gz" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TgEBlx-iVht4" + }, + "source": [ + "Next, we install `speechbrain`:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "executionInfo": { + "elapsed": 114162, + "status": "ok", + "timestamp": 1718826563198, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "fVHJYKO8tOic" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "n6sGSbUUEitE" + }, + "source": [ + "Now, let's look at what is the length of each audio in this dataset and how it is distributed.\n", + "\n", + "We can plot the histogram of the lengths for each audio in this dataset using `torchaudio`:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 490 + }, + "executionInfo": { + "elapsed": 25614, + "status": "ok", + "timestamp": 1718826588807, + "user": { + "displayName": "adel moumen", + "userId": 
"01620107593621714109" + }, + "user_tz": 240 + }, + "id": "eTcGLwrwEtQG", + "outputId": "3322dbec-68bd-479e-b427-338295c25d2f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of audio files in MiniLibriSpeech train-clean-5: 1519\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkMAAAHHCAYAAAC88FzIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA7YklEQVR4nO3deVxV1f7/8fcBZHAARAVEEZxx1tCUKLFEcQhzyiH1qnnLDJwb1MypAeua4zXN7lXLtJuWmlpqpohWOKRZmuaUUyaaoeCQYLB/f/j1/DyCCnjgiPv1fDzO48HZe+21PwtI3q299j4WwzAMAQAAmJSTowsAAABwJMIQAAAwNcIQAAAwNcIQAAAwNcIQAAAwNcIQAAAwNcIQAAAwNcIQAAAwNcIQAAAwNcIQkAcWi0Xjxo3L07HBwcHq06fPXZ9z3LhxslgsOnv2bJ7quFW/95Pg4GA9/vjjji4jV5o1a6ZmzZoV+Hn79Omj4ODgAj8vcC8gDMG05s+fL4vFIovFom+++SbLfsMwFBgYKIvFkq9/UDdu3CiLxaJPP/00386RW7t371bnzp0VFBQkd3d3lStXTi1atNCMGTMcXZrD7d27V+PGjdPRo0cdXYrpHD161Prf7M2v//3vf44uD4WYi6MLABzN3d1dixYt0sMPP2yzPSEhQb/99pvc3NyyHPPXX3/JxSVv//ns379fTk65//+Quzlnbvr97rvv9Oijj6pChQp65pln5O/vrxMnTmjLli2aNm2aBg4caPcaCpO9e/dq/PjxatasWb7MpHz11Vd27/N+0717d7Vp08ZmW1hYmIOqwf2AMATTa9OmjZYsWaLp06fbhIJFixYpNDQ028tQ7u7ueT5fduEqJ+7mnDfLzMxUenq63N3ds/T7xhtvyMvLS9u3b5e3t7fNvjNnztitBjMwDENXrlyRh4dHjo9xdXXNx4ruDw888IB69uzp6DJwH+EyGUyve/fu+vPPP7Vu3TrrtvT0dH366ad66qmnsj3mVut3Dh06pD59+sjb21teXl7q27evLl++bHOsvdYMXXf27Fl16dJFnp6eKlWqlAYPHqwrV65kOTY2NlYLFy5UrVq15ObmpjVr1mTb7+HDh1WrVq0sQUiSfH19b9lv9erV5e7urtDQUG3atCnLsSdPntTTTz8tPz8/ubm5qVatWpo7d26WdmlpaRo7dqyqVKkiNzc3BQYG6qWXXlJaWlqWth999JEefPBBFS1aVCVLllTTpk2znVn55ptv9OCDD8rd3V2VKlXShx9+mKXN4cOHdfjw4SzbbzR//nw9+eSTkqRHH33Ueolm48aNkv7/GqW1a9eqYcOG8vDw0HvvvSdJmjdvnh577DH5+vrKzc1NNWvW1KxZs7Kc4+Y1Q9cvoy5evFhvvPGGypcvL3d3dzVv3lyHDh26bb03Wr16tSIiIlSiRAl5enqqUaNGWrRo0W2PyczM1NSpU1WrVi25u7vLz89P/fv317lz52zaff7552rbtq0CAgLk5uamypUr67XXXlNGRkaWsdWuXVt79+7Vo48+qqJFi6pcuXJ6++23czyO6y5duqT09PRcHwdkhzAE0wsODlZYWJg+/vhj67bVq1crJSVF3bp1y1VfXbp00YULFxQXF6cuXbpo/vz5Gj9+vL1LznLOK1euKC4uTm3atNH06dP17L
PPZmm3YcMGDR06VF27dtW0adNueYknKChIO3bs0J49e3J0/oSEBA0ZMkQ9e/bUhAkT9Oeff6pVq1Y2x58+fVpNmjTR119/rdjYWE2bNk1VqlRRv379NHXqVGu7zMxMtWvXTpMmTVJ0dLRmzJih9u3ba8qUKeratavNecePH69evXqpSJEimjBhgsaPH6/AwEBt2LDBpt2hQ4fUuXNntWjRQu+8845KliypPn366Oeff7Zp17x5czVv3vy2Y23atKkGDRokSRo1apQWLFigBQsWqEaNGtY2+/fvV/fu3dWiRQtNmzZN9evXlyTNmjVLQUFBGjVqlN555x0FBgbq+eef18yZM3P0fZ44caKWLVumF154QSNHjtSWLVvUo0ePHB07f/58tW3bVsnJyRo5cqQmTpyo+vXrWwPxrfTv318vvviiwsPDNW3aNPXt21cLFy5UVFSUrl69atN/8eLFNWzYME2bNk2hoaEaM2aMRowYkaXPc+fOqVWrVqpXr57eeecdhYSE6OWXX9bq1atzNBbp2s++ePHicnd3V6NGjbi0iLtnACY1b948Q5Kxfft249///rdRokQJ4/Lly4ZhGMaTTz5pPProo4ZhGEZQUJDRtm1bm2MlGWPHjrW+Hzt2rCHJePrpp23adejQwShVqpTNtqCgIKN3797W9/Hx8YYkY8mSJbet91bnbNeunU27559/3pBk/PjjjzbHOjk5GT///PMd+/3qq68MZ2dnw9nZ2QgLCzNeeuklY+3atUZ6enq2x0oyvv/+e+u2Y8eOGe7u7kaHDh2s2/r162eULVvWOHv2rM3x3bp1M7y8vKzf9wULFhhOTk7G5s2bbdrNnj3bkGR8++23hmEYxsGDBw0nJyejQ4cORkZGhk3bzMxM69dBQUGGJGPTpk3WbWfOnDHc3NyM4cOH2xwXFBRkBAUFZRnjzZYsWWJIMuLj47Psu36+NWvWZNl3fYw3ioqKMipVqmSzLSIiwoiIiLC+v/77UaNGDSMtLc26fdq0aYYkY/fu3bet9/z580aJEiWMxo0bG3/99ZfNvhu/V71797YZ/+bNmw1JxsKFC22OWbNmTZbt2Y2tf//+RtGiRY0rV67YjE2S8eGHH1q3paWlGf7+/kanTp1uOw7DuPa71bJlS2PWrFnGihUrjKlTpxoVKlQwnJycjFWrVt3xeOBWmBkCdG125a+//tKqVat04cIFrVq16paXyG7nueees3n/yCOP6M8//1Rqaqq9Ss0iJibG5v31Bc5ffvmlzfaIiAjVrFnzjv21aNFCiYmJateunX788Ue9/fbbioqKUrly5bRixYos7cPCwhQaGmp9X6FCBT3xxBNau3atMjIyZBiGPvvsM0VHR8swDJ09e9b6ioqKUkpKinbu3ClJWrJkiWrUqKGQkBCbdo899pgkKT4+XpK0fPlyZWZmasyYMVkWo1ssFpv3NWvW1COPPGJ9X6ZMGVWvXl2//vqrTbujR4/a5Q6xihUrKioqKsv2G9cNpaSk6OzZs4qIiNCvv/6qlJSUO/bbt29fm/VE18d08zhutm7dOl24cEEjRozIsj7s5u/VjZYsWSIvLy+1aNHC5mcRGhqq4sWLW38WN4/twoULOnv2rB555BFdvnxZv/zyi02/xYsXt1nv4+rqqgcffPCO45Cu/W6tXbtWzz33nKKjozV48GD98MMPKlOmjIYPH37H44FbYQE1oGt/ICMjI7Vo0SJdvnxZGRkZ6ty5c677qVChgs37kiVLSrp2acDT09Mutd6satWqNu8rV64sJyenLH/YK1asmOM+GzVqpKVLlyo9PV0//vijli1bpilTpqhz587atWuXTai6+fySVK1aNV2+fFl//PGHnJycdP78ec2ZM0dz5szJ9nzXF2YfPHhQ+/btU5kyZW7b7vDhw3JycspRuLv5ZyJd+7ncvO7FXm71ff722281duxYJSYmZllHlpKSIi8vr9v2e7vfLUm6ePGiLl68aN3v7OysMm
XKWNdB1a5dO1fjOHjwoFJSUrKsE7vuxsX0P//8s0aPHq0NGzZkCf43B73y5ctnCWElS5bUTz/9ZH2flJRks9/Ly+uWi9B9fHzUt29fTZw4Ub/99pvKly9/58EBNyEMAf/nqaee0jPPPKOkpCS1bt062wXEd+Ls7JztdsMw7rK6nLvV/+3n5o6m61xdXdWoUSM1atRI1apVU9++fbVkyRKNHTs2x31kZmZKknr27KnevXtn26Zu3brWtnXq1NHkyZOzbRcYGJjLERT8zyS77/Phw4fVvHlzhYSEaPLkyQoMDJSrq6u+/PJLTZkyxfo9up07jWPSpEk269OCgoLuaqYrMzNTvr6+WrhwYbb7rwfW8+fPKyIiQp6enpowYYIqV64sd3d37dy5Uy+//HKWseXk51G2bFmbffPmzbvtTQfXfy+Sk5MJQ8gTwhDwfzp06KD+/ftry5Yt+uSTTxxdTo4dPHjQZjbi0KFDyszMtPszcBo2bChJOnXqVJbz3+zAgQMqWrSo9Q9miRIllJGRocjIyNueo3Llyvrxxx/VvHnz217CqVy5sjIzM7V3717rAuWCcru6bmXlypVKS0vTihUrbGZ4brzUdLf+8Y9/2Dwr63ooq1y5siRpz549qlKlSo77q1y5sr7++muFh4ffNkhv3LhRf/75p5YuXaqmTZtatx85ciS3Q7C68c5OSapVq9Zt21+/xHarGUXgTlgzBPyf4sWLa9asWRo3bpyio6MdXU6O3Xw30vWnRLdu3TpP/cXHx2c7a3J9DVL16tVtticmJlrX/EjSiRMn9Pnnn6tly5ZydnaWs7OzOnXqpM8++yzbO9T++OMP69ddunTRyZMn9f7772dp99dff+nSpUuSpPbt28vJyUkTJkzIMvOQ1xmfnNxaL0nFihWTdG1GJKeuz4bcWFtKSormzZuXuyJvo1KlSoqMjLS+wsPDJUktW7ZUiRIlFBcXl+WRC7f7XnXp0kUZGRl67bXXsuz7+++/rePPbmzp6el699138zyWG8cRGRlpnSm68XflupMnT2ru3LmqW7dulhklIKeYGQJucKvLOAXhs88+y7LYVLpW0+0uDx05ckTt2rVTq1atlJiYqI8++khPPfWU6tWrl6c6Bg4cqMuXL6tDhw4KCQlRenq6vvvuO33yyScKDg5W3759bdrXrl1bUVFRGjRokNzc3Kx/BG+8ZDNx4kTFx8ercePGeuaZZ1SzZk0lJydr586d+vrrr5WcnCxJ6tWrlxYvXqznnntO8fHxCg8PV0ZGhn755RctXrzY+vyeKlWq6JVXXtFrr72mRx55RB07dpSbm5u2b9+ugIAAxcXF5Xrc12+rv9Olpfr168vZ2VlvvfWWUlJS5ObmZn1+0K20bNlSrq6uio6OVv/+/XXx4kW9//778vX1zTLTZm+enp6aMmWK/vnPf6pRo0Z66qmnVLJkSf3444+6fPmyPvjgg2yPi4iIUP/+/RUXF6ddu3apZcuWKlKkiA4ePKglS5Zo2rRp6ty5sx566CGVLFlSvXv31qBBg2SxWLRgwYJ8uQz50ksvWS85BgQE6OjRo3rvvfd06dIlTZs2ze7ng3kQhoB7xK0+W6lZs2a3DUOffPKJ9ZkuLi4uio2N1b/+9a881zFp0iQtWbJEX375pebMmaP09HRVqFBBzz//vEaPHp1lLVVERITCwsI0fvx4HT9+XDVr1tT8+fOt64Akyc/PT9u2bdOECRO0dOlSvfvuuypVqpRq1aqlt956y9rOyclJy5cv15QpU/Thhx9q2bJlKlq0qCpVqqTBgwerWrVq1rYTJkxQxYoVNWPGDL3yyisqWrSo6tatq169euV57Dnh7++v2bNnKy4uTv369VNGRobi4+NvG4aqV6+uTz/9VKNHj9YLL7wgf39/DRgwQGXKlNHTTz+dr/VKUr9+/eTr66uJEyfqtddeU5EiRRQSEqKhQ4fe9rjZs2crNDRU7733nkaNGiUXFxcFBwerZ8+e1p
mnUqVKadWqVRo+fLhGjx6tkiVLqmfPnmrevHm2d9XdjZYtW2r27NmaOXOmzp07J29vbzVt2lSjR4/WAw88YNdzwVwsRkGu7ARwX7FYLIqJidG///1vR5cCAHnGmiEAAGBqhCEAAGBqhCEAAGBqLKAGkGcsOQRwP2BmCAAAmBphCAAAmBqXyXTtM3h+//13lShRIk+P2gcAAAXPMAxduHBBAQEBcnLK+/wOYUjS77//nqcPgAQAAI534sSJu/qQXsKQrn2IpHTtm+np6engagAAQE6kpqYqMDDQ+nc8rwhD+v+fQu3p6UkYAgCgkLnbJS4soAYAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKZGGAIAAKbm4ugCAAAwu+ARX9itr6MT29qtL7NgZggAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgaYQgAAJgan1oPAEAe2POT5uFYzAwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTIwwBAABTc2gYiouLU6NGjVSiRAn5+vqqffv22r9/v02bK1euKCYmRqVKlVLx4sXVqVMnnT592qbN8ePH1bZtWxUtWlS+vr568cUX9ffffxfkUAAAQCHl0DCUkJCgmJgYbdmyRevWrdPVq1fVsmVLXbp0ydpm6NChWrlypZYsWaKEhAT9/vvv6tixo3V/RkaG2rZtq/T0dH333Xf64IMPNH/+fI0ZM8YRQwIAAIWMxTAMw9FFXPfHH3/I19dXCQkJatq0qVJSUlSmTBktWrRInTt3liT98ssvqlGjhhITE9WkSROtXr1ajz/+uH7//Xf5+flJkmbPnq2XX35Zf/zxh1xdXe943tTUVHl5eSklJUWenp75OkYAwP0heMQXji4hW0cntnV0CQXGXn+/76k1QykpKZIkHx8fSdKOHTt09epVRUZGWtuEhISoQoUKSkxMlCQlJiaqTp061iAkSVFRUUpNTdXPP/+c7XnS0tKUmppq8wIAAOZ0z4ShzMxMDRkyROHh4apdu7YkKSkpSa6urvL29rZp6+fnp6SkJGubG4PQ9f3X92UnLi5OXl5e1ldgYKCdRwMAAAqLeyYMxcTEaM+ePfrf//6X7+caOXKkUlJSrK8TJ07k+zkBAMC9ycXRBUhSbGysVq1apU2bNql8+fLW7f7+/kpPT9f58+dtZodOnz4tf39/a5tt27bZ9Hf9brPrbW7m5uYmNzc3O48CAAAURg6dGTIMQ7GxsVq2bJk2bNigihUr2uwPDQ1VkSJFtH79euu2/fv36/jx4woLC5MkhYWFaffu3Tpz5oy1zbp16+Tp6amaNWsWzEAAAECh5dCZoZiYGC1atEiff/65SpQoYV3j4+XlJQ8PD3l5ealfv34aNmyYfHx85OnpqYEDByosLExNmjSRJLVs2VI1a9ZUr1699PbbbyspKUmjR49WTEwMsz8AAOCOHBqGZs2aJUlq1qyZzfZ58+apT58+kqQpU6bIyclJnTp1UlpamqKiovTuu+9a2zo7O2vVqlUaMGCAwsLCVKxYMfXu3VsTJkwoqGEAAIBC7J56zpCj8JwhAEBu8Zwhx7svnzMEAABQ0AhDAADA1O6JW+sBACgI9+qlLTgWM0MAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAM
DUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUHBqGNm3apOjoaAUEBMhisWj58uU2+/v06SOLxWLzatWqlU2b5ORk9ejRQ56envL29la/fv108eLFAhwFAAAozBwahi5duqR69epp5syZt2zTqlUrnTp1yvr6+OOPbfb36NFDP//8s9atW6dVq1Zp06ZNevbZZ/O7dAAAcJ9wceTJW7durdatW9+2jZubm/z9/bPdt2/fPq1Zs0bbt29Xw4YNJUkzZsxQmzZtNGnSJAUEBNi9ZgAAcH+559cMbdy4Ub6+vqpevboGDBigP//807ovMTFR3t7e1iAkSZGRkXJyctLWrVtv2WdaWppSU1NtXgAAwJwcOjN0J61atVLHjh1VsWJFHT58WKNGjVLr1q2VmJgoZ2dnJSUlydfX1+YYFxcX+fj4KCkp6Zb9xsXFafz48fldPgDADoJHfOHoEnCfu6fDULdu3axf16lTR3Xr1lXlypW1ceNGNW/ePM/9jhw5UsOGDbO+T01NVWBg4F3VCgAACqd7/jLZjSpVqqTSpUvr0KFDkiR/f3+dOXPGps3ff/+t5OTkW64zkq6tQ/L09LR5AQAAcypUYei3337Tn3/+qbJly0qSwsLCdP78ee3YscPaZsOGDcrMzFTjxo0dVSYAAChEHHqZ7OLFi9ZZHkk6cuSIdu3aJR8fH/n4+Gj8+PHq1KmT/P39dfjwYb300kuqUqWKoqKiJEk1atRQq1at9Mwzz2j27Nm6evWqYmNj1a1bN+4kAwAAOeLQmaHvv/9eDRo0UIMGDSRJw4YNU4MGDTRmzBg5Ozvrp59+Urt27VStWjX169dPoaGh2rx5s9zc3Kx9LFy4UCEhIWrevLnatGmjhx9+WHPmzHHUkAAAQCHj0JmhZs2ayTCMW+5fu3btHfvw8fHRokWL7FkWAAAwkUK1ZggAAMDeCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDUCEMAAMDU7joMZWRkaNeuXTp37pw96gEAAChQuQ5DQ4YM0X//+19J14JQRESEHnjgAQUGBmrjxo32rg8AACBf5ToMffrpp6pXr54kaeXKlTpy5Ih++eUXDR06VK+88ordCwQAAMhPuQ5DZ8+elb+/vyTpyy+/1JNPPqlq1arp6aef1u7du+1eIAAAQH7KdRjy8/PT3r17lZGRoTVr1qhFixaSpMuXL8vZ2dnuBQIAAOQnl9we0LdvX3Xp0kVly5aVxWJRZGSkJGnr1q0KCQmxe4EAAAD5KddhaNy4capdu7ZOnDihJ598Um5ubpIkZ2dnjRgxwu4FAgAA5KdchyFJ6ty5c5ZtvXv3vutiAAAAClqenjOUkJCg6OhoValSRVWqVFG7du20efNme9cGAACQ73Idhj766CNFRkaqaNGiGjRokAYNGiQPDw81b95cixYtyo8aAQAA8o3FMAwjNwfUqFFDzz77rIYOHWqzffLkyXr//fe1b98+uxZYEFJTU+Xl5aWUlBR5eno6uhwAwA2CR3zh6BIKlaMT2zq6hAJjr7/fuZ4Z+vXXXxUdHZ1le7t27XTkyJE8FwIAAOAIuQ5DgYGBWr9+fZbtX3/9tQIDA+1SFAAAQEHJ9d
1kw4cP16BBg7Rr1y499NBDkqRvv/1W8+fP17Rp0+xeIAAAQH7KdRgaMGCA/P399c4772jx4sWSrq0j+uSTT/TEE0/YvUAAAID8lKfnDHXo0EEdOnSwdy0AAAAFLk/PGQIAALhf5GhmyMfHRwcOHFDp0qVVsmRJWSyWW7ZNTk62W3EAAAD5LUdhaMqUKSpRooQkaerUqflZDwAAQIHKURi68XPH+AwyAABwP8lRGEpNTc1xhzzBGQAAFCY5CkPe3t63XSd0o4yMjLsqCAAAoCDlKAzFx8dbvz569KhGjBihPn36KCwsTJKUmJioDz74QHFxcflTJQAAQD7JURiKiIiwfj1hwgRNnjxZ3bt3t25r166d6tSpozlz5rCmCAAAFCq5fs5QYmKiGjZsmGV7w4YNtW3bNrsUBQAAUFDy9EGt77//fpbt//nPf/igVgAAUOjk+uM4pkyZok6dOmn16tVq3LixJGnbtm06ePCgPvvsM7sXCAAAkJ9yPTPUpk0bHThwQNHR0UpOTlZycrKio6N14MABtWnTJj9qBAAAyDd5+qDWwMBAvfnmm/auBQAAoMDlOgxt2rTptvubNm2a52IAAAAKWq7DULNmzbJsu/GBjDx0EQAAFCa5XjN07tw5m9eZM2e0Zs0aNWrUSF999VV+1AgAAJBvcj0z5OXllWVbixYt5OrqqmHDhmnHjh12KQwAAKAg5Hpm6Fb8/Py0f/9+e3UHAABQIHI9M/TTTz/ZvDcMQ6dOndLEiRNVv359e9UFAABQIHIdhurXry+LxSLDMGy2N2nSRHPnzrVbYQAAAAUh12HoyJEjNu+dnJxUpkwZubu7260oAACAgpLrMBQUFJQfdQAAADhEnp5AfenSJSUkJOj48eNKT0+32Tdo0CC7FAYAAFAQch2GfvjhB7Vp00aXL1/WpUuX5OPjo7Nnz6po0aLy9fUlDAEAgEIl17fWDx06VNHR0Tp37pw8PDy0ZcsWHTt2TKGhoZo0aVJ+1AgAAJBvch2Gdu3apeHDh8vJyUnOzs5KS0tTYGCg3n77bY0aNSo/agQAAMg3uQ5DRYoUkZPTtcN8fX11/PhxSdeeTH3ixAn7VgcAAJDPcr1mqEGDBtq+fbuqVq2qiIgIjRkzRmfPntWCBQtUu3bt/KgRAAAg3+R6ZujNN99U2bJlJUlvvPGGSpYsqQEDBuiPP/7QnDlz7F4gAABAfsr1zFDDhg2tX/v6+mrNmjV2LQgAAKAg2e2DWgEAAAqjPD10EQCAOwke8YWjSwByhJkhAABgaoQhAABgaoQhAABgankKQ7GxsUpOTrZ3LQAAAAUux2Hot99+s369aNEiXbx4UZJUp04dnjwNAAAKrRzfTRYSEqJSpUopPDxcV65c0YkTJ1ShQgUdPXpUV69ezc8aAQAA8k2OZ4bOnz+vJUuWKDQ0VJmZmWrTpo2qVaumtLQ0rV27VqdPn871yTdt2qTo6GgFBATIYrFo+fLlNvsNw9CYMWNUtmxZeXh4KDIyUgcPHrRpk5ycrB49esjT01Pe3t7q16+fddYKAADgTnIchq5evaoHH3xQw4cPl4eHh3744QfNmzdPzs7Omjt3ripWrKjq1avn6uSXLl1SvXr1NHPmzGz3v/3225o+fbpmz56trVu3qlixYoqKitKVK1esbXr06KGff/5Z69at06pVq7Rp0yY9++yzuaoDAACYV44vk3l7e6t+/foKDw9Xenq6/vrrL4WHh8vFxUWffPKJypUrp+3bt+fq5K1bt1br1q2z3WcYhqZOnarRo0friSeekCR9+OGH8vPz0/Lly9WtWzft27dPa9as0fbt260fEzJjxgy1adNGkyZNUkBAQK7qAQAA5pPjMHTy5EklJibqu+++099//63Q0FA1atRI6enp2rlzp8qXL6+HH37YboUdOXJESUlJioyMtG7z8vJS48aNlZiYqG7duikxMVHe3t42n5cWGRkpJycnbd26VR06dMi277S0NKWlpVnfp6
am2q1uACjMeGo0zCjHl8lKly6t6OhoxcXFqWjRotq+fbsGDhwoi8WiF154QV5eXoqIiLBbYUlJSZIkPz8/m+1+fn7WfUlJSfL19bXZ7+LiIh8fH2ub7MTFxcnLy8v6CgwMtFvdAACgcMnzQxe9vLzUpUsXFSlSRBs2bNCRI0f0/PPP27O2fDNy5EilpKRYXzwaAAAA88rTB7X+9NNPKleunCQpKChIRYoUkb+/v7p27Wq3wvz9/SVJp0+fVtmyZa3bT58+rfr161vbnDlzxua4v//+W8nJydbjs+Pm5iY3Nze71QoAAAqvPM0MBQYGysnp2qF79uzJl8tMFStWlL+/v9avX2/dlpqaqq1btyosLEySFBYWpvPnz2vHjh3WNhs2bFBmZqYaN25s95oAAMD9J08zQ/Zy8eJFHTp0yPr+yJEj2rVrl3x8fFShQgUNGTJEr7/+uqpWraqKFSvq1VdfVUBAgNq3by9JqlGjhlq1aqVnnnlGs2fP1tWrVxUbG6tu3bpxJxkAAMgRh4ah77//Xo8++qj1/bBhwyRJvXv31vz58/XSSy/p0qVLevbZZ3X+/Hk9/PDDWrNmjdzd3a3HLFy4ULGxsWrevLmcnJzUqVMnTZ8+vcDHAgAACieLYRiGo4twtNTUVHl5eSklJUWenp6OLgcAHIZb6wu/oxPbOrqEAmOvv995vpsMAADgfkAYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApkYYAgAApubi6AIAAID9BI/4wi79HJ3Y1i79FAbMDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFMjDAEAAFO7p8PQuHHjZLFYbF4hISHW/VeuXFFMTIxKlSql4sWLq1OnTjp9+rQDKwYAAIXNPR2GJKlWrVo6deqU9fXNN99Y9w0dOlQrV67UkiVLlJCQoN9//10dO3Z0YLUAAKCwcXF0AXfi4uIif3//LNtTUlL03//+V4sWLdJjjz0mSZo3b55q1KihLVu2qEmTJgVdKgAAKITu+ZmhgwcPKiAgQJUqVVKPHj10/PhxSdKOHTt09epVRUZGWtuGhISoQoUKSkxMdFS5AACgkLmnZ4YaN26s+fPnq3r16jp16pTGjx+vRx55RHv27FFSUpJcXV3l7e1tc4yfn5+SkpJu229aWprS0tKs71NTU/OjfAAAUAjc02GodevW1q/r1q2rxo0bKygoSIsXL5aHh0ee+42Li9P48ePtUSIAOFzwiC8cXQJQqN3zl8lu5O3trWrVqunQoUPy9/dXenq6zp8/b9Pm9OnT2a4xutHIkSOVkpJifZ04cSIfqwYAAPeyQhWGLl68qMOHD6ts2bIKDQ1VkSJFtH79euv+/fv36/jx4woLC7ttP25ubvL09LR5AQAAc7qnL5O98MILio6OVlBQkH7//XeNHTtWzs7O6t69u7y8vNSvXz8NGzZMPj4+8vT01MCBAxUWFsadZAAAIMfu6TD022+/qXv37vrzzz9VpkwZPfzww9qyZYvKlCkjSZoyZYqcnJzUqVMnpaWlKSoqSu+++66DqwYAAIWJxTAMw9FFOFpqaq
q8vLyUkpLCJTMAhQ4LqJEfjk5s6+gS7shef78L1ZohAAAAeyMMAQAAUyMMAQAAU7unF1ADwP2KdT7AvYOZIQAAYGqEIQAAYGqEIQAAYGqEIQAAYGqEIQAAYGqEIQAAYGqEIQAAYGqEIQAAYGqEIQAAYGqEIQAAYGqEIQAAYGp8NhmA+569Pgfs6MS2dukHwL2FmSEAAGBqhCEAAGBqhCEAAGBqhCEAAGBqhCEAAGBqhCEAAGBq3FoPADlkr1v0AdxbmBkCAACmRhgCAACmxmUyk7LndD9P5QUAFGaEIQD3JNbnACgoXCYDAACmxswQ7hlcugMAOAJhCIDdcGkLQGHEZTIAAGBqhCEAAGBqhCEAAGBqrBkCCil7rc9hsTkAsyMMFSIsTgUAwP64TAYAAEyNMAQAAEyNMAQAAEyNMAQAAEyNBdS4ayzsLtz4+QEwO8IQUIAIHgBw7+EyGQAAMDVmhoA7YDYHAO5vzAwBAABTIwwBAABT4zIZ7ktc2gIA5BQzQwAAwNSYGcpnzFAAAHBvY2YIAACYGmEIAACYGmEIAACYGmEIAACYGmEIAACYGmEIAACYGmEIAACYGs8ZAgAAWdjzOXlHJ7a1W1/5gZkhAABgaoQhAABgaoQhAABgaoQhAABgaoQhAABgaoQhAABgaoQhAABgaoQhAABgaoQhAABgaoQhAABgaoQhAABgavdNGJo5c6aCg4Pl7u6uxo0ba9u2bY4uCQAAFAL3RRj65JNPNGzYMI0dO1Y7d+5UvXr1FBUVpTNnzji6NAAAcI+7L8LQ5MmT9cwzz6hv376qWbOmZs+eraJFi2ru3LmOLg0AANzjCn0YSk9P144dOxQZGWnd5uTkpMjISCUmJjqwMgAAUBi4OLqAu3X27FllZGTIz8/PZrufn59++eWXbI9JS0tTWlqa9X1KSookKTU11e71ZaZdtnufAAAUJvnx9/XGfg3DuKt+Cn0Yyou4uDiNHz8+y/bAwEAHVAMAwP3Na2r+9n/hwgV5eXnl+fhCH4ZKly4tZ2dnnT592mb76dOn5e/vn+0xI0eO1LBhw6zvMzMzlZycrFKlSsliseRbrampqQoMDNSJEyfk6emZb+e5F5hlrGYZp2SesZplnJJ5xmqWcUrmGev1cR4/flwWi0UBAQF31V+hD0Ourq4KDQ3V+vXr1b59e0nXws369esVGxub7TFubm5yc3Oz2ebt7Z3Plf5/np6e9/Uv6Y3MMlazjFMyz1jNMk7JPGM1yzgl84zVy8vLLuMs9GFIkoYNG6bevXurYcOGevDBBzV16lRdunRJffv2dXRpAADgHndfhKGuXbvqjz/+0JgxY5SUlKT69etrzZo1WRZVAwAA3Oy+CEOSFBsbe8vLYvcKNzc3jR07NssluvuRWcZqlnFK5hmrWcYpmWesZhmnZJ6x2nucFuNu70cDAAAoxAr9QxcBAADuBmEIAACYGmEIAACYGmEIAACYGmGoAMTFxalRo0YqUaKEfH191b59e+3fv9/RZeW7iRMnymKxaMiQIY4uJV+cPHlSPXv2VKlSpeTh4aE6dero+++/d3RZdpWRkaFXX31VFStWlIeHhypXrqzXXnvtrj8H6F6wadMmRUdHKyAgQBaLRcuXL7fZbxiGxowZo7Jly8rDw0ORkZE6ePCgY4q9C7cb59WrV/Xyyy+rTp06KlasmAICAvSPf/xDv//+u+MKvgt3+pne6LnnnpPFYtHUqVMLrD57yck49+3bp3bt2snLy0vFihVTo0aNdPz48YIv9i7daawXL15UbGysypcvLw8PD9WsWVOzZ8/O9XkIQwUgISFBMTEx2rJli9atW6erV6+qZcuWunTpkqNLyzfbt2/Xe++9p7p16zq6lHxx7tw5hYeHq0iRIlq9erX27t2rd955RyVLlnR0aXb11ltvadasWfr3v/+tffv26a233tLbb7+tGTNmOLq0u3bp0iXVq1dPM2fOzHb/22+/renTp2v27NnaunWrih
UrpqioKF25cqWAK707txvn5cuXtXPnTr366qvauXOnli5dqv3796tdu3YOqPTu3elnet2yZcu0ZcuWu/4IB0e50zgPHz6shx9+WCEhIdq4caN++uknvfrqq3J3dy/gSu/encY6bNgwrVmzRh999JH27dunIUOGKDY2VitWrMjdiQwUuDNnzhiSjISEBEeXki8uXLhgVK1a1Vi3bp0RERFhDB482NEl2d3LL79sPPzww44uI9+1bdvWePrpp222dezY0ejRo4eDKsofkoxly5ZZ32dmZhr+/v7Gv/71L+u28+fPG25ubsbHH3/sgArt4+ZxZmfbtm2GJOPYsWMFU1Q+udVYf/vtN6NcuXLGnj17jKCgIGPKlCkFXps9ZTfOrl27Gj179nRMQfkou7HWqlXLmDBhgs22Bx54wHjllVdy1TczQw6QkpIiSfLx8XFwJfkjJiZGbdu2VWRkpKNLyTcrVqxQw4YN9eSTT8rX11cNGjTQ+++/7+iy7O6hhx7S+vXrdeDAAUnSjz/+qG+++UatW7d2cGX568iRI0pKSrL5Hfby8lLjxo2VmJjowMryX0pKiiwWS4F+XmNByczMVK9evfTiiy+qVq1aji4nX2RmZuqLL75QtWrVFBUVJV9fXzVu3Pi2lwwLs4ceekgrVqzQyZMnZRiG4uPjdeDAAbVs2TJX/RCGClhmZqaGDBmi8PBw1a5d29Hl2N3//vc/7dy5U3FxcY4uJV/9+uuvmjVrlqpWraq1a9dqwIABGjRokD744ANHl2ZXI0aMULdu3RQSEqIiRYqoQYMGGjJkiHr06OHo0vJVUlKSJGX5SB8/Pz/rvvvRlStX9PLLL6t79+735Yd8vvXWW3JxcdGgQYMcXUq+OXPmjC5evKiJEyeqVatW+uqrr9ShQwd17NhRCQkJji7P7mbMmKGaNWuqfPnycnV1VatWrTRz5kw1bdo0V/3cNx/HUVjExMRoz549+uabbxxdit2dOHFCgwcP1rp16wrltencyMzMVMOGDfXmm29Kkho0aKA9e/Zo9uzZ6t27t4Ors5/Fixdr4cKFWrRokWrVqqVdu3ZpyJAhCggIuK/GiWuLqbt06SLDMDRr1ixHl2N3O3bs0LRp07Rz505ZLBZHl5NvMjMzJUlPPPGEhg4dKkmqX7++vvvuO82ePVsRERGOLM/uZsyYoS1btmjFihUKCgrSpk2bFBMTo4CAgFxdnWBmqADFxsZq1apVio+PV/ny5R1djt3t2LFDZ86c0QMPPCAXFxe5uLgoISFB06dPl4uLizIyMhxdot2ULVtWNWvWtNlWo0aNQnm3xu28+OKL1tmhOnXqqFevXho6dOh9P/Pn7+8vSTp9+rTN9tOnT1v33U+uB6Fjx45p3bp19+Ws0ObNm3XmzBlVqFDB+u/TsWPHNHz4cAUHBzu6PLspXbq0XFxcTPHv019//aVRo0Zp8uTJio6OVt26dRUbG6uuXbtq0qRJueqLmaECYBiGBg4cqGXLlmnjxo2qWLGio0vKF82bN9fu3btttvXt21chISF6+eWX5ezs7KDK7C88PDzL4xEOHDigoKAgB1WUPy5fviwnJ9v/Z3J2drb+3+f9qmLFivL399f69etVv359SVJqaqq2bt2qAQMGOLY4O7sehA4ePKj4+HiVKlXK0SXli169emWZKYiKilKvXr3Ut29fB1Vlf66urmrUqJEp/n26evWqrl69apd/owhDBSAmJkaLFi3S559/rhIlSljXHHh5ecnDw8PB1dlPiRIlsqyDKlasmEqVKnXfrY8aOnSoHnroIb355pvq0qWLtm3bpjlz5mjOnDmOLs2uoqOj9cYbb6hChQqqVauWfvjhB02ePFlPP/20o0u7axcvXtShQ4es748cOaJdu3bJx8dHFSpU0JAhQ/T666+ratWqqlixol599VUFBASoffv2jis6D243zrJly6pz587auXOnVq1apYyMDOu/Tz4+PnJ1dXVU2Xlyp5/pzUGvSJ
Ei8vf3V/Xq1Qu61Ltyp3G++OKL6tq1q5o2bapHH31Ua9as0cqVK7Vx40bHFZ1HdxprRESEXnzxRXl4eCgoKEgJCQn68MMPNXny5Nyd6K7uc0OOSMr2NW/ePEeXlu/u11vrDcMwVq5cadSuXdtwc3MzQkJCjDlz5ji6JLtLTU01Bg8ebFSoUMFwd3c3KlWqZLzyyitGWlqao0u7a/Hx8dn+d9m7d2/DMK7dXv/qq68afn5+hpubm9G8eXNj//79ji06D243ziNHjtzy36f4+HhHl55rd/qZ3qyw3lqfk3H+97//NapUqWK4u7sb9erVM5YvX+64gu/CncZ66tQpo0+fPkZAQIDh7u5uVK9e3XjnnXeMzMzMXJ3HYhj3waNkAQAA8ogF1AAAwNQIQwAAwNQIQwAAwNQIQwAAwNQIQwAAwNQIQwAAwNQIQwAAwNQIQwAAwNQIQwAKrfnz58vb27tAzrV//375+/vrwoULBXLuvXv3qnz58rp06VK+nQPANYQhALfVp08fWSwWWSwWFSlSRH5+fmrRooXmzp1boB/YGhwcrKlTp9ps69q1qw4cOFAg5x85cqQGDhyoEiVKFMi5a9asqSZNmuT+M5YA5BphCMAdtWrVSqdOndLRo0e1evVqPfrooxo8eLAef/xx/f3333nu1zCMuzrew8NDvr6+eT4+p44fP65Vq1apT58+BXruvn37atasWXf1PQJwZ4QhAHfk5uYmf39/lStXTg888IBGjRqlzz//XKtXr9b8+fMlSUePHpXFYtGuXbusx50/f14Wi8X6adkbN26UxWLR6tWrFRoaKjc3N33zzTc6fPiwnnjiCfn5+al48eJq1KiRvv76a2s/zZo107FjxzR06FDrLJWU/aWqWbNmqXLlynJ1dVX16tW1YMECm/0Wi0X/+c9/1KFDBxUtWlRVq1bVihUrbjv+xYsXq169eipXrpx1283nHjdunOrXr68FCxYoODhYXl5e6tatm/WyWnaOHTum6OholSxZUsWKFVOtWrX05ZdfWve3aNFCycnJSkhIuG19AO4OYQhAnjz22GOqV6+eli5dmutjR4wYoYkTJ2rfvn2qW7euLl68qDZt2mj9+vX64Ycf1KpVK0VHR+v48eOSpKVLl6p8+fKaMGGCTp06pVOnTmXb77JlyzR48GANHz5ce/bsUf/+/dW3b1/Fx8fbtBs/fry6dOmin376SW3atFGPHj2UnJx8y3o3b96shg0b3nFchw8f1vLly7Vq1SqtWrVKCQkJmjhx4i3bx8TEKC0tTZs2bdLu3bv11ltvqXjx4tb9rq6uql+/vjZv3nzHcwPIO8IQgDwLCQnR0aNHc33chAkT1KJFC1WuXFk+Pj6qV6+e+vfvr9q1a6tq1ap67bXXVLlyZeuMjY+Pj5ydnVWiRAn5+/vL398/234nTZqkPn366Pnnn1e1atU0bNgwdezYUZMmTbJp16dPH3Xv3l1VqlTRm2++qYsXL2rbtm23rPfYsWMKCAi447gyMzM1f/581a5dW4888oh69eql9evX37L98ePHFR4erjp16qhSpUp6/PHH1bRpU5s2AQEBOnbs2B3PDSDvCEMA8swwDOslq9y4eZbl4sWLeuGFF1SjRg15e3urePHi2rdvn3VmKKf27dun8PBwm23h4eHat2+fzba6detavy5WrJg8PT115syZW/b7119/yd3d/Y7nDw4Oti6wlqSyZcvett9Bgwbp9ddfV3h4uMaOHauffvopSxsPDw9dvnz5jucGkHeEIQB5tm/fPlWsWFGS5OR07Z8TwzCs+69evZrtccWKFbN5/8ILL2jZsmV68803tXnzZu3atUt16tRRenp6vtRdpEgRm/cWi+W2d8aVLl1a586ds3u///znP/Xrr7+qV69e2r17txo2bKgZM2bYtElOTlaZMmXueG4AeUcYApAnGzZs0O7du9WpUydJsv7BvnE9z42LqW/n22+/VZ8+fdShQwfVqVNH/v7+WS
6/ubq6KiMj47b91KhRQ99++22WvmvWrJmjOm6lQYMG2rt37131cSuBgYF67rnntHTpUg0fPlzvv/++zf49e/aoQYMG+XJuANe4OLoAAPe+tLQ0JSUlKSMjQ6dPn9aaNWsUFxenxx9/XP/4xz8kXbuc06RJE02cOFEVK1bUmTNnNHr06Bz1X7VqVS1dulTR0dGyWCx69dVXs8yoBAcHa9OmTerWrZvc3NxUunTpLP28+OKL6tKlixo0aKDIyEitXLlSS5cutbkzLS+ioqL0z3/+UxkZGXJ2dr6rvm40ZMgQtW7dWtWqVdO5c+cUHx+vGjVqWPcfPXpUJ0+eVGRkpN3OCSArZoYA3NGaNWtUtmxZBQcHq1WrVoqPj9f06dP1+eef24SDuXPn6u+//1ZoaKiGDBmi119/PUf9T548WSVLltRDDz2k6OhoRUVF6YEHHrBpM2HCBB09elSVK1e+5WWj9u3ba9q0aZo0aZJq1aql9957T/PmzVOzZs3yPHZJat26tVxcXO46VN0sIyNDMTExqlGjhlq1aqVq1arp3Xffte7/+OOP1bJlSwUFBdn1vABsWYwbL/ADALI1c+ZMrVixQmvXri2Q86Wnp6tq1apatGhRlkXhAOyLy2QAkAP9+/fX+fPndeHCBZs7xvLL8ePHNWrUKIIQUACYGQIAAKbGmiEAAGBqhCEAAGBqhCEAAGBqhCEAAGBqhCEAAGBqhCEAAGBqhCEAAGBqhCEAAGBqhCEAAGBq/w9gUWVx4DhhfgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "import torchaudio\n", + "import numpy\n", + "import glob\n", + "import os\n", + "\n", + "# fetching all flac files in MiniLibriSpeech\n", + "all_flacs = glob.glob(os.path.join(\"/content/LibriSpeech/train-clean-5\", \"**/*.flac\"), recursive=True)\n", + "\n", + "print(\"Number of audio files in MiniLibriSpeech train-clean-5: \", len(all_flacs))\n", + "\n", + "# step-by-step\n", + "# collect durations\n", + "all_durations = numpy.zeros(len(all_flacs))\n", + "for i, audio in enumerate(all_flacs):\n", + " wav_meta = torchaudio.info(audio)\n", + " all_durations[i] = wav_meta.num_frames / wav_meta.sample_rate\n", + "\n", + "# plot histogram\n", + "_ = plt.hist(all_durations, bins='auto')\n", + "plt.title(\"MiniLibriSpeech: train-clean-5\")\n", + "plt.xlabel(\"Duration (in s)\")\n", + "plt.ylabel(\"# audios\")\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HYqdrRzYHCOf" + }, + "source": [ + "We can see that most files have a length between 14 and 16 seconds. 
Moreover, there is a large variance in the file length.\n", + "So if we sample randomly without any particular strategy a certain number of examples (e.g., 8), pad them, and batch them together we will end up with lots of padded values.\n", + "\n", + "This way, we will waste a significant portion of computation on padded values.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fZhST3jGLgRV" + }, + "source": [ + "We can try to effectively compute the total number of samples which belong to padding when iterating over the whole dataset with a fixed batch size.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "X0WcpKPoiDSi" + }, + "source": [ + "We follow here SpeechBrain data preparation best practices.\n", + "We parse all examples into a `.json` file so that parsing occurs only once and not at the start of each new experiment. In fact, parsing many small files can take a lot of time on networked storage or slow physical hard-drives." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "executionInfo": { + "elapsed": 18596, + "status": "ok", + "timestamp": 1718826607399, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "r0RYowCdOHp3" + }, + "outputs": [], + "source": [ + "# prepare LibriSpeech dataset using pre-made, downloaded parse_data.py script from\n", + "# the data-io tutorial available here: https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html\n", + "from parse_data import parse_to_json\n", + "parse_to_json(\"/content/LibriSpeech/train-clean-5\")\n", + "# this produced a manifest data.json file:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2j--OL9sideZ" + }, + "source": [ + "We can briefly look at each `.json` file. In particular we are interested in the `length` field which contains the length in samples for each audio in the dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 28, + "status": "ok", + "timestamp": 1718826607399, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "N0Il1pGvhLZ9", + "outputId": "efc95e9f-5e6e-40ec-8559-5c4b4e4b0357" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " },\n", + " \"4640-19188-0038\": {\n", + " \"file_path\": \"/content/LibriSpeech/train-clean-5/4640/19188/4640-19188-0038.flac\",\n", + " \"words\": \"THE FIFTH MAN WAS SAVED\",\n", + " \"spkID\": \"speaker_4640\",\n", + " \"length\": 41200\n", + " },\n", + " \"4640-19188-0005\": {\n", + " \"file_path\": \"/content/LibriSpeech/train-clean-5/4640/19188/4640-19188-0005.flac\",\n", + " \"words\": \"COME SAID HE YOU MUST HAVE A LITTLE PITY DO YOU KNOW WHAT THE QUESTION IS HERE IT IS A QUESTION OF WOMEN SEE HERE ARE THERE WOMEN OR ARE THERE NOT ARE THERE CHILDREN OR ARE THERE NOT\",\n", + " \"spkID\": \"speaker_4640\",\n", + " \"length\": 247920\n", + " },\n", + " \"4640-19188-0035\": {\n", + " \"file_path\": \"/content/LibriSpeech/train-clean-5/4640/19188/4640-19188-0035.flac\",\n", + " \"words\": \"DO YOU DESIGNATE WHO IS TO REMAIN YES SAID THE FIVE CHOOSE WE WILL OBEY YOU MARIUS DID NOT BELIEVE THAT HE WAS CAPABLE OF ANOTHER EMOTION\",\n", + " \"spkID\": \"speaker_4640\",\n", + " \"length\": 184720\n", + " }\n", + "}" + ] + } + ], + "source": [ + "!tail -n 20 data.json" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9FNKDqIeisYY" + }, + "source": [ + "We can use this `.json` manifest file to instantiate a SpeechBrain `DynamicItemDataset` object.\n", + "\n", + "If this is not clear refer to the [data-io tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html).\n", + "\n", + "We also define a `data-io pipeline` to read the audio 
file." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 2044, + "status": "ok", + "timestamp": 1718826609437, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "FBIoa_DQhNt2", + "outputId": "35c9632c-4f14-472f-b43a-d5358d3859a8" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'signal': tensor([ 7.9346e-04, 6.7139e-04, 4.8828e-04, ..., -2.1362e-04,\n", + " -1.2207e-04, 3.0518e-05]),\n", + " 'file_path': '/content/LibriSpeech/train-clean-5/3664/178355/3664-178355-0029.flac'}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# initializing a sb dataset object from this json\n", + "from speechbrain.dataio.dataset import DynamicItemDataset\n", + "import speechbrain\n", + "train_data = speechbrain.dataio.dataset.DynamicItemDataset.from_json(\"data.json\")\n", + "# we define a pipeline to read audio\n", + "@speechbrain.utils.data_pipeline.takes(\"file_path\")\n", + "@speechbrain.utils.data_pipeline.provides(\"signal\")\n", + "def audio_pipeline(file_path):\n", + " sig = speechbrain.dataio.dataio.read_audio(file_path)\n", + " return sig\n", + "# setting the pipeline\n", + "train_data.add_dynamic_item(audio_pipeline)\n", + "train_data.set_output_keys([\"signal\", \"file_path\"])\n", + "train_data[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4CsCgHaHjPj0" + }, + "source": [ + "Voilà, we now can start to iterate over this dataset using a torch `Dataloader`.\n", + "By using `PaddedBatch` as a `collate_fn` SpeechBrain will handle padding automatically for us. 
Neat!\n", + "\n", + "We can also define a simple function `count_samples` to count samples that belong to padding in each batch" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "executionInfo": { + "elapsed": 11, + "status": "ok", + "timestamp": 1718826609438, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "lypSm786W1GH" + }, + "outputs": [], + "source": [ + "import torch\n", + "import time\n", + "from torch.utils.data import DataLoader\n", + "from speechbrain.dataio.batch import PaddedBatch\n", + "\n", + "# counting tot padded values when batching the dataset with batch_size = 32\n", + "batch_size = 32\n", + "\n", + "# PaddedBatch will pad audios to the right\n", + "dataloader = DataLoader(train_data, collate_fn=PaddedBatch, batch_size=batch_size)\n", + "\n", + "def count_samples(dataloader):\n", + " true_samples = 0\n", + " padded_samples = 0\n", + " t1 = time.time()\n", + " for batch in dataloader:\n", + " audio, lens = batch.signal\n", + "\n", + " true_samples += torch.sum(audio.shape[-1]*lens).item()\n", + " padded_samples += torch.sum(audio.shape[-1]*(1-lens)).item()\n", + "\n", + " elapsed = time.time() - t1\n", + " tot_samples = true_samples + padded_samples\n", + " return true_samples / tot_samples, padded_samples / tot_samples, elapsed\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 1087, + "status": "ok", + "timestamp": 1718826610518, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "rlfkiahZa0Qx", + "outputId": "205dd4c3-7e8a-46b4-f770-09ff090e410c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PaddedData(data=tensor([[ 7.9346e-04, 6.7139e-04, 4.8828e-04, ..., 0.0000e+00,\n", + " 0.0000e+00, 0.0000e+00],\n", + " [-9.7656e-04, 
-4.8828e-04, -2.7466e-04, ..., 0.0000e+00,\n", + " 0.0000e+00, 0.0000e+00],\n", + " [ 1.8311e-04, 9.1553e-05, 3.0518e-04, ..., 0.0000e+00,\n", + " 0.0000e+00, 0.0000e+00],\n", + " ...,\n", + " [-4.8828e-04, -3.6621e-04, -4.8828e-04, ..., 0.0000e+00,\n", + " 0.0000e+00, 0.0000e+00],\n", + " [ 0.0000e+00, -6.1035e-05, -3.6621e-04, ..., 0.0000e+00,\n", + " 0.0000e+00, 0.0000e+00],\n", + " [-7.6294e-04, -8.8501e-04, -8.8501e-04, ..., 0.0000e+00,\n", + " 0.0000e+00, 0.0000e+00]]), lengths=tensor([0.7254, 0.9600, 0.9525, 0.9864, 0.8919, 0.9579, 0.2834, 0.9282, 0.5404,\n", + " 0.8429, 0.9552, 0.9667, 0.6845, 0.8650, 0.9164, 0.8892, 0.3215, 0.9579,\n", + " 0.7363, 0.7172, 0.8601, 0.8959, 0.8529, 0.7826, 1.0000, 0.9325, 0.9818,\n", + " 0.9679, 0.8974, 0.7914, 0.9912, 0.9319]))\n", + "PaddedData(data=tensor([[ 0.0006, 0.0003, -0.0003, ..., 0.0000, 0.0000, 0.0000],\n", + " [ 0.0005, 0.0004, 0.0005, ..., 0.0000, 0.0000, 0.0000],\n", + " [ 0.0003, 0.0004, 0.0004, ..., 0.0000, 0.0000, 0.0000],\n", + " ...,\n", + " [-0.0055, -0.0057, -0.0051, ..., 0.0000, 0.0000, 0.0000],\n", + " [ 0.0010, -0.0007, -0.0013, ..., 0.0000, 0.0000, 0.0000],\n", + " [ 0.0015, 0.0007, 0.0022, ..., 0.0000, 0.0000, 0.0000]]), lengths=tensor([0.9501, 0.9389, 0.8989, 0.9055, 0.9780, 0.7591, 0.8813, 0.7880, 1.0000,\n", + " 0.9442, 0.2604, 0.7607, 0.9253, 0.9048, 0.8974, 0.7514, 0.9895, 0.2610,\n", + " 0.8360, 0.6321, 0.5701, 0.9231, 0.9764, 0.7725, 0.3549, 0.8633, 0.7337,\n", + " 0.7446, 0.9309, 0.8590, 0.9262, 0.5115]))\n", + "PaddedData(data=tensor([[ 0.0000, 0.0003, 0.0006, ..., 0.0000, 0.0000, 0.0000],\n", + " [-0.0150, -0.0154, -0.0150, ..., 0.0000, 0.0000, 0.0000],\n", + " [-0.0012, -0.0012, -0.0022, ..., 0.0000, 0.0000, 0.0000],\n", + " ...,\n", + " [-0.0011, -0.0031, -0.0020, ..., 0.0000, 0.0000, 0.0000],\n", + " [-0.0011, 0.0016, 0.0015, ..., 0.0000, 0.0000, 0.0000],\n", + " [-0.0012, -0.0031, -0.0022, ..., 0.0000, 0.0000, 0.0000]]), lengths=tensor([0.7143, 0.9765, 0.8430, 0.9280, 0.8743, 0.9289, 
0.8849, 0.6190, 0.8590,\n", + " 1.0000, 0.7652, 0.3936, 0.7022, 0.8803, 0.7474, 0.9388, 0.9602, 0.8933,\n", + " 0.9331, 0.9370, 0.8327, 0.8547, 0.7664, 0.6492, 0.7902, 0.8996, 0.8267,\n", + " 0.9524, 0.8189, 0.8502, 0.6377, 0.7077]))\n" + ] + } + ], + "source": [ + "for i, d in enumerate(dataloader):\n", + " print(d.signal)\n", + " # few example are enough to demonstrate what's going on here\n", + " if i == 2:\n", + " break" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Qp0vH1iCjwXl" + }, + "source": [ + "Let's count the samples when using a fixed batch size of 32 (as above) and the examples are sampled randomly." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 11139, + "status": "ok", + "timestamp": 1718826621655, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "uKOZybJ0jtQM", + "outputId": "78f4f270-940f-42e7-f268-1ea0b70d7bbc" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Random Sampling: % True samples 76.8, % of padding 23.2, Total time 11.06s\n" + ] + } + ], + "source": [ + "percent_true, percent_padded, elapsed = count_samples(dataloader)\n", + "print(\"Random Sampling: % True samples {:.1f}, % of padding {:.1f}, Total time {:.2f}s\".format(percent_true*100, percent_padded*100, elapsed))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Tid6zWnK2_as" + }, + "source": [ + "*We* are wasting more than 20% of computations in each training iteration on useless values which are only there to enable batched computations.\n", + "\n", + "Can we avoid such waste, speed up training, and consume less energy?\n", + "\n", + "Sure, we can simply sort the dataset according to the length of the examples in ascending or descending order and then batch the examples together.\n" + ] + }, + { + "cell_type": "code", + 
"execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 10682, + "status": "ok", + "timestamp": 1718826632325, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "at-_sxv8w6hs", + "outputId": "1fe00421-59ae-475b-87bd-98528e4cc1a7" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "After sorting: % True samples 98.8, % of padding 1.2, Total time 10.65\n" + ] + } + ], + "source": [ + "# if you followed the data-io tutorial you already know that sorting is super simple:\n", + "sorted_data = train_data.filtered_sorted(sort_key=\"length\")\n", + "dataloader = DataLoader(sorted_data, collate_fn=PaddedBatch, batch_size=batch_size)\n", + "percent_true, percent_padded, elapsed = count_samples(dataloader)\n", + "print(\"After sorting: % True samples {:.1f}, % of padding {:.1f}, Total time {:.2f}\".format(percent_true*100, percent_padded*100, elapsed))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pmmBM6E_5CFh" + }, + "source": [ + "That is quite a reduction. Now, we are almost not wasting any compute on padded values as we have minimized padding by taking audios with roughly the same length in each batch. Iterating over one epoch is also significantly faster.\n", + "\n", + "But this means that we must train with a sorted dataset.\n", + "In some applications, this might hurt the performance as the network sees the examples always in the same order.\n", + "\n", + "In other applications sorting the examples can instead bring better performance as it can be seen as a sort of curriculum learning. This is the case for example for our TIMIT recipes.\n", + "\n", + "Dynamic Batching allows users to trade-off between full random sampling of the examples and deterministic sampling from sorted examples." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hOxnX03A1eS6" + }, + "source": [ + "Another problem with fixed batch size is that we are under-utilizing our resources for the shortest examples.\n", + "Suppose we use a fixed batch size of 8, and our dataset is sorted in ascending order. This means we must have sufficient memory to train on the 8 longest examples. But we also train on the 8 shortest ones!\n", + "In many instances, we can afford to batch a larger number of shorter examples together and optimize the GPU usage.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iEuEnbEz75sr" + }, + "source": [ + "## SpeechBrain `DynamicBatchSampler` class" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Mecsawr--Fff" + }, + "source": [ + "SpeechBrain provides a useful abstraction to perform Dynamic Batching:\n", + "\n", + "---\n", + "\n", + "**DynamicBatchSampler**.\n", + "\n", + "In particular, with the right settings, it allows us to train large models even with 12 GB VRAM GPUs in a reasonable time. When using high-performance high VRAM GPUs, instead, it can significantly reduce training time.\n", + "\n", + "**This abstraction allows us to select a good trade-off between training speed, randomization of sampling, and VRAM usage.**\n", + "\n", + "It is up to you, depending on your application scenario and hardware, which of these characteristics should be prioritized.\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "j8M-LuNBobTk" + }, + "source": [ + "`DynamicBatchSampler` belongs to the `torch.utils.data` `Sampler` class and is a torch *Batch Sampler*:\n", + "\n", + "Being a batch Sampler, it is just a *python generator* which returns, at each call, a list containing the indexes of the examples which should be batched together by the `DataLoader` using the `collate_fn`. 
These indexes are used to fetch the actual examples in the `torch.utils.data.Dataset` class using the `__getitem__` method.\n", + "\n", + "Here is an example with batch_size 2. The DataLoader is responsible for taking care of parallelization of the Dataset `__getitem__` method. The indexes of the examples are provided by the Batch Sampler.\n", + "For more info, you can refer to the official [Pytorch documentation on torch.utils.data](https://pytorch.org/docs/stable/data.html)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TaRTseKUgz5D" + }, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "O4OQaEM5kQMD" + }, + "source": [ + "### Using `speechbrain.dataio.samplers.DynamicBatchSampler`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XFzGEwfJkZxx" + }, + "source": [ + "`DynamicBatchSampler` has several input arguments upon instantiation and provides a great deal of flexibility.\n", + "\n", + "We will practically illustrate what is the effect of some of these using MiniLibriSpeech and how each of these can change the trade-off between speed, randomization, and VRAM usage." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_tM5iEaYolWS" + }, + "source": [ + "**NOTE:** you should be highly familiar with SpeechBrain [data-io](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html) to follow this tutorial." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "executionInfo": { + "elapsed": 6, + "status": "ok", + "timestamp": 1718826632325, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "lTogAfnOrEjp" + }, + "outputs": [], + "source": [ + "# initializing a sb dataset object from this json\n", + "from speechbrain.dataio.dataset import DynamicItemDataset\n", + "import speechbrain\n", + "\n", + "# we instantiate here the train data dataset from the json manifest file\n", + "train_data = DynamicItemDataset.from_json(\"data.json\")\n", + "\n", + "# we define a pipeline to read audio\n", + "@speechbrain.utils.data_pipeline.takes(\"file_path\")\n", + "@speechbrain.utils.data_pipeline.provides(\"signal\")\n", + "def audio_pipeline(file_path):\n", + " sig = speechbrain.dataio.dataio.read_audio(file_path)\n", + " return sig\n", + "\n", + "# setting the pipeline\n", + "train_data.add_dynamic_item(audio_pipeline)\n", + "train_data.set_output_keys([\"signal\", \"file_path\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ggol_VzvrwME" + }, + "source": [ + "Crucially to use `DynamicBatchSampler` **it is important that the manifest/dataset description file** (`json` or `csv`) **contains**, for each example, **an entry which specifies the duration or length of each example**.\n", + "The `DynamicBatchSampler` will use this information to batch efficiently examples together." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 5, + "status": "ok", + "timestamp": 1718826632325, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "X3JSZOecrzud", + "outputId": "cb0a9bfe-6fef-4aad-c851-28eb9d71a35f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " \"spkID\": \"speaker_4640\",\n", + " \"length\": 247920\n", + " },\n", + " \"4640-19188-0035\": {\n", + " \"file_path\": \"/content/LibriSpeech/train-clean-5/4640/19188/4640-19188-0035.flac\",\n", + " \"words\": \"DO YOU DESIGNATE WHO IS TO REMAIN YES SAID THE FIVE CHOOSE WE WILL OBEY YOU MARIUS DID NOT BELIEVE THAT HE WAS CAPABLE OF ANOTHER EMOTION\",\n", + " \"spkID\": \"speaker_4640\",\n", + " \"length\": 184720\n", + " }\n", + "}" + ] + } + ], + "source": [ + "!tail -n 10 data.json" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "elu2P2lar5lH" + }, + "source": [ + "We can see that in this case we have a length key containing, for each audio, the length in samples." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QFEPu-d6q8r-" + }, + "source": [ + "#### Instantiating `DynamicBatchSampler`: Core Parameters\n", + "\n", + "---\n", + "At its core, `DynamicBatchSampler` batches examples with similar lengths based on \"buckets\". Upon instantiation, based on the input args, several buckets are created. These buckets define a number of contiguous intervals e.g. $0\\leq x < 200, 200 \\leq x < 400$ and so on. \n", + "Examples whose lengths fall into a certain bucket are assumed as they have the same length and can be batched together. 
In some way, we are \"quantizing\" the lengths of the examples in the dataset.\n", + "\n", + "In the Figure below we have N buckets, each defined by his right boundary.\n", + "For each bucket, we can have a different `batch_size` because we can fit more examples falling in the leftmost bucket than the rightmost one.\n", + "\n", + "For the first bucket, the batch size is 8 because 1725 // 200 = 8.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XilSLmHmtHYY" + }, + "source": [ + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5tQSe564wK2U" + }, + "source": [ + "In the Figure below we illustrate how 14 examples with different lengths are \"bucketized\": 3 examples in the first bucket, 5 examples in the second, 2 in the third, 2 in the fourth and one in the last.\n", + "\n", + "One example is discarded because it is too long (its length is more than `max_batch_size`)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zXmT-3KWwIkV" + }, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eNco4lz-JurP" + }, + "source": [ + "A minimal instantiation of `DynamicBatchSampler` requires four arguments at least:\n", + "\n", + "1. A `Dataset` object (`train_data` here, note it can also be validation or test set).\n", + "2. `max_batch_length`: the maximum length we want in a batch. This will be the maximum aggregated length of all examples in a batch we are going to allow and must be chosen carefully to avoid OOM errors.\n", + "A higher number means we are going to have, on average, an higher batch size so you must apply the same \"tricks\" as when batch size is increased for standard fixed batch size training.
E.g. increase learning rate.\n", + "3. `num_buckets`: number of buckets one wishes to use. If just one bucket is used, all examples can be batched together, and dynamic batching in this instance is the same as uniform random sampling of the examples.\n", + "If too many buckets are specified the training will be slow because some buckets will be half empty.\n", + "As a rule of thumb: num_buckets trades-off speed with randomization.\n", + "
Low number -> better randomization, High number -> faster training.\n", + "\n", + "4. `length_func`: function to be applied to each dataset element to get its length. In our case, we can see that the `.json` manifest contains a key *length* which specifies each audio length in samples. This can be used for example to convert the length into seconds or the number of feature frames. So that `max_batch_length` and the bucket boundaries will be specified not anymore in samples.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xmNzc38_Hfow" + }, + "source": [ + "We can specify `max_batch_length` in terms of seconds" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "executionInfo": { + "elapsed": 164, + "status": "ok", + "timestamp": 1718826632486, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "cEk1NgP9HjwH" + }, + "outputs": [], + "source": [ + "from speechbrain.dataio.sampler import DynamicBatchSampler\n", + "\n", + "max_batch_len = 17*32\n", + "\n", + "dynamic_batcher = DynamicBatchSampler(\n", + " train_data,\n", + " max_batch_length=max_batch_len,\n", + " num_buckets=60,\n", + " length_func=lambda x: x[\"length\"] / 16000,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 7, + "status": "ok", + "timestamp": 1718826632486, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "I-KXWrvq_ZuO", + "outputId": "0fb5912d-6ca9-4c1a-f2e1-ca6a53d22e6b" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "11.98" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dynamic_batcher._ex_lengths['0']" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "colab": { + "base_uri": 
"https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 6, + "status": "ok", + "timestamp": 1718826632486, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "OjLGSB-IhD0K", + "outputId": "224bd1f5-c8cf-42c7-c54f-279ae8aa8726" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "41" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(dynamic_batcher)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 4, + "status": "ok", + "timestamp": 1718826632486, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "bwDemOO-U5G9", + "outputId": "4b3251e0-81ba-403b-d4ec-47a052e45b2a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "34\n", + "44\n", + "34\n", + "34\n", + "34\n", + "44\n", + "34\n", + "57\n", + "34\n", + "34\n", + "38\n", + "34\n", + "38\n", + "53\n", + "38\n", + "17\n", + "71\n", + "38\n", + "34\n", + "16\n", + "34\n", + "34\n", + "35\n", + "38\n", + "34\n", + "30\n", + "38\n", + "34\n", + "34\n", + "8\n", + "30\n", + "44\n", + "34\n", + "38\n", + "53\n", + "38\n", + "26\n", + "71\n", + "38\n", + "34\n", + "34\n" + ] + } + ], + "source": [ + "for b in dynamic_batcher:\n", + " print(len(b))" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 9886, + "status": "ok", + "timestamp": 1718826642370, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "qhlzdqqNUKTL", + "outputId": "8e1f986c-7804-4435-f140-e68608e47141" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "511.5\n", + "515.8\n", + "509.3\n", + 
"506.6\n", + "491.4\n", + "345.4\n", + "516.5\n", + "506.6\n", + "514.2\n", + "479.3\n", + "478.4\n", + "195.0\n", + "74.9\n", + "501.8\n", + "514.2\n", + "328.9\n", + "510.0\n", + "514.6\n", + "270.3\n", + "514.0\n", + "517.8\n", + "519.4\n", + "507.3\n", + "505.9\n", + "508.4\n", + "467.8\n", + "517.4\n", + "511.2\n", + "514.0\n", + "424.3\n", + "512.6\n", + "503.3\n", + "241.4\n", + "510.6\n", + "506.0\n", + "512.4\n", + "512.5\n", + "508.0\n", + "516.7\n", + "489.6\n", + "513.6\n" + ] + } + ], + "source": [ + "for b in dynamic_batcher:\n", + " print(\"%.1f\" % sum([train_data[i]['signal'].shape[0]/16000 for i in b]))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NfewXTPxuHI9" + }, + "source": [ + "#### Using `DynamicBatchSampler`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6pMNyqs-vvAj" + }, + "source": [ + "Once this special batch sampler is instantiated it can be used in the standard Pytorch way by using it as a DataLoader argument:" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "executionInfo": { + "elapsed": 2, + "status": "ok", + "timestamp": 1718826642370, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "mwyIQz5ROaw5" + }, + "outputs": [], + "source": [ + "dataloader = DataLoader(train_data, batch_sampler=dynamic_batcher, collate_fn=PaddedBatch)\n", + "# note that the batch size in the DataLoader cannot be specified when a batch sampler is used.\n", + "# the batch size is handled by the batch_sampler and in this case is dynamic" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 429, + "status": "ok", + "timestamp": 1718826642798, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "1YWQI8bUwN2v", + "outputId": 
"be2ff8ef-b5c5-41a5-cbcb-57886a57926a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([34])\n" + ] + } + ], + "source": [ + "# we can iterate now over the data in an efficient way using dynamic batching.\n", + "# our DynamicBatchSampler will sample the index of the examples such that padding is minimized\n", + "# while PaddedBatch will handle the actual padding and batching.\n", + "# everything happens in parallel thanks to the torch DataLoader.\n", + "first_batch = next(iter(dataloader))\n", + "print(first_batch.signal.lengths.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 4, + "status": "ok", + "timestamp": 1718826642798, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "jXOYNdw4wvD-", + "outputId": "b35f4e40-14e9-4e50-ea7f-29d19749771a" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([34, 255280])" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "first_batch.signal.data.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 10431, + "status": "ok", + "timestamp": 1718826653227, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "bY0AwDiSxdVE", + "outputId": "164f7843-d935-4d27-ef03-f57356cd2007" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "With Dynamic Batching: % True samples 92.1, % of padding 7.9, Total time 10.38s\n" + ] + } + ], + "source": [ + "percent_true, percent_padded, elapsed = count_samples(dataloader)\n", + "print(\"With Dynamic Batching: % True samples {:.1f}, % of padding {:.1f}, Total time 
{:.2f}s\".format(percent_true*100, percent_padded*100, elapsed))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5AuQKRKIxv-j" + }, + "source": [ + "**The amount of padded values is significantly reduced vs the fixed batch size and full uniform random sampling.**\n", + "\n", + "It indeed is close to what is obtained with fully deterministic sorting and fixed batch size.\n", + "The difference is that, here, with the DynamiBatchSampler we can still allow for some randomness in the sampling strategy.\n", + "\n", + "Moreover, by batching together examples changing the batch size we use our hardware at the fullest with each batch significantly speeding up training.\n", + "\n", + "We can look at the maximum number of examples that are batched together:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 4, + "status": "ok", + "timestamp": 1718826653227, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "2zjPhvgoZihU", + "outputId": "79856ee9-5d37-42a6-e837-69a76b4e2864" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "41" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(dynamic_batcher)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8guqeYr1Z9_q" + }, + "source": [ + "Using the DynamicBatchSampler with the current parameters we have 41 batches.\n", + "\n", + "While using a fixed batch size of 32 we would end up with:" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 4, + "status": "ok", + "timestamp": 1718826653227, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "EiO7KoM0Z1mi", + "outputId": 
"040c1075-52c4-48bd-8b09-70ec8156bd97" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "48" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(train_data) // 32 + 1" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1djPRPX7aKEc" + }, + "source": [ + "so more training iterations, with more padded values --> longer training time." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YxhMuUFeuSgx" + }, + "source": [ + "Another way to use `DynamicBatchSampler` straightforwardly is by feeding it directly to the Brain class as an additional argument via `run_opts`. In this case, the Brain class will implicitly instantiate for you a `DataLoader`." + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "executionInfo": { + "elapsed": 3, + "status": "ok", + "timestamp": 1718826653228, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "6Ig9zJi0ugjn" + }, + "outputs": [], + "source": [ + "## dummy Brain class here with dummy model\n", + "class SimpleBrain(speechbrain.Brain):\n", + " def compute_forward(self, batch, stage):\n", + " return model(batch[\"signal\"][0].unsqueeze(1))\n", + "\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " loss_dummy = torch.mean(predictions)\n", + " return loss_dummy" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 40473, + "status": "ok", + "timestamp": 1718826693699, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "-HdYfcSO_v1X", + "outputId": "6f73a392-3faa-485a-c009-f77a12ea66b5" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 1519/1519 [00:37<00:00, 40.24it/s, train_loss=-75.8]\n" + ] + } + ], + 
"source": [ + "model = torch.nn.Conv1d(1, 1, 3)\n", + "brain = SimpleBrain({\"model\": model}, opt_class=lambda x: torch.optim.SGD(x, 0.1), run_opts={\"batch_sampler\": dynamic_batcher})\n", + "brain.fit(range(1), train_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FrsjDadz0AP_" + }, + "source": [ + "### Advanced Parameters: Full control over randomness, training speed, and VRAM consumption.\n", + "---\n", + "Right now we have explored the most basilar input args for `DynamicBatchSampler`.\n", + "Let's see more advanced parameters.\n", + "\n", + "#### Controlling Randomness\n", + "\n", + "\n", + "Randomness in `DynamicBatchSampler` is controlled with `shuffle` and `batch_ordering`.\n", + "\n", + "`shuffle` is a flag:\n", + "\n", + "* if `true`, then dynamic batches are created based on random sampling (deterministically based on `epoch` and `seed` parameters) at each epoch (included upon `DynamicBatchSampler` instantiation or epoch 0);\n", + "* if `false`, then dynamic batches are created taking the examples from the database as they are. If the dataset is sorted in ascending or descending order this ordering is preserved. Note that if `false` the batches will be created once and never change during training (their permutation can change however see next).\n", + "\n", + "\n", + "\n", + "Batch permutation depends on `batch_ordering`:\n", + "\n", + "* `\"random\"` deterministically shuffles batches based on `epoch` and `seed` parameters\n", + "* `\"ascending\"` and `\"descending\"` sort the batches based on the duration of the longest example in the batch.\n", + "\n", + "This argument is independent of `shuffle`.`shuffle` controls if we have to shuffle the examples before creating the batches. 
`batch_ordering` instead controls the shuffling of the batches after they have been created.\n", + "For example, if set to `\"ascending\"` the first batch returned by the batch sampler will be the one with the shortest example in the dataset (examples belonging to the leftmost bucket); while the last one will contain the longest example in the dataset.\n", + "\n", + "\n", + "NOTE: when iterating the `DynamicBatchSampler` (calling its `__iter__` function):\n", + "\n", + "* dynamic batches are re-generated at each epoch if `shuffle == True`; or\n", + "* dynamic batches are permuted at each epoch if `batch_ordering == \"random\"`\n", + "\n", + "\n", + "\n", + "Note that also `num_buckets` affects randomization of training. As we stated before if `num_buckets`-->1 we obtain full random sampling as all examples can be batched together at least if `shuffle` is True and `batch_ordering` is random. Curiously even if `num_buckets` is very large we also obtain full random sampling if `shuffle` is True and `batch_ordering` is random as practically every example in the dataset is batched alone (we will have closer to batch size == 1 and very slow training, probably you want to avoid this)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2GR81Tjmeqxl" + }, + "source": [ + "Here we create the batches by firstly shuffling the examples (so the batches will be different at each epoch) but then sort them so always the one with the shortest example comes first." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 292, + "status": "ok", + "timestamp": 1718826693986, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "A1brcnzleTp1", + "outputId": "bc0a5a4f-51a3-44f2-ffda-b0043c343c3d" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([71, 120480])" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from speechbrain.dataio.sampler import DynamicBatchSampler\n", + "\n", + "max_batch_len = 17*32\n", + "\n", + "dynamic_batcher = DynamicBatchSampler(train_data,\n", + " max_batch_length=max_batch_len,\n", + " num_buckets= 60,\n", + " length_func=lambda x: x[\"length\"] / 16000,\n", + " shuffle=True,\n", + " batch_ordering=\"ascending\"\n", + " )\n", + "\n", + "dataloader = DataLoader(train_data, batch_sampler=dynamic_batcher, collate_fn=PaddedBatch)\n", + "\n", + "first_batch = next(iter(dataloader))\n", + "\n", + "first_batch.signal[0].shape" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "A07LSqX7e9PN" + }, + "source": [ + "We can use instead descending order" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 299, + "status": "ok", + "timestamp": 1718826694284, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "ONwRup3NfAzy", + "outputId": "2d3e34e3-ae7c-42de-f16b-0911fc1b3b5c" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([30, 276400])" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from speechbrain.dataio.sampler import DynamicBatchSampler\n", + "\n", + "max_batch_len = 17*32\n", + "\n", + 
"dynamic_batcher = DynamicBatchSampler(train_data,\n", + " max_batch_length=max_batch_len,\n", + " num_buckets= 60,\n", + " length_func=lambda x: x[\"length\"] / 16000,\n", + " shuffle=True,\n", + " batch_ordering=\"descending\"\n", + " )\n", + "\n", + "dataloader = DataLoader(train_data, batch_sampler=dynamic_batcher, collate_fn=PaddedBatch)\n", + "\n", + "first_batch = next(iter(dataloader))\n", + "\n", + "first_batch.signal[0].shape" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XqAasFaEfIrQ" + }, + "source": [ + "We can see that it now returns the batch with longest example." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vwXc_DTLJ4t0" + }, + "source": [ + "##### Specifying manually the buckets" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MXTgObmHJWYa" + }, + "source": [ + "The argument `bucket_boundaries` can be used to manually specify how many buckets and what are their boundaries.\n", + "\n", + "Needless to say, this arg will supersede `num_buckets`.\n", + "\n", + "Let's see an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "executionInfo": { + "elapsed": 2, + "status": "ok", + "timestamp": 1718826694284, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "npoZZCNaKZkO" + }, + "outputs": [], + "source": [ + "# trivial example just one bucket\n", + "dynamic_batcher = DynamicBatchSampler(train_data,\n", + " max_batch_length=max_batch_len,\n", + " bucket_boundaries=[max_batch_len],\n", + " length_func=lambda x: x[\"length\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "s9mYhtGiSH5q" + }, + "source": [ + "It is easy to see that having just one bucket in this case all examples can be batched together. 
Even the shortest ones with the longest ones.\n", + "\n", + "When just one bucket is used the `DynamicBatchSampler` will be inefficient as it will not minimize at all the amount of padding in each batch with a behavior similar to having a fixed batch size.\n", + "\n", + "As we said previously we have the maximal amount of randomness in each batch as each example can be batched with any other one, regardless of its length.\n", + "We can now see more clearly the trade-off between training speed and randomness.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4pYvO6cqTVTz" + }, + "source": [ + "Here, in a more practical example, we use `bucket_boundaries` argument to specify a distribution for the buckets, given the distribution of the length of the audio files in our dataset, which we have plotted before and has, a **reversed log-normal distribution**." + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 12118, + "status": "ok", + "timestamp": 1718826706400, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "xaykn4vlUFLc", + "outputId": "c3f0a8f5-a486-493a-adb6-b19e659a3bf3" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "With Dynamic Batching: % True samples 89.8, % of padding 10.2, Total time 12.07\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "[512.8205128205128,\n", + " 1025.6410256410256,\n", + " 1538.4615384615386,\n", + " 2051.2820512820513,\n", + " 2564.102564102564,\n", + " 3076.923076923077,\n", + " 3589.74358974359,\n", + " 4102.5641025641025,\n", + " 4615.384615384615,\n", + " 5128.205128205128,\n", + " 5641.025641025641,\n", + " 6153.846153846154,\n", + " 6666.666666666667,\n", + " 7179.48717948718,\n", + " 7692.307692307692,\n", + " 8205.128205128205,\n", + " 8717.948717948719,\n", + " 
9230.76923076923,\n", + " 9743.589743589744,\n", + " 10256.410256410256,\n", + " 10769.23076923077,\n", + " 11282.051282051281,\n", + " 11794.871794871795,\n", + " 12307.692307692309,\n", + " 12820.51282051282,\n", + " 13333.333333333334,\n", + " 13846.153846153846,\n", + " 14358.97435897436,\n", + " 14871.794871794871,\n", + " 15384.615384615385,\n", + " 15897.435897435897,\n", + " 16410.25641025641,\n", + " 16923.076923076922,\n", + " 17435.897435897437,\n", + " 17948.71794871795,\n", + " 18461.53846153846,\n", + " 18974.358974358973,\n", + " 19487.17948717949,\n", + " 20000.0]" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# number of buckets --> less buckets more randomness\n", + "n_buckets = 40\n", + "\n", + "# we can create n_buckets linearly spaced\n", + "max_batch_len = 20000\n", + "import numpy as np\n", + "buckets = np.linspace(0, max_batch_len, n_buckets)\n", + "buckets_bounds = buckets[1:].tolist()\n", + "dynamic_batcher = DynamicBatchSampler(train_data,\n", + " max_batch_length=max_batch_len,\n", + " bucket_boundaries=buckets_bounds,\n", + " length_func=lambda x: x[\"length\"] / 160)# length in terms of 10ms\n", + "\n", + "dataloader = DataLoader(train_data, batch_sampler=dynamic_batcher, collate_fn=PaddedBatch)\n", + "percent_true, percent_padded, elapsed = count_samples(dataloader)\n", + "print(\"With Dynamic Batching: % True samples {:.1f}, % of padding {:.1f}, Total time {:.2f}\\n\".format(percent_true*100, percent_padded*100, elapsed))\n", + "\n", + "import numpy as np\n", + "max_batch_len = 20000\n", + "n_buckets = 40\n", + "buckets = np.linspace(0, max_batch_len, n_buckets)\n", + "buckets[1:].tolist()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uqYwDqcDW4GE" + }, + "source": [ + "*However*, having linearly spaced buckets when our length distribution is not uniform is sub-optimal.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"0dJX0aJvp1zc" + }, + "source": [ + "Intuitively one better way to generate the buckets is using an exponential distribution as we can employ coarser buckets for longer examples.\n", + "Indeed, more padding for longer examples has less impact as overall the examples are longer." + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 10923, + "status": "ok", + "timestamp": 1718826717317, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "9rnF21yhqjUF", + "outputId": "7f0d5533-a3f3-49c3-edd9-a4e599dd908e" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "With Dynamic Batching: % True samples 94.0, % of padding 6.0, Total time 10.81\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "[200,\n", + " 240.0,\n", + " 288.0,\n", + " 345.59999999999997,\n", + " 414.71999999999997,\n", + " 497.66399999999993,\n", + " 597.1967999999999,\n", + " 716.6361599999999,\n", + " 859.9633919999999,\n", + " 1031.9560703999998,\n", + " 1238.3472844799996,\n", + " 1486.0167413759996,\n", + " 1783.2200896511995,\n", + " 2139.8641075814394,\n", + " 2567.836929097727,\n", + " 3081.4043149172726,\n", + " 3697.685177900727,\n", + " 4437.222213480873,\n", + " 5324.666656177047,\n", + " 6389.599987412456,\n", + " 7667.519984894947,\n", + " 9201.023981873936,\n", + " 11041.228778248722,\n", + " 13249.474533898467,\n", + " 15899.36944067816,\n", + " 19079.24332881379,\n", + " 22895.09199457655,\n", + " 27474.110393491857,\n", + " 32968.93247219023,\n", + " 39562.71896662827,\n", + " 47475.26275995393,\n", + " 56970.31531194471,\n", + " 68364.37837433365,\n", + " 82037.25404920038,\n", + " 98444.70485904044,\n", + " 118133.64583084853,\n", + " 141760.37499701823,\n", + " 170112.44999642187,\n", + " 204134.93999570623,\n", + " 244961.92799484747,\n", + " 293954.31359381694]" + ] + }, 
+ "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# number of buckets --> less buckets more randomness\n", + "n_buckets = 40\n", + "# we can create n_buckets linearly spaced\n", + "max_batch_len = 20000\n", + "import numpy as np\n", + "batch_multiplier = 1.2\n", + "buckets_bounds = [200]\n", + "for x in range(n_buckets):\n", + " buckets_bounds.append(buckets_bounds[-1]*batch_multiplier)\n", + "\n", + "dynamic_batcher = DynamicBatchSampler(train_data,\n", + " max_batch_length=max_batch_len,\n", + " bucket_boundaries=buckets_bounds,\n", + " length_func=lambda x: x[\"length\"] / 160) # length in terms of 10ms\n", + "\n", + "dataloader = DataLoader(train_data, batch_sampler=dynamic_batcher, collate_fn=PaddedBatch)\n", + "percent_true, percent_padded, elapsed = count_samples(dataloader)\n", + "print(\"With Dynamic Batching: % True samples {:.1f}, % of padding {:.1f}, Total time {:.2f}\\n\".format(percent_true*100, percent_padded*100, elapsed))\n", + "\n", + "# number of buckets --> less buckets more randomness\n", + "n_buckets = 40\n", + "# we can create n_buckets linearly spaced\n", + "max_batch_len = 20000\n", + "import numpy as np\n", + "batch_multiplier = 1.2\n", + "buckets_bounds = [200]\n", + "for x in range(n_buckets):\n", + " buckets_bounds.append(buckets_bounds[-1]*batch_multiplier)\n", + "\n", + "buckets_bounds" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 5, + "status": "ok", + "timestamp": 1718826717317, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "ioyIneckuE1_", + "outputId": "9b8bcd54-d640-499f-8b16-517a3817f6e5" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "115" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(dynamic_batcher._batches)" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cgRNd2latc2a" + }, + "source": [ + "The amount of padding is reduced by using a more appropriate distribution.\n", + "\n", + "---\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "executionInfo": { + "elapsed": 8850, + "status": "ok", + "timestamp": 1718826726164, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": 240 + }, + "id": "RtHZft5Vg010" + }, + "outputs": [], + "source": [ + "lengths = np.array([torchaudio.info(x).num_frames for x in all_flacs])\n", + "from scipy.stats import beta\n", + "lengths = (lengths - np.amin(lengths)) / (np.amax(lengths)- np.amin(lengths))\n", + "lengths = np.clip(lengths, 1e-6, 1-1e-6)\n", + "a, b, loc, upper = beta.fit(lengths, floc=0, fscale=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HHAEaTMc9rg7" + }, + "source": [ + "## How to find good hyper-parameters and speed up training with DynamicBatchSampler\n", + "\n", + "\n", + "Training speed largely depends on:\n", + "\n", + "\n", + "* `max_batch_length`: you want to set this as high as possible without getting OOM errors.\n", + "* `num_buckets`: you want to avoid too low values and too high values for this parameter. As said previously: too low values and shorter examples will be batched also with longer ones, too high and almost all examples are batched alone. In both cases, your training will be extremely slow.\n", + "\n", + "\n", + "Finding a good value for `max_batch_length`:\n", + "\n", + "\n", + "1. Sort the dataset in descending order, set `shuffle = False` and `batch_ordering = \"descending\"` and do multiple short runs increasing `max_batch_length` till you get an OOM error. Choose a value slightly below the one that leads to OOM.\n", + "\n", + "Finding a good value for `num_buckets`:\n", + "\n", + "1. 
Without using `DynamicBatchSampler`, sort the dataset in descending order and find the maximum batch size that your GPU can handle. Look at the estimated time and number of batches for this configuration given in the very first iterations.\n", + "2. Sort the dataset in descending order, set `shuffle = False` and `batch_ordering = \"descending\"` and `max_batch_length` with the value found before. Start with a `num_buckets` between 10 and 20 and do some guesses by doing some short runs looking at the estimated time and number of batches for each configuration. Choose the value which gives fewer batches than the one in step 1 (without dynamic batching) and whose estimated time is lower." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gVJiv2igqSyb" + }, + "source": [ + "### Dynamic Batching with Web dataset\n", + "When working on an HPC cluster it is crucial to copy the dataset to the SSD of the local computing node. This step significantly improves the data-io performance and avoids slowing down a shared filesystem. In some cases, the dataset could be so big that it might not fit into the SSD. This scenario is getting more common these days with the adoption of larger and larger datasets.\n", + "\n", + "SpeechBrain supports [Webdataset](https://github.com/webdataset/webdataset), which allows users to efficiently read datasets from the shared file system.\n", + "The proposed Webdataset-based solution also supports dynamic batching. For more information, please take a look at [this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/advanced/data-loading-for-big-datasets-and-shared-filesystems.html)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "B3jUpS4pxv3q" + }, + "source": [ + "## Acknowledgements\n", + "\n", + "SpeechBrain DynamicBatchSampler has been developed by Ralf Leibold and Andreas Nautsch with the help of Samuele Cornell" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " 
archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1Z5JmWionKAgTkWEbzLpb_kea6VEdRVAZ", + "timestamp": 1639958144782 + }, + { + "file_id": "19y3Z2moUYJA_ofvear6IG9LqpN1-uYYE", + "timestamp": 1639958094720 + }, + { + "file_id": "1SKvv_hO9R6vlBIb7_9fpQG6VBKQ8DJON", + "timestamp": 1639432761997 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/advanced/federated-speech-model-training-via-speechbrain-and-flower.ipynb b/docs/tutorials/advanced/federated-speech-model-training-via-speechbrain-and-flower.ipynb new file mode 100644 index 0000000000..8a9a4b4bbb --- /dev/null +++ b/docs/tutorials/advanced/federated-speech-model-training-via-speechbrain-and-flower.ipynb @@ -0,0 +1,1676 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/federated-speech-model-training-via-speechbrain-and-flower.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/federated-speech-model-training-via-speechbrain-and-flower.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "X4rk4cFW8x6o" + }, + "source": [ + "# Federated Speech Model Training via Flower and SpeechBrain\n", + "\n", + "Are you interested in both federated 
learning (FL) and speech, but worried about the proper tools to run experiments? Today you will get the answer. This tutorial introduces how to integrate [Flower](https://github.com/adap/flower) and [SpeechBrain](https://github.com/speechbrain/speechbrain) to achieve federated speech model training.\n", + "\n", + "**Important:** It is recommended to be familiar with SpeechBrain and Flower before jumping into this tutorial as some parts may involve some level of complexity. Tutorials are available for both toolkits on their respective websites!\n", + "\n", + "For simplicity, we choose a popular speech task --- automatic speech recognition (ASR) as an example, and training will be done with a toy dataset which only contains 100 audio recordings. In a real case, you need much more training data (e.g., 100 or even 1000 hours) to reach acceptable performance. Note that ASR is regarded as a case study; all other speech-related tasks can be done similarly.\n", + "\n", + "Apart from running normal federated ASR model training, the code also provides three other features to speed up model convergence and improve the performance.\n", + "\n", + "* Loading a centralised initial model before federated training starts.\n", + "\n", + "* Providing three aggregation weighting strategies --- standard FedAvg, Loss-based and WER-based aggregation based on [this paper](https://arxiv.org/abs/2104.14297).\n", + "\n", + "* Facilitating an additional training with a held-out dataset on the server side after aggregation.\n", + "\n", + "The details of them will be elaborated in the later sections.\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0TMVAXERs_sb" + }, + "source": [ + 
"![fig1.png](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEBLAEsAAD//gATQ3JlYXRlZCB3aXRoIEdJTVD/4gKwSUNDX1BST0ZJTEUAAQEAAAKgbGNtcwRAAABtbnRyUkdCIFhZWiAH6AAJAA0ACQAtAB9hY3NwQVBQTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9tYAAQAAAADTLWxjbXMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1kZXNjAAABIAAAAEBjcHJ0AAABYAAAADZ3dHB0AAABmAAAABRjaGFkAAABrAAAACxyWFlaAAAB2AAAABRiWFlaAAAB7AAAABRnWFlaAAACAAAAABRyVFJDAAACFAAAACBnVFJDAAACFAAAACBiVFJDAAACFAAAACBjaHJtAAACNAAAACRkbW5kAAACWAAAACRkbWRkAAACfAAAACRtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACQAAAAcAEcASQBNAFAAIABiAHUAaQBsAHQALQBpAG4AIABzAFIARwBCbWx1YwAAAAAAAAABAAAADGVuVVMAAAAaAAAAHABQAHUAYgBsAGkAYwAgAEQAbwBtAGEAaQBuAABYWVogAAAAAAAA9tYAAQAAAADTLXNmMzIAAAAAAAEMQgAABd7///MlAAAHkwAA/ZD///uh///9ogAAA9wAAMBuWFlaIAAAAAAAAG+gAAA49QAAA5BYWVogAAAAAAAAJJ8AAA+EAAC2xFhZWiAAAAAAAABilwAAt4cAABjZcGFyYQAAAAAAAwAAAAJmZgAA8qcAAA1ZAAAT0AAACltjaHJtAAAAAAADAAAAAKPXAABUfAAATM0AAJmaAAAmZwAAD1xtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAEcASQBNAFBtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEL/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wgARCAKCA3oDAREAAhEBAxEB/8QAHAABAAIDAQEBAAAAAAAAAAAAAAQFAgMGAQcI/8QAGgEBAQEBAQEBAAAAAAAAAAAAAAECAwQFBv/aAAwDAQACEAMQAAAB+qAAAAAAAAAAAAAAAAAAAAAAAAAAAAFQWZsAAAAAAAAAAAAIZrIZWHSEsAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5o1nUgAAAAAAAGg506A50tC3AAAPD1CgAAAADw9AKQgHVHHnYAAAAAAAAAAAAAAAAEIpTpTIAAAAAAAAAArSlLQmlIdMAAAADEwMTw9MzYeHGHYHKnXAAAGIMkBQBgcgUJWG0yPTpS4NZckQ1EYuzeAAAAAAAAAAADwAEUlHhANhvOPO1PQAAAAAVRSEIgpDqERzXUbN+lJsXpjmTqwAAADSazORWC5noNx4csTywJYAAMAenqehRBPlpYEk4A1npoO6OaB4fcShOlPTMAAAAAAAAAAAHhic6c6nOerx9dw793jtiUJqLwpjowAAAADE+Ebxi4SunePm1fTM7NkEBIHDp9ZO6OaLgngAAAFGWh6MmgyPDw3lAX4AAANYPT0J6sA+aFoWBXFcceSDAzLIiFaXp9pNwAAAAAAAAAAAAPD5eU/q8Nt7vlbd89GOvR/K+92OOg54syvL8AAAAA4w+dd+UTfLdWczlx9FZ1zfTXOZszjv7cWxoN4AAABGOcLUlmZ6enhgYE4AAAAGoA9PStPkpMJBBKsjE0rjQZSdDzXfJW6V3R9fm7Hrj1fTIyAAAAAAAAAAB4c4fNu/lvvqfD
9RUbj3675f3eozvw5o6cAAAAA8PlHTny+7LvDX25YJJz6Ki8bvG6/j3v+fT7AAAAAACtOeJhJJBuMgYmstAAAAAaEHq+kbnPnnFw+2GkRdyZ5bYsMugi84LfjPcoWFfhx/1b9Dduh9fHIyNi7gAAAAAAAAAAeA5c+fejx3f0/ie0iLx9PcfL+5fzQ9AAAAAOYOA9XjpdWbrEzGK3Wt+/VlfDP49Zvl9l5NdwAAAAACrOcJZJN5vNgMTWW4AAAAIyeGRgvJ+JC+ZnXGuMJPdXD3Jfr62vim7hzgYRso5FzK/rZPvvW8159RlGRuJKgAAAAAAAAADwA484T1eC++j8dSIvm9nf/N+1cL6AAAAAfJdYr/d82JW5JPO68bg6WGd1/Pvjx9HYR9KoAAAADwrTmCWSTeSDYDE0l2AAAADWAVPmmjk0+KRj3DPo1fRvmY8mnDMLCkzMF8y11T/RveYnB+q/YvQsNvTNdgAAAAAAAAAAPAAcMch7PnXf0Pko8WB8f9B9Sx2mAAAAAoT5v6vJ0U8Wj148sy4dfOHozlr9Z5Xj7Z+dwj7iTwAAAAYFcceSiQbyUZmJoNZ1YAAAAANWXG/Fz8x+tY2nmXmWZa+Kdz41p1c11ch6be/NmXNr017VX0LJks/oX6JLe0AAAAAAAAAAAB4AAfPzm/d8u39vzKv532dvl931QkgAAA1kcoD5gQNYm9fPb9PPcbk7x/U2dfHWa5cH5/oxo1H1c+ggAAAA1EQ4gzBsMzabT0knUgAAAAAquE+f/Azl0ROli9EfpdaWXlWMNOe+5dnRF+Ys/BK71sPbZPglb9K9x2dwtkAAAAAAAAAAAAeAAA+cHLdfPYcvR9HLk9AAAKQqDE8MDSVRTFSQUsdYzs046yTcdud4AAAADWRziTWemRmbTYelodEAAAAACr4z5t8yUnpQut1x5Fn55rq78CN7Wz6yStR4zCXE+MO7ifU7k+rLmAAAAAAAAAAAAeAAAHz8tzqQegAAA1FYVRHMTw8MTAriqJZ1Z0JNAAAAAMSKcwVBkemRsMz064ngAAAAAxj5d45ymGdWJrjdxkWpXlaPps/ZZhznOyC16LotTYdCWQAAAAAAAAAAAAPAAAAADXZtlAAAAEMrCuNZ4pPItS8raAAAAAAYmJVnHmkyMjIkF+XRvAAAAABic6fMCkjwyr2M4xy0dJ0BJO8W+JpkAAAAAAAAAAAAAAAAAAAAQ95yJOL6oAAAAEIrCQXAAAAAAAAPAYmogGkG4kkozPQAAAAAAeGo1mk1GB6ZGZYGQAAAAAAAAAAAAAAAAAAAAAB4Q+mM5ZWNAAAAAAVPbnbcegAAAAAAAAA0azvzoAAAAAAADTqbs0AAAAAAAAAAAAAAAAAAAAAAAAAAAAQ95EzGgAAAAAIm8y8aAAAAAAAAAg9MTuewAAAAAAK3riTm5RvzoAAAAAAAAAAAAAAAAAAAAAAAAAAAADCyPqekrGgAAAAIm8y8aAAAAAAAAFT2523HoAAAAAANGsx9SbjWUoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGjUi7zY8tgAAADVZE6ZsOWwAAAAAABW9cWXLYAAAAAGjWYXTNlx3koAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA12RtTdG/OgAAAIXTEznr1QAAAAABG3ndm5ygAAAAV3XEjF9JGdAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADXZA6Ys+XQAAAeJo1JGdAAAAAAeETeZmNAAAADVZD6Zn8tZKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKvtzs+O/VAAAreuLLlsAAAAACq7c7Xj0AAAAj6zG3JvPWyUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACLvOJMxoAACt64suWwAAAAKzrzs+XQAAAQOmN0ok40AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVdudrx6AADXZjW7NAAAAi7zIxclA
AGFlf0xZcunoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAK3riw5ayUACv6YsOewAAB4Rd5l40ABG3nTZJzrdmgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa7NkoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEMzPDAA8PQAegAHh6eAAA9MjElnoAAAAAAAAAAAAAAAAABFJJ6AAQyYAAAAAAAAAAAAAAAAAADQfDrOyJRxpzZtJXGS/Nmm9m5lkvVhZmw8PDUWmrVybo9NNso1yW1dHb4cmfaJZ4AAAAAAAAAAAAAAAABpKohlKdQTj00FKAdUegAAAAAAAAAAAAAAAAA0Hwqy1LgvQc7WJ6ZGR6enpXROrEwMDEwPAdDGZqK85M+2yzwAAAcyeGwgliaTSSTYaCKTjMlEE6MAAAhdMbs3XnXhrIZgbDMoCzNxENJYloVxYEsAAAAAAAAAAAAAAAAAjnwKzI0n0ElFDWs9PT09PTWUEXFbzAxMTE8PToI1mqoRy8fdZbEAAAHIGk5w6UzOXLkkmJz5aFmQylOgO3AAAKvtz5mWw5bzMiMeEgpDceEogmJfGROI5OJJuAAAAAAAAAAAAAAAAI5+f7L0sK1lTFYYgyNp6eA8APDAHhiZFgWNbiJFOfdJbEAAAGkGoyMiMSDw0A3HpqJANwAABXdcQJeb56nGokFmCOURHBtLEuTcbSMQTcYFpnjMu992AAAAAAAAAAAAAAI58BstjScwDYYAEnMarM9rwHkuemqM5Fal2niYLbVMK8+6yyz5SdiAAAAAAAAAAAADoSSV/XELNo8aiEstCSZEQqyvIJdE0tCWbTSQivMi9zxl3ptugAAAAAAAAAAAAABoPh1nZlkc4cuaiXxk3zyn9epNknViZmw8PDWT9WvjZJ6abZBlJeW3VRzjz7TLPK8sAAAAAAAAAAAAAAVHfnzsufHdeWBMNJIKQllKekw3FuZEw9NRAKs6rHnmXpL10AAAAAAAAAAAAAAGg+E2SiedaSDna1np6ZHp6CpixoYGBiYmJkdHGBqWvTlD7dLYAAAAAAAAAAAAAAArO3NjXKY1ONhqLojEg5U0k0gl8SC7IpoPD0sTVnlLvTK0AAAAAAAAAAAAAARz8/2Sz07EkFHWs9PT09PTEootq2GJiYmJ4ZF7GBrqCcxH3iWxAAAAAAAAAAAAAABT+jnRZWfHpONBBNhqKs8LI3lWYliZlyaCUTAAAAAAAAAAAAAAAAARz4BZck6oMUxDMQem49PADw9PDAyPDAzJJZk2o5Vx9zlsQAAAAAAAAAAAAAAeHoI5rIxRFuVRvLM8MDmTozM2FoZAAAAAAAAAAAAAAAAAEQ+OpsqKV8KxMQeGeWGmWSkKR5QzhWJsMjwk1MPI+tLZgAAAAAAAAAAAAAAAAAwKIFsADWVRPLQAAAAAAAAAAAAAAAAAA8Kg2HpgYHgAPQDwAHp4eg8PQZGw8NZamYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPAAAAVZNN4AAAAAAAB6AAAAAAAAAAAAACqMC4ANBvBVm4nAFWWgABrMjIEEnAGk0E0AAAAAAAAAAAAAAAAAHyY+jHMF2UxckUrS1MDqQAAAAAAAAAAAAAAAAADijoz5sdMekEkFMdMCsKM6cil2UxuIJNPTQWJSlqUhbkciHVl8AAAAAAAAAAAAAAAAADmzhi0LAvThCabjUfQjeAAAAAAAAAAAAAAAAAAcobiKWYK0mnNnUlYZGolmowNRcEMtzM5ktSrJgBNOdOpLMAAAAAAAAAAAAAAAAAHMF+UBcEgglocuW5YnoAAAAAAAAAAAAAAAAAAAAAAAABpNwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABFIRNIRYHKF+VZ04AAAAAAAAAAAAAAAAAAAAAAAAAOVOqAAAAAAA
AAAAAAAAAAAAAAAAAAOLI5rBXloVpsPpQAAAAAAAAAAAAAAAAAAAAAAAAAOVOqAAAAAAAAAAAAAAAAAAAAAAAAAIgBTlsQTwkFoAAAAAAAAAAAAAAAAAAAAAAAAADlTqgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD40fXzcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAV5qAAAAAAABqNoAAAAAAABmWQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKotQDEAyNZsI5IABHJBgZgAAAAwMwAAVRagAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqi1AONOdOkPAbzni8K4uAc0fTT5cdiVpkYG82kAsjWVJIJQLAvACqLUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFUWoBzZTm85w6AoCxL4hFMWZEO/OGKQtCpL4siEcsXxJPSvLEsj06cAqi1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVFqAAADmi1LAAAArSuNRdk0HKHQkoAAAAqi1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVFqACIUJYFyClI50J6AYFYWwAKUugAAUxcgAFUWoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKotQAceaizOCOkNZUm4ujij6+bT5oWZGJZalOZlcSzwhlobjtQACqLUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFUWoAKEhnTnKE0jlmaCEcIdidicwZHpuJxEIpELA1GZiaC7LMAFUWoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKotQAAAAADw9AAAAAAAAABVFqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACqLUAAAA5YrieRixOVPpZxhekAsT01Gg5Y7wnAAAAFUWoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKotQAAACoOMIxOLg5I745w2HNnVFEXRHPS+OhAAAAKotQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVRagAAAAjg1HpuMzUeEczJJFMyuLg2gAAAFUWoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKoAAAAAAAA1G0AAAAAAAAxLcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5U6oAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5U6oAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGBxR3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMT5efUgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADQUBoL82AxKAkl+ZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEHpidz3EBEKsmEciFoelYSDcSiQTwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADVqVHfnz3m6dAvhXm09KkEk3EE0nREoGwlkgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwsq+2KXhuEuRLJ5kRylIh4Si3LEsCIQTYaixzxn3e67AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHhUejnXefpVGBaE83GkoCqPSzL03FgaCOVZiX2eM69Nl0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANWpQ+nlT+TrrWQSDcaysMiAek4jl6WZkaCGVxdY81nesvXQA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeJV98UnDpIMjQSjaRSsMTYaSzLIzPDElE09TJQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMLKvtj3hv1dJpMCSRilLIyKsticWRCJxvAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIpWkArS/JoIxzBfFiWQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDNxuABgQyeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAc6SDnjqSQaDWWpVArS0JZiQyAXxCNhgTDkTpyvLAnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHJFEbSaVBPI59IOGNZz5elgVRaGJtK4mGk5U6Q1GR2BZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEQ1mwryYSSiOjIhVFiQTYDMxJoBWlgVZMMSzNgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB/8QAOBAAAgICAAQDCAEDAgUFAAAAAgMBBAAFERITFAYVMBAgISI0QFBgMSMkNRYzJTI2RnAmQkNFgP/aAAgBAQABBQL8zY2VOu9ZgwfubVlVRVa7WtZb1de044va3KthVpP5t2vs9VWxah/2LyME+b9HI6Vhb9X0S1t2Li/tePx9k/HLmrq2sqWX1LOFHlm1/A22sUkdwAFExMfa3aSbolRt1M118LkW667aNO5gz6UzwzjPt+OQXsmImLKJ1BrMWBtI7K965mIDY8Q1Vy7xK+M/1LbnB3OxPJ3V8c/1BYDNXvBsB5vSyN1QmUvU+NnUi7U1FmbdDxAvqamuzqo+8KwkT9lu4inFeymyLAFgcs6V8erc2FanJeIaUYzxKiMLxPOH4lsYfiK7ORv78ZO8vlh7W/M+HbbHVdyokYpgtVsP6O59Fh8uR7ZmB9kfz/HsmImNHxQW7X1dTTKTqe9P8+9ctLqquWHbBtaqC4uH0ak90uvXa5Muk3SKuq8wqpq/HDhZFSuspWa1hdlWo+S+wBYsBgA+5KYGLO3Qubtx5gFcyzUXyUeTETFvVV3TQuNixYSNhGhaR0fTIoEb75tWb1WKsM1wq1uvpRZwgiX2kqUvV0wslcBY2Yzwqr+kQwQ+HZ4a+1VCzPoizndHuxGTi5+OBXYO89Cf5925aCquwxtx6EQuIjLXz2d09ZJGMKeVXHpqRxZPTheGQEQDzr02ydShJESvu/EFoyvoR1IBYh7Grhg6fYTM+zd1pdUo2It1NfXYm/6fiW106yqxWXISVl1xUpbYrMrp1+vbdxw8rS1Tl0f/AG0kdY9Ujt6GKStPpWS5ULnFtzmic4+zjnHJnIn5/Sn+fcv2O2qm8YyrbBaG7Ra8LbNnGudYMUlEiAjDp4kJrYYrVz1ta98K0M82x1/anWYT7dR/OPHOPsj7jb60boKZNd0Txj2OX1I1Gw68ezQDK6vpz8I2JSwjSalV0uFalNsMty3nTYt00Dxk7d6zZVaqmlWkRzersJ/tgnBnInInIn2cfZx+PpF/Ptc8EjtjK4R0WRPRfGdF/GEvnIqNwKzoIK1lhI8PkWBp6apHpLwnYb8N+Uj4Xtc0ZdxzjnHA/n7jca2LQpZKS9rVzM6q/Ftfq+ILXbUGwblNguPXM0V3duBfGbljrxrS6bOHcW9tZTKvD6o5vU2X0w4ORkZHuT6Zf83sacLXYg2508ledLOlnRnJEYDoh0qUdsk7GG/JdhOwmZYbyq01KW10iNZn
HOPsX/P3O81ncDVfyz7TEgZrbo3U+ptH95tBLhnLi15yY5fAYXJT0YDLBkvKIcW6W3AXfU2P0g4ORkZHuSXCfSkeM8mcubEwBXUUwiAckIzkjJCIEpHg/pPTwCA6DTx4QvCKc+OcfZdnnKbgUKVi/d56HB9LpRnTjBHh93vdV1MqWPbPwyuTFvquixX9LdXOzpUOEJrQrOgJl28DPbY2tzYNb4MiRl5dRqjFdSsfKVF/cVfTv/GoM8MFudUZwWDkODOsGS8clxTi/i71Gn01sjugdXch3UfGdxYjO7s53dnKDHvKtwCOqcRsdryROwfxWyDD2EXCKM9W5aOW2DSLK+jCV6r77e6rKljmjGH1JrKbYdXUKE+hJjGFZVGbI020voOCeraRitq4cVuF4q7VOS/qYHyhsWdOt/AtmVoWzPDFrj6c5YjmT7Y9ke2r8bHqbSeFEZkckynCWoslCMJKsha8BvLirR81txHPNXHG1lsGhPCJnG2QHO2s2YWAoAZ57c/BWtjhR+/3mr6ci8iA45l6ynFRHoPS/jP88PZOSsSx2vSzGaiMbrXhn9dMq2tgctXCswpBOBlAnNjURGaXVdBvpTk44em32xkezjmtHiXqbOONG0ljaays8s2rQ53rc72clnJXC2HEP5uz8KgRw4QmwseF4kcZUQrwKr3zf1xIqVy4NSPWlY8gfgL+iIn6vV9qXpGsWQyljFGv2cM4ezhGSuJxtBLMLUlxqU5WaNcwsRVUj1WRl4eORPt45E5xyOJlXGFr9Qhgh29NmvyNmeeaRgMFq+apjmVHLFVUZ6sZMjZKtb6I8/O3iRGKWHitc88VpGlgaIcHw5SialFFT8N1B6npMrKZjKRxhgQezhk8oQvqOxeuksWsFj605YRzw9LUyJcY45xyJwQMsrJ5MGPVnLyrDBtaaxzFr3DnRaOcJzjnNhH8AZIGOwGcQp+zOtp1LhdVQZAxH4qxWB8ScV0RMTHpsqqPD10yStcgJ+y4YS4nDorLPL4zy+Ming1xHIHh9hwjJWE5NdU5NNE55fWzy2rnltTPL6mDQqjgjA/jpiJivXFBKetperEmm99p1Q632ItAy/NNrrYx1gEn6tZ8PD7OojoD67rgLNyuupKVpH82YwYwMVq9d4PH0xSIP+yqAZs9ZxytdfuJmBiJ/PwoIb3UC/0mhDV0ep0fsK7ye71YcsmuQ1zBiBH9CYAsGy00wloOX6NtpIUMwUet1QJwCID6b7a1EQ9ZCUrSP6Nw5Rr2eY/QmImFQtI+tVRCA9Jpci6/ckfCOP6U9zUMieMe/dSRx6k/xUWcn6PXX1npa41hCw/TbHV5KzpaPv8AWILvp1nlYb6Fi0tMxxchCFoH9RUVhTfeIBPFMFsejDgJ4xAx7xzyinuWM4Rx/VLIvgx4yPugiV2/Qn+KyIQv3ZeqG2EsaSVilf7bADB/q/dV87hOdyiM7utnd1s7utnd1s7utnd1s7uvnd187qvndV87qvndV87qvnd187qvndV87qvnd187uvnd187utndV87lGd1XyJ4x+FfYSjImCj3XWq6C/DP8A9if51sRNNtYGDY0pjLazFFVji6OhFjjr8Z0+efp6o/0VT/SD4JX8ErngnjwSY8KyPlOPg+Pg+PjZZ8rVjJWF642MVUBY2hGK2UPo/wAGxoLy1sBQOtbHUDYHVtxZnh/Wzg/GMNc3dkysGqYvo6+x0BiYKPwlj/Ymfiq6SFK2ITMHxwhA4MQgvlz5c+XPlz5c+XPhneI5uI58M+GfDPhnwz4ZHLxAQAZLJLLZf2/HNf8AReh12f6jPeUgzZ2FnrL247baw4T2rN3TAtmf/FEPBdmttq1iyzd0wLZbeKt8ngzYWdtVrunYV4oL3NJlj32P5WJbDYmwvjzOLDDgJfIFWsJsstWxjBTsKdayKy+cpatqxY+UroJXZsvctrlmu0iVlXIXfN+DsfTyccTiSjt2ExM8q4P4GXzc2c2c2c2c2c2MnmX1
eSnX4gnmzmzmzmzmzmwS+MH8snknlov6HPGa76H0P+69AA9r/wBl7JgJ35FBeJrV9tzVH/u0nBX2GiYp77V9tzVPMVlaIT3+jsoq5Ef+mN4Ijq/f4kh2zq2XFXkZDqqLGpQjIQDWHDXFThuui3TsdYwicUxZCaUokuk87iTdlWWa9KK9pcBYOYJqlBwMIWwT/BWfp5iMpl/bweHZBeN2sRnfEUzemM7+c7+c7wuHeFw70uHelw7yeHelw7wuHdlkWynO6Lj3RTPdFxiyeL2M4FkDzmywX9HhHDXfQeh0l9ValrjtkdF9dNgYSqDmlVlnRVk10ziqVVRzSqyxldLVQhIy6pXeRqWamKWwffk2tczYhWJsjJFbmYkqEZ1IjE3a6FLe68U7Za5Ll6vXmDJ1IM6oFlW2ujiXPuDXvLIWOqMibawiDqAJPUxfmikQBOcKT6i/vrH08gXGup/QeuzyT8Z4RgjJSQcD4ZwzlnpCoyEI+SBnpiM9MRnp8OC+kYisZiYGecRnqck88CQSIz1CiedAWOQ1vlXKea74UWn012PElg7vnOec55znnOec55znnOec55znnOec55znnOec55znnOec55znnOec55znnOec55znnOec5Wb10YSmC1uuFuE8gk5cqV2a7M6jasRViyPbNrkeqAoJ5HFiXRC7Cn4JODFJjZiNZ9fEVAWEJIJOwScJ6zc9thJqqjsMFTVwoOmv75/+xP8AOsn+zIAOLGmUeWNW5RV0NhoQ5T+u/GqssLpWOhXQwFLTZFYJsCoE2AUtNgFdGyKSUzoKS9ZQt8Ohbod0XS8wZDE1rBvVrfnBS1xcn+2yh9FlilXsM+85Is2NhZs1m/8AxCkYiLDmQgYWbnvUdMPMMdatBZPhyGuOBON2KGBy05lfKSovpq2LPL/WLOqNeGtayBr80WbDaza6oeqqUkn76x/sTPx7k1gra8hAzmjm+DC+fmzmzmzmzmzmyT4RFxpALYMebObObObObObBn5oL5JLJLLZf2/HNf9F99a6EHDKyKh15fIQtuO7yVAtTAqvEZvRTF3PVr0DXywARYl8W3SHK1mvIKzrfZDZRCwQdmJHqAvOm5mdPrYpIrwqwSQxAx99Y+nk/jCTaHYGRrnlCD+Bl83NnNnNnNnNnNhTzBIt6Ko6a+bObObObObObBL48/wAsnknlkv6HPmu+h++FgIt2KR3rIjKiIZfBhaES60DNOLKaBhRLsDuWCOScIy6Dr2Zi1MkJVmbOvr2qoopVOsHBSJGfnhPGfwVn6eZyoX9DqRGMvrDG7JpZFhs5NpsT3Ls7ludwzl7hnCLDeXuG8sWW8sWGcvXZy9c8F7JnrHxhxyXWPjDTxdxoyF4ZyGQWPL+lx+Gu+g++mOPtYlbJJbIxwnxOz2B95Lyv7SOlUXwDtuMRTmM6UxjLXYWO7l+eYLIFVzPBGBH8HYgiSWsdgULQgzX2pjyuxnltjPLrOeXWOPl9nPL7Gdlb5Oxs5FO3A9na5Ip2oGKdqB7K3yRRtYFG0MxSt80UbfN5fZmR19qMjX2uadbZmQ11mM7C1IRrX5REwq/hDETFQHr5rUBJvQXnQXnQXnQXh1EnFcW68KFeUj+Fn+OhkK+HSzoRnb52+dvnb52+dvnQzt87fO3zt87fO3zt86GdDO3zoZ2+dvGdLJVnQwI4D+qcM4e5wzhnDLN6pWNLFuXwzhnDOGcM4ZwzhnDOGcM4e5wzh97e2FellHY17pe1LVvD2OuQu/WtBYL2xcGdl7rDFYCUGPsRaBz/AGvZ0k0rEWqv4TWlTVskqrUKsbe61PmQFqKW3t3Z02w8xq1NuLo84u9vd2op1Gt2Fm6/8Lr4h3iK+0atWdmwKt1tt24pTZ7rwypg63RDdtVrd6auXf8AqKib7ea59iHeZsmnZs2LdvXrcrxL4he2trLfe60rc27G8uXpG/qtrEV9rZVsNOfXpeG1JthTq3uTahad1ZLYjptxsSWWr2E+YBF+
7qtD/h/wliNbtK+u6rNBqapWdayqqp4Z0f8AiLdjybZPpFX8H0qc2dduK66vhZHwR+Fua+xF9dW5YXXTtq6TqHO5TUMNtq6VymNGiatKeq2J66xUNm18vtBW11O4qvWrbWom/Sslao0bYbXxV/hyobC0wahxubNO0vYUu65byO6pzRvu0u0olc1kUr7ro0GwxlBpaC7QceVYvGa9bsU1NYgqtD8IzQ61jEJWhTdDrmM7NEU0KBCbuvq3SmOMHoNaRto1m1IjhH5NyluD9rdYSiQ2VI11rCbKz2VIHOctCz2Iv3FjYVKzN3YNdf8AKbH/ADv5PeKB2yZURPipHFNuomn/AKVL569laA8Uaxdpx2qx09N+U2P+d/Jtrqa3tld2uspb50mvltqqi0mvqqddlnVU7Luwrdv+U2P+d/b93WuDvUh01frt5pqr8l7OS9nJezkvZyXs5L2cl7OS9nJezkvZyXs5L2cl7OS9nJezkvYSLZHyXs5L2cl7OS9nJezkvZyXs5L2cl7OS9nJezkvZyXs5L2cl7OS9nJeykxpz+mbL6f280c3NHNJRHsIxD2Pcuuv3HOWgckhGfSAxYPvU/qP0zZfT+3Yf22/2bjje2v7nxF5nacNq4mwt+wsla2Nxl7wzGztLs9fYf6gO/ZbaDcL8o29m6xObgGXrjtmC9R5hfC+/Z2pNm3arUnsbNeqm/YC87bWpAbd1tOrfstnw660zX1tiutox2NlFp2ys+Zaq7Nxftp/Ufpmy+n9viFBv1takx+g8NqfMVqSKcWq/wDSaDXbazK7XhzeLNh2jZT37Kaq+wsUodoNlasbBNg5UjWacypvBtXS0pGrfhbX27JBc1PiOodmvrl0puIWbYut7lehTK9loyYmqFSzGjebdtaqLON9qFmFv20/qP0zZfT+gepHr0aiqSPfv01XkL1Qw7ycROnWXTr+yNKARWQusj0af1H6Zsvp/ctvGtVLZnX1tO417fZFyZ3G22qtbMfGPac8oaq1N3X+5sLhVWe9QuTZd7tP6j9M2X0/ueK5/wCDeJuIpixeUgOq2halt7Ya+t2viWxbpWNkt7LXhdtjv9kx67FSovo1nf7KGtjQW0eVWtjwXstxBdZNxNehuzK2o0TrNtsu1KbRWbGqdcVX0Aw1L4TOz23hsJUz3af1H6Zsvp/c3tabeqhfm+vMYMFay3XC/rie2rq2Iu6unFCnW18It6nVRrBpOBGuhl2yw45g8nGdUrWOKzc19qxjaVkc1tMaNTba/wAwGtrG955O4Y7GwqqvTh2SKd4Csa1verqTqaOsW5VD3Kf1H6Zsvp/V4Rx+0p/Ufpmy+n9HevbUzxNefVzc2z7G5Nh29cDqWrc27U1QzxHbWhq7mjsa9w/PKXG5sE1C7+vFKps69l1jcVEPrCVnwvRf3NP0af1H6Zsvp/R29futbpZ82boyO1d2aqjfEjhpr0TKAVdXYXYtZsmBW3HWW7xJfusuaS8tcuv2m3dUpAu2Ons1qo6QhHw14eiY0vo0/qP0zZfT+klKkQtKlm6pXeQVa619JfSARAGoU2UITXHsanM6pXfnTDppp1kEyrXY3ZUidr1AKlejT+o/TNnx7Xv1Z36s79Wd+rO/Vnfqzv1Z36s79Wd+rO/Vnfqzv1Z36s79Wd+rB2dYj79Wd+rO/Vnfqzv1Z36s79Wd+rO/Vnfqzv1Z36s79Wd+rO/Vnfqzv1Zr55z/AF3Xf539v13+d/bjIQDXXK3nf7cQwY6bSSjd/wDltrlqyu8dm8NmFa4FjqD1TzqnhOkYt7UOez/YYBiwf2Hu08c7gJzi4sP4z0O5O0NRWMqV7dFTwiFtYYsY9YMsKypTUmvT7TrnX6RrsfD9faMmoyk6gRZW4nwA8Jaop6mEbVxV7io20FuHkHJAN7jGFNZwR3Igq45rzK4QuesiOINX/IDYIv10ygAI3gh1xewWp0Ix7akZzcsJcKAVbm8c7QVtIIg+uJ51krgVieRtAXDH
Npl3CCjqAvJKobhcDcLYdtiWnaWk+ov9amOMdGJy6hdXCMoXVeSoURJEFDZx6RTJUUyqJJk2mcZFpTcgiSKKCTSgIc2a6+Zhko2OI8cZ81VQXiJfJKwgA/Wm83SOa3ZKXZScRJPWNoAHgkJWTlVuK7crex1jkYLoa2yfWTcrcqDJNoZf0Sb0HznxRDJsyBK5puKe5lXkFNTj2/61MxEdcoFtsbzgqsABqc8qV0SlzK5d4NuLG1FUEmQEKvzHWlbUomYTtQXIvJB931c+ZUSr4guZgAEBmImf1tgwYGuwxI6+uGRX5ZJVRmMqq4VYriXalWcVNA4tEm6VJWEKrTj6s8QpV7KSS+yTmwoV85EtQhP7H0F81ijLcqLe9rlXH20pJYhwCWf1MaBHhqt07VkJpymrAl+zvrKfKlAkPcMRME1EpP8A8n7206nR09ybtENs5m+tXqtUu4T26btZzVbGm12eY1Oo/Y067N/YNWqVsKjXi5ZOGwksVsqbo1e3TelzVoANhTNlW7WtTZv1Kx9ZfRv7NbGWrderF+1BaigUnR/V/E30F2zOmvhV7O9pwW3Yo4Rq7KBr+FJoW7VDPDtdUx4dWhmtn/o/drrLr05iPEdNQX069rF3fDQh0fFXx0+6o1oeawV4m0AKbn/0m7VWXcZFg/EpUnVNXrf8d+r2a6rS7NRFk2VlMfb1VO22KaIqCoBSGloAzK1dVYbGnovc6mh1avqaNexd1lS6yNZThFPXVaZr1tRdy1XVaS6up5lXUVm1qaVp01ETVVpqCpu6+tclWuqqrLAVr/8Aw3//xAAwEQABAwIEBQMDAwUAAAAAAAABAAIRAxIEITAxEEBBUGATIjIgYXAFUXFCQ4CQsP/aAAgBAwEBPwH/ACCJjg1pO3gJIGZVXHU2n25qlWbVEt5NxhMq37LomwfkeBQzQ+/ieJxZpG0BVKr6nyPClVNJ1wVOoKjbm8l8io/ZSm1Wu2Kpi5E5rpKb3ZzC0A/vzuJw4rD7ogtMHjQrmi6Ux4eLhyD3QFUoOpNDid0DuQvkg0N2VtSm2SFdG69W4Knt2edctPph3PYrDeqLm7oiMjxw2INE/ZAhwka73F77Qqtd9Q+8o1TbYgcoQIBzWJxfrgBPp+oIVKmGi1Dszvt9bGXGFVtDoboTz+Lwvqe9u/0YXE+mbXbaz3WiUKjmnJSpTSihwpDr264jtOLwt3vZxAkwFQYWUw06uIJ2VwHyQd0RdBhMcCgz1DkrYy4MEDxjGYX+4zhhMN6fvdvrmm124TsHTcjgCPiUaFQdF+m4hmGJNVYzE+vWLxsqIud2qe4DDUw68DlTSaeiZSazbt4/D89PMHNDlNoz8xa23ZBwOtmHcrOcclIPey0HNFwG+s113KNbbyBeBkiLggANu+br4jJNcHbalsGeTaCfcdcmE27r4DAmVfnB0yJEJkxB5Frrj9taRMItJ8EIndOJCBBzGk42ieQkTCiNQvAyW4QaBt4P/Ca7odIQMtdrbdM5Jt3XwwuLf40XicxrNB+R0rhMItJ6oCPDnT0TTOhdDoOo11x+2i54C3GaDQ3bxIXAwfrhAzpXZxoi45nxZwO4+sNh06TW2/VcJhOBPVARl5dH5OMrNBRwhQoUKOBUKOBUfhQu6BAz5f8AEpv7+XySck09DygL3+4JjrhPjUGZCAjlAxzcmnJNbaI7MQrUFHXhChQoUcIlQo4wo7HFxMpu5HKMYHi4qmZbn4063qmxGXKGk0oCPGpAcZTdyR+N4/6iV48vOYU5RHl5yUuieTBc7MJjrhPjlv8ATPJlsHJ0IC0QPGztkvbblybbM791Sm0T45d1jk48cIlQ6I/3o//EADMRAAEDAwIEBgEDAgcAAAAAAAEAAhEDEiEEMRATMEEUIkBQUWAyIGFwI0IzYnGAkKGw/9oACAECAQE/Af8AbdCgq0q0/VAJTdK52ThVKZpmD6Ng7qjU5sxspzCrOc1ssErsmmU91qDpbd6LM/QaOn5g
uKbTazbhUph4gp7CwwfRDyBYaE3ZblV6vKGBKaZEq8Ewqh7e4ynVWtR1Cp1w7BVR9jblSfe2fW0a3LP7IGcjjVpCoE5paYPoGNkprw8wn2khru6JDVMotBUIUwFU39mqVW0xLk3WUyvEU/lc6n8rms+VzqfyvEU/leJpfK8XT7I6g/CfWJ7rmIvKLlcniWBi0hxHrqFazB2W/GtR5g/dERg9dgDGyUxjW/ircynsuQwnOB2THtb+SqPkyj7Nq6ZcyVChWq1NpOdsqWg7vXLp0hgJ9bOEXyi5XKSeFFl7wFWdLoC09TkmDt6/T17PK7b9Fehfkb9ZgkosDt+JQU8Kh7ezaitywnakPbGyLYQc22O/BjRPmQr0W4BR1lMbKrqTUwEKbini1BhdlYC34aNsAvKa8vda3ujpGkJn459fp68eV3EmFVcHOJHV04G61T3iGsQEDKcB2TwQphSgjk+yuNolOeXm4qm5tQI0mHsvD0/heGpfC8LS+FqqdKmIbum4QJ32VKm6vtsnaBsYKeHM8h4taXGAtR/SpWBaCnAL/YtPX/sdw1Fe/wAo264e4bFDUPCGp+QjWDhCrNc78FTYQ3KfgezakxTKlB5AkFDVVR3Q1lVDVVUdRU+UQSZJUwmg1XQmNDG2jhrmZDkAqekc7fCaynQEqvU5rlp220x7Gazy22fSh5Cc8u39m1AmmVprAfOqlKhhwXg6RXgmLwY+UKBc8sHZO0bwJRWhbLruOrGBKFWnT/AI1K1TZDSuP5JulCj6+cp2jB2Xgv3XhnjYrl1h3U1wmmrTMhOr1ThcsrR07WcX0w/dCm1uw9rtMT7+WNK5bfj3BlQs2UXu8oW33B9Qv3TmFuT1sOpz3HpbTF3oi0gSfe21C0Qm0y4SOs9lh9JUqX/6egbSJEnATXWGQnPLsn3wGMhTzHZT2Fm/ULyWhvo6hAFjeu1txhPs2ap+gXGLVypbc3ptdaZCqxMt9C9gY0Tv1rCBcmvawYGUTOfoYcW5CY0OwSnNLDB6VNgeYW3XsIFyJJyeoykXZ7IGx0hOeX5P0eZ3T6cC5u3R2TiXeY9eo+89Nokwn2AQ1T9LaxrxA36NJ4HldsetUcIsbt0rHRcmPa0bZRNxk/TmWz5k9lux6FgLLh1HsDBB36LKRdnsvxdhPe5+/wBScGOEtx+sEjZOaW79KwhtyJn9YElO5YEDJ+rMLItcj+ovlkHpPeXmf1WOi5Me1vbKc4uMn7dcYj+TqwqH/DVup+Uy63zbq3MzwtMzKtzMqMzKjMyiMzPAiVGIlW4iUAnCVbiJQGI/hNtORcTCcy37fh7QJ2VQiA0dvt9rWtBd3T2gQ5ux9JkoGfaYzwhQoyoUcCowoxwKj2QPaRD0992Bt6SCgI9mrUub3Xgv8xTGWNtVgmeFgmVaJlWiZVoJlWgmeBaDurREK0RCAhEA7q0RCDQBHsd3LaLe6qZaHekAnKH1qneRDdlUunz+kt+tlpextiq4a1p3/i6BM8ICgbqBuoCgb8DlQNlA24HKgbcZ/wDUL5Lvt7TBBQbD75x9vAkwopl1no8lDP1vZX/325+3tiRKAqcyTt/1CPohHdDb62Mrlj8Ls/bwbTIQdTBvRz/zof/EAE8QAAIBAgMCBwgPBwIFBQEAAAECAwARBBIhEzEFIjJBUWFxEBQjMFKBkdEgMzRAQlBgcpKTlKGx0vAkNWJzs8HhQ4JTg6Ky8QYVcIDChP/aAAgBAQAGPwL452M86pJ0Gs0bKy9IN/fW0xD5E3Xr9nmR+oHWtq+0WW1syORWdXbG4UcpW9sX10ssDBkPx40mG4QmUls2V+MvZSwcJxrGzGySpyG9XvJmij2jjcl7Xr9uwk+GHl2zL6RQbiSIefeKM/Bbd7zeQOQ/aKa6mOaM5ZIz8E+9j3dau0YSQbpE0YUuD4QbPn9pm8rqPX3EK6YTFnKRzLJ/n4izQQ7d/JDWoLjYJsKTuLji+mgQbg+9lE4Jy6izEVm4PxLuB/oT
m4PYeamFtnOmkkR3rTQzrmRqmwOIbNLh9zeUnMfeViLiji8GP2Unw0I5v4hSuhurC4NYfHporHZTW5wdx94FnIUdJoiJZJj/AAjSrphQo/ivX+kP9pq6sLdUVcZwO2KtXiJ6ChFN30BERzjca9u+417d/wBJq8Lq46qeL4e9G8luY1HI+kg4rjrFT9KDaDzVHJ5ahvfuRpUDdGbuqcTIEDaC9ZoJUkH8JopIoZTvBFLlJPB8jWIP+kfV44CeSxPNa9abQ9i14OGVu2uLhfS1aQIPPWmzHmo8ZPRXtoHYKIOJbzUFnfO+pB6qXhHDjw0PLHlpz0siG6sLg1gJx/qXhb+3ius+w1PsSDuNYrAsfc78T5h3Viltc5CR2ioWa+ZkBN+zx2eQ9g6a427mTmFa6mna2vN208ToVj59KBjkZazTMXPXQCiwqwyu4G69X0olLLdb2pZU3c46RQkiYMDXCkQ3CUPb5wpkcXVhYilRBZVFgPfV2Nh11li8K/8ADuoiScRk7ootT5zVzp20MLij8xu5Yi4raRjvfEDUSxaGu8seLYgC6uN0oqSKTkOLGtlKbywMYm83jCzGwFSTH4RstRgyBnYXIHNQxMkhDtyVolywQaaUVj1W+lDKHD356cyckU6w8gdySTmvlFFW3HStif8AQkaL0GoC5YbKQSC3T4rNzeIt3JZ8vgXhAzX+Ffx+ZtW5l6auxu33LWnp7mGi68581CNGBa+tu4TQ8phXEFz0Cv2hwv8ACNTWRECrbz0w5+anREzltwPTSmQZXI1HvwwHSNANOmrk6VxR3LGu9cSfCDkt5Xd2kWmIh8JGesVFOu5xesezLaKRlZD06a+MECcuXTzUY0YARjVjQQbzWyL58tBmblc1Ns9AvPTKTcg2o4lmULa9u43QoqJOq/cbZKFzNmNuc+Kc9Xc1rf7Ie8He/G5u2mZpM8rbzelurFjvtWiSN5q4kIHaazMwU2tp3ONWXmGpo7c5R1V4OVvMKvErZel9KzTTW7KRoLun4UsOVRc78tFTyk0PvzMvty7uvqoxvfrFXHd00YbjWxn0nX/q7s0RBAjndVv0X8ZrUuOkOnJhH96DE6PzUcRGwUDr1ricZ99zWWc6rpREZKI/VWgJNCF9EHNaonPJakXy2+7xp7R4kePu5q6kgbgOiuf0VzgdS1ub0VyP+muQa3VZUq+IYKKvlLdtcRFHsJ8Rbk7u06VIgAzEXPv3axAbYf8AVWR72/D2AeM5ZF3Gsr6Tryh44gHjycUVDD/w1ohubShFplFNZAS1Zm6aUWsBRNtaHWaOHiO0c6WHNRbeEGUeN8/vdnPNQDjj8o+r2RdjZBzmrjNr01a+p9kxpW6TmNE5eVpm9/GbD+3DePK/zWR/YCaA2lX76uNJByl8abaxwij0n2ArQVpVlNr1mHwd3bRS/g24vq8a3m8SO3xwV9bms2YCt4oa9y+laefWshsnZrSqik2ro7a5dz1Vz1r3Y4V3uaso3Cto/I6Khl8pffxxOGHhPhKPhVkf2BnhOXXQdNJKu5vFlhy24oonMM7m5o7bo0qwUa0Qd46K5QrUi1HuE0zqdf70KSTn5+3xknc1Fc4rl1yq31oK00pL9PjWY7hRcnjGmVmZeg331pI/pr2xq9s+6r5vuoluRWq5jWZ7RpWVPRV3UFeqgVPFPd1ppjuXdSJvtrTWTKQL1h1Pk+/zisKNd7qPxrK3ct8D8aWGM7956BSxx8lfE6sK5V62bxFujW1qvGCa3utcYK1eEjYdmtcWQAnpoZTmA6DVqbpOlE9FIvlcbuNETv1HjHHV4lPGy9laG1WbUddcaJPRXtdvPWin6Vci/nriqK5q1NMzKZWvY0zYblrvA3Uy83csOM3QKu/gYv4qyrr10dCdeauMRuNuyofm/EBxWGHF3uvRVidKVYuc2ArXWVuUfEkg5xVtb9B9huBrkW7K4jGtBmrc61y83zqXMALdFG2gFvOavzbhQzyW6uehLZ1t5XP4
1l6/EM/m8bL2UHw78ZTqBvp1u2e2lxXG/CtVWtU++kkYcvcK1DDuGzBes1tJMjHnI1vXgzII7eTpU0akAXtrX7RPceTHVsNEB17zV2dF+c1NKk4kYb1ArjbjvpYo2zu+nYKVRzD4h2mDZFU/BatpMVeTmtuHi7OoNeDa3Ua466dI9lqK1QVxbWq2baMNyrravCHZjoG+vBrr08/js3OPZhV30FHjSp3GtpBmaH8K1rjoreas/e65fRXHw/30qFCAu61XGY9vcaO19L2vvqzpYgFggp5pBZbaH+GmYfCrca0BrjORXHmk9Na7Q/7q8Alj0/E2zzDPa9vF6rY9I0rwbZuo1x0I7t2IHbXgIy38R0FXxMpb+FdBWWNQo6vePJzL7Hdb3gRG9qJtWqGtQ1ag93fWZTZhQLx8YdBrJGMsQ30L1oorT4q42jDcw3igZnvbQsauDceM5Nj1VpiHC9lZmBkbpfX3tyvur4P0a5XvPkiuQK9rWvalr2la9oT0V7ni+jXFw8Q/2iuKAPi6x3U2zLZT8HmFMqHjLvHjspJaOXUdR967K/Htf3kyqwLLvt8dq50decaUqyXAb4XN44m2Ug2IPN70NzmkbVm6feGRQZJfJWsrFl6bGssShR8eFWAINWjViF3LV4z2jnHjHlBIzDUc3vNsRNdWbRU6B48sEZz0LRafIo5kHNRIABO/5AbQKM+69GOVTH5LHc3i2RtzC1ZJhxkOW/T7xfIBsF0v0nxxjDguNbUQ8uWHyU3mgo3DT5B5XUMOulZI86/CtvFZ4zceKzhcwB43ZQI3Hx7Q72AuaCoLKObxmQXeXyF31lfMmYa2O6rRKFHyHbZgX3+etlMuzm6OY9nibHdSRKbaaC/j7XzMdWY8/iywUtbmFZ5sqJ5A1Ppom2p+RZZkzQdK71oEc/iEkh9ujNx19Xjmnn0kbQL5I8Vss42nRVttkh6F3nz0EXkj5HeAK5h089HPGyMuhB8Rs5LZHHEPX4x2X3ONAfKPicurSHci768IpjLDUX3VaJQPkkI5RtUO6Qf39muYA5TcUShvY28U0I1KjXooBRYD2ZIBNuYUHkyxR+RvJq/P8ldpA1+mM7jQuLHo9k8iEbNxxh1+KyjU7yen2QiLjaHmoASlI+fLvNBE5I+VxYKMx3n5Me3xfTFe2x/SrWaP6Qr3RD9MV7oi+mK90RfTFe6IfpivdEP0xXuiL6Yr2+L6Yr2+L6Yr2+L6Yr2+L6Yr2+L6Yr2+L6Yr2+L6Yr2+L6Yr2+L6Yr2+L6Yr2+L6Yr3RF9MV7oi+mK9vi+mK90RfTFe3xfSFe3R/Sr2+L6Yq41HxMNvNHHfdma1Ag3B9kFnnijJ3BmA+J5PmnuJcURuvRMZvXGVq3U/fCMw5rV7S9SbKMgX4tZcgzW30bjWipQZumspQZumsrIM3TWVkBbpqxQFumhcVqt9OenYqMp3CmbKMvMKJC6UCVBFuerrEMtt1Z2UKvRQB1p7Abu5D834k8I4XtrSKRz0Wt/msQ2LVYcUTcqzbl5vNTRwxA4KVyYXHZqB570C0kag85XT07q5UforlR/RqzywgncLamiQmduYZbf5qY4zJHi2YmYMfR5rWrK0bDCtIdi41Fr7vVQINwfiWT5p7kSKBuoK+h7hzKDR4orkj0VyR6K5I9FckeiuSvorkj0VyR6KNlJA3sE0rctbh6K3D0VuHorcK3CtwrcKGVR3X7O5D83xOwznY97ZsvXmpuM5KkhgqXtahMmJeKNiCJIxrUWHyts7HPxCT1WqG08ozQZhDbTt7aNzLkBymQRnKPPXBORuKzNuO/SuEGM8suzIumXkdlLAm0ErC4DIRRuZcgOUyCM5R56w0SgmNr5zkJ0tpasERPKm0ViIsujdtNES7uvKyLmy9td+B80HSKWJXPHOVWy8Unt8Rs442lfeQvNRtcEGxB3iiA2YjmQZq4sYTrc/2FZp5mt0LxR66Jjj2QPQOO/q89d8OF6Usb+f9eqxjGFOK
aPlaLxfTz0V3W6dCjCmUMc45WUZvT1UA0Iwp5nzb/RQ2uKMn8LcW/Zl/zRvB3vcZsqWues9A662/FKxGy2PP2fr7qIXBtiTHoWsunVrVxqh0INXD5U8o6j/d66CyjI59B7D8SSfNNb6iIBrQUAaNH2br0i1GACTOT5qVSdbeIHdet9QfNHif/wCT/wDVY/Qa4iQGofnf/uuDnlYImVxmO7dUDKQVOFuCO2sTJJi4IEN1GHyXav8A0983/wDIrhqWQ2RCpPop8ZPNEcXPyY84uidFYmSTFwQIbqMPku1f+n5JGCoF1Y/NFcGMjBlKSWI7Kx0eKkSKcTszZza4rhJ1FopJi0fzbisFlAGWWO1ubxErZGdJDmuuttLV32M4Qb4L716fxpAAx03CZl9Gv96GQyoD8OSRwo+/WsxkeSc7i85H991EqMQy7pHLsobq1O6gmHZxm+GJGIHXvpopFnxAZswlAv6abGuDkkN3w+bRdAAdOfShLxtBrllYHttf+9Wj2qIeUskjada68YUDh8QXky/6kx3dv9qyxba5Fy0kjLu+Fqdw5uuo8PhgQx5Jztxeu5owSRTzAElZF4xe5vr100qyZXkYu0JsR6as0WZrcZRofQfXWWT2g6ZXFsvrFAwnaR+ST+BrTRhvU7x8RS/NPcXucZqsgrdW6uTXJq9q3Cr5avlq+UVfLV7VurdW6jpW6t1WdfRWjdxq3VB8weJ2uRdpbLmtraiI0VQTc2G81sdjHsvIy6eigs8SSAeUL0riNAyjKDbcOimc4eEu285BrUfg08HyNOT2VJeKPwnL4vK7aDxYaBHG5lQA0znDwl23nINaEUkSNGNyldBUZESAxiycXk9lBpoIpG6WUGtm6K0fkkaUFkRWUagEeIdIiqKmhYi9zWyxR8NzZfhDp6quzYfZOfa3kGQH+/bWWPFpL5SFguT1+nz1ssE6h21eRJMh85pYxioJivlEBU9G81aRwOls4fMfNTtg5okgQ5c+XPnp4MRl76U2Kjcevs1rjNAUY6LJKMo7AOfrvWRMaJQN3GyZRzr1+ms/B5ygLeXwmTN1dv66wskuIgnY9Jts+gL11+0MjX3yAgn/AD20ZcNJEkFyEOXNmtz76ZZ3jjmQ5XXNXGmi7c+6iJJopY+nML+is2ExMYj35I5Bp2D+1BhicPL5LZ8jemv2h+L5QsfwraRsioeSCL3oNax3Edfv+X5po6UuWO46aJItWrVvFWWtd/c3irWXtrTUCrWU9dWsvbVrL21aynrq1l7auRpXwTRNl7KJsvZR3a81aqPPRNl7KO4dVAimzR6dNbqg+aKZsrNbmXfUccULRRiQZltd213V+7uEvqK/d3CX1Ffu7hL6iv3dwl9RX7u4S+or93cJfUV+7uEvqK/d3CX1Ffu7hL6iv3dwl9RX7u4S+or93cJfUV+7uEvqK/d3CX1Ffu7hL6iv3dwl9RX7u4S+or93cJfUV+7uEvqK/d3CX1Ffu7hL6iv3dwl9RX7u4S+or93cJfUV+7uEvqK/d3CX1FLJs5I7/BkFmHcZ4CvG5StW0kP7Te4kA3dXZWx2j6Gz/Dv2Dn826kjzxJmBKSI1iR/H/wCb/fSxYeJoWtyxZefmJ5VLFJkRebZDOx/XTrQbEMXQjk5r37eb0U/eRiWNtdm66A9VqMha+LJzGW2//FbDaMoB4x5d/wC/+7d/ZIS0SSWuGja1x19f+d+6lfCxHDxxKFfKwXM3V0/rtpJiViYpxY4hcZevo7aO1ZtkDqua9/11f5org3hWEm4R15HZ1UM3hJL3MhGt68HK1uh+N/mvDKtulW/sa8AHSY6XYZL+Y7/RRLMqSWuVi3N/uPJ8/wB9Z8RmMat7Wx3nrrLE6ZOa43UFGvX7/k+ae4lWYVePQ1cLertGwpmWDOD0ivca+ipDsMuY3sK2YV7WoqVPorJlfLWzyvatnle1ZMr2rIFe1ABWvVwjDS1NJZ7tTSWe5ouVbWgyBr1n1BraStxqsBUn
Z3Ifm9xJJYlMiEMrc49+zCW+WMgBb25t9bCEO0N9ZN5X+GuIsrPzbIoDftvpRzQtGWGYz5l8L9+lRwSYd4JAOK4C9NtNay97TRTnUsrrx+vfrTHDIzNzx8X77GpZMYjq6tlEWfkej8abBx52gR7bbeToOL99eD2m0140ZQW+cc17eetjsnhdlzO11vJ/Fv8A1+Mchw3e6xxi1gOMvn/D9DiYfEQYc8dkVxr/ABandWfDAmVdAoA16jY0Z8WHEpYjZ5z4Ox3aU8axNKI3KbUsNa8KmIPUpVR+N/vrN3q630+Dc/frR2mGkSH/AGXP36UCuGxEEYNwisuvWwJoSYNC55OzFjfq0JoSy5to3Q3J6qGY31Iv06+/5PmnuRKrEC1ZX166BFGj7O5NhTSrF4Ec5bWgynQ6+IHdfs7kPzff4aRikh04pNz6KL5l2POSb3oyNHEybskh4i+fm7PTarK88s3KAYXA+bf+xpY8Sseb4Mica589rUVkd2KnWGZstuytg/EYbtLVmlleKUjXZuQSOu1XTIYDyQNc5P402KIjkXS2c6DsP4DdWfaTO41fMv8A0r1dm+o+/I0UgXSwzX7f0a2EbbRfg7duS3PlHP8ArWhDKeMdEZub+HqptpK0cjDjhHIv22pRDlEQGlt1XQqqf8V+T5ums6pJLIdMzC1/T/ahLiWSLLqFGuTz7r1xQXH/ABJdR5lq41byjvonjC+8K1r0AosB7/l+aaNRMo0rXQUBRo+zZekWrYCNLeVSqObxA7r1uqD5o9/zmchM1irNpcW3fjW2hkHe4a4Q7mPlUM6AEbnMZf770RtYWt0Lu++srtHNEd/g9fx1oOJonQaZmQ/RbWiAyoN1ghGU9l6njxs42xbNtZOLnWpMTGR3vnOzicWB0Fz5zW4LkuM2Qlr9Rzfrf0UhDwyyoLKIhpF570O/Hjnj6o93mvrVjPDNzXWPMT1HXf11bixMNL6jL2C9PDjZ1WcMS7yG2010PoovI0gh2haOLcLUMqAynd5Teer2EuIGhtyU6v1rQaY52GoHMPN8Ry/NPcWtTWmpriaVqa31vrfV81Djc1XzVfNV81XzVfNXKrfR41EZqPGrlVa9+2rNoa0NN3IPmD3/AK927KM3lc/pq8UpHU4uPXWbZm/lxH8RQdvazyl3adQNWwcInUWu+ay1sYbpin0KHelDKh5P+kLX8+4earKkUPWu/wDtR8O8vQJdR91qPgcnXHqp7RQl1KbpB1dNt9FsJhxOinR84AJ6qAi1nN7RHeO2s05sSONl3nz/ANh99AKAAOYfEjgc4NaUAtaiuTXJrkVyK9rr2usuQ5a9rqwQ2q2Q5asEOWrBDarBDatI64qG9E5DmonIb0SU1riraicutEka1oKs26tbVEp5l+JcrqGXoNOkGGMkDNmURWGXq1qTFYqNe+ZDfQ8gWta/mr4X0jXwvpGvhfSNfC+kas65h1k0cPFhWlhBJjKMNLm9jc1I8gXbStne34fFW72W7ubu5u7m7u7u5urd7ID5O5Z8RGjdBOtB4XV0POpv8XLt3szblAuTTLCWzrvRlsR7DPC6unSO7h8LlJMwJv0WqZUDjZPkOYW9gcHlOYRbXN57eyLyMFUakmgym6nUHuzxKHzQkA3Gnm9hJIdQilqinUWDi9viXGxcLom3aTRpRcWpzEBHDrIeimxOH4OzYUc5ezEdlNjoRmAXNlNI2HwPgNxctz0ZCmzdWystY6V0yYbDmwe/KrvscHfsm++041umlx0C7RWtYHSg3eZjwjDiyE6/E3CMkmrRBUS/MKmxOUF0Q0mJThB5cVoWg2fF7N1Q4fDT7FHgztpe2v41jcBLinbIFZJbcYXrbd8Pl4/g+btqLFz4w2sbJbTo1pZIuFjiZgwzR2GVhXBvzJPwrhKMzuhWcqjD4IrGznEyT4LDqRdrcdhXfI4RfvvfsBHxOzdWDwuHkOH2kW2kYDW3RUiYibbEYbR7WNswppIGyuGXXz1hsRJi
zMHkCSoV016KbCQYkwxbIO1hrv5q7zmxzwRwxjM6rdpGrFHFzF4oHss2Q8YHdpWJbCYhrRjjWW1+rWnmXFSM+zQre3E3bqkmxHCGV3UG5UZY+ysLFBwg2LjluHDDdXDQ2h8CPB/w8U0vCJxpzhQ2zy8UjrrBxLL3uswzPJa5UVJAMQ+Kw2z2gdlsVPRUuO77KZ1YiHLxcvRWE+Z8S5pGhkQfDvYrXCkUbM8KHwZ6qiaPhSZVy2KC3F6qxMeHl2yZWOasJ/LFcIAaLiI88fzv1emjA45Akf0g1Gy8KzbIpYrpp1UIoJNrGGFm6dajt5I+Ju/eDpESZhldJOS1TrwjLFkkTIEhGg69aTDo+DZE4okbNe3ZUeLuuzWHZ2573rE4oldnIiqBz6VJhmaBsLxsp1za0MFKwz5GUsvXelwWfCLElrEXu2vPWExIK5IgwPTrXCSwyRrJiJCyG50BoYTELhO9MhU7LNmoYeF8I8S6K73vbsqDGYN4++I1yMH5LCmxmLkibNFksnwdaf5y/jUC4+WA4eF8/EBzOR00+LuuzMOztz3vTYzANFmkXLIkt7HrpjjdjmvoIr2FTQXtnUrepcFOcPmyqkZW/N00cMrhX01O7SsHNiWwypAeRHforhQ3T9qFk6tLa13ldNrswt+asJNhnRcThxYZuS2lN373uIstssd70+Bing71N8rkHOAeaoIZLFkWxt8S5zhwD1MQKEcKKiDmFZzh7E+SxFd6iMCC1sopIohZFFgKRsTFnKbtasd1Zu9/MGIFDCvGNgPgjSgBu+NMkqK69BHysUTSKma9sx6KeRcTFkTlG+6s+HkWRekVsmxMQfdbNRkmdUQc5NcHphMQHhfPnCnqrJPiI0foJrCvh5LZ50F15x8a8Ef83/t+NOCo5VzIXa4PZSAxJbvfPa2l72rh9cOMtlDKB05TVyseXZEs38VcALivaieNm/6a4P2KoshV8+X5ptXCGzXBsxmYSbYHN/4rAQSurlcUuq9p+NeCP+b/ANvxpFLIt3i1Q33V3zl8NkyZr81TTItpJbZzffatp3sL3va5t6K2WIjDx9FRyQw5ZEvZrm+tGWWLwh3lWK39FRQbLwUTZlW+4/GvBH/N/wC35YRJFNN4U3hOc8S++kTMzZRa7G5PyezRZc5dEGYXGrAf3r3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+elczYUsvJPe50/wCuvdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nr3RhvqD+evdGG+oP5690Yb6g/nrEJOULRSZbotr8UHpPT8jU/nRf1F9hluM3RQW4zHmoXIF93cGdgMxsLnf3NpM4ROk+xBlcKCcov09wAsATu8WGRgynnHs8d/OH9NPkan86L+ovsMBifgygwN/au/AfA4R0ibz3vWFi+Dh4zKe06VNNg8Kj4aIkXZ7F7dFcFzbESCWYZc3wDWIhwOHSTvcDOXa1+oU2IkjVMzjKFN+esOMXhBFDiGyqc1yDzXqZEjRvBcgyaWvv7amiwECSCHR3dra9Ap8a6FcnFaP+LorCjF4QRK0yEMrXt1HuYuaEn9gUZPnbzQx2XMCoIXrPNWHw0+GhBm3EPzc9YrvPDo8WG0cs1iTz2qDGPApMrgZVPNWfF4UCaRwkUatyiaiw2OgRDMDkZGuNOasTNhcMjYfDsVJZ9TbfWGkhwsbPKMxu/FUc1YqBoEXGQW0zcVr14dFeDjcdnux6qw0sWHsZDkjhU89zUEXCGHSNZjlV0a9j0GpsHhcKJGQA5i1gNOepNpHsponyOt76+wx384f00+R
qfzov6i+wYwgmaJhIlukVixNGVnxJaXKd9+b8KnxWLRkmkypZhbRRUkGO4OxEzhjlkiBIcVwSMPhnjRJwxS3IHXXCDRLimXMFY4Zwo3bjfnox4CGXwbhNnbjAg61wfs0ZsuKRjYbhXfBw80sUkOS8S3sb1izjcDNPHK+0SSIE7+bSpEwOFeA5s4jfeaw6R4DEpllVpC62t2VI4UuVUnKOeg8+KxkUs13kRGyjXzVi8DIjZUnCx3Htik83XUDY6LG7RvBRPM4YL6K4QZUxckTSkN3u4CsOi1YE4CJ9mk6DLbVbVCyR7XZSBmj8oVEYODcTG41zyKQF++sU+zxkmHeVi2wcBH16K4PlhjxDcG2IdId/QL2rGOuFlw8LKuQSVJgZcNOsi5+Pl4p89cGyJCxmw0mcxEWJFzWDSPCzwxRSCV3mXLu5qx8hRgjKlmtodK4SLoyhprrcb/YY7+cP6afI1P50X9RfEyywYnEQbU3cRtoTWyhva9ySbknp8Rsps1r5gVNiDUcs+IxGIMZugkbQHpqQwYrEwLIczJG2l6WGAWQd1kgxWKihb/TR9KSGEZY13DxWO/nD+mnyNT+dF/UX2Msz7kW9YWfEQlpZyBkj691FZMHNALXzPbutg8oyiHa5vPaohKrNn6OYVp7Bm6BUWIZQpfmHb7HCKqg7aURnq9ni0KgbGTIOv2WO/nD+mnyNT+dF/UX2LjymUH01gNkBmGJTKDWIlxUMKrHGWGRr3NCdE4SbHNxhKBxf/FYLCTM8SGDbSqpsSeipI9o8i97XXObkDMNK4R78mCDJ3vHoT2n00ksLkT4Q8x8n/FcHRwMRGE75ex9AqdnxE54TMllVWPTuHVUceug5zepPmmuDMPC5jOIk2ZcbwLmsFJhpZcskoidHfNe/PU7cJ98jDG2xljY5U9FYaRxPNwdk42xbW/SagOGTFYuJr2KDOR21waVEuHZ8QBxxZlrA7CWVo5yUdXa9+usW0cmPmxCZuPHfLGeiuDpbTSxWvOsRszaVNJwbLIePl45uYz0VhWwUPCWfOBKZgbMvPWNWaWVYsPlVFRra9NcIozmQrPbMd59ljv5w/pp8jU/nRf1F9jiIl5VrjtFYCYPkyOsp0vcjeKKsLqRY1scNwiUw/MDECV89QTwYgw4mIWD5b3HXUmKfFtLM8WQll577/8AFLAGzkXJbpNYyQPeLEamO241OwkMzNuuLWHRRy8IywYlb2w+W9j0VwYhzxuBtZyNBbopl6RaocG0pzwnMkqi1jeopsfiziNlqi5Aov01Knf1sPJvTZgkDovUa4LGbGNECZGjzbuelgVi1tSx5zUI2rRbN89131HicbiziXiFoxkygVPFDjmjw0zFimTXXrrDRYXGbMwrlPg7h6xEGIkaVsQ2d3tbXqqMScI54kO7ZAFvPT4nB4s4dpRaQZM16xrpJJO78Yaca9QpiXLzAcYk39jjv5w/pp8jU/nRf1F8de2vvXHfzh/TT5Gp/Oi/qL4rCzo5ESygSjpU1h1whOfWRreSKw/ej5ZMS6qjDroYWPFSQx7DPxbb71i2OKklkCMyswGmlQcIDHNJmCkxOosb0DurAPK5WLI9/RTpEWEib0cWNDWTZk22uQ5fTSI+dpH1CIuY132z5YelhWyXOktrhZFy3FNE7sWXllVJC9tTMHOdGaRWv0G9QzeWoPisd/OH9NPkan86L+ovisRFzldO2ppZeSuGXD+c76wkMg9wI+btvYUFx+TZd7fDbLrmrGpgGjMYjbkPm5qwPCUKlmjCO6NqCKjlweO2MZW/tYa/XXBTYyQMFVgXI5+mtrhiJFiw5zlNR2U8suMgUPoMMq676gmixqYbGxxfDtZl66wGKktEFxHHYC4H8VYSWXhSOeRL5FVVF9OquEUxjokgnYuH5xU7fBG0NYXN5Pisd/OH9NPkan86L+oviyIY0jB1OUWp3jjRWflEDfWaeCKRt13QGmSOCJUblKFABrZZF2VsuS2lqCoA
qjQAVeWJHNrcZb1lgiSMfwi1M3e0N23nINaG2giktuzLe1bPIuztbLbSs0OHijbpVQKEskEbSD4RXWnw2E2cW0PG5tL60kaaKosPFY7+cP6afI0EKzWkjayi5sHFcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVTou2LpylED3H3VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVXIxP2eT1VyMT9nk9VcjE/Z5PVWLkyuFeW4zqVvxFHP8nuF/8Alf8Ab8sOF/8Alf8Ab8ryzsFUbya4TPfEVpNlkOccbi83yvKsLqdCKmaQeCg1jPTfd/8ALnhHC1Ls5ZBBHYDLxc3XUuCmMjup4jdIsKzJEzDpBX117RJ6V9de0SelfXV2hcDtX10mHizrNKQM2hyi++opRLMylsrqxLX66DIwZTzj5Rco23Zspy+nd3OIdof4Na0VUH8Wp9H+aK53lbnscqr5xTR2Ajvx8vF+79euNeOrhbKIM2bL/t5q/ZbXUkqefNz3vQSUjPuyk8bzEVlw2fMObEc396viTboEGpPp/tXkN/Fq49O7s/CpJcZlLOOMW5l6KXKZjLbibfPu6s1GTD6X1ZRQ2lhfcw5J+UDqDYkWvRgELiQrktl0Hn3U0WMfasNzHcw6dxtSmSacFtwAVr+gVtJMWyw/wlfxtWySVz5CIoIXrbT7v0BHC75yfhZSb9en96d8bnmMqjjRpfLv4thUmLXNHhpG4yc405R9VNKOKp5YXLbtOnroKuIkyD/UcAMnmtfz0wOIdr8qXi5h1Wtr5qBJmaEc0uXU9G65NXjLd7QsrbM28Iegc1QrAkqFHzmR4yuX076tiFjt5Q0FEhShPKRxo3n3Vmwp4vOh/WlZTxX8k/J5mO4C9bd2TKBmKW5u2v2U2AOkp01o5sQkDsbXHHLHmB9dXxw2s5No0zX9BrJ33x31yHW/aeYfrWg0ojaTdeLcP8U3eMiCJN7lb3PRanwk1nxa8y6BvVQ2mzKHkhju7B0+em22IOJw2XjNfKXXs6v11XwDrHhSbo1+T0tlPR+urKjqQ3+qG4zD+Lt6P/FJhgtsSbKinceu9I2KmjaFjlJy5cpr2xCDXgJVt5DHT/FZ42EOI5xe2bttvohp0NjqjjUdhFeG40Q+EDe1baB1EZ5PF30Gtb5N2O6th3xeP/h89uijJCMubUoNPP1VtTLs2YZQZtQ48kf51p2OGMrOvGbU5Ndxv+tPQMk4zN/pkeEPZf1WoifN1xsdf12emtrFMuG+CbgZTW0d7ycvbns/CngIZocuoXn7ers16qLu6TbLcQpyynybbqknssMha5R7+DA+EeahkYsspLXBsrnnZf8AOlZzYytrtF3r2UrPixPszcKtrA9dZguVulTa9e3Kb/AYa+a1WbCSW5n5vzD0UQMRtHQborZkH8V+b0Ue+gCB8G5Ob062/VunKk+zVvg6fdQVdw+Tb7Pl207ayx5drbij4ef11JJwg/HvfMOTbq6KkkhMC6cYSJa3p+FRfBEbLc6yA5u3t7aY5odm2rrINf1+hX7Nu5mUEBey/wDan/8AdWhaawEbnQW6uunOEYf+2Z9yc5tvHVelhOnQ1t3zdbA0AGQzI1ozEvEz/wCP1uovidn30TxjbwYHMeztplZoy7coqp06rjS1GQE94FhtVYbxcbgKw44OMO3voUFwF5725q8JNm6t34VpsVvzKm+s0rxwx9HP+uygMR3sFBukeXXtr9mLCfp3W7b603fhj2/+pf8AW6kzX8/R8myTuFbYwWite9+NbsoxxcfDroxGub0c3666URxK6jck25fm/wCa
LRTyQy25A0Fuz1UxaNCPhEaOl+e9M+R8u9gR+hWXBomJFrks1lH+aEWW2KzZdlfd19lHZbVZX3n13/t99BJIxhmItmi3J5+msmfvkk8qbjDsY7h6K2MyMcvJXei9Q66aCe/fC8hed+jz0O+IEhR/hq1xfr0r9m4y+UBf9ees2WxOmZ+Mx8wrPjJmbyUGluy2+tFECeSu81ZRYVcj5OMjbmFjRgfZ5CMpe+tuyvBxheq1xXGhw7jqWxq0uGCMOcpa3noMYo3HNIqC/nHPQiaKKx5BsLdlPJgo4bScpDxNenQVLi8cE2p4zMNLDora5YSG3DLc+a36/sq4nDLDDusoBv2nfTJg8PHIOeNlAH31mCRRMObJdh5+cUs0KoMQpuGIvlYGkGKWFYkbNlU5sx9Fa6noosvP8Ntw6lFX3sd7Hf8AKTMFyt0rpR8Lv6tfuqSHhCTjRAWEbFc/8RpsPGWfCQtqSwJJtu81W2cv+3Kv971dcK9+ni3/ABoZ8KzW6cvrrRJuw5SPxvWaK6YadlSRuLxdd9QnCszSO2TJJKSG9NF5W2shN7nd6PlQDIvGG5gbH0igkShV6B7Eq4DKd4NZo041rAkk27P/AJQ2mHy7QuFF6WRxaUcVx0Gkw8YXvQs0d7akga0BiJ0RjzE1txIphtfPfSjFFMjOBmIB5q2UeJiaToB7ip3xHnZsoF+etnNiI0foJozYeSxutmXtrYx4iNpPJBp4lcGROUvRUtpF8Fo+vJp9niI2yDM2u4UyZkWXOQq35QHPWeVgq7rmmjXExFl38amGHmSQrvsayT4iNG6Ca220XZWvnvpasCMFiVN8SquFPNQOIlSO+65rET4SUGyHK6msO7m7NGpJ83yYj/nJWNCg5cSm0i/mbq4Dh+EBIW7ba1wo06q0+2txvI5q4cEPucO2S26i2GjCuYVuQNdbXrDBDgI40yujpmv3MZMY1MnfLC5HRUz4hUaVnbb5/wC9G99nteLfyc1cHHCKgfbLssnOK4RB3lEI9FcMRiUBJZ9HoYHGwQ7XZcWSPcy1ijlXMMS43U/zl/GuC4xCgTa5LAc1YfZKEzQEHLpWPadVafbsHzdFOoP7H39b/l1wUY1jWXbpyfJvWJ2Xe5dY1ybe+7qrhYzND4UZskW5awv8pfw+TASdcyg5t/PUTzxhmiOZOqopnW8kV8hvuvW0nhu/SCRf0UcKsYEBFsooRBRswuXKeig6wbjcDMbX7O44hXKGYudeejLJAM7b7Ei9DDvGNiLcTcK20UAEnNqdKD4iLM40vcipYRAoilN2Wi8EdnOhYkk0cUkVpjre5/CtlOuZN9r1E0q3MTZl13GlxBXwqjKDfmoyzQXc7yCReu9tkuwtbJzUpSDjKwcHMb3FKcRHmZdzA2NSQJFaKTli51pUQWVRYf8A0c//xAAtEAEAAgIBAwIEBwEBAQEAAAABABEhMUFRYXGBkRAwofAgQFBgscHh0fFwgP/aAAgBAQABPyH9ZwoIaUw99QW1arH1/NVWWlouXxCMv9sbjS4Cxca7TtRA0dRjQVdP4/XALfAw74HScy4RfTO35KtRro29LZ9aqt50SnAfFBCY0nftPB5nXbYP/B+Rz84sHT4gFATvKLNbNhpucFgpo+88/Ae27fCT9CJmgmI662xLj1brpjiGyBYmn8tdXlrI+kN3ywd02QhWvkT+zvBsA08dzvLmSBub/wAPljtvpFtYmerKljTOpBvUZCTCPMRhkGwOjxXSAzrByQDVIfkjw/Idy/xkxO0ogVj2F6sI7nNfwSzijskLsPI+A4ZDS7yCOWuLoIfa/iUA519hPMSFxKUU7QgMOV0mJl/1eei7mjzQdy/zoFN5gfi9hLAtvpPHGFrz0g4fq2H0ik2GS/Sd8VgmT5pgs4KX0+CMU9Uof3PuD/E/lWmaT4/9wTKvrrNH4ATFm4cJ5HCHDMc5JL0T4MwtRxORgr/R+RL+q/lVQM6iFvOXrHHwrKi2i/iOVkGyyEyI
UjzLAE2N7f7ThnUFuw/id3cYtHzhH4RtdpXOfG4O/WCqfxwgy0o1y1M0m26+8oJHA4iXvI1C0P4W6l7MEG74jVuLtWJelhkwJsiGKGuLkIRsrw6nBRzoyv8AEBO47SMPgYTgNfmlIA2qgg+5sVhbzMKuvYH+kGUIYwTvfpfwrS6CXMrQ0k9+sbg0DdYd+pCqtPrTJEe68H2r5hMBWrwS4bSdD4MFeEJAYd3/AJcYegseYmC1W7SXAinedY6NVlXVr9st7vq1uC3zEHGH0pcJyzUdoypbZcv/AKR2xc4WOHtn5Vq3bHiafhIt6xzI9XwLHGpppW9fNL+D1bMFtRKJOmjtKvk8rmGSvCe0jO/0K6r4BM1qAu6C+JfCceRm19zvb1mfREdrHLOgDMYl940F9sBbp/OJo0Dgku/r9JobaqIFhO/Pw1eeHowvwpOHxT5DT2ZK9dQIaHTo8kyTzE9Bxn5ju+YH28S8V9GPMcw5nglVFFY4gmgmg67CiFiVjmpVkq3ef/Y4XfEyJqx68QD95HqufgQRW652/K9LkplBUVMUZltJZKR+Aq8/y9/xv4HQilHrFMuQl67ROzlghWc7QJfqNL6ThxxF3fuTQTDKIN3pxS4/BnduAOwbKUpV0MYyDa8vEuItQCVm5m1gR57/ABriz+YLkB5ap1QJMGk2QCSx+Jjbe6bH9bfDr5+DAruQrmK7Zh8tCKoMsqYfav5g/wAHRzXeHbR6Edor7ika2ut0jPMu6ZhM3K6Ms5z7qvpAvw+00bmfCGDHzKu+H1/EHl/AswTv8vbD44s+hEIaaEYZb0RRT4DNzPO0t5MTsIGbIcQgS+kbGrKGVh7APKEare07s7kuuVBbTzP+oNERjb93CD4Cv8ySUVk6OnmPdrWdqCJZk+Nyx2cdfXr3+J8vGd7Zy/fWXbOW3mCatfKyWEDBZtlV5EeJYDu7N8xxRlUQKM0otIVruqjghJKRynqlLJZB7fxn5u3wjijjilxYs2Pl7oMuaSTcC7geL/pMSdmKi+kOhiCBwOL06y0PUWoqumD+4DZltWWczu/ByUZYjtmS9EcjXs3E0sZFuHwLjz8fl3406rIMn0NYzx+DdMmPonTtek/A+XubDef/AGvaPfJdqzJMqIluVhK6wCVz1jAxHKdIvMC/4PrB7wPpt8y5s9H+RHj4HHH8WBf2gD5Y2XKdWVluA1DxHIInMRkfwxuFa77gvJF1m8FkIbHfTMylkvK0xBGjvDKSsHUmS9gXiWtq7EaFGIsZlijKhNQcrE2zPJ4I7w4mdxneYLsX8u/g0TOSe47/AMzmK6Xx8UBVoJc2ZJod+00QGujye8PlfdUdy0E2HPb+4tdk0HLKIVWLxAAAlWrIFbLjKSqbjxUCwqtASpVrbKf60QHGb6aH/ZahqFfZFeDfy1mF7D9SKcMr/ohtMr428VOhTvH0nIGcQEvfXj/PzQ0JuZVfo9oGWOYAi7R9c/6CUcoKNgRrpduYbbwMBTptiJtdW/mA69+mWNMhFiwGVQTgY15amQA0EDLBD2Os2CH5o/hydJFvshA+dD8MYvhfVKvxlRAdqCj5OrHrP6UI1oOW5HWVpvmXM+VkmGL7lMbXqr/EfO4jGVDLIxMKC3tlp7SX2t9ZaHax/EoTMHXB62n6V8p18FD8uGofEfCMuCt0b+alDmk314S3+opvj7J0zwRC/wBhCOQO6ZjgA4lNinib6dCIgmeyvSYXbaTbtKU5WHSUbitr5GFbnUVXGT0u3qgXMcZ07m0lKlFkUd6PzT+JulY/UdoCbxesbJ0gbZWuY/q+TckroaSWzU8R9mMU+YDkqXN9gzdBdY4u8xBwO0OiHS5gUHoLlGL7zLVWKLyNB/L6RgLD6ATZo6Ba8G4bZNdH0cfMqp0XMPHEH4XFFBjFz6MYfMxPi8oeZipQjdp1kZoe8v8AsJ0T0g03oIgHokwMXIriyglSKas0o4fMr1E0CgTOV3mHfQh0MwVxHg95n29J9RmR
P5sfpOEJoYgDZ0Uc5mgJ3PWfaBpAH6CgyhWQPGIeOmviJ8iqB7kDbepJuk8ghksbjbiYaYnb2nuQ/A+YPtdSMy9KHrtI+p2mXPzvul3TfbyvX5jErEuH/g+BcuHxE54U0jH1hr5hI2FJNkbbz5wLtXVJd9Sg3nu+UscAx6AjSBSc4WIG3ca95Rgsd4HG3SAGmXodwnD3HWsQ3+Vsewk1wJ/NJFlUuhF0u+iXWWwpV+v6N/aA18thRXvvpMiI8L7kT9bqyYdN/CqAeqj09FqPsf4y+8JdDzXzxZDetx227G4FxfxLMBfiOVg7ytgy7ZWQ18y9Yjwy8VBZK+hUf/hw/oKU/wBCFITkFiGLIu5WJVVkHskJnZdH++YJZ1BZtAD9KLVei/qRIYIFt1DYkyI2Py9zJJ62M8KlRfeFwfl/hqABQUfkKlRvNrUQugepHoHzaA8yE7OxU1kGK+etsJtPYmw9ubb2orv24r/xh1eP/ORu1usgqE7H6cyEVhGLoM432JnUlKUnp85QK7W2vZ4/KjMWFocH5LVXwrr9bFdVY9vD2nHYlMHosGyzXzSSl98j8m6xuCu+2zb8h2pt2nnpAA20ip8TwMjnz+ubYbCRMNlBtfebfDCYTufMc8EdYc/knBCjAzPf7/PNEtbmVdWcl8mNrG0G/P7AWFMseSZ1p/7Dj5fPXEQsDskmn8ih6Be+eu3ziDVYuouHjweRmtUp+w+zlRcQibtwdQ5hob/T5WMZPDkxS7Cx6/PDTQtWHZgscQHzK3cYb/xFrVCmbsueNUbfL+x3FN2OB7ot/wD/AEK+SzEUUjBDUy7KfOc2DTF9i+Sfy1SMY3sVMrY+sQEUGxrL+yqLGiyDzf8A0DmFrAs+QEByGqcwaLKfmKk0vYl9+yMdHz8rCCUuuU8y8lRk680F7Q0F3+ziyma6OB07SzRuEvs8/ITtzEcNj8vW4V4ZcZ5Hx8kW+MFqGrRAMvVO7qvL6/tF10iA04tJ4fjYTmQ4esphkfZPlGo2mOF8QemEAUH47XELyPaOoRx9wzgjCr5/aog6CvoTvEhdGbXT+IoC1/LPki0DT16S7Kir2uv4uNwTl/yVc3iM3niCemi2/wB3Hu60y/tdaLdfAD/wkQWE7y+4P7n2J/c+xP7n3B/c+4P7n3J/c+3P7n25/f4vLLLPPtz+/wAHnnn2p/c+1P7n25/c+xP7ghYhITXs/gABIJpP0bw8at7wywrEbE/FmUGmvvBEEbH9G+6dI8vMokdzTcKjhT0IXwveUFLTeNQ4KEOvF5hQ2uwpv+ogs85u44f2CAbHZjcYOFVYbiDRpVxviFhA2G+IW6g2u5gCtG4hEg6Bw5jEH1Cjcqhe6lEFUGNBohOHE0iRjRVqP0QrpIxWEqfRc+ifolHktCy+OsYNVsFz6P8ASo/IrIaSg9GsYu5mt+EKCqOshrmU+bYr+f8AlC4I49//AGf+i/7BHIJegXbDp1bTe1XS/EIEd2sL6uFBxiG+o7nRTYeyoZYViNifov2zpM3zGAxtmPSP0YQEbGep0EAivxAlVCP/AAvgX2BPsiHTE0GDrQ+ssBGEsQMkX/OizlnL/jl/5p94hRH2o9AfwpJ9D+S1vaHquPC7gFdSvQllVklF6QGF1LDV9yWYN0qy9unZAVFtBrqFyN3wCsEVJdJi467l/cMDTz4gKi2g11Fo7RWKG03Mq6XGHfRUOGNl/UGo0qpdd81rzLx+0o6I+QxihaAHur9JRKnPSQ/JshHmtes4qdyn8nuTtTLM3avotmgV4vrfV6qhtYxcBa698Uu6vojN9CjDqwtmnSUuSqof0xP4iCLJhB8ccM4Szhhab4J16MXwvvFfJ1Rd9ML6VCuwRWq9ofQX4cTH+ZPXf0ett7ppElrXqfIWW+JzzoNI6ROGIlI/8B/SuL7gaLC20+p432/RPu3SXUJZFbJtyl3FSZPkh39CNL26Wbh0G4V5TJ0Mv+Rlhh+OwfwMHsnwCtvkzrFjtzCt
mMfVi3d97zOPmat3RvSkLE5ysIGhOgq3fgmfkSsXDvXMCySQGgelsrCBoToKt34JWmImgvcxX1y7E7olUxKk0l7PvmZymtK4f8QUFpxXo+QrhoBsqUT0JlQx0lypq+jp7TqosNDpaPs9Ij50eODgvDXeAgK1EP0eGWuWEhWpDfWVt0Prm2GYWlvciz0zw4Yo5lyqhZyG5uK0LChtXUe/aG3G9YAOCohzSxZuJDM0q/Dgm8NVk6RkSIFMeqDgv1VxsST61oGXUtBP5UwjkepxTNWOjeLhJF9yDkxZd4g3jUBvjldVzS+8waOWP4Su9p5EbLsBP+HGIFMmdivsw+5L0t0A8h+hfaukzMSofgBYjtLm33YPNYtVZ2Uw3HHaq4VHcXBMSoaJUHEwmJUsxa3LDWOoHEuRXEAAMTMKYlrAnOlRp6+jGbPDOifKp7F661X0iOdERZte8xcO2V1zfZuMC6wOj6ytGXm+ienaAlBHstu4spu6XjXhMgXWH+008QEeoQEoI9lt3K7f0zRqiXgoQMOnRDrjSz6xE+FJWniKR0BoJp+RQLO1qboL1SStTtGv4CIEAbYcIW9iqmsdMhNhFPm78NuyLVnw1z7i3jgujUs6mAm4PQbHV45uhzPeA8voSkEFkKheSjNTmbL6qHrpA6u9xg87pPcehdmMRDNGYNy3lHBpumqSLxJdRzfO8PBzDKEhUW71+PjGVc1B2l0dZ0oqC9qXpLtos3dSsKLGPh+04Tk7OyGDH0CPg3idHDlE78vT2mTb2CFy6PUfWJ7jKAPRefQ6TUyWrlO6Xx3+kH9PCnVb5l+ZD0AafqfnzYdf4pXP5S9eYQlYBwyyVL5hBxgWNVUHE8iUcEKNDXOE0gKsYovJJvDjnmCyuOcF7iQeQ+UoC+Ucpp0zka69QFqvr1FCM+CUlqmoBYr6tS2lzwdSgKndjFRTNsTFX84KTxLQJ3Va8EJ/Dhql04Xp9Z9r/U+1/qfa/wBT7X+p9r/U+1/qfa/1Ptf6n2v9T7X+p9r/AFPtf6n2v9T7X+p9r/U+1/qfa/1Ptf6n2v8AU+1/qfa/1Ptf6n2v9T7X+p9r/U+1/qc9n1srJ8CT8ULSmLK01MG3RDpofw/uJGQjBiDNovQuHIKqHcJE2GkucXqxLvaUmdKy204lkW88Y6Hdbd3Deu9BltJaItbBez+D1YXjmyMKr4DGv4nDOwrYxXGABxK+eQDn1TgWVly0xV6cjto2323ysFgayMcyc3TNOVvV0ETs/WQw3rEKq+rxVA66u+j1AOQsh14iFs7wTbNSvJaIDTanTxCns893+0ZC60W32PpcoSh/IVKO4oppUa+84V3Ha472EDR3cX4HGKypjOvr24cw2TTKtq5X3/P/AHzpHl5n1GOw04Yg1dyVRk6czDuDbW5UttSJm0gFQoKH/sc6MVViWA+iFLuXisQD0WqxCoqlVWOJReLVV4gxjtVUBizWCNbNZR5gq5prPMDHU6zDQ0WpK74VYRMbpV8zJWspbzBOn1+ALn0n4aBSxQbM/wBfnb3sDsZDat7T0mNYXMFOXfrKGwVp+RLUddHbiMT2VyOFxGKpvWeXEfUCwoqgOMPUKbI+H96EPabqAyeaVb5VeaPMuZZdBUHllbv0ankl7RM9Y5O6DrN0V0olDLYo8DRjEJmPRuM5dfBlXNhUPt1H1vZcKNmXPNMPOMQXiY5tbPF8jbpq8brobfYP7lDsWLMAYZxd7z0qUdpsSHpZaa41L9RPptI5Z3kXXpzUp61Lavuu74msHEfeAv8A9c6x4ZhH1J0qqziJkrHIeKtVG62eDIv6fn/tnSZnmD1vQZQvXmBVYYHNNBPKec855y/WX6wUpmVeCCjXzR9kuIFSPfHvj3x7/iYUIr/Con0f8+T2FEhfyP8AsCZvOVXqu3jModYsKN0b8MDNdABKifUQSrHVXvK4HE26CUKuMjqE9sNW8cTxhOMZgkTDO08aPTDx0PohEXHHvE6taV2DOyzf
mYvU0PS5a6K1Fi0akznQijTJk+WbreUhBbaOBo15GSyhtS+2mAxbMUjVDEUo4ArwHDGAxjFwVHbDbh4sczl0B6dblwe5a+x6d5dMGNxbNGHThxCn2Hodu3uVjjla0tJe276L70HS4m2KU5V27HYxLsX7YHmEEoKgNH5/7l0nEMCkg6wtclzmGS6+SnP3A4ZeGE2LfXC40jLLL8Zgw+KD2yeSZ/n+SXNNQQUXo2rvFYYyErl/07zXhlSeLUPNTL19vb8mLEeBgtU8Y/y8w4wQwR9F3dcxaWtdcXCPeB05hMIpHtquPWZz5HxPsY/zzLk3NpunVN1psotVFyLFsnbi1zutHENhBaJT6o9zsUREYcE1HAioOlzh3mtnMqWOtuvo8Q5g4mFU250O1VLitpc3Cm7d7xfEa1C9XqM+rFbzWTVvVxxfLpVBnjmBT8V1d2/T9D+1dJulUkNgHeb1KzMPaPZZUT4ZjucO/VwtL5tFQlSCDBZgN2kFNo6HWDlwnBBMZbFRLtHuOcQ/TIazSXeOfw/oFAFATv8AEQ8NDA8DJFPskuPqmZHUwF9SrPeLTqqoV3ZBXTpi9VuQ5SSlgObaTjklfmLEmbWuvCdb4lx4KsBtq9C8K9ZxUsoR4F95g+8DthcO9MnrcaquNNIei1fs+YtDomx83Cu/i27nPJzyNzvzg7xOKfRMNNTWeZomwMOyjR/6WZgzMBQfobKZsL6S82H0gVlRhte8oYf/AFyqHOvc7j3nde8+kK5WBbUrQ9NwOntrnhtrnht7gFJ6bgNhFCLylK2NtwBbO24uhbcatV5hyoe25lotsOIrgEOMhK28RC+f0RUibGxjjZOMnaEYvkgUe3sAHWxl3na+13na+13na+13na+13iNq4Rx/MOkGLRMC1bkvFS1B6cXi3IGP0Y2ztKdJXSpXki8J6SnSU6SnSU6SkV6SkU6SkU6SkU6SnSVivSUisU6QHiBNEFGU6TsMftWnwVKlfgH2rWKmlJlR80ACpUr8+B9We9EQBo7aJ4fwGWBQezHxTBJXxRcZiJtSdOp+DqHoYrR+I/B2VAQrRQGk+IJyDWr6ufwAYUAOaLiMautn6LchZv31YPMLdcFsYyntHKWkOFyf+o3fqFps2MbqvS9c12vEz0PdumASo7bG7x7e8tSqmYeim4Ki5NZ7ktgI5t0x3/RhL7tUZr2+stHkGsvQvpcfDFRDFyey93xGH3w3dQPQRuOCajaZfagqVbfKWTYa9jNu68+kFOu3UzVFHv8ADCpUayyrBBfhhFC2qNFfWPToU4X/ABc3/wBhoTPArQvvN/haxiX33KR+D0IiP2Q/q1L6HgEaepxKLJb9AbwNYhDkIB5QG/SW/ubmnamvEDCVa8D3S/r7vO3LtidTL/oWI1EqoN9Qx6kdSFEZIeS7l7ci1kugrbAiw6cm00XEdiEGqz1Ycz6L+iOCJRhw+o2QJ9Z1hlr0D3hBVZKM3GLai4i868QhWVBr/hrh/wCv0RydXBnLPY/iExDtUxpXjUsqkvHJvHmEAKCgPH6MyoxRpNOM3qWZoS5erK4NAgTD1el1B5ZFZ5HtKuBEtHa5lMEe3S+IbEkmwtka6xuJaHF7GOvmH+HqtqUVOiMdRXeMNXqBi5Uss3nGbZq/K89wxCc2BtvxrNxL7eJLVaL2Ub3bDZjXN6ID0FiaToPSP+jCzyPaOTgolGgnMF4t4QeXbN7ZnQpAByx1RGXXQNEq5RmRPB7Rs2q4zS8njEcdQ5Hl4O/eCTzScyuav6TSW4qDRGsyqbELaeqvFXCl6Kiy01zuLUWbW/0VwsZT2kM07SOiJhotAX0GCiSmyYd53KAPXrogqt3Y/hzqAgCikeYjZFbfYwZjYimaVrULGAUfqitRRzjH7scz8Eulv0ikX9BTKoatumUhlstHpN1EqxAAVmgpcuEGLvP8lLAi2Hv6fqv2vb+qFw4imgxNI9sXavEN4mO9NsesQnEgC9+et69JbK7w0UDKYuAG
HcHvKuFptODHs9ZlwuFlaP1X7Xt/VL3LbAslPmKBY6bOd1WtzmZsHBRjiJVat2vqqc7kNK8VqFxSrrBTec46zjgih5WzF6gIZFDd77v6r9r2/vBa61PQCN4r+KmnQ+qBf29dgICBnaEv1fkY0aNGjRo0aNGjRo0aNGtNHZbbdfkkaNGjRo0aNGjRo0aNGjRhI4LCHTfqf2js7NLyzURQWQtljYcVWdvweK+hLdD4PRBDgz+E5z05LR8MYoQtX4+UoFuCaq2LY+v7Y8bDQYUe+/rfpNxx3Y2+l16wc9030P6Y995BkTUpjlsp6nkSFwEZ0S6IYY2xBXPqMqHA120PeVVuFJOPrDLzxOyJSybGS1ZepOQpZk0dfgS0Y7Tf6YfQhaRUtXgy8wv91TaGeGwlz+tHIsDtBCBXud9t41MumD3gLxFXcX85KslQ3jz7A6S6Ecpwm2LbGXP48Zo3WIvaFmEW6dNwni+3PFb7zBBOvwwaI5Vk2f4UTP8AligOj+1fGwQKMLbOPS46KsdC+H0RFe2JUS6erOhDknOnDGyqslyLyqXmsVAUwMiEnD7aIbV5uLORjNG16EQB+cG2pi8sUj2ULGYyR6+wm7eQgCjJCDobfMEPRG1hoitkAbOo5VVy+lFcyIOuhfWDp5zM8dzFBh3WQRl6p1m7xJo2G/8AsQfWRp5Q7y3juknvtxHfVNw6EX7QXPgSFMAyoqZw14t3y3ntLtwrY14r1TmRawlh51PGAuSDrD9FssGVPMvzDGA1s6/trxsty6KHIcm5QC5X4tp6/IxkA0wtIyu2Kz2QNxV6oEs2lmIsHqW1eV+L6dVIC91iyVsTX7cPGzclmde0M8RwZs7PSoSS8Qe2Pj1owM3qi2BvjVl94gFWOR/BXmbGFKJXQUj+vwkxbw6PP4y1rsOnX9seNjg2hV2y/qApF0KFpoZZSj9AXUWcXYzvQXWHadMrgVZ9mVfnJ1CHPMrRObU8BA5LrYX3dS9+kXWNExzevEaqRdHQAxhc5WmNqvnLPvHSd1StTV3iv95APc5gwVMJhzfBuMPbcdirZVTrtuatWp5r0gK8nVRdfWAjyvuDq5zEqkVz3zCgCPmgVpcsb5xLl2BcpQ5ZP+sMuKIO2cuZY3dfMWrviLHoum7f2x42ADePOUuvWqg+TRgmzreYM5IHkZX1HHN0LRMEqotsH3uNEERKoaA4Cj+0tshMpZu5gCVeNGW75tgInrMBdbMTcGai5oiLnzzKCsmPQPu9JlirLTnZmC0jV94jldIrqVtjZt+Rg2LS4qtQejylipOwRtZ0Vgujh4gGUKCztxtlRBDKeQdMM5GCNTZeJdyQYfSjiPxAUkjhtFHmUArpL0xkGu2mUa3nmXo03y1ur7XXp+4PGzindX7s8bBjkprkPt9ZXK9hO+2fpKK23gGV/fWBvWurtOZbMSMZwqLoNDRYHcsgyLp4hd1TLb4YN5gowtkzrTM0nUorxVSiqUOaI3qz5A2NVW7sli8xVdQvcq6D6iIifebxGp/EwfV6dFM/uHxsq8t6ezJ9QlDFsLyFv3iPrB0ar0K9ompITCyVmzvFDHpQFXNsSI4lwFoOvvpAqLgGfyWe0LEE8GtFOM+0M1ktY3i3XUeZ9DccS7s1eoCPO1hwHi7zKTQafKFHiWFXuBctukUjzGW1Rzz9srDiq7Zg7tn6Kp9P3F42OXCgwvXEVC1ni+7zC4cUQh0yTEMfyYnM9i0eKukFedFQHQgYERQwdkcI7aHb2naX/WmI83DVGjpPbgGHSp3/AK0mkhx0esBQRtVLWWDbBApy6AV+1/GwBybjcGXA/kUqVKlSpUqVKlSpUqVKlwlZY7dWa3+SSpUqVKlSpUqVKlSpUqVLSwzaoLVAdj7ft77Xv/eH2vf+7zXHa6A7s2KwwDEPdn93laCo5Indaxz/AKD6/wD1wDgrpcvpKL3A1tu77rFFVp3Cx6sC0Sl5S/ap5hdz4zBhaCcoRgeHISgb2nOO8yAN
oKRwMolXjgZmLAJY/uIQhYaZB9P8pqaTaKG/RdHqz3ul9BiMoM045d6PCrDGCLpdSh1xvPOCiFFK0VA7ZerEOhTjWnA8rebzGmtVJueoHDjmsVkFggNFer++TzOQRQ+86n0XNykXFHQ25W6PN5IY9TPWSxfy6z4li3RyOr/LMa7XUO3rXPj2qAsRwVfqcPZ5xn9wXBjHRZM/7pw6vpc/1HxKWruYDY60fTM+ugSxkx3agpoivbJ27HuzByzXZqqhnSYWBqUm7OFL93NVoK9s16OAus7rrqLqY4TABHDXorvVm2gmxV0ONFmGG6GJQOq9xp/oDJuZ+8wLPp9JpWoPBJSq5FsEXHQ4b1/K2Aj2R1KL9wkzARUcFCzrHC5mS5cO+LN58p6y5XWA73U9fMzpW1LwPQ+x/MzyPy+TqeP29vbS9I8xL+tLTLfpBxp84dg5rv21plusD7ql32oYxioqjGti5KeB6tFcd7mBchbjt5Ds8bTkrQLIU80NV2Z9Yf41fXRWKrlesBnjKlQi75alqHF2PoDR0LnrEoHKZnk8Kb0V8c3BkwsNv6sdDWa3ZCCWraG3YbFWTAyrWwg3rd7WC+7t82zdM3VNO2zicCLrYk2/tqH/AMdoqHzmR5MO+a9yLZRCu3gUYw0wLr5RUP5fY9YUTTVtUatzib/GxOiNP7bJxFFI8zY1CnC/c3X17yp63Qr5Oui8nGbw5JmGymtp9dxqpuv8afLIoXxfoCAh3JFTlgremrGtx1GBsvN4P+doVsweIGGs7jc+ZUXw46TUATKOaHUJR2HbINoK9UsDBRdrw85AxZLdNl0SFNWznfJXWGi4O0gU5xhblLyt4b9JdmmKTpl2cTCU5RQS6bd9u2qS7llU/Kt+sAme6r9nRhoxfJgPpkvmNmRNB2aW2nViy3W9GTzjPYYfURZgZZSwvdA2oqM3+294q32sRz2j7y8pYDsueAeGl6M73mOpnPqure7W5cBvueY+XZsDbh6qMSuVrS1vSW5M619EADbfzAOPRDiMSW52Wx3V26Q/gNC4FhWc2sXb3ic1LVtt0WULuqo3UCB5dgNXrDe3eiLtEbeRufODrleRsj68WavZPDeuYVEqbyjwKM9usx0dxKN2D6t1OG9gSe4fdYldGsNtdgcxCM7A37WOPGWqZUjIHW7sXa9jzvQGuZKo6MiOaHDncRD7tuWcVfRVQEAGsGxbF+lfttm6C1m3NZ700/u4CSlwJ6vZRnPXOMA+j0kA7C6+ri5obN8cFX7qukEhHIZ3Co9bOvWIFcqavezbw5rXIGLpziaFp4OK81K0VCWvC37HH/suq7YFXkvrb1ZV3ZmCO2nTQ455ydM6js2nqjvxDk6rRLEq3Txo2CzYiZrMuXUU1laBns35dMtL3/nFXnJ3lXVtSz0HttBrcXG35F/po9miU2mvHa6ZL3/m64xbaTzJr0z3JQv2IIAppTX7c2sTwM0iUVatOtX6x9q3gPYbr0l73T9OzfuRQvYg/IHH1iiHGwn0KHj2hg+obwfeNPFlOaL1KXgp1ziG2U0IgAvJjnbBdr2pA1VXFuUo8qRXy+AbcBt4PVgNFLNd+49BO0J2DQueWO5TWRwtB4FtwKTFZ7GIbkjSy1dii886l+Cmh05XoHLC5q8bB/wbe29CDPriv+eDH7kb942l3zW/WPq/UpPdQ9UWNXRw0N6qe1asZmEnAgbK25O+p2ZYYlUjsOM8Xn6Q0keG6XlyZTrcHIeIPwzd/wDSPRJmwwmza1LxjF5+kHtHLabuyVV2cEd+MgoJquFcevX90PPdpn6gwqL4Pr5/CK0aBYkv0BgFdLODx/8AUDGlNszAeMT8O/8AvrO7kkj5eUmVJhke0GQZhWp5mX28VdX1mdYUbvjr8Ee7BTaq951i7mnnpL3dctiOYiF8bL46w9FiDm2rgippXRHWNJj0aG3xMFibdiZy2cC3UN0SimA3NCMGYgtT7ye3JfNc3g26Xw9oKM7k+CXXFWVJ
F8Mhyo/bH37rMEpgLrZ/N+0srcvVl+qEAKxWnQviDWU8TGa7aiwLs4N18XKxhHArWfg+p5WQQldMsu9RgKN9XFS6LQq/sdZSG2wX2OpqO6A48hkxbyAmzivOZXb7H2KpONfSUlkxsahpun+NKASqhfS9ZSrDRRpxLbtSxTgZ43FBZatvH+LlZNRQFo6cXUJjUQLUvHvczICbLqbw9cT7/wBP7YrAiCxhrUtMJFcvsJzP2RgpxzKtK6Wo72zE44eOx3ncwVE5DCqzvEpkOeZ6sq+G0OZNvbmLrJbF8waiQ+IMFa1K19lWHgLRAnloWV0acwRgBKCmq6a4lQs4GulrLS2UarduVcsdW9FYZGzUzhc7Hclpt9thbxqDA/lW6cyyumGkNeLNBozeu2oHXagvUmf8ck36t3K3zHd0GD/8Of/aAAwDAQACAAMAAAAQkkkkkkkkkkkkkkkkkkkkkkkkkkkkgEkkkkkkkkkkkkkgkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkgkgkkkkMkkkkkgkAEEkkkkkkkkkkkkkkAAkkkkkkkkkkkkkAkkkkkkEgAEgkkkgjMkgkEAAEEgkkkkkkkkkkkkgAAgEkkkkkkkATp83kkkkkkkRsEAkEkkgALkEAgkgkggkkkkkkkkkkkkgCvAAkkkkkkgthpKSAEkkkkkmMAEkkkkkAFgkgggggAkkkkkkkkkkkkkkiUgEAkkkkkkWSVG0gkkkkgEEgkEkkkkgEAEEEEAdt/MAkkkkkkkkkkkkIQ1EEkkkkklzIg1QkkkkkkAkEkkkkkkjchvuLbUpo0/wDJJJJJJJJJJJIJuFSAJJJJJIc/afxJJJJJJIBIABJJJJJ/5SBdNTIHFR/5JJJJJJJJJJIAKguwJJJJJGd+ianJJJJJIJBABJJJJJJBPBhZ10Zsqn7JJJJJJJJJJJABJAg4JJJJIHLDtnjJJJJJBAABJJJJJJJJCvPWvQ12vdJJJJJJJJJJJJIAICpBJJJJAEh7s4IZJJJJBIIIBBJJJJJJfxR9QgH92ZJJJJJJJJJJJJIAAB5BJJJCaSTx1dAJJJJJABJAAIJJJJJJR1NfH5kHfRJJJJJJJJJJJJIAABIBJJJJLaSTSiBJJJJJAJJBIIJJJJJIqrt22ItJMJJJJJJJJJJJJJJIBJJJpJJJJITYfpJJJJJJJIJAJIJJJJJJJH7zm3AJJJJJJJJJJJJJJJJJJJJIg5JJJJJDBJJJJJJJJJIBIJJJJJJJIIAAIBJJJJJJJJJJJJJJJJJJJJJJFbJJJJJJNJJJJJJJJJJZJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJNhJJJJJIpJJJJJJJJBZJJJJJJJCZJJJJJJJJJJJJJJJJJJJJJJJJJJJJJVZJJJJJJJJJJJJJINJJJJJJJeZJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJIhJJJJJJJIpJJJJJIb5JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJFrJJJJK5JJJJJJDZJJJJJH5JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJF5JJJG5JJJJJJlJJJJJh5JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIi5JJIhJJJJJFZJJJIwpJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJdJJJNJJJJJPJJJJyZJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJA5JJLJJJJIPJJJWpJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ5JJ5JJJNJJIWJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIpJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBAABAJAABBIBBBJJJJJJJJJJJJJJJJJJJAJJJBJJJJJJJJJJJJJJJJJJB333vjy3eC6l7/AOSSSSSSSSSSSSSS
SSSSQCQAQCSSSSSSSSSSSSSSSSSQN/8AttNtdJppv7bEkkkgEggggAEkkkjcgkgAgAEkkkkkkkkkkkkkkkkkkn/bJJpt9JJJrdPkkkkgkkgAkkAkkkn0kkggAAEkEkkkkkkkkkkkkkkkkFdv/wC2/wB9v/8A7pbEkkkkkAkEgkgkkkm0EkgAkEkkklkkkkkkkkkkkkkkkn/NppdZtx3uxXfEgAAAAAAAAAAAAAlUkkEgEEAEkKkkkkkkkkkkkkkkkDb7emb7fcHaTzfkEkkkkkkkkkkkkkmkEgEgAkkkAfkkkkkkkkkkkkkkkDf7NtNpZttNPe/kkkkkkkkkkkkkkkjUkkkkkkggA/kkkkkkkkkkkkkkkn//ACSSSfaSaT2b5JJJJJJJJJJJJJJIPJJAJJJJJJJJJJJJJJJJJJJJJJB/XybSbTT6TTXaxJJJJJJJJJJJJJJJpJABAJBIJJJJJJJJJJJJJJJJJIBzWaSUUEE4p4Ia4JJJJJJJJJJJJJJJJJJAAAJJJJJJJJJJJJJJJJJJJJBBAJJABJIIBAIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIBBABIAAAAABJAJJJJJJJJJJJJJJBJJJIJJJJIBIJIJJJJJJJJJJJJJJJJJJIAJIJBJJJJJJJJJJJJJJJJJJBIAAABJBABAIIJJJJJJJJJJJJJJJJJJJIIJIBJJJJJJJJJJJJJJJJJJJJAJAAJBIAJAIAJJJJJJJJJJJJJJJJJJJIIBJIBJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJAABAJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJAAJBJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJABABJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIAJIJJJJJJJJAJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIJBBIJJJBBBBJIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIBBJAAIJBBAJBBBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBBJJJIJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIAJJBIJBJAJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIIBJIBABBJBIAJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJABJIAIIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIIBJJABBBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJABJAJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
IJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJAIAAIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJI5JJIBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBWIAJIBBBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJCvBJBBJBBJKZJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJFIIIJJIJIJJ5JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJAuBJJJJJJJLpJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJGZIBBJBAIALJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJDmBAAIJIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBJBJAJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJIJBBAAIAIIABBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBBAJJBJBBIJAJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJBAAJBIJBBABJBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJP/EAC8RAQACAQIEBAUEAgMAAAAAAAEAESExQRAwQFFQYGGhIHGBkeGxwdHwcICQsPH/2gAIAQMBAT8Q/wBbblyyWSzoaFs1IdVmvIINlEpg+jSXe+m50ZmWF0TBZhJaA95ecQBDCXAroxTTyBmea1Ypd36cDn1fWHkx0Tm7QRiHul4XMy81AEXK3Ibz4lcuDGIDTLrbyYGn8RwdJxqjI6n93gdbHoaNl6N49J0gqWIoKjeQGOsorBpO74HfFa4LJfCyWS5cuLL4kZeLZdV2649g+/pETgTjSXK1/mGEsef2CwNmSY9pDWzX9oQRZGBKCJACEFHgpnEZdbJUqVwqVK7ulhonG48AjLVW0vrqFOHv+YlYeL7x9v7vBEs5tzDt3T0xvMkSGZZhiXqvBrqa/DcshiDKjK+A4VKlSutqJZ3O/r8/14oQWs1JQ5rqBiLkTpiu8dS2svtUQ+Uag2+kbVYZo1K88G141KlRhwJRHgcTwLVD5n7/AMwzK1OXt+ebUSY4Jt9T94iM/sh2IpQ1t9EftA1u0+Rgv6RBPBnThcvguXKj8DxrgeB6vHt9ukQdZrcILv8ABmBKlEqVxeBYR41K8RvxBJUqVKlcKhiVwC5XhdLyzzL8TqVKleHaprEWOCOTm35iS8MFOyMobc4abo9LX1eiGIPjagtSOhu5wC+krZyuvQOUy9iYxxDaFeOICmAVjSAXzAEO/RsO7t6c9hYXLM4+kANPIFWDMoUK7evLJlPUB0KJWjnDUOY3lo9IFFHkMBQgthZDeUPQINlnPu7sAKOY7uexExOITQ8j1Qxa108lLwwCHnkK35apdXLG8TtKNfJbVpf6IN55C01DnXnVfY5W45jNYHpAFHk7D3xTkp5FtgdOYiJ+XJRrV7QvaXCKHlJaoO/8/Gh1g5HKAo7QAKPjWi5e0DtK38qhbfpDTPxIg0f15QCj4kMmYrRQhmnm4A2Gf8ndjhtWZa7vg5XcbN3HK7jZu42buJBZVxwq44VcCioL
KmirgUVf+E6Gi2H5v2o1Y/8AkCqyr83qBgERXUOkAtA2Ku/nneZEU/uYfCazrwddZvrHXWOus31jHSbax01hpGOmsNPA0lux7K2vSYIfMae/2hAPBrJaCipW8M8ELdRC3UQtxC2yi2kS4jBiEqUqqgAURBKZSqrEAFeB0Fs2+ksbE6Sx7b6pXodqjAq9S+9NX5aqbVMHpekQXJetKXAFGnlo0JvT5SroD5vq/H6LvhRrKLuUXcou5Rd8EvWUqpQlTSIOsoqoFY/7Q31f4++nm82BFO+qvT76Qx5uVFYhb9vzBvPRWagZrF3Uo6V5bS8Mp6Hbfo8FQdsb9v6ww0Dy3e26Li1e9/zDTPRIGDJu/b6VpAP7K29vLa0WywbXv9vz0aFtPLgBGKW1Xf8AH5gUUf8AOh//xAAvEQEAAgIBAQcEAQQCAwAAAAABABEhMUFREDBAUGBhcYGRofDBIHCx0eHxgJCw/9oACAECAQE/EP8AxsplMt0ntT2J7Uqo9+tdinoFFQQrwgYgqtEGwFOYXqIisukOTHbZKGIHYHwCaqr8+gSsMQijXaOd+C0CJuLr/T3mVkqF0VFBVvSMbKviGASoPMSDcwm/iV6PvM2Zlj0RCW5XjGqdtwCJY9tTd8MRFk8DT8GuZWO+H0hAMK7jtsywRm4rl8lsRNm1B9GDcfvPb/eJcfvE+MSl0WX4idUPf/Ua3r2xMNRXMs3G2pcTvfwFy9+7xzvnfiCCztKw0iN2d/1DwTCrgcnENivCfeMQcE0Co0tvpLIaiuPkjiBhxnst+3UorLMn0JYJYj20Y2z2DPAinzHmKvGV8XudRkM+Osf9D/ib7SPG/MRGnvachQcIEY4rWNJdzgR8lqk3cb7mNAUiMRoViG4BOcMIoqbDEHC7hHR1nTyxXaBD4lH2xSvq31lwG0uX43fscPaAt1NDT3ohTmIhbeQHPBDs47qLpbXvBLg5uzSPJHyS9fE5GQalTYH7RXZl3HspEKgmSSlbCMUq6o/IuWHDAgQxsY69r+ssDbjsDyDWnw/x2WP+13x2Kc5BZHk6hxg/WCjaZb5MiyDIKyJqJ6mK5/EYRmAYuAkmh1LhBOYq0TN4pZDmZI0alA8jtDDwmSaJgAdq/I1IdIMKJLlCrzU3g/M6azpQzHyjAI9m8XEOwn30Oq9/eZgBR7R+1A7YAAPT4BTFrU64JfhOPOijY7uDq/xFeIzLz21Zx7OKrtvyfgceCryxBmwOzFBR2V5a90PHEGxs4iKp72vUQo2QArk55YOBh75KTGz3PC1Ysa8EOCh87SHTwxtlrjma71wFscj4SwUUNHgPr6GPhH5jdt+eI7KYqx0vMWo94iDX7Xgz2YNvV78rDXuysLV6/wDEUgLr0Avc4lrC+pyd2g7iF3sOfj28Dyg5rof775tRiGGzqdfQiJXPoNi2mPtB4lc6e6uNpdfMRVPf0vGO2W94PfHUzlCo1bfofIXx/EGwv/D57kVWRN7+WyqDR07sCTUGYL11+JZKvHoq5cCvfpiI09xY+4fwx7wzDjS56vdcVjrLFn7tfaOmx9HPRx7cQmZB13Byab/33hvZy+x0+e5D4dTqNXO6ilu/SVlrHD/H9dpbcQAbz3SA9Ooitc/10AtQqv6mj6EtqvSrqP1NkAKDf9RluNPt3Jhl4fQ6f1DMGDmAq292vtGW59XKBOD+5pACwPvPb/aEAd8pbcrpx2KZldMRSLPjEWiz4+/79ItyPj9/cRUWHtEuMKGotFnzFORfWIALcQUNS25fXFxSxv3/ALJ4FDi7z8Bf3jMvI6TT5vXnFCgOGeS1v8wKd5W+709vNjtuXLly5cuXLly5cuXLly5cvyQGSuhdY1euowr6g4rZ7+EHIShflNsr7GzdxV3cdlxs3ccruJBZupbC46LgUQWbqNirgUb8jAk40n+IVBoa/lfnwgGBxKFeSkpBY+J+hlRa66zhc9itpmL2mYuGZP3+WJjM/v8AuN4MxB3AaFxe0xF7
TEAUahtCyL2mJQBjyOz9rK0PNVnWr+srR0t39Oa/deEAWjsz6abAsdaofrp/zE67H0+xqvjwiGBWDyKvOB4XV2HW917lF+1QOorv2LwP5xxfmx23Lly5cuXLly5cuXLly5cuXL8jFNebXLly5cuXLlz5CXFG2WPVFGxmKNu4o9UadwApmDDEUMMQoKIAUxQtqABRL7F//UKRLrPSy/td/j1eTmhhGzld3nrrd/j3qLbfq5yG2FQHdXfPxWvrEpp8EOQipfpsVWS5+Zmr61q/x7eDSsDUCij037VvPxA5PU/oVWpS2teCoHlL0v02FUSxpb4Yv5v+K95rwVenHDcZlDXe6xV/N6+n+4lW/wDvQ//EAC0QAQEAAgICAQIFBAMBAQEAAAERACExQVFhcYGREDChsfAgQGDBUNHh8XCA/9oACAEBAAE/EP8AmUI+kx0On7td5wmnL4QY/wB0sMmBDQEC4yT1oETteh9MCuYNDkg7HmYOmy6JcnCZaJdE7cNmu3wnKOk7P+cpE5JJVIIaAVkynZ3L0k7cOnm9f2SXmxJ+gj64iFLNLpcmr1MDO3uK0zkTkxyElJCOOIuYOK/J9i+LT5go978fkLt+A/mFFRnb3fxUsGkFHAECAzjgTc8NMEhZCID6J/hX8GFENKEu9oFEDkq8H91cplMplMplMphV4rMnY0a8d4WKNj3gVrzZO3AzUXEEoj2P9sIydIIiizrzlSl+obTzTcsrMp9wPOn1Ohk7PENrpOg7EyBJxIlu42htOw5d/lyxbcDlzhAfu4q5b658n74chfS4zws0+TAFVMF5cOg8I4tmSge7F9ThPBwdTD9GKJiMAXczvSz3KiHB+Rz4YMv9IZ7oAHtc0BAGo+5+mM6zD9umVEM4mH3wQidyfvjJYelMHqej6DqEPq5v/rYC8do422vZkUXpVmD7uH5RynHydZy2otOwCbI8zpTHM0Fy7f8AKX64IqjhKiftE+HDOAGFQno/X+5ef6NOThV+i24NKbOdfg2d50AqUM125q6tVD4OV6QxvVAB/CtPxhT+NLTFeSDePeqDgQojafmhlaiJ5QM+uJp9iX7pkqNOA792bI/ph+yxSjPxn6GVwL2Li4c9z/RvHn+SHLhgioKfuEfo4zpVBGLBHdH6TGOAgYHxetAtk0WRB5SUBR/XBbWqkUH56p8P5QhxU/3PrAJtSr7cUVv0zkwbvsJXx+AITnhxP3g84R8DipWAUDpHGDFt7BPfM0vnI0iRiBqDdomD+CcSoaeav9aiwcuD/RPf1teMM3/HXyyfb24wiG68H0YEK6y3Rp8pgHpA5orj54rcdE18nDisHtXfoOvpjZHEth8uQxUUK6sPBzlIAbv9jHfYKlIEXzWZJIAO1ye5x4ZhYKLct6fDkOoGcIGPlwcrw6NEfSOceEBDQHwB/cvOBM+iA9r++WZ+EA4Av/VwoEIe7wGHuL46ynJaPN+h/vILNSCPhrrieOOJMbKnIh9nK5wAPLxAvCPIuzG0GMAeT6Jv5MlnvnMEp4Tn5Ma0K3Kc+Bf5hNR3oAVXLdFX/QPoTAVMQEs4vm0+mGU0EymheeC+hm4gtm293gAW4quZfI0p5mCfFRgQUHW+MQmERdWt+AF9s4cMVtct8YgTlYpbcPj9zMLoyXaIn2c2mLvAqKdMH2xqPjIKFBqqhH3+SoCrAw2B130eP+8WvrX4oMoM2X8EYg7OucoObe+n+/wKaL+avu7bUnV/Je2Dg5cMCZysAZ4wH+89oQnqenvlyE6+4sgZXJKDp2A/Uw0IjiAu58Zqv2uFAuhzvrEznn46H1zpQQKfTEVJOl/SDPrfRxyiKG+5T9gDHKvjOZzgt1h02B1tee8AfCKMbB7y5cv9w85Ir5KDGzvof9sDwaDb9eskmZHkvq/gp0mz58xkAR1GdBXvw98cm/w0u8V2DmUIw62U1i3rr54PkET6YGwtmvXQ2B2F9/mJeV7DbZ9dfViIedKc+1bM7JIeA7cr
UBpgnqfbIcHEG74+2NRHT0vjHsKT0lNfbA5pHJEB9dMFvej/AHjUDNnnofXebxhrop2fuH0/AlM+Jo19sPyoKxEH26/3mpMYNgcOGjvYYCHR8Z7jAO8+rlLcsPMffX5a/SwfwMXN0AO5bgDvKrC3VPR/n2yqczKSuqqdYbVRqQH3ch3PCi+xP3znnCDe7NvfzgAhBFf+7KIJxyOHdl19G/2wnM5zY4NcEwA3XLfhozoPW39o7yYS8yerm6NRlfNHPq5XMATeYIUdYRSuI66E8zCu8K7z55ZHr+3ecgMQ7HNT7x6X5wVzUMTwnk/XChConf4qCrg0/wDVjJpp0h0+H358zAJEET+fTGke8AOQ3yRzh+WIQSjwB3lKCvb8D+Nj2GFs1t3y08ZdntWGgoCJffWFQaQJuhVfaYvAnaI/UytCikSTlF4ydjpKI3CfWC2IOWjRjlF6Icnpxmm3v7z9EX64AAABAOvzIf8A+Qf9ZAM49405xNbypg+8V+G+iF/X8t/p/ti1g38JIXw7X6YueMcDtPb3vFi92kD9nJ0Ic0foYcieyT7SY1sr+DWMVJ7NZXSrab9c5csvDaY0sIv6rj7ZSzsPN89fphM9whv3x54Y0csANO8EKaReEgPssUxtXBqJ+IDCqev7d5/DWsdNT8uhNP0dcGQKsMAxE6TswkohRO/xQAIxGjQX576wsA8UmsJPnnx8J+PD8s7MJnJF+1r6MMmiJgHt/wBH1y5FAHA8OAg6QaDgfW3HCEADbLP1cCSexeW8vmOhl+oYBnwgIcvKeuMMRSQcG3foFyYHz13T0vozZsPqL2+oPq/NG3w/65xZwZxZqywYafiCoe/y3+n+2QwDiKRrxQhAi6RB8EPpg5JvGXHhZ0MS3TydH3x4LUSJ8Dn9B7xUPwUacEm9cnBQu1gPHr4zY/3zfDDveO0PgP5rLH2h/wB4qEDp89frlsScmkDa+lvyGb3Iwjnr6ZcEdOGDNP5OT+35fisj/HADo8Djzx4x1cpURfx48nWCJTY/ikh4Ho8vpf2wJIAjT5j0z/X4cPy325gGxCV+rHxi9iJfjeLB++ApD3ipAd4cEKusHoVGNGJFCbWPGZRHpPrjUL9YGfrGDSHJ8PD5GbfzT3uFfpf7wkIiecWbjOLOjBxcWnCpD5wVPy2DQuGID25IYVkY7wkOuEV9uPIr0uNKxVAQeMX4/rnr6BV++Efjw0dgLMZwCKIJwEJiX4HLEcL5cdjftT9MReBNjDKKsI7+eDJ+N5f95oAHgzXnBKI6FCv3v0wwagTfCA+uMm1UbZ77+uEjeMcDzB4zdoKTf9vy/oQyi8UftN0/dzrNJpdnh9ft+IlhVXgw/wCqB4QIPKfznBVStXIM+QCX1nD8rSZeL0pv6M36LkaFA+6r6mCqFJL4DGetOAfLiDWNaTxcuaEvqFnGXgod6OwwC6iLD/5kvc0Qt84oBj9GYrEeB3/o1Poyw7E2MjgDDpvWj9RfhPynhyGD+JBv9YU4uZiA3e1MMCA8h6wjIeS3ht/oP/WF4I9PBH7EGXQK8Fc2QTde9PzUviX9MCI5be3T0YyYX0BI/GRSOkR/fHrfw/8AM8z+R/1jWo9WZesGghB1vo4a6gF1jPoaPpHvCpUtnL2s3nvmaMmon4WnnCqAqvjG3NteKIPsr9MJO4W9WanuWe8fMgeqGzyUuKDos8Fdf3XL8L+KMUbCHZDz5Pr5yYwbX7P+sUBXQectDFRwP+sLkACdlvir4OvbM0nfnl7V9qr8uHH9brnP1ShlHZHTObVYp0eEuT+HICnwnOEiO8F/UpiZ2PVPqM/TN0seTP64TF9oZ6LoPq4VNhqF+yOIgO8CFdswyUj9d/8ALgXHaJ7Wj9XGCq/jTS+G3BswyYmO8EPuaT0/lcmcsYJU4e446Aft+DlmKfGMJ4xbLJ8fgGmsYTdH9BfzeAn7nDfvyYqgldAL4RxxVfj+yZSp/FXOBQf8fOfHkEfq5OGkAQPphRjX
thwZBKQxWZmdiEOpWY2QoUCcw8PjpziYPlnJ98AKgPOdciAq5bi7ah9HLj19garBAkgiQRGPHL6HCquUEhjbvvkxOTH90cv6XfOTdNQdeRfqOueOOHqROHi4KAUeq8B7uEGkLbvovg/Xb+Bx/XQ0iT7xGCHzfWJOb6X5+Se83Vo+sZTXyzaFB2FwoF+AcEXx4mbq68G+QyH5ZGVXZwfbWNhx0Y+0wXriiYnfrFTRYaFD7inp5AUg08IMSAP3vwXb6GMQy1pie428x9flPDg7PwgDEd9bf6EzR7weN4RnBvNWacENuQJpr7dv7H3zh+YspVvty6KGEf8AeKGOmzHVTvEp1OQz9jBaf9B/vLc75D/Th/PZVktuL0VlQn6OBs0jkcETvjXPWBXiBFkzIQhfPzjMKGjVNikGKIyND7PrNx88C+v/AKw6762X+XUyRby/RVY0IR0a4o3c544uIB3UlAjPo5t+nIee8GwnanjBCATPRP7p5z9v6UoiW8ji2ErB7sWvXXXjHQzCVpyigq8WE+v48PyGqr1bGik62H15Muobq0+VOPrgAEHYm7h8xcBox6d4vYfeLLueDrIA/wBLjxMuAmNJuANQ+cS1NWSSK6+WRHnvH+eB9MOQ/JT5Vt/LeHBTBJxDiFH3/wCMhhxMZyTznyYQYYamHg9vrNpI7ZK7c4vzCtLM7HPPDBtfHp7zhYOBOMgB8uNd+Nl+wczHlWbjf95zLUsESTnjEwxQJ9UzRD6YAkvnIqaM0YAFC+zJYVstd4UEsVrOxfEfLhtNqHtwRVfWSxN85AS/UZtKiMdwIVl2g/YMLYBp2eKln92jcjkfGRyORyORyORyORKCRTFSWfR/L35iVVfbC/DcEehhG9AT7jk10PIPpP8ArCDAemzAPN++PvTAxSWc3l9eXEArmyXp/cI9YLK8FPzDv814fwGDnBtGbLffFkA6cibPSdZLN4eD9sd1LwK4NonfL7YVX7nl/wDMhZwfmBPthmISAiYFt5XmfQxeLnis1Rw6VgOA+Vi8hPkwDzhwJ6RxL4ALEfJiCqW5EjsT6YORioh7Vt/pgI2Zh4oO0wuCejJ/xLSi1Ffr/TjJPSQrJSHG3CXXDA8ic/loBEEeRxF6Aez3DT9TAjeb/wCuv0zgYhqviD9GEhAQAgfnvGfLH3zyDB4UezPjcyJ9cX0vX+rnGdfaYSCTwM/pkjR+mHQmEmBD82HjOZXyZy3+RnLX6M5mfTnOj6cW2/04Ln5BcAJT5f8A1kbbuLftg8i6IP0/44XqoFEenFEiIVe09XX2y0FbqZlrcfP52o9JAv0UFP8Az+1gvcZaMK8D6f7KRMJquy/Z/wCbN7uuQeQ5XE9uIrZEficAX/WACCiiNE/MeOLi24aFjhnqP9nSqCNKUuKJ1+HqJ0F0fPn+w6pn99HA33jcj+U5ppOn+OEOzw79lyvtf+cDkUrCY9tQe9YK9/YxJSsP0XQ/mWQjEFTf3Nffz/ZKjFhYcuBJmpE2hHKd7NeuD85y4AKos+29vRjMhjc3nhX0U+MjhgMXJUcvz/gELk0jo588G+cVnIJEvQ0vT68z8sXFRBiD2e8TtoUeGfOv2/sTyDjaTt6uPfXr83dCCpFDfjk1zvCMiSp5u/G7o5MEhDiqoBDbz/gaxaRTi/XGg4ngP/gZo3vTldidPr8oSeHVt9B2mATMNwEo/nqkmAlOhOBTcesM5MWAfmWIfw8O+hEd9YYUTNbSgfE9meT2R9RNv1/weelwSgcx5ZXnJ10UWwOewevyXWuAojyZVKWjR2la8/T87UFHnmeGYan/AJy9vrwf+/l03kAL+DEyuznDw8VPWt8YeOgMAHFe5/hSzEpFNlwgjFNZm3oXeuPpt8lIiIxKadn5Egg02GmfCefHtxq1A2Wz8xCEgvY+jBiC5YTr5JX/AO/lCpcq4DnTQ74ZiqStlOxOD45qJ3i2i3JAe3f+GpSOzDIN34rW+Wt+s1ENu6L4B7/IkWLATkV2
vJ9D8tQKgDaubIEYkdPgce/uH5BGhVP6bo+cSOMD3eB3MlCyAX5lt/xEKgaTT4xfpQbRs4jjk9crP6zooc2fA97wbT7BKom/ygfdcrpJ8puYZfI4HgD+tR1JJUOHvrAHKULeNdZOdfDm5veDw5l8f4rp3LsG2rk25cmfVk0tlOZ5/qAQBJuNHVst9/ks0sIAFXnesT85a3yv6rOInjF3PA7mdG1mo48U8frhwBYVbatfKr/l0dqySYBXl4P8XBEAFV6xJRNNI/8AZn7v/NxgGcoA/XBOPxtMmbJu/pxQYYMW/gfrn8Q/3n8Q/wB5/EP95/IP9/gY/kH+8/kH+8/lH+8T5HJvm38Y8mElzsY/fEIp9f8Abn8s/wB4VMaiieR/4ZYJKD6fouESweQKInIn9RgGgx8gil1cJkCiNE/4b+a8sD67B60VT24TeQdz4xo1Vp/NfyYYQedNfGuMNAAlqeBvE5STaR84dxL0vGbhSJQ4rZtejxqd1e80Ix5GX6ZCOBOLX92UqiNDb8z9MY13GCqboz9O3L0ySBV2ozW+u31NyUCxTyozW5rt9Q1fIkis7Ga+MBcVIXT51kYLaB2CJvRhQaii11C3Wpv743fORXNYW9HeUQyDtO5XD1FCSpvpHEmICEXzLf1yCySH5it+tyDN1Cfrh7B0Q3yZq5/P+P8AhD2lqSL4HK9G8G7m0i6O46Hc6G4IY551HQGivJu3OKlPPmAUdgNhTh0023lJDb6HhGUURxPwg+URg/gBs9A4J60rnpJotF33LMslh/8AZqa3cijkcDw7KihACmqikbIUiWDyBRE5E/4X+b887Ht++L+Ukq8vhxu3Byl+Of3wygKI6TCLX3As0Oicq/0sP/FYMof/ACv+sH+a/bBwiaCVYEbyo9Id/lG57D4unIJEQhCiPYnefxZ+2cL9Fn/iGP8A+jj/AODxa/08morvTHs7dDvPbh+cFO/wmBzb+Tr8n6GBfrGzWOsZ4nQZKcLzi1fXMEA0S2PjvszbcWnXo8N7gzeEdwXbfPsLKKT5xaRgkaDHTrVxu8qtvAs0m9YQUXgWgV7meD5zYEhQBVoNbfbFpGCRoMdOtXFEopLIajruWawDasBS2u0NgnIcdm+sPi9gnwtydh21bM9hFEdmKp7Cby4i3XhU3v8AIdAHQQdTgXcFdWBvFZ5HIYxBThERRHTicHRweIC/oZcFvEXtCIPlsTyOVC0B3roNp4OsdGrAGBrwGxWUFh7yankctKdm6UMXMDzPCQogCkIUgllLTfI8cCeR07WlEcXgBKQhSbUC2sUNEuT2QdRCzNfnRGC8VUE9iCkHah13kKfAJwuvHaATccHmlAhIK+CLdqi1DzAQv9JF2WDpR1kxzooCptgiI7EzRzIEbdKm73J5WLhxTI+poAvCDRdD/hNP4u+UKOXUf+sA0+Jo24p9dHofXP6ZVYW98Vs+nGdt1lmvefLD2w9sPfD2w98cgC12Q4eLcgosQqqeCuiaTAyjhLZ/0OPpnsz257c9mPtj75Hf3kN/We3PI5Xf/KYnuPs/9Z7Cb9Pyf4/tjdcxKRCew4Pb5xlpWTebeDGyUArRtD5TASpgSSBNIm7irwhxsjBc2BzrYJWqqyublhqKw0h2vAeXKKU35wVtTgdCg3FXhDjZGC5sDnWwZEsQspTRXtw/CEXuYNJhDQaSm08nF5vSszktFEg6KiesiXsAaJocafyANYhHVZZGEvLZzgtBZ5QEOg3sRaaVdx0Cg4WIjSgcnDE7oq2Ynwlsb56zkA7STGgNTwQ6WH6215aNZ0dzhVlrHGDfRMZIalbKAvyOTpboFGJELrIylgwoSqDKgcnZrTEL8xuDSRTAALJx7mmaUBiiEUWEbp2pQagIRFAbZAjmdqMVT2jo4BcKuw6WjZjYXY5bhfHikf3CIlhaaWYpDJjuAARFIAPFxAZqC/CNTsgKILuSEL9uXDoPAVHYAFBubwm3vIsocABlX8BaP2PCUeRTf/Bf
wvniXXa5sjgf3yHDjhWOjXB9n1/45Em87y/s+nD/AOxgqBmaO+8mJjgBt4chGleXFgVe3ORBw9/zeAjVvv8AneRN8YAE8veUYhXATqK/r/1lavnjzvm/XNhaby2UHbyDDwA/S5fvJm8/7mE7snjP5Dx+T/syZ6d23LLhjnnvAQbXa7c+XmTchENLxzvIMCffIQxyDCo3rCnq1jz7OjCBStFHyO8WxCNsIjTqBqaM6PLd6EOHE83KDn2ciMAlFPhx59nRhApWij5HeCBgEcJQQhonGOXoGrkUmiahMLnYCsOqFnrDQkZMSFkhCa6zg2xEUBIJ09fkOalwYMwAK12qgEuIYLZ0prvB01d8XjG74IAGooryJ8CYVCQwTE2L9MjW9tKIM32Md47ihaKQpL6Ipw2KdLSBFmEyASITtS/lICAABwGapLABG+RYuiXZrZBSakWomDAAXI5liemNQgSrOXLCNqlcKLUGiBQDHvYZHOVUV5BoROC7gazpJoisXrkS04qHKt2IBQZWlza74I1gFQACoDdzBxZn1asjKUOGOxxWptY5JQpexxJdtV7ATCb4jAKd5tWdknsm41gXYCV0mCAMlrHISTtHODaep5Tlg1dAdlMuMjRWWbAYmlgaE2uOBTc20d7Kf37GIIRfnhdhtLGb5EZGc4q7bAAYI3HYrXzg9FPnOM3sHHKxKt4Dn9BwtsOu9YgXaKVnnmYcUPEaMhQbKNP9YAkNBT6uZgEBoKXrfM1P1yLgBLVPfM1P1zX5CWr/ANZRXJFab3rDIVijD3hCrE2Tn7veCBU1KfquFiGNvEzhoAn66wAIMtT9Vw0JNIm/Jv6ZUK2AI7x0HCdG8JfBOGLDUTPjORU8X+ux9ZrqAZkFwBSN1/uzWta1rWta1rWta1rWta1rW8IKuGFy2WU3wn4byrJHAZWAHkYcd0RvwdRZudKVebAm+4gkkzIgiMrcwtDxV0vRRDdBYBl/G8q1CYQEVSpgf0CcyVQlauyhOQmhIWgFFM4JojWxcZgFIEEqKAsymmnCTNsDiDmiXCgtVVfWyRyhteCBE2JwXKGOZZsLSHfYdHGu35obFLQU4MniIq0AltUZGxyJlyRJRDoWFCJIaEi2GZmKZykjWKghA3bV+OUNmsA6AOsQAC+77oV8s9Zfm4V2cqL6JvjB4aVjO4SMsg1iVwLlAE5OshwRAoCTJ2GFIoNyjoC1xY3CVQZrUQAdWTjczZJvIoonal+v9/8AyXlgfXYIPr9zi6AsNUxtV1gH9/8AeHOgWp/H0wwBAiacDxANZ8eHBd+7v/1hdVkaAgGCchUwvpP95rNdL3qeMsJlHZXxMPygoQhR4nr9MchoiEKnU9foYLjlCBu2p3P0MOPSQgbLh5xKF+pxjxipCnT60yasTQRyrO6YoMEdPblnesR/GNgh5wktdISvsyc62uHyzKgdbBHjX+sAUnDlhS/jZk3P4Xx+BeBkRLQilDanr+90BocqpIpSWhxiuX6gkCDLseQ6gw5IvxBPgLGMVQIimjSVJAlcd+IS01CLiIzyoc4sjewMEmND2YWXgpp0bYaiszpGniUcpyN0saxNOVk6xIVubkEGz8l/Bjt8mhShHlCDiGOggBCjoEaMsk/WSx0EqIyCFWDeB2ARBQEUjC9Zz44nBYChWhx4Wjgi6gLVHfaXVq2UA77KBN1OEJlPSBvE70TNtubrSm25po6cmPIxfcesTxxX0fRNnG1mKUWHrdqNtapdLM5tIHcG1b3EWlGDLxqoY5vSIbQAGNnKGZGmqbRwIvK2+M+E50k/WB+v9/8Az/nn3dibxWDdcq9aKAn1OvplqB0uOOEneADoXrDflgvLB+X3wfl98PM++HmffIoGrMDlZ0YxIQYs78PiO9XFUlouwSx8JwnTi+f3x7y++J5ffEe33xfn9cfbDa7L3ghATgz24fnLgP8ACfhVX+M/v4K4vS8JsC9iHguL2B2A4iVVdEvWF88BCUN7AKhUKxi2mqWQ
lYTQrZfGURmjUFBJSBq1HRl/BDit6yeWom3AA2BZ8bXZubKWucHFI2E9Cio6RqadYDiACbGVO2tVWr24tHNEXu6yASuB2lpy+NSHMrO9MoxNRCc+XgQYIqKOBUcxRQFUd30GuQB9feobts4gjgQCFrkauUA+hLjDmYO5VYPbHa827txRokAZTUKPM2Ih0pMBCCW4w+0doxyTvH+PUg7ZoFFMFFvAayQkVLak+AIRhTChFLQHlx2BA3AwdjRzzyoDfcl7wOmD4BwB4/v9f5O+CUtXN0YNAm3y5qfQoX405RYHny8v652XWd13nzw9sPbD2w9s+eGxEzYoIn2cWtv72NN64nDrVmNVoRfL2/Vrnsz2Z7M9mO+Pvnb95r71ntz2ZeL/ACmOtK4qnlb9P78SPo+EQqKV5zbhdHZ3IE5hoztE5V59Ad0gHvQGLyRqM9Ep7hI4WFLPmkA+IJwnV5nWrlaWbcVYJdrXtIBwAqd6HhF05vvjKImGIRQdqw5EnyAEBmwaNFNExqOtSGV3AkQXYFZeoLshIBJAeUOhUXjASHcLryRRQVUcuh3uE0Ykm0FGRk3kbYQQ8LEJFNGP2tkqK0XSFrwGcCnqvsTcpogQVroRxEMRalBSpCldmFVutiha32AdDGAxmTocarOkGiGohn/B/wA/55svy5O3gf3yJa7UwxGLxozmg/H9cVpr3py078zDydfGF6idQzug2IczKpQOJ2fGbAWWQx9uvUMQLqWQ9ZTKpZD1gxutTWDXZLwYvkBSTe3LZaD17/6x5IJnvnHj4x98ZQ03jwh3k185RIuXkz42s5NX+XARjf8Aka/v/RihT8e72M+nH0ExfCEC/PbT6z1nAhz3SIMb+N/AIOIToiIDUiaxiGha4sRO7QkkaEgOS2Mpk1QA2XggzehdMHuojSJonMFpE8QOPJcxqu5Zttq/WEdQ05cHQ75t7wBH7iZ6RdoCBYO00ekkXwEDmTQh0hq44TBo2kWUUoKOQHbBViokMVrSim8A3UVIVHKa0SDbYSClEA8AaP8Ag6mucQn9yowE0rziDHGPeDtQ2quUHdvnBmvtx5R+uKr9RiTgYozcRkUSdJgqOACvWOHi0aZOdCdEydiE6oy/sZOwxI0jL/rNSpiNMk2glHrEqwinTBytnKcMl94caBUUvOJ0AikawORQdnHdpxmrsf3mEEU4R3jSCI3nBABJxi2LZ/Bmjy/4Q7Qw0nsdORGtBPl/UojBkAMMaJCECglAKIKZoP6Z06dOKlClQkRHwzRR8roaSiNgdrhOgmqJwvcILLtAs/4b2EjGjrgkInrFDl+TN9o+jKdB+mQ64t1z0c9XNEjOLXjNEjNEnNEjNEnNEjPRwB45u4YEuu85NMl0wpxPpm4bfjBoB8GGU5zRxyWagf4o56DIycnJyPWR4Mj1gxjBlgeFGw9ucWEMWclO8jwZHrI9ZHgyPBkesjwZHgyPWR4Mj1keDJycnI9ZHgwIf3hayp8QpLOqwusPy51ThQ2fF68/0CRQGyolPCfiHKvg2QTluJKMkHOX+p/QZBEoXDRza3+pHn4ouVXgxgsQ1RRHsR/FqCDGlT6Df08/0ckcAgyD7YFH6Yi9Kf8ACxo9+gsButE0EQugx1LoegNC6gga8ZtxbkBSC6joPRZjZ48k8UWIj+/eBJEOE28GzzA+46CH6FYojsHYnXI+MW4Og9BBtkl3Ji8ZEYi+Oyb4fNm8GDRpjcEDEonkx5G28Tk+PTr2/wDDScLNq6vCtX38uExrIEdryqMI8DUtwER7FTrJGDgBdUdCjoFecS7xdOodR6N+/IDkEppvUZtRee8hH5WME+l/RA70KN9MiCfrJ8N/hW48KC0gEkgm/Lmwp8kDYaEXuH4SUqTj92f/AEYN98kOkLTsLzx4Rr+iShITUMU5I4+QUjpJH0mnDgsU5e4KIQ7WL2YMclYRLbhEL4H4UTHS0UIzQuuV56YDXUVNhhIp2L5T1CocXxbXN8MbpPGC
oYGyTu8GApaJUCBAxtvccRloD4V40vReHmkRhLobNmngd+M33hUwBJRNPlQmkYak2QAy2KWa++IJyGxikibs7PeLxQoS0mWiHLjP4f3/AMIhFYG1x6sYCfLzPLHXkcQojxVkR4WQ7fnCNPPQIg7A6vIj3mhS5eGAddKPkcFiF4E2lX75f48yiEHwAn4Mc2aDDQfGX1kWjfM+ZcctvGTWKJQ6jpKjWCMFAgEaP+GOvl2mUdAAa8Gzdl3eQnLnB44yUEZhoh2ADmeV5wK9EUphCTR831j0/ZQQEgeIuJjF7vKBICq8vjFz5y0YiJgejZifqgkRK01qQVAcK4FO/wATWAiXmp9ctD8wGAMvTEMZvIm8W3wpJSRfLlGuCW+glIMI9FcMxsrQUtMJB7N627nZKijGmtNJrvEeEiDruzsO68JKdjfZl0jWY4hBakmhzb6zpeB8MmoACOuebnJywtDw7aegyABsSjAKdgzLqwUhFHJi7LedDQYIqh4LU8PTHjFDBhJsA8lAaAd3I7eJZHrGxxwwY8X9yS9DWF9PeinHhpiHbxpBJtPUpYYaEDvnWVxInQn2VGzBU6BApSKx4UGfQ/4VlY00fiB9JgFnQ0Lz8r2u3H10UV8QPoGDOeI0XQ7Krba5twWCEgVVfq4hum04oogRppv6uN/UFQPInjF5oRB9BA9EMZWnN6WFDz73gxwg8Bo/5Rk7ATqox8J/lhJjUBNGuobOKcUIQTqh3uMnM1m9mnG8D2OzT5xZL0nJKeBvS5EpkY7xt89YHgcqFp5EdlmEugYwDwp09sxjp8LRJeFQ/wApEdXfsRITsoa4eHB7xEyKPgoAKdD0ZEE/YqQOF216xp4eRBT2mTtoHWT/AG+A6N6aM3yLlUKzCTYLQzlyh6M8alsG6WtB59MGa2MFKBQdVPXH+UicPiYFJBCgORykzpQvzOxbL7z3ZBxNpkC8Be814sIvzn7MnrNroE2poUhQUomlM37JSG3K8QUFZLmvLxk/EOm3esMYx3OCinaNW3f/AOSiB30Ro7iHah7s3QUWJCUbV5X/AB4FVcdaSECZGw3/AGJIkSJEiRIkSJEiRIkSJbV90xynCmn+yJEiRIkSJEiRIkSJEiRIkFBiPWVgRR8Dj/EcQ6Kis58pzPeHyhWQDlDlyHgUQ+IvL+BXMESXinL6PwdBR4ioH1X+l02JQ4B7fwclkGn4Dzz1+UiUAqrAMoTJNmYwadj/AIwyxeiH8K0PtLGizpRcB+VfDxkkpx8oF9kD6+8YgaZ1cIDWrz80AqjV1qJmmBHVMc/gK2AjbB2so+rSL0iHTQRgngPOA1746wINGjjfiYVAIVZHQEck7d4feaNYqQKpwrofouxIJBROOFSzQ1NTNka6WLjaQ28aT8HhsLiyh7UJ5ws/OWUBbSJF3I84itByDHys9LI4jKFdCBnh2768Y0tHqiqKO46XvCoOXkNcZE28YnJTQKCCITfFTzpPWaCJHIC3bU+oM2lY2ITUDQaRwcIAWmUN2np5PYUtHQyokPgt4MemoKGDaQUUt25Tjb6MiQ22U187m4cbGYc5qA27esH5L2gN9g/6eSP+KssS+2HvsQbWpDlxeD+9nYSm1Dw4V+pKYkBBF+TH5VKsfHIdPq7zYvzKlDHkVVe8lvRzCi4Iiior52PVZqECq8j7bNgemW3c6Ro7XRiTK2PQikId+TwwXmzAJDNC1ed9TD36ObWQiOgL0GJgvqJHQaqwABedIeES4IRtVJ9cr0cmN5U1yO6Ywa+w2Vg0yDKxlgmhYyxKSovwVwzs/wD8CiEohtHdVA3xZcEujVrurvK7/XjowR0nG+ZvTtzddx6VhXQHm4u+jDyVEZNKthKTDVPitpGxGmaYuqsDw7IhVEJFUqUxuiiu5OxteADZiIYtqADZpQSx1eHXAOjbjXZqM408Yeu55YBCIeZxjse91SU09n+NMsVtetzWIsSu7ghT61JxKH2PyCS7UM7TMQXkeclQ8EnE
CnpXPPz9SUqnyPjwZa46biah5Vfx6LDWLLYPzhf9nr2qr2qqvlf8cZYjSpsYsND2sD5wJ6/6GKWGA75cTGAPUITS1q/R/FaGUWuGniRtxe/iTsBt1RDveCEEAaI9/wBDhBEj3BcXc6S8Arvy/pYZjMNOk5f607bCUi1eH/GGWLwaeghT9RgY9JS1hwWC5UXjmIIQ1BwZtCa1cniv2TWB182Z5BsAde3uIgz0iQ5nYER6Z1cXkmOgKjQ0RHxirQeBjUvAK8uCyOvUgIOequnAdZ+Cdi4bG6fA3hW+ZN0yu15c/kPLHV0Sitz0qR+c8IfXlpaMWnc43TLLAq0itq5Rs4mNDv2DuuO+t1t7ig5CsIbVhGA7kdZJpJrdG9jqN+MIb41AQ9KpTx4UwSwEGBp4LHlg7uMdLphAFCKJHKnEpH3ZTEg4gKCuxFzXlTkYFIGkgBV6M0e6OVwuaUffrOugRBHyPf8AjDLEtFzzgBHv9Rg6UAJg4FOLlxxnEoUoER+Ry14rXWrReXaa6Mg0YBDYoO6+CtcQzIdRQRAiPk8pjiLrNSRWchy6DBqrgFA7jcSHPrDWd0g0fkXbo41jkJtUWZgUr223cCBw4xAPCo3rbPHrQhZSXAAegVxttGsu5aMjNldWeaYQTiJ9MW7469QyD1TWDgp4MEBUSIPTDCpQdBVAaCujwd84V+mK0iDYUKN3xmnAHuoomh38cwi91uMEYaLRxwTtV+8HVSFaRNIvLidqSCAg0hBm++tY5rkERzzEIoV85rlV/wBgkAHe+/KJbBqhQlpS6wQ61i/qJEZRcwK/yAyxelc1X7/5YyxTP8VwDTxw94tNnXcFFdlXBg+ujGZ9SDgR12qElY7E+2BIWwUyBHZd4vUt2GjBRbrcFusRJRLmo4cRpZANgdxQBOXA2ccq47Bs44peTEAIUNqcji98Y3fJD5pNHt9+HDaREPS7nIEnXjErB6x9jT9f1xNUig/ESH6zFAeNKcF8iD5wgCzXgBH0af5CyxfKrKZ4FumDpkR5jnwmbV0ntfsRR8sTWlrOPYMdL+2UScqtNoF8XEUfLOljYokgc6jHcHjnsCbGjGMzuQUjVsda5aC5Duo+lZNO32Tpg80jRKjqAVpJ5MFCCCqTRODRZ44xvOAfMnMUdNKzvGh4j0JtEi7Hh4uG1ToTqTZehefSwPKWTUcnxllrRv8A9IH+RMsSyft16UAro3m/36blpFVV35c3DZkIsKMqs9uG81BgkQgNNb6xa/0Aicc01JMNLkWcQA0B4xSCIIlsR0wp3kTRnq8sFfblGKSqpeRcr35y2iW94FNHrjFK/onfDJ6mMVgTTLyULPWQUTqO4iLrrxkiwDIAQ1ITctametGXoD7B/i7I8pWXBw2CICwf7FOnTp06dOnTp06dOnTp054rE7NUNl5OP7JOnTp06dOnTp06dOnTp06VllaoJiOzt/8Ax6IIuTBk7lTQe3PBuuABXcQIXbP8vIIkFGIj6RwDDwdEV+218A//AFxcvUTy265PDwdOcrNntwZNeoq3SUkrj29oL3WNJsXFBVUEpOdkfiBA8qXjx1y5evMkOocSE0UEYmJu53lDAB0kEE4QLHx4PSf5FuQeTokNjevJ1zigVQDauVII7EG2ju08HvB23Xj3pU288PGEGj50HRHo6k8JXD71rT5gPY6LtYPWn9sEmL4bcbxtkvrNUHU9a8GAriPWWD6ygXJMVqBfZtCj3qxOR7Op6AgLpBedaFTfFu3UbvAOh0WpwjIAQxCTYJFUgVBNMJwLuAQWZOJXg51g8nISBtGpfDQzaVqSGoEWSgbE09Cv8g95r6ID9LjjdhlHlij4Nh21gLKEW+BADQRIqxgtY2S+wmDrgFKmJdztK1A7UQrzqLsSzk0eHR7YQsVu8IvBqQkG+qoQOguGxkqM/A4KOFW7bGHonYWJpGlEnkYE8mnSBGg4hBBKgVigcNFYAhC9CLKpwcKfYlFcuiKl2OmXd5tcnmx7
i4lVtUxHR0EpQRI4AhDWGoMQjLNgapt8A9KwBCvR2BWDIHS+CQaGd7JAaJEnU2mgoVWjW9qG6TDuGYz7HuSdMaH+O2D1nzBX9sHRS5IQHwF3opwc4zHSAgSiwecKcKhpoby2WBE9FprVlsxiogBCsdaoELmXRjsbsF0NsgqGV4orJaL1gYphVFyO2nyN1RA7DABLgYIlCrGwUApU54TDB3ypu16NRSKqdgfNgEIRrqrKiAuJ61xYC6huWABbocCp7fYFusvQGpwbFqugWTU2FGSgwRV9IRlSVKHZRqDiyMoRKB+omIFHnR10bngEKN8t5QXSgvjgbtFo/IhoIoS1c4VU1yIgSeK1NPKTTOW07aGVF2FYUw0joBDnei+K421AHsox/b/G1mqAoHSODjlANbyLw64ot2yji+1D0GHJlUihikCLE0oQLTosr4D8QQMxtVqlNiyYOJ6NivAnTcCBK4M7QVYt+7ygKgCmLgdMGKlYAsInO6ay6gxMUJsBEbyFt3ij0QKJEA6UujkcjR0juIDZKLTAwuriMY0baWpRSWq6thp7phWwhuyGD2RgJcljRKQAw4NimRYCbURBsCrsYPjULkOLZ9NGRZgZUpxqXtlvZjdlSEDhJegjOaYbSvk4qsIFWuhDGgMVcAVyDbnWQuKYQSyG74VrIldYjKEKXyrtff8AjZRgpt4KV95iPneURFKfNV3yrK5SViUwEAXBgoquzGxJGTYI2TIIAqzB3qHagE4Ft6tojjIrogKa/eoV2elTqLeIkFxHtpvvBAQWFuEm2ahThBFQeHUOGgHV1qPDdzNuLsCQJcm8GDviqQQdLkpSKgUFgjEjDrjwE3VLgs5TbrQiN9WB1s4lIfhsWluEMXrqSpa8kiNjoAi6NmWKO7p7VlQ1xofXA6VcBW9sodCwyC7Ke6AmlWUUsDpkNbvUCUWoDJS4O76DQg0xO0A4BSMKizhajtOhGvQ0yvpPNia3STe/8bNi47gAq4MThFRFliA1LgdXWO7ZB0kAoIg1GzIANzMpJrQKRUdOCNwXzC2TAVAnLgtVriUfbuhipKipIDGYGQ91zW2AFqYR8kcaSpq9FCqKVSxh1QJGJSFdQkbI6SIUWhKgBglmyabsU5sYm6HV541wikJH5GBBsEAgjOET1TiJIWachNBukoBboaaFQexBZwgU4UGq0E7GuRQ8qIVXcE0NWhVEOKWPXFXRgBdwQrAslGY2vlG30drAoaVhY2AnU9Mp7cJBMIG+4OXtfK9rtzbJsBfg9f44PqlQxQR/RzbV7gzQUmUuhbHjHwkkgbaGNrdLXCBzcIR4sC539DPXxUoIab4L8mAQOPFAYU90HROwmAIqdyksvA76W+SfxXuFb5eBVgjaKuJElYgjq+5waCgejVADgDEtq1MYPTwmR5alrCtprZUGGkTdAab5K6KuOUnQH1Dr2GDYVRkIK8I7WwgEFo41AL4AheKmiIlGzdIWooCCVKAmow9qDDw1WJ9FTRYVVSGBKToro6vA1YAbAv8AkkWef31KfqYaKJeGaAcRWBKxK5aEtu1sqoDbC5sQ2akY3kIWKrusAomx4KgaGGAEAAATGZOUg1rXmTlcmqltK+VuPsw8C0p5GjWwdi4h4wUBiMU2QS2zsjFBmyptlCtGGoLQice/Fh4GQeRUVCVf8no4EI15CAfFlB6yzSUgFWq7U1V2qrX+l0dCyXIjyYFYRmPFtWhqDR/+oCCu2MpxT1hro2g+CPFJHWIcb0mrytADiJ84A/Ieyyxue+MRrRCE8seM3mfMatnjUXxd4HUkBV+FwtdX8Fqzj0WsddN95MwSBt2QfumKCi7QQpwRHG3kwFDk6IjxeM0JgUZqHs3i1zRAJUbohnzjZFTelNmtnnHoZPuKHRpK/TFA9ZygB9VDBZe/QKrwgCsdTNyLLwWWcz3xghCGOR4UNh7cFgv/AHwMnvJ9uxUUQd15/XLu1ARHnsZ6MPLBkBdJwjnPpJx0r7Vf8Y/g
PPFKq9hGEPKn6B3h7LZtXxe9kHwGStajOpLgg65h6yccQ5XH1jhOphcISXG+VFJ6gdBiY6kJihUrd+X3+Bw+BCgpOCNO54Md+BVAUWxyeNr5x7kQSsZz7wIC3MFWjaNr5nnGeoxEUD0KYWXC1ambIQEHY65wURI1CEwtwpoXhxhwSCAPiXkJg8GJ+Mm/kCBgVjYp35fLkP5Sh5Qa1D7HgzTTU75bp19afGcwCq57p9Y3ze8SXSyb0choXi65cGzQRDRjvdPv5xB4l2vV2m2nr4/xnpxxNkrVUOvHGQwPI3jdJSpGmjWerRMJpMoDkZ1lk6J5EjDhrdZrFkvYnwB2e233jrQtxGD2jhu3vHYgPMmwu7qT1+EcnLgnkMsNGsr+RuLaUm7WbebjU82tEQCNCGuHvBCkUtXm5yPBrqZU/EC8wo283l85LkReigPGOm8io9KtdFIaNE4PGE9PqI07Amydry5y6DkgSoeTzk5GcIE0EH4aZr2PpRRtXylw4VhFnqR0270Yyq9Odl1OG7vN3bvG86o0WU2L7IUZg7xgt+Bxm3TrbkQwCSSKqqe83LN1CArtgHP/APDn/9k=)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FwGNEPVTPAwS" + }, + "source": [ + "To run the code fast enough, we suggest using a GPU (`Runtime => change runtime type => GPU`).\n", + "\n", + "\n", + "## Installation\n", + "Before starting, let's install Flower and SpeechBrain:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "executionInfo": { + "elapsed": 20306, + "status": "ok", + "timestamp": 1709075703966, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "saSuftgFBTHv" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "executionInfo": { + "elapsed": 21591, + "status": "ok", + "timestamp": 1709075731107, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "CpwJ-Of0tbWe" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# For pip installation\n", + "!pip install flwr\n", + "\n", + "# update tqdm package to avoid an ImportError.\n", + "!pip install tqdm==4.50.2" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CtMlAwdZwXT3" + }, + "source": [ + 
"Then, download Flower-SpeechBrain integration code and template dataset, which was released on ```github.com/yan-gao-GY/Flower-SpeechBrain```. This integration will be explained in more details later on." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 216, + "status": "ok", + "timestamp": 1709075733276, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "QJXNAg7fE6ld", + "outputId": "3614a5d1-a832-4a52-db9d-47e40c9fcbcc" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content\n" + ] + } + ], + "source": [ + "%cd /content\n", + "%rm -rf results" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "executionInfo": { + "elapsed": 842, + "status": "ok", + "timestamp": 1709075734948, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "16hQcJa-h6XE" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!git clone https://github.com/yan-gao-GY/Flower-SpeechBrain.git" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fh9WH4THxkgA" + }, + "source": [ + "## What steps are needed for your experiments?\n", + "\n", + "The steps needed to launch a federated speech model training are just as normal Flower experiments.\n", + "\n", + "1. **Prepare your data**. The goal of this step is to create the data manifest files (TSV format) to fit the input format of SpeechBrain. The data manifest files contains the location of the speech data and their corresponding text annotations. In this tutorial, we skip the data partitioning step and simulate different partitions using a small template dataset. 
But in practice, you might want to have different files per federated client or a more complex data partitioning scheme.\n", + "\n", + "Now let's uncompress our template dataset." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 240, + "status": "ok", + "timestamp": 1709075737691, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "dOXD6PjcjjSi", + "outputId": "91717178-c41b-4ea6-a667-10daa4285464" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content/Flower-SpeechBrain/temp_dataset\n", + "/content\n" + ] + } + ], + "source": [ + "%cd /content/Flower-SpeechBrain/temp_dataset/\n", + "import zipfile\n", + "import os\n", + "\n", + "# Uncompression function\n", + "def un_zip(file_name):\n", + " zip_file = zipfile.ZipFile(file_name)\n", + " for names in zip_file.namelist():\n", + " zip_file.extract(names)\n", + " zip_file.close()\n", + "\n", + "un_zip(\"temp_dataset.zip\")\n", + "\n", + "# Simulate partitions using template dataset.\n", + "%cp temp_dataset.tsv train_0.tsv\n", + "\n", + "# Go back to /content directory.\n", + "%cd /content" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jcUTmthGIp84" + }, + "source": [ + "2. **Specify server and clients**. As Colab notebooks only allow one cell to be run at a time, we simulate the server and the clients as background processes within this tutorial. The following cells create `server.sh` and `clients.sh` scripts that will launch the required processes. All arguments required for federated training are passed in from the scripts." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 95, + "status": "ok", + "timestamp": 1709075740241, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "FkrC6mTObM05", + "outputId": "b05e8e1a-c177-4ef2-bc7f-71b4c99a0c8e" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing server.sh\n" + ] + } + ], + "source": [ + "%%writefile server.sh\n", + "PYTHONUNBUFFERED=1 python3 /content/Flower-SpeechBrain/server.py \\\n", + " --data_path=\"/content/Flower-SpeechBrain/temp_dataset/\" \\\n", + " --config_path=\"/content/Flower-SpeechBrain/configs/\" \\\n", + " --tr_path=\"/content/Flower-SpeechBrain/temp_dataset/temp_dataset.tsv\" \\\n", + " --test_path=\"/content/Flower-SpeechBrain/temp_dataset/temp_dataset.tsv\" \\\n", + " --tr_add_path=\"/content/Flower-SpeechBrain/temp_dataset/temp_dataset.tsv\" \\\n", + " --config_file=\"template.yaml\" \\\n", + " --min_fit_clients=1 \\\n", + " --min_available_clients=1 \\\n", + " --rounds=1 \\\n", + " --local_epochs=1 \\\n", + " --server_address=\"localhost:24338\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 91, + "status": "ok", + "timestamp": 1709075741600, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "kUP683Skelc5", + "outputId": "83481e23-de95-4002-aa7b-4d85acfe662e" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing clients.sh\n" + ] + } + ], + "source": [ + "%%writefile clients.sh\n", + "export PYTHONUNBUFFERED=1\n", + "NUM_CLIENTS=1\n", + "\n", + "\n", + "echo \"Starting $NUM_CLIENTS clients.\"\n", + "for ((i = 0; i < $NUM_CLIENTS; i++))\n", + "do\n", + " echo 
\"Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients.\"\n", + " # Staggered loading of clients: clients are loaded 8s apart.\n", + " sleep 8s\n", + " python3 /content/Flower-SpeechBrain/client.py \\\n", + " --cid=$i \\\n", + " --data_path=\"/content/Flower-SpeechBrain/temp_dataset/\" \\\n", + " --tr_path=\"/content/Flower-SpeechBrain/temp_dataset/\" \\\n", + " --dev_path=\"/content/Flower-SpeechBrain/temp_dataset/temp_dataset.tsv\" \\\n", + " --config_path=\"/content/Flower-SpeechBrain/configs/\" \\\n", + " --config_file=\"template.yaml\" \\\n", + " --eval_device=\"cuda:0\" \\\n", + " --server_address=\"localhost:24338\" &\n", + "done\n", + "echo \"Started $NUM_CLIENTS clients.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "executionInfo": { + "elapsed": 239, + "status": "ok", + "timestamp": 1709075799505, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "vLIzAe_0gEuI" + }, + "outputs": [], + "source": [ + "# Execute this after running any of the %%writefile cells above\n", + "!chmod +x clients.sh server.sh" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OJtjZHr0N5G3" + }, + "source": [ + "3. **Launch federated training~!** The following single cell will start the server, wait 5 seconds for it to initialise, and then start all clients.\n", + "\n", + " ```\n", + " !((./server.sh & sleep 5s); ./clients.sh)\n", + " ```\n", + "\n", + " We suggest running it at the end of this tutorial.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IYSzHjeR_VSS" + }, + "source": [ + "## Integration details — coupling SpeechBrain to Flower\n", + "Let's first see some details of the integration process to better understand the code. There are only four main steps required:\n", + "\n", + "1. 
Define a Brain class ([SpeechBrain Brain Class tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html)).\n", + "2. Initialise the Brain class and dataset ([SpeechBrain dataio tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)).\n", + "3. Define a SpeechBrain Client ([Flower client documentation](https://flower.dev/docs/quickstart_pytorch.html#flower-client)).\n", + "4. Define a Flower Strategy on the server side ([Flower strategies](https://flower.dev/docs/strategies.html#strategies))." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rFSxYCs6JKfp" + }, + "source": [ + "### Define a Brain class\n", + "\n", + "First, we define our customised Brain class as any normal SpeechBrain experiments. This override is necessary (while usually not needed on SpeechBrain) because Flower requires the number of processed samples to perform aggregation!\n", + "\n", + "```python\n", + "class ASR(sb.core.Brain):\n", + " def compute_forward(self, batch, stage):\n", + " \"\"\"Forward pass, to be overridden by sub-classes.\n", + "\n", + " Arguments\n", + " ---------\n", + " batch : torch.Tensor or tensors\n", + " An element from the dataloader, including inputs for processing.\n", + " stage : Stage\n", + " The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n", + "\n", + " Returns\n", + " -------\n", + " torch.Tensor or Tensors\n", + " The outputs after all processing is complete.\n", + " Directly passed to ``compute_objectives()``.\n", + " \"\"\"\n", + " [...]\n", + "\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " \"\"\"Compute loss, to be overridden by sub-classes.\n", + "\n", + " Arguments\n", + " ---------\n", + " predictions : torch.Tensor or Tensors\n", + " The output tensor or tensors to evaluate.\n", + " Comes directly from ``compute_forward()``.\n", + " batch : torch.Tensor or tensors\n", + " An element from the dataloader, including targets 
for comparison.\n", + " stage : Stage\n", + " The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n", + "\n", + " Returns\n", + " -------\n", + " loss : torch.Tensor\n", + " A tensor with the computed loss.\n", + " \"\"\"\n", + " [...]\n", + "\n", + " def fit_batch(self, batch):\n", + " \"\"\"Fit one batch, override to do multiple updates.\n", + "\n", + " The default implementation depends on a few methods being defined\n", + " with a particular behavior:\n", + "\n", + " * ``compute_forward()``\n", + " * ``compute_objectives()``\n", + " * ``optimizers_step()``\n", + "\n", + " Also depends on having optimizers passed at initialization.\n", + "\n", + " Arguments\n", + " ---------\n", + " batch : list of torch.Tensors\n", + " Batch of data to use for training. Default implementation assumes\n", + " this batch has two elements: inputs and targets.\n", + "\n", + " Returns\n", + " -------\n", + " detached loss\n", + " \"\"\"\n", + " [...]\n", + "\n", + " def evaluate_batch(self, batch, stage):\n", + " \"\"\"Evaluate one batch, override for different procedure than train.\n", + "\n", + " The default implementation depends on two methods being defined\n", + " with a particular behavior:\n", + "\n", + " * ``compute_forward()``\n", + " * ``compute_objectives()``\n", + "\n", + " Arguments\n", + " ---------\n", + " batch : list of torch.Tensors\n", + " Batch of data to use for evaluation. 
Default implementation assumes\n", + " this batch has two elements: inputs and targets.\n", + " stage : Stage\n", + " The stage of the experiment: Stage.VALID, Stage.TEST\n", + "\n", + " Returns\n", + " -------\n", + " detached loss\n", + " \"\"\"\n", + " [...]\n", + "\n", + " def fit(\n", + " self,\n", + " epoch_counter,\n", + " train_set,\n", + " valid_set=None,\n", + " progressbar=None,\n", + " train_loader_kwargs={},\n", + " valid_loader_kwargs={},\n", + " ):\n", + " \"\"\"Iterate epochs and datasets to improve objective.\n", + "\n", + " Relies on the existence of multiple functions that can (or should) be\n", + " overridden. The following methods are used and expected to have a\n", + " certain behavior:\n", + "\n", + " * ``fit_batch()``\n", + " * ``evaluate_batch()``\n", + " * ``update_average()``\n", + "\n", + " If the initialization was done with distributed_count > 0 and the\n", + " distributed_backend is ddp, this will generally handle multiprocess\n", + " logic, like splitting the training data into subsets for each device and\n", + " only saving a checkpoint on the main process.\n", + "\n", + " Arguments\n", + " ---------\n", + " epoch_counter : iterable\n", + " Each call should return an integer indicating the epoch count.\n", + " train_set : Dataset, DataLoader\n", + " A set of data to use for training. If a Dataset is given, a\n", + " DataLoader is automatically created. If a DataLoader is given, it is\n", + " used directly.\n", + " valid_set : Dataset, DataLoader\n", + " A set of data to use for validation. If a Dataset is given, a\n", + " DataLoader is automatically created. If a DataLoader is given, it is\n", + " used directly.\n", + " train_loader_kwargs : dict\n", + " Kwargs passed to `make_dataloader()` for making the train_loader\n", + " (if train_set is a Dataset, not DataLoader).\n", + " E.G. 
batch_size, num_workers.\n", + " DataLoader kwargs are all valid.\n", + " valid_loader_kwargs : dict\n", + " Kwargs passed to `make_dataloader()` for making the valid_loader\n", + " (if valid_set is a Dataset, not DataLoader).\n", + " E.g., batch_size, num_workers.\n", + " DataLoader kwargs are all valid.\n", + " progressbar : bool\n", + " Whether to display the progress of each epoch in a progressbar.\n", + " \"\"\"\n", + " [...]\n", + "\n", + " def evaluate(\n", + " self,\n", + " test_set,\n", + " progressbar=None,\n", + " test_loader_kwargs={},\n", + " ):\n", + " \"\"\"Iterate test_set and evaluate brain performance. By default, loads\n", + " the best-performing checkpoint (as recorded using the checkpointer).\n", + "\n", + " Arguments\n", + " ---------\n", + " test_set : Dataset, DataLoader\n", + " If a DataLoader is given, it is iterated directly. Otherwise passed\n", + " to ``self.make_dataloader()``.\n", + " max_key : str\n", + " Key to use for finding best checkpoint, passed to\n", + " ``on_evaluate_start()``.\n", + " min_key : str\n", + " Key to use for finding best checkpoint, passed to\n", + " ``on_evaluate_start()``.\n", + " progressbar : bool\n", + " Whether to display the progress in a progressbar.\n", + " test_loader_kwargs : dict\n", + " Kwargs passed to ``make_dataloader()`` if ``test_set`` is not a\n", + " DataLoader. NOTE: ``loader_kwargs[\"ckpt_prefix\"]`` gets\n", + " automatically overwritten to ``None`` (so that the test DataLoader\n", + " is not added to the checkpointer).\n", + "\n", + " Returns\n", + " -------\n", + " average test loss\n", + " \"\"\"\n", + " [...]\n", + "```\n", + "\n", + "We override the `fit()` method, which calculates number of training examples, average training loss and average WER. 
In practice, the code is almost identical to the official SpeechBrain (copy and paste), as we just need to return the number of processed samples !\n", + "\n", + "```python\n", + " def fit(\n", + " self,\n", + " epoch_counter,\n", + " train_set,\n", + " valid_set=None,\n", + " progressbar=None,\n", + " train_loader_kwargs={},\n", + " valid_loader_kwargs={},\n", + " ):\n", + " if self.test_only:\n", + " return\n", + "\n", + " if not (\n", + " isinstance(train_set, DataLoader)\n", + " or isinstance(train_set, LoopedLoader)\n", + " ):\n", + " train_set = self.make_dataloader(\n", + " train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs\n", + " )\n", + " if valid_set is not None and not (\n", + " isinstance(valid_set, DataLoader)\n", + " or isinstance(valid_set, LoopedLoader)\n", + " ):\n", + " valid_set = self.make_dataloader(\n", + " valid_set,\n", + " stage=sb.Stage.VALID,\n", + " ckpt_prefix=None,\n", + " **valid_loader_kwargs,\n", + " )\n", + "\n", + " self.on_fit_start()\n", + "\n", + " if progressbar is None:\n", + " progressbar = not self.noprogressbar\n", + "\n", + " batch_count = 0\n", + " # Iterate epochs\n", + " for epoch in epoch_counter:\n", + "\n", + " # Training stage\n", + " self.on_stage_start(sb.Stage.TRAIN, epoch)\n", + " self.modules.train()\n", + " avg_wer = 0.0\n", + "\n", + " # Reset nonfinite count to 0 each epoch\n", + " self.nonfinite_count = 0\n", + "\n", + " if self.train_sampler is not None and hasattr(\n", + " self.train_sampler, \"set_epoch\"\n", + " ):\n", + " self.train_sampler.set_epoch(epoch)\n", + "\n", + " # Time since last intra-epoch checkpoint\n", + " last_ckpt_time = time.time()\n", + "\n", + " # Only show progressbar if requested and main_process\n", + " enable = progressbar and sb.utils.distributed.if_main_process()\n", + " with tqdm(\n", + " train_set,\n", + " initial=self.step,\n", + " dynamic_ncols=True,\n", + " disable=not enable,\n", + " ) as t:\n", + " for batch in t:\n", + " self.step += 1\n", + " loss, wer = 
self.fit_batch(batch)\n", + " _, wav_lens = batch.sig\n", + " batch_count += wav_lens.shape[0]\n", + "\n", + " self.avg_train_loss = self.update_average(\n", + " loss, self.avg_train_loss\n", + " )\n", + " avg_wer = self.update_average_wer(\n", + " wer, avg_wer\n", + " )\n", + " t.set_postfix(train_loss=self.avg_train_loss)\n", + "\n", + " # Debug mode only runs a few batches\n", + " if self.debug and self.step == self.debug_batches:\n", + " break\n", + "\n", + " if (\n", + " self.checkpointer is not None\n", + " and self.ckpt_interval_minutes > 0\n", + " and time.time() - last_ckpt_time\n", + " >= self.ckpt_interval_minutes * 60.0\n", + " ):\n", + " run_on_main(self._save_intra_epoch_ckpt)\n", + " last_ckpt_time = time.time()\n", + "\n", + " # Run train \"on_stage_end\" on all processes\n", + " if epoch == epoch_counter.limit:\n", + " avg_loss = self.avg_train_loss\n", + "\n", + " self.on_stage_end(sb.Stage.TRAIN, self.avg_train_loss, epoch)\n", + " self.avg_train_loss = 0.0\n", + " self.step = 0\n", + "\n", + " # Validation stage\n", + " if valid_set is not None:\n", + " self.on_stage_start(sb.Stage.VALID, epoch)\n", + " self.modules.eval()\n", + " avg_valid_loss = 0.0\n", + " with torch.no_grad():\n", + " for batch in tqdm(\n", + " valid_set, dynamic_ncols=True, disable=not enable\n", + " ):\n", + " self.step += 1\n", + " loss = self.evaluate_batch(batch, stage=sb.Stage.VALID)\n", + " avg_valid_loss = self.update_average(\n", + " loss, avg_valid_loss\n", + " )\n", + "\n", + " # Debug mode only runs a few batches\n", + " if self.debug and self.step == self.debug_batches:\n", + " break\n", + "\n", + " # Only run validation \"on_stage_end\" on main process\n", + " self.step = 0\n", + " valid_wer = self.on_stage_end(sb.Stage.VALID, avg_valid_loss, epoch)\n", + " if epoch == epoch_counter.limit:\n", + " valid_wer_last = valid_wer\n", + "\n", + " # Debug mode only runs a few epochs\n", + " if self.debug and epoch == self.debug_epochs:\n", + " break\n", + "\n", + " 
return batch_count, avg_loss, valid_wer_last\n", + "\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yKX7H6NBCpCJ" + }, + "source": [ + "### Initialise Brain class and dataset\n", + "Next, we instantiate the `ASR` Brain class defined before, as well as the dataset. In SpeechBrain, this would be the `Main` function of your speech recipe. Here, we need to encapsulate this because Flower will call this function for each client of the federated setup to initialise it properly!\n", + "\n", + "```python\n", + "def int_model(\n", + " flower_path,\n", + " tr_path,\n", + " dev_path,\n", + " test_path,\n", + " save_path,\n", + " data_path,\n", + " config_file=\"CRDNN.yaml\",\n", + " tokenizer_path=None,\n", + " eval_device=\"cuda:0\",\n", + " evaluate=False,\n", + " add_train=False):\n", + "\n", + " # Load hyperparameters file with command-line overrides\n", + " params_file = flower_path + config_file\n", + "\n", + " # Override with FLOWER PARAMS\n", + " if evaluate:\n", + " overrides = {\n", + " \"output_folder\": save_path,\n", + " \"number_of_epochs\": 1,\n", + " \"test_batch_size\": 4,\n", + " \"device\": eval_device,\n", + " # \"device\": 'cpu'\n", + " }\n", + " elif add_train:\n", + " overrides = {\n", + " \"output_folder\": save_path,\n", + " \"lr\": 0.01\n", + " }\n", + "\n", + " else:\n", + " overrides = {\n", + " \"output_folder\": save_path\n", + " }\n", + " run_opts = None\n", + "\n", + " with open(params_file) as fin:\n", + " params = load_hyperpyyaml(fin, overrides)\n", + "\n", + " params[\"data_folder\"] = data_path\n", + " params[\"train_tsv_file\"] = tr_path\n", + " params[\"dev_tsv_file\"] = dev_path\n", + " params[\"test_tsv_file\"] = test_path\n", + " params[\"save_folder\"] = params[\"output_folder\"] + \"/save\"\n", + " params[\"train_csv\"] = params[\"save_folder\"] + \"/train.csv\"\n", + " params[\"valid_csv\"] = params[\"save_folder\"] + \"/dev.csv\"\n", + " params[\"test_csv\"] = params[\"save_folder\"] + 
\"/test.csv\"\n", + " params[\"tokenizer_csv\"] = tokenizer_path if tokenizer_path is not None else params[\"train_csv\"]\n", + "\n", + " # Dataset preparation (parsing CommonVoice)\n", + " from common_voice_prepare import prepare_common_voice # noqa\n", + "\n", + " # Create experiment directory\n", + " sb.create_experiment_directory(\n", + " experiment_directory=params[\"output_folder\"],\n", + " hyperparams_to_save=params_file,\n", + " overrides=overrides,\n", + " )\n", + "\n", + " # Due to DDP, we do the preparation ONLY on the main python process\n", + " run_on_main(\n", + " prepare_common_voice,\n", + " kwargs={\n", + " \"data_folder\": params[\"data_folder\"],\n", + " \"save_folder\": params[\"save_folder\"],\n", + " \"train_tsv_file\": params[\"train_tsv_file\"],\n", + " \"dev_tsv_file\": params[\"dev_tsv_file\"],\n", + " \"test_tsv_file\": params[\"test_tsv_file\"],\n", + " \"accented_letters\": params[\"accented_letters\"],\n", + " \"language\": params[\"language\"],\n", + " },\n", + " )\n", + "\n", + " # Defining tokenizer and loading it\n", + " tokenizer = SentencePiece(\n", + " model_dir=params[\"save_folder\"],\n", + " vocab_size=params[\"output_neurons\"],\n", + " annotation_train=params[\"train_csv\"],\n", + " annotation_read=\"wrd\",\n", + " model_type=params[\"token_type\"],\n", + " character_coverage=params[\"character_coverage\"],\n", + " )\n", + "\n", + " # Create the datasets objects as well as tokenization and encoding :-D\n", + " train_data, valid_data, test_data = dataio_prepare(params, tokenizer)\n", + "\n", + " # Trainer initialization\n", + " asr_brain = ASR(\n", + " modules=params[\"modules\"],\n", + " hparams=params,\n", + " run_opts=run_opts,\n", + " opt_class=params[\"opt_class\"],\n", + " checkpointer=params[\"checkpointer\"],\n", + " )\n", + "\n", + " # Adding objects to trainer.\n", + " asr_brain.tokenizer = tokenizer\n", + "\n", + " return asr_brain, [train_data, valid_data, test_data]\n", + "\n", + "asr_brain, dataset = 
int_model(...)\n", + "```\n", + "This function can also load all hyper-parameters from provided `yaml` file as normal SpeechBrain model training. Additionally, we can overwrite the hyper-parameters of `yaml` file here." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dQxY4EWKKa3F" + }, + "source": [ + "### Define a SpeechBrain client\n", + "We define a customised Flower client that can mainly achieve three features:\n", + "* Set server weights to SpeechBrain model.\n", + "* Trigger SpeechBrain model training.\n", + "* Extract model weights after training.\n", + "\n", + "Let's first see `set_weights` and `get_weights` functions. This is quite simple, just the transformation between pytorch tensor and NumPy ndarrays.\n", + "\n", + "If you are familiar with SpeechBrain, you will recognize the **modules** argument. If not, this simply is all the PyTorch blocks of your pipeline. This means that we can iterated over the state_dict to obtain absolutely all the parameters of the speech pipeline.\n", + "\n", + "```python\n", + "def set_weights(weights, modules, device) -> None:\n", + " \"\"\"Set model weights from a list of NumPy ndarrays.\"\"\"\n", + " state_dict = OrderedDict()\n", + " valid_keys = [k for k in modules.state_dict().keys()]\n", + " for k, v in zip(valid_keys, weights):\n", + " v_ = torch.Tensor(np.array(v))\n", + " v_ = v_.to(device)\n", + " state_dict[k] = v_\n", + " modules.load_state_dict(state_dict, strict=False)\n", + "\n", + "def get_weights(modules):\n", + " \"\"\"Get model weights as a list of NumPy ndarrays.\"\"\"\n", + " w = []\n", + " for k, v in modules.state_dict().items():\n", + " w.append(v.cpu().numpy())\n", + " return w\n", + "```\n", + "\n", + "Then, we define the `SpeechBrainClient` class.\n", + "\n", + "```python\n", + "class SpeechBrainClient(fl.client.NumPyClient):\n", + " def __init__(self,\n", + " cid: int,\n", + " asr_brain,\n", + " dataset,\n", + " pre_train_model_path=None):\n", + "\n", + " self.cid = cid\n", + " 
self.params = asr_brain.hparams\n", + " self.modules = asr_brain.modules\n", + " self.asr_brain = asr_brain\n", + " self.dataset = dataset\n", + " self.pre_train_model_path = pre_train_model_path\n", + "\n", + " def get_parameters(self, config):\n", + " print(f\"Client {self.cid}: get_parameters\")\n", + " weights = get_weights(self.modules)\n", + " return weights\n", + "\n", + " def fit(self, parameters, config):\n", + " print(f\"Client {self.cid}: fit\")\n", + "\n", + " # Read training configuration\n", + " global_rounds = int(config[\"epoch_global\"])\n", + " print(\"Current global round: \", global_rounds)\n", + " epochs = int(config[\"epochs\"])\n", + "\n", + " (\n", + " new_weights,\n", + " num_examples,\n", + " num_examples_ceil,\n", + " fit_duration,\n", + " avg_loss,\n", + " avg_wer\n", + " ) = self.train_speech_recogniser(\n", + " parameters,\n", + " epochs,\n", + " global_rounds=global_rounds\n", + " )\n", + "\n", + " metrics = {\"train_loss\": avg_loss, \"wer\": avg_wer}\n", + "\n", + " # Release GPU VRAM\n", + " torch.cuda.empty_cache()\n", + "\n", + " return self.get_parameters(config={}), num_examples, metrics\n", + "\n", + " def evaluate(self, parameters, config):\n", + " print(f\"Client {self.cid}: evaluate\")\n", + "\n", + " num_examples, loss, wer = self.train_speech_recogniser(\n", + " server_params=parameters,\n", + " epochs=1,\n", + " evaluate=True\n", + " )\n", + " torch.cuda.empty_cache()\n", + "\n", + " # Return the number of evaluation examples and the evaluation result (loss)\n", + " return float(loss), num_examples, {\"accuracy\": float(wer)}\n", + "\n", + "\n", + " def train_speech_recogniser(\n", + " self,\n", + " server_params,\n", + " epochs,\n", + " evaluate=False,\n", + " add_train=False,\n", + " global_rounds=None\n", + " ):\n", + " '''\n", + " This function aims to trigger client local training or evaluation\n", + " via calling the fit() or evaluate() function of SpeechBrain Brain\n", + " class. 
It can also load a pre-trained model before training.\n", + "\n", + " Arguments\n", + " ---------\n", + " server_params : Parameter\n", + " The parameters given by the server.\n", + " epochs : int\n", + " The total number of local epochs for training.\n", + " evaluate : bool\n", + " Evaluation or not.\n", + " add_train : bool\n", + " The additional training on the server or not.\n", + " global_rounds : int\n", + " The current global round.\n", + " \n", + " Returns\n", + " -------\n", + " model weights after training,\n", + " number of total training samples,\n", + " number of training samples ceil,\n", + " training duration,\n", + " training loss,\n", + " valid WER\n", + " '''\n", + " self.params.epoch_counter.limit = epochs\n", + " self.params.epoch_counter.current = 0\n", + "\n", + " train_data, valid_data, test_data = self.dataset\n", + "\n", + " # Set the parameters to the ones given by the server\n", + " if server_params is not None:\n", + " set_weights(server_params, self.modules, self.params.device)\n", + "\n", + " # Load the pre-trained model at global round 1\n", + " if global_rounds == 1 and not add_train and not evaluate:\n", + " if self.pre_train_model_path is not None:\n", + " print(\"loading pre-trained model...\")\n", + " state_dict = torch.load(self.pre_train_model_path)\n", + " self.params.model.load_state_dict(state_dict)\n", + "\n", + " # Exclude two layers which do not join the aggregation\n", + " if global_rounds != 1:\n", + " # Two layer names that do not join aggregation\n", + " k1 = \"enc.DNN.block_0.norm.norm.num_batches_tracked\"\n", + " k2 = \"enc.DNN.block_1.norm.norm.num_batches_tracked\"\n", + "\n", + " state_dict_norm = OrderedDict()\n", + " state_dict_norm[k1] = torch.tensor(1, device=self.params.device)\n", + " state_dict_norm[k2] = torch.tensor(0, device=self.params.device)\n", + " self.modules.load_state_dict(state_dict_norm, strict=False)\n", + "\n", + " # Load best checkpoint for evaluation\n", + " if 
evaluate:\n", + " self.params.test_wer_file = self.params.output_folder + \"/wer_test.txt\"\n", + " batch_count, loss, wer = self.asr_brain.evaluate(\n", + " test_data,\n", + " test_loader_kwargs=self.params.test_dataloader_options,\n", + " )\n", + " return batch_count, loss, wer\n", + "\n", + " # Training\n", + " fit_begin = timeit.default_timer()\n", + "\n", + " count_sample, avg_loss, avg_wer = self.asr_brain.fit(\n", + " self.params.epoch_counter,\n", + " train_data,\n", + " valid_data,\n", + " train_loader_kwargs=self.params.dataloader_options,\n", + " valid_loader_kwargs=self.params.test_dataloader_options,\n", + " )\n", + "\n", + " # Exp operation to avg_loss and avg_wer\n", + " avg_wer = 100 if avg_wer > 100 else avg_wer\n", + " avg_loss = exp(- avg_loss)\n", + " avg_wer = exp(100 - avg_wer)\n", + "\n", + " # Retrieve the parameters to return\n", + " params_list = get_weights(self.modules)\n", + "\n", + " if add_train:\n", + " return params_list\n", + "\n", + " fit_duration = timeit.default_timer() - fit_begin\n", + "\n", + " # Manage when last batch isn't full w.r.t batch size\n", + " train_set = sb.dataio.dataloader.make_dataloader(train_data, **self.params.dataloader_options)\n", + " if count_sample > len(train_set) * self.params.batch_size * epochs:\n", + " count_sample = len(train_set) * self.params.batch_size * epochs\n", + "\n", + " return (\n", + " params_list,\n", + " count_sample,\n", + " len(train_set) * self.params.batch_size * epochs,\n", + " fit_duration,\n", + " avg_loss,\n", + " avg_wer\n", + " )\n", + "\n", + "client = SpeechBrainClient(...)\n", + "```\n", + "\n", + "The training process happens in the `fit()` method of our defined `SpeechBrainClient` class. A function named `train_speech_recogniser()` is called inside of `fit()`. This function aims to trigger client local training by calling the `fit()` method of the SpeechBrain Brain class. 
Also, we can load a pre-trained model at 1st global round for initialisation.\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HYXRGO_-R1Ol" + }, + "source": [ + "### Define a Flower Strategy on the server side\n", + "To achieve different aggregation weighting strategies and an additional training after aggregation, we need to define a customised Flower Strategy class.\n", + "\n", + "```python\n", + "class TrainAfterAggregateStrategy(fl.server.strategy.FedAvg):\n", + " def aggregate_fit(\n", + " self,\n", + " server_round: int,\n", + " results: List[Tuple[ClientProxy, FitRes]],\n", + " failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],\n", + " ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:\n", + " \"\"\"Aggregate fit results using weighted average.\"\"\"\n", + "\n", + " if not results:\n", + " return None, {}\n", + " # Do not aggregate if there are failures and failures are not accepted\n", + " if not self.accept_failures and failures:\n", + " return None, {}\n", + "\n", + " # Convert results\n", + " key_name = 'train_loss' if args.weight_strategy == 'loss' else 'wer'\n", + " weights = None\n", + "\n", + " # Standard FedAvg\n", + " if args.weight_strategy == 'num':\n", + " weights_results = [\n", + " (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples)\n", + " for _, fit_res in results\n", + " ]\n", + " # Here we do aggregation\n", + " weights = aggregate(weights_results)\n", + "\n", + " # If loss-based or WER-based aggregation, fetch the values of loss or WER from `metrics`\n", + " elif args.weight_strategy == 'loss' or args.weight_strategy == 'wer':\n", + " weights_results = [\n", + " (parameters_to_ndarrays(fit_res.parameters), fit_res.metrics[key_name])\n", + " for client, fit_res in results\n", + " ]\n", + " # Here we do aggregation\n", + " weights = aggregate(weights_results)\n", + "\n", + " # Aggregate custom metrics if aggregation fn was provided\n", + " 
metrics_aggregated = {}\n", + " if self.fit_metrics_aggregation_fn:\n", + " fit_metrics = [(res.num_examples, res.metrics) for _, res in results]\n", + " metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics)\n", + " elif server_round == 1: # Only log this warning once\n", + " log(WARNING, \"No fit_metrics_aggregation_fn provided\")\n", + "\n", + " # Train model after aggregation\n", + " if weights is not None:\n", + " print(f\"Train model after aggregation\")\n", + " save_path = args.save_path + \"add_train\"\n", + " # Initial Brain class and dataset\n", + " asr_brain, dataset = int_model(args.config_path, args.tr_add_path, args.tr_path, args.tr_path,\n", + " save_path,\n", + " args.data_path, args.config_file, args.tokenizer_path, add_train=True)\n", + " # Initial SpeechBrain client\n", + " client = SpeechBrainClient(None, asr_brain, dataset)\n", + "\n", + " # Call the training function\n", + " weights_after_server_side_training = client.train_speech_recogniser(\n", + " server_params=weights,\n", + " epochs=1,\n", + " add_train=True\n", + " )\n", + " # Release cuda memory after training\n", + " torch.cuda.empty_cache()\n", + " return ndarrays_to_parameters(weights_after_server_side_training), metrics_aggregated \n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1N7R8UhLJwsX" + }, + "source": [ + "## Run an experiment\n", + "\n", + "OK, it's time for launching our experiment!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 176603, + "status": "ok", + "timestamp": 1709075987779, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "eIinvOYVgKaz", + "outputId": "b940513c-62b7-4ad9-f534-e0c6ab221fbe" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "All background processes were killed.\n", + "2024-02-27 23:16:54.932275: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "2024-02-27 23:16:54.932334: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "2024-02-27 23:16:54.933651: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "2024-02-27 23:16:54.940829: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "Starting 1 clients.\n", + "Starting client(cid=0) with partition 0 out of 1 clients.\n", + "2024-02-27 23:16:56.317841: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", + "Server IP: 172.28.0.12\n", + "WARNING flwr 2024-02-27 23:17:00,830 | fedavg.py:118 | \n", + "Setting `min_available_clients` lower than `min_fit_clients` or\n", + "`min_evaluate_clients` can cause the server to fail when there are too few clients\n", 
+ "connected to the server. `min_available_clients` must be set to a value larger\n", + "than or equal to the values of `min_fit_clients` and `min_evaluate_clients`.\n", + "\n", + "INFO flwr 2024-02-27 23:17:00,838 | app.py:163 | Starting Flower server, config: ServerConfig(num_rounds=1, round_timeout=None)\n", + "INFO flwr 2024-02-27 23:17:00,890 | app.py:176 | Flower ECE: gRPC server running (1 rounds), SSL is disabled\n", + "INFO flwr 2024-02-27 23:17:00,890 | server.py:89 | Initializing global parameters\n", + "INFO flwr 2024-02-27 23:17:00,890 | server.py:276 | Requesting initial parameters from one random client\n", + "Started 1 clients.\n", + "2024-02-27 23:17:06.200995: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "2024-02-27 23:17:06.201049: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "2024-02-27 23:17:06.202383: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "2024-02-27 23:17:06.209160: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2024-02-27 23:17:07.312437: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", + "speechbrain.core - Beginning experiment!\n", + "speechbrain.core - Experiment folder: ./results/client_0\n", + "common_voice_prepare - Preparing CSV files for 50 samples ...\n", + "common_voice_prepare - Creating csv lists in 
./results/client_0/save/train.csv ...\n", + "100% 50/50 [00:00<00:00, 129.06it/s]\n", + "common_voice_prepare - ./results/client_0/save/train.csv sucessfully created!\n", + "common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "common_voice_prepare - Preparing CSV files for 50 samples ...\n", + "common_voice_prepare - Creating csv lists in ./results/client_0/save/dev.csv ...\n", + "100% 50/50 [00:00<00:00, 159.40it/s]\n", + "common_voice_prepare - ./results/client_0/save/dev.csv sucessfully created!\n", + "common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "common_voice_prepare - Preparing CSV files for 50 samples ...\n", + "common_voice_prepare - Creating csv lists in ./results/client_0/save/test.csv ...\n", + "100% 50/50 [00:00<00:00, 178.27it/s]\n", + "common_voice_prepare - ./results/client_0/save/test.csv sucessfully created!\n", + "common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "speechbrain.tokenizers.SentencePiece - Train tokenizer with type:unigram\n", + "speechbrain.tokenizers.SentencePiece - Extract wrd sequences from:./results/client_0/save/train.csv\n", + "speechbrain.tokenizers.SentencePiece - Text file created at: ./results/client_0/save/train.txt\n", + "sentencepiece_trainer.cc(177) LOG(INFO) Running command: --input=./results/client_0/save/train.txt --model_prefix=./results/client_0/save/250_unigram --model_type=unigram --bos_id=-1 --eos_id=-1 --pad_id=-1 --unk_id=0 --max_sentencepiece_length=10 --character_coverage=1.0 --add_dummy_prefix=True --vocab_size=250\n", + "sentencepiece_trainer.cc(77) LOG(INFO) Starts training with : \n", + "trainer_spec {\n", + " input: ./results/client_0/save/train.txt\n", + " input_format: \n", + " model_prefix: ./results/client_0/save/250_unigram\n", + " model_type: UNIGRAM\n", + " vocab_size: 250\n", + " self_test_sample_size: 0\n", + " 
character_coverage: 1\n", + " input_sentence_size: 0\n", + " shuffle_input_sentence: 1\n", + " seed_sentencepiece_size: 1000000\n", + " shrinking_factor: 0.75\n", + " max_sentence_length: 4192\n", + " num_threads: 16\n", + " num_sub_iterations: 2\n", + " max_sentencepiece_length: 10\n", + " split_by_unicode_script: 1\n", + " split_by_number: 1\n", + " split_by_whitespace: 1\n", + " split_digits: 0\n", + " pretokenization_delimiter: \n", + " treat_whitespace_as_suffix: 0\n", + " allow_whitespace_only_pieces: 0\n", + " required_chars: \n", + " byte_fallback: 0\n", + " vocabulary_output_piece_score: 1\n", + " train_extremely_large_corpus: 0\n", + " hard_vocab_limit: 1\n", + " use_all_vocab: 0\n", + " unk_id: 0\n", + " bos_id: -1\n", + " eos_id: -1\n", + " pad_id: -1\n", + " unk_piece: \n", + " bos_piece: \n", + " eos_piece: \n", + " pad_piece: \n", + " unk_surface: ⁇ \n", + " enable_differential_privacy: 0\n", + " differential_privacy_noise_level: 0\n", + " differential_privacy_clipping_threshold: 0\n", + "}\n", + "normalizer_spec {\n", + " name: nmt_nfkc\n", + " add_dummy_prefix: 1\n", + " remove_extra_whitespaces: 1\n", + " escape_whitespaces: 1\n", + " normalization_rule_tsv: \n", + "}\n", + "denormalizer_spec {}\n", + "trainer_interface.cc(351) LOG(INFO) SentenceIterator is not specified. Using MultiFileSentenceIterator.\n", + "trainer_interface.cc(183) LOG(INFO) Loading corpus: ./results/client_0/save/train.txt\n", + "trainer_interface.cc(407) LOG(INFO) Loaded all 50 sentences\n", + "trainer_interface.cc(423) LOG(INFO) Adding meta_piece: \n", + "trainer_interface.cc(428) LOG(INFO) Normalizing sentences...\n", + "trainer_interface.cc(537) LOG(INFO) all chars count=3018\n", + "trainer_interface.cc(558) LOG(INFO) Alphabet size=37\n", + "trainer_interface.cc(559) LOG(INFO) Final character coverage=1\n", + "trainer_interface.cc(591) LOG(INFO) Done! 
preprocessed 50 sentences.\n", + "unigram_model_trainer.cc(222) LOG(INFO) Making suffix array...\n", + "unigram_model_trainer.cc(226) LOG(INFO) Extracting frequent sub strings... node_num=1348\n", + "unigram_model_trainer.cc(274) LOG(INFO) Initialized 635 seed sentencepieces\n", + "trainer_interface.cc(597) LOG(INFO) Tokenizing input sentences with whitespace: 50\n", + "trainer_interface.cc(608) LOG(INFO) Done! 328\n", + "unigram_model_trainer.cc(564) LOG(INFO) Using 328 sentences for EM training\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=0 size=411 obj=14.1573 num_tokens=965 num_tokens/piece=2.34793\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=1 size=365 obj=13.6029 num_tokens=976 num_tokens/piece=2.67397\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=0 size=275 obj=14.1354 num_tokens=1061 num_tokens/piece=3.85818\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=1 size=274 obj=13.813 num_tokens=1061 num_tokens/piece=3.87226\n", + "trainer_interface.cc(686) LOG(INFO) Saving model: ./results/client_0/save/250_unigram.model\n", + "trainer_interface.cc(698) LOG(INFO) Saving vocabs: ./results/client_0/save/250_unigram.vocab\n", + "speechbrain.tokenizers.SentencePiece - ==== Loading Tokenizer ===\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer path: ./results/client_0/save/250_unigram.model\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer vocab_size: 250\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer type: unigram\n", + "speechbrain.core - Info: device arg from hparam file is used\n", + "speechbrain.core - Info: precision arg from hparam file is used\n", + "speechbrain.core - Gradscaler enabled: False. 
Using precision: fp32.\n", + "speechbrain.core - 46.3M trainable parameters in ASR\n", + "INFO flwr 2024-02-27 23:17:31,020 | grpc.py:52 | Opened insecure gRPC connection (no certificates were passed)\n", + "flwr - Opened insecure gRPC connection (no certificates were passed)\n", + "DEBUG flwr 2024-02-27 23:17:31,023 | connection.py:55 | ChannelConnectivity.IDLE\n", + "DEBUG flwr 2024-02-27 23:17:31,027 | connection.py:55 | ChannelConnectivity.CONNECTING\n", + "DEBUG flwr 2024-02-27 23:17:31,031 | connection.py:55 | ChannelConnectivity.READY\n", + "Client 0: get_parameters\n", + "INFO flwr 2024-02-27 23:17:34,462 | server.py:280 | Received initial parameters from one random client\n", + "INFO flwr 2024-02-27 23:17:34,462 | server.py:91 | Evaluating initial parameters\n", + "speechbrain.core - Beginning experiment!\n", + "speechbrain.core - Experiment folder: ./results/evaluation\n", + "common_voice_prepare - Preparing CSV files for 50 samples ...\n", + "common_voice_prepare - Creating csv lists in ./results/evaluation/save/train.csv ...\n", + "100% 50/50 [00:00<00:00, 162.24it/s]\n", + "common_voice_prepare - ./results/evaluation/save/train.csv sucessfully created!\n", + "common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "common_voice_prepare - Preparing CSV files for 50 samples ...\n", + "common_voice_prepare - Creating csv lists in ./results/evaluation/save/dev.csv ...\n", + "100% 50/50 [00:00<00:00, 169.96it/s]\n", + "common_voice_prepare - ./results/evaluation/save/dev.csv sucessfully created!\n", + "common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "common_voice_prepare - Preparing CSV files for 50 samples ...\n", + "common_voice_prepare - Creating csv lists in ./results/evaluation/save/test.csv ...\n", + "100% 50/50 [00:00<00:00, 172.94it/s]\n", + "common_voice_prepare - ./results/evaluation/save/test.csv sucessfully created!\n", + 
"common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "speechbrain.tokenizers.SentencePiece - Train tokenizer with type:unigram\n", + "speechbrain.tokenizers.SentencePiece - Extract wrd sequences from:./results/evaluation/save/train.csv\n", + "speechbrain.tokenizers.SentencePiece - Text file created at: ./results/evaluation/save/train.txt\n", + "sentencepiece_trainer.cc(177) LOG(INFO) Running command: --input=./results/evaluation/save/train.txt --model_prefix=./results/evaluation/save/250_unigram --model_type=unigram --bos_id=-1 --eos_id=-1 --pad_id=-1 --unk_id=0 --max_sentencepiece_length=10 --character_coverage=1.0 --add_dummy_prefix=True --vocab_size=250\n", + "sentencepiece_trainer.cc(77) LOG(INFO) Starts training with : \n", + "trainer_spec {\n", + " input: ./results/evaluation/save/train.txt\n", + " input_format: \n", + " model_prefix: ./results/evaluation/save/250_unigram\n", + " model_type: UNIGRAM\n", + " vocab_size: 250\n", + " self_test_sample_size: 0\n", + " character_coverage: 1\n", + " input_sentence_size: 0\n", + " shuffle_input_sentence: 1\n", + " seed_sentencepiece_size: 1000000\n", + " shrinking_factor: 0.75\n", + " max_sentence_length: 4192\n", + " num_threads: 16\n", + " num_sub_iterations: 2\n", + " max_sentencepiece_length: 10\n", + " split_by_unicode_script: 1\n", + " split_by_number: 1\n", + " split_by_whitespace: 1\n", + " split_digits: 0\n", + " pretokenization_delimiter: \n", + " treat_whitespace_as_suffix: 0\n", + " allow_whitespace_only_pieces: 0\n", + " required_chars: \n", + " byte_fallback: 0\n", + " vocabulary_output_piece_score: 1\n", + " train_extremely_large_corpus: 0\n", + " hard_vocab_limit: 1\n", + " use_all_vocab: 0\n", + " unk_id: 0\n", + " bos_id: -1\n", + " eos_id: -1\n", + " pad_id: -1\n", + " unk_piece: \n", + " bos_piece: \n", + " eos_piece: \n", + " pad_piece: \n", + " unk_surface: ⁇ \n", + " enable_differential_privacy: 0\n", + " differential_privacy_noise_level: 
0\n", + " differential_privacy_clipping_threshold: 0\n", + "}\n", + "normalizer_spec {\n", + " name: nmt_nfkc\n", + " add_dummy_prefix: 1\n", + " remove_extra_whitespaces: 1\n", + " escape_whitespaces: 1\n", + " normalization_rule_tsv: \n", + "}\n", + "denormalizer_spec {}\n", + "trainer_interface.cc(351) LOG(INFO) SentenceIterator is not specified. Using MultiFileSentenceIterator.\n", + "trainer_interface.cc(183) LOG(INFO) Loading corpus: ./results/evaluation/save/train.txt\n", + "trainer_interface.cc(407) LOG(INFO) Loaded all 50 sentences\n", + "trainer_interface.cc(423) LOG(INFO) Adding meta_piece: \n", + "trainer_interface.cc(428) LOG(INFO) Normalizing sentences...\n", + "trainer_interface.cc(537) LOG(INFO) all chars count=3018\n", + "trainer_interface.cc(558) LOG(INFO) Alphabet size=37\n", + "trainer_interface.cc(559) LOG(INFO) Final character coverage=1\n", + "trainer_interface.cc(591) LOG(INFO) Done! preprocessed 50 sentences.\n", + "unigram_model_trainer.cc(222) LOG(INFO) Making suffix array...\n", + "unigram_model_trainer.cc(226) LOG(INFO) Extracting frequent sub strings... node_num=1348\n", + "unigram_model_trainer.cc(274) LOG(INFO) Initialized 635 seed sentencepieces\n", + "trainer_interface.cc(597) LOG(INFO) Tokenizing input sentences with whitespace: 50\n", + "trainer_interface.cc(608) LOG(INFO) Done! 
328\n", + "unigram_model_trainer.cc(564) LOG(INFO) Using 328 sentences for EM training\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=0 size=411 obj=14.1573 num_tokens=965 num_tokens/piece=2.34793\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=1 size=365 obj=13.6029 num_tokens=976 num_tokens/piece=2.67397\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=0 size=275 obj=14.1354 num_tokens=1061 num_tokens/piece=3.85818\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=1 size=274 obj=13.813 num_tokens=1061 num_tokens/piece=3.87226\n", + "trainer_interface.cc(686) LOG(INFO) Saving model: ./results/evaluation/save/250_unigram.model\n", + "trainer_interface.cc(698) LOG(INFO) Saving vocabs: ./results/evaluation/save/250_unigram.vocab\n", + "speechbrain.tokenizers.SentencePiece - ==== Loading Tokenizer ===\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer path: ./results/evaluation/save/250_unigram.model\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer vocab_size: 250\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer type: unigram\n", + "speechbrain.core - Info: device arg from hparam file is used\n", + "speechbrain.core - Info: precision arg from hparam file is used\n", + "speechbrain.core - Gradscaler enabled: False. 
Using precision: fp32.\n", + "speechbrain.core - 46.3M trainable parameters in ASR\n", + "100% 13/13 [00:14<00:00, 1.15s/it]\n", + "speechbrain.utils.train_logger - Epoch loaded: 0 - test loss: 0.00e+00, test CER: 1.00e+02, test WER: 1.00e+02\n", + "INFO flwr 2024-02-27 23:18:08,737 | server.py:94 | initial parameters (loss, other metrics): 0.0, {'accuracy': 100.0}\n", + "flwr - initial parameters (loss, other metrics): 0.0, {'accuracy': 100.0}\n", + "INFO flwr 2024-02-27 23:18:08,739 | server.py:104 | FL starting\n", + "flwr - FL starting\n", + "DEBUG flwr 2024-02-27 23:18:08,739 | server.py:222 | fit_round 1: strategy sampled 1 clients (out of 1)\n", + "Client 0: fit\n", + "Current global round: 1\n", + "speechbrain.utils.checkpoints - Would load a checkpoint here, but none found yet.\n", + "speechbrain.utils.epoch_loop - Going into epoch 1\n", + "100% 12/12 [00:11<00:00, 1.05it/s, train_loss=7.47]\n", + "100% 13/13 [00:03<00:00, 4.20it/s]\n", + "speechbrain.utils.train_logger - epoch: 1, lr: 1.00e+00 - train loss: 7.47 - valid loss: 5.40, valid CER: 89.62, valid WER: 1.01e+02\n", + "Client 0: get_parameters\n", + "DEBUG flwr 2024-02-27 23:18:29,403 | server.py:236 | fit_round 1 received 1 results and 0 failures\n", + "WARNING flwr 2024-02-27 23:18:29,615 | server.py:103 | No fit_metrics_aggregation_fn provided\n", + "flwr - No fit_metrics_aggregation_fn provided\n", + "Train model after aggregation\n", + "speechbrain.core - Beginning experiment!\n", + "speechbrain.core - Experiment folder: ./results/add_train\n", + "common_voice_prepare - Preparing CSV files for 50 samples ...\n", + "common_voice_prepare - Creating csv lists in ./results/add_train/save/train.csv ...\n", + "100% 50/50 [00:00<00:00, 165.90it/s]\n", + "common_voice_prepare - ./results/add_train/save/train.csv sucessfully created!\n", + "common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "common_voice_prepare - Preparing CSV files for 50 
samples ...\n", + "common_voice_prepare - Creating csv lists in ./results/add_train/save/dev.csv ...\n", + "100% 50/50 [00:00<00:00, 170.89it/s]\n", + "common_voice_prepare - ./results/add_train/save/dev.csv sucessfully created!\n", + "common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "common_voice_prepare - Preparing CSV files for 50 samples ...\n", + "common_voice_prepare - Creating csv lists in ./results/add_train/save/test.csv ...\n", + "100% 50/50 [00:00<00:00, 169.93it/s]\n", + "common_voice_prepare - ./results/add_train/save/test.csv sucessfully created!\n", + "common_voice_prepare - Number of samples: 50 \n", + "common_voice_prepare - Total duration: 0.08 Hours\n", + "speechbrain.tokenizers.SentencePiece - Train tokenizer with type:unigram\n", + "speechbrain.tokenizers.SentencePiece - Extract wrd sequences from:./results/add_train/save/train.csv\n", + "speechbrain.tokenizers.SentencePiece - Text file created at: ./results/add_train/save/train.txt\n", + "sentencepiece_trainer.cc(177) LOG(INFO) Running command: --input=./results/add_train/save/train.txt --model_prefix=./results/add_train/save/250_unigram --model_type=unigram --bos_id=-1 --eos_id=-1 --pad_id=-1 --unk_id=0 --max_sentencepiece_length=10 --character_coverage=1.0 --add_dummy_prefix=True --vocab_size=250\n", + "sentencepiece_trainer.cc(77) LOG(INFO) Starts training with : \n", + "trainer_spec {\n", + " input: ./results/add_train/save/train.txt\n", + " input_format: \n", + " model_prefix: ./results/add_train/save/250_unigram\n", + " model_type: UNIGRAM\n", + " vocab_size: 250\n", + " self_test_sample_size: 0\n", + " character_coverage: 1\n", + " input_sentence_size: 0\n", + " shuffle_input_sentence: 1\n", + " seed_sentencepiece_size: 1000000\n", + " shrinking_factor: 0.75\n", + " max_sentence_length: 4192\n", + " num_threads: 16\n", + " num_sub_iterations: 2\n", + " max_sentencepiece_length: 10\n", + " split_by_unicode_script: 1\n", + " 
split_by_number: 1\n", + " split_by_whitespace: 1\n", + " split_digits: 0\n", + " pretokenization_delimiter: \n", + " treat_whitespace_as_suffix: 0\n", + " allow_whitespace_only_pieces: 0\n", + " required_chars: \n", + " byte_fallback: 0\n", + " vocabulary_output_piece_score: 1\n", + " train_extremely_large_corpus: 0\n", + " hard_vocab_limit: 1\n", + " use_all_vocab: 0\n", + " unk_id: 0\n", + " bos_id: -1\n", + " eos_id: -1\n", + " pad_id: -1\n", + " unk_piece: \n", + " bos_piece: \n", + " eos_piece: \n", + " pad_piece: \n", + " unk_surface: ⁇ \n", + " enable_differential_privacy: 0\n", + " differential_privacy_noise_level: 0\n", + " differential_privacy_clipping_threshold: 0\n", + "}\n", + "normalizer_spec {\n", + " name: nmt_nfkc\n", + " add_dummy_prefix: 1\n", + " remove_extra_whitespaces: 1\n", + " escape_whitespaces: 1\n", + " normalization_rule_tsv: \n", + "}\n", + "denormalizer_spec {}\n", + "trainer_interface.cc(351) LOG(INFO) SentenceIterator is not specified. Using MultiFileSentenceIterator.\n", + "trainer_interface.cc(183) LOG(INFO) Loading corpus: ./results/add_train/save/train.txt\n", + "trainer_interface.cc(407) LOG(INFO) Loaded all 50 sentences\n", + "trainer_interface.cc(423) LOG(INFO) Adding meta_piece: \n", + "trainer_interface.cc(428) LOG(INFO) Normalizing sentences...\n", + "trainer_interface.cc(537) LOG(INFO) all chars count=3018\n", + "trainer_interface.cc(558) LOG(INFO) Alphabet size=37\n", + "trainer_interface.cc(559) LOG(INFO) Final character coverage=1\n", + "trainer_interface.cc(591) LOG(INFO) Done! preprocessed 50 sentences.\n", + "unigram_model_trainer.cc(222) LOG(INFO) Making suffix array...\n", + "unigram_model_trainer.cc(226) LOG(INFO) Extracting frequent sub strings... node_num=1348\n", + "unigram_model_trainer.cc(274) LOG(INFO) Initialized 635 seed sentencepieces\n", + "trainer_interface.cc(597) LOG(INFO) Tokenizing input sentences with whitespace: 50\n", + "trainer_interface.cc(608) LOG(INFO) Done! 
328\n", + "unigram_model_trainer.cc(564) LOG(INFO) Using 328 sentences for EM training\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=0 size=411 obj=14.1573 num_tokens=965 num_tokens/piece=2.34793\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=1 size=365 obj=13.6029 num_tokens=976 num_tokens/piece=2.67397\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=0 size=275 obj=14.1354 num_tokens=1061 num_tokens/piece=3.85818\n", + "unigram_model_trainer.cc(580) LOG(INFO) EM sub_iter=1 size=274 obj=13.813 num_tokens=1061 num_tokens/piece=3.87226\n", + "trainer_interface.cc(686) LOG(INFO) Saving model: ./results/add_train/save/250_unigram.model\n", + "trainer_interface.cc(698) LOG(INFO) Saving vocabs: ./results/add_train/save/250_unigram.vocab\n", + "speechbrain.tokenizers.SentencePiece - ==== Loading Tokenizer ===\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer path: ./results/add_train/save/250_unigram.model\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer vocab_size: 250\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer type: unigram\n", + "speechbrain.core - Info: device arg from hparam file is used\n", + "speechbrain.core - Info: precision arg from hparam file is used\n", + "speechbrain.core - Gradscaler enabled: False. 
Using precision: fp32.\n", + "speechbrain.core - 46.3M trainable parameters in ASR\n", + "speechbrain.utils.checkpoints - Would load a checkpoint here, but none found yet.\n", + "speechbrain.utils.epoch_loop - Going into epoch 1\n", + "100% 12/12 [00:12<00:00, 1.06s/it, train_loss=5.56]\n", + "100% 13/13 [00:10<00:00, 1.24it/s]\n", + "speechbrain.utils.train_logger - epoch: 1, lr: 1.00e-02 - train loss: 5.56 - valid loss: 5.34, valid CER: 1.18e+02, valid WER: 2.27e+02\n", + "speechbrain.core - Beginning experiment!\n", + "speechbrain.core - Experiment folder: ./results/evaluation\n", + "common_voice_prepare - ./results/evaluation/save/train.csv already exists, skipping data preparation!\n", + "common_voice_prepare - ./results/evaluation/save/dev.csv already exists, skipping data preparation!\n", + "common_voice_prepare - ./results/evaluation/save/test.csv already exists, skipping data preparation!\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer is already trained.\n", + "speechbrain.tokenizers.SentencePiece - ==== Loading Tokenizer ===\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer path: ./results/evaluation/save/250_unigram.model\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer vocab_size: 250\n", + "speechbrain.tokenizers.SentencePiece - Tokenizer type: unigram\n", + "speechbrain.core - Info: device arg from hparam file is used\n", + "speechbrain.core - Info: precision arg from hparam file is used\n", + "speechbrain.core - Gradscaler enabled: False. 
Using precision: fp32.\n", + "speechbrain.core - 46.3M trainable parameters in ASR\n", + "100% 13/13 [00:12<00:00, 1.03it/s]\n", + "speechbrain.utils.train_logger - Epoch loaded: 0 - test loss: 0.00e+00, test CER: 1.00e+02, test WER: 1.00e+02\n", + "INFO flwr 2024-02-27 23:19:43,732 | server.py:125 | fit progress: (1, 0.0, {'accuracy': 100.0}, 94.99289715399999)\n", + "flwr - fit progress: (1, 0.0, {'accuracy': 100.0}, 94.99289715399999)\n", + "INFO flwr 2024-02-27 23:19:43,733 | client_manager.py:196 | Sampling failed: number of available clients (1) is less than number of requested clients (2).\n", + "flwr - Sampling failed: number of available clients (1) is less than number of requested clients (2).\n", + "INFO flwr 2024-02-27 23:19:43,733 | server.py:171 | evaluate_round 1: no clients selected, cancel\n", + "flwr - evaluate_round 1: no clients selected, cancel\n", + "INFO flwr 2024-02-27 23:19:43,733 | server.py:153 | FL finished in 94.99359605999996\n", + "flwr - FL finished in 94.99359605999996\n", + "INFO flwr 2024-02-27 23:19:43,743 | app.py:226 | app_fit: losses_distributed []\n", + "flwr - app_fit: losses_distributed []\n", + "INFO flwr 2024-02-27 23:19:43,743 | app.py:227 | app_fit: metrics_distributed_fit {}\n", + "flwr - app_fit: metrics_distributed_fit {}\n", + "INFO flwr 2024-02-27 23:19:43,743 | app.py:228 | app_fit: metrics_distributed {}\n", + "flwr - app_fit: metrics_distributed {}\n", + "INFO flwr 2024-02-27 23:19:43,743 | app.py:229 | app_fit: losses_centralized [(0, 0.0), (1, 0.0)]\n", + "flwr - app_fit: losses_centralized [(0, 0.0), (1, 0.0)]\n", + "INFO flwr 2024-02-27 23:19:43,743 | app.py:230 | app_fit: metrics_centralized {'accuracy': [(0, 100.0), (1, 100.0)]}\n", + "flwr - app_fit: metrics_centralized {'accuracy': [(0, 100.0), (1, 100.0)]}\n", + "DEBUG flwr 2024-02-27 23:19:43,792 | connection.py:220 | gRPC channel closed\n", + "INFO flwr 2024-02-27 23:19:43,793 | app.py:398 | Disconnect and shut down\n", + "flwr - Disconnect and shut 
down\n" + ] + } + ], + "source": [ + "%killbgscripts\n", + "!((./server.sh & sleep 5s); ./clients.sh)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IUDpDzQsCkKI" + }, + "source": [ + "As Colab only allows one cell to be run at a time, logs from both the server and all the clients will be blended together in the cell output. Here are a few tips on reading the log and dealing with the environment:\n", + "\n", + "* At the start, the clients first load the data, and you will see `common_voice_prepare - Preparing CSV files for ... samples`. The statistics of the loaded data will be shown. Then, the following lines are about training the tokenizer. Afterwards, you'll see the expected training or evaluation progress bar in the log.\n", + "* To see the evaluation WER, look for the `speechbrain.utils.train_logger - Epoch loaded: 0 - test loss: ..., test CER: ..., test WER: ...`. To see the training WER and loss, look for the line `speechbrain.utils.train_logger - epoch: ..., lr: ... - train loss: ... - valid loss: ..., valid CER: ..., valid WER: ...`.\n", + "* To terminate the experiment early, press the stop icon to the left of the cell. The stop icon is equivalent to `Ctrl+C` in a terminal, so you might have to press it multiple times to terminate quicker; if you get a pop-up saying that the environment became unresponsive, press `Cancel` rather than `Terminate`, as it should come back within a few seconds and you will not lose your progress.\n", + "\n", + "We can see that the results are poor. This is because we didn't leverage a pre-trained model for initialisation and only trained on a small toy dataset. Don't worry about these results: you will get acceptable results by running on a real dataset."
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [ + { + "file_id": 
"17tKZMghjFF0ZqHnDGty26Yn1RXW67DrX", + "timestamp": 1635935282341 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/advanced/hyperparameter-optimization.ipynb b/docs/tutorials/advanced/hyperparameter-optimization.ipynb new file mode 100644 index 0000000000..cc7ef9ece2 --- /dev/null +++ b/docs/tutorials/advanced/hyperparameter-optimization.ipynb @@ -0,0 +1,584 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/hyperparameter-optimization.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/hyperparameter-optimization.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GxuFVVbnJb8d" + }, + "source": [ + "# Hyperparameter Optimization\n", + "\n", + "Many of the speech processing tasks implemented as part of the SpeechBrain project rely on the careful selection of hyperparameters, such as:\n", + "\n", + "* The number of layers\n", + "* Normalization\n", + "* Hidden layer dimensions\n", + "* Weights within cost functions\n", + "* etc\n", + "\n", + "Selecting such hyperparameters by hand can be tedious. This tutorial will show how to use the automated hyperparameter optimization techniques implemented as part of the [Oríon](https://github.com/Epistimio/orion) project to automatically fit hyperparameters in a systematic way." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JnRSS3jGxVsk" + }, + "source": [ + "## Prerequisites\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "C0crB6tRbRFN" + }, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "m2X6fF7dbCRz" + }, + "outputs": [], + "source": [ + "import os" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vxr8nZ7bbP15" + }, + "source": [ + "### Install SpeechBrain\n", + "\n", + "SpeechBrain can be downloaded from the GitHub repository listed below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "KSOTkqPsJXxZ" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BTlxRL6aNtCp" + }, + "source": [ + "### Dependency Fixes\n", + "\n", + "PyYAML 6.0 is not backwards-compatible, a 5.x version is needed to support HyperPyYAML" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "EX8eL65Q6B4D" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install pyyaml==5.4.1" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iakT5Lq3xvSU" + }, + "source": [ + "### Install Oríon\n", + "Oríon can be installed using `pip` or `conda`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "09WKjQpoyHxl" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install orion[profet]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "CowQ-8Ia7pWo" + }, + "outputs": [], + "source": [ + "from speechbrain.utils import hpopt as hp" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hEcF9W_cyxpv" + }, + "source": [ + "## Update the Recipe to Support 
Hyperparameter Optimization\n", + "\n", + "SpeechBrain comes with a convenience wrapper called `hpopt`, which is capable of reporting objective values to Orion or to other tools.\n", + "\n", + "For a complete example on how to implement it,\n", + "\n", + "1. Add the following import statement to the top of your recipe:\n", + "\n", + "```python\n", + "from speechbrain.utils import hpopt as hp\n", + "```\n", + "\n", + "2. Wrap the main code of your recipe in a hyperparameter optimization context. Set `objective_key` to the metric that Orion will optimize.\n", + "\n", + " **Before**:\n", + "\n", + " ```python\n", + " hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])\n", + " \n", + " with open(hparams_file) as fin:\n", + " hparams = load_hyperpyyaml(fin, overrides)\n", + " \n", + " ## ...\n", + " \n", + " spk_id_brain = SpkIdBrain(\n", + " modules=hparams[\"modules\"],\n", + " opt_class=hparams[\"opt_class\"],\n", + " hparams=hparams,\n", + " run_opts=run_opts,\n", + " checkpointer=hparams[\"checkpointer\"],\n", + " )\n", + " \n", + " # The `fit()` method iterates the training loop, calling the methods\n", + " # necessary to update the parameters of the model. 
Since all objects\n", + " # with changing state are managed by the Checkpointer, training can be\n", + " # stopped at any point, and will be resumed on next call.\n", + " spk_id_brain.fit(\n", + " epoch_counter=spk_id_brain.hparams.epoch_counter,\n", + " train_set=datasets[\"train\"],\n", + " valid_set=datasets[\"valid\"],\n", + " train_loader_kwargs=hparams[\"dataloader_options\"],\n", + " valid_loader_kwargs=hparams[\"dataloader_options\"],\n", + " )\n", + "\n", + " ```\n", + "\n", + " **After**:\n", + "\n", + " ```python\n", + " with hp.hyperparameter_optimization(objective_key=\"error\") as hp_ctx: # <-- Initialize the context\n", + " hparams_file, run_opts, overrides = hp_ctx.parse_arguments(sys.argv[1:]) # <-- Replace sb with hp_ctx\n", + "\n", + " with open(hparams_file) as fin:\n", + " hparams = load_hyperpyyaml(fin, overrides)\n", + "\n", + " ## ...\n", + "\n", + " spk_id_brain = SpkIdBrain(\n", + " modules=hparams[\"modules\"],\n", + " opt_class=hparams[\"opt_class\"],\n", + " hparams=hparams,\n", + " run_opts=run_opts,\n", + " checkpointer=hparams[\"checkpointer\"],\n", + " )\n", + "\n", + " # The `fit()` method iterates the training loop, calling the methods\n", + " # necessary to update the parameters of the model. Since all objects\n", + " # with changing state are managed by the Checkpointer, training can be\n", + " # stopped at any point, and will be resumed on next call.\n", + " spk_id_brain.fit(\n", + " epoch_counter=spk_id_brain.hparams.epoch_counter,\n", + " train_set=datasets[\"train\"],\n", + " valid_set=datasets[\"valid\"],\n", + " train_loader_kwargs=hparams[\"dataloader_options\"],\n", + " valid_loader_kwargs=hparams[\"dataloader_options\"],\n", + " )\n", + " ```\n", + "\n", + "3. Add code to report the stats\n", + "\n", + " e.g. 
in `on_stage_end` when `stage == sb.Stage.VALID`\n", + "\n", + " ```python\n", + "hp.report_result(stage_stats)\n", + "```\n", + "\n", + " The **last** result reported through this function will be reported for hyperparameter optimization.\n", + "\n", + " The key specified in **objective_key** parameter needs to be present in the dictionary passed to `report_result`.\n", + "\n", + "4. Add the following lines in your main hyperparameter file `train.yaml`:\n", + "```yaml\n", + "hpopt_mode: null\n", + "hpopt: null\n", + "```\n", + "\n", + "5. **Optional**: Create a separate YAML file overriding any hyperparameters to be used during hyperparameter optimization that are **different** from the ones used during regular training **other than** the ones being fitted. A typical approach would reduce the number of epochs and the number of training samples.\n", + "\n", + " This step can be omitted if the number of parameters being overridden is small. In this case, they can be passed on the command line instead.\n", + "\n", + " Example:\n", + "\n", + " `hpopt.yaml`:\n", + " ```yaml\n", + " number_of_epochs: 1\n", + " ckpt_enable: false\n", + " ```\n", + "6. ❗ **Important**: Most recipes use a checkpointer to save snapshots of the model after each epoch (or on a custom schedule) to ensure that training can be resumed if it is interrupted. During hyperparameter optimization, this can cause issues because if the model's architecture (e.g. 
the number of layers, neurons per layer, etc) changes from one set of hyperparamter values to the next, an attempt to restore a checkpoint will fail.\n", + "\n", + " One possible solution is to make the run of the checkpointer conditional and to disable it in `hpopt.yaml`\n", + "\n", + " __Before__:\n", + " ```python\n", + " self.checkpointer.save_and_keep_only(meta=stats, min_keys=[\"error\"])\n", + " ```\n", + " __After__:\n", + " ```python\n", + " if self.hparams.ckpt_enable:\n", + " self.checkpointer.save_and_keep_only(meta=stats, min_keys=[\"error\"])\n", + " ```\n", + "\n", + " An alternative strategy is to reconfigure the checkpointer to save each run in a separate directory. For this scenario, the hyperparameter optimization wrapper can supply a variable named trial_id, which can be interpolated into the output path.\n", + "\n", + " Given below is an example of this strategy:\n", + "\n", + " `hpopt.yaml`:\n", + "\n", + " ```yaml\n", + " number_of_epochs: 1\n", + " ckpt_enable: False\n", + " trial_id: hpopt\n", + " output_folder: !ref ./results/speaker_id/\n", + " ```\n", + "\n", + " `train.yaml`:\n", + "\n", + " ```yaml\n", + " # ...\n", + " save_folder: !ref /save\n", + " # ...\n", + " checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer\n", + " checkpoints_dir: !ref #<-- will contain trial_id\n", + " recoverables:\n", + " embedding_model: !ref \n", + " classifier: !ref \n", + " normalizer: !ref \n", + " counter: !ref \n", + " ```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TwbiUJLWSICU" + }, + "source": [ + "## Perform the Hyperparameter Search\n", + "\n", + "### Choose and Prepare Hyperparameters\n", + "\n", + "Choose the hyperparameters that you would like to optimize using Orion out of the\n", + "ones available in your hyperparameter file. 
The hyperparameters need to be\n", + "available at the top level in order for it to be fitted using this technique.\n", + "\n", + "Consider the following sample file:\n", + "\n", + "```yaml\n", + "dropout: 0.1\n", + "n_mels: 80\n", + "encoder: !new:speechbrain.lobes.models.mymodel.MyModel\n", + " input_shape: [null, null, !ref ]\n", + " dropout: !ref \n", + " cnn_blocks: 3\n", + "```\n", + "\n", + "In the above file, `n_mels` and `dropout` are available for optimization, but `cnn_blocks` is not.\n", + "\n", + "To make `cnn_blocks` available for optimization, modify it as follows:\n", + "\n", + "```yaml\n", + "dropout: 0.1\n", + "n_mels: 80\n", + "cnn_blocks: 3 # <-- Define at the top level\n", + "encoder: !new:speechbrain.lobes.models.mymodel.MyModel\n", + " input_shape: [null, null, !ref ]\n", + " dropout: !ref \n", + " cnn_blocks: !ref # <-- Introduce a reference\n", + "```\n", + "\n", + "### Configure Orion\n", + "Create a `.yaml` file with the configuration for the Orion algorithm to be used.\n", + "\n", + "Given below is an example:\n", + "```yaml\n", + "experiment:\n", + " max_trials: 1000\n", + " max_broken: 1000\n", + " algorithms:\n", + " tpe:\n", + " seed: 42\n", + " n_initial_points: 5\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "E_kGgrp_Z35H" + }, + "outputs": [], + "source": [ + "config_file_content = \"\"\"\n", + "experiment:\n", + " max_trials: 3\n", + " max_broken: 1\n", + " algorithms:\n", + " tpe:\n", + " seed: 42\n", + " n_initial_points: 5\n", + "\"\"\"\n", + "config_path = os.path.expanduser(\"~/config\")\n", + "if not os.path.exists(config_path):\n", + " os.mkdir(config_path)\n", + "\n", + "config_file_path = os.path.join(config_path, \"orion-speaker-id.yaml\")\n", + "with open(config_file_path, \"w\") as config_file:\n", + " print(config_file_content, file=config_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5Kt-blyWadDV" + }, + "source": [ + "For more 
information on the available algorithms, please take a look at the [Orion Repository](https://github.com/Epistimio/orion/tree/develop/src/orion/algo)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xVyU61qXaCox" + }, + "source": [ + "### Define the Search Space\n", + "Write a shell script calling Orion defining a search space\n", + "\n", + "Example:\n", + "```sh\n", + "#!/bin/bash\n", + "HPOPT_EXPERIMENT_NAME=speaker-id\n", + "HPOPT_CONFIG_FILE=$HOME/config/orion-speaker-id.yaml\n", + "orion hunt -n $HPOPT_EXPERIMENT_NAME -c $HPOPT_CONFIG_FILE python train.py hparams/$HPARAMS \\\n", + " --hpopt hpopt.yaml \\\n", + " --hpopt_mode orion \\\n", + " --emb_dim~\"choices([128,256,512,768,1024])\" \\\n", + " --tdnn_channels~\"choices([128,256,512,768,1024])\"\n", + "```\n", + "\n", + "Replace `--hpopt hpopt.yaml` with `--hpopt=True` if you are not using the additional `hpopt.yaml` file." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xCw_DZYOZ7r0" + }, + "source": [ + "Consider running the standalone example below" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "lJup9mNnYw_0" + }, + "outputs": [], + "source": [ + "%env PYTHONPATH=/env/python:/content/speechbrain/\n", + "%cd /content/speechbrain/templates/hyperparameter_optimization_speaker_id\n", + "!orion hunt -n speaker-id -c $HOME/config/orion-speaker-id.yaml python train.py train.yaml \\\n", + " --hpopt hpopt.yaml \\\n", + " --hpopt_mode orion \\\n", + " --emb_dim~\"choices([128,256,512,768,1024])\" \\\n", + " --tdnn_channels~\"choices([128,256,512,768,1024])\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FntqoHyK8Llt" + }, + "source": [ + "## Inspecting Results\n", + "\n", + "Use the `orion info` command to inspect the results of hyperparameter fitting.\n", + "\n", + "The tool will output basic statistics about the hyperparameter fitting experiment, including the number of runs completed, the objective value for the best 
trial and the hyperparameter values corresponding to that run.\n", + "\n", + "In the example below, the best objective achieved value is shown under **evaluation**, and the corresponding hyperparameter values are shown under **params**.\n", + "\n", + "```\n", + "Stats\n", + "=====\n", + "completed: False\n", + "trials completed: 4\n", + "best trial:\n", + " id: c1a71e0988d70005302ab655d7e391d3\n", + " evaluation: 0.2384105920791626\n", + " params:\n", + " /emb_dim: 128\n", + " /tdnn_channels: 128\n", + "start time: 2021-11-14 21:01:12.760704\n", + "finish time: 2021-11-14 21:13:25.043336\n", + "duration: 0:12:12.282632\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "lVVUBZgYBNQP" + }, + "outputs": [], + "source": [ + "!orion info --name speaker-id" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ys-hUnmgSsFf" + }, + "source": [ + "## Hyperparameter Optimization at Scale\n", + "\n", + "### Multiple GPUs\n", + "Since Orion simply wraps the execution of the training script and launches it for each set of hyperparameters using the OS shell, training scripts that support Data-Parallel (DP) or Distributed Data Parallel (DDP) execution can be used with hyperparameter fitting without modification.\n", + "\n", + "For information on how to set up DP/DDP experiments, refer to the [SpeechBrain documentation](https://speechbrain.readthedocs.io/en/latest/multigpu.html#) and the [Multi-GPU Considerations](https://speechbrain.readthedocs.io/en/latest/multigpu.html) tutorial.\n", + "\n", + "### Parallel or Distributed Oríon\n", + "\n", + "Oríon itself provide support for parallel and distributed hyperparameter fitting.\n", + "\n", + "To use multiple parallel workers on a single node, pass the `--n-workers` parameter to the Oríon CLI.\n", + "\n", + "The example below will start the experiment with three workers:\n", + "```shell\n", + "orion hunt -n $HPOPT_EXPERIMENT_NAME -c $HOPT_CONFIG_FILE --n-workers 3 python 
train.py hparams/$HPARAMS \\\n", + " --hpopt hpopt.yaml \\\n", + " --hpopt_mode orion \\\n", + " --emb_dim~\"choices([128,256,512,768,1024])\" \\\n", + " --tdnn_channels~\"choices([128,256,512,768,1024])\"\n", + "\n", + "```\n", + "\n", + "For more advanced scenarios, including distributed hyperparameter fittig on multiple nodes, refer to the [Parallel Workers](https://orion.readthedocs.io/en/stable/user/parallel.html]) page in Oríon's official documentation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei 
Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [ + { + "file_id": "14Lh3BPve730S8NhbxypYeTrq16R32Xp5", + "timestamp": 1636476890780 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/advanced/inferring-on-your-own-speechbrain-models.ipynb b/docs/tutorials/advanced/inferring-on-your-own-speechbrain-models.ipynb new file mode 100644 index 0000000000..50a7e3f135 --- /dev/null +++ b/docs/tutorials/advanced/inferring-on-your-own-speechbrain-models.ipynb @@ -0,0 +1,586 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/inferring-on-your-own-speechbrain-models.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/inferring-on-your-own-speechbrain-models.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4RTi_EwOXzfb" + }, + "source": [ + "# Inferring on your trained SpeechBrain model\n", + "\n", + "In this tutorial, we will learn the different ways of inferring on a trained model. 
Please understand that this is not related to loading pretrained models for further training or transfer learning. If interested in these topics, refer to the corresponding [tutorial](https://colab.research.google.com/drive/1LN7R3U3xneDgDRK2gC5MzGkLysCWxuC3?usp=sharing).\n", + "\n", + "## Prerequisites\n", + "- [SpeechBrain Introduction](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/introduction-to-speechbrain.html)\n", + "- [YAML tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html)\n", + "- [Brain Class tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html)\n", + "- [Pretraining tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.html\n", + ")\n", + "\n", + "## Context\n", + "\n", + "In this example, we will consider a user that would like to use a custom pretrained speech recognizer **that has been trained by him** to transcribe some audio files. If you are interested in using online-available pretrained models, please refer to the [Pretraining tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.html\n", + "). The following can be extended to any SpeechBrain supported task as we provide an homogeneous way of dealing with all of them.\n", + "\n", + "## Different options available\n", + "\n", + "At this point, three options are available to you:\n", + "1. Define a custom python function in your ASR class (extended from Brain). This introduces strong coupling between the training recipe and your transcripts. It is pretty convenient for prototyping and obtaining simple transcripts on your datasets. However, it is not recommended for deployment.\n", + "2. 
Use already available Interfaces (such as `EncoderDecoderASR`, introduction in the [pretraining tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.html\n", + ")). This is probably the most elegant and convenient way. However, your model should be compliant with some constraints to fit the proposed interface.\n", + "3. Build your own Interface perfectly fitting to your custom ASR model.\n", + "\n", + "**Important: All these solutions also apply to other tasks (speaker recognition, source separation ...)**\n", + "\n", + "### 1. Custom function in the training script\n", + "The goal of this approach is to enable the user to call a function at the end of `train.py` that transcribes a given dataset:\n", + "\n", + "```python\n", + " # Trainer initialization\n", + " asr_brain = ASR(\n", + " modules=hparams[\"modules\"],\n", + " opt_class=hparams[\"opt_class\"],\n", + " hparams=hparams,\n", + " run_opts=run_opts,\n", + " checkpointer=hparams[\"checkpointer\"],\n", + " )\n", + "\n", + " # Training\n", + " asr_brain.fit(\n", + " asr_brain.hparams.epoch_counter,\n", + " datasets[\"train\"],\n", + " datasets[\"valid\"],\n", + " train_loader_kwargs=hparams[\"train_dataloader_opts\"],\n", + " valid_loader_kwargs=hparams[\"valid_dataloader_opts\"],\n", + " )\n", + "\n", + " # Load best checkpoint for evaluation\n", + " test_stats = asr_brain.evaluate(\n", + " test_set=datasets[\"test\"],\n", + " min_key=\"WER\",\n", + " test_loader_kwargs=hparams[\"test_dataloader_opts\"],\n", + " )\n", + "\n", + " # Load best checkpoint for transcription !!!!!!\n", + " # You need to create this function w.r.t your system architecture !!!!!!\n", + " transcripts = asr_brain.transcribe_dataset(\n", + " dataset=datasets[\"your_dataset\"], # Must be obtained from the dataio_function\n", + " min_key=\"WER\", # We load the model with the lowest WER\n", + " loader_kwargs=hparams[\"transcribe_dataloader_opts\"], # opts for the 
dataloading\n", + " )\n", + "```\n", + "\n", + "As you can see, there exists a strong coupling with the training recipe due to the need for an instantiated Brain class.\n", + "\n", + "**Note 1:** You can remove the `.fit()` and `.evaluate()` if you don't want to call them. This is just an example to better highlight how to use it.\n", + "\n", + "**Note 2:** Here, the `.transcribe_dataset()` function takes a `dataset` object to transcribe. You could also simply use a path instead. It is **completely** up to you to implement this function as you wish.\n", + "\n", + "Now: what to put in this function? Here, we will give an example based on the template, but you will need to adapt it to **your** system.\n", + "\n", + "```python\n", + "\n", + "def transcribe_dataset(\n", + " self,\n", + " dataset, # Must be obtained from the dataio_function\n", + " min_key, # We load the model with the lowest WER\n", + " loader_kwargs # opts for the dataloading\n", + " ):\n", + " \n", + " # If dataset isn't a Dataloader, we create it.\n", + " if not isinstance(dataset, DataLoader):\n", + " loader_kwargs[\"ckpt_prefix\"] = None\n", + " dataset = self.make_dataloader(\n", + " dataset, Stage.TEST, **loader_kwargs\n", + " )\n", + " \n", + " \n", + " self.on_evaluate_start(min_key=min_key) # We call the on_evaluate_start that will load the best model\n", + " self.modules.eval() # We set the model to eval mode (remove dropout etc)\n", + "\n", + " # Now we iterate over the dataset and we simply compute_forward and decode\n", + " with torch.no_grad():\n", + "\n", + " transcripts = []\n", + " for batch in tqdm(dataset, dynamic_ncols=True):\n", + " \n", + " # Make sure that your compute_forward returns the predictions !!!\n", + " # In the case of the template, when stage = TEST, a beam search is applied\n", + " # in compute_forward().\n", + " out = self.compute_forward(batch, stage=sb.Stage.TEST)\n", + " p_seq, wav_lens, predicted_tokens = out\n", + " \n", + " # We go from tokens to words.\n", + 
" predicted_words = self.tokenizer(\n", + " predicted_tokens, task=\"decode_from_list\"\n", + " )\n", + " transcripts.append(predicted_words)\n", + " \n", + " return transcripts\n", + "```\n", + "\n", + "The pipeline is simple: load the model -> do compute_forward -> detokenize.\n", + "\n", + "### 2. Using the `EndoderDecoderASR` interface\n", + "\n", + "The [EncoderDecoderASR class](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/inference/ASR.py). interface allows you to decouple your trained model from the training recipe and to infer (or encode) on any new audio file in few lines of code. If you are not interested in ASR, you'll find many other interfaces to fit your purpose in the `interfaces.py` file. This solution must be preferred if you intend to deploy your model in a production fashion i.e. if you plan to use your model a lot and in a stable way. Of course, this will require you to slightly rework the yaml.\n", + "\n", + "The class has the following methods:\n", + "\n", + "- *encode_batch*: apply the encoder to an input batch and returns some encoded features.\n", + "- *transcribe_file*: transcribes the single audio file in input.\n", + "- *transcribe_batch*: transcribes the input batch.\n", + "\n", + "In fact, if you fulfill few constraints that we will detail in the next paragraph, you can simply do:\n", + "\n", + "```python\n", + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "\n", + "asr_model = EncoderDecoderASR.from_hparams(source=\"your_folder\", hparams_file='your_file.yaml', savedir=\"pretrained_model\")\n", + "asr_model.transcribe_file('your_file.wav')\n", + "```\n", + "\n", + "Nevertheless, to allow such a generalization over all the possible EncoderDecoder ASR pipelines, you will have to consider a few constraints when deploying your system:\n", + "\n", + "1. 
**Necessary modules.** As you can see in the `EncoderDecoderASR` class, the modules defined in your yaml file MUST contain certain elements with specific names. In practice, you need a tokenizer, a decoder, and a decoder. The encoder can simply be a `speechbrain.nnet.containers.LengthsCapableSequential` composed with a sequence of features computation, normalization and model encoding.\n", + "```python\n", + " HPARAMS_NEEDED = [\"tokenizer\"]\n", + " MODULES_NEEDED = [\n", + " \"encoder\",\n", + " \"decoder\",\n", + " ]\n", + "```\n", + "\n", + "You also need to declare these entities in the YAML file and create the following dictionary called `modules`:\n", + "\n", + "```yaml\n", + "encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential\n", + " input_shape: [null, null, !ref ]\n", + " compute_features: !ref \n", + " normalize: !ref \n", + " model: !ref \n", + "\n", + "ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer\n", + " eos_index: !ref \n", + " blank_index: !ref \n", + " ctc_fc: !ref \n", + "\n", + "coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer\n", + " vocab_size: !ref \n", + "\n", + "rnnlm_scorer: !new:speechbrain.decoders.scorer.RNNLMScorer\n", + " language_model: !ref \n", + " temperature: !ref \n", + "\n", + "scorer: !new:speechbrain.decoders.scorer.ScorerBuilder\n", + " scorer_beam_scale: 1.5\n", + " full_scorers: [\n", + " !ref ,\n", + " !ref ]\n", + " partial_scorers: [!ref ]\n", + " weights:\n", + " rnnlm: !ref \n", + " coverage: !ref \n", + " ctc: !ref \n", + "\n", + "decoder: !new:speechbrain.decoders.S2SRNNBeamSearcher\n", + " embedding: !ref \n", + " decoder: !ref \n", + " linear: !ref \n", + " bos_index: !ref \n", + " eos_index: !ref \n", + " min_decode_ratio: !ref \n", + " max_decode_ratio: !ref \n", + " beam_size: !ref \n", + " eos_threshold: !ref \n", + " using_max_attn_shift: !ref \n", + " max_attn_shift: !ref \n", + " temperature: !ref \n", + " scorer: !ref \n", + "\n", + "modules:\n", + " encoder: !ref 
\n", + " decoder: !ref \n", + " lm_model: !ref \n", + "```\n", + "\n", + "In this case, `enc` is a CRDNN, but could be any custom neural network for instance.\n", + "\n", + " **Why do you need to ensure this?** Well, it simply is because these are the modules we call when inferring on the `EncoderDecoderASR` class. Here is an example of the `encode_batch()` function.\n", + "```python\n", + "[...]\n", + " wavs = wavs.float()\n", + " wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)\n", + " encoder_out = self.modules.encoder(wavs, wav_lens)\n", + "return encoder_out\n", + "```\n", + " **What if I have a complex asr_encoder structure with multiple deep neural networks and stuffs ?** Simply put everything in a torch.nn.ModuleList in your yaml:\n", + "```yaml\n", + "asr_encoder: !new:torch.nn.ModuleList\n", + " - [!ref , my_different_blocks ... ]\n", + "```\n", + "\n", + "2. **Call to the pretrainer to load the checkpoints.** Finally, you need to define a call to the pretrainer that will load the different checkpoints of your trained model into the corresponding SpeechBrain modules. In short, it will load the weights of your encoder, language model or even simply load the tokenizer.\n", + "```yaml\n", + "pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer\n", + " loadables:\n", + " asr: !ref \n", + " lm: !ref \n", + " tokenizer: !ref \n", + " paths:\n", + " asr: !ref \n", + " lm: !ref \n", + " tokenizer: !ref \n", + "```\n", + "The loadable field creates a link between a file (e.g. `lm` that is related to the checkpoint in ``) to a yaml instance (e.g. ``) that is nothing more than your lm.\n", + "\n", + "If you respect these two constraints, it should works! 
Here, we give a complete example of a yaml that is used for inference only:\n", + "\n", + "```yaml\n", + "\n", + "# ############################################################################\n", + "# Model: E2E ASR with attention-based ASR\n", + "# Encoder: CRDNN model\n", + "# Decoder: GRU + beamsearch + RNNLM\n", + "# Tokens: BPE with unigram\n", + "# Authors: Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, Peter Plantinga 2020\n", + "# ############################################################################\n", + "\n", + "\n", + "# Feature parameters\n", + "sample_rate: 16000\n", + "n_fft: 400\n", + "n_mels: 40\n", + "\n", + "# Model parameters\n", + "activation: !name:torch.nn.LeakyReLU\n", + "dropout: 0.15\n", + "cnn_blocks: 2\n", + "cnn_channels: (128, 256)\n", + "inter_layer_pooling_size: (2, 2)\n", + "cnn_kernelsize: (3, 3)\n", + "time_pooling_size: 4\n", + "rnn_class: !name:speechbrain.nnet.RNN.LSTM\n", + "rnn_layers: 4\n", + "rnn_neurons: 1024\n", + "rnn_bidirectional: True\n", + "dnn_blocks: 2\n", + "dnn_neurons: 512\n", + "emb_size: 128\n", + "dec_neurons: 1024\n", + "output_neurons: 1000 # index(blank/eos/bos) = 0\n", + "blank_index: 0\n", + "\n", + "# Decoding parameters\n", + "bos_index: 0\n", + "eos_index: 0\n", + "min_decode_ratio: 0.0\n", + "max_decode_ratio: 1.0\n", + "beam_size: 80\n", + "eos_threshold: 1.5\n", + "using_max_attn_shift: True\n", + "max_attn_shift: 240\n", + "lm_weight: 0.50\n", + "coverage_penalty: 1.5\n", + "temperature: 1.25\n", + "temperature_lm: 1.25\n", + "\n", + "normalize: !new:speechbrain.processing.features.InputNormalization\n", + " norm_type: global\n", + "\n", + "compute_features: !new:speechbrain.lobes.features.Fbank\n", + " sample_rate: !ref \n", + " n_fft: !ref \n", + " n_mels: !ref \n", + "\n", + "enc: !new:speechbrain.lobes.models.CRDNN.CRDNN\n", + " input_shape: [null, null, !ref ]\n", + " activation: !ref \n", + " dropout: !ref \n", + " cnn_blocks: !ref \n", + " cnn_channels: !ref \n", + " cnn_kernelsize: 
!ref \n", + " inter_layer_pooling_size: !ref \n", + " time_pooling: True\n", + " using_2d_pooling: False\n", + " time_pooling_size: !ref \n", + " rnn_class: !ref \n", + " rnn_layers: !ref \n", + " rnn_neurons: !ref \n", + " rnn_bidirectional: !ref \n", + " rnn_re_init: True\n", + " dnn_blocks: !ref \n", + " dnn_neurons: !ref \n", + "\n", + "emb: !new:speechbrain.nnet.embedding.Embedding\n", + " num_embeddings: !ref \n", + " embedding_dim: !ref \n", + "\n", + "dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder\n", + " enc_dim: !ref \n", + " input_size: !ref \n", + " rnn_type: gru\n", + " attn_type: location\n", + " hidden_size: !ref \n", + " attn_dim: 1024\n", + " num_layers: 1\n", + " scaling: 1.0\n", + " channels: 10\n", + " kernel_size: 100\n", + " re_init: True\n", + " dropout: !ref \n", + "\n", + "ctc_lin: !new:speechbrain.nnet.linear.Linear\n", + " input_size: !ref \n", + " n_neurons: !ref \n", + "\n", + "seq_lin: !new:speechbrain.nnet.linear.Linear\n", + " input_size: !ref \n", + " n_neurons: !ref \n", + "\n", + "log_softmax: !new:speechbrain.nnet.activations.Softmax\n", + " apply_log: True\n", + "\n", + "lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM\n", + " output_neurons: !ref \n", + " embedding_dim: !ref \n", + " activation: !name:torch.nn.LeakyReLU\n", + " dropout: 0.0\n", + " rnn_layers: 2\n", + " rnn_neurons: 2048\n", + " dnn_blocks: 1\n", + " dnn_neurons: 512\n", + " return_hidden: True # For inference\n", + "\n", + "tokenizer: !new:sentencepiece.SentencePieceProcessor\n", + "\n", + "asr_model: !new:torch.nn.ModuleList\n", + " - [!ref , !ref , !ref , !ref , !ref ]\n", + "\n", + "# We compose the inference (encoder) pipeline.\n", + "encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential\n", + " input_shape: [null, null, !ref ]\n", + " compute_features: !ref \n", + " normalize: !ref \n", + " model: !ref \n", + "\n", + "\n", + "ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer\n", + " eos_index: !ref \n", + " blank_index: !ref 
\n", + " ctc_fc: !ref \n", + "\n", + "coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer\n", + " vocab_size: !ref \n", + "\n", + "rnnlm_scorer: !new:speechbrain.decoders.scorer.RNNLMScorer\n", + " language_model: !ref \n", + " temperature: !ref \n", + "\n", + "scorer: !new:speechbrain.decoders.scorer.ScorerBuilder\n", + " scorer_beam_scale: 1.5\n", + " full_scorers: [\n", + " !ref ,\n", + " !ref ]\n", + " partial_scorers: [!ref ]\n", + " weights:\n", + " rnnlm: !ref \n", + " coverage: !ref \n", + " ctc: !ref \n", + "\n", + "decoder: !new:speechbrain.decoders.S2SRNNBeamSearcher\n", + " embedding: !ref \n", + " decoder: !ref \n", + " linear: !ref \n", + " bos_index: !ref \n", + " eos_index: !ref \n", + " min_decode_ratio: !ref \n", + " max_decode_ratio: !ref \n", + " beam_size: !ref \n", + " eos_threshold: !ref \n", + " using_max_attn_shift: !ref \n", + " max_attn_shift: !ref \n", + " temperature: !ref \n", + " scorer: !ref \n", + " \n", + "\n", + "modules:\n", + " encoder: !ref \n", + " decoder: !ref \n", + " lm_model: !ref \n", + "\n", + "pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer\n", + " loadables:\n", + " asr: !ref \n", + " lm: !ref \n", + " tokenizer: !ref \n", + "\n", + "\n", + "```\n", + "\n", + "As you can see, it is a standard YAMl file, but with a pretrainer that loads the model. It is similar to the yaml file used for training. We only have to remove all the parts that are training-specific (e.g, training parameters, optimizers, checkpointers, etc.) and add the pretrainer and `encoder`, `decoder` elements that links the needed modules with their pre-trained files.\n", + "\n", + "### 3. Developing your own inference interface\n", + "\n", + "While the `EncoderDecoderASR` class has been designed to be as generic as possible, your might require a more complex inference scheme that better fits your needs. In this case, you have to develop your own interface. To do so, follow these steps:\n", + "\n", + "1. 
Create your custom interface inheriting from `Pretrained` (code [in this file](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/inference/interfaces.py)):\n", + "\n", + "\n", + "```python\n", + "class MySuperTask(Pretrained):\n", + " # Here, do not hesitate to also add some required modules\n", + " # for further transparency.\n", + " HPARAMS_NEEDED = [\"mymodule1\", \"mymodule2\"]\n", + " MODULES_NEEDED = [\n", + " \"mytask_enc\",\n", + " \"my_searcher\",\n", + " ]\n", + " def __init__(self, *args, **kwargs):\n", + " super().__init__(*args, **kwargs)\n", + " # Do whatever is needed here w.r.t your system\n", + "```\n", + "\n", + "This will enable your class to call useful functions such as `.from_hparams()` that fetches and loads based on a HyperPyYAML file, `load_audio()` that loads a given audio file. Likely, most of the methods that we coded in the Pretrained class will fit your need. If not, you can override them to implement your custom functionality.\n", + "\n", + "\n", + "2. Develop your interface and the different functionalities. Unfortunately, we can't provide a generic enough example here. You can add **any** function to this class that you think can make inference on your data/model easier and natural. 
For instance, we can create here a function that simply encodes a wav file using the `mytask_enc` module.\n", + "```python\n", + "class MySuperTask(Pretrained):\n", + " # Here, do not hesitate to also add some required modules\n", + " # for further transparency.\n", + " HPARAMS_NEEDED = [\"mymodule1\", \"mymodule2\"]\n", + " MODULES_NEEDED = [\n", + " \"mytask_enc\",\n", + " \"my_searcher\",\n", + " ]\n", + " def __init__(self, *args, **kwargs):\n", + " super().__init__(*args, **kwargs)\n", + " # Do whatever is needed here w.r.t your system\n", + " \n", + " def encode_file(self, path):\n", + " waveform = self.load_audio(path)\n", + " # Fake a batch:\n", + " batch = waveform.unsqueeze(0)\n", + " rel_length = torch.tensor([1.0])\n", + " with torch.no_grad():\n", + " rel_lens = rel_length.to(self.device)\n", + " encoder_out = self.encode_batch(waveform, rel_lens)\n", + " \n", + " return encode_file\n", + "```\n", + "\n", + "Now, we can use your Interface in the following way:\n", + "```python\n", + "from speechbrain.inference.my_super_task import MySuperTask\n", + "\n", + "my_model = MySuperTask.from_hparams(source=\"your_local_folder\", hparams_file='your_file.yaml', savedir=\"pretrained_model\")\n", + "audio_file = 'your_file.wav'\n", + "encoded = my_model.encode_file(audio_file)\n", + "\n", + "```\n", + "\n", + "As you can see, this formalism is extremely flexible and enables you to create a holistic interface that can be used to do anything you want with your pretrained model.\n", + "\n", + "We provide different generic interfaces for E2E ASR, speaker recognition, source separation, speech enhancement, etc. Please have a look [here](https://github.com/speechbrain/speechbrain/tree/develop/speechbrain/inference) if interested!\n", + "\n", + "\n", + "## General Pretraining Inference\n", + "In some cases, users might want to develop their inference interface in an external file. 
This can be done using the [foreign class](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/inference/interfaces.py).\n", + "You can take a look at the example reported [here](https://huggingface.co/speechbrain/emotion-recognition-wav2vec2-IEMOCAP):\n", + "\n", + "\n", + "\n", + "```\n", + "from speechbrain.inference.interfaces import foreign_class\n", + "classifier = foreign_class(source=\"speechbrain/emotion-recognition-wav2vec2-IEMOCAP\", pymodule_file=\"custom_interface.py\", classname=\"CustomEncoderWav2vec2Classifier\")\n", + "out_prob, score, index, text_lab = classifier.classify_file(\"speechbrain/emotion-recognition-wav2vec2-IEMOCAP/anger.wav\")\n", + "print(text_lab)\n", + "```\n", + "\n", + "\n", + "\n", + "In this case, the inference interface is not a class written in `speechbrain.pretrained.interfaces`, but it is coded in an external file (`custom_interface.py`).\n", + "\n", + "This might be useful if the interface that you need is not available in `speechbrain.pretrained.interfaces`. If you want, you can add it there. 
If you use the foreign_class, however, we also give you the possibility to fetch the inference code from any other path.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + 
"metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/advanced/model-quantization.ipynb b/docs/tutorials/advanced/model-quantization.ipynb new file mode 100644 index 0000000000..2419b76b4e --- /dev/null +++ b/docs/tutorials/advanced/model-quantization.ipynb @@ -0,0 +1,1751 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/model-quantization.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/model-quantization.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0TAuTeJw1lfg" + }, + "source": [ + "# Applying Quantization to a Speech Recognition Model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7fAsr2SqINPz" + }, + "source": [ + "## Introduction to Quantization\n", + "\n", + "Quantization is often required for low latency applications of SpeechBrain automatic speech recognition models, such as real-time speech recognition.\n", + "\n", + "Quantization works by converting the weights and activations of a model from floating point values into lower resolution values, such as 8-bit integers. 
Not only does this reduce the memory footprint of the model, but it also reduces inference latency, because integer operations are typically faster than floating point operations.\n", + "\n", + "This conversion works by mapping values in a given range to the quantized range and \"clipping\" values to the closest value at the chosen resolution. In general, there are two main concepts associated with quantization: zero point and scale factor.\n", + "\n", + "- Zero point: the quantized value that 0 is mapped to during quantization.\n", + "\n", + "- Scale factor: the factor by which the data range is scaled to fit within the quantized range.\n", + "\n", + "Put together, the zero point and scale factor describe how the mapping works.\n", + "\n", + "In other words,\n", + "\n", + "$y = round\\left(\\frac{x}{S} + Z\\right)$\n", + "\n", + "where $x$ is the original value, $y$ is the quantized value, $S$ is the scale factor, and $Z$ is the zero point." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PE7F4x2QWCM-" + }, + "source": [ + 
"![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAn0AAAF4CAIAAACuNNi0AAAgAElEQVR4AeydCXwM9/vHhx7OUlospYQUcVMqoYq0VaLRNpqqtLRptJWmrsZRzY9quqUILU0pIs5NXRvnBmHjCBuExJkgkrgXwSLYkuP5/78zszuzyebenZ3ZefY1r2R35jvf53nez8z3M9+Z78xQgB8kgASQABJAAkhAKAKUUIbQDhJAAkgACSABJACou7gRIAEkgASQABIQjgDqrnCs0RISQAJIAAkgAdRd3AaQABJAAkgACQhHAHVXONZoCQkgASSABJAA6i5uA0gACSABJIAEhCOAuisca7SEBJAAEkACSAB1F7cBJIAEkAASQALCEUDdFY41WkICSAAJIAEkgLqL2wASQAJIAAkgAeEIoO4KxxotIQEkgASQABJA3cVtAAkgASSABJCAcARQd4VjjZaQABJAAkgACaDu4jaABJAAEkACSEA4Aqi7wrFGS0gACSABJIAEUHdxG0ACSAAJIAEkIBwB1F3hWKMlJIAEkAASQAKou7gNIAEkgASQABIQjgDqrnCs0RISQAJIAAkgAdRd3AaQABJAAkgACQhHAHVXONZoCQkgASSABJAA6i5uA0gACSABJIAEhCOAuisca7SEBJAAEkACSAB1F7cBJIAEkAASQALCEUDdFY41WkICSAAJIAEkgLqL2wASQAJIAAkgAeEIoO4KxxotIQEkgASQABJA3cVtAAkgASSABJCAcARQd4VjjZYkRsCo10Up/dr5qa5IzPEi3c0xpMSE+bVTKBOKLIILkAASsDcB1F17E8b6RUdAn6AKC/TxbKegyMfF490AZZROn2Php/GUOnxKgAcp4OMcums8pVJOZiKiitTdBCXNxMofRTtPT9+gkEh10hWjBSnBfhTlm8LN01oGBfMLDSGBshJA3S0rMSwvZQKGpHBfF0rhpYxJMTDykWPMSIgI7qugXH3CEw2WselVvs6ju3RoRu0EoqlF6i5dyBAb0plW3pAYTmKN+hTNLD83cqyi8BynzrA8TLHkVtSvjIhIXVHLSjm/sG/GTK3Smz6EclfqsktZDRZDAo4kgLrrSPpoW1ACmZogd4pS+KkyC5nNyVANU1CUhzKBUxoA0E1xMt0FXWjJugugY7q9VuQ5U00YUpRimCqjEMUSZqRFeIVWVHet+5atC+lEezVZa5G/EhzCxUjAMQRQdx3DHa0KTiAjYgDRi6AtBTq1JkfSIrzIcj8172ourVJOcp6ZibOiugsAmSofujfsMTfJxK40//XqYQrKTroLkDSX6aIHo/CWJhlYxrEEUHcdyx+tC0TAEB1Ad4hCim6XDWr/gn0m1F2r6UmaxYhcgDrL6vLCM40pi/3IuWC76a4+ijkYcKqDpMIccY5zEEDddY48YhTFE2Cu1FLUhKJlFyBjJdN2c30ms+4aU1XB3uTipqKdX1ic3tKYQTcrwMOVdJbd+gYEzwoKjrIsoNeG+XsyV0bdhirVqdypUEOqVhXq5zZMpQdj0nwfF0rh9vXSv8lFZe5jPtnLdFXJAl8VZ6DoyhknDafUSn8PF9o7rwmq8FGkAnOdloGYfxV9npkpkhbhSTvIP3lgTDUZoigXd5+gyCTTiQW9ZpwnM4bNFJVZHQ26+UFezAA3hZunv1JT+BKA2Sn2i3XfdKG0BYWy0InsYk1kpWgig9kh69kpqgleJE2uXsFbCp1EN6SoQ5ksm4Jg//MtGlKign3cCWzK1Stovs5EoGAM+FvmBFB3Zb4ByCR8XQjdSnYu/tRoPFNKoUxksTC6Gx6l9PIOCpkVFjyUVk9K4RfFtctJcz3IxU5aTA3JEX4Kyoenu8YEpYd7kDqNLDam0RdHFZ7hyeRXUrRaNd2LyIWvSrcuOGgCM9g4SJNlSGJ6h1SQxnKgUEakj0co15oXXTnrf8a6ABfKIyia9s6o10ymB2hXXHeB5cn1
X5PDPCjKZ0EKUZocvWYC6RB7RXKU4Ap9dtqiv2vQBCqoTsEa+iDCcCqcHPUoijkhwQRlTXczVXRvmvJazLNIihdrIlWjjlLSCfBRxWuCA4MjorXaaGaOV0Qay5D8y9Yp3SnFsAg6PAPbdy840D1DNczFa67OkAOQY9DNJRW7jNKg9PI44leWAOoubgoyIMA0+pSFIloJ23Snirk7SOuuwm9xirmLaogNsby5KEmpoIL4435jg/3MupulCVJ0DiMqy36MscHkAGBABKsPRi353ckrLJ62oE/SpTENNXM12rL1B4Pan3dqt8TK6QuxllKUFEaPPzIHaPKrwH9r2mZRhC1gPn9gPjHAlkoOI8I7hdf5LKy7BecYNIH0KLYSurwWvhkNGbqoEFo7FZ5TtAVFrmQTjFGXgEguxRmLSWeef/BEn1fn55HJDuW3jjvvkDTXg/JX8xzIiHiXPo6IM287FgTxh5wJoO7KOfuyib1Cums+KcrgMmroU7WmdplWMt9wuifEFNCpTM0xueioUFqMPmI98VOzLTatIh9ZGRvMXJDuPIu3dlqED2+8bkmVG7WTFYVvPqZHaFf4PLNpwLNZWTNW+ihcg7nOHRMm/3x4QQkEMOpCXBWeC1LMWyFzIr2UxwTsWV7yz80nVKWzeldxKUwUPGIAAPrwy2elueucofqo4LD2QtqsUyosZJgMhqeHjium8zJoDhW/yJsA6q688y+X6NnzoiU0gmx/l+vZWGmUAfTr/Ehrbzplqo8OoK+eegbM12Tw+jvmlpenENxXk7rQusvXJ3NGjNoQcj05WMueajbqQv34Jz+ZZp2rkfeNVM70pCn+1UdSNbOWybrZWIEvFn3KAsvon0XzZB/yRV9t5cdVWHd59RpSNeGBXvR10TIdExh1oeTsQ2luairKhJUUM5uBKb8AJt3ldcSZYVxcf5c9nOLlwPyVD4EXMn6VMwHUXTlnXz6xm8ZVBXJdssLBs4LKu8RopVE29YfMuksu1WZqwnwZ1XDxmaU1PfqKNvqu6ZRyYXtkTtG6C5ARSe5sYq+SZqkDLE5jllQ5qwT20V3TuKrgWN5JVAM9LqmdH3n4Vxp9NZcvOUXorj4uzK+dm1dguCbVoC3PvcUZ9LNNKI9QHc8VC9bFm7CS4oK6yyYiIJo7qqLPPPOuAtCrWNCwcAF/IAELAqi7Fjjwh7MSYO8jorixyoUiZZ/l1JnXgltplAGYa7S885BsTYY0Vn3N3S/6pC7vimwhk8XrLmSpyc1PnZQ6I2RE+oRYXiksoXJWd0N4l1iJeTqiMvUprTjN3kfEO0AxJig9FeRCOCtNjPUSdDdD7e9CuYcw46rK7xs7qMpisJvJ6ZJNWElxId0FoOtR+Kno8XGGUxF+CoXnfN4JZPpSusVFAZMH+B8JFCaAuluYCc5xSgJJYfSDlkzXZQvFyEiFIkjDuyfVSqMMQF/bM/d1dBEWY2gNmgkKimLPVDMd1sJdMWN8hJodLltcf5dcAg0l45MCVqqVA8J4zTxxvqTKyRVHsyfmaG2gu+xzMxTcWVagbfF79qXQXeZULb+PWG7fyLhucl634OPGSmPCSoqt6C4YU8OD/IP86FueXNwDlNHcOCzClr0oUPhRaBmqSK0ZPn5BAgwB1F3cEmRDgOkYFfOcSIVnwedEkjOfluOq6EcS8qRUp+QuwRKSxpggTu3IkGNy/dFznCopiz4PmmPMiFN6fWQ++UzrLl+xCmSDeYoWRfFPcrJFSqqcjLClb1IyDxAifUry5MsK9HdNz4nkEQD2HqFOvCODU+FkTDDd3zVm04EzSszeP23QXzEyKsuLy8AMWKOvPRuNRZ01Ng3psrxEbUyaTgdrmcHSmKCBWKa4sO5mqvwGhHMDwArkiP7J0nb1CYvLMNIPrzZmJalGeRY4S2FtVZwnOwKou7JLuawDztKG9FUU8V6EgAjeEy0YSkmz6CFT3kptJq0DWUkRwxQu/mqekhHhVPQN0TAFDCkRwxTm
88ykV7oliBYE8zAbiwdEGxLDyCVcyius4CsZzFmin6LFO6NrXlBi5eSu07607HsrNal6gyFDOzfIi3m6sndw2CwNLwp+rcB79wD78ghyPMG9F8HFZ775mRjMiinhdLUe/uHqOHXEhKDg6cFEdzsFq6LDlNHM/bmaIHIE4qWM1qhCleorYIhhjkm8QqI0mqiwoMAw5deEhc9cdURoeFIRumv2zWtuAR+YJ2yTXm9QpC6DdrxkE0adkr6xKoh37TZlAZ0T3wjzux+SpisohWfALJUmTqs1TUmZBkZfWXbZSWHM6xl4qbY4OrFgjD9kTQB1V9bpl2XwRn1cRLC/J/2EKfKMKU/foPCYFPK4A2sffYJKaX7alHdQeMGHVemU7Tw932UeR0VRrh4B3LgqtjqLJ0YFhutM57GZ3hjXSvOvhvI9SQ7z4t9NxF8EUFTlbKkcvZZ9lpaLh3+YVg+6UIXb0DBNKjdEyKI+pqvH+cT75urh6RsUFqXTW1XETE3wABdC0ztYRQ5f9OTareWDnzK2BHsqKEU78/GNMWk+/YIjAo08DMSYGOapULgNDU+yfFoI66E13yyuGpjejsA4TS8q1kTBCpU6U2faFDbbD9bHFDp4Ykq4+oQn83Dk6LWmx29ZORdtARp/yJoA6q6s04/BIwEkUDIBgy58vlZvyEgydXa1cVp1ZFjI156Koo6WSq4US8iXAOqufHOPkSMBJFAyAXK6vqinV+qU1p55UnKdWELeBFB35Z1/jB4JIIHiCNCP/VLwHsXFK2yICQku6rWSvGL4FQkUIIC6WwAI/kQCSAAJcAQyouh3LijcvAJDwqM0zLgqdWRY8FBP/lOduRXwGxIoiQDqbkmEcDkSQALyJmBI00RMCPBk3lfIvOVwlkrHvRNB3nQw+rITQN0tOzNcAwkgASSABJBAeQmg7paXHK6HBJAAEkACSKDsBFB3y84M10ACSMBMYNo081f8ggSQQGkIoO6WhhKWQQJIwBoBb2+gKPD2trYM5yEBJGCdAOqudS44FwkggRIIXLtGRJeZrl0roTAuRgJIwEQAdddEAv8jASRQJgLx8ZzuxseXaVUsjATkTAB1V87Zx9iRQAUIrFjB6e6KFRWoCFdFAvIigLorr3xjtEjAZgSmTeN0F0dX2QwrVuT8BFB3nT/HGCESsAuB4cM53R0+3C4msFIk4IwEUHedMasYExIQgEC3bpzudusmgEE0gQScgwDqrnPkEaNAAsIS2LiRE11mSPPGjcJ6gNaQgFQJoO5KNXPoNxJwJIFPPmF1d9gw9ssnnzjSH7SNBKRDAHVXOrlCT5GASAgcP851dnfu5L4fPy4SB9ENJCBmAqi7Ys4O+oYERElg0iRWa3v0IP716MH+nDRJlO6iU0hAXARQd8WVD/QGCYidgFYLVauyQvv338Tbv/9mf1atClqt2P1H/5CAowmg7jo6A2gfCUiLQOvWrMo2bw63bhHfb92C5s3Zma1bSysa9BYJCE8AdVd45mgRCUiWQP/+rL5SFGzYwIWxYQM3v39/bj5+QwJIoBAB1N1CSHAGEkACVgn8/jsnriEhBYuEhHBLf/+94FL8jQSQgIkA6q6JBP5HAkigGAJbt3Ky6uVlvaCXF1dm61brZXAuEpA9AdRd2W8CCAAJlEhg3jxOUBs2LK54w4ZcyXnziiuJy5CAXAmg7so18xg3EigNgexsGDmSk9JWrUpeqVUrrvzIkZCdXfIqWAIJyIkA6q6cso2xIoEyEVi3jrs3l6Jg/PjSrj1+PCe9PXrAunWlXRHLIQEZEEDdlUGSMUQkUCYC9+/DtGnQti2nnRQFZX3DLv/tvBRFaps2De7fL5MjWBgJOCUB1F2nTCsGhQRKTSA7G9LSYP9+0iudPx8CAizklqKgZctyPg1DqyXrMm9NMP8NCCBW1q0jFtPS8Cx0qfOEBZ2HAOqu8+TSlpEcOQJRUTBzJnz/PQwcCG3acI8oMjeg+MXpCbz5Jvz1F9y8Wf5N
6+ZNUsObbxZUX6dHhwFWrUrajYEDSRsycyZpT44cKf+G5Fxrou46Vz4rGM3Tp7B6NXh7YyspawI1a5Je786dFdyaLFbfuZPUWbOmrMGiGHt7kxbm6VOLbUN+P1B35ZdzqxGfPg1Tplg5K4gthdMTePFF6NgRBg2CUaNgzhzyFKqLF61uIzaYefEiqX/OHGJr0CBi98UXUYllR6BlS9LanD5tgy1KmlWg7kozb7b1euZMK3v+O++QG0hmz4aNG+HUKXj82LY2sTYkgAScmcDjx6Td2LiRtCEjR8I771hpZGbOdGYCRceGuls0G5ks+fRTi/3hpZfI9ZiDB2USPYaJBJCAQAQOHiRty0svWTQ4n34qkHUxmUHdFVM2BPbl3j3gP+KgfXtyAvD6dYG9QHNIAAnIiMD166Sdad+eU99WreDePRkRAEDdlVW6ecEePMht9xRFxlJdvsxbjF+RABJAAnYjcPlywfGbcjrHhrprtw1LzBXfvWshuqNGidlZ9A0JIAHnJDBqlEVDdPeuc4ZZKCrU3UJI5DCD/ygirVYOEWOMSAAJiJGAVstJb9u2YvTQDj6h7toBqsir5D87F5/bJ/JkoXtIwOkJ/P9zSc1365X+GeBSxoK6K+XslcP3P//kNvHY2HJUgKsgASSABGxMIDaWa5f+/NPGlYuvOtRd8eXEfh5t28Zt3KGh9rODNSMBJIAEykYgNJRrnbZtK9u6UiuNuiu1jFXE3+HD2S174MCKVIPrIgEkgARsT2DgQLaBGj7c9pWLqUbUXTFlw66+pKVBlSrsZp2YaFdTWDkSQAJIoMwEEhPZBqpKFfKuKuf9oO46b24LRKZUstv0kCEFluBPJIAEkIAoCAwZwjZTSqUo/LGPE6i79uEqwlo7dGA3aGe/diJC9ugSEkACpSJgHoPSoUOpykuzEOquNPNWVq/Xr2dFt0+fsq6K5ZEAEkACwhHo04dtrNavF86osJZQd4Xl7ShrY8awm3JEhKNcQLtIoDABrVY7jf7s3r278FKcI0cCERFsYzVmjLOGj7rrrJm1jMv8KvukJMsF+AsJOJJASEgIRX9++uknR/qBtsVDICmJ1V1vb/E4ZVtPUHdty1OstZkfDHn1qlhdRL/kSAB1V45ZLz7mq1dZ3XXex0ai7ha/CTjL0mrVyKZcsyY8feosIWEczkAAddcZsmjbGJ4+JS0VRUG1aratWDy1oe6KJxd288RoZI8fXV3tZgMrRgLlIYC6Wx5qTr+OqyvbZBmNThkr6q5TptUyqNRUdiPu0cNyAf5CAg4mgLrr4ASI03yPHmyTlZoqTgcr6BXqbgUB4upIAAmUnwDqbvnZ4ZqSJYC6K9nUoeNIQPoEUHeln0OMoMwEUHfLjAxXQAJIwFYEUHdtRRLrkRAB1F0JJQtdRQLORgB119kyivGUggDqbikgYREkgATsQwB11z5csVZREyij7ubodVFhQb4eLswzZiiKcvXw9FeqEvTlG+6dsS7ATUEp2gWoM8uGqdwrlsaMPsrHHB9F+aiulGYlLIMEkECZCaDulhkZriB9AmXQXUN8mI8r0SNF3xBNJqOzRn1CeAA7M1hTRu0ESFIqWIFTTC/T8wvLvWJpM2aMCTJJL+puaaFhOSRQVgKou2UlhuWdgEBpdTdjXQDbx3VX6rItA0+L8GI0SuGnKqP0lrvbWu4VLV0v+leCEnW3aDq4BAnYhgDqrm04Yi2SIlA63TUrK0UFRBsKBWjUTjb1W92VSeU741yoUgfPQN11cALQvCwIoO4Wmeb8fMjLg9xcMuXkkCe8PnkC//1HJqMRHj+GR4/I9PAhZGfDgwdw/z6Z7t0DgwHu3iXTnTtw+zZkZcGtW2S6eRNu3AC9Hq5fJ9O1a3D1Kly5QqbLl+HSJbh4ETIzyZSRAenpcOECmdLS4Px5OHcOzp4lU2oqpKTAmTNkOn0aTp2CkyfhxAkyHT8OycmQlESmY8fg6FFITIQjR8h0+DAcOgQJCWTS6eDg
QThwAOLjybR/P+zbB3v3kmnPHoiLA60Wdu8m065dEBsLO3eSQJzrUxrdNaj9TX0/KkhTWHYBjLHB5hI+UXpnQOQculvULsrspVZ3UWYvNe+izF7K7KLMXmreRZm91OouyuylVndRZi+1uosye2kFd9GdO2HHDti+HWJiQKMh07ZtsHUrbNlCps2bYdMm2LgRoqPJpFbDhg2wfj2Z1q2DtWthzRr4918yRUWBSgWrV8OqVWRauRJWrIDly8m0bBlERsLSpRARQaYlS2DxYli0iEz//AMLF8KCBfD332QKD4e//oL588k0bx78+Sf88QfMnUumOXMgLAxmz4ZZs8g0cyb8/jvMmEGm6dPht99AqYRffyVTaCj88gtMm0amn3+GqVNhyhT43//IFBICP/0EkyeT6ccfYdIkmDgRJkwg0/jxEBwMP/xApnHjYOxYGDMGRo8m06hR8P33EBQE331HpsBAGDkSvv2WTN98A19/DSNGQEAAmb76Cvz94csvyfTFFzB8OAwbBp9/TqbPPgM/Pxg6lEyffgpDhsAnn4CvL5k+/hgGDwYfHzJ99BF8+CF88AEMGkQmb294/30YOJBMXl4wYEBIixZM0/GTiwu8+y688w6Z3n4bPD2hb1/o04dMvXvDW29Br15kevNN6NkTevQADw8yubtD9+7wxhtk6tYNunaF11+HLl3I1LkzdOoEHTuSqUMHaN8e2rWDtm3J1KYNuLlB69ZkatUKWraE114DV1cytWgBzZuDiwuZmjWDpk3h1VehSRMyNW4Mr7wCjRqRqWFDUCigQQOoX59M9erByy/DSy+RqW5dqFMHXnwRatcmU61a8MIL5IHDNWtCjRpQvTp57HDVqmSqUgWefx6eew6efZZMzzwDlStDpUpkoiiczASy1q+/cuXKpUuXMjMz09PTL1y4cP78+bNnz6ampp45c+bUqVMnTpw4fvx4UlLS0aNHExMTDx8+nJCQoNPpDhw4sH///r179+7Zs0er1e7atSs2Nvb69esOV6hS6O4VFTfK6N2IDKsuZ/LKDDCV4aSLYkcnZenChpJxVJ6TtQbQmc/kkh0vVMdVbEhRhwZ4tjP1oc2STlG0qFtbke8kRSkTAFhblMuAYFVqwT64IU0TMcFswsXDP0xb4GiBc17K13fj4szbLn5BAiIkwL4FkKJ+QqVBAkUQ6MuTgIp/jYqK4rTGQd9K1l1+X5byVRWQJ9ZtvdqP48HrE58K92Tn+6gSNEHu5kJKWmZTwt81zTHrbqaaHag1TJWRA5AZwai+YpiKJ/nWVjRozEOhQlaylbC1K0K0POXNiPJTUFQA0y/Xq4MYfVf4qfnjlqWvu/fvw9bgPSJsatElJGAmgLprRoFfiiLwYe3a9erVa9CgQcOGDRs1atS4ceNXX321WbNmLi4uLVq0eO2111q2bNm6des2bdq0bdu2ffv2HTt27Ny5c5cuXbp27frGG290797dw8OjZ8+evXr1euutt1avXu0gteXMlqy7FjfVTOH1SrlKACw6r7wOItcN7ezhrdQawBCv9FRQHhO09Olqvcq3gO5y57SDYxmpNGgCmTIK0otlP4VXtPBBofAJSzYCrxfOrWuSZ8UwNX0MwVVlcYbc1rp7+fLlixcvZmRkpKenp6WlnTt37uzZsykpKadPnz558uSJEyeSk5OPHTt29OjRI0eOHDp0KCEh4eDBg/Hx8fv27du7d29cXNzu3bt37dq1c+fO7du3azSabdu2bdmyZdOmTRs3blSr1evXr1+7du2aNWuioqJWr14dEJBQrdrTj+poitqUcT4SEAMB1F0bZ6FSpXxmqlw5v3LlvGeeyXv2WTI991zuc8/lPv88mapUya1aNadq1Zxq1chUvXpOjRpkqlkz54UXnr7wwtNatZ7Wrv30xRfJVKfO07p1n770EplefvlpvXpP69d/2qDBU4XiqUKR07BhTqNGOa+8ktO4cU6TJjmvvprTtGlOs2Y5zZrlurjkNm+e
26JFrqtr3muv5bVsmdeqVV7r1nlubvlt2uS3bZvfvj05/9+xI7kc0KkTuS7QpQu5TNC1K3Ttmt+1W8oLbxyiul9u1J1c8XWuj2C6y5wiLgCP0zz2PLNJFCnmXDFdXBfKajNPFwutSEpy5587z2LuSuLmmFQcwKgNYTu4TCeYq4pXP4CtdTctLc10iCHY/1O9qb3F79X5FJVHUbn0lENRTynqCT39R1FGinpMUY/o6SFFZVPUA4q6T0/3KMpAUXfp6Q5F3aaoLIq6RU83KeoGRenp6TpFXaOoqxR1hZ4uU9QlirpIT5kUlUFR6RR1gZ7SKOo8RZ2jp7MUlUpRKRR1hp5OU9QpijpJTyco6jhFJVNUEj0do6ijFJVIT0co6jBFHaKoBHrSUdRBijpAUfH0tJ+i9lHUXnraQ1FxFKWlqN30tIuiYikqtlKl2EqVdlauTKZnntn5zDM7nn2WTM89R6bnn9/+/PPbq1TZXrXq9mrVyFS9ekz16jE1asTUrEmmF16IqVWLTLVrx7z4YkydOmSqWzfmpZdiXn45pl49MtWvH9OgQYxCEdOwIZkaNYp55ZWYxo1jmjSJefXV7U2bbm/WbLuLy/bmzbe3aLHD1XW7q+uOli13tGpFptatd7Zps7Nt253t2sW2bx/boUNsx46xnTrt6tx5V5cuu15/fXe3brvfeGN39+5ad/c4D4+4Hj3ievbc06vXnrfe2tO7994+ffZ5eu57++1977yzv1+//e+9F9+//4EBAw4MHHjg/fcPenvrPvhA9+GHCR99dGjw4EO+voc/+eTwkCFHhg5N9PNL/Oyzo8OGHRs+/NiXXyb5+ycHBBwfMeL4N9+cGDnyZGDgqe++O/3992dGjz4zZkzKuHGpwcGp48efnTjx3KRJ5ydPTgsJufC//6VPnZoxbVrmL79c/PXXi7/9dmn69Cu//35l1qyrYWHX5s4d8y57ymvse+/dWrAg659/7ixefDciwhAZeW/58vsrVz5YtSo7KurRmhHXu8kAACAASURBVDWP1q41rl//n1r9ZNOmp5s352zdmqfR5G/fTsbdxMaSMTi7dpHBOFotGZuzZw+Z9u4lY3b27ydTfDwZy3PwIBnXo9ORAT6HDpHxPszAn8REMg7o2DEyJSWR8UHHj7PDhU6eJKOHTp8m05kzZFRRaio7yOjcOTLmKC2NTBcukLFIGRns0KSLF8lIpcuXyXTlChnBdO0aO6BJryfjm27eZIc7ZWWR0U937pDp7l0yKurePXaQ1IMHZMzUw4dkevSIjKUyGtmhVU+ekJFWOTlkys0lI7Dy8ws0r5L+6eVFmq7XXiM4ne9Tsu5CvPmQlCryPDNPLykqhOsU8/q7YcmF6XGaVxrdDdpiHtNVaEVSN6eypt4tN8dCU436pDhdhgEgR6+LDPai7z8ml6D5I8Jsrbvp6ekKhYI5SdKkSZOmTZu6uLg0b97c1dW1ZcuWrVq1cnNzY06SdOjQoVOnTl26dHn99de7devWvXt3d3f3Hj16vPnmm2+99Vbv3r379u379ttvv/POO/369evfv7+Xl9fAgQO9vb0/+OCDDz/80MfH5+OPP/b19e3e/e/KVF6dGte/8PPz//zzr774IuDLL0d89dU3I0Z8++23I0eO/O67777//vtRo0aNGTNm3LhxP/zww/jx4ydOnPjjjz9Onjw5JCRkypQpU6dOnTZtWmho6K+//vrbb7/NmDFj5syZs2bNCgsLmzt37h9//DFv3ry//vrr77//XrBgwT///LN48eIlS5YsXbp02bJly5cvX7ly5erVq6Oiov7999+1a9euX79+w4YN0dHRmzZt2rx589atWzUazfbt23fs2BEbG7t7926tVrtnz559+/bFx8cfOHBAp9MdOnTo8OHDiYmJx44dS0pKOn78+MmTJ0+fPn3mzJnU1NRz586dP3/+woULGRkZmZmZly5dunLlyrVr165fv37jxo1bt25lZWXduXPHYDDcv3//
wYMHDx8+fPz4sdFofPLkSU5OTm5ubr5ztVaF9zGRz8HxzCJPkEPcGzKEiK5CQcZKO+WnFLrL19TSjKsK5A155nSXd/KZA1lYPrnzzCHxTDmjdgLdQVQEabLMaxZesSy6CwA5hqSoYE9Xv4hTKeZz3XbVXbPrQn55/XWy+YaFCWkTbSGBMhBA3S0DLHkUHTGCtFo1apBzE876KYXuAu/23JLvI1KExPGGMJVZdwEy2eFXZCAVGVelIoOgKA9lAq9aqJjuZqqD+ypMdXJVOZ/urllDtuAGDcgZKfwgARESQN0VYVIc6NK4caTJoihy9cCJP6XRXbP4kX5n8c/NMA1WMhErh+6SzqheM9mDvQqqcPMKDC94k09FdDdLww5gZvvuzqy7AORGR4oit4PiBwmIkADqrgiT4iiXpk5lRXfDBke5IJDd0ukugDFBySph4edEJocVuag8umvQTvH0WZBSLABOLHk3/nJXc4u5vqtfZ7rjSRFMToiz/Wmi8qbRWLRlW1/fLTYcOy7cuJFsyi++SMZn4AcJiI0A6q7YMuIof2bPZkV32TJHuSCc3dLqLgAYEsPZ9yJ4K7W89yLQ54EpF9/wJPOwJ7P/3P27nuHJ/BPFdIkc3m24k9mR4sxtSz6RvJt1zbWZv1hbEXjXoQPW0bcI8eaYKzRsMd/lS17x4Dldo57APaDDYzr7mEteMZ8IiQ+o69uXbNBTp5rx4RckIBYCqLtiyYRD/Vi4kBXd+fMd6odQxsugu8SlHENKTESwv6eHaQywi7tnwIQITWphyeXfisOeM+b1TS2GQbGLyaMzeB1Z00rkP/22QfUpxgrXr+VW5DrW7DyfKJXF87C44coZmgleLhTl4h4QFkfLc6aGvtzr4hXK3FUMFrcsk/qsDgoTKkUVthMTQ7bpatXI3Qr4QQKiIoC6K6p0OMSZ1atZ0ZXP5bAy6q7905Iy33RllxVQ/j8Pazcj2d8n6Vvo359s2ZMmST8SjMC5CKDuOlc+yxzN5s2s6P74Y5nXle4KotNdAINuvp8bX2153y2GHEuXuuCea7Vk465cmdy7jx8kIB4CqLviyYXwnsTFkZdBUBR5T4esPmLT3Qy1vwvVKUTL3apLp8NoSFrgQ1GKoBhrJ7RllbHyBvvBB2T7Hju2vOvjekjADgRQd+0AVRpVJiaS8Z4URd50JbePyHSXfYijlzIuw8ANwzIaMnXhwxQu/upiR1vJLXdlizc+nmzi/7+VO+Vz18rGAkuLhgDqrmhSIagjZ8+SNytSFHlXpAw/ItNdMmw6RT0/OOBdTzfTKGNFO0+fwJCIOD0nxDJMlC1C/uQTsqEHBtqiLqwDCdiCAOquLShKrI6rV8kbkCmKvHBZnh/x6a488yBI1EeOkG2doshD3fGDBMRAAHVXDFkQ0of796F7d9IKubvL96ECqLtCbnKOtzVsGNniAwIc7wl6gAT+/85E1F1ZbQb5+fDOO6QJatuWvJxJth/UXXml/sQJstFTFHnFGX6QgMMJpKenx9Gf9PR0hzuDDtibwIcfksbn1Vfh3Dl7mxJ1/ai7ok6PPZwLCCCb/uef26NurBMJIAEkYJ0Ac7Ktbl3ypmOZf1B3ZbcBnD3LdnmPHJFd7BgwEkACDiHw3Xek2XnuOdizxyH2xWUUdVdc+RDGG2Yf8PUVxhpaQQJIQNYEJk1ij/W3bpU1B3PwqLtmFDL6kpnJ7gbx8TKKGkNFAkhAeAJKJdvaREUJb1ykFlF3RZoYe7vFvF960CB728H6kQASkC+BefNY0V20SL4QCkeOuluYiSzmXL/OPhl1925ZxItBIgEkIDCByEhWdMPCBLYsdnOou2LPkP38+/FHsle89579LGDNSAAJyJTAunWs6P78s0wJFBM26m4xcJx80Z07UL062Tc0GiePFMNDAkhASALbt7Oi+8MPQpqVjC3UXcmkyh6O/vwz2T369LFH3VgnEkACciRw4AB7QP/113IMvzQxo+6WhpLTlnn4EOrU
IdIbHe20MWJgSAAJCEbgxAmoX580KUOHCmZTeoZQd6WXM9t6PH062Uk8PGxbK9aGBJCA7AhkZECLFqQ9ef992cVepoBRd8uEywkL5+SAQkF2lX//dcLoMCQkgASEIZCVBZ06kZakd294+lQYm1K1gror1czZ0O85c8je0qWLDavEqpAAEpARAaMRevVim5E7d2QUePlCRd0tHzdnW6tpU7LPLF/ubHFhPEgACQhAwMuLNCCvvQYXLwpgTfImUHcln0KbBBAeTnabtm1tUhlWggSQgIwIDBlCWg+FAk6dklHUFQkVdbci9Jxq3ZYtyc6zeLFTBYXBiJbArWNrZ4YEDh86dOjQ4QGjlYv2Zj7KFa2z6FiRBJj3itasCTpdkWVwQQECqLsFgMj35+LFRHddXeVLACMXisDTq1un0oo7dHjg6NGjA5jvgXMP3ELpFSoHNrEzdixpNP7/hoidO21Sn1wqQd2VS6ZLE2f79mQX+uuv0pTFMkignASenlkWSHq5UzemP2KruHUgPIietetWOSvF1QQnMGUKK7obNghuW+IGUXclnkCbur9iBdmRXn3VppViZUjAgoBhz3Ryclm512Ax+/LG8UOHDv1m9Xns8lpwEemPWbNY0cXBmOXIEOpuOaA58ypdu5LdCd8f4sw5dmxsDw7MJLIbfsRYwI9b234cOnRowOq0AvPxp+gILFzIii6eGytfblB3y8fNaddas4bsUQ0a4J3vTptiBwd2Zhm5mvvjtsInlE8vJYI8dUfhJQ52Gc3zCaxezYru9On82fi9DARQd8sASyZFe/Qg+9Vvv8kkXAxTUAKP9tPd3ZkHTJd2Oeu3dkwlwrv0NDcLv4mMwKZNrOhOniwyzyTlDuqupNIliLMbN5Jd68UXITtbEHtoRE4EihHXYhbJiZB4Y9Vq4ZlnSOPw/ffidVISnqHuSiJNQjvZty/Zu6ZOFdou2nN6AsWIazGLnB6L+AM8coQci1MUfPGF+J0Vu4eou2LPkEP8i4khO1i1anD7tkPso1GnJVCMuBazyGlxSCSw1FRo3Ji0CYMHS8RjcbuJuivu/DjOu/79yW42aZLjPEDLzkjg6aFwchH3ryOF31jD6O7w5Xh9V1yJv3oV3NxIa9Cvn7gck643qLvSzZ19PddqyZ5WuTJcu2ZfQ1i7vAjgeGZJ5fv+fXjjDdIUeHjAw4eScl3EzqLuijg5jnbtgw/I/jZ2rKP9QPvORKDI+3evbhyL9++KK9N5efD226QRaNcO9Hpx+SZpb1B3JZ0++zp/4ADZ5SgKMjLsawhrlxMBfF6VZLL94Ydk92/aFM6fl4zPknAUdVcSaXKYk598Qna8wECHOYCGnY9A0c9nxodmiCjbw4aRfb9uXTh2TEReOYcrqLvOkUd7RZGYSPY9ioLUVHuZwHrlR+DRedVkfB+RmPMeGEj2+ueeg717xeymVH1D3ZVq5gTzmzns/eorwQyiIVkQwPfvijbNEyeyR9tbt4rWR2k7hror7fwJ4P3Jk+xOePy4ANbQBBJAAo4k8Ouv7P4eFeVIN5zbNuquc+fXNtGNGEF2xc8+s01tWAsSQALiJPDnn6zoLl4sTgedxCvUXSdJpF3DOHuW3RsPH7arHawcCSABhxFYupTdzefMcZgPMjGMuiuTRFc0zKAgsk/6+la0HlwfCSABERJYt44V3WnTROids7mEuutsGbVTPJmZ7G4ZH28nC1gtEkACjiGwfTu7dwcHO8YBuVlF3ZVbxssf77hxZOccNKj8NeCaSMBMYNeuXTNmzDD/xC+OIhAfT96AQlHwzTeOckF2dlF3ZZfycgd8/To8+yzZP3fvLncduCISYAkMHDhw4cKFiMOxBI4fh/r1yU49dKhjHZGXddRdeeW7gtH++CPZRd97r4LV4OpyJxAZGdm7d2+5U3B0/Onp0KIF2aPff9/RrsjMPuquzBJesXDv3IEaNciOqtFUrCJcW94EGjduvHnzZnkzcHD0t25Bx45kX+7T
B54Wfimjg71zcvOou06eYJuH9/PP7L5q85qxQvkQOHnypHyCFWGkRiO8+SbZkV9/He7eFaGDTu4S6q6TJ9jm4T18CHXqkD02OtrmdWOFSAAJCEFgwACyC7dsCZcuCWEObRQggLpbAAj+LJnA9Olkp/XwKLkklkACBQgsWrSowBz8KTAB5iVjDRvC6dMCW0ZzLAHUXdwUykwgJwcUCiK9//5b5nVxBTkTiIiIaNKkyV08s+m4jSAggOy5NWtCQoLjnJC9ZdRd2W8C5QIwZw7Ze7t0KdfKuJIsCTx58qRDhw7z5s2TZfSiCHrsWLLbUhTExorCH9k6gbor29RXNPCmTckOvHx5RevB9WVC4LfffuvTp49MghVhmP/7Hyu6arUIvZOXS6i78sq3DaMNDye7cdu2NqwSq3JmAkePHlVjk++gDM+cyYouHig7KAMWZlF3LXDgjzIRaNmS7Mw4UKZM0LAwEhCYwIIFrOj+9ZfAltGcdQKou9a54NzSEFiyhOzPrq6lKYtl5Etg/fr1R48elW/8Do181SpWdPFh2A7Ng4Vx1F0LHPijrATatyd79fz5ZV0Py8uIQM+ePWfPni2jgEUT6qZNrOj+9JNofEJHAFB3cSuoEIGVK8mO3aRJhSrBlZ2YgFKpfOutt5w4QNGGptVC5cpk9xw1SrQ+ytQx1F2ZJt6GYXftSvZt7M/YEKnTVHX79u0XXnhh165dThORVAI5fBhq1yY75pdfSsVlGfmJuiujZNsp1LVrye5dvz48eWInC1itVAn8999/06ZNk6r3kvU7JQUaNyZ75eDBko3BqR1H3XXq9AoVXM+eZCf/7Teh7KEdJIAEiiBw5Qq4uZH9sV+/IkrgbEcTQN11dAacwv7GjWQ/r10bHjxwingwCFsQwJcO2YJi2eq4dw+6dSM7Y48e8PBh2dbF0oIRQN0VDLWTG/L0JHv71KlOHiaGV0oCmzdvbty4cSkLYzGbEMjLA2Y3bN8e9HqbVImV2IUA6q5dsMqw0pgYorvVqkFWlgyjx5ALEujdu3dkZGTBufjbngQ++IDsg82aQVqaPc1g3RUmgLpbYYRYgYlA//5kt580yfQb/8uVwMKFCwcOHCjX6B0T9+efk73vpZcgKckxDqDV0hNA3S09KyxZAgGtluz5lSvDtWsllMTFzk1gxowZeO+QkCkODCS73vPPw759QppFW+UkgLpbTnC4mlUCzJmusWOtLsSZSAAJ2J7AxIlEdCkKtm2zfeVYoz0IoO7ag6p86zxwgG0CMjLkC0HOket0OjmHL3zsoaHsHvfvv8IbR4vlJIC6W05wuFpRBD75hDQEgYFFLcf5zkxg8ODB+GZ7wRL8xx+s6C5eLJhNNGQDAqi7NoCIVfAJJCaybUFqKn82fnd+AnPnzu3SpYvzxymOCCMi2B1tzhxxOIRelJoA6m6pUWHBUhMYNoy0CF99VeoVsKD0CVy6dKlOnTrr16+XfigSiIB5OCtFwS+/SMBbdLEAAdTdAkDwpw0InDzJHokfP26D2rAKSRDYunXriBEjJOGq1J1k7pWnKBg/XuqhyNR/1F2ZJt7eYY8YQaT3s8/sbQfrRwLyIhAfD1Wrkp3r22/lFbgzRYu660zZFFEs586xXd7Dh0XkFbqCBCRN4PhxqFeP7Fl+fpKOQ+7Oo+7KfQuwX/xBQaSB+Phj+1nAmkVBYPXq1d7e3jdu3BCFN87rxIUL0Lw52ae8vZ03SHlEhrorjzw7IsqLF9ku7/79jjCPNoUi0L1799mzZwtlTaZ2bt6Ejh3JDtWnD+TkyBSC04SNuus0qRRjID/8gIfnYsyLDX2aPXt29+7dbVghVlWYwOPH8OabZFd6/XW4e7fwcpwjMQKouxJLmLTc1evh2WdJe7Frl7QcR29LReDGjRve3t6rV68uVWksVF4CzBtHWrWCS5fKWwWuJyYCqLtiyoYz+jJ5MtHdfv2cMTaMCQnYn4CvL9mDGjWCM2fsbwwtCEIAdVcQzDI2
cvcu1KhBGg58aLuTbQU3b950sohEGM5XX5F954UX4NAhEXqHLpWTAOpuOcHhaqUnMG0aaTv69Cn9GlhSAgT69++PT6eya57GjCE7Dl6msStkh1SOuusQ7PIy+ugR1K1Lmg+1Wl6BO3G0s2bNevvtt504QIeH9r//saIbHe1wX9ABGxNA3bUxUKzOKoHp00kj4uFhdSHOlBiBBw8e1K9f/+DBgxLzWzruzpzJiu6KFdJxGj0tNQHU3VKjwoIVIJCbCw0bkqYE3xJaAYpiWXXcuHHBwcFi8cbp/Pj7b1Z0w8OdLjYMiCaAuosbgkAE5swhrQm+Jk4g3HY2c+XKFTtbkGn1K1eyojtjhkwJyCFs1F05ZFksMTZtStqU5cvF4g/6gQRERWDjRlZ0Q0JE5Rc6Y2MCqLs2BorVFUMgPJw0K23bFlMEF4magEajmYEdMfukaPduqFSJ7CCjR9vHANYqGgKou6JJhTwcadmStCyLFskjWqeL0tPTc+HChU4XluMDOnwYatUiu4a/v+OdQQ/sTQB1196EsX4LAkuWkMbF1dViJv6QBIHQ0NC33npLEq5Ky8mUFHjlFbJf4Mu7pJW4cnuLultudLhiOQm0b0+amPnzy7k6ruYQAmfOnKEoavPmzQ6x7sRGL1+G1q3JHvHee04cJYZmQQB11wIH/hCAADNis0kTAUyhCVsSOHr0qC2rw7oADAbo1o2Ibo8e8OgREpELAdRduWRaVHF27UraGnxnq6iSgs4ITCA3Fzw9yY7Qvj3cuCGwcTTnSAKou46kL1vba9eS5qZ+fXjyRLYMJBN4dnb2N998g51dmyds0CCyF7i4QFqazevGCkVNAHVX1OlxYud69iSNjlLpxCE6SWjTpk3r0aOHkwQjmjA++4xs/y+/DMnJovEJHRGKAOquUKTRjiWBTZtIu1O7Njx4YLkAf4mJwIULF+rWrRuNz+a3aVJGjiQbf5UqsH+/TevFyiRCAHVXIolyRjeZi1tTpjhjbM4S08iRIz/77DNniUYUcUyYQESXokCjEYU/6ITwBFB3hWeOFlkC27eT1qdqVcjKQibiJXAIX7luu+SEhrKiu2aN7SrFmqRGAHVXahlzLn8HDCDN0MSJzhWVlKOJiYmRsvui9v2PP1jRXbJE1H6ic/YmgLprb8JYf3EE4uJIS1SpEly9WlwxXCYYge+++87b2zstLW3NmjU4htmG2CMiWNGdO9eGtWJVkiSAuivJtDmT0x9+SNqjMWOcKSYJx9K9e3eK/jRq1GjZsmUSjkRMrq9Zw4ruL7+IyS30xUEEUHcdBB7NmggcPMg2Senppln433EEateuzehu9erVGzRooFKpHOeLk1jWaNgtfPx4J4kIw6ggAdTdCgLE1W1AYMgQ0jAFBtqgKqyiIgT0en3VqlUZ3WX+1q5d+8svv6xInTJfd/9+MnKQomDkSJmTwPA5Aqi7HAv85igCR4+yHYKUFEe5gHYJgdjYWHN/l6KoGjVqhIaGIppyE0hOJk/GoCjAW7HKzdApV0Tddcq0Si+oYcNI8/TVV9Lz3Jk8DgsLq1Klirm/u2rVKmeKTuBYLlwgz4CkKBg0SGDLaE7sBFB3xZ4hmfh36hTb5cXH5jkw4z4+Pozo1qtX7+DBgw70ROqmb96EDh3IJt23L+TmSj0a9N/GBFB3bQwUqys3gREjSDuFZ+TKDbDiKzZv3rxmzZqvv/769evXK16bbGt49AiYx4937Ure9IcfJFCAAOpuASD402EEzp1ju7yHDzvMB5kbrly5sp+fn8whVDz8/v3JltyqFVy+XPHKsAYnJIC664RJlW5IQUGkwfr4Y+lG4BjPb+2YOrTEz5Sdt4r17sSJE9OnTy+2CC4smYCvL9mGGzWCM2dKLowl5EkAdVeeeRdp1Bcvsl1efE9LmTJkE91dg48MLhN0a4X9/ckGXKsW4Dkba3hwHksAdRc3BXER+OEH0nJ5e4vLK+l6YzgUHki6woHLTj6SbhSS8Hz0
aLLpVqoEu3dLwl900mEEUHcdhh4NWyWg18Ozz5L2a9cuq8txZhkIPDq5jBbd4VN3FH+OuQx1YlGrBEJCyEZLUbBxo9XlOBMJcARQdzkW+E0kBCZPJu1Xv34icUeybtzYOXU43dVdehq7unbN4u+/s6K7cqVd7WDlTkIAdddJEulMYdy9CzVqkIZs2zZnCkvYWHKvbpxID7UKWnbaKKxpmVn7+29WdP/+W2aRY7jlJYC6W15yuJ49CUybRtqy3r3tacOZ6350eil7gnnnDWeO0+GxrVzJiu7vvzvcF3RAMgRQdyWTKlk5+ugR1K1LWjS1WlZx2ybYWzvYE8w4lso2QIuoJTqaFd2QkCJK4GwkYI0A6q41KjhPBARmzCCNmru7CFyRlAs4lkqYdO3aRYYuUxSMHi2MQbTiPARQd50nl04WSW4uNGxI2rWoKCeLzJ7h4Fgqe9I1133oELlJF9/kYQaCX8pEAHW3TLiwsKAE5s4lTVvnzoIalbCxR6eXBZGxVMOn7byFz+K3WyLPnCGPo6Io8PW1mw2s2KkJoO46dXqlH1yzZqSBW7ZM+pHYPYJbO6fRtw0FLcPbhuwH+/Jl8uBlioL+/e1nBGt2cgKou06eYKmHx9yk0bat1OOwu//sWKrhU3EAs/1YGwzQtSsR3Z494fFj+9nBmp2cAOqukyfYCcJjuhf//OMEodgrBNNYKnwYpL0IA5DX6PbtS0S3Qwe4edOOhrBqpyeAuuv0KZZ8gBERpLFzdZV8IPYKgB1LhQ+DtBdgpt5Bg8h26OICFy7Y1xDW7vQEUHedPsXOEGD79qTJmzfPGWKxeQylehkRGW617LTNbcumws8+I1vgyy9DcrJsYsZA7UYAddduaLFi2xFYtYq0ek2a2K5GJ6oJddfeyfz2W7L5Va0K+HpKe6OWSf2ouzJJtOTDZMazzJ4t+UAwAGkRGD+eiC5FQUyMtBxHb8VLAHVXvLlBz/gE1q4lbV/9+vDff/zZ+B0J2JHAL7+wort2rR2tYNVyI4C6K7eMSzjenj1JI6hUSjgEdF1CBJjHtlAURERIyGt0VQIEUHclkCR0kSGwaRPR3dq14f59RIIE7EtgyRK2p/vHH/Y1hLXLkADqrgyTLuGQPT1JazhlioRDQNfFT2DNGlZ0Q0PF7yx6KD0CqLvSy5mcPd6+nTSIVatCVpacMWDsdiSg0bCiO2GCHa1g1XImgLor5+xLMvYBA0izOHGiJJ1Hp0VOYN8+qFKFbGAjR4rcU3RPwgRQdyWcPHm6HhdHmsVKleDqVXkCwKjtRSApiTwZg6Lg88/tZQLrRQIAgLqLm4H0CHz4IWkcx4yRnufosWgJpKWRZ0BSFHzwgWh9RMechADqrpMkUlZhHDxI2keKgvR0WcWNwdqLwI0bwDyL1NMT8vLsZQXrRQIMAdRd3BIkSWDIEKK7eBFOkskTmdOPHkGPHmRz6tYN7t0TmXPojjMSQN11xqzKIKajR9kub0qKDKLFEO1J4L33yLbUujVcuWJPM1g3EjARQN01kcD/UiMwfDhpLv39pea3nf3Nz8+fZvrk5OTY2Zrkq//4Y7IVvfIK4AGc5HMpnQBQd6WTK/TUksCpU2yXF1/NxgeTl5dHmT6PHz/mL8LvBQj4+5NNqFYtOHy4wBL8iQTsSAB1145wsWp7E/j6a9Ju+vnZ246U6kfdLWW2Ro8mG0+lSrB7dynXwGJIwDYEUHdtwxFrcQiB8+fZLu+hQw6xL0ajqLulycpPP7FbzqZNpSmOZZCALQmg7tqSJtYlPIHvvycN6ODBrOUtW6BXL+G9EJFF1N3Cydi0CYYN42bPmMGK7qpV3Ez8hgQEI4C6KxhqNGQXApcusW1oWBi88w75Xr++XQxJpVLU3cKZ8vYmG8aHH0J+PoSHsxvMggWFC+IcJCAEAdRdISijDbsS+OQTtiVlHqZBUZCdbVeDoq4cCibk4wAAIABJREFUdbdAek6f5jaPtm3Z7zNnFiiFP5GAcARQ
d4VjjZZsTkCnAx8frlU16+6pUzY3JZkKUXcLpGrcuIJbCD5htAAi/CkwAdRdgYGjOdsQyM+HoUMLtqdm3d261TZWpFgL6i4/a//9By+8UHA7adMGUlP5pfA7EhCUAOquoLjRmG0JfPFFwSaVkd75821rR0q1oe7ys/X339a3kCZN4MgRfkH8jgSEI4C6KxxrtGQPAqNGWWlYf/jBHqakUSfqLj9PnTtb2TyYg7Mvv4TTp/ll8TsSEIgA6q5AoNGM/QiEhBRsW3187GdN7DWj7poztHNnwQ2DUdzPPwd8xpmZEn4RngDqrvDM0aLtCcyaZdHCduliexNSqRF115wpX1+LrYKiYMgQSEw0L8cvSMAxBKzorvGKTjUryKevm4J5yqvCzdM3KCxKp5fAI9Yz1F+7KSiF29fqDDvxTFCann1L/w/V2ckOVltWAv/8wzWydeuWdW3nKY+6y+QyPZ3bHiiKjHvX4c7qPJu5tCOx1N3sFNU4TyK3Cs/gqCQDLbTGrCTVKA8iMq4+YfEGUYWbsVKp4r+6K1HJHitQCqUdj2oNmlEm8UXdFdMGERXFNbV374rJMwF9EbXu5kN+LuTlQN4TyDVCziN4mg1P78OTe/DfXTDeBuMteHwDHl2Hh1fh4RXIvgQPMuF+OtxPg3vnwHAW7qbA3dNw5yTcPg63kyDrGNxKhJuH4WYC3DgI+ni4vg+u74VrcfDLUHCjyPbg7Q379gmYADRVMQJ5eXm5ublPnz7977//jEbjo0ePsrOz79+/f+/evbt3796+fTsrK+vmzZt6vf769etXr169fPnyxYsXMzMz09PT09LSzp8/f/bs2ZSUlDNnzpw6derEiRPJyclJSUlHjx49cuTI4cOHs7KyKuagDdbm6W62TtmXka3OIfFGy7r16kBmkcIvyl49SUuLpfiVqfJT+FjoLti/v0v7pQtF3S1FghxRZNs2qFKFtLbHDkHOY8h5SFr2J/fhiQH+uwPGLHh8Ex7r2ZY9+zJkX4QHGXD/AtxjWvZUuHsG7pyCOyfgdjJkJUHWUbh1BG4eghs6uHGAbdmv7YFrWri6C67shMs74HIMXNoGF7dA5ibIjIaMDZC+HtLXwoV/IU0F51fB+RVwbjmcjYTUCEhdDCn/wJkFcDocTv8Fp+bByT/gxBw4MRuOz4Tk3yF5OiQp4VgoHJ0GR6dC4hQ4EgKHJ8PhSXBoAiQEg24c6MbCwdFw4HuI/w7iR8L+b2Df17A3APb6g3Y49z6iGN/Huz6BXR9DrA/s/BB2fAA7vGH7QIgZAJr3QPMubHsbtnrC1j6w5S3Y/CZs6gGb3GFjd4juBurXQd0ZNnSE9e1hXTtY1wbWtoY1LeFfV/i3OUQ1A1VTUDWB1a/AqoawsgGsrAcrXobldWHZi7CsFkTWhKXVIaIaRFSBJc/B4mdgUSVYRAk9xcT89/DhwwcPHty/f99gMNy5c+f27du3bt26ceOGXq+/du3alStXLl26dPHixYyMjAsXLqSlpZ07dy41NfXMmTOnT58+efLk8ePHk5OTjx07lpiYeOTIkUOHDul0ugMHDsTHx+/bt2/Pnj1xcXG7d++OjY3duXPn9u3bNRrN1q1bt2zZsmnTpujoaLVavX79+rVr165ZsyYqKmr16tUrV65csWLFsmXLli5dGhERsXjx4n/++WfBggV///33X3/9NW/evD///HPu3LlhYWGzZ8+eOXPmjBkzfvvtN6VSGRoaOm3atJ9//nnKlCkhISGTJ0/+8ccfJ06cOH78+ODg4HHjxo0ZM2b06NHff//9d999N3LkyG+//fbrr78OCAj46quvvvzyy+HDhw8bNuyzzz4bOnTokCFDPvnkk48//tjHx+ejjz764IMPvL29Bw4c6OXl1b9//379+r377rtvv/123759+/Tp89Zbb7355ps9evTw8PDo3r17t27dunbt2qVLl06dOnXo0KF9+/Zt27Z1c3Nr3bp1y5YtXV1dW7Ro4eLi0rRp
0yZNmjRu3LhRo0YKhaJBgwb16tV76aWX6tSp8+KLL9aqVatmzZo1atSoVq1alSpVnn/++WeffbZy5cqmhtWO/6OiohzRMlnYNOuuXj3M1Ff0V1vp1aZFeLEoPJQJBVTZokaBfmRpQ9wpiiqguwIZR90VCHS5zOzfD/XqwSpfoZt44UXFqsV/KE53w6nHVsvIZ2YlqpIdm3Cs2s4EKlWq9Mwzzzz33HPPP/981apVq1evXqNGjRdeeKF27dovvvhi3bp1X3755fr16zdo0KBhw4avvPJK48aNX3311WbNmrm4uLRo0eK1115r1apV69at27Rp065du/bt23fs2FGlUpWrXbHlSqzuGuNCTKpLBW2xIrsAGRHvmhi7hyXZ0oey15WpCnBlnJGM7hZzuM2cJylwuJ2SksIcbp84caLw4fbBgwfj44s83N62bVsxh9urVq0qfLi9cOFC5nB7/vz5hQ+3p0+fzhxu//LLL+bD7Z9++unHH8nh9oQJE5jD7bFjx5oPtwMDA5nD7REjRjCH21988YX5cPvTTz9lDrcHDx7MHG4PGjTo/fffZw6333vvPeZw29PTkznc7tWrV8+ePZnD7TfeeIM53O7cuXPHjh3btyeH223atGEOt1977bUWLVq88sqAT6rtk4+68CNF3eXTeLbyc0zDXaVKlWrVqlWvXr1mzZq1atWqXbt2nTp1XnrpJabhVigUjRo1euWVV5o0adK0adNmzZo1b97c1dW1ZcuWrVq1cnNza9u2bbt27Tp06NCpU6fOnTu//vrr3bp16969u7u7e48ePd58881evXr17t27b9++np6e77zzTr9+/fr37z9gwICBAwd6e3sPGjToww8/9PHxGTx4sK+v75AhQ4YOHern5/f5558PHz78iy++8Pf3DwgIGDFixDfffDNy5MjvvvsuKCho1KhRY8aMGTt27A8//DB+/PiJEydOmjRp8uTJISEh//vf/6ZOnTpt2rRffvnl119//e2332bMmPH777/PmjUrLCxszpw5f/zxx7x58+bPnx8eHr5gwYJ//vln0aJFS5YsWbp0aWRk5PLly1euXLlq1SqVSvXvv/+uXbt23bp1GzZsiI6O3rhx4+bNm7du3bpt27aYmJgdO3bExsbu2rVLq9Xu2bNn7969+/fvP3DggE6nS0hIOHz4cGJi4tGjR5OSko4fP37ixIlTp06dOXMmNTX17Nmz58+fv3DhQnp6emZm5qVLly5fvnz16tXr16/fuHHj5s2bWVlZd+7cuXv37r179x48eJCdnf3o0SOj0fjkyZOnT5/m5ubm5+eXXRuksQajuwZNoElTKc+INOuuc508SqFMIGV4cyjKV6UH0Ef5mCuiKKXFOIYcvS4qLMjXw4UuoWjnFRyVwlN4vcqXtyq5bmrQzfIjg7tcSUlzF1sfE0RfbeYVpigqVAeWI54YD+GKiu+QxToDIrgz5jl67fwgr3bk2MPFPSAsTl8AgeGUKtibHmjm6hEwS6eeYqqp1Nd309LSTOvgfyEIvE/9wW9/5fNdkrpbCRY/k7/42fwlz+cvqZIfUS0vonre0pp5S2vlRdbOi6yTu+yl3OX1cpc3yF2hyF3ZKHdV49xVr+aubparap4b5Zr3b8u8Na3z1rbJW9cub32H/A2d8tVd8qO7kRPmeU8L7Mr4Ewk4ngCju7oQriUssgeZsdjTXMpzMatZKfNNM2ndBYCMSLPS8XQ3W6d0p6h3w3REaQ1Jc9mT1h7Tk8yCCsAbrzRFpfZnBJqxqQiJ4xXk1NTS29RwkzcUp7vuIZor7LqGLUGmbn1n7mw5e2GbuXRt0E7pTFEWl7GNCUpa6RXBMQbi/CzTGXdG70uXxPT09KpVq1avTg63mfMkderUqVu3rvlwu2HDhszh9quvvtqsWbPmzZubz5O4ubm1adOmXbt2HTp06NixI3O43bVr1zfeeIM53O7Zs2evXr169+7dp08fT0/Pd955p1+/fu+9996AAQMG
Dhz4/vvvM4fbH3300eDBg319yeH2p59+6udHDreHDRv2xRdf+Pv78w+3AwMDg4KCRo0aNXr06LFjx/7wAzncnjBhwqRJkyZPnvzTTz8xh9s///zzL7+wh9vTp0///fffZ82aNXv27Dlz5vzxxx/8w+2FCxcyh9sRERGRkZEFDrfXrFmzbt26DRs2qNXqwofbO3fu3LVrl1Zrcbh98OBB5nD7yJEjR48WPNxOSUnZOTqLr7WLK8PiZ2HJc0yznr+0Rv7SmvmRtfKXvZi/rG7+8pfzV9TLX9kgf1XD/FWv5K9ukq9qClEu8G8LWPMarG0Fa91gXVtypXNDB9jQCdRdILorbHwDNrqTa6Kbe8LmXrClN2ztS66YbnsXNP0gpj9s94Lt78OOQeTaauxHEDsYdvnC7iGgHQrazyBuGOwZDnu+hL1fwb4R5Brt/pEQHwgHguDgKDg4hlzHTQiGQ+Ph0EQ4/CMc+QkS/weJU8l132O/wLFfIek3SJ5Brgofnw0nwuDkXDj1J5yYx51nTg5/nLoEzi6Fc8vg3ApypTltNaRFwYU1kL6OXIfOiIbMjXBxM1zaCpc0cHk7uWJ9NRau7iZDk67vBf1+0B8g17ZvJpDhS7cSyVCm28lw+wQZ3HT3NBhSyHCne+fJNfIH6WQYVPYlMiTq0TV4pCdX0423yICp/+6SwVNPH8DTh2Q4Va6RDK3Ky4H8vNLtP1gKCTgRAVp3ORmjKCrEopPKC9WiL2vq53EzTbrLzeH1d5Pmdqb100OZSEsg1zflaTO/A61Q+MxNMkIG1wk2WSQecQ5b6i43n9PdsGhTpzpbG2xSXcUErUnGjdoJ9NxOppPnrG8BambUm1Gn7ET7bi4AGREDTEcgfK94rPCrwwnk55C+Tn6u7Fp2UY9ndvhmgQ4gAREQEEh3MyLZPqJXJN1RLlF3TSLHncrmlLIsusshNiZNN52fVvipzXcfmaXadNxgPl/NXOfmrnybC/CPD1B3OcL4TRQEUHdFkQZ0AgkUTYDWXaM22NR/K2aEcMZK8wlkqvNcdmQV17s1yRI3h9ffJSeXU3XaZHLd1JCmCRvqZjJYRH/XpGec7prqJ7GYxbLAeGZuvqm/a46cG49NeZlOkgOAMZYXuskn5j8To6mnTl9CNtXGeWXy07QE/yMBBxNA3XVwAtA8EiiJAHN9lz+mqXNYsvWVOLGhuKutnMqadJGbY6m7ROSuaMN8XTwma/Rx5qc+CaO7vLuk3JX8S8qct+/yhlnxAHBR8yTW6kzeSvgVCTiMAOquw9CjYSRQOgKM7oIhOsDc2QswXxC1qIJ3H1Enpc50dZTTrRJ015A038eFohTDVOREc4nnmU0ixymcqX7iFNevLen6Lh2CMZa7sBsca3KdXqRf52cKPEhjuhDMjztpuuma8BTuwjfnlclP/ir4HQk4kADqrgPho2kkUBoCrO4CJIWRx1DQnxKem6HwW8fdZlNYd3mno7m+bNJc9toqK3tC6q55YBRFKQLN2mrUTg8n58qTw5gRX+QZHCu5G4sgUxVBK7RhSxCLhSf8qLul2bawjEMIoO46BDsaRQKlJ2DWXQDy2EVGYgo/JzJDZXqalUeoua9LW4k334IUpMnmV0Lx7t9NMj832WtBCrkxN9Q0xIkyDRumKyusZ9wcnuzx+ruKkHijIU4ZRt9PzJvPXd81Sz65dJ1pIpOp8mEr5B1wUB4hMXrSHc7ShY0znXbO0pjuPQo2jYE2cs9nHmUWclPN+B8JOJQA6q5D8aNxJFAyAZ7uAkCmOph5RLPCMyQ6peB7ESiF53T6/lt+tbybc2jRVri4mk7MUiFa9hVGSWHMrTiMrLsGqGLCuXtgudHFvPt3/dV0n5o35yOTEBLrvDt5KIoyPz/rFHf/bkgc7WUm99wMD2YsmNGQkRAR5E5Rpr6vMTGMfhcE260l/xR+nEIDZESxByQ+9GDsjC3hwfxHfBQY28WHg9+RgOAEUHcFR44GkUDZCFjqLlnX
kBETHsQ8m8msRAo3r8BwTZq1658AxlQVo9Yu7j7BUSkp1h5ZZUwO92tHXtDnNUFFP6TKyF7ubRcQkcpccOWP7aIN+6pU5tcPMJ7wu7xZWuUA8mANF98wHXOjLXfumvVbmWDQsK9zMEfC+8K/NKvXhvl70s+jcvHwV2rM3WITzIwYJfGfolwGBKszQTedvBsxJFKtTc4wZJsK4X8kIAICqLsiSAK6gASKI1BYd9nS3IVbyuLGm+Iqw2VIAAk4mgDqrqMzgPaRQAkEitRdAO6aboHzriVUiYuRABJwHAHUXcexR8tIoFQEitFdgOyUCPNDkhVeIdFJejynWiqqWAgJOIwA6q7D0KNhJFA6AsXqLl2FPkEVFujjwb53j74+yr/OWjozWAoJCEaAvJTeNAlmVDyGUHfN2b+2RzxpQU+QAEegZN3lyuI3JCAFArtMb7w/OEoK7traR9Tdg6OAeRXVLl9bw8X6kIAtCKDu2oIi1iEmAkenybrZzcvLizN9Hj9+LKbMCOSL+cDr6DSBLKIZJFAmAqi7ZcKFhSVA4NwKVnc395KAt+iizQls7sVuAOdW2LxurBAJ2IAA6q4NIGIVoiJwdTfb7K5xFZVf6IxABNa4shvA1d0CWUQzSKBMBFB3y4QLC0uAgCGVbXYja0rAW3TR5gQia7IbgCHV5nVjhUjABgRQd20AEasQFYGnD9hmdxEFWUdF5Ro6Y3cCWUe57D99YHdzaAAJlIMA6m45oOEqYieg/YxtfPd/I3ZX0T/bEtj/DZt67We2rRhrQwI2I4C6azOUWJF4CFyO4To9j2+Ixy/0xL4EHt/g8n45xr62sHYkUG4CqLvlRocriprA1j5sE3xitqj9ROdsSODEbDbpW/vYsFasCgnYmADqro2BYnUiIZC6hG2CN3QQiUfoht0JbOjAJj11id1toQEkUG4CqLvlRocrippAzmMw308S/52oXUXnbEIg/jtWdNe4Qo4cnxdiE4pYiRAEUHeFoIw2HELA/OCqRRQcn+UQF9CoQASOz2JFdxEF+JgqgaCjmfISQN0tLzlcTwoEdgzimuP7F6TgMfpYdgL3L3BZ3jGo7OvjGkhAWAKou8LyRmvCEsj9D1RN2EZ5RT1hbaM1oQisqMemWNUEcv8TyiraQQLlJYC6W15yuJ5ECPAfpBDdVSJOo5ulJhDdlevs4mNSSo0NCzqSAOquI+mjbWEInFvONc0rXgZDijBm0Yp9CRhSYMXLXGbPLbevOawdCdiKAOqurUhiPaImcPovroFeREH6elF7i86VSCB9vUVCT/9V4hpYAAmIhQDqrlgygX7Ym8Dt4xBRlWusj06DOyfsbRPrtz2BOyfIiGXmzfaLKJLT28dtbwVrRAL2I4C6az+2WLPoCBhvg/ml6EzDvcMbLqwRnZ/okFUCF9bADm9OcRdRJJvG21bL4kwkIF4CqLvizQ16ZicCunEWbfciClY1Il2olH8gcxPcPATZlyDviZ2MY7WlIpD3hGTh5iGSkZR/SHZWNSqYNd24UlWFhZCA2Aig7ootI+iPEASuxcH+b2F5nYJNufnspcO/RDWDrX1gz5eQOAVSFsElDeQ9tReZvKek/pRFxNaeL4ndqGbiJbO8DsndtTh70cB6kYC9CaDu2psw1i9eArlGSFkIG93FqzF8+V/XFo7PhMd6W/J8rCd1rmsrDQIbu8PJufDwmi0JYF1IQHgCqLvCM0eLoiNgSIUL/0LyDNg/EmL6w9rWFiOw+OLn8O8rFXBoItw9XVGGd0+TelYqRKq4EVVhTUvY9g7s/YqcZD67FK7uqmjIuD4SEAkB1F2RJALdQAIWBO6nE6VJWQSHJ5HRQ6pXLQRyyXNQkaubunGw5DmLClWvEiuHJxGLV3fB/XQLZ/AHEkACNiSAumtDmFgVErAXgSf3yfAi80uFmW73Jo8yd3zvnoZNHhaKu7UPqfnJfXt5jvUiASRQgADqbgEg+BMJiJrAlVjY628hnKkRpXU4NcJi
xb3+cCW2tOtiOSSABGxFAHXXViSxHiQgHIGUhbD4GU5E9wWUbHpfAFd+8TNkQBl+kAAScAgB1F2HYEejSKCiBG4nw78tOCmNG1ZchXHDuJL/toDbycUVxmVIAAnYlQDqrl3xYuVIwL4EYj/mBDV9nXVb6eu4MrEfWy+Dc5EAEhCMAOquYKjREBKwC4H933KyekNX0MQNHbd0/7cFl+JvJIAEhCeAuis8c7SIBGxMILobK66qpvDwClf5wyugasouiu7GzcdvSAAJOJAA6q4D4aNpJGAbAtmXYMmzrL5uf5+rc/v77Mwlz5LHHeMHCSABMRBA3RVDFtAHJFBRAulrWYldRMGFKFLbhShuTvraitaP6yMBJGArAqi7tiKJ9SABBxPY8yUrtDsGEU92DGJ/7vnSwY6heSSABPgEUHf5NPA7EpAwgUsaroN7+wT3/ZJGwkGh60jA+Qig7jpfTjEi+RIwD7BKGM/qLg6nku/WgJGLlQDqrlgzg34hgbITOD6LlduVDdgvx2eVvRZcAwkgAXsSQN21J12sGwkIS+BBJiytwZ1hXloDHmQK6wFaQwJIoCQCqLslEcLlSEBSBHZ4c7q7w1tSrqOzSEAeBFB35ZFnjFI2BA6O4XT34BjZhI2BIgHpEEDdlU6u0FMkUAoCp+ZxuntqXilWwCJIAAkISwB1V1jeaA0J2JnAxa2c7l7camdjWD0SQAJlJ4C6W3ZmuAYSEDGBu6c53b17WsSOomtIQK4EUHflmnmM20kJ5D6GZbWI9C6rBbmPnTRIDAsJSJkA6q6Us4e+IwFrBI5OI7p7dJq1ZTgPCSABRxNA3XV0BtA+EkACSAAJyIkA6q6cso2xIgEkgASQgKMJoO46OgNoHwkgASSABOREAHVXTtnGWJEAEkACSMDRBFB3HZ0BtI8EkAASQAJyIoC6K6dsY6zCE7iiDnClXPzVeruaNup1kcFerj6qK3Y1g5UjASRgAwKouzaAiFUITsCoj4sI9vf0cKXIR+Hm6R8cEZNhENyPkg3aX3cNsWEh47wUBESRuqsLpUFZ+aNw6+vpExgSEZ2kN5YcjT1KFOWbop2np79SlWDfIxZ7RIR1IoHiCaDuFs8Hl4qPQKYmuK+Ccg+KSMgw5tDuGQ0pMWE+rpSib4g2y6EOp0VEJDjEgYyId4vTXeJTjkE7pTOtvCGabJOTOUZ9qiZsqBuRbYVncHSGaUEZ/hvjItQV7GcX9i3HmBGnZI4mPEJ1DjokKAMELIoESk8Adbf0rLCkCAhkqvwUFOWu1JmVw+wUs0jhp3LcG2czFnspHaO7epVvSboLAAlKWneVOjM005eM6CAPskzhF1VW6TVqJ/jZ4Py2Nd+M8SH0kYIiJA6V15Qq/C99Aqi70s+hfCLI1indKYryCEu2HrMxNpj029yVSQ5ppa+o/RSURHUXADJW+tCqXCReq9CNiUqPos9vW13F+kxruguQFNaJdmqC1iEpte4qzkUCFSOAulsxfri2gAQyFnuRNthXVfQVP52Svs7pFVnWTluFw8hOiRhGbEtXdzmR81eX9kp5ljaEHAkVeV25DFit6y7Tjy8+6WUwgkWRgBgIoO6KIQvoQ2kIsF0fz8XFaapuCt09ejciA0AfxXTgyByTHOqYM61kVij/bKtBNz/Iqx0t2mSUllLDP1lt1OuiwoIGuNCVGHSzAsh4LoWb33xTv/oKfcmZtsz+YQ8OjIZUTXiglwvvWIH1kF+Y/c5XL0NKVLCPuwtZ4uoVNF9XWAj1ceFB3vR1WVePgFmqsI9KoX/WtY2Dn7HYk/YlSMPZK5KMMTnchxnXZo7FhNSYqlb6e9DeUy7uPkGRSVx9nDXLb9Z9Yw+kFKaauXWyU9ShdCIIIg+fwAi+DUOqJmKCn9swcohmTFUF06BcBgRbpJWpS68ND/SiOZrDoL/wUgZ6bZi/J3MN3G2oUp2KfW8uD/itHARQd8sBDVdxBIFMFaOiQVuKa8NNJ0v92Bt3clLCB5Bm
1KS7xPOMSLomrik3aAIVVKdgDd2PNpwKJ4sVIeyZzSydOioiiPTqKGVMUnhgUFiURhvDzLG47sjIPM+QQRetVk2mL5tyjbheNazAFWjausWF1QzVMBevuTpDDhkMpZtLRhe5jOJJIRh10z0VCq+wBBpFVhLT1S6532ld23jZjA9hxMcURbFk6PXo0cj8IwaA5DAPivJZkEKcy9FrJpBLtCWfgbDmW0YUuZpPUV4RaTwnydekMHeK8g2nbYB+SzCxMYAcbAFASoxaNZ0ekuWr0m0JDpoQoY7Tqpk5pjJsdfSYAI/JGj1Brdcwyeqk5I/jMiYoPdyD1GlEa41parIlKDzDi7jSwVaL/5BAsQRQd4vFgwvFQ4Btly0UtLB3pj6uj3l0FXObiklI6DWYqsy6e4VWdPNPMGgC6b4jr8ubNJc07J5TtGbNN8YGE0Xg1mK71xaGAIA5XODr7mKLS5WMtPCH7CbN9aAszvQyY5U5jadHGymCY3m9rix1APHGUv8K07GmbRalTJDZyktBprDuFpyTHEbYTeGfXbCwyf6w8M1oyNSpJtPaqfAMiTVTN61IF/aJMl9wYM6FhHA2DJog0g8OiOD6poWHfBvU/tT/tXf+rm1rURzXn6HVkOEZMiRbMsZbBRlqyPAMHYLpEEyGIjwU85YgOhTRIYgOD5OhoAwP7CHgDA+cpeAMBXco+A0BZcjgIYOHDBoy6HHuL139cKQ2auu031ISVb6Szv1cV1+dc889MgzNuQ/HPdJ50/kkL3Q76pibej6BGPeUfsvm+A0CZQhAd8tQQpsVICAlIS1sSdOk7jaUh1Ssu+Gkt2Y23s/UmbKHZHzZKOKapMlJTptINot1V12EbTDXMJmeTZHV1j9KUagZt8d8M2W3J5XjAAAHz0lEQVTHBP1nJA2xxtDewK8iziwTno3eR3apEmTSKsvys8w1O3bPOahlBNh16IccX+5w088/ms7pJH9V8ZXfNGv2udLjbDo3m1BIXlSYGj9OTZh3r5MMx126svqO0ZiaDucuLOXdMWRARdmPDRAoTQC6WxoVGv5cAld9PveY0qSUUVJ349tiVkTFLV5zVdVJxHQsu/erm28kp4r1PUJ3tTv7V+suT89OLXwSt/VYfeItfi3OQbsuMz4rPKpP2kbCp9T2q00RZ9YcPvnRMjJZ3ZVHRBHNizstPmueNjhuJbZ020TieplFTeH80nf4+uOEu79Md2NNjSKuu5qXLB5x4u7zL088BNpW4suQ6Q12gMADBKC7D8DBR6tEIByzwK6x+S7hfqRMnL5hE4IbrmpUUnfnF25rvW4deKP/FmNW3Um/seZoasaNy2nzkL8bjrtmznpZJj+JGHKqh1yf0jJWje7KvCpbD4U/TCZfdxczv2vV11vksF6xMH7a4FSvlL8rvU8xnb/tXGrh9MRBlHpmrddbR/7kJsgsXy6ju+H4tWkYehiZRZ7V1H7EqLIcvcSV8Q8QeBwB6O7j+OHoH0eA3yUN46H7IJ/DS2TxlNDdYLBfM7Z6PK9KxXW/q+5mp3UFSKY3m2/VY0OGL9fd534yq7sS3RUZ4+ZrJbvFZLK6G146DdNs/c1znooi7ap/ur/LdoqkqlQ8gLe/mzg7pvmiL6+R7X4Z3Y0idh5jy+FlzubnvW2j1onD1xFLPm8Pfm4RNIUIG78KAejurzKSv0M/hA+0ucwHCi8dSuHZ6OnVrAp1l/upuouZPSTHl32Mv8s7klt1i6f25IhN4J+MaZBFIDolBlnhyftCZLRNbyRSwc2WKvpYhkxGd9nKH/3ZKANKv2i8nWNbODli2eAZUJMj0zDiKfyIO6ZfHWemiy/Oe+2XHYsWRJn13Y73Uc0Z06fBCS0Z17PeuMHhx/4gnWIddwVbIPAwAejuw3zw6WoRED5Q5kZMVspqVilV5rHT9lDdTxfTd6z+hkyJ4iqrNxgd0jwe83fDkIU5uSbpHvCy+V2h37dzkRCUVZ3cad0oCv8d81QpymemXNym
eyEKUIe3U/+wIWsl8nVHxuZf+mqXx+ZVyTqRibhuGTKsjZDA8Ga+4P3V4vzRF48m5lmcObxbFjTOxJn59y6csgplhrmjVwblDxlafPh+5qny1Hd8xNjcbTK4zbujDyKtETocLbcpiiifmSS58cqf3rKGvHD0c7FmabX+e8CaJ0IAuvtEBgpmSgLBmd0wE7IUyfciGGtN73PmLspzhk2rdzoaD73Ovjs5Z8UzTMt+642uosU5v7VSg9Gp2zlwnZckfM13g/6RR6Ux7uc+LTgxGqpQBvlJrCblRk8tLVqc0dIVc9cZnPvOkVg/nGnGfbhMxlAY+C/k1Obd1N1ls9R0TfEn4XJd07sFyQ877E+uF4v51H/dtlg9xe39nvdBLyAhqaXei7CQlPT3Iqw1vU/q6YQOLCYTRXyF1fZhfzT07PfTMJp5bK3z9r43uBj0ux37jU26u2H7Q9cZJvK0Y+Pi9yJYbtKGiJfdpvKf9CYMbvjsmPvBbW84HpzYna5jk+5u2h8GLiMvIh9GJw4Ry5XczbiW2Xzwgr5I9slgfDGWfyezuaJDBgZnvHK1HAka49QK7Lgf2AKBMgSgu2Uooc2KEVjMBketRI0hqtk0WvYiwODMpkCiWbe6g+CeXCuTp/wI9Qmnx+xsdBKqDBV+chumWf/Tm96puK667Tb9G+5vqT0qRTYYvWqYhll/2Z/Raxtymgkliw/Vt6TusnITY1k/q7bVdoYzqZNyIHjiEiVm1a2uP1sE/l7N6vYnN+mG/ADu6ukXU9u1rUbzwPUvcxfsPEiGn/pu6u3VDKNmHcknkOuR/azGwra2T8tn5zR9vmbZZ8kpadmVPNsSC5Hl2xG4yfyjYNS16Brrln1KcObDds2oWd1RIJddqQ7SGmsRxJb7hB8cTo/5G4/kfvnb3ElUtlp8keW3TMq8m2C6V44dfn8bAejut3HDUStAQLp95IGgbv4KDMjTM+Had09ni/lsEvu7I//YtfdqOZUpn173YPGKEoDurujAwKxSBG7HvR0eks1Ebksdj0a/MYFrv7VsmvbGb8rp/98YELr+vQhAd78XWZz3BxG4DwYU3aXpzs43vbb9B9mJy6wWAVb265k3u8+aFc7ed/TakNkW2AMCjyEA3X0MPRy7KgRoBo5mGY3anjv4LNKAV8U42LGKBOQipbXtdtftD3le1cg/7nV2rZyK0KvYBdj0VAlAd5/qyMHuLIHwZjo46XX2GnUzkZiTbYk9IECZb5e+e9CktzrSH7O+07ZPlmbngRgIVEUAulsVSZwHBEAABEAABIoJQHeLGaEFCIAACIAACFRFALpbFUmcBwRAAARAAASKCUB3ixmhBQiAAAiAAAhURQC6WxVJnAcEQAAEQAAEiglAd4sZoQUIgAAIgAAIVEUAulsVSZwHBEAABEAABIoJQHeLGaEFCIAACIAACFRFALpbFUmcBwRAAARAAASKCUB3ixmhBQiAAAiAAAhURQC6WxVJnAcEQAAEQAAEiglAd4sZoQUIgAAIgAAIVEUAulsVSZwHBEAABEAABIoJQHeLGaEFCIAACIAACFRF4H9VIF8DJGgOYAAAAABJRU5ErkJggg==)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "awO4N921VxaK" + }, + "source": [ + "## Quantization Approaches\n", + "\n", + "Quantization can be categorized based on *when* the quantization happens: in quantization-aware training (QAT), quantization is incorporated during training, whereas in post-training quantization (PTQ), quantization is applied only after the model has been 
trained. This tutorial's emphasis is on quantizing pretrained models, which means it will focus on the latter.\n", + "\n", + "PTQ can be further subdivided into two approaches based on when the activations of a model are quantized. Dynamic quantization does so during model inference, while static quantization does so before inference happens.\n", + "\n", + "For all types of quantization, weights can be quantized beforehand, because weights are dependent on the model itself and not on input data. This means that information about the range of weight values is already available at the time of quantization, allowing weights to be quantized without requiring any further information.\n", + "\n", + "However, activations of models, i.e. the values after applying activation functions, are dependent on input data. This means that the range of activation values is subject to change during runtime, which motivates the different approaches to quantization." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ldPXP0keXXh6" + }, + "source": [ + "### Dynamic Quantization" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OggKu0F0Ytd8" + }, + "source": [ + 
"![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA4AAAAGDCAYAAAB6P37QAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAEnQAABJ0Ad5mH3gAAJF7SURBVHhe7N0LXFRl/j/wT1veLfDCRTElCzUw0ZQwbAUTpYW8FKlJbtOq6SIVyhop+Qflp2joorgqK6m/pgxXjFZ0oVBMISEQCzAhjS6jqQl4G1O8UL/9P8+ZA85wUTC5OZ+3r3HmPOfh3GbOM+c757nc818BREREREREdNf7g/pMREREREREdzkGgERERERERGaCASAREREREZGZYABIRERERERkJhgAEhERERERmQkGgERERERERGaCASAREREREZGZYABIRERERERkJhgAEhERERERmQkGgERERERERGaCASAREREREZGZYABIRERERERkJhgAEhERERERmQkGgERERERERGaCASAREREREZGZYABIRERERERkJhgAEhERERERmQkGgERERERERGaCASAREREREZGZYABIRERERERkJhgAEhERERERmYl7/iuor++oS5cu4dy5c8ozUU1at26Nrl27wtLSUk0hunuUlZXhzJkzLAPpriHL7C5duqBTp05qChERtUR3LAD86aefsP/z/fj668M4c/YMysvL0blzV7Rr207NQWTq119/xblz4rPyazmsulqh98O98dRTw9C/f381B1HLodfrkZ6ejozMbBH4leL6tau4v5M12rTriHvUPEQt2a+irP7lQinKr19FJ/H93qtXTzzr/Qz69u2r5iAiopbgdweAubm54qJnP4588w0e7euM/o6Po5NlF3Ro31HNQXRz169fw/kLZ/Hdj0dwuPBLWFpaYLj7H+Hu7q7mIGq+jh07htTP0vB5ehqsHnSC7SNP4IEuPdD+ASs1B9HdpfzaZVzWl6Lk2CEU/3AAPXv2xJ9GeWDQoEFqDiIias5+VwD4wQcf4MiRb/H4gGF4TAR+RHfC9z8exdff5KBVqz9gTtBsNZWo+fnkk08Qvy0BjwwZB9vej6NtB1ZnJvPz83cHcfzwLgxw6ofpU/+sphIRUXN12wHg/PnzYdWlG54Z6Yv77mulphLdOWkZKcg99AUWLlwIa2trNZWoeVj9j3UoPFKEgaNn4f7O3dVUIvN1+PM4XDnzLUIXzEfnzp3VVCIiam5uKwAMCAjAY44ueHq4t5pC1DCKvv8GH8avx9q1a9nxADUb0avX4oefSvDE2GA1hYik0z/m4stP17HMJiJqxuo9DER8fDy6drFl8EeNwuHhR+E1cjyWL1+uphA1rZSU3fi6oJDBH1ENbB8ahEfdJiJiGctsIqLmql4B4L59+5D7VT5emjBTTSFqeE8+4YGunbsjZl2MmkLUNDIyMhC3JQ6D//S6mkJEVfV2HoX77n8Qa9b+U00hIqLmpM4BoBzT6qOPEjDmmclqClHjedZrIs6ePY/Dhw+rKUSNb8u/4vHYiL/ggS4PqilEVJP+7i/jZMkFltlERM1QnQPA/fv3o69Df3Ttws44qGn0fWQg0tM+V6eIGld2djbuua89uvUerKYQ0c106umCz9Iy1CkiImou6h4Afp4Bx74c44eajhxq5EDOAZw+fVpNIWo8aZ9/AZuHXdUpIrqV7o+44MucLJbZRETNTJ0CQFmF4+Ivv+BBO3s1hajx/eEPf8DAAS5KOyyixpb31QF078MAkKiu7vnDvXiw75Mss4mImpk6BYClpaWw6cpxrqjp2Vh1R/HpEnWKqHHIMrB9R0u0afeAmkJEddGh84M4+XOpOkVERM1BnQLAkpJSdLLsqk4RNR35OZQX40SNSX7m2t3PMpCovto/YCWuIfijHRFRc1KnAPBM6Rl06tRFnSJqOp0su+DM2TPqFFHjKBFlYDtxIdu86HEqfTW0YSMxZ3hHTBGPAI0PNmh34NQlNQv9LvpUP3Fc/ZDZ
0n5zKo1HtPg8RKcWqwlNp/0DXXGOZTYRUbNStwBQFN7ywpuoqcnP4VleTFAjk3cw2t/fjALA0ymIC3wcwQtiUWoxDhMikxASnYCXnrFHyb/9EDzFB4kFV9XMd4Fz2di3JhY6dfJuoi/QIm57vjp195F3AC+cZ5lNRNSc1CkAJCKiZuJ6PhIX+SL5mDtmxR/G3KA34DbUHY6DvOA2eS1CNu/BhG5p2PZ2CPL06t+0cEU7R2JD/AWUq9N3DxHY+gcgWX8XBetERNTsMQAkImpBSlPDsa3ABt4L1sHNVk001tEV3tOC4dDrGkp/ZmBBREREphgAEhG1GDoUpKYAnWfCZUhbNa26VkNCERa9FqP6GeX5rRiFCUFYoXlYaS84ZfLIGtoLFiMzTMwLi0fp6RQkLlPbF44fjBUx8TfyXk9DnGxzqM1WE4zod2CDmLcg/ka1xvLjO24sa/jDCJ4fgswfq9yeLIg0tLc7no3k4P6GfGH/QJLYnkUbZYZwLJJ/X9M6jehzV2ND4GAEKOvqj0VhNaxLqNM21egqTqVHYoO/3MYajo2xc2nYHeWL4PFyHR0xxz8AielHUSbnKe30RmKbfL1xpDI/sUBOGNR5+y4dRWZMxTrE/i5bjaKz6jwiIqIaMAAkImopzh1C4UHx7OWKeo3KeiUbiUFuiIjehVZPL8Xc6CQEvtDP0F7wzSDkVe3k5MR6rPvr36DrNhOa6ATMeq4vTm6ZiuDl8VBCkNauGDjFBvp/p6DwuvIXlcryU7APXvAc5qxMl4vALmKKH7bl28AjNAEhkUsxrPUOrNM8X0M7xWPYvXQaCgdEKPk8/jgUT01OgmaMnKcR25KEkBH9lJw1KcsIQkhgNC44vIFXZN7IUDhe36qsK7lIzSTUb5uMXUWR1gfBC8JRYP0yZkUmYe5rk9AqXRybNyNRdEXNJp2Ox7qpPtDmtMGw18Q6ouPg3VuHXQsGY/nmfJRbuGNcdBQ8ZN4xUWJ+Elx6KH9Z9+2T7+ubg7Eu5Sr6/DlOLCMCbvemYNW8+fhWzUJERFQVA0Aiopbit6uGdnDt26KVklA3pXsjsS3XFuPWHESgZiIGDnKHi+9ahPxzE1wKYvHuv9NM29cVXYVjeEVeL7hp4hDoLwK6vdtRoASLbeE4bCYszmlRYBIwFSNvjxYYOhZOdnI6H7ujwlHkEIywTXEY5+kFx6ETMW7RfzDLS4dtb0dWCSDzUdQrArOmjFXyeXu6wKKfO+yt5bxesBfb7djTQslZXTEK9sVCP3wpNK9p4CLzimVMCF2HUZ0vIK+g4o5kfbfJSFEstBuzYa/Zg8hFwUrby4GewQhcuQluP4dj1QcVx1GPnPemIhNvIKRiHYPGYtSbCZitccXlnEQU/WYj9scZStdC1s5ivju6K7tW9+1T3tcCV0xYkoDpvuKYVazjOXtDoE5ERFQDBoBERHc1tdrokBnwGFCl2qitCBgmi3BlcwqKTIIeLwyskte6p6v4PxulFdULnbzg41CMxKzsG8HjyU+RuRfweHa8IbA5koKkIsBx/MtwaKfkUNnDbbwMICORd8j0jpvLEFe0V1/XjwU6WNkA6euRnCy2s+JuXDsvaLZ/iZDxhjuSt7NNFXTZ0eJousPzGVfTANx2IkY9Z3PjOF7KQmGyWKLfJDiarKMtHKaJ4DE6tEq6kTpvnw5FmeJ9HT4Tbk7G75VYh9dMuKhTREREVTEAJCJqKdpZwFI+l6l3AuukGHpZbdS5nyEoM9EWdg5jxbMOF0xuGVVvX9jeUvY4Uwz8ZpgWC4TLeHdgSyIK1PZvpfk7kAcNnJ0Nd+n0Px9R7kRZ/3YMhblppo+Sq5A3CUvPnlbyVuhuLYK429IWjmPXwXuQDrtl2zkvOS6iL7Rb4lF48sbO3c42GRSj9JgcV88e5SVV/k48Lvwqq6aK43hOPF3R44J4supcUy89N1f37RPbky6eHHpVf19te9WvijAREZkVBoBERC1Fx4FwGCGeU7JFqHET
l1IQ5+8HbcrRWwaK1ysDuvqzenwiBiIWB7+SIYvhTqPFFJFWUUvzN8OdtH1RPogIrPIIi0ShmJdz2nSw8tb3qi9uh60X/KILsHZjHKZrZqBP+6PYHTMVEZMfR8SWfMOxuI1tqlCu3CXVQlv178QjWpsm5u24cYf0dv2O7SMiIqoLBoBERC2GDRxcvYBz65FzsOZqipI+S4vkgh0ob2eLVuJvLIaIxPwjqNrXi+zUpPSHHeLZHpa1Na27Gbtn4CYC0n2ZaSgr2oHUgzbwcLlRPdKix0DIxY5bcwmb02t5aGTV0jupLSwcxsJjWhQCYw5j884MaEYUozBmq1I98/a3yQbdHeTdyWCE1fQ36mOck8ii3qktPVf9TmL5wRAEaPyw77iaUEXdt88eVvLHgJre19JjN/+BgIiIzBoDQCKiFsTKMxQTnIqRvHgWMmuoqVh+XIsP14igzikCnsNlKGEPJ08RNB6Mxb6qbdtO78DuLSLo8HWHfWs1rV5sMHCkBkjeh+2pW6FzCITLIKPqo71d4dFZBIg746sFKaWpUxEwfjC23SSQrdDqXhl43SpfPnbPH4k50Smmdz0t+sJOqYnZFq3kPv6ObbJz0ojgTIvdqVXDKx0ylzwsArtwFMq2hx2HwtFbpKYkmvYMKvLl7FwN/X1DYd9TTotjJbYFv8rXqjpvn/pjQA3va2nWVuSor4mIiKq6d6Ggvq5VWloaHuzeG5YW8puKqGnt2/8pfH191SmihldQWIjSi/+HLnZ91ZQmdK8t+g3pj3N7wrFtUxx+OC+ih3uu4pfSQyjYvghrwlfiaGcNApeGw6mj4U869LDHvQVafLzpf/HTH7qi7X/P4KfM5dBGhCNf5P3r396AvZL3Mn7atxo5Ond4/GUYuih/rSrNwMdJaXB6NgT9lF45DVp164ir7/8NyYeLYT8pHM8/ZtTu7V579HnsPnwVHYSkvJNo3aEjrp0zbOeGtR/jt+HL8cqLzugg89ayfOmekmwkp32J1l17oPV/74dl146oXlPUFveeex//0X6IojP34N571WOSEIr3tn6Pnq9F4nknsW312KZrPySI9QIuk3zxoEi4t5sr+vwhGZtXrcaRM+3Qtv0l6IvSsHvTa4hL/RWD34iCd195768t7B55BKe3BWNz8mHca9kR/1fyFTLfnQntXkuMmrcaHj1koHwPzn0jjndBW3S2EdNtbGH5wCN13r4ODq6wPbUOm/+RjHMPdELba0eRtzkI/0w8jLZXLqOLeyCG9lY/BE2o6OBOltlERM0I7wASEbU0tmMxffP3CAkcjVbHtPgwWLYR80PiIWBgYBLWbl4LF+P+R9q5YlxUppK//LP5WBHog+iPjsD6uThEvlslb32pYwJC9o45XO1p00grp2C8qY3D6G467Fbatd3Yzoh5E2vomKY6i2FvQeN5FQeW+yIi9lOlg5Wa2E9MQuTimbA+UXFM5iC5yBajF3+JkIk3tu32t6ktHDQfq+tYj/dku7wFy1GE0dBEZ2KWp1HXK7YTMWtTEjQu15CxRmx38HxkXB+N6bEZ0AytqG9rA5cpUXD7NREbxPbGZRlu6dZ9++zhNk+8r/79cPIjP2V/d59zxkur14ktIiIiqtk9/xXU17VatCgcTw7xhH3PR9QUoqazcOlsxMXFqVNEDS9+20co/OlXOLgoI5ITUT0kxbzKMpuIqBnhHUAiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMxEiwsAc9fboO+Amh8jp0xFeNx+nPlNzWyODq1SjkXMIXWaiOh30qf6YcpwP2SWqgnN2O1vazEywzpiSlg89GoKERHR3aiF3gF8DGOmL8RbQcaP2XB/4Gt8uMwX40I/xgk1513tl+NIe28VdhWr00R0S9988436iqh50BdoEbc9X50iIiJqWC00AOwJ90n+mPqK8WM+
Qtftx79mPoYzO8Pw0YFrat6715n9CzEj6hBM9nTAbBw9VAz/Aeo0EZkoLS2Fu7s71q1bh7Nnz6qpRE0lG/v8A5Csv6pOExERNay7rA1gGwwa6QtHlGDboW/VNCIiU+np6QgICEDXrl0xYcIEfPTRR+ocIiIiorvb3dcJTCcb9BBPZ34z3Bc788lU9B2wCrmnkvD284+J1y54ce1+9a7ZNXy/ZxXenuJiaEf49FOYEfUxvv9FmVnJsIyp2PljEXZGvYSRSptDF8xYvAm5F9RMlfSGZc70UfPZYNjzvnh7fRK+L1OzCDfdrl+KsGv9HGgqtmvAY/CePgcxe4oq7/bJtpDD3koSr5Iwd5TIM/djnJEzamwDqG6T8X6Kbc9S/qBCCXbONSznxKlUxISq21/LMSG6W8jgTwaBMhiUQaEMDu8G+tzV2BA4GAHDO2LK8P5YFBaCzB+NWrcVRIr0jkgsUKcrZSNR/o02W502cuUoMmN8ETxezB8/GCuiYlFk0mDuxt+WHRLr9++vrCNA44u4DJ2SozQrEus0Dyvpc/wDsLuo6p0vPU6lR1b+bcV6Cs+ps42U/RiPuPnqPk4eiQ1bsnG+ahvw29lPE3oUJQRhhbrNhvWkQV91PZfksfHDoskij8gXoPGpOZ+x0nhEDx+JbfL1xpHVtrPsxx1IXDYSc+R6hz+M4PlB2J3LOv9ERPT73PNfQX1dq0WLwvHkEE/Y93xETWk6MvB5ca0PVuzehDE2aqKRa/vnY8CsTRj9ztf4x5+slUBr2FvFGDQAcHxhDkbffxzf3++Ll55oK5b1vFjWQfQYPR+zxw/BA78cxEdrl2KX5Xz8K3Y2BrU3LNOwjONiGcUotZ2m5v0cHy5fhTQ8j39sicFoZVuuoXDD83hudQlGvzIHY57qiQeul6Bwz4fYmLAf8PsQn8zzxAOVy6xhu5yPI2aaJ1Zd8MHU6b5w726Ba2e/xq6EdfjoAPDSuv0IfcoCF3/cj6zklXh9PeC/Yg6G2vXBICdrtJEB4JSlmL25ohrocewK9cXr269iqO8cvPSnPmhzVt3PMk+ErtuEl/q1EflkAPgY5h4fgkFnSmA1aT5eGPQALubGYdXaJJwYHYOMFc+jq1xkE/OfPRkhISHqFFH9HDp0CEFBQepUzR599FFMmjRJefTr1w/x2z5C4U+/wsFljJqjeSvLCMKb8xPx0MRQuA+zR4drxShInI/EDHv4bdwDbweRSQZG/uGYEHMJ45wMf2cgAyMRlEzbg80aVyVFdqwSEH4MDk6nccF6Jib8yRXtL6UhdU0k8jADc7VRGGhh9LdOrnC4YIOB0zRw6KivXPeoib2QV9QO3r4TYYd87I4JQc4V47/XIWfZs4hOvgLHMW9hlGdftDqbjbSN4SKfFzTLP8Qoh7YyI8rF9keI7S8ZpMF4ZXkiAEt4B7nHxPaec8WshDi4WYmMdd5P2QnMw1iHTVi7aCKUzRHbk7lkJNaliHJa3R78EI/dH2hx0mUTFr49EXIVEPuS7D8McdDAz3cs7Lu0xWWRLzlai5LxCVge5AX168TU9WLoChKRGhiEfWOiECKWb9nbHaLYR+neAESEaXG9Yv9aF6Pok3Bs23sFAwM/RqCvM1qpi2nukmJeRVxcnDpFRERN7S4KAK/h4pEEvDN3Dj4qm4rYj5fC3bIi0EqCY1Aq/v3KY2pe4UgMnpu4EJiZhH8FDIEMgRSnPsbcKf74Qnxpf/bGU0p6xTK6jonB1iXPK3cYFSLv68/446vpat6y/Vg1Yw7SPDeZrgt6EYT1EUGYCCwPicBSpNS2Xde+WArNkj0YvSIVU/upidKFJLw9fCo+CkjC0ZlDlCTDMmB6LKoEgNcOLMXT01fh8SU5+MeYnmomoewgVr3ig5iuK7FnnZ/YJzUA3PUY/LVJmD2o8oig8D1PPBfVs9agu7G98JInvv76a3WKqGHJ9oKPOPRBl15PYID7ZDW1OStGzpKHEX1lE1YurghQhCsp0E4O
wclXNiFkvPNtBIA7YOFlHPQIp+MRPXEqSv0zsHiyWGbF32IsZsWLAMzWkA0ntYiYHIBCh2CErQmFQzs1/VA4prwWiXFrLmGCKK/Kc8MxOzASfd4+jEAvezWTcCUb214bicQua7EyUiPWr8O+4P7Y8EsowqKCbyxP5EsMEusvEOu/AwFgWXoAZizQwqXK9lQEnx3mHcZcb5F+JBIBM8Lhoe5HhVPbfbA8xRmaZRFqgFuT6scb19OwbaIPEp3Feyi2pfJ44yqKNo7EIq0tpm9JgIedmtzMMQAkImpeWmgAqE7UpOtT8F+2CbOfMHzbVgRaN+6IGRRueAzPre6DJf9JwAtGcZGUu/4xsQ5faHMWYqiIgwzLOI634qsEZSLozBKBkeY/8/Hvz2bDUU2tiWG7qweAVberdgcRM8AHq+oVAKrb955/5b4YO5Hgi5GLoB6DigBwttjG+co2VriYNh8ur/8Hs7d8DX+TC6imIQNAC4tar6aIbkqv19fpB4Q2bdpU3gW8dLmsBd0BvIrCWCdEbLbHqHkR8B7hCquKAMlYvQPAY/DbmGG4e1hJj7woO6xID8Xi7cGwr/jb4VWCz4p0jVjmNDXIkdQAEqHfI9DTAoUxXRGx5Q2EpEbAsbWaR1W60wdzlkMEPknwuFf9u6qBolCa4oc5S3AHAkA1kE6paXvUvL+IgDRKBKQiwF0hAtwfh4XiVf+ZcOppUY+7c9WPd/nBEPwlaDW8o87Ab4jhjmclNZjGm4cRMsZ035srBoBERM1LCw0A5TAQvnCUdSkrWaCH42N43PkxdDUKdCqDpE9FkNRdTUQJdr39GF7f6YfQDb54WE2tcDFHVq20qPybGgMtVeEHnnhuedW7Y9dw8cxxfP9DMUpPfY3cr1KRtn8/vj9TNQCsul1Grulx5lQRvistxonCHOTuT8W+A0U4U68AUA3qUEv1zZry7rqxjZWqVSttWguXzubFBN022cZP3tWrzTPPPFMZ+LVrZ4icWloVUJxOQdzSWUhW24tZPOSFJ56ZBJfhXnC0U388qXcAaBRUGSnSdsSijRMRuH0TXDqrfzvCuBqlVH2ZCtkGzrciAEQNVTCNGG8vatt2QcmXdwcCwHwkjh+Gbb2DEfhnd3Qw/FGlk6k+0O4MRpgIfh1E0K1LmIro6B0wDD9oDwfvsXB3m4RBw5xhca+SWIv6He9aj2UzxgCQiKh5uYuGgfDD6CdMgz8TVb6Arym9qcQhXASSmiqP19fvF/OScMKkk5SbaYM26vJP7FkIzdM94fL0U3hRLOudj1Jx0dIH7o8b5ldT9cLgt+PYtdwXw1z6YNg4H7E9C/HRF3o88LQnalvEbfvtivqCyLw9/vjjeOedd6DT6fDJJ5/glVdeqQz+WiRbL/hFF2DtxjhM18xAn/ZHsTtmKiImP46ILfkoV7PdOZZoVeWO3R33W2MPkyDWJzueORiJ6EAfRFR5aHfKPEdQqkR8bWHvG4eVO79ESGgUvL174XLWamxYMAwBUwKQc4cH0L+uPhMREd2Ou68X0DqxxsP9rMWzrO5YrIybV9PD9G7XcZSeV19W0qP02NdA177oIW+vHYnB3DkxuDb+Q+zJOK4sY8/mBCwJmorRJtWmalcY54/XP7iKCetykJMjtyMH/1q/Em/5PXvTKqY1s4CVvLu46xC+q+g+1MiJHw6K/5+ClaVhmsic9OjRA3/7299w4MABfPnllwgODkavXr3UuXeDtrBwGAuPaVEIjDmMzTszoBlRjMKYrSgyiiAuX6kSWF2/isvqS1PHcKGGXo8vy6EUHexh2dGQcvssYCnbDO7Nw8kaIpzSY7K3TndYyPLK2h4u4qngB0PPosb0P+epr0zVfT8r2KO7l3iS1VnTL2FzjY8qd+gs+sLRcwb85iUhcvsZrF36BuxParE7q/p23kx7S1m1cwd0x2oIek8eQZF4cuzcDBpkExFRi2SmASDw8AA/dEUcPvzkuJpS4Th2vv0Yhj2/FFlGwzYA
X2PnnoOVwzAoTu3BR/Hii/jlkUpwdubHHOSK56F/9ESP+41uRf6yH2l75ItiXLzpcAol+D7fEJS5yx5EjRZx8cBu7JIvSi/gopIi3KtmqLWb8TYY9NRssZ8x+HBXlf0sO4iPtu4HBvhgUJU2kER3u//85z/46aefsGLFCri4yFDibpKP3fNHYk50iumdPhGc2CmdsrQ13K1Tg6jCoqMysZI+KxEZ6mtT+chMzzZd5vGtSN0uQiVPdxEu/V5t4TA0WISBq7F7b5WA6Uo29m1PA5zGoY/s+MTqCTgPFdu+/X0UmVRk0CF31w71tare+1nBBvaDRASYvh6ZBVUCMdnZjH9HzJmvxSkxWZYRggiNL3abFLMiALfvq/T+2eGmd0fbAp3F06+GKanVAC+ME2nJOyuqlFa4iqJPY1EIV7j0bxnt/4iIqPkx2wCwzRNBWBPQEzvf8oFm0SbsOrAfWXvi8M5cX8zdCTz56ksYWqXf7sL1Im+oIW/a9oXQTPHHrgHzETrR0ItnVydPyJZFMX+fg02fpCLrQCp2vjcfMyb7YtsZecdRBIAmQWVV1iJg8xTPq/BOaAx27hfbtP9jbFr8Ep6bHocSeZfxwsXK6j9dbfuK/5Owc2eSWFfRjcDQSJsnXkLY+J7Y9faN/Uz7ZBVen+iDmDNPYXbwS9XaQBLdzYYPHw4fHx916m7kDIdBQGnCLCxfvhqZWWkozN2BfWtewtotgMNr46BUSFCDKF3MS4jeskPkSUGm1g8Lo/JhXWONBWe0OjhNLDMWOblpyEkIQMQbQchzCoVG9ip6B7Qa9DJe8bZHzpKRiFDXk5caieipI5F41h0TXnsZhibT9vAI2gS3s5FYFBSA3eliH9NjsSFwJLZ9W+XOWL338wYr779jlpcO2/yHIFobjzxle1Zjwzw/bCtwhceUScr2tO/vDjt9CrRv+SEuWa5Dzbc4AIWdZ8B96M2CNVtYycOXuRX7xHulKxXBZmt3eARqYLV3KhYGqvuXFY/EsCFYpD0NxxkR8HjI8NdERET1ZbYBoHJ3bGYckldOQ4+fNmGRbP83ZyVy4YnQDXuw4k/Vb4vNXrcfL1nuwTsi74wNOegxaROSY26MF4ieflixZSVe6pSDjW+9BM30+fjwcBuMjvgan62ahq7IwffHaqiLaaTH+HX4d/hUPHB4HebOEtu0bCNy2/tgxWcHsWaSCCK/OnqjOueA5xH7iie+XztVrOtDFNa46J4YHZ5q2M+iGLwut335R7g2dCm08QnwH1Bbo0kiaqnsJyYhcvFMWJ/Q4sNg2WZtDpKLbDF68ZcImVgRrIkgKvRLzJrcFye3+ok8f8Pun4dilnYjRlWOdWOsF0bNT4BHxxTEBfog+oMsWD4Xh0jjYRh+N3u4zMswbPsPq5W2dyvWbEW5SxRCNiVhnJNRj5i2EzFLpPn1OoLkBWIfo9bjQr+leHveTDVDhfrupzF7uM3LREjgaCArHCuU7dGipNtMBGo/vrE9Fl7QaPdA4wIUfSDXIfJtTMRlhyiEVY5xWBsbuEyJgtuvidgg3qu4rNNKqtWItViijcPobur+Bc9HxvXR0ESL7Zni2mLGACQiouanxfUC2hQMvW3WZ8gGakjsBZQaW4vrBZSoGWEvoEREzYsZ3wEkIiIiIiIyLwwAiYiIiIiIzAQDQCIiIiIiIjPBALAOuv5pUw3jAhIREREREbUsDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiojujIBJThndEYoE63VhK4xEt1hudWqwmtAxF2o7ieEWiSJ0mIiJqDAwAiYio/i7pkLclEjml6jQRERG1CAwAiYio3vRZIVgRk4dydVrhFIzN6Zcwzkmdppty0FwSxysYDuo0ERFRY2AASEREREREZCYYABIRNTP63Fhog/sr7ekCNFORmKVDaaqfSXsxvTLth8yqVTBrbA+nx6n0SGwIGok5Yp5huT7YoN2BU1fULFJFG75DeuiSg7BC
87AyPcc/AMm5N5Yn264FhO8Qr3Zgna9YXli8WINg0gYwG4nqump+mG57+fEdSFxWsX0PI3h+CDJ/VJZq6tJRZMb4Ini8zNcfi5atRtFZdd7NnNQiQiy7xnaCRauxQMzTZlSs76rhePkb3oMp4wdjRUw8Tl1SZyuKkRkm5mmzUbo3wLA9k0di28GrytyyH+MRF2a0P4Gmx1CqsQ3guTTsjqrYP8OxT0w/ijJ1tqKO71MFfe5qbAgcjABlW8QxC6vl2BIRkVlgAEhE1IyUpk5FSGAQ8tq9jFmRSXj1z/2gix6JdQnH1Bz1dRW6zc8jeMH7uOzwMvyikxASuQk+/YHcjX5YEptiGlwIeTHPY3nKNThP24SQxVFwav0p4gKfF4GdIbixG5GEQI27eOWOcYvE8ia7o70yx1g/uMh1VX3Mm4HuYq6F13g4WBlylouAJmKKH7bl28AjNEFs31IMay2CS82NdSquiKDyzcFYl3IVff4cJ5YXAbd7U7Bq3nx8q2aplZ073IYCObs+RdWYWXdwK3Sdg+HiYiGmrorAzEccr3AUWBveg7mvTUKr9KkIflMEa8YBs5QVgnXxbeA9TxyTFybByaGtElAuF4F7UbtxmCD+PiR6JUZ1OyKOoZtRkFmdchym+kCb0wbDXhPHIToO3r112LVgMJZHpxiCbCO3ep+ksowg8XmKxgWHN/CKPP6RoXC8vlU5tsnsfYaIyCwxACQiai6up2HfmnjoR4gL+kXBImBxx0DPYASuXAqrn/PVTPUkgqacjGLY+3+IQH8NXAa5w3HoRHi/GYdXvAF9Qj5OqlkrFFnPxMLotRg1XOQdPgPTw1fCA/nYdtCwDe17uqNPLxksWcCuv8jTzwatlDnGLNBdrsv40a8tinbG4pRTKGYHTYQh/svH7qhwFDkEI2xTHMZ5einbN27RfzDLS4dtb0ei8LqSEaV7I7GtwBUTliRguu9YscyxGPVmAmY/Z18tOKrOHoNGjxUB2w4UmOxwNvK35sPC2x0OrcVkUSy0G7Nhr9mDSJP3YBPcfg7Hqg/STNs9imDLJSgKHiKfi+8MOIrDosuORhGC4TfvDeXvle2ctw6aIe2gO5RVLeA20CFDG47CLkbHQdm/JCx8eyyKEmZhV65RMCzc6n2SdykL9sVCP3wpNK/deO8nhK7DqM4XkFdwm58pIiJq0RgAEhE1F0fSkHgO8B4zVg2OVLYTMeo5G3Wintq5Y0LMYSye7KwmVLCAZTf1ZRUubk+Yrt+iF7rLnkpOFNch0KqNDplRftj280TMCguGQzs1+UgKkooAx/Ev30hT2MNt/ExYnItE3iEZ+OhQlJkCDJ8JN6e2hiyKtnDwmgkXdepmLFxeFAFSCjK/0qkpQHluCnadc4bPCHcliJXBmw7u8HzG1TSoVd8D/eYUFKkBqcE4OFTpxaVD54Hify0SRd5T+oqgrS9GRR1GmL9XDXdLhZNpyMyq6TgAViNmwlsEc4lZ2SbB563fJwt0sBKfm/T1SE7ORmnF3ct2XtBs/xIh46t+JoiIyBwwACQiaib0JUfE/+7o3s04wDGwtnNVX92uqyg7dxRFuWnISV6NuGU+ePff6qyq7q0SgcAWlj3E03XTO1B1J6tVTsO6FHtMWLIObrZqsqD/+YgSrFj/dgyFYttMHiVXYSfmlZ49Lf4vRmm6eHLoZRr0SLa9RLhYBxaj4TYZKNy+QwR5ktiuHC30DpPgqARxYh3HZBs6e5SXVNkW8bjwaz8xT4cLIkivNNwelurLClYjQqEZ0Q55sb4IHtNVaRu4IUaLnKIa2h9WuFCMQvHk1LuGPWndD/YjxPPpYtO7h7d8n9rCcew6eA/SYbdsX+kl2376QrslHoUnbz+UJyKilo0BIBHRXa40PQQR47tixvjBWBTog7idKSizGIeBjXQDqDR1FlZt1MEtdCPGmdy9E34zBCv7onwQIbbN5BEWqQRFOSLwuTPawmFoMCyKtqJQtn+7no28zcUY6Du2
MoAsV+7uaaGtui3iEa1NE/N2iIBU5lHdK+Iz9WWlds4YtegwYjcnYVbgG/CwvoDcLQGInvYw5izbcRt3Ua+I46S+rC9bL/hFF2DtxjhM18xAn/ZHsTtmKiImP46ILfmm1VmJiMgsMAAkImomLHoMhAXScOrn6nfazp+rqRMYPa6bVEcUrl/BZfWlomg11i1YjXLvBKxMPqOM07cyJgnT/WfApbeapwHJTm0WhsfDelocXvWsfnfLsM/AuDVyTLxaHhp599MeVvIuWP6Rap24oPSYekfv1loN8sLozvnIPJiPspxEJMMLQ5wrtssG3R1kVdtghNW0HeqjruMcyraSbr4RmB79Jdamfo+5k51RmrweuVUbXUqWNnAUTwU/1LAn18X+ybuftjY1Vx+9pbawcBgLj2lRCIw5jM07M6AZUYzCmK1VqrMSEZE5YABIRNRc9POCjwOQvHOHaZBzJQ2ZcaYddlh0k+3M0vDd96b3k3Tp7yt3zSroj2Upwww4DvWCVUeju2+X0pAngwqcxmWT4Q3q6F51WTe5MyV7tVwngj94bcIsEcRV7yhG6O0Kj87Avp3x1QI7GTwGjB+sDq1gAwdXL+BgLPYpbQJvKM3aihz19a25wnmSM3SpW7E9Oxbw1uAJWc9UZeekEQGpFrtTqwZiOmQueRgBmnAUVu0J1IQeeTE+CA6OxSk1RdHaBnY9ZGVRC7S+15BkQu2ltHD7+9V6Gi3du14EqjYYNWhgzcewVvnYPX8k5kSnmN7ps+gLO6Uablu0qnb7koiI7nYMAImImg1njAoKhcPeqVgYGIJ9WWkoTF+N6KlTkaHmqOTginEycIryQ1xyCgpzd2D3ch8sF0GdcZ8kFo96QYaKiTEBSE6V+VKQuSUIK171wb6z8m7XaZTdNKCpmYW1bA+3A5kpO8Qyq4xTJ5XuwLtvh4vg0wujPWxQWqU9nXzoSkUg19od45eEwjpF7nMAdqfLeTuwL8YPETJ4dHkLHkMMwaaV998xy+s0El/zwYYEuV7DPi/cmKfcRawr++Ez4Fi0GsnbAQ830yEsWg0Kxuxp9sgMH4mI5bHIkduarkVc2LNYlwI4/fllOFZtemfCAn0G9MXlrCAsDxPvobI/8pgHYN3yNFiM12CgURvIG+wxTBMKx7ORWDTVD4nKe6Xu35IdYt9XwntoffZScobDIPFWJMzC8uWrkSk/T/LYrnkJa7eIz8lr40w+K0REZB7qFAC2b9ceV67W3HE1UWOSn8N24vNI1Jg6dOiA8usmFSsbTCunYIRsjsPoLinYFuyDiJg0WE1LxqvPqRkqiMBpwqYk+A29ipxlvohYtAj5HTV4OypCCfgq2WkwK3YtRllmISlc5Av8G3YfaYshC77HqiUzRbiSjZMnqlc5vSWniZg72QsnN4pALVALXdWqhCVHkKl0liL2Y3719nTykZhvuHsp9/lNrdjnbjrsVtoCigDoEDAwMAkR8yqGi5Ds4TYvEyH+/XDyI7neOdh9zhkvrV6H0WqOOlHvtgFymIaqQVVbOGg+RuTimbA+sR7vyW1dsFwEsqOhic7ErBqqsVbVflgUlq+JEu9DNhIXyP3xxbb087AP3IPlQbX0Aioo7714TzUu15CxRr5Xfkj+wR6jF3+JJfOq9AxbR/YTk9R90eJD+XkSxyy5yFZZZsjEhm8EWn6tDG1ZZhMRNSv3/FdQX9fqfze9JzK2g9sTsgEGUdM5dfonfLJ7G96JXKamEDW8gwcP4sOP92DIn15TUxpfkbYjFm0MRVh6MO/aUIuhLz2G7794H39fzjKbiKi5qNMdQCvrrjh/3rjbM6Kmcf7CWXTp2lWdImocVlZWuPpLta5HiOgWyi6WomvX27l3SUREDaVOAaC1tTX0F40HPiJqGjIAtLHmxQQ1LhkAXtYzACSqr7KLZ9DNlmU2EVFzUqcAsHfv3vjpxI+4dv2amkLUNIq+L8Cjjo+qU0SNo3379ujc1RpnThj3r0lEt3Lup3w4
Pio7DCIiouaiTgFg165d4er6JA4XfqWmEDW+08Un8V/8H5544gk1hajxuD3pilNF2epU43PQyDHo2P6PWo6LZ47jvj/8xjKbiKiZqVMAKLl7/BHfHM1Vp4ga3zff5mH48KfUKaLG5fbkUJz8NhtXfmF7aKK6KPkhB0+7s8wmImpu6hwA9u3bFw9Y3o/M7M/UFKLG8+13BSIAPIRhw4apKUSNq0ePHnh88GAUHdyuphBRbUqOHcKZ43mizHZTU4iIqLmocwAoTZs2FVkH03Dk26/VFKKG99tvvyIxeQumT58GC4v6DoRMdOcEzPIHrpSi4PM4NYWIqvo/UWZ/ve9/MfPVqSyziYiaoXoFgJ06dcKcObPxr4SNagpRw4vZ+A4mTZqI/v37qylETaNNmzaYFxwE3eG9KDnOH8KIarI/PgwvTpzAMpuIqJmqVwAoyaqgvr6+WLh0No6f+FFNJbrzrlwtQ+x7f0e37rbw9PRUU4malvwhLDw8HDlJq/HTN/vVVCKS7WPTt4Sg14PdMHr0KDWViIiam3v+K6iv6+Xo0aNYtWoVXAe7w831aTWV6M74QXcU/975IYa7/xEvvfSSmkrUfFy+fBnLlq9C+b2WeMzjFTWVyDydOJqJr/dpMenFFzHm2WfVVCIiao5uOwCUzp8/j40bN+Hc2fN45CFHDBk0DG3btlPnEtVf3tcH8O13X6Ps6iU8//xzGDJkiDqHqHna9N77OPhlLjr3cEa3R4bA0vohdQ7R3e3X8qs4VZSD0h9z0NmiHaZMfgH29vbqXCIiaq5+VwBYQd4NTNv3ObKzv8CDPR6CxQOd0alTF3Sy7IJ2bduruYhM/fprOc5fOIvz58Xj4lnodEV49FEnDHd/iuNGUYty+vRp7E3bjz17UtHRwhpt7rdBewsrdLC0Reu2HdVcRC3b/4ky+7K+BGUXS3H98hlcPPsTHnroYTz7J0+29yMiakHuSABY4cyZM/jxxx/F81mUlpaipKREqSZFVJNWrVrDxtoa1tZW6NK1C2xtbdG7d291LlHLU15ejq+//lop/06dLkVxsbhYLmMZ2JCO6XRISzMMT/SyZqryTA3jvvtaoWtXK9jadIW1VRfY2NjAwcFBnUtERC3FHQ0AiYiIGtO+ffswYsQIeHh4YO/evWoqERER1abevYASERERERFRy8QAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMBANAIiIiIiIiM8EAkIiIiIiIyEwwACQiIiIiIjITDACJiIiIiIjMxD3/FdTX1IA+/vhj+Pr6qlNERHQneXh4YO/eveoUERER1YZ3ABvJ
119/rb4iIqI77f7771dfERER0c3wDmAjWbRoERYuXKj8Sh0WFqamEhHRneDk5AQrKyt1ioiIiGrDALCRGAeArKZERERERERNgVVAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBAJCIiIiIiMhMMAAkIiIiIiIyEwwAiYiIiIiIzAQDQCIiIiIiIjPBALCBFBQUmDxKS0uV9MuXL1ebR0RERERE1Bju+a+gvqY7zMbGBiUlJepUzb766isMGjRInSIiIiIiImo4vAPYgF588UX1Vc2eeOIJBn9ERERERNRoGAA2oEmTJqmvanar+URERERERHcSq4A2sAEDBuDrr79Wp0ydOHECdnZ26hQREREREVHD4h3ABlbbXb5nn32WwR8RERERETUqBoANrLYAkNU/iYiIiIiosbEKaCPw9PTEnj171CmgQ4cOOH/+PFq1aqWmEBERERERNTzeAWwEVe/2yWkGf0RERERE1Nh4B7AR/PLLL7CwsEDFoU5JScHo0aOV10RERERERI2FdwAbwf333185JqC9vT2DPyIiIiIiahIMABtJRTXQWw0OT0RERERE1FBYBbQRdevWDZ9++imcnZ3VFCIiIiIiosZzxwPAixcv4ty5czh79ixKS0tx+fJldQ797//+L/7yl7+oU9S6dWtYW1ujc+fO6NKli/LcUpSXl+PMmTPKQ37OZa+uRHcLeV5aWVmha9euyuNuwu8ooqbRvn17pVyR3/fy8cADD6hz7l7G1wklJSVqKrVk8vux4jMsr1tbaqeOdyQAlBfD+/fvx/7PM/Dd90Xo3KkrOll2gaVFF7Rt007NRT/8+B16P/SIOkW//laOC/pzOH/hrAigzqCTOJGGuQ3DH4c/pZxYzdEPP/yAlNR9+HxfKjrcb4n2D1ijrXi069BygleiW7mkP42rv5Tgsr4UrVu1xkhPTzzt8ccWGwzK76i9aRn4fH8Gjv3wLTpaWolz1wpt7++K+1p1UHMRUUP69fplXBHlStnFM6JsOQO7B3vBx/sZPDXMTc1xd5AB3+7PPse+z1Jxvfw6OlrI8kZcJzxgq+agluzq5VJcEZ/hsouluAf/B9ehbhjh/hR69+6t5mgZfncAmJCQgM8/34+unW0xwGkI+vV5TJ1DVD8nfz6OL/O+QNF3BXjC9Qm8+OIkZczE5qCwsBCJ/0nBTz+dRJdezrBzcMX9ne3UuUR3rzMnv8HP3+XgzPFDGDTocWj+PBkdO3ZU5zZ/8jtKBn+t7++GHn3dYPPQQHUOETWlU6JcKf7hAP5Q/gt8/uQJd3d3dU7LpNfrsTU+AdkHDqBrzwHo/sgT6NrjUXUu3Y2uXr6AY4f34uxPeej1oB3GjfFG37591bnN220HgFevXkVk5HJ0srDCE4OGo1Onu6uaEDWd8vLrOPBVOnLzszAnaA4eeughdU7TiN/2EZKTP8FjI/4C24cGqalE5uXX8qvKF93PR9Pxtzlv4JFHmndtBvkd
tfSd5bivgy1sHx2l3PEjoubnQokO3x+Ix0O97PD6rBlqasvy9ddf45/r30XP/iNg6zAc97Vuq84hc1H845fI/+x/MX3aVPzxj39UU5uv2woAv/32WyxcuBDPPesH58eeUFOJ7qyi7wvxYXwspk+fjqefflpNbVzvLI+C7vgpDPYORLuOrOZJdP70d8jdFYMXJ03EKM+mOS9vpeI7asAIDR7s95SaSkTN2XcHE6E7lIqwsFBlyKyWIikpCR9++CGGjpuLLt1bxt0fahiyWmjerrUY9Fhf5dq1Oat3AFhWVqbs1MuTZ6G3fR81lahhXLt2FUuj5iEkJAT9+/dXUxtH7Ib3cKigCE8+H6KmEFGFg/9ZjhfGjcYwt+bVfkd27vL666/DdWwQutqx+hVRS3Lmp0Jk/2cl3n333WbTBORm8vPz8c4778Br+j9wXyve9SODA4kRGP7k4/D19VVTmp96jwMYHb0af3zSk8EfNYo2bdpi6pQ3EBER0ag9aO1O3YOs7GwGf0S1GPLsm9C+H4dDhw6pKU3vypUrWBi+GI88/icGf0QtUNcHHZXz9+9Rq9SU5kvWNJDB35Pjgxn8kYknxoXg012pyMrKUlOan3oFgP/a8i+0vq89Rno8q6YQNbyeD/bG6KfHISbmn2pKw/riiy/w/vvvw/npaWoKEdXEedRfsWZtDI4fP66mNK11MTGwtHkEfV2fV1OIqKWR52/5vZbQvv+BmtL8nDhxAv9YE4NHn5yAzt0c1FSiG/p7TEPsuxtw7NgxNaV5qXMAePr0aWQfyMGYZ15UU4gaj5vrCDzQsRMyMjLUlIbzn+QUOLpNYu9dRLdgaf0QHh78LPbuS1NTms6BAwdw/KfT6PfHV9QUImqp5Hl88Ms85dqzOZJDn1naOqD3wNFqCpEpeQ3pOPQ5fLY3XU1pXuocAKbv+xxO/R5Xp4gaX79HnJGetl+dahi5ubk4f+EX9Oh3d41LRNRQbHq7iIuhTKUL9KaU8cUBdLEfok4RUUtn1dsVqXsb9jv/drG8obro+pALMjIzmvz7sSZ1CgDlILqZX2Si/6PsAp+azsO9++HUqVM4evSomnLnpaVnwNbhSfzh3lZqChHdTKs2HWBj/xgyMzPVlKaR+2UOuj3CXqmJ7hbdHn4CWeLaU16DNidyXOBff/svrB50UlOIaia/H617Dmjy78ea1CkAPHz4MDp2fACdOdYfNbEB/Qcrd+kaguzh9mBOtjJYNBHVnfXDQ5H5RbY61fhkT3xW3XtzrD+iu0h7Cyu0aW+hXIM2J3n5h2DT21WdIro5W4ehyMhsuu/H2tQpACwpKYXF/RwDjZqepUUXnP65WJ26s2T38a3btlO+cIio7mTgVVp6Rp1qfCVi3fe15w+URHcbeV6XlpaqU83DiVMlosxjeUN1o3w/nmm678fa1CkALBUBYKdOXdQpoqbTybJLg51IZ8RyW7Vpr04RUV2169gZl37R49q1a2pK4youlhdkvPtHdLeR5/Xp4uYVAJ49W8ryhupMfj9ebsLvx9rUKQCUF8adLPlrBzU9+Tk8e7ZhAkB5B7BV60YKAPX5yNkShGj//pgyvKN49MeisCAkpx9FmZqluSvSyu2ORJE63Vj0qX5ivX7IbF7XBNWVxiNavLfRqfW/Y91i9tHIA52slO+KplBc0rwvyMrSAwznebAW9X5Lq32OspEol6VtflWKzMrvOL+bq6Yq029GntfyDn9zcl5cgzAApPpoyu/H2tQpANRf1OP+jg+oU0RNR34OL4rPY0OQvTTd16qNOtVwSjOCsGjMMETvOAbL4aGYG52EkMhQuFgcw+4Fg/FmYDgKm1uHUeeysW9NLHTqJFFVHTpaNllPZ3K9bTo016rbOhz4jxb2Tq6wyNqBgpNqcnN2SYe8LZHIaUE/QLQsepRmrUZi+t0TPDYUeV5fvHhRnWoeZG2H5lveUHPUTnxemltPoHUKAInozigviMS6+bEo8dqElZsToJk8EQMH
ucNx6ER4ByUgcvNaOB2LRMS8SBRdUf+oGSjaORIb4i/AuC82B80lbE4PBofAJbqJk2k4mAU4Tg6GR+cUpKbnqzNulyvGpYtzT9NwnVDos0KwIibP5HynKqwmIlC8D4GeNmpCPZSmIC44BLrr6jQRUSNjAEjUaPKxe004ihyCMTtoIqzuVZONtOqpwasL3oBFQTjidv7eC0Uiamq69Fjk4Q0MHOoOJ28b6LYmopAX/kRE1IQYABI1loIUJBUAA31fhkM7Na0GrYbMwIQhQFFKmlrlshiZYR0xJSweVSsQ1NhW7NJR5GgDEFHZvvBhBAcGIDH9qMkv+pV/e1qHPJF/0WQ17/wQZP5YsSbDuhdtlK/Dscio7ZFJe5GCSHVdtTxMtv0qTqVHYkPF9o0fjBUx8Th1SZ1tpOzHeMTNH4wAmW/ySGzYko3zv6kzb+rGMTt1vMoyEvKV41B2RAttsGEbAjS+iMuoXsG17McdSFw2EnPk3yrHJgi7c2uotiWOeWaML4LHy3z9sWjZahSdVedVqr3tVt3a3tTxuP1WjMIt4v3XPGzIJ/Y5upbjSw0tH4Wp+bCY4gWH1m3hOCIQ9ucikZlVS1Wg2/oc1a98UM6pMKPPtCgbko0+0/KzGBC+Q7zagXW+NS/XQF2vspyaH4mivKukz8buqIp964g5/qbrVSjliNje49lIVs5NsX1hWpxSZ9f5fKxKLZ8SD+lRtGWqug3i78MjkXP8qprJyLm0atsqy0+T9tk3aZuplC0m5VYa9BXlltwW36nIES9zwuU5equ2vnrDeR9Usd+yvPLBBu0OnDKqJVK38vwGfe7qyrJEKf9Sj+KyOo+I7n4MAIkaSekP4iIANnDoba+m1MYeds7iqSgLp04bUursej4S3xyM6E/Pw35MBEKikzA3NBB98Cm2LfBGXLULz2PYvWgktD/0wqhAmXcm7HSrsU7jr16UWMBpchI0Y+RrDTSyveKIfnLCVI9xyrqqPqaP7ytm2sDtj0+IJUlXxQWmD4IXhKPA+mXMihTrfG0SWqWLi7I3Tau9yuqyyzVTkVE2FOMXi+X5TwKy/LA8ph6dX5xYj3ff0qJsSBgCouOgEcd1X/TzeHdNEJYvSoHl6L8jJHIthlnmIXn+NCQbRWClewPwtsYPu37uB2+5/silGNZ6F7SBblihBpGKK+LCTxzzdSlX0efPcWK/I+B2bwpWzZuPb9Usv19dj5seedFuiNh6GvaTVirvwdxp7ihPkflWsw1nIyvPTURSkQ1GD3NHK5ng4A43B/EZzEyr3tlTY3yOilYr51RRu3GYID5DIdErMarbEcSJz7Q2w1A22I1IQqDGXbxyx7hFIs9kd9TcNZahbKh6zocsDcVAOdspFI69lYzA6XhRpohyJr1i3+Lg3VuHJLHedalVP5WiTFo6DYUDRPklzjmPPw5Ed5Fa5/PxJvJinseqrHYYFST+fvFb6PNzOKKn+CDxyI0gUJY7EVN9oM1pg2GvJVRu664Fg7E8OqWWYNhIVgiWz4tHeUWZ83hb5Mb4YPnmbMM2yrJyUTAcxUtHjTwWb8Gp1uZkV6Hb/Lw479/HZYeX4SePb+Qm+PQHcjf6YUlsSpXP0a3Kc4PS1KkICQypLEte/bMrSjd6491/qxmI6K7HAJCokVw4lyb+d4VVHUZUsbYbK/7fgdJqv/7fXPmhRORdcIZfeBz8vMfCcZA7Bnq+genhK+GBYuw+ekTNWUFcOA2JQ6S4IHEbKvMGIzA8QoSgO5CTL3/ZbguLfu6wt5Z5e8FetlfsWcPVikVfZV3GD4fW2UjbfhQO0+Lwqqca9BbFQrsxG/aaPabrXLkJbuJibNUHaeqFnA4Z2nAUiYvI2cvWYtRwsczhMzB9WRxGd6vDL/4Viq7CcX4CpvvKYzFWXPiFwlveuYg/jVEr4zDO0wuOQzXwm78ULshG0Q/qxej1NBEoalE6YhMWRqvrHzpRXBAfRJjGVgRZ4chQO/Mo3RuJbQWumLDE
aD1vJmD2c/a3vlisq7oeN/EZy9xeDBf/vxu9/6EInDcDFmUpKGpO3fvd9UTQnqOF3iEQzk5qEpzh4usFJGtxoEpnMI3xOdJlR6MIwfCb94byGVLWMW8dNEPaQXcoSwkm2vd0R59e8hy3gF1/kaefjSF4rcZQNpie971wYd965HWeiFlhwWpNBz1y3puKzHNjMeufSab7Ns0emeF/wz6TY5GPol4RmDVF5JNtoz2d63U+3kwRvEzLk6g9GOeQjW2btqq9sxrKncIuwQjbpJYPyrYmYeHbY1GUMAu7cmu4Y2iswAajTPYzDq94iyVvVGt0yLKyfz90EC879HIVeZxh0VrOqMGVbORkFMPe/0ME+mvgIo+xPCbqMvUi8DXd7VuV54I8lmviAa9NSiB6oyxZisqPKRHd9RgAEt1FWg0JRdiWDHhX7ZnFwga1dVrt7OJqeoHXra/Sscu3p+t7+9HI6Xi8+3a40tnNLM2N5csLUB3c4flMlXXaTsSo52yg3yyCFNk+6vQB5GcBLuMnmlaXbecKt/EyOK4rLwwc0FZ9LbTuB/sR4nn4eDjYGpIUtr2Uuww5pw0XSeWHUpB4DvAeM7bKcWsLh2dmwBEpyPxKXs7pUJSZIpY3E25ORuuR+bxmiqDyzqjzcetoAavOYj+2r8a+Ah3K1WpnrYZGYa02CaPYY0/j0e9C5uZiOIrPq/E9fyvnsRgoPj+mncE0zueoQ2d5b06LRPF5OaWvCGT6YlTUYYT5e9Vyp6+u5F3qaViXYi+C2HVwqzi/zqUhJ1k8Txb7YXzOyX3zlvtWcS7d4DLE1WRb6n4+3pyH2Iaq5YnHeHcgSwRnYvmyw55M2WHP+OrV9K1GzFR+PErMUu/k1aZq2SICaaveskrHEVyQ66iPdu6YEHMYiyfLvzdmActu6ssqblmeH0lTjqXHmImmx1KWJUpNDyIyBwwAiRqJZWdZrSq7Tnf1Sk7KNjjiYqcOdwtrdF0P/fFsFObuwL4tIdgQOAu71FlVta7aGU1HS1iKJ/1vt/iluzayKtuiqcjsFmro7EZNlm2GSo/JAMse5SVpYttMHxd+lVVLdYaLpLM6pY2MfY/q1WWteigVzOrI+GLaiNjnqj+6G0+XXZAXk+LCvVcNf2/XT/mlvPCc3BexT+niyaFX9QBbBJW3quxbN/U4bq3d4fHmG3D8OVZp3/OXEYZ2UsmpaShlG8BGVZafgn3iuXC52maz4jE5AHki3bQzmMb4HMkgJhSaEe2QF+uL4DFdDe3TYrTIKVLvDv0OpamzsGqjDm6hGzHOOIgtOQLZFNCxtR5FVT67hScuKHfCCktM19/d2rRnzbqfjzdn1aV67YUOnWU19Xic+lk8XShGoXhyqqmafsWPR6eLbz5Waw1lS6fOvcT/Vyt/kKm/qyg7d1Q5fjnJqxG3zKfW6pq3Ks/14v2Qx9KuhgDSuld9flwjopaMASBRI7Hq7Q4LcaFXWc2wVjqclDcHOg+ElckvyXXwmwic1vggwNMOAVNGIiIwBGk5erQf7oU+apaGpUNmlB+2/WxcBeyGcuWCVwttoI/YNtNHtFZWka1/tdem0NidONbnuFkNi0BI/PdYvHgtxo13RQddLOLCfTBnig+Sjdo6UUMyjP2HoaEIrNpGTj6CNMC5SOQdauT3o50zRi06jNjNSZgV+AY8rC8gd0sAoqc9jDnLdtx2VVNleJvweFgbV/euIIIeudxCrV+1z25EYJASJOP4MZN1VwtibuH3n48i4LzlOq8o+9LYStNDEDG+K2aMH4xF4pjF7UxBmcU4DKx6U5CIqB4YABI1Ficv+DgBeQnvVxnjT4d9YYZe2OQvy+WH3kfqQcDBz8t0jL1frlS70Ll+xfSSTZcwDdHxV+ERKS7yUuU4fYcRFrUWfr7j7thdhNqJ4G/JyOpVwCrZoLuD/GU/GGFyHLNaHuPkz/nW9kq1t4IagmX9z/L+ScNqbymP1g7ojtVwgX7yiNJbp2NnuS/2sJJ3
BfKPqG2IjJQeE0ekBmVXq1QhE9M3vaVQj+NWobUN7IdrMCEoDmFbLiF2YxRczqUhbu8tqq/RnaGO/TfQY6Kh3VbVh/dEeItsyam71LtJt/E5qqoO5UMF2c7PzTcC06O/xNrU7zF3sjNKk9cjtw7t6KpSOk3xr17du1K3fnATTy5vH67xc6s8Fk1Erf2gCHU/H2/uwoXqx6PkpOxUyh3dZTtnSxulc5aayh1cF++DvEtra/M7q8rWQ9FqrFuwGuXeCViZfEY5VitjkjDdfwZcKjrYqSeLbgPFsa75WBqOBRGZAwaARI3GGaNeC4VDUSRWRcWjtPLXZHs88cpYlG0cjDfn+yE6NBI6p1D4jan4idcGVvLL/uAB6IyvX37LR06yvPtToRinCgwXMwOH2qO9UT2kstwUpUolzl64efWlWrS6V15c3exuRUX7H1SvAmbEzkkjLj602F2t5z8ZPD6MAE04CmVwbPUEnIcChdurB8u5u2T12IbVaoAXxnUWF+g7d1S5IBf7+WksCuEKl/7yotQGDq5e4r2Jxb4qd3NKs7Yajnkl9SL/YJWOG87tQo44bjdT5+NWFIsV/v2hzTLdlva9+hqqFrZuW/0Cne44XYYc+88LQ5xr+dmltSsGThHnVGVnMPX5HFVV1/JBj7wYHwQHx1YOq6BobQO7HrKSoMWNO2/3qufvre54qW19lc6aTKp7G+k8AI7iXM7ZHl/lXDYEj4uG98eK5KNqSs3qfj7e3O5dKaZ/LztZSckHvMfDSSwfdiIwrrHcEe/D3vVIFsd61KCBv/8cEsdXWcZvVVZShf5YliG4HeoFq45GZeqlNOTJYBSncbm+VbsdXOFR07G8koacf//+qsDUlNShSGp89MeisDoOm3IbSjNCsEIZekQ8ltx+bQJqPAwAiRpRK6dgzFr6BuxypmLOFF9ot8QjLzcNuuJ2sOrvDH3GDuSdA1y8TTs/sR8QrAQA74WGYF9WGgrTY7Eh6HlxYeiq5pDUi0hEIm7ZamTKfFnxSI7yxduBWpyXFzh6/W3dAepkK9eTgsydKSg8UlxtGfq9sv2PCD6HzYRbl2Om7XyURz7018X+DwpWe/4biYjlsciR89K1iAt7Vgkenf78MhyV/baHR9AmuJ0VF4hBAdidLvOJfQ4ciW3f3vqX/t9NtqUL1MBq71QsDFTXL45lYtgQLNKehuOMCHg8ZMhq5f13zPI6jcTXfLAhYYfY1x3YvdwHCzfmiffMmPr+FIUgOmy1su95qZGIDpgDXbeb1+eq83ETF3eOIijcvcwXG9TPliHfVHHx6gq/4cafF2oY+SiUQcXQsXCyU5OqaQsHFxnU3+gMpu6fo+rqVj5YoM+AvricFYTlYSKf/EzninN6SwDWLU+DxXgNBqp37S2sZbvSHchMkdtRZfy7CnLIGdnW91xfjBrjivIjcnlVHsflZaB6Lv8cjkVT/ZCYKsoQdb3LleDxZYwbIdvh3UQ9zsebkn8/P9xQNqavRvTUkUg8OxGzXhmr3tWzxzBNKBxluVO5rer7IC5qrbxXwnvord6NOugsgm7xlLMnXjmXT9VytWzxqJcypEZiTACSK49bEFa86oN9Z2U5eBplN48hqxPHcvySUDgYHQulHJrqgwxRRtFdwGEixvlHwM/4oRmNDt/GKsOmVB965XcqlWPtrsaP9mqVdz/Z3IWaOwaARI1MaaO1+UsEju2FC+nhWCHbwgSHI++6uEBfnIHFi2fgwnJxob8xpXLw4FaDQhERHYFB13dgQ7AP1op57cckI+Q1GfDdYOW9CYvnzUCHb6KxTuSLiF6PonZjMWt7AWY/J77c84/g5G00mLEY9hY0nldxYLkvImI/xQU1vULJcXWw6Ax1f6o93kGBkkFc+Go+RuTimbA+sR7vyXkLlqMIo6GJzsQs4/ZDtuLCbFMS/HodQfICkS9qPS70W4q3581UMzQsqxFrsUQrh51Q1x88HxnXDdsZMsW4qps93OaJNP9+OPmR
bOc0B7vPOeOl1evEXpmS709k6Buw00UjWuy7NuEYHOZ/hVm+spOIm6nrcXOGd5T4bD1nj5JP5xvei6jV0FnMRODmpOq9w9IdZxj7D3D8o3vNd8RUrZxEUNMZRp3B1P1zVFVdy4f2w6KwfE2UCCqykSg/04G+2JZ+HvaBe7A8yKgXUKeJmDvZCyc3yu3QQldTmaE/Cp0y0PtR7F4ml1XDY6867Ix6LmtcgLyNogwR6/3wUx2sn4tD5PLqbYVrUvfzsXYub2dg9gAdtsvtjdKi3CUKIZtEcGpUXV3+SBeibOs1ZKyR2+qH5B/sMXrxl1gyr2ovpLfLFW5L38DAn8KVcmBfkeld30p2GsyKXYtRlllICpfb8jfsPtIWQxZ8j1VLZoqL7GycPFHL396E3Mc3tZswDIn4UHxeVojPS4exe/CmP38guiv0eAajJ78Bb+PHtCjM3SSHPSlG5pr3jTqgugNKDJ22eUwONlR5f4jhX0twz38F9XWtFi0Kx5NDPGHf8xE1hajpLFw6G3FxcerUnZOQkIDPsw/DZcxcNaUJnUtD8prVKPf9sNbqlETNSV7y3/GXP78AR0fZiqpxhS78H3R91Addut/iTtJdQVbzGolt0/Zgs4YX7HVSEIkp/uFwCf0egZ68y9WSnD11FGe+SUL4wv+npjQ9Pz8/+Pi/q041J2rZMGIT1tbSrlYXPwwL1pzGhNjvMU7e6L8T1PNrQkyVtuhU6aukFZj28oQm+X6sDe8AEjVHnd3hHZrA4I+IiIjuCMOQJMWm7Xv12dgd5Yvg8YY2fHP8A5Bcta2gDPKG+yHzeDaSg+XQNg8jOOwfSAoTfyOCP2mbv/x7kaeicelvxShMCMIKzcOGtoFy2BntDpwyabdqaLcYnXoURVt8MUe8DtD4Yd/xYmTKZYfF49RxWcV0MAIqlpGQrzRDKTuihVbZFvk3vojLuMNVW+9yDACJiIiavavQH0lD3k5DpzAudrfu9ISI6Iar+LFAHWNY9nornY7HOs1IaNOvos+f4xASHQfv3jok1dhW8Bh2L52GwgERCIlcCo8/DsVTk9VhbQSPIDnMzVtwkrce5XjAQW6IiN6FVk8vxdzoJAS+0A8l//ZD8JtByDPtzQmlCbOw7pAr/KIT8NIz42HfU51xYj3efUuLsiFhCBDbpnEG9kU/j3fXBGH5ohRYjv672Ja1GGaZh+T505Ase02iOmEASERE1OwdRcZSH6xYnojrYzbBbzirMhJRHV3XQ5f8N2gTYOjwSWnMqkfOe7Ijp7GY9c8kTPcdC8dBYzHqzQS107G/YZ9Jl9X5KOoVgVlTRL6hE+Ht6QKLfu5wdDC0YbdykMPcOMOitew1NxLbcm0xbs1BBGomYuAgd7j4rkXIPzfBpSAW7/47zaQzOV1BP/jNk20IveA2eSIqf94qugrH+Qk3ti0oFN4oRmb8aYxaGYdxnl5iWzTwm78ULsiuwzjLVIEBIBERUbPnDG+tHDfve0S+ORFWRsO80C04BStj6LH9H5mFvVMN1SWNH552WLBMi+uDghEwTe3w6VwacpLF8+SZcDEZt7ctHLxFmuz5+yvTgMpliGsdxsHUoSA1BRgyAx4DqjRjsRVB3GQRem5OQZFxRzQj3NGnxr5jvDDQeBmt+8FeDqc0fDwcjLfZthe6i6ec0xzKpK4YABIRERER3Q1qGgZiXhxCNn6PVdGhcKwItEqOQHbk69haj6KqQ7icuIAOYl5hiWlA1d26Lj+iFEN/UDw596uh19y2sHMYK551uGA8/EnPXrUMHVFLPwj3ilhQfVmBv4nVDwNAIiIiIqK7QU3DQHiPhaODjelwKb/JSqAiyNPK4V6Mhm9RHkHYJ/McP2YyqHtrEXj9XteNO6CpcJ/6TI2GASARERERkTnp1g9u4snl7cNKFekaH7UMJ3FzNrAYIp7yj6BKXy/CVZT+IDuisYdl/RdMdxADQCIiIiIic9J5AByHAjnb41F0RU1TlRdEYtHw/liR
fFRNqQ97OHl6AQdjse/QVTVNdXoHdm8BLHzdYc86m03KTALAg4gZYIO+tT2e9YEmdBV2/XhNzd/IDq1StiPmkDpNdBP6VD+YjLVjLpRxiDoiUTZaaNFujG9kXLWGqEKDnOOnUxCnjpk1ZfhU5JxT06l5KI1HtHhvolMr2lyxnKCGZg+PoE1w+zkci6b6ITE1BYW5KcjcEoDlb4ejyOlljBvRV81bP1YjgjFh0GkkvjYE0dp45OWmISchABF/FWWPnQavTFI7oqEmY153AJ+cjX9sSIDW5PEhVrzQB9i/FK9Pm42dp9S8DeWX40h7TwSb7KioQf3yyy84duyYOkVELcW///1v9RXdOSKYiPFF8rd9MWGxOlZXZ3XWHaQv0CJue746RS2XHqVZq5GYfvdfqGRmZqqvzJTtRMzalASNC5C30RcRgb748FMdrJ+LQ+TyYDi0U/PVVztXjIvKREjgaJR/Nh8rAn0Q/dERw3LfXVul11FqCuYVAN7fF48/8RSGmjw8MeaVlYj9+2w4nvkYy3bKrosazpn9CzEj6hBM7jUOmI2jh4rhP0CdpjvC3t4ezzzzDP73f/8XV65Uqd9ALY/alfs4J3W6xbKB26LbbVtx9/vmm29gbW2N119/HRkZGWoq/T46lO4VT2OCMW64HKurbwP8+p6Nff4BSNZXqfJFt6kJy4lSebc4BDrjbvrvUv/v//0/PPbYY1i8eDG+++47NbWlEkHX7bTb6+yOUUFxCNtiaPe3VpuE6Zqx6N5RnS/d7Pu3tnn32sDRNwpztd8b2hNu2VN9uRXbrHFVpyuon/90EYSqKQa1nRe1LYdqwzaAqjaDRmG0eD5TdBxnDEl0F0hJScHUqVNhaWmJl19+GcnJctAbImrOSktLsWbNGjz11FPo378//ud//gdFRUXqXLpt7GmPqJrDhw8rgaCDgwNGjhyJd999FxcvXlTnEt2dGABWuFCME/LZ8gF1LJES7Jxrg75zP64WEJ75ZCr6DpiKnRW1Iyra8OXqUbh9PmY8/5gyPXLKHGw6UKJmAnLX22DYW0niVRLmjjJadtU2gEbLy31vDl58VrZVfAzery9Emqyi+ttxpK33h/fTMt0FL4ZuQmGZ4U9vEH8bd2NbZDvHt9/bjzM1db9rBq5fv44PPvgAPj4+ePDBBzF37lzk5OSoc5uPsh93IHHZSMxR2uk8jOD5QdidW0s1nCtHlWpdweNF3vGDsSIqFkVVG4tcknn8sGiyXF5HBGh8sGFLGvTVPgd6FCUEYYXmYUMbockja8iXjUSljcpRFG3xVbYxQOOHnVof8Tc1t1fSbRkm5gUh75KagKs4lR6JDf5qWyS53THxOFU5/yaqtgGsmD6khy75xrbPkXchajpm59KwO0o9Xmq+xPSjMD119NW3TxzXQpP2Ujfa5pw6Ho+4+YMNg+7KY5aQj3KRo+yIFlq1vVWAxhdxGcaD6VZt23NjuvR0yo33v9Zjo75X6ns6JzgEmT+K9/kubS9UUFCA0NBQ9OnTB08//TTWr1+PCxcuqHMbwW/FKNwSgAijcyO6pvdF5qt6Dml3VMmnfr6CKs5x9ZyU+W5ZSeH2zh1De8KR2CYnNo5U/raynZkoH3K0Yt8qlinLnEDDeSE/xyZudv4o7ddM12HcVrcu5ZphOyNRdHoHNijHsD8WbUyrvh1G9Lmx0Ib5VG6T8t7UVA7W6bwW6lJG1Ol9Vs9pbTZK9wYYlifybTuo3h1VyuWK9Yj9XLYaRWcNs25oonJClqu+UyG/HXPC5T4al+11PI4t2GeffYYZM2YoPxpPnjwZ27dvV+cQ3V3u+a+gvq7VokXheHKIJ+x7PqKmtDSyExgfrBodg4wVz6Ormlrh2qn92LTUH6vSemL2lo/h79RGpMoA8DHMRfW/kQHgsLeAFbs3YYwcE1MGbFOWYtCAIfipbR/MmuyLh/Etdm5ZiY8O2GD25iT4D2iDiz/uR1bySry+HvBfMQdD7fpgkJM12qh/
P3uzWg3UaHmltqMwe/wQPPDL5/hw+SqkdZ+Kqb1SUdjaHy/9qQ9QuAnvRCXh6sQP8ckCTzygbOFx7HzbB3N3AkN95xjyfZeADzfE4bsnY7B1yfPooeRrmZ6f/DSee+45dapmMuCLjIxUp2r3+OOPY9KkSXjxxReVgPDz7MNwGTNXndu45IVCRJgW1wdpMN53IuxaF6Pok3Bs23sFAwM/RqCvszKGj7xQCgg/Bgen07hgPRMT/uSK9pfSkLomEnmYgbnaKAxU6kbkI9l/GOKggZ/vWNh3aYvLP8QjOVqLkvEJWB5U0Qhbh8wlI7EuBXAc8xZGefYFRL7dH2hx0mUTFr49UR3MVQaAI5Hj5IrLll7wm+iM8iN6dB9+BR9NDgDmHcZcb3slp4FY/7RhSHJNwqoZ7mLbr6JIBIuLNmbDakSout3ZSNsYjhzLUIRF3aK9gbww8Q/HhBi1qok67SC2p6R1P8Mxg7jASXgH+3JtRb49Ip86iOzpeKz761RkthuLCdM0cOhyDSdT12P7zjRYz8hAyBR5bHXIWfYsopOvVB6HVmfV7bviBc3yDzHKQS5PXog9jHUnXOFQ1hZ2L8yEW2/gZEo0tMk6uE0ch9KM0xgo19PxNPK2hIuA1B5+G/fAW6nLov49NmGtUo3FaHlni2H5XCjcB1ig7JAW2zbuQOmIinyGv81Z4obolHZwmWacLxvXOxdD72yct3FtjZqK7jadYGVVfejf+pDVPvfulfUVb06et/Ihy4LQhf+Dro/6oEv32+uwoHZ65EU9jhXpA+E9Q4OB3Sxw/ax6rnWLwOKYN2D4xFecQxXviw3K1XOosFsoFkcHw771Veg2+2BBbDFcJr8Jt6H26HCtGLrPtUgSn0P4inMy0HBOGs5xYFZCHNyUw3n75055aT6KTmQjU47rNSYKIeJz3b6HK+wtjiIxcBi2XRgL7z+/qO5bPg7ujBbnDzAq8itohqqfpFudPxNtcbIgEalG67Ds7Y7u4s/rV64Vi/MZsB8TDJeOx3Dy/kkYNajmT7N+71SEhKXBbkwgPP7oDMs2epRk/QuJW8Q54xSFyJgZ6K7krDh2uhrO64kIfHcTXOQxrksZcUWUgfP8sC23pvdZlL3houxV3i/1nC4R5zSc4a4Zhw4nj6KD5ww4thbLCBLB4M/u8PizWnbI9WQcEQFoMfqEfo9AT3lh0UTlhP4oCr/aiu1h4vtTE4fxj/eCnZMzLFrXtXy8s86eOoqP1gXBffgf1ZQ7a/PmzdDpjH+gq87GxqayvHFzc4Ofnx98/N9V5xLd2oHEd9D23mto375hu75xdHTEhAkT1KmbM68KIbv8MWyAvzphqusjz+Mt7TJMVYK/25NrOw17RLBoCK6ewtDBohAfPhWrvvhaBHYiiHvoKTzee5My92FnMV+W8TdRdXkPlx3EyEWbkPVkEv4VMATKlj7xFKwu2+DF9Tn4XgSAg0TSxT0rRfBXgtFLcvCPMT1lLiWfe/9e0EzxR7jLEMSOV9NbKFlf/0746quvlMdbb72FQYMGwcq2FwZ6XUOr1rf/Obgt19OwTwRm8kt8pfhirriEdhw6Fo49R2JRdDgyhibAw06dIYKrkh7GwZk7Bvbvh+iJU/FRsrhQnewMHElBUgEwbs1aeFe0Lx3kDrt7dViekoZv9V5KoFiWvlxcuIqL0rcPI9BLDeBEvoH9eiHCfyq0g54wCex0Bf0QuFNcoMmrB/mBExcbbiOAdfvSUCryVWw7CsT6i2zg8ZqrYfDZolhoxQWIvWYPwqapacp22ysXXqs+cFUDxfopEkHwjWPmDkdnW2CMuEg7mC8CQFeRpkfOe+LCDm8gZFMEHNULZcdBo2Hf2Qfv5iSiSFyEOhx5H++JAM7kOMjtG+aOba+NhPbdrRgYqbmxf0VX4bgmCRMGGC56HJ0sUJrsg+T405gVLy7c1UbujvbtUCrel6IfxEWGg3GAXIWyvIOVy8MgL3RvPQwLYraj4LWJSiBQnrse
74lA3S30P5jlWfFeecHpoSCELIg1TDcheXeusWzdulV5yPaC9g/1htu9Pe58AHguDZnb5bnxd/gZfSacOl7A7JgUFBWJAFAE9YZzCPCOOgi/IRXvn/jsONgg4u9pyDkk8jllIyejGPb+HyJQnp8qx6FesPrNDtEJ+TgpAkDT9i6q33HutLJyhqOVCILkhLV4PUieE+KzdDAReRec4Rcep/4wIYlluoj9FOfP7qNHRABY1/MnVEw7G84No3XUv1zLRvnwDGi8DcfHUfm/JkeRuy9fHPCVmP3m2Mr2jI6DxsLeUp4z+6ArFQGgXKF67Aa+eRhzxxid1+LYRc9Zj5yco3Dxtq1TGWG1N1IEf7aiTN1jdJ66w0WUEbLsffff40zfh4KrcNkYBQ/l+LorSaXJYhkFrpgQk1D5A5WyHmsZpNZS28NYQ5cTFn3hKL5LdsvXvVzFthkuVMpz61k+3kE//vgDPk9PU6caX3FxMVavXq08BgwwfJkOOfkDbOxE9E5UR++99x5KSm7UCmwodbivpzCvAFD2Ajrtj+pdMj2+/886hG/XY8y8GIT6Paam377R7kNM76xZ9sTD/cTz8WKlqmfVO4+3UnV5PRzkL2D74e6mBn8qq54+4v8inBDfHYNsSpD1WZyY9sdLo02DvDYD/ETaUsz9ZD9OjPdr0XcBZX39m5F3AN955x11qnaDBw+uvAN44MAB5Q5gowd/QvmhFCSeE9czY8ZW+QJtC4dnZsBRG4DMr3TiQqnii9cZPhNvXFApbL3gPh5YsTUFOnGBaX+/DR4Syfu2RMLBciacelooFybdxydhpchnUIyCz7Xi+Q2MGlGxbINWThqRFo51qVUCuxHu6GPy07ENBo7UAAviUXBSo17MXUVhxnroHQLhMshwoaLLjoZOXDBMf6biAlZlOxGjnpuPzI3igvoVEcDVc2wgF7cnTI+DRS90lxdc4oSQ1ZwsLmWhMBmwf21S5YWdgTi20/bAcJ9YbG9WpMhf/TjI3sw8xrsjcbnx/kkigK64CJNa94O9CITx23g4GPdwZiu2RzwlnpYXd1WWbaLK8gTrnvJCOhGlsnqYvIiX29g5FKMqLupUFsM18HGIhTzzm9Jf//rX330HcP/+/be8A3jPPfdU/iI/fvx4wx3APjeCqjumowWsOot3YPtq7OvxBob1s0ere8W5MTQKa4eqecSn5tuD4hxyiIBbRfCnajUgFGHy9FK4i4v+w6j+26wFLLupL2vREOdOqyFi27aEqlNGLGxMz6c6nT81q3+5Js7nAXV5H/vCY9GX8FCnjHWy6qW+MtDlbhXHbgbmVj2vxbEL3DLR8PpSSh32UYd9qSKqGrIWHlXOU9iOxajJQM7mqu/DODiYRPQ6FGWKZQzfBLeK2gkKsR6vmSJYzFanb6Ypygl1mfUqH++chx7qjb+88oo6dWfJpiG3ugNoa2tbWd48+eSTyh1ABn9UX6+Iz3BD3gFcuHCh+qpuzLgXUB+8FP4x/hVggZ3L/BD+yXE10+9wX5UvBXFhbCVjsGu3Ob5gteUZtBEXIMbamoTxxfj+C/H0ZBtczN+PrAPGj29x8X4x74tjaMlDyMlf4MLDw2/6ePvtt9Xc1Rm3ATx48CDefPNNJa0plV2QX0BjYd+rhvfcrh9kjcfCc8a/DveCpaX6spIFOnQRT+eOoFS2ybCbhBcCx6J1RjhWTLHDX5S2JiHYl55v1LbvNE7Jxh7iwvVyQRoKc40fR3BZ9tZ18BhMWlz17FWt6lD7oRPhjTSkZqhdwF/PRkFyMew93dWQpxilxwwBUHlJ1fWk4cKv8pcSHS7cTluSe02u2ARbWMpfN66r7W2u6JXtt+p8s36nRZ7T4mnEQNjVcBFt1Vv+ep8GvcmBqPn8hDg/qy6ibtfl1ZfX3lJuszhuyvulbqOzPazlpAl1n5tQ335OSs+dNZ2P9Xl4enqqS6yuog3g+fPnsWXLFiX4a1Ct3eHx5htw/DlWaff0lxGG9mvJ
8keRyjZXV1Emf2noYYtOhoRbEPnPHUWR+NznJK9G3DIfvHvTkS8a8NyRruuhP54tlrUD+7aEYEPgLOxSZynqdP7UrP7lmggUZRlWD+WXdCLIS0Neaiy2Rflh1cYd6hyD8iuyTLJFB5OeB6uo0z4WQy87CXfuVyWYldrCzmGseBbvg3HjuuH2MC2mxXuZLp4celVfhm2vm/48dENTlBO3Uz7eOR4jRtZYVtyJR+/eNQdyf/jDHyrbAP78889YtWqVEvwR3Y777rsXGo0GYWFhDfaoLzPvBKYNBr2yDLMHlGDnW/6IKWiigeDvKLEP8nbjF6vw+nRfaKo8whNkHsPdQnPSpk0bpRfQpKQkHD9+HMuXL8eQIUPUuc1f/XrjtkQr5Uu6Lex947By55cICY2Ct3cvXM5ajQ0LhiFgSgBylF8BRJAkLxwPRiI60AcRVR7anTKPCCiNfzGoqd5Aa1cMnGIDXUqauASq+OXfC57Db/yaX67shBbaGtYTrZXVe3YYfsFujn5j1/ZNpaIX0G+//RZ79uxROmiwsKix9VKDsBoWgZD477F48VqMG++KDrpYxIX7YM4UHyQfqd/nojQ9BBHju2LG+MFYJD73cTtTUGYxDgNvcdOrQc6d33TIWeODAE87UR6MFMsKQVqOHu2He6GPmqWh1ViuVfmBszZlR2IRPbkj/uLdHwvEcXj3g0ScQj8MdDFUtWxs16t1rCXU8GPQXekuKh9lL6CxsbFKR1NxcXEYN26cOofo7sJeQNs8hqnz52MQDmLV0k0oVJMr/XJVXiKbuKr83Ntc9cTDY8TTyBjsOVSsjC9Y/aF2XmMGKsYBlIW5VqsVQZC3Oqd5aW8pf/vdAd2xGr5ITx5R2u84djZ+046JfVJfVtLjsrwIdLCHpfGv3bJNh+cM+M1LQuT2M1i79A3Yn9Rid5YM1ezR3Us8Dd+ElXIMnRofFR1R3ExbOA6bCYuirSgs0qMgfTUwdCycKqsD2aC7g9z+YITVuA7Do0HG+GtnofwKX3pO/oRtqvxgiNKT6b7jIo/8EX1vHk7WcFVaekxWzXKHRbW7ro1J3cZ8Haq3IjiNC0o3xneHinEAZXXQr7/+GgsWLFC6aG8yrW1gP1yDCepYWbEbo+ByLg1xe7NRLj777WU8euI0zhty33Bd5BHBXnTyUaBoNdYtWI1y7wSsTD6jfN5XxiRhuv8MuNy0NlnDnDu6hGmIjr8Kj8jDiE2VyziMsKi18PMdZ3onqk7nj5pQRf3LtTrSp2DbvCB8238tFosyTe6/HLssMCgUHgNMfxxo1U5G16dxuVovmTrsC1J7Gq3TPtrAQv5mmH+khho0V1H6g7zzKMrem/42YQ8rWU28pmWUHlN+PPv9GqKcaAnl4+0zHgcwNTUVr776Ku6/X1aXIrp7MQAU2jj5Y/Yr1sChhYj5pKLItEYPeb3xRQ4KjS+0f/sau/69X524Dfeq7ctq+rXwjrCGo4snsGcjdh6qckez7CBipthg5Otx+F5NupvJev2ffPKJUu+6bdtaqus1E60GeGFcZyB5544qFwZXUfRpLArhCpf+xpdl+chMlxefRo5vRep2cYmhVrssywhBhMYXu00uztrCwt4wCHQH5adpcWE7SESA6euRWVDlIk32eOffEXPmayFHH7klJy/4OIjtSn0H+WI7PJ4db1LNyc5JIy4jROCZWvUyR/ag+LC4yApHYUOM199xKBxF3K9LSUSRyfJ1yNm5Gvr7hsK+Z1s4DA0W27cau/dW2T5xHPZtTxP7Nw59GqB9S92p23hufbVjWH4oEZl3yTB5jz76aGWnC8OGDVNTm0hRLFb494c2y/TcaN+rr+Gz3botWolPTZ8hGpF3K3IOmeYr3bseyefE++bQF/pjWYaAR3b60tGoPLqUhjxZLbDGIMXgzp87xThVYLhoHzjUHu2NblOV5aYoQwDg7AXD8Ad1On/ktNgnUYbhV/naoP7lWh2dyMfucxDHXZR1nY2O5W86
5H4uA7EbP5DZD5okysNYpJkMxSLPmfeRelAHO4eBaFWnfbSHk6coKw/GYl+V9xmnd2D3FhEm+YrtuektPxs4uNa8jNIs8flRX/8+d6CcuFd+roXfKg5GSygfb4+sXXDo0CGl2cjDDz+sprYAVYdFqqdScX1QMUTIlCU70Jxva1DDYACoaIOhLy1SBoLftXwl0tQvDsdBs9EVcVg0dyE+2r8fWXs24e2ZftiF26862NVW9lKXhJ07k5B1oAgNMdRoj/FLsWLMcaya8hReX/8x0g7sR9onMXj7jb9g1aEheOFVOUzF3U3+eterl2lnAM2abGsUqIHV3qlYGBiA3elpKMyKR2LYECzSnobjjAh4yB5dKjmj1cFpWL48Fjm5achJCEDEG0HIcwqFZryhPln7/u6w06dA+5Yf4pJ3KO2F8lJXY8PiABR2ngF3ceEnWXn/HbO8dNjmPwTR2njkVeST3Z0XuMJjyiS1O/VbcYbLeHfotoiLBGjg7Fzll/hBwZg9zR6Z4SMRoW53YboWcWHPKj0oOv355SodMNwpFnB5ZRPczkZi0VQ/JKamqMdWrHevPUa9qlEC5laDXsYr3vbIWXJj+/JSIxE9dSQSz7pjwmsv1/E4NJxWg2biFXH9mBn+rPpepSBT64fg0DS0asIbZHfSrYZ4aVQOrnAUQcDuZb7YsMVwbhg+s1ORLIIXv+GG3i7bD39TnEOnkfhaxTlkeF8ixIWVlW+o0gukxaNeGCjyJsYEIFl+BmWeLUFY8aoP9p2Vd8FOo6yWIO7OnztqIIJIxC1bjcwsQ3mTHOWLtwO1OC8DOb1e/YGpbuePbF9mJYuezK3YJ5anKxUBTr3LtToS74sMLHNi/lZZtuUkh2CdOFe35ctjma9WmxUcZkAzzdXkvJbl5fLQSJwfFAXP4bKcqts+Wo0IxoRBxu+zWvb+dSpy7DR4ZVLF0Dq1M5S3chk+2JAgt10Ej8t9sHBjntiKO+N3lxOdbSBjuZw98crxOiWig5ZQPt4OOayD2SmV49euxo/2oQiMTkKIn/sd++xRy8EAsILN83grzBM4swmrPjwoW9KhzRPzkbhhITyuJuHtWb7429o9eMD3Y2iDRxn+5nYMeB6xr3ji+7VToZn+IQobpNlhT4wJ3wPtPLE/ny/FjOm+mLH8Q5zoPg3/+DhOGZOQmh+rEWuxRBuH0d2OIHmBDyKC5yPj+mhoojMRMqVK73/ohVHzE+DRMQVxsh3QB1mwfC4OkcbjgVl4QaPdA40LUPSBuBgV+VZsTMRlhyiEVY4VKNnDbZ5YR+BoICscK2S+NVqUdJuJQO3HN8bSqwOrxycqF7mYLJ6rfaO0hYPmY0QungnrE+vxnlhPxILlKIJhHyu7K28IthMxa1OSOBbXkLHGt/LYTo/NuDHWmTgOLvMyDNv3w2qlTeSKNVtR7hKFEPG39TkODcdGbKN4r/zdceHTqeK9moXtR+wxYfU6jLpFT5J0O5zhHfUlAp+zR8mn85VzIyJqNXQW4tzYnGQ0fMKNc6j8M5nPFx9+dg0DxedpiTq2H0SAMCt2LUZZZiEpXHwGA/+G3UfaYsiC77FqyUxxAZaNkydE0FSjO3/uWHlvwuJ5M9Dhm2isCxbLi16PonZjMWt7AWY/J4Ko/CM3qvvV6fwRn80pUXD7NREbxPLisgzVKetXrtWRCCwn/DMBE4ZeQM4yWbZNxbYUPez992D5P5dCFHkoOlZxp8ro2KnndfRHR2D9XAIWRs1Qg1ehLvvYTgSeUcbvc8WyRNn77lq4yGqSt6R+Vvz74eRHctvnYPc5Z7wkzmH5I/Sd8XvLCVe4LX0DA38KV47XviL5uWwJ5aMZcQq+rarfihKdcrfZY3IwXAa5w/Ghal/WZAbMZCB4upssXDpbaZx9pyUkJDTpQPBEt88wSP+2aXuwWaOOwdbI8pL/jr/8+QVlINrG1nADwRPdTZq+nKgvORD8mW+SEL7w5kM/NaYWPxC8rD7qH44JMQ3U
7p6q+SppBaa9PKFBvx/l8EhSXccB5B1AIqIWQp86FcGBIcir0mBD9roqf9F1MRpPjYjME8sJM1ClDaA+1U9M+yHztA552gAsUtr3yWFrQpD5Y8UHoRiZYSJdBH/SNn+ZR/xNZQNdPYoSgrBC87ChbeDkkdiwJc1o2ChJ/ojQEdGpR1G0xRdzxGuTjqD02dgd5Yvg8XLZHTHHPwDJuVW6na/Y9kN66JJvrK/GvNK5tGrLTEw/amijXOkqTqVHKsP1KNs+fjBWxMTjVC3tqokBIBFRi2HhPB72x1bj3VC1PZVReyadUyjGDb+NHhWJ6K7CcsJcHcPuRSOh/aEXRgUmYW7oTNjpVmOdxl8N8izgNDkJIUEaJbdHkHgd/RaclBqgskOpx7EoOhHX+7+ltA0MfKEfSrb6IGRZfLVea0sTZmHdIVf4RSfgpWfE5012BHU6XqxLrD/9Kvr8OU4sOw7evXVICnTDumodWAF5Mc9jeco1OE/bhJDFUXBq/SniAp8XQa1RVXi5zKk+0Oa0wbDXEiqXuWvBYCzfnK+2U76KIq0PgheEo8D6ZcyKFPv+2iS0Sp+K4Dcjq3TsRBUYABIRtRRWY/HqatmeSofdUT5Ku873duhgPSkJa9cE36IHQiIyCywnzJQIiIbEIXJRMNyGumOgZzACwyNgjx3IyZd31trCop87HB0MHeRZOYjXg5xhIT4PZenLsS6lGC5v70HImzOUtoEuvmvx5pJQWKdMhTbZNIDTFfSD3zzZhtALbpMninXokfPeVGSeG4tZ/0zCdN+xYtljMerNBLUDq79h30n1j1VF1jOxMHotRg0X2zF8BqaHr4SH2IdtB/PVHOoy8QZCNsVhnKfXjWVqXHE5R+21tygW2o3ZsNfsMd33lZvg9nM4Vn2QZtpjOikYABIRtSCteo7FOGVMR8P4b3L8s+mT3WFRxwG0iejux3LCPDm7VOlYqVtfyL6qvj1dfYzLG4pR8LlWPL+BUSNMqwe3ctKINCAvNc30LuAId/Qx7jvmnOyJVzxPnlmlM6S2cPAWaUhB5lemQaSL2xMmQ0XBohe6y409UWwYluJSFgrFMu39JlXp5Vgsc5oI9qJDlXRddjR0cIfnM1X23XYiRj1nA/3mFBTVMH6luWMASERERETUwrWuGuB3tIQcn1//W209DEuncUo2Dh3SFpcLDFWGbzyO4HJHMe/gMRgPiY2evUyHjig5Atkc0bG1HkUmfy8eJy6gg5hXWFKlfd+9VceusYVlD/F0Xd3WK3plnVadb9a9bjFKj8nl2qO8pMp6xePCr/3EPB0unFMykxEGgEREREREZkkEXDJAOhipDO8hqwwbP7Q7ZZ4jKDW+BXif+lzhN1lhUwR5cvzTassIwj6Z5/gxw529O8ww5qcW2mrr9UG0Nk3M24HSszIPGWMASERERERkluzR3Us8Dd+ElemGKsPVH3FwM6mvWUW3fpBD6ru8fbiGv1UfiybWb8D5dhbK3cvSc9Wrr5YfDFF7H7VBdwfZqVEwwmpap/rgcBfVMQAkoiZk6FJ6ijZbnSaiplLZlXzVLv/udqXxiBblUHRqDV3QE931bGA/SESA6euRadwDp3RFfEf7d8Sc+VqcUpNq1HkAHIcCOdvjq/W6WV4QiUXD+2NF8lE1pY46DoWjN6BLUTt7qaRDzs7V0N83VOl91M5JIwJLLXZX62lU9mz6sAgUw1HInkCrYQBIRC2WvkCLuO0VPYYREd3CJR3ytkQix9yCXKKbsPL+O2Z56bDNfwiitfHIy01DXupqbJjnh20FrvCYMgnd1bw1s4dHkKHXzUVT/ZCYmoLC3BRkbgnA8rfDUeT0MsaN6KvmrSsLuLwilnlWBJAVy8yKR2LYs1i31x6jXtWItQKtBgWrPY2ORMTyWOTI9n/p4tpA5ksBnP78cpVOZEhiAEhELVQ29smBY/U3a9xORHSDPisEK2LyTLuFt5qIwPRLCPTk+HhkruzhNi8TIYGjgaxwrAj0
wYo1WpR0m4lA7ccY59RWzXcTthMxa1MSNC5A3kZfRAT64sNPdbB+Lg6Ry4PhcDtBWOUyryFjjVhm8HxkXB+N6bEZ0AytqFDaFg6ajxG5eCasT6zHe7L934LlKMJoaKIzMcvTtGdTMrjnv4L6ulaLFoXjySGesO/5iJpC1HQWLp2NuLg4derOSUhIwOfZh+EyZq6aQg1PVgEdiW3T9mCzxlVNq6vf87d0p+Ul/x1/+fMLcHR0VFMaT+jC/0HXR33QpXt9f2EmY7IKaEA4MCvhFu19WjBz2Me7ydlTR3HmmySEL/x/akrT8/Pzg4//u+oU0a19lbQC016e0KDfj/fcc4/yXIewTsE7gETUOH4rRuGWACya3BFThj+M4PkhyDxu0rF0JX1uLLRhPggeL/OKx+SRiI6KRVFFF2JKmx0R/MnXG0cqeRJlH9QKPU6lR2JD0EjMkX8rHgEaH2zQ7sAptgMgM1X24w4kLqs4J+T5F4TdubW0ebtyFJkxvobzb/xgrDA+9ypcknn81PNZPce2pEH/mzq/kh5FCUFYoXm48lyuns/QFjg69SiKtvgq26h08JCjRYSSXsN2Fq3GAjFPm6FumNieHG0AIvz7G9Yj9zEwAInpRyvv9hVpxXLDd4hXO7DOV+QJizf0SlhjG0C1HKlYnnocCk26ky9GZphhOaWnU24cX5k3Jh6nLqnZKtT5mBERNSwGgETUCHTIXOaGiJg0WD6zCXOj12F8Px22vTELu9QcFfR7pyIk8B2c7OiF8fOSEBIdh+nDbaDbHoRF82INDdEt3DEuOgoe8vWYKJEnCS5y/CBchW7z8whe8D4uO7wMP5EeErkJPv2B3I1+WBKbgjKZjciMlO4NwNsioNr1cz94L5bnxFIMa70L2kA3rEjIN60OiWPYvdQb2067Kuff3NfGAeni3NMEIa8yCMxH8puDse5QJ7hMS1DOv1fG2uNkjI94bXyOyU4YHsei6ERc7/8WAkW+wBf6oWSryLdMBE1qrgqlCbPEMl3FeZuAl54ZD3sXd7jJjiV2fVotr+7gVug6B8PFxQK4no9EsT3Rn56H/ZgIZXvmhgaiDz7FtgXeiMsybLjdCLF+jbt4JcqPReI4THZHe2VOVTrkLBsmypH1KOn9hrLdc1+bhFY5QYiY6ovdRVWqnZ9Yj3V//Rt03WZCI7Z91nN9cXLLVAQvVwNMRV2PGRFRw2MASEQNrjz3fXyYAriF/kdcgE3EwEFecNPEIeQ1eTFm7Chy9+UD3isx+803xMWfOxwHjYWHfxwC/Z2Bgn3QySvB1rLXMmcoNbisnUUed3SXzQGuZCMnoxj2/h+K/Bq4iHTHoRPh/WYcXvEWwaW42D0p/4bIXFxPw75oLUpHbMLC6LUYNdxwToxbdBBhGlvkRYcjw+SkyEdJj6UIWRSsnH8DPUMx95+b4HIuFh8lqx0uHUlBUgEwzn8tvD29lPPPxXctXg1yR+uiNHyrRj1l6cuxLqUYLm/vQcibM5TzUeZ7c0korFOmQpts2mufrqAf/OaJoE6WD5Mnwl78GzR6LJC1AwUm25iN/K35sPB2h0NrUb4cSkTeBWf4hcfBz3ussj0DPd/A9PCV8EAxdh89ovxV+57u6NNLFhQWsOsvjkM/G7RS5piS5dV7YtuMt3ugZzACN+3BuC4p0L671TQgFQGhY/hBk7JNKa/2bkdBRcY6HjMiosbAAJCIGpzuq0joocGoKo2xrTxnGu7iVeoLj0VfYu28sdV+me9k1Ut9dRPt3DEh5jAWTxYXXyYsYNlNfUlkRsoPpSDxHOA9ZqzhB5NKbeHwzAw4IgWZXxkHYs7wmTjRNK+tF9zHi/N4awqUnPfb4CHxtG9LJPKO6yvvIHYfn4SVMREYqPTNUIyCz7Xi+Q2MGmF63rdyEmXBCCAvNc00kBohArSKfh1UFi4vijLCdBvLc1Ow65zYTpFfBnCthoQibEsGvB0M8ytZ2FTZ57q4iqIsWV5V3260c4XHeHcR
kMZXCUi9MHCAaScZ1j1lu+TsGwNQ1+mYERE1DgaARNTAilF6XDyN6AdrQ4IRe9iJC8GalF/SQad0RR2LbVF+WLVRtt2pq6soO3cUReLvc5JXI26ZD979tzqLyIyUXZCB01jY96qhFz+7fpDjIxeeM2771guWcvRlExbo0EU8nTuCUtkGzm4SXggci9YZ4VgxxQ5/Gd4fi5aFYF96vlF7ttM4lSOehrTF5YI0FMqu2SsfR3C5o5h38BhMWgH37CXWVIXFaLhNFtu4fYch+JQBWo4WeodJcKwa8F3XQ388Wyx/hwi0QrAhsHoV81vT44Icd3rEQNi1NqQYs+otay2kQW+y4dWPbXtLW/G/OK4Vx6NOx4yIqHEwACSiZqXsSCyiJ3fEX7z7Y0GgCNw+SMQp9MNAl6rVRWtWmh6CiPFdMWP8YCwSfx+3MwVlFuMwsOpNQSLCdfW5bizRSgmK2sLeNw4rd36JkNAoeHv3wuWs1diwYBgCpgSoY+xdFQGjeDoYiWjZLXuVh3anzCMCSuNbgPepzybawmFoMCyKtqKwSExez0be5mIM9BVBrSGDCLJ0yFnjgwBPO7H+kWL5IUjL0aP9cC/0UbPcMb/d7rAzdTlmRESNgwEgETUwG1j1tgH25uFktatNHUqNx3HXp2DbvCB8238tFm8/g83pl7BWm4TAoFB4DKhDHami1Vi3YDXKvROwMtnw9ytjkjDdfwZceqt5iMxIe0sZJu2A7lgNgcvJI5AxlWNn4/HvjuFCtc559bgsqzI62MNS3rmrYNEXjp4z4DcvCZHifF279A3Yn9Rid5a8V2eP7l7iafgmrBTnoTwXqz/qNhRDq0FeGN05H5kH81GWk4hkeGGI843qmbqEaYiOvwqPyMOITZXLPYywqLXw8x13I0isMwsoN+9qLK+A0mPZ4n93WFS7S1pHNz1mRESNgwEgETU4+wEacVm1Grv3ml7klB9MxC7jbtVP5GO3mO4zxB32nY2qVf2mQ+7nsgqo8cWpmN9ZPP1qmJL0x7IMF7RDvWDV0ejvL6UhL12+OI3LVbtmJ7qLtRrghXHiPEneuaNKT5pXUfRpLArhCpf+xmGSCLTSs017Bj2+FanbxXnsKc5LMVmWEYIIjS92y6rdldrCwr6v0na3g3KXUHbUJCLA9PXILKgSfF7JRqJ/R8yZrzX06ntLrnCe5Axd6lZsz44FvDV4wk6dhWKcKjAEZQOH2qO9UbXNstwUyFqoOHvhRi+b96rlQq3VLtU7jjWUV3K7921PA5zGoU/l+uumbseMiKhxMAAkogbXalAwZk9zRc6SkVgRG488cWGWqfVDcNBWQxBXwcFVuVjNifkb4pJ3KO2FcpJDsG7qSGzLl3cp8lFe+au8Laxktc7MrdiXlQZd6VVYPOqFgSIpMSYAyakp4u/FerYEYcWrPth3Vv79aZRxLEAyJ63d4RGogdXeqVgYGIDd6WkozIpHYtgQLNKehuOMCHjI3kkqOaPVwWlYvjwWOfL8SwhAxBtByHMKhWa8oR51+/7usNOnQPuWX+V5mpe6GhsWB6Cw8wy4i0BMsvL+O2Z56bDNfwiitfK8V/PN88O2Ald4TJmE7krOW7MfPgOORauRLAJRDzfj4RtsRLEhbzVGIm7ZamSKskDuX3KUL94O1OK8LF/0NzpdsbDuJ/7fgcwUud1Haxx+odWgl/GKt71SXkWoxyEvNRLRohxKPOuOCa+9XOftrlDXY0ZE1BjqFABaPGCBXy5dVKeImo78HD4gPo8NwcLCAr+WX1On6M5qCwfNx4gMnQRkzMeKQF9sy+qEUWs+xkvGbfPExeqEfyZgwtALyFnmh4jAqdiWooe9/x4s/+dSuIgsRccqfpW3gcuUKLj9mogNwT6IyzoN2GkwK3YtRllmISncV/z937D7SFsMWfA9Vi2ZCQtk4+SJ223DQzdz+dIF5RxqCnK91y6zH/3aWI1YiyXaOIzudgTJC3wQETwfGddHQxOdiZAprlWG
QuiFUfMT4NExBXGBPoj+IAuWz8UhMioYDu3ULBZe0Gj3QCNOyKIP5HnqgxUbE3HZIQph2iijHi3t4TZPrCNwNJAVLs57kW+NFiXdZiJQ+zHGOVXvPKVWdoYxAWWvom5DTT9nVt6bsHjeDHT4JhrrRFkQEb0eRe3GYtb2Asx+zgbIP3KjOqfTRMyd7IWTG+V2a6GrsRGkPVzmZSBy8UxY/7BaacO4Ys1WlLtEIWRTUv22u0KdjxkZk+f1Aw88oE41Dx3vZ3lD9VMmPi9N9f1Ym3v+K6iva7X5gw9xtey/GO4mCnGiJnT8xA/Yuz8JS5b8j5py5+Tm5uKfG7QY/uISNYWI6uqT9X/Fxo0b0KZNGzWl8Wz+cAu+L70Xjwz2UVOI6G7w3ZdJeKhLOV7+80tqStN7K+T/4cGBL6BTt0fUFKKba4zvx3vuuUd5rkNYp6jTHUArayucP18xmA1R0zl/4SysunZVp+6srmK5v15j/UCi+rpy6Rw6PmDRJMGfZGNjjbKL7EaR6G4jz2sbcQ3anHTpYsXyhupMfj92uL/pvh9rU6cA0FqcfPpfjHtqIGoa8ocIG9vqo8ndCV26dMG1q2W4VsaqHUT1IS+GGuqHmbqwtuqKX8vOqFNEdLf49coZ5Rq0OenRXf7gxPKG6kZ+P8obDM1NnQLA/v3749Klizh3nh94alpfFx7E448/rk7dWe3bt8cQF1ecOJqpphBRXZR8nwW3J13Vqcbn7OyM0lM/8Fd5ortImb4U5WV65Rq0ORnoPADFP8ieZ4lu7XRRFoY14fdjbeoUALZq1Up8ubvh8De5agpR4/v+hyPo3t0Offv2VVPuPPfhw8TJ+gX+7zeTTtCJqBbl1y6jWPc13Nzc1JSmMWiwC37+7oA6RUQt3fnjOcp3srwGbU4cHR1x3733oPSnAjWFqGby+7Hk+CEMG9a03481qVMAKA33+CMKjnylThE1viPf5WO4+1PqVMMYNGgQOlnejxNHeBeQqC6Kv8/BU0+5NXkPZ8OefAJndQfVKSJq6U4WZYsL52HqVPPC8obq4syPORjmNqzZ9QAq1TkAtLW1FRfHztj56b/UFKLGk5m9F9fKyxrly+BZby8UZm7FmRPfqClEVBN5jvyY9wlGPj1CTWk6TzzxBHo+aIsjn7+nphBRSyXP4yddhyjXns3RU089hQuni/BD3i41hciU/H78Jns7PEd6qCnNS50DQOnll19G2dWL2LPvP2oKUcM7/tMP2PVZIqZPn6amNKwnn3xSfNY1yP9so5pCRDX5eu9GBM15Az169FBTmtYsf39cKP4OR7M/VlOIqKWR52+7ey7hxRdfVFOaH1nmvf6aP775YhvO/VykphLdcHjfRrwWMAsPPvigmtK81CsAlIKC5uDzL1Lxg+5bNYWo4Vy/fg2bNq9GWFhYo/aiNMrzaQx1dcUXH0eoKURkLHt7BPz/OgN9+vRRU5peu3btsDB0Ab776hOcOck7+EQtzZmfCpXzd3bg62pK8yXLvrfeegtfbI/Er9c5hBTdcCAxAs+M9lSaFTVX9Q4AO3TogCVLluD9LeuQfTBNTSW683THv0PE39/CSy+91KAdv9RmxvRX4Nj3IeyPD2PvgkSqi2d+wmfv/w2eHk9i8OPN78tNDuciv6Oyd0Thx/zdaioRNXc/5n2KLz9dg/DwcKVX7pZA9kA8ffp0pGx8AyXHv1ZTyVzJMf8yP1oIe7uu8PX1VVObp3v+W9ch46u4fv06Vq1chXvuaY3hQ0ejU6fmN8YFtUzl5dfx1aEvcODLzzFjxqt47LHH1DlNY+++dLz33v/iMY+/oNvDg9VUIvPya/k1/PTN5zhZuAez/tr05+WtyO+oFVHRuPJbW/QcOAbtH2heY4kRkcGFEh1OfJ0Myw734a0356ipLcuPP/4oyptVsOs7DHaPjsR9rduqc8hclOhyceiz/8Wzz3o3SfB3zz33KM91DetuOwCskJCQgM8/34+unW0xwGkI+vVp
3hcF1Hyd/Pk4vsz7AkXfFeAJVxe88MILzabnpKNHj+LjxGT8dOIkuvR0hp2DK+7vbKfOJbp7yfYtp3/4CqW6rzDo8cfhN8m3WfZoVhv5HbU3LQOt7++GHn3dYPPQQHUOETUlOebu6e8PoNV/r8LnT55wd3dX57RMly9fxodxW5F94AC69hyA7o88ga49HlXn0t3o6uULOHZ4L87+lIdeD9opnQjKYUKaQqMHgFJ5eTn279+P/Z9n4Lvvi9C5U1d0suwCS4suaNumnZqLyNSvv5Xjgv4czl84i3Pnz6Bz587KWGJ//ONTjdrerz5++OEH7Erdh/R9qehwvyXaP2CNduLRtkNnNQdRy3dZfxpXfynBJX0pWrdqjZGenhg5YrhSvbIlkt9RMgj8fH8Gjv3wLTpaWil3BNve3xX3teqg5iKihvTr9Uu48kspyi6eEWXMGdg92EsEfs8ow8jcTc6ePYtde9Kx77NUXC+/jo4WsryR1wrNs0dTqp+rl0rF51h+hktxD/4PrkPd8LTHU+jdu7eao2k0SQBo7OLFizh37pxyApSWliq/iBDVpHXr1rC2tlYCP3lhKZ9bCnlBeebMGeUhP+fnz59X51BDyc3NVXrTaq4/Dtwt5DeCjY01rKyslGN9tx1vfkcRNQ3Zrk+WK/L7Xj4eeOABdc7dy/g6oaSkRE2llkxet1Z8huV1a6tWrdQ5TavJA0Aioobwl7/8BY8++iiCg4PVFCIiIiJiAEhEdx3ZoYelpaUSAH755ZdqKhERERHVNwCs9zAQRESN7V//+heuXLmCr776CgcPHlRTiYiIiKi+GAASUbO3detW9ZUhGCQiIiKi28MqoETUrP3000/o2bOnOgX06NFDSSMiIiIiVgEloruM8d0/6cSJE0hOTlaniIiIiKg+GAASUbNWNQCUWA2UiIiI6PawCigRNVuyx88hQ4aoUzfIMSQvXLiAdu3aqSlERERE5olVQInorlHT3T9JDgvBu4BERERE9ccAkIiardoCQOlm84iIiIioZgwAiahZ+uSTT3D8+HF1qrqUlBTodDp1ioiIiIjqggEgETVLdbnDx7uARERERPXDTmCIqNm5du0ajhw5ok4ZDBw4UHnOy8tTnis4Ozurr4iIiIjMT307gWEASEQtQn0LNyIiIiJzwF5AiYiIiIiIqEYMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITDAAJCIiIiIiMhMMAImIiIiIiMwEA0AiIiIiIiIzwQCQiIiIiIjITNzzX0F9TUTUbN1zzz3qKyIiIiKqqq5hHe8AElGL4OHhob4iIiIiImP1uU7iHUAi+v/t2QENAAAAgjD7pzYIfwsGAAARDiAAAECEAAQAAIgQgAAAABECEAAAIEIAAgAARAhAAACACAEIAAAQIQABAAAiBCAAAECEAAQAAIgQgAAAABECEAAAIEIAAgAARAhAAACACAEIAAAQIQABAAAiBCAAAECEAAQAAEjYDtIdcYHG6JfTAAAAAElFTkSuQmCC)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KieKn-w0bpGu" + }, + "source": [ + "In dynamic quantization, submodules are converted to 
quantized versions during the preparation stage, such that the weights are appropriately quantized. Then, during inference, each quantized layer observes the data that is fed into it, and adjusts the quantization parameters according to what is observed. This happens repeatedly as inference is carried out, hence the name, 'dynamic' quantization." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xeIaGl0YXZxh" + }, + "source": [ + "### Static Quantization" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AGqQQclcXbYi" + }, + "source": [ + "![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA5AAAAHiCAYAAACeOSPHAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAEnQAABJ0Ad5mH3gAALDUSURBVHhe7N0LXFRl/j/wz/4Lw8uGN0TFzUlDDS01JV1sBdYL/WATitSVrGm1dJVWklVS8gfCKhq6KKxokbpNGf7EpUSDDcEEShbCEkwIYzM0NRA1phRJcvf/PGcOODMMOCgo6Ofta5w5lznnOZfncL7zXM4v/iuAiIiIiIiI6Br+n/pORERERERE1CQGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERkFQaQREREREREZBUGkERERERERGQVBpBERERERERklV/8V1A/t6hjx46hvLwc586ew5kzlag4cwa1tZfVqUSmOnfuDIdevWDfyx49evTA/fffj549e6pT266zZ8/im2++Ee/n8V1FpTjXz6C6+qI6lah969SpM3qJfNnboSfse7affHnu3Dn8+9//xlnx/l35WVRWVuLSpWp1KlH7ZmNjo+TLPr17ib+b9hg4cGC7yJe1tbX44osvcEbkx3Lx97Ki4gwuXuTfS7o9yHzp4CD/XravfHm9WjyA/PTTT5GV9TFKviyGpr8Tutn1QLdu4tW1B+6+20adi8jUpZpqfF91Dt9/fw4//HgeJ779BmPG/hpubr/B4MGD1bnajqNHj+KjrAP4NPcAevQZCJvO9uh0b0/xsofNPZ3UuYjat9qfqlH9Q6V4nUXtxUqc++5rPDpmHH7rPq5N5stvv/0Waen7kflROnrfNwR3d5J5sic62/WCjW1ndS6i9
u3Kz5dxsapcvM7gssyXp7/GqEd/jckTxrfJfCkLEz7M+BifZO1D5669cM8vHUSetBefe6ODbRd1LqL2zTxfVnxbgkfH/gaeE93aZL68US0WQB48eBDvvfc+OomLwaAHHsKIhx5VpxA1X03NJRw8dAD//qYY3Xt0w+zZs9CtWzd16q1TUVGBv+u2oeJsFbr1G477nN0YMNIdQwaUJ4qz8P3JQjj07Iq5L7aNfCnFb34LhYePoEf/keg36Nfi5tRBnUJ0e6u58D2+O/Y5zp/4HPbdf4k/zmk7+fLNLW+hoOAwuv3qYfQZOBpde92vTiG6vV368RxO//tT6E8dRq+edpjzQtvJly2hRQLId999F9nZH+PJ3z2DAZrbL8qmWysn7yPkHszCwoUv39JfcfLz87Hp9Tfg9KifCBzHq2OJ7kxfH/oQJ4sy8OegW5svS0pKELthE3oN/DWcRj+hjiW6M8l8+a3Il4vaQL7cELcJDg/8GgMeYb6kO9vxw2koO5x+y/9etqS7lgvq5+uyatUqfPPNccx+dqGIsPuoY4lazq/63Q/Hvhqsj31NGXZ2dlbeb6akpCRs3boVY31fgYNmuDqW6M7Vvc8DsOs1ANteX6kM36p8+frrr2PE5PnoN9hVHUt052pL+XK4yJeOg5gvibo63Pp82dJuqBfWjIwMnD5djhef+zM62rIaH7We+0QQuXzpeuUPk2x/eDPJDqHker3nvQk7+/7qWCLq1vsBJV/I/CE7x7iZjPNlj76s+UJUh/mSqO25lfmyNVx3AHnkyBHs2JGI+bNfUccQtb7f+83G+vXr8f3336tjWpder8fq19Zi1OPz1TFEZE7mD1kbRfZ8ejMo+TKK+ZKoKbciX77GfEnUpJudL1vLdVVhlReJ6Oh18Jr0NOx79lbHErW+nj0c8N///BcH/pUNV9fWrxoT87dN6NxnBO578DfqGCIy16VbH9x1tw2y03dhnOuvcc8996hTWods89ipN/MlUVPq8mXWTcqXcRs34Zd9R8JxCPMlUWPq8+XeXRg79lHY2tqqU9qX6yqBPHDgAB4c9DAGPTBUHUN087iO+S1+qPqx1auyyuWfOfcDBo78H3UMETVG5pO7bO2URzm1JuZLIusp+fKem5MvK8//iPseflwdQ0SNkfnyni7dkZ2drY5pf64rgMzO/kQEkCPUIaKb78HBI5GV+bE61DoyMj9Bz/v5OBoia/Xo74ID/2rdG9WPMg+gh4b5kshaPTUu+CSndfNl9scHYM+/l0RW6znAFR/tz1KH2p9mB5DyV6xfiK/1dnBUxxDdfMOcH0Fe3r9w9uxZdUzLkss9mJeDvk78g0hkrb5OLigp/gI//PCDOqZlyXz5ad4B5kuiZpD58uiXrZsv/5WTg573u6hjiOha+j7ggrs7dMLnn3+ujmlfmh1AfvllCZwGsuoq3Vr3dLgHv/rV/UqPb61BLrd774G426Z91k0nuhVkfumjGYavvvpKHdOyZL7s0Yf5kqg5ZH7p3dr5su8A5kuiZuqlebjV8mVra3YAWVFxBt269lCHiG4du192x5kzZ9ShllUuzvMOXezVISKylsw331VUqkMtq+JMJWw6M18SNdc9Il+e+q6V8qXI73d3Yr4kaq6ff9EJFy9eVIfal2YHkOfOnmUASW1Ct249UFnZOlVYyyvOotO9PdUhIrKWzDfflbfOjWq5uFFlviRqPplv5A+jrUH+sMN8SdR8Nvd0xo8XqtWh9qXZAeTZcwwgqW2Q5+HZVgog5Xne6V7+okrUXDLfnD3bOgHk2bPnmC+JroMhX7bO38vKtpQvL5ShICkIMfOGYeb4LuI1EMGBAUjOyIP+ijqPEX2Gv5jHHzmtc8mitqwyETHiHInJqFBH3Hw2HTqKAPIOKYG8dKkaHW07qUNEt448D6vF+dgaai5dgs09PM+Jmkvmm0vVrZUvq5kvia6Dki8vtc6NqrwvbAv5svpwFCJnDsPad7Jg8/BiBMakICRqF
dz7lyMzYgICZgYgp1ydmagN+H9326C2tlYdal+aHUASEREREbUZpbFY81IETj0YiRXbPsP8eVq4jHSD89hp8ApKwrrEBEzqqsPGPwahQK9+h4iuGwNIIiIiImqnKpCzLQSl3RcgIHQBNF3U0cZ6T4E2KBJO5+Px5o4stM8yH6K2gwEkEREREbVPpz5E5n5AM306nDuq4yxxmo5JHoB+WxpKL6vj6lw6ipxNfgj27YKZvqOwNjoepeYllRfkPP4InyHbVnZBgNYbm7dnWWhbqUdpUhDWagca2mHOmGBhvjwkK+3vjqJ0ux8WKsvzxx6dd6NtMsu2jxPTglBwQR2BGpzOjsLmuvaeMt2bEnG6frokguswMU2Xh8r9AYbtE+nZebBGmao/FIvNgaMQIL8/fhjCw0KQ8821imibXqZ1229Q/c1uJK+eoGy/0l51aRDSD5m1SbxSgWLz5el2m22n5f2ZeUKdrBw79fjK7Vwdi9Jz6jS6LgwgiYiIiKhdqi4rRLF4d3YabBjRKAc4jXYT7ztQavII6eNIX+WFneVj4LskBYte8gGygxCuNa7uWojUxaOw8XA3uMxOQkhMCp6fosGpTd7icxqutvouQ87KRxAek4zLw15R2mEGPj0EZ3aI+VYnwjwurEyaL5Y5Bv4xSXjmcV88NHEaRmA3cvLL1DnqiG3MKITdTB8MVUpYa1Aqgs3gZREo6vUc5kfJdE+HTfYsBC+OQukl5UtX5YZgY+I98BLbF/j0dAx1skX1gSCEBMagymkBnlfai4bC+fIObNQ+hdRS9XtNsbDM5mx/bVEU1oggb+93Q+C1wtBedVyHvdAFumLjfjWIvCQCwyBXRMbshc1vV2FR3fLe9xfbKY6P2Q4135+a+8RIuQx57NJqMOjZBHG8IuF6VxrWL1mK9vkExraBASQRERERtUu1lww943TuKAOYpnXoaCf+r8DFC3WlZVIhzvRbhZDwYLiOdcOIiaFY9PpWuJyPxz9SCw2zlKQhpQjwmRcHr4mecB7pBhe/OLwY5IYOpVn4Sg00q7PXiEClAi6v7kPI4jlKO0w53+KVoeiVNgu6VNPAsKxoCPyXBIv5POE6Yxo0jo/D1QMoyMwyDTaLxPpLHeDuMgY2crg0HrotedBo9yGqPt3BCFy3Fa7fRWD9O2bVdItq4BIUDXcxn4vfHDjbVaAoMx768augfelqe9GpoRsxqXsVCorU7W5Kg2U2Z/sLkR4dgdKxcVgeE4dJ4w3r9wn/APM9OqLy4w9xWsxVuT8KOw/1hs+GgwjUiuBaXV6IPD5F8XjzfdPtbLA/xThlGUVjMHVlEl7wmyKO3RRMWpyEl5/UgM1hrx8DSCIiIiK6Qw2H97RpMHkQSW9PuPmKgGRHGpSQ55cOuF+8ZW6PQsEJfX3Q0tc3Bes2RWKEjEtFYFr0sU68L8AkDxm6XGUzVKtUny3IMAsMPdwwSPluHQeMmKAFchNRdEodhRoUH3gDeqdAERgZguSyvBiRLjdMfFwNKOv0noZJTzpYqKbrAycn9aPCDp3tHYDsN5CamofKuhLLjp7Q7voMIb7D1RFNMV9mM7ZfBN05pcAkH1/T/S5CPtfwIwgL1aKv2MKijDRg9By4P2z240BvEQTOsFAducH+LENpjljG+LlwHWq8DFs4ec6FizpEzccAkoiIiIjap7sMgcFlC23szF2+JMucHNC5i3Ew0R9du6of64kASz7y/HwJKs+Ld8fpeDpwCjociMDamY74g9KOLgSZ2YVGbfvKcTpfvI22xcWiLBQfMn6V4KKsenrwOKqUeVX39RdrMtVJ9hyLLGQcUEsBL+ehKLUCmoluSomaDNQqj8sqnhrUnjFfTxaqfh4ippWhSqa7zngNTDfRFs5TNsJrZBnSZRtET9lm0A+67YkoPmVluVyDZTZj+y/XKIF5167mW2+sAvqD4m34ELMgU7KFo9MU8S620zi5Dfan2FfZ4s2pf8Nl9O6v7k+6HgwgiYiIiKhds
hvkBmfxXnjNapd6lInABpgOpwGGMdfWFTYd5LstNH4JWLfnM4SERsPLqz8u5sZi87JxyvMl85VitRoRcIq3g1GICfRGpNlLt0fOIwJS4yLIu9V3Yx3GYMRMB5SlZSlBVu3hNCSf98TE8VdLBWuVUjcddBbWE6OT27gblcadxNwlFqt+rNfbE/4xRYjbkoAXtHMwqNNRpG+ahcgZjyBye+G1e6ptsMzr2P4bYPEHA0v7k1oFA0giIiIiap8cH4e7h6xuqmv6GY+lOqSmioBzpiecTCKf46gyKRaU9LgoAzAnDboaPxbEbjCcJ86B/5IURO06i7hVC6A5pUN6rgz1NOjrKd7Gb8W67AvYZvGVANeGxWlmbOE8bi7sSneguFSPouxYYOwUDHVUJ8MBfZ0cxHswwiyuw/DyGWqYu2m2sHOaAvfZ0QjcdATb9hyA1qMCxZt2NOyp9pqasf0dREAu3qpMig8NKvd4Y+G8CBRfcoDdaDGiUASdhklGalB5bLd4F8enqUJMMd1enBsWl1F5XAnQ6fowgCS65dQuscMS77gG3foMfzTWZTnR7aMQqbMN3dTvPGTceUdrM3RtL7vbvx2U6uQ+jII1HUSaunOvsXcGB7jO2wpXxOPN1bEoM3m8g6p8N3TR8lmRc/DidDfTdoMif+Zk55mWuJ3YgYxdIvxQq41WHwhBpNYP6XWPhVCI4EszGJ3Ep85KQOoAzUgRQWW/gZwis3wuewKd1wULl+qUzmGuaagnvJ1EujJeQ6FIh/vvTNsKOg7Vwg4icM0wD4FkL6gDEaCVAZg6yqJCpC+dgIUxaabbLQJkx97yg61a8toczdh+Jze4OgHpe9NMAzsxX+auLFT2HwFNRw2GThTLOxiPzMNmyxPHM327SK6fOD5NptMBTmMsL6MydwdkjVu6PgwgichKenHBjUVyttkzmqhN0xfpkLDLih71qNXUHkpGSukYOA2tQGa+2Y0qtSG8xrVbvafhxTWRuP/LECyb6Y3NW3TIl23vchORvskfC6f5I71Ki/mvR6sd3hgbDpuDs7FmTbzynfykAEQuCELB0FBo1c5kOg1zg6M+DbpX/JGQultp11eQEYvNKwJQLIJSt7GG1nT2Xn/FfM8y7Jw3GjG6RBTUzbfEX+kJ1H3mdPRV5ryW4XDxdUPZ9likQ4vhw00TbTMyGC/P1iAnYgIi1XQXZ4trfdjvsDFNxJ/PPtf0MzHF8p1GiiAqab7Y7ljk5Mp2iruRueEZxInAzOklH5j0j2Ml67d/OCYFhcJp/ywsDwxAerZMf7wyX/I5N2j9piiBub1HMKaOLEfyS1eXpxyfP85CvqMWz0/3VOZriiFNchnivEiSx04En2u8sXxLQYP2p2Q9BpBEZJ3KNCQEh6Cs2dVa6NbJQ+a8AKTqzX69pZuoBqX5OuhHPwefJzyh3yZuglgM1jbxGteu2TgtwKJtR7Do2cG4eHCNoR1e8FKkl3SDe+g+xG2Lg6tSumauPyYtTYJ7F3H8xXdi3slF1ycTEBUdDKe6IMzOE1rdPmhdgNJ3/JU2fWu3JOOiUzTCdMZBqQauS3IQEjgZyI3AWjnfBh3O9JmLQN178DHpCbRp9o/IZ0IKM8R7g0jHFk7a9xC1Yi56nXwDb8ltXbYGpZgMbUwO5k+8dvcwmmkp6vd1eDdYtlNciNTS3pi84jOETLOmF1ZLrN9+m6HBWKxLwOQ+JUhdJtMfi1NivkWvp2BSXfTacQx8og3Lq/1oqbK8mH+UoJc8Pm/GwcXi8TSnpmneEJz6hzx2C5F+fjieid0o9hZdr1/8V1A/W8Xf3x/Ll65Xh4hunbIT/8a/DmYgLCxUHdNyQpf/BT0f9EaPvtd6MHFLkNWrBmIjtiIufFrb/UWsMhExfrPEzvkagRNl+4sbJ6uwBkQA85OsaRdCzSerME7Aztn7sE07Rh3Xus6dPorK4hT8Jfx/1TEtJ0zkyx43LV+2EP1ubH7CH2cWH0HII1lYOyMAWCJuc
r1uRv9/N//4tyZZhTV8SyjCssWNvTrOOlZeY1vhGtdWyHx5pvgDrAhv738viW4fMl+e/TIFEctb/u9la2MJJFFbVBSFmeO7IPmwHmWpQVirHagML5SlSYfMqlddqUDx9gBEqvPMnDEBMZsScbpBO5AanM6OwuZ5wwzz+Y7C2gbzqW2FdHmo3B+AYF/D8nYmRWKmuLGS7QXyI+R6rt1usfqb3UiW3YPLdY0fiOClQUg3T3udS0eRs8nPsD6Zruh4lJqX0lyQ8/gjfIZcnuxy3Bubt2cZdaFeR4/SpKv7TKa/4XyGtmExGUdRut1PSWOA1h97dN6NblvZ9nFiWhAK6vfXDezPg4YSQf2hWGwOHIUA+X3ZLXxYCHK+sa54Sn8oHrowb8My1e2MMd5v8mZYBg/y85YJyjzJRcoUuomqC9OQCU+4PiICRkdfuHkBBUm7G3beYJTnS7fPUo+ryDcRUcg/YVyCfLU93+kTiUgIVs+/GX7QJeWhWp3LklKdzBMByG9witWgOF5Mmx3beKcSJukLUPOhzNchKJDPcb9ShgJdXbrlIw7iUWbeBkteq8zzpm63hWuVmjfUvCUfL5Ag8upFdZopa/K7FeT2NXqN0zfM6yKvFRs/JoGI6A7CAJKoDSvY9BTWpP2E4bO3ImRFNIZ2+BAJgU+JQKDuhlKPghhXRO4oh2b6OoTEpGDRbDfUpokbucXGN4M14uZRBBvLIlDU6znMjxLzvTQdNtlyviiUmt/o5YZgY+I98FqSgsCnp2Poo34ICQ9Wukp31iaI9byCoU0Ulcpg6VURkO39bgi8VqQgJGoVxnXYC12gK9YmmXcPfhzpq7yws3wMfMX6Fr3kA2QHIVwrgrX6G91CpC4ehY2Hu8FldpKync9P0eDUJm/xOc3opll2IPAIwmOScXnYKwgU8wU+PQRndoj5Vic26IVNtv/YeHgM/GOS8MzjvnhooqwytBs5+ea30YUoziiE3UwfDFV65LvB/elki+oDQQgJjEGV0wI8L9IZEhUK58s7sFH7FFKv0UuIfv8s8d3XcKqLp7LPQmIS8MJ4B5TtEvttSbyhkwI7N/jERMNdfn4iWtlnLv3kAN08Zfj0Ax3gMR0jlR4U7TD0N3NEJCd7V1RmaEDm+fW5HTEpSBzXFa9g0HcRiJnpjeQSs2rIJ9/AmwsiUDk0FIvE+TvfA/g0ZgIWr9wtrgqWObkGQgMdCgvN5mjwnLnGyfRtLOmPSYHinA/VoteXsVgbHoSE1b9D6pmxmBomzvF5D6MqNQhrNhnlTdmJRpC4VsXshc1vV4k0q3nzfX+RZ0ReN8qclRny/A6pz1svPjsGlVu88Ob76gz1mpffm9TPp5FrXBnyV48Tef0NnBmwQFmHktfzgxA5yw/ppaweTkR3HgaQRG1Yaa+5WB4Th0nj3eA8fg5eiFgnAoJC7DyodopyPgs5uyrgMu+v8PeaAueRbhgxMRSBS+bArjoNpXU3qaXx0G3Jg0a7D1HiJsl1rJwvGIHrtsJV3KCufyfLNKgTAapLkAg+xHwufnPg/KvBcB42BJ3FpM79x4j1DIddYz2fXc5CZowOlR5br6Z97DT4hB9EmLa3CHgjcOCUOq+iEGf6rVJu3gzpEjfEr2+Fy/l4/CNV3c6SNKQUAT7z4uA10VPZThe/OLwY5IYOpVn4Sr0frs5eg41pYn+8ug8hi+fARZ1v8cpQ9BJBtS7VNDAsKxoC/yXBYj5PuM6YBo3j43AVN+IFmVmmN59FYv2lDnB3GWPove9G96ddBYoy46Efvwral7RKOuU+mhq6EZO6V6GgyeeZHcWhTDHdax1eXrxAWbfzyClwn5cgbtyHi3VlokwmvoPsEW+4oee+XsOVfda3zdaPvk2dysLBXMB9gmd9tclOLj7w6V6IlP1m54iqFJ54ebVRno/eBx+nPOzcusP0nCwtg/1LHyBQOw0j5Pk7JwnLX50CfdpC7G2sp1e158PMfWkmQabhOXNuJs+Za4y8J
hnnVe1sN3HOxaO49xYsrstzMxIwXwvod+WhLqtX7o/CzkO94bPhoJpmQ94MkXldfP/N99X9Ia8fGxIBz61G65F5axXMn0rQ3PzeJPl4BgvXuNpDb+MtsRzjdSjp2SqOS4806N40Oy5ERHcABpBEbZiL66MmXXfDrj/6ysY/JysMN4Bd7GDfHcjfFYvMojLUqtW2bMZGI053tSF6WV4MyiBuEB9XA6A6vadh0pMO0G8TwaZJxxE+cGpeI6N6hptREd88McU07bLR/+MieEIacj43vrEbDu9p00zn7e0JN1+R7h1pIt3CLx1wv3jL3B6FghP6+hvvvr4pWLcpUu1gQARlH+vE+wJM8jAtR7EZqhXjRGCYYRYYerhhkElQ5YARE8Sdb24iiuqD3BoUH3gDeqdAcfNo6ADgxvenHTrbOyjdnaem5qGyrsSyoye0uz5DiNrzn2WD4R7+GeKWGHqpM9bNvr/6idqCsux4FIjz0XWs0UnWYQyGeslzxHJnOu4z5l7tuEPqOAbuviJIy81CmXGVSXE+epl1lGHvMRdeIh9kFh1Vx5gbLgIsT2D/LhTVZ4S658xNM3rOXOPMr0n2A0TahOF1P66ouvabIv6ve2h4GYoy0oDRc+D+sFknIr2nYNIMkYq6PFOSpVw/3J8wvyaIvPWE+llxHfm92WpQmhsl9lDDdVw9LsbXCiKiOwMDSKK27C7zfrh7ixsz8XZZLWHo4Ab3xQvg/F280j7nDx6Gtoap8sapvl1RBSqPy7aHGtSekV11m76qfh4ippWhyvjmdLwGXdWPzVVdJUO+KdD0t9DbnOMQpRSh+LxxW8j+6NpgZSLA6iHezosbUJkux+l4OnAKOhyIwNqZjviD0sYqBJnZhUZtncpxWjZgGm2Li0Xm21mCi7Lq6cHjMHle9H39G3So0UncSHshCxkH1FLABtX7WmJ/2sJ5ykZ4jSxDumwn6mlo56XbnojiUxaiikbUXihDmVhnQUY8dkb7Y/0W+WBlahsM1Z6BWEROlO0C6149EblNnj86ZB1oWEJm38P8jAQ6d5edkyTi9HeGYUW/3uimfqzXoSu6OolgrPRoo9VY7V2mw8W4mrY+Cwd3icBw8uNmP/g0osE1yaDDXeoHlelwBfQHxdvwIRbWYQtHJxlsijwjEq0/UyI+T4FjH2WiiV795Xx1riO/N5seVbJ9p8cIOFqocWEInrOgv7GVEBG1Owwgido5+3GRCEn8GitWxMHHdww6l8UjIcIbC2d6I1VtN1WrlIbpoJNdfZu9YnRZYtpuVJ6T86jEzV+znyFspeb1kN9VfZixLTR+CVi35zOEhEbDy6s/LubGYvOycQiYGYB8pZhBbKsM2g5GGbpvN3vp9sh56kpEVHer78Y6jMGImQ4oS8sSt7R1JaqeJtX7WmR/9vaEf0wR4rYk4AXtHAzqdBTpm2YhcsYjiNxu3k7UVHVJPGJmdMEfvIZhmVjnm+8k4zSGYISLoTSIbj3Dsx8dMGmxbKNq/kqAz+jmlpA5KOeRVTpY+PGmjr0nXGVHPmo1bUMnP1q4uty6XkcvN7fDG8V15PeWdqWRqsJERLc5BpBEtwPZ3m28FlODEhC2/QLit0TD5XwWEvbLh5Y7oK+TvDkMRlj2BWxr5OVj3sDoOnXqKsvpdqPsuIWbq1MlkM0ynbsb36weR1WDX/D1uCgDMCcNuiqd1qhkO6WJc+C/JAVRu84ibtUCaE7pkJ4rQz0N+nqKt/Fbsc7C9hle1jwuxBbO4+bCTunopK563xSj6n0tuT9tYec0Be6zoxG46Qi27TkArUcFijftMKsCa0Sfhp1LgvDVsDisEPtArktWVw4MCoX7ww1Lr+hWUJ/92F0LF0/ZRtX8JY65hwj2D8Yj36wznSpZDGfmzKk88b8b+vYyDCtOluN79WO9C+WoFMuz69+wZP0qtSMfpeplBQr26QBfcX636qnjADsRMKNQBHSGEUZqUHlMlpyLvC7SYNdnhEih5euHY
T/Uaan83hQ7dJXPmdtfgFMW8mPlccNxsbve6hpERO1UOw4gD2LTww4YbPHlgt8vWop3Pz2jzntnOvSG3BfrcUgdpttQaTzWzhsGXa7pzVan/oMNVcU62CrtkhyHasWtkAi0MsyrzMleDAciQBuBYvOeQ83dZVgWrjQ9o83DnvDpDqTu2W12syhuqj+MRzHGwGWYcXuiQuRky0DXyIkdyNglbhHVaqPVB0IQqfVD+gnDZAMRfGkGK+0AOyvFe7LTGHFHmf0Gcup7qVXJHiDndcHCpTpDD6XXMtQT3k4iXRmvoVCkw/13viZV7258fxYifekELIxJM91uESA7Kg9GFvu6sSLgk+K754FBo8W+6W5U0nSlDIc+ljfixgG5mC6OBX42DNFNciEL+dsqYOflBqdGjqPhIeENO9NJ35tmmm/EuZufJjtN8sVQeSzrlO5A/mHT87zygDgnZZviMU13hmPoyCdLnN86lO53gI8IZs3b07YsDYZOFHlTBMyZZmlG+W6kbxenvp84n+W+choDd0vXj0tin75vXPW9BfN7nQbXOFs4jQ0WeT0W6fvN8rpYR+auLHGt8MEgK9qOErWKS+LvQUTdo6AGIlnWALdAPkN15vgo5Qfcdk15PJV8BJfxtYBuhfZfAjnkKcwLWo5XjF9zJ+Lekq2IeGECFv3T5I7z9nX2IP6xZqu4Oac7irjZchZBS/pqP2zenogC2f4nW4eEsFlIFYGa/3jDw8NtRgbj5dka5ERMQOSaeOTXz/c7bEwT90DPPgdny02bruruAHmflL8vUfn+6cYaWcl2mYFa2O+fheWBAUjPFuvKTURy2GiE68rhPCcS7rJHnHrDYXNwNtao6cpPCkDkgiAUDA2FVu1MptMwNzjq06B7xR8JqbuVdk4FGbHYvCIAxd3nwG2sISC19/or5nuWYee80YjRGfaHMt8Sf+wsEjemM6ejrzLntQyHi68byraLG0doMXy4afHMje/P4XAaKf4WJs0X2x2LnFzZbms3Mjc8gzhxM+30kk/jD0oXx1wG6Pmb/ly/L/JTQ7Bx1gTsLJQlo4VqFVupN+zlLszZgUzZCUul2Y02tYrqz0VQJAKc+l57LXF0w+ixaNiZjsw3SyMM50R2LGLEcU0+Nw3znzfvNKkQyS95Y3OSPAfSxLnjjeUrd8NpdnR951mNUjvyKd4SgfTuWgwd2kSV1xZi7xGMqSPLRZqv5k0lr/9xFvIdtXh+uqdh+8T1w3dlKJyM9kNBRpTYD944IPapsZbL7yoL1zibkc/heS8N8ldezeuG9Mjj4oapLz3XvHUQtaDSxKegy+iKR5fIx1slwJX9qNFN0v4DyPsmYebz8zDL+BWwCvGJKZg35Az2rHkXuT+p897GDiV549V3foDxpo6cW4Gjh1+GuE+9o23cuBE5OTnq0O1mOLyiP0Pgkxqc+XAp1sr2P9GxKLObi8BtKfCqv5G0hZP2PUStmIteJ9/AW3K+ZWtQisnQxuRgvllvjpaNgeuqBRjxbYTS5iizieef2XvEYaUuAZP7lCB1mVhX8FIcuGxYV8hM85vq/pi0NAnuXdKQIJYb804uuj6ZgKjo4Ku9Udp5QqvbB62L+IP5jr/SxmntlmRcdIpGmC5a7YVV0sB1iVhH4GQgN0LZH2s36HCmj9gfuvfg04wbZUMJkTBDvDeo3nfj+1MzLUX9vg7vBst2WwuRWtobk1d8hpBpTZQgiRvsqa8nYerYKuSvlvtiFnam6aGZtw9rXl8FsYtQeryutMQBLjOj4fpzMjaLdSTkyh5B2oa33noLH3/8sTp0O1Gf/Yjp1wjMNBjqLutgmnam4/LqAbz8cBl2rZZ5WYdal2iEbN0KV6Vk2lgoFm2bi055IeIc8ENykUY5dxZrmwha69nC2UM+E1KkYroPnBsr7W5JHcfAJ9qQN2s/MlyrYv5Rgl4yr78ZBxej7bMZGozFuq0Yh2Qlb6zdkobOU/Zh8TzDD2JXtVx+N7B0jdPAZ
ckBQ149JgJ6ZR071OOSch3raNt27NiB/fv3q0PUtlWg8lgFMH4uvLzk463GwL6RHy6dtLJKt/ibqg63W/bTEJh9AYETTX9MopvvF/8V1M9W8ff3x/Kl69WhW0lWYfXG+smbcGDtU+ipjjVW/M5EPLmmAi9v/wLzWqh9V1slq6v+Pm4p/u8OChjLTvwb/zqYgbCwUHWMZTKADAgIwLBhwzB9+nTl5XSNZ1SELv8Lej7ojR59Ze+HRGStc6ePorI4BX8J/191jGUygPzDH/6ABx98sD5fDhkie7BtXJjIlz1u13xZFIWZ8yLgEvr1NW6OKpATNhAb94ci7EZuCEtjsWz2DrhuOWD0QxPdrmS+PFP8AVaEN/33UgaQv//97zFo0KD6fDl0aNM3UPx7eauo1wJsRVz4NLRqM2ZqFTJfnv0yBRHLm/572Rbd1p3o9Op5n/j/DKD08HYGexY5YPAbB3Fy70J4/VZ8/p0IQP9VV2anx6GEpZjz1EOGdpRi2qtvfYKzJr3DqctY9B6+/uY9vDbfRZ33GUQkHMQP6lz1fizF3jcWQjtTne/hh+D1wkJs2ldqVFJ4jXSd/QTvrpgFbV266tp3Hqqr82T4/u/j5OdV+L2cRyxLstgG8soZ5Jpv5xsp+PpHdbpCbV8qlvNDUQIi/vQYxtXN22CftB9HjhzB//7v/yp/GCdMmID4+Hj88EODo0ZEN9GXX36J5cuXK4Gku7s7Nm3ahO+/b9A9DLWoGhRnxKBs7By4MHgkC7766iv85S9/UX54/c1vfoMNGzagslW7tL0O8keX8f7IOZGH1OBhShvA4LC6dq96nM6OwuagCVioPkInQOuNzbrdOG3UPl2f4W9YRnkZCnQBCJ9haEsYvDQEOd80bKdRfTgeOmVdYr4ZfkjIOIrTaeoyTHaPHqVJQVirHajOOwGbt2cZPXbqGs5nIT3aD8G+hrQvnBeA5OyjqFYnG7Zd/pAkPu+fZWgDqTPuZMqUeRvIZm/3N7uRLB85JdejzBeE9ENm7RCvVKDYfJvl/q5/pJiUh2Q1rdWHY5XHjxmOjdiXak2MytwobFSXIbc73bi2U4M2kEbLK9FBt1RtD9rY/r50FPm6Wep+FdsREYtisa/rlkHWu40DyJ9QVJgi3r3Rz/jH3I/DsOhtW8yKSMLf/P0w9sF7xMgT2PPqY/j96g/w0/CF+NtmOW0QTr7tB5/Q93DS8M2rTmzBq7NX4eTwpYjf/C7WTgbSVnvjf15NwVl1Fvz0BTbNewx/2qOH89PLoRPLjH9tPkYiA+sXPoXXPjHLoJbSVZGCRdP8sLHMDm4vrlOW8bcIb9jL9p1af7z7b/lFO/z6D0kI9ZOf/REq5tFNbuSOoFoEhnMnQLs6A/d4hou0q9u5Yxa85i1FlnmbZJGmOX9Kwk+/Xoq/bt6KUBdbZIoL2oubD5pUlW2PPvroI8ydOxddu3ZVStWTk5PVKUR0q2RlZWH+/Pno3r07pk2bhvfee0+dQi1C3pRuj8XONX5KW1uvadOte/Yj3dE++eQT/OlPf0KvXr3g5+eHxMREdUpbcBzpq2aj+OFIhEStgvtvRqAvalC27SkEL3sbF52eg798dE7UVngPAw5t8cfK+LSrgZhCLCN8AnTH+mNSoLjvCp0Lx7JYEcTMMwkK9SJQW/xSEAo6Pof5UWK+2WNQuWUUVm4yDzxkZ2qPIDwmGZeHvYJAsf7Ap4fgzA5vhKxOtNATsalaERxGzvKGLv8ejHvJ0LbRa0AZ9i4bhTUxaSI0Ffr51D8OCKODlXWEeDRdg6Mh67ZbpmeNuOfc+90QeK2Q+3IVxnXYC12gqwhg1RtH2XFVkCsiY/bC5rersKhum9/3R/Bisc/MNzo3BGtW5cLe76/KsXHvW4DUpbOh2zALkduPw2n2VoSsiISmSgSFi0NM24lbIpe3JBG1o8MQIPaL9hFbHNrkjTXbjDrou1wo0jgKMe9XYNCzCWL/rcOkjmmImzULe9VZyHq3Z
xXWn/Qo/udy/Ck0ATXT3sU/l03EvUpJ3UNYtPchvJKYgVlG+eyHfQvhsjABk1fm429PyFJLg58Or4d25ircG5GPeF9DaaZhGb3wxGspWPs/V+c9uWcWJryaj3kiuHr50Xvw079WQbtyHyavNV0XqlLw6vhZ+EdACo7OlTm/8XR9vWsW/hRrh5ffW4fJxt2El2zCk9OWo99rX+Bv/2Po191SFVbzcSd3PYMJoRWYp0vByyNl4Kw6/R7+9Pg8fP5CEj5a8Bjuqdu3Ivhe++FWPFHfQ4Aee0MH4U+72kZVWVmF9Y9/molf/epX6hjLTp8+jX//W4m2m9S7d+/6Kjv/TMtglRyi6yCr5Pxt2Qz07dug0Z6JiooKHD16VB1qXM+ePZUqdTJfpmfsZxXWG6rCWojUGeOQcGkwXJ/dihf9hlvRXpJuBzJfbgp/Fr3sLTX4uUqWMspaAdfSrVs3JU/KvLlvf/at+Xup5hl4JSB+iVEnU5eysDMoAIXj38WKGcbtyfXIX+2ImNSreUeWxAVE7IZGuw9hIiCszw9KFe8Q2Nflx8timdO8kemyFctfnXb1h5fzu6Gb5Y/081MwP8nw2Jjq7ADMWaaDy6tHEOh5tT28EhiK9HZecgSLvBprJ1+GzOBh2HwuGGEbQq/2AyBUpvlj4co8+MQUYepI2fbW+iqssgQyfMt1bLe8ZswW14wecVgXpTX6wUkEyWG/Q/pdi/FiqBY2qX5YuLocPhv2YerDRu2CyxMRM20WvpqZgvVz3MR6ZInhBOyE2F+JYn/V/Zk4pUPkjAAUO5lt9+EIzHwpSiz3gliuGJYlkH6zgPr0NbI8C8e6UkljFaZuMm27XLZ9ApbJHwFm78M2rXk769Yl82X0Ej/cr7GmH4obI2v4hIWFqUM3rv2XQO6dZ6heafxyGYQnZfD46Mv460syeDT2O4w0+ZHmDHI/ShDv8/DM5KsBoXTPw/5iHJD1z09MSyGHzMcso+BR6jd5FmaJZe08/JUyfM+vRZD1gVnwKHV1QD/1oynzdAEDfbci9SOz4FHq1tgymnICuf/MAH49C08bB49SX2888zxwdnMGDhkXLU4QaTLpXs4O/ZweEu9HUVlf1HrrZWdnN/myJniUysvLERMTg8jISHxxuBBXrvDZB0TXy1JeNH5ZEzxKZ8+eVarPyXx57px8OOhtamiw8uzCpoNHyQGu4dfbIcZweG0X3931GeYzeLwjWcqLxi9rgkdJVjN//fXXsXLlShQc+hyXf7rWc6Baj8voMaY9FHd0E0HCEbPgUbJD1z7qRzPDzXtM7jNYyV9fifsCqfZwGpLPO2Cyr1HwKHWfArfpxuupQNHHshOtBZjkYRoU2AzVinFAQUZW46WQp7KQkws4+z5nEjxK9h5z4SWWn5xr9tirG3Ct7UapSE8pMMnH9FFWSgdW4UcQJoLHviKYLMpIA0bPgbtx8Cj1noJJM0Q4ty3N9NnG433hZPwbo+MQKC1tXT1Nt7vXEKVjuNNnzKvImTFfnjjW9gPkcSlB1Xk5rKZR7EN3s46vNF6BcFc/3yqZmZk35dWSbs/HeERshS7xC3y0eSnGmgdfE+4zywQV+Ppf4u3X9+CHwk+Q+6nx6yv88Esx7V/HTTP7fQ4wfp6z4h5xsooA8GxJ6dVqrNJPepz95qBYVgr+8dZyvPrCy9imTjLRIF1GqvU4WSLS88l7eDduKf7051VofnH7GVTK7XxkkIXg8x488KC3eD+BSuMHut+lPEXOhKFd6U/4qY20g5RtM/bu3dvkS1a9uZZHH30Uf/3rX3Hy5Ens2bMHDz08HHfddbc6lYiaY8zYX1vMi8avRYsWqXM3btSoUYiKisLx48eRmpqKHj16qFOIqLkeGTXaYl40fr366qvq3I0bPnw4Vq1ahWPHjinfGTHyEXS4p5HuP2+Cvr0a+9GlBtXnj6JUedRRLBJWe+PN99VJZjqI+x0TXbpC3j7qrxja31VXyfZ5Y2Df4OYP6GZv/
OyMcpzOF2+jbXGxSD6eyfhVgotdxLSDx2F8q2WiqkJ5HNvQARZKpDoMgUYEoCivMKuCe/2utd24XCNCL6Br16bKNyugl11vDB9i4T7WFo5OU8R7GaqMq6GK9Vrs/Nn8tss8fY2xsLxu3eVxqUGtcr+qpnFA/4YltXYOjd9/3wRubr9Vej5uzVdruD0f4+HrjbFDeomwyAJxkpkGRT+JqE+8/Ws9/vSCH7Rmr4gkOU8pTl7jx49696hrvXICe9f4YZzLIIzz8RbLWo5//EuPe387EY8Y5jDVIF3Cj1/g3UUuGDx2ECZME+kJjcbeb4CB7hMxVp2lpdS008K2AQMGYtKkSU2+GuvZUaPR4JVXXsHnn3+OvLw8BAUFwdGRT4QmulH3W5EvG+vZUVZJl8Flfn4+Dh48iMWLF+O++0xrfBBR81mTLx96SNYyaqhv375YuHAhcnNzUVBQgCVLluD++00e6HvLNAiChMrsEET69sQc31EID/RGwp40VNv5YEQTT0hqGSLwkiVeB6OUR77IR04Zv3R75DwluL7+iC6pnUK2L5fbYZpvpv7iXlRWL22tV2u5rXthtc59GPiEeJuwCfsOy+cmWnptxRPGP3CdqMAZ9WO9HytwsgToef99SnvM4oR5+NM7NZi6MV/cCMll5OP/3liHV/x/B2fDN65Bj6wYf0R8PhorE7/A4UNiGR99At3aVXj5CRezarnW6AX7X4u3z79q2CmQCKJPlsoOh+6DvXmJ7W2mU6dOyqMDPvzwQ3zzzTdYvXo1Ro6805+USXRr3XPPPXjuueeQkpKCEydOYM2aNRg9WrYRJ6Jb5e6778YzzzyD3bt349SpU4iOjsaYMTe3jdh1KY3FxmWxqPVKwrrUs0rV8HWbUvDCvDlwGaDO00ydusoSwTxUNrj5A74/f1z9JGnQVz7edfxWrBPrletu+DK0lbSoq4Nyj1h07OqzYetdPo6ybPHe28G0ym5r6mCrPC+2yqT40KByjzcWzotA8SUH2MnLdaEIjA2TjNSg8thu8a5Bk4WYrU5N47Hjhk6IjF2oarxEmBrFAFIEVs4uE4F9W7DnsFnforLX0pkOmPCnBHytjlKUJGGvSWNB4GRmAt7FQ5j9mPz17gy+LpRl5Y/B7bH7cK9RUegPn6Ybqp9WVjV87IeJUhQniivV8EmG0lSjX9hO/usDZRknz14tFr3nLlmvoqk2CPdh7P+I7fzXVvzDLO04nYJ33xLBr/9jcLZYbNv+eXt74+2331babWzduhWenvIKf/ur76r7un7tvIUadNXdxliZPvOu08mUzId///vflXyp0+ng5eWlTrlNyE4+xHmSXKQOQ3Z6Ic6JsET1JsZ8+BZo63mtDWq311UrTZw4EZs3b1by5bZt2/DEE/JX9vZDfzxXueY6j/WEfRejul0XslAgAzCU46LJoyWuzeZhT/h0r8DeXWa9qF7KQ35aoTogOUAzUtxfZL+BnCKjx09IsqfSeV2wcGndo0YscHSD61igeNfbKDW7pavc/wZSxfInjRxx89ovO4n0OAHpe9MabHfmrixU9h8BTUcNhk4U23wwHpmHzba5fDfStwN2fm7QWKyzerOoaRT7MNPsuFQe0CFd/UzWYwAp9PNdhbVPnMD6mY/hT2+8h6xPP0HWPzfh1QV/wPrDo/H0i34YqM5r8AU2aZ/CqwkpyP00A/9Y44fpr6ZgZMBqPKPUluyFkY+JYA3r8VroJuz5xNB+ceuKZ/DkCwk4I4soq36AcXvihh7CyBdEULgvDBFrErBXtsncl4DXXnkM02PzDaWc1VcDwV6OspnxPuxJykBu0RmLj9noN3khXn60QqT96nbuTZDPqZyHvff5I+w58w6Hbg9TpkzBBx98gGeffRYdOtzSK9htSV+kQ8Iu4z+gd6jz4g/qhnilvQhdm4eHh1IT4Pnnn0fHjreu/dSdRY/K3FgkZzNYJMtcXFyQnp6O2bNno0sX2WCv/bF70BMjxHvypgCkZ
qSh+FAacrYHYe2L3sg8J6uTlaO6uX3+dHDD5MBpQNosRIZFISdXBKMZUYiZ5Y/iDqb1Yu29/or5nmXYOW80YnSJKDgk543F5iX+2Fk0Bu4zp8Okf0ITGozThsL5XBTCxbKTlfSLIGyNN5av3C2WvQ5eY29mUd5wTAoKhdP+WVgeGID07CwUZ8cr25J8zg1aP0Pvt/YewZg6shzJL13d5vykAET+cRbyHbV4frrnzSs1bYT9xFBMHZqHna/6YXPS7qv7dVfFdXRIRgwgFffhiYh90C0RQd/HqzDnBT/MWfMuTvadjb+9l4B5D5sXyy1FfPJs3PvJcmhfeAabCu/DzHWfIH7u6Pp2l/18N+L9iFm498hGLJrvB+3qLTjUyRtrPzqIDdNFYPj5UfzbUpRX7x6MDUhBfMBE/JC1UGmf+ee4JPzw4HLsSE7BkslilsKr1VF7ui1E6P/UIC38GWhjMyz38NVpNOa9YdjOn9LClO38U8JX6Dd9K1K3r8Pkxq9o7Vq/fs3vs5asJYKmeeKPtN7sV8fbnf00BGab9phZumcCNidWmfSO56SV1ZWup7fM21///sYdT9wp1B5Ur9HlfqupTENCcAjKjH+9tHAu051rwIDrrOPZloiAZX58HCZ1zUVKhB8iA/+M9BJbjF72NdavnCvyXh5OnWz+3yw7j62IjInE0DNvY2OwN958Jw/2s1Pxoq/5tUwD1yU5CAkUN2q5EVgbKO79Nuhwps9cBOreM3mEhCU2Q4MRsjUFWpefcGCDTL8/Uo9pMHnFZ1i5ZMpN7/BFpmexLgGT+5QgdZk3IpfF4pTYlkWvp2BS3R+3jmPgE23Y5tqPlirbHPOPEvR6MgFRb8bBpemnOt0cItD3WfOZCO5t8dU7/mK/hiDniideXikCdnUWsl47fg7krVD3zMa28RzEO518DuS/DmYgLCxUHdNyQpf/5bZ4DqThWU+ofz5Vy1OfwdTSz09q8Kynts/8OVt3Kvlcq8riFPwl/H/VMS0nTOTLdvUcSPU5dVM3XRA3jeo4E9Y/x61FtMN81Ra1/nW15cl8eab4A6wI59/LlsZrfzt3C6+LMl+e/TIFEctb/u9lHfn4DlnjR3ao05I9srIEkqiN0R+KxebAUQgYL9vPDUN4WAhyvmnYQqr2xG4kr56Ahcp8AxG81PJ8DdXgdHYUNs8bprTPmuk7Cms3JeK0pTYh57OQHu2HYF+5ji5YOC8AydlHDV2IK22n5AN8hS0TlOlX23o1I30XjiJnU906xPaujkWpVY/7u9p+7PSJRCQEq9szww+6pDwL3ZzrG253dDyKlWdEGblSgeLtAYjUDlSXNwEx5vvHpN2YIR3hW+SECITL7+jy5IDlNpBy+UlBWGu0/M263Wb7XwbmhuVUl+igW6qeD3Le7VnQm/VqZ+05Q9aw7jzRH4qHLsy7Pm8o54mYr7TJ3d54m8eL8hxWj3OA1sI5rLSn9EfOiTykKue6yFNhdW2p1DQH1eU3uQxvw3lVV1VPfl/cJMknDORHyHNPbcNnci5fVf2Nef4NQvoh03nq2wKWl6FAF4DwGXXzWnn+WZPXJHGNyBfLj6w7JnIdgYZr0dUSf7PrgXGeSSpU5lPyknqdkPs44YBxhXOjPHc4FhvVNAVoZ5mtpzFWXlet3WZqW0Q+2SjyVEKu2Xl9Kc/QrtJD0/DxbtSmlOrk38aGzUwqP98lrosO0PThj2rNwQCSqA2pPhCEkMAYVDktwPMxKQiJCoXz5R3iD9dTSDWKQmrFzWDkTH/sLHSAe2iSmG8VxnXYrcyXbN5w30SNuIiKm95lESjq9RzmR6Vg0UvTYZM9C8GLRaBj3C6kXPzBnOUNXf49GPeSWEdMArwGlGHvslFYs03ckNm5wScm2vAA3ieixfQUuKi1ha1On+xUYPEobEyrwaBnE8QyIuF6VxrWL1mKr9RZrunkG3hzQQQqh4ZiUUwS5nsAn8ZMwOKVu41u0suQv3qc2O43cGbAAgSKt
CrbnR+EyFl+SC+tS5MeBTGuiNxRDs30dco2LZrthto0uX9iG2nfaIehM1KgVfqZ0EIrj5uH5UfHKNsbJJYfsxc2v10l0puCwKeH4Mz7/mL5QSgwr3ueG4I1SxJROzoMAWL/ax+xxaFN3mL/X32QtLXnDFlD5o+nGjlPRPClHh/9/llin7+GU1084btE7HNxbF4Y74CyXUEIXxLfeAcZjak7h4e8ghfFOfzMOPUcjk4z+yHkONJXzUbxw5FKnnL/zQj0FWku2ybT/DYuOj0Hf+Uc2ArvYcChLf5YGa8uo58PQsKDlapazlqZ117B0EaKPCv3B+BVrT/2fjcEXivk8mT+3QtdoCvWqsHYVSJN4ROgO9YfkwLF/gqdC8cyGYDNu0YnM1bmtcuFyjUi5sPvoXlCbLecLzQQg/Ahdi7zanhDL/flKzpU1+WZ4UBmzFN4c0MQ1oSnoevkv4rticO4rgVIXTq7YR6ReS40DR2myDQlwPfB42I9oxApr3nqLA1Ze129nusLtQn2bnAZVILU1f5q+zmjtoClYzB1xpRbUy2drKYZ8xywPwgxahtWpW2szh+R4l7BznMd3C0/WYoawQCSqM2oQFFmPPTjV0H7khYuI93gPHYapoZuxKTuVSgoquuophDp0REodQpG2NYE+Ez0VObzCf/A0HD/1SgUN9ZDU2k8dFvyoNHuQ5S4mXQd64YRE4MRuG4rXL+LwPp3stSbJD3y35qFHCxASN06Rk7BpMVJeFk7Bhfzk1F6RfY2N9zQHqPXcDHdDX2Vv6DWp69yf5TSqcDUlUl4wW/K1XU8qTEK/q6htAz2L32AQO00jBjpCdc5SVj+6hTo0xZi7yFDYFh76G28lVoGl1f3IWTxHGXfKtu9dR98eqRB9+YOQ7vh81nI2VUBl3l/hb+XTI+cLxSBS+bArjoNpRYDMlvYDXGDRvn5ub/YJ+K43Wf5VkLZ3kO94bPhoJpecVPiF4eQ17fCpSgeb75ft/9VRQ6Y9HqK0b5JwPNeIhzekqXebFp7zpBV1PwxYrHZeSLyh0vH48jPPypmOopDmWK/eq3Dy4sXKHlIHhv3eQkInCeilaJMlDUZOFnQ2Dm8KwKZJudcIUr7R2L+THE+iOPsNVGs71Ie8g9UQDPvXbH+q+eAl3qu6EXAd0p+1W4wnIcNQWfxsXP/MSLNw2FnqU+xy1ki4NKh0mMrlsfEYdJ4w/J8wg8iTNtbBEAROKAssI4IrEYnmF5PIiKhwW7kFzbRWY+Vea32cDIKqobDPyLBaL4FeCFC3PCJ8z/9aIlhxjqlNXBeanQ9CQqFlyydTCzHpHV11yMt/JeuggvyUGr+uIQiYITx9WhJitju4eLUMN9uI9ZeV6/r+kJtgwNcglIRKP42ndm90PBcx+g3cKZPIEJ2ib8jQ5pu10i3ns2QYCzeEI0RSMPOYPlcTj/szAVcQj/DGnG95Q8AzcMAsll64Ym18pmObP9IrcEOne0dlO6/U1PzUFn3q3VHT2h3fYYQX7Wnt5I0pIgbDWff5+Bk0nmlBq6+c2F3PgoF5l1pq8ryYkTg4YaJj48x7Qa89zRMetIB+m3iJkYGdxdyUZwqlug/Hc4m67CF02xxkxQTajbeiNXpK0NpThowfi5cTToVEOvwnCtu7qzkFChupOWTqq6y95ir3DRmFskb/hqU5kaJgHQBJnmYzicb/rv7ugG5iSiSN4dd7GDfHcjfFSu+W4ZataqozdhoxOmMOgy4LmUoyhDbO3oO3B82u9noLW5UZ4ib/br9X2e8L5xMOh8Q6Rsgz4MSVClVKq08Z8gqZYd2iKM0BxPNzxORPwK378N8L9nGazDcwz9D3BJD74PGutlfZ8dAls7hcVpMEsFZSp7pjwAuo8eYrrejG6ZuOoIVM8yPtR269lE/NkPt4TQki3PL6wnzzjpEvnx8DpzFzVfO56ZB13AXs+tJn8FKW7CvyssNw5ZYmddsRocibPsBeJnnPTuHRjoT8cQI4/zVY
Qg0HuLdPC/17q/0hJlfbhbkegVikvn1SN3u4q8sB8RWX1db9fpCra7jYLho4xCi+9rwPEd5jV2yAM7imFL70OnhOdCG76t/Rue6TQnwnzj4lvcQ2x4xgCRqM2zhPGUjvEaWIV22PfJU20JtT0TxqavlcfrvSpTSuV5Xjhuq0Ri/ztTAUUyrPGfpxq0ClcflDZAGtWfMvideVT/LapdlhsDkkl55sK599+Z3nWZ9+kR6ZNsRp/4NbwTFzZ3ZLXzj+vVGN/VjvQ5d0VXcjOlLj4q0iG2Rq/MYAUcLJS72A0QAiSzo5QZ3cIP7YnFD8F280pbpDx6Gtl+pGVmovOE2ShXQy8fDDh9i4cbXFo5OU8S72P/GRa93iSSpH+t06y6DlBr15tO6c4asU3tJBmu90dnKpxfUXigTQafsoj8eO6P9sX6LfGD2dbB0DncR57B40x83ffB1316NtdOpQfX5oygV6clPjUXCam+8+b46qRmqq2RwOAWa/hZKVByHQNbyKj5vGkh1MHpOsaIu7Vcs/5ClaG5euyxy8ok8cR3ZjcztIdgcOB/KM5UbsJBuyUJesvhQpz4WHtJu11u5buWcsFTJtBnX1Va9vhAR3TwMIInakt6e8I8pQtyWBLygnYNBnY4ifdMsRM54BJHb1TY46k1ZZrSsgmH2CotCsZjW4Fd1Va1SuqWDzvx74hWjyxLTdovgTs5zA24gfS2uQyM3k8bMbnLtx0UiJPFrrFgRBx/fMehcFo+ECG8snOmN1JImbohv0GWzjnGsZs05Qy2quiQeMTO64A9ew7BMnNdvvpOM0xiCES7yx4gW1vEek1KtBsGaUJkdgkjfnpjjOwrhIj0Je9JQbeeDEa1QAG1cQH6jrMprV8qQv8EbARMdETBzgriOhCArX49O4z0xyDDHTWN3l+XrSXOuq7fq+kJE1JIYQBK1Obawc5oC99nRCNx0BNv2HIDWowLFm3Yo1aDs+o1Q6ur7bDBUwbD4svhIDQf0dZKlF8EIs/Qd9aU8bqCjnVKCUHm+YUlm7cEQBGj9kXlCHWHG+vRpYC+rlhWWNHxuaeVxWPqt36KT5fhe/VjvQjkqS0Va+vcXaRHbIgtS9xfglIW738rjssdUN9jJDa7TwQGa8VpMDUpA2PYLiN8SDZfzWUjYf7XzmuZzgN1o8WZpe1GDymOy9EqDrtfVEKPpc4asY9NRRlzluNigNKgMmUHDEL4lC7X6NOxcEoSvhsVhxa6zyvksqx8GBoXC/eHrbEVj8RyuUmoBaPr1brp6VWksNi6LRa1XEtalGtKzblMKXpg3By7X8Ui/Tl1l2f9ulB23EMycKlF6FHbu3lgp6HW4Rl4rS5qNmMQauEcdQXyGvH4cQVh0HPz9fKyvpdAc56oa9uD83VFlu4c6WqqR0Yzrap1Wub4QEd08DCCJ2oxCpC+dgIUxaaY3EXaDYbhvsYWNrHM1YAzcuwOZexIbBCKVGbMQ4DsKOw9a/iXbcahWhFM6pGeYh2dlyFk5UASGESiW7ei6jIWz7KwlLdm0Z1YxX/6eWOjvHgvNfXLYFpDtP36Wn1VWp88BTmM8gYPxyDRrs1mZu0N53IBVSsW85t8/ILYRw+E9RgYEtnAaGyy2Oxbp+822+1IeMndliTtDHwySddRK47F23jDock2X16n/YEO10w7iGChjGrK5S95EWt7vBhoMnWh5e1G+G+nbxaH2c4PGYr26xlh5zpBVNCOni6MUjyyTxzsAtYffRsbBMjg6jYDNSbHPzwODRotj1d2oROpKGQ59LH8EOI4qGfk1h4Vz+HRGvHIOu45suhhRfzzXENSN9YR9F6P0XMgyPF7AOCC+Sz1/r5hkahM2D3vCR+Tf1D27zfJvDUo/jEcxxsBlWAuEblbltQqcLjL8wDNirAadjM7l6kNphmuEpYDvRuzSocDkdzOx3dmybawWLo9YDpytvq7ewPWFiKgtYQBJ1GYMh9NIEfwkzceaNbFqN9O7kbnhGcSJ4
MLpJR/DQ4o7uMF3ZSh6pc3C8sAApGer823yR2REIuDyCtxHW65qZTMyGC/P1iAnYgIi18QjX7bTydYhIex32Jgm4qhnn1M7x7GDy/Nb4XouCuGz/JGckYbi3EQky/n2azDpRa36639v2Mv725wdyBTpLasUN0bNSJ+9118x37McyS95q12ji0BqjTeWbykQKbBWodH308T+Et9fuRtOs6OvdsQx8jk876VB/sqr212QEYWYWROQfM4NU196TulQQ0S0cBY3femr/bB5eyIK6vfPLKSKG2f/8ZZKdg269ZbT0pCzR+yrkgqLJQn2HsGYOlJu72jE6AzLz08KQOQfZyHfUYvnp3s2szG/lecMWcdpDrSzx5icJ/L4rAmNwvcjozFxvDgrxTkiA6z8TX9GQqqhO//81BBsFOeSfGyNPB8NVRqbwckWxavEOWeUB1ZGp5mcw42xe9ATI8R78qYApMp8Krum3x6EtS96I/OcTE85quvixe4OSlu+/H2JyradttRMVrbTC9TCfr9R/lXy/miE68rhPCcS7ver894Iq/Ka+iMTopCwWj2/RVpSo/3waqAO38sfr/T6Fi61242Nf/TDTmVf7kaqut2uoYvh0shFyerr6g1cX4hMNPL8VqKbhQEkURuimZaCqBVz0eukDu8q3UwvRGppb0xe8RlCpl0tibAZGozFugRM7iNuRpS2hiLIOwyMCExB5JJpFjppqWMLJ+176jrewFuync6yNSjFZGhjcjDfuCfI3tMwf2sKtC4/4cAGP0QGL8WBy5PxQvwBaMfW3Uk5wGVmNFx/TsZmkd6EXMNP99anTwPXJTkImTcEp/4hAkyxvennh+OZ2I0iRdYKxaJtc9EpL0R83w/JRRplfy3WGveIqIHLkgOG7T4Wixix3Ws37ECtSzRCxDb61Pe6OBxe0Z8Zumr/cCnWyv0THYsyu7kI3JbSsCdII3bjXoF2Yg0+XSP2VfyHSvXDBjqK4CNabG/gZNR+ZFh+zD9K0OvJBES9GQeX5vdZZPU5Q9Ywyh/qeWI4PklYHj3H8KOJCLCmvp6EqWOrkL9anrOzsDNND828fVjzunw0BFB63OoK2Ab95uLF17TqOeyPXUccDMfP5BxuhKMW8+PjMKlrLlIixLkX+Gekl9hi9LKvsX7lXNghD6dO1qgzj4HrqgUY8W2Esm2Z9c8/NWXvEYeVSv4tQeoycU6peV9eI0JmWpEmq1iX1+y9tmLFkjno/GUMNsrzO+YNlHacgvm7ivDykyJALiyxWDX9unmI9UW4ofKd+UpeylS32+Ta2IC119Xrv74QtRw9KnNjkZzN4JOu3y/+K6ifreLv74/lS9erQ0S3TtmJf+NfBzMQFhaqjmk5ocv/gp4PeqNHX9ltP7VNFcgJG4iN+0MRlh3MkrY24tzpo6gsTsFfwv9XHdNywkS+7MF8Sa0iD8njJ2CnCCDjwqc1owZE+yDz5ZniD7AinH8vbwuyBNJvltj5XyNwouWq1Y26ke9Si5L58uyXKYhY3vJ/L+tkZmbCw8MD7u7u2L9/vzr2xrEEkoiIiIiIiKzCAJKIiIiIqC26cBQ5m/wQ7NsFM8cPQ/jqWJRafNyWHqezo7A5aAIWjpfzyucCe2OzbjdO17WDLorCTL9ZSgdU+REDxTz+yKnrLUusJ18XgMh5w5Tvzhw/EMGBAUjOPsregakBBpBERERERG3NpTwkLx6FjWk1GPRsAkJiIuF6VxrWL1mKr9RZDGpQtu0pBC97GxednoN/TApCorbCexhwaIs/VsanGXor7ueDkPBgOIuPzlq5vFcwVNbXvlyorCfmw++heSJSjE/BotBADMKH2LnMCwm5lnrcojsZA0giaqcc4Boun7HG9o9EdKPGwEc+s/E2bP9I7Vfl/ijsLBqDqSuT8ILfFDiPnIJJi5Pw8pMamIR0ItDMP1ABzbx3EThPC5eRbnAeOw1eixPwvBegTyrEKTmf3WA4DxuCzuJj5/5jxPKGw66Df
FRRMgqqhsM/IgH+XnI9bhgxcQFeiFgHd1Qg/WiJ/DZRPQaQRERERERtShlKc9KA8XPhWt9TuGQLJ8+5So/P9Tq6YeqmI1gxw7znbTt07aN+bILN6FCEbT/QsCdgO4cmenWnOxkDSCIiIiKiNqUCldnizal/wyCud3/1WczmalB9/ihKlefTxiJhtTfefF+dZI3LeuhP5BmeJ7w9BJsD52OvOonIGANIIiIiIqJ2rDI7BJG+PTHHdxTCA72RsCcN1XY+GGHN44CvlCF/gzcCJjoiYOYERAaGICtfj07jPTFInYXIGANIIiIiIqI2RQN7D/FWWIK6jlLrVR5HmfpRURqLjctiUeuVhHWpZ7Et+wLWbUrBC/PmwGWAOk8TypJmIyaxBu5RRxCfIfsWOIKw6Dj4+/k0UtJJdzoGkEREREREbYoDnMZ4AgfjkXm4Rh1nUJm7Q3kURx398VyUinfnsZ6w72LUXvJCFgpkNViU4+IFZQxwly1s5PuVumd7VOB0UZ54d8OIsRp06mAYK1UfSjOs51yVoRdXIhUDSCIiIiKiNsbe66+Y71mO5Je8sTlpt9I2MX2NN5ZvKTDpLdjuQU+MEO/JmwKQmpEm5ktDzvYgrH3RG5nnHMSUclTXxYvdHeAo3vL3JSL/UBZO69VAFVFIWB2LnNwsFOcmIjXaD68G6vB9dzFJr+ezIMlEswPIHt174vsqi08wJbqp5HnYs0dPdahlde/eA9U/nFWHiMhaMt90b6182YP5kuh6GPJl6/Sn2ZP5shVp4LokByHzhuDUP/wRGbgQ6eeH45nYjZiszqFw1GJ+fBwmdc1FSoSfmO/PSC+xxehlX2P9yrki2MzDqZN1pZhj4LpqAUZ8G4GYQBFgltaIQHUrViyZg85fxmBjsDciY95AaccpmL+rCC8/KQLQwhKcuqx+nVrMzz9dQufOndSh9uUX/xXUz1ZZvjwCY0f9FgM0bFZLt9ZHWanoZt8J06dPU8e0nHe3J+Lo6VoMftRXHUNE1jj66S4MdrwHz/z+aXVMy0n4v0SUnGK+JGqu1syX20W+/JL5kqjZTpYcQJfaYwiY/0d1TMvLzMyEh4cH3N3dsX//fnXsjWt2CWTPnrIEkr800a0nz8NevVrnF9W+ve1R80ODZutEdA3VIt/069NLHWpZfRzs8dOP/PtD1FyXWjFf9hb5UuZ7Imqeyz9V495fdlaH2pdmB5B9+jig4sxpdYjo1qmoPA17+9YJIOVyL5z/Vh0iImtdFPnG3r51qrDKfPnj+RPqEBFZq7XzZU0V/14SNVvNGXTq1D6rsDY7gBw3bhwKvsjHf/7zH3UM0c337aky/PLeX2LYsGHqmJYll3uvWP735V+rY4joWmR++e+Vn+Ds7KyOaVkyX9r9kvmSqDlkfvlPK+fLrnbMl0TNcaGqHKWHD+C3v/2tOqZ9aXYA2bt3bzzq8ii+KP5cHUN08xUfPYTf/GacOtQ6PNzGofxr446yiagp3/07H2MefVQdah2/dX8MFceYL4ms9Z34O9ba+dJ9/GP44eRBdYiIrqW8NAePjBqNbt26qWPal2YHkNJ4t9/g6L8L1CGim+vsuTM4WnoEjz32mDqmdfzmsXE4e6JA+ZWIiJom80nFN/n49djWvVEdN86V+ZLISkq+PNb6+dLV1RWnvma+JLKGzCcnj+bAw328Oqb9ua4AUlZX6NGjGz5I26GOIbp59ny4HU8/7dfq9cbl8n2neKM46+/qGCJqTFHm3zHxt+4YMmSIOqZ1yHzp84TMl2+pY4ioMUdEvpxwk/LlE7/zQkk28yXRtci/l27j3TB8+HB1TPtzXQGkNG/+PFSe+w7/+jRTHUPU+hL+EY+RjwxXuiO+GR5//HEMHeKET1Ni1DFEZE7mD82veuP3v/+9OqZ1yXw57MEHkM98SdSounw54ybmy5EPOeGzfzJfEjVG5ssB/ftg5jM3J1+2lusOIKXg4MVI27cLpV9/qY4haj0fZaeg8ux3m
Dat5Z/72JQ5L2jxk/4kjua9r44hojoyX1w8W4agl19Sx9wcL87Wokbky5LcneoYIqpTly//fJPz5XPPPSf+Xp7CV3n/UMcQUR0lX54rw8sLAtQx7dcNBZCy4ef69euRsncHsnPS1LFELevnn2ux55/bcfzkV4iLi1PH3lyvb9qIe2q/w+F9b+LKz5fVsUR3risiX36R+XdUnynC5s3x6tibS+bLbjYX8FnqOlzUV6hjie5cSr7cf2vzpfw73avjBRSmrWe+JBJkvvwy++/4qVLkyzdvTb5sab/4r6B+viHrotejtvY/eOhBFwy8f7A6lujGyN5+Pz98AEOGDMKzzz6rjr11diT+Awf+lY9fPfQ4+jq1bqcERG3V6dJP8d3R/Uo10lnPP6eOvXX2ZuzD9oTtcHLxRZ8HXNDBtos6hejO8N///Aflxz7D6S8/wlCRL2f/4dbny48++gjvbEvAA6N90HvAI7Dt3FWdQnRnqMuX5eLv5UPOD0D73M2/j83MzISHh4fS9Gv//v3q2BvXYgGklJWVheysj1FVpccw51F44P4h6Na1Bzp0uEedg6hpF6sv4PuqczgiAscvjxZiyIMPYvz4xzBy5Eh1jlvv0KFD+CjzAEpKvkSv+0ehr7hh7XSvPTp0/KU6B9Ht5fKlH1H9QyVO/zsfZ745iMFDHsQE97aVL8vKyvDPvfvxae4BOAwYBcdBvxb5sqe4aW2fXaQTXctP1Xpc1J/BmeNf4OzxzzFgwABM9Gh7+XLvR9nI+TgLPfsNRR+nR3Fvj37K30yi25Fxvqws+xwDB97afNkuAsg6R44cwccff4Jjx47h7NmzsLnbBt2798Tdd9+tzkFk6lLNJZw/L84VGxv07NETDz00DI/95jH86le/Uudoe7799ltkf3wAhYeP4Py5StTW1uKXXUUgadu6vcMS3SyXa6rxY1Wlki+797DH8IeHYfxvxrXpfFlRUYEP0/bi8BdFIl+exX/+8x/80q4nOnTsrM5B1L5dqb2MH6rO4L//uaLky2FDnTFp4m/bdL7U6/X4aH8WcvM+xbmzlbj8Uw3u7dYL93RkbQG6Pfz8s8iX31/Nl0OdnTF50q3Pl+0qgDRXVVWlBJKXL7PtGFnWpUsXdO/eXXlvry5cuCCC4PPKO9Ht4HbJl+fOncPFixfVMUTtW4cOHWBvbw87Ozt1TPtTXV2t3Bfy7yXdLtpqvmzXASQRERERERHdPK0VQN5QL6xERERERER052AASURERERERFZhAElERERERERWYQBJREREREREVmEASURERERERFZhAElERERERERWYQBJREREREREVmEASURERERERFZhAElERERERERWYQBJREREREREVmEASURERERERFZhAElERERERERWYQBJREREREREVmEASURERERERFZhAElERERERERWYQBJREREREREVmEASURERERERFb5xX8F9TMRERG1gh9//BGfffaZOkRERHRzeHh4wN3dHfv371fH3DgGkERERK3s3XffxcyZM9UhIiKim6elA0hWYSUiImpl//73v9VPREREN5dGo1E/tQyWQBIREbWy8PBwLF++XHmFhYWpY4mIiNoflkASERERERGRVRhAEhERERERkVUYQBIREREREZFVGEASERERERGRVRhAEhERERERkVUYQBIREbWwL774wuR15swZZbx8N59GRETUnvAxHkRERC3s6NGjGDJkiDpk2b333ovvv/8e/+//8bdcIiJqP/hXi4iIqIUNHjwY7u7u6pBl06dPZ/BIRETtDv9yERERtQIZIDblWtOJiIjaIlZhJSIiagWyemr37t3VIVMPPPAASktL1SEiIqL2gyWQREREraBbt26YNm2aOmSKpY9ERNReMYAkIiJqJY0FigwgiYiovWIVViIiolZkb2+Ps2fPqkOAq6srDhw4oA4RERG1LyyBJCIiakW///3v1U8GLH0kIqL2jCWQREREreiTTz7Bb37zG
3UIqKioQK9evdQhIiKi9oUlkERERK3oscceg7Ozs/LZ19eXwSMREbVrDCCJiIhaWV01VlZfJSKi9o5VWImIiFrZV199hUcffVR5NuQvfvELdSwREVH7wwCSiO5Y586dq39VVlaitrZWnULU8t544w3MnTtXHSJqeR06dFCqSPfs2VN52dnZqVOIiFoOA0giuqNUV1fjo48ykZKaipqaGnS264l7uvREp3t74a67OqhzEbW8b/5dhPsfGKoOEbW8Kz9fRvUP5bj0QyUuVFWid19HeP/PZOXRMTY2NupcREQ3hgEkEd0x/i8xSQSPH8HOYRAcB42F/X3D1ClERLef8mOf40zZ5/ix8hv81v03ePppP3UKEdH1YwBJRLe9b7/9FjEbXodt94HoP2wCOt3bU51CRHT7q/7hLI59loxOd9fglUUvK1VdiYiuF3thJaLb2ocffohXXnkFfYd64UHX6QweieiOI697wzxm4xf3DsLzzz+Pb775Rp1CRNR8LIEkotvW0aNHER4ejsmzYmBzTyd1LBHRnevsqS+Rtzsaf/vb39CjRw91LBGR9VgCSUS3pbNnzyrB4699gxk8EhGpejo+iAce+R8sj1iBS5cuqWOJiKzHAJKIbkub4rfiwV9PRfc+TuoYIiKSBo95Cp17DMSGuE3qGCIi6zGAJKLbzoEDB3AZnTFgxGR1DBERGXvI4w84cfI7fPrpp+oYIiLrMIAkottORuYB9NC4qENERGRJT3GdzMllAElEzcMAkohuK7LjnO9On4b9r/jAdiKipvR1ehSfH8xXh4iIrMMAkohuK599fgi9B45Vh4iIqDGd7rVHjz4DUFhYqI4hIro2BpBEdFs5/d0ZPuuRiMhKHTr3RMWZSnWIiOjaGEAS0W2lsrJS+VWdiIiuTV4vT33HAJKIrMcAkohuK9+fP3uTA8g8JI/vgplNvvyRo96flerkcBRKDYN0vYqilH2bXKQOt3vqeaTLU4fbktZIWw3KkmYh2NeQRwK23QZVKMvTkBA8TM3zs5B/Xh3fxsnrpfzhjYjIWgwgiei2cuFHPe7pbKcO3URO0+AzLxL+Fl+esO+ozke33vk8ZG6IR5k6SLdAUSzWxCSis0ccFsWk4OWJg9UJLehCGQq2RyH/psRGFcjZ5IfUrwZj6ooUhMS8gqHd1UltnLxe/viDXh0iIro2BpBERC2h3+OYPGMBvCy+tHDqYpjNSXsB27KD4WQYpOs1NFjsxwvwuY7Odkv3TMDmxCrUqsN08+m/K4AeUzBpuhYjRrrBqbetOqXl6HNDsHZTwU06zmWo3C/engiGz3g3OI8cjE6GCUREtx0GkERERHRr3KW+3y7uVt+JiG5jDCCJiG4i0zaQZcgMFsO+s8yq2dWND0HxJXWUObUNYKOvsERcrZRWg9PZUdg8T22f5TsKazcl4vQFdbKiAjlhYpouD5X7Awxt02ZMwM6DNYbJVypQnBSEtdqBhmWIaZt1u82WYYm6XJGe0ycSr7YRm+EHXVIeqtW5rtI3TGt0PIrN25OZtYHUZ/iLYX/klJehQBeA8Bniu+MHInhpCHK+qdsThrSEb5GfIxAul3+tdn0XjiJfLC+yLj1ymYEBSM4+alSydXUbK8vTkLx6AhbWpb3BfhbkvtxulsYTVerEa9MfisXmwFEIUNIzDOFhxtt4VfU3u6+mRVlPENIPVahTVeezkB7tj8i646osT8x3+NpVGmtPmC/fcjpMGdpTBkTsFp93Y6Of/O7VNsH6Q/HQhXnXt42U51mMOP6lDRbb9Hki85nJOozzg1XnsiGdMRlHUbrdT9nGAK0/Mk+ok40Yzj2RV+TAlgnKMmMy6vazNefzNfIeEVEbwwCSiOiW0cA9aCtczifirfjd6g1uDcq2zcbm3DGYujIUzo21nezng5AY2dbK9PWCr2xL5gDX3zwKQ0vQGnEzLW7Il0WgqNdzmB+VgkUvTYdN9iwELxY37uYBam4INibeA68lKQh8ejqGO
tkCl8TNdJArImP2wua3q5Q2a4FPD8GZ9/3FMoJQYE0bs5Nv4M0FEagcGiq+n4T5HsCnMROweGXddktlyF89TqT1DZwZsACBYj1KWvODEDnLD+ml17qhPo708AnQHeuPSYHiu6Fz4VgWi43aeWonRnYYOiMF2ifkZy20cp95DJEDll0uRPLiUYj58HtonohU9u+i0EAMwofYucwLCblmUY3Yxo1//DPK+swVyxbb+ORgnNou9vMa42C+DDmrxb7clIWuj28V+2IjfIeUYeeC+dirztGU6gNBCAmMQZXTAjwv0x8lzpHLO8Q2PoVUo56ZakWAvUYEPHu/GwIv2SYvahXGddgLXaArNu5Xg5tKEVzN8sau490w4tmNyvYFLpmCrl+JIO6lp5D+jWE2S+TyI2f6Y2ehA9xDk9Tli+WJdCQXNXWchsBFrkfrJj67wSdcnrc+cBRD+v2zxLa9hlNdPOErzr+QmAS8MN4BZbuCEL4kHqeV70vynH6qkfPE8GOMo4fZOma4GaqUNvNcrkyaj42Hx8BfHM9nHveF5j51gpFOw18RaY2Guxx4IlrZjz7DZe5r5vlsKe8REbVBDCCJiFqCuPk1lAiZv672wGpR72nwD50GpC3ErgN6cWMeC118HpzmRMNraBM3kHaD4TxStrW6+nLqkIesXUfhNDsBL07UGOYrFcHAljxotPsQFR4M17FuGDExGIHrtsL1uwisfyfLtI2YuPl3CRI3w2I+F785cBb3wZX7o7DzUG/4bDgobsqnKW3WXPziEPK6CH6L4vHm+2bLsKS0DPYvfaB+3xOuc5Kw/NUp0Ivt3nvIcCNde+htvJVaBpdX9yFk8Ry4iPUoad26Dz490qB7cweajlULUTs6wXQ7IyJFmL4b+YUyaLKF3RA3aHrJeftDI/fbfY13uFR7OBkFVcPhH5EAf68pyj4eMXEBXohYJ4KFCqQfLVHnVImAwDmibh+JbdQmIHDecHFu7EKRmnC5je+mAa6hRvtCzBfykgx2rqUCRZnx0I9fBe1LWmX/OI+dhqmhGzGpexUKiup6Mi1EenQESsfGYXlMHCbJNnliPp/wD0Tg3hGVH3+oBGOn8/9PhDhaPB8RB6+Jnsr2uXhFqvssD8Vfm5VW1lOX7xSMsK0J8JHfrVu+pwiGX41C8WV11gbs0FesZ1B/ud/t4DhMnruyveBRHMoU6fdah5cXL1COn/PIKXCfp+7DokyU1R189ZwesdjsPBHntEvH48jPP4pO95mtY4gDbMRQc8/lsqIh8F8SLNYhjtOMaWK/NGRjP1ykdTiUvp97yc/iHLO3bf75bCHvERG1RQwgiYhaQqO9sF67B1b7iaF4xhNIX+OPNRsMN/7zZw5XbnitVp6IN1+NwBnPrZivHVP/3bK8GBEkuGHi41fHKUTgOulJB+i3paHU5GbfB04mPfyUoShDRDyj58D9YbOAtvcUTJoBC8uwwClQBCmmt9/2HnPhJYKizKKjYqgGpblR0GMBJnmY3aZ3HAN3XxFg5Sai6JQ6rhHDXcy2s89gpcOir8rLDcPNYDM6FGHbD8DLvMcjOwdDsNCAJ0aY7aNe940R/+eh8pxhuOxzuY1aTDLfFxPnGkqwmmSHzvYOQPYbSE0Vy6wrPe7oCe2uzxDiKwItqTQLOaXAJB9fs3Rq4Bp+BGGhWvQVQ329RLC9Kw4u5oFK196NbJ+qJA0pYvnOvs/ByeTcFsv3nQu781EoONzc6peD4R7+GeKWTGnQ+Uw3+/7qJ4OyQzvEWTkHE83PE3FOB27fh/lejfXoeh3nsocIRK8rkLue89k87xERtU0MIImIWkKjvbBe7YG1ceLGe84quJzPQnHRFLwQpG36Bt6crJYXPgs5fULxctA0o+9WoPK4LEXSoPaMWPYh01fVz7L6ZhmqjNtjjdegq/rRoAL6g+Jt+BALabKFo9MU8S6WYVabs4F+vdFN/VivQ1d0FTfM+tKj4kZbjyoZ43mMgGMHw2Rj9gNkC
V0W9NdoKtjBvFOWLmId4k1/pbkBjZHLInUn8sQ+243M7SHYHNhYddOGJcadRDAm9yGuyCFxPGQbOo8hUApBTWjg6KF+bJQtnKdshNfIMqTLtoeesl2eH3TbE1F8yugAXK4RR0TEgV2tjHwu6VEpgs5iEdCkbwlCTGgE8tVJlui/KxFHSwTHV443OKeKz9Qo1VErzzU/YK9Te6FMBIlZKMiIx85of6zfItsyXlV7SZa09kbna+Yrc9dxLt/XX4Tt1+M6zucGeY+IqG1iAElE1AZUf5WLr5RPu5GV3ZyHqpchR9xk7/xuGuaHBZuVCImbbaU0RQddoDcizV4xuiwxbXd96ZhCBGAW7ncbdVkJjG5Qh4aBVwM3EgBerytlyN/gjYCJjgiYOUHssxBk5evRabwnBqmz3HS9PeEfU4S4LQl4QTsHgzodRfqmWYic8Qgitxc275EVFwqRHjYMMz0dsXC2OCdWv4Z8EeD2HTcZzuosFqnHIjO64TkVGRaFYjEtv7yx6q+Nqy6JR8yMLviD1zAsE8t6851knMYQjHCxpnrvjbN4LrdWr6qWzudm5j0ioluFASQR0a2mT8PONfGAZxwC54xB6YYgkw5RGieCx5UTsDFNg6krN8JVFnaZcEBfJwfxHoywbPn8Scuvpp+l6AC70eKtsMRC+8MaVB6TpUMaXLOw62Q5vlc/1rtQjkqxnXb9ZSmPnaw5CewvwCkL1WErj8veUt1gdxOLaMqSZiMmsQbuUUcQnyH31RGERcfB389HbPH1cID9AHE8LG5jGSqt/t3AFnZOU+A+OxqBm45g254D0HpUoHjTDkP1SxGQy/RVWSgWrtzjjYXzIlB8SY+C+KegK3wUL2z5Gn/fL7ZPVoMNj8ZUz7HorM5viV2/EUqpnM8G0/PI5KWVVXebQeaBJUH4algcVuw6qywjTpeCwKBQuD9senLZdJRVdctxsUEPwGXIDBqG8C2NtcltoXPZKm3vfCYiaikMIImIbqkK5G+Yj3RMw/NztHCZJm7gh+YhIToKZRZuPK+SPVHOFsGj7JBliwgCLZfiOQ7ViltZHdIzZKVGYzL4HIgArQwm1FEWaTB0oidwMB6Z5u3ayncjfbu4VfZzg+ZaRSelO5Bv9v3KAyJdGA7vMTIgsIXT2GCR1lik7zdL66U8ZO7KAob6YJCsH3mDbO6SQfW1SjQrcLrIcJM/YqwGnYy2r/pQmqGK57kqC48haZrmYXk8Gm5j7cFk7DV/VEkDhUhfOgELY9JMAyS7wXBUfjywhY1Mp5MbXJ2A9L1ppoGSuh8r+4+ApmMJju+qAIY9jqFODmKfqPMIlfm7lO2rPN9INdQBY+DeHcjck9ggEKvMmIUA31HNfwTFSbFtYvsHjRbnUnejc/lKGQ59LAO74yIgNozSjJwuzsp4ZB0w24eH30bGwTI4Oo0wtIO9S11OfcliC53LVrl55zMR0c3GAJKI6BaqzFiKt2QQGLgKLrJhVofh8HopFE5FEdBtz2u0SqJ+/3ys3yICnHFz4drDQlu0Q4XQiwDUZmQwXp6tQU7EBESuiUe+nJatQ0LY75Tgc+izzzX+qBCVvUcwpo4sR/JLoxGjS0SBWEZ+UgAi/zgL+Y5aPD/ds0HHJw0Viu97Y3PSbpG2NGRu8MbylbvhNDsak9SOQ2xGPofnvTTIX3k1rQUZUYiZNQHJ59ww9aXnlM5fblS33rJ0LA05e9JQXFLRaGmV0xgRbCAKCatjkZMr9ltuIlKj/fBqoA7fiwAKen3zqowKhuMxRtnGtfFyX4p06PwRHLQDkMts0nA4jRTnTNJ8rFmjpkm2y9zwDOJE8OP0ko/SYZCcb1KQOIf2z8LywACkZ8tjHo/NS/yV/aj1kx3ViGXNlB3yLIVug+7qeRExCsvj80TgA5RdaiQI7OAG35Wh6JVmtHyZjk3+iIxIBFxegftoK6olG3MaAx+x/fmb/oyEVHmOiHMsNQQbxbGXjwqR54+hOrbgNAdadR/Wn
SfyfFwTGoXvR0Zj4nhDEaJdL9nGdzdy0uTyjirBfsucy9a5WeczEdHNxgCSiOhWKU/Ezg3ihttzHZ7xkDfJBjZDF8BfOxylW0KQ2sgz9c6cUJ8teCACa43boNW/XkORMoMtnLTvIWrFXPQ6+QbektOWrUEpJkMbk4P5Zr2BWtRR3NxH5yAkcDJqP1qqrC/mHyXo9WQCot6Mg0uDqrOWhGLRtrnolBci0uaH5CINJq/4DIuNeoyVJUQuSw4Y0nosFjFiPWs37ECtSzRCtqY0WsraXHbjXoF2Yg0+XeOHyPgP0Vi/PPZeW7FiyRx0/jIGG4PFfot5A6Udp2D+riK8/KQ4XoUlFqsnNk09HqHTxbGT+9IPO3O7YdKG9/CM2olqUzTTUtRjqcO7Mk2BC5Fa2lvZlyHTri7AZmgwFusSMLlPCVKXyWMei1N95mLR6ylqwG4L59n7sGj247h4IEDZ13FbElHtFInl2/aJ81HMUmSpqqfB1eWXIV1pCymC08PAiMAURC4x7sjJSiIonfp6EqaOrUL+ahGIBs7CzjQ9NPP2Yc3rq+AiZik9XleSZ3ROq+eJ4XxMwvLoOeIsUg2dhkUzPHFqi1yezlCi3yLnsrVuzvlMRHSz/eK/gvqZiKjd8/f3h/e8N9UhuvUqkBM2EBv3hyIsO1gtISOituLc6aOoLE7BX8L/Vx1DRNQ0lkASERERERGRVRhAEhERERERkVUYQBIREREREZFVGEASEVErcoBruHw2INs/EhER3Q4YQBIREREREZFVGEASERERERGRVRhAEhERERERkVUYQBIREREREZFVGEASEbU3RVGYOb4LYjIq1BF3kMpExNyibS/VdRH7PQql6vBNoR7r5CJ1uC27gWOjz/AX2+mPnEp1xA1q6eXdXHlIFvtxpi5PHSYialsYQBIRURulR2VuLJKz78BAmYiIqI1iAElERG1TZRoSgkNQdlkdluynITD7AgInOqgjiIiI6GZiAElERERERERWYQBJRHQj9HlIj/ZDsK9sH9cFC+cFIPWQhSqXF44iXxeAyHnDlPlmjh+I4MAAJGcfRa06S73zWQ2WKeerVifXu1KF0xkhWKsd2PS6LVHSM0tdxzCsjdah7IKh7dXVNmyNt8Wy2B5QSbc/ItX0yOWGhwUh/bBenUG6uszqEh10S0chQA7PmIDN27Ogv6LOJtv++c1CvviYHyGXp7ZnM2tnZ2jrJtfVyMsk7XqUJgXV768G6zSiPxSLzeqxCtD6ISHjKC6q066l+ptEJIRNwEIlDYbjbHpcKpATJqaFJYoUmWq07d4VkfbtAQifIZcp9uvqKOSfqFEnSleXefqEWL/xfk0qVM4xZX8HG23TgTLDV41Uf7MbyauN0r5UHL9GzuecTXXnqExPLErPqdPqNfP8aaAGp7Oj6o/DTN9RWLtJbN8FdbI1LhmlU34/Oh6l5jtdHAVlPUF12y33jzc263bj9CV1FtW1j61B7Qnz/RiCnG8arFgc1woU1x9Xdb4TVepEIqK2iQEkEdH1Kk/ERu0E6LJrMOjZBITEJMBrQBlSAl2xMcPo5vxyIZIXj0LMh99D80SkmC8Fi0IDMQgfYucyLyTkGt1YymXO8oYu/x6Meympfpl7l43Cmm2GQKDOV5u8sHKPHsNnb0VIVBxcOnyIBLHunYeMAwsLLokbe5me9yvUdEdi+JVErFkcggJ1lmar3K2ke9fxbhjx7EZlGwOXTEHXr+Khe+kppH+jzlcnNwRrliSidnQYAsQ2ah+xxaFN3mIb8wzb2M8HIeHBcBYfnbUyja9gqJ2cYKrT8FeUdZm+kjB1nJw6BlNHD1fmA8qQs/IRhMck4/KwVxAo0/f0EJzZ4Y2Q1YkwjtcqM2YhJDAERb2ew/yoFLz47BhUbvHCm++rMzSlNBZrtLNQ2tEHU8V3Q2LWYVKfEuW46A5YCCCsVLDpKazPBVzniWWuWADH4xGImemNVPMI7OQbePMVHarr9qvY/MyYp
/DmhiCsCU9D18l/Vc6VcV0LkLp0tsn3K/cH4FWtP/Z+NwReK8R6olZhXIe90Im0r1WDUIV6/mxMqzvvI+F6VxrWL1mKr9RZblyNCDK9Ebwsov44LHppOmyyZyF4sQg8zQI7y44jfZUXdpaPge8S+X0fIDsI4dogFNQfihqUbXtKrOdtXHR6Dv7y/InaCu9hwKEt/lgZn3b1hxsrj21tURQiZ/pjZ6ED3ENFHlb2o8gf2qeQXGScN8U5udoVkZuy0PXxrVgUsxG+Q8qwc8F87FXnICJqixhAEhFdFz3y35qFnPNTMP/1FLzgNwXOI6dg0uIkvDxbg5yIPyPzlGHO2sPJKKgaDv+IBPh7yfncMGLiArwQsQ7uqED60RLDjHXLxAKEbE2Az0TPq8vUjsHF/GSTG2f9oFAsj4nDpPFucB6rFcs3LC/5cKE6h2WV+6Ows2g4fCKSTNM9Vtwjq/M01+n8/xO3w1o8HxEHLyXdbnDxikRgRCQ0yEPx12alNEUOmGSy3xLwvJe4pd6SJZYj2A2G87Ah6Cw+du4/RswzHHYd5ARTNvbDlXUZv+zP7cDeAw5wDd0Cn6G2ynzV2WtEwFMBl1f3IWTxHLjI9PnFYfHKUPRKmwVdqhrwX85C5oZEwFME5SKAdR0rj1UwAtetwlDDHE0qy4sR+zAY/ksWKN9Vtm3JRmhHd0TZ4dyGpchWKoUnXl6tHuvxc/BC9D74OOUhIWm36TJLa+C81Oi4BoXCS5ZOJpZj0jr1nJLnytJVcBHHpfSY0XbH6FDpsdXonJoGn/CDCNP2RkFMBA6o57Ph/BHB+Uqz8+dJTYMS1etWGg/dljxotPsQZXIctsL1uwisfyerYcl9A4U402+V0XEMxaLXt8LlfDz+karmEREM5x+ogGbeuwicp1XOC7ndXur5qBeBs7rZVh7bQqRHR6DUKRhhdXlY2Y8fYL6nCA5fjUKx2qa39tDbeDcN4jz9AIHaaRgx0hOu8seSl9wMMxARtVEMIImIrsf5LOSnivcZc+HS2zDKwBZOXmIc0pDzueHm3GZ0KMK2H4CXkzJ4lZ0D7NWPigu5KBbL1PhPh3NHdZxCLHO2uJGOCTUZ7zJBBEvqZ4XdwyK4EO/HjjdxI1+G0hxx1zp6DtwfNgRXBnXpvj59vRIQtSsOLualhF17m6axznhfOJnsNzvYD5ClhSWoOm8Ycz1k6c/GiET0mp2AFydq1LEVKPpYJ94XYJJH3TgDm6FaMQ4oyMgylEKWZCFZrN/9iWmm6e49DZOeUD83oXP3EeJ/HZK3peG0vq60aTAmRR9B2DxPdFLHNJe7OM+cjM+JjmPg4in2V+ouFJnsL0+MMD6uHYZAI7avwf7u3R99xVt+uSGwrz2cpmy31xNTzI6XOC8enwPn+vNZPX/Gz4WrGpwbiPk8r//8MSeDtTK4YeLjY2CjjlPI4/CkA/Ri/5Yad65k0XB4TzM/jp5w8xXL35Fm+KGioxumbjqCFTPqSqrr2KFrH/WjyqpjW5KGlFLA2fc50+MFDVx958LufBQKDhu+W/Z5lMin4vyrP08N7CfOhbv6mYioLWIASUR0Pc6UQD6az7mDHqWHslBs/DpZpZScFZ8xK3W7rIf+RJ6YZzcyt4dgc6BZVbVLesjWT/bdTSKrxt1lcocqiOG71I+NqkBltnhz0qCrYcRV9v3Fbe4NEttQWSr2QW4i0rcEISY0QmnH2IBIp3mBYrfu/cX/Nai10CbRKuWJePPVCJzx3Ir5WuPAoxynZSJG2+JikdmxOlSCi13EtIPHlX2vF8cVmAJHs+BB6tV/ivqpcfYeodB6dERBvB+Cn+hpaIO4SYf8UgvtCJvBvkfD+ruG/ZWF02cMwwbGQZ0RC/vbeLi6SoZTU6Dpb+H7jkOU0tfi83Ib6s6f/g1/GBBB6Q2fPwqxjuNyXRrUnjE/Xlmo+nmImFZmxQ8N/dG1wUluh849xNv5ElSafL8G1eePKnk5PzUWCau9G1RZtubY6
r8rUX686XXleIN0F5+pgaOYVnmuXPwvtvGEePMYgl7izZQGjjLoJyJqoxhAEhFdDxHkyBvFYp0/IgO9zV5ByJTznFBLAq+UIX+DNwImOiJg5gQxPQRZ+Xp0Gu+JQXL67eBCIdLDhmGmpyMWzhb7YPVryBc3yH3HTVbaMbY62S4vfBZy+oTi5SCzUicRHEAGCwejENPgWHlDt0fOIwKKlnjofMfhmBR+BPHbUjA/cAHce1Xh0PYAxMweiIWrdzdRMny9esPmmj8a3LhrFva1sFplhTroLByvGF2WmLZbBGJynuvVFTZqBF2ZHYJI356Y4zsK4WL5CXvSUG3ngxHmhZLWHNsrhtLFzOiG6Y4Mi0KxmFZX6ktE1F4xgCQiuh59hsBVvLm8egTbsi9YfoVPgyw3KkuajZjEGrhHiZvPDDntCMKi4+Dv52NaYtPRTikVrDwvSyhM1R4MQYDWH5my1OKGaNDXU7yVliklbiYuVDUcJ1XXmLU3E8MmDe/0KIh/CrrCR/HClq/x9/1iG3d9hpDwaEz1HKuUxrYqGTwGTcDO76ZhfliwWdVBSd3m8VuxztJxUl4JcBVRp12fEeKY7UbZ8boqiledOdWwN9HGdLrPDa5+kXgh5jPEZXyNRTOGozL1DRyqa1An/XipQWB2+ZLlELOqquH4788fF/+PQV8LpaXN1amrPBMtbzdOlShtY527y2dvamAvS8cKRcAtpxmrPG6oFmrumuePOQf0dZLrCkaYxWNlePlcs1HqcbHf1I/19LgoA09ZAi9LnktjsXFZLGq9krAu9ayy3HWbUvDCvDlwGaB8oYGmjq1dP3n+AD4bLKdZeWnHiDkcYD9AbOP+ApxqEJ2XobLpZsxERLcUA0giouvR/WE4jwXydyU26BFStsMLl4/GSD0qhipwukgGHm4YMVaDTkb1BqsPpRmqd56rMnTA0WUsnGVHMmmmneXIG8r8PbHQ3z0WmvvUUdfNAUN/owUOxiNTbYtVp3J/PNLVzwZqsHDwakciivN7kZ+mflaU4PiuCmDY4xgqbvyNS8Qq83cp22gpKL6mu2wN1VCvNNXlZhlyov3VTl02wtVi7V8HaEaKCDL7DeSY9IIpyOBzXhcsXKrDaTnsNAbu3YHUPbtNA6RLWch//1olRyKQ3uSN4OB4w7LqdHCAYz/504AdOij7RgYP4u3gpygzjguvFCI/VZauNZS+N800PRfSkJMgogwvdwySgdANsnnYEz6WtlsEe6UfxqNYBKouw2SQ6SB2kdiXls6f3B1m1ZWtPX8achyqFXtLh3Tj3owVsjfdgQjQRqD4mj2xFiInW+3Vt86JHcjYJVI20U2kThyx47mG4HisJ+y7GFXfvZCFAllVF+W4qDw2xMpjO8Bw/mTuMe3ZV5K9+wb4jsLOg4b9pnlYbmMs0vebbmPtwWTsvWb1XCKiW+eu5YL6mYio3UtKSsIgl2u3VbtxXaF56AGU/18QtqUewV1du+A/+q9RmrkOCbGxODpgAeb+0Qc9bLrA9tJnSPvkbZyu6IIOd9fgx5OfIj8xFG+uT8PF7hfxU9fH8bjHUNiKf44PiGXuDL66zDOfI+fNudDt74pJS2Lh3k/c5FYewHspWXB0C8TYAcbRw0V8mxkrbuJ94K0szzKb/kPR+/RGbPvb3/GtbT/c+3MZipICsTHpiEjrRfSoX66a9t3x+KKsC3p0rUFlUSJ2rFiJcvsHcL5yCNz/MA490BP/qdXh4z37UHGxK2xs9NCXZiFdNxvvfPC1ssyKh57BUyP6iWWewtG/v43i+xum8adjSZDxk8t0P/xKFlt2rMJpMW96bU/8qufP+EUnDX55pQh5ickiHpdpvBtl26bjtcSj6Osbggn3XURl+XGz10/o3KcnuorAsPfpWOhixDb/v56w/e9ZlBftxgcbg5F6eBi8QyLh0utu8VdRg0EP3Y3iDUFI+epndBO74VyJ2Oblc1HwswN+ujRa3WZLbHHv5c+QootEXtkF2
N51t6FdnTgndsQk44pvJJ6b9IASFN/735+R+eFGFJRcQOduYr5je5G8fgFKrgwT+7XL1X2gHmunzuVIz6/AvR1/NqQnPABZP0/D/GXBMJwC6rEvczNLX2PnhHocHtEajovY7l4Op/D53yOwr+AUOnTugp+++1Sce89g464qOM/ZiGfc+ilNbDsr+1KeP6k4f2832P50FAXbgvB68vWePw2P/V19xmDQ/0vFtvWxKDnbEbadLhjOqa0vISHjZ4xaEA2vwQ0aONYzLM8W9j8nIvfLu5Xvn85ZA91rUfhiQCgCA/1E3hRH7J4LOJaUiqzjp2BrezdqlTy8ETvW/hkFP8jj3RUjnpJpsvLYqufP5zHi/Knbj+cPo2hXODbHvYcr49fg+d8PV0rlDduYhYSYWByr7YNO/+8kju0NRczqNFyR14UH1WPTyi79eA7VlaX4rQd7fyUi67AEkojoevUWN/BbU6B1AQq2+CFS3JS++2EZej2ZgKg1V6tS2nttxYolc9D5yxhsDPZGZMwbKO04BfN3FeHlJx2U6oD11djql/kTDmwQywxeigOXJ+OF+APQjrXwIMTrooHrq58jLHAyanfLNpzzkXpePiZiVYM2mTLtUaEL4FgWo7Qf1CUdh9PSzzHfT3bgUscWzrP3YdHsx3HxQIAyX9yWRFQ7RWL5tn14RpZCFVmo8nhNY+C6agFGfBuhLDOz1Lx6pR6nSw3VSk/vCjBtb1b/SlZLv8Q2L8lBiNhm5EZgrZi2doMOZ/rMRaDuvfrHfUg2Q4OxWLcV45CMd8XxWrslDZ2n7MPiebLqYdM6jYvGmg3RGCEfor9Mrt8PO7O/hyZwH9YEXe2F1WZkqDgPIjHy8m5sFuuIE+vo9EQqQl6SdW0bGjHvPcwfUoJdq0V6It5GrUs0wkQaLZe4Xh97jzis1CVgcp8SpMq0q+eeNkbst5nGnRKp+3LeEJz6hzx/FiL9/HA8E7sRYu+asO78scQWTtr3ELViLnqdfANvyWO5bA1KxRpkeuab9VxqWX9MWpoE9y5pSBDfj3knF11l3ow2qubsqMX8+DhM6pqLlAiZh/+M9BJbjF72NdavnAs7cRxPnTScd1YfW+X8kfuxDOlKW0h/JB8WxzAwBZFLjNvnqtsYOh04sFSck2J5ud0wacN7eMa8/SURURvyi/8K6mcionbP398f3vPeVIeoWSoTEeM3Cwj9GoETZRs0IrrdnTt9FJXFKfhL+P+qY4iImsYSSCIiIiIiIrIKA0giIiIiIiKyCgNIIiIiIiIisgoDSCIiMrCfhsDsC2z/SERERI1iAElERERERERWYQBJREREREREVmEASURERERERFZhAElERLepCuSEdcHMsETo1TF3En2GP2aO90dOpTqimUp1Yt+Nj0KpOkxERCQxgCQiIqKWdT4PmRviUaYOEhHR7YMBJBEREbWo0j0TsDmxCrXqMBER3T4YQBIREREREZFVGEASEVHLuVKB4qQgrNUOxMzxXTBzxgRs1u3G6QvqdEUekuU0XR6qS3TQLR2FgLp5t2dBf0WdrUl6nM6OwuZ5wwzr8R2FtdHxKD6vTjZz8UQiEtT1BGj9oEsS61an1btwFDmb/BE+Q7b9k/N5W06PPg/p0X4I9jXMt3BeAFIPVagT6xi2MSbjKEq3+2Ghsjx/7NF5i+9YbpdYtn2cmBaEgvp9VdNwGzclmu1Lg+pvrm6fYT/m4Xur9qOB/lBs/Xrk/kkQ6b6oTjNxPktsuz8i647v+GEIDwtC+uG6VqaGdqfhW+TnCITLecRxrqM/FA9dmHf9vpNpjRHHrfRObKRKRNROMYAkIqKWcUkETUGuiIzZC5vfrsKimBQEPj0EZ973R/BiERiZB025IVizJBG1o8MQEJMA7SO2OLTJG2u25V2j6mMZ8lePQ/CyN3BmwAIEivUsemk6bPKDEDnLD+mlNep8qpNv4M0FEagc8gpejEnCM+OAT2MmYHF0mlEQWYjUxaOw8XA3u
MxOQohY5vNTNDgl0hMSYzRfeSI2aidAl12DQc8miGkJ8BpQhpRAV2zMaNjirzJpvljmGPjL9T7ui4cmTsMI7EZOvvm8hSjOKITdTB8M7SKHa1Aqgs3gZREo6vUc5kep25g9S+zLKJReUr6kqC2KwhrtLByoHgvfFSkImTdd7Ft/rNl0NXBrSmXGLIQEhtSv58Vnx6ByixfefF+doU7lbmyc5Y1dx7thxLMblX0UuGQKun4lgsKXnkL6N3ImOwydkQLtE/KzFloxT4jHEDkA/X65ntdwqosnfJeI8WLfvTDeAWW7ghC+JB6nlbmIiKitYwBJREQtonJ/FHYe6g2fDQcRqBWB0kg3uPjFIeT1rXApihcBSZZpYFjkgEmvp+AFvylwHjkFkxYn4HkvER5uyWqy85XaQ2/jrdQyuLy6DyGL58BFrGfExGAEbt0Hnx5p0L25AyaxamkZ7F/6QE2TJ1znJGH5q1Og3xWBzLouRkvSkFIE+MyLg9dET5EeQ9pfDHJDh9IsfKWUkOmR/9Ys5Jyfgvkm6U7Cy7M1yIn4MzJPKUurV1Y0BP5LgkUaxXpnTIPG8XG4egAFmVmmaSwS6y91gLvLGNjI4VIRlG3Jg0a7D1HhwXAdq27juq1w/S4C69+p25dlOKCLQOnQULy8Og6TxrvBefwcvLA6AZP7mJeKWnA5C5kbEgHPrQgxWc8qDFVnqXM6///E2rR4PsJoH3lFIjAiEhrkofhruT5b2A1xg6aX/EZ/aMQ8zvfZic9HcSizEPBah5cXL1DWI/ed+7wEBM4bLrY/E2XX2VssERHdXAwgiYioBZShKCMNGD0H7g/bquNUvUWQNUOEX9vSUHpZHSeN94VTb/Wzwg72A0QwgRJUNVIVVSmZy40SodwCTPLQqONUHcfA3dcNyE1EkXEg5xQoAh7Tee3HaTEJhUjJE0GN9EsH3C/eMrdHoeCEvj7Q7eubgnWbIjFCxkDns5CfKt5nzIWLSbpt4eQlxiENOZ+bhb4ebhgkv1vPASMmaM3SWIPiA29AL9LpMtKw78ryYsQedcPEx9WAsk7vaZj0pMPVfVn+KQpzARffaXDqaJhFIfaFq+8UdaAJJVlIFvva/YlpsFdHKeR6lFLEq/p6JSBqVxxcTLZH6Nrb9LsWDYZ7+GeIWzIFndQxdbrZ91c/ERFRe8AAkoiIWkAF9AfF2/AhFoIJWzg6yWCmDFXGbd3uAjqoH+t06y6DiRrUNtp+T4+qcvHmMQKO5l8W7AeIABJZ0FcZhhX9eqOb+rFel67oKt70x48bnhHpOB1PB05BhwMRWDvTEX+QbftWhyAzu/BqG8gzJSgSb84d9Cg9lIVi49fJKnQW04rPmJX63ddfhMWmOo2dBi+RxowDavB6OQ9FqRXQTHSDIcytQOVxuRwNas+YrUe8qn6WVULFvpRB9rky5Ms5+5kF04J9vxHqp8bpxTYBU+DYxzBsrFf/RgLQS3pUloq0iCA4fUsQYkIjlDRYq/ZCGcrEdhRkxGNntD/Wb9mtTiEiovaAASQREbW6y83o0OWGXDFr/3gtHe9RS/hsofFLwLo9nyEkNBpeXv1xMTcWm5eNQ8DMAOTL6pViG2SwWazzR2Sgt9krCJlyMSfUgLTO3eq7sQ5jMGKmA8rSDFV1aw+nIfm8JyaOl6WvBrVKSa0Ougbr8UaMLktM243Kc3Kem+hCIdLDhmGmpyMWzhZpWf0a8k8AfcdNhrM6S1OqS+IRM6ML/uA1DMvEdrz5TjJOYwhGuMign4iI2gsGkERE1AIcYDdavBWWmLbtU9Sg8pgsZdKgq3lxXLPZyRqTwP4CnDKuDquqPC47jnGDnSxerHOyHN+rH+tdqIIspNT0621apdJuMJwnzoH/khRE7TqLuFULoDmlQ3quCPX6DIGrmMXl1SPYln3B8it8WoMSx4Zs4TxuLuxKd6C4VI+i7Fhg7BQMdVQni33Z18lBvAcjzNI61JePbKTYSwMX8VZ0rGGrUf13B
eqnxtn1GSHSuxtlxxsG3mdOGXfCo0dB/FPQFT6KF7Z8jb/vF2nYJYLt8GhM9RyrlL42SZ+GnUuC8NWwOKwQ+1WmP06XgsCgULg/fMMnBRER3UQMIImIqAVoMHSiJ3AwHpmHzYKR8t1I3y6CFT83aCxUO20eWziNDRZBTyzS95sFTZfykLkrCxjqg0H1wZggArV8szSdzohHOobDdaSh1K/6QAgitX5IP6EMqmxhpxmsBJidZbq7PwznsUD+rkSTXlAl2RNq+PhhWJt6VB1zDUM94e1UiJyM11C4C3D/na9J1V/HoVqxjSJwbdCzaxlyVg5EgDYCxTIN9o9iuEhT8a63zdJUhkN7raga6jQG7t2B1D27TQP/S1nIf9+4Om4Jju8Sw8Mex1AR3NrcpY4WKvN3KVVYK8/LusUGNnfJANhon58sRPp5YNBocQ50N2oje0Wk82OZzuOoMq52TEREbdZdywX1MxFRu5eUlIRBLlZ0HkItrnM/De4q0uG9rX/Ht/+vJ2z/exbf5qyBLjIChd21+OOfF0CjPKLiFI7+/W0U3+8Db4+hIky76qdjSUgVMaDLdD/8qpFirbv69EevilSxni0oOdsRtp0uoLIoETuWz8ZHP7hh6tJIuPSSdUcv4tvMWOTbaPCf/e/h37/4JWx/OoqCbUF4U7cP981OwPO/7SebYsLG7mcc+79IvJd5BDWdbHDXhXKcKtqND974M/KvzIH/n6ajt21XaB56AOX/F4RtqUdwV9cu+I/+a5RmrkNCbCyODliAuX/0QQ+lTqy6jY9o8dSIfnKEmd7o+v8OYMcbiTgGLXwWPgVHox1xV58xGPT/UrFtfWz9NupLs5C+9SUkZPyMUQui4TVYFrOqadoZjG05p/DLe7vgp+N7kRw9B/uOic+X+jW5L3GXBoMeuhvFG4KQ8tXP6CaOz7kSuS/nouBnB/H90XD/wzj0QE/8p1aHj/fsQ8XFrrCx0RvSo5uNdz74GraXLqLioWfqt/UXZ/LEcfwMHXr2Q4f//hJdHxAb90ECPjr4NWru7ajs37JD8UiODEbmcZnOrzHkf0IwROm9lW6mSz+eQ3VlKX7rwarERGQdlkAS0W2lyy/t8NNFk1ZodLN0HAOf6ByEBE5G7UdLsVa21/tHCXo9mYCoN+PMei69ERq4LDmAqBVz0etYLGLEetZu2IFal2iEbE2Bz1DjkFToNxcvvqZFp7wQRAb6Y9cRB0xe8RlCtEY9nNp5QiuCSq0LUPqOoY3j2i3JuOgUjTBdtKEXVqn3NMwX65DzFWzxE/P54d0PywzbuCbYtCfUa7B/RD4TUpgh3hvU4rSFk/Y9wzaefANvifRELluDUkyGNiYH8417lVXT5N+/BKnLxHzRb6BqyCq8umSuOkPTbIYGY7FuK8YhGe8Gy+1OQ+cp+7B43hh1DskWzrP3YdHsx3HxQICyz+O2JKLaKRLLt+3DMx5ilqKr1Zftxr0C7cQafLpG7KP4D1HVQQT2rydh6tgq5K+W+3cWdqbpoZm3D2teX6VUwy093rAaLrU+eb385b2sRkxE1vvFfwX1MxFRu/fK0v/Fr0Y+jW59HlDHEBFRY/79WQp+1f0KZj03Qx1DRNQ0lkAS0W3F3t4e1T/wieRERNaQ18u+va/9JE8iojoMIInottK3Ty9xQ3RWHSIioqb8dPEs+jgwgCQi6zGAJKLbyqhHRqL861x1iIiIGiNLH89/dwzDh199BikR0bUwgCSi28rgwYPRp29fVH5bpI4hIiJLTpd+ipGjZBdGRETWYwBJRLedie7jcK5MPpmOiIgaUymuk+N+/ag6RERkHQaQRHTbGTduHPDTeRwr2KuOISIiY4f3/x33OfbBo48ygCSi5mEASUS3pT8FzMWX/9qJ89+VqmOIiEg6mvceqs9+jT+9NE8dQ0RkPQaQRHRb6tWrF0JCQvCvXVGorbmojiUiurOdPfUl/v35P7E8bBk6duyojiUish4DSCK6bQ0bNgxz5
87F3r+/jFNf/UsdS0R0Z/q25BPk7Y7G8uXL0aNHD3UsEVHz/OK/gvqZiOi2VFlZib+ujwNs7THwkd+h07091SlERLc/+Wzcb7/Yi/9c+g7LlgbD1tZWnUJE1HwMIInojvF/iUn46KOPYOcwCI6DxsL+vmHqFCKi20/5sc9xpuxz/Fh5DB7u4zH1aT91ChHR9WMASUR3lOrqahFE7kdK6j9RU1ODznY9cU+Xnuh0by/cdVcHdS4iovbnys+XcemHclT/UIkL+kr07tMP3v8zCa6urrCxsVHnIiK6MQwgieiOde7cufqXrOZaW1urTiFqWYmJicpr2rRpyouoNXTo0EHpQKxnz57Ky87OTp1CRNRyGEASERG1svDwcKXjEvkKCwtTxxIREbU/7IWViIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIiswgCSiIiIiIiIrMIAkoiIiIiIiKzCAJKIiIiIiIis8ov/CupnuknCw8ORmZmpDhER0Z1AXveXL1+OsLAwdQwREVH7wwDyFvDw8GAASUR0B2IASURE7R0DyFugLoCUNxJERHTncHNzg7u7uzpERETU/jCAvAWMA0j+Ek1ERERERO0FO9EhIiIiIiIiqzCAJCIiIiIiIqswgCQiIiIiIiKrMIAkIiIiIiIiqzCAJKL/3969gFVV5nsc/zkpXrIEAdFCxUwtvF8ox/ukqek4OpJpWNFBT42ZqT1m6mnUPN5SxzQnPTrFxExaajqpo463ElLzwmTeSGPyWslVoBRJy85am5cERFsQAhu+n+dZrv3+19ove7H3En686wIAAAA4QoAEAAAAADhCgAQAAAAAOEKABAAAAAA4QoAEAAAAADhCgAQAAAAAOEKABAAAAAA4QoAEAAAAADhCgAQAAAAAOEKABAAAAAA4QoAEAAAAADhCgAQAAAAAOEKABAAAAAA4QoAEAAAAADhCgAQAAAAAOEKABAAAAAA4QoAEAAAAADhCgAQAAAAAOEKABAAAAAA4QoAEAAAAADhCgAQAAAAAOEKABAAAAAA4Uu5Hi3mMm2TBggXmUSa7HRsbq169eqlnz56mKrVt21ZBQUGmBQAAAAAlCwGyCJw6dUoBAQGmlbdWrVrp3//+t2kBAAAAQMnDIaxFoG7dujlGGvMycOBA8wgAAAAASiYCZBH5uYBIgAQAAABQ0nEIaxG5ePGiPD09denSJVO5yh6d3Lhxo2kBAAAAQMnECGQRqVy5sgYNGmRaOTH6CAAAAMAdECCLUF5B0cPDgwAJAAAAwC0QIIuQfduO2rVrm1YmOzzao5MAAAAAUNIRIItY7tHG6x3WCgAAAAAlDRfRKWLR0dEKCgpyPfb399eZM2dcjwEAAACgpGMEsoi1adNGrVu3dj1m9BEAAACAOyFAFoOsw1i5eA4AAAAAd8IhrMXg9OnT6t+/v+twVgAAAABwFzctQB4/flxxcXFKSkxWQkKCEhITdfnytTfRL6t27typ9u3bmxZurXKratSoId8avvLx8Va9evWsuY9ZWjYdOnRIycnJiktIUnxCopKTkswSwP1V8/RUT
T8/1fLzka+vr5o2bWqWAACAkqzQA+TevXsVFblDMZ8dUb2ABvK63VteXtbk6a3y5SuYtZCQGK8avn6mhYsZ6UpJTVZKSrLSvj2nM2dOqO2v26lTpw5q1KiRWav0S09P165du/Svzdt0/kK6bvcJUMWqPrq1mq+qVKth1gLcX8b5czqfGq+MbxP1bdIpVa5cUQ9176b27dupSpUqZi0AAFDSFFqAtA/HXLVqtSpXqqp7GjRTi6b3mSVA/mVkXFT0/p2KPR6j6t5eGjo0TF5eXmZp6bRx40atXbdBVb3ryu+uNqp1d+bVeoGyIOHUQSWc+EQpZ4+q3+96q2fPnmYJAAAoSQolQC5dulSRkVHq3+cx3RVQdkaLUDR27t6mPf+O0ujRo0rtaOQrs/6k018nqNGvB8r7DvYhlF3nU+MUExWhwEb19fTQUFMFAAAlxS8OkNOnz9C55BQ9PugZVa7EYUe4OU5/eULhf5+v4OBg11SaDBkyVN51mqn5A0NMBcCRHe/o3OlP9H+LFpoKAAAoCX7RbTy2bN6is1/H6aknxxAecVPV8a+nyePnadWqVTp27Jipur9pM2bL7+77CY9ALo07PKrqtVtq3IQ/mgoAACgJChwgDx8+rBUrV+qZoS+aCnDzDQoeoldfnaeUlBRTcV/r/rlBCSnp1i/KIaYCILvGHUN08fvy+utbfzcVAABQ3AoUINPS0vTGX95U316P6pZbypsqcPPd07Cp7mvVUW+8EW4q7mn//v3asi1SrXo8ayoA8mLvI/sPHHEdfQAAAIpfgQLkzh071fDuJtbU2FSAotPh1930Tdq3bn0o67btO1WneW+V96hkKgDyYu8jdVr00q6P95oKAAAoTgUKkB99tENN7m1lWkDRC2zUUlFRO0zLvZw5c0afHzuqWvVbmwqAG6l1V2udv3BRMTExpgIAAIpLvgPk3r17pR9/pZp+d5oKUPSaBLbS7o93KSkpyVTcR+RHO1Xz7ramBcAJ+2JTO3ftMS0AAFBc8h0gY2I+UwMOXUUxq+hRUbVr19Px48dNxX0cOHBIfgHNTQuAE351m+loKboCMwAA7irfATIhIVFent6mBRSfardVtz6PCablPs4lJ6nK7b6mBcAJe5+x9x0AAFC88h0gkxIJkCgZvLy8lRCfaFru4fz58/r++8vyqHybqQBwwt5nrly5ovT0dFMBAADFId8BMvlcMgESJYL9OUxKTjYt92Cfs3mbJ6OPQEFUrearxET3+qMRAAClTb4D5MWL6apcqYppAcXH/hzan0d3Yo+eVGT/AQrEw9p3Lly4YFoAAKA45DtAAgAAAADKJgIkAAAAAMARAiQAAAAAwBECJAAAAADAEQIkAAAAAMARAiQAAAAAwBECJAAAAADAEQIkAAAAAMARAiQAAAAAwBECJAAAAADAEQIkAAAAAMARAiQAAAAAwBECJAAAAADAEQIkAAAAAMARAiQAAAAAwBECJAAAAADAEQIkAAAAAMARAiQAAAAAwBECJAAAAADAEQIkAAAAAMCRshcg41drRDM/NbrO1L5/sEZMDdf+VLN+Edu/2H4d87TftIHCEhtRVY91ynsaPSxEEasilfaDWbkwxW3SsrFNzNcK075zpl7SJK7QfOs1zt8abwr5k7hzguY8ar6n09YqzdQBAABKk7I7Ahk8QxFvrMo1hWt0h2qKWTFeg561Qly6WfdmSYrWe7PDFWOawM3XXO0em66QYdmnsWpx20Ftmd9bE2auUKJZs3DEa9eiYG34vJEGTF2vCfNfVOPqZlFpYoXPZeNf04mAiRo539rOkM6qZhYBAACUJmU3QNZsprb3dcg19dbDz4crYlpv6eAMhUcmmJVvjv2reut//v6NvjNtW8un43Xs4Ci1NG2UbIsXL1ZamjuNNdVVi98/p16PZp8mKnRWtCaFNlfapvHavj/DrFsYTirxQ2vWZ6z6duqswJaNVCVzQcnj+4hGRp3XyG5+ppAPCSe1z5p1eXSsg
lpa21mP+AgAAEonzoHMg/99v1V3a7759OnMAnAdU6ZMkaenpwYNGqR//OMfpuqOKqlBp4EKULy2HzlmaoWovJmXch63mAcAAAClFAEyLynx+tKaBVapmNlWtBbZ50gujjbtq3Kfs5i0Mcxqh2nd16cVuXi0Bv3WXt5UvUZM1rr/ZI1UJWjdGD8Net1+PEODsvV93f5OxGrd3MHq9YC9PEiDJoYrxj7E9ttDem/qYHW1+3igg56au9X12nNIjdZSa53M5/qp62OjFb735o6uljXLly9X//795efnp+eee047d+40S9yIZ035WrO0H7KPQGbo66hZemOYOYexX2vNWbRCX583i13itWuStSxijxI/HK6x/azHj3bV20sGWc/pqpX2Km92dT3/6vmFadf2O3eJYnKcH5l3vyujM5S2NcR6Xoh2nT7mOkTWtaxTE708c4lOXrSeev6Ats8N1uifXvOmnz80N/c5kEdmuV7bmoNpOrnhec0Jre9qjx42XBv2Z22HeY3DprhaK4fZr8N6XT99sTTFrrr6XPv1v/FO7nNNr7+dmZz0sUdr7GVWH+lHIxQxvrWGX3ddyw/xisndZ8TaXO+rdPn0Wq2Z2TXz+9ipvsaOn6BdJzi7EwCAsowAmcN3+uboak2ZOlkxPv01rHtTU8+v01o6tremxNbV4HGrtOSVIap/YpHG9B+tda7fO6vp1/+1ShOD7cchmmiff9m9gd24Dqu/P4bovW+7atTMVVowroP0/nj997TJmjIsTJG+wVYfSzXtIT8deWuwxvz9kHme5evV1te1XssHGWo5NNx1nmdYg9N6c2hXjdnICGthS0hI0IIFC9ShQwc1bdpUU6dOVWxsrFlasl3+YrfrMMygOwMyC1Z4jI3orbEvTdGRGk/omVnrNebZgaoQFaaxL8xSrB3Usts9QQtXVFSvces18uGBatZ1vCbMn6su9rI+c63H69W3uX1o50ntm9ne6nexEu56znXOoKvffc9reliwtsTmOoQ2V7+NG1QyC05py4xgbT/fQwMmWctGdpasoDd77gRFvDBYB7wHKnT+Kg3tVlMn3gm2+jhgnpc/ny7qr9mbvlPzIeGaMHWuGnv8S8tG9teaI/brrKbGj67XhOdDXet2ed6c5+k6gvWkdk1rpZfnr9GlJi+6tnPkw/coYfl1zjXNczvz38fscSt0uc0kDZ+/TKGtKmn/ot6a/fYeXTaruPqc2U7T529WhQdmaExWn/8Icb2vJy9lrnXZCtDTHwvRygN+6jJxlSbMmqH2Hmu1MDRr2wEAQFlU7keLeexISEiIJo+fZ1puyL4K64PDtNk08+LfYZhGvTBefeplH4HsrXnD1+vY021MLZM9Yjjo9fF615y3aI8Ytn9xvQKfXq93h7dRVg86uki/f2Sy/F85pAUP1XCVcj83r1re/X2n3XPrKPQtqfu0fVrQp46ragfNdaODNEaLtO3V/vJXmjZPbKgR7/fWnH+Fq88dZjXr+fsX97e+jqem/XOpHs56ups5efo/+sOIx1S7dm1TKXq7d+/WpUvmN+4beOCBBzRw4EC1aNFCy1dvVKveY8ySomNfhfXlN3+nZ1YtUzt7qPEnGUqPXa5lE4dr+8WnNCZirlrYASj2Nb00ZIIUuk2ThtyvCpkrS3ErtPAPYTrSa73mPdXZqtsjaPW18MPmCnlzp3rl+FuIPTLWVSuHbNPbofe7Kpf3T9GokbPU8H8Oa2SPrLBqubhHK5/tqjXer+vVWaHyvUG/9gjk8ClrFZDjtWUoZpGPpr9jheAcfVuB6aUmWqhwvTr1Edcoa57sEcjgMGniF5nnQdojkPbI4m+s572c7Xlpa/VGnxBtz7ZNWesOWHRefRtnltKjhuuplyJyvRYTzKx1bx13WGN62fXrb6fzPsz3Wdb7u8J6f2tmrmePXu6beafmb5ioSVFjZXed2ee/1GvuEYW0yQrjVp8Hp2j6n/YocMQqDWhzTBuGtNcyjdWkP09Ug
8pmJVeg7aqF+0I1YcVEBXqYchF5f+GzOvXFEd16662m4j6CgoI0a9Ys0wIAwH1xFVZ7WrhIwzrboa6NwhYc0raFk7OFx4Lp3C5beLTd2cAVCD/5qmC3CMjZX0XdfW9va95b3VtnT3915H+3Ndt2OnNkImmHNr9vzZ8MU/efwqOtolr2G6Lu2qp1+9x/FDIqKqrYJifh0fbBBx9o+vTpWrJkiRLiz5pqcVirhcH24YjZJx89NcQOj53Vd9KkzPBoOblnvhUXOqtbz2zh0VbzET34ez+lvb1JsTk2v68a3Ggg3SVDsbtnWbHmOT34m6uByKXy/erSr7OVylfoyFem5nL9fpsHZX9tlazd7HfW/HcKap697wD51rNmUSdVkLvzBLW7L2forFZXd9iv58t4azuuJ15HPoqw5tduZ4XGoVZN+nRrZK4RxNzbWYA+OvVTg5/Co62afO9qbs2PKtV1eHCaPo+2+mwwUu2yhUdbhWZWyIxYb4VHq350k9bHSoH9nsgWHm0BatfvaVU7N0ufHiyeUch9+/Zp+/btbjfNnj3bbAEAAO6Nq7DaU4f+GrVgveb0Oa3wEf+lRQezXxe1YCrmvpjGbZ6uX0KTfihY39f0lyV3PfvFSuJj9bE1a1sxTfv37tDu7NOpVN1uLdsd597nQnbs2FGbN28utsnb29u8krxVrlxZTz75pDZu3KiTJ0/q+eefVw2/WmZpccjrNh6va+T8nXp9hRUeWmZdPTReiafsP3YE6HJCpGL255xSv7/HWmYFsuznLHYKkKd5eH1pSo2zZr9poTvzGL3yvcsKkIpUWvakd4N+r3vRmhvtF/l1S44EZakpT39rdulGASpOX9vHA1th7MKR3N+/o7pQ1VoWfSpnoL1mOwvQh7Xdub+tXtXrWv9m6LLrPMgMpdup17+mvOzmdaSdPeoKxzV+OJXr61pTQobutJYlJttvZNF766239OGHH7rVBABAacI5kD+poz7PzVQfn2jNGzVKmws2UFiyWL8wJlmz3YvDFDo0ONc0Xu/Z65w47VrHXd11V309+OCDxTZVrJj3SHWvXr30t7/9TampqfrrX/+qnj17miXFLa/beIQqqGVzVcuVPC67RhcjFDGyt6bnmuZHRFrL1lohwl7HyCO85FuOC/gYhdFvkbO2ww7X0bM0P4/vX8Q6e52jSsw+fHjNdhagj8Ji3oftc6/9utMnzXLdu3ZfXNH/J+lfJ8B1KGiXLl3cagIAoDQhQGbn11vjxvWXT9JqvfyXrfrGlH9yISPHPRvtcwm/u2AelkR3NFAfa2afJ3nsoH1/yTymOdb2Zq6NX6hNmzauw9TOnDmj9evX6/HHH5eHh/tFn0x+1sfHvh/iWE2KOq+3rzNlne/nXDX7Yq/Sh5/qqzyO/k08tcf6t7Oq/fxQZgkXoDt6WLNO4Xo1j+9b5pT7XNTcCqOP3Cqpij3I/GWcUjILV12K1LJ+rTV/wzFV829hvVNS3z/n9TXNlHX+JwAAKFMIkLn4dB+v0R2kpBUztfSnQ1nryN++MeTuQ/ois5Apaas2/9M8LoCKt9jnXea+lGUh8mmqtta2bF6+WvvtW35k893BeRrULEhPve8eVwgtqcqXL68XXnhB0dHRrnOzxowZI39/+/hG93dn41ArRERoy9aTppLFvpBKfQ0PnaKYfH98K6lB27FWv69py4e5+r24R9vfj5Qa91VD+xhJt+angJZW+otarF25r1hqbeeaYVU1enyEvjalvBVGH7lVU8M2oVLscu3LdQ5j4oeLteGc9f40aCTddb+6VJe2r7v2Sq+JW8M03AqaV28zAgAAyhIC5DXqqM8fxitQhzRvVrjrUC2phlp26CYdnawRYxZp894ditw4TyOeHKeYOwp6qw+r1zuDrH+3ad2qrdp9JCHX6GZhqKOHX1qkPl/P0KBHwrRoo/V19m7VurdG66lRM7S/2WANu+HtQ/BzTp065bqyYuvWrU2l9KjQcqxGDQnQrildN
X32Eu2zz3+LitCySb/Vwk1Wznv8CQXmPj3QgQotn9CTvQK0b9rVfj/dOkvzw7pqTXJnDXj2CeW45pOb8u31Jz3T46RWDmuj+REr9KlrO1/TG+NCtPKIFdAeG/iz21kYfeRWpdMLVp9xWvNsVp+btCsiRNOnrZVv8ER1sf9L8OisftMmqsamME0eOVxbouzzH9dq+yJrvSkrpKAX1SXXRXgAAEDZQIDMQ8Vmw/TikzWkg5O1aF3mVUr9+y3UhleGqf7xhRoxNFhTlp5Sy//doTmDC34PDJ/OozXxoQxtenmwQl/beu093QrDHf01Z8UqTWwrRb5ufZ2hgzVz7Wn5DwzXhkWj1LKKWQ+4RiU1CF2tWVOfVo0vF+st+/y3l2YrVt0VOn+XnumW6yqqjgUoaNzOzH6Pv+Y6v2/On5frctBcTQhfr76NS0swCVC7cbs0YWR3afcUzXFtZ4QSaj2tkRGrHW5nYfSR29U+L38w3uozWEs/+E4trPdk2sgeyvovoULjsXohYpm61zqpLa5zIUO05qDUYuR6TR93g9uhAACAUq3s3QcSpYZ9H8iPo7dq0qSJplLyxcTE6M2/rSyW+0AC7i76n3P036EDFBgYaCruoVy5cq55Pn/cAgBQIjECCQAAAABwhAAJAAAAAHCEAAkAAAAAcIQACQAAAABwhAAJAAAAAHCEAAkAAAAAcIQACQAAAABwhAAJAAAAAHCEAAkAAAAAcIQACQAAAABwhAAJAAAAAHCEAAkAAAAAcIQACQAAAABwhAAJAAAAAHCEAAkAAAAAcIQACQAAAABwhAAJAAAAAHCEAAkAAAAAcIQACQAAAABwhAAJAAAAAHCEAAkAAAAAcCTfAdK7uo9SUpNNCyg+9ufQ29vbtNyDj4+Pvk1NNC0A+XE+LVG+vr6mBQAAikP+A6T1CzsBEiVBSkqyfH3c65dJO0Cmn0/Vj1d+MBUATtj7zIVvzhEgAQAoZvkOkPYP75TUJNMCik9qWrJq+LnXL5O/+tWv5Onlq/Rv2IeA/LD3mWpePqYFAACKS74DZM1afoqL/8q0gOITl/CVW45G1LqjltKSTpsWACe+sfYZRh8BACh++Q6Q7du314HD0bpy5YqpAEXvzFcndfvtt6lJkyam4j66dGqv+P/sNi0ATsR9sUcd27c1LQAAUFzyPwJZs6aCgu7ToZhPTAUoekc+26+OnTqalnux/wjz/cUUnU+NMxUAN2LvK3EnD+v+++83FQAAUFzyHSBtnTt31Gef7zctoGglJSfo8/8ctoJYO1NxP106t1fC8X2mBeBG4r/YpzZBQbr99ttNBQAAFJcCBUj7sEEfn+pau+FdUwGKztoN7+jhAcGqUqWKqbifXg/1VOqXn+rEwa2mAiAv9j5y6vAH+n2/vqYCAACKU4ECpO2Z4cOUlHJWH+/dbirAzff28v9Ts+ZN1aVLF1NxTx4eHho98lnF7Fyu8ykcygrkxd437H3khTHPq27duqYKAACKU4EDpO3FF8dq07b3FfvFZ6YC3DzbItcrKTlOIYMHmYp78/f315gxYxT57h+V/k2iqQKwZVxIde0bw4cP1z333GOqAACguJX70WIeF0hCQoImTZqslk3bqkvHnqYKFJ7vv7+s9ZtXKulcnF55Zaaplh5bt25VeHi4mnZ+XHUCO5kqUHbFnzyg6I1/VlhYmLp162aq7qtcuXKu+S/8cQsAQInwiwNklrl/elWXvvtBzZvcr/r1Gpkq8MvYV/v996c71eieBgoNfcJUS5/PP/9ci/8Srgq31dIdDTvI+05GXFD22Pd6TDx1wHV+8B+eClPDhg3NEvdGgAQAlCaFFiBtkZGRitz+kVJT09S0cWvdXe8eeXl6y8OjolkDuLEL6eeVkpqsw1Zw/OzYAdeha506d1TLli3NGqVXRkaGNm3eom0fRErlK6tm/baqXquBqlTz1S3lPcxaQOnx45UfXLfoSIn7QvHH9+n7jFR1/U1nPdSzhypVqmTWc
n8ESABAaVKoATLL4cOH9VHUDh0/cVxJSUmqUL6Cqlf3VnlrDuTl4sWLOpdifVYqVJCPt4+aNmuiDh06qHbt2maNsmXHjh3avOUD1/6Tan1fqt5eXVU9fc1SwP2lf5uib1ISVM3TWz6+vurx4AOufb40IkACAEqTmxIgc0tNTXX9Inzp0iVTAXKqWrWqqle3QpI1R05XrlxRcnKyEhO50M7N9sknn7i+zz169DAV3CxeXl7y8fFx/dGotCNAAgBKkyIJkADgDkaMGKGzZ8/qvffeMxXglyNAAgBKEwIkABi+vr6uoyXsydvb21SBX4YACQAoTX7RfSABoLRYvXq1Kzja3n33XdccAAAAOREgAcCyfPly8yjnYwAAAFzFIawAyryUlBTXRZyyi4mJ0b333mtaQMFxCCsAoDRhBBJAmZfXiCOHsQIAAFyLAAmgzMsrQHIYKwAAwLUIkADKtGPHjmn79u2mddX16gAAAGUZARJAmXajkUZGIQEAAHLiIjoAyrTAwEB99tlnppWTl5eXzp07Z1pAwXARHQBAacIIJIAy66OPPrpueLTZV2ddsWKFaQEAAIAACaDMsg9R9fT0/GnKkr3GYawAAABXcQgrABgcaoibgc8VAKA0YQQSAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4AgBEgAAAADgSLkfLeYxAJRp5cqVc80nT57smgOFIevzxI9bAEBpQIAEACMrQAI3Az9uAQClAQESAIwlS5bo7NmzpgUUnsDAQA0YMMC0AABwXwRIAAAAAIAjXEQHAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4AgBEgAAAADgCAESAAAAAOAIARIAAAAA4ID0/92LZU3hOd3eAAAAAElFTkSuQmCC)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VidQdxOmdKr-" + }, + "source": [ + "In contrast to dynamic quantization, static quantization does not perform any adjustments during runtime. Instead, observer modules are inserted at selected positions where layers will be quantized, and the model is applied to a representative set of data samples. The observer modules will then select quantization parameters based on the data that has been fed into the model, and these parameters will remain fixed during runtime." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "liG9mUAZZ5G-" + }, + "source": [ + "### *Comparing Dynamic and Static Quantization*\n", + "\n", + "Dynamic quantization does not fix the zero point and scale factor, but rather adjusts it in response to the data observed during runtime. 
In contrast, static quantization requires an initial calibration stage. During calibration for static quantization, observer modules will record the data range of the activations and use it to determine the zero point and scale factor.\n", + "\n", + "The advantage of dynamic quantization is that it does not require calibration and is suitable for modules that can have large variances in their input data range. On the other hand, static quantization does not need to perform on-the-fly quantization adjustments during runtime, which can potentially reduce latency, but this can come at the cost of lower accuracy." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZbLY0DMDXbd-" + }, + "source": [ + "## Purpose of Tutorial\n", + "\n", + "This tutorial will show how to adapt PyTorch quantization functions so that they can be applied to SpeechBrain models, as well as how the quantized models can be benchmarked.\n", + "\n", + "The tutorial will focus on pretrained automatic speech recognition (ASR) models, which can be easily loaded and used via the `speechbrain.inference.ASR` module in the library." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9LwAHVlWJCzM" + }, + "source": [ + "## Prerequisites" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-DeclH2cJhCG" + }, + "source": [ + "### Install SpeechBrain" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_yGRWIwkJjeq" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "68rlwk4LKRqs" + }, + "source": [ + "### Install Other Dependencies\n", + "\n", + "`kenlm` and `pygtrie` are external libraries that our chosen model relies on for n-gram related functionality. 
If your model does not use these libraries, you may not need these. Replace these installations with other external libraries that your model needs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4aUswz8sJshD" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install https://github.com/kpu/kenlm/archive/master.zip\n", + "!pip install pygtrie" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vcK-aQB8JGg3" + }, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "APZyoiDHI_E_" + }, + "outputs": [], + "source": [ + "import gc\n", + "import numpy as np\n", + "import os\n", + "import sentencepiece\n", + "import speechbrain\n", + "import time\n", + "import torch\n", + "import torch.nn as nn\n", + "import tqdm\n", + "\n", + "from collections import Counter\n", + "from copy import deepcopy" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Xg79njnsMHsi" + }, + "source": [ + "### Model Selection\n", + "\n", + "For the purposes of this tutorial, we will be using a Wav2Vec 2.0 model with CTC trained on CommonVoice English.\n", + "\n", + "Wav2Vec 2.0 models are transformer-based. In addition, this is an Encoder ASR model, meaning that it does not have a decoder layer, but instead uses a decoding function. While the encoder does not use a language model, the decoding function optionally uses a language model for n-gram rescoring, which is why kenlm needs to be installed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 47674, + "status": "ok", + "timestamp": 1714058184558, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "DTieO0VzMXY9", + "outputId": "7c1afad1-b6ab-49f0-dabd-5ec47287ee1a" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_token.py:88: UserWarning: \n", + "The secret `HF_TOKEN` does not exist in your Colab secrets.\n", + "To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.\n", + "You will be able to reuse this secret in all of your notebooks.\n", + "Please note that authentication is recommended but still optional to access public models or datasets.\n", + " warnings.warn(\n", + "Some weights of Wav2Vec2Model were not initialized from the model checkpoint at facebook/wav2vec2-large-lv60 and are newly initialized: ['wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n", + "WARNING:speechbrain.lobes.models.huggingface_transformers.huggingface:speechbrain.lobes.models.huggingface_transformers.huggingface - Wav2Vec2Model is frozen.\n" + ] + } + ], + "source": [ + "from speechbrain.inference.ASR import EncoderASR\n", + "\n", + "asr_model = EncoderASR.from_hparams(\n", + " source=\"speechbrain/asr-wav2vec2-commonvoice-14-en\",\n", + " savedir=\"/content/pretrained_ASR/asr-wav2vec2-commonvoice-14-en\",\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "INkF31Vzugg8" + }, + 
"source": [ + "Let us take a closer look at the submodules of the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 17, + "status": "ok", + "timestamp": 1714058184558, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "7OAe1RpNub9d", + "outputId": "5483801f-cc23-4227-fd8f-13fcd040f29f" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "EncoderASR(\n", + " (mods): ModuleDict(\n", + " (encoder): LengthsCapableSequential(\n", + " (wav2vec2): Wav2Vec2(\n", + " (model): Wav2Vec2Model(\n", + " (feature_extractor): Wav2Vec2FeatureEncoder(\n", + " (conv_layers): ModuleList(\n", + " (0): Wav2Vec2LayerNormConvLayer(\n", + " (conv): Conv1d(1, 512, kernel_size=(10,), stride=(5,))\n", + " (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (1-4): 4 x Wav2Vec2LayerNormConvLayer(\n", + " (conv): Conv1d(512, 512, kernel_size=(3,), stride=(2,))\n", + " (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (5-6): 2 x Wav2Vec2LayerNormConvLayer(\n", + " (conv): Conv1d(512, 512, kernel_size=(2,), stride=(2,))\n", + " (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " )\n", + " )\n", + " (feature_projection): Wav2Vec2FeatureProjection(\n", + " (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (projection): Linear(in_features=512, out_features=1024, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (encoder): Wav2Vec2EncoderStableLayerNorm(\n", + " (pos_conv_embed): Wav2Vec2PositionalConvEmbedding(\n", + " (conv): ParametrizedConv1d(\n", + " 1024, 1024, kernel_size=(128,), stride=(1,), padding=(64,), groups=16\n", + " 
(parametrizations): ModuleDict(\n", + " (weight): ParametrizationList(\n", + " (0): _WeightNorm()\n", + " )\n", + " )\n", + " )\n", + " (padding): Wav2Vec2SamePadLayer()\n", + " (activation): GELUActivation()\n", + " )\n", + " (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " (layers): ModuleList(\n", + " (0-23): 24 x Wav2Vec2EncoderLayerStableLayerNorm(\n", + " (attention): Wav2Vec2Attention(\n", + " (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n", + " (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n", + " (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n", + " (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n", + " )\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", + " (feed_forward): Wav2Vec2FeedForward(\n", + " (intermediate_dropout): Dropout(p=0.1, inplace=False)\n", + " (intermediate_dense): Linear(in_features=1024, out_features=4096, bias=True)\n", + " (intermediate_act_fn): GELUActivation()\n", + " (output_dense): Linear(in_features=4096, out_features=1024, bias=True)\n", + " (output_dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (enc): Sequential(\n", + " (linear1): Linear(\n", + " (w): Linear(in_features=1024, out_features=1024, bias=True)\n", + " )\n", + " (bn1): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (activation): LeakyReLU(negative_slope=0.01)\n", + " (drop): Dropout(p=0.15, inplace=False)\n", + " (linear2): Linear(\n", + " (w): Linear(in_features=1024, out_features=1024, bias=True)\n", + " )\n", + " (bn2): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", + " )\n", + " (activation2): LeakyReLU(negative_slope=0.01)\n", + " (drop2): Dropout(p=0.15, inplace=False)\n", + " (linear3): Linear(\n", + " (w): Linear(in_features=1024, out_features=1024, bias=True)\n", + " )\n", + " (bn3): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (activation3): LeakyReLU(negative_slope=0.01)\n", + " )\n", + " (ctc_lin): Linear(\n", + " (w): Linear(in_features=1024, out_features=1000, bias=True)\n", + " )\n", + " (log_softmax): Softmax()\n", + " )\n", + " )\n", + " (decoding_function): CTCBeamSearcher()\n", + ")" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "asr_model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "caUkjCWZZUKZ" + }, + "source": [ + "Note that not all modules can be quantized, and some modules cannot be quantized with certain methods. In particular, note the following list of modules that can be quantized without needing custom modifications to get around PyTorch's limitations:\n", + "\n", + "Dynamically quantizable modules\n", + "* `nn.Linear`\n", + "* `nn.LSTM`\n", + "* `nn.GRU`\n", + "* `nn.RNNCell`\n", + "* `nn.GRUCell`\n", + "* `nn.LSTMCell`\n", + "* `nn.EmbeddingBag`\n", + "* `nn.Embedding`\n", + "\n", + "Statically quantizable modules\n", + "* `nn.Linear`\n", + "* `nn.Conv1d/2d/3d`\n", + "* `nn.EmbeddingBag`\n", + "* `nn.Embedding`\n", + "\n", + "Armed with this information, we can start to pinpoint our quantization scheme. 
From our chosen model, we can identify the following modules:\n", + "* `encoder.wav2vec2.model.feature_extractor`: contains 7 `nn.Conv1d` layers, which must be statically quantized.\n", + "\n", + "* `encoder.wav2vec2.model.feature_projection`: contains 1 `nn.Linear` layer, which can be quantized both dynamically and statically.\n", + "\n", + "* `encoder.wav2vec2.model.encoder.pos_conv_embed`: contains an `nn.ParametrizedConv1d` layer, for which quantization has not been implemented in PyTorch.\n", + "\n", + "* `encoder.wav2vec2.model.encoder.layers`: static quantization has not been properly implemented for modules that rely on attention, such as this submodule, which contains transformer layers, so only dynamic quantization can be applied.\n", + "\n", + "* `encoder.enc`: a sequence of `nn.Linear` and `nn.BatchNorm1d` layers. Unfortunately, PyTorch does not allow BatchNorm layers to be statically quantized if they do not come after convolutional layers, so this submodule must be dynamically quantized.\n", + "\n", + "* `encoder.ctc_lin`: contains 1 `nn.Linear` layer, which can be either dynamically or statically quantized.\n", + "\n", + "Note that we have just separated out the \"main\" submodules of the model - it is possible to quantize in an even more granular manner by applying different quantization strategies to specific layers within the submodules we have picked out. (For example, we could apply static quantization to a specific `nn.Linear` layer inside `encoder.wav2vec2.model.encoder.layers`, even if the entire submodule cannot be quantized in such a manner.)\n", + "\n", + "However, there is an overhead to quantization, because inputs have to be quantized and outputs have to be dequantized, so it is inadvisable to perform quantization in too granular of a manner. 
For example, statically quantizing multiple layers at the same time means only one quantization and one dequantization are needed, whereas quantizing them separately would mean performing dequantization and quantization repeatedly as data flows from one layer to the other.\n", + "\n", + "Given the restrictions on quantization, as well as empirically collected data, for this model we will dynamically quantize `encoder.wav2vec2.model.encoder.layers` and `encoder.enc`, and statically quantize `encoder.wav2vec2.model.feature_extractor` and `encoder.wav2vec2.model.feature_projection`.\n", + "\n", + "`encoder.ctc_lin` will not be quantized, because experiments show that it has a large impact on WER (word error rate, a measure of accuracy) if it is quantized.\n", + "\n", + "Since submodules respond differently to different quantization methods, you may have to experiment with a combination of dynamic and static quantization in order to find the combination that best suits your model." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iy8E3mxbLHaT" + }, + "source": [ + "### Data Download and Preprocessing\n", + "\n", + "Download the LibriSpeech dev-clean dataset, which contains audio samples and corresponding transcriptions. This will be the dataset we use for evaluating the performance of the model before and after quantization. This dataset is chosen because it is relatively small---we do not need a large dataset for evaluating the performance of the model---and because it is clean, i.e. there are no background noises or audio artifacts that may unnecessarily interfere with the model's accuracy.\n", + "\n", + "Additional preprocessing is needed to convert the dataset into a format suitable for applying our models to, as well as for comparing the output of the models to the reference transcription. We want to have a list of audio-reference pairs in order to compare the output of the model on each audio sample to the correct reference transcription."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "w0O1mukQLj6-" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!mkdir librispeech_dev_clean\n", + "!wget https://www.openslr.org/resources/12/dev-clean.tar.gz -P /content\n", + "!tar -xvf dev-clean.tar.gz -C librispeech_dev_clean" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "x2w0a6zdLGym" + }, + "outputs": [], + "source": [ + "from speechbrain.dataio.dataio import read_audio\n", + "\n", + "# Retrieve the downloaded speech data as a list of audio-reference pairs\n", + "def get_samples(root):\n", + " audios = []\n", + " references = []\n", + " for book in os.listdir(root):\n", + " for chapter in os.listdir(f\"{root}/{book}\"):\n", + " for file in os.listdir(f\"{root}/{book}/{chapter}\"):\n", + " if file.endswith(\"txt\"):\n", + " with open(f\"{root}/{book}/{chapter}/{file}\", \"r\") as f:\n", + " for line in f.readlines():\n", + " audio_path, reference = line.split(\" \", 1)\n", + " full_audio_path = f\"{root}/{book}/{chapter}/{audio_path}.flac\"\n", + " audios.append(read_audio(full_audio_path))\n", + " references.append(reference)\n", + " return audios, references\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cQFfyX62Ln3J" + }, + "outputs": [], + "source": [ + "audios, references = get_samples(\"/content/librispeech_dev_clean/LibriSpeech/dev-clean\")\n", + "assert len(audios) == len(references)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0jSL-5IXMvhq" + }, + "source": [ + "## Quantization Set-Up" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UGvA8TmCM8g3" + }, + "source": [ + "### Utility Functions\n", + "\n", + "Here we define `get_module` and `set_module`, utility functions that are used to retrieve and set submodules within a module by providing the string. This is necessary in order to perform localized quantization, i.e. 
to replace a submodule with a quantized submodule without quantizing anything else.\n", + "\n", + "The utility functions build on the `getattr` and `setattr` functions, but enable nested attributes to be used, e.g.\n", + "\n", + "```\n", + "module_string = \"encoder.wav2vec2.model.feature_projection\"\n", + "```\n", + "\n", + "This allows nested submodules to be retrieved and set." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tqIBQrcfM2G0" + }, + "outputs": [], + "source": [ + "def get_module(model, module_string):\n", + " curr = model.mods\n", + " for attr in module_string.split(\".\"):\n", + " if attr.isnumeric():\n", + " curr = curr[int(attr)]\n", + " else:\n", + " curr = getattr(curr, attr)\n", + " return curr\n", + "\n", + "def set_module(model, module_string, new_module):\n", + " curr = model.mods\n", + " attrs = module_string.split(\".\")\n", + " for attr in attrs[:-1]:\n", + " if attr.isnumeric():\n", + " curr = curr[int(attr)]\n", + " else:\n", + " curr = getattr(curr, attr)\n", + " if attrs[-1].isnumeric():\n", + " curr[int(attrs[-1])] = new_module\n", + " else:\n", + " setattr(curr, attrs[-1], new_module)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FNihVFCAOdx3" + }, + "source": [ + "### Static Quantization Wrapper\n", + "\n", + "Static quantization requires the `QuantStub` and `DeQuantStub` modules to indicate the boundaries between quantized and unquantized modules, as well as to indicate where quantization observers should be placed for calibration.\n", + "\n", + "During calibration, quantization observers will record the range of data in order to determine the scale factor and zero point of quantization, so as to achieve a more optimal quantization result.\n", + "\n", + "Furthermore, upon static quantization, `QuantStub` and `DeQuantStub` will be converted to layers that quantize and dequantize incoming tensors respectively, allowing quantized modules to smoothly interface with 
unquantized modules.\n", + "\n", + "Notice below that `__getattr__` is overridden in order to allow attributes referencing the model inside the wrapper to be retrieved.\n", + "\n", + "Also, `DeQuantStub` must be able to deal with tuples returned from the model, i.e. multiple return values, because on its own the forward function of `DeQuantStub` does not account for tuples." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "61nJTcB9OdOK" + }, + "outputs": [], + "source": [ + "from torch.ao.quantization import QuantStub, DeQuantStub\n", + "\n", + "class StaticQuant(nn.Module):\n", + " def __init__(self, model):\n", + " super().__init__()\n", + " self.quant = QuantStub()\n", + " self.model = model\n", + " self.dequant = DeQuantStub()\n", + "\n", + " def __getattr__(self, name):\n", + " if name in self.__dict__:\n", + " return self.__dict__[name]\n", + " elif name in self.__dict__['_modules']:\n", + " return self.__dict__['_modules'][name]\n", + " else:\n", + " return getattr(self.__dict__['_modules']['model'], name)\n", + "\n", + " def forward(self, x, *args, **kwargs):\n", + " x = self.quant(x)\n", + " x = self.model(x, *args, **kwargs)\n", + " if isinstance(x, tuple):\n", + " return tuple(self.dequant(output) for output in x)\n", + " else:\n", + " return self.dequant(x)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GXayy0d1NDXm" + }, + "source": [ + "### Quantization Function\n", + "\n", + "This is a custom quantization function that enables submodules to be quantized both dynamically and statically. It also offers additional flexibility by allowing hyperparameters such as the resolution of quantization and other quantization configurations to be applied. This allows for simpler application of a combination of quantization strategies to our model.\n", + "\n", + "See the docstring for details." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "X4A4VqDANJeL" + }, + "outputs": [], + "source": [ + "def custom_quantize(\n", + " model,\n", + " dynamic_modules=None,\n", + " static_modules=None,\n", + " calibration_samples=None,\n", + " dynamic_targets=None,\n", + " dynamic_dtype=torch.qint8,\n", + " static_qconfig=torch.ao.quantization.default_qconfig,\n", + "):\n", + " \"\"\"Performs in-place quantization of an ASR model\n", + "\n", + " The quantization is customizable. A combination of dynamic and static\n", + " quantization can be performed on specific submodules that are passed into\n", + " this function.\n", + "\n", + " Names of submodules passed into this class are implicitly assumed to be\n", + " nested fields of ``model.mods``. For example, the ``model.mods.encoder.enc``\n", + " submodule should be passed in as ``encoder.enc``.\n", + "\n", + " Reference https://pytorch.org/docs/stable/quantization.html for\n", + " what torch modules can and cannot be dynamically/statically quantized.\n", + "\n", + " Arguments\n", + " ---------\n", + " model : torch.nn.Module\n", + " Model to be quantized.\n", + " dynamic_modules : list[str]\n", + " Names of the submodules to be dynamically quantized. They should be\n", + " formatted as stated above.\n", + " static_modules : list[str]\n", + " Names of the submodules to be statically quantized. They should be\n", + " formatted as stated above.'\n", + " calibration_samples : list[torch.Tensor]\n", + " Sample inputs used for calibration during static quantization.\n", + " dynamic_targets : set[torch.nn.Module]\n", + " Torch modules to be quantized during dynamic quantization.\n", + " dynamic_dtype : torch.dtype\n", + " The torch datatype that values will be converted to during dynamic\n", + " quantization. 
This should be a quantized datatype, such as\n", + " ``torch.quint8``, ``torch.qint8``, ``torch.qint32``\n", + " static_qconfig : torch.ao.quantization.qconfig.QConfig\n", + " The quantization config for static quantization, which, among other\n", + " things, specifies the observer modules that will be inserted\n", + " and the resolution of quantization.\n", + "\n", + " Returns\n", + " -------\n", + " None\n", + " \"\"\"\n", + "\n", + " ##################################################\n", + " # Dynamic Quantization #\n", + " ##################################################\n", + " if dynamic_modules is not None and len(dynamic_modules) > 0:\n", + " if dynamic_targets is None:\n", + " dynamic_targets = {\n", + " torch.nn.LSTM,\n", + " torch.nn.GRU,\n", + " torch.nn.RNNCell,\n", + " torch.nn.GRUCell,\n", + " torch.nn.LSTMCell,\n", + " torch.nn.Linear\n", + " }\n", + "\n", + " for module in dynamic_modules:\n", + " torch.quantization.quantize_dynamic(\n", + " get_module(model, module),\n", + " dynamic_targets,\n", + " dtype=dynamic_dtype,\n", + " inplace=True,\n", + " )\n", + "\n", + " ##################################################\n", + " # Static Quantization #\n", + " ##################################################\n", + " if static_modules is not None and len(static_modules) > 0:\n", + " if calibration_samples is None or len(calibration_samples) == 0:\n", + " raise Exception(\"No calibration samples provided for static quantization.\")\n", + "\n", + " for module in static_modules:\n", + " set_module(\n", + " model,\n", + " module,\n", + " StaticQuant(get_module(model, module)),\n", + " )\n", + " get_module(model, module).qconfig = static_qconfig\n", + "\n", + " torch.ao.quantization.prepare(model, inplace=True)\n", + "\n", + " for sample in calibration_samples:\n", + " model.transcribe_batch(sample.unsqueeze(0), torch.tensor([1.0]))\n", + "\n", + " torch.ao.quantization.convert(model, inplace=True)\n" + ] + }, + { + "cell_type": "markdown", + 
"metadata": { + "id": "o52o1tD1OGZZ" + }, + "source": [ + "## Benchmarking Set-Up\n", + "\n", + "We will focus on the two main performance metrics for ASRs, real-time factor (RTF) and word error rate (WER).\n", + "\n", + "RTF is the ratio of the total inference time to the total length of the input audio. This is significant because an RTF lower than 1 implies that inference takes less time than it takes to play the audio, which potentially allows for real time speech recognition (excluding other sources of latency).\n", + "\n", + "WER is the ratio of the number of word-level errors (substitutions, deletions, insertions) made by the model to the number of words in the reference.\n", + "\n", + "Put together, these two metrics allow us to assess the latency and accuracy of the model before and after quantization." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yoeJMQGJOZId" + }, + "source": [ + "### WER\n", + "\n", + "Levenshtein distance, or edit distance, is at the core of the WER metric. It measures the number of substitutions, deletions, and/or insertions needed to transform one string to another, which can be computed using a dynamic programming approach.\n", + "\n", + "The main difference between Levenshtein distance and WER is that the former considers strings on a character level, while the latter considers the substitution/deletion/insertion of entire words.\n", + "\n", + "Speechbrain provides utility functions to measure WER and other related metrics." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qVGtuoVpOxiJ" + }, + "outputs": [], + "source": [ + "from speechbrain.utils.edit_distance import accumulatable_wer_stats\n", + "\n", + "def compute_wer(references, hypotheses):\n", + " if isinstance(references, str):\n", + " references = [references.split()]\n", + " else:\n", + " references = [ref.split() for ref in references]\n", + " if isinstance(hypotheses, str):\n", + " hypotheses = [hypotheses.split()]\n", + " else:\n", + " hypotheses = [hyp.split() for hyp in hypotheses]\n", + " if len(references) != len(hypotheses):\n", + " raise Exception(\"Number of references is not equal to the number of hypotheses\")\n", + " stats = accumulatable_wer_stats(references, hypotheses, Counter())\n", + " return stats['WER']\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ULWP-aioO3fx" + }, + "source": [ + "### Modify EncoderASR transcribe_batch\n", + "\n", + "Modify the existing `transcribe_batch` method in order to time the encoder's forward function.\n", + "\n", + "Different ASR types have different `transcribe_batch` implementations, so small tweaks may need to be carried out as appropriate for your own model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LT2M_epBO9Pw" + }, + "outputs": [], + "source": [ + "import functools\n", + "\n", + "# Functions necessary for preprocessing the input and generating transcriptions\n", + "\n", + "def preprocess_input(model: EncoderASR, input):\n", + " with torch.no_grad():\n", + " wavs = input.unsqueeze(0)\n", + " wav_lens = torch.tensor([1.0])\n", + " wavs = wavs.float()\n", + " wavs, wav_lens = wavs.to(model.device), wav_lens.to(model.device)\n", + " return wavs, wav_lens\n", + "\n", + "def generate(model, predictions):\n", + " is_ctc_text_encoder_tokenizer = isinstance(\n", + " model.tokenizer, speechbrain.dataio.encoder.CTCTextEncoder\n", + " )\n", + " if isinstance(model.hparams.decoding_function, functools.partial):\n", + " if is_ctc_text_encoder_tokenizer:\n", + " predicted_words = [\n", + " \"\".join(model.tokenizer.decode_ndim(token_seq))\n", + " for token_seq in predictions\n", + " ]\n", + " else:\n", + " predicted_words = [\n", + " model.tokenizer.decode_ids(token_seq)\n", + " for token_seq in predictions\n", + " ]\n", + " else:\n", + " predicted_words = [hyp[0].text for hyp in predictions]\n", + " return predicted_words\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7CiEEOg3s0yA" + }, + "source": [ + "Note that we are only interested in the change in inference time relevant to quantization, and not the overhead of input preprocessing or word generation. This is why we only record the duration of the encoder's forward function." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_C8A7ONqQLsi" + }, + "outputs": [], + "source": [ + "def timed_transcribe(model: EncoderASR, input):\n", + " with torch.no_grad():\n", + " wavs, wav_lens = preprocess_input(model, input)\n", + " start = time.time()\n", + " encoder_out = model.mods.encoder(wavs, wav_lens)\n", + " end = time.time()\n", + " duration = end - start\n", + " predictions = model.decoding_function(encoder_out, wav_lens)\n", + " predicted_words = generate(model, predictions)\n", + " return predicted_words[0], duration\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aPPUQOXVQbHX" + }, + "source": [ + "### Benchmark Model Performance\n", + "\n", + "Latency measurement is often unstable at first, so a warmup phase is introduced in order to ensure a more accurate performance evaluation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "kAg5Z-FXQZST" + }, + "outputs": [], + "source": [ + "def benchmark(model, samples, references):\n", + " total_audio_length = sum([sample.shape[0] / 16000 for sample in samples])\n", + " total_cpu_time = 0\n", + " outputs = []\n", + "\n", + " for sample in tqdm.tqdm(samples[:10], desc=\"warming up\"):\n", + " timed_transcribe(model, sample)\n", + "\n", + " for sample in tqdm.tqdm(samples, desc=\"evaluating\"):\n", + " output, duration = timed_transcribe(model, sample)\n", + " outputs.append(output)\n", + " total_cpu_time += duration\n", + "\n", + " wer = compute_wer(references, outputs)\n", + " rtf = total_cpu_time / total_audio_length\n", + " return wer, rtf\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JRVVb-2eQvBu" + }, + "source": [ + "## Quantization and Benchmarking\n", + "\n", + "With the necessary set-up code for quantization and benchmarking in place, we can start to actually benchmark our model before and after quantization." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HDoLvCnoRoXO" + }, + "source": [ + "### Select Data\n", + "\n", + "For the sake of time, select a subset of the audio data for benchmarking the models." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "CJDBj9lmRqDp" + }, + "outputs": [], + "source": [ + "n = 100\n", + "audio_subset = audios[:n]\n", + "ref_subset = references[:n]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GA05wr7bRF4g" + }, + "source": [ + "### Original Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9sb8Odc_RM-A" + }, + "outputs": [], + "source": [ + "# Deepcopy the original model to avoid propagating unwanted changes\n", + "original_model = deepcopy(asr_model)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 672982, + "status": "ok", + "timestamp": 1714058922122, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "Qf2GL-fzQ74P", + "outputId": "99455f40-e219-4b7a-9a35-5985443e8768" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "warming up: 100%|██████████| 10/10 [01:40<00:00, 10.01s/it]\n", + "evaluating: 100%|██████████| 100/100 [09:32<00:00, 5.73s/it]\n" + ] + } + ], + "source": [ + "original_model.eval()\n", + "wer, rtf = benchmark(original_model, audio_subset, ref_subset)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 310, + "status": "ok", + "timestamp": 1714059005102, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "kAxvB8JVxWXV", + "outputId": "8edb9c2f-60bb-4689-a66c-6efaab78b7cf" + }, + "outputs": [ + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "Original Model\n", + "WER(%): 6.067291781577496\n", + "RTF: 0.7967449480673793\n" + ] + } + ], + "source": [ + "print(f\"Original Model\\nWER(%): {wer}\\nRTF: {rtf}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qvbwU8XQ0zeU" + }, + "source": [ + "To avoid exceeding the session's RAM limit, delete models after benchmarking." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 1689, + "status": "ok", + "timestamp": 1714058923806, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "m6Q6A_q-0ydx", + "outputId": "5866e92b-9935-4e3b-d4af-e3eea579a54c" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "del original_model\n", + "gc.collect()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6N2YWR3gSPUI" + }, + "source": [ + "### Quantized Model\n", + "\n", + "First, let us recall the model architecture:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 8, + "status": "ok", + "timestamp": 1714058923807, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "dUBulr8eSXFF", + "outputId": "bc0d62ce-bc94-4185-ff01-9cc72bcb27af" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "EncoderASR(\n", + " (mods): ModuleDict(\n", + " (encoder): LengthsCapableSequential(\n", + " (wav2vec2): Wav2Vec2(\n", + " (model): Wav2Vec2Model(\n", + " (feature_extractor): Wav2Vec2FeatureEncoder(\n", + " (conv_layers): ModuleList(\n", + " (0): Wav2Vec2LayerNormConvLayer(\n", + " (conv): Conv1d(1, 512, kernel_size=(10,), 
stride=(5,))\n", + " (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (1-4): 4 x Wav2Vec2LayerNormConvLayer(\n", + " (conv): Conv1d(512, 512, kernel_size=(3,), stride=(2,))\n", + " (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (5-6): 2 x Wav2Vec2LayerNormConvLayer(\n", + " (conv): Conv1d(512, 512, kernel_size=(2,), stride=(2,))\n", + " (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " )\n", + " )\n", + " (feature_projection): Wav2Vec2FeatureProjection(\n", + " (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (projection): Linear(in_features=512, out_features=1024, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (encoder): Wav2Vec2EncoderStableLayerNorm(\n", + " (pos_conv_embed): Wav2Vec2PositionalConvEmbedding(\n", + " (conv): ParametrizedConv1d(\n", + " 1024, 1024, kernel_size=(128,), stride=(1,), padding=(64,), groups=16\n", + " (parametrizations): ModuleDict(\n", + " (weight): ParametrizationList(\n", + " (0): _WeightNorm()\n", + " )\n", + " )\n", + " )\n", + " (padding): Wav2Vec2SamePadLayer()\n", + " (activation): GELUActivation()\n", + " )\n", + " (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " (layers): ModuleList(\n", + " (0-23): 24 x Wav2Vec2EncoderLayerStableLayerNorm(\n", + " (attention): Wav2Vec2Attention(\n", + " (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n", + " (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n", + " (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n", + " (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n", + " )\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " (layer_norm): LayerNorm((1024,), 
eps=1e-05, elementwise_affine=True)\n", + " (feed_forward): Wav2Vec2FeedForward(\n", + " (intermediate_dropout): Dropout(p=0.1, inplace=False)\n", + " (intermediate_dense): Linear(in_features=1024, out_features=4096, bias=True)\n", + " (intermediate_act_fn): GELUActivation()\n", + " (output_dense): Linear(in_features=4096, out_features=1024, bias=True)\n", + " (output_dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (enc): Sequential(\n", + " (linear1): Linear(\n", + " (w): Linear(in_features=1024, out_features=1024, bias=True)\n", + " )\n", + " (bn1): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (activation): LeakyReLU(negative_slope=0.01)\n", + " (drop): Dropout(p=0.15, inplace=False)\n", + " (linear2): Linear(\n", + " (w): Linear(in_features=1024, out_features=1024, bias=True)\n", + " )\n", + " (bn2): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (activation2): LeakyReLU(negative_slope=0.01)\n", + " (drop2): Dropout(p=0.15, inplace=False)\n", + " (linear3): Linear(\n", + " (w): Linear(in_features=1024, out_features=1024, bias=True)\n", + " )\n", + " (bn3): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (activation3): LeakyReLU(negative_slope=0.01)\n", + " )\n", + " (ctc_lin): Linear(\n", + " (w): Linear(in_features=1024, out_features=1000, bias=True)\n", + " )\n", + " (log_softmax): Softmax()\n", + " )\n", + " )\n", + " (decoding_function): CTCBeamSearcher()\n", + ")" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "asr_model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yAZaH9SOSZq5" + }, + 
"source": [ + "As previously mentioned, for this tutorial we will apply dynamic quantization to the attention layers and sequential linear layers, and static quantization to the other quantizable layers (excluding `ctc_lin`, which has been experimentally observed to respond poorly to quantization).\n", + "\n", + "Recall that not all PyTorch layers can be quantized, and some can only be quantized dynamically or statically, so there are restrictions on your choice of modules to quantize and method of quantization.\n", + "\n", + "For your model, feel free to experiment with what yields the best results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WYqJoxKyS1ev" + }, + "outputs": [], + "source": [ + "dynamic_modules = [\n", + " \"encoder.wav2vec2.model.encoder.layers\",\n", + " \"encoder.enc\"\n", + "]\n", + "static_modules = [\n", + " \"encoder.wav2vec2.model.feature_projection\",\n", + " \"encoder.wav2vec2.model.feature_extractor\",\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "esnYoqy_rQo7" + }, + "source": [ + "Randomly select calibration samples for use in static quantization." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "J86cO6IMiq7X" + }, + "outputs": [], + "source": [ + "from operator import itemgetter\n", + "\n", + "np.random.seed(1337)\n", + "indices = np.random.choice(len(audios), 10)\n", + "calibration_samples = list(itemgetter(*indices)(audios))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "748wzP9QrWss" + }, + "source": [ + "We have what we need to quantize the model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2lFNQoARSn7R" + }, + "outputs": [], + "source": [ + "# Deepcopy the original model to avoid propagating unwanted changes\n", + "quantized_model = deepcopy(asr_model)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3KEU3iqbTZoH" + }, + "outputs": [], + "source": [ + "custom_quantize(\n", + " model=quantized_model,\n", + " dynamic_modules=dynamic_modules,\n", + " static_modules=static_modules,\n", + " calibration_samples=calibration_samples,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9SM85ctkUewu" + }, + "source": [ + "Here is the model after quantization. Notice how the specified submodules have been replaced with quantized versions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 3197, + "status": "ok", + "timestamp": 1714059095809, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "iWCn6Vx7UmkZ", + "outputId": "8b5a0287-1b41-4b43-8e18-0a8759126346" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "EncoderASR(\n", + " (mods): ModuleDict(\n", + " (encoder): LengthsCapableSequential(\n", + " (wav2vec2): Wav2Vec2(\n", + " (model): Wav2Vec2Model(\n", + " (feature_extractor): Static(\n", + " (quant): Quantize(scale=tensor([0.1671]), zero_point=tensor([60]), dtype=torch.quint8)\n", + " (model): Wav2Vec2FeatureEncoder(\n", + " (conv_layers): ModuleList(\n", + " (0): Wav2Vec2LayerNormConvLayer(\n", + " (conv): QuantizedConv1d(1, 512, kernel_size=(10,), stride=(5,), scale=0.23443543910980225, zero_point=67)\n", + " (layer_norm): QuantizedLayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (1): Wav2Vec2LayerNormConvLayer(\n", + " (conv): QuantizedConv1d(512, 512, 
kernel_size=(3,), stride=(2,), scale=0.8026854991912842, zero_point=62)\n", + " (layer_norm): QuantizedLayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (2): Wav2Vec2LayerNormConvLayer(\n", + " (conv): QuantizedConv1d(512, 512, kernel_size=(3,), stride=(2,), scale=1.169354796409607, zero_point=89)\n", + " (layer_norm): QuantizedLayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (3): Wav2Vec2LayerNormConvLayer(\n", + " (conv): QuantizedConv1d(512, 512, kernel_size=(3,), stride=(2,), scale=0.8424969911575317, zero_point=66)\n", + " (layer_norm): QuantizedLayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (4): Wav2Vec2LayerNormConvLayer(\n", + " (conv): QuantizedConv1d(512, 512, kernel_size=(3,), stride=(2,), scale=0.592667818069458, zero_point=54)\n", + " (layer_norm): QuantizedLayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (5): Wav2Vec2LayerNormConvLayer(\n", + " (conv): QuantizedConv1d(512, 512, kernel_size=(2,), stride=(2,), scale=0.4864558279514313, zero_point=68)\n", + " (layer_norm): QuantizedLayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " (6): Wav2Vec2LayerNormConvLayer(\n", + " (conv): QuantizedConv1d(512, 512, kernel_size=(2,), stride=(2,), scale=0.4137037694454193, zero_point=41)\n", + " (layer_norm): QuantizedLayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (activation): GELUActivation()\n", + " )\n", + " )\n", + " )\n", + " (dequant): DeQuantize()\n", + " )\n", + " (feature_projection): Static(\n", + " (quant): Quantize(scale=tensor([0.0369]), zero_point=tensor([5]), dtype=torch.quint8)\n", + " (model): Wav2Vec2FeatureProjection(\n", + " (layer_norm): QuantizedLayerNorm((512,), eps=1e-05, elementwise_affine=True)\n", + " (projection): 
QuantizedLinear(in_features=512, out_features=1024, scale=0.7401247620582581, zero_point=64, qscheme=torch.per_tensor_affine)\n", + " (dropout): QuantizedDropout(p=0.1, inplace=False)\n", + " )\n", + " (dequant): DeQuantize()\n", + " )\n", + " (encoder): Wav2Vec2EncoderStableLayerNorm(\n", + " (pos_conv_embed): Wav2Vec2PositionalConvEmbedding(\n", + " (conv): ParametrizedConv1d(\n", + " 1024, 1024, kernel_size=(128,), stride=(1,), padding=(64,), groups=16\n", + " (parametrizations): ModuleDict(\n", + " (weight): ParametrizationList(\n", + " (0): _WeightNorm()\n", + " )\n", + " )\n", + " )\n", + " (padding): Wav2Vec2SamePadLayer()\n", + " (activation): GELUActivation()\n", + " )\n", + " (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " (layers): ModuleList(\n", + " (0-23): 24 x Wav2Vec2EncoderLayerStableLayerNorm(\n", + " (attention): Wav2Vec2Attention(\n", + " (k_proj): DynamicQuantizedLinear(in_features=1024, out_features=1024, dtype=torch.qint8, qscheme=torch.per_tensor_affine)\n", + " (v_proj): DynamicQuantizedLinear(in_features=1024, out_features=1024, dtype=torch.qint8, qscheme=torch.per_tensor_affine)\n", + " (q_proj): DynamicQuantizedLinear(in_features=1024, out_features=1024, dtype=torch.qint8, qscheme=torch.per_tensor_affine)\n", + " (out_proj): DynamicQuantizedLinear(in_features=1024, out_features=1024, dtype=torch.qint8, qscheme=torch.per_tensor_affine)\n", + " )\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", + " (feed_forward): Wav2Vec2FeedForward(\n", + " (intermediate_dropout): Dropout(p=0.1, inplace=False)\n", + " (intermediate_dense): DynamicQuantizedLinear(in_features=1024, out_features=4096, dtype=torch.qint8, qscheme=torch.per_tensor_affine)\n", + " (intermediate_act_fn): GELUActivation()\n", + " (output_dense): DynamicQuantizedLinear(in_features=4096, out_features=1024, dtype=torch.qint8, 
qscheme=torch.per_tensor_affine)\n", + " (output_dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (enc): Sequential(\n", + " (linear1): Linear(\n", + " (w): DynamicQuantizedLinear(in_features=1024, out_features=1024, dtype=torch.qint8, qscheme=torch.per_tensor_affine)\n", + " )\n", + " (bn1): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (activation): LeakyReLU(negative_slope=0.01)\n", + " (drop): Dropout(p=0.15, inplace=False)\n", + " (linear2): Linear(\n", + " (w): DynamicQuantizedLinear(in_features=1024, out_features=1024, dtype=torch.qint8, qscheme=torch.per_tensor_affine)\n", + " )\n", + " (bn2): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (activation2): LeakyReLU(negative_slope=0.01)\n", + " (drop2): Dropout(p=0.15, inplace=False)\n", + " (linear3): Linear(\n", + " (w): DynamicQuantizedLinear(in_features=1024, out_features=1024, dtype=torch.qint8, qscheme=torch.per_tensor_affine)\n", + " )\n", + " (bn3): BatchNorm1d(\n", + " (norm): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (activation3): LeakyReLU(negative_slope=0.01)\n", + " )\n", + " (ctc_lin): Linear(\n", + " (w): Linear(in_features=1024, out_features=1000, bias=True)\n", + " )\n", + " (log_softmax): Softmax()\n", + " )\n", + " )\n", + " (decoding_function): CTCBeamSearcher()\n", + ")" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "quantized_model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xMpLUITwUoMr" + }, + "source": [ + "Next, we benchmark the quantized model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 508945, + "status": "ok", + "timestamp": 1714059604748, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "H5_yCY7PfDSV", + "outputId": "8db35a6b-b6d6-4b2f-dfd9-4140921c7910" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "warming up: 100%|██████████| 10/10 [01:16<00:00, 7.61s/it]\n", + "evaluating: 100%|██████████| 100/100 [07:12<00:00, 4.32s/it]\n" + ] + } + ], + "source": [ + "quantized_model.eval()\n", + "wer, rtf = benchmark(quantized_model, audio_subset, ref_subset)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 19, + "status": "ok", + "timestamp": 1714059604748, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "Ty2SNP1ZxS1C", + "outputId": "6eccdef2-19a6-450f-b3b0-02f33e4ac739" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Quantized Model\n", + "WER(%): 7.335907335907336\n", + "RTF: 0.6004914075674289\n" + ] + } + ], + "source": [ + "print(f\"Quantized Model\\nWER(%): {wer}\\nRTF: {rtf}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WrqZAqPF5mrF" + }, + "source": [ + "We can observe a significant decrease in RTF with a reasonable increase in WER. This shows that the quantization has been successful." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "A1dgoees085g" + }, + "source": [ + "Finally, if you need to do any more quantization benchmarking with other models, you may delete this one to free up RAM." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 2340, + "status": "ok", + "timestamp": 1714059607085, + "user": { + "displayName": "Justin Lam", + "userId": "12539712095741347076" + }, + "user_tz": -60 + }, + "id": "ebUa57bV07rl", + "outputId": "ce63ebfd-add1-477b-f33d-76d928b3c622" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "4479" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "del quantized_model\n", + "gc.collect()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and 
Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1Qa9ejsBf8xlW-IRtCAJDd4l7MNhdWm_8", + "timestamp": 1714190616904 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.ipynb b/docs/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.ipynb new file mode 100644 index 0000000000..ddbc4d08f7 --- /dev/null +++ b/docs/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.ipynb @@ -0,0 +1,7171 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + 
"id": "79ryKiGHinQ3" + }, + "source": [ + "# Pretrained Models and Fine-Tuning with HuggingFace\n", + "\n", + "Training DNN models is often very time-consuming and expensive.\n", + "For this reason, whenever it is possible, using off-the-shelf pretrained models can be convenient in various scenarios.\n", + "\n", + "In SpeechBrain we provide pre-trained models and we also encourage users to share their own using \"drawing\"[HuggingFace Hub](https://huggingface.co/models)\"drawing\" as we strongly believe that sharing models can help research.\n", + "\n", + "You can browse our official pre-trained models [here](https://huggingface.co/speechbrain).\n", + "\n", + "If you have a pre-trained model and want to include it among the official ones, please consider opening a pull request on [GitHub](https://github.com/speechbrain/speechbrain/blob/develop/README.md) with all the details of your model!\n", + "\n", + "We provide a simple and straightforward way to download and instantiate a state-of-the-art pretrained-model and use it either for direct inference or for fine-tuning/knowledge distillation or whatever new fancy technique you can come up with!\n", + "\n", + "With this tutorial, you will learn how to:\n", + "\n", + "1. Use pretrained models to infer on your data.\n", + "2. 
Use pretrained models as a component of a new pipeline (e.g language models, finetuning, speaker embeddings extraction ...).\n", + "\n", + "## Prerequisites\n", + "- [SpeechBrain Introduction](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/introduction-to-speechbrain.html)\n", + "- [YAML tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html)\n", + "- [Brain Class tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html)\n", + "- [DataIOBasics](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HND7bB_1S07R" + }, + "source": [ + "## Installing Dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "executionInfo": { + "elapsed": 34461, + "status": "ok", + "timestamp": 1706105674984, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "BAzQuVzl6ww1" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "executionInfo": { + "elapsed": 21062, + "status": "ok", + "timestamp": 1706105696010, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "_pC-98RDOqPq" + }, + "outputs": [], + "source": [ + "%%capture\n", + "import speechbrain\n", + "# here we download the material needed for this tutorial: images and an example based on mini-librispeech\n", + "!wget https://www.dropbox.com/s/b61lo6gkpuplanq/MiniLibriSpeechTutorial.tar.gz?dl=0\n", + "!tar -xvzf MiniLibriSpeechTutorial.tar.gz?dl=0\n", + "# downloading mini_librispeech dev data\n", + "!wget https://www.openslr.org/resources/31/dev-clean-2.tar.gz\n", + 
"!tar -xvzf dev-clean-2.tar.gz" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_KCk7uODNgNB" + }, + "source": [ + "## Using PreTrained models to perform inference on your data\n", + "\n", + "In this section, we will provide examples on using pretrained models with various tasks including:\n", + "1. Automatic Speech Recognition.\n", + "2. Speaker Recognition, Verification and Diarization.\n", + "3. Source Separation\n", + "\n", + "**Many more can be found in our \"drawing\"[HuggingFace Hub](https://huggingface.co/models)\"drawing\"!**" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "m0xCb38O6kFM" + }, + "source": [ + "### Automatic Speech Recognition" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PaWLPvJ2Nrxm" + }, + "source": [ + "Suppose we want to try out a pretrained ASR model on your data.\n", + "\n", + "Maybe we want to see if our new speech enhancement algorithm is able to also improve Word Error Rate or maybe we just want to transcribe a lecture recording.\n", + "\n", + "After browsing models on \"drawing\"[HuggingFace](https://huggingface.co/models)\"drawing\", we choose the following ASR pipeline: [super cool and SOTA ASR pipeline](https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech), trained on LibriSpeech.\n", + "\n", + "This ASR pipeline is made of three components as detailed [here](https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech):\n", + "\n", + "\n", + "1. a CRDNN-based seq2seq E2E ASR model trained following this [recipe](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/ASR/seq2seq).\n", + "2. a RNN-based LM.\n", + "3. 
a SentencePiece Tokenizer object needed transforms words into subword units.\n", + "\n", + "**Now, we can transcribe any audio file in solely 3 lines of code!**\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 215, + "referenced_widgets": [ + "73417ed270d7450984a48f3c7260eef7", + "85a4d1969185444aa63499a3e59faa2c", + "ee1bf41d42a54f9a92d76f5394626e97", + "5e58c000011c48879130e6374bbab799", + "7ea8ba3f9016452bb6ec20d3502675df", + "76a0e79fab2644908c86b7784d67504e", + "ab133c95b3da4bdf86777ade47d60698", + "7cfb724f62f14f889a3d3a69a9e7acd8", + "2e8935dded774cb9bcd87167992c5558", + "76fffbd6bc9542359181b3333077b888", + "1861687899e8426cb9bc7c74d0a51aea", + "4873f222358a46c7b50d741a1932c5a6", + "b5d8d5c9c94b4329a7cf143448476408", + "0adda9c3d3a14ceaa3d247b59c962554", + "a1e78669452a47899a85a7fae3a15661", + "8073c957b55e4b1c870b8e7c50d3765a", + "84bada4f0df245469620edab3ad40c38", + "3bd1dfae506d4299af956d497d96d9ad", + "402e80d5952d4da991f798672ff53d46", + "2232c1b90a0a40d7b470a492dd470197", + "3940e8873ae3498393094ea7c361a7fc", + "e2cf26fb7f29467daef310993935888f", + "d3278bfc8503432b8d16c9ad47963ee5", + "bac9acbddead48e1b90aded2f686c5d7", + "4b7f1dcce7c14f758c8b33e196214a88", + "4d695e2724324b5fa4af39345b38226c", + "baa6984bd26244ce849d203aa0d2dc30", + "b28ec6dd2556486693cb7657158b4f35", + "a6d2f27d5d4c493083ead216c8137c89", + "5c5bc290e7664009a63b8bc45d676aa7", + "adba26074a384a34ac2866813537c0d3", + "f81b74f0764a440392633978639dc9a8", + "0420cacd6a7349e8b7ccaef1ddc985b2", + "634871e7ef574c48a22fe20bd29c98fb", + "a2c4a9390bd6495aae3a79a7174f5832", + "de326e62dcee451ebafdef0a888f9974", + "fe6ee540077e4fd086ae8975feb97deb", + "890276931b55409cacaccbea256fe745", + "7b0782edb2394087910b772c90b03be9", + "0657d483243b46748b6dd499a188b588", + "d7765befe0af40e9bb810f5896f4b52a", + "8d0a5753b71243c59f4741043f98b857", + "304a1d7486b94d8c83371c54cfda2ca4", + 
"7a0a2f4b5b1c41c2bd27f1406568ff34", + "9e69e88263a24037aa45fb9f837c2c40", + "cb5045ef6ee544cf9e7f4d7cf772c14f", + "1ea356cbbabf483b94fac984fa0d3690", + "3301f47181664600aac6a0483dfa69a2", + "a3f6fdd0090c4c35b1f44d01be6d8e4f", + "3fec66291aed46e9aaec1e60748760d6", + "934446687ef449149f71c9c64923ed4d", + "303541f7df7044b1b43c6bd38d8058f1", + "1ff81f3732074056a9f1834501219e55", + "01a0f8b6ae4e4fbcaf1e96dd3adce92a", + "9707ff2d35094cfd97c39d166579cdba" + ] + }, + "executionInfo": { + "elapsed": 34118, + "status": "ok", + "timestamp": 1706105730109, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "GIJgefQpNnVO", + "outputId": "750201d3-46ab-493d-ad62-3ab2240d23ce" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "73417ed270d7450984a48f3c7260eef7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "hyperparams_develop.yaml: 0%| | 0.00/4.83k [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "fig, ax = plt.subplots()\n", + "ax.scatter(principalComponents[:, 0], principalComponents[:, 1])\n", + "\n", + "for i, spkid in enumerate(labels):\n", + " ax.annotate(spkid, (principalComponents[i, 0], principalComponents[i, 1]))\n", + "plt.xlabel(\"Principal Component 1\")\n", + "plt.ylabel(\"Principal Component 2\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rDfkPAUA_JUk" + }, + "source": [ + "Given the embeddings computed with the ECAPA-TDNN model, we can perform speaker verification in this way:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 3166, + "status": "ok", + "timestamp": 1706105755778, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "KuWnaG-j_Tr3", + 
"outputId": "7d6c1ac5-d9aa-4438-cf49-9bd2ba883fec" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([0.6952]) tensor([True])\n", + "tensor([0.0159]) tensor([False])\n" + ] + } + ], + "source": [ + "# Different files from the same speaker\n", + "file1 = './LibriSpeech/dev-clean-2/1272/135031/1272-135031-0000.flac' # Same speaker\n", + "file2 = './LibriSpeech/dev-clean-2/1272/141231/1272-141231-0004.flac' # Same speaker\n", + "file3 = './LibriSpeech/dev-clean-2/1462/170142/1462-170142-0000.flac' # Different speaker\n", + "\n", + "# Test with 2 files from the same speaker\n", + "score, prediction = verification.verify_files(file1, file2)\n", + "print(score, prediction)\n", + "\n", + "# Test with 2 files from different speakers\n", + "score, prediction = verification.verify_files(file1, file3)\n", + "print(score, prediction)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VSax2FUVBiRu" + }, + "source": [ + "LibriSpeech is a very easy task for speaker verification. However, the ECAPA model works very well on other types of data. With voxceleb, we achieved an Equal Error Rate of 0.69%. Feel free to record yourself (with a sampling rate of 16 kHz) and play with that!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2AV_AknNQebh" + }, + "source": [ + "### Source Separation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OFiEZitTaxcx" + }, + "source": [ + "What about Source Separation ?\n", + "A pretrained SepFormer model is available [here]:(https://huggingface.co/speechbrain/sepformer-wsj02mix). It can be used right out off the box to perform separation on clean speech mixtures.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "08KQG5rVQkaA" + }, + "source": [ + "We create an artificial mixture here by mixing together two utterances from MiniLibriSpeech." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "executionInfo": { + "elapsed": 172, + "status": "ok", + "timestamp": 1706105755780, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "SyEQ3QmmQR3H" + }, + "outputs": [], + "source": [ + "import torchaudio\n", + "s1, fs = torchaudio.load('./LibriSpeech/dev-clean-2/1272/135031/1272-135031-0003.flac')\n", + "s2, fs = torchaudio.load('./LibriSpeech/dev-clean-2/1462/170142/1462-170142-0001.flac')\n", + "\n", + "# we resample because we will use a model trained on 8KHz data.\n", + "resampler = torchaudio.transforms.Resample(fs, 8000)\n", + "s1 = resampler(s1)\n", + "s2 = resampler(s2)\n", + "fs= 8000\n", + "\n", + "min_len = min(s1.shape[-1], s2.shape[-1])\n", + "s1 = s1[:, :min_len]\n", + "s2 = s2[:, :min_len]\n", + "mix = s1 + s2" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "executionInfo": { + "elapsed": 131, + "status": "ok", + "timestamp": 1706105755783, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "1jBzCNMtRbdJ" + }, + "outputs": [], + "source": [ + "import IPython.display as ipd" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d6MI6Xb5-9nu" + }, + "source": [ + "We can listen to this artificial mixture." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 73 + }, + "executionInfo": { + "elapsed": 109, + "status": "ok", + "timestamp": 1706105755784, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "FzJXPO9LXIKy", + "outputId": "033a197f-8946-4e9d-9947-7cfb60bb17fa" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ipd.Audio(mix[0], rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LTuaNzcG_DGX" + }, + "source": [ + "Now, we can instantiate the pretrained SepFormer from \"drawing\"[HuggingFace](https://huggingface.co/models)\"drawing\"." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 166, + "referenced_widgets": [ + "e625e51535634cb88f89f93c1b95a5b8", + "b7ff251217dc4a31abd29db0d8ddb8ee", + "6e88d9cddfae4c0585a030628f415163", + "5a4bb7e698db43ab98cf5e3cad123eff", + "d37c1933e8a24a11ae9a2b3180e53309", + "c65d659414184d6bbf896b581177e1a4", + "f92e99fc7682436a85d11ec2f115e206", + "7a59681cc6884eeb90cad0ad810eb340", + "5dcb9b7c948c42a6afb63bd89664744f", + "16f0ca4e8c1944a1a1e90b5484f9a270", + "8c5f52ed4b7042df8bb636e73c702d64", + "f6739e26b4de4cb8b3d9e3f62876325f", + "f52c23a438e2415cb4632ae276299cc4", + "94ac439a7b2344acb89dd968f4832d24", + "16c602c5ca4b42d5a22e97249dbc8316", + "ea33a83bcbbc4ba5a983712f1d197e17", + "6b3edbd6a47b4b3982ab30870d853199", + "8c9d3cdb6d9248129db49203fb4df0ec", + "2961959a379f4934b2f47f3ec99736cd", + "d439ae84c3cd41f0b89eaa7c32ba310e", + "a9ede27dcd6c4c17aad2e03976788ac8", + "1791bf4e7a7f4f7aa03bb9c420f2c3af", + "00b563eeca124124bc81f8f19a5f0e48", + "86b24bfbc04d4bda8cce53a140c17e62", + 
"4ef78f63daba4f20a768e58c023a8eee", + "daaf30a442cb4fb48db9a082a40c6376", + "6b302652bec7427680692afe5c3630f9", + "78cfac8bcaea470ebfba4104154cec3d", + "d9b66e1b93fb4664beeadcf100a43232", + "6d597a48192e436f89b71fd1b18a37fb", + "bd7023fc26be4623ab2c0e98b242ea1a", + "2afeeaed1b344835bc0cd7a3f16add8e", + "ccd1e7c8a24f4a5e90875d85ca2cf6a3", + "9a755c6dcb464a4c91ba4443ea9d30d9", + "0c1d9a4d91eb4f0397bc17c1abeafcd6", + "5bcd65e2cc574d428cf8fa49207b1d0f", + "e600eb9e6fb74c35b3015ac6d56b5b90", + "89f354d3bb7d40a9ba8b106071802006", + "9b1b16de38574725bcbf9527f77b8820", + "c81777f5d3814ced8096613714f3d054", + "3461887798324f60993c526e21a197ff", + "1bcc055f1b474119bb9b734939340767", + "0d4c194f09524aac95e602a14d9a485d", + "1d4f4e45172e432a9a6a9e2545b87e6b" + ] + }, + "executionInfo": { + "elapsed": 1860, + "status": "ok", + "timestamp": 1706105757564, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "8Fmt65bJS1X4", + "outputId": "bf07934e-2d55-43b8-c8e7-b7e1b6cb7d6f" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e625e51535634cb88f89f93c1b95a5b8", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "hyperparams.yaml: 0%| | 0.00/1.51k [00:00\n", + " \n", + " Your browser does not support the audio element.\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ipd.Audio(est_sources[:, 0], rate=fs)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 73 + }, + "executionInfo": { + "elapsed": 63, + "status": "ok", + "timestamp": 1706105769767, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "m-GMA0Q7YA62", + "outputId": "9c698565-da04-43fb-d50f-63603afaea12" + }, + "outputs": [ + { + "data": 
{ + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ipd.Audio(est_sources[:, 1], rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "N_0v7Ougbgcm" + }, + "source": [ + "## Fine-tuning or using pretrained models as components of a new pipeline" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RnrPT0GuA03w" + }, + "source": [ + "Here we will show how to fine-tune the CRDNN Encoder Decoder Seq2Seq model used to transcribe the audio in the previous example and downloaded from [here](https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech/blob/main/hyperparams.yaml).\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "executionInfo": { + "elapsed": 6038, + "status": "ok", + "timestamp": 1706105775747, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "ARD9PaEpE4oD" + }, + "outputs": [], + "source": [ + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "asr_model = EncoderDecoderASR.from_hparams(source=\"speechbrain/asr-crdnn-rnnlm-librispeech\", savedir=\"./pretrained_ASR\", hparams_file=\"hyperparams_develop.yaml\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fuuMQdb4ExyP" + }, + "source": [ + "First we can see that the pretrained `asr_model` allows to access easily all its components:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 185, + "status": "ok", + "timestamp": 1706105775749, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "q7c1qywYHkOo", + "outputId": "464933f1-ca5c-435a-fe26-675f1894da29" + }, + "outputs": [ + { + "data": { + "text/plain": 
[ + "odict_keys(['normalizer', 'encoder', 'decoder', 'lm_model'])" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "asr_model.mods.keys()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F5d9JG94JBLs" + }, + "source": [ + "These keys corresponds to the modules entry specified in the [hyperparameter file](https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech/blob/main/hyperparams.yaml):\n", + "\n", + "\n", + "```yaml\n", + "modules:\n", + " encoder: !ref \n", + " decoder: !ref \n", + " lm_model: !ref \n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KNbCyQHIJYKx" + }, + "source": [ + "We can also see that the encoder is actually composed of several sub-modules:\n", + "\n", + "```yaml\n", + "encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential\n", + " input_shape: [null, null, !ref ]\n", + " compute_features: !ref \n", + " normalize: !ref \n", + " model: !ref \n", + "```\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bR6c1k20J3SN" + }, + "source": [ + "These are simply accessible as members of the encoder:" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 175, + "status": "ok", + "timestamp": 1706105775752, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "S6Vwi7cNI7eq", + "outputId": "dbf05273-283a-4a8f-89fa-fdb3a08ca65c" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "LengthsCapableSequential(\n", + " (compute_features): Fbank(\n", + " (compute_STFT): STFT()\n", + " (compute_fbanks): Filterbank()\n", + " (compute_deltas): Deltas()\n", + " (context_window): ContextWindow()\n", + " )\n", + " (normalize): InputNormalization()\n", + " (model): CRDNN(\n", + " (CNN): Sequential(\n", + " (block_0): CNN_Block(\n", + " 
(conv_1): Conv2d(\n", + " (conv): Conv2d(1, 128, kernel_size=(3, 3), stride=(1, 1))\n", + " )\n", + " (norm_1): LayerNorm(\n", + " (norm): LayerNorm((40, 128), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " (act_1): LeakyReLU(negative_slope=0.01)\n", + " (conv_2): Conv2d(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))\n", + " )\n", + " (norm_2): LayerNorm(\n", + " (norm): LayerNorm((40, 128), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " (act_2): LeakyReLU(negative_slope=0.01)\n", + " (pooling): Pooling1d(\n", + " (pool_layer): MaxPool2d(kernel_size=(1, 2), stride=(1, 2), padding=(0, 0), dilation=(1, 1), ceil_mode=False)\n", + " )\n", + " (drop): Dropout2d(\n", + " (drop): Dropout2d(p=0.15, inplace=False)\n", + " )\n", + " )\n", + " (block_1): CNN_Block(\n", + " (conv_1): Conv2d(\n", + " (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1))\n", + " )\n", + " (norm_1): LayerNorm(\n", + " (norm): LayerNorm((20, 256), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " (act_1): LeakyReLU(negative_slope=0.01)\n", + " (conv_2): Conv2d(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1))\n", + " )\n", + " (norm_2): LayerNorm(\n", + " (norm): LayerNorm((20, 256), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " (act_2): LeakyReLU(negative_slope=0.01)\n", + " (pooling): Pooling1d(\n", + " (pool_layer): MaxPool2d(kernel_size=(1, 2), stride=(1, 2), padding=(0, 0), dilation=(1, 1), ceil_mode=False)\n", + " )\n", + " (drop): Dropout2d(\n", + " (drop): Dropout2d(p=0.15, inplace=False)\n", + " )\n", + " )\n", + " )\n", + " (time_pooling): Pooling1d(\n", + " (pool_layer): MaxPool2d(kernel_size=(1, 4), stride=(1, 4), padding=(0, 0), dilation=(1, 1), ceil_mode=False)\n", + " )\n", + " (RNN): LSTM(\n", + " (rnn): LSTM(2560, 1024, num_layers=4, batch_first=True, dropout=0.15, bidirectional=True)\n", + " )\n", + " (DNN): Sequential(\n", + " (block_0): DNN_Block(\n", + " (linear): Linear(\n", + " (w): 
Linear(in_features=2048, out_features=512, bias=True)\n", + " )\n", + " (norm): BatchNorm1d(\n", + " (norm): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (act): LeakyReLU(negative_slope=0.01)\n", + " (dropout): Dropout(p=0.15, inplace=False)\n", + " )\n", + " (block_1): DNN_Block(\n", + " (linear): Linear(\n", + " (w): Linear(in_features=512, out_features=512, bias=True)\n", + " )\n", + " (norm): BatchNorm1d(\n", + " (norm): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (act): LeakyReLU(negative_slope=0.01)\n", + " (dropout): Dropout(p=0.15, inplace=False)\n", + " )\n", + " )\n", + " )\n", + ")" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "asr_model.mods.encoder" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 138, + "status": "ok", + "timestamp": 1706105775753, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "sgv5daThU1UC", + "outputId": "c28da378-9d26-4dad-808f-3d4be1a4b7f3" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "Fbank(\n", + " (compute_STFT): STFT()\n", + " (compute_fbanks): Filterbank()\n", + " (compute_deltas): Deltas()\n", + " (context_window): ContextWindow()\n", + ")" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "asr_model.mods.encoder.compute_features" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hoYbLtZmJ8V6" + }, + "source": [ + "The training hyperparameters also can be easily accessed:" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 86, + "status": "ok", + "timestamp": 
1706105775753, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "fBy1ryJ6H2LJ", + "outputId": "b2bcd22a-37e3-48dd-98eb-1df97d9f1a41" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "['__class__',\n", + " '__delattr__',\n", + " '__dict__',\n", + " '__dir__',\n", + " '__doc__',\n", + " '__eq__',\n", + " '__format__',\n", + " '__ge__',\n", + " '__getattribute__',\n", + " '__gt__',\n", + " '__hash__',\n", + " '__init__',\n", + " '__init_subclass__',\n", + " '__le__',\n", + " '__lt__',\n", + " '__ne__',\n", + " '__new__',\n", + " '__reduce__',\n", + " '__reduce_ex__',\n", + " '__repr__',\n", + " '__setattr__',\n", + " '__sizeof__',\n", + " '__str__',\n", + " '__subclasshook__',\n", + " 'activation',\n", + " 'asr_model',\n", + " 'beam_size',\n", + " 'blank_index',\n", + " 'bos_index',\n", + " 'cnn_blocks',\n", + " 'cnn_channels',\n", + " 'cnn_kernelsize',\n", + " 'compute_features',\n", + " 'coverage_penalty',\n", + " 'coverage_scorer',\n", + " 'ctc_lin',\n", + " 'dec',\n", + " 'dec_neurons',\n", + " 'decoder',\n", + " 'dnn_blocks',\n", + " 'dnn_neurons',\n", + " 'dropout',\n", + " 'emb',\n", + " 'emb_size',\n", + " 'enc',\n", + " 'encoder',\n", + " 'eos_index',\n", + " 'eos_threshold',\n", + " 'inter_layer_pooling_size',\n", + " 'lm_model',\n", + " 'lm_weight',\n", + " 'log_softmax',\n", + " 'max_attn_shift',\n", + " 'max_decode_ratio',\n", + " 'min_decode_ratio',\n", + " 'modules',\n", + " 'n_fft',\n", + " 'n_mels',\n", + " 'normalizer',\n", + " 'output_neurons',\n", + " 'pretrainer',\n", + " 'rnn_bidirectional',\n", + " 'rnn_class',\n", + " 'rnn_layers',\n", + " 'rnn_neurons',\n", + " 'rnnlm_scorer',\n", + " 'sample_rate',\n", + " 'scorer',\n", + " 'seq_lin',\n", + " 'temperature',\n", + " 'temperature_lm',\n", + " 'time_pooling_size',\n", + " 'tokenizer',\n", + " 'using_max_attn_shift']" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ 
+ "dir(asr_model.hparams)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rUsVgcjgKJKz" + }, + "source": [ + "Such information are pretty useful as we can directly use some of these hyperparameters in our fine-tuning pipeline to ensure compability with the pretrained model (e.g. use the same BOS or EOS indexes)!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BDYhruKqLv0G" + }, + "source": [ + "### Setting up the data pipeline" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Uz8KzuQPEdW-" + }, + "source": [ + "First we must set up the data pipeline for downloaded MiniLibriSpeech data.\n", + "\n", + "If you are not familiar with **SpeechBrain dataIO** you may want to take a look at the [tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)." + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "executionInfo": { + "elapsed": 56, + "status": "ok", + "timestamp": 1706105775758, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "MB9zoQGFcjsd" + }, + "outputs": [], + "source": [ + "import speechbrain as sb\n", + "import torch" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "axAQJISREGM7" + }, + "source": [ + "We parse MiniLibriSpeech to a suitable JSON annotation" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "executionInfo": { + "elapsed": 3189, + "status": "ok", + "timestamp": 1706105778894, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "DwcQjRS2cnL5" + }, + "outputs": [], + "source": [ + "from parse_data import parse_to_json # parse_data is a local library downloaded before (see Installing Dependencies step)\n", + "parse_to_json(\"./LibriSpeech/dev-clean-2\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VIR_XyydEOTS" + }, + "source": [ + "We 
instantiate a **DynamicItemDataset** from the JSON annotation" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "executionInfo": { + "elapsed": 246, + "status": "ok", + "timestamp": 1706105778898, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "kdBfsbdUb3kn" + }, + "outputs": [], + "source": [ + "from speechbrain.dataio.dataset import DynamicItemDataset\n", + "dataset = DynamicItemDataset.from_json(\"data.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NEB015VjMDLR" + }, + "source": [ + "We sort the dataset based on length to speed-up training" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "executionInfo": { + "elapsed": 237, + "status": "ok", + "timestamp": 1706105778899, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "YSHVrP3CgLq2" + }, + "outputs": [], + "source": [ + "dataset = dataset.filtered_sorted(sort_key=\"length\", select_n=100)\n", + "# we limit the dataset to 100 utterances to keep the training short in this Colab example" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "npAUJCOEMhXE" + }, + "source": [ + "and add a pipeline for reading audio" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "executionInfo": { + "elapsed": 196, + "status": "ok", + "timestamp": 1706105778899, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "Kauu_ke5eglF" + }, + "outputs": [], + "source": [ + "dataset.add_dynamic_item(sb.dataio.dataio.read_audio, takes=\"file_path\", provides=\"signal\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "M1K1xvEHMnmP" + }, + "source": [ + "and another one to encode the words from annotation.\n", + "\n", + "It is worth noting that we use the Tokenizer object obtained from the pretrained 
`asr_model` and that we encode the words with `asr_model.tokenizer.encode_as_ids(words)`. We also reuse `asr_model` `eos_index` and `bos_index` accessed via `asr_model.hparams` to ensure that all these parameters correspond to the ones used at pretraining time!" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "executionInfo": { + "elapsed": 176, + "status": "ok", + "timestamp": 1706105778899, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "bf1rDbYBeh-7" + }, + "outputs": [], + "source": [ + "# 3. Define text pipeline:\n", + "@sb.utils.data_pipeline.takes(\"words\")\n", + "@sb.utils.data_pipeline.provides(\n", + " \"words\", \"tokens_list\", \"tokens_bos\", \"tokens_eos\", \"tokens\")\n", + "def text_pipeline(words):\n", + " yield words\n", + " tokens_list = asr_model.tokenizer.encode_as_ids(words)\n", + " yield tokens_list\n", + " tokens_bos = torch.LongTensor([asr_model.hparams.bos_index] + (tokens_list))\n", + " yield tokens_bos\n", + " tokens_eos = torch.LongTensor(tokens_list + [asr_model.hparams.eos_index]) # we use same eos and bos indexes as in pretrained model\n", + " yield tokens_eos\n", + " tokens = torch.LongTensor(tokens_list)\n", + " yield tokens" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "executionInfo": { + "elapsed": 169, + "status": "ok", + "timestamp": 1706105778901, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "5ZaW1-WEwhDf" + }, + "outputs": [], + "source": [ + "dataset.add_dynamic_item(text_pipeline)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1Ijrx8r8OhuH" + }, + "source": [ + "We set the dataset object to return the signal tensor as well as the encoded tokens and words." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 163, + "status": "ok", + "timestamp": 1706105778901, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "cAulkwZRfzDo", + "outputId": "cc674614-cd45-452b-be72-2c0fefcced62" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'id': '777-126732-0081',\n", + " 'signal': tensor([-9.1553e-05, -3.6621e-04, -4.8828e-04, ..., 2.1362e-04,\n", + " 2.4414e-04, 3.3569e-04]),\n", + " 'words': 'COMFORTABLE DEAR',\n", + " 'tokens_list': [875, 157, 598],\n", + " 'tokens_bos': tensor([ 0, 875, 157, 598]),\n", + " 'tokens_eos': tensor([875, 157, 598, 0]),\n", + " 'tokens': tensor([875, 157, 598])}" + ] + }, + "execution_count": 33, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dataset.set_output_keys([\"id\", \"signal\", \"words\", \"tokens_list\", \"tokens_bos\", \"tokens_eos\", \"tokens\"])\n", + "dataset[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9-LZusHdOr61" + }, + "source": [ + "### Fine-Tuning the ASR model\n", + "\n", + "First, we define our Brain class that will perform the fine-tuning. 
Here, we just take an example similar to the Brain class of the original [Seq2Seq LibriSpeech recipe](https://github.com/speechbrain/speechbrain/blob/develop/recipes/LibriSpeech/ASR/seq2seq/train.py).\n" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": { + "executionInfo": { + "elapsed": 153, + "status": "ok", + "timestamp": 1706105778903, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "EfpGJ3FIgFRu" + }, + "outputs": [], + "source": [ + "from speechbrain.lobes.features import Fbank\n", + "import torch\n", + "\n", + "# Define fine-tuning procedure\n", + "class EncDecFineTune(sb.Brain):\n", + "\n", + " def on_stage_start(self, stage, epoch):\n", + " # enable grad for all modules we want to fine-tune\n", + " if stage == sb.Stage.TRAIN:\n", + " for module in [self.modules.enc, self.modules.emb, self.modules.dec, self.modules.seq_lin]:\n", + " for p in module.parameters():\n", + " p.requires_grad = True\n", + "\n", + " def compute_forward(self, batch, stage):\n", + " \"\"\"Forward computations from the waveform batches to the output probabilities.\"\"\"\n", + " batch = batch.to(self.device)\n", + " wavs, wav_lens = batch.signal\n", + " tokens_bos, _ = batch.tokens_bos\n", + " wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)\n", + "\n", + " # Forward pass\n", + " feats = self.modules.compute_features(wavs)\n", + " feats = self.modules.normalize(feats, wav_lens)\n", + " #feats.requires_grad = True\n", + " x = self.modules.enc(feats)\n", + "\n", + " e_in = self.modules.emb(tokens_bos) # y_in bos + tokens\n", + " h, _ = self.modules.dec(e_in, x, wav_lens)\n", + "\n", + " # Output layer for seq2seq log-probabilities\n", + " logits = self.modules.seq_lin(h)\n", + " p_seq = self.hparams.log_softmax(logits)\n", + "\n", + " return p_seq, wav_lens\n", + "\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " \"\"\"Computes the loss (CTC+NLL) given 
predictions and targets.\"\"\"\n", + "\n", + "\n", + " p_seq, wav_lens = predictions\n", + "\n", + " ids = batch.id\n", + " tokens_eos, tokens_eos_lens = batch.tokens_eos\n", + " tokens, tokens_lens = batch.tokens\n", + "\n", + " loss = self.hparams.seq_cost(\n", + " p_seq, tokens_eos, tokens_eos_lens)\n", + "\n", + "\n", + " return loss\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZJrCTNIBPVS7" + }, + "source": [ + "Here we define the modules and hyperparameters needed for the Brain class defined before.\n", + "\n", + "We fetch them directly from the pretrained model by accessing its `modules` and `hparams`. These can be found in the `hyperparams.yaml` file in the model [HuggingFace repo](https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech/blob/main/hyperparams.yaml)." + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": { + "executionInfo": { + "elapsed": 139, + "status": "ok", + "timestamp": 1706105778903, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "R3B4Dv1Wjfv6" + }, + "outputs": [], + "source": [ + "modules = {\"enc\": asr_model.mods.encoder.model,\n", + " \"emb\": asr_model.hparams.emb,\n", + " \"dec\": asr_model.hparams.dec,\n", + " \"compute_features\": asr_model.mods.encoder.compute_features, # we use the same features\n", + " \"normalize\": asr_model.mods.encoder.normalize,\n", + " \"seq_lin\": asr_model.hparams.seq_lin,\n", + "\n", + " }\n", + "\n", + "hparams = {\"seq_cost\": lambda x, y, z: speechbrain.nnet.losses.nll_loss(x, y, z, label_smoothing = 0.1),\n", + " \"log_softmax\": speechbrain.nnet.activations.Softmax(apply_log=True)}\n", + "\n", + "brain = EncDecFineTune(modules, hparams=hparams, opt_class=lambda x: torch.optim.SGD(x, 1e-5))\n", + "brain.tokenizer = asr_model.tokenizer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kqfkX393Rkbb" + }, + "source": [ + "The pre-trained model can be finally 
fine-tuned:" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 680130, + "status": "ok", + "timestamp": 1706106458896, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "aHWteDB0jXFp", + "outputId": "941550cf-f8a8-4be4-fce5-e26f0118a063" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 12/12 [05:44<00:00, 28.69s/it, train_loss=1.31]\n", + "100%|██████████| 12/12 [05:35<00:00, 27.99s/it, train_loss=1.28]\n" + ] + } + ], + "source": [ + "brain.fit(range(2), train_set=dataset,\n", + " train_loader_kwargs={\"batch_size\": 8, \"drop_last\":True, \"shuffle\": False})" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "l7qb4lrtG8G-" + }, + "source": [ + "## Pretrainer Class\n", + "In speechbrain, another way to perform pre-training is to use the PreTrainer Class (`speechbrain.utils.parameter_transfer.Pretrainer`). It orchestrates parameter transfer in a more structured way, which can aid in writing easy-to-share recipes (and it is also central in the implementation `speechbrain.pretrained` models). 
To use it, let's first initialize a model:" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": { + "executionInfo": { + "elapsed": 61, + "status": "ok", + "timestamp": 1706106458898, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "DJ9Wvc41K3p8" + }, + "outputs": [], + "source": [ + "from speechbrain.lobes.models.ECAPA_TDNN import ECAPA_TDNN\n", + "\n", + "model = ECAPA_TDNN(input_size= 80,\n", + " channels= [1024, 1024, 1024, 1024, 3072],\n", + " kernel_sizes= [5, 3, 3, 3, 1],\n", + " dilations= [1, 2, 3, 4, 1],\n", + " attention_channels= 128,\n", + " lin_neurons = 192)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TFPMuK0eLRWK" + }, + "source": [ + "At this level, the model is initialized with random parameters. However, we can use our pretrainer to replace random parameters with the ones stored in the saved checkpoint:" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": { + "executionInfo": { + "elapsed": 989, + "status": "ok", + "timestamp": 1706106459847, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "PsBfJrc5MhHV" + }, + "outputs": [], + "source": [ + "from speechbrain.utils.parameter_transfer import Pretrainer\n", + "\n", + "# Initialization of the pre-trainer\n", + "pretrain = Pretrainer(loadables={'model': model}, paths={'model': 'speechbrain/spkrec-ecapa-voxceleb/embedding_model.ckpt'})\n", + "\n", + "# We download the pretrained model from HuggingFace in this case\n", + "pretrain.collect_files()\n", + "pretrain.load_collected()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IBxnWXe4OYSJ" + }, + "source": [ + "Now, the model is no longer randomly initialized, but it contains the pre-trained parameters of `embedding_model.ckpt`. 
The path of the pre-trained model can be a **local path**, a **web url**, or a **huggingface repository**:" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": { + "executionInfo": { + "elapsed": 2641, + "status": "ok", + "timestamp": 1706106462469, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "4AQW7yD3PRLD" + }, + "outputs": [], + "source": [ + "# Local Path\n", + "pretrain = Pretrainer(collect_in='model_local', loadables={'model': model}, paths={'model': 'model_checkpoints/model.ckpt'})\n", + "pretrain.collect_files()\n", + "pretrain.load_collected()\n", + "\n", + "# Or web\n", + "pretrain = Pretrainer(collect_in='model_web', loadables={'model': model}, paths={'model': 'https://www.dropbox.com/s/2mdnl784ram5w8o/embedding_model.ckpt?dl=1'})\n", + "pretrain.collect_files()\n", + "pretrain.load_collected()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BBlIHzVjRHHA" + }, + "source": [ + "As you can see, you can use the variable `collect_in` to set where the pre-trained model is stored." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OyjEzG-6zIFT" + }, + "source": [ + "## Acknowledgements\n", + "\n", + "\n", + "* Many thanks to ([ziz19](https://github.com/ziz19)) who helped improving this Tutorial.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " 
archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [ + { + "file_id": "1LN7R3U3xneDgDRK2gC5MzGkLysCWxuC3", + "timestamp": 1706026019611 + }, + { + "file_id": "1228U7DsXZ3A_B3tkZoQhixC0WP6O0DZH", + "timestamp": 1615161894564 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "000de1ccc95c4cd8850d3a3424f4714a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": 
null, + "visibility": null, + "width": null + } + }, + "00b563eeca124124bc81f8f19a5f0e48": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_86b24bfbc04d4bda8cce53a140c17e62", + "IPY_MODEL_4ef78f63daba4f20a768e58c023a8eee", + "IPY_MODEL_daaf30a442cb4fb48db9a082a40c6376" + ], + "layout": "IPY_MODEL_6b302652bec7427680692afe5c3630f9" + } + }, + "01a0f8b6ae4e4fbcaf1e96dd3adce92a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + 
"0420cacd6a7349e8b7ccaef1ddc985b2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0631b7878fd14f38b0f8270c1fafe391": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_f3bc35d9420f44239f719e2f4b37d379", + "IPY_MODEL_df2df25b22aa434ebaa769dce4162ca8", + "IPY_MODEL_7def4f14f4414bd3a0b25bb628863123" + ], + "layout": "IPY_MODEL_dcfa2ee4056a47a38a4bb14deafe7366" + } + }, + "0657d483243b46748b6dd499a188b588": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0adda9c3d3a14ceaa3d247b59c962554": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": 
"@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_402e80d5952d4da991f798672ff53d46", + "max": 1409, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_2232c1b90a0a40d7b470a492dd470197", + "value": 1409 + } + }, + "0c1d9a4d91eb4f0397bc17c1abeafcd6": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_9b1b16de38574725bcbf9527f77b8820", + "placeholder": "​", + "style": "IPY_MODEL_c81777f5d3814ced8096613714f3d054", + "value": "decoder.ckpt: 100%" + } + }, + "0d3e8b469920445f88475b4496a5d66a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_000de1ccc95c4cd8850d3a3424f4714a", + "max": 1920, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_5d221ae2b9ac4e36a7cfe2cce3c536dc", + "value": 1920 + } + }, + "0d4c194f09524aac95e602a14d9a485d": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": 
"1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "0d555777e2d2405698247f6fb163454d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_eb2aa07593d44cbcae05ace5ace724a8", + "placeholder": "​", + "style": "IPY_MODEL_a2d5bacd3dfc4279b7434002341d000b", + "value": "mean_var_norm_emb.ckpt: 100%" + } + }, + "11890cbf63b24f13b0f3e75b7fc68d01": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": 
"@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "16c602c5ca4b42d5a22e97249dbc8316": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a9ede27dcd6c4c17aad2e03976788ac8", + "placeholder": "​", + "style": "IPY_MODEL_1791bf4e7a7f4f7aa03bb9c420f2c3af", + "value": " 113M/113M [00:00<00:00, 168MB/s]" + } + }, + "16f0ca4e8c1944a1a1e90b5484f9a270": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": 
"LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "1791bf4e7a7f4f7aa03bb9c420f2c3af": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "1861687899e8426cb9bc7c74d0a51aea": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "1bcc055f1b474119bb9b734939340767": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "1d4f4e45172e432a9a6a9e2545b87e6b": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "1ea356cbbabf483b94fac984fa0d3690": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_303541f7df7044b1b43c6bd38d8058f1", + "max": 253217, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_1ff81f3732074056a9f1834501219e55", + "value": 253217 + } + }, + "1ff81f3732074056a9f1834501219e55": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + 
"2232c1b90a0a40d7b470a492dd470197": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "241bb75e8c0141ec8c00ef562d0feb5f": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "24ee8a32861b4a3daa79a35583175dc7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + 
"_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "2961959a379f4934b2f47f3ec99736cd": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2afeeaed1b344835bc0cd7a3f16add8e": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + 
"flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2e8935dded774cb9bcd87167992c5558": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "2fda19c612c5486dbf75a5de2d6ebcda": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": 
null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "303541f7df7044b1b43c6bd38d8058f1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "304a1d7486b94d8c83371c54cfda2ca4": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": 
"@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "3301f47181664600aac6a0483dfa69a2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_01a0f8b6ae4e4fbcaf1e96dd3adce92a", + "placeholder": "​", + "style": "IPY_MODEL_9707ff2d35094cfd97c39d166579cdba", + "value": " 253k/253k [00:00<00:00, 4.97MB/s]" + } + }, + "3461887798324f60993c526e21a197ff": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": 
"LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "38550314ccb744c78b779de82a9f6d8e": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, 
+ "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "3940e8873ae3498393094ea7c361a7fc": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "3bd1dfae506d4299af956d497d96d9ad": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "3fec66291aed46e9aaec1e60748760d6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": 
"@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "402e80d5952d4da991f798672ff53d46": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + 
"max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "4873f222358a46c7b50d741a1932c5a6": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_b5d8d5c9c94b4329a7cf143448476408", + "IPY_MODEL_0adda9c3d3a14ceaa3d247b59c962554", + "IPY_MODEL_a1e78669452a47899a85a7fae3a15661" + ], + "layout": "IPY_MODEL_8073c957b55e4b1c870b8e7c50d3765a" + } + }, + "4b7f1dcce7c14f758c8b33e196214a88": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_5c5bc290e7664009a63b8bc45d676aa7", + "max": 479555971, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_adba26074a384a34ac2866813537c0d3", + "value": 479555971 + } + }, + "4d695e2724324b5fa4af39345b38226c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": 
"1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f81b74f0764a440392633978639dc9a8", + "placeholder": "​", + "style": "IPY_MODEL_0420cacd6a7349e8b7ccaef1ddc985b2", + "value": " 480M/480M [00:03<00:00, 137MB/s]" + } + }, + "4ef78f63daba4f20a768e58c023a8eee": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6d597a48192e436f89b71fd1b18a37fb", + "max": 17267, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_bd7023fc26be4623ab2c0e98b242ea1a", + "value": 17267 + } + }, + "518849236e44483f803078be7ad8dc93": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_f2660154764f4be4ba404d1e6e994271", + "IPY_MODEL_0d3e8b469920445f88475b4496a5d66a", + "IPY_MODEL_576907c14b614dbc8b7d0253909f9e99" + ], + "layout": "IPY_MODEL_ece9d78d617444f2a31fc912237d25cb" + } + }, + "5217db8767a042f098a971ee4c02c731": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + 
"state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "5338ff1061d14c89ad7c8bd7317a4642": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_a1b9d608708d4e9d962dbfa65f2b8951", + "IPY_MODEL_6eaaa4ebfe4440ebb750c853a7545e17", + "IPY_MODEL_84864afa1f0d447ca0d5a38f972ab1f3" + ], + "layout": "IPY_MODEL_6a38bd3599754589b4f9c17f39adeb92" + } + }, + "576907c14b614dbc8b7d0253909f9e99": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_5ba74f54df8f4b11a58aee4c66b554f7", + "placeholder": "​", + "style": "IPY_MODEL_684b9471808648b7bda92f1dd4afb25f", + "value": " 1.92k/1.92k [00:00<00:00, 106kB/s]" + } + }, + "596d1f66a8cd480ab32af9f3f3025edc": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + 
"_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "5a4bb7e698db43ab98cf5e3cad123eff": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_16f0ca4e8c1944a1a1e90b5484f9a270", + "placeholder": "​", + "style": "IPY_MODEL_8c5f52ed4b7042df8bb636e73c702d64", + "value": " 1.51k/1.51k [00:00<00:00, 57.7kB/s]" + } + }, + "5ba74f54df8f4b11a58aee4c66b554f7": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": 
null, + "top": null, + "visibility": null, + "width": null + } + }, + "5bb62e9fe8f14b898666af7527389908": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5bcd65e2cc574d428cf8fa49207b1d0f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_3461887798324f60993c526e21a197ff", + "max": 17202, + "min": 0, + "orientation": "horizontal", + "style": 
"IPY_MODEL_1bcc055f1b474119bb9b734939340767", + "value": 17202 + } + }, + "5c5bc290e7664009a63b8bc45d676aa7": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5d221ae2b9ac4e36a7cfe2cce3c536dc": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "5dcb9b7c948c42a6afb63bd89664744f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "5e58c000011c48879130e6374bbab799": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76fffbd6bc9542359181b3333077b888", + "placeholder": "​", + "style": "IPY_MODEL_1861687899e8426cb9bc7c74d0a51aea", + "value": " 4.83k/4.83k [00:00<00:00, 325kB/s]" + } + }, + "611c74c530d44002b43a09e86fdd133a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + 
"object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "634871e7ef574c48a22fe20bd29c98fb": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_a2c4a9390bd6495aae3a79a7174f5832", + "IPY_MODEL_de326e62dcee451ebafdef0a888f9974", + "IPY_MODEL_fe6ee540077e4fd086ae8975feb97deb" + ], + "layout": "IPY_MODEL_890276931b55409cacaccbea256fe745" + } + }, + "684b9471808648b7bda92f1dd4afb25f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "6a38bd3599754589b4f9c17f39adeb92": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + 
"grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6b302652bec7427680692afe5c3630f9": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6b3edbd6a47b4b3982ab30870d853199": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + 
"model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6d597a48192e436f89b71fd1b18a37fb": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + 
"justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6e88d9cddfae4c0585a030628f415163": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_7a59681cc6884eeb90cad0ad810eb340", + "max": 1515, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_5dcb9b7c948c42a6afb63bd89664744f", + "value": 1515 + } + }, + "6eaaa4ebfe4440ebb750c853a7545e17": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f0673eaf978145e780399c5d003abb33", + "max": 5534328, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_a7436702e317468ba76a9c7ab1d7b7bd", + "value": 5534328 + } + }, + "701f1496c0d145b8b376174ed9501cab": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": 
"DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "73417ed270d7450984a48f3c7260eef7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_85a4d1969185444aa63499a3e59faa2c", + "IPY_MODEL_ee1bf41d42a54f9a92d76f5394626e97", + "IPY_MODEL_5e58c000011c48879130e6374bbab799" + ], + "layout": "IPY_MODEL_7ea8ba3f9016452bb6ec20d3502675df" + } + }, + "747b0366a0284c4188d441e934021fa8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "75d1e7af67264ffca59cc387db02d6d7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + 
"IPY_MODEL_0d555777e2d2405698247f6fb163454d", + "IPY_MODEL_a89727caed4642e7af12415c8ca66641", + "IPY_MODEL_8718c8b37bea4e53ac6457276ce63045" + ], + "layout": "IPY_MODEL_dcfbc459a1d6415bb9cb71cf6d65a51f" + } + }, + "76a0e79fab2644908c86b7784d67504e": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "76fffbd6bc9542359181b3333077b888": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": 
null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "78cfac8bcaea470ebfba4104154cec3d": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7a0a2f4b5b1c41c2bd27f1406568ff34": { + 
"model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "7a59681cc6884eeb90cad0ad810eb340": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7b0782edb2394087910b772c90b03be9": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": 
"@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7cfb724f62f14f889a3d3a69a9e7acd8": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + 
"order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7def4f14f4414bd3a0b25bb628863123": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_886dbc6801664287b38590260ba33286", + "placeholder": "​", + "style": "IPY_MODEL_5217db8767a042f098a971ee4c02c731", + "value": " 129k/129k [00:00<00:00, 3.24MB/s]" + } + }, + "7ea8ba3f9016452bb6ec20d3502675df": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": 
null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8073c957b55e4b1c870b8e7c50d3765a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "84864afa1f0d447ca0d5a38f972ab1f3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_38550314ccb744c78b779de82a9f6d8e", + "placeholder": "​", + "style": "IPY_MODEL_596d1f66a8cd480ab32af9f3f3025edc", + "value": " 5.53M/5.53M 
[00:00<00:00, 143MB/s]" + } + }, + "84bada4f0df245469620edab3ad40c38": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "85a4d1969185444aa63499a3e59faa2c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76a0e79fab2644908c86b7784d67504e", + "placeholder": "​", + "style": "IPY_MODEL_ab133c95b3da4bdf86777ade47d60698", + "value": "hyperparams_develop.yaml: 100%" + } + }, + "86b24bfbc04d4bda8cce53a140c17e62": { + 
"model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_78cfac8bcaea470ebfba4104154cec3d", + "placeholder": "​", + "style": "IPY_MODEL_d9b66e1b93fb4664beeadcf100a43232", + "value": "encoder.ckpt: 100%" + } + }, + "8718c8b37bea4e53ac6457276ce63045": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_611c74c530d44002b43a09e86fdd133a", + "placeholder": "​", + "style": "IPY_MODEL_c22924e5299d4ea79ad2dc6257ce8d6b", + "value": " 1.92k/1.92k [00:00<00:00, 131kB/s]" + } + }, + "87d1d3bebd594b798db2cd726046fbb1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + 
"grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "886dbc6801664287b38590260ba33286": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "890276931b55409cacaccbea256fe745": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", 
+ "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "891f3d15f25a4cc2a64f4e7013d48e54": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + 
"max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "89f354d3bb7d40a9ba8b106071802006": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8c5f52ed4b7042df8bb636e73c702d64": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + 
}, + "8c9d3cdb6d9248129db49203fb4df0ec": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "8d0a5753b71243c59f4741043f98b857": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "8e66c81421af4032a4048dda5f7db960": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "934446687ef449149f71c9c64923ed4d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "94ac439a7b2344acb89dd968f4832d24": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": 
"1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_2961959a379f4934b2f47f3ec99736cd", + "max": 113108458, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_d439ae84c3cd41f0b89eaa7c32ba310e", + "value": 113108458 + } + }, + "96b01ae3bf274b3e84cda3f94dd564fb": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9707ff2d35094cfd97c39d166579cdba": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + 
"model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9713800931064fd3809f1fd0a2bc2d03": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "975f5572379d4dd2b4776d5469ca1bb7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_87d1d3bebd594b798db2cd726046fbb1", + "placeholder": "​", + "style": "IPY_MODEL_9c9985770a644c60a40d8a5479a19c48", + "value": "embedding_model.ckpt: 100%" + } + }, + "98ea97d41a6c4565a238d184ac9bd09a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + 
"9a755c6dcb464a4c91ba4443ea9d30d9": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_0c1d9a4d91eb4f0397bc17c1abeafcd6", + "IPY_MODEL_5bcd65e2cc574d428cf8fa49207b1d0f", + "IPY_MODEL_e600eb9e6fb74c35b3015ac6d56b5b90" + ], + "layout": "IPY_MODEL_89f354d3bb7d40a9ba8b106071802006" + } + }, + "9b1b16de38574725bcbf9527f77b8820": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9c9985770a644c60a40d8a5479a19c48": { + "model_module": 
"@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9e69e88263a24037aa45fb9f837c2c40": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_cb5045ef6ee544cf9e7f4d7cf772c14f", + "IPY_MODEL_1ea356cbbabf483b94fac984fa0d3690", + "IPY_MODEL_3301f47181664600aac6a0483dfa69a2" + ], + "layout": "IPY_MODEL_a3f6fdd0090c4c35b1f44d01be6d8e4f" + } + }, + "a0317c2f06cf410783a893b8187e00c3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_5bb62e9fe8f14b898666af7527389908", + "placeholder": "​", + "style": "IPY_MODEL_701f1496c0d145b8b376174ed9501cab", + "value": " 83.3M/83.3M [00:00<00:00, 170MB/s]" + } + }, + "a1b9d608708d4e9d962dbfa65f2b8951": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ae6a556144e54692b1be363027c1e867", + "placeholder": "​", + "style": "IPY_MODEL_8e66c81421af4032a4048dda5f7db960", + "value": "classifier.ckpt: 100%" + } + }, + "a1e78669452a47899a85a7fae3a15661": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_3940e8873ae3498393094ea7c361a7fc", + "placeholder": "​", + "style": "IPY_MODEL_e2cf26fb7f29467daef310993935888f", + "value": " 1.41k/1.41k [00:00<00:00, 22.6kB/s]" + } + }, + "a2c4a9390bd6495aae3a79a7174f5832": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_7b0782edb2394087910b772c90b03be9", + "placeholder": "​", + "style": "IPY_MODEL_0657d483243b46748b6dd499a188b588", + "value": "lm.ckpt: 100%" + } + }, + "a2d5bacd3dfc4279b7434002341d000b": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "a3f6fdd0090c4c35b1f44d01be6d8e4f": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "a6d2f27d5d4c493083ead216c8137c89": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + 
"a7436702e317468ba76a9c7ab1d7b7bd": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "a89727caed4642e7af12415c8ca66641": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e94b2fcd8c8d495ea7ebd6beeab5dad0", + "max": 1921, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_c62d9509f5d9449590c067db7631d10a", + "value": 1921 + } + }, + "a9ede27dcd6c4c17aad2e03976788ac8": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + 
"grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ab133c95b3da4bdf86777ade47d60698": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "adba26074a384a34ac2866813537c0d3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "ae6a556144e54692b1be363027c1e867": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + 
"grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b28ec6dd2556486693cb7657158b4f35": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b53ce8b214574bde84833798ab1e0a0e": { + "model_module": 
"@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_975f5572379d4dd2b4776d5469ca1bb7", + "IPY_MODEL_edb3172c98ff4ad8a27f35d1a7835906", + "IPY_MODEL_a0317c2f06cf410783a893b8187e00c3" + ], + "layout": "IPY_MODEL_2fda19c612c5486dbf75a5de2d6ebcda" + } + }, + "b5d8d5c9c94b4329a7cf143448476408": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_84bada4f0df245469620edab3ad40c38", + "placeholder": "​", + "style": "IPY_MODEL_3bd1dfae506d4299af956d497d96d9ad", + "value": "normalizer.ckpt: 100%" + } + }, + "b7ff251217dc4a31abd29db0d8ddb8ee": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_c65d659414184d6bbf896b581177e1a4", + "placeholder": "​", + "style": "IPY_MODEL_f92e99fc7682436a85d11ec2f115e206", + "value": "hyperparams.yaml: 100%" + } + }, + "baa6984bd26244ce849d203aa0d2dc30": { + 
"model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "bac9acbddead48e1b90aded2f686c5d7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_b28ec6dd2556486693cb7657158b4f35", + "placeholder": "​", + "style": "IPY_MODEL_a6d2f27d5d4c493083ead216c8137c89", + "value": "asr.ckpt: 100%" + } + }, + "bd7023fc26be4623ab2c0e98b242ea1a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": 
"ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "c22924e5299d4ea79ad2dc6257ce8d6b": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "c62d9509f5d9449590c067db7631d10a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "c65d659414184d6bbf896b581177e1a4": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": 
null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c81777f5d3814ced8096613714f3d054": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "cb5045ef6ee544cf9e7f4d7cf772c14f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_3fec66291aed46e9aaec1e60748760d6", + "placeholder": "​", + "style": "IPY_MODEL_934446687ef449149f71c9c64923ed4d", + "value": "tokenizer.ckpt: 100%" + } + }, + "ccd1e7c8a24f4a5e90875d85ca2cf6a3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + 
"_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "d3278bfc8503432b8d16c9ad47963ee5": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_bac9acbddead48e1b90aded2f686c5d7", + "IPY_MODEL_4b7f1dcce7c14f758c8b33e196214a88", + "IPY_MODEL_4d695e2724324b5fa4af39345b38226c" + ], + "layout": "IPY_MODEL_baa6984bd26244ce849d203aa0d2dc30" + } + }, + "d37c1933e8a24a11ae9a2b3180e53309": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": 
null, + "top": null, + "visibility": null, + "width": null + } + }, + "d439ae84c3cd41f0b89eaa7c32ba310e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "d7765befe0af40e9bb810f5896f4b52a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d9b66e1b93fb4664beeadcf100a43232": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "daaf30a442cb4fb48db9a082a40c6376": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_2afeeaed1b344835bc0cd7a3f16add8e", + "placeholder": "​", + "style": "IPY_MODEL_ccd1e7c8a24f4a5e90875d85ca2cf6a3", + "value": " 17.3k/17.3k [00:00<00:00, 883kB/s]" + } + }, + "dcfa2ee4056a47a38a4bb14deafe7366": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + 
"object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "dcfbc459a1d6415bb9cb71cf6d65a51f": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "de326e62dcee451ebafdef0a888f9974": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": 
"IPY_MODEL_d7765befe0af40e9bb810f5896f4b52a", + "max": 212420087, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_8d0a5753b71243c59f4741043f98b857", + "value": 212420087 + } + }, + "df2df25b22aa434ebaa769dce4162ca8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_96b01ae3bf274b3e84cda3f94dd564fb", + "max": 128619, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_747b0366a0284c4188d441e934021fa8", + "value": 128619 + } + }, + "e2cf26fb7f29467daef310993935888f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "e600eb9e6fb74c35b3015ac6d56b5b90": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_0d4c194f09524aac95e602a14d9a485d", + "placeholder": "​", + "style": 
"IPY_MODEL_1d4f4e45172e432a9a6a9e2545b87e6b", + "value": " 17.2k/17.2k [00:00<00:00, 1.10MB/s]" + } + }, + "e625e51535634cb88f89f93c1b95a5b8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_b7ff251217dc4a31abd29db0d8ddb8ee", + "IPY_MODEL_6e88d9cddfae4c0585a030628f415163", + "IPY_MODEL_5a4bb7e698db43ab98cf5e3cad123eff" + ], + "layout": "IPY_MODEL_d37c1933e8a24a11ae9a2b3180e53309" + } + }, + "e94b2fcd8c8d495ea7ebd6beeab5dad0": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": 
null, + "width": null + } + }, + "ea33a83bcbbc4ba5a983712f1d197e17": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "eb2aa07593d44cbcae05ace5ace724a8": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + 
"grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ece9d78d617444f2a31fc912237d25cb": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "edb3172c98ff4ad8a27f35d1a7835906": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_241bb75e8c0141ec8c00ef562d0feb5f", + "max": 83316686, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_98ea97d41a6c4565a238d184ac9bd09a", + "value": 83316686 + } + }, + "ee1bf41d42a54f9a92d76f5394626e97": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_7cfb724f62f14f889a3d3a69a9e7acd8", + "max": 4832, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_2e8935dded774cb9bcd87167992c5558", + "value": 4832 + } + }, + "f0673eaf978145e780399c5d003abb33": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + 
"grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f2660154764f4be4ba404d1e6e994271": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_891f3d15f25a4cc2a64f4e7013d48e54", + "placeholder": "​", + "style": "IPY_MODEL_9713800931064fd3809f1fd0a2bc2d03", + "value": "hyperparams.yaml: 100%" + } + }, + "f3bc35d9420f44239f719e2f4b37d379": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_11890cbf63b24f13b0f3e75b7fc68d01", + "placeholder": "​", + "style": "IPY_MODEL_24ee8a32861b4a3daa79a35583175dc7", + "value": "label_encoder.txt: 100%" + } + }, + "f52c23a438e2415cb4632ae276299cc4": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + 
"state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6b3edbd6a47b4b3982ab30870d853199", + "placeholder": "​", + "style": "IPY_MODEL_8c9d3cdb6d9248129db49203fb4df0ec", + "value": "masknet.ckpt: 100%" + } + }, + "f6739e26b4de4cb8b3d9e3f62876325f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_f52c23a438e2415cb4632ae276299cc4", + "IPY_MODEL_94ac439a7b2344acb89dd968f4832d24", + "IPY_MODEL_16c602c5ca4b42d5a22e97249dbc8316" + ], + "layout": "IPY_MODEL_ea33a83bcbbc4ba5a983712f1d197e17" + } + }, + "f81b74f0764a440392633978639dc9a8": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + 
"grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f92e99fc7682436a85d11ec2f115e206": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "fe6ee540077e4fd086ae8975feb97deb": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_304a1d7486b94d8c83371c54cfda2ca4", + "placeholder": "​", + "style": "IPY_MODEL_7a0a2f4b5b1c41c2bd27f1406568ff34", + "value": " 212M/212M [00:03<00:00, 101MB/s]" + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/advanced/profiling-and-benchmark.ipynb b/docs/tutorials/advanced/profiling-and-benchmark.ipynb new file mode 100644 index 0000000000..3fa547719a --- /dev/null +++ b/docs/tutorials/advanced/profiling-and-benchmark.ipynb @@ -0,0 +1,125 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", 
+ "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/profiling-and-benchmark.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/profiling-and-benchmark.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xrB8PyR6nO31" + }, + "source": [ + "# Performance Profiling\n", + "\n", + "SpeechBrain provides a simple way of profiling any training recipe. The output will be a standard tensorboard logdir containing all the typical metrics, traces, charts etc. If you want more information, please refer to [the PyTorch documentation](https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html). Indeed, SpeechBrain simply uses the standard Torch profiler.\n", + "\n", + "## Installation\n", + "Please ensure you have installed the TensorBoard profiler:\n", + "\n", + "`pip install torch_tb_profiler`\n", + "\n", + "## Calling the profiler\n", + "\n", + "Let's start a training with the profiler enabled. Of course, we do not want to run the full training, just a few steps. This can be achieved with:\n", + "\n", + "```bash\n", + "python train.py hparams/config.yaml --profile_training --profile_warmup 10 --profile_steps 5\n", + "```\n", + "\n", + "**--profile_warmup** and **--profile_steps** indicate for how long we should wait, i.e. warmup, before starting to record and the number of steps to record respectively. Waiting is useful as some PyTorch and CuDNN optimizations are usually happening on the first training steps.\n", + "\n", + "## Visualizing the logs with tensorboard\n", + "\n", + "The previous step will have generated profiler logs in the **{output_folder}/profiler_logs** location. The output folder is the one from your YAML. 
You can start tensorboard and explore the trace and charts by executing:\n", + "\n", + "```bash\n", + "tensorboard --logdir {output_folder}/profiler_logs\n", + "```\n", + "\n", + "Of course, you'll need to have tensorboard installed.\n", + "\n", + "```bash\n", + "pip install tensorboard\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + 
year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1Jvc7Uf1bGjAPXiO0N1zFJKGfCyC89v-N", + "timestamp": 1654072519281 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/advanced/text-tokenizer.ipynb b/docs/tutorials/advanced/text-tokenizer.ipynb new file mode 100644 index 0000000000..fd1585bea3 --- /dev/null +++ b/docs/tutorials/advanced/text-tokenizer.ipynb @@ -0,0 +1,584 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/advanced/text-tokenizer.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/advanced/text-tokenizer.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Wa5O28sydb_U" + }, + "source": [ + "# Text Tokenization\n", + "\n", + "## Why do we need tokenization?\n", + "\n", + "Almost all languages have a huge number of possible words. Machine learning tasks that process text have thus to support large vocabularies that might contain several thousands of words. Dealing with such a large vocabulary, however, is critical. The input and output embeddings (e.g. 
one-hot-vectors) are normally huge vectors, leading to an increase in memory consumption and memory usage. More importantly, learning with such extremely sparse and high-dimensional embeddings might be sub-optimal.\n", + "\n", + "A naive alternative can be to simply use characters instead of words.\n", + "The latter approach alleviates some of the aforementioned issues, but\n", + "it requires processing a longer sequence (that is critical as well from a machine learning point of view).\n", + "\n", + "Can we find a middle ground between words and characters? Yes, this is what the tokenizer is trying to do.\n", + "\n", + "One popular technique called **rule-based tokenization** (e.g. [spaCy](https://spacy.io)) allows splitting the text into smaller chunks based on grammar rules, spaces, and punctuation. Unfortunately, this approach is language-dependent and must be set for each language considered ...\n", + "\n", + "Another solution to get the best of both word-level and character-level tokenizations is a hybrid solution named **subword tokenization** relying on the principle that frequently-used words should not be split into smaller subwords, but rare words should be decomposed into meaningful (i.e. more frequent) subwords.\n", + "\n", + "\n", + "SpeechBrain currently relies on a custom integration of the [*SentencePiece tokenizer*](https://github.com/google/sentencepiece) which treats the input as a raw input stream. The following tokenizer algorithms are supported:\n", + "1. [BPE](https://web.archive.org/web/20230319172720/https://www.derczynski.com/papers/archive/BPE_Gage.pdf).\n", + "2. [Unigram](https://arxiv.org/pdf/1804.10959.pdf) (Subword Regularization).\n", + "\n", + "\n", + "The *SentencePiece tokenizer* is available at `speechbrain.tokenizer.SentencePiece`. 
In the following, we will describe all the aforementioned techniques, but first of all, let's install SpeechBrain.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "executionInfo": { + "elapsed": 36501, + "status": "ok", + "timestamp": 1708531382261, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "JSRmMsPvdkfu" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "\n", + "# Clone SpeechBrain repository\n", + "!git clone https://github.com/speechbrain/speechbrain/\n", + "%cd /content/speechbrain/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0v2mq9wwfBeV" + }, + "source": [ + "Let's also download a csv file to train our tokenizer.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "executionInfo": { + "elapsed": 4253, + "status": "ok", + "timestamp": 1708531386505, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "e1vUAGGkfPl9" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.dropbox.com/s/atg0zycfbacmwqi/dev-clean.csv" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hz__Nn1Z1fxO" + }, + "source": [ + "## Train sentencepiece tokenizer within SpeechBrain\n", + "SentencePiece is a class that can be instantiated with few parameters\n", + "\n", + "\n", + "* **model_dir**: it is the directory where the trained tokenizer model is saved. The model will be saved as *`model_dir/model_type_vocab_size.model`*\n", + "* **vocab_sizes**: It is the vocabulary size for the chosen tokenizer type (BPE, Unigram). 
The vocab_size is optional for character tokenization and mandatory for BPE & unigram tokenization.\n", + "* **csv_train**: It is the path of the csv file which is used to learn the tokenizer.\n", + "* **csv_read**: It is the data entry (csv header) which contains the word sequence in the csv file.\n", + "* **model_type**: It can be: word, char, bpe, or unigram tokenization.\n", + "\n", + "Let's now apply it to our dev-clean.csv." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "U-1IDruE0UO_" + }, + "outputs": [], + "source": [ + "import torch\n", + "from speechbrain.tokenizers.SentencePiece import SentencePiece" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "UH5mHGkn110v" + }, + "outputs": [], + "source": [ + "spm = SentencePiece(model_dir=\"tokenizer_data\",\n", + " vocab_size=2000,\n", + " annotation_train=\"dev-clean.csv\",\n", + " annotation_read=\"wrd\",\n", + " model_type=\"bpe\",\n", + " annotation_list_to_check=[\"dev-clean.csv\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SK0NPosDVtbk" + }, + "outputs": [], + "source": [ + "%less tokenizer_data/2000_bpe.vocab" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jE1ubRuGeWKq" + }, + "source": [ + "As you can see, SentencePiece lib is an unsupervised text tokenizer and detokenizer. Some of the tokens have `_` symbols representing spaces. The sentence piece detokenization will simply merge the sequence of tokens and replace `_` with spaces." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2j0CeZ0RKJQ4" + }, + "source": [ + "### Advanced parameters\n", + "* `character_coverage`: it is the number of characters covered by the model (value between [0.98 - 1]). default: 1.0 for languages with a small character set. 
It can be set to 0.995 for languages with rich character sets like Japanese or Chinese.\n", + "* `bos_id/eos_id/pad_id/unk_id`: allow users to define a specific index for `bos/eos/pad and unk` tokens\n", + "* `split_by_whitespace`: this parameter allows sentencepiece to extract cross-word pieces and consider space as a unique token.\n", + "* `num_sequences`: use at most `num_sequences` to train the tokenizer (limit the training text for large datasets).\n", + "* `csv_list_to_check`: List of csv files used for checking the accuracy of recovering words from the tokenizer.\n", + "* `user_defined_symbols`: it is a string list (separated by comma ',') which forces the insertion of specific vocabulary.\n", + "\n", + "As an example, if we set the `character_coverage` to `0.98` and reduce the `vocab_size`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "EhEvT1JZ_hBA" + }, + "outputs": [], + "source": [ + "spm = SentencePiece(model_dir=\"tokenizer_data\",\n", + " vocab_size=500,\n", + " annotation_train=\"dev-clean.csv\",\n", + " annotation_read=\"wrd\",\n", + " model_type=\"unigram\",\n", + " character_coverage=0.98,\n", + " annotation_list_to_check=[\"dev-clean.csv\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OZ7bhmnpJoiO" + }, + "source": [ + "As we can see, we are not able to recover all the words from the text because some characters are missing." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QEZnDlKehEvi" + }, + "source": [ + "## Loading a pre-trained sentence piece tokenizer within SpeechBrain\n", + "Loading the sentencepiece tokenizer is very simple. 
We just need to specify the path of the model, the `vocab_size`, and the `model_type`:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "l8wCKWfphfAy" + }, + "outputs": [], + "source": [ + "spm = SentencePiece(model_dir=\"tokenizer_data\",\n", + " vocab_size=2000,\n", + " model_type=\"bpe\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OOnHQACiiXKY" + }, + "source": [ + "Now, we can directly use the tokenizer loaded from `tokenizer_data/2000_bpe.model`. This feature is very useful to replicate results. As an example, you can upload your tokenizer to the internet and someone else can download it to obtain the same tokenization as you." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cLNxiVpkfgXo" + }, + "source": [ + "## How to use the sentencepiece\n", + "\n", + "The SentencePiece object is available at `speechbrain.tokenizer.SentencePiece.sp`. By accessing this object, you can easily perform tokenization and detokenization. If interested in all the features of SentencePiece, please feel free to read the [official tutorial](https://colab.research.google.com/github/google/sentencepiece/blob/master/python/sentencepiece_python_module_example.ipynb#scrollTo=uzBiPAm4ljor)\n", + "\n", + "Let's try to tokenize and detokenize some text!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WKyVd23AgnyU" + }, + "outputs": [], + "source": [ + "# Encode as pieces\n", + "print(spm.sp.encode_as_pieces('THIS IS A TEST'))\n", + "# Encode as ids\n", + "print(spm.sp.encode_as_ids('THIS IS A TEST'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OvxCFkiYoBpH" + }, + "outputs": [], + "source": [ + "# Decode from ids\n", + "print(spm.sp.decode_ids([244, 177, 3, 1, 97]))\n", + "# Decode from pieces\n", + "print(spm.sp.decode_pieces(['▁THIS', '▁IS', '▁A', '▁T', 'EST']))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y5zbiLO4pAeN" + }, + "source": [ + "## Use SpeechBrain SentencePiece with Pytorch\n", + "We designed our SentencePiece wrapper to be used jointly to our data transform pipeline [(see the tutorial)](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html) and therefore deal with tensors.\n", + "For that purpose, two options are available:\n", + "1. Option 1: Generating token tensors directly from a word tensors + an external dictionary named `int2lab` (which maps your tensors to words).\n", + "1. 
Option 2: If you use our DynamicDataset, the DynamicItem will automatically generate the token tensors.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wqpG4Ccoxo9y" + }, + "source": [ + "### Example for option 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "QeLJbRntpcfc" + }, + "outputs": [], + "source": [ + "# INPUTS\n", + "# word vocab\n", + "dict_int2lab = {1: \"HELLO\", 2: \"WORLD\", 3: \"GOOD\", 4:\"MORNING\"}\n", + "# wrd tensors\n", + "wrd_tensor = torch.Tensor([[1, 2, 0], [3,4,2]])\n", + "# relative lens tensor (will help for dealing with padding)\n", + "lens_tensor = torch.Tensor([0.75, 1.0])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d0QLTr1GzJ_S" + }, + "source": [ + "Our SentencePiece can be called like any other pytorch function with the tensors passed to the __call__ method. Parameters are given as:\n", + "batch : it is a word_ids tensor (i.e. your words). Shape: [batch_size, max_seq_lenght]\n", + "batch_lens: it is a relative length tensor. shape: [batch_size]\n", + "int2lab: dictionary which maps the word_ids to the word.\n", + "task:\n", + "\"encode\": convert the word batch tensor into a token tensor.\n", + "\"decode\": convert the token tensor into a list of word sequences.\n", + "\"decode_from_list\": convert a list of token sequences to a list of word sequences." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "k4WDwuSIyleS" + }, + "outputs": [], + "source": [ + "encoded_seq_ids, encoded_seq_lens = spm(\n", + " wrd_tensor,\n", + " lens_tensor,\n", + " dict_int2lab,\n", + " \"encode\",\n", + " )\n", + "# tokens tensor\n", + "print(encoded_seq_ids)\n", + "# relative lens token tensor\n", + "print(encoded_seq_lens)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jA7KAgZ3N3uj" + }, + "source": [ + "Then we can simply decode it by simply specifying `\"decode\"` to the function!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "P0QijcAKyvSL" + }, + "outputs": [], + "source": [ + "# decode from torch tensors (batch, batch_lens)\n", + "words_seq = spm(encoded_seq_ids, encoded_seq_lens, task=\"decode\")\n", + "print(words_seq)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "L64IE3lH4wb6" + }, + "source": [ + "### Example for option 2\n", + "\n", + "**Note:** please first read our dataio [tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html) to perfectly grasp the next lines.\n", + "\n", + "Here, we use a tokenizer to tokenize on-the-fly the text obtained from a .csv file. In the following example, we combined it with the data_io pipeline of SpeechBrain.\n", + "\n", + "First, we define a DynamicItemDataset from our csv file:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "S6-Os1Eb4ycu" + }, + "outputs": [], + "source": [ + "import speechbrain as sb\n", + "train_set = sb.dataio.dataset.DynamicItemDataset.from_csv(\n", + " csv_path=\"dev-clean.csv\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "68xx4kgezPtX" + }, + "outputs": [], + "source": [ + "%less dev-clean.csv" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xehgtxCmP1ir" + }, + "source": [ + "Then, we define the text_pipeline (i.e. what is called for each sample gathered in a mini-batch). In the text_pipeline, we simply call our tokenizer to obtain the tokenized text!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f00AeLtt5O9o" + }, + "outputs": [], + "source": [ + " @sb.utils.data_pipeline.takes(\"wrd\")\n", + " @sb.utils.data_pipeline.provides(\n", + " \"wrd\", \"tokens_list\", \"tokens\"\n", + " )\n", + " def text_pipeline(wrd):\n", + " yield wrd\n", + " tokens_list = spm.sp.encode_as_ids(wrd)\n", + " yield tokens_list\n", + " tokens = torch.LongTensor(tokens_list)\n", + " yield tokens" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ksltlAzaQNvt" + }, + "source": [ + "Some more SpeechBrain stuff to finalize the data pipeline:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tVmLtHLO5ep9" + }, + "outputs": [], + "source": [ + "train_set.add_dynamic_item(text_pipeline)\n", + "train_set.set_output_keys([\"wrd\", \"tokens\", \"tokens_list\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5D1lzSTCQV-S" + }, + "source": [ + "Finally, we create a data loader that contains the defined transformation (i.e. tokenizer)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ko0IBWvN8cox" + }, + "outputs": [], + "source": [ + "train_dataloader = sb.dataio.dataloader.make_dataloader(train_set, batch_size=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hQ63N5urQhys" + }, + "source": [ + "Now, we can simply get our tokenized samples !!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7Me64J__yQsO" + }, + "outputs": [], + "source": [ + "b = next(iter(train_dataloader))\n", + "print(b.wrd)\n", + "print(b.tokens)\n", + "print(b.tokens_list)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " 
archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/assets/attention-chunking-dep.png b/docs/tutorials/assets/attention-chunking-dep.png new file mode 100644 index 0000000000..ca66408cca Binary files /dev/null and b/docs/tutorials/assets/attention-chunking-dep.png differ diff --git a/docs/tutorials/assets/attention-chunking-no-lc.png b/docs/tutorials/assets/attention-chunking-no-lc.png new file mode 100644 index 0000000000..fc9c988c77 Binary files /dev/null and b/docs/tutorials/assets/attention-chunking-no-lc.png differ diff --git a/docs/tutorials/assets/attention-chunking.png b/docs/tutorials/assets/attention-chunking.png new file mode 100644 index 0000000000..207702f68c Binary files /dev/null and b/docs/tutorials/assets/attention-chunking.png differ diff --git a/docs/tutorials/assets/attn-restrict.png b/docs/tutorials/assets/attn-restrict.png new file mode 100644 index 0000000000..0079d2cf6e Binary files /dev/null and b/docs/tutorials/assets/attn-restrict.png differ diff --git a/docs/tutorials/assets/conformer-simple.png b/docs/tutorials/assets/conformer-simple.png new file mode 100644 index 0000000000..4450f124eb Binary files /dev/null and b/docs/tutorials/assets/conformer-simple.png differ diff --git a/docs/tutorials/assets/dcc-causal.png b/docs/tutorials/assets/dcc-causal.png new file mode 100644 index 0000000000..12bf3e2697 Binary files /dev/null and b/docs/tutorials/assets/dcc-causal.png differ diff --git 
a/docs/tutorials/assets/dcc-dcc.png b/docs/tutorials/assets/dcc-dcc.png new file mode 100644 index 0000000000..59bcaea970 Binary files /dev/null and b/docs/tutorials/assets/dcc-dcc.png differ diff --git a/docs/tutorials/assets/dcc-regular.png b/docs/tutorials/assets/dcc-regular.png new file mode 100644 index 0000000000..b0404c71bc Binary files /dev/null and b/docs/tutorials/assets/dcc-regular.png differ diff --git a/docs/tutorials/basics.rst b/docs/tutorials/basics.rst new file mode 100644 index 0000000000..1371e163de --- /dev/null +++ b/docs/tutorials/basics.rst @@ -0,0 +1,135 @@ +SpeechBrain Basics +================== + +.. + Originally generated with https://gist.github.com/asumagic/19f9809480b62bfd16094fb5c844a564 but OK to edit in repo now. + Please ensure for each tutorial that you are adding it to the hidden toctree at the end of the file! + +.. toctree:: + :hidden: + + basics/introduction-to-speechbrain.ipynb + basics/what-can-i-do-with-speechbrain.ipynb + basics/brain-class.ipynb + basics/hyperpyyaml.ipynb + basics/data-loading-pipeline.ipynb + basics/checkpointing.ipynb + + +.. rubric:: `🔗 Introduction to SpeechBrain `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Feb. 2021 + - Difficulty: easy + - Time: 10min + - `🔗 Google Colab `__ + + +SpeechBrain is an open-source all-in-one speech toolkit based on PyTorch. +It is designed to make the research and development of speech technology easier. Alongside with our documentation +this tutorial will provide you all the very basic elements needed to start using SpeechBrain for your projects. + +.. rubric:: `🔗 What can I do with SpeechBrain? `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Jan. 2021 + - Difficulty: easy + - Time: 10min + - `🔗 Google Colab `__ + + +In this tutorial, we provide a high-level description of the speech tasks currently supported by SpeechBrain. 
+We also show how to perform inference on speech recognition, speech separation, speaker verification, and other applications. + + +.. rubric:: `🔗 The Brain Class `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Plantinga P. + - Jan. 2021 + - Difficulty: easy + - Time: 10min + - `🔗 Google Colab `__ + + +One key component of deep learning is iterating the dataset multiple times and performing parameter updates. +This process is sometimes called the "training loop" and there are usually many stages to this loop. + +SpeechBrain provides a convenient framework for organizing the training loop, in the form of a class known as the "Brain" class, +implemented in `speechbrain/core.py`. In each recipe, we sub-class this class and override the methods for which the default +implementation doesn't do what is required for that particular recipe. + +.. rubric:: `🔗 HyperPyYAML Tutorial `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Plantinga P. + - Jan. 2021 + - Difficulty: easy + - Time: 15min + - `🔗 Google Colab `__ + + +An essential part of any deep learning pipeline is the definition of hyperparameters and other metadata. +These data in conjunction with the deep learning algorithms control the various aspects of the pipeline, +such as model architecture, training, and decoding. + +At SpeechBrain, we decided that the distinction between +hyperparameters and learning algorithms ought to be evident in the structure of our toolkit, so we split our +recipes into two primary files: `train.py` and `hyperparams.yaml`. The `hyperparams.yaml` file is in a +SpeechBrain-developed format, which we call "HyperPyYAML". We chose to extend YAML since it is a highly +readable format for data serialization. By extending an already useful format, we were able to create an +expanded definition of hyperparameter, keeping our actual experimental code small and highly readable. + +.. 
 rubric:: `🔗 Data Loading `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Cornell S. & Rouhe A. + - Jan. 2021 + - Difficulty: medium + - Time: 20min + - `🔗 Google Colab `__ + + +Setting up an efficient data loading pipeline is often a tedious task which involves creating the examples, +defining your torch.utils.data.Dataset class as well as different data sampling and augmentation strategies. +In SpeechBrain, we provide efficient abstractions to simplify this time-consuming process without sacrificing +flexibility. In fact, our data pipeline is built around the PyTorch one. + +.. rubric:: `🔗 Checkpointing `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Rouhe A. + - Feb. 2021 + - Difficulty: easy + - Time: 15min + - `🔗 Google Colab `__ + + +By checkpointing, we mean saving the model and all the other necessary state information +(like optimizer parameters, which epoch and which iteration), at a particular point in time. 
diff --git a/docs/tutorials/basics/brain-class.ipynb b/docs/tutorials/basics/brain-class.ipynb new file mode 100644 index 0000000000..c7f1229e3b --- /dev/null +++ b/docs/tutorials/basics/brain-class.ipynb @@ -0,0 +1,566 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/basics/brain-class.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/basics/brain-class.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jr2jH1sDZcml" + }, + "source": [ + "# The Brain Class\n", + "\n", + "A fundamental aspect of deep learning involves iterating through a dataset multiple times and updating model parameters, commonly referred to as the \"training loop.\" To streamline and organize this process, SpeechBrain offers a versatile framework in the form of the \"Brain\" class, implemented in `speechbrain/core.py`. In each recipe, this class is sub-classed, and its methods are overridden to tailor the implementation to the specific requirements of that recipe.\n", + "\n", + "The core method of the Brain class is `fit()`, responsible for iterating through the dataset, performing updates to the model, and managing the training loop. To leverage `fit()`, at least two methods must be defined in the sub-class: `compute_forward()` and `compute_objectives()`. 
These methods handle the computation of the model for generating predictions and the calculation of loss terms required for gradient computation.\n", + "\n", + "Let's explore a minimal example to illustrate this:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zRHI45kUzKul" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "\n", + "# Clone SpeechBrain repository\n", + "!git clone https://github.com/speechbrain/speechbrain/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nV0-QonaDhEu" + }, + "outputs": [], + "source": [ + "import torch\n", + "import speechbrain as sb\n", + "\n", + "class SimpleBrain(sb.Brain):\n", + " def compute_forward(self, batch, stage):\n", + " return self.modules.model(batch[\"input\"])\n", + "\n", + "\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " return torch.nn.functional.l1_loss(predictions, batch[\"target\"])\n", + "\n", + "model = torch.nn.Linear(in_features=10, out_features=10)\n", + "brain = SimpleBrain({\"model\": model}, opt_class=lambda x: torch.optim.SGD(x, 0.1))\n", + "data = [{\"input\": torch.rand(10, 10), \"target\": torch.rand(10, 10)}]\n", + "brain.fit(range(10), data)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aduTa6eDdHlf" + }, + "source": [ + "With just around 10 lines of code, we can successfully train a neural model. This efficiency is achieved because the Brain class handles intricate details of training, such as managing `train()` and `eval()` states or computing and applying gradients. Furthermore, the flexibility of the class allows every step of the process to be overridden by adding methods to the sub-class. 
This means that even intricate training procedures, such as those involved in Generative Adversarial Networks (GAN), can be seamlessly integrated into the Brain class.\n", + "\n", + "In this tutorial, we'll begin by elucidating the parameters of the Brain class. Subsequently, we'll delve into the `fit()` method, breaking it down step by step and highlighting the segments that can be overridden when necessary. These insights into the class's parameters and the `fit()` method form the foundation for understanding the functionality and versatility of the Brain class.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F__CemYaLlB_" + }, + "source": [ + "## Arguments to `Brain` class\n", + "\n", + "The Brain class only takes 5 arguments, but each of these can be a little complex, so we explain them in detail here. The relevant code is just the `__init__` definition:\n", + "\n", + "```python\n", + "def __init__(\n", + " self,\n", + " modules=None,\n", + " opt_class=None,\n", + " hparams=None,\n", + " run_opts=None,\n", + " checkpointer=None,\n", + "):\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7cg-Xp_EMDSA" + }, + "source": [ + "### `modules` argument\n", + "\n", + "This first argument takes a dictionary of torch modules. The Brain class takes this dictionary and converts it to a Torch ModuleDict. This provides a convenient way to move all parameters to the correct device, call `train()` and `eval()`, and wrap the modules in the appropriate distributed wrapper if necessary." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SRis_Y4RMo8y" + }, + "source": [ + "### `opt_class` argument\n", + "\n", + "The Brain class takes a function definition for a pytorch optimizer. The reason for choosing this as input rather than a pre-constructed pytorch optimizer is that the Brain class automatically handles wrapping the module parameters in distributed wrappers if requested. 
This needs to happen before the parameters get passed to the optimizer constructor.\n", + "\n", + "To pass a pytorch optimizer constructor, a lambda can be used, as in the example at the beginning of this tutorial. More convenient, however, is the option used by most of the recipes in SpeechBrain: define the constructor with HyperPyYAML. The `!name:` tag acts similarly to the lambda, creating a new constructor that can be used to make optimizers.\n", + "\n", + "```yaml\n", + "optimizer: !name:torch.optim.Adam\n", + " lr: 0.1\n", + "```\n", + "\n", + "Of course sometimes zero or multiple optimizers are required. In the case of multiple optimizers, the `init_optimizers` method can be overridden to initialize each individually.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KTnyybHOOtKQ" + }, + "source": [ + "### `hparams` argument\n", + "\n", + "The Brain class algorithm may depend on a set of hyperparameters that should be easy to control externally, this argument accepts a dictionary that will be accessible to all the internal methods using \"dot notation\". 
 An example follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gPr50ZjTiG8I" + }, + "outputs": [], + "source": [ + "class SimpleBrain(sb.Brain):\n", + " def compute_forward(self, batch, stage):\n", + " return self.modules.model(batch[\"input\"])\n", + "\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " term1 = torch.nn.functional.l1_loss(predictions, batch[\"target1\"])\n", + " term2 = torch.nn.functional.mse_loss(predictions, batch[\"target2\"])\n", + " return self.hparams.weight1 * term1 + self.hparams.weight2 * term2\n", + "\n", + "hparams = {\"weight1\": 0.7, \"weight2\": 0.3}\n", + "model = torch.nn.Linear(in_features=10, out_features=10)\n", + "brain = SimpleBrain(\n", + " modules={\"model\": model},\n", + " opt_class=lambda x: torch.optim.SGD(x, 0.1),\n", + " hparams=hparams,\n", + ")\n", + "data = [{\n", + " \"input\": torch.rand(10, 10),\n", + " \"target1\": torch.rand(10, 10),\n", + " \"target2\": torch.rand(10, 10),\n", + "}]\n", + "brain.fit(range(10), data)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IKcE7OwKjzW_" + }, + "source": [ + "### `run_opts` argument\n", + "\n", + "There are a large number of options for controlling the execution details for the `fit()` method, which can all be passed via this argument. Some examples include enabling debug mode, the execution device, and the distributed execution options. For a full list, see the [`Brain` class API documentation](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.core.html).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DYZ4rEo8lDLP" + }, + "source": [ + "### `checkpointer` argument\n", + "\n", + "Finally, if you pass a SpeechBrain checkpointer to the Brain class, there are several operations that automatically get called:\n", + "\n", + " 1. The optimizer parameters are added to the checkpointer.\n", + " 2. At the beginning of training, the most recent checkpoint is loaded and training is resumed from that point. 
If training is finished, this simply ends the training step and moves on to evaluation.\n", + " 3. During training, checkpoints are saved every 15 minutes by default (this can be changed or disabled with an option in `run_opts`).\n", + " 4. At the beginning of evaluation, the \"best\" checkpoint is loaded, as determined by the lowest or highest score on a metric recorded in the checkpoints." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NTar81RFHEUU" + }, + "source": [ + "## The `fit()` method\n", + "\n", + "This method does a lot, but only actually takes about ~100 lines of code, so it is understandable by reading the code itself. We break it down section-by-section and explain what each part is doing. First, let's briefly go over the arguments:\n", + "\n", + "```python\n", + "def fit(\n", + " self,\n", + " epoch_counter,\n", + " train_set,\n", + " valid_set=None,\n", + " progressbar=None,\n", + " train_loader_kwargs={},\n", + " valid_loader_kwargs={},\n", + "):\n", + "```\n", + "\n", + "1. The `epoch_counter` argument takes an iterator, so when `fit()` is called, the outer loop iterates this variable. This argument was co-designed with an `EpochCounter` class enabling storage of the epoch loop state. With this argument, we can restart experiments from where they left off.\n", + "2. The `train_set` and `valid_set` arguments take a Torch Dataset or DataLoader that will load the tensors needed for training. If a DataLoader is not passed, one will be constructed automatically (see next section).\n", + "3. The `progressbar` argument controls whether a `tqdm` progressbar is displayed showing progress through the dataset for each epoch.\n", + "4. 
The `train_loader_kwargs` and `valid_loader_kwargs` are passed to the `make_dataloader` method for making the DataLoader (see next section).\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gGA6y4TDBTwX" + }, + "source": [ + "### Fit structure\n", + "\n", + "With the arguments out of the way, we can start to look at the structure of this method. Here is a simple graphic to show all the override-able calls within `fit()`. We'll go over these one-by-one through the rest of the tutorial.\n", + "\n", + "![brain-class.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfEAAAHqCAYAAAAUI3clAAAACXBIWXMAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAIABJREFUeJzsnXd4lFXWwH/TMklmMumdJEACCST0phQRMEgRBRakfCr27lpWxL6ywqqsu+raV9FdddddmgVQ0UiV3kNCCKT33ifT5/3+GPLCMBNSCAT0/T0PzwO3nvfOMOe95557jkwQBAEJCQkJCQmJKw55dwsgISEhISEh0TkkJS4hISEhIXGFIilxCQkJCQmJKxRJiUtISEhISFyhSEpcQkJCQkLiCkVS4hISEhISElcokhKXkJCQkJC4QpGUuISEhISExBWKpMQlJCQkJCSuUCQlLiEhISEhcYUiKXEJCQkJCYkrFEmJS0hISEhIXKFISlxCQkJCQuIKRVLiEpeMnJwcrFarS7nJZCI/P78bJJKQkJC4spGUuMRFx2Kx8MILL5CYmMiOHTtc6jdv3kz//v15+eWXsdls3SChhISExJXJZa/EZ8yYwc0338zlmPa8pqaGt956i7y8vO4WRWTVqlXMnj2bwYMHc9111/HOO+9gMpnctq2treWtt94iJyfnosnT1NTEjTfeyLJlyxg/fjx9+vRxadO/f39GjhzJiy++yOzZs2lubr5o8khISEj8qhAuczQajTBs2LDuFsMtjz/+uAAI48eP725RBEEQhDvuuEMAhEGDBgkPPvigMHXqVEEmkwnXX3+92/aLFy8WAGH06NEXRR6r1Spcd911AiA8/fTTgt1ub7Wt3W4XHn74YQEQpk+fLthstosik4SEhMSvict+Jw4gk8m6WwS3XHPNNfj7+5OcnNzdopCamsqnn37KTTfdxKFDh3j33Xf57rvvWLFiBXPnznXbZ9y4cQQEBLRL/pKSEu677z6effbZdsv0wgsvkJKSwqOPPsorr7xy3s9RJpPx9ttvc+edd7Jx40aWL1/e7nkkJCQkfqvIBOEytFOfhVarpV+/fuzfv7+7Rbmsef/993nwwQf57rvvmDp1qlhusVhQqVQXPP6ePXu4+uqrWbJkCa+++mqb7XNzc0lISCApKYndu3fj4eHRrnkMBgPDhg0jLy+PU6dOERkZeaGiS0hISPxquSJ24hJtU1dXB0BERIRTeVcocHAoV4DQ0NB2tX/11Vcxm80sX7683QocwMvLi+XLl2MwGPjLX/7SKVklJCQkfitcMUq8sbGRxx57jKioKNEEvH37drdtP/74Y5KTkzly5Ahbt25lwoQJBAYGEh0dzRNPPEFTU5NT+8zMTO677z7i4+MJCAigT58+3H777aSnp7sd/6677iI5OVn888EHH7Qq95///GeSk5Mxm81s2bKF6667jqCgIAYPHszatWs7vyC
nWbRoEcnJyaxcuRKA+++/X5Tr0Ucfddvnvvvuc5L/3XffbXX8pUuXkpyczB/+8AfgzNq2/MnIyHDpY7PZWLduHfHx8UyZMqXDzzRz5kx69uzJmjVrLkuHRgkJCYnLhStCiRsMBq699lq+/vpr5syZQ3JyMjt37mTSpEl89dVXLu2zsrJISUlh1apVTJ48GaVSyR133EFiYiJvvvkm//73v8W2Bw8eZPDgwaxfv55Jkybx2GOPMWbMGNauXcuwYcPYs2ePy/g6nQ5/f3/kcjkpKSmcOnWqVdnT0tJISUlh5cqVTJ06Fa1Wy+9+9zvy8/OZO3cu69evv6C18fX1xd/fHy8vLyfZ/P398fHxcdunpY1SqSQlJYXMzMw252m5390epXro0CGqqqq44YYbOvAkZ5DJZEydOpXi4mKOHz/eqTEkJCQkfhN0r19d22g0GtHjurKyUizft2+foNFohMDAQKG5udmpz5IlSwRA8PLyEtatW+dU99FHHwkmk0n8t9VqFd544w3BYDA4tSsuLhaioqKEkSNHtipbamqqAAhPPPFEq20WLFggAEJoaKiwf/9+sXzv3r2CTCYTpk+ffv4FaCfLli0TACE1NbXdfTIyMgRAeOSRR9ps+/nnnwuA8Nlnn7XZdvXq1QIgfPLJJ+2W5VzeffddARA2btzY6TEkJCQkfu1cETtxgBUrVhAUFCT+e8SIEdx3331UV1ezadMmt30WLFjArFmznMruvvtupzNahULBY489hqenp1O7iIgI7rnnHvbv309JSckFy//qq68yfPhw8d8jR44kISGBrKysCx77UtByd1uj0bTZtrq6GsDp8+oowcHBAFRVVXV6DAkJCYlfO8ruFqA9aLVaJk+e7FI+YsQIwHG9aubMmS71M2bM6NA8paWlnDp1iurqanx8fNBoNAiCQEFBgYvDWEeZNm2aS1lkZCR79+69oHEvFS1KXKvVttnW19cXcPgxdJaGhgYA/P39Oz2GhISExK+dK0KJR0VFuS1vuX5UX1/vtj4kJKRd42/YsIGlS5dy4MABt/U1NTXtGud86HQ6lzIPDw8sFssFj30pMBqNAC4WC3e0fC4XEg+9JQqedMXst4PdbqeiooLGxkbsdnt3iyMh0WXIZDJ8fHwICQlBoVB06dhXhBI3m83nLW/Z+Z2LXN72acF7773Hww8/zJw5c3jttdcYPHgwAQEB1NfXs2bNGu6++27pB+UsWgvhejYjRozA29ubH3/8kWeeeaZT86SkpBAQEMDAgQM71V/iyqG2tpavNvxMabUBkyIEo6DBfmX43EpItAu5zI6aPDxtFYT4ezD7hkkEBgZ2ydhXhBIvKSnBZDKhVqudylu8qnv16tWpcZuamli8eDGTJk1i1apVTnW+vr5UVFR0TuBfIS078JYdeVttk5OT2bhxIzk5OfTu3btDc2VkZLBv3z5uueUWlMor4isq0Ul27NrH5r1Z6H2vgcAztykkFS7xa8N0+k+dVc/bn/3EuCFRTLp2zAWPe0X8XzEYDHz77bdOZXa7nc8++wyVSsX06dM7NW5BQQHNzc0MGDDApc5kMvH55593atyuwmAwtOv616WgR48egMNv4Gxayzr21FNPYbVaefnllzs810svvQTAk08+2eG+ElcOm7fv4scDteiDpoPK/XVICYlfGzKlhuagqWw+ZuT7H7de8HhXhBJXqVQ8/vjjbNq0CYPBQE5ODnfccQd79+7lqaeeIiAgoFPjxsbGotVq2bhxI+Xl5WJ5YWEhc+bMadUrPTc3l5ycHIqKigBHtLScnJxW82V3ljFjxpCQkNDlkcta5C8sLAQcPgUt8rd2Rj927FjkcjnvvfceeXl5NDQ08MUXX5CYmOjWgW306NHMmjWLf/3rX3z99dftlu3LL79k1apV3HrrrW5friR+HZSWlrHtYBEm/6u6WxQJiW7B4jec3cdryMsvuKBxrggl/qc//YkbbriBKVOm4O3tTWxsLJ999hn33nu
vuGvrDGq1mpUrV1JUVER0dDRJSUlERkYSHR2NRqNhwYIFbvv17duX2NhY0eP8k08+ITY2ltjYWFGxXyhWq5Xs7GwATp482SVjtpCYmEhsbKzo8f/ZZ5+J8rfmjBYWFsaSJUs4evQovXr1wtfXl1tvvZWoqKhWU4d+/PHHxMTEsGjRIrd5xM/l559/5p577iEhIYG333678w8ocdmzdv3PNPuN724xJCS6FYP/NXy9cesFjXHZJ0DZsmULw4YNQ6fTsW/fPn7++WeUSiXXXXcdQ4YMcdsnKyuLvLw8RowY0arT29mUlZXx/fffU1BQgL+/P5MmTSIxMZHMzEwKCwsZPHiw053nn3/+udXIZWPGjBGjp4EjYltZWRkTJ050cbQ7cuQINTU1TJw40e1Ye/fuZcuWLdx7771tWhtyc3PJzs7mqquuavMa2ObNm1t11hs9ejTe3t6t9j106BBHjx5Fp9MxcOBAt/nBz+bYsWNMmTKFuro63n77be644w6XbGaCIPDhhx/y2GOPERERwaZNm9ocV+LKpbm5mdc/2kBTwPXdLYqERLejqfmZRxdNxM/Pr1P9L3slLnHlU1hYyO9+9zv279/PW2+9xe9//3un+hUrVrBkyRLGjh3L6tWrCQsL6yZJJS4FaWnpfLatCXQJ3S2KhES3IzRmM2e4nVEjh7fd2A1XhDld4somKiqKPXv28OGHHzJ+vKsJdcKECXz66ads375dUuC/AcqrarErJEc2CQkAVL5UVLmPddIepPs7EpcEuVzOvffe67ZuxIgRYvQ9iV8/RpMFmaL96WklJH7NyBQeGNoRf6M1pJ24hISEhITEFYqkxCUkJCQkLnsEwU5ltvMtl9rCQ9jM+m6S6PJAMqdLSEj86qnI/JnmWsf1ydCE6/Hyc43Jb9ZXU5q+EatJj2/EAIJix15qMbsMm9VEfdERAnqOciqvOLmZ5po8AEITkvHyc5+X4kJoKMtArQlE7eOcu8JQX4LNrEcb3PGbJzZzMxmblgEygnqNRiZ3xB/XV2VzauubJE77E5rAnl0g/ZWHtBOXkJD41ePpG4Y2uA9V2TswNpa51At2K6nfLEGuVBPY62pK0r7FUF/qZqQrg+bqHE78+GesJuddqpdvuGMdcnZhqL/wFMvuKNj/BaXHv3cpLzv+Pfn73EfBbK7Jp7nGfYwKQbBz4qdXUfuEknTDMlGBA/QYMpdeV99F2oZnMeuru+YBrjAuq5242WzmxIkTHD9+nIyMDDESmM1mE/9ISEhc2SjVPngmundyvFjowhIBKDy0ym19i0KLGjoPAN+IRGRy1aUR7iLgE9qPq+9ei0yudCn3CYXio19dtLkTp/8Jmcx1f9jrqjsRBPfxKXL3fEJQ7Di8A2Jc6qqytmPWV9F/6otwTowJgOC48TSWnyBvz6f0nfTbC9V8WSjx3Nxcfv75ZzIyMtDr9RQUFFBTU0NTU5OkuCUkfmX06TeI+MTO9a3O3U1twX7sVjO68ETC+l0PpxWGzWKkNG09+po8vP2jiRg4E4VS3caIUJ27i9rCQ9itJgoPrUImk9FjyNzOCXgONouBsuObaK4twFMXSli/Kai8HAGorKZGStM2YmgoxSe4D6H9rkeuUKGvycdYX0pTVRYKlRcBMSMoObaegOjhBPQcRWX2Drz9elBffJSm6lx8IwYQ2neSqOCqsn/BUF/Sqecw1BVTlvE9luY6/HuOIjh2XLv6CXYLRUfOvBjowhPxDXd8yIJgp+jwGgC8/aMI7HW12M6sr6Y882cMdUXUFx/FrK9BE9iTgJiRYpvStPX0GDrP5YXkbKKGLWD/Z7fSe+z9KNXnD3b1a6NbzenZ2dmsWLGCN998k7Vr15KSksK2bdvIzc2lvr5eUuASEhIidcVHyd29kqC48UQMuJGG0nQKTysHu83CsW+fRrBbiRw4C1NTJRk/tC/5jsrLDw/vAORKD7x8w1HruiZWgdWk58jaxzA1lRPYcxQ2i5Fj3zwFgh2
bxcCRtY9jMdYR1Gs0tYUHHfIKAk2Vp8ja9nf8IgdTkvo1eXs+ISBmOFnb3wGgIjOFzJTXsJr1BMSMpPTYt+Tu/lic10Pjj4e3P7l7Pu2QvM01+aRvfB6fkATCEqdRdGgVJWnr29VXEECp1qJUa6krOkRd4SGxToYML99wTI3lVJ7c7NRPplCiVGsR7DaUag1KtRaF6kzES8Fupb4sg4Do819BVXnq0AbH0VB2vANP/OugW3bijY2NrFq1irS0NFJTUyktLW01jKmEhIQEAIIdwWZBrQnCyy+SPhMeRxAcL/o1ubtRefoSNcyR7yA28AH2/nMBxsZyPH1CzzusLqy/Y4z8fQS1c+fZHoqPrkMXGk/vMfcBENBzFBFJM0AmpzT9Ozx9Quk95n4A/KKGcuDfd1JXfAQA/6ih+EYk4R0QQ3DctfhHDcNqOpNoSBMUS/Tw/wNAG9Sbg1/eR/SIW1GoPNGFJeLtHwNb3uiQvIWH/kfk4LmiQ1/vMfeSveM9h8xtIFeoCE905JJorj0noYdMRlDsOGzmZmoLDjhVqTx9CU+cRln6BgJ7jxN37y1YTU0olB4oPFoPBS2O5R2A1eiajOnXziVX4idPnmTlypVkZ2dz4sQJabctISHRLvx6DCF6+ELS1j+L2ieE0H7XE9LXkXegqTqXpspTHF79iNjeZjViaqpqU4lfLBrLMwhNmOxU5qFx5EBoqjyFb+RAsVyuUKEL609jxSk8NA6rgKNc6VBg55wxn63sPHXhyJUemBrL3Z4ptxd9dQ6NFacoO+2UZreZMetrOj1eR7BZLW6PPpRqLTarGbvVLK5Ja1iNDb85UzpcYiWekpLCxo0b2bdvHzU1l+bLISEh8eshtN/1hCQkU1d4kLx9n9NYnkncNQ+h9PAmJH4iva6+u7tFFJEpVFhM7neGcoUHFoNzqE2LsR5FG4qqBbvtrJTHgoBgt7Wp5NpC6aGh51V3oAtPuqBxOoNcoRKtKmcjkyvRhcZTV3TY5brc2dgsRhorT6EL738xxbwsuWRn4l999RXr169n27ZtkgKXkJDoMDX5+zDUFSOTyfGPHkHPUbfTVOFI0+sbkUR1zk5sFqPY3tqKAu0MNovB1UzcBoE9r6Y0bQM2iwFwOHi1XKMKiBlJde4u8QqYqbGCxvJM/KPblwSjJn+/4yAah6+A0tMHT58LO8vXhSdRlvGj+G9BsLtcUbtYKNUaTI2VgONOOGcdr4b2m0rh4dVOZedScuwbAmJGolT/9mLyX5Kd+IYNG0hJSWHHjh1YLJZLMaWEhMSvDLO+hlNb38Q/ahgKDw01eXvoedUdgOPqVHCfiRxe/RABMaMwNVVit5pJnPYSyGTk7f0nhrpijA2lFBz4krLjPxAz8la8/aPbNXfhoVWUHPuGq+9a4/b6lDtC+02msfw4B7+8F7/IQTRV5aAJ7EX8dYsJ6j2GmoL9HP3qCXRhidQW7Cdm5CK8/HrQUH6izbFtFgPHvn0atTaY2sKDjqtVp73T8/b+8/TLgkDGpuUE9BxFaPx1AOTv/4LmmnwMdUUUHVpF+YkUYkbcgndADFHD5pO+8UWOfbMEbXAc9aXphMZfR3jSDe163hbOvQVmNTVxautbmBrLMTfXkLFpucvaB/UeQ87OD6nO3U1jRSYDbnwFtTbYsY7xk6jI/JGCA/8mesQtLvPVl6ZTcuxbBs/umA/Ar4WLnor0l19+YfXq1WzduhWz2Xwxp5KQkLgC6NNvEPFTlyHzDO5wX6upkabKbGyWZrTBfVFrg5zqTY0VNFXl4KEJwCc4TjxLNtQVOu3SAbz8olCoPAGHOdasr2w1gpnFUI+hvgRdWD+xrLE8g+rc3S5tFSov0cEOTgcyqS3Ayy8STWBvp7bNNfkYG8rQBPUWlZbF2IDdYkTtE4KxoRSlWodSrUFflY0mKJb07/5IYK8x+Ib3o6kyG114ktM6nPusKi9/sd5QVyRaBs6sQ48zHuGCQFNVFqamarwDYvDyDQe
gPGMThvpil2f1CU0gsNdoBMEuvtxkpvwFbXAckYNmOYa0W9FX554z55m1b0FflY3F1IRPcB8XRzZzcy3pG19EG9yH2HH3Iz+dQKc8YxP5+z8nPvkZF6e4KwZLA8P9j3HzrGmd6n5Rd+JFRUV89dVX/PLLL5ICl5CQuGCUah/8egxutV7tE+IS7hNoM7yoQuV53jYqL1/xfncLcpU3ajcmbIXK2UHLOyCmVYczd3UqTx146gCH01oLmqBYp3ZeflFuZT7fc3j59Wi1DgCZDG1wH5fQqCovP+x21zNrpVqHzWIk44eXiRq+EKuxnpr8fUQPP/MSI5Mr2xVq9dznOxsPb38GzforxUfXIjtrq2+zmhg0+w3xBei3yEVT4haLhQ8++IDDhw/T3Nx8saaRkJCQ6BY0ATFoLsAbvLPowvrj1UV32dvL+ZzKAEL7T6Ek9SsQBPpd/1zbLwudQK70cLJwAEQMuLHL57nSuGhK/IcffqC4uJjS0is3/rCEhITE5UZLaNjLieDYce2O7ibRtVwU7/Samhq2bt3KkSNHLsbwEhISEhISElwkJf7DDz+Qn5+P0Whsu7GEhISEhIREp+hyJa7X6zlw4ACnTp3q6qElJCQkJCQkzqLLlfiBAweoqamRvNElJCQkJH7V2CwGGsrSncpqCw9eUhm6XIlv376drKysrh5WQkLiIhIeHs7dd7uGLNVoNNx7771oNJpukKrrqMrZSeGhVRQeWoWxocxtG6upkaLDq8nb8+kl/yHuLhrKMqgrcvZdMjVVdjg63YUi2C3UF6c6lwl2Cg+tcrnXfqmxWQyUpH7tUt5ck8/hVQ9SX3JGiQt2C/n7PufEj69gt12ajWyXKvGmpiZqamou+7CqQUlBhI0IE/9oI9oXNN87xJuI0RFET4xGF627yFJ2jBEjRjBs2LDuFsOFkJAQxo0bh053+azXoEGDGDdunPinZ8+e3S0SAP369eOqq6665POOHz+e999/n9GjR6NWO99xDgoKYvLkyXzwwQfExcVdctm6Cg9vP7x8wynP+N5t0BLBbiP168VYzXp8QhPI3/sZhvpf/82a+pJU6kuclWfRkbXk7PxHh8eymhppKMvolBxNldkcW/8MFmPDmUJBIG/PJ+cNt9oWhvoSDHVFne4PjpSyxecocbO+mmPrnyVm5CKiht4slsvkKgbNeh3BbuXUljcvaN720qVXzLKysmhubr7s04rGzYxDG64FGSi9lJxae4qmkqY2+8TOiMVUb8JqtNL/lv6U7i8l9cNUBHv3P+8jjzzCyZMnOXjw8tpBJCUl8eSTTzJ//vzuFkVk3rx5xMXFIZPJ0Gg0rFq1in/+85/dLRYzZ84kKCiIPXv2XLI5k5OTefTRR9m1axd/+9vfMJlMTvX5+fk8+OCDvPTSS6xYsYJnnnmGzMzMSyZfV6ELc0TzKjy0ym29saEEu9VMz1G3A46MaedGFPut0Hv0PQhuAru0RXXeXqqyd5A4bWmH+/qEJnD1XWuccol3Bbm7/kFgrzEXfm/9HJ2Wu3slIX2uJbjPtS5NZXIlfSct5vCqB6gt2I9/G7nQL5QuVeI5OTkUFV3YW8+lYM8yx4+kh48HE/8+sc32uhgdcTfFkft9LpmrM0GA4AHB9Bjf47JQ4C3Y7fbuFqFVLkQ2f39/kpOTyc3NZf/+/Rcsy7PPPguAh4cHX3/taibrTi7lZxgfH8/DDz/Mnj17eO2111pNC1xRUcHTTz/N66+/znPPPcfDDz9MQ0OD27YXm7qiI9Tk70OwW9GF9Sc4brwYrNtuM1Oe8SP6mny8A6IJ7z8FmVzV5pjVubuoLTyE3Wah8NAqZDIZPYbMvSA5DXVFGBvKXBKaNJSmIVeq0Qb3ob4kDbvNjFyupCp3F166MMKTpjtkFuxUnNpKU+UplGotIX0nitHbKrN34O0XRVNFJk3VufiGJ4k5wNtEECg/+TONZRmOCGlnKyfBTuHhNQB4+0cR2Otqp66mpirKT/z
oSO+qCyOs3/ViFLvStA3UFR3GrK+h8NAq5AqVGHYVoPzEj+jCk6gvPoK+pgD/HkPEADLVubtornXojaghc12Cr5sN9VQcXYdgtxLSZ4IY4a62YD8qL3+0wXGn5aukrvgoofHXYW6uofxECs21RSg8jmBurkUTEOMUtKa+JI3q3J3IFR6E9rseL98IsU5fnUP5iZ+QKzwI6j3GSR6LoZ7q3F2MvO3zVpdZofKkx5B5FKd+e9GVeJea0/Pz89HrL03Wm0uJby/HFzVnQw6c/s5XHqsk9R+p5+klcTayc7MidIDo6Ghuv/12FApFF0okcf/991NXV8cbb7zRqgJvoaGhgVdffRV/f38WLlx4iSQ8R4aydLK2vYVvxEACe42mKucXio6uBRzm8LT1z2FqqiSkz3gay0+QsemVdo0rV3qiUHkhkytQqrVdkpPabrdReOh/ANSXHKPotHKsytkp5uiuKzpM7q5/UJz6NdrgOJoqs2g8nZWt8PAaGstP4BsxALnSk6Nf/QFTYwUAFZkpnNz8Os11RWiDYsnf/zn5+1pXKGeTvfNDanJ3E9BzJM21BZSkrRctpwKO/N2G2gIqT2116Xvsm6ccCi9hEjK5nPTv/oggOF46lWoNMpkchUqNUq1F4eHsQ1F8dB0nfnoVY0MZPiF9KUnbgGB3JMOSKz1RKD3I2/MJAq6bosyfXkHl5YtC5cXRr54UM8FVZv9CQ9lxsZ2xvpTStI0AyGSOz1Kw21CqfRwynbXLL8/YRO7uj/GPHoGXbwSpXy/G2FgOOL5nx79/GZ+QeDx9I8nc/FcnuRrK0vEJTWgzY1pAzAgaSi6+jujSnXh1dfVFU+IKlQKlRom12YrNfP4fHIWHAoWnAnND1zgWePp7YjfbsTQ7Z2BrS4628PPzo6GhAbvdjlwuR6vVtrrDUavVeHh40NjYdekVAXx8fLBarRgMXeM8IpPJCAgIwGAwtBluVy6Xo9FoMBqN581up1I5dlO1tbXtkkGj0WCz2bo0ToFcLsfLy6td3+/2zq9UKvHz86Ourg6r1XretnK5HJ1OR1NTU5ttz5bDYrG4vSkyePBg4uPjeeutt9r9fzYnJ4eUlBSmTp3Kl19+SX19fduduhCbuRlkcrTBcai1QfhGJIl5tavz9iBXeIhZzbQh8ez95wJMTZVtxtX2jxqKQuVJfckxwhM7l4TiXDT+URjqihEEO7UFB6jO20OPIXMwN9fiHXB25jQ5/aY8j0wmJ6TvRDGJyNnnrC3PXnFqixitzcuvB72uvgsAXVgCh1c9TI8hc897BGAxNlB5agsjbv0MhVJNQMwoGkrTxHqZTE544jRkMpmLsxuCgM1iwFMXii4sEV1YImH9pojyBveZgLGhDJWhvtU1DIgeQczIW0+3v1bs6x81FF1YP7J/+cBtv8hBswjuM+H0OugpOrqOvhMeP+9ZucrLl/DEaZQd/46g3mPwjRhw5lHsVnL3fMqQuW+L342mKsfOO2bELRQeWkWvq+4gKO4acc7i1K/OrKOhDrXGOfGOOzy8A7DZLNgshi4/JjibLlXiFouly1ONBvYPJGF+Aj5RjrcewS5Qe7KWzNWZ1Oc4/4joYnQkLEggID4AAGOtkexvsyncWtgYZMBvAAAgAElEQVSpuePnxRM6NBQPHw9kShnXvHaNWHfgrwdoruh4TPilS5dSXFxMr169GDRoEJmZmbz33ns8/fTThIeHk56ezpIlS0Sz6qhRo5g/fz59+/ZFJpNhNBrZuXMnH330UZsmTT8/P55//nnKysp48803nX78J0+ezIIFCwgNDQUQ5biQ+/0TJkzg9ttvJzg4GLvdzvbt20lPd3hunr0TDw0N5fbbb2fUqFF4enpit9s5efIkK1euFNu3yDhv3jy8vBz/AZ577jnx+7V582b+/e9/i20VCgVz585l2rRpBAU5/oNVVVXx7bffsm7duk6bqf38/Lj77rsZO3YsHh4e6PV6vvvuO7744gun77pcLmf
u3LlMnz7daf7169ezbt06p52uSqXijjvuYMqUKXh6eqLX68VnOddioVarWbRokdjWarWyc+dO3n//fZfP/7HHHiMyMpI33niDxx9/nMTEROx2O6mpqbz11luUl5eLbceMGYPRaGTLli0dWo+NGzdy/fXXM3LkSH766acO9b1Q/KNHEJFUzJG1v0cb1Iew/lNEk6++KhtDXRHHvn1GbC/YrZiaKronOYZMjqcuHGN9MfrqXDy8/TE2lGLWV6P2CRWb+YYnisrs7BSnNouR0rQN1Jccw24zYzE24BvW/0y/s5SSl18UyJWYGstbTbQCYKgrRq0NRqE847zoE9IX3Ox+XZ9HRv9pS8ne8R75+78gpO9EIs5JUWqzmp3GPhfdWVnG2pvOFcA3YqD4d21IvCNGO64pT91ht1mQnyOTsbESm1nPyc1/E8tMTZX4hjvW11Bb6LSOPiF9nF4YlB5aLO3IVW81NyOXK1AoL65vRZcqcavV2qZZriOovFUM/f1Qak7UcOS9I1j0Fnx6+BA7I5YRT45g6x+2YjU4FJMuWseoZ0dRcaSCX57/BZvZRsykGBIXJWK32in+xdUjtS3K95dTn1NP+KhwggYGcXLNSbGus7v84OBghg0bxrJly1izZg0vv/wyr7zyCq+99hoqlYrnn3+eQYMGcfjwYTQaDY8++ijbt2/nP//5Dw0NDcTFxbFw4UJefPFFnnrqqVaVU1hYGMuXL6euro4PPvjASYHPmTOHRYsW8emnn7J9+3b8/Py4//77eeWVV3jooYecfuzby/jx43nyySdZu3YtW7duxcPDg0WLFrFgwQKXtrfeeitBQUG8/fbbFBUVERAQwKxZs1i2bBmPPvooBQWO6y0nT55kzZo1JCQkkJyczKZNm8SbD/n5+U5jDhkyhBkzZrBu3TpOnDiBTCZj1KhRLFq0CJlMxurVqzv8TBqNhr/+9a/odDo++ugj8vLyGDRoEAsXLiQ2NpYXX3xRXP8hQ4Zw0003sW7dOjIyHB66o0aN4rbbbkOhUPDf//5XHPeJJ55g+PDhvPfee5w4cYKYmBgefPBBBEGgrOzM9Se5XM5LL71EREQEK1as4OTJk8THx/PII4+4/fz9/f0JDQ3l5ZdfZu/evfzrX//Cz8+P2267DT8/P6fPNTExkbS0tA7Hc8jOzqaxsZH+/ftfciUOEDFwJuFJN1Cds4u8vf+koSydXlffjVypJqDnVeJOr4WLuQNqC21IH+pL0pEr1fj1GEp17l5kcoWTApMpXH+CBbuNY988RWDvMcSNfwSlhxclaRsxNpzxlm8xY5/VC5n8/D/ncoUKq8nZ6mKzGFF6tu/miE9IXwb/7k301bkUH1nLoVUPMXTeB+LuX65Qnfc6mFzRtn9Cmwh2ZLLTx2oypZMDnt3uuoGUK1QuTnpKDy/kSjX9rn/OqVx2Wj6ZQoXNcmaDdm4KW5+wfpza9nfHC8J5nqm+JBWf0IT2vW1cAF2qxAVB6FLHHLW/GoVaQeXRSvRlji9fdUY1NSdrCOofJCpwgH7/14/GgkYnb/ET/z3hUPo3xHZKidfl1EEO+ET5EJQURNl+9/dLO8qhQ4dED2Sr1crRo0fZv3+/eA0rIMBhSdDr9dxzzz1O5s7MzEyKiop45ZVXiI+PFxXG2URHR7N8+XKKiopYunSpk1nX39+f2267jS+++IJ169YBjh3jsmXL+OSTT5gxYwYff/xxh55HLpdz5513sn37dj755BOxfPny5Xz+uetZ3TvvvIPFYnF64Tty5Ajvv/8+06dP5/333wcgLy+PvLw8wOFFvXnz5lYT6hw4cIB7773Xaa3S0tJQKBTMmjWLtWvXdvi7uXDhQsLCwnjyySfFdU5PT6e+vp6HHnqIa6+9ls2bNwNw8OBBl88qPT0dhULBTTfdxOrVq7HZbMTGxjJ+/Hhee+01tm3bBjhS9spkMp599lknRXvttdcyYMAAHn30UbKzswHYvXs3CoWCZ599lgEDBnD
06FEnmYOCgvjkk09Ys2aNWHbs2DEX03dAQADHjx+nowiCQGVlJYGBgR3ue6HUFR3Byy8StTaYoLhrUHr6kLfnU8Cxoz255U16jb5LzDVtsxiRybvGj8JuM2NprnOb5rQ1fIL7UJz6FaEJk/GPGkrGpj+jCezVZj9jQwmmpiqnRCf1JalOFoW6wkNEJM0AHGe0CpUnnr7hLmOdjSYwBqupiabKLLTBcQh2K43lGe2yVNitZqrzdhMcNx5NYC/6TnqSvf9a6LT7V6o1GOqKnfrIlR5tjt0WdUVHCImfBEBN/j504UkAeOpC0dfknWlXeMilr8LDG4uh1kkelZcfKk8djRUn8Y8aCji+Ky1WBN/wJCqzduAT6sgdX19yzGlMtTYYn5B4yjM2EX6ONUJEECg+soawLjqeOR8XNZ/4haIv1VOfW0/8vHgUngqKdxZjbjAj2AQqj1WK7dR+avz7+JP+WbqLt3hVehXxN8ej1qkxNZjOnaJbOPuH2mQykZubK/4dHEqxBXfnlampqej1evr27euixBMSEli6dClpaWm8+uqrLscbV199NQqFgpSUFKfyuro6srKy6N+/Px0lJiaG4OBgtm/f7lTe1NTEgQMHGDPG2bvT3VmxyWTi4MGD9O3b16Wu5e5yW+f27tZq9+7dzJw5k5CQEKddbnsYP3486enpLmv8ww8/cOuttzJ+/HhRibc2/65du5g1axYhISGUlpYycuRI7HY7v/zyi4uc567LuHHjyM3NFRV4Cy3XCPv37++ixC0WCxs2bHAqc3d2LZPJOv3C3d4z+a6mubaAk5tfJ7DXaBQeGqqyd4iKTheeREDMCA6vfoSg2HGYGiswN9eQNP1lkMkoPPQ/TI3lmJoqKEn9huqcnUQOnuPkkXw+Cg58Scmxb7j6rjXtNgVrTjurJSQ/jZdfD+xWwznn4e7x1IWhVGvJ2vY2Xn5R1BUfxmZ2/u6b9TWkb/wjap9gqnN3E3fNI23KJZOriB33AOnfvUhgrzEY6grx1IUjCI6Xaaupibw9n6CvKcCsryZr29/pMWQunrpwTE3l5O/9J1XZO/D2j6GpKgtv/xi8/c88j3/UUPL2/otTW/9Oc20BIX2ubV3JnUXRkTUYTgeXydr2Nn6Rg8SrW0q1loqTm2koS8ekr8bYUEbvMfcDEBqfzOE1j5Cz8wPsVjPGhjLxWVoIiB5Jzs6PqCs8TF1JKonTluKpC6PPhMfJTFlBYK+rUXh4U5O3l4Tkp/EOiCF6+P9xdN3jmPRVKJRqbOZmF8tHrzH3kvbt0+jCE92+mBUc/BJkcvEs/2JyWStxwS6w//X99J3dl7hZcfSd05eqY1UUbiuk4nCF2M4r0AtkkLgokcRFiW7HUmlVl40SP5fzHUH4+flx0003MXz4cMLDw1Gr1TQ2NqJWq10CqPTu3Zs///nPeHp6UlRU5NY/ITQ0FJlM5naHDFBSUtJh+Vt2ZVVVVS517nbOcrmciRMncu2119K7d298fHwwGo0IguDWca9FibflKNanTx9mzJhBUlIS/v7+wBnFr9PpOqTEPT09CQgIYPfu3S51NpuN4uJievRwvnsaFxfHjBkzGDBggDh/i3Ofr68vpaWlBAQEUFtb6/KZ22w2KisrncpCQ0Pp2bMn3333nVsZfXxcvWNramra5dBXW1srWnw6SmBgoGghuZREDLiRoN5jaKw4ic3cTOK0l5zu//Yecx/6mnz0Vdn4RgzANzxRNGX69xiC1awn6Kx0mR6nr0cBePvHEDfuwdbnTpqBX+RAJ0XZVJlFrbvdn8qTiAE3ovGPYsCNr4oyJlz/gtOcoQnXub2PLZOrGDzn79QW7MdmMRI37iHsNouTggpLnI4msCfN1blEDpwpzlF5aivGxgqXMbXBsfhHDSOk70R8QvrQWH6SyIE3oVB5YTU1nZZbTVDsOIJiz/RTnTa1e/lFMXT+hzSWncDQUIZvxAD8Igc6mYq9/KIYMvcd9FU5RAy8ySnXetz
4R/H2j3K7tr4RA9EGxYoKr8UyIJPJGThzBd7+UY4rgFYzATGjxN29hyaAIXP+Tl3REcfuOCyBxvKTTmP3GPw7dGEJWE16okf8Hyovv9NzDmDovPeoL0nHbrcQMeBGPLwDxHGHzv+Amvx9KD00+PUY4giIIwji82oCYugz4THSN75A77EPiNfQ7DYzubtWUl+axoAZyzt09t9ZLmslDmBttnL8i+OcWneKsJFhxCTHMPT3Q6k4XMHhdw4j2AVkCsfCHv/8uMME7obOOKF1NzExMbzyyitUVFTw7bffiopZp9OxePFilErnjy8sLIz33nuPfv36MWfOHHJyckSTbQsKhQKz2czixYvdBuXpTMz7lp2Zt7e3S12LZ3mLw5ZcLuePf/wjCQkJfPPNN6xevRq9Xo+npyczZsygX79+LmO0WCbOd8VsypQpPPzww2zdupWPPvqI6upqFAoFvXv35qGHHur09bTWXrDsdrvT+k2ePJlHHnmE7du3i/PL5XJiY2Od5rdara2GMG1ZqxbkcjlHjx5l5cqVbtvX1bl+19u7S87MzGTkyJHI5fIO7cjDw8MJDAzstoAvHppAl/vLZ6MJiHFSHi1oQ1wtPGejVGvO28ZDE4CHpoMvPTI5fj0Gn5EhqLdTdcu9b3coVJ5OLxzu8Anpe9oxrWN4+UU5nOFO46EJPC2uCr8eQ1rtJ1d44Bs5EN/Iga228fKNcGvd0IW5/r9uodVnkMnQBDrWLCBmlNsmHppA0dQO4BuR5DJGi/n9XJRqHwJ7uY+QqFB5OWIQnMbdugTEjCLphjBqCg6IZYLdhsLDi0Gz/nrJggVd9kpcJpch2AUszRYKtxZSuK2Q3tN603dOXwKTAqlKrcJQ5dhtyRQyGvK6JwjFxeDuu+9Gr9ezePFip121p6en213Y3r17+e6770hJSSE6OprHHnuMkpISJ4/z8vJyPDw8aGpqavV8uaO0HA/06tXLxbwbHu78QzVhwgRGjBjBk08+6XImO2vWLNzR8mLR4h1+LhqNhvvuu4/169fz4YcfOtWFhLT/DPNsjEYjVVVVxMbGutQplUp69epFWprjeo63tzcPPPAA33//Pe+9955T2+Bg5/PGiooKPD09CQ8Pd1p/pVJJcHCw0zW68vJywsLCLkougr1795KcnMywYcM6FEBn4sSJ2Gw29u3b1+UyXWlog+PEQCOXEk1gb9Ra9z4J7iKISVw8vANinDzZFSovMerfpeLi7/UvAG2klvh58c6FguOcG0CpdryDGGuMNOQ1EHVtFAq1845LrpQj97i0jzlo0CDxmtGFEBERQV5enotZ/He/+53TuXkLLbtGs9nMyy+/jMFg4IUXXnAym+7duxe73e5WYXY2yUVpaSm5ublMmTLFacfr5+fHwIGOt/aWnXhkZCSCILgopujoaEaMcB/ZqMXMfLZCVKvV4s41KCgItVrtMqZCoWD27NmdeiaALVu20L9/f/EZWpgxYwbe3t7ieXjL/Odez5PL5S7z79mzB7vdzrRpzg4vY8eORaFQOF0x27NnD1FRUW7X5UITkuzdu5fCwkIWLlzYbiuFr68vN910E5s3b6a6uvqC5pfoPD1HLcI/6vLLkyDRPVzWO/Ee43rQc3JPvIO9KdpehKnBhHewN71v6I2xxkhl6pkzxPTP0xm1ZBSjXxpN9vps9KV6vEO8iUmOobm8mdSPUp3GlSlkosL37e1L1LUO81L18eoLMr2PHz+eJUuWUFpayl133dXpccDhVTx27FhGjx5NZmYmAQEBTJgwgZEjR7bZt6qqij//+c+88sorPP/88yxZsgSLxUJFRQVffPEFt912G4GBgfzwww80NTURHx/PzTffzPvvv8+OHTs6LOs//vEPli1bxssvv8yGDRtQq9XMnTuXiooKoqOjReWUmprKvHnzuP3221m/fj3giK++cOHCVoOHpKamYrFYuOeee1i9ejXBwcHMnj2bzz//nK1bt1JcXEx1dTU33HADRUV
FVFdXExkZyfz5891aLCZOnIharRaVV+/evZk6daq45i2hg7/88ktGjRrFCy+8wH//+18KCgpISkpi9uzZ7Nq1S1yn4uJiqqqquOGGGygoKHCa39fX12nu0tJSvv76a2bOnIlcLufIkSNERUUxZ84cSktLnZT4Tz/9xMSJE3n++edZt24dBw4cwMPDg+HDhzN58mTuuecetyb19mCz2fjoo49YunQpCxcubNVHogW5XM4f/vAHAL744otOzSkhIdH1XNZKPHNVJvoyPVHjoxjyyBBkchk2i42qY1Vk/i8Tm+nMeWV9Tj27l+2m7+/6knh7IgqVApvJRvnBcke887PoeX1P5CrHTra5ohltpBZtpCPUorHW6KLErc1W0WTfFlVVVaKydEdlZaWTsiovL6epyeFYIggCpaWlojPUP/7xD+RyOYsXL0atVmMymdi5cyevvfYaTz/9tJMTWEVFhcsPelpaGh988AGLFi1i/vz54g/1f//7X0pLS5k7dy4vvfQSMplMDIyya9eudj3nuRw9epSlS5dy11138fzzz2MwGFizZg1ZWVncf//9+Pj4UFtby5EjR3jnnXdYsGABM2fOBODEiRO89dZbTJ8+XXQIO5u6ujr+9re/sWjRIl544QXKysrYtm2baAa2Wq289NJLPPjgg7z++usoFAqqq6vZuHEjNTU1zJs3z8maceONN4rKvbS0lMjISObMmQM4POpblLjRaGTx4sXcdtttzJkzB51OR0VFBf/5z3/43//+J56J22w2XnrpJR544AFx/pqaGjZu3EhVVRXz5893mv+TTz5Br9dz0003MWvWLEpLS1mxYgXDhg1z8gmwWq08//zzzJ8/n+TkZObNc3hiZ2VlsWLFCpfPu7a2Fg+P9l/pOXDgAKtWrWL+/Pni5+UOpVLJH/7wB4YOHcqf/vQnFwc8CQmJ7kMmdGHKsSeeeIJNmzZdlCQOMrlj53z23fDztvVUYG3unqswarUai8XSpeug1Wppbm7u8rVVqVQoFAoXj+YePXrw4IOte+uC46Xjueeecyn39PTEZDK1mc2uJVpbZ5zpWkOhUKBWq9sM+doZZDJZm8/U0fk9PT3bHR7W09MTm83WpVERZTIZt9xyCwsWLGD37t288cYb4kslQFRUFEuWLCEyMpK//OUvnX7JO5s+/QYRP3UZMs9uiKQmIXG5YWlguP8xbp7VuTvll/VO/GwEu9AuBS627SYFDrikc+wKzv5h7UpaC5VrsVjadHxrTaG1Vyl1ZWzzFmw220VR4ND6817I/B1Zg4uxXoIg8Pnnn3PixAnuuusul2cMCAigubmZxx9/vEuulXl6euLrq8NfI0PtbcVDKeChEPBQ2MW/e6oEvFR2fD1teKoEqvRK9uR6UWu4Yn6uJCQuGVfMTlxCQuLi4u66WcsZfWd/JqKjo1m0aBEDBgzA29tbDDIjIMcunB4bkAFyGchlglOUyppmBd8e02G0XtY+uBISnecCd+LS/wwJCQnAfS5zQRA6rcABCgoKWLZsGX/7298oKCgQrVQKOagUAiqFY/etUggo5A4FLgA2O2zL0rLqsJ+kwCUuHoKd8swUpwQntQX7xXSxVwLS/w4JCYmLiiAI7NmzhwcffJBly5ZRXFyCrRVjndUuo7ZZwf8O+5FR3npGrI5QnrGJrG1/J2vb39FX57htY9ZXk7t7JSc3/5XyjE1uEoy0Tk3+Xsz6jl+5s1lNVJ7act6UmpcTdquZylNbOrQ2LRjqiqkvvvi5tTuCzWIgbcPzVJ7aht125kjRrK/h8JpHqC9JO0/vywdJiUtISFwSfHx8CAkJwWqzIQiuustmh2Mlnqw+7EuDsWsSlwD4hCYQFDuOhrIMjA2uoXcFwc6x9c+i8tQR1n8aVbm7MNS2P31x3u5PqCk42GG5DHXFZG1/B7PB9ZqgxVBHde6eDo95oVTn7sbiRh4AQ0MpWdvfwdLc8WuNVTm/UHDwPxcqXtchCGSmvIaHJojE6Uu
dErWE9rue+EmLyfjhZQx1Rd0oZPuQPEUkJCQuGhqNhlGjRjFu3DiSkpI4ePAgW7btYN78GDSnN9o2uwyzDX7K1FFS3/U/SS1RtZRqrdt6c1MVNnMzPYbMBaD/lBfaTOt5NkPnfdCpdJPaoN5cfecat33rio5QnpnSaljQi4Eg2Dn+/VJG373Wbb0mIKZVedsiaug8oobcfKEidhnVebsx1JUwdN5zbuOb+/UYTOTg2eTs/AeJ0//UDRK2H0mJS0hIdCkqlYqhQ4cyduxYRowYQUZGBjt27OC1117DaDRy8/xb8FA63NlsdhmFtUq2ZPlgsp5fOVTl7KQmdzd2qwltaDyRA29CJndE7bNZjJSkfoW+KgdP3wh6DJnbqtJ2GjNrOzUFB7DbzOTtcaTRjRl5W7ueU1+dR+UpR9S+4D7XinG+waGEhdP5rcszN+PlF0n0sHmivJWntoqm/ZiRi5xSphYdWUt98VFMTZUOmWTydofytJoaKU79BkNdEZrAXkQMuEmM4Z2351NCE5IpOfYtNnMzIfGTxJjgtYWHqCs8KGZ8AwjpO0kMKVqZtQ19lSObXvSIW8U82s21BTSWZzoyiNkthMRfR9GRtfhGDCA0/jrHvHv/BYINT98ehPWbLMpqMdRTfNT1hSF6+C3izrg6dw9V2duRyRWE9Z+KLuxMlsXStA34hCZQW3gQfXUuuvBEwhOnI5PJMTaUUnLsG8xN1ai8/IgYeKNTvPjStI30GDJX/DzcETFwJkWHV2FqqmxXutbuQjKnS0hIXDByuZzExETuu+8+/vnPfzJnzhyysrK49957Wbp0KZs3b8ZoNCKTyZg2dTIqhQyLDTaf0vDDCV2bCryh7Dh5uz8mPGk60SNvxayvpuDAl4Aj6UTa+mdAJidm1G3I5ArSNjzfrrNmr4AYfEITkCvV+PUY4lBq7cw8pVRr0Qb3obbwMPrqvHPkTSdn18dUntpGSN8J1BYcpOjIOrFe7ROKJrAXhYdWAc5nzJqAGDHntTa4D9qg9sVnt1mMHFn7BFZTE+H9p2GoL+HYt08j2B3XbQsPryZr+zv4RgzAP3o4mSkrqMnbCzgSiXj7RyOTKRxzBvdB4XEmtK+nLgxNUCyFh1Y5ZV4z1peSs/NDAmJGUJO/j+wd7xOeOI3s7e+I668NjsNmNVOV7RwJUiZXovYJE/80VpykvuSY+EJTcuxbio+upceQOQT3uZaMTcudfBoqTm7h5M9/AWSE9Z9KY/kJ7FZHzIn0jX/E0zeSmKvuQBeRRGbK607n3g1l6WIu8dZQKNXowpNoKE1v1/p3F9JOXEJCotNER0czadIkJk6ciF6vJyUlhd///vetxlYfOXIkwUGBVDTAjyf9aTK1T2HK5UpsVjMymQJv/2h6j74X++mdbm3BAeRKTzG3eMyIWyk/8RP6mjy3uZ7PRhMQA4IdpcrrvBm83KHWBqHWjnN4N7vBwzuAvpOeBBxOYeWZP4l1urB+CCFx8NNrLv38o4ejr8lDrlC1mcnsbCoyU1B5+RI71pFv2zdiAAf/dz/VObsIirsGcKQwbRnTZmmm6OhaAnqOQhMQg9LDG7lS7XZOn5B4fIL7cIJXXOp8I5LwCU3AO6AnfpED8Qlx5Luw262OZ+g9BsFuxVBX7NRPqdYQnui4VlVfkoahvoTBs990KHHBTsGB/zBk7tuotcFoAiGs3xTKT/xE7zH3iWME9LqaqKEOM71f5CCxXK5QYjPpUWuD8fKNILDn1aL1wG4zY7MY8fB2jQ55LipPXyzGyzuplqTEJSQkOkR0dDTjxo1j/PjxKJVKduzYwdNPP01xcXGbfWfPns2G7zZR5JkM6vYbArUhfYm75iEyN/8VmUxBWP/rCevvUAD6mjyaKk9y4N93iO3NzXWY9dVtKvGLifasxNwqTx1WY+N5Wjtjt5qQKzvmna+vznHkUG9BJsM3PJGm6hxRietCE8RqTVAshv3/PmtOMwpVx28EtJikZXIFCpVXS2G
7+5saK8hMeZV+1z8npns1N9diNTZw7JunxHZWczN+kYOd+raWxrT/1JfI2f0x+z9fRFDvMUQOni2mfpUrPFCoPDEb6sQc4q1hMdSj8vI9b5vuRlLiEhISbRIUFMSYMWMYO3YsoaGh7Ny5k7feeov09PabGoODgx35ADy0xE9NpqPuUYG9RhPYazSN5Rnk7f0XDWUZJCQ/jdJDQ3DceOLG/76DI15cZG4yDbYXuUKNYO/YDlCuVGNurnUqMzfXoNaeScfbYm52/N1yRume7i/YbFxKbBYj6d+9RPTw/8Mn9EzeAIVai1zpwfCFK8/7QiCTub/FoPYJod/kZ7EY6ig5tp7Dq3/PsAUfikpbFz6AuqLDhPSd5LY/OHbsDWXp9Ln20U4+3aVBOhOXkLgMCAoKcpudrmfPniQmJrrpcfHRarVMnDiR5cuX88477xAXF8eaNWu44447+PDDDzukwMGR/Cc7O7tTstQWHqSx4iQAPqH9iB5xK83VuQDoIgZQnbfH6WpUZ+5tt4bN3ExjRWbbDbsQpVqLudkRcEQQ7O0y6Qb2vJqa/H3is42PhA0AACAASURBVJv11dQVH8M/5sz3qjr3TOz7qpxf8D/rCEGp1mI1N2Mz6wGwGN1nFewyBIFTm/+KLqwfYf2nOlUplGq8A3pRfnKzWGYx1Ivn++fDbjNTdNiRzEfl5UfMiFtQefliajyTlCo8cRpFh9ecd7zStA34RgwQrQOXK7+5nbhMLsMryMupzFRrwmZp+w1U6aVEG6FFsAs05Dcg2K+MIA3nQ6FQ0KNHD3x8fMjPz3fKjCZxaZg8eTIPPPAA6enp7Nu3z6lu2LBh3HnnnXz//fe8//77Ys74i4VarWbw4MFMmDCBYcOGkZaWxg8//MDu3buxWrsvH4HN3MzJzX9FF9YfhYeG+qIj9LzKYT7XBMQQNeRmDq/5PQHRwzE312AxNjFo5gqQySk48G/01Xk01xRQdHgNFSe3ED18gZM3+fkoSVtPwf4vGH3POidv5trCg5Qd/4GmylNYDPXUFR2h78Qn2jVmwf4v0NfkAXDip9fw6zFUPB8Gx7l43t5POf79SxgbyglPmuFU7w7fyIGE9Z/K0a/+gG94EnXFR4kZcQvaoDPPWV9yjIay49jMzZibaxk4c4VYp1B54h81lNRvlqDydJiQk2Ysd8h74D/iS1Nmygr8o4cRnji9zee0W81k/vwXzPoqjA3lZGxaLq59+cmfqczegS48iWPfPiP2Ces/heC48fSd8BjHf3iZ6pxdqLx8qS8+SnzyEvHMvTWsxkaqc3dRlb0DTVAvDHXFePlFoj3L9B7Y8yrKT/xI9i8fEDfuIZdrcw1lxyk6spZBs15v8xm7m99c7HQPHw8m/n2iU9neV/ZSe7K2lR6ADPrM6kOvqb2QK+QgA1OdifR/pVNxxH3K0SuBQYMG8dhjjxEaGoogCNjtdtavX8/KlStdlMUzzzxDZGQkTz311EVLMPJbZM6cOdx5551s3ryZd999F4PBOeWtTCbjxhtv5O677+bw4cMsXbq0yxW5QqFgwIABTJo0iVGjRnHq1Cl+/vlndu/e7SJPV9DZLGY2qwlDTT4WYwOaoFgXxySLsR59dR4qLz+8/aPE+7+GumJsFufvrKcuAqXa4X1tt5oxNpSK16lc5rUYMNQVow129hK3GOuddnfIFKLCbNkNe2gCxTHM+mq8/HqclqkIm+XM2qr+n73zDm+q3B/4J0mb0XSkK90tdNLBLEMKFISyxetVFEHkIgrqDwWZAiICIsiVqygooHJFFPW6B3vI3rJXBxTa0ha690qT/P6ITQlJJ6Otns/z8GhP3nPe9z0nOd/3/U6FyiyMSVOaT0nedRT27kiVzuh1WpKOfW5xjG5t+htDqMqLsijLT0Oh8jL2D7B/1RC6jfnyz0VOPg6e7YzOXlXodVqKMuMRW9ugdPQ1Crfb76G13AGZnZrK8mI0ZQUoHDwoK7yJldQGK5kdxdmJKJ1
aA3qKsky1L1X3vqIkl4riLLO5yOzUxkWEXqehKOsqusoKbF0DTNT/pXnXsVY4Gp+j6UT0lBakU1aQjtTG0TCW2wS1tqKEi1vfxEpqQ3DfaUikNgDcuLSd5OPrCYmZhYNnhMX7fVf5u1Qxu1tUFFWwc6LBm1TdQU278e3qPMezuycBwwJI+DGBa1uvIZaKCR8TjnOYc4sV4ra2tsybN48rV64wY8YMcnNz6d27N6NGjWL9+vVmgiIgIAC1Wm0siVoTQUFBeHp6snfv3ns9BYsEBgbi5eXFvn377ijn95307+3tzb59++pczD7wwAM888wzbNq0iY8++sjiePV6Pb/88gt5eXnMnDmT8ePHs3r16jsep1gsJjQ0lJ49exIdHU1WVha///47a9euNatT3lyQWMlMdlO3Yy13MPFQrkKh8qr1umIraY0CHEBirTAT4FX9VQmb27lVeFZdo0qAG8bkffsp5tdXOOBwq1OVSISta1ANY7Qx/r/Ba96lhqvqLc7F2IVYYmKbrh6v5XtoJVMahajczs14vFrLUfOYpTaOdXqIi8TWNTqv1XoPRSIUDp4oHDxrbCKR2hDx0CLSL2wysbuLxGLaP/pes44Nv5W/nRBHj7FMqbaifjsapzZOlNws4cpvhhWlVqPlwhcX0JbdXyeQu0lgYCAKhYK1a9eSlWVYDf/++++cOXPGYsnLqVOnYmNjQ0ZG7YuWadOmkZub22RCfOrUqRQVFTVZ/6+88gplZWXs2bOn1nZyuZxJkyYRHx/P6tWr61xw7N27l8DAQB577DH27NlDbGxso8ZX5Vnet29fNBoN+/fvZ9q0ady4YZ6OVKB5IRKJGxRydjseYYMRW8nv4ohaPiKxBM+2D5scq0pS01L4+wnxRqBwUVCSabr71BSZ1+BuSbi5GVbNt9cMrym+t6CggIKCup1rpFIpOTlNVwFIJpORlJTUZP1LpdJ6hVoNGTIElUrFm2++WW/1+IYNG+jXrx+jR49m7ty59R6TWq0mOjqamJgY5HI5hw8f5q233iIx0XIxkHtOCyn48VcjsPfLTT0EgRpoeCLbapq9EHcOc6bVoFbYetoisZZQml3KjeM3SN6VbLaTtvezx3+oP6oAFQA5sTnE/xBPWY75zrI+tJvQDpmDDIdWDugqdXSZ0QUATbGG0x+dbvScxGIxDz/8MH369MHJyYmMjAy2bdvGzp07TXZko0aNwtnZmTVr1jBixAh69+6NSCRi586dfPvtt42yjQ4YMIA+ffrg7GxQ982ZM8eo9l27dq2Z9/Drr7+OQmGwQ2k0Gt544w2za1pZWbFwoSG/sLOzM23btmXx4sWAobxlQwTO7YSFhTFixAh8fHyQy+VkZWVx+PBhfvrpJ6PGQCwWs2jRImP/4eHhxv6r5ngrLi4uPProo3Tq1AlbW1tycnI4duwY3333nbFUZhXDhw83zufpp58mKioKnU7HqVOnWLduHcXFxYhEIt56y+AA5OrqikKhMOl/7ty5Zqr16Oho4uLiuHTpUr3vRVlZGVu2bOGJJ57A3t6+1kWVnZ0dPXr0oF+/fnh7e3Po0CFWrFjBxYsXm8TMcCtlpcXoK0vu6MUlIPBXQacpwcG+7hTBNdGshbi9rz2dp3Ym41QGF9dfRK/T49TGiYCHA3AKdeLEu9WVg1wiXOg4qSOZpzO5+OVFrBRWBD4cSPd53Tk47yAVBRW19GSZwuuFlOeWo3RXoqvUUXDN8NKsLG+8l65EImHevHlERkayfft2du7cSVhYGFOmTCEiIoL33nvP2LZVq1b4+vqyYMECxGIxmzZtIiIigqeffhqNRsP333/f4P6zs7NJSEhAp9Ph4+NDYmIiGo1Bq2DJ1p2YmIhUKqVDhw60atXK4jV1Oh0JCQkAREREkJ+fb/z7TgSGj48Pixcv5syZM6xatQqNRkPbtm2NAnjGjBmAwfmrqr/w8HAKCgqMf9+OWCxm6dKlFBcXs3fvXrKysvDz8+ORRx6hffv
2zJo1y2Rx5OfnR1hYGHPmzMHOzo5ff/0VhULBsGHDiIuLY+dOg39FVX+hoaG19g9gY2NDUFAQX33V8KpOJ06cYOTIkbRr144DBw6YfHZrsZHQ0FCOHz/O999/z/Hjx5uVs2lRYS6VRWlIbWu2QwsI/F2QaHLxcHOuu2ENNGsh7hjsiEgiIu67OEoyDAIm63wWmWczEUmq1/FiKzFtn21LxskMzqw+YzyeE5tD9JJoWg9uTdz/Gh7neXWzIaRCFaiisqySuO/uPFZ00KBBdOnShZUrV7J582YANm7cyPXr1xk9ejTHjh3j4MGDxva+vr6cPHmSTz75BL1ez88//8yKFSuIjo5ulBA/ceIEJ06cYOjQoXTq1IkNGzZQXFxcY/sNGwwZnSZMmFCrEP/ss88AQ0aus2fPGv++E9q0aYNUKmXdunVcvWp4FqdPn+bw4cM4OVXHbmq1WmN///jHP2rtX6fTsWDBAq5fv24i2I4cOcLSpUvp1KkTx48fNznHxsaG0tJSFixYYDzn4MGDXL9uKFOo1+uN/Q0bNozz58/XOn9nZ2dEIpGZKaM+pKWlAQZtwq24uLjw0Ucfcfr0aXbs2MGSJUuoqGj4wvV+kJ+XR2n6SaTu3Zt6KAICTY6tJpGQ4OGNPr9ZJ3vJjc9FV6kj4pkIbL1sTY7nXKq2uzqHOyNTybi27ZrJ+WU5ZeTE5+Ac1vhVzt1m0KBBpKWlsXXrVpPjP/zwAwUFBQwYMMDkeFlZGZ9//rlxR6vX67lw4QKurs3Pc1IikSCRSCw6xjWGixcvUl5ezssvv4y/f3Ws6+XLl83iqav6l0qlZirx20lOTjbbmZ4/f54bN24QEWEeUiISifj8889NzrF0DZFIhFQqrXP+MpkhtWVj7lNVyJe1tWloUFZWFmPGjGHx4sUcOnSo2QpwMCykctLi0ZXcbOqhCAg0KfqKPHzUcqRSad2Na6BZ78QLkgs49cEpQkeH0nNRTwqSC0g7mEba4TQqCqtfUrYeBgEfPjb89oJAKFwV6LXNw5FGJBLh5+fHoUOHzARAeXk5SUlJtG5tmus5IyPDTCgVFhZia9t4G8q9Qi43eL7erdji1NRU5s+fz8SJE1m5ciXXrl1j165d7Nq1y2IYVJVgq6t/kUhE586diYqKwtPTE4VCQWlpKQqFApVKZda+vLy8XrtmqVSKWCyuUzhXjd3BoeE5mR0dHU2ucSt3a/F0P4i9cAJHty9RRU4GUbN+DQkI3Bv0WuzzdvHYhMfv6DLN/teTeS6TrNlZuLR1wae3DyEjQgh8JJBza89x84RhJS+WikEP1/det5hFrb6hZPcakUiEWCyuMc66tLTUbIdlyXmtqR2TakL8Z65okejuuSydOXOGF198kQ4dOtC3b19Gjx7NqFGjWLlyJbt37zZpK5FI6uxfLpczb948goKC2Lt3L/v376e8vBylUomLi4vFFXFdO/sq6jvvnJwcCgoKCAkJYcuWLfU6p4qQEEO2qibzLL9LVFZWcuboTjpaybFvNx7Ejd+JCAi0NPQ6DbZZmxn1z77Y2dnd0bWavRAH0Ov0ZJ7JJPNMJjZqG9q/0J72E9qz66VdaDVayvPLQWSwgRelFTX1cGtEp9ORk5ODj4+Pxc99fHxadLxu1YLjTlRDNV23ypavUqmYM2cOU6ZM4fjx4xQVVT/vqrSgtfU/cuRIQkNDmThxotG+XMU///nPOx4nVKvLa0Kn0/HHH3/QtWtXZDJZvRcJAD169CArK6vFC3GAgvxcju/9kfYludgH9EeqjhR25QJ/bfRaJIVxOFZeYvSoIXh6uN/xJZv1L8a1vSsVhRXkJ1Yn4i/JKCH1YCpho8OwtrVGm6sl+2I2ep0ej+4eJPxwm1ewCLjPG1dnZ+ca460PHjzIQw89hL+/v8mLODIyEg8PD7Zt23a/hnnXKSkpoayszEwlLRKJGqU9aNu2LWKxmDNnqp0V8/Ly2LFjBxERETg6OpoI8fLycoqLi83U1Lf
2HxISQlJSkpkA79ix4x37GWg0GgoLC2vtv4pff/2VBx98kMGDB/Pzzz/X6/qtWrUiKiqKdevWNVttTEMpKS7m8J6NeCScwbtVG2Q2joit5dxZ5KzAXxEbGwUODiqsrJq12LKMXo+VBOwUYro+EErnTs/cNY1ls74bwY8FI3eUE/tNLJlnM9FWaHFo5YBfjB/5V/MpyzXYAEszS0nelYz/EH/EEjHJu5KpKKzAvpU9wY8Fk7wrmfRjBpumRCZB6WFIE2jjakhVaONmY1S5F6YU3pENfdSoUYwePZpvv/2WdevWmX3+9ddf06tXL9544w1WrFjB1atXCQ0N5aWXXiIlJYVffvml0X3fbSQSidFG7+joiEgkIjDQkLIxNTXVou35/Pnz9OzZk127dpGfn090dDStWrVi6dKlDe5/zJgxtGrVirVr13LkyBHKysrw9/fnn//8J8nJyRaTqly8eNHYf25uLr169SIwMJAlS5YAkJSURP/+/QkODiY+Ph6JRELXrl3517/+1eDxWeLChQtERUWxY8cOsrOziY6OJjg42BhHXkV8fDz79+9nzJgxnDx5kuTk5FqvK5VKmT59OpmZmfz22293ZazNifTUFNJTU5p6GALNmJiYGMY8M8XEyVWgmQvxM6vPEPJECBHjIhCJq1ctufG5nP34rEnb2G9iqSiqoPXg1rQe/KdzmB4yTmeQfal6V2zraUv3eaahLW3HtTX+/+5XdhvU842kyuGsJjtHQUEBM2bMYNKkScYEKTqdjmPHjvHBBx80SLV6r5HL5XzwwQcmx6r+nj17tskOuYp169Yxf/583n33XcDgmFcVptZQ3n33XZ599lleeuklJk2qrhUdGxvLsmXLLMY+f/bZZyxcuJD//Oc/gKH85a39r1+/Hl9fX5YvX05RUREKhYKMjAxWrlxpJmgbw7p163jzzTdZtsxQ/SgrK6vGePAVK1bwwQcfsGDBAubMmVOj85xMJmPOnDn4+Pgwffr0ZvUdERAQaFpaRBUzK4UVth62iCQiSrNLa83AJrYSY+tli0QmoSSjhPI80xeeSCxCIrdcSB5AW6Y1c46zkluh1+vRltftICeRSGjTpg1xcXF1lm5UqVQ4OTmRlZVlMfuWXC636Agnk8mwtrY2qpKHDRtmFjd8O4cOHSIurjrO3draGplMRnFxcY2qWZFIhFJpoUIQBtV5Tc9ZIpHg4eFBcXExubnV1eGUSiVPPPFEreMEg7biVk9rhUKBj48P1tbWZGRkkJmZWev5EokEd3d3SktLa0wB6+HhgaurK/n5+aSkpKDT6bC1taWystKkb7lcjkQiqTWWvjH93zqO+fPnY29vz6pVq9i3b5/J561bt2bKlCl4e3uzePFi/vjjj3qPQ0Dgr0RMTAxTpgg78dtp1jvxKipLK8lLrF9lJV2ljoKkmtNR6nV6YwGUevdfVv/2Wq2WCxcu1KttXl5erRWjagoZKi8vN9mN+fj44O1de1Wk28ek0WiMmdpqQq/Xm9ic64tWqzUmQrkViURCUJDlika3crvNq7S0lPj4+Ab1X1f+8vT0dLOdr6W5NiZsqz793zqO6dOnM378eEaNGmUmxPv3709lZSXTpk0zJrwREBAQqKJF7MQFBP4OWMqHbmtrW6u2REDg74KwE7dMi9iJCwj8HbBkUmmMJkRAQODvQ7NOuyogICAgICBQM4IQFxAQEBAQaKEIQlxAQEBAQKCFIghxAQEBAQGBFoogxAUEBAQEBFooghAXEBAQEBBooQhCXEBAQEBAoIUiCHEBAQEBAYEWSrNP9iISS2jV6VG8IwajcHCnND+da6d+JuXsRhCyWDWI8ePH4+Xlxfz58+/J9R3cQ/BpO4SEQ59TXlx7zvC/I2PHjiUoKIi5c+cKGdgEBATuCs1+Jx4eM5nwmFfITjnN+R3vknP9LO0Hz8LFr3NTD63F4e7ujpOT0z27vn/XJ2ndZQQ+bYfcsz5aMu7u7ri4uAgCHBCLxS2zLrS
AQDOjef+KRCJ82z9M0qmfuLjLUAIz7eJOclPPk3XteBMPTuB2Lh9aT2n+TZLPbmzqoQg0Yx555BGeffZZJk6cWGcddQEBgdpp1kJcYeeKxFpObup5k+Npl3Y10YgEaqMw6yqxe1c39TAEmjkqlQqxWFzvSm8CAgI10yyFuMJejTogCpmtoUa2o1dbJNZyAPJvxpOXdtHsHKWjNx5tHkSmdKI45zqpF7ejKSs0a6cO7IGV1Ia0iztwadUZdUAUlRUlZF45TG5a/UqIWsLJyYnevXvj7u5Obm4u+/btIy0tzaTNwIEDSUlJITExkT59+uDj40NmZia7d+8mPz/f7JpyuZzo6Gh8fX0pLi7m6NGjJCYmWuxfJBLRrl07wsLCsLKyIjk5mSNHjpiULL2VgIAAoqOj0Wg07Nu37452RC6tOqN0rC6FevPyAcoKs8zaqQO6I5ZIuRG/F3u3YDzb9AX0pF7YTmHWnZXZVKlU9OnTB09PT3Jzczlw4AApKSkmbfr37096ejoJCQn06dMHX19fsrKy2L17t8WSsDKZjF69etGqVSuKi4v5448/SEhIsNi/SCSibdu2hIeHY21tzfXr1zl06FCNpUxbt25N79690el0HDhwoMbn2hCq+pdKpbX2HxERQYcOHZDJZCQkJHDw4EG0Wq3xc6VSSXR0NGfOnDH7Dvfo0YPS0lJOnjwJGEwEHTt2ZP/+/YCh0pSbmxuXLl0yK6saGRmJWq0mODgYjUbDgAEDjJ+dOnWKGzdumLQXi8V0796d0NBQRCIR58+f5+jRo2ZVEtVqNZGRkRw/fhytVktMTAxOTk4kJiaye/duKisbVnpYQKAlIZl/F72ctm3bxpUrV+7Y5uegDiS07yQcPcOwkilROLij8gjDybs95cU5ZjvzwAdG02X4UuxcA5BYy/COGIR/1xHk34ilJM90td9u0Ezcg3pi4+BOeP8pIBLj5NWWgAeeIjf1vFn7+hAVFcXbb7+Ni4sLhYWFdOrUiVGjRpGWlkZSUpKx3fvvv49er2fEiBF07doVpVJJ3759GTx4MJcuXSIzM7N6ToGB/Oc//6FHjx5IJBIiIiJ44oknUCqVxhdoFc7Ozrz55puMGDECW1tbvL29GTRoEP369ePChQvk5BiczHr37o1SqUShUDBjxgysra3p1q0bQ4cO5cKFC2RkZDR47gD+XUbg024Y6tZd8QofQMaVQ5TkpZm1C+//Cmr/7kgV9nQcNh+JtRy3wB74dXqMnJRTlBbcbFT/Xbt25Z133sHNzY2CggI6dOjAU089RWZmpolwfOedd5DJZDz66KP06NEDGxsbHnzwQR566CHi4+NNhEirVq1477336NWrFxKJhLCwMJ544glUKhUnT540+Y6rVCoWLlzIqFGjsLe3x8vLi/79+zNw4EAuXrxIdnY2AD179sTR0RGRSMTs2bORyWR07tyZhx56iNjYWDMhVl8cHBxYsGABo0ePxt7eHk9PTwYMGMDAgQOJjY0lK8uwoJJKpcyaNYtnn30WlUqFk5MTw4YNo1evXhw/fpzi4mLAIBTfeustYmNjuXbtmklfr776Kl5eXuzduxeADh06MGPGDK5du8Ybb7yBn58f3t7eDBkyBAcHB44frzZ7jRgxgr59++Lt7Y1UKsXPz4/w8HDCw8OJi4szWTA4ODiwZMkSBgwYQF5eHiqVipEjR9KmTRv27dtncv/btWvHzJkzyc3N5dVXX8Xb2xulUsnQoUMJCAgwjrUKKysrnn76aXx8fBpUp16gafH396d79+44Ojo29VCaFc1yJ56dcobfVz2GnUtr+kz4mrNb3uZG/D6LbdWBPQjt+xIpZzdxZvNi9DotUoUD3Ua8S+dHl7Dnk1FmwkFu54azXyS/r3rMuGMMeOApclJON3isnp6ezJgxg82bN/Ppp5+i1+sRi8XMmjWLyZMnc+rUKZMSkz169GD//v189NFH6HQ6XFxcePvtt5k1axbjx4+nrKwMhULBvHn
zqKio4MUXXzS+3EePHs2oUaNITU1l06ZNAEgkEubOnYuLiwuTJ0/m8uXLhvuiVjN79myioqKMxwC8vb2Jiopi3Lhx5Obm4uzszOrVq3n88cc5f950cVRfzu94D3a8h9r/Abo9ubzWtrYurXHX6/h99XDKCjNR2KvpM/5r/LuNJjvlTIP7dnNzY9asWezatYtVq1ah0+kQi8VMmzaNl156iZMnTxqFKEC3bt04evQo06dPR6vVolKpWLp0qVG4FRcXI5PJeP3119Hr9UycONGo9h0xYgRjx47lxo0b/PDDD0D1/ffy8mLq1KnExsYC4OLiwqxZs4iOjiYuLs7Yv1qtpl+/fjz33HNkZWWhUqlYvXo1TzzxBKdOnWrw/MViMXPmzMHPz4/p06dz8eJFs/4vXboEwJgxY+jRowfvv/8+27ZtAwyLxbfeeovXXnuNKVOmoNVqEYlEDR7HhAkTeO+99zh27Jjx+z9kyBA2bNhg1DItX274bkyfPp2OHTsybty4Gq83ZcoUHB0def75542L0E6dOrFo0SL+8Y9/8OOPP5qdM2LECFasWGHUALRq1QoPDw+zdpGRkTz55JMAHDt2rNGLVwGB5kCz906vi8DuYygvzubc1n+j1xlUghWl+ZzZvBiJVEHrzo+bnWMlVXD6t4UmKt8rRzagrbSseq6Nhx56iPLyctauXWvcHeh0Or788kvkcjndunUzaa/X6/nkk0+MKsGsrCzWr1+Ps7MzXbt2BaBv3764uLiwZs0ak93Zhg0bSExMZMSIEYjFhkcXFRVFSEgIK1euNBHWGRkZLFy4kC+++MKkf2tra95++21yc3MByM7O5ujRo3h7e3M/kFhZ88ePcygrNGgdSgsyuHn5AHYufo263uDBg9Hr9Xz88cfGe6rT6fjiiy+QSqV0797dpL2VlRUffvihUX2cl5fHZ599hoODA1FRUQD06tULT09PPv30UxO77f/+9z9iY2MZPny48f5HRkYSHh7OqlWrjAIcDM910aJFfPrppyb9W1tb8+9//9u4O87Ly+PQoUP4+Pg0av6RkZG0a9eONWvWGAX4rf1/8sknANjY2PDQQw9x5MgRowAHuHz5Mhs2bCAoKIiOHTsCNEqI//jjjxw7dgww3P8tW7YgkUjw8vIyayuTyWo084BhodmtWzc+//xzowAHOHnyJOfPn6d3794Wz9u7d6+JCv/atWscPnzYrN358+c5dOgQGzduND4HAYGWSrPcidcXkdgKJ++2pF7YZiaACzKuUJKXhrNvR7Pzygoz79gGW0VoaCiFhYU89thjJserwmfc3d1Njl+4cMHsBXb27FnAYKfet28fbdu2RavVcuLECZN2er2eI0eOMGrUKNzc3EhPTycyMpLS0lL++OMPs7FVCepbSU1NNXtx5eTk3DcVVVFOCmWFpjufsqJsZErnRl0vLCyMgoICHnnkEZPjIpEIvV5vdv8vXbpkZieuuv/+/v6AQT2r1+s5evSoWX9Hjhxh0V260wAAIABJREFU7Nix+Pr6cu3aNTp37oxGo+HgwYNmbS3Z2TMzM83szDk5OahUKuOYG0KnTp3QarVGm3RN/YeEhCCTySwKtSNHjvDiiy/Stm1b/vjjj0YJ8dOnTbVYVcLX0vdKJpNRWlpa47XCwsIAw/NwcXEx+Uwul+Pm5mbxvNvNTDVRXFzMm2++Wa+2AgLNnRYtxKUKe0QisUUnKoDyomxktubCwZLDW2Oxt7dHKpUadzG3cvr0aTOBaenFXlhYiF6vx8bGBjDYA3Nzc02cjaqoejk6ODiQnp6Og4MDhYWFFttaoqKiwuxYlQr6fqCtMH9563VaRGJJo65nb2+PTCajU6dOZp+dOXOGmzdNTSmW7n9JSQkVFRXY2toar1lYWGjxXlU9TwcHB+N/i4uL7+j+a7VaJJLGzV+lUlFcXFyn85a9vT2AiWmhiqpjVXNqDLfPq0orYmle1tbWtY7Xzs4OMKj6b6e4uNhE43ErhYV
373ctINBSaNFCvKK0AL1eh9TG8i5SpnSiosT8pX03k22UlpaSnp7O3Llz69W+SlDcikKhQCQSGW3neXl5hISEIBaLzTxxq3Y2VXbGgoICVCoVUqnUooD4q1NaWkpRURGzZ8+uV3tL918mkyGVSo33ND8/H6VSibW1NRqNxqRt1f2vWgwUFBRgZ2eHQqGodXd5rygoKMDW1hYbGxtKSkpqbFc1N5VKZfbZ7d+pKk2FTCYza6tQKO54zDVdu4qq/lesWGGmtRAQEDClRdvE9bpKcq+fxS3gASTWpi8Fe3UASicfspPrp2JrLPHx8QQHByOXy+vVPjg42GzX27ZtWwCjA9S5c+eQy+VERkaatBOJRHTv3p2MjAyjrfzUqVNIpVL69OlzhzNpmcTFxREQEIBSqaxX+4CAALNMYeHh4cZrgUG9LpFIzPwZwOCYmJeXx/Xr1wGDClcikRATE3Mn02g0J0+eRCwWm4RrWSIuLo7y8nIzHwEwzAkMmgsw7Mx1Op2Z2tre3r5GVXZDKCgoqFWIVz2Hqt+FgIBAzbRoIQ6QcOhzZEpnIgbMQCyxBkCqcKD9kNfQlBVx9Y/v72n/GzduRKFQMH36dJNdXlVIzO0C283Njaeeespod3RycmLs2LGkpaUZbXq7d+/m5s2bvPDCC0abrlgsZvTo0fj7+/Ptt98atQkHDx4kMTGR559/3uQFbW9vz6xZs2jTps09nX9Ts2nTJiQSCTNnzjSqjMEw/5EjR5oJbGdnZ8aOHWt8LiqViueee46bN28aHbMOHDhAWloazz//PJ6enoBhATVixAjatGnDt99+a1SfHz16lLi4OJ599ll69eplfK62trbMmDGDiIiIezr/48ePExsby9ixY4mOjjbrv3379oBBY/Hbb7/Ro0cPBg4caDw/MDCQp556iri4OKMQ12q1XLlyhf79+xvvqVgs5tlnn70rZpdr167h5uZmdKaUy+U8+eSTRtX75cuXOXv2LOPGjTMusKrGMHz4cPz8GucEWYWtrS0LFy7klVdeabQZQ0CgudCi1ekAGVcOc37ncsL7TcItsDtFOddxcAtCr9Pyxw+vmjlR3W0SExNZtmwZkydPZu3atcTFxSGXywkKCiIxMZFff/3VGH8LBjv5wIEDiYmJISMjg8DAQMrKypg7d67RTlhaWsrChQtZsGABa9asISEhARcXF1xdXfn+++/ZsmWL8XoajYaFCxfy+uuv8/rrr5ORkUFhYSG+vr6UlpaatL1XdH50CfZuwVj9qQ3p9PB8KjXlJJ/+hcuH19/TvlNSUnj77beZPn06//3vf4mLi8PKyorg4GBSUlLYtGmTSYjf2bNn6d27N7179+bmzZsEBgZSUVHB66+/bnQ4LC8vZ8GCBSxcuNB4/x0dHXF3d+fXX3/ll19+MV5Pp9OxaNEi5s6dy5w5c8jKyiI/Px9vb280Gg27d+++p/O/tf/Zs2czfvx48vLy8PHxobKykj179hjbrl+/HldXV1555RWGDx9OcXExgYGBJCcns2jRIhO7/po1a1iwYAGrV6/m/Pnz+Pn5kZaWxsWLF7G2tr6jMe/cuZPHHnuM5cuXk5ycbExmtHPnTqPPwdKlS3njjTd45513SEhIICcnh8DAQGxtbUlJSTHJv9BQIiIi6NKlC2CI+Lg1P4OAQEtDpL+LBuKpU6eybds2MztuY7GS2eIVFkNm4lFK8tNrbWvj4IF7SDRShSMl+Wmkx/6OpqzIrJ06sAdWVjLSYn+/K2OsQqVS0bNnT7y8vNBoNFy6dMksu9TGjRvZunUr69evZ+DAgXh6epKamsrOnTtrzBgWFRWFt7c3JSUlHD9+vMbMahKJhK5duxIaGopcLicpKYm9e/dSVFR9DyIjI5HJZBw6dMjk3JCQEPz8/Ni+fXuj5u4eHI1MaV5YJf9mAnm3ZMG7NWPbrag8w7F39Sf5zG+N6h8MO+/o6Gi8vLzQarXExsZy6NAhk/v/ww8/cPjwYVavXs3
AgQPx8fEhLS2NHTt2WPTkl0qlREVF4evrS0lJCSdPnqwxs5pYLKZz585EREQgl8tJSUlh7969JguIDh06YGdnZ+ZJHhgYSFBQkHHB5evrW6u6GeDmzZsm167qPzw8HBsbG5KTk9m3b5/FTIChoaG0a9cOa2trrly5YjELGhgiK/r06YOTkxMXLlxg//79hIeH4+joaAzlujVj263ftaqsb6dPnyY93fy36+LiQs+ePVEoFCQlJXHs2DEzZ7eqOYWFhWFjY0NaWhr79u0zCTuD6oxtR48eNfvMEtbW1jz55JNkZ2ezefPmOtsLNA9iYmKYMmWKMYpEwECzFuJ/NaqE+MqVK5t6KH9LqoT4smXLmnootfLBBx8QFBRUZ5v7oWUREGguCELcMi1enS4g8Fdj1qxZddqea0uWIiAg8PdBEOL3kZpijwXuD8XFxTUWJGlO1BYqJiAgIHArghC/j4wcObKph/C3ZsyYMU09BAEBAYG7SosPMRMQEBAQEPi7IghxAQEBAQGBFoogxAUEBAQEBFooghAXEBAQEBBooQhCXEBAQEBAoIUiCHEBAQEBAYEWiiDEBQQE7jkhISFNPQSB+0RQUNBdKZQjUD+EO91CkcgkiK2ExyfQvHFxcWHp0qW8/fbbd1w4RaBlMHPmTFasWHHH1eYE6oeQ7KWJsPO2o8ebPUyO7Xt1HyUZtWfr8unjQ8CwAOROcvQ6PZlnM7n01SVKM0vv5XAFBBqMj48Pb731FiKRiIULF6LRaCy2c1BZoXaTUlKsJS21nLtXzaF5IpaAQlF3CdTiIkNVOaWtadvSUi06raUzTHFQWZGfV1nj57dftwqNRkdFueEhiMUiFDZiyst0VFaaPpiqzyo1esrLq+tlLF26lBkzZrBs2TLmz5/PhQsXELh3CEK8iSjJLOH4O8cBcOvkhm8/3zrP8erpRfi/wknakUTqgVSk9lJCR4bi+6Avcd/G3eshNzkymQw3NzcyMzMpLf37LVpkMhlqtZqcnByT8rb3C6lUilqtJjc3t87+lUolb7zxBjqdjlmzZnHjxg2zNr6t5Dz/kjdhEUrjsfS0ctavTefIQfPqa1KpGFe1Nbm5GkqKW26RpTahSt5aFlhrG61Wz/ChZ7G2FvHl96Y16XVayMys4PCBPL754qaJAL2VkWPc2fRzFinJ5qmGRSLMrlvFlo1ZfLwyFQA3dykf/bcNe3bm8v4y0wqKKkcr1m4IY8V/Uvh9R3X1uMuXLzNjxgwWL17M66+/zuTJk7l582at8xVoPIIQbyK05VqyL2YDoPRQ1tHagFsnNwpTCrn09SX4c1F86qNTlGT+PXJtT5gwgQEDBjB8+PCmHkqTMG7cOIYMGcLjjz/eJP2PHTuWhx9+mCeeeKLOtuPHj0etVjN16lSLAlyhELPgbX/yciuZ8n9xpKaU4+IqZcjDzsx4zY+Xx8eRlmpa5OWpse4M+6crTz9+/q7NqSm4drWUN2ZfMf7do5eKAUOceXvBNUrLDFvs27URB/bmGRc2Nkox7Tva8chwNS6uUv6zxLy2urW1iJ7RKgoLKtmwzvz+6/Uw/eV4AIY87EKvPo7MnpoAYHH3Ht1XxaZfs7gcb/6uKSszX0QUFBSwcOFCVq5cySuvvMLs2bNruh0Cd4ggxFsQNmobCpIKjAIcoCjVvGb6XxWZTEZubu7ftoKXTCajsLCwyYq4yGQyioqK6izQ4unpSUxMDL/88guXL1+22CYkVIlKZc27S5K5lmiYT3paOWtXp3HpQomZAAewloooLdEZ1cwtlZJiHWdPVf9uAwJtADh/tojiYstzS0kq4+C+POPfO7bkUJBfyeBhLnz2cRo52aamisiu9ihtJfTu68jX62+i05nbKK4kGLRZuTmV6HR6498W0YsYN8GTOdMtP09LZGRk8MUXX/Diiy/Svn17zpw5U+9zBeqPIMTridJTiW8fX2zUNogkIkoySkg/mk5ufK5ZW7mTHN++vtj52KHX6sk6n0X
K3hT02sYZ+4IeM3h7VtnBQx43ePqW55dzbfu1Rs9JJBLRrVs3oqKiUKlUXL9+na1bt5KcbKo2Gzp0KBqNhp07d9KvXz8eeOABNBoNW7ZsuaMfpkKhYPDgwYSGhqJUKiksLOT06dPs3LnTxH768MMP4+zsTFBQENbW1jzzzDPGzzZu3EhmZqbJnLp06UJUVBSurq4UFhZy7tw5duzYYVZBztHRkX/961+Ehoai1WrZtWsXO3bsYMSIEYSFhTFr1izjgkEikdCvXz+6dOmCXC4nJSWFX3/91eIus77I5XIGDRpEWFgYtra2FBUVcebMGbOxDh06FLVaTZs2bZBIJCbz37Jli9kYOnfuTI8ePXB1daWoqIgLFy6wfft2s8VPcHAwPXr04LfffsPNzY2hQ4eiVCq5fPkyP/74o1FlXtV/aGgoYrHYpP+tW7eSnp5uct0+ffoA8OOPP9Y4d2upyOS/t3Jof57J3wOHOOPmLiU0XIkePWPGeRg/2741mxtpps+1Y6Qd3XuqcHWzpqhQS3xsCdu3ZFN+247R3sGKJ592I6KdLehh/548tmzM4p+Pq2nb3pZF865SkG/YlUokIno9qKJLN3tslBKuJ5ex+dds0tOabkF57HABg4e54O0rMxPiD8Y4cfZUERHtlYSG23Dh3J2ZX7ZtyWLwQy70iFaZLCbqPG/bNkaPHk1MTIwgxO8RgntzPbD3syfqjSgc/B3IvpRN9qVslB5Kus3uRtBjQSZtHYMd6bmoJ86hzuTG51KaVUrIkyF0ntoZkdj8hVUfbFxtULgqEIlFWMmtULgqULgqkKlkjZ6TWCxm+vTpzJs3D7VaTUZGBl26dOHDDz8kJibGpO2DDz5Inz59mDNnDk8++SRFRUUEBQWxaNEiOnTo0Oj+Fy9ezOOPP87169c5fPgwpaWlTJgwgffffx9bW1tjWx8fH4KCgnBwcEAmkxEUFGT8p1AoTK47adIkXnvtNZycnEhKMqgZx48fz7Jly5DJqu+XQqFg2bJlhIWFsWXLFg4ePMjIkSP54osvUKvVHDp0yOhNLZPJWLJkCc899xzp6elcuHCBTp06sWLFCvz9/Rs1f5FIxKJFixg5ciRpaWkcOnSIoqIinn32WT744APs7e2Nbb29vY3zl0qlJvOXy+Um1/2///s/5s2bh7OzM8nJyej1esaNG8e7775r1rZ169Y8/vjjDBs2jDfffBOJREJubi5Dhw7lxRdfNLbz8vIiKCgIlUqFtbV1rfcfIDIykoSEBLKzs2uc/8VzxeTnVzJ2vCcurrV7rXt6ywgIssHR0RorKxEBQTbGfzY2ps5Z45735LWFrXF1syYlqQy9HkaP9WDZB8EoFNWvO5lMzKJ/B9Cpsz27tuWwf28e/3jMlXXfhOPtI+PwwXxEf/5craxEzH2zNeP/z4usTA3nzxQR0d6Wdz8MJriNTa1jv5c4uxjuW2GB6e7d1k5Cpy52/PJDJrEXS3gwxumO+zpxtJCzpwt5epwHUmn9xUZFRQUnT54kMjLyjscgYBlhJ14P3CLdEFuJOfbvY+gqDKv5q5uv0npQa4rSq9ViYqmY9s+3J/tiNqc/Oo3+TxVW1vksIqdE4hnlSeqB1Ab3f2a1YQUbvTSanNgczn925zbBIUOG8OCDD7J27Vp++OEHwOC49Nprr/Hyyy8TFxdHSkqKsX27du3YtWsXEyZMQKvVYmdnx3//+18GDx7M6dOnG9y/h4cHISEhrFixgi1bthiP/+9//2PEiBEmjlMffvghAEuWLEEulzNnzpwar7tt2za+++470tLSjMdat27N8uXLGTx4MD///DMAvXr1wt7envHjx5OXZ9hZxMfHM3/+fD755BMyMjKM548ZM4ZWrVoxadIk4673559/5sMPP+SFF15g5syZDZ6/Wq0mLCyMVatW8dtvvxmPf/PNN4wePZrCwkLjsTVr1gCwcOFCnJycap3/zp07+e2330yena+vL++//z7Dhg3ju+++Mzu
nX79+TJo0ievXrwMGdfit4WAff/wxAAsWLECtVtfaf9XcTpw4UWub4mIt/1mSxKuvt+LDT0PZviWLX37IIiuzwqztZx8bnuXsN1rh4SUzsSffzp5duezYmkNKUrXJwdNLxrsfBvPQP1357iuDg1Xnbvao3aW89Fycsc+L54pY9E4gX667YXL+4yPdaBOqZMakBK6nGI7/9lMW73wQxPMveTPtpfha53q3sbYWERpuy5NPu5N4uZRriaZq8F69HSkp1nL6VAFqd2tGP+PBJx+l1ugAV18++ziN/6wMZugjLvz0bUbdJ/xJUlISvXv3RiaT/W1NYfcSYSdeD4rTihGJRQQ+HGgSm31161Uyz1Srct06uiF3kpPwQ4JRgANkns2k5GYJbp3c7uu4a+Mf//gHycnJ/PTTT8ZjFRUVrF69GolEwuDBg03aFxUV8eGHH6LVGlb9VWpqDw8PGkNWVhYlJSUMGjQIN7fq+5Kens7y5cvRW4gzkslkddqDY2NjTQQ4wNWrVzl27BidO3c2HnNzcyM5OdkowAHOnTuHSCQymZO1tTUDBgwwU1uXlZWxfft2wsPDsbOzq//E/yQnJ4eioiIGDBhg0l9GRgbvvvuuxfkrFIo65x8fH28iwAGSk5M5cuRIjbuh77//3ijAAdLS0oxajFuRSqX1igqwtbWtl/f8udNFTJsYz749uQwe5sKHa0MYO94TO3vLoU9yuYSy0toFUeLlUhMBDJCWWs6hA/l06lz9nFzdrLmRVmGyaLh0oQSdFtw9pMZjEomIgUOd2b0zxyjAASoqdGzfko1/oMK4I76XjBzjzk9b2/PT1vZ8+1s7Frztj6ZCxzuLr5k5wUX3VXHoQD46LRw5UIBCLqHLA/aWL9wAriWWsXtnLsOfVOOgqv/+r2pBqlTWz4FXoGEIO/F6kHYkDaWnEv8h/vj08SH9aDpph9LIu2JqG7L3tUev0+M3wDzJgUQmQe4oNzveFCiVSry8vNi0aRM6nelLMT09nczMTLMMW1evXjWzKRcUFBAYWHuoTE2Ul5ezYMECZsyYwaeffsrp06fZvXs3Bw8erHG1LpfLKSgoqPPadnZ2dOvWDX9/f2QyGTqdDicnJ2xsqlWfqampPPzww9jb2xuvGRERgV6vN1kEuLu7o1QqCQ4O5uWXXzbpx8vLC5FIhIuLi8nOuT5oNBrmz5/PzJkz+eSTTzh79iy7du3i4MGDNQpqmUxWr/krlUq6detGYGCgcf4uLi6oVCqL7ePi6heeKJfL6+VUV1BQYGIOqY2bNyr48L0UvvvqJo+NUDP0Hy707K3i1VcSyM4ytfNKZaJ67SaVSgmdu9kTEKRAJhOj04HaTYqTc7WwTbtejrunFGcXa2M/bcJsEEswsbE7OlnjoLLCr7WCFyd5m/Tj5m4Q9q5qqdlY7za3eqeP+pc7FRU6ZrycYBa77eEpIyRUyZefGfwU8vI0nD1TSO++jhzYW39bdk189fkNekarGDXGnf9tqF/YmIODA3q9nvx887BBgTtHEOL1JOHHBFIPpOLVwwvP7p749vUl70oe5z49R/ENw67DysYKfaVl57WM0xmU5TaNV/HtVAmzW3eht5Kfn2+2aq6sNA87uX0B0FDOnTvH+PHjiY6Opm/fvkydOpXnn3+eNWvW8Pvvv5u1F4vFRk1ATQwYMIDnn3+e7OxsLl26RH5+PnZ2diiVSqTS6h3WkSNHGDNmDP/+97/ZunUrSqWSRx55hH379pk4ylXdB0uJSlJTU0lNTW2wAK/i4sWLTJgwgV69etGvXz+mTJnCCy+8wCeffML27dsbNf+YmBheeOEF8vLyuHDhAvn5+dja2mJra2viE3Ar9Y05l0gkdfYPhvvSUF+BjJsVrPrgOr/vyGHRO4GMeMqdj9431SiIJSL0FbU7h0Y/6MjzL3mTn6/h0oViCvK02CjFKG3FyOTVWrQzJ4vIztLw5r8D2LoxG7lczEOPuHL8SIFJXLWN0nCOpsL8u37
zRgXbN2eTl3dvBTiYeqdbWYl4ZaYvQSE2XLpg+uyi+6oQieDZF7yo+nk6OVth72CFg4MV+fk1J3+pDznZGn76PoPHR7px9FDdC0owmLNu3LhRr++OQMMRhHgDKMkoIeGnBBJ+TkDdQU3EMxF0mNiBg68fBKCiqAKRlYhLX18y2s6bI/n5+Wi1WtRqtdlnIpEItVpNYmLifRlLeXk5O3bsYMeOHXh4ePDSSy8xdepUkpKSuHLF3PZZkyACCA0NZfLkyaxbt87M9jtt2jTCw8NN+s3IyEAsFvPoo49SUlLCxo0b+eqrr0zOq9r5Hj16lE2bNt3JVC1SUVHBrl272LVrF25ubkycOJHJkyeTlJRktkPW6XS1zj84OJhXXnmFDRs28PXXX5t8NmnSJBNzQmOoq/8qjh8/znPPPWd0mGwIcZdKiI8twcvHQj96y97sVbTylzNpug8/fpvBV5+beuxPmOhFVHS1JqKiQkd6Wjl2dlY89IgLFRV6dm3P5usvTM+rcho7fbKIn79v2FzuFQf25jF8pJpR/3Ln9ZnVvxGRCPr0dTLZtQOIxTBpui89+6jY9EvWHff/8/eZ9B/kzJhn6zalyeVyOnTowM6dO++4XwHLCDbxeuAc5oyVzS3rHT1knMogZW8Kdl52iCSGF0tuXC4isQjXtq5NNNL6UVFRwblz5+jWrZvZjjsyMhKVSsUff/xxT8cgk8nMhEp6ejqrV69GLBZb3Mnl5ubWalfr2rUrer2eX375xeS4QqGgY8eOJsdCQkIIDw9n7ty5jBkzhhdeeIHPP//cbMednp5OdnY2Xbp0aegUa0UqldK1a1eTYzdv3mTVqlWIRCICAgLMzrGkIbmVLl26IBaLjc57VVi6142hoKDAxCRRE3v27KGioqLWpDRqNynhbc3nIpGIcHS0IjPD3MEtP68SpbLmdKWRXeyRSET8+mOmyXGpVEynLqY24VatFUR2sWfJ/GtMGHOJl56L5fNP043pRqvIzdFwI62CTl0a7vdwr9Bq9Xz3VQYR7Wxp17HabBHcxgZ3Tym//JDJwX15xn/79+Rx5mQh0X0d70r/5WU6vvr8Bn6t6zYPDhkyBIVCYVGzJHB3EIR4HYgkIsLHhNN5ameU7tUvHbmTHLeObuRdzTPGf2dfzCbvSh7h/wpH3bF6lytzkNH2ubbY+dy/F8Hw4cP55ptviIqKsvj5+vXrUSqVvPrqq0ZbaWBgIJMmTSI9PZ2tW7fe0/E9/vjjvPrqq/Tu3dtY8cja2pqBAwei0+ks2mkTEhIICAigdevWAPj7+zNlyhREf8YCZWdnmy0A7OzsmDZtmknIFhgEklarZfny5UyePJlnnnmGkSNHMmDAABPbsV6v55tvvqFr165MmDDBuBMVi8UMHTq00dnTHn30UWbOnEnfvn1N5j9o0CD0ej2XLl2yOP/WrVsb/RBatWrFtGnTjOdXhXTdugBQKpVMmTKlRnt4Q4iPj8fX15egIENYpZ+fH9OnT0ciMRWsubm5/PzzzwwePNhE+3ErT4xy49XXWzNoqLNRzW3vYMULk7xx85CyfbN5eNqVy4YdenCIYSHh7SvjlRm+SP5cRFfFSvsHVoe9KZUSJk7xNnM+KyyspKJCx6JlAUyc4sOYcR488ZQbMYOccHQybfvNhhu0bW/LC5O8jSFtEomIfgOcGDXG3eL8YgY6sf7bCPoNvPPwLksc3JdHSnIZo8dW74b79HPiRlqFxaxq+/fkERxig5e3zDh+pa0Epa0EmazhYmDPrlyuXqndydHT05NRo0axd+9erl692uA+BOqHoE6vA71Wz+VfLxPyeAi9lvSiLMdgL5OpZJRmlXL+w+pwL71Oz8kPTtJ+Qns6TeqEpkiDpkSDjasNBckFVJZU26PUHdV0mtTJpK/opdF/Xgi2jrszIdqhQwfs7e2JiIjg0KFDZp/HxsayZMkSJk+ezBdffEFxcTH29vYkJiby1ltv3fOsYL///jsRERG8+uqrTJw
4kcLCQpycnBCLxaxatcos4QzAb7/9RkxMDCtWrKC0tBSlUsnhw4eNXus7duygd+/eLFu2jPj4eMRiMb6+vuzcuZPExEQTQW5ra0tmZiZxcXH4+Pggl8txcHDA0dGR0tJSXnnlFVJTDeGAmzZtQqlUMnLkSB566CHS09Nxc3OjsrLSGP7WUPbu3Uv79u2ZPn06L774IgUFBTg5OSGRSPj4448tvvQ2b95M//79ef/99ykpKUGpVHLs2DHkcjklJSXs2rWLPn36sGTJEi5fvoxIJMLHx4e9e/eSkJCAq+udaYg2b97MgAEDWL58ubH/48ePI5fLzezqX331FR06dGDOnDnMnj3b7Hl+8+UN1G5Snn/Zm+f+z4vyMj02SkOhjY+WX+fieXM7/bZN2fQf5Mzby4MoLdFhoxRz4ngBcoWY4iIt+/fk0aefE/MW+XM4i1h1AAAgAElEQVTlskHA+PjKOXwgj9iLxfj4Ve8cbe0k5GRVEnepGE8vGTZBNtg7SHBQWaGp0DN76mWuXTVcY++uXOztrXjqX+7EDHTiRnoFrq5S9Ho9n662HDIa3s4WO3sJbdvbsmtbjsU2d4JhN36TqbP86PKAPaf+KKRHtIotGy2ry48dzqe8XEd0X0e+Xn8Ddw8pKz9tY9Lmp63tATj5RwFvzq1d6Op0ej77OI2FS801RgDOzs7MmzeP/Px8Vq1a1YgZCtQXkd5SLEsjmTp1Ktu2bbtjh6fmiNhKjIO/AzZuNojEIkozSslNyEVXaXmu9r722PvZI5KIKEorIu9ynknYmZXCChu3mlWTBdfMnUZsvWzRlmkpza47zMfFxYVu3bqxZ8+eWh2XZDIZ7dq1w87OjvT0dGJjY83Cm7y9vdHpdGahW2q1GhsbG65duwYYksLcnlDkdk6cOGFiJ/Xy8iI4OBgbGxsKCgo4d+5cjQ53YHDKi4iIQCKRcPXqVbNsZSKRiHbt2uHt7Y1Go+HcuXOkp6fj4+ODWCwmKSkJKysrPv/8c7777jsz1bNarWbt2rV89dVXZnZle3t72rdvj729PXl5eZw5c4aiouo8Ab17965T3Xz69GmTDGeenp6EhIRgY2NDYWEhZ8+erXX+crmctm3bYmVlxbVr18yypYlEIiIiIvD19UWj0XDhwgVSU1Px9vbG2traZHHg4uKCt7c3sbGx9V601dX/rbi4uBhj21esWMHBgwfN2nh5ywgMsUEuE5ObW8m504WU1hJGJpOLCW+rxMpKRHJSmVm2NoDQCCV+fnI0lXriLpZwPaUMTy8ZMpmYq4mliESwel0ou7bl8O1Xph7Wjk7WrPpvGzb/msX6/5rOzdbOIJQdHKwoKtJy5lShWaKVKhwcrOjey4EjBwrqdHxzUFnh4mrNtcQytLdldRSJDJqFnOxKcnNMryMWi2gdIKcgX0t+XiU+fjLS08prLA7j4yunslJPelo5UqkYHz/L/g0lxTpjJjpraxG+reTcSKuwmBK2tb+CjIwKk1S4ERERTJs2DYlEwty5cy0uyBtDTEwMU6ZMaXSCpb8qghAXuGusXr0aJ6fa1YfvvPMOx48fv08jsoyLiwvr16/nvffeY8eOHSafSaVSvv76azZs2FBr2lBLrFixwiTm3RLvvfcehw8fbvCYWypKpZKXXnqJ6Ohoxo4da+L533RjkvDlDxF88lEqm3813blKJCLWbghjy8Ys/velUHmroVhZWfHtt98SFxfHsmXLas3a11AEIW4ZQZ0ucNd44YUXmnoI9aIq/OzFF1/E29ubuLg4ysvLcXZ2ZtCgQZSVlbF79+4GX/f2OHIBQ/ja0qVL+eGHH5qFAAdDtrgzJwsZ86wHajcpcZeKKS3R4eRsTb+BTojFIn7fbl4TQaBuKisrmTlzZo2FbwTuPoIQF/jbodfrmTNnDjExMXTr1o2uXbsilUqNWejeeustcnOFl/jdpLm91N964yrRfR3p1t2BDp3skMlFFBfpuHi+iPeWJpGVee9jv/+qNLd
n/VdHEOICf0vKy8vZtGnTPYn9Fmj+aDR6dm3LuSdOZwIC9xMhxExAQEBAQKCFIghxAQEBAQGBFoogxAUEBAQEBFooghAXEBAQEBBooQhCXEBAQEBAoIUiCPGGIBKhdPTCyacDcrvmVeTE0dERd3d3Yx7t5oyTk1OdSVEEBAQEBOpGCDGrJ0onXzoOm4ejV4ThgF7PjYT9nN64EE1ZUe0n3wcmTZqEv78/Y8eObeqh1MmMGTOQyWRMnTr1nlzfv+tI/Ls+yfHvZ5J/w7yQyl+dYcOGMXz4cBYvXmyxkIyAgMBfh+a/bWsmdH70LeS2Luz771g2vt2Dg1++iMojFJnSpamHZqSiosIs7/ndIjw8nAceeOCuXa+iwjzn9d3CzjUAhb0bSiefe9ZHTcjlcjp37mys9NUU+Pn54eLigpeXV5ON4W4TEhJCjx49mnoYAgLNDkGI1wNruR326iAuH/mC/Bux6HVaclJOs3/dOIqyrzX18O4LCxcurLGsaXPj/LZ32Lf2adIu7rzvfYeEhLBgwYI7rhh2J6xZs4aXX36Z33//vcnGcLd57bXX6Nu3b1MPQ0Cg2SEI8XqgcDDU7C3JNS07WFbYPHJB3w+sra3JyrJc5rC5oa0sJ/9mQpP0LZVKAe5q4YeGotFoSExMbLL+7wUymazFfP8EBO4ngk28FlSe4QR0G4VU4QBAYNS/8Gk/DIDUC9u4Eb/PpL3ESoZ/t1F4hPRBqnCgKCeZaye+40b8frNrt+n9AmKJFVeObKDNg/+H2r8bFaUF3EzYR/yBz9BpG5e72dHRkWeeeYYOHTpQVlbGwYMH+eabbygvLzdpFxUVxaBBg/Dz80Oj0ZCUlMQv/8/emYdHVZ2P/zP7TGYmyWSyr2RlJyAgICKouGKxVqm4tPaLS21dKtqqrai0/opai1pttbVabStVpFZc2kK1yo7sEEiAbGTfM5lk9vX+/pjmwjATEsKW6P08j4/MnTP3nnPuzX3Pu5z3/fBDSkpKxDYJCQncddddyGQyFAoF06ZNIz09HaDPOsE5OTl885vfpKioCIVCQU1NDatXr+bQoUMRbbVaLTfffDMzZ87E6/Xy73//m48//njQLoGCGd8hLvVojeRD617F0dUQ0W7krDsQBIHq7X+j8MLbSRs5h2DAR+2e1RzZsXJQ1542bRoXX3wxiYkh98ott9yC0+kEYOfOnXz2WbhV4KabbiI2NpYVK1Zw2223MWXKFNxuN9u3b2fFihWiu0Gj0XDNNdcwc+ZMzGYzTqeT0tJS3n333Qih9q1vfYuioiLx89tvv01DQ+T4Fy5ciEql4r333uOmm27iwgsvJBAIsGbNGlavXj3o+U9KSuKmm25izJgxxMXFYbVa2b9/P6tWrYoofmIwGPj2t7/N1KlTMRqN1NbWsmrVKvbu3Su20ev1YlGZmJgYiouLefTRRwFwu928+OKLg+qnhMRXCUmIn4CAz4XT2kTQH3qheuwdOLtD9at9bltYW4VSwwW3vkpsSiG1u/+Bw9pEct40pt7wHIc3vk75xtfD2iflTkWuVJNSMBNndzOVW/+KNjaFvPNvprVyK12N+0+6vxqNhueee47Gxkb+8Y9/kJ+fz4IFCygqKuLxxx8XS8TOnj2bBx54gI0bN7J161bUajUzZsxg2bJlPPXUU2zbtg0IVSRqbW1FpVIBYLPZaG0NlWfs6Ymsdz5z5kx+/OMf09DQwBdffIFSqeT888/nueee47e//S1r164V26rVapYtW4bNZuOTTz5hypQpfP/738ftdvOf//znpMcO4LF34rQ2YTDnkFp0EVXbVkCUOibmnMnIFUpSCi/E67RSs+t9UgpmMu6yxfg9dupLTj6fusPhoLW1Vayn3tHRgc0WekZ6/38s48ePx2w2M2bMGBwOBx988AEmk4lrrrmGPXv2iMLs3nvvpbi4mHXr1tHQ0EBCQgJXXHEFM2bM4N577w0r1GK1WmltbSUtLY2ZM2fy0UcfRe3ruHH
jMBgMTJw4EZfLxb/+9S8mTZrEHXfcgcvlYs2aNSc9frlczrJlyxAEgb/97W9YLBZycnKYP38+s2fP5vvf/75YJ91oNPL8888TCARYtWoVDoeDyy67jKeeeopf/vKXfPnllwAEAgFaW1uRyWTI5XJxjoE+65/LZDKeeOIJcnJyePLJJ6mvrz/psUhIDCckIX4CbO3VHPzid5jSx5I5/ipq935IR83OqG2LZt1BfPoYtr33EG2VmwE4svM9iq/+GUUXLqK9aitdTaVhv4lNKqBiy585tP734rHGA2voaRtcFSCz2cznn3/O888/L2pTzc3N3HLLLcyYMYPNm0P92rRpE9XV1WEvuA8//JCf//zn3HrrraIQ7+np4c033yQhIYFrr72WrVu38uGHH0a9dlJSEg8++CC7d+9m2bJlBAIBAFauXMkPfvCDCI1w5MiR/OUvf2HlypXi9d944w0uuuiiQQvx+v3/AiB91CWkFl10wramjPEc3vAa5Zv+BMCRnau49Ifvkz567qCE+IEDBzhw4ABXXnklU6dO5b333qOlpeWEv8nIyODjjz/mD3/4g3hsw4YNHDlyRPz817/+ld///vc4HA7x2Jo1a3j11Vf5xje+wV/+8hfxeK8PfPr06f0GgRUWFvLuu+/y17/+FQjN/2uvvcbs2bMHJcTNZjPp6en88Y9/ZN26dQCUlJTw6aefMm/ePFGAAyxatAi1Ws29994rLnC2bdvGs88+y+233y4KcbfbzZtvvolGo+H6669n165d/O1vfzthP9RqNZMmTUKlUlFYWCgJcYmvPJJP/DQgk8nJmjCPztrdogAHQBA4tO73IAhkFV8T8Tu/z03F5jfDjg1WgPeyYsWKMHPo6tWrCQaDTJ48WTwWCASivtw+/fRT8vLyMBgMYcc1Gg0ALperz+teccUVqNVqXn31VVGAQ6js5yuvvEJpafgCxmKxsGrVKvFzMBjk4MGDZy0gzGPvoGLLn4/2M+inq3E/MXGpp3TeXk28L03xWPx+P2+//XbYsWMFOEBbW1uYAIeQxr19+3YmTJgw6H52d3eHCcRAIEBZWdmg57+zs5PGxkauv/56pk+fjkKhAELz8P7774vt1Go1F110EZ999lmYhUIQBDZs2EB6ejrJyclh5+6NMxjInHo8Hn72s5/x4osvsn79+kGNRUJiOCFp4qcBtT4BjT5B1ASPxePoxNXTijEpL+I7p7WBgN8TcXywOByOCO3P6XRisVgiXoxxcXHMnTuXMWPGEB8fj0wmw+/3AxAfH4/dfnTve68QP9FLND8/n9bW1gEHHzU3N4vm/V5sNhtGo3FAvz9V7JZ6hGAg7JjX2Y1KF3tK5+0V4ida8PTS0dERIaCjkZeXx5w5cxgxYgRxcXF4PB5iY2NRKgf/59vc3By22IJTm/9gMMiSJUu4//77efzxx+ns7GTdunV8+umnYQtGs9mMVqtlzpw5TJkyJewcvYtHk8lEW1ubeHwgz9+xlJWVUVZWNqhxSEgMNyQhfhpQKEOagt/jjPq93+tEodREOd7/i/5k6Etw9AZY9TJlyhR++tOfUl9fz7Zt2+js7ESlUpGTk8PYsWNFzaeX3ixwMpmsz2trNBpxETAQjhfgwBnb4x6N4wU4AH0Pb8D0ztGJ5qqXgQilRYsW8c1vfpPt27eze/dunE4nWq2W6dOnn9I+8OMF+Omgra2NJUuWkJeXx6WXXsqll17Kddddx8cff8wf//hHBEFAp9MBsHv3bqqqqqKep9fv3UuvVj+QOZWQ+LohCfHTgMfeSTDgw2COTC4iV6iJic+gvfrLM96P+Ph45HJ5mIBUKBSYzWbKy8uBUMTvww8/zJYtW8J85wBz5sxh3rx5EeftfeH3akTRaG1tZezYsRgMhjAt/utG79yr1eoBa459MWXKFK6//nqee+450c/cy8iRI4dsMpfq6mqqq6t56623WLRoEddeey179+5l+/btWCwWIPS8DNT33vv89QZYSkh
IHEXyiZ8GAn4PbVVbSS2ag9YYnsEtc/yVKNW6iO1oZwKlUhnm+waYNGkSer2eXbt2AZCVlYVer+fLL7+M0Hwvu+yyqOftffEeb2o9VjPasGEDSqWSG2+8MeL3cXFxJz+YYUrvXMXHx4cdH4wWOXr0aARBYOvWrWHH4+PjmTp16uA7eQYwGAxce+21Ycd8Pp/oD+/1tVutVqqrq5k9e7aoYR9LtHmyWq0Eg0FiY2P7bQuhxebxbSUkvqpIQvw0Ufb5bwGYduOLJGRNRKM3k138DcZd/hCW+n00lg0u4vpk8Hq93H333UybNo3k5GSmT5/OAw88QGVlpRiZ3tTUhM/n44ILLhA16/j4eO67774+U4XabDbq6+u5/PLLycnJITMzk0WLFnHHHXeIbfbs2cO6dev41re+xf33309BQQHp6elcddVV/OEPf2DOnDlnfPyGxFziUkehiw/tZTck5BCXOgqN4eylxu0N4FuwYAGJiYmMGjWKX/ziF0yaNOmkz1VbW4tMJmP27NnIZDJkMhn5+fk88cQTUdtnZ2dTUFBAamooOC8zM5OCggLMZvPgBzRArr32Wu644w4WLVpEcnIyGo2GjIwMFi1ahM/nExeRAG+++SYjRozgF7/4Bfn5+SgUCjIyMrjrrrui5tP3+/2Ul5dzySWXkJeXR1paGrfeeiv33ntvRFu5XM5vf/tb3n77bcaMGXNGxywhMRSQzOmnCYeljq0rfkjx1T9l5ndCW8aEYIDGsv+wf81z0X2wp5l//OMfuFwufvazn4lBT/v37+fZZ58VTZI9PT28+OKL3H333axcuRK3241er2fdunV8/PHHLFy4MOq5f//73/Poo4/yyiuvAFBfX89bb70V1mb58uW0tbUxf/58rrjiCiBkCl2/fj3bt28/Q6M+ypTrfhkWQDhp/pMAHN7wR8o3vXHGrw+hRdK7777LggULuOSSSxAEgR07doQFag2UjRs3Mm7cOO69917uvPNO5HK5GNE+e/bsiEjyhx56iIKCAvHzj370I4CwrWRnivfff5+YmBiuvvpqrr/+evF4Z2cny5YtCwu43L17Nz//+c+5++67eemll8TjjY2N/OY3v4l6/tdff50lS5bw8ssvA9DS0hK2va4XpVJJbGwsCoUCvV5/uoYnITFkkQmnMZrowQcfZO3atVGDloYzMrkCpToGv9c5IGGsNSajiYnDYW3G74nuH1aqY4BQ0NvpQKvV4vP5CAQCxMbGkpOTQ1dXV9SMXb3tMzMz0Wq1NDQ0YLVaUavVqNVqXC5X1MAntVpNamoqVqs1arKXY8+dk5ODSqWirq4uom1vcNPxgXgajQalUjmgiO1oKDV6ZLJI41LQ7w3bBdDX3CuUGuRK1WmpSmc0GjGZTLS2tkZky4PQHMhksoigw2jEx8eTkZGB3++npqYGj8eDTqcTE6D0EhMTE7UUrc/nC+uDVqtFJpP1O//f+ta3+nWFrF+/PizFq0KhICcnB4PBgNVqpaGhoc/3gUwmIzMzE5PJhNVqpb6+/oTBjUqlkrS0NGw2W9i+8+Mxm83ExcV95VLPft2ZO3cuixcvJi8vcqfP1xlJiEtISERl8eLFYhrZvli5cmVYql4JiTOFJMSjI5nTJSQkovLCCy+c6y5ISEj0gxTYJiEhISEhMUyRhLiEhISEhMQwRRLiEhISEhISwxRJiEtISEhISAxTJCEuISEhISExTJGEuISERBhfl0IjX5dxSny1kYS4hIQEALGxsdx7770ROdC/qhQUFPD000+TnZ19rrsiITFopH3iX1NikmPImh1eda3igwqC/hMn6tHEa8i8KBN9mh6/w0/z9ma6yrtOqS9yuZy5c+cyevRoVCoV+/fv5/PPP8fn80W0HT16NA6Hg7q6ulO65kDR6XQsXLiQsrIytm3bdlauOVhiY2O5/vrr2blzJ/v37z+p32ZlZfHzn/8co9EYlvpUo9Fw8803c+jQoYhCLEMNvV7Pt7/9bXbv3s2
+ffv6be/1eklOTuY3v/kNzz77LF9+eeYrDUpInG4kTfxrilwlR5ekQ5ekI3liMrlX5yJXnvhxiM+LZ9bTs8i8MJOgJ4guWce0n04jZ27OoPuhVqtZvnw5P/zhD9FqtQSDQe666y6efvrpiLYJCQk899xzLF++/KyZQnU6HQsWLCArK7LM7FBDr9ezYMGCiJzq/ZGYmMizzz4LwP3338+mTZvE7zQaDQsWLGDEiBGns6tnhN57lZycPKD2tbW13HPPPRw8eJDHHnssogKghMRwQNLEv6bYG+3sfWUvAIXXFZI/P7/f3+TOy8Xn8LHp8U0EPKHc6vnX5NNe0j7oflxwwQWMHDmSxx57jD179gDw0UcfRa1A1dPTw5dffonFYjlhjm2FQsGjjz5KY2NjRJGWocbIkSO55ZZb+Oijj9i5c+dpOefJZFKWy+U89thj4pwNplDLqVBQUMB3vvMd1qxZc040fbfbzdKlS3nuuef4yU9+wj333ENnZ+dZ74eExGCRNHGJAWPMNNKxv0MU4ABVn1ThbBt8EZecnBwcDocowAEqKyv56KOPItr6/X7+3//7f2Iltb5QqVTMnDkzqjl+qJGRkcGUKVPOmeCYPXs2I0eO5JVXXjnrAhwgPT2dqVOnnlPB6fV6eeaZZ4iJieGmm246Z/2QkBgMkiY+jDBmGtEl6kAGzlYn9qa+q23FpMRgzDDi9/ixVlgJeAdXClUml4WuCWjiNAiCQExyqApYwBvAY42s0DUQTCYTWq2WtLQ0XC4XaWlp4netra1hRXTkcjkpKSniZ6/XG/Wlr1arMZvNYglKQRDE8/p8Pjo6OgbV12PJy8sjPT2dhoYGampqoraRy+UUFRVhMpno7u6msrISr9cb1sZoNGIwGMQCIxqNRuxrT09Pn5Xc0tLSSE1NxeVyUVFREbXa3LFtc3JyaGho6LOa3XXXXUdNTQ0bNmzob+gA5ObmkpGRQVNTU59VwuRyOYWFhSQkJNDT00NFRUWf4+81/avVanH8NpsNuz36s907frfbTUVFBX6/v8++pqamMmLECJqamvqNoWhubmbt2rVceeWVvPXWW31eX0JiqCEJ8WGASq9i8gOTicuLw9nmRKlVoonX0F3dTckfS3C0HH3hK2OUTLhjAknFSbi73Khj1QS9QUr+WEL7vpM3eyu1Si569iLxc/bF2WRfHIrm7SztZMevdwxqTA888ABTp04VP7/xxtF63zfeeCM2m038nJycHPb9wYMHeeihhyLOmZ+fz/Lly8XPt956K7feeisAFRUVYn3twaDRaFi6dCnnn3++eGzHjh0888wzYSU9Z8+ezZ133klCQgJerxe1Wo3D4eDVV1/l888/F9stWLCAG264Qfz8/PPPi//+85//zMqVK8Oun52dzQMPPMCoUaPEY1arlddffz3svBASovfccw9XXXUVcrkcQRBYuXJlRP3txMRE8vPzeeutt/o1watUKh5//HFmzJghHtu1axfPPPNM2IJj1qxZ3HnnnSQmJorjd7lc/P73v+fTTz8V21133XVhtet/9atfif9esWIFK1asCLt+RkYGixcvDnOz9PT08MYbb4SdF0Jbx+666y7mz58vlmb9+9//zp/+9KcTjnHdunXMmzePKVOmsG7duhO2lZAYKkhCfBiQe1UusSNi2fz4ZlH7js2JZdyicaTPSKfigwogpDVPuncSugQdm5/YjL3RjkKjYMJdE5j0w0lsXLIRV7vrRJeKwO/2s+GRkJY28xczadzSSM2aGoBBa/cAL774Ilqtlttuu40xY8bwyCOPiN8dr4W2tbVx++23A/Dwww/3ec6qqipuv/12kpOTefrpp3nzzTfFIK1TNa1fd911VFRUsGjRInp6erjqqqv4v//7P+655x5+/etfi+0mTpzIJ598wmeffUZHRwcmk4nvfe97PPjgg9TU1Ija66pVq/j3v//NVVddxQ033MB9990n1hY/vv56amoqy5cvp6WlhcWLF1N
dXU1iYiILFy7kwQcfpKKigvr6erH9woULqa6u5s4776Snp4cHHniAhQsXsmnTpjDtOS8vD5lMxoEDB/od//z588X5tVqtXHHFFdx+++3cd999PPPMM2K7CRMmsHbtWtauXUtHRwfx8fHcdttt3H///dTU1FBREXpWP/jgAz799FMuv/xybrzxRhYvXiyO+9gFHIQWG8uXL6ezs5OHHnqIyspKEhMTWbBgAQ888AAVFRVhVpEFCxZQW1vLXXfdhdVq5b777uOGG25g06ZNlJeX9znGQ4cO4ff7ycvLk4S4xLBBEuLDALkqpE343UdNhz21Pex5aQ/uLrd4LHFsIubRZrYt24a9MSTsA54ApW+VMuf5OWTMzKBydeVJXVsICqLPWwgK+J3+U/KB99LVFdqW5nQ68fl8NDc399k2GAyK3/t8PhQKRdR2Xq+X5uZmdLqQ+b+1tfWE5z0ZBEHgqaeeEgXt+++/T2FhIbNnz+b111/HarUChG3PgtA4X3rpJSZMmMAll1wiClGbzYbNZhPNzLW1tX2ahm+//XYCgQBLliyhu7sbCJl/X3jhBdavXx8mwCG0CPjVr34lmtr/9Kc/MXPmTAoKCsKEeHx8vNjH/pDJZDz11FOimXn16tUUFBQwZ84czGaz6N743e9+F/Y7q9XKyy+/zPjx47n00ktFId47fo/HI47f7XYTjdtvvx25XM6SJUvEvjY3N/PSSy+xadOmCLeG0+lk2bJlokvmT3/6E7Nnz6awsPCEQjwQCGC1WjGZTP3Oh4TEUEEKbBsG1P23Dp/Tx8yfz6TwukL0qSGfr6vThRA8agZNHJ+I3+0n6A8SOyJW/E9r1uLt8WLMMp6rIZxVNBoNQJ9CYTBs2bJFFODHHlMoFBQUFES0VyqVZGZmUlBQQEZGBtXV1VGTimg0Gvx+f58CXKPRMH36dNatWycK8GPZvXt3xLF169aF+crb2toIBAKi/72XXutEX4uiY9m2bVuEn3jLli2i//t4jh1/ZmYmlZWVfY5fEIQIn3kvCoWCGTNmsHHjxqiLjb7Gf2xMRUdHBz6fD7PZ3O84lUqluLCQkBgOSJr4MMDZ5mTzks3kXZNH9qXZ5M/Px1pl5ci/j9C6q1Vsp4nXoNQqmfHEjKjnOdZ3/lVGrVYDnNaXcbTI7V7t02g8ujhKSUnhu9/9LjNmzECr1eLxeMRFxaFDh6L2tS8BBqG98QqFgpaWlgH39XhhGwgECAQCqFSqqP1PTk6msbHxhOcc6PiTkpL47ne/y8yZMyPGX1kZaQVSq9X4fL4woXssJpMJtVp9UhaV48cvCAI+ny9i/Mej1WqJjY2VtphJDCskIT5M8Nq8HHrnEOWrykkqTmLE5SOYdO8kKldXUvlh6LTLIlgAACAASURBVOUoBAVc7S7WP7z+HPd2aNDfS/tk6F0YHItSGfrz6RUaiYmJvPDCC9TX1/P4449z+PBh/H4/SqWSH//4x2ER9tHOE41ea0JvxP3p5PDhw3g8HiZNmhS2xS8a0eayt9+9MQwJCQm8+OKLNDc388QTT3D48GF8Ph9KpZLFixeTkxM9KZBSqUQmk0UNrutdiB27UDhTFBcXI5fLB5TtTUJiqCCZ04cBauNRARL0B2nd1cq2Z7bRWdpJ9iVHTZT2Jju6RF1Y+68jvYKvVwM8HWRmZkYc6zWj927fmj9/PkajkaVLl1JaWiqayAOBAHl5eX32VaVSiVHUx2O1Wmlvb2f69OmnPUudx+Nh165dXHrppf3O1YnG3+uTnzdvHrGxsSxdupQDBw6I5voTjd/lciGXy/tcyNhsNpqbmzn//PP7nKPTxdVXX01HRweHDx8+o9eRkDidSEJ8GDD+zvGkTUsLPyiEBLrPeTTquvnLZoSgwKiFo5Apwl/4piITnMWiTUVFRdxyyy0YDIazd9H/0dLSgiAIYalCjUZjVEE0UKZOnRq2l91gMHDNNddQVlYmmnp1Oh0ejyfCPH711Vf3ee2mpiZkMll
YXwsKCkTNXxAEVq9eTX5+vrhdrheTycRVV1016DEBvPPOO8THx3P99defsN3kyZPJyMgQP+v1eubPn095ebkoxHU6HX6/P2zLHcBll13WpxbeO3e5ubnisby8vLBFxerVq8nJyeF73/te2EImLi6OefPmDXCkJ6a4uJipU6eycuXKE+69l5AYakjm9CGOQqNAG6+l+O5i0qal0VXRhVwpJ2FkAuYxZvb94ajpz9nmpOztMsZ8ZwyxObE0b28m6A+SMDKBxLGJbFu2DWt1KIraVGgi/YJ0AOJy4wAYdfMohICA3+Xn8Hunpo08+OCDYiDT8Xt+T5bMzEyuu+46IJThSy6Xc9999wHw2muvRfi+u7u72bt3LzfccAMGgwGlUsmsWbPYuHFjRPT0QDlw4ADPP/8827dvx+FwiD7fJ598Umyzfv16rrzySpYsWcKGDRtQq9VMnDiR3NzcPiPAd+zYgdvt5qc//SlbtmwhJSWFmTNn8vjjj7N3bygt7ocffkhhYSE33XQTU6dO5cCBA8TExDBz5kycTicbNmzoMzlMf1RVVfHxxx9z8803U1ZWJl7zeEpKSli+fLkY4HbBBRdgMBj45S9/KbbZuHEj8+fP58knn2TdunUolUqKi4spLCzsM9HOzp07cTgcPPLII2zatInk5GRmzZrF0qVLxTS0n3zyCYWFhdxwww2cd955lJSUEBMTw4wZM/B4PGzYsCFiW9rJkJiYyE9+8hMOHjzImjVrBn0eCYlzgWLp0qVLT9fJ1q5dS1VV1UnlbpY4MUJAoGFDA85WJ/oUPQmjEtCn63F1ujj4t4MRCVx6anro2N+B1qQlYXQCxkwjrnYXB/50gO6ao9HN+lQ9piITCrUCn8OHvcmOXClHoVYgk8lo2xsZyGRIN2CttGJr6P+FqVKpMJlMvP/++1gslj7bmc1mnE7nCfOGm0wmJk+ejFqtpr29nba2NtRqNWq1mi+//DJqZPfu3bvR6XTk5eURCARYs2YN77333kk/mwqFgqSkJJ5++mlaWlo477zzyM3N5cCBAyxfvjwsIKytrY2ysjKKioqYMWMGI0aMoKKigpdffpmEhATq6+sjhKTL5aKkpISUlBQyMjKwWCz88Y9/DKtCJggCmzdvpra2ltTUVIqKitDr9axbt47ly5eLAlypVJKcnMzu3btpbw9/LrKzsyktLaW2tjZijHv37mX8+PFcf/311NXVhWV3k8vlpKam8uyzz9LQ0MCkSZPIy8vj4MGD/PrXvw7LhNbR0UFJSQmFhYXi+Kurq3n55ZeJjY2lsbExwvfu8XgoKSkhOTmZzMxMurq6eOONNyLabd26lSNHjpCSkkJRUREGg4GNGzfy/PPPiwJcqVSSkpLCnj17aG1tDft9VlYWBw8e5MiRIxHzsmzZMmQyGY8//vigF0MSZ568vDxmzJghbQE8DplwGiXugw8+yNq1a/uMNJWQGApMmTLlhEljACwWC3ffffdZ6tG5R61W89BDDzFr1izee++9IV845nRwwQUX8PDDD9PQ0MDSpUtPS1peiTPH3LlzWbx4cZ/xFV9XJHO6xNeOI0eO8PLLL5+wzddtr7DX6+Xpp59m48aNxMXFnevunBXsdjsrVqzggw8+OGEOdgmJoYwkxCW+dnR2drJx48Zz3Y0hybG1xL/qlJSUUFJScq67ISFxSkjR6RISEhISEsMUSYhLSEhISEgMUyQhLiEhISEhMUyRfOLDFI1GM2yCrxQKBYkpiaRmp6LSqVCoFNLyUUJC4qSQx8pZuXYlGu3py8QoQ4ZKpsIQY2DiqImMGTUmaorloYwkxIcpixcvZsOGDWzZsuVcd6VP1Go1oyePJi47Ds0oDYpMhfTESUhIDJo2IvNXnA4Er8CB8gPEboslJy6H66++/pxkmxwM0it1GBITE8N555036OxjZ4PsgmxGTB6BbpYOmf4s5nuVkJCQOElkahmBzABdmV102buoebuGuefNZeb5M8911/pFEuL
DkAsvvJB9+/adUqrJM8nYyWNJnZWKqvj0VRGTkJCQOCsYwDrRyj8r/klTaxMLvrHgXPfohEieyWFIcXExX3755bnuRlTyx+STepkkwCUkJIY3rnwXu1y7+Gz9Z+e6KydEEuLDkOzs7Kg5sM81CeYEsi/IRjVSEuASEhLDH0+2h/WV66mrr+u/8TlCEuLDDLlcTnp6eliRiqHCyMkj0cw4fZGjEhISEuca+yg77695/1x3o08kIT7M6N3+4Ha7z3FPwklMSiRmQoz0REl8fRkOxRuHQx+HGgpoM7RRWVV5rnsSFemVO8zwer2oVCpksqEV8Z1VlIWq6KttRg96gwRcAQKuAEKw77ehEBAIuAJnsWfDEAF8Nt+wEyoBdwAhENnp7sPd7H9hv/hcBL1Bgv6zXM1R4ITPXcAToOTXJdhr7WexU18N3OluNu0amnUFpOj0YUYwGMTv96PRaIaUNh5jjoGvtgyn9qNa7HV2XC0uRn1/FLH5sRFtmv7bRMumFtTxaoSAwKg7R6EyfsUn5iRx1Dso/3M5mngN3m4v2ddkk1CccK67NSDKfltG3k156DP04rHOvZ3UfFDDyP8biUweWlxX/KWChPEJJE1LOmt9c7W6OPTaISY9MSnq9wqNguxrsjn8+mEKv1tIbGHk8yvRBxpo7W7tv905QBLiw5Da2lry8/MpLS09110BQCaTodArznU3zji5N+QCUPKr6JWvvFYvTV80MWnJJBRaBV2lXSj1w/dPrOaDGmLzY0mYcHoFbN0ndWTPy8Y8yYyn04PfPbzKgPYKagB3h5sjfz/CmB+OISY9JrzhKdg5rYesWPZZyLvx5Gpny5QnttDFj46n8LuFVPylggmPTEBlkBaYA8UjeBAEYchZQSVz+jBk7969TJw48Vx3Q0Sj0SDXD/5R6jVRnzUE8HZ78Vq9Uc25QX8Qj8UT1Wx6ItydbnQpOhTa0ILGNNYU9sLvj6AvGDIxRyHgCuCzR//O7/CDAEJQiNom4A6E2pwk9jq7OJZoeLu9g7pvrjYXhpxQNiyNWROm1fbem6Cvb1O0EBDwdntP+rrebi8BT9/9FQJC6Jk4joA7EHb82Jd403+bSJ6eHCnA/0fQH+yzr0IwdL1oz5m3K/rxoz8OuSP6MtkH3H3/TcUWxhI/Jp6W9S19n18igoA6gN0+9FwRw1dN+Bqza9cu7rnnHt555x2CwbPsd4uCQqFApj651anX6mXv03tJm5OGZZ+FgCtAbFEs+Tfl9yv4aj6owVpmjTie/Y3sfrVGb4+Xw68dRqFVIFfJcXe6yb8pH2OuEYD2He00/bcJjVmDq8VF7g25xI+O73c81e9WYz1kxe/ws/eXe5EpZRQ/Utzv7yDkP61eVY2tyoZcE1oMFdxSgD5TT8AdoOqdKhwNDmRyGdpELfm35KMyqDj46kFkShl+ux9tsha/04+r1UXO/ByMeUZ2PbGLrKuz6NjZQcAdIDY/lvyb85EpZFT8uQLTWBOJUxIBsOy30LaljVHfH4W91k7l25V4rV4qV1SiUCtImppExuUZQEj7rPhLBSq9Co/FQ2xhLLnX50I/j0DXgS5qP6zF2+2l9OVS5Eo5uTfkEjcyDgBHg4PKtyuRK+X47D6SpiWRdWWWeN69v9xL4uRE2ne0o9QpQQbjHhjHnqf2MOnxScgUMnoqe4gtiMVR76B+TT2j7hyFZb+Fuo/rUOqV+Lp9xBbGkndjnvic7Xt6H+ZJ5qPnBcYtHodMIaPx00bat7ejNChRapVhiwshKGDZb2Hs/WOjjtdR56Dpv03I5DKC/iAjbx+JLkUHAtR+WEtXaRcqY2gOs+dnk3he6F6UvlSKq82F4BewHbEhV8uZ8JMJ4nnbt7fTsKYBZYwSn91H6qxU0i9NF79v/E8j7Tvb8dl8ZF2VRepFqRF9S56WTNW7VWTNyzrxTZMQCSqCeL0nv3g800hCfBhy4MABbDYbM2fOZOPGjee6O4Mm6Asik8sofrSYoC9I6cu
ltG5uJXVW5EvnWEZcNwKuG9w1uw91o4pTMerOUUBI2+zVeFwtLhr/08j4B8ej0Cmw19k5/PphznvyPGSKE0uovIV59FT00PCfBsbcM+ak+lS9qhq5Ss7EJRORyWV0lXbhanOhz9RT+1EtyGDSkkli25r3ayi8rRAAc7EZY66Rvcv2MvGxiVgPWumu6MaYZwQBgp4gxY8WIwQEyl4po2VDC2kXpyEIfWt5hhwDEx+byL6n91FwawH6LH3Y95V/rSTzikxMY00E/UEOvHAA60Er8WNOvNgxjTNhGmdi15O7GHv/WDSmo9sRg/4gh984TNbVWSRNTcLv9FP6m1J0STpxoQFgr7VT/GgxcpUcR6MDmUKGNlmLs8WJOlbNwd8fZMovp+Bz+EICE9Amahn3o3Eo9UqEgMCh1w5hKbFgnmgWz2ursUWc11Zto3NPJxN+MgG5Wk77tnaq3q0Sf+N3+hH8ArpkXdTxdh3sYtwD41AZVDT+p5Hq96oZe99YkIXmIufaHJCBp8vDgRcOkDAuAblaztj7x9L03yY8Vk9ocXQMPVU9NKxtYMx9Y9CYNPidfhrWNogBdZ5ODwqtgok/m4ij0UHZb8uiCnF9ph53hztkiRpa1mGJk0Qypw9T3n33XW6++eZhV3EnDAFSZqYAIFfJSZmRQldp1xm9ZFxRHK4WF1XvVGE7YsOQZRC1cEuJBW2ylu7ybiz7LHi7QmZdV5vrjPUn4Alg2Wsh5xs5omZoGmsKaWUCdO7uJPPyzNCLVgaZl2diKbEQ9IY0Qo1Zg8qgQqlXok3UIlfLw1wEyRckAyBTyMLndwCegoA3gFwV/opwd7hxtYa0RMs+C9ZSKxqThp7qnlOaB/sROzK5jKSpoUAwZYyS1FmpdOzqCGuXOitV7FOvGd6QbcBR78BWbUOpV9J9qBuPxUNMasjEHZMWg1wlp6eqh+7ybmLSYnDUO8LOmzIzJeK81oNWEopDghXAPNmMTC47ugDqRwAmn58s+pxTLkzBXmMXzd+xBbF4u71YD1lxt7lRx6txNjvF3wa8ARSaSFdG25dtpFyYIi6AlDFKRlw3Qnx2lHqlKLT1GXoQQouNCP7X7xMt5iSGB5ImPkzZvXs3NTU1fP/73+fll18+190ZFDK5LCywRqlXEnD372M9FXO6Ol7NhIcn0LGrg5r3awgGgqLp2u/0E/QEsdcf9XulzEwRTaxngt4XrEIX+cIOBoIEPIGw4DilQYkgCPhdod/1F2Sjjj26yFPEKPqc32jnkclkERaIgDuATCkLmyNdqk5cCA0Wn90XEQSo1CsjBJDSEHkvDFkGusu7kSllZM/LpqssFFDYq2m3bm6ldXMrcUVxyDVyXK0u1Kbwxe+x89RLwBNArT16XK6UhwnW3v56rV7U8ZG/V8Ue82zrQvct4A6AFqrfqcZn82HIMYTup8Mv3lMABKK6lXw2X9S+9hIRwyAnqt/c1epCm6g9qZgNiaGJJMSHMb/5zW948cUXufLKK1mzZs257s5J0xvY0/sCdHe60SZq+/1d1tVZIV/pcchU/b+QAq4ACp2ClAtSSLkghabPm6j7uI7RPxiNxqzB5/CRfU32yQ9mkKhj1SGhWGfHkB1e+lCulKOOV+NocKCOC82Rs8GJQqMY8LY1j8WDNik0p55OD5rE/2lwxwnIaAFdcrU8IsBMm6gl6AuSeXmmqKGeDrTJWlytLoK+oKgROxocA3oeYjJiaPq8CXWcmpxrc2j8rJGY1Bh0KTq8Vi91/6zjvCfPEwVw/T/r8TmiBwkei8aswVF3VGP32X1hglYml2EaZ6JjV0eYT7oXT5fn6L8tntB906to2dxC0B9k9A9Hh74UoGt/V9hCSq6WRw1G1CXrsFXbSJycGPHdydCxqwPTONMpnUNiaCCZ04cxbrebp556iptuuolrrrnmXHfn5JGFAny8Vi+2ahst61tIuSCl358pNAoUusj/5Mr+H+e6f9ZR+1Etni4PAXcAX49PNE0mTk6
kp6KHtm1tBL1BXC0uGtYcTW/rtXpxd7gJ+oP4eny4O9wnFcFuKbFwZNWR8ClQyMiYm0Hl25XYqm24O9zU/6seyz4LAOmXpFPzQQ3OZifudjc1q2tIm5M2YA2qd6z2GjvNXzSTPD1kXo8tiKV9ezt+RygYrnVLa4RpVWPW0LGrA0+nh6b/NhH0BlFoFZgnmql6pwpfjw+/00/jp414LJ5olx8w+gw9xhFGqt+txtfjo6usi9bNraTNSev3t1qzFq/Vi8qoQiaXEZMeg7PFeVRr/l8ktxAU6C7vpn17+4D6ZJ5kpru8m47dHXi7vTT8uyG0wDhmmjLmZtC8vjlMYPfSsaODnqoeXG0ualbXkDIjJWTGFkLacW9CmIb/NETMn9aspaeqB0+nh67SLqyHQpan1ItS6djTQcuGFjxdoe+O9dMPBGezk45dHQOaW4mhj2Lp0qVLT9fJ1q5dS1VVleRnOYvYbDa2bt3KD37wA+Lj4yktLT3r869Wq8mYmIEyY+CGnYA7QOumVjLmZlD7YS3Wg1ayr84eUCT4qWDMMWKrttG8vpm2LW1o4jVkfSMLuUqOXCUPaVY7O2j6rAl7nR3TOBO6pFDgUt3HdXTs7ECpU+JocGAts2IYYUClD2nFfrefgCMgRlsfj73Wjr3eLkYhi33KM6JQK2j8byOduztRx6lJnp6MXCnHkGNAqVHS+GkjXfu7SDwvkfSL00EWCsQzjjCi1CtxNDowTzSL2ltMWgzNXzSTMz+H2tW1dO3vIuPyDNHdoEvR4e3xUvdxHc5GJ+lz0wk4A2Hzr8/Q07GzA8s+CxqzBmOuEZlChmmMCXeHm4ZPG+jY1YHWrCV+dPyAFxaOegcJ4xMiNPmECQmhiO7Pm3C3ucm9IRfDiKPWCXu9nfhR8ZF772Wh7WOmcaZQXIAydC/jR8aj0CpQx6mp+7iO1s2tyJARPzYelUElbnNzNDiIK4iL2DOt0CiILYileV0zbVvbSD4/GWWMEn2WXrSEqAwq5Co5tR/UYhprEl0vrhYXmVdl0vxFM22b2zDmGsm6OguZXIY+U4+jzkH9v+vp2N1B/Oh45Co5xnyjaHHRpehwNjlp2dSC3+EnYVwCSr0SZYyShLEJtO9op2V9Cx6Lh9SLUtHEawh6g3g6PCSMP+pSstfYMRebReuGs9nJ4dcPM+JbIyIsPxInRm1RM71wOjEx0bcTnitkwml84z/44IOsXbt2SGx7+roRHx/Pww8/jFKp5Pnnn6el5eztAdXr9Zx/2/lopg68+InX6mXvL/dy/nPnn8GefX3x2X3senwX05ZPO2t+T5/dR+lvoicgKvxeYfh+8K8YHTs7qPtnHcWPFkcNSBsKeLu9HHjxALk35GIaK5nSTxZDhYEHrn4As9ncf+OziOQT/4pgtVp57LHHmD9/Ps8//zyrVq3in//855Dc1yjx1URlUDHxsaGThOhskjglEdM405AV4ADquFBQ55kM1JQ4+0g+8a8QgiDw4Ycf8sgjjzBu3Dhee+01Lr/8cuTyoXeblQYlY38UPUmGxKmjjFEy/sHxUvTxWeRE2e2GCpIA/+ox9N7uEqdMfX09Tz31FM888wxz5szhzTffZOHChSdlBkpNTcVoPLVtQydCrpSjz/zqmlfPNTK5LCJJi4SExFcPaVn2FebQoUP87Gc/Izc3l6uuuorf/e53lJaWsnnzZnbs2IHNZuvzt93d3SxZsoTExET279/P7t27KS0tpbu7+yyOQEJCQkLiREiBbV8jdDodM2bMYMaMGUycOJHKykp27drFwYMHqaioiPCfq9VqlixZwuTJk8U65p2dnezcuZO9e/eyf/9+rFbroALbJCQkJIYTQzWwTRLiX1M0Gg0TJ06kuLiY0aNHk52dzZEjR6isrKSuro76+nrq6uqw2+0sXryYmTNniileg8EgbrcbrVZLT08PZWVl2I12mtKbsAX61u4lJCQkhitDVYhL5vSvKR6Ph23btrFt2zYAtFotRUV
F5Ofnk5+fz8UXX0xWVhZyuRyLxYLNZsNkMiGXy5HL5eJeyfj4eGbMmEFQCCKXy/EEPVgCFircFVR7q7EHhl7pPgmJrzu9CYDMk4aWQDobuNtD+f+/KhnrJCEuAYSyv5WUlFBSUhJ23Gg0YjKZSExM5Pbbbyc7Ozsi2l0mk6GQhSJzNXINafI0AkIAZ9BJVaAKIUq1DSEoUPGXCgAUagX5N+efoZENfSr+XIEgCMgVcgq+UxC1TdeBLjp2dSBXy0mcnEhcUfSEMhLh2GpsdB3oGlAq3e7D3aiMqj5rg59uXC0uPF2eM57g6Hh6qnpCleiuzDzj16pcUUn2NdliEpuzgfWQFVeri7TZfWSkk0H9v+rpqewRK8kNZ6TodIkTYrPZqKurIy4ujoyMjAgBLggCHo8HX8BHhbuCtT1r+UP7H1htXU2lpzKqAIeQ4M+4NIOkqUl0HTizlcuGOumXpJM8LRlLiSXq9/ZaOzUf1JB6USrJ05Jp3dR6lnt4GhFCiVGi5QU/E/i6fREVy/qibVtbn/dgMHi7vXTu6ezz+97UsmcTd4ebircqyL8lX0zBeyaxlFgGVDGvL+y1dmzVJ+eic7e7cbe5+/xem6hl7P1j6anuoenzpsF3boggaeIS/TJt2jR+9KMfoVKFUk0GAgF8vlABiR07drB161ZU41UoJ5/E4yQDfZYeZaf0COqz9Hi7+07K013RTeJ5iWKlsGNTkQ43goEglSsqOf9XQy9TX+F3C0/r+WzVNtq2tfVpsk6/JB0uOa2X7Je6j+pIuTCFuMLhYclp2diCYYQBY97p3e6q0Coouq2I/cv3Yz7PHFbbfrghvUElTsj48eN57LHHkMvl+Hw+PB4PGzduZP369ZSVlREIBELR6WPPR3kaHychINC8oRlblQ2lQUna7DRi0o6aOdu3tWM5YEHwCxhzjaRdnCbmh7bX2WnZ2IKvx4fapCb9knR0ybp+r9m8vhnBH6k2pM5K7bdiV8AdoPHTRpzNTuTKkMk7ofiYHNb/65Pf6Sd+VDwpM1MGlIjlyKoj2GpsyBQy/E4/MekxYg32/hCCAh07OkLFM2SQNCWJ+DH/M90K0Lyhme7ybtRGNakXpYbMyAIc+ccR9Bl6rGVWsuZl0bqpFSEoMOJbI3A2Oemp7kETr6F9RzvqODVpF6eJ1cZqV9eSeWWmmPikc08nSr2SuKI4LPstIauLLNQOQqVee83XAXeApv82Ya+3o03SknFZxgnLbh5LT1UPrZtbEfwCCRMSSJwSnp/eUmKhfXuovxmXZYiV84SgQM37NWK7uJFxYeVsnU1Omtc147P7iBsZR9pFaUfNr0KoGpjlgAWZXEbieYmin7VhTUOooI3FLRa9yV2QK5635v0ahKCALlVH6qxU8bi1zErAGxDLqAJ07O5AHacmNj829HlnB537OpEr5aRckEJsYWxYW8teCwFvAH2mnoy5GeK98Nl8dJd3k39LpOvqyN+PkHJBCi0bWvA5fCRNTeq3rO9AcXeGivoEfUHM55nDcrtb9lvo2t+Fz+EjriCO1ItSkSlk+Ow+Gv7dgL3Wjs/mw9Xsinj2HY0OWje34rP50GfqSZuTFpYxr6u0i7Yv21DHq8m6Oisi0Y3GrME0PlSFLmNuxmkZ67lAMqdL9ElhYSFPPvkkPT09fPTRRzz66KMsXLiQ3/72t+zfv59AoP/a34OlckUltiobGZdnYBxh5OArB3G1uICQ77JpXRPZ87LJuzGPgDdA8/rm0A8FOPSHQ5gnmim4tQDTGJP4wuwPZYwyanW0gVD7US0BT4C8b+eRcXkGrVtaxf7a6+xUvl1J6sxURlw3gq79XTR/0Tyg85onmolJjUGfriehOOGk6nZXr6zGst9C6qxUEqckUvdJHfYau/id9aCVjMsyMOYZOfjqQZxNTgRBoHVzK3KVHG2SlrLflZE0PQl7vZ2eqh7cHW6a1zXTVdolCsPSl0sJuELPQvv29rDypT1VPTibnECoKEv
cyDjkKjkJxQkkFCeINbeD/iBlr5ShMqoouLkAbYKWQ384NKD71n24m8q/VGKeaCZtThrN65vDqs85Ghx07ukk47IMVEYVpS+VhvWxty8BdwB77dFATGeTk/I3y0malkTuDbl0H+qmYe3R8x55/wgduzpInZVK0vlJNKxtoKeiB4D40fHoUnVoEjQkFCeECWUA03gTyhilWJ2sF7VJTd0ndWHjblnfIgqgpv820bG7g5xrc0i9KJXKFZXY60J9ttfZqfu4jowrMsi/KR+lThnWX3utHX2WPmpq2PZt7dSuriVhYgLJ05OpXV17QlfAyVDzkpuFsgAAIABJREFUjxrMk8wkTk6k5v0aukpD7jNvtxfLPgvJM5LJuioLW62N2o9CizuFWkFCcQIKnQLDCEPEs99T1cPh1w8TWxBLxtwM/E4/R/5+tEKg9bAVS4mF9IvT8Vg8NK5tjNq3uJFx4t/EcEXSxCWiotVqGTt2LI888ghVVSdX6vBUcbW5sJZZOW/peSi0CgzZBlxtLpq+aCL/pnwUWgUBVwC/048uVUf2vOywym3KGCWuVpeoVZnGmgak9SZNTRp0n1V6FY6GkO9Vn6ln1F2jkClC12z+opnMyzNFM3jWvCyq3qmKWoP6eGILY+ku70aulp9UMJuj0UH34W4mLpkolmg15hpR6pR4LB469nQw6fFJqAwqjCOMeCyhcqP5t+SLWmWnvBNXmwt9hh5topaAJySog74gud/OFaus9VT00LGng5QLUk5YQa9XW1doFBFjsR60ooxRknpRSCtNuziNls0tOBud/Waea1jbQOaVmaLmWPCdAvb/en/IXA0gQN7CPBQaBYYcA9aDVqxlVhKKE5DJZWJfeip6woRn0xdNpF+SLmrAWddkcfj1w2RemYm73Y1ln4WJSyaKQtGQYxCFrSHHQE91D0JAiHrf4ori8Dv8OJrC/fUxaTGojWq6y7uJHxWPu91N0BckJj0GISjQ9EUTxY8UozKq0CZqSZ6eTMeuDgzZoWsLfiGkmabrSb80PaxUrt/lj6wAdwzJFyQTPypkqQm4Qgvj0xG9nn5puhi85+ny0LqlFdNYE+o4NQW3Hg3kzLsxj5JnShhx3QjxeVeoFRhzjBFzWP9JPTnX5oiLI0OOIazWuzpWTf5NIYtDWiCN+n/WR+2bSq8aUG35oYwkxCWi4na7Wb169Tm5trPJiS5FF5aL2pBtEINQDDkGchfkUvthLQF3gKRpSSEznFwGMhj9g9HUfVLHnl/swTTWRMbcDDTm/n1ep2JOz7gig+Yvmil9qRRtkpbUi1LFSlHOFie2GpuoFQlB4YxHxDobncRkxITVWD+2TKbWrA0rvanP0otBXTJFaB6RhTSi49Gl6MLOq0vT4en8Xz3sfhTnoC8ouj2Oxd3mxlHnYO8v94rHAu7AgF6wzmYn+uyjgl6XrEOmkOFqd4n9PVb71KXqBlT/3N3mpru8W3zuhKAgCkVHg4OYjJiw8x5vrhUCAjLlyd/o5BnJtH3ZRvyoeNq+bBMD0Hrrt5e+dLRSXNAXJG5USMBpk7QU3lZIw5oGjvz9CElTk0ImZkWojyqDCr+974DCY9Mgx6THHL2np8ix59Wl6Gjd8r9gPgFat7TSuaeTgDeAEBDwOUJ133sX3X3NoaPBQWxebNixY+dfl3rUfabSqwi4o1sNvT3eAbtshiqSEJcYcii0iohAL1+PL+yP1DTWhGmsCWeTU6yJ3bs9S5OgofC7hfgdflo2tnDgxQNMfGziGS1QIVfKybgsg4y5GXTu6+TI34+E/LPFCaj0KkZcN+KsbgtTaBR9vrAVWgXenvD59XZ7kWsG5l071hQNEPQGxZrqcpU87Pvj2yrUijDtsBelXknChATyFuYNqA9h59Qq8NmOCnshIBD0BVFqlXjwRPQh4AkgU/UvXJV6JZlXZB6NIzgGuVp+wmBECI3VFzx5LS+hOIG6j+vwWDx07ulkwsMTQv2JUSJXySn+aXGflqX
YgljG3DsGV5uLhn83UP5mOaPvHg2ELDH2ejsBVyCqmyjsvnmDp+3v5diFseAXUGpDf8eNnzbiaHRQtKgIZYySoD/Izp/uDBPicpU8qkul9x3R6445HplsYIun7vJu0dIyXJF84hJDjti8WIL+IJb9Ic1QCAh07OoQA2Is+yyi7zImPYa0OWmi1uWz+2j8LOT/UuqVZFwWClgZyJamtNlppF+aHvFff1o4hEy6AXcAZCE/dvzoeLFPxjwjbVvbxJdR0B/sVwCcDI4GB+6O8C01cSPjcHe6xTmEkB9RCAjos/XIFDLR59k7v+YJAzOdOpucuNpCYwu4A1gPWkVzqTZJK/po/U4/3YfDc+0r9Ur8Tr+oGXm6QtqecYSRrtIuvNaj8zJQTTBhQgItG1tEK0Dr5lZiUmNE64ujyYG7PTQ/PruPnsoe4kf2vzfbmGekdUtr2H3r7W9cYRx+uz/Mb9xT1UPQf1QQKnQK8RkQgsKA77lCoyBhQgLlfyrHNN4kClO5Wo4hy0D7tnaxrdfqFRdF3Ye7RR+7LllH2sVpYXOo0CowTzTT+N/o/mHLvqPPSvvOduJGhi86HY0OMb7hZOjcd3SOOnZ1iOftrgztulDGhIR6x46OsPkDUMQo8HWHFkI+u0/83jzRTMPaBnHs3h6vGIMyUNztbqwHrafkRhsKSJq4xDnBZ/dR+ddKgr4gAU+Ag68eRBmjpPC2QuRqOYW3FVK1ooq2L9twt7sxjjCGRaaWv1VOTGoMCp0C2xGbGPkbcAfoKu2ic08nuhQdzmYnCcUJAzKnDxYhKOB3+tn37D6MI4wE3AG83V6yrs4CQj7Bqneq2P/r/cRkxOBocJBxaQaJUxIJuAOUv1mOEBAQggIHXz2IXC1n5O0jB3z9qr9VoUvVhW2RUmgVFH2viMq3K2le1yxqp6PuHIU6Xk3RbaHv2ne24+n0EJMeI/qj+0OXrKP6nWoUMQocDQ6SpyWLfuvMKzKpXFGJtcyKq92FPksfpnkrtAriRsZx4DcHUGqVqOJUFH2vCF2qjqx5WZS+VIox14jX5kWhUTBy0ch+XQ9ZV2VR/mY5+5/fj1KvxNvlpWhRkfi9aayJyrcrUcYocTY5ybgsA22SNuI8x/vz0+akUb2ympLnStBn6nE2OkmdlUryjGTxHpW/VU7zhmYQQhr+qDtHoUkIPWvxo+Kp/2c9B185iMfqCeVFmJZE0Bvk8BuH8fZ48dv9HHz1INnXZIf5/ntN6oW3hW97y7sxj/K3yunc14lKr8LZ7KTwu4XoUnUIgsCRVUfQmDWojWpsR2xkfyM8yU3WvCwOvHAAY65RdPf04mx2Uva7MoLeIEJAYPQPRod9X/P3GjxdHs5bet6Jb8gxKHVKvFYvZb8rI+AKINfIyf126G81eVoytR/X0l3Rja/HF9U6kDQliap3q+jc24m7083IO0aiS9aRdU0WFX+pYO+yveiSQ3/n2ddkh5nRT4Tf4af8rXJy5ucMOHh1qCLlTpc4ZQZTAEUIChF+SZlCFrZfM+gL4mpzodKrxC1B4u8DAu52N35XaNvV8RG3XqsXd6cbrVkb8dszRcATwNXiQq6Sh7bDHSd8vFYvHqsHXYruqGtACG3BORaZXCYKAghptDK5rE/zpqfLg0KjEDWaYxECAvZ6O0qtEl2KLqxPQkDA2eKMmF9PpweNWUPAEyD4/9k77/Aoq6yB/6bPZGp6Jj0hIVTpvTdFEbEtq6uirgoWioKFXQG7WHBFXVdX17K4q7vrt+KKqKCIdKRKJ6T3XidTM+X7Y8wkw0wgiUhA39/z5Hky8973tpl5z73nnHtOsxuZRkZzUzNimZj64/VU7Kqg19xemIvNyLXygAVSs6kZa6UVTYIGt8srENra3z1uD9YKKxKlJOB8rsvuwlphRaaR+eagqaCJqj1VnI5IIiL52mTfa1v1j05gMa1z77K7vKpVEZiLzCjCFH5j9bg8PgfEnA9zCDGGYJzkH+mrubEZW63Nu2g
87TNwO91YSixIFJKA+QWvWtpWbUMeKvd95sG++3KdPEDjY6+zBz+/7PGO1WV3oYrx90/AA9YqK84mJyHGkKACylxi5tS7p4geE+1z/tvz0B4GPjoQd7Mbp9mJJkkTMJb6k/UUfV5E/wf7B/apHVpszuYSMx6XB02Cf72OBge2au/vVKaTYa+1owxXBpRxWpwoI5X+Y8U7D82mZr9ngMvmwu10+75zbqebZlOzby7NJWay/p5F1IioDjmXtiDEThcQaINILPJ5K7eHWCZGHRfcM1kkEZ1x1S03yM+b8G6hxfu5PYL2ScRZ5yGYcG7LmQJViCQitMnBj6WJJKKg89silCUKie/B2FYIg9cHoL16ZVoZMu2PNvIgFjuRWOR35r8tEoX3NEJb5Aa535n71or8Xwabx7aLu9MDhridbo7/+TjGCUavBud4HXGXBp4Xlulk7dpeWzz020MsFweEce3Idx/O8LmKCKpJaLmmilLBGYKxqePU9F/SP8DUAT/W2452ue5YHYlXnT18bVtanMba+x3L9XK/kKzB5uX0Mn79jVAG3CNRSpDQ+rmLpWK/uRSJRKT9Lu2iDprUFkGICwgIdBhllJKIwRFnL3gOOdND/KcglopJuiqJ6gPVeNwees3t1SHh+ktAGiL1Oz5mnGQ8oyObx+0htHfoLyJm//mKjX++EIS4gIBAh1HHqdvdVV2MaFO15zyk58VIi/9Ge4jEoqBe+gLdj+CdLiAgICAgcJEiCHEBAQEBAYGLFEGICwgICAgIXKQIQlxAQEBAQOAiRXBsE+h+PPzsscQ7S1NBE5W7K0HkjQjWkhhC4JdP1Z4qlBFKweFN4KJA2IkLdC8eOPj0QcCbg9mXHKEbsdfayXwnk9C+oUQMiaD029KAcJACZ8ZSZqExp7G7u9ElGrIasNd3PPlHQ2ZDQNhbAYHzhSDEBboVe73ddwbYUe84Y+CS84Up14Q+XU9ov1B0PXT0vrt3QKQogTNTuasSU46pu7vRZTqaQAO8udndDmGRJ9A9COp0gW6jYmcFljILTrOTws8LvQk63B7EMjG6tDNnFirZWEJo/1Aqd1XS3NRMxOAIQvu1xoK219gp3VyKo8GBroeuNVUp3vCbljILcr2cyt2V3kQpU+OQG+SUfltKw6kGmk3NFH5eiMKgIHpsa8x2a7mV8m3luGwudD11RA2P8pkCmgqb/OqVaWS+sI4NJ711OhodGCcYKfmmBHW8mphxZ45Xbq+1U3u4Fk2ihoqdFYjlYowTjN4Qn3iDcFTursSU5xWYEYMjfMlIAKr2VqGKUmHKNWHKN6FJ0hAzPgax1JuFq2J7BbZqGzKdjJhxMSgjlNjr7NQfr8dpdmKvtxM7OZaSb0q8IUknGH39KvuuDHudHW2K1len2+GmeGMxjTmNyDQyXJ+7UIYriRrVGkKsIbOBqj1VeNweIodH+vW3el81inAFpnwTTXlNqBPVGCcaz7qIKt9WTmi/UN8isDGnEafFSVj/MOpPeJOC2OvsNJxqICTGG1q1JZqb0+Kk5JsSrBVWQnuH+qdT9eA3v2GXhPm+Z02FTdQersVpcVKxswKJUoKht8GXFcvj8lCxo4KGrAbkejmxk2J/1hj+Ar9OhO2FQLehTdYi18rRpeuIGhmFx+UhdnKsX/7h9ijbWkbex3noeugIHxRO/if5VO31xtd21Ds4/pfj6DP0JF2VhCnXRMH/Cnz3WkotFG8opnJ3JdGjo1FGKGkq8mbe0qZoUUWpkBvk6Hvq/fJUW0otHPvzMdQJaqJGRVH1fRX5n+a3Xi+zULKhxFevKkqFudiMo95B/qf56NJ1WMosZP8zG+NEI0Xri3DZg+c5bsFR76DsuzJKvy0lckQkykglx1475sv2VXe0DnuNnZgxMYQPDKfg0wK/cJq1h2rJ/mc2bqcb4wQjtiqbb9dY+m0pygglxklGVJEqb5IKu4vmxmYKPi1A20OLvcZO9gfZGCcYKf6y2Jfc5fhfjqNN1ZI
4M5GmwiYK1v44v2LQJGi8cdHDFWgSNH4hQmsO1lC4vhDjJCPGCUby/pvn398jteR8lIPb4cY40Yi91t5uLui2VO2p8mW7Au9CraXexuxG8v6bh73WTsy4GGxVNjLfzvQKaw+cePMEcq2cxBmJWCus1B+v9yVDqTtRh7XCStSoKMIHhVO4vtCXGU6mlnnjwzvdqOPUaBI0vnCz4I3Fbq20kjQrCXWCmmN/PtahsQgIdAZhJy7QbYTEhtCQ2YA6Tu1NegAdzkIE3ixIvpjabm9+4shhkZRvLSdyeKQvdWnSNUkcfuEwydck++71uD2k3ZKGSCxCl96669emaDEXe5M1nB5isuTrEm8Wq5HeXWXP23py8KmDxE6O9QsLmn5LOiKJyHe/Kc+EOl6NNkVLSGwIcr3cl5zCZXcFJG85HUejg/6/7Y9MK0OfrsdSbKFydyXx0+MJuySMsEta44q7m91+6R4B9D31vpSsbZ212s6HJlFDY24jjVmNyLQy1PFqdD101MbUIg2REmIMQaaV4bK5qNhVQcSgCMIHesN2plyfwsEnD5JyfQpiqZiwAWFU769Gl6oLiHle/FUxaXPSfFHfjBONVO3xT3upS9MRf1l8QH/PyFnSOMk0MhKv9Mb91iZrOfD4AczFZpw2b3KZlqQnSVcnUX2w2ndfaJ9QQvu0yfblgco9lYT192bGU4QpwAPhg8L9wpZaK62YCkwM/ONAX6z02h9qqT9Z75s3AYFzgSDEBboFj8tD3dE6GrK96s2qvVV4PB4qdlYQPiAcqfrsX82Q+NYYyCqjyudcZCmzYK2yUnes7sfGAJE3o1RLpqiQ2BCfer2jWEotPgEO3sQYcoMca7nVJ8RVRpUvK1ZbWt4TS8RIld6xdbR9RZjCb4enjldjLjYD3ixdpd+W0niqEbfLjcvmCkiy0p5mw1JmoWxzGZYyC3i82cd0aTpkWpmvvyJJYPY0a5kVS5mF+sz61vFJRTitTl+yFrfLjUjqPz6P24Ot2kbuv3J9Jgi33R2QzKMjmpjTOVsyxrbJWkQSEapoFdYqK26Hf/siicibBe3H6lx2F+Vbyqk7XofH7cFlc/klg3G73D4TUFus5VZcFhdHVx/1vddsasZpOXteewGBziAIcYFuoeWh66h3YOhlwF5n92UkOv2B2G4dbfJUe5wen4CWhkiJvyyeyGHtpGPCK0w7i1gmptnUqrL1uD3eFJ2K1rq6Uu/Z8Dj9BZTL4fK1mfm3TPQZenrf2xuxTEzDqQZKvi7xKx9ssWCvtZP5diY9ftfDa8MVQe5/cqED/lmSEAmxk2P97NynI5aJ8bj9+y0Si5AoJPS+p/cZM7N1dnEFIJFLcDlaVdXuZv+BtL3Wcl2qlOISuXCa/QVrW0Gb9fcsNIka+tzbB7FcjCnPROFnhb7rYqkYkVjkl9IUQKKSoDKq6Lugb6fHIiDQGQSbuEC30KJ2xQORwyMJiQlBHa9G31MfkFe5PWoP1/r+rzlU41Nf63vqfY5T8KOwbWwOWkdnCO0f6j07/qNsqjlY402dmfDzpjR0NDpoKvDa7D0uD3VH6tD31ON2ujHlmogZF+Nb+ATLux2MpoImlFFKrwOhCFxWF/XH6s9+I975rfy+svXYnceb87ktEqXEN+duh9tn+9el6fyOEbqsrrP6BXQERbgCS4nF2x2Xp1UL8yMNJxt87diqbFgrrWhTtOh66DDlmnz5ve01dqyVVp+9vDGr0Tu/8vbnVxoi9eWEb1nkaRI02KvtmEvMvnItfgwCAucSYScu0K2IJN7dma3G1uk0kJYyCyf/ehKP24O91k6f+/oAEDEsAlOeicMvHkabosVSYkGXrvPZRLtK7ORYmgqbOPKnIyjCFTQVNJE+Jz2o+vxcItfJKfy8EJlGhrnEjDpeTfiAcBB5bbEn3zqJNkmLucR8Vvt6C7p0HUVfFJHzYQ4SlQRruRWZPnjO7NOJGBSBKcfEkRePoO3hnV9NssbPxh4xKIKsNVm
Yck1YK6z0uKkHmkQNydcmk/lOJqYcEzKdDFOeiZTrUvxs4l0hdnIsJ/96Eku5BVu1DblB7qepUYQpOP7n417P9xwTydcmI1FJkKgkJMxI4Nirx9Aka3A2OdGl6rwLQBFEDI0g82+Z3u9RmSXoAjN8cDin3j2FMlJJs6mZfg/0Q6KUkDI7hVPvnEKXpsPlcGGtsNJvYT8kqo59RgICHUHkOZsxqRMsXryYDRs24HYLZyZ/TYSEhDDilhEoRp6/4zP7lu3zqio93rPm+nR9gDB11Dt8i4O2jmcuuwu3w+1nZ26Ly+bC7XT72T7bYq204jQ7Ucer/VT/Lrt3VynX+duk3U43TrMTuV6O0+z02Zkd9Q5vv86wBjDlmsj9dy79H+pPU0ETErkEdYK/zdhcYsbZ5O2PSCrCbXcj03n77mh0IFFIggp3t8NNU2ETIrEITZKG5qZmJAoJYpkYp9mJTCfzqpZFIFVJsdd5z/S3qLtb5lcRpgh6vt9R76DZ1IwqWuUv/DxgrbDitDq9Dn5tbO7NpmbEcnGHFyNtcZqdmIvNqGJUSFVSnDandwG0zqv+jp3iXYSFxIQE+A3Y6+zYqm1oEjV4nB4/X4CW+Q2JC0Eil+C0OgPym5tLzIglYu/Rvzafp9vhxlxqRiwTE2LsvB+GwIWD9pSWxVctxmC4sKI3CjtxgZ+M3W7Hbe6ehZsqRtWuR7vcIA94WAPtCjXfdaUECe1fV0UFb6+9esVSse+h39ZhL1jf2kMsFfvOH5/O6fm92/bh9AWFX51y//P4bQVTyyKgre36dEHd3vye9bqo/VMI7S2sOoJULfXb0cvl/m1LQ6Tths9VhLZZiJy2Hjl9fk+vN1iZFsRysZ9TncDFi8QuQau98D5LQYgL/GRcLhcuy/k9/xo5NDLAa/qXiEwnI3yQcCTpp6BJ0gQ42QkIdBapR4pEcuE9cwQhLnBOcDQ4zmsik6Srk85PQ92MMkJJ/PT47u7GRU3bc/QCAl3CDXr5T/Pb+LkQvNMFzgmVBZV4KoTdjoCAwC8PUaWIwb0Hd3c3giIIcYFzQlFeEebvzWcvKCAgIHCREV4WztBBQ7u7G0ERhLjAOcHlclF6shR3kXAyQUBA4JeDrEzGmEvGBHVovBAQhLjAOSMvM4/aTbVw8WagFBAQEGilCRIbEpkwekJ396RdBCEucM7weDz8sOMHGtc3QlN390ZAQECg64hMIoxZRu668a5O5Zc/3whCXOCc4nQ62bNpD7X/rcVVIKRdFBAQuPiQlcroUdyDRb9fhEJxYeeAF46YCZxznE4ne7fsJak0ifje8aiHqxHFiIQlo4CAwIWLG8SVYkJLQxk7YCwTrplwQe/AWxCEuMDPgsfjIf9UPkU5RRgPGTGmGFHoFYgVYkQa0Xk7Ty4gIPDLQKlUolarkUrPndjyuDxIHBJkHhl6hZ4hfYYwZMYQZLKuRw483whCXOBnxeVyUZxfTHF+MQAikQiFQoFYLGzLBQQEOs6YMWP4ze9/Q0JCwjmrUyqVotFoLshIbB1FEOIC5xWPx4PNZuvubggICFxkeDwedDod4eFCGOK2CNshAQEBAQGBixRBiAsICAgICFykCEJcQEBAQEDgIkUQ4gICAgICAhcpghAXEBDwIyQkpLu7IHCeED7rix9BiAsICAAgFou57rrreO+991Cr1d3dHYHzwJIlS/jDH/5AaGhod3dFoIsIR8y6kYzfZKBL1vleV/5QScHXBWe8RyKXkDw9mYi+Ecg1choKGsj/Kp/Gwsafu7sCFzFJSUnMmzeP9957j6ysrIDrMpmMJUuWMG7cONatW4fT6eyGXv56kUhEuFye897ud999x3333cfLL7/MihUrKCwsPO99EPhpCEK8G2kqb80Skjw9GXP5WfJxi2Dog0PRJmgp+KYAe70d43Ajw/8wnC1LttBsaf6ZeyxwsaLRaBg4cCAeT3BBsWDBAsaMGcOf/vQnNm3aFHBdLhdz/Y1RjBpjICpGhrM
ZDh008Z8Py8nP/eWe+4+IlPPUC6lnLbf0/mwaGpyseq0nak2rgtNm9VBZYWfXjga2bKqjneln6mVhnDppIS/XGvT6cy+nozcEBiQ5esjM66uLAO9n9Mpfe7LvexPvvFkSUPaNd3vz5efVfPZJle+9bdu2cerUKZ599lmeffZZFi1aRE1NzVnHK3DhIAjxbqRkW+sPLenSpLOW1yfrCU0P5fDbhyndWQpA8ZZiYkfF/ioEeGRkJC+88ALffvstH3zwQXd35xfDlClTmDp1Ku+8805QAQ7w0KNJ9B+g4d8fVpB9ykJ4uIzxk0N58dWeLHsom8wTFr/yE6eEcuudsbzwTD4njp5lcXoBY7W42Phlre91SqqKcRMNrFtbRV1dq7bCZncDEBklo7bGycF9Xs2YTi+lZ68QFj2YSO8+at54tThoOxOmhGKMU7QrxL9cV41CKab/AA1jJxh496+l2O1uqiocvjIiEcQYFVw+U863G2sD6oqKkeF2B64iKioqeOKJJ1i9ejWLFi1ixYoVHZwdgQsBQYhfRKhjvHbK2pOtDxW3003xtuAPhl8aarWa6Ohoiot/HeM9H0ilUm655RZOnjzJJ598ErRMRKSMoSN0vP92Kf/7b+subsu3ddx2Vyx5OYE78bBwGYZQKQV5wYXSxYLZ7GLtfyp9r8dNNDBuooGvv6qlqCC4BuLkCTNr3i3zvZZIRMx/IIFpl4fzv/9WUVpi9ysfY1TQq4+a6Bg5H7xbFlStvuXbOgCkUhFjJxj4blMtpsbgWQIlEhG3zTXy2NLcDo+zsLCQNWvWMHfuXAYMGMChQ4c6fK9A9yII8U6gS9QRmhGKXCOn2dxMbWYtjQXBbdGqSBVRA6KQaWSYy8xUHqzE5ehaas6QqBBUESr0KXrAuyNvEeiNBY00m7u+CxeLxQwZMoSkpCRsNhsHDx6kpMRfFWcwGEhOTubkyZN4PB7GjBlDdHQ0J06c4Icffuhy2+BV844aNYqoqChcLhcFBQXs378fh6N1hxEaGkpSUhLx8fGAd0c+cOBAAOrr68nPz/erUyKRMGDAAHr06IFIJKK4uJgDBw4EDfealJTE1KlTCQ0NJTs7my+++AKDwcCUKVM4fvy438NMoVAwcuRI4uPjaWpqYvfu3VRUVPyk8QP07duXPn36IJFIOH78OEeOHPFTe+t0OlJTUzl16hTNzc2MHj2a2NhYsrKy2LdvX9A6+/fvzyWXXILL5WLHjh2+90/PyjR8+HCioqJ4/fVI5/TTAAAgAElEQVTX21W1h4V7k0HU1vh/zzweeO+tUr/3EpOVGEKlJCYpcbk8pGeoffUWF9oD6ghRixk6XE+MUY7V6iY7y8LJY+agaufho/QMHqZFKhGxZ3cje3Y10Le/moFDtHy5rsav7rBwGcNH6TAYZJSX2/l+RwNWqzvo+H5uXC4Pn62tYuLUUHqkqwKE+PjJBsrL7ERHK+g/QMMPB0w/qb0D+xoZPFTHiFF6vt/V0OH7vvjiC2bPns2MGTMEIX4RIQjxDtLz+p6kXpFKXXYdtlobaqOaXjf2omJ/BYffPozL3iqge8zsQdrVaTTkN9Dc1EzStCTSrk5j36p9WGs6vzMxjjSSclkKYqnX1tb/zv7w40Nu/yv7qTtV16UxRUVFsWLFClJSUigtLUWr1XL33XfzySef8P777+N2ex96ffr0YdmyZTz99NPccccdKJVKRCIRt9xyC//6179Ys2ZNl9rPyMjg2WefpampiePHj6PX6/ntb39LQ0MDK1euJDMzE/AKuYULF/qSFNxwww0+x6tdu3bx8ssvt86V0chjjz1GXFwcJSUluFwu4uPjaWhoYPny5RQUtDoODh8+nGXLlpGfn09xcTGzZ89m5syZ6HQ68vPzMZvNvodZeno6y5cvByA7O5uEhATuuOMOXn31Vb755psujV+lUvHII48wcOBAjh49ikwm4+abb2bXrl2sXLnSN/8ZGRk88cQTrFy5kptvvhmtVov
b7SYsLIy1a9fy9ttv++oUi8U88sgjjBkzhtzcXJxOJzfccAPr1q0L2ofhw4djsVg4cOBAu/0sLLBhbnIxY1YkB/aaMJvbX4zOuCqCMeMNyOQiJBIRD/2x1Uz01z8Xs+27et/rkWP0LFiciFQqorDAilotwRgXy+GDTTzzWB4OR6vQ/f28WK64KoKD+0y43fDIsmQKC61ERsrJybKyd1ejT4hPnxHOnffGUVhgo7qymSuuiuCmW2N4clleu7vnnxuptP20fRMnh/LNV7VcMlDDxCmhP1mI797RgEop4dY7jezf24jT2TGHOYfDwZ49exg3bhwSiQSXq2ubDoHziyDEO4BUKSXl8hTyNuSR+e9M3/vhfcPpfWNvxFKxT4jHDI8h/Zp0r916l3eXogpXMXLZSPre1pd9LwXfOZ2JnM9yyPksh8TJifS5pQ9bHvzpTmwSiYQVK1YQERHB4sWLyczM9AnmG264gdraWj799FO/e+677z7ee+89vv32W0QiEStWrGD27Nl8+umnNDZ23jv+uuuuo6mpibvuusu38w4PD2fp0qV+R162b9/O9u3bGTp0KE8++SSPP/44hw8fDlqn2Wzmhx9+YOnSpdTXewVGREQETz75JIsXL2bRokW+svPmzWPz5s2sXr0aj8eDwWDg9ddfZ/369bz//vu+chqNhuXLl5Odnc3zzz+P3W5HIpHwwAMPsGDBAo4cOdKlHfndd99NRkYGCxYsoKjI65w0atQoli1bxlVXXRUw//fccw8ffPABGzZsQCQSsXTpUmbNmsUnn3zic0aaPXs2w4cP5+GHH+b48eMADBkyhMcffxwI3IknJCSQnZ19xge2zermzdeKuf/hRFb9OZ3/+1clWzbVBRUOb7xazBuvFnPXvXFMvSycm68/2m69FrObjz+qYN3aKp8Kuf8ADcufTuXa2VH86x/lAMQnKph5TSQvrSxg+xbvZzp4mJblT6XyxyXZnDjWanPvP1DD3PnxvPvXEj7/tBoAvUHKypfSuP+hRJbMP9Vuf34uFEox190Qhc3q5tDBJr9r6RkhxMQq2LG1nsYGJ3fcHYdCKcZu+2lag/feLuX51elcPjOCdWurzn7Dj2RmZnLppZcSERFxTrRMAj8/wjnxDuB2uvE4PWjjtUjkrR6iNcdq2PXULj91do8re1D5Q6VPgANYa6zkb8wnvE84Cr3ivPa9PUaMGEFqaip///vffTtej8fDmjVryMzMZPbs2QHpQr/66iu++eYb3G43LpeLzz77DLFYTFxcXJf6YLVa0Wq1xMbG+t6rqalh+fLl7N69O6C8XC4HOGMWtMbGRt58802fAAeorq7m3XffJT09nejoaMBrXzcajWzcuNGn7q2vr+f7778nOTnZr86pU6cSFhbG6tWrsdu9qlCXy8Xf/vY3ZDIZ48aN6/TYw8PDmTx5Mh988IFPgINXs3D48GEmT54ccM+mTZv48ssvffP/v//9D7FY7DMziMViZsyYwebNm30CHGD//v3tqkf1er3fXLXH9i31PLY0h8ZGF/MfSOCN93ozeVoYEknwHaZcIcZiPfMxtcM/mPj0/yr9bMBHDjWxaUMtY8YbfO+l9gjBYnH5BDjAgb0mamuaSUpW+tV53ewoTh43+wQ4QEO9k//+u5LUNBUJSf7lfw6GDtfyxMoePLGyB8+9nM67/+zLsOF63vxzMY0N/nMyaWooWZkWKsod7NrRgFQmYtQY/U/uQ1amhS3f1vHbm6LR6Tu+V2v5Luh0urOUFLhQEHbiHcDtdHP070fpd3s/xq0cR8nOEsp2ltFU1uSnRpeFyNAmaLFUWEiY6J/zVhWpQiQWoYpUYW+wn97EeWfQoEEAbN26NeDa9u3bueOOO0hJSSEnJ8f3/p49e/zKVVV5V/hdDRTxwQcf0K9fP1avXs2uXbv49ttv2b9/f7tCWqn0PoA7kspUqVTSq1cvwsPDkUgkPkEdExNDRUUFVqsVq9VKTEwMx44d891nNBq
prKz0q6tfv37U1dUxZsyYgHbsdnuXFjF9+/ZFIpEQGhrK5Zdf7ndNKpUGrXPv3r1+r1t232FhYYDXdyE8PJyTJ08G3Hvo0CEGDx4csBM3m80djtp17IiZRxZlMWSYjtk3RbNgSQKXzwzn6eV5NJwmnJRKMbYO2qBDw2T0zAhBo5PgcnlwuTxERstax1ntQKEQYzDIqK/3LphD1GI0WolfuyIR9Omv4ejhJi69wj9dZYzRuwA0xsp/dpW61eqmvMz7Gx8yXIfD4ebRh7ICjuJJJCLGjDfw8UfeHa+5ycWBvY2MnxzKd5u6ZiJryz/eK2PUWD033BzNW68HHjkLhkqlAqCpqeksJQUuFAQh3kFKd5ZSn11P8qXJJExIoMeVPajPrufkv05Sn+Ndvco03gePNlGLKkIVUEdjfiN0j29NAHq9HpvNhtkcePynPeF8uvBssdlKpV37GlVXV3PPPfcwffp0pk2bxhNPPEFdXR0ff/wx69atC1DxtrTT3Ny+KUEqlXL77bdzxRVXYLPZqKqqQiQSodVqAXyRyNxuN7t27eKOO+6gubmZ4uJiJkyYwCWXXMLSpUv96tTr9SiVygBhC1BUVERtbW3A+2dDr/futkaOHBnUoSxY0I3T57/FL6BlXjQaTdByQLvmjsrKSlJTz34Oui379zayf28j4yYaWPRQIjf/3sjrLxf5lZHJRGe1xUZGyZm3II7BQ3VUlNsxNbpQKMToQ6UoFGKkUm8dWacs1Nc5eWhZEv94vwy3y8Pv5hgxN7k4uL/VfhwSIkEmE5GQqESnC/xO5mRZcZ6Hk5jHjpj562veExQTpoRy/0OJKBSBSs8hw3Xo9FL69teQkOhdoBpCZfRIVxEaJqOu9qd1tqa6mc8+qeK62dF8ua5jZ7/j4uJwuVxd+k4LdA+CEO8ElkoLx/9xnBMfnSBqYBQ9r+/J8KXD2fXELkzFJp/3eeGmQvI35ndvZ8+CyWRCLpcjl8v9PMGhVcB0xc7dWRwOB5999hmfffYZycnJzJ49m7lz56LVats9Cy6TyYK+D7Bo0SKGDRvG008/zYEDB3wC0mg08s477/iVXb9+PWPHjuWhhx5CIpHQ2NjIq6++ypEjR/zK2e12KioqWLhw4U8crX+dAM8///w5OzLXsntq2Zm3Jdh7AD/88ANjxowhNjaW0tLSoGXaY9t39UyYHErf/sFDtMrl7VvrlCoxz6zqQU11M/fdcZKy0lbt1JVXR3LH3bG0KA0cdg/ffl3LVddE8uyqNACKCmw8tTzPb7dvt7vxeOC7TbX88/3yTo3l52L7d/XM/l00N91qZMXSHL9r4ycZKCux+6nY83OtJKcqGTfR4BeUpaus/U8l06aHc+tdxg6VHzJkCEePHvV9PwUufASbeAdRx7Y+qDwuDxX7K9j74l7EUjER/SMAsDfYsdfbMaQZ2qvmguHYsWOIxWKGDh0acK3FY/n0o1vnmsTERL/X+fn5vPDCCxw5coRRo0YFlG8RUi1q9dNRKpVMnjyZ9evXs3//fr8dbv/+/QPK33jjjfz3v/9l9uzZ3Hrrrdx0001s2LAhoFxOTg6JiYnn1E6YnZ0NeNXq54r6+nqqq6sZMmRIwLXT7fwt7Ny5E7vdztVXX91uvWKJ9+hYMJxOD1ZLoHrJZHIhV7TvkT10uI7IKDnvvVXqJ8DB69zWFoVSzKzrIln+SA633XCM3//uOAvnZZKX43/Sw+n0UJhvo08///u7E5fLw8cfVtB/oMZvXCFqMcNG6lj7f5U+Z8CWv/17TEyaGnzR1VmsVjcfriljyDAdYnH7nwdA79696dmzJ9999905aVvg/CAI8Q4QOSCSoYu94U7boon1/igtlT9Gq/JA4beFRA+JJn5cfEDZqEFR56W/4LVtLVmyhBtvvDHo9R07dlBWVsadd95JRESE7/1p06YxZMgQPvvss4Ad+rlEqVTy1FNPMWvWLD87rUqlIiIigrKysoB7WhzA2grkAQMGkJKSAng
d8zweT4CQNxqN3HrrrQH1qdVqJk6cyLRp0+jVqxeDBg2id+/eKBT+zodfffUVbreb+++/3y8xiEwmY+bMmb6jb50hNzeXY8eOMWfOHHr06OF3rWVn3FncbjdfffUVgwYN8luc9e7d27coOt0mXldXx+eff85ll13Wrlp9+owIli5Ppnc//x338FF6ho3Qs21LoP22uNCGwSAjPtE7lxKJiMuuCPc5wjU3exdYSqX/3I2dYGDYSP/FklIhRiIR8dubohk2QkfvvmouGaQhKUUZIJi+XFdNn35qrr4+yu9aZJSc8ZOC+24kJClZ+lgyw0f9dIeyYGz7rp6SYju/uzXG997ocQbEYhG7tgee496+pZ7kVGW7C6fO8u3GOgryzuwHIJVKmTdvHmVlZV0+MinQPQjq9A7gsrsQS8SMWjGKusw67A12lKFKQjNCqTpUReXBVkeo3PW5aOI09Pt9PxKnJtJY0EhIRAiGdAPle8r9yg5eOBhlqPeHKpaIiRkWgyHVu4vP35jv5+HeWQYOHMiUKVMAWLt2bYCd1G6388wzz/Dkk0/y1ltvceTIEXQ6HRkZGWzbto0PP/ywy213BJlMRkFBAfPmzWP69Onk5OSgUCjo168fIpGIv//97wH3FBUVcfToUW677TYGDBiAXq8nNTWV119/nby8POx2O59//jlXXXUVoaGhlJWVERsby9ChQ9m5cyfTpk3zq+/IkSNMmzaNW265xU8422w2XnvtNTZv3gxAWVkZq1atYsmSJbz99tu+4239+vXD5XKxZ8+eLh3HeeGFF3jqqad45ZVXOHToENXV1aSmppKSksKf/vSnTqu3Af7zn//Qr18/Hn/8cU6cOIHT6SQjI4N169ZxzTXXBPVf+Oc//8mwYcN49NFHeeihhwLsoadOWpBeL+KZF9MoyLdSVdFMZJSc5FQle3c3su6T6oA6d2xtYPbvYnj6xTSyTlpISFKi1Uo5ftRMUaGNg/sbKcy3sfgPiezYUo/d5iatZwgxsXJ+OGBi0JDWBbPV5iL7lJW4BAV3L4z384ivKHfw1LJcSoq9u/mNX9aQlKLk1juNTLk0jFOZZkLDZPTtp+HIIRPbt9QHhB699PIwRozSExomY08ngqN0FJfLw/99VMGihxIZPEzLgb0mJk4J44f9JppMgUf79u9pxGp1M2lKGH9/pxS5QsSzq9IBMIR6P78nnuuB2wVlpXZeWnnmpEkul4f33y7lsWeDL9JEIhH33HMPaWlpPPLII8L58IsMkae9ME1dYPHixWzYsMHn8PRLQiKXED00GkOqAblWjsPkoOZkDZUHKvEEiUcclhFG1KAolKFK7A12qg5XUX3U/2GXMDEBaUjwdVTtiVoa8vwfKLpEHeH9winYWIDbeeY5VigU3H777ZSXlwecN25LSEgIkyZNIjExEbvdzoEDBzh06JCfKjouLo7Ro0ezYcMGPzu5Vqtl+vTp7N69m6KiItRq9VlVzg6Hwy/BQp8+fRg2bBiRkZG4XC7y8vLYvHkzDQ3BH6YqlYrLLruM6Ohoqqur2bt3r58TmEgkYtSoUQwaNAiNRkNpaSmbNm1CKpUyYsQIdu7cSUlJCZMmTWLevHnMnTuXxsZGJBIJOp2O8PBw5s2bR3R0NHPmzPFrOyIigkmTJpGUlITb7SYrK4tvvvkGq9Xqm8sWf4L2aG5uprq69XsglUoZP348ffr0ISQkhPLycjZv3ux37MxoNDJ27Fg2bdrkJ2BDQkKYMWMGe/fu9TN9SCQSJkyYwIABA7BarWzcuJHKykouv/xy9uzZ4xfwpoWEhARWrlyJ3W7n6aefJi8vz++6XC5m5Bg9vfuq0Wgl1Nc5+WG/iQP7GttN6mGMVTBhcihqjYSiQhs7ttZjbmoVEEqVmGnTw0lOVSIRiziVaWHLpjp691OTkKTks/96z48vXppESIiYZx7Lw+MBuUKEVuuNCvfgH5PZvqUuICZ5ekYIo8caiIiSYTJ5+7p3d/C+xsT
K+e3vYti8qZbDB8/slZ2QqGToSB2bNtQGHBcDrz2/pMjm53AHXk3ElVdHUFZqZ9+eRq66NpJjh81kZVoC6gAYM96AXC5i8zd1SKUiZl4bGbRcY4OTTRu83wmxBGZdF8UP+0xBY7BfdkU4OdlWsk+1tqlSqZg/fz4TJkzg9ddf58svvzzj+LuTqVOn8sADD3TaEfOXjiDEBc4ZV199NXPnzj1jmaNHj/Lwww+fpx61z9KlS4mOjuaBBx4IuHbDDTdw3XXX8Zvf/KZTdV555ZXce++9Zyxz8uRJFi9e3Kl6zxeRkZGsWLGC5ORkHnjgAZ/dvrv5aG1/Pvm4ko8/DNR2PPdyOhXldl5+Xkih2RX+8pe/EBUVxcsvv+wXnvdCRBDiwRHU6QLnjK+//prvv//+jGV+Tjt7Z8jJyWHMmDHcdNNNbN26lerqahQKBX369GHmzJlBz8+fjZZz7mfiTMfjupuqqiruv/9+pk+ffkElmcnLsTLjqggqyx0cPdyExexCo5UyYXIoaT1VfglKBDrHJ598wv79+6mr++nn0gW6B0GIC5wzzGZz0HPnFyKffPIJTqeTSy+9lJtuusn3fn19Pd988w3/+Mc/Ol2nxWLBYgmuHr1YcLlcrF+/vru74ccLTxcw+6YobptrxGBoPV5YXGjntZeKOpXkQ8AfwYnt4kcQ4gK/SlwuF2vXrmXt2rVIpVIMBgMOh+O8nI0X6Bz19c289XoJb71egkIpRqORYDa7OhwRTkDgl4wgxAV+9TidTj9nM4ELF7vN/ZOTgwgI/JIQzokLCAgICAhcpAhCXEBAQEBA4CJFEOICAgI/KwaDgQceeICRI0d2d1cEzgNDhw5lyZIlXc5uKNA5BJt4NxI/Ph51dGuksPqceioOnD3yV3ifcCL6RiANkWIqMlGyo8QvJaqAwIVCamoqTz75JDKZzBcBT+CXjcPhYNiwYQwZMoTHHnuMrKys7u7SLxpBiHcj6hg1uiRvhLPw3uEUflt4ViHe55Y+JE5OpDazFludjbSr00iZnsLOx3fSbLlwzyAL/PpITEzk+eefp6amhmXLlgV1HgwNk3Hl1REkJilRqsTk5VjZ+EUtxUU/b87v7uY3N0bjcLj533+DZyqbfVM0NdXNvmhsbZk+I5yv1gdPLXrFVRHEGOW8/3ZZQHjZ0xk7wRCQbKaFt14vweXy3j9xaii9+7RuNsxNLnJzrOzd3YjdHuhkePjwYe69916eeeYZnn/+eRYvXvyzJ1P6NSOo07uRzP9ksvfFvex9cW/Q0K2nozFqSJycSNbaLPY8t4fDfz3MjuU7qDpa9asQ4AaDgQcffJAZM2Z0d1e6BZ1Ox4MPPsisWbO6pX2NRsODDz54xoxnLSgUCpYtW4bVauWRRx4JKsB79VHzl3d6MXyUjtwcK1mZFtIzQlj9Zk+mXR6YxUutlnD/Q4lc1U4I0ouJkBAJv7s1BpUq8BFsMMiY/btoCPJICAmRcPvcOGKMisCLwKAhWoaN0J9VgIM3lK5aI8EQKuXSK8JJ6xmCWiNBrZHQNk9O3/4aRo8z+K71G6Dh/ocTefmNnr486KdTW1vL0qVLMZlMLF++vN3MgwI/HUGIX0Ro4r2r5raJUewNdo6vOd5dXTqvGAwGJk+e/KvNdazVart1/C3tdyTq3IwZM4iPj+ell15qNw7+DTdHU1/nZMn8U3y0ppw175Txh8XZfPV5TdD0pmqNhAlTQn0Z0C5mtm+tQy4XM2xkYKz9ISO0eDywe2fgvI0aq0euEDFuUvvpjjsaSfvbr2tZ9WwBb7xSAsA3X3lfr3q2AKfTv46yUrvv2sOLsrj/nkzEYhHLnkppN+VsQ0MDL774IkajkauuuqpDfRLoPIIQ7wQSuQR9ip6IfhHoU/WIpe1Pn0gsQp+iJ7xPOMqwn7YKlcglSEOkqCJUgDefuTREijREiugsOYI7gk6no2/fvqSmpgZNqymVStFoNL5rRqORAQMGnLP82vHx8QwYMIB
LLrkEgyHw4dTSfktyEYfDgUajQaPRtLvCV6vV9O3bl759+xIW1n5uZoVCwejRo7n88svp2bMn4E0gMmjQIOLj4wPKR0dHM3DgQNLT0xGLz83PJzY21jf+YM5ALeNvmRu73d6p8YeHh7fbtlqt9tWh1+sZMGAACQkJfmM7vX2bzXbG9sViMddeey0//PADhw4darftuAQFOdkWHHZ/gfH3v5WyfUu977VEIkKtkaA3eK1/DrvbtytUBtnJAqhUYnr3VdOnn5rwCFnQMgAymYgRo/RcekU4vX5UGYsl0H+ghoSkwLFFRMroP1BDz4wQv2xqnSUny0p5qYMx4wO/76PG6Dl00OSXLKaFCVNCsZjdTJzSvU5jxYV2XnmxkKhoOdOmt//9Onr0KPv27eOaa67pUspegbMj2MQ7SMywGPrc0geJQkJzUzMKgwKnzUnOZznkb8j3KxvRL4J+t/dDoVfgsruQqqSU7SnjyLtHcDs6H6iix1U9SJ3RGvR/4ksTff/vfWEvNSeC28fOhlwu5+6772batGm+H1h1dTVvvPEGu3bt8pUbOXIkf/zjH1m8eDHXXHMNY8eORSQSYbPZWLVqFTt37uxS++Hh4SxbtoyePXtSU1ODRqNBLpeze/duXnvtNd8Obvz48Tz44IO++5YuXer7f+vWrTz33HO+1yqVinnz5jF16lSfIPJ4PHz//fe8/PLLmEyt2aUSExN56qmn0Ol0NDY2EhERwbZt20hOTiY6Opq1a9eyZs0awCvgFi9ezNChQ2lsbESn01FeXs5zzz3XZccdg8HA8uXL6dWrl2/8CoXCN/76eq8gGz16tN+YH3roId//u3bt4qmnnvK9ViqV3HXXXVx66aW+z9Tj8bB//35WrVoVEJFuzZo17Nq1i9zcXObMmYNcLgfgxIkTPPnkkzQ0NDBixAgeffRR3z1Llizx/f/999/zxBNP+NWZnp5OWFgY77777hnHX1rsoHdfNXqDlIb61oxgp++0hwzX8ofHUnyv5y9OYP7iBAAO7DXx1PJc3zW5QsTtd8UxdXoYUmmrkD2438QrLxb6tWOMVbD86RQiIuXU1zUTHiFj7+5GomMUxMYp2PBFNe/+1av1Umsk3Hd/AiNG62gyudBoJdTWOPnTcwWcONa1UMM7ttUz85oIVCox1h+jz6lUYi4ZpOWvrwXGrg+PkNG3v5pXVxVx/8OJpGeEtJsF7Xxw4piZgjwbI8foWf+/9oMlbdq0iaFDh9KrVy+OHTt2Hnv460AQ4h1ALBVzyZ2XUHW4isNvH8bl8ArmlOkppM1Ko3xvObZaryOOPkXPkPuHUPRdESf/cxK3w03UwCgG3DuAXuZeHP+g86rvvC/zKN5ajHGkkfRr0tmxYofPG91e33XV6sMPP8zIkSN5++232bx5MwaDgblz5/Loo4/y2GOPBSTzuP/++zl27Bh33XUXUqmUP/7xjzzwwAPs37+/SyreOXPmYDQamTt3LiUlJYjFYoYOHcr8+fMZP34869atA2Dv3r0sXLiQPn36cPfdd/Paa6/5BOfpsdojIyNJTU3l6aef5siRI7hcLgYPHszChQuZP38+K1eu9BtPVVUV8+fPx2Qy0a9fP5555hk2btzIvffe68vGJ5FIePLJJ5HL5dxzzz0UFhb6FiCPPfYY8+bN61LM+FtuuYXExERfnWKxmCFDhnDfffcxadIk1q5dC8CBAwdYuHAhGRkZ3HffffzlL3/h5MmTQccfERFBeno6zz77LEeOHMHpdDJo0CAWLFjAokWL/AR+CxkZGWRkZPDEE09w8uRJkpOTmTNnDk1N3rSchw4dYuHChaSlpbFw4ULefPNNjh8/HrR9gLS0NABf3vX2+HBNGU8+34OVf0rj738rY+/uxqC23KOHm3hwwSmSU1XMfyCBd94s8QlOs9l/txoeLiejdwgvrSzg6OEmmps99B+g4d5F8SxYksDTy1tTrM6bH4fF7Gbu4hPU1zfTMyOEJ57rwa4dDTy
08JRPpSwWi3j08RQMoVIenJ9FXq4Vg0HGQ8uS+MNjKSycm0l9fed9UnZsqee630YxbKSerZu9CUiGDNchEgVXpU+YHEplhYOtm+uYeU0kk6aGdqsQB8jPtTJ0xJk1ckeOHAG8iztBiJ97BHV6BxDLxIikIuyNdlwO70PDaXWStTaL3U/v9glwgIzfZNCQ38CJD0/4dg8vPb4AACAASURBVN2VP1RS9G0RcWPjkCg6r1JqNjdjqbTQ3OR9UFirrVgqLVgqLb7+dJa+ffsyevRo/v3vf/O///2PxsZGCgsLeeKJJ6itreWuu+4KuCc7O5vXXnuN0tJSCgsL+eijj1Cr1UHVzh1BrVbjdDp9ObLdbjd79uxh2bJlfP75575yJpOJ7Oxsn3NUbm4u2dnZZGdnU1ZW5ldnYWEhixYtYvfu3ZjNZmw2Gzt37uTdd99lzJgxPhOASqWiV69e/OMf//Dtzo8ePcqOHTswGAx+6XTHjx9Peno6K1eu9OUur6mpYfXq1YSFhTF27NifNP6W/Oput5u9e/eyfPlyvxzwTU1NZGdnU1npzdaVl5fX7viLi4tZtGgRu3btoqmpCZvNxq5du/jb3/7GiBEjgqrrY2JiWLFiBQcPHsRqtXLixAmWL1+Oy+Xya79l/s/UPkBoaCgej8cv93kwMk9YeGxpLi6Xh6Urkln9Rk9GjdX7OVUBWMxucrKsVFd5v/+F+XZysqw+lXRbykrtLJl/it07GmgyubDb3Oz7vpF33ixl8FCdT7UulkC/ARr+82G5TwCfyrSwdXMdhlCpn0142AgdvfupefmFQl+e7vr6Zl5dVYhGK2HsxDPnkG+PvFwrJcV2P5X68FHtq9InTgllx9YGPB7YvqWeMeMNP0mlfy6oqnKg1kgQn+GxVl9fj9vtFs6N/0wIQrwDOK1OCjcVkjg5kcELBxM5INJni24qbfKVkygkhGaEUneqDlWEipCoEN+fpcqCRC5BbVS318x5ZfTo0QB88cUXfu83NzezceNGEhMTiYuL87u2YcMGv9clJV6HmDPZXM/E2rVr0Wg0/PnPf2bmzJk+AVtYWBjUOUeh8HrkWq3WM9bbcq9CoSAmJgaj0UheXh5isZiEBK8atrm5GafTiUzmby+VSCQ+4dXC8OHDKS8vp7m5GaPR6PtzOBzY7XZ69OjR5fErlUpee+01Zs2a5bM5FxUVBR1/i/25K+PPz89HLBaTmJgYUP7w4cMBwtjpdAaUa1G1n639lns7YgM9edzMonmn+NNzBYhEIh5elszzq9OJjpEHlFUqvI8rm/XMC9eWqVMoxETHyIkxKijItyISQWKLndsjwuHwIJP5PwIlUhFul//cDx2ho6HeianRRYxR4fsTiUQ0mVykpoWcdZztsXNrPQOHaAgJkSCVihgyXMuubYG78NQ0FQlJSrZ/5zWxbN9Sh1YnZcjwc+OX0lXMZpfvKNrZuFDSEP/SENTpHeTERyeoz64n9cpUhtw/BEejg5IdJeSuz6XZ7F3JK/QKRGIRKZenkHJ5StB6ZCHtO9mcTyIjI7Hb7b5dYFtKS712wKioKJ+gBgK8jFt+lC3CtbMcO3aM+fPnc/PNN3PnnXcyd+5c9uzZw8cff+xTF7elRYic7WEwZswYrrvuOtLT0wMEiVarBbyC5sSJE9xyyy3k5eVRXV3NqFGjGDNmDC+99JLfPREREcTExPDOO+8EbS8kpGsP8czMTO69917mzJnD7bffzp133sm+ffv4+OOPferqtnR0/KNGjeL666+nZ8+e7Y6/LR1N/tLyOZ+t/ZqaGkQiEREREUF36qfjdnvY9l09O7Y2MH6SgTvvjWPZk6ksujvTT70uk3sXzvaz+JUMHaHjmt9EkdE70PlMq5P62jx+pInf3hRN9ikLFeUOBg7WMm6igXffLPW7JzRcit4g5Y33egVtT63uusPW9q31/OZ30QwbqcPU6EIuFwdVpU+cEkqTyUVcgoK4BO/nUFPdzPhJBvZ0Yyp
WnVZKfZ0T9xnWVZGRkYjF4qDPGoGfjiDEO4oHyvaUUbanDE2chvjx8SRNScI4wsjOJ3biaHT4dkAnPzpJ8fZAxxQAl+3CiKxmt9uRSCSIRKKAXV/L7vR8HGUqLi7mueeeQ6PRMGHCBGbNmsWqVatYtWoV3333nV9Z0Y961tN3z225/vrrufXWW/nXv/7FSy+9RFVVFSKRiMTERF555RU/r+v333+fl156iTVr1mCz2VAqlWzcuDGgXZfLRWZmJsuXLw/aZkeOXLVHWVkZzz//PGq1mvHjxzNr1ixefPFFVq9ezddff+1XtiPjv/rqq7njjjv4+OOPefnll6msrEQkEhEbG8vrr78e1KO+rengTHSkffCaJQAGDx7cqdzkbreH7zbVodFKuOPuOOISFBQVtJqqWtTsp++e2zL9ynDuujeOT/+vijdeKaaywrvgiIqW89rbGYja3PrBe2WsfiODN9/vjc3qRqkSs31LPd+cFmDF44aiAht/WJIdtM3Tj2N1hsJ8G8WFdkaN09NQ5wyqShdLYNzEUMxmF9f8Jqq13WYPw0bqCFGLsZi7J7NbSpqKnKwz2+UHDBgAtNrGBc4tghDvILIQmS+gSlNJEyc/OknFvgpG/HEEMcNiKNxUiK3WhsvuIiQmBKclUB15IZGbm8uUKVPIyMgI2PX2798fp9Pps//+XKjVap9jVFNTE+vXr+frr7/m1Vdf5eqrrw4Qpjab94HenhCRyWTcfPPNrF+/nn/+859+1yIiIgLKz5gxgy+++IKdO3ei1WrJy8sLOubi4mImTpyIzWYLqmbuKm3Hbzab+fLLL/n666955ZVXmDVrVoAQbxl/y478dMRiMbfeeisbNmzwedW3EGz8neVs7bdQVlZGfn4+l112GV988UW755a1OgmmxsBFbWWF93d2+nrD/uNRNLk8uB1YLBZxy++NbP6mjg/e9dcAhIYHPuqmz4hgy6Y6Nn1di8EgpajATn5eoKmgpNjOwCFampvdAcfhzgXbt9Zx7eworFY3H7wTqLkYOEiHIVTKvb8/SVlp68I6LFzG2x/0ZvQ4A998dWb/g5+D6Bg5/QdoePPV4BuWFi677DLy8/P9tHoC5w7BJt4BtPFaBi0chFTl/yBocTRrcWDzuDyU7SkjbnQc2gR/taVEISEkuuu2s84iFou5/PLLGTRoUNDrW7ZswWq1cttttyGVto4rPT2dCRMmsG3bNp938s/Vv8cee4xevfxVlM3NzVit1qBagPLycgA/u67BYPA5zCiVSuRyeYBDlUwm46abbgqoLzU1FaPRSGNjI4cPH25X3bd582ZCQkK48cYbA6511R4uEolYtmwZffv29Xvf5XJhsVg6PH69Xu/zSVAoFMjlcurq6vzuk0ql3HzzzV3qZ7D2W/wKwBtjINgC4aOPPiItLY1x48YFrUulEvPMi2n0zPD/TUgkIi69Ioya6maKCv1Dr5aXeeckvk2UMK1OQkSkd1Ehk4lQKMTU1fprRiQSETfdagzoQ3KqkqhoOTarm6OHzVRWBtc8bd1c56vjdKe7lB6qoPeAN1zp8FFnt1nv3NqAXC5Go5EEVaWPn2wgK9PiJ8ABamuaOXbEzMQp7cdB+LnQ66UsXppEeZmDzd/UtVtu9OjR9O7dm48++ug89u7XhbAT7wC6ZB2h6aGMeWoMJdtKsDfYUYYqiRsbh6XSQvnecl/ZzP9kEtojlJHLR1KytYTGgkZUESqMI41Yq63sfXGvr2zK5SnINT/uakRgSDOQ8ZsMACoPVVJ3qv0fx9kYMmQICxYswOVycf311wcIhdraWl599VUefPBBVq9ezbZt29Dr9Vx22WWUlJTw1ltvdbntjhAWFkZMTAwvvvgiGzduJDc3F5lMxogRI0hLSws4ewyQlZVFRUUFd955J3FxcajVaqZNm8a///1v/u///g+TycShQ4e49tprsVqtlJeXExMTw8yZMwPOR4P3jPmcOXP8Fjput5sTJ07w5ptvkpO
TA3ht9x9//DE33ngjffr0YdeuXYjFYgYMGMDgwYO5++67O2T7bYvBYCA2Npbnn3+er7/+muzsbGQyGUOHDqV3795Bj4Ll5uZSVlbG7bffTlRUFGq1milTpvDpp5/y0UcfYbVa2bdvH7NmzaKpqYnS0lKio6O58soru3QE7nTy8vIoLi7mtttuIyoqCpVKxZQpU1i3bl2A5mP79u3s37+fBQsWkJOTE7AL0+qkKJRiVr6cxr7vG8nJsiKTixg+Uk9svIIXns4PsLMWFdgoKrTxu1tjCA+XoVSJmTQ1jK8+r+bDNeXY7W727Wnk8pkRNDa4KC22Exkt4/IrI7BYAnf8O7c18Pt5sbzwSrrvPZfLQ262lb+9UcKpk141cW62lX99UM6Nc2JI7xnCrh0NuFzeo2tDR+h4cMEpCvL8Fxw9M0JY9GAiHg/cdctxaqrbN7kUFdooyLNRXe0IUKUrVWJGjtbz4ZryoPdu31LP3QviiYySU1XpNR3oDFLuWRj8xMhnn1RRUux9FsyY5Y1Zr/jRYXD4aB2RUbIfy1X7HZsLDZNyzewoZDIRsbEKho7U0Vjv5Mllee06thmNRhYtWsSBAwfYvn17u+MX+GlIHn/88cfPVWUbNmwgJyenw2H/LhZMhSYqD1YiDZES0S+C8N7hKCOUVB2q4sg7R/zilrsdbkp3luJxegjvHU5k/0iU4f/P3nmHR1VtD/udXtImPQFSaaEECFVK6IpcBARBxIKgXi8ooqKi/LyKqBcQbODFci8gts92UQSRagTp0gKBUEICpPcymUwy/ftjzJBhJg0CIXje58nzJGf27HLOyV57r7X2WkpyD+dy5v+dwWq+bLsK7ReK0k+JzENGRU4FpgoTMg8ZMg8Zumwd+jxnW5PMQ4ZYJibvSB62ejxCDQYDPXr04OTJk/z+++9uy1y6dImjR48SEhJC9+7d8fHxISEhgRUrVjgFRVGr1ajVag4ePIhef7lPMpmMgIAAjhw50mDnqGr0ej3btm2joqKCmJgY+vTpQ3R0NPn5+axcudJtpC+bzcaxY8cIDg6mU6dOAGzcuNFJZXvw4EHUajXx8fHEx8ej0Wj48ccf2bt3LyqViqNHj1JUVETnzp154oknmDdvHuvXr2fv3r3s37+fM2fO0KtXL/r06eNkz01MTCQlJYWoqCjHQqOgoIB3332XjIyMRo0d7KrprVu3UlFRQYcOHejduzdt27aluLiYjz76yOWMfs3xBwUFOca/adMmNmzY4Bj/oUOHUCqVDBo0iMGDB+Pr68uGDRvYuXMnHh4eHD161OlZderUiXPnzrl1JHRHzfbFYjGbN2/mp59+cvs/f+TIEYYNG8bo0aM5fvy4k4agosLCjq3F6PUWoqLVdIn1JDRUwYXUSj5cnsHJE+4XHYlHdAQGyejYyQOJRMS2X4rY+GOhwyP96KFy5AoxA+I1DBjsg3+AnM0bC9n5aykeHhKOHy2nIN9EdDsVTz0XxsL/S2P99wUc2FfGwb1aziTr6drNk0FDNPy8/vJ9OpVUwdlkPeGRKnr39aZDRzXFRSY+fD+TtPOuKvjKKgtdu3mRfqmS7ZuLqXdKFMGZZL2L9iGqrQpvHykbfihwG4o2P89IcIicogITuTlGAoPkGAxW5HKx25/TpyocAW+69fAkKESOWCIi41IVJqPNEQnv+LFyR3utWivw9JQSEqrA01NCaYmJrZuK+O+HWWi17s1LUVFRLFq0iMrKSl599dUm8a+Jjo6mf//+wlG1KxDZmlDizp07l61btzbYUUbg1mLMmDE8/PDDdZZJTk6mCdeNV80LL7xASEiIU/SxaqZOncrkyZOZOHFio+ocNWoUjz76aJ1l6nKQuxXRaDQsWLCAtm3bsnLlSpdjis3FzDltiOnkwTOzzrp8dtfdAcx4vBX3jk1q8PEpgcuMHDmSp556iosXL7JgwQJH5MGmqPfZZ58lOjq6/sJ/IQR1ukCTcerUKT799NM6y9QXAORGUVZ
WRv/+/YmLiyMxMdGxkwwICKB///5XFVnq9OnT9Y6/qSa0lkJpaSnPP/8848aNuyYv/qZGW2YmtLWc3v28OXqo3HGUzUcjZeBgDWdP6wUBfpWYTCa++OIL1q9f36SOoALuEYS4QJNx8eLFFpM3+OuvvyYoKIjXX38dnU5HcXExCoWC4OBgzp49y/LlyxtdZ3p6+nX36G+JWCwWRwjZm4Ufv8sntJWCF1+JpFJvpbjIhFQmIjTUnpTl3SXCc7xadu3a1dxd+EshCHGBvyTl5eW8+eabeHt7Ex4ejo+PD2azmczMTOEozF+Aykor7yy+hNpDTESUCo1GisUCOVkGF7u0gMDNjCDEBf7SaLVaR3ASgb8e+gorp09eu+e+gEBzIZwTFxAQEBAQaKEIQlxAQEBAQKCFIghxAQEBAQGBFopgE28kEpkCT/8oJDIF2vzzmA03hz1NJBIRHByM0Wi8aY5x1YZYLCYoKIjKykqXzGgCzojFYoKDg52uFRYWXrfjWhqNBqVS6QixKiAgcHMjCPFG0Krz7cTe8RxytQab1YLVYiZl31pS9tZ9NvhGIBaLWb16NRs3buTjjz9u7u7UiUqlYvXq1Xz66af873//a/oGRCIGPPAhYomUfV8+idXScvMY+/n5sWbNGqdrc+bMISUlpc7vLV68GJVKxYsvvtioaFmzZs0iNDSUOXPmXFV/BQQEbiyCEG8gHn5h9Bz3GjnndnHilyVYTJVE9bmX0I5DOb//c2x1JdS9gdSX6/lqUavVxMbGUlJSwrlz55qkzuu1mxSJxHgGRCKRKhFL5S1aiBcXFzN58mQA4uPjGyRcq1Ovenh4oFAoGh3y8nq9Q9eCQqGge/fu6HQ6t7nWBQT+qgg28QbiH9YDkVjCya1vY6rSYrWYSD3wFYfWzb9pBPj1pGPHjrz66qv4+d34jEmNxWa1sOu/D/DbJ5MxG65fJrYbgdVqRafTodPpGiyMbTYbTzzxBI899pjbxC8tkcjISBYuXEirVq2auysCAjcVwk68gag1oVjMBgwVzvZmg65xiT9aKgqFAqDWdJ03G1c+p78at5qvQXUO88Ym2hEQuNURhHg9tOs/jcCoPqh92yAWS+l//weOzxJ/foNKbb5TeU//CNoPnI5v61hARGn2Kc7t/RRd4QWncnKVD70mvEnGiU3oy3KJGfw4ngGR6EuzSD3wFTlnd151n7t3784DDzxAeHg4OTk5bNy4kYSEBKcyKpWK8ePHM2DAAPz9/SkvL+fUqVN8/fXXThNlfHw8d955Jz4+PgD84x//cOwI9+7dyy+//OLS/uDBg7n99ttp1aoVRqORxMREvv/+e7cOdxEREUyfPp327duTm5vLmjVrrlpdKhJLuO2+9x1/G/WlHFnvmmxEKlfTZ9JbXDj8PfrSLDoOnolv687oS7NJ/vUDijNPNLrt9u3b88gjj7Bq1SpHCtNqvLy8+L//+z+++eYbR3Y2pVLJ+PHjGThwIIGBgeh0Osf9z8vLa3T71SxatAjRn0mvdTod//rXv2otO2rUKMaOHUtISAhZWVl88cUXAI7vXw1xcXFMmjSJ8PBwZDIZ+fn57Nmzh59++slFk9C+fXumTJlCTEwMNpuNEydO8Nlnn5Gff/l/qm/fvkyYMAFPT08Apk+fTmWlPWvY4cOHWbdu3VX3VUDgVkBQp9eDvjSb0pwzGHRF2GwWSnPOOH4sZmfboW/rrsRPX4Nv61jSEzeQfuxHfEI6Ej99DX5hPZzKiqUyAiL7ENJxKH0nv402P4Vzu1dRWZZHl5HPIBJf3fqqS5cuLFiwgJSUFL755husVivPPfcc48ePdyr39NNPM2rUKI4cOcLq1atJSEigZ8+evPfee2g0Gke50tJSzp8/79iBX7p0ifPnz3P+/HkKCgqc6hSJRDz77LPMmzcPrVbL+vXr2b9/P/Hx8axcuZLw8HCn8m3btmXRokXk5OTwww8/4Ofnx5tvvklgYOBVjR2bzfFslJ4B+Lb
p7raYSCwlILIPoR2H0f/+lVRqc0g98BVKz0Buu/8DVN5BjW46LS2NyMhI7rrrLpfPunbtSo8ePZzCuc6ePZu77rqL48ePs2rVKrZt20ZsbCwrVqwgICCg0e1Xk5KSQkpKChqNhq5du9Zabtq0aTz11FMkJiayfPlyDhw4wPz584mKirrqttu3b88bb7xBRUUF7733HkuWLOHIkSNMmTKFl19+2alsnz59eOeddzCbzaxYsYI1a9bQsWNH3n//faf3r6ysjJSUFMe7lpmZ6RhjbYudVq1a8cUXX7Bo0SIkEslVj0dAoCUg7MTrIfv0DrJP7yBm6Cy8AqI4/dtKt+VEIjE97noFs7GCvZ/9HYPenjv5UuJPxE9fQ9y4BSR8NBmb1TmrT3C7gez9Yial2fasWReP/oBXQJRLuYYSExPDm2++yf79+wF7vu1ly5Yxbdo0duzYQUWF/UjcmjVrKC8vd+xqALZv385HH33EmDFj+OqrrwBISkoiKSmJMWPG0Lt3b7799lunnVJNRo0axciRI3nvvffYsWOH4/q6det4+umnXXbigwYN4vnnnyctLc3R1vvvv0/fvn2dcnk3FJvN6ng+Sq9A/MN71lk+tNNw9n7+OGW59nSUxRnHGTR9NUFtB3LpWOMSdlgsFrZv385dd93FJ598QlXV5fjb/fr1IzU11UnDsXbtWiorKx3PA2Dr1q188sknjB8/ntWrVzeq/Wqqs6j5+PjQt29ft2VCQkKYMmUKq1atckpMUl5ezpNPPsnp06evqu0uXbogkUhYvXq1Q8AmJiZy+PBhxOLL+wWZTMazzz7L7t27WbZsmeN6UlISq1atYtKkSaxatQqwp249e/Ysw4YNo3///qxbt44LF5y1WlcSFRVFQECA47hczXssIHCrIezEmwjfNt3w9I/g/P4vHQIcwFRVTsr+z1H7hBIY1cfle7kpux0CvJrywronqbpIS0tzCHCwC5cNGzagVCrp3Lmz43p+fr6TAAcoKSnh8OHDxMbGutSrVCoBXL5Tk3HjxnHmzBknAQ5QUVHBokWL0OmcncwSEhIcAhzsu0iDwUBQUON3wldDxolNDgEOUJKTjMVsQOUdXMe3amfz5s0olUqGDBniuKZQKIiPj3fJo11YWOgiXLRaLQcPHnR7/5uSAQMGIBKJ2Lx5s9P1HTt2XNOJgRMnTmA2m5k7dy4RERGO66dOnSIpKcnxd69evfD19XXJbFZYWMjp06fp0cNZawWXbeI1F0e1sX//fpYtW8ZLL70kCHCBWx5hJ95EeAe2BaA0x3UXo809A4BXYDT5qfudPivPT3Upfy1caY8FuwoScBGOHTp0YOjQoURFRaFWqzGZTPj4+GCxuHrb1zeJSqVSwsPD+eGHHxrc1+p+1USn0znsn9ebiuJLzhdsNsxV5ciUXldVX05ODsePH2f06NEOoT1o0CAkEgm//fabS/l27doxdOhQ2rZti4eHB2azGS8vL6TS6/tvGRQURFlZmcuzrKqquibHsbS0NBYuXMgTTzzBxx9/TFpaGtu3bychIcHJS75NmzYAPPPMM1itVqc6QkJC3C4kGrKIrMZqtbr4gAgI3KoIQryJEEtlAJiNrit/s9E+8UikCpfPrObGneGtD3dC1t3RpJkzZ/K3v/2Nffv2sX//fnQ6HWq1moEDB7rdCVerQ2uqRWsil8sRiUQuk3Jd2Gy2Bpe9HtisTd/+li1beOmll4iMjOTixYvcfffd7Ny500UL8dhjj3H33Xezf/9+Dhw4QHl5OWq1mv79+xMZGdnk/aqJWCxGJpO5/cxisVyTY9vhw4d57LHH6NWrFyNGjGDGjBk8+OCDvPfee+zduxewC2Sr1cqWLVvcLhjdva/V79219E1A4FZEEOJNhL40BwAv/0jKC9KcPvMMiASgouT656l2d4672lGsepc1YMAAxo4d62Q7ryY2NtatEK+ebGUymdtJVq/XU15eTrt27a55DC2Zffv2odVqGT16NL/
//jvt2rXjnXfecSrTu3dv7rnnHpYuXeqyQ+/UqdN172NxcTEeHh5oNBpKS0sd16tDvF654GgsVquVQ4cOcejQIQICAvjnP//J888/zx9//IHJZKKoqAixWExiYqJbbYw7qt+/6qOOAgICdgSbeBNRePEPzEY9Eb3ugSt2C5E9J2IxG8hP3Xfd+xEXF+cy0cXHx2MwGDhxwn50KiYmBovFwh9//OFULiAggF69ermtt9oprfqoWTU1d0a7du2ie/fudOvWzeX7NT2Ob2VMJhM7duxg+PDhTJkyhcTERC5evOhUJiYmBsBlAaXRaGp1RmtKqp/7yJEjna5369YNmUx21bvd2267jY4dOzpdKyws5LfffkOpVOLlZTdTJCYmYrFYGD58uEsdtWl6qt8/b29vp+u19dXX19ehghcQuJURhHgTYTZWcjrh3wRE9CJu7Kt4BUTh4RdOjzEvE9R2AGd2foSpqvy690MikTBv3jw6d+5MaGgoDzzwACNHjuS7775z2BMvXryIRCJh+PDhiMViJBIJMTExvPrqq7Wqw0+dsjvf3XfffYSEhNClSxcWLVpE9+6Xj3F9+eWXFBYWsmDBAu6++27CwsJo164ds2bN4j//+Q/+/v7XdexiiRyfkBh8QmKQq7wRS6SOvyWyG7eD27JlCx4eHvTp04cNGza4fF7tXT1y5EjEYjFisZj27dvz2muvuZRVKpW0b9+e9u3bOxKhhIWFOa7VPEIllUod1318fJBIJI6/ay7s0tLS2L17Nw8++CCjRo0iKCiIXr168fTTT19TkJhp06bx+uuvM3LkSDQaDZ6envTo0YPx48dz5swZhyDOyclh06ZNTJkyhccee4yQkBCUSiWxsbG8/fbbxMfHu9SdnJyM1Wrl3nvvpVWrVsTExLBw4UIGDhzoUrZjx4588cUXfPzxxw5fDgGBWxVBnd6EXDz6A1arhZghj9Om62jAHjksacsyLh69MUEpXnnlFSZOnOg4umMymfj+++/57rvvHGV27txJTEwMTz31FLNmzUIkElFZWcnnn3/OnXfe6dgx1SQrK4tvvvmGyZMnM3z4cMdOvuZZ3fLycp577jmefPJJHn30Uf7+9787rq9du/a6R3tT+QQz+JG1TteqbPu33AAAIABJREFU/9616gG0TexEWBsZGRkkJyfj5+fnou0Au8r9xx9/ZObMmTz22GOIRCIMBgNffvklgwcPJjQ01FG2bdu2vP32207ff+GFFxy/T5o0yeGB7evry4oVK5zKVv99ZdKUd999l6effpo5c+YgFosxGAysWrWKTp06OZ1iaAyLFy/m0Ucf5ZlnnnFaXBw/ftxlDP/5z38oLS1l0qRJ3HPPPYBdDX/gwAFHQJyaFBcX8+mnnzJ9+nQGDhyI1WrlyJEjpKenu5RVq9VIJBJUKlWtO3sBgVsFka0JvYvmzp3L1q1bG+Xc1FKQyBSIxLIGxeIWiSWofUJBJEZfmu32zLdIJEaq8MBqNmJpIuc2Dw8Px4QeEhKCv78/6enplJe71wB4e3vTunVrjEYj6enpmEwm1Go1IpGo1qM5np6e+Pr6kpubW+dxJI1GQ+vWrTEYDFy4cMHJgUkkEuHh4YHBYHCpo9pLu7FJO+z12u+pO8xG/eUY9yIRMoUnFpPBJTmKTOmJ1Wy65meiUCgQiUR1Homqvv9ms5lLly5hNBodgqf6/ovFYtRqda11VFRUOBwE6ypbWVnp1onM39+fwMBAMjMz0el0KBQKZDIZOp0OjUbDxIkT6x3r2rVrnf7n1Wo1rVu3RiqVkpubS0lJSa3frT7VoFKpyMrKcrLRu8PDwwM/Pz/y8/PrfEeioqIoLS2ts22BlsXIkSN59tlniY6Obu6u3FQIO/EGYjEZgAYmoLBaqCip22HHZrM2uXq9puDNzc2tNye0Vqt1SZCh1+vr/E51Mo76KC0trXVCttlstdZxLed6G3xPbbZay5mqmiZhSkMWIe7u/5VHqKoToDSExpS
tpqioyElDYjAYHH2Xy+W0b9++3jrEYrGTENfr9fWmSq3GbDY7xQqoj4qKiga9I/UFhBEQuFUQhLiAgIBb8vPzmT9/fnN3Q0BAoA4Eg5GAgICAgEALRRDiAgICAgICLRRBiAsICAgICLRQBCEuICAgICDQQhGEuICAgICAQAtFEOICAgICAgItFEGICwgI/GWQyWRMnjyZKVOmNHdXbggajYY33niD8PDw5u6KwHVCOCcuAIBvB1807S4nKTFqjWTtqT/rmjpYTUjvEJR+SioLKsnen42hrGnTq94IOnXqRNeuXR1/l5WVsW3btmbs0Y1l0qRJZGdns2/f9U/Scy2IxWLuuecezp8/z7Fjxxr1XW9vb1599VViYmL49NNPnT6bOHEi+fn57Nmzpym72+SIRCImTZrEhQsXOHz4cL3lVSoVrVq1YsWKFSxbtsyRDlbg1kHYiQsAoA5UE9AlgIAuAbQd05aIkRH1fqfVgFYMenMQrQe1RuYpI2xYGPFL4vEO9673uzcbISEhxMXFERcXx6RJk5gwYUJzd+mGMn36dGJjY5u7G/Uik8mYMWMGbdu2bdT3lEolixcvJiIigpdeeol165xzGUybNs0pmc/NikgkYsaMGQ2KpAf2ZDNPPvkkJ06cYP78+QwYMOA691DgRiPsxAUAyNqbRdZe+86770t9kSrreTVEEHNfDEWnizj6/lFsVhtiqZjYx2KpyLv60KnNxW+//ebI7f2vf/3LbV72G8HgwYMZNGgQH3300Q2P+92EaRScCAoK4qGHHuLw4cPs2rXrurRRH7NnzyY8PJwXX3yR5OTkG9p2QEAA06ZNIzExkYSEhBvaNkBVVRWvv/46ixYt4rnnnuPChQvk5OTc8H4IXB+EnbjAVaHwUSD3kpOzPweb1T75W81Wjn98HIvBNdGGQMOo1gbcSok7QkNDGTFiRL1x+a8XHTt2ZPjw4Xz99dc3XICDfREzcuTIOpPhXG/MZjNLly4F4OGHH262fgg0PcJOvKUiAo9gDxQaBVaTlfLM8jqFpzpIjcpfRWVxJfq8q59MRRIREoUEj2B7tjCLyYJUbX+NbCYbFtO1CXCJREJkZCQqlYqMjAyX/NYqlQpwTRQCdlWrQqFwyuwFdjtqdZ35+fkUFBRcdf+qM4Xp9XqXbH21ZWBrSPsqlcqRPlOr1eLp6en4zF1b1e1FRERgsVhIS0urM6tcY/D396dNmzbk5eXVmUSnOlNeRUUF6enpLn2sfh4+Pj4AGI1Gx7hMJlOtSWI8PDwICQnBZDKRlZXlNvtazbKRkZHk5eVRWFjotsykSZPQarX88MMPdY67Gj8/P8LCwsjPz69zxxocHExAQMB1GX9wcDBms7nB42/Ie11YWMiGDRuYPHkyISEh9SZIEmgZCEK8BeLRyoO4J+LwCPXAqDUi95Zjs9rI2JnBue/OOQlSdZCa2Edj8e3gi7nSjFQlpeRcCcc/OU5VceN3BoHdAuk5p6fj77gn4xy/p/yQQurGq8/ZPWDAAGbNmoVGo8FsNiOVStm4cSOrVq1yTJDTpk1jxIgRPPTQQy6T4NSpUxk9ejQPPPAANpsNkUjEPffcw+TJk51ypJ89e5Z33nmHzMy6M825o1OnTixbtoz58+e75L3+5JNP2L9/PytXrgTs9ssJEyYwZcoUp/bPnTvHu+++65QLe/HixXTo0MHxd838788//7zTDlIsFjN9+nTGjRuH1WpFJpNRUVHBypUr2b17d6PHVLPexx9/nHHjxjnycB8+fJi33nrLKXNYp06deOKJJ5zs0lqtltWrV7N9+3bHtTFjxvD44487jbGan3/+mQ8//NCpfW9vb2bNmkV8fLyj/cLCQj777DN+/fVXl/6OHz+ehx9+GKVSic1m48cff2TVqlVOZWQyGb169SIhIaFBO+FHHnmEiRMnOto/evQob731llM63w4dOvDkk0862aV1Oh1r1qxhy5Ytjmt33HEHTz75pOPv119/3fH7li1
bXHK/e3l5MXPmTAYPHuzIx15cXMznn3/u1slyzJgxPPLII46F7U8//cQnn3xS5/h27NjBlClT6NevHz/99FO990Pg5kcQ4i2QLtO6IJKI2PX8LqpKqpDIJIT2DyVmagyFJwspOG5fkUtVUvq+2Bd9gZ6dz+2kqrgK73Bvej7dk7jZcRx484BDFd5Qis8Us2/hPjxDPen2eDeSv0ymNNWectRQevVe6T179uTll1/mu+++4+uvv8ZisTBixAjmzJmDVqvlm2++AeyT3/jx4xk4cKCLfbF79+4kJiY6BL5KpWLo0KF8/vnnHD58mLKyMjp06MATTzzBggULmDlzZp27HHeIRKIGl1UqlQwbNowvvviCw4cPU1paSvv27Zk1axYLFizgH//4B2azPdf8O++8g0KhYNasWfj4+LBkyRJHPVcuNh5//HFGjBjB4sWL+eOPP1AqlcyePZt58+aRnZ1NaurVLaSGDh1KeXk5L774Inl5eQwZMoQZM2bwwgsv8NprrznKxcfHk5aWxooVK0hPT8fPz48pU6bw9NNPk52dzalTpwDYuXMnJ0+epE+fPjz00EMsWbKE7OxsALcalnfeeQeVSsWSJUtITk5Go9Ewbtw4nnvuOQoKCjhx4oRTX00mE6+++ip5eXk8/PDDTJw4kUOHDjktrsLCwlAqlSQlJdU7/vj4eCoqKpg/fz45OTkMGjSIRx99lBdffJF//vOfjnKDBg0iIyODlStXcunSJfz8/Jg4cSJPPfUUOTk5jvb37NnD2bNniYuLY8aMGSxbtoyMjAwAlxS0SqWSZcuW4e3tzbJlyzh58iQ+Pj7cddddPPPMMxQWFnL06FFH+YEDB2K1Wlm4cCHZ2dk89NBDjB8/nqNHj3Lo0KFax5iVlUVRUVGDHeMEbn4EId4CkXvKMevNjqNcFpOFzN8zKUsrozzz8o4hclQkUpWUY/8+hklnV7Vq07Wc+eYMPZ7ogaathpKUxtlezZVmtBe18Kfs1+fp7X9fI4899hiHDh3is88+c1zbtm0bcXFxjB07lu+//x6LxcKlS5c4ffo0o0aNchLiAQEBdOzYkf/973+Oa3q9njlz5jipOU+cOMHSpUv597//TWxsLImJiY3qZ/UOrSFUVlby9NNPO7WflJTEW2+9xUcffUT37t05cuQIgGNyN5vNlJWVcf78ebd1hoaGMnbsWD744AMOHjzoaGfFihX07NmTsWPH8v777zdqTNV4eHgwd+5ch6Bdt24dISEhjBkzhrCwMEcfa2pGwO4BvWLFCtq1a8fo0aMdQrw6p3y1wDh37lytKtypU6cSHBzMU089xaVLlwD7LvT999/n2LFjTgIc7LvWWbNmOezsH3/8MUOGDKFz585OQtzX1xegVlV7TTw9PZk3b55DQ7J+/XqCg4MZP348UVFRjhzla9eudRn/hx9+SPv27Rk9erSj/bKyMsrKyoiKigLg/Pnzjnt4Jffeey9t2rRhzpw5jvzqxcXFfPDBBxw/ftzlOJ2Pjw8zZ850aEg++eQThg0bRkxMTJ1CvPpeNJfjpkDTIzi2tUDSfknDJ8qH216+jVa3tUIstz/GmgIcIKhHEGUXy5CpZaiD1I6fauHvFe7lUndzEBQURGRkJKdOnSI0NNTpJysrC19fX8dkDPbdeNeuXWndurXj2ogRIygrK3MItmqqJ1uJRIK/vz+hoaEUFhZSWlpKZGRko/vamJ14be2XlJRQXFzstn2FQlGrrRSgT58+AKSlpTndJz8/P7Kzs4mOjm5U/2py8uRJhwCvplo9HxMT4zImsKvAq9tPSkoiIsL1aKJCoQCoU509dOhQDh8+7BDgNXHn0b5r1y4nRzmdTodWq3URTtWajmr1dF2cOXPGycQB8PvvvwO1j9/Ly4vQ0FD8/f1JSkpy+0zlcjng3o+jmiFDhnDs2DGHAL+yD1eeHNi9e7eTiUOv11NcXIy/v38dI7QjkUgc90Wg5SPsxFsg2fuyqSyspN3d7Yj
9eyydDZ3JPZxL2s9p6PMvT2xKXyXeEd4Mfmuw23pkHrIb1eU6CQgIAGDGjBnMmDHDbRm1Wu34fffu3Tz++OOMGjWKNWvWIJFIGDVqFNu2bXNxLmrXrh333XcfPXv2RKlUOn3m7X39z7NHR0czdepUevXq1aD25XJ5nZ7p/v7+iESiWnfb13J0KC8vz+VatbNUtXMW2HfskydPZtiwYQQGBrotX5NqIVbb4kQmk+Hv78/OnTsb3Ncr1dFgXyRULxiqKS4uBi6/Y3XhTktQvYOvOX6VSsWkSZMYPnw4wcHBTuXdPbvqPhmNRrftisVigoKCXBagdVHTRl+Nu/G7IyAgoFZNj0DLQxDiLZSScyUcWnoIpZ+S1oNaEz4inNB+oRxaeshho7bZbGTvyyb5K/fHamym63MuuLFU7zKWLFniZPerSc1dV1VVFTt37mTkyJF8/vnn9O3bl8DAQDZt2uT0ne7du7Nw4UL279/Pyy+/TGZmJkajES8vL5YvX94o1Xg11TZ0d9+VSp3/nbp168brr7/OwYMH+ec//0lGRoaj/ffee8/trl4kErnUcyVms5mHHnrIrT3fnRd7Q6lrTNW7SIVCwbJly1AoFKxevZpTp05RXl6OWq3mgQceoF+/fi51VI+ztnFZrVZsNpvTQq2pyMrKorS0lO7du9d7Rtvdbv3K8cvlcpYuXYqHhwdr167l1KlTaLVa1Go1U6ZMYejQoY2qH+zvv8ViwcPDo4GjunoiIyPRaDScPHnyurclcGMQhHgLRKqSYq60q8OqiqtI3ZBK5s5M4hfHEz4y3CHEK3Ir8GzjiVl/c6vOsrKysNlstG7d2qG+rI8tW7YwZswYbrvtNsaNG8fevXtd7J7Tpk3j4sWLLF261EkdqVKpnNTzjaGoqAjA5fuenp5OHugADz30EOnp6SxZssSpfYVCUatNsr7dVGZmJlKpFH9/f7eq12uhpnmimuqY29XOdSNHjiQyMpKZM2c6qZ6NRiOhoaFu663egSsUCrc7SIvFwoULF+jZsydisfiaFiJXYrVaOXDgAIMHD2bVqlVu26+mrvFnZdkDIQ0bNoy2bdsye/Zsp/vf0PG7w2azceHCBeLi4pBIJI12tmwMd9xxBwaDoV67uUDLQbCJtzDEcjG9n++NKkDldN2kN2ExWrCaajjcHMjBO9yb0L5XTC4i8GztyY2kZ8+eDBs2zO1nWq2Wo0ePctddd7moPb28vNza+VJTU0lJSeHhhx+ma9eubo/LaDQaCgsLXeyJ06dPv+pxFBUVYTAY6Nu3r9P10aNHu+ysfXx83Lb/8MMP12pbz8vLIzQ01GnXVjN5xcGDB6mqquLBBx902dmFh4c3yPZbGzExMU42XYlEwt13301hYaHDu9vHxwebzeaiNu/WrRtxcXG4o1rFX1NIBgQEOO08N27cSEhICI8++qjTvZHL5QwfPvyqxwTw/fffo1AomDp1ap3l2rVrR7t27Rx/i8Vi7r77bkpLSx0OkNUmkCsXjJ07d6Z3795u661W04eFhTmu+fn5OcUC2LhxIwEBAfz97393Gr9MJmPEiBENGWa9hIaGMnr0aDZt2lTnYkagZSHsxFsYXq298GzlyYCFA8j4LQN9nh6pSkpI3xCkainpOy7vjjJ/zySwWyDdZ3YnsEcgxaeLkaqkBPYIxLO1J7tf3I25yr5LD+kbgk+E3e6nDlQjlorpOLkjAOUZ5WQfyHbtTAPx9vZm4cKFSCQS8vPzHd7LNVm5ciVLly7lo48+4ueffyYjI4NWrVpx++23c+rUKUe0qZps2bKFp556ipSUFE6fPu3y+Z49e5g4cSL33XcfZ86cwc/Pj2HDhjlNntUMHDjQcU67VatWqFQqh30+PT3dcU7ZaDSyadMmJkyYgF6vJzExkZiYGHr27ElJSYmTEN27dy+TJ09m6tSpnD59Gj8/P4YMGVKnLX7v3r0MHjyY+fPnc/r0abp27UrPnj2ZOnUqer0erVbL8uXLeeG
FF/j3v//Ntm3b0Ol0dOjQgdtvv5133323wdqMKzl37hyLFy9m586dFBcXM2jQIKKjo1m4cKFjd3zw4EHuu+8+5s2bx5YtWxCJRHTu3Jlhw4aRm5uLTObqZ3HixAnKy8uZM2cOmzdvxs/Pj1GjRvHJJ584zj9v376dLl26MGHCBDp16sSRI0eQyWTEx8fj6+tLUlLSVQfpycnJ4fvvv2fKlCkcP368VtvzuXPnePPNN9m5cydFRUUMGDCA9u3b88YbbzgcwQ4dOsSDDz7Ic889x+bNm7HZbHTu3JkRI0aQnZ3too0Bu8NgWVkZs2bNIjIyEl9fX26//XbWrl3rMAElJCTQpUsXxo0b5/Awl0qlxMfH4+fnR3Jy8jX5OyiVSubPn09ZWRnffvvtVdcjcPMhea3mAdBrZOvWraSmpl63GMwC9rPY2fuzEUvEBHQOICguCK8wLypyKkhanUR5Ro0Vtg1yD+ViKDXg18GPkN4heId7U55RzsnVJ52yjQV2D8S3gy8KjQKTzoRRa0ShUaDQKDBXmSk56+ywI1FK8GzlScHxgnrPh5tMJmJiYqiqqmL9+vVuHZx0Oh0JCQnIZDL69u3Lbbfdhr+/PwkJCaxdu9atijErK4v27dvz448/ug3ckpSUhNFoZPDgwQ418OHDh/nqq6+IiIggKSnJcaa6T58+xMbG4u/vj06no6SkBH9/f/z9/amqqnI64nTs2DEMBgN9+vShf//+FBcXs2zZMjw8PKisrHTs2k6ePInBYHBq/+jRo3z55ZdERkZy8uRJFwej9PR0KioqiImJISoqitzcXFasWEF+fr6jzKVLlzh06BChoaH079+fuLg4RCIRa9euveosXJ07d+att94iIyODwYMH06tXL/Ly8njvvfecjmyVlJSQnJxMt27dGDlyJHFxcWi1Wj744ANUKhXl5eUumbLMZjMnT54kMjKSLl26YLPZWLduHQkJCU5zxYEDB8jMzCQqKopevXoRFhZGUlISy5YtczjdicViOnXqxPHjx1082Tt06MClS5fchlY9efIkHTt2ZNKkSWRkZLgc9ercuTPLli0jLS2N+Ph4evfuTWFhIe+//76Tn0ZpaSmnTp0iNjaWkSNH0qtXL3Q6HStWrECpVKLX610C7lgsFofnfpcuXQB7YJbt27c7jf+PP/4gPT2dyMhIevfuTUREBMnJySxbtszp1ECXLl04ceIEFy9edGqnXbt2ZGZmuiySvb29ef3114mIiGDBggUtNm56dHQ0/fv3v2pT2K2KyNaEEnfu3Lls3bq1SW1aAgICjSciIoJXX3213nKPPvroDejNzYFMJmPu3LkMGTKEjRs38tFHHzV3l647HTt2ZMGCBQC89tprnDt3rpl7dPWMHDmSZ5999pqOUd6KCOp0AYFbkJKSEqfANwJ2jdBbb73F/v37G53KtKViNpvZv38/n332mdtjeQItH0GICwjcgmi1WjZv3tzc3bgp+f3336/ab6ClkZqaygcffNDc3RC4jgje6S0UiUTiEmhDQEBAQOCvhSDEWyjdu3fnpZdeau5uCAgICAg0I4IQb6EMGzasUWEqBQQEBARuPQQh3gJRKBT07dv3L2PXExAQEBBwjyDEWyDR0dFkZma65GQWEBAQEPhrIQjxFkhERIRLykQBAQEBgb8ewhGzFkhYWJjbCGU3O2q1GqVSiaeXElk9mboEBAQEaiKTQ9LJo2TnuOacv1q8vXzx9tYQGBh4Q7LIXQ+EmbQFolAoqKioaO5uNAiVSkWPuGhCQj0Ij4TgEND42ZBKhdC8AgICjaEU+ImmnDlyi8VczFZTutcLk9GPttHdGThgpNscADcrghBvgRgMBpRKZXN3o04kEgm9+3SkS6wXI+40ovaoO766gICAQPNgAEqAdHKzz/LpZwfp3WsUPeP6N3fHGoQgxFsglZWVN7XqR6lUMnhILJPutxIYXNXc3REQEBBoECGtKglpdYbTSYWkp6cybuz9iMU3t+v
Yzd07AbecP3+ejh07Nnc33CKTybhjVHdmzDQTGGxu7u4ICAgINJpOsYV4+e3khx8/a+6u1IsgxFsgx48fp1OnTjed3UYkEjFkaDfufciEUiVkshMQEGi5tA7XofA4xN69O5q7K3UiCPEWSGVlJSkpKQwcOLC5u+JE27ZtGDFahIena+5vAQEBgZZG+07FnEn57aZ2JBaEeAvl22+/5YEHHrhp7DVisZiu3UKJ6Wxq7q4ICAgINBmxcels276uubtRKzeHBBBoNMePH6eoqIjRo0c3d1cACAjwo3e/5u6FgEDzYLXCzl91VFbe3GaknTvKqdDd3H282fDWmCgqTsVqvTnvmyDEWzArVqzg/vvvp127ds3dFWI6taJdjLG5u3Fd2b+ngg/fL+DD9ws4kVjptkxpiZmv1hazfFk+G9aVYqgSzsNfidVq49etWt5fms9n/y0iI73lvDcJ28o5dcL52RuNNj54J5/DB/VY/7QkHdxXwc4d5Te8fx+vKKRSX/s7V1JsYeH/5ZCbIzidNobAkHwuXEht7m64RRDiLZjs7Gw+/PBD5s+fj7+/f7P2xdtHjkJxawusmM5K/jbOB0OVjbwc92aDj5YX4uUlZsqDvpSVWbl0seUIqCtJv2jk5An3i5Vr4fcEHQf36Zl0ny+xcSp2/apr8jauFxdSjZSVOe/IPltVjFwuYu5LQXh42qfU7EzTNQlKvd7K7wk6x6KgoRw+qKcuC9uEezXcdbcP7y3Jr1PYCzgTEFTOpfSzzd0NtwhCvIWzd+9eNm3axNKlS2nVqlWz9UOhEDVb2zcKXz8JkdFyx0R9JWazfZK/a4KGkFAZD0z3pUOM4gb3sunYs6uCtJSmD9JzJtnAiFHetAmX0bO3mvsf9m3yNm4U505XceZUFY/8I6BO4dlY8nPNbPqpDLGk6eqsZvBwT6Layti4vrTpK79F8fSyUlZW1NzdcIsQ7OUW4IcffkCn07FkyRKWLl3KyZMnb3gfZLLGC/Fvvyyhb381v23XodVa6HubBwMG1x/EJu28gTOnXIPIRLVV0Klr/ZHsjh7Ss2eXDoPBRus2csZO8MbL2z5bGg02Nm0oIy3FiI9GzF0TfAgJrf8o37HDlRz5owKbDb5YUwzAhHt98PZu2CyclWFkx1YdxYVmwiJkjB7r41gsXEwzsn1LObpyC11ilYy80xux2L7rqqy0cfK4ng4xSrx9JOzbrWPUGG9iOiv5/utSevdVs/PXcspKrfTqqyZ+qP3+nkisxFBlpc9t9r8rdFY2/aTl3gc0GA02vv6ihHNnqlCrxZSUFBMULGX0WG+n8e7ZqcNitTEg3pO+/dX1jrG0xMxP67SknK2iQmfhTHIV3eNU9OilcpTZ93sFh//QI5XCkOGedOl2+bOdO8oJCJKSdt5IyhkDEVEyxk7wYcfWcuKHeTrda5sNvvmihKnTfDGbbSRsK+fsafuCpHc/Nf0HXX7Pdv2qwz9QwoVUI+dOG4iIsr8TCqUYrdbCT9+XkZ1loks3JVbbn5VXf/c3HSPu8EShdH3/bTbYuknLyeOVtAmXM+4eH1Qq+zMtLjKzdVM5OVkmNL4Sbh/tTViE/T37dauW5CQDlXorn62yv0sT7/W5/I4abWzfXE7K2So8PCUMHeFJ+46XF4tlZRY2f1FGSYmFEaO8iO2u4krGTvDhrdfzmHSfb5MuPm5V5AorFRU3p8ZIeHy3CNu2bWP58uXMmzePRx555MafIb+KjfjeXTq+/ryE3v3UjPqbNxt+KCNhe/12RKlMhNpD7PIjV9T/OuflmPjsv8XcPUnDP54KIKSVlP99Y9+R2Gyw/O18xCIRjz3hT+dYFcvezMdQVb9DS2S0nIFDPJH8KXyGDPdErW6YAL+QauSdxfl0iFFw92QNEomI1R8XAnDpgpF3l+TTtZuSCZM1JCVWsvY/9h1B2nkDe3bpuOc+Dd9+WcKFVAO3DfRk03p7itoDeyr48tNi4nqpGTXGiy0/l7HtF62
j3rTzl1X9BoONvb/bJymJFPr0U+OjkRDVVkGffmpiOl8WEnt26di8sYzJD2iYdJ8vP35XSuKR+tXRFMU8AAAgAElEQVTuHp4Shgz3JCBQSmwPFUOGexIZLXd8/vP6MrZu0vK3cd4MGuLJmk+KOXpI7/g8+WQVn/+pup58vwaz2a79yM02k3rOLqBffzmX0hILBoONlLP2a2eSq9CVW5kwWcOoMd5s+VnLH/srXOqVSmHy/RosFhtGow2rFd5+M59WbWTMeNyfqkobR/5wPmqUes7otNCoyZ5dOip0ViZO0aArt/LB2wWOz3YlVBAZLef+6X506abi3SV5lJfb37MOMSratpfj5S2hTz81ffqpUSjt77bFYuPtf+VRVGBi/D0+9OipZNVHhZSWXNa7f/bfYmLjVPQf5MnHywvR613f39ZhciRSMfl5gm28IYhEQJNGbW86hJ34LcSRI0eYPXs2s2fP5oMPPuDTTz/l4MGDzd2tOhk83JNucfZJcMqDvvzvmxKG3+5V53fCI+SER8jrLFMbCqUIi9VGXq6ZNuFyht/u5bA7XkgzUl5mZfwkHwAGxHuw69dyTp8yOO0W3eHrJ0EuFyERi5wEU0NY920pEyZrHLvDqLZyh5fzLxvKGH6Hp+OzfzwVyHNPZjr62LO3iqBgGV7eEgYP98JosDp5SA8a6uno+30P+fLlmhLu+Jt3zc2kCxKJiM6xShK2lxMZJadzrLN2Y9N6LbOeCXBoKEaN8WL/Hl2990gms98blVpCYJDU6T6ZTDZ++UnL/NdCHDvSyfdrWP+/Unr2ubzL7xKr5M677BqB8Ej79yOjZVy8YCSmi5KMSwYSj+jp2FlJaGv79Na1m4quNQTtpKkadu7Q0bf/5d1451glo8f6ONV7+lQVCqWIEaPs7+PEKRr2/l5BzRWrvsKCp5f7xWNwiJSJUzQAPPSonGdnZpKbYyIkVMaEyT6OciGhUs4kV5F0TM+AwZ6ERcgoKJCh8ZW63Psjf+ix2WDaY3YfmKi2Crp2Vzl2+AD3TNUQ3dY+hl82lJGTZaZte9d30tNLhL7CgiAGWjbC07vF0Gq1LFq0iH79+jFt2jQmT57M559/zokTJ5q7a26pnjABWofJKGjAzuBa1OkaXynPzQ/ix+/K+OaLEvoNUPO3cT54eonJzTKRn2/ixaezHeWrqqzoK67v0ZL0CwYXu3D1pJyZYWLgYE/HdU8vMYHBMrIy7I51EoldoIjFIJeDyeisEomIvKyRaRMmp7DAjNVqo04p/idmM8jkzvVZrZCfZ+bf7xQgsm9PMJttDsF7tRQXmjGZbLQJv1xPVLSc7EwzNlv1TgjC3CzeIqMVbPihjPPnDAwa4snxY5X4+klo08ZetrLSysYfyzhzqgqL2b5gUHs4jyss3LXewgILgUGXp0ix2C5wa+KtkVJWasE/wHUqjYy6XKdcLsI/UEJhgZmQUBkZ6UZ+/lFLbo4JmxXKy60EBV/W3JhNrvce4NJFE+06OPtZ1BTgAK1aX76Hnp4SKipcveNsNigrseCjuQ5Gd4EbiiDEb1EOHjzIoUOHGDJkCLNmzcJms/HLL7+QkJCAXq+v9/symYy4uDjOnTtHaen1c4Ax1XDeNhisqNTX38IT1VbB3PlBFBaY2bCujHeX5PPqv0Lw9BLTvoOC5/4v+Lr3oSYqDzGlJWanydfxmUrspCq12aC4yNTg+2Qw2Gr8bkWhECEWi5ArxJSVXl4wVbkxGcjldgFdE7EYVGoRr/wrFK9adqBXg0otxmy2odNZHfWWlFhQqUUOAQ4glrgKtrAIORmXjJxNrmLknV58uLyQzAwzbcLs9/ODdwroEqvk/xaGIJWKOHe6im+/LHGqQ+RmKEollJY6C8Arz1h37KQgKbGK6HauDoxGo/O9MxpApZJQUmzhvSX5zHo60GHL/vrzEmoeQ1YoRC73HkCtFpNZz5G8hli2LqYZUKrEbhcfAi0LwSZ+C2O1Wvntt9944okn+Pj
jj+nSpQuffvopL774IkOGDKkzE5rJZKKiooL//ve/rF27ljlz5jB48GB8fZvWk/jgvss2xv17KojtXr9jWnQ7BX8b7+Py0xCntrTzBg7stbcZECjlrgk+5OeZHPVmpJtIv3R5ksyt5SjZ1VBVZeP4UVfbcb/+HmxYV+aY9MvLrVxItdtze/VVsfNXHeY/5e3hgxWoVBKi2zVMZe9yf3vY1cohoTJSzxscG/KD+1wXdh4eYvJz7Q1X6C6r6bt2U7L9T9s6gFZruebjSt4+EjrEKNix5bJPxI4t5fTuV7+jo1wuwstLQso5I63ayOkYo2DXr+WEtpZis0HKmSoGDfFEKhVhs8GuhIY5KMV0VnIx1UhOtv0dyM81k51lcrKMDr/dk4Tt7gOonEqqctijU1PsjmoRUTIyLhnxD5A6BLi+wsqhA862dg9PMQV5JqxWGzYbFBXan0OvvmqOH63kQqr9HbXZIOl4448BbvihjJF31m22EmgZCMuwvwA2m40TJ05w4sQJfHx86NevH0OGDGH27NmkpKSQlJREcnIyZ8+eparqspr61KlTvPTSSyxZsoQ77riDwYMHo1AoKCkpITExkaNHj5KUlERR0dUfvSgrNfPmKznYbCIMBhsvvBzUFEOuFZtNxE//K+O37eVofCWkphi59377wsTTS8yMx/1ZvjSf6HYKqiptWCw2nn4hEIVSzOaNWk6eqCQrw8SFVAOJRysZ9Tdvh02/Po4e0vPflYW8s7I1fv6X//XG3aNh1YcFvPh0Fq1ay8nKNHL3JB+i2iq442/epF808coL2QQGScnOMvHEMwEONXp96CusvPHPXEQiGxU6Ky/8065l6BanYtsvWha/lovaQ4J/gASTyVkQ9R/kwfK3Czh3xkBRoZl/PBVAVFs59z3ky0crinjzlRz8A+xCacY//J08pK+GR2cFsHxZPmeTK6mqsuHhIeHRmQ1bNEZEybBa7Wr32B4q9u3RExAoRSSCYbd78e7ifNp1UJCVaXS693Xh5S3hwRl+LHktj/BIOUajlc5dlZhMl8V46zA58UM9+XhFIXNeCHQ6pdEtTsWiBbn4+EjJzDDy2JP+SKUi2scoMFTZWL4sHx+NhII8C4HBzlqYyGg5CoWYN1/JxWCw0bufmgmTNYSESnn47368uySP1m1kaMusBARK6dBR4XB+q4/NG7RoSy0Mv0MQ4rcCIputAcaxBjJ37ly2bt1604anE3BGqVTSvXt3OnfuTKdOnWjbti3Z2dlcvHiRjIwMMjIySE9PRyKRsGjRIry9vZFI7DY0q9WK0WhELpej1WopLcuiTeQ5bOJUbBQ3qP05f8/gxVeDUanFlBRbiYqWX5dzsVdis0FhgRltmYWQUJnLuW+zGXKyTChVIiebaHm5lUq9s3rV20eK8s/jRVarjeIiCwGB7oVEtT35SrtqNcVFFkqKzbQOkzvqrKak2EKFzkpIKylSqf2zCp0VscSuci8usqDRSLDabOjKrWh8JTz3ZBZPPReIj0ZCUaGZqLZyJ+FvtdoDuqg9RAQFyyjIN+MfIEEsvlxGV26lrNRMcKgcaY1uV+8OK/U2Qltf7tN3X5W6DT1620APOnayC/myUgsqlRi5m9gCVquN3BwzUikEXSHYtGUWZHKRiw0Y7IsVkUiESi3CYrFRVmrFz//yy5SfZ6K0xEpYuByx2B5MxdfP/rlWa0Emc18vQLnWQkGBhfAIOYYqKyIRqD0ul7Va4cs1RVy8YGL23AD8/KVU6KwolCKsFsjMMBISKnP+jgUy0o1YrRARJUdXbkEmE6NSX74nZrM9qJDGV+ryjhqqrGSkm/D1kzipxK98hqUlZtRqCXKFXQux9r9FpF808sy8IMEe3kgO7xnEgw/Mbu5uuCAIcQEHMpmM6OhowsPDCQsLc/z4+/tjMplQKBRIJBKHQ1NNbDYbIrENbCIQmbCRi4WDfwp1rZvWLgvx1mFX52kuUDfVQryx3vLXwrkzBswm1yklpJW
0wTvglsrxo5VERNm9ym9W9v1eQd8BHk4LMoGGcbMKceFRCjgwmUycPXuWs2ddwwt6eXkxcOBAZs92/xKLRCK7AMcCNik2xCA2YKN2e93IO70cASwEmp7hd3jd8N1WS45Qd61079kws0pz0pBgSgItC0GICzQIT09PZsyYYd9xi2qqZO1qdalUgkh2BpsoEasoBaj/qNi4ezTXsccCY8Z7119IQECgRSMIcYF68ff3Z9myZahUKsRiMRaLBbPZjNVq5dChQ+zcuZPo9iLufbDpk2UICAgICNSOIMQF6sTHx4e3334bjUbjOHa2e/dudu3axenTpx3+D1Ht+jZzTwUEBAT+eghCXKBW1Go1r7/+OhKJhPXr17N7927OnTtHE/pCCggICAhcA4IQF6iV9u3bs3z5ctLS0pq7KwICAk1Mebm1SaPutSRupbHfGqMQuC4cP378ugrw1BQjyUlVJCdVuYSo/CuRdt7guA+1ZUyzWuHsaQOnTtgDoQg0DKPBRnJSFQ059Wq12hyR8W4UzfHe22yw7ptSlr2Ze93bupBqoLjoxt7USr2Nc6ddcytUo9VaeHVejtsohS0RQYgLNBsnj1dy6KCe997Kp7Tkr5sS8VRSFYcO6lm+LJ/iYtdkFVYrLH0jl62btBw9XMkrL2S7TS/ZknAXF/x6UFpiYeX7BQ3Kmf3jd2W8syivSdu3Wu2LA3cc+UPPs7MyHVn0bhRfrS3m/DkDLy0Iue5tff15CZcu1B3rvT4a+65kpBv55sva8z14e0uY90oQ331VzL7dFbWWaykIQlyg2Rg/yYeHH/ND9heP9TJ2gv0+KGsJm5maYkCvtzLn+UAeesSP5/4vCPUNSBRzvXhtfg7JJ2vfKTU1bmITuWXY7Z5MndZ0uQEK8s3Mfiy91oRxXbqpmD038IZEKazmxLFKThyr5KnnA2/YO+QuOFRDSdhWzn/+fTVhnesW/KGtZDwzL5gvPy2ixM3CuSUh2MQFbloMVTbOJFcilYqI6aJ0ChlqNsOZ5Coq9VZCQmUuqTAvXTCSn2dGpRYR01nVoAhVpSVmt+pNjUbqNkSo6/ctXEg1YrXaaNdB4RJo5UKqgcICCxFRcoKCG/avl5NtIuVMFV5eUi6mGZFKRU7pOuvDbLZx7kwVJqONDjEqp7CehiorZ08bEEtExHRWOu5RdqYJLx8JqecMxHRRoi0zk5drpms3FWazjZxsE63byDh9yoBEDB07KR2CKDfHhI9G4ghhWq61YDDYCAiUoq+wkp9nplxrparSysU0IxpfsVOEs9ISM6kpRjw8xXSIUTiFgK2PtPMGykqtREbLHSFVq7FYbJxKqkImE9EhRuH0LmVlGDH9medGpXJtL+OSidwcE2ERcpeQuRaLjZSzBgxV9mdeHR61IN/MxTQjSqWYjEv2ylu1kSH/M71oTrYJQ5XNJRe5rtyKXm91ej8qdPZr1SGAzWYbZ5KrMBntWdRqhnO1WGycTTZQUWHFP1DqyCtezaafyrh7ko+LAM/Jtj+3kiIL+Xn2/OPePk23ushIN1KQb6Zde4VTvVYLXLxopLjQTJtw5/ubftFIdpYJiYRa3/3sLPuzCQ6WukR+tFrh9MlKRGLo1EXlspgLi5DRf6An2zeXc+8DLTdmheS11157rakq27p1K6mpqYL38l+Qrt1a07X71a1oN/1UxtCRXnh4Xv7nvnTByJuv5mI02Ug5a+Sn/5XSs48alVqM0WhjwYs5VFZasdlg66YysjLNxHa3R8z69ssSfv9Nh4enmNOnDPz8Yxl9b/NwTKC18d1XJWzfUs7hg3qnn7AIeb0pG1POGnh3cR6eXvZUk999VUpwqJTgEBk2G3zyQYFdiEhFfPtVCUqlmIhI50lny0Ytg4Z64uV1+T78nlDBqaQqCvPN6PU28nNNdI5tWGSwrAwTixfmUlZiJSfHxDdflNCqjYzgEHs+63+9+v/bu+/wqKr88ePvuVMzM5lMkknvCZ1QQ2iB0BRXVAQ
EFyzrsmLF39pWhRVBURd1XV2FFcvq1y6iYlsbIiAIAiH0mgChpZEyKTOTTP/9MTJxnBCKgSR6Xs/D85CZmzvnTu7M55TPOacce6OHooMOli0107e/Fp1O4unHT7B/r52SYifff1fP0cNONq63odXKUapk/PPRExTst1NR7mLLZhsrv61nSI5vl7D/PFtJZJTvugE2rrPxwxorWdlajh5x8vX/ajl62IHN6qXokJ0QrZz4RN+xmzfaeHVxFYYwOXkbbfy41sagHN1pW9Jut5fn/llB/qYG7I0e3n/bjMEgkZSiwmrxsOpbC3t3N2KucpO/ycaaVRaGDNf7A/kXn9axc1sDG9fb2LPLztDhTauavfZSFfkbrahUMj58z4wkyUjL8K1KV1bqYsG8MqqrPJSXOnn/bTMxsUri4pWsX2Phx3VWKk+4qa11U7jfTueuan/wXP5FPRt+sPLNF/X84fKmRXlqzG4WPFLG2EsN/u1Rv/ysltJiJ126aaitcfPkI+U4HL714d96rZrM3hoMYXI8Hi+PzinDXO1Gknz3zp5dDWRlawHfWPGSt8zceGukf637k5576gQF+xzkb2qgutrFkjfNpKSpiTrDyuaprF1t4dgRJ9u3NFBrdvPem2YyumiINCnwuOHJR8soPubAavHw+bJaGho8dOnm241w6bs1HNhvx1zt4kS5bwe5Pj9tNuTxwH8XV7Hq23qcdi/ffFnPwQN2+g/QUl3lZmu+jZ3bGigtdrF2lZXqahc9m/nchITIWP5V/RltBlNyNJnevdvfVFrREhfapdderOLKyUZGjtEDsOQtM0veMnP7XVHUmN3U1riZen04Wq3EZVcaAsbd9u1pZNz4MLIH+768ig46gjaQaM6fZkSec3kPFDSSlqFm0h+NyGQwYozev4lJ3gYbVovHv0/5gMFanniknJxc/WnHai+70kBUtJzNGxu4YUbEGZfH64UXF1Yw6epwBg31vQ9HDztQ/lSRef3lai65LJSx43wBZNn7Nbz7RjV33ufbRe4Pl4Wi1kg8Ob+cvz0Yw5ef1VFa4iA5VUl9vZvcUXr6Z2vxeuGZBeWs+Kaey640tFiBz+is4va7oph54zH+fHNkwAYlDruXt16rZt4/YomI9G0h+uicMnZus9Gnv7bFa131rQWb1cODj8QiyWH0WCePzS0l86dtVxsaPAzL1TE0V4/H4+XJ+eWsXW1h9MW+L+6TXeibfrSyZlXTGOm2/AbKy1zMmhuDTAZDc/XMe6CU3NGhKBTw8qJKxl0ZxvCRvnu0+JjT33V+8aUGUtLUvPN6NbffFRVU5qumGiktcfLUoycCHo+OVRCfoGLbVhv9B/iue+vmBm6e6bs3P1pSw9BcHZdc5vu7hUfI+WxZHbfdacLeCMePOrlnVjSGMDmXT8C/ZSlAdZWLMKP8lLuduZxe7pvj+/tvXG/jrdeqWfBsfIvv/ZnweuH+h3zv4eoV9bz/tpmHHvP9re64J8q/9HKDzcusu4u5+A8G1BoZN90eydJ3alCqYOKUwJbyqm/rMVe7mPd4HJLcF9Q3b7T5K3wlxU6m32IiPUNFaYmTBQ+X+3cr/LnYeCWVFa233XBbEEFcaHdqzG6OHXUEtIhyRuh4fK4vmzY6RsGoi/U8dF8pg4fpGDpM628dAUyZFs6rL1aRv8nGkGFaevVpOQic1NjoxeMODkJqjey0W3/mjgplW/4JHp1TSk6ujsE5en/FYc+uBtxu+OC9pmQbq9VNjdl13jYFOVHmxFztZuCQpmtP/qnlb7N6OHTAzt9+qlQA5OTqmHNfnT8Jy2BUIMNLeIQchUKGUiGj0e57Ti6X0fenwCqTQb8BWnZsawAMpxz//TmH3RPUK3K4yAFe+G55017fbpeX4mNO+vRv+Xw7tjYwZJjO36WfmKwkPkHJ/j12UlJ9u5YNHOq7lyRJRu9+2jNKttq7uxG8Xj5c0vR3c7m8VFY4UalklBQ7ycnV+59LSArs6rXbPWhOsTN
aS3JH61iz0kr/AVqOHXGiVMr8XcW7dzYCXv+9VF3povi4LwiFaGVc9UcjD88uZcAgLcNG6En7WXe6UilrMUksa2DTvdJvQAgvPl9BfZ37V+9vMHCo1h9c+2Zpeef1ajweL5IkQ62R2PSjldJiF168qFQyKk64/N3mdruH0NDgz0j+JhsjLwr1/80liYB7PSFB5R9KiIlVYKl343J5g3ogXC5QKc99zL49EEFcaHecTi8yKTAhSZKB2+Wr1ctkMHlaOKPHhvLDagv/fPwEQ4fr/eNaPXppWPBsPFvybHy2rI6P3q9h1tzYgLHD5ix9u5qiQ8Ff7tP+FHHajT10eonZD8dysNDBym/r+PSjEm6+w0Rmbw0eN0TFKEhNa/pCvfX/RaHXn7+MJofTi1wua7Yr2uHwbaeJzAv4DpBJvtbMmQRhpVIWkIyl1kj+DGtJkuH9WeJ8c4nZckXw38Ht9qLVyQLeo9Q0VVBgbI7L5UX6RSVLJpPhdPgKolDIAr68VSpwnsHULrfbiylaGVCmGbebCDPKqa3xIJdaTpqTn8V4/s9lDdTx7htmqqtcfPtVbUBXr8fjJSVNhSHM99WdmqYiZ0TT+3npeANDc3WsX2vluX+eoGcvDX+51QRAZJQce6OXGrMbY3jwvffzvA+FwhcYW2NDyp8nbKrVEm5301a2//rHCbKHaElNU+H1wga5DYej6UUliZO3aACHwxuwd/svKX5225zMq2ju3i46aCcptWNn1oogLrQ7kZEK9Do5+/fayeztGx/bvbORlDQVMpmv67WmxkV0jJLxVxkZMSaUe24/zuSpRiS5r1szIUnJ4Bwdg3N0LHiknF07Ghg4pOUdnH5Nd3rxMQdxCUoyOqvI6Gxi5fJ6Vn1bT2ZvDQlJSo4UOfzd+xdCbJwSp9PjH4sF3xeyJIEhTE54hIK9uxv9Xbb799pJSFKctscBfN3TJ8qd/v2+Dx+yk/hTYqEhTB4wTa7keHBXpUYjw273BCR1xScosVg8ZPYOTL47E6npavbuavAPvdisHo4fdZCa7rtuh8O3L3dM3E/lLXKSeAaVg8RkFTu22pr9u6lUMuRy2Le7ke6ZvnvU4/EF9ZOBXa2R0WA9+yioUPj2X//q8zr27bEH3JeJSSpCQqRmy+R2ezlR7iIuXsmlVxgYfXEod8w4ypRrwgk1yJHLZWQN1LL6u3omTA5O5Dp21MmAQT/9/4iTUIOcUMOvz2A/ftTxs6EtO3EJSuRyGSu/tdA3K8TfVd7Q4KG+LjCvRqORaGxm7YT0Tmo2b7LRb0DTOPfJ+/tsrF5Rz+Ac/ekPbMdEYpvQKs4lsW3rZhvHjzrZkteAKUpBVaWb6BgFCoUMQ5ic994wYwiTc7DQwScf1jD9ZhOmKAVbNjfw6otVhIRIWC0e1qyyoFRKDB+lp8Hm5dGHyrA3+hbuKNjXSP5GG1dMMqI7TUv813j3DTM//mBFqZRxotzJ6hUW+vTXkt5JTVKyik8+qKW6yo1c4ev+XbvK4h/r3b6lgWNHnGzJsxERqaC6yo3JJPe3HouPOykpdp2yEvDBezV8+WkdOT/bZlIu972Hr79SjUIB5aVO3nm9GoNBIi5ehTFczruv12AwSL7EtvdruP7GCKJjlKxeYSF7sA4ZvrHRMZeEcmC/HZfbS2Kyiu+/s3CkyIEmRGJrfgNrV1uYfrOJEK2ETAafflRDXLySHVsbWbfGQmSkPKCrdu/uRg4fstPY6OWjJb6ERZ1OwlLv4YtPawkLkygrcfHeW2Yy+4SgPs3MgKQUJcver/Flvds9vPN6NT16hTB8pB6rxcO6NRaOHPZ1S2/a0MCWPCs3zIhErQ68H4qPOzly2OkfxklIUvLlZ/WUlbpQKGXs3tHAyuUW+g3QIkkywiMV/N9LVcgVMsrLXCx5y5eweDJRTxMisWxpLSEhEgcL7Wxcb6PXT+P0O7Y2UHTQwe6dDUTHKJEkAhIaI00KXl1cxdhxBn8lASA2TsHrr1SjVMl
obPTy3Tf11NZ6SE5RsW+3nf88W4laLaPB5uXHdVasFi8Xjwv1T/NKSlHxxivV9OytCZg9sWaVhQP77chkvsz6JW+ZueQyAxmdm3qgPvuolv97uZrcUXrkijOraK1dbeHwId+MjdISJ0vfNTNhspHEJBUV5U62b20gJk5JxQkXn35YQ8lxJ8NG6AmP8LUvbVYPK76qI9QgZ933vutJTFKSkqZi2fs1HD/qwOXysu57G6tW1DNoqI6qSje7djQwYkxTD8anH9VyxcSwgErq+jVWdu1o4Pq/RJzRLIj2mtgmgrjQKnr0TKBX37ML4nk/WqmqcpPeSU1Dg5eaGjedu2pQKGQkp6hIy1Czd1cjDQ0epl0fQXonX7dXQqKSlFQ1RQcdHCiwY4pSMPVP4SiVMpRKGYNzdJSWOCnY14i9Ef54XTix8Wc+Letc9B+gxeOBwgI7JcUuBg3VMWyEHpnM1507fKSe8jIne3c3olLL+MPlBn8Q2bzRRmWFm9QMNY2NvvehUxe1PwnN4wWdTiIpufluP7fLS5hRTnqnwC7/lFQVXbpp2LenkapKFzm5ofTN8gXTxCQVGZ3V7N1tx2b1cvW14f4Wu9PpJb2z+qducxmduqjxeCA8XE6IVmL9Wis33xHF1nwbDQ0ebpgR4Z/+FJ+oJCxMzpa8BsKMcq6YaECuCMzE795TQ/FxJ/V1HoaP0vunU/XsHUJoqMSeXXbM1W5GXRRKbJwCh8PLsSNOaszuoH/6UF+ZhgzTcfyYk2NHnPTP1nLZlWHIZL4u1IQkFYOH6diW3whe+PNNEQHT2k4qOuSgvMzJ4BxfEJfLZQwboedEuZP9e+3I5TLGjTf4E8MSk1V06xlCwT47FeVOBg7Rkj24qSLlm86moXC/HaVSYszYUP8Y+Za8BurqPaSmq6mpcRNmlGOKaipTqEGOSi0jd7Q+oLIRHqGgX5aWwgIHhwp9Y/5Dc333WVSMgi7d1BwuclC4345eL3Hd9MiARDatTiImTsEr/6kiLV9SbHYAACAASURBVEPtn3WxZpWF8ZPCqK/zcLDQwcgxoQwbGdhCbWz0sHJ5PaMuDvVPITwdjwem3RDOwUIHR4vsjBsf5u/9SU5VIZMk8jdZMVe5+cMVYeh0El26qwn5KYs/LkGJQiHj6GFf78nAoTokyTeEM3S4nspKt/87YOIfjSiVMrxeUKmkgM+D0wk9M0P8Gf/ff2fhkw9ruOuBaAxnMObv9cgoL06jV6/sM7ruC0nmbcWIe8899/DNN9/4d7YSfj+unDiAP9/SsbM8hdMrLXHy+NwyFv036YK9ZlWli/ffNjf73DU3hDcbkM/UqhUWBg3VolFL/OfZClLSlL+Lfe4L99tZ+o6Z2Q/HIknw2EOljLuyKcA2p/iYg6Xv1nD3A9EXsKStr+S4k/ferGb6LaaAGRItsdRLlB0Zz+WXTT3PpTt7YkxcaBUOu+h9+T2QZKDTXcAlxvB1LTc3Tas1NFjdPHRfKU6Hh87dNPzh8rDz8jrtTeeuah6c37TsakiIPChz+5fWrbEx43bT+S7aeRefqPRP9zxTlno5EeFn9zsXimiJC62ib78u3Hm/HmN4x17CUPh9cru9Z5TUJ/w+bd2UwNjRfycy8tyTX8+XjrsAs9CuHCgsYXu+6NgROiYRwIWWWOsi2mUABxHEhVZisVjYvdOD0yluKUEQfjuOHAwlM3N4WxfjlMQ3rtBq8jYWsPrbjr1wgiAIwkkup4wjB1MZNFAEceF3oKamjnXfV7NnhwjkgiB0bF4vbPwhhSsu//Ov2k71fBODmEKr2r69EL2+F5JCTbceYsqZIAgdj8cDeeuSyM6aTFxcQlsXp0UiiAutbt26nVhtXRg2wsiIixwoFGL6mSAIHUNtjZxtmzIYe9GfSEvr1NbFOS0RxIXzYtvWAo4eMVK4rxP9Bkhk9nWi04uph4IgtD9eL1SUKzl8IB5dSHeuu2YKOl3Ley20FyKIC+dNdXUN33y9mbxN4XT
uEkdUtJaQEBmaEAmlUrTOBUE4cxqNBoPBgELRSksoe7002mVIMg0yQkhN7cWVlw8hLKxjLfgjgrhw3lVXm9m4oWnZTEmSkM52uyFBEH7XRo4cyW23TSc1NbXVzqnVXridBc8XEcSFC87j8YhV/QRBOGsajeY3EXhbk2gOCYIgCEIHJYK4IAiCIHRQIogLgiAIQgclgrggCIIgdFAiiAuC4CdJEklJSW1dDOECSU5ObusiCL+SCOIdVftdylfooJKTk3n22We5995727oowgWg0+lYtGgRDzzwQIdZ2EQIJqaYtRFJITH6udEBj239z1aq9lS1+HsR3SLoMrkLhhQDXq+Xqt1VFHxQgKXEcj6LK3Rw1157LWPHjuUvf/kLbrc76PmePXsyb948amtrefrpp095Hk2IRESEktoaF1Zr8HmEJlqtHLvdg9vdPhc2slqtPP/889x222089dRTPPTQQ1RXV7d1sYSzJIJ4G/F6vOx6fRcAoYmhZIzPQFK03DESlhZG9n3ZVO+tZstzW5AUEhnjM+h6dVfy/51/IYrd5gwGA263G6vV2tZF6VBUKhUymazZAB4XF8fcuXMpKytjzpw51NXVBR3TI1PHzTMTSUnTAOB2e9m2pZ43/lvKsSONQcfL5TKM4QpqzK52G8TOxOiLI/h/9yZx920FHC5qCHr+2j/Hcsk4E9On7Q66zjGXRHDsSCPbttQH/d6wEUbunZ3Co3OK2LI5+P3+ufROIfxrUZdmn1v8/HGWf+mr+Gf21vPoUxn+5xx2L+XldtauruGzjyqw24PXZlixYgVHjx7lscce46GHHuL+++/H6RQbF3UkIoi3Ea/HS1leGQCuBtcZ/U7sgFjcdjdbFm7Bbfd9Gdcfr//ddK1PnDiRm266iZkzZ1JUVNTWxelwTrWd4r333ovX62X+/PnNBvD4BDXz/pHO/r027r2jAHO1i6RkDVOuiWb+k+ncesO+oAAx9/E04hM03HLD3vNyLRfKhvU13PrXRIYMD2s2iA8eamTj+tpmKyq5o4wUH7M3G8RPslpP/9kvLbYzb/ZBAK6fHodGI+eVxccBKD5mDzp+0TPHqKhwEBIip08/PVdfE0P/AaHMm30Qhz24nAUFBTz99NPMmzePadOm8eabb562TEL7IYJ4B6KN0WIts/oDOEBDZfAXy2+VWq0GoLS0tI1L8tsxYMAAevTowXPPPUdlZWWzx2QPNqBUSjyz4Cg1Nb5Wmrnayb69FgYPNTbbwlOr5ZSV2vF4Om4rHMBm9bB9Sz05uUbee7Ms4LmERDWJyWpefbE46PeSkjV06qIlKVmDeqGEvfHcVyhsaPCwY6tvuKx+shuXy+v/uTn799o4fszXO7JxfS0bf6xj7mNpXPXHmKBrOGnTpk18//33TJgwgc8++4yamppzLq9wYYkgfoaUeiWJwxMJTQxFUko0VjdSvqUcc4E56Fi5Wk5CTgLGDCMANQdrKP6hGLfj3MYQE4cnogxVok/QI8kl0salAeB1ezn8zeFzviaA1NRURo8ejclkorKykpUrV3L4cOA5BwwYQExMDF988QWZmZmMHj0auVzO2rVr2bx58zm/tiRJDBkyhL59+2IwGGhsbGT37t2sXr0ah8PhP27w4MEkJSXRp08f3G43V1xxhf+5TZs2ceTIkaBrys3NJS4uDrvdTkFBAStXrqSxMbDbV6lUMmnSJAYMGIAkSWzYsIFPP/2Uiy++mEGDBrF48eKACkOfPn0YNmwYBoOBsrIyvv76619doYiKiuKSSy4hISEBu93ODz/8EPSe9u/fn4SEBD7//HO6d+/OmDFjUCqVrF+/no0bNzZ7zksvvZSUlBSqqqr4/PPPgeZb4hdffDFms5kVK1acsowhIfJmH3fYvaxZFXj/Xz7BhFIlYYpSYrO6mXh1tP+5Lz+tDAj4MhlkDTTQLysUQ5iC2hoX27bUs3ljcG+AKUrJlGkxpGWEYLO6+eKzSvbtsTLp6mhMUSr+taD
pHlAqZeSOCqdnbz1yORwoaOC7b6qx2c7t87duTQ133pdMWnoIRYeaKs0Dh4ZRX+dm5/bggJo72sjhQ43ExqkYnBPG998Ff09cKNu31JO3oY4/XBbJ0nfKTzm8sXTpUkaOHMnw4cP994zQ/ons9DOgClWR80gOyWOSsdfYsZZaMaQYGDR7EJl/yQzoztZEaBgybwjpl6Vjr7PjsrvoenVXBs0ehFzd/Jfh6RgzjJh6mlDpVSj1Skw9TZh6mojsHvmrrmvChAksWrSIoUOH+gPqokWLmDBhQsBxw4cP56qrrmLatGnMmzcPo9FIZmYmjzzyCKNHjz7F2U/vjjvuYNasWajVag4dOoRMJuPWW29l8eLFxMTE+I/r2rUrw4cPJyUlBUmSGD58uP9fdHR0wDnHjx/PokWLGDhwIA6HA4PBwE033cSiRYswGAz+42QyGXPmzGHy5Mns2bOHnTt3ctVVV7FkyRIuueQSDh8+jELRVMe9/fbbefzxx9FqtZSVlZGdnc1//vMf+vTpc87Xn52dzUsvvUT//v0pKytDr9czf/58br755oDjhgwZwtSpU5k0aRKPPvookZGR/kS0Sy65JODYbt268cILLzB06FCqq6sxmUz8+9//bnbTCEmSyMrKIi8vr9mx8pPy83xBdcbt8SiVLY/dDBlmJGe4EUOYgkiTipzhRv8/haLpd2UymDM/nfvnpJKcqsFu95CeEcKDj6Rx132B057CI5Q8vbAL3Xro2LyxjspKJ7PmprL4te7EJ2goL3Nwsn5iCFPwj3914vob47DUu6ivczNlWgxPPd+ZUMO5ff42/ViHw+FhyPDA3a2G5IQ125UuSTJGjA7nu+VVbPyxlpFjws/pdVvThnW1GMIU/pyG5hw+fJjS0lIGDBhwAUsm/FqiJX4GTL1MaCI0rJ+3nrqjTa2E+KHxhJhC4Gef4V439kLmlfHDnB/8Y93H1xxnyENDSLs0jQOfHDjr1z+ZAJd1dxYKjYK8f+b9ugvCl408Y8YM1qxZw9NPP43H40GSJO655x5uuukmDh48yM6dO/3Hm0wmsrKyuPHGG6mrq0OtVvPCCy8wYcIEVq5cedavL5fLueiii/j88895+eWX/Y+/8cYb3HnnndTW1gY89sYbbzBz5kxGjRrFX//611Oed9u2bTz88MMBrdnY2Fiee+45pk2bxksvvQT4gl12djZ33303+/fvB2DNmjUsXLiQ//73v+zYscP/+2PGjGHcuHHMmzeP/HxfAuHbb7/Nk08+yZ133smMGTPOekMXo9HIfffdx+rVq1m4cCFer+8mmjJlCtOnT2ft2rXs3ds0nmwwGMjNzWXGjBnU1NSgUql4/vnnmThxIt98843/Pb3vvvvYv38/Dz/8MC6X7/678sorueWWWzCbA1uDBoMBrVZ72vyCwv023v6/Uq6bHkfnrlo+/uAEK5ebcTiCr/nBv/nu71fe6sHe3VaeeeJI0DHg27/5u+XVvLjwOBUnmnpdckeFc/cDyaz+zuwfS/7D5ZHY7R7uv6vQ3y1dWmxn0tUxPPXY4YAgevPMBMIjlNx5y35qa3zX/79PK3hucTeu+VMcLy063uK1Nsdmc7Mt30JOrpF33/B1R0ealHTqom22e7p7Ty2RJiU//lBLWamDWXNTiYhUUl3VdgljZaW+9zg+Qc2hA6cegisqKiIxMfFCFUtoBaIlfgZsJ2wAJI1KCsggL1lfwsHPD/p/1ifoiewRScHHBQHJanWH66jcXUlMVlPrsq1NnDiRxsZGFi9e7A9AHo+HF198EZvNFtQalySJf/3rX/7EJ7vdzoYNG4iLizun13e73VRUVDBgwICAc1RVVTFv3rygrm/wjYk3NLScA3D06NGg7uiysjK++eYbBg4c6H8sLi6Ouro6fwAHOHToEFVVVUFfYuPHj2fz5s3+AA7gdDr55JNPiI2NJT09/cwu+mfGjBmDVqvl1Vdf9QdwgI8//hiXy8WQIUMCjpfL5TzzzDP
+sUqHw8H69euJi4vzd5NnZmYSFxfHkiVL/AEc4KuvvsJiCe7y1ev1ANTXnzrx6qRlS08wb/ZBqquc3HJHIi+81o2LLolALm++Za5WSzQ2tFyxWb+2JiCAA6xZZeb4UTtZA0P9j8UnqNm7yxowrpyfV4dWJxEe0bS3dESkksE5YXz4Xrk/gAOUlTj48Ycahgw7932if/jeTHyCmrT0EMCXJ2Cpd7NjW/D7OuqiCPbutlFV6WRbfj2NDR6GjzSe82u3hsoK3/scom35K7++vp7Q0NAWjxHaF9ESPwM1B2rYv3Q/nSd1JiYrhrJNZRSvL6b2UG1AKzwszfclEdkjEmNa4IdWHaYmJDLkQha7Rd27d6ewsDDoC9xisVBYWEj37t0DHjebzUHjv2azGZ1Oh0qlChjDPlMLFizg73//O6+88gr5+fmsXLmSDRs2YLcHZ9yCL4g3F9x/SZIkevfuTZcuXfyLWMTGxhIZ2TT8UFpaSmhoKHFxcf7rio6OJjw8nPLycv9xcrmc9PR09u/fz/Tp0wNe5+T5oqOjOXDg7HpYOnfujNVq5eqrrw56zuFwBA0TWCwWjh49GvBYTU0NSqUSnU6HxWLxVz5+mSPgcDg4fPhwUOXkZIXsZDA/nZ3bLMzedoDe/fRMvTaWmXcnMXxkOPPnHArqUlapZc0mvP2STi+nX1YoiUkaVCoZLpcXj8dLpEnlP6a0xM7goWFIcvD81OvfpZsOh91LbU1T6zYtIwS5XEbX7jqiY1QBrxMbrybMqEClkprtQTidvI112O2+LvWiQw0MHhrGph+Du9LVaonBw8J4+zXfPeVyedmwrpYRY8L59KOKs37d1mapbzkvIDQ0NKAXTGj/Wj2In2oaS0dX9FURZZvLSByeSPzQeJLHJFNbVMvu13f7u9iVOl+rQB2mxqsP/HDbym1YjrefBVl0Ot0pM1DNZjO9evUKeKy5uaMnW5Dn+jc/cOAAt956KyNHjmT06NHcd9992Gw23njjDb744oug4xUKRYtjtwA9evTgb3/7G6GhoezatYva2lrUajUJCQmoVCrkcjlut5v9+/dTVFTEY489xrJly/B4PEyaNIljx46xdetW//m0Wi1yuRy1Wk1sbGzQ661duzaom/pM6PV6vF5vs+fMz8+nsLAw4LHm3v+TPSiSJPnLeqpjm5tXX19fj8ViISUl5azKvmOrhR1bDzB+UhTTb45n5EXhfPdN4CIhSoWEy9VysLx8golrb4jDbHZy6EADDTYPoQY5hjAF1dVN17BmlZnxk6J48JE01q6qISpGxaQp0Xz1v0qczqbPmT7UN+ZtCFOgVAXek5UVDn9r9Fw0NnjYkufLUv/0owoy++h5fG7wMET2YAM6nZzRYyMYMsxXkQ+PVJCUrCElTcORotNXQs+HqGhfpaa8rOX3IDk5Oaiy2F7IZDL/vS40adUgLpfL/V+Sv0UNFQ0ULivkwCcHMPUy0fNPPcm6J4s1963B7XTjsvm68AqXFWIpbj8Buzk1NTVEREQ0+1xUVNQ5BaZz4XA4WL58OcuXLycuLo4ZM2Ywc+ZMqqqq2LBhQ8CxXq8XlUp1ijNBREQE8+fPJy8vj2eeeSYgmE2ZMoW0tDT/zx6Ph71799KnTx+uv/56wDee/vLLLweMb9tsNjweD9u2bePVV19trcvGYrHgcDhYsGBBq53zZAsqOjo66Is4Kioq6Hiv10t+fj7Z2dnn9Ln936cVTLs+1t/F/HMerxe1+tRfuMNGGLnx1gQWPXssqALwj6c7BfxcUe6k4oSD8AglN9+RiKXexWfLKnj/3cDxaNtPK8h9/MEJdu1o/c/f+rU1DBmWwpRpMdisnma70nNHhVO438aKX1zTtTfEMmJ0OG++2jbTI3v10WOpd7dYiUhISCAxMZFly5ZdwJKdOUmS0GhOnZj3e9Wq1RqFQhGQ0ftboY3Woo3R+n/2erxUbK/gwCcHUIep0UT5bqyaA76WramnqU3KeTa2bNlC9+7
dg77co6Oj6d69O1u2bDnvZcjKygr4ubS0lCeeeAK32x3UnQ++7t+Trc3m9OvXD61Wy9KlSwMCuEwmCxgPB19S16WXXsoTTzzB1KlTmTp1Kk888UTQspNut5uCggL69evXqr1M+/btw2QyteoGFLt2+RIgc3JyAh7X6XQkJCQ0+zsrVqzAZDIxYsSIU563R6aOsLDgz7VGLUehlFHVTMJWbY3rlFPTAAYNDaOq0hkUwCMilWR0DvwbDx4WRkiInHtuL+CaiTu5+U97ee+tMn/X+kmF+xrwuKFv1vkZ0920oRZ7o4fLJ5ia7UoPC1PQPzuULz6rZPmXVQH/1v9QS+6ocCTpwvdU6kPlXHqFie9XmltcPW/KlClYrVbWrVt3AUt35mQymQjizWjVIB4WFkZISPsZ920tXf/Ylb6390Uf3zR2KFfJie4XjdPipLHSV7u1lFoo31JO50mdic1u6iaVFBKdJ3YmvMuFm2qSnZ3Nq6++yqWXXtrs8x9++CFut5tZs2b5W+RGo5H7778fl8vFBx98cF7Ll5OTw5w5c7j44osDguOwYcOQy+VB3ckABw8exGAw+Kd1GQwGbr31Vv8iMCdboj8f+5UkiWuvvTaoUuB2u3E4HNx///3cfPPNTJkyhQkTJjBixAjCwgIToJYuXUp6ejp33nlnQCWid+/eXH/99ecU3L/99luqq6uZPXt2wPSv8PBwZs6cidF49olQJSUlbNy4kYkTJ9K1a1fAt9zqbbfdhkKhaLac+fn57Ny5k+nTp5/yNf98UzwPL8igR2bTJhlhRgX3zErG7fKydlVwr83hQw307qdHp/cF8rSMEP54XVNiZ32dC51OjtHYlJhmNCr5671JyH9RX2iweYg0KXnosTSumx7HxKujufQKE/2zQwOmvNXUOFnxTRVXXhXFmEsi/NPO5HIZEyZHM2CQgeZMvS6Wha90pVOXU1cQwTcvfvOmOuRyGevWBA9F5Yww4nJ52bQ+eEx57SozkSYlmX0u3EYjkiSjR6aOR5/KwOHw8P47zS/0AtCrVy/GjBnDsmXLzijR8UKTJAmZTBYwTVTwadVmc0JCAjqdjqqqljfx6GhK1pfQ/dru5Dyag7XcisfpQRutBS9sf2l7wCIuu17dRa+betH39r40VjfSWNNIaEIobrs7YHMTbYyWoXOHAiD7KcO37+198f5UU17/yHp/Vvy5GDhwIHFxceTk5PDVV18FX1NJCY888gizZs3i9ddf58SJE0RHR2Oz2Xj44YcpLg5ehao1FRQUsHfvXu6++26mT59OZWUlRqMRk8nEV1991Wxr4Pvvv2fy5Mk89thjVFRUEBUVxbFjxzAajZSXl7N161Y2b97M/fffz7hx43A6naSmplJeXs6GDRsYOnSo/1whISGUlJRQWVlJr1690Ol0GI1GNBoNjY2NPPjgg/4pXhs2bOCFF17gxhtvJDc3lyNHjhAZGUlERMQ5dz1arVbmzp3L3//+dxYuXEhxcTEej4fk5ORmKzBn6rnnnmP+/Pk8++yzVFRUoNfrOXToEF9//TVjxoxBJpMFZMMDPPPMMzz//PM8+OCDzJs3D5st8L57+/VS7rg7icef7kR9nRu73UN4hAJ7o5enHj9MZUVwS/yTDyt4eEE6/32rB1arm4hIJevXNgW+ZUtPMHBIGAtf6cqeXRa0OjnpnUL44pNKOncNDHSNDR4qKxy4XTBwiIGQEIkwoxKlUsaxo43MuuuAfyGX114qQa2RuOPuJK69IZbKCicJSWrwynju6ebHeoeNNJKQqKZXXz0HClr+zP3wfQ29+4Y225U+Ykw4+ZvqaGgmK3/fHhuVFQ5GjokIWG3tiWc7N/s6K7+tZuG/jgEwfKSRe2Y15S143PD2h5kAfPZxBUvfKQ/43YcXpONyeTEaFSiVEnkba3n5P8XU1zU/XJKcnMysWbMoKChg6dKlLV5/W9FqtYSEhIgx8WbIvL/8RP8K69at46WXXiIv79fPY25vJIV
ERLcI9PF6JKVEQ2UDVXuqcNQ3nyhiSDUQ0SXCd2xFAxU7KnA1Nk17kavl/hXdmmM+YMbziyxaQ7IBmVxGbdHps0eNRiNjx45l7dq1La4qplarycrK8q/Ylp+fH5QdnpycjF6vZ8+ePQGPR0dHEx8fz86dO3G73fTq1eu0LciioiKOH2+aq5ucnExmZiZ6vR6r1cqePXtanLes0WgYOHAgGo2G4uJi9u7dGzCGLZPJ6Nu3L+np6chkMgoLC9mxYwfJycmEh4ezfft2vF4vTzzxBGVlZfz73/8OOH9kZCTPP/88+fn5PPPMMwHPGY1GBgwYQEREBDabja1btwZUdnr27HnKPIOTjhw5EjBeLZfL6d+/PykpKXi9Xg4ePMiOHTsCrikpKYmwsDB/d/lJJpOJxMRE//t/kiRJDBw4kKSkJIqLi9m4cSPh4eEkJiaya9eugOlnJ/Xq1Yu5c+dSWlrK008/HTSmrlDIyOytJylFg1wBlSecbN5U1+I0stg4NT176XA4PBQdbPQvBXqSSi1j0JAwYuPV2KxutuTVU1piJ7O3HqvVTdHBBoxGJS++3o1///MoG9YF3vfZgw38/eE0nv7HkaCWcXKqhszeekJCJKoqfWU9VWZ2eqcQevcN5esvKk87LU6tlujZSx+0aYkkycjso6O02BE0be6k1LQQNCES+/ZYCTO2vPCKudrl31jGGK4gObX5Y0+UOygr8b1eWJiCzD6+3kKn00ON2UVpif2UwRt8lf177rkHs9nMgw8+2G53MYuPj2fy5MncdtttbV2UdqdVg/iJEydYsGABX3/9dWudUuhAFixYcNoVzF577TU+/PDDC1SiU/v444/56KOPePvtt4OeW7hwIQcPHgwK8Kczf/7806529eabb7JkyZKzOu+FkpKSwgMPPIDRaOS666476wVszodeffTMfzKD++8spHB/YCs5IlLJq+/04In5h9nYTBe20LLk5GRefPFF1q9fz7PPPtuudwYcOHAgt9xyS0BvmuDTqt3p0dHRqNVq9Hp9s4tLCL9ts2fPbusinLG8vDwmT56MQqFgx44d1NfXYzQaGTVqFCkpKSxatOiszzl37tzzUNIL58iRI9xxxx1kZma2iwAOcOhAA+ZqJ/fOTuHTjyo4UtSA2+0lJlbNlVdFUVbiYGt+y1t5Cs07duwYs2fPZvv27W1dlBbJZDIiIiL8eR5CoFZtiQN88sknvP322+zevbs1TysIrUqpVDJ27FiGDRtGfHw8ISEh2Gw29u3bx7JlyygoKGjrIgo/iTQp+cPlkfQfYCA8wjfOa652sn2rhY+WnPDvrCb8NplMJsaOHctDDz3U1kVpl1o9iJeVlfHkk0+yfPnydlObFwRBEDqmwYMHM336dHJzc9u6KO1Sq6f6xcbGkpycfMp5qYIgCIJwJrRaLSaTiUGDBrV1Udqt85KvP378eHr06CGmAwiCIAjnLDMzk9GjR/vXghCCnZcom5GRQefOnenUqdPpDxYEQRCEXwgPDyc+Pp7Ro0e3dVHatfPWVL7mmmvIyMg44x2SBEEQBAF8ax1kZ2czdepU0Qo/jfMWxMPDw5kyZQqDBw8W3eqCIAjCGevXrx+ZmZmnXXdCOI9BHGDIkCH07duX7Ozs3+wWpYIgCELrSU1NJSMjgxtuuKGti9IhnPctx2644QbMZjONjY3tflEBQRAEoe3ExcXRp08f7rzzzha3HRaanPd+boVCwcyZM+nduzdZWVmiRS4IgiAESUpKIisri7/+9a+n3YNAaNLqi72cisPhYPHixezcuZNNmzYF7PcsCIIg/D7JZDK6d+9Ot27duOuuu4iKimrrInUoFyyIA3g8Hj755BPWrFlDXl5eu90xRxAEQTj/QkJCGDhwIF27duWmm24Ss5nOwXkfE/85SZKYNGkS3bt39+/nvH379qCtLwVBEITfLrlcTqdOnUhPr88DDAAAAbZJREFUT2fcuHH+ve6Fs3dBW+I/53A4+PLLL1m
7di0lJSUUFBRgs9lO/4uCIAhCh6RUKklOTqZz58507dqVqVOnYjQa27pYHVqbBfGTLBYLK1asYN26dTQ0NHDgwAGqqqrEVqaCIAi/AWq1moiICNLT0zEYDPTo0YPLL7+c2NjYti7ab0KbB/GTPB4Pe/fuJS8vj/3799PY2IjT6cRsNlNbW4vb7cblcrV1MQVBEIRTkCQJuVyOXq8nIiICjUaDQqEgNTWV7Oxs+vTpg0ajaeti/qa0myD+S7W1tZSVlVFRUYHFYqG+vl6MnQuCILRjSqUSnU6HXq8nMjKSmJgYoqKixHj3edRug7ggCIIgCC0Ti5oLgiAIQgclgrggCIIgdFAiiAuCIAhCByWCuCAIgiB0UCKIC4IgCEIHJYK4IAiCIHRQIogLgiAIQgclgrggCIIgdFAiiAuCIAhCByWCuCAIgiB0UCKIC4IgCEIHJYK4IAiCIHRQIogLgiAIQgclgrggCIIgdFAiiAuCIAhCByWCuCAIgiB0UCKIC4IgCEIHJYK4IAiCIHRQCiC3rQshCIIgCMLZ+/+CjpsCD7MpQgAAAABJRU5ErkJggg==)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gCM7oYtPga5x" + }, + "source": [ + "\n", + "### `make_dataloader`\n", + "\n", + "The first step of the `fit()` method is to ensure that the data is in an appropriate format for iteration. Both the `train_set` and `valid_set` are passed along with their respective keyword arguments. Here's the actual code:\n", + "\n", + "```python\n", + "if not isinstance(train_set, DataLoader):\n", + " train_set = self.make_dataloader(\n", + " train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs\n", + " )\n", + "if valid_set is not None and not isinstance(valid_set, DataLoader):\n", + " valid_set = self.make_dataloader(\n", + " valid_set,\n", + " stage=sb.Stage.VALID,\n", + " ckpt_prefix=None,\n", + " **valid_loader_kwargs,\n", + " )\n", + "```\n", + "\n", + "By default, this method handles potential complications to DataLoader creation, such as creating a DistributedSampler for distributed execution. As with all the other methods in the `fit()` call, this can be overridden by creating a `make_dataloader` method in the Brain's sub-class definition." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gW2c5Ekx3Kzj" + }, + "source": [ + "### `on_fit_start`\n", + "\n", + "Besides the dataloader, there's some setup that needs to happen before the training can begin. 
Here's the relevant code:\n", + "\n", + "```python\n", + "self.on_fit_start()\n", + "\n", + "if progressbar is None:\n", + " progressbar = self.progressbar\n", + "```\n", + "\n", + "The `on_fit_start` method takes care of a few important things, which can most easily be explained by sharing the code:\n", + "\n", + "```python\n", + "def on_fit_start(self):\n", + " self._compile_jit()\n", + " self._wrap_distributed()\n", + " self.init_optimizers()\n", + " if self.checkpointer is not None:\n", + " self.checkpointer.recover_if_possible(\n", + " device=torch.device(self.device)\n", + " )\n", + "```\n", + "\n", + "Basically, this method ensures that the torch modules are prepared appropriately, including jit compilation, distributed wrapping, and initializing the optimizer with all of the relevant parameters. The optimizer initialization also adds the optimizer parameters to the checkpointer if there is one. Finally, this method loads the latest checkpoint in order to resume training if it was interrupted." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XPrYWtcp-ncp" + }, + "source": [ + "### `on_stage_start`\n", + "\n", + "This next section starts the epoch iteration and prepares for iterating the train data. To adjust the preparation one can override the `on_stage_start` method, which will allow for things like creating containers to store training statistics.\n", + "\n", + "```python\n", + "for epoch in epoch_counter:\n", + " self.on_stage_start(Stage.TRAIN, epoch)\n", + " self.modules.train()\n", + " self.nonfinite_count = 0\n", + " if self.train_sampler is not None and hasattr(\n", + " self.train_sampler, \"set_epoch\"\n", + " ):\n", + " self.train_sampler.set_epoch(epoch)\n", + " last_ckpt_time = time.time()\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MI8fdqsYA9pT" + }, + "source": [ + "### Training loop\n", + "\n", + "The longest blocks of code in this tutorial are devoted to training and validation data loops. 
However, they really only do three important things:\n", + "\n", + "1. Call `fit_batch()` on each batch in the DataLoader.\n", + "2. Track average loss and report it.\n", + "3. Optionally save a checkpoint periodically so training can be resumed.\n", + "\n", + "Here's the code:\n", + "\n", + "```python\n", + "enable = progressbar and sb.utils.distributed.if_main_process()\n", + "with tqdm(\n", + " train_set, initial=self.step, dynamic_ncols=True, disable=not enable,\n", + ") as t:\n", + " for batch in t:\n", + " self.step += 1\n", + " loss = self.fit_batch(batch)\n", + " self.avg_train_loss = self.update_average(\n", + " loss, self.avg_train_loss\n", + " )\n", + " t.set_postfix(train_loss=self.avg_train_loss)\n", + "\n", + " if self.debug and self.step == self.debug_batches:\n", + " break\n", + "\n", + " if (\n", + " self.checkpointer is not None\n", + " and self.ckpt_interval_minutes > 0\n", + " and time.time() - last_ckpt_time\n", + " >= self.ckpt_interval_minutes * 60.0\n", + " ):\n", + " run_on_main(self._save_intra_epoch_ckpt)\n", + " last_ckpt_time = time.time()\n", + "```\n", + "\n", + "Perhaps the most important step is the `fit_batch(batch)` call, which we show a trimmed version of here:\n", + "\n", + "```python\n", + "def fit_batch(self, batch):\n", + " outputs = self.compute_forward(batch, Stage.TRAIN)\n", + " loss = self.compute_objectives(outputs, batch, Stage.TRAIN)\n", + " loss.backward()\n", + " if self.check_gradients(loss):\n", + " self.optimizer.step()\n", + " self.optimizer.zero_grad()\n", + " return loss.detach().cpu()\n", + "```\n", + "\n", + "This method calls the most important methods for fitting, `compute_forward` and `compute_objectives` that both must be overridden in order to use the Brain class. Then the loss is backpropagated and the gradients are checked for non-finite values and excessively large norms before the update is applied (large norms are automatically clipped by default)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AI9amAcWEAtJ" + }, + "source": [ + "### `on_stage_end`\n", + "\n", + "At the end of the training loop, the `on_stage_end` method is called for potential cleanup operations, such as reporting training statistics.\n", + "\n", + "```python\n", + "self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)\n", + "self.avg_train_loss = 0.0\n", + "self.step = 0\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uD2HftQ5E7XE" + }, + "source": [ + "### Validation loop\n", + "\n", + "Much like the training loop, the validation loop iterates the dataloader and handles one batch of data at a time. However, instead of calling `fit_batch` this loop calls `evaluate_batch` which does not backpropagate the gradient or apply any updates.\n", + "\n", + "```python\n", + "if valid_set is not None:\n", + " self.on_stage_start(Stage.VALID, epoch)\n", + " self.modules.eval()\n", + " avg_valid_loss = 0.0\n", + " with torch.no_grad():\n", + " for batch in tqdm(\n", + " valid_set, dynamic_ncols=True, disable=not enable\n", + " ):\n", + " self.step += 1\n", + " loss = self.evaluate_batch(batch, stage=Stage.VALID)\n", + " avg_valid_loss = self.update_average(\n", + " loss, avg_valid_loss\n", + " )\n", + "\n", + " if self.debug and self.step == self.debug_batches:\n", + " break\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rs2_TtBBF9qk" + }, + "source": [ + "### `on_stage_end`\n", + "\n", + "This method is the same one as the method for the train stage, but this time is only executed on a single process, since often the process will involve writing to files. 
Common uses include: updating learning rate, saving a checkpoint, and recording statistics for an epoch.\n", + "\n", + "```python\n", + "self.step = 0\n", + "run_on_main(\n", + " self.on_stage_end,\n", + " args=[Stage.VALID, avg_valid_loss, epoch],\n", + ")\n", + "```\n", + "\n", + "The very last thing is a simple check for debug mode, to run only a few epochs.\n", + "\n", + "```python\n", + "if self.debug and epoch == self.debug_epochs:\n", + " break\n", + "```\n", + "\n", + "Congrats, you now know how the `fit()` method works, and why it is a useful tool for running experiments. All of the parts of training a model are broken down and the annoying bits are taken care of, while full flexibility is still available by overriding any part of the Brain class." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qQ_j50q7H7-A" + }, + "source": [ + "## The `evaluate()` method\n", + "\n", + "This method iterates the test data in much the same way as the validation data of the `fit()` method, including calls to `on_stage_start` and `on_stage_end`. One additional method that is called is the `on_evaluate_start()` method, which by default loads the best checkpoint for evaluation.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P6tFofSQG7ZU" + }, + "source": [ + "## Conclusion\n", + "\n", + "The Brain class and the `fit()` method in particular were inspired by other popular Python libraries for statistics and machine learning, notably numpy, scipy, keras and PyTorch Lightning.\n", + "\n", + "As we add tutorials about more advanced usage of Brain class, we will add links to them here. 
Some examples of planned tutorials:\n", + "\n", + "* Writing a GAN with the Brain class\n", + "* Distributed training with the Brain class\n", + "* Non-gradient-based usage of the Brain class\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " 
primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1cYIsQiKlXTyfGR3j4gKs5Rq648JDaqaB", + "timestamp": 1612451962852 + } + ] + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/basics/checkpointing.ipynb b/docs/tutorials/basics/checkpointing.ipynb new file mode 100644 index 0000000000..a0ef6f23ed --- /dev/null +++ b/docs/tutorials/basics/checkpointing.ipynb @@ -0,0 +1,580 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/basics/checkpointing.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/basics/checkpointing.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "95iYRMRpskWo" + }, + "source": [ + "# Checkpointing\n", + "\n", + "By *checkpointing*, we mean saving the model and all the other necessary state information (like optimizer parameters, which epoch, and which iteration), at a particular point in time. For experiments, this has two main motivations:\n", + "- *Recovery*. Continuing an experiment from half-way through. A compute-cluster job can run out of time or memory, or there can be some simple error, which stops the experiment script before it finishes. 
In that case, all progress that isn't saved to disk is lost.\n", + "- *Early stopping*. During training, performance should be monitored on a separate validation set, which gives an estimate of generalization. When training progresses, we expect validation error to decrease at first. If we train too long, though, validation error can start to increase again (due to *overfitting*). After training, we should go back to the model parameters that performed best on the validation set.\n", + "\n", + "Besides, it is also important to save the trained model parameters, so that the model can be used outside the experiment script.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aJre_xHT7dWY" + }, + "source": [ + "## The role of the SpeechBrain checkpointer\n", + "\n", + "The SpeechBrain checkpointer simply orchestrates checkpointing. It keeps track of all the things which should be included in checkpoints, how each of those is saved, where the checkpoints should go, and it centralizes loading and saving.\n", + "\n", + "The checkpointer doesn't actually save things to the disk itself. It either finds a suitable saving function by type (class inheritance considered), or you can provide a custom hook." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6RBQlaRvTSN4" + }, + "source": [ + "## Installing dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DPX-4BBbX5L9" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Hi80ogTiuo1b" + }, + "outputs": [], + "source": [ + "import speechbrain as sb\n", + "import torch\n", + "from speechbrain.utils.checkpoints import Checkpointer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dau1tkXHVydl" + }, + "source": [ + "## The SpeechBrain Checkpointer in a nutshell\n", + "\n", + "Run the following code block multiple times. Each time you run the block, it trains one epoch, then ends. Running the block again is similar to restarting an experiment script." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JBPPtyM-Wb9Q" + }, + "outputs": [], + "source": [ + "# You have a model, an optimizer and an epoch counter:\n", + "model = torch.nn.Linear(1, 1, False)\n", + "optimizer = torch.optim.Adam(model.parameters(), lr=1.0)\n", + "epoch_counter = sb.utils.epoch_loop.EpochCounter(10)\n", + "# Create a checkpointer:\n", + "checkpoint_dir = \"./nutshell_checkpoints\"\n", + "checkpointer = Checkpointer(checkpoint_dir,\n", + " recoverables = {\"mdl\": model,\n", + " \"opt\": optimizer,\n", + " \"epochs\": epoch_counter})\n", + "# Now, before running the training epochs, you want to recover,\n", + "# if that is possible (if checkpoints have already been saved.)\n", + "# By default, the most recent checkpoint is loaded.\n", + "checkpointer.recover_if_possible()\n", + "# Then we run an epoch loop:\n", + "for epoch in epoch_counter:\n", + " print(f\"Starting epoch {epoch}.\")\n", + " # Training:\n", + " optimizer.zero_grad()\n", + " prediction = model(torch.tensor([1.]))\n", + " loss = (prediction - torch.tensor([1.]))**2\n", + " loss.backward()\n", + " optimizer.step()\n", + " print(f\"Model prediction={prediction.item()}, loss={loss.item()}\")\n", + " # And finally at the end, save an end-of-epoch checkpoint:\n", + " checkpointer.save_and_keep_only(meta={\"loss\":loss.item()})\n", + " # Now, let's \"crash\" this code block:\n", + " break\n", + "else:\n", + " # After training (epoch loop is depleted),\n", + " # we want to recover the best model:\n", + " print(\"Epoch loop has finished.\")\n", + " checkpointer.recover_if_possible(min_key=\"loss\")\n", + " print(f\"Best model parameter: {model.weight.data}\")\n", + " print(f\"Achieved on epoch {epoch_counter.current}.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Et7kVaJxbt-3" + }, + "outputs": [], + "source": [ + "# You can use this cell to reset, by deleting all checkpoints:\n", + 
"checkpointer.delete_checkpoints(num_to_keep=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CMgQnm44VhFH" + }, + "source": [ + "## What does a checkpoint look like?\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qc8aOduf5s60" + }, + "source": [ + "The checkpointer is given a top-level directory, where all the checkpoints go:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "UPt5VC3q5PCP" + }, + "outputs": [], + "source": [ + "checkpoint_dir = \"./full_example_checkpoints\"\n", + "checkpointer = Checkpointer(checkpoint_dir)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y7FwgfET4uCi" + }, + "source": [ + "Each checkpoint should contain many things like model parameters and training progress." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VgiLyDeT4Bw5" + }, + "outputs": [], + "source": [ + "# You have a model, an optimizer and an epoch counter:\n", + "model = torch.nn.Linear(1, 1, True)\n", + "optimizer = torch.optim.Adam(model.parameters(), lr=1.0)\n", + "epoch_counter = sb.utils.epoch_loop.EpochCounter(10)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Stelxn6i4lpl" + }, + "source": [ + " Each entity to save is assigned to the checkpointer separately, with a unique key, like a name:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ai9e8ITo4oO0" + }, + "outputs": [], + "source": [ + "checkpointer.add_recoverable(\"mdl\", model)\n", + "checkpointer.add_recoverables({\"opt\": optimizer, \"epoch\": epoch_counter})" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jPWCoH6W6pK2" + }, + "source": [ + "When a checkpoint is saved, the checkpointer creates a directory inside the top-level directory. That sub-directory represents this saved checkpoint. 
Inside the newly created directory each entity, that was passed to the checkpointer, gets its own file.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Or0mF2xc_BRB" + }, + "outputs": [], + "source": [ + "ckpt = checkpointer.save_checkpoint()\n", + "print(\"The checkpoint directory was:\", ckpt.path)\n", + "for key, filepath in ckpt.paramfiles.items():\n", + " print(\"The entity with key\", key, \"was saved to:\", filepath)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "S3hXv3MO_8CA" + }, + "source": [ + "### What goes in each file?\n", + "\n", + "That is up to the entities. The checkpointer finds a saving \"hook\" by type (class inheritance considered) and calls that hook with the object to save and a filepath.\n", + "\n", + "Torch entities (Module, Optimizer) have default save and load hooks already:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "D7TU-p2yAOgf" + }, + "outputs": [], + "source": [ + "torch_hook = sb.utils.checkpoints.get_default_hook(torch.nn.Linear(1,1), sb.utils.checkpoints.DEFAULT_SAVE_HOOKS)\n", + "print(torch_hook.__doc__)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iVeC5y9EBB-C" + }, + "source": [ + "Classes can register their own default saving and loading hooks:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8EPuL6MPBjO9" + }, + "outputs": [], + "source": [ + "@sb.utils.checkpoints.register_checkpoint_hooks\n", + "class Duck:\n", + " def __init__(self):\n", + " self.quacks = 0\n", + "\n", + " def quack(self):\n", + " print(\"Quack!\")\n", + " self.quacks += 1\n", + " print(f\"I have already quacked {self.quacks} times.\")\n", + "\n", + " @sb.utils.checkpoints.mark_as_saver\n", + " def save(self, path):\n", + " with open(path, \"w\") as fo:\n", + " fo.write(str(self.quacks))\n", + "\n", + " @sb.utils.checkpoints.mark_as_loader\n", + " def load(self, path,
end_of_epoch):\n", + " # Irrelevant for ducks:\n", + " del end_of_epoch\n", + " with open(path) as fi:\n", + " self.quacks = int(fi.read())\n", + "\n", + "duck = Duck()\n", + "duckpointer = Checkpointer(\"./duckpoints\", {\"ducky\": duck})\n", + "duckpointer.recover_if_possible()\n", + "duck.quack()\n", + "_ = duckpointer.save_checkpoint()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "V2aCU6GHD-Lc" + }, + "source": [ + "### Meta info\n", + "\n", + "The checkpoint also stores a dictionary of meta information. You can put e.g. validation loss or some other metric there. By default, only the unix time is saved." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AlSrsN2hEXyB" + }, + "outputs": [], + "source": [ + "# Following from the cells of \"What does a checkpoint look like?\"\n", + "checkpointer.save_checkpoint(meta={\"loss\": 15.5, \"validation-type\": \"fast\", \"num-examples\": 3})\n", + "ckpt = checkpointer.save_checkpoint(meta={\"loss\": 14.4, \"validation-type\": \"full\"})\n", + "print(ckpt.meta)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7lWlW5uUFCg0" + }, + "source": [ + "This meta information can be used to load the best checkpoint, not just the most recent one:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Qmdwe1SUFPAr" + }, + "outputs": [], + "source": [ + "ckpt = checkpointer.recover_if_possible(min_key=\"loss\")\n", + "print(ckpt.meta)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y0SyTFCuFdgh" + }, + "source": [ + "There are also more advanced filters available:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "l5qfajLQFct6" + }, + "outputs": [], + "source": [ + "checkpointer.save_checkpoint(meta={\"loss\": 12.1, \"validation-type\": \"fast\", \"num-examples\": 2})\n", + "ckpt = checkpointer.recover_if_possible(importance_key=lambda ckpt:
-ckpt.meta[\"loss\"]/ckpt.meta[\"num-examples\"],\n", + " ckpt_predicate=lambda ckpt: ckpt.meta.get(\"validation-type\") == \"fast\")\n", + "print(ckpt.meta)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Cpj3kIIKHHpV" + }, + "source": [ + "## Keeping a limited amount of checkpoints\n", + "\n", + "Neural models these days can be huge, and we don't need to store every checkpoint. Checkpoints can be deleted explicitly, and the same types of filters can be used as with recovery:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "mVRFNGbQHQy3" + }, + "outputs": [], + "source": [ + "checkpointer.delete_checkpoints(num_to_keep=1, ckpt_predicate=lambda ckpt: \"validation-type\" not in ckpt.meta)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dX4oJxG9HiXr" + }, + "source": [ + "But for convenience, there is also a method which saves and deletes at the same time:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "n7wOm8vKKAgr" + }, + "outputs": [], + "source": [ + "checkpointer.save_and_keep_only(meta={\"loss\": 13.1, \"validation-type\": \"full\"},\n", + " num_to_keep = 2,\n", + " ckpt_predicate=lambda ckpt: ckpt.meta.get(\"validation-type\") == \"full\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "paC8hwHBnjWR" + }, + "source": [ + "### Pretraining / parameter transfer\n", + "\n", + "Transferring parameters from a pretrained model is different from recovery, although they have some similarities.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "awsgErXzgGHi" + }, + "source": [ + "### Finding the best checkpoint\n", + "\n", + "The first step in parameter transfer is to find the ideal set of parameters to take. You can use the checkpointer for that: point an empty checkpointer at the top level checkpoints directory of an experiment, and find a checkpoint with your criterion."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MtTuVT7pnjWT" + }, + "outputs": [], + "source": [ + "\n", + "ckpt_finder = Checkpointer(checkpoint_dir)\n", + "best_ckpt = ckpt_finder.find_checkpoint(min_key=\"loss\",\n", + " ckpt_predicate=lambda ckpt: ckpt.meta.get(\"validation-type\") == \"full\")\n", + "best_paramfile = best_ckpt.paramfiles[\"mdl\"]\n", + "print(\"The best parameters were stored in:\", best_paramfile)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sllsaFKFnjWU" + }, + "source": [ + "### Transferring parameters\n", + "\n", + "There is no generic formula for parameter transfer, and in a lot of cases you may have to write some custom code to connect the incoming parameters to the new model.\n", + "\n", + "SpeechBrain has an almost trivial implementation for transferring parameters to another torch Module, which simply loads the matching layers (by name) and ignores saved parameters for which no matching layer is found:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DSdQ-9y-njWV" + }, + "outputs": [], + "source": [ + "finetune_mdl = torch.nn.Linear(1,1,False) #This one doesn't have bias!\n", + "with torch.no_grad():\n", + " print(\"Before:\", finetune_mdl(torch.tensor([1.])))\n", + " sb.utils.checkpoints.torch_parameter_transfer(finetune_mdl, best_paramfile)\n", + " print(\"And after:\", finetune_mdl(torch.tensor([1.])))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "89_vY7edkDV5" + }, + "source": [ + "### Orchestrating transfer\n", + "\n", + "SpeechBrain has a parameter transfer orchestrator similar to Checkpointer: `speechbrain.utils.parameter_transfer.Pretrainer`. 
The point is primarily to implement the parameter download-and-load for `speechbrain.pretrained.Pretrained` subclasses such as `EncoderDecoderASR` and to aid in writing easy-to-share recipes.\n", + "\n", + "Similar to Checkpointer, Pretrainer handles mapping parameter files to instances, and calling the transfer code (implemented as similar hooks as checkpoint loading).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and 
Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1RW3HhdzNMfQ63vLklK0TJZyG2-z0FkTy", + "timestamp": 1612462046857 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/basics/data-loading-pipeline.ipynb b/docs/tutorials/basics/data-loading-pipeline.ipynb new file mode 100644 index 0000000000..80ce293841 --- /dev/null +++ b/docs/tutorials/basics/data-loading-pipeline.ipynb @@ -0,0 +1,2670 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/basics/data-loading-pipeline.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/basics/data-loading-pipeline.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5ZZTjoafZw49" + }, + "source": [ + "# Data Loading\n", + "\n", + "Handling data consumes 90% of the active work time in many machine learning projects.\n", + "\n", + "SpeechBrain complements standard PyTorch data loading in handling variable-length sequences, large datasets, and complex data transformation pipelines. 
These are typical challenges when working with speech, but SpeechBrain tries not to make assumptions about your data." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EnirXev8XUyt" + }, + "source": [ + "## Install dependencies\n" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "metadata": { + "id": "5spDEaL1yddy" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'speechllm_librispeech'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "metadata": { + "id": "DPX-4BBbX5L9" + }, + "outputs": [], + "source": [ + "import speechbrain\n", + "import torch" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HL_oCiDyoP8Y" + }, + "source": [ + "In this tutorial we will use MiniLibriSpeech from https://www.openslr.org/resources/31: we download the validation set in the following two cells as well as images and scripts." + ] + }, + { + "cell_type": "code", + "execution_count": 73, + "metadata": { + "id": "vtSWx0KAX5L9" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# downloading mini_librispeech dev data\n", + "!wget https://www.openslr.org/resources/31/dev-clean-2.tar.gz\n", + "!tar -xvzf dev-clean-2.tar.gz" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HBlTW2Mq5S35" + }, + "source": [ + "## Preface: PyTorch data loading pipeline\n", + "\n", + "SpeechBrain data-IO follows and extends [PyTorch data loading](https://pytorch.org/docs/stable/data.html). 
This preface section recaps PyTorch data loading and does not yet consider the SpeechBrain data loading extensions.\n", + "\n", + "### Overview\n", + "PyTorch data loading can run in many configurations,\n", + "but a typical approach has these basic elements:\n", + "- a [Dataset](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset), which loads data points one-at-a-time.\n", + "- a [collation function](https://pytorch.org/docs/stable/data.html#dataloader-collate-fn), or ``collate_fn`` for short, which takes a list of data points and forms a batch.\n", + "- a [Sampler](https://pytorch.org/docs/stable/data.html#data-loading-order-and-sampler), which determines the order in which the Dataset is iterated.\n", + "- a [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader), which combines the elements above (and has defaults for ``collate_fn`` and Sampler), and orchestrates the whole pipeline.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qHNJPq6PfInp" + }, + "source": [ + "### Dataset\n", + "The role of the Dataset is to produce single data points. Typically they are loaded off the disk, but they could also come from some more complex source or in some cases just from RAM. You can write your own Dataset subclass or sometimes you can use a standardized class. The training, validation, and test subsets get their own Dataset instances.\n", + "\n", + "The Dataset interface is simple; it implements\n", + "`__getitem__` and usually also `__len__`. Usually, \"map-style\" Datasets are used, but it's worth noting that PyTorch also has a notion of [IterableDataset](https://pytorch.org/docs/stable/data.html#iterable-style-datasets)s.\n", + "\n", + "`__getitem__` can return _anything_ because data can look like _anything_. Often, though, a data point consists of multiple associated things (e.g. an image and a label, or a speech waveform and its transcription). 
The Dataset should return all of these associated things.\n", + "\n", + "It is also relatively common for the Dataset to somehow transform the data on the fly, on the CPU.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "huVMufrKo9uf" + }, + "source": [ + "### Collation function\n", + "\n", + "The ``collate_fn`` just converts a list of examples into a PyTorch tensor batch. If the data has variable-length sequences, ``collate_fn`` usually needs to implement padding." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZmGerYfHpBxu" + }, + "source": [ + "### Sampler\n", + "Typically users don't need to create their own sampler; the two default options are to iterate in the original Dataset order or to iterate in random order.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fpuhtBUEpECO" + }, + "source": [ + "### DataLoader\n", + "\n", + "The DataLoader takes the other elements above and many other arguments such as batch size. The DataLoader has basic defaults for all arguments (except the Dataset, of course), but it's worthwhile to understand the [args](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader).\n", + "\n", + "The DataLoader object is iterated in the training loop, and every Dataset instance (e.g. training, validation, test) gets its own DataLoader.\n", + "```python\n", + "train_loader = DataLoader(train_data, collate_fn=PaddedBatch, batch_size=32, num_workers=2)\n", + "for batch in train_loader:\n", + " pred = model(batch.signal)\n", + " ...\n", + "```\n", + "\n", + "The iterator that DataLoader returns can either load batches in the same process that it is created in (`num_workers=0`), or it can start a new process (`num_workers=1`) or multiple new processes (`num_workers>1`). Because of the Global Interpreter Lock, Python cannot work on two tasks at the same time in a single process. 
Using at least one background worker process to load data while simultaneously running training is often essential for taking full advantage of GPU compute resources." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4rXqNQYb_Taw" + }, + "source": [ + "## SpeechBrain Basic dataIO\n", + "\n", + "The basic dataIO pipeline is organized around three \"key\" blocks: **DynamicItemDataset**, **Dynamic Items Pipelines (DIPs)** and **CategoricalEncoder** which are tightly connected.\n", + "\n", + "**DynamicItemDataset** inherits from `torch.utils.data.Dataset` and has been built to work together with **Dynamic Items Pipelines** to provide a straightforward and flexible way to fetch and transform data and labels from the raw dataset stored on your disk.\n", + "\n", + "**DIPs** consist of user-defined functions in which the user specifies operations applied to metadata and data contained in the dataset. E.g. reading and augmenting an audio file or encoding a sequence of words using SentencePiece tokenizer.\n", + "These functions are called inside **DynamicItemDataset** `__getitem__` method and are run in parallel on the CPU.\n", + "\n", + "The **CategoricalEncoder** is a convenient abstraction we provide for multi-class classification problems and it is sub-classed into **TextEncoder** and **CTCTextEncoder** which instead can be used for sequence-to-sequence applications related to text, such as ASR.\n", + "\n", + "\n", + "Thanks to these abstractions, most of the work necessary to set up the data IO pipeline is parsing the dataset into suitable annotation supported by **DynamicItemDataset** (SpeechBrain supports both CSV and JSON formats).\n", + "\n", + "\n", + "Once this annotation is ready, a flexible and efficient pipeline can be created in a few lines of code, as SpeechBrain will take care under the hood of padding and other operations."
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kOmWMuv_XDg4" + }, + "source": [ + "In the following tutorial, we will explain in detail how these blocks work.\n", + "We will start from the required CSV or JSON annotation whose purpose is to represent and describe the information contained in the dataset.\n", + "For example:\n", + "\n", + "* Paths to audio files, pre-extracted features et cetera.\n", + "* Metadata such as words spoken in the audio files, Signal-to-Noise-Ratio, sound event tags, speaker identities et cetera.\n", + "\n", + "Basically any information required to train your algorithm." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "T9VSd39Cmqxm" + }, + "source": [ + "### Dataset Annotation\n", + "\n", + "SpeechBrain offers native support for JSON and CSV formats for **describing a dataset** and, in fact, in official recipes (such as LibriSpeech ASR recipes) we provide parsing scripts to obtain such formats. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZFaNums8qj1J" + }, + "source": [ + "We can take a glimpse to how these files can be structured using the downloaded Mini-LibriSpeech example.\n", + "\n", + "Each file in Mini-LibriSpeech is an utterance from a single speaker, thus either JSON and CSV formats can be used to contain the absolute path to that file, the speaker identity and the words the speaker utters. This is enough to build an Automatic Speech Recognition (ASR) system.\n", + "\n", + "Creating those files should be rather easy for most datasets as Python offers many tools for manipulating CSV and JSON files (such as [pandas](https://pandas.pydata.org/)). 
In fact parsing the data to JSON, for example, can be done in a few lines of code:" + ] + }, + { + "cell_type": "code", + "execution_count": 74, + "metadata": { + "id": "nzgmi6IyNWMb" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install soundfile" + ] + }, + { + "cell_type": "code", + "execution_count": 75, + "metadata": { + "id": "N-UPJi6v93w_", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "b03f60e3-62da-4c9a-a6c8-a98b30cfce73" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Number of flac audio files 1089\n", + "Number of transcriptions 1089\n" + ] + } + ], + "source": [ + "import soundfile\n", + "from pathlib import Path\n", + "\n", + "# Load all the audio files into one list\n", + "dev_clean_root = Path(\"./LibriSpeech/dev-clean-2\")\n", + "flac_files = list(dev_clean_root.glob(\"**/*.flac\"))\n", + "print(\"Number of flac audio files {}\".format(len(flac_files)))\n", + "\n", + "# we build a dictionary to map utterance id to words for each utterance\n", + "# each row in the text file is simply:\n", + "# \n", + "text_files = dev_clean_root.glob(\"**/*.txt\")\n", + "text_contents = [line.split(maxsplit=1) for file in text_files for line in open(file, encoding=\"utf8\")]\n", + "words_dict = {utt_id: words.strip() for utt_id, words in text_contents}\n", + "print(\"Number of transcriptions {}\".format(len(words_dict)))\n", + "\n", + "# Our dictionary has four keys, including annotation of transcript and speaker identity,\n", + "# making this manifest suitable for automatic transcription or speaker identification\n", + "examples = {\n", + " path.stem: {\n", + " \"file_path\": str(path),\n", + " \"words\": words_dict[path.stem],\n", + " \"spkID\": int(path.stem.split(\"-\")[0]),\n", + " \"length\": soundfile.info(path).frames / soundfile.info(path).samplerate,\n", + " }\n", + " for path in flac_files\n", + "}"
}, + "source": [ + "Both JSON and CSV formats are briefly illustrated here:" + ] + }, + { + "cell_type": "code", + "execution_count": 76, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "OVXbuTX-NWMc", + "outputId": "9d5feb59-4b5f-405f-8448-a9534dc04cf1" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "{\n", + " \"3576-138058-0019\": {\n", + " \"file_path\": \"LibriSpeech/dev-clean-2/3576/138058/3576-138058-0019.flac\",\n", + " \"words\": \"GIVE ME MY HORSE AND ARMS AND WAIT FOR ME HERE I WILL GO IN QUEST OF THIS KNIGHT AND DEAD OR ALIVE I WILL MAKE HIM KEEP HIS WORD PLIGHTED TO SO GREAT BEAUTY\",\n", + " \"spkID\": 3576,\n", + " \"length\": 9.935\n", + " },\n", + " \"3576-138058-0021\": {\n", + " \"file_path\": \"LibriSpeech/dev-clean-2/3576/138058/3576-138058-0021.flac\",\n", + " \"words\": \"THEY MADE HASTE TO OVERTAKE THEM WHICH AS THE PARTY MOVED SLOWLY THEY WERE ABLE TO DO WITH EASE\",\n" + ] + } + ], + "source": [ + "import json\n", + "\n", + "with open(\"data.json\", \"w\") as f:\n", + " json.dump(examples, f, indent=4)\n", + "\n", + "!head data.json" + ] + }, + { + "cell_type": "code", + "execution_count": 77, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "sM6h9p7FNWMc", + "outputId": "0defa3f6-7af2-4578-e4d4-8c0c0500ec1b" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "id,file_path,words,spkID,length\r\n", + "3576-138058-0019,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0019.flac,GIVE ME MY HORSE AND ARMS AND WAIT FOR ME HERE I WILL GO IN QUEST OF THIS KNIGHT AND DEAD OR ALIVE I WILL MAKE HIM KEEP HIS WORD PLIGHTED TO SO GREAT BEAUTY,3576,9.935\r\n", + "3576-138058-0021,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0021.flac,THEY MADE HASTE TO OVERTAKE THEM WHICH AS THE PARTY MOVED SLOWLY THEY WERE ABLE TO DO WITH EASE,3576,6.18\r\n", + 
"3576-138058-0028,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0028.flac,CLAUDIA TOLD HIM SHE MEANT TO GO TO A MONASTERY OF WHICH AN AUNT OF HERS WAS ABBESS WHERE SHE INTENDED TO PASS HER LIFE WITH A BETTER AND EVERLASTING SPOUSE,3576,10.46\r\n", + "3576-138058-0014,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0014.flac,DON QUIXOTE WAS ON FOOT WITH HIS HORSE UNBRIDLED AND HIS LANCE LEANING AGAINST A TREE AND IN SHORT COMPLETELY DEFENCELESS HE THOUGHT IT BEST THEREFORE TO FOLD HIS ARMS AND BOW HIS HEAD AND RESERVE HIMSELF FOR A MORE FAVOURABLE OCCASION AND OPPORTUNITY,3576,18.13\r\n", + "3576-138058-0000,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0000.flac,MASTER AND MAN DISMOUNTED FROM THEIR BEASTS AND AS SOON AS THEY HAD SETTLED THEMSELVES AT THE FOOT OF THE TREES SANCHO WHO HAD HAD A GOOD NOONTIDE MEAL THAT DAY LET HIMSELF WITHOUT MORE ADO PASS THE GATES OF SLEEP,3576,14.14\r\n", + "3576-138058-0010,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0010.flac,SANCHO ROSE AND REMOVED SOME DISTANCE FROM THE SPOT BUT AS HE WAS ABOUT TO PLACE HIMSELF LEANING AGAINST ANOTHER TREE HE FELT SOMETHING TOUCH HIS HEAD AND PUTTING UP HIS HANDS ENCOUNTERED SOMEBODY'S TWO FEET WITH SHOES AND STOCKINGS ON THEM,3576,15.735\r\n", + "3576-138058-0022,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0022.flac,THE WOUNDED GENTLEMAN OPENED HIS ALL BUT CLOSED EYES AND RECOGNISING CLAUDIA SAID I SEE CLEARLY FAIR AND MISTAKEN LADY THAT IT IS THOU THAT HAST SLAIN ME A PUNISHMENT NOT MERITED OR DESERVED BY MY FEELINGS TOWARDS THEE FOR NEVER DID I MEAN TO NOR COULD I WRONG THEE IN THOUGHT OR DEED,3576,26.9\r\n", + "3576-138058-0005,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0005.flac,SEEING THIS SANCHO GOT UP AND GRAPPLING WITH HIS MASTER HE GRIPPED HIM WITH ALL HIS MIGHT IN HIS ARMS GIVING HIM A TRIP WITH THE HEEL STRETCHED HIM ON THE GROUND ON HIS BACK AND PRESSING HIS RIGHT KNEE ON HIS CHEST HELD HIS HANDS IN HIS OWN SO THAT HE COULD NEITHER MOVE NOR 
BREATHE,3576,16.655\r\n", + "3576-138058-0017,LibriSpeech/dev-clean-2/3576/138058/3576-138058-0017.flac,HE SAW ME HE PAID COURT TO ME I LISTENED TO HIM AND UNKNOWN TO MY FATHER I LOVED HIM FOR THERE IS NO WOMAN HOWEVER SECLUDED SHE MAY LIVE OR CLOSE SHE MAY BE KEPT WHO WILL NOT HAVE OPPORTUNITIES AND TO SPARE FOR FOLLOWING HER HEADLONG IMPULSES,3576,17.325\r\n" + ] + } + ], + "source": [ + "import csv\n", + "\n", + "# CSV is flat list with special \"id\" column\n", + "csv_examples = [{\"id\": key, **values} for key, values in examples.items()]\n", + "with open(\"data.csv\", \"w\") as f:\n", + " writer = csv.DictWriter(f, fieldnames=csv_examples[0].keys())\n", + " writer.writeheader()\n", + " writer.writerows(csv_examples)\n", + "\n", + "!head data.csv" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P9YLGDk0XdG-" + }, + "source": [ + "Unlike other toolkits which have a rather strict requirements on how a dataset must be specified to be able to process it **we do not impose any restriction on JSON and CSV syntax except the requirement for a different unique ID string for every single example**.\n", + "\n", + "This means that the JSON file must contain a dictionary whose keys are the example ids and each entry of the dictionary contains metadata for that example. 
Instead, CSV files must have at least one column called id.\n", + "\n", + "These are the only strict requirements to guarantee that the JSON and CSV dataset description files will work with the SpeechBrain data IO pipeline.\n", + "\n", + "Users are given great flexibility in how to represent their datasets in the JSON and CSV files as their goals and applications could be different: speech separation, enhancement, ASR, diarization, VAD et cetera.\n", + "\n", + "This is because in SpeechBrain we aim at many different tasks and datasets.\n", + "\n", + "- **Every Dataset is unique**, it can be single channel or multichannel, it can provide different metadata such as speaker IDs, or speaker positions or even\n", + "multi-modal data such as audio and video.\n", + "\n", + "- **Every task is unique**, the annotation used in this example is suitable for applications like ASR and Speaker Recognition. But for diarization, for example, the user would like to have also the start and stop (in seconds, frames, whatever!) of each utterance too.\n", + "\n", + "This also allows keeping the annotation very simple and focused on the particular task with only the necessary information for the current application, instead of having a cumbersome do-it-all annotation.\n", + "\n", + "\n", + "**TIP**\n", + "\n", + "It is useful, when building the parsing script, to have a `length` or `duration` for each example containing the length of the example in seconds or samples or even frames. This allows for subsequent operations such as filtering examples that are too long (to avoid OOM issues) or sorting them for faster training. 
Regarding CSV files, if a `duration` column is specified, it is automatically cast to float when a DynamicItemDataset is built from the CSV.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "frGoZYqCrb3A" + }, + "source": [ + "Hereafter we show how the **DynamicItemDataset**, **DIPs** and **CategoricalEncoder** can be used to build a data pipeline for Speaker Recognition.\n", + "\n", + "In particular we have to:\n", + "\n", + "- read the audio\n", + "- read the speaker ID from annotation and encode it to an integer\n", + "- cache filterbank features once and reuse them across epochs\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fIHCwY-BunQ1" + }, + "source": [ + "### DynamicItemDataset\n", + "\n", + "**DynamicItemDataset** is at the heart of the SpeechBrain data pipeline and is built on top of `torch.utils.data.Dataset`.\n", + "\n", + "As the name implies it allows the **dynamical creation of new \"objects\"** from the entries specified in the JSON (or CSV) dataset annotation." + ] + }, + { + "cell_type": "code", + "execution_count": 78, + "metadata": { + "id": "QOjqauY9i6Ll" + }, + "outputs": [], + "source": [ + "# creating a DynamicItemDataset instance from JSON or CSV annotation is immediate\n", + "from speechbrain.dataio.dataset import DynamicItemDataset\n", + "\n", + "dataset = DynamicItemDataset.from_json(\"data.json\") # or equivalently, DynamicItemDataset.from_csv(\"data.csv\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "340CyO9Pj965" + }, + "source": [ + "What does the dynamical creation of \"objects\" from the entries specified in the data.json annotation mean?" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 79, + "metadata": { + "id": "epqTBMW6j4SB", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "86afee0a-8b8d-4658-92e7-da7d0ddbaffb" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{}" + ] + }, + "metadata": {}, + "execution_count": 79 + } + ], + "source": [ + "dataset[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Na1uMIKokMXs" + }, + "source": [ + "As it is now, this `Dataset` object does not return anything.\n", + "\n", + "Dynamical creation means exactly that, **items the user wants to be returned must be specified** in some way by the user. These items can depend on the entries specified in the data.json examples:" + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "metadata": { + "id": "5RPpu88_k_b2", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "f15aeeb1-9edf-49da-fa16-18ca0d840880" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "dict_keys(['file_path', 'words', 'spkID', 'length'])\n" + ] + } + ], + "source": [ + "print(next(iter(examples.values())).keys())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CETiI0IglI3L" + }, + "source": [ + "Namely from `['file_path', 'words', 'spkID', 'length']`.\n", + "\n", + "For example one \"dynamic item\" could be the audio signal which will depend on the `'file_path'` key. Another one could be the `spkID` encoded to an integer value if one wishes to perform speaker recognition or, for ASR, it could be the words encoded by a tokenizer.\n", + "\n", + "To obtain these \"items\" one should specify in some way a function which, when applied to the corresponding key, will provide the new item.\n", + "\n", + "E.g. a function which reads the audio when applied to `'file_path'` key. in order to make the `Dataset` class return, for each example. 
the audio signal.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UMXJkQi5usJg" + }, + "source": [ + "### Dynamic Item Pipelines (DIPs)\n", + "\n", + "This task is handled by specifying **Dynamic Item Pipelines** for each dynamic item the user wants to get from the dataset. The user can specify an arbitrary number of pipelines.\n", + "\n", + "For example, regarding the audio signal:" + ] + }, + { + "cell_type": "code", + "execution_count": 81, + "metadata": { + "id": "IuEewb4yoB-g" + }, + "outputs": [], + "source": [ + "@speechbrain.utils.data_pipeline.takes(\"file_path\")\n", + "@speechbrain.utils.data_pipeline.provides(\"signal\")\n", + "def audio_pipeline(file_path):\n", + " sig = speechbrain.dataio.dataio.read_audio(file_path)\n", + " return sig" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sYELrX1GocuG" + }, + "source": [ + "We specify a function that takes the `file_path` for each example and provides a new item called `signal` which is a tensor containing the audio.\n", + "\n", + "\n", + "Here we use a pre-built function in `speechbrain.dataio.dataio` for reading audio. But the user can also use their own.\n", + "\n", + "Once specified, the pipeline must be added to the `DynamicItemDataset` object and, subsequently, the outputs requested by the user should be specified with the `set_output_keys` method.\n", + "We request two items in the output: a new one, `signal`, and also `file_path`, which is in the JSON annotation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 82, + "metadata": { + "id": "IjfzuoFxowKF", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "4afeb4f3-60d8-46ba-a3da-12f4fde9e8e4" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'signal': tensor([ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029]),\n", + " 'file_path': 'LibriSpeech/dev-clean-2/3576/138058/3576-138058-0019.flac'}" + ] + }, + "metadata": {}, + "execution_count": 82 + } + ], + "source": [ + "dataset.add_dynamic_item(audio_pipeline)\n", + "dataset.set_output_keys([\"signal\", \"file_path\"])\n", + "dataset[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1_qZ_CYHQMdt" + }, + "source": [ + "Note that a more compact syntax can be used for applying a simple function like reading the audio file." + ] + }, + { + "cell_type": "code", + "execution_count": 83, + "metadata": { + "id": "d3ACymf1JQsP", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "98107a04-0fc8-45cc-b81d-588a03cc3d5c" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'id': '3576-138058-0019',\n", + " 'signal': tensor([ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029]),\n", + " 'words': 'GIVE ME MY HORSE AND ARMS AND WAIT FOR ME HERE I WILL GO IN QUEST OF THIS KNIGHT AND DEAD OR ALIVE I WILL MAKE HIM KEEP HIS WORD PLIGHTED TO SO GREAT BEAUTY'}" + ] + }, + "metadata": {}, + "execution_count": 83 + } + ], + "source": [ + "dataset.add_dynamic_item(speechbrain.dataio.dataio.read_audio, takes=\"file_path\", provides=\"signal\")\n", + "dataset.set_output_keys([\"id\", \"signal\", \"words\"])\n", + "dataset[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ngjASy73p4Pc" + }, + "source": [ + "Now the dataset object will return this new specified item \"sig\" as well as the `file_path` as specified in the JSON. 
To show that its really loading the signal, let's plot the waveform." + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "metadata": { + "id": "2nnvmJ_xqArq", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 452 + }, + "outputId": "108e3ed3-0f5d-4aae-d1c8-2d9d153db8b2" + }, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjYAAAGzCAYAAAA8I13DAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAbD9JREFUeJzt3Xd0FGXbBvBr00NIIaSRkBB66J0YQECIgiA2VFAURMQG0hQFC+hrARVRX16UDxWxgGBBbIh0FIiE3nuNQBIgJqGm7Xx/YJZssmV2d/pev3NyDmxmZ+7Z7M7e85T7MQmCIICIiIjIAHzUDoCIiIhIKkxsiIiIyDCY2BAREZFhMLEhIiIiw2BiQ0RERIbBxIaIiIgMg4kNERERGQYTGyIiIjIMJjZERERkGExsiEgWycnJePjhhyXb3yuvvAKTySTZ/ojImJjYEJFLdu3ahXvuuQd16tRBUFAQEhIScPPNN2PGjBmKx/Lmm29i8eLFih+XiLTLxLWiiEisDRs24KabbkJSUhKGDBmCuLg4ZGVl4a+//sKRI0dw+PBhy7ZFRUXw8fGBv7+/JMcuLS1FaWkpgoKCLI9Vr14d99xzD+bOnSvJMYhI//zUDoCI9OONN95AeHg4Nm3ahIiICKvf5ebmWv0/MDBQ0mP7+fnBz4+XLCJyjF1RRCTakSNH0KxZsypJDQDExMRY/d/WGJudO3eiW7duCA4ORu3atfH666/js88+g8lkwvHjxx0eu/IYG5PJhEuXLuHzzz+HyWSCyWSyOt6pU6fwyCOPIDY2FoGBgWjWrBnmzJljtc81a9bAZDLhm2++wauvvoqEhASEhobinnvuQUFBAYqKijBmzBjExMSgevXqGDp0KIqKikS9VkSkDt7+EJFoderUQUZGBnbv3o3mzZu79NxTp07hpptugslkwsSJExESEoJPPvnE7ZadL7/8Eo8++ig6duyIxx57DABQv359AEBOTg5uuOEGmEwmjBw5EtHR0fjtt98wbNgwFBYWYsyYMVb7mjJlCoKDgzFhwgQcPnwYM2bMgL+/P3x8fPDPP//glVdewV9//YW5c+eibt26mDRpklsxE5ECBCIikZYtWyb4+voKvr6+QlpamvDcc88Jv//+u1BcXFxl2zp16ghDhgyx/P/pp58WTCaTsG3bNstj58+fFyIjIwUAwrFjxxwee/LkyULlS1ZISIjVMcoNGzZMqFWrlnDu3DmrxwcOHCiEh4cLly9fFgRBEFavXi0AEJo3b251Dvfff79gMpmEW2+91er5aWlpQp06dRzGSUTqYlcUEYl28803IyMjA7fffjt27NiBt99+G7169UJCQgJ++uknh89dunQp0tLS0Lp1a8tjkZGRGDRokKQxCoKA77//Hv369YMgCDh37pzlp1evXigoKMDWrVutnjN48GCrQc6pqakQBAGPPPKI1XapqanIyspCaWmppDETkXSY2BCRSzp06IBFixbhn3/+QWZmJiZOnIgLFy7gnnvuwd69e+0+78SJE2jQoEGVx2095omzZ88iPz8fs2fPRnR0tNXP0KFDAVQd6JyUlGT1//DwcABAYmJilcfNZjMKCgokjZmIpMMxNkTkloCAAHTo0AEdOnRAo0aNMHToUHz77beYPHmyqnGZzWYAwIMPPoghQ4bY3KZly5ZW//f19bW5nb3HBVbJINIsJjZE5LH27dsDAM6cOWN3mzp16ljVuSln6zGxbFUijo6ORmhoKMrKypCenu72volIn9gVRUSirV692mZrxZIlSwAAjRs3tvvcXr16ISMjA9u3b7c8lpeXh3nz5rkdT0hICPLz860e8/X1Rf/+/fH9999j9+7dVZ5z9uxZt49HRNrHFhsiEu3pp5/G5cuXcddddyElJQXFxcXYsGEDFi5ciOTkZMsYFluee+45fPXVV7j55pvx9NNPW6Z7JyUlIS8vz611oNq1a4cVK1Zg+vTpiI+PR926d
ZGamoqpU6di9erVSE1NxfDhw9G0aVPk5eVh69atWLFiBfLy8jx5GYhIw5jYEJFo06ZNw7fffoslS5Zg9uzZKC4uRlJSEp566im89NJLNgv3lUtMTMTq1asxatQovPnmm4iOjsaIESMQEhKCUaNGWS2VINb06dPx2GOP4aWXXsKVK1cwZMgQpKamIjY2FpmZmfjPf/6DRYsW4cMPP0TNmjXRrFkzvPXWWx68AkSkdVwriohUNWbMGPzf//0fLl68aHewLhGRWBxjQ0SKuXLlitX/z58/jy+//BJdunRhUkNEkmBXFBEpJi0tDd27d0eTJk2Qk5ODTz/9FIWFhXj55ZfVDo2IDIKJDREppk+fPvjuu+8we/ZsmEwmtG3bFp9++im6du2qdmhEZBAcY0NERESGwTE2REREZBhMbIiIiMgwDDfGxmw24/Tp0wgNDXWr4BcREREpTxAEXLhwAfHx8fDxcb/dxXCJzenTp6usyEtERET6kJWVhdq1a7v9fMMlNqGhoQCuvTBhYWEqR0NERERiFBYWIjEx0fI97i7DJTbl3U9hYWFMbIiIiHTG02EkHDxMREREhsHEhoiIiAyDiQ0REREZBhMbIiIiMgwmNkRERGQYTGyIiIjIMJjYEBERkWEwsSEiIiLDYGJDREREhsHEhoiIiAyDiQ0REREZBhMbIiIiMgwmNkRE/1q6OxtLd59ROwwi8oDhVvcmInLH5eJSPPHVFgDArlduQWiQv8oREZE72GJDRATgaonZ8u8rJWUqRkJEnmBiQ0RERIbBxIaIiIgMg4kNERERGQYTGyIiIjIMJjZERJWcvVCkdghE5CYmNkRElSzZxVo2RHqlSGIzc+ZMJCcnIygoCKmpqcjMzHS4fX5+PkaMGIFatWohMDAQjRo1wpIlS5QIlYiIiHRM9gJ9CxcuxLhx4zBr1iykpqbi/fffR69evXDgwAHExMRU2b64uBg333wzYmJi8N133yEhIQEnTpxARESE3KESERGRzsme2EyfPh3Dhw/H0KFDAQCzZs3Cr7/+ijlz5mDChAlVtp8zZw7y8vKwYcMG+Ptfq/yZnJwsd5ikMxeuliAkwA8+Pia1QyEiIg2RtSuquLgYW7ZsQXp6+vUD+vggPT0dGRkZNp/z008/IS0tDSNGjEBsbCyaN2+ON998E2VltiuBFhUVobCw0OqHjG3D4XNo8coy9J2xTu1QiIhIY2RNbM6dO4eysjLExsZaPR4bG4vs7Gybzzl69Ci+++47lJWVYcmSJXj55Zfx7rvv4vXXX7e5/ZQpUxAeHm75SUxMlPw8SFse+GQjAGDfGSaxJJ3MY+fVDoGIJKC5WVFmsxkxMTGYPXs22rVrhwEDBuDFF1/ErFmzbG4/ceJEFBQUWH6ysrIUjpiIjOBgzkW1QyAiCcg6xiYqKgq+vr7IycmxejwnJwdxcXE2n1OrVi34+/vD19fX8liTJk2QnZ2N4uJiBAQEWG0fGBiIwMBA6YMnIiIi3ZG1xSYgIADt2rXDypUrLY+ZzWasXLkSaWlpNp/TuXNnHD58GGbz9ZV2Dx48iFq1alVJaoiIiIgqkr0raty4cfj444/x+eefY9++fXjyySdx6dIlyyypwYMHY+LEiZbtn3zySeTl5WH06NE4ePAgfv31V7z55psYMWKE3KESERGRzsk+3XvAgAE4e/YsJk2ahOzsbLRu3RpLly61DCg+efIkfHyu51eJiYn4/fffMXbsWLRs2RIJCQkYPXo0nn/+eblDJSIiIp2TPbEBgJEjR2LkyJE2f7dmzZoqj6WlpeGvv/6SOSoiIiIyGs3NiiIiIiJyFxMbIqJKjp+/rHYIhpZbeBUr9+XAbBbUDoUMiIkNERGAiotz/LqTq3vLqds7azDs881YtO2U2qGQATGxISIiRV0pubZEztqDZ1WOhIyIiQ0REalCENgVRdJjYiOz/dmFSJuyEt9s4lIPREREcmNiI7NnvtmBMwVX8dz3O9UOhYhIU9heQ
3JgYiOz0jJ+dImIiJTCxIaICIDJ5HwbItI+JjZERERkGExsiIiIyDCY2BCR1yu8WoJpyw6qHYb34RBEkgETG9K1kjKz2iGQAcxee1TtEIhIIkxsSNeW781ROwQygItFpWqH4J04YJtkwMSGdI0tNqSEnMKr6DdjHRZuOql2KMbCriiSARMbIiInpv62H7tOFeD573epHQoROcHERmYHci6oHQJpRP7lYizdfQbFpWxl0ptLBuuqKikz48UfdmHpbq5iTsbDxMaLGHHBuZ93nFY7BNEGzv4LT3y1FR+s5OwbUtfCTVmYt/Eknvhqq9qhEEmOiY2XKC0zo+9/12H4F5vVDkVSK/blqh2CaPuzr7XezVx9ROVIyNvlXihSOwQi2TCx8RLbsvKx90whZxER2ZBdcFXtEFxypuAK3l12ADmF+oqbSAl+agdAyliQmaV2CESatXRPtsPfHz9/SaFIxBn8aSYO5V7EmgNn8fPTXdQOh0hT2GLjJQ7nchAzkbsO5ly0/PvYOfWTnEO51+LZdapA5UiItIeJjRcy4iBiIqWcyb+idghE5AATGy+0+cQ/aodApFuz/+TyC0RaxsRGQWazNlpKCi6XqB0CkW5dKS5TOwTPsdWWDIyJjYLYUkJERCQvJjYK4rpGRPqXd6lY7RAMQ+BiUSQDJjZEKrhcbKwS/d6kfEYSEWkTExsiFaw9cFbtEIhUZ4JJ7RDIgJjYeAmNjFsmIrJYvjfHcAuMkvqY2HgJoxTyunCVM7qIjKK4zIxRX29TOwwyGCY2XsBIg5YrVoAlIv1buV8/C9mSPjCx8QIbjpy3+v+BHC6vQOTN2DNNRsbExguYKxXjeuf3A9h2kjV1iIjIeJjYeIG//6m6ts2iradUiITK8Y6Z9OZw7kU8/fU2HGSLL2kcExsvMH3ZgSqPffnXCRUioYqyC67ixR924RC/KEhhOYVXLf8Wu0TEg59sxM87TuOejzbIFRaRJJjYEKlkxPytmLfxJG6bsU7tUMjLlJZdbzMUW/03+99kqPAqp2eTtjGxURBLUVFF5VPwi0qNM2uNSKtO5V/BiHlbsYVr9hkeExsiIi+28VgeTudXHYdnNGMXbMevu86gP7vSDM9P7QCIiEhZFTufhn62CQBwfGpfdYJRyIm8S2qHQApRpMVm5syZSE5ORlBQEFJTU5GZmSnqeQsWLIDJZMKdd94pb4BEChMEcGqUjnEZAP3hulTeQ/bEZuHChRg3bhwmT56MrVu3olWrVujVqxdycx1Xmzx+/DieffZZ3HjjjXKHSKQKsYM2SXsyKhW9JCLtkD2xmT59OoYPH46hQ4eiadOmmDVrFqpVq4Y5c+bYfU5ZWRkGDRqEV199FfXq1ZM7RMUUGWhpAy05pdPxASVlTGz06rfd2fhpx2m1w7A4U3AFJ86L72qx1XYhCMZ+P2ZXmOJOxiZrYlNcXIwtW7YgPT39+gF9fJCeno6MjAy7z/vPf/6DmJgYDBs2zOkxioqKUFhYaPWjVWaNLbGtxwuZrZobZUwQSGHfb/0bo77ehoLL6i/KKggC0qasQrd31oheJNbWJ4Zf/GQUsiY2586dQ1lZGWJjY60ej42NRXZ2ts3nrFu3Dp9++ik+/vhjUceYMmUKwsPDLT+JiYkexy2XUo0lNhoLR5TZfx5VOwRJ5F8pVjsEksClYm2NtcnxIDn5bZftazKR3mhquveFCxfw0EMP4eOPP0ZUVJSo50ycOBEFBQWWn6ysLJmjdN+XGepU+9Vh/mJX3qUitUOQxKs/7VU7BCIr+86o19q9dHc2yvR4p0WaJOt076ioKPj6+iInJ8fq8ZycHMTFxVXZ/siRIzh+/Dj69etnecxsvjYuxc/PDwcOHED9+vWtnhMYGIjAwEAZopfeusPn1A6BNKKY460M4UqJuOUItEZr3dBPfLUFr93RDA+lJasdChmArC02AQEBaNeuHVauXGl5zGw2Y+XKlUhLS6uyfUpKCnbt2oXt27dbfm6//
XbcdNNN2L59u6a7mbRMY9cwl1S+ANs6lw1HmDCSOhZu0m4LsavUvkysOXBW5QjIKGTviho3bhw+/vhjfP7559i3bx+efPJJXLp0CUOHDgUADB48GBMnTgQABAUFoXnz5lY/ERERCA0NRfPmzREQECB3uF7lqsbvNq8Ul+GmaWsw7pvtlsdsJTaZx/OUC4q8zkUHNWu0/hlyxZkCdWcXrtyfi7nrj6kaAxmD7InNgAEDMG3aNEyaNAmtW7fG9u3bsXTpUsuA4pMnT+LMmTNyh0E2/LhdO9NVK/tozRE0mbQUx89fxqKtpxxuu/PvgiqPbTh8DjNXH9Zckzvpx5d/nUDyhF/xwqJdaofikFRrja0/rH5tnld+5tgz8pwiSyqMHDkSI0eOtPm7NWvWOHzu3LlzpQ+IAABlZu2O83hr6X6bj1+2MQvlcO7FKo898MlGAEDdqBD0aVFL2uDIK7y8eDcAaKpejS3fb/1b7RCINEVTs6KInDl+/rJL22flubY9kSu2Z+WrHYJXLu/w684zqs7iIm1jYiOjQzkX1A7BMRPXTiHyhK1uUDVtOv6PqO303EH719HzGDF/K2794E+1QyGNYmIjI0eDDomIpDZvozq1spS0ny015AQTG2/mZQNrOZCYiMj4mNiQV1i09W+kvrkSuzTWdeCJsxeK8MIPu7BDA+M8iNTAmxWyhYmNFyi4YmdhPC8aYzPumx3IvVCEkV9vVTsUyTz//U7M33gSd8xcj82s5UNe6IDWxzGSKpjYyIj3EuqouOJy3mXrxSZPuDirSssqjjWYILLWSplZwOu/7MXKfTnON/YStlaMJ30olqiGDxkLExvSHE+n0JZWqM9jtrGw3oWrdlqwvMD3W//GJ+uOYdjnm9UORTO0tkK3J3afEjew9uJV7Z7z2QvGWOiW1MPExot9ptHy5bmFV2Xd/7yNJ2Xdv1JOF7j+OmW78RzStivFrrdarNyfK0Mk0lh7UPyaUSv35bJWFVXBxMaLHT17Se0QZJdr4+5v6m+2qxrrmTcWaZOK3sefvrfioNohqOaDlYdw49ur1Q6DNIaJDVkpvFqC4+eMk/BoeT0sKZ1hSwx5CS1UeyZtY2IjIz3eCbZ/bQW6T1uDI2errr9E2uHuNFfvmQfn3U7lX8HwLzbjh23GW0dqsZfcrJD7mNiQleKya/31G46ot9KvycY09NP5V1SIRFnzNp7A/bP/ElWxWuwgUXJOMOD8xc5TV2H53hyMXbgDe04bp3YTkRhMbEgXjp933D1mhGmfL/6wGxlHz6Ptf5Y73baolFOUJSNDXnOpqFQzrZ4bj+qrxhFbFclTTGy8nFEKuz333Q61Q5BMcZkZf//DmR56ccpGa2K3d9ag57trNVEVunID6OFcbSRcRHJhYuPlftudrXYIkqi4qnHlG/CrJfpr3WDROP04YaM18dzFa7PxVmigEGLeJesilbkXtD3Q3J0GtJIy/bfYknSY2Hi5T9dpr5aNraZokwcN1O+vOOR+MGR4co6wmbHqsIx7t63ywPLKMRzMNt4yBCed1LLR480NuY+Jjaz0NSjx1Z/3qB0CANu1ZzyReUy9gdCkjNzCq/hx+ym37tylnr2o9S/RV37eq3YIimOLjnfxUzsA0o7P1h9XOwQAwPK9nnWPVW7b2Xoy36P9aUWZWYCvD4dW2tLnv3/i3MVinDh/GaN6NlQ1lmKVv0T1WGaiIjne4Tp/SchFbLEhquDPQ2ex+5T602MrD/jcc7oATSYtxczVnnVtGPUCf+7itXEk7iwVYMTp3nJzt46SWnQWLnmIiY2BzFx9GEt2nXH5eV9kHJc+GInZKG1jxdbMFFdl5V3GQ59m4rYZ6zB/40lNdSlM/nEPikvNeOf3Ax7tp+IFXky9HHJOa9OpHX2HS1UW4eedrl9n5FRmY7FbK0xsvAoTG1kp122w5UQe3vn9AJ6at9Xl577+yz4ZIpKWsz7y8GB/j49RcQDiCz/swttLPUsitC67wPhFD
5WwSGPVfX/Zab8yb5abZQQqt9AcytHWAOTPNxx3+Hu2ynkXJjayUu7DdNaDAbdqjwkQ46FPM2F2cFcWUc3zxKaytQe1uwKyu7Zn/eN8I3JJVp62EsRDOfbr1Lzl5gKwlT96Whvptc3JODp2RXkXJjYGIfUHV80Ll71TuVhsv+tEini1dbG2H03BlRK39lhaZsbqA2fdDciw5P7SU7oCsaNu22V7HdfV2XfG9lId5kovUqmzrh8iFTGxkdFSBYrfnc6/gue+24F9FWpTnDzv3VVrxVxytV6krLKK59Tq1WXY5cYAZ2/4MnInOZX6VcmotM5az3fX4rKDpFxLztjpnvz7H221SrnK+O98qojTvWX0dWaW7McYMX9rlWbYgzkXkFSzmuzHVpqjrihX7TtzATGhQZLtT2rOBku/6oW1SPRiy4mq3X2bj/+Dro2iFTm+rbdO3qViRIYEOH2uvdYrZ2NY1Obs86K3WVzkGbbY6JytvmWjfoQXbT0ly34FQcCkH3fjo7VHZNm/Oypfp4+dc7wIqDtW72e3FKDMl97gOZnKLVxq41ve0/dP5ddIa9cYZ4kNeRcmNjLiXYK0HDWHmzy4su09U4gvMk7gz0Pn3N6H3Cqv9yOFN5ZoYzaclC1x7pDqY2o2Czh5/rLdz/3lInXLB2Q5WXYAsP9a6L0XU+fhk4uY2MiIHyb32Lu4ynVXxgUn1bMjKx8tX12maldH5YGx7nr2ux3o+s5qLNhkuwu6fGFMtXyz2f2u8crTpeVsILE3gNkTvMf0LkxsZKTnD5MWQ/9hm7iuKLZK68cz3+7AxaJSTP5JmnXKTrtRqFGqz2l5V+mFq7YHCntaNVqs7Vn5VR4rvFIiakFOey+Fki02n7ixMO/uU46TIXfq2AiCwFZ3nWJiIyO1ikKV6qAujTuk7I4pM2vrNap8AfWka01PpD5LqRdQFUtMRV+lZqUV2aiY/baEFasBz1pPVbk+ufjSm80C7v5oAx76NJPJjQ4xsZGRVOXLXfWkG9WHbTly9iLunLkeK/c5rn2hR3PWHbf8295F+shZ6Qfs2nPrB39a/d/etFvSJqm6s6RgKxKx3TtKfIl/nnFClv0u2mq/ArSrZ5X1z2VsO5mPdYfP4YqGllYhcZjYyEjNAXfOliBwpqikDD3fXYvtWfkY9vlmiaKSj6s3kH8dPe98IwXtz7YuUf/qT94xnftQrrLF67yCB9cdJdYPW39YnkH6477ZgT2nbdd3spWvCYKAXX8X4JKNc/7n8vUimJdUHvRNrmNio2NSLPxoz+u/qjdjRjv3vuqR4wvGle6DgislVnfvx89dwrC5m2zWaCFx43SU6l70pPVo3Dc7RG3nScOOnK1CB7Jtr2Fla1jAzzvPoN//1uHuDzdU+V3NCjV/qgX4ShcgKYKJjY4ddVCq/Q0VExNPeXrhc+XZV0vKsPe09LMwPHUq/wq2npQ2iRD7sm45kYdWry7D2IXbLY89/uUWrNyfi/4fVf0S0Dspvmbzr4gb/7Xz73zZWizKaf3GQM74XBmEXt51dUBjC3qS55jYGNRcjVcKldpVF/vBywdyDpj9F17+UZoZOVKzdScpRuXWFld9uPpaocLF26+vEv23m6tCewsxi9AKgoDb/7cegz7ZiOwC+Zb0UGK8jydTsuUMz96MNNtdUeL26SXj+A2FiY2OyVGNVq9Ou/lFscPG1NiKNhzRbtE+W3afKkCrV5fhia+2qB2KbkjRNTKmQuuWGEt2nfH4mPYoMbbPkxmKarQo2Tqm1lu2yH1MbBR2WMLBkm86qRwrptJoOVdbPLzF7wosZCqlOeuv1QD5fU/VmWy885TPUREz6Cp+kf7nl73YKNcAdg3N0AKA8xeLMHHRTpv1ddTEadzGxcRGYe5O4z17oajKB/FqieOZT9OWia9dsVhk8TvSOJmu1fwK8NzaA9Zrc83+46gsx9Ha8geTftyDrzOzcOfM9arF4CyJ+WnHaYe/J31hYqMDM1cfRoc3V
rhcZMuVG3QtDaBzp3osUbkfttmvZ2KLUnlA5ZluK/fn4rgM3cm7Ttme8uwZ9xfBrNxKrUZLibNDjvp6mzKBkCIUSWxmzpyJ5ORkBAUFITU1FZmZmXa3/fjjj3HjjTeiRo0aqFGjBtLT0x1u7w3e+Teh+WiNfKtPf7b+uMPfe1oXxxWeFsZztFimq+QqJiYXR9dvsd8nGrvhd9nYheKmLJdTs0dit526K1ojZnC03rAnyrhkT2wWLlyIcePGYfLkydi6dStatWqFXr16ITc31+b2a9aswf3334/Vq1cjIyMDiYmJuOWWW3DqFLtK1DTHjfVb1LLKgJWSxZLrbljJxNbbXSkuw4/bT6GgQpE4ta3YZ3293vm3+IRMC2O7bH0sikW+p8u01rdHTsme2EyfPh3Dhw/H0KFD0bRpU8yaNQvVqlXDnDlzbG4/b948PPXUU2jdujVSUlLwySefwGw2Y+XKlXKHajhSFgT7n0IL+CnJqGtqeepQbtVuyZIyfV3c/7vykNohuO3Vn/dg9ILtGDrXmC3VarSU2CrQJ2bANwDM23hS6nBIZrImNsXFxdiyZQvS09OvH9DHB+np6cjIyBC1j8uXL6OkpASRkZE2f19UVITCwkKrH7pGyrt3e/UhtEjsWY8VWWVVT6T4i2fl6X+M0/TlB13YWr2kzWRjJFz5KvZbT+YrHI1rnFWhPp1/BbtlGe/jOtuNLuL+7qv32+5dIO2SNbE5d+4cysrKEBsba/V4bGwssrPFTaN9/vnnER8fb5UcVTRlyhSEh4dbfhITEz2OW0s8aVVwpbnYSMTmcz8bbCbEuYtFLtc2MuLYiXJiE3s1x1pooZvGXf0/2oAVe+13+3aaugq3zVhXZR00W60ncrNVtFDs333jsTzNTVV3ZOW+HI8KKBqBpmdFTZ06FQsWLMAPP/yAoKAgm9tMnDgRBQUFlp+srCyFo3SNrTs0Rz7xYGxLKfuGvUr711dYJbNFpc5rEz01z/VCfpeL9dN6JwY/Je579AvtL5AL2E5yK9fucjSO7F0XSmeoafepAgz7fDNu/eBPtUNRlayJTVRUFHx9fZGTY53V5+TkIC4uzuFzp02bhqlTp2LZsmVo2bKl3e0CAwMRFhZm9WMkU3/bb/PxgyKmZ+v5bpCu8aQ7sfCK8wRk03HX16Pq+e5au7/be7oQ6w7pq1qz2LEWcvDGj6gqY2wqHXPdoXO4VGyd2FScTanXGVNivhe8gayJTUBAANq1a2c18Ld8IHBaWprd57399tt47bXXsHTpUrRv317OEBUnVTPsLe/9Icl+xArw03TjnmFpcdDuGQfLV/T575948NONstRnkYu9FaGVVlpmxqn8K4a/IVFn8LC1Bz/dqHwQCtDTWEg5+cl9gHHjxmHIkCFo3749OnbsiPfffx+XLl3C0KFDAQCDBw9GQkICpkyZAgB46623MGnSJMyfPx/JycmWsTjVq1dH9erV5Q7XUKS8PhaX2m6mLS4140pJGcKD/SU8mrYIgiDpDDNXmEzXVoTWmxN5l5EcFaJ2GKKoMeajXMW31UOfZiJDrmUWvJyrC4PqNbl0ZXVzI5P9NnzAgAGYNm0aJk2ahNatW2P79u1YunSpZUDxyZMncebM9QXhPvroIxQXF+Oee+5BrVq1LD/Tpk2TO1RFuDrGRksuXL1eV+NKcRlO51/BTdPWoNWry3DuonEHoapJEIDb/6deKXp7hszJxKr91l3MBVe0U3cF0Ed3wur915ZZOHexiEmNjMwi5mBUvDJXfu+odWPjiorXZ8C718KSvcUGAEaOHImRI0fa/N2aNWus/n/8+HH5A9KI4lIzBAgI9PNVOxRRtp7MR7dG0QCAru+stppRk3HkPPq1ilcrNCs6uAbp3tqDZ7H24Fkcn9rX8tinf15f+0hPF1U1Q124OQtv3dMSk37crV4QCtPKrCi5Hc69iNd+2YtRPRuiXZ0ash/v/RXWtZs+WHkIY9IbyX5cLeLACZWYzQI6v
LEC7V5bYXNKtyAINheNGzl/q+hjnLtYLNt0XiNPE65sTaXFC/VC6QSvqML7+OM/5VngUQ5qp2BbTuRhyS59rSLvCS0MHrZlW5b9gfTuJOqPzN2EtQfPov9HG1x+rjv+OGh9naqc6HgTJjYqKbhSgoIrJbhYVIoHPtlY5YNzuuCqzdoJv+w8I3q67cWiUnR4Y4UU4eqK1BfOoXM3wazDqfOV8xp7r8uGI+fwxJdbnBZcc8X6w+p3q+SL7RpTuXWp/0fiipUahRqvtpgWm4prjEnRqvT3P5fdfm5JmRmHcy+4lFAdqrTYqDdjYqOSim/XzGN5+OtontXv/7lUbPe5an3HOmoAMHr3zz+X7f89tErsuIAHPt6IpXuyFbuzVErb15bj5ulrcaZA/5WUyTOeXjLdGWPjyTEf+2Iz0qf/ge+2uLZSPV3DxEYllTPxi0WllX5v/7kdXlenFeYLBytd/7jdWFV8yQ0abNQ6lHsRaVNWqR0GqUyNMV8VD3n7/9a59NzV/3Z/f7b+uIQReQ8mNiqp3OriU+mGoPCq/Wb0KyXOK8rKYYWDVbOXOyitTuoweCOaZDSYjxmbBy+4vbITzqjdk7zz7wLRLYfZFepEXTJYlW+lMLGRibMxGZX7cCu2dF4tKcOgT7RZQEqPNVW8lVoJsBY5Kkegowlcqliy64zzjVzgyfiV33a7F4unLTZS3CTkXxY35mvGquuDfk+cd3+cDqCv2YlSYmIjk43H8hz+PrtS9dZ9Z65XP5VyEKfUtFhTpTI1C65pyYdrDlv935tfl/1n7FcX9ubXRYyn5jmeialklekrxe4l62q32ACAj8hxOpVbpdw9ZwBeWxuJiY1MnC1A+EGlqXgV1yl5f8VBWWIi/fHkS7fie8rbHT3HGSPuEHPHn+nkJk5KJW5mKK7Wsam8uRR5kdjxx6crdVl5sujs5SLvbLVlYqOSymt6fJ150vLviis0k+u8qcaOI0qMsckptL9ulJa84qDUvJe21osix2vjyT5P57uXrIs95vPf7bQ5vlGKLp3K4yhtyS286rB1kcRhYiOTRVtP2Xz8YtG1D03mcft3OVq/zmq93/Z0vj6+bOUmtunbE6lvrnS+kQZooStCj8S8bI4WRXV3n/ZkHHGva0XsNWvh5ixMX1a1xfxPkSvWHzl7Ed9t+dvmGMuLTlpPCq+WoOObK3G+UqkPvnVdx8RGJj/tsD39ecpv++0+Jyvv34FiGn8na71FyUg1dTzJIaVY30brSSzJS8zf/z0Fu87dXRrhrAtr2c3dcNztS3DPd9fi2W93YPH2qje2tirJV3TinGcDhek6JjYKczTK/YUfdgHQ/mDGuRuOqx2CQ/wuvkbtBG9/dqFL2z/51RZ16o0ofkT9kOO18eRv7G5is/e0a+9FT207mS/ZvsrY3OgyJjYaUt7cWVKm7TfymgO5aofgkNYTQ1dsctBl6Sq5r4+Vd9/7/T9dev5vu7Nx5Kzyg3yZCFv76q/rhTjVem3sTb4Qs0q3La6ehqN7gh+3n8KwuZusxuIIgoA3ft3rPA43XlAxN5L7ziibuGkdExuN8WQEvFK88Q6iQOy6QxL7wc5YLXescKOIYuWK2I7kSjKQWPlmJiMlwlJ4e+l+5BRexZtL9uFknvRTucW82va+/8U819Yae66u9eZo69ELtmPl/lzMXHW9nMKe04X4+M9jTvf7j8haNhX9ecj5Irwb7Iw98tZ3NhMbGXzuQVfNe8u1P9W78Kr2ky+pvfP7AVWO627TO1B1FsbJPNf78D/+Q/wq3YtdXFZj/eGqAzL9fQ00QEqnBOFa7ZrZfxzFXR9Ku37Y1ZIyj4rOiWmZ+MXG+EY57sX+74+j+GHbtbWcKt8AXLJzQ+DOu/uSB1O2h3+x2e3n6hkTGxlMdjC11BkxWT85ZpLhrv+33dmqLKboyQV5b6UvAXdyJDmT2E/XVX2v+/mqcEny1ttaO8yCYCkSWrkshae++usE8hws8FuuuMzNPifY/
nPK1SpXviJ45SrfZRL24R0TUQCRg/ytMbGR2OJt0nUdkLao0ZrmSYtNVp7+CvSp0WLDrwRrlzyodOuM2C7dLx0suOsOVz9GriYKQz/bZPV/qZehINcwsZHYmIXb1Q7B68l1d6bG0CK9f+m6OmbMk2Z3ko7aM+r+/kffU5+VngCybA8XIa6IiQ2RSKq09kp4THcSPjFdEY6+hOwVqgRsD0KXc92hUjvdG2zGr8rTvMbTgeRS/0naJEVIu8MKftkpfmyZlLMcy529UOSw4GvyhF9dLr2gd0xsyDBsDUbVO0+6oqRwMMd5effzF+2Pmag8YLmkzIxdfxfAbBaw9mDV2R4+YurOu2nFPt7ViuVpccdTdpY+UKIhaNepqgVEXT2fytV/HRk5f5vobR/7covd37nzkn/51wl0eGOF0+1cLb2gd0xsyDAGfbIRJ84rt9KwEtRObIAKFbEdOJxrOwGa/cdRqxaR8d/uQL//rcMHKw/Z3H71fvlqJOUU2q4+q4GXWHM8TUCkfkmLS82ib1xsLcrpaqvc1n8HT2vNhiPn8Pove3H138HKLy/erXJE2sTEhgzluAdTSbVIyi/dfDdqaAgQcP/HfznZBhgxz/5d68p915OV8inhH609YnPb33bLN+hy8k97UFzq/mwbb1Lq4YCywzmeFVpcsCnL6v+v/7oXgz7Z6Pb+XL1BkHJWk5Qe+HgjPll3DHPWc/asI35qB+CN9FCET8/kmO4NqFPIbZkbRfXscWcpDEEA/v7H8eyqn3ecRt5l+033j/5bS6NXs1jLY/b+Qn4+8t5rnb9UhFrhwVaPafMrTL8WZJ7EhEW7JN3nFx7Okgr2d+2rTmsJ8NaT/yA08Po5/L4nB091b6BiRNrGxEYFuXaaxPUuK+8yEiOrqR0G2WBv4Kwzl0VM/c08lieqqf/3CjM37G1d4kH9Endp9OZcl7Zn5UuW1GzPykeTWqH4fovnJTQaxlZ3aXt7lXzltPWk/e6vuysVStxho7oyXcfEhiRz0uCJjZzjP+QkCAJe/dn5Oja2iCkOZmuwprtyL8ib9NurCKt15y8W4cLVUiRHhagdikPOVrAuKjUjJS4U+7OdD0q/c+Z6NK0VVqXQpBJsJdiCIHg8qNresXb+XeD2Z1SslftyUKdmCBrEuJbk6RHH2JBkClVaT6mydTLNjnJnnRe1XSoqRY931+LLv6QteOYptcqkpE//o8pj8RFBKkTimnavr0D3aWuQXSDFelzqcXWsi7tJjaddSbZKEeyRaYXwST/uQf+PNsi+Bt+wzzcjffpaWY+hFUxsVPDN5iznG+mQ2kW9qKrDuRdFtbpoRZC//Jekyt1mtWtot5Wx8pfdPp3XI1Gq2+/Vn62XtXH1uLYG2l8tkad45NeZJ2XZrz33ztqA/606ZOj6TUxsVPDhGtszQvSPmY3eXbiqbqtUVPVA2Y+x+kDlLkXtXuC/2Zxl9QXko/O7B6XCn7fxJA6I6O6yx50FY/Vi0/F/MG3ZQavZikbDxIYk4ytjcTWxzsk8RkNvjrtY12e6yqvLD+yQKPsxzlZ6j2j5xnXZnmxkV6jie1mnY4TU8EXGcUn3p+G3iVtOq7Cor1KY2EhIrqZKvdBAXoNnvt2hdgiaMnrBdpe2P6lyHSA5Kw+Xq7yOj5a/sFYfOIuikuvjRWytiK4ncgy+tce6F8/zv7IGLm8kEhMbCf28Q/yaIUYkZl0hW577bgfu/nC921OSSTorFZr5VWRncKdZgZVGK7dKabnF5oHUJGyuUAV3s0Yr4h4SsfQGoPS6XNIeS6q9aWVsi1Yme8iBiY2ElF7RVWu+3/q3W8/7ZvPf2HoyH5uOa/OiXdFpO2vgkDSUyG3zXFgHSG1Bfr5qhyCK2OUOlPxON2v0PkmB3F2UacvU7XaWExMbkoynzcx6GBe5ZJd8Jf8JKFPh20iNitJimUzWXSBarUGiZBeTWGcvXh9LpZFGEgDaabExMiY2EtLyBVIJf9hYrdkVWhh87
Iyna+iQY2qs0aOn75mQAG224IjNa5R8qVdJ3K0q1fvkIgeAy46JDUnKk5opepjKqvZ0aKNTY5iVlvOaRZW6d7X6pegNsxGlamn5YOUhbDv5jybGFD41bwsKDXhNY2JDknK11aZiTZEjuRc18WF3ZOZqo9Yg0gZXK9Ma3T+XS6xaQ46c1Waxxf+uOixqO7X+vFIcNvNYngR7AT5bfxx3fbgBby3dL8n+PLFkVzamLFE/DqkxsZGQXKtK60lENX+Xth/62SbLv1/6cbfbM6vIGOQuK2+L1sc8fKWx5TC81bsS13j6+E9tTN3/OvMkrohY7FZPmNhIyNvH2ABAQkSw6G3PVCoQ5en6LkrZdFyaOzeq6tN1x5A84VfdLlYph60n89UOQTK8RmqTGguNyomre0voaok+vpjl9P3Wv/Hdlr9xuuAq1h8+h4yJPVBcaobJZMLm43mWgnExoYE2V3LWwTAb3DsrAw93SsYrtzdTOxTDajb5dxyf2hcf/3EUEdX8cW97aSsSX7hagtAg11oXyTFbK2JX9tn64/IHYoPGG+VU9/qve/HDU53VDkMyTGw8VFJmhp+PCSaTCe/8bry+Sld9nWm9wGfHN1ba3M5WUgPopztv7obj6NUsDmn1a6odimHd/eF6S2tFcIAvbmsZL9m+31t+CJP6NQWgv/pTO//OVzsEm/afcX9tJnKdlN1H207m480l+/BCnyYoKTNje1Y+WidGwN9Xn506JkHrHcwuKiwsRHh4OAoKChAWFib5/jcfz8PU3/ZXqQAaVT0Q5y4af2aA3G5sGIU/D4kr9qUFa8d3R52aIaK2TZ7wq8zRGNuWl9JRs8IimWVmAf1mrEPHupGYu+G4W/usGRKA8zoq2Eeu6ZBcA98+0QkHsi+g1/t/eLy/9RN6oPPUVRJEds0f429C13dWu/XcxMhgZOXJVzC0bVIEFj3VGfmXixEa5I/iUjPeXXYA97SvjZQ46b9bAem+vxVJbGbOnIl33nkH2dnZaNWqFWbMmIGOHTva3f7bb7/Fyy+/jOPHj6Nhw4Z466230KdPH1HHkjOx+XZzFsZ/t1PSfZL+fTa0A1rXjkBENX+YTCas3p+LX3edwfO9UyzrhyVGVmNiI4G/JvbEuYtFaFIrDPVfWKJ2OKQDjWNDUVJmxlEPSlF4u9aJEdielW/5f8va4fhpZBfJj6ObxGbhwoUYPHgwZs2ahdTUVLz//vv49ttvceDAAcTExFTZfsOGDejatSumTJmC2267DfPnz8dbb72FrVu3onnz5k6PJ1dic7WkDCkvL5Vsf0RERHo1vldjjLipgaT71E1ik5qaig4dOuB///sfAMBsNiMxMRFPP/00JkyYUGX7AQMG4NKlS/jll18sj91www1o3bo1Zs2aVWX7oqIiFBVd7wIqLCxEYmKi5InNj9tPubxSMhERkVEdn9pX0v1JldjIOjKouLgYW7ZsQXp6+vUD+vggPT0dGRkZNp+TkZFhtT0A9OrVy+72U6ZMQXh4uOUnMVHa2RPl6kdrc40WIiIiuk7WxObcuXMoKytDbGys1eOxsbHIzs62+Zzs7GyXtp84cSIKCgosP1lZWTa381SzeHkGSxEREZF0dD/dOzAwEIGBgc439JAWV68lIiJSw2Nd66kdgl2yJjZRUVHw9fVFTk6O1eM5OTmIi4uz+Zy4uDiXtlfS/z3UDo9/uUXtMEhjDr9xK7L+uYLkmtUcJsCcFeWZyv35nr6eU+5ugYmLdnm0D9K+JrXCsM9glXWBa7MxKy5JI7eEiGCcyr82vTwyJAAv9Gmi2LFdJWtXVEBAANq1a4eVK68XaTObzVi5ciXS0tJsPictLc1qewBYvny53e2V1KtZHOY83L7K460TI5D5Qk90TI5UISpSS4CvD45P7Qs/Xx/UjQphq57CVj/bHZP7NcXwG+u6/NwHb0jC/R2TsPXlm2WITF5H3hRX+kJpu1/tpXYIVWx+KR2/jb4Rr/xbjNFTo3pIOwvo2
BT3/5Y3Na46q1gK43s1xtIxN+LYlD74/slOAICwID/8/HQXzHs0FZkv9sTGF3rKcmypyN4VNW7cOAwZMgTt27dHx44d8f777+PSpUsYOnQoAGDw4MFISEjAlClTAACjR49Gt27d8O6776Jv375YsGABNm/ejNmzZ8sdqig9UmJxfGpfnLtYhE3H8rDj7wI816sxfHxM0EnRXMX0bhaHpXuujY0K8vcRteTEinFdkT7d80JaShjYUZ6B6lSVrS+UulEhqBt1LalxdUHBO1snALh25zkoNQnzNp70PEiF+Ppo80JTPVB7IxvCg6VdNuPmpnGiVzIXQ0s3Q091r4/xvRpbxdSuTg38NvpGxEcEIzzYH50bRKkYoXiyvxMHDBiAs2fPYtKkScjOzkbr1q2xdOlSywDhkydPwsfnesNRp06dMH/+fLz00kt44YUX0LBhQyxevFhUDRslRVUPxK0tauHWFrUsj2nnLaqe6fe1wrhvdgAAZj3UDleKy7Bsbza6NIjCyn25eO57ZwUO9fMqNq3FAeVyKV8n6o0l+/Bwp2SMvbmRpPtvX6F1NTpU/jF6pI7yJQGkSiB89LnCgCiPda1n83VqosPrnCIp9siRIzFy5Eibv1uzZk2Vx+69917ce++9MkclvQdSk7DxmHev/Hx329q4oV5N1KgWAODaGj93/Ht3fG/72kioEYxBn2wEANSLDsHRs/qrBjomvSF2nyrE3W1rqx2KIQX6Xfv2GN61Hh7pUlf2FgpXVqRXw20ta+GXnWfUDoMA0cuniBEapJ0WrswXeyLi32u2EWjnlTWAFgnhaoegCfF2vihMJhM6N4jCklE34sM1h/HMLY0RGRKAVq8uA6CtD7ojT3VvgAA/A9+6qayo9HqXpVa7XZSkh4UIpe7y0Sopu9v8NPLejgwJQExokNphSEr7nxgd0VJ/qZY1jQ/D/x5oi7pRIQgP9seSUTeiR0oMvnk8DXp4CfWU1Ghx3AO5pk7NapZ/P9e7sYqR2PfsLdJ2FXqDxMhqzjdSwDiJu3m1QD9XaDKspvFhmPNwB1305Y7q2VDtEFzSI0WemRNGouUbkujQQHRrFG35/xNd66sYjX03N1W/HIfefPRgO7VDAAAMSk1SOwTJMbEhckF6E30lChr+ziYRvnsizSrx8tFI90VlYt9nag241+LnQAtju+oZtEwFExsJGe/tQZWZdPZXlneJW3l0ql9T1v0naaQLQAw9jK9xxY8jO6sdgiaEBPiqHQIAQIeXB1GM9alRmVHfJErSetqgt5sbAddmn+lJr2bydmvoaaCrXt5vYsNUMlHT8tiR2YOrFnpVQ6nZeW0xPWJiQ2RggiAgRmd1Wgan1ZF1/5WTBS3nDrppIdRgmC1qa3eWqlYK3T3aRbvrPXmCiY2ENPjZ9jpPdZd3cKVe7qDL1Y0KwTv3tFI7DFEeSE3C8al9Ze/zv71VvKz7J23Q2UdVFfd3NN7AYYCJDRlMZIi8RaZ0cwf9r6e6N0BiZDX0ahardihOmc3KdOb2bVnL+UYaIuhgoJQWPxcVrwU6eAkVt+3lm3VVusIVxjwrUsUN9Yy/CKjeSqoHa2SQopZEVddP15xeWggDNDjIuWXtCEn3585iq2KotXhyDZlvAtWkvXejjunlIiSXN+5qoXYIsndjaPHOVAw93LEu2JSlyHH0NNPIBG3X2SkXXk2eAdlaKjA58dYmkuyn8oz9uY90wLdPpIl6bmyYfpJyNennE06aV1OCOwBPL+JyfwVUrAKrJ1yaQL/00BWldVK8hlLVEJpxf1ur/1cL8EMHhVttHu0iT+uTVjCxIclIsYhaoMb7fIP89dm1I3bBzqd7NKjy2Fv91W+JI2VpqaXEaOLC3V+XSYphaLteuQUv3dbU8x1pmLa/RXTGkzcsXWNvAU3yjNiKyaNtLBlRN6q61OGo5ueRXdQOwTUqNbTpvYXvvvbiEnm9kaLlKTRIP3Wc3MXERkKBfuLu5m19eZA0tFy7Qk0mk
0lUYTpbXYEdkms4fE7j2FC341LSbS1r8f0hkqeJjVQtPt0bRzvfyIZa4dY3SJ6mA6FB2mjB0uIgbS3iq6QCvd8NaVlLfnF5xNZb09m4pw51HSc+WvHsLbZXxtbB2FzFeXqN8vf1/EV9pV9TvHl3C7eWH3hE4jEkWkkoatfQ5xg/pWnjr0UkgdfuaCa61YxsU2oGzuPdlK94qpW7bleoNQvPTwM3Xw93rouwIH+XW1tCA/10tWyGK+5skwAAaBR7rXu4Twuuqm6L/j7pRHY8lJasdghea/ukm9H6P8tFbz+hdwr+b+1RGSMyBrVak4I9HCTvcdePB11ZL91WdVq2p0NTtDIvbWCHRDSIqY4mta51/4YHi5uw0a5ODSTXDMH9HRPlDE8z2GKjAvXvhYikE109CBHVAtAiQXw3oB5qs2iFGi1N0we0RoKKA/mrBV5PrFxNSsJUHhz7nzuaybZvHx8TOtaNtAwAfvYWcQt9pjeJxbv3tUJ7lYoBKo2JDZGXkKseyvCu18YzCJq5r3WN1nOsBjGhGNWjgaxfmJWlxIVi/YQebj9fyq4sLRSlc+WzMzgtGX+Mv0nS4/dvWxtzHq66InjN6oGiVjFvnRghaTxax8RGYb+O0tl0UyInqgW416LQPCFM4kiMpzw9GHdLYwzWQVfrewNaITo0EDMfaOt8YweaxV9v/ft4cNUvdEdsDXxWOuVOclDIMzzY9c/Lu/e1Qo8U2+u9PdbV8Xi1V/o1RVr9mi4fU8+Y2Cis4geW9CUuzNh1ip7oJu/K6JXNe/QGRY9H4rm77MRdbWoj84WeaOVhC8Hb97S0/LthbCiiQ8W32tyUIq5mkyucJUautFA1iJG2PIKzoqEPdzZ2lWFbmNioQOtN32SbXrtaxFK6yd+oM1ekpNZYJE+me0sRc+WFSl/sI36dJjXWAnuyu7I3BeQYExsiAuD5oHZfkV9oRq0KS/Kp5uEq9XKvt8XlvLSFiY3E+otck4f0R68re3vClS6AAR2SZIxEPlr+u2o3Mu+i9cRl4q0paoegKUxsJCamed3dwZZEckqOCqnymCsVV0MCxd1VS10VlqS17eWb1Q5B0/7voXZWy4iMuEn9bqhbmrFQX0VMbCQ24qb6SIlzPDjsgVR93tmS8sTWqRDD2U1n01rKzFJKieNsKC2rESKu6JuSOjeI8uj5ntbkKbhSYvl3r2Zx+H1sV8v/XanfJJe6Nm5KvBkTG4nVrB6IpWO6OtzG2Sh20iY1xnF6OrukIntrJUmFRfekZ8SX1J1p/iEeLqrZS8YWDUEA4iUoZnhn63gJoiGAiQ2RpplgwuePdJRkX4mR8laSlXuApqfsRWfE5EEqcsyUe+FW8TOcpOLj4iwvV8ZKCrg2IP7RLnXx2dAOLkZ23dT+LZ1vRKIwsSESSY3vPwECujWKlmRfWh4kq4SaGuxicUbtRV1/eKqz9DvVwduwcrJb3UmLkZ+vD166rSluaux+DR05WvIHtPeOtaEqY2Ijkxn3t1E7BEXc3zEJSZHVMPXuFmqHQk44q8Oj7fYWz+mxqyzYw2nOnpKii8UIbLVGtk2KgK+PCV0aejb+R06uzGo0Ek7PkUm/VvF4+uttaochu7pR1TDlbmnXRdEqNWYeeHsrC1G5YH9fXCkpk/04jWKri9ruuyc6objMrOkxkzrM5SXBFhuDW/RUJ7VDMIynNDCtk0gPHH2hupuse1INWaw/n7sJPz8tbj0/Hx+TppMab8YWG4Nrm1RD7RAMw5WaLlLR8h1Xo9jqmNxPuRWnieSWGGl/8Uo96t5YmvF5esPEhkjDfCTMbG6o53iFX1uTmhwdftnYbh5GREbl5yP9TUDL2uHYcOS85PsVQ6/jz9rViVQ7BFWwK4pIwzrWle7C5E7Fa1dmcLerw9ZBukaObqOnezSUfJ9kTExsiDRMiXEF7hjfq2qxv9o1jNWMT/JwtxGSiTOJxcRGJY9yvRzSMTFrotniaWl7OehxGrg3CvDzzq+rx
7vVUzsE3eEYG5XEhQepHQKR7BIignEq/woAYMOEHojUYZE8IjWLaqdK2B3tLbwzBSZyg9Frysh9RxwfEczpsTo084G2aoegOmfFLcX6zx2cRagEWa9keXl5GDRoEMLCwhAREYFhw4bh4sWLDrd/+umn0bhxYwQHByMpKQmjRo1CQUGBnGGSB7T4Zc+eBde81LcJnu+dIktryqAbrq1kn+ZkRhYAjEnn4FAt6tuylqT70+PHU6oWm/rR4or/SWHWg+0UO5bWyJrYDBo0CHv27MHy5cvxyy+/4I8//sBjjz1md/vTp0/j9OnTmDZtGnbv3o25c+di6dKlGDZsmJxhkos+GNha7RAc0vhajJrz6I318GR314oPik0eH+9aH988noY5DztfHDA0yL1xO57S4xctGVN6E/fXmuqYbN1l1bu58pXStUK2xGbfvn1YunQpPvnkE6SmpqJLly6YMWMGFixYgNOnT9t8TvPmzfH999+jX79+qF+/Pnr06IE33ngDP//8M0pLS+UKlVx0R+sEtUMghXja+uXrY0LHupGi1jxigkFG52zQva0bDLGt4l8/doPl3+lNYl0LzGBkS2wyMjIQERGB9u3bWx5LT0+Hj48PNm7cKHo/BQUFCAsLg5+f7XHORUVFKCwstPrRg5a1I9QOwS33ta+tdgikAXK0iuVfKZF+p0RuiAuTZ3JH84Rwh7/3ZIZexdIQj97o3bNuZUtssrOzERNj3azm5+eHyMhIZGdni9rHuXPn8NprrznsvpoyZQrCw8MtP4mJ+lim3V7htZS4UIUjES82LBBv39NK7TDIoOauP6bKcasHcnKokpx9ebdw8uWvBFe7ZsUQU5IquWaIRMfy7vZPlxObCRMmwGQyOfzZv3+/x4EVFhaib9++aNq0KV555RW7202cOBEFBQWWn6ysLI+PrZa+LWrhi0c6qh2GXXoZuyLXZzos2HhfgCEiuoiUotbbq1sj71xPR6v8fNX/Ug6plOwq9d5kOQRpuHylfuaZZ/Dwww873KZevXqIi4tDbm6u1eOlpaXIy8tDXJzjQU0XLlxA7969ERoaih9++AH+/vb7JQMDAxEYGCg6fi0b2aMBYmRqAiXPGbGQm7NzUjKZVevV9dFodWe9aO9iRWBBL3dIEvvI3VlKfHu6zOXEJjo6GtHRzu9w0tLSkJ+fjy1btqBdu2t/0FWrVsFsNiM1NdXu8woLC9GrVy8EBgbip59+QlCQ93zR6/F7M9Bfe6WQ7muXiIWb9dtypyRnXzKv3m677oaY92qr2s67FEKD/HDhaum/+9ThB4Ak1yw+TO0QqvIgF2tXpwa+Hn6D11ZOVoNsr3STJk3Qu3dvDB8+HJmZmVi/fj1GjhyJgQMHIj4+HgBw6tQppKSkIDMzE8C1pOaWW27BpUuX8Omnn6KwsBDZ2dnIzs5GWVmZXKGSSBW/d8b3aoyOyZG4r732xjT19GDKJFlrEON+3Y0BHZKcbrPpxXS390/65CyBDfTTRveolIkIkxplyfpqz5s3DykpKejZsyf69OmDLl26YPbs2Zbfl5SU4MCBA7h8+TIAYOvWrdi4cSN27dqFBg0aoFatWpYfPY+dsWfLS+5d1AN81f+QjLipAb55Io2VZMkjFd8/BZwVZSWimjp1fVylZMdSbJhyww6aV2g5GntzI5efX56/2ZsoIid/DYxTUpOsoyEjIyMxf/58u79PTk62agrv3r27V/W/1qxu/SFtFCNuRtTrdzXHc9/tdLrdqB4N3IqLlJdcsxqOn78s6T7jJViPTMlPoxZmw2jJ72O6qh2CLJxd47XylVyxZalfK9erL695tjtW78/FwI7OWy4rCvTzQVGp2fJ/Vz4XT3avj6y8y2idGOHSMY1G/Vt/AgD4+ZhED2KsJfILq3Li5KnIEGMM0jaqjwa1Rc0Ksyom2xkfI4X4cOlX6dZLC4VSYnUykUAriYjUKp6XO/fbdWqG4OHOdV1u1Z7Ur6nV/6NcuI4/3zsF/
3ugrdePV2NiQ6L974E2aodgWFK0jKTWq4mMiT0t/68p49TR7o2lnyLdRsa7zOhQJuVa4cmXrqKz9FTKDbQw1EDv+ArqkNgPt9QfTCUXcNOSqOoBWDGum9phOGWC9SDFOBEte87eSvbeQva+nO5u6/5yGzVkTMSekqHgGrnHm4YbkDqY2KhsSFodANdmGZE2jU5v5NHsIDGkzEEXj+iMuUM7oHaNapLt8/U7m4varl6U+5VT5fy+8+6GeXnd2kLa1b8dvQ06N4iS9FgVaaUrlGmf54xXSlVnXrm9GR7pUhdJkeK/hLy8+5SckGPg4A315J/ZYZYxs+nWmCUA5NIzRbnX9sEb6uCHbadk2XeNatYthmxY0i+22KjMZDKhTs0QWQZ78YPpXeRIeJV8C4lZAdxddT1oSVLDzU3dX525Uay+u4wdvY39ZKwSXXnPFQf9ip2wIQletz3GxEaDbpJhYKY38ffSYlh6TWQn3dYU3RtH45528qwc/1jXerLsV07P905x+7lKJ3FSJ9RaaZGuGIcfB/TqCv9aGvTWPS0l2Y9WLhBKu1HGfnhvJWfS9EiXupg7tKNsFWfHprteXE1tnhSi02uCK4ac17Rxt+jvfUK2MbHRoEBfxxf4hiIL+XkrsXdX7evUwPzhqbitpbSDH92hVt0JsV+CFTfTU3n49wa0krWLSy6hQdoYyKoGR+9JOZO221rGy7dzUpR+rlBeRHDSySpmGi+J06l+FNLq11Q7DEmmwLqTGzl7r9nSv608XUZyuKOV+9PP9cpbW2qJyjGxIa9VfifftSHHNLlCT1+cYqt5a0lKnGctsiaFJ7d7c+uSHNy52SBrTGzIa43q2RAAkOjCVHtvkxDh/tIJMaz265aQQH1V4Yh0sbAiv7YdM/IYKaUwsdEgqd7YPRSsL6FHrl6QtU6OO/Xy8TQV35Nij9KzCd9/auAdP3k7JjYGJmXl2ftdXKGW9EH84OHrG4rtivL2hfjc9fJtTZ1vpBGtaru+Iruzd8UtzeLcC4boX/pq8yTVdGvEcShGxHt77UiJC8XPT3eBv45qpriThDl7z3WsK3+VazI2/XyCiEg1FVt2qgfqY7DoBwNbqx2Cy/SU1DzSuS7aJyubhDSKZakLck4/nyIvUj3IeUOar8KzPTok11D0eKRdT3azv1J2/3+rB2uhenZyTX0to6A3k/op32Wmx5pErtJTcqtVfAU1SMwb+xWFLyo1q3OGC10TFmw/8a4VHoz9r/XGnIc7KBgRadF/72+j+DEf76a/5TMqu62V+gVD9Y6JjU4l1HB/Gi65pkY1fXS9yMlqVpSTQcFB/r4cOEy4vVU8BqfVUfSYwf7Stejc9O+K8KEKT7+vuLSI0sc2Cr5qOsVaB8q5tblO7qCYS+ja0z0aqh2CaCEiu4Re6dcMX2SckDkaeQxOq4O48CC0r6N8N/zvY7rig5UHMaF3E8WPbQRMbEg3gvx9cLXErPhxlR7PpCg3pnvrRfME16ciqyHAzwd/jL9JV0ulJItcQVyPlZ/L+fn6oE8LdW5qGseF4sNB7VQ5thGwK0qnvLGlPy5MPxd+V2mp6+b2VsZYDFAvCakJxl7/rXzcS18FkgS2ZBPAFhsiw5AqNwq1MSuPXxjeLcDXB8Vl7rWWjr+lMW5uEosWbhTzI3IHW2yIvJjYLqbypRXIO4UFWw+gdyXR9fP1QfvkSKtBsURyYouNzrzdv6XaIahG6u6aGtWMtVaUr4zdWQ1jquOO1vGI4rR/ItI4JjY6Y7SFG10hSNwfYrTXUs5VoU0mEz4YqHxdEnKdHIuh6oWGhqqRiti+TKQBtpK2xiwfT5rBQVakH0xsiDSquwaWJSD90frUfA5EJ7kxsZGRnF0dztbBqVOzmmzH9iZiC5ERuat8fS05NYyp7tHz5ezmJJIaExsZpdaVb+XbetGeXaj0SI1aL487WPBRSomRniWiz/VuLFEkpLRJt8m/7punCys2j5duqjbHwZDcmNgQaUADW
3fULnwBSLlGDikrSA9/OwmTEXZFkdyY2BABWPDYDWqH4BF3v3f4JeMdeqTEqB2CIhrGcMA9MbGRVd6lYtWO3bJ2hGrH1oP+ba3HNdxQr6bN7YxemM5sI7O5reW1JRVqcwV5t/j7aq+vpa7ItZ3E0mp3Up8WcWqHQBpg7Ku2yi4Wlap27Ee71JVsX0kejv9Qwvhero0xaVsnQtR2D91Qx41olBcf4V4SYqvBJq1+TawY1w3Lxnb1LCgvNfVu7y2iqTYtrblG6mFiY1CeDhasaGSPBpLtSy5yLZCp1GyQm5vGqvr8yhrEVEe1AM6EIduYP5CWMbGRkScf/se61pMuEA9ppWm9a8MoyfYVXmntG7V1TPZsBh3vVLXDG4Yt6X1s1mt3Nlc7BJIRExuNGpSa5HSb6FDvWrfnzjYJku3r1ua1JNsXGU+tcHlaALXuhnrylajQkhtkLMVB6mNiIyNP7mrqOCnABwDfPJ7m/gF0yMdBq4SPi+9kXx+2cJB9Rnt3sEHPWkMuV2JoTGx0zFHtkprVxVc9fucex4Md9bConhwtMBHVtNVdRcoa2CHRrecZcTZZbTcHpxOpgYmNQcW6MJj23vbuXcC1JNCA07LvbC1d1xu5rlmCe9V27ZUO0LOnezaUbF9aX8uK9E/Wb4O8vDwMGjQIYWFhiIiIwLBhw3Dx4kVRzxUEAbfeeitMJhMWL14sZ5i6VSNEuhYFPYzXcdQKpffBjLY0qRWmdgikQ560sNp7bvVKswNZJ4u0TNbEZtCgQdizZw+WL1+OX375BX/88Qcee+wxUc99//33OdPDiUA/6UqxN3Cw9pRW/gy1a2i/no67DJiXkUo8aRER+1xP6jvpoWub9E22xGbfvn1YunQpPvnkE6SmpqJLly6YMWMGFixYgNOnTzt87vbt2/Huu+9izpw5coWnCDlW99bFujJEXmr1s93VDkHU8gmv3t7Mo2P4aaQEhCc8XfGctEu2xCYjIwMRERFo37695bH09HT4+Phg48aNdp93+fJlPPDAA5g5cybi4pyXxy4qKkJhYaHVj1Y8c4tnKy6XX6A+G9oB43s1Rr9W8ehUX57++zgvnd7q7YzYhacmqZcucEen+s7rPamZmGhljI2jWZakb7KVFs3OzkZMjPWdg5+fHyIjI5GdnW33eWPHjkWnTp1wxx13iDrOlClT8Oqrr3oUq1xiwzwbt/LJ4PY4d7EIMWFBuKmxvIvYPdm9Pn7YdkrWY+gNv/RJbvMeTZV0fyEB9lt0PxjYGqMXbMeUu1vYXCOMyChcbrGZMGECTCaTw5/9+/e7FcxPP/2EVatW4f333xf9nIkTJ6KgoMDyk5WV5daxtcjHx4QYmZYKqKyagwuiHrhy89W9cbR8gZAm3N4qXvFj/jb6Rpe2f6xrPXRuIF01bQBoYKc+S8vaEbijdQIOvN4b93d0XvxTj1onRqgdAmmEyy02zzzzDB5++GGH29SrVw9xcXHIzc21ery0tBR5eXl2u5hWrVqFI0eOICIiwurx/v3748Ybb8SaNWuqPCcwMBCBgdqf0SOXTS+mo8MbKzzej6N1gTrqoEqnKwPNZ9zfxubjzeLDsOe0droyyX0fDGzt0fMFiCsh0L5ODdSPro7mCWGansXW4N/xJFJNONBiJ87Qzslqh0Aa4XJiEx0djeho53e8aWlpyM/Px5YtW9CuXTsA1xIXs9mM1FTbza8TJkzAo48+avVYixYt8N5776Ffv36uhuoVpJqmbW+gc+YLPRVrNXLk4U7Jku0rNMj2NPmBHZPw8uLdkh1HSp8N7YChn21SOwzdkGJG5e2t4vH9lr+x8Vie3W3qR1fHW04KXKotwUZxPXszk1qIrN3DjizSMtkGDzdp0gS9e/fG8OHDkZmZifXr12PkyJEYOHAg4uOvNROfOnUKKSkpyMzMBADExcWhefPmVj8AkJSUhLp168oVqtfp3cz5oOxyWkhqAKBJLflLoGvxLrSc3GOsqKogf18sdLJsy
cCO2i9uOX+4+HE8d7AoJBmArHVs5s2bh5SUFPTs2RN9+vRBly5dMHv2bMvvS0pKcODAAVy+fFnOMKiSO9soP/5ADwJ8jVe9mORVo5r0JR2UYK9BS88zhTjGhsrJNisKACIjIzF//ny7v09OTobgZHS+s99rmVZD7+VCi41WKPFa3t46Hs99v9Py/0axytW50O/XifE93aMBZqw6bPN3yRqY3k3XiFk4uCKtTDsn6fEW1QvZG3/w0aC2CkeiLZWLHwaouP6UuwswkvS0PCjYaPQ+O5O0gYkNWTSsNFVUygG75Jqp/bU9INXojLioqhghgQokFg4aSnxV6gpLiZN/DB8pxzs/vSRKYqQx1mbSQjVYZ9gori0NvLTcvqvdOUbRJilC7RBIQkxsZKTjcXiG4u6MIq2NkQoNlHVIHFUg5m//joNp3uHBtksKEDigjGTHxIbs6pBcQ+0QqIJu/1ZMtlWXxF2hQUyW3HVve/vjoBwlPUqLCNbYzC2N3TCQ8TCxIbta1o5Q5bjP3NxI0v0ZpeXszbtb4OXbmuL7JztJts/Gdkrwk2cC/bUzCDa8GluPbEmsYYyudqqKiY2X8VdxVV+xbK00bpTkxBNhQf4Y1qUuV2JXgF4bFRrqfGzQf+0sdyKHsRLfQJF2MLExIEdjMfRcgMuezx/p6PD3xjtjUpLWxlo58nLfpoocR45ZY+8NaIWbUpSrsB3CMWuGxcTGgOIlHIOhB9V5gSJCv1bxinU7yTF7ys+HX0ckDb6TyKKGQfvidXTDTRJ4qnt9tUMQhXWitKM712IzFCY2BpRaL9Kt59WsLs1K4SS/V/op0+WgN3WjQvBc7xS1w3DqlqaxHrWsGnFdMz8f9zuNX+rbxKNjd2sU7dHzSVuM9+nQELX65ifcqv0Lu5QSI72r6w0AaoRobAqvwThbo87TBRef7tHQo+frmb1XNr1prNv7bOxh5WADDj30akxsDKhagP0xJ0b8AMeEip8lZMQ7XVLe3W0TPHq+v5/0H8TGCi7a6gl7OaM/P5skEb6TvJwW65jYuu6lxHEhQjnc1rKW2iHokhbvD+5uW1vtEEQxy9CU7c4NS5D/9eeYNPkXJXdxOomXE3QytLaVh03/7uiqYL+7J+ML3BEdGoj37muNtPo1FT2uUSj53hBLL6Uc5EhsOiS7Pq6wVngwxqY3QnCADwK8dNFTo2JiQ1RBxsQeOJJ7CecvFaFPC+VaM0wKfyklRASjS8MoRY9pJLFh+i6SqGYKJMetlI+bNwaj0713rJORMU01gP46aYJWU/mihK2drOJbK/zaF/4drRMU7/NXutWGvJeqjTseZjaTbuOMQHKMiY0XSK0rvpl26ZgbcXfbBPz53E0yRqS8xSM6Y/iNdfE/BUu2a5lOei006f6OiQjS0FpQeuNpV1RSJNd4IseY2BjAnW3iJdtXSlwYpt/XGokGu3jUjQrBi32bIkaDXQjxbqz91DaJK6/LqW9L+5+pKXdrZ+Vud6m5TIRZH8P6SMeY2Cjo2VvkWXSNM4b06827WuCHEZ0BAH3/naHUPMH539NoiafWtKwdbvPx0T2lGZPhK2OTmVy7Dg2SZkimsxpBRJ5iYqOg6FB5KvuGBbt+wRnWpS4A4xbzq1iZV8u9Lg+kJlkGor5xVwu83b8lvngkVeWoyJ5bmrlfRK6iBjKuwp2g8bXiPE1rEmp4dn5ibhxI35jYGECgn+v9/S/f1hQ7X7kFPVKkuVBrzUNpyZZ/62VKc/VAP9zXIRGRIqsK//J0F0RVD8Bvo290+Vi1a7DFR6zqErVUlBvfq7Gss+Bk27dEDS2ettg0qRWG6fe1wmQ3lxVpx25cw+N0by8WFmTMRS8BwNfHhF9HdcHe04XokWLMBe6aJ4Rj80s3u/Scr4ffgIWbTuJlziwR7cYG0k6LD/TymikNJSgKWl6MsF2dGojT4Lg5UhcTGxnVrM71fKTWLD4Me04Xitw2HM3ibY+V8FZp9WvqpgVLKyrWSBHb2
NDEwdpFD6QmeRqSrtWPro4Fj92A6NBA9Hx3rUf7alk7wuXnhATya8/ovPvWQWbudBHJoV60PtaQEaOhjGMTiJwRO2g7JiwIq5/tXuXxFeO6OVzLzVvcUK8m6it8XXrzrhZIrRuJJ7rXV/S4pDwmNgbhaGDyve1ZwI/IE5teTMe652+yFHoUo25UCI6+2cfy//QmsdINGvZwGE2n+lW715x12b50WxMA1yce6M0DqUlY+Hiaobvg6RreOhhESlwozl4osvk7OaeWEhlR5S4nd2c0ulvq3ykbXWKurPtma2ZRGyfrsQ3okISbGsdIOrvzk8Ht8egXm/H7mK6S7ZOIiQ0RkQG4O4j2tTua4di5yxjetZ7TbaUucJneNBbHp/aVdJ9E7IoyiBQHgxWJyPjcneadVr8mJvVrymUiyDDYYmMQY9Ibwc/XB7c2j1M7FFnpfVVl8l71okPUDoHIK7DFxiBCAv3wfO8Ut6Y/at0ng9tb/u3tU2XJMS2OJvv2iTQ83ClZsuUYxLij9bW1rm5uaswCnESOsMWGNC+9wsW5Tk3e9ZL0Xr+zOV5avFuWfXdIjkSH5EhZ9m3PBwPb4I27WqA6a7aQF2KLjYLUWvut4iyGJ7s1UCcIIg178IY6aocgOUdJTcWWLdbVIaPhO9oLxFdYFK9JLQ4yJvJ2Pj4mvNW/BS5cLbW6PhAZARMbIqJKEiON/2U/oAPHq5ExsSvKy/j5anF4JZG26K17pqbIFeGJvIG+Pr06p2YB4KGdk7HndCG6NoxWLwgiktzqZ7sjRsJqwER6x8RGZiEBvrhUXKZ2GJjcr5naIUhmSFodfJ5xQu0wiDShbhRnChJVxK4o0p1nezW2/Pvedlzgk4iIrmNiQ5pTK1x8deHqQWx0JCKi62RLbPLy8jBo0CCEhYUhIiICw4YNw8WLF50+LyMjAz169EBISAjCwsLQtWtXXLlyRa4wSYO6NIhy+PuQCgM7Y0K5xAJJg106RMYg2+3uoEGDcObMGSxfvhwlJSUYOnQoHnvsMcyfP9/uczIyMtC7d29MnDgRM2bMgJ+fH3bs2AEfHzYseRNni/n5+Fz/vZ8PZ3mRNFaM64Zfdp5Ge4WrBBORtGRJbPbt24elS5di06ZNaN/+2jo/M2bMQJ8+fTBt2jTEx8fbfN7YsWMxatQoTJgwwfJY48aNbW5LRCQlXx8T7midoHYYROQhWZpCMjIyEBERYUlqACA9PR0+Pj7YuHGjzefk5uZi48aNiImJQadOnRAbG4tu3bph3bp1Do9VVFSEwsJCqx8iIiLyTrIkNtnZ2YiJibF6zM/PD5GRkcjOzrb5nKNHjwIAXnnlFQwfPhxLly5F27Zt0bNnTxw6dMjusaZMmYLw8HDLT2JionQnIgFn3SrkWEiAr9ohkA6E/rsu0g31a6ocCRGpzaXEZsKECTCZTA5/9u/f71YgZrMZAPD4449j6NChaNOmDd577z00btwYc+bMsfu8iRMnoqCgwPKTlZXl1vHl8kjnZLVD0LV372utdgikA0tG34iX+jbBi32aqB0KEanMpTE2zzzzDB5++GGH29SrVw9xcXHIzc21ery0tBR5eXmIi4uz+bxatWoBAJo2bWr1eJMmTXDy5Em7xwsMDERgoHarbnZpGI3/rjqsdhi61ToxQu0QSAcSI6vh0RvrqR0GEWmAS4lNdHQ0oqOdl+RPS0tDfn4+tmzZgnbt2gEAVq1aBbPZjNTUVJvPSU5ORnx8PA4cOGD1+MGDB3Hrrbe6EiYZwMYXeqLwSgni7NS0CQvyQ+HVUnRrrN8lIupHc3oxEZHUZJkV1aRJE/Tu3RvDhw/HrFmzUFJSgpEjR2LgwIGWGVGnTp1Cz5498cUXX6Bjx44wmUwYP348Jk+ejFatWqF169b4/PPPsX//fnz33XdyhEkaFhsWhNgw+zVqMib2xPmLxUiqWU3BqKT189Nd1A6BiMhwZKtjM2/ePIwcORI9e/aEj48P+vfvj//+97+W3
5eUlODAgQO4fPmy5bExY8bg6tWrGDt2LPLy8tCqVSssX74c9evXlytM0qmQQD+EBOq76rDeVpAmItID2a6skZGRDovxJScnQxCEKo9PmDDBqo4NERERkVgs6aug2jX0221CRESkB0xsZNYiIdzy706ssUFERCQrdvLLLDjAF/tf6w0/HxOL9REREcmMLTYKCPL3hZ8vX2pvN74X1z0jIpIbv22JFJIQEax2CEREhsfEhoiIiAyDiQ2RQnx9OMaKiEhuTGyIFHJLs1i1QyCDaBBTXe0QiDSLiQ2RQgL9fNUOgQzi/x5qhztbx+PXUVyWg6gyTvcmItKZxMhqeH9gG7XDINIkttgQKSi1biQAoB5X9iYikgVbbIgU9MmQ9li87RR6NY9TOxQiIkNiYkOkoNAgfzyUlqx2GEREhsWuKCIiIjIMJjZERERkGExsiIiIyDCY2BAREZFhMLEhIiIiw2BiQ0RERIbBxIaIiIgMg4kNERERGQYTGyIiIjIMJjZERERkGExsiIiIyDCY2BAREZFhMLEhIiIiwzDc6t6CIAAACgsLVY6EiIiIxCr/3i7/HneX4RKbCxcuAAASExNVjoSIiIhcdeHCBYSHh7v9fJPgaWqkMWazGadPn0ZoaChMJpOk+y4sLERiYiKysrIQFhYm6b61wujnaPTzA3iORmH0czT6+QE8R1cJgoALFy4gPj4ePj7uj5QxXIuNj48PateuLesxwsLCDPsmLWf0czT6+QE8R6Mw+jka/fwAnqMrPGmpKcfBw0RERGQYTGyIiIjIMJjYuCAwMBCTJ09GYGCg2qHIxujnaPTzA3iORmH0czT6+QE8R7UYbvAwEREReS+22BAREZFhMLEhIiIiw2BiQ0RERIbBxIaIiIgMg4kNERERGQYTG5FmzpyJ5ORkBAUFITU1FZmZmWqHBACYMmUKOnTogNDQUMTExODOO+/EgQMHrLa5evUqRowYgZo1a6J69ero378/cnJyrLY5efIk+vbti2rVqiEmJgbjx49HaWmp1TZr1qxB27ZtERgYiAYNGmDu3LlV4pH7dZo6dSpMJhPGjBljqPM7deoUHnzwQdSsWRPBwcFo0aIFNm/ebPm9IAiYNGkSatWqheDgYKSnp+PQoUNW+8jLy8OgQYMQFhaGiIgIDBs2DBcvXrTaZufOnbjxxhsRFBSExMREvP3221Vi+fbbb5GSkoKgoCC0aNECS5Ys8fj8ysrK8PLLL6Nu3boIDg5G/fr18dprr1ktdqe3c/zjjz/Qr18/xMfHw2QyYfHixVa/19L5iInF1XMsKSnB888/jxYtWiAkJATx8fEYPHgwTp8+rZtzdPY3rOiJJ56AyWTC+++/r5vzE3uO+/btw+23347w8HCEhISgQ4cOOHnypOX3urvGCuTUggULhICAAGHOnDnCnj17hOHDhwsRERFCTk6O2qEJvXr1Ej777DNh9+7dwvbt24U+ffoISUlJwsWLFy3bPPHEE0JiYqKwcuVKYfPmzcINN9wgdOrUyfL70tJSoXnz5kJ6erqwbds2YcmSJUJUVJQwceJEyzZHjx4VqlWrJowbN07Yu3evMGPGDMHX11dYunSpZRu5X6fMzEwhOTlZaNmypTB69GjDnF9eXp5Qp04d4eGHHxY2btwoHD16VPj999+Fw4cPW7aZOnWqEB4eLixevFjYsWOHcPvttwt169YVrly5Ytmmd+/eQqtWrYS//vpL+PPPP4UGDRoI999/v+X3BQUFQmxsrDBo0CBh9+7dwtdffy0EBwcL//d//2fZZv369YKvr6/w9ttvC3v37hVeeuklwd/fX9i1a5dH5/jGG28INWvWFH755Rfh2LFjwrfffitUr15d+OCDD3R7jkuWLBFefPFFYdGiRQIA4YcffrD6vZbOR0wsrp5jfn6+kJ6eLixcuFDYv3+/kJGRIXTs2FFo166d1T60fI7O/oblFi1aJLRq1UqIj48X3nvvPd2cn5hzPHz4sBAZGSmMH
z9e2Lp1q3D48GHhxx9/tLqu6e0ay8RGhI4dOwojRoyw/L+srEyIj48XpkyZomJUtuXm5goAhLVr1wqCcO3i4+/vL3z77beWbfbt2ycAEDIyMgRBuPbG9/HxEbKzsy3bfPTRR0JYWJhQVFQkCIIgPPfcc0KzZs2sjjVgwAChV69elv/L+TpduHBBaNiwobB8+XKhW7dulsTGCOf3/PPPC126dLH7e7PZLMTFxQnvvPOO5bH8/HwhMDBQ+PrrrwVBEIS9e/cKAIRNmzZZtvntt98Ek8kknDp1ShAEQfjwww+FGjVqWM65/NiNGze2/P++++4T+vbta3X81NRU4fHHH/foHPv27Ss88sgjVo/dfffdwqBBgwxxjpW/MLR0PmJiceccbcnMzBQACCdOnNDdOdo7v7///ltISEgQdu/eLdSpU8cqsdHT+dk7xwEDBggPPvig3efo8RrLrigniouLsWXLFqSnp1se8/HxQXp6OjIyMlSMzLaCggIAQGRkJABgy5YtKCkpsYo/JSUFSUlJlvgzMjLQokULxMbGWrbp1asXCgsLsWfPHss2FfdRvk35PuR+nUaMGIG+fftWicEI5/fTTz+hffv2uPfeexETE4M2bdrg448/tvz+2LFjyM7Otjp2eHg4UlNTrc4xIiIC7du3t2yTnp4OHx8fbNy40bJN165dERAQYHWOBw4cwD///CPqdXBXp06dsHLlShw8eBAAsGPHDqxbtw633nqrYc6xIi2dj5hYpFJQUACTyYSIiAhDnKPZbMZDDz2E8ePHo1mzZlV+b4Tz+/XXX9GoUSP06tULMTExSE1Ntequ0uM1lomNE+fOnUNZWZnVHwwAYmNjkZ2drVJUtpnNZowZMwadO3dG8+bNAQDZ2dkICAiwXGjKVYw/Ozvb5vmV/87RNoWFhbhy5Yqsr9OCBQuwdetWTJkypcrvjHB+R48exUcffYSGDRvi999/x5NPPolRo0bh888/t4rR0bGzs7MRExNj9Xs/Pz9ERkZK8jp4eo4TJkzAwIEDkZKSAn9/f7Rp0wZjxozBoEGDDHOOFWnpfMTEIoWrV6/i+eefx/33329Z5Vnv5/jWW2/Bz88Po0aNsvl7vZ9fbm4uLl68iKlTp6J3795YtmwZ7rrrLtx9991Yu3at5dh6u8b6ubQ1adqIESOwe/durFu3Tu1QJJOVlYXRo0dj+fLlCAoKUjscWZjNZrRv3x5vvvkmAKBNmzbYvXs3Zs2ahSFDhqgcnTS++eYbzJs3D/Pnz0ezZs2wfft2jBkzBvHx8YY5R29WUlKC++67D4Ig4KOPPlI7HEls2bIFH3zwAbZu3QqTyaR2OLIwm80AgDvuuANjx44FALRu3RobNmzArFmz0K1bNzXDcxtbbJyIioqCr69vlRHgOTk5iIuLUymqqkaOHIlffvkFq1evRu3atS2Px8XFobi4GPn5+VbbV4w/Li7O5vmV/87RNmFhYQgODpbtddqyZQtyc3PRtm1b+Pn5wc/PD2vXrsV///tf+Pn5ITY2VtfnBwC1atVC06ZNrR5r0qSJZVZC+f4dHTsuLg65ublWvy8tLUVeXp4kr4On5zh+/HhLq02LFi3w0EMPYezYsZZWOCOcY0VaOh8xsXiiPKk5ceIEli9fbmmtKT+2Xs/xzz//RG5uLpKSkizXnhMnTuCZZ55BcnKy7s8PuPb95ufn5/T6o7drLBMbJwICAtCuXTusXLnS8pjZbMbKlSuRlpamYmTXCIKAkSNH4ocffsCqVatQt25dq9+3a9cO/v7+VvEfOHAAJ0+etMSflpaGXbt2WX1Ayy9Q5W/4tLQ0q32Ub1O+D7lep549e2LXrl3Yvn275ad9+/YYNGiQ5d96Pj8A6Ny5c5Up+gcPHkSdOnUAAHXr1kVcXJzVsQsLC7Fx40arc8zPz8eWLVss26xatQpmsxmpqamWbf744w+UlJRYnWPjxo1Ro0YNUa+Duy5fvgwfH+vLja+vr+WO0QjnWJGWz
kdMLO4qT2oOHTqEFStWoGbNmla/1/M5PvTQQ9i5c6fVtSc+Ph7jx4/H77//rvvzA65d1zp06ODw+qPL7xCXhhp7qQULFgiBgYHC3Llzhb179wqPPfaYEBERYTUCXC1PPvmkEB4eLqxZs0Y4c+aM5efy5cuWbZ544gkhKSlJWLVqlbB582YhLS1NSEtLs/y+fKreLbfcImzfvl1YunSpEB0dbXOq3vjx44V9+/YJM2fOtDlVT4nXqeKsKCOcX2ZmpuDn5ye88cYbwqFDh4R58+YJ1apVE7766ivLNlOnThUiIiKEH3/8Udi5c6dwxx132Jw63KZNG2Hjxo3CunXrhIYNG1pNO83PzxdiY2OFhx56SNi9e7ewYMECoVq1alWmnfr5+QnTpk0T9u3bJ0yePFmS6d5DhgwREhISLNO9Fy1aJERFRQnPPfecbs/xwoULwrZt24Rt27YJAITp06cL27Zts8wI0tL5iInF1XMsLi4Wbr/9dqF27drC9u3bra4/FWcAafkcnf0NK6s8K0rr5yfmHBctWiT4+/sLs2fPFg4dOmSZhv3nn39a9qG3aywTG5FmzJghJCUlCQEBAULHjh2Fv/76S+2QBEG4Nn3P1s9nn31m2ebKlSvCU089JdSoUUOoVq2acNdddwlnzpyx2s/x48eFW2+9VQgODhaioqKEZ555RigpKbHaZvXq1ULr1q2FgIAAoV69elbHKKfE61Q5sTHC+f38889C8+bNhcDAQCElJUWYPXu21e/NZrPw8ssvC7GxsUJgYKDQs2dP4cCBA1bbnD9/Xrj//vuF6tWrC2FhYcLQoUOFCxcuWG2zY8cOoUuXLkJgYKCQkJAgTJ06tUos33zzjdCoUSMhICBAaNasmfDrr796fH6FhYXC6NGjhaSkJCEoKEioV6+e8OKLL1p9AertHFevXm3zszdkyBDNnY+YWFw9x2PHjtm9/qxevVoX5+jsb1iZrcRGy+cn9hw//fRToUGDBkJQUJDQqlUrYfHixVb70Ns11iQIFUp/EhEREekYx9gQERGRYTCxISIiIsNgYkNERESGwcSGiIiIDIOJDRERERkGExsiIiIyDCY2REREZBhMbIiIiMgwmNgQERGRYTCxISIiIsNgYkNERESG8f+P+J5GecbvJgAAAABJRU5ErkJggg==\n" + }, + "metadata": {} + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "plt.figure(1)\n", + "plt.title(\"Sig item\")\n", + "plt.plot(dataset[0][\"signal\"])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oL0MnsAyqet_" + }, + "source": [ + "It can be seen that the `DynamicItemDataset` object returns the two specified items from the JSON annotation.\n", + "\n", + "The `file_path` item, is taken directly, as it is, from the JSON annotation without further processing.\n", + "The other one is instead a new item derived from `file_path` item with the pipeline we have defined before." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vk3vEoWHroqE" + }, + "source": [ + "There is no constraints in what can be done in the pipelines: as said the user can also use their own functions:" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "metadata": { + "id": "EKIlpL3zrnac" + }, + "outputs": [], + "source": [ + "import soundfile as sf\n", + "@speechbrain.utils.data_pipeline.takes(\"file_path\")\n", + "@speechbrain.utils.data_pipeline.provides(\"sig_numpy\")\n", + "def audio_pipeline_numpy(file_path):\n", + " sig, _ = sf.read(file_path, dtype=\"float32\")\n", + " return sig" + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "metadata": { + "id": "Q89JrIT2t4IQ", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "eb237467-c893-459b-a56c-ffffd7f5be06" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'signal': tensor([ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029]),\n", + " 'file_path': 'LibriSpeech/dev-clean-2/3576/138058/3576-138058-0019.flac',\n", + " 'sig_numpy': array([ 0.00201416, 0.00061035, 0.00036621, ..., -0.00332642,\n", + " -0.00335693, -0.00292969], dtype=float32)}" + ] + }, + "metadata": {}, + "execution_count": 86 + } + ], + "source": [ + "speechbrain.dataio.dataset.add_dynamic_item([dataset], audio_pipeline_numpy)\n", + "speechbrain.dataio.dataset.set_output_keys(\n", + " [dataset], [\"signal\", \"file_path\", \"sig_numpy\"],\n", + " )\n", + "dataset[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PkBvDF3IuFG_" + }, + "source": [ + "The dataset object now also returns the signal as read with the soundfile library instead of only the one with read with the built-in speechbrain function which is based on torchaudio." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "j9W6Jseaua_m" + }, + "source": [ + "**Multiple outputs can be specified by one pipeline by using python generators syntax**.\n", + "\n", + "In the example below three outputs are specified, the last two depend directly from the first one (sig) and are transformed version of this latter: with a random gain factor `rand_gain_sig` and with a constant offset `offset_sig`." + ] + }, + { + "cell_type": "code", + "execution_count": 87, + "metadata": { + "id": "uiA8ccBevMMC", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 452 + }, + "outputId": "7836dc9f-9478-4ad9-af3f-eb11a8912891" + }, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjYAAAGzCAYAAAA8I13DAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAvIBJREFUeJzsnXd401YXh3/ezg6QhLDD3hA2Ye8wCxQKLVA2ZVNKP2YphZYWStnQllU2tJRN2XvvvXfCTBgBEpIQx0PfHyGOh2xLtmTJzn2fhwdbuuPYkaVzzz1DQlEUBQKBQCAQCAQvQCq0AAQCgUAgEAhcQRQbAoFAIBAIXgNRbAgEAoFAIHgNRLEhEAgEAoHgNRDFhkAgEAgEgtdAFBsCgUAgEAheA1FsCAQCgUAgeA1EsSEQCAQCgeA1EMWGQCAQCASC10AUGwIhmxEREYGePXtyNt7EiRMhkUg4G08s7N69G5GRkVCr1ZBIJHj37h0AYNWqVShVqhQUCgWCg4MFlZFAIFhDFBsCwUu4du0aOnbsiEKFCkGtViNfvnxo2rQp5s2b53ZZfvnlF2zZssXt83JFQkICOnXqBB8fH/z+++9YtWoV/Pz8cPv2bfTs2RNFixbF4sWLsWjRIs7n/uOPP7B8+XLOxyUQsgsSUiuKQPB8Tp48iYYNG6JgwYLo0aMHwsPD8eTJE5w+fRoPHjzA/fv3jW01Gg2kUikUCgUnc+t0Ouh0OqjVauMxf39/dOzY0WMf0Lt370aLFi2wb98+NGnSxHh8wYIFGDhwIO7du4dixYrxMne5cuUQEhKCw4cP8zI+geDtyIUWgEAguM7PP/+MoKAgnDt3zmp75OXLl2bvVSoVp3PL5XLI5d51K8n8zmx9l2QLikAQL2QrikDwAh48eICyZcvSPnDDwsLM3tP52Fy9ehX169eHj48P8ufPj8mTJ2PZsmWQSCSIjY21O7elj41EIkFKSgpWrFgBiUQCiURiNt+zZ8/Qu3dv5M6dGyqVCmXLlsXSpUvNxjx8+DAkEgn+/fdfTJo0Cfny5UNAQAA6duyIxMREaDQaDB8+HGFhYfD390evXr2g0WgYfVfr169HlSpV4OPjg5CQEHTr1g3Pnj0znm/QoAF69OgBAKhWrZpR/oiICPzwww8AgNDQUEgkEkycOBEAcP78eURHRyMkJAQ+Pj4oXLgwevfubTavwWDA7NmzUbZsWajVauTOnRv9+/fH27dvjW0iIiJw48YNHDlyxPjdNWjQgNHnIhAIGXjXMotAyKYUKlQIp06dwvXr11GuXDlWfZ89e4aGDRtCIpFg7Nix8PPzw5IlS5y27KxatQp9+/ZF9erV8dVXXwEAihYtCgB48eIFatasCYlEgiFDhiA0NBS7du1Cnz59kJSUhOHDh5uNNWXKFPj4+GDMmDG4f/8+5s2bB4VCAalUirdv32LixIk4ffo0li9fjsKFC2PChAl2ZVu+fDl69eqFatWqYcqUKXjx4gXmzJmDEydO4NKlSwgODsZ3332HkiVLYtGiRfjxxx9RuHBhFC1aFO3atcPKlSuxefNm/Pnnn/D390eFChXw8uVLNGvWDKGhoRgzZgyCg4MRGxuLTZs2mc3dv39/4/zDhg1DTEwM5s+fj0uXLuHEiRNQKBSYPXs2hg4dCn9/f3z33XcAgNy5czv1dyAQsi0UgUDwePbu3UvJZDJKJpNRUVFR1KhRo6g9e/ZQ6enpVm0LFSpE9ejRw/h+6NChlEQioS5dumQ8lpCQQOXMmZMCQMXExNid+4cffqAsbyV+fn5mc2TSp08fKk+ePNTr16/Njn/++edUUFAQlZqaSlEURR06dIgCQJUrV87sM3zxxReURCKhWrRoYdY/KiqKKlSokF0509PTqbCwMKpcuXLUhw8fjMe3b99OAaAmTJhgPLZs2TIKAHXu3Dnaz/rq1Svjsc2bN9O2NeXYsWMUAGrNmjVmx3fv3m11vGzZs
lT9+vXtfhYCgWAbshVFIHgBTZs2xalTp/DJJ5/gypUrmDZtGqKjo5EvXz5s27bNbt/du3cjKioKkZGRxmM5c+ZE165dOZWRoihs3LgRbdq0AUVReP36tfFfdHQ0EhMTcfHiRbM+3bt3N3NyrlGjBiiKstrmqVGjBp48eQKdTmdz/vPnz+Ply5cYNGiQmaNzq1atUKpUKezYscOpz5W5/bd9+3ZotVraNuvXr0dQUBCaNm1q9rmrVKkCf39/HDp0yKm5CQSCNUSxIRC8hGrVqmHTpk14+/Ytzp49i7Fjx+L9+/fo2LEjbt68abPfo0ePaCN8uI76efXqFd69e4dFixYhNDTU7F+vXr0AWDs6FyxY0Ox9UFAQAKBAgQJWxw0GAxITE23O/+jRIwBAyZIlrc6VKlXKeJ4t9evXR4cOHTBp0iSEhISgbdu2WLZsmZnPz71795CYmIiwsDCrz56cnGz1uQkEgvMQHxsCwctQKpWoVq0aqlWrhhIlSqBXr15Yv3690fFVKAwGAwCgW7duRudcSypUqGD2XiaT0bazdZwSIHuFRCLBhg0bcPr0afz333/Ys2cPevfujRkzZuD06dPw9/eHwWBAWFgY1qxZQztGaGiom6UmELwXotgQCF5M1apVAQBxcXE22xQqVMgsz00mdMeYQpeJODQ0FAEBAdDr9Wa5YdxFoUKFAAB37txBo0aNzM7duXPHeN5ZatasiZo1a+Lnn3/G2rVr0bVrV/zzzz/o27cvihYtiv3796N27drw8fGxO443ZnEmENwJ2YoiELyAQ4cO0Vordu7cCYB++yWT6OhonDp1CpcvXzYee/PmjU3rAhP8/PyMJQgykclk6NChAzZu3Ijr169b9Xn16pXT8zGhatWqCAsLw4IFC8y2iXbt2oVbt26hVatWTo379u1bq+8+018pc55OnTpBr9fjp59+suqv0+nMviu6745AIDCHWGwIBC9g6NChSE1NRfv27VGqVCmkp6fj5MmTWLduHSIiIow+LHSMGjUKq1evRtOmTTF06FBjuHfBggXx5s0bpywIVapUwf79+zFz5kzkzZsXhQsXRo0aNTB16lQcOnQINWrUQL9+/VCmTBm8efMGFy9exP79+/HmzRtXvga7KBQK/Prrr+jVqxfq16+PL774whjuHRERgW+++capcVesWIE//vgD7du3R9GiRfH+/XssXrwYgYGBaNmyJYAMP5z+/ftjypQpuHz5Mpo1awaFQoF79+5h/fr1mDNnDjp27Agg47v7888/MXnyZBQrVgxhYWFWFiYCgWAHIUOyCAQCN+zatYvq3bs3VapUKcrf359SKpVUsWLFqKFDh1IvXrwwa2sZ7k1RFHXp0iWqbt26lEqlovLnz09NmTKFmjt3LgWAio+Ptzs3Xbj37du3qXr16lE+Pj4UALP5Xrx4QQ0ePJgqUKAApVAoqPDwcKpx48bUokWLjG0yw73Xr19vNi6bMGxbrFu3jqpUqRKlUqmonDlzUl27dqWePn3q9DwXL16kvvjiC6pgwYKUSqWiwsLCqNatW1Pnz5+3mnvRokVUlSpVKB8fHyogIIAqX748NWrUKOr58+fGNvHx8VSrVq2ogIAACgAJ/SYQWEJqRREIBFqGDx+OhQsXIjk52aazLoFAIIgN4mNDIBDw4cMHs/cJCQlYtWoV6tSpQ5QaAoHgURAfGwKBgKioKDRo0AClS5fGixcv8NdffyEpKQnff/+90KIRCAQCK4hiQyAQ0LJlS2zYsAGLFi2CRCJB5cqV8ddff6FevXpCi0YgEAisID42BAKBQCAQvAbiY0MgEAgEAsFrIIoNgUAgEAgEr8HrfGwMBgOeP3+OgIAAkpqcQCAQCAQPgaIovH//Hnnz5oVU6rzdxesUm+fPn1tV/iUQCAQCgeAZPHnyBPnz53e6v9cpNgEBAQAyvpjAwECBpSEQCAQCgcCEpKQkFChQwPgcdxavU2wyt58CAwOJYkMgEAgEgofhqhsJc
R4mEAgEAoHgNRDFhkAgEAgEgtdAFBsCgUAgEAheg9f52BAIBAKBPyiKgk6ng16vF1oUggeiUCh4L6xLFBsCgUAgMCI9PR1xcXFITU0VWhSChyKRSJA/f374+/vzNgdRbAgEAoHgEIPBgJiYGMhkMuTNmxdKpZIkQSWwgqIovHr1Ck+fPkXx4sV5s9wQxYZAIBAIDklPT4fBYECBAgXg6+srtDgEDyU0NBSxsbHQarW8KTbEeZhAIBAIjHEl1T2B4A4rH7lCCQQCgUAgeA1EsSEQCAQCgeA1EMWGQCAQCNmSnj17ol27dkKLQeAY4jxMIBAIhGzJnDlzQFGU0GIQOIZYbAgEAuEjm+9txtm4s0KLQXATQUFBCA4OFloMAscQxYZAIBAAXH99HRNOTkCfvX2EFsVjoCgKqek6Qf6xsbRs2LAB5cuXh4+PD3LlyoUmTZogJSXFaivq/fv36Nq1K/z8/JAnTx7MmjULDRo0wPDhw7n/8gi8QbaiCAQCAcDz5OdCi+BxfNDqUWbCHkHmvvljNHyVjh9hcXFx+OKLLzBt2jS0b98e79+/x7Fjx2gVoxEjRuDEiRPYtm0bcufOjQkTJuDixYuIjIzk4RMQ+IIoNgQCgUDwWuLi4qDT6fDpp5+iUKFCAIDy5ctbtXv//j1WrFiBtWvXonHjxgCAZcuWIW/evG6Vl+A6RLEhEAgECyiKIuUCGOCjkOHmj9GCzc2EihUronHjxihfvjyio6PRrFkzdOzYETly5DBr9/DhQ2i1WlSvXt14LCgoCCVLluRUbgL/EMWGQCAQLHiY+BBFg4sKLYbokUgkjLaDhEQmk2Hfvn04efIk9u7di3nz5uG7777DmTNnhBaNwBPEeZjgkWgNWqFFIHgxekovtAgEDpFIJKhduzYmTZqES5cuQalUYvPmzWZtihQpAoVCgXPnzhmPJSYm4u7du+4Wl+AiRLEheBwLryxE5VWV8cuZX4QWheBFpOpShRaBwANnzpzBL7/8gvPnz+Px48fYtGkTXr16hdKlS5u1CwgIQI8ePTBy5EgcOnQIN27cQJ8+fSCVSsm2pIdBFBuCxzH/8nwAwN+3/xZYEoI3sfDKQqFFIPBAYGAgjh49ipYtW6JEiRIYP348ZsyYgRYtWli1nTlzJqKiotC6dWs0adIEtWvXRunSpaFWqwWQnOAs4t4cJRAIBDfxPCUr3Jtko/UeSpcujd27d9OeW758udn7gIAArFmzxvg+JSUFkyZNwldffcWniASOIYoNgUAgEAgALl26hNu3b6N69epITEzEjz/+CABo27atwJIR2EAUGwKBQLCA+FRkX6ZPn447d+5AqVSiSpUqOHbsGEJCQoQWi8ACotgQCASCBala4kjMJynaFGj0GuRQ5RCVElmpUiVcuHBBaDEILkKchwkeRXxKvNAiELwUCbIesKturhJQEu8nNjEWcclxSNGmCC0KwQshig3PvEh5gbkX55IHMkdo9SR/DYF/krXJQouQLUg3pAstAsELIVtRPDP4wGDceXsHh54cwua2mx13IBAIgkCBREK5HfKVE3iAWGx45s7bOwCA++/uCyyJd0AePgR3YLotRSAQPAui2BAIBAIAA2UQWgQCgcABRLEhEAjZnuPPjgstAoFA4AheFZujR4+iTZs2yJs3LyQSCbZs2WK3/eHDhyGRSKz+xccTx1sCPUnpSUKLQPACZl2YZfbecsvzg+4D/nvwH96mvXWnWAQPpGfPnmjXrp3b5st8br579473uSZOnIjIyEje53EVXhWblJQUVKxYEb///jurfnfu3EFcXJzxX1hYGE8SEjwdjU4jtAisINsdnsm0c9Mw7vg49NvbT2hROCMuOQ56A6li7unUqlULcXFxCAoK4n2u//3vfzhw4ADv87gKr1FRLVq0oC005oiwsDAEBwczaqvRaKDRZD3ckpLICt4Wz5KfIac6J3zkPkKLwhnJ2mSEIlRoMRgx9+JcLL62GLs77EY+/3xCi
0MwwdJCY+k8vCd2D4CsYABP59DjQxh2aBga5G+AeY3nCS2O20lPT4dSqRRaDE5QKpUIDw93y1z+/v7w9/d3y1yuIEofm8jISOTJkwdNmzbFiRMn7LadMmUKgoKCjP8KFCjgJik9i1sJt9B8Y3PUWFNDaFE4Zf3d9UKLwJjF1xYDAJpvbC6wJARL3nx4I7QIrHj94TV2PtzpdF6nFTdXAAAOPz3smiAUBaSnsP4n0X6ARPsBSE91qj/SUzLmZkiDBg0wZMgQDB8+HCEhIYiOjsbMmTNRvnx5+Pn5oUCBAhg0aBCSk7PyFy1fvhzBwcHYs2cPSpcuDX9/fzRv3hxxcXHGNnq9HiNGjEBwcDBy5cqFUaNGsSqg+v79e3Tt2hV+fn7IkycPZs2ahQYNGmD48OHGNqtWrULVqlUREBCA8PBwdOnSBS9fvjSet9yKYiK3PQ4fPozq1avDz88PwcHBqF27Nh49egTAeitKp9Nh2LBhxs8/evRo9OjRw61bcXSIKo9Nnjx5sGDBAlStWhUajQZLlixBgwYNcObMGVSuXJm2z9ixYzFixAjj+6SkJKLc0NBlZxcA3hcuve/RPoyqNkpoMQgeTkJagt3z79Pfu0kSZny+/XO8SH2BmKQYDI4cLJwg2lTgl7ysu5XhYu5xzwGlH+PmK1aswMCBA42L5V27dmHu3LkoXLgwHj58iEGDBmHUqFH4448/jH1SU1Mxffp0rFq1ClKpFN26dcP//vc/YwXwGTNmYPny5Vi6dClKly6NGTNmYPPmzWjUqBEjmUaMGIETJ05g27ZtyJ07NyZMmICLFy+aKQ9arRY//fQTSpYsiZcvX2LEiBHo2bMndu7caXNcR3LbQqfToV27dujXrx/+/vtvpKen4+zZszbLXvz6669Ys2YNli1bhtKlS2POnDnYsmULGjZsyOjz84WoFJuSJUuiZMmSxve1atXCgwcPMGvWLKxaRZ/iXKVSQaVSuUtEVrDR3PlGZ9AJLQIvkIzOBHejN+ghk8oEleFF6gsAwOEnh4VVbDyI4sWLY9q0acb3ps+aiIgITJ48GQMGDDBTbLRaLRYsWICiRYsCAIYMGWKs+A0As2fPxtixY/Hpp58CABYsWIA9e/Ywkuf9+/dYsWIF1q5di8aNGwMAli1bhrx5zRXF3r17G18XKVIEc+fORbVq1ZCcnGxzW8iR3LZISkpCYmIiWrdubexbunRpm+3nzZuHsWPHon379gCA+fPn21W43IWoFBs6qlevjuPHSSgml6RqU+Gr8BVaDALBIzkVdwp18tURWgxxoPDNsJyw5GbCLQBAuG84cvrkcH5uFlSpUsXs/f79+zFlyhTcvn0bSUlJ0Ol0SEtLQ2pqKnx9M8b29fU1PuCBjF2FzG2gxMRExMXFoUaNrO19uVyOqlWrMlrUPnz4EFqtFtWrVzceCwoKMlO4AODChQuYOHEirly5grdv38JgyAhAePz4McqUobd92ZPbHjlz5kTPnj0RHR2Npk2bokmTJujUqRPy5Mlj1TYxMREvXrwwk18mk6FKlSpGGYVClD42ply+fJn2S/VExBKB4G3bUQSCO/G0SDw6OLMmSyQZ20Es/1EKH1AKH0Dp61R/KP0y5maBn1/WtlVsbCxat26NChUqYOPGjbhw4YIxejc9Pat+lUKhsPi4Erda4lNSUhAdHY3AwECsWbMG586dw+bNm63ktMQVuZctW4ZTp06hVq1aWLduHUqUKIHTp087/yEEgFfFJjk5GZcvX8bly5cBADExMbh8+TIeP34MIMM/pnv37sb2s2fPxtatW3H//n1cv34dw4cPx8GDBzF4MDG1ErwLb90azA4sv7FcaBEILnLhwgUYDAbMmDEDNWvWRIkSJfD8OTvLU1BQEPLkyYMzZ84Yj+l0Oly4cIFR/yJFikChUODcuXPGY4mJibh7967x/e3bt5GQkICpU6eibt26KFWqFCPLi6tUqlQJY8eOxcmTJ1GuXDmsXbvWqk1QU
BBy585tJr9er8fFixd5l88RvG5FnT9/3syJKNPJt0ePHli+fDni4uKMSg6QoYF+++23ePbsGXx9fVGhQgXs379fcEckZyGWEe7xlu80OT0ZwepgocUg2MBevqHLry67TxAvJz41HoGqQMil7vWKKFasGLRaLebNm4c2bdrgxIkTWLBgAetxvv76a0ydOhXFixdHqVKlMHPmTMaJ8gICAtCjRw+MHDkSOXPmRFhYGH744QdIpVKjs27BggWhVCoxb948DBgwANevX8dPP/3EWk6mxMTEYNGiRfjkk0+QN29e3LlzB/fu3TMzQJgydOhQTJkyBcWKFUOpUqUwb948vH371qazsbvg9Wpq0KCBXfPX8uXLzd6PGjUKo0aRCBeuSdWmCi0CZxx6fEhoEQjZgFNxp4QWIVtAURSeJz9HwcCCbp23YsWKmDlzJn799VeMHTsW9erVw5QpU2w+wG3x7bffIi4uDj169IBUKkXv3r3Rvn17JCYmMuo/c+ZMDBgwAK1bt0ZgYCBGjRqFJ0+eQK1WAwBCQ0OxfPlyjBs3DnPnzkXlypUxffp0fPLJJ6w/MxN8fX1x+/ZtrFixAgkJCciTJw8GDx6M/v3707YfPXo04uPj0b17d8hkMnz11VeIjo6GTCasc72EElPoDgckJSUhKCgIiYmJCAwMFFQWA2VAxZUVje8vf3lZkGiKRVcXYd6lrCRcVXNXxV/Rf0EqEb2LlRXfn/geW+5vMTt2rcc1YYRhSfkV5Y2vj3U+Riw2IsL0b5OJ6XVleV7oay5TnlI5S2F9G/a5nHrs6oGLLzO2DJh+lrS0NMTExKBw4cLGB6+z3Hh9w/haLpWjZM6SdlpnH1JSUpAvXz7MmDEDffr0EVoc1hgMBpQuXRqdOnWyaVmydx1x9fz2vCcbgTWWSezOvziPZdeXCSQNAcjYUnuf/h4HHh2ARu/5zqgEzyJFm8K6z97YvYhPiUeaLo1TWSyzPGcnLl26hL///hsPHjzAxYsX0bVrVwBA27ZtBZaMGY8ePcLixYtx9+5dXLt2DQMHDkRMTAy6dOkiqFxEseERsRjD6HK9zL442/2CEMwYcmAIhh8ejhnnZwgtCiGb4UxpiPmX5sNAGTw2d5SBMiBVm+q2+/Ljx4+NJQjo/mX6l06fPh0VK1ZEkyZNkJKSgmPHjiEkJIQ3uezJdOzYMVZjSaVSLF++HNWqVUPt2rVx7do17N+/327uG3cg+jw23oTQDlUEcZG5FbDtwTaMqzFOYGkInggXD+k1t9agVt5aKBxUmAOJnMBNt8Xnyc+RqElELp9cCPfjv7ZS3rx5jRHBts4XLFiQcRQVV9iTKV8+djXsChQo4LDskRAQxYbgUWRnszWBwAdTz04FILzfEN8kajIcehM+JLhFsZHL5ShWrBjv87BFjDJxDdmKciNi2ZoiCI9p2Lq90GKCODkbdxZP3j8RWgwjCR8SnPKbEQNksULgGmKx4RFvybkiJmISY4QWgXM+6D4ILQKBJX32ZkSsiMHKkahJRIN/GwBwTR6Kohhtl2sNzlUUJxDcBbHYuJGnyU+FFsHj8ZbkaGJa7RM8mztv2DsC03H19VVOxmFLuj4drz+8JhZtAmcQxcaN7Hi4Q2gRPI645Dho9fZXiJ64glxxY4XQIhA4INNvwxsQ0nL4IuUF3mneCTY/wbsgik02Rsz5Uw48OoDp56aj2cZm6Larm922Z+POWh27/eY2Lrxwb7QBG56+J9Y7sXPy2UmUX1EeG+9utNnmUdIjN0pET7rBdjFENsQmxnIyjrM8T36OB+8eCCoDwTsgig2PiN3H5nkyu6Jv7iJVm4rhh4djxc0Mq8bNhJt226++tdrq2Gf/fYaeu3viVeorXmQkeD/992ekkZ94aqLNNmJw/F5+fTkn4/x55U9OxnEFpsn/tHotp1tXJ06cQPny5aFQKNCuXTubx8SERCLBli1bhBZDlBDnYR65+kqYPWtPJ13PbgVqz/rxIvUFQn1DX
RWJc8Su9BKYsfXBVkSGRQoqg6lvzPv09whQBggoDf8kpyfjUdIj+Cn8EBEUwcmYI0aMQGRkJHbt2gV/f3+bx1yhZ8+eePfuHWfKSFxcHHLkyMHJWN4GsdjwyPn482bv7729J5Ak3k1sUqzDNlq9Fs+Sn/EvDENuv7nNyTg6g46TcQjOcffNXaFFMGPfo31Ci8A7b9PeAnCuLIQtHjx4gEaNGiF//vwIDg62eUxMhIeHQ6VSCS2GKCGKjRtJSEsQWgSPgI8MzV/u+hLNNzbHufhznI8tFDse7kClVZXQYF0Dzuv3EDwDChRneWCcGYeiKKRqU1n/S9Ol2fznsL8uoz+brSiNRoNfxv6CeqXroXL+yqhTpw7OnTuH2NhYSCQSJCQkoHfv3pBIJFi+fDntsbdv36Jr164IDQ2Fj48PihcvjmXLsmruPXnyBJ06dUJwcDBy5syJtm3bIjY2FgAwceJErFixAlu3boVEIoFEIsHhw4ftypyeno4hQ4YgT548UKvVKFSoEKZMmWI8b7kVdfLkSURGRkKtVqNq1arYsmULJBKJ3UzD3grZiuIRsW83iDUx1vv09y71v/bKOpfHjYSMasK99/QWRe4RLhhzbAyADIV51c1V6Fehn8M+z5KfYejBoehepjvaFWvHs4SegUvWM5H9hJLTk53u68zC64PuA2qsreH0nK6wPHo547ajRo3C/u378fO8n5G3QF5sWrwJ0dHRuHfvHuLi4lCyZEn8+OOP6Ny5MwICAtC8eXOzY0FBQRg5ciRu3ryJXbt2ISQkBPfv38eHDxmRZFqtFtHR0YiKisKxY8cgl8sxefJkNG/eHFevXsX//vc/3Lp1C0lJSUZlKGfOnHZlnjt3LrZt24Z///0XBQsWxJMnT/DkCX2aiKSkJLRp0wYtW7bE2rVr8ejRIwwfPpzx9+NtEMWGR8Su2Ox9tBdfVfhKaDGsWH5juUv9n6fYd4p+mPgQRYKKuDSH2HiR+oJRu6lnpuLe23v4/sT3RLH5yMzzM53u++bDGw4lcY5UXarx9fTz09G9bHcBpXEv8SnxCFIFwUfuY7NNSkoK/vzzT0yeNxl1m9QFACxevBj79u3D0qVLMXLkSEgkEgQFBSE8PKPUgp+fn9Wxx48fo1KlSqhatSoAICIiwjjHunXrYDAYsGTJEqPFedmyZQgODsbhw4fRrFkz+Pj4QKPRGMdzxOPHj1G8eHHUqVMHEokEhQoVstl27dq1kEgkWLx4MdRqNcqUKYNnz56hXz/Hix1vhCg22Zh5l+ZZKTY3E24iXZ8uqEMkl/k0Lr64iHIh5cyOtd3S1musNpkwDd0nWY6tMcD5yCahk27efWvu42O6mNLqtVDIFLzO7yP3wZkuZ1j3u5Vwy+a5YjmKQSG1LXdmX5VMhYQPCUj4kICyIWVttn/w4AG0Wi0qVa9kPKZQKFC9enXcumVbDksGDhyIDh064OLFi2jWrBnatWuHWrVqAQCuXLmC+/fvIyDA3HE7LS0NDx44F8Les2dPNG3aFCVLlkTz5s3RunVrNGvWjLbtnTt3UKFCBajVauOx6tWrOzWvN0AUGz6xMNiIPbMmRVHovL0zAOBY52MIVgcLKxAH/Hb+N49ZwR58fBBP3z9lJO+TJHOTNOOcRCLbOiHww4B9A3DieUbV5avdr/LitwZk+Hn4KnxZ91PL1TbP+ch9oJQpnerLJy1atMCjR4+wc+dO7Nu3D40bN8bgwYMxffp0JCcno0qVKlizZo1Vv9BQ56IyK1eujJiYGOzatQv79+9Hp06d0KRJE2zYsMHVj+L1EOdhNyL2rSnTnBxv0oQ3sZviKF+IN0QHfX3oa/x2/je039reYdtdsbvcIFH2gA/H63/v/ItZF2ZxPi5TMpUaIGPr1RRXfdg8jaJFi0KpVOLS2UvGY1qtFufOnUOZMmVYjRUaGooePXpg9erVmD17NhYtWgQgQwm5d+8ewsLCUKxYMbN/QUFBAAClUgm9Xs9qvsDAQ
HTu3BmLFy/GunXrsHHjRrx5Y31vLlmyJK5duwaNJmuBc+6c9wRKsIUoNjyip9hdxEKwO2a30CJYQefU7MjRef/j/cbXlgqko5IMYuP+u/us+4jVEdwTuPLqikv96ZTun07/hKXXl2L/o/00PfglVZtq9t6y7MOCKwvcKY7g+Pn5YcCAAZgxcQaOHziOB3ceoF+/fkhNTUWfPn0YjzNhwgRs3boV9+/fx40bN7B9+3aULl0aANC1a1eEhISgbdu2OHbsGGJiYnD48GEMGzYMT59mbFdGRETg6tWruHPnDl6/fg2t1v59aebMmfj7779x+/Zt3L17F+vXr0d4eDht6HmXLl1gMBjw1Vdf4datW9izZw+mT58OgJ8oU7FDFBseyYzEETMjj440vp5xYYaAkrjGB61t3xExZFTlm50xO4UWQVBuJNzA2ltrBckEbKlUvkt7Z3z9zeFv3CyNdTHLRdcWmb1feXMl6zFff3jtkkxCM+GnCWjSugnGDh6Lzxp/hvv372PPnj2sEtwplUqMHTsWFSpUQL169SCTyfDPP/8AAHx9fXH06FEULFgQn376KUqXLo0+ffogLS0NgYGBAIB+/fqhZMmSqFq1KkJDQ3HixAl70yEgIADTpk1D1apVUa1aNcTGxmLnzp2QSq0f24GBgfjvv/9w+fJlREZG4rvvvsOECRMAwMzvJrtAfGx4ROw+NZasurlKaBE4wfJBs/jaYsZ9jz09hiLBRZDPPx/XYrnEo6RHmHtxLvpV6IdSOUs5P5BnXZKM+Xz75wCAAGUA2hRtI6gsF19eFHT+JE2S2fvENNcLdYq1/ApTJEoJxk0Zh3FTxgGAlbPxu3fvrPpYHhs/fjzGjx9vc47w8HCsWGG7uG1oaCj27t3LWOZ+/frZjWqyfL7UqlULV65kWR/XrFkDhUKBggULMp7TWyAWGzfCZaZMS96mvcUnWz7BkmtLWPfdG8v8xyYUjvyT2JZhoOPks5MYdGAQmm9sjp67e4qqcvPA/QOx99Feo3O3s3j6ytsRQmT3fvz+sdvntIe9KK/TcaedG9PCEsbF741LHFnqskM04MqVK3H8+HHExMRgy5YtGD16NDp16gQfH9uh8N4KUWx4JFlrnizLGd8Jpvx17S/EJMZgzsU5rPt+e+RbHiTili93fWn3vDORGZZceJlVDfzCiwv469pfLo/JFU/eZ0RBZd7AX6a+ZD0GRVF4kEiqJ3PNP7f/sXve3Zbbyy8v2zzXb69zeU0s/QXFptiIaRHClF9++QX+/v60/1q0aMF6vPj4eHTr1g2lS5fGN998g88++8zo3JzdIFtRPOKqUyIT7ry5g5U3V+Kd5p3xmIEyQCpxUWcVmb8Z1wVFKYqycqqz3MLi08LmCuVXlEeD/A1Y97P0vSBww+pbqzG6+mjje8vffZ+9fbA0eqnb5LGMELyecJ1x3zRdGm04dUxijNl7sTmre9q2PwAMGDAAnTp1oj3njJVl1KhRGDVqlKtieQVEsfFwOv7X0erYB90H+Cn8BJCGXyyjPWzBJKxea9Aac2UYKAMWXlnotJleCA4/Pcy6j6XvxavUV6KsfO7pLL1ursSciz+HSacm4YeoH9wyvy2lg8nD39aWzqRTk5BHmccluQRFhHpPzpw5HZZVIDgH2YoieAzXXzNfebJhV8wu/HHlD7dY2MREo/WNhBYBz5OfY/q56Zw5pwqZK+rIkyMYe2ws7bkNd92XVM1WeO8/d+xvmQH2vz8DDBnnKX6/Z6fGdmBAEnsOseyEO6xrxGJDoEfA+4Ctm9DlV5cZ9WdrJn+W/IxVe09FjPksBuwfgJjEGBx5egT/tf/P5fFefXjFgVTOMeTgELvnL7+87JZSJQcfH7Q6dvTpUfxy5heHfe09dBJ1idAZdKDS+b05JHxIQF7/vKz68OEcnKhJhFKmtFuHisCe9PQM/yyZTMbbHESx8UJ+PPUjfq33q8vj7H+0H7Mvzsa0etNQJhe7DJ2ucO01fR0nZ
yK+bPH0/VMUCbZfCNOlqs8sabmppdl7W74O3kam70ZsUiwn4+14uANT607lZCyuYVqolI95Bh8YzKivPctGmiENRxOOIloejSSfJMgCZU4py4maRBi0tqOYUqgUpMltZ4Om6/tG+wYyvQzBqmDaPrp0nVm/tDT72aZTtanGBU/xHMXttiUwx2Aw4NWrV/D19YVczp/6QRQbDyYuOY72+M6Ynfilzi+QSZ3XiGdfnI1DTw4BAHru7omzXc86PRZbLB0VM+HSYXH/4/34Kvgru+O609k2M+opk/V31+PLMvYjwfjCQBlwM+EmSuYoaSyiGJMYg39u/4M+5fsgzDdMELnEjBCJAfngwbsHdq1K219tBwAEKgOR+Ma5SKS4lDi7liGFTAGdj+0SKS+T6SMCX+Ilwv3CaQMn3qa9NbPqyN/Jka5PxzvNOwQqA60WEYmaRGPwgPwdeUxyiVQqRcGCBXm1IJO/mAdjL/HclLNTML6m7WRSjshUagDx5IBgWg+KyX56Zrj0pnubMPfSXJfk4oNp56Yh1CcUzQs3Z933Xdo7BKmCnL5xLLq6CL9f/h3REdGYXj8jLfsXO75AijYFt97cwsoW7DPXejsnn5902IYCheGHhiPEJ8Sl3yafTDs3DWtbrbV5ngKF/179h7YV26J0rtJOzTFy20iH4eLb2m+zee7rzV/bPNe1dFd0LmWd6+nfs//ixLOsTL/b2m9D1x1djXWzLOfbF7sP827NcygLgT1KpZI2ezKXEMXGg6HbS89k3Z11or15Oku6wfbNcGfMTlYZZzO/nx9O2o9UeZn6UjALxcijI1krNntj9+LbI9/iyzJfYlQ150I/l11fBgDYE7vHqNhkrl75cuD2dJg4B999cxcHHh8AkGHxGFZ5GN9isaZ2vtqM2umgY5yqX6vX4vKry4gMjYRCpkB8erzDavT2xo5Lp7dUA8DZhLPooe5hdTzJkGTWT61W49GHR0Y5LOfTyXTG9tmxJIGnQ6Ki3IzWwL4gY8KHBGy6t8kq3DkhLcFuv3PxzKu7iq2aN1uOPzvOy7ieFin12/nfANCXxxBb7hG+ECKniamF0xamCTsXX1ssaPVvV2FzLf185mf03tMbP53+CYAwfx86K67p9uGRJ0fcKQ6BZ4hi4wE0+LcBfjj5A8afYGeBeZzEPNV7sw3N7J73pARYr1K5i4wRojqzK3Dxd7K3lecJylH7re3dPicTH5u/b/9t9n7p9aWiSwK55f4WRu3YhE9vvLcRALD5/mbWfdnCJoePqRyOItoIngVRbDyIfY/28Ta2I9Pw0adHeZuba2ZfmM2oHRMlwNOqZnOhlNjzqbK3HSgWHiQ+wIsU90QguUqazjo65+jTo2i/tT1uJtx0uzzxKfGM2k0963zkmRA5ZegUT6Y+e2LxMSQwhyg2IseVaAsuvc6FuMk6C9OH76Kr3lVHRaPXQKtnv9XpjTTZ0ERoERhB9xsdfGAw7r+7jyEHxGtFiE2KxcD9A+0uDhZeWYgB+wZYnxDA+OuKMtV+a3uPsljHJcdl+/sAUWxEzuTTk2mP6w162uOmeMK2gZD8ceUPoUXglKqrq+Kt5i2rPs74FsQmxto8pzPoGF2bhAzs/UYti+iKjePPjmP2xdm0596mvcX8y/Nx4vkJq3Ni2Ipis2B8lvwMex/tdUkud3Hp5SU029jMYdFgb4coNiJn/d31tMf77O3jVjnEmLXWVTxpFcYXE05OYN2nzRb66DOdQYcWm1rg022fku+WIaYP4mfJz3A2zn35orhg9c3VtMfthXMLsRVlOWeHbR1Y9bfMMyVWppyZAgC4kXBDYEmEhSg2HsqFFxcctuFSGVl4daHZ+7dpb/Ei5QVmXpiJmedncjaPO/GU+jHfn/heaBGs+OPyH1b+GLff3EZ8SjweJj50KvovO/IsJSO77Zu0N2i+sbnbFyyu4sw9Rgil19JCc//dfbfM687PmqpNxa03twSZW2wQxUZAll5figOPDtg8/
89t66J1d97cYTw+RVGMK2I7QmfQme3b1ltXD002NMGy68uw7MYyvEt7x8k8YkTorLJMI1UcwqHR7c8rf6Lv3r5mx6afn258LfZtFLHw+fbPEZ8Sj6/2fiW0KE7hKOiADj4XFLff0pdBMcDxb5jr3/n2h9tRd11dXHxxkdNxbdFqcyuz9/89dL32mqdCFBt38/E3ffnlZcy6MAvDDw/HhBPW2wFHnx7Fz2d+tjre8b+OjB3DJpycgBpra7gkrin2bkg6ilmEgSdSfU11oUXgjTdpbzDj/Ax029mNdW2sR0mPzN7ffXvX+NqWbxgf2PLpuZFwg3GUj5A03dAUd95aL1hINI41jqrA2/x7M9ClFlxZYNLcdeVr7LGxSNQkYujBoaz7Xnt1DcuuL2Plr/b6w2uz998d/471vN4CUWwEIjOlP5CR38Eyn8W/d/612bffvn68yWWPM3FnbJ678dp793SdWZV6EstvLMeVV1fQf19/1wYyeRbwmZrAkqT0JNrjn2//HE03NMXA/QPdJguBX+JSbGcdtgcTReXPK386NbYjktKT8OOpHxm3N1AGdNnZBTMvzOTOWpvNIIoNTzjKkWBpGjV1ItTqtTjy1Ha0ChP/Gj4YdGCQTWsRSXDl+biafVoonyVH0X98ZaXOLtgr3eJuTOs9sUFPCRupZysIhI55l+YZX2eW4CCwgyg2PJGZZdOSzJv/vljzFa2ps+XkM+4z47Ol8urKQotAYIhlhI23hv8zcWB1VHSRQE98Sjy+PmS76CQAWj8+vqIo2SgIZniQH+2Sa0uMr489O+bSWN5ubbYFUWx4wlHYpmVehO0Ptxtfb7q3iReZCNkLd4SoOvJ5EAtk5escTLZ+HNWs45J3mndO9WNqTey/rz80eo1VRBFdhmg6tAYtktOdc5ynKMpYgJYttoJEmGZX9jaIYsMTu2N30x63lV9g6tmpHrOqzM5hhJ6EVML/zzt6Y7TxtatbUYmaRKf6MbEOuCsyxdtg8lv3hK0+phFPJ5+fxMSTE62uZct0F7Zos7kNov6Oot3WXXd7nd2+B58cxMwLzqXOYJuY09shio2bsRfe3W+vME7BbBHTnru3w2U+GL5Xb5YPwR9O/sCqf51/6jg1L5MtNncoed6CaXQNE2X1r2t/8SkOJ1x8yVyxNbWe03Hg8QEsvLLQ7Ho3UAYsvroYz5Iz8hKdiz9n1c+Ri8HT909pj9tyjjclLtk5p2pvhfzaeeDU81NO9bv48qJHeMEPPzxcaBHcjrPWBFc59PiQ030tH+arbq5iPQab6tOpOnNzuKMtVa62sZgoNo/eP3LYhpDBrAuzsDt2N5qsb4Lrr687bP8iVVwFR+ke8lzmqBl+aDjmX56PYYeGGa/hXTG7MPfSXM7mMGX9Hcd+ReNPjKc9/uDdA67F8QiIYsMx8Snx+Gqf7WRbjlZAYswyS8iwJrB5yHOFK85/ltlVL7+6zHoMeyH+rvLt4W85GYfJVpSz0TTZkdcfXmPkkZF4kfrCLOkiF9irM2aKK/XG6HIC8cHhJ4fx6bZPAcDqe+IyYamtelymvE9/T3u8686unMnhSRDFhmOabmhq97ynpPEnWLP/0X63z+nK9bL8xnK3zW/LF8Oej8aDRG5Wk94a7SUUJ5+fdHkMW3/3+FRmCRPpime6OjerMRhe9ynaFBgog1VyPHdHttrbrrrz5k6284skig2HMFnRZ7cLzJ1cfnmZ1/GFUErdXc7B2evTlpyn4sy3Zd+lvcPJZydhoAy0mXX5/LzZNfRVCA4+cc0Pj2l2dToev3/s0twAWEU2Ra6MZNx2+jnXLWCmv9Hjz47j8+2f223f8b+OnCxyPAmi2HCILecvgnv4cteXXhfeyKUizCRFv5XyRjmO5gCAZTfow1T77+tvpqx0/K8j+u/vb9P/xhnllGnOlN/O/cZ6bIJzOOtnmMm9d/fM3p98fhK9dvdi1JeL7TM2YddsFjwrbq5wRhwj44+PR6vNrYy/5YH7B
zKq5O1stJWnQhQbN0OBIgoQj/C54hfC2rYndo9b56P7jI7M6o+THmPOxTk2z1dcWRHlV5RH151djY6mtrb1TsedZiEtO9bdcaygEVwjRZuC8ivK49ATeqd3ptuGv1/+HR90H0BRFJ4kPUH/ff1x/sV5LkW1ixDh06aVuS0ZsH8AdsXswtYHW/Hk/RNMPDnRfYJ5IESx4RAmK8c3aW94dcgk8IcQNztXfA0yufPmDiqvYpYx2rLUx9l4+4kmATB+4Fx9dTXrjY2fCpPQVldwNnma0Bx4dACHnxwWWgyH1Fxbk7Oxqq+pjhnnZ6Dl5pacjekqfAQQPE56jGXXl2HHwx0225x4dgKjjo4yvt8Zs5P1PHMvzsW1V9ecktHTIIqNm9kVsws7YmxfwATxsjuGPumi2On4X0fm+XAsDDZrb6912GVv7F6HbZhiK7qDK6L+juJ1fD5I1CRi+OHhGHpwqEu+J56Is1s3x566VorAFm8+uFZPjY5Wm1u5Zato8bXF6LKzC4YeHIrN9+hL/ngLRLERALrkTQRu4LPYnT1TsVhhm+DPmSyyj5K4yxET7hfO2Vi2sIxgETNXXl0xy0Xi6VGV7opgG3RgENnyt8HhJ4cx4eQEvEgRV/4hLiGKDYe4ozaPN3L4yWFOQkwB88q4BOCf2/+waj/s0DDWczxN5u4BUjy4OGdj2cKTbugnn500+xvGJMYIKI1nwfVCxNsiWvm2jgoJUWw45PfLvwstgqDEpzDLUWFKwocEDD041Bg946qPhTPZddnAZYkDd2CZpE8s2EqY547v156js9gokbMEdsXuMr6354chJGK0jnCtiLg79QLfbLy3UWgReIMoNhyS3ROFOVMzxrQysAQSPHz3kEuROOfe23uOG4kIT7sZuyNcn0sLk7upFl5NaBFoYZJKwN3sf5wVeSeXyF0ez9Kx3lnEYvlZfWu1zargng5RbAic8c8ddtseAHD6eVZ4ryfkoPG0qAJPU2zcYbHxpC3jhVfMq0qL9e/JZ5i+s+yKybJ05fbL7fJ4XDluV1hZAd12duOsVpor1Fhbg1MfObFAFBsCp7Cp8ZKcnozfzmclTWMSgSM0k89M9qiQ4TRdmtAisMJTCsG6C0s/kfdacfpFTDs3TWgR7MKFlWT0sdEcSJLBlVdX8OPpHzkbzxVab24ttAicw6tic/ToUbRp0wZ58+aFRCLBli1bHPY5fPgwKleuDJVKhWLFimH58uV8isgpj5NcT+Xt6USuikT5FeWN//ru7Yv++/qbHcv8Zxl6O/38dI+I+oj6OwoTT050qVCfu9j7iLtQbHew4+EOfH/ie7TZ3AZ3397Ft4e/xZyLczg333uCdZAOTw/T7bO3jyDzquVql8fgOo+QmAqzfnf8O7OSI2K1DDLF9Y1HO6SkpKBixYro3bs3Pv30U4ftY2Ji0KpVKwwYMABr1qzBgQMH0LdvX+TJkwfR0dF8isqINF0aRh0dRZtVUy6Ve+zNkk/YJiPsvqs7T5Jwy8Z7G7Hx3kb82/pflM5V2uo8RVGMU/0TrIlNikWHbR2M75dcW4IDnx1AmG+Y8djrD68x+8JsHHvGPmdJpVWVoJKpPK5+1Nn4syi/orzQYtCy/u56oUWw4n9H/ocx1cegQ/EOZtZhZ1l4daHjRixI1CQ63XfNrTWcybHtwTZse7DN7NjY6mPRtlhbXHhxARVCKiDdkI5RR0dhZLWRKJurLGdz84GEcpMnk0QiwebNm9GuXTubbUaPHo0dO3bg+vXrxmOff/453r17h927mSVHS0pKQlBQEBITExEYGOiq2EYSNYmo808dzsYjEAgEAsFTufTlJcil3NpGuHp+i8rH5tSpU2jSpInZsejoaJw6ZbugmkajQVJSktk/Phh0YBAv4xIIBAKB4GlUWlVJaBFsIirFJj4+Hrlzm3uv586dG0lJSfjwgT6ccMqUKQgKCjL+K1CgAC+yfVn6S17GJRAIBAKBwB2iUmycYezYsUhMTDT+e
/KEn1DO6AjhfXwIBAKBQBADVXNXFVoEm/DqPMyW8PBwvHhhnu78xYsXCAwMhI+PD20flUoFlUrFu2wSiQRb225F261tac/3K98Pi68t5l0OgrhY0GQBovJGIUWbggBlgM12BsqAiisrulEy7+NaD/McQgP3D3SqthUAlM1VFpNqTcLCqwux79E+LsTL9syoPwPfHvlWaDHMGF1tNNL0aZxlmy4fUh7XXnOXy6pXuV5Ydn2ZU30PfHYAjdc35kwWUzqW6IgG+Rvg/rv7mH1xNgDg+5rf46fTPyHEJwSzGsxCZFgkL3NzgagUm6ioKOzcaV6Ofd++fYiKEkdF3iLBRXCtxzUkpyfj9YfXePL+CdL0aWhaqCkAEMXGgvE1xmPymckAgIlREzHx1ESHffpX6M955AFfbG23FUWCigCAXaUGAKQSjzeOCsreDtZh6382+RNARnRUw38bshpvbau1kEqkmNlgpmijjGxxrcc1UcrcLKIZcERoKczpVqYbgIxEoGfi2UVo0jGg4gAMPjDY5XEyGVFlhNOKjWmEIBfs67gPPnIfBKmCjMfq5a+H6uHVUSxHMfjIfdCpZCdO5+QLXhWb5ORk3L+fVasmJiYGly9fRs6cOVGwYEGMHTsWz549w8qVKwEAAwYMwPz58zFq1Cj07t0bBw8exL///osdO8RVH8Vf6Q9/pT8igiLMjnct3ZXTEDxP5Gr3q1h0dREaFWyE4jmKo3OpzsZzLYu0xM6HO40KzrR60zDq6Cjj+Wrh1TjJN8E3Pcv2hFQiNSo1BO651uMaktKTEJcchxI5SnAeOk8UTYIzcBkF1L5Ye87GcpX9HffTZmeWSCQoHyo+JdoRvCo258+fR8OGWSupESNGAAB69OiB5cuXIy4uDo8fZyW1K1y4MHbs2IFvvvkGc+bMQf78+bFkyRJR5LBhQrHgYkKLIDgSiQT9K/anPecj90GHEh3QpmgbPHn/BEWDiyI6Ihp/XP4DJ56dwKRak7A7hllYv1CUylkK31YVl7ndHu2KtfPYTL6BykAE5nQc8untNdry+efDs+RnAIA1LcW5cDrw2QGhRbDCT+GX9YajSyTEJ4SbgQDUL1Cfs7FcYWu7rZyUnBATvCo2DRo0sJsxlC6rcIMGDXDp0iUepSIIjVKmRNHgogAyVs5DKg3BkEpDAED0mYfF+mCxRYXQCh6n2JQP4XeFKJPIeB2fSza02YDjz44b/RwqhFYQViAbMN0WiQyNxOVXl/kV5iN8FAzlUoluVKARZ2M5S7NCzbzS8kzssQTO4OIHIvZU3kqZUmgRWCGWSsJsyPSdYQrbbaqvK3/Nqr2QlMxZUmgROGV58+Vum+uzEp8ZX3OlkHA1ztyGc0WRmTxzgeltEMWGQ7zdJO4ILtJsu/ogbh7R3GUZvIlAZSC6lOoitBhWFAosZPOcqfMiH3jrzdwTkEndZy2rkacGp+NFBEZwpow0LMjO2Z0vupbuKrQIvEAUGwJnFAh0PTmiAa5ZbAZWHOiyDN5E00JNMbLaSKHFsIJuETCiygirkG4+qJuvLu9zEIRHLuHW06JkzpJetXhd1WIV74sIoSCKDYeIwbQoJH3L9RVaBM6cBL0FmVTGeT0XS2rmqcm6D50v1ZJrS5yan+3DJrv/Tj2JnOqcTvfl2jr0XY3vOLm//K/q/8ze/9H4D7NtM3cQ4hMi6jw0rkIUGw7xNP8LrlHIFC6PUS5XOZf6S8klTUvbovSJJbkg1CeUk3GS0vmp82YK3RZYyRze5cfiTfDtSM6UcrnKIYc6BycWmy6lzbeG6+aviwlRExj15SJNQasirfBX9F8ujyNmyFOAQ6ILeUZYupipl7+eS/39lf4cSeJd/BD1A6N2cxvOtTr2c52f7fbJ5ZOLtTxcmvTZjOVpUW3ZHWcDEqbWncqtHMEZcnBx3Sqkzi8AuQiumFp3qldGQplCFBsOYWqx8JHTl4cguL5NwGWeCW+C6bVplvvjI44sGiVylGAtT5lcZ
Vj3cZVLX16i9SkgW1Pck0OVw6X+CqkCPcv2xICKA5zq36JwC5fmp5MHED6xY+mcpQEAAQr7mc6zO0SxEQBPDMH1BA53Oiy0CB6PSs6+7ppCpsCQyCGs+oyrMY71PK5iy9fImxxCucIZZZVLGhdsjG+rfgtfhS/rvrs77LZSQLj6G+cPyM/JOM4yu+FsfFbiM6xutRoAcwUuMjQSJXOUxNLopXyKJxqIYuNmauapKfokdJ6KM1sihCzaFWuHCiHsE8DVy1fPZrZpW3AZjeHtFpeOJToizDcMnUt2dtyYI/5o/Af6le/ndP8QX9csp65YRvzk1lZHrmAi19qWaxERGMHpvD5yH3xf83vk9c+LCVETjFtJ39f8nlH/GQ1mYMMnG3hJWihGiGLjZn6r95vQIhCcpFbeWkKLwCuti7S2UhJaRDheETqzqgbEU4JE7AuNIFUQ9nXch/E1x7ttztx+uTGs8jDW/eY1mocWhVvgqwpfuTR/j7I9jK+3tt3Kqi/ddqo7KR9aHv+1/4/TMU93OU1bgDJAGYATX5yw23duw7mcF8wUO0SxcTPB6mChRSA4SeGgwkKLwCt0igYfeS4aFshITtanfB9OxgtUOq4nZQ9P2IoS2reDKQ0KNMC0etPgr3DNid/UB6tIcBFWuYe4iM5ki6vRnI6w9/d3dP2LJRmgO/GMXwuBQOAdOh8UNnlEmJrfud5S8fatKHdTKayS0CJYkdc/r9Ai2KR7me6CKFOmiCUsXiwQxYZjmNS5GV55OP+CELwCd25d0lpnWOgMBQMLMmonti297Gamd4QYHUxbF2ntUn9Xk9Hde3vP+HrjJxvRu1xv43vTbTOhWNLMueSW3gpRbDimTr46ON3ltN025UL4NVsS+EGILYvGhRpzFhpdO29tTsYxxZnMsJkWFsvvU6iq22q5WpB5xQrfmaozQ5bZ4Goduu5lurvUP1mbbHxdIkcJfFnmS+N7hVRhLKzarXQ3p+dwJfeOLT83of2NhIIoNjzgp/Czm0+lQij7yBOxw8dDk5Dx8F/bci0nY4X7hTs1vz24zLYtVOZukn7BNv+0/ofzMRsXbMy6j6tbPWz75/PPZ/be0uLjK89SJFQyFarkroIzXc5gdPXRTsvYqkgrp/vaYmKtiZyP6Qnwq5pnY9a1XofG6+l/wJ7iCMiE5c2X423aW0TljRJaFN5pXliYyuHurIhMh72oIVPFh62CYKk08W0pILDHVUuJWGBjbf2yzJc4+Pig3Ta+Cl8sbLoQoLKsJc5GBzqiRrjzVcqDVcHcCeJBkDsJT4hl335fx31ouqEpb+OrZWo0KdSEt/HFRMXQim6fk8vtL1cKCtpiYtREp/taRgi+T3/vmjAE0WBvW1HM4fUnvziJAGWAQ8UGEJ+vGB1Fg4oKLYIgeI/pIBvBps6HM9sPBPHApXXPsvieK0TlicKFbhdQK1/Wzb16eHVWY4T7iuPaFPOD1lPhIxFcZpoAZ2C6QAhQ8luqoElB9y4CQ325KVDraRDFxgPxhJUCgRu4DGXm0ofl68pfW40XHcGuCCxRKLwXPrYVf633K+djMkXIXEd18tURbG5PhWxFCUQ+/3x4lvyM1zkqh1XmdXyC92Prhu6jsC7k6kgJG11tNGrny3Iyt/TJcTXRHsG7cal4sIt6CVdKuDOWrI4lOnIyd3aCWGw8EKETkjXI3yDrDcmN5vFEhkbi85Kf8zJ2h+IdAGQ4ZHYr081u9uaZDWbyIoMjMis3E+jZ2o5dSQOCbT4r8RnrPs5uR89uONupft4AUWw8EKHDU4V6ABH4YVXLVfiu5nes+jA1zY+vOR5rW67Ft1W+tTpnuQrmo3wDE/j2q/B02Pj0iRWxlM1wpKTUz1/f6hjThaxpEsOupbs6FVbvLRDFRiCGVxlOe7xEjhIO+wppsfm5zs+Cpw8nMMMyERrdzb15hP0QdolEQqtI05nm1TLrRHdyqRzlQ8vThqxbjmGa3dWdkC0wcTEocpDQIqBv+
b5m77lSjBylbmhfrL3TY/cq18v4enQ15/PpeANEsREIulVQjfAaWNR0kQDSMKNF4Rb4pOgnQotBYEjNPDUdtmGiSDPF1QKv7zTvOJGDLabp8Qns6VW2l+NGLCiTk5tM22xpWigrLQYffi2WyhIdjQo24mQuod0VhIY4DwuESqayOrYkmlm9D6G3okwJUgqzfUBwD22LtsWbtDdWx5msYOm2n0wpEFCA9ZjOkkudy+Y5f6VrlaizO55Q9Z7JteVqRXJbNC7YGBVCK6BLKcfpFugUErFso3kSxGIjEIUCCzld5ZhpbhqubzimP7BZDWZhUq1JyB+Qn9M5uIBEg32Ewf3QUbRHbr/cTk/foEADu+d95D7Y+MlGp8dngyeGlguRENIdiPVvYc/K4YolpXnh5uhdrjfvNclMyzxkd4jFRkDG1xyPdXfWse73WYnPMP38dIftnFWcbGHqiyDmbMPZ1Qwb7heO+JR44/tWhbmvPZOJS6G3Jpj65ZDisObMbzRfaBG8Bib3BHuWkbr56rKec0ObDbj2+hqiC7HL72QJU4Uof0B+DI4cTHzGQCw2ooGJP0QmTMNTuU6SNThyMKfjeRJc+xHwwd4Oe/Fbvd+M70vmLOmwD5NtTZXcetuUq4zWefzyGF/zVWsHAP5X9X+8jc0XrvosEbjDmcVSyZwl0bFER9Z9WxZuyXquTAZUHMBphnFPhVhsBKZpoabY92gfepbtyfnYXO7NVgqrlG1vtNPrTzdzLBQrEokEzQs3x803NzktflckqAiUUiXSDekO25YPKY9rr68xHttU+TZQBqfkY4K3buuIgep52JXScIS9rSq1TI00fRrrMT3JT0WsW3WeBLHYCMz0+tNxqNMhs4ysDmH4G+XyB8LV1oMnUjy4uEdVZB9RZQTnkT6jqo1i1I7OKd4epqtZPn0ECgYW5G1sPnDkeC0m8vnnc9tc/Sv2523s7LqF7Y14zt3aS5FKpAjxCTE75sjpUgjsRZWIDa6tK2QFxe93MKnWJAyvPJw35WPXp7t4GZdPupftLrQIgmHPuiKXOLfJwERpqZbbvNwByUjtuZCtKBHStmhbHH5y2OZ5pj9uTzK/ckmlsEqM2k2uPRnPkp/hzyt/8iyR52Oq2PQr34/TsT8t/imn45ly8LODHlnh2JMshFxjT4nmU8FuUbgFb2MT3AtRbDwQIUymYjfTWmbZZULBwIJoW6wtglRBmHp2qs122VVBtEXpXMy+azFcM56o1GTW1/JaRGoAtbxehbLUmjr0C1VmxNPJvssCEcPVDyqPfx7HjRgi9hBCZ7YxMi07ZXOV5Vocj4HptcY0KaSpM3DBAM/yaxELnpDwjk+4jub0NMjWt+tk7yvIy/FT+HE2Vo+yPTgbiw+IVcUxdN8RY8XGiZut0BabMJ8wQednS7AqGFF5ovB5KX4qrfPBzk93cj5mVJ4om+fcqfSI4Z4ipizzngSx2IgQ09weYkHsJlHTm5AYbkhihKuVoL36UtXCq9k8525WtlwptAisaFWkFabVn8Y6skwoepfrbVUWwx5MlRJ7hSIdbdOJ8d7JFuK07DpEsREhTDKwsrmhcIHYw72Ftg54AlwpNoUCC9k816tsL/wQ9QMvK3m2uDMMOTvyTZVv7J63dDLn4h7iKIkjl1WtuUpCyZavK39tfC12FwCxQhQbD4UkHDPHVLHheo86IiiC0/GEwhWzNtO+CpkCHUt0dLvi7Q00j2gutAiMYVJiYFjlYfilzi9ukCaLQBV3isD8RvNRK28trGzhXstfuF84KoRWAAD83uR3t87tLRDFhuAxjKk+xuY5qQuXsqPSA94SeuuKYsNnVuDsTtXcVbGz/U5EhkUKLQpjGhdszKhdm6JtjK+bFWrGlzi8UCS4CBY2Xcg4fQSXrGm5Btd6XEORoCJun9sbIM7DBI/Bnu+BK1tRYt9m4wpaSxbNocJBhRGTGGN2jO/KxNkZlVyFAoHea+E62vkojj87LurCuQTvgig2H
sahToeEFkEw7G0xEedhx7iyRfdJ0U+w4+EOdqU/CAQAOdQ5zCw3BALfEMXGw8i0LpBcB+Z4o/Nwt9LdOB3Pla0otVyNFS1WcCgNIbtC7l0EvvEO54FsSLlc9iOnhlce7h5BRAJffjB18tXhZVwmROW1nc/DGcgDRZxUD+e2OjaBkN0hio2H0izCviMelz4RQj7cTbG3xcTX9hPXVbL54p/W/zhsQ5J9iZMvy3wptAgEgldBFBsPI/Ph5OhB7iu3n+/B2zDdigpWBTtsbxnV8Vv932jbCRkRxUZZY5LUi85iQ3eseHBxxvMSXMcdCdlcLVwqk9hOmicmigUXc9hmYtRE/gUhCApRbLyU1kVaczZW00JNORuLLz4vmZWGnkmNrKGVhpq9jy4UTduuQkgF1wRjiKsPt1zqXA7b0Fls6BSb8TXH4/OSnzOyAhGsEWMpB3vZor2JHOocDtsoZUo3SEIQEqLYiBS5hN6vO9My4agOlELG3SqwVt5anI3lCvZuzmxLPvgr/M3e23I+5vJ7tMeEqAku9c/l41ixYdIGyHg4fFfzu2xdHNQVgtTiLj9ij4YFGjJq5+nO+tklxUN2hSg2ImVXh112zztKLe6NZGbjpMPetk3xHNZbK7n9cjucr3TO0swE44Dcvo7lcZXsXjWZDfaSQXokDPWQ7BKWvbntZqFFIPAIUWxECpM6JVPqTuFkroVNF9o97wl5YcJ8bZv/nXWaZZpdlS9CfEIEnT874ysJQ6hPqFN9PynyCcfSuE6QkpkVyVYGb0+qOM4EbyiWSbANUWw8mJp5anIyjli2mlzB003jdL4upXO5z2JEMOeDVo/KuSs71bdaHvFUOM+kcFBhl/pbOh8zKdTrbthYPT1hsUZwHqLYeBim0QnuWtGLQWkoGFBQaBEIBIesarFKaBEQERjB+ZiWUVGuOCPzlU+JTV0lMdzTCPxBFBse+abKN2bv2d4MBkcOBgCc+uIUxlQfg+9rfs9bzR57qfLFsLoRopZOdkhoR3Lb0OPstxIZFom8fnkZt/+i1BdOzmQbW3lxMrdfupXu5vQ2G4HgCRBvQh6xvHmwVUoGVByAARUHAAC6lu5K2yZAEYD32vfOCWhCh+IdcOLZCdpzKrnt4pNug8Pn75yGc7gbjCBK1rZc6/IYbJ2t/2v3HwBmIcdARu6k5hHNWctlj17letHKHeoTio2fbMSll5cQlTcKe2L3cDqvGMi8VxIIxGLj4ezpyM0NylaY5y91fkGgMpCTOVyhb/m+nI3VqGAj2uMBygDO5uAavqJ0vNUqVT60vMtjfF3pa+Tzz2e3zc72O7Hpk0041/UcIoIiWI3vriSanxT9BDKpDAHKANTLXw8KqcKmFbZUrlJukYkPCgR4b4V0AjuIYuPhOPMwpiuuaGt1KpbwT3tRT45gut1SJXcVp+fgG1sWOwJ/5PHPg90ddtttUyCwAIrnKM7bFjEX/FznZ+uDNnaXHSlyfOOKssf0Xkj8a7wfothkQ0ZUHSG0CKLkq/Jfmb13p2+R5VytirRy29wEayhD1mtHyTDFhhh84pwlMizS6b5MlUtP/n4IzCCKTTbEVvr+8TXGm71fGr3UHeLwTtkQZhl0hcxGarklNLn2ZIEkIQDA6YcJxtdki8N9CFXuYGTVkYLMS+AHotgQjFiaaJmm4Bc7nxb/1Kl+QvqfuCtLMImKoufJ21Tja1sr/Em1JrlLHK9C6GuO7u/JpL4cwXMgio0bEYMJlE2OCzZ5IcQM08rEYnekXdJsCSqEVMD6NuuFFiXb82nxT+0qzL3K9XKjNE4g0kvdHaVFCN4PUWy8kJaFW9o817ZYW7P3YlReqodXtzpmTymcXn86n+KIhhp5amBNqzUoldNzI1c8EWcUXlulCfjGtMo9l7gj5UON8BoYWmko7/NkWqa/rfIt73MRhIEoNl6IvUrXlgqCafZiT916io6I5mQcsVtsCN4F1747sxrMQl5/5skB2eAO/7Op9abavXdxj
b/S3/iaJCz0Lohi44U0LdTUqX62ctm4m+yuYPgr/B22ISUm6KkWXg1HOx/lfZ5catcWAf3K93O5fpMlQjnecoUrod4HPzvIuC2d9deVaCyC+HCLYvP7778jIiICarUaNWrUwNmzZ222Xb58OSQSidk/tVq8OSLY4K4HdrVw20X4LB2ETfe0pRJx6Lm0zoUcuSd5Q8FPAKiex3q7zhnEnH/FGUrkKME4868rWG7psmVY5WEcSZKFpyi7BtNYehN8Fc4rNsHqYKf7ErwP3p9k69atw4gRI/DDDz/g4sWLqFixIqKjo/Hy5UubfQIDAxEXF2f89+jRI77FzLaIMUcHnQKY3z8/7/NaFhUtmaMk73NmIoSvU4hPiNcl/nNXxI0Yty7YZj4WCluKjSvIJSyiCD8ukpoWagq5RI6oPFGcy0MQFt5jSmfOnIl+/fqhV6+MKIEFCxZgx44dWLp0KcaMoU8TL5FIEB4ezrdoBA+Cq2yh4X62r6uc6pxY3nw5Hic9hkQiQYMCDTiZ01W5+KB3ud5WRVq9AXdZRV2xLogBvYAh13woNs7cH4JUQTjT9YzNvF4Ez4VXi016ejouXLiAJk2aZE0olaJJkyY4deqUzX7JyckoVKgQChQogLZt2+LGjRs222o0GiQlJZn9Eys+Mn4c8AIUzMsqHL37yu753xv/jiJBRXCo0yFXxRIVC5suRKsirTCiiv2sy1VyV0H74u3Rrlg7t6Vez6nOybqPq6nvxZB6QMzIYfs3NajiIDdKwg9JH3SCze2qYtOlVBeOJMnwSyIlFrwPXhWb169fQ6/XI3du89wEuXPnRnx8PG2fkiVLYunSpdi6dStWr14Ng8GAWrVq4enTp7Ttp0yZgqCgIOO/AgXEmyV0cKXBvIw7vuZ4x40+cvJ+gtn7UF9zk3q9/PWwtd1Wq20Zd8LHdkKtvLUwta57oy6YUDdfXSxptgRAlv/PZyU/c9jvyzJf8iqXp8LVteMH+q3BD88+x8DIgZzMwRe5/RzngnHFssVq24cGPaV3qb+rfmFEqfd+xOEtakJUVBS6d++OyMhI1K9fH5s2bUJoaCgWLlxI237s2LFITEw0/nvy5ImbJbaN5U02RM2PstC8cHPWfeY2nIt2xdqhe5nuPEgkPMubLze+tlTexMQfTf5A8RzFAWSE6y5utphRLg+VTIXaeWsDcK78Qr4AYYsdih3T366pf5cuKdLlsXuX6+3yGPbg3WdEYL2gV9leKBxUGCqZc7l1iGLj/fDqYxMSEgKZTIYXL16YHX/x4gVjHxqFQoFKlSrh/v37tOdVKhVUKv6TRzmDuxxC2UYzaXR6NCzYEA0LiiO8mw+q5K6C2Q1nQ6vXCl6xmCm+Cl/UzFOTcfsFTReAoihWpvTFzRbjbNxZtC/W3hkRRQ9XPjam32mX0l2w//F+4/srT96hYoFgu/3tJVHsUbaHy/LZg++tFVcVg97lemPlta3QvqsCVchh1v2D1cHY1m4bDJQBh58cRrmQcqz6e1skIMEaXi02SqUSVapUwYEDB4zHDAYDDhw4gKgoZqsKvV6Pa9euIU8ez6vlYVl80SCQw16gMtDsvVafPfLENC7Y2ClrlifB9iFWM09NDKs8zG21qLyBiqEVYdD5Qf8hw3Lz6r3GYZ/oiGh8X/N7q+P/tPrHKZ8qrlEZrO+ndfLVsdsnzCcMAFA1d1WX5g71DUXKvXFIf+Xab1MqkaJRwUYI8w1j1H5I5BA0LtjYaOkkeC+8b0WNGDECixcvxooVK3Dr1i0MHDgQKSkpxiip7t27Y+zYscb2P/74I/bu3YuHDx/i4sWL6NatGx49eoS+ffvyLSrvCKVOtCnaRqCZCQTPRylTIuXeOKTGZjgNn41947CPRCJBp5KdcK3HNeOxkVVHMq40zzc5dU2sjjnaAl3ZciUGVByAqfWmciBBhkJeJiDDarym5RoOxrRP/4r9MbvhbMikzGrHETwX3pdtnTt3xqtXrzBhwgTEx8cjM
jISu3fvNjoUP378GFJpln719u1b9OvXD/Hx8ciRIweqVKmCkydPokyZMnyLyjnuNNA0yN8Ah58eps33QvaUCQRXyXoYLjr6EONalhZQFteRmNz6t7TdgmfJz1Aml/17bD7/fBgcyW0ARFByd1zqNhtymejcPQkejFvs0UOGDMGQIUNozx0+fNjs/axZszBr1iw3SOVdTK4zGRvvbaQtgOmupGV8MbvhbOPrSjkb49KbA7YbEwgccPXpO87GqhhaEVdeXUGziGacjckUJtGNRYOLomhwUTdIY83emy8w7+B9fNO0hCDzE7wToia7ET4VjCBVEHqX602b6C3ubdZq05AegjWnPSuTc+OCjY2vc37oLKAkBG9F89Lc3+NdqpazsVc0X4HTXU67NQnjoU6HMChyEHZ9ustGC/H4WK07J55IVoJ3QBSbbMCbF+WNr3WpRTFl122cjXHsJyAGglXBZu8lXnbJfkh3LacHwRxnFw/pCfU5liQLmVTm9tIlIT4hGFhxoM0IIJW+KLSJFaF51YjxmJcev8X/1l/By/dpXIkJACD58Qhc411PCZEhlg2gY3dNsjFTGX/yTgttZ372JHQp7q+xxCWlJ+zGrmtxQotB8HA/tBI52G3lXHz0DmnPv0D6a+bbY+3/OIkNF55i9IarbMWzi9SNmo2nb8sTmEEUGzciWOpuvR/SXrREWnxrgFIKI4OTWOYlkcE8ZxGl5b+SM9+M5PhBQcg+SCg11rRcg1UtVjk9xuOEVGj1zMscPHyd4vRcdLjrtrj7ehwif9yHY/fsl5UheD5EsXEjQq4JtW/qQfvWfp4KsWAvz4dUKsGHp1m1YtLf1nCHSAQPwV1FMMVEhdAKLhXlrPfbIXRbcoZDidjhLsVmwOqLSPygxZd/nXXPhATBIIoNQXT0q9DP7nldsmlWV3IJE7hFl1zcYZsUjc6rtjXOCOhzR9JRELhGPK7xXojljc97boP8Yq9aeeIHrcdtpzkiWSNcpWVCFu9vTYFU9QIGjeNMtmV/2IO8QWqcHNvYYVu+0RsodF96BoVD/DC5XXnHHVzkbUo6p+NJiV5D4Biy3HUjaVoSAcOWhgXM61ntufHCRksCAS7mY5HAoAkH09vi80Ruo4Oc5cKjtzhxPwGrTz92aZxYhr4zSWncKuKxCam4//I9p2MSsjdEseEZXUrWjfa/K88ElMRz8FNmhcaOrjZaQEkInsI/rf7B15W/RqeSndw6r94gvB1WZ2Du+GuPZ+8+cDKOMzSZeRS34pIcNyQQGEAUG57RfyhgfJ2azs0NiCvE6iOQU50To6tOxIiKP6LjHxfx1/EYqzYGbRAoSgaDJjeuP0u0Oq/TG0RvIdO89t7q6u6mbEhZ9C3fFwqpgrMxEz9ocfTuKzx8lWyzDZcZioXmfRp3SQmdocWcY1h3zjWrE4EAEB8bXhGn2pDFo4RURIS4N3EYE7R6A8avykws9h4/bb+JPnUKm7VJuT8KkBgASoGNF5+iXL4gs/P1fzuM+KQ03JgUDbVCnEXv0l9FCy0CwQ4VJ+0VWgRGHLj1kpNxZu+/h+blrKt+u5PRG6+hc7WCgspA8HyIxYZH3qSkA4aszJ8KiY+A0lijF6nFJpnRHr4MoDJW50fuWOelePbuA/QGCnfiyd49gT8uPn4ntAj4+2yWlcOVrbHXyRouxOGdF0lpaDnnGNaeIdYdAj1EseGRf889QfqbWgAAfVoevE+lT29OcI1XHnJDJngf/115LrQIZuy9ES+0CLwzdddt3IxLwrjN14QWhSBSiGLDN5QS729NRWrM17j3glgPmMA2Ydd7Bhaepcdj0GzWEc7r3LCB0qscN2LBzedJaPf7CSw88kC0/lIE/jH9uSS55CfjGXHXqekkPQLBPkSx4RGxP2rEehtzNdDkOU10x4/bb+Lui2T0Xn7OtcFdQJ9WwHEjFrT/4wQuP3mHKbtuY+c1Ziv1FI0O03bfpnW4zq54Ux4hV/RbsTvbZ3LvpW1nb
gIBIIoNr4h9EX33hThvEP+ef+JS/0t2/B6uP0sS7EGmfVuN0/E0uqwou1MPXzPqM3PfXfxx+AFazzvOqSyezLhNzm9pGAT6kVOGjCSVSl0EUkwqxK8967zfiVgUPEcWmVRN1uc98zBB8Ggugvggik02ZsDqC1bHtCIIk77HocL1hiZLaqcFwlQ216U4TtXvLExrGJJcIdYkpDjvo3X1qTCWr5SYoUhPqAfN886ikIdL6H6zppj61HVedBod/jzJt0gED4MoNm5E5AYcUBSF2lMPouKkvUjXiSvnjrMM+/uS1bGbQj3cDb4waDPC0tPi21idfpGUhrsM/bA+pJsrnykiWW0T3AOVHgrNy5Z4n0ofabnjahwixuzAqA1X3CwZ/1hGfonV8kwQDqLY8IhlpWGxO3gaKODlew00OgMev2GWXt1dOPvdvRfZAz/l/hgk3xsN7dvaVudq/HIAzWYdxe+H7jscx5UtB4I5fPwsb8cn4dSDBO4HZsjgtRcBAP+ef2rlc2YQQbZkAoFPiGKTzfGWGi1nHtp+iIhLoZSA0uWw2+K3PXccjvLBwg+BbSQZIYuTLiogdNdX89nH8MXi03jyJtWlsZ3B0tr6NtV8a2eNyJViCbmYCS5CFBseefBKXFYPOprMPGp8feCW5xaYfGTnAXLoDjeZWcXM+di3jNp56zPjQ7oejxPcr0TQYbpV0mruMbfPf89isbLunLkz/vdbrrMeU6Nzn98dHwsRplu8BO+AKDY8kvjBs7z1v1pl7UwsBFw/fHsvP8+47Yd0vcgsPBkka3TYcOEp3qXSO1YKWcBQDDSddQT1fjuEK0/eCS0Kjt3LyoTNdSVsJlgqeFx8J7fiPFsxeEBCxLMVRLHhEcsHJJ8m1nSdAb/svIUT95mF/ZryKEH8liWHcKCLPH2bitITdqPmlANYcOQBdExDjdzAmI1X8b/1V9B3BXMljQ6tTnxKGxc8fZuh2O28Fuf2uT9YRBEK7Xhv7y/srFXL0mGXz0WbM/dJR4uR407cFwmeC1Fs3MiFR8y2C5xh1elHWHT0IbouOcO6b6eFwoQ/s2HpiVi753UcOERmmuxfJGkwdddtsxo8QrP9asYD+/zHa8hZp+izsW84k4mQwZrT4rlOANitj9Z7hXMJKi3z9YgtwePhu9b14kzRimiRQuAfotjwyLF7/K8SEj9oseNqnEvlGl4kib/W0k/bb9o976Pk/lK+LdICmvV/O4Rnb9lvPYnFB8Xb+HnnLbP3lrXLlhx76E5xrPLyvDOxrtx3sCVjy9qUmGpuoRGbr9ZTB78HEe4uE3hELrQABNeo++tBq3381HQdfJWu/mlFducCc6dCyzB7OtJ1BijlWcrQ1afvHCYGEwuPElLxyAkl5cFr84eaVm+AQkbWNlwzebu5ojN5xy3kDfZBy/J53DK/1ELrYHOtaPXmv4tMftphf2EhdkiEe/aC3NU8HDrnRG/9EV+0UyqBLabKz6XHb/HJ/BNYc0ZcWwp802+la/46XKDR6XHywWu3Rt3wxZM3qfjvynMrnxsAGLTmotvksFRsMmESem7r1mGlHInsHuNoGSbGgAACfxDFhmAD8d0I+Ir8ccbh2hs4fMe+X4I7+H7LdXRZfMapEGQ6hCwHUnfaIQylyXSdibty2tD52KRodKg77ZDDvmJQAA46kXbiZVKa3fPOfKqEZI1XKNzZEaLYeCHHOfLtefk+DQuPPEBCsnt9cN7a2BKaauHLYAsJg220NK1jZ8J3qe4L11981NwPQwwPGHfw7/mnZv+7yopTjzgZhw/cVcfpFE2yyrI/7OF2Ehd2qh3VK9t86RnrMecevG/Xf4jt7yku8QOqTN6PkuN3s5aFIDxEsfFgLOsFZUJX3JItZ2PeovrPBzBl121jenZ3ceA2fUI9LkNM/zVJWmYrvHSHG0OHLR1Qd1+P53wONs8iywfBh3Q9jt59JXgoM4Ff6LbRuMbVlAW2aDLziM3rk257/mVSGiZuu0GrE
M094LisCUG8EMXGg1l01Ha0xSEbygFTxm2+Znx9+qE4QoSZ5rdg4jyclJahJN14noh/zz9x0Nr9DFxzETefc1usk+n3dz72Dar9vB//XXluPDZozQV0X3oWU3fd5lQmb+HGc2bWmL/PPhYk1w5Thq61vZVmhgsGRUfV1F3xpbOVZdwyXB0Ahvx9CctPxuKT+cetzlXMH+S0DAThIYqNB3Pyge0tp17LnctXIWaS7eRuiWFZvmLewYwVWau5x+1GjQiZ/6KlE+n4H75KRoc/T7qk2PZefg6vk9PN/EUOffTHWX1avFs9QsLEwvb0bSrGbrqGQWsu4myMOBYLlkQWDGbUjm3eKNPCm3zuslqWj7A3Z2ZG5lQay7dUKr6oUAJziGLjZiwzeDKBoii8em+9yjnj4ObIxllRSKdLLph7kB/T8UEXLV/uZtg/l3Dh0VuXFFtnrtHszkI71tNMTNMJdFp4Chcf85ew01lUchnnY+67+QLlJu4xKn9CXF10VlzTI3T3V4LnQhQbN0NnEnVErakHUe3n/fiHZSZcNtE+fx5+wFYs0cLEMZgpQlRndoU3yZ6Ri4dP1guwtcjEshfz2tyq+OkfJ0UXdXOXYVJKNnexfivPIzVdn+X7J4BmY6D585j6kVX7eb8bpSHwDVFsPIC4xIxQxjGbrjlo6TxzDtyze/7BK88pIrfgCHdK2uQdzCKxxAIXzwy9HeVbR/eEEBkjN1x1u9WJyXpl703rMOb3NHmoYl6nYMquW4JYEXbfYOa0ftRBCQN7MPGBcxZbG0h0C0qtnmHCz2wSoehNEMXGi+Ey7fn2K+J1eLTEUdr4TM57Yd0kZyyCltizeHnKLlU9BjlbxADdT7Tt/ONYeOQhRvx72d3iMOav4zE46cAifOHRG1o/QCH0BNZTmnT4/RCJkPI0iGIjcuw5CDuCST4XpvC5yhKKL/86K7QInDJw9QW7db/orobY1+wru3tCQUG+kjlyDV2kWmY28Ys8Fs3lgi5Lztjc7k7R6NDhz1Posph9UV4+cMXqMn3vXVxzUw4iV0lI1qD57KP463iM0KIIClFsRI6tG8OqU7GOO3NoseFSSRILaSLzb3CVXU7kvnEmR1FvO47JP++4iZl777AeM7ti+quiKMrM58YTlhK2ynLQbbFlwufnsmWlttRrZu27y2rco/eEz9LNhJn77uJ2/HuHRYO9HVIE00P5fusNt853/hH9ts3Ru69AAahfItSt8nCBp2ydb7zATVZeOuIT7aeip+PYvde48uQdyuYNhNykiOaTN6lYfCxjpTikUXHaYooEczK3DimKQsPph/HIw5zVnfFl4mK71NU5HfkUeiIURWW7ene2IHceAbnxPBHP7ZjM6R46bG4kXNpYjt17DZ3JFsTQvy/hk/nH0X3pWfRYehYpdnLMEFzj2/VXOBmHbjXrrB9W299P4LvN5vWdlhzLCnkmIePMyCyOOWPvXcQmpHqMsp2JxolM1Hx+RlvZyV29HJ35nVx/logR6y67bVv0F4vs5Ux9Db0Roti4mcwf9ZM3qWg19zhqTT2I3detHXOfvk1FzSkHrI4XHbeT8Vx/n33Mafpy02iZ/648N6t9Q5fkylsYy2M0mtC8Tk7H1afv8Puh+2Z5VpiwziKs2rTGz7pzwq8cDR6gXJ2JeYP2f5zAfBoHVW/+TTmLo2v0XCy9XxKTK8G0xh4XPoWt5x3HpkvPMNSJ7d4UjQ73XzILvc8k01qayad/nGA9r7dAFBuBuP4sSykYsPqiVYI8e6nr99GEjdJx8fE77HeiUq4tkj7YtsqwfSh6En+zzB/kaXwy/wR+23MHX//DMJ0+Ayb+5749/kQbxUqLjNuJiDE7zOqCiZFLLpQQyG44m1eKifNwt7/4cXS++PgdLtjYyrdFvWmH0GTmUZcyVCfZ8XPydohiIxCWuUJ0ZinHKWy/aju82pbDHt/YS2LVc5l3RRhlR465WBVeKPuIvbw7ADBq41U3SeKdvEsVz
6Ll4WvntleE8OsxpcOfpxi3vfDoDRI+LhTFXFdMzBDFhidshWlnmjjt1TbawKOzqKvYWtXHOeGESuCXRwnm1xjvkW0CPTuYfCri8+McyRodIn/cZ7cN3ZYflzm0TPltt3MRd67qNe6MCjVVgpafjHVpLJ0HpGbgA6LY8MQRB5k5Z1iEG14wyVkxcoN4V5hbLz933IggCk49SOB9jg8i8ANh8hC9Hc+sUro/UiFH9jXhW3KHQYmFp2/dlzPouZMLKKaKzfITMaAoyqq9uyw+d1+w86vJxJYCk+aEc7c3QBQbnlh4hL4oni2v/R5Lz3pM6m5PkTO7I+Vr2WxC96VZfglCXRVMVtO2qj6bkgNJuK7ui/vq7lyI5RUweaA/TxR/MkSmisnE/27ShkxvY7igm73/LnovP0eraFz+WE3cFjefJ6HZrKOM5rEkPolYzE0hio2bWXYi1ua5laceuU8QF7jj5KqCK3rI9iBW3QWDZFsFlUP0WDzv+dBzbEWhAKCN9rOH0yUEGHwuJh+9iSwreiUHmFl4vBGzJIEM9IEpdgIdxEICi+CG8VuuWx0zvefFJX7A1afvrNrcfJ6E2fvv4eDtlzh0x9pi3+53+1FKtrI4M9lOEoPlVEwQxYYH3qdlWWUkMKCY5CkkyLg47V2kP2y7IUjhO7bM2S9scqtJihUAgFGKdW6bUygrlStREZYPc0fbo3SwCZlOtshlNGC1/TBXy0jATRef2WhpHyYKG5OK78Nkm42v80tcc6T2ZBYeeYhbcUn4bvM1vGBgCbjiwBJBR1PpeYyQ/ws+7HyW1xXAPqeLPamiphzEJ/NP4Ni9V8bfx6XHb9Fy7jFjm3QOt4BM0yjYwpZyqWdY6NPbIIoNx1AUhfIT9xrfj5Kvw37VKIyR//3xvP3+9iKPxIIzqfu5IgzC1M8pPHanIHlR7CVwdITltufq0+wtgid59NP5kaO070ysMZY5dyxRQ4MC0izF70uZtcNsKN7BMwoduMbVp+/QYs4xrDnzGN+su8zp2JkRVouVMzFMvgVNpOxzvDjC1eg+pnz511kM+xhMsYTH2kxbLjtWbC4+pr8vVvxxL+1xb4coNhxjmUBvoPw/AEB/+Q4Awt0WA5CKWHUXo4LlqfhIzC1a4eDfQTYTV3MCjZH/jVh1F1SRMI/scMVpcfKOW44bOSAlnT9H2i0MVqJMoCskyZaZij/N3ueWmD8oOssO4Zx6EGLVXV2eiwsKSeLxlew/+MA534oCkhfYrRyNDlJrn479t14aX+s4VuZvPDff4ssjof/90m31uBOmP7vMtBw7LNJzOFODzRYn7ju+x72zkcspu0IUGw4xGCiL1N3iWd1dU/cFAAyQ/wcpvMdT/rR6qPE1nQmaS2w5fjNlwEcld6NqEuM+7jYSObvlZsuaFUfjWJpppqfLrOvM/EzVGntjt5TZz8P0q2Kx8XV5CX1ggDs5qPwW4xR/Y7T8H6f6/yhfjlLSJ5ihXMCxZBncjGPmo1RbegMyWF8Hz9857wzLRWi/o9xIpnz7L/OSJ2cecrsQe5yQitn77Rf0jBizAw9fZa/yCkSx4ZB7Fvu4EgvFRga9ILVgmkgvmL1/qO6GttLj7heEAypKHtg8V2HSXo9Iow8AfviAYpKn+FG+DLlh24+Gy8/D5Nqja8Mka+q5WPo2UVMOmr3/dfdtlBi/yyzztikvnfAxY2qwMbVEOKKY1LY16T/VeMbj8IVMkvGHqiZ1Lq9LoCQrg28pyWOo4Ni5Nkp6A0sV05APjn21jtA4z9LRXHYOA2XbrI6bOjADGSVmFh6x/ds3ZcDqC7THA5GCttLj8GVg5WJTEmTjReZ5xzovOm3zHJPreP35J5i47YZRSW/7+3HMZuDz2GjGEcYyegNEseEQy20DuuuUAsW7ZcGSJcoZVsfmKP9wqwzsoQBQyIEkMwtTO5ntyIJ0nQFag6k1ilslh8vR1EjHftUodJfvwyLlTJvtbjPII8KUDwyuO8vPSFGOs
6Z+SNfbvWFHjNmBiDE7sOpULP48nPFwmraH/oHMJCzbEqbJ09hk7M73cYuEzpoAALHqLohVd2E8Hl9kfHIKv8oXYZBsC+N+VaRZD8PdqjG4o+7psM/fyp/RSHYZc5S/22xjMFCYsusWq8Ry/1OsR0vp6Y8KfsYV+PU/l83atJ1/wuXoq6vqfpij/AM31b0dtr3+zP1RcfYit/bffIGHr5IxcsNVLD8Zi38/+oy9JVtQtBDFhkcUFom+WknPgKKs92OFgskqTRgobFF+j1h1V1xSD8BKxRTjmUayy1at60mtTcEblBOxTznKqBQF4z1qSG7BFfWEy+y1fypnG19XlNre2lh6wrZTohw61JTeRCGJfWduiqKw50ZWG3sJ6Cy3a14nO7ag7L3JzJn8+603jK9tqSJMlC93EavuggfqL+0qMBES9/yWXydrbNZjay89js7ywxil+NcYfekMXWTWRXfpqCq1vfXReOYRmzm8ANt/9z+Uc3FGPcTMh2nAqgs4G/MG5X7YY/bQD0QK/lOOYywvl/CRwVqrN+D+y/dGpZ+OvivPm1ldRm9kX5j3wqO3SErLHooQUWw4xNKU2El22Ox9K9lpLD8Zi8dOFnLjmq/lm6yONZJeRGsp87omfFBWEotIk4d9HdkNO62Blcpfzd6H4h2qSu+iuPQZPpcdAgAcV32NdaqfEKvuysiUTsd/V7jLulydZguhkCQeLaRnEAjb5TayoHBf3R3/KCfjiGqEXYfkwmN3ov+qDPN8WUks7qp6fAy1pRvVHLqcHpYc5zAKJc6FKDAmRNMkQMvpQs6ancpxtMfV0CAY3Fjb0rR6VJ28H5V/2mf1YI2QxGOWMsvxOfM3XVN6E7HqLugp2814nl8Uf7ksa8xrB9cuI+NaxmfcfSMenRaeskojsEQ5HeWlsfhF8Rdi1V3QQmpdvDKVJ6f3p2+5v3d/tfI8msx0LjEfGzr8eRIVJu7Fudg3eJwgjmcQXxDFhkfCJO9o3885wEceGAqNpReQF8wfMoPk23BH1cNoWfpGvgFLldMxXznvY2irMIySW+en8YP9B17m55ZAgurSLJP1L4q/sFrxM/wlWfvqJ9Rf2xiFwmeyw6gvvYIG0suwfMw/4vlmcEQ1An8q5+Cqup/VuVxIxDDZJuT5GAXWSGpes2u+ch6jOX5W/AWphMIw+Rba87GOHkw0OMqoyoZAH4VT/VpKT2O6YoFDK+SdF++RYvGgrC11rLzZwleigeV1IoUBt9W9cFndH/+juZbZ8PzdB7OSBVqLPFi+FlGCw+WboEI6/lFOBgBMVKxEWUms8bwEBqe20FyxBLHFUQ4hy0XBn8o5Vm3KTNhjtuVvaT0XE3TJ/PjkswWnUO+3Q2b51rwNothwSLJFmfihFg+PStL7nM85TzH3435/V/ylnIGT6mGsop5UEi3ufUwhb2rBCZSkAKAQAP41+7cp6WYm0voy61pZN9R9bPo7AEAjWcaDnq74KL3FJ+thJIMev8gXI1bdFb8pFmGF8lcsV05Dc+k5sx7PeLQmOHogX1APxAjFBpxSD0Wsugv6y7ebnc8joXfeNU3KF4BUREqzzN102yh9nagcb+k0z4YcSMIE+Uqj1aRKoRxOjfOHci46yo6iG03+GUssk87JXHxob1NmOBNHS8/hiHI4Bpg4ww6Rb0Wsugu2KJ1zON586Rk2mTin0kWSWWK5RbNDNQ5KaPGnYhZi1N3s9rWlwPSS7TF7X5jHLThnojarS25Z3R8O3s5yFv9avpH1mOUkD5ELWU7u3lZKxpsLFxPFhkPmHuRecQEyHrxlJLGwXBkWlzxFG5m10+ZDdTeMYhkGanlDO6AaiVh1V1xT98UE+UpGYzizAviQrkeln/ahwsS9oCjKbhTQA/WXNs/pP17KPZedY+RFU1easUfdXHoWN1W90EV+yKrNAuVsuCtk39J5846qu92w4hpSekfKQKRgmGyT0e9m17WsB1Blqbml8LDqW6v+jxJSUUTyH
KsUv6CaJGuOutKr2K4cZ7b6d4XMqKhL6gHoLd+Ni+oBAFyvwvy9Yo3DNqssEhVWlDKLtrFFBWkMVEjHQuUsFJK+xCiF9TZfpPQhykpiMEb+N+Yr5qCz7JDZQ9MW+XP4YPGxrOuAiVNuH/kuq2N31T3QQnaOprU5RWwoLBMUq8zeh1so0m/tOL76IA3R0rPGaCRTnzk6hppkgGbKv6qf8ED9pc3vlC5fTqTkPs6pBmCDcqKVRaeC5AG2q8bjgnqg8ZiHBFwy5hbDkHxPhCg2HPKCgQbMdjWSF6/xQP0ldqrGYbEiK3omB5KwTzXKZr9B8m2oKGGuaNlbyfWW77bpLyCBwagUmVYoZ8oTiz3rK04m5hpkskouJ4112H6VcioKS+KwQDkbKoltM7VlQja2qdmdRSXRmYQVM7uj7lCOxVV1P4xQbMAR1QgA5pF6dKP401jkDqr+h7qy61iv+tF4bJVyKspJY7FDNe7jw4NepghJHPJLHJvWbUWApDOoi+OIWHUX/Cy37S9iupIHgJwS131hmEQV7VB9hwHy/9Badga/Khbjgnqg2QNVDh2CkIwG0svYpRyNspJY+Chk0JqkxS8c4utwHlfKQRxQjWTU7m/lz7im6mO0NNKF6Xf/WNPtlro3FipnG5Mg1pPZd3z9TO68v8l4xWrja9PcLSqYL7ryS15hi2oCQiVJqCq9a7RaZ1LBxMdvtPxvSGDwCIuNL9IwTb6Q0QLEMvLMmyCKDYcwyUOQqdhIYMAdVXfEqrugh4WZF8hQaGLVXXBSPcx4rKnsAmLVXVBdcguXPq5w7TFQ/h92KUcz/wB2uKzujyhp1pbODMUfiFV3wW1VT8SouyEAqei5zPGK0BJLK4+zmXZN0+EPklvnxaDjEI3Fgg7T7KyPEtj7oLjKUsVvjNqVlVqXTDBdZVI0npvX1X2xRfk9/lH+xEghATK2xS6oBqCZxVZdZcldHFZ9i+Oqr1FV4lxoro6j2jZd5Qfwj/InxKq7IFpqnnzP0leqldR2qDrfDJNvwjnVANxU9cJ9dXdcUX+F5cppKC19gqXKaThjUSssxF/ldhlt5X0JkHwwKnSWARFBSMaPH2u6ZdKcgcXIMfavj/omEZLT92ZFbwUrzBXm4yprP7uikqzcRRpk+XoNlP+HNtLTrC02TaQXcEo1BCsVU8x+D/MP3sPvh+7bdHCWwIA5ivlYoviNlW9QV9l+3FT3Rif5EexQjWMUBNJ3xXkrvy1vgCg2PCCHDt/I19Oey7xQY9TdjJaCSYoViFV3MT5Y1ih+NlNoLPlX9RMjOZrLzqG0lH1eEFv8rfwZseouUCEdHWQZCf4yP8O/yozVPZuEcnoDZZYj5fDdV2CeR9Z9ZGRnzfhczviguApdiDsTQvHWwmJD/91GSh+gpvQW7c3eFrkk77FIOQvlTLbLliunGV9vUP3IWFHKpLLkrnM3WQ29xaWmNKOkxELlbITY2faRS4S7sQ+Vb0GoJMnKCRgAckve4S+LGkRMwu9dJcMCSyG/5BX88MFh3pecSLLKEaTmKZXERPkKu+dzSugtqkxSW2Rsv3dBAckLq+SqdaTXjDWb8uI1akuvQQ3zv0WU9Aa+l6/CWPkaABSWKGcgj+QN6smuYYPqx48BCRkK12977mD6HvOweTl0+E2+ADHqbmgrO4kmsktWliRbSGDAz4qlZsfmK+c53CHYf+sFin9nvXXp6ciFFsCbSNPqHUYcfCE7hKX6FrTn2DxYhITO7F5ampGpc9uV53iTko7YhBS8TNJgaONiCFAp8ORtKl6912D4usuQSSWoUyzEqtp0r2XnsHFglNNyNZBewmFDJaf72yNW3RWl05biA6VGxJgd+G9IHZTPH8TLXKaUlDDPgGrJeuWPaHBxlvF9Zug7G0LxFumwHam0XTUeEWlr4Y9Us2y2QMb1PEvbAZsMdSAFhedUCEpInmCgfBtGaAehrUWyxU2qiYjYVgI/bLuBVX2qo06xEGy6+AwlwwNQL
p+979qxMn1ePRB1NHPwlAp12FbMJCTzn3tqmGwzckqS0EPu2BEbAC6qByAiba3xfQ4kIUhCb9n8euUJnGOwdglEMpLgh4yFDgVfaFBZeg895Y6LOlaX3MJZqrTZsWoG5mUPjqm+wThtH7NjneRHELHhKo6MbGC16PxAKeEjMf+7ZNYGNGW5chpqp83BM2Rcg9uuPMeENmWM59tKT9Juw81WzMdYbV+kQYkCklcoKXmCfYaqADK2fv8nX4/WNL6WQIa/JQA01vyGB1Q+m5/5yZtUFMjpeJvTU3CLYvP777/jt99+Q3x8PCpWrIh58+ahevXqNtuvX78e33//PWJjY1G8eHH8+uuvaNmypTtEdciLpDScfpiA5+/S8OvuDPNi87Lh6Fy9AHRvHgEOLMUTFKvwheyg/UYezHCLasC7b1gnb9MbKDOlJgSJ0EGKdwjA+vNPUcXJuZcrf0PJtOVO9nbMBPkqjNVlhGK3mX8ckQWCsXlQLU6KMNpij2qM030jpC8ggQErFL869GvIxDJdwDn1YDwx2FcGykoyfG/o+EaxEd/AOiKlmOQ5StFYEwfJtuAPfTt8+Zf59lGVQjmwYUCU1Xd9/N5rhKh0KGVXwo9tPy4cItLWYObeO1ArZSidJxANGfQVjgylLdPXK2LHWugNFPrzOOM3CvYRREBGaPoe5SiUlNouMVBcextQOh7rqvorAMB4bS9MVixjJce/qp9wzRCBNum/4PKTd4gsEMyqP2A7p49Obx2VZqnU2OOE+mujEvg6WQOKysga3Vl+2GafdrKTaCc7aXZslrYD4hMb0wYA0GHpO7VY1xI/67L8KutOy1j0+CplCPZR4HliGo6ObIiCuXyh0emhksug1Rtw6kECqhfOCbVCxmheoZBQPHtErVu3Dt27d8eCBQtQo0YNzJ49G+vXr8edO3cQFhZm1f7kyZOoV68epkyZgtatW2Pt2rX49ddfcfHiRZQrV87hfElJSQgKCkJiYiICAwM5/SwRY3ZABr0xAkcKCgrojPuxI+TrbeYHyS70TB8FBXRIgxL5JK9xy1AQGihRWBKHRPihiCQOOfAe+SSvoZDo0UF2zNi3atqf+AAlbqj72JlBWF5TgTikj8QUXRckwRc6k7VB87LhGNOiFBpMPwwA+LtfTXyxOGMl1alqfky7Xs/t8s7XtcUQ+Va3z+sqhdNWIxSJCJCkQgcZHlNhoEx2zqsUyvHRWZ2CGum4re7FeOx4Kgdqan5HhtIgwWVVPwTbsDCIgXgqB8I/VhvvmT4Khw2RoijnYMpaXUOM0/VzKNc+fWU0lXFX+doeC3RtMFX3BQBw8n1Fa6biA1Q4qvrG5bEAoEbafLxATqdli0hby8nn2quvgim6LnhMhUEPqTH9gR72lZfYqa1cntsSrp7fvCs2NWrUQLVq1TB//nwAgMFgQIECBTB06FCMGWO9Gu3cuTNSUlKwfXtWno6aNWsiMjISCxZYV6LVaDTQaLL2OpOSklCgQAHOFZtFa9fhq7tfcTYegUAgEAieyj+IxucT6TOYOwtXig2vzsPp6em4cOECmjRpkjWhVIomTZrg1Cl6j+1Tp06ZtQeA6Ohom+2nTJmCoKAg478CBQpw9wFMyJtsP60/gUAgEAjZhc9hHc0rFnhVbF6/fg29Xo/cuXObHc+dOzfi4+kL58XHx7NqP3bsWCQmJhr/PXnCXRSQKS36TuJlXAKBQCAQPI1Omu+FFsEmHh8VpVKpoFLxn9tBJpWgQ+5ddpPQtZGexDzlfN5lESunDaXxefr3UCEdGhsegplhlxGSeNyhCkKFdLSSnkYK1NhvqILPZEcwVbHEnWKzoo5mDl5RQZCAQtpHT/EmpXMjKU2LpT2rQS2X4tqzRASo5SgWFgC9gcLlJ++g0elRa1VRt8vbQDODsYOhWCiVtsz43UphgAESABL82z8KpfIE4OqTRPgopcZUATmQxCivUya90/+Hg4bKqFciFEfvvsJKxRTGztXu5hPNT
6gjvY5RioyaU8XTVkILueh8bCLS1gCQOJTrm/SBZkU7+aSeZhYeUxmL5FWKX1BX5nxNMAA4qS+D73W9GCcxtMcNQyG0Sv8FTL4zWzQK2IqD79u6LEeX9O+QBF8zHzY6QgNUePUxEeOwxsXxb9MSLs3NJ7wqNiEhIZDJZHjx4oXZ8RcvXiA8PJy2T3h4OKv27mTjwFp4nazB8XuvcSsuCaceJqBE7gCMaVEKIf4qFBsj3kJr7uDz9AwN3pZSY3ruDlXQ+H6TIcup9k6etsBr5xWbwmmrHdbDcQXTcOHNg2qhUkHr2kamx2RSCaoUyiFY1tJYKg8i0tYiHAk4rR7qsP2P2i+t0uf/pWtBm6Y/k46aCdhgkqWYCZv1tdHeItwbgFnYMAAYPt5sP6mYF9UL5wQA1CkeAgC4/3MLJGt0eBoXB5iLbJPx2l44aKiMWz82h49SBr2BwtEffnXcUSCuUkWRg8rKzaKFPMNpcyK/857Ql0Vt2hprtpAwkmuzoS5mgbliU18z05hFmw1/6trgMZXb6OB6bcpvgAspgF5QwfhSOxYLPi8DbHF+HAAok7YUqVADyHDArTbmdzSQXcFvikUO+/6h+wSD5NuwUNcKB79twPo6OG0ojXOGktisr4NHVG4zB+HmZcPRrGxuNCoVhiAfBSQSCVI0Oiw8+hAty4ejZO4A6AwUFDLxp7/jVbFRKpWoUqUKDhw4gHbt2gHIcB4+cOAAhgwZQtsnKioKBw4cwPDhw43H9u3bh6go5/ObcEmIvwrtKuVDu0rWOQEqFAwBXtJ0MqFi2iIMlW9GXzsPCk/mwvgmqDJ5P/rXL4KxLUpDb6Dw8FUyioX54/GbVHy24JQx/XrxMH+zAoqFQ/zQpGxe4IhzcxdJW+1w1eEKVwxFjK/zBKlR3m5uFXOcDQlPoxRQS1yvwhuPXA7blE37CynwMVNstutrYoO+nl3F5jxVCuXTluCaui8jWXbrq+Eb7WDEGPJghGKD8fg+fWXj69iprXA+9g12XItDj6gIRIT4WY0jl0kR7KuENozZosdUafJRZtzQZVKJUXkSK0cMFfBN+kDcofjxH6Sjr/Zb1NNfxULlbFb9OmomoKnsAi4aSmChcpbjDjbIuBbVACSsrY5F0lZb/U0vq6ujvOaS0/I8MOSFHjJIVQFIptTwl2RlYz6qL29m8Wur+REdZMfQ3UYeoEylJvP+8Qo5sF7fwK5is0TXAot1rfACObBW3whPqVD0R4YF7yvZdoykqU+WyRlDKXRPH4NCkhe4R+WjvUf+3L4cutYoZHXcTyXHCBPLjEImvgSqdPD+ix4xYgQWL16MFStW4NatWxg4cCBSUlLQq1dGeGb37t0xduxYY/uvv/4au3fvxowZM3D79m1MnDgR58+ft6kIiYnPqhZAkbTVGJI+FJO09AUbE+GPn3Vdac/xwVF9ebfM86lmIgAgl78KsVNbYWyLjARZMqkExXMHQCKRoFAuP5z9rgn2j6iPMS1KYduQOjg7rrFxjPGtStMNzRi+H1B90jNM0DFTWuLE6EaQu2HlUlGz2KW97MImCsHQdPu/oRT4WB27Y8hvlYXVlK/TBwEA3sMXU7WfM5JpgDYjXHaHoYbZ8XeUv9n7qhE58UObsrRKjSmuphESu2IDSLDZUBc3qQiMal6S99kKp63GB6ixx1Cd8bW3tm/G3/I8VQpTdF1x1WQRYMlGfV2H42Vcixl/2Ayro+PippnQ/T0P+DMPTf5F+wU0lPma/zkyrIR5gnxQSbMIdTRzsFbXCIf1FdFdOxal0pbhF+0XaKSZjitUMUzQOU4/YLk4rqOZQ9vupqEQJuu+xAvkBCDBUyoMmd+NFnL8rm9nZenMpIlmGjqnT4AGStylCtAqNT+1LUur1HgyvPvYdO7cGa9evcKECRMQHx+PyMhI7N692+gg/PjxY0ilWV92rVq1sHbtWowfPx7jxo1D8eLFsWXLFkY5bMSAAVJsN2RYl35Q0NvHKUhx2VAUk
U5WFV6la4Iv5fsdtntgyIPu2rGIlXG7H59E+RqzzLbS/IwbVGFW/YuF+aNYWMZDzEcpQ8yUlkj8oEWwrxI3n4u34uxrBOHsd40hkUhcfpgyRQOlVRZVNvSvVwRjNmWsJo8YKtpsd8ZAn+IumUbZAYAWmil4SQUjAVlWq7X6RhijoK8qv1FfBymUD7bpsyyvlg+gnRaKjrs4biiHprILgsztiDmfRyL2dSpm7c9Ivz+oQTHe5zR9+J2lSmOnvjpays7a6QHUKhZi9j7OjoXwGWXfenjNEEFzVIISaStwV93Dbl9TmpTOypOmZ/ioy1QQQiWJ6CffaTyuozK+E5lUAi3keEqFYpwuy0KZBhUW6ds4HL9a2h/G111rFDQ795QKxUNDOIpIzQNlvkx3LknnP7oGuE/ld9iuW03vUmoAN9WKGjJkCB49egSNRoMzZ86gRo2sG9jhw4exfPlys/afffYZ7ty5A41Gg+vXr4sm6zCXdEifSHu8Yprjfda/9Y0YzTFX156NSA65ZSiAFpopqKBZgo36OjiuL4ublOs/ColEgmDfDN8bLjxR3lis/LkkVIAihM7SNX2swyKY07WfISJtLTqnT6AdY62+MRIpa4vJMyrETKkBgCSYf+8900ciIm0tiqStxrfaQZig64XzVJYCFUOFI5lSm8iXwd5vuE9keEJf1vi6f31za8JqfRPL5qLgJ21XtI3MB5VCWIvSIO1wu+eHf7TaMeHIyAYOq8HN17WjPW6vtAcdpr5utuqkmbJXn5XzfLquk9m5xfoMiw+bBU2mtXyXvhoAoL1mEl4hGADwv2YlaLP3NkqfYXXsDQIYzVcmLatWVETaGozROc67NqRhMV4zpwuF2G2wXoseMkSkrUFHzQRs19dEtbTfEZG2FonwN/PloOMWVdDu+UwuUcW5EBUAEEflRMv0Kbj1UZH5VjsI3bTfma3u2kXmdXkeZ31sU6gsheMHbU9GfUx9OuyxXNfMqHCK+SZQNu0vfJH+HR4Y8mCWtgNOGMrDQFEol892oit720xAhsXoGUJxy2Du35FRx8eap1TWyv2WIeNaodsaKJAzY6uhnCbrZpxpUSoexk4xZfIX6aHNqnIfkctcdkcZVoVgYPrX+EvPfWZXPthiqM24baFcfg6vuQ926tIs1zVjPFePWhFZbySO/8ZzdB2MrzVQGq00APCAyociIX6sSvT20I5GubQlGKj9BhFpa83ux/3q2brHS7BDb15uiKnfYCrUKJe2BKXSloFpMeHm5YQPyuEDothwCPtHngTnqVIYoh2GV8haXfRNt+8ox/RCzwx15ILmmqkO5/VRur6zSTlps9n7sSjc7uF18ZDKw6hPP+23iEqbhw6aH+y2m69rj0TwZwXiihT44JShLBqnz8AcfcZNuk3FvFjaI2PFKKGp9Mv0mv1X38D4umqa7aiWCSZKZYZPAD0Kk+3niLS1iEhbg/GtyyF2aivOlMd5unYokbYCEWlrzUpffFrZdjFAMVA7bQ52mWzLCRRQx4gVuqZgehW9ZWhJPW6w7Rf4u85+ePM6XQPja5Wc3ePtBhVh9n6HoSaADB8XAFApZKwsNhSkSAZ9YUmV3LaitVWfpSj+Y/J5mJAMX2OqBEf0iCrkoLis50IUGxFiquRYslCXsYr7Q/eJu8QBAEYP9mGNXd//t3cTX6Kjr4oOAOlUhplaLmVzSUsQh1y4QNnPx/Aawvz4Lxhct7gF+SgQFpix3aOEdQE/R6vnTHboM27ydwz5rb6PoqFZFpCDhsqolTYXRdJW2x3PelYJjt17RdPSMbYUoRm6TrTbF/YeKkJwwVAcU7Wfo0jaakSkrTVWfxYzC3RtEJG2Bj/YcZKtkvYnBqQPN77vmJ6xgHB0zZla+AJU5osle/dGANhsqGN8bRqW7MyCaZy2D8Zp+xh9XGZ1rghnlq+WtK5gvvAa3qQ4wgKylJG9hqpYomuB4/qy+EHX0+X5bDGprWf4rToDUWw4xB27FHN1n
wIAFuha223XQGO9V+ssK3VNGbXLE0TvaMqGkADbq43dH/eq6ci8oUkkwD0GDnN3DKZtJDYcFjOSfLmDg/pIq2O2fA3o6Jk+yu75EH8lUmhWclKJ9Q2/R3rGto3pNfYSOVAmbSmap0+1al8hf7DZ++cIcSrS6PAd5xQbVzFVzISge/oYLNB/wmt01l92FgWmxBrorbyJlLnlIYYKh6OHfAKCsNtQ/aNFbi0eUBmWMja3ybJ2tlHpuGDgLmlcCnywVt8YCQhCniA1SoVzU3vwJwuFYniTEjhjEh0KSDBZ9yW6ab+zygkmk7r+kAlUy7Gke1WXxxEzRLHhkPolzKuV6ynnL0JT73lTMiuJJ8Hf7n5zLMPtGAD43oFPymyTvWe++ayKbaXE1PHUktOGMgAAuVTCyMkwOn2a2fs26b9Ytdmur8Hpdp49VuijrY4dNVRg3P+wIdLu+d3D6+ED1DRnzBWbsS1K4YihIsqm/YWpuoxoukxn21SoabcjlU6EvfO9BrD0Ccpk/4j6VseUNBacc4YSDn3d6Nitr4YWmimM26dRCtowe1egU9L/ZGjhnaKjj6DsZ7E9bupoyxamVkIAyOlnO9knHdqPW46dq5r//SUmV9w36QNRPo1dEtCqERnbqlwsXoN9re9PTLdf9QbX9yWv/NAMTcq4574mFESx4ZDwIPMHx2gLr/SLhoytGiYPglcIRmPNb5it+xRF0lajhWYKmml+NfMVmKjriWnazi7LvUrfDGXT/qI9d0BfCW/AXZV0R9BFCgAZqeWZUDCn9Z729o/bKJk009Bnml2rM482u2Pj4cgHepqfoqlT6+D0YS6NH2Ijmktq8ZApnz9jm8n0Ydu2on2flCoR9rcH6PBVcbcdRPdIoIvyujC+iTHNgKP+f+jaolf6KJw22A+136KvZXw9XfsZBmi/MTrYO2K7viYaamYyasuUrfpaWKU3t7AaKAleIwitND8jjVLgU81ExFP0f7MDhkq0x+9aWEHfOrgn2FNImOoGRUP9MK4l81QHpvcwe/0MkOC9he/LE4P97T//j9crFwq5Kz5kziwiuJzfUyCKDY/oKfqvl+l19YDKh9m6jjBAiltUIdylyTz6h97amW6P3raZ8W9dQ7NkTloq4webAh90SR9n1rZ42kr00bpeF4UL3trx8bn+cYW6ond12h+t5VYa3fcIACv0zCMuuMZyHVYybbnZ+x2Gmsbtpgla5rk8HKG1SEQmpfn+HF2vvkoZ/JTsFJXZnSNZtWcLXdRWLpah+m8QiM/Tv8cNA72i0jv9fxiuHWLcapmvz0qvcNnguC7Yb7pOdvO9OMMsXQer0OYimozkdjeowiilWYGLdnzKdDbyvVgqAo6wX0LE9rmmmgxLatMyuXHg2wbIn4P5vJnK+NKeVRFkYRUxvYYzt/xMHXMHar9mNIfl4tXdrOpTHeGBaiz+uJWUPwc7a9/XjbmLlBUzRLFxI9cMhRGg5r/u6CA7P9JYyjy8b5auo/H1SYP53q9WoBqpdKvkJ1QYTcsMWn/cRqpfgn7VlQYl4qgMU/Iv2i9sjnOPym9Ukt5S/ljjpvwmdw35cNbkM182FKWtt3XYEImItLVYqY/GZC032auX6ZsbX9ctHkIbJeEoKqds3iDc+LG5/UYWFAtjlpuDCaYPradUCGqn0WdwZcMTk5pgaTR/i6Jpq3DQYDtdwElDWZvngAyl6BHFLNS2djFmyk/ZtL8QS+XBa8p1Z/cNA6JQKjzrb8RlSLzOxlhF01YZ/eN87SjK72jyKpkuBCoVsG9BzLSO/qDrCQMlwX1DXlynmG07+jKI/BzWiJ8kirWK5kKNIrlwelxjNP24lbSid3UHvTLY+009nB7bGMObEMWG4CL7Deb70Cv1zbBhQC3OnYwtV/eWNyG6MMs+6d/ib11D/KU3dyosk7YUA9KHo7CDqBY+sdyDf0UFgq0R+GuTpGFXqSJopJmONprJWKS37XRtgBRt0icjIm0NqmgWWG3B5
eVhtTZZ2xWd0idACzkaa37DYl1L9E3/n8N+S/Ut8Pij+byjhj65Hh0/a7vgsqEIyqctQUTaWrOVeItyeeBvEYVSpZDjbabCDkoeuJOj+vKcRBWZZmz9WjvY+Hp4+iBUT/vd4YN+tc62UpxZiJMpFfIHY/vQOrgw3r6inWmxOOTA3woAdumtH4iH9FmZqatG5MTu4eaJEi993Er/3Y6/zsAGReGrlGF4E9tWoeW65nhiCMWcj4EQmZh+p7VNMhn/0dX8u6qh+R0XDcWMltjp2s/MFgJKG2HeMR8do0999MfTQIkimjVokj7dpqzOMKJZSdydzMxZmyn/fFUTS3pYW+KLhvrjwLfWfmOmNC2TGyVyByA8SJ0ttqEAN5RUyM5Ymm+1kKNkeADneSk0UKKhZgbmKOZjtNY622S39LHYofoOQFZ9ngOGKjhgsHYATIUauw3MVgHuopWJM2Yzza/Yqxptp3UGWw11sDUtK/TzA9S4xmBVlukca6BRpJqV5T6Z1RKTJGwPqHz4WcesMrkBUtRLZ2+ZWKxvjcU2lLtmZa0dCsvm5d6/KjOE95f25TFu8zUHrR1jvop2IpuUBFita4xu8gO0559SYaiUtgAaKI0FDB3xHCEokbYCvkjDdMWCjBT96d/iFYKcKtTKLt9I1nfQM51+K3mWrgN6yfcAAHqlj0Q5SQzm6T+lbZtJ5/TvUU4SgyuU7W220c1L4X/NSuKondD91whC3Y/X7gZ9XXSWHTZLjgcAHStnKZYty+dBoVy+eJSQUcZFAyU+Tc+oJk9Xk8lPRf9Ya5r+G3yQznpbzRlsKVfOUrOIbatd0VD7qTgWe3kEFB1EsREAPvJtxVB58En6z7TnblCF8ZO2G9Kg/FhATdxM03bGRtUkABnRFy9NclfcpQrglqEASkufAIDD5HoE5tCpBJYWHC74qV3Glqe97QY2mD5E7lPOJd8br+tjVGxe0WzlOHKWpSMdCqRDgb48+qlpKAV+0nXDEYsIOltFETNJgj8i0xYiDUqkQYVDoHcaNiUdCrv+OZmwCUl+QuXGdJ11AITUYoy6xUPwKOEx43Hp0EGO9zw88hqWDMX7NB3OP3rL+dgE5yBbURwz1GJ/1dTRM7N6cQsB0lj/pW/pNp8RV7lAlcThj2bx/2n7W51vkf4rNujr4Yi+Ai5yWDbC3Yyksa5Z0qai62UqmBLk41oyu0almCnNmWncubSKt9dMwkxtR6zUM8u5ZEqgOuNzR6StRYW0RaimsZ1ZWWxoIMdqfVM8cSItwTsE0GapdbS1wQSuNzzKu5ghl23YuCVJH3TG1zM7VTQr+/FdK+eL1HLFzmGOK6ZnJ4hiwzFDGxU32xNeqW+Glbqm+F7b0xip8UV1ZrWesjM9taMRkbbWqrBiJv/TDkAP7RinzPrO4uqDeLzW3Gy+3qRMgS2mf8Y8l40jHCWikzsRSqqQsf9SbIX0u8Ilqjjm6j+1GdVjj5z+WQ89W9eb2Nj6Mcx8gc5xRWm2ONraEIK2ka6VwRjd3HYOLCYcv//a+PrTyvmxtl9WColcfirjNm54oPN+eM1d2Oouw8OWsSdDFBuOUcqlaFneNDmeBBN0vbDKJJS4dB7vuwjZ1mXJjqzWN8ViXUukUipUsVNvyRSZRIL9I7ipdl0twnbtJls4UuZMq53bD/F1DNd+CYwRcS0mW3yrHYBWml/wp57f0ipTP7Vdt8lZRjRlnx3Y1fsLW4uNZRh1YwtrpGk+GalEgt61C2Phl1WwfVgdOMvvXZk7lDPl+9ZlOB/TEyBPI574u19Nm+foTP6eyrSOFdCyfDi2D3X+B+0plOFAIf1Z1w1lNMuQwKL+FJeh0c7gbGFStqg4SD6WXdBBjhtUBO8Wy8+dtC7bi75xxvDpajQPm+6hNGVdFBbXZpCvAgPqF8WA+kUR5KuAXCZFdNlwm4kwmWDLN6maEwkwMzEN2c9OEOdhnogqym3iLWf5s2tlDFxzkbfxS4UHoFNV92XoFZIOl
R3XoMpufOrCd2KZRfW9RmejJYHgHuZ3qYSaRXKh3e8nHLYd08K17S2muGLJzOXvmm+Rp0KWSF5Oi/LMa0YR7GMZqeGWOTn0sO1btzBnYwWo5Fjasyq+Nkn4Fc6yCCpdeQMhMHCdf4GAciLz+ZAwtBO1rpDXJauLI5qUdm+NJq4Kd3oaRLHxQLpHMatFQ/B8uFSmQgO4SzA4r0slNCqV28xE349DxYng2dgrX+GsGnmQg2gtZ+FqfeGMs32+YG6LpGYHyFaUQKjkUmh0Bqf6Ms0TwSRrLIHgDHQ1fBxFO9UpFoJuNbN8NoidhMCGIgJGa3Fl1MvtRNTUt81KcjN5NoJYbATCmQs8E6ZmVXfsnDCVheCZuPrXLfIxxDyyQDBW962B5uVsb43yEYHDhGySZd5p+C5Y6g7E8jd2plaTs/UFTRcR2Q2i2AiEuyJN+OD02MZCi0DgkNNjG+PaRHaVzZk+KFb3qYFhjYtjUXfr8h2Wq2B2ZQO4w9Xkbd5Ou0qu5ZAhZMFHJm9bZNdQb4AoNoLR0s7KVczULR6CcB6KQRL4h04ZqZg/COFBagSo6VMQ2FJg6EzzdI7OeYN9MKJpCYTR+PdYKvevkjX0kxGyFXWLhzhuxBK2BhtLGbiy+DhKgjnpE+uq8Eyt4qbZh2//1JxV1nBvgyg2AmFrFbSDQYInocyqzcuGY2VvcRXIJNjmq3qOi342ZhClwdS/IHega9EkD14mu9TfWXpERQgyr7fAtSIi1N8jskCw8fX4VtxbO5iU0ulczfnUGVKTpzkf2b09CeI8LBB0yknMlJaiLiuvlEut5GNT8I7gXpj8ZRz9/aoUos9WTHeZWl4b0TTVwk3J5WeuCHEZ2m6JvRDe4rmzZxIzruC6nhkflwGT+2qJ3FnOybYqhDtD4RA/hPqr8EMba2uMJXQKiYgfCaKFWGwEokiIPwItnMKYKjVMdQmutXZT8XpEFUKLcuEonUd8DwVvLFnhFAyuE0dlEFxJNPmNg9T5oQEq/NYxqxaWzuBclCAzPM+njTzQ3Iu9LZ8iDuqs2ePzagXw74Ao3rfwSSBHFkSxEQilXIrz49lXIgaAfnUdbzEAwOCGxRw3cpJJbcvhz25VRGlhcjaKwNtwtSKyq1hmFqajqkn9qrJ5hZVXbPw3xPvLlLgLJncpe7eynrXY52ia1qECmpTOje4ubq0x+R0BQPEwf1QqGOz2JIBihCg2AmKaKrt2MeYr4xwMozhy+HIb7dG3DjOFyhupVDBYaBEccnlCUzN/h1Y8Zp0unMv5FawpISYp3/msocbnd8EXQkWJ8Y1YEz3bU2ycKWvQqVoBLOlRFT5KdpbzluXNfXGYJumUSiXYNLAWlvSoymo+b4QoNgIj/3jR1irKQyQAh8aUUuEBKJ/fO2+0jvi0cj4s+lL8N4tgXyWW9ayGoqF+qF44JyNrGpOHTEmaQnq2brbBvuyUEz9llnWNT+Nf52rZN6cH3xQO4UbJzYQPxUeEhmWbyKXOP5bFaEEXAqLYCMzhkQ0wrWMFxttLbODyBhFoIxw4OzCoQVHair9iRC6TYt839bHuK9vV5dmikElpw1DpKMnSEddUQeJzJW/qGOoJNCwZKrQIjKkWQe9gzgf9GUT6eToiNWh5FESxEZj8OXzRqWoBlyq4uoN8OTynXkkpGgtDdkIqlXC+cuMz+O2TinlRPSInyvDk9D2+VWmH+UPExu9dKwstgmDYu3SdTabI5PcQHmh+j2Pq20IQH+QvJ0Lmd6lk97yM4UOLy2ebJxk4O1bJz6hdds7MyRbTVSQbfzAmzP2iEv4dEMVL9fSf2pVDXx6soXzjq8y+DvBC+eAwyfvkDjzpXitWiGIjQuQObvB8PAAcIvJfm2kkFNN8KHmD1Iid2gqfMVSECBl0q+E51eW/rOk5smbCR+ZdgmPYOvnyhalex9ZnjZABUWxECFcrFi6jTMSeI6FeCfY+CU3LZIRFdnIh22d2gek1aZrwj5TecI4K2dRJn
5CBQaxhYx4EUWy8GFcqiFvyZZS4V77OqF2ZfhfZJXky3cdkegt1lMgvE1NrWXbeTnEWpVyKL2tGCC0GY/7gwRfIE1IruAui4zgHUWxEiEohvj+LK5k33QEJc3QM3T2S6Y3TtFkuO+UJwkQUPba6Tw2hRWBFz1oRuDEp2qMsXS2dyA9kWfzUEnvXV6NSYaznI2Q/xPcEJaB+CfH9eMUe7u2a1SV7KEVcrf6qReSweW5cq9JoUjo3/hJBkrA6HuirovCgSJyYKS3tnrdUci1LyDiDo7peP7Ur5/IcQmPq80fWa87hOb+ibASTwpLtIrktPOfp8Pn7Dw/ynFB3u7ig2JgqRfasYyH+KizpUZVR1XCCOXyFu/OBQuY4pcDx0Y1QNq97P1PxMO7yFc3qHAkgI12AO2lQMmth+z0PVcazA2QT3EPJjlsvQxsVw7yD92nPufJ95Au2r7j4c1jpV0gcbQHY70vgixB/FQY3LIoOHhSd91Nbx5YRpVyK7UProPDYnQCACI4zFNPBpU9Kqwp50KhUc0Gipe5OboHYhBROFbXsBLHYEDyGPHYsJ67oeZ7k0+AKBpqbPp2yUyiXr3U74sXIG2XzBqJX7cKMLLWehkQiwdbBtdGqQh783sXzkg4KFQKulEtRIndAtlzAcgFRbDyMtf08yyGSS+z9xpnmrsnOMFVO6PIoNSuTUZiP67pABO+nYoFg/N6lMgrktFaYCQQ+8A4bezaiQv5gANlzBW3vI3ujWlOR43wmrlwxBXP54sL4JggQuRM5IXtD1jcEgCg2HksOBzVTxJIe3F3wdUMLsRN6yjeDGxbjdDxXdWF7YbgE5xFTiLw7yIZrMoKbIVtRHkqvWoXtns/Lod9IA5FUGranvPCVGfnn9sKFj7IJ/V3Ru7rDNq44DxP4Y2xL90bdEAjeDlFsPBS1CJP4CYnU5OtwJheIrfD5HL7OVRPmBBa6Wu5Ax6t+upUy3TGlXBw1c7ILzlasZkN9J0qOmOIpjs2hDKxfnauSEireDnk6einOZAS1hSeEHJreuEvkdizvlsG1zd7P6BRJ2y4ixDMcHn0UjpURWsWGpt2szhWRP4cPZnxW0XXBCKLg08r5hBbBLRQNdfzbr1k0pxskIQgJUWw8FKXc/p8ujMs6USKpXWOvqGemUzXALKeNZRZUWwvSsAD3hIIPbljUpf6FcjmOVgr0YeZSVyo8EMdHN/KovCpioqSD7LhCwDRqMIIm1J8O4qRLEDNEsREpKx34TAS7cYtEIRfHXSy6bLhT/egeNJZhy0Lni4gqwn/6f1KUkjl1PbAcAxcMa1xcaBHcAh/FOwnigSg2IqUegz3xKZ+W52SugQ3sWwv4csxli719/lz+7BQ9JorM0EbcRiWxRSElP0+h6FU7wum+5TkO0+cCutxEdNhSfiMLBHMojfA0d3KRRPAMyJ3TgymXl5sb6OjmpTgZR0hUPDi8Kt1YkJAuYimqaC63zU8wRwIJWjnpp9a1RkGOpXEdV5WtaR0rmL131xYtX5CtNO+GKDYejLtWhuQmIAyeEonitTjx9Q9rXBxyEVraXM3MndNi61ssKSBMYbN9KPTWM4FfxPcL9CIsV245fNllbfX7WKdk48AoNCgZimZlcvNWkDFA5IUeXQ1XdYZskfWFZEvjlBFNSzB20gaAEJZbqEwY08KxBdbVpICuKAbkiiPwDVFseKRqRA6z92yr2175oRmuTGiGKoVyYnmv6ljUvSqX4pkx2s7N0NPrMFlu83xRXXxbBQRu+aZJCZf6O5PMMNPnjUmEGgB8Ub0Adn1dj/U89ugRVQjBNNGDQT4K7BhWB91qFsS575pwOqdYaFomt9AiEEQCUWxEjFwmRZADK88/X9XkZC5beW+alcnNKOkV3zTh8KY18ZMynI3lLriuG5WJt66ev27ienRPx8qOw92X9aqGWkVz4c+ulVkrzE15+G2plTLarWM/lRxl8wZhcrvyducMY5DoUawwjZokRkrvhyg2H
k7NIuwdTOl8N2xlP+XTSsSGei6E31reyGw5GpfJE+j0HHyzrn+U0CJkOxqWCsP+EfYtKg1KhGJtv5powWFCTK45P97aQmPLCFu5YA76Ex6AmmEAgYcboAkMIIoNj4h1ZeCJpmh3fJcjm5fkfxIbWIbUW5ZIUDPILEzgnmJh9pPtidEJ1fJaoivkKpYUDpZULhjsdF9H1u1MxPrZCdxBFJtsiC3rjGVyrvldKrlDHN4JZnjDyxvkY/benbc/S5+OTYNq22hJcDdituR5Gzn9hNkK6x5VSJB5CfxAFBseEeFizi6WkRKW2Xk9lUENnUu0J6TBLV+wj+NGHCBWq6KYsPU7blwqzL2CEDiB7u8Z5cSWPkG8EMXGjXiYnoMSIqx54wyWdaE8lcxMyFxlnCY4T6sKebCkh23/s08q0leLdwsMbjTORH1xBtGmCTxDFBsvpGoh2w6AlmUCfJW2fTcUbsy8a0r1COvqu/asX60qOHLc9DSVkp5vm5XEhfFNSLi6m6F7Dvsr5Xb9a/LlcI/FzRK+LA8ymXt+Q0xy8LhK5t+tb53CvM9FEAai2HghZfPa9gmwrAVjWgNGLJlu2cox/wvv8AViQi4aR1CCZ+KvYpew0xG9axdGg5L8bI8FqrmVlY4z4xqjWJg/7/NkUtRkLrlAizgCP5C/Jo8IZXEtzmILyVTGtpECms9NYGsm5yoyRVDzvAki0S89ljV9awgtAiOqRXAbWl2nuGf7ibhSm21BtyqM29L9vBqKsEQEwXl4U2zevHmDrl27IjAwEMHBwejTpw+Sk5Pt9mnQoAEkEonZvwEDBvAlottx12Pz82oFGLcNMPE/sVXZ15vwhCrFfgz+Dp2qOk4elx3pWSsCtYs5n/OIKd1quhZFEzOlJeeh4gFusKpwgcHGjTCHjWhNJjQu7ZqlilhsvAvenmRdu3ZFXFwc9u3bB61Wi169euGrr77C2rVr7fbr168ffvzxR+N7X19fvkT0Wuz9SC3vpXmCxFell87SlTuQGzkD7DgS+yrMz7nT6dhWCL4tZBwVWhSLlc7TyBPs2vXIR/4be751YsLAgymbTdmXzKYV3FREmOB+eLlz37p1C7t378a5c+dQtWpG5MC8efPQsmVLTJ8+HXnz2r6Z+vr6IjycWWpsguuIY/PFHDqZ3JGgrmAuXwxrXBx7rsejSKgfvqjhPifdsnnde5PtU6cwBjUo6nU+O5Sb9n/pkt4JDRtlScgkdXoe/kbO+AeWzRuEdV/VRJ4gYRy9CfzBi/3t1KlTCA4ONio1ANCkSRNIpVKcOXPGbt81a9YgJCQE5cqVw9ixY5Gammq3vUajQVJSktk/gvN0q5nxMF/0JfM9a09gZHRJ+Kvk+L61/TpRI5qWwJ5v6uHPblVsll4QA3IXHXGUcqnXKTVcYk9JKJDT8x+EQubYclWvae0wCtI+pkpdjSK5UDAX2RXwNnix2MTHxyMszHzPUy6XI2fOnIiPj7fZr0uXLihUqBDy5s2Lq1evYvTo0bhz5w42bdpks8+UKVMwadIkzmTnky48hel+27QEZuy761Rfy5vM5HblMaF1WSjl3rXnPLhhMQysXxRSEXrmzvk8EkCGspGuM6A8AxP50MbFsOr0I54l8zy4sgXUteGnUzpPILYOFndWaCZOuK4oF67+hPS2nGwYEs7RtjTBe2H19BozZoyVc6/lv9u3bzstzFdffYXo6GiUL18eXbt2xcqVK7F582Y8ePDAZp+xY8ciMTHR+O/JkydOz8831Qtb52fhgu61Ilj3GdSgKEL8lRhMk5VXcKWGg6dTxypZzrWqj59HjErNncnN0TYyHwBg57A66F27MGZ/VHTsERaQdXMvFc4+kaJQOYo8BVM/NYVJDpftQ+u4/Pvg24G9RXl+t/JdTQvhqmLjOG+VfTwtIzyBPawsNt9++y169uxpt02RIkUQHh6Oly9fmh3X6XR48+YNK/+ZGjUywjbv37+PokWL0rZRqVRQqcRpUndXl
EKQD/t5RjUvhf81KynKhz0X/NaxAjZceIoiIX528/oIjel2V7GwAExoY3+rzJRrE5vhxP3XiC7L/Dc1Mroktl+NQx+SnIwxgxsWw1erLgBg/lC395uc2akiJ3LZQsxbqADQtExuLD8Zi5x+SrxJSWfdv1LBHDgysgH8VXJM33sHrcoTB3iCOawUm9DQUISGOo73j4qKwrt373DhwgVUqZLhq3Hw4EEYDAajssKEy5cvAwDy5HFNQxcKsdeS8ValBsjwkYid2kpoMXglQK1A83LsfhuDGxajtdJ5C3z4Dkc6UXG6R1QEDt5+ibMxb8yOd6tZEEVC3ZeEzhZqhbXVKTSA2QIxyMf5sGwAqF0sBP8NqYOCuXxRcdJep8YolCujjt2UTysw7lMszB/3XyajVlHPzvdDcAwv9ujSpUujefPm6NevH86ePYsTJ05gyJAh+Pzzz40RUc+ePUOpUqVw9uxZAMCDBw/w008/4cKFC4iNjcW2bdvQvXv3/7d371FVVfsewL97s9kbUN4IyEvwSQIqChI+zxlyQuPm8+YjJHvcSsPhIwdaQ81xb8cka5SPa5qd0sbNJB3XR6lpiopaCImiIoR4NTUVOWUIpia6f/cPL+uyhASEzd578f2MwRiw52St32+Ja/+YrDknBg0ahB49Gv7Da0tspXBozLo2RKTm6+qEr6b2R+aswQ3+HmejAza8Eo+8eQnKa6/+pRP+PtI29vma+beutV5bUM9o4X+92BeRgW5Y+3xsk88fFeSuGtV6rY54mtvO6QNx6t8T4eHStMKMbJ/FFupYt24dpk6diiFDhkCv12PMmDFYtmyZ0l5VVYXi4mJl1pPRaMSePXuwZMkS/P777wgODsaYMWMwb948S4WoKYEezrhUfqvONueH7AdFRPXrEeTxSN9Xc+ZZY9cqsqSa09Xf/dceOHnpOp6sZ/RvYJd2GNileVfo/fGtofhn5R8I9rL8zCSDg54L8bUSFitsvLy8HroYX2hoqGrNieDgYGRlZVkqHM379LlYzN9a0CK/+Vjag1sb1Nz0sZ2rCf+s/KOlQyI7YSvbYtQlKtA2F4R7OiYYT8dYZ1TXydGhRYoaal1YvrYgS64d1s3fFRteicfj9ezu28YOR28WjoxUPv/vyf2sGAlp1WPtLfeA+d5Zg/GPZ2MQZ6Gdt+vywbieyrmJWhsWNq3A2Bq/jSX1sK8ZBJ4ujqpnlXxcbWc4n7Tjq6mWW5umY7u2SOjuZ7Hj12VUdBB+Sk/60weVe/7flPMgT/tfbJDoQdrf9ZBUf9tv6hoUtiYy0HanclPLe9RR0da2rk9bkwFF/zFUtUYPkVawsGlB1loYytfVhH6dvGFw0Lfoxo6W8OAaHZ6c4UCtmF53fwr/0MjGL8rHSQWkVfb9LkcNotPpsO7f4iyyo7ClPfgLuINeh/Gxwcj44f4K0y8P6tjyQZHNst1Hhy2jjdGAWU90s3YYRDaldY2/tmL2VNRM+Uvdq0xXq7nAnEcTFwsjIiJtYWFDNqdTPSuzcnooWcKBtL/iX3q0x6E5f7V2KETUBPxTFNk0ztqgxmjKcgYh3i74z2d6N2M0RGQNHLGxMIPGZiG1BH2NP5stGx9txUjIXrw9KgqxoZ6a3geLiBqGIzYW9tKgjli5/3+sHYZdCfJ0xvCeAWhjcqh3w0A/d9vc2b0hwv1drR2CZjwTF4Jn4kLq70hEmsfCxsI4XtN4Op0OyyY8fKRm+7QBuHnnHnxdnVooqua3/qXHrR0CEZHmsLCxsJqTkfR2NDPJ1kUE2Oa+O43haUObIhIRaQWfsbGwMb2DlM/5ICwREZFlccTGwjq2a4uj8/8GNyeDXa0lQ0REZI84YtMCvNoYYWhle9FQbf07t9zuzkRErRXfbYlaSO8QT2uHQBrD/Z6IamNhQ9RCxvfldGRqHv94Ngad2rXBJ5NirR0Kkc3hMzZELSTQ4/8fHk/q0d6KkZC9S+juh4TuftYOg8gmsbAhakG5c4dgV
0EpRkYHWjsUIiJNYmFD1IJ8XZ2QEh9q7TCIiDSLz9gQERGRZrCwISIiIs1gYUNERESawcKGiIiINIOFDREREWkGCxsiIiLSDBY2REREpBksbIiIiEgzWNgQERGRZrCwISIiIs1gYUNERESawcKGiIiINIOFDREREWmG5nb3FhEAQEVFhZUjISIiooaqft+ufh9/VJorbCorKwEAwcHBVo6EiIiIGquyshLu7u6P/P06aWppZGPMZjMuX74MV1dX6HS6Zj12RUUFgoODcfHiRbi5uTXrsW2F1nPUen4Ac9QCrecHMEetaM4cRQSVlZUICAiAXv/oT8pobsRGr9cjKCjIoudwc3PT7A9pNa3nqPX8AOaoBVrPD2COWtFcOTZlpKYaHx4mIiIizWBhQ0RERJrBwqYRTCYTFixYAJPJZO1QLEbrOWo9P4A5aoHW8wOYo1bYYo6ae3iYiIiIWi+O2BAREZFmsLAhIiIizWBhQ0RERJrBwoaIiIg0g4UNERERaQYLmwZasWIFQkND4eTkhLi4OOTm5lo7JADAokWLEBsbC1dXV/j6+mLkyJEoLi5W9bl9+zZSU1Ph7e2Ntm3bYsyYMbh69aqqz4ULF5CUlAQXFxf4+voiLS0Nd+/eVfXZv38/evfuDZPJhM6dO2Pt2rW14rH0dUpPT4dOp8OMGTM0ld+lS5cwceJEeHt7w9nZGVFRUThy5IjSLiJ488030b59ezg7OyMhIQElJSWqY1y7dg3Jyclwc3ODh4cHXnzxRdy4cUPV58SJExg4cCCcnJwQHByMxYsX14pl48aNCA8Ph5OTE6KiorBjx44m53fv3j3Mnz8fYWFhcHZ2RqdOnfDWW2+pNruztxwPHDiAp556CgEBAdDpdNiyZYuq3ZbyaUgsjcmvqqoKc+bMQVRUFNq0aYOAgAA8++yzuHz5st3kV1+OD5o8eTJ0Oh2WLFmiuRyLioowfPhwuLu7o02bNoiNjcWFCxeUdru7xwrVKyMjQ4xGo3z66ady6tQpeemll8TDw0OuXr1q7dAkMTFR1qxZIwUFBZKfny9PPvmkhISEyI0bN5Q+kydPluDgYMnMzJQjR47I448/Lv369VPa7969K5GRkZKQkCDHjh2THTt2iI+Pj7zxxhtKn7Nnz4qLi4u89tprUlhYKMuXLxcHBwfZuXOn0sfS1yk3N1dCQ0OlR48eMn36dM3kd+3aNenQoYM899xzkpOTI2fPnpVdu3bJmTNnlD7p6eni7u4uW7ZskePHj8vw4cMlLCxMbt26pfQZOnSo9OzZUw4fPiwHDx6Uzp07y4QJE5T269evi5+fnyQnJ0tBQYGsX79enJ2d5aOPPlL6fPfdd+Lg4CCLFy+WwsJCmTdvnjg6OsrJkyeblOPChQvF29tbtm3bJufOnZONGzdK27ZtZenSpXab444dO2Tu3LmyadMmASCbN29WtdtSPg2JpTH5lZeXS0JCgnz55Zfy448/SnZ2tvTt21f69OmjOoYt51dfjjVt2rRJevbsKQEBAfLBBx9oKsczZ86Il5eXpKWlydGjR+XMmTOydetW1X3N3u6xLGwaoG/fvpKamqp8fe/ePQkICJBFixZZMaq6lZWVCQDJysoSkfs3IEdHR9m4caPSp6ioSABIdna2iNz/wdfr9VJaWqr0Wblypbi5uckff/whIiKzZ8+WiIgI1bnGjRsniYmJyteWvE6VlZXSpUsX2b17twwePFgpbLSQ35w5c2TAgAF/2m42m8Xf31/effdd5bXy8nIxmUyyfv16EREpLCwUAPLDDz8ofb755hvR6XRy6dIlERH58MMPxdPTU8m5+tzdunVTvh47dqwkJSWpzh8XFyevvPJKk3JMSkqSF154QfXa6NGjJTk5WRM5PviGYUv5NCSWxuZXl9zcXAEg58+ft7v8Hpbjzz//LIGBgVJQUCAdOnRQFTZayHHcuHEyceLEP/0ee7zH8k9R9bhz5w7y8vKQkJCgvKbX65GQkIDs7
GwrRla369evAwC8vLwAAHl5eaiqqlLFHx4ejpCQECX+7OxsREVFwc/PT+mTmJiIiooKnDp1SulT8xjVfaqPYenrlJqaiqSkpFoxaCG/r776CjExMXj66afh6+uL6OhofPzxx0r7uXPnUFpaqjq3u7s74uLiVDl6eHggJiZG6ZOQkAC9Xo+cnBylz6BBg2A0GlU5FhcX47fffmvQdXhU/fr1Q2ZmJk6fPg0AOH78OA4dOoRhw4ZpJseabCmfhsTSHK5fvw6dTgcPDw/N5Gc2m5GSkoK0tDRERETUarf3HM1mM7Zv346uXbsiMTERvr6+iIuLU/25yh7vsSxs6vHLL7/g3r17qn8wAPDz80NpaamVoqqb2WzGjBkz0L9/f0RGRgIASktLYTQalZtNtZrxl5aW1plfddvD+lRUVODWrVsWvU4ZGRk4evQoFi1aVKtNC/mdPXsWK1euRJcuXbBr1y5MmTIF06ZNw2effaaK8WHnLi0tha+vr6rdYDDAy8urWa5DU3N8/fXXMX78eISHh8PR0RHR0dGYMWMGkpOTNZNjTbaUT0Niaarbt29jzpw5mDBhgrLDsxbye+edd2AwGDBt2rQ62+09x7KyMty4cQPp6ekYOnQovv32W4waNQqjR49GVlaWcm57u8caGtWbbFpqaioKCgpw6NAha4fSbC5evIjp06dj9+7dcHJysnY4FmE2mxETE4O3334bABAdHY2CggKsWrUKkyZNsnJ0zWPDhg1Yt24dvvjiC0RERCA/Px8zZsxAQECAZnJsraqqqjB27FiICFauXGntcJpNXl4eli5diqNHj0Kn01k7HIswm80AgBEjRmDmzJkAgF69euH777/HqlWrMHjwYGuG98g4YlMPHx8fODg41HoC/OrVq/D397dSVLVNnToV27Ztw759+xAUFKS87u/vjzt37qC8vFzVv2b8/v7+deZX3fawPm5ubnB2drbYdcrLy0NZWRl69+4Ng8EAg8GArKwsLFu2DAaDAX5+fnadHwC0b98e3bt3V7322GOPKbMSqo//sHP7+/ujrKxM1X737l1cu3atWa5DU3NMS0tTRm2ioqKQkpKCmTNnKqNwWsixJlvKpyGxPKrqoub8+fPYvXu3MlqjhfwOHjyIsrIyhISEKPee8+fPY9asWQgNDdVEjj4+PjAYDPXef+ztHsvCph5GoxF9+vRBZmam8prZbEZmZibi4+OtGNl9IoKpU6di8+bN2Lt3L8LCwlTtffr0gaOjoyr+4uJiXLhwQYk/Pj4eJ0+eVP0Hrb5JVf/Ax8fHq45R3af6GJa6TkOGDMHJkyeRn5+vfMTExCA5OVn53J7zA4D+/fvXmqJ/+vRpdOjQAQAQFhYGf39/1bkrKiqQk5OjyrG8vBx5eXlKn71798JsNiMuLk7pc+DAAVRVValy7NatGzw9PRt0HR7VzZs3oderbzcODg7Kb4xayLEmW8qnIbE8iuqipqSkBHv27IG3t7eq3d7zS0lJwYkTJ1T3noCAAKSlpWHXrl2ayNFoNCI2Nvah9x+7fA9p1KPGrVRGRoaYTCZZu3atFBYWyssvvyweHh6qJ8CtZcqUKeLu7i779++XK1euKB83b95U+kyePFlCQkJk7969cuTIEYmPj5f4+HilvXqq3hNPPCH5+fmyc+dOadeuXZ1T9dLS0qSoqEhWrFhR51S9lrhONWdFaSG/3NxcMRgMsnDhQikpKZF169aJi4uLfP7550qf9PR08fDwkK1bt8qJEydkxIgRdU4djo6OlpycHDl06JB06dJFNe20vLxc/Pz8JCUlRQoKCiQjI0NcXFxqTTs1GAzy3nvvSVFRkSxYsKBZpntPmjRJAgMDlenemzZtEh8fH5k9e7bd5lhZWSnHjh2TY8eOCQB5//335dixY8qsIFvKpyGxNCa/O3fuyPDhwyUoKEjy8/NV956as39sOb+G/Bs+6MFZUVrIcdOmTeLo6CirV6+WkpISZRr2wYMHlWPY2z2Wh
U0DLV++XEJCQsRoNErfvn3l8OHD1g5JRO5P36vrY82aNUqfW7duyauvviqenp7i4uIio0aNkitXrqiO89NPP8mwYcPE2dlZfHx8ZNasWVJVVaXqs2/fPunVq5cYjUbp2LGj6hzVWuI6PVjYaCG/r7/+WiIjI8VkMkl4eLisXr1a1W42m2X+/Pni5+cnJpNJhgwZIsXFxao+v/76q0yYMEHatm0rbm5u8vzzz0tlZaWqz/Hjx2XAgAFiMpkkMDBQ0tPTa8WyYcMG6dq1qxiNRomIiJDt27c3Ob+KigqZPn26hISEiJOTk3Ts2FHmzp2rehO0txz37dtX5/+9SZMm2Vw+DYmlMfmdO3fuT+89+/bts4v86suxLnUVNlrI8ZNPPpHOnTuLk5OT9OzZU7Zs2aI6hr3dY3UiNZb+JCIiIrJjfMaGiIiINIOFDREREWkGCxsiIiLSDBY2REREpBksbIiIiEgzWNgQERGRZrCwISIiIs1gYUNERESawcKGiIiINIOFDREREWkGCxsiIiLSjP8FWiSCCrN7/dwAAAAASUVORK5CYII=\n" + }, + "metadata": {} + } + ], + "source": [ + "import random\n", + "@speechbrain.utils.data_pipeline.takes(\"file_path\")\n", + "@speechbrain.utils.data_pipeline.provides(\"sig\", \"rand_gain_sig\", \"offset_sig\")\n", + "def audio_pipeline(file_path):\n", + " sig = speechbrain.dataio.dataio.read_audio(file_path)\n", + " yield sig\n", + " rand_gain_sig = random.random()*sig\n", + " yield rand_gain_sig\n", + " offset_sig = sig + 1\n", + " yield offset_sig\n", + "\n", + "\n", + "speechbrain.dataio.dataset.add_dynamic_item([dataset], audio_pipeline)\n", + "speechbrain.dataio.dataset.set_output_keys(\n", + " [dataset], [\"sig\", \"rand_gain_sig\", \"offset_sig\"],\n", + " )\n", + "\n", + "plt.figure(1)\n", + "plt.title(\"Sig item\")\n", + "plt.plot(dataset[0][\"sig\"])\n", + "\n", + "plt.title(\"Sig item with random gain\")\n", + "plt.plot(dataset[0][\"rand_gain_sig\"])\n", + "\n", + "plt.title(\"Sig item offset\")\n", + "plt.plot(dataset[0][\"offset_sig\"])\n", + "plt.legend([\"sig\", \"rand_gain_sig\", \"offset_sig\"])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7tzJSmbT7kT_" + }, + "source": [ + "This toy example demonstrates that multiple items can be fetched from the same pipeline and dynamically created items can depend on other dynamically created items (`offset_sig` depends on `sig`)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "et85McvM9Lhu" + }, + "source": [ + "But dynamic items can also depend on dynamically created items from another, pre-specified pipeline:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 88, + "metadata": { + "id": "f0EYYxh670AI", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "84f82610-9a72-4d4e-a73a-cfe57db57951" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[0.00201416015625,\n", + " 0.0006103515625,\n", + " 0.0003662109375,\n", + " 0.001129150390625,\n", + " 0.000946044921875,\n", + " 0.0001220703125,\n", + " -0.000732421875,\n", + " 0.00164794921875,\n", + " 0.002685546875,\n", + " 0.000457763671875]" + ] + }, + "metadata": {}, + "execution_count": 88 + } + ], + "source": [ + "@speechbrain.utils.data_pipeline.takes(\"sig\")\n", + "@speechbrain.utils.data_pipeline.provides(\"sig_as_python_list\")\n", + "def to_list_pipeline(sig):\n", + " yield sig.numpy().tolist()\n", + "\n", + "\n", + "speechbrain.dataio.dataset.add_dynamic_item([dataset], to_list_pipeline)\n", + "speechbrain.dataio.dataset.set_output_keys(\n", + " [dataset], [\"sig_as_python_list\"],\n", + " )\n", + "dataset[0][\"sig_as_python_list\"][:10]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HcTAPOk_-mOP" + }, + "source": [ + "In this example we have defined a new pipeline which takes `sig` and turns it from `torch.tensor` to a python list obtaining a new dynamic item `sig_as_python_list`.\n", + "\n", + "\n", + "**NOTE**\n", + "\n", + "\n", + "Since we are requesting in the output only `sig_as_python_list` which depends itself from `sig`, dynamic items `offset_sig` and `rand_gain_sig` are not computed at all. 
Only `sig` is computed implicitly as it is necessary to obtain `sig_as_python_list`.\n", + "\n", + "\n", + "In fact under the hood `DynamicItemDataset` finds a suitable evaluation\n", + "order for the requested items by constructing a **computational graph** defined by the **pipelines**.\n", + "\n", + "**An error is returned if any circular dependency is present between the pipelines**.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eFLiGm6xh3tZ" + }, + "source": [ + "A **DIP** can also take multiple items/annotation keys in input, the syntax is the same as for the output items:" + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "metadata": { + "id": "HH9WhV1ih28l", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "9acd2f96-aee4-4d7c-cfde-48a2e837dfaf" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'sig': tensor([ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029]),\n", + " 'spkidstring': 3576}" + ] + }, + "metadata": {}, + "execution_count": 89 + } + ], + "source": [ + "@speechbrain.utils.data_pipeline.takes(\"file_path\", \"spkID\")\n", + "@speechbrain.utils.data_pipeline.provides(\"sig\", \"spkidstring\")\n", + "def multiple_dip(file_path, spkID):\n", + " sig = speechbrain.dataio.dataio.read_audio(file_path)\n", + " yield sig\n", + " yield spkID\n", + "\n", + "speechbrain.dataio.dataset.add_dynamic_item([dataset], multiple_dip)\n", + "speechbrain.dataio.dataset.set_output_keys(\n", + " [dataset], [\"sig\", \"spkidstring\"],\n", + " )\n", + "dataset[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 90, + "metadata": { + "id": "YPN3AXaVjQka", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "0e6e9be7-d8dc-4e24-9906-30f0d8db62a4" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'sig_tuple': (tensor([ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029]),\n", + " 3576)}" + ] + }, 
+ "metadata": {}, + "execution_count": 90 + } + ], + "source": [ + "@speechbrain.utils.data_pipeline.takes(\"file_path\", \"spkID\")\n", + "@speechbrain.utils.data_pipeline.provides(\"sig_tuple\")\n", + "def multiple_dip(file_path, spkID):\n", + " sig = speechbrain.dataio.dataio.read_audio(file_path)\n", + " yield sig, spkID\n", + "\n", + "\n", + "speechbrain.dataio.dataset.add_dynamic_item([dataset], multiple_dip)\n", + "speechbrain.dataio.dataset.set_output_keys(\n", + " [dataset], [\"sig_tuple\"],\n", + " )\n", + "dataset[0] # sig now is a tuple" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uXKHONsvTDTs" + }, + "source": [ + "And also the same **DIP** can be used in multiple datasets.\n", + "E.g. you want usually the read audio **DIP** to be the same for validation, training and test:" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "metadata": { + "id": "wGLev2Q1U534", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "36abdd93-2c15-4c80-ffd9-3c17c48d7711" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'id': '3576-138058-0019',\n", + " 'signal': tensor([ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029]),\n", + " 'words': 'GIVE ME MY HORSE AND ARMS AND WAIT FOR ME HERE I WILL GO IN QUEST OF THIS KNIGHT AND DEAD OR ALIVE I WILL MAKE HIM KEEP HIS WORD PLIGHTED TO SO GREAT BEAUTY'}" + ] + }, + "metadata": {}, + "execution_count": 91 + } + ], + "source": [ + "validation = DynamicItemDataset.from_json(\"data.json\")\n", + "train = DynamicItemDataset.from_json(\"data.json\")\n", + "\n", + "speechbrain.dataio.dataset.add_dynamic_item([validation, train], speechbrain.dataio.dataio.read_audio, takes=\"file_path\", provides=\"signal\")\n", + "speechbrain.dataio.dataset.set_output_keys([validation, train], [\"id\", \"signal\", \"words\"])\n", + "validation[0]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 92, + "metadata": { + "id": "JVrWPCG1VZ_t", + 
"colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "72000543-9158-4570-97d6-87402a6d388c" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'id': '3576-138058-0019',\n", + " 'signal': tensor([ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029]),\n", + " 'words': 'GIVE ME MY HORSE AND ARMS AND WAIT FOR ME HERE I WILL GO IN QUEST OF THIS KNIGHT AND DEAD OR ALIVE I WILL MAKE HIM KEEP HIS WORD PLIGHTED TO SO GREAT BEAUTY'}" + ] + }, + "metadata": {}, + "execution_count": 92 + } + ], + "source": [ + "train[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ihy-XYoDNWMg" + }, + "source": [ + "#### Cached pipeline\n", + "One last thing you can do with pipelines is to cache the result if the computation is expensive and static. For example, if you are using frozen deep embeddings, these may be a good candidate for caching. Here, we simply cache the feature extraction, although it doesn't add much time to the overall pipeline." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 93, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "IOCmOZTyNWMg", + "outputId": "2b61f905-65c6-416a-8ea6-50ff1fcfe1f3" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "3576-138058-0019 torch.Size([994, 40])\n", + "3576-138058-0021 torch.Size([619, 40])\n" + ] + } + ], + "source": [ + "filterbank = speechbrain.lobes.features.Fbank()\n", + "\n", + "# Must take \"id\" for storing and retrieving from the cache\n", + "@speechbrain.utils.data_pipeline.CachedDynamicItem.cache(\"feature_cache\")\n", + "@speechbrain.utils.data_pipeline.takes(\"id\", \"sig\")\n", + "@speechbrain.utils.data_pipeline.provides(\"feats\")\n", + "def feature_pipeline(uid, sig):\n", + " # Fake batch dimension -- data items are singular\n", + " return filterbank(sig.unsqueeze(0)).squeeze(0)\n", + "\n", + "speechbrain.dataio.dataset.add_dynamic_item([dataset], feature_pipeline)\n", + "speechbrain.dataio.dataset.set_output_keys([dataset], [\"id\", \"feats\"])\n", + "print(dataset[0][\"id\"], dataset[0][\"feats\"].shape)\n", + "print(dataset[1][\"id\"], dataset[1][\"feats\"].shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QLvUzBGtNWMg" + }, + "source": [ + "The results of the dynamic items are stored in torch format, one file per uid:" + ] + }, + { + "cell_type": "code", + "execution_count": 94, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "2y4-8bJLNWMg", + "outputId": "8ca40891-98d9-4d31-e356-a64eeef19fb6" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "['5694-64038-0005.pt', '1988-24833-0004.pt', '6295-244435-0018.pt', '5895-34622-0014.pt', '3576-138058-0001.pt', '1988-24833-0025.pt', '5895-34629-0025.pt', '2803-154320-0005.pt', '5338-284437-0006.pt', '1462-170145-0019.pt', '6295-244435-0040.pt', '7850-281318-0007.pt', '5338-284437-0025.pt', '2803-161169-0005.pt', 
'5895-34629-0001.pt', '5895-34615-0014.pt', '777-126732-0019.pt', '8297-275156-0010.pt', '251-136532-0003.pt', '2412-153948-0013.pt', '5895-34622-0022.pt', '1462-170145-0016.pt', '1272-135031-0019.pt', '2803-154320-0013.pt', '5338-284437-0009.pt', '6319-57405-0012.pt', '8842-304647-0002.pt', '3752-4944-0054.pt', '3536-23268-0008.pt', '6241-61946-0019.pt', '7976-110523-0013.pt', '2803-154320-0000.pt', '7976-110523-0006.pt', '3752-4944-0011.pt', '3752-4944-0038.pt', '5338-284437-0004.pt', '777-126732-0057.pt', '3536-23268-0000.pt', '1462-170142-0028.pt', '3752-4944-0039.pt', '1988-147956-0007.pt', '1988-24833-0028.pt', '251-118436-0018.pt', '2428-83699-0028.pt', '3576-138058-0035.pt', '1988-24833-0008.pt', '5895-34629-0002.pt', '3536-23268-0022.pt', '2428-83699-0041.pt', '3000-15664-0035.pt', '2428-83699-0023.pt', '3000-15664-0024.pt', '6295-244435-0000.pt', '6241-61946-0021.pt', '2035-147961-0002.pt', '7850-281318-0011.pt', '2412-153948-0015.pt', '1272-135031-0001.pt', '777-126732-0039.pt', '1462-170145-0005.pt', '5895-34629-0019.pt', '2428-83699-0024.pt', '2035-152373-0007.pt', '777-126732-0066.pt', '7850-286674-0012.pt', '3752-4944-0006.pt', '777-126732-0073.pt', '1272-141231-0010.pt', '2035-147961-0021.pt', '84-121550-0032.pt', '7976-110523-0003.pt', '84-121550-0001.pt', '174-168635-0007.pt', '1272-141231-0022.pt', '2412-153948-0000.pt', '3536-23268-0023.pt', '8842-304647-0005.pt', '2803-161169-0009.pt', '5895-34629-0030.pt', '2035-147960-0001.pt', '6319-57405-0000.pt', '3000-15664-0023.pt', '2428-83699-0029.pt', '8297-275156-0009.pt', '777-126732-0058.pt', '7850-286674-0017.pt', '1272-135031-0009.pt', '5895-34622-0020.pt', '7976-110523-0018.pt', '3576-138058-0028.pt', '2428-83699-0042.pt', '3536-23268-0025.pt', '8297-275156-0006.pt', '777-126732-0063.pt', '1988-24833-0007.pt', '5338-24640-0001.pt', '777-126732-0023.pt', '1462-170142-0006.pt', '2428-83699-0001.pt', '6241-61946-0001.pt', '174-168635-0012.pt', '251-118436-0014.pt', '6295-244435-0033.pt', 
'3752-4944-0004.pt', '251-136532-0006.pt', '777-126732-0070.pt', '5895-34622-0017.pt', '1988-24833-0024.pt', '3576-138058-0014.pt', '1462-170142-0010.pt', '1993-147964-0004.pt', '2035-147960-0016.pt', '84-121550-0014.pt', '1988-147956-0018.pt', '2035-152373-0014.pt', '6241-61943-0005.pt', '1272-141231-0028.pt', '5895-34615-0015.pt', '3536-23268-0005.pt', '1462-170142-0041.pt', '777-126732-0022.pt', '3576-138058-0036.pt', '1462-170145-0010.pt', '6295-244435-0010.pt', '1462-170142-0011.pt', '3000-15664-0009.pt', '1462-170145-0021.pt', '7850-281318-0019.pt', '3000-15664-0014.pt', '3752-4944-0002.pt', '3576-138058-0022.pt', '5338-24640-0007.pt', '3576-138058-0017.pt', '3000-15664-0031.pt', '3752-4944-0040.pt', '777-126732-0055.pt', '6241-61946-0012.pt', '2803-161169-0002.pt', '7850-281318-0014.pt', '2035-152373-0002.pt', '5694-64038-0000.pt', '5694-64038-0002.pt', '6241-61946-0008.pt', '7850-286674-0007.pt', '84-121550-0012.pt', '1988-24833-0002.pt', '2803-154320-0011.pt', '5895-34622-0023.pt', '251-136532-0022.pt', '2428-83699-0010.pt', '5895-34622-0003.pt', '7850-286674-0003.pt', '5694-64038-0011.pt', '5694-64038-0016.pt', '3536-23268-0024.pt', '3576-138058-0024.pt', '6241-61943-0027.pt', '1462-170142-0005.pt', '6319-57405-0004.pt', '1988-147956-0004.pt', '5338-284437-0029.pt', '1988-147956-0006.pt', '1462-170142-0000.pt', '3000-15664-0029.pt', '84-121550-0003.pt', '777-126732-0048.pt', '1993-147964-0005.pt', '5895-34622-0004.pt', '5895-34629-0013.pt', '7850-281318-0016.pt', '777-126732-0021.pt', '3536-23268-0002.pt', '6295-244435-0034.pt', '8842-304647-0013.pt', '3752-4944-0059.pt', '5338-284437-0020.pt', '1462-170142-0001.pt', '1988-24833-0013.pt', '6295-244435-0012.pt', '1272-141231-0018.pt', '6241-61943-0002.pt', '3752-4944-0056.pt', '251-136532-0014.pt', '2428-83699-0005.pt', '7976-110523-0005.pt', '8297-275156-0007.pt', '1988-24833-0019.pt', '1462-170142-0026.pt', '3000-15664-0022.pt', '8842-304647-0012.pt', '7850-281318-0006.pt', '3752-4944-0013.pt', 
'1988-147956-0020.pt', '5338-284437-0010.pt', '1988-24833-0014.pt', '251-136532-0023.pt', '3000-15664-0034.pt', '84-121550-0018.pt', '6295-244435-0038.pt', '174-168635-0000.pt', '5895-34629-0024.pt', '2803-161169-0010.pt', '1272-135031-0008.pt', '7976-110523-0014.pt', '2803-154320-0006.pt', '1993-147964-0007.pt', '2035-147961-0010.pt', '3576-138058-0009.pt', '2035-147961-0014.pt', '6241-61943-0023.pt', '5895-34615-0021.pt', '251-136532-0005.pt', '1272-141231-0009.pt', '3536-23268-0009.pt', '2428-83699-0004.pt', '3576-138058-0016.pt', '7976-110523-0010.pt', '84-121550-0010.pt', '1462-170142-0025.pt', '1462-170142-0029.pt', '5694-64038-0021.pt', '2428-83699-0032.pt', '3752-4944-0025.pt', '2035-147961-0016.pt', '3752-4944-0010.pt', '251-136532-0018.pt', '251-136532-0013.pt', '5338-24640-0005.pt', '2803-154320-0001.pt', '1988-147956-0014.pt', '777-126732-0046.pt', '2803-154320-0007.pt', '1272-141231-0000.pt', '1993-147964-0006.pt', '3752-4944-0052.pt', '1462-170142-0018.pt', '3000-15664-0025.pt', '1993-147964-0009.pt', '5338-284437-0003.pt', '2035-147961-0039.pt', '3000-15664-0033.pt', '1272-135031-0021.pt', '1462-170142-0004.pt', '3752-4944-0036.pt', '5895-34629-0026.pt', '777-126732-0027.pt', '6241-61943-0007.pt', '3752-4944-0065.pt', '5338-284437-0014.pt', '3536-23268-0001.pt', '3752-4944-0050.pt', '3752-4944-0034.pt', '2035-147961-0007.pt', '3000-15664-0013.pt', '5895-34622-0000.pt', '1272-135031-0018.pt', '777-126732-0031.pt', '3000-15664-0045.pt', '1462-170145-0007.pt', '3576-138058-0007.pt', '2035-147961-0023.pt', '6295-244435-0011.pt', '5338-284437-0024.pt', '2803-154320-0004.pt', '3752-4944-0017.pt', '174-168635-0014.pt', '6241-61943-0011.pt', '5694-64038-0025.pt', '2803-161169-0004.pt', '6295-244435-0023.pt', '5895-34615-0001.pt', '1462-170142-0008.pt', '1993-147964-0010.pt', '3576-138058-0025.pt', '7976-110523-0001.pt', '7850-286674-0016.pt', '3576-138058-0015.pt', '5694-64038-0001.pt', '777-126732-0047.pt', '174-168635-0010.pt', '3752-4944-0005.pt', 
'1988-24833-0022.pt', '1462-170145-0011.pt', '5895-34629-0022.pt', '1272-141231-0019.pt', '5895-34615-0003.pt', '7850-286674-0006.pt', '7850-281318-0001.pt', '174-168635-0002.pt', '1462-170142-0022.pt', '8297-275156-0000.pt', '6241-61946-0000.pt', '3000-15664-0030.pt', '5895-34615-0004.pt', '2803-161169-0017.pt', '2428-83699-0036.pt', '777-126732-0061.pt', '2428-83699-0008.pt', '2412-153948-0012.pt', '2428-83699-0021.pt', '2035-152373-0016.pt', '5338-24640-0004.pt', '7850-281318-0023.pt', '2035-147961-0036.pt', '2412-153948-0002.pt', '7976-110523-0009.pt', '2803-161169-0013.pt', '2035-152373-0015.pt', '5694-64038-0003.pt', '2035-147961-0038.pt', '3576-138058-0018.pt', '1462-170145-0022.pt', '6241-61946-0011.pt', '251-118436-0023.pt', '777-126732-0081.pt', '2035-147960-0015.pt', '6241-61946-0007.pt', '2803-161169-0011.pt', '2035-152373-0000.pt', '777-126732-0005.pt', '251-136532-0017.pt', '777-126732-0053.pt', '174-168635-0011.pt', '777-126732-0051.pt', '3536-23268-0014.pt', '2035-147961-0027.pt', '5895-34629-0005.pt', '6295-244435-0026.pt', '3000-15664-0002.pt', '251-136532-0015.pt', '2428-83699-0006.pt', '3752-4944-0026.pt', '1272-141231-0027.pt', '7850-286674-0002.pt', '1272-135031-0005.pt', '5895-34629-0015.pt', '7850-286674-0005.pt', '2035-147960-0006.pt', '6295-244435-0013.pt', '3000-15664-0010.pt', '2035-152373-0001.pt', '5895-34629-0009.pt', '2035-147960-0000.pt', '7976-110523-0002.pt', '3576-138058-0012.pt', '1272-135031-0016.pt', '251-118436-0000.pt', '1462-170142-0017.pt', '8842-304647-0004.pt', '174-168635-0022.pt', '6241-61946-0010.pt', '5338-284437-0015.pt', '777-126732-0062.pt', '8842-304647-0010.pt', '1462-170142-0002.pt', '777-126732-0012.pt', '2428-83699-0009.pt', '2035-147960-0010.pt', '3752-4944-0001.pt', '2428-83699-0000.pt', '2803-154320-0012.pt', '2803-154320-0002.pt', '3576-138058-0003.pt', '3752-4944-0061.pt', '3000-15664-0038.pt', '1272-141231-0013.pt', '7976-110523-0004.pt', '1462-170145-0015.pt', '5338-284437-0011.pt', 
'5895-34629-0027.pt', '1462-170142-0032.pt', '2035-147961-0013.pt', '1462-170145-0018.pt', '777-126732-0052.pt', '8297-275156-0003.pt', '8842-304647-0001.pt', '6241-61943-0008.pt', '5338-284437-0018.pt', '84-121550-0004.pt', '3536-23268-0013.pt', '2803-161169-0015.pt', '3576-138058-0011.pt', '6295-244435-0002.pt', '6241-61943-0012.pt', '1462-170142-0033.pt', '2035-147960-0009.pt', '6295-244435-0039.pt', '6241-61946-0020.pt', '2035-147961-0015.pt', '3752-4944-0067.pt', '84-121550-0020.pt', '3576-138058-0010.pt', '174-168635-0020.pt', '6295-244435-0024.pt', '3536-23268-0029.pt', '3752-4944-0000.pt', '251-118436-0003.pt', '3752-4944-0043.pt', '174-168635-0018.pt', '6295-244435-0025.pt', '84-121550-0028.pt', '777-126732-0028.pt', '777-126732-0038.pt', '5338-284437-0012.pt', '5338-284437-0032.pt', '777-126732-0060.pt', '6295-244435-0019.pt', '3000-15664-0015.pt', '2035-152373-0003.pt', '6241-61943-0016.pt', '251-118436-0010.pt', '6295-244435-0016.pt', '2035-147961-0020.pt', '6295-244435-0005.pt', '2428-83699-0034.pt', '777-126732-0045.pt', '3536-23268-0018.pt', '1272-141231-0004.pt', '777-126732-0030.pt', '2035-152373-0005.pt', '5338-284437-0002.pt', '1272-135031-0013.pt', '1462-170142-0009.pt', '6241-61946-0004.pt', '2035-152373-0006.pt', '84-121550-0025.pt', '5895-34615-0007.pt', '1988-147956-0017.pt', '2428-83699-0035.pt', '777-126732-0068.pt', '2035-152373-0017.pt', '5338-24640-0009.pt', '1988-147956-0021.pt', '3752-4944-0069.pt', '174-168635-0009.pt', '6295-244435-0030.pt', '6319-57405-0007.pt', '251-136532-0020.pt', '5895-34629-0000.pt', '84-121550-0033.pt', '1988-24833-0020.pt', '3752-4944-0016.pt', '5694-64038-0012.pt', '2035-147960-0004.pt', '6241-61946-0005.pt', '2035-147961-0005.pt', '1462-170142-0016.pt', '84-121550-0026.pt', '7850-286674-0000.pt', '251-136532-0007.pt', '6295-244435-0009.pt', '1988-147956-0012.pt', '2428-83699-0019.pt', '2428-83699-0016.pt', '3576-138058-0020.pt', '5895-34615-0010.pt', '84-121550-0008.pt', '251-118436-0009.pt', 
'1462-170142-0040.pt', '2035-152373-0010.pt', '7976-110523-0008.pt', '251-118436-0013.pt', '251-118436-0017.pt', '174-168635-0019.pt', '1993-147964-0003.pt', '777-126732-0067.pt', '1988-147956-0015.pt', '6241-61946-0006.pt', '8842-304647-0003.pt', '251-118436-0011.pt', '5895-34629-0031.pt', '5694-64038-0004.pt', '5694-64038-0008.pt', '1462-170142-0031.pt', '1272-141231-0021.pt', '6241-61943-0006.pt', '5895-34615-0017.pt', '1272-135031-0000.pt', '6295-244435-0032.pt', '3000-15664-0018.pt', '6241-61943-0026.pt', '5338-284437-0000.pt', '5895-34622-0009.pt', '2035-147961-0011.pt', '1988-24833-0003.pt', '2412-153948-0003.pt', '1462-170142-0020.pt', '5895-34622-0006.pt', '5895-34622-0016.pt', '6241-61943-0025.pt', '1272-141231-0003.pt', '5895-34629-0006.pt', '3536-23268-0012.pt', '1988-24833-0011.pt', '6319-57405-0011.pt', '3536-23268-0010.pt', '777-126732-0026.pt', '5895-34629-0029.pt', '3576-138058-0027.pt', '1272-141231-0023.pt', '8842-304647-0007.pt', '6241-61943-0022.pt', '5895-34615-0006.pt', '251-118436-0004.pt', '251-136532-0000.pt', '6241-61946-0014.pt', '777-126732-0075.pt', '1272-141231-0017.pt', '5895-34615-0016.pt', '3576-138058-0019.pt', '2428-83699-0012.pt', '1462-170142-0012.pt', '3752-4944-0048.pt', '84-121550-0002.pt', '6241-61943-0015.pt', '3000-15664-0046.pt', '3752-4944-0062.pt', '2803-161169-0012.pt', '3000-15664-0008.pt', '251-118436-0021.pt', '777-126732-0059.pt', '8842-304647-0008.pt', '5895-34615-0002.pt', '6319-57405-0010.pt', '5895-34622-0002.pt', '1993-147964-0001.pt', '2412-153948-0014.pt', '3000-15664-0012.pt', '7976-110523-0015.pt', '2428-83699-0013.pt', '6241-61946-0018.pt', '6295-244435-0020.pt', '6295-244435-0035.pt', '5338-284437-0022.pt', '1462-170142-0023.pt', '251-136532-0012.pt', '777-126732-0037.pt', '1462-170142-0034.pt', '8297-275156-0004.pt', '777-126732-0016.pt', '6241-61943-0004.pt', '1462-170142-0019.pt', '1272-141231-0026.pt', '5895-34615-0011.pt', '5895-34629-0021.pt', '2428-83699-0003.pt', '1272-135031-0012.pt', 
'3000-15664-0037.pt', '2035-147961-0012.pt', '7850-281318-0021.pt', '5895-34629-0010.pt', '84-121550-0035.pt', '7850-286674-0011.pt', '1988-147956-0008.pt', '7976-110523-0017.pt', '8297-275156-0013.pt', '2412-153948-0008.pt', '5694-64038-0006.pt', '3536-23268-0006.pt', '2803-154320-0003.pt', '777-126732-0069.pt', '1272-135031-0024.pt', '7976-110523-0011.pt', '2803-161169-0003.pt', '777-126732-0034.pt', '5895-34622-0012.pt', '6295-244435-0007.pt', '2035-147961-0008.pt', '84-121550-0015.pt', '1272-141231-0016.pt', '6241-61946-0009.pt', '2035-147961-0034.pt', '251-118436-0008.pt', '3536-23268-0030.pt', '3752-4944-0046.pt', '6241-61946-0002.pt', '1988-147956-0029.pt', '777-126732-0014.pt', '1272-141231-0030.pt', '5338-284437-0030.pt', '1272-141231-0031.pt', '777-126732-0024.pt', '7850-286674-0010.pt', '3576-138058-0021.pt', '3576-138058-0008.pt', '777-126732-0079.pt', '6241-61946-0003.pt', '174-168635-0008.pt', '5895-34615-0005.pt', '3752-4944-0051.pt', '251-136532-0002.pt', '2035-147961-0019.pt', '1988-24833-0018.pt', '7850-281318-0008.pt', '3752-4944-0057.pt', '6295-244435-0037.pt', '3752-4944-0031.pt', '5338-284437-0026.pt', '1988-24833-0006.pt', '5895-34629-0028.pt', '251-136532-0019.pt', '3752-4944-0018.pt', '777-126732-0008.pt', '174-168635-0004.pt', '1272-135031-0003.pt', '8297-275156-0011.pt', '251-136532-0016.pt', '2035-147960-0013.pt', '777-126732-0040.pt', '1988-24833-0026.pt', '3536-23268-0028.pt', '1462-170145-0000.pt', '2428-83699-0033.pt', '3576-138058-0031.pt', '2428-83699-0037.pt', '1462-170145-0009.pt', '3536-23268-0027.pt', '1988-24833-0023.pt', '8842-304647-0009.pt', '7850-281318-0018.pt', '3752-4944-0041.pt', '2035-147961-0009.pt', '1988-147956-0001.pt', '5895-34629-0020.pt', '2428-83699-0026.pt', '5338-284437-0027.pt', '3576-138058-0029.pt', '5895-34629-0007.pt', '3576-138058-0033.pt', '3576-138058-0034.pt', '3000-15664-0042.pt', '84-121550-0006.pt', '7976-110523-0007.pt', '5694-64038-0010.pt', '777-126732-0015.pt', '5895-34622-0011.pt', 
'1272-141231-0007.pt', '84-121550-0034.pt', '5338-24640-0002.pt', '2412-153948-0001.pt', '5338-284437-0023.pt', '6295-244435-0031.pt', '6241-61943-0021.pt', '251-136532-0008.pt', '5338-284437-0031.pt', '84-121550-0013.pt', '3536-23268-0016.pt', '3752-4944-0035.pt', '777-126732-0054.pt', '2428-83699-0015.pt', '84-121550-0023.pt', '6295-244435-0001.pt', '7850-286674-0009.pt', '3576-138058-0013.pt', '1462-170142-0036.pt', '777-126732-0001.pt', '6241-61943-0000.pt', '3536-23268-0003.pt', '1988-147956-0028.pt', '1272-141231-0014.pt', '7850-286674-0014.pt', '84-121550-0021.pt', '6241-61946-0023.pt', '2035-147960-0007.pt', '5338-284437-0021.pt', '84-121550-0029.pt', '2035-147961-0017.pt', '7850-281318-0017.pt', '5895-34622-0013.pt', '84-121550-0009.pt', '1272-135031-0014.pt', '8842-304647-0000.pt', '1988-147956-0025.pt', '5694-64038-0009.pt', '1988-147956-0002.pt', '5895-34615-0019.pt', '3752-4944-0022.pt', '2428-83699-0025.pt', '777-126732-0010.pt', '2035-152373-0018.pt', '7850-281318-0005.pt', '3752-4944-0003.pt', '2412-153948-0005.pt', '6241-61943-0024.pt', '2803-154320-0010.pt', '1272-141231-0005.pt', '84-121550-0027.pt', '3000-15664-0027.pt', '777-126732-0076.pt', '1272-135031-0022.pt', '1272-135031-0020.pt', '5895-34622-0010.pt', '5895-34629-0004.pt', '3000-15664-0040.pt', '1272-135031-0011.pt', '3752-4944-0045.pt', '5694-64038-0023.pt', '777-126732-0013.pt', '3000-15664-0016.pt', '777-126732-0000.pt', '6241-61946-0015.pt', '2412-153948-0007.pt', '5895-34622-0008.pt', '1272-141231-0032.pt', '2035-152373-0013.pt', '6319-57405-0005.pt', '2428-83699-0027.pt', '3000-15664-0006.pt', '2035-147960-0012.pt', '2428-83699-0039.pt', '8297-275156-0002.pt', '2428-83699-0020.pt', '1462-170145-0004.pt', '251-118436-0015.pt', '1272-135031-0007.pt', '5694-64038-0007.pt', '6241-61943-0017.pt', '5895-34615-0012.pt', '3752-4944-0066.pt', '84-121550-0030.pt', '251-118436-0019.pt', '3752-4944-0033.pt', '777-126732-0049.pt', '5895-34615-0018.pt', '5895-34622-0021.pt', '3752-4944-0023.pt', 
'5895-34629-0018.pt', '777-126732-0071.pt', '1272-141231-0008.pt', '3752-4944-0015.pt', '777-126732-0011.pt', '777-126732-0020.pt', '2803-161169-0007.pt', '3576-138058-0004.pt', '3000-15664-0011.pt', '2035-147961-0006.pt', '174-168635-0021.pt', '2803-154320-0014.pt', '777-126732-0035.pt', '3000-15664-0026.pt', '2803-161169-0000.pt', '6241-61943-0003.pt', '3752-4944-0053.pt', '2035-147961-0030.pt', '6295-244435-0008.pt', '5694-64038-0013.pt', '1462-170142-0039.pt', '3752-4944-0029.pt', '7850-281318-0003.pt', '6241-61943-0013.pt', '5694-64038-0022.pt', '5895-34615-0020.pt', '3576-138058-0037.pt', '2803-161169-0008.pt', '777-126732-0042.pt', '84-121550-0000.pt', '3000-15664-0021.pt', '5338-284437-0033.pt', '1272-135031-0017.pt', '6241-61943-0019.pt', '3752-4944-0007.pt', '5694-64038-0024.pt', '6241-61946-0022.pt', '1462-170142-0015.pt', '8297-275156-0008.pt', '3752-4944-0027.pt', '1462-170142-0038.pt', '5694-64038-0017.pt', '1988-24833-0021.pt', '84-121550-0016.pt', '777-126732-0074.pt', '5338-24640-0003.pt', '2428-83699-0018.pt', '777-126732-0004.pt', '2035-147961-0040.pt', '5895-34629-0032.pt', '3000-15664-0032.pt', '6241-61943-0001.pt', '777-126732-0032.pt', '251-118436-0005.pt', '3000-15664-0036.pt', '777-126732-0018.pt', '2428-83699-0014.pt', '6241-61946-0013.pt', '7850-281318-0009.pt', '5895-34622-0001.pt', '777-126732-0056.pt', '2035-147960-0014.pt', '6295-244435-0036.pt', '777-126732-0078.pt', '251-136532-0001.pt', '6319-57405-0003.pt', '6295-244435-0006.pt', '251-136532-0010.pt', '2428-83699-0011.pt', '7850-286674-0008.pt', '1272-141231-0025.pt', '3576-138058-0000.pt', '5895-34615-0008.pt', '3752-4944-0021.pt', '251-118436-0012.pt', '1462-170142-0013.pt', '5895-34629-0016.pt', '1988-147956-0009.pt', '5694-64038-0019.pt', '84-121550-0007.pt', '6241-61946-0016.pt', '3576-138058-0030.pt', '8842-304647-0006.pt', '1993-147964-0000.pt', '2035-147961-0001.pt', '251-118436-0016.pt', '1462-170145-0013.pt', '5338-284437-0017.pt', '6319-57405-0001.pt', 
'1272-141231-0020.pt', '6295-244435-0022.pt', '1988-147956-0000.pt', '777-126732-0033.pt', '1988-147956-0003.pt', '7850-281318-0000.pt', '2412-153948-0004.pt', '1988-147956-0023.pt', '8297-275156-0005.pt', '1462-170145-0008.pt', '1272-141231-0001.pt', '1272-135031-0010.pt', '6295-244435-0003.pt', '251-136532-0004.pt', '5895-34629-0014.pt', '3752-4944-0014.pt', '5895-34629-0012.pt', '7850-281318-0012.pt', '777-126732-0065.pt', '1462-170142-0024.pt', '3536-23268-0026.pt', '1988-147956-0005.pt', '7976-110523-0000.pt', '174-168635-0006.pt', '1988-24833-0016.pt', '1272-141231-0015.pt', '1462-170145-0020.pt', '3752-4944-0020.pt', '2803-161169-0016.pt', '777-126732-0044.pt', '1988-147956-0011.pt', '174-168635-0003.pt', '5694-64038-0018.pt', '2035-147960-0003.pt', '5895-34615-0009.pt', '3536-23268-0015.pt', '3752-4944-0030.pt', '3752-4944-0009.pt', '7976-110523-0021.pt', '174-168635-0015.pt', '7850-281318-0015.pt', '7850-281318-0022.pt', '5895-34629-0023.pt', '7976-110523-0020.pt', '6295-244435-0028.pt', '3536-23268-0019.pt', '777-126732-0041.pt', '3000-15664-0007.pt', '251-118436-0020.pt', '777-126732-0064.pt', '2412-153948-0011.pt', '5895-34622-0007.pt', '5338-284437-0016.pt', '2035-147960-0005.pt', '7850-286674-0013.pt', '3752-4944-0068.pt', '3752-4944-0019.pt', '2428-83699-0022.pt', '1988-147956-0019.pt', '1462-170145-0003.pt', '174-168635-0013.pt', '3536-23268-0020.pt', '3576-138058-0006.pt', '3752-4944-0008.pt', '3576-138058-0002.pt', '1988-147956-0010.pt', '5895-34629-0017.pt', '1462-170142-0037.pt', '1272-141231-0002.pt', '1988-24833-0010.pt', '777-126732-0002.pt', '2428-83699-0038.pt', '2035-147961-0037.pt', '1462-170145-0017.pt', '3000-15664-0005.pt', '7850-281318-0013.pt', '7850-281318-0020.pt', '7850-281318-0002.pt', '5694-64038-0014.pt', '2035-147960-0011.pt', '84-121550-0019.pt', '3752-4944-0037.pt', '5895-34629-0011.pt', '3752-4944-0044.pt', '1988-24833-0001.pt', '3752-4944-0055.pt', '2412-153948-0006.pt', '1462-170142-0027.pt', '1272-141231-0029.pt', 
'7976-110523-0016.pt', '2035-147961-0033.pt', '2803-161169-0001.pt', '1462-170145-0006.pt', '777-126732-0077.pt', '5338-284437-0019.pt', '5694-64038-0015.pt', '3000-15664-0044.pt', '2412-153948-0010.pt', '777-126732-0029.pt', '3752-4944-0047.pt', '3576-138058-0039.pt', '2035-147961-0028.pt', '2035-147961-0025.pt', '6319-57405-0006.pt', '8842-304647-0011.pt', '777-126732-0009.pt', '1988-24833-0015.pt', '8297-275156-0012.pt', '2428-83699-0040.pt', '1462-170142-0042.pt', '777-126732-0050.pt', '251-136532-0011.pt', '3000-15664-0020.pt', '5338-24640-0000.pt', '1272-141231-0006.pt', '2803-154320-0008.pt', '6295-244435-0004.pt', '3576-138058-0005.pt', '6295-244435-0017.pt', '5895-34622-0005.pt', '7976-110523-0012.pt', '3000-15664-0039.pt', '84-121550-0022.pt', '2428-83699-0017.pt', '174-168635-0017.pt', '2035-152373-0012.pt', '2035-147961-0022.pt', '3752-4944-0049.pt', '3000-15664-0019.pt', '1462-170142-0021.pt', '2035-147961-0031.pt', '1462-170142-0003.pt', '2412-153948-0009.pt', '3000-15664-0043.pt', '1988-147956-0027.pt', '7850-286674-0001.pt', '7976-110523-0019.pt', '251-118436-0002.pt', '251-136532-0021.pt', '1272-141231-0024.pt', '6241-61943-0009.pt', '777-126732-0017.pt', '3752-4944-0063.pt', '2035-147961-0004.pt', '84-121550-0005.pt', '777-126732-0003.pt', '3752-4944-0042.pt', '5895-34622-0015.pt', '3536-23268-0007.pt', '3000-15664-0028.pt', '2803-161169-0006.pt', '84-121550-0024.pt', '5895-34622-0019.pt', '2035-152373-0009.pt', '1462-170145-0012.pt', '777-126732-0043.pt', '6241-61943-0014.pt', '5895-34615-0000.pt', '5338-284437-0005.pt', '3752-4944-0058.pt', '2035-147961-0000.pt', '174-168635-0001.pt', '6241-61943-0020.pt', '3536-23268-0017.pt', '6319-57405-0009.pt', '1988-24833-0005.pt', '2035-147961-0024.pt', '1988-147956-0024.pt', '1272-135031-0004.pt', '6241-61946-0017.pt', '777-126732-0080.pt', '3536-23268-0004.pt', '2428-83699-0007.pt', '1988-147956-0022.pt', '3576-138058-0032.pt', '7850-281318-0004.pt', '1988-147956-0013.pt', '5895-34629-0033.pt', 
'251-118436-0006.pt', '3752-4944-0032.pt', '3752-4944-0024.pt', '1272-135031-0002.pt', '5338-284437-0013.pt', '5338-284437-0028.pt', '6295-244435-0029.pt', '174-168635-0005.pt', '5895-34622-0018.pt', '2803-161169-0014.pt', '5338-24640-0008.pt', '777-126732-0025.pt', '6241-61943-0018.pt', '1272-141231-0012.pt', '777-126732-0007.pt', '1993-147964-0008.pt', '1462-170142-0014.pt', '1988-24833-0012.pt', '84-121550-0031.pt', '2035-147961-0029.pt', '6295-244435-0027.pt', '1988-147956-0016.pt', '6319-57405-0008.pt', '2035-147961-0018.pt', '3576-138058-0038.pt', '3752-4944-0064.pt', '251-118436-0007.pt', '1988-24833-0009.pt', '84-121550-0011.pt', '2035-147961-0032.pt', '6319-57405-0002.pt', '3000-15664-0001.pt', '3752-4944-0060.pt', '1988-24833-0017.pt', '5338-284437-0008.pt', '3000-15664-0003.pt', '2428-83699-0002.pt', '1272-135031-0006.pt', '777-126732-0006.pt', '7850-286674-0004.pt', '5895-34615-0013.pt', '3000-15664-0041.pt', '251-136532-0009.pt', '2035-152373-0008.pt', '3000-15664-0000.pt', '5338-24640-0006.pt', '251-118436-0001.pt', '3752-4944-0028.pt', '2428-83699-0030.pt', '5895-34629-0003.pt', '251-118436-0022.pt', '8297-275156-0001.pt', '2035-147961-0026.pt', '2035-152373-0004.pt', '2035-147960-0002.pt', '3000-15664-0004.pt', '6295-244435-0021.pt', '1462-170145-0001.pt', '1462-170145-0002.pt', '7850-281318-0010.pt', '6295-244435-0014.pt', '1462-170145-0014.pt', '1988-147956-0026.pt', '2428-83699-0031.pt', '5694-64038-0020.pt', '3000-15664-0017.pt', '6241-61943-0010.pt', '777-126732-0072.pt', '1462-170142-0030.pt', '2035-152373-0011.pt', '1272-135031-0023.pt', '5338-284437-0001.pt', '3576-138058-0026.pt', '1272-135031-0015.pt', '777-126732-0036.pt', '3752-4944-0012.pt', '1462-170142-0007.pt', '6295-244435-0015.pt', '2035-147961-0003.pt', '2035-147960-0008.pt', '7850-286674-0015.pt', '5895-34629-0008.pt', '3576-138058-0023.pt', '3576-138058-0040.pt', '174-168635-0016.pt', '1462-170142-0035.pt', '3536-23268-0011.pt', '1993-147964-0002.pt', '1988-24833-0000.pt', 
'2803-154320-0009.pt', '5338-284437-0007.pt', '84-121550-0017.pt', '1988-24833-0027.pt', '3536-23268-0021.pt', '1272-141231-0011.pt', '2035-147961-0035.pt']\n", + "Size of 3576-138058-0019 torch.Size([994, 40])\n" + ] + } + ], + "source": [ + "import os\n", + "print(os.listdir(\"feature_cache\"))\n", + "test_id = dataset[0][\"id\"]\n", + "print(\"Size of\", test_id, torch.load(f\"feature_cache/{test_id}.pt\").shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R0WcsqDVNWMg" + }, + "source": [ + "You can warm the cache by just loading every item. We provide a convenience function for this:" + ] + }, + { + "cell_type": "code", + "execution_count": 95, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ZGEee9mvNWMg", + "outputId": "73786759-14d3-47fb-93f5-545795dc82f0" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "100%|██████████| 1089/1089 [00:09<00:00, 118.08it/s]\n" + ] + } + ], + "source": [ + "dataset.iterate_once()" + ] + }, + { + "cell_type": "code", + "execution_count": 96, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "c4uZIuguNWMg", + "outputId": "16ef8e42-7649-4ef3-a043-7991bfb8179c" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Number of files in the cache folder: 1089\n" + ] + } + ], + "source": [ + "print(\"Number of files in the cache folder:\", len(os.listdir(\"feature_cache\")))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fFXowLeYNWMg" + }, + "source": [ + "#### HDF5 cached pipelines (single-file cache)\n", + "\n", + "`CachedHDF5DynamicItem` keeps every cached output in a single HDF5 file instead of one `.pt` file per example. This reduces filesystem overhead and is easier to share across multi-worker dataloaders. 
The first argument still needs to be the example `id`, and optional compression is supported.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "IR_-VA3-NWMg", + "outputId": "03be58f0-c8db-4a23-f814-301d66200f9c" + }, + "execution_count": 97, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "('3576-138058-0019', torch.Size([994, 40]))" + ] + }, + "metadata": {}, + "execution_count": 97 + } + ], + "source": [ + "import os, shutil\n", + "from speechbrain.integrations.hdf5.cached_item import CachedHDF5DynamicItem\n", + "\n", + "shutil.rmtree(\"hdf5_feature_cache\", ignore_errors=True)\n", + "os.makedirs(\"hdf5_feature_cache\", exist_ok=True)\n", + "\n", + "hdf5_filterbank = speechbrain.lobes.features.Fbank()\n", + "\n", + "@CachedHDF5DynamicItem.cache(\n", + " \"hdf5_feature_cache\",\n", + " compression=\"gzip\",\n", + " cache_filename=\"features.hdf5\",\n", + ")\n", + "@speechbrain.utils.data_pipeline.takes(\"id\", \"sig\")\n", + "@speechbrain.utils.data_pipeline.provides(\"hdf5_feats\")\n", + "def hdf5_feature_pipeline(uid, sig):\n", + " # Deterministic features are great candidates for caching.\n", + " return hdf5_filterbank(sig.unsqueeze(0)).squeeze(0)\n", + "\n", + "speechbrain.dataio.dataset.add_dynamic_item([dataset], hdf5_feature_pipeline)\n", + "speechbrain.dataio.dataset.set_output_keys([dataset], [\"id\", \"hdf5_feats\"])\n", + "\n", + "first = dataset[0]\n", + "first[\"id\"], first[\"hdf5_feats\"].shape\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "l4BQ8wDcNWMh" + }, + "source": [ + "The cache now lives in a single file instead of many small ones:\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "2gxXDwsxNWMh", + "outputId": "d7fa1909-92de-4578-ee89-19bd0324353f" + }, + "execution_count": 98, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", 
+ "text": [ + "Cache file: hdf5_feature_cache/features.hdf5\n", + "Datasets inside cache (truncated): ['3576-138058-0019']\n", + "Compression: gzip\n" + ] + } + ], + "source": [ + "hdf5_path = hdf5_feature_pipeline.hdf5_path\n", + "print(\"Cache file:\", hdf5_path)\n", + "print(\"Datasets inside cache (truncated):\", list(hdf5_feature_pipeline.hdf5file.keys())[:3])\n", + "print(\"Compression:\", hdf5_feature_pipeline.hdf5file[first[\"id\"]].compression)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ms6e--ZjNWMh" + }, + "source": [ + "You can warm the cache once, then reopen it read-only when using multiple DataLoader workers to avoid write contention:\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "qY-8lg6gNWMh", + "outputId": "4e4b0b9a-e664-4969-9c54-faeafde53fa8" + }, + "execution_count": 99, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "HDF5 mode after switching: r\n", + "Number of cached items: 1089\n" + ] + } + ], + "source": [ + "dataset.iterate_once(output_keys=[\"id\", \"hdf5_feats\"], progressbar=False)\n", + "hdf5_feature_pipeline.change_file_mode(\"r\")\n", + "print(\"HDF5 mode after switching:\", hdf5_feature_pipeline.hdf5file.mode)\n", + "print(\"Number of cached items:\", len(hdf5_feature_pipeline.hdf5file.keys()))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WZJnmyiou6cK" + }, + "source": [ + "### CategoricalEncoder" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "18cYd9eT7iuZ" + }, + "source": [ + "SpeechBrain `dataio` provides a `CategoricalEncoder` class for encoding labels which belongs to a discrete set: e.g. for speaker recognition or any other multi-class classification problem.\n", + "\n", + "Given a collection of hashables (e.g. 
strings) it encodes\n", + " every unique item to an integer value: `[\"spk0\", \"spk1\"]` --> `[0, 1]`\n", + "\n", + "\n", + " Internally the correspondence between each label and its index is handled by\n", + " two dictionaries: `lab2ind` and `ind2lab`.\n", + "\n", + "It is built to tightly integrate with `DynamicItemDataset` and `dataIO pipelines`.\n", + "\n", + "For example one can obtain the encoding for speaker identities (`spkID` in JSON) from our Mini-LibriSpeech dataset by creating an instance of CategoricalEncoder and fitting it to the dataset object." + ] + }, + { + "cell_type": "code", + "execution_count": 100, + "metadata": { + "id": "oAKvZSz5DX-N" + }, + "outputs": [], + "source": [ + "from speechbrain.dataio.encoder import CategoricalEncoder\n", + "spk_id_encoder = CategoricalEncoder()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7ICJwSyUDyHL" + }, + "source": [ + "Since `DynamicItemDataset` right now does not return spkID we first have to set its output to return that dynamic item:" + ] + }, + { + "cell_type": "code", + "execution_count": 101, + "metadata": { + "id": "OYIqLGDDEBFF", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "6eff4d71-f015-4230-a457-95e8a0d14611" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'spkID': 3576}" + ] + }, + "metadata": {}, + "execution_count": 101 + } + ], + "source": [ + "speechbrain.dataio.dataset.set_output_keys(\n", + " [dataset], [\"spkID\"],\n", + " )\n", + "# sig is a torch.tensor with audio signal as specified before.\n", + "# REMEMBER: no need to specify the pipeline for spkID as we can read directly the value from the JSON.\n", + "dataset[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wqIDYxTcEg-s" + }, + "source": [ + "The speaker identity `spkID` is a string.\n", + "\n", + "Note that in librispeech it is a string containing a unique integer so one can argue that performing the encoding here is 
pointless as casting to integer will suffice.\n", + "\n", + "However, it could happen that it is not a unique integer but a unique string like `spk1`, `spk2` et cetera.\n", + "\n", + "`spk_id_encoder` can be used for this purpose. We fit the encoder to the dataset and specify which *dynamic item* we want to encode:" + ] + }, + { + "cell_type": "code", + "execution_count": 102, + "metadata": { + "id": "0JQxKP-9F2-B" + }, + "outputs": [], + "source": [ + "spk_id_encoder.update_from_didataset(dataset, \"spkID\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "83_QlLNRXEPl" + }, + "source": [ + "**NOTE**\n", + "\n", + "\n", + "This will iterate over the dataset, fetch spkID for each example, and construct the\n", + "internal dicts lab2ind and ind2lab.\n", + "\n", + "Because of this, it is important to call `dataset.set_output_keys` to avoid\n", + "computationally costly dynamic items (e.g. as can happen if the pipeline does data augmentation) before fitting the encoder.\n", + "\n", + "Setting only the key on which the encoder will be fitted is a good approach."
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8U0Qy4DuX9BO" + }, + "source": [ + "We can now look at how many unique speaker ids there are in the dataset by using `__len__`" + ] + }, + { + "cell_type": "code", + "execution_count": 103, + "metadata": { + "id": "tz-Ntq0zF-49", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "2fc7efb4-83ca-411c-bf71-b1021ec31044" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "26" + ] + }, + "metadata": {}, + "execution_count": 103 + } + ], + "source": [ + "len(spk_id_encoder)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5MIcC0sAYBEe" + }, + "source": [ + "We can also take a look at the encoder internal dictionaries `lab2ind` and `ind2lab` which contain the mappings between the labels (speaker ids in this case) and corresponding integer encodings:" + ] + }, + { + "cell_type": "code", + "execution_count": 104, + "metadata": { + "id": "YoLjI4HtGIgC", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "49042c9f-49dd-4624-fab2-7095420cb79e" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{3576: 0,\n", + " 174: 1,\n", + " 6295: 2,\n", + " 3752: 3,\n", + " 1988: 4,\n", + " 5694: 5,\n", + " 1462: 6,\n", + " 1272: 7,\n", + " 2428: 8,\n", + " 7976: 9,\n", + " 2412: 10,\n", + " 5895: 11,\n", + " 251: 12,\n", + " 2803: 13,\n", + " 8842: 14,\n", + " 5338: 15,\n", + " 6241: 16,\n", + " 3000: 17,\n", + " 6319: 18,\n", + " 84: 19,\n", + " 777: 20,\n", + " 7850: 21,\n", + " 8297: 22,\n", + " 1993: 23,\n", + " 2035: 24,\n", + " 3536: 25}" + ] + }, + "metadata": {}, + "execution_count": 104 + } + ], + "source": [ + "spk_id_encoder.lab2ind\n", + "# contains label --> integer encoding" + ] + }, + { + "cell_type": "code", + "execution_count": 105, + "metadata": { + "id": "Jm23A0dCIuSG", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": 
"8d113311-2e7b-4c64-caab-c13b21394993" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{0: 3576,\n", + " 1: 174,\n", + " 2: 6295,\n", + " 3: 3752,\n", + " 4: 1988,\n", + " 5: 5694,\n", + " 6: 1462,\n", + " 7: 1272,\n", + " 8: 2428,\n", + " 9: 7976,\n", + " 10: 2412,\n", + " 11: 5895,\n", + " 12: 251,\n", + " 13: 2803,\n", + " 14: 8842,\n", + " 15: 5338,\n", + " 16: 6241,\n", + " 17: 3000,\n", + " 18: 6319,\n", + " 19: 84,\n", + " 20: 777,\n", + " 21: 7850,\n", + " 22: 8297,\n", + " 23: 1993,\n", + " 24: 2035,\n", + " 25: 3536}" + ] + }, + "metadata": {}, + "execution_count": 105 + } + ], + "source": [ + "spk_id_encoder.ind2lab # contains integer encoding --> label" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DZEqe2d1Vthc" + }, + "source": [ + "Once fitted the `CategoricalEncoder` object can be used in a suitably defined pipeline to encode the spkID key and return the encoded value:" + ] + }, + { + "cell_type": "code", + "execution_count": 106, + "metadata": { + "id": "tX43vtVTyuCy" + }, + "outputs": [], + "source": [ + "@speechbrain.utils.data_pipeline.takes(\"spkID\")\n", + "@speechbrain.utils.data_pipeline.provides(\"spkid_encoded\")\n", + "def spk_id_encoding(spkid):\n", + " return torch.LongTensor([spk_id_encoder.encode_label(spkid)])\n" + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "metadata": { + "id": "y1A41rDQyy-C", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "864bcffb-d970-4b7b-ad2e-64997e70e122" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'spkid_encoded': tensor([2])}" + ] + }, + "metadata": {}, + "execution_count": 107 + } + ], + "source": [ + "speechbrain.dataio.dataset.add_dynamic_item([dataset], spk_id_encoding)\n", + "speechbrain.dataio.dataset.set_output_keys(\n", + " [dataset], [\"spkid_encoded\"],\n", + " )\n", + "\n", + "dataset[100]" + ] + }, + { + "cell_type": "markdown", + "metadata": { 
+ "id": "knC4t9fKvHHU" + }, + "source": [ + "### PaddedBatch and SaveableDataLoader\n", + "\n", + "SpeechBrain offers a way to conveniently pad right automatically tensors of different length on multiple dimensions.\n", + "This is achieved using the `PaddedBatch` class defined in `speechbrain.dataio.batch`.\n", + "\n", + "`PaddedBatch` is both a `collate_fn` and a batch object.\n", + "\n", + "When a `torch.utils.data.Dataset` (and thus also a `DynamicItemDataset`) is passed to the `Brain` class `PaddedBatch` is used as the default collate function `collate_fn` and examples are automatically padded together.\n", + "\n", + "As the default DataLoader the Brain class instantiates, a SpeechBrain custom DataLoader: `speechbrain.dataio.dataloader.SaveableDataLoader`.\n", + "\n", + "\n", + "This DataLoader is identical to the plain one except that it allows for intra-epoch saving. So if for some reason training stops in the middle of an epoch it is possible to resume from exactly that step. See the [Checkpointing Tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/checkpointing.html).\n", + "The default `collate_fn` for this DataLoader is `PaddedBatch`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 108, + "metadata": { + "id": "NtsZ0UA6vLfc" + }, + "outputs": [], + "source": [ + "from speechbrain.dataio.dataloader import SaveableDataLoader\n", + "from speechbrain.dataio.batch import PaddedBatch\n", + "\n", + "speechbrain.dataio.dataset.set_output_keys(\n", + " [dataset], [\"id\", \"spkid_encoded\", \"signal\"],\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ljw1yIG3xRPT" + }, + "source": [ + "We set as the dynamic items we are requesting `sig` which is the audio tensor\n", + "and the speaker id encoded with the `CategoricalEncoder` object (`spkid_encoded`) defined before and the example `id`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 109, + "metadata": { + "id": "puBQt7QRwmnZ" + }, + "outputs": [], + "source": [ + "dataloader = SaveableDataLoader(dataset, batch_size=2, collate_fn=PaddedBatch)" + ] + }, + { + "cell_type": "code", + "execution_count": 110, + "metadata": { + "id": "ccIqOHQkxEmD" + }, + "outputs": [], + "source": [ + "batch_obj = next(iter(dataloader)) # let's look at the batch obj" + ] + }, + { + "cell_type": "code", + "execution_count": 111, + "metadata": { + "id": "cpFw6pycxJNC", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 221 + }, + "outputId": "1bf327d6-84e3-469d-bdb8-85546ebb6730" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "speechbrain.dataio.batch.PaddedBatch" + ], + "text/html": [ + "
\n", + "
speechbrain.dataio.batch.PaddedBatch
def __init__(examples, padded_keys=None, device_prep_keys=None, padding_func=batch_pad_right, padding_kwargs=None, per_key_padding_kwargs=None, apply_default_convert=True, nonpadded_stack=True)
/usr/local/lib/python3.12/dist-packages/speechbrain/dataio/batch.pyCollate_fn when examples are dicts and have variable-length sequences.\n",
+              "\n",
+              "Different elements in the examples get matched by key.\n",
+              "All numpy tensors get converted to Torch (PyTorch default_convert)\n",
+              "Then, by default, all torch.Tensor valued elements get padded and support\n",
+              "collective pin_memory() and to() calls.\n",
+              "Regular Python data types are just collected in a list.\n",
+              "\n",
+              "Arguments\n",
+              "---------\n",
+              "examples : list\n",
+              "    List of example dicts, as produced by Dataloader.\n",
+              "padded_keys : list, None\n",
+              "    (Optional) List of keys to pad on. If None, pad all torch.Tensors\n",
+              "device_prep_keys : list, None\n",
+              "    (Optional) Only these keys participate in collective memory pinning and moving with\n",
+              "    to().\n",
+              "    If None, defaults to all items with torch.Tensor values.\n",
+              "padding_func : callable, optional\n",
+              "    Called with a list of tensors to be padded together. Needs to return\n",
+              "    two tensors: the padded data, and another tensor for the data lengths.\n",
+              "padding_kwargs : dict, None\n",
+              "    (Optional) Extra kwargs to pass to padding_func. E.G. mode, value\n",
+              "    This is used as the default padding configuration for all keys.\n",
+              "per_key_padding_kwargs : dict, None\n",
+              "    (Optional) Per-key padding configuration. Keys in this dict should match\n",
+              "    the keys in the examples. Each value should be a dict with padding parameters\n",
+              "    (e.g., {'value': -100, 'mode': 'constant'}). If a key is not in this dict,\n",
+              "    the global padding_kwargs will be used.\n",
+              "apply_default_convert : bool\n",
+              "    Whether to apply PyTorch default_convert (numpy to torch recursively,\n",
+              "    etc.) on all data. Default:True, usually does the right thing.\n",
+              "nonpadded_stack : bool\n",
+              "    Whether to apply PyTorch-default_collate-like stacking on values that\n",
+              "    didn't get padded. This stacks if it can, but doesn't error out if it\n",
+              "    cannot. Default:True, usually does the right thing.\n",
+              "\n",
+              "Example\n",
+              "-------\n",
+              ">>> batch = PaddedBatch(\n",
+              "...     [\n",
+              "...         {"id": "ex1", "foo": torch.Tensor([1.0])},\n",
+              "...         {"id": "ex2", "foo": torch.Tensor([2.0, 1.0])},\n",
+              "...     ]\n",
+              "... )\n",
+              ">>> # Attribute or key-based access:\n",
+              ">>> batch.id\n",
+              "['ex1', 'ex2']\n",
+              ">>> batch["id"]\n",
+              "['ex1', 'ex2']\n",
+              ">>> # torch.Tensors get padded\n",
+              ">>> type(batch.foo)\n",
+              "<class 'speechbrain.dataio.batch.PaddedData'>\n",
+              ">>> batch.foo.data\n",
+              "tensor([[1., 0.],\n",
+              "        [2., 1.]])\n",
+              ">>> batch.foo.lengths\n",
+              "tensor([0.5000, 1.0000])\n",
+              ">>> # Batch supports collective operations:\n",
+              ">>> _ = batch.to(dtype=torch.half)\n",
+              ">>> batch.foo.data\n",
+              "tensor([[1., 0.],\n",
+              "        [2., 1.]], dtype=torch.float16)\n",
+              ">>> batch.foo.lengths\n",
+              "tensor([0.5000, 1.0000], dtype=torch.float16)\n",
+              ">>> # Numpy tensors get converted to torch and padded as well:\n",
+              ">>> import numpy as np\n",
+              ">>> batch = PaddedBatch(\n",
+              "...     [{"wav": np.asarray([1, 2, 3, 4])}, {"wav": np.asarray([1, 2, 3])}]\n",
+              "... )\n",
+              ">>> batch.wav  # +ELLIPSIS\n",
+              "PaddedData(data=tensor([[1, 2,...\n",
+              ">>> # Basic stacking collation deals with non padded data:\n",
+              ">>> batch = PaddedBatch(\n",
+              "...     [\n",
+              "...         {\n",
+              "...             "spk_id": torch.tensor([1]),\n",
+              "...             "wav": torch.tensor([0.1, 0.0, 0.3]),\n",
+              "...         },\n",
+              "...         {\n",
+              "...             "spk_id": torch.tensor([2]),\n",
+              "...             "wav": torch.tensor([0.2, 0.3, -0.1]),\n",
+              "...         },\n",
+              "...     ],\n",
+              "...     padded_keys=["wav"],\n",
+              "... )\n",
+              ">>> batch.spk_id\n",
+              "tensor([[1],\n",
+              "        [2]])\n",
+              ">>> # And some data is left alone:\n",
+              ">>> batch = PaddedBatch(\n",
+              "...     [{"text": ["Hello"]}, {"text": ["How", "are", "you?"]}]\n",
+              "... )\n",
+              ">>> batch.text\n",
+              "[['Hello'], ['How', 'are', 'you?']]\n",
+              ">>> # Per-key padding configuration:\n",
+              ">>> batch = PaddedBatch(\n",
+              "...     [\n",
+              "...         {\n",
+              "...             "wav": torch.tensor([1, 2, 3]),\n",
+              "...             "labels": torch.tensor([1, 2]),\n",
+              "...         },\n",
+              "...         {"wav": torch.tensor([4, 5]), "labels": torch.tensor([3])},\n",
+              "...     ],\n",
+              "...     per_key_padding_kwargs={\n",
+              "...         "wav": {"value": 0},\n",
+              "...         "labels": {"value": -100},\n",
+              "...     },\n",
+              "... )\n",
+              ">>> batch.wav.data\n",
+              "tensor([[1, 2, 3],\n",
+              "        [4, 5, 0]])\n",
+              ">>> batch.labels.data\n",
+              "tensor([[   1,    2],\n",
+              "        [   3, -100]])
\n", + " \n", + "
" + ] + }, + "metadata": {}, + "execution_count": 111 + } + ], + "source": [ + "batch_obj # the dataloader returns an PaddedBatch obj now\n", + "type(batch_obj)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8IZb33Bt0L9d" + }, + "source": [ + "Dynamic Items can be accessed in the batch object by using `dict` syntax:" + ] + }, + { + "cell_type": "code", + "execution_count": 112, + "metadata": { + "id": "dB42KZ3AxLth", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "643c11bb-fbf0-49e1-a9b5-7a1d2799e0c6" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "PaddedData(data=tensor([[0],\n", + " [0]]), lengths=tensor([1., 1.]))" + ] + }, + "metadata": {}, + "execution_count": 112 + } + ], + "source": [ + "batch_obj[\"spkid_encoded\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 113, + "metadata": { + "id": "ZKJlXaW10XRg", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "c5186323-026f-4477-d0b8-f31ac2db1614" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "PaddedData(data=tensor([[ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029],\n", + " [-0.0018, -0.0023, -0.0027, ..., 0.0000, 0.0000, 0.0000]]), lengths=tensor([1.0000, 0.6220]))" + ] + }, + "metadata": {}, + "execution_count": 113 + } + ], + "source": [ + "batch_obj[\"signal\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 114, + "metadata": { + "id": "gEG6g0sc0ZXn", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "5fa1ffa7-b0cb-436a-dd81-4d4f01f9e087" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "['3576-138058-0019', '3576-138058-0021']" + ] + }, + "metadata": {}, + "execution_count": 114 + } + ], + "source": [ + "batch_obj[\"id\"] # example ids in this batch useful for debugging" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UF3k_9060g4y" + }, + 
"source": [ + "As said, all elements in PaddedBatch which are `torch.Tensors` are padded together by adding zeros to the right.\n", + "When these elements are accessed a [namedtuple](https://docs.python.org/3/library/collections.html#collections.namedtuple) is returned: the actual padded tensors and a `length` tensor." + ] + }, + { + "cell_type": "code", + "execution_count": 115, + "metadata": { + "id": "G2pFsSa702_x" + }, + "outputs": [], + "source": [ + "wav_data, length = batch_obj[\"signal\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GhPDU20bcvNA" + }, + "source": [ + "As it is a [namedtuple](https://docs.python.org/3/library/collections.html#collections.namedtuple) the two items *length* and *data* are also accessible as attributes:" + ] + }, + { + "cell_type": "code", + "execution_count": 116, + "metadata": { + "id": "glL_x9sqb8vO" + }, + "outputs": [], + "source": [ + "lengths = batch_obj[\"signal\"].lengths\n", + "wav_data = batch_obj[\"signal\"].data" + ] + }, + { + "cell_type": "code", + "execution_count": 117, + "metadata": { + "id": "YDzqHldc0bBe", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "0d13cecb-97ed-4279-e7d1-50c31215e8c0" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([1.0000, 0.6220])" + ] + }, + "metadata": {}, + "execution_count": 117 + } + ], + "source": [ + "lengths" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zSbkh9tJ08j2" + }, + "source": [ + "This length tensor contains the relative true length of each sequence.\n", + "In this example it means that the second example in the batch has not been padded (relative length == 1) while the first instead has been padded to more twice its length.\n", + "\n", + "The use of relative lengths instead of absolute indexes guarantees that that these values do not change even after feature extraction: the relative true length remains the same even after STFT whatever is the 
window.\n", + "\n", + "The absolute indexes are easy to obtain:" + ] + }, + { + "cell_type": "code", + "execution_count": 118, + "metadata": { + "id": "tx8HUi2U1Tsu", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "63974e5d-2a89-442d-87cf-6a294ab7ac24" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([158960, 98879])" + ] + }, + "metadata": {}, + "execution_count": 118 + } + ], + "source": [ + "abs_lens = (lengths*wav_data.shape[1]).long()\n", + "abs_lens" + ] + }, + { + "cell_type": "code", + "execution_count": 119, + "metadata": { + "id": "KLClRlo-1xjg", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "ed4b3d3a-30a1-4b11-8368-74d97115736f" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([-0.0018, -0.0023, -0.0027, ..., 0.0042, 0.0052, 0.0038])" + ] + }, + "metadata": {}, + "execution_count": 119 + } + ], + "source": [ + "wav_data[1][:abs_lens[1]] # no zeros" + ] + }, + { + "cell_type": "code", + "execution_count": 120, + "metadata": { + "id": "NmH1C4Dx2Fyt", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "a561c82d-e9cd-4799-bdb6-90b9dbada775" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([0.0019, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000])" + ] + }, + "metadata": {}, + "execution_count": 120 + } + ], + "source": [ + "wav_data[1][abs_lens[1]:] # zeros begins at abs_lens[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BRIP5d4h2e2Y" + }, + "source": [ + "The PaddedBatch object allows for conveniently moving all dynamic items which are` torch.Tensor` to the right device using to:" + ] + }, + { + "cell_type": "code", + "execution_count": 121, + "metadata": { + "id": "gtn6h1hM2Xw2" + }, + "outputs": [], + "source": [ + "batch_obj = batch_obj.to(\"cpu\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"IyZ84-4-20wn" + }, + "source": [ + "Of course items which are not tensors such as `id` are not moved and are not padded. Instead they are simply returned as a list." + ] + }, + { + "cell_type": "code", + "execution_count": 122, + "metadata": { + "id": "O8-4oUMw2xNd", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "05816f8e-bd9e-41a8-f469-37c38f5e305f" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "['3576-138058-0019', '3576-138058-0021']" + ] + }, + "metadata": {}, + "execution_count": 122 + } + ], + "source": [ + "batch_obj[\"id\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Vz09N7Zb3Iho" + }, + "source": [ + "It is also possible to iterate over the examples of `PaddedBatch`:" + ] + }, + { + "cell_type": "code", + "execution_count": 123, + "metadata": { + "id": "05vyHnk23H8X", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "5d6f6b79-645c-4fe9-90eb-573c06671964" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "['3576-138058-0019', '3576-138058-0021']\n", + "PaddedData(data=tensor([[0],\n", + " [0]]), lengths=tensor([1., 1.]))\n", + "PaddedData(data=tensor([[ 0.0020, 0.0006, 0.0004, ..., -0.0033, -0.0034, -0.0029],\n", + " [-0.0018, -0.0023, -0.0027, ..., 0.0000, 0.0000, 0.0000]]), lengths=tensor([1.0000, 0.6220]))\n" + ] + } + ], + "source": [ + "for ex in batch_obj:\n", + " print(ex)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sXjZOd3O3tt1" + }, + "source": [ + "And access a single example by its position:" + ] + }, + { + "cell_type": "code", + "execution_count": 124, + "metadata": { + "id": "2mPr65B22_r8", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "087360a2-824d-43d2-9a66-4d64e6dc09b8" + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "PaddedData(data=tensor([[0],\n", + " [0]]), lengths=tensor([1., 1.]))" + ] + }, + 
"metadata": {}, + "execution_count": 124 + } + ], + "source": [ + "batch_obj.at_position(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ze8DO5Nj32eE" + }, + "source": [ + "These methods can be conveniently used in the `Brain` class `compute_forward` and `compute_objectives` methods.\n", + "As we have shown in the first example of this tutorial where a complete dataIO example was illustrated:\n", + "\n", + "\n", + "\n", + "\n", + "```python\n", + "def compute_forward(self, batch, stage):\n", + " audio, audio_len = batch[\"sig\"]\n", + " # the examples are automatically padded, audio_len contains the relative\n", + " # length of the original sequence.\n", + " return self.modules.model(audio.unsqueeze(1)).mean(-1).unsqueeze(-1)\n", + " \n", + " def compute_objectives(self, logits, batch, stage):\n", + " spk_ids, _ = batch[\"spkid_encoded\"]\n", + " return torch.nn.functional.cross_entropy(logits, spk_ids)\n", + "```\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ufpPurk3bsvc" + }, + "source": [ + "## Full Example: Training a simple Speaker Recognition System." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yKox1vfgbyey" + }, + "source": [ + "Hereafter we show how the **DynamicItemDataset**, **DIPs** and **CategoricalEncoder** can be used to build a data pipeline for Speaker Recognition.\n", + "\n", + "In particular we have to:\n", + "\n", + "- read the audio\n", + "- read the speaker ID from annotation and encode it to integer\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_0e4ksGZpErL" + }, + "source": [ + "We firstly instantiate the dataset from that JSON annotation" + ] + }, + { + "cell_type": "code", + "execution_count": 125, + "metadata": { + "id": "9RKlcn6JpKo9" + }, + "outputs": [], + "source": [ + "dataset = DynamicItemDataset.from_json(\"data.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KdI7Uakdi1kh" + }, + "source": [ + "Then fit the **CategoricalEncoder** to speaker IDs (`spkID`) in the annotation." + ] + }, + { + "cell_type": "code", + "execution_count": 126, + "metadata": { + "id": "4dLTFQyPi0yk" + }, + "outputs": [], + "source": [ + "spk_id_encoder = CategoricalEncoder()\n", + "spk_id_encoder.update_from_didataset(dataset, \"spkID\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aM2U403ejF3b" + }, + "source": [ + "We add the **DIP** which encodes `spkID`" + ] + }, + { + "cell_type": "code", + "execution_count": 127, + "metadata": { + "id": "IRWERFtjjFEF" + }, + "outputs": [], + "source": [ + "dataset.add_dynamic_item(spk_id_encoder.encode_label_torch, takes=\"spkID\", provides=\"spk_encoded\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2PNpmwVZjPLn" + }, + "source": [ + "We add the **DIP** for reading the audio\n" + ] + }, + { + "cell_type": "code", + "execution_count": 128, + "metadata": { + "id": "L4ccm3_KjSOF" + }, + "outputs": [], + "source": [ + "dataset.add_dynamic_item(speechbrain.dataio.dataio.read_audio, takes=\"file_path\", provides=\"signal\")\n" + ] + }, + { + "cell_type": "markdown", 
+ "metadata": { + "id": "ge4RwtGSNWMm" + }, + "source": [ + "#### Caching features for the speaker pipeline\n", + "\n", + "We can cache the filterbank features so each epoch only reads them from a single HDF5 file instead of recomputing them or creating thousands of small `.pt` files. This mirrors the caching workflow shown earlier, but plugs directly into the speaker-recognition `DynamicItemDataset`.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "_6davA36NWMm" + }, + "execution_count": 129, + "outputs": [], + "source": [ + "import os, shutil\n", + "from speechbrain.integrations.hdf5.cached_item import CachedHDF5DynamicItem\n", + "from speechbrain.lobes.features import Fbank\n", + "\n", + "# Clean cache for a reproducible run in the notebook\n", + "shutil.rmtree(\"spk_feat_cache\", ignore_errors=True)\n", + "os.makedirs(\"spk_feat_cache\", exist_ok=True)\n", + "\n", + "fbank = Fbank()\n", + "\n", + "@CachedHDF5DynamicItem.cache(\n", + " cache_location=\"spk_feat_cache\",\n", + " cache_filename=\"speaker_feats.hdf5\",\n", + " compression=\"gzip\",\n", + ")\n", + "@speechbrain.utils.data_pipeline.takes(\"id\", \"signal\")\n", + "@speechbrain.utils.data_pipeline.provides(\"feats\")\n", + "def cached_fbank(uid, sig):\n", + " return fbank(sig.unsqueeze(0)).squeeze(0)\n", + "\n", + "dataset.add_dynamic_item(cached_fbank)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0s13XGkjNWMm" + }, + "source": [ + "The cache is lazy: the first access to an utterance writes it into `speaker_feats.hdf5`. 
Reusing the notebook (or multiple workers) just reads from the same file.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0_oJNpxkpgqF" + }, + "source": [ + "and set the outputs of the dataset we want to access in training loop" + ] + }, + { + "cell_type": "code", + "execution_count": 130, + "metadata": { + "id": "sahSr0yJpkhY" + }, + "outputs": [], + "source": [ + "dataset.set_output_keys([\"id\", \"feats\", \"spk_encoded\"])\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ovhe-jc8jWzh" + }, + "source": [ + "We sort the dataset based on length to speed up training so that we minimize in batches the amount of padded elements." + ] + }, + { + "cell_type": "code", + "execution_count": 131, + "metadata": { + "id": "jG36aYVvjWB7" + }, + "outputs": [], + "source": [ + "sorted_data = dataset.filtered_sorted(sort_key=\"length\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IPGrUCU5kNY3" + }, + "source": [ + "We can train now a simple classifier, by passing the dataset object directly to the **Brain** class. The **Brain** class will automatically create a SaveableDataLoader with specified `train_loader_kwargs` and will be handling the padding for you." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 132, + "metadata": { + "id": "OFFiFjaek8uN", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "b63d6a3b-7ce0-406e-eb32-719215c702d2" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "INFO:speechbrain.core:Gradscaler enabled: `False`\n", + "INFO:speechbrain.core:Using training precision: `--precision=fp32`\n", + "INFO:speechbrain.core:Using evaluation precision: `--eval_precision=fp32`\n", + "INFO:speechbrain.core:SimpleBrain Model Statistics:\n", + "* Total Number of Trainable Parameters: 23.8k\n", + "* Total Number of Parameters: 23.8k\n", + "* Trainable Parameters represent 100.0000% of the total size.\n", + "100%|██████████| 68/68 [00:25<00:00, 2.70it/s, train_loss=4.22]\n" + ] + } + ], + "source": [ + "from speechbrain.lobes.features import MFCC, Fbank\n", + "from speechbrain.nnet.losses import nll_loss\n", + "\n", + "\n", + "class SimpleBrain(speechbrain.Brain):\n", + " def compute_forward(self, batch, stage):\n", + " x = batch.feats.data\n", + " x = self.modules.encoder(x)\n", + " x = self.modules.pooling(x, batch.feats.lengths)\n", + " x = self.modules.to_output(x)\n", + " return self.modules.softmax(x)\n", + "\n", + " def compute_objectives(self, logits, batch, stage):\n", + " return nll_loss(logits, batch.spk_encoded.data)\n", + "\n", + "modules = {\n", + " \"encoder\": torch.nn.Sequential(torch.nn.Linear(40, 256),\n", + " torch.nn.ReLU()),\n", + " \"pooling\": speechbrain.nnet.pooling.StatisticsPooling(),\n", + " \"to_output\": torch.nn.Linear(512, len(spk_id_encoder)),\n", + " \"softmax\": speechbrain.nnet.activations.Softmax(apply_log=True)\n", + "}\n", + "brain = SimpleBrain(modules, opt_class=lambda x: torch.optim.SGD(x, 0.01), run_opts={\"device\": \"cpu\"})\n", + "brain.fit(range(1), train_set=sorted_data,\n", + " train_loader_kwargs={\"batch_size\": 16, \"drop_last\":True})\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + 
"id": "H9vn3zCqNWMn" + }, + "source": [ + "## Authors\n", + "\n", + "- SpeechBrain team: Mirco Ravanelli, Titouan Parcollet, Peter Plantinga, and Adel Moumen (2026)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lvCQK0beq8Fc" + }, + "source": [ + "## Acknowledgements\n", + "\n", + "\n", + "\n", + "* Many thanks to Nasser Benabderrazik ([lenassero](https://github.com/lenassero)) who helped improving this Tutorial.\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu 
and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/docs/tutorials/basics/hyperpyyaml.ipynb b/docs/tutorials/basics/hyperpyyaml.ipynb new file mode 100644 index 0000000000..3bde5c4cd8 --- /dev/null +++ b/docs/tutorials/basics/hyperpyyaml.ipynb @@ -0,0 +1,556 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/basics/hyperpyyaml.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/basics/hyperpyyaml.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jr2jH1sDZcml" + }, + "source": [ + "# HyperPyYAML Tutorial\n", + "\n", + "An essential aspect of any deep learning pipeline is the definition of hyperparameters and other metadata. 
These hyperparameters, in conjunction with the deep learning algorithms, govern various aspects of the pipeline, including model architecture, training, and decoding.\n", + "\n", + "In SpeechBrain, we emphasize a clear distinction between hyperparameters and learning algorithms in the structure of our toolkit. To achieve this, we separate our recipes into two primary files: `train.py` and `train.yaml`.\n", + "\n", + "The `train.yaml` file follows a format developed by SpeechBrain, known as \"HyperPyYAML.\" We chose to extend YAML due to its highly readable nature for data serialization. By building upon this already user-friendly format, we have created an extended definition of hyperparameters, ensuring that our experimental code remains concise and easily readable.\n", + "\n", + "Here's a brief example using PyTorch code to illustrate the use of HyperPyYAML. It's important to note that PyTorch is not a requirement for utilizing HyperPyYAML:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sahuT6WdbeAy" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install torch\n", + "!pip install hyperpyyaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OYuI_vzLbhJz" + }, + "outputs": [], + "source": [ + "import torch\n", + "from hyperpyyaml import load_hyperpyyaml\n", + "\n", + "example_hyperparams = \"\"\"\n", + "base_channels: 32\n", + "kernel_size: 11\n", + "padding: !ref // 2\n", + "\n", + "layer1: !new:torch.nn.Conv1d\n", + " in_channels: 1\n", + " out_channels: !ref \n", + " kernel_size: !ref \n", + " padding: !ref \n", + "\n", + "layer2: !new:torch.nn.Conv1d\n", + " in_channels: !ref \n", + " out_channels: !ref * 2\n", + " kernel_size: !ref \n", + " padding: !ref \n", + "\n", + "layer3: !new:torch.nn.Conv1d\n", + " in_channels: !ref * 2\n", + " out_channels: 1\n", + " kernel_size: !ref \n", + " padding: !ref \n", + "\n", + "model: !new:torch.nn.Sequential\n", + " - !ref \n", 
+ " - !new:torch.nn.LeakyReLU\n", + " - !ref \n", + " - !new:torch.nn.LeakyReLU\n", + " - !ref \n", + "\"\"\"\n", + "\n", + "# Create model directly by loading the YAML\n", + "loaded_hparams = load_hyperpyyaml(example_hyperparams)\n", + "model = loaded_hparams[\"model\"]\n", + "\n", + "# Transform a 2-second audio clip\n", + "input_audio = torch.rand(1, 1, 32000)\n", + "transformed_audio = model(input_audio)\n", + "print(transformed_audio.shape)\n", + "\n", + "# Try a different hyperparameter value by overriding the padding value\n", + "loaded_hparams = load_hyperpyyaml(example_hyperparams, {\"padding\": 0})\n", + "model = loaded_hparams[\"model\"]\n", + "transformed_audio = model(input_audio)\n", + "print(transformed_audio.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jYsOid7mE673" + }, + "source": [ + "As this example shows, HyperPyYAML allows for complex hyperparameter definitions with compositions. In addition, any value can be overridden for hyperparameter tuning. To grasp how all of this works, let's first briefly look at the basics of YAML." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DnfiIsycbqIs" + }, + "source": [ + "## Basic YAML syntax\n", + "\n", + "Enough prelude: let's talk YAML! Here's a brief example of a yaml snippet and what it would look like once loaded to python:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8cARbf0cZYCT" + }, + "outputs": [], + "source": [ + "import yaml\n", + "yaml_string = \"\"\"\n", + "foo: 1\n", + "bar:\n", + " - item1\n", + " - item2\n", + "baz:\n", + " item1: 3.4\n", + " item2: True\n", + "\"\"\"\n", + "yaml.safe_load(yaml_string)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pf7vC0x5d5vg" + }, + "source": [ + "As you can see, YAML has built-in support for a variety of data types, including string, int, float, bool, list, and dictionary. Our HyperPyYAML format keeps all of this functionality."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xMkgyuexc_3M" + }, + "outputs": [], + "source": [ + "from hyperpyyaml import load_hyperpyyaml\n", + "load_hyperpyyaml(yaml_string)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WmTHoVgjfNEv" + }, + "source": [ + "Our primary additions to yaml format are added with YAML tags. Tags are added before an item definition, and are prefixed with `!`. For the purpose of illustrating how tags are used, here is an example with a minor addition that we've made, the `!tuple` tag:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRNSh8PCej15" + }, + "outputs": [], + "source": [ + "yaml_string = \"\"\"\n", + "foo: !tuple (3, 4)\n", + "\"\"\"\n", + "load_hyperpyyaml(yaml_string)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7Qwa5t-OkL4b" + }, + "source": [ + "Now you know the YAML basics, time to move on to our additions!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KmLsqpZUgi87" + }, + "source": [ + "## Tags `!new:` and `!name:`\n", + "YAML tags can contain a suffix to more specifically define what type of tag it is. We use this to define a tag that is able to create any python object, not just a basic type. This tag starts with `!new:` and contains the type of the object. 
For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "lSlv5ootgW-g" + }, + "outputs": [], + "source": [ + "yaml_string = \"\"\"\n", + "foo: !new:collections.Counter\n", + "\"\"\"\n", + "loaded_yaml = load_hyperpyyaml(yaml_string)\n", + "loaded_yaml[\"foo\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uVxoxhIDhoBD" + }, + "outputs": [], + "source": [ + "loaded_yaml[\"foo\"].update({\"a\": 3, \"b\": 5})\n", + "loaded_yaml[\"foo\"][\"a\"] += 1\n", + "loaded_yaml[\"foo\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NtJ7bt2FmLpQ" + }, + "source": [ + "Of course many python objects take arguments during creation. These arguments can be passed with a list for positional arguments or a dictionary for keyword arguments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jBi0dTUzl_Eq" + }, + "outputs": [], + "source": [ + "yaml_string = \"\"\"\n", + "foo: !new:collections.Counter\n", + " - [a, b, r, a, c, a, d, a, b, r, a]\n", + "bar: !new:collections.Counter\n", + " a: 2\n", + " b: 1\n", + " c: 5\n", + "\"\"\"\n", + "load_hyperpyyaml(yaml_string)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2xlFj5Te7A9f" + }, + "source": [ + "Another python object that is useful to create is a function object. In HyperPyYAML this can be done with the `!name:` tag. Behind the scenes, this tag uses `functools.partial` to create a new function definition with the default arguments provided. 
For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "60wI82NRm8M8" + }, + "outputs": [], + "source": [ + "yaml_string = \"\"\"\n", + "foo: !name:collections.Counter\n", + " a: 2\n", + "\"\"\"\n", + "loaded_yaml = load_hyperpyyaml(yaml_string)\n", + "loaded_yaml[\"foo\"](b=4)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qfBKfFDB8ds8" + }, + "source": [ + "The default arguments can be overridden, just as a normal python function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "P2SKGzIE8MQ5" + }, + "outputs": [], + "source": [ + "loaded_yaml[\"foo\"](a=3, b=5)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "o8Dh9KB18rn7" + }, + "source": [ + "## Tags `!ref` and `!copy`\n", + "\n", + "Of course some hyperparameters get used in multiple places, so we added a mechanism for referring to another item called `!ref`. The node that this tag is applied to must be a string that contains the location of the node to copy. Sub-nodes can be accessed with square brackets, same as in Python. For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BkpqtyKR8mqa" + }, + "outputs": [], + "source": [ + "yaml_string = \"\"\"\n", + "foo:\n", + " a: 3\n", + " b: 4\n", + "bar:\n", + " c: !ref \n", + " d: !ref \n", + "\"\"\"\n", + "load_hyperpyyaml(yaml_string)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3sgZ3MNa-uGH" + }, + "source": [ + "The `!ref` tag can support simple arithmetic and string concatenation for basic hyperparameter combinations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BT0xjRzp-qym" + }, + "outputs": [], + "source": [ + "yaml_string = \"\"\"\n", + "folder1: abc/def\n", + "folder2: ghi/jkl\n", + "folder3: !ref /\n", + "\n", + "foo: 1024\n", + "bar: 512\n", + "baz: !ref // + 1\n", + "\"\"\"\n", + "load_hyperpyyaml(yaml_string)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vp3a9jsnGWBV" + }, + "source": [ + "The `!ref` tag can also refer to objects, in which case it makes a reference to the same object, rather than a copy. If you'd prefer to make a copy instead, use the `!copy` tag." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Wy0NDnrO_Vb0" + }, + "outputs": [], + "source": [ + "yaml_string = \"\"\"\n", + "foo: !new:collections.Counter\n", + " a: 4\n", + "bar: !ref \n", + "baz: !copy \n", + "\"\"\"\n", + "loaded_yaml = load_hyperpyyaml(yaml_string)\n", + "loaded_yaml[\"foo\"].update({\"b\": 10})\n", + "print(loaded_yaml[\"bar\"])\n", + "print(loaded_yaml[\"baz\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d8NBOA9WHQIG" + }, + "source": [ + "## Other tags\n", + "\n", + "We introduced a variety of other tags as well:\n", + "* `!tuple` to create python tuples. Note this is implicitly resolved, so you do not need to explicitly write out the tuple tag, just use parentheses as you would in Python.\n", + "* `!include` to insert other yaml files directly\n", + "* `!apply` to load and execute a python function, storing the result\n", + "\n", + "We use `!apply` to set the random seed at the beginning of loading the yaml, so that the models have the same parameters each run. The result is not stored, because it starts with `__`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Q9ftz9wMGp9L" + }, + "outputs": [], + "source": [ + "yaml_string = \"\"\"\n", + "sum: !apply:sum\n", + " - [1, 2]\n", + "__set_seed: !apply:torch.manual_seed [1234]\n", + "\"\"\"\n", + "load_hyperpyyaml(yaml_string)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0jzO7UWLJ_iP" + }, + "source": [ + "## Overrides\n", + "\n", + "In order to run experiments with various values for a hyperparameter, we have a system for overriding the values that are listed in the yaml file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "TuZ9s7mBJI0B" + }, + "outputs": [], + "source": [ + "overrides = {\"foo\": 7}\n", + "fake_file = \"\"\"\n", + "foo: 2\n", + "bar: 5\n", + "\"\"\"\n", + "load_hyperpyyaml(fake_file, overrides)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TRseBBcVL4Hv" + }, + "source": [ + "As shown in this example, overrides can take an ordinary python dictionary. However, this form does not support python objects. To override a python object, overrides can also take a yaml-formatted string with the HyperPyYAML syntax." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "R05TG8UzLQNj" + }, + "outputs": [], + "source": [ + "load_hyperpyyaml(fake_file, \"foo: !new:collections.Counter\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nsWb8t-NMxOo" + }, + "source": [ + "## Conclusion\n", + "\n", + "We are proud to present our HyperPyYAML syntax, and we think that it provides a readable and concise way to structure hyperparameter definitions. In addition, it removes unnecessary complexity from experiment files, allowing the algorithms to become clear. As is evident in the first example, overrides are easy, making hyperparameter tuning a cinch. Overall, we have found this package to be a valuable tool for deep learning!" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "10jZah2QHZ7xuajv9M1yIwRQdePxPV97U", + "timestamp": 1612452207452 + } + ] 
+ }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/basics/introduction-to-speechbrain.ipynb b/docs/tutorials/basics/introduction-to-speechbrain.ipynb new file mode 100644 index 0000000000..f0cf21f185 --- /dev/null +++ b/docs/tutorials/basics/introduction-to-speechbrain.ipynb @@ -0,0 +1,660 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/basics/introduction-to-speechbrain.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/basics/introduction-to-speechbrain.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmf1KHEN6g32" + }, + "source": [ + "# **Introduction to SpeechBrain**\n", + "\n", + "SpeechBrain is an **open-source** **all-in-one** speech toolkit based on **PyTorch**. It is designed to make the research and development of speech technology easier.\n", + "\n", + "## Motivation\n", + "There are many speech and audio processing tasks of great practical and scientific interest. \n", + "\n", + "In the past, the dominant approach was to develop a **different toolkit for each different task**. 
Nevertheless, learning several toolkits is **time-demanding**, might require knowledge of **different programming languages**, and forces you to familiarize yourself with **different code styles and standards** (e.g., data readers).\n", + "\n", + "Nowadays, most of these tasks can be implemented with the same **deep learning** technology.\n", + "We thus explicitly designed SpeechBrain to natively support **multiple speech processing tasks**. We think that this might make the life of speech developers much easier. Moreover, we think that the combination of different speech technologies in a single **end-to-end** and **fully differentiable system** will be crucial in the development of future speech technologies.\n", + "\n", + "We did our best to design a toolkit which is:\n", + "* *Easy to use*\n", + "* *Easy to customize*\n", + "* *Flexible*\n", + "* *Modular*\n", + "* *Well-documented*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iiwk7738KFvt" + }, + "source": [ + "## Supported Technologies\n", + "\n", + "You can thus use speechbrain to convert *speech-to-text*, to perform authentication using `speaker verification`, to enhance the quality of the speech signal, to combine the information from multiple microphones, and for many other things.\n", + "\n", + "More precisely, SpeechBrain currently supports many conversational AI technologies, including:\n", + "\n", + "- Speech Recognition\n", + "- Speaker Recognition\n", + "- Speech Separation\n", + "- Speech Enhancement\n", + "- Text-to-Speech\n", + "- Vocoding\n", + "- Spoken Language Understanding\n", + "- Speech-to-Speech Translation\n", + "- Speech Translation\n", + "- Emotion Classification\n", + "- Language Identification\n", + "- Voice Activity Detection\n", + "- Sound Classification\n", + "- Self-Supervised Learning\n", + "- Interpretability\n", + "- Speech Generation\n", + "- Metric Learning\n", + "- Alignment\n", + "- Diarization\n", + "- Language Modeling\n", + "- Response Generation\n", +
"- Grapheme-to-Phoneme\n", + "\n", + "For all these tasks, we propose recipes on popular datasets that achieve **competitive** or state-of-the-art **performance**.\n", + "\n", + "SpeechBrain is an ongoing project (still in beta version) and we are building a large community to further expand the current functionalities." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qbn5PX8mKHFz" + }, + "source": [ + "## Installation\n", + "\n", + "There are essentially two ways to install SpeechBrain:\n", + "* **Local installation**: it is suggested if you want to modify the toolkit or train a full speech processing system from scratch.\n", + "\n", + "* **Install via PyPI**: it is suggested when you just want to use some core functionality of SpeechBrain in your project.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Z60RdK8V54dW" + }, + "source": [ + "### Local Installation (Git clone)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NQ3rDQslkn12" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "\n", + "# Clone SpeechBrain repository\n", + "!git clone https://github.com/speechbrain/speechbrain/\n", + "%cd /content/speechbrain/templates/speech_recognition/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XG16N39sfnJs" + }, + "source": [ + "Once installed, you should be able to import the speechbrain project with python:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hbMISpjh0s3e" + }, + "outputs": [], + "source": [ + "import speechbrain as sb" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lxVO1Mj9MsSh" + }, + "source": [ + "## Running an Experiment\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TpYzWszYjOZR" + }, + "source": [ + "To run an
experiment with SpeechBrain, the typical syntax is:\n", + "\n", + "```\n", + "python train.py hparams.yaml\n", + "```\n", + "\n", + "All the hyperparameters are summarized in a yaml file, while the main script for training is `train.py`.\n", + "\n", + "For instance, let's run one of the minimal examples made available with SpeechBrain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "fCukWAmn5TN2" + }, + "outputs": [], + "source": [ + "%cd /content/speechbrain/tests/integration/ASR_CTC/\n", + "!python example_asr_ctc_experiment.py hyperparams.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PAdSpX2mn0YT" + }, + "source": [ + "In this case, we trained a CTC-based **speech recognizer** with a tiny dataset stored in the folder `samples`. As you can see, the training loss is very small, which indicates that the model is implemented correctly.\n", + "The validation loss, instead, is high. This happens because, as expected, the dataset is too small to allow the network to generalize.\n", + "\n", + "For a more detailed description of the minimal examples, please see the tutorial on \"minimal examples step-by-step\".\n", + "\n", + "All the results of the experiments are stored in the `output_folder` defined in the yaml file. Here, you can find the checkpoints, the trained models, a file summarizing the performance, and a logger.\n", + "\n", + "This way, you can compare your performance with the one achieved by us and you can have access to all the pre-trained models.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "19-Fpm4ArfP_" + }, + "source": [ + "## Hyperparameter specification with YAML\n", + "\n", + "Machine learning systems often require the specification of several hyperparameters. In SpeechBrain, we do it with YAML.
YAML allows us to specify the hyperparameters in an elegant, flexible, and transparent way.\n", + "\n", + "Let's see for instance this yaml snippet:\n", + "\n", + "\n", + "```yaml\n", + "dropout: 0.8\n", + "compute_features: !new:speechbrain.lobes.features.MFCC\n", + " n_mels: 40\n", + " left_frames: 5\n", + " right_frames: 5\n", + "\n", + "model: !new:speechbrain.lobes.models.CRDNN.CRDNN\n", + " input_shape: [null, null, 440]\n", + " activation: !name:torch.nn.LeakyReLU []\n", + " dropout: !ref \n", + " cnn_blocks: 2\n", + " cnn_channels: (32, 16)\n", + " cnn_kernelsize: (3, 3)\n", + " time_pooling: True\n", + " rnn_layers: 2\n", + " rnn_neurons: 512\n", + " rnn_bidirectional: True\n", + " dnn_blocks: 2\n", + " dnn_neurons: 1024\n", + "```\n", + "\n", + "As you can see, this is not just a plain list of hyperparameters. For each parameter, we specify the class (or function) that is going to use it. This makes the code **more transparent** and **easier to debug**.\n", + "\n", + "The YAML file contains all the information to initialize the classes when loading them. In SpeechBrain we load it with a special function called `load_hyperpyyaml`, which initializes for us all the declared classes. This makes the code extremely **readable** and **compact**.\n", + "\n", + "Our hyperpyyaml is an extension of the standard YAML. For an overview of all the supported functionalities, please take a look at the [YAML tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html).\n", + "\n", + "Note that all the hyperparameters can be overridden from the command line. 
For instance, to change the dropout factor:\n", + "\n", + "`python experiment.py params.yaml --dropout=0.5 `\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-yZGzRmFxJGg" + }, + "source": [ + "## Experiment File\n", + "The experiment file (e.g., `example_asr_ctc_experiment.py` in the example) trains a model by **combining** the functions or **classes declared in the yaml file**. This script defines the data processing pipeline and defines all the computations from the input signal to the final cost function. Everything is designed to be **easy to customize**.\n", + "\n", + "\n", + "### Data Specification\n", + "The user should prepare a data specification file (in **CSV** or JSON) format that reports all the data and the labels to process.\n", + "\n", + "For instance, in the minimal example run before, the data specification file is this:\n", + "\n", + "\n", + "```csv\n", + "ID, duration, wav, wav_format, wav_opts, spk_id, spk_id_format, spk_id_opts, ali, ali_format, ali_opts, phn, phn_format, phn_opts,char,char_format,char_opts\n", + "spk1_snt5,2.6,$data_folder/spk1_snt5.wav, wav, ,spk1,string, ,$data_folder/spk1_snt5.pkl,pkl, ,s ah n vcl d ey ih z dh ax vcl b eh s cl t cl p aa r dx ax v dh ax w iy cl,string, ,s u n d a y i s t h e b e s t p a r t o f t h e w e e k,string,\n", + "spk2_snt5,1.98,$data_folder/spk2_snt5.wav, wav, ,spk2,string, ,$data_folder/spk2_snt5.pkl,pkl, ,vcl jh ah m cl p dh ax f eh n s ae n hh er iy ah cl p dh ax vcl b ae ng cl,string, ,k e n p a I r s l a c k f u l l f l a v o r,string,\n", + "```\n", + "\n", + "You can open this file with a CSV reader for better rendering.
For each row, you have an example with the corresponding paths to wav signal and labels.\n", + "\n", + "As an alternative, users can specify the data in a **JSON** format:\n", + "\n", + "\n", + "```json\n", + "{\n", + " \"spk1_snt5\": {\n", + " \"wav\": \"{data_root}/spk1_snt5.wav\",\n", + " \"length\": 2.6,\n", + " \"spk_id\": \"spk1\",\n", + " \"ali\": \"{data_root}/spk1_snt5.pkl\",\n", + " \"phn\": \"s ah n vcl d ey ih z dh ax vcl b eh s cl t cl p aa r dx ax v dh ax w iy cl\",\n", + " \"char\": \"s u n d a y i s t h e b e s t p a r t o f t h e w e e k\"\n", + " },\n", + " \"spk2_snt5\": {\n", + " \"wav\": \"{data_root}/spk2_snt5.wav\",\n", + " \"length\": 1.98,\n", + " \"spk_id\": \"spk2\",\n", + " \"ali\": \"{data_root}/spk2_snt5.pkl\",\n", + " \"phn\": \"vcl jh ah m cl p dh ax f eh n s ae n hh er iy ah cl p dh ax vcl b ae ng cl\",\n", + " \"char\": \"k e n p a i r s l a c k f u l l f l a v o r\"\n", + " }\n", + "}\n", + "```\n", + "\n", + "JSON is less compact than CSV but more flexible. For many applications, using the CSV file is enough. For more complex tasks (e.g, speaker diarization, speaker diarization + recognition), however, people might take advantage of the hierarchical structure offered by JSON.\n", + "\n", + "All datasets are formatted differently. In general, the users have to write a **data preparation** script that parses the target dataset and creates the data specification files. For all the proposed recipes, however, we also release the corresponding data preparation library.\n", + "\n", + "### Data processing pipeline\n", + "Thanks to our Dynamic datasets, the data reading pipeline is fully customizable in the experiment file directly. For instance, in the minimal example, you can define the following intuitive function to read the audio file\n", + "\n", + "\n", + "```python\n", + " # 2. 
Define audio pipeline:\n", + " @sb.utils.data_pipeline.takes(\"wav\")\n", + " @sb.utils.data_pipeline.provides(\"sig\")\n", + " def audio_pipeline(wav):\n", + " sig = sb.dataio.dataio.read_audio(wav)\n", + " return sig\n", + "```\n", + "\n", + "The function takes in input the wav path and returns a signal read with the specified reader. In the variable `batch.sig` (see `example_asr_ctc_experiment.py`) you will have your batches of signals ready to be used. Note that here you can add any kind of processing (e.g, adding noise, speech change, dynamic mixing, etc) just by coding with the desired pipeline.\n", + "\n", + "A similar function should be written for all the entries that our script is supposed to process. The minimal example, for instance, reads a sequence of phoneme labels as well:\n", + "\n", + "\n", + "```python\n", + " @sb.utils.data_pipeline.takes(\"phn\")\n", + " @sb.utils.data_pipeline.provides(\"phn_list\", \"phn_encoded\")\n", + " def text_pipeline(phn):\n", + " phn_list = phn.strip().split()\n", + " yield phn_list\n", + " phn_encoded = label_encoder.encode_sequence_torch(phn_list)\n", + " yield phn_encoded\n", + "\n", + "```\n", + "Here, we read the phoneme list, separate each entry by space, and convert the list of phonemes to their corresponding indexes (using the label_encoder described [in this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)).\n", + "\n", + "As you can see, we directly expose in the main script the data reading pipeline because this adds a lot of transparency and flexibility.\n", + "\n", + "### Custom forward and cost computation methods\n", + "The other thing that users often want to customize is the sequence of computations that go from the input to the final cost function. 
In the experiment file, users are required to specify them in the `forward` and `compute_objectives` methods.\n", + "In the minimal example, the forward method is defined as follows:\n", + "\n", + "\n", + "```python\n", + " def compute_forward(self, batch, stage):\n", + " \"Given an input batch it computes the output probabilities.\"\n", + " wavs, lens = batch.sig\n", + " feats = self.hparams.compute_features(wavs)\n", + " feats = self.modules.mean_var_norm(feats, lens)\n", + " x = self.modules.model(feats)\n", + " x = self.modules.lin(x)\n", + " outputs = self.hparams.softmax(x)\n", + "```\n", + "\n", + "The input is the variable batch that contains all the entries specified in the data loader (e.g, we have `batch.sig` and `batch.phn_encoded`). As you can see, we compute the features, we perform a mean and variance normalization, and we call the model. Finally, a linear transformation + softmax is applied.\n", + "\n", + "The compute objective function looks like this:\n", + "\n", + "```python\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " \"Given the network predictions and targets computed the CTC loss.\"\n", + " predictions, lens = predictions\n", + " phns, phn_lens = batch.phn_encoded\n", + " loss = self.hparams.compute_cost(predictions, phns, lens, phn_lens)\n", + "\n", + " if stage != sb.Stage.TRAIN:\n", + " seq = sb.decoders.ctc_greedy_decode(\n", + " predictions, lens, blank_id=self.hparams.blank_index\n", + " )\n", + " self.per_metrics.append(batch.id, seq, phns, target_len=phn_lens)\n", + "\n", + " return loss\n", + "```\n", + "We take the predictions done in the forward step and compute a cost function using the encoded labels in batch.phn_encoded. 
During validation/test, we also perform actual decoding on the speech sequence (in this case using a greedy decoder and, in a more general case, using beam search) to monitor the performance.\n", + "\n", + "### Brain Class\n", + "To make training easier, we implemented a simple trainer called **Brain class**. The Brain class defines a set of customizable routines that implement all the steps needed in standard **training and validation loops**. After defining the data pipeline, the forward, the compute objective, and other custom methods, you can call the fit method of the brain class for training (and the eval one for the test):\n", + "\n", + "```python\n", + " # Trainer initialization\n", + " ctc_brain = CTCBrain(hparams[\"modules\"], hparams[\"opt_class\"], hparams)\n", + "\n", + " # Training/validation loop\n", + " ctc_brain.fit(\n", + " range(hparams[\"N_epochs\"]),\n", + " train_data,\n", + " valid_data,\n", + " train_loader_kwargs=hparams[\"dataloader_options\"],\n", + " valid_loader_kwargs=hparams[\"dataloader_options\"],\n", + " )\n", + " # Evaluation is run separately (now just evaluating on valid data)\n", + " ctc_brain.evaluate(valid_data)\n", + "```\n", + "For a more detailed description, take a look at the [Brain class tutorial here](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html\n", + ").\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gOE1UED8z05V" + }, + "source": [ + "## Pretrain and use\n", + "Sometimes you might only want to use a pre-trained model rather than training it from scratch. For instance, you might want to transcribe an audio file, compute speaker embeddings, apply a voice activity detector, and do many other operations in your scripts. To make this easier, we uploaded several models in [HuggingFace](https://huggingface.co/speechbrain/). The models use inference classes to make inference easier.
For instance, to transcribe an audio file with a model trained with librispeech you can simply do:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PQonIcrNhebC" + }, + "outputs": [], + "source": [ + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "\n", + "asr_model = EncoderDecoderASR.from_hparams(source=\"speechbrain/asr-crdnn-rnnlm-librispeech\", savedir=\"pretrained_models/asr-crdnn-rnnlm-librispeech\")\n", + "asr_model.transcribe_file('speechbrain/asr-crdnn-rnnlm-librispeech/example.wav')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UzbhBKksjTH4" + }, + "source": [ + "As you can see, in this case there is a matching between the text uttered by the speaker and the content of the audio file.\n", + "We have similar functions for speaker recognition, speech separation, enhancement." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_YgdsT3x_xz8" + }, + "source": [ + "## Folder Organization\n", + "The main folder is organized in this way:\n", + "\n", + "* **SpeechBrain** contains the main libraries of SpeechBrain. You can find here the core.py that implements core functionalities such as the Brain class. You also find here libraries for data loading, decoders, neural networks, signal processing, and many others. Under the folder lobe, you can find combinations of basic functionalities that we think are useful for speech and audio processing. For instance, you can find here the implementation of features like FBANKs and MFCCs, the data augmentation functions, as well as some popular neural networks used a lot in the recipes.\n", + "* **Recipes** contains training scripts for several speech datasets. 
For instance, you can find recipes for *LibriSpeech*, *TIMIT*, *VoxCeleb*, *VoiceBank*, and many others.\n", + "* **Samples** is a tiny speech dataset used for training minimal examples and to perform debug tests.\n", + "* **Test** is a collection of unit and integration tests that we use for debugging and continuous integration." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "raC03quSSCYC" + }, + "source": [ + "## Tensor Format\n", + "All the tensors within SpeechBrain are formatted using the following convention:\n", + "\n", + "`tensor=(batch, time_steps, channels[optional])`\n", + "\n", + "The batch is always the first element, and time steps are always the second one. The remaining dimensions are channels, which are optional (there might be as many as you need).\n", + "\n", + "Let's now see some examples. For instance, let's try to compute the FBANKS of an input signal:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "slYIC4fbXKER" + }, + "outputs": [], + "source": [ + "import torch\n", + "from speechbrain.lobes.features import Fbank\n", + "\n", + "signal = torch.rand([4, 16000]) # [batch, time]\n", + "print(signal.shape)\n", + "\n", + "fbank_maker = Fbank()\n", + "fbanks = fbank_maker(signal) # [batch, time, features]\n", + "print(fbanks.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fZVzo5sUWUih" + }, + "source": [ + "The `Fbank` function expects as input a signal formatted as `[batch, time]`. 
It returns the features in the format `[batch, time, features]`.\n", + "\n", + "Let's now try to compute the STFT of any audio signal:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Zto6xdG6ZRm1" + }, + "outputs": [], + "source": [ + "import torch\n", + "from speechbrain.dataio.dataio import read_audio\n", + "from speechbrain.processing.features import STFT\n", + "\n", + "signal = torch.rand([4, 1600]) # [batch, time]\n", + "print(signal.shape)\n", + "\n", + "compute_STFT = STFT(sample_rate=16000, win_length=25, hop_length=10, n_fft=400)\n", + "signal_STFT = compute_STFT(signal) #[batch, time, channel1, channel2]\n", + "print(signal_STFT.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tCLEwNFmaJpx" + }, + "source": [ + "The output here is `[batch, time, channel1, channel2]`, where `channel1` is the feature axis and `channel2` is the real and imaginary part." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sAWfaD24bAvU" + }, + "source": [ + "**Why do we need a tensor format?**\n", + "Defining a tensor format makes model combination easier. Many formats are possible. For SpeechBrain, we selected this one because it is commonly used in recurrent neural networks.\n", + "\n", + "In SpeechBrain, the basic building blocks of the neural networks (e.g, *RNN*, *CNN*, *normalization*, *pooling*, ...) 
are designed to support the same tensor format and can thus be combined smoothly.\n", + "\n", + "To convince you about that, let's try to combine a CNN and an RNN using SpeechBrain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "fURQxwJvcLf5" + }, + "outputs": [], + "source": [ + "from speechbrain.nnet.CNN import Conv1d\n", + "from speechbrain.nnet.RNN import LSTM\n", + "\n", + "inp_tensor = torch.rand([10, 15, 40])\n", + "print(inp_tensor.shape)\n", + "\n", + "# CNN\n", + "CNN = Conv1d(input_shape=inp_tensor.shape, out_channels=8, kernel_size=5)\n", + "cnn_out = CNN(inp_tensor)\n", + "print(cnn_out.shape)\n", + "\n", + "\n", + "# RNN\n", + "RNN = LSTM(input_shape=cnn_out.shape, hidden_size=256, num_layers=1)\n", + "rnn_out, _ = RNN(cnn_out)\n", + "print(rnn_out.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GUZcFaxqfAKd" + }, + "source": [ + "The combination is done without any tensor reshaping (e.g, we don't have to transpose, squeeze, unsqueeze). The basic nnet functions are a wrapper of the original pytorch functions. The difference is that we manage for you all the annoying tensor reshaping operations. 
This makes the code cleaner and easier to follow.\n", + "Let's try to do the same operation with raw PyTorch:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZGedYMKMgDHA" + }, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "inp_tensor = torch.rand([10, 15, 40])\n", + "print(inp_tensor.shape)\n", + "\n", + "# CNN\n", + "CNN = torch.nn.Conv1d(in_channels=40, out_channels=8, kernel_size=5)\n", + "inp_tensor_tr = inp_tensor.transpose(1,2) # requires (N,C,L)\n", + "cnn_out_tr = CNN(inp_tensor_tr)\n", + "print(cnn_out_tr.shape)\n", + "\n", + "# RNN\n", + "cnn_out_tr2 = cnn_out_tr.transpose(1,2)\n", + "RNN = torch.nn.LSTM(input_size=8, hidden_size=256, num_layers=1)\n", + "rnn_out, _ = RNN(cnn_out_tr2)\n", + "print(rnn_out.shape)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "K_UyB9yzi0PY" + }, + "source": [ + "The raw pytorch approach requires two transpose operations because of the different tensor formats used in CNN and RNN modules. In SpeechBrain, this is managed internally and users do not have to worry about it." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + 
"name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/basics/what-can-i-do-with-speechbrain.ipynb b/docs/tutorials/basics/what-can-i-do-with-speechbrain.ipynb new file mode 100644 index 0000000000..22022e25bc --- /dev/null +++ b/docs/tutorials/basics/what-can-i-do-with-speechbrain.ipynb @@ -0,0 +1,4692 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/basics/what-can-i-do-with-speechbrain.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/basics/what-can-i-do-with-speechbrain.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmf1KHEN6g32" + }, + "source": [ + "# **What can I do with SpeechBrain?**\n", + "\n", + "SpeechBrain can already do a lot of cool things. You can use SpeechBrain for the following types of problems:\n", + "\n", + "- **speech classification** (many-to-one, e.g. 
speaker-id)\n", + "- **speech regression** (speech-to-speech mapping, e.g, speech enhancement)\n", + "- **sequence-to-sequence** (speech to speech mapping, e.g., speech recognition)\n", + "\n", + "![SpeechBrain-Page-4.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAkwAAADdCAYAAABJwS0MAAAJBHRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDYtMTJUMjMlM0E0MSUzQTI3LjgxMFolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY4MS4wLjQwNDQuMTIyJTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE0LjcuNyUyMiUyMGV0YWclM0QlMjJlWHlpUlR2eG52eks0QUZHNXZtVyUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjJ2S2p2b1NPTWtNakVXT21ObWowTiUyMiUzRTdWcHRjNkk2RlA0MSUyRllnajclMkZoUlhhMjJkbHRiZTYzN3BSTWdRQ29RRzZKb2YlMkYwR0NTb0N1OTVXdlowN2RxYVZucHdjemt1ZWsyZk9lQ1czZyUyQlUxQVRQdkR0dlF2NUxxOXZKSyUyRm5FbFNhS3NxdXdqa2F4U2lWSFhVb0ZMa00yVnRvSW45QUc1c002bGMyVERLS2RJTWZZcG11V0ZGZzVEYU5HY0RCQ0M0N3lhZyUyRjM4VzJmQWhRWEJrd1g4b25TTWJPcnhLTlQ2VnQ2RHlQV3lONHQxdmhLQVRKa0xJZyUyRllPTjRSeVowcnVVMHdwdWxUc0d4RFAwbGVscGQwWDdkaWRlTVlnU0U5WklPVWJsZ0FmODVqNDM3UlZSYXNTJTJGQjh4dFVnb1hCWmxtSmdadXIxb2d2aUpqQjJJaUFPSUNVcnBzSU42WEs2ZzU4RlNlTVc0bTFtVlVOUFpkNU9WamV2QXJ5YTdzYjBObUQyd0dNdWoxJTJGJTJCZSUyRnpRWnJYbiUyRjJKQ1BlemlFUGlkcmJURkVoVGFNTEZZWiUyRiUyQjl6WU1aUDdJczZ0WjJ5d0RqR1JPS2lRNmtkTVdWd0p4aUp2Sm80UFBWaUFKQyUyQldwaUVZWVpCdEpWZ3Fld2pYMU0xZzdLM1c2ZCUyRld4V3NoT1p2RHdOSm9tZzhueGtBZU01c2JpV1VsNHZBbjFBMFNKdnFpejVmT3NEUnV3bG16b2I5VnlkMWIzcXNaaGRTUG1ldlFKdW5EaW9wa3BKVFRXZmhkdHk4TnFoYlhHMTl6bk9Gb1JvbmVNbVV4Q04yWEs3eUo3YzVMUHRneWhDRG9Ja3M4YzhTVTJtQ29XanN6MFlTZUZpRDFINE5BUHJOTWVzTSUyQmFyN2lEZjM2bXE0emlTWlpYVjI5Wk1UZFhZQ3ZDUkd6S1p4WXJKbk1vY0tFRnBvZUNWYUpUeVZkTEZBaGpGVEdVWGpOb1JzS2dlZ01YUWJpYjlPd2w2WFEzcjN5Tm5GeDk3T2JjQk5KelNuRzh3ZGlpaWlnbmU3V2NsR2N4a1gwU1ozcWlwdVFxSzZwNk5GT2tGb0IxZ1NkNnpkRHpJYXBXUVpVWkFjczE0Z0VTUTVrQTdwNDVnRkdHYTdUU3JrUjd6dkNkWUR6RUpnTDlySnZNcWs5allpZ1NVUUl
3MWNTSGhIa0xEVkd5dElkbUN6bUFxbUtKbEM0cHBTb0lNVE5NQWttSkRSYTcyREFWdTdqeGtlcUltc2I2ejF0czZzM05HTXFtaUZOVWlZdVYwUEVvVE10Uk1xaUoxZlUlMkJydVJpN1BweEhrTEE3bTdMVFdyTnd3TmJJcmQlMkJUckg5aUUwJTJCN3Q4OHliS0xKemZCNTBnVllmNmJ6MTUlMkJkWDczV2UzY3lWRDdvd0IyWm52S2pIZDZQZXJGMSUyRnpybzNic091OEFXU05FRXVwQWw5S3dPM3VSeDglMkJHbUxienJqU2Y5MVZyR3dIR1cxM2U5NlZDd2hnUHZUYjUlMkZsRmVMY1glMkJxa3NmMmc5SXhybEc3RzJOJTJGSEZkbmpMbHBWblpaMXV6bzM5dnJWM3JqQmtwRmFpSTJUdFFOOVJQZFlqdndTQVhNViUyQlpNdlZhcnBROCUyRnk1TiUyQjJJM0hhNUZyeWJ1dE5NUWgzT3U3WEZTNHpKSlNJY2E1bTN3aFFMYTlwbHRsaGM1VHNDUGNnMHIlMkJIcFNLbFRkS0NuOE1TbXBjS0dtUmtqWk9Ra2tsVFQ4VEoyMVVvam1hZ2ZEemFINkVMb0ZSaEhjcGFXcnhKSlFVYWhXVVZHJTJCWTYzcWZocExtcTNST1RwcE5IQzZrOUdzNDA0N0ZTa3RNblk2V2l1S0ZsMTU0NmZmbHBka0dYVDhmTHhYTFJvYmZHUk82cWxpV1poZ3BKZ3dJVEVHM0hWVVFOVkdTZ1d4QlU3YyUyRmd3bjlNRXlVUU9mem1KanElMkZaOHU2ajhNbCUyQjBtRUpxRzJGOUk5NzF4NjJFMGVnJTJGdG5xOWJDdkdWY2FNdk9WVG9VR0tQYnljM0hUZHlSekNJWSUyQiUyQm1LVVQ2TWdoYnJlNTBabjBBOCUyRkZGSk1PWGF4T003UmRuNlNOeUolMkJqTGx2VmlrTVZnM3Y5NDhDZTk2ZUpYWkM3dVhvVm9NVmtCNlM3cVJrYSUyRk4lMkZ6Mm1ERHlwRUVzR1N1ZkRpT1hzWElKaHhkUE0xZFc5SE9SZUxGNnN2dzFGdjhFM3lYMmV4WU9iMWl3bk1PYmhxcW9wJTJCTHdpdmdmY3ZqTFlQa0lIRjdSajhiaFMweWRrTU5mWnNzWER2JTJGOU9iemMyT01yNTV3MWk5WEQ1cTlkYklWaDg4aURDZUpBMGcxUXhQN2d4RHBkUzlNdkVmeDUlMkJQekhlJTJGRiUyRk0zeVc5OGlyVkx3dHBmcHh4czhKU0RmZk5rbmI2JTJGWTdPM0xuTnclM0QlM0QlM0MlMkZkaWFncmFtJTNFJTNDJTJGbXhmaWxlJTNFrzJ2ygAAIABJREFUeF7snQm4TdX7x19TpnAzhgxlrkRSVCKEpAEVJaGiSXNSmjRKmiWhRBGpSCmKhEyRpPoVGTJllq4MkaH/81n+67bvcc69+95zzt77nPuu57nP5Z691/Bdw/td3/dd6+T6999//xVNioAioAgoAoqAIqAIKAIREcilhElHhyKgCCgCioAioAgoAhkjoIRJR4gioAgoAoqAIqAIKAKZIKCESYeIIqAIKAKKgCKgCCgCSph0DCgCioAioAgoAoqAIhAdAqowRYefvq0IKAKKgCKgCCgCOQABJUw5oJO1iYqAIqAIKAKKgCIQHQJKmKLDT99WBLKNwLx58yR//vxyxhlnZDsPfVERUAQUAUXAGwSUMHmDs5aiCByFwNChQ+X444+XNm3aSN68eRUhRUARUAQUgQAjoIQpwJ2jVUteBLgv9q677pLTTjtNChcuLFdddVXyNlZbpggoAopAEiCghCkJOlGbkHgIHDp0yBCmEiVKyIknnihdu3ZNvEZojRUBRUARyEEIKGHKQZ2tTQ0GAqhLW7dulfvvv19SU1OlatWq8vzzzwejcloLRUARUAQUgbAIKGHSgaEIeIwAhGnVqlXy4IMPyr59+6R
ChQry2muveVwLLU4RUAQUAUUgKwgoYcoKWvqsIhAFAs7vuf7ll1/k4YcfloMHD8oJJ5wgr7/+ehQ566uKgCKgCCgC8UZACVO8Edb8cxwChw8fNkTomGOOSdf2v/76ywR4586dWyBMKEx//PGHnHTSSfL2229Lrly5chxW4RoMsVQsdCgoAopA0BBQwhS0HtH6JDwCO3bsEIw+Ad3ONH/+fDn99NPN3Us//fST9OnTR2bOnCmNGjWSyZMnGyKV04kCuG3fvl1KliyZ47FI+ImgDVAEkgwBJUxJ1qHaHP8R+Pzzz6VixYpy8sknp6vMp59+KikpKXLOOefI448/LgsXLpRp06ZJjRo1ZMCAAdK8eXNDpixpymlKC+397bffpFChQuZ+qpxOHv0fyVoDRUARcCKghEnHgyIQYwRQi4oUKSLnnXdeupzHjBljXHA9e/aUxx57TN566y3ZuHGjubTy0UcfNS68m266ybwLWVi/fr2UL18+xyhPuDK///57KVu2rPlRwhTjganZKQKKQFQIKGGKCj59WRE4GoEXX3xRateuLS1atEj7EDLw7LPPGlfdAw88IH379pVhw4aZ6wXy5MkjF198sfkNybrlllskX758Yl14O3fuNATCmWwAeSRSQQwVz5CPH4n2HjhwwJBB2uUm8c6SJUtMW0MVJj4j0V4lUm7Q1GcUAUUg1ggoYYo1oppfjkEgkssMd9vZZ58tLVu2TMOCiyqJWTr22GPN6bhHHnlE3nzzzXSECZLDz+DBg6Vy5coyd+5cOeWUU+TXX381+TnTsmXLzP1Nkb5SZe/evYYwEWTudaLc3bt3y8cffyyNGzc21yZAcjJzMYLRggULpFKlSlKuXLl0rsnff/9dypQpI9u2bTOqmyZFQBFQBLxGQAmT14hreUmBAMZ/8+bNRgkhSJkAb4K2SbjbIDitWrVKR5juvPNOKVWqlCFLvXr1ktGjRxsCwHsoS5s2bTKxT/369TNfyDt9+nSpW7euUZouueSSdMrKd999Z8hUgQIFjsKTui1fvtyoSxAvWy/74P79+w15Cfeum87JjPhA+saOHSsjRoww1yVUq1bNEMEtW7aYKxQyUsWGDx9uYrogWrbeqEv9+/eXm2++WSBOfJ2MJkVAEVAEvEZACZPXiGt5SYEARnzRokWG2Hz22WdSv359E3vEDwoSRKV79+7pCNNtt91mFB8CvDt16iTjx483RAJiwOk5YpZw5VmFioBwCMbSpUuldevWaWoRZRNAjooU7jvoIDSjRo2SdevWmdvEQ91yXG9AHgSgZzWR94YNG4zKE4n4/Pnnn0YlI/i9QYMG0qRJE0Mg+dtDDz0U0UUHFhCm4447Ti6//PK051CecGP27t3bkEolTFntNX1eEVAEYoGAEqZYoKh55DgEMOJffvmlnH/++eaWbpQmfpo2bWoIE24lCJNVSXj+1ltvla+//lp+/vlnufrqqw1h4u8QD06GcUIOMsB7HTp0kBkzZgjxUHzPXJs2baRo0aIGZ9xdkKpTTz017HfQQWoIIifvHj16mO+qcyYIzcCBA43SFao+ZdaRkJr3339f2rVrZ4gY7zvzoOyVK1fKM888I7gNUcEmTZokX331lakTyhPv8Rw/zneJecJNye3nt99+e5q7kXZA/O677z5ziu7MM8+M6IrMrP76uSKgCCgC2UUgKQnTvHnzzOKM24IFngUWSf+ss87KLk5xfQ91grqSXnrpJfOlrM6EAX766afNbv2bb76Ja100c3cIYNxRPAje5nvgqlSpYtxtjLVXXnnFuOkgUlbdYRwSzD116lRj9CFMEyZMMKTGJogT+aA+8ewPP/wg1113nRm7l156aZoiBOGBPHBtwT333HNUhSEiKDmURf1w59kgcetKvOGGG4xLsHjx4q6DqHmXU37Uhfrzf9xkzgs6Ua4gRyhFKFyQSAgTsVb8m3gsMFm7dq2ULl1aChYsmFb/f/75R+69915DHCFcFjswAms+e+e
dd4zLDsKmSRFQBBQBLxFIOsLEKRvuufn777/T4cgO/ttvvz3qbhwvwY5UlpMwEQvD94wVK1Ys7XElTEHopfR1wLhfdtllhhx169ZN6tWrJzVr1pTq1asbZYibvFFiMP4knoe44F4jsJl4plDCxHMQJtxXBIjjomvfvr1x4UEa7rjjDpMXcU8NGzaU66+/3riqQk+hQWSaNWsmq1evlpdfftkQHFQpYpcgPLt27TKkjM8uvPBC16fYKJcbyT/44AMTW0U8EcQIImQTxBCiSNwVpIhywQIiRcwUJ/4gQu+9955R45zvUj+IGIoYJNFJNq+88kp54YUXzN/BDzKpKf4I4Ppl3LpJEGMONdCvpNmzZ5tLWRMlrVmzRkaOHClt27Y14ztS4rLZRG0jbQrXTi/b5BbncPhnpZ5ZedbtGE06wsTOc+LEicZd8cUXXxgcGNwEwbIYcxdO0JKTMFE3jCWBvzYpYQpaj4lxG6EmPfXUUyaOCNcShAklcNasWeYm7w8//DAtsJrnuTqAcYjBh+ygANnj8raFkATIDi43iDMKEwHQ3HyNO4vEVQQQM9QlyJo9hWbzIE9iq1CyIFQokyRcZKg/BJYzD3D92fufQhEOd23B4sWLjRsRVySuNlxvED/ijWyC9KBe0XZOukH6IFEk1KTU1FSDFV8Lc+ONN5qTfjbt2bPHYIlSN2TIkDTlivdxcVIurjnIEhhyai40jgrlD3WPz7LqbgzeKPO/RjmJMD355JNmPnBYgXkVKbEZgMCTPvroI7MOJFIK1854kItImLjFWQlTnEcVu3DiPAiGffXVV4UgW9Jzzz1nJH0+42sr3N4LE+fqpmVvCdO5555rjpJjWDBGGBySEiavesJdORASSAuBzxh9VBYMNWSHcQdRIRAco26P9UMGIAcYcYgPdzTxjvMLeSkd1xUKEy4pyBUXWVIeGwGUHRKnzSxh6tixowkM5z17SzjPEzzO+7jcIDGMf1yHtWrVMjeMUy4n86gn6mtowp0GcXG6zCCCqFWMS0gY+aOAEW9klTSUXZ7hhB/vUrZ1OzLvCDinrsRzQSIJTrekB+Wrc+fORgXjfZsnOKG84o6mXJQqAuLr1KmTjjDZ04GoBMRnhWuXux7WpywCkFVUQpvYhNqDBqiEzpOgqEtz5sxJWPXFrsOZEaZEHx3h2uklYYoG56zUMyvPuu3TpFKY2EFjEEjs3u3Fgc5dEsYsNAjWLVjxes4OIFSld9991wQFs/t/4403lDDFC/Qo8sWI4B5CvSGomhu7IQUQAlQfCIuN4eHUHAmiAGHiGdxKqDDO+CVbHQgVFzcSX4QaBHHiOQgT6g6kALLGUX1i3QgOt0QKYg1po3zGOOqODayG3HHCDHICAeE3qg0ubDYSoUoNpIqEakaMEj+ffPKJITQQEVx5ECZcLhjNCy64wOSB+43PKBvChCvSSZiIv6KOKGy4LiFhtIWECw8VDsz4zBJA6svJOerEbh6M2NmjxIUGnBPMTp6otIm2849iSHr2qnMtxQVHfzmT00gRS8ohh0GDBpnxjxuZayb4smmbINiotOPGjTNjhn6GcDPuQ7+LMbSRjAsug2XNxP0MwUeNfeKJJ4xr2ybIP39DoWSzAeHHzcj1H5wU5TQnp1BDEyoxnorQFMkQ/+9//zMKM+2mHFRhToiiWhF3l1Fing4dOtQcesBtz/+ZF9dee62Z5/a+tSlTpshFF11ksmI+4/5m/YHUhsPXWWZG7USVtW7GePVbZjhn1k+0xYk9/wYP4hpZV9hMcRLXfiVVpH6KZswlFWGio1FpSOzubZA3qo31pSOnMrCClCxhYgITC8OCgWFlwkIAVWEKUm+JIRyQW/oLlYN+gqRAGHCPQSg4ek+Mjz3ZxiWOliQRi4SRCHXH2VZCKDAsjFXUEggHcXns3lmISRgEgqZZoHHbMdZRiygPksJv1J2MEmQDpYaTfqHuK9QcDABGBnUKw2GvBUDBpY0sTJASDAMuRNrP4k0
gOBiFEibKYCPDe6i/lI37nPLBAsMKrihmGEFIHuVgPFDKwMO6F4kPw8jZevMM7j5chBBLDCmB80FTk4M1krNem6wQJkgx4wGyzZgkMY5wqdJvjGu+PxGCS8wmF73y1Tio66yDjMFIN9XT38QaQeLJi3whEMTZQZx4F2KGISUv4maYm8QmYUhRMykPxYwDN4xxNii2jow9CAybCjeEifypA0oy5Ij5uWLFCvPD/GBjktGFq8xh5jqJetNuDkiQiJVkg8D8YiPBRoHEZgnsaBdXnITiG1rvjNoJ4bSEKV79llH5rFeZ9VMoYWLskKc9TMKYAGOEE9TOcIQpmjFH+UlFmFiA7e3KxFtwtw2JnaldaJkUdmBkfbmIzxuWMHGiCSPMLomASYgThlYJU3xwz26uLP58Hxy7wdDEooa6gwoEabKECWUGY8OCiruMBTTUHWfzwgCwC8aIoNjwHG46CBMkhrzZIbMQE5fH1QMsEOy22AxQBp9lRpgoj503SlmoYWKe4HrBsHA6jVN3xBUxFiFMJMg87YPM0DbqzeeoAxAgyAq/nfFQLHIsahhN2gKpof48Q3wSCpFV6iBA7AZxJWK4cNMxd8kTtwkGg2ftLeJ8Lx91ZrN0zTXXmKB25+GJ7Pa3vvcfAlkhTBBfCDLKKkYY1ZRkN7OQYuYFybpbGVuMK9SGjFxjrIsQJhLzkM0I4x5CBOEiBon3Ua4YVyTIP3XBlci8IfEs5AaFBaJFyswlF84Qc/gDJYi8UF6ZC4xTXOaoa8wh583/zjHlVF5Y/7EDJEg/c5PEJgD3t7NsCBoiAYosKhaxQU58w43bSO105hvPfotUvtt+ctaTvkbBpP0obTYEB6JLqES4fopmzIFnUhEmSAZkI3TQJIrCZAkTO2kUBRJtYheEIqHXCgTDdBF7g2Jpr4IIrRWqCIslu1/rFoZEsViwqENuICIZJUgARISYO8gErjF2l7hrWQhYTNm5QySQ8CEOKEAYDnba7FLtrj6jctjZQkhwhVi3nL2pGyNn1S2MHYsS115YF5t1/6F04U7BSHDdgSVA4cq19zZZdQ0iCBGibCcJtQSOxdCeXOJzFkPKv+KKK4wqwM6YurM5QtEjAJ1gedwZ5KtfoxLbOZMVwoTb2l57wQaWgwgkjCNkl5OaxPNxCAF3rE30M2MZggBRCJcgRJzYhLAT82ZdVhBxxiJzDOXSmVCbmH/MF0teICu4lKMlTIRPYKSpB5sG3GYYdDff5WjVJdYN6mHjBtnw4JakzrjmcD05SYAlBrSRdcG6Dy2+2SVM8ew3Nzhn1E/O9hP2wBpFYq1jvQQr7q1jzQhHmKIZc0lHmNgt2HgIdtvEUpAwXMiaJCYmEzRIKVRhom4sFsiwECdcDwSxKmHKuNesEbaXIoZ+UWtmX+kRKffQE2MQJnaqEJNIiYWTYGjUFv4NYcKgR1KVwuVjlRM+YzFF5cHlwAk3Fh7bHusOhESwUGBsiOGxp9MyQs0SElQvG/fBQoOaxEk3yoCYEatlr0sIzY/PISe4BTGQECs3ifZhUFAcwBNDiVFwJoggpAcjyEJIkDjtRUFjR48rBCOJGxDXKEH3tJs6oWYxh6wKlZUv7Y1mrCSzGzArhAnFn9g2Eq5ke4WEVXDYdLCZjZQgAPRpuMSBBRRX3LuQhUgJhZITmbgGUXpCk43DcmPI7bvhDDFqKOQfd5BNkDbG4N13321imSIl2xZcec73eZ6NEq45sGLz7Czb6S1hXWDjQspIIXOjMMWz3yKV77afMgrk5iAMLlDrag33bDRjDmyTSmFi58luk907u1wGKgmjxaSBrTOwsrJwuln4o30mHGFiknB0mzax6CAnK2HKGGkWZSYerhm+mgTDye7TurggOuCJMYSAMBbYmRBkzf95FmOHEScf1B2Op+PWIU+78yM+ByWT75KLlCiT2AJIDuoHhJ08spuoF246lCOMjG2HMz9IGWRl2LBh5rLIcEHloeUTWM3ulWdxB7CrZaF
B4ne2j7IhKuHaDF7sqlmMcINRttvEXMSosDvHqLHjD02WOIKpJcP8m36xJ/lwxbFJwohST04nYqBZAyByKHHgT14QMPqQvic/nmUhp8/pbwwd/wdzfhhHuEHJj7FiiTLv0nbGHcHDvItR5plkTVkhTM57mMIZSmu8IL+4UEMTigHjMFyy70IoUIwiJeIFIdH0E+5nCBbk27pvYkWYKJ/Aa9yDjENcjJagsWGiHLuBD61rRoTJkgDWG+KVIhEGt4TPDWGKZ79FKt9tPznbzzpoPTFgyqYLdRnFkHUkI8KUnTGXdISJBuETx0+JH5yBy6KIWwRliV0yxoTE7p8AQyZcuFgUO6g51s0Ohq+sYOdOcvs3t2WEI0yUgxGxJ+X4vxKmYJghDCtKJjFK4RLGHIUDNwS7Rv6PeoPBZjzyGeQtUrLxD87PIQaMP0gbiy9ytD2ZZ7+PDnWHoGnGKyQBUpBZYoPB3CBf69YgP4LJIVAkSAPzhqByLtt0Jj5DpYJ88m9iLrhoM6Nk22dvNieei3dRmCBNzsSzdqPDdQIQOt5jocRA0V5279QdBQ+DyDOoExh3SKvex5TZKHD/eSwJEyQJtRSimdHmI1zt7LuQWzY69rZ5lE4uKGYDhBGGKBMPZeNBycsZMxRLwuSsJ2SaeuAiJ1aP04SRNhK4lVCFQl1yxHNh2FlvyAc7FQTCFE2/RSJMbvvJ2X6nksZ6CsEGK1ycrCPhsIqm7klJmJDkICCh8itgQpCsOw7/Nowdv7o9YRBuYhJEi8uA03fsXklu/+a2jEiECQOLYbZBtkqY3C/s8XwSg9ylSxdzk3e4xBUAKIK4ixh3JNxHxFwwsVEqICnh3HMYd4IuITD44yFlPIfLjAXA3mDPbgojARHDJcW/ITPsYiFSGBLqiXoCcUKNsW48Z50JFCUWiB24VV4pD0KGmwu1Bpcwu3TcfKFtRvFCxeFeJepOPFRm7kC+F49gYOpJEDdEDMLDRsdexImKQ53YqHCPmlWhcBVST8gc5fKedblRV+R4dsi4tHHfuYkhiedYSba8Y0mYnAG4NqiZPoQcoOhxOWSkCySJ07H3QXGilLEKgebUJm4tS1BQE1lHrerAXGDMWVcgMVTkg0uNzQMJJYoQiEgpnCHmlCjuX+a+Ddrmfa6HYS5lRJiIUbXqk7NsZyC3vSYnWsIUqZ1ZyTeafotUvtt+ctaTU3UEfbPG2aB7MCcsAfd9ZkHfWR1zSUmYaBQ7VowAwdMsrkie7HpxcdlkyQzkh+ciJbfkKNxzbsuIRJioEy4de+u3EqZgmB8WXYgBbl5cWta1Y2sHOSIWB8Jk72EiFgNjjnHnN+Qp3LUCkA52W+QNEcIYQGAgGRzBZzfNexBpyEqvXr3MjooxDkFg4eFzFn82DRAqykTpQnF1nlpjR4uCiWGy6pJtAwsRgasQLwwBAayc9IGwUB/rrkLpgbzYGA2IIOpu6FcTmcUmVy6jFnFKiVN+4IgijHoFsWHnj+sSdYjgcXCl/iyM/OZkEy5HyCF1wvCFqkfsLHEPTp48OcOvtwjGSEq8WsSSMKFk4mpi/WU8QXZQKnGrMH7ZqNqY1FCkGMeQIDYmodcKsDng9Bhqo/O4Ps+jNrGJoWzyt3OI05ooXRA1xhdudOZ4uK92CWeI2awQQ8jcYLxa1y0qM+OeU30ZfcWMDWKnnbjhqB93BpKsusS/s0JsIo2ucO2kvHBf9xJOEYqm36hTuPIhlfZahYz6iXWMzRMJm0vcMhtQVHWwZyPJWosaHQ6raOueVDFMWV1+GOAMZnvxX1bfd/O8F2W4qYc+EzsE7MWVKCn2LhjUEhvEjGuJCYxRt7dNoxJCQFjEUTkx6pEurmQRx/BD/CEp5AtRYfHnHXZpEBh2wRgZZGYWaRYdCBzGBNJBHBwGhxNJGBFi+TAE1J/4LIgZCzUkJTRQGSOG8cBw4d6D0LCztHdJYXQ4rk1ZkBtLXDB4LFqQNWfQOujzDEQSFzmGksWYXR5
Gyd47RluIxYJUYbjIgzIwIqhY7CQhj5wSDHfhJuWiMBH3Z28Kj13Pa06xJEygiSqEOsMBA9RUxhUbTf6W2WWPqLWMaVQiiDrjAWOKSsN4ITHOCY2AjGNQcc0RZwehwhXGdxuyOWEuQWpw4+EeZOOBmspmNjRFIi1gw/j88ccfzf1PBGEzL9nU2NPbkUYQdWNzhNvNBn4zhwkjQaWy6m8sCFO4dtIPbglTtP0WrnzWPDf9RH/YiztxdYIZawUbNMYNd7nZi6kjYRXNmMuxhAmAMVxIufb+ilgvh16UEes6a36ZI4ABR/1BKWLCYuRRQ3CjobjYL6ZFRbJGG/cYcj0Ehx0Uiky4U2woMMQtQH44vMD4xIig+KD22MUKw0D+kBB25OSP7I9SRP1QIyFtHLMnHolFmGchJ8RWYFSoBy5pFvNQwoSbmgXb3rZNuRgd8mPesBvk0k6MnPNWZeoBGeRoMp+Dk3U9UgYGit+QP9wiqGbgRd1Qz4gPJH8IkfPLdyF6kCuMGJiAUUbGh8+Cdrgj85GlTygCikCQEcixhAmDwd1GXAzo/ALQWHaWF2XEsr6al3sEIAbs/iAHEA6UFdxi7FC5KBV3XDjChFqD/E8wsv2KEnu6ChWKo8moQbipOP1D7ALEiRgiG9hK2bgi7MWRnNBih83Oyp4kowxiN3C3cfcIahM7dkgEdxexM2c3xvPO+CWLALs3nnfGAEHA2LGTkMPDHde3J+kgk5AtVDLreuR51DGIJYQLtYl22DLAAwJKW/j6GPt3VDWeRx1AQbLtdN9b+qQioAgoAtEjkGMJU/TQaQ45GQHUIVQg4hIgByg4BBoj8+MSgtRASqzRtwoTyg9xabgPcClAYKzsj6qDQgUZIQaPG4P5QY3imgTr9oKAQIYgEqgxTteX/TfqDT+4sVCmUHns+8RxQFwIJI+kwjiP79t+plxUU4iQJUvhxgBt5dg2p9Y4WGHjmSB8KEy8z98gS5Agm+yFmcQl2O/F4zPKhXjhXqQtqhzl5JmnbVcE/ENACZN/2GvJCYwARhwyQ4wPhIm4BwKoITDEY+COwn1lSQrqCeSFE3CQAb4ygRgn1CEbq4FvnfxQrji5Yy9PtLdjO+HCFYYyFRqszTO8x+eUSZxSaGA0ZIln7KmgWHcDRA4XJYoUbj9UKUgOLj5uR4dEhiNkvEcgLqovsVW2bTwL+eJqBYI5NSkCioAi4AcCSpj8QF3LTHgEMOLE3PAb8kPAJESHQGkOERCXA2GyyV4MibJEIDfXEnCyhgshrUsY9QVCwP1b9ruuIgFF2RCeSHcM2a9FsW48Zz72O+biGRQNLrjPUJBQuqgneHCSLxzJs0QP4knQLEHbzrgq6gzR0juVEn7qaAMUgYRFQAlTwnadVtxvBCAFqDgEOOPe4lvEcSeFI0w8i6KEKw2liZN0ECTcbZAr4p0gChAqgrTtF0dn1Mbsfn2Hl7gR9E08Eu3mNy7JjEgPyh3HgsFTyZGXPaVlKQKKQGYIKGHKDCH9XBHIAAHibvgaHogAQd+4wnDNWZec81WIAJefchKMGB8uV8NVxRForhHgOw+J0yE/e39TooMPPsRq2Qsm+Z1RDBIkEJxw3ylhSvTe1/orAsmFgBKm5OpPbY3HCGDguSOJmBtOf6GQcPoLpYgg8FDCRFwP9yJx2aO904hL7rjgjtuLiW/iaoFk/gLXzFQzJUweD2ItThFQBFwhoITJFUz6kCIQGQFOt/EVHjZeiAvsIE7OE2C8zTUBBH1z0gtyhAsPckQixomgb64UyKlkCRxUYdKZpggoAkFFQAlTUHtG65UwCBC3xN1Alujw3VCoRpAoZ+KeJYK6cTXxPXG4p+IZeJ0wADoqqoQpEXtN66wI5AwElDDljH7WVsYRAXuTtY3N4RvTIVBc9x8pJULAdhwhyzBr1DZIpd635FcPaLmKgCIQDgElTDouFIEYI8AReG7+xv2mKesIKJnMOmb6hiKgCMQfASVM8cdYS8hhCBC
/hDqiCkkO63htriKgCCQ1AkqYou3eNWtEKleONhd9XxFQBBQBRSAnIKA2I2F7WQlTNF336aci114rMmqUyMUXR5OTvqsIBBsBHevB7h+tXWIgoPMoMfopQi2VMEXTfeefLzJrlkiTJiIzZ0aTk76rCAQbAR3rwe4frV1iIKDzKDH6SQlTjPspNVWkbFmRfftE+ELQTZtEUlJiXIhmpwgEAAEd6wHoBK1CwiOg8yjhu1AVpux24WOPiTz++H9v9+0rwt80KQLJhoCO9WTrUW2PHwjoPPID9ZiWqYQpO3CyUzjxRBF+24S6tHq1qkxiluXpAAAgAElEQVTZwVPfCS4COtaD2zdas8RBQOdR4vRVBjVVwpSdbrQ7hTp1RH74QcT+VpUpO2jqO0FGQMd6kHtH65YoCOg8SpSeyrCeSpiy2o3sFBj8d90lMnLkEbccRKlbN5GXXz7ymcYyZRVVfT6ICOhYD2KvaJ0SDQGdR4nWYxHrq4Qpmq60uwZVlqJBUd9NBAR0rCdCL2kdg46AzqOg95AqTHHrIR38cYNWMw4YAjrWA9YhWp2EREDnUUJ2m620KkzRdJ8O/mjQ03cTCQEd64nUW1rXoCKg8yioPeOqXkqYXMEU4SEd/NGgp+8mEgI61hOpt7SuQUVA51FQe8ZVvZQwuYJJCVM0MOm7SYCALvRJ0InaBN8R0HnkexdEUwElTNGgp4M/GvT03URCQMd6IvWW1jWoCOg8CmrPuKqXEiZXMKnCFA1M+m4SIKALfRJ0ojbBdwR0HvneBdFUQAlTNOjp4I8GPX03kRDQsZ5IvaV1DSoCOo+C2jOu6qWEyRVMqjBFA5O+mwQI6EKfBJ2oTfAdAZ1HvndBNBVQwhQNejr4o0FP300kBHSsJ1JvaV2DioDOo6D2jKt6KWFyBZMqTNHApO8mAQK60CdBJ2oTfEdA55HvXRBNBZQwRYOeDv5o0NN3EwkBHeuJ1Fta16AioPMoqD3jql5KmFzBpApTNDDpu0mAgC70SdCJ2gTfEdB55HsXRFMBTwjT/t0b5a+Ns2XP9iWyf89GOXxwXzR11nfjiEDuvAUkf+FyUrhkXSla7jzJf2y5OJaWfFnvT/1V/lo1Wvau/0L2py6Twwd3JV8jk6RFufMWkfwpNaVQhVZStEpnyZ9SI0lalvjN2LNxg2z8epbsWLJYdm/YKAf3/Z34jUrSFuQtUFCOLV9OitetJ+UaN5HC5conaUtF4k6YtiwdIam/z5CUMnWlSPGTJH/hUpInb4GkBTTRG3bo4D7Zv2eb7Nrxm6RuWSIpJzSVMrWuS/RmeVL/rfPvkNRfR0hKxfZybOlGkr9YNcmTt4gnZWshWUfg0MFdsn/nCtm9dY6krpsgKTWuk9JnD8x6RvpGTBFYNuIN2TD9Kylbr56UqFJVCpcuLfkKqM2IKcgxzOzAvn2yZ+tW+WPVStm0eLGUb95Mal7XI4YlBCeruBGmA/v+lA2LB0j+gsWkdOXzlCQFp89d1wTytHXNbNn/904pX6+35CtwnOt3c9KDB/dulA1T28oxhSpK6Vq3KUlKwM6HPG1dOkj+2btOyrecKHkLqbLqdTfu27FDfhjQTwqlpMiJTZoqSfK6A2JQHuRp9awZsjc1Ver0flAKFC8eg1yDk0XcCNOaeX3k2GLlpWTFs4PTWq1JthDYvm6+7N65QSqf80y23k/2l9ZOPEsKF68vJavdkOxNTfr2bV8xXPbsWCSV2i5M+rYGrYELHuglKSdUkMrnNgpa1bQ+WURgzdw5kvr7emnQ//ksvhnsx+NCmHDDHd63RcpWbRHs1mvtXCOwaeU0yV2gjLrnQhDDDXdo10YpW7uPayz1wWAjsOmnZyRPkXLqnvOwm3DDHdiyRaq3au1hqVpUPBFY/sUUyVemTFK552JOmAjwXjP/Aala/yZ1w8VzNHqcN+65lYuGSuWz+2sg+P9jT4D32on1pUrzSeqG83g8xrM43HOrpl8ildou0kDweAL9/3k
T4P1N717SoOdt6obzAG+visA9t+C1QdJwwPNJEwgec8K0bfk4Obxvg5Q5sYlX/aLleITAltWzJHeB8lKqekePSgx2Mdu+e0T+3blOSte6I9gV1dplGYGtSwdKrmIVpdQZT2b5XX0hawiseG+MHNi4Qao0bZa1F/XpwCOwasZXkq9ceal2VafA19VNBWNOmIhdKl2xgRQqVsFN+fpMAiGwd+d62bpugcYy/X+fEbtUqmoPKVSiXgL1olbVDQJ7/1gs21a+obFMbsCK8hlilyqffa6kVKwYZU76etAQSF23TtbMn5s0sUwxJ0y/TusqVevfqO64oI3cGNTniFtumNRo8XYMckv8LJaPLCpVmn+i7rjE78qjWnDELXepVO/2VxK2LlhNmn5tJ2lwa091xwWrW2JSG+OWG/yaNB81Jib5+Z1JzAnT0ikdpVaj3n63S8uPEwJL5wyQWq3HxSn3xMp22Zu5pGabRYlVaa2tawSWfVZfanb/1/Xz+mD2EPjiynZy/oMPZ+9lfSvwCMzs95S0+uCjwNfTTQWVMLlBSZ9JQ0AJ03+DQQlTck8MJUze9K8SJm9w9qsUJUwZIK8Kk1/D0ptylTApYfJmpPlfihImb/pACZM3OPtVihImJUx+jT3fy1XCpITJ90HoUQWUMHkDtBImb3D2qxQlTEqY/Bp7vperhEkJk++D0KMKKGHyBmglTN7g7FcpSpiUMPk19nwvVwmTEibfB6FHFVDC5A3QSpi8wdmvUpQwKWHya+z5Xq4SJiVMvg9CjyqghMkboJUweYOzX6UoYVLC5NfY871cJUxKmHwfhB5VQAmTN0ArYfIGZ79KUcKkhMmvsed7uUqYlDD5Pgg9qoASJm+AVsLkDc5+laKESQmTX2PP93KVMClh8n0QelQBJUzeAK2EyRuc/SpFCZMSJr/Gnu/lKmFSwuT7IPSoAkqYvAFaCZM3OPtVihImJUx+jT3fy1XCpITJ90HoUQWUMHkDtBImb3D2qxQlTEqY/Bp7vperhEkJk++D0KMKKGHyBmglTN7g7FcpSpiUMPk19nwvVwmTEibfB6FHFVDC5A3QSpi8wdmvUpQwBYAwrV2/SV4YOFq+mD5f1m/YIsfkyyeVKpaVdpc0lVu7XyGlSxVPq2XDZt3k4MFDsujrUb6MmfqNr5W8efPIN1+NNOUfOHBQOvd4RKZMnSu5c+eWLaumyrktbkj3jC8VdVGoEqb4ECbGZ76KDcL2QOmSxaVGlUpy+/Ud5YqLm0uuXLlc9JQ+Ei0CSpiiRdDd+14Qps3bt8uAt0bItPnzZf2mzWYOlS5RQs49va7c2bmz1KlR3V1ls/nUgh9/lCeHDpPvly6TQ4cOSfXKlaVXt65yadPz0+Xodz2z2bwMX1PC5DNhmvrVN3L5Nb3lwMGDcmXbC+T0OjVk//5/ZP7Cn+SzL+ZIubIlZcr4V+XUk6uYmvpNmF4c9K7kzp1L7rq1k6nPR5NmSPtr7pObb7hcLmp5rrRp1UheHjw23TPxGLixyFMJU3wJ05l1T5YbO7dPK+Tff/+VTVu2y7hPpskvy3+Th+68Xp66/9ZYdKXmkQkCSpi8GSLxJkxb/9ghDTpdI3/v2yc3tG8ntatXN5vTZb+tlmEffih7/94nU98YKvVq1YpLg2d/t1ha33yLVK9cSW5o317y5skjIyd+LN8vWyYfvvSiXNyksSnX73rGpfEiooTJR8K07vfNUrtBRylSpLB8+clgqVm9crraTJ+5UC7peLdUq1JRFs9+V/Lkye07YQqFa+hbE+Tmu/rJj/Pfk9qnVI3pOIU45s9/TEzzdGamhCm+hKnjpS3lvSH9juq//f/8I2e27iLLV62THUu/kkIFC8Stj23Ghw4dFggbxiURUqzrq4TJm16PN2F6dvhb0ve1wfLZ4NekecP0Ku6q9eul7uVXSpvGjeW95wfEpcGNu14n6zdvlh8mfChFCxc2Zezas1dqXHyJ1KlRQ6YMGWz+5nc949J4JUw
Zw7p0Skep1ah3vLCXu+5/QV55fax89uErRp0Jl94Z+5n5c8f2LQx5CKcw8cyrQ8bJ8pVrJU+ePEaNevzBm6Rp4/ppWf6+Yas8/ORgmT5roWzbnirFjysqLZo2kP6P3y5ljy9pnnPzjNMld/5FN8qsOYvTVfvvrfOkUcujXXKQvyeefVO++36pMVx1aleXPvd2k0taH9mRkM46v4txP159ZSu5p89LctYZp8ik91+KG/5KmPwhTJR616MvyCtvjpWV8yZKlconmIrs3rNX+j4/VD6Y9KVs3vaHlC5RXC5t1ViefuBWOa5Y0bTKfvfjUvP+oh9+kZRiRaR7p7Zy8QXnScOLu8mE4c9Ju9ZNZfL0udLm2jtl+vuvS7+BI+TrBYtl3idvSf06J7su550PPpOBw9+TFavXyeHD/xpX4p3dr5Zrr7gorS5unvnjz53y8LODZdLU2WbnXeK4YnJh03Ok3wM9pWyZI3Mvo/rGYgIoYYoFipnnEW/CdO9zz8trY9+TDV99KSVSUo6q0IYtW+X4UiUlT+7caZ/NWLhQnh72pny/9MjaiyrV+/puhljZdPjwYXls8Ovy9sefyM5du6R29WrS/+675NnhI+T3LZtl8Qfvm0ffHD/BlNuuebN0ZV/Q/Ubz3LJJn5i/+13PzHsqe0+owpQBbvEmTFXrtJV/DhyQdb8cIUVuUihhevf9KdK5+yNy9RWtpFOHC2Xfvn9kwMtvy48/r5TFs0fLyTVPMtlCdLb/kSqPP3STVChfRlav3ShP9H9DjjuuqCyZOyZLz9gYppW/rZeR706Sp597Sz4cNUCqnHiC1KldTc5s0iVdDNOUafPkkg53SYumDaXnjVcaUjdsxAT5+LNZMn70ABOrRYJo7d9/wMjKt9/UUapXrSjNmpzpBpZsPaOE6T/Ylr2ZS2q2WZQtHENfsjFMkRQmnm/e4Rb5+pvFsnvlbMl/zDGCotL0iptkyc+/yuO9bpZ6tWvK/5atlEefGyInVTpB5k8aYcbU9h2pUv3c9ubf/R+6XU4oW9oQrz17/5ZZ8xfLpLdfkotbnCdfzl4oLTreKs0anSkVyx8vLRo3kJZNGhri5aacT6fNlku63m1ciu1any///isy/rPpMnzsx2mkzM0zzO+zWneVlWvWy5O9b5HTT61h3JEP9n9NypQsLku+HCsFC+SPWN+SxY82itnpJCVM2UEt6+/EmzCN+WyyXP/Io3L1RRfJoIf6SOGCBTOs5Bdz50n7O++S5g0bys0dr5Q8ufPI8AkTZNLMWUaFuqzpkbW33xtvyhOvD5FubS+Tjhe2kt83b5EBI0aYeNmUIkVk/pjREcuBhFW9qI3UqFxZJr9+RGEKYj2z3ptHv6GEySfCZIxK8QZy6UWN5eP3XnTdl6GECbIye973Mnn8KybomvTLst/klLM6yBMP3SyP3N9ddvz5l5So1EyeeuRWeei+69PK+v6HX+XLmQvk1h5XGqKS2TOFCxU0xMsZ9D1k+Hi55e5n5KdvxqXFWYU+U7thR7ND/2He2DSXCAay7rlXm7rwLskqVjMnD5Mmjeq5xiS7Dyphii9hQukZ9txDaYWYGKat22XY6I/ktRHvyx03XCWvPNnLfD7+s6/kih69Zczgp+Xqtq3S3hn3yVS56uYH5cM3BsjlbZoZcoS69PmYV6XV+Web55hLdVtcLT//+lsaYZo57ztDjM4/5wyZ8eHQtPzclnPHw8/J0FET5O/VRw4z2PTyG2PktFrVDBFz88yoDydLlzseldGDnpRr2rdOy2fC5K/k8u69ZfiLj8r1V10qkeqb3bEd+p4SplghmXE+8SZMKEEde91nCE+RwoWk6Vlnybmnny6N658hdWvUOOoQxRkdjqy9344ba+KNSIcOH5azrjqy9n73/jghz4otWsmJ5cvL7HeOHOYhzf3+e2l+Qw8TDzXv3ciHjKz77ZNBA6XlOeeYd4NYz1iMACVMGaAYT4Xpr117pFj5JnJNh9Yy+s0nXfe
lm6BvS8Z6dGsnwwY+JP/8c0COr9pKSpZIkbcGPyrnNqxz1MRy8wyVzCph2rR5u5SrfqHcd2cXQ+Cc6dGnh8hzr7wjW3+bJqVKHmcIEyQu9feZnpyeUsL0X2/EQ2GKNKiLpxQ1yg2Ki40puql3P3lzzETZ/vOXUrDAfzFNqI2la18gN117ubzW737pfNsjAonau2qu5MuXN62IZ197Wx54+tWjCNPzj94l997cOe05t+U8//ooue/JV+TBO66Xe266xrjRQpObZ66/+wkZMe4T+Wv511Lk2EJpWfy9b78UrtJIuna4WEa81DeNMIXW1/XCkMmDSphihaS/hMmWPmX2HBk7ebJ8tfBb2f7nn+bPFY4/Xm69qqPc2fkaQ/I5pVa55YVyT9cu8ugt6dde1KQX335Hfp8+TXbt3Su1LrlM7ruumzx5+23pGli+2QVSqWzZiISJgO+eTz0td17bWfrdecdR4ASlnrHqfSVMPhEmis1f8mxp3uRMmTx+oOv+DCVMuNkee2aYTJ3+jWzesl3+OXDQ5EXA9A1dLpM3Bz1i/v/Ntz/JlV3uN3FKxAm1bNZQOl7eQi6+8Ly0st08k1XC9O3iX0xsUkbp+zljpO5p1Q1h2rhpuyz/foJrPKJ5UAlTfAkT6s5Dd96QVggxR336DZJXn+4tt13XIV3XEW9EHE+kdFmrJjJxxAvS8qqe8uPSFbL5h6npHrWKjXXJWcXm3deekk7tLkx71m05bDp6PvisIXHcfHBm3VOkTfNGcsPVl6XFHbl5ptXVt8mC7/8nqctmHtW0Eic3M/milkWqbzTj2/muEqZYIRkMwmRrgWq7bPVq+WrBQnNa7acVK6THFZfLqw/2kUU//yKNrs147V0wdow5cXf+ddfLc73ukds7HTn9bNOZHa+WfHnzhiVMTw4ZKk8Pe0Pu7dpFng5Dlpz5+FnPWPa8EqYM0IynwkSxuM22btshm1Z8EfH0DgPNeVdNKGFq0vpGWbT4F3nq0Vul0dl103axtepfkY4wUR4L/FdffyufT5snn34+R1asWicd2reQcSOfSUMhs2eySpgWff+LiWnqeWMH6dzxP5eEE3aC1I8tXMgQJgjg/xYcCTCMd1LCFF/CFBrDxFgmMHvl6vWyfO5H6VSbi7vcJV9+vVBmjv/Pfebsf4K7a1atbOKSflq28ijCNPHzmdLu+l5HKUwfDHvW3Pdkk9ty7PPrNmyWSVO/lqmzvpEvZn5j4o0+G/2KnFP/tLQ8M3rmwk63y6z538nfq+cdNZyL12omDeqdKlPeHZhGmELrG6s5oIQpVkgGizA5a0O83EW39JR5S5bI1q9nyq9r1si5nbvIzR06SKc24dfek6tUkZ+Wr5Cm198QljDVaX+FiZNyuuRwt/V47HF5b/IUebH3fXJThyuzBK5X9cxSpVw+rITJR8LEqTVikEYOeUy6dro4bE0GDXtfxn7wuYwf/ZwcX6ZEulNyXHhZ+ZRLTFwS8Uk2bd7yh5St1uoowhRagC1//vQR0vDM2mHLD30mq4Rpy9YdcnzVlnLbjR3k1eczPnGohMnlrI3DY/FwyYUL+p777Q/S6LIbjEtu6IAH01pyywPPyJB3xsv2n6eHdX/ZB4ln+mjKDNn721xzzYZN1j0WqjCFEhC35YSDeP3GLXLWRV2kzsnVjSrk5pnu9z5pAsVD20WQepFqjU380psvPKKEKQ5j2o8s4xnD9Pf+/TJ+6jRzSq31eY3CNu+hVwbKC2+/Iz9OGG+CtSu2aCm3dOwgL90fee1dsXad1G7X/iiXHLFOZZs0laoVK6YjTHc/O0CGT/hIRvXvlxY07qxMUOoZj/5XwuQjYdq2/U+jMhE/xPH58845PV1tJk+dK1dee7+ccXotmTVlmFGanArTTz+vlNPOvkpefOYeubvnf1Jqn8cGSf8XR0q3ay6REa/3lYXf/SxcODnk5T7mGLZNM75eJM0uvtmUjZsus2dw32WVMFEWdURJW/3TJClYMP9
/Rm7gKHNVAifiSEqY4jHF3eXpFWGiNgR3fzRlpnw75R1zGo5kXWov9L3bxAzZ9PumrdL3uSHSu2dXc6y//6CRxq03e+Kb0uisuuYxVNF6ra6Rn5auzFRhclvOQ/0HS/njS8mt3dLvnpu0v9GcyFv0+Shx84wNWn+9fx+5ucvlae167+OpcvUtD6YFuVuXnCpM7sZrUJ+KJ2FC2Tn5srayb/8/MmfU23JCmTLpYNi9d6807trN3JO0fvqXUuCYY6R+h6tk65875NdPJ0nB/P+tvS+9M8qcTiXmCWJUvmlzqVKhgswd/U5angSWX3nPvemCvj+ZMVM63NtLRjz1pFx9UXjVKgj1jNf4UMLkI2Gi6Dnzl0i7Tr3kjx07zS3ZKD0HDx6UeQt+FG4Br1enpnz6wctpdyU5CRNxSifWvtRc/Dfohd5SIH9+eW/8F7Jnzz6Zv/BH4+Z7a3BfqVTheEPMOKZ/200dTV4bN20zAdfcyUTM0O7dezN9pljRY7NFmLix/NKO98gZdWvK3bddI8elFDEuwdeGvS/PPnGH9L7riJ9dCVO8pnnm+XpJmH5bu0FqNb5Czjitlsz9ZLjZCHBqsnG77vLtD7/IA7d1M2Row+at8szAkYIh+GXWB5JStIig8tRo1F5KlThO+vXpaX5zcg7ShNssM4XJbTm9nnjZ3MF03y1d5Nwz65j7a2bMWyQvDn3XlEsd3TxDvc6+5DpzlQBB7qedXE1+/GWFuS4BAvjNpyNN8LoSpszHaCI8EU/CRPv5OpQO9/QyruEOrS6UmidVlmPy5pO1mzbJuM8/l7UbN8nghx+S69q1NXARdH353fdIvVo15fZrrpHjihaRyV/PkSHvv2/ijog/Illl6sqWLeXylhfImg0b5dUxY6Rg/gLmgkpccnwbxWntLpeDhw7Jgz26h+2Ojq0vlEIFCvhaz3iOEyVMPhMmioe84Hr7ZPIsWbtusyE61apUkGuvbiPdu7RNp8qExjBBjO7s/bz8b+kqOS6lqLmPqV/fnjL8nY/l/kcHmpNxK5ZMFNSox/sPM0Tqz9RdUrrUcXJ+o/rm2gGIFGnJj8szfSY7CpOZ6DMWyFMDhpuLK0nVqlaQnj06SPeuRya2EqZ4TvPM8/aSMFEbyMYLQ0abE2LdOl5iKrhrNxdXDjFXDHD9ACfTmjc6y1xcWemEsmmNmD5nodz72MuydMVqE4Dds9uVcmrNqnJR5zvk03deljYXNMqQgLgph13ygMHvyOjxk2XN+k3G/Ve1cgXpcU07uena9obkuXmGSv+58y+jRhFnte2PP6VMyRJy2YVN5Kn7b0m7kFMJU+ZjNBGeiDdhAoNfVv0mr48bJzO//dachENx4pLXhqedZhQjrhlwpunfLJBn3hxuLq4kVa1YwcQ1WVLF3yBDD748UMZOmSK79uyRU6pUkQH33i0PDRwkBw4cMIRpyx9/SKUW/135Ea4/Vk2ZLOXLlDYf+VXPeI4TJUwBIEzx7GDNOzICGvT9HzaxJEx+jDlu3O56Z1+ZNWGYNG4Y/zu8/GhjNGVq0Hc06Ll/1wvC5L420T95Xpdu5gt2M7qHKfpSos/Bq3oqYVLCFP1oTdAclDAlHmHCndfnmUHmqgCuGrCJyyHHTvxCtvw4TbjnSVN6BJQweTMilDB5g3NoKUqYso57rn8JNIhhive1AjGsqmaVDQSUMCUeYTpw4KCc2rSD7Ej9y8QEVShXRqZ9vcDEG/Gdcs6bxbMxJJL2FSVM3nStEiZvcFbCFD3OSpiixzBH5aCEKfEIEzUm8JuTcl/OXmCIE98n1/nyi+SRu7qnu/07Rw3mTBqrhMmb0aCEyRuclTBFj7MSpugxzFE5KGFKTMKUowZpjBqrhClGQGaSTbIRJm9QS5xSNIYpg75Sl1ziDOTs1FQJkxKm7IybRHxHCZM3vaaEyRuc/SpFCZMSJr/Gnu/lKmFSwuT7IPSoAkqYvAFaCZM3OPtVihImJUx+jT3fy1XCpITJ90H
oUQWUMHkDtBImb3D2qxQlTEqY/Bp7vperhEkJk++D0KMKKGHyBmglTN7g7FcpSpiUMPk19nwvVwmTEibfB6FHFVDC5A3QSpi8wdmvUpQwKWHya+z5Xq4SJiVMvg9CjyqghMkboJUweYOzX6UoYVLC5NfY871cJUxKmHwfhB5VQAmTN0ArYfIGZ79KUcKkhMmvsed7uUqYlDD5Pgg9qoASJm+AVsLkDc5+laKESQmTX2PP93KVMClh8n0QelQBJUzeAK2EyRuc/SpFCZMSJr/Gnu/lKmFSwuT7IPSoAkqYvAFaCZM3OPtVihImJUx+jT3fy1XCpITJ90HoUQWUMHkDtBImb3D2qxQlTBkg/+u0rlK1/o2SJ28Bv/pHy40TAocO7pOVi4ZJjRZvx6mExMp2+ciiUqX5J5Inb5HEqrjWNlMEDh3cJaumXyrVu/2V6bP6QHQITL+2kzS4tafkK6A2Izokg/f2gX37ZMHg16T5qDHBq1w2ahTzL99dM6+PlK7YQAoVq5CN6ugrQUZg7871snXdAql8zjNBrqZndVs78SwpVbWHFCpRz7MytSBvENj7x2LZtvINqdR2oTcF5uBSFjzQSyqffa6kVKyYg1FIzqanrlsna+bPlQb9n0+KBsacMG1bPk4O79sgZU5skhQAaSP+Q2DL6lmSu0B5KVW9o8IiItu+e0T+3blOSte6Q/FIMgS2Lh0ouYpVlFJnPJlkLQtec1a8N0YObNwgVZo2C17ltEZRIbBqxleSr1x5qXZVp6jyCcrLMSdM+3dvlDXzH5Cq9W9St1xQejkG9Tjijhsqlc/uL/mPLReDHBM/i/2pv8raifWlSvNJ6pZL/O5Ma8ERd9wlUqntIsmfUiOJWhbMpuzZuEG+6d1LGvS8Td1yweyibNXKuONeGyQNBzwvhcuVz1YeQXsp5oSJBm5ZOkIO79siZau2CFp7tT7ZRGDTymmSu0AZKVPrumzmkJyvbZ1/hxzatVHK1u6TnA3Mga3a9NMzkqdIOSl99sAc2Hp/mrxsxBtyYMsWqd6qtT8V0FJjjsDyL6ZIvjJlpOZ1PWKet18ZxoUw0RhimY4tVl5KVjzbr7ZpuTFCYPu6+bJ75waNXYqAJ7FMhYvXl5LVbogR4pqNXwhsXzFc9uxYpLFLPnQAsUwpJ1SQyuc28qF0LTKWCKyZO0dSf1+fNLFLFpu4EaYD+/6UDYsHSP6CxaR05fPUPRfL0ehRXrjhtq6ZLfv/3inl6/WWfAWO8yNMflkAACAASURBVKjkxCrm4N6NsmFqWzmmUEUpXes2dc8lVveZ2uKG27p0kPyzd52UbzlR8hZSt7PX3bhvxw75YUA/KZSSIic2aaruOa87IAbl4YZbPWuG7E1NlTq9H5QCxYvHINfgZBE3wmSbiHsu9fcZklKmrhQpfpLkL1xKyVNw+v+omkCS9u/ZJrt2/CapW5ZIyglN1Q3nsr9wz6X+OkJSKraXY0s3kvzFqil5comdH49BkvbvXCG7t86R1HUTJKXGdeqG86MjQsrEPbdh+ldStl49KVGlqhQuXVrJUwD6JVIVIEl7tm6VP1atlE2LF0v55s2Syg3nbHfcCROFEQj+18bZsmf7Etm/Z6McPrgvwN3vvmp59hw4sjstnM/9SwF/MnfeApK/cDkpXLKuFC13ngZ4Z7G/CAT/a9Vo2bv+C9mfukwOH9yVxRyC+XievUfqdahQMOuXnVrlzltE8qfUlEIVWknRKp01wDs7IMbpHQLBN349S3YsWSy7N2yUg/v+jlNJ3mab98ARm3EwX/LYjLwFCsqx5ctJ8br1pFzjJkkT4B1uZHhCmLwdkh6W9thjRwqzvz0sWotSBDxFQMe6p3BrYUmKgM6jhO5YJUzZ7b7UVJETTzzy9urVIikp2c1J31MEgo2AjvVg94/WLjEQ0HmUGP2UQS2VMGW3C9kpPP74kbf79lWVKbs46nvBR0DHevD7SGsYfAR0HgW/jzKpoRKm7HSh3Snwm4S
6pCpTdpDUd4KOgI71oPeQ1i8RENB5lAi9lGkdlTBlClGYB5w7BfuxqkzZQVLfCToCOtaD3kNav0RAQOdRIvRSpnVUwpQpRCEP2J1CsWIia9ce+bBSJZGdO1VlyiqW+nywEdCxHuz+0dolBgI6jxKjn1zUUgmTC5DSPTJy5JH/dusmkivXkX//+6+I8+9ZzVOfVwSCiICO9SD2itYp0RDQeZRoPRaxvkqYoulKJ2GKJh99VxEIOgI61oPeQ1q/REBA51Ei9JISprj0kg7+uMCqmQYQAR3rAewUrVLCIaDzKOG6zFlhVZii6T4d/NGgp+8mEgI61hOpt7SuQUVA51FQe8ZVvZQwuYIpwkM6+KNBT99NJAR0rCdSb2ldg4qAzqOg9oyreilhcgWTEqZoYNJ3kwABXeiToBO1Cb4joPPI9y6IpgJKmKJCz3FKLpp89F1FIOgI6EIf9B7S+iUCAjqPEqGXItZRCVM03aeDPxr09N1EQkDHeiL1ltY1qAjoPApqz7iqlxImVzCpSy4amPTdJEBAF/ok6ERtgu8I6DzyvQuiqYASpqjQU5dcNPDpuwmEgC70CdRZWtXAIqDzKLBd46ZiSpjcoBTpGR380aCn7yYSAjrWE6m3tK5BRUDnUVB7xlW9lDC5gkldctHApO8mAQK60CdBJ2oTfEdA55HvXRBNBZQwRYWeuuSigU/fTSAEdKFPoM7SqgYWAZ1Hge0aNxVTwuQGpUjPPP74kU/69o0mF31XEQg+AjrWg99HWsPgI6DzKPh9lEENlTAldPdp5RUBRUARUAQUAUXACwSUMHmBspahCCgCioAioAgoAgmNgBKmhO4+rbwioAgoAoqAIqAIeIGAEiYvUNYyFAFFQBFQBBQBRSChEVDClNDdp5VXBBQBRUARUAQUAS8QSFjCNG/ePMmfP7+cccYZXuCkZSgCioAioAgkMAJqMxK48wJS9YQlTEOHDpXjjz9e2rRpI3nz5g0InFoNRUARUAQUgSAioDYjiL2SWHVKSML077//yl133SWnnXaaFC5cWK666qrEQl1rqwgoAoqAIuAZAmozPIM6qQtKSMJ06NAhQ5hKlCghJ554onTt2jWpO0kbpwgoAoqAIpB9BNRmZB87ffM/BBKOMLFT2Lp1q9x///2SmpoqVatWleeff177VBFQBBQBRUAROAoBtRk6KGKFQEISplWrVsmDDz4o+/btkwoVKshrr70WKzw0H0VAEVAEFIEkQgDCpDYjiTrUx6YkDGFi0Nv0yy+/yMMPPywHDx6UE044QV5//XUfIdSiFQFFQBFQBIKGgNqMoPVI4tcncITp8OHDhggdc8wx6dD966+/TIB37ty5BcKEwvTHH3/ISSedJG+//bbkst8Cnfh9ElULWCQUi6gg1JcVAUUggRBQmxFdZ6nNcI9f4AjTjh07hA4koNuZ5s+fL6effrq5e+mnn36SPn36yMyZM6VRo0YyefJkQ6RyOlEAt+3bt0vJkiVzPBbup4A+qQgoAomMgNqM7Pee2oysYRc4wvT5559LxYoV5eSTT07Xkk8//VRSUlLknHPOkccff1wWLlwo06ZNkxo1asiAAQOkefPmhkxZ0pTTWDPt/e2336RQoULmfqqcTh6zNg30aUVAEUhUBNRmZK/n1GZkHbfAESbUoiJFish5552XrjVjxowxLriePXvKY489Jm+99ZZs3LjRXFr56KOPGhfeTTfdZN6FLKxfv17Kly+fY5QnZOnvv/9eypYta36UMGV9MgTpjcWLF0vx4sWlcuXKQaqW1kURCBwCajOy1yVqM7KOW+AI04svvii1a9eWFi1apLWGjn322WeNq+6BBx6Qvn37yrBhw8z1Anny5JGLL77Y/IZk3XLLLZIvXz6xLrydO3caAuFMNhgwEqkghopnyMePRHsPHDhgyCDtcpN4Z8mSJaatoQoTn5ForxIpN2j698zu3btNrB6KKkprnTp1zFi0/edfzbRkRSCYCKjNEFGb4c3
Y9I0wRXKZ4W47++yzpWXLlmkIcOkYMUvHHnusOR33yCOPyJtvvpmOMEFy+Bk8eLDZlc+dO1dOOeUU+fXXX01+zrRs2TJzf1Okr1TZu3evMVIYLq8T5WI0P/74Y2ncuLG5NgGSk5mLEYwWLFgglSpVknLlyqVzTf7+++9SpkwZ2bZtm1HdNPmHwOrVq804LlWqVNhK/Pjjj1KrVi0ZO3asIUvcZg8RZixWr17dv4pryYqAzwiozQjfAWozvBuYvhAmOnjz5s1GCSFImQBvgrZJuNsgOK1atUpHmO68805jZCBLvXr1ktGjRxsCwHsoS5s2bTI78n79+pkv5J0+fbrUrVvXKE2XXHJJOmXlu+++M2SqQIECRyFN3ZYvX27UJYiXrZd9cP/+/Ya8hHvXTbdlRnwgfRjLESNGmOsSqlWrZojgli1bzBUKGaliw4cPNzFdEC1bb3Ye/fv3l5tvvlkgThhgTf4hAKklKL9KlSphK7Fo0SLTR++8846J2WvXrp25mLVZs2ZSv379hFMIGX9ZVTZRV3mPmERNigAIqM3IFXEgqM3wbo74QphYDDEMEJvPPvvMGAJij/hBQYKodO/ePR1huu2228wumwDvTp06yfjx4w2RgBhweo6YJVx5VqEiIByCsXTpUmndunWaWkTZuDtQkcJ9Bx0Tc9SoUbJu3Tpzm3ioW47rDcgDY5bVRN4bNmwwKk8k4vPnn38alYxAxgYNGkiTJk0MgeRvDz30UEQXHVhAmI477ji5/PLL055DecKN2bt3b0MqlTBltddi+3wkwsQlrJBwPkdheuaZZ0wA//XXX2++BgjSf80117h20ca21tnPzXkQwW0ubIT+/vtvswHSpAiAgNoMtRlBmAm+ECaM+Jdffinnn3++uaUbpYmfpk2bGsKEWwnCZFUSnr/11lvl66+/lp9//lmuvvpqQ5j4O8QDw8JuFDLAex06dJAZM2YIvm2+Z65NmzZStGhRgzfuLkjVqaeeGvY76CA1BJGTd48ePcx31TkThGbgwIFG6QpVnzLrUEjN+++/b1QDiBjvO/Og7JUrVxpjidsQFWzSpEny1VdfmTqhPPEez/HjfJddOW5KDO/tt9+e5m6kHRC/++67z5yiO/PMMyO6IjOrv34ePQKRCBP9XbNmTZk9e7bpJ1RQNggXXnihicuDLF122WVm3P7zzz+mD7M6/qKv/dE5MB8g6eES7nA2JmwuQudRRnVhM0UbORGrSREAAbUZajOCMBN8IUwYdxQPgrdxN+CewN2GMX/llVeMmw4iZdUdiAZGY+rUqcaYQJgmTJhgJpFNECfyQX3i2R9++EGuu+4644669NJL0xQhFnjIA9cW3HPPPUf1AUQEJYeyqB87ext0a2XhG264wbgEOcXkNoiadznlR12oP//HTea8oJNdFOQIpQiFCxIJYcI48m8MEJisXbtWSpcuLQULFkyrPwbm3nvvNcQRwmWxAyOw5jPcPLjsIGya/EEA9ZL4udC4OggSiuu4ceOMKsrdMowNNg+oi507d5aGDRua6zNwZxcrVsyMi1AFNJLLl3FvnyWGKjPXsFt0GJ/MkXAJcsh4pK60wy3Be+6558zcYp6FJuaI23zctkGfCz4CajPUZgRhlPpCmDDu7JYhR926dZN69eqZ3TVBrShD3OSNEmNjGHieRRlDwiJMPFMoYQJMCBOGiABxXHTt27c3LjxIwx133GHwRu7H8ODqwFUVegoNQ0K8CMG5L7/8siE4qFLELkF4du3aZUgZn7H7d3uKjXK5kfyDDz4wsVXEE0GMIEI2QQwhisRdQYooFywwErhrOPGH0XvvvfeMGud8l/pBxNjJQxKdZPPKK6+UF154wfwd/CCTmvxBAKLNOLXB+dSCfrWKIu43gvZRUwn4RzFk3F9xxRWGQHHDPfFsc+bMMfFtoS5W1BmUSSeZpgwIE+7kkSNHmo0A70PQeI4NCu5wxj5E3U1wOWOVzQJHupmbKEmMUSeZoS48A/l
hk/Lhhx+6Ah3ChCJ84403pm1IqKMd/4z70G8CcJWxPpSwCKjNUJsRhMHrC2HCCKAmPfXUUyaOiEUWwvT000/LrFmzzE3eLK42sJrnuTqAYGwMPmQHBcgel7dAQhIgO7jc+LJFFCYCoAmyxZ1F4ioCDALqEmTNnkKzeZAnhgQlC0JFHBEJg4b6Q1wFd0Lh+rP3P4V2ZLhj4NyrgxsRVyQGDdcbxI94I5sgPeyqaTtGE9KHYSJh2FJTUw1WGE2MCUqFTXv27DFYotQNGTIkzaDwPsaZcnHNQZbAkFNzoeoYuzgME5/pLj620xOyAgmwChNqIePPkviJEyca8oOS1LFjRzO+UQMhNvQFiijjkkMRjCU2DByWuPvuu9NVFJJC7J6TTPMAY4cxgEsbBZVrOlBi2ZRwfxcEhHnB2HAeuIiEAvOLMYkyBmH69ttvjbvQSdS4XJb5RNnEIK5YscKVIkvd2FCw2bH5oWRx7xoKG+SPuqJcacoZCKjNUJsRhJHuOWFiAYW0EPiM0UdlwVBDdl599VVDVAgEZ+G2x/ohA5ADDAfEhzuaeMcSEwskLgoUJlxSkCsusqQ8XFAoOyR255YwYZgwLrxnbwnneYLHeZ+dMSQGlxauQ4JxMQKUy8k86kn8VGhilw5xcRoPiCBqFUQIEkb+KGDEG1kljUBXnuGEH+9StnU7omRhdKkr8VwsIBhfS3pQvjC2qGC8b/MEJwzrN998Y8qFhBIQz5F1J2GypwNRIIjPCteuIAzYRKwDYwoiCilBQWJcQjQgESRcbCiLxOyghKIsokiiGKJIMe5RgHDhPvnkk0Z9gkBA3iE9dpwxfhiffL8iGwFnQp2B6DP3KJ85QrwgxA1CQ+JrhtwSJq46wLWH+sVmh2swqIvzKg7UJ8ajVVNRS53KEGM73FcaseGBXLGBguwz1skfDJmDzAt+2rZtm4jDQeucRQTUZqjNyOKQidvjnhMmFA/cQ6g3BFVzYzcLJ4QA1YfJYWN4MBIkiAKEiWdwK6HCOOOXLDosvlzciJFBDcIo8ByECXUHUoDBwJWB4SI43BIpjAekjfJxa6Hu2MBqyB0nzCAYEBB+s5BjNFANQpUajBYJQ4KB4OeTTz4xhAYigisPwoSBYjd/wQUXmDwwKHxG2RhBZGgnYbJxKChsuC4hYbSFhAsPFQ7M+MwSQOpLUC51QtUDo48++sgocaEB5wSzkycuTZ7VFB0CgwYNMsoQ44gxiWLSpUsXM/YYD4w5xiuECdWQu8f4jTuYxGEFSLlNqJqoQ8wP3FaMX55nnDG+UXhQpBhPkCnmAGomiiGEGSLMuMWdizuczQf3lTEWGfcoXmvWrHGlMKFK4WJG8WHMoZBBcJyEyaphbB4YV6hiEB7mGQQOXCBrzAli8myiHsx5XMmQRvDgXcYr84ExDzkMPeWKW5C8mTd+3KEW3WjRtyMhoDZDbUZQZofnhAkDzl1JLKaoHLjf7F0t7IpZPDl6z67Unmxjd2lJEjtwXHmh7jgLKAsm9xcRB4RawgLLzh1DgrpEIoaEBZoFHrfdWWedZQwT5bHY8psdbEaJxRulhpN+oe4rjNP//vc/4wJBncKQ2WsBWNRpI4YUUoK7kB01hAnyiIoARqGEiTJw0/AeygRlY6QoHywwzuCKYvbuu+8aBY9yMEooZeBh3YvEh3HVgq23NVy4CDG81l3jNj4rKIM5aPW49tprzRhnLHNNBAoqLlgUHk400k8XXXSRUZfoT8gNf4P8kjD6qKs2QaC4ZgKigmsNtxtqImPCurAZ5+SPkgjJYLMAYYawQbLoa06tMReIZYLI4MZDLSKukCs+IrnkbKA4BowvvsbFy7yE3JAHhBsF1SbilhiX1GHevHmGAFImc4/TgLgc2bDg7qbOzBMSpJI8UYCdhIn5xJiGELLxsSdVIU/MF9YJriSBhIGTpuRAQG2G2oygjGTPCROEhO+Dw2CEJkgD6g6LIaTJEiaUGe5OwnjgLkPZCXXH2bwgASz
axDeg2PAcbjoMCSSGvIm5gKiwGHP1AMZiypQpJhicMvgsM8JEeU888YRRykJPKrGzZ/eLm4zTaagCxBWhKECYSOy0aR9khrZRbz5HFYMAQVb47YyHYjcPESIOibbgAqH+PIPxwmBZpQ6jgSHBlYiqgGFFWSBPLsVEneBZe4s48SHUGZcMR9gJas8JMSL0CyQ1swTGjA3GituEKkofoCIRswaZhQxBkiAzuMggE7jQcDdB5HHnQhbCJeuutSQK4kOfQ3Qg/JAQiBAbAcplczF06FAzZl566SVTHq4uxhpkDJcWN4sz1yiTcc94sWPDWQeID/OIvCgXcs4YQRXjb8xnSA/qsVVc+T9fYUR+qJqMX8ae3aygcqJysWlCbaY+4AupxEgyHjlgYRUmNh+MWRRWSBt1pS4QKUgU9UGdxZ3J5sPe5B8uptBtH/KcVcD5N+SXfkSVY4MXGlyflXz1WXcIqM1Qm+FupMT/Kc8JE/EIuKKI4QiX2EFi1HFZ2O+Tg0RhUKzBgohklOzJHI5ms1jissBNxa6cnTEKij2yTQwGizAKELtlXG8YMCZpZgl1ikWbHbw1EvbWVRZ1q26xu0dFwGhZF5t1/2E8WOQxYuzILQEKV7a9t8mqaxgwiBBlO0moJXC4SlDZMER8jvGkfE5cYXwwRNSdiz8xBBgqgnlxeZBvsn6NivNIPf2HKzj0NmpcRhBaiATuTbBHwQA7noWQYqzpC9t/GFDGJsb+iy++MIad8W4T72HEcZcRqwchgNTghqW8zBJqIuPXJuoF4ULFot8gLTwDeaa+/Ibo2AtdubIAckTC0OOqs7FxlE9bqAtuPsaW874v5gyKKCSQzQTkHjyYo7jcIEIoagSh00awgCihnPEOhBFSg7sXUo6yxFiHVFEHSCQuZeoPmWMjAT7Ec9nvRkSdInGQg/WAOkAy2TyANfML8sSmhPENmWPukS9/YzxTd1zbEDPKoR+J8SPZvgZX8kel4znqTb4k1G3KJX6Q/FmvILmogyR77YG9Iw5cUQRxw9rYK+YlZdkNiVXY2czRJ5rSI6A248j1OWoz/J8ZaYTJGmG7SIYakOze2xK6u2Pws4BCTCIlFlyCoVFb+DeECYMeSVUKl49VTviMRQ2Vh50nu2wWQ9seu1hhCCEXECcWdXs6LaMusoQEg2S/6gKDhmqBi4QyIGbsnu11CaH58TnkBLcghhti5SbRPiYQxgc8IUTcs+RMGCKMBAadCzxZ5GkvSgILOPEqqFUoLOzycRnRbuqEGoAqYFUot/dNUX40Y8ULNyAKJVc7kBgXKG/2RBqqI8aOZyCOKBUoOLg0MbQE6mPUUEZQ4uhXcOLvuDxRCyElEBjUlUjEGzxxi0JcUJuyMradfcyVHBBegqtx0ZJw0UEC6FNcU4wNiBObEGdCeWRs8CyGnb6HzNH3b7zxhiE5KFAosihKtIWxTswVKipthYwwn3BDQ1ogG6ho5IEb0kmYcJdBiJj75EE8IpsO2g6pZ6xCRqyrmFOB9AMEiXchZTZBqHC1E+cE1vZON9Q11gzaCzaQRHvtAXWz6i3khDlH/tSVdYk5C5aQH9QqXIRgRL/SRp6B5EEo+c1YoSyC23EfMm/sfVqsA9QRokaQvlX7GEP0O3OV+c64g4wxh1HBUdMTJanNUJuRU2yGnZNphAkpHxkf1wyLE4sni7B1cbFYYEhY3CAgkA8WUNwN/J9nMXYYcfJB3cGwsIsiTytds/tmAWTBjJQokx0qxowFDINFHtlN1As3HcoRQa62Hc78IGWQFYwcJ5DCBZWHls8CzK6aZ4nJYgfJQsl1A872UTaLS7g2gxdGAtWNRZqy3SaMLos9pw0x7Bio0GSJI5g6FQP6xZ7kYyePS5IFm3qy82ZXj1qAUcFogD95YdToQ/qe/HgWY0Kf09/syPk/mPPDOMI4kR9jxRJl3qXtjDuUGt5l180z8U7gjBGENBE/Bja4qjBcxNOg/tAeyCR3XmEc6VPUQIgRYxI
Fg3gvxjL9C06oS6iYKHeQEIhwRuMII4nCR/xPdgkTc88SDNSYcIQcIoMKQ/86EwaefqQf6CfqQD+grEAKIFG4niAGjAU+p418Tt0tYWLc8T44MM+pExsIFGEnYWI88BkkgvkNeUIBonzrHqQs5oMzMW/BHLydibFHXBQHGnB9c90CpIp5SZshTJBXyuPKBvqC9qBm0TeMVdYpTh4y7xmbfEayyhRzls0b7xEDRn+h3KFM4sJHvaZMiB/to/7EULIhYRNG3VkTqA9kyF5CyzyH3LFZIR/itugPN+7heM8Pt/mrzVCbkVNsxlGEye0kifY5DBE7d+saCM2PxZ+Fg8WP3Rf/Z/HFYLO485nTzRHu/dCAcIgByhGLOQsVp4xsXIL9Pjp2e+yiMXgYP0hBZonFk900+dp4CfJjIYRAkVhEWYyJMUFJcCY+Q6WCfPJv4qE4SZVRAg+rioELO2TeRUWANDkTz2K4yR8jB6GD9LAzx7jSXhQI6o6ChzLBM+ysMYYYtWS8jwkDxbUQbAxQBLhDiPbjnoEIoQxgKPkMAoVLhn6GQIAHxhH3F8HzuDHtCUX6AUUjKwmjj0HPLmFyUxZ9bslrZs8zXhj7uCMhg/wbIoQiwtyz36XI2CNGEIWJsWKTJWEQD96FMEFMQskceaLk8Dnzh3wgMbgEUYFDE32GihUpcWCBNQWViDqghDG2aQcKFIQLYs7cx7WI0gOJZ52BDDM3mfvUg/nFZ5A7XIcod5BalGdiqhgPkCIIN5sJyCSnDlH0IF+oc1z9wUaGdYYyiD9jvkHmwIP1AJce9WLMsGlB+YX4aUqPgNqM//BQm+Hv7PA8hgmDzG4KYxMuEZPBTh93EYaLxG6NOACIEgsZBiqcgcGYEZDJAszulgWU51g82eUx8UjsjllUWfRREfg3CyaLNQscCxr1xJix6KPGWDees84sjhhYdsjWZUV5LMrsFFl4kd+RLVlsQ9vM7pPFlsWVurNrzcwdyI6a03LUE1cSCy+Eh9NV9iJOdsrUiV01u2KrQuEqpJ6QORtrYl1u1JUFnMBhVBTcd6HB7P4O1diVTptRFDkRRhwOYwrXFJgyNiFJuKNQU3gO/Ij/wrhBpBg3PG/jVxiXEDDGDApmIienEol7jvgcxhVGnfbZecec4f+QbIhTaEJ5QY2EBIVLlMO4ZRPEfIVgMUdRqrOTUP8gRChB5HfuueeafmXzAxFCDWEjQAwSGwbmHpsw7qGi31G4IDbUA4IFcaKNEEXmE2oRJwrJA6IEOYKM0XYwInyAeQ6hpEziqQhqh2QzZsCD+c944j172z6uOtQsiCr5RloXs4NJsryjNuO/nlSb4e+o9pwwQUAgBkj+7Lasa8fCADlikYIw2XuYWJww5hg6fkOewl0rwCLMjo+8WZDYBbLAQzLYJbIY8x4KFwOvV69eRi3A2EEQWOD5HOWIhQ/DSJkoXbisnKfWMJbsJImhsOqSbQMxCyy8EC/iW1gI2TlCWKiPdVexcENe7BFoiCCn5yyxcw4NiA6LKi4iYjLAEYkf9Qpig1vEfjkrwePgSv1xS/AbVwEuR4wAdYJshapHKFS4Q4jJsDE+/g7P+JQO8UGZow8hh6gSkB4IFCoTBBuyjQElqBm3DWoTgdD8Ztxy9J6+sIm+pk/cnK6MT6tinyubD9QYiAdzyW2CFFgXPUpOZolxiLpC8DqbJedmCHJB+RChjBJ9ApnBtUo/QJjoK+LM6EfKwMWPkkP9IDFW2aM8TpzyPPODDRJrhI1rQyliTaIMflhb+IwDI9xHxRzjhxhI3IxWEUfZZQOCusT6Yy8vtWST9liyzvy298VlhldO+1xthtqMoIx5zwmTvYQMJYVdHYGo9kQJhh/XEjtCjLq9bZqFDQLCbpZdL0Y90sWVECEMPwYRksJCBFGBGPEOBhEywB0uGAMWVBY7VCEbFMrCy86TRZOTPiyg7CAxtNQfAwoxY+cNSQkNVCZAmLgdiBe7TdqFAmTvkkI5Iw4
CAga5scSFBRb1C7LmDFpnsPAMizYG3J4SIqaD2CfKh1TSFowBhpxFmTwog5vNUbFwG0AeOfEU7sJNymWBx+VgbwoPykCNZT0INmZXD94ogdwRhBICAWWsoQbggsF4oxJA3vmhz/gbMTOc7kKhYCzhQiLuCawxlsQwWZdvZrFw9pJUN4cMMsIAFSySmpMd7NgEaBkNRgAAD7tJREFU0HY2BGxi7FUGjDUIRuiGxTle7d1TkdSi0LFN/disoK4wtp2YoRwxxtmwODcSVgnjXTYZzFfGOfOOPoQw0Ve4TFEDIbtc/ImLjb/Td+RBX+OuhiSjJNmvY6IfbRmojsw7NjbMd1RlYuBYYyByuNLZBDLHINKsN7SHMUN8E3WjjtQBIs74YNzQ5yiZ5M2ax/gjpk5TegTUZhwy3ha1Gf7PDM8JEwstOzSUInatGHkry6O42C+mRUWyRptdO7svCA4LEgtXOAODAsNOFfKDOwVjyEBD8bH35xBLgXEhfxZE5HfyR4LHSFA/FkMWMBZb4pFw0fAsCzfxLty/RD04xcMCH0qYMLaQLnvbNt2MAkR+ED52ohgVFk97uo5n7HFt7rLhc3Cyu23KQN7nN+QP1w+7VvCibhgnjDj5Q4icX75rA2YxSGACRpFStHfW+D+kM68BBhm3pj3JBHFCXcBNSTwOJwoh8pBQ+hGjigLJuLPGFpLEMxhClDvcn6gbEH76hsBy+gNXXkYJYwlJzSguL/MWiRnTjM3QO5xsTFGkPBg7EJhQYseGhrnC+EeBhfyDBwSKMRLaLpQSq6BAuMHTHv4ILZt5ytywCg/jOhJhIl9wJ/bQ3mFGfhgP+oDNDFijEuIygzBB7lhXuNqB+ck8ZVNEHpxmY0MFmWGeMZ8gwLSVNYJ+QNFgLWAeQ5CIn6IO3Hll3dsoScxPyDbjhbqwLrBGkS8xkRAwxgPxk/yNdkOeKA9iRH5gynu8T1k29tFNn+eUZ9RmqM0Iylj3nDBZYsBRe5QiFjQMFosy/nwWExazcIQJtQbpnGBk+xUl9nQViyk7OBZQ3FTI5RhFiBOxBfY7rCAlGD17cSRxD7jC2OnbHSVlsAPFOBDkySKHBI9hYXeI2sSxZZ53xi/ZTuVEEs87Y4AgYChTJBbgcMf17Uk6yCQGhR2s3cnzPOoY8RkQLtQm2mHLAA8IKG1BQbF/xxDyPG4BjKltZ1AGoF/1YLxgXOkTXJAQVFRMTkRhbDHgEAP6DWOKusm4JGaMdyHlKIk2+JlxgQsLBQ+FDxdoZoSJPsbwE++S2d1i4XCyAaCMCwg5CqY9TEH8nf3aEtQvXEeQgdAAbDYUkBJicewdUYwRNgcoqBh5DDsbBOLhmG+0iw2GdanTdog6BIHfqCqonxBHSEc4woQCxFi1X2PCu06FyRJQlFxutkcRZpNkcaIPCPSGyKH4oK6h8hC8jxuQuQBBYh5yQg08IEqoQfQjWHAi0x6EYPNBDCN/Yz3CjccawPqE6suawvrDD1cY8HcIIQoYGxDeR4kjUQ/WMurLOMENyHwlb+Ypahl1YZ4yrlDace+TN2uYpqMRoJ/UZhy5aNh5xYvaDG9niy+EyS4kxOJADlBwiCVhYWO3zeSAlFijbxUmlB/iEHAnseAxcIg7IbEwo1BBRlhAkcr5YWfIaSjr9mKAsRBCJFjcne4B+28Wen6Q2DEwGE77PgGeNs4l0t1EzuP7tjsplwUWI2PJUriupq0YCBZ+dszWDQHhYyfO+/wNsgQJsslemInEb78Xj88o194pRFuycp+St0PRn9Iw2AR+gyXYolRAyBlr9DPkl75gXEGcICaQbYgRKgqECZIOUQFfCDikH9IFYYGM4RIKPU1GaxnHkBHygdS7SZAae3UHJA8iQJ8yViBpKKb0OUHOEAqIBVdfoDJSf+aY8zQlxJA2oMJSH+YdRp3NAu0iL4gM9efOKfJH4aT9jGNL7CFZzDeIDnMRggEJcBIm6k3ekAlUH9rO/1GkwJh6USYn6FC
CyAMXO+QLMouCZd189BubFuIc2dTwf9zekBDcaswBPuN9VFg7/8DLqaLafzPvrDvOzl/UI9YIfiBZrAH8GxUIcmW/mxH8mX82lpG1yeZFfcEVHCBetv7Ui3mJ8sV79JNdG9yMg5z2jNqMI2RJbYa/I98XwsTixeLK7hDCRAAmiyuLEQYLQ4D7ypIUFlXICzt4FkIWU2KcME7sIEkYA/JjF8KOzl6eSB6hwc0YN3bKocHa5MN7fE6Z7G5D38WI8gxlxSNB5Fj8UTbY1aOAMFFwDWFYMTrhCBnvcYklSgHKgPNrITBGXK1gF/F41DtR82Qsom7YW80hxIxHdvvOhEGFhHC0HfewJS0Q2VCXLMoJLhdi8OgPiAYEBBXEqoy8BzHDwDPerDoR6dSZrYs9MMAYoY4os7QBlw9XHaCqQI6I0WJzwefUA7cic4T5gRqCigJRshfJoqJCxDkNx4EA6mfJBKSPeQopgyxCOsibuqLkQqIgJyg7zF0UFuYnLkt+Q/TBC9cV9ed0KQaQ2CDmGASJz1GRwBhyyhzmckzIFRshCC39Ym/jhniwVvAsZI26sqbQNhQfCA350p+UkZ3EuxA4fsDbXhaL25bTr6H9Hq4Muw5BuNi40Vd2TbGfZaduOe0dtRmRe1xthnezwRfCxELBgsdvyA8LNAs5LhKCMtkRQphsshdDslMjkJuFlmsAWBDZtZFYlCEEnGhhp51RomwIT6Q7huztzNaNF2o4+X88g6LBBcOAkUDpop7gQRxNOJJniR4GhZuZ2bE7F3OMPQYpGe9UisVUcRoxSAUxMJG+ogJ1EhdWZlgSz0R/YWSJUYHE0j+QJ/oDhYYgZWKFGLscMKBMlAdcfaEJwgMxQTFl/NpbvLnHCNKDq5aYLMpk/KMKEeRMGfQ9qiUxPai6kBJUNdQk6kN7GOvMPd4jPsc59m0Au720krpRLlhAsuyXUOOmgjDZC0m5DR33JEQe1x4kC/IGGUKNY/ODaw4yz+d2LvObNrAesGlgPBMQTRupA3MCtYl2sIlyJkgj9UURi/arfZxfleIsw36BdizGnubhDgG1GRnjpDbD3TiK9ilfCJM18Kg47Iw5ocKJHNxJ4QgTg4GdKgs0ShOBlBgZjAfkingniAKEiiBVdtiZpUTY3RFUigJBu/mNmygjQ43hx5CCZ2YGPTN89PPwCLgdNzxH8D0xKSgSqBQQHlQR4l4I0Mblg2EnWRURN4/zK0BsLSANEAVUGxQfxjwbDfKziTkAESLgGDIBkUIFIkGmGEO4gBgbqDeQK1xxKLX85ofLFa3ClNEYoCyIEGSLOB1c6rghLVFHweIWYJQuNjeQHVxwtI86QaogkihBuNWdsRnkg5sNlZWNFJsbe6CBfxNfRNB5OIUHwsS6wmYjWsKkcyBYCDCn1GZk3CdqM+I7Zn0jTDQLWZ6vnYAI4DrANYFrzrrknE2HCLAAs3CyW0beZ5FlB03sA4aEXTr52fub4gtd/HMHH4weqhLGgd8ZxSCxoNiTXUqY4t8/bkpAaaLvcMfhOuJWadQVAn8hHLibSCgtkAqCzgkCDk0QJggEihDvog7xLhsJJ2HCZcxcQh3B/UeZJOKqcJVBViASkDUIFEoNqhp5o+jwrr0KI6P24cJDLcLVRPsgQJAhZyJv7kRjTuNOJmYHhQuXGeOY022oWaF3fhEjxOlBiCVzmsS8Z53gdnDmOmQrHGFCgSJmjHUhkhrrpt/0mWAioDYj435RmxHfcesrYcLAc2cJ7gqMBQoJp0lQitixhhImFnXuRWJBtXcacfKEHSexF0j8SPtuYgviC6s/uSth8gf3jEpFVYJMQHxxPaGc4EpGlXGqVdwkDdlBcYVAECfE4of7jvdQYpgXxEZBRPg/t1ajxtgEObNfbRNaJ06yoVyhyqL+QOB4n7gjXIG4ASFLuOLcuJudMT0QLcolJis0QZaoM/FPlGUVLp6D+KCGWlJn37VfeMxctom4J5Qz4pRQ0Yh3CjfPne7
V4I0GrVG0CKjNiBbB9O+rzcganr4SJqrK6RFO1tiYCdwXLHrOE2A8R3Ap8QrsziFHuPDsgkrMArtk3B45lSyBkQ7+rA1+L54m3oixSeAxhIEYO8g+hMeZCCRHHeE+HggMGwHigyBRBFqzqeD/nHwjT4KwIUhOJZFymAvMg9DE3yFJxBlBcCgL9YYrB+wXxJJndlK4Qwg2H9yIXJiKIkbQs3N+WvU0VAnCXcgpOmc7iK1is8Q7KHOQO1VRs9Nbif+O2ozY9aHajKxh6TthIm6JHaZdSAlMxVhAopyJQEuCulkkCcplkXWzE84aHIn9tA7+4PUfxAQiw5gmEdPD6bTQ+3aI8+G+LGJwcNtBhnB74aaGMOE+44oDXFyMf1x04YLDM0KAuCPmlQ3i5llcdyhXuO/sdzfGEkXuf+JkIUTR7WYGNYp6Ok+iQiRR5zQpAmozYjcG1GZkDUvfCZPzThSqbmM92PVGSm4Db7MGRXI8jTG0tzcnR4sSuxWopfxYFSUSYSL2jIshiTOCHKEiQTb4O3E7vMe/uVOJxMWMXLCaleS8Hygr70XzLMHnkD8uwnSbCD6HMBGMbhPBrMQ2alIE1GbEdgyozXCPp++EKbSqxEIQYxF6XNh9k3L2k0omg93/kQgTpz65Q4n7fSBMqD7EHRG3Z2+Xxi2NK4pEbBAnyBIhUW9UMrcJwkRsU7zuOnNbD30uMRBQmxFdP6nNcI9f4AgTu/HQ69/dN0efVASCjQDxObjnwhEIXFec7iLeB7JAEDV/Q23l/rFETVzeaa83cNMGTsNyUatTYXLznj6TMxFQm5Ez+92PVgeOMPkBgpapCHiFAAHLHKt3fumyLdsebOD0Gvcb8RzPo7ZwrD5RE199xMWbbhN3VHGazm3Mk9t89TlFQBFQBKJBQAlTNOjpu4pAFhHIiDBxKR+xTtz+zdUDqEyQB07QcZlkoiZuOIcAuk2c4uMOJk2KgCKgCAQJASVMQeoNrUvSI5ARYbKN52Z3bvCGMPFdiVwQiYtKkyKgCCgCioB/CChh8g97LTkHIuDmSgwUGe4d4otl+doQYoCUMOXAwaJNVgQUgUAhoIQpUN2hlUl2BLi9m0MNGcXncJM33xfHl0wT0KqEKdlHhbZPEVAEEgEBJUyJ0EtaxxyFAJdJcgcTX2OihClHdb02VhFQBAKMgBKmAHeOVi1nIsDN4JyY48tsuSOF+5gyusg1Z6KkrVYEFAFFwFsElDB5i7eWpghkigCqErdyE+ytSRFQBBQBRSAYCChhCkY/aC0UAUVAEVAEFAFFIMAIKGEKcOdo1RQBRUARUAQUAUUgGAgoYQpGP2gtFAFFQBFQBBQBRSDACChhCnDnaNUUAUVAEVAEFAFFIBgIKGEKRj9oLRQBRUARUAT+r906pgEAAGAQ5t81NjjqYOkeCBAYCwim8TmmESBAgAABAg8BwfT4wQoCBAgQIEBgLCCYxueYRoAAAQIECDwEBNPjBysIECBAgACBsYBgGp9jGgECBAgQIPAQEEyPH6wgQIAAAQIExgKCaXyOaQQIECBAgMBDQDA9frCCAAECBAgQGAsEuMuZGjDwbAEAAAAASUVORK5CYII=)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sKem2vASoBYd" + }, + "source": [ + "More precisely, SpeechBrain supports many Conversational AI tasks ([see our README](https://github.com/speechbrain/speechbrain/?tab=readme-ov-file#-supported-technologies)). 
See also all the different tutorials.\n", + "\n", + "For all these tasks, we provide recipes that allow users training a model from scratch. We make pre-trained models and logs for our experiments available." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qI8o3GK1u6Qq" + }, + "source": [ + "The usual way to train your model from scratch with SpeechBrain is the following:\n", + "\n", + "```bash\n", + "cd recipe/dataset_name/task_name\n", + "python train.py train.yaml --data_folder=/path/to/the/dataset\n", + "```\n", + "Please, refer to the aforementioned tutorial for more information about training.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fAg49UahnrIF" + }, + "source": [ + " In this brief tutorial, we just show how to use some of the pre-trained models made available on [HuggingFace](https://huggingface.co/speechbrain/). First of all, let's install SpeechBrain:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "executionInfo": { + "elapsed": 24204, + "status": "ok", + "timestamp": 1708877170105, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "DnuF8iAbe-T5" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "executionInfo": { + "elapsed": 10780, + "status": "ok", + "timestamp": 1708877184154, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "eto6x24aKo7e" + }, + "outputs": [], + "source": [ + "%%capture\n", + "%cd /content\n", + "!wget -O example_mandarin.wav \"https://www.dropbox.com/scl/fi/7jn7jg9ea2u6d9d70657z/example_mandarin.wav?rlkey=eh220qallihxp9yppm2kx7a2i&dl=1\"\n", + "!wget -O example_rw.mp3 
\"https://www.dropbox.com/scl/fi/iplkymn8c8mbc6oclxem3/example_rw.mp3?rlkey=yhmqfsn8q43pmvd1uvjo3yl0s&dl=1\"\n", + "!wget -O example_whamr.wav \"https://www.dropbox.com/scl/fi/gxbtbf3c3hxr0y9dbf0nw/example_whamr.wav?rlkey=1wt5d49kjl36h0zypwrmsy8nz&dl=1\"\n", + "!wget -O example-fr.wav \"https://www.dropbox.com/scl/fi/vjn98vu8e3i2mvsw17msh/example-fr.wav?rlkey=vabmu4fgqp60oken8aosg75i0&dl=1\"\n", + "!wget -O example-it.wav \"https://www.dropbox.com/scl/fi/o3t7j53s7czaob8yq73rz/example-it.wav?rlkey=x9u6bkbcp6lh3602fb9uai5h3&dl=1\"\n", + "!wget -O example.wav \"https://www.dropbox.com/scl/fi/uws97livpeta7rowb7q7g/example.wav?rlkey=swppq2so15jibmpmihenrktbt&dl=1\"\n", + "!wget -O example1.wav \"https://www.dropbox.com/scl/fi/mu1tdejny4cbgxczwm944/example1.wav?rlkey=8pi7hjz15syvav80u1xzfbfhn&dl=1\"\n", + "!wget -O example2.flac \"https://www.dropbox.com/scl/fi/k9ouk6ec1q1fkevamodrn/example2.flac?rlkey=vtbyc6bzp9hknzvn9rb63z3yf&dl=1\"\n", + "!wget -O test_mixture.wav \"https://www.dropbox.com/scl/fi/4327g66ajs8aq3dck0fzn/test_mixture.wav?rlkey=bjdcw3msxw3armpelxuayug5i&dl=1\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2yyQ46wxAYRI" + }, + "source": [ + "Once installed, you should be able to import the speechbrain project with python:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "F8jUAb9mAhTc" + }, + "outputs": [], + "source": [ + "import speechbrain as sb\n", + "from speechbrain.dataio.dataio import read_audio\n", + "from IPython.display import Audio" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3Zdgu_LS9_iJ" + }, + "source": [ + "## Speech Recognition on Different Languages\n", + "\n", + "### English" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bE5JwtDuitfs" + }, + "outputs": [], + "source": [ + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "\n", + "asr_model = 
EncoderDecoderASR.from_hparams(source=\"speechbrain/asr-crdnn-rnnlm-librispeech\", savedir=\"pretrained_models/asr-crdnn-rnnlm-librispeech\")\n", + "asr_model.transcribe_file('/content/example.wav')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ps6S2OcS_v3X" + }, + "outputs": [], + "source": [ + "signal = read_audio(\"/content/example.wav\").squeeze()\n", + "Audio(signal, rate=16000)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9yKHvvrOGg0Q" + }, + "source": [ + "### French" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OHiAa_VGGrLq" + }, + "outputs": [], + "source": [ + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "\n", + "asr_model = EncoderDecoderASR.from_hparams(source=\"speechbrain/asr-crdnn-commonvoice-fr\", savedir=\"pretrained_models/asr-crdnn-commonvoice-fr\")\n", + "asr_model.transcribe_file(\"/content/example-fr.wav\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8j67HL7yHTh7" + }, + "outputs": [], + "source": [ + "signal = read_audio(\"/content/example-fr.wav\").squeeze()\n", + "Audio(signal, rate=44100)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "crDx5hOQHpBg" + }, + "source": [ + "### Italian" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3bpyI2GxHuOl" + }, + "outputs": [], + "source": [ + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "\n", + "asr_model = EncoderDecoderASR.from_hparams(source=\"speechbrain/asr-crdnn-commonvoice-it\", savedir=\"pretrained_models/asr-crdnn-commonvoice-it\")\n", + "asr_model.transcribe_file(\"/content/example-it.wav\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "YAaPAlhCH93C" + }, + "outputs": [], + "source": [ + "signal = read_audio(\"/content/example-it.wav\").squeeze()\n", + "Audio(signal, rate=16000)" + ] + }, + { + "cell_type": 
"markdown", + "metadata": { + "id": "xmsa-pyaIgQ8" + }, + "source": [ + "### Mandarin" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 580, + "referenced_widgets": [ + "ab522eae5bfb433c8f5898894a4d874d", + "69a4deafe19240e49c58bf0d9fc61a68", + "a6c6972b906c41489f675d56024d1068", + "2235124b5a83473695d755984e047922", + "6837de0ebddd4951959afeff08631c12", + "91fada4c093f48e28497b28286e728c8", + "ce19b42961b6439f8a0dafb7db9af72e", + "8630cd5856184025b3dfdb079fbe68fe", + "71178918511444a985511dc6cd977137", + "9551b53d8c8c41e4b73a800df02ec0c3", + "9275b41bbf514f8a8437d5cc77d1083f", + "2c39bf8cb4fa4f4c9fdce2bcbc82561c", + "5acdc14bce03418a87f758dbd46838cc", + "d4e2c5533ad5489b997e609a9d6f2186", + "76a29921b7a4414ba508e282915b379e", + "3f44a873dadd4eb1beb94f976aa9c27c", + "b8ba06c14960454fb0dec9e8d0f5bc26", + "241ed1a198e0404cad3a24a31262cc5b", + "6f270504a5aa493b8f25574e9beaa26e", + "5a0ac98e6406421ea32fc561178fc3f3", + "bf08cbf6446540a6bf0afd9c9066522f", + "228d66e0118f4e71859a460312fa5532", + "9939be9b6bea4232a761da576b3fa04c", + "849379af488644f9abafc54be4031dcd", + "51fbaf9158a74fbfaf241e1867d313d9", + "17be10aef0b745c2884b51cc42dd7c7d", + "1f8052e4c9214e80b6324376344499ac", + "ccbb0a0a3c1b40fb9338e110d79de57b", + "67a30cbfd45c47789f4de06501f867b0", + "799de17193c849f5905d89e9f8cd7735", + "3968be06bc994f3488580b549372966f", + "4d173f299f164188852052ed720db2f9", + "8a71c9fa9d9b43c397f2ea588d3c3b29", + "2150482ae0da480c878dcdb2d0506cba", + "d98e38e61f6d474fb61a6bfb9a878dea", + "9f8f1eeb67794faa9bc17fd8c213ed6f", + "2f5d5c9265bf435e8267595025228d5e", + "ae3541638c444094b45286ffdac676b1", + "a0ca595db6f34670a485f101728a747c", + "39674b7ac8754a6aa909102aaaf0ba7b", + "cc3fbd99a7594cd2891b931316caf9ce", + "cdd2ccb0fbc54fc9a1dd56e58c73c3cc", + "37665ad0898b4f0dab7512fbe6a9497d", + "79b1b219aaf64eedac88dd004cac9047", + "8168eade061449a7a57d04eb1a784278", + 
"32aee971719247cc834eae6d9584855d", + "c6ed6458529948dba27b9c94b8838b36", + "0ce6288af503427184488c6e9b676649", + "3059798bb803454cae2b7a59c341d969", + "5ddd1272e5b8446b841758d116754abd", + "54ee75dabed6447480e7e2861ca32e99", + "ed1f3de3a47a4619b0d0c0ff1d130fd2", + "4916a8e03a4948a8b6c3dae2ed6104b5", + "f706beec94bb45ed9da63c7eb2a9b856", + "53cc0d405c5344a4836ca9715cf064a6", + "9bc36b4aceb34ea28ca19870d4662571", + "6ef38982c00940cca04a04a14632fa83", + "88e4a47c090042e0980956be8c79b427", + "dd561349ad8641b8b34722b5a708ebfb", + "f0ff1af50f9543b0892ad47c10369148", + "931cea73f91b40618295e2e24ec42e80", + "589abc6fb5a2459ab16d9a76578bcd44", + "380381f7ac104124997c4cd0c88025b6", + "661c914dccf34f60b7a62e5434885080", + "b372eff6a1e34523863f5905830e26a6", + "a29dddb603f641cba8a91c4fd0406e8b", + "6056e50bad024561a6b78d6b19be87fa", + "c2a35fd606874ef984927fe33407ed1c", + "ae298101ed3141de8a80d7b5a5404ee3", + "507d5b389b614bfd873bfb0113f539be", + "68dacbc921d74f65b729ddcfd574e17f", + "64d8436cbdf14b8d81d19a7b427f8fac", + "96525547b254478c89f4ace549d384c7", + "0203ed2c456f452bbf9f186979dadf4b", + "a10f89e80a8542b5940137b1bfc2b77b", + "c538360ac187475ab0e2e00f35384b4d", + "a6f9ebe10aec4df790a58f2d7dd34e14", + "ac5f868bfb7e496fb0dc98dd511f6fe5", + "1be0270e98d74707ac4866044fd7252b", + "f2ffd33f8be14fbfa87bd0cb2c919ff2", + "548d16c7d3ed44c4851ea25cb926dde3", + "23f104c6f8b74833824ad84656d9ce81", + "ddc2db40b7c74f309707ebde8e727022", + "72a243c7e10540cc97c1856e610eb824", + "6400039eb1964ad5a40fb58a0c7bcac8", + "403b2ac4059e4b00a1e2249ca25124ba", + "6f7a599759504dbabafa7b6fd9bf01c8", + "fab022b192da4c88a4684e79f90d2a97", + "2919120146af40098a79ab14c874ef77", + "f4e5e9d9e730465e8523b1d336064174", + "fcfb59123dea416a8a06fc6ab31dc2c2", + "4d069f585ac342caaac9becc5c9cd57d", + "279068fb77db4096bd7b28e2a030dab3", + "f1b68e3d56c84fc79212fa19fbfd4862", + "d8ffee6e8ad94c0ca2b41c333ddf65e3", + "5f9e7bf681cb41ed8a3faa0e2b37c5bc", + "9985fd07514c4da9ad090e2de14da124", + 
"4c04e84b8e314e72b9ecc1d3b3d09414", + "9451caaa81194d4fb22c35da2513e05a", + "7f4dd150edf94cb4af67a4a0f1063b88", + "4375bd23042a46058d85b3e23972d3ec", + "f9d1a2ea53714d65839e93794c2abe54", + "92efade1f2e74c2ca0c1e61988707c2a", + "37c3889727b94645bc86344f019f5d5d", + "a88604fd1d654ead9a9672059eb510f8", + "3b055ddd555e4c749cd2e5150613e5dd", + "85656ea55f9e4293bc7ccf603a01606b", + "9440481908c842b1b80986afd13b65db", + "7c1e179fcaa04e9d8bc3e80d1143a806", + "346dad1ab8b3471984296d7723abd3b0", + "9a9e523b86c7443a8d05bb4117b38be4", + "ebcf67e9e1fc442c90bf60d0a227b073", + "8322f00492724cfb8f4ec318abdb98d1", + "36f3734aa32848c3ae65b440129799be", + "92dacc2ee4a24f64941e5d97354bd614", + "9f45228ef69247559d00f589534f4a12", + "5d0009347abe45c79683f21a504043fb", + "2fe4dfd5c2eb4aafbbf80e87c5ccb2f5", + "5c997b887c9b481bb4c1923947162fb8", + "21f26b3f0d9e42fe9f73fc004b62eb7d", + "e8ff377a9f9d482aafdfea0f94167abe" + ] + }, + "executionInfo": { + "elapsed": 60833, + "status": "ok", + "timestamp": 1708877251333, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "aq5MrTVSIilP", + "outputId": "8fa08a24-ffe9-4992-de8c-5a6e9336b0a0" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ab522eae5bfb433c8f5898894a4d874d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "hyperparams.yaml: 0%| | 0.00/2.30k [00:00`_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Parcollet T. & Moumen A. + - Dec. 2022 + - Difficulty: medium + - Time: 20m + - `🔗 Google Colab `__ + + +This tutorial describes how to combine (use and finetune) pretrained models +coming from HuggingFace. 
Any wav2vec 2.0 / HuBERT / WavLM or Whisper model integrated to the transformers interface of HuggingFace can be then plugged to +SpeechBrain to approach a speech-related task: automatic speech recognition, speaker recognition, +spoken language understanding ... + +.. rubric:: `🔗 Neural Network Adapters for faster low-memory fine-tuning `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Plantinga P. + - Sept. 2024 + - Difficulty: easy + - Time: 20m + - `🔗 Google Colab `__ + + +This tutorial covers the SpeechBrain implementation of adapters such as LoRA. This includes how to integrate either SpeechBrain implemented adapters, custom adapters, and adapters from libraries such as PEFT into a pre-trained model. + +.. rubric:: `🔗 Complex and Quaternion Neural Networks `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Parcollet T. + - Feb. 2021 + - Difficulty: medium + - Time: 30min + - `🔗 Google Colab `__ + + +This tutorial demonstrates how to use the SpeechBrain implementation of complex-valued and quaternion-valued neural networks +for speech technologies. It covers the basics of highdimensional representations and the associated neural layers : +Linear, Convolution, Recurrent and Normalisation. + +.. rubric:: `🔗 Recurrent Neural Networks `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Feb. 2021 + - Difficulty: easy + - Time: 30min + - `🔗 Google Colab `__ + + +Recurrent Neural Networks (RNNs) offer a natural way to process sequences. +This tutorial demonstrates how to use the SpeechBrain implementations of RNNs including LSTMs, GRU, RNN and LiGRU a specific recurrent cell designed +for speech-related tasks. RNNs are at the core of many sequence to sequence models. + + +.. rubric:: `🔗 Streaming Speech Recognition with Conformers `_ + :heading-level: 2 + +.. 
list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - de Langen S. + - Sep. 2024 + - Difficulty: medium + - Time: 60min+ + - `🔗 Google Colab `__ + + +Automatic Speech Recognition (ASR) models are often only designed to transcribe an entire large chunk of audio and are unsuitable for usecases like live stream transcription, which requires low-latency, long-form transcription. + +This tutorial introduces the Dynamic Chunk Training approach and architectural changes you can apply to make the Conformer model streamable. It introduces the tooling for training and inference that SpeechBrain can provide for you. +This might be a good starting point if you're interested in training and understanding your own streaming models, or even if you want to explore improved streaming architectures. diff --git a/docs/tutorials/nn/complex-and-quaternion-neural-networks.ipynb b/docs/tutorials/nn/complex-and-quaternion-neural-networks.ipynb new file mode 100644 index 0000000000..068a6ff31f --- /dev/null +++ b/docs/tutorials/nn/complex-and-quaternion-neural-networks.ipynb @@ -0,0 +1,733 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/nn/complex-and-quaternion-neural-networks.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/nn/complex-and-quaternion-neural-networks.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FRTJCdy-Y6pw" + }, + "source": [ + "# Complex and Quaternion Neural Networks\n", + "\n", + "This tutorial demonstrates how to use the SpeechBrain implementation of complex-valued and quaternion-valued neural networks for speech technologies. 
It covers the basics of high-dimensional representations and the associated neural layers: Linear, Convolution, Recurrent and Normalisation.\n", + "\n", + "## Prerequisites\n", + "- [SpeechBrain Introduction](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/introduction-to-speechbrain.html)\n", + "- [YAML tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html)\n", + "- [Brain Class tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html)\n", + "- [Speech Features tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-features.html)\n", + "\n", + "## Introduction and Background\n", + "\n", + "**Complex Numbers:**\n", + "Complex numbers extend the concept of real numbers into a two-dimensional space. Comprising a real and an imaginary part, a complex number `z` is typically expressed as `z = r + ix`, where `r` is the real part and `ix` is the imaginary part. This mathematical extension finds diverse applications in real-world scenarios, offering a powerful algebraic framework for manipulating concepts in two-dimensional space, such as rotations, translations, and phase-related operations. Complex numbers naturally represent the speech signal, with the Fourier transform being a notable example that operates in the complex space, capturing amplitude and phase information.\n", + "\n", + "**Quaternion Numbers:**\n", + "Quaternions generalize complex numbers to the three-dimensional space, featuring a real (`r`) and an imaginary part, which is a 3D vector (`ix + jy + kz`). A quaternion `q` can be expressed as `q = r + ix + jy + kz`. In practice, quaternions define 3D rotations and find extensive utility in physics, computer science, computer graphics, and robotics. 
They provide a stable and natural framework for conceiving and interpreting movements in three-dimensional space.\n", + "\n", + "### Connection to Neural Networks:\n", + "\n", + "As the resurgence of modern deep learning gained momentum, researchers explored the integration of complex and quaternion numbers into neural networks to address specific tasks. Complex-valued neural networks (CVNN) can directly handle the output of the Fast Fourier Transform (FFT), while quaternion neural networks (QNN) can be implemented to generate realistic robot movements.\n", + "\n", + "Beyond their natural fit for certain representations, CVNN and QNN share a compelling property: **weight sharing**. The algebraic rules governing complex and quaternion numbers differ from those of real numbers, influencing the multiplication of quaternions or complex numbers. This distinction leads to a unique mechanism of **weight sharing** within Q-CVNN, as opposed to traditional dot products in real-valued networks. This mechanism has proven to be exceptionally useful for learning expressive representations of multidimensional inputs while preserving internal relationships within the signal components, such as amplitude and phase for complex numbers.\n", + "\n", + "In this tutorial, we won't delve into all the intricacies of these properties due to their extensive nature. Instead, we aim to provide a detailed guide on how to effectively implement and utilize CVNN and QNN within SpeechBrain.\n", + "\n", + "\n", + "### Relevant bibliography\n", + "- *Andreescu, T., & Andrica, D. (2006). Complex Numbers from A to... Z (Vol. 165). Boston: Birkhäuser.*\n", + "- *Altmann, S. L. (1989). Hamilton, Rodrigues, and the quaternion scandal. Mathematics Magazine, 62(5), 291-308.*\n", + "- **Complex Neural Networks Survey:** *Hirose, A. (2012). Complex-valued neural networks (Vol. 400). 
Springer Science & Business Media.*\n", + "- **All about Quaternion Neural Networks:** *Parcollet, T., (2019) Quaternion Neural Networks, PhD Thesis, Avignon Université*\n", + "\n", + "## SpeechBrain Representation of Complex and Quaternions\n", + "\n", + "In SpeechBrain, algebraic operations are abstracted in the neural layers, freeing users from the need to focus on the initial representation. This abstraction ensures that users can manipulate real-valued tensors without explicitly declaring a specific tensor type for complex or quaternion numbers. The underlying operations are expressed in a tensor/matrix format, facilitating seamless integration with modern GPU architectures.\n", + "\n", + "Practically, any PyTorch tensor generated in your recipe can be interpreted as a complex or quaternion-valued tensor, depending on the layer that processes it. For instance:\n", + "- If processed by a `torch.nn.Linear` layer, the tensor will be real.\n", + "- If processed by a `nnet.complex_networks.c_linear.CLinear` layer, the tensor will be complex.\n", + "\n", + "**How are tensors interpreted and constructed?**\n", + "\n", + "Let's illustrate with an example. Suppose we want to consider a tensor containing `3` complex numbers or `3` quaternions. The different parts of the numbers will be concatenated as follows:\n", + "\n", + "For a complex tensor (`c_tensor`): `[r, r, r, x, x, x]`\n", + "\n", + "For a quaternion tensor (`q_tensor`): `[r, r, r, x, x, x, y, y, y, z, z, z]`\n", + "\n", + "This flexibility allows any tensor declared in your code to be viewed as a complex or quaternion tensor when processed by a {C/Q}-Layer in SpeechBrain, as long as the features dimension can be divided by 2 for complex numbers and 4 for quaternion numbers.\n", + "\n", + "To explore this further, let's proceed with the installation of SpeechBrain." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8pyD-gKql_qF" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "\n", + "!git clone https://github.com/speechbrain/speechbrain.git" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uDizGKe3mds9" + }, + "source": [ + "Now, let's try to manipulate some Tensor to better understand the formalism. We start by instantiating a Tensor containing 8 real numbers." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AVQsQ3CKm-_d" + }, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "T = torch.rand((1,8))\n", + "print(T)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P3nGWQH1nKJT" + }, + "source": [ + "Then, we access the SpeechBrain libary for manipulating complex numbers and we simply display the different parts (real, imaginary)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "G72PTJ7bnYMm" + }, + "outputs": [], + "source": [ + "from speechbrain.nnet.complex_networks.c_ops import get_real, get_imag\n", + "\n", + "print(get_real(T))\n", + "print(get_imag(T))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xJdNeYasoN7w" + }, + "source": [ + "As you can see, the initial Tensor is simply splitted in 2 and the same happens with 4 and quaternions.\n", + "\n", + "## Complex and quaternion products\n", + "\n", + "At the core of QNN and CVNN is the product. Of course, others specificities exist such as the weight initialisation, specific normalisations, activation functions etc. 
Nevertheless, the basic product is central to all neural network layers: a weight matrix that multiplies the input vector.\n", + "\n", + "A very good thing to know is that a complex number can be represented in a real-valued matrix format:\n", + "\n", + "\\begin{equation}\n", + "\\left(\\begin{array}{rr}\n", + "a & -b \\\\\n", + "b & a\n", + "\\end{array}\\right).\n", + "\\end{equation}\n", + "\n", + "The same goes for a quaternion number:\n", + "\n", + "\\begin{equation}\n", + "\\left(\\begin{array}{cccc}\n", + "a & -b & -c & -d \\\\\n", + "b & a & -d & c \\\\\n", + "c & d & a & -b \\\\\n", + "d & -c & b & a\n", + "\\end{array}\\right).\n", + "\\end{equation}\n", + "\n", + "And even more interestingly, if we multiply two of these matrices, then we obtain the product corresponding to the considered algebra. For instance, the complex product between two complex numbers is defined as:\n", + "\n", + "\\begin{equation}\n", + "\\left(\\begin{array}{rr}\n", + "a & -b \\\\\n", + "b & a\n", + "\\end{array}\\right)\\left(\\begin{array}{lr}\n", + "c & -d \\\\\n", + "d & c\n", + "\\end{array}\\right)=\\left(\\begin{array}{cc}\n", + "a c-b d & -a d-b c \\\\\n", + "b c+a d & -b d+a c\n", + "\\end{array}\\right),\n", + "\\end{equation}\n", + "\n", + "which is equivalent to the formal definition:\n", + "\n", + "\\begin{equation}\n", + "(a+\\mathrm{i} b)(c+\\mathrm{i} d)=(a c-b d)+\\mathrm{i}(a d+b c).\n", + "\\end{equation}\n", + "\n", + "**Ok, so how is this implemented in SpeechBrain**?\n", + "\n", + "Every single layer that you can call either on the complex or quaternion libraries will follow two steps:\n", + "1. *init()*: Define the complex / quaternion weights as torch.Parameters and initialise them with the adapted scheme.\n", + "2. *forward()*: Call the corresponding operation that implements the specific product. 
For instance, a complex linear layer would call the `complex_linear_op()` from `speechbrain.nnet.complex_networks.c_ops`.\n", + "\n", + "In practice, the `speechbrain.nnet.complex_networks.c_ops.complex_linear_op` function simply:\n", + "1. Takes the weights of the layer and builds the corresponding real-valued matrix.\n", + "2. Apply a product between the input and this matrix to simulate the complex / quaternion products.\n", + "\n", + "Example:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ALubUSk47CAT" + }, + "outputs": [], + "source": [ + "def complex_linear_op(input, real_weight, imag_weight, bias):\n", + " \"\"\"\n", + " Applies a complex linear transformation to the incoming data.\n", + "\n", + " Arguments\n", + " ---------\n", + " input : torch.Tensor\n", + " Complex input tensor to be transformed.\n", + " real_weight : torch.Parameter\n", + " Real part of the quaternion weight matrix of this layer.\n", + " imag_weight : torch.Parameter\n", + " First imaginary part of the quaternion weight matrix of this layer.\n", + " bias : torch.Parameter\n", + " \"\"\"\n", + "\n", + " # Here we build the real-valued matrix as defined by the equations!\n", + " cat_real = torch.cat([real_weight, -imag_weight], dim=0)\n", + " cat_imag = torch.cat([imag_weight, real_weight], dim=0)\n", + " cat_complex = torch.cat([cat_real, cat_imag], dim=1)\n", + "\n", + " # If the input is already [batch*time, N]\n", + "\n", + " # We do inputxconstructed_matrix to simulate the product\n", + "\n", + " if input.dim() == 2:\n", + " if bias.requires_grad:\n", + " return torch.addmm(bias, input, cat_complex)\n", + " else:\n", + " return torch.mm(input, cat_complex)\n", + " else:\n", + " output = torch.matmul(input, cat_complex)\n", + " if bias.requires_grad:\n", + " return output + bias\n", + " else:\n", + " return output\n", + "\n", + "# We create a single complex number\n", + "complex_input = torch.rand(1, 2)\n", + "\n", + "# We create two 
Tensors (not parameters here because we don't care about storing gradients)\n", + "# These tensors are the real_parts and imaginary_parts of the weight matrix.\n", + "# The real part is equivalent [nb_complex_numbers_in // 2, nb_complex_numbers_out // 2]\n", + "# The imag part is equivalent [nb_complex_numbers_in // 2, nb_complex_numbers_out // 2]\n", + "# Hence if we define a layer with 1 complex input and 2 complex outputs:\n", + "r_weight = torch.rand((1,2))\n", + "i_weight = torch.rand((1,2))\n", + "\n", + "bias = torch.ones(4) # because we have 2 (complex) x times 2 = 4 real-values\n", + "\n", + "# and we forward propagate!\n", + "print(complex_linear_op(complex_input, r_weight, i_weight, bias).shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "C-eaHzWP8m9R" + }, + "source": [ + "**It is important to note that the quaternion implementation follows exactly the same approach.**\n", + "\n", + "## Complex-valued Neural Networks\n", + "\n", + "Once you are familiar with the formalism, you can easily derive any complex-valued neural building blocks given in `speechbrain.nnet.complex_networks`:\n", + "- 1D and 2D convolutions.\n", + "- Batch and layer normalisations.\n", + "- Linear layers.\n", + "- Recurrent cells (LSTM, LiGRU, RNN).\n", + "\n", + "*According to the literature, most of the complex and quaternion neural networks rely on split activation functions (any real-valued activation function applied over the complex/quaternion valued signal). For now, SpeechBrain follows this approach and does not offer any fully complex or quaternion activation function*." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FL_zMI-C7F5v" + }, + "source": [ + "### Convolution layers\n", + "\n", + "First, let's define a batch of inputs (that could be the output of the FFT for example).\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0_yW5_jspf-O" + }, + "outputs": [], + "source": [ + "from speechbrain.nnet.complex_networks.c_CNN import CConv1d, CConv2d\n", + "\n", + "# [batch, time, features]\n", + "T = torch.rand((8, 10, 32))\n", + "\n", + "# We define our layer and we want 12 complex numbers as output.\n", + "cnn_1d = CConv1d( input_shape=T.shape, out_channels=12, kernel_size=3)\n", + "\n", + "out_tensor = cnn_1d(T)\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a67ZqJs-qSU3" + }, + "source": [ + "As we can see, we applied a Complex-Valued 1D convolution over the input Tensor and we obtained an output Tensor whose features dimension is equal to 24. Indeed, we requested 12 `out_channels` which is equivalent to 24 real-values. Remember : **we always work with real numbers, the algebra is abstracted in the layer itself!**\n", + "\n", + "The same can be done with 2D convolution.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vgJE0QQTqo0J" + }, + "outputs": [], + "source": [ + "# [batch, time, fea, Channel]\n", + "T = torch.rand([10, 16, 30, 30])\n", + "\n", + "cnn_2d = CConv2d( input_shape=T.shape, out_channels=12, kernel_size=3)\n", + "\n", + "out_tensor = cnn_2d(T)\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Z65CK8DNraT-" + }, + "source": [ + "Please note that the 2D convolution is applied over the time and fea axis. The channel axis is used to be considered as the real and imaginary parts: `[10, 16, 30, 0:15] = real` and `[10, 16, 30, 15:30] = imag`." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15YS16o59JT6" + }, + "source": [ + "### Linear layer\n", + "\n", + "In the same manner as for convolution layers, we just need to instantiate the right module and use it!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_zpY9fHj99I2" + }, + "outputs": [], + "source": [ + "from speechbrain.nnet.complex_networks.c_linear import CLinear\n", + "\n", + "# [batch, time, features]\n", + "T = torch.rand((8, 10, 32))\n", + "\n", + "# We define our layer and we want 12 complex numbers as output.\n", + "lin = CLinear(12, input_shape=T.shape, init_criterion='glorot', weight_init='complex')\n", + "\n", + "out_tensor = lin(T)\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bnE2n50p9_P4" + }, + "source": [ + "Please notice that we added the `init_criterion` and `weight_init` arguments. These two parameters that exist in **ALL** the complex and quaternion layers define how the weights are initialised. Indeed, complex and quaternion-valued weights need a carefull initialisation process as detailled in *Deep Complex Networks* by Chiheb Trabelsy et al. and `Quaternion Recurrent Neural Networks` from Titouan Parcollet et al." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MrGfOb4n-tsh" + }, + "source": [ + "### Normalization layers\n", + "\n", + "One do not normalise a set of complex numbers (e.g the output of a complex-valued layers) in the same manner as a set of real-valued numbers. Due to the complexity of the task, this tutorial won't go into the details. 
Please note that the code is fully available in the corresponding SpeechBrain library and that it strictly follows the description first made in the paper *Deep Complex Networks* by Chiheb Trabelsy et al.\n", + "\n", + "SpeechBrain supports both complex batch and layer normalisations:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SCe-dYJc_VH0" + }, + "outputs": [], + "source": [ + "from speechbrain.nnet.complex_networks.c_normalization import CBatchNorm,CLayerNorm\n", + "\n", + "inp_tensor = torch.rand([10, 16, 30])\n", + "\n", + "# Not that by default the complex axis is the last one, but it can be specified.\n", + "CBN = CBatchNorm(input_shape=inp_tensor.shape)\n", + "CLN = CLayerNorm(input_shape=inp_tensor.shape)\n", + "\n", + "out_bn_tensor = CBN(inp_tensor)\n", + "out_ln_tensor = CLN(inp_tensor)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kmx2EsrtAC8E" + }, + "source": [ + "### Recurrent Neural Networks\n", + "\n", + "Recurrent neural cells are nothing more than multiple linear layers with a time connection. Hence, SpeechBrain provides an implementation for the complex variation of LSTM, RNN and LiGRU. As a matter of fact, these models are strictly equivalent to the real-valued ones, except that Linear layers are replaced with CLinear layers!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bxC24HR3AeGO" + }, + "outputs": [], + "source": [ + "from speechbrain.nnet.complex_networks.c_RNN import CLiGRU, CLSTM, CRNN\n", + "\n", + "inp_tensor = torch.rand([10, 16, 40])\n", + "\n", + "lstm = CLSTM(hidden_size=12, input_shape=inp_tensor.shape, weight_init='complex', bidirectional=True)\n", + "rnn = CRNN(hidden_size=12, input_shape=inp_tensor.shape, weight_init='complex', bidirectional=True)\n", + "ligru = CLiGRU(hidden_size=12, input_shape=inp_tensor.shape, weight_init='complex', bidirectional=True)\n", + "\n", + "print(lstm(inp_tensor).shape)\n", + "print(rnn(inp_tensor).shape)\n", + "print(ligru(inp_tensor).shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bot3cQpGBSbw" + }, + "source": [ + "Note that the output dimension is 48 as we have 12 complex numbers (24 values) times 2 directions (bidirectional RNNs).\n", + "\n", + "## Quaternion Neural Networks\n", + "\n", + "Luckily, QNN within SpeechBrain follow exactly the same formalism. Therefore, you can easily derive any quaternion-valued neural networks from the building blocks given in `speechbrain.nnet.quaternion_networks`:\n", + "- 1D and 2D convolutions.\n", + "- Batch and layer normalisations.\n", + "- Linear and Spinor layers.\n", + "- Recurrent cells (LSTM, LiGRU, RNN).\n", + "\n", + "*According to the litterature, most of the complex and quaternion neural networks rely on split activation functions (any real-valued activation function applied over the complex/quaternion valued signal). For now, SpeechBrain follows this approach and does not offer any fully complex or quaternion activation function*.\n", + "\n", + "Everything we just saw with complex neural networks still hold. 
Hence we can summarize everything in a single code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ovxi7PdaCf5r" + }, + "outputs": [], + "source": [ + "from speechbrain.nnet.quaternion_networks.q_CNN import QConv1d, QConv2d\n", + "from speechbrain.nnet.quaternion_networks.q_linear import QLinear\n", + "from speechbrain.nnet.quaternion_networks.q_RNN import QLiGRU, QLSTM, QRNN\n", + "\n", + "# [batch, time, features]\n", + "T = torch.rand((8, 10, 40))\n", + "\n", + "# [batch, time, fea, Channel]\n", + "T_4d = torch.rand([10, 16, 30, 40])\n", + "\n", + "# We define our layers and we want 12 quaternion numbers as output (12x4 = 48 output real-values).\n", + "cnn_1d = QConv1d( input_shape=T.shape, out_channels=12, kernel_size=3)\n", + "cnn_2d = QConv2d( input_shape=T_4d.shape, out_channels=12, kernel_size=3)\n", + "\n", + "lin = QLinear(12, input_shape=T.shape, init_criterion='glorot', weight_init='quaternion')\n", + "\n", + "lstm = QLSTM(hidden_size=12, input_shape=T.shape, weight_init='quaternion', bidirectional=True)\n", + "rnn = QRNN(hidden_size=12, input_shape=T.shape, weight_init='quaternion', bidirectional=True)\n", + "ligru = QLiGRU(hidden_size=12, input_shape=T.shape, weight_init='quaternion', bidirectional=True)\n", + "\n", + "print(cnn_1d(T).shape)\n", + "print(cnn_2d(T_4d).shape)\n", + "print(lin(T).shape)\n", + "print(lstm(T)[0].shape) # RNNs return output + hidden so we need to filter !\n", + "print(ligru(T)[0].shape) # RNNs return output + hidden so we need to filter !\n", + "print(rnn(T)[0].shape) # RNNs return output + hidden so we need to filter !\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Q75NZOMhEFv2" + }, + "source": [ + "### Quaternion Spinor Neural Networks\n", + "\n", + "**Introduction:**\n", + "Quaternion Spinor Neural Networks (SNN) represent a specialized category within quaternion-valued neural networks (QNN). 
As mentioned earlier, quaternions are designed to represent rotations. In QNN layers, the fundamental operation involves the Hamilton product (`inputs x weights`), where inputs and weights are sets of quaternions. This product essentially creates a new rotation equivalent to the composition of the first rotation followed by the second.\n", + "\n", + "**Rotation Composition:**\n", + "Multiplying two quaternions results in a rotation that combines the individual rotations represented by each quaternion. For instance, given `q3 = q1 x q2`, it implies that *q3 is a rotation equivalent to a rotation by q1 followed by a rotation from q2*. In the context of Spinor Neural Networks, this concept is employed to compose new rotations, not to physically rotate objects, but to predict sequential rotations. For example, predicting the next movement of a robot involves using the previous movement (represented as a quaternion) as input to produce a new quaternion as the output, capturing the expected next movement.\n", + "\n", + "**Modeling Rotations with SNN:**\n", + "Spinor Neural Networks (SNN) are specifically designed to model rotations. In scenarios like robotic movements, SNNs take 3D coordinates (x, y, z) of the object before the movement as input and predict its coordinates after the movement as the output.\n", + "\n", + "**Formal Rotation Equation:**\n", + "To achieve this, the standard product in all layers of the network is replaced with the following equation:\n", + "\n", + "\\begin{equation}\n", + "\\vec{v_{output}} = q_{weight} \\vec{v_{input}} q^{-1}_{weight}.\n", + "\\end{equation}\n", + "\n", + "This equation formally defines the rotation of a vector $\\vec{v}$ by a unit quaternion $q_{weight}$ (with a norm of 1), where $q^{-1}$ represents the conjugate of the quaternion. 
Both left and right products in this equation are Hamilton products.\n", + "\n", + "In summary, Quaternion Spinor Neural Networks are tailored to model rotations, making them particularly suitable for applications where predicting sequential rotations or movements is crucial, such as in robotics or animation.\n", + "\n", + "\n", + "**Ok, so how is this implemented in SpeechBrain?**\n", + "\n", + "In the exact same manner than for the standard Hamilton product! Indeed, such rotation can also be represented as a matrix product:\n", + "\n", + "\\begin{equation}\n", + "\\left(\\begin{array}{ccc}\n", + "a^{2}+b^{2}-c^{2}-d^{2} & 2 b c-2 a d & 2 a c+2 b d \\\\\n", + "2 a d+2 b c & a^{2}-b^{2}+c^{2}-d^{2} & 2 c d-2 a b \\\\\n", + "2 b d-2 a c & 2 a b+2 c d & a^{2}-b^{2}-c^{2}+d^{2}\n", + "\\end{array}\\right).\n", + "\\end{equation}\n", + "\n", + "Hence, we just need to define the `quaternion_op` that follows the same usual process:\n", + "1. Compose a real-valued matrix from the different weight components\n", + "2. 
Apply a matrix product between the input and this rotation matrix!\n", + "\n", + "[Check the code!](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.nnet.quaternion_networks.q_ops.html#speechbrain.nnet.quaternion_networks.q_ops.quaternion_linear_rotation_op)\n", + "\n", + "### Turning a quaternion layer into a spinor layer\n", + "\n", + "Spinor layer can be activated with a boolean parameter in all quaternion layers.\n", + "Here are a couple of examples:\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oioMgs8eIe3K" + }, + "outputs": [], + "source": [ + "from speechbrain.nnet.quaternion_networks.q_CNN import QConv1d\n", + "from speechbrain.nnet.quaternion_networks.q_linear import QLinear\n", + "\n", + "# [batch, time, features]\n", + "T = torch.rand((8, 80, 16))\n", + "\n", + "#\n", + "# NOTE: in this case the real components must be zero as spinor neural networks\n", + "# only input and output 3D vectors ! We don't do it here for the sake of compactness\n", + "#\n", + "\n", + "# We define our layers and we want 12 quaternion numbers as output (12x4 = 48 output real-values).\n", + "cnn_1d = QConv1d( input_shape=T.shape, out_channels=12, kernel_size=3, spinor=True, vector_scale=True)\n", + "lin = QLinear(12, input_shape=T.shape, spinor=True, vector_scale=True)\n", + "\n", + "print(cnn_1d(T).shape)\n", + "print(lin(T).shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fVZm7xqFLkPF" + }, + "source": [ + "Two remarks on Spinor layers:\n", + "1. We need to set a vector_scale to train deep models. The vector scale is just an other set torch.Parameters that will scale down the output of each Spinor layers. Indeed, the output of a SNN layer is a set of 3D vectors that are the sum of rotated 3D vectors. Quaternion rotations do not affect the magnitude of the rotated vector. 
Hence, by summing over and over rotated 3D vectors, we might end up very quickly with very large values (i.e the training will explode).\n", + "2. You might consider to use `weight_init='unitary'`. Indeed, quaternion rotations are valid only if the considered quaternion is unitary. Therefore, starting with unitary weights may facilitate the learning phase!\n", + "\n", + "## Putting everyting together!\n", + "\n", + "We provide a minimal example for both complex and quaternion neural networks:\n", + "- `speechbrain/tests/integration/ASR_CTC/example_asr_ctc_experiment_complex_net.yaml`.\n", + "- `speechbrain/tests/integration/ASR_CTC/example_asr_ctc_experiment_quaternion_net.yaml`.\n", + "\n", + "If we take a look at one of these YAML params file, we can easily distinguish how to build our model out of the different blocks!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "RGU7t6oeM2ox" + }, + "outputs": [], + "source": [ + "yaml_params = \"\"\"\n", + "model: !new:speechbrain.nnet.containers.Sequential\n", + " input_shape: [!ref , null, 660] # input_size\n", + " conv1: !name:speechbrain.nnet.quaternion_networks.q_CNN.QConv1d\n", + " out_channels: 16\n", + " kernel_size: 3\n", + " act1: !ref \n", + " conv2: !name:speechbrain.nnet.quaternion_networks.q_CNN.QConv1d\n", + " out_channels: 32\n", + " kernel_size: 3\n", + " nrm2: !name:speechbrain.nnet.quaternion_networks.q_CNN.QConv1d\n", + " act2: !ref \n", + " pooling: !new:speechbrain.nnet.pooling.Pooling1d\n", + " pool_type: \"avg\"\n", + " kernel_size: 3\n", + " RNN: !name:speechbrain.nnet.quaternion_networks.q_RNN.QLiGRU\n", + " hidden_size: 64\n", + " bidirectional: True\n", + " linear: !name:speechbrain.nnet.linear.Linear\n", + " n_neurons: 43 # 42 phonemes + 1 blank\n", + " bias: False\n", + " softmax: !new:speechbrain.nnet.activations.Softmax\n", + " apply_log: True\n", + " \"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UCbAmc8ONIhH" + }, + 
"source": [ + "Here, we have a very basic quaternion-valued CNN-LiGRU model that can be used to perform end-to-end CTC ASR!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "29vJJu--NH52" + }, + "outputs": [], + "source": [ + "%cd /content/speechbrain/tests/integration/ASR_CTC/\n", + "!python example_asr_ctc_experiment.py example_asr_ctc_experiment_quaternion_net.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and 
François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/nn/conformer-streaming-asr.ipynb b/docs/tutorials/nn/conformer-streaming-asr.ipynb new file mode 100644 index 0000000000..0d7f7ddee6 --- /dev/null +++ b/docs/tutorials/nn/conformer-streaming-asr.ipynb @@ -0,0 +1,1628 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "7fc69f2f", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/nn/conformer-streaming-asr.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/nn/conformer-streaming-asr.ipynb)" + ] + }, + { + "cell_type": "markdown", + "id": "3e9ea69e-e1eb-4cea-8426-2daefa29d902", + "metadata": {}, + "source": [ + "# Streaming Speech Recognition with Conformers\n", + "\n", + "Automatic Speech Recognition (ASR) models are often only designed to transcribe an entire large chunk of audio and are unsuitable for usecases like live stream transcription, which requires low-latency, long-form transcription.\n", + "\n", + "This tutorial introduces the Dynamic Chunk Training approach and architectural 
changes you can apply to make the Conformer model streamable. It introduces the tooling for training and inference that SpeechBrain can provide for you.\n", + "This might be a good starting point if you're interested in training and understanding your own streaming models, or even if you want to explore improved streaming architectures.\n", + "\n", + "This tutorial goes very in-depth with the implementation. It may be okay to skim through depending on your goals.\n", + "\n", + "The model and training procedure described here are not state-of-the-art, but it is a reasonably good and modern end-to-end approach. It has successfully been applied as the following recipes (non-exhaustive list):\n", + "\n", + "- [`LibriSpeech/ASR/transducer`](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/ASR/transducer) ([pre-trained on HuggingFace](https://huggingface.co/speechbrain/asr-streaming-conformer-librispeech))\n", + "- [`VoxPopuli/ASR/transducer`](https://github.com/speechbrain/speechbrain/tree/develop/recipes/VoxPopuli/ASR/transducer)\n", + "- [`CommonVoice/ASR/transducer`](https://github.com/speechbrain/speechbrain/tree/develop/recipes/CommonVoice/ASR/transducer) (French, Italian)\n", + "\n", + "## Recommended prerequisites\n", + "\n", + "- [Speech Recognition From Scratch](https://speechbrain.readthedocs.io/en/latest/tutorials/tasks/speech-recognition-from-scratch.html)" + ] + }, + { + "cell_type": "markdown", + "id": "27f8fc9c-3e55-455e-8803-415bd710174a", + "metadata": {}, + "source": [ + "### Installing SpeechBrain" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "266d75ba-482d-4504-855e-92b3170f218c", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"f43f965c-fb08-4542-a7f4-4f30ebc38ca9", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "%pip install matplotlib" + ] + }, + { + "cell_type": "markdown", + "id": "33ab0dab-527a-4b2d-96e9-b0dea7afca35", + "metadata": {}, + "source": [ + "## What a streaming model needs to achieve\n", + "\n", + "We need a fine-grained way to restrict and remember context so that the model attends only on **recent context** and **not future frames**. That strategy must somehow be **consistent between training and inference**.\n", + "\n", + "Traditional models might have the luxury of reusing the same forward code path for both training and inference. Training and inference when streaming kind of have opposite performance characteristics, and this results in special casing in some layers.\n", + "\n", + "- For inference, we generally need to process outputs chunk-by-chunk as they come in, and this usually implies caching some past hidden state at different layers.\n", + "- For training, we prefer to pass in a large batch of whole utterances, once, to maximize GPU occupancy and lower Python and CUDA kernel launching overhead. Thus, we would prefer to enforce these restrictions by masking.\n", + "\n", + "### Summary of the tutorial\n", + "\n", + "This tutorial attempts to split up the theory and practice in separate sections. This is a summary of each of the sections:\n", + "\n", + "1. Introduce architectural changes to the Conformer model. We will discuss:\n", + " - How we solve future dependencies of the self-attention mechanism with chunked attention masking.\n", + " - How we solve future dependencies of the convolution module with Dynamic Chunk Convolutions.\n", + " - Why we can avoid changing the feature extractor and positional embeddings for training.\n", + "2. Explain the Dynamic Chunk Training strategy for training. 
We will discuss:\n", + "    - How to train the model to support various chunk sizes and left context sizes to be selected at runtime.\n", + "    - The consequences of changing the chunk size and left context size.\n", + "    - The implications of different loss functions for streaming model training.\n", + "3. List the actual changes required to train a streaming Conformer in SpeechBrain.\n", + "4. Explain how to debug neural layers to ensure correct streaming behavior.\n", + "5. Introduce all the parts involved in streaming inference. We will:\n", + "    - Introduce the wrapper to adapt a non-streaming feature extractor into a streaming one.\n", + "    - Explain the streaming context object architecture and streaming forward methods.\n", + "    - List miscellaneous other changes to be done to the model.\n", + "6. Give a practical introduction to inference tools in SpeechBrain. We will:\n", + "    - Demonstrate how to make a trained streaming-capable model ready for streaming inference.\n", + "    - Provide complete examples of the `StreamingASR` inference interface for stream or file processing." + ] + }, + { + "cell_type": "markdown", + "id": "773b199e-1c6a-4e2c-8b4b-1f379799b2aa", + "metadata": {}, + "source": [ + "## Architectural Changes to the Conformer" + ] + }, + { + "cell_type": "markdown", + "id": "28372287-c508-4d12-a280-34fa6bf905da", + "metadata": {}, + "source": [ + "\"Simplified\n", + "\n", + "The above is a (very) simplified diagram of the vanilla Conformer architecture as used in one of our models, to read from top to bottom. \n", + "Colored bricks are the ones that require us to be careful when streaming, as they propagate information across time steps." + ] + }, + { + "cell_type": "markdown", + "id": "d49278db-9cdf-4096-9599-fb25d4b4d441", + "metadata": {}, + "source": [ + "### Chunked Attention\n", + "\n", + "#### What is Causal Attention?\n", + "\n", + "If you are familiar with Transformer architectures, you may be familiar with causal attention. 
In a nutshell, causal attention makes it so that the output frame at timestep $t$ cannot attend to an input from a \"future\" timestep ($t+1$, $t+2$, etc.).\n", + "\n", + "This directly implies that if you want to predict the output of the model at timestep $t$, you only need \"current\" and \"past\" inputs ($t$, $t-1$, $t-2$, etc.). This is important to us, because we _don't_ know the future frames!\n", + "\n", + "Causal attention is very simple to apply (naively), and actually fits the bill for streaming ASR... But we won't use it here.\n", + "\n", + "#### What is Chunked Attention, and why do we prefer it?\n", + "\n", + "Causal attention is simple to implement, but for streaming ASR, it is found to disproportionately degrade the Word Error Rate.\n", + "For this reason, chunked attention is often chosen in streaming attentive models instead.\n", + "\n", + "Conceptually, chunked attention introduces the idea of _chunks_ that group a given amount of frames (`chunk_size`). For example, if you were to have a chunk size of 4, then you would look at your input like this:" + ] + }, + { + "cell_type": "markdown", + "id": "4655bd01-8acd-48a5-b2f4-352cc6e62e98", + "metadata": {}, + "source": [ + "\"Chunking" + ] + }, + { + "cell_type": "markdown", + "id": "0e147465-b395-4fe1-ac38-2e9d1dddb34a", + "metadata": {}, + "source": [ + "**Frames within a chunk can attend to each other**. This retains more of the expressive power of attention compared to causal attention. \n", + "Chunks can also attend to past chunks, but we **limit how far into the past** to reduce the computational and memory cost at inference time (`left_context_chunks`).\n", + "\n", + "At training time, we enforce this using an **attention mask**. An attention mask answers the question: Can the `j`-th output frame attend to the `i`-th input frame? \n", + "As such, it is defined as a boolean tensor defined with a shape of `(t, t)`. 
Here follows an example of one (although the actual mask is a transposition of this):" + ] + }, + { + "cell_type": "markdown", + "id": "1bd674a3-7a10-4908-b0a7-b92862a174ff", + "metadata": {}, + "source": [ + "\"Chunked" + ] + }, + { + "cell_type": "markdown", + "id": "f2aebfdc-2833-4a54-aa12-f5bdf9693b84", + "metadata": {}, + "source": [ + "In fact, we can rather easily reproduce this exact mask. Note that we are transposing the mask for display, and that here, a `True` (red) means **masking**, and `False` (blue) means **not masking**:" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "id": "cbd33073-55bf-4888-99ff-5cbe7d4499e3", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAaAAAAGdCAYAAABU0qcqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAbCUlEQVR4nO3df2zUhf3H8dfR0qMh7UnraLlxhc4QUUDGLBBg2WZoJITg2KJsBLGDZImm/Cg1BNhSyCJQwc0voqQIfyDJAPUPi46EEVYrSORHodZJNgvEBjtIqSZ6HyjhJL3P9w/HzUp/yuf6vrs+H8kn4T73uX7eabh75nP3uU99ruu6AgCgnw2yHgAAMDARIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYCLdeoDvikajunLlirKysuTz+azHAQD0keu6unbtmoLBoAYN6vo4J+ECdOXKFYVCIesxAAB3qbm5WSNHjuzy/oQLUFZW1n//1Swp23IUIG7CCliPAMSNIymkb7+edy7hAvS/t92yRYCQqvifjYGgp49ROAkBAGCCAAEATBAgAIAJAgQAMBG3AG3fvl2jR4/WkCFDNHXqVJ0+fTpeuwIAJKG4BOiNN95QeXm51q9fr/r6ek2cOFGzZs1Sa2trPHYHAEhCvnj8Se6pU6dq8uTJeuWVVyR9c3WDUCikZcuWac2aNd0+1nEcBQIBSWFxsipSlSuu8oHU5UgKSAqHw8rO7vp13PMjoK+//lpnz55VcXHx/3YyaJCKi4t14sSJO7aPRCJyHKfDAgBIfZ4H6IsvvlB7e7vy8vI6rM/Ly1NLS8sd21dWVioQCMQWLsMDAAOD+Vlwa9euVTgcji3Nzc3WIwEA+oHnl+K59957lZaWpqtXr3ZYf/XqVeXn59+xvd/vl9/v93oMAECC8/wIKCMjQw8//LBqampi66LRqGpqajRt2jSvdwcASFJxuRhpeXm5SkpKVFRUpClTpmjr1q1qa2vT4sWL47E7AEASikuAfvOb3+jzzz/XunXr1NLSoh//+Mf6+9//fseJCQCAgSsu3wO6G3wPCAMB3wNCKjP7HhAAAL1BgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEwQIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBA
AwQYAAACYIEADAhOcBqqys1OTJk5WVlaXhw4dr3rx5amxs9Ho3AIAk53mAjh49qtLSUp08eVJHjhzRrVu39Oijj6qtrc3rXQEAkpjPdV03njv4/PPPNXz4cB09elQ/+9nPetzecRwFAgFJYUnZ8RwNMOPKZz0CEDeOpICkcDis7OyuX8fT4z1IOByWJOXk5HR6fyQSUSQSid12HCfeIwEAEkBcT0KIRqMqKyvTjBkzNH78+E63qaysVCAQiC2hUCieIwEAEkRc34J75plndOjQIR0/flwjR47sdJvOjoC+iRBvwSF18RYcUpn5W3BLly7VwYMHdezYsS7jI0l+v19+vz9eYwAAEpTnAXJdV8uWLVN1dbXee+89FRYWer0LAEAK8DxApaWl2rdvn95++21lZWWppaVFkhQIBJSZmen17gAAScrzz4B8vs7f2969e7d+97vf9fh4TsPGQMBnQEhlZp8BxflrRQCAFMG14AAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEwQIACACQIEADBBgAAAJggQAMAEAQIAmEi3HqArYQWUbT3EAOGTaz3CgMPvvH+58lmPgE5wBAQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEzEPUDPP/+8fD6fysrK4r0rAEASiWuA6urq9Oqrr+qhhx6K524AAEkobgG6fv26Fi5cqF27dmnYsGHx2g0AIEnFLUClpaWaM2eOiouL47ULAEASi8u14F5//XXV19errq6ux20jkYgikUjstuM48RgJAJBgPD8Cam5u1ooVK7R3714NGTKkx+0rKysVCARiSygU8nokAEAC8rmu6+lleQ8cOKBf/epXSktLi61rb2+Xz+fToEGDFIlEOtzX2RFQKBRSWOJq2P2EKzMj1XE17P7lSApICofDys7u+pXc87fgZs6cqY8//rjDusWLF2vs2LFavXp1h/hIkt/vl9/v93oMAECC8zxAWVlZGj9+fId1Q4cOVW5u7h3rAQADF1dCAACY6Je/iPree+/1x24AAEmEIyAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEwQIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwERcAnT58mU9+eSTys3NVWZmpiZMmKAzZ87EY1cAgCSV7vUP/PLLLzVjxgw98sgjOnTokH7wgx/owoULGjZsmNe7AgAkMc8DtHnzZoVCIe3evTu2rrCw0OvdAACSnOdvwb3zzjsqKirSE088oeHDh2vSpEnatWtXl9tHIhE5jtNhAQCkPs8D9Omnn6qqqkpjxozR4cOH9cwzz2j58uXas2dPp9tXVlYqEAjEllAo5PVIAIAE5HNd1/XyB2ZkZKioqEgffPBBbN3y5ctVV1enEydO3LF9JBJRJBKJ3XYcR6FQSGFJ2V4Ohi755Ol/ASDhuPJZjzCgOJICksLhsLKzu34l9/wIaMSIEXrwwQc7rHvggQf02Wefdbq93+9XdnZ2hwUAkPo8D9CMGTPU2NjYYd358+c1atQor3cFAEhingdo5cqVOnnypDZt2qSLFy9q37592rlzp0pLS73eFQAgiXn+GZAkHTx4UGvXrtWFCxdUWFio8vJy/f73v+/VYx3HUSAQ4DOgfsRnQEh1fAbUv3r7GVBcAnQ3CFD/I0BIdQSof5mdhAAAQG8QIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACYIEADABA
ECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEwQIACACQIEADCRbj0A7LnyWY8w4PjkWo8woPD77m+OpECPW3EEBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATHgeoPb2dlVUVKiwsFCZmZm677779Nxzz8l1OQ8fAPA/nn8RdfPmzaqqqtKePXs0btw4nTlzRosXL1YgENDy5cu93h0AIEl5HqAPPvhAv/zlLzVnzhxJ0ujRo7V//36dPn3a610BAJKY52/BTZ8+XTU1NTp//rwk6aOPPtLx48c1e/bsTrePRCJyHKfDAgBIfZ4fAa1Zs0aO42js2LFKS0tTe3u7Nm7cqIULF3a6fWVlpf70pz95PQYAIMF5fgT05ptvau/evdq3b5/q6+u1Z88e/fnPf9aePXs63X7t2rUKh8Oxpbm52euRAAAJyOd6fHpaKBTSmjVrVFpaGlu3YcMG/fWvf9Unn3zS4+Mdx1EgEFBYUraXgwEJhKszI7V9czXscDis7OyuX8k9PwK6ceOGBg3q+GPT0tIUjUa93hUAIIl5/hnQ3LlztXHjRhUUFGjcuHH68MMP9eKLL2rJkiVe7woAkMQ8fwvu2rVrqqioUHV1tVpbWxUMBrVgwQKtW7dOGRkZPT6et+AwEPAWHFJb796C8zxAd4sAYSAgQEhtRp8BAQDQGwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEwQIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEz0OUDHjh3T3LlzFQwG5fP5dODAgQ73u66rdevWacSIEcrMzFRxcbEuXLjg1bwAgBTR5wC1tbVp4sSJ2r59e6f3b9myRdu2bdOOHTt06tQpDR06VLNmzdLNmzfvelgAQApx74Ikt7q6OnY7Go26+fn57gsvvBBb99VXX7l+v9/dv39/r35mOBx2JblhyXVZWFJ0SYARWFjiuPz3dTwc7vb13tPPgJqamtTS0qLi4uLYukAgoKlTp+rEiROdPiYSichxnA4LACD1eRqglpYWSVJeXl6H9Xl5ebH7vquyslKBQCC2hEIhL0cCACQo87Pg1q5dq3A4HFuam5utRwIA9ANPA5Sfny9Junr1aof1V69ejd33XX6/X9nZ2R0WAEDq8zRAhYWFys/PV01NTWyd4zg6deqUpk2b5uWuAABJLr2vD7h+/bouXrwYu93U1KSGhgbl5OSooKBAZWVl2rBhg8aMGaPCwkJVVFQoGAxq3rx5Xs4NAEh2fT31ura21pV0x1JSUhI7FbuiosLNy8tz/X6/O3PmTLexsbHXP5/TsFkGwpIAI7CwxHHp3WnYPtd1XcP+3cFxHAUCAYUl8WkQUpVPCfW0AzzmSAooHA53+7m++VlwAICBiQABAEwQIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEwQIACAiXTrAYCByJXPeoQBxSfXegR0giMgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgos8BOnbsmObOnatgMCifz6cDBw7E7rt165ZWr16tCRMmaOjQoQoGg3rqqad05coVL2cGAKSAPgeora1NEydO1Pbt2++478aNG6qvr1dFRYXq6+v11ltvqbGxUY899pgnwwIAUofPdd3v/RVhn8+n6upqzZs3r8tt6urqNGXKFF26dEkFBQU9/kzHcRQIBBSWlP19BwOAb+FKCP3NkR
RQOBxWdnbXr+RxvxRPOByWz+fTPffc0+n9kUhEkUgkdttxnHiPBABIAHE9CeHmzZtavXq1FixY0GUFKysrFQgEYksoFIrnSACABBG3AN26dUvz58+X67qqqqrqcru1a9cqHA7Hlubm5niNBABIIHF5C+52fC5duqR333232/cA/X6//H5/PMYAACQwzwN0Oz4XLlxQbW2tcnNzvd4FACAF9DlA169f18WLF2O3m5qa1NDQoJycHI0YMUKPP/646uvrdfDgQbW3t6ulpUWSlJOTo4yMDO8mBwAkN7ePamtrXUl3LCUlJW5TU1On90lya2tre/Xzw+GwK8kNS67LwsLC4sGSACMMsOW/r+PhcLev930+AvrFL36h7r46dBdfKwIADCBcCw4AYIIAAQBMECAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEwQIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAICJPgfo2LFjmjt3roLBoHw+nw4cONDltk8//bR8Pp+2bt16FyMCAFJRnwPU1tamiRMnavv27d1uV11drZMnTyoYDH7v4QAAqSu9rw+YPXu2Zs+e3e02ly9f1rJly3T48GHNmTPnew8HAEhdnn8GFI1GtWjRIq1atUrjxo3z+scDAFJEn4+AerJ582alp6dr+fLlvdo+EokoEonEbjuO4/VIAIAE5OkR0NmzZ/XSSy/ptddek8/n69VjKisrFQgEYksoFPJyJABAgvI0QO+//75aW1tVUFCg9PR0paen69KlS3r22Wc1evToTh+zdu1ahcPh2NLc3OzlSACABOXpW3CLFi1ScXFxh3WzZs3SokWLtHjx4k4f4/f75ff7vRwDAJAE+hyg69ev6+LFi7HbTU1NamhoUE5OjgoKCpSbm9th+8GDBys/P1/333//3U8LAEgZfQ7QmTNn9Mgjj8Rul5eXS5JKSkr02muveTYYACC1+VzXda2H+DbHcRQIBBSWlG09DICU4FNCvcwNAI6kgMLhsLKzu34l51pwAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACYIEADABAECAJggQAAAEwQIAGCCAAEATBAgAIAJAgQAMEGAAAAmCBAAwAQBAgCYIEAAABMECABgggABAEwQIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMpFsP8F2u60qSHOM5AKQSXlH61ze/79uv511JuABdu3ZNkhQyngNAKglYDzAgXbt2TYFA1797n9tTovpZNBrVlStXlJWVJZ/P1+P2juMoFAqpublZ2dnZ/TChN5i7fyXr3FLyzs7c/SuR5nZdV9euXVMwGNSgQV1/0pNwR0CDBg3SyJEj+/y47Oxs81/698Hc/StZ55aSd3bm7l+JMnd3Rz63cRICAMAEAQIAmEj6APn9fq1fv15+v996lD5h7v6VrHNLyTs7c/evZJw74U5CAAAMDEl/BAQASE4ECABgggABAEwQIACAiaQO0Pbt2zV69GgNGTJEU6dO1enTp61H6lFlZaUmT56srKwsDR8+XPPmzVNjY6P1WH32/PPPy+fzqayszHqUHl2+fFlPPvmkcnNzlZmZqQkTJujMmTPWY3Wrvb1dFRUVKiwsVGZmpu677z4999xzPV5by8KxY8c0d+5cBYNB+Xw+HThwoMP9rutq3bp1GjFihDIzM1VcXKwLFy7YDPst3c1969YtrV69WhMmTNDQoUMVDAb11FNP6cqVK3YD/1dPv+9ve/rpp+Xz+bR169Z+m68vkjZAb7zxhsrLy7V+/XrV19dr4sSJmjVrllpbW61H69bRo0dVWlqqkydP6siRI7p165YeffRRtbW1WY/Wa3V1dXr11Vf10EMPWY
/Soy+//FIzZszQ4MGDdejQIf3rX//SX/7yFw0bNsx6tG5t3rxZVVVVeuWVV/Tvf/9bmzdv1pYtW/Tyyy9bj3aHtrY2TZw4Udu3b+/0/i1btmjbtm3asWOHTp06paFDh2rWrFm6efNmP0/aUXdz37hxQ/X19aqoqFB9fb3eeustNTY26rHHHjOYtKOeft+3VVdX6+TJkwoGg/002ffgJqkpU6a4paWlsdvt7e1uMBh0KysrDafqu9bWVleSe/ToUetReuXatWvumDFj3CNHjrg///nP3RUrVliP1K3Vq1e7P/3pT63H6LM5c+a4S5Ys6bDu17/+tbtw4UKjiXpHkltdXR27HY1G3fz8fPeFF16Irfvqq69cv9/v7t+/32DCzn137s6cPn3aleReunSpf4bqha7m/s9//uP+8Ic/dM+dO+eOGjXK/b//+79+n603kvII6Ouvv9bZs2dVXFwcWzdo0CAVFxfrxIkThpP1XTgcliTl5OQYT9I7paWlmjNnTofffSJ75513VFRUpCeeeELDhw/XpEmTtGvXLuuxejR9+nTV1NTo/PnzkqSPPvpIx48f1+zZs40n65umpia1tLR0+P8SCAQ0derUpHyu+nw+3XPPPdajdCsajWrRokVatWqVxo0bZz1OtxLuYqS98cUXX6i9vV15eXkd1ufl5emTTz4xmqrvotGoysrKNGPGDI0fP956nB69/vrrqq+vV11dnfUovfbpp5+qqqpK5eXl+sMf/qC6ujotX75cGRkZKikpsR6vS2vWrJHjOBo7dqzS0tLU3t6ujRs3auHChdaj9UlLS4skdfpcvX1fMrh586ZWr16tBQsWJMSFPruzefNmpaena/ny5daj9CgpA5QqSktLde7cOR0/ftx6lB41NzdrxYoVOnLkiIYMGWI9Tq9Fo1EVFRVp06ZNkqRJkybp3Llz2rFjR0IH6M0339TevXu1b98+jRs3Tg0NDSorK1MwGEzouVPRrVu3NH/+fLmuq6qqKutxunX27Fm99NJLqq+v79Wfs7GWlG/B3XvvvUpLS9PVq1c7rL969ary8/ONpuqbpUuX6uDBg6qtrf1ef36iv509e1atra36yU9+ovT0dKWnp+vo0aPatm2b0tPT1d7ebj1ip0aMGKEHH3yww7oHHnhAn332mdFEvbNq1SqtWbNGv/3tbzVhwgQtWrRIK1euVGVlpfVofXL7+Zisz9Xb8bl06ZKOHDmS8Ec/77//vlpbW1VQUBB7nl66dEnPPvusRo8ebT3eHZIyQBkZGXr44YdVU1MTWxeNRlVTU6Np06YZTtYz13W1dOlSVVdX691331VhYaH1SL0yc+ZMffzxx2poaIgtRUVFWrhwoRoaGpSWlmY9YqdmzJhxx2nu58+f16hRo4wm6p0bN27c8Ye80tLSFI1GjSb6fgoLC5Wfn9/hueo4jk6dOpXwz9Xb8blw4YL+8Y9/KDc313qkHi1atEj//Oc/OzxPg8GgVq1apcOHD1uPd4ekfQuuvLxcJSUlKioq0pQpU7R161a1tbVp8eLF1qN1q7S0VPv27dPbb7+trKys2PvggUBAmZmZxtN1LSsr647PqYYOHarc3NyE/vxq5cqVmj59ujZt2qT58+fr9OnT2rlzp3bu3Gk9Wrfmzp2rjRs3qqCgQOPGjdOHH36oF198UUuWLLEe7Q7Xr1/XxYsXY7ebmprU0NCgnJwcFRQUqKysTBs2bNCYMWNUWFioiooKBYNBzZs3z25odT/3iBEj9Pjjj6u+vl4HDx5Ue3t77Lmak5OjjIwMq7F7/H1/N5SDBw9Wfn6+7r///v4etWfWp+HdjZdfftktKChwMzIy3ClTprgnT560HqlHkjpddu/ebT1anyXDadiu67p/+9vf3PHjx7t+v98dO3asu3PnTuuReuQ4jrtixQq3oKDAHTJkiPujH/3I/eMf/+hGIhHr0e5QW1vb6f/pkpIS13W/ORW7oqLCzcvLc/1+vztz5ky3sbHRdm
i3+7mbmpq6fK7W1tYm7NydSeTTsPlzDAAAE0n5GRAAIPkRIACACQIEADBBgAAAJggQAMAEAQIAmCBAAAATBAgAYIIAAQBMECAAgAkCBAAwQYAAACb+HxRNXGPS6nghAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from speechbrain.lobes.models.transformer.TransformerASR import make_transformer_src_mask\n", + "from speechbrain.utils.dynamic_chunk_training import DynChunkTrainConfig\n", + "from matplotlib import pyplot as plt\n", + "import torch\n", + "\n", + "# dummy batch size, 16 sequence length, 128 sized embedding\n", + "chunk_streaming_mask = make_transformer_src_mask(torch.empty(1, 16, 128), dynchunktrain_config=DynChunkTrainConfig(4, 1))\n", + "plt.imshow(chunk_streaming_mask.T, cmap=\"bwr\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "72b0284b-5bab-4ff3-9bbd-eb246329d64b", + "metadata": {}, + "source": [ + "#### Chunked Attention during inference\n", + "\n", + "When designing streaming models, we need to be very careful about how dependencies between output frames and input frames propagate across layers.\n", + "\n", + "For instance, thinking back about _causal_ attention, you might wonder if we could have regained some accuracy by allowing the output frame at timestep $t$ to attend to an input frame at timestep $t+1$, i.e. giving it some \"right\"/future context at every layer. \n", + "_Yes,_ we _could_, and it does help somewhat, but consider the implications when stacking layers! For instance, consider two layers of attention, where $a$ is the input, $b$ is the output of the first layer, and $c$ is the output of the second layer: $c_t$ will attend to $b_{t+1}$ (among others), which itself will attend to $a_{t+2}$. This gets worse in practice, when we might have something like 12 or 17 layers. \n", + "This is cumbersome, and will likely have a negative effect on latency (we'd need to buffer a bunch of \"future\" frames) and memory/computational cost.\n", + "\n", + "On the other hand, chunk attention plays very nicely with this. 
**Let's ignore left context first.** The following example focuses on the 4th chunk of the input, and on which frames effectively depend on/attend to which:" + ] + }, + { + "cell_type": "markdown", + "id": "58e272dd-e23d-4bec-842e-952eb5a1b522", + "metadata": {}, + "source": [ + "\"Chunked" + ] + }, + { + "cell_type": "markdown", + "id": "699805b0-0f06-4dde-bce0-061c64e8bcee", + "metadata": {}, + "source": [ + "Ignoring left context, frames within one chunk may attend to each other. If you stack attention layers, the **boundaries of chunks remain the same across layers**.\n", + "\n", + "Now, let's add left context. In the following example, we will assume a left context size of 1 chunk. For clarity, we omit the connections for 12,13,14 but they attend to the _same frames_ as 15." + ] + }, + { + "cell_type": "markdown", + "id": "0039438a-e2e4-4405-a7e9-7f1f7f2e6747", + "metadata": {}, + "source": [ + "\"Chunked" + ] + }, + { + "cell_type": "markdown", + "id": "0ee8cdb3-eb5b-44d1-90d8-99eb36cbf38a", + "metadata": {}, + "source": [ + "> _Wait, shouldn't this mean that the outputs 12,13,14,15 of `Layer #2` need us to remember the embeddings for inputs 4,5,6,7?_\n", + "\n", + "No! The 12,13,14,15 chunk at `Layer #2` does indeed depend on 8,9,10,11 of `Layer #1`, which itself depends on 4,5,6,7 of `Inputs`. \n", + "However, the hidden state of 8,9,10,11 at `Layer #1` isn't at all affected by our red chunk! 
Thus, when inferring, we can cache however many left context chunks we want to use, and the amount of things we have to cache/recompute doesn't explode with the number of layers we have.\n", + "\n", + "[`speechbrain.lobes.models.transformer.TransformerASR.make_transformer_src_mask`](https://speechbrain.readthedocs.io/en/latest/_modules/speechbrain/lobes/models/transformer/TransformerASR.html#make_transformer_src_mask) is the function that generates these masks.\n", + "\n", + "> _How does that work out at inference time?_\n", + "\n", + "Left context is defined so that a given chunk $i$ can attend to `left_context_chunks` chunks, i.e. that all output frames within chunk $i$ can attend to all frames within the past `left_context_chunks` chunks. \n", + "Ultimately, this design allows us to define the math for processing attention for a given input chunk at inference time to something that looks like this:\n", + "\n", + "```python\n", + "attention_module(concat(cached_left_chunks, input_chunk))\n", + "```\n", + "\n", + "Ignoring KV caching, here, `cached_left_chunks` ends up being, _for each layer_, a tensor of size `(batch_size, left_context_chunks * chunk_size, emb_dim)`. This is fairly reasonable, and it is the only thing we have to save at inference time for the attention part." + ] + }, + { + "cell_type": "markdown", + "id": "d3e37b4b-5561-4761-88bc-928a005c4aa1", + "metadata": {}, + "source": [ + "### Dynamic Chunk Convolutions" + ] + }, + { + "cell_type": "markdown", + "id": "1e6a10af-610e-4873-a049-54ad7680cf3f", + "metadata": {}, + "source": [ + "#### Vanilla Convolutions\n", + "\n", + "\"Vanilla\n", + "\n", + "Credit: Xilai Li et al, 2023 (Dynamic Chunk Convolution paper)
\n", + "Example with a convolution $k=5$, meaning that \"half a window\", $\\frac{k-1}{2}$, is $2$
\n", + "\n", + "Vanilla convolutions operate over windows, which, for the convolution output at timestep $t$, spans indices from $t-\\frac{k-1}{2}$ to $t+\\frac{k-1}{2}$, where $k$ is the kernel size. Thus, the output at timestep $t$ will depend on future frames, which we want to avoid. \n", + "We could pretend to ignore the problem by training normally, and at inference, right-pad frames we don't know as zeros (see figure). This would, however, be a major mismatch between training and inference, and would harm accuracy significantly.\n", + "\n", + "#### Causal Convolutions\n", + "\n", + "\"Causal\n", + "\n", + "There _is_ a direct solution: Causal convolutions. They merely shift the window for output $t$ to instead cover indices from $t-(k-1)$ to $t$. \n", + "The math for that is very simple: You just need to pad the input to the left by $\\frac{k-1}{2}$ frames, pass it to the convolution, and truncate these $\\frac{k-1}{2}$ output frames on the left.\n", + "\n", + "#### Dynamic Chunk Convolution\n", + "\n", + "\"Dynamic\n", + "\n", + "Unfortunately, causal convolutions result in worse accuracy. To remedy this, Xilai Li et al, 2023 introduces the idea of _Dynamic Chunk Convolutions_ for streaming chunked ASR.\n", + "\n", + "With this, we reuse the same chunk boundaries we used for chunked attention. \n", + "In the above figure, consider frame $T_{15}$: It looks a lot like a vanilla convolution, except that **any input frame that belongs to a future _chunk_ is masked off**. This solves our problem of depending on future frames. \n", + "\n", + "Note how the leftmost output of the example chunk, $T_0$, depends on $\\frac{k-1}{2}$ past frames: At inference time, we will need to cache this, at each layer. This is reasonably lightweight, though.\n", + "\n", + "The implementation for this at training time is actually far from obvious, because PyTorch convolution operators cannot merely take in a mask similar to the self-attention mask we used. 
If you are feeling adventurous, you can read the source code for [`speechbrain.lobes.models.transformer.Conformer.ConvolutionModule`](https://speechbrain.readthedocs.io/en/latest/_modules/speechbrain/lobes/models/transformer/Conformer.html#ConvolutionModule), which is a pile of (commented and illustrated) tensor reshaping magic." + ] + }, + { + "cell_type": "markdown", + "id": "4e1f2ec2-dd40-4f70-acf2-993e91ded8c9", + "metadata": {}, + "source": [ + "### What we _aren't_ changing\n", + "\n", + "Some parts of the architecture don't really matter and don't require any special care for streaming, because they don't propagate information between frames (i.e. they are only pointwise). \n", + "Some, on the other hand, need some explanation as to why they do or don't matter.\n", + "\n", + "#### Feature Extraction\n", + "\n", + "As implemented in SpeechBrain, the feature extractor for the Conformer is _not_ causal. This would normally be a concern for streaming, but we are leaving it unchanged in training. What gives?\n", + "\n", + "It turns out that the feature extraction **does not really need much right context** (i.e. to see many future frames). We _can_ afford to introduce the idea of some right context for this, as it represents speech in the order of milliseconds anyway. This kind of simplifies the whole ordeal, and gives more flexibility for poking around the feature extractor.\n", + "\n", + "SpeechBrain provides a wrapper, [`speechbrain.lobes.features.StreamingFeatureWrapper`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.features.html#speechbrain.lobes.features.StreamingFeatureWrapper), which abstracts this for you almost entirely, by automatically padding and caching context. It still needs to be told the characteristics of the feature extractor, which we will expand on later on.\n", + "\n", + "Normalization is another part of the feature extractor that was not edited. 
This actually creates a discrepancy between training and test, but we found it to be minimal, even between full audio normalization and per-chunk normalization. It is thus pretty much ignored, although you could give it more care.\n", + "\n", + "#### Positional embeddings\n", + "\n", + "We won't go into very detailed explanations of positional embeddings here, even though they play a major role in model accuracy in ASR. What is important to know is that they enrich the attention mechanism with position information. Otherwise, the model would lack information of where tokens are relative to each other.\n", + "\n", + "Luckily, we are using a model that we define in SpeechBrain to use relative positional sinusoidal encodings ([`speechbrain.nnet.attention.RelPosEncXL`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.nnet.attention.html#speechbrain.nnet.attention.RelPosEncXL)). We will underline why this is useful below.\n", + "\n", + "In self-attention, any _query_ can attend to any _key_ (as long as that _query_/_key_ pair is not masked, as we do need for chunked attention). \n", + "\n", + "- Without a positional embedding, the attention mechanism would ignore where the _query_ and _key_ actually are in the sentence. \n", + "- With a rather naive positional embedding, we would care about the position of the _key_ relative to the start of the sentence. This works, but is problematic for streaming ASR in a few ways. Most notably, the distances would get quite long.\n", + "- With our **relative positional embedding**, we look at the **difference** of position between the _query_ and the _key_.\n", + "\n", + "Since we have chunked attention that restricts how much into the past and future a _query_ can attend, the distance we encode is never larger than the window of frames we attend to. 
\n", + "In other words, if we attend to chunks of 16 tokens with 48 tokens of left context, we will at most represent a distance from the rightmost token to the leftmost token, i.e. $63$. \n", + "The distance of $63$ would have its own fixed positional encoding vector, which is taken into account for the score calculation in self-attention for that specific _query_/_key_ pair.\n", + "\n", + "Furthermore, no matter if we are $0$ second or $30$ minutes into the stream, these distances remain the same, as they are relative positions.\n", + "\n", + "The following example demonstrates a relative positional encoding on a sequence of 16 timesteps and an embedding size of 64:" + ] + }, + { + "cell_type": "code", + "execution_count": 165, + "id": "150c131c-e703-4bd1-b08d-0274508db8cc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(batch, seq_len*2-1, emb_size): (1, 31, 64)\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAGzCAYAAADpDmETAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAy2ElEQVR4nO3dfVyUVdoH8N+gMCDIIKggCai9LL6iYSKrlSlJPvuUJdZu2War1aqgKbmlW+muWbi9aGm+pLZqT7numplZaRn5HlISbqVJmhokgrbJDJK85JznD2u2Yc5R7mHGYTi/7+czn09cc+bMuYWrmbnua85tEkIIEFGzF+DrBRDRpcFkJ9IEk51IE0x2Ik0w2Yk0wWQn0gSTnUgTTHYiTTDZiTTBZCfSREtfL6AhFi5ciGeeeQZlZWVISkrCggUL0K9fvwY91m63o7S0FK1bt4bJZPLySom8RwiByspKxMbGIiDAjddp0cStWbNGBAUFib///e9i//794v777xcRERGivLy8QY8vKSkRAHjjrdncSkpK3MolkxBN+4swKSkpuOaaa/Diiy8COP9KHRcXh4kTJ2LatGkXfbzVakVERARKbrkF4YGBzneuWyd/0NNPu4RmHP+jdOgLL8inmDhRHp/dabn8joceco3ddpt06Ad3r5TG775bPnWfPvL4phe/dg0OHSof3KaNNCw+2SuNm67pK5/n9Gl5/P33XULDsi6XDi0slE/x6qvyeNqr98rvWL/eNfbcc9Khjx27TxpfsEA+9YMPyuOzLnvJNfjww/LBGRlOP9rq6hD31luoqKiAxWKRP+YCmvTb+NraWhQUFGD69OmOWEBAANLS0pCXlyd9TE1NDWpqahw/V1ZWAgDCAwNdk131tj4kxCVkNocbWrvZLI+HS+ZWrqX+en8SGipfi+pwWip+y+FhYa5B1dvDFi2kYRGuWItivHJ+yVpatjR2nKGh8rjL7/1CEyl+P179/asOSLFudz+ONukC3XfffYdz584hOjraKR4dHY2ys
jLpY3JycmCxWBy3uLi4S7FUoiavSSe7O6ZPnw6r1eq4lZSU+HpJRE1Ck34b37ZtW7Ro0QLl5eVO8fLycsTExEgfYzabYVa9hyLSWJNO9qCgICQnJyM3Nxe33norgPMFutzcXGRlZRmaK/34310+A+48NFs++Ne/dgn9LUJeifmb/aA03rWb/HPVK9+Pl8Z3FbnGr7zveunY9NtaSeNVioLjna8Mk8ZbJF7pEnvhhXLJSCDrmz9J46YAxefHqVOl4RcTnpHGH0x0jd1xh3zqqtc3ye+4LUMev+YaafhQkd0lNnCgfIrISHlc2BX17UTJAQFARYVkIYekQ6+917lA+eOPNgCKonIDNOlkB4Ds7GyMHj0affv2Rb9+/fD888+jqqoKf/jDH3y9NCK/0uST/be//S1OnTqFGTNmoKysDL1798bmzZtdinZEdGFNPtkBICsry/DbdiJy1uyq8UQkx2Qn0oRfvI33hMJC10alhMHydsznl7hWpG8rWyyfOCpKGv7y3Dn5+BkzpOH3jri2y/5P6Xbp2GN18qnHvSuP/2OWvNr7j2P3uAYn7pFP8tPZkPpU1WjTCHmrb9az8up9Vv/+rsFZr0jHTpwvP7uwpO4HabxTqTSMF4+4xsoflrfLYtYseTxK0Sn45JPS8PoY17MukwfLpzh50vnnxja285WdSBNMdiJNMNmJNMFkJ9KENgW6k6/vQHi970DO3n2DdOw9krpVq1byNtennpHHx7b6h3whsu+tA0iveNwldkgxNv9/n5DG75N/5RotFrm2xQLA3Xe7fk141dv/kU/yu99Jw8p22bQ0efy776Th0dmuhc5XFR2n3brJ47t2yeMpb7v+2wIAbpMU4yIi5GOXLJGGX/7hTmn8z3+WT/ODpIb4yCPysY8N2Or0s62qCpab5WMbgq/sRJpgshNpgslOpAkmO5EmmOxEmmjyu8s2ls1mg8VigfWqqxBefxPEL7+UP+iKK1xj48ZJh564S14xf/RR+dSqDW1lVVpVQVs198ByxeTPPiuP75G0xip2AJKeogAg5vxNGjdNU5SYX5G3wEK2p6CshRZQboyxK1q+eYWicxUffOAaayXfF6T+Rq8XnbvDakXbrayqf/iwfGzXrk4/2s6dg+Wrr2C1WhGu2OjzQvjKTqQJJjuRJpjsRJpgshNpgslOpAltqvETJlhdLuHzN3khGYEzJY3NK1fKB584IY/3VVzrTFFJzu/0W5fYbMVO15LLogEAgoLk8REj5HHZ/HFvKC5et2iRPP7VV/L4VVfJ4xMmSMMlI1wvjvbYY/Ip3nhDHq+tlcdVl6+TzZ9y7J/ywaozGnvl17pDhw7y+L33uoTq/vqUdGj9nvmaGhsWLbKwGk9EF8ZkJ9IEk51IE0x2Ik0w2Yk0oU013lpa6lLBXPV6qPQxa9a4xj76SDW/PB4fL4+rKsOjR7vGBoYWygf//e/y+ObN8riq9zokxDWWkiIfq7jKohgn36nHtESx9fa//iWP5+e7xs6elY+VfXcBAG66SR4fM0Ya3lXVxyW2apV8CtUZkOJieVxVLJdcM1S1CRBGj6xy+tlms8ESG8tqPBFdGJOdSBNMdiJNMNmJNKFPgQ6AS0mjTRv5gwYMcI3dfbd06H/SXNtcAWDpUvnUqlbPzz5zjdnt8rGqrZRvuUUef+ABeTxu30bXoGpziR075PH6FyT7Wfv28vh118njks0xSnrL901W/du+9ZY8fuCAPB4geanr1Us+VtVyrPq3jfpA0Xb76quusd275WNPn3b60QbAArBAR0QXxmQn0gSTnUgTTHYiTTDZiTShTTUeknq8atvgLl1cY6pdjVXtr7feKo8Hvve2/I733nONqaq0hw7J42fOyOOytlgAuFJywUdZPyegbEUVtwyXxk1vbZDPo2rplfUjq45T1UYbFiaPy44TkJ91SU+XDq1L/19p/M035VOr2mtlu3cfOSIf67q9+Pl6PKvxRHRBTHYiTTDZiTTBZCfSB
JOdSBPaVOOtJ0+6VDA3fWiWPkZWMFZtXqHaSVm1qYXqDIBsPwZPnQFo8a6kBx4ANm1yjeXlyceqNsBQnQFQVcZVG0+kprrGhg2TDj33P/KeeU9UxlWHKbvwJqDepEK1k7bsZIdqz41hg2ucfrbZbLC0b89qPBFdGJOdSBNMdiJNMNmJNOHTZN+xYwduvvlmxMbGwmQy4c16FRYhBGbMmIEOHTogJCQEaWlpOKRqoSSiC2rpyyevqqpCUlISxowZgxGSrUCefvppzJ8/H6tWrULnzp3x+OOPIz09HQcOHEBwcLCh5/r7a2aEhDhX31Vt4C/MsroGd+6UD5ZtgQwA+/bJ46ry/ZFS19hnikr3MpM8HhEhj8fFyeOyLW/uuks+VnFqQAy8Vho37VL8e8lK4ADw6aeuMcWVHVvI9t0GkFFRIY+rTjjJzhh0jJWPVZXXe/eWx1Vbcl/r+u/1ebFFOvSllc5/r2fPys8eNZRPk33YsGEYpji9IoTA888/j8ceewzDh5//ssUrr7yC6OhovPnmm/idarNtIpJqsp/Zjx49irKyMqSlpTliFosFKSkpyFOdCwZQU1MDm83mdCOiJpzsZWVlAIDo6GineHR0tOM+mZycHFgsFsctTvUWlkgzTTbZ3TV9+nRYrVbHraSkxNdLImoSmmyyx8TEAADKy8ud4uXl5Y77ZMxmM8LDw51uROTjAt2FdO7cGTExMcjNzUXvnyqeNpsN+fn5GD9efjHBC3niCdd9whXFW/z4o2t1tFUr+U4lMTHyuKp42+tWebxfP9fYwIHysdE2xenHXbvk8U8+kcf372/4HIrKuKm2Vj4+KEgeV+0nL9seSLaTDABcc408rvgHKw+X71QjO9SPP5ZPLdvXHwC+Wi2Pl82Vx2U99i0VWVj/5IrqOgIN5dNkP3PmDA7/4psHR48exb59+xAZGYn4+HhMnjwZs2fPxpVXXuk49RYbG4tbVd/4ICIlnyb73r17ccMNNzh+zs7OBgCMHj0aK1euxMMPP4yqqio88MADqKiowMCBA7F582bD59iJyMfJPmjQIFzoG7YmkwmzZs3CrFmzLuGqiJqnJlugIyLP0mfzilmzEF7/7b+qdfXrr11jx4/Lx546JY9XVsrjP/4oj5skLbChofKxkZHyeKyi1bNTJ3lctpGE6qqRPXpIw6JHT2nc9MXn8nm++EIel119UbWTxLFj8nippOUYAL7/Xh6vqnKNqdJBVUVr3Voeb9dOHr/sMtfY5ZfLx9ar8tqqq2GZMYObVxDRhTHZiTTBZCfSBJOdSBNMdiJNaFONj421IiDAuYKparGXfVFOdW3AxER5XFG8Rq9e8ri5WNICq6pcy9pcAfWFEFXVa9mXhE6flo9VbRmtOrugql6rtphu08Y1pvrGoursguqX1L27PC75JdXEy+dQtcuqfkUHD8rjsl+R6rta9b/cabfbUFrKCzsS0UUw2Yk0wWQn0gSTnUgTTHYiTWhTjbe2bYvw+rtXqPrXq6tdY0b/mVSbN6iu7CirrkZFycfW25fPQXV6oWNHeVxW1VZVuhVx0UXe1206Ivl+AaA+MyCLq8Z++608rtqbsN5uRw7/+Y9rTLVBqerKjqrNO1Rk34FQfWW7Xt+9zW6H5bvvWI0nogtjshNpgslOpAkmO5EmmOxEmmiyW0l72ksPf42QEOcKpqrwLNs4RLbTMQCYv1PsYHPkiDyuqjB/841rTNU0rdqRRbbbCwBs3y6Py85GqHrga2qkYZPqLIWs6gwAZsXFCWU980Z3gVFtU63aelrWe5+QIB+r+mNR/GHUtJXsSAP5n4VsYyTA9U/l7Fkb8LD8IpANwVd2Ik0w2Yk0wWQn0gSTnUgT+rTLAnBpMDRSRFK1NIaEyOMWRSFF1eYo2x5atqEDoC5QqeIdOsjjsrZb1VjFNtUiRj7eVHZCPo+quHhCMl7V5iobC6i39VbFZRt1qLadVrXRWq3y+Nmz8risFVtR/Kzfom0DYAHYL
ktEF8ZkJ9IEk51IE0x2Ik0w2Yk0oU27LI4edW2/VFV1ZXHVxgiq+MmT8vh33zU8rmqX/Vxx0USjGy/ILmxYVycfq9gyWnE+Q021xXRgoGtMdWFLIxuAAEBERMPjqgtbtm0rj6tadFUbicjiqjMg9eOVlUDnzvKxDcBXdiJNMNmJNMFkJ9IEk51IE0x2Ik3oU41PSQHqbyWt6neXbaSgqgCrKsaq3nhVZVi2CYJqrKyP/kJxVY+9bKtq1RyKarSIlG93bfpesk0zoD4bIetJl231DKgvPqnqa1fFKypcY6ozGocPy+MFBfK47EwHID8zotowpH4fvd0uH9dAfGUn0gSTnUgTTHYiTTDZiTTBZCfShD7V+Oxs1+q7rBqriqvGqnYqUV00UtVLL6vIqiq6RnZBAdQ7oZw75xpT9cYrNjQy3Buv2h1I1hvfooV8rGo7aqO7CcnOpMjOxFworjpjotqSWja+ob371dXAtGnysQ3AV3YiTTDZiTTBZCfShE+TPScnB9dccw1at26N9u3b49Zbb0VRUZHTmOrqamRmZiIqKgphYWHIyMhAuWrXUSJS8mmyb9++HZmZmdizZw+2bNmCuro6DB06FFW/KExNmTIFGzduxNq1a7F9+3aUlpZixIgRPlw1kX9qUvvGnzp1Cu3bt8f27dtx3XXXwWq1ol27dli9ejVGjhwJADh48CC6du2KvLw89O/f/6JzOvaNHzgQ4fV3SfFElVY1h+qihKp5ZLusGBnrTly2RoP73Ytw+XiTTXGWwsj+66ozGqo5jMZlZ0CMjAXUa1SdMWnEWRfbjz/CsmtX89g33vrTLzzypy9jFBQUoK6uDmlpaY4xiYmJiI+PR15ennSOmpoa2Gw2pxsRNaFkt9vtmDx5MgYMGIAePXoAAMrKyhAUFISIeucbo6OjUaY4X52TkwOLxeK4xckuy0ukoSaT7JmZmfjiiy+wZs2aRs0zffp0WK1Wx61EtWkjkWaaRAddVlYW3n77bezYsQMdO3Z0xGNiYlBbW4uKigqnV/fy8nLEKHbvNJvNMKs6rIg05tNkF0Jg4sSJWL9+PbZt24bO9bbJTU5ORmBgIHJzc5GRkQEAKCoqQnFxMVJTU409WVSUa0umqjAi22Dh22/lY1XbNKtaV2trGz7eSJsroG51NTLeYL3WcLusciLJTLIWWkDdRmt0vJELeAYFyeOq8arNTmTjVUXe+huGqH6/DeTTZM/MzMTq1auxYcMGtG7d2vE53GKxICQkBBaLBWPHjkV2djYiIyMRHh6OiRMnIjU1tUGVeCL6L58m++LFiwEAgwYNcoqvWLEC9957LwBg3rx5CAgIQEZGBmpqapCeno5FixZd4pUS+T+fv42/mODgYCxcuBALFy68BCsiar6aTDWeiLyLyU6kiSZx6u2SiIpyraj+4jSfE1l1VFUxVVVdPRE30s4LGG/pNdIWrFi3aCVfi+kHA1spA57ZvEPV0mpkHtX6PBWXPWdDNyNRnclpIL6yE2mCyU6kCSY7kSaY7ESaYLITaUKfavwnn7j2SKt6z2U9yD/+KB/rzf51g9s6K9eoWouRuRW82huvoup1r785ycXmNrJ9tSf67gH5GlVz15/DyO9Rgq/sRJpgshNpgslOpAkmO5EmmOxEmtCnGn/77a67hBjZlUQ11lNxXzynLG5wDhEs77s3VRu8+KQsbmSsp+JGdhK61M9ZXQ18/rl8bAPwlZ1IE0x2Ik0w2Yk0wWQn0gSTnUgT+lTjlywBAur9v03VSy6Lq/qSVXG73dh4WU+6ag6jc6t44JqeHuuNl05ucHZVn3r93/uF4qrnNDq3arwsrurprx9X/d4biK/sRJpgshNpgslOpAkmO5Em9CnQzZrluh2y6mJ9srhqrKq4YmRuVbyhmxp48jkNziGC5Gsx1So29VC1hsriRsZeKG5kkxJPPaeq+NuY4/zhB+C+++RjG4Cv7ESaYLITaYLJTqQJJjuRJhqd7NWq7+0SUZPiV
jXebrfjySefxJIlS1BeXo6vvvoKXbp0weOPP45OnTph7Nixnl5n4z30kGsrpKpiKmOkzRUw3upq5DlVPND+apRX22WN8lR7rYyRltsLrcXIc9Y/09PI369br+yzZ8/GypUr8fTTTyPoF6dkevTogeXLlzdqQUTkHW4l+yuvvIKlS5di1KhRaPGL/1MlJSXh4MGDHlscEXmOW8l+/PhxXHHFFS5xu92OOtVVTIjIp9xK9m7dumHnzp0u8ddffx19+vRp9KKIyPPcKtDNmDEDo0ePxvHjx2G32/HGG2+gqKgIr7zyCt5++21Pr5GIPMAkhHslvp07d2LWrFn497//jTNnzuDqq6/GjBkzMHToUE+vsVFsNhssFgusGzYgPDTU+U5VX7usYqrqUzfaM28k7s25VfMbnEO0lP+7mH5UfJwzsmGI0b5zo3HZ/N6cG5D346vOutSb21ZVBcvw4bBarQgPD5c/5gLc/iLMtddeiy1btrj7cCK6xNz6zD5mzBisWrXKJW6z2TBmzJhGL4qIPM+tZF+5ciUmTJiASZMmwf6LJpGzZ89K/ydARL7ndrvsO++8g3fffRfp6ek4ffq0J9dERF7gdrJ369YN+fn5qKurQ79+/fDll196cl1E5GFuFehMP/X9RkVF4YMPPsC4ceOQmpqKZ555xqOL86g77nDtVzbSp97IbXwdjPS7+6DX3agm1RtvlJFeeiM97Rei6qVvyNhG/j24ley/PFvXsmVLLF++HN26dcOECRMatRgi8h63kn3r1q2IjIx0imVnZ6NXr17YvXu3RxZGRJ7ldlONv3A01ZjNCOfbePolP3sbbxMClpoa7zfVZGdn44knnkBoaCiys7MvOHbu3LkNmnPx4sVYvHgxjh07BgDo3r07ZsyYgWHDhgE4vzHGQw89hDVr1qCmpgbp6elYtGgRoqOjG7psIvpJg5O9sLDQ8Y22wsJC5TiTgf9bduzYEXPmzMGVV14JIQRWrVqF4cOHo7CwEN27d8eUKVPwzjvvYO3atbBYLMjKysKIESPc+6jw3ntAWJhzTNUaKvu/r9FWVNX/wY3M4825VfMYbZcNkL/imewNawG9YFz1bspo66qRebw5tyre0DnOnAEGDZKPbYAm9zY+MjISzzzzDEaOHIl27dph9erVGDlyJADg4MGD6Nq1K/Ly8tC/f/8Gzed4G79tG8KZ7Befh8nuvblV8QbOYTtzBpZBg9x+G++RDSdtNhvefPPNRm1cce7cOaxZswZVVVVITU1FQUEB6urqkJaW5hiTmJiI+Ph45OXlKeepqamBzWZzuhGRm8l+xx134MUXXwRwvkW2b9++uOOOO9CzZ0+sW7fO0Fyff/45wsLCYDabMW7cOKxfvx7dunVDWVkZgoKCEBER4TQ+OjoaZWVlyvlycnJgsVgct7i4OMPHR9QcuZXsO3bswLXXXgsAWL9+PYQQqKiowPz58zF79mxDc/3qV7/Cvn37kJ+fj/Hjx2P06NE4cOCAO8sCAEyfPh1Wq9VxKykpcXsuoubErfPsVqvVcZ598+bNyMjIQKtWrfCb3/wGf/rTnwzNFRQU5NjiKjk5GZ988gleeOEF/Pa3v0VtbS0qKiqcXt3Ly8sRExOjnM9sNsOsuhYakcbcSva4uDjk5eUhMjISmzdvxpo1awAAp0+fRnBwcKMWZLfbUVNTg+TkZAQGBiI3NxcZGRkAgKKiIhQXFyM1NdX4xEOGNPy8qtEtnI1oWvXQRvPrdlkjjG5TbURDz+H7ol128uTJGDVqFMLCwpCQkIBBP50O2LFjB3r27NngeaZPn45hw4YhPj4elZWVWL16NbZt24b33nsPFosFY8eORXZ2NiIjIxEeHo6JEyciNTW1wZV4Ivovt5J9woQJSElJQXFxMW688UYE/HQKp0uXLoY+s588eRL33HMPTpw4AYvFgl69euG9997DjTfeCACYN28eAgICkJGR4dRUQ0TGefU8e3h4OPbt24cuXbp46ykuy
nGevUUL13ZZFb6Np/qawNt4mxCwnDvn2/PsKk2sX4dIa7yKK5Em3N5d1u/k5zeuN97ohf2MtroaaV01uhYjazQ4h+F2WSMXvDTaomo0bqR11WhbrJF5jPTG9+0rH9sAfGUn0oRXk93IN+CIyLtYoCPSRKOTXQihTOpNmzbhsssua+xTEJEHuJ3sL7/8Mnr06IHg4GAEBwejR48eWL58udOYgQMHsk+dqIlw+yquc+fOdbSvAkBeXh6mTJmC4uJizJo1y6OL9Ii+fRvXGMGPJFKsyhjky79B4Ya2bduK1atXu8RXr14toqKi3JnSa6xWqwAgrIAQJpP7t/P/1Lzx1rhbI/4GrYAAIKxWq1u54Nbb+Lq6OvSVnO9LTk7Gj6rzjkTkU24l++9//3ssXrzYJb506VKMGjWq0YsiIs8ztJX0z0wmE5YvX47333/f8XXT/Px8FBcX45577vH8Komo0Rr8rbcbbrihYROaTPjwww8btShPcnzrDWj4t95kGvbPRHRhjfgbtAkBC+D9i0Rs3brV8ORNyv79QOvWzjEjfeCe6kdvZs8pFPV4ExT/c/REz7g34035OSsrge7d5WMbgL3xRJpw6zx7dXU1FixYgK1bt+LkyZOw1/s/0KeffuqRxRGR57iV7GPHjsX777+PkSNHol+/fvzCC5EfcCvZ3377bbz77rsYMGCAp9dDRF7iVrJfdtllaF2/2NXUNaKwQWp8T+c/3CrQPffcc3jkkUfwzTffeHo9ROQlbr2y9+3bF9XV1ejSpQtatWqFwMBAp/u///57jyyOiDzHrWS/8847cfz4cTz11FOIjo5mgY7ID7iV7B999BHy8vKQlJTk6fUQkZe49Zk9MTERZ8+e9fRaiMiL3HplnzNnDh566CE8+eST6Nmzp8tndnf6dr3u669d22VVPNG6amRuo/N4ql3WA3N4tV1WxVOtq96aw+g8DZ27shK4/HJj6/gFt5L9pptuAgAMHjzY6fO6EAImkwnnvHn5JCJyi1vJ7vdfiiHSkFuf2a+//noEBARg2bJlmDZtGq644gpcf/31KC4uRouGXmuaiC4pt5J93bp1SE9PR0hICAoLC1FTUwPg/Pdsn3rqKY8ukIg8w61knz17NpYsWYJly5Y5FecGDBjAb7wRNVFufWYvKirCdddd5xK3WCyoqKho7Jq8oxFVTFJjO5X/cOuVPSYmBocPH3aJ79q1C126dGn0oojI89xK9vvvvx8PPvgg8vPzYTKZUFpaitdeew1Tp07F+PHjPb1GIvIAt97GT5s2DXa7HUOGDMEPP/yA6667DmazGVOnTsXEiRM9vUYi8oAG7y4rU1tbi8OHD+PMmTPo1q0bwsLCPLk2j3DaXdbXiyFqBBtwaXaXlQkKCkK3bt0aMwURXSKNSna/cuwYUP//ht7sRzc6z6We20Nr8VhvfGPHenu8N3vjGzqHzQZ06mRsHb/AraSJNMFkJ9IEk51IE0x2Ik3oU6BrRGGD1Ngu6z/4yk6kCSY7kSaY7ESaYLITaaJJJfucOXNgMpkwefJkR6y6uhqZmZmIiopCWFgYMjIyUF5e7rtFEvmpJlON/+STT/DSSy+hV69eTvEpU6bgnXfewdq1a2GxWJCVlYURI0Zg9+7dxp7g22+90y5rVDN7TsPtsirebF1tLs9pswEdO7r91E3ilf3MmTMYNWoUli1bhjZt2jjiVqsVL7/8MubOnYvBgwcjOTkZK1aswEcffYQ9e/b4cMVE/qdJJHtmZiZ+85vfIC0tzSleUFCAuro6p3hiYiLi4+ORl5cnnaumpgY2m83pRkRN4G38mjVr8Omnn+KTTz5xua+srAxBQUGIiIhwikdHR6OsrEw6X05ODv761796Y6lEfs2nr+wlJSV48MEH8dprryE4ONgjc06fPh1Wq9VxKykp8ci8RP7Op8leUFCAkydP4uqrr0bLli3RsmVLbN++HfPnz0fLli0RHR2N2tpalx1ry8vLERMTI53TbDYjP
Dzc6UZEPn4bP2TIEHz++edOsT/84Q9ITEzEI488gri4OAQGBiI3NxcZGRkAzm9jXVxcjNTUVGNP1ogqJqmxN95/+DTZW7dujR49ejjFQkNDERUV5YiPHTsW2dnZiIyMRHh4OCZOnIjU1FT079/fF0sm8ls+L9BdzLx58xAQEICMjAzU1NQgPT0dixYt8vWyiPxOo3aX9QfcXZaai8buLtskzrMTkfcx2Yk00eQ/s3tMWZlrb7yKp3rSvTW3L3rmFTzWGy/ji350b87R2LltNkBxyrkh+MpOpAkmO5EmmOxEmmCyE2mCyU6kCX2q8Y2oYpIae+P9B1/ZiTTBZCfSBJOdSBNMdiJN6FOgO3myabTL+uI5vTi3V9tlVZpC66ovntNmA9q3d/tp+MpOpAkmO5EmmOxEmmCyE2mCyU6kCX2q8Y2oYpIa22X9B1/ZiTTBZCfSBJOdSBNMdiJNMNmJNKFPNf677xreG2+EL/roVXywFp/0xqv4oq9dxRtrsdmAtm3dfngT+kslIm9ishNpgslOpAkmO5EmmOxEmtCnGt+IKiapsTfef/CVnUgTTHYiTTDZiTTBZCfShD4Fuu+/9067rDc1pVZchSbVLmtUU2qvbQibDYiMdPvhTf+viYg8gslOpAkmO5EmmOxEmmCyE2lCn2p8I6qYpMZ2Wf/BV3YiTTDZiTTBZCfSBJOdSBM+Tfa//OUvMJlMTrfExETH/dXV1cjMzERUVBTCwsKQkZGB8vJyH66YyH/5vBrfvXt3fPDBB46fW7b875KmTJmCd955B2vXroXFYkFWVhZGjBiB3bt3G3+iS90b7wd97Z7g173xnnAp++sb2Rvv82Rv2bIlYmJiXOJWqxUvv/wyVq9ejcGDBwMAVqxYga5du2LPnj3o37//pV4qkV/z+cvPoUOHEBsbiy5dumDUqFEoLi4GABQUFKCurg5paWmOsYmJiYiPj0deXp5yvpqaGthsNqcbEfk42VNSUrBy5Ups3rwZixcvxtGjR3HttdeisrISZWVlCAoKQkREhNNjoqOjUVZWppwzJycHFovFcYuLi/PyURD5B5++jR82bJjjv3v16oWUlBQkJCTgX//6F0JCQtyac/r06cjOznb8bLPZmPBEaAJv438pIiICV111FQ4fPoyYmBjU1taioqLCaUx5ebn0M/7PzGYzwsPDnW5E1AQKdL905swZfP311/j973+P5ORkBAYGIjc3FxkZGQCAoqIiFBcXIzU11fjk7I33CvbG+w+fJvvUqVNx8803IyEhAaWlpZg5cyZatGiBO++8ExaLBWPHjkV2djYiIyMRHh6OiRMnIjU1lZV4Ijf4NNm//fZb3HnnnfjPf/6Ddu3aYeDAgdizZw/atWsHAJg3bx4CAgKQkZGBmpoapKenY9GiRb5cMpHfMgkhmnX3g81mg8VigRUAP72TP7MBsOB8D4o7tagmVaAjIu9hshNpoklV473KSG+8Jn3tnqB9b7wnNLS/nvvGE1FDMNmJNMFkJ9IEk51IE/oU6Ngu6xVsl/UffGUn0gSTnUgTTHYiTTDZiTTBZCfShD7V+IqKS7uVtCbYLnsJ2WxAvT0ZjeArO5EmmOxEmmCyE2mCyU6kCSY7kSb0qcY3oopJauyN9x98ZSfSBJOdSBNMdiJNMNmJNMFkJ9KEPtV49sZ7BXvjLyH2xhNRQzDZiTTBZCfSBJOdSBNMdiJN6FONZ2+8V7A33n/wlZ1IE0x2Ik0w2Yk0wWQn0oQ+BTq2y3oF22UvIbbLElFDMNmJNMFkJ9IEk51IE0x2Ik3oU41nu6xXsF3Wf/CVnUgTTHYiTTDZiTTh82Q/fvw47r77bkRFRSEkJAQ9e/bE3r17HfcLITBjxgx06NABISEhSEtLw6FDh3y4YiL/5NNkP336NAYMGIDAwEBs2rQJBw4cwHPPPYc2bdo4xjz99NOYP38+lixZgvz8fISGhiI9PR3V1dU+XDmR/zEJIXzWxDxt2jTs3r0bO3ful
N4vhEBsbCweeughTJ06FQBgtVoRHR2NlStX4ne/+91Fn8Nms8FiscBaUYFw9sZ7HHvjLx2bzQZLRASsVqtbf8s+fWV/66230LdvX9x+++1o3749+vTpg2XLljnuP3r0KMrKypCWluaIWSwWpKSkIC8vTzpnTU0NbDab042IfJzsR44cweLFi3HllVfivffew/jx4zFp0iSsWrUKAFBWVgYAiI6OdnpcdHS04776cnJyYLFYHLe4uDjvHgSRn/Bpstvtdlx99dV46qmn0KdPHzzwwAO4//77sWTJErfnnD59OqxWq+NWUlLiwRUT+S+fJnuHDh3QrVs3p1jXrl1RXFwMAIiJiQEAlJeXO40pLy933Fef2WxGeHi4042IfJzsAwYMQFFRkVPsq6++QkJCAgCgc+fOiImJQW5uruN+m82G/Px8pKamXtK1Evk94UMff/yxaNmypXjyySfFoUOHxGuvvSZatWolXn31VceYOXPmiIiICLFhwwbx2WefieHDh4vOnTuLs2fPNug5rFarACCsgBC88ebHNysgAAir1epWvsGtR3nQxo0bRY8ePYTZbBaJiYli6dKlTvfb7Xbx+OOPi+joaGE2m8WQIUNEUVFRg+dnsvPWXG6NTXafnme/FBzn2QHw0zv5MxsAC+Cf59mJ6NJhshNpQp/NK7iVtFewXfYS4lbSRNQQTHYiTTDZiTTBZCfSBJOdSBP6VOO5lbRXcCtp/8FXdiJNMNmJNMFkJ9IEk51IE82+QPfzl/q47ST5u5//ht39omqzT/bKykoAALedpOaisrISFovF8OOa/ffZ7XY7SktL0bp1a1RWViIuLg4lJSXNem86m83W7I9Th2MEnI/z57/h2NhYBAQY/wTe7F/ZAwIC0LFjRwCAyXT+rLAuG1HqcJw6HCPw3+N05xX9ZyzQEWmCyU6kCa2S3Ww2Y+bMmTCbzb5eilfpcJw6HCPg2eNs9gU6IjpPq1d2Ip0x2Yk0wWQn0gSTnUgT2iT7woUL0alTJwQHByMlJQUff/yxr5fUKDt27MDNN9+M2NhYmEwmvPnmm073CyEwY8YMdOjQASEhIUhLS8OhQ4d8s9hGyMnJwTXXXIPWrVujffv2uPXWW10uBlpdXY3MzExERUUhLCwMGRkZLlf+bcoWL16MXr16ORpnUlNTsWnTJsf9njo+LZL9n//8J7KzszFz5kx8+umnSEpKQnp6Ok6ePOnrpbmtqqoKSUlJWLhwofT+p59+GvPnz8eSJUuQn5+P0NBQpKeno7q6+hKvtHG2b9+OzMxM7NmzB1u2bEFdXR2GDh2Kqqoqx5gpU6Zg48aNWLt2LbZv347S0lKMGDHCh6s2pmPHjpgzZw4KCgqwd+9eDB48GMOHD8f+/fsBePD43LpCnJ/p16+fyMzMdPx87tw5ERsbK3Jycny4Ks8BINavX+/42W63i5iYGPHMM884YhUVFcJsNot//OMfPlih55w8eVIAENu3bxdCnD+uwMBAsXbtWseYL7/8UgAQeXl5vlpmo7Vp00YsX77co8fX7F/Za2trUVBQgLS0NEcsICAAaWlpyMvL8+HKvOfo0aMoKytzOmaLxYKUlBS/P2ar1QoAiIyMBAAUFBSgrq7O6VgTExMRHx/vl8d67tw5rFmzBlVVVUhNTfXo8TX7L8J89913OHfuHKKjo53i0dHROHjwoI9W5V1lZWUAID3mn+/zR3a7HZMnT8aAAQPQo0cPAOePNSgoCBH1NhT1t2P9/PPPkZqaiurqaoSFhWH9+vXo1q0b9u3b57Hja/bJTs1HZmYmvvjiC+zatcvXS/G4X/3qV9i3bx+sVitef/11jB49Gtu3b/foczT7t/Ft27ZFixYtXKqX5eXliImJ8dGqvOvn42pOx5yVlYW3334bW7dudXxlGTh/rLW1taioqHAa72/HGhQUhCuuuALJycnIyclBUlISXnjhBY8eX7NP9qCgICQnJyM3N9cRs9vtyM3NRWpqqg9X5j2dO
3dGTEyM0zHbbDbk5+f73TELIZCVlYX169fjww8/ROfOnZ3uT05ORmBgoNOxFhUVobi42O+O9Zfsdjtqamo8e3weLiI2SWvWrBFms1msXLlSHDhwQDzwwAMiIiJClJWV+XppbqusrBSFhYWisLBQABBz584VhYWF4ptvvhFCCDFnzhwREREhNmzYID777DMxfPhw0blzZ3H27Fkfr9yY8ePHC4vFIrZt2yZOnDjhuP3www+OMePGjRPx8fHiww8/FHv37hWpqakiNTXVh6s2Ztq0aWL79u3i6NGj4rPPPhPTpk0TJpNJvP/++0IIzx2fFskuhBALFiwQ8fHxIigoSPTr10/s2bPH10tqlK1btwoALrfRo0cLIc6ffnv88cdFdHS0MJvNYsiQIaKoqMi3i3aD7BgBiBUrVjjGnD17VkyYMEG0adNGtGrVStx2223ixIkTvlu0QWPGjBEJCQkiKChItGvXTgwZMsSR6EJ47vj4FVciTTT7z+xEdB6TnUgTTHYiTTDZiTTBZCfSBJOdSBNMdiJNMNmJNMFkp4vatm0bTCaTy5cxyL8w2cmntm3bhr/85S9OsWPHjmHs2LHo3LkzQkJCcPnll2PmzJmora294FwnTpzAXXfdhauuugoBAQGYPHmy9xbuh5js5BNLlixx2gOwtrYWzz33HOrq6nDw4EHY7Xa89NJL2L9/P+bNm4clS5bgz3/+8wXnrKmpQbt27fDYY48hKSnJ24fgd5jsfuT1119Hz549ERISgqioKKSlpTk2Xly+fDm6du2K4OBgJCYmYtGiRU6P/fjjj9GnTx8EBwejb9++WL9+PUwmE/bt2+fWWnbt2oVrr70WISEhiIuLw6RJk5w2gezUqROeeuopjBkzBq1bt0Z8fDyWLl3quD8uLg633HIL1q9fj/3792Pw4MEAzl9W+6abbsKKFSswdOhQdOnSBbfccgumTp2KN95444Jr6tSpE1544QXcc889jbq0cbPlue/ukDeVlpaKli1birlz5zq+Crlw4UJRWVkpXn31VdGhQwexbt06ceTIEbFu3ToRGRkpVq5cKYQ4/3XYdu3aibvuukt88cUXYuPGjaJLly4CgCgsLLzoc//8DbvTp08LIYQ4fPiwCA0NFfPmzRNfffWV2L17t+jTp4+49957HY9JSEgQkZGRYuHCheLQoUMiJydHBAQEiIMHDzrGVFRUiMTERNGqVSvx6aefXnANjz76qEhOTm7wv9f1118vHnzwwQaP1wGT3U8UFBQIAOLYsWMu911++eVi9erVTrEnnnjC8Z3nl156SURFRTl9l33x4sVuJ/vYsWPFAw884DRm586dIiAgwPEcCQkJ4u6773bcb7fbRfv27cXixYuFEEJs2rRJ9O/fX0yaNEmMHDlSDBw4UDz//PPixx9/dHn+Q4cOifDwcLF06dKLrvVnTHZXfBvvJ5KSkjBkyBD07NkTt99+O5YtW4bTp0+jqqoKX3/9NcaOHYuwsDDHbfbs2fj6668BAF9++SV69eqF4OBgx3yN2cXl3//+N1auXOn0fOnp6bDb7Th69KhjXK9evRz/bTKZEBMT4/icfvToUWzYsAG33XYbunfvjtzcXNTV1cFutzs91/Hjx3HTTTfh9ttvx/333++I//K5x40b5/ax6IQbTvqJFi1aYMuWLfjoo4/w/vvvY8GCBXj00UexceNGAMCyZcuQkpLi8hhvOHPmDP74xz9i0qRJLvfFx8c7/jswMNDpPpPJ5Ejm8ePHAwAOHDgA4Pz2YVOnTnUaX1paihtuuAG//vWvnT7vA3CqNYSHh7t/MBphsvsRk8mEAQMGYMCAAZgxYwYSEhKwe/duxMbG4siRIxg1apT0cV27dsX//d//obq62vHqvmfPHrfXcfXVV+PAgQO44oor3J7jZ4MGDcKgQYNc4sePH8cNN9yA5ORkrFixAgEBzm9CPfHcumGy+4n8/Hzk5uZi6NChaN++PfLz83Hq1Cl07doVf/3rX
zFp0iRYLBbcdNNNqKmpwd69e3H69GlkZ2fjrrvuwqOPPor7778f06dPx7Fjx/Dss8+6vZZHHnkE/fv3R1ZWFu677z6EhobiwIED2LJlC1588cVGH+vx48cxaNAgJCQk4Nlnn8WpU6cc911sR9WfX/HPnDmDU6dOOfZd79atW6PX5fd8XTSghjlw4IBIT08X7dq1E2azWVx11VViwYIFjvtfe+010bt3bxEUFCTatGkjrrvuOvHGG2847s/LyxNJSUkiKChI9O7dW6xbt87tAp0QQnz88cfixhtvFGFhYSI0NFT06tVLPPnkk477ExISxLx585zmSUpKEjNnzrzo861YsUK599zFyB6TkJBw0cfpgHvQaerYsWPo3LkzCgsL0bt3b18vhy4BVuOJNMFkJ4wbN87pVBZPazVPfBtPOHnyJGw2m/S+8PBwtG/f/hKviLyByU6kCb6NJ9IEk51IE0x2Ik0w2Yk0wWQn0gSTnUgTTHYiTfw/fEfYk/ofryAAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from speechbrain.nnet.attention import RelPosEncXL\n", + "from matplotlib import pyplot as plt\n", + "\n", + "test_pos_encoder = RelPosEncXL(64)\n", + "test_pos = test_pos_encoder.make_pe(seq_len=16)\n", + "print(f\"(batch, seq_len*2-1, emb_size): {tuple(test_pos.shape)}\")\n", + "plt.imshow(test_pos.squeeze(0).T, cmap=\"bwr\")\n", + "plt.xlabel(\"seq_len*2-1\")\n", + "plt.ylabel(\"emb_size\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "311c95ac-5186-4d6c-a4db-4c00acb3e53b", + "metadata": {}, + "source": [ + "In the above figure, the center column corresponds to the position embedding vector for a positional difference of zero, i.e., the key being attended to is the same input as the query.\n", + "\n", + "The horizontal distance from the center represents the distance between a given query and key pair inside the self-attention. \n", + "One column to the right of the center would represent a distance of $1$, etc.\n", + "\n", + "This doesn't depend on the distance of either the key or the query from the start of the sequence. \n", + "At inference time, we only need to make the `seq_len` above ever as large as the attention window is (left context + active chunk).\n", + "\n", + "Note that this embedding is further enriched by being passed into a learnable linear layer first." + ] + }, + { + "cell_type": "markdown", + "id": "449a9836-9cb2-4b19-8398-83c755fcf4e7", + "metadata": {}, + "source": [ + "## Training strategies and Dynamic Chunk Training\n", + "\n", + "### What metrics does the chunk size and left context size impact?\n", + "\n", + "Usually, when streaming, we try to split the input stream in a way that matches the chunk size, and we process chunks one by one as they arrive.\n", + "\n", + "**Smaller chunks degrade accuracy** more, but result in **lower latency**. 
\n", + "It's a tradeoff depending on the final usecase, and it's worth benchmarking different chunk sizes on whatever test dataset is representative of the final application.\n", + "\n", + "Looking at the data from [`speechbrain/asr-streaming-conformer-librispeech`](https://huggingface.co/speechbrain/asr-streaming-conformer-librispeech), for a left chunk count of 4, and this _specific model and dataset_, we get a curve like this (mind the scale):" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ff57b4d7-adb6-4c70-99b2-2faad41f6d8b", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAk0AAAHFCAYAAADv8c1wAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAABhRklEQVR4nO3dB5gU9f3H8S/H0TsoRUB6ERAEqRaKCoiIGqMQE0EjSKTZ/qLBghKiYCwo0aAoIoQiKnYEBKVopKmA9ICAIFU84Ohwd/t/Pj+cze7e3jHIHXvl/Xqe4djZ2dnZ2dnZz/7a5AkEAgEDAABAuuLSvxsAAABCaAIAAPCB0AQAAOADoQkAAMAHQhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGjCafnXv/5lb775pmV3VatWtdtvv91ym88//9yaNm1qRYoUsTx58tgHH3xgOdncuXPd69RfzxNPPOHmRR4P1157bYY/V06gz0nRokV/02O1T4sVK2ZJSUlh85cuXer2VYUKFVI95ssvv3T3jRw5Mmy/pjWFno/atm0bdl/BggWtXr169ve//92OHz9u2YW2vX///jF5bu/zsWfPnkw7nqpWrWrZVXysNwDZLzSdc8452T5wvP/++1a8eHHLTXTFpK5du1rt2rXto48+csGpTp06ltv06tXLrr766gxfb5MmTWzBggXuSxontWvXzqZNm2bffPONtWzZMjhfQUjH386dO23t2rVWt27dsPu8x4Z66qmnUs2TGjVqhN2uXr26TZw40f3/559/ttdff90ee+wx27Jli40ePTrDXyNOj96Le+65x7IrQlMuc/jwYStcuPBZea4TJ064Xyzx8VnvMGvcuLHlNtu3b7eEhAT73e9+Z1deeWWGrPPIkSPu13xkyc3ZdLrHWaVKldyU0c+vEB4aDPC/4KMgFBmarr/+epszZ46bIkOTfpg1aNAgbF21atXytX8LFSoUtlynTp1ckB03bpwrvdLxitipERFysxuq57KYDRs22J///Gd3glC4qVixonXp0sVWrFgRtlxKSoorclZJgU4SJUuWtIYNG9qLL76Yqpj1u+++s5tuuslKlSqV7gG7ceNG+8Mf/mDnnXeeFShQwMqVK+e+XJctW+buV5HqqlWrbN68ecHib6+Y1StC//e//23/93//57Zb69DrkdmzZ7t16YtFr+vSSy91VUW/5bV7zzVp0iR76KGHXBG/qg+07K5du+zAgQPWu3dvd+LVpHUePHgw3eo5b52TJ0+2Rx55xO0DbetVV11l69atS1Vio1+9VapUcSdgVXfNmjXLVQ1o8uPYsWP2t7/9zS644AK3jjJlyrgvmK+//jq4zNGjR23QoEFWrVo
1y58/v9sf/fr1s3379qV6LaoGmTFjhivt0PGgL6E33ngj7FjwgoL2Weh7J1999ZV7f1SVon1/ySWXuBKCUKoG0eM+++wzu+OOO+zcc891y+q16HXrS04lLXqstkHrHzt2rHus1qVt0/IXXnih29ZI69evtz/+8Y9WtmxZd+xo37z88stR3/u0jjM/olXPhZZA6nOk90QlFl4VkZ/nj1Y9d6rPVOj7d6rnlsTERHvggQfCjol7773XDh06lOoYVanwRRdd5N4LffZ1DtD2RNJ7oW0qUaKEe3+034cNG5ZqOb3Ga665xn3WKleu7F6/3vv06Pn13KH7ROcuVcHpmGnTpo0LTR5VoekY8qrZMoLCtLZD64787ET7MXX55Zenmp+cnOz29Y033hicN2rUKGvUqJHbH/rc6DP38MMPZ8hn36PjTMvpfdFzffLJJ76quqId416V36nWGY1KA3VMtmjRwnbv3p3mcirZ07lXx4eOd50jdK7X+T+tbfa2NdoUeo7W+6fvPO1nb906t+s5z6asVwSQy6k0QB+i4cOHu4NCJQP6haSDVe0AvOqUf/zjH+5ge/TRR61169bu164O7GgnBX3QdeK+6667Up1cQ+mEqJOD1n3++ee7Om19kL116qSuE69Orjohiw7eUPqSb9Wqlb3yyisWFxfnvgAnTJhgPXr0cL8s9Vry5ctnr776qnXs2NFmzpwZLPXw+9o9OkHpZKMv882bN7svk1tuucWdJHUyUADS47ScTmrRvoQiaVl9yFWkry8oBQyFsTVr1ljevHndMgpV+lLRyUH7duvWra7KR++Bqr5ORe079OtXXxz6wrviiivcvIULF7oqBIUOfendcMMNLlhqn+pE/v3339vjjz/uvlQ0he775cuXuy+xv/71r+6LWdvfs2dPq1mzpjs+tH3aJ9reAQMGuHDiPV4huH379u4Le8yYMW6+3l+9bu3Dbt26hW2/AlPnzp3dyVfHk95PUVWLTmIPPvigC2j//Oc/3bLaP++++67btzp29IWh16YvcIUJWb16tXvdOu6ee+45K1++vDs27r77bncc6nWf6jg7Uwoyej/0udLzq4pH1Qg6WevYOtXz6/Wf7mfqdJ5bpcQKGT/99JPbl3q/9CNm8ODB7oeFvpi8L8q//OUv7nOh/ff000+7z5L2u/axjhUdI6L3+84773Tr1WvR6/jvf/9rK1euDNs+HdvXXXedO6Z0nM2fP9+GDh3q3k89f1q0b3T8adt0jOuzqde6d+9e95zaN6HvrT4DKr2MVg2nsBXZNkr8lDBu2rTJ/bDUeSU9On613xXg9ePNox8KOj/pfnnrrbesb9++7rP07LPPutepUKnj+Ew/+x790FiyZIl73xTMdAyplFg/4hRgfovfsk6dH7SM3sdJkyalW1PRvXt39yP9ySefdOdCHee6/csvv5xWdfl7771nzzzzjNWvXz/43uv7Q/tN5xftpx9//NEdOwrYqv7Vj4OzIoAsLSkpKXD8+PFArVq1Avfdd19w/rXXXhu46KKL0n3s448/HtBbPHjw4FM+z549e9yyL7zwQrrL1a9fP9CmTZtU8+fMmeMe37p167D5hw4dCpQuXTrQpUuXsPnJycmBRo0aBZo3b37ar917rsh13nvvvW7+3XffHTb/hhtucNsQqkqVKoHbbrst1TqvueaasOXefvttN3/BggXudkJCQqBAgQKBbt26hS2n+7VctH0Tafz48W7Z1157Lc1lZsyY4Zb5xz/+ETZ/ypQpbv7o0aPDXkvBggUDP/74Y3DekSNH3Gv+y1/+Epy3adMm99hnnnkmbJ0tW7YMlC1bNnDgwIGwfd+gQYNApUqVAikpKW7e2LFj3eN79OiRanv1unXfN998E5z3yy+/BPLmzRsoVKhQYNu2bcH5y5Ytc8uOHDkyOK9jx47uufbv3x+23v79+7vXpv2e3nGWFm95/Y38XITSPsyTJ4/btlDt27cPFC9e3B3Hp3r+yOfy+5n
y+9zDhg0LxMXFBZYsWRK23Lvvvuue59NPPw07Fp977rmw5bZu3ereiwcffNDd1vut9V922WXB9zgafU60Pn0WQumzUqdOncCp6PXr8V9//bW7re2qUKGC+//q1avdfStXrnS3hwwZ4m5rfuR+TWvS6wo9DnWOOnHihJt27Njhzn9a7pVXXjnltuo9y58/f+Dhhx8Om9+1a9dAuXLl3Dq947JkyZKB0+Xnsy9aRs+XmJgYnLdz5073/us4CH1vdPxEinaM+12n99iff/458O9//9vtD51Tdc4+laJFi7rzcHrS2mbPl19+6T7zf/rTn4LH5eTJk902TZ06NWxZfRY0/1//+lfgbKF6LovRrw5V/agOXsXv+hWlv/rlo9IOT/Pmzd0vRv3a0S9ylYqk5fe//33w//rs6DlCJyldurSrulO6f/75510JjdL96Qp9LtGvav3Kve2228KeU+vWrwv96vFKv/y+dk9kbycVOYtKQSLnaxsiq+ii0a/pUPo1L/pVI/pFqOJ1NagOpTYUkcXk+hUd+Zpl+vTprlhepTBp+eKLL9zfyAb3N998s2tAG1m1qeoHlWR4tH790vO2Oy3a94sWLXIliKE9pFSqpl+NKtWIrJ6MfI89qia9+OKLg7d1TKnkQtvmlSiFvk/etqkaUq9Hv2b1KzZ0n6mkRvdrv6e3DZHH9MnviNOjX7UqjQulEjl9tvRr2c8+CHU6nyk/z61qFFWBan+GvlaV2IZWC2o53b711lvDllMJlp7DW06fTa1f55BTVYXpfpU8Rn42TnV8RbZr8v6qlMk7FnSMeFV0uk+lYN4xEkolZjpfRE5eqZlHpW8q/dSkY1KlKioZVOnbqaikW69TJdzee6VSsQ8//NCVlnulWjr/qhRFJdu6z29PMz+f/dD9phJyj16n9pWffZ4R61Rpkc4/KvlXsw+Vpp2K9otKOFWNps+sSihPh87zOgerJEnNC7zjUse0Sgr13oQe0/os6Lg+mz1WCU1ZzP333+96F6j64uOPP3ZfaDox6GSnYmuPTgIqFtaBqeJefdhVzaViykih3Xq96rHQSXRw6otLJ2AV2ar9iYqyVbyvNkJ+RXYhVhsj0Zdy5PPqJKgvNwWa03ntoV9KoRSw0puvL99T0X4M5VVhec/vFTNHnqijzdMXZujr1clbVAevEJHeSUjPoxN0ZHWC3iedJCKLuyO329v2aPstlL4Q9B5E6/rtBZ3I54q2bLT97u37U70fWr9OgKrOizxGFJok8kspdBtUNRv5OFUpnC7t17Tm+d0HoU7nM+XnufVZUhVt5GvVl6DeQ28faTnd1vEYuazOF95yXlsQP43iFWYjG1Dr+PLzmVIbNrUtVDDy2jN5oUlU7aMvPf0YUbVztKo5UfWR2g9GTt45LPRzp/PG4sWL7Z133nHnD1Wnq0rNDwWabdu2uXaKoipqbVvoDxj9oNCXusKGArSCh5oReI9Ji5/P/pl+pjNqnWpWoXZcatrh15QpU9wPZDUPUPW1PvsKm9GqriOp+lM/pHU8qnrOO094x7RCquZFHtNad2YNjxANbZqyGK/9j0pcQumgUNL26AtVIUOTDia1GVA7B52g1YYktN459FekkrpOKNGoYbPaOIjaNbz99tuujYXaVai9gx+Rv1h1shR9IabV88ULG35feyx5Jx0vDIbShze0tEnBL7ShrBdC9MWphtf6Aknr5KnnUZDQSTY0OOnLUM/TrFmzDHk9aqSrbdixY0fUk1joe+jJ6J5y2gavZEsN3aNRw+e0tkH7NfKY/i1DKUQ7sXvzIr9s/O4Dv58pP8+t90HtNkIb+Ify3if91fYpnES2ORRvnndcqTQxM2lbFJLU4FxBRuer0NCk/2ufKDAphKUVmvzyOmeIPidan0ry1IZIpdOnGnNK51AdU+rEoP/rrwJR5FASat+kSaW1auOl9jVav95nve/R+Pnsn+5rjdYYPyNChN4vtWdUe0qF/7ReUygdey+88IKb1EZ
LQ5uonaUaj0fr/OFRiad+IGm/fPrpp66tXOR69TlIax2hpWeZjZKmLEYnmMgTnRrv6ZdPWhQoVJKjLxyV2uiXd1p04EX+UotGVTtqZK5fiaFVE6f7S0eNqrV9aiAZ7VeiJu8XxW957WebTp7aRv2iCqVf8JFF3Np3oa/TC00qGdSXQ3qDhHqN4xUkQ02dOtWdpDNqyABV9ek16Zdd6Puqk5eeW7/6/DRuPxMK+PpiU/WVqnyiHSPRfiF7dPxELv9bTqKq1lGVdyg1fNW6VEp0ptL6TPl9bn0h//DDD1E/w5q8wK7lFK71uYm2nJ5fVAWiLyeFt99SnXk69P7quFVVpUplQqvfFJpUmqYfVt6yGcnrXKIfOt5zpMcL8Br4VcFTpffpVafpM6TPtDqIKAzrvUyLn8/+6dB7rkAS+iNO26AmG2dKIckL3gpOaiZxOtRcQL311Mkk8ngPpe1V1by+t1R9Ga3kU8e0jhE1eYh2TJ/N8eYoacpidHDoA6VulfoC+fbbb92JJvJAUomR2jfogNGvF31hK93rQA/t9eGXiv11gKvNjB6vLyK1q9F8/VLw6ISrYm6FBhWX65eOdxKORr/qdKJSka0CncKdTpoqQdGXhP6q6+7pvPZYUnGzSvdU3K8SEn3Y9Ut9yJAhrsrGz69HtYPQr1f1ZlR7IX1JKKSoOlJfJioO14lGv3LVe0+/whQ+vd5z6hatk3pG0WvR82k71FNL7716z6kHlaomzsYYTGozcdlll7mTc58+fdyXgaqw1CNJJXZeG6/MpFCr9hQq9dB7qdCo6hZVI/+Wsc38fqb8PrdKShSaVZ113333uc+Ijhv9olfvLvVqUwDWsaKenSoF0Re+ltcXu0oTVcqhz6v2sT6b6qmo3ksaWkO96FTqq32uz+ZLL72UYfvWC0JeD9xQOo8p2Og+VQeldf7Sl3Zk2za/426pBFvtytSkQT8uTzWwrUKS9r3alal0L7IHqfaV5mtf6/1SqaA+Rwqh6ZUC+/nsnw5tl3ov6nEDBw50gUy9hBUuMoJem6q6dS7ScTRr1qxU42d59u/f716P9pnO4Qr8KgFW6VDoUA2RdCzrc6EaBrU7DX2P9d2m6la9PvUoVWmUejeq7ZSq5nTuVbWvetbpXHxWnLUm5/Bl7969gZ49e7reTIULF3Y9W9SbQL1CQntmqQfKJZdcEjjnnHNc74bzzz/fPW7z5s1Re0Gcyq5duwK33357oG7duoEiRYq4XhANGzYMjBgxwvWk8mj9HTp0CBQrVsyt2+sF4fVweeedd6Kuf968eYHOnTu7Hl358uULVKxY0d0OXd7va0/rubzeXZG9i6Lth7R6z0Wu0+txpnV71KPj73//u+vtpX2v/fTJJ5+43oC/+93vAn6od5t69ahnoNZRpkyZwBVXXBHsYeQt89BDD7lt1T5Tj6M+ffq4/RRK92tfRorcb2n1nhPtZz2/3nv1sFKPuo8//tjX/g3ttRQprW3Tevr16xc2T9t3xx13uGNDr/fcc891x7j2tedUx9mZ9J7Tdqonml6H3pOqVasGnn/++ajri/b8kc/l9zPl97nl4MGDgUcffdT1WtNyJUqUCFx44YWud6l6QoV64403Ai1atAi+pzVq1HA9H0N7OIp63en903L63NWrVy/w9NNPB+/X50T3RYq2H9NTvnx5t/xLL72U6j71cNV96jGV1n5Na3rkkUdOeRzKtGnT3PLqoeeHjr20tmncuHGBdu3aud5oeh/OO+8818Pu+++/z5DPfrTPR7Tzlvf+qSe13uPq1au7/ZtW7zk/64x2vty3b1/g0ksvdefvaJ9/OXr0aOCuu+5yx7h6ZWp7dJxqfV4P0Gi957yet9Gm0O1Sz8Vnn33WnWfVu06fJ3221EN4/fr1gbMlj/45O/EMyLk0Dox+XakkyM8Ad4BHpWr69e5nkEEAsUX1HHCaVHWhaiu1CVExv4rZ1TtK/9fgfwCAnInQBJwmtQ9RWxH1ilJPILV
j0Ki0Gtck2lAEAICcgeo5AACA7DTkgHoeqJeOeoikRy35Neqwd1HLaOMHqYeJxtRQV0n9Va+MUGqFrwsKqieUehyEUrdHdQ1Ob4RtAACQ+2SJ0KRuiaNHjw5esiK9xrbqcqhuyd6FWDW6rkKSRwOkqRumumSr7Yn+6pIX6tLpDfqlLrbqeqqxLDRCdujV3NUVV2N6nKpLKgAAyF1iXj2ncRk0eJvGhdH1anQtGY03FI3GrNEIo6HXIdN4FwpHCkuiwKRSIg2S5dHQ7BpTR413NSKtxkPxRtvV8hrrSCVOGkxO4w/pWkIAAABZqiG4BhrTBVY1uJpCU3oUjDp06BA2T4NuqUGuLgyowa60jAbLilzGC2IaOO3w4cOupEoDQaqUSwOZaeBFDRLmXTjyVDR0fejw9RqgTOvQIG1nYzBAAABw5lR2pMF0/VwXMKahSSNLa3j1tK6FFkmlQ5G9k3Rb1+hStZs3Mmu0ZbySJZU4qUpOI8TqshH6q1Cl4DRgwABXBaiSKIUwjc4bOXptaBssjQINAACyP1239VSjy8fHcuM0HLqG/4+8enZ6IktxvNrF0PnRlgmdp+HWQ4dc1xW2V6xY4S4bULNmTVeNpyuMa6h2DR2vy35EGjRokLucRugQ8rrWjl4X7aEAAMge1KRHncP8XLMyZqFJ1xXThQbVE86j6+XoatEKL6r60oUTQynIRF4NXOuIj48PXtAzrWXSGj9Hz9O3b193rSddc0mlVt4VuNWLTg3IdZ23SOqZF+0K4gpMhCYAALIXP01rYtZ7TldpV+nOsmXLgpMaZP/pT39y/48MTNKqVSt3wcBQKqnS49SeKb1lNHpzNEOHDnVXnlZjdIU2hSaPqugy6sKHAAAge4tZSZOKwSKvlqyRllVi5M1XFdi2bdts/PjxwZ5yKoVStZiuMq1G32oEruo0j6r8VKWmK1TrysfqCTd79mx3de9Iq1atcr3lFNJE1w5TIzCtUyVWa9euTfeK1QAAIPeIee+59OzYscO2bNkSvF2tWjX79NNPXe+4l19+2bV0HzlypP3+978PLqMSJTUwf/TRR+2xxx6zGjVquGDUokWLVO2cevfubSNGjHBhTQoVKmRvvvmm69GnajsFtIoVK57FVwwAALKqmI/TlJMakukaZGoQTpsmAABy3vd3lhgRHAAAIKsjNAEAAPhAaAIAAPCB0AQAAOADoQkAAMAHQhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGgCAADwgdAEAADgA6EJAADAB0ITAACAD4QmAAAAHwhNAAAAPhCaAAAAfCA0AQAA+EBoAgAA8IHQBAAA4AOhCQAAwAdCEwAAgA+EJgAAAB8ITQAAAD4QmgAAAHwgNAEAAPhAaAIAAPCB0AQAAOADoQkAAMAHQhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGgCAADI6qFp1KhR1rBhQytevLibWrVqZdOnT09z+dtvv93y5MmTaqpfv35wmTfffDPqMkePHg0uM3HiRKtcubKVLl3aBg4cGPYcmzdvttq1a1tiYmImvWoAAJAdxcfyyStVqmTDhw+3mjVrutvjxo2z66+/3pYuXRoWhDwvvviiW96TlJRkjRo1sptvvjlsOQWwdevWhc0rWLCg+7tnzx7r1auXC1fVq1e3zp07W9u2bd1f6dOnj3sOrQMAACBLhKYuXbqE3X7yySdd6dPChQujhqYSJUq4yfPBBx/Y3r177c9//nPYcipZKl++fNTn3Lhxo1tHt27d3O127drZ6tWrXWiaNGmS5c+f32688cYMeoUAACCnyDJtmpKTk+2tt96yQ4cOuWo6P8aMGWNXXXWVValSJWz+wYMH3TyVZF177bWu5MpTq1YtO3z4sJuXkJBgS5YscVWE+v/gwYPtpZde8vXcx44dc1V4oRMAAMi5Yh6aVqxYYUWLFrUCBQrYXXfdZe+//77Vq1fvlI/bsWOHa/+kqrZQdevWdVV
vH330kU2ePNlVy1166aW2fv16d3+pUqVcNWCPHj2sefPm7m/Hjh3tgQcesAEDBtimTZuscePG1qBBA3v33XfTfP5hw4YFS740qY0UAADIufIEAoFALDfg+PHjtmXLFtu3b59NnTrVXn/9dZs3b94pg5NCy3PPPWfbt293VWppSUlJsSZNmljr1q1t5MiRUZeZO3euaxCu51X7KoUtVe8pVClslS1bNmpJkyaPSpoUnPbv3097KAAAsgl9f6vww8/3d0zbNIkCj9cQvGnTpq66TA2+X3311TQfo5z3xhtvWPfu3dMNTBIXF2fNmjULljRFUvDp27evTZgwwTZs2OAal7dp08bdp150ixYtStX2SlQypgkAAOQOMa+eixaIQktwolGJkAJOz549fa1v2bJlVqFChaj3Dx061Dp16uRKo9SuSqHJc+LECTcPAAAgpiVNDz/8sAssqtY6cOCAawiuqrIZM2a4+wcNGmTbtm2z8ePHp2oA3qJFC9fuKNKQIUOsZcuWrsG3itxUJafQ9PLLL6dadtWqVTZlyhR3v9ceSiVTWr+q59auXetKqQAAAGIamnbt2uWq2NSoW/WJ6sWmwNS+fXt3v+arvVMo1Tmq7ZOq8KJR26jevXvbzp073TrVqHv+/PmufVJkCZSWGzFihBUpUsTNK1SokGtE3q9fP1fapZ50FStWzLTXDwAAso+YNwTPjQ3JAABA9vv+znJtmgAAALIiQhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGgCAADwgdAEAADgA6EJAADAB0ITAACAD4QmAAAAHwhNAAAAPsT7WQixk5wSsMWbEmz3gaNWtlhBa16ttOWNyxPrzQIAINchNGVhM1busCEfr7Yd+48G51UoUdAe71LPrm5QIabbBgBAbkP1XBYOTH0mfBcWmGTn/qNuvu4HAABnD6Epi1bJqYQpEOU+b57u13IAAODsIDRlQWrDFFnCFEpRSfdrOQAAcHYQmrIgNfrOyOUAAMCZIzRlQeoll5HLAQCAM0doyoI0rIB6yZ1qYIFR836w9bsOnKWtAgAgdyM0ZUEah0nDCkiedJaZ/9+f7eoXv7THPlhpCYeOn9VtBAAgtyE0ZVEah2nUrU2sfInwKjiVQL1yaxObfX8b61CvnOtB9++FP1qbZ+bY6Pk/2LGk5JhtMwAAOVmeQCBAv/UMkJiYaCVKlLD9+/db8eLFz9qI4F//sMf+/skaW70j0d2uUqawDepU1zrWL2958jByOAAAGfX9TWjK4qHJb7Ca+t1P9szMdfbzgWNuXotqpe2xa+tZg4olzuq2AACQnRCacllo8hw6lmSvzPvBRs/faMeSUkwFTTc2rmQPXl3HyhWnpx0AAJEITbk0NHm27Ttiz8xYax8s2+5uF8qX1+5qU8N6t65uhfLnjem2AQCQlRCacnlo8izdsteGfrLavtuyL9iIXKVO1zeqaHEh7aIAAMitEglNZ19WDE2it/eT73fY8OlrXQmUNKpUwh69tp41q1o61psHAEBMEZpiIKuGJs/RE8n2xn822b/m/GAHjyW5eZ0vrGB/7VTXKpcuHOvNAwAgJghNMZDVQ5NHveuen7XOpizZaikBs/x54+yOy6pZv3Y1rFjBfLHePAAAzipCUwxkl9DkWbMj0f4+bbX9Z8Mv7naZIvnt/g61rVvTyhaflzFPAQC5QyKh6ezLbqFJ9NZ/sXa3PTltjW3cc8jNq1OumD3S+QJrXfvcWG8eAACZjtAUA9kxNHlOJKfYhIU/2guz19v+IyfcvHZ1znXhqWbZYrHePAAAMg2hKQayc2jy7Dt83EZ+vsHGL9hsSSkBd7mWW1ucb/deVdtKFckf680DACDDEZpiICeEJs/Gnw/aU5+utdlrdrnbxQvG291X1rIerapa/njaOwEAcuf3d0y/AUeNGmUNGzZ0G6mpVatWNn369DSXnzt3rrsIbeS0du3asOWmTp1q9erVswIFCri/77//ftj
9EydOtMqVK1vp0qVt4MCBYfdt3rzZateu7XZiblX93KL2+m1NbWKvFla3fDFLPJpkf5+2xjqMmGczV+10baEAAMhtYhqaKlWqZMOHD7dvvvnGTVdccYVdf/31tmrVqnQft27dOtuxY0dwqlWrVvC+BQsWWLdu3ax79+62fPly97dr1662aNEid/+ePXusV69e9uyzz9rMmTNt3LhxNm3atODj+/Tp47Ypu5cWZYRLa55j0+6+3J7+/YV2TtECtvmXw/aXf39rt7y20FZt3x/rzQMA4KzKctVzKv155plnrGfPnlFLmtq1a2d79+61kiVLRn28ApNKiUJLrK6++morVaqUTZ482RYvXmzXXXed7dy5M7h806ZNXYnTpEmTbMqUKfbhhx/m6uq5aDQg5qi5G+y1LzfZ8V8vBnzzxZXsgQ51rCwXAwYAZFPZpnouVHJysr311lt26NAhV02XnsaNG1uFChXsyiuvtDlz5oTdp5KmDh06hM3r2LGjff311+7/KpU6fPiwLV261BISEmzJkiWuilD/Hzx4sL300kuZ8Oqyv6IF4m1gx7r2xf+1sS6NzjNF7be/+cnaPjvX/vn5ejfiOAAAOVnMQ9OKFSusaNGirv3RXXfd5dofqR1SNApKo0ePdm2W3nvvPatTp44LTvPnzw8uoxKkcuXKhT1Ot72SJZU4qUquR48e1rx5c/dXoeqBBx6wAQMG2KZNm1woa9Cggb377rtpbvexY8dcOg2dcoNKpQrbP29pbFP7XGIXVS5ph48n23Oz/mtXPDvXPly2jfZOAIAcK+bVc8ePH7ctW7bYvn37XBh6/fXXbd68eWkGp0hdunRxjcE/+ugjdzt//vwuFN1yyy1hDb9V3Xf06NGo61C1n6rn9Lw1a9Z01Xjly5d3oWr9+vVWtmzZVI954oknbMiQIanm59TquWh06Hy0fLs9PX2tbd9/ct8qSD127QV2cRUuBgwAyPqyVfWcQo6CitoVDRs2zBo1amQvvvii78e3bNnSBRuPwo5XquTZvXt3qtKn0BKjvn372quvvmobNmywpKQka9OmjSvFUi86rwF5pEGDBrkd7E1bt2613EZh9fqLKtoXD7S1gR3rWJH8eW3Z1n32+1ELrP+k7+ynvYdjvYkAAGSYmIemaKUXCjJ+qW2Squ08ag81a9assGU+++wzu+SSS6I+fujQodapUydr0qSJa1el0OQ5ceKEmxeNqhO9oRK8KbcqmC+v9WtX0+YMbOuuXadG4p98v8OueG6e/WPGWjtw9OQo4wAAZGfxsXzyhx9+2AUWjZl04MAB1xBcVWUzZswIluZs27bNxo8f726/8MILVrVqVatfv76r1pswYYKr0tPkueeee6x169b29NNPu+EL1BNu9uzZ9tVXX6V6fg1toN5yy5Ytc7fr1q1rcXFxNmbMGFdipfGfmjVrdtb2R3ZXtlhBe/qmhtbjkir290/W2IKNv9i/5v5gb3+z1f6vQx3r2rSyG2UcAIDsKKahadeuXW4cJY21pPpE9WJTYGrfvr27X/PV3smjoKQG2wpShQoVcuFJYyxdc801wWVUoqTw9eijj9pjjz1mNWrUcMGoRYsWqUq0evfubSNGjLAiRYq4eVrnm2++af369XOlXepJV7FixbO2P3KK+ueVsEl3trBZq3fZU5+uceM7DXpvhY37erM9dm09N/4TAADZTcwbgucUOX2cpt9KYzr9e+GP9uLs/7qRxeWqC8raoGsusBrnFo315gEAcrlErj139hGa0rf30HF78fP1LkAlpwQsXhcDblnF7r2qlpUszMWAAQCxQWiKAUKTPxt262LAa+yLtbvd7RKF8tk9V9ay7q2qWL68Wa5fAgAgh0skNJ19hKbT8+X6n+3JaWts7c4D7nb1c4q4KjtV3WkoAwAAzgZCUwwQmk6fqummLNlqz89aZ3sOHnfzLqlRxh7tXM/qncc+BABkPkJTDBCafjuN46ShCcZ89b+LAWu8p/s71Hb
DGAAAkFkITTFAaDpzWxMO2/AZa23a9zvcbY0w3rddTet5WTU3gCYAABmN0BQDhKaM883mBBv6yWpb/tN+d7tiyUL2UKe61qVhBdo7AQAyFKEpBghNGSsl5deLAc9Yazt+vRhw4/N1MeB61uT8UrHePABADkFoigFCU+Y4cjzZXvtyo42a+4MdOXHyOoDXNTrPlTypBAoAgDNBaIoBQlPm2pV41J6Zuc6mfveT6YgtEB9nd15e3fq0rWFFCsQHe+Mt3pRguw8cdQ3Im1crzbXuAADpIjTFAKHp7Fi5bb9r77RoU4K7fW6xAvZAh9pWrEC8DZ22JliVJxVKFLTHu9SzqxtUiOEWAwCyMkJTDBCazh4dsjNX7bJh09fYj78cTnM5r4xp1K1NCE4AgDP+/ua6Fch21IPu6gbl7bP7WtugTnWD4SiS92tgyMerXdUdAABngtCEbKtAfF5rWKlkMBxFo/tUZffB0m2uRx4AAL/VyRa0QDalRt9+/N87y23whyut/nklrH7F4tbgvBJ2YaUS7pp38VwoGADgA6EJ2Zrfy6zkzxtnh44n2+LNCW7yFMwXZ/UqFLcGFUu4IKW/tcoVtXwEKQBABEITsjUNK6Becjv3H41aTaf2TuVLFLS5D7S1HxMO24qf9tvK7ftt1bZEW7V9vwtS323Z5yZP/vg4q1u+WDBIXVixhNUuX9RVBwIAci96z2UQes/FzoyVO6zPhO/c/wOn0XtObZw2/XLIDWNwckp0gerA0aRUy8bH5bHa5Yq5ANWgYnGrX7GEK6HimngAkL0x5EAMEJpiH5zUS+5Mx2nSx2FLwmEXoFZs2+9Ko/R33+ETqZbVwJk1zy3q2kidDFMng5Q32CYAIOsjNMUAoSn2MmtEcH1Etu07crIkSiVS20+WTO05eDzVsrqesBqXK0ApSHkNz4sXzHfG2wEAyHiEphggNOUu+tjsSjwWFqIUqnYmRu/NV7VMYVel57WRqn9ecStVJP9Z324AQDhCUwwQmiA/Hzj2a0Pzk9V6ClIqpYqmUqlCv/bY+7X3XsUSdk7RAmd9mwEgN0skNJ19hCakZe+h47+WRiUGS6XSuvxL+eIFfw1Q/xtLqmyxAm4UdABAxiM0xQChCadj/5ETtnr7/9pIqVRq055DFu3TqNInhSivjZSC1HklChKkACADEJpigNCEM3XwWJKt2ZEYNpbU+t0HLNrVX0oVzhes0vOq+M4vXdh3kMqsRvMAkN0QmmKA0ITMcOR4sq3Z+WuJ1K9tpP6764AlRUlSxQrGB6v01NBcgapamSIWFxGGMmp4BgDICQhNMUBowtly9ESyC06hY0mt3XHAjienpFq2SP68wWEPVL237/Bx+9sna1Itd6qBQAEgpyI0xQChCbF0PCnFVeWpSs/12tu+31X1HT2ROkilxbvkzFcPXUFVHYBcI/E0vr8ZuhjIAXS9PFeidF4J69qsspuXlJxiP/z862Vitu+3rzf8Yut2HUhzHfr1pCq7D5ZusxubVKShOQBEoKQpg1DShKzuw2Xb7J63lvla9txiBVzj8JbVSluL6mWsVtmihCgAORIlTQBSUS85P/LlzeMG6Zz2/Q43Seki+a15VQWo0taiWhmrW75YqgbmAJDTEZqAXEIlR+olt3P/UVcVl1abps/vb+PaRS3alGCLNv1i3/641xIOHbcZq3a6SUoUymfNqpa2lr+GqHrnFacdFIAcj+q5DEL1HLIDDTfQZ8J37v8Bn73n1Mh8xbZ9tnCjQlSCfbs5wQ4dTw5bpliBeGtatZSrymtRrbQb7iBf3rhMfz0AcKboPRcDhCZkF2c6TpMamK/cnmiLNv7iQtSSTQl24FhS2DKF8+e1i6uUspbVy7gSroaVSliB+LyZ8noA4EwQmmKA0ITsJCNHBNe6NLzBwl9DlNary8SEKhAfZ03OV0nUyeq8xueXtIL5CFEAstf3d0zLz0eNGmUNGzZ0G6mpVatWNn369DSXf++996x9+/Z27rn
nBpefOXNm2DJvvvmm6+UTOR09+r9f1RMnTrTKlStb6dKlbeDAgWGP37x5s9WuXdvtRCCnUkBqVaOMXX9RRff3TNoj6bGqjut1eXV7rUdTW/pYe5t+z+X2RJd61qlBeStTJL8dS0qxBRt/sRdmr7dbXltoDZ/4zLq+ssCe+2ydfbV+jx0+Hl5SBQBZUUwbgleqVMmGDx9uNWvWdLfHjRtn119/vS1dutTq16+favn58+e70PTUU09ZyZIlbezYsdalSxdbtGiRNW7cOLicAtW6devCHluw4MmeQ3v27LFevXq5cFW9enXr3LmztW3b1v2VPn36uG2itAj4bdSr7oIKxd10+6XVTIXZG3Yf/LVheYKr1tt94Jgt3pzgpn/aBouPy+Oq8Lw2UU2rlraiBeinAiBryXLVcyr9eeaZZ6xnz56+lle46tatmw0ePNjdVhi69957bd++fVGXX7x4sV133XW2c+fJXkB6bNOmTV2J06RJk2zKlCn24YcfnvZ2Uz0H+KNTzuZfDgfbROnv9pD2VcHSq/OKh4Uo9dgDgIyWLcdpSk5OtnfeeccOHTrkqt38SElJsQMHDrigFergwYNWpUoVt86LLrrIhg4dGiyJqlWrlh0+fNiVZmmZJUuW2B133GEJCQkueM2ZMydTXh+Ak1RdXu2cIm76Q/PzXYj6ae+RYJsoDXOwNeGILf9pv5tGz99oGlfzgvLFg22iFKRKFckf65cCIJeJeWhasWKFC0lqc1S0aFF7//33rV69er4e+9xzz7mQ1bVr1+C8unXrutKmCy+80KXHF1980S699FJbvny5C0ylSpVy1YA9evSwI0eOuL8dO3Z0wWnAgAG2adMmVxJ14sQJe+KJJ+ymm26K+tzHjh1zk4c2UMBvD1GVSxd2081NT14CZvu+Iy48Lfp1mINNew7Z6h2Jbhr7n81umTrligVDlBqyaxRzAMjR1XPHjx+3LVu2uOq0qVOn2uuvv27z5s07ZXCaPHmya5ukqrSrrroq3dKoJk2aWOvWrW3kyJFRl5k7d66rntPzqn2V1l2+fHlr3ry5rV+/3sqWLZvqMQpUQ4YMSTWf6jkg4+1KPBqsytNftZGKVOPcIsHqPA11UK64vxHQAeRuidl5yAEFoBo1atirr76a5jJqd/TnP//ZVed5DbjTc+edd9pPP/0UtWeeSotUdTdhwgSLj493z7979253X7NmzVyVnRqb+ylpUo88QhOQ+fYcPOaGNvBC1NqdqS9EXLVM4ZNVeSqNql7GKpYsFJNtBZC1Zcs2TR5luNAwEkmlQKpK018/gUnrW7Zsmauui0btnTp16uRKo9TOKSnpf12fVUWndlHRFChQwE0Azr5zihaway6s4CbZd/j4yRD1a5uo1dsTXWNzTVO+2eqWqVSqUDBEtaxWxiqXLsRFiAGclpiGpocfftgFFpXQqEH3W2+95arKZsyY4e4fNGiQbdu2zcaPH+9uKyipDZLaKbVs2TLYA65QoUIuJYqqzHSf2i8pPapKTqHp5ZdfTvX8q1atcqVWut9rDxUXF2djxoxx1XNr1651pU0AsraShfNbh/rl3SSJR0/YN5tVEpVgCzcl2Mpt+11j85/2/mRTv/spOAq62kJ5Qar6OUUIUQCybmjatWuXde/e3Xbs2OFCjwa6VGDSWEyi+Wrv5FGVnUqC+vXr5ybPbbfd5hp/i9pG9e7d2wUqrVNVbxrfSe2TIkugtNyIESOsSJEiwfCl9WjdKu166aWXrGLFimdpbwDIKMUL5rMr6pZzkxw8luQuPOxV533/0z53GZkPl213k6ghuUJUSwWp6mWsVtmihCgAWbtNU3bFOE1A9nHkeLJ9t+VkiFJJ1LKt+9yFiUOVLpLfmldVgDpZGlW3fDE3cCeAnCVbNwTPrghNQPZ19ESyLd+6L9gmSqVSR0+EhygNrtmsqnrmnQxR9c4rfkaXnwGQNRCaYoDQBOQcKnVasW2fLfx1nKhvNyfYoePhnUKKFYi
3plVLBYc50PX38uWN6eU8AfwGhKYYIDQBOVdScoqt3J4YbBO1ZFOCHTgWfpHhwvnz2sVVSrkxotQ2StfSKxCfN2bbDMAfQlMMEJqA3CM5JWBrdiQGL/2i4Q72HzkRtkyB+Dhrcr5Kok5W5zU+v6QVzEeIArIaQlMMEJqA3CslJWDrdh0IlkQpRP1y6HjYMvnzxtlFlUsGQ1STKiWtcP4sN1QekOskEprOPkITAI9Oq7rUy8mG5SdHLt99IHzQ3vi4PK4Kz2sT1bRqaStagBAFnG2EphggNAFIi06zGp3cK4nS3+37j4Yto554Dc4rHhai1GMPQOYiNMUAoQmAXzrtaoRyr02UhjnYmnAkbBmNq1mvQvHgiOUaM6pUkfwx22YgpyI0xQChCcCZ2L7viAtPuvSLgtSmPYdSLVOnXLFgmyj10NMo5gDODKEpBghNADLSrsSjwao8/VUbqUg1zi0SrM7TUAfliheMybYC2RmhKQYITQAy056Dx1yvPC9Erd15INUyVcsUDlbnKUxVLFkoJtsKZCeZGpp0IdvFixfb5s2b7fDhw3buuee6i+JWq1bNcjNCE4Czad/h4ydD1K9tolZvT7SUiLN5pVKFgiGqZbUyVrl0IS5CDJyN0PT111/bP//5T/vggw/s+PHjVrJkSStUqJAlJCS4IFW9enXr3bu33XXXXVasWDHLbQhNAGIp8egJ+2azSqIS3EWIV27b7wbhDFWhREHXFsoLUtXPKUKIQq6XmNGh6frrr7clS5bYH//4R7vuuuusadOmVrhw4eD9GzdutC+//NImT55sy5cvt/Hjx1v79u0tNyE0AchKDh5Lchce9qrzvv9pn51IDj/dqyG5QlRLBanqZaxW2aKEKOQ6iRkdml5++WW78847LX/+U3d3XbVqlW3fvp3QBABZyJHjyfbdlpMhSiVRy7bucxcmDlW6SH43tIHXQ69u+WIWF0eIQs6WSEPws4/QBCA7OXoi2ZZv3RdsE6VSqaMnwkOUBtdsVlU9806GqHrnFXeDcKZHVYJqa7X7wFErW+xkdeCpHgPkitC0cuVKmzdvniUnJ9sll1ziqu1yK0ITgOxMpU4rtu2zhb+OE/Xt5gQ7dDw5bJliBeKtadVSwWEOGlQsYfnyxgXvn7Fyhw35eLXtCBntXO2oHu9Sz65uUOGsvh7kLMmZGMbPSmhSld3f/vY3a9OmjZ04ccK++OILe/DBB+2RRx6x3IjQBCAnSUpOsZXbE4NtopZsSrADx5LClimcP69dXKWUGyNKXyXPfvbfVOvxvtZG3dqE4ITfJLPDeKaEpp9++skqVaoUvH3BBRe4xt/nnHOOu71gwQLXSPznn3+23IjQBCCn/9JfsyMxeOkX/erff+SE78eXKBRvD19zgSuZUglBXJ48YX9VYJU3Ls7y5sljcfq/d7/uCy4T8Zhfl42Pi0v/MW45qgiza2DqM+E7C2RiGD+d72/fl9S+8sorrW/fvnb33Xe73hVlypSxmTNn2k033eSGIJg9e7YbswkAkPO4CwpXLOGmXpdXt5SUgK3bdcCVRH26cqcLUenZfyTJHpq6wmIpVdDK8+u8iBAXn/d/QSv4NySUhYe98P+HBzdzf+OjPEdoCAx9Lj3PqYPiyXWGPSatdYY8Nj4ipJ76NYdvR1weXRMxz1kN6iphilayo3naEt3fvl75s9Zuzndo0pADDz30kLVo0cJeffVVGz16tHXv3t1N2okqeRo3blzmbi0AIEvQl+oFFYq7SRcSPlVokgsqFLNzihawlEDAfSGmpJglBwKW5P7/67xf79N8zQve5+ZbyGNP3uctp7+nqjdx69XXrWuqFd7oHf7E/Ro0XdDygluq4BcetE4dFP8XMl1poyt1zGN7D50Iq5KLpLdb9+vYa1WjTNYKTSqyGjVqlP3nP/+x22+/3a666ipXPadG4Jo02CUAIPdRw1w/Bl9bP1O/3AIhgcv9DQlmYYHMm0ICl7dsUkrKr8tZqhCXnBz
lMQp9yd5yJ58rNAC6YBcS/EIDYHJKSlgQDH1s6Ov433afXDY0ZIauMxgkQx6j8U3Tf80R++sUwTMlYJbixvsK2DHLGtQ4/GzxHZo8l156qX3zzTc2bNgwd/mU559/3jp37pw5WwcAyPLUk0kNc3fuPxq1KkUVJ+V/HY08M+X5tZrptL/YEBY8vaDlBa/Q0r6UiJD1v+WiPCZKMEsvZIaHt4D9sPugjVvwY4aF9ozg+9hKSkqy1157zVavXm2NGjVyveT+8Ic/2F/+8hd788033SVWypcvn7lbCwDIclSVop5MarCrgBQanLyWJrqf8ZqyPgXPk9VksX+vklMC9tnqXTEP46H+N8DGKWhEcAWjIkWK2NixY+2+++6z2rVr25w5c6xjx47WqlUrV30HAMh91INJPZn0JRZKtxluAGcSxiUywsUqjPsecqBUqVLuor1q8H3kyBFr0KCB/fDDD8H7d+/ebffee69NmjTJciOGHAAARgRHzh6nyXf1XNmyZe2zzz6zGjVq2Oeff+6GHIi8P7cGJgDASQpIZ6snE3KHqxtUcMMKZIUw7js0vfTSS3brrbfa/fffbxUqVLC33347c7cMAADAsk4Y9x2a2rdvbzt37rQ9e/YwiCUAAMh1fDcE91rVE5gAAEBu5Cs0XX311a4R+KkcOHDAnn76aXcxXwAAgJzEV/XczTffbF27drVixYq5i/I2bdrUzjvvPCtYsKDt3bvXjd301Vdf2aeffmrXXnutPfPMM5m/5QAAAGeR7yEHdFHed99916ZMmeIun7Jv376TK8iTx+rVq+fGatJYTnXq1LHciCEHAADI2d/fvkNTJK1c4zVp6IF8+fJZbkdoAgAg+8mUcZoi6Qk0AQAA5Aan1XsOAAAgt4ppaNK16ho2bOiKwzTp+nXTp09P9zHz5s2ziy++2DVCr169ur3yyiuplpk6daprZ1WgQAH39/333w+7f+LEiVa5cmUrXbq0DRw4MOy+zZs3u2vqqbgOAAAgS4SmSpUq2fDhw+2bb75x0xVXXGHXX3+9rVq1KurymzZtsmuuucYuv/xyW7p0qT388MN29913u5DkWbBggXXr1s26d+9uy5cvd3/V82/RokXufg3O2atXL3v22Wdt5syZNm7cOJs2bVrw8X369HHbRLskAACQIQ3BM4tKfzRkQc+ePVPd99BDD9lHH31ka9asCc676667XDhSWBIFJpUShZZYaZwpXXB48uTJtnjxYjdsgkY395bXEAoqcdK189Q78MMPPzzt7aYhOAAA2c/pfH9nWEnTjh07rH///r/58cnJyfbWW2/ZoUOHXDVdNApGHTp0CJunoQ5USnXixIl0l/EG56xVq5YdPnzYlVQlJCTYkiVLXBWh/j948GB3jT0/jh075nZ06AQAAHKu0wpNGsRSo32PHj06OE6Tqrvuu+8+177oiy++OO0NWLFihRUtWtS1P1KpkdofqR1SNCodKleuXNg83U5KSnLbkd4yXsmSSpxUJdejRw9r3ry5+6tQ9cADD9iAAQNcFWDjxo2tQYMGblyqtAwbNizYg1CT2kgBAICcy/eQA5988on9/ve/D5bo/OMf/7DXXnvNtRdSwHjnnXfcaOCnS4NhLlu2zIUwtU267bbbXGPvtIKTBtMM5dUuhs6PtkzovN/97ndu8sydO9eFN5Uy1axZ01XjlS9f3oWq1q1bW9myZVNtx6BBg+z+++8P3lZJE8EJAICcy3dJ05NPPulKghQO1Ih648aN7raCzpw5c35TYJL8+fO7oKJ2RSq9adSokb344otRl1WQ8UqMPLt377b4+Hg3yGZ6y0SWPoVWs/Xt29deffVV27Bhgyu1atOmjQtz6kXnNSCPpJIxr9efNwEAgJzLd2hS4+t+/fq5qjT1WIuLi7MXXnjBlcRkJJUKKchEo7ZOs2bNCpv32WefucDljUqe1jKXXHJJ1HUOHTrUOnXqZE2aNHHtqhSaPCpV0zwAAADf1XMqYSpZsuT
JB8XHW6FChVxJzJnQkAEKLKrWOnDggGsIrqqyGTNmBKvAtm3bZuPHj3e3VbKlKjRVi+k6d2r0PWbMGFed5rnnnntckHv66afd8AXqCTd79mx3QeFIGtpAveVUPSh169Z1YVDrVInV2rVrrVmzZmf0GgEAQM4Qf7oNwb2qL5UIrVu3zvV2C6WeaH7t2rXLjaOknndqTK3HKjC1b9/e3a/5W7ZsCS5frVo1+/TTT13DczVIP++882zkyJGurZVHJUoKX48++qg99thjVqNGDReMWrRoEfbc2v7evXvbiBEjrEiRIm6eguCbb77pStRU2qWAVrFixdPZRQAAILeP06QSGDWmjra4N19/c2t1FuM0AQCQ/WTKBXvVFR8AACC38h2aqlSpkrlbAgAAkBN6z2lcpiNHjgRvz58/P6yXmxpyq+s+AABArm7TlDdvXtcw2xvoUfV+6nWmkcC9Rt1qmE2bJto0AQCQq689F5mtsth1fgEAADJVhl2wFwAAICcjNAEAAGT04Javv/66u4yK6HIjGgjynHPOCTYEBwAAsNzeELxq1apu8MpTya3jOdEQHACA7CdTBrfcvHlzRmwbAABAzm7TtGHDhszdEgAAgJwQmmrXrm2VK1e2Hj162NixYyl5AgAAuYrv6rl58+a5ae7cuda/f387evSonX/++XbFFVdYu3bt3FSxYsXM3VoAAICs3hA81IkTJ2zBggUuQGlauHChu6RKzZo1bd26dZYb0RAcAICc/f39m0KTR9ei++qrr2zmzJn22muv2cGDB7mMCqEJAIDc3XtOVCX39ddf25w5c1wJ05IlS6xatWrWpk0bGzVqlPsLAACQE/kOTQpECkk1atSw1q1b24ABA9y8cuXKZe4WAgAAZKfQpBKmChUquAbfbdu2dcHJGw0cAAAgp/M95MC+ffts9OjRVrhwYXv66addT7kLL7zQ9aR799137eeff87cLQUAAIih39wQXNeaUyNwr33T8uXLrVatWrZy5UrLjWgIDgBAzv7+9l3SFKlIkSJWunRpN5UqVcri4+NtzZo1v3V1AAAAOaNNU0pKin3zzTeuVEmlS//5z3/s0KFDrppO7Zxefvll9xcAACBXh6aSJUu6kKTG4GoI/vzzz7uQpN50AAAAOZ3v0PTMM8+4kKRr0AEAAOQ2vkPTX/7yl8zdEgAAgCzsNzcEBwAAyE0ITQAAAD4QmgAAAHwgNAEAAPhAaAIAAPCB0AQAAOADoQkAAMAHQhMAAIAPhCYAAAAfCE0AAABZPTQNGzbMmjVrZsWKFbOyZcvaDTfcYOvWrUv3MbfffrvlyZMn1VS/fv3gMm+++WbUZY4ePRpcZuLEiVa5cmUrXbq0DRw4MOw5Nm/e7K6xl5iYmAmvGgAAZEcxDU3z5s2zfv362cKFC23WrFmWlJRkHTp0sEOHDqX5mBdffNF27NgRnLZu3eqCz8033xy2XPHixcOW01SwYEF33549e6xXr1727LPP2syZM23cuHE2bdq04GP79Oljw4cPd+sAAAA4rQv2ZoYZM2aE3R47dqwrcfr222+tdevWUR9TokQJN3k++OAD27t3r/35z38OW04lS+XLl4+6jo0bN7p1dOvWzd1u166drV692jp37myTJk2y/Pnz24033pgBrxAAAOQUWapN0/79+91flRz5NWbMGLvqqqusSpUqYfMPHjzo5lWqVMmuvfZaW7p0afC+WrVq2eHDh928hIQEW7JkiTVs2ND9f/DgwfbSSy+d8nmPHTvmqu9CJwAAkHNlmdAUCATs/vvvt8suu8waNGjg6zGqcps+fbqragtVt25d167po48+ssmTJ7tquUsvvdTWr1/v7i9VqpSrkuvRo4c1b97c/e3YsaM98MADNmDAANu0aZM1btzYbce7776bZnssr9RLk9pHAQCAnCtPQGklC1DbJrUr+uqrr1zpkB8KLs8995xt377dVamlJSUlxZo0aeKq/EaOHBl1mblz57oG4WpnVbN
mTRe2VL2nUKWwpWrDyJImTR6VNCk4qbSMtlAAAGQP+v5W4Yef7++YtmnyqHRHpULz58/3HZiU9d544w3r3r17uoFJ4uLiXC89r6QpksJP3759bcKECbZhwwbXIL1NmzbuPvWiW7RokXXp0iXsMQUKFHATAADIHWJaPafg079/f3vvvffsiy++sGrVqvl+rEqEFHB69uzp63mWLVtmFSpUiHr/0KFDrVOnTq40Kjk52YUmz4kTJ9w8AACQu8XHukpOvdU+/PBDN1bTzp073XwVkxUqVMj9f9CgQbZt2zYbP358qgbgLVq0iNr+aciQIdayZUvX4FvFbqqSU2h6+eWXUy27atUqmzJlirvfaw+lkimtX9Vza9eudaVUAAAgd4tpaBo1apT727Zt21RDD2gQS6+x95YtW8LuV73j1KlT3ZhN0ezbt8969+7tQpgCmBp1q+pP7ZMiS6C03IgRI6xIkSJunsKaGpEr0KnaTj3pKlasmKGvGwAAZD9ZpiF4bmpIBgAAst/3d5YZcgAAACArIzQBAAD4QGgCAADwgdAEAADgA6EJAADAB0ITAACAD4QmAAAAHwhNAAAAPhCaAAAAfCA0AQAA+EBoAgAA8IHQBAAA4AOhCQAAwAdCEwAAgA+EJgAAAB8ITQAAAD4QmgAAAHwgNAEAAPhAaAIAAPCB0AQAAOADoQkAAMAHQhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGgCAADwgdAEAADgA6EJAADAB0ITAACAD4QmAAAAHwhNAAAAPhCaAAAAfCA0AQAA+EBoAgAAyOqhadiwYdasWTMrVqyYlS1b1m644QZbt25duo+ZO3eu5cmTJ9W0du3asOWmTp1q9erVswIFCri/77//ftj9EydOtMqVK1vp0qVt4MCBYfdt3rzZateubYmJiRn4agEAQHYW09A0b94869evny1cuNBmzZplSUlJ1qFDBzt06NApH6twtWPHjuBUq1at4H0LFiywbt26Wffu3W358uXub9euXW3RokXu/j179livXr3s2WeftZkzZ9q4ceNs2rRpwcf36dPHhg8fbsWLF8+kVw4AALKbPIFAIGBZxM8//+xKnBSmWrdunWZJU7t27Wzv3r1WsmTJqMsoMKmUaPr06cF5V199tZUqVcomT55sixcvtuuuu8527twZXL5p06auxGnSpEk2ZcoU+/DDD09r2/V8JUqUsP379xO2AADIJk7n+ztLtWnSBouqzE6lcePGVqFCBbvyyittzpw5YfeppEklVqE6duxoX3/9tfu/SqUOHz5sS5cutYSEBFuyZIk1bNjQ/X/w4MH20ksvnfL5jx075nZ06AQAAHKuLBOaVOB1//3322WXXWYNGjRIczkFpdGjR7s2S++9957VqVPHBaf58+cHl1EJUrly5cIep9teyZJKnFQl16NHD2vevLn7q1D1wAMP2IABA2zTpk0ulGk73n333TTbYymZepPaRwEAgJwry1TPqW2T2hV99dVXVqlSpdN6bJcuXVxj8I8++sjdzp8/vwtFt9xyS1jD7549e9rRo0fTrPZT9ZyqBmvWrOmq8cqXL+9C1fr16121YWRJkyaPSpoUnKieAwAg+8h21XMq3VHgUTXb6QYmadmypQs2HoUdr1TJs3v37lSlTx6Fn759+9qrr75qGzZscA3S27Rp40qx1IvOa0AeSr3ytHNDJwAAkHPFNDSpkKt///6umu2LL76watWq/ab1qG2Squ08rVq1cr3xQn322Wd2ySWXRH380KFDrVOnTtakSRNLTk52oclz4sQJNw8AAORu8bGuklNvNfVU01hNXumQiskKFSrk/j9o0CDbtm2bjR8/3t1+4YUXrGrVqla/fn07fvy4TZgwwbVv0uS55557XO+7p59+2q6//nq3/tmzZ7uqv0irVq1yveWWLVvmbtetW9fi4uJszJgxrsRK4z9pLCkAAJC7xTQ0jRo1yv1t27Zt2PyxY8fa7bff7v6
vMZi2bNkSvE9BSQ22FaQUrBSe1BbqmmuuCS6jEqW33nrLHn30UXvsscesRo0aLhi1aNEiVUlX7969bcSIEVakSBE3T+t88803XaBTtZ160lWsWDFT9wMAAMj6skxD8OyOcZoAAMh+sl1DcAAAgKyO0AQAAOADoQkAAMAHQhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGgCAADwgdAEAADgA6EJAADAB0ITAACAD4QmAAAAHwhNAAAAPhCaAAAAfCA0AQAA+EBoAgAA8IHQBAAA4AOhCQAAwAdCEwAAgA+EJgAAAB8ITQAAAD4QmgAAAHwgNAEAAPhAaAIAAPCB0AQAAOADoQkAAMAHQhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGgCAADwgdAEAADgA6EJAAAgq4emYcOGWbNmzaxYsWJWtmxZu+GGG2zdunXpPua9996z9u3b27nnnmvFixe3Vq1a2cyZM8OWefPNNy1PnjyppqNHjwaXmThxolWuXNlKly5tAwcODHv85s2brXbt2paYmJjBrxgAAGRXMQ1N8+bNs379+tnChQtt1qxZlpSUZB06dLBDhw6l+Zj58+e70PTpp5/at99+a+3atbMuXbrY0qVLw5ZToNqxY0fYVLBgQXffnj17rFevXvbss8+6wDVu3DibNm1a8LF9+vSx4cOHu3UAAABIfCx3w4wZM8Jujx071pU4KQy1bt066mNeeOGFsNtPPfWUffjhh/bxxx9b48aNg/NVslS+fPmo69i4caOVKFHCunXr5m4reK1evdo6d+5skyZNsvz589uNN96YAa8QAADkFFmqTdP+/fvdX1WZ+ZWSkmIHDhxI9ZiDBw9alSpVrFKlSnbttdeGlUTVqlXLDh8+7OYlJCTYkiVLrGHDhu7/gwcPtpdeeikDXxUAAMgJskxoCgQCdv/999tll11mDRo08P245557zlXnde3aNTivbt26rl3TRx99ZJMnT3bVcpdeeqmtX7/e3V+qVClXJdejRw9r3ry5+9uxY0d74IEHbMCAAbZp0yZXaqXtePfdd6M+77Fjx1ybp9AJAADkXHkCSitZgNo2qV3RV1995UqH/FAgUtskVc9dddVV6ZZGNWnSxFX5jRw5Muoyc+fOdQ3C1c6qZs2abt2q3lOoUthStWGoJ554woYMGRK1tIy2UAAAZA8q9FCTHT/f31mipEmlOyoVmjNnju/ANGXKFOvZs6e9/fbb6QYmiYuLc730vJKmaKVGffv2tVdffdU2bNjgGqS3adPG6tSp43rRLVq0KNVjBg0a5HawN23dutXnqwUAANlRTEOTCrn69+/vhhH44osvrFq1ar4ep1Kg22+/3TXaVuNtP8+zbNkyq1ChQtT7hw4dap06dXKlUcnJyS40eU6cOOHmRSpQoIBLpKETAADIueJjXSWn4KPqNY3VtHPnTjdfxWSFChUKluhs27bNxo8fHwxMaoP04osvWsuWLYOP0fJ6nKjaTPepwbeK3VQlp9D08ssvp9qGVatWuVIr3e+1h1LJ1JgxY1z13Nq1a10pFQAAyN1iGppGjRrl/rZt2zbV0AMqSRKNr7Rly5bgfapCU0mQApcmz2233eYaf8u+ffusd+/eLlApSKlRt8Z3UvukyBIoLTdixAgrUqRIMHxpPVq3qu3Uk65ixYqZuBcAAEB2kGUaguemhmQAACBryHYNwQEAALI6QhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGgCAADwgdAEAADgA6EJAADAB0ITAACAD4QmAAAAHwhNAAAAPhCaAAAAfCA0AQAA+EBoAgAA8IHQBAAA4AOhCQAAwAdCEwAAgA+EJgAAAB8ITQAAAD4QmgAAAHwgNAEAAPhAaAIAAPCB0AQAAOADoQkAAMAHQhMAAIAPhCYAAAAfCE0AAAA+EJoAAAB8IDQBAAD4QGgCAADwgdAEAADgA6EJAADAB0ITAACAD4QmAAC
ArB6ahg0bZs2aNbNixYpZ2bJl7YYbbrB169ad8nHz5s2ziy++2AoWLGjVq1e3V155JdUyU6dOtXr16lmBAgXc3/fffz/s/okTJ1rlypWtdOnSNnDgwLD7Nm/ebLVr17bExMQMeJUAACAniGloUvjp16+fLVy40GbNmmVJSUnWoUMHO3ToUJqP2bRpk11zzTV2+eWX29KlS+3hhx+2u+++24Ukz4IFC6xbt27WvXt3W758ufvbtWtXW7Rokbt/z5491qtXL3v22Wdt5syZNm7cOJs2bVrw8X369LHhw4db8eLFM3kPAACA7CJPIBAIWBbx888/uxInhanWrVtHXeahhx6yjz76yNasWROcd9ddd7lwpLAkCkwqJZo+fXpwmauvvtpKlSplkydPtsWLF9t1111nO3fuDC7ftGlTV+I0adIkmzJlin344Yente16vhIlStj+/fsJWwAAZBOn8/2dpdo0aYNFVWZpUTBSaVSojh072jfffGMnTpxId5mvv/7a/b9WrVp2+PBhV1KVkJBgS5YssYYNG7r/Dx482F566aVMeHUAACA7i7csQgVe999/v1122WXWoEGDNJdT6VC5cuXC5um2qvZU7VahQoU0l/FKllTipCq5Hj162JEjR9xfhao77rjDBgwY4KoAVRKlEPbEE0/YTTfdlGo7jh075qbIwEc7KAAAsg/ve9tPxVuWCU39+/e377//3r766qtTLpsnT56w294LDZ0fbZnQeb/73e/c5Jk7d66tWLHClTLVrFnTVeOVL1/emjdv7qoKVW0Y2Yh9yJAhqbZNjcsBAED2cuDAAVdNl+VDk0p31E5p/vz5VqlSpXSXVZDxSow8u3fvtvj4eCtTpky6y0SWPnlUYtS3b1+bMGGCbdiwwZVatWnTxt2nXnRqQN6lS5ewxwwaNMiVjHlSUlJc9Z62ITKw5eR0rpC4detW2nGdJezz2GC/xwb7PTZy234PBAIuMJ133nmnXDY+1huqwKThAFTSU61atVM+plWrVvbxxx+Hzfvss89cQ+58+fIFl1FvvPvuuy9smUsuuSTqOocOHWqdOnWyJk2auHZOCk0eVdElJyeneoyGMtAUqmTJkpYb6UOVGz5YWQn7PDbY77HBfo+N3LTfS5yihClLhCYNN6DeauqpprGavNIhbXyhQoWCJTrbtm2z8ePHB3vKqQpNpTx33nmna/Q9ZswYV53mueeee1yV2tNPP23XX3+9W//s2bOjVv2tWrXK9ZZbtmyZu123bl2Li4tz61SJ1dq1a91YUgAAIJcLxJCePto0duzY4DK33XZboE2bNmGPmzt3bqBx48aB/PnzB6pWrRoYNWpUqnW/8847gTp16gTy5csXqFu3bmDq1KmplklJSQlccsklgY8//jhsvm6ff/75gXLlygVee+21DH3NOcn+/fvd+6W/ODvY57HBfo8N9ntssN/TlqXGaUL2orZgahCv0sDIqkpkDvZ5bLDfY4P9Hhvs97QRmgAAAHzIUoNbAgAAZFWEJgAAAB8ITQAAAD4QmgAAAHwgNCFN6j2h0c3vvffe4Dz1G9D1+DRyqsbSatu2rRvrKrLnhQYtPeecc6xIkSLuOn4//fRTDF5B9qGxyG699VY3onzhwoXtoosusm+//TZ4P/s942kQ20cffdQNqqt9Wr16dfvb3/7mRvf3sN/PnK70oCsqaB/qfPLBBx+E3Z9R+3jv3r3WvXt3N86fJv1/3759llult981aPNDDz1kF154odufWkbXYN2+fXvYOtjvUaQzHAFyscWLF7sxsBo2bBi45557gvOHDx8eKFasmBv3asWKFYFu3boFKlSoEEhMTAwuc9dddwUqVqwYmDVrVuC7774LtGvXLtCoUaNAUlJSjF5N1paQkBCoUqVK4Pbbbw8sWrQosGnTpsDs2bMDGzZsCC7Dfs94f//73wNlypQJfPLJJ26fa2y3okWLBl544YXgMuz3M/fpp58GHnnkEbcP9ZXz/vvvh92
fUfv46quvDjRo0CDw9ddfu0n/v/baawO5VXr7fd++fYGrrroqMGXKlMDatWsDCxYsCLRo0SJw8cUXh62D/Z4aoQmpHDhwIFCrVi33QdHAol5o0mCg5cuXdyc5z9GjRwMlSpQIvPLKK8EPowYUfeutt4LLbNu2LRAXFxeYMWNGDF5N1vfQQw8FLrvssjTvZ79njs6dOwfuuOOOsHk33nhj4NZbb3X/Z79nvMgv74zax6tXr3brXrhwYXAZBQHNUyjI7aKF1Wg/lLXcjz/+6G6z36Ojeg5RL2/TuXNnu+qqq8Lmb9q0yV3qpkOHDsF5GvhMFzf++uuv3W1VKanoN3QZFf02aNAguAzC6WLVunbizTffbGXLlrXGjRvba6+9Fryf/Z45LrvsMvv888/tv//9r7u9fPlyd6mla665xt1mv2e+jNrHupyWqoZatGgRXKZly5ZuHu+DP/v373fVeN41VNnvWfDac8h63nrrLfvuu+9syZIlqe7zrg1Yrly5sPm6/eOPPwaXyZ8/v5UqVSrVMt7jEW7jxo02atQodz3Fhx9+2BYvXmx33323+/JQOwP2e+ZQmw59Ueh6k3nz5nUX5n7yySftlltucfez3zNfRu1j/dUPjkiax/twakePHrW//vWv9sc//jF4gV72e3SEJgRt3brVXez4s88+s4IFC6a5nH6NhFLpb+S8SH6Wya3U8FglTU899ZS7rZImNYRVkFJo8rDfM5Yu1D1hwgR30fD69eu7i3ar04N+Td92223B5djvmS8j9nG05XkfTk2lSX/4wx/ceehf//rXKZcP5PL9TvUcglQcu3v3brv44ostPj7eTfPmzbORI0e6/3u/BiN/Qegx3n3ly5e348ePux4VaS2DcBUqVLB69eqFzbvgggtsy5YtwX0q7PeMNXDgQPfrWl8Y6kWkXj/33Xef6zUq7PfMl1H7WMvs2rUr1fp//vln3odTBKauXbu6atJZs2YFS5mE/R4doQlBV155pa1YscL94vYmlYD86U9/cv9Xl2x9SPTh8uhDpWB1ySWXuNsKXPny5QtbZseOHbZy5crgMgh36aWX2rp168LmqZ1NlSpV3P/VJZ79nvEOHz5scXHhp0BV03lDDrDfM19G7eNWrVq5qlZVbXsWLVrk5vE+pB+Y1q9fb7Nnz3bDnYRiv6chjQbigBPae07Uy0U9W9577z3XPfiWW26J2j24UqVKrtu8uqleccUVdME+Ra+V+Pj4wJNPPhlYv359YOLEiYHChQsHJkyYEFyG/Z7xbrvtNted2htyQPv2nHPOCTz44IPBZdjvGdMbd+nSpW7SV87zzz/v/u/10sqofayu7xoiRb23NF144YU5uuv7mez3EydOBK677jq3T5ctWxbYsWNHcDp27FhwHez31AhNOK3QpC7Cjz/+uOsmXKBAgUDr1q3diS7UkSNHAv379w+ULl06UKhQIfcB2rJlSwy2Pvv4+OOP3fgm2qd169YNjB49Oux+9nvG05eyju3zzz8/ULBgwUD16tXduDahXxrs9zM3Z84c96UdOSm0ZuQ+/uWXXwJ/+tOf3JhPmvT/vXv3BnKr9Pa7fiREu0+THudhv6eWR/+kVQoFAACAk2jTBAAA4AOhCQAAwAdCEwAAgA+EJgAAAB8ITQAAAD4QmgAAAHwgNAEAAPhAaAIAAPCB0ATgrNq8ebO7ArquZ5jZqlatai+88EKGrGvu3Lluu/ft22eZacyYMdahQ4dMW/+xY8fs/PPPdxfoBnB6CE0A4IMuQKoLlpYoUSJTA83gwYPtsccey7TnKFCggD3wwAP20EMPZdpzADkVoQkAfMifP7+VL1/elTZllqlTp1rRokXt8ssvt8z0pz/9yb788ktbs2ZNpj4PkNMQmgBkuJSUFHv66aetZs2armRD1UFPPvlk2DIbN260du3aWeHCha1Ro0a2YMGC4H1PPPGEXXTRRWHLq5pN1W2e22+/3W644QZ79tlnrUKFClamTBnr16+
fnThxIs3tGjt2rCspmjVrVtT7f/zxR+vSpYuVKlXKihQpYvXr17dPP/00avVc27Zt3e3ISdWPsn//fuvdu7eVLVvWihcvbldccYUtX7483f321ltv2XXXXRc2z3udTz31lJUrV85KlixpQ4YMsaSkJBs4cKCVLl3aKlWqZG+88UbwMcePH7f+/fu7/VKwYEG334YNGxa8X/tKJWeTJ09Od3sAhIuPuA0AZ2zQoEH22muv2YgRI+yyyy5z1Vpr164NW+aRRx5xgadWrVru/7fccott2LDB4uP9n5bmzJnjgoH+6rHdunVzYevOO+9MtayeS8Fh5syZ1rJly6jrU+hS4Jg/f74LTatXr3YlP9G89957btnQx65atcoFG10HvXPnzi7QKHQpqL366qt25ZVX2n//+183PxqV/qgUKNIXX3zhgpG26z//+Y/17NnThczWrVvbokWLbMqUKXbXXXdZ+/btrXLlyjZy5Ej76KOP7O2333aBdevWrW4K1bx5c/d8AE5DAAAyUGJiYqBAgQKB1157Ler9mzZtCujU8/rrrwfnrVq1ys1bs2aNu/34448HGjVqFPa4ESNGBKpUqRK8fdttt7nbSUlJwXk333xzoFu3bsHbul+P++tf/xqoUKFC4Pvvv0932y+88MLAE088EfW+OXPmuG3cu3dvqvuef/75QMmSJQPr1q1ztz///PNA8eLFA0ePHg1brkaNGoFXX3016vq1Xq1//vz5YfO915mcnBycV6dOncDll18evK19UKRIkcDkyZPd7QEDBgSuuOKKQEpKSpqv9cUXXwxUrVo1zfsBpEZJE4AMpXYyatCsUpX0NGzYMPh/lRbJ7t27rW7dur6fS9VnefPmDVvPihUrwpZ57rnn7NChQ/bNN99Y9erV013f3XffbX369LHPPvvMrrrqKvv9738ftp3RTJ8+3f7617/axx9/bLVr13bz1DPt4MGDrhos1JEjR+yHH36Iuh7dJ6pOi/Y64+L+15pCpVkNGjQI3tY+0HNp/3lVeip1qlOnjl199dV27bXXpuqRV6hQITt8+HC6rw1AONo0AchQ+jL2I1++fMH/e42r1RZKFBBUxRUqWlul0HV46/HW4VGj6uTkZFdVdSq9evVyba26d+/uwlfTpk3tn//8Z5rLq/ruD3/4gw0fPjwslGgbFOA0rELotG7dOtcOKRqFHm3/3r17fb3O9F57kyZNbNOmTTZ06FAXxrp27Wo33XRT2PIJCQl27rnnnnKfAPgfQhOADKU2SgpOn3/++W9eh77Md+7cGRacfuu4Tmq7M2PGDNeQ+plnnjnl8moTpPZBarP0f//3f65tVjS//PKLazR+44032n333Rd2n0KLtl/ts9QYPnQ655xz0uydV69ePRfEMoIan6uNl7ZfbZ7UM09BybNy5Upr3LhxhjwXkFtQPQcgQ6l6SWMAPfjggy4IXHrppfbzzz+7RtJqwOyHeqbpMf/4xz9cCYlCj6rBFAR+i1atWrnHq6pKQSYy5Hjuvfde69Spk6tmU4mPGmBfcMEFUZdVWFI4VE8/BaTQwKeqPT2ner2pF6GqybZv3+4ahWueSrCi6dixo3311VduO86EGuCrpEuN4lVq984777jhEtTzzqNG4CqJAuAfoQlAhtPgjAonGqhRYUFf4Cq98UtB5V//+pcrHdIXu9oWaUDG0aNH/+ZtUnibNm2aXXPNNa4NkNovRVI1nnrB/fTTTy6gKWQpgESjnmwSOgyCqFpM8xSQ1CvwjjvucAFQoUW93dQeKS3q9adSKg1XcCaDaKrHn8La+vXr3Wtt1qyZ2x6vXZR63uk5IqvsAKQvj1qDn2IZAMBZovZHqjbTsA2Z5eabb3bP8fDDD2facwA5EW2aACALUburtMaGygjq2ajBRNOqogSQNkqaAAAAfKCkCQAAwAdCEwAAgA+EJgAAAB8ITQAAAD4QmgAAAHwgNAEAAPhAaAIAAPCB0AQAAOADoQkAAMBO7f8BJIB
dlKlm/0gAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from matplotlib import pyplot as plt\n", + "import matplotlib.ticker as mtick\n", + "\n", + "xs = [1280, 960, 640, 480, 320]\n", + "ys = [3.10, 3.11, 3.31, 3.39, 3.62]\n", + "plt.scatter(xs, ys)\n", + "plt.plot(xs, ys)\n", + "plt.ylim(2, 4)\n", + "plt.title(\"asr-streaming-conformer-librispeech WER vs chunk size\")\n", + "plt.xlabel(\"chunk size (ms)\")\n", + "plt.ylabel(\"WER (%)\")\n", + "plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter())\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "0d3d019f-b21b-4d9d-b81f-269a71e78081", + "metadata": {}, + "source": [ + "Left context size is purely a tradeoff between accuracy and computational/memory cost. Here too, it is worth evaluating the model with different sizes depending on the desired tradeoff.\n", + "\n", + "### How to pick the chunk size?\n", + "\n", + "Curiously, it doesn't have to be static! The following strategy works surprisingly well:\n", + "\n", + "- For 40% of the batches (at random), we train normally without any chunking strategy.\n", + "- For the other 60%, we do the following:\n", + " - For each batch, we sample a random chunk size between some reasonable values (e.g. uniform sampling between 8 and 32 vanilla conformer frames)\n", + " - For 75% of these chunks, we restrict the left context similarly (e.g. 2-32 chunks). For the other 25%, we don't.\n", + "\n", + "This strategy is abstracted in SpeechBrain by [`speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.dynamic_chunk_training.html#speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler).\n", + "\n", + "The consequence of this is very interesting: The trained model can still infer in a **traditional, non-streaming fashion**, but it can also infer in a streaming fashion with a **chunk size chosen at run-time**! 
Surprisingly, we found the error rate degradation vs. an unmodified model in the former case is sometimes minimal, but the impact might likely be more significant for other hyperparameters and datasets.\n", + "\n", + "Let's write an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "e6b61de8-bf3f-4f6b-8869-2d6395438c81", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Draw #0 -> None\n", + "Draw #1 -> DynChunkTrainConfig(chunk_size=5, left_context_size=None)\n", + "Draw #2 -> DynChunkTrainConfig(chunk_size=23, left_context_size=None)\n", + "Draw #3 -> None\n", + "Draw #4 -> DynChunkTrainConfig(chunk_size=12, left_context_size=14)\n", + "Draw #5 -> DynChunkTrainConfig(chunk_size=19, left_context_size=14)\n", + "Draw #6 -> DynChunkTrainConfig(chunk_size=24, left_context_size=None)\n", + "Draw #7 -> DynChunkTrainConfig(chunk_size=16, left_context_size=None)\n", + "Draw #8 -> DynChunkTrainConfig(chunk_size=8, left_context_size=6)\n", + "Draw #9 -> DynChunkTrainConfig(chunk_size=12, left_context_size=None)\n", + "\n", + "Test config -> DynChunkTrainConfig(chunk_size=32, left_context_size=16)\n", + "Valid config -> None\n" + ] + } + ], + "source": [ + "from speechbrain.core import Stage\n", + "from speechbrain.utils.dynamic_chunk_training import DynChunkTrainConfig\n", + "from speechbrain.utils.dynamic_chunk_training import DynChunkTrainConfigRandomSampler\n", + "\n", + "sampler = DynChunkTrainConfigRandomSampler(\n", + " chunkwise_prob=0.6,\n", + " chunk_size_min=8,\n", + " chunk_size_max=32,\n", + " limited_left_context_prob=0.8,\n", + " left_context_chunks_min=2,\n", + " left_context_chunks_max=16,\n", + " test_config=DynChunkTrainConfig(32, 16),\n", + " valid_config=None\n", + ")\n", + "\n", + "for i in range(10):\n", + " print(f\"Draw #{i:<2} -> {sampler(Stage.TRAIN)}\")\n", + "\n", + "print()\n", + "print(f\"Test config -> {sampler(Stage.TEST)}\")\n", + "print(f\"Valid config -> 
{sampler(Stage.VALID)}\")" + ] + }, + { + "cell_type": "markdown", + "id": "83704992-9a84-4b06-b9e1-708d1279ced0", + "metadata": {}, + "source": [ + "### Loss function(s)\n", + "\n", + "The easiest way to train a streaming Conformer model is currently using the RNN-T loss (and optionally CTC as an auxiliary loss to improve training). For a refresher, see [Speech Recognition From Scratch](https://speechbrain.readthedocs.io/en/latest/tutorials/tasks/speech-recognition-from-scratch.html) and its linked resources.\n", + "\n", + "It may be possible to also add encoder-decoder cross-entropy as either an auxiliary loss (to improve model accuracy even if using the RNN-T path for inference), or used for streaming, but this was not tested and is currently unsupported. \n", + "To implement this, you may want to explore the literature and the approach taken by competitive models." + ] + }, + { + "cell_type": "markdown", + "id": "0ca5784c-1f5c-44e5-8ba8-d6b7c5248203", + "metadata": {}, + "source": [ + "## Training: Piecing it all together with SpeechBrain\n", + "\n", + "This was a whole lot of theory, but how do we make use of what SpeechBrain has implemented? 
\n", + "The following describes what code should be used, and what streaming-specific code there is in a typical streaming Conformer-Transducer recipe.\n", + "You would preferably adapt a known-good recipe (such as [`LibriSpeech/ASR/transducer`](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/ASR/transducer).\n", + "\n", + "If you are trying to adapt a different model, this might help you, but you may need to do more research and work.\n", + "\n", + "### Automatic masking by passing a Dynamic Chunk Training configuration\n", + "\n", + "The [`speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.dynamic_chunk_training.html#speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig) class was added, whose purpose is to describe a streaming configuration for _one_ batch.\n", + "In order to implement a complete Dynamic Chunk Training strategy, your training script may sample a random configuration for each batch from a [`DynChunkTrainConfigRandomSampler`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.dynamic_chunk_training.html#speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler). (You are free to implement your own strategy, if you prefer.)\n", + "\n", + "Various functions were enhanced, such as [`TransformerASR.encode`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.models.transformer.TransformerASR.html#speechbrain.lobes.models.transformer.TransformerASR.TransformerASR.encode), to take in a `dynchunktrain_config` as an optional argument. \n", + "This parameter allows you to pass a dynamic chunk **training** configuration for this specific batch. When `None`/not passed, nothing is changed.\n", + "\n", + "The argument is passed down to each layer, as required. With a stock Conformer configuration, passing this object is all you need to make the encoder module streaming-capable. 
This makes it rather easy to navigate through the code.\n", + "\n", + "### Changes to the `.yaml`\n", + "\n", + "These following snippets are relevant:\n", + "\n", + "```yaml\n", + "streaming: True # controls all Dynamic Chunk Training & chunk size & left context mechanisms\n", + "```\n", + "\n", + "As described just before, the config sampler is useful to describe the training strategy in hyperparameters:\n", + "\n", + "```yaml\n", + "# Configuration for Dynamic Chunk Training.\n", + "# In this model, a chunk is roughly equivalent to 40ms of audio.\n", + "dynchunktrain_config_sampler: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler # yamllint disable-line rule:line-length\n", + " chunkwise_prob: 0.6 # Probability during a batch to limit attention and sample a random chunk size in the following range\n", + " chunk_size_min: 8 # Minimum chunk size (if in a DynChunkTrain batch)\n", + " chunk_size_max: 32 # Maximum chunk size (if in a DynChunkTrain batch)\n", + " limited_left_context_prob: 0.75 # If in a DynChunkTrain batch, the probability during a batch to restrict left context to a random number of chunks\n", + " left_context_chunks_min: 2 # Minimum left context size (in # of chunks)\n", + " left_context_chunks_max: 32 # Maximum left context size (in # of chunks)\n", + " # If you specify a valid/test config, you can optionally have evaluation be\n", + " # done with a specific DynChunkTrain configuration.\n", + " # valid_config: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig\n", + " # chunk_size: 24\n", + " # left_context_size: 16\n", + " # test_config: ...\n", + "```\n", + "\n", + "Make sure that you are using a supported architecture (such as Conformer, with `TransformerASR`'s `causal` parameter set to `False`).\n", + "\n", + "Currently, only greedy search is supported in a streaming context. 
You probably want to make it so that your `test` set is evaluated with greedy search.\n", + "\n", + "Additionally, you can specify a `valid_config` or `test_config` to the sampler (see comments) in order to emulate streaming when evaluating your model on either of the sets.\n", + "\n", + "### Changes to the `train.py`\n", + "\n", + "In the `compute_forward`, you should sample a random config (so that it is different for each batch):\n", + "\n", + "```python\n", + "if self.hparams.streaming:\n", + " dynchunktrain_config = self.hparams.dynchunktrain_config_sampler(stage)\n", + "else:\n", + " dynchunktrain_config = None\n", + "```\n", + "\n", + "Then, assuming the encoder is available as an `enc` hyperparameter, edit its call to forward the `dynchunktrain_config`:\n", + "\n", + "```python\n", + "x = self.modules.enc(\n", + " src,\n", + " #...\n", + " dynchunktrain_config=dynchunktrain_config,\n", + ")\n", + "```\n", + "\n", + "For training, that should be it!" + ] + }, + { + "cell_type": "markdown", + "id": "94aeb5c5-2717-42f3-9e60-325115f3d055", + "metadata": {}, + "source": [ + "## Debugging Streaming architectures\n", + "\n", + "[`speechbrain.utils.streaming`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.streaming.html#module-speechbrain.utils.streaming) provides some useful functionality, including debug features we will demonstrate.\n", + "\n", + "### Detecting future dependencies in NN layers\n", + "\n", + "As you may have noticed, retrofitting streaming support to an existing architecture is not trivial, and it is easy to miss accidental dependencies to the future. \n", + "[`speechbrain.utils.streaming.infer_dependency_matrix`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.streaming.html#speechbrain.utils.streaming.infer_dependency_matrix) can compute a matrix of dependencies between output frames and input frames for you. 
\n", + "It does so by repeatedly calling your module and figuring out which outputs were affected by the randomization of which input. \n", + "It can also detect if your model is not deterministic enough, i.e. that two consecutive calls resulted in different data. \n", + "\n", + "The output can then be visualized using [`speechbrain.utils.streaming.plot_dependency_matrix`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.streaming.html#speechbrain.utils.streaming.plot_dependency_matrix). \n", + "A red cell means that a given output can have its value affected by a given input. As a result, these plots might look very familiar if you have seen the previous figures. \n", + "Note that due to the implementation, on larger plots and on some models, you might see some random holes. These can be false negatives. Don't rely on `infer_dependency_matrix` to give perfect outputs!\n", + "\n", + "Here are examples of dependency plots with actual Conformer layers:" + ] + }, + { + "cell_type": "code", + "execution_count": 119, + "id": "5c73e2f8-87f1-4f1a-9c46-2067e58a9d9c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAb0AAAHHCAYAAAArl4bjAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAugElEQVR4nO3deXRUdZ7+8adIQgpCKgKyRQ2g4sIioCytoEDDEBHRuLE0QoBRW2Qx0I1EpyOFG4FuBRUGhNMjioDM2AZRG5RWlmYUZLdVZBMhoICK3LBYAZLv7w+H+lkmBEJSuan6vl/n3HP63rr3U0/FLh/vrc1jjDECAMACVdwOAABARaH0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9IBK7Ouvv5bH49Hs2bPdjlJhPB6P/H6/2zEQpSg9RJTZs2fL4/EEF6/Xq+TkZKWmpuqFF17QkSNH3I4Il3zxxRfy+/36+uuv3Y6CSozSQ0R64oknNGfOHE2fPl0jRoyQJGVkZKhFixb69NNPXU4HN3zxxRcaP348pYcSxbodADgfPXr0UJs2bYLrjz76qD788EPdeuutuu2227RlyxZVq1bNxYQAKiPO9BA1fvvb3yorK0u7d+/Wa6+9FnLbl19+qbvvvlu1atWS1+tVmzZttGjRopB9Tl86XblypX7/+9+rdu3a8vl8GjhwoH788cci97d48WLdeOONSkhIUGJionr27KnPP/88ZJ9BgwapRo0a2rdvn9LS0lSjRg3VqVNHf/zjH1VQUBCy7+HDhzVo0CAlJSXpggsuUHp6ug4fPlzsYy3N4/nf//1fjR49WnXq1FFCQoLuuOMOfffdd8U+nk6dOikxMVE+n09t27bVvHnzJEnjxo1TXFxcscc98MADuuCCCxQIBIrN+su/w1dffaXU1FQlJCQoOTlZTzzxhM7lh142btyoHj16yOfzqUaNGuratatWr14d8ljvueceSVKXLl2Cl7+XL19+1tmwC6WHqDJgwABJ0vvvvx/c9vnnn+s3v/mNtmzZoszMTD377LNKSEhQWlqacnJyiswYPny4tmzZIr/fr4EDB2ru3LlKS0sL+ZfznDlz1LNnT9WoUUMTJ05UVlaWvvjiC3Xs2LHI5bWCggKlpqaqdu3a+stf/qJOnTrp2Wef1cyZM4P7GGN0++23a86cObr33nv11FNPae/evUpPTy+Sr7SPZ8SIEdq8ebPGjRunoUOH6u2339bw4cND9pk9e7Z69uypQ4cO6dFHH1V2drZatWqlJUuWBP+up06d0oIFC0KOO3HihN544w3ddddd8nq9Z/rHEvw73HzzzapXr54mTZqk6667TuPGjdO4ceNKPO7zzz/XjTfeqM2bN+uRRx5RVlaWdu3apc6dO2vNmjWSpJtuukkjR46UJD322GOaM2eO5syZo6uvvrrE2bCQASLIyy+/bCSZtWvXnnGfpKQk07p16+B6165dTYsWLUwgEAhuKywsNDfccINp0qRJkdnXXXedOXHiRHD7pEmTjCTz1ltvGWOMOXLkiLngggvM/fffH3K/+/fvN0lJSSHb09PTjSTzxBNPhOzbunVrc9111wXXFy5caCSZSZMmBbedOnXK3HjjjUaSefnll8/78XTr1s0UFhYGt48aNcrExMSYw4cPG2OMOXz4sElMTDTt27c3P/30U0jOXx53/fXXm/bt24fc/uabbxpJZtmyZaYkp/8OI0aMCJnds2dPU7VqVfPdd98Ft0sy48aNC66npaWZqlWrmp07dwa3ffPNNyYxMdHcdNNNwW3/8z//c05ZYDfO9BB1atSoEXwX56FDh/Thhx+qd+/eOnLkiL7//nt9//33+uGHH5Samqrt27dr3759Icc/8MADiouLC64PHTpUsbGx+vvf/y5JWrp0qQ4fPqx+/foF533//feKiYlR+/bttWzZsiKZHnzwwZD1G2+8UV999VVw/e9//7tiY2M1dOjQ4La
YmJjgm3ROO9/H4/F4Qu67oKBAu3fvDj6eI0eOKDMzs8jZ2i+PGzhwoNasWaOdO3cGt82dO1eXXHKJOnXqVOQxF+eXZ5gej0fDhw/XiRMn9I9//KPY/QsKCvT+++8rLS1Nl156aXB7gwYN9Lvf/U6rVq1SXl7eOd03IHF5E1Ho6NGjSkxMlCTt2LFDxhhlZWWpTp06Icvpy2oHDx4MOb5JkyYh6zVq1FCDBg2Cly23b98u6efXEH898/333y8yz+v1qk6dOiHbatasGfI64e7du9WgQQPVqFEjZL8rr7wyZP18Hk9KSkqR+5YUvP/TJda8eXOVpE+fPoqPj9fcuXMlSY7j6J133lH//v1DyvFMqlSpElJcknTFFVdI0hnfcfndd9/p+PHjRf4OknT11VersLBQubm5Z71v4DTevYmosnfvXjmOo8svv1ySVFhYKEn64x//qNTU1GKPOb3vuTo9c86cOapfv36R22NjQ59WMTExpZp/Lvddmsdzpvs35/AGkl+qWbOmbr31Vs2dO1ePP/643njjDeXn5+vee+8t1RzATZQeosqcOXMkKVgIp88s4uLi1K1bt3OasX37dnXp0iW4fvToUX377be65ZZbJEmXXXaZJKlu3brnPPNsGjZsqA8++EBHjx4NOdvbunVryH7n83jO5vTj+eyzz876HwADBw7U7bffrrVr12ru3Llq3bq1mjVrdk73U1hYqK+++ip4didJ27ZtkyQ1atSo2GPq1Kmj6tWrF/k7SD+/g7VKlSq65JJLJOmczjYBLm8ianz44Yd68skn1bhxY/Xv31/Sz8XUuXNnvfTSS/r222+LHFPcW/BnzpypkydPBtenT5+uU6dOqUePHpJ+LlSfz6dnnnkmZL+SZp7NLbfcolOnTmn69OnBbQUFBXrxxRdD9jufx3M23bt3V2JioiZMmFDkYwe/Phvs0aOHLrzwQk2cOFErVqwo9Vne1KlTQ2ZPnTpVcXFx6tq1a7H7x8TEqHv37nrrrbdCLoEeOHBA8+bNU8eOHeXz+SRJCQkJknTGj3kAEmd6iFCLFy/Wl19+qVOnTunAgQP68MMPtXTpUjVs2FCLFi0KeUPGtGnT1LFjR7Vo0UL333+/Lr30Uh04cEAff/yx9u7dq82bN4fMPnHihLp27arevXtr69at+s///E917NhRt912myTJ5/Np+vTpGjBggK699lr17dtXderU0Z49e/Tuu++qQ4cOIf9yPxe9evVShw4dlJmZqa+//lpNmzbVm2++Kcdxiuxb2sdzNj6fT5MnT9Z9992ntm3b6ne/+51q1qypzZs36/jx43rllVeC+8bFxalv376aOnWqYmJi1K9fv3O+H6/XqyVLlig9PV3t27fX4sWL9e677+qxxx4r8prnLz311FNaunSpOnbsqIceekixsbF66aWXlJ+fr0mTJgX3a9WqlWJiYjRx4kQ5jqP4+Hj99re/Vd26dUv190CUc/W9o0ApnX4b/umlatWqpn79+ubf/u3fzPPPP2/y8vKKPW7nzp1m4MCBpn79+iYuLs5cdNFF5tZbbzVvvPFGkdkrVqwwDzzwgKlZs6apUaOG6d+/v/nhhx+KzFy2bJlJTU01SUlJxuv1mssuu8wMGjTIrFu3LrhPenq6SUhIKHLsuHHjzK+ffj/88IMZMGCA8fl8JikpyQwYMMBs3LixyEcWSvt4fv3xjmXLlhX71v5FixaZG264wVSrVs34fD7Trl07M3/+/CLZP/nkEyPJdO/evegf+gxO/x127txpunfvbqpXr27q1atnxo0bZwoKCkL21a8+smCMMRs2bDCpqammRo0apnr16qZLly7mo48+KnI/s2bNMpdeeqmJiYnh4wsolseYUr6aDUSp2bNna/DgwVq7dm3IV5wh1ObNm9WqVSu9+uqrwS8DOJtBgwbpjTfe0NGjR8OcDigZr+kBKJVZs2apRo0auvPOO92OApQar+kBOCdvv/22vvjiC82cOVPDhw8
PvnEEiCSUHoBzMmLECB04cEC33HKLxo8f73Yc4Ly4+preypUr9ec//1nr16/Xt99+q5ycHKWlpYXss2XLFo0dO1YrVqzQqVOn1LRpU/3tb38r8i0TAACcjauv6R07dkwtW7bUtGnTir19586d6tixo6666iotX75cn376qbKyss76be4AABSn0rx70+PxFDnT69u3r+Li4oLfsgEAQFlU2tf0CgsL9e677+qRRx5RamqqNm7cqMaNG+vRRx8tcgn0l/Lz85Wfnx8y59ChQ6pduzZfUwQAEcgYoyNHjig5OVlVqpTxAqWLnxEMIcnk5OQE17/99lsjyVSvXt0899xzZuPGjWbChAnG4/GY5cuXn3HO6Q/9srCwsLBE15Kbm1v2rinzhHIihZbevn37jCTTr1+/kP169epl+vbte8Y5gUDAOI4TXPbs2fPzH0syThiWzMzMsMxldvRlZzazo3l2OOfn6ufSO/3Dx2VRaS9vXnjhhYqNjVXTpk1Dtl999dVatWrVGY+Lj49XfHx8ke2+/1vKm9frDctcZrszn9nMZnblnV8eL1FV2m9kqVq1qtq2bVvkJ0W2bdumhg0bupQKABDJXD3TO3r0qHbs2BFc37VrlzZt2qRatWopJSVFY8aMUZ8+fXTTTTepS5cuWrJkid5++20tX77cvdAAgIjlaumtW7cu5Mc6R48eLUlKT0/X7Nmzdccdd2jGjBmaMGGCRo4cqSuvvFJ/+9vf1LFjR7ciAwAimKul17lz5yI/UvlrQ4YM0ZAhQyooEQAgmlXa1/QAAChvlB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAaHnO2X3GNcHl5eUpKSlJmZqa8Xq/bcQAApRQIBJSdnS3HceTz+co2zEQ5x3GMJONIxoRh8fv9YZnL7OjLzmxmR/PscM53JCPJOI5T5k7g8iYAwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGq6W3sqVK9WrVy8lJyfL4/Fo4cKFZ9z3wQcflMfj0ZQpUyosHwAgurhaeseOHVPLli01bdq0EvfLycnR6tWrlZycXEHJAADRKNbNO+/Ro4d69OhR4j779u3TiBEj9N5776lnz54VlAwAEI1cLb2zKSws1IABAzRmzBg1a9bsnI7Jz89Xfn5+cD0vLy9c8QAAEcZjjDFuh5Akj8ejnJwcpaWlBbdNmDBBy5Yt03vvvSePx6NGjRopIyNDGRkZZ5zj9/s1fvz4ItszMzPl9XrDkBwAEE6BQEDZ2dlyHEc+n69sw0wlIcnk5OQE19etW2fq1atn9u3bF9zWsGFDM3ny5BLnBAIB4zhOcMnNzTWSjCMZE4bF7/eHZS6zoy87s5kdzbPDOd+RjCTjOE6Zu6bSfmThn//8pw4ePKiUlBTFxsYqNjZWu3fv1h/+8Ac1atTojMfFx8fL5/OFLAAASJX4Nb0BAwaoW7duIdtSU1M1YMAADR482KVUAIBI5mrpHT16VDt27Aiu79q1S5s2bVKtWrWUkpKi2rVrh+wfFxen+vXr68orr6zoqACAKOBq6a1bt05dunQJro8ePVqSlJ6ertmzZ7uUCgAQrVwtvc6dO+vn97Ccm6+//jp8YQAAUa/SvpEFAIDyRukBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCs4TGl+RXXCJSXl6ekpCRlZmbK6/W6HQcAUEqBQEDZ2dlyHEc+n69sw0yUcxzHSDKOZEwYFr/fH5a5zI6+7MxmdjTPDud8RzKSjOM4Ze4ELm8CAKxB6QEArEHpAQCsQekBAKxB6QEArEH
pAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArOFq6a1cuVK9evVScnKyPB6PFi5cGLzt5MmTGjt2rFq0aKGEhAQlJydr4MCB+uabb9wLDACIaK6W3rFjx9SyZUtNmzatyG3Hjx/Xhg0blJWVpQ0bNujNN9/U1q1bddttt7mQFAAQDWLdvPMePXqoR48exd6WlJSkpUuXhmybOnWq2rVrpz179iglJaUiIgIAokhEvabnOI48Ho8uuOACt6MAACKQq2d6pREIBDR27Fj169dPPp/vjPvl5+crPz8/uJ6Xl1cR8QAAEcBjjDFuh5Akj8ejnJwcpaWlFbnt5MmTuuuuu7R3714tX768xNLz+/0aP358ke2ZmZnyer3lGRkAUAECgYCys7PlOE6J//4/J6aSkGRycnKKbD9x4oRJS0sz11xzjfn+++/POicQCBjHcYJLbm6ukWQcyZgwLH6/PyxzmR192ZnN7GieHc75jmQkGcdxytw1lfry5smTJ9W7d29t375dy5YtU+3atc96THx8vOLj4ysgHQAg0rhaekePHtWOHTuC67t27dKmTZtUq1YtNWjQQHfffbc2bNigd955RwUFBdq/f78kqVatWqpatapbsQEAEcrV0lu3bp26dOkSXB89erQkKT09XX6/X4sWLZIktWrVKuS4ZcuWqXPnzhUVEwAQJVwtvc6dO+vnl/OKV9JtAACUVkR9Tg8AgLKg9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1vCYKP958ry8PCUlJSkzM1Ner9ftOACAUgoEAsrOzpbjOPL5fGUbZqKc4zhGknEkY8Kw+P3+sMxldvRlZzazo3l2OOc7kpFkHMcpcydweRMAYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1XS2/lypXq1auXkpOT5fF4tHDhwpDbjTF6/PHH1aBBA1WrVk3dunXT9u3b3QkLAIh4rpbesWPH1LJlS02bNq3Y2ydNmqQXXnhBM2bM0Jo1a5SQkKDU1FQFAoEKTgoAiAaxbt55jx491KNHj2JvM8ZoypQp+tOf/qTbb79dkvTqq6+qXr16Wrhwofr27VuRUQEAUaDSvqa3a9cu7d+/X926dQtuS0pKUvv27fXxxx+7mAwAEKlcPdMryf79+yVJ9erVC9ler1694G3Fyc/PV35+fnA9Ly8vPAEBABHHY4wxboeQJI/Ho5ycHKWlpUmSPvroI3Xo0EHffPONGjRoENyvd+/e8ng8WrBgQbFz/H6/xo8fX2R7ZmamvF5vWLIDAMInEAgoOztbjuPI5/OVbZipJCSZnJyc4PrOnTuNJLNx48aQ/W666SYzcuTIM84JBALGcZzgkpubayQZRzImDIvf7w/LXGZHX3ZmMzuaZ4dzviMZScZxnDJ3TaV9Ta9x48aqX7++Pvjgg+C2vLw8rVmzRtdff/0Zj4uPj5fP5wtZAACQXH5N7+jRo9qxY0dwfdeuXdq0aZNq1aqllJQUZWRk6KmnnlKTJk3UuHFjZWVlKTk5OXgJFACA0nC19NatW6cuXboE10ePHi1JSk9P1+zZs/XII4/o2LFjeuCBB3T48GF17NhRS5Ys4bU5AMB5cbX0OnfurJ9fziuex+PRE088oSeeeKICUwEAolWlfU0PAIDyRukBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKzhMSX9dHkUyMvLU1JSkjIzM+X1et2OAwAopUAgoOzsbDmOI5/PV7ZhJso5jmMkGUc
yJgyL3+8Py1xmR192ZjM7mmeHc74jGUnGcZwydwKXNwEA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANY4r9J79dVXlZ+fX2T7iRMn9Oqrr5Y5FAAA4XBepTd48GA5jlNk+5EjRzR48OAyhwIAIBzOq/SMMfJ4PEW27927V0lJSWUOBQBAOMSWZufWrVvL4/HI4/Goa9euio39/4cXFBRo165duvnmm8s9JAAA5aFUpZeWliZJ2rRpk1JTU1WjRo3gbVWrVlWjRo101113lWtAAADKS6lKb9y4cZKkRo0aqU+fPvJ6vWEJdVpBQYH8fr9ee+017d+/X8nJyRo0aJD+9Kc/FXt5FQCAkpSq9E5LT08v7xzFmjhxoqZPn65XXnlFzZo107p16zR48GAlJSVp5MiRFZIBABA9zqv0qlSpUuKZVkFBwXkH+qWPPvpIt99+u3r27Cnp5zPM+fPn65NPPimX+QAAu5xX6b355pshpXfy5Elt3LhRr7zyisaPH19u4W644QbNnDlT27Zt0xVXXKHNmzdr1apVeu655854TH5+fshnCPPy8sotDwAgsnmMMaa8hs2bN08LFizQW2+9VS7zCgsL9dhjj2nSpEmKiYlRQUGBnn76aT366KNnPMbv9xdbvJmZmWF/DRIAUP4CgYCys7PlOI58Pl/ZhplytHPnTpOQkFBu8+bPn28uvvhiM3/+fPPpp5+aV1991dSqVcvMnj37jMcEAgHjOE5wyc3NNZKMIxkThsXv94dlLrOjLzuzmR3Ns8M535GMJOM4Tpl75bwubxbnp59+0gsvvKCLLrqovEZqzJgxyszMVN++fSVJLVq00O7duzVhwoQzvpkmPj5e8fHx5ZYBABA9zqv0atasGfKanjFGR44cUfXq1fXaa6+VW7jjx4+rSpXQL42JiYlRYWFhud0HAMAe51V6kydPDim9KlWqqE6dOmrfvr1q1qxZbuF69eqlp59+WikpKWrWrJk2btyo5557TkOGDCm3+wAA2OO8Sm/QoEE6fPiw/vrXv2rLli2SpKZNm+r6668v13AvvviisrKy9NBDD+ngwYNKTk7W73//ez3++OPlej8AADuc1xdOr1u3TpdffrkmT56sQ4cO6dChQ5o8ebIuu+wybdiwodzCJSYmasqUKdq9e7d++ukn7dy5U0899ZSqVq1abvcBALDHeZ3pjRo1Sr169dKsWbOCXzp96tQp3XfffcrIyNDKlSvLNSQAAOXhvEpv3bp1IYUnSbGxsXrkkUfUpk2bcgsHAEB5Oq/Lmz6fT3v27CmyPTc3V4mJiWUOBQBAOJxX6fXp00f//u//rgULFig3N1e5ubl6/fXXdd9996lfv37lnREAgHJxXpc3//KXv8jj8WjgwIE6deqUJCkuLk5Dhw5VdnZ2uQYEAKC8nFfpVa1aVc8//7wmTJignTt3SpIuu+wyVa9evVzDAQBQnsr0NWTVq1dXixYtyisLAABhdV6v6QEAEIkoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1PMYY43aIcMrLy1NSUpIyMzPl9XrdjgMAKKVAIKDs7Gw5jiOfz1e2YSbKOY5jJBlHMiYMi9/vD8tcZkdfdmYzO5pnh3O+IxlJxnGcMncClzcBANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qj0pbdv3z7de++9ql27tqpVq6YWLVpo3bp1bscCAESgWLcDlOTHH39Uhw4d1KVLFy1evFh16tTR9u3bVbNmTbejAQAiUKUuvYkTJ+qSSy7Ryy+/HNzWuHFjFxMBACJZpb68uWjRIrVp00b33HOP6tatq9atW2vWrFklHpOfn6+8vLyQBQAASfIYY4zbIc7E6/V
KkkaPHq177rlHa9eu1cMPP6wZM2YoPT292GP8fr/Gjx9fZHtmZmZwHgAgcgQCAWVnZ8txHPl8vrINM5VYXFycuf7660O2jRgxwvzmN7854zGBQMA4jhNccnNzjSTjSMaEYfH7/WGZy+zoy85sZkfz7HDOdyQjyTiOU+ZeqdSXNxs0aKCmTZuGbLv66qu1Z8+eMx4THx8vn88XsgAAIFXy1/Q6dOigrVu3hmzbtm2bGjZs6FIiAEAkq9SlN2rUKK1evVrPPPOMduzYoXnz5mnmzJkaNmyY29EAABGoUpde27ZtlZOTo/nz56t58+Z68sknNWXKFPXv39/taACACFSpP6cnSbfeeqtuvfVWt2MAAKJApT7TAwCgPFF6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa3iMMcbtEOGUl5enpKQkZWZmyuv1uh0HAFBKgUBA2dnZchxHPp+vbMNMlHMcx0gyjmRMGBa/3x+WucyOvuzMZnY0zw7nfEcykozjOGXuBC5vAgCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKwRUaWXnZ0tj8ejjIwMt6MAACJQxJTe2rVr9dJLL+maa65xOwoAIEJFROkdPXpU/fv316xZs1SzZk234wAAIlRElN6wYcPUs2dPdevW7az75ufnKy8vL2QBAECSPMYY43aIkrz++ut6+umntXbtWnm9XnXu3FmtWrXSlClTit3f7/dr/PjxRbZnZmbK6/WGOS0AoLwFAgFlZ2fLcRz5fL6yDTOV2J49e0zdunXN5s2bg9s6depkHn744TMeEwgEjOM4wSU3N9dIMo5kTBgWv98flrnMjr7szGZ2NM8O53xHMpKM4zhl7pXY8mjhcFm/fr0OHjyoa6+9NritoKBAK1eu1NSpU5Wfn6+YmJiQY+Lj4xUfH1/RUQEAEaBSl17Xrl31r3/9K2Tb4MGDddVVV2ns2LFFCg8AgJJU6tJLTExU8+bNQ7YlJCSodu3aRbYDAHA2EfHuTQAAykOlPtMrzvLly92OAACIUJzpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCs4THGGLdDhFNeXp6SkpKUmZkpr9frdhwAQCkFAgFlZ2fLcRz5fL6yDTNRznEcI8k4kjFhWPx+f1jmMjv6sjOb2dE8O5zzHclIMo7jlLkTuLwJALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsEalL70JEyaobdu2SkxMVN26dZWWlqatW7e6HQsAEIEqfemtWLFCw4YN0+rVq7V06VKdPHlS3bt317Fjx9yOBgCIMLFuBzibJUuWhKzPnj1bdevW1fr163XTTTe5lAoAEIkq/ZnerzmOI0mqVauWy0kAAJGm0p/p/VJhYaEyMjLUoUMHNW/evNh98vPzlZ+fH1zPy8urqHgAgErOY4wxboc4V0OHDtXixYu1atUqXXzxxcXu4/f7NX78+CLbMzMz5fV6wx0RAFDOAoGAsrOz5TiOfD5f2YaZCDFs2DBz8cUXm6+++qrE/QKBgHEcJ7jk5uYaScaRjAnD4vf7wzKX2dGXndnMjubZ4ZzvSEaScRynzF1S6S9vGmM0YsQI5eTkaPny5WrcuHGJ+8fHxys+Pr6C0gEAIkmlL71hw4Zp3rx5euutt5SYmKj9+/dLkpKSklStWjWX0wEAIkmlf/fm9OnT5TiOOnfurAYNGgSXBQsWuB0NABBhKv2ZnjH
G7QgAgChR6c/0AAAoL5QeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAaHhPlP02el5enpKQkZWZmyuv1uh0HAFBKgUBA2dnZchxHPp+vbMNMlHMcx0gyjmRMGBa/3x+WucyOvuzMZnY0zw7nfEcykozjOGXuBC5vAgCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsERGlN23aNDVq1Eher1ft27fXJ5984nYkAEAEqvSlt2DBAo0ePVrjxo3Thg0b1LJlS6WmpurgwYNuRwMARJhKX3rPPfec7r//fg0ePFhNmzbVjBkzVL16df3Xf/2X29EAABGmUpfeiRMntH79enXr1i24rUqVKurWrZs+/vhjF5MBACJRrNsBSvL999+roKBA9erVC9ler149ffnll8Uek5+fr/z8/OC64ziSpLwwZQwEAsyuwNnhns9sZjO78s0/PdMYU/ZhphLbt2+fkWQ++uijkO1jxowx7dq1K/aYcePGGUksLCwsLFG27Ny5s8y9UqnP9C688ELFxMTowIEDIdsPHDig+vXrF3vMo48+qtGjRwfXDx8+rIYNG2rPnj1KSkoKa97ylJeXp0suuUS5ubny+XxuxzlnkZpbitzs5K5Y5K54juMoJSVFtWrVKvOsSl16VatW1XXXXacPPvhAaWlpkqTCwkJ98MEHGj58eLHHxMfHKz4+vsj2pKSkiPsHLUk+n4/cFSxSs5O7YpG74lWpUva3oVTq0pOk0aNHKz09XW3atFG7du00ZcoUHTt2TIMHD3Y7GgAgwlT60uvTp4++++47Pf7449q/f79atWqlJUuWFHlzCwAAZ1PpS0+Shg8ffsbLmWcTHx+vcePGFXvJszIjd8WL1Ozkrljkrnjlmd1jTHm8BxQAgMqvUn84HQCA8kTpAQCsQekBAKxB6QEArBH1pRdpv8U3YcIEtW3bVomJiapbt67S0tK0detWt2OVWnZ2tjwejzIyMtyOclb79u3Tvffeq9q1a6tatWpq0aKF1q1b53asEhUUFCgrK0uNGzdWtWrVdNlll+nJJ58sn+8mLGcrV65Ur169lJycLI/Ho4ULF4bcbozR448/rgYNGqhatWrq1q2btm/f7k7YXygp98mTJzV27Fi1aNFCCQkJSk5O1sCBA/XNN9+4F/j/nO3v/UsPPvigPB6PpkyZUmH5zuRccm/ZskW33XabkpKSlJCQoLZt22rPnj2lup+oLr1I/C2+FStWaNiwYVq9erWWLl2qkydPqnv37jp27Jjb0c7Z2rVr9dJLL+maa65xO8pZ/fjjj+rQoYPi4uK0ePFiffHFF3r22WdVs2ZNt6OVaOLEiZo+fbqmTp2qLVu2aOLEiZo0aZJefPFFt6MVcezYMbVs2VLTpk0r9vZJkybphRde0IwZM7RmzRolJCQoNTVVgUCggpOGKin38ePHtWHDBmVlZWnDhg168803tXXrVt12220uJA11tr/3aTk5OVq9erWSk5MrKFnJzpZ7586d6tixo6666iotX75cn376qbKysuT1ekt3R2X+9s5KrF27dmbYsGHB9YKCApOcnGwmTJjgYqrSOXjwoJFkVqxY4XaUc3LkyBHTpEkTs3TpUtOpUyfz8MMPux2pRGPHjjUdO3Z0O0ap9ezZ0wwZMiRk25133mn69+/vUqJzI8nk5OQE1wsLC039+vXNn//85+C2w4cPm/j4eDN//nwXEhbv17mL88knnxhJZvfu3RUT6hycKffevXvNRRddZD777DPTsGFDM3ny5ArPVpLicvfp08fce++9ZZ4dtWd60fJbfKd/Gqk8vmi1IgwbNkw9e/YM+btXZosWLVKbNm10zz33qG7dumrdurVmzZrldqy
zuuGGG/TBBx9o27ZtkqTNmzdr1apV6tGjh8vJSmfXrl3av39/yP9fkpKS1L59+4h6nko/P1c9Ho8uuOACt6OUqLCwUAMGDNCYMWPUrFkzt+Ock8LCQr377ru64oorlJqaqrp166p9+/YlXro9k6gtvZJ+i2///v0upSqdwsJCZWRkqEOHDmrevLnbcc7q9ddf14YNGzRhwgS3o5yzr776StOnT1eTJk303nvvaejQoRo5cqReeeUVt6OVKDMzU3379tVVV12luLg4tW7dWhkZGerfv7/b0Url9HMxkp+n0s+/Izd27Fj169ev0n+Z88SJExUbG6uRI0e6HeWcHTx4UEePHlV2drZuvvlmvf/++7rjjjt05513asWKFaWaFRFfQ2arYcOG6bPPPtOqVavcjnJWubm5evjhh7V06dLSX2N3UWFhodq0aaNnnnlGktS6dWt99tlnmjFjhtLT011Od2b//d//rblz52revHlq1qyZNm3apIyMDCUnJ1fq3NHo5MmT6t27t4wxmj59uttxSrR+/Xo9//zz2rBhgzwej9txzllhYaEk6fbbb9eoUaMkSa1atdJHH32kGTNmqFOnTuc8K2rP9M7nt/gqk+HDh+udd97RsmXLdPHFF7sd56zWr1+vgwcP6tprr1VsbKxiY2O1YsUKvfDCC4qNjVVBQYHbEYvVoEEDNW3aNGTb1VdfXep3hFW0MWPGBM/2WrRooQEDBmjUqFERdZYtKfhcjNTn6enC2717t5YuXVrpz/L++c9/6uDBg0pJSQk+T3fv3q0//OEPatSokdvxzujCCy9UbGxsuTxXo7b0fvlbfKed/i2+66+/3sVkJTPGaPjw4crJydGHH36oxo0bux3pnHTt2lX/+te/tGnTpuDSpk0b9e/fX5s2bVJMTIzbEYvVoUOHIh8J2bZtmxo2bOhSonNz/PjxIr8tFhMTE/wv4kjRuHFj1a9fP+R5mpeXpzVr1lTq56n0/wtv+/bt+sc//qHatWu7HemsBgwYoE8//TTkeZqcnKwxY8bovffeczveGVWtWlVt27Ytl+dqVF/ejMTf4hs2bJjmzZunt956S4mJicHXNZKSklStWjWX051ZYmJikdcdExISVLt27Ur9euSoUaN0ww036JlnnlHv3r31ySefaObMmZo5c6bb0UrUq1cvPf3000pJSVGzZs20ceNGPffccxoyZIjb0Yo4evSoduzYEVzftWuXNm3apFq1aiklJUUZGRl66qmn1KRJEzVu3FhZWVlKTk4O/nC0W0rK3aBBA919993asGGD3nnnHRUUFASfq7Vq1VLVqlXdin3Wv/evyzkuLk7169fXlVdeWdFRQ5wt95gxY9SnTx/ddNNN6tKli5YsWaK3335by5cvL90dlfn9n5Xciy++aFJSUkzVqlVNu3btzOrVq92OVCJJxS4vv/yy29FKLRI+smCMMW+//bZp3ry5iY+PN1dddZWZOXOm25HOKi8vzzz88MMmJSXFeL1ec+mll5r/+I//MPn5+W5HK2LZsmXF/n86PT3dGPPzxxaysrJMvXr1THx8vOnatavZunWru6FNybl37dp1xufqsmXLKm3u4lSWjyycS+6//vWv5vLLLzder9e0bNnSLFy4sNT3w08LAQCsEbWv6QEA8GuUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHhBhOnfuHBG/SA9URnw4HYgwhw4dUlxcnBITE92OAkQcSg8AYA0ubwIR5peXNxs1aqRnnnlGQ4YMUWJiolJSUir9l2UDbqL0gAj37LPPqk2bNtq4caMeeughDR06tMhPsAD4GaUHRLhbbrlFDz30kC6//HKNHTtWF154oZYtW+Z2LKBSovSACHfNNdcE/7fH41H9+vV18OBBFxMBlRelB0S4uLi4kHWPxxNxv6AOVBRKDwBgDUoPAGANSg8AYA0+nA4AsAZnegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGt
QegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGv8P01Kyu98sNzDAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from speechbrain.lobes.models.transformer.TransformerASR import TransformerASR\n", + "from speechbrain.utils.streaming import infer_dependency_matrix, plot_dependency_matrix\n", + "from matplotlib import pyplot as plt\n", + "\n", + "noncausal_model = TransformerASR(\n", + " tgt_vocab=64, input_size=64, d_model=64, nhead=1, d_ffn=64, \n", + " encoder_module=\"conformer\", normalize_before=True,\n", + " attention_type=\"RelPosMHAXL\",\n", + " num_encoder_layers=4, num_decoder_layers=0,\n", + " causal=False\n", + ")\n", + "noncausal_model.eval()\n", + "noncausal_deps = infer_dependency_matrix(noncausal_model.encode, seq_shape=[1, 16, 64])\n", + "plot_dependency_matrix(noncausal_deps)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 120, + "id": "60a14fc2-c40d-4f2e-9fb0-55ae85fabd5e", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAb0AAAHHCAYAAAArl4bjAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAulUlEQVR4nO3deXRUdZ7+8adIQgpCKgKyRQ2g4sIioCytoEDDEBHRuLE0QoBRW2Qx0I1EpyOFG4FuBRUGhNMjioDM2AZRG5RWlmYUZLdVZBMhoICK3LBYAZLv7w+H+lkmBEJSuVX5vl/n3HO8t+791JNo5fHe2jzGGCMAACxQxe0AAABUFEoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KD4hgX3/9tTwej2bPnu12lArj8Xjk9/vdjoFKitJDVJk9e7Y8Hk9w8Xq9Sk5OVmpqql544QUdOXLE7YhwyRdffCG/36+vv/7a7SiIYJQeotITTzyhOXPmaPr06RoxYoQkKSMjQy1atNCnn37qcjq44YsvvtD48eMpPZQo1u0AwPno0aOH2rRpE1x/9NFH9eGHH+rWW2/Vbbfdpi1btqhatWouJgQQiTjTQ6Xx29/+VllZWdq9e7dee+21kNu+/PJL3X333apVq5a8Xq/atGmjRYsWhexz+tLpypUr9fvf/161a9eWz+fTwIED9eOPPxa5v8WLF+vGG29UQkKCEhMT1bNnT33++ech+wwaNEg1atTQvn37lJaWpho1aqhOnTr64x//qIKCgpB9Dx8+rEGDBikpKUkXXHCB0tPTdfjw4WJ/1tL8PP/7v/+r0aNHq06dOkpISNAdd9yh7777rtifp1OnTkpMTJTP51Pbtm01b948SdK4ceMUFxdX7HEPPPCALrjgAgUCgWKz/vL38NVXXyk1NVUJCQlKTk7WE088oXP5opeNGzeqR48e8vl8qlGjhrp27arVq1eH/Kz33HOPJKlLly7By9/Lly8/62zYhdJDpTJgwABJ0
vvvvx/c9vnnn+s3v/mNtmzZoszMTD377LNKSEhQWlqacnJyiswYPny4tmzZIr/fr4EDB2ru3LlKS0sL+eM8Z84c9ezZUzVq1NDEiROVlZWlL774Qh07dixyea2goECpqamqXbu2/vKXv6hTp0569tlnNXPmzOA+xhjdfvvtmjNnju6991499dRT2rt3r9LT04vkK+3PM2LECG3evFnjxo3T0KFD9fbbb2v48OEh+8yePVs9e/bUoUOH9Oijjyo7O1utWrXSkiVLgr/XU6dOacGCBSHHnThxQm+88Ybuuusueb3eM/1rCf4ebr75ZtWrV0+TJk3Sddddp3HjxmncuHElHvf555/rxhtv1ObNm/XII48oKytLu3btUufOnbVmzRpJ0k033aSRI0dKkh577DHNmTNHc+bM0dVXX13ibFjIAFHk5ZdfNpLM2rVrz7hPUlKSad26dXC9a9eupkWLFiYQCAS3FRYWmhtuuME0adKkyOzrrrvOnDhxIrh90qRJRpJ56623jDHGHDlyxFxwwQXm/vvvD7nf/fv3m6SkpJDt6enpRpJ54oknQvZt3bq1ue6664LrCxcuNJLMpEmTgttOnTplbrzxRiPJvPzyy+f983Tr1s0UFhYGt48aNcrExMSYw4cPG2OMOXz4sElMTDTt27c3P/30U0jOXx53/fXXm/bt24fc/uabbxpJZtmyZaYkp38PI0aMCJnds2dPU7VqVfPdd98Ft0sy48aNC66npaWZqlWrmp07dwa3ffPNNyYxMdHcdNNNwW3/8z//c05ZYDfO9FDp1KhRI/gqzkOHDunDDz9U7969deTIEX3//ff6/vvv9cMPPyg1NVXbt2/Xvn37Qo5/4IEHFBcXF1wfOnSoYmNj9fe//12StHTpUh0+fFj9+vULzvv+++8VExOj9u3ba9myZUUyPfjggyHrN954o7766qvg+t///nfFxsZq6NChwW0xMTHBF+mcdr4/j8fjCbnvgoIC7d69O/jzHDlyRJmZmUXO1n553MCBA7VmzRrt3LkzuG3u3Lm65JJL1KlTpyI/c3F+eYbp8Xg0fPhwnThxQv/4xz+K3b+goEDvv/++0tLSdOmllwa3N2jQQL/73e+0atUq5eXlndN9AxKXN1EJHT16VImJiZKkHTt2yBijrKws1alTJ2Q5fVnt4MGDIcc3adIkZL1GjRpq0KBB8LLl9u3bJf38HOKvZ77//vtF5nm9XtWpUydkW82aNUOeJ9y9e7caNGigGjVqhOx35ZVXhqyfz8+TkpJS5L4lBe//dIk1b95cJenTp4/i4+M1d+5cSZLjOHrnnXfUv3//kHI8kypVqoQUlyRdccUVknTGV1x+9913On78eJHfgyRdffXVKiwsVG5u7lnvGziNV2+iUtm7d68cx9Hll18uSSosLJQk/fGPf1Rqamqxx5ze91ydnjlnzhzVr1+/yO2xsaEPq5iYmFLNP5f7Ls3Pc6b7N+fwApJfqlmzpm699VbNnTtXjz/+uN544w3l5+fr3nvvLdUcwE2UHiqVOXPmSFKwEE6fWcTFxalbt27nNGP79u3q0qVLcP3o0aP69ttvdcstt0iSLrvsMklS3bp1z3nm2TRs2FAffPCBjh49GnK2t3Xr1pD9zufnOZvTP89nn3121v8BGDhwoG6//XatXbtWc+fOVevWrdWsWbNzup/CwkJ99dVXwbM7Sdq2bZskqVGjRsUeU6dOHVWvXr3I70H6+RWsVapU0SWXXCJJ53S2CXB5E5XGhx9+qCeffFKNGzdW//79Jf1cTJ07d9ZLL72kb7/9tsgxxb0Ef+bMmTp58mRwffr06Tp16pR69Ogh6edC9fl8euaZZ0L2K2nm2dxyyy06deqUpk+fHtxWUFCgF198MWS/8/l5zqZ79+5KTEzUhAkTirzt4Ndngz169NCFF16oiRMnasWKFaU+y5s6dWrI7KlTpyouLk5du3Ytdv+YmBh1795db731Vsgl0AMHDmjevHnq2
LGjfD6fJCkhIUGSzvg2D0DiTA9RavHixfryyy916tQpHThwQB9++KGWLl2qhg0batGiRSEvyJg2bZo6duyoFi1a6P7779ell16qAwcO6OOPP9bevXu1efPmkNknTpxQ165d1bt3b23dulX/+Z//qY4dO+q2226TJPl8Pk2fPl0DBgzQtddeq759+6pOnTras2eP3n33XXXo0CHkj/u56NWrlzp06KDMzEx9/fXXatq0qd588005jlNk39L+PGfj8/k0efJk3XfffWrbtq1+97vfqWbNmtq8ebOOHz+uV155JbhvXFyc+vbtq6lTpyomJkb9+vU75/vxer1asmSJ0tPT1b59ey1evFjvvvuuHnvssSLPef7SU089paVLl6pjx4566KGHFBsbq5deekn5+fmaNGlScL9WrVopJiZGEydOlOM4io+P129/+1vVrVu3VL8PVHKuvnYUKKXTL8M/vVStWtXUr1/f/Nu//Zt5/vnnTV5eXrHH7dy50wwcONDUr1/fxMXFmYsuusjceuut5o033igye8WKFeaBBx4wNWvWNDVq1DD9+/c3P/zwQ5GZy5YtM6mpqSYpKcl4vV5z2WWXmUGDBpl169YF90lPTzcJCQlFjh03bpz59cPvhx9+MAMGDDA+n88kJSWZAQMGmI0bNxZ5y0Jpf55fv71j2bJlxb60f9GiReaGG24w1apVMz6fz7Rr187Mnz+/SPZPPvnESDLdu3cv+os+g9O/h507d5ru3bub6tWrm3r16plx48aZgoKCkH31q7csGGPMhg0bTGpqqqlRo4apXr266dKli/noo4+K3M+sWbPMpZdeamJiYnj7AorlMaaUz2YDldTs2bM1ePBgrV27NuQjzhBq8+bNatWqlV599dXghwGczaBBg/TGG2/o6NGjYU4HlIzn9ACUyqxZs1SjRg3deeedbkcBSo3n9ACck7fffltffPGFZs6cqeHDhwdfOAJEE0oPwDkZMWKEDhw4oFtuuUXjx493Ow5wXlx9Tm/lypX685//rPXr1+vbb79VTk6O0tLSQvbZsmWLxo4dqxUrVujUqVNq2rSp/va3vxX5lAkAAM7G1ef0jh07ppYtW2ratGnF3r5z50517NhRV111lZYvX65PP/1UWVlZZ/00dwAAihMxr970eDxFzvT69u2ruLi44KdsAABQFhH7nF5hYaHeffddPfLII0pNTdXGjRvVuHFjPfroo0Uugf5Sfn6+8vPzQ+YcOnRItWvX5mOKACAKGWN05MgRJScnq0qVMl6gdPE9giEkmZycnOD6t99+aySZ6tWrm+eee85s3LjRTJgwwXg8HrN8+fIzzjn9pl8WFhYWlsq15Obmlr1ryjyhnEihpbdv3z4jyfTr1y9kv169epm+ffuecU4gEDCO4wSXPXv2/PzLkowThiUzMzMsc5ld+bIzm9mVeXY45+fq59I7/cXHZRGxlzcvvPBCxcbGqmnTpiHbr776aq1ateqMx8XHxys+Pr7Idt//LeXN6/WGZS6z3ZnPbGYzO3Lnl8dTVBH7iSxVq1ZV27Zti3ylyLZt29SwYUOXUgEAopmrZ3pHjx7Vjh07guu7du3Spk2bVKtWLaWkpGjMmDHq06ePbrrpJnXp0kVLlizR22+/reXLl7sXGgAQtVwtvXXr1oV8Wefo0aMlSenp6Zo9e7buuOMOzZgxQxMmTNDIkSN15ZVX6m9/+5s6duzoVmQAQBRztfQ6d+5c5Esqf23IkCEaMmRIBSUCAFRmEfucHgAA5Y3SAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWMNjzvYtrlEuLy9PSUlJyszMlNfrdTsOAKCUAoGAsrOz5TiOfD5f2YaZSs5xHCPJOJIxYVj8fn9Y5jK78mVnNrMr8+zg/HD+HXecMs/i8iYAwBqUHgDAGpQeAMAalB4AwBqUH
gDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGq6W3sqVK9WrVy8lJyfL4/Fo4cKFZ9z3wQcflMfj0ZQpUyosHwCgcnG19I4dO6aWLVtq2rRpJe6Xk5Oj1atXKzk5uYKSAQAqo1g377xHjx7q0aNHifvs27dPI0aM0HvvvaeePXtWUDIAQGXkaumdTWFhoQYMGKAxY8aoWbNm53RMfn6+8vPzg+t5eXnhigcAiDIeY4xxO4QkeTwe5eTkKC0tLbhtwoQJWrZsmd577z15PB41atRIGRkZysjIOOMcv9+v8ePHF9memZkpr9cbhuQAgHAKBALKzs6W4zjy+XxlG2YihCSTk5MTXF+3bp2pV6+e2bdvX3Bbw4YNzeTJk0ucEwgEjOM4wSU3N9dIMo5kTBgWv98flrnMrnzZmc3siJgdRuGa7ziOkWQcxynzrIh9y8I///lPHTx4UCkpKYqNjVVsbKx2796tP/zhD2rUqNEZj4uPj5fP5wtZAACQIvg5vQEDBqhbt24h21JTUzVgwAANHjzYpVQAgGjmaukdPXpUO3bsCK7v2rVLmzZtUq1atZSSkqLatWuH7B8XF6f69evryiuvrOioAIBKwNXSW7dunbp06RJcHz16tCQpPT1ds2fPdikVAKCycrX0OnfuLGPMOe//9ddfhy8MAKDSi9gXsgAAUN4oPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDU8pjTf4hqF8vLylJSUpMzMTHm9XrfjAABKKRAIKDs7W47jyOfzlW2YqeQcxzGSjCMZE4bF7/eHZS6zK192ZjP7nGeHSThnh3N+8O+445R5Fpc3AQDWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANZwtfRWrlypXr16KTk5WR6PRwsXLgzedvLkSY0dO1YtWrRQQkKCkpOTNXDgQH3zzTfuBQYARDVXS+/YsWNq2bKlpk2bVuS248ePa8OGDcrKytKGDRv05ptvauvWrbrttttcSAoAqAxi3bzzHj16qEePHsXelpSUpKVLl4Zsmzp1qtq1a6c9e/YoJSWlIiICACqRqHpOz3EceTweXXDBBW5HAQBEIVfP9EojEAho7Nix6tevn3w+3xn3y8/PV35+fnA9Ly+vIuIBAKKAxxhj3A4hSR6PRzk5OUpLSyty28mTJ3XXXXdp7969Wr58eYml5/f7NX78+CLbMzMz5fV6yzMyAKACBAIBZWdny3GcEv/+nxMTISSZnJycIttPnDhh0tLSzDXXXGO+//77s84JBALGcZzgkpubayQZRzImDIvf7w/LXGZXvuzMrmSzwyRaZ4dzvuM4RpJxHKfMsyL68ubJkyfVu3dvbd++XcuWLVPt2rXPekx8fLzi4+MrIB0AINq4WnpHjx7Vjh07guu7du3Spk2bVKtWLTVo0EB33323NmzYoHfeeUcFBQXav3+/JKlWrVqqWrWqW7EBAFHK1dJbt26dunTpElwfPXq0JCk9PV1+v1+LFi2SJLVq1SrkuGXLlqlz584VFRMAUEm4WnqdO3eWMeaMt5d0GwAApRVV79MDAKAsKD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDU8ppJ/PXleXp6SkpKUmZkpr9frdhwAQCkFAgFlZ2fLcRz5fL6yDTOVnOM4RpJxJGPCsPj9/rDMZXbly85sF2aHCbMrdn7w77jjlHkWlzcBANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9
AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANZwtfRWrlypXr16KTk5WR6PRwsXLgy53Rijxx9/XA0aNFC1atXUrVs3bd++3Z2wAICo52rpHTt2TC1bttS0adOKvX3SpEl64YUXNGPGDK1Zs0YJCQlKTU1VIBCo4KQAgMog1s0779Gjh3r06FHsbcYYTZkyRX/60590++23S5JeffVV1atXTwsXLlTfvn0rMioAoBKI2Of0du3apf3796tbt27BbUlJSWrfvr0+/vhjF5MBAKKVq2d6Jdm/f78kqV69eiHb69WrF7ytOPn5+crPzw+u5+XlhScgACDqeIwxxu0QkuTxeJSTk6O0tDRJ0kcffaQOHTrom2++UYMGDYL79e7dWx6PRwsWLCh2jt/v1/jx44tsz8zMlNfrDUt2AED4BAIBZWdny3Ec+Xy+sg0zEUKSycnJCa7v3LnTSDIbN24M2e+mm24yI0eOPOOcQCBgHMcJLrm5uUaScSRjwrD4/f6wzGV25cvO7DPMDhNmV+zscM53HMdIMo7jlHlWxD6n17hxY9WvX18ffPBBcFteXp7WrFmj66+//ozHxcfHy+fzhSwAAEguP6d39OhR7dixI7i+a9cubdq0SbVq1VJKSooyMjL01FNPqUmTJmrcuLGysrKUnJwcvAQKAEBpuFp669atU5cuXYLro0ePliSlp6dr9uzZeuSRR3Ts2DE98MADOnz4sDp27KglS5bw3BwA4Ly4WnqdO3eWMeaMt3s8Hj3xxBN64oknKjAVAKCyitjn9AAAKG+UHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGh5T0leXVwJ5eXlKSkpSZmamvF6v23EAAKUUCASUnZ0tx3Hk8/nKNsxUco7jGEnGkYwJw+L3+8Myl9mVL3tUzw4TZlee2eGcH/w77jhlnsXlTQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDXOq/ReffVV5efnF9l+4sQJvfrqq2UOBQBAOJxX6Q0ePFiO4xTZfuTIEQ0ePLjMoQAACIfzKj1jjDweT5Hte/fuVVJSUplDAQAQDrGl2bl169byeDzyeDzq2rWrYmP//+EFBQXatWuXbr755nIPCQBAeShV6aWlpUmSNm3apNTUVNWoUSN4W9WqVdWoUSPddddd5RoQAIDyUqrSGzdunCSpUaNG6tOnj7xeb1hCnVZQUCC/36/XXntN+/fvV3JysgYNGqQ//elPxV5eBQCgJKUqvdPS09PLO0exJk6cqOnTp+uVV15Rs2bNtG7dOg0ePFhJSUkaOXJkhWQAAFQe51V6VapUKfFMq6Cg4LwD/dJHH32k22+/XT179pT08xnm/Pnz9cknn5TLfACAXc6r9N58882Q0jt58qQ2btyoV155RePHjy+3cDfccINmzpypbdu26YorrtDmzZu1atUqPffcc2c8Jj8/P+Q9hHl5eeWWBwAQ3TzGGFNew+bNm6cFCxborbfeKpd5hYWFeuyxxzRp0iTFxMSooKBATz/9tB599NEzHuP3+4st3szMzLA/BwkAKH+BQEDZ2dlyHEc+n69sw0w52rlzp0lISCi3efPnzzcXX3yxmT9/vvn000/Nq6++amrVqmVmz559xmMCgYBxHCe45ObmGknGkYwJw+L3+8Myl9mVL3vYZ4cJs5nt9nzHcYwk4zhOmWed1+XN4vz000964YUXdNFFF5XXSI0ZM0aZmZnq27evJKlFixbavXu3JkyYcMYX08THxys+Pr7cMgAAKo/zKr2aNWuGPKdnjNGRI0dUvXp1vfbaa+UW7vjx46pSJfRDY2JiYlRYWFhu9wEAsMd5ld7kyZNDSq9KlSqqU6eO2rdvr5o1a5Zbu
F69eunpp59WSkqKmjVrpo0bN+q5557TkCFDyu0+AAD2OK/SGzRokA4fPqy//vWv2rJliySpadOmuv7668s13IsvvqisrCw99NBDOnjwoJKTk/X73/9ejz/+eLneDwDADuf1gdPr1q3T5ZdfrsmTJ+vQoUM6dOiQJk+erMsuu0wbNmwot3CJiYmaMmWKdu/erZ9++kk7d+7UU089papVq5bbfQAA7HFeZ3qjRo1Sr169NGvWrOCHTp86dUr33XefMjIytHLlynINCQBAeTiv0lu3bl1I4UlSbGysHnnkEbVp06bcwgEAUJ7O6/Kmz+fTnj17imzPzc1VYmJimUMBABAO51V6ffr00b//+79rwYIFys3NVW5url5//XXdd9996tevX3lnBACgXJzX5c2//OUv8ng8GjhwoE6dOiVJiouL09ChQ5WdnV2uAQEAKC/nVXpVq1bV888/rwkTJmjnzp2SpMsuu0zVq1cv13AAAJSnMn0MWfXq1dWiRYvyygIAQFid13N6AABEI0oPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA2PMca4HSKc8vLylJSUpMzMTHm9XrfjAABKKRAIKDs7W47jyOfzlW2YqeQcxzGSjCMZE4bF7/eHZS6zK192v98ftv/Omc1st2eHc37w77jjlHkWlzcBANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9AAA1oj40tu3b5/uvfde1a5dW9WqVVOLFi20bt06t2MBAKJQrNsBSvLjjz+qQ4cO6tKlixYvXqw6depo+/btqlmzptvRAABRKKJLb+LEibrkkkv08ssvB7c1btzYxUQAgGgW0Zc3Fy1apDZt2uiee+5R3bp11bp1a82aNavEY/Lz85WXlxeyAAAgSR5jjHE7xJl4vV5J0ujRo3XPPfdo7dq1evjhhzVjxgylp6cXe4zf79f48eOLbM/MzAzOAwBEj0AgoOzsbDmOI5/PV7ZhJoLFxcWZ66+/PmTbiBEjzG9+85szHhMIBIzjOMElNzfXSDKOZEwYFr/fH5a5zHYxe5gwm9mVeXY45zuOYyQZx3HKPCuiL282aNBATZs2Ddl29dVXa8+ePWc8Jj4+Xj6fL2QBAECK8Of0OnTooK1bt4Zs27Ztmxo2bOhSIgBANIvo0hs1apRWr16tZ555Rjt27NC8efM0c+ZMDRs2zO1oAIAoFNGl17ZtW+Xk5Gj+/Plq3ry5nnzySU2ZMkX9+/d3OxoAIApF9Pv0JOnWW2/Vrbfe6nYMAEAlENFnegAAlCdKDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA2PMca4HSKc8vLylJSUpMzMTHm9XrfjAABKKRAIKDs7W47jyOfzlW2YqeQcxzGSjCMZE4bF7/eHZS6zzzI/TJjNbGZH3vzg33HHKfMsLm8CAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArBFVpZednS2Px6OMjAy3owAAolDUlN7atWv10ksv6ZprrnE7CgAgSkVF6R09elT9+/fXrFmzVLNmTbfjAACiVFSU3rBhw9SzZ09169btrPvm5+crLy8vZAEAQJI8xhjjdoiSvP7663r66ae1du1aeb1ede7cWa1atdKUKVOK3d/v92v8+PFFtmdmZsrr9YY5LQCgvAUCAWVnZ8txHPl8vrINMxFsz549pm7dumbz5s3BbZ06dTIPP/zwGY8JBALGcZzgkpubayQZRzImDIvf7w/L3KifHUbhnM9sZjM78uY7jvPz33HHKfOs2PJo4
XBZv369Dh48qGuvvTa4raCgQCtXrtTUqVOVn5+vmJiYkGPi4+MVHx9f0VEBAFEgokuva9eu+te//hWybfDgwbrqqqs0duzYIoUHAEBJIrr0EhMT1bx585BtCQkJql27dpHtAACcTVS8ehMAgPIQ0Wd6xVm+fLnbEQAAUYozPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANTzGGON2iHDKy8tTUlKSMjMz5fV63Y4DACilQCCg7OxsOY4jn89XtmGmknMcx0gyjmRMGBa/3x+WuRUyO0zCOTvc85nNbGZH3vzg33HHKfMsLm8CAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArBHxpTdhwgS1bdtWiYmJqlu3rtLS0rR161a3YwEAolDEl96KFSs0bNgwrV69WkuXLtXJkyfVvXt3HTt2zO1oAIAoE+t2gLNZsmRJyPrs2bNVt25drV+/XjfddJNLqQAA0Sjiz/R+zXEcSVKtWrVcTgIAiDYRf6b3S4WFhcrIyFCHDh3UvHnzYvfJz89Xfn5+cD0vL6+i4gEAIpzHGGPcDnGuhg4dqsWLF2vVqlW6+OKLi93H7/dr/PjxRbZnZmbK6/WGOyIAoJwFAgFlZ2fLcRz5fL6yDTNRYtiwYebiiy82X331VYn7BQIB4zhOcMnNzTWSjCMZE4bF7/eHZW5wdphE6+xwz2c2s5kdefMdx/n577jjlHlWxF/eNMZoxIgRysnJ0fLly9W4ceMS94+Pj1d8fHwFpQMARJOIL71hw4Zp3rx5euutt5SYmKj9+/dLkpKSklStWjWX0wEAoknEv3pz+vTpchxHnTt3VoMGDYLLggUL3I4GAIgyEX+mZ6LndTYAgAgX8Wd6AACUF0oPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANSg8AYA1KDwBgDUoPAGANj6nkX02el5enpKQkZWZmyuv1uh0HAFBKgUBA2dnZchxHPp+vbMNMJec4jpFkHMmYMCx+vz9s2Zld8fOZzWxmR9784N9xxynzLC5vAgCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsERWlN23aNDVq1Eher1ft27fXJ5984nYkAEAUivjSW7BggUaPHq1x48Zpw4YNatmypVJTU3Xw4EG3owEAokzEl95zzz2n+++/X4MHD1bTpk01Y8YMVa9eXf/1X//ldjQAQJSJ6NI7ceKE1q9fr27dugW3ValSRd26ddPHH3/sYjIAQDSKdTtASb7//nsVFBSoXr16Idvr1aunL7/8sthj8vPzlZ+fH1x3HEeSlBemjIFAQHl54ZnO7Iqfz2xmMzvy5p+eaYwp+zATwfbt22ckmY8++ihk+5gxY0y7du2KPWbcuHFGEgsLCwtLJVt27txZ5l6J6DO9Cy+8UDExMTpw4EDI9gMHDqh+/frFHvPoo49q9OjRwfXDhw+rYcOG2rNnj5KSksKatzzl5eXpkksuUW5urnw+n9txzlm05paiNzu5Kxa5K57jOEpJSVGtWrXKPCuiS69q1aq67rrr9MEHHygtLU2SVFhYqA8++EDDhw8v9pj4+HjFx8cX2Z6UlBR1/6IlyefzkbuCRWt2clcscle8KlXK/jKUiC49SRo9erTS09PVpk0btWvXTlOmTNGxY8c0ePBgt6MBAKJMxJdenz599N133+nxxx/X/v371apVKy1ZsqTIi1sAADibiC89SRo+fPgZL2eeTXx8vMaNG1fsJc9IR
u6KF63ZyV2xyF3xyjO7x5jyeA0oAACRL6LfnA4AQHmi9AAA1qD0AADWoPQAANao9KUXbd/FN2HCBLVt21aJiYmqW7eu0tLStHXrVrdjlVp2drY8Ho8yMjLcjnJW+/bt07333qvatWurWrVqatGihdatW+d2rBIVFBQoKytLjRs3VrVq1XTZZZfpySefLJ/PJixnK1euVK9evZScnCyPx6OFCxeG3G6M0eOPP64GDRqoWrVq6tatm7Zv3+5O2F8oKffJkyc1duxYtWjRQgkJCUpOTtbAgQP1zTffuBf4/5zt9/1LDz74oDwej6ZMmVJh+c7kXHJv2bJFt912m5KSkpSQkKC2bdtqz549pbqfSl160fhdfCtWrNCwYcO0evVqLV26VCdPnlT37t117Ngxt6Ods7Vr1+qll17SNddc43aUs/rxxx/VoUMHxcXFafHixfriiy/07LPPqmbNmm5HK9HEiRM1ffp0TZ06VVu2bNHEiRM1adIkvfjii25HK+LYsWNq2bKlpk2bVuztkyZN0gsvvKAZM2ZozZo1SkhIUGpqqgKBQAUnDVVS7uPHj2vDhg3KysrShg0b9Oabb2rr1q267bbbXEga6my/79NycnK0evVqJScnV1Cykp0t986dO9WxY0ddddVVWr58uT799FNlZWXJ6/WW7o7K/OmdEaxdu3Zm2LBhwfWCggKTnJxsJkyY4GKq0jl48KCRZFasWOF2lHNy5MgR06RJE7N06VLTqVMn8/DDD7sdqURjx441HTt2dDtGqfXs2dMMGTIkZNudd95p+vfv71KicyPJ5OTkBNcLCwtN/fr1zZ///OfgtsOHD5v4+Hgzf/58FxIW79e5i/PJJ58YSWb37t0VE+ocnCn33r17zUUXXWQ+++wz07BhQzN58uQKz1aS4nL36dPH3HvvvWWeXWnP9CrLd/Gd/mqk8vig1YowbNgw9ezZM+T3HskWLVqkNm3a6J577lHdunXVunVrzZo1y+1YZ3XDDTfogw8+0LZt2yRJmzdv1qpVq9SjRw+Xk5XOrl27tH///pD/XpKSktS+ffuoepxKPz9WPR6PLrjgArejlKiwsFADBgzQmDFj1KxZM7fjnJPCwkK9++67uuKKK5Samqq6deuqffv2JV66PZNKW3olfRff/v37XUpVOoWFhcrIyFCHDh3UvHlzt+Oc1euvv64NGzZowoQJbkc5Z1999ZWmT5+uJk2a6L333tPQoUM1cuRIvfLKK25HK1FmZqb69u2rq666SnFxcWrdurUyMjLUv39/t6OVyunHYjQ/TqWfv0du7Nix6tevX8R/mPPEiRMVGxurkSNHuh3lnB08eFBHjx5Vdna2br75Zr3//vu64447dOedd2rFihWlmhUVH0Nmq2HDhumzzz7TqlWr3I5yVrm5uXr44Ye1dOnS0l9jd1FhYaHatGmjZ555RpLUunVrffbZZ5oxY4bS09NdTndm//3f/625c+dq3rx5atasmTZt2qSMjAwlJydHdO7K6OTJk+rdu7eMMZo+fbrbcUq0fv16Pf/889qwYYM8Ho/bcc5ZYWGhJOn222/XqFGjJEmtWrXSRx99pBkzZqhTp07nPKvSnumdz3fxRZLhw4frnXfe0bJly3TxxRe7Hees1q9fr4MHD+raa69VbGysYmNjtWLFCr3wwguKjY1VQUGB2xGL1aBBAzVt2jRk29VXX13qV4RVtDFjxgTP9lq0aKEBAwZo1KhRUXWWLSn4WIzWx+npwtu9e7eWLl0a8Wd5//znP3Xw4EGlpKQEH6e7d+/WH/7wBzVq1MjteGd04YUXKjY2tlweq5W29H75XXynnf4uvuuvv97FZCUzxmj48OHKycnRhx9+qMaNG7sd6Zx07dpV//rXv7Rp06bg0qZNG/Xv31+bNm1STEyM2xGL1aFDhyJvCdm2bZsaNmzoUqJzc/z48SLfLRYTExP8P+Jo0bhxY9WvXz/kcZqXl6c1a9ZE9ONU+v+Ft337dv3jH
/9Q7dq13Y50VgMGDNCnn34a8jhNTk7WmDFj9N5777kd74yqVq2qtm3blstjtVJf3ozG7+IbNmyY5s2bp7feekuJiYnB5zWSkpJUrVo1l9OdWWJiYpHnHRMSElS7du2Ifj5y1KhRuuGGG/TMM8+od+/e+uSTTzRz5kzNnDnT7Wgl6tWrl55++mmlpKSoWbNm2rhxo5577jkNGTLE7WhFHD16VDt27Aiu79q1S5s2bVKtWrWUkpKijIwMPfXUU2rSpIkaN26srKwsJScnB7842i0l5W7QoIHuvvtubdiwQe+8844KCgqCj9VatWqpatWqbsU+6+/71+UcFxen+vXr68orr6zoqCHOlnvMmDHq06ePbrrpJnXp0kVLlizR22+/reXLl5fujsr8+s8I9+KLL5qUlBRTtWpV065dO7N69Wq3I5VIUrHLyy+/7Ha0UouGtywYY8zbb79tmjdvbuLj481VV11lZs6c6Xaks8rLyzMPP/ywSUlJMV6v11x66aXmP/7jP0x+fr7b0YpYtmxZsf9Np6enG2N+fttCVlaWqVevnomPjzddu3Y1W7dudTe0KTn3rl27zvhYXbZsWcTmLk6kvGXhXHL/9a9/NZdffrnxer2mZcuWZuHChaW+H75aCABgjUr7nB4AAL9G6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QFRpnPnzlHxjfRAJOLN6UCUOXTokOLi4pSYmOh2FCDqUHoAAGtweROIMr+8vNmoUSM988wzGjJkiBITE5WSkhLxH5YNuInSA6Lcs88+qzZt2mjjxo166KGHNHTo0CJfwQLgZ5QeEOVuueUWPfTQQ7r88ss1duxYXXjhhVq2bJnbsYCIROkBUe6aa64J/rPH41H9+vV18OBBFxMBkYvSA6JcXFxcyLrH44m6b1AHKgqlBwCwBqUHALAGpQcAsAZvTgcAWIMzPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDX+H8cQmALK0UeoAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "causal_model = TransformerASR(\n", + " tgt_vocab=64, input_size=64, d_model=64, nhead=1, d_ffn=64, \n", + " encoder_module=\"conformer\", normalize_before=True,\n", + " attention_type=\"RelPosMHAXL\",\n", + " num_encoder_layers=4, num_decoder_layers=0,\n", + " causal=True\n", + ")\n", + "causal_model.eval()\n", + "causal_deps = infer_dependency_matrix(causal_model.encode, seq_shape=[1, 16, 64])\n", + "plot_dependency_matrix(causal_deps)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 121, + "id": "b086c99f-f54b-4798-890e-a780ff046a62", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAb0AAAHHCAYAAAArl4bjAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAuj0lEQVR4nO3deXRUdZ7+8afIVhBSEZAtagAVFxYBZWkFBRqGiIjGjaURAozaIouBbiQ6HSncCHQrqDAgnB5RBGTGNojaoLSyNKMgu60imwgBBVTkhsUKkHx/fzjUzzIhEJLiVuX7fp1zz/HeuvdTT6KVx3tr8xhjjAAAsEAVtwMAAHC+UHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6QAT7+uuv5fF4NGvWLLejnDcej0d+v9/tGKikKD1ElVmzZsnj8QQXr9erlJQUpaWl6YUXXtDhw4fdjgiXfPHFF/L7/fr666/djoIIRukhKj3xxBOaPXu2pk2bpuHDh0uSMjMz1bx5c3366acup4MbvvjiC40bN47SQ6li3Q4AnIvu3burdevWwfVHH31UH374oW699Vbddttt2rx5s6pWrepiQgCRiDM9VBq//e1vlZ2drV27dum1114Lue3LL7/U3XffrZo1a8rr9ap169ZauHBhyD6nLp2uWLFCv//971WrVi35fD4NGDBAP/74Y7H7W7RokW688UYlJiYqKSlJPXr00Oeffx6yz8CBA1W9enXt3btX6enpql69umrXrq0//vGPKiwsDNn30KFDGjhwoJKTk3XBBRcoIyNDhw4dKvFnLcvP87//+78aNWqUateurcTERN1xxx367rvvSvx5OnbsqKSkJPl8PrVp00Zz586VJI0dO1ZxcXElHvfAAw/oggsuUCAQKDHrL38PX331ldLS0pSYmKiUlBQ98cQTOpsvetmwYYO6d+8un8+n6tWrq0uXLlq1alXIz3rPPfdIkjp37hy8/L1s2bIzzoZdKD1UKv3795ckvf/++8Ftn3/+uX7zm99o8+bNysrK0rPPPqvExESlp6crNze32Ixhw4Zp8+bN8vv9GjBggObMmaP09PSQP86zZ89Wjx49VL16dU2YMEHZ2dn64osv1KFDh2KX1woLC5WWlqZatWrpL3/5izp27Khnn31WM2bMCO5jjNHtt9+u2bNn695779VTTz2lPXv2KCMjo1i+sv48w4cP16ZNmzR27FgNGTJEb7/9toYNGxayz6xZs9SjRw8dPHhQjz
76qHJyctSyZUstXrw4+Hs9efKk5s+fH3Lc8ePH9cYbb+iuu+6S1+s93b+W4O/h5ptvVt26dTVx4kRdd911Gjt2rMaOHVvqcZ9//rluvPFGbdq0SY888oiys7O1c+dOderUSatXr5Yk3XTTTRoxYoQk6bHHHtPs2bM1e/ZsXX311aXOhoUMEEVefvllI8msWbPmtPskJyebVq1aBde7dOlimjdvbgKBQHBbUVGRueGGG0zjxo2Lzb7uuuvM8ePHg9snTpxoJJm33nrLGGPM4cOHzQUXXGDuv//+kPvdt2+fSU5ODtmekZFhJJknnngiZN9WrVqZ6667Lri+YMECI8lMnDgxuO3kyZPmxhtvNJLMyy+/fM4/T9euXU1RUVFw+8iRI01MTIw5dOiQMcaYQ4cOmaSkJNOuXTvz008/heT85XHXX3+9adeuXcjtb775ppFkli5dakpz6vcwfPjwkNk9evQw8fHx5rvvvgtul2TGjh0bXE9PTzfx8fFmx44dwW3ffPONSUpKMjfddFNw2//8z/+cVRbYjTM9VDrVq1cPvorz4MGD+vDDD9WrVy8dPnxY33//vb7//nv98MMPSktL07Zt27R3796Q4x944AHFxcUF14cMGaLY2Fj9/e9/lyQtWbJEhw4dUt++fYPzvv/+e8XExKhdu3ZaunRpsUwPPvhgyPqNN96or776Krj+97//XbGxsRoyZEhwW0xMTPBFOqec68/j8XhC7ruwsFC7du0K/jyHDx9WVlZWsbO1Xx43YMAArV69Wjt27AhumzNnji655BJ17Nix2M9ckl+eYXo8Hg0bNkzHjx/XP/7xjxL3Lyws1Pvvv6/09HRdeumlwe3169fX7373O61cuVL5+flndd+AxOVNVEJHjhxRUlKSJGn79u0yxig7O1u1a9cOWU5dVjtw4EDI8Y0bNw5Zr169uurXrx+8bLlt2zZJPz+H+OuZ77//frF5Xq9XtWvXDtlWo0aNkOcJd+3apfr166t69eoh+1155ZUh6+fy86Smpha7b0nB+z9VYs2aNVNpevfurYSEBM2ZM0eS5DiO3nnnHfXr1y+kHE+nSpUqIcUlSVdccYUknfYVl999952OHTtW7PcgSVdffbWKioqUl5d3xvsGTuHVm6hU9uzZI8dxdPnll0uSioqKJEl//OMflZaWVuIxp/Y9W6dmzp49W/Xq1St2e2xs6MMqJiamTPPP5r7L8vOc7v7NWbyA5Jdq1KihW2+9VXPmzNHjjz+uN954QwUFBbr33nvLNAdwE6WHSmX27NmSFCyEU2cWcXFx6tq161nN2LZtmzp37hxcP3LkiL799lvdcsstkqTLLrtMklSnTp2znnkmDRo00AcffKAjR46EnO1t2bIlZL9z+XnO5NTP89lnn53xfwAGDBig22+/XWvWrNGcOXPUqlUrNW3a9Kzup6ioSF999VXw7E6Stm7dKklq2LBhicfUrl1b1apVK/Z7kH5+BWuVKlV0ySWXSNJZnW0CXN5EpfHhhx/qySefVKNGjdSvXz9JPxdTp06d9NJLL+nbb78tdkxJL8GfMWOGTpw4EVyfNm2aTp48qe7du0v6uVB9Pp+eeeaZkP1Km3kmt9xyi06ePKlp06YFtxUWFurFF18M2e9cfp4z6datm5KSkjR+/Phibzv49dlg9+7ddeGFF2rChAlavnx5mc/ypkyZEjJ7ypQpiouLU5cuXUrcPyYmRt26ddNbb70Vcgl0//79mjt3rjp06CCfzydJSkxMlKTTvs0DkDjTQ5RatGiRvvzyS508eVL79+/Xhx9+qCVLlqhBgwZauHBhyAsypk6dqg4dOqh58+a6//77demll2r//v36+OOPtWfPHm3atClk9vHjx9WlSxf16tVLW7Zs0X/+53+qQ4cOuu222yRJPp9P06ZNU//+/XXttdeqT58+ql27tnbv3q13331X7du3D/njfjZ69uyp9u3bKysrS19//bWaNGmiN998U47jFNu3rD
/Pmfh8Pk2aNEn33Xef2rRpo9/97neqUaOGNm3apGPHjumVV14J7hsXF6c+ffpoypQpiomJUd++fc/6frxerxYvXqyMjAy1a9dOixYt0rvvvqvHHnus2HOev/TUU09pyZIl6tChgx566CHFxsbqpZdeUkFBgSZOnBjcr2XLloqJidGECRPkOI4SEhL029/+VnXq1CnT7wOVnKuvHQXK6NTL8E8t8fHxpl69eubf/u3fzPPPP2/y8/NLPG7Hjh1mwIABpl69eiYuLs5cdNFF5tZbbzVvvPFGsdnLly83DzzwgKlRo4apXr266devn/nhhx+KzVy6dKlJS0szycnJxuv1mssuu8wMHDjQrF27NrhPRkaGSUxMLHbs2LFjza8ffj/88IPp37+/8fl8Jjk52fTv399s2LCh2FsWyvrz/PrtHUuXLi3xpf0LFy40N9xwg6latarx+Xymbdu2Zt68ecWyf/LJJ0aS6datW/Ff9Gmc+j3s2LHDdOvWzVSrVs3UrVvXjB071hQWFobsq1+9ZcEYY9avX2/S0tJM9erVTbVq1Uznzp3NRx99VOx+Zs6caS699FITExPD2xdQIo8xZXw2G6ikZs2apUGDBmnNmjUhH3GGUJs2bVLLli316quvBj8M4EwGDhyoN954Q0eOHAlzOqB0PKcHoExmzpyp6tWr684773Q7ClBmPKcH4Ky8/fbb+uKLLzRjxgwNGzYs+MIRIJpQegDOyvDhw7V//37dcsstGjdunNtxgHPi6nN6K1as0J///GetW7dO3377rXJzc5Wenh6yz+bNmzVmzBgtX75cJ0+eVJMmTfS3v/2t2KdMAABwJq4+p3f06FG1aNFCU6dOLfH2HTt2qEOHDrrqqqu0bNkyffrpp8rOzj7jp7kDAFCSiHn1psfjKXam16dPH8XFxQU/ZQMAgPKI2Of0ioqK9O677+qRRx5RWlqaNmzYoEaNGunRRx8tdgn0lwoKClRQUBAy5+DBg6pVqxYfUwQAUcgYo8OHDyslJUVVqpTzAqWL7xEMIcnk5uYG17/99lsjyVSrVs0899xzZsOGDWb8+PHG4/GYZcuWnXbOqTf9srCwsLBUriUvL6/8XVPuCRVECi29vXv3Gkmmb9++Ifv17NnT9OnT57RzAoGAcRwnuOzevfvnX5ZknDAsWVlZYZnL7MqXndnMrsyzwzk/Tz+X3qkvPi6PiL28eeGFFyo2NlZNmjQJ2X711Vdr5cqVpz0uISFBCQkJxbb7/m+paF6vNyxzme3OfGYzm9mRO78inqKK2E9kiY+PV5s2bYp9pcjWrVvVoEEDl1IBAKKZq2d6R44c0fbt24PrO3fu1MaNG1WzZk2lpqZq9OjR6t27t2666SZ17txZixcv1ttvv61ly5a5FxoAELVcLb21a9eGfFnnqFGjJEkZGRmaNWuW7rjjDk2fPl3jx4/XiBEjdOWVV+pvf/ubOnTo4FZkAEAUc7X0OnXqVOxLKn9t8ODBGjx48HlKBACozCL2OT0AACoapQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALCGx5zpW1yjXH5+vpKTk5WVlSWv1+t2HABAGQUCAeXk5MhxHPl8vvINM5Wc4zhGknEkY8Kw+P3+sMxlduXLzmxmV+bZ4ZzvSEaScRyn3J3A5U0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANVwtvRUrVqhnz55KSUmRx+PRggULTrvvgw8+KI/Ho8mTJ5+3fACAysXV0jt69KhatGihqVOnlrpfbm6uVq1apZSUlPOUDABQGcW6eefdu3dX9+7dS91n7969Gj58uN577z316NHjPCUDAFRGrpbemRQVFal///4aPXq0mj
ZtelbHFBQUqKCgILien58frngAgCjjMcYYt0NIksfjUW5urtLT04Pbxo8fr6VLl+q9996Tx+NRw4YNlZmZqczMzNPO8fv9GjduXLHtWVlZ8nq9YUgOAAinQCCgnJwcOY4jn89XvmEmQkgyubm5wfW1a9eaunXrmr179wa3NWjQwEyaNKnUOYFAwDiOE1zy8vKMJONIxoRh8fv9YZnL7MqXndnMrsyzwznfkYwk4zhOubsmYt+y8M9//lMHDhxQamqqYmNjFRsbq127dukPf/iDGjZseNrjEhIS5PP5QhYAAKQIfk6vf//+6tq1a8i2tLQ09e/fX4MGDXIpFQAgmrlaekeOHNH27duD6zt37tTGjRtVs2ZNpaamqlatWiH7x8XFqV69erryyivPd1QAQCXgaumtXbtWnTt3Dq6PGjVKkpSRkaFZs2a5lAoAUFm5WnqdOnXSz69hOTtff/11+MIAACq9iH0hCwAAFY3SAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWMNjyvItrlEoPz9fycnJysrKktfrdTsOAKCMAoGAcnJy5DiOfD5f+YaZSs5xHCPJOJIxYVj8fn9Y5jK78mVnNrMr8+xwznckI8k4jlPuTuDyJgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAarpbeihUr1LNnT6WkpMjj8WjBggXB206cOKExY8aoefPmSkxMVEpKigYMGKBvvvnGvcAAgKjmaukdPXpULVq00NSpU4vdduzYMa1fv17Z2dlav3693nzzTW3ZskW33XabC0kBAJVBrJt33r17d3Xv3r3E25KTk7VkyZKQbVOmTFHbtm21e/dupaamno+IAIBKJKqe03McRx6PRxdccIHbUQAAUcjVM72yCAQCGjNmjPr27Sufz3fa/QoKClRQUBBcz8/PPx/xAABRwGOMMW6HkCSPx6Pc3Fylp6cXu+3EiRO66667tGfPHi1btqzU0vP7/Ro3blyx7VlZWfJ6vRUZGQBwHgQCAeXk5MhxnFL//p8VEyEkmdzc3GLbjx8/btLT080111xjvv/++zPOCQQCxnGc4JKXl2ckGUcyJgyL3+8Py1xmV77szK5ks8MkWmeHc77jOEaScRyn3LMi+vLmiRMn1KtXL23btk1Lly5VrVq1znhMQkKCEhISzkM6AEC0cbX0jhw5ou3btwfXd+7cqY0bN6pmzZqqX7++7r77bq1fv17vvPOOCgsLtW/fPklSzZo1FR8f71ZsAECUcrX01q5dq86dOwfXR40aJUnKyMiQ3+/XwoULJUktW7YMOW7p0qXq1KnT+YoJAKgkXC29Tp06yRhz2ttLuw0AgLKKqvfpAQBQHpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAaHlPJv548Pz9fycnJysrKktfrdTsOAKCMAoGAcnJy5DiOfD5f+YaZSs5xHCPJOJIxYVj8fn9Y5jK78mVndiWbHSbROjuc84N/xx2n3LO4vAkAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsIarpbdixQr17NlTKSkp8ng8WrBgQcjtxhg9/vjjql+/vqpWraquXbtq27Zt7oQFAEQ9V0vv6NGjatGihaZOnVri7RMnTtQLL7yg6dOna/Xq1UpMTFRaWpoCgcB5TgoAqAxi3bzz7t27q3v37iXeZozR5MmT9ac//Um33367JOnVV19V3bp1tWDBAvXp0+d8Rg
UAVAIR+5zezp07tW/fPnXt2jW4LTk5We3atdPHH3/sYjIAQLRy9UyvNPv27ZMk1a1bN2R73bp1g7eVpKCgQAUFBcH1/Pz88AQEAEQdjzHGuB1Ckjwej3Jzc5Weni5J+uijj9S+fXt98803ql+/fnC/Xr16yePxaP78+SXO8fv9GjduXLHtWVlZ8nq9YckOAAifQCCgnJwcOY4jn89XvmEmQkgyubm5wfUdO3YYSWbDhg0h+910001mxIgRp50TCASM4zjBJS8vz0gyjmRMGBa/3x+WucyufNmZXclmh0m0zg7nfMdxjCTjOE65Z0Xsc3qNGjVSvXr19MEHHwS35efna/Xq1br++utPe1xCQoJ8Pl/IAgCA5PJzekeOHNH27duD6zt37tTGjRtVs2ZNpaamKjMzU0899ZQaN26sRo0aKTs7WykpKcFLoAAAlIWrpbd27Vp17tw5uD5q1ChJUkZGhmbNmqVHHnlER48e1QMPPKBDhw6pQ4cOWrx4Mc/NAQDOiaul16lTJxljTnu7x+PRE088oSeeeOI8pgIAVFYR+5weAAAVjdIDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFiD0gMAWIPSAwBYg9IDAFjDY0r76vJKID8/X8nJycrKypLX63U7DgCgjAKBgHJycuQ4jnw+X/mGmUrOcRwjyTiSMWFY/H5/WOYyu/JlZ3Ylmx0m0To7nPODf8cdp9yzuLwJALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwxjmV3quvvqqCgoJi248fP65XX3213KEAAAiHcyq9QYMGyXGcYtsPHz6sQYMGlTsUAADhcE6lZ4yRx+Mptn3Pnj1KTk4udygAAMIhtiw7t2rVSh6PRx6PR126dFFs7P8/vLCwUDt37tTNN99c4SEBAKgIZSq99PR0SdLGjRuVlpam6tWrB2+Lj49Xw4YNddddd1VoQAAAKkqZSm/s2LGSpIYNG6p3797yer1hCXVKYWGh/H6/XnvtNe3bt08pKSkaOHCg/vSnP5V4eRUAgNKUqfROycjIqOgcJZowYYKmTZumV155RU2bNtXatWs1aNAgJScna8SIEeclAwCg8jin0qtSpUqpZ1qFhYXnHOiXPvroI91+++3q0aOHpJ/PMOfNm6dPPvmkQuYDAOxyTqX35ptvhpTeiRMntGHDBr3yyisaN25chYW74YYbNGPGDG3dulVXXHGFNm3apJUrV+q555477TEFBQUh7yHMz8+vsDwAgOjmMcaYiho2d+5czZ8/X2+99VaFzCsqKtJjjz2miRMnKiYmRoWFhXr66af16KOPnvYYv99fYvFmZWWF/TlIAEDFCwQCysnJkeM48vl85RtmKtCOHTtMYmJihc2bN2+eufjii828efPMp59+al599VVTs2ZNM2vWrNMeEwgEjOM4wSUvL89IMo5kTBgWv98flrnMrnzZwz47TJjNbLfnO45jJBnHcco965wub5bkp59+0gsvvKCLLrqookZq9OjRysrKUp8+fSRJzZs3165duzR+/PjTvpgmISFBCQkJFZYBAFB5nFPp1ahRI+Q5PWOMDh8+rGrVqum1116rsHDHjh1TlSqhHxoTExOjoqKiCrsPAIA9zqn0Jk2aFFJ6VapUUe3atdWuXTvVqFGjwsL17NlTTz/9tFJTU9W0aVNt2LBBzz33nAYPHlxh9wEAsMc5ld7AgQN16NAh/fWvf9XmzZslSU2aNNH1119foeFefPFFZWdn66GHHtKBAweUkpKi3//+93r88ccr9H4AAHY4pw+cXrt2rS6//HJNmjRJBw8e1MGDBzVp0iRddtllWr9+fYWFS0pK0uTJk7Vr1y799NNP2rFjh5566inFx8dX2H0AAOxxTmd6I0eOVM+ePTVz5szgh06fPHlS99
13nzIzM7VixYoKDQkAQEU4p9Jbu3ZtSOFJUmxsrB555BG1bt26wsIBAFCRzunyps/n0+7du4ttz8vLU1JSUrlDAQAQDudUer1799a///u/a/78+crLy1NeXp5ef/113Xffferbt29FZwQAoEKc0+XNv/zlL/J4PBowYIBOnjwpSYqLi9OQIUOUk5NToQEBAKgo51R68fHxev755zV+/Hjt2LFDknTZZZepWrVqFRoOAICKVK6PIatWrZqaN29eUVkAAAirc3pODwCAaETpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKzhMcYYt0OEU35+vpKTk5WVlSWv1+t2HABAGQUCAeXk5MhxHPl8vvINM5Wc4zhGknEkY8Kw+P3+sMxlduXLHvbZYcJsZrs9P/h33HHKPYvLmwAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrRHzp7d27V/fee69q1aqlqlWrqnnz5lq7dq3bsQAAUSjW7QCl+fHHH9W+fXt17txZixYtUu3atbVt2zbVqFHD7WgAgCgU0aU3YcIEXXLJJXr55ZeD2xo1auRiIgBANIvoy5sLFy5U69atdc8996hOnTpq1aqVZs6cWeoxBQUFys/PD1kAAJAkjzHGuB3idLxeryRp1KhRuueee7RmzRo9/PDDmj59ujIyMko8xu/3a9y4ccW2Z2VlBecBAKJHIBBQTk6OHMeRz+cr3zATweLi4sz1118fsm348OHmN7/5zWmPCQQCxnGc4JKXl2ckGUcyJgyL3+8Py1xmV77sYZ8dJsxmttvzHccxkozjOOWeFdGXN+vXr68mTZqEbLv66qu1e/fu0x6TkJAgn88XsgAAIEX4c3rt27fXli1bQrZt3bpVDRo0cCkRACCaRXTpjRw5UqtWrdIzzzyj7du3a+7cuZoxY4aGDh3qdjQAQBSK6NJr06aNcnNzNW/ePDVr1kxPPvmkJk+erH79+rkdDQAQhSL6fXqSdOutt+rWW291OwYAoBKI6DM9AAAqEqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwhscYY9wOEU75+flKTk5WVlaWvF6v23EAAGUUCASUk5Mjx3Hk8/nKN8xUco7jGEnGkYwJw+L3+8Myl9mVL3vYZ4cJs5nt9vzg33HHKfcsLm8CAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArEHpAQCsQekBAKxB6QEArBFVpZeTkyOPx6PMzEy3owAAolDUlN6aNWv00ksv6ZprrnE7CgAgSkVF6R05ckT9+vXTzJkzVaNGDbfjAACiVFSU3tChQ9WjRw917dr1jPsWFBQoPz8/ZAEAQJI8xhjjdojSvP7663r66ae1Zs0aeb1ederUSS1bttTkyZNL3N/v92vcuHHFtmdlZcnr9YY5LQCgogUCAeXk5MhxHPl8vvINMxFs9+7dpk6dOmbTpk3BbR07djQPP/zwaY8JBALGcZzgkpeXZyQZRzImDIvf7w/L3KifHUbhnM9sZjM78uY7jvPz33HHKfes2Ipo4XBZt26dDhw4oGuvvTa4rbCwUCtWrNCUKVNUUFCgmJiYkGMSEhKUkJBwvqMCAKJARJdely5d9K9//Stk26BBg3TVVVdpzJgxxQoPAIDSRHTpJSUlqVmzZiHbEhMTVatWrWLbAQA4k6h49SYAABUhos/0SrJs2TK3IwAAohRnegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AABrUHoAAGtQegAAa1B6AA
BrUHoAAGtQegAAa3iMMcbtEOGUn5+v5ORkZWVlyev1uh0HAFBGgUBAOTk5chxHPp+vfMNMJec4jpFkHMmYMCx+vz8sc6N+dhiFcz6zmc3syJsf/DvuOOWexeVNAIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUivvTGjx+vNm3aKCkpSXXq1FF6erq2bNnidiwAQBSK+NJbvny5hg4dqlWrVmnJkiU6ceKEunXrpqNHj7odDQAQZWLdDnAmixcvDlmfNWuW6tSpo3Xr1ummm25yKRUAIBpF/JnerzmOI0mqWbOmy0kAANEm4s/0fqmoqEiZmZlq3769mjVrVuI+BQUFKigoCK7n5+efr3gAgAjnMcYYt0OcrSFDhmjRokVauXKlLr744hL38fv9GjduXLHtWVlZ8nq94Y4IAKhggUBAOTk5chxHPp+vfMNMlBg6dKi5+OKLzVdffVXqfoFAwDiOE1zy8vKMJONIxoRh8fv9YZkb9bPDKJzzmc1sZkfefMdxfv477jjlnhXxlzeNMRo+fLhyc3O1bNkyNWrUqNT9ExISlJCQcJ7SAQCiScSX3tChQzV37ly99dZbSkpK0r59+yRJycnJqlq1qsvpAADRJOJfvTlt2jQ5jqNOnTqpfv36wWX+/PluRwMARJmIP9Mz0fM6GwBAhIv4Mz0AACoKpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsAalBwCwBqUHALAGpQcAsIbHVPKvJs/Pz1dycrKysrLk9XrdjgMAKKNAIKCcnBw5jiOfz1e+YaaScxzHSDKOZEwYFr/fH5a5UT87jMI5n9nMZnbkzQ/+HXeccs/i8iYAwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBqUHgDAGpQeAMAalB4AwBpRUXpTp05Vw4YN5fV61a5dO33yySduRwIARKGIL7358+dr1KhRGjt2rNavX68WLVooLS1NBw4ccDsaACDKRHzpPffcc7r//vs1aNAgNWnSRNOnT1e1atX0X//1X25HAwBEmYguvePHj2vdunXq2rVrcFuVKlXUtWtXffzxxy4mAwBEo1i3A5Tm+++/V2FhoerWrRuyvW7duvryyy9LPKagoEAFBQXBdcdxJEn5YcoYCASYXdLs/HBND+98ZjOb2ZE3/9RMY0z5h5kItnfvXiPJfPTRRyHbR48ebdq2bVviMWPHjjWSWFhYWFgq2bJjx45y90pEn+ldeOGFiomJ0f79+0O279+/X/Xq1SvxmEcffVSjRo0Krh86dEgNGjTQ7t27lZycHNa8FSk/P1+XXHKJ8vLy5PP53I5z1qI1txS92cl9fpH7/HMcR6mpqapZs2a5Z0V06cXHx+u6667TBx98oPT0dElSUVGRPvjgAw0bNqzEYxISEpSQkFBse3JyctT9i5Ykn89H7vMsWrOT+/wi9/lXpUr5X4YS0aUnSaNGjVJGRoZat26ttm3bavLkyTp69KgGDRrkdjQAQJSJ+NLr3bu3vvvuOz3++OPat2+fWrZsqcWLFxd7cQsAAGcS8aUnScOGDTvt5cwzSUhI0NixY0u85BnJyH3+RWt2cp9f5D7/KjK7x5iKeA0oAACRL6LfnA4AQEWi9AAA1qD0AADWoPQAANao9KUXbd/FN378eLVp00ZJSUmqU6eO0tPTtWXLFrdjlVlOTo48Ho8yMzPdjnJGe/fu1b333qtatWqpatWqat68udauXet2rFIVFhYqOztbjRo1UtWqVXXZZZfpySefrJjPJqxgK1asUM+ePZWSkiKPx6MFCxaE3G6M0eOPP6769euratWq6tq1q7Zt2+ZO2F8oLf
eJEyc0ZswYNW/eXImJiUpJSdGAAQP0zTffuBf4/5zp9/1LDz74oDwejyZPnnze8p3O2eTevHmzbrvtNiUnJysxMVFt2rTR7t27y3Q/lbr0ovG7+JYvX66hQ4dq1apVWrJkiU6cOKFu3brp6NGjbkc7a2vWrNFLL72ka665xu0oZ/Tjjz+qffv2iouL06JFi/TFF1/o2WefVY0aNdyOVqoJEyZo2rRpmjJlijZv3qwJEyZo4sSJevHFF92OVszRo0fVokULTZ06tcTbJ06cqBdeeEHTp0/X6tWrlZiYqLS0NAUCgfOcNFRpuY8dO6b169crOztb69ev15tvvqktW7botttucyFpqDP9vk/Jzc3VqlWrlJKScp6Sle5MuXfs2KEOHTroqquu0rJly/Tpp58qOztbXq+3bHdU7k/vjGBt27Y1Q4cODa4XFhaalJQUM378eBdTlc2BAweMJLN8+XK3o5yVw4cPm8aNG5slS5aYjh07mocfftjtSKUaM2aM6dChg9sxyqxHjx5m8ODBIdvuvPNO069fP5cSnR1JJjc3N7heVFRk6tWrZ/785z8Htx06dMgkJCSYefPmuZCwZL/OXZJPPvnESDK7du06P6HOwuly79mzx1x00UXms88+Mw0aNDCTJk0679lKU1Lu3r17m3vvvbfcsyvtmV5l+S6+U1+NVBEftHo+DB06VD169Aj5vUeyhQsXqnXr1rrnnntUp04dtWrVSjNnznQ71hndcMMN+uCDD7R161ZJ0qZNm7Ry5Up1797d5WRls3PnTu3bty/kv5fk5GS1a9cuqh6n0s+PVY/HowsuuMDtKKUqKipS//79NXr0aDVt2tTtOGelqKhI7777rq644gqlpaWpTp06ateuXamXbk+n0pZead/Ft2/fPpdSlU1RUZEyMzPVvn17NWvWzO04Z/T6669r/fr1Gj9+vNtRztpXX32ladOmqXHjxnrvvfc0ZMgQjRgxQq+88orb0UqVlZWlPn366KqrrlJcXJxatWqlzMxM9evXz+1oZXLqsRjNj1Pp5++RGzNmjPr27RvxH+Y8YcIExcbGasSIEW5HOWsHDhzQkSNHlJOTo5tvvlnvv/++7rjjDt15551avnx5mWZFxceQ2Wro0KH67LPPtHLlSrejnFFeXp4efvhhLVmypOzX2F1UVFSk1q1b65lnnpEktWrVSp999pmmT5+ujIwMl9Od3n//939rzpw5mjt3rpo2baqNGzcqMzNTKSkpEZ27Mjpx4oR69eolY4ymTZvmdpxSrVu3Ts8//7zWr18vj8fjdpyzVlRUJEm6/fbbNXLkSElSy5Yt9dFHH2n69Onq2LHjWc+qtGd65/JdfJFk2LBheuedd7R06VJdfPHFbsc5o3Xr1unAgQO69tprFRsbq9jYWC1fvlwvvPCCYmNjVVhY6HbEEtWvX19NmjQJ2Xb11VeX+RVh59vo0aODZ3vNmzdX//79NXLkyKg6y5YUfCxG6+P0VOHt2rVLS5YsifizvH/+8586cOCAUlNTg4/TXbt26Q9/+IMaNmzodrzTuvDCCxUbG1shj9VKW3q//C6+U059F9/111/vYrLSGWM0bNgw5ebm6sMPP1SjRo3cjnRWunTpon/961/auHFjcGndurX69eunjRs3KiYmxu2IJWrfvn2xt4Rs3bpVDRo0cCnR2Tl27Fix7xaLiYkJ/h9xtGjUqJHq1asX8jjNz8/X6tWrI/pxKv3/wtu2bZv+8Y9/qFatWm5HOqP+/fvr008/DXmcpqSkaPTo0Xrvvffcjnda8fHxatOmTYU8Viv15c1o/C6+oUOHau7cuXrrrbeUlJQUfF4jOTlZVatWdTnd6SUlJRV73jExMVG1atWK6OcjR44cqRtuuEHPPPOMevXqpU8++UQzZszQjBkz3I5Wqp49e+rpp59WamqqmjZtqg0bNui5557T4MGD3Y5WzJEjR7R9+/bg+s6dO7Vx40bVrFlTqampyszM1FNPPaXGjR
urUaNGys7OVkpKSvCLo91SWu769evr7rvv1vr16/XOO++osLAw+FitWbOm4uPj3Yp9xt/3r8s5Li5O9erV05VXXnm+o4Y4U+7Ro0erd+/euummm9S5c2ctXrxYb7/9tpYtW1a2Oyr36z8j3IsvvmhSU1NNfHy8adu2rVm1apXbkUolqcTl5ZdfdjtamUXDWxaMMebtt982zZo1MwkJCeaqq64yM2bMcDvSGeXn55uHH37YpKamGq/Xay699FLzH//xH6agoMDtaMUsXbq0xP+mMzIyjDE/v20hOzvb1K1b1yQkJJguXbqYLVu2uBvalJ57586dp32sLl26NGJzlyRS3rJwNrn/+te/mssvv9x4vV7TokULs2DBgjLfD18tBACwRqV9Tg8AgF+j9AAA1qD0AADWoPQAANag9AAA1qD0AADWoPQAANag9IAo06lTp6j4RnogEvHmdCDKHDx4UHFxcUpKSnI7ChB1KD0AgDW4vAlEmV9e3mzYsKGeeeYZDR48WElJSUpNTY34D8sG3ETpAVHu2WefVevWrbVhwwY99NBDGjJkSLGvYAHwM0oPiHK33HKLHnroIV1++eUaM2aMLrzwQi1dutTtWEBEovSAKHfNNdcE/9nj8ahevXo6cOCAi4mAyEXpAVEuLi4uZN3j8UTdN6gD5wulBwCwBqUHALAGpQcAsAZvTgcAWIMzPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDUoPQCANSg9AIA1KD0AgDX+H/WrcpIVI0hcAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from speechbrain.utils.dynamic_chunk_training import DynChunkTrainConfig\n", + "\n", + "chunked_model = TransformerASR(\n", + " tgt_vocab=64, input_size=64, d_model=64, nhead=1, d_ffn=64, \n", + " encoder_module=\"conformer\", normalize_before=True,\n", + " attention_type=\"RelPosMHAXL\",\n", + " num_encoder_layers=4, num_decoder_layers=0,\n", + " causal=False\n", + ")\n", + "chunked_model.eval()\n", + "chunked_conf = DynChunkTrainConfig(chunk_size=4, left_context_size=1)\n", + "chunked_deps = infer_dependency_matrix(lambda x: chunked_model.encode(x, dynchunktrain_config = chunked_conf), seq_shape=[1, 16, 64])\n", + "plot_dependency_matrix(chunked_deps)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "c02122de-e15e-4e63-a81e-71757786cb45", + "metadata": {}, + "source": [ + "As a reminder, it is normal for the _above_ to have e.g. the output at timestep $t=15$ depend on $t=0$. \n", + "In none of the layers does $t=15$ _directly_ attend to $t=0$. 
Read the chunked attention section for more details.\n", + "\n", + "If we want to see pure chunking without any left context, we can reduce the kernel size of the convolution module, disable left context entirely, and observe the following:" + ] + }, + { + "cell_type": "code", + "execution_count": 124, + "id": "562db0b9-da1a-45ee-a661-56c93dd46a1c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAb0AAAHHCAYAAAArl4bjAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAuyElEQVR4nO3deXQUZb7/8U+ThDSEdARkixpAxYVFQFlGQYGBS0QE48YyCAGuOiKLwAwSvBNp3AjMKKhwQThzRRGQex2DqAPKKMtwFWR3VGQTIaCAilRY7ADJ8/vDH31tE0IC6VR3nvfrnDrHerrqW9/u2P2hlu7yGGOMAACwQCW3GwAAoLwQegAAaxB6AABrEHoAAGsQegAAaxB6AABrEHoAAGsQegAAaxB6AABrEHpABPv666/l8Xg0Z84ct1spNx6PR36/3+02UEEReogqc+bMkcfjCU5er1fJyclKTU3VCy+8oKNHj7rdIlzyxRdfyO/36+uvv3a7FUQwQg9R6YknntDcuXM1Y8YMDR8+XJI0cuRINWvWTJ9++qnL3cENX3zxhSZMmEDooVixbjcAnI9u3bqpVatWwflx48bpww8/1O23366ePXtq69atqlKliosdAohE7Omhwvjtb3+rzMxM7dmzR6+99lrIY19++aXuuece1ahRQ16vV61atdLixYtDljlz6HTVqlX6/e9/r5o1a8rn82nAgAH68ccfC21vyZIluvnmm5WQkKDExER1795dn3/+ecgyAwcOVLVq1bR//36lpaWpWrVqqlWrlv74xz8qPz8/ZNkjR45o4MCBSkpK0kUXXaT09HQdOXKkyOdamufzv//7vxo9erRq1aqlhIQE3Xnnnfruu++KfD4dOnRQYmKifD6fWrdurfnz50uSxo8fr7i4uCLXe/DBB3XRRRcpEAgU2esvX4evvvpKqampSkhIUHJysp544gmV5EYvmzZtUrdu3eTz+VStWjV17txZa9asCXmu9957rySpU6dOwcPfK1asOGdt2IXQQ4XSv39/SdL7778fHPv888/1m9/8Rlu3blVGRoaeffZZJSQkKC0tTdnZ2YVqDBs2TFu3bpXf79eAAQM0b948paWlhXw4z507V927d1e1atU0adIkZWZm6osvvlD79u0LHV7Lz89Xamqqatasqb/85S/q0KGDnn32Wc2aNSu4jDFGd9xxh+bOnav77rtPTz31lPbt26f09PRC/ZX2+QwfPlxbtmzR+PHjNWTIEL399tsaNmxYyDJz5sxR9+7ddfjwYY0bN05ZWVlq0aKFli5dGnxdT58+rYULF4asd/LkSb3xxhu6++675fV6z/ZnCb4Ot956q+rUqaPJkyfrhhtu0Pjx4zV+/Phi1/v888918803a8uWLXr00UeVmZmp3bt3q2PHjlq7dq0k6ZZbbtGIESMkSY899pjmzp2ruXPn6tprry22NixkgCjy8ssvG0lm3bp1Z10mKSnJtGzZMjjfuXNn06xZMxMIBIJjBQUF5qabbjKNGjUqVPuGG24wJ0+eDI5PnjzZSDJvvfWWMcaYo0ePmosuusg88MADIds9cOCASUpKChl
PT083kswTTzwRsmzLli3NDTfcEJxftGiRkWQmT54cHDt9+rS5+eabjSTz8ssvn/fz6dKliykoKAiOjxo1ysTExJgjR44YY4w5cuSISUxMNG3btjU//fRTSJ+/XO/GG280bdu2DXn8zTffNJLM8uXLTXHOvA7Dhw8Pqd29e3dTuXJl89133wXHJZnx48cH59PS0kzlypXNrl27gmPffPONSUxMNLfccktw7H/+539K1Avsxp4eKpxq1aoFr+I8fPiwPvzwQ/Xq1UtHjx7V999/r++//14//PCDUlNTtWPHDu3fvz9k/QcffFBxcXHB+SFDhig2NlZ///vfJUnLli3TkSNH1Ldv32C977//XjExMWrbtq2WL19eqKeHHnooZP7mm2/WV199FZz/+9//rtjYWA0ZMiQ4FhMTE7xI54zzfT4ejydk2/n5+dqzZ0/w+Rw9elQZGRmF9tZ+ud6AAQO0du1a7dq1Kzg2b948XXbZZerQoUOh51yUX+5hejweDRs2TCdPntQ//vGPIpfPz8/X+++/r7S0NF1++eXB8Xr16ul3v/udVq9erdzc3BJtG5A4vIkK6NixY0pMTJQk7dy5U8YYZWZmqlatWiHTmcNqhw4dClm/UaNGIfPVqlVTvXr1goctd+zYIennc4i/rvn+++8Xquf1elWrVq2QserVq4ecJ9yzZ4/q1aunatWqhSx39dVXh8yfz/NJSUkptG1Jwe2fCbGmTZuqOL1791Z8fLzmzZsnSXIcR++884769esXEo5nU6lSpZDgkqSrrrpKks56xeV3332nEydOFHodJOnaa69VQUGBcnJyzrlt4Ayu3kSFsm/fPjmOoyuvvFKSVFBQIEn64x//qNTU1CLXObNsSZ2pOXfuXNWtW7fQ47GxoW+rmJiYUtUvybZL83zOtn1TggtIfql69eq6/fbbNW/ePD3++ON64403lJeXp/vuu69UdQA3EXqoUObOnStJwUA4s2cRFxenLl26lKjGjh071KlTp+D8sWPH9O233+q2226TJF1xxRWSpNq1a5e45rnUr19fH3zwgY4dOxayt7dt27aQ5c7n+ZzLmefz2WefnfMfAAMGDNAdd9yhdevWad68eWrZsqWaNGlSou0UFBToq6++Cu7dSdL27dslSQ0aNChynVq1aqlq1aqFXgfp5ytYK1WqpMsuu0ySSrS3CXB4ExXGhx9+qCeffFINGzZUv379JP0cTB07dtRLL72kb7/9ttA6RV2CP2vWLJ06dSo4P2PGDJ0+fVrdunWT9HOg+nw+PfPMMyHLFVfzXG677TadPn1aM2bMCI7l5+frxRdfDFnufJ7PuXTt2lWJiYmaOHFioa8d/HpvsFu3brr44os1adIkrVy5stR7edOmTQupPW3aNMXFxalz585FLh8TE6OuXbvqrbfeCjkEevDgQc2fP1/t27eXz+eTJCUkJEjSWb/mAUjs6SFKLVmyRF9++aVOnz6tgwcP6sMPP9SyZctUv359LV68OOSCjOnTp6t9+/Zq1qyZHnjgAV1++eU6ePCgPv74Y+3bt09btmwJqX3y5El17txZvXr10rZt2/Sf//mfat++vXr27ClJ8vl8mjFjhvr376/rr79effr0Ua1atbR37169++67ateuXciHe0n06NFD7dq1U0ZGhr7++ms1btxYb775phzHKbRsaZ/Pufh8Pk2ZMkX333+/Wrdurd/97neqXr26tmzZohMnTuiVV14JLhsXF6c+ffpo2rRpiomJUd++fUu8Ha/Xq6VLlyo9PV1t27bVkiVL9O677+qxxx4rdM7zl5566iktW7ZM7du318MPP6zY2Fi99NJLysvL0+TJk4PLtWjRQjExMZo0aZIcx1F8fLx++9vfqnbt2qV6PVDBuXrtKFBKZy7DPzNVrlzZ1K1b1/zbv/2bef75501ubm6R6+3atcsMGDDA1K1b18TFxZlLLrnE3H777eaNN94oVHvlypXmwQcfNNWrVzfVqlUz/fr
1Mz/88EOhmsuXLzepqakmKSnJeL1ec8UVV5iBAwea9evXB5dJT083CQkJhdYdP368+fXb74cffjD9+/c3Pp/PJCUlmf79+5tNmzYV+spCaZ/Pr7/esXz58iIv7V+8eLG56aabTJUqVYzP5zNt2rQxCxYsKNT7J598YiSZrl27Fn6hz+LM67Br1y7TtWtXU7VqVVOnTh0zfvx4k5+fH7KsfvWVBWOM2bhxo0lNTTXVqlUzVatWNZ06dTIfffRRoe3Mnj3bXH755SYmJoavL6BIHmNKeTYbqKDmzJmjQYMGad26dSE/cYZQW7ZsUYsWLfTqq68GfwzgXAYOHKg33nhDx44dC3N3QPE4pwegVGbPnq1q1arprrvucrsVoNQ4pwegRN5++2198cUXmjVrloYNGxa8cASIJoQegBIZPny4Dh48qNtuu00TJkxwux3gvLh6Tm/VqlX685//rA0bNujbb79Vdna20tLSQpbZunWrxo4dq5UrV+r06dNq3Lix/va3vxX6lQkAAM7F1XN6x48fV/PmzTV9+vQiH9+1a5fat2+va665RitWrNCnn36qzMzMc/6aOwAARYmYqzc9Hk+hPb0+ffooLi4u+CsbAABciIg9p1dQUKB3331Xjz76qFJTU7Vp0yY1bNhQ48aNK3QI9Jfy8vKUl5cXUufw4cOqWbMmP1MEAFHIGKOjR48qOTlZlSpd4AFKF78jGEKSyc7ODs5/++23RpKpWrWqee6558ymTZvMxIkTjcfjMStWrDhrnTNf+mViYmJiqlhTTk7OBWdNxB7e/Oabb3TJJZeob9++mj9/fnC5nj17KiEhQQsWLCiyzq/39BzHUUpKinJycoK/0VeWJk6cqHHjxpV5XWq7U5/a1C5x7ays8NTOyIjK2uGsnyvpMv38u6pJSUkXVCtiD29efPHFio2NVePGjUPGr732Wq1evfqs68XHxys+Pr7QuM/nC0voeb3esNSltjv1qU3tEtcOS+XorV0e9cviFFXE/iJL5cqV1bp160K3FNm+fbvq16/vUlcAgGjm6p7esWPHtHPnzuD87t27tXnzZtWoUUMpKSkaM2aMevfurVtuuUWdOnXS0qVL9fbbb2vFihXuNQ0AiFquht769etDbtY5evRoSVJ6errmzJmjO++8UzNnztTEiRM1YsQIXX311frb3/6m9u3bu9UyACCKuRp6HTt2LHSTyl8bPHiwBg8eXE4dAQAqsog9pwcAQFkj9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANbwmHPdxTXK5ebmKikpSRkZGfJ6vW63AwAopUAgoKysLDmOI5/Pd2HFTAXnOI6RZBzHCUt9v98flrrUdqc+tald4tpSWKZorR3O+o5UZp/jHN4EAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWMPV0Fu1apV69Oih5ORkeTweLVq06KzLPvTQQ/J4PJo6dWq59QcAqFhcDb3jx4+refPmmj59erHLZWdna82aNUpOTi6nzgAAFVGsmxvv1q2bunXrVuwy+/fv1/Dhw/Xee++pe/fu5dQZAKAicjX0zqWgoED9+/fXmDFj1KRJkxKtk5eXp7y8vOB8bm5uuNoDAEQZjzHGuN2EJHk8HmVnZystLS04NnHiRC1fvlzvvfeePB6PGjRooJEjR2rkyJFnreP3+zVhwoRC4xkZGfJ6vWHoHAAQToFAQFlZWXIcRz6f78KKmQghyWRnZwfn169fb+rUqWP2798fHKtfv76ZMmVKsXUCgYBxHCc45eTkGEnGcZyw9O33+8NSl9ru1Kc2tUtcWwrLFK21w1nfkcrsczxiv7Lwz3/+U4cOHVJKSopiY2MVGxurPXv26A9
/+IMaNGhw1vXi4+Pl8/lCJgAApAg+p9e/f3916dIlZCw1NVX9+/fXoEGDXOoKABDNXA29Y8eOaefOncH53bt3a/PmzapRo4ZSUlJUs2bNkOXj4uJUt25dXX311eXdKgCgAnA19NavX69OnToF50ePHi1JSk9P15w5c1zqCgBQUbkaeh07dpQpxcWjX3/9dfiaAQBUeBF7IQsAAGWN0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFjDY0pzF9colJubq6SkJGVkZMjr9brdDgCglAKBgLKysuQ4jnw+34UVMxWc4zhGknEcJyz1/X5/WOpS25361KZ2iWtLYZmitXY46ztSmX2Oc3gTAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1XQ2/VqlXq0aOHkpOT5fF4tGjRouBjp06d0tixY9WsWTMlJCQoOTlZAwYM0DfffONewwCAqOZq6B0/flzNmzfX9OnTCz124sQJbdy4UZmZmdq4caPefPNNbdu2TT179nShUwBARRDr5sa7deumbt26FflYUlKSli1bFjI2bdo0tWnTRnv37lVKSkp5tAgAqECi6pye4zjyeDy66KKL3G4FABCFXN3TK41AIKCxY8eqb9++8vl8Z10uLy9PeXl5wfnc3NzyaA8AEAU8xhjjdhOS5PF4lJ2drbS0tEKPnTp1Snfffbf27dunFStWFBt6fr9fEyZMKDSekZEhr9dbli0DAMpBIBBQVlaWHMcp9vO/REyEkGSys7MLjZ88edKkpaWZ6667znz//ffnrBMIBIzjOMEpJyfHSDKO44Sha2P8fn9Y6lLbnfpRXVsKy0Tts9QO598yCmuHs77jOGX2OR7RhzdPnTqlXr16aceOHVq+fLlq1qx5znXi4+MVHx9fDt0BAKKNq6F37Ngx7dy5Mzi/e/dubd68WTVq1FC9evV0zz33aOPGjXrnnXeUn5+vAwcOSJJq1KihypUru9U2ACBKuRp669evV6dOnYLzo0ePliSlp6fL7/dr8eLFkqQWLVqErLd8+XJ17NixvNoEAFQQroZex44dZYq5jqa4xwAAKK2o+p4eAAAXgtADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFjDYyr47clzc3OVlJSkjIwMeb1et9sBAJRSIBBQVlaWHMeRz+e7sGKmgnMcx0gyjuOEpb7f7w9LXWq7Uz+qa0thmah9ltrh/FtGYe1w1i/Lz3EObwIArEHoAQCsQegBAKxB6AEArEHoAQCsQegBAKxB6AEArEHoAQCsQegBAKxB6AEArEHoAQCsQegBAKxB6AEArEHoAQCsQegBAKxB6AEArOFq6K1atUo9evRQcnKyPB6PFi1aFPK4MUaPP/646tWrpypVqqhLly7asWOHO80CAKKeq6F3/PhxNW/eXNOnTy/y8cmTJ+uFF17QzJkztXbtWiUkJCg1NVWBQKCcOwUAVASxbm68W7du6tatW5GPGWM0depU/elPf9Idd9whSXr11VdVp04dLVq0SH369CnPVgEAFUDEntPbvXu3Dhw4oC5dugTHkpKS1LZtW3388ccudgYAiFau7ukV58CBA5KkOnXqhIzXqVMn+FhR8vLylJeXF5zPzc0NT4MAgKjjMcYYt5uQJI/Ho+zsbKWlpUmSPvroI7Vr107ffPON6tWrF1yuV69e8ng8WrhwYZF1/H6/JkyYUGg8IyNDXq83LL0DAMInEAgoKytLjuPI5/NdWDETISSZ7Ozs4PyuXbuMJLNp06aQ5W655RYzYsSIs9YJBALGcZzglJO
TYyQZx3HC0rff7w9LXWq7Uz+qa0thmah9ltrh/FtGYe1w1nccp8w+xyP2nF7Dhg1Vt25dffDBB8Gx3NxcrV27VjfeeONZ14uPj5fP5wuZAACQXD6nd+zYMe3cuTM4v3v3bm3evFk1atRQSkqKRo4cqaeeekqNGjVSw4YNlZmZqeTk5OAhUAAASsPV0Fu/fr06deoUnB89erQkKT09XXPmzNGjjz6q48eP68EHH9SRI0fUvn17LV26lHNzAIDz4mrodezYUaaY62g8Ho+eeOIJPfHEE+XYFQCgoorYc3oAAJQ1Qg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA2PKe7W5RVAbm6ukpKSlJGRIa/X63Y7AIBSCgQCysrKkuM48vl8F1bMVHCO4xhJxnGcsNT3+/1hqUttd+pHdW0pLBO1z1I7nH/LKKwdzvpl+TnO4U0AgDUIPQCANQg9AIA1CD0AgDUIPQCANQg9AIA1CD0AgDUIPQCANQg9AIA1CD0AgDUIPQCANQg9AIA1ziv0Xn31VeXl5RUaP3nypF599dULbgoAgHA4r9AbNGiQHMcpNH706FENGjTogpsCACAcziv0jDHyeDyFxvft26ekpKQLbgoAgHCILc3CLVu2lMfjkcfjUefOnRUb+3+r5+fna/fu3br11lvLvEkAAMpCqUIvLS1NkrR582alpqaqWrVqwccqV66sBg0a6O677y7TBgEAKCulCr3x48dLkho0aKDevXvL6/WGpakz8vPz5ff79dprr+nAgQNKTk7WwIED9ac//anIw6sAABSnVKF3Rnp6eln3UaRJkyZpxowZeuWVV9SkSROtX79egwYNUlJSkkaMGFEuPQAAKo7zCr1KlSoVu6eVn59/3g390kcffaQ77rhD3bt3l/TzHuaCBQv0ySeflEl9AIBdziv03nzzzZDQO3XqlDZt2qRXXnlFEyZMKLPmbrrpJs2aNUvbt2/XVVddpS1btmj16tV67rnnzrpOXl5eyHcIc3Nzy6wfAEB08xhjTFkVmz9/vhYuXKi33nqrTOoVFBToscce0+TJkxUTE6P8/Hw9/fTTGjdu3FnX8fv9RQZvRkZG2M9BAgDKXiAQUFZWlhzHkc/nu7Bipgzt2rXLJCQklFm9BQsWmEsvvdQsWLDAfPrpp+bVV181NWrUMHPmzDnrOoFAwDiOE5xycnKMJOM4Tpn19Ut+vz8sdaO+thS2KZz1o7p2OP+e1Ka2i/Udxymzz/HzOrxZlJ9++kkvvPCCLrnkkrIqqTFjxigjI0N9+vSRJDVr1kx79uzRxIkTz3oxTXx8vOLj48usBwBAxXFeoVe9evWQc3rGGB09elRVq1bVa6+9VmbNnThxQpUqhf5oTExMjAoKCspsGwAAe5xX6E2ZMiUk9CpVqqRatWqpbdu2ql69epk116NHDz399NNKSUlRkyZNtGnTJj333HMaPHhwmW0DAGCP8wq9gQMH6siRI/rrX/+qrVu3SpIaN26sG2+8sUybe/HFF5WZmamHH35Yhw4dUnJysn7/+9/r8ccfL9PtAADscF4/OL1+/XpdeeWVmjJlig4fPqzDhw9rypQpuuKKK7Rx48Yyay4xMVFTp07Vnj179NNPP2nXrl166qmnVLly5TLbBgDAHue1pzdq1Cj16NFDs2fPDv7o9OnTp3X//fdr5MiRWrVqVZk2CQBAWTiv0Fu/fn1I4ElSbGysHn30UbVq1arMmgMAoCyd1+FNn8+nvXv3FhrPyclRYmLiBTcFAEA4nFfo9e7dW//+7/+uhQsXKicnRzk5OXr99dd1//33q2/fvmXdIwAAZeK8Dm/+5S9/kcfj0YABA3T69GlJUlxcnIYMGaKsrKwybRAAgLJyXqFXuXJlPf/885o4caJ27dolSbriiitUtWrVMm0OAICydEE/Q1a1alU
1a9asrHoBACCszuucHgAA0YjQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFjDY4wxbjcRTrm5uUpKSlJGRoa8Xq/b7QAASikQCCgrK0uO48jn811YMVPBOY5jJBnHccJS3+/3h6Vu1NeWwjaFs35U1w7n35Pa1Haxfll+jnN4EwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGANQg8AYA1CDwBgDUIPAGCNiA+9/fv367777lPNmjVVpUoVNWvWTOvXr3e7LQBAFIp1u4Hi/Pjjj2rXrp06deqkJUuWqFatWtqxY4eqV6/udmsAgCgU0aE3adIkXXbZZXr55ZeDYw0bNnSxIwBANIvow5uLFy9Wq1atdO+996p27dpq2bKlZs+eXew6eXl5ys3NDZkAAJAkjzHGuN3E2Xi9XknS6NGjde+992rdunV65JFHNHPmTKWnpxe5jt/v14QJEwqNZ2RkBOsBAKJHIBBQVlaWHMeRz+e7sGImgsXFxZkbb7wxZGz48OHmN7/5zVnXCQQCxnGc4JSTk2MkGcdxwtKj3+8PS92ory2FbQpn/aiuHc6/J7Wp7WJ9x3HK7HM8og9v1qtXT40bNw4Zu/baa7V3796zrhMfHy+fzxcyAQAgRfg5vXbt2mnbtm0hY9u3b1f9+vVd6ggAEM0iOvRGjRqlNWvW6JlnntHOnTs1f/58zZo1S0OHDnW7NQBAFIro0GvdurWys7O1YMECNW3aVE8++aSmTp2qfv36ud0aACAKRfT39CTp9ttv1+233+52GwCACiCi9/QAAChLhB4AwBqEHgDAGoQeAMAahB4AwBqEHgDAGoQeAMAahB4AwBqEHgDAGoQeAMAahB4AwBqEHgDAGoQeAMAahB4AwBqEHgDAGoQeAMAaHmOMcbuJcMrNzVVSUpIyMjLk9XrdbgcAUEqBQEBZWVlyHEc+n+/CipkKznEcI8k4jhOW+n6/Pyx1o762FLYpnPWjunY4/57UpraL9cvyc5zDmwAAaxB6AABrEHoAAGsQegAAaxB6AABrEHoAAGsQegAAaxB6AABrEHoAAGsQegAAaxB6AABrEHoAAGsQegAAaxB6AABrEHoAAGsQegAAaxB6AABrRFXoZWVlyePxaOTIkW63AgCIQlETeuvWrdNLL72k6667zu1WAABRKipC79ixY+rXr59mz56t6tWru90OACBKRUXoDR06VN27d1eXLl3OuWxeXp5yc3NDJgAAJMljjDFuN1Gc119/XU8//bTWrVsnr9erjh07qkWLFpo6dWqRy/v9fk2YMKHQeEZGhrxeb5i7BQCUtUAgoKysLDmOI5/Pd2HFTATbu3evqV27ttmyZUtwrEOHDuaRRx456zqBQMA4jhOccnJyjCTjSMaEYfL7/WGpG/W1wyic9alNbWpHXn3HcX7+HHecC64VWxYpHC4bNmzQoUOHdP311wfH8vPztWrVKk2bNk15eXmKiYkJWSc+Pl7x8fHl3SoAIApEdOh17txZ//rXv0LGBg0apGuuuUZjx44tFHgAABQnokMvMTFRTZs2DRlLSEhQzZo1C40DAHAuUXH1JgAAZSGi9/SKsmLFCrdbAABEKfb0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADW8BhjjNtNhFNubq6SkpKUkZEhr9frdjsAgFIKBALKysqS4zjy+XwXVsxUcI7jGEnGkYwJw+T3+8NSN+prh1E461Ob2tSOvPrBz3HHueBaHN4EAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWIPQAwBYg9ADAFiD0AMAWCPiQ2/
ixIlq3bq1EhMTVbt2baWlpWnbtm1utwUAiEIRH3orV67U0KFDtWbNGi1btkynTp1S165ddfz4cbdbAwBEmVi3GziXpUuXhszPmTNHtWvX1oYNG3TLLbe41BUAIBpF/J7erzmOI0mqUaOGy50AAKJNxO/p/VJBQYFGjhypdu3aqWnTpkUuk5eXp7y8vOB8bm5uebUHAIhwHmOMcbuJkhoyZIiWLFmi1atX69JLLy1yGb/frwkTJhQaz8jIkNfrDXeLAIAyFggElJWVJcdx5PP5LqyYiRJDhw41l156qfnqq6+KXS4QCBjHcYJTTk6OkWQcyZgwTH6/Pyx1o752GIWzPrWpTe3Iq+84zs+f445zwbUi/vCmMUbDhw9Xdna2VqxYoYYNGxa7fHx8vOLj48upOwBANIn40Bs6dKjmz5+vt956S4mJiTpw4IAkKSkpSVWqVHG5OwBANIn4qzdnzJghx3HUsWNH1atXLzgtXLjQ7dYAAFEm4vf0TPRcZwMAiHARv6cHAEBZIfQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANYg9AAA1iD0AADWIPQAANbwmAp+a/Lc3FwlJSUpIyNDXq/X7XYAAKUUCASUlZUlx3Hk8/kurJip4BzHMZKMIxkThsnv94elbtTXDqNw1qc2takdefWDn+OOc8G1OLwJALAGoQcAsAahBwCwBqEHALAGoQcAsAahBwCwBqEHALAGoQcAsAahBwCwBqEHALAGoQcAsAahBwCwBqEHALAGoQcAsAahBwCwBqEHALBGVITe9OnT1aBBA3m9XrVt21affPKJ2y0BAKJQxIfewoULNXr0aI0fP14bN25U8+bNlZqaqkOHDrndGgAgykR86D333HN64IEHNGjQIDVu3FgzZ85U1apV9V//9V9utwYAiDIRHXonT57Uhg0b1KVLl+BYpUqV1KVLF3388ccudgYAiEaxbjdQnO+//175+fmqU6dOyHidOnX05ZdfFrlOXl6e8vLygvOO40iScsPUYyAQoHZRtXPDVT289alNbWpHXv0zNY0xF17MRLD9+/cbSeajjz4KGR8zZoxp06ZNkeuMHz/eSGJiYmJiqmDTrl27LjhXInpP7+KLL1ZMTIwOHjwYMn7w4EHVrVu3yHXGjRun0aNHB+ePHDmi+vXra+/evUpKSgprv2UpNzdXl112mXJycuTz+dxup8SitW8penun7/JF3+XPcRylpKSoRo0aF1wrokOvcuXKuuGGG/TBBx8oLS1NklRQUKAPPvhAw4YNK3Kd+Ph4xcfHFxpPSkqKuj+0JPl8PvouZ9HaO32XL/ouf5UqXfhlKBEdepI0evRopaenq1WrVmrTpo2mTp2q48ePa9CgQW63BgCIMhEfer1799Z3332nxx9/XAcOHFCLFi20dOnSQhe3AABwLhEfepI0bNiwsx7OPJf4+HiNHz++yEOekYy+y1+09k7f5Yu+y19Z9u4xpiyuAQUAIPJF9JfTAQAoS4QeAMAahB4AwBqEHgDAGhU+9KLtXnwTJ05U69atlZiYqNq1aystLU3btm1zu61Sy8rKksfj0ciRI91u5Zz279+v++67TzVr1lSVKlXUrFkzrV+/3u22ipWfn6/MzEw1bNhQVapU0RVXXKEnn3yybH6bsIytWrVKPXr0UHJysjwejxYtWhTyuDFGjz/+uOrVq6cqVaqoS5cu2rFjhzvN/kJxfZ86dUpjx45Vs2bNlJCQoOTkZA0YMEDffPONew3/f+d6vX/poYceksfj0dSpU8utv7MpSd9bt25Vz549lZSUpISEBLVu3Vp79+4t1XYqdOhF4734Vq5cqaFDh2rNmjVatmyZTp06pa5du+r48eNut1Zi69at00svvaTrrrvO7VbO6ccff1S7du0UFxenJUuW6IsvvtCzzz6r6tWru91asSZNmqQZM2Zo2rRp2rp1qyZ
NmqTJkyfrxRdfdLu1Qo4fP67mzZtr+vTpRT4+efJkvfDCC5o5c6bWrl2rhIQEpaamKhAIlHOnoYrr+8SJE9q4caMyMzO1ceNGvfnmm9q2bZt69uzpQqehzvV6n5Gdna01a9YoOTm5nDor3rn63rVrl9q3b69rrrlGK1as0KeffqrMzEx5vd7SbeiCf70zgrVp08YMHTo0OJ+fn2+Sk5PNxIkTXeyqdA4dOmQkmZUrV7rdSokcPXrUNGrUyCxbtsx06NDBPPLII263VKyxY8ea9u3bu91GqXXv3t0MHjw4ZOyuu+4y/fr1c6mjkpFksrOzg/MFBQWmbt265s9//nNw7MiRIyY+Pt4sWLDAhQ6L9uu+i/LJJ58YSWbPnj3l01QJnK3vffv2mUsuucR89tlnpn79+mbKlCnl3ltxiuq7d+/e5r777rvg2hV2T6+i3IvvzK2RyuKHVsvD0KFD1b1795DXPZItXrxYrVq10r333qvatWurZcuWmj17ttttndNNN92kDz74QNu3b5ckbdmyRatXr1a3bt1c7qx0du/erQMHDoT8/5KUlKS2bdtG1ftU+vm96vF4dNFFF7ndSrEKCgrUv39/jRkzRk2aNHG7nRIpKCjQu+++q6uuukqpqamqXbu22rZtW+yh27OpsKFX3L34Dhw44FJXpVNQUKCRI0eqXbt2atq0qdvtnNPrr7+ujRs3auLEiW63UmJfffWVZsyYoUaNGum9997TkCFDNGLECL3yyitut1asjIwM9enTR9dcc43i4uLUsmVLjRw5Uv369XO7tVI5816M5vep9PN95MaOHau+fftG/I85T5o0SbGxsRoxYoTbrZTYoUOHdOzYMWVlZenWW2/V+++/rzvvvFN33XWXVq5cWapaUfEzZLYaOnSoPvvsM61evdrtVs4pJydHjzzyiJYtW1b6Y+wuKigoUKtWrfTMM89Iklq2bKnPPvtMM2fOVHp6usvdnd1///d/a968eZo/f76aNGmizZs3a+TIkUpOTo7oviuiU6dOqVevXjLGaMaMGW63U6wNGzbo+eef18aNG+XxeNxup8QKCgokSXfccYdGjRolSWrRooU++ugjzZw5Ux06dChxrQq7p3c+9+KLJMOGDdM777yj5cuX69JLL3W7nXPasGGDDh06pOuvv16xsbGKjY3VypUr9cILLyg2Nlb5+flut1ikevXqqXHjxiFj1157bamvCCtvY8aMCe7tNWvWTP3799eoUaOiai9bUvC9GK3v0zOBt2fPHi1btizi9/L++c9/6tChQ0pJSQm+T/fs2aM//OEPatCggdvtndXFF1+s2NjYMnmvVtjQ++W9+M44cy++G2+80cXOimeM0bBhw5Sdna0PP/xQDRs2dLulEuncubP+9a9/afPmzcGpVatW6tevnzZv3qyYmBi3WyxSu3btCn0lZPv27apfv75LHZXMiRMnCt1bLCYmJvgv4mjRsGFD1a1bN+R9mpubq7Vr10b0+1T6v8DbsWOH/vGPf6hmzZput3RO/fv316effhryPk1OTtaYMWP03nvvud3eWVWuXFmtW7cuk/dqhT68GY334hs6dKjmz5+vt956S4mJicHzGklJSapSpYrL3Z1dYmJiofOOCQkJqlmzZkSfjxw1apRuuukmPfPMM+rVq5c++eQTzZo1S7NmzXK7tWL16NFDTz/9tFJSUtSkSRNt2rRJzz33nAYPHux2a4UcO3ZMO3fuDM7v3r1bmzdvVo0aNZSSkqKRI0fqqaeeUqNGjdSwYUNlZmYqOTk5eONotxTXd7169XTPPfdo48aNeuedd5Sfnx98r9aoUUOVK1d2q+1zvt6/Due4uDjVrVtXV199dXm3GuJcfY8ZM0a9e/fWLbfcok6dOmnp0qV6++23tWLFitJt6IKv/4xwL774oklJSTGVK1c2bdq0MWvWrHG7pWJJKnJ6+eWX3W6t1KLhKwvGGPP222+bpk2bmvj4eHPNNdeYWbNmud3SOeX
m5ppHHnnEpKSkGK/Xay6//HLzH//xHyYvL8/t1gpZvnx5kf9Pp6enG2N+/tpCZmamqVOnjomPjzedO3c227Ztc7dpU3zfu3fvPut7dfny5RHbd1Ei5SsLJen7r3/9q7nyyiuN1+s1zZs3N4sWLSr1dri1EADAGhX2nB4AAL9G6AEArEHoAQCsQegBAKxB6AEArEHoAQCsQegBAKxB6AFRpmPHjlFxR3ogEvHldCDKHD58WHFxcUpMTHS7FSDqEHoAAGtweBOIMr88vNmgQQM988wzGjx4sBITE5WSkhLxP5YNuInQA6Lcs88+q1atWmnTpk16+OGHNWTIkEK3YAHwM0IPiHK33XabHn74YV155ZUaO3asLr74Yi1fvtzttoCIROgBUe66664L/rfH41HdunV16NAhFzsCIhehB0S5uLi4kHmPxxN1d1AHyguhBwCwBqEHALAGoQcAsAZfTgcAWIM9PQCANQg9AIA1CD0AgDUIPQCANQg9AIA1CD0AgDUIPQCANQg9AIA1CD0AgDUIPQCANQg9AIA1CD0AgDX+H0X2Q5dMw1JTAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from speechbrain.utils.dynamic_chunk_training import DynChunkTrainConfig\n", + "\n", + "chunked_model_nopast = TransformerASR(\n", + " tgt_vocab=64, input_size=64, d_model=64, nhead=1, d_ffn=64, \n", + " encoder_module=\"conformer\", normalize_before=True,\n", + " attention_type=\"RelPosMHAXL\",\n", + " num_encoder_layers=4, num_decoder_layers=0,\n", + " kernel_size=1,\n", + " causal=False\n", + ")\n", + "chunked_model_nopast.eval()\n", + "chunked_conf = DynChunkTrainConfig(chunk_size=4, left_context_size=0)\n", + "chunked_deps = infer_dependency_matrix(lambda x: chunked_model_nopast.encode(x, dynchunktrain_config = chunked_conf), seq_shape=[1, 16, 64])\n", + "plot_dependency_matrix(chunked_deps)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "d308b9b7-fc7c-4e6f-a8d8-5f502346d3fd", + "metadata": {}, + "source": [ + "## Inference: The gory details\n", + "\n", + "### Wrapping the feature extractor for inference\n", + "\n", + "We briefly touched on wrapping the feature extractor for streaming inference. The Conformer feature extractor we use here has essentially three layers:\n", + "\n", + "1. Filterbank extraction, derived directly from a fourier transform of the signal (see the [Fourier Transforms and Spectrograms](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/fourier-transform-and-spectrograms.html) and [Speech Features](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-features.html) tutorials)\n", + "2. Normalization (which we choose to apply per-chunk at streaming, as described earlier -- it doesn't really affect things)\n", + "3. Two downsampling CNNs, each being a convolution with a stride of 2, effectively dividing the time dimension by 4.\n", + "\n", + "We have two problems here:\n", + "\n", + "- We define chunk size at transformer level (after feature extraction). 
Thus, we need to know exactly how many frames we should give the extractor to get the expected shape. For this, we need to know exactly how the feature extractor transforms the shape.\n", + "- We need to handle left/past and right/future context correctly for the feature extractor to behave basically exactly the same as it did during training.\n", + "\n", + "Let's try to visualize the problem. We will define a fairly standard Conformer feature extractor for a 16kHz input waveform. Note that the stride on the x axis is 16, meaning 1ms. Thus, the x axis counts can be seen as milliseconds (and there is in reality 16x more input samples than shown in the plot)." + ] + }, + { + "cell_type": "code", + "execution_count": 78, + "id": "01984d22-5b02-4e05-a452-c0d872e7844c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABNEAAAB4CAYAAADc8GRLAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAh/ElEQVR4nO3deViVdf7/8ddhV+CAKAhU4lJZGppJkpNmhZeIVlpWSpZIZuVWythi1yhYTZpldZWNLVNaLo3VZXs2rllNZKZl48YoqbgAmgZCxv75/dGP8+0Eetj03Hiej+viujyfe3m/buaeD+e8O/d924wxRgAAAAAAAABOysvdAQAAAAAAAACro4kGAAAAAAAAuEATDQAAAAAAAHCBJhoAAAAAAADgAk00AAAAAAAAwAWaaAAAAAAAAIALNNEAAAAAAAAAF2iiAQAAAAAAAC7QRAMAAAAAAABcoIkGAABgYXv37pXNZtPChQvdHeWMsdlsysjIcHcMAAAAJzTRAABAs7Jw4ULZbDbHT0BAgKKjo5WYmKjnn39eRUVF7o4IN9m+fbsyMjK0d+9ed0cBAABnIZpoAACgWXr00Ue1aNEizZ8/X5MmTZIkTZ48WbGxsfrxxx/dnA7usH37ds2cOZMmGgAAOC183B0AAACgIZKSkhQXF+d4PW3aNK1du1bXXXedbrjhBu3YsUMtWrRwY0IAAACcTfgmGgAAOGtce+21mj59uvbt26fFixc7Ldu5c6duvvlmhYWFKSAgQHFxcfrwww+d1qm+VPSLL77QPffco9atW8tut2vUqFH65ZdfatRbsWKF+vbtq8DAQAUHB2vw4MHatm2b0zqjR49WUFCQDh48qKFDhyooKEjh4eGaOnWqKisrndYtKCjQ6NGjFRISotDQUKWkpKigoKDWY63P8fznP/9RWlqawsPDFRgYqBtvvFFHjhyp9Xj69eun4OBg2e12XX755Vq6dKkkKT09Xb6+vrVud/fddys0NFQlJSW1Zv3j7+Gnn35SYmKiAgMDFR0drUcffVTGmJNuV+37779XUlKS7Ha7goKClJCQoG+++cbpWG+55RZJ0jXXXOO43Pfzzz93uW8AAIC6oIkGAADOKnfccYckaeXKlY6xbdu26YorrtCOHTv08MMPa+7cuQoMDNT
QoUP13nvv1djHxIkTtWPHDmVkZGjUqFFasmSJhg4d6tTsWbRokQYPHqygoCA9+eSTmj59urZv364+ffrUuJywsrJSiYmJat26tZ5++mn169dPc+fO1SuvvOJYxxijIUOGaNGiRbr99tv1+OOP68CBA0pJSamRr77HM2nSJG3ZskXp6ekaN26cPvroI02cONFpnYULF2rw4ME6duyYpk2bptmzZ+vSSy/VZ5995vi9VlRUaNmyZU7blZWV6d1339WwYcMUEBBwsv9ZHL+HgQMHqm3btpozZ4569uyp9PR0paenn3K7bdu2qW/fvtqyZYsefPBBTZ8+XXv27NHVV1+tDRs2SJKuuuoq3XfffZKkRx55RIsWLdKiRYt08cUXn3LfAAAAdWYAAACakQULFhhJZuPGjSddJyQkxPTo0cPxOiEhwcTGxpqSkhLHWFVVlfnLX/5iLrjgghr77tmzpykrK3OMz5kzx0gyH3zwgTHGmKKiIhMaGmrGjh3rVDcvL8+EhIQ4jaekpBhJ5tFHH3Vat0ePHqZnz56O1++//76RZObMmeMYq6ioMH379jWSzIIFCxp8PP379zdVVVWO8SlTphhvb29TUFBgjDGmoKDABAcHm/j4ePPbb7855fzjdr179zbx8fFOy5cvX24kmXXr1plTqf49TJo0yWnfgwcPNn5+fubIkSOOcUkmPT3d8Xro0KHGz8/PZGdnO8YOHTpkgoODzVVXXeUYe+edd+qUBQAAoCH4JhoAADjrBAUFOZ7SeezYMa1du1a33nqrioqK9PPPP+vnn3/W0aNHlZiYqF27dungwYNO2999993y9fV1vB43bpx8fHz06aefSpJWrVqlgoICJScnO/b3888/y9vbW/Hx8Vq3bl2NTPfee6/T6759++qnn35yvP7000/l4+OjcePGOca8vb0dD02o1tDjsdlsTrUrKyu1b98+x/EUFRXp4YcfrvFtsj9uN2rUKG3YsEHZ2dmOsSVLlui8885Tv379ahxzbf74DTibzaaJEyeqrKxMq1evrnX9yspKrVy5UkOHDlXHjh0d41FRUbrtttv01Vdf6fjx43WqDQAA0Bg00QAAwFmnuLhYwcHBkqTdu3fLGKPp06crPDzc6af6MsLDhw87bX/BBRc4vQ4KClJUVJTjMs1du3ZJ+v0ebH/e58qVK2vsLyAgQOHh4U5jrVq1crrP2r59+xQVFaWgoCCn9Tp37uz0uiHH065duxq1JTnqVzfFLrnkEp3K8OHD5e/vryVLlkiSCgsL9fHHH2vkyJFOzbaT8fLycmqESdKFF14oSSd9ouaRI0d04sSJGr8HSbr44otVVVWl/fv3u6wNAADQWDydEwAAnFUOHDigwsJCnX/++ZKkqqoqSdLUqVOVmJhY6zbV69ZV9T4XLVqkyMjIGst9fJzfYnl7e9dr/3WpXZ/jOVl9U4cb+v9Rq1atdN1112nJkiWaMWOG3n33XZWWlur222+v134AAACaI5poAADgrLJo0SJJcjSYqr/55Ovrq/79+9dpH7t27dI111zjeF1cXKzc3FwNGjRIktSpUydJUkRERJ336UpMTIzWrFmj4uJip2+jZWVlOa3XkONxpfp4tm7d6rKhOGrUKA0ZMkQbN27UkiVL1KNHD3Xt2rVOdaqqqvTTTz85vn0mSf/73/8kSe3bt691m/DwcLVs2bLG70H6/QmlXl5eOu+88ySpTt+GAwAAaCgu5wQAAGeNtWvX6rHHHlOHDh00cuRISb83uq6++mq9/PLLys3NrbHNkSNHaoy98sorKi8vd7yeP3++KioqlJSUJOn3Bp3dbtcTTzzhtN6p9unKoEGDVFFRofnz5zvGKisr9cILLzit15DjcWXAgAEKDg7WrFmzVFJS4rTsz99WS0pKUps2bfTkk09q/fr19f4W2rx585z2PW/ePPn6+iohIaHW9b29vTVgwAB98MEHTpd85ufna+nSperTp4/sdrskKTAwUJJUUFBQr0w
AAAB1wTfRAABAs7RixQrt3LlTFRUVys/P19q1a7Vq1SrFxMToww8/dLpB/osvvqg+ffooNjZWY8eOVceOHZWfn6/MzEwdOHBAW7Zscdp3WVmZEhISdOuttyorK0v/+Mc/1KdPH91www2SJLvdrvnz5+uOO+7QZZddphEjRig8PFw5OTn65JNPdOWVVzo1i+ri+uuv15VXXqmHH35Ye/fuVZcuXbR8+XIVFhbWWLe+x+OK3W7Xs88+q7vuukuXX365brvtNrVq1UpbtmzRiRMn9MYbbzjW9fX11YgRIzRv3jx5e3srOTm5znUCAgL02WefKSUlRfHx8VqxYoU++eQTPfLIIzXuGfdHjz/+uFatWqU+ffpo/Pjx8vHx0csvv6zS0lLNmTPHsd6ll14qb29vPfnkkyosLJS/v7+uvfZaRURE1Ov3AQAAUBuaaAAAoFmaMWOGJMnPz09hYWGKjY3Vc889p9TUVMdDBap16dJF3333nWbOnKmFCxfq6NGjioiIUI8ePRz7+aN58+Y57vtVXl6u5ORkPf/8806XC952222Kjo7W7Nmz9dRTT6m0tFTnnHOO+vbtq9TU1Hofj5eXlz788ENNnjxZixcvls1m0w033KC5c+eqR48ejTqeuhgzZowiIiI0e/ZsPfbYY/L19dVFF12kKVOm1Fh31KhRmjdvnhISEhQVFVXnGt7e3vrss880btw4PfDAAwoODlZ6errLzF27dtWXX36padOmadasWaqqqlJ8fLwWL16s+Ph4x3qRkZF66aWXNGvWLI0ZM0aVlZVat24dTTQAANAkbKa+d5QFAAA4Sy1cuFCpqanauHGj4uLi3B3HsrZs2aJLL71Ub775pu644446bTN69Gi9++67Ki4uPs3pAAAATg/uiQYAAIB6efXVVxUUFKSbbrrJ3VEAAADOGC7nBAAAQJ189NFH2r59u1555RVNnDjRcSN/AAAAT0ATDQAAAHUyadIk5efna9CgQZo5c6a74wAAAJxR3BMNAAAAAAAAcIF7ogEAAAAAAAAu0EQDAAAAAAAAXGjW90SrqqrSoUOHFBwcLJvN5u44AAAAAAAAcCNjjIqKihQdHS0vr6b97lizbqIdOnRI5513nrtjAAAAAAAAwEL279+vc889t0n32awv5wwODpb0+y+msLDQ6efhhx+uMXay8fqse7rHqekZWTylppWyeEpNK2XxlJpWyuIpNa2UxVNqWimLp9S0UhZPqWmlLJ5Ss0FZpBo/TTF+OvdtpZpWyuIpNa2UxVNq7tfvqntGTcmt30TLyMio8Xj0zp07a+fOnXXavvoSTrvdLrvd7rQsICCgxtjJxuuz7ukep6ZnZPGUmlbK4ik1rZTFU2paKYun1LRSFk+paaUsnlLTSlk8paaVsnhKzQZlqTHaNOOnc99WqmmlLJ5S00pZPKVmtdNx2y+3X87ZtWtXrV692vHax8ftkQAAAAAAAAAnbu9Y+fj4KDIy0t0xAAAAAAAAgJNy+z3Rdu3apejoaHXs2FEjR45UTk7OSdctLS3V8ePHnX4AAAAAAACA081mjDHuKr5ixQoVFxerc+fOys3N1cyZM3Xw4EFt3bq11hvA1XYPNen3G8kFBAScicgAAAAAAACwqJKSEs2ePVuFhYW13l+xUYyF/PLLL8Zut5t//vOftS4vKSkxhYWFjp/9+/cbSaawsLDGuhkZGbXuo7bx+qx7usep6RlZPKWmlbJ4Sk0rZfGUmlbK4ik1rZTFU2paKcsZqSk5/dQ25q5xanpGFk+paaUsnlLTSlk8paaVsnhKzULJnKxX1FgNupzzzTffVGlpaY3xsrIyvfnmmw1u6IWGhurCCy/U7t27a13u7+8vu93u9AMAAAAAAACcbg1qoqWmpqqwsLDGeFFRkVJTUxscpri4WNnZ2YqKimrwPgAAAAAAAICm1qAmmjFGNputxviBAwcUEhJS5/1MnTpV69ev1969e/X111/rxhtvlLe3t5KTkxsSCwA
AAAAAADgt6vVggR49eshms2nLli3q2rWrfHx8HMsqKyu1Z88eDRw4UG+//Xad9jdixAh98cUXOnr0qMLDw9WnTx/9/e9/V6dOneq0/fHjxxUSEsKDBQAAAAAAAGCdBwtkZGSYjIwMY7PZzNSpUx2vMzIyzBNPPGGWLl1qSktLm/zGbSdTWFh40pvFWe4GtRbO4ik1rZTFU2paKYun1LRSFk+p2aAsZ9GNW7lBr2fUtFIWT6lppSyeUtNKWTylppWyNPuap/N9y2ka95SaVsriKTVP1StqLB9XTbY/Sk9PlyS1b99ew4cP59tfAAAAAAAA8Aj1aqJVS0lJaeocAAAAAAAAgGU1qInm5eVV64MFqlVWVjY4EAAAAAAAAGA1DWqiLV++3KmJVl5eru+//15vvPGGZs6c2WThAAAAAAAAACuo19M5XVm6dKmWLVumDz74oKl2eUo8nRMAAAAAAADVLPN0Tleys7NNYGBgU+7ylHg6JzWba5YzUtMCTxw6a59+ZOGaVsriKTWtlMVTalopi6fUtFIWT6lppSxn7XslC2fxlJpWyuIpNa2UxVNqWimLp9Q8nU/n9GqqZtxvv/2m559/Xuecc05T7RIAAAAAAACwhAbdE61Vq1ZO90QzxqioqEgtW7bU4sWLmywcAAAAAAAAYAUNaqI9++yzTk00Ly8vhYeHKz4+Xq1atWqycAAAAAAAAIAVNKiJNnr0aBUUFOi1117Tjh07JEldunRR7969mzQcAAAAAAAAYAUNejrnd999p4EDByogIEC9evWSJG3cuFG//fabVq5cqcsuu6zJg9aGp3MCAAAAAACgmuWeztmnTx8zevRoU15e7hgrLy83KSkppm/fvk3zyIM6cDxxoRk+5chKWTylppWyeEpNK2XxlJpWyuIpNa2UpdnXrIXVn/7kKTWtlMVTalopi6fUtFIWT6lppSyeUtNKWTylppWyeErN0/l0zgZdzvndd9/p1VdflY/P/23u4+OjBx98UHFxcU3U3gMAAAAAAACswashG9ntduXk5NQY379/v4KDgxsdCgAAAAAAALCSBjXRhg8frjFjxmjZsmXav3+/9u/fr3/961+66667lJyc3NQZAQAAAAAAALdq0OWcTz/9tGw2m0aNGqWKigpJkq+vr8aNG6fZs2c3aUAAAAAAAADA3Rr0dM5qJ06cUHZ2tiSpU6dOatmyZZMFqwuezgkAAAAAAIBqlns6p1XwdE5qNtcsnlLTSlk85Uk0VsriKTWtlMVTalopi6fUtFIWT6lppSyeUtNKWTylppWyeEpNK2XxlJpWyuIpNU/n0zkbdE+0pvTiiy+qffv2CggIUHx8vL799lt3RwIAAAAAAACcuLWJtmzZMqWlpSk9PV2bN29W9+7dlZiYqMOHD7szFgAAAAAAAODErU20Z555RmPHjlVqaqq6dOmil156SS1bttTrr7/uzlgAAAAAAACAkwY9nbMplJWVadOmTZo2bZpjzMvLS/3791dmZmat25SWlqq0tNTxurCwUJJ0vJZ1S0pK6jxen3VP9zg1PSOLp9S0UpaSkhIdP15z7dM57o6aVsriKTWtlMVTalopi6fUtFIWT6lppSyeUtNKWTylppWyeEpNK2XxlJpWyuIpNatfG2NqrN9YjXo6Z2McOnRI55xzjr7++mv17t3bMf7ggw9q/fr12rBhQ41tMjIyNHPmzDMZEwAAAAAAAM1Mdna2Onbs2KT7dNs30Rpi2rRpSktLc7wuKChQTEyMcnJyFBIS4sZkaK6OHz+u8847T/v372/6R9/CY3AeobE4h9BYnENoCpxHaCzOITQW5xCaQmFhodq1a6ewsLAm37fbmmht2rSRt7e38vPzncbz8/MVGRlZ6zb+/v7y9/evMR4SEsL/wdAodrudcwiNxnmExuIcQmNxDqEpcB6hsTiH0FicQ2gKXl5eTb/PJt9jHfn5+alnz55as2aNY6yqqkpr1qxxurwTAAAAAAAAcDe3Xs6
ZlpamlJQUxcXFqVevXnruuef066+/KjU11Z2xAAAAAAAAACdubaINHz5cR44c0YwZM5SXl6dLL71Un332mdq2bVun7f39/ZWenl7rJZ5AXXAOoSlwHqGxOIfQWJxDaAqcR2gsziE0FucQmsLpPI/c9nROAAAAAAAAoLlw2z3RAAAAAAAAgOaCJhoAAAAAAADgAk00AAAAAAAAwAWaaAAAAAAAAIALzbqJ9uKLL6p9+/YKCAhQfHy8vv32W3dHgkXNmjVLl19+uYKDgxUREaGhQ4cqKyvLaZ2rr75aNpvN6efee+91U2JYTUZGRo3z46KLLnIsLykp0YQJE9S6dWsFBQVp2LBhys/Pd2NiWE379u1rnEM2m00TJkyQxByE2n3xxRe6/vrrFR0dLZvNpvfff99puTFGM2bMUFRUlFq0aKH+/ftr165dTuscO3ZMI0eOlN1uV2hoqMaMGaPi4uIzeBRwp1OdQ+Xl5XrooYcUGxurwMBARUdHa9SoUTp06JDTPmqbv2bPnn2GjwTu4moeGj16dI3zY+DAgU7rMA/B1XlU23skm82mp556yrEOc5Hnqsvn+bp8HsvJydHgwYPVsmVLRURE6IEHHlBFRUW9sjTbJtqyZcuUlpam9PR0bd68Wd27d1diYqIOHz7s7miwoPXr12vChAn65ptvtGrVKpWXl2vAgAH69ddfndYbO3ascnNzHT9z5sxxU2JYUdeuXZ3Oj6+++sqxbMqUKfroo4/0zjvvaP369Tp06JBuuukmN6aF1WzcuNHp/Fm1apUk6ZZbbnGswxyEP/v111/VvXt3vfjii7UunzNnjp5//nm99NJL2rBhgwIDA5WYmKiSkhLHOiNHjtS2bdu0atUqffzxx/riiy909913n6lDgJud6hw6ceKENm/erOnTp2vz5s1avny5srKydMMNN9RY99FHH3WanyZNmnQm4sMCXM1DkjRw4ECn8+Ott95yWs48BFfn0R/Pn9zcXL3++uuy2WwaNmyY03rMRZ6pLp/nXX0eq6ys1ODBg1VWVqavv/5ab7zxhhYuXKgZM2bUL4xppnr16mUmTJjgeF1ZWWmio6PNrFmz3JgKzcXhw4eNJLN+/XrHWL9+/cz999/vvlCwtPT0dNO9e/dalxUUFBhfX1/zzjvvOMZ27NhhJJnMzMwzlBDNzf333286depkqqqqjDHMQXBNknnvvfccr6uqqkxkZKR56qmnHGMFBQXG39/fvPXWW8YYY7Zv324kmY0bNzrWWbFihbHZbObgwYNnLDus4c/nUG2+/fZbI8ns27fPMRYTE2OeffbZ0xsOzUJt51BKSooZMmTISbdhHsKf1WUuGjJkiLn22mudxpiLUO3Pn+fr8nns008/NV5eXiYvL8+xzvz5843dbjelpaV1rt0sv4lWVlamTZs2qX///o4xLy8v9e/fX5mZmW5MhuaisLBQkhQWFuY0vmTJErVp00aXXHKJpk2bphMnTrgjHixq165dio6OVseOHTVy5Ejl5ORIkjZt2qTy8nKnOemiiy5Su3btmJNQq7KyMi1evFh33nmnbDabY5w5CPWxZ88e5eXlOc09ISEhio+Pd8w9mZmZCg0NVVxcnGOd/v37y8vLSxs2bDjjmWF9hYWFstlsCg0NdRqfPXu2WrdurR49euipp56q9+UvOLt9/vnnioiIUOfOnTVu3DgdPXrUsYx5CPWVn5+vTz75RGPGjKmxjLkIUs3P83X5PJaZmanY2Fi1bdvWsU5iYqKOHz+ubdu21bm2T1McwJn2888/q7Ky0ungJalt27bauXOnm1KhuaiqqtLkyZN15ZVX6pJLLnGM33bbbYqJiVF0dLR+/PFHPfTQQ8rKytLy5cvdmBZWER8fr4ULF6pz587Kzc3VzJkz1bdvX23dulV5eXny8/Or8YGjbdu2ysvLc09gWNr777+vgoICjR492jHGHIT6qp5fans/VL0sLy9PERERTst9fHwUFhbG/IQaSkpK9NB
DDyk5OVl2u90xft999+myyy5TWFiYvv76a02bNk25ubl65pln3JgWVjFw4EDddNNN6tChg7Kzs/XII48oKSlJmZmZ8vb2Zh5Cvb3xxhsKDg6ucWsU5iJItX+er8vnsby8vFrfM1Uvq6tm2UQDGmPChAnaunWr0/2sJDndlyE2NlZRUVFKSEhQdna2OnXqdKZjwmKSkpIc/+7WrZvi4+MVExOjt99+Wy1atHBjMjRHr732mpKSkhQdHe0YYw4C4E7l5eW69dZbZYzR/PnznZalpaU5/t2tWzf5+fnpnnvu0axZs+Tv73+mo8JiRowY4fh3bGysunXrpk6dOunzzz9XQkKCG5OhuXr99dc1cuRIBQQEOI0zF0E6+ef5M6VZXs7Zpk0beXt713jSQn5+viIjI92UCs3BxIkT9fHHH2vdunU699xzT7lufHy8JGn37t1nIhqamdDQUF144YXavXu3IiMjVVZWpoKCAqd1mJNQm3379mn16tW66667TrkecxBcqZ5fTvV+KDIyssZDlyoqKnTs2DHmJzhUN9D27dunVatWOX0LrTbx8fGqqKjQ3r17z0xANCsdO3ZUmzZtHH+/mIdQH19++aWysrJcvk+SmIs80ck+z9fl81hkZGSt75mql9VVs2yi+fn5qWfPnlqzZo1jrKqqSmvWrFHv3r3dmAxWZYzRxIkT9d5772nt2rXq0KGDy21++OEHSVJUVNRpTofmqLi4WNnZ2YqKilLPnj3l6+vrNCdlZWUpJyeHOQk1LFiwQBERERo8ePAp12MOgisdOnRQZGSk09xz/PhxbdiwwTH39O7dWwUFBdq0aZNjnbVr16qqqsrRqIVnq26g7dq1S6tXr1br1q1dbvPDDz/Iy8urxiV6gCQdOHBAR48edfz9Yh5Cfbz22mvq2bOnunfv7nJd5iLP4erzfF0+j/Xu3Vv//e9/nZr61f/hqEuXLnXO0mwv50xLS1NKSori4uLUq1cvPffcc/r111+Vmprq7miwoAkTJmjp0qX64IMPFBwc7LjmOSQkRC1atFB2draWLl2qQYMGqXXr1vrxxx81ZcoUXXXVVerWrZub08MKpk6dquuvv14xMTE6dOiQ0tPT5e3treTkZIWEhGjMmDFKS0tTWFiY7Ha7Jk2apN69e+uKK65wd3RYSFVVlRYsWKCUlBT5+Pzfn2DmIJxMcXGx07cR9+zZox9++EFhYWFq166dJk+erMcff1wXXHCBOnTooOnTpys6OlpDhw6VJF188cUaOHCgxo4dq5deeknl5eWaOHGiRowY4XQ5Mc5epzqHoqKidPPNN2vz5s36+OOPVVlZ6XiPFBYWJj8/P2VmZmrDhg265pprFBwcrMzMTE2ZMkW33367WrVq5a7Dwhl0qnMoLCxMM2fO1LBhwxQZGans7Gw9+OCDOv/885WYmCiJeQi/c/X3TPr9PwS98847mjt3bo3tmYs8m6vP83X5PDZgwAB16dJFd9xxh+bMmaO8vDz97W9/04QJE+p3OXBTPF7UXV544QXTrl074+fnZ3r16mW++eYbd0eCRUmq9WfBggXGGGNycnLMVVddZcLCwoy/v785//zzzQMPPGAKCwvdGxyWMXz4cBMVFWX8/PzMOeecY4YPH252797tWP7bb7+Z8ePHm1atWpmWLVuaG2+80eTm5roxMazo3//+t5FksrKynMaZg3Ay69atq/XvV0pKijHGmKqqKjN9+nTTtm1b4+/vbxISEmqcX0ePHjXJyckmKCjI2O12k5qaaoqKitxwNHCHU51De/bsOel7pHXr1hljjNm0aZOJj483ISEhJiAgwFx88cXmiSeeMCUlJe49MJwxpzqHTpw4YQYMGGDCw8ONr6+viYmJMWPHjjV5eXlO+2Aegqu/Z8YY8/LLL5sWLVqYgoKCGtszF3k2V5/njanb57G9e/eapKQk06JFC9OmTRvz17/+1ZSXl9cri+3/BwIAAAAAAABwEs3ynmgAAAAAAADAmUQ
TDQAAAAAAAHCBJhoAAAAAAADgAk00AAAAAAAAwAWaaAAAAAAAAIALNNEAAAAAAAAAF2iiAQAAAAAAAC7QRAMAAGhmrr76ak2ePNndMQAAADyKzRhj3B0CAAAAdXfs2DH5+voqODjY3VEAAAA8Bk00AAAAAAAAwAUu5wQAAGhm/ng5Z/v27fXEE0/ozjvvVHBwsNq1a6dXXnnFvQEBAADOQjTRAAAAmrm5c+cqLi5O33//vcaPH69x48YpKyvL3bEAAADOKjTRAAAAmrlBgwZp/PjxOv/88/XQQw+pTZs2WrdunbtjAQAAnFVoogEAADRz3bp1c/zbZrMpMjJShw8fdmMiAACAsw9NNAAAgGbO19fX6bXNZlNVVZWb0gAAAJydaKIBAAAAAAAALtBEAwAAAAAAAFygiQYAAAAAAAC4YDPGGHeHAAAAAAAAAKyMb6IBAAAAAAAALtBEAwAAAAAAAFygiQYAAAAAAAC4QBMNAAAAAAAAcIEmGgAAAAAAAOACTTQAAAAAAADABZpoAAAAAAAAgAs00QAAAAAAAAAXaKIBAAAAAAAALtBEAwAAAAAAAFygiQYAAAAAAAC4QBMNAAAAAAAAcOH/AQtJ2lWzVL0aAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from speechbrain.utils.streaming import infer_dependency_matrix, plot_dependency_matrix\n", + "from hyperpyyaml import load_hyperpyyaml\n", + "from matplotlib import pyplot as plt\n", + "\n", + "feat_extractor_hparams = load_hyperpyyaml(\"\"\"\n", + "compute_features: !new:speechbrain.lobes.features.Fbank\n", + " sample_rate: 16000\n", + " n_fft: 512\n", + " n_mels: 80\n", + " win_length: 32\n", + "\n", + "cnn: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd\n", + " input_shape: (8, 10, 80)\n", + " num_blocks: 2\n", + " num_layers_per_block: 1\n", + " out_channels: (64, 32)\n", + " kernel_sizes: (3, 3)\n", + " strides: (2, 2)\n", + " residuals: (False, False)\n", + "\n", + "feat_extractor: !new:speechbrain.nnet.containers.LengthsCapableSequential\n", + " - !ref \n", + " - !ref \n", + "\n", + "properties: !apply:speechbrain.utils.filter_analysis.stack_filter_properties\n", + " - [!ref , !ref ]\n", + "\"\"\")\n", + "feat_extractor_hparams[\"cnn\"].eval()\n", + "\n", + "feat_extractor_deps = infer_dependency_matrix(\n", + " # we need some shape magic here to adapt the input and output shape to what infer_dependency_matrix expects\n", + " # for the input, squeeze the feature dimension\n", + " # for the output, flatten the channels dim as the output is of shape [batch, t, c0, c1]\n", + " lambda x: feat_extractor_hparams[\"feat_extractor\"](x.squeeze(-1)).flatten(2),\n", + " # 100ms audio (@16kHz)\n", + " seq_shape=[1, 3200, 1],\n", + " # 1ms stride (@16kHz)\n", + " in_stride=16\n", + ")\n", + "feat_extractor_fig = plot_dependency_matrix(feat_extractor_deps)\n", + "feat_extractor_fig.set_size_inches(15, 10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "5b68025c-ae74-4a0d-88d0-038efd62002e", + "metadata": {}, + "source": [ + "#### Using and defining filter properties\n", + "\n", + "To solve this:\n", + "\n", + "1. 
We consider filterbank extraction and the CNN as filters (in the signal processing sense) with a specific stride, kernel size (plus dilation and causality which we don't use here). In SpeechBrain, this data is represented as [`FilterProperties`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.filter_analysis.html#speechbrain.utils.filter_analysis.FilterProperties). \n", + "2. We provide [`get_filter_properties` methods](https://speechbrain.readthedocs.io/en/latest/search.html?q=get_filter_properties&check_keywords=yes&area=default) for some modules (note that not many have it yet). \n", + "3. [`stack_filter_properties`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.filter_analysis.html#speechbrain.utils.filter_analysis.stack_filter_properties) is then used to \"stack\" these filters and get the resulting properties for the entire feature extractor.\n", + "\n", + "Let's demonstrate these." + ] + }, + { + "cell_type": "code", + "execution_count": 63, + "id": "016df691-6805-41b2-b415-0cb0d787bb50", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Filter properties of the fbank module (including the STFT):\n", + " fbank -> FilterProperties(window_size=512, stride=160, dilation=1, causal=False)\n", + "\n", + "Filter properties of the downsampling CNN:\n", + " ... of each layer:\n", + " cnn[0] -> FilterProperties(window_size=3, stride=2, dilation=1, causal=False)\n", + " cnn[1] -> FilterProperties(window_size=3, stride=2, dilation=1, causal=False)\n", + "\n", + " ... 
with both layers stacked:\n", + " cnn -> FilterProperties(window_size=7, stride=4, dilation=1, causal=False)\n", + "\n", + "Properties of the whole extraction module (fbank+CNN stacked):\n", + " both -> FilterProperties(window_size=1473, stride=640, dilation=1, causal=False)\n" + ] + } + ], + "source": [ + "from speechbrain.utils.filter_analysis import stack_filter_properties\n", + "\n", + "print(f\"\"\"Filter properties of the fbank module (including the STFT):\n", + " fbank -> {feat_extractor_hparams['compute_features'].get_filter_properties()}\n", + "\n", + "Filter properties of the downsampling CNN:\n", + " ... of each layer:\n", + " cnn[0] -> {feat_extractor_hparams['cnn']['convblock_0'].get_filter_properties()}\n", + " cnn[1] -> {feat_extractor_hparams['cnn']['convblock_1'].get_filter_properties()}\n", + "\n", + " ... with both layers stacked:\n", + " cnn -> {feat_extractor_hparams['cnn'].get_filter_properties()}\n", + "\n", + "Properties of the whole extraction module (fbank+CNN stacked):\n", + " both -> {feat_extractor_hparams['properties']}\"\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "b47aacee-9812-4d19-a557-380edf6ee84e", + "metadata": {}, + "source": [ + "The stride of the extraction module is 640 input frames. Since we're dealing with 16kHz, that amounts to a stride of effectively around `640/16000=40ms`. \n", + "And thus, a chunk size of 16 basically means we will shift the input by `16*40ms=640ms` every chunk, excluding worries about window size and padding. \n", + "Note how stride is relatively straightforward to compute here. You might notice how it is the product of all three strides. Window size is a bit more involved, see [`FilterProperties.with_on_top`](https://speechbrain.readthedocs.io/en/latest/_modules/speechbrain/utils/filter_analysis.html#FilterProperties.with_on_top) for the whole implementation.\n", + "\n", + "The end result is that we can treat the feature extractor as a simple filter with known properties. 
This solves both of our earlier problems, because we know both:\n", + "\n", + "- The number of input frames that are required for the feature extractor to produce `chunk_size` timesteps (at transformer level).\n", + "- The window size (along with the other properties) which enables us to know exactly how many frames we need to keep around as left and right context.\n", + "  - **Important note:** The effective window size of the feature extractor has a direct negative implication on latency! The window size should never get so high that it gets close to the chunk size (in terms of input frames).\n", + "\n", + "#### Automatically wrapping a non-streaming extractor\n", + "\n", + "[`StreamingFeatureWrapper`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.features.html#speechbrain.lobes.features.StreamingFeatureWrapper) is the final piece of the feature extraction puzzle, and it turns our somewhat arbitrary feature extractor into a purely chunkwise one, which takes a fixed, known number of input frames (see [`StreamingFeatureWrapper.forward`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.features.html#speechbrain.lobes.features.StreamingFeatureWrapper.forward)). \n", + "This is a low-level demonstration use of it if you are curious; this is otherwise fully abstracted by `StreamingASR`!"
+ ] + }, + { + "cell_type": "code", + "execution_count": 85, + "id": "a3c9542a-48c1-414f-9fd8-768d2c2c823a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chunk size selected: 4 (2556 frames, 159.750ms)\n", + "(bs, t, ch0, ch1) = (1, 4, 20, 32)\n", + "(bs, t, ch0, ch1) = (1, 4, 20, 32)\n", + "(bs, t, ch0, ch1) = (1, 4, 20, 32)\n", + "(bs, t, ch0, ch1) = (1, 4, 20, 32)\n" + ] + } + ], + "source": [ + "from speechbrain.lobes.features import StreamingFeatureWrapper\n", + "from speechbrain.utils.dynamic_chunk_training import DynChunkTrainConfig\n", + "\n", + "feature_wrapper = StreamingFeatureWrapper(\n", + " module=feat_extractor_hparams[\"feat_extractor\"],\n", + " properties=feat_extractor_hparams[\"properties\"]\n", + ")\n", + "\n", + "filter_properties = feat_extractor_hparams[\"properties\"]\n", + "chunk_size = 4\n", + "# see: StreamingFeatureWrapper.forward docs\n", + "# reason for the `-1` is that the stride is only applied `window_size-1` times in such a filter\n", + "chunk_size_frames = (filter_properties.stride - 1) * chunk_size\n", + "batch_size = 1\n", + "\n", + "# a fair amount of streaming stuff carries around _streaming contexts_, which are opaque objects\n", + "# that you are meant to reuse across calls for the same streaming session.\n", + "# these will be detailed further in the next subsection.\n", + "streaming_context = feature_wrapper.make_streaming_context()\n", + "\n", + "print(f\"Chunk size selected: {chunk_size} ({chunk_size_frames} frames, {1000*chunk_size_frames/16000:.3f}ms)\")\n", + "\n", + "for t in range(4): # imagine we're iterating over a stream, etc.\n", + " sample_chunk = torch.rand((batch_size, chunk_size_frames))\n", + " latest_outs = feature_wrapper(sample_chunk, context=streaming_context)\n", + " print(f\"(bs, t, ch0, ch1) = {tuple(latest_outs.shape)}\") # output for our chunk!\n", + "\n", + "# normally you _may_ have to inject a final chunk of zeros.\n", + "# see StreamingASR 
for an example implementation." + ] + }, + { + "cell_type": "markdown", + "id": "7794fbc5-2436-4dd5-807a-cd5d28f8e82a", + "metadata": {}, + "source": [ + "This doesn't tell us _what_ it really does. [The source code for `StreamingFeatureWrapper.forward`](https://speechbrain.readthedocs.io/en/latest/_modules/speechbrain/lobes/features.html#StreamingFeatureWrapper.forward) really best illustrates this:\n", + "\n", + "```python\n", + "feat_pad_size = self.get_required_padding()\n", + "num_outputs_per_pad = self.get_output_count_per_pad_frame()\n", + "\n", + "# consider two audio chunks of 6 samples (for the example), where\n", + "# each sample is denoted by 1, 2, ..., 6\n", + "# so chunk 1 is 123456 and chunk 2 is 123456\n", + "if context.left_context is None:\n", + " # for the first chunk we left pad the input by two padding's worth of zeros,\n", + " # and truncate the right, so that we can pretend to have right padding and\n", + " # still consume the same amount of samples every time\n", + " #\n", + " # our first processed chunk will look like:\n", + " # 0000123456\n", + " # ^^ right padding (truncated)\n", + " # ^^^^^^ frames that some outputs are centered on\n", + " # ^^ left padding (truncated)\n", + " chunk = torch.nn.functional.pad(chunk, (feat_pad_size * 2, 0))\n", + "else:\n", + " # prepend left context\n", + " #\n", + " # for the second chunk ownwards, given the above example:\n", + " # 34 of the previous chunk becomes left padding\n", + " # 56 of the previous chunk becomes the first frames of this chunk\n", + " # thus on the second iteration (and onwards) it will look like:\n", + " # 3456123456\n", + " # ^^ right padding (truncated)\n", + " # ^^^^^^ frames that some outputs are centered on\n", + " # ^^ left padding (truncated)\n", + " chunk = torch.cat((context.left_context, chunk), 1)\n", + "\n", + "# our chunk's right context will become the start of the \"next processed chunk\"\n", + "# plus we need left padding for that one, so make it double\n", + 
"context.left_context = chunk[:, -feat_pad_size * 2 :]\n", + "\n", + "feats = self.module(chunk, *extra_args, **extra_kwargs)\n", + "\n", + "# truncate left and right context\n", + "feats = feats[:, num_outputs_per_pad:-num_outputs_per_pad, ...]\n", + "\n", + "return feats\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "03d0bfe1-6452-4b56-97f0-9b9a9bea15db", + "metadata": {}, + "source": [ + "In the above case, we effectively induce padding/latency of >80ms, as shown below:" + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "id": "6f4f7fa0-c8e0-471c-8317-a4eb401298e9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "80.0ms\n" + ] + } + ], + "source": [ + "print(f\"{1000 * feature_wrapper.get_required_padding() / 16000}ms\")" + ] + }, + { + "cell_type": "markdown", + "id": "ed27ad5a-3e50-46e6-b872-b30e82be0103", + "metadata": {}, + "source": [ + "This isn't _great_, but is not a _huge_ penalty to typical chunk sizes in the order of 500-1000ms. \n", + "It does, however, underline that while you can poke around the feature extractor, you should remain careful of its effective window size not exploding." + ] + }, + { + "cell_type": "markdown", + "id": "329f7ab7-fb9e-4bc5-abf6-ff9de025d2d2", + "metadata": {}, + "source": [ + "### Streaming context objects\n", + "\n", + "In order to implement streaming, we need to cache/store arbitrary tensors of context to reuse for subsequent chunks.\n", + "\n", + "Because these are extremely model-specific, and don't really ever share functionality, these were implemented in the form of dataclasses that are:\n", + "\n", + "- **Mutable**: The context object gets updated after a forward pass for a given chunk. 
The same object should be passed over again for the next processed chunk.\n", + "- **Recursive**: A context object might contain any amount of context objects.\n", + "- **Opaque**: These contained context objects can be of an arbitrary other context object type, treated as black boxes that are to be moved around.\n", + "\n", + "Each such object holds any required configuration and a batch of streaming sessions. The object is to be reused for each subsequent call of whatever layer it's called on.\n", + "\n", + "#### Examples\n", + "\n", + "The top-level class of our Conformer encoder is the [`TransformerASR`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.models.transformer.TransformerASR.html#module-speechbrain.lobes.models.transformer.TransformerASR) abstraction.\n", + "\n", + "[`TransformerASRStreamingContext`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.models.transformer.TransformerASR.html#speechbrain.lobes.models.transformer.TransformerASR.TransformerASRStreamingContext) is its relevant \"streaming context\". It is defined as:\n", + "\n", + "```python\n", + "@dataclass\n", + "class TransformerASRStreamingContext:\n", + " \"\"\"Streaming metadata and state for a `TransformerASR` instance.\"\"\"\n", + "\n", + " dynchunktrain_config: DynChunkTrainConfig\n", + " \"\"\"Dynamic Chunk Training configuration holding chunk size and context size\n", + " information.\"\"\"\n", + "\n", + " encoder_context: Any\n", + " \"\"\"Opaque encoder context information. It is constructed by the encoder's\n", + " `make_streaming_context` method and is passed to the encoder when using\n", + " `encode_streaming`.\n", + " \"\"\"\n", + "```\n", + "\n", + "In the above case, `encoder_context` is a field of an arbitrary type, depending on the exact encoder selected. \n", + "`TransformerASR` doesn't need to know the details of it; it merely needs to be able to create, store, and pass it around (e.g. 
`TransformerASR.encode_streaming` will call `encoder.forward_streaming` with `context=context.encoder_context`).\n", + "\n", + "For the Conformer, this would be a [`ConformerEncoderStreamingContext`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.models.transformer.Conformer.html#speechbrain.lobes.models.transformer.Conformer.ConformerEncoderStreamingContext), which needs to hold a list of [`ConformerEncoderLayerStreamingContext`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.models.transformer.Conformer.html#speechbrain.lobes.models.transformer.Conformer.ConformerEncoderLayerStreamingContext)s (documentation stripped):\n", + "\n", + "```python\n", + "@dataclass\n", + "class ConformerEncoderStreamingContext:\n", + " dynchunktrain_config: DynChunkTrainConfig\n", + " layers: List[ConformerEncoderLayerStreamingContext]\n", + "```\n", + "\n", + "The [`ConformerEncoderLayerStreamingContext`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.models.transformer.Conformer.html#speechbrain.lobes.models.transformer.Conformer.ConformerEncoderLayerStreamingContext) itself needs to store a bunch of tensors (documentation stripped):\n", + "\n", + "```python\n", + "@dataclass\n", + "class ConformerEncoderLayerStreamingContext:\n", + " mha_left_context_size: int\n", + " mha_left_context: Optional[torch.Tensor] = None\n", + " dcconv_left_context: Optional[torch.Tensor] = None\n", + "```\n", + "\n", + "Thus, if you had a `TransformerASRStreamingContext` object configured for a Conformer, you could reach the first layer's `mha_left_context` cache object through `context.encoder_context.layers[0].mha_left_context`.\n", + "\n", + "#### Creating streaming context objects\n", + "\n", + "Much like each module will have a corresponding `StreamingContext` dataclass, they should also specify a `make_streaming_context` method. This will either be called by a parent module or by the user. 
Usually, it will take a single [`DynChunkTrainConfig`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.utils.dynamic_chunk_training.html#speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig) object, with some exceptions. \n", + "And much like the data structure of contexts is arbitrarily recursive, `make_streaming_context` may call into submodules' own `make_streaming_context` method.\n", + "\n", + "Let's demonstrate by reusing a `TransformerASR` we initialized earlier:" + ] + }, + { + "cell_type": "code", + "execution_count": 106, + "id": "7b3fc845-5fc2-409c-a29a-724ef2f44543", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "TransformerASRStreamingContext(dynchunktrain_config=DynChunkTrainConfig(chunk_size=16, left_context_size=2), encoder_context=ConformerEncoderStreamingContext(dynchunktrain_config=DynChunkTrainConfig(chunk_size=16, left_context_size=2), layers=[ConformerEncoderLayerStreamingContext(mha_left_context_size=32, mha_left_context=None, dcconv_left_context=None), ConformerEncoderLayerStreamingContext(mha_left_context_size=32, mha_left_context=None, dcconv_left_context=None), ConformerEncoderLayerStreamingContext(mha_left_context_size=32, mha_left_context=None, dcconv_left_context=None), ConformerEncoderLayerStreamingContext(mha_left_context_size=32, mha_left_context=None, dcconv_left_context=None)]))" + ] + }, + "execution_count": 106, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "test_context = chunked_model.make_streaming_context(DynChunkTrainConfig(16, 2))\n", + "test_context" + ] + }, + { + "cell_type": "markdown", + "id": "3169a70b-ffeb-4f7a-b43a-1004dc2df3e8", + "metadata": {}, + "source": [ + "### Streaming forward methods\n", + "\n", + "For the modules that require a streaming context, streaming inference requires using a different method than the usual `forward`, typically `forward_streaming` (but not necessarily, e.g. 
see `TransformerASR.encode_streaming`).\n", + "\n", + "See the following practical example. Here, we choose a **chunk size of 16**, and a **left context size of 4 chunks**. Watch, for each subsequent chunk, the left context tensor growing in size as the left context comes in. Once enough chunks have been accumulated, the left context tensor stays at that length." + ] + }, + { + "cell_type": "code", + "execution_count": 140, + "id": "0ef82c0f-5288-4cce-aef1-38178d2285b6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "chunk #0:\n", + "\tbefore forward MHA left context: (None)\n", + "\tencode_streaming output: (1, 16, 64)\n", + "chunk #1:\n", + "\tbefore forward MHA left context: (1, 16, 64)\n", + "\tencode_streaming output: (1, 16, 64)\n", + "chunk #2:\n", + "\tbefore forward MHA left context: (1, 32, 64)\n", + "\tencode_streaming output: (1, 16, 64)\n", + "chunk #3:\n", + "\tbefore forward MHA left context: (1, 48, 64)\n", + "\tencode_streaming output: (1, 16, 64)\n", + "chunk #4:\n", + "\tbefore forward MHA left context: (1, 64, 64)\n", + "\tencode_streaming output: (1, 16, 64)\n", + "chunk #5:\n", + "\tbefore forward MHA left context: (1, 64, 64)\n", + "\tencode_streaming output: (1, 16, 64)\n", + "chunk #6:\n", + "\tbefore forward MHA left context: (1, 64, 64)\n", + "\tencode_streaming output: (1, 16, 64)\n", + "chunk #7:\n", + "\tbefore forward MHA left context: (1, 64, 64)\n", + "\tencode_streaming output: (1, 16, 64)\n" + ] + } + ], + "source": [ + "test_context = chunked_model.make_streaming_context(DynChunkTrainConfig(16, 4))\n", + "for chunk_id in range(8):\n", + " print(f\"chunk #{chunk_id}:\")\n", + " test_chunk = torch.rand((1, 16, 64))\n", + " test_mha_context = test_context.encoder_context.layers[0].mha_left_context\n", + " model_output = chunked_model.encode_streaming(test_chunk, context=test_context)\n", + " \n", + " print(f\"\\tbefore forward MHA left context: {tuple(test_mha_context.shape) if 
test_mha_context is not None else '(None)'}\")\n", + " print(f\"\\tencode_streaming output: {tuple(model_output.shape)}\")" + ] + }, + { + "cell_type": "markdown", + "id": "ee0a6b94-185c-40be-bcff-7182ba5ace46", + "metadata": {}, + "source": [ + "### Streaming tokenizers\n", + "\n", + "A small detail we nevertheless need to care about is tokenization in a streaming context.\n", + "\n", + "Normally, the tokenizer always decodes full sentences at a time, with the consequence that the first space in decoding (e.g. in a `▁are` token) will get removed. \n", + "However, when streaming, we might be decoding mid-utterance where spaces must not be removed mid-sentence. [`spm_decode_preserve_leading_space`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.tokenizers.SentencePiece.html#speechbrain.tokenizers.SentencePiece.spm_decode_preserve_leading_space) handles this case and requires carrying around a small context object.\n", + "\n", + "### Streaming transducer Greedy Search\n", + "\n", + "[`TransducerBeamSearcher`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.decoders.transducer.html#speechbrain.decoders.transducer.TransducerBeamSearcher) provides a [`transducer_greedy_decode_streaming`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.decoders.transducer.html#speechbrain.decoders.transducer.TransducerBeamSearcher.transducer_greedy_decode_streaming) method, which like other `_streaming` methods, requires the user to make and carry around a context object.\n", + "\n", + "It is, in this case, a rather simple wrapper that caches and passes around the latest hidden state of the greedy searcher." 
+ ] + }, + { + "cell_type": "markdown", + "id": "dca4481c-afd0-4ee8-980d-acb5b4e4dfc8", + "metadata": {}, + "source": [ + "## Inference: Practical example with `StreamingASR`\n", + "\n", + "### From trained model to `StreamingASR` hyperparameters\n", + "\n", + "Currently, in SpeechBrain, you need to define a separate hyperparameters file for training and inference. You can mostly copy the training hyperparameters and remove/add keys as relevant for inference. \n", + "In this case, [`speechbrain.inference.ASR.StreamingASR`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.inference.ASR.html#speechbrain.inference.ASR.StreamingASR), the higher level inference interface, requires a certain set of **keys and modules** to be defined by inference hyperparameters, in a relatively flexible way.\n", + "\n", + "If you [look at the documentation](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.inference.ASR.html#speechbrain.inference.ASR.StreamingASR), you can find the following hyperparameter keys and `modules` dict entries requirements:\n", + "\n", + "```python\n", + "HPARAMS_NEEDED = [\n", + " \"fea_streaming_extractor\",\n", + " \"make_decoder_streaming_context\",\n", + " \"decoding_function\",\n", + " \"make_tokenizer_streaming_context\",\n", + " \"tokenizer_decode_streaming\",\n", + "]\n", + "MODULES_NEEDED = [\"enc\", \"proj_enc\"]\n", + "```\n", + "\n", + "Let's look at what this entails. For our Conformer model, we can use [`speechbrain/asr-streaming-conformer-librispeech`](https://huggingface.co/speechbrain/asr-streaming-conformer-librispeech/blob/main/hyperparams.yaml) as a reference. (As a reminder, keys initialized with `!name:` are merely references to functions here.) 
See:\n", + "\n", + "```yaml\n", + "make_tokenizer_streaming_context: !name:speechbrain.tokenizers.SentencePiece.SentencePieceDecoderStreamingContext\n", + "tokenizer_decode_streaming: !name:speechbrain.tokenizers.SentencePiece.spm_decode_preserve_leading_space\n", + "\n", + "make_decoder_streaming_context: !name:speechbrain.decoders.transducer.TransducerGreedySearcherStreamingContext # default constructor\n", + "decoding_function: !name:speechbrain.decoders.transducer.TransducerBeamSearcher.transducer_greedy_decode_streaming\n", + " - !ref # self\n", + "\n", + "fea_streaming_extractor: !new:speechbrain.lobes.features.StreamingFeatureWrapper\n", + " module: !new:speechbrain.nnet.containers.LengthsCapableSequential\n", + " - !ref \n", + " - !ref \n", + " - !ref \n", + " # don't consider normalization as part of the input filter chain.\n", + " # normalization will operate at chunk level, which mismatches training\n", + " # somewhat, but does not appear to result in noticeable degradation.\n", + " properties: !apply:speechbrain.utils.filter_analysis.stack_filter_properties\n", + " - [!ref , !ref ]\n", + "```\n", + "\n", + "With the prior details, nothing should be particularly surprising here. But, in more detail, the idea is to give some amount of flexibility to the model via the hyperparameters file. We effectively need to define:\n", + "\n", + "1. `fea_streaming_extractor` which is a [`StreamingFeatureWrapper`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.lobes.features.html#speechbrain.lobes.features.StreamingFeatureWrapper) (or anything with a compatible API, really), and processes the input waveform (alongside providing filter properties and such)\n", + "2. `modules.enc` and `modules.proj_enc`, the actual encoder, [see the source for `StreamingASR.encode_chunk`](https://speechbrain.readthedocs.io/en/latest/_modules/speechbrain/inference/ASR.html#StreamingASR.encode_chunk)\n", + "3. 
`decoding_function` which can be called as `hparams.decoding_function(output_of_enc, context=decoding_context)`, where...\n", + "4. ... the `decoding_context` is initialized from `hparams.make_decoder_streaming_context()`\n", + "5. `tokenizer_decode_streaming` and `make_decoder_streaming_context`, [see the source for `StreamingASR.decode_chunk`](https://speechbrain.readthedocs.io/en/latest/_modules/speechbrain/inference/ASR.html#StreamingASR.decode_chunk)\n", + "\n", + "As for the files you need to move from the save directory after training, into a typical streaming Conformer model directory for `StreamingASR`, these basically are:\n", + "\n", + "- `hyperparams.yaml` (modified for inference)\n", + "- `model.ckpt`\n", + "- `normalizer.ckpt`\n", + "- `tokenizer.ckpt`" + ] + }, + { + "cell_type": "markdown", + "id": "dbc1da3c-e575-453a-9a43-5e314b3f2847", + "metadata": {}, + "source": [ + "### Inference with `StreamingASR`\n", + "\n", + "Let's make use of [`speechbrain/asr-streaming-conformer-librispeech`](https://huggingface.co/speechbrain/asr-streaming-conformer-librispeech/) to demonstrate streaming audio decoding." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "5a36cb12-3aed-44ef-9cf6-6938e703cf43", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:speechbrain.utils.fetching:Fetch hyperparams.yaml: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch custom.py: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch model.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch model.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch normalizer.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch normalizer.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch tokenizer.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch tokenizer.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n", + "INFO:speechbrain.utils.parameter_transfer:Loading pretrained files for: model, normalizer, tokenizer\n" + ] + } + ], + "source": [ + "from speechbrain.inference.ASR import StreamingASR\n", + "from speechbrain.utils.dynamic_chunk_training import DynChunkTrainConfig\n", + "asr_model = StreamingASR.from_hparams(\"speechbrain/asr-streaming-conformer-librispeech\")" + ] + }, + { + "cell_type": "markdown", + "id": "fc628080-3a34-43ba-9177-694ecf6b78d4", + "metadata": {}, + "source": [ + "Here is a simple transcription example, which _does_ perform chunkwise 
inference:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "d8897dd9-3ef8-4afa-94ee-4a178f1dd592", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:speechbrain.utils.fetching:Fetch test-en.wav: Fetching from HuggingFace Hub 'speechbrain/asr-streaming-conformer-librispeech' if not cached\n" + ] + }, + { + "data": { + "text/plain": [ + "'THE BIRCH CANOE SLID ON THE SMOOTH PLANKS'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "asr_model.transcribe_file(\n", + " \"speechbrain/asr-streaming-conformer-librispeech/test-en.wav\",\n", + " # select a chunk size of ~960ms with 4 chunks of left context\n", + " DynChunkTrainConfig(24, 4),\n", + " # disable torchaudio streaming to allow fetching from HuggingFace\n", + " # set this to True for your own files or streams to allow for streaming file decoding\n", + " use_torchaudio_streaming=False,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "6ebafd90-4db5-419c-a716-eceb5925aec4", + "metadata": {}, + "source": [ + "Let's try a more challenging example: transcribing a minutes-long audio file. This would typically result in a sequence that is way too long to process due to the memory and computational cost of transformers on long sequences.\n", + "\n", + "This time, let's use [`transcribe_file_streaming`](https://speechbrain.readthedocs.io/en/latest/API/speechbrain.inference.ASR.html#speechbrain.inference.ASR.StreamingASR.transcribe_file_streaming). This method enables us to iterate over the transcribed chunks as they get processed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "0c392c2b-b6f7-413f-ac7f-8cf5443e2904", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:speechbrain.utils.fetching:Fetch Economics-of-coffee.ogg: Using existing file/symlink in /home/sdelang/projects/src/python/speechbrain/docs/tutorials/nn/Economics-of-coffee.ogg\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " 0: \"\"\n", + " 1: \"\"\n", + " 2: \"ECONOMICS\"\n", + " 3: \" OF COFFEE\"\n", + " 4: \"\"\n", + " 5: \" FROM\"\n", + " 6: \" WICKPEDIA\"\n", + " 7: \" THE FREE\"\n", + " 8: \" SECLOPAEDIA\"\n", + " 9: \"\"\n", + " 10: \"\"\n", + " 11: \" COFFEE\"\n", + " 12: \" IS AN IMPORTAN\"\n", + " 13: \"T COM\"\n", + " 14: \"MODITY\"\n", + " 15: \" AND A POPULAR\"\n", + " 16: \" BEVERAGE\"\n", + " 17: \"\"\n", + " 18: \" OVER A TWO POINT\"\n", + " 19: \" TWO FIVE BILL\"\n", + " 20: \"ION CUPS\"\n" + ] + } + ], + "source": [ + "from speechbrain.utils.fetching import fetch\n", + "\n", + "long_audio_fname = fetch(\"Economics-of-coffee.ogg\", \"https://upload.wikimedia.org/wikipedia/commons/8/81\", savedir=\".\")\n", + "long_audio_chunks = []\n", + "\n", + "for i, decoded_chunk in enumerate(asr_model.transcribe_file_streaming(long_audio_fname, DynChunkTrainConfig(16, 4))):\n", + " print(f\"{i:>3}: \\\"{decoded_chunk}\\\"\")\n", + " long_audio_chunks.append(decoded_chunk)\n", + "\n", + " # let's just process the 20 first chunks as a demo\n", + " if i >= 20:\n", + " break" + ] + }, + { + "cell_type": "markdown", + "id": "9a9e8428-a640-4745-a17f-ea5fe6b99a30", + "metadata": {}, + "source": [ + "The model made some mistakes (which is not particularly surprising given what it was trained on and what we're testing inference with), but other than that, streaming seems to work just fine, and transcription of words that cross chunks doesn't look garbled." 
+ ] + }, + { + "cell_type": "markdown", + "id": "e4fe5484-bef5-410a-ba84-da1219b5c357", + "metadata": {}, + "source": [ + "### ffmpeg live-stream functionality\n", + "\n", + "`StreamingASR` has support for torchaudio's ffmpeg streaming functionality. This means that you can easily do things like transcribing web radio streams:" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "330999a6-f446-4ba4-810b-327cf7f8f319", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "THEY WERE SO QUICK THEY DID THIS AIM AND THEN IT TOOK THEM TWO DAYS TO INSTALL THE SECOND DAY THEY WORKED UNTIL AFTER SEVEN P M AND THAT WAS IT I MEAN YET LIKE" + ] + } + ], + "source": [ + "audio_stream_url = \"http://as-hls-ww-live.akamaized.net/pool_904/live/ww/bbc_radio_fourfm/bbc_radio_fourfm.isml/bbc_radio_fourfm-audio%3d96000.norewind.m3u8\"\n", + "\n", + "for i, decoded_chunk in enumerate(asr_model.transcribe_file_streaming(audio_stream_url, DynChunkTrainConfig(16, 4))):\n", + " print(decoded_chunk, end=\"\")\n", + "\n", + " # let's just process the 20 first chunks as a demo\n", + " if i >= 20:\n", + " break" + ] + }, + { + "cell_type": "markdown", + "id": "9b067c47-6ca6-4e30-be6e-2499297e0372", + "metadata": {}, + "source": [ + "### Manually transcribing chunks\n", + "\n", + "More examples are available on the [HuggingFace page of the model](https://huggingface.co/speechbrain/asr-streaming-conformer-librispeech). In particular, the Gradio example shows how to process an arbitrary stream of audio chunks yourself, bypassing the audio loading functionality." + ] + }, + { + "cell_type": "markdown", + "id": "e62ad196-7c92-4dcf-8149-e061e6237ad8", + "metadata": {}, + "source": [ + "## Alternatives and Further Reading\n", + "\n", + "This tutorial covered the modification of a mostly-vanilla Conformer model for chunkwise streaming support. 
\n", + "Over the years, alternatives and improvements to the Conformer model have been developed, in order to improve accuracy, improve runtime performance, lower memory usage, lower real-world latency, or add miscellaneous features. \n", + "The following is far from being a complete list, and it doesn't only include architectures that have been successfully adapted to a streaming context.\n", + "\n", + "- [Branchformer](https://arxiv.org/abs/2207.02971) (and [E-Branchformer](https://arxiv.org/abs/2210.00077))\n", + "- [Zipformer](https://arxiv.org/abs/2310.11230) (implemented by k2/[icefall](https://github.com/k2-fsa/icefall), among other Conformer variants, with streaming support implemented)\n", + "- [FastConformer](https://arxiv.org/abs/2305.05084) (still heavily used by NVIDIA as of 2024, despite its simplicity, and easy to migrate to from a vanilla Conformer)\n", + "\n", + "There is also a lot of research around other parts of the training or inference pipeline. We might expand on this list with more references in the future:\n", + "\n", + "- [Pruned RNN-T](https://arxiv.org/abs/2206.13236)" + ] + }, + { + "cell_type": "markdown", + "id": "5e2409d8", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas 
Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/tutorials/nn/neural-network-adapters.ipynb b/docs/tutorials/nn/neural-network-adapters.ipynb new file mode 100644 index 0000000000..e3721b6603 --- /dev/null +++ b/docs/tutorials/nn/neural-network-adapters.ipynb @@ -0,0 +1,775 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f1302eb5", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + 
"[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/nn/neural-network-adapters.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/nn/neural-network-adapters.ipynb)" + ] + }, + { + "cell_type": "markdown", + "id": "f82d21c0-4bdb-4caf-af73-30d0f43ecbab", + "metadata": {}, + "source": [ + "# Neural Network Adapters for faster low-memory fine-tuning\n", + "\n", + "This tutorial covers the SpeechBrain implementation of adapters such as LoRA. This includes how to integrate either SpeechBrain implemented adapters, custom adapters, and adapters from libraries such as PEFT into a pre-trained model.\n", + "\n", + "## Prerequisites\n", + "- [Speech Recognition From Scratch](https://speechbrain.readthedocs.io/en/latest/tutorials/tasks/speech-recognition-from-scratch.html)\n", + "- A CUDA-enabled device for running this tutorial\n", + "\n", + "## Introduction and Background\n", + "\n", + "As pre-trained models become larger and more capable, there is growing interest in methods for adapting them for specific tasks in a memory-efficient way, within a reasonable time span. One such technique is freezing the original parameters and inserting a small number of additional parameters into the original model, which are called \"adapters.\" These adapters can often match the performance of full fine-tuning at a fraction of the parameter count, meaning faster and more memory-efficient fine-tuning [1]. One popular technique for doing this is known as Low-Rank Adaptation (LoRA) [2].\n", + "\n", + "On the software side, HuggingFace has produced a popular library for adapters called PEFT [3]. Our implementation includes some of the features of this library, as well as including the ability to integrate PEFT adapters into a SpeechBrain model. 
We'll start with a basic example YAML so you can just try it yourself if you learn better from experimentation.\n", + "\n", + "### Relevant bibliography\n", + "1. N. Houlsby, A. Giurgiu, S. Jastrzebski, B. Morrone, Q. De Laroussilhe, A. Gesmundo, M. Attariyan, and S. Gelly, \"Parameter-efficient transfer learning for NLP.\" In *International Conference on Machine Learning*, 2019.\n", + "2. E.J. Hu, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, L. Wang, and W. Chen, \"LoRA: Low-rank adaptation of large language models.\" In *International Conference on Learning Representations*, 2021.\n", + "3. S. Mangrulkar, S. Gugger, L. Debut, Y. Belkada, S. Paul, and B. Bossan, \"PEFT: State-of-the-art parameter-efficient fine-tuning methods.\" *GitHub Repository*, 2022.\n" + ] + }, + { + "cell_type": "markdown", + "id": "e863ce00-65fa-43eb-9678-cdb2e2c9a219", + "metadata": {}, + "source": [ + "## Too Long ; Didn't Read\n", + "\n", + "The TL;DR of this tutorial is that you should use a section like this in your HyperPyYAML file to create a model with adapters:\n", + "\n", + "```yaml\n", + "adapted_model: !new:speechbrain.nnet.adapters.AdaptedModel\n", + " model_to_adapt: !ref \n", + " adapter_class: !name:speechbrain.nnet.adapters.LoRA\n", + " all_linear: True\n", + " unfrozen_layers: [\"conv_1d_*\"]\n", + " adapter_kwargs:\n", + " rank: 8\n", + "```\n", + "\n", + "Adding this section to the YAML takes the already-defined key `model` and adds a LoRA adapter to every linear layer with the keyword argument `rank=8`.\n", + "The `all_linear` and `all_conv` arguments simply add adapters to all linear or all convolution layers respectively.\n", + "By default, this class freezes the parameters of all layers that are not adapted, but the `unfrozen_layers` argument can be used to specify the names\n", + "of layers that should also be trained, at the cost of a higher parameter count. One can specify specific layers that should be adapted with\n", + "the `target_layers` argument. 
These arguments both support unix-style wildcards through the use of python's `fnmatch` library.\n", + "\n", + "If the TL;DR isn't enough and you need to work through an example in more detail, continue to the next section." + ] + }, + { + "cell_type": "markdown", + "id": "3ca49a1d-5c1b-40db-b7e1-dcc8a85bcce3", + "metadata": {}, + "source": [ + "## Detailed Tutorial\n", + "\n", + "We'll demonstrate how to use adapters on a template recipe, which includes everything necessary for full training. The first step is to pretrain a model so we can later add adapters." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a45871e4-e087-486b-af01-411603e43070", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "fatal: destination path 'speechbrain' already exists and is not an empty directory.\n", + "/home/pplantinga/Documents/Repositories/uvenv/bin/python: No module named pip\n" + ] + } + ], + "source": [ + "!git clone --depth 1 --branch v1.0.2 https://github.com/speechbrain/speechbrain.git\n", + "!python -m pip install -e speechbrain" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23dc826e-49a3-4463-a49d-9c5981849cff", + "metadata": {}, + "outputs": [], + "source": [ + "# In order to use speechbrain in this repo we have to add it to the path\n", + "import os, sys\n", + "\n", + "sys.path.append(os.path.join(os.getcwd(), 'speechbrain'))" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "0d88fe51-301b-46d5-858e-fd47877e92ae", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/pplantinga/Documents/Repositories/speechbrain/docs/tutorials/nn/speechbrain/templates/speech_recognition/ASR\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/pplantinga/Documents/Repositories/uvenv/lib/python3.12/site-packages/IPython/core/magics/osm.py:417: UserWarning: This is now an optional IPython 
functionality, setting dhist requires you to install the `pickleshare` library.\n", + " self.shell.db['dhist'] = compress_dhist(dhist)[-100:]\n" + ] + } + ], + "source": [ + "%cd speechbrain/templates/speech_recognition/ASR" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9dabda46-19ae-41b9-ab0f-6b9995b1e27f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:speechbrain.utils.seed:Setting seed to 2602\n", + "WARNING:speechbrain.utils.train_logger:torchvision is not available - cannot save figures\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/autocast.py:68: FutureWarning: `torch.cuda.amp.custom_fwd(args...)` is deprecated. Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.\n", + " wrapped_fwd = torch.cuda.amp.custom_fwd(fwd, cast_inputs=cast_inputs)\n", + "speechbrain.core - Beginning experiment!\n", + "speechbrain.core - Experiment folder: results/CRDNN_BPE_960h_LM/2602\n", + "mini_librispeech_prepare - Preparation completed in previous run, skipping.\n", + "../data/noise/data.zip exists. Skipping download\n", + "../data/rir/data.zip exists. 
Skipping download\n", + "speechbrain.utils.fetching - Fetch lm.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-crdnn-rnnlm-librispeech' if not cached\n", + "speechbrain.utils.fetching - Fetch lm.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-crdnn-rnnlm-librispeech' if not cached\n", + "speechbrain.utils.fetching - Fetch tokenizer.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-crdnn-rnnlm-librispeech' if not cached\n", + "speechbrain.utils.fetching - Fetch tokenizer.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-crdnn-rnnlm-librispeech' if not cached\n", + "speechbrain.utils.fetching - Fetch asr.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-crdnn-rnnlm-librispeech' if not cached\n", + "speechbrain.utils.fetching - Fetch asr.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-crdnn-rnnlm-librispeech' if not cached\n", + "speechbrain.utils.parameter_transfer - Loading pretrained files for: lm, tokenizer, model\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/checkpoints.py:199: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " state_dict = torch.load(path, map_location=device)\n", + "speechbrain.core - Info: ckpt_interval_minutes arg from hparam file is used\n", + "speechbrain.core - Gradscaler enabled: False. Using precision: fp32.\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/core.py:793: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n", + " self.scaler = torch.cuda.amp.GradScaler(enabled=gradscaler_enabled)\n", + "speechbrain.core - ASR Model Statistics:\n", + "* Total Number of Trainable Parameters: 173.0M\n", + "* Total Number of Parameters: 173.0M\n", + "* Trainable Parameters represent 100.0000% of the total size.\n", + "speechbrain.utils.checkpoints - Would load a checkpoint here, but none found yet.\n", + "speechbrain.utils.epoch_loop - Going into epoch 1\n", + "speechbrain.augment.augmenter - No augmentation is applied because the augmentation start index is greater than or equal to the number of examples in the input batch.\n", + "100%|████████████████████████| 760/760 [08:28<00:00, 1.49it/s, train_loss=1.35]\n", + "100%|█████████████████████████████████████████| 545/545 [01:28<00:00, 6.18it/s]\n", + "speechbrain.utils.train_logger - epoch: 1, lr: 1.00e+00 - train loss: 1.35 - valid loss: 1.31, valid CER: 7.71, valid WER: 20.06\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/CRDNN_BPE_960h_LM/2602/save/CKPT+2024-10-08+11-08-06+00\n", + "speechbrain.utils.checkpoints - Loading a checkpoint from results/CRDNN_BPE_960h_LM/2602/save/CKPT+2024-10-08+11-08-06+00\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/checkpoints.py:199: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. 
It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", + " state_dict = torch.load(path, map_location=device)\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/nnet/schedulers.py:240: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " data = torch.load(path)\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/processing/features.py:1311: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", + " stats = torch.load(path, map_location=device)\n", + "100%|███████████████████████████████████████| 1310/1310 [09:25<00:00, 2.32it/s]\n", + "speechbrain.utils.train_logger - Epoch loaded: 1 - test loss: 1.30, test CER: 5.75, test WER: 17.57\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/CRDNN_BPE_960h_LM/2602/save/CKPT+latest\n" + ] + } + ], + "source": [ + "!python train.py train.yaml --number_of_epochs=1 --batch_size=2 --test_scorer \"!ref \" --enable_add_reverb=False --enable_add_noise=False #To speed up" + ] + }, + { + "cell_type": "markdown", + "id": "b9782e5b-5498-487f-8055-0b73f75a8ec8", + "metadata": {}, + "source": [ + "## Inference\n", + "\n", + "To prove that this is working, let's just perform inference on one file. 
This code taken from `transcribe_file.py`" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "9dd9de34-e7d2-4fdd-beb7-d7ab07551c04", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:speechbrain.utils.fetching:Fetch lm.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-crdnn-rnnlm-librispeech' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch tokenizer.ckpt: Fetching from HuggingFace Hub 'speechbrain/asr-crdnn-rnnlm-librispeech' if not cached\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/autocast.py:68: FutureWarning: `torch.cuda.amp.custom_fwd(args...)` is deprecated. Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.\n", + " wrapped_fwd = torch.cuda.amp.custom_fwd(fwd, cast_inputs=cast_inputs)\n", + "INFO:speechbrain.utils.parameter_transfer:Loading pretrained files for: lm, tokenizer, model, normalizer\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/checkpoints.py:199: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " state_dict = torch.load(path, map_location=device)\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/processing/features.py:1311: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " stats = torch.load(path, map_location=device)\n" + ] + }, + { + "data": { + "text/plain": [ + "'THE METAL FOREST IS IN THE GREAT DOMED CAVERN THE LARGEST IN ALL OUR DOMINIONS REPLIED CALICO ⁇ '" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "from speechbrain.utils.fetching import fetch, LocalStrategy\n", + "\n", + "# Ensure all the needed files end up in the same place to load with the transcriber\n", + "save_dir = os.path.abspath(\"results/CRDNN_BPE_960h_LM/2602/save/CKPT+latest\")\n", + "fetch(\"lm.ckpt\", \"speechbrain/asr-crdnn-rnnlm-librispeech\", save_dir, local_strategy=LocalStrategy.SYMLINK)\n", + "fetch(\"tokenizer.ckpt\", \"speechbrain/asr-crdnn-rnnlm-librispeech\", save_dir, local_strategy=LocalStrategy.SYMLINK)\n", + "fetch(\"inference.yaml\", os.getcwd(), save_dir, local_strategy=LocalStrategy.SYMLINK)\n", + "\n", + "transcriber = EncoderDecoderASR.from_hparams(source=save_dir, hparams_file=\"inference.yaml\")\n", + "speech_file = \"../data/LibriSpeech/dev-clean-2/1272/135031/1272-135031-0015.flac\"\n", + "transcriber.transcribe_file(speech_file)" + ] + }, + { + "cell_type": "markdown", + "id": "179c13b9-5a32-412e-a099-951163f4ed3c", + "metadata": {}, + "source": [ + "## Adding adapters\n", + "\n", + "So now that we've proved that the model is at least working, let's go ahead and add adapters. We basically need to create a new yaml file adding adapters to the model and then train with this new yaml file. To do this we'll just load the old yaml file and then we'll change all the parts necessary to train the adapted model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "b0904742-9f03-46c4-88bb-fd7cb4326686", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting train_lora.patch\n" + ] + } + ], + "source": [ + "%%writefile train_lora.patch\n", + "--- train.yaml\t2024-10-07 19:23:49.839501714 -0400\n", + "+++ train_lora.yaml\t2024-10-07 19:25:40.340933091 -0400\n", + "@@ -30,7 +30,7 @@\n", + " NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1\n", + " RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1\n", + " \n", + "-output_folder: !ref results/CRDNN_BPE_960h_LM/\n", + "+output_folder: !ref results/crdnn_lora/\n", + " test_wer_file: !ref /wer_test.txt\n", + " save_folder: !ref /save\n", + " train_log: !ref /train_log.txt\n", + "@@ -41,7 +41,7 @@\n", + " # speechbrain HuggingFace repository. However, a local path pointing to a\n", + " # directory containing the lm.ckpt and tokenizer.ckpt may also be specified\n", + " # instead. E.g if you want to use your own LM / tokenizer.\n", + "-pretrained_path: speechbrain/asr-crdnn-rnnlm-librispeech\n", + "+pretrained_path: results/CRDNN_BPE_960h_LM/2602/save/CKPT+latest\n", + " \n", + " \n", + " # Path where data manifest files will be stored. 
The data manifest files are created by the\n", + "@@ -481,10 +481,9 @@\n", + " ctc_lin: !ref \n", + " seq_lin: !ref \n", + " normalize: !ref \n", + "- lm_model: !ref \n", + " \n", + " # Gathering all the submodels in a single model object.\n", + "-model: !new:torch.nn.ModuleList\n", + "+model_pretrained: !new:torch.nn.ModuleList\n", + " - - !ref \n", + " - !ref \n", + " - !ref \n", + "@@ -629,8 +628,31 @@\n", + " loadables:\n", + " lm: !ref \n", + " tokenizer: !ref \n", + "- model: !ref \n", + "+ model: !ref \n", + " paths:\n", + " lm: !ref /lm.ckpt\n", + " tokenizer: !ref /tokenizer.ckpt\n", + "- model: !ref /asr.ckpt\n", + "+ model: !ref /model.ckpt\n", + "+\n", + "+new_encoder: !new:speechbrain.nnet.adapters.AdaptedModel\n", + "+ model_to_adapt: !ref \n", + "+ adapter_class: !name:speechbrain.nnet.adapters.LoRA\n", + "+ all_linear: True\n", + "+ manual_adapter_insertion: True\n", + "+ adapter_kwargs:\n", + "+ rank: 8\n", + "+\n", + "+new_decoder: !new:speechbrain.nnet.adapters.AdaptedModel\n", + "+ model_to_adapt: !ref \n", + "+ adapter_class: !name:speechbrain.nnet.adapters.LoRA\n", + "+ all_linear: True\n", + "+ manual_adapter_insertion: True\n", + "+ adapter_kwargs:\n", + "+ rank: 8\n", + "+\n", + "+model: !new:torch.nn.ModuleList\n", + "+ - - !ref \n", + "+ - !ref \n", + "+ - !ref \n", + "+ - !ref \n", + "+ - !ref \n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "8517a99a-1dd0-46e9-84b6-2e503e2aa270", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "patching file train_lora.yaml (read from train.yaml)\n" + ] + } + ], + "source": [ + "!patch train.yaml -i train_lora.patch -o train_lora.yaml" + ] + }, + { + "cell_type": "markdown", + "id": "e3645cf7-a902-46dd-b4ea-b5a60de556d8", + "metadata": {}, + "source": [ + "Because we are loading the pretrained parameters using the pretrainer, we have to insert this code to insert the adapters after the pretrained parameters have been loaded.\n", + 
"\n", + "This is the reason for the `manual_adapter_insertion: True` in the yaml and the following brief change to the training code:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "36508f13-7423-47eb-98dd-40274f1990b6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting train_lora_py.patch\n" + ] + } + ], + "source": [ + "%%writefile train_lora_py.patch\n", + "--- train.py\t2024-10-07 14:57:21.534381751 -0400\n", + "+++ train_lora.py\t2024-10-07 19:33:12.839895913 -0400\n", + "@@ -473,6 +473,8 @@\n", + " # the path given in the YAML file). The tokenizer is loaded at the same time.\n", + " hparams[\"pretrainer\"].collect_files()\n", + " hparams[\"pretrainer\"].load_collected()\n", + "+ hparams[\"new_encoder\"].insert_adapters()\n", + "+ hparams[\"new_decoder\"].insert_adapters()\n", + " \n", + " # Trainer initialization\n", + " asr_brain = ASR(" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "6b1b9c73-36a1-4212-9481-654f97b5f441", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "patching file train_lora.py (read from train.py)\n" + ] + } + ], + "source": [ + "!patch train.py -i train_lora_py.patch -o train_lora.py" + ] + }, + { + "cell_type": "markdown", + "id": "8792396c-c05c-4589-b3f3-149cfd98fb34", + "metadata": {}, + "source": [ + "## Training the adapted model\n", + "\n", + "Training works identically to before, using the updated lora file. The adapted model is designed to work as an in-place replacement. Notice how the number of trainable parameters is reduced to close to 1% of the original parameters." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "d088fc19-5f0b-4160-b470-0a4198f0f0e8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:speechbrain.utils.seed:Setting seed to 2602\n", + "WARNING:speechbrain.utils.train_logger:torchvision is not available - cannot save figures\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/autocast.py:68: FutureWarning: `torch.cuda.amp.custom_fwd(args...)` is deprecated. Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.\n", + " wrapped_fwd = torch.cuda.amp.custom_fwd(fwd, cast_inputs=cast_inputs)\n", + "speechbrain.core - Beginning experiment!\n", + "speechbrain.core - Experiment folder: results/crdnn_lora/2602\n", + "mini_librispeech_prepare - Preparation completed in previous run, skipping.\n", + "../data/noise/data.zip exists. Skipping download\n", + "../data/rir/data.zip exists. Skipping download\n", + "speechbrain.utils.parameter_transfer - Loading pretrained files for: lm, tokenizer, model\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/checkpoints.py:199: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " state_dict = torch.load(path, map_location=device)\n", + "speechbrain.core - Info: ckpt_interval_minutes arg from hparam file is used\n", + "speechbrain.core - Gradscaler enabled: False. Using precision: fp32.\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/core.py:793: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n", + " self.scaler = torch.cuda.amp.GradScaler(enabled=gradscaler_enabled)\n", + "speechbrain.core - ASR Model Statistics:\n", + "* Total Number of Trainable Parameters: 1.8M\n", + "* Total Number of Parameters: 120.0M\n", + "* Trainable Parameters represent 1.4807% of the total size.\n", + "speechbrain.utils.checkpoints - Would load a checkpoint here, but none found yet.\n", + "speechbrain.utils.epoch_loop - Going into epoch 1\n", + "speechbrain.augment.augmenter - No augmentation is applied because the augmentation start index is greater than or equal to the number of examples in the input batch.\n", + "100%|███████████████████████████| 760/760 [04:09<00:00, 3.04it/s, train_loss=1]\n", + "100%|█████████████████████████████████████████| 545/545 [01:40<00:00, 5.42it/s]\n", + "speechbrain.utils.train_logger - epoch: 1, lr: 1.00e+00 - train loss: 1.00 - valid loss: 1.26, valid CER: 7.29, valid WER: 19.16\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/crdnn_lora/2602/save/CKPT+2024-10-08+11-23-53+00\n", + "speechbrain.utils.checkpoints - Loading a checkpoint from results/crdnn_lora/2602/save/CKPT+2024-10-08+11-23-53+00\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/checkpoints.py:199: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. 
It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", + " state_dict = torch.load(path, map_location=device)\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/nnet/schedulers.py:240: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " data = torch.load(path)\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/processing/features.py:1311: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " stats = torch.load(path, map_location=device)\n", + "100%|███████████████████████████████████████| 1310/1310 [12:55<00:00, 1.69it/s]\n", + "speechbrain.utils.train_logger - Epoch loaded: 1 - test loss: 1.26, test CER: 5.62, test WER: 17.05\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/crdnn_lora/2602/save/CKPT+latest\n" + ] + } + ], + "source": [ + "!python train_lora.py train_lora.yaml --number_of_epochs=1 --batch_size=2 --test_scorer \"!ref \" --enable_add_reverb=False --enable_add_noise=False #To speed up" + ] + }, + { + "cell_type": "markdown", + "id": "f960be7a-6edb-47c1-bd8d-c7c0d486c81d", + "metadata": {}, + "source": [ + "## Custom adapter\n", + "\n", + "We designed this so that you could replace the SpeechBrain adapter with a `peft` adapter:\n", + "\n", + "```diff\n", + "new_encoder: !new:speechbrain.nnet.adapters.AdaptedModel\n", + " model_to_adapt: !ref \n", + "- adapter_class: !name:speechbrain.nnet.adapters.LoRA\n", + "+ adapter_class: !name:peft.tuners.lora.layer.Linear\n", + " manual_adapter_insertion: True\n", + " adapter_kwargs:\n", + "- rank: 16\n", + "+ r: 16\n", + "+ adapter_name: lora\n", + "```\n", + "\n", + "But this trains exactly the same thing as before, so no need for us to go through the whole thing. 
Perhaps more interesting is designing a custom adapter:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f9682f70-489a-4a1d-b8c6-1c73d98a824d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting pool_lora.py\n" + ] + } + ], + "source": [ + "%%file pool_lora.py\n", + "\n", + "import torch\n", + "\n", + "class PoolLoRA(torch.nn.Module):\n", + " def __init__(self, target_module, stride=2, rank=16, alpha=1.0):\n", + " super().__init__()\n", + "\n", + " input_size = target_module.weight.data.shape[1]\n", + " output_size = target_module.weight.data.shape[0]\n", + " \n", + " # Disable gradient for pretrained module\n", + " self.pretrained_module = target_module\n", + " for param in self.pretrained_module.parameters():\n", + " param.requires_grad = False\n", + " device = target_module.weight.device\n", + "\n", + " self.adapter_down_scale = torch.nn.AvgPool1d(kernel_size=stride)\n", + " self.adapter_down_proj = torch.nn.Linear(\n", + " input_size // stride, rank, bias=False, device=device\n", + " ) \n", + " self.adapter_up_proj = torch.nn.Linear(\n", + " rank, output_size, bias=False, device=device\n", + " ) \n", + " self.adapter_up_proj.weight.data.fill_(0.0)\n", + "\n", + " self.scaling = alpha / rank\n", + "\n", + " def forward(self, x: torch.Tensor):\n", + " \"\"\"Applies the LoRA Adapter.\n", + "\n", + " Arguments\n", + " ---------\n", + " x: torch.Tensor\n", + " Input tensor to the adapter module.\n", + "\n", + " Returns\n", + " -------\n", + " The linear outputs\n", + " \"\"\"\n", + " x_pretrained = self.pretrained_module(x)\n", + "\n", + " x_downsample = self.adapter_down_proj(self.adapter_down_scale(x))\n", + " x_pool_lora = self.adapter_up_proj(x_downsample)\n", + " \n", + " return x_pretrained + x_pool_lora * self.scaling" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "c2e702a9-c07d-4a76-94bc-847b8f890579", + "metadata": {}, + "outputs": [ + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "Overwriting train_pool_lora.patch\n" + ] + } + ], + "source": [ + "%%writefile train_pool_lora.patch\n", + "--- train_lora.yaml\t2024-10-07 22:44:02.767830301 -0400\n", + "+++ train_pool_lora.yaml\t2024-10-07 22:41:30.602641301 -0400\n", + "@@ -30,7 +30,7 @@\n", + " NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1\n", + " RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1\n", + " \n", + "-output_folder: !ref results/crdnn_lora/\n", + "+output_folder: !ref results/crdnn_pool_lora/\n", + " test_wer_file: !ref /wer_test.txt\n", + " save_folder: !ref /save\n", + " train_log: !ref /train_log.txt\n", + "@@ -636,19 +636,21 @@\n", + " \n", + " new_encoder: !new:speechbrain.nnet.adapters.AdaptedModel\n", + " model_to_adapt: !ref \n", + "- adapter_class: !name:speechbrain.nnet.adapters.LoRA\n", + "+ adapter_class: !name:pool_lora.PoolLoRA\n", + " all_linear: True\n", + " manual_adapter_insertion: True\n", + " adapter_kwargs:\n", + "- rank: 8\n", + "+ stride: 2\n", + "+ rank: 16\n", + " \n", + " new_decoder: !new:speechbrain.nnet.adapters.AdaptedModel\n", + " model_to_adapt: !ref \n", + "- adapter_class: !name:speechbrain.nnet.adapters.LoRA\n", + "+ adapter_class: !name:pool_lora.PoolLoRA\n", + " all_linear: True\n", + " manual_adapter_insertion: True\n", + " adapter_kwargs:\n", + "- rank: 8\n", + "+ stride: 2\n", + "+ rank: 16\n", + " \n", + " model: !new:torch.nn.ModuleList\n", + " - - !ref " + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "532bfcda-4a30-463d-8cfe-eed96d28732c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "patching file train_pool_lora.yaml (read from train_lora.yaml)\n" + ] + } + ], + "source": [ + "!patch train_lora.yaml -i train_pool_lora.patch -o train_pool_lora.yaml" + ] + }, + { + "cell_type": 
"code", + "execution_count": 13, + "id": "56aefd64-1325-4891-a9a4-1c4e85691b96", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:speechbrain.utils.seed:Setting seed to 2602\n", + "WARNING:speechbrain.utils.train_logger:torchvision is not available - cannot save figures\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/autocast.py:68: FutureWarning: `torch.cuda.amp.custom_fwd(args...)` is deprecated. Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.\n", + " wrapped_fwd = torch.cuda.amp.custom_fwd(fwd, cast_inputs=cast_inputs)\n", + "speechbrain.core - Beginning experiment!\n", + "speechbrain.core - Experiment folder: results/crdnn_pool_lora/2602\n", + "mini_librispeech_prepare - Preparation completed in previous run, skipping.\n", + "../data/noise/data.zip exists. Skipping download\n", + "../data/rir/data.zip exists. Skipping download\n", + "speechbrain.utils.parameter_transfer - Loading pretrained files for: lm, tokenizer, model\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/checkpoints.py:199: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " state_dict = torch.load(path, map_location=device)\n", + "speechbrain.core - Info: ckpt_interval_minutes arg from hparam file is used\n", + "speechbrain.core - Gradscaler enabled: False. Using precision: fp32.\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/core.py:793: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n", + " self.scaler = torch.cuda.amp.GradScaler(enabled=gradscaler_enabled)\n", + "speechbrain.core - ASR Model Statistics:\n", + "* Total Number of Trainable Parameters: 1.8M\n", + "* Total Number of Parameters: 120.0M\n", + "* Trainable Parameters represent 1.5210% of the total size.\n", + "speechbrain.utils.checkpoints - Would load a checkpoint here, but none found yet.\n", + "speechbrain.utils.epoch_loop - Going into epoch 1\n", + "speechbrain.augment.augmenter - No augmentation is applied because the augmentation start index is greater than or equal to the number of examples in the input batch.\n", + "100%|████████████████████████| 760/760 [04:19<00:00, 2.93it/s, train_loss=0.98]\n", + "100%|█████████████████████████████████████████| 545/545 [01:44<00:00, 5.24it/s]\n", + "speechbrain.utils.train_logger - epoch: 1, lr: 1.00e+00 - train loss: 9.80e-01 - valid loss: 1.26, valid CER: 7.18, valid WER: 18.92\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/crdnn_pool_lora/2602/save/CKPT+2024-10-08+11-43-00+00\n", + "speechbrain.utils.checkpoints - Loading a checkpoint from results/crdnn_pool_lora/2602/save/CKPT+2024-10-08+11-43-00+00\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/utils/checkpoints.py:199: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. 
It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", + " state_dict = torch.load(path, map_location=device)\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/nnet/schedulers.py:240: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " data = torch.load(path)\n", + "/home/pplantinga/Documents/Repositories/speechbrain/speechbrain/processing/features.py:1311: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", + " stats = torch.load(path, map_location=device)\n", + "100%|███████████████████████████████████████| 1310/1310 [14:07<00:00, 1.55it/s]\n", + "speechbrain.utils.train_logger - Epoch loaded: 1 - test loss: 1.25, test CER: 5.61, test WER: 16.99\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/crdnn_pool_lora/2602/save/CKPT+latest\n" + ] + } + ], + "source": [ + "!python train_lora.py train_pool_lora.yaml --number_of_epochs=1 --batch_size=2 --test_scorer \"!ref \" --enable_add_reverb=False --enable_add_noise=False #To speed up" + ] + }, + { + "cell_type": "markdown", + "id": "21ef247c-3022-4b65-8cd0-86d01a618b79", + "metadata": {}, + "source": [ + "## Conclusion\n", + "\n", + "That's it, thanks for following along! Go forth and make cool adapters." 
+ ] + }, + { + "cell_type": "markdown", + "id": "11474af6", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": 
"python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/tutorials/nn/recurrent-neural-networks-and-speechbrain.ipynb b/docs/tutorials/nn/recurrent-neural-networks-and-speechbrain.ipynb new file mode 100644 index 0000000000..334e12d29c --- /dev/null +++ b/docs/tutorials/nn/recurrent-neural-networks-and-speechbrain.ipynb @@ -0,0 +1,947 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/nn/recurrent-neural-networks-and-speechbrain.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/nn/recurrent-neural-networks-and-speechbrain.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "N20rXBhI1dx1" + }, + "source": [ + "# Recurrent Neural Networks\n", + "\n", + "Recurrent Neural Networks (RNNs) provide a natural approach for processing sequences.\n", + "\n", + "In most cases, the elements of a sequence are not independent. The emission of a particular output often depends on the surrounding elements or even the entire history.\n", + "\n", + "To adequately model the sequence evolution, the presence of **memory** is crucial to keep track of past or future elements. 
Memory is implemented using **feedback connections**, introducing the concept of \"state.\" RNNs rely on the following equation:\n", + "\n", + "$ h_t = f(x_t, h_{t−1}, θ)$\n", + "\n", + "where \\(h_t\\) and \\(h_{t−1}\\) are the current and previous states, and \\(θ\\) represents the trainable parameters.\n", + "\n", + "Due to this recurrence, the current state depends on the previous one, and the previous one depends on the element before, creating a dependency on all previous elements.\n", + "\n", + "## 1. Vanilla RNN\n", + "\n", + "The simplest form of RNN is the vanilla RNN, described by the following equation:\n", + "\n", + "$ h_t = \\tanh(W x_t + Uh_{t−1} + b) $\n", + "\n", + "Here, the parameters \\(θ\\) include the weight matrix \\(W\\) (feed-forward connections), the matrix \\(U\\) (recurrent weights), and the vector \\(b\\) (bias).\n", + "\n", + "To train an RNN, a network **unfolding over the time** axis is necessary, as illustrated in the following figure:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "v22oq1OpvZaW" + }, + "source": [ + 
"![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABqIAAAGyCAIAAAAAhEIgAAAgAElEQVR4Aezdd1wT9/8H8E9YYYkCKqCIIio4GIJ1fmuLs1i1LrR1W0erdXSobd0L66h8UXHXjXvWhXUgaF1VURRBXCAgewRIyE5+j6/3M42EkU2Se90f329yuc/nPp/n+67Ay8sdkWKBAAQgAAEIQAACEIBA1QJCoTA6Onr16tULFiyY/25ZsGDBihUr3rx5IxKJqm6HTyAAAQhAAAJaEOByuefPn2/VqpWNjY2ZmRl5t7Ru3frSpUtisVgLO0AXEDAhAWJCc8FUIAABCEAAAhCAAAS0LyAUCnft2jV06FBPT8+67xZHR0dvb++srCyJRKL9/aFHCEAAAhCAgJyARCIRCAQlJSWrV6/28vKiYr6PPvooPz9fbiu8hAAE/ieAmA/HAQQgAAEIQAACEIBAdQJisbioqCg9PX327NlOTk6EEDs7u969e7PZ7Oqa4TMIQAACEICAVgWuXbvWtWtXQoiDg8PXX3+NS/m0qovOTEQAMZ+JFBLTgAAEIAABCEAAAroW2Lt3r7e3NyHExcVl7ty5fD5f13tE/xCAAAQgAAGZwKVLlzp37kwI8fT0XLt2rWw9XkAAAjIBxHwyCryAAAQgAAEIQAACEKhOYPfu3a1atSKEeHl5HTp0SCgUVrc1PoMABFQREAqFpaWlHA5HlUbYFgL0Eti3b1+7du0IIR999NHp06fpNXnMFgLKCSDmU84JW0EAAhCAgMkJcLncsrKyUrmlrKwM3/4wuTpjQloTkEgky5cvd3JyYjAYAQEBSUlJOF+0houO6CcgFApzc3Pj4+Pvv18uXbq0d+/ehIQEnFn0OxwwY2UFfvvtt6ZNmxJCQkJCkpKSlG2G7SBAJwHEfHSqNuYKAQhAAALvBcRi8d27d48cObLn/bJ3797Dhw+XlJTgkQLvkfD/EPhAoKys7KuvvrKwsGAymX369BEIBB98jDcQgIAqAmw2+9ixY/7+/o6OjpaWltQjBVq1anXmzBk8wFoVSGxLIwGxWPz111/XrVvX3Nx8woQJ5eXlNJo8pgoBpQUQ8ylNhQ0hAAEIQMCEBPh8/syZM11dXc3MzBgMBiHE3Nzczc3t7du3JjRLTAUC2hR4/Phxr169CCGurq7z5s3TZtfoCwK0FJBIJJmZmdOmTWvcuDEhhMFgBAYGZmZm4l+baHk4YNI1CEgkEhaL1b59e0KIk5PT77//XkMDfAwBugog5qNr5TFvCEAAAvQWkEgkbDY7KSnpyy+/tLe3J4TUqVPn888/x02R6H1cYPbVCZw7d65jx46EkHbt2u3Zs6e6TfEZBCCgnIBYLD5x4kRQUBD1AOsxY8bgG7vKyWEr2gmIRKJ//vmndevWhBA/P78jR47QjgAThoByAoj5lHPCVhCAAAQgYIoCIpFoy5YtLVu2pC5QWrBgAZ4caop1xpy0I7B+/XovLy9CyCeffHLr1i3tdIpeIEB7gTNnznTo0IEQ0qRJkxUrVtDeAwAQqFxAIBBs377dw8ODENKvX7+bN29KpVKxWJyZmXnw4MFly5bNmTNn0aJFUVFRubm5uCS2ckSspYcAYj561BmzhAAEIACBKgR2797dokUL6smhJ0+exJNDq3DCaghIZ8yY4eTkZG5uPmzYsNzcXKlUyuFw7t69u3v37t9//3316tURERGnTp0qLi7G5Ug4XCCgvMDOnTvbtGlDCGnfvv3Ro0eVb4gtIUArAR6PN2PGjPr16xNCJk+e/ObNGw6Hc+nSpbFjxwYEBLi6utatW7dBgwaBgYFz5swpLCzETyJaHR6YrLwAYj55DbyGAAQgAAHaCaxbt87Z2dnMzCwgIODly5f4pZB2RwAmrJxAeXl5v379rKysnJyc5s+fz+fz8/Pzt23bNnjw4EaNGpmbmxNCbGxs2rdvHxERUVJSolyv2AoCEJAuWbKE
ujdf7969Hz58CBEIQKBSAQ6H06NHDzs7O1tb2/Dw8IyMjPPnz/fu3btNmzY9e/bs2LFjw4YNCSEWFhbu7u5nz57lcrmV9oOVEDB5AcR8Jl9iTBACEIAABKoU4PF433//vYWFhY2NzYABA3ApX5VS+IDeAmKxODEx0dfXlxDStm3b7du3Z2dnR0REBAQE+Pn5de3a1c/Pz9nZmXqUTbNmzZ4+fYpHhdL7kMHslRUQiURjxoypU6eOpaXlxIkTEZErC4ftaCYgFotzcnKaNGlibm7evHnzHTt2XLhwoXfv3r169dq8eXNsbOzJkydHjhxpa2tL3eZyzpw5RUVFNEPCdCHw/wKI+XAoQAACEIAAfQWePXsWEhJCCGncuPGiRYvoC4GZQ6BaAaFQeOTIEerGfD179ty8efPGjRubNGkyZMiQPXv2PHz48Pjx459//jl5vxw6dIjNZlfbJT6EAASkEokkPz8/MDCQENKwYcOIiAigQAAClQrweLzo6Og6deoQQrp27TpmzJh+/fqFhITcv39f9j2MM2fOUD+nrK2tR44cmZ+fX2lXWAkBkxdAzGfyJcYEIQABCECgSoELFy506tSJemTbgQMHqtwOH0CA3gJ8Pn/RokWurq6EkEGDBs2aNatFixbDhw8vKCigYNhs9vr16xkMBhX0bd68ubi4mN5mmD0EahYQiUQ3b96knhwaGBh44sSJmttgCwjQUoDFYq1du9bGxob6p9kGDRr07t07Pj5eHiM2NrZLly7UHSRGjRqFmE8eB69pJYCYj1blxmQhAAEIQOADgc2bN7dq1YoQ8umnn967d++Dz/AGAhB4L8DlcgcMGEBdRuHs7Ozr6ztu3Lj09HTZNRRisTguLs7R0REx33sz/D8EahYQCASRkZFNmjQhhPTv3//OnTuyNnfv3t23b9/WrVujoqIePHggW48XEKCnQF5e3vjx45lMJnX3vS5duhw7dqzCvVauX7/evXt3Qoitre1PP/2Ef22i56GCWUulUsR8OAwgAAEIQIC+AnPmzGnQoIG5uXloaKjsHi4ZGRkXLlzYsmXL2rVrd+zYER8fj7uM0fcQwczfCeTn5zdr1szCwoIQwmQy+/Tpc/36dYlEIs9z8+ZNOzs7KuY7ceJEeXm5/Kd4DQEIKArw+XzZk0O//fbbt2/f8vn8hISERYsWde/e3cfHp2XLlq1bt+7bt++uXbsEAoFiD1gDAZoIpKen+/v7Uz+G6tevP3/+/JycnApzP3/+vI+PDyHEwcFh586duHdEBR+8pY8AYj761BozhQAEIACBDwT4fP6AAQOYTGb9+vWXLFkiEom4XG50dPS0adO6devWsmXLJk2a+Pj4DB48+NKlSxwO54PGeAMB2ggIBII7d+7Uq1ePivBat269adOmCikej8c7ePCghYWFmZmZg4PDo0ePZBf60cYJE4WAygJcLjc4ONjW1tbe3n7Tpk15eXlXr1794osv3N3d27Rp06hRI2tra0KItbV1YGBgYmIikj6VidHAJATEYvH9+/fr1KlD3Rpi4MCBf//9d4WfMmKxeOPGjQ4ODoQQJyenf/75B+eLSRQfk1BHADGfOmpoAwEIQAACxi4gFotTUlICAgKoG/P98ccfeXl5f/zxx3/+85/WrVv7+/t7enpSt4CxsrLq1avXixcvKvxCaewCGD8ElBRgs9k7duygvrHLZDJ/+umnN2/eVGibk5OzYMEC6rtUvr6+mZmZFTbAWwhAoIKASCTKyMhwd3c3Nzf39vbeuXNnTEzM4MGD/f39Z86cGRUVtWfPntGjR1Oxhb29/YYNG0pLSyt0grcQoIMAm80+cOAA9U9NVlZWa9asKSwsrDDxoqKiWbNmEUIsLS1bt25dUFBQ4ZLzCttTbyUSCYvFiouLO3v27MOHD3EBYKVKWGl0Aoj5jK5kGDAEIAABCGhBQCQSnThxokWLFoSQvn377ty5c8eOHX5+fj179vztt9+ioqLCwsK6du1KCGEw
GFZWVmfOnOHxeFrYMbqAgLEJFBYWzpo1i0q9mzVrdubMGcUZPH36dPTo0dRlR6NGjcrLy1PcRiKRZGdnJyUlJXy4JCYm5ubmKvP3mGKfWAMB4xXgcrnnzp2jUrzu3bt///33Y8aM6dKly/79+2VZQ3R0NPUlRBsbm+nTpytGG8Y7fYwcAsoLZGdnz58/n4r5mjZtevHiRcV/eU1MTBwxYgQhpG7dul9++SWfz1emfz6ff/jwYW9vb3Nz8wkTJqSmpirTCttAwMAFEPMZeIEwPAhAAAIQ0ImAQCBYuXJl48aNCSFDhw5dsmRJp06d+vTpk5KSQt3Rmcfj7dq1i/qdkhCyffv2kpISnQwFnULAsAWys7N79OhhZWVFCBkyZMjDhw8Vxyt7vqGdnV14eHiFk0UikZSVlb18+XLatGnu7u7Ozs4tW7Zs/25xdnZu0KDB3LlzMzMzlfyrTHHvWAMBYxQoKSn5/fffqTtaBgYGBgQEdOvWLSoqSn4uN27coB4pYGNjM3nyZNmzreW3wWsImLxASkrKkCFDqF/JQkNDX716pTjlCxcudOvWjXoO76pVq5T5xq5QKExJSenatautrS0hBDGfoirWGKkAYj4jLRyGDQEIQAACGgnw+fzhw4dTtxvz9PTs2LHjZ5999ubNG/l/H75x44Ys5tu0aRMe2aaROBobrUBaWlq9evXMzc0JIStXrqz0eqLjx483bNiQuowiNja2wqWvfD5/ypQpDRo0MDMza9Wq1erVqwUCgfjdEhYW5uHhYWZm1qVLl7/++stokTBwCKgskJeXN3LkSOruewwGw8/P79ChQxUua42Li6OuK7e1tV20aBF+DKmsjAYmIfDw4cOgoCDqV7KVK1dWesH4tm3bPD09CSHe3t4XL15U5uFpxcXFv/76q52dHXXLP8R8JnGwYBL/E0DMh+MAAhCAAAToKMDlctu3b09doGRpaRkcHHzt2rUKvxTKx3wHDhyQfYuKjl6YM10F+Hz+xYsXzc3NGQyGvb19TEyMfBROqZSWlv76668WFhZWVlb+/v4lJSUVogo+nz9p0iQnJ6c2bdpERETInmotlUqzs7OnT5/u4uLi4OAwaNCg7OzsCm3pCo95m75AZmZmhw4dLC0tqQeDzpkzR/HJodHR0a1btyaE1KlTJyoqCg+DMv3DAjNUEBCJRIcOHaK+3k4IOX78eIVnQEmlUoFAMHnyZOrmEkFBQTk5OTX+KCkqKtq2bZubm1tISIibm5uFhQViPgV7rDBWAcR8xlo5jBsCEIAABNQWEAgESUlJLi4uZmZmhJCWLVtu2LChwm+N5eXlu3fvpu7NZ2dn9+jRowohoNp7R0MIGJFAQUHB77//Tl1D0a5du0ePHikO/vnz52PGjCGEODo6jhs3rsKlfFKpVBbzTZkyJTk5Wb4HsVh87NixTp06mZmZ+fn54USTx8FrExYQi8V37txxcHCgfgwNHDjw5s2bFX7KiESi9evXU1ed16tX7+HDh9Q9JUyYBVODgKJAcXHxqlWrzM3NzczM7O3tk5KSFP+16fXr159//rmZmVmdOnXGjh1b4VRS7FMgEERHRwcFBTVr1mzbtm1t2rRBzKeohDXGK4CYz3hrh5FDAAIQgICaAmw2e+/evdS/DDOZzPHjx6ekpFToKycnZ/HixdSTQwMCAjIyMipsgLcQoINAWlralClTqJhvyJAhL168UJx1XFxc7969qTsi/fbbb4p3RBKJRA8ePDh79uzTp0+5XG6FHq5evdqjRw9CiI+Pz507d2r886xCc7yFgDEKsNns/fv3U2eWhYXF6tWr5a9ypWaUl5c3a9Ysc3NzJpPp5+dXVFRU1QVKpaWlMTExGzduXC+3bNu27f79+4qBiDFyYcx0Fnj16tW0adOoR+j6+vpmZWUpaly8eLFTp06EEHd39+XLlytuUGHNs2fPvv7666ZNmy5ZsiQuLq59+/Z4BEcFIrw1agHEfEZdPgweAhCAAATUESgq
Kpo9ezZ1x+VmzZrt2bNH8Q+hlJSUiRMnEkKYTObXX39d6Y1g5PfN4/Hu379/7dq1av4Sk98eryFgFAKJiYmffvopFUbMmTPn7du3isM+dOhQu3btqAtjz5w5o2pOh5hPkRRrTF4gOzt70aJF1Jnl6el5+fJlxQjv0aNHQ4cOpa6THTNmjOJ1siKRKCEh4dKlSxEREcOHDx86dOiq90vXrl1dXV0HDx585syZ3Nxck/fEBE1Y4J9//unXrx8hxMbGZuTIkfn5+YqT3bFjB/X19jZt2uzcuZPagMfjpaam5uTkVPgdLy8vLyIiolOnTt9++216evq9e/cQ8ymSYo1RCyDmM+ryYfAQgAAEIKCOQG5ubr9+/ZhMJiFkwIAB9+7dU+zl77///uSTTwghdnZ2kZGRpaWlittQa8rLy9PT02NiYrp27dq8eXPFL15V1RDrIWDgAmKxOC4uzt3dnQojtmzZwmKxFMccHh7eoEEDQoifn19KSgr1B1VpaWl2dnaNTwygbroUGBhobm4eGBhY6bexFPeINRAwdoGXL19+9dVX1JkVGhpa6XWyZ8+e7dy5M3WB0rp16xSvk+Xz+RMnTnRycnJwcBg+fPjt27dlLAcOHOjUqROTyWzTps327dsV28q2xAsIGLjA+fPn27RpQ92hMiwsrMKT3KnBr1u3rnnz5oSQgICAw4cPUzeLiI2NnT179oEDB+SvIufxeHv37u3WrdugQYOoy13j4+MR8xn4MYDhqSqAmE9VMWwPAQhAAAJGL5Camtq4cWPqqQLz589X/AKIRCI5efJkkyZNGAxGvXr1bt++XenfSAKBoLS09OrVq9S/M5uZmbm7uyPmM/rjAxN4L8Dlcjdv3kzdodLCwiImJqbSE2HZsmXUtbH+/v6vX78WiURcLvfEiRPffffd8ePH33dW+f9nZmaOGDHCysrKyclp9OjRfD6/8u2wFgKmJfDo0aMOHTpQMV9YWFilF4xHRkZ6eHhQ32dXfEgUFWRQMV+3bt1OnDghfz0gn89fvnx506ZNraysunTp8urVqwoXNJkWJ2ZjsgJisXj37t1169YlhNSrV6/S529IpdLIyMhWrVoRQlq0aLF27dri4uK4uLgePXoEBARs2LBBlgxKJJL79+/37du3Q4cO27dvpx74jpjPZI8eGk8MMR+Ni4+pQwACEKClgEgkiomJYbxbmEzm3r17FW9qzuVy58+fz2AwLCwsWrRoUdUzdm/evNm7d29LS0vqHuqI+Wh5QJnypLOzs+fPn08IMTc39/DwSE9Pr3S2YWFhderUocKIK1euvH37Njw83NfXNzg4+NKlS5U2ka1cs2ZNs2bNGAxGz549ExISZOvxAgKmLXDz5k3Zk0NPnz4tf7URNXGBQDBu3DjqcfBBQUEsFks+xaO2kV3NN3/+/Ozs7ApiV65cCQ4OZjAYrVu3vnv3rqrfpq/QG95CoFYEioqKZs6cyWAwCCH169dPSUmp9EiOjY3t1q0b9Y9S9erV69q1q42Njbu7+/bt2+W/jcHlcseNG9e0adOff/6Z+vKvWCxGzFcrlcVOdSqAmE+nvOgcAhCAAAQMTqCoqCgyMlJ2R6SbN28qDvHFixfjx48nhNStWzc0NLTCH2ACgeDBgwcDBgxo2bJl3759N23atHnzZi8vL8R8ipJYY9QCT548ob5XyGQye/XqlZOTU+l0jhw54u/vT93IslmzZt7e3q6urv379798+XKFc0e+uUgk+vvvvzt16mRlZeXv779//37FwF1+e7yGgMkIlJSUREREUE8OrVOnzvPnzxUjvIyMjIEDBzIYjLp1606aNKnSa/EkEgmLxcrJySktLVXMPq5evRocHGxpadm5c+eXL19W2oPJkGIipiqQkJAwbNgw6udLhw4dqvpnVxaLNWnSJCo6t7CwaNiw4YABA86dO8disWRHvlAo3LVrl7e398iRIx88eECtV4z5ysvLo6OjFyxYkJqaaqqqmJfJCyDmM/kS
Y4IQgAAEIPCBQEZGxowZM6iYLyQkJDk5+YOP372JjY3t1asXIcTNzW3ZsmUVvkhYXl5+7NixHj16bNiw4fHjx8XFxQ8ePOjatStiPkVJrDFqgevXr3fv3p0QYmtrO2vWLMUngVKzS0pKmjBhgr29PfVk6jZt2kyfPv327dtV/T0mlUqFQmFycnJISIiDg0NgYOC+ffuq6tyoATF4CFQq8Pr16+nTp1NPDu3QoYPijSOkUmlMTAx1dZKHh0dYWFil/VSzUigUrl+/3tvbu0GDBhMmTKjmZKymE3wEgVoXKCsre/78+b179+7fv5+UlKQYZ1MjlEgkCQkJ69ev//nnnzds2BAfH//q1SsOhyML0Llc7vXr1zt06BAUFLR///6ysjKqYYWYLyUl5cWLFyNGjBg4cODTp09lzWvdAQOAgEoCiPlU4sLGEIAABCBg9ALPnj377LPPqJhvxowZGRkZilM6ePBg27ZtCSHNmzc/efJkhYuMhEJhWlra7du3ZY8jSE5ODg4ORsynKIk1Ri2QlZV15cqVo0ePnjx5MjExsULeLZtaeXl5QkLCzp07f/rppzVr1ly5ciU1NVXxqaCy7fl8fkJCwqhRo+zt7amMr9InJ8q2xwsImJjA/fv3Bw4cSD05dPz48QUFBYoT3Ldvn6+vLyGkXbt2+/btozYQiURZWVmyHz2KrWRrLl++HBIS4ujo+PHHH587dw5phUwGL0xVQCQSFRUVZWZmKv6jkUQiefPmzYQJE6ytrd3d3UeMGLFixYrId8v69evHjx/v4uJiZmbm6+v79ddfjx071t3dvV+/foj5TPVQocO8EPPRocqYIwQgAAEI/Cvw4MED6nFshJD//ve/hYWF/372/lVERETDhg0JIW3atHn06BH1zQ42m52fny/7F+D32/7v/xHzyWvgNT0FeDxebm5uhUxckYLP5z98+HDSpElWVlZNmjRZs2ZNbm6u4mZYAwETFrh06VJAQAAhxN7efvXq1bLnA8hPef369S1atCCEBAUFnT59WiqVisXiW7duLV269Ny5c1V9HV4oFGZkZFy5cmXYsGEBAQHjx48/depUpT+25PeF1xAwbQGJRJKVlbVjx46VK1eu+HBZunTpsGHD6tevb2Zm5u3t/fXXX69YseK33347dOgQ/v3JtI8K054dYj7Tri9mBwEIQAACHwiIRKILFy5YWVlRj9e4ePFipdccLV++3NramhDi5+f3+vVrsVjM5XIvXboUGRn5zz//fNDjuzeI+RRNsAYCigJisTgxMfGbb76xtrZu3Ljxjz/+WNVjPRTbYg0ETENALBbv3bu3fv361JNDz5w5U2lmFx4e7uXlRf1r08aNG3Nycm7evBkaGurn57d69eoK/0AlEAiys7OTk5Pv3Lkzd+5cOzs7CwuL0aNHJyYmmgYaZgEBHQlU+NIu7senI2d0q2cBxHx6BsfuIAABCECgNgXy8/OXLVtGPTm0fv36aWlplY5m+fLlNjY21JNDY2NjWSzW5cuX+/bt26VLl6NHjyo2QcynaII1EFAUyM3N/emnn+zs7Bo3bjxt2rTi4mLFbbAGAqYtUFZW9tNPP1GPBG3YsOHLly8rvd2Y7Eu7dnZ2fn5+U6ZMadWqlaur66+//qr4kys7O3vp0qXt2rXz8fHx9vZ2cXFxcnJasWJFamoqh8ORPYLAtGExOwioIYCYTw00NDF8AcR8hl8jjBACEIAABLQmkJycPG7cOOrG5926dav0xudSqXTPnj3Ut6VsbGx8fHy6du3q5OTk7++/ffv2Sq/+Q8yntQqhI9MV4PP5ixcv9vDwaNy48cyZMyu9gsl0Z4+ZQeD/BZKSkqgHWFtbW3fq1KmqE+Hly5chISFUGmhmZsZkMl1dXTdv3lxQUKB4oz2JRCISiQTvFh6Pd+fOnfbt29vY2Dg7O0+YMKGqZ2SjJBCAAGI+HAMmKYCYzyTLiklBAAIQgEDlArdv3+7duzd14/MpU6ZUeuNzqVR6
9+5d6v7oZmZmVlZWNjY2X3zxxc2bNzkcTqX9IuarlAUrISAvcOHChY8//tjR0XH06NGKj75JTU1dtmzZ7t27BQKBfCu8hoCJCaSmpm7evHnq1Kk//PDDpk2bqrqdpVAo/OeffxYuXDjk3bJgwYIbN26UlJRUeulfBSKBQJCQkDBgwAAmk+nm5jZ16lScVhWI8BYClABiPhwJJimAmM8ky4pJQQACEIBA5QJpaWk7d+5cunTpypUrY2JiqrqMgs1mX79+fe7cuSNGjBg/fvyGDRsSExPLy8sr7xSP4KjKBesh8E5ALBZnZmaOGjXKzc0tNDT0zp07FS5HKisri4qKCggIWLRoUaUXzAISAiYjwOPx8vPzMzIy3r59W+mlebKZUo+1efVuycnJUSmqEwqFp0+f/s9//mNhYeHn5xcbG6tMPijbNV5AgCYCQqFw//79Xl5eDAaja9euR44cwZfcaVJ6054mYj7Tri9mBwEIQAACHwjweLzCwsLc3Ny8vLzy8vJqfpnjcrkZGRlPnjx59uxZSUlJNVviSbsfEOMNBD4UkEgkbDY7LCzMw8ODyWR27tx53rx5Wz5cFi5c+OmnnzZt2jQyMlKlLOPDXeEdBCDwr8CLFy8mTJhgbm7u7u6+fv16nFn/0uAVBKRSiUSSl5d34MCB3r17Ozg4EEKcnZ0/++yz/fv3IxPHAWLsAoj5jL2CGD8EIAABCNS+AL60W/s1wAgMVUAkEqWkpAQGBlKPtSFVL82aNTtw4ADCCEOtJMZlZAIZGRnTpk2zsLBwdXVdtGgRn883sglguBDQpYBEInn79u2WLVuWLVu25P0SFhZ28ODBqr5Kr8vhoG8IaFMAMZ82NdEXBCAAAQjQUwAxHz3rjlkrIyAQCB4/fvz111+HhoYOq3aZOnXqjRs38PeVMqrYBgJSqVQoFGZkZLx+/bq0tFQRJDU1dcqUKRYWFriaTxEHayAAAQiYsABiPhMuLqYGAQhAAAJ6EkDMpydo7AYCEIAABGoeZKsAACAASURBVN4JiMXi3NzcHj16eHp6RkVFKV4GGx8fP2TIEDMzs3bt2j148ADfQ8SBAwEIQIAmAoj5aFJoTBMCEIAABHQogJhPh7joGgIQgAAEFARkMZ+Dg8PixYtzc3MrbHLo0KF27drZ2dmFhIRU8wipCq3wFgIQgAAEjF0AMZ+xVxDjhwAEIACB2hdAzFf7NcAIIAABCNBJQBbz2dvbu7m5LVmyJDs7WwZw8ODB//znPw0aNPjyyy+fP39e4dnWss3wAgIQgAAETE8AMZ/p1RQzggAEIAABvQoUFhZu3brVxcWFwWDY2tpu3LixqKhIryPAziAAAQhAgH4C5eXlP/30k6urKyHE09Nz2LBhP71fAgICxowZc/To0VevXlX/pHj6sWHGEIAABExcADGfiRcY04MABCAAAV0IcLncCxcu7NmzZ8eOHfPmzevYsaPs8aHdunVbunTpjh07du3a9eeff5aVleEyCl2UAH1CAAIQoLmAWCx++vTpyZMn91e2PHr0CN/VpfkRgulDAAL0FEDMR8+6Y9YQgAAEIKCRAJvN3rVr1/LlyxdUvSxevHjbtm3FxcWI+TSyRmMIQAACEIAABCAAAQhAQDkBxHzKOWErCEAAAhCAAAQgAAEIQAACEIAABCAAAQgYsABiPgMuDoYGAQhAAAIQgAAEIAABCEAAAhCAAAQgAAHlBBDzKeeErSAAAQhAAAIQgAAEIAABCEAAAhCAAAQgYMACiPkMuDgYGgQgAAEIQAACEIAABCAAAQhAAAIQgAAElBNAzKecE7aCAAQgAAEIQAACEIAABCAAAQhAAAIQgIABCyDmM+DiYGgQgAAEIAABCEAAAhCAAAQgAAEIQAACEFBOADGfck7YCgIQgAAEIAABCEAAAhCAAAQgAAEIQAACBiyAmM+Ai4OhQQACEIAABCAAAQhAAAIQgAAEIAABCEBAOQHEfMo5YSsIQAACEIAABCAAAQhA
AAIQgAAEIAABCBiwAGI+Ay4OhgYBCEAAAhCAAAQgAAEIQAACEIAABCAAAeUEEPMp54StIAABCEAAAhCAAAQgAAEIQAACEIAABCBgwAKI+Qy4OBgaBCAAAQhAAAIQgAAEIEBjAR6Pl5WVlZubK5FIaMyAqUNAfQGcROrboaVxCiDmM866YdQQgAAEIKB7AYFAUFhYWFxcjD+udI+NPZimAE4i06wrZqVHgbdv365fv37nzp1CoVCPu8WuIGA6ApmZmTiJTKecmIkSAoj5lEDCJhCAAAQgQEuBvLy8qKio48eP448rWtYfk9aCAHUSnThxAieRFjTRBf0EBAJBTEzMRx991KdPn7y8PLFYTD8DzBgCGgkIBIKrV6/iJNIIEY2NTQAxn7FVDOOFAAQgAAG9CIhEon/++Sc4OPiLL77ABX16IcdOTE1AJBLdvXs3ODh48ODBLBYLV8WaWoExH90LZGZmzp07lxDi4OBw5MgRDoej+31iDxAwKYGMjIw5c+YQQurWrXv06FGcRCZVXUymCgHEfFXAYDUEIAABCNBbID8/f8WKFYSQOnXqXLx4kcfj0dsDs4eAygL5+fnLly+nEoq//voLJ5HKgmhAbwGJRHLp0qXAwEBCiJWVVd++fd++fUtvEsweAqoJSCSSv/76S/4kysrKUq0LbA0BIxRAzGeERcOQIQABCEBA9wJ///33xx9/TP1xFRoampOTo/t9Yg8QMCmBGzdu/Oc//yGEMJnM0NDQ3Nxck5oeJgMBHQvk5uZ+//33lpaWhBAGg8FkMo8dO4ZrkXSsju5NSiAnJ2fWrFk4iUyqqJiMEgKI+ZRAwiYQgAAEIEAzgaKioqVLl9apU4cQYmZm5uTkdP78+fLycpoxYLoQUF+gqKhoyZIl8ifRhQsXcBKpD4qW9BM4e/Zs586didwyYsSIFy9e0E8CM4aAmgJnzpzp1KmT3DlERowY8fLlSzW7QzMIGIkAYj69FkokEmW/WwQCgV53jJ1BAAIQgIAqAjExMT169JD/vXDSpEmpqamq9IFtIUBrgatXr1Y4iSZPnpyWlkZrFEweAkoLFBUVzZo1q169evI/iVxdXQ8fPowL+pRWxIa0FigqKpo5cyZOIlofBHSdPGI+vVaew+FsfLcUFhbqdcfYGQQgAAEIKC1QVla2aNGihg0byv9x5enpeebMGdxcTGlFbEhrgbKysoULF1Y4iZo3b46TiNaHBSavisDZs2c/+ugj+R9D1Otx48YlJSWp0hO2hQBNBao6icaPH5+cnExTFEybHgKI+fRXZ5FIlJqa2vHd8uDBA6FQqL99Y08QgAAEIKC0QFxcHHVXvgp/X02dOhVf9FBaERvSWiAuLo66K1+Fk2jatGmvXr2iNQ0mDwElBDgcztSpU+vXr1/hDCKEeHl57d+/H//mpIQiNqG1AIfD+eabb6o6iaKiovh8Pq2BMHmTFkDMp7/ylpSU7Nixg/pp/dtvv+Xn5+tv39gTBCAAAQgoJyAQCH7++Wc3NzfFP67atGlz4sQJkUikXE/YCgI0FRAIBHPnznV1da30JDp16hROIpoeGZi2cgIikSg2NtbHx0fxDKLWjB49OiUlRbnOsBUE6CggEomuXbvm7e1d1Uk0ZsyY58+f05EGc6aHAGI+/dX5+fPnspvUdOnS5e7duxKJRH+7x54gAAEIQKAmAbFY/OjRIz8/v6p+L5w4cWJ6enpN3eBzCNBXgDqJfH19qzqJJk2ahJOIvscHZq6EAJfLHTdunJOTU1Unkbe39+7du/F3hBKW2ISmAuXl5WPGjHF0dKzqJPLx8dmzZw9OIpoeHzSYNmI+PRW5tLR0+/bt1tbW1H9rmEzmihUrcIc+PeljNxCAAASUE+Dz+d9//32FG4rJ/47o5+d3+PBh5TrDVhCgowB1EjVo0ED+xJF/7e/vj5OIjkcG5qycgEAguHPnjqurq5mZmfyJI//a0tJy6NChiMuVE8VWtBMQCAS3b992cXGp/iQa
NmwYTiLaHRy0mTBiPj2VOiEh4YsvvpD/Cd2jR4+4uDg97R67gQAEIACBmgSEQmFiYmKrVq0sLS3l/3Mt/9rW1nb8+PE5OTk1dYbPIUBHAYFA8OTJk5YtW+IkomP5MWdtCBQUFEydOtXGxkb+R4/iax8fn+3bt2tjh+gDAqYmUFBQ8O2338our1E8fag1rVu3xklkarXHfN4LIOZ7L6HL/2ez2Vu3bnVxcZH/r4yzs/PSpUtZLJYu94y+IQABCEBAWQEWizVv3jw7Ozv5/1Yrvvbz8ztw4ICynWI7CNBJoLi4WMmT6ODBg3SCwVwhoJSARCJJT0//7rvvBg0aNPD9EhgYSAixsrL67LPP3q8b+NVXX/33v//FA/2UYsVGdBKQSCRv3ryp9CRiMpkhISHyJ1FERAROIjodHTSaK2I+fRT7wYMHQ4cOVfxbsWfPnrGxsfoYAfYBAQhAAAI1CWRmZk6cOPGTTz7p9n6hbt5sYWHRsWPH9+u69evXb82aNXiGQE2c+JyOAhkZGZWeRJaWlp06dZI/idauXYuTiI6HCOZcrYBEIikpKfnnw+X3338nhNSrVy8mJkb+k6dPn+IkqpYTH9JRoNKTaO3atYQQJyena9eu4SSi42FBvzkj5tN5zXk8XmRkZNOmTRVjPjc3t4ULF3K5XJ0PAjuAAAQgAIFqBSQSSWFh4fkPlwULFhBC7O3tjxw5Iv/JjRs38M+/1XLiQzoKSCSSgoIC+TPl/Pnz1ElUp06do0ePyn+Ek4iOhwjmrJbA9evXCSFubm4cDketDtAIAnQXiIuLI4Q0bty4vLyc7haYPz0EEPPpvM5PnjwZMGCAYsZHrencufPdu3d1PgjsAAIQgAAEVBe4ceMGIaRhw4ZlZWWqt0YLCEBASiUUrq6ubDYbHBCAgBoCiPnUQEMTCMgLIOaT18BrOggg5tNtlSUSSXh4uKenZ1Uxn6ur65w5c/Awb2XKIJFIxO8WkdwCOmXosA0EIKCeAGI+9dzQCgIyAcR8Mgq8gIB6Aoj51HNDKwjIBBDzySjwgiYCiPl0W+jXr1/37NnTwsKiqpjP3Ny8bdu2z549w801qq+ESCR68ODB7t27169f//PPP49/t8ycOfP58+f46lz1dPgUAhBQWwAxn9p0aAgBSgAxH44ECGgogJhPQ0A0hwBiPhwDdBNAzKfDiovF4rVr11Z6Vz751M/Z2Xn27Nm43Ub1lRAKhRcvXvzxxx87duzo6urq6Ojo7Ozs5+eXnZ0tFourb4tPIQABCKgngJhPPTe0goBMADGfjAIvIKCeAGI+9dzQCgIyAcR8Mgq8oIkAYj4dFprP5y9dujQ0NLT/++XTTz+V3ZLv/br+gwcPnjVrVkFBAb5/Wk0xxGJxbm5uYmLijBkzXFxcCCF2dna9e/fGA0yqQcNHEICAhgKI+TQERHMIIObDMQABDQUQ82kIiOYQQMyHY4BuAoj5dFhxoVAYHx9/R245efIkFfPt2LFDbvWdu3fvcjgcxHzKFCMqKsrX15cQQt3WEN/YVQYN20AAAuoJIOZTzw2tICATQMwno8ALCKgngJhPPTe0goBMADGfjAIvaCKAmE+vhc7OzqZivsTERL3u2IR2dujQoYCAAEJIy5Yt9+/fj3samlBtMRUIGJwAYj6DKwkGZGwCiPmMrWIYr8EJIOYzuJJgQMYmgJjP2CqG8WoqgJhPU0GV2iPmU4mr0o03bNjg4eHBYDA6dOjw6NEjXAJZqRJWQgACWhFAzKcVRnRCZwHEfHSuPuauFQHEfFphRCd0FkDMR+fq03PuiPn0WnfEfJpz//zzz9bW1paWlr179+bz+Zp3iB4gAAEIVCWAmK8qGayHgJICiPmUhMJmEKhKADFfVTJYDwElBRDzKQmFzUxGADGfXkuJmE9D7oKCgoEDB5qZmbm6us6bN0/D3tAcAhCAQPUCiPmq98GnEKhRADFfjUTYAALVCyDmq94Hn0KgRgHEfDUSYQMTE0DMp9eC
IubTkPvmzZtdu3YlhLRr127Pnj0a9obmEIAABKoXQMxXvQ8+hUCNAoj5aiTCBhCoXgAxX/U++BQCNQog5quRCBuYmABiPr0WFDGfhtxRUVHt2rUjhAQHB9+4cUO+Nz6fX1ZWJhAI5FfiNQQgAAFNBBDzaaKHthCQSqWI+XAYQEBDAcR8GgKiOQQQ8+EYoJsAYj69Vhwxn4bcS5YscXd3Nzc3HzFixNu3b6VSKY/HS0lJuXLlytatW5cvX75p06a4uLiCggI8gVdDajSHAASkUiliPhwGENBQADGfhoBoDgHEfDgGIKChAGI+DQHR3OgEEPPptWSI+TThFovFw4cPt7e3d3Bw+OWXX3g8XnFx8cmTJydMmODh4WFubk4Isba2bt26dUREBIvF0mRfaAsBCEAAMR+OAQhoLoCYT3ND9EBzAcR8ND8AMH3NBRDzaW6IHoxLADGfXuuFmE8T7pKSEuobuz4+Pnv27CksLNy5c2eLFi3c3d09PT3d3d3r1KlDCDE3N69fv/6tW7eEQqEmu0NbCEAAAriaD8cABDQUQMynISCaQwAxH44BCGgogJhPQ0A0NzoBxHx6LRliPrW5hULh9evXvby8CCG9evX6448/tm7d6uDg0L1793379mVkZMTGxn755Zfk/bJp06bi4mK1d4eGEIAABHA1H44BCGgugJhPc0P0QHMBxHw0PwAwfc0FEPNpbogejEsAMZ9e64WYT21uHo8XHh7u5uZGCBk6dOj3338fEBAwaNCgZ8+ecTgckUjE5/PPnj1rb29PBX1hYWH5+flq7w4NIQABCCDmwzEAAc0FEPNpbogeaC6AmI/mBwCmr7kAYj7NDdGDcQkg5tNrvRDzqc1dXl7+zTffODk5EUKaNWvWqVOnESNG3L9/X/5RG7dv36Yu9yOELFu2LC8vT+3doSEEIAABxHw4BiCguQBiPs0N0QPNBRDz0fwAwPQ1F0DMp7khejAuAcR8eq0XYj61ucvKyjp16mRjY0MIYTKZvXr1unjxonzGJ5VKb9686eHhQV3Nt23btpKSErV3h4YQgAAEEPPhGICA5gKI+TQ3RA80F0DMR/MDANPXXAAxn+aG6MG4BBDz6bVeiPnU4xaJRG/evHF2djYzMyOENG/efN26dRwOR743sVh86tQpe3t7BoPBZDJv3bolEAjkN8BrCEAAAqoK4BEcqophewhUEEDMVwEEbyGgqgBiPlXFsD0EKggg5qsAgrcmL4CYT68lRsynHnd5efnZs2etra0JIRYWFqNHj05MTKzQVWlp6erVq6kNmjRpkpWVVWEDvIUABCCgqgBiPlXFsD0EKggg5qsAgrcQUFUAMZ+qYtgeAhUEEPNVAMFbkxdAzKfXEiPmU4+7uLh41apVVlZWhBBnZ+dNmzZV+LquVCp98eLFN998Qwixtrbu06cPnr+hHjVaQQAC8gKI+eQ18BoCaggg5lMDDU0gIC+AmE9eA68hoIYAYj410NDEqAUQ8+m1fIj51OPOyckZO3aspaUlIaRHjx537txR7OfWrVt9+vQhhNSpU2fevHm4MZ8iEdZAAAKqCiDmU1UM20OgggBivgogeAsBVQUQ86kqhu0hUEEAMV8FELw1eQHEfHotMWI+9bjfvHkTFBRkbm5OCPnpp5/S09MV+zl9+nTLli2py/1OnjzJ5XIVt5GtKSws3Llz56effvrjjz9W2ptsS7yAAAToLICYj87Vx9y1IoCYTyuM6ITOAoj56Fx9zF0rAoj5tMKIToxIADGfXouFmE8NbpFIdOPGDQcHBwaDQQjZsmULm82u0I9AIAgLC7O1tTUzM/Pw8Hj16pXit3rlm6SkpAwdOtTOzm7ixIm5ubnyH+E1BCAAAZkAYj4ZBV5AQD0BxHzquaEVBGQCiPlkFHgBAfUEEPOp54ZWxiuAmE+vtUPMpwZ3aWnpzp07ybvFxcXl2rVrYrG4Qj8ZGRkTJkwghNja2vbo0aO8vLzCBvJvi4qKdu3a5e7u
Xq9eva1btyqGhvIb4zUEIEBnAcR8dK4+5q4VAcR8WmFEJ3QWQMxH5+pj7loRQMynFUZ0YkQCiPn0WizEfGpwZ2Vl/frrr1TM17lz52fPnil2cuvWrZCQEEKIi4vLDz/8IBQKFbcRiUQPHz68fPlyeHh49+7dCSH29vazZs06duzYhXfLtWvXnj17JpFIFNtiDQQgQE8BxHz0rDtmrUUBxHxaxERX9BRAzEfPumPWWhRAzKdFTHRlFAKI+fRaJsR8anA/e/YsNDSUivlGjRqVlpam2MmJEyeCgoIIIV5eXn/88YfsG7uSdwu1PZvN/uKLLxwcHKiuFP+3efPmS5cuVbxUUHF3WAMBCNBEADEfTQqNaepOADGf7mzRM00EEPPRpNCYpu4EEPPpzhY9G6YAYj691gUxnxrc9+7dCwwMpFK5ZcuW5efnK3ayadOmZs2aEULatWt39epVKqoTCAQFBQUsFksqlUokkuLi4i+//LJFixaOjo4WFhbm5ub29vZNmjTx8fFp+2754osvTp06pdg51kAAArQVQMxH29Jj4toSQMynLUn0Q1sBxHy0LT0mri0BxHzakkQ/xiKAmE+vlULMpwZ3TEyM7BK8I0eOcDgcxU6WLVvm6OhICPH3979z5w4V812+fHnUqFFLly4VCARUE5FIxGazf//996ZNmzo7O48ePbqwsFD4fhGJRLiUT9EWayBAZwHEfHSuPuauFQHEfFphRCd0FkDMR+fqY+5aEUDMpxVGdGJEAoj59FosxHyqcrNYrLVr15qbmzMYDCaTefv2bdkXcuW7WrduXaNGjQghzZs3j4yMzMjI2LZt28cff9y3b98LFy7I326PzWbPnz/f3t7ey8tr165dld7FT75nvIYABOgsgJiPztXH3LUigJhPK4zohM4CiPnoXH3MXSsCiPm0wohOjEgAMZ9ei4WYT1Xu169ff/fdd4QQKysrHx+f9PT0Sns4ceLERx99RD1pt127dn369PH29g4JCTl16hT1pV1Zq+fPn48ZM8bc3NzX1/f69eu4fE8mgxcQgICiAGI+RROsgYBKAoj5VOLCxhBQFEDMp2iCNRBQSQAxn0pc2NgEBBDz6bWIiPlU5U5MTJw2bZqXl1fbtm0nT55cXFxcaQ8vXrz47rvvnJ2dLSwsHB0d/f39R44ceeHCBcXtr1+/3qdPHyaT2aNHj8zMzEp7w0oIQAAClABiPhwJENBQADGfhoBoDgHEfDgGIKChAGI+DQHR3OgEEPPptWSI+VTlzs7O/uuvv/bt23fw4MHbt2/zeLxKexAIBHfu3ImMjFy5cuWGDRtOnDiRmJjI5XIrbCwWi3fv3u3v71+/fv0JEybI7tlXYTO8hQAEIEAJIObDkQABDQUQ82kIiOYQQMyHYwACGgog5tMQEM2NTgAxn15LhphPr9wKO+NwONOnT69fv36rVq3WrFmDb+wqCGEFBCDwgQBivg848AYCqgsg5lPdDC0g8IEAYr4POPAGAqoLIOZT3QwtjFsAMZ9e64eYT6/cCjsrLCwMDQ21srLq3r17XFycwudYAQEIQOADAcR8H3DgDQRUF0DMp7oZWkDgAwHEfB9w4A0EVBdAzKe6GVoYtwBiPr3WDzGfXrkVdpaSkvLZZ59ZWFh8/vnnSUlJCp9jBQQgAIEPBBDzfcCBNxBQXQAxn+pmaAGBDwQQ833AgTcQUF0AMZ/qZmhh3AKI+fRaP8R8euVW2Nnx48fbt29vbW09evTorKwshc+xAgIQgMAHAoj5PuDAGwioLoCYT3UztIDABwKI+T7gwBsIqC6AmE91M7QwbgHEfHqtH2I+vXIr7Gzt2rWenp4ODg7Tp09ns9nU50Kh8OLFi3///TeHw1FogRUQgACtBRDz0br8mLw2BBDzaUMRfdBaADEfrcuPyWtDADGfNhTRhzEJIObTa7UQ8+mVW2Fnixcvbty4sbW19dixY3NycqRSKY/H27JlS+/evVetWlVUVKTQAisgAAFa
CyDmo3X5MXltCCDm04Yi+qC1AGI+Wpcfk9eGAGI+bSiiD2MSQMyn12oh5tMrt8LOtmzZ4u3tTQjx9/dfu3btX3/9FRkZ2a5du/79+58/f768vFyhBVZAAAK0FkDMR+vyY/LaEEDMpw1F9EFrAcR8tC4/Jq8NAcR82lBEH8YkgJhPr9VCzKdXboWdPXny5Kuvvqpbty55t5ibmzdq1GjkyJF3794Vi8UKm2MFBCBAdwHEfHQ/AjB/jQUQ82lMiA7oLoCYj+5HAOavsQBiPo0J0YGRCSDm02vBEPPplbuyncXGxg4fPtzz3dK2bdslS5awWKzKNsQ6CEAAAlLEfDgIIKChAGI+DQHRHAKI+XAMQEBDAcR8GgKiudEJIObTa8kQ8+mVu7KdiUQiPp/Pe78IhUKJRFLZhlgHAQhAADEfjgEIaCqAmE9TQbSnvQBiPtofAgDQVAAxn6aCaG9sAoj59FoxxHx65cbOIAABCGgmgKv5NPNDawhIEfPhIICAhgKI+TQERHMIIObDMUA3AcR8eq04Yj5NuIVCYXp6+qv3S3p6OpfLlUql+fn5qamp71e/Sk1NFQgEmuwIbSEAAQhQAoj5cCRAQEMBxHwaAqI5BBDz4RiAgIYCiPk0BERzoxNAzKfXkiHmq4abz+cXFRW9efMmISEhPj7+2rVrFy9eTE5O5vP5xcXFhw8fnj9//sSJE8e+XxYsWPDq1SuJRHLgwIHp06e/Xz12woQJv/zyS1JSEp/Pz8rKiouLu3z5cnx8fGJiYkZGRklJiVAorGYY+AgCEICATAAxn4wCLyCgngBiPvXc0AoCMgHEfDIKvICAegKI+dRzQyvjFUDMp9faIeajuEUiEZvNzs3Nff36dVJSUklJiVgsTklJ2bhx4w8//DB8+PAhQ4Z06dLF19c3LCwsLy8vNTXVz8/PwcGhYcOGru+XXr16xcfHSySSRYsWtW3b9v1q14YNG1paWh48eJDNZl+6dKlnz55BQUFDhgwZNWrU7Nmz165de//+fR6PJxAIsrOzCwsLcemfXs8B7AwCRiWAmM+oyoXBGqIAYj5DrArGZFQCiPmMqlwYrCEKIOYzxKpgTLoUQMynS12Fvmkb84nfLVKptLi4OCMj459//jlw4MCKFSvGjx/ft2/fK1eulJeXnzt3LigoyMnJqcm7xcfHp23btkuXLs3KysrNzf3111+nTJny888/z3+/REZGpqenS6XS8+fPh4WFvV89/+effx4+fPiDBw8EAkF0dPQnn3zi4+Pj4eHRuHFjR0dHOzu7OXPm5OfnFxYWLlu2LDw8PDo6OiUlJTs7u7i4mMvl4okcCoctVkCAvgKI+ehbe8xcSwKI+bQEiW7oK4CYj761x8y1JICYT0uQ6MZoBBDz6bVUtIr5xGKxUCjk8XglJSVZWVmFhYVSqXTdunUBAQF2dnbm5uZMJtPOzs7BwWHLli3FxcWPHz+eP3/+pEmT1r1brly5cuvWrVevXvF4PLWLxGaznz17dunSpY0bN65atWrs2LF9+vTZuXMnl8t98eKFi4uLlZUVg8Hw8PDo37//okWLoqOjqd2JRCKxWKz2ftEQAhAwDQHEfKZRR8yiFgUQ89UiPnZtGgKI+UyjjphFLQog5qtFfOy6VgQQ8+mVnVYx3+vXrw8dOjR9+vSPPvqoc+fO586dk0gkGzZsaNGiha2tbfPmzfv3779gwYL9+/enpKTw+XyRSMR/twjeLaJ3i4ZZm0QiEYvFIpGI6pPH43G5XCp8jI+PnzFjRufOnanM0crKqk6dOiEhIVTMd+vWrRcvXuD7vHo9PbAzCBieAGI+w6sJRmRkAoj5jKxgGK7hCSDmM7yaYERGJoCYz8gKhuFqLICYT2NCVTqgQ8xXVlb2559/Tpo0qWPHjs2bN69fv761tXXr1q2PHTsmFoufP39+6dKlBw8eZGRk5OTkFBcXFaR3HwAAIABJREFUs9lsgUCg56/KSiQSHo9XWFiYlZWVlpZ2
9uzZhQsXTp48OTIyUigUPn/+vF+/fn5+fuPGjdu2bdvdu3c5HI4qdca2EICAiQgg5jORQmIatSeAmK/27LFnExFAzGcihcQ0ak8AMV/t2WPPtSOAmE+v7qYX80kkkrKysocPH548efLo0aPJycklJSVbtmyp924JCAgYMWLE4sWLz5w5k5eXJ5VKqSvpRCKRXt1r2hmHw8nOzk5LS8vJyRGJRPfu3QsODrazs6tXr56Pj09wcPDkyZOvXbvG5XJr6gmfQwACJiWAmM+kyonJ1IYAYr7aUMc+TUoAMZ9JlROTqQ0BxHy1oY591qYAYj696ptMzCcUCrOzs+/evXv8+PG5c+cOHTq0Q4cOnTp1OnDgAI/Hu3379rRp03bv3n358uXHjx/n5uYKhUK9QmuwM7FYnJ+ff/Xq1VWrVo0aNapjx47169d3dHRctWpVSUlJWVlZdnY2i8XSYA9oCgEIGI0AYj6jKRUGaqgCiPkMtTIYl9EIIOYzmlJhoIYqgJjPUCuDcelKADGfrmQr7dfYYz6hUFhUVCQQCNhs9smTJz///PMmTZowmUx3d/f27dsPGjTo/PnzEomEz+dnZWUZ2iV7lVakmpUcDufZs2enT59eunTp4MGDjx07Vl5efvPmzbCwsB07djx+/DgvL0/P3zWuZrT4CAIQ0IUAYj5dqKJPWgkg5qNVuTFZXQgg5tOFKvqklQBiPlqVG5OVSqWI+fR6GBhpzCcSidhsdnZ2dlxcXGRkZHp6OofD2bt3r7e3t7+/f2ho6NKlS48fP56cnFxeXq5XUL3sjM/nP3/+vLS0VCwWR0ZGtmzZ0tHRsWPHjqtWrUpNTS0pKTH2QFMvitgJBIxSADGfUZYNgzYkAcR8hlQNjMUoBRDzGWXZMGhDEkDMZ0jVwFj0IYCYTx/Ksn0YXcwnFosFAsHbt2/37ds3aNAgW1tbS0vLrVu3lpaWZmRkHD9+PCkpSTY7k38hEAguXLgwceLEVq1a2djYWFlZeXh4/P777/n5+SY/d0wQAvQUQMxHz7pj1loUQMynRUx0RU8BxHz0rDtmrUUBxHxaxERXRiGAmE+vZTK6mC8lJSUsLKxr165169ZlMpkMBsPX1/ePP/4oKioSi8VCoVAsFutVsLZ3JhQKuVzuy5cvly1b1qZNGysrq4CAgMePH9f2uLB/CEBAJwKI+XTCik7pJICYj07Vxlx1IoCYTyes6JROAoj56FRtzPV/Aoj59HocGEvMJxaL8/LyxGLxuXPnevTo4ejo6OPj8+233x4/fvzVq1f4mqpQKCwuLn716tXy5csjIyNzcnLS09P37dsXERGRkpJCt+hTr6cQdgYB/Qog5tOvN/ZmggKI+UywqJiSfgUQ8+nXG3szQQHEfCZYVEypWgHEfNXyaPtDw4/5iouLr1y5smbNmh9//LG0tDQ5OXnlypXz5s2Ljo5OS0srLy/HQydkB4VYLM7JycnPzxcIBFeuXPn444+9vLwGDx58+PDhzMxMI3q4sGxGeAEBCFQQQMxXAQRvIaCqAGI+VcWwPQQqCCDmqwCCtxBQVQAxn6pi2N7YBRDz6bWChhzzFRQU3L59Ozw8PDg4uEGDBq1bt379+nV5eXl6enpmZiauUKv+QElISJgxY4a/v7+1tbW/v//3338fFxdXUlJSfSt8CgEIGLgAYj4DLxCGZ/gCiPkMv0YYoYELIOYz8AJheIYvgJjP8GuEEWpXADGfdj1r6M1gY760tLStW7f27NmzQYMGLi4uwcHBK1asePPmDZ4hW0NF338sFApTU1P37t37+eefu7q61qlTZ+jQofHx8e8/x/9DAAJGKYCYzyjLhkEbkgBiPkOqBsZilAKI+YyybBi0IQkg5jOkamAs+hBAzKcPZdk+DCrmk0gkIpGI+m7pihUrPD09nZycAgICpk2bduvWLdmY8UJ5AaFQ+Pz587lz57Zv37579+4xMTFCobD03YIvOyvPiC0hYDgCiPkMpxYYiZEKIOYz
0sJh2IYjgJjPcGqBkRipAGI+Iy0chq22AGI+tenUaWg4MZ9YLC4tLU1JSXn27JlUKj127FifPn0mT54cGxuLm8qpU9oP21y7di0mJqagoODNmzerVq0KDw/ncDj44vOHSHgHASMQQMxnBEXCEA1bADGfYdcHozMCAcR8RlAkDNGwBRDzGXZ9MDrtCyDm075pNT0aTsz3/Pnz2bNn+/r6fvXVV1wut7i4OCsrq6SkBBlfNeVT/iPhu0UgEBw5cqR+/frOzs6hoaH3798XCATKd4ItIQCBWhdAzFfrJcAAjF0AMZ+xVxDjr3UBxHy1XgIMwNgFEPMZewUxflUFEPOpKqbR9oYQ85WWlkZFRQ0ZMsTV1bVhw4ajRo1isVgikQjfKtWotFU0Tk9PX7hwobOzc7169bp3737o0KHi4uIqtsVqCEDA4AQQ8xlcSTAgYxNAzGdsFcN4DU4AMZ/BlQQDMjYBxHzGVjGMV1MBxHyaCqrUvnZjPpFIlJaW9ssvv7Rv397Z2TkgICAsLCwxMRGXmKlURJU2FgqFb968iYiICAgIqFevXlBQUEREREZGhkqdYGMIQKC2BBDz1ZY89msyAoj5TKaUmEhtCSDmqy157NdkBBDzmUwpMRElBRDzKQmlnc1qN+bj8XhHjx719PR0d3cfMmRIVFRUVlaWdiaGXqoWEIvFLBYrOjp66NChDRs2HDt2bEpKStWb4xMIQMCABBDzGVAxMBTjFEDMZ5x1w6gNSAAxnwEVA0MxTgHEfMZZN4xafQHEfOrbqdGyVmI+KmbKzc0tLy+PjY0dNGjQ4sWL7927x+fz1ZgCmqgtcPPmzTlz5uzfv7+oqKi4uPjZs2ccDgfflVbbEw0hoAcBxHx6QMYuTFsAMZ9p1xez04MAYj49IGMXpi2AmM+064vZKQog5lM00eEa/cd8YrH47du3f/zxx7JlyzIzM6VS6aNHj8rKynQ4SXRdtQCHw+HxeMXFxXv27Pnkk0/OnTtXUlKCJ/BWDYZPIFDLAoj5arkA2L3xCyDmM/4aYga1LICYr5YLgN0bvwBiPuOvIWagmgBiPtW8NNxa/zFfdnb27NmzXVxcPD09V69ereH40VwrAo8fP+7SpYulpaWHh8eBAwdKSkpwTZ9WYNEJBLQugJhP66TokG4CiPnoVnHMV+sCiPm0TooO6SaAmI9uFcd8EfPp9RjQc8yXm5s7derURo0aOTs7Dxs27OnTp3qdLXZWhQCXy7148aK/vz+TyWzatGlEREReXl4V22I1BCBQmwKI+WpTH/s2CQHEfCZRRkyiNgUQ89WmPvZtEgKI+UyijJiECgKI+VTA0nxTvcV8YrE4Ly9v6tSpHh4ebm5uU6ZMefz4MZ6oq3kFtdKDRCLhcDhXr17t06ePo6Njq1atli9fnpaWppXO0QkEIKBFAcR8WsREV/QUQMxHz7pj1loUQMynRUx0RU8BxHz0rDudZ42YT6/V11vMx+VyIyMjPT09W7Ro8f3338fHx4tEIr1OFTurSUAgEDx+/Pjbb791d3f/8ssvExMTa2qBzyEAAX0LIObTtzj2Z3ICiPlMrqSYkL4FEPPpWxz7MzkBxHwmV1JMqAYBxHw1AGn3Y73FfDwe79ChQ/369Vu0aFFiYiIyPu3WUYu9JSYmrlq16sCBA7m5uTweLzc3F0/k0CIvuoKAhgKI+TQERHMIIObDMQABDQUQ82kIiOYQQMyHY4BuAoj59FpxXcd8EomksLDwxYsXeXl5QqHw2rVr2dnZiI30WmPVd8ZisUpLSwsLC8+ePbtq1arc3FzEsqorogUEdCKAmE8nrOiUTgKI+ehUbcxVJwKI+XTCik7pJICYj07Vxlz/J4CYT6/Hga5jPhaLFRkZ2bt378jISKR7ei2tZjsTi8VxcXHBwcF2dnZr1qxB0qcZJ1pDQGsCiPm0RomO6CqAmI+ulce8tSaAmE9rlOiIrgKI+ehaefrOGzGfXmuv05hPIpFcuHChY8eONjY2
3bp1Y7FYEolEr9PDzjQQSE9P//bbby0sLGxsbMLDwwsKCjToDE0hAAHtCCDm044jeqGxAGI+GhcfU9eOAGI+7Thq3ItEIhGLxSKRSKVrKWRN8HeZxhVQvwPEfOrbaaMldRaodO7In244d9QoAmI+NdDUb6LTmC8pKemrr76ys7Pr0qVLbGwszgf161QbLUUiUUZGxtSpU21tbV1dXdesWZOTk1MbA8E+IQCBfwUQ8/1rgVcQUEsAMZ9abGgEgX8FEPP9a1Grr+7evfvLL7988803K1euLCgoUCbsY7PZCxcunDx58uzZs69fv47b8tRWARHz1Za8VCotLy//448/pk2bNmHChB07dmRmZiozmPj4+MWLF0+YMOGHH35ISEgQCoXKtMI2MgHEfDIKfbzQXcwnFAqXLVvWvHlzf3//nTt3crlcfcwH+9CqgFAoTE9PHzVqlLOzc+vWrbdu3cpms7W6B3QGAQioJoCYTzUvnW1dVlZ2/vz56dOnT5kyZfv27eXl5TXuSiQSvXz5cvny5RMnTly1alVSUlKNTbCBLgQQ8+lCVaU+V6xYMX78+IkTJ+7bt4/D4dTYViAQ/PXXX5MmTRo3btw333zDYrFqbIINdCqAmE+nvMp3vm3bNjc3N2dn56CgoHv37imTOxQWFjZu3NjZ2dnV1TU8PLysrEz53WFLLQog5tMipqpdsVisoUOHNm7c2MnJadSoUfHx8cr0cPr06Y4dOzo5Ofn4+Bw7dozH4ynTCtvIBBDzySj08UJ3Md/du3eDg4NdXV0XLlyYl5enj8lgHzoQEIlEycnJI0eO7Ny58/r16/HbgA6M0SUEVBBAzKcCli43zc7Onjdvnq2tLXVXiqKiohr3xufzz5w54+3tbWlp2b59+zNnztTYBBvoQgAxny5UVeqzS5cu5ubmlpaWU6ZMUebcYbPZq1atsrS0NDMzYzKZubm5Ku0OG2tdADGf1knV63Dt2rXk3dKkSZPY2FiBQFBjP/n5+QwGg2q1ePFihOY1iuloA8R8OoJVptuioqJPP/3UxsaGENKvX7/bt28r0+rIkSMtW7YkhLi4uOzZswfXMCmDJr8NYj55DZ2/1l3Md+3atZCQkH79+l25ckXn08AOdCzw119/nTx5Mjk5mcPh4II+HWOjewhUJ4CYrzodPX6Wnp4+c+ZMQoilpWWzZs2UuXspj8fbu3evm5sbIaRFixaHDh3S43ixq38FEPP9a1FLr4KCgqiUYfz48cqcOywWa+nSpVQTQghivlqq27+7Rcz3r0WtvkLMV6v8Gu0cMZ9GfJo1RsynmZ+arRHzqQmnXjPdxXwFBQVHjx49f/48fhtTrzQG1UooFEokkrKysvv37x89erSoqEiZ238Y1BQwGAiYhgBiPgOpI2I+AymEGsNAzKcGmnabIObTrqf+e0PMp3/zSveImK9SFqNYiZivFsuEmK9W8BHz6ZVdFzGfWCxmsVglJSV8Ph+P3dBrOXW8s/j4+C+++MLDw2PDhg3l5eUoro690T0EKhFAzFcJSm2sQsxXG+ra2SdiPu04atCL/mM+kUjE4XDy8/NzsGhD4NixY4SQBg0avH79Whv9oY+c3Nzc/Px8Doej0jMx9Bnz4STS7mF69OhR6uufOIm0BZubm1tQUMDhcGq8GAUxnwY/wNVviphPfTs1Wuoi5mOxWIMHDx47dmxcXBxuTqlGUQy2SWpq6syZM83NzRs2bHjr1i1l7gBisHPBwPQpQD2BXoxFGwJUQtGgQYPS0lJt9Ic+/l9A1X+3qJWYD6eSVo5X6hoKFxeXsrIyrXSITsRiseTdouRPFv3HfHl5eeHh4U2aNJHdlUz2FWC8gIAhCFhZWTVv3jw8PFyZu1XKTjR9xnx5eXnr1q1zd3fHSWQIBwzGoChgZWXl5eX13//+t7i4WHaOVPoCMV+lLLpeiZhP18If9K/1mE8gENy5c8fe3t7Z2fnPP/9EEvQBt5G/EYlECQkJAwcOtLKy6tat
25MnT1T9w9jIATB8dQSKiooOHjwYEhISGBjYHovGAp6enoQQc3NzX19fjTtDB/8T+Oijj/r37x8VFaXSncj1HPNxOJzo6OiBAwd27NgRZdNQACeRhoCKzWUnUUlJiTI/J/Qf812/ft3Ly8vCwgIJheLfxlhjCAIMBsPKyqpRo0bPnj1T5iSittFnzIeTyBCOE4yhGgHqJGrcuHFKSkr1JxFivup9dPQpYj4dwVberdZjvqKiojVr1jAYjC5dujx9+hQxUOXuRruWzWafPHmyefPmDg4OCxcuzM7ONtqpYOB6Erh3717Hjh1tbGzwx1U1v5rgo1oUMDMzs7W19fX1ff78ufJnhZ5jvrS0tE8++cTOzs7MzKwWrbBrCFQqYGZmZmdn5+vr++LFC2VOIv3HfNHR0VZWVpUOHishYCACDAbDzMzsyZMnypxE1Db6jPkuXLiAk8hADhUMoyoBBoNhbm6emJhY/UmEmK96Hx19iphPR7CVd6v1mC8rK2vq1KkMBmP69OnIgCpHN/K1eXl5y5cvt7e3DwoKiouLq/H2B0Y+XQxfU4HLly9bW1tX9fMY6yFgCALU74WPHz9W/nDXc8yXlJRka2uLrNwQjhaMoVIB6iRSMqHQf8x3/vx52bDnzZu3AQsEDElgypQpsuNTyZOI+mmlz5jv3LlzskHiJDKkwwdj+Z/A5MmTZccnYj7lf5vV55aI+fSpLdV6zJeenh4aGspgMJT5Yrxep4qdaUlAJBKlp6ePHj16xowZjx8/RsynJVeT7SY6Olr2c3fkyJEzsEDAkAQGDBggOz4TEhKUPw/1HPMlJibKxvn5558bEiHGQneB/v37yw5OJbPy2o35nj59qvyZji0hoAeBM2fOyE4io4j5kpKS9MCCXUBAeYE///xTdhIh5lPeTZ9bKhXzlZaWvnz58vHjx4+waCZw8eJF6pQ4cOCAZj39f+uTJ08GBgYyGIwffvghJiZGK30aXSdJSUlZWVkm/PgRsVicnJxcWFhYUlJSWFhYVlamz/9GYF/GJSAf86n0y6txTROjNVIB+csTjCXmO3XqlJFqY9gmKXD27FnZH1eI+UyyxJiUrgUQ8+laGP2bvABiPsMvsVIx3969e7t06WJnZyf7xQIvIGA4Aq6urt98801ycrLhn2+ajFAoFF6/fn3t2rWXLl3CNX3KS4qEQolYrPz2xr4lYj5jr6Bpjx8xn2nXF7PTgwBiPj0gYxemLYCYz7Tri9npQQAxnx6QNdyFUjGfm5sbk8nETWoMJ9jCSOQFGAwGk8k8duyYhieDgTdPSUnp16+fpaXliBEjioqKDHy0BjS8hMeC8nIDGo+Oh4KYT8fA6F4jAcR8GvGhMQSkUsR8OAogoKEAYj4NAdEcAoj5DP8YUCrms7CwQMYnnyvhtaEJMBiMw4cPG/75pskIhULh1q1bW7Ro4e/vf/DgQU26olfbnXulGzaXpaXRZNaI+WhSaCOdJmI+Iy0chm04Aoj5DKcWGImRCiDmM9LCYdiGI4CYz3BqUdVIlIr5ZJnOV199dQ4LBAxGYM+ePUwmkzo+Dx06VNVRbjLrnzx5MmLECAcHh2HDhpWWlprMvHQ3EYlEIt24RerXW7phM58el0Ai5tPd4YSeNRdAzKe5IXqguQBiPpofAJi+5gKI+TQ3RA80F0DMZ/gHgGox3+LFiw1/ShghfQQyMzNtbW3pE/OVl5fv3LmzWbNmrVq1OnHihFAopE+t1Zvp/8d8xFfq/an04BERDb69i5hPvUMFrfQjgJhPP87YiwkLIOYz4eJiavoRQMynH2fsxYQFEPMZfnER8xl+jTDCKgXoFvNJpdInT558+eWXTk5OEydONOGHC1dZchU/+DfmI02knw6SXrlm8o/jQMyn4jGCzfUqgJhPr9zYmSkKIOYzxapiTnoVUDvmW7dunbm5OSGkSZMmV69eFQgENY47Pz9fduerxYsXs1isGptQG8j/uExKSlKyFTaDgH4EVI35evXq
RV2aExIScuvWLWUGeejQIS8vL0KIq6trVFQU/uxVBk1+G8R88hp4bWQCNIz52Gz2kSNHevTosXz5cvz3rsbjVS7mc5OS5tKh46QvXtbYyqg3QMxn1OUz+cHL/92SkJCg/HzT09NnzpxJCLG0tGzUqFFBQUGNbXk83u7du93c3AghLVq0UOnGDomJibLblZw6darGfWEDCOhNQJOYb9y4cbm5uTUOlcViLVmyRHYKKNOkQp/nz5+XNX/69GmFT/EWArUroHbMFxkZaW9vTwhp1KjR6dOna/w9XCwWp6WlUTGfmZlZeHg4h8NRcu7yPy4R8ymJhs30JqBSzMdisYYOHUqdOz169IiLi1NmnHv37nV3dyf/x95ZgEWx/f9/dlkWlhYEaQwExQDBbrGwxUbFBrEwEVvsuNa9Fsa1vTYoiCgoYFEinQIqHQJLb+/8n6/nd/e/l9id7YU98/DwnJ05+TnnPTPnNScQxMTEJDAwkEajYQkF/XAsADEfxxTQ0fYsoICYD0XRmpqa1NRUNptNo9FYLFbbqzYp5vi/mM8IRXqiew/RqdT/nW+nB8R87bRi20mxuPstAmG+oqKizZs3IwiCx+NVVFRKSkr4WoRCoVy6dMnQ0BBBECsrK4G2aYKYj695oQdZWUAIzDd48GA8Ho8gyPz583/8+ME35xUVFbt370YQBIfDEYlEiPn4Wgx6aFsWEBrz3b1718LCAkGQTp06Xbx4kUKh8C44hUIJCgoCmE9NTe3ly5e8/XNf5X5cQszHbRnolgcLCIT5amtrt23bpqOjgyCIvb09FiGw2ew///xTU1MTQRALC4v09HQmkykPBW9DeYCYrw1VFsxqUwsoJuZDUZTFYtXX1z9//rykpKQdE6um9S3472aYzwhFeqAPn9Lb7xchiPkEbyYwhPQswN1vEQjz1dfXHzlyBHAHHA4XGRnJd3HSxsbG3bt3d+zYEbxWBgQEYC8nxHzYbQV9StkCQmA+zjCKSZMmxcbG8s3wz58/161bhyAIkUjs0aMHlsGzTeKEo/maGAT+lCsLCI35AgMD7e3tEQTR09PbtWsX36F5NTU1vr6+APN169YtPDwcux24H5cQ82G3G/QpHQsIhPkaGhouXboE3se6du366NEjvplsbGxcvXo10I6lpSW1XQ/R4GsN4TxAzCec3WAoubCAwmK+srIyJyenzp07+/r6VinGBrLCNbiWMJ8pivRFY74w2inpg5hPuKYCQ0nHAtz9FoEwH4vF8vf3V1dXBzMB/f39+fav6urqli9frq2tjSCIk5PTp0+fsJcRYj7stoI+pWwBITCfp6engYEBgiAjRox48+YN3wwnJibOnz8fQRB1dfXJkydjX02MEzPEfBxTQIccWkBozPfx48fx48cjCKKhoTFlypSamhrepSspKVm7di1AFcOGDcMC2TkRcj8uIebjmAU65MQCAmE+Go0WFhZmbGyMIIiamtr27dvr6+t5FyQ1NXXmzJnga5OjoyMc1MLbXC1ehZivRbPAk23DAgqL+SoqKpYuXUoikZYsWZKbm9s2aksWuWwJ8xmhiAnqNB9NSW2X23FAzCeLhgbTxGoB7n6LQJgPRdHw8PAuXboAzLdnz55fv37xSJXNZhcUFNja2qqqqiIIsnjxYoEWCIOYj4dt4SXZWkAIzHfy5MnOnTsjCNKlS5dDhw7xHQn7+vXrQYMGIQiio6Ozfv362tpaQYsMMZ+gFoP+pWkBoTFfcnLyggULEAQhEAj6+vohISGNjY2t5ZzBYISFhXXr1g1gvqVLl2ZlZbXmufl57sclxHzN7QPPyNYCAmE+FouVl5dnaWmppKSEx+NHjRrF+4MTg8E4dOgQeOXr1KnTtm3bZFvYNpo6xHxttOJgtv9nAYXFfPX19X///be2tnafPn2+fv3apDVQqVS4fgGwSSuYzwhFOqObdjTm5TUxXTv4CTFfO6jEdlwE7n6LoJgvPj5+7NixAPM5ODh8/vyZxy6HdXV1
9+7d09LSAmv5eXt7l5aWYjcsxHzYbQV9StkCQmC+wMDAvn37gmEUEyZM+PLlC488l5eX7927F8yuMjQ0fPjwId99BprHBjFfc5vAM/JjAaExX01NzdWrV8HYWBwOt2DBgvj4+BZ3BqDT6fHx8e7u7gQCAYfDderU6cqVK2QyGbsRuB+XEPNhtxv0KR0LCIT5UBSl0Whr167t1KkTgiAdO3ZcsWJFi62azWZTqVR/f//BgwerqKgQCIQhQ4aEhYVJp1DtLBWI+dpZhSpWcRQW8zEYjKysLAsLCy0trZs3b+bk5GRkZMTFxX369OnNmzdv374V6E2iHTea1jGfEYr0abh1t1GQV642YSiI+dpENSlsJrn7LYJivqKior1795JIJBwOp6ys7OnpmZKS0mL/qqGh4dOnT5MnTwbbDtjb2/v7+7fos7WKgJivNcvA8zK3gBCYr7i4ePny5WD5c2Nj440bN+bm5rY4pq+qqurvv/92cHAA0xKdnJxKSkqE+HAIMZ/M2wnMAA8LCI35UBRNTU2dO3eukpISDofT19d3d3d/+/ZtYWFhTU0Ng8FgsVhkMrmoqOjNmzerVq0C+4QiCLJs2bKsrCyBJh5yPy5bBCI8CggvQQtI2gKCYj4URRMTE6dMmUIikRAEMTQ0dHd3j4qK+v79e3l5ecPvo7S0ND09/cqVK2PHjgUrrlhYWOzfv5/HmFlJF7NNxw8xX5uuPkXPvEJhPjqdXldXV1lZWVJS8vPnz9TUVGtra2Vl5dGjR69YsWL27NmDBg3q1q0bgUDYuXNncXGxojeO3+XnifmM0L7jqYGv2tl2HBDzwZYvzxbg7rcIivmYTGZkZOSAAQPABChjY+P169d/+vSprKyMTCbX/z4qKyuLiooxxZb/AAAgAElEQVTevXu3YMECIpGIw+E0NTVPnz4t0FA+0JEDwwYRBPH395dnk8K8KZoFhMB8KIr6+/sPHTqUQCAoKSnp6+u7ubklJCQUFxdXVVXV1NRUV1eXlZX9+PHD19d3wIABJBJJSUmpd+/ed+7cEc68EPMJZzcYSjoWEAXzNTQ0vHjxwtraGpA+ZWXlESNGeHt7X7lyJTAw8M2bNxcvXty5c+fgwYNVVVXxeLyampq1tXV0dLRAn5pQFOV+XELMJ52GAVPBbgEhMB+KoleuXLG1tVVRUUEQREVFpVevXh4eHkePHr37+zh48KCLi4uuri6Y26unp+fh4SHQiivY868IPiHmU4RabrdlVCjMV1BQ8Pjx4wMHDixbtmzYsGFdu3YlEAicjihwgEEu/v7+FAql3da6IAXjg/l+L9LHTkhsT4v0QcwnSAOBfqVtAe5+i6CYD0XRmpqaW7dudejQAUyDUlZWtrW19fT0PHXq1OPfx8GDB+fPn29lZYUgCB6PJ5FIY8eOzcnJEbSccDSfoBaD/qVmAeEwX21t7YEDB0xNTTlvDp07d160aNHRo0d9fX3PnTu3Zs0aBwcHsJalsrKykZGRu7s7341uWis1xHytWQaelwcLiIL5UBStqqq6dOlSly5dNDU1lZSUmryKc34SCARdXd1hw4Y9fPhQiNdy7sclxHzy0GxgHrgtIBzm+/Xr16lTpwYOHKihoQHmW3D0wnHgcDhVVVVjY+Ply5fHxcVxJwrdAlkAYj6BzAU9y5cFFA3zLViwQE1NDXyNx+PxYEgL57aIIIiSkpKlpeXPnz/lq55klxt+mM8IRSxQ5yVoZaXs8ijmlCHmE7NBYXRitQB3v0UIzIeiaH19/eHDh01MTMBICjweTyAQlJWVVX4fysrKBAIBvDuqq6tPmzbty5cvgo6hgKP5xFrnMDIxW0A4zIeiaEFBgY+Pj5mZGXht4GiH+PsAwgHvFZaWlnv37i0oKBA66xDzCW06GFAKFhAR86EoSqfTs7KyFi9erK+vz/0ezu22sLDYunVrcXExi8USolDcj0uI+YQwIAwiUQsIh/lQFGUymRkZGatXr9bS0uLWC8eNw+FG
jhz5+PFjMpksnHYkWvA2FDnEfG2osmBWm1pAoTAfk8kMDg4eM2YM5z7Y3KGqqrpixYry8vKmllLU3xgwnzGK2KB7D7UbC0HM126qsl0WhLvfIhzmY7FYdXV14eHhS5cuNTExaX4bBGuKjRgx4v79+xUVFXQ6/X/3AQEPOJpPQINB79KzgNCYj8lk1tbWJiQkLF++HEwnbC4fXV3dlStXRkVF1dbWCrEkH8cKEPNxTAEdcmgB0TEfoBVVVVXv378Hs3RdXV1HjBjh6Oi4atWqo0eP3rp1Ky4urrq6WmhOwf24hJhPDluRgmdJaMwHKDmZTP7x48eNGzdOnz69devWNWvWbNy48eDBg/fu3YuMjCwtLW1sbBRaOwpeNZziQ8zHMQV0tD0LKBTmQ1G0trb25s2blpaWzV/NwRl1dfVbt27V19e3vbqUTI4xYD4jFDFFOw6kvHhJo1Ilkwupxgoxn1TNDRMT0ALc/RbhMB9IkEql5uXlvXv37uLFi7t373Zzc5s/f/6yZct27tx54cKF4ODgzMzM2tpaAXP3/71DzPf/bQFdcmYBoTEfKAeNRsvPz4+NjX306NGpU6e2bt3q6em5e/fu06dPx8TEfP36NT8/X4gJhk2MBDFfE4PAn3JlAbFgPlAiKpVKJpNLSkry8vIyMzO/ffuWn5//69ev2tpaHnvBY7EG9+MSYj4sFoN+pGkBUTAfyCeLxaqpqamsrCwuLi4oKCgqKiovL6+rqxPlC5M0LSD/aUHMJ/91BHPYqgUUDfOhKJqbm+vp6amurt6c9CkpKZmamn7//h3eHzktBhvmM0IRM9R6NPVLHL3tkz6I+Ti1Dx1yaAHufosomA8Ujclk1tXVFRcXZ2dnp6SkZGZmFhYWijgKCcQMMZ8cNh6YJWABETEfx4x0Or2qqiovL+/Hjx/FxcWVYl28AmI+jp2hQw4tIEbMJ7nScT8uIeaTnJ1hzMJZQHTMJ1y6MBR2C0DMh91W0KfcWUABMR+Dwfj06ZOTk1NzzKehoTFp0iS46Th3M8WM+YxQxBhd5E7NzGrrkBRiPu4GAN3yZgHufovomE9ypYOYT3K2hTGLaAFxYT4Rs8E7OMR83PZpbGz89evXz58/v/57ZGdnV1RUUAX5sgi+amT+Pmpqarjjb+5ms9mNjY0JCQm5ublC76PSPNp2cwZivjZXlUBEeXl5CQkJQEbSEVF8fDwUUYutBWK+Fs0iVych5pOr6oCZEcwCCoj5wAr0AQEBpqamTbb3MjExOXHihBCLzQtm9DblWxDMZ4Qi5uiB4/SiojZVxKaZhZiP2yJMJrO+vr6oqOj79+8pKSmJiYmZmZnfv38XaP1KNpvNYDDKysrS0tJKSkqwSKysrCwnJ6egoADOoOeuDhRFIeZrYhD5/9lcRBkZGbm5ueXl5djXzeGIKD09vaioCMucUCCi/Pz8uro6+beSNHMIMZ80rS1iWmB5gdDQ0GvXrh0+fHjpv8fu3buDgoJ+/fqFJX4mk1lZWRkfH//48WMvL68jR46kpqbyDshkMrOzszds2LB///7AwEARFzHgnVZbvAoxXxuqNSqVmp+f//bt22vXrh09etTNzQ3IaPfu3S9fviwtLcVSFgaD0UREaWlpvAMCEa1fvx6KqEVDQczXolnk6iTEfHJVHTAzgllAMTEfiqLV1dX79u3r0KEDZzNyHA5nY2MTGxvb1gejCdYC+PkWEPMZoUgv9OJVVkMDv4jl9zrEfKBuWCxWY2NjYWFhWFjYiRMntmzZMn78+KFDh7q4uGzatOn+/fsYq5DBYFRXV3/79u3ixYvTpk27evUqlnfKBw8erF27dt++fREREZWVlTQa7X9NER4Q87WpNgAGBHFEtHnz5nHjxg0dOnTOnDnr16+/d+8exrFIHBFduHBh1qxZFy5cwLId/D///LN27do9e/aEhoZWVFRAEXHaDsR8HFPIs4PNZlOp1KSkpB07dvTp00dHR6dTp04GBgZqampgO+O1
a9fm5+fzLQKFQsnPz793797YsWPxeLyGhsa0adM+f/7MOyCdTn/79u2gQYNIJFKnTp3mz5//+vXrqqoqJpMJH0YoikLMx7v9yMlVjoj27t1ra2urra1t8PvgiGjVqlVZWVl8c0uhUPLy8rhFNH36dL4iotFob9++HThwIBDRggUL3rx5A0XEsTbEfBxTyK0DYj65rRqYMf4WUFjMx2KxSkpKJkyYoKmpCWbvEonEkSNHwhm7TRqN4JjPGO09ti7kbZN42tBPiPlAZdXW1j5+/Hj69Ol6enpaWlo9evTQ0NBQUlIiEAh6enqbNm3CWKffv38/evSokZGRqqoqiUTy9vb+9u0b37CXL1+2s7MjEonGxsYLFy788uULljGAfKNtBx7gaL42VIlUKvXFixfOzs7NRdSxY8fNmzdj5AVARIaGhkBEW7ZswbLO1KVLl2xtbYlEor6+/owZM+Li4kRcz74NWZ53ViHm420fOblKo9HCwsL69OlDJBIdHR3v3LmTm5ubmZm5atUqXV1dAoGwf/9+LMNaQ0JC5s2b16FDByKRaGBgsGPHjvT0dL4PFDB+9vPnz0uXLu3UqROBQDA0NFy2bFl+fj6DwZATE8kwGxDzydD42JOm0WifPn0CIho+fPilS5cyfh8rV64EIvLx8cEiojdv3sydO5dbRBkZGdhFtGTJEo6Ili9fXlBQAEWEoijEfNhbsqx8QswnK8vDdMVgAYXFfCiKMhiM9+/f29nZgam7ZmZmPj4+GDtdYjC9xKKovn2P5eklxj+0lyOKWKCIEeY/c3TecmpyisSKKNmIIeYDn399fHz69etnZmY2Z86cV69epaamxsTETJo0SVNTs0ePHufOneNbDY2NjY8ePZo4caKhoSGRSBw9evTt27dzc3OxvFP++vXr1atXK1asMDU11dbWtrGxOXr0aFEbnw/O12JYPEDMh8VKcuLn+PHjAwYMMDExcXZ2biKibt26HT9+nG8+GxsbHz58yBHRsGHDrl279u3bN4wiCgoKcnNzs7Cw0NTUtLGx2b17d15eHt9E270HiPnkv4rBunhDhgwhkUgDBw68ceNGVVUVnU6nUqkJCQmzZs2aNGnS48eP+U57f/jw4bhx47S1tbt06bJ58+bIyMjCwkIqlYrxZa+hoSEvL8/f33/JkiW6v49p06Z9/foVEnOI+eRfRFQqNTU1dejQoSQSqW/fvpcvXwbDuikUCkdET5484Suip0+fTpgwQUtLy8zMzM3NLSIiQjgRubq66urq6unpTZ8+PT4+HooIYj75FxHEfPJfRzCHrVpAkTEfiqIUCuX8+fM9e/ZEEMTe3t7f379VS7WdC40r16GIpVj/LFDEBDPjAzTQCl23lVFc3HbM9v9zCjFfY2PjjRs37Ozs1NTUZs6cGRISUl9fz2azmUzm1atXnZycFi9e/OrVq/9vspZcVVVVFy5cGD58uIGBgYODw/Hjx9+/f//r1y/sn3BramqSk5OvX7/u7OxMJBItLS29vb2TkpKwx9BSvtr8OYj52kQVUiiUW7du2dvbq6qqTpw4MSAggCOi27dvT5061cXFJTAwkHdZKisrfX19R44cqa+vb2tre+jQobCwsLKyMuy9o5qampSUlNu3by9atEhdXb1z586bN29OSEhQcBFBzMe74cnD1ZycnKVLl6qpqamqqh4+fLigoADkis1mUyiU2NjYqKgo3h9+KBTKP//8M3r0aC0tLUtLyx07dmRmZgo3db26uvrjx4+bNm1SVlbW0tJycXGJjIxUcBFBzCcPMuGdh5ycnGXLlqmpqREIhJ07d2ZnZwP/LBaLI6Jini/qVCo1MDBw/PjxWlpa3bt39/b2TklJoVAoGCk5d/aaiGjhwoVRUVEKLiKI+bhbiHy6IeYTrF5YLBaNRvv582dSUtLXr19TU1Nzc3OxLK7RJBkGg/Hjx4/c3FwssywrKyu/f/9eWFjYJBL4U8ExH4qixcXFO3bsMDY2njp16vfv
39tBk6ifs0RAJId9mB52n8Yo0hc9fJLdBpdUU3DMR6FQEhMTR4wYoaamZmlp+ddff9XW1nJ0kZubGxQUFBoayul0cS5xHCwWq66u7q+//urfv7+6unr//v2vX79eVFQk3KqXVVVVQUFBzs7OKioqnTt33rJlS1xcHCctBXRAzCf/lQ7GSgwfPlxNTc3MzOzEiROVlZWcbBcUFAQHB/MVEYVCuXjx4uDBg9XV1e3s7C5dupSfny+ciKqrq8PCwlxdXVVVVc3MzDZv3vz161dOfhTQATGfnFc6mUx+/Pixvr4+giB9+vQJDg7mO+CoSYkaGhpiYmJGjx6trq6ur6+/YcOG5OTkJn4E+tnQ0BAVFTVz5kwikdihQ4ctW7bI80bnAhVNOM8Q8wlnN6mFqq+vDwgI6NixI4Ig1tbWz58/5zvHtkneKBRKUlLS5MmTNTU1dXV116xZk5CQ0MSPQD8bGhoiIyOhiDhGg5iPYwq5dUDMh7VqKBTKjx8/Xr165evru3v3bg8Pj5UrV65fv37Hjh1//fUXxlhYLFZ9fX1cXNyDBw927dp19uzZzMxMvmE/ffp0/PjxgwcP+vn5JSUlNTQ0CPrGwDeJNuqhvWI+BoORn58fGxv7DsOxadMmCwuLIUOG3Llzh6/36Ojo79+/N8jxFhPygfmMUMQUJfVHA1/RabS2pQ4Fx3wlJSUnTpwgkUgIgsyfP//jx4+CVh+VSvXz87OzsyMQCJaWlgcPHqyqqhI0Em7/dXV1oaGhAwYMIJFI5ubm+/fvF+LLEHeEbdoNMZ/8V19xcfHx48eBiJydncPDwwXNM41Ge/369cCBA4lEYufOnXfv3o1xR9HWEmpoaPj48SPYTwCKCGK+1tqJnJxPTk728PAA6yYvWbJEULgA9vdcu3atiooKkUicM2dOWFiY6EWrra198+ZNv379VFRUunfv/scff4ioStGzJMMYIOaTofGxJJ2VleXl5QVEtGjRovj4eCyhOH5YLNaPHz+2b98ORDRjxozXr1+LPviutrb29evXdnZ2UERwbT5OY5NnB8R8mGqHTCZHRUUdPnx4yJAhFhYWtra2NjY2enp6SkpKqqqqY8eOxRJLY2NjdnZ2QEDA4sWLzczMwGq4kZGRfMP6+flNmzbNyMioc+fOq1evDg4Ozs/Px7K0Dd+Y27qH9or50tPTvb29rayswBNOjP+7du3q4eHx4cMHua16ucF8RihihtqOr8nJYdDpcmuu5hlTZMzHZDLj4+MHDx6spKSEw+EOHTrEe1ZUc+vRaLTMzMxRo0apqanp6OisW7cOy14BzeNpcoZKpYL59QQCoX///n///Tf2eYtNomrrPyHmk/MabCKiw4cPCyoiBoPx8+dPMIZCR0dnzZo1Ig5EAhajUqm+vr69evWCIoKYT55FxGAwnj9/bmNjA97cTp06xWPweIsFqa6ufvLkiZqaGoIgvXv3fvr0qbg+zVIolFOnTpmbmyMIMnHixDdv3rSYAUU4CTGfPNcyg8EICAjo27cvENHp06cFndBWX18fGBioo6MDRPTw4UPuiR2ilL2xsZEjIicnJ0UWERzNJ0pDkk5YiPn425lGoz1//nzChAnKyso9evQ4f/58UlJSeHj44sWLO3ToYGRkhGXTRiqVGhcXt3btWi0tLXV19S5dunh6esbExPBPHkUbGhoiIiJWrVplYGCgoqJiZGS0f//+1NRUhe0ocozWXjHfgQMHTExMxEj3uKPS0dFxd3fn2FDeHPKE+X5P8nX1qM3PZ7NY8mao1vKjyJiPTCY/ePAAtHY1NbW7d+8K+vG2qKho7969ampqOBxu2rRpoaGhrdlZoPNgPSZPT08jIyM8Hj99+nRBO34CJSfPniHmk+faQVGUTCb/888/HBHdu3dPUBH9+vXrypUrGhoaOBxu+vTp4hXRhg0bgIhmzJihsCKCmE+eRVRWVnb06FGgIDweHx4eLuhkw+Tk5DVr1oAYjh8/LsZtZ8CTaOzY
sQQCwdTUdPfu3YKqW54tL1DeIOYTyFxS9lxVVfXHH39wRBQRESGoiDIzM7du3QpiOHHihNhF5OjoCES0Z88ehRURxHxS1oUQyUHMx99okZGRU6dOJRAIPXv2vHr1KpVKZTKZdDr96dOnY8aMsbKy+vPPP/nG8vbt28mTJ6uoqJBIJA8Pj0+fPpHJZIy3BjabzWAwfv36FRYW5uzsrKGhoaqqumLFii9fvvBNt317aK+Yz9nZGY/Hg+eT2P/jcDgnJye5bRhyh/kQU/TUXzSupank1nQgY4qM+ZKTk1evXg0kM2DAgNDQUEEXWk5LS+vXr5+SkpK6uvr169exrJ2KpT2wfx9RUVHjx49HEKRnz56XLl3CErD9+YGYT87rNDk52d3dHYho4MCBb9++FUJEQ4cOJRAI6urqf//9t3hFFBkZOW7cOARBbGxsLl++LOfGlFD2IOaTkGHFEu3bt2+nT5+OIAgOhzM1NU1ISBBUQYGBgd26dUMQhEgkhoeHi/GLPngS7dq1y9zcHIfDTZo06du3b2IpdZuLBGI+ea6yyMjIxYsXc0SUmJgoqIiCgoLAiFoikRgRESF2Ee3cuROIaPLkyQorIoj55FlEIG8Q8/GqIzabXVJS4uLioqOjo6en5+7uXlFRwQlAJpOzsrJSUlLKyso4J1t0vHz5csaMGZqamvr6+qdPn87MzBRifT0mk1lfX5+dnb127VpDQ0NdXd0lS5ZER0e3mKKCnGyvmG/atGlip3vcEU6YMEFuW4j8YT4jVNkBvXWPWV8vt0bjzpjCYj4Gg+Hn59e1a1fQ1BcuXCjoZhcVFRV3795VVVVFEGTEiBHv3r0T7yqoDQ0Nq1evJpFImpqas2bNolKpgr65cld0G3VDzCfPFQemSvXu3RuIaP78+YJ+Tayurn769CmYbzhy5MiwsDDxiqi+vt7d3Z1EImlpac2ePVsxRQQxn3yKqKio6Pnz5y4uLnp6eoBQ6Orqzp071/3fY+/evYmJibw/8Ofl5R04cIBIJBIIhDFjxqSnp4v9MeHn5zdo0CAwmfHatWvyaUxJ50ommO/69evnz5//8eMHxtJxPy7Fsn4IxnRl4o3NZlOp1JSUlICAgBUrVhgYGLQooj179sTGxvIW0a9fv06dOqWiokIgEBwdHTMyMsQuomfPnnFEdP36dZlYTOaJygTzXbt27cKFC9hFJHMryTYDEPPxsj+Dwbhz506PHj1wONyAAQNu3brFy3cr16Kjo11dXXV1dfX19ZctW1ZQUMD79tRKNP93ms1mJyQkODs7a2lpGRkZrVmzRpHbOsR83PAOuxtiPgE38zVFB09BA4LYTCZvecrDVQXEfLm5ue/evfvrr7+mTZtGJBKBEAYOHLh+/fqD/x4nT56srq7mTRzi4+NdXV1B8O3bt2PZH0nQGj979qy1tTUej+/Vq9fnz5+pVKqgMbR1/9z9Fqlt9RgUFPTs2bPi4mLs1ktNTeXcUf39/bEHbKM+c3NzIyIiLl26NGvWLADpEAQZMGDAunXr/tXQwT/++KOmpoa3iNLS0jw9PYHpvL29s7KyxG6QM2fOWFlZ4fH43r17K6aIZIL5Xr58KZCIgoKCOApKS0sTezOQqwjr6ur2798/e/bsgQMHGhgY4HA4UHZlZWVdXV2Df4+RI0d+/fqV98Ci169fT5o0CUEQEol05MgRge5aGG2Sn58/e/ZsPB6vpaU1Z84cGo0mdgiCMScy9CYTzLdu3bru3bsvWrTo6tWrWETB/bhs95ivtrZ29+7dU6ZMGTRokJGREQ8RffnyhXc/+v3793PnzgUiOnDggKRFNHfuXMUUkUww39q1a7t37+7q6opRRDK8ychD0hDztVoLYFfcOXPmaGtrIwgyb948QZeRZrPZZWVlW7duNTc319DQmDRpkhA71jXJH5jA+/jx41GjRhGJxG7duh09epROp/N+824SSbv5CTEf5zVaIAfEfAJiPiMU6YLOXc5MSJR/7SgU5mMy
maWlpbt27ZoyZUrv3r21tLQ4QtDW1jYxMbH4fVhaWo4bN66iooJHZ4ZKpT59+hQMBlRXV3/06FF1dbXYq/vt27dgrG7Hjh29vLwkkYTY8yzeCLn7LVLDfJs2bXJwcNi5c2dwcPDPnz+xlEhxMB+TySwpKdm5c+f06dPt7Ow6dOjQooi6devm6OhYWVnJQ0R0Oj0gIKBXr14IgqioqEhIRKGhoVOnTkUQRGFFJBPMt2nTpv79+2MXkQJiPjc3t2XLlvXr1w8sukIkEqdMmeLq6rrs32Pfvn2lpaU8XtfB6v6AcRgYGHz69Elcm29w3/QYDMauXbsMDQ3B5Pfo6GhBFz7jjq2NumWC+dzc3MC9sWvXrq6uro8ePUpJSeGxrAH341IRMN/BgweBiOzt7ZWVlcG89alTpzYRUVFREQ8R0Wi08+fPd+7cGUEQPT299+/f10tgLk4TEcXExCigiGSC+VasWAFE1K1bNyCi1NRUuCtpa/dhiPlaswxKoVASExO7dOmCx+PV1NS2b98u6J2CyWT6+fnZ2tricDgHB4fr16/z/ojXalaaXSCTySdOnLCyslJWVra1tU1NTVXAUSEoikLMx+mPCeSAmE9wzGeEIr3Yuw80yv0ifYqG+fLz8729vRcvXjxt2rQePXoAIWhqak6ZMmXxv8eKFSsOHTrE+wael5fn4+OjpKSEx+Pt7Oy+fv3K4z2y2S0Z64mioqKtW7cqKysTicTevXvn5OTw/iiNNd6244+73yI1zAdWbCSRSL169fLy8oqKiiooKOD9Uq5QmC8vLw+IyNnZuU+fPmCqlKam5tSpU//V0OLly5f7+Pjwhg4lJSXHjh0DIurRo0dcXJyERLRlyxYgoj59+uTm5iqaiGSC+cCKjdhFpFCYj8FgZGZmpqenx8bGrlixQklJCUEQXV3doKCgpKSk9H8PvpNvCgoKwOYbRCLRzs6urKyMr4IaGhqSkpJCQ0Pf/D5iY2NLS0v5PhBu3rw5YMAABEGMjY1PnTpVV1fHN0g78yBDzAfeUggEgr6+/sKFC58/f/7t27cWq4D7cdnuMR+DwcjJyUlPT4+Pj3d3dwczM3R1dV+9esUtotzcXN5D58rLyzdt2qT0+7C0tCwvL5eQiG7fvj1w4EAgotOnT/N+w2xn8gHFkSHmwy6idml57IWCmK9VW5WXl584cQLsxm1paXnx4sVWvbZ0gc1mNzQ0LF68WFdXV1VVdcOGDdzr+rUUQrBzycnJbm5uOByuQ4cOPj4+4o1csKzIzjfEfALRPY5niPmEwnxGKGKH+l5nMBg8xrPITg3/l7JCYT5uaycmJnp4eCAIoqSkZGdnl5KSwn2VrxtscAS+Hru7u3///p1vEAaDERcXF/XvkZGRwZscgQgvXLigr6+PIIiqqmpoaGiLL/d8k267Hrj7LVLGfJz3QmNj4w0bNsTExJSXl7f2hUxxMB93W8rOzt6+fTtHRKmpqdxX+bojIyMXLVoERLRkyZLc3Fy+QRgMRnJyckxMDJBRWloaFkWcP3++Y8eOYE5WSEgIliB8c9KGPMgQ83GLyNPTMzY2tjURKRTm4zSezMzMpUuXAgVZW1sLOl47Li5u/vz5CIJoaWk5OzvX1NRwYm7NUVBQsHPnzkGDBtn/PlauXIllb+tXr15NnDgRsMh169YJms/WMtOGzssc83FeyPX09GbNmuXv719cXNwEFXE/Lts95uM0nvz8fPBRQUlJqUePHlhUwAmLomhaWtqSJUvA02H48OFY2nZBQcGOHTs4Ilq1ahUWEYWGhk6ePBmMGVy7dq2g+eTOcxt1yxzzcYto9uzZLYqojTQwGVMAACAASURBVNpWXNmGmK8FS4KJsRkZGRMnTgTLsY8ZM+bJkycNXAeTyeTd1afRaNnZ2V26dMHhcD169PD19W0hJRFO0en0P//8k0QiKSsr9+7dOzc3l+/3ChFSk9OgEPNx7nECOSDmExbzGaNIX5TBYNLpcioJFFVYzBca
Ggr2N1RRUVm8eDEWxMBdif7+/sOHDwevhocPH8aymEt1dTVYJRqob8qUKUVFRdxxtui+d+8eZ1bjnTt3FO0LDXe/RSaYj3OrNDAwWLZsWWRkJJVKbT4cTDEx38ePHwFlEE5EQUFBEyZMAAh7+/btBQUFLUqA+2RNTY29vb2KigqolzFjxmDZ9OPu3bscEV27du3Xr1/ccbZ7t8wxXxMRRUVFNReRYmK+t2/fgp6/lpbWrFmzBAXQYWFhYFUHPT09Nzc3LMFzc3NdXFw4Cho5cuSTJ0/4SuDjx4+zZs0CPHHevHlVVVV8g7QzD9yYLzo6mqt7J0EnwE8c+XA7NDU1R48eff/+/YaGBs5CTNyPS8XBfJGRkc7OzqBxzp49uwn65NsOP378OHPmTBB85syZtbW1fIPk5OS4uLhwVnYeNWrU06dP+YaKjo4GKwBqamrOmDFDAUXEjfliYmIkqByuqMEWzNza4biBiP7555/GxkaOiPjWY/v2ADFfC/VLoVDi4+M3bdoElgYA21G5u7sf5zqysrKadwy446qoqDh58iRY5mbu3Lnv37/nviq6m81mBwYGDhkyBHw2fPbsGZZPFqKnK1cxQMzHubsJ5ICYT1jMZ4QiZmh/JzQnlzfll6FMFBbz3bp1y8bGBkEQNTW148ePl5SUCFQL9+7ds7W1BZjv3LlzWOY9CYf5/Pz8Bg8eDEY8nTx5EgsZFKggcu6Zu98iW8yHx+NVVVXNzc3nzZv3+vXrJnZTTMz34MGD/v37AxEJsXL5w4cPQXASiXTixAksGhQO8z179gyISEVF5dChQ4WFhU2qr33/lB/MxxHR/PnzQ0JCuM2umJjP19fXzs4OzOPbv38/70nu3OYC7mfPno0ePRpBEH19/U2bNkkO86Wmpi5btgw870aMGKFoX5tQFOXGfHp6eiZSOTg0tvkbOx6PBxu2DB482NfXl0wmoyjK/bhUHMx39+5dsImtoaHhjh07eKxd2FxBKIq+ePFi7NixYPHWNWvWYBGRcJiPIyJVVdWBAwdWyv2SPi2aS5ST3JhP3kR05coVICJRCtgOwkLM959KrKio2LNnz4ABA3r37s09TINEIunr63OeAv369YuOjubdz8/Jyenbty8AhXv37sXyvvufrGD4kZiYCFbxQBBk6dKlWLZtwhBrW/ICMV/zdwUsZyDmEwHzGf+P9C1ZUyfIrp3SFJXCYr7jx49raGiAT7gvX74UdALF5cuXzczMEARRV1d/+PAhlg+zwmG+oKAgR0dHBEGUlZW3b9+el5cnzeYh87S4+y2DBw+eJpUDLDbf4r2RQCBoa2v36tVr5cqVfn5+nGajmJjv7Nmz4M1HS0srMDCQYw2Mzebq1atgExs1NbXr169jGWQnHOZ7+fLlmDFjgIjWrVuHZYo9xiK0CW/cmG/IkCFS0dA0viLq3bs3EBEYPqOYmG/v3r0mJiYIglhaWj5+/BjLMg7cTe769ev29vYIghgZGfFdTxYEFG40X25uLljjQllZuUePHuXl5dzZUAQ3N+Zr8dEgk5N4PF5FRWXp0qVghCz341JxMN/+/fvBy5ilpeWjR48EFdGtW7fAknmdOnXy9vbGMhhQOMyXl5e3du1aBEHweLypqakCiogb88lELy0m2kREinA341FGiPn+Y5yqqqr79+/v3Llzy5Yts2fPBg1ITU1txowZO7mOU6dO8e6bkcnkJ0+egAm/ZmZmt2/fFvQ+9Z9stfKjqqrq2rVrYK1fc3PzoKAgRdtrBmK+Fu9xfE9CzCcC5jP6HdaKcflao1zOFFNMzAcWXcbhcAiCGBgYCLG1xalTp8Cm6hoaGu/evcPyBVg4zBcWFgYmFxMIhGXLln379q2VG3z7PM3db+F7p5KaBzwer6Gh4e7uHh8fDx7WCoj5ampqvL29wbdJAwODrKwsQTcNO3XqFJjBoKGh8fjxYyzf0oXDfO/eveOIaN68eVlZWe1TLa2UihvzSU0jfBPiiCghIYFG
oykg5mtsbFy6dCl487e1tU1LS+M96ad59Z4+fdra2hpBEHNz80uXLmEZx5Sfn79169a+ffv2+H0sXry4+djk5gkVFxdv3LgRbLajr69fWFioaMv+cGO+S5cuRUjlAF/4WpOSrq7upEmTzpw5k5CQUFVVxWQyuR+XCoL56HT6ihUr1NTUEASxtbVNSkoSVERnzpwBu7EZGxsfOnQIy4javLy8LVu2cETk6uqKRURVVVXbtm0Dtamjo5OdnS1oVpsLs22d4cZ8ly9floqGIsB4Z94iOnv2LEdEEjIpi8WqqamJjY19+fLlx48fm4/lLC0t/fjx46tXr75+/dqENdPp9IKCgvDw8BcvXqSnpzdHN1Qq9dOnT/fv3/fz80tOThalXUHM958GQKVSS0tLv3//HhMTc+jQIdCMunfvfuXKle9cR1FRUWvrdoPocnNzd+/eDYKPHz/+7du3/0mmlR8fPny4f//+rd/HkydPSkpKeFctk8kMDQ0FHz2UlJQuXLigaB8TIOZr7U7H+zzEfCJjPiPUciT92XN6Q0MrapbZacXEfLGxsfPmzQMzYR0cHLCMxeOuIRqNduDAAQA4NDU14+Limj93uf0Dd11d3YwZM0b+e3h7e2MZvhQTEwMW6FFSUpo4caKguxw0z0bbOsPdb/H29v5bKgfYU7K1u6KmpubgwYM3bNgQHBxcWFgI2JYCYr60tDRXV1cgIjs7u4qKCoF6/kwmk1tEYWFhTV5tW2yo9fX1q1atGjt2LJDRxo0bMzIyWvTJfTI6OpojotGjRwu63w53VG3RzY35duzYIRUN/c1bRFpaWoMHD/b09OSISAExX3Z2tpOTExhk6ujoWFdXJ5CCUBTdt28fGAzYuXPn27dvY3kM1dTUhIeH37p168bvIygoiO9+viiK1tfX79q1C9wSNTQ00tPTBWX6bVE43HnmxnxSu4G4ubm1+BgyNTWdPHnyiRMnYmJiamtrOc2G+3GpIJgvLy9v6tSpOBxOSUlp1KhRZDKZYw3u6uPh5oyotbCwwMjKm4jo1atXWEREo9H27t3LEdGnT5+wIEUeOW9zl7gxn9ReZVesWCGQiCRk1fr6en9/f2dn5yFDhjg5Ofn5+XG/8FCp1IsXLzo5OQ0fPnzp0qXBwcHcE0Dz8/MPHDgwduzYgQMHurq6Np+O8PXr16VLl/bq1cvBwWHdunUZGRncwQUqEcR8LZsrKytr06ZNoCVNnDgxLCysZX+tnE1KSlq4cCEIvmLFiq9fv7bi8T+nvb29+/fv3/P3MWbMmJiYGL7P+Ojo6KFDh4KE9uzZk5OT858Y2/sPiPlavNnxPQkxnxgwH2KCTpzHfP+RxWDIlc4UE/M9evQIbKCho6Pj6uqKZdFl7lqrrq728vICwtHS0sL4VZZGowUEBDz59/j06ROWwRdJSUlgogcOh+vXr198fDx3Ttq9m7vfItu1+cAGef379/fw8AgICGhCtRQQ8wUFBY0fPx5BEB0dHRcXFywDWrmba11dHdilF0EQbW3t+Ph43l9DQVg6nR4aGvrs2TMgo/fv32Nh9JwVS3A4nI2NDcZXLO7ctmk3N+ZLTk6WTlnA3pfN3y709PT69++/du3agICAyspKTp9cATFfUFAQWFOsQ4cOS5YsEaJevLy8wBbSXbt2vX//PhYFCZEKiqIMBmP//v2gNjU0NL58+cK3uyFcQnIbSk4wn5mZ2ZgxY44ePRoZGcmNCYDduB+XCoL5QkNDwaLz2traixYtEqL9bNu2DYioS5cut27dkmjDPnjwIBCRurr669evBX3zFKJ0chVETjCfmZmZo6NjayKSkMV+/frl5eVFIBBAA3B3d+emdb9+/QKffBAEMTQ09PLyYjKZnJzExcWBVYzBN6HQ0FDuqyiK3rt3r3fv3iBmGxubwMBAzoOVEwlGB8R8LRsqOjoabBiHIIi7u3tCQkLL/lo5GxMTM27cOFBDmzdvxnh3dnV11dTUBKHMzMzCw8P5dhq/
fv3Kyef69eul9sLXSrmlfRpiPtBaBP0PMZ84MJ8RipijC1ayv2VLu93zTE8xMd/Jkye7d+8OFj4/cuSIoN9Uf/36xfmuo62tXVRUJPQzlWfl/O8iN+azsrLCsq8o3zjbkAfufosMMV+HDh2srKzc3NwCAwNb3L1KATEf9+4BwokITAMUCPMJ13S5MZ+5uXlsbKxw8bTRUHKC+XiLSAEx39mzZ8GU2y5duvj4+AjRuvbu3WtsbIwgSOfOnSVKKGpqanbs2AFeHTU0NBITEyWHFIWwgxSCyBbzEYlEQ0PDvn37+vj48Pgiwv24xNiRlILpJJrExYsXe/bsCSSwf/9+IdLas2cPEJG5ufn58+f59qOFSAIEodPp+/bt44jo48ePgr55Cp20nASULebjiOjAgQM8RCQhWzXBfEuWLOGeiFBWVjZp0iSwjlDHjh3XrFnDDfKaYL779+83gdFNMF9AQIDQXRKI+VpoAGw2OzQ01NLSEqj35MmTBQUFLfhr/dTHjx8dHBxAcB8fH27E23ogVAjMl5SUxFlDcMGCBVFRUTzib3+XIOYDbUzQ/xDziQnzGaGINeXAUe7bt8xVpoCYj8VirV+/XktLC0EQKyur4OBgQddCJZPJnDVWNDU1U1JSJNfnSUxMBGuf4/F4BwcHQb8hybyBiZgB7n6L9DEfWJtZU1NzyZIl4eHhPF7KFQ3zsdlsLy8vfX19BEG6d+/+/PlzIUS0detW8DDS0tLCOPNduOaUkJCwevVqsPZ59+7d4+LihIunjYaSLeYDItLS0lq6dGlERERrIlJAzLdmzRqwg82AAQMeP34sROvirM1namp65syZ1mwrRMxNgpSWlm7evBmoVVtbG8vO8k1iaOs/ZYX5CAQCiUTq2bPnvn378vLyePfeuR+XCoL5PD09jYyMEASxs7O7ceOGEM3szJkzgLabmJgcPXpUciIqKyvjfB7GPgtEiBLJbRBZYT4gIhsbGywikpD1yGQyWNFbVVVVXV19//793BMR6uvrFy1apKWlBcR+8eJFbqWnpKRMmTJFTU2NRCJZWFg0fxN+9erV0KFDSb+PQYMGRUVFwUm74qxHCoVy//59sLUFgiDPnj0T9GtASEiIqakpeIJip4RCYL6srCzOZrvjx48PDQ0VpyHkPi6I+QQFfMA/xHziw3zGqNkw9GWw/GhFATFfSUnJzJkzwXczOzu7goICQcEri8XizGDS1NSMjIwU9J6PvQF8/Phx7ty5gFBMmDBBaqsCYc+hRH1y91uav9xIKGmAhMDsCQ8Pjy9fvtTV1TEYDB5vToqG+erq6ubNmwdEZGtrK8QmNiwW68CBA2AOi6amZkBAgKAb9WKv/Q8fPnBENHLkSEWbxyBbzGdkZMQREZPJbE1ECoj5HB0diUQigiDjxo2LiYnB3p45Pq9du9avXz+w0+7BgwclRygyMjJWrlyJIIiSklLXrl3Lyso4eVAQh6wwn4ODw8mTJ3NycqhUKnfPv0Wzcz8uFQTzcUTk6Oj48ePHFs3C++TNmzfBQqL6+vqenp7Np0LzDo79ak5ODljKAOy0W1ZW1trNEHucbcunrDCfg4PDH3/8gVFEEjIpk8ksLi6+e/funj177ty5U1RUxF37bDY7Kyvr6tWrx48ff/bsWZMbbENDQ0xMzJ9//nn48OG3b982XyClvr4+NDT05MmTPj4+QUFBgvZouIsMR/NxW+P/3D9//jxw4AAAIsbGxu/fv+d7L24SS0BAAHjYIwjy999/N9+BpYl/8HP37t3Dhg3r9/twcnLCslhGbm4uZzUce3v758+ftxhzez0JMR/EfOIDdmALXUH/m6Pj56D5go32lageFRDzRUVFgb23VFVVJ02aRKPRuB+3GK3NvdPuy5cvJUcoQkNDp06diiAIgUBYsmSJIu+0K03MZ2Njs3Xr1oiIiF+/ftHpdL4tRNEwX2pqKliYT0lJafTo0VQqla+Jmivr7NmzYFEkdXX1e/fuYXzzaR4P
3zPcIpo/f74i77QrNcTp7u5uY2Ozbds2joh4V5NCYT4Gg5Gdnd23b18AyufNm8c9soO3obivPn36dNSoUQiC6Ovrb9q0qXn3j9uzKO74+PhFixYhCEIikYYNG1ZRUSFKbG0xrEww39evX1NSUqqrqzH22xUK8zGZzJKSEltbWyCiOXPmCDfI9MWLF2BH4w4dOixbtkxyIkpJSQE7QamoqAwYMEABRSQTzCeoiCR3d2KxWHV1dVVVVXV1dc0VTafTa2trq6urGxoamkAkFotFo9FqamrIZDKFQmlyFUVRNptNoVCqq6vJZLKIH3sg5muhAcTFxYGdXHA4nKOjY2JiYgueWj/V0NBw584dDn/x8/NrMum6taBJSUmhoaHBv4/w8PCKiorm7aZJ2Ly8PM4KoObm5vfu3WvioX3/hJiP08wEcsDRfGKCgyZon3Go3wsW19KqMlecAmK+mzdv2tragq7R6tWrhauCK1eudOnSBUEQNTW127dvS+6NLSgoaOzYsWDl3c2bN2PZ0E24EslnKO5+i9QwX0hIyJs3bwoLC7FPRFU0zPf06VOwJrSent7KlSuFazzXr1+3srICIvL19cWy8bRwCXGLaOPGjYomIpmM5hNURAqF+SgUysuXL7t27YogiJaW1saNG/m+vbfY8kNCQiZPngy2wRFiL6kW42zx5OfPn+fMmYMgiKam5syZM4WDki3G3FZOygTz0Wg0gRoG9+Oy3Y/mo1Kpb9686datG4IgGhoa69evF2735w8fPsyYMQMsETt79mzJbYsRFRUFBpVrampOnz5dAUUkE8wnqIjayh1JQvmEmK8Fw7569WrMmDFgUpWHh0d2tmBL7FdXV1+9epWDXQICArB3LVrIDc9TBQUFx44dA2kZGBjcvHmTp/f2dhFiPk4zE8gBMZ94MJ/GANbFK7SaGrnSlQJiPm9vbzMzMwRBrK2tT58+LVx13L9/387ODoxu+PPPP5uMsRcuzhZDPXnyBMwoIRKJR48eLSoqatFbez3J3W+RGuajUCiCdhgUDfMdOnQIQIru3bsfO3ZMuOb38OFD0LZJJNLhw4eLi4uFi4dvqMePHwMoSSQSDx8+XFhYyDdIe/IgE8wnqIgUCvPV1dUdO3YMrClmbW199uxZ4dpbbGzsvHnzAH2bNm2a5AaVv3r1auLEiQiC6Orqrl69usVtiIQrQlsJJRPMJ6hxuB+X7R7z1dXVnTx5EoioS5cux48fF9RcwH9qaqqrqyt4lxs+fLjk2ja3iDw8PCSnVuHsIIVQMsF8UihXe0oCYr4WavPGjRvm5uZgUtWff/5ZUlLSgqfWTzU0NNy+fZuDXR4+fCi5pQF+/vzp4+MD0jIzM7t7927r+WqHVyDm4zQzgRwQ84kD83VnHzhWL3/dS4XCfGw2m8lkzp49G+xRPnjw4MDAQOHudC9evACzpVRVVfft2yc5cHDz5k2wv5OKisrff/8tuRFPwtlB0qG4+y1Sw3xCFErRMJ+rq6uuri6CIA4ODg8ePBDCYiiKvnz5EoxUVVFR2bBhQ15ennDx8A1148YNICJVVdUrV66Ul5fzDdKePMgE8wlqQIXCfGQy2cXFRVtbG0GQMWPG+Pn5CWou4D83N9fd3R2Hw6moqDg4OBQWFgo0+At7otevXweLABobG//xxx+Sm9iIPUtS9gkxn5QNzjc5Mpns6uqqo6ODIMiQIUOEnp1WUlKyceNGZWVlJSWlzp075+XlMRgMvqkL4YF7Jc0TJ05IrqcvRN6kEwRiPunYWZRUIOZraj0ajXbs2DGwjLSKisq7d++EeP69ePECxIAgyJUrVyQ3/ys7O5uz0U+/fv38/f2blqdd/4aYTyC6x/EMMZ/ImM8Unb20Lj1DDuWlUJiPxWJVVlYOHDgQLOYyceLE1NRU4Srl/fv3YBITkUhcsGCBoIO4MSZKp9PPnTsH9jPV0NB49eqVon0BhpgPY1ORmjc2m93Y2Dhs2DDw0jJ27Fjhdg9AUTQy
MnLhwoVgQrqzs7OE1p2k0+lnz54FItLU1AwODlY0EUHMJzV1YEmIzWaXlZXZ29urqKggCDJv3rzo6GgsAZv7qa2tPXDgACAdWlpa4eHhkmAHdDp9x44dBgYGOByuR48e4eHhkttZvnkZ5eQMxHxyUhEgG2w2u7S0tF+/fkBEM2fODA8PFy6HdDr9zJkzYFSglpbW69evJTFvl06ne3t7AxFZW1uHhIQooIgg5hOuiUozFMR8Ta398+fPDRs2IAiCw+F0dHSys7OF+JgWEhJibGwMqMqJEycKCiS1Qn96erqbmxtIaNy4cSEhIU3L065/Q8zHIXcCOSDmExXz4e3RsPdsFksO5aVQmI9Go0VERIDlwHA43MKFC4V+08rJydmxYwfu92FiYhIbGyvEFgR820NBQQF4vhAIBCsrq7y8vOaL7/KNpE17gJhP3qqPTqfHx8f37NkTPEQWLFggdKeotLT02LFjgLnr6+vHxMRIonkXFBSsX78ezLewtrbOz8+XRCryVk3c+YGYj9saMnfT6fSMjAwNDQ2goI0bN4oyXT0oKGjcuHEIgqiqqu7cuVMS48rz8/NnzpwJZjVOnDiRQqFI4mEn83rhnQGI+XjbR8pX6XR6enq6uro6ENH69etFGQz++vVrsMalqqrqli1bJNEHz8/PBysAkkgkJycnxRQRxHxSlokQyUHM19RoYWFh4PmnoqJib2/fZI/kpr5b+f3+/fvevXuDu9X27dszMzNb8Sjq6aSkpNmzZ4OE5s+fHxkZKWqMbSo8xHyg6gX9DzGfCJjPGEU6o4+e0evr5VMrCoX5Ghsbr169Chbm09fX3759u9DdFQaD4e/vD5Ynw+PxN2/elMQo7JcvX06YMAFBkI4dO27bto1MJgudYflsfnxzBTEfXxNJ2UNjY+OdO3csLCzA7gGenp5CUzMmkxkQEGBtbQ2WNr569aok1rh8+fIl2BRYX19/27Zt1dXViiYiiPmkrBHeyZHJ5KdPn6qpqQFwduzYMSEGB3CS+Pnz5969ewHFHjZsWFpaGueSuBycNTTBMoLs34e4Im8r8UDMJ1c1RSaTnzx5QiKRQMv38fFpbGwUOoelpaVHjx5FEERJSalPnz4pKSlCR9VawAcPHoD1Ybt37/7HH38opogg5mutecjPeYj5mtbFnTt3Bg4cCDa6WrVqlXA9vejoaLCfN4Iga9euTU5ObpqMmH5/+fKFk9Dq1asTEhLEFHHbiAZiPkEBH/APMZ8ImK8b+jOPVlUln0P5UBRVKMxXW1u7Zs0aPT09BEHs7Ox8fX1FuXPFx8cvWbIEaGTdunVCz//lkYeDBw+ampoiCNK9e/f4+HjJ7c7EIw+yvQQxn2zt3zz12tpaLy8vMAe2e/fup06dau4H+5m0tDQwXhVBkOXLl0vineTIkSNg9WQrKyvFFBHEfNgbpBR8FhYW7t+/H0w27Nmz5+3bt0VJlEqlPn78uHPnzmC/0devXzc0NIgSYfOwW7duNTExwePxkyZNavcbOzQvPjgDMV9rlpHJ+YKCgv379xOJRARBOnfufPXqVaG/NqEoSqfTAwIC+vbtC8h7YGCgEKtv8bbDli1bTExMEAQZN25cYmIib8/t9SrEfPJfsxDzNa2jI0eOgG5Yp06dzp49K9yaL0lJSS4uLqC76OrqGhsb2zQZMf2OjIwE3xMQBNm5c6eElsIRU2bFHw3EfBDziQDsjAQP2w1d6NbQ0MBiMsXfmsUUo0JhPrAwn6qqKoIgkyZNev36tShWrKysvHfvHlihzN7e/s2bN6LE1jxsYWHhwoULiUSitrb23LlzGxsbRXmRbR5/mzgDMZ+8VVNlZeW4cePAbKlRo0Y9ffpUlBxWV1c/ffoUbIljY2Pz6tUrUWJrHraiomLp0qUqKio6Ojrz5s1TTBFBzNe8YcjwTGZm5syZM8GDY+bMmWFhYSJmJikpadWqVeDtbu/evTk5OSJGyAnOZDJ//Pjh6OiooqJiaGjo5eVF
oVA4VxXKATGfXFV3RkYGR0QTJkwIDg4WMXspKSnr1q0DItq9e7cYV1sGIhozZoyKikqnTp22bt0qiQU0RSy+dIJDzCcdO4uSCsR8/7FeXV3dmjVrwLBhCwuLkJAQ4YYN5+Tk7Ny5E9xfxowZI6El8xgMxps3b8D3BDwef/78eUlMkPmPgeTsB8R8EPMJjuqEoHsgiAU6fDoa9l7ORNA0O4qD+RgMRm5ubqdOnXA4HB6Pd3d3F/FNjsViJScn9+/fX0lJSUND4+TJk6WlpU3tK8LvR48egaHiPXv2vHz5sggxteGgEPPJVeWxWKzCwkJzc3MlJSUEQVxdXUXc/pjFYqWkpIwaNYpAIJBIJLGLKCgoaNiwYQiC2NjYKKyIIOaTHxGxWKzY2NiuXbuCJSl37NghOpWrqqp68OBBhw4dEASxt7f/559/yGSy6EVmsVhkMvnQoUNgKN+4ceMCAgJEj7aNxgAxn/xUHIvFio6O5oho8+bNog8yraqqevz4MXg/tLe3f/DgQXV1tehFZrFYVVVVBw8eBOvvjx49WuhttUXPjMxjgJhP5lXANwMQ8/3HRNnZ2c7OzmD/jV69ehUWFgq3D3dlZeU///wDxvCbmJjcvHlTEpOzKioqrly5Aj4hGhkZBQYGKtp3OYj5IOaTFuYzQS1Hon/fRhmM/9wy5O+H4mC+2trakJAQsPC5jo7OsWPHRL8BlpaWnjhxQltbG0GQ8ePH+/n5Cfelp0m7YLFY379/X7x4cceOHUkkkrOzCccZCQAAIABJREFUs+hvsU2SaCs/IeaTq5pqbGyMjo4GIlJXV9+/f7/ofaGysrJz587p6uricLgJEyb4+/uLS0T5+flubm6GhoYkEmnWrFkKKyKI+eRHRA0NDS9evADv4VpaWjdu3BB9aA+TyczMzHRxcQHr/c2fPz88PFz0TkR1dfWLFy+6detGIBC6dOly7NgxRRsZwN1sIObjtoZs3fX19f7+/kBEqqqqV65cEW4iXZNSfP/+3c3NTVNTE4fDLViwICIiQiwiev78edeuXQkEgqmp6b59+/Lz85ukqzg/IeaT/7qGmO8/dRQSEjJ69GgEQVRUVMaMGfOfa4L8YDKZaWlp1tbW4LYlod2y4uPj3d3dAeiZO3eu5FYAFKToUvULMR/EfNLCfDbokT/YdXVSbd9CJaY4mK+4uPjIkSNg8DX4WiuUwf4TiEajZWZmDh06lEQiEQgEV1fXhIQEEafWcj7/gjEUffv2/euvv/6TqiL9gJhPrmq7rKzs/PnzQES9e/e+e/eu6Nmj0+m5ubmOjo7q6upARImJiaKLqLq6+syZM2ZmZng8vm/fvufPnxc9q200Boj55Kfifv78eeTIEfAmNnz48PfvxTPen0KhJCYmDhkyRE1NTUNDw9PTU8RBglQqNTo6esSIEQQCQUtLa9OmTQrYZeBuNhDzcVtDtu4fP35wRNS7d+/Q0FCx5IdKpaanp48ePVrj97Fx40YRRUShUKKiooYPH04gEDQ1NdevXy+JxWfFUnbpRAIxn3TsLEoqEPP9x3p//fVXr169EAQxMDDw8PD4zzUBf5SWlu7YsUNLSwtBkGnTpol93i6bzX727Fm/fv3A2MMnT56IZVS/gKWUsff2ivlmzJgBJoAIR/F4h8LhcBMnTpRxzbWefP2cJdIid9hn75qgG7zoBYWt51qOrigI5mOz2enp6RMmTACfUlauXBkXFyeWaqDRaMHBwTY2NjgcTldXd/369XV1dUJvo8Zms2tra4ODgw0NDXE4XIcOHfbu3VtVVSWWrLbFSCDmk59aY7PZWVlZc+fOBSKaM2eOuCAFnU7/9OlT7969gYg2bNggoogaGho+f/5sZGQERLRv3z4FfOHhtByI+TimkK2DxWJFRESAzdPxePzp06cLCgrElSUajRYWFmZvb08gEMzNzX18fGg0GpPJFHRfaTabzWAwkpOTPT09wd6jc+bMiYqKElc+22g8EPPJScWxWKzw
8HCOiI4fP56XlyeuvIEnkYODA4FAsLCwEFFESUlJYIMpJSWl6dOnf/78WVz5bKPxQMwn/xUHMd9/6mjNmjWc/ebOnj37n2sC/qBSqWlpaWD2vpWV1YULFwSMgI/3xsbGs2fPqqmpKSsr29jYfPv2TcSv5XzSk8vL7RXzrVu3TkdHhzetE/qqqqrqvHnz5LI+/5cpecR8s5c1JKfI87Yb3LWpIJiPQqG8fv0afEcxMTG5f/++uLYjZLPZjY2Np0+f7tGjBx6Pt7a2Pnr0KIVCEbRzBSqFQqF8/vzZ1taWQCCoqant2LEjMzNTAe/VnCYKMR/HFDJ3ABGBZ42Ojs7NmzdFn28ICsVms6lU6unTp62trYGIwJx6oUUUFRU1aNAgZWVlNTW1nTt3ZmVlKbKIIOaTuXZABrKysrZv366qqqqsrOzo6JiUlCTcOj8tFofNZlMolJCQkPHjx6upqZmYmCxcuPDDhw+CTjysrq5++PDhhAkTNDQ0iETismXLEhISqFRqi4kqzkmI+eSkrrOysry8vICIhg8fLnYRUanU8PDwiRMnkkgkExOTRYsWffz4kU6nC1T86urqx48fT548GYho0aJF8fHxUEQQ8wnUimTiGWK+/zM7m82m0WhTp04FC+oNGTIkIiJClCphsVj19fWzZs3S1tZWVVVdt25deXm5KBE2CZuQkLBixQrwZXvPnj0VFRVNPCjCz/aK+d68eePk5AQ2KxQa57UYkEgk9uvX7/r163LbPOQO85kNq46KprWdd2IFwXzR0dGzZ8/mrHou9m5/UVGRr6/v4MGDVVRULC0tV65cGRERUVtbi104DAYjMTHxyJEjQ4cOVVFRUVNT27VrV1JSkoK/GkLMh70JSdpnVFTUrFmzgIjWr1+fmpoqXnZWWFh49erVYcOGqaiodO/efdWqVREREXUCLn2Qnp5+/PhxMI9eXV199+7dUEQQ80laGjziZ7FYdXV1SUlJ//zzz6pVq7p160YkEm1sbJ4/fy76upbN062pqQkLC1uzZk3Xrl11dXUdHBw2b94cHx/Pd71L0KkJDAxcuXJlr1699PT0rKysDhw4kJKSIvoits3z2ebOQMwnwyoDjTM9Pf3hw4erVq3q2rUrkUjs1auXv7+/JERUV1cXFhbm4eHRpUsXXV3d/v37b9myRVAR9e7du2PHjtbW1gcOHEhKSuIrQBmaV2pJQ8wnNVMLnRDEfP9nOiaTmZWVNXjwYBwORyQSZ8+eLTo4YzKZN27csLKyQhCkf//+N27cEPQDQmv1Wl1dferUKWtra2Vl5b59+yYmJirmY7u9Yr6amprExMS3b98GNzt8fX2nTJlCIpH69evn5+fX7DqfE2/evImJiZHndZflCfMZo0g39PI1qjj252pNy2I/374xX05OTkhIiK+vL9jOQk1NbebMmZGRkeIaysddHYWFhXfv3p0zZ46RkZGent64ceN8fHyioqKwwL7c3Nzr16+7uLh0795dS0tr8ODBx44dS0tLU8wbNbdVIebjtoZM3Nwi0tPTIxKJM2fO/PDhg6AADkvmi4uLHzx4sGDBAjMzMyCiAwcOYBfRjRs3Fi9ebG1tDUXEbW2I+bitIWU3jUaLjIxcvnz5wIEDDX8fzs7Ojx49qqysZDKZkshMfX19YmLi5cuXFy5caGlpaWpq6uXllZaWxjstBoORkpIyb968zp0729nZubm53b9/Pzc3V4zjDXlnQM6vQswnwwqi0+nJyclubm6DBg0yNDQ0MjJydnZ+/PixREWUkJBw+fJl8FZmZmbm5eXFdxMnIKK5c+cCEbm7u0MRcTcbiPm4rSGfboj5/q9eGAzG06dPe/TogSCIkZGRt7e36BXGYrF+/PixatWqTp06aWpqTp069dOnTyJGC1bZ8Pf3d3R0JBKJXbp08fHxodPp4v0IL2ImpRa8vWI+HgYsLy8/ePBghw4dFi9e3C6Rgdxgvt+Mb902ZlkZm8XiUSPydql9Y75bt265
uLjY2tqamZnZ2Ni4ubmFhISIZVO2FuuxsrIyIiJi3759Y8aMMTU17dat26lTp7BsrObn5+fk5GRhYTF48ODVq1c/ePCgvLwc9q9QFIWYr8WWJs2TN2/edHFx6dOnj7GxsY2NzfLly0NCQrDAa+EySSaTP336dPDgQScnp86dO1taWp48efLnz598Y/Pz85s4cSIQkYeHBxQRx2IQ83FMIX0HjUb78OHD2rVr582b5+bmdvTo0ZCQEDqdLtyEdOz5J5PJkZGRf/311+rVq48ePYqFUKSnp2/ZsmXr1q03btz4+vVru3xjxG7AJj4h5mtiEGn+pNFo8fHx69evByI6duxYaGiodET0+fNnIKJjx45lZGTwLjWDwUhPT9+8eTMQUXx8PBQRt8Ug5uO2hny6Ieb7v3qh0WibNm3q1KkTDocbPnz4o0ePxFVh7969mzFjhpqamoGBgZubW3FxsSidPTabnZKSMmfOHG1t7Y4dOy5fvjw3N1dcWW1z8Sgg5quvr3/9+rWnp+e5c+fENThUrupdbjBfF3TSfFZ+QVtZko9Tie0b8507d2727NlTpkxZvnz5mTNnsrOzJTSAgmNPFEXJZHJAQMCuXbtmzJjh6+uLZZH1Z8+eeXh4rFu37v79+z9+/OCOTcHdEPPJvAGcO3fO2dl54sSJS5YsOXv2rHREVFNT8+7dOx8fn5kzZ166dAnLIutPnz6FImqxtUDM16JZpHOSyWSWlZWlp6cnJydLf2IEk8ksLy8vLCzku4wmmBeZnZ2tyDs+8WgSEPPxMI6kLzGZTDKZnJGRAUUkaVNLNH6I+SRqXrFEDjHf/8zIYDDy8/N79eqlrKysra29adMmLB057BVw8+bNIUOGqKqqGhgYXLhw4fv371QqVdBPfywWi0Kh5Ofnb9y40cjISFNTc9asWW/fvsWejfbnUwExH4vFYjKZLBarvb66NSxehSLmYv0zRRFjAXfvNUUHODE/RbZFybRvzJeVlZWZmSmrxl9eXl5dXY0Fr5PJ5KqqKvjht7mCIOZrbhMpn8nKykpNTRXvYsHYiyCQiMhkMhRRc9tCzNfcJvAMtIBAFoCYTyBzQc/QAs0tADFfc5vI2xmFxnzs3weLxSoqKvL29tbQ0EAQZNKkScHBweKtJzqd/uzZsyFDhuDxeFVV1Q0bNiQnJ1OpVIwzbdlsNtjQ4+vXr3PnztXQ0MDj8XPmzBFxkxDxllEmsSkg5quuri4pKcHYcmRSKSImSjl0HB00WZx/iB2KmAmG+Uj9qTdui1gQWQVv35hPVlaF6YrLAhDzicuSMB6FtQDEfApb9bDg4rIAxHzisiSMR2EtADGf/Fe9QmO+ysrKtLS04ODgffv2aWtr43A4c3Pza9euiX2lJzabXV9f//79excXFxUVFS0trZ49e3p5ecXFxWFpIo2NjR8/fly3bp2FhYW6urqurq6np2dUVBTc6EcBMV9gYKCtre2cOXNKSkoEHRCKpbHJ3A+loaGhpkZsf9XV6JE/UKSXIJivJ7rrAJtMlrkphMsAxHzC2Q2Gko4FIOaTjp1hKu3YAhDztePKhUWTjgUg5pOOnWEq7dgCEPPJf+UqNOYLCAhwdXXt27evubk5Ho+fNm3a/fv3CwsLJTRUqq6uLikp6c6dO1OnTtXX1zc2Nvbw8Pj69SvfVvL69euFCxcaGxsbGhrOnz//wYMHmZmZfBfm4BttO/CgaJivvr7+4MGDKioqEyZMwDJzsB1UsYhF+B8JPX8ZRfpgxnxmqPsmZmZW20WoEPOJ2GZgcIlaAGI+iZoXRq4IFoCYTxFqGZZRohaAmE+i5oWRK4IFIOaT/1pWaMz38uXLNWvWzJ07d9u2badPnw4PD6+srJQQ4wNNgclkVldXR0REXL58ecOGDSdOnMjMzOTbSj5//nzw4MHNmzffvn07MjKypqZGCqvO882VPHhQNMz38+fPVatWaWtrr169Wh7sL/95EBjzjZ9D/xzFpNPlv2it5RBi
vtYsA8/LgwUg5pOHWoB5aNMWgJivTVcfzLw8WABiPnmoBZiHNm0BiPnkv/oUGvPl5uZGRER8+PDhx48fjY2NEgV8TZoClUrN/H00NDQ0udT8Z0VFRUZGRnZ2dvNLCn5G0TBfdHT0/2PvvOOiOP7/P1fgaIIUAbEgBEUsBNSo2EVFjUbEErFgiWKJoqiosURiLEkssceS2BC7MWiMioUigg0bIoKI9HJ0juOOK3v3e3ycb+53OeAEbq+/9w/d2515z7yf733vca+dnRkzZoyjo+O2bdv0PPSNdL9pMp9tX97fN3jV1Y00rpnFQObTzLhArzABkPngSgACChIAmU9BgFAdCIDMB9cAEFCQAMh8CgJUQXW9lvlUwBeaUCoBfZP5Lly44O7u7uHhcf78eaWC1RnjjZb5HMToM8HvJziVldruO8h82h5B3e4/yHy6HV/wTgUEQOZTAWRoQrcJgMyn2/EF71RAAGQ+FUBWsAmQ+RQECNXVSUCvZD6RSLRjxw57e/sxY8akpKSok7v2tN04mc9BjJzEE2ZyWCzt8azBnoLM1yAaOKEBBEDm04AgQBe0mwDIfNodP+i9BhAAmU8DggBd0G4CIPNpfvxA5tP8GEEPGySgVzJfTU3Nnj17Bg0a9PPPPzdIBE78l0DjZL524o6DxUVMEUH8t7ZWfgKZTyvDpjedBplPb0INjiqLAMh8yiILdvWGAMh8ehNqcFRZBEDmUxZZ8uyCzEceS7CkcgJ6JfOJRCIOh1NRUQGLLDf+QmuEzNdG7DpEfC9GlVNzNr7/zSgJMl8zoEEVlREAmU9lqKEhXSUAMp+uRhb8UhkBkPlUhhoa0lUCIPNpfmRB5tP8GEEPGySgPzIfn8+PiopKSkpqzJotDfLSvxOflvms+oiOHBNwODrDBmQ+nQmlTjoCMp9OhhWcUiUBkPlUSRva0kkCIPPpZFjBKVUSAJlPlbSb1xbIfM3jBrU0goCeyHwEQaSmpvr7+8+ePfvp06cagV5LOvEpmc+Vv+mnGiZTS7xpVDdB5msUJiikJgIg86kJPDSrOwRA5tOdWIInaiIAMp+awEOzukMAZD7NjyXIfJofI+hhgwT0ROYTCASHDx/u0KFDnz59oqOjG8QBJ+oQkCvzOYr953JfvqpTSbsPgMyn3fHT9d6DzKfrEQb/lE4AZD6lI4YGdJ0AyHy6HmHwT+kEQOZTOmKFGwCZT2GEYEB9BPRB5iMIory8/KuvvrKwsFi4cOH79+/Vx1v7Wm5Y5nMQ9xwpvn1PN5bdkA4MyHzSNGBf0wiAzKdpEYH+aB0BkPm0LmTQYU0jADKfpkUE+qN1BEDm0/yQgcyn+TGCHjZIQB9kPi6Xe+3atZYtW1pZWYWFhXG53AZxwIk6BBqQ+RzEqJP44p+6NCWfxHWQ+SQoYEcDCYDMp4FBgS5pFwGtk/lu3br1HjYgoEkEjhw5gv7dXr9+rZl3AOmvS0giTbp8oC//I3D48OF/cwglJydrZhLpea9A5tPzC0C73dcHma+4uPirr74yMjLy9/dPT0/X7oCpvPcNynw793ErKlTeHVU0CDKfKihDG80lIP275dUrzX1lPjk5WfL3619//dVcd6EeECCfgNbJfBQKhQobENAkAhQKRXKH1wqZD5JIky4f6Mv/CEgnEch85H/Tk2ERZD4yKIINNRHQeZlPIBDcv3/f2trayMjo/PnzsMxuUy+0+mS+DuKp82qKiwmCaKo1rSgvLfN16NDBFTYgoEkE2rRpI/lxpS0yn4ODgyYhhL7oOwHpJEpKStLML6abN28aGhpKkh12gIDGEtBYmQ+SSGOvGeiYDAGQ+TTzixhkPs2MC/SqUQR0XuYjCKKsrOz06dObNm0qLCzUVWWqUcFuVqE6Ml/7/03J9+KlUCBolj0tqBQZGWlgYCDzBQwfgYAGEtBkmS8lJcXQ0FD6YbUGAoQuAQGNlflevXo1atQoCBAQ0GQCVCp1yJAhmZmZmvm3HSSRJl880DdM
gEqlDh06NCsrSzOTSM97BTKfnl8A2u2+bst8tbW1TCYzPz+fy+UWFhYKdFeZUt5V+F+Zr424TT/x2YsioVB5Lard8vPnz/v37w9/fwABTSZAoVB69uypyQsK5eTk+Pj40Gg0TcYIfdNnAjiJMjIy1P6lU28HWCzWs2fP/oSNDAL79+8fM2YMQsjY2Hjx4sVhYWFkWAUbf165cuXp06dsNrvea1jtByGJSLxG9+3bJ0miJUuWQBKRxRYnEbxtpvbbRb0dAJmvXixwUDsI6LbM9+7du+3bt+/YsaOgoADG8TXvivyvzOcp/vlXYXV180xpS62ysrKbN2/+DBsZBIKDg728vBBChoaGvr6+33//PRlWwcb/CFy7dq2qqkpj04rNZsfExGzfvh2ipSCB4ODgvn37QhIpiLHe6hqeRBqb3VrXsXfv3gUGBiKEzMzM9uzZw2KxtM4F6DAQUC+BtLS0efPm4STau3cvJJF6wwGtq4ZA02S+gICA27ABAY0hcO7cOQaDgZ/nnzt3TjU5o5pWOBxOWFiYs7Nzt27d7t27J9TpAWjKQyol830mXrGWX1CgvLbAsu4RyM7OXrFiBULI1NT05MmTGvvMX/fIg0c6QyA7O3v58uX4xxUkkc6EFRxRGQE+n3/hwgVXV1eEkIGBQZ8+fdLS0uBvQpXxh4Z0gACfzz9//rx0Er179w6SSAciCy7IJ9AomY/ycdPnlyPAd80noEsyn0gkSkhI+OqrrywsLCZPngzigvy7mJyz/yfzUXuIJ89mv3gppyScAgIyBAiCuHnz5ueff45/XA0dOjQ7OxvG1cpQgo9AQA4BgiBu3Ljh7u6OR/MNHTo0JycHkkgOMTgFBGQIfPjwYc6cOdJ/hB86dKiyslKmGHwEAkCgIQIZGRmzZ8+GJGqIDxzXVQKNkvmMjIyoVKp0esA+ENA0Arok89XU1CxatMjCwqJv376RkZG6evdRgV//J/N9Nb0s4eH/9mEDAo0mUFhYuGTJEvzdR6FQqFTq6dOnQXNvND8oCATEhYWFixcvlk6i8PBwSCK4MoBA4wmEh4d37dpV+k9ub2/vN2/eNN4ClAQCek7g9OnTXbp0kUmilJQUPccC7us8gUbJfCtWrHBwcJBOD9gHAhpFYMCAAfHx8TqTrpcuXerVq1e7du127NhRW1urM36p3pH/SXuXrojjEgS6PiWf6tnqfIsRERG9evWSvtH5+vqmpqbqvOPgIBAgi8Bff/0lk0Tjx49PS0sjyz7YAQK6TaCgoGDatGmS2Wnw95GpqemBAwc0eW5T3Q4KeKddBAoKCqZOnVo3iQ4ePAhJpF2hhN42lUCjZL6CgoJXr149hU0xAnfu3Pnuu+/wl/SiRYv++eefZts7fvy4ZDFNc3PzmTNnRkdHN9uaDlRMS0vTmelUBQLBvHnz7OzsJk2a9Pz586amNJSXIcCpqBByuTCUTwYLfJRPoKSk5Ntvv23RooW0zGdjYwOTi8nnBmeBgIRASUnJokWL6ibRqVOnYECfhBLsAAE5BE6dOtWtWzfpryG8P27cuKdPn8qpCKeAABDABE6cOCEzHlaSRImJiUAJCOgwgUbJfDrsvypdKyoqWrVqFb65zJ8/PyMjo9mtp6amTpgwAZsyMTEZPHjw1atXdUbnajYW3agoFAovXbq0evXqv//+G1Yo142YghdaR+DKlSsyo5Dw/dbf3z8pKUnr3IEOAwHVE7hy5UrPnj1x4kj/O3XqVEgi1YcDWtQuAiKRqKysbMqUKebm5tLpg/ft7e337dsHcrl2xRR6q2ICOIkmT57cUBLt378fkkjFQYHmVEkAZD4V0ebz+XFxcW5ubvgb2sXF5a+//mr2+5jPnj0bM2aM5IufQqH06dPnr7/+Ki4uhlFLKoqoEprhcrkvX74sLCxks9mFhYUwmFwJjMEkEPgEAZFIVF1dHRgYaG1tLbnHSnYcHR2PHTvG5XI/YQVOAwE9JoCTaN68eVZWVpLckexAEunx
pQGuN5aAQCA4f/78Z599JkkcmR0Y0NdYlFBOXwkIBIKzZ886OzvL5I7ko6+vLwzo09erQy/8BplPRWHOz89fv3695M6CEFqwYEGzB/TFxMR4e3tLW0MIeXh47NmzJy8vD5axU1FQSW2Gx+O9fPnS29v7xx9/LCkpAbmWVLpgDAg0loBQKLx161bnzp1lbrCSj19//fXr168baw7KAQH9I4CTyNXVVZI1MjtTpkyBJNK/6wI8biwBgiAqKirGjRtX7ygknE3t27fftWsXj8drrFEoBwT0iQBBEOXl5WPHjpWZOEL6y8jR0XH37t2QRPp0XeiXryDzqSLeBEFER0f36NFD+ubSpUuXS5cuNU+Su379er9+/aSt4X0nJ6cffvihqKioeWZVwQLaqI8AQRCpqanTpk2j0+lOTk4PHz4UCoX1FYRjQAAIKJdATU3N9OnTLS0t695g8ZHPPvvs8OHDkKHKDQNY11oCIpHok0nk4uICSaS1EYaOK50Al8u9fv26hYVFQ19D+PiIESOSk5OV3htoAAhoIQEul/v333/LEcpxEvn4+EASaWF4ocuNIgAyX6MwKViouLh4zZo1NBpN+gubSqXOnj07Ly+vGcYvX75c77xRVCrVwcFh4cKFhYWFzTALVdRFgMlk/vDDD3Q63dzc/PLlyxwOR109gXaBgD4T4PP5Dx48sLGxoVAo0rdr6X0qlTpmzJgPHz7oMyjwHQg0RIDH48XFxVlbW8tPorFjx2ZmZjZkBI4DAX0mwGQyx48fL7M2qPTXEN53dHTctm2bPoMC34FAQwSYTKavr29jkujnn39uyAgcBwJaTQBkPlWE79atW/UOvvv888/PnDnTjB6cPHmy3mWDEEJ0Ot3BwWH69OnZ2dkw3qQZbFVfhcVi7d+/38XFxcHB4ccff2SxWDAYU/VRgBaBgFgsLi0tnTNnjrGxcd0fVNJHXFxc9u7dC8SAABCoS6CkpKQxSdSxY8f9+/fXrQ5HgICeExCJRFlZWatXr/72228X/bsNHz4cIWRkZDRnzpx/jy1aunTpgQMH+Hy+nhMD94GADAGRSJSZmVlvEhkbG3/zzTeQRDLE4KNOEgCZT+lhLSsrCwkJqXfsvZmZWUBAQHFxcVM7sX//fjnz8tLpdFtbWz8/v5SUFPj6bypbFZcXCASnT5/u169fmzZtFi1axGQyQeNTcQigOSCACYhEopycnGXLlvn7+3/979a3b1+EkKGhoa+v77/Hvp41a9bu3bsFAgGgAwJAQJqASCTKzs6uN4kYDMb48eOlk2jPnj2QRNL0YB8IYAK1tbW5/91Onz6NELKxsUlNTZU+A380wjUDBOolwOVypTMlNzcXJ5GtrW1aWpr0KUiiegHCQR0gADKf0oN448aNeofy4bEhXbp0OXv2bFM78csvv7Rt21Z6dIlkn06n29nZDRkyZPny5bm5uTCgr6lsVVyeIIibN29Onz59xowZ8fHxKm4dmgMCQECaAJvNfv3f7eDBgwghCwuL+Ph46TPp6elwd5VGB/tAABOom0QHDhxACFlaWiYkJEASwXUCBJpB4P79+wih1q1b19TUNKM6VAECQCA2NhYh1KZNG5gZCS4GPSEAMp9yA81ms4ODg1u1aiWR4WR2LCws/P39m/qeZmhoaL02nZ2dfX19f/jhhzt37oDGp9zQKmydIIj09PTy8vKysrInT548evQzIhKcAAAgAElEQVQIVAOFoYIBIEAygbi4OISQra1tdXU1yabBHBDQDwJYobC3t2ez2frhMXgJBEgmADIfyUDBnP4RAJlP/2Ku7x6DzKfcKyA6OtrDw0NG2pP52K5du7///rtJ63mvWLHCzMwMIUSlUhkMRuvWrc3Nzel0+uTJk+Pi4kAtUm5QybAuEAiys7MDAgL++OOP0tJSMkyCDSAABMgnADIf+UzBop4RAJlPzwIO7pJPAGQ+8pmCRT0jADKfngUc3BWDzKfEi4AgiOXLl3fo0KHFv5upqSnW
+IyNjf891sLe3t7f37+qqqrxXZk3b56RkZGpqWnr1q0HDBjw/fff+/j4WFlZjRo1KjY2FiZ3azxJtZQUCoW5ubn+/v7GxsY9e/Z89eqVWroBjQIBIPBJAiDzfRIRFAAC8gmAzCefD5wFAp8kADLfJxFBASAgnwDIfPL5wFndIwAynxJjyuFwdu3atW7durX/bvPnz8cy39SpU/89tnbdunWhoaFMJlMkEjWmN3w+f+TIkW5ubkFBQbGxsTweTyAQJCQkfPHFF8bGxosXLy4vL2+MHSijLgI5OTlBQUEMBsPCwuLUqVMsFktdPYF2gQAQkE8AZD75fOAsEPgkAZD5PokICgAB+QRA5pPPB84CgU8SAJnvk4iggI4RAJlPiQEViUQsFqtSaktJScEy3/3796UO/2+38W/a8ni8hw8fvn37try8vLa2FjvA4/F++umnDh06eHh4nDlzRolegWkFCAgEgri4uHnz5tna2lpbWx84cKC0tJSU0Zc1NTUpKSmvXr1KT09v/PLKeXl5r1+/fvfuXUVFhQJuQVUgoLMEQObT2dCCY6oiADKfqkhDOzpLAGQ+nQ0tOKYqAiDzqYo0tKMpBEDmU2kkCgsLscyXnJzc7IYJguDxeHVlwXfv3m3cuHH9+vXYuEAgaHYTUFFJBPLy8tasWePg4ODi4vLTTz8VFxfXjWMzmubxeJGRkcOHD/f29vbx8cnPz/+kEZFIlJeXFxQUNGzYsFGjRh07duyTVaAAENBDAiDz6WHQwWVyCYDMRy5PsKaHBEDm08Ogg8vkEgCZj1yeYE3zCYDMp9IYkSLzNdRjgiCysrIyMzOrq6tTU1O3bt2alZUFYl9DuFR8HL9bXVZWduLEia+//nrPnj1FRUWkjOMTi8XV1dX79u3DCjJCKD09/ZPeEQTx9OlTd3d3vJBLSEjIJ6tAASCghwRA5tPDoIPL5BIAmY9cnmBNDwmAzKeHQQeXySUAMh+5PMGa5hMAmU+lMVKqzCfxpKSkJDQ01NLSctWqVUlJSVwuV3IKdlRPQCQS1dTUREREXL9+vbi4OCcn5/nz52VlZST2pHky38OHD7t3747FwRUrVpDYHzAFBHSGAMh8OhNKcERdBEDmUxd5aFdnCIDMpzOhBEfURQBkPnWRh3bVRQBkPpWSV43MV1BQsGjRIjs7O2Nj41mzZj18+JDNZqvUT2jsXwJCobCsrOzKlSseHh4DBw68efMmKW/p/mv+//4HmU8GCHwEAmQRAJmPLJJgR28JgMynt6EHx8kiADIfWSTBjt4SAJlPb0Ovt46DzKfS0KtG5hOJRAUFBStXrmzTpo2BgcGAAQOuX7/e+GUZVEpEpxsTCoVMJvPgwYO2trYMBqNXr15KCgTIfDp9HYFz6iQAMp866UPbOkEAZD6dCCM4oU4CIPOpkz60rRMEQObTiTCCE00gADJfE2ApXlQ1Mp9YLBaJRDweLzw8/PPPPzcyMurVq9fZs2cV7z9YaBKB9PT0ZcuWWVpaMhgMLy+v2NhYycrITbLzycIg830SERQAAs0jADJf87hBLSAgIQAynwQF7ACB5hEAma953KAWEJAQAJlPggJ29IQAyHwqDbTKZD7sFZvNjoqKmjRp0vTp01+8eEEQBJPJJGvZB5WC08LGsrKy1qxZ06pVK3t7++XLl+fk5PB4PJFIpAxXQOZTBlWwCQTEYjHIfHAZAAEFCYDMpyBAqA4EQOaDawAIKEgAZD4FAUJ1rSMAMp9KQ6ZimU8sFnM4nPSPW1VVVUZGxuTJkw8cOJCWlgYr8Cov8NXV1QRBsNnsY8eOjRkzZvfu3bm5uUpVV0HmU140wbKeEwCZT88vAHBfcQIg8ynOECzoOQGQ+fT8AgD3FScAMp/iDMGCdhEAmU+l8VK9zCdxj8PhXL161dzc3MnJaf78+Xfv3q2srJSchR1SCPB4vDdv3mzevDk+Pr66ujovL+/p06fFxcWkGJdjBGQ+OXDgFBBQhADIfIrQg7pA
QCwWg8wHlwEQUJAAyHwKAoTqQABkPrgG9I0AyHwqjbgaZT4ul/vkyZOZM2d27NjRxsZm5MiRJ06cSE9P53K5KkWgo40JhcKSkpKYmJjZs2dbWlrOnTs3MzNTZb6CzKcy1NCQvhEAmU/fIg7+kk4AZD7SkYJBfSMAMp++RRz8JZ0AyHykIwWDGk4AZD6VBkiNMp9YLCYIory8fPfu3QMHDmzZsmXr1q3nzp2bnZ0tFouFQqGSpo1TKV81NVZTU5Oamrp9+3Z3d3cjIyMnJ6fly5dnZWWprDsg86kMNTSkbwRA5tO3iIO/pBMAmY90pGBQ3wiAzKdvEQd/SScAMh/pSMGghhMAmU+lAVKvzCdxNSEhwd/fv02bNuPGjSsqKiIIIisri8ViCYVCSRnYaTyBu3fvjhgxwtjY2MLCwtPT89SpU2w2u/HVFS8JMp/iDMECEKiXAMh89WKBg0Cg8QRA5ms8KygJBOolADJfvVjgIBBoPAGQ+RrPCkrqBgGQ+VQaRw2R+YRCYVlZWfzHjc/nv3nzpk+fPt98882dO3dqampUSkQnGjtx4oSrq6uzs/OaNWtyc3P5fL6Kh0aCzKcT1xE4oYkEQObTxKhAn7SKAMh8WhUu6KwmEgCZTxOjAn3SKgIg82lVuKCzJBAAmY8EiI03oSEyn1gsFolEPB6v9uN25syZNm3aWFtbd+7cOSAg4MKFC2VlZY13Sg9LCgSCtLS0ffv2/fbbb3gJ47Nnz968ebOsrEypK+o2hBpkvobIwHEgoCABkPkUBAjVgQDIfHANAAEFCYDMpyBAqA4EQOaDa0DfCIDMp9KIa47MJ3FbKBTm5+cfPXp00qRJbdu2bdmypaen5/z581+9esXn8yXFYAcT4PP5b9++PXLkiK+vb8eOHbt16/by5UsOh8NisdQ4EBJkPrg+gYCSCIDMpySwYFZ/CIDMpz+xBk+VRABkPiWBBbP6QwBkPv2JNXiKCYDMp9IrQQNlPux/dXX1y5cvDx06NGnSJBcXF0dHx/j4eD6f/+7du6SkpOLiYrUMUlNpbBrRWGZmZlhY2KxZs9zc3MzNzXv06LF06dKsrCy1z2kIMl8jogdFgEBzCIDM1xxqUAcISBEAmU8KBuwCgeYQAJmvOdSgDhCQIgAynxQM2NULAiDzqTTMGivzYQpcLvfVq1cHDhxYtmxZZWUlm83etGnTlClTtmzZcuvWraysrJqaGn3T+wiC4PF4eGBjWFhYjx49LC0t3dzcZs6ceenSJSaTqQlAZGS+pKSkT17WBEE8fPiwe/fu6OO2YsWKT1aBAkBADwmAzKeHQQeXySUAMh+5PMGaHhIAmU8Pgw4uk0sAZD5yeYI1zScAMp9KY6ThMh9mQRAEh8MRi8V5eXkBAQF2dnYWFhZOTk6zZ8++fPlyXl4eXmJCxatMqDROHxsTCoXV1dUfPnyIjo5OSkoiCOLMmTMjRowYP37877//XlhYqPouNdRiTU3NwYMHsWCHELpz584nBxgSBJGQkIBlPhqNtmrVqoaMw3EgoM8EQObT5+iD76QQAJmPFIxgRJ8JgMynz9EH30khADIfKRjBiBYRAJlPpcHSCplPmsiLFy++//57Ly8vU1NTBoNBpVLHjRuXmZnJ5XKrqqrwMDehUKhjkp9QKOTz+ZmZmfv27Rs4cGCbNm2CgoL4fH5lZWViYmJBQYE0Ik3Y5/P5J0+epFKpWOk7e/bsJycKJAgiOjq6a9euCCFzc/NNmzZpgiPQByCgaQRA5tO0iEB/tI4AyHxaFzLosKYRAJlP0yIC/dE6AiDzaV3IoMMKEgCZT0GATauudTKfUCisra0tKiq6fv36okWLHB0dg4KC8vPzk5KSlixZMmTIkDVr1ly7dq2oqEgT3l1tWjAaKC0QCKKjo1esWOHu7m5ubm5oaGhra4tlPpFIJBQKNdPThIQEa2trLPPt3bu3vLy8Af/+77BQKAwLC/vss88QQk5OTocPH5ZfHs4CAf0kADKf
fsYdvCaRAMh8JMIEU/pJAGQ+/Yw7eE0iAZD5SIQJprSCAMh8Kg2T1sl8mA5+jbekpCQzMzMlJYXL5cbExAwdOtTY2Nje3r5z5869e/detmzZ9evXS0tLVQqUvMZYLBaWxoqLi4cOHdq6dWtTU9MuXbosW7bs7t27GjIHnxx3X7582blzZyzzrV+//pPvFAsEgtDQUAcHB4RQjx49Ll26JMc4nAICeksAZD69DT04ThYBkPnIIgl29JYAyHx6G3pwnCwCIPORRRLsaAsBkPlUGiktlfmkGeFXdEtKSm7fvh0aGjp16lR3d3cjIyMbG5v169fn5eWxWKw7d+7cvHnzzZs3LBZLk9/nFYlEpaWl8fHxx48fDw4O3rFjR2FhIYvFmj17tp+f34YNGyIjI3Nycng8njQBzdxPTU0dMWIElvm8vLxiY2PljDokCKKkpGTIkCEmJiYIoREjRsTExGimX9ArIKBeAiDzqZc/tK4DBEDm04EgarULVVVVsbGxJ06cCAsLi4iI4HK58t3hcDjJycknPm5nz57NzMxszJ+yUVFRYWFhJ06ciI6OJv3vRpD55IcMziqbQHV1dVxcnCSJamtr5bdYW1ubkpKCkygsLCw9Pf2Tk4aLxeKYmJjw8PATJ07ExMSQnkQg88kPGZzVPQLaIfOVlJTcu3fvzJkzly5devTo0SfDUFVV9fjx4zNnzpw9ezYiIqKqqkp+FZFIxGazr169eu7cuYsXL757905++Waf1QGZT+K7QCAoKSlJSkq6cePGpk2bZs6ceeXKlaqqqjdv3owcOXLAgAHTp0//7rvvjh8/fufOnaSkJLxYraS6endYLFZiYuKFCxdCQkJGjx7t5ORkaWk5bty4xMREoVD44sWLxMTEoqIigUCg3n42vvWCgoLvvvsOy3wmJiahoaFyBvRxOJzTp0/b2NhQqVQTE5NFixZlZ2c3vi0oCQQaT4DJZEZFRZ05c+bChQuPHj365O+rmpqaBw8enD9//syZM7dv35ZzGUv6UFFRgZu4cuXKixcvJMdJ2QGZjxSMYEQRAtJJFBcX98kk4nA4CQkJOIlu3brVmCSqqqpSXhKBzKdI9KGu4gTy8vJCQkKcnZ1dXFwGDhz45s0b+YpDXl7ehg0bnD9ubm5uf/zxR2VlpZxuiESiioqKqVOnduzY0cXFZfny5Ww2W075ZpwCma8Z0KAKiQSKiorWrFmDk2jQoEEpKSnyk6igoGDLli04iTp16nTgwAH573vhJJo+fbqrq6uzs/PSpUtZLBaJ/ReLxSDzkcsTrGk+Ae2Q+Z49e4ZHKpmZmU2aNKmmpkb+g7WkpKSJEycihKhUqo2NzePHj+XrNXw+PzExsV27djQazdLS8tSpU0qKnC7JfNKIBAJBZmZmZWUlQRBJSUkjR450dXW1tbU1NDS0sLDo0qXLypUr8S+T5OTkV69epaWlZWZmFhQUVFZWcjgc+dGUbqip+yKRiMfjVVRUMJnMjIyMFy9epKWlsVisnJycFStWtGzZ0tjYuH379n379p0/f/7x48czMjKa2oSGlOdyuVFRUU5OTjQaDSHk5eV1+vTpwsJCmTF9BEGUl5fHxsYOGzaMwWAghHr16nXx4kX539Ya4iN0QxsJPH369KuvvkIIGRkZjRo1qri4WL4X+fn5Q4YMMTAwQAi5ubn99ddfMtdw3eoxMTH9+/dHCDk4OGzcuLFuAUWOgMynCD2oSwqBJ0+e4CRiMBj9+/fPzc2Vb7agoODLL7/ESeTi4nLx4kX5fwKJxeKHDx8OGjRISUkEMp/8eMFZZROoqKhYu3Yt/pvHysrq9OnTHA5HTqMvX77s168ffm6KEFq4cOH79+/llBcKhTExMZ06dUIIWVhYhISEfHKskxxr9Z4Cma9eLHBQZQSqqqrWrVuHk8ja2jo8PFx+EiUnJ/v4+EiSaObMmW/fvpXTW6FQGB0d3bFjR4SQsbFxUFDQJ9cSlGOt3lMg89WLBQ7qMAHtkPnS
09N9fX3xQqKdOnV69OiRfFXin3/+wXcKfH/5+eefS0pK5ESxpqbmt99+s7KyQgi1a9fu/PnzcgorckpXZT5pJlwu9/Hjx4cOHVq5cqWvr6+np2fHjh1XrFghFov5fP7AgQNdXV2HDRs2derUkJCQI0eOXL9+ncvlEgTBYrFKSkpKS0urPm5sNpvL5eJ3hPFKILW1tQKBQEYTJAiCx+PV1tZyudyamhpcl8ViCYVCHo+Xk5MTGRn522+/ff/9935+fo6OjmPHjn38+HFxcfHu3bu7d+8+YcKELVu2REdHy39UK+2gxu5XVlZu2LChZcuWOFNcXV3XrFmTmJjIZDJLP25MJjMlJWXv3r0eHh44NczNzX/++Wcmk6mxTkHHtJ1Aamrq3Llz8UMXc3Pz9PR0ObIdQRBv3rxp0aIFhUJBCBkYGKxdu/aTw7F3796NF5P57LPP9uzZQy4xkPnI5QnWmkFAkkQUCoXBYNy/f1+ObEcQRGpqauvWrSVrrwcFBX3yJn/06FE8u6uLiwvpSQQyXzOCDlXIJXDx4sW2bdsihMzMzJYuXSrnTz6CIO7evduqVSuJQjFs2DD5LxLxeLxff/21TZs2CKHOnTuHh4eT23mxWAwyH+lIwWBTCVy4cKFdu3YIoRYtWixbtkxOEolEopiYGEdHR0kSeXl5xcXFyWmRx+Pt2rULzxju5OR07NgxOYWbdwpkvuZxg1raS0A7ZD4+n//tt9+ampoihNq2bXvw4EE5b+zz+fxt27YZGhpKbi4zZ86U/yCuvLx8+vTp2L6Pj098fLySIqoPMp8Muvz8/Fu3bsXHxwuFwuTkZA8PD2NjYyMjI0NDQwMDAwaD0bZt26Kioqqqqp07d3p7e0+cOHHdx+3AgQP//PNPSUkJn8/Pysq69XFLSkpisVg8Ho/P52MFsKKi4v79+7du3bp+/foff/yxbt269evXb9mypaqq6vXr15MmTaJSqTQaDbdlbGzcu3fvmJgY/MZxZmamTG+1/WNJSUmvXr3MzMwkP/BMTEw8PT0nTJjg6+vbr18/e3t7hBDl42ZoaDhx4sTU1FRt9xr6r8kEqqurf/31V8kFeebMGTlvM7FYrOPHj0tu3QihqVOnvnnzRo6DIpFo5syZZmZmCKH+/ftHRUXJKdyMUyDzNQMaVCGXQHV19a5duyRJdOTIETnaN4fDuXjxonQS+fn5PXv2TH6XFi1aZGlpiRAaMGBAdHS0/MJNPQsyX1OJQXnSCcTExAwcOBAhZGho2KtXr6KiooaaYLFY27dvl84gBweHq1evNlReLBZzOJwZM2bgDBo1apR8OUOOHTmnQOaTAwdOqYZAXFyct7e3JInkPD2qra3dv3+/dBJZWlpevnxZZqCGdLc5HM706dNbtmyJEBo6dOjdu3elz5KyDzIfKRjBiBYR0A6ZTywWnzp1ytXVFSFkZWU1d+5cOUOFMzMzAwMD8WAQfIvx9PR8+vSpnKgwmcxOnTrhN1wWL1784cMHOYUVOaWHMh9BEIKPm1gsFgqFLBYrISHhjz/+2LJly/z58318fObPn19ZWZmfnx8YGGhkZGRsbGz+cbO0tOzRo0d0dHRFRcWRI0dsPm52dnbOzs6dOnUaMGDA/v37hULh8+fPe/fujc9aWlqam5tbWVn17duXyWTm5OQsWrSoQ4cOQ4cOnTFjxrZt227cuFFYWIhnCSQ+bopEUwPrEgTx4cOHdevW4cFNeAiVoaGh8ceNwWDgV3rpdLqtre3mzZvz8/PljArRQAehS1pHAD/UtbOzw3fjkJCQgoKChrzIzc1dtmyZ9J+GgwYNioyMbKi8SCQqLCzs1asXVkB8fX3z8vIaKty84yDzNY8b1CKRgEgkio6OliRRUFBQfn5+Q/YLCgrWrVsnnUS9e/e+du1aQ+XFYjGLxerTpw9OovHjx8sxLseInFMg88mBA6dUQyA9PX3BggX4MaehoaGct4Lev3+/cOFC6Qyi0WgbNmyQo62z2ewu
Xbrg4QULFy785Gv1zXAZZL5mQIMq5BJ4//69dBI9fvy4oVfrsrOzly9fLp1EVCo1JCREzqt1bDbbzc0NJ9Hs2bPT09PJ7TzMzUc6TzCo+QS0RuaLj4//8ssv8TOETp06yZlSOjY2Vno6AISQqanpqVOnGloCgs/nJyQkGBkZUSgUAwOD3bt3kz4dgOQ60EOZT+K7ZIfH47FYrPLyciaTmZuby2QysfwXGRm5Y8eO9evXz/64jRo1yt/f//Hjx1VVVWfPnvX8uLVu3RpPDGFpabl8+XKCILKzs/38/Dw9PXv16jVy5MjZs2cHBgb++uuv1dXVfD6/qKgoIyMjJyensLCwvLwcvx0s6YlO7hAEUVpaGhkZGRwcPHjwYGdnZ8kXbYsWLZydnQcPHrxkyZIbN26UlpY29A2tk2TAKXURwMvy4Otw2LBhcgaQJicnDxkyRHLF4uHbv/32W0M9FwqFN2/exDMiWVtbb968mXTZGmS+huDDcVUSSE5OHjlyJE6NgQMHypnk6O3bt2PHjpVOIhsbm927dzfUW6FQ+ODBA/zGrrW19datW0n/XgCZryH4cFxlBFgs1s6dOyUv+vzxxx8NyXYJCQn4RwSNRrOysqLT6QihGTNmNJR0PB4vPj6+ZcuWFArF0tLy5MmTDf3cUMRZkPkUoQd1SSFQU1Oze/duSRIdO3asoSRKTEz09fXFQw1atGiBk2jy5MkNLZJWW1v74MEDCwsLCoVibGx89OhROS/tNdsXGM3XbHRQUUsJaI3Ml5ubu2jRIvwgztzc/P79+w3dAs6cOdO5c2cqlWpsbGxvb49vLt99911DymBlZeXhw4fx38QuLi63b9+WM6hYwTCDzNcQQIIgqqurS0tLCwoK3n/cXr9+nZKSUllZidW6Jx+3+/fv37lzJzIy8t69e8nJyWKxuLa29vXr10+ePElMTHz9+vX79+8zMjKKiopI/6HSUM818ziHw8nJyXn58mVCQsKNGzd++umnffv2Xb16NT4+/uXLl1lZWQ2lj2a6A73SagL5+fmSZaBbt259586deu+xBEFcv37d3t6eRqPZ29ubmJhQqVQGg/Htt9829J4vj8fbtm0bnhGpR48ely9fJh0UyHykIwWDzSCQn5+/Zs0a/IeKtbX1jRs3GvqOe/TokZOTE5VKtba2NjIyolKpdDp93rx55eXl9bbL5/MPHDiAZ1zy8PC4ePFivcUUOQgynyL0oC4pBAiCiIyMdHJywkm0fPnyhkatRkREuLi40Gg0a2vrgIAAa2trPCPEjRs36u1JZWXlzp07TUxMEEIeHh737t2rt5iCB0HmUxAgVFecgEgkun37tnQSNfRyxo0bN9zd3alUqrm5+ddff43nvu/Zs+dff/1VbzcqKyt37NhhbGyM1167fft2vcUUPAgyn4IAobrWEdAama+2tva3336zsLDAS/Ds37+/3pW2BQLBxo0bzc3NzczMunbtOm7cOPzVO3HixOfPn9cbntzc3MDAQPzF7+vrK38eqHotNP4gyHyNZwUlySIgEokqKytra2vr1VbIagXsAIGGCLDZ7DNnztDpdAqFQqfTDx48WF1dXbcwi8X69ddf6XQ6g8Hw8fFxd3c3NzdHCPn4+Lx+/bpuebFYzOVyJ0+ejL8Xxo8fn5iYWG8xRQ6CzKcIPahLFgE2mx0eHo6TCCG0a9euioqKusZra2vPnTvHYDAMDQ379+/ftWtXnETe3t4NrSHA5XIlMyL5+vo+efKkrlkFj4DMpyBAqE4KgTdv3uC3ghBCgwYNqnd0Hl4HwPTj1q9fv3PnzuFXItq0aXPo0KF6u8FkMqdNm4ZfNPH398dPoOstqchBkPkUoQd1ySKQkpKCl33HSVTvyxlCoXDfvn2WlpbGxsYeHh6nT5/GymCrVq22bt1a7yJsRUVF06ZNw+ME/fz8Ghr0p6AXIPMpCBCqax0BrZH5xGJxVFQUXiGUwWDMmTOnuLi4Lu7CwsKvv/4av+o1e/bs
X3/9FU/n+fnnn9+8ebNuebFYnJKS0qNHDyzzhYaGNjTor966TT0IMl9TiUF5IAAEtJ0AQRCJiYlt2rTBk38tW7as3tVvMjMzg4KC8EqI33///YIFC/Cfhh4eHvWufk4QRElJiZubm4GBAZ1OX7t2rZxpX5rNEGS+ZqODiiQSkEmihQsX1ruwWH5+/urVq/FcJcuWLZs3bx5Ooq5dux4/frxuf3ASubq64iRaunSpnKUJ6lZv5BGQ+RoJCooplUB+fv7atWslQ2KjoqLqDonNycn55ptvEEKtW7deuXLlhw8f+vTpQ6PRGAxGSEhIvdOCv3v3rmvXrnQ6nUaj/fjjj/X+NlHcL5D5FGcIFhQnUFhYuHHjRkkSRUdH102ikpISPLulra3t/PnzP3z44OXlRf+4zZgxo95x5WlpaTiJEEJr165taKStgv0HmU9BgFBd6whok8yXnJw8depUhBCdTvf09MzIyKg7OikuLg5P7eTu7v7bb7/dv3/f3t4ez5dx9OjRujcjgiAePHhgZmZGoVAYDMa1a9eU+jIjyHxalyHQYSAABBQnUFBQMG7cOLzM0fDhw+sdWPTkyRM81MLS0jIiIiIsLKx3794IIQcHh9DQ0Lp9qK2tvXv3Lh7KZ21trYw3doSVLgMAACAASURBVMViMch8dcnDEbUQkE6ihkbnvXjxYtKkSQihli1bhoWFnTx5EidRq1atVq5cWfdPJh6PFxcXh0f8WVtb7927l/TZLcViMch8arlgoFEZAjU1NRcvXjQwMMBr9O3evbuyslKmTFxcHJ4E09XVNSIiQigUzpo1CyfIyJEjk5KSZMrz+fzIyEgGg0GhUCwsLC5evKiMDJIkUevWrZU3e7iMa/ARCNQlwOVyr1y5IkmiPXv21J2eLzExcdy4cQihjh07nj59WigUzps3D/+pNnz48LpLYvL5/Js3b+IkYjAY4eHhtbW1dZtW/AjIfIozBAvaRUCbZD4mk/njjz9KltD9559/6k5ze+LEiW7dulEolCFDhiQmJlZUVHTp0gX/tgwMDKw7Uq+8vHznzp0IIRqN1qlTp4yMDKXGD2Q+peIF40AACGgmgYqKii1btuCJVzp06HD9+vW6/bx+/bqbmxuFQrGzs8vJyUlNTf3yyy8pFIqhoeHUqVPrKhSVlZWbN2/G0zL079///v37dW0qfgRkPsUZggVSCOAkMjIyQgjZ2tpGRETUNXv79m1PT08KhWJra5ucnJyenj5mzBgKhUKj0QYPHszlcmWqsFisvXv34iTq06dPbGysTAFSPoLMRwpGMKI4gQ8fPjg6OtJoNITQt99+++HDBxmbZ86ccXd3p1AovXv3xuu279y5E7+36+Hhce7cOZnyZWVl+EcEQqh3795KemMXZD4Z7PBRjQQyMjIkSbR48eK6SfTnn39+8cUXFAqlV69eqampIpFox44dcsaVl5aWSpLIzc3t1atXSvIOZD4lgQWzGktAm2Q+Pp9/9uxZMzMzPFp4586dpaWlMmTXr19va2tLp9MnT57MZrP5fP7cuXPxBLojR46Mj4+XKf/+/Xs8tJhOpwcEBNTVAWXKK/gRZD4FAUJ1IAAEtJEAHjSE7950On3z5s0yT2sFAsGWLVtMTU1NTEyGDRvG4XCEQuEPP/yAV17z9PR8+fKlzJQuTCbTz88Pz4j0zTffpKWlKYMMyHzKoAo2m0GAx+Pdv3/f1NQUP5jcsGFD3RmKw8PDLSwsTE1Nhw8fjpNo48aNOIm6desWExMjk0TFxcWzZ8/GSTR37lwlJRHIfM0IN1RRBoGCggI/Pz/87N/Hx6fuTJQ//fSTjY2NmZnZ3LlzcbJcv369V69eeC6gzZs3y/QqIyNj5syZ+FfJkiVLcnNzZQqQ9RFe2iWLJNhRkEBhYWFAQABOopEjR9Ydnbd79+42bdowGIzp06fjt+giIyPxuHJbW9u1a9fKdOD9+/cBAQE4iWbPnp2VlSVTgKyPIPORRRLsaAsBbZL5
xGLxs2fP8NctQmju3LkyzxA4HM6YMWMMDAzs7e23bt0qEomEQuGhQ4fwg7iuXbuGhYXJBCYhIaF///4IIQaDceTIkbpjj2XKK/gRZD4FAUJ1IAAEtJGASCQqLS11dXXFUyzPnDlTRlDIy8ubP38+lUq1s7NbuXIlHql98uRJV1dXhJCTk9OxY8dkZl3Izs52dHTEa6lv37697utXpIACmY8UjGBEcQIikaikpKRTp044iQICAmSmPy8vL1+9ejVeqDokJAQnUVhYWJcuXRBC7du337Nnj8wbhTk5OW5ubjiJfvrpp7KyMsX7WdcCyHx1mcARtRDAq+LiIbEODg5///23dDcqKyunT59uYGDg6Oj4yy+/4FN5eXl4NgkTExNfX1+Z6fmeP3/es2dPrFAcP368rvIubV+RfZD5FKEHdUkkUFVVdeDAAfxwyMHBQeblDC6Xix8dtW7d+ocffsDtFhQU4CQyMDAYOXKkzPR8z58/l0yRv2vXLiV9DYnFYpD5SLwMwJRWENAymS83N3fBggX4C7V3797Si+eKRKJXr17hO0XPnj3xPE0EQTx9+tTT0xMhZGlpuWLFCukfigRBnDp1ytbWlkqltmjR4vHjx3XfAiY3iiDzkcsTrAEBIKAtBDgcTkBAAF4TaciQIXfu3JHu+aNHj8aMGYMnc7l06RK+UcfExAwfPhwh1KpVq6CgIOn7c21tbVRUFJ7Mxc7O7vbt29L3dmnLCu6DzKcgQKhOIgEOhzNjxgycRIMGDbp9+7a08ZcvX06YMAEh5OLiEh4ejjMiLi5u1KhR+E+ggIAA6dmHcRKZmJhQKBR7e/tbt27JiIDSxhXZB5lPEXpQl0QCPB4vNjbW3NycSqUaGBj88ssv0su+P378eODAgQihXr16/fnnn7hdoVC4atUqCwsLKpXao0ePV69eSYbECoXCP//8s2XLllQq1draOikpSUlfQ/DSLonXAJhSkACfz3/48KEkibZv3y6dRC9fvpRMkR8eHi5JonXr1uFX67p37x4XFyedRJcvX8ZJZGJi8vTpU+m/9BTsqkx1kPlkgMBHnSegZTJfZWXlkSNHsMxnZWV15coVye1AKBSGhYV17NgRITR+/HiJAlhdXe3r62toaEij0caOHSv9Wm5FRcW6devodLqRkZGHhweTyaw7/RO5VwDIfOTyBGtAAAhoCwE+n3/kyJF27dphGeLkyZPSPb948aKnpyeVSv3iiy8kyytJFj00Njb28vKqrKyU3KJLS0t/+eUX/F0wbNiwt2/fSlsjcR9kPhJhgikFCfD5/MOHD7dt2xaPzvv999+lDV67dq1v374IIQ8Pj6SkJPw7Kjc3NzAwkEajGRoafv755yUlJZLfV6WlpTt27MBJNHTo0JSUFGlrJO6DzEciTDClCAGRSFRYWNitWzc8FmnWrFnS48r/+OMPNzc3hNCoUaOkV9s4deoUPu7s7Hz8+HGJlldaWvrjjz/i94EGDRqkjFWqJc7CaD4JCthRO4Hi4mJ3d3c8Knb27Nnv3r2TdCk8PLxbt24IoWHDhkkvthYeHt69e3eEUIcOHQ4ePCidRJs2bUIIGRoa9uzZU/pHusQmWTsg85FFEuxoCwEtk/kEAoHkGQJCKDQ0tKSkBLMWCAQhISH29vZ0On316tXSQ4I3bNhgZ2eHEOrTp09UVJQkNm/fvsXTAVhbWwcFBckMxZcUI3EHZD4SYYIpIAAEtIiAUCh88eJFly5dKBSKmZnZhg0bJNPziUSibdu22dvbm5iYTJgwQTKkiM/nb9++HU9GZm1t/eLFC8mpjIyMadOmYYVizZo1BQUFSkIBMp+SwILZZhDASYRXqmEwGGvWrJEkkVgsPnDgQPv27Q0NDb/88kvJaht8Pn/nzp2WlpYIIWtr69jYWMmAvg8fPsyZMwcn0fLly/Pz85vRpcZUAZmvMZSgjGoIcDicOXPm4IwYOnTo3bt3Je0uX768VatWhoaGCxYskH79VrL8ro2N
zaJFiyTDC5KTk2fMmIEQMjMz++6775Q0cQTuHsh8kjDBjtoJcDic+fPnW1lZIYS8vb3v3bsn6dL69esdHBxoNNo333wj+YUuFosfPHiAx5VbWVnNmTNHOommT5+OEDI1NV26dKn0j3eJTbJ2QOYjiyTY0RYCWibzicViJpPp5eWFH8QFBAS8efMGs66trR0xYoSpqamlpeXp06elA3D+/Hn83q6zs/OuXbskp+7evTt48GD8VPz8+fOSm46kAOk7IPNhpARBCAQC3sdNMrKAdNpgEAgAAY0iwOfzvb298d17zJgxkpnFqqurp0yZQqfT27ZtKzPH+c2bN/GTYXNz86NHj7LZbOzR48eP8aLqNBrt0qVLNTU1SvIUZD4lgQWzzSPA5/OHDh2Kk2js2LGSsUgCgWDlypUMBsPOzm7dunXSxiMjIz08PBBC5ubmO3fulOgXz54969OnD16H9+TJk5Lj0nVJ2QeZjxSMYIQUAjwe7+jRo3hcuZOTk2RILJ/P//LLLw0NDe3s7A4ePCjdVnZ29pw5cygUCoPB6NOnj+Tr5t69eziDrK2tr169KtHWpeuStQ8yH1kkwY7iBPh8flhYGE4iZ2fnY8eOYZtCoXDChAkmJiYWFhbSP7fFYnFBQcG3335LpVLpdHq3bt3YbDZ+OePu3bt4dQ5LS8vw8HClDrgBmU/x0IMF7SKgfTJfZWVlSEhIixYtEEIDBgyIjo4Wi8UEQeTl5dnb2yOEevbsGRkZKR2Gly9fjh49mkKhtGzZct68eZJTx48fd3R0pFAobm5uOTk5KtCb9FzmE4lExMeNxWK9efPm3r17cXFx5eXlKiAvCTrsAAEgoEYC+EkvQqh3797Xrl3DPXn16pW3tzdCyNPT88KFC9Lde/78OZ5uzMjIaPr06fjhsEAgOHnyJIPBoNFolpaW2dnZ0lXI3QeZj1yeYE1xAmvXrnVwcMAvKEimP3///r2Pjw9CqFu3bidOnJBu5fnz535+fgghIyOjiRMnSpIoLCysRYsWOIkeP34seSNeui4p+yDzkYIRjJBCgCCI5OTkzp07UygUAwODefPm8Xg8oVCYlJTUuXNnPDFfRESEdFsCgWDDhg146ZtWrVolJyfjceXnzp1r1aoVjUZr37699MAl6bpk7YPMRxZJsKM4AYIgcnJy8MsZBgYGgYGBPB6PIIjs7Gy84lO3bt1k/pYTi8UnT57EL2fY2NjEx8fjceVnz561sbGhUCh2dnZ5eXlK/T0IMp/ioQcL2kVA+2Q+Lpd75coVPJFnq1atzp49KxaLORzO6dOn8fjhSZMmSSbmw8Fgs9lz5841MDCg0Wienp6FhYUEQXA4nAULFhgYGJiZmU2ZMkWpdxbJNaHPMh+Xy929e/egQYM+//zzTp06tWvXzsHBwdPT8/3795I5GiSgYAcIAAGdJHDu3Dl3d3eEkLOz86+//op9PHnyZNeuXRFCw4cPl7l75+bmhoSEIITodLqrqyt+r5DJZIaGhlIoFCMjIz8/v+LiYuWxAplPeWzBcvMInDt3Dk9y1K5du23btmEjFy9e/PzzzxFCgwcPjouLk7bMZDI3bNiAEKLRaK1bt87NzcUvRoSGhlKpVAaDMWLECOW99i5ZPcDe3l4yGle6e7APBFRMQCgUTpo0CS8+M27cuHfv3vH5/BMnTrRv3x7P7v306VOZLl27dg0nnYWFxZEjR9hsdmVl5apVq2g0WsuWLadMmSL9+rxMXVI+gsxHCkYwQhYBgiD8/f3NzMwoFIqvry9Ooj///LNDhw54dssHDx7ItBUZGYnXyTQ3N9++fTuLxcIDd2g0WosWLcaMGaPU8bCw0q5MOOCjPhDQPplPKBR++PDB2dmZRqMZGBisXr26rKyssrIyKCjIzMwMIbRq1SqZKWZEItG+ffvatGmDEHJycrpx4wafz09JSRk7diyeDXT79u2qCbY+y3xCobCwsDAsLKxfv3543lYGg9GvX7+amhrlDSJQTVih
FSAABBpJIC0tbcSIEXgelsmTJ+OpEtasWYOnVZ02bVpFRYW0KQ6Hc/jwYfx7zNjY+P79+3w+/82bN3haVTMzs61bt1ZVVUlXIXcfZD5yeYI1xQmkpqbiFahNTEwmTpyIk2jTpk1t27al0+nTpk0rLS2VbkUgEFy7dg0nkYGBwY0bNzgcjiSJWrRo8eOPPyo1iWA0n3Q4YF8TCHz//ff4R0Hfvn1v3LjB5XKXLFliY2ODEFqyZEld1TsxMdHX1xchZGJiMmvWrLKysuTk5K+//hoh1K5du127dknmjVWSdyDzKQksmG02ga1btzo6OiKEvLy8cBKtW7fO1tYWIRQYGJiZmSlj+dWrV1OmTEEIGRsbjx8/vrS0VJJEDg4OoaGhyk4iGM0nExH4qPMEtE/mw2P3vvrqK3Nzc4TQhAkTXr58WVxcjCfsMzIyOnfuXN2nardu3RowYABCyNbWduPGjVwuV7Im3RdffCHzkq/yoq7PMh+mymKxduzYgRdEtra2njdvHgzlU971BpaBgKYRqK2tXbp0qYmJCV5U9927dxwOZ/To0UZGRra2ttu2bZMZWC0SiWJjY/FrIAihXbt2lZaWxsbG9uvXDyFkaWkpvaSAMpwFmU8ZVMGmIgRqa2uDgoJwEvXu3Ts9PV0gEEyYMMHU1NTW1vann36SSSKxWPz8+XM8QzFCaOPGjUVFRTExMZIkioyMrPtXkyI9lKkLMp8MEPiodgKXL1/GE1biceVsNtvLy8vExKRFixZHjhypO1V3fn7+mjVrEEIGBgZdunQpLCy8evVqnz59EELdu3dPSEiom3Tk+ggyH7k8wZriBG7evPnFF1/glzN2797NZrNHjhxpZmbGYDAOHjxYd2heUVHRunXr8MsZjo6OaWlpZ8+exUnUuXPnf/75R9lJBDKf4kEHC9pFQCtlPoFAsG3bNjz3Z58+fc6ePfvmzRsrKysajebs7Pzw4cO6MUhJScHrYZmamg4bNozNZu/atcvJyYlOp48ZM0apUztJdwZkPrFYfOHCBfx7o0OHDnv37lX2bV2aP+wDASCgdgKHDx92cnJCCLm6ul64cCEpKQm/sevh4XHp0qW63UtPT8dPgBFCAQEBycnJx44ds7e3NzQ07N69e2lpqVKHA4PMVzcicETtBA4dOoSTqHPnzhcvXszIyPDw8KBSqR4eHn/++Wfd7n348GH27Nl4UV0/P7+kpKQTJ044ODhINAulfhGDzFc3InBEvQRSU1N9fHyoVGqLFi2mTZuWnp6OZ9lzc3O7c+dO3b7xeLyzZ8+amppSqVQTE5PHjx9v3LjRwcEBL2ytvOVrJD0BmU+CAnY0hEB2dvbo0aNxEgUEBGRmZrZr145Op7dv3/7mzZt1O8nj8c6fP29paYmnxYyIiFiyZAn+GvLx8WEymUr9Ww5e2q0bETii8wS0UuYjCOLevXt4+cV27dotWbLkzJkz+O/XMWPGSNbelQ5eZWXlli1b8JTtrVq1+vDhw8yZM83MzCwtLZcuXarsccKSnoDMJxaLDx8+7OLighByd3ePiopS9m1dAh92gAAQ0AQCd+/eHTp0KELIwcEhODj4999/x5O5jB07tt6HNMXFxdu2bcN3+B49epw5c2bJkiUIISsrq9mzZ+NZnJXnF8h8ymMLlptN4M6dO0OGDEEItW7detmyZRcvXvzss88QQmPHjn306FFds2VlZbt27cJJ5OrqGhYWFhwcbGBg0LJly+nTpys7iUDmqxsROKJeAlwud9GiRXiqn06dOu3cuRNPJuPr6/vixYt6+/b06VM3NzecRJs2bRo8eLChoWGrVq3WrFlTb3lyD4LMRy5PsKY4AYFAsHjxYvxqXdeuXY8cOYKTaPjw4U+ePKnX/rNnz3r16oWTKCQkxMvLy9DQ0MrKavny5fWWJ/cgjOYjlydY03wCWinzicXi4uLiIUOG0Gg0Op3eo0ePpUuXSu4aeXl59XK/ceMGHgDIYDC2bdvWqVMnvCZd
WFhYveWVcRBkPrFYHBoaam1tTaVSvb29lTp3vjIiCDaBABBQkEBGRsasWbMoFIqZmdkXX3zxzTfftGrVikKhLFy4EC8OIGOfx+PduHHDwMAAV1mwYMGYMWPwtKqHDh1S9kMakPlkwgEfNYFARkbGzJkzKRSKqampp6dncHCwnZ0dhUJZtGhRvX8CCYXCmJgYBoNBoVAMDQ1nzZqFl+V1dHTct2+fspMIZD5NuGagDzIEjhw5gh85m5ub9+jRg0ajIYTWrFlTbwaJxeL09HR/f3/8W6Nv37729vb4cXV4eLiMZWV8BJlPGVTBpoIETp06hd/GsLKyGjRoEJ1ORwgFBQV9+PChXsv4zz+cRN27d8fLabq5uf3+++/1lif3IMh85PIEa5pPQFtlPrFYHBISgm8Q5ubm+O0VhNCxY8caGjz/6NEjPISESqW6uLiYmJgghL788st6H30rKXIg8/F4vHHjxiGEWrRosWzZMiVxBrNAAAhoLAE+n79hwwZDQ0O89KejoyODwTA0NNy8eXPdGZGwF1lZWe3bt6dSqVjdk/y+Sk5OVurLhmKxGGQ+jb2Q9LljfD5//fr1BgYGeLIwd3d3ExMTQ0PDrVu3NqTZ5eTkdOvWDWsZrVq1atmyJX7S+fDhQ2UnEch8+nytaqzvd+/exUNiseiA/z158mRDg1uLi4t/+ukn6cIIoREjRsisDq8kf0HmUxJYMKsIgQcPHowcOVImKX777beGFlUvLy/ft2+fTPkhQ4bIrA6vSJfk1AWZTw4cOKWTBLRY5jt+/Dh+hoCX3KVQKC1atJDzq+/Dhw+LFy9GCOGn2fgX47x584qKilQWWpD5kpOTBw8ejBBydHRU2QLHKosvNAQEgEBjCEREROC3n/AULVQqtVOnTtevX2+obnFxcUBAAIPBwKIGnU43MjLy9/dvSNFoyE4zjoPM1wxoUEUFBP7666/OnTvjP2kYDMYnk6i0tHTx4sX4pSo6nU6j0QwNDb29vZW6xi7mADKfCq4HaKKpBDIzM2fOnCmtODg7OycmJjY0k0xtbe3Vq1elyxsYGMycObO6urqpTTejPMh8zYAGVZRNoLCwMDAwUDop7O3tHz161NCjIz6fHxkZiceVS2r5+/uXl5cru6swN58KCEMTmkZAi2W+hw8fent7S24TdDrdy8srJyenIcTV1dUHDhyQlMer7h4+fFgFPxQlXQKZ7+rVq3h1s549e9Y73b6EFewAASCgqwSePn2KR/VKbsijR49++vRpQ/6yWKy9e/fiIdi4Stu2bTdv3txQeRKPg8xHIkwwRSKBp0+ffvXVV5IMQgjJT6Lq6uoTJ06YmppKqtjb269ataqh32MkdhVkPhJhgimyCPB4vAMHDlhYWEgyws/PLyMjoyH7IpEoLS2tS5cueEgsHlp+8ODBhmTBhuw07zjIfM3jBrWUSkAgEBw5csTW1laSREOGDElNTZXTaEZGxoABA/DrvXh62V27dqngawhkPjlBgVO6SkCLZT4mkzlr1iz8aBohxGAwVq9eXVpa2lCoCIKIi4tr37695GY0YMCAqKiohsor4zjIfDt37sRvWI8ePVoyzzGXy3379m10dPTt27cTEhIyMjJU82eTMkIMNoEAEPgkgdzc3NWrV0v+zkMIrVixQs5DGh6P9+DBgzZt2kh+X/Xq1SsiIuKTDSleAGQ+xRmCBWUQyM/PX7NmjXQSrVy5Uk4SCQSCZ8+eSSdRjx49Ll++rIy+ydgEmU8GCHzUEAKvXr3atGlT0L/b1atX5Q9uZbFYv//+e3BwMK6xa9eu169fq8YXkPlUwxlaaSqB169fb9269d8cCjpz5oz8oXnV1dXh4eGSJNq+ffurV6+a2mjzysNLu83jBrW0l4AWy3xCoTAiIiIwMNDPz2/ixImBgYHPnj2rra2VE4zi4uKjR49OmjTJz89v6tSpYWFhBQUFcsqTfgpkvsDAQEtLSzqdHhQUVFlZKRQKMzIyjh49+s033wwZ
MsTLy2v06NHBwcFJSUkNTY9CelDAIBAAAiomwOVy79+/j+/efn5+s2bNio2N5XA4DXVDJBJVVFT88MMPU6dO9fPzmzZt2v79+/Pz8xsqT+JxkPlIhAmmSCSAte/GJ5FYLGaxWKGhoTiJ/P399+7dq5okApmPxLiDKRIJ8Hi8kpKS7H83Lpcr/xkzQRDV1dW5ubm4RllZmcreBwKZj8S4gykSCcgkEZvNlj80T41JBDIfiXEHU1pBQItlPrFYTBBEcXHx27dv09PTy8rK5N9ZcDwEAsH79+9TU1Nzc3Pla4LKiJ8+y3wikai2tnbw4MF0Ot3a2vrgwYM8Hi8tLW3BggUdOnRo27atlZUVnlDcxMTE19c3Nze3MQFVRpjAJhAAAsomQBBEeXn5249baWlpY34s8fn8zMzMt2/f5uXlcblcZfcQ2weZTzWcoZVmEBAKhc1Oouzs7IZmSW9GT+RXAZlPPh84CwQ+SQBkvk8iggJAQD4BkPnk84GzukdAu2U+rYuHPst8QqEwKSnJxcUFIeTp6Xnu3Lm0tDRfX18HB4eRI0d+9913M2fOdHZ2xkujIIT+/PPPmpoarQsxdBgIAAFdIgAyny5FE3xRCwGQ+dSCHRrVJQIg8+lSNMEXtRAAmU8t2KFRNRIAmU+l8PVZ5uPxeOHh4W3btkUIjRs3bsuWLf7+/u3bt4+IiOBwOARB8Pn833//3czMDE+euGPHjrKyMpWGBxoDAkAACPyXAMh8/+UBn4BAkwmAzNdkZFABCPyXAMh8/+UBn4BAkwmAzNdkZFBBywmAzKfSAOqzzMfhcIKDg62trRFCeA6+AQMGXLx4UXoylLt379rZ2WGZb+vWrSUlJSoNDzQGBIAAEPgvAZD5/ssDPgGBJhMAma/JyKACEPgvAZD5/ssDPgGBJhMAma/JyKCClhMAmU+lAdRnmY/FYo0cORIP1rO0tPTy8tq9e3d1dbV0AGJjY+3t7bHM9+uvv8JoPmk4sA8EgIDqCYDMp3rm0KKOEQCZT8cCCu6ongDIfKpnDi3qGAGQ+XQsoODOJwmAzPdJRGQW0FuZjyCInJycdu3a4UU2rKysVq1aJbPGH0EQly5dsrCwwDLflStX5Ky8SWZUwBYQAAJAoAECIPM1AAYOA4HGEgCZr7GkoBwQaIAAyHwNgIHDQKCxBEDmaywpKKcrBEDmU2kk9Vbmq62tvXfvHoPBwBLe0KFDo6KiZNCz2ewdO3bQ6XQajdayZcuUlBRYaVcGEXwEAkBAxQRA5lMxcGhO9wiAzKd7MQWPVEwAZD4VA4fmdI8AyHy6F1PwSD4BkPnk8yH5rN7KfJWVlXv27MFD+UxNTX/++efKykoZuDk5OcHBwQghBoPRu3dvTZ6Yj8PhZGVlZWRkCAQCGS/gIxAAArpEAGQ+XYom+KIWAiDzqQU7NKpLBEDm06Vogi9qIQAyn1qwQ6NqJAAyn0rh663Mx2QyFy9eTKfTEULu7u6xsbF1uT9//nzixIkIITMzs8DAwLo6oHQVgiA4HE5FRUV1dbVQKJQ+pdR9Pp/PYrFiYmIGDRrk6elZXFwsEomU2iIYBwJAQI0E/yVUNwAAIABJREFUQOZTI3xoWjcIgMynG3EEL9RIAGQ+NcKHpnWDAMh8uhFH8KLxBEDmazwrEkrqrcyXlZXVv39/Go2GEJoxY8bbt2/r0rx582bPnj0RQlZWVkeOHKmpqalbRnKkqKho8+bNVlZWfn5+GRkZkuPK3rl7966Pjw+DwTA2NgaZT9m0wT4QUDsBkPnUHgLogLYTAJlP2yMI/Vc7AZD51B4C6IC2EwCZT9sjCP1vKgGQ+ZpKTKHyeivzpaamWlpaUqlUhNCOHTvqXUL31KlTlpaWCCF7e/vnz5/Lfx/29evXo0ePNjMzW7hwYb3WFIpTncpCofD+/ftTpkxxcXExNTXFbxaDzFeHExwAArpGAGQ+XYso+KNyAiDzqRw5NKhrBEDm07WIgj8qJwAyn8qRQ4Nq
JgAyn0oDoJ8yH5fLjYiIoNFoFArFxMQkKiqq7mu2FRUVISEhdDodT8xXXV0t52VYJpO5c+dOGxsba2vr8PBwpS7Iy+FwHj58uHjxYi8vry+//HL79u3z5s2ztbVlMBgg86k0eaAxIKAOAiDzqYM6tKlTBEDm06lwgjPqIAAynzqoQ5s6RQBkPp0KJzjTCAIg8zUCEnlF9FPmKyws/OGHH/Aau66urm/evKlL9OXLl+PHj8dv7M6ZM6euDigWiwUCQWRk5B9//BEcHOzh4YEQMjExmTJlys6dOw9+3E6dOvXw4UM5+mDddj95pKys7PDhw1988cXmzZsfPHhQVlZ24cKFrl27gsz3SXRQAAjoAAGQ+XQgiOCCegmAzKde/tC6DhAAmU8HggguqJcAyHzq5Q+tq54AyHwqZa6fMt+7d+8CAgKwzDd27Nh6p9K7evVqnz59EELt27f/5Zdf6sp8IpGIxWKNGjXKzMwMm6r7r4uLyy+//EKuzFddXf3kyZOIiAgej4evlTt37vTs2RNkPpVmDjQGBNREAGQ+NYGHZnWHAMh8uhNL8ERNBEDmUxN4aFZ3CIDMpzuxBE8aRwBkvsZxIqmUfsp8z549GzBgAFblgoODCwoK6uLcv39/hw4dEEKdO3e+dOkSQRBisVgoFHK5XKyvYZlv7dq1o0aNcnFxMTIyMjAwsLe3Hzhw4MSJE6d83FatWlXvGr51m1PkCMh8itCDukBAuwiAzKdd8YLeaiABkPk0MCjQJe0iADKfdsULequBBEDm08CgQJeUSgBkPqXilTWuhzKfSCS6c+eOra0tlvkOHTpUWVkpy0Us3rhxo4WFBULI09PzyZMnxMctKyvr3r17qamp0uV5PN6OHTvatWtna2u7cOFCpU7MJ92uZB9kPgkK2AECOk8AZD6dDzE4qGwCIPMpmzDY13kCIPPpfIjBQWUTAJlP2YTBvqYRAJlPpRHRQ5mvpqZm165dFAoFy3wxMTF8Pr8u9LoyH5PJnDt3brdu3UJDQ6VX3WWz2atWrTIyMnJ1db148SIe91fXoPKOgMynPLZgGQhoGgGQ+TQtItAfrSMAMp/WhQw6rGkEQObTtIhAf7SOAMh8Whcy6LCCBEDmUxBg06rrocyXk5MTHByMEKLRaDY2Nrm5ufXOnXfs2LHOnTsjhGxsbCZMmPDzzz/7+Ph07tx51apVMnP5vXnzZvLkyVQq1cPD4+HDhyDzNe0ShNJAAAg0hQDIfE2hBWWBQD0EQOarBwocAgJNIQAyX1NoQVkgUA8BkPnqgQKHdJoAyHwqDa8eynyJiYl4CV0jI6OBAweWlZXVSzw+Pn7UqFEIIQMDAysrqw4dOri7u2/atCk1NVV6KJ9YLI6KivL29jY2Nh49enRxcXG91vDBhISErVu3Lm/KtmrVqp07d7LZbDlmYTSfHDhwCgjoGAGQ+XQsoOCO6gmAzKd65tCijhEAmU/HAgruqJ4AyHyqZw4tqpcAyHwq5a+HMl9qaur27dsDAwOXLFny+++/19TU1Eu8oqIiPDx8woQJw4YN8/HxmTFjxpUrV/Ly8mQ0PqFQeODAgS5duuCJ+WTOylg+duxYp06d6i7IK+eIkZFRjx49SktL6x1yiO2DzCfDGT4CAR0mADKfDgcXXFMNAZD5VMMZWtFhAiDz6XBwwTXVEACZTzWcoRXNIQAyn0pjoYcyH5vNzsvLy8zMzM7OrqiokPOObVlZ2bNnz6Kjo+/fv5+SklLvFH5sNnv+/PlWVlZdu3Y9ePCgHDFOLBYnJibu27cvtCnb5s2bDx8+LH9ZD5D5VJoz0BgQUCsBkPnUih8a1wUCIPPpQhTBB7USAJlPrfihcV0gADKfLkQRfGgKAZD5mkJL4bJ6KPMpzOw/BoqLi8ePH29oaOjt7R0fH/+fc6r6ADKfqkhDO0BA/QRA5lN/DKAHWk4AZD4tDyB0X/0EQOZTfwygB1pOAGQ+LQ8gdL/JBEDmazIyRSqAzKcIPbFY
nJSUNGzYMDqd7uvrm5aWpqC15lUHma953KAWENBGAiDzaWPUoM8aRQBkPo0KB3RGGwmAzKeNUYM+axQBkPk0KhzQGRUQAJlPBZD/fxMg8/1/Fs3aO336dNeuXU1MTObMmcNkMptlQ9FKIPMpShDqAwHtIQAyn/bECnqqoQRA5tPQwEC3tIcAyHzaEyvoqYYSAJlPQwMD3VIaAZD5lIa2PsMg89VHpQnHtmzZ0q5du5YtWwYHB/+/9u48rsoy///4fViOILsKkRqLIAKiIhgpzmBmkMsorpWKZkMqIa711TSztGmcxsmaylQKMpvUxpTUHBVcUBk3NEdFBEQLEFlEluNhOWu/x3g/Oj8G8ECuZ3ndf+jh3Nd939f1/JxLOe9zn/uur68Xt1QqlRs3bty1a9etW7d+w77utikx393KsR0CxidAzGd8NaPHBiZAzGdgBaE7xidAzGd8NaPHBiZAzGdgBaE7D1yAmO+BEzc+ADFfY427eLx06dLOnTtLpdLJkycXFRVptVq5XP7mm2+GhYWtXLmysrLyLvb5Wzch5vutYrRHwHgFiPmMt3b03EAEiPkMpBB0w3gFiPmMt3b03EAEiPkMpBB046EJEPM9NOr/HoiY7x65k5OTAwICBEHw8/ObO3fuunXrXn/99R49esTGxh4/flyhUNzj/lvd/MaNG6tWrXr88cetrKy8vLwOHTr0EA7aaq9ogAACD0iAmO8BwbJb8xEg5jOfWjPSByRAzPeAYNmt+QgQ85lPrRmpKEDM91BfCcR898idn58fHx/v5uYm3F6sra0DAgLefPPNS5cuabXae9x5i5trtVqZTJaZmZmenn7o0KFVq1b169dPPLqtre3UqVO///77Q4cOHT58ODs7m8ivRUOeRMB4BYj5jLd29NxABIj5DKQQdMN4BYj5jLd29NxABIj5DKQQdOOhCRDzPTTq/x6ImO/euU+fPv3GG2+Mur08//zzn3/+uVwuv/fd3mkPGo0mNzd3zpw5o0ePFg/a4p/jx49fvXp1VVXVnfbD8wggYIwCxHzGWDX6bFACxHwGVQ46Y4wC58+fHzVq1EsvvaS7LLUxjoI+I/AIBcRJ9PLLLzc0NDzCbnBoBB6aADHfQ6P+74GI+R4qNwdDAAEE7k3g5MmTXbt2DQ4OfqAfJ9xbH9kaAYMWOHHiRNeuXUNCQmpraw26o3QOAQQQQAABBBAwCQFivodaRmK+h8rNwRBAAIF7E1Cr1XW3lwd0WYB76x1bI2AEAiqViklkBHWiiwgggAACCCBgKgLEfA+1ksR8D5WbgyGAAAIIIIAAAggggAACCCCAAAJmI0DM91BLTcz3ULk5GAIIIIAAAggggAACCCCAAAIIIGA2AsR8D7XUxHwPlZuDIYAAAggggAACCCCAAAIIIIAAAmYjQMz3UEtdU1Oz6PZSXFz8UA/MwRBAAAEEEEAAAQQQQAABBBBAAAEETFqAmM+ky8vgEEAAAQQQQAABBBBAAAEEEEAAAQTMQ4CYzzzqzCgRQAABBBBAAAEEEEAAAQQQQAABBExagJjPpMvL4BBAAAEEEEAAAQQQQAABBBBAAAEEzEOAmM886swoEUAAAQQQQAABBBBAAAEEEEAAAQRMWoCYz6TLy+AQQAABBBBAAAEEEEAAAQQQQAABBMxDgJjPPOrMKBFAAAEEEEAAAQQQQAABBBBAAAEETFqAmM+ky8vgEEAAAQQQQAABBBBAAAEEEEAAAQTMQ4CYzzzqzCgRQAABBBBAAAEEEEAAAQQQQAABBExagJjPpMvL4BBAAAEEEEAAAQQQQAABBBBAAAEEzEOAmM886swoEUAAAQQQQAABBBBAAAEEEEAAAQRMWoCYz6TLy+AQQAABBBBAAAEEEEAAAQQQQAABBMxDgJjPPOrMKBFAAAEEEEAAAQQQQAABBBBAAAEETFqAmM+ky8vgEEAAAQQQQAABBBBAAAEEEEAAAQTMQ4CYzzzqzCgRQAABBBBAAAEEEEAA
AQQQQAABBExagJjPpMvL4BBAAAEEEEAAAQQQQAABBBBAAAEEzEOAmM886swoEUAAAQQQQAABBBBAAAEEEEAAAQRMWoCYz6TLy+AQQAABBBBAAAEEEEAAAQQQQAABBMxDgJjPPOrMKBFAAAEEEEAAAQQQQAABBBBAAAEETFqAmM+ky8vgEEAAAQQQQAABBBBAAAEEEEAAAQTMQ4CYzzzqzCgRQAABBBBAAAEEEEAAAQQQQAABBExagJjPpMvL4BBAAAEEEEAAAQQQQAABBBBAAAEEzEOAmM886swoEUAAAQQQQAABBBBAwAAEtFqtWq1W3F7UarWeHonNlEqlVqvV04xVCJibQNsnkUqlUigUTCJze4WY+XiJ+cz8BcDwEUAAATMV0Gq1mtuL/vdOumZmysSwEbiDQBunRltm2R2OwNMImKaAVqutq6vLy8tLS0s7dOhQQUGBSqVqPlStVltfX5+Tk5OWlnbixIn6+vrmbXgGAfMU+E2T6NKlS2lpaSdPnmQSmeerxTxHTcxnnnVn1AgggIBZC9TX1x8+fDgxMXHDhg3Z2dkKhaJFjpqamrS0tHXr1v3www/l5eX6A8EW98CTCJikQE1NTWpq6tq1a7/66quCgoI7nY5UVlb2ww8/JCUl7d+//9atWyZJwaAQ+K0CWVlZc+fO7d69u7u7e+fOnQMDA3fv3l1bW9tkPwcOHBg/fryXl5e7u/vAgQPT09PvNNGabMiPCJi8QFZW1pw5c3STqGfPnv/6179anETjxo3z9PR0d3f/3e9+xyQy+RcGA9QJEPPpKHiAAAIIIGD6Ag0NDRkZGQkJCX379vX19fXz83vuuec2b97c5DNeuVy+Z8+emTNn9u7d29fX96mnnrpy5Yrp6zBCBFoTUCgUaWlp06dP7927t4+Pj5+f36hRow4ePNgkK6+urt68efPkyZODgoL8/f2HDRsml8tb2zfrETB9gby8vLi4OG9vbxcXF6lUKpFIrK2tR40alZubqxu8Wq1OS0t77rnnnJ2dLSwsBEEIDg4uKCjQaDS6NjxAwGwF8vLyZs6c6e3t3alTp/bt2+smUV5ens5EpVLt27cvKirKyclJnER9+/YtLCxkEumIeGDaAsR8pl1fRocAAggg8P8FVCpVWlrauHHjvLy8fH197e3tLS0tbW1tp02bdunSJV07hUKxZcuWyMhIZ2dnQRAsLS0DAgIKCwt1DXiAgHkKqNXq77//fvTo0T4+Pl5eXra2thYWFnZ2dvPmzbt69arOpKGh4e9//3v//v3t7e0FQWjfvv3gwYPr6up0DXiAgBkKaLXaGzduLFiwYPDgwe++++7WrVtnzZplbW0tkUg6duy4fft2MStvaGg4ffp0ZGSki4tLYGCgl5eXjY1NdHR0i1/sNUNGhmzOAhqNpry8fP78+U8//fSf/vSnrVu3zp49WzeJUlJSGk+iZ599VpxE3t7ednZ2Y8aMYRKZ84vH3MZOzGduFWe8CCCAgPkKZGdnv/LKK6GhoQsXLty8efOUKVM6deokCEJoaOi2bdtEF7VavXfv3qioKHd3d39//+DgYKlUOmHChNLSUvOFY+QI3BbIzMwcP378M888s3z58i+++GLMmDHt2rUTBGHAgAGHDx8WkRoaGjZv3hwSEvL444/36dMnMDCwU6dOCQkJDQ0NKCJgzgIajebUqVPDhw9fvHjx1atXVSrV6dOnfXx8LC0tBUFYvnx5eXm5SqXKy8ubPHmyo6PjiBEjvvzyy48//jg2Nnb9+vXmTMfYERAF1Gr1mTNnhg0b1ngS+fn5iZNoxYoV4iTKzc2dNGmSnZ1dVFRUUlLSp59+GhcXl5iYCCMC5iNAzHefay3eyqfV84EbGhq4vsZ9pmd3CCCAQGsCqampkyZNWrJkyY0bNzQaTWZmZnBwsIWFhYuLy9tvvy3eKyArK2vkyJHe3t4vvPDCF198sXnz5lmzZm3bto0ri7Wmy3rTF9i4cePo0aM//fTTqqqq
hoaGo0ePurq6WlhYuLm5bdy4UaPRKBSKjIyMJ598skuXLuPHj//888+Tk5MXLVqUmprKaRSm//pghHoFNBpNTk7O119/fenSJfGdQllZ2QsvvCBm5a+88kpWVlZxcfGf/vQnOzu7QYMGpaen19XVyeXywsLCGzdu6N03KxEwCwGNRpOfn/+Pf/wjJydHnETl5eUxMTHiJJo+ffrFixeLi4vfffddOzu7AQMG7N27Vy6X19bWXr9+nUlkFi8RBvmrADHfrxL3/Hdtbe3NmzfPnj2bnp6ek5Mjk8maX6xdrVbfunWrvLz8yJEjFy9elMlk93xYdoAAAggg0FaBgoKCw4cP667eolQqR44c2b59e0EQxo4de+PGDZlMNm/ePA8Pj9GjR2dmZrZ1v7RDwDwELl26dOjQoeLiYnG4dXV1oaGh4vur119/vbKysqioaNKkSS4uLmPGjPn3v//NJ5rm8bpglHcpIJPJPvzwQzs7O0EQhg8fvnXr1g0bNvj5+XXv3n3//v1Nrhh7l8dgMwRMWqCmpmb16tXiL3IjRoz47rvvmEQmXXAG11YBYr62Sulvp9FoEhMTg4ODxV92u3TpsmDBgibfT9FoNIWFhQsXLhQv9iSRSObOncuH2/phWYsAAgg8UIHly5d37dpVEITBgwfv2bNn69atzs7O4eHhe/fuJaF4oPLs3AQEFApFbGxsx44dBUGIiYnZs2fPRx99JJVKIyIizp071/zDThMYMkNA4D4KKBSKw4cPOzk5SSSSfv36vfzyy0OHDu3WrdvXX3/NG4T76MyuTFhAoVCkp6c7OjpKJJInn3xSnETe3t5JSUlMIhOuO0NrVYCYr1Wi1hvIZLK33nrLy8urXbt2EolEEAQrKys/P7/Nmzc3vvHctWvXpk+f7uLiIl4+oHPnzsnJya3vnRYIIIAAAg9MYOPGjb169RIEoXfv3vPnz+/Vq5eHh0dKSgp3BX1g5OzYdASUSuUnn3zi4eEhBuWvvPJKz549AwMDT5061fj3H9MZMCNB4L4KaLXa6upqX19fKysrGxsbR0fH3r17/+1vf+M8vvvKzM5MWUCr1VZVVXXr1k03iXr06LFs2TImkSlXnbG1QYCYrw1IepvU1NRs2rTpiSeeGD9+/N69ez/77LOwsDBBEJycnKZOnap7o1hQUPDOO+88/vjjDg4O3bt3t7e379+//759+/Tum5UIIIAAAg9W4MiRIxEREYIgODg4eHh4PPbYY+vXr6+srOREpAfrzt5NQkCtVqenp/v7+wuC4OLi0qVLl169en333XdNvs1gEmNlEAg8EIH6+vqoqCjxntSurq4zZsz46aefHsiR2CkCJipQV1cXGRkpTiJnZ+cpU6ZcvnzZRMfKsBBoqwAxX1ul7tTu2rVr8+fPHzhw4MmTJ+vr64uKihISEgRBkEql3bt3Ly4u1mq1FRUVa9euDby9/OUvf9m/f//cuXOXLVt26dKlO+2W5xFAAAEEHoLAtWvXoqOjhduLq6vr3LlzxbtztP3QWVlZ33zzTVpaGl/ybTsaLU1DQKvVXr9+PSQkRJxBvr6+77//vlwu15+Si9dQT05OXvW/y0cffbRt2za+ZmUarw1G0UYBhUIxa9Ys8Z7v/fr127RpU6v38RP3rFAosrKyVq9e/be//U03k5hEbWSnmSkJNDQ0xMfHi5MoKCgoOTm5jZOoCYJMJktLS/vggw+Sk5NLSkrubidN9smPCDwqAWK+e5WvqqpKS0tLTU0Vv5+iUqmSkpKcnJwsLCzat2+fmZkpk8m2bds2aNCggICAVatW1dTUaDSa3NsL9228V322RwABBO5NQKlULliwQLwCeo8ePdLT09uY1olRRWpq6owZM3r06BEXF8e3FO+tFGxtlAJqtXrUqFE2NjaCIAwZMuTMmTP6h1FQULB9+/YZM2Z4eHj4+fnNmDHjvffe++CDD6ZMmeLh4dGzZ89PPvkkPz9fqVTq3w9rETANAYVCsWzZMnd3
d0EQBgwY8MMPP7Q6roKCgoMHDyYlJc2cOXPgwIErV6784PYSExPDJGpVjwamJ6BQKN566y1xEoWGhm7fvv0uxqjRaFJTU4cOHWppaRkYGHjy5Mk2/jZ4F8diEwQeggAx3/1HPnLkSJ8+fQRBsLa23rZt27/+9a/o6GgfH5833nijoqLi/h+PPSKAAAII3K2ASqVauXKlm5ubIAj+/v5tuTeoUqksKyvLzMyMi4sTN3RyciLmu9sKsJ1xC6hUqtjYWPHeYpGRkRcuXNA/nn/84x8BAQE2NjZBQUErV67UXdvk3LlzEydOtLe3t7GxSUhIKCoq4kwK/ZKsNQEBtVpdVlY2derUDh06CILg4+PzxRdftDou3SQKCQlpfLOO//znP7pJNHv27GvXrjGJWsWkgbELqNXq0tLSKVOmiJPIw8Nj3bp1dzGogoKCV155RbyjFDHfXQCyiaEJEPPd/4pkZWVNmDBBvBHH8uXLR4wY4e3tPXPmzCtXrtz/g7FHBBBAAIF7ELhy5Yr4vkgQBE9Pz88//1zPaURqtfrWrVsXLlxYtGhRhw4dvL29HR0dLS0tifnuoQJsasQCGo3m4sWLERER4tl8/fr127Fjh/7xiAmFt7f3O++80ziDUKvVqampffv2lUgkjo6O27Ztq6ur078r1iJg1AJarfbmzZsffvhhhw4dbGxsxP9K3n333cbzosUBipOoW7duy5cvb9xYrVbv27evb9++FhYWjo6O27dvZxK1CMiTJiMgTqLVq1frJpGNjc3SpUsbz4u2DLahoWHNmjXBwcE2NjYWFhbEfG1Bo42BCxDz3f8CFRcXL1myRBAECwsLZ2dnqVQ6adKk8+fP3/8jsUcEEEAAgXsQUCgUsbGxbm5u4k3SH3vssUWLFun57u21a9cSEhKsra3d3d2XLFlSVlb20ksvdezYkZjvHorApkYs0NDQMGHCBCcnJ3EG+fn5rVmzRv94xITi2WefzcrKatKyoKBgxowZktvLunXrqqurmzTgRwRMSaCurm7btm2urq5hYWEhISEuLi4SiWTs2LFlZWX6hylOosjIyBYn0fTp08VJtH79eiaRfknWGrtAXV3dd999J06i/v37d+rUSSKRjBs3rry8vO1DU6vVO3bs8PPzCw8PDw0NtbGxIeZrux4tDVaAmO/+l6a+vn7Dhg2CIEgkEktLyyFDhhw7dkzP6SH3vwfsEQEEEECgNQGlUvnnP/+5W7dukyZNioyMFATB3t5+xIgRek5/uHjx4pIlSxYuXHjlyhW5XK7RaFasWPHEE08Q87WGzXoTFKiurn7vvfc6deoUFxcnXqvE1dX1tdde0z/Uurq6mzdvVlVVNb/VhhjziXfzWLt2bVVVlf5dsRYB4xWor69PSUkJDQ319PRMS0tbtGiRj4+PIAgRERFHjx7VPy79k2j69OmczacfkLWmIVBfX799+/a+fft27tw5LS1t6dKlfn5+4iTKyMho4xg1Gk1hYWFISEjXrl2XLl06depUa2trYr426tHMkAWI+e5/dTQazbFjx8Tb/QiCsHLlyt/0kcL97xB7RAABBBD4X4Ha2tpdu3YFBARERkamp6evWbPGycnJ0tLS09OzuLj4Tl/3qK2tvX79euNTLf7yl794eHgQ8/2vLj+ZvsDNmzeTk5N9fX2HDBly7ty51157zcHBQSqVRkZG1tfX3934ifnuzo2tjE5ArVbv3r07MjKyW7duK1asuHXr1pYtW8Q7VgcGBiYlJelGJJfL2z6hNBrN4cOHw8PDra2tn3rqqVOnTnEPAZ0kD0xMQK1W79mzZ+jQoeK31+Vy+Y4dO8LCwgRBCAwMTE5O1o23vr5ez9k2lZWVCxcudHBweP3113fv3h0XF2dlZUXMp9PjgfEKEPM9kNplZWV5enqKn0ivXr26srKyjYepqKjYu3fv+vXrDxw4oNVq27gVzRBAAAEE2i5QV1eXkZERERHh6emZmJhYVVW1Z8+eHj16CILg6OiY
np7e0NAg7q2+vl7/5zTEfG1np6XJCMhksm3btoWFhXl6ev7zn/+sra1NSkoSz0UKDAw8ceKELlyoqam5ceNGWwauUqnS09NDQkLEC57s3LlTNw3bsjltEDBwAZlMlpKSkp2dLZPJDhw4MGHChO7du8fHx4s3yjh9+vQzzzwjCIJ47QhxLPX19Zs2bUpKSrp69WpbRpebm5uQkNDp9vLJJ5/cvHmzLVvRBgFjEaipqUlJSblw4UJNTc3+/ftfeOEFX1/f2NhYcRJdunQpKipKEISOHTvOnDlT/G9IqVRu2bJlw4YNLU6i6urqb7/91t/ff8KECRcuXMjJyXn11VeJ+Yzl9UA/9QsQ8+n3uZu1NTU13377re5svnnz5hUWFrZxR8eOHRs+fHjXrl0XL15MzNdGNJohgAAC+gW0Wm1lZeXZs2cLCwtv3Lhx5MiRF1980d7efs6cOUVFRVqt9vjx40OGDBEEoX379itWrBC/LVhdXZ2WlvbRRx/JZLI7/YNMzKdfnrWmISDYIaZ0AAASCklEQVTeD/TUqVOlpaVlZWU7d+4cNmzY448/vmDBAplM9ssvv6SmpkZERAiC0KVLl1WrVomnTlRUVGzdujU5ObktX7/9+eefX3vtNalU2q5du8mTJ//00093mnSmQcoozEpAqVT++OOPQUFBf/zjH//6179GR0f7+PhER0frvp9bVVU1ffr0du3a2djYREVFXb58uaSkJCUlJTIyMiYm5uzZs/q5Kioqjh07tnjx4uDg4KioqLVr15aWlurSdv3bshYBoxBQKpVnzpwJCgqaOnXq+++/Hx0d7eXlNXTo0AMHDoj9l8vlr776qq2trZWVVUhIyNGjR0tLS3fu3Pncc89NmTKl+SRqaGhIT09/9tlne/XqtW/fvvr6+qKiImI+o3gx0Mm2CBDztUXpN7RpaGhIS0sbNmyYq6ureDbfyJEjm18it8U9yuXyzz77zNvbOyAgYOPGjS224UkEEEAAgd8qoFard+3aNXz48AULFrz//vtjxoxxdnYeMmTI5cuXxTdCeXl506dPFwTB2to6LCzszJkzpaWlKSkpw4YNGzVqlPhBcYsHJeZrkYUnTUxALpd/9dVX/fv3X758+XvvvTd48GA3N7dJkyb99NNP4jfcs7OzJ06cKAiCg4PDsGHDCgoKSkpKNmzYEBkZGRsb2+JpFI2J6urqtmzZ0qNHj3bt2vXs2fP06dN67oTTeEMeI2AUAlVVVZ9++qlEIpFKpXZ2do6Ojk8//fSmTZsaX6Fy3bp14jeBXF1dp02btmzZsoG3l6SkJLlc3nyYarW6qqoqKyvrwoULH3/8ca9evSwtLcPCwr7//vvmjXkGAWMXqK6u/uyzz3STyM7O7qmnnvryyy8bT6LExERvb2/xhL7nn3/+3Xff/d3vfjdw4MDk5OQmk0ir1V66dGnWrFnibd/FtcR8xv4iof+NBYj5Gmvc62O1Wp2ZmTlx4kQ/P7+YmBhLS0uJRBIYGHjkyBE9u1ar1dXV1Tdv3ty5c6f4YXhoaOjevXtv/rpUV1fz1RU9gKxCAAEE9AsoFIolS5a4u7uLn77Y2NiEh4dnZGToTnaorq7++OOP27VrJzaIiYmJj48fMGBARETEDz/8oGfnxHx6cFhlMgI3btyIjY21sbERJ4idnd2oUaOOHz+uG2BNTc3SpUvFGWRraztnzpy4uLigoKCRI0emp6frmrX4QK1WZ2Rk/OEPf5BKpQEBAWvWrCHjaxGKJ41XoKioaN68eRKJxMrKyt7e/sknn9y4cWOT3+1PnTr14osv2tnZWVpaWlhY2NnZ+fr6fvHFF3e6W65cLt+5c2doaGifPn169+7t5eXl5uYWHR195syZFm9xY7x69ByBX375paSk5M0339RNop49e65bt65JeHfy5Mnnn3++ySRKSkpqPolkMtlf//rXwMDAUaNGlZSUiCePN4/5lEqlXC5vMlUpBwJGIUDMdz/LVFBQEBMT07Vr
14SEhP/85z/Ozs4WFhbt2rX77LPPdG8mmxxPq9Xm5OQ89dRTDg4OVlZWFhYWgiBYWFhYWVlJf11CQkI+//xzvr3ShI4fEUAAgTYKNDQ0TJgwwcnJSQwp+vfvv2/fvib/qJ47dy40NFRsYGFhYWlp2bdv3y1btjRp1uSIxHxNQPjRJAWuX7/++9//XiqVihMkOjo6MzOzydTYv39/v379BEGQSCSWt5eIiIj09PQmzZr7XL58ecyYMVKp1N/ff+3atXe6AU7zDXkGAWMREGM+S0tLDw+PiRMnHjx4sHlwIF4+YvTo0a6uro6OjmPHjt23b1+TFKPJeLVarfrX5eeff46Li5NKpba2tr6+vmfPnm18llOTDfkRAaMTKC4uXrhwoXirND2T6NixY9HR0a6urk5OTmPHjk1NTW1xEh04cOCZZ54ZMGDA3r17dRTNY76MjIwFCxZ88803rf5HptsJDxAwEAFivvtWiPLy8oSEBA8Pj0mTJmVnZ5eVlfXv39/GxkYikSxatKi0tFR3JKVSqfvHQqVSZWZmdujQwdLSUvztufmfTz/9dFpamm5zHiCAAAII/CYBhUIxe/bsiIiIIUOGLF68+OTJk83fYtXV1R05cuSPf/xjeHh4WFjY7Nmzjx8/XldXp/9AxHz6fVhrGgJlZWUTJ04cMGBAVFTUn//85+zs7OY3Lrx169bu3bunTZsWdntZsmTJxYsXm0+0xiAajSYvLy86OtrZ2Tk4OLj596oaN+YxAsYroFKpampqSkpKysvLq6qqFAqF7o1A40EpFIrKysrS20tlZaVCoWh76q1Wq4uLi9977z1bW9t27doNGDDg/PnzLR6l8RF5jICxCKhUKplMdl8m0dmzZ8eNGxcUFPT222/X1NToBBrHfCdOnJDL5XPnzh0xYsTXX3/d9pmo2xsPEHi0AsR8d+8vl8vPnTt37NixioqK8vLyOXPmiJcC3bdvn1KprK6unjZtmoODgyAI48ePFy/8qVar8/PzZ82apftsQavVyuXyM2fOpKSkDB061MnJyd3dfdKkSbt27Tr965Kbm9v8ZOO77zdbIoAAAmYmoNFoCgsL8/Ly8vPzS0tL7xQ9NDQ0FBUV5d5erl+/Xl9f36oTMV+rRDQwAQGFQvHTTz/l5uZeuXKloqKiecYnjrG2tlY3g8rKyu7UTGysUqlycnLGjh3r4uLi4+OzYsWKkpISE7BiCAg8KgGNRpOTk/PSSy9JJBJbW9uPP/6Ym+0+qlpwXIMVUCqVb731lqenp4ODQ1BQ0Lhx4179dYmOjvb29rawsHBwcIiIiBgxYsQTTzzRv39/Yj6DrSYd0yNAzKcHp5VVeXl5b7755siRIxcuXDh//vwuXbr06dNn06ZNYiRXV1e3fv168X67AQEBiYmJlZWVOTk5CQkJ/v7+W7dura2tbXwAmUz24osvOjg4hIWFtfo1scYb8hgBBBBA4FEJEPM9KnmOa9QCGo3mypUrMTExdnZ23bp1e+utt3Jzc416RHQeAUMQqKqqWrNmjZWVlUQiiYuL+/nnnw2hV/QBAcMRUKvVp06d2rFjx9Zmy8qVK8PDwy0tLR977LF58+Zt3rx569at+/fvLygoMJz+0xME2ihAzNdGqBaaHTx4cPDgwVKptGPHjs7Ozp6enp9++qnuy7lqtfrq1at9+vSxtra2tbV99tlnV6xYER8f7+PjExcXl5+f3+RE+pycnEGDBllaWv7hD3/48ccfWzgeTyGAAAIIGJgAMZ+BFYTuGIGAVqstLCxcvny5ra2th4fHkiVLLl++3OSXIiMYBl1EwPAEZDLZV199JZVKJRLJxIkT8/PzDa+P9AgBAxVo/KXdkydP3unC+gbae7qFwP8KEPP9r8dv+embb77x8fERBEEqlXbr1m3+/PllZWWNd6DRaObPn9+5c2fxlhpOTk5+fn4TJkzIzc1t8g+HVqtNSUkJCgqytrZ++eWXi4qKGu+HxwgggAAChilAzGeYdaFX
hixQU1OTmJjo5eXl7Ow8f/78wsLCxr3VaDTFxcUlJSVt+dZ84w15jIA5CGi12rKysvz8fN3tQRuPurq6OjEx0draWiKRxMfHcxZSYxweI6BfgJhPvw9rjUuAmO/u6/Xll18GBwcHBASEh4d/9NFHFRUVzT+LvnLlSmxsbHBwcFhY2MSJE9euXVtcXNz8Kp4ajebDDz/09vZ2c3NbsWIF98a6+6qwJQIIIPAQBYj5HiI2hzIFAY1Gc/z48REjRjg4OAwePLi8vLzxqLRabW1tbUxMTHx8/OXLlxuv4jECCPzyyy8NDQ2LFy/29/efOXNm85tElZaWLlu2TCKRWFlZJScny2Qy0BBAoI0CxHxthKKZUQgQ8919mdRqtUqlUiqVKpWqydl5up2Kt7pX/bo0D/jElnK5PDo62snJqVevXomJiXdqptstDxBAAAEEDEGAmM8QqkAfjEiguLh4zpw5Dg4O4eHh+fn5TX7hUSgU6enpnTt3njx5MjGfEZWVrj40ATHm69Kly3PPPZednd3kuNnZ2UOGDLGwsOjateuxY8eazK8mjfkRAQQaCxDzNdbgsbELEPM9+gpqNJry8vLg4GCpVDp+/PjTp08/+j7RAwQQQAABvQIajebf//73gAEDpFKppaXloEGDMjIy9G7BSgTMXUCpVH744YcBAQEWFhZ2dnb+/v6///3vBzVawsPDu3XrZm1t/X//93/Xrl0zdy/Gj0AzATHm69y5s4ODw7Bhw7Kzs3VZ3pkzZ1544YUOHTp079796NGjd7qnfLNd8gQCCPxX4OjRo0OHDpVIJG5ubrNnz+ZkWF4WRi1AzPfoy6dSqc6fP+/j42NpaTlr1izuivXoS0IPEEAAgZYE1Gp1RkbG8uXLZ8+eHR8fP3DgQAcHB+H20qFDh0GDBs2+vSxcuDA9PZ0ri7VEyHNmLXDt2rUpU6bY29uLs0bPn0uWLLl+/bpZYzF4BFoS0Gg0W7ZsefLJJwVBcHR0HDx4cHx8vPhfT1RU1NNPP7127drz588rFIqWtuY5BBBoQUCpVH799dfR0dGurq7iZfc9PT2nTZt29erV5pfkamF7nkLA8ASI+R59Terr67/66it3d3cbG5u33367oqLi0feJHiCAAAIINBPQaDSXL1/etWvXFr3Ld999l5ubq1Qqm+2AJxAwawGZTHbkyJFvv/1W7wT678rz58/X1taaNRaDR+AOAiUlJQcPHmxxEh06dKi6uvoO2/E0Agi0LKBWq0+cOLF9+3bdtPr222937tzJu/KWvXjWGASI+R59lWQyWUJCgpOTk4uLy9///nfd77VqtfrChQvl5eV3uvDfo+86PUAAAQQQQAABBBBAAAEEEEAAAQQQMAwBYr5HX4eqqqro6Gg7OzsXF5dPPvmkrq5OvNPc/v37hw8fvmPHDl3w9+j7Sg8QQAABBBBAAAEEEEAAAQQQQAABBAxSgJjv0Zelurr6xRdfFC/wNHPmzPPnz5eXl+/evdvJycnPz2/Hjh1c4OnRF4keIIAAAggggAACCCCAAAIIIIAAAoYtQMz36OtTX1+/ceNGNzc38ZKf9vb2Tk5Ojo6O3t7e586dU6lUj76L9AABBBBAAAEEEEAAAQQQQAABBBBAwLAFiPkefX00Go1cLn/77befeOIJ8a5zXbt2feONN65fv84V3B99eegBAggggAACCCCAAAIIIIAAAgggYAwCxHyGUqWSkpIff/zx2O3lxx9/vH79uqH0jH4ggAACCCCAAAIIIIAAAggggAACCBi8ADGfwZeIDiKAAAIIIIAAAggggAACCCCAAAIIINCaADFfa0KsRwABBBBAAAEEEEAAAQQQQAABBBBAwOAFiPkMvkR0EAEEEEAAAQQQQAABBBBAAAEEEEAAgdYEiPlaE2I9AggggAACCCCAAAIIIIAAAggggAACBi9AzGfwJaKDCCCAAAIIIIAAAggggAACCCCAAAIItCZAzNeaEOsRQAABBBBAAAEEEEAA
AQQQQAABBBAweAFiPoMvER1EAAEEEEAAAQQQQAABBBBAAAEEEECgNQFivtaEWI8AAggggAACCCCAAAIIIIAAAggggIDBCxDzGXyJ6CACCCCAAAIIIIAAAggggAACCCCAAAKtCRDztSbEegQQQAABBBBAAAEEEEAAAQQQQAABBAxegJjP4EtEBxFAAAEEEEAAAQQQQAABBBBAAAEEEGhNgJivNSHWI4AAAggggAACCCCAAAIIIIAAAgggYPACxHwGXyI6iAACCCCAAAIIIIAAAggggAACCCCAQGsCxHytCbEeAQQQQAABBBBAAAEEEEAAAQQQQAABgxcg5jP4EtFBBBBAAAEEEEAAAQQQQAABBBBAAAEEWhMg5mtNiPUIIIAAAggggAACCCCAAAIIIIAAAggYvAAxn8GXiA4igAACCCCAAAIIIIAAAggggAACCCDQmgAxX2tCrEcAAQQQQAABBBBAAAEEEEAAAQQQQMDgBYj5DL5EdBABBBBAAAEEEEAAAQQQQAABBBBAAIHWBIj5WhNiPQIIIIAAAggggAACCCCAAAIIIIAAAgYvQMxn8CWigwgggAACCCCAAAIIIIAAAggggAACCLQmQMzXmhDrEUAAAQQQQAABBBBAAAEEEEAAAQQQMHgBYj6DLxEdRAABBBBAAAEEEEAAAQQQQAABBBBAoDWB/wfBkB+jQz606wAAAABJRU5ErkJggg==)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "s6J4qdfHw_5e" + }, + "source": [ + "After unfolding, the RNN can be treated as a feed-forward neural network, that is very **deep along the time axes**.\n", + "Therefore, the same algorithms adopted for training feed-forward neural networks can be used. Sometimes, the back-propagation algorithm in the context of recurrent neural networks is called **back-propagation through time**, to emphasize that the gradient is propagated through the time axes.\n", + "\n", + "An important aspect of RNNs is that the **parameters are shared across the time steps**, making model generalization easier.\n", + "Let's now take a look into a Vanilla RNN using SpeechBrain. First of all, let's install it and download some test data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "executionInfo": { + "elapsed": 24341, + "status": "ok", + "timestamp": 1708524174929, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "JwI8QTTCx5BT" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R3M-dxFWy2RE" + }, + "source": [ + "SpeechBrain has a bunch of RNNs implemented in `speechbrain.nnet.RNN`. Let's see an example with a vanilla RNN:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 11814, + "status": "ok", + "timestamp": 1708524186736, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "BzM1wQtJzhnh", + "outputId": "009aa35a-2ef3-41ef-973a-0f1843f4baef" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([4, 10, 5])\n" + ] + } + ], + "source": [ + "import torch\n", + "from speechbrain.nnet.RNN import RNN\n", + "\n", + "inp_tensor = torch.rand([4, 10, 20]) # [batch, time, features]\n", + "net = RNN(hidden_size=5, input_shape=inp_tensor.shape)\n", + "out_tensor, _ = net(inp_tensor)\n", + "\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nn1ewbdF0aT2" + }, + "source": [ + "As you can see, the expected input must be formatted as $[batch, time, features]$. 
This is a standard followed by all the neural networks implemented in SpeechBrain.\n", + "\n", + "The output has the same batch size (i.e., four), the same number of time-steps (i.e., 10), and the transformed feature dimension (which in this case is five like the selected hidden_size).\n", + "\n", + "Let's now take a look into the parameters:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 16, + "status": "ok", + "timestamp": 1708524186737, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "xqWA0OmT2qNp", + "outputId": "2da1677d-0895-41e2-e03b-8a704d4371af" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "rnn.weight_ih_l0 torch.Size([5, 20])\n", + "rnn.weight_hh_l0 torch.Size([5, 5])\n", + "rnn.bias_ih_l0 torch.Size([5])\n", + "rnn.bias_hh_l0 torch.Size([5])\n" + ] + } + ], + "source": [ + "for name, param in net.named_parameters():\n", + " if param.requires_grad:\n", + " print(name, param.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mGUI5PBn3Oe4" + }, + "source": [ + "\n", + "* The first parameter is the matrix $W$ (input-to-hidden) with dimension $[5, 20]$, where five is the hidden size, and 20 is the input dimension.\n", + "* The second parameter is $U$ (hidden-to-hidden) that corresponds to the recurrent weights. It is always a square matrix. In this case, the dimensionality is $[5,5]$ because of the selected hidden dimension.\n", + "* Finally, we have a couple of vectors composed of hidden_dim elements. These two tensors represent the bias term $b$. In this case, the term is split into two biases (one for the input and one for the recurrent connections). 
In other cases, a single bias (that embeds both) is used.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FFkGlSlNEO8g" + }, + "source": [ + "When setting **bidirectional=True**, a bidirectional RNN is used:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 14, + "status": "ok", + "timestamp": 1708524186737, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "0JoxqiDSEc_G", + "outputId": "d0ecba13-1da2-49fb-b603-a3ce635a66de" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([4, 10, 10])\n" + ] + } + ], + "source": [ + "inp_tensor = torch.rand([4, 10, 20]) # [batch, time, features]\n", + "net = RNN(hidden_size=5,\n", + " input_shape=inp_tensor.shape,\n", + " bidirectional=True\n", + " )\n", + "out_tensor, _ = net(inp_tensor)\n", + "\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uVUSxwgPEnJn" + }, + "source": [ + "In this case, we have two independent neural networks that scan the input sequence left-to-right and right-to-left. The resulting hidden states are then concatenated in a single \"**bidirectional**\" tensor. In the example, the feature dimension is now 10, which corresponds to two times the original hidden dimension." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d9C8r9JzFUVn" + }, + "source": [ + "In the previous examples, we use a single-layer RNN. 
We can make the model deeper in the feature dimension just by stacking more layers:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 11, + "status": "ok", + "timestamp": 1708524186737, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "KozFLrBfFkFe", + "outputId": "8e2c8709-5e9b-4964-8c73-b17792e40e8a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([4, 10, 10])\n" + ] + } + ], + "source": [ + "inp_tensor = torch.rand([4, 10, 20]) # [batch, time, features]\n", + "net = RNN(hidden_size=5,\n", + " input_shape=inp_tensor.shape,\n", + " bidirectional=True,\n", + " num_layers=3,\n", + " )\n", + "out_tensor, _ = net(inp_tensor)\n", + "\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "z3hVBHUg5dbH" + }, + "source": [ + "RNNs require back-propagating the gradient through many time steps. This operation, however, can be complicated by vanishing and exploding gradients. These issues impair learning long-term dependencies.\n", + "\n", + "* **Exploding gradient** can be tackled with simple clipping strategies.\n", + "\n", + "* **Vanishing gradient**, instead, is more critical. It could be attenuated by adding \"gradient shortcuts\" in the network design (think about residual networks, skip connections, or even attention mechanisms).\n", + "\n", + "A common approach for RNNs relies on **multiplicative gates**, whose core idea is to introduce a mechanism for better controlling the flow of information through the various time-steps.\n", + "\n", + "The most popular network relying on a gating mechanism is the Long-Short Term Memory (LSTM) that will be described in the following." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3P8ah1dSDoXw" + }, + "source": [ + "## 2. 
Long-Short Term Memory (LSTM)\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6aUX8Ua4Gy64" + }, + "source": [ + "LSTMs rely on a network design consisting of memory cells that are controlled by forget, input, and output gates:\n", + "\n", + "$f_t = \\sigma(W_f x_t + U_f h_{t-1} + b_f)$\n", + "\n", + "$i_t = \\sigma(W_i x_t + U_i h_{t-1} + b_i)$\n", + "\n", + "$o_t = \\sigma(W_o x_t + U_o h_{t-1} + b_o)$\n", + "\n", + "$\\widetilde{c}_t = \\tanh(W_c x_t + U_c h_{t-1} + b_c)$\n", + "\n", + "$c_t = f_t \\cdot c_{t-1} + i_t \\cdot \\widetilde{c}_t $\n", + "\n", + "$h_t = o_t \\cdot \\tanh(c_t)$,\n", + "\n", + "where $\\sigma$ is the sigmoid function.\n", + "\n", + "As you can see, the network design is quite complex, but this model turned out to be very general-purpose.\n", + "\n", + "The easiest way to see why this model can learn **long-term dependencies** is the following: with proper values of $f_t$, $i_t$, and $o_t$, we can store the internal cell state $c_t$ for an arbitrary number of time steps (if $f_t = 1$ and $i_t=0$).\n", + "\n", + "Let's see how to use an LSTM within SpeechBrain:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 8, + "status": "ok", + "timestamp": 1708524186737, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "YORiCIM1LY4G", + "outputId": "cd2d9e00-db77-4bbc-dfb4-5252c21056ce" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([4, 10, 5])\n" + ] + } + ], + "source": [ + "import torch\n", + "from speechbrain.nnet.RNN import LSTM\n", + "\n", + "inp_tensor = torch.rand([4, 10, 20]) # [batch, time, features]\n", + "net = LSTM(hidden_size=5, input_shape=inp_tensor.shape)\n", + "out_tensor, _ = net(inp_tensor)\n", + "\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + 
"metadata": { + "id": "70jAh8sILomt" + }, + "source": [ + "As you can see, the dimension in input and output are the same as the vanilla RNN (when using the same hidden_size). The number of parameters, however, is very different:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 6, + "status": "ok", + "timestamp": 1708524186737, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "sk4R1BSgMAi_", + "outputId": "9fbb6dfb-a608-443f-9886-42b49cc43b9f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "rnn.weight_ih_l0 torch.Size([20, 20])\n", + "rnn.weight_hh_l0 torch.Size([20, 5])\n", + "rnn.bias_ih_l0 torch.Size([20])\n", + "rnn.bias_hh_l0 torch.Size([20])\n" + ] + } + ], + "source": [ + "for name, param in net.named_parameters():\n", + " if param.requires_grad:\n", + " print(name, param.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "T1q6j-YRPkPG" + }, + "source": [ + "As you can see, we gather in single big tensors the groups of parameters. For instance, `rnn.weight_ih_l0` gathers all four input-to-hidden matrices of dimension $[5, 20]$ for $f$,$i$,$o$,$c$ into a single tensor of dimension $[20,20]$. A similar concatenation is done with the hidden-to-hidden weights `rnn.weight_hh_l0` and the biases.\n", + "\n", + "Similarly to the vanilla RNN, we can use the parameter `bidirectional=True` to employ a bidirectional neural network. We can also stack more layers using the argument `num_layers`." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-XwWy-wpRqC5" + }, + "source": [ + "## 3. Gated Recurrent Units (GRUs)\n", + "LSTMs rely on memory cells that are controlled by forget, input, and output gates. 
Despite their effectiveness, such a sophisticated gating mechanism might result in an overly complex model.\n", + "\n", + "A noteworthy attempt to **simplify LSTMs** led to a novel model called Gated Recurrent Unit (GRU), which is based on **two multiplicative gates** only. In particular, the GRU architecture is described by the following equations:\n", + "\n", + "$z_{t}=\\sigma(W_{z}x_{t}+U_{z}h_{t-1}+b_{z})$\n", + "\n", + "$r_{t}=\\sigma(W_{r}x_{t}+U_{r}h_{t-1}+b_{r})$\n", + "\n", + "$\\widetilde{h_{t}} =\\tanh(W_{h}x_{t}+U_{h}(h_{t-1} \\odot r_{t})+b_{h})$\n", + "\n", + "$h_{t}=z_{t} \\odot h_{t-1}+ (1-z_{t}) \\odot \\widetilde{h_{t}}$.\n", + "\n", + "where $z_{t}$ and $r_{t}$ are vectors corresponding to the update and reset gates, respectively, while $h_{t}$ represents the state vector for the current time frame $t$.\n", + "Computations denoted as $\\odot$ represent element-wise multiplications.\n", + "\n", + "Similar to LSTM, also GRU is designed to learn long-term dependencies. GRU, in fact, can store the hidden state $h_t$ for an arbitrary number of time steps. 
This happens when $z_t = 1$.\n", + "\n", + "Let's now see how we can use a GRU within SpeechBrain:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 377, + "status": "ok", + "timestamp": 1708524187110, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "P64McqyTVwjG", + "outputId": "01498252-41f9-4ef8-9712-0fae98b38cab" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([4, 10, 5])\n" + ] + } + ], + "source": [ + "import torch\n", + "from speechbrain.nnet.RNN import GRU\n", + "\n", + "inp_tensor = torch.rand([4, 10, 20]) # [batch, time, features]\n", + "net = GRU(hidden_size=5, input_shape=inp_tensor.shape)\n", + "out_tensor, _ = net(inp_tensor)\n", + "\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pnedeOypV6ha" + }, + "source": [ + "The output tensor has the same size as the vanilla RNN and the LSTM. 
The parameters inside the model are different:\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 12, + "status": "ok", + "timestamp": 1708524187110, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "QM3gu_lAWIdY", + "outputId": "38b2c02f-b393-407c-b73f-4893fa2c405a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "rnn.weight_ih_l0 torch.Size([15, 20])\n", + "rnn.weight_hh_l0 torch.Size([15, 5])\n", + "rnn.bias_ih_l0 torch.Size([15])\n", + "rnn.bias_hh_l0 torch.Size([15])\n" + ] + } + ], + "source": [ + "for name, param in net.named_parameters():\n", + " if param.requires_grad:\n", + " print(name, param.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "muXnwHnXWKb4" + }, + "source": [ + "Similar to LSTM, also the GRU models gather weight matrixes in bigger tensors. In this case, the weight matrixes are smaller because we have only two gates. You can play with the bidirectional and num_layers to use bidirectional RNN or to employ more layers." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OUGesx-7W-a6" + }, + "source": [ + "## 4. Light Gated Recurrent Units (LiGRU)\n", + "\n", + "Despite the interesting performance achieved by GRUs, which is normally comparable to that of the LSTMs, a further simplification of the model is possible.\n", + "\n", + "Recently, a model called **light GRU** has been proposed and turned out to perform well on speech processing tasks. 
The model is based on a single multiplicative gate, and it is described by the following equations:\n", + "\n", + "$z_{t}=\\sigma(BN(W_{z}x_{t})+U_{z}h_{t-1})$\n", + "\n", + "$\\widetilde{h_{t}}=\\mbox{ReLU}(BN(W_{h}x_{t})+U_{h}h_{t-1})$\n", + "\n", + "$h_{t}=z_{t} \\odot h_{t-1}+ (1-z_{t}) \\odot \\widetilde{h_{t}}$\n", + "\n", + "The LiGRU can be derived from the standard GRU model with the following modifications:\n", + "\n", + "\n", + "1. **Remove the reset gate**: in speech applications, the reset gate turned out to be redundant. In the LiGRU model, this is thus eliminated without any performance loss. As a result, the neural network is based on a single multiplicative gate only, with benefits on speed, parameters, and memory.\n", + "2. **Use ReLU + BatchNorm** in the candidate state $\\widetilde{h_{t}}$:\n", + "ReLU is the most popular activation function for feedforward neural networks. Differently from *tanh* and `sigmoids`, it doesn't have saturation points that cause small gradients. The adoption of ReLU-based neurons was not so common in the past for the RNN architectures. This was due to numerical instabilities originating from the unbounded ReLU functions applied over long time series. To circumvent these numerical issues, LiGRU couples it with batch normalization. Batch normalization not only contributes to limit numerical issues but also to improve performance.\n", + "3. **Shared parameters**: When using bidirectional architectures, two independent models are normally employed. The LiGRU, instead, shares the same parameters for both the left-to-right and right-to-left scan. 
This not only helps to reduce the total amount of parameters but also improves generalization.\n", + "\n", + "Let's see now how to use the LiGRU model in speechbrain:\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 9, + "status": "ok", + "timestamp": 1708524187110, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "T1x77D2We99i", + "outputId": "f3b01bd9-78f6-49f0-cb20-2463fd523f7c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([4, 10, 5])\n" + ] + } + ], + "source": [ + "import torch\n", + "from speechbrain.nnet.RNN import LiGRU\n", + "\n", + "inp_tensor = torch.rand([4, 10, 20]) # [batch, time, features]\n", + "net = LiGRU(hidden_size=5, input_shape=inp_tensor.shape)\n", + "out_tensor, _ = net(inp_tensor)\n", + "\n", + "print(out_tensor.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P2BLcBY_fHJ_" + }, + "source": [ + "As you can see the dimension in input and output are the same as the other recurrent models. 
The number of parameters, instead, is different:\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 7, + "status": "ok", + "timestamp": 1708524187111, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "4a64Jhwsfb-Y", + "outputId": "2a192848-5970-4c1d-8045-a7dcb3523844" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "rnn.0.w.weight torch.Size([10, 20])\n", + "rnn.0.u.weight torch.Size([10, 5])\n", + "rnn.0.norm.weight torch.Size([10])\n", + "rnn.0.norm.bias torch.Size([10])\n" + ] + } + ], + "source": [ + "for name, param in net.named_parameters():\n", + " if param.requires_grad:\n", + " print(name, param.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ndEkFRNqfe2w" + }, + "source": [ + "If we employ a bidirectional model, the number of parameters is the same (due to parameter sharing):" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 5, + "status": "ok", + "timestamp": 1708524187111, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "HScHidfAfnz6", + "outputId": "974c5bf9-f7fe-4b9c-a3a5-c3224184d26e" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([4, 10, 10])\n", + "rnn.0.w.weight torch.Size([10, 20])\n", + "rnn.0.u.weight torch.Size([10, 5])\n", + "rnn.0.norm.weight torch.Size([10])\n", + "rnn.0.norm.bias torch.Size([10])\n" + ] + } + ], + "source": [ + "import torch\n", + "from speechbrain.nnet.RNN import LiGRU\n", + "\n", + "inp_tensor = torch.rand([4, 10, 20]) # [batch, time, features]\n", + "net = LiGRU(hidden_size=5,\n", + " input_shape=inp_tensor.shape,\n", + " 
bidirectional=True)\n", + "out_tensor, _ = net(inp_tensor)\n", + "\n", + "print(out_tensor.shape)\n", + "\n", + "for name, param in net.named_parameters():\n", + " if param.requires_grad:\n", + " print(name, param.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nPqsn_XVf28_" + }, + "source": [ + "Similar to LSTM and GRU, LiGRUs can learn long-term dependencies. The hidden state $h_{t}$, for instance, can be stored for an arbitrary number of time steps when $z_{t}=1$.\n", + "\n", + "To conclude, let's compare the number of parameters employed by the different models discussed here:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 2008, + "status": "ok", + "timestamp": 1708524189116, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "YqEyaB04gnPz", + "outputId": "21d19890-ddbe-43b3-9abc-8c62017e315a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "RNN: 0.5332992 M\n", + "RNN: 2.1331968 M\n", + "RNN: 1.5998976 M\n", + "RNN: 0.5332992 M\n" + ] + } + ], + "source": [ + "import torch\n", + "from speechbrain.nnet.RNN import RNN, LSTM, GRU, LiGRU\n", + "\n", + "hidden_size = 512\n", + "num_layers = 4\n", + "bidirectional=True\n", + "\n", + "inp_tensor = torch.rand([4, 10, 80]) # [batch, time, features]\n", + "\n", + "rnn = RNN(hidden_size=hidden_size,\n", + " input_shape=inp_tensor.shape,\n", + " bidirectional=bidirectional,\n", + " num_layers=num_layers\n", + " )\n", + "\n", + "lstm = LSTM(hidden_size=hidden_size,\n", + " input_shape=inp_tensor.shape,\n", + " bidirectional=bidirectional,\n", + " num_layers=num_layers\n", + " )\n", + "\n", + "gru = GRU(hidden_size=hidden_size,\n", + " input_shape=inp_tensor.shape,\n", + " bidirectional=bidirectional,\n", + " num_layers=num_layers\n", + " )\n", + "\n", + "ligru = 
LiGRU(hidden_size=hidden_size,\n", + " input_shape=inp_tensor.shape,\n", + " bidirectional=bidirectional,\n", + " num_layers=num_layers\n", + " )\n", + "\n", + "\n", + "def count_parameters(model):\n", + " return sum(p.numel() for p in model.parameters() if p.requires_grad)\n", + "\n", + "print(\"RNN:\", count_parameters(rnn)/10e6, \"M\")\n", + "print(\"RNN:\", count_parameters(lstm)/10e6, \"M\")\n", + "print(\"RNN:\", count_parameters(gru)/10e6, \"M\")\n", + "print(\"RNN:\", count_parameters(ligru)/10e6, \"M\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1doJN4KjibQw" + }, + "source": [ + "The LiGRU is very **parameter efficient** and, in the bidirectional case, has the same number of parameters as a vanilla RNN (with the advantage of being able to learn long-term dependencies).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9s3dIbhjneQ9" + }, + "source": [ + "**## References**\n", + "\n", + "[1] S. Hochreiter, J. Schmidhuber, \"Long short-term memory. Neural computation\", 9, 1735--1780, 1997. [pdf](https://www.bioinf.jku.at/publications/older/2604.pdf)\n", + "\n", + "[2] J. Chung, C. Gulcehre, K. Cho, Y. Bengio, \"Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling\", 2014 [ArXiv](https://arxiv.org/abs/1412.3555)\n", + "\n", + "[3] M. Ravanelli, P. Brakel, M. Omologo, Y. Bengio, \"Light Gated Recurrent Units for Speech Recognition\", 2018 [ArXiv](https://arxiv.org/abs/1803.10225)\n", + "\n", + "[4] Y. Bengio; P. Simard; P. Frasconi, \"Learning long-term dependencies with gradient descent is difficult\", IEEE Transactions on Neural Networks, 1994\n", + "\n", + "[5] M. 
Ravanelli, \"Deep Learning for distant speech recogniton\", PhD thesis, 2017 [ArXiv](https://theses.eurasip.org/theses/755/deep-learning-for-distant-speech-recognition/download/)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " 
note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/nn/using-wav2vec-2.0-hubert-wavlm-and-whisper-from-huggingface-with-speechbrain.ipynb b/docs/tutorials/nn/using-wav2vec-2.0-hubert-wavlm-and-whisper-from-huggingface-with-speechbrain.ipynb new file mode 100644 index 0000000000..836d4b5650 --- /dev/null +++ b/docs/tutorials/nn/using-wav2vec-2.0-hubert-wavlm-and-whisper-from-huggingface-with-speechbrain.ipynb @@ -0,0 +1,691 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/nn/using-wav2vec-2.0-hubert-wavlm-and-whisper-from-huggingface-with-speechbrain.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/nn/using-wav2vec-2.0-hubert-wavlm-and-whisper-from-huggingface-with-speechbrain.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AI6a5KiMQWma" + }, + "source": [ + "# Fine-tuning or using Whisper, wav2vec2, HuBERT and others with SpeechBrain and HuggingFace\n", + "\n", + "This tutorial describes how to combine (use and finetune) pretrained models coming from the HuggingFace Transformers library including, for instance, Whisper, wav2vec 2.0, HuBERT, WavLM and others. 
Those models can be plugged easily into SpeechBrain to approach a speech- or audio-related task: automatic speech recognition, speaker recognition, spoken language understanding ...\n", + "\n", + "**What About Pre-training?**\n", + "Pre-training large SSL models is complex for many reasons ranging from necessary resources (dozens of GPUs for hundreds of hours) to replicability issues due to the pipeline. For now, SpeechBrain only provides pre-training of [wav2vec 2.0 models](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/self-supervised-learning/wav2vec2).\n", + "\n", + "**Why SpeechBrain?**\n", + "Different and numerous reasons may be mentioned to motivate the use of SpeechBrain. However, in the very specific context of pretrained models, SpeechBrain enables researchers and users to connect these architectures to state-of-the-art speech and audio-related technologies. For instance, SpeechBrain allows you to easily fine-tune a pretrained wav2vec2 model with a transformer decoder coupled with a beam search algorithm and a transformer language model to build a SOTA speech recognizer. It could also help you to simply use the encoder of a pretrained Whisper to perform emotion recognition. To the best of our knowledge, most other toolkits do not enable you to achieve this.\n", + "\n", + "**Architectures of interest for this tutorial**\n", + "We will only consider two of the most up-to-date existing pretrained models: wav2vec 2.0 and Whisper. However, SpeechBrain supports many others: wavLM, HuBERT ...\n", + "\n", + "Wav2Vec is a transformer-based encoder architecture enabling self-supervised representation learning of speech. 
Please refer to the official paper to obtain more details: [wav2vec2](https://arxiv.org/abs/2006.11477).\n", + "\n", + "\"drawing\"\n", + "\n", + "*Illustration of Wav2vec2, [source](https://medium.com/georgian-impact-blog/how-to-make-an-end-to-end-automatic-speech-recognition-system-with-wav2vec-2-0-dca6f8759920).*\n", + "\n", + "Whisper is a full transformer (encoder-decoder) trained on large amount of semi-supervised data (600k+ hours of speech). Please refer to the official paper to obtain more details: [whisper](https://cdn.openai.com/papers/whisper.pdf)\n", + "\n", + "\"drawing\"\n", + "\n", + "*Illustration of Whisper, [source](https://openai.com/blog/whisper/).*\n", + "\n", + "**With this tutorial, you will learn how to:**\n", + "1. Instantiate a wav2vec2 or a Whisper to extract features from an audio file.\n", + "2. Use wav2vec2 and Whisper encoders as a block of your pipeline (ASR, TIMIT).\n", + "3. Use Whisper as an encoder-decoder architecture for fine-tuning (ASR, LibriSpeech)\n", + "4. 
Understand current limitations of our integration.\n", + "\n", + "\n", + "## Prerequisites\n", + "- [SpeechBrain Introduction](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/introduction-to-speechbrain.html)\n", + "- [YAML tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html)\n", + "- [Brain Class tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html)\n", + "- [DataIOBasics](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)\n", + "- [Pretrained Models and Fine-Tuning](https://speechbrain.readthedocs.io/en/latest/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.html)\n", + "\n", + "## Wav2Vec 2.0 and Whisper from HuggingFace\n", + "\n", + "Wav2vec 2.0 models were originally shared via the [Fairseq GitHub](https://github.com/pytorch/fairseq/blob/master/examples/wav2vec/README.md) and moved very recently to [HuggingFace](https://huggingface.co/facebook) thanks to a nice integration to the HuggingFace [Transformers API](https://huggingface.co/transformers/model_doc/wav2vec2.html). The same thing happened with the Whisper model that went from the [original repository](https://github.com/openai/whisper) to the HuggingFace [Transformers API](https://huggingface.co/docs/transformers/model_doc/whisper). Hence, if you want to use a pretrained Transformer model within SpeechBrain you only need a HuggingFace repository! (e.g. 
\"facebook/wav2vec2-large-lv60\", \"openai/whisper-large\" or \"microsoft/wavlm-large\").\n", + "\n", + "But first, let's install all the needed packages ...\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "12izOP1EZjfU" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain\n", + "BRANCH = 'develop'\n", + "!git clone https://github.com/speechbrain/speechbrain.git -b $BRANCH\n", + "%cd /content/speechbrain/\n", + "!python -m pip install ." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "U5x89dTJaytG" + }, + "source": [ + "Install the HuggingFace Transformers interface." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ein3JZ8UeQvI" + }, + "source": [ + "Finally, let's download and load an audio file to play with." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DQj06kpWeW-T" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.dropbox.com/s/u8qyvuyie2op286/spk1_snt1.wav" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GdgmeP9efRhV" + }, + "outputs": [], + "source": [ + "import speechbrain as sb\n", + "\n", + "source = sb.dataio.dataio.read_audio('spk1_snt1.wav').squeeze()\n", + "print(source.shape)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2BUusNKEIDbV" + }, + "source": [ + "This is the imported signal:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BHm2ZR9oIJIP" + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "plt.figure(1)\n", + "plt.plot(source)\n", + "plt.show()\n", + "\n", + "from IPython.display import Audio\n", + "Audio('spk1_snt1.wav')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2HUOD6STmQja" + }, + "source": [ + "Wav2vec2, HuBERT, WavLM and Whisper models are offered as **lobes** in SpeechBrain. 
Hence, their implementation can be found in:\n", + "+ speechbrain.lobes.models.huggingface_wav2vec.py\n", + "+ speechbrain.lobes.models.huggingface_whisper.py\n", + "\n", + "Now, we instantiate one of each. It is important to note that in the following example, the returned object are **standard PyTorch Module** as it is almost always the case with SpeechBrain." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Z6KBl6x2mZuT" + }, + "outputs": [], + "source": [ + "# BE CAREFUL, IF YOU ARE NOT CONNECTED TO A GPU RUNTIME, THIS WILL CRASH\n", + "# THis only happens on Colab, you can of course load models on\n", + "from speechbrain.integrations.huggingface.wav2vec2 import Wav2Vec2\n", + "from speechbrain.integrations.huggingface.whisper import Whisper\n", + "\n", + "# HuggingFace model hub\n", + "model_hub_w2v2 = \"facebook/wav2vec2-base-960h\"\n", + "model_hub_whisper = \"openai/whisper-tiny\"\n", + "\n", + "model_w2v2 = Wav2Vec2(model_hub_w2v2, save_path='/content/pretrained/')\n", + "model_whisper = Whisper(model_hub_whisper, save_path='/content/pretrained/')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EGUWY37XUb9t" + }, + "source": [ + "Here, we can explore the model ..." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FUOO9DAaUbPW" + }, + "outputs": [], + "source": [ + "print(model_whisper)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qrVUm3ovrMdG" + }, + "source": [ + "Now, we can try to extract audio features from those models! In our examples however, we have two different models that require different forward operations if our goal **solely is to retrieve the latent representation of the audio input.** Wav2vec 2.0 is a transformer encoder, so we just need to get the output of the last layer. Whisper, on the other end, is packaged as a fully trained encoder-decoder. 
**Hence, we must make sure that we only retrieve the output of the encoder!**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "L4PxS5MYrd_7" + }, + "outputs": [], + "source": [ + "source = source.unsqueeze(0)\n", + "print(source.shape)\n", + "\n", + "fea_w2v2 = model_w2v2(source)\n", + "print(fea_w2v2.shape)\n", + "\n", + "# This can be given as an argument when we instantiate the model as well\n", + "model_whisper.encoder_only=True\n", + "fea_whisper = model_whisper(source)\n", + "print(fea_whisper.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "psdnsUwfsGgf" + }, + "source": [ + "**What am I looking at?**\n", + "\n", + "These features correspond to the Context Representation obtained after the transformer (See the *C* in the initial wav2vec2 illustration). Hence, this output dimension is *768* for the Base model (as described in the [paper](https://arxiv.org/abs/2006.11477)). Then, wav2vec2 has an output frequency of 50Hz, and the audio file is 2.87 seconds long, explaining the *143* that we obtain in the time dimension. Indeed the shape is [batch, time, features]. The same logic can be applied to Whisper as we obtain the last hidden state of the transformer encoder.\n", + "\n", + "## Wav2Vec 2.0 and Whisper encoders as a block of your pipeline (ASR, TIMIT)\n", + "\n", + "Until now, we only saw how to use pretrained wav2vec2 and whisper to infer on a single audio file. Of course, if you just want to extract features, you could simply loop over your dataset and store everything ... 
OR you could use SpeechBrain to directly plug those models into your pipeline to compute the features on-the-fly (and fine-tune them!)\n", + "\n", + "In fact, if you are familiar with our YAML formalism (and if you are not, please first check [our tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html)), HuggingFaceWav2Vec2 and HuggingFaceWhisper can simply be added as a block to your hyperparams file:\n", + "\n", + "For Wav2vec 2.0:\n", + "```yaml\n", + "wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2\n", + " source: !ref \n", + " freeze: True\n", + " save_path: !ref /wav2vec2_checkpoint\n", + "```\n", + "\n", + "For Whisper:\n", + "```yaml\n", + "whisper: !new:speechbrain.integrations.huggingface.whisper.Whisper\n", + " pretrained_path: !ref \n", + " freeze: True\n", + " encoder_only: True\n", + " save_path: !ref /wav2vec2_checkpoint/model.pt\n", + "```\n", + "- *freeze* enables you to fine-tune (False) or freeze (True) the neural parameters. Note that you can also ask to freeze only the encoder for Whisper or only the feature extractor for wav2vec 2.0. You are left in your pipeline with two PyTorch module objects that can be used as standard layers to propagate your data!\n", + "\n", + "**After this point you will need basic knowledge about SpeechBrain. 
Please refer to the prerequisites (at the beginning of this tutorial) if you do not understand something.**\n", + "\n", + "Now, we will dive deeper into the Librispeech ASR (CTC) recipe that can be found [here](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/ASR/CTC).\n", + "\n", + "If you are not familiar with CTC ASR, please refer to our simplified and highly commented [template](https://github.com/speechbrain/speechbrain/tree/develop/templates/speech_recognition/ASR).\n", + "\n", + "In the following section, we will only highlight important parts of the code that are necessary to use the whisper or wav2vec2 models in your recipe!\n", + "\n", + "### Understanding the yaml parameters.\n", + "\n", + "In this setup, we would like to fine-tune the whisper or wav2vec2 models with respect to our dowstream task. More precisely, the architecture of the model is:\n", + "\n", + "```\n", + "[ wav -> wav2vec2 or whisper -> Dense ] = encoder\n", + "```\n", + "\n", + "To achieve this our YAML file is comprised of different key components (delete the w2v2 references if you are interested by whisper and vice-versa):\n", + "\n", + "```yaml\n", + " [...]\n", + "\n", + " # URL for the biggest and already fine-tuned english wav2vec2 model and parameters.\n", + " # URL for the medium whisper as well.\n", + " wav2vec2_hub: \"facebook/wav2vec2-large-960h-lv60-self\"\n", + " whisper_hub: \"openai/whisper-medium\"\n", + " freeze_pretrained: False\n", + " lr_pretrained: 0.0001\n", + "\n", + " [...]\n", + "\n", + " # The instianciation of the SpeechBrain lobe\n", + " wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2\n", + " source: !ref \n", + " freeze: !ref \n", + " save_path: !ref /wav2vec2_checkpoint\n", + "\n", + " # The instianciation of the SpeechBrain lobe\n", + " whisper: !new:speechbrain.integrations.huggingface.whisper.Whisper\n", + " source: !ref \n", + " freeze: !ref \n", + " encoder_only: True\n", + " save_path: !ref 
/whisper_checkpoint\n", + " \n", + " # A simple DNN that receive as inputs the output of the pretrained model\n", + " # Here the output dimensionality of the LARGE wav2vec2 and MEDIUM whisper are 1024.\n", + " enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN\n", + " input_shape: [null, null, 1024]\n", + " activation: !ref \n", + " dnn_blocks: !ref \n", + " dnn_neurons: !ref \n", + "\n", + " [...]\n", + "\n", + " # Two optimizers and schedulers to allow:\n", + " # 1. The learning of the encoder and the decoders.\n", + " # 2. Slowly fine-tune only the pretrained (w2v2 or whisper) parts.\n", + " adam_opt_class: !name:torch.optim.AdamW\n", + " lr: !ref \n", + "\n", + " pretrained_opt_class: !name:torch.optim.AdamW\n", + " lr: !ref \n", + " \n", + " lr_annealing_adam: !new:speechbrain.nnet.schedulers.NewBobScheduler\n", + " initial_value: !ref \n", + " improvement_threshold: 0.0025\n", + " annealing_factor: 0.8\n", + " patient: 0\n", + "\n", + " lr_annealing_pretrained: !new:speechbrain.nnet.schedulers.NewBobScheduler\n", + " initial_value: !ref \n", + " improvement_threshold: 0.0025\n", + " annealing_factor: 0.9\n", + "\n", + " # We add the wav2vec2 / whisper to the modules list so it is uploaded on the GPUs.\n", + " # Remove the one that is not used!\n", + " modules:\n", + " wav2vec2: !ref \n", + " whisper: !ref \n", + " enc: !ref \n", + " emb: !ref \n", + " dec: !ref \n", + " ctc_lin: !ref \n", + " seq_lin: !ref \n", + "\n", + " # We do not add the wav2vec2 / whisper to the model list, so we can apply one optimizer\n", + " # to the randomly initialized model and the other to the pretrained model.\n", + " model: !new:torch.nn.ModuleList\n", + " - [!ref , !ref , !ref , !ref , !ref ]\n", + "\n", + " # We add the wav2vec2 /whisper to our checkpointer so the model can be saved!\n", + " checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer\n", + " checkpoints_dir: !ref \n", + " recoverables:\n", + " model: !ref \n", + " wav2vec2: !ref \n", + " whisper: !ref 
\n", + " lr_annealing_adam: !ref \n", + " lr_annealing_wav2vec: !ref \n", + " counter: !ref \n", + "```\n", + "\n", + "And we combine everything in the python recipe file:\n", + "\n", + "```python\n", + " class ASR(sb.Brain):\n", + " def compute_forward(self, batch, stage):\n", + " [...]\n", + " # The compute forward is strictly identical to any compute_forward method\n", + " # for ASR, except that we just call the wav2vec2 / whisper on the wavs instead of computing acoustic features (FBANKs, MFCCs ...).\n", + " feats = self.modules.wav2vec2(wavs)\n", + " feats = self.modules.whisper(wavs)\n", + " x = self.modules.enc(feats)\n", + " [...]\n", + " \n", + " def init_optimizers(self):\n", + " # Initializes the whisper optimizer and model optimizer. The same can be done for wav2vec2.\n", + " self.pretrained_optimizer = self.hparams.pretrained_opt_class(\n", + " self.modules.whisper.parameters()\n", + " )\n", + " self.adam_optimizer = self.hparams.adam_opt_class(\n", + " self.hparams.model.parameters()\n", + " )\n", + " [...]\n", + " \n", + " def on_stage_end(self, stage, stage_loss, epoch):\n", + " #Gets called at the end of a epoch.\n", + " [...]\n", + " if stage == sb.Stage.VALID:\n", + "\n", + " # Here we apply our learning_rate annealing on both optimizers\n", + " old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam(wer)\n", + " old_lr_pretrained, new_lr_pretrained = self.hparams.lr_annealing_pretrained(wer)\n", + " sb.nnet.schedulers.update_learning_rate(\n", + " self.adam_optimizer, new_lr_adam\n", + " )\n", + " sb.nnet.schedulers.update_learning_rate(\n", + " self.pretrained_optimizer, new_lr_wav2vec\n", + " )\n", + "\n", + " def fit_batch(self, batch):\n", + " # Override of the Brain Class fit_batch function.\n", + " # Managing automatic mixed precision\n", + " [...]\n", + " outputs = self.compute_forward(batch, sb.Stage.TRAIN)\n", + "\n", + " loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)\n", + " loss.backward()\n", + "\n", + " # Here we 
manage both optimizers\n", + " # (Learning enc+dec and Fine-tuning wav2vec2).\n", + " if self.check_gradients(loss):\n", + " self.pretrained_optimizer.step()\n", + " self.adam_optimizer.step()\n", + "\n", + " self.pretrained_optimizer.zero_grad()\n", + " self.adam_optimizer.zero_grad()\n", + "\n", + " return loss.detach().cpu()\n", + "\n", + "```\n", + "\n", + "Note: Of course, if you are playing with a frozen wav2vec2 model, no need to employ two different optimizers ;-) And this is it! If you just run your recipe like that, your whisper / wav2vec 2.0 pre-trained encoder will be part of your architecture and be fine-tuned (or not) depending on your requirements.\n", + "\n", + "## Using Whisper as a fully pre-trained encoder-decoder\n", + "\n", + "Whisper is a full transformer. In theory, this means that you could take it and perform zero-shot speech recognition or speech translation. In practice, you most likely want to fine-tune it on your in-house dataset. Both options can be done within SpeechBrain, and we only need to slightly change our YAML and recipe accordingly. Indeed, we won't need a DNN decoder anymore, as Whisper has one. We won't rely on the CTC loss as well, as the Transformer decoder can be trained with a negative log-likelihood. Finally, we must decide if we want to connect our model to a greedy search decoding or to a more complex beam searcher with or without language model scoring! A summary of what is supported by SpeechBrain for Whisper is:\n", + "\n", + "- Feature extraction\n", + "- Encoder fine-tuning\n", + "- Encoder-decoder zero shot ASR or ST\n", + "- Encoder-decoder fine-tuning\n", + "- Greedy decoding\n", + "- Beam search decoding with and without LM\n", + "\n", + "Here, we will focus on fine-tuning a base whisper on Librispeech with a greedy decoding.\n", + "\n", + "To achieve this we must first modify our previous YAML file and python script. Here, we need to set the `encoder_only` to `False` because we want to keep the decoder. 
We also needs to integrate a search function that will takes the most probable token predicted by the decoder and feed it back (concatenated with the previous tokens) to the decoder in an auto regressive manner.\n", + "Contrary to the previous example, we don't need to add a language modeling head on top of the Whisper decoder because it will be already created for you when you will fetch the Whisper model. Now you have everything needed for fine-tuning the Whisper Encoder-Decoder!\n", + "\n", + "\n", + "Let's see what happens in practice:\n", + "\n", + "\n", + "\n", + "```yaml\n", + " [...]\n", + "\n", + " whisper_hub: \"openai/whisper-medium\"\n", + " freeze_pretrained: False\n", + " lr_pretrained: 0.0001\n", + "\n", + " # we need to specify the language of the inputs audios.\n", + " language: english\n", + "\n", + " # These values will be used during decoding.\n", + " # The first one design the first token to be added during searching.\n", + " # The second is the token to stop the expansion of hypotheses that have reached eos.\n", + " timestamp_index: 50363\n", + " eos_index: 50257\n", + "\n", + " # This value is the ratio of steps during the decoding.\n", + " # e.g, encoded speech is [B, T, F], then the maximal number of steps will be T * max_decode_ratio.\n", + " max_decode_ratio: 0.5\n", + "\n", + " [...]\n", + "\n", + " # The instanciation of the SpeechBrain lobe\n", + " whisper: !new:speechbrain.integrations.huggingface.whisper.Whisper\n", + " source: !ref \n", + " freeze: !ref \n", + " encoder_only: False # :)\n", + " save_path: !ref /whisper_checkpoint\n", + "\n", + " [...]\n", + "\n", + " pretrained_opt_class: !name:torch.optim.AdamW\n", + " lr: !ref \n", + " \n", + " lr_annealing_pretrained: !new:speechbrain.nnet.schedulers.NewBobScheduler\n", + " initial_value: !ref \n", + " improvement_threshold: 0.0025\n", + " annealing_factor: 0.9\n", + "\n", + " # We add the whisper to the modules list so it is uploaded on the GPUs.\n", + " modules:\n", + " 
whisper: !ref \n", + "\n", + " # We creates the searcher method to decode the Whisper model.\n", + " valid_greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SWhisperGreedySearch\n", + " model: !ref \n", + " bos_index: !ref \n", + " eos_index: !ref \n", + " min_decode_ratio: 0\n", + " max_decode_ratio: !ref \n", + "\n", + " # We add the whisper to our checkpointer so the model can be saved!\n", + " checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer\n", + " checkpoints_dir: !ref \n", + " recoverables:\n", + " whisper: !ref \n", + " scheduler_whisper: !ref \n", + " counter: !ref \n", + "\n", + "```\n", + "\n", + "And we combine everything in the python recipe file:\n", + "\n", + "```python\n", + " class ASR(sb.Brain):\n", + " def compute_forward(self, batch, stage):\n", + " wavs, wav_lens = batch.sig\n", + " bos_tokens, bos_tokens_lens = batch.tokens_bos\n", + " \n", + " [...]\n", + "\n", + " # The compute forward is similar to any compute_forward method for ASR \n", + " # with Transformers in SpeechBrain.\n", + "\n", + " # Forward encoder + decoder\n", + " enc_out, logits, _ = self.modules.whisper(wavs, bos_tokens)\n", + "\n", + " log_probs = self.hparams.log_softmax(logits)\n", + "\n", + " hyps = None\n", + " if stage != sb.Stage.TRAIN:\n", + " # perform greedy searcher and return the hypotheses found\n", + " hyps, _ = self.hparams.valid_greedy_searcher(enc_out, wav_lens)\n", + "\n", + " [...]\n", + "\n", + " return log_probs, hyps, wav_lens\n", + "\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " log_probs, hyps, wav_lens, = predictions\n", + "\n", + " tokens_eos, tokens_eos_lens = batch.tokens_eos\n", + "\n", + " [...]\n", + "\n", + " # compute the NLL loss\n", + " loss = self.hparams.nll_loss(\n", + " log_probs, tokens_eos, tokens_eos_lens,\n", + " )\n", + "\n", + " if stage != sb.Stage.TRAIN:\n", + " tokens, tokens_lens = batch.tokens\n", + "\n", + " # Decode token terms to words\n", + " predicted_words = 
self.tokenizer.batch_decode(\n", + " hyps, skip_special_tokens=True\n", + " )\n", + "\n", + " # Convert indices to words\n", + " target_words = undo_padding(tokens, tokens_lens)\n", + " target_words = self.tokenizer.batch_decode(\n", + " target_words, skip_special_tokens=True\n", + " )\n", + "\n", + " # Compute our metrics\n", + " self.wer_metric.append(ids, predicted_words, target_words)\n", + " self.cer_metric.append(ids, predicted_words, target_words)\n", + "\n", + " [...]\n", + "\n", + " return loss\n", + " \n", + " def on_stage_end(self, stage, stage_loss, epoch):\n", + " \"\"\"Gets called at the end of an epoch.\"\"\"\n", + " # Compute/store important stats\n", + " stage_stats = {\"loss\": stage_loss}\n", + " if stage == sb.Stage.TRAIN:\n", + " self.train_stats = stage_stats\n", + " else:\n", + " stage_stats[\"CER\"] = self.cer_metric.summarize(\"error_rate\")\n", + " stage_stats[\"WER\"] = self.wer_metric.summarize(\"error_rate\")\n", + "\n", + " # Perform end-of-iteration things, like annealing, logging, etc.\n", + " if stage == sb.Stage.VALID:\n", + "\n", + " old_lr_whisper, new_lr_whisper = self.hparams.lr_annealing_whisper(\n", + " stage_stats[\"loss\"]\n", + " )\n", + "\n", + " sb.nnet.schedulers.update_learning_rate(\n", + " self.optimizer, new_lr_whisper\n", + " )\n", + " self.hparams.train_logger.log_stats(\n", + " stats_meta={\"epoch\": epoch, \"lr_whisper\": old_lr_whisper},\n", + " train_stats=self.train_stats,\n", + " valid_stats=stage_stats,\n", + " )\n", + " self.checkpointer.save_and_keep_only(\n", + " meta={\"WER\": stage_stats[\"WER\"]}, min_keys=[\"WER\"],\n", + " )\n", + " elif stage == sb.Stage.TEST:\n", + " self.hparams.train_logger.log_stats(\n", + " stats_meta={\"Epoch loaded\": self.hparams.epoch_counter.current},\n", + " test_stats=stage_stats,\n", + " )\n", + " with open(self.hparams.wer_file, \"w\") as w:\n", + " self.wer_metric.write_stats(w)\n", + "```\n", + "\n", + "With that, you can fine-tune the latest Whisper model on the 
dataset of your choice!\n", + "\n", + "You can play around with this model and try to improving it with a beam search decoding instead of the greedy search, or you could just scale up and take the largest available whisper model... and everything via SpeechBrain!\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " 
year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/notebook-footer.md b/docs/tutorials/notebook-footer.md new file mode 100644 index 0000000000..60f5696ea2 --- /dev/null +++ b/docs/tutorials/notebook-footer.md @@ -0,0 +1,24 @@ +## Citing SpeechBrain + +If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry: + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with {SpeechBrain} 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter 
Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` \ No newline at end of file diff --git a/docs/tutorials/notebook-header.md b/docs/tutorials/notebook-header.md new file mode 100644 index 0000000000..c5a6814e9c --- /dev/null +++ b/docs/tutorials/notebook-header.md @@ -0,0 +1,6 @@ + + + +[Open In Colab](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/{tutorialpath}) +to execute or view/download this notebook on +[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/{tutorialpath}) \ No newline at end of file diff --git a/docs/tutorials/preprocessing.rst b/docs/tutorials/preprocessing.rst new file mode 100644 index 0000000000..3d4d7c7e84 --- /dev/null +++ b/docs/tutorials/preprocessing.rst @@ -0,0 +1,133 @@ +Speech Preprocessing +==================== + +.. + Originally generated with https://gist.github.com/asumagic/19f9809480b62bfd16094fb5c844a564 but OK to edit in repo now. + Please ensure for each tutorial that you are adding it to the hidden toctree at the end of the file! + +.. toctree:: + :hidden: + + preprocessing/speech-augmentation.ipynb + preprocessing/fourier-transform-and-spectrograms.ipynb + preprocessing/speech-features.ipynb + preprocessing/environmental-corruption.ipynb + preprocessing/multi-microphone-beamforming.ipynb + preprocessing/voice-analysis.ipynb + + +.. rubric:: `🔗 Speech Augmentation `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Jan. 
2021 + - Difficulty: easy + - Time: 20min + - `🔗 Google Colab `__ + + +A popular saying in machine learning is "there is no better data than more data". However, collecting new data can be expensive +and we must cleverly use the available dataset. One popular technique is called speech augmentation. The idea is to artificially +corrupt the original speech signals to give the network the "illusion" that we are processing a new signal. This acts as a powerful regularizer, +that normally helps neural networks improving generalization and thus achieve better performance on test data. + +.. rubric:: `🔗 Fourier Transforms and Spectrograms `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Jan. 2021 + - Difficulty: easy + - Time: 20min + - `🔗 Google Colab `__ + + +In speech and audio processing, the signal in the time-domain is often transformed into another domain. +But why do we need to transform an audio signal? This is because some speech characteristics/patterns of the signal (e.g, pitch, formats) +might not be very evident when looking at the audio in the time-domain. With properly designed transformations, +it might be easier to extract the needed information from the signal itself. + +The most popular transformation is the +Fourier Transform, which turns the time-domain signal into an equivalent representation in the frequency domain. +In the following sections, we will describe the Fourier transforms along with other related transformations such as +Short-Term Fourier Transform (STFT) and spectrograms. + +.. rubric:: `🔗 Speech Features `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Jan. 2021 + - Difficulty: easy + - Time: 20min + - `🔗 Google Colab `__ + + +Speech is a very high-dimensional signal. For instance, when the sampling frequency is 16 kHz, +we have 16000 samples for each second. 
Working with such very high dimensional data can be critical from a machine learning perspective. +The goal of feature extraction is to find more compact ways to represent speech. + +.. rubric:: `🔗 Environmental Corruption `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Feb. 2021 + - Difficulty: medium + - Time: 20min + - `🔗 Google Colab `__ + + +In realistic speech processing applications, the signal recorded by the microphone is corrupted by noise and reverberation. +This is particularly harmful in distant-talking (far-field) scenarios, where the speaker and the reference microphone are distant +(think about popular devices such as Google Home, Amazon Echo, Kinect, and similar devices). + +.. rubric:: `🔗 Multi-microphone Beamforming `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Grondin F. & Aris W. + - Jan. 2021 + - Difficulty: medium + - Time: 20min + - `🔗 Google Colab `__ + + +Using a microphone array can be very handy to improve the signal quality +(e.g. reduce reverberation and noise) prior to performing speech recognition tasks. +Microphone arrays can also estimate the direction of arrival of a sound source, and this information can later +be used to "listen" in the direction of the source of interest. + + +.. rubric:: `🔗 Analyzing Vocal Features for Pathology `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Plantinga P. + - Nov. 2024 + - Difficulty: easy + - Time: 20min + - `🔗 Google Colab `__ + + +This notebook goes through a simple voice analysis using 4-5 interpretable features to demonstrate traditional techniques for pathology detection. This includes +features such as jitter, shimmer, harmonicity, and glottal-to-noise excitation. 
\ No newline at end of file diff --git a/docs/tutorials/preprocessing/environmental-corruption.ipynb b/docs/tutorials/preprocessing/environmental-corruption.ipynb new file mode 100644 index 0000000000..fd072815e7 --- /dev/null +++ b/docs/tutorials/preprocessing/environmental-corruption.ipynb @@ -0,0 +1,345 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/preprocessing/environmental-corruption.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/preprocessing/environmental-corruption.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VsPvUujzoDkw" + }, + "source": [ + "# Environmental Corruption\n", + "\n", + "In realistic speech processing scenarios, the signals captured by microphones are often corrupted by unwanted elements such as **noise** and **reverberation**. This challenge is particularly pronounced in **distant-talking** (far-field) situations, where the speaker and the reference microphone are positioned at a considerable distance. Examples of such scenarios include signals recorded by popular devices like Google Home, Amazon Echo, Kinect, and similar devices.\n", + "\n", + "A common strategy in neural speech processing involves starting with clean speech recordings and artificially introducing noise and reverberation to simulate real-world conditions. This process is known as **environmental corruption** or *speech contamination*.\n", + "\n", + "Starting with clean signals allows for the controlled introduction of various types of noise and reverberation, making environmental corruption a potent regularization technique. 
This regularization helps neural networks generalize better when exposed to real-world, noisy conditions during testing.\n", + "\n", + "The environmental corruption process transforms a clean signal $x[n]$ into a noisy and reverberant signal using the equation:\n", + "\n", + "$y[n] = x[n] * h[n] + n[n]$\n", + "\n", + "where $n[n]$ represents a noise sequence, and $h[n]$ is an impulse response that introduces the reverberation effect.\n", + "\n", + "In the following sections, we will delve into the details of how this transformation is carried out. Before that, let's download some signals that will be essential for the rest of the tutorial.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9ZFaNUKuycE3" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.dropbox.com/s/vwv8xdr7l3b2tta/noise_sig.csv\n", + "!wget https://www.dropbox.com/s/aleer424jumcs08/noise2.wav\n", + "!wget https://www.dropbox.com/s/eoxxi2ezr8owk8a/noise3.wav\n", + "!wget https://www.dropbox.com/s/pjnub2s5hql2vxs/rir1.wav\n", + "!wget https://www.dropbox.com/s/nyno6bqbmiy2rv8/rirs.csv\n", + "!wget https://www.dropbox.com/s/u8qyvuyie2op286/spk1_snt1.wav" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ildeW0Np9kkU" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain\n", + "BRANCH = 'develop'\n", + "!git clone https://github.com/speechbrain/speechbrain.git -b $BRANCH\n", + "%cd /content/speechbrain/\n", + "!python -m pip install ." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HQeluuEa2tqC" + }, + "source": [ + "A clean speech signal looks like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "RZIni_cQ2tGm" + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "from speechbrain.dataio.dataio import read_audio\n", + "from IPython.display import Audio\n", + "\n", + "clean = read_audio('/content/spk1_snt1.wav').squeeze()\n", + "\n", + "# Plots\n", + "plt.subplot(211)\n", + "plt.plot(clean)\n", + "plt.xlabel('Time')\n", + "\n", + "plt.subplot(212)\n", + "plt.specgram(clean,Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "Audio(clean, rate=16000)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TrljmzSm9LgK" + }, + "source": [ + "## 1. Additive Noise\n", + "\n", + "In SpeechBrain, we designed a class able to contaminate a speech signal with noise (`speechbrain.augment.time_domanin.AddNoise`). 
This class takes as input a csv file that itemizes a list of noise signals:\n", + "\n", + "\n", + "```\n", + "ID, duration, wav, wav_format, wav_opts\n", + "noise2, 5.0, noise2.wav, wav,\n", + "noise3, 1.0, noise3.wav, wav,\n", + "```\n", + "When called, `AddNoise` samples from this noise collection and adds the selected noise into the clean signal with a random **Signal-to-Noise Ratio** (SNR).\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ji_LVoE5_LPK" + }, + "outputs": [], + "source": [ + "import torch\n", + "from speechbrain.augment.time_domain import AddNoise\n", + "\n", + "noisifier = AddNoise('tests/samples/annotation/noise.csv', replacements={'noise_folder': 'tests/samples/noise'})\n", + "noisy = noisifier(clean.unsqueeze(0), torch.ones(1))\n", + "\n", + "# Plots\n", + "plt.subplot(211)\n", + "plt.plot(noisy.squeeze())\n", + "plt.xlabel('Time')\n", + "\n", + "plt.subplot(212)\n", + "plt.specgram(noisy.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "Audio(noisy.squeeze(0), rate=16000)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TsOSikxXB2zl" + }, + "source": [ + "The amount of noise can be tuned with the **snr_low** and **snr_high** parameters that define the sampling range for the SNR. The length vector is needed because we can process in parallel batches of signals with different lengths. The length vector contains relative lengths for each sentence composing the batch (e.g, for two examples we can have length=[0.8 1.0] where 1.0 is the length of the longest sentence in the batch).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jkkgPahzDKLA" + }, + "source": [ + "## 2. Reverberation\n", + "When speaking into a room, our speech signal is **reflected multiple times** by the walls, floor, ceiling, and by the objects within the acoustic environment. 
Consequently, the final signal recorded by a distant microphone will contain multiple **delayed replicas** of the original signal. All these replicas interfere with each other and significantly affect the intelligibility of the speech signal.\n", + "\n", + "Such a **multi-path propagation** is called reverberation. Within a given room enclosure, the reverberation between a source and a receiver is modeled by an **impulse response**:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ylSShiytFv42" + }, + "outputs": [], + "source": [ + "rir = read_audio('/content/rir1.wav')\n", + "\n", + "# Impulse response\n", + "plt.subplot(211)\n", + "plt.plot(rir[0:8000])\n", + "plt.xlabel('Time')\n", + "plt.ylabel('h(t)')\n", + "\n", + "# Zoom on early reflections\n", + "plt.subplot(212)\n", + "plt.plot(rir[2150:2500])\n", + "plt.xlabel('Time')\n", + "plt.ylabel('h(t)')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NirXcf9SGSEF" + }, + "source": [ + "The impulse response is a complete description of the changes that the sounds undergo when traveling from a source to a receiver. In particular, each peak in the impulse response corresponds to a replica reaching the receiver. The first peak corresponds to the **direct path**. Then, we can see the **first-order reflections** on walls, ceiling, floor (see the second picture).\n", + "\n", + "Globally, the impulse response follows an exponential decay. This decay is faster in a dry room characterized by low reverberation-time and it is slower in a large and empty environment.\n", + "\n", + "The reverberation is added by performing a **convolution** between a clean signal and an impulse response. 
In SpeechBrain, this operation is performed by `speechbrain.augment.time_domain.AddReverb`.\n", + "\n", + "When called, `AddReverb` samples an impulse response from a given csv file:\n", + "\n", + "```\n", + "ID, duration, wav, wav_format, wav_opts\n", + "rir1, 1.0, rir1.wav, wav,\n", + "....\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Hd5PIl-AMUDf" + }, + "outputs": [], + "source": [ + "from speechbrain.augment.time_domain import AddReverb\n", + "\n", + "reverb = AddReverb('tests/samples/annotation/RIRs.csv', replacements={'rir_folder': 'tests/samples/RIRs'})\n", + "reverbed = reverb(clean)\n", + "\n", + "# Plots\n", + "plt.subplot(211)\n", + "plt.plot(reverbed.squeeze())\n", + "plt.xlabel('Time')\n", + "\n", + "plt.subplot(212)\n", + "plt.specgram(reverbed.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "Audio(reverbed.squeeze(0), rate=16000)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fhvVPlg_PrA7" + }, + "source": [ + "Reverberation is a convolutive noise that \"smooths\" the signal in the time (see the long tails that appear in regions that were silent in the clean signal) and frequency domain.\n", + "\n", + "The amount of reverberation is controlled by the parameter **rir_scale_factor**. If rir_scale_factor < 1, the impulse response is compressed (less reverb), while if rir_scale_factor > 1 the impulse response is dilated (more reverb). Feel free to play with it in the previous example!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-9-_N33ElyqN" + }, + "source": [ + "## References\n", + "[1] M. Ravanelli, P. Svaizer, M. Omologo, \"Realistic Multi-Microphone Data Simulation for Distant Speech Recognition\", in Proceedings of Interspeech 2016 [ArXiv](https://arxiv.org/abs/1711.09470)\n", + "\n", + "[2] M. Ravanelli, M. 
Omologo, \"Contaminated speech training methods for robust DNN-HMM distant speech recognition\", in Proceedings of INTERSPEECH 2015. [ArXiv](https://arxiv.org/abs/1710.03538)\n", + "\n", + "[3] M. Ravanelli, M. Omologo, \"On the selection of the impulse responses for distant-speech recognition based on contaminated speech training\", in Proceedings of INTERSPEECH 2014. [ArXiv](https://isca-speech.org/archive/archive_papers/interspeech_2014/i14_1028.pdf)\n", + "\n", + "[4] M. Ravanelli, A. Sosi, P. Svaizer, M.Omologo, \"Impulse response estimation for robust speech recognition in a reverberant environment\", in Proceeding of the European Signal Processing Conference, EUSIPCO 2012. [ArXiv](https://www.eurasip.org/Proceedings/Eusipco/Eusipco2012/Conference/papers/1569588145.pdf)\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + 
"@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1sIvjMP1xfgbyLTf6bjYmLVQ35a-BgxVd", + "timestamp": 1612793552720 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/preprocessing/fourier-transform-and-spectrograms.ipynb b/docs/tutorials/preprocessing/fourier-transform-and-spectrograms.ipynb new file mode 100644 index 0000000000..3965e73e69 --- /dev/null +++ b/docs/tutorials/preprocessing/fourier-transform-and-spectrograms.ipynb @@ -0,0 +1,519 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/preprocessing/fourier-transform-and-spectrograms.ipynb)\n", + "to execute or view/download this notebook on\n", + 
"[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/preprocessing/fourier-transform-and-spectrograms.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yLHQsyqgmbLp" + }, + "source": [ + "# Fourier Transforms and Spectrograms\n", + "\n", + "In speech and audio processing, the signal in the time-domain is often transformed into another domain. Ok, but why do we need to transform an audio signal?\n", + "\n", + "Some speech characteristics/patterns of the signal (e.g, *pitch*, *formants*) might not be very evident when looking at the audio in the time-domain. With properly designed transformations, it might be easier to extract the needed information from the signal itself.\n", + "\n", + "The most popular transformation is the **Fourier Transform**, which turns the time-domain signal into an equivalent representation in the **frequency domain**. In the following sections, we will describe the Fourier transforms along with other related transformations such as **Short-Term Fourier Transform** (STFT) and **spectrograms**.\n", + "\n", + "## 1. Fourier Transform\n", + "The Fourier transform of a time-discrete sequence $f[n]={f[0],f[1],..f[N-1]}$ is called Discrete Fourier Transform (DFT) and it is defined in this way:\n", + "\n", + "$F_{k} = \\sum_{n=0}^{N-1} f_{n} e^{-j\\frac{2\\pi}{N}kn}$\n", + "\n", + "The inverse transformation, called Inverse Discrete Fourier Transform (IDFT), maps the frequency-domain signal $F_k$ into a time-domain one $f_n$:\n", + "\n", + "$f_{n} = \\frac{1}{N}\\sum_{k=0}^{N-1} F_{k} e^{j\\frac{2\\pi}{N}kn}$\n", + "\n", + "The two representations are equivalent and we are not losing information when applying them. 
It is just a different way to represent the same signal.\n", + "\n", + "\n", + "#### What is the intuition?\n", + "The idea behind the Fourier transform is to represent the signal as a **weighted sum of complex sinusoids with increasing frequency**.\n", + "The complex exponential $e^{j\\frac{2\\pi}{N}kn}$, for instance, determines the frequency of this \"complex sinusoid\":\n", + "\n", + "$e^{j\\frac{2\\pi}{N}kn} = cos(\\frac{2\\pi}{N}kn) +j sin(\\frac{2\\pi}{N}kn)$.\n", + "\n", + "The term $F_{k}$, instead, is another **complex number** that determines the amplitude and shift (phase) of the frequency components.\n", + "It can be shown that with N complex sinusoids with proper **amplitude** and **phase**, we can model any signal. In other words, the complex sinusoids are the basic bricks that compose your signal. If you properly combine many of them like in a LEGO building, you can create all the signals you want (both periodic and non-periodic).\n", + "\n", + "The transformation has $O(N^2)$ complexity because for each element k of the frequency representation $F_k$ we have to loop over all the N elements of the sequence. This makes it impossible to compute DFT and IDFT of long sequences.\n", + "\n", + "Fortunately, there are algorithms called **Fast-Fourier Transform (FFT)** that can compute it with $O(Nlog(N))$. The FFT splits the input sequences into small chunks and combines their DFTs.\n", + "\n", + "This concept of \"complex sinusoids\" might be quite difficult to digest. Nevertheless, on-line you can find excellent material full of cool graphical animations to help you with that (see the tutorials in the reference). 
For now, let's just consider the Fourier transform as a **linear transformation** that maps real-valued sequences into complex-valued ones.\n", + "\n", + "Before computing some DTFTs, let's download some speech signal and install speechbrain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jQhXYHD12JeN" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.dropbox.com/s/u8qyvuyie2op286/spk1_snt1.wav" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "kcm6OXygDaLg" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain\n", + "BRANCH = 'develop'\n", + "!git clone https://github.com/speechbrain/speechbrain.git -b $BRANCH\n", + "%cd /content/speechbrain/\n", + "!python -m pip install ." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "aD1Hn73P2PC4" + }, + "outputs": [], + "source": [ + "import torch\n", + "import matplotlib.pyplot as plt\n", + "from speechbrain.dataio.dataio import read_audio\n", + "\n", + "signal = read_audio('/content/spk1_snt1.wav')\n", + "print(signal.shape)\n", + "\n", + "# fft computation\n", + "fft = torch.fft.fft(signal.squeeze(), dim=0)\n", + "print(fft)\n", + "print(fft.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ay1Iq2ZU_Utj" + }, + "source": [ + "As you can see, the input signal is real (and thus the imaginary part is filled with zeros). 
The DFT is a tensor containing both the real and the imaginary parts of the transformation.\n", + "\n", + "Let's now compute the magnitude and phase of the DFT and plot them:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JABfGdt0l-PD" + }, + "outputs": [], + "source": [ + "# Real and Imaginary parts\n", + "real_fft = fft.real\n", + "img_fft = fft.imag\n", + "\n", + "mag = torch.sqrt(torch.pow(real_fft,2) + torch.pow(img_fft,2))\n", + "phase = torch.arctan(img_fft/real_fft)\n", + "\n", + "plt.subplot(211)\n", + "x_axis = torch.linspace(0, 16000, mag.shape[0])\n", + "plt.plot(x_axis, mag)\n", + "\n", + "plt.subplot(212)\n", + "plt.plot(x_axis, phase)\n", + "plt.xlabel('Freq [Hz]')\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EVeGj-4sCWJk" + }, + "source": [ + "There are a few interesting things to notice from the plots:\n", + "\n", + "\n", + "* The plot of the magnitude is symmetric. The last element of the x-axis corresponds to the sampling frequency $f_s$, which in this case is 16kHz. Due to this symmetry, it is only necessary to plot the magnitude from 0 to $fs/2$. This frequency is called Nyquist frequency.\n", + "* The plot of the phase is very noisy. This is expected too. The phase is notoriously not easy to interpret and estimate.\n", + "\n", + "Let's now plot the magnitude from 0 to the Nyquist frequency:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BYA_rDiDD3vq" + }, + "outputs": [], + "source": [ + "half_point = mag[0:].shape[0]//2\n", + "x_axis = torch.linspace(0, 8000, half_point)\n", + "plt.plot(x_axis, mag[0:half_point])\n", + "plt.xlabel('Frequency')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MDeVaOrKEkng" + }, + "source": [ + "We can see that most of the energy of a speech signal is concentrated in the lower part of the spectrum. 
Many important phonemes like vowels, in fact, have most of their energy in this part of the spectrum.\n", + "\n", + "Moreover, we can notice some peaks in the magnitude spectrum. Let's zoom in to see them more clearly:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "iEchxUJ2GXTe" + }, + "outputs": [], + "source": [ + "plt.plot(mag[0:4000])\n", + "plt.xlabel('Frequency')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zsX7WpxvG9Ia" + }, + "source": [ + "The peaks correspond to pitch (i.e, the frequency at which our vocal cords are vibrating) and formants (which correspond to the resonant frequencies of our vocal tract).\n", + "\n", + "Let's now try to go back to the time domain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uM5eGLwvHSWp" + }, + "outputs": [], + "source": [ + "signal_rec = torch.fft.ifft(fft, dim=0)\n", + "signal_rec = signal_rec # real part\n", + "signal_orig = signal\n", + "\n", + "# Plots\n", + "plt.subplot(211)\n", + "plt.plot(signal_orig)\n", + "\n", + "plt.subplot(212)\n", + "plt.plot(signal_rec)\n", + "plt.xlabel('Time')\n", + "\n", + "print(signal_orig[0:10])\n", + "print(signal_rec[0:10])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kF0a3KMAJKp_" + }, + "source": [ + "As you can see from the plot, the signal can be reconstructed in the time domain. Due to some numerical round-off errors, the two signals are very similar but not identical (see the print of the first 10 samples)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tSMFRHgxJo6y" + }, + "source": [ + "## 2. Short-Term Fourier Transform (STFT)\n", + "Speech is a \"dynamic\" signal that evolves over time. It could thus make sense to introduce a mixed time-frequency representation that can show how the frequency components of speech are evolving over time. 
Such a representation is called Short-Term Fourier Transform.\n", + "\n", + "The STFT is computed in this way:\n", + "\n", + "1. Split the time signal into multiple chunks using overlapped sliding windows (e.g, hamming, hanning, blackman).\n", + "2. For each small chunk compute the DFT\n", + "3. Combine all the DFTs into a single representation\n", + "\n", + "Let's now compute an STFT of a speech signal:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "o65I1Qm4hZXH" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import STFT\n", + "\n", + "signal = read_audio('/content/spk1_snt1.wav').unsqueeze(0) # [batch, time]\n", + "\n", + "compute_STFT = STFT(sample_rate=16000, win_length=25, hop_length=10, n_fft=400) # 25 ms, 10 ms\n", + "signal_STFT = compute_STFT(signal)\n", + "\n", + "print(signal.shape)\n", + "print(signal_STFT.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "C3mdJoSCieo3" + }, + "source": [ + "* The first dimension of the STFT representation is the batch axis (SpeechBrain expects it because it is designed to process in parallel multiple signals).\n", + "* The third is the frequency resolution. It corresponds to half of the fft points ($n_{fft}$) because, as we have seen before, the fft is symmetric.\n", + "* The last dimension gathers the real and the imaginary parts of the STFT representation.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-4BMZzzWkhxx" + }, + "source": [ + "Similar to the Fourier transform, the STFT has an inverse transformation called **Inverse Short-Term Fourier Transform (ISTFT)**. 
With properly-designed windows, we can have a perfect reconstruction of the original signal:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i-DhG95XlN85" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import ISTFT\n", + "\n", + "compute_ISTFT = ISTFT(sample_rate=16000, win_length=25, hop_length=10)\n", + "signal_rec = compute_ISTFT(signal_STFT)\n", + "signal_rec = signal_rec.squeeze() # remove batch axis for plotting\n", + "\n", + "# Plots\n", + "plt.subplot(211)\n", + "plt.plot(signal_orig)\n", + "\n", + "plt.subplot(212)\n", + "plt.plot(signal_rec)\n", + "plt.xlabel('Time')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "45uku4U9m4DF" + }, + "source": [ + "## 3. Spectrogram\n", + "As we have seen before, the magnitude of the Fourier transform is more informative than the phase. We can thus take the magnitude of the STFT representation and obtain the so-called spectrogram. The spectrogram is one of the most popular speech representations.\n", + "\n", + "Let's see how a spectrogram looks like:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "765Zsw_bnzbK" + }, + "outputs": [], + "source": [ + "spectrogram = signal_STFT.pow(2).sum(-1) # power spectrogram\n", + "spectrogram = spectrogram.squeeze(0).transpose(0,1)\n", + "\n", + "spectrogram_log = torch.log(spectrogram) # for graphical convenience\n", + "\n", + "plt.imshow(spectrogram_log.squeeze(0), cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oDshVtzCpxQn" + }, + "source": [ + "The spectrogram is a 2D representation that can be plotted as an image (yellow areas correspond to time-frequency points with high magnitude).\n", + "From the spectrogram, you can see how the frequency components are evolving over time. 
For instance, you can clearly distinguish vowels (whose frequency pattern is characterized by multiple lines corresponding to pitch and formants) and fricatives (characterized by the presence of continuous high-frequency components). Normally, we plot the power spectrogram that corresponds to the squared magnitude of the STFT.\n", + "\n", + "The time and frequency resolution of the spectrogram depends on the length of the window used for computing the STFT.\n", + "\n", + "For instance, if we increase the length of the window, we can have a higher resolution in frequency (but a lower resolution in time):\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "61Lq4GakrZ-g" + }, + "outputs": [], + "source": [ + "signal = read_audio('/content/spk1_snt1.wav').unsqueeze(0) # [batch, time]\n", + "\n", + "compute_STFT = STFT(sample_rate=16000, win_length=50, hop_length=10, n_fft=800)\n", + "signal_STFT = compute_STFT(signal)\n", + "\n", + "spectrogram = signal_STFT.pow(2).sum(-1)\n", + "spectrogram = spectrogram.squeeze(0).transpose(0,1)\n", + "spectrogram = torch.log(spectrogram)\n", + "\n", + "plt.imshow(spectrogram.squeeze(0), cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R1eJdAZauEsA" + }, + "source": [ + "Vice-versa, we can have a larger time resolution at the price of a reduced frequency resolution:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nzn9hxyLuO7w" + }, + "outputs": [], + "source": [ + "signal = read_audio('/content/spk1_snt1.wav').unsqueeze(0) # [batch, time]\n", + "\n", + "compute_STFT = STFT(sample_rate=16000, win_length=5, hop_length=5, n_fft=800)\n", + "signal_STFT = compute_STFT(signal)\n", + "\n", + "spectrogram = signal_STFT.pow(2).sum(-1)\n", + "spectrogram = spectrogram.squeeze(0).transpose(0,1)\n", + "spectrogram = 
torch.log(spectrogram)\n", + "\n", + "plt.imshow(spectrogram.squeeze(0), cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ImUfs42quxDW" + }, + "source": [ + "Despite being very informative, the spectrogram is not invertible. When computing it, in fact, we are only using the magnitude of the STFT and not the phase.\n", + "\n", + "The spectrogram is the starting point for computing some popular speech features, such as FilterBanks (FBANKs) and Mel-Frequency Cepstral Coefficients (MFCCs) that are the object of [another tutorial]()." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aUyevcpb7Y2n" + }, + "source": [ + "## References\n", + "\n", + "[1] L. R. Rabiner, Ronald W. Schafer, “Digital Processing of Speech Signals”, Prentice-Hall, 1978\n", + "\n", + "[2] S. K. Mitra Digital Signal Processing: A Computer-Based Approach [slides](http://doctord.webhop.net/courses/bei/ece410/mitra_2e/toc.htm)\n", + "\n", + "[3] \n", + "\n", + "[4] \n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and 
Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1dCY2XHC6NiDA9mor3jd9H9w-XEcD3M4L", + "timestamp": 1612452429167 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/preprocessing/multi-microphone-beamforming.ipynb b/docs/tutorials/preprocessing/multi-microphone-beamforming.ipynb new file mode 100644 index 0000000000..81f354a183 --- /dev/null +++ b/docs/tutorials/preprocessing/multi-microphone-beamforming.ipynb @@ -0,0 +1,940 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + 
"\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/preprocessing/multi-microphone-beamforming.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/preprocessing/multi-microphone-beamforming.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "q5UgWOI8vYoC" + }, + "source": [ + "# Multi-microphone Beamforming\n", + "\n", + "## Introduction\n", + "\n", + "Using a microphone array can be very handy to improve the signal quality (e.g. reduce reverberation and noise) prior to performing speech recognition tasks.\n", + "Microphone arrays can also estimate the direction of arrival of a sound source, and this information can later be used to \"listen\" in the direction of the source of interest.\n", + "\n", + "### Propagation model\n", + "\n", + "We assume the following propagation model for sound:\n", + "\n", + "$x_m[n] = h_m[n] \\star s[n] + b_m[n]$,\n", + "\n", + "where $m$ stands for the microphone index, $n$ for the sample index, and $h_m$ for the room impulse response. The expression $s[n]$ stands for the signal of the speech source, $b_m[n]$ the additive noise and $x_m[n]$ the signal captured at microphone $m$. 
The signals can also be expressed in the frequency domain:\n", + "\n", + "$X_m(t,j\\omega) = H_m(j\\omega)S(t,j\\omega) + B_m(t,j\\omega)$,\n", + "\n", + "or in the vector form:\n", + "\n", + "$\\mathbf{X}(t,j\\omega) = \\mathbf{H}(j\\omega)S(t,j\\omega) + \\mathbf{B}(t,j\\omega)$.\n", + "\n", + "Note that $\\mathbf{X}(t,j\\omega) \\in \\mathbb{C}^{M \\times 1}$.\n", + "\n", + "In the anechoic case, we can substitute $h_m[n] = a_m[n] = \\delta(n-\\tau_m)$, and we write $H_m(j\\omega) = A_m(j\\omega) = e^{-j\\omega\\tau_m}$, where $\\tau_m$ is the time delay for the direct path in samples, or in the vector form $\\mathbf{A}(j\\omega) \\in \\mathbb{C}^{M \\times 1}$.\n", + "\n", + "### Covariance matrices\n", + "\n", + "We also use the following covariance matrices with some beamformers:\n", + "\n", + "$\\displaystyle\\mathbf{R}_{XX}(j\\omega) = \\frac{1}{T}\\sum_{t=1}^{T}\\mathbf{X}(t,j\\omega)\\mathbf{X}^H(t,j\\omega)$\n", + "\n", + "$\\displaystyle\\mathbf{R}_{SS}(j\\omega) = \\frac{1}{T}\\sum_{t=1}^{T}\\mathbf{H}(j\\omega)\\mathbf{H}^H(j\\omega)|S(t,j\\omega)|^2$\n", + "\n", + "$\\displaystyle\\mathbf{R}_{NN}(j\\omega) = \\frac{1}{T}\\sum_{t=1}^{T}\\mathbf{B}(t,j\\omega)\\mathbf{B}^H(t,j\\omega)$\n", + "\n", + "In practice, it is common to use a time-frequency mask to estimate the covariance matrices for speech and noise:\n", + "\n", + "$\\displaystyle\\mathbf{R}_{SS}(j\\omega) \\approx \\frac{1}{T}\\sum_{t=1}^{T}M_S(t,j\\omega)\\mathbf{X}(t,j\\omega)\\mathbf{X}^H(t,j\\omega)$\n", + "\n", + "$\\displaystyle\\mathbf{R}_{NN}(j\\omega) \\approx \\frac{1}{T}\\sum_{t=1}^{T}M_N(t,j\\omega)\\mathbf{X}(t,j\\omega)\\mathbf{X}^H(t,j\\omega)$\n", + "\n", + "### Time Difference of Arrival\n", + "\n", + "The time difference of arrival between microphone $1$ and $m$ can be estimated using the Generalized Cross-Correlation with Phase Transform (GCC-PHAT) with the following expression:\n", + "\n", + "$\\displaystyle\\tau_m = argmax_{\\tau} 
\\int_{-\\pi}^{+\\pi}{\\frac{X_1(j\\omega) X_m(j\\omega)^*}{|X_1(j\\omega)||X_m(j\\omega)|}e^{j\\omega\\tau}}d\\omega$\n", + "\n", + "### Direction of Arrival\n", + "\n", + "#### Steered-Response Power with Phase Transform\n", + "\n", + "SRP-PHAT scans each potential direction of arrival on a virtual unit sphere around the array and computes the corresponding power. For each DOA (denoted by the unit vector $\\mathbf{u}$), there is a steering vector $\\mathbf{A}(j\\omega,\\mathbf{u}) \\in \\mathbb{C}^{M \\times 1}$ in the direction of $\\mathbf{u}$:\n", + "\n", + "$\\displaystyle E(\\mathbf{u}) = \\sum_{p=1}^{M}{\\sum_{q=p+1}^{M}{\\int_{-\\pi}^{+\\pi}{\\frac{X_p(j\\omega)X_q(j\\omega)^*}{|X_p(j\\omega)||X_q(j\\omega)|}}}A_p(j\\omega,\\mathbf{u})A_q(j\\omega,\\mathbf{u})^* d\\omega}$\n", + "\n", + "The DOA with the maximum power is selected as the DOA of sound:\n", + "\n", + "$\\mathbf{u}_{max} = argmax_{\\mathbf{u}}{E(\\mathbf{u})}$\n", + "\n", + "#### Multiple Signal Classification\n", + "\n", + "MUSIC scans each potential direction of arrival on a virtual unit sphere around the array and computes the corresponding power. For each DOA (denoted by the unit vector $\\mathbf{u}$), there is a steering vector $\\mathbf{A}(j\\omega,\\mathbf{u}) \\in \\mathbb{C}^{M \\times 1}$ in the direction of $\\mathbf{u}$. The matrix $\\mathbf{U}(j\\omega) \\in \\mathbb{C}^{M \\times S}$ contains the $S$ eigenvectors that correspond to the $S$ smallest eigenvalues obtained while performing eigendecomposition on $\\mathbf{R}_{XX}(j\\omega)$. 
The power corresponds to:\n", + "\n", + "$\\displaystyle E(\\mathbf{u}) = \\frac{\\mathbf{A}(j\\omega,\\mathbf{u})^H \\mathbf{A}(j\\omega,\\mathbf{u})}{\\sqrt{\\mathbf{A}(j\\omega,\\mathbf{u})^H \\mathbf{U}(j\\omega)\\mathbf{U}(j\\omega)^H\\mathbf{A}(j\\omega,\\mathbf{u})}}$\n", + "\n", + "The DOA with the maximum power is selected as the DOA of sound:\n", + "\n", + "$\\mathbf{u}_{max} = argmax_{\\mathbf{u}}{E(\\mathbf{u})}$\n", + "\n", + "### Beamforming\n", + "\n", + "We apply beamforming in the frequency domain: $Y(j\\omega) = \\mathbf{W}^H(j\\omega)\\mathbf{X}(j\\omega)$.\n", + "\n", + "#### Delay and sum\n", + "\n", + "The delay and sum beamformer aims to align the speech signal to create constructive interference. The coefficients are chosen such that:\n", + "\n", + "$\\mathbf{W}(j\\omega) = \\frac{1}{M} \\mathbf{A}(j\\omega)$.\n", + "\n", + "#### Minimum Variance Distortionless Response\n", + "\n", + "The MVDR beamformer has the following coefficients:\n", + "\n", + "$\\displaystyle\\mathbf{W}(j\\omega) = \\frac{\\mathbf{R}_{XX}^{-1}(j\\omega)\\mathbf{A}(j\\omega)}{\\mathbf{A}^H(j\\omega)\\mathbf{R}_{XX}^{-1}(j\\omega)\\mathbf{A}(j\\omega)}$.\n", + "\n", + "#### Generalized Eigenvalue\n", + "\n", + "The GEV beamformer coefficients correspond to the principal component obtained from generalized eigenvalue decomposition, such that:\n", + "\n", + "$\\mathbf{R}_{SS}(j\\omega)\\mathbf{W}(j\\omega) = \\lambda\\mathbf{R}_{NN}(j\\omega)\\mathbf{W}(j\\omega)$\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HH5Ko_p_1HT2" + }, + "source": [ + "## Install SpeechBrain\n", + "\n", + "Let's first install SpeechBrain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wPcEjLmRvWs_" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": 
"markdown", + "metadata": { + "id": "TpMW386z87VF" + }, + "source": [ + "## Prepare audio\n", + "\n", + "We will then load a speech signal obtained by simulating propagation in air for a 4-microphone array. We will also load diffuse noise (in all direction) and directive noise (can be modeled as a point source in space). The goal here is to mix the reverberated speech with noise to generate the noisy mixture, and test the beamforming methods to enhance speech.\n", + "\n", + "We first download the audio samples to be used:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZmIGszy4ovdm" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.dropbox.com/s/0h414xocvu9vw96/speech_-0.82918_0.55279_-0.082918.flac\n", + "!wget https://www.dropbox.com/s/xlehxo26mnlkvln/noise_diffuse.flac\n", + "!wget https://www.dropbox.com/s/4l6iy5zc9bgr7qj/noise_0.70225_-0.70225_0.11704.flac" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZNAQYH8NprTe" + }, + "source": [ + "We will now load the audio files:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1nxAIphAp3z5" + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "from speechbrain.dataio.dataio import read_audio\n", + "\n", + "xs_speech = read_audio('speech_-0.82918_0.55279_-0.082918.flac') # [time, channels]\n", + "xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]\n", + "xs_noise_diff = read_audio('noise_diffuse.flac') # [time, channels]\n", + "xs_noise_diff = xs_noise_diff.unsqueeze(0) # [batch, time, channels]\n", + "xs_noise_loc = read_audio('noise_0.70225_-0.70225_0.11704.flac') # [time, channels]\n", + "xs_noise_loc = xs_noise_loc.unsqueeze(0) # [batch, time, channels]\n", + "fs = 16000 # sampling rate\n", + "\n", + "plt.figure(1)\n", + "plt.title('Clean signal at microphone 1')\n", + "plt.plot(xs_speech.squeeze()[:,0])\n", + "plt.figure(2)\n", + "plt.title('Diffuse noise at 
microphone 1')\n", + "plt.plot(xs_noise_diff.squeeze()[:,0])\n", + "plt.figure(3)\n", + "plt.title('Directive noise at microphone 1')\n", + "plt.plot(xs_noise_loc.squeeze(0)[:,0])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-D_V_zwMKxn_" + }, + "source": [ + "We can listen to the reverberated speech:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jQrxqNUDK7sB" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_speech.squeeze()[:,0],rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sAR6qZIeLosc" + }, + "source": [ + "We now mix reverberated speech with noise to create the noisy multichannel mixture:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "leHtExv6LkUg" + }, + "outputs": [], + "source": [ + "ss = xs_speech\n", + "nn_diff = 0.05 * xs_noise_diff\n", + "nn_loc = 0.05 * xs_noise_loc\n", + "xs_diffused_noise = ss + nn_diff\n", + "xs_localized_noise = ss + nn_loc" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F_rctgfSMapW" + }, + "source": [ + "We can look at the noisy mixture:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XJqEPdMUMZvt" + }, + "outputs": [], + "source": [ + "plt.figure(1)\n", + "plt.title('Microphone 1 (speech + diffused noise)')\n", + "plt.plot(xs_diffused_noise.squeeze()[:,0])\n", + "plt.figure(2)\n", + "plt.title('Microphone 1 (speech + directive noise)')\n", + "plt.plot(xs_localized_noise.squeeze()[:,0])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MVNz-7hWLzZu" + }, + "source": [ + "We can listen to the noisy mixture:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "315oSXLuL57U" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_diffused_noise.squeeze()[:,0],rate=fs)" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gMkdjhyYM5KI" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_localized_noise.squeeze()[:,0],rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QAokkFKbNKCa" + }, + "source": [ + "## Processing\n", + "\n", + "### Steered-Response Power with Phase Transform\n", + "\n", + "STFT will convert the signals in the frequency domain, and then covariance will compute the covariance matrix for each frequency bin. The SRP-PHAT module will return the direction of arrival. We need to provide the microphone array geometry, which in this example is a circular array with four microphones uniformly spaced and a diameter of 0.1m. The system estimates the DOA for each STFT frame. In this example we use a sound source that comes from direction $x=-0.82918$, $y=0.55279$ and $z=-0.082918$. We see from the results that the direction is quite accurate (there is a slight difference due to the sphere discretization). 
Also note that as all microphones lie on the $xy$-plane, the system cannot distinguish from the positive $z$-axis and negative $z$-axis.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vXuwwIwlBF2C" + }, + "outputs": [], + "source": "from speechbrain.dataio.dataio import read_audio\nfrom speechbrain.processing.features import STFT\nfrom speechbrain.processing.multi_mic import Covariance\nfrom speechbrain.processing.multi_mic import SrpPhat\n\nimport torch\n\nmics = torch.zeros((4,3), dtype=torch.float)\nmics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])\nmics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])\nmics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\nmics[3,:] = torch.FloatTensor([+0.05, -0.05, +0.00])\n\nstft = STFT(sample_rate=fs)\ncov = Covariance()\nsrpphat = SrpPhat(mics=mics)\n\nXs = stft(xs_diffused_noise)\nXXs = cov(Xs)\ndoas = srpphat(XXs)\n\nprint(doas)" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Xau4s6U0Fih-" + }, + "source": [ + "### Multiple Signal Classification\n", + "\n", + "STFT will convert the signals in the frequency domain, and then covariance will compute the covariance matrix for each frequency bin. The MUSIC module will return the direction of arrival. We need to provide the microphone array geometry, which in this example is a circular array with four microphones uniformly spaced and a diameter of 0.1m. The system estimates the DOA for each STFT frame. In this example we use a sound source that comes from direction $x=-0.82918$, $y=0.55279$ and $z=-0.082918$. We see from the results that the direction is quite accurate (there is a slight difference due to the sphere discretization). 
Also note that as all microphones lie on the $xy$-plane, the system cannot distinguish from the positive $z$-axis and negative $z$-axis.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ug7Qub43Fq5r" + }, + "outputs": [], + "source": "from speechbrain.dataio.dataio import read_audio\nfrom speechbrain.processing.features import STFT\nfrom speechbrain.processing.multi_mic import Covariance\nfrom speechbrain.processing.multi_mic import Music\n\nimport torch\n\nmics = torch.zeros((4,3), dtype=torch.float)\nmics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])\nmics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])\nmics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])\nmics[3,:] = torch.FloatTensor([+0.05, -0.05, +0.00])\n\nstft = STFT(sample_rate=fs)\ncov = Covariance()\nmusic = Music(mics=mics)\n\nXs = stft(xs_diffused_noise)\nXXs = cov(Xs)\ndoas = music(XXs)\n\nprint(doas)" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JbLs2iHNBlW9" + }, + "source": [ + "\n", + "\n", + "### Delay-and-Sum Beamforming\n", + "\n", + "STFT will convert the signals in the frequency domain, and then covariance will compute the covariance matrix for each frequency bin. The GCC-PHAT module will estimate the Time Difference of Arrival (TDOA) between each microphone, and use this TDOA to perform delay and sum." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YlDJWGQbUscv" + }, + "source": [ + "#### Speech corrupted with diffuse noise" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "fxCgiowJNPup" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import STFT, ISTFT\n", + "from speechbrain.processing.multi_mic import Covariance\n", + "from speechbrain.processing.multi_mic import GccPhat\n", + "from speechbrain.processing.multi_mic import DelaySum\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import torch\n", + "\n", + "stft = STFT(sample_rate=fs)\n", + "cov = Covariance()\n", + "gccphat = GccPhat()\n", + "delaysum = DelaySum()\n", + "istft = ISTFT(sample_rate=fs)\n", + "\n", + "Xs = stft(xs_diffused_noise)\n", + "XXs = cov(Xs)\n", + "tdoas = gccphat(XXs)\n", + "Ys_ds = delaysum(Xs, tdoas)\n", + "ys_ds = istft(Ys_ds)\n", + "\n", + "plt.figure(1)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.imshow(torch.transpose(torch.log(Xs[0,:,:,0,0]**2 + Xs[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(2)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.plot(xs_diffused_noise.squeeze()[:,0])\n", + "plt.figure(3)\n", + "plt.title('Beamformed signal')\n", + "plt.imshow(torch.transpose(torch.log(Ys_ds[0,:,:,0,0]**2 + Ys_ds[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(4)\n", + "plt.title('Beamformed signal')\n", + "plt.plot(ys_ds.squeeze())\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "M0toyEtcQ-NO" + }, + "source": [ + "We can also listen to the beamformed signal and compare with the noisy signal." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9Voy03xBQ_kJ" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_diffused_noise.squeeze()[:,0],rate=fs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "buI16SQ7Q_V7" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(ys_ds.squeeze(),rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KfV7jVPTSdqZ" + }, + "source": [ + "#### Speech corrupted with directive noise\n", + "\n", + "When we have directive noise, this is more tricky as GCC-PHAT can capture the TDOAs from the noise source. For now we will simply assume we know the TDOAs, but ideal binary mask could be applied to differentiate the speech TDOAs from the noise TDOAs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "aIW_9BszS1sW" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import STFT, ISTFT\n", + "from speechbrain.processing.multi_mic import Covariance\n", + "from speechbrain.processing.multi_mic import GccPhat\n", + "from speechbrain.processing.multi_mic import DelaySum\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import torch\n", + "\n", + "stft = STFT(sample_rate=fs)\n", + "cov = Covariance()\n", + "gccphat = GccPhat()\n", + "delaysum = DelaySum()\n", + "istft = ISTFT(sample_rate=fs)\n", + "\n", + "Xs = stft(xs_diffused_noise)\n", + "XXs = cov(Xs)\n", + "tdoas = gccphat(XXs)\n", + "\n", + "Xs = stft(xs_localized_noise)\n", + "XXs = cov(Xs)\n", + "Ys_ds = delaysum(Xs, tdoas)\n", + "ys_ds = istft(Ys_ds)\n", + "\n", + "plt.figure(1)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.imshow(torch.transpose(torch.log(Xs[0,:,:,0,0]**2 + Xs[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(2)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.plot(xs_diffused_noise.squeeze()[:,0])\n", + 
"plt.figure(3)\n", + "plt.title('Beamformed signal')\n", + "plt.imshow(torch.transpose(torch.log(Ys_ds[0,:,:,0,0]**2 + Ys_ds[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(4)\n", + "plt.title('Beamformed signal')\n", + "plt.plot(ys_ds.squeeze())\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Xn3H2PxdQj4u" + }, + "source": [ + "We can also listen to the beamformed signal and compare with the noisy signal." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tOQKGWVhQr26" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_localized_noise.squeeze()[:,0],rate=fs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xIxw9lCjQzSV" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(ys_ds.squeeze(),rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9i88nwH4Q9qs" + }, + "source": [ + "### Minimum Variance Distortionless Response\n", + "\n", + "STFT will convert the signals in the frequency domain, and then covariance will compute the covariance matrix for each frequency bin. 
The GCC-PHAT module will estimate the Time Difference of Arrival (TDOA) between each microphone, and use this TDOA to perform MVDR beamforming.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WbyyjssjUcCG" + }, + "source": [ + "#### Speech corrupted with diffuse noise" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nGwSJB65RegM" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import STFT, ISTFT\n", + "from speechbrain.processing.multi_mic import Covariance\n", + "from speechbrain.processing.multi_mic import GccPhat\n", + "from speechbrain.processing.multi_mic import Mvdr\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import torch\n", + "\n", + "stft = STFT(sample_rate=fs)\n", + "cov = Covariance()\n", + "gccphat = GccPhat()\n", + "mvdr = Mvdr()\n", + "istft = ISTFT(sample_rate=fs)\n", + "\n", + "Xs = stft(xs_diffused_noise)\n", + "Nn = stft(nn_diff)\n", + "NNs = cov(Nn)\n", + "XXs = cov(Xs)\n", + "tdoas = gccphat(XXs)\n", + "Ys_mvdr = mvdr(Xs, NNs, tdoas)\n", + "ys_mvdr = istft(Ys_mvdr)\n", + "\n", + "plt.figure(1)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.imshow(torch.transpose(torch.log(Xs[0,:,:,0,0]**2 + Xs[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(2)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.plot(xs_diffused_noise.squeeze()[:,0])\n", + "plt.figure(3)\n", + "plt.title('Beamformed signal')\n", + "plt.imshow(torch.transpose(torch.log(Ys_mvdr[0,:,:,0,0]**2 + Ys_mvdr[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(4)\n", + "plt.title('Beamformed signal')\n", + "plt.plot(ys_mvdr.squeeze())\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qf0vgNEKSSoG" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_diffused_noise.squeeze()[:,0],rate=fs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": 
{ + "id": "8DivcNLlSWOW" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(ys_mvdr.squeeze(),rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xpD8JemOTH8f" + }, + "source": [ + "#### Speech corrupted with directive noise\n", + "Once again, when we have directive noise, this is more tricky as GCC-PHAT can capture the TDOAs from the noise source. For now we will simply assume we know the TDOAs, but ideal binary mask could be applied to differentiate the speech TDOAs from the noise TDOAs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jbYio7uGTUQ7" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import STFT, ISTFT\n", + "from speechbrain.processing.multi_mic import Covariance\n", + "from speechbrain.processing.multi_mic import GccPhat\n", + "from speechbrain.processing.multi_mic import Mvdr\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import torch\n", + "\n", + "stft = STFT(sample_rate=fs)\n", + "cov = Covariance()\n", + "gccphat = GccPhat()\n", + "mvdr = Mvdr()\n", + "istft = ISTFT(sample_rate=fs)\n", + "\n", + "Xs = stft(xs_diffused_noise)\n", + "Nn = stft(nn_loc)\n", + "XXs = cov(Xs)\n", + "NNs = cov(Nn)\n", + "tdoas = gccphat(XXs)\n", + "\n", + "Xs = stft(xs_localized_noise)\n", + "Ys_mvdr = mvdr(Xs, NNs, tdoas)\n", + "ys_mvdr = istft(Ys_mvdr)\n", + "\n", + "plt.figure(1)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.imshow(torch.transpose(torch.log(Xs[0,:,:,0,0]**2 + Xs[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(2)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.plot(xs_diffused_noise.squeeze()[:,0])\n", + "plt.figure(3)\n", + "plt.title('Beamformed signal')\n", + "plt.imshow(torch.transpose(torch.log(Ys_mvdr[0,:,:,0,0]**2 + Ys_mvdr[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(4)\n", + "plt.title('Beamformed signal')\n", + "plt.plot(ys_mvdr.squeeze())\n", + "plt.show()" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "A7LCNEA2Schg" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_localized_noise.squeeze()[:,0],rate=fs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "B3QdXk1NSoPy" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(ys_mvdr.squeeze(),rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oSwWR_VOTnBf" + }, + "source": [ + "### Generalized Eigenvalue Beamforming\n", + "\n", + "STFT will convert the signals in the frequency domain, and then covariance will compute the covariance matrix for each frequency bin. We assume we can compute the covariance matrix for speech and noise, respectively, and use it for beamforming. The covariance matrix can be estimated using ideal binary masks.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EifFtuy3U2ak" + }, + "source": [ + "#### Speech corrupted with diffuse noise" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c_h1T92uU49p" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import STFT, ISTFT\n", + "from speechbrain.processing.multi_mic import Covariance\n", + "from speechbrain.processing.multi_mic import Gev\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import torch\n", + "\n", + "stft = STFT(sample_rate=fs)\n", + "cov = Covariance()\n", + "gccphat = GccPhat()\n", + "gev = Gev()\n", + "istft = ISTFT(sample_rate=fs)\n", + "\n", + "Xs = stft(xs_diffused_noise)\n", + "Ss = stft(ss)\n", + "Nn = stft(nn_diff)\n", + "SSs = cov(Ss)\n", + "NNs = cov(Nn)\n", + "Ys_gev = gev(Xs, SSs, NNs)\n", + "ys_gev = istft(Ys_gev)\n", + "\n", + "plt.figure(1)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.imshow(torch.transpose(torch.log(Xs[0,:,:,0,0]**2 + Xs[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + 
"plt.figure(2)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.plot(xs_localized_noise.squeeze()[:,0])\n", + "plt.figure(3)\n", + "plt.title('Beamformed signal')\n", + "plt.imshow(torch.transpose(torch.log(Ys_gev[0,:,:,0,0]**2 + Ys_gev[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(4)\n", + "plt.title('Beamformed signal')\n", + "plt.plot(ys_gev.squeeze())\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "T_hyaz0JVHD4" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_localized_noise.squeeze()[:,0],rate=fs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0C42BoRZVKKR" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(ys_gev.squeeze(),rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kKFqwp0nVA5J" + }, + "source": [ + "#### Speech corrupted with directive noise" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6Tha-6I7UER4" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import STFT, ISTFT\n", + "from speechbrain.processing.multi_mic import Covariance\n", + "from speechbrain.processing.multi_mic import Gev\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import torch\n", + "\n", + "stft = STFT(sample_rate=fs)\n", + "cov = Covariance()\n", + "gccphat = GccPhat()\n", + "gev = Gev()\n", + "istft = ISTFT(sample_rate=fs)\n", + "\n", + "Xs = stft(xs_localized_noise)\n", + "Ss = stft(ss)\n", + "Nn = stft(nn_loc)\n", + "SSs = cov(Ss)\n", + "NNs = cov(Nn)\n", + "Ys_gev = gev(Xs, SSs, NNs)\n", + "ys_gev = istft(Ys_gev)\n", + "\n", + "plt.figure(1)\n", + "plt.title('Noisy signal at microphone 1')\n", + "plt.imshow(torch.transpose(torch.log(Xs[0,:,:,0,0]**2 + Xs[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(2)\n", + "plt.title('Noisy signal at microphone 1')\n", + 
"plt.plot(xs_localized_noise.squeeze()[:,0])\n", + "plt.figure(3)\n", + "plt.title('Beamformed signal')\n", + "plt.imshow(torch.transpose(torch.log(Ys_gev[0,:,:,0,0]**2 + Ys_gev[0,:,:,1,0]**2), 1, 0), origin=\"lower\")\n", + "plt.figure(4)\n", + "plt.title('Beamformed signal')\n", + "plt.plot(ys_gev.squeeze())\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qwYFvqNPTtlJ" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(xs_localized_noise.squeeze()[:,0],rate=fs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BJXl4ZGJT5rz" + }, + "outputs": [], + "source": [ + "from IPython.display import Audio\n", + "Audio(ys_gev.squeeze(),rate=fs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", 
+ " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/tutorials/preprocessing/speech-augmentation.ipynb b/docs/tutorials/preprocessing/speech-augmentation.ipynb new file mode 100644 index 0000000000..49ef9f34f2 --- /dev/null +++ b/docs/tutorials/preprocessing/speech-augmentation.ipynb @@ -0,0 +1,1150 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/preprocessing/speech-augmentation.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/preprocessing/speech-augmentation.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lQucKemQUYbA" + }, + "source": [ + "# 
Speech Augmentation\n", + "\n", + "A popular saying in machine learning is \"*there is no better data than more data*\". However, collecting new data can be expensive, and we must cleverly use the available dataset.\n", + "\n", + "One popular technique is called **speech augmentation**. The idea is to artificially corrupt the original speech signals to give the network the \"*illusion*\" that we are processing a new signal. This acts as a powerful *regularizer*, which normally helps neural networks improve generalization and achieve better performance on test data.\n", + "\n", + "SpeechBrain currently supports various augmentations:\n", + "\n", + "1. Speed Perturbation\n", + "2. Time Dropout (Chunk Drop)\n", + "3. Frequency Dropout (Freq Drop)\n", + "4. Clipping\n", + "5. Random Amplitude\n", + "6. Channel Dropout (for multi-channel data)\n", + "7. Channel Swap (for multi-channel data)\n", + "8. CutCat (for multi-channel data)\n", + "9. Drop Bit Resolution\n", + "10. Add Noise\n", + "11. Add Reverberation\n", + "\n", + "\n", + "These last two items are covered [here](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/environmental-corruption.html).\n", + "\n", + "This tutorial will cover some of these augmentation techniques implemented in `speechbrain.augment`. 
Let's get started by installing SpeechBrain.\n", + "\n", + "\n", + "\n", + "First of all, let's install SpeechBrain.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oi2OhKeCUQmZ" + }, + "outputs": [], + "source": [ + "%%capture\n", + "\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "E0h43pIInzjN" + }, + "source": [ + "Let's also download a speech signal for testing:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "LK7nUxsGm1_9" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.dropbox.com/s/u8qyvuyie2op286/spk1_snt1.wav" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib inline\n", + "%config InlineBackend.figure_format = \"jpg\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eptguoupeF1M" + }, + "source": [ + "## 1. Speed Perturbation\n", + "With Speed perturbation, we resample the audio signal to a sampling rate that is a bit different from the original one. With this simple trick we can synthesize a speech signal that sounds a bit \"*faster*\" or \"*slower*\" than the original one. 
Note that not only the speaking rate is affected, but also the speaker characteristics such as pitch and formants.\n", + "\n", + "Let's now try it!\n", + "\n", + "First, let's read a speech signal" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 452 + }, + "executionInfo": { + "elapsed": 12499, + "status": "ok", + "timestamp": 1704405859476, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "LFvVBlSbiBP9", + "outputId": "c63aae82-e064-4e35-96d7-0b13b77eb59b" + }, + "outputs": [ + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGzAjgDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACisPxX4iXw1o63S2zXd3cTpa2lsrbTNM5wq57DqSfQGsG/8AEXirwtFBqXiSDSJ9JaVIrp9PEivabyFDneSHUEgHG085xQB3VFc/qXjbw9pOpTadeX5W9hVGa3SCSR8PkqQFU5HByRnHfGRSWOuWlrba/e3+uxz2tjfOkjyQeSLMBEPlZ/jxuzu5zvx2oA6Giue0vxx4d1m/gsbHUDJdzq7xwvBJG5VQCSQyjAwwwTjPOM4NVrLxXpemeHYb/VvEKXMM19NbJePbGFd4kfEZUDjaEK7jgHbnPNAHVUVjaN4q0XxAboabe+a9rgzo8TxMgOSCVcA4ODg4wcVX0zxz4c1jUYrCy1HdczAtCskEkYlAGTsZ1Afjn5SeKAOhorm/D2qXt94l8U2lzNvgsbyKK3TaBs
VoEcjIGT8zE8561Druv6svii08N6HHYpeTWj3klxf7jGsasF2qqkFmJPqMAUAdVRWAmr3mhaDc6h4vn0+BYJQDNZLIUKMVVSVILA7mIxyOhz1w7SPGXh/XdQew03UUmuVTzAhjdN6ZxuQsAHX3XIoA3aK5y48eeGLXVG06bVY1uElEDny3MaSHjY0gGxWzxgnNWta8V6L4elih1O98uaVS6QxxPLIVHVtqAnHvjFAGzRWRJ4o0OLw+uvPqduNLdQy3O7Ktk4AHcnPGOueMVzfifxzBL4C1jVfDl863VkIvmktmRo90ijlJVHBGecUAd3RWHpXjHQNb1J9O0/UVmulQyBPLdQ6A4LIzABxnupIrcoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKQkKMkgD3oAWim70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQByHxFsryXTNJ1SytpLp9H1SG/kt4hl5I13K4Ud2AYkD2rC8ZeJ9L8ceGpfDHhu4OoX+ptHEwjibFrHvBeSXIGwAA8HnJHFemb0/vL+dG9P7y/nQBxehwR/8AC2vFUu0GRLCxRWPUA+bkfjtH5VymoQSy6L4puFgkuLez8YRXd3BGu4yQRrbs4298DnHtXr+9P7y/nVbUI5rmwmhs74Wdyy4jnCLJsPrtPB+lAHnw8QaR4j+LXhq50iQXMaWF4r3SIQjf6shASOSuckdt4rGgiSbwf4TSRA6Hxk5KkZBxcXBrvNI8NXcHiD+3Nb1wapex25trcJbLBFCjEFsKGbLEqOSegrp96f3l/OgDhtTmuLT4nX9xZQCe6Twwzxxf89XWYlV/M4/GuIfXpdZvvB08nii61S8bVrZ7mzWxSKGzYhgVJCBg3JUAsSRk9q9w3p/eX86N6f3l/OgDkfCf/I4+N/8AsIQf+k0VReOW8ETz2tp4uCRSIpltrlxJHsycHbKuNp4HGeeODXZ70/vL+dG9P7y/nQB4bqU9zP8ACrxnFFcX2o6BHd2y6VPeljJKnmRb1DMAWQNwpNdbLrOneKvHfhRdB3yPpjTzXjCFk+yxtEUEb5AwxYqNvX5faus8V6Inifw3c6R9sW289o283Zv27JFfpkZztx171s70/vL+dAHhGmw2dt4Tn8L+JvGWp6bcBpYbrS/sUTGTLsd0Z8ou4bIYMCTk1v6wknh/x/fXWo+Jr/RLK7sbaO1vlt45El8sMGjd3RtrZO7HGdx9K9Y3p/eX86N6f3l/OgDySSysNK0fw9rVpeXur6Ja65LfXs8lvgqXVx5oRVHyLId2VXvkcVq+P/EWjeJfhbr7aVepeRRrCsjxg7eZV4yRg/h0r0ben95fzo3p/eX86AOP8QokXxB8DiNFUB71AFGML5GcfTgflXZU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUUUUAFFFFABRRRQAUUUUAFcv4/t4brwzFb3ESTQy6np6SRyKGV1N3ECCD1BBxXUVznjf8A5ANt/wBhXTv/AEshoAX/AIQHwd/0Kuif+AEX/wATR/wgPg7/AKFXRP8AwAi/+JroqKAOd/4QHwd/0Kuif+AEX/xNH/CA+D
v+hV0T/wAAIv8A4muiooA53/hAfB3/AEKuif8AgBF/8TR/wgPg7/oVdE/8AIv/AImuiooA53/hAfB3/Qq6J/4ARf8AxNH/AAgPg7/oVdE/8AIv/ia6KigDnf8AhAfB3/Qq6J/4ARf/ABNH/CA+Dv8AoVdE/wDACL/4muiprusa7nYKvTJOBQBz/wDwgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRUUAc7/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRUUAc7/wgPg7/AKFXRP8AwAi/+Jo/4QHwd/0Kuif+AEX/AMTXRUUAc7/wgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRUUAc7/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRUUAc7/wgPg7/AKFXRP8AwAi/+Jo/4QHwd/0Kuif+AEX/AMTXRUUAc7/wgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRUUAc7/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRUUAc7/wgPg7/AKFXRP8AwAi/+Jo/4QHwd/0Kuif+AEX/AMTXRUUAc7/wgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRUUAc7/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRUUAc7/wgPg7/AKFXRP8AwAi/+Jo/4QHwd/0Kuif+AEX/AMTXRUUAc7/wgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRUUAc7/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRUUAc7/wgPg7/AKFXRP8AwAi/+Jo/4QHwd/0Kuif+AEX/AMTXRUUAc7/wgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRUUAc7/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRUUAc7/wgPg7/AKFXRP8AwAi/+Jo/4QHwd/0Kuif+AEX/AMTXRUUAc7/wgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaxPFPhHw1pmm2d5YeH9LtbmPVdP2TQWkaOubuIHBAyMgkfjXe1znjf/AJANt/2FdO/9LIaAOjooooAKKKKACiiigAooooAK5zxv/wAgG2/7Cunf+lkNdHXOeN/+QDbf9hXTv/SyGgDo6KKKACiiigAooooAKKKKACuH+Kkjp4VgVWID3iKwHcbXP8wK7ivNvixqkYtrPSPLbzS4uS/YKAy4+vJ/KujCJutGxzYySVGVz0miqel6jFqum295EV/exo7IGDFCyhtp9+auVg007M6E01dBRRRSGFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAVznjf/kA23/YV07/ANLIa6Ouc8b/APIBtv8AsK6d/wClkNAHR0UUUAFFFFABRQSACScAVwmneL9Zni0XWLu3sRous3KwQRRq4nhD58p2Yna27AyABjcOTigDu6KwvF9/q+l+HLzUdINj5lpBJcSC7R2BVELYAU
jk49apeIvEGp6f4V07UbGOI3N1LbpIWtZLhY1f7zCNGDNjrgGgDqq5zxv/AMgG2/7Cunf+lkNWPC+qvq+lvcSalZ30izNGz2trJbiMgD5GjkdmVh744I49cTxp4i0SWC30mPWLB9SGr6eptFuEMoIu4iRszngAnpQB21FFFABRRRQAUUUUAFFFFABXjfxTkZvFcak8JaoB/wB9Mf617JXhvxEd38cX6sxIQRqoJ6Dy1OB+JJ/Gu7L1er8jgzF2o/M6r4RuptNVjz8weNiPYhv8DXpFeY/CL72sfSH/ANnr06s8Z/Hl/XQ0wP8AAj/XUKKKK5TrCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv/wAgG2/7Cunf+lkNdHXOeN/+QDbf9hXTv/SyGgDo6KKKACiiigAIBBBGQa4XTvCGswRaLo91cWJ0XRrlZ4JY2czzhN3lIykbV25GSCc7RwM13VFAHM67ZeJNU8F3GnJFpLaneQS29wTPJHCiurLuQ7GYkZXggd+als08UWnh21hFno51CArGYzdy+U8QXGd/lZDZxxtIx3roaKAMDw5o19YXWralqclub3U51leK1yYolRFRVBYAscLksQM56cVF43Uf2FbHAz/aunc4/wCnyGukrnPG/wDyAbb/ALCunf8ApZDQB0dFFFABRRRQAUUUUAFFFFABXhfxC/5HnUf+2X/opK90r568TyPL4q1ZpHZmF3KoLHPAYgD8AAK9DLl+8b8jzczf7tLzOz+Ec8S3Wq25cCV0jdV9VUsCf/Hl/OvUq8e+FP8AyNNz/wBeT/8AoaV7ASAQCQM8D3rPHK1ZmuAd6CFooorjO0KKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACiiigAooooAKKKKACuc8b/8AIBtv+wrp3/pZDXR1znjf/kA23/YV07/0shoA6OiiigAooooAKKKKACiiigAr5z1u4ju9e1G5hOYprqWRD6gsSK+in+430NfM1enlq1k/Q8rM3pFepp6DrEmhan9vhAMqxuqAjIyQQM+1dRFr91rL+ETczPJcR6mySucDcd8ZHT2bH51wlTWk32e8gnyw8uRXyvXg54r0KlKMnzdf+H/zPOp1pRXL0/4K/wAj6Vooor5w+mCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIaAOjooooAKKKKACiiigAooooAK5zxv/AMgG2/7Cunf+lkNdHXOeN/8AkA23/YV07/0shoA6OiiigAooooAxPDmr3GrHVftAjH2XUJbaPYMfIuMZ9+a2686+Ft9cX02uyzPnzJknKj7ody+4ge+B+Qr0WtsRDkqOJhhp+0pKQUUUVibjX/1bfQ18zV9A+Kdej8O6HJeum+RiI4U5wzkHGfQYBP4V8/V62XRajKXRnj5nJOUY9UFFFFekeWe8eD/E0HiHTQitK11axRLctIoG5yvJGPcN6V0deY/CHrrA/wCuP/tSvTq+exMFCq4x2PpMLUdSipS3CiiisDoCiiigDlLjx7psetQaVBHJNcve/ZJQRt8vnbu9xn+R9s9XXhf/ADVH/uNf+1q9E8XeLz4e1bSoYpYXhd2N5H951T5QDgcjqxHriu2thknGNPdo4KOKbjKVTZOx2F
FYvh3xPp/iaCaSyEqtCQJI5VwVznB4JHOD3rarklFxfLLc7YyjNc0XdBRRRUlBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXOeN/wDkA23/AGFdO/8ASyGujrnPG/8AyAbb/sK6d/6WQ0AdHRRRQAUUUUAFcfp/jia8l066l0gw6LqlwbeyvftAZ2Y7thePb8qvtODuPUZAzXXsAylT0Iwa870zQNeFh4d8OXWnCKz0W6jlfUfPRknjhz5YRAdwY/LncABg8nigDq/FGsX2g6JcalZaYl+LeN5pUa5EIVFUsTna2Tx0xUeqeJDp+i6ddxWTXF3qMkUNraiQLukdS2CxHAChiTjovSqfiObWtR8CXkEXh+d9Rv7aa2NpHcwnySyMoZnZlUjp0yeelUriy1m+8P6Ddpo8tvqOjXccv2KeeLdOqxNG21lZlGQ5IyRyvOM0Abug67Lqst/Z3tl9i1GwkWO4gWXzUwyhlZXwMqQe4ByCMVh+NPEWiSwW+kx6xYPqQ1fT1NotwhlBF3ESNmc8AE9K0vDVhqH9ra1reo2hspNReJYrVpFd4440wCxUldxJY4BOBjmjxuo/sK2OBn+1dO5x/wBPkNAHSUUUUAFRXM6WtrNcSZ8uJC7YHOAMmpazfEMnleGtUkxnbaSnH/ADTirtImTtFs4T4Q/8xn/th/7Ur02vMvhD/wAxn/th/wC1K9Nrpxv8eXy/I5sD/u8fn+bMDUNZuLbxjo+kx+X5F1HK0uRlvlUkY9ORW/XmMkhHxwXJJAIUDPTNv/8AXr06orwUFG3VXLoVHNzv0bX5HC/Ff/kVrb/r9T/0B68er1T4t3UiWOmWg2+VLI8rcc5UAD/0M/pXldergVaijyMe7138gooorsOI7j4WSRxeJ7kyOqA2bAFjjJ3pXsLyJHt3uq7jtXccZPoPevmau21bxy2oXmgiTc8Fg8FxcMoGZZQFLYBAxg7h6H8q87FYWVSopI9PCYuNKm4yPZaKKK8g9kKKKKAPn7V7qSy8bX13DjzYNRkkTcMjKyEjP5VkzzSXM8k8zl5ZGLux6sxOSav+I/8AkaNX/wCv2b/0M1mV9LTS5U/I+XqN8zXmd/8ACecrr17b87Xtt554yrAdP+BGvXK8W+GN2tt4wWJlJNzA8SkdiMPk/gh/Ovaa8fHq1Y9rL3ej8woooriO4KKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIaAOjooooAKKKKACiiigAooooAK5zxv/AMgG2/7Cunf+lkNdHXOeN/8AkA23/YV07/0shoA6OiiigArnvHNzJa+C9TkjxuMaxnIzw7BT+hNdDXMfEL/kRtR/7Zf+jUrWjrVj6oyru1KXozmfhD/zGf8Ath/7Ur02vLvhHKiz6tCW/eOsTqPUAsD/AOhD869RrXG/x5fL8jHA/wACPz/M8um/5LeP95f/AEnFeo15dN/yW8f7y/8ApOK9RoxX2P8ACgwn/Lz/ABM8y+L3/MG/7b/+068xr074vf8AMG/7b/8AtOvMa9TBfwI/P8zycd/vEvl+SCiiiuo5AooooA+jdEuJbzQdOup23TTWsUjtjGWKgk4HuavVleGHWTwrpJRgwFnEMg9wgBH5itWvmZq0mj6mDvFMKKKKks+d/Ef/ACNGrf8AX7N/6GazK0/Ef/I0at/1+zf+hmsyvpofCj5ap8bOn+Hv/I86d/21/wDRT17pXgfgm6Wz8Z6XKylg0vlYHq6lAfzavfK8jMV+8T8j2Mtf7prz/wAgooorgPRCiiigAooooAKKKKACiisHxjq91onhue+s9gmV0ALrkcsM8VUIuclFdS
ZyUIuT6G9RWfous2uvact9Z+YImZlxIuDkHFaFJpxdmOMlJXQUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXOeN/8AkA23/YV07/0shro65zxv/wAgG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/9LIa6Ouc8b/8gG2/7Cunf+lkNAHR0UUUAFcl8SblYPBdxGykm4kjjUjsQwbn8FNdbXC/FWQL4Yt48cvdr+itW+GV60fUwxTtRl6HO/Cf/kPX3/Xt/wCzCvXK8j+E/wDyHr3/AK9v/ZhXrla47+MzHL/4CPH9dYr8Y4ipIP2y1HB7FY69grx7Xv8AksUX/X7afyjr2GjFfDT9Awnx1PVnmPxdYbtHXIyBMSP++K8yr0f4t/8AH7pf/XOT+YrzivTwf8CP9dTy8b/Hl/XQKKKK6TkCiiigD3nwJ/yJWmf7jf8AobV0Vcb8MbyW68I+XJtxbXDwpgfw4Dc/ixrsq+crq1WSfc+mw7vSi12CiiisjY+d/Ef/ACNGrf8AX7N/6GazK0/Ef/Iz6t/1+zf+hmsyvpofCj5ap8bNXwz/AMjVpP8A1+Rf+hivoavnnwz/AMjVpP8A1+Rf+hivoavKzH44nrZZ8EvUKK4X4k67d6Pb6WtlNJDM85l3I2AQgHysO4JYcdOK7rORkVxSpuMFN9b/AIHfGopTlBdLfiFFFFZmgUUUUAFFFFAGV4k1d9C8P3WpRxLK8IXCMcA5YL/WvOPG/jKHXdCsYrCTZHK7/aoXA3qV2lfw5OD3x7EV0vxTmki8KRIjlVlu0RwP4htZsH8VB/CvG69XA0Iyiqj3TPIx+InGTprZo9e+FV+k+gXNlkeZbT7sY/hYcfqG/Su9rx/4UE/8JPdDPH2JuP8AgaV7BXJjI8tZ26nZgZuVBX6BRRRXKdYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFZV/4i03TdWtNMuZitzdcoAvAHIBJ7AkEVq14j8R9RTUPF8yR7SlrGtvuVs7iMsfxBYj8K6MLRVafKzmxVd0Ycy3PbUdXRXRgysMhgcgilrxjw38Q73RoUtbuP7TaQwlIo1AVg2eMt6YyK3Yfiykt3BHJpgggaUCWUzFtsZPUAL1HX8PetJ4Gsm7K5nDH0ZJNuzPSqK8nvviDJd+M7KW0uZbfSYpFjkBPyyKW+ZyMeh4B6YzwTivVwQwBBBB5BFY1aEqSXN1N6VeFVvl6C0UUVibBRRRQAVznjf/AJANt/2FdO/9LIa6Ouc8b/8AIBtv+wrp3/pZDQB0dFFFABRRRQAUUVx+n+OJryXTrqXSDDouqXBt7K9+0BnZju2F49vyq+04O49RkDNAHYUVia7rs2mXNhYWNiL3Ub9nEMLTeUgVBl3Z8HAGQOATlhWLfePzZ+HU1BtPhiu11L+zbmC7vBFHby5OS0oVhtxgg45BHSgDta5zxv8A8gG2/wCwrp3/AKWQ1o6HqFxqmmLd3CWKl2Ow2N59piZfUPsX34x+NZvjdh/YVsMjP9q6dxn/AKfIaAOkooooAK4H4sf8i/Zf9fQ/9Aau+rgfix/yL9l/19D/ANAaujC/xonNi/4EjB+E/wDyHr3/AK9f/Zlr1yvHvhUxHii4UHg2bZH/AANK9hrTH/xmZ5f/AAEeO66wb4wxEf8AP9aj8hGK9ir598TzSDxfqkokcSJeSbWDHIwxxg+2BX0FV4yNoU/T/IjBSvOp6/5nlfxb/wCP3S/+ucn8xXnFd58V3Y+JbRCx2CzUhc8Al3yf0H5VwdejhFajE83GO9eQUUUV0HKFFFFAHrHwlJ/snUFycCdSB/wGvQ684+EkytZapCAdySRuT2wQQP8A0E16PXgYz+PI+iwX8CIUUUVzHUfO/iP/AJGfVv8Ar9m/9DNZlaGvSxz+IdTmidXjku5WRlOQwLkgis+vpofCj5afxM1fDP
8AyNWk/wDX5F/6GK9j8W+LIvCtvbMbU3MtwzBE37BhcZOcH1HHvXjnhn/katJ/6/Iv/QxXU/FbUBPrlpYqyMtrCWbHUO55B/BVP41x16SqV4xltY7cPVdLDzlHe6Mvx5rVrr2rWV5aPlTZIHXvG+5iVPuMivZ9NdZNLtHRgytChDA5BG0c183V9AeD72G+8JaY8DZEdukLjuGQBSD+X5EVjjqahTilsjfAVXOrJvdm3RRRXlnqhRRRQAUUUUAcJ8VyP+EYtRkZN4px/wAAevH69W+Lf/IO03/rs/8AIV5TXuYFfuUeBj3+/Zp+Hria28Q6e0Erxs1xGpKMRkFhkH29q+iK+ctE/wCQ9p3/AF9Rf+hCvafBGuXfiDQWvL3y/OE7p8i4GOCP54rnzGDdpLodOWzSvF9TpKKKK8s9YKKKKACiiigAooooAKKKKACue8Za/ceHNES9too5JGnWPEmcYIJPT6frW7PPHbW8k8zhIo1Lux6KAMk14h448Tf8JFqwFu+6wgH7j5SpOVG4kHvkEV1YSi6tTVaI5MZXVKno9XsemXHj7QIbyzt0vFm+0kZkT7kQPQuTjHuOo74rE8aePls7eG20K6ikmmG97hCHEa5xgdsnB69B9QR5NRXowwFOMk9zzZ5hVlFra5774P13/hIPD0N1IQblCYpwBj5x3/EYPHrjtW9Xl3wjukW41S0aQ+Y6xyInPQFgx/8AHlr1GvLxNNU6ritj1cLUdSkpPchu5/stnPcFd3lRs+M9cDNfNjuZHZ2OWYkmvb/iBrNzo3hrfaiMtcS/Z2LjO1WRske/FeHV6GXQtFy7nnZlNOaj2CiiivRPMCvY/AnimbWp5NP8lEtrS1iEZ/iJACtnnB56V45Wz4W1gaH4itb2R5FgUlZhHyWQjkEdx0P4Vz4miqlNrr0OnC1nSqJ9Op9BUUisGUMDkEZFLXz59GFFFFABXOeN/wDkA23/AGFdO/8ASyGujrnPG/8AyAbb/sK6d/6WQ0AdHRRRQAUUUUAIwDKVPQjBrzvTNA14WHh3w5dacIrPRbqOV9R89GSeOHPlhEB3Bj8udwAGDyeK9FooA5DUodbnvdA8Rx6Mxu7NbiG501bmMv5cu3lXJCEgxocEjgnnIpml2eqaTpd5e3Ogm+vNR1OS8ksoZoi1upUKvzOQrMFRc4PVjjOK7KigDmfCGlXlhJrN5dWcenpqN59oisEdW8kCNEJJX5dzFSxAyOeprP8AGnh3RIoLfVo9HsE1I6vp7G7W3QSkm7iBO/GeQSOtdtXOeN/+QDbf9hXTv/SyGgDo6KKKACvPvizIg0SwiLASNclgueSApyf1H516DXmXxe/5g3/bf/2nXTg1evE5ca7UJf11Mn4Vf8jVP/15v/6Elex1458Kv+Rqn/683/8AQkr2OtMf/GM8v/g/M+ePEhz4p1fP/P7N/wChmvoevnfxJ/yNOr/9fs3/AKGa970a5kvdD0+7mIMs9tHI5AwNzKCf51rjl7kH/XQxy9/vKi/rqeV/Ff8A5Gm2/wCvJP8A0N64Wu6+K/8AyNNt/wBeSf8Aob1wtd+F/gxODF/xpBRRRW5zBRRRQB6d8If+Yz/2w/8AalVfF+r39lf+I47e6ljD3NnH8rkbVMTsdvpkqM4qX4Rzxrc6rblgJXSJ1X1VSwJ/8eX86x/HkpXxBrcOBte5tnJ75ELD/wBmNeco3xUr9l+h6blbCRs+r/U9ltmL2sLMckopJ/Cpaz9Blefw7pk0rbpJLSJmb1JQE1oV5MlZtHrxd0mfMlFFFfTnypPZ3Uljew3cJAlhcSISM4YHI/WoWYsxZiST1JNJRRbqO+lgroPCXiFvD1/czZOJbaSNO6rJwVJHflQPxrn6KmcFOPKyoTcJKUdz3TQPEePAltresz9A3myBOv7woOAPpXRm4hEKzNKixNja7HAOen55rzb/AJoZ/n/n5rF1rxYuveCYrSZI4bm3uIlCK+TIoRgWx9f5ivH+q+0k3H
+Zo9n637OKUt+VM9d1O8On6VeXoTebeB5dmcbtqk4z+FU/DGrS654dtNRmjSOWYNuVM4yrFeM/TNP8Sf8AIrav/wBeU3/oBrC+G2oW1z4SgtIpMz2hYTJj7u52ZfzFYKCdFytrc6XNqso30szsKKKKwNzzL4vf8wb/ALb/APtOvMa9O+L3/MG/7b/+068xr3sF/Aj8/wAz57Hf7xL5fkgBIIIOCO9ey/C3/kU5P+vp/wD0Fa8ar2X4W/8AIpyf9fT/APoK1GP/AIPzNMu/jfI7aiiivEPdCiiigDlvH2s32h+HkudPlEUz3Cx7yobAwx4B47VB8OtYutY0S6kvroz3CXTfeIyqlVI/DO7H/wBaq/xU/wCRVg/6/E/9Beuf+E16U1XULHaNssIm3Z6FGxj8d/6V3xpp4RytqefKq44xRb0sesVxPgnxlca9f3en3sY8+PfMkiDC7AwG3HqCevpVLxr4t1PRPFFla20gW0CxzSIFGZPmYFcnoCBWD8LXL+LbljwWtHJx/vpShhrUJTl20HUxN68YR72Z7DWIPEKP4xbQUjVtlr5zyBuVfI+XH+6QfxrbrxHxxeXVj4/1Ka0uZreXEY3xOVOPLTjIrLC0VVk4vsa4qs6MVJdzovilr0EltbaPbuHYv58ro/AA3KFOOvOT7bRXmFFFe1RpKlDlR4des603NhRRRWpid18KP+Rpuf8Aryf/ANDSvYa8e+FH/I03P/Xk/wD6Glew14eP/jHvZf8AwfmcL8V/+RWtv+v1P/QHrx6t7xTqd9ca3qdlNdzSW0d/MyRO5KqdxAx9BwPSsGvUwtN06aTPKxdVVKrkgoooroOYKKKKAPoHwlqc2r+FrG9uAoldWVtvQlWK5/HGa2q4v4X3Ulx4R8p9u23uXiTA7EB+fxc12lfOV48tSS8z6ehLmpRb7BRRRWRqFc543/5ANt/2FdO/9LIa6Ouc8b/8gG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/wDSyGujrnPG/wDyAbb/ALCunf8ApZDQB0dFFFABXmXxe/5g3/bf/wBp16bXj3xS1NrrX4dPwnl2ceQR94s4BOfwC114GLdZPscePklQafWxT+G109v4zt4lCkXEUkTZ7ALv4/FRXst9dpYafc3kisyW8TSsF6kKCTj8q8T+Hv8AyPOnf9tf/RT17D4k/wCRW1f/AK8pv/QDWuOinXS7pfmY4CTVCT7N/keA6ndjUNWvL1UKC4neUKTnbuYnH61794b/AORW0j/ryh/9AFfO9fRHhv8A5FbSP+vKH/0AVtmCtCKRjlrvOTZ5j8V/+Rptv+vJP/Q3rha7r4r/API023/Xkn/ob1wtdeF/gxOTF/xpBRRRW5zBRRRQB1fw81aHSvFMYmR2F2gtUK9mZ1wT7cVF8QSf+E41IZ4zH/6LWsLTrs2Gp2l4Bk28ySgeu0g/0rQ8VapBrXiO61C3V1imEZAcYIwigg/iDWHs7V+fy/yOn2l6HJ2f6M9v8N/8itpH/XlD/wCgCtOud8Czm48FaY7DBCMnXPCuy/0roq8GqrTa8z36TvTi/JHzJRRRX0p8uFFFFABRRRQB0KeKrj/hE5fD0kSfZvLAjZR82/zhJkn0xkflXPUUVMYKN7dS5TlK1+mh9EeJP+RW1f8A68pv/QDXDfCL7usfWH/2eu28SyovhPVXY4VrOUDPqUIH6muI+ER+XWB7w/8As9eNT/3afqj26v8AvVP0Z6bRRRXEdx5l8Xv+YN/23/8AadeY16H8Wrtn1fT7IqAsUBlDdyXbBH/jg/OvPK9/Bq1CP9dT57Gu9eX9dAr2P4VsG8KTAfw3bg/98of6145XsPwo/wCRWuf+v1//AEBKzx/8EvL/AON8juqKKK8Q94KKKKAOG+KpA8Kwe94n/oD15t4SuZbTxbpUkRAZrlIzkZ+VztP6E103xJ165udQutEdIxb200MkbAHdkxknP/fX6Vx+hzrba/
p1w+SkV1E7Y64DA17eGptYez6/qeFiqieJuun6G98RrqSXxvco+CtukaJgdtobn8WNc3aztDFdBZChkiCHBxu+dTj9P0rW8bXaXvjPVJY1YKsoiIbrlFCH8MqawK6KMbUoryRzV5Xqyfmz6S06dbnTbWdJBIrxKwcHO7jrmvFPiF/yPOo/9sv/AEUld/8AC+5muPCbpK5ZYLl448/wrtVsfmxrznxvM1x4z1N2ABEoTj0VQo/QV5+DhyYiUe3+Z6ONqc+HhLv/AJM5+iiivVPICiiigDtPhfcxweLjHISGuLZ448DqwKt/JTXs9eF/D3/kedO/7a/+inr3SvFzBWq/I93LXei/U+d/En/I06v/ANfs3/oZrMrT8Sf8jTq//X7N/wChmsyvXp/AjxanxsKKKKsgKKKKAPR/hTq0ME95pc1wQ9wyvbxYJBIVt59BwF6+leqV4H4Ju0svGelyyKzBpfKwvq4KD9WFe+V4mPhy1brqe7l8+ajZ9AoooriO8K5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIaAOjooooAKKKKACiiigAooooAK5zxv/AMgG2/7Cunf+lkNdHXOeN/8AkA23/YV07/0shoA6OiiigArwv4hf8jzqP/bL/wBFJXuleF/EL/kedR/7Zf8AopK78u/iv0/yPPzL+EvX9GUPCt09n4r0uZJAn+kojMf7rHa36E17h4k/5FbV/wDrym/9ANfPMbtHIsiHDKQQfQivoXxEwbwrqzKQQbGYgjv8hrbHR/eQZhl8v3c4nzxXoa/EC60jwvo9rbRRSXDWrBnkOdgV2ROB7L39q88pzyO6orHIRdqj0GSf5k131KUaluZbHn0q0qd+V7nVePr46jqGk3TlPOl0uB5QnRWbcxHt1rk6UknGSTjgUlOnDkiooVSfPJyfUKKKKszCiiigAoJzRRQB7p8PCD4G08Dt5oP/AH8aunri/hhei58KfZ9m02szJnOd2fmz7dcfhXaV87iFarL1PpsM70Y27I+ZKKKK+iPmQooooAKKKKACiiigD2vxfdi7+GU13GGQTwQSAE8gM6HH5GsD4RMA+sL3IhP5b/8AGtXXv+SOxf8AXlafzjrkvhheG38WeRuwtzA6bc8Ej5h+PB/WvKhG+GqJd/ysexUnbFU2+353PZ6KKjNxCs/kGaMTbN/llhu29M49PevMPUPIviv/AMjTbf8AXkn/AKG9cLXU+P8AWrXW/Enm2bF4oIhBv7OQzEke3NctX0WHTjSimfNYmSlWk0Fev/Chs+GrtfS8Y/8Ajif4V5BXqXwjuJGtdVtiR5cbxyKMd2DA/wDoIrLHK9Fm2Adq6+Z6TRRRXhHvhRRRQB4b8QWA8a6mvcmE/wDkJf8AGuZhk8meOTGdjBseuDXSfEL/AJHnUf8Atl/6KSuYr6Oh/Cj6L8j5mu/30vV/mTXd1Je3s93NjzZ5GkfAwNzHJ/nUNFFapWMm76nqnwkupHsNTsyF8uKVJVOOcuCD/wCgD9a4bxh/yN+q/wDXw1dj8ImIk1dexEJ/9D/xrjvGHPi/Vf8Ar4auGkrYqfp/kd1V3wkPX/MxKKKK7jgCiiigC7pOp3GjapBqFqV86FsjcMggggg/UEivQPDfxLnuNWFtrCwpbzyHZMox5WfuqfUds++Sa8yoBIIIOCOhFY1aEKq95am9LEVKT916Gn4k/wCRp1f/AK/Zv/QzWZU15cveXs91Jy80jSN9Scn+dQ1pFWikZSd5NhRRRVEhRRRQBp+G/wDkaNI/6/Yf/QxX0RXzICQcjg19A+E9VTWPDNjcLKZJViEcxZgW8xRhs/Xrz2INeXmMHpM9bLJr3ofMzvh/rN9rmh3N1qE3myi7dFIUAKu1TgY9ya6uvK/h34o07SbBNLujIs11enY4XKrlUAyfcjFeqVx4qnyVXpZHbhKinSWt31Cuc8b/APIBtv8AsK6d/wClkNdHXOeN/wDkA23/AGFdO/8ASy
Guc6To6KKKACiiigArmbHxxp19fW0KWt9Ha3czwWl/JEoguJFzlVO7dztbBKgHHBNdKw3KRkjIxkV5jpVpqMujeFPCz6VfQ3Wj3kL3dxJAywCODdhlk+6+/wCXAUk/Mc4xQB3Wta/Bov2WNra5u7u7cx29raqGkkIG5iNxCgADJJIHT1qifGmm/wBhx6ksN2zyXP2JbIRf6QbgEgxbScBhgnrjAznHNZuq3c8mq+G/FCaXqRtIEuoLi3+ysbiISbdrmIZYjMWOMnDA1jx6dqUMdv4kbTLsoPEUupNZCMmdbd4TAG8sc7uj7euCe/FAHc6JrtvrkVx5cFxbXFrL5Nxa3KhZIXwGAIBIIIYEEEgg9az/ABuw/sK2GRn+1dO4z/0+Q1F4ViuLnWvEGtyWlxa29/NCtvHcxmORkjjClyh5XJJwDg4A4ql408O6JFBb6tHo9gmpHV9PY3a26CUk3cQJ34zyCR1oA7aiiigCrealY6eFN7e29tvzt86VU3Y64yea8N8bXttqPi/ULm0mWaBiiq69DtRVOPUZB5r2fV/Deka68T6lZidogQjb2UgH/dIzXifi3TLbR/FF9YWgYQRspQMckBlDYz6DOK9LL+Tmdr3t8jy8x5+RXta/zMWu08P+LxD4b1jSdUu5WWSzaOz3KWCnYV2ZHIH3cdhg9K4uuo8O+FotW8Pazq1y8yJZxMYPLIAd1QsQcg8D5fTrXoV1Bx9887Duan7nmcvRRRWxgFFFFABRRRQAUUUUAFFFFAHWeBPFI8Pam0N1IF0+4/1zFWYoQDggD3wDx/KvTtT8Z6LZ6bPcQaha3MqRF0hjnXc56Aex5+vtXkfhXwzP4l1RYR5kdonM86rkIMcD6np+vavQPEHgPQLHQr+9tbORZYbZyg85yNw53cnrx9PavMxMaDrLmvd9j1cLOuqL5bWXc8hooor0zygooooAKKKKACiiigD0u/8AEml6j8KzYxXSLewwQRNA5w5KugJH94YXPGeOtec21zPZ3KXFtK8U0ZyrocEGu7vPB2m6f8Nm1hg81/LDDMsjMQI97LwFHHRsc59eK8/rmwyhaShtdnXinU5oue9kbr+MvET3EU7arPviO5egXOMcrjB/EevrWXcaheXV09zPcyvM4YFyxzhs5HsDk8dOTVcKzMFAJJ6ADrU0lndQymKW2mSQJv2MhB24znHpjnNbKEI7JIwc5y3bZBRRRVmYV1PgTxInh7WWF1IEsblds7bCxUgEqRgZ6nH41y1dF4M8Np4m1lraeSSO2iiMkjR9TyAACQQDk5+gNZVuR03z7G1BzVRcm57Ani3w/JAJV1izCld2GlCtj/dPOfbGam/4SPQ8A/2zp2D0P2pP8axo/hv4ZS2ETWkruFx5zTvuPvwQufwqb/hX3hfYqnSxx38+TP8A6FXiNYfo3+B7qeI6pfezTPiPQwSDrOnAjt9qT/GnHX9GDhDq9gGJwB9pTJ/Wsv8A4QDwvu3f2Wuc5/10mPy3U5vAXhhpA50pMg54lcD8t2KVqHd/cv8AMd6/Zfe/8jyrx5PFceNdRlglSWMmMB0YMCRGoPI9wRXOVveNLC203xbfWlnEIoEKFUBJAyik9fcmsGvdo29nG3ZHgVr+1lfuwooorQyOq8B+I4PD2syteyOtpPFtfau7DA5UkDn1H41z+p3S3+rXl4qlVnneUA9QGYn+tafhLw9/wkutfYmn8mNIzLIwGTtBAwPfJFZN5ayWN9cWcpUywStE+05GVODj8qxioe1bW9kbyc/ZJP4buxBRRRWxgFFFFABRRRQAUUUUAFFFFABRRRQAV3Xw38R2Gi3V3a6hL5KXWzZI33FYZGD6Zz16cc1wtdZ4O8GT+Ip0upxs0xHKyOGwzkAHao/Ec/XvWGIUHTam9DowzmqqdNXZyeTjHavpOw3/ANnWvmHL+Um456nAzXzZX0bo05utD0+4K7TLbRvtznGVBrjzLaJ25Y/el8i9XOeN/wDkA23/AGFdO/
8ASyGujrnPG/8AyAbb/sK6d/6WQ15R650dFFFABRRRQAUUUUAFFFFABXOeN/8AkA23/YV07/0shro65zxv/wAgG2/7Cunf+lkNAHR0UUUAFeJ/EuBYfGUzqSTNFG7Z7HG3j8FFe2V4x8UP+RuH/Xsn8zXdl/8AF+RwZj/B+Zxde931hFpngG9s4o1RYtOkUhQOSIzknHUk968IgglubiK3hQvLK4RFHVmJwBX0pLFHPC8MqK8cilXVhkMDwQa6MwlZwObLoXU/67nzPRRXrHhvwnpniHwbok92jCWBpPmTgunmudjeo/lXZWrRpJSkcVChKtJxjueT0V0njbw/a+HNZhs7V5HR7cSkyEZyXYY/ICubrSE1OKktmZzg4ScXugoooqiAooooAKKKc6PG2HVlJAOCMcEZB/EEGgZ7D8LtNa18OSXrOCLyQsqjsFJXn3yD+ldx1rm/AUUkPgjTUlQoxV3AI7M7EH8QQfxrpK+dxEuarJ+Z9Lho8tGK8j5kooor6I+ZCiiigAooooAKKKKAPYde/wCSOxf9eVp/OOuK+HFkbvxlbPtVkto3mcMPbaPxywrufE0Mlt8JRBKu2SK1tUYehDRg1jfCJVL6w5UbgIQDjkA78/yH5V5VOfLhqjXd/jY9epDmxNNPsvwuekyWtvNLFLLBE8kRzGzICUPse1LdW0V5aTWs6lopo2jcA4ypGD+hqWivNuz1LI8M8d6HZ+H9XtLGyD+X9kV2ZzlmYu/J6dgB+FcvXdfFf/kabb/ryT/0N64WvocPJypRbPm8TFRqyS2CvTfhFF/yF5Sn/PJVbH++SM/l+leZV7B8KAP+EXujjn7a3/oCVljnaizbAK9dHd0UUV4R74UUUUAeSfFDQpbfVV1pAPs9yEjkJbkSgHHHptUfka8/r2H4r/8AIrW3/X6n/oD149XvYKblRV+h8/joKNZ26hRRRXUcZ6P8JLWN73VLsg+bFHHGvPGGJJ/9AFcX4k/5GnV/+v2b/wBDNd38If8AmM/9sP8A2pXKePIY4PG2pJEgRSyOQPVkVifxJJ/GuKnL/apryX6HfUj/ALJB+b/U5yiiiu04AooooAK2/CehPr+v29qUJt1PmXB5wEHUZHr0/GpvAtvHdeNdMjlBKiRpBg45VSw/UCvd4oYoFKwxJGGYsQigZJ6njvXDi8U6XuJatHfg8Iq3vt6Jnzxr8UcHiPVIokVI0u5VRFGAoDkAAelZ1afiT/kadX/6/Zv/AEM1mV2Q+FHHP4mFFFFUQFFFFAFvSrRL/WLKzkZlS4uI4mZeoDMBkfnX0VaWkFjaRWttGI4IlCIg7AV4X4Hsf7Q8Y6dGQ+yKTz2KDpsG4Z9iQB+Ne9V5OYy95RPZyyPuSkeH+GPA8/iXT/tsV5HDGlyYZFZTkKFBJHqfm6cfWvbLeCK1toreFQkUSBEUdlAwBXE/Cn/kV7r/AK/X/wDQEruqwxlWUqji9kb4KlGNNSS1YVznjf8A5ANt/wBhXTv/AEshro65zxv/AMgG2/7Cunf+lkNch2HR0UUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/wDSyGujrnPG/wDyAbb/ALCunf8ApZDQB0dFFFABXj3xUtZo/EkFyyYhmgCo2epUnI/DI/OvYa87+LX/ACC9O/67N/6DXXgpWrLzOPHR5qD8jz/wtE83ivSVTGRdxtz6KwJ/QV9CV4P4Di87xtpi5xh2b8kY/wBK94rXMX+8S8jHLF+7b8z5kr3T4e/8iNp3/bX/ANGvXjeuwx23iHU4IUCRR3cqIo6KocgCvcfCFslp4Q0qNBgNbrJ1zy/zH9WNb5hJOlHzf6GGXRarS8l+p518V1I8TWj4+U2agH3Dv/iK4OvQPiz/AMh2x/69v/ZjXn9dOF/gxOXF/wAeQUUUV0HMFFFFAF3SLH+0tZsrE7ws86RsUGSFJAJ/AZNafja1isfFt3aQKVhhSGNATnAESAVqfC+2in8XGSQEtb2zyR4PRi
VX+TGqXxC/5HnUf+2X/opK5udvEcnZfqjq9mlhufu/0Z7D4b/5FbSP+vKH/wBAFadZnhv/AJFbSP8Aryh/9AFadeFP4me/T+Beh803Nu9pdzW0mPMido2weMg4NRVf1z/kYNS/6+pf/QzVCvpYu6TPmJKzaCiiimSFFFFABRWlLoOoR6NBq/kF7KVS3mryEw+zDehz/P61nvG8ZIdGUglTkYwR1H1pKSexTi1uj3TxwufAuoKVziNOMejrWb8MNJjs/Dp1ESs0l+fmUjhAjMox+pro/En/ACK2r/8AXlN/6AazPh7/AMiNp3/bX/0a9eEpNYZpd/0/4B77gniU32/X/gnT0UUVynWeW/Fwf6TpR/2Jf5rXm1elfFz/AF+k/wC7L/Na81r3sH/Aj/XU+exv8eX9dAr2H4Uf8itc/wDX6/8A6AlePV7D8Kf+RWuf+v1//QEqcf8AwS8v/jfI7qiiivDPeCiiigDhfiv/AMitbf8AX6n/AKA9ePV7p470K88QaAltYhDNHOsoVmxuADDGf+BZ/CuE0DwTrFj4s037fp+63DmR3GGQBc4yemcgED3HvXr4OtCFHV6q542NoTnX0WjtqYHi6yXT/FmpWyoiIJt6qnQBgGA/I1i11HxERl8cX5ZSAwiKkjqPLUZH4g/lXMqjuTsUtjrgZ74/mRXbRd6cW+yOGsrVZJd2evfCm2WPw3c3BjAkluiN/dlVVwPwJb8zXFeO4Jbr4g30EEbSSyGJURRkk+Wle0WNjbabZx2dnCsNvGMKi9u9cT/Yy3HxgluZN2yO2W6AK8FgBGBn68/hXl0a69tOr5P9D1q+HfsYUvNfqc/4N8IWniDwtqU7OyXjyGCJzyqbQjjjHc8E+lcGQVYg9Rwa978K6Tc6Rb6lHcqi+fqEs8YU5Gw7cfTp0ryfx9bJa+NNQSKFYozsdVVdoOUUkj8c/jmurDV3OrKPTc5MTh1CjCVtdmc3RRRXceedP8Pf+R507/tr/wCinr3SvE/hrZvceMoJVYAW0MkrA/xAjZx+Lj8q9srxcwf71eh7mWq1F+v+R4R470h9J8VXO+VZBds10uBjaHduD7gg1zdet/FhE/sKyk2LvFzt3Y5xtbjNeSV6WFqOdJNnmYumqdZpBRRRXQcwUUDk16ZffC15dfVbSZYNKKKWYnc6kDBGD1JIzngDJ9MHKpWhTa53Y2p0J1U+RXsL8JbHnUtQaLj5II5PzZx/6BXp1VNO02z0mySzsYFhgToo5yfUk8k1brwq9X2tRzPoMPS9lTUDlPAGl3uk6NeQX0DQyNeyOoJHK7VGfpkGurooqKk3OTk+pdOChFRXQK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIags6OiiigAooooAKKr317babYXF9eSiK2t42llkPRVUZJ/IVk6P4qt9Wv8A7C9hqGn3TwfaYY72JUM0WQCy7WPQlcg4YZGRQBvUVka14hg0aa0thaXd7e3e/wAm1tFUuyqAWb5mVQBkckjqB3qhP42sU0fTdQtbK/vDqF2bKK2hRFlWYByyuJGUKV8tweeo70AdNXOeN/8AkA23/YV07/0shrZ0+7lvrNZ5rC5sXJIMFyYy4x3OxmXn61jeN/8AkA23/YV07/0shoA6OiiigArzv4tf8gvTv+uzf+g16JXn3xZjY6LYSgfItwVJ9ypx/I104T+NE5sZ/Akcf8OopJPG9iyIWWNZGcgfdGxhk/iQPxr3GvHvhR/yNNz/ANeT/wDoaV7DWuYO9X5GOXK1H5nzv4k/5GnV/wDr9m/9DNe6+G/+RW0j/ryh/wDQBXhnidGj8VasrqVJvJWwR2LEg/ka+g4oo4IUhiRUjRQqIowFA4AA9K1xz/dwRjgF+8mzyz4tW7LqWm3BI2SQsgHfKnJ/9CFedV6d8XRxo5/67f8AsleY12YN3oR/rqceNVq8v66BRRRXScgUUUUAenfCO141S7aEf8s4klK/7xZQf+
+Cfwqb4paTYw6amqRwAXs91HHJLk5KhH/wH5Crfwo/5Fq7/wCvxv8A0BKd8V/+RWtv+v1P/QHryHJ/XPmeyoL6l8jp/Df/ACK2kf8AXlD/AOgCtOo4IY7aCOCFAkUahEUdFUDAFSV58ndtnoxVopHzprv/ACMGpf8AX1L/AOhms+tDXf8AkYdT/wCvuX/0M1n19LD4UfMT+JhRUlvBJdXMVvCu6WVwiLkDLE4A5qfU9OuNJ1KewugvnQttbacjpnj8DTur2FZ2v0KlFFFMk9O/5oZ/n/n5rO0fwwfFFrrFulwIJItRDhmXcCPmBGP1/D3ruND0Oyu/AGn6ZcK7209skrjdg5YiTqP9o07wtoVzo17rckyosV1dmSAK2fkySM+nXH4V4/1hRjPlet2/xR7f1dylDmWlkvwZo+JP+RW1f/rym/8AQDWX8Pf+RG07/tr/AOjXro7iEXFtLCTgSIUzjPUYrN8M6TJoXh6102WVZXh3ZdRgHLFv61yKS9k49b/ozrcH7ZS6Wf5o1qKKKyNjzP4uqNmkNjnMoz/3xXmFeyfE7SDfeH1v1kIaxO7YFzvDlVP0x1/OvG69zAyTopLoeBj4tV231LelWiX+sWNnIzKlxcRxMy9QGYAkfnX0ZBBHbQJDEgSNBgADFeP/AA00KDVdYmvLjlLHy3RQxB8wtlT9BtP5ivZK48wqXmorod2W0+WDm+oUUUV556IUUUUAFFFFAHH+NPClhqdveazPJOJ7Wxk2IrAISoZgTxnqfWuL+FfPiqb/AK9H/wDQkr2MgMCCAQeCD3rnLTQp7Xx1caqscSWL2IgQKcENleMemF/UV2UsR+6lTl20OKrhv3sake+p0lFFFcZ2hWF4p8MW/iiwjgkl8iaJw8cwQMR6j6H6jkD0rdoqoycHzR3JnCM4uMtjw7xL4IuNAurCCK5F4965jjAj2HdlQBySOd3rV7RPhnql+bee/dbS2ckyIc+coBxjBGAT2Off2r2B4o5GRnjVih3KWGdp9R6U+ux4+py269ziWX0udt7djN0fQ7HRLOG3tYhmKPy/NYDewyW5OPUk4960qKK4nJyd2d0YqKsiC7srW/gMF5bxTxHnZIgYZ9ee9eG33hi7sPCialc2ssE6XrwyK6nJQqu1vYBgwz3yK95pksUc8LwzIskcilXRhkMDwQRW9DEyo7bHPiMNGtvufONhpt5qkzw2Nu88qRmRkTk7R1wO/XoOarMrIxVgVYHBBGCDX0RpegaVozStp9lHA0py7DJJ9snkD26VDP4U0G6vvtk2lWzz53E7cBjknLL0J57iu7+0Y8z00OB5ZLlWup4p4VtJL3xVpkMcYkxco7qem1TubP4A19B1SstH03TppZrKxgt5JTl2jQLn/CrtcWKxHtpJpWsd2Ew/sItN3uFFFFcx1BRRRQAVznjf/kA23/YV07/0shro65zxv/yAbb/sK6d/6WQ0AdHRRRQAVn6rp95fxRrZ6xd6Yyklnto4XLj0PmIw/LFaFFAHFeIPCmtXnhbWLMeIr7U5bizeOG3uYreNC/BHMcannGOTjmltLq48ReMtM1RNL1CytdNsbhZjeW7RM0spjxGobG7AjYlhkdMGu0ooA4q+vJo/EOh+K/7K1NrKXTprWaBbVmuLZpGikUtEuW/5ZlTgHBxVG00+xTw2ZPE3h+7uor7VbnUI7QWT3LW+9m2b0QEg7W54OCxFeh0UAct4EtLq00q+WS2ubWye+kfT7a5J8yK3IXAIJyo3byFPIBA46VR8Z6BZxw2+qLNqBuDq+nnY2oTmHm7iH+qL7P8Ax3jr1rt65zxv/wAgG2/7Cunf+lkNAHR0UUUAFcL8V/8AkVrb/r9T/wBAeu6rhfiv/wAitbf9fqf+gPW+F/jROfF/wZHMfCj/AJGm5/68n/8AQ0r2GvHvhR/yNNz/ANeT/wDoaV7DWuP/AIxjl/8AB+Z4b49ikm8f30USM8jtEqooyWJjTAAr3KvHte/5LFF/1+
2n8o69hqsW/wB3TXl/kLBr95Vfn/meZ/F37mj/AFm/9krzCvWPi1ab9J0+8348qdotuOu9c5/8c/WvJ678E/3C+Z52PVq7+X5BRTlRmDFVJCjLEDoM4yfxI/Om11nIFFFFAj1/4Uf8i1d/9fjf+gJW5400efXPDc1naxRyXG9Hj3kDBB5IJ6cE1jfCoAeFrg9zePn/AL4Su5rwa83HEOS6M+iw8FPDqL6oKKKK5TqPnTXv+Rh1P/r7l/8AQzWfWn4jAHifVgBgC9m/9DNZlfTQ+FHy0/iZoaD/AMjFpn/X3F/6GK6n4pad9m8SRXqqQt3CCxJ6uvyn/wAd2VgeErdrnxZpiLG0gW4R2AGcAHOT7V6h8SdMjvfCsl15ReezYOjKOQpIDfhjk/T2rkrVOTEQ8/1OyjS58NPy1+48Vop6QySRySJG7JGAXYDIUE4GT25rY8I6U2seKLG28sPEsglmDJuXYvJDex+7z3IrrlJRi5PoccIuUlFdT3HQ4XtvD+mwSAh47WJGBGMEIAav0UV803d3PqIqysFFFFIYUUUUAZviJFk8NaorgEG0l4P+4a+fVsrl7J7xYJGtkcI8oX5VY9ie1fSZAYEEAg8EHvXD614Wj0X4e6lp2mRz3LyypJjbudj5idgOwH8zXdg8Qqfu92jz8bh3U97smM+FVo8Ph65uXj2ie4OxscsFAH5ZyPzrvKbGCsSAjBCgU6uWrU9pNz7nZRp+zgodgooorM0CiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuc8b/wDIBtv+wrp3/pZDXR1znjf/AJANt/2FdO/9LIaAOjooooAKKKKACiiigAooooAK5zxv/wAgG2/7Cunf+lkNdHXOeN/+QDbf9hXTv/SyGgDo6KKKACuT+I9vFP4Ku5JFy0DxyRnPRt4XP5MfzrrK5/xvbNdeDNTjXOREJOPRGDH+Va0HarF+aMq6vSkvJnD/AAliRtX1CYj50gCA57Fsn/0EV6xXlXwk/wCQhqf/AFyT+Zr1Wtsd/GZhgP4C+Z5Nqdsbv40JGG27biCTOM/cjVsfjtxXrNeXTf8AJbx/vL/6TivUaMU9IL+6gwi1qP8AvM4f4qxu/hWBlRmCXiMxAztG1xk+nJA/GvHK+ktQsLfU7Cayu03wTLtdc4r5trty+d4OPY4cyhaop9/0PVNC0JL34SXEVurfaLsSTHaMlnR/lUfXywPxNeV17p8Pf+RG07/tr/6NevC6vCybqVE+/wDX5EYuCVOnJdv6/MK6Pxxow0TxF9mjJMBt4jEScsVChMn3yppdB8Jahq0Vlf2sST27XXlSqSBsAwSTnqMGu1+KmkLNpdvq0ceZbdxFKw/55nOM/Rv/AEKrnXiq0Yp90RDDydCUmuzLXwr/AORVn/6/H/8AQUruK4/4aWv2fwbFLv3faZpJcY+7g7Mf+OZ/GuwryMS71pep7OFVqMfQKKKKwNz538Sf8jTq/wD1+zf+hmsytPxJ/wAjTq//AF+zf+hmqV3azWV1NbTLiSGRonxyNynBGa+lg/dXofL1F7zfmd38JkY61fuFOwW4BbHAJYYH6H8q9YkjSWNo5EV0cFWVhkEHqCK86+EtpcRWmp3TxMsEzRrG56MV37sfTIr0evFxsr13Y93AxtQVzzT4Zafaah4e1W3u7dJYpZlV1YfeAGRz7Hmur0XwjYaHrN7qVsW33OVWMAKsSkglQB7gfgKx/hxZ/wBnx63Z794t75ot+MbtvGf0ruKMTUl7SST0YYWlH2UXJaoKKKK5DrCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIaAOjooooAKKKKACiiigAooooAK5zxv/AMgG2/7Cunf+lkNdHX
OeN/8AkA23/YV07/0shoA6OiiigAqrqdmdQ0q8sg+w3EDxb8Z27lIzj8atUU07O6E1dWZ5V8JP+Qhqf/XJP5mvVa86+GkEUWr+I1jQKI5URB6Luk4/QV6LXTjHes/l+Ry4FWoL5/mcHq0W/wCMGjbQBizLMfp5td5XEaj/AMlf0n/rwb/2rXb1FfaHp/mXQ3n6/ogr5u1K1+xareWmMeRO8fXP3WI/pX0jXz14njeLxVqyyIyk3crYYY4LEg/iCDXXlr96SOPM17sWe2eEraO08I6VHFna1skhz6uNx/VjXhmuReR4g1KLOdl1KufXDkV9B6faDT9NtbJXLrbwpEGIwTtAGf0rwDxJ/wAjTq//AF+zf+hmqwMr1JvuTmEeWlBdj37TrSCw063tbaJY4Y0AVR9P1PvWZ4ztTeeDtUiBA2wGXkf3CH/9lrcVdqKvoMVBf2iX+nXNnIzKlxE0TMvUBgQSPzrzoytNSfc9OULwcV2MD4e/8iNp3/bX/wBGvXT1n6HpMWh6Nb6bDI8iQg/O+Mklix6e5NaFOrJSnKS6sVKLjTjF9EgooorM0PAdStP7Q8e3dlv2faNTeLdjO3dKRn9a9U8R+BrLX57Z1kWzVJHkm8qIbpS23Jz6/KOeetcUtgZfFQu4rdnkTxKyO6qThd4PPtwT+dev16OKrSi4OL2R5uEoxkpqavdlawsLXTLKOzsoVhgj+6gzx378mrNFFee227s9FJJWQ1Y0TdsRV3Hc2BjJ9TTqKKQwooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG/wDyAbb/ALCunf8ApZDXR1znjf8A5ANt/wBhXTv/AEshoA6OiiigAoorK1WHX5JYzo99plvGF+cXlnJMSfYrKmB+BoAuahqFppVhNfX06QW0K7pJHPAH+eMd6raRr2m66kzafOztAwWWOSJ4pIyRkbkcBhkcjI5rlfFFv4gj0KO51i5sb22tNRs7qVLCykjIijmVpCwaR9wAw2Bj7p607Ttb0248W6/4mt7pG0O20uCKe8jBaN3RpXbBH3tqMM4z1xQB0mqeJtJ0a+tLK/uXinu3SOFRBI4LO2xclVIXLHHJFSavr+m6EsJ1CdkadisUccTyySEDJ2ogLHA6nHFct8R9e0my0vSorrUbaCSTU7G4RJZApMS3CFnwewAJJ7U7U9Z02z8Z6N4iub2EaLPpk9vDfbswrI0kbjLdBuVTg99uKAOwsL+01SwhvrGdJ7aZd0ciHhhWL43/AOQDbf8AYV07/wBLIah8ARuPDs9wY3igu9Qu7q2R1KkQyTOyHB6ZB3Y96o+M9LvEht71td1B7c6vp5+wskHkjN3EMZEe/jr979OKAO3ooooAKKKKAOE8BqqeJvFyKMKt4AB6DfLXd1BBZWtrNPLBbxxyTtvlZFwXPqfWp60qz5583p+RlRp+zhy+v5nEaj/yV/Sf+vBv/atdvXEaj/yV/Sf+vBv/AGrXb1dfaHp/mRQ3n6/ogrx34nxvL4wt441LO9rGqqOpJZgK9irzXxvaS3fxB0GKCPfKyoePRZCSfoBk1pgpctW/kzPHR5qVvNHpVeBeNbRbLxlqkSsWDTebk+rgOR+BbFe+1wfjzTrOfXPDkktujPPepBKf76bl+U/mfzp4KpyVNeqFjqXtKenRneUUUVxnaFFFFABRRRQBBbWVtZmY28CRGeQyyFRje56k+9T0UUN33ElbYKKKKBhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAVznjf/kA23/YV07/ANLIa6Ouc8b/APIBtv8AsK6d/wClkNAHR0UUUAFFFFABRRRQAUUUUAFc543/AOQDbf8AYV07/wBLIa6Ouc8b/wDIBtv+wr
p3/pZDQB0dFFFABRRRQAUUUUAcRqP/ACV/Sf8Arwb/ANq129cTqP8AyV/Sf+vBv/atdtW9faHp/mc9Defr+iCq8ljay3kN5JbxtcwgrHKV+ZAeuDViisE7G7Se4Vm6poltq1xYTXDSK9lOJ49hAyR2PtwK0qKak4u6CUVJWYUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/AOQDbf8AYV07/wBLIa6Ouc8b/wDIBtv+wrp3/pZDQB0dFFFABRRRQAUUUUAFFFFABXOeN/8AkA23/YV07/0shro65zxv/wAgG2/7Cunf+lkNAHR0UUUAFFFFABRRRQBA1latfJetbxm6RDGsxX5gvpn0qeiii9xWSCiiigYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/9LIa6Ouc8b/8gG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFUdX0iz1zT2sb5JGgZ0k/dytGwZGDqQykEEMoPB7VeooA5z/hCNK/5+tc/8Hl5/8do/4QjSv+frXP8AweXn/wAdro6KAOI1TwZGmo6Ktlc68bd7xlvca1dnEXkSkZzLx+8EfI/lmtT/AIQjSv8An61z/wAHl5/8dpNX8aWOkXt1btZahdLZRrLfTWsQdLVGGQXywJ4G7ChiBziuijkSaJJI2Do4DKynIIPQigDnv+EI0r/n61z/AMHl5/8AHaP+EI0r/n61z/weXn/x2ujooA5z/hCNK/5+tc/8Hl5/8do/4QjSv+frXP8AweXn/wAdro6KAOc/4QjSv+frXP8AweXn/wAdo/4QjSv+frXP/B5ef/Ha6OigDnP+EI0r/n61z/weXn/x2j/hCNK/5+tc/wDB5ef/AB2ujooA5z/hCNK/5+tc/wDB5ef/AB2j/hCNK/5+tc/8Hl5/8dqfUPE0Om67YaXPp9+ftsogiuljXyQ5RnCklgc4RugNV9W8a2Gk3t3bvZ39zHYosl9cW0QaO0VhkF8sCfl+YhQxA5NAGb4i8FxReGdVk0m5146ktpKbULrV2xMuw7MAy4POODWkvgjS9ozda5nHP/E8vP8A47UmreLrTTLz7LDZX+ozLbi6lWxjV/KhJIDsSw64bAGScHAou/F9jCmn/Ybe71WXUIDdW8NiisxhAGZDuZQF+ZRycknABoAZ/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtLJ4z03+y9NvbSG7vn1Jitra28Y85yoJcEMVC7cENuIwRitHRdZtdd0/wC12qyptkeGWGZdskUinDIw7EH/ABHFAGb/AMIRpX/P1rn/AIPLz/47R/whGlf8/Wuf+Dy8/wDjtdHRQBzn/CEaV/z9a5/4PLz/AOO0f8IRpX/P1rn/AIPLz/47XR0UAc5/whGlf8/Wuf8Ag8vP/jtH/CEaV/z9a5/4PLz/AOO10dFAHOf8IRpX/P1rn/g8vP8A47R/whGlf8/Wuf8Ag8vP/jtaOratJphhWLSdQ1B5d3y2aIdoGPvF2VR145yefSs9vGem/wBh22qRxXcpubg2kVokX79pwWDR7SQAwKNnJAAUnOKAE/4QjSv+frXP/B5ef/Hay9A8GRyadK2qXOvC4F5cqm7Wrtf3QncRdJf+eYTnv35rVXxppv8AY13qM0N3A9pOLWazkiHnrMxULGFBIJbeuMEg7hzRB4y042OqXN7Ddac+loHu7e7QeYisCVYbCwYNggYJ5BHWgBP+EI0r/n61z/weXn/x2j/hCNK/5+tc/wDB5ef/AB2n6f4us7uW6hvLS90qe2
t/tbxX6KhMPP7wFWYYGDkZyO4FJo/jC01e+gtDY6hZPdQG4tDeRKguYxjLJhj03KdrYbBzigBv/CEaV/z9a5/4PLz/AOO0f8IRpX/P1rn/AIPLz/47XR0UAc5/whGlf8/Wuf8Ag8vP/jtH/CEaV/z9a5/4PLz/AOO10dFAHOf8IRpX/P1rn/g8vP8A47R/whGlf8/Wuf8Ag8vP/jtdHRQBzn/CEaV/z9a5/wCDy8/+O0f8IRpX/P1rn/g8vP8A47W5e3LWdnJcLbTXLIMiGAAu/sMkD8yK58+ONPghvzqNlf6dcWUccr2txGpkdZGKJs2MytuYbQAevXFAEn/CEaV/z9a5/wCDy8/+O1lweDIz4ov45bnXv7MWzt2gP9tXePNLzeZz5uT8oi4/xNa1t4x097fU5L6C70uTTYRcXUN6ih1iIJDjYzBgdrDgk5BHWl0zxbaX91La3NlfaZOlv9rEd/GqF4c4LgqzDAOMg4IyMigBn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O0aR4zstXvbW2FlqFoL2JprKW6iCJdIMElMMSOCDhgpxzimWPjjTr6+toUtb6O1u5ngtL+SJRBcSLnKqd27na2CVAOOCaAH/wDCEaV/z9a5/wCDy8/+O0f8IRpX/P1rn/g8vP8A47XR0UAc5/whGlf8/Wuf+Dy8/wDjtH/CEaV/z9a5/wCDy8/+O10dFAHOf8IRpX/P1rn/AIPLz/47R/whGlf8/Wuf+Dy8/wDjtdHRQBzn/CEaV/z9a5/4PLz/AOO0f8IRpX/P1rn/AIPLz/47XR1h6t4q0/R9Z03Spknlur+RY1EKgiLdnazkkYBIIHUnB44NAEH/AAhGlf8AP1rn/g8vP/jtZeqeDI01HRVsrnXjbveMt7jWrs4i8iUjOZeP3gj5H8s1ot4405dRa3NrffZFuxYtqPlL9nE+duwndu+98u7btzxmjUfHGnabe3UMlrfS21k6R3t9DEpgtWYAgOSwY4DKTtBwDzigB/8AwhGlf8/Wuf8Ag8vP/jtH/CEaV/z9a5/4PLz/AOO0useMbPSL24tfsOoXrWsIuLx7OJXW2jOcF8sCchScKGOBnFP1HxbZ2c1tBaWd7qk9xb/a1jsEVyIe0hLMowc8DOTzgGgCP/hCNK/5+tc/8Hl5/wDHaP8AhCNK/wCfrXP/AAeXn/x2tnTNRtdX0y21Gyl821uYxJG+MZB9ux9qtUAc5/whGlf8/Wuf+Dy8/wDjtH/CEaV/z9a5/wCDy8/+O10dFAHOf8IRpX/P1rn/AIPLz/47R/whGlf8/Wuf+Dy8/wDjtdHRQBzn/CEaV/z9a5/4PLz/AOO0n/CDaMZYXkk1abyZUmRJ9XupU3owZSVaQg4YA8jtXSUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAee6ob7SL/xfbrpF/etrSrJYvbQGRGcwLCUdhxHgpnLYGGrtNFsn03QtOsJGDvbW0cLMO5VQCf0q9RQAUUUUAFFFFABRRRQAUUUUAcR441LydZ8ORpp2q3P2PUlu53tdOmmRY/JmTO5FIJ3MvHXms6/kvbAeLrSPRtRu314CaweO2Yqxe3SLZI2MRbWTJ344PrxXpFFAHAWyXfgvWrmWbTr/AFGG60u0hiksrdpszQB1KNgfLncpDHA65NVNF02+8FSeH7m+sbu6iTQxYXH2GBp2gmDhwNq5JU5YZHHyjOM16VRQB5np2naloSeHNbu9Nu3SOXUXura3iM0tuLqTzUOxck7doU7c4LGum8F2tzHa6rfXNtLa/wBpalLdxQTLtdIyFRdw7EhN2Oo3c8101FABRRRQAUUUUAFFFFAHM+Mtfv8AR7W2t9NsbyW5vGKfaobKW5S0UYy7rGpJPPyr3PUgA1hvYxWGmeGNS0my1S5tdKv5ZbmOa1kW6k82OVHlMbqGZt8m44HIJxXoVFAHmV1p2pah/aniSHTLsIdZs72GzkjKTywwKiM2w4
IY/MQpwTtHrS6vpuo+Kf8AhJdUstOu4kezs4LSG6iMEly0ErTN8r4IB3BASBnntXplFAHnWp2d541v9Ums7C9soD4fudOR76BoGeaYqcBWwSF2cnp83BNWtPmu9f8AEfhyYaTf2KaTbzNdtd27RKJGjEYjQn7/APEcrlcKOea7uigAooooAKKKKACiiigCrqN2tjYS3Lw3MypjKW0ZkkIJAyFXk4znjnivLZtFvJ7q/vtF0/WJ7GGSxuyNUWT7TcSQT72jjM37wrszgNxu6dTXrlFAHmmtabqHjB/Ed/Yafd28b6TFZ2qXsLQPcSrI0pG1wCF+6uSACSe1Wb6C78bas8ttp9/YQQ6LeWZkvrdoCZrjYAoDcsFCElhkcjBNehUUAee6ab3WtS8JwnSdQsTo0cj3r3MBjRX8hoQiMeJMlycrkYXrzVLSrTUZdG8KeFn0q+hutHvIXu7iSBlgEcG7DLJ919/y4Ckn5jnGK9PooAKKKKACiiigAooooAK8w1zwz4qt9Qs7i3vdPu2uNcjuml/s6VpIwFcJ5hEuPLRcLgBfXOSc+n0UAeXvZ6iPDs3gwaXfG7fV2kW78hvs/kNd/aPNMv3chTjbnduHSpNWg1G103xh4dTSb65udbnlaynigZoGWeNUJeQfKmwhs7scAYzmvTKKAPP7r7Z4b1bxEv8AZWoaguqW0P2N7W3aVWkSLyjG5HCcgHLYGGPPFR6baXngrUdOmu7C+voP7AtdPZ7G3adkmgLZBVeQG38Hp8vJFeiUUAc/4J0260nwdp1pex+VdBWkkjznyy7s+3Ptux+FdBRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAf/9k=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "from speechbrain.dataio.dataio import read_audio\n", + "signal = read_audio('spk1_snt1.wav')\n", + "plt.figure(1)\n", + "plt.title(\"Original Speech\")\n", + "plt.plot(signal)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2eR2_1PVkYLh" + }, + "source": [ + "You can also play the audio" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 61 + }, + "executionInfo": { + "elapsed": 483, + "status": "ok", + "timestamp": 1704405861855, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "Tzf7Gd3QkXcT", + "outputId": "bede5714-32fa-48bf-8d91-e55ff3c22ff3" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from IPython.display import Audio\n", + "Audio('spk1_snt1.wav')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xCWgOVzFoYGe" + }, + "source": [ + "We can now initalize the speed perturbator:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "nuPDtYohohpu" + }, + "outputs": [], + "source": [ + "from speechbrain.augment.time_domain import SpeedPerturb\n", + "\n", + "perturbator = SpeedPerturb(orig_freq=16000, speeds=[90])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SdcQBKgnpFxo" + }, + "source": [ + "The speed perturbator is a class that can be initialized with three parameters:\n", + "\n", + "\n", + "* **orig_freq**: it is the sampling frequency of the original signal\n", + "* **speeds**: It is a list with all the speeds that the signal should be changed to, as a percentage of the original signal (i.e, `speed=[100]` 
won't change the original signal). When adding more values (e.g, `speed=[90, 100, 110, 120]`) the speed will be randomly chosen among the specified values.\n", + "\n", + "Let's now apply it to the original signal:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 530 + }, + "executionInfo": { + "elapsed": 1690, + "status": "ok", + "timestamp": 1704405914703, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "pYdNolk6qs0i", + "outputId": "fdd4769c-bc37-4339-c420-04e7b3655a64" + }, + "outputs": [ + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGzAjgDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKK5vxvreoaDocNxpi2xu5r23tVNyjMi+ZIEyQrAnGfWgDpKK4yx8Sa3p/i6LQPEg0xxc2cl3Dd2QeNVEZAYOrs2ODndntWlpfjnw3rWopYWGprLcSBmiVonRZgOpjZlCvjr8pNAHQ0VzUnj7w1He3Fn/AGg8l1byvDNFFayyMjJ1yFU4HoehPAyayvDPxO0jV/CzavqDSWbW8LT3Q+yT+XGofbkMUw3bhSTyfSgDuqKp3eqWVjeWNpczbJ76RordNpO9lUuRkDA+VSecdKy4PG3h661dtKt78y3y3DWzwpBISkinBDHbhRkEAnAOOCaAOgorzzwN8SNM1Dw5okWs6yj6zdoEkYwlUMpJwpZVCKxGPlyDyOOa6fWvF+heHrhLbUr7y53TzBFHE
8rhOm4qikhfc4FAG5RWPe+KtDsNDh1m41KEafPtEMyZfzS3QIFBLE+gBPBrG8O+K18QeNtVt7K+FxpcFjbyRp5e0xys8ofcCAwPyrw3T05oA7GiuZ8U+IL/AE3UNH0jSobVtQ1WSRY5bxmEUSxruYkLyx6AAEfXirFne6zpen6je+KptMFraRGcT2CSjCKGLlkbceABjBOeaAN6iud07x34a1bUotPstUSW5mBMIMbqsuBkhWICsR3AJIwc9KXVfHPhvRb97G/1NY7iMAyhYncQg9PMZVITPX5iKAOhorJ1fxNo+hW1vPqF8kaXJxAI1aV5eM/IqAs3HPAotfE+iXuhy61BqUB06Ld5s7nYIyvUMGwVI9CAaANaiuQvPHGmaj4Y1y40G+c3lnp01zGXtnjxhCVcCRQGGQPUUvhnx3ourQ6VYPqiS6tcWyMw8plWSQIC4VtoQsDnKqeMHjigDrqKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKQkKMkgD3oAWim70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6uI+KUK3PheygZnVZNWskLRuVYAzKMgjkH3FdrvT+8v50b0/vL+dAHA+Jfh/aweEPEB0SK6uNau7B4FnubqSeZ06mMM7HAOMYGM5rE0mbRtd1LQoZfG+p3dzaXMc8OmSWMUbxSKCNrhIgUABIOSB+les70/vL+dG9P7y/nQBxfw8ijSfxfKqAO/iG53Njk4CY/z71x+mzw6n8D9U8K2sgfXrSzuPPsAD5yFZWONvXnjHrkV7JvT+8v50b0/vL+dAHml94u0jxN408Ef2RO1yi3czyuI2CxE20mEYkfe68dRjntWv8OYo0HiuRUAeTxFeFmxycMAK7Ten95fzo3p/eX86APA9J1nTb34FWXhO0jc69doI7ezETB2czbhMDjG0fe3ZwMEdRiuj1R5NC+IevXGqeKL3QINQW3e1uEtoninVIwrLveNsMrAnbkfezjmvWd6f3l/Ojen95fzoA8lFvpvhy28F6xFeXV74ds7y8ea7mgK+W0wYLIUCjagfcAcADcD0IrY8I6nYax8UvE1/pylraWws9s+wqJ8GQb1z1HG3PfbXoW9P7y/nRvT+8v50Acv44k8JvY21p4ujQ2krlopJEfbG6458xf9WcHg5Gea4K2nU6D4+ttE1C+1DwrHocpt5rp3kWOcxSbkid+WXGCeSAfrXsu9P7y/nWfrunprfh7U9J+0LD9utJbbzcbtm9Cu7GRnGc4yKAPMk1jTde0TwNoGkRv/AGrZ3dlPLbiFlazjiXMjNkYAxkD+9uGM5qshj0fU/Eum694vvtDa71G4nW3+yQvHdQyH5WRmiYt8vykA8bcYr13TrdNP0y0svOWT7PCkW/pu2qBnHbpVnen95fzoA8g1KxXw/q/hm9XX9S03Qo9DFlBqf2ZHZG3KwEodD5e5NvOBymKLvTLS48Kajqmk6ne+I4G1i0vdRzbhfPSIpvCKqKr/AChScA52+tev70/vL+dG9P7y/nQBxes+LdC8R+CfEkej6gl20elXDuERvkHlsMEkcH261l6hFHB4O+HKxIqBNQ04LgYxmIg/oTXpG9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/nRvT+8v50AOopu9P7y/n
RvT+8v50AOopu9P7y/nRvT+8v50AOooooAKKKKACiiigAooooAK5fx/bw3XhmK3uIkmhl1PT0kjkUMrqbuIEEHqCDiuornPG/8AyAbb/sK6d/6WQ0AL/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRUUAc7/wgPg7/AKFXRP8AwAi/+Jo/4QHwd/0Kuif+AEX/AMTXRUUAc7/wgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRUUAc7/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRU13WNdzsFXpknAoA5/8A4QHwd/0Kuif+AEX/AMTR/wAID4O/6FXRP/ACL/4muiooA53/AIQHwd/0Kuif+AEX/wATR/wgPg7/AKFXRP8AwAi/+JroqKAOd/4QHwd/0Kuif+AEX/xNH/CA+Dv+hV0T/wAAIv8A4muiooA53/hAfB3/AEKuif8AgBF/8TR/wgPg7/oVdE/8AIv/AImuiooA53/hAfB3/Qq6J/4ARf8AxNH/AAgPg7/oVdE/8AIv/ia6KigDnf8AhAfB3/Qq6J/4ARf/ABNH/CA+Dv8AoVdE/wDACL/4muiooA53/hAfB3/Qq6J/4ARf/E0f8ID4O/6FXRP/AAAi/wDia6KigDnf+EB8Hf8AQq6J/wCAEX/xNH/CA+Dv+hV0T/wAi/8Aia6KigDnf+EB8Hf9Cron/gBF/wDE0f8ACA+Dv+hV0T/wAi/+JroqKAOd/wCEB8Hf9Cron/gBF/8AE0f8ID4O/wChV0T/AMAIv/ia6KigDnf+EB8Hf9Cron/gBF/8TR/wgPg7/oVdE/8AACL/AOJroqKAOd/4QHwd/wBCron/AIARf/E0f8ID4O/6FXRP/ACL/wCJroqKAOd/4QHwd/0Kuif+AEX/AMTR/wAID4O/6FXRP/ACL/4muiooA53/AIQHwd/0Kuif+AEX/wATR/wgPg7/AKFXRP8AwAi/+JroqKAOd/4QHwd/0Kuif+AEX/xNH/CA+Dv+hV0T/wAAIv8A4muiooA53/hAfB3/AEKuif8AgBF/8TR/wgPg7/oVdE/8AIv/AImuiooA53/hAfB3/Qq6J/4ARf8AxNH/AAgPg7/oVdE/8AIv/ia6KigDnf8AhAfB3/Qq6J/4ARf/ABNH/CA+Dv8AoVdE/wDACL/4muiooA53/hAfB3/Qq6J/4ARf/E0f8ID4O/6FXRP/AAAi/wDia6KigDnf+EB8Hf8AQq6J/wCAEX/xNH/CA+Dv+hV0T/wAi/8Aia6KigDnf+EB8Hf9Cron/gBF/wDE0f8ACA+Dv+hV0T/wAi/+JroqKAOd/wCEB8Hf9Cron/gBF/8AE0f8ID4O/wChV0T/AMAIv/ia6KigDnf+EB8Hf9Cron/gBF/8TR/wgPg7/oVdE/8AACL/AOJroqKAOd/4QHwd/wBCron/AIARf/E0f8ID4O/6FXRP/ACL/wCJroqKAOd/4QHwd/0Kuif+AEX/AMTR/wAID4O/6FXRP/ACL/4muiooA53/AIQHwd/0Kuif+AEX/wATWJ4p8I+GtM02zvLDw/pdrcx6rp+yaC0jR1zdxA4IGRkEj8a72uc8b/8AIBtv+wrp3/pZDQB0dFFFABRRRQAUUUUAFFFFABXOeN/+QDbf9hXTv/SyGujrnPG//IBtv+wrp3/pZDQB0dFFFABRRRQAUUUUAFFFFABXHfE0keD3wes8ef1rsa83+LGqNHbWOlxsmJS00q4+YAYC/gTu/KujCxcq0bHNi5KNGVz0iiszw9qL6v4fsr+TZ5k0YL7Om7of1FadYSTi2mdEZKSTXUKKKKQwooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACii
igAooooAKKKKACuc8b/APIBtv8AsK6d/wClkNdHXOeN/wDkA23/AGFdO/8ASyGgDo6KKKACiiigAoorj9P8cTXkunXUukGHRdUuDb2V79oDOzHdsLx7flV9pwdx6jIGaAOworF8UaxfaDolxqVlpiX4t43mlRrkQhUVSxOdrZPHTFVPEPiw6H4asdWFrbubuWCILcXXkRx+Z/E0m1sAdzigDpa5zxv/AMgG2/7Cunf+lkNaGg6jcarpi3dwlgu9jsNje/aomX137F5znjHbrWd43Yf2FbDIz/auncZ/6fIaAOkooooAKKKKACiiigAooooAK8h+K5H/AAkdoO/2Qf8AobV69XjHxQ/5G4f9eyfzau3AL998jhzB/ufmd78OZo5fBNkiOGaJpEcD+E7y2D+DA/jXVV5v8JLmVrTVLQkeTG8cijHRmBB/RFr0iscVHlrSX9am2ElzUYvy/LQKKKKwOgKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACiiigBGAZSp6EYNed6ZoGvCw8O+HLrThFZ6LdRyvqPnoyTxw58sIgO4MflzuAAweTxXotFAHK+I5ta1HwJeQReH531G/tprY2kdzCfJLIyhmdmVSOnTJ56VJaXWpx+GLLz/Ct1LcwFIns3nty4Cp/rFO/YeeMbgea6aigDmPCOlXllc61fXVkmnJqN2s0Vgjq3lARqhZivy7mK5IGR05JzVDxp4d0SKC31aPR7BNSOr6exu1t0EpJu4gTvxnkEjrXbVznjf/AJANt/2FdO/9LIaAOjooooAKKKKACiiigAooooAK8M+IjM3jjUAzEhREFBPQeWp4/Emvc6+f/F92974v1WWQKGW4aIbfRPkH6KK9DLl+8b8jzsyf7pLzOy+EXXWP+2P/ALPXp1eRfCeeVfEF7bh8RPal2XHVldQD+TN+deu1ljlauzXAO9BfMKKKK5DsCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv/wAgG2/7Cunf+lkNdHXOeN/+QDbf9hXTv/SyGgDo6KKKACiiigAooooAKKKKACuc8b/8gG2/7Cunf+lkNdHXOeN/+QDbf9hXTv8A0shoA6OiiigAooooAKKKKACiiigAr538RkN4n1Yggg3kxBH++a+hn+430NfM1enlq1kzyszekV6m54V8RHwzqsl6IBMHgaIqTjrgjn6qPwzXS+F/FV9Zw6dbptcajq7rK0pLEKfKyBz6uTXn1amjX7QanpSuN0NterOAOpJZM/ogruq0YyTdjgo15QaV/wCtD6Hooor54+kCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv/AMgG2/7Cunf+lkNdHXOeN/8AkA23/YV07/0shoA6OiiigAooooAKKKKACiiigArnPG//ACAbb/sK6d/6WQ10dc543/5ANt/2FdO/9LIaAOjooooAKKKKAMnQtZbWP7R3QiL7JeyWow2dwXHP61rVwPwquBNol+jS75xdmR8nLYZRgn6kH8q76ta8FCo4roY4ebnTUn1CiiisjYa/+rb6GvmavpDVNQt9L0ye9u3KQRLliBnrwP1Ir5vr1ctTtJ+h5GZtXivUKASCCDgjvRRXpnlH0Vo+u6dr1u8+nXHmpG2x8qVIOM9DWjXmHwiZQ+sJuG4iEgZ5IG//ABFen187i
Kap1HBH0uGqurSU3uFFFFYm4UUUUAZV14j0y0uLaBrlZJLi5+yqIvn2ycZVsdMEgfjWrXh5uJT8R0ti58lNdMgTtuMwBP5KK9S8TeKrPwvBA9zFLLJOxEaRgdBjcST6ZH511VcM4uMY6tnJRxSkpSnokb1FIrK67lYMD3BzS1ynWFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXOeN/+QDbf9hXTv8A0shro65zxv8A8gG2/wCwrp3/AKWQ0AdHRRRQAUUUUAFYlp4u0K+1X+zLe/D3Jd40/dOEkZPvKkhGxyMHIUkjB9K2mBKkA4OOD6V5No91BceH/BnhqDP9uabfwteW2077fyg/mu/oG5AJ+9vGM5oA9H1vxBpnhyy+16pO8MHPzLC8h4GTwgJwACc1Je63p2naUup3dyI7NgpVypJbdjaAoGSTkYAGa53xh4l0WT4a6pff2jAltfWNxDbSSNs82QxuAqhsEkkHj2rLv9W0+98N+EtYtbuK60zTdQha9lgbesQEDplsdNrOhPp17UAdtpOtafrlq9xp85lSOQxyK0bRvG46qyMAynkcEDqKy/G//IBtv+wrp3/pZDVPwlcRan4l8TaxYsJdNupLeOCdfuTPHHh2U/xDlVyODtPpVfxnpd4kNvetruoPbnV9PP2Fkg8kZu4hjIj38dfvfpxQB29FFFABRRUF7cfY7G4uSu7yYmk25xnAzj9KEr6A3bU85+EP/MZ/7Yf+1K9NrzL4Q/8AMZ/7Yf8AtSvTa6sb/Hl8vyOTA/7vH5/mwoqNp4kmSFpUWWTJRCwBbHXA74qSuU6zjfiddvbeEDEiqRc3CRMT2Ay/H4oK8Wr1b4tXZTTNNstgIlmaUtnpsGMfjv8A0rymvcwEbUb9zwcwleu12Ciiiuw4Tu/hQxHia6XJ2mzYkf8AA0/xr2CvnXRNavNC1Fbuzl2MQFk+UHcmQSvPrgV6rqvxFs9N1WwgSATWVzAkzzhyGjDk4+XHYDOOvP5+VjMPOdW8Ve/6HsYHEU4UrSdrfqdrRRRXmHqBRRRQB4RJKkPxLeWV1SNNYLMzHAAE3JNY+p6nd6xfyXt7KZJn6noAOwA7Cp/Ef/I0at/1+zf+hmsyvpKcVZS8j5ipJ3cfM9J+E95cNd39m0ztbrErrGSSFO7sO2c816lXj/wplKeJbmIsAr2jHB7kMuP0Jr2CvGxytWZ7eAd6CCiiiuQ7AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACiiigAooooAKKKKACuc8b/8AIBtv+wrp3/pZDXR1znjf/kA23/YV07/0shoA6OiiigArH8V3a2PhTVJ3bb/o7Ipxn5mG1f1IrYrmPiF/yI2o/wDbL/0alaUVepFPujOs7U5Ndmcz8If+Yz/2w/8Aalem15l8If8AmM/9sP8A2pXptbY3+PL5fkYYH/d4/P8ANnGarcwy/FHQIEkVpYYpvMUdVzGxGfwrs68ulJX44Ag4+ZR/5LivUaWIjyqHov1Kw0uZzf8Aef6HmXxe/wCYN/23/wDadeY16d8Xv+YN/wBt/wD2nXmNergv4Efn+Z4+O/3iXy/JBRRRXUcgVNdXBuplkK7cRxx4zn7qBf6VDRRbqO/Q+kdMuzqGlWd6U2G4gSXbnpuUHH61arM8N/8AIraR/wBeUP8A6AK06+ZmrSaR9TB3imwoooqSj538R/8AI0at/wBfs3/oZrMrT8R/8jRq3/X7N/6GazK+mh8KPlqnxs6f4e/8jzp3/bX/ANFPXuleFfD5lXxxppYgDMg5PcxsBXuteRmP8Ven+Z7OW/wn6/ogooorg
PQCiiigAooooAKKKKACiisPxbrNxoPh6e/tUjeZGRVEgJXlgD0I7VUYuUlFdSZyUIuT6G5RRnNFSUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXOeN/wDkA23/AGFdO/8ASyGujrnPG/8AyAbb/sK6d/6WQ0AdHRRRQAUUUUAFFFFABRRRQAVznjf/AJANt/2FdO/9LIa6Ouc8b/8AIBtv+wrp3/pZDQB0dFFFABXMfEL/AJEbUf8Atl/6NSunri/ijI6eEgqsQHuUVgD94YY4P4gH8K2w6vVj6mOJdqMvRnM/CaRxrV/GGOxrcMVzwSGGD+p/OvWq8j+E/wDyHr3/AK9f/Zlr1ytsd/GZhgP4CPLZv+S3j/fX/wBJxXqVeWzf8lvH++v/AKIFepUsVtD/AAorCb1P8TPMvi9/zBv+2/8A7TrzGvSPi4zG70pM/KEkIHuSv+ArzevTwf8AAj/XU8nHfx5fL8gooorqOQKKKKAPevAzM/gvTCzFj5ZGSc8BiB+ldDXI/Da++2eD4Y9m02srwk5+9/Fn8mA/Cuur5yurVZLzPpsO70otdkFFFFZGx87+I/8AkaNW/wCv2b/0M1mVp+I/+Ro1b/r9m/8AQzWZX00PhR8tU+Nmr4Y/5GrSf+vyL/0IV9DV88+GP+Rq0n/r8i/9CFfQ1eVmPxxPWyz4JeoUVheJvE0HhqGzlmjEizziNhuwVT+JwMc444963a4HFpKT2Z6Kmm3FboKKKKkoKKKKACiiigCO4uIbWB57iVIokGWeRgqj6k1x3xNvraLwt9leUedcupiUDO4KQSf1H51b+Is0cfgi9R3VWlaNEBP3jvU4H4An8K8RaaRoliZ2MasXCk8AnGT+OB+Vehg8Nz2qX2Z5uOxXJekluj2b4ZTibwiF3lmjuHVs9icN/XP412VeUfCa+lXVL7T+sUkAm5J+VlYLwPcNz9BXq9YYuHLWZ0YOfNRj9wUUUVzHUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRXl3xC8RXen+LbFbO4GLOIS+WG4EjE/ex1+Xbx6H3rWjSdWXKjGvWVGPMz1GiuU8L+NbLWooraWQi9jtfOuXK7Y1wQDyT7g+nNaNp4t0G+uPIg1OAymXylRjtLN2xnrnsRSlRnFtNbDjWpySae5tUVz2o+LrTTvE9lobQvJLc7cyKwxGWOFBH+eCOtdDUyhKKTfUuM4ybSewUUUVJQUUUUAFc543/wCQDbf9hXTv/SyGujrnPG//ACAbb/sK6d/6WQ0AdHRRRQAUUUUAFFFFABRRRQAVznjf/kA23/YV07/0shro65zxv/yAbb/sK6d/6WQ0AdHRRRQAVwXxXdh4ctEB4a7BP4I1d7XA/Fj/AJF+y/6+h/6A1dGF/jRObF/wJGD8J/8AkP3v/Xr/AOzLXrleQ/Ch1HiK8Qn5jakj8HX/ABr16tMd/GZnl/8AAR5bP/yW8f76/wDogV6lXls//Jbx/vr/AOiBXqVLFbQ/worCb1P8TPK/i3/x/aX/ANc5P5ivOK9B+LNwG1qwt9pzHbl93ruYj/2X9a8+r1MJ/BieRjHevIKKKK6TlCiiigD134TyhvD95Dg7kui5PbBRR/7LXfV538Jf+QXqP/XZf/Qa9Er5/F/xpH0eD/gRCiiiuc6T538R/wDIz6t/1+zf+hmsytPxH/yM+rf9fs3/AKGazK+mh8KPlqnxs1fDH/I1aT/1+Rf+hivoG5urezhM11PFBEDgvK4Vc/U18/eGf+Rq0n/r8i/9DFdv8XLkF9KtVm5AkkeIN67QrEfgwH41wYul7WtGB6GEq+yoTn5lX4sXhl1bTrQBdkduZlcHrvbH/sg/OvVbeUz20UpGC6BsemRmvnC6vbm98n7TM0nkRLDHn+FF6KPzr3/w3P8AafDGly+Z5ha1j3NnOWCgH8c5rHGUvZ0oR7XN8FW9pVnLvY1KK
KK849IKKKKACiiigDhfit/yK1t/1+p/6A9ePV678WJQPD1nDg7muwwPbhGH9a8ir3MB/BPBzD+Ozo/A2sHRvFFvJ5e9Ln/RnA6gMRgj6ECveK+ctE/5D2nf9fUX/oQr37SNYstcsvtdhIZId5TJUqcj2P4H8a5cxh7ykkdeWz91xbL9FFFeaemFFFFABRRRQAUUUUAFFFFABVW/1Gz0u2+0X1wkEO4Jvc8ZPQVarzL4oeILWWJdCiVmuIpVllY8BPlyB75DZrahSdWaiY4iqqVNyPTay9f1218O6Yb67Dsm8IqJjcxPpkjtk/hXj2oeOtY1DULC8eQIbMq6xoSEdh1LAcnPTHpn1NZms+INS1+SF9RuPNaFSqYUKOTknA4z0/IV2U8vldc706nFUzKPK+Ra9D6EgmjubeOeFw8Uqh0YdGUjINSVwHws1a5vdLvLGdy6WZTymY5IVt3y/Qbf19q7+uGtTdObg+h3Uaiq01NdQJwMnpXzhquoT6pqlze3Lh5ZnLEjpjoAPYDA/CvevEd/bad4fvZbqZY1aF0QMQC7FThR6k18816OXR+KR52Zz1jEKkgnktriOeFyksTh0YdVYHINR0V6Z5Rak1G7l1I6i87NeeaJfN77wcg19AaPq9tqlpCUuIHufIjkmjjcEoWXPI7V8612fwz1RLHxN9mkUkXkZjVh2Ycj8OCPyrixlBTp3XQ7cFXcKnK/tHtFFFFeIe8FFFFABXOeN/8AkA23/YV07/0shro65zxv/wAgG2/7Cunf+lkNAHR0UUUAFFFFACMSFJAyccD1rybR7WC28P8AgzxLBn+3NSv4Vvbncd9x5ofzUf1C8kA/d2DGMV61WJaeEdCsdV/tO3sQlyHeRP3rlI2f7zJGTsQnJyVAJyfWgDK8VW0Oq+LfDej36CbTJ1uppbd/uTSRqmxWH8QAd2wePlz2rBt/DUniLw/qGh28lsbPS/ELfZkvYjPF5KAN5RXILKGdlxngDHau0l8J6LNpFtpb2bfZbV99vtnkWSJueVkDbwfmPQ9Dilbwpox0iDS0tpIbSCQyRi3uZYnDnOW3owYk7mySec80AZ3gmSO3j1XRhpmn2E+m3YjlXTo/LglLxo4cL2JVgCDnBHU1S8Zz66YbeKTTtPXTP7X0/Fwt65mx9rix+78oDrgff9+eldTpWj2GiWjW2nW4hiZzI/zFmdz1ZmYksTxySTxWV43/AOQDbf8AYV07/wBLIaAOjooooAK4H4sf8i/Zf9fQ/wDQGrvq83+Llw62mlWwx5cjySHjnKhQP/QjXThFetE5sY7UJGJ8Kv8Akap/+vN//Qkr2OvHPhX/AMjVP/15v/6Elex1pj/4xll/8H5nkuqXJtPjOkgUNm4gjwTj70aLn8M5r1qvCfH7q/jjUmRgQDGMg9xGoP6ivdqMXG0Kb8hYOV51I+f+Z498V/8Akabb/ryT/wBDeuFruviv/wAjTbf9eSf+hvXC16mF/gxPLxf8aQUUUVucwUUUUAenfCInGsDPH7nj/vuu48Ra7b+HdJe/nUvhgiRg4Lsew/DJ/CuH+EP/ADGf+2H/ALUrQ+K00f8AwjltDvXzPtinbnnGx/8AEV41aCni+V7afke3Rm4YPmW+v5ndxuJYkkXOGAYZ96dVXTZkudKs54zmOSBHU4xkFQRVquFqzsegndXPmq7uZL28nupiDLNI0jkDAyTk/wA6hoor6dKx8q3c0NCuIrTxBp1zO22GG5jkdsZwoYEn8qseJfEM3iXVft00Kw7YxGkanO1Rk9e/JNY/Sio5Fzc/UrnlycnQK9Q+HPixVtf7GukjihtLeScTl+SA24jHsGJ+gry+pIJ5LaTzIm2ttZM4zwwKkfkTU1qSqw5WXQrOjPmR9G6dqFtqthFe2cnmW8oJRsEZwcHg+4NWq5j4e/8AIjad/wBtf/Rr109fP1IqM3FdGfR0pOcIyfVBRVe/u0sNOubyRWZLeJ5WVepCgnA/Kq+h6rHrejW2o
xxtGs6k7GOSpBIP6ilyvl5ug+Zc3L1NCiiipKPN/i5NttNKg2/feR92em0KMf8Aj36V5ZXp3xe/5g3/AG3/APadeY172C/gR+f5nz+O/jy+X5E1ncGzvre5ChjDIsgU98HOP0r1/wCFn/Ipyf8AX0//AKCteNV7L8Lf+RTk/wCvp/8A0FajHr90Xl7/AH1vJnbUUUV4h7oUUUUAUtU1ax0az+16hcCGHcF3EE5J7AAEml03VbHV7d57C4WeJJDGzLnG4dR+o/OuT+Kv/Iqwf9fif+gvVP4S3ivpWoWO3DRTibOeoZcdPbZ+tdSoJ0Pa9TleIaxHsulj0SiiiuU6gpvmJ5vlb18zG7bnnHrj0p1eYar4mj0T4pXdzfiaSCG2W3RYlBIBVX7kdyfzrWlSdVtLormNasqSTfV2Ov8AGXiBvDmhNcxx755X8mLnG1ipO4+uMdK8Jubme8nae5mkmmfG6SRizHAwMk+1bHirxNN4n1CK4kh8iOJNiRBywHJOfqeB+ArCr2cJQ9lDXdniYzEe2npsgooorqOQ7z4USOPEl3EHYRtaMxXPBIdMHHqMn8zXr9ePfCj/AJGm5/68n/8AQ0r2GvDx/wDGPey/+CcL8V/+RWtv+v1P/QHrx6vU/ivqtv8AY7TSMP8AaTItznHyhMOvX1z/ACryyvQwKaoq552Padd2Ciiiuw4gqa0u57C7iurWQxzxMGRx2NQ0UNX0Y07ao+lbO5jvLOG4ikSRJEDBkOQamrlvh3Ij+B7BVdWKGRWAOdp8xjg+nBB/Gupr5qpHkm49mfUUpc8FLugoooqCwrnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACiiigAooooAKKKKACuc8b/8AIBtv+wrp3/pZDXR1znjf/kA23/YV07/0shoA6OiiigArzL4vf8wb/tv/AO069NrzL4vf8wb/ALb/APtOurBfx4/P8jkx3+7y+X5oyPhYwHiuUEgFrRwMnqdy17JXhfw9/wCR507/ALa/+inr3StMwVqvyM8ud6PzPnrxRI0vivVmbGRdyrx6BiB/KvoC3nS6tYriIkxyoHXPoRkV8+eJP+Rp1f8A6/Zv/QzXuvhv/kVtI/68of8A0AVrjl+7gzHAP95NHmPxX/5Gm2/68k/9DeuFruviv/yNNt/15J/6G9cLXdhf4MThxf8AGkFFFFbnMFFFFAHoXwmunTWb+zAHly24lJ75RgB/6GayvHv/ACNGsf8AXa3/APRRrK8LXc1n4n014p2hD3MaSENgFC4yD7Vp+PWUeK9XTI3GWAgewi/+uK5OS2Jcu6/VHbz3wqj2f6M9d8N/8itpH/XlD/6AK06y/DRB8LaRg5/0KH/0AVqV4k/iZ7lP4F6HzJRRRX0x8sFFFFABRRRQB7p8Pf8AkRtO/wC2v/o166evOPB2tponw2utRMRnFtclTGG25LFB1/4EDXUa74w0zw/FatdmVpLgBlijUFgv9488D+fbvXgVqU3VlZbtn0VGrCNGPM7WS/yLniT/AJFbV/8Arym/9ANZnw/OfA+mn2kH/kRq0/En/Irav/15Tf8AoBrkfhVqdxdaXd2EpUw2ZUxYGCN5ckH15H60Ri3h5Ps0EpJYmKfVM9BooormOo8y+L3/ADBv+2//ALTrzGvTfi6RnRxkZ/fcf98V5lXvYL+BH5/mfPY7/eJfL8gr2X4W/wDIpyf9fT/+grXjVexfCpi3hW4B/hvHA/74Q/1qMf8AwfmXl/8AG+R3NFFFeIe8FFFFAHD/ABV/5FWD/r8T/wBAeuL+G+prp/iuOGRsR3kZgyXwA3BU+5yNo/3q7P4rHHha297xP/QHrzDw3/yNOkf9fsP/AKGK9fDRUsK0/M8bFSccWmvI+iKK8++KV5cWUGky20zxOk7uCrEcgDB/U/nXC2firW4Y5Gj1G4DR24jUs5bjzAc4PGecfTiuSlg5VIKaZ2VcbGlUcGtj3uvC/iF/yPOo/
wDbL/0Ule5I6yIrqcqwyD6ivDPiCwbxzqRUgjMY4/65rV5d/Ffp/kZ5l/CXr+jOZooor2TxAooooA7r4Uf8jTc/9eT/APoaV7DXiHw3uZYPGtrHG2FnSSOQY6rsLfzUV7fXiY9WrfI93Lnej8zxH4japb6n4pb7Pv8A9Fj+zSbhj51d849ua5KtPxJ/yNOr/wDX7N/6GazK9ejFRppI8etJyqSbCiiitDIKKKKAPW/hNcxvod9aAnzY7nzGGONrKAP/AEBq9Arxb4aanc2vimKxiZRBeA+cCuSdiOy4Pbmvaa8LGw5az89T6DAz5qK8tAooorkOwK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIaAOjooooAKKKKACiiigAooooAK5zxv/AMgG2/7Cunf+lkNdHXOeN/8AkA23/YV07/0shoA6OiiigArxT4lX09x4tmtXkYw2qosaHou5VY4+uR+Ve114X8Qv+R51H/tl/wCikruy9Xqv0PPzJtUV6/5kfgOeO38baa8rBVLugJ9WRlA/EkCvatZuZbLQtQuoSBLDbSSISM4YKSP5V8/6PcG01uwuAu7yriN9ucZwwOK978Sf8itq/wD15Tf+gGtcdH97F9/8zLL5fuZrt/kfPdxPLdXMtxO5eWVy7se7E5J/OvdPC+r2h8JaI880cBljW2jV2HzuvyYHudteD1qXesyzaVpNjE7otirsMHB8xpGbcD9Nv612Ymh7VKP9bHHhcR7GUpf1udN8V/8Akabb/ryT/wBDeuFrZ8Ra0utzWEgEu63sYreR5Tlndclm/Ek1jVpQi4U1FmWImp1XJdQooorUxCiiigBVYqwZSQwOQR2qa7vLi/uWubqZppmADO5yTgADJ78AVBRRbqO7tY90+Hv/ACI2nf8AbX/0a9dPXLfDtifA9gP7pkH/AJEaupr5yv8AxZerPpcP/Cj6I+ZKKKK+jPmQooooAKKKKANaLWp4/C1xoqkLFJdLOSM5YbcEHtjKofqPyq6jqt7q0yTX07TOiCNSQBhR2AFU6KlQinexbnJqzZ7t45uJYvAuoyxMUZo0UkejOoYfiCRXL/CI8awP+uP/ALPW34tnN18LpLhiGMtvbuSOhy6Gud+El1Gl9qdoc+ZLEkq+mFJB/wDQxXkwj/sk15/5HsVJf7ZB+X+Z6pRRRXnnonj3xX/5Gm2/68k/9DeuFruviv8A8jTbf9eSf+hvXC19Dhf4MT5zF/xpBXsPwo/5Fa5/6/X/APQErx6vW/hNKh0K+iDgyLc7iueQCowfxwfyrLHfwWa5f/HR6BRRRXhnvBRRRQB5D8SNdu7jU7vRpBH9ltpoZI8L82TGScnv979BXF6dI8Op2ksbFXSZGVh2IYYNb3xCJ/4TjUhnj91/6KWuZBKsGUkEcgjtX0OHilRil1X6HzeIm3Wk30f6nefFPVftWt2+nxyo0VrHuYLyRIx5B/AL+dcGrsoYA4DDB9xnP9Ks6neHUdUu70ggzytJg9snOKq1VGn7Omok1qntKjn3Pc/h9eNeeDbPzHd5Ii8TM5z0Y4/AAgfhXk/jD/kb9V/6+GruvhLeSyWGpWbEGKGRJE9QXBBH0+Qfma4Xxj/yOGq/9fDVyYePLiZo7MTLmwsGYlFFFegeaFFFFAHR+Aporfxtp0k0iRpl13OcDJjYAfiSB+Ne4215bXgl+zTxy+VIYpNjZ2sOoPvXzXWjoesXGhavDf2x+ZDhl7Op6g/564rixWE9s+ZPWx34TGexXI1pcd4k/wCRp1f/AK/Zv/QzWZWjr8iTeJNUljYMj3crKR3Bc4rOrrh8KOOfxMKKKKogKKKKANbwvI8XivSWjdkY3cSkqccFgCPxBI/GvoWvnDSbtLDWLG8kVmS3uI5WC9SFYE4/Kvoq1uYry0huoG3QzRrIhxjKkZH6GvJzFPmiz2Msa5ZIr6bq1jrEEk9hcLPFHIYmZQcbhjPXr1HNXa4X4Uf8itc/9fr/A
PoCV3VcVaChUcV0O+hN1Kam+oVznjf/AJANt/2FdO/9LIa6Ouc8b/8AIBtv+wrp3/pZDWRqdHRRRQAUUUUAFFIxCqWPQDJrzvTNf142Hh3xHdaiJbPWrqOJ9O8hFSCObPllHA3Fh8udxIOTwOKAPRaK5rxHfag2uaNoWnXhsXvxPNLdLGrukcQXIUMCuS0i8kHgHisJPEmtzW0GiC8RdUbW5NKfUBCv+rSIzeYEPy7ygAxjGSTjtQB6FXOeN/8AkA23/YV07/0shpPDN/ftqWtaNqV19sm02aPy7oxqjSxyIGXcFAXcDuGQACAOKzPGev2ckNvpaw6gLgavp43tp84h4u4j/rSmz/x7np1oA7eiiigArwv4hf8AI86j/wBsv/RSV6r4j1+/0V7dbLQrrUhKGLNDnCYxgHCt698fj28W8Q6lPq+v3l7c2/2eaR9rQ85TaAuDnvxz7+lell9OSk59LHl5jUi4KHW/6GarFWDKSGByCO1e5z6/Y+IPBOrXNlKGK2Mvmx/xRsYycH/HocV4XWnpWu3mkW2oW1t5ZhvoDDMrrnggjIx0IBPtz0rtxFD2tmt0cOGxHsm09mZlFFFdJyhRRRQAUUUUAFFFFABRRRQB6z8KL4SaTe2TSkvFMJFQnorDt+IP+TXoJIAJPQV86aNq1xoeqw6jarG00W7CyAlTkEHOCPWvQdW8da7Po9zEvhu9sneE/wClZfEa92HyDH1zxXk4rCSlV5o7M9jC4yEaXLLdHl9FFFeseOFFFFABRRRQAUUUUAew69/yR2L/AK8rT+cdcH4E1a20fxTDPdyCOCRGiaQnhcjgn8QB+Oe1W5PFmpX/AIEm0mXTGktolji+2xhgsaqy7Q3BGeMdR1H48fXHQotU5wn1bO2vXTqQnDoke/v4x8OxywxnV7UtMSFKvkDH94jhfxxWPcfE3Q4byWBRNIkaORKq/KzgnCj644PTkV4xRURy+mt22aSzKq9kkaviHXrjxFqz31wAgxsjjHPloCSFzgZ6nn3rKoorujFRVlscEpOT5nuFel/COWNZtWiLqJHWJlXPJA35OPxH515pV/RtXudD1SLULQIZYs4WQEqQQQQQCPWssRTdSm4I1w9RUqqmz6MorzmD4h67JYC5/wCEXmlj8vPnxhxGSM5b7p49s9utXF8da42D/wAIVqODzkF//jdeK8LVXT8Ue4sXSfX8H/kd1RXDyeN9dSQr/wAIVqJAPUM5/lHikbxzrgYgeCdRIB65f/43S+rVO34r/Mf1ql3/AAf+RwnxC/5HnUf+2X/opK5itnxXfT6l4ku7u5spLKaQJut5M7kwijnIHXGenesavdopqnFPsjwKzTqSa7sKKKK0Mj0r4RzItxq0JZQ7pE4XPJALAkD0+YfmK43xVKJvFmquBgfanX8jj+lQ6Hrl54e1H7bYlPMKFGWRcqynsfxAP4VQmmkuJ5J5nLyyMXdj3JOSa540mq0qnc6Z1lKhGn2bGUUUV0HMFFFFABRRRQArMWYsxJJOST3pKKKACiiigAooooAK9o+G+utqugfYpVbztPCxlyRhkOdv0wBj8Ae9eL1u+F/E194bv2ktUE0c2Flgb+PHTB7Hk4+vSubFUfa07Lc6sJW9lUu9upN4e8X3/h5IIIGX7KLkzTx7QTICFBGT04HGO9e6xSrNCkqHKOoZT7GvmevpOwUJp1sq9BEgH5CuLMYRVpJas7stqSlzRb0VixXOeN/+QDbf9hXTv/SyGujrnPG//IBtv+wrp3/pZDXmnqHR0UUUAFFFFABXH6f4Hms5dOtZdXM2i6XcG4srL7OFdWG7YHk3fMqbjgbR0GScV2FFAHNXnhvUbqHSrn+2lGt6c0my9a0BSRX4ZWiDDgjb0YHKg1X/AOEJK6TFGmqONWj1A6mNQMIINwwIbMefuFGKbc9Mc55rraKAMbQNDk0l766u7z7bqN/MJbicReWvyqFVVTJ2qAPUnJJzzVXxv/yAbb/sK6d/6WQ10
dc543/5ANt/2FdO/wDSyGgDo6KKKACvC/iF/wAjzqP/AGy/9FJXuleKfEyPZ4xlbP34Y2/TH9K7svf71+h5+ZL9yvX/ADOPr1DwxoEul+ANb1C5ULNfWMjRjIOIvLJU8dCdxOPYV5fX0ZqNg1z4fu9OgIDSWrwIXPGShUZrsxtRxUY93+Rx4CkpuUuy/M+c6KKtz6bdW9jaXrxk290pMbgHGQzKVJ9flJx6EV2tpbnAk3sVKKKKYgooooAKKKKACiiigDvvhn4ftdSvZtSugJBaMBHEygqWIPJz6dvfntXpuu2MupaFfWUJUSzwsiFumSOM1g/Da0ht/BttPGmJLl5Hlb1IcqP0UV11eDiqrdZvt+h9DhaSjQS7r8z5kooor3j54KKKKACiiigAooooA9f1uNI/g3EsaKoNnathRjktGSfxJJryzStMudZ1OHT7QKZ5iQu44AwCSSfoDXrHiNPL+EaoM/LaWo5/3o65X4WWEs/iKa9UL5NtEVck8gvwMfka86hU5KM5+bPTxFPnr04eS/Urv8MfEKSQKBasJSQzLLxF/vZGfyzVm6+Fuq20M0wu7eVI4Wk2xhi7MFJ2hcc5PGc/h2r2CiuX6/WOv+z6PmfNdzZ3Fn5QuYXiMqeYodSCVyRnB9wagruviv8A8jTbf9eSf+hvXC169KftIKXc8atT9nUcF0Cu9+Femx3WuXN7Igb7JEAmR912PB/IN+dcFXqvwktXSw1O7JGyWVIgPdQSf/QxWWMly0ZG2CjzV4no1FFFeAfQhRRRQB4d8RYpI/G96zoVWRY2QkfeGxRkfiCPwrla9X+K+n2x0u11Ly/9LEywb8n7m12xj6ivKK+gws+eivLQ+cxcOStLz1+8KKKK6DmO4+GGlx32v3FzPbJNBbQcF8ELIxG3j6B/84rldatorPXdQtYFKxQ3MkaAnOFDED9BXo3wktJY7HU7xgBFNJHGh7kqGJ/D5x+tcD4oieLxXqyyIyMbuVgGGOCxIP4gg1yU5t4ia8kdtWCjhoO27Zk0UUV1nEFFFFABTkRpHVEUs7HCqoySfQVNYWcuo6hb2UJUSzyLGpboCTjn2r0Dwd8P9Qt9aivtYgSKG2bcke8MXcfdPHYHnr2HGKyq1oUk3Jm1GhOq0oo87ngktriSCZCksTlHU9VYHBFR1p+JP+Rp1f8A6/Zv/QzWZVxd0mZyVpNBRRRVEhRRRQAV634E8DpYRwavqcTi+yWhiY8RqQMFlx97r9Mjv0828PIsnibSkdQyteQggjII3ivomvOx9aUUoR6np5dRjNucuh8yV9GaEsqeH9NS4DiZbWISB/vBtgzn3zXAfDvw7pOsaA9zf2Uc80N62xmz0CIcHHUZPQ8V6dWGPrKb5F0OjL6DgudvcK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIa889E6OiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACvGPigCPFwJB5tkI/Nq9nrzX4uQsYdJmGNqtKh9ckKR/I114GVqy8zjx8b0G+x5xptp/aGqWllv2faJki3Yzt3MBnH419I18/eEYRP4u0pGJAFyj8eqnd/SvoGt8yfvRRhli92TPmQggkHqK9z+H6I/gXTN6q2DIRkZwfNfmvGdZtkstc1C1jz5cNzJGueuAxA/lXuvhK2itPCOlRxAhWtkkOTn5nG5v1Y1rmEk6UfNmOXRaqy8l+p5j8TbO2svEdtFawRwxm0VisahRkyPk8Vxdd78WEx4is5M8taAY+jt/jXBV1YV3oxZyYpWrSQUUUVuc4UUUUAFT3dq1pMsTkEtFHLx6Ogcfo1aHhaw/tPxRptqUR1adWdH6Mi/MwP8AwEGt74kaPfW/iK51WSDFjcNGkcoYHLCMDGOo+6aydVKoqb7GypN0nUXc9J8GWf2HwdpcW/fuhEucY++S+Pw3Y/Ct2szw3/yK2
kf9eUP/AKAK06+fqO8233Po6StCKXY+ZKKs6jbLZ6nd2qElIZnjUt1IDEc/lVavpE7q58u1Z2CiiimIKKKKAHiKQwtKEYxqwVmxwCckD8cH8jRMgjnkQZwrEDP1rtdP0lrT4V6xqRlDC+MICbcbNk2OvfOao+LfCcuiLbX0Hmy2lyis7sB8kh5K8dvSsI14uXL52+6x0SoSUFLyv99z0rx9GR4D1BAM7Vi/SRa5n4RA41g44PkjP/fddf40t3ufB2qRx43CHzDk9lIY/oDVX4doq+B7BlUAsZCxA6nzGHNeVGdsLJd3/X5HryhfFxfZf8D9TqKKKK4ztPI/ixbuuu2NyceXJbeWOecqxJ/9CFcBXpXxc/1+k/7sv81rzWvfwjvRifO41WryCvYfhR/yK1z/ANfr/wDoCV49XsPwo/5Fa5/6/X/9ASox/wDBNMv/AI3yO6ooorwz3gooooA4X4r/APIrW3/X6n/oD149XsPxX/5Fa2/6/U/9AevI7a2mvLmK2t42kmlYKiL1JNe3gdKJ4OYK9f7i5rWi3mg3iWt6oWV4llABBwCOn4EEfhWdXp/xbt4gmmXAjUTEuhfHJUYIB/En8zXm8ABhucgcRjHt861tQqupTU2Y4ikqdVwR7V8PbC60/wAKxxXcLQyNK7hW64OMH9K81+IX/I86j/2y/wDRSV7daArZQKwIIjUEHtxXjvizTpNX+J9xYRNted4kDEZ2/ul5rz8JU5q8pvs/zR6OMp8tCEI91+TOPSGSRJHRSViXc59BkDP5kfnTK9Z+HujW114W1Sy1C1Ri148MwIG75VTjPsckeh5ryy6tpbO6lt5o3jkjYqyuMEV6NOspzlDsebVoOEIz7kNFFFbGB03w9GfHOm/9tf8A0W1e614X8Pf+R507/tr/AOinr3SvGzH+KvT/ADPcy3+E/X9EfO/iT/kadX/6/Zv/AEM1mV3/AMUtIa31eDUYbZI7aaMI8iADdLlicjqTjHPtXAV6lCanTTR5OIg4VHFhRRRWpiFFFBBHUYoA674b6c974uhn2K0VojSvuGRnG1ce+SCPofSvba8/+FWlzWul3l/MroLp1WNWTGVUE7ge4JYj/gNegV4WNnzVn5H0GBp8lFeepw3wrXZ4au1Pa+cf+OJXc1h+F/D/APwjmn3Fr9o88S3LzBtu3AIAA/8AHa3KxryU6jkjbDwcKUYvcK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIayNjo6KKKACiiigAooooAKKKKACuc8b/wDIBtv+wrp3/pZDXR1znjf/AJANt/2FdO/9LIaAOjooooAK87+LX/IL07/ru3/oNeiV538Wv+QVp3/Xdv8A0GunCfxonLjP4Ejjfh+qv4400MARmQ8+ojYivdq8T+Gto9z4zglVgBbRSSsD3BGzj8XFe2VtmD/er0MctX7p+v8AkfO/iT/kadX/AOv2b/0M17xoMUkHh3TIZUKSR2kSurDBUhACDXiHjGA23jDVEOOZy/H+1839a9/q8dL93D+uxngI2qVP67nknxZ/5Dtj/wBe3/sxrz+vSfi5Ai3WlXAzvdJEPphSpH/oRrzau3CO9GJw4xWryCiiiuk5QooooA7v4Uws/iW6m8sskdow37chWLLjnsSA35Gum+K//IrW3/X6n/oD1S+Elq6WWqXZK+XJJHGozzlQSf8A0MfrV34r/wDIrW3/AF+p/wCgPXkzlfGI9inG2CfmdP4b/wCRW0j/AK8of/QBWnWZ4cBHhfSQRgiyh4/4AK068+fxM9Gn8CPnPXP+Rg1L/r6l/wDQzVCtDXf+Rh1L/r6l/wDQzWfX0kPhR8zP4mFFFFUQFFFFAHtPhDTbbWPhrZ2F4paCXfuCtg8TMRz9QK6/yo/LWMopRcYUjIGOlc38Pf8AkRtO/wC2v/o166evna7ftJLzZ9Nh0vZRfkjM8Sf8itq//XlN/wCgGsv4e/8AIjad/wBtf/Rr10V1b
RXlpNazgtFNG0bgHGVIwf0NQaVplto+mQafaBhBCCF3NknJJJJ9ySaXOvZcnW9xuD9qp9LW/EuUUUVkanl/xdB83SD22y/+yV5pXqHxd/1Ok/70v/sleX172C/gR/rqfPY7+PL5fkFe2/DrSrzSfDckd7D5Ty3DSqpIJ2lVAPH0NeR6BFHP4j0uKVFeN7uJXRhkMC4BBHpX0V0rnzGo0lDudOW0k26nYKKKK8k9gKKKKAMfxJ4et/EumpZXEskSpKJQ0eM5AI7+xNc3pXw3TSPEVnqEF+0kEG5mR0+YtzjGOMYIz9PfjvKK1hXqQjyp6GM8PTnLnktSrd6bZ38lvJdW6SvbSeZEWH3G9a57Tfh7omnXU8xSS5WZSphn2sijcGGBj2FdXRUxqzirJlSpQk7yQVy0fh+X/hY8utPbr9m+yAJLuGfN4Xp1+7keldTRRCbje3XQc4Kdr9HczNF0aPRY7xI5mkF1dvcncMbS2OP0ryr4laXeW3iefUJIj9lutnlSDkZVFBB9DxXtFVNR0yy1a2Ftf2yTwhg4V+zDoR/noSO9bUMQ6dTnepjiMMqtLkWltj5vor1HxP8ADmOW/sTolu0UU0pW5O/csQyPmAJz/e4z2FWtC+GFnaNbXOqS/aJ4yWeBTmJjn5eoBIxjI7n26+o8bS5ea/yPJWBrc/Lb59DE+G2g3SeII9Ru7OZIBatLbyspClmO0fXKluPofSvW6AABgDAFFeRXrOtPmZ7NCiqMOVHEfE6yu73QLVbS2mnZLkMyxIWIG1uSB2rxzB2hsHBOAa+mq8h1XwPqdh4OijNu1zexX7sFtgX/AHTooJwBnqi/TNd2CxCjHkkcGOw0pS9pHt+RwVFbnh/wpqXiMTtZoFjhU/vJOFZv7gPr/LvWffaXfabffYry1liuM8RkctzgYx1Ge4r0VOLly31PLdOSipNaMrQxSTzRwxIXkkYKqqMliTgAV7lfeB9L1HxINYuAz5UB7cjKOQNoJ+gxx7D3z5p4P8Nare+IrWcWrxQ2VxHJM8o27cENjB5JIHb1Fe4152OrNSSg+n5nqYCgnBua6r8AAwMDpRRRXlnqhRRRQAVznjf/AJANt/2FdO/9LIa6Ouc8b/8AIBtv+wrp3/pZDQB0dFFFABRRWfqujwaxFHHPcX0IjJINney25P1MbAkfWgB+sapbaJo15ql2WFvaQtNJtGSQozgD1PQVk6V4kvp9Zi0vWNHGm3NzbNdW225EwdVKh1Y7V2uu9cjkc8E4qnq3gOC58P6pZWeoambi7tXhjN7qVxcRqxwVJV3YdQOcZxmiyt9b1jxTY6vqekHTY9Ns5olja4SRpppSmSuwnCAR8E4J3dBigDT1vXriw1Gy0vTdPW/1G7SSVY3n8mNIk2hnZ9rEcuoAAOSaxdQ+IQtPD2n6iljbxz3Ootp08V/ei3jtZUEhffIFYYBiIBxzuBqa8h1sato3ieLRHkuVsZbS901LmPzIxIY3BV2IRtrR4PIyDkdKbpdlqeiaJ5lzoJ1K8vL+e9ntoJos2xkYkAGQqGwpCkg+vagDo9GvZ9R0uK6uEs0eTJH2O6+0RFc8EPtXP5VzHjTxFoksFvpMesWD6kNX09TaLcIZQRdxEjZnPABPStLwZpN3pVlqL3dtHZ/bb+S7js43DC3Rgo25HGSVLHHGWNHjdR/YVscDP9q6dzj/AKfIaAOkooooAK8/+LMTHQ7GUY2rc7T9SpI/ka9Arhfiv/yK1t/1+p/6A9dGF/jRObF/wJHMfCj/AJGm5/68n/8AQ0r2GvHvhR/yNNz/ANeT/wDoaV7DWmP/AIxll/8AB+Z4j41tHvfiPdWcO0STywxpu4G5kQDP4mvbq8e17/ksUX/X7afyjr2Gni37lNeQsGvfqPz/AMzzL4ujjRz/ANdv/ZK8xr0/4u/c0j6zf+yV5hXo4L+BH5/mebjv48vl+QUUUV1HIFFFFAHr/wAKP+Rau/8Ar8b/ANASn/FVWbwtbkAkL
eISQOg2OP60z4UD/imrs/8AT43/AKAldP4j0UeINEm07z/I8wqfM27sYYHpkeleJOahiuZ9z3qcHPCcq3savSiiiuI7j5013/kYdT/6+5f/AEM1n1oa9/yMWp/9fcv/AKGaz6+mh8KPlp/Ey1ptst7qlpaOxVZ5kjJHUBmA/rVzxPpcWjeI73T4d/lRMNm85OCoYfzqLQOfEemY/wCfuL/0MV2XxX0wxanZ6kifJPGYnIXjcpyMn1IOP+A1lKpy1ox7pm0afNQlPqmjzyiilVSzBVBLE4AA5Jrc5z6E8LRRw+FNJSJFRTaRuQoxyygk/iST+Na1U9ItmstFsbVxh4beONhnOCFA/pVyvmZu8mz6mCtFIKKKKkoKKKKAOR+Iukxah4WmuvLZriz/AHkRBxgEjfn1G3J/CvEq+j9Xt3vNFv7aJQ0k1vJGoPQkqQK8HttAlufC95rizKI7WZYmjKnLZwMg/wDAhxXrYColTcW+v5nj5jSbqKUV0/I6n4VaXHc6reX8qI620aogdM4djkMD2ICn/vqvW64n4X2Ulr4bneWORHluWO11xwAB/PNdtXFjJ81ZndgoctFeYUUUVzHUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQA2ONIk2RoqLycKMDnmlKqxBZQSDkZHQ0tFABRRRQAUUUUAFFFFABXOeN/+QDbf9hXTv/SyGujrnPG//IBtv+wrp3/pZDQB0dFFFABRRRQAUUUUAFFFFABXOeN/+QDbf9hXTv8A0shro65zxv8A8gG2/wCwrp3/AKWQ0AdHRRRQAVwvxX/5Fa2/6/U/9Aeu6rmPiGAfA2o5A48rH/fxa2w7tVj6mGJV6MvQ4X4Uf8jTc/8AXk//AKGlew15N8JoVbWNQnOd6W4QemCwJ/8AQRXrNbY5/vmY5ev3CPHte/5LFF/1+2n8o69hryu6tku/jascgyqyRyDnHKQhh+qivVKMU/dpryQYRe9Uf95nA/FiCNtAsrgr+9S62K3orIxI/wDHR+VeRV7l4/0W61vw15Vmu+aCZZxGOrgBgQPf5s/hXhtd+AknSt2PPzCLVa/cvRaPfzaRNq0duWsYXEcku4fK3HGM5/iHbvVGvWvD2nQS/CG5VYPMe4guJWXBJaRSwUgeo2L+VeS1tRq+0cl2djCtR9movurhRRXUePNGTRNfigicvG9rEVJGMbRs/wDZM/jWjmlJR7mSg3Fy7fqd38K/+RVn/wCvx/8A0FK7iuH+Ff8AyKs//X4//oKV3FeDiv40j6HC/wAGPoFFFFYHQfO/iT/kaNX/AOv2b/0M1mVp+JP+Rp1f/r9m/wDQzWZX0tP4UfLVPjZv+C7K4vfFmnmCFpFhmWWQgcIoPU16p8QdPOoeD7vYm+S3Kzr7bfvH/vktXH/CWF21fUJgv7tIAjN6EtkD/wAdP5V6xXlYyq41010PXwVFSw7T+1c+b7fTrm5sby9jT/R7RVaVz0+ZgoA9+fyBrc8AaX/ani61ycR2v+kvzg/KRtx/wIr+Ga6/4VRRz6FqUUsayRtOAyOMgjaOortLDQ7DTb68vLWHZPeMGlOc9PT0GSTW2IxnK507a9DHD4LmUKl9Opo0UUV5J7AUUUUAFFFFABXO+KdDa68IXmm6TaRLJIyssSBUBPmKxPYdjXRUVUJuEk10JnBTi4vqNQFY1U9QAKdRRUlBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAVznjf8A5ANt/wBhXTv/AEshro65zxv/AMgG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFc543/wCQDbf9hXTv/SyGujrnPG//ACAbb/sK6d/6WQ0AdHRRRQAVg+NLU3ng3VIgfuw+b1/uEP8A+y1vVU1S0OoaTeWSuEa4geIMRkDcpGf1q6cuWaZFSPNBx7o81+En/IQ1P/rkn8zXqteVfCT/AJCGp/8AX
JP5mvVa6Md/HZzYD+AvmeZf81z/AM/8+1em157dWQg+NFlMrk/aLczMD/DiN0wP++Qfxr0KpxLvyf4UVhU1z3/mYV8yV9N1866/FHD4j1SKJFSNLuVURRgKA5AAHYV1Za9ZL0OTM1pF+p7H8Pf+RG07/tr/AOjXrw+4ha2uZYHILxOUYjpkHFe/eErdbXwjpUaYwbZJOBjlhuP6mvD/ABCqp4m1VFACreTAAdhvNaYOV61Tz/zM8bG1Gn5L9EdF4b8BX2rQ2Gov5cdq0oMqSghmjBB3KMYII4rrvilpxufDkV6gXNpKCxI52t8vH47f8iu2hAEEYAwAo/lWP4vthd+ENVjLFQtu0mR/sfNj/wAdrk+synWjJ9Gdn1WEKEorqjL+Glr9n8Gwy7932maSXGPu4OzH/jmfxrr65j4e/wDIjad/21/9GvXT1jiHerL1Zvh1ajH0QUUUVibHzv4k/wCRp1f/AK/Zv/QzSazol7od7JBdQyKgldIpWQqJQpHzL7YIP41ev7IX/j65tZN6xXGqvCzqOm6XBx74NeqeM/CR8U29r5d0IJrZm27lyrK2M575+UY/+vke5LEKk4J7NHgQwzqqclunoZfwt0tLXQZtQEjM94+CuMBQhYD+Z/Su7qG0tLextY7W1hSGCMYREGAKmrx61T2k3Pue1Rp+zpqHY4r4eWa2H9u2ke4xQag8SM3UhePzxiu1owB0FFKpP2knJjpU/ZwUUFFFFQaBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/AOQDbf8AYV07/wBLIa6Ouc8b/wDIBtv+wrp3/pZDQB0dFFFABRRRQAUVW1DULTSrCa+vp0gtoV3SSOeAP88Y71W0jXtN11Jm0+dnaBgssckTxSRkjI3I4DDI5GRzQBpUVk6p4m0nRr60sr+5eKe7dI4VEEjgs7bFyVUhcscckVJq+v6boSwnUJ2Rp2KxRxxPLJIQMnaiAscDqccUAaVc543/AOQDbf8AYV07/wBLIa2rC/tNUsIb6xnSe2mXdHIh4YVi+N/+QDbf9hXTv/SyGgDo6KKKACiiigDz34cKo1vxMQoyJ1A46DdJ/hXoVcJ4DUL4n8XKoAUXgAA7fPLXd10Yp3qv5fkjmwitSXz/ADZxGo/8lf0n/rwb/wBq129cTqP/ACV/Sf8Arwb/ANq121KvtD0/zHQ3n6/ogrwXxxYHT/GGoIA+yaTz1Zh13/McewYkfhXvVeOfFRGHimBiDta0XB9fmat8vlarbujDMYp0r9metadaf2fpdpZb9/2eFIt2MbtqgZ/SvAPEn/I06v8A9fs3/oZr6Irwzx9o8mleKbmQq3kXjGeJz3LcsPwYnj0x61eXy/eO+7IzGH7uNtke5Ku1FX0GKhvbSO/sLmzlLCO4iaJivUBgQce/NT0V56dnc9Fq6sUdH0qDRNJg062Z2ihBwznLHJJOfxJq9RRQ227sElFWQUUUUhnlj6XfjXY5TZXAjPiVpAxjP3NwO7/dwOvSvU6KK1q1XUtdbGNKiqd7PcKKKKyNgooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv/AMgG2/7Cunf+lkNdHXOeN/8AkA23/YV07/0shoA6OiiigArK1WHX5JYzo99plvGF+cXlnJMSfYrKmB+BrVooA4PxRb+II9CjudYubG9trTUbO6lSwspIyIo5laQsGkfcAMNgY+6etO07W9NuPFuv+Jre6RtDttLginvIwWjd0aV2wR97ajDOM9cV3VFAHCfEfXtJstL0qK61G2gkk1OxuESWQKTEtwhZ8HsACSe1O1PWdNs/GejeIrm9hGiz6ZPbw327MKyNJG4y3QblU4Pfbiu5ooA5bwBG48Oz3
BjeKC71C7urZHUqRDJM7IcHpkHdj3qj4z0Czjht9UWbUDcHV9POxtQnMPN3EP8AVF9n/jvHXrXb1znjf/kA23/YV07/ANLIaAOjooooAKKKKAM+w0Wx0y7vrq1iKS3snmTEsTk8/lySfxrQoopttu7EkoqyOJ1D/kr+k/8AXg3/ALVrtq4nUP8Akr+k/wDXg3/tWu2ravtD0/zMKG8/X9EFeb+OdMOqeOtCtHikeGZAr7ePlDktz7DmvSKKmlVdOXMi61JVY8rCuc8UeG31690eZfKKWdzvlWQn5oyQWA9/lFdHRUQm4PmRc4KceWWwUUUVJQUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXOeN/8AkA23/YV07/0shro65zxv/wAgG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/9LIa6Ouc8b/8gG2/7Cunf+lkNAHR0UUUAFFFFABRRRQBA1latfJetbxm6RDGsxX5gvpn0qeiii9xWSCiiigYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/wDSyGujrnPG/wDyAbb/ALCunf8ApZDQB0dFFFABRRRQAUUUUAFFFFABVHV9Is9c09rG+SRoGdJP3crRsGRg6kMpBBDKDwe1XqKAOc/4QjSv+frXP/B5ef8Ax2j/AIQjSv8An61z/wAHl5/8dro6KAOI1TwZGmo6Ktlc68bd7xlvca1dnEXkSkZzLx+8EfI/lmtT/hCNK/5+tc/8Hl5/8dpNX8aWOkXt1btZahdLZRrLfTWsQdLVGGQXywJ4G7ChiBziuijkSaJJI2Do4DKynIIPQigDnv8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OigDnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OigDnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6Oqep37adZG4Wyu7xgQBDaoGc5+pA/EmgDI/wCEI0r/AJ+tc/8AB5ef/HaP+EI0r/n61z/weXn/AMdpR4y05vC+na8kF28Wo+WLS2WMGaV35VAM4zgEnnAAJzQnjTTRo97qFxDd2z2My29xZyxDz1lbbsQKpIYtvXbgkHd1oAytL8GRvqOtLe3OvC3S8VbLOtXYzF5EROMS8/vDJyf5YrU/4QjSv+frXP8AweXn/wAdp1v4y05rTVJr6G60yTTIxNdwXiKHSMglWGwsGB2kDBPII607TvFtpe3Fxb3Vne6XPDb/AGsx38aoWh6FxtZhgdwSCMjIFAEf/CEaV/z9a5/4PLz/AOO0f8IRpX/P1rn/AIPLz/47S6P4xs9Xvbe1+w6hZm7haeze7iCLdRjGSmGJHBBwwU4OcV0VAHOf8IRpX/P1rn/g8vP/AI7R/wAIRpX/AD9a5/4PLz/47XR0UAc5/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtdHRQBzn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O10dFAHOf8ACEaV/wA/Wuf+Dy8/+O0f8IRpX/P1rn/g8vP/AI7XR1z+seLI9EluWudI1VrK1Aa4vo4VMMa4BLcsGYAHkqpxg+lADP8AhCNK/wCfrXP/AAeXn/x2szxF4Lii8M6rJpNzrx1JbSU2o
XWrtiZdh2YBlweccGtfVvFltpl6tnDY3+o3H2f7VJHYxq5ihyQHbcwHJBwBknBwKbdeMLCOHTXsYLvVJNRhNxbQ2SKXaIAEud7KFA3KOSDk460AMXwRpe0Zutczjn/ieXn/AMdpf+EI0r/n61z/AMHl5/8AHaV/GemnSdOvrWG7vH1FzHbWkEY853XO9SGIC7drbskAY60n/Caab/YY1Lybsubr7F9i8r/SPtGceVtzjd364xznHNAB/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtX9F1631tLlUguLW5tJfKuLW6ULJExAYZwSCCCCCCQa1KAOc/4QjSv+frXP/B5ef/HaP+EI0r/n61z/AMHl5/8AHa6OigDnP+EI0r/n61z/AMHl5/8AHaP+EI0r/n61z/weXn/x2ujooA5z/hCNK/5+tc/8Hl5/8do/4QjSv+frXP8AweXn/wAdro6KAOc/4QjSv+frXP8AweXn/wAdo/4QjSv+frXP/B5ef/Ha6CWTyoXk2O+xS21Blmx2HvXPR+M7OO4kg1Swv9IZbaS7U3yJteKPG8go7D5QQSDg89KAF/4QjSv+frXP/B5ef/Hay5/BkY8UWEcVzr39mNZ3DTn+2rvHmh4fL583I+Uy8f4CtfSfF1pql4LSWyv9OmktzdQi+jVPOiBALLhjjG5chsMMjiotK8bWGrXtpAlnf28V+rPYXNxEFiuwoydmGJHy/MNwXIGRQA7/AIQjSv8An61z/wAHl5/8do/4QjSv+frXP/B5ef8Ax2mW/jjTrnUIYFtb5bS4uWtINQeJRbzTKSCqndu6qwBKgEjgmifxxp1vqEsDWt81pBdLZzaisS/Z4piQAjHdu6sASFKgnBIoAf8A8IRpX/P1rn/g8vP/AI7R/wAIRpX/AD9a5/4PLz/47XR0UAc5/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtdHRQBzn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O10dFAHOf8ACEaV/wA/Wuf+Dy8/+O0f8IRpX/P1rn/g8vP/AI7XR0UAc5/whGlf8/Wuf+Dy8/8AjtH/AAhGlf8AP1rn/g8vP/jtb9xcQ2ltLc3EqxQRIXkkc4CqBkkn0xXM2/j7TLrRBqUdnqGXvTYw2hhAnmmHZV3YHAJ+YjABzjFAE/8AwhGlf8/Wuf8Ag8vP/jtZegeDI5NOlbVLnXhcC8uVTdrV2v7oTuIukv8AzzCc9+/Naq+NNN/sa71GaG7ge0nFrNZyRDz1mYqFjCgkEtvXGCQdw5og8ZacbHVLm9hutOfS0D3dvdoPMRWBKsNhYMGwQME8gjrQAn/CEaV/z9a5/wCDy8/+O0f8IRpX/P1rn/g8vP8A47T9P8XWd3LdQ3lpe6VPbW/2t4r9FQmHn94CrMMDByM5HcCk0fxhaavfQWhsdQsnuoDcWhvIlQXMYxlkwx6blO1sNg5xQA3/AIQjSv8An61z/wAHl5/8dpP+EG0YywvJJq03kypMiT6vdSpvRgykq0hBwwB5HaukooAKKKKACiiigAooooAKKKKACiiigAooooA891Q32kX/AIvt10i/vW1pVksXtoDIjOYFhKOw4jwUzlsDDV2mi2T6boWnWEjB3traOFmHcqoBP6VeooAKKKKACiiigAooooAKKKKACqmo6hFplk91NDcyopAKWtu87nJxwiAk/lVuigDyjQheJ4L8GzDSdV87w9NGb21ksZY5CrQyRsUVlBk2lwflzwDVq807UtT/ALX8RW+m3YQ6rYXdvZyxGOeeK22b22Nggn5toOCdo45r02igDzPWdN1Hxa3iTUrHT7uGN9Nt7W0jvImge5kileZvlcAgfMFBIGST2qzqNrd+NtTvZbWwvrGBdBu7ASX1u0DNNPswoDckLs5I4yRgmvQ6KAOB02S81zXPCx/sm/sRo8Mr3jXUBjVZDF5QjQnh+WJyuRhRzzXfUUUAF
FFFABRRRQAUUUUAFcD4t1Z9R1iTw7dWGsRaKqK17cW2mXE32wEZ8lGjQgLj77Zz/CO5HfUUAcVc3M3h7xhf6qdL1C8s9S0+3SE2dq0jJJEZP3bKBlMiRSCcDg5IrJ0XTb/wa/h29v7C7uI00d7K5WyhadreUyLIBtQElfvLkAjKjsa9LooA8zsNO1LRhoOvXWm3bIt7qE9zawRGWW3S6cuh2LknbhQQMkbj6Ug07UhCviP+zLvZ/wAJIdT+xeWfP+zmD7Pu8vru/j29cds8V6bRQByvhaO4u9e8Qa49pc2ttfPBHbpcxGKR1iQguUPK5LEAHBwucV1VFFABRRRQAUUUUAFFFFAEVxN9ntZZ/Lkk8tC+yJdztgZwo7k9hXlWtaHceJLzUhoNprOLzTLuG4k1dJUVHbBjSHzvmXLDkJ8mAM9BXrVFAHATrd+MtbspItNv9OhtNNu4ppLy3aHEsyooRc/extYllyvA55qDSlv9UPg3S30i/s5dEYS38s8BSNDHbvEFRzxJuZ8jaTwOcdK9GooA8vsLPUToeg+Em0u+S707U4pZ7loGEHkwzGQSLL91t4CgAHOWOQMUXtnqI0LWvCK6XfPd32qyyw3SwMYPJln80yNL90FQSCCd2V4HNeoUUAFFFFABRRRQAUUUUAFFFFAGD4p0bUNbs7WCxvbaBY7hZpo7mBpY5wvIVgrqcbtrdeduDxkVwmm6X4j066XWNRtvtUVj4iubh4LSykSRkkjkjaVFLMXXLoQBzgNy1es0UAeZXWnalqH9qeJIdMuwh1mzvYbOSMpPLDAqIzbDghj8xCnBO0etLq+m6j4p/wCEl1Sy067iR7OzgtIbqIwSXLQStM3yvggHcEBIGee1emUUAedanZ3njW/1SazsL2ygPh+505HvoGgZ5pipwFbBIXZyenzcE1a0+a71/wAR+HJhpN/YppNvM1213btEokaMRiNCfv8A8RyuVwo55ru6KACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA//Z", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1, 45920])\n", + "torch.Size([1, 41328])\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "clean = signal.unsqueeze(0) # [batch, time]\n", + "perturbed = perturbator(clean)\n", + "\n", + "plt.figure(1)\n", + "plt.title(\"Perturbed Speech\")\n", + "plt.plot(perturbed.squeeze())\n", + "plt.show()\n", + "\n", + "print(clean.shape)\n", + "print(perturbed.shape)\n", + "\n", + "Audio(perturbed,rate=16000)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "94S7Ev3Lq90c" + }, + "source": [ + "The perturbed tensor contains the signal with the perturbed speech. The change factor is 90% as can be observed from the shapes of the original vs perturbed signals (41328/45920 = 0.9)\n", + "\n", + "One more thing to note is that this function supports multiple batches in input and the original signal must be unsqueezed to allocate the batch dimension in the first dimension of the tensor.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "E8GXaLBFsr5H" + }, + "source": [ + "## 2. Time Dropout\n", + "Drop chunk replaces some random chunks of the original waveform with zeros. The intuition is that the neural network should provide good performance even when some piece of the signal is missing. Conceptually, this similar to **dropout**. The difference is that this is applied to the input waveform only. 
The other difference is that we drop consecutive samples rather than randomly selected elements like in dropout.\n", + "\n", + "Let's see an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 496 + }, + "executionInfo": { + "elapsed": 1374, + "status": "ok", + "timestamp": 1704405953286, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "bOMhZYA0u2Vw", + "outputId": "4855236f-bbd9-4890-9c4f-512d3d30c745" + }, + "outputs": [ + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGzAjgDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAormfFPiC/03UNH0jSobVtQ1WSRY5bxmEUSxruYkLyx6AAEfXinCfxDaaFrE2vX2mWvk2zyQX1jDI3lYViztE+c7cAgAnPIoA6SiueufFmj6HpWly6pqu97yJTC6W7l7j5QS4jQFgOcnjAzSx+OPDcujXmsR6oj6fZSrDPOsbkI7bcDpk/fXpnGeehoA6CisPSfGGga5qMlhp2oLNconm7PLdN6ZxuQsAHXOOVJHNQL478NNqy6Z/aQW5eY2674ZFjaQHGwSFdhbIxjPWgDo6K5u21S9k+I+oaS02bGLS4LhIto4kaSRWOcZ5CrxnHFaWs69pugW0c+pXBiSR/LQLE8jO2CcBUBJ4B7UAaVFcpqfjvSYvBOp+ItMuRdJaxyKqiGQkTKpIR1xuTnGdwGAcnA5rk9J+JQ07+wl1LVL3VBqbObmR9JliNtth37YlSIFxuIGfm45zgUAer0Vgah4
00DSobOS8vWjN5EJ4YhbyvK0ZAO4xqpdRzzkDHQ0+58Y+HrPRrPWJ9VgXTryQRwXIyUZiCcEgcfdbOcYxzQBuUVR0rV7PW7IXlg8jwFioaSF4ySPZwD+OK5Gx1nxnreoa5/Zkmgx22najJZRx3NvNvfaqtkur4H3/7vagDvKK5LSvHdpJoOo3+uR/2XPpVz9k1CLJlWOQlQpUqMsrblIOO/tmrR8eeGV06XUG1MC0jlWESGGTEjt0EY25kzjjZmgDo6Kw7fxjoF3oV5rMGoK1jZBjcv5bhodoyQyEbwcdsZqPTfG/hzV9UXTbHU0lunVmjXy3USheuxiAr477ScUAdBRWZ4h1y18N+H73WLzcYLWPeVT7zHOAo9ySAPrWFbXPxAkiivZbLQUR8O2nb5RMin+HzfulgP9nGe/egDsKK4JfH1ppHi/wAUWOu6l5dtaTW4tIlgLsiGBHc4RS23c2SzcDPUV1n9vaWZtMiW7R21RWayKAsswCbyQQMAbeecUAaNFZdx4i0q1fVUmutraTbrc3o8tj5UZVmDcD5uEbgZPH0qlp3jnw3q2pw6dY6oktzOpaEeU6rJgZIVioViB1AORg56UAdDRXHaN4p8lPFl3rt+kdlpmqvbxO6geXGI4yFG0ZY7mOOpJOPSutgmS5t454w4SRQyh0KNg+qsAQfYjNAElFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRSEhRkkAe9AC0U3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AOX8cSeE3sba08XRobSVy0UkiPtjdcc+Yv8Aqzg8HIzzXDafOp0nxzbaJqF9qHhWPRpDbzXTvIsc5jfckTvyy4wTyQD9a9h3p/eX86N6f3l/OgDy631G08N+J/D2tay/kaZceG4rOG6dSUjmDB2UkD5SykYz124rAu5bbUPh18Q54Ldo7a41+NlR4yhZWa2O4qeRuznnnmvWtbtNXvfIOj67FprJuEgktFuFkzjHBZSCMHoe/NM8M6FF4d06aBr17y6ubh7q6upAFM0r4y2BwowAAB0AFAGTr6rH8R/BexQvy3ycDHy+Upx9OB+VeZ+JvEt7rHhO8OoeIbldVW6XzdBgsUVLULOMF3KFwAADu3DJIA6173vT+8v50b0/vL+dAHI2f/JX9V/7Alr/AOjpqZ491280aXR4o9S/sjT7qaRbvVPs4l8jC5RfmBUbjkZIPSux3p/eX86N6f3l/OgDxjTrg3OifFWQX91fxNpwZLq5hWJph9lcbtqqowcDBxyADW7q93BpLfDjWL6QQadaq6z3DA7It9oyruPYE8Zr0ren95fzo3p/eX86APJtWu0s/Ht7rNx4outG0vVbC1ewv4IIpYZlUNlC7o20/MGA4yG702503TIvD/hdbG/n1S0ufFsVyZ7mER73bzC2FCqNu4EjAxzxxXre9P7y/nRvT+8v50AOrzHwz4x8PeHtT8XWuq6rBbXLa9PIsLZLspSMAhQCTyD09K9M3p/eX86N6f3l/OgDxjXLO4vPAHjfXru0mtYNa1C2a3gnQo5gSSKNWZTypbBOD2xXX+NXi0jxP4S1y8jK6PYSXMdxIqFlt2kjCxuwA4UYZc9t1dxvT+8v50b0/vL+dAHj+sXEOs6d8SNf0wM2k3GkxwJcbCq3MsaPuZc9QAyrnvj2q2+sab4i/wCEI0bRUf8AtHT7y3uLiAQspsoY4yJA5I+XOQoHfPGa9B8SaUniDw1qWj/alt/ttu8Hm7d+zcMZxkZ/MVftUS2tIYPMVvLjVN3TOBjNAHPfETRrzXvAupWOnoJLzEc0MZON7RyLIF/Hbj8ar
23xM8NXEUSi4nXUHwp037PIblXP8BjxnIPGenviuu3p/eX86N6f3l/OgDi/DEMR+I3j2QxqXaeyRiR1X7MvH05Ncfod1FpHhX4b65es0em2T3MVxPtJWESJIiFsdFyAM9BkV7JvT+8v50b0/vL+dAHkdzq1trR+Kt5Z72tW0KFYpWQqJQILj5lzztzkA98ZHFbGqxRweHfhysSKgj1KyVNoxtBgcED8K9E3p/eX86N6f3l/OgDxeC2uLLxfr3ia5gfUdG07XZvtFkoJNuxihxdKo++VHBByQOV5zXslrdW99aRXVrMk1vMgeOSM5VlPIINSb0/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6im70/vL+dG9P7y/nQA6iiigAooooAKKKKACiiigArl/H9vDdeGYre4iSaGXU9PSSORQyupu4gQQeoIOK6iuc8b/APIBtv8AsK6d/wClkNAC/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRUUAc7/wAID4O/6FXRP/ACL/4mj/hAfB3/AEKuif8AgBF/8TXRUUAc7/wgPg7/AKFXRP8AwAi/+Jo/4QHwd/0Kuif+AEX/AMTXRUUAc7/wgPg7/oVdE/8AACL/AOJo/wCEB8Hf9Cron/gBF/8AE10VFAHO/wDCA+Dv+hV0T/wAi/8AiaP+EB8Hf9Cron/gBF/8TXRU13WNdzsFXpknAoA5/wD4QHwd/wBCron/AIARf/E0f8ID4O/6FXRP/ACL/wCJroqKAOd/4QHwd/0Kuif+AEX/AMTR/wAID4O/6FXRP/ACL/4muiooA53/AIQHwd/0Kuif+AEX/wATR/wgPg7/AKFXRP8AwAi/+JroqKAOd/4QHwd/0Kuif+AEX/xNH/CA+Dv+hV0T/wAAIv8A4muiooA53/hAfB3/AEKuif8AgBF/8TR/wgPg7/oVdE/8AIv/AImuiooA53/hAfB3/Qq6J/4ARf8AxNH/AAgPg7/oVdE/8AIv/ia6KigDnf8AhAfB3/Qq6J/4ARf/ABNH/CA+Dv8AoVdE/wDACL/4muiooA53/hAfB3/Qq6J/4ARf/E0f8ID4O/6FXRP/AAAi/wDia6KigDnf+EB8Hf8AQq6J/wCAEX/xNH/CA+Dv+hV0T/wAi/8Aia6KigDnf+EB8Hf9Cron/gBF/wDE0f8ACA+Dv+hV0T/wAi/+JroqKAOd/wCEB8Hf9Cron/gBF/8AE0f8ID4O/wChV0T/AMAIv/ia6KigDnf+EB8Hf9Cron/gBF/8TR/wgPg7/oVdE/8AACL/AOJroqKAOd/4QHwd/wBCron/AIARf/E0f8ID4O/6FXRP/ACL/wCJroqKAOd/4QHwd/0Kuif+AEX/AMTR/wAID4O/6FXRP/ACL/4muiooA53/AIQHwd/0Kuif+AEX/wATR/wgPg7/AKFXRP8AwAi/+JroqKAOd/4QHwd/0Kuif+AEX/xNH/CA+Dv+hV0T/wAAIv8A4muiooA53/hAfB3/AEKuif8AgBF/8TR/wgPg7/oVdE/8AIv/AImuiooA53/hAfB3/Qq6J/4ARf8AxNH/AAgPg7/oVdE/8AIv/ia6KigDnf8AhAfB3/Qq6J/4ARf/ABNH/CA+Dv8AoVdE/wDACL/4muiooA53/hAfB3/Qq6J/4ARf/E0f8ID4O/6FXRP/AAAi/wDia6KigDnf+EB8Hf8AQq6J/wCAEX/xNH/CA+Dv+hV0T/wAi/8Aia6KigDnf+EB8Hf9Cron/gBF/wDE0f8ACA+Dv+hV0T/wAi/+JroqKAOd/wCEB8Hf9Cron/gBF/8AE0f8ID4O/wChV0T/AMAIv/ia6KigDnf+EB8Hf9Cron/gBF/8TR/wg
Pg7/oVdE/8AACL/AOJroqKAOd/4QHwd/wBCron/AIARf/E0f8ID4O/6FXRP/ACL/wCJroqKAOd/4QHwd/0Kuif+AEX/AMTWJ4p8I+GtM02zvLDw/pdrcx6rp+yaC0jR1zdxA4IGRkEj8a72uc8b/wDIBtv+wrp3/pZDQB0dFFFABRRRQAUUUUAFFFFABXOeN/8AkA23/YV07/0shro65zxv/wAgG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFcP8VJHTwrCqsQHu0Vh6ja5/mBXcV5z8V9TtxYWmlfObkyi56fKEAZevrk/oa6MKm60TmxbSoyPRqKr2N2l/p9teRqypcRLKobqAwBGfzqxXO1bQ6E7q6CiiigYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXOeN/+QDbf9hXTv/SyGujrnPG//IBtv+wrp3/pZDQB0dFFFABRRRQAUUVzNj4406+vraFLW+jtbuZ4LS/kiUQXEi5yqndu52tglQDjgmgDpqKyfEWvJ4c0uTUJbC+vIo1Z5FtI1YoqqWLHcygAAetR6r4lt9K0ez1E2d3ci8liihggCeYzSfdHzMqjr60AbVc543/5ANt/2FdO/wDSyGtfTL6a/tTNPpt3p7hivk3RjLkcc/u3YY/HPHSsfxuw/sK2GRn+1dO4z/0+Q0AdJRRRQAUUUUAFFFFABRRRQAV458U5S/iqJD0S1QD/AL6Y17HXh/xHneXxteIxGIUjROO2wN/NjXdl6vV+RwZi7Ufmeq+D2ZvCGlFiSfs6jn0HArbrD8G/8ifpf/XAVuVyVfjl6nXS/hx9EFFFFQaBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/AOQDbf8AYV07/wBLIa6Ouc8b/wDIBtv+wrp3/pZDQB0dFFFABRRRQAjDcpGSMjGRXmOlWmoy6N4U8LPpV9DdaPeQvd3EkDLAI4N2GWT7r7/lwFJPzHOMV6fRQByXirV/tfw81CSPTNW87ULSe3hthYyPMHZHUb0QEqCR1PHI9aiN1pOo+C7AatoWpXdrG0cT20ulzM6uqfeMRXcVB4yARk12VFAHJeBrSa1/tcxWdzY6PLdBtOtblSrxpsUOQh5RS4YhTjHPAzVbxp4d0SKC31aPR7BNSOr6exu1t0EpJu4gTvxnkEjrXbVznjf/AJANt/2FdO/9LIaAOjooooAKKKKACiiigAooooAK8L+IX/I86j/2y/8ARSV7pXz/AOL7t7zxfqssgUMtw0Q2+ifIP0UV6GXL9435HnZk/wB2l5nsng3/AJE/S/8ArgK3Kw/Bv/In6X/1wFblcdX45erO2j/Dj6IKKKKzNAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuc8b/8AIBtv+wrp3/pZDXR1znjf/kA23/YV07/0shoA6OiiigAooooAKKKKACiiigArnPG//IBtv+wrp3/pZDXR1znjf/kA23/YV07/ANLIaAOjooooAKKKKACiiigAooooAK+d/En/ACNGr/8AX7N/6Ga+h2OFJ9BXzPI7SyNI5y7Esx9Sa9PLVrJnl5m9Ir1PffBv/In6X/1wFblYfg7/AJE/S/8ArgK3K4Kvxy9WehR/hx9EFFFFZmgUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRR
QAUUUUAFFFFABRRRQAUUUUAFFFFABXOeN/+QDbf9hXTv/SyGujrnPG//IBtv+wrp3/pZDQB0dFFFABRRRQAUUUUAFFFFABXOeN/+QDbf9hXTv8A0shro65zxv8A8gG2/wCwrp3/AKWQ0AdHRRRQAUUUUAFFFFABRRRQA1/9W30NfM1fS88kcNvJLK6pGilmdjgKAOSTXzRXqZb9r5fqeTmn2fn+h7/4N/5E/S/+uArcrD8G/wDIn6X/ANcBW5Xn1fjl6s9Kj/Dj6IKKKKzNAooooAKKKKAGvIkZUO6qXO1QTjJ9BTq8Um1G8uPiLFaT3MskEWt5jR2yE/egcZ6DAHFe11vWoeytd7nPQrqrzWWwUUUVgdAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/9LIa6Ouc8b/8gG2/7Cunf+lkNAHR0UUUAFFFFABRSMSFJAyccD1rybR7WC28P+DPEsGf7c1K/hW9udx33Hmh/NR/ULyQD93YMYxQB61RXL/ELSrHU/A2sNe26Tm2sbiaLdnCuImw2PUVm6+oudB8H6bMT9hv7y3gu1BIEiCB3CHHZmRQR36d6AO6rnPG/wDyAbb/ALCunf8ApZDVPwlbxaZ4l8TaPYqItNtZLeSCBfuQtJHl1UfwjhWx0G4+tV/Gc+umG3ik07T10z+19PxcLeuZsfa4sfu/KA64H3/fnpQB29FFFABRRRQAUUUUAFFFFAHFfFG5mg8JokUhVZrpI5AP4l2s2PzUflXjNev/ABXdB4atIyw3teKQueSAj5P6j868gr28Av3J4OYP998j3/wd/wAifpf/AFwFblYfg3/kT9L/AOuArcryKvxy9We1R/hx9EFFFY+t6+mj3OmW/lrLLfXSQBS+0qpOC2O+CR+dTGLk7IqUlFXZsUUUVJQUUUUAeESyxwfEt5pXVI49YLOzHAUCbJJr3evnjxF/yNWrf9fs3/oZr6Hr0MctIPy/yPNy96zXn/mFFFFeeekFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXOeN/8AkA23/YV07/0shro65zxv/wAgG2/7Cunf+lkNAHR0UUUAFFFFABWJaeEdCsdV/tO3sQlyHeRP3rlI2f7zJGTsQnJyVAJyfWtuigDDufCOi3egxaHLbzjTo84hju5o8g5yGZXDMDuPBJFOTwnoyaGdGNtLLYlw4Se6llZWGMFXZiy4wMYIx2raooAoaToun6Havb6fAYkkcySM0jSPI56szsSzHgcknoKy/G//ACAbb/sK6d/6WQ10dc543/5ANt/2FdO/9LIaAOjooooAKKKKACiiigAooooA8y+L3/MG/wC2/wD7TrzGvTvi9/zBv+2//tOvMa97BfwI/P8AM+ex3+8S+X5I9/8ABv8AyJ+l/wDXAVuVh+Dv+RP0v/rgK3K8Wr8cvVnu0f4cfRFe/uTZ6dc3QXcYYnkCk4zgE4/SvAdT8Q6hqutLqs0oS4RlaMR52xlcY2gk45GfrXu+uf8AIA1L/r1l/wDQDXznXoZdFNSbR5uZTknGKeh9MxSxzRJLE6vG6hkdTkMD0IPcU6szw3/yK2kf9eUP/oArTrzJKzaPUi7xTCiiikUfPHiL/katW/6/Zv8A0M19D188eIv+Rp1b/r9m/wDQzX0PXpY/4Yf12PMy/wCOp6/5hRRRXmnphRRRQAUUUUAFFFFABRRRQAUUUjMFGWIA9SaAFooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuc8b/wDIBtv+wrp3/pZDXR1znjf/AJANt/2FdO/9LIaAOjooooAKKKKAC
iiigAooooAK5zxv/wAgG2/7Cunf+lkNdHXOeN/+QDbf9hXTv/SyGgDo6KKKACiiigAooooAKKKKAPMvi9/zBv8Atv8A+068xr0v4uyAy6RHg5VZWz9dn+FeaV72C/gR/rqfPY7+PL5fke/+Df8AkT9L/wCuArcrD8Hf8ifpf/XAVuV4tX45erPdo/w4+iM3xExXwxqzA4Is5iD/AMANfO1fRHiT/kVtX/68pv8A0A18716WXfDI8vM/jifQPhG5W68I6VIqlQLZI8H1UbT/ACrarnfAn/IlaZ/uN/6G1dFXm1VapJebPUou9OL8kFFFFZmh87+IuPFOrf8AX7N/6Ga+iK+d/Ef/ACNGrf8AX7N/6Ga+iK9LH/DD+ux5mX/HU/ruFFFFeaemFFRzXENv5fnTRx+Y4jTewG5j0UZ6n2qSgAooooAKKKKACiiigArk/iQSPBd1z1kj/wDQhXWVxHxTmePwpEiNhZbtEcY6jax/mBW2GV6sfUwxLtRl6Gv4N1m617w8l7eBBMZHU7BgYB44roK82+E+qK1te6S5+dG+0R8nJBwrfkQv/fVek08TDkqtBhqntKUZBRRRWBuFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRXIeJfGo8O+IrGwkgU20kYlnlOSVUkj5QO425q6dOVR2iRUqRprmlsdfRVewvYdSsILy3YmGZA6EjBwasVLVnZlJpq6Cio2nhSZIXlRZXyUQsAzY64HepKQwooooAKKKKACuc8b/8AIBtv+wrp3/pZDXR1znjf/kA23/YV07/0shoA6OiiigAooooAKKKKACiiigArnPG//IBtv+wrp3/pZDXR1znjf/kA23/YV07/ANLIaAOjooooAKKKKACiiigAooooA8r+Lf8Ax+6X/wBc5P5ivOK9H+Lf/H7pf/XOT+Yrzivfwf8AAifO43+PL+uh7/4O/wCRP0v/AK4CtysPwb/yJ+l/9cBW5XiVfjl6s96j/Dj6IzPEn/Irav8A9eU3/oBr53r6I8Sf8itq/wD15Tf+gGvnevTy74JHl5n8cT2L4VztJ4XnjaQt5V2wVSc7VKqePQZ3H867mvO/hL/yCtR/67r/AOg16JXBi1atI78I70IhRRRXOdJ87+I/+Rn1b/r9m/8AQzX0RXzv4j/5GfVv+v2b/wBDNfRFelj/AIYf12PMy/46n9dwooorzT0zzX4sagY10yyjlAfc07AH5lxgKfbq/wCVekRussayIcqwDA+oNeQfFf8A5Gm2/wCvJP8A0N69W0qVJ9IspozlJII2U46gqCK7K8UqFP5nFQm3iKi9C3RRRXGdoUUUUAFFFFABXC/Ff/kVrb/r9T/0B67quF+K/wDyK1t/1+p/6A9b4X+NE58X/Bkcp8LriG38UyiaaOPzbVo03sBuYumAPU8HivZa+ctE/wCQ9p3/AF9Rf+hCvo2unMI2qKXc5stnem49gooorzz0QooooAKKKKACiiigAooooAKKK5H4h61caNoULWV2Le6knULgjcVAJOAe2dufr71dODqSUV1IqVFTg5voddRXAXHxT08XtlHbQO1tIVNzLICDED1AAzkjr/KsHxx46TV7eGx0mV1tWXfO5BVmOThPp398j0reGDqykk1a5zzxtKMW072PXaK5X4f622s+Go1mkL3VqfJkLHlh1U/lxn1BrqqwqQcJOL6HRTmqkVJdQrwXxtq66z4pup4pC9vHiGEkD7q9cexbcfxr3K/ma20+5nTG+OJnXPTIBNfNpJYknqeTXfl0E3KR52ZzajGBr6J4l1LQJmks5QcxGMJLllUEg5AzjOR+prXg+I+v/bLZ7m6DQJMHkSONVLpnlc49MiuQor0ZUacndo8yNepFWjJnWxeILnxH8QdLvpkWIC5ijjRf4U35AJ7nk817dXz14X/5GvSf+vuL/wBCFfQteZj4qMopbWPWy6TlGUpb3CiiivPPRCiiigArnPG//IBtv+wrp3/pZDXR1
znjf/kA23/YV07/ANLIaAOjooooAKKKKAAkAEk4ArhNO8X6zPFousXdvYjRdZuVggijVxPCHz5TsxO1t2BkADG4cnFd2QCCCMg1wuneENZgi0XR7q4sToujXKzwSxs5nnCbvKRlI2rtyMkE52jgZoA2vEGr6hbanpej6StsL7UDK/nXSs0cUcYBZiqkFjlkAGR1znisG88ZazH4dMsdvbJqdtq39m3jLay3MagcmRY0IcgqVbGcjOOa1L/SNfun0XWEOm/23p/nJJDvkW3ljkwGUNtLKflQ5weQRjml03Sde0XSriS2Gm3Wq31895drLK8UI3DG1GCsflCoMlecHpmgC/4Y1RtX0k3D6jZ37iVkZ7W2e3CEY+Ro3ZmVh3yR1HFYfjTxFoksFvpMesWD6kNX09TaLcIZQRdxEjZnPABPStjw3o17p02q3+pSW5vdTuRPJHbZ8qILGsaqCcFjhASxAyT0FQ+N1H9hWxwM/wBq6dzj/p8hoA6SiiigAooooAKKKKACiiigDx74r/8AI023/Xkn/ob1wtd18V/+Rptv+vJP/Q3rha+hwv8ABifOYv8AjSPf/B3/ACJ+l/8AXAVuVz/giVZvBmmOmcCIpz6qxB/UV0FeFV/iS9We/R/hx9EZniT/AJFbV/8Arym/9ANfO9fRHiT/AJFbV/8Arym/9ANfO9enl3wSPLzP44nqXwjm3WuqwbfuPG+c9chh/wCy/rXpG5ckbhkdRnpXmfwh/wCYz/2w/wDalZnjW4lh1DxNHG5VZrqySQf3l8p2x+ag/hWFaj7XEyje236G9Gv7LDRk1ff9f8j2CiorUlrSEk5JjUkn6VLXnnoo+ctauIrvXdQuYW3RTXMkiNjGVLEg19G18yV9N16eYqygvX9Dystd3N+n6hRRRXmHqnj3xX/5Gm2/68k/9DevRfBt2L3wfpcqqVCwCLB/2Pkz+O3NedfFb/kabb/ryT/0N67j4dureB7AKwJUyBgD0PmMf5EV6Ndf7LBnm4d/7XNf10Opooorzj0goqnq129ho99eRqrPb28kqhuhKqSM/lVLwpqtxrfhqz1C6WNZpd+4RgheHZe5PYVXI+Xn6bEc65+TrubNFFFSWFcL8V/+RWtv+v1P/QHruq4X4r/8itbf9fqf+gPW+F/jROfF/wAGR5XozBdc09mICi5jJJ7fMK9j+Hup3mq+G3uL6dp5RcOu9uuMA4/U14fXsvwt/wCRTk/6+n/9BWvSx8V7O55eXSfteX1O2ooorxj3AooooAKKKKACiiigAoorn18RNJ45fQY1jaGK08yRsEMsmQcZ6EbSPz61UYOV7dCZTUbX66Gvf31tptlLd3cqxQxrksxA/Ae57CvC/FviJ/EmsNcqZVtUAEEUgGU+UbunqQTXVfE/xDa3kdtpNpJFOEfz5ZI3DBWG5QvHfk5/CvN69bA4dRj7SW7PHx+Icpezjsgooor0DzT0f4SXUaX2qWhz5ssccq8cYUkH/wBDFeqV498KP+Rpuf8Aryf/ANDSvYa8PHK1Znv4B3oI4/4k6ld6b4YRrOYxNPOIJCACSjI+Rz06Dkc14pXsPxX/AORWtv8Ar9T/ANAevHq78Al7K55+YN+2sFFFFdpwGt4X/wCRr0n/AK+4v/QhX0LXz14X/wCRr0n/AK+4v/QhX0LXkZj8cfQ9nLPgl6hRRRXnHphRRRQAVznjf/kA23/YV07/ANLIa6Ouc8b/APIBtv8AsK6d/wClkNAHR0UUUAFFFFABRRRQAUUUUAFc543/AOQDbf8AYV07/wBLIa6Ouc8b/wDIBtv+wrp3/pZDQB0dFFFABRRRQAUUUUAFFFFAHj3xX/5Gm2/68k/9DeuFruviv/yNNt/15J/6G9cLX0OF/gxPnMX/ABpHunw9/wCRG07/ALa/+jXrp65j4e/8iNp3/bX/ANGvXT14df8Aiy9We9h/4UfRfkZniT/kVtX/AOvKb/0A18719EeJP+RW1f8A68pv/QDXz
vXpZd8EjzMz+OJ6T8I541udVty3710idV9VUsCf/Hl/Osfx5Ky+IdbhGNrXFsx9ciFgP/QjUfw61W30vxSouA5+1x/ZkKjOHZ1xn24qD4gk/wDCcakM8Zj/APRa1oof7U33X+RlKd8JFdn/AJnsmgSPN4c0uWRizvaRMzHuSgya0azPDf8AyK2kf9eUP/oArTrxp/Ez2qfwI+ZK+m6+ZK+m69LMvs/P9DzMr+38v1CiiivLPWPE/iWSfGUuT0hjx+Vd18Mf+RPX/ru/9K4X4l/8jlN/1xj/AJV3Xwx/5E9f+u7/ANK9Sv8A7pH5Hk4f/fJfM7KiiivLPWMzxJ/yK2r/APXlN/6AawfhrqFtc+E4bOJ8z2hYTLjpudmX9P5VveJP+RW1f/rym/8AQDXDfCL7usfWH/2euuEU8NJ9mjknJrEwXdP/ADPTKKKK5DrCuF+K/wDyK1t/1+p/6A9d1XC/Ff8A5Fa2/wCv1P8A0B63wv8AGic+L/gyPHq9l+Fv/Ipyf9fT/wDoK141XsfwrYN4UmA/hu3B/wC+UP8AWvUx/wDB+Z5WX/xvkdxRRRXiHuhRRRQAUUUUAFFFFABXiHju5ntPH2oy208kMmIxujcqceUncV7fXhfxC/5HnUf+2X/opK78v/iv0/yPPzLSkvX9GcxRRRXsnhhRRRQB3Xwo/wCRpuf+vJ//AENK9hrxn4XXMUHi1o5CQ09s8cYx1YFW/kpr2avEx/8AG+R72Xv9z8z5/wDE97dS6/qltJczPAl9MyxM5Kg7iMgfTisStPxJ/wAjTq//AF+zf+hmsyvYpq0EeLUbc3cKKKKszNbwv/yNek/9fcX/AKEK+ha+evC//I16T/19xf8AoQr6FryMx+OPoezlnwS9Qooorzj0wooooAK5zxv/AMgG2/7Cunf+lkNdHXOeN/8AkA23/YV07/0shoA6OiiigAooooAKKKKACiiigArnPG//ACAbb/sK6d/6WQ10dc543/5ANt/2FdO/9LIaAOjooooAKxYdeMvjC40IQDbDaicy7uc5Axj6MK2q4XS/+Sxa3/15L/KGtaUVJSv0RjWm4uNur/zO6ooorI2CiiuU8W+NofDE0Vstsbi5liMgG7AXnAz65IP5VcKcqkuWK1IqVI0480nocT8V/wDkabb/AK8k/wDQ3rha7D4iXn9oanpV6UCG40uGUoDnbuLnGfxrj697DK1KKZ89inetJo90+Hv/ACI2nf8AbX/0a9dPXMfD3/kRtO/7a/8Ao166evDr/wAWXqz38P8Awo+i/IzPEn/Irav/ANeU3/oBr53r6I8Sf8itq/8A15Tf+gGvnevSy74JHmZn8cS1pl2LDVrO8I3C3nSXHrtYH+laXi/UbfVvFF3fWjFoJhGVJGD/AKtQQfxBrDoJzXfyLm5+p53O+Tk6bnvXga4Nz4L0yRs5EZj5OfusV/pXQ1zHw9/5EbTv+2v/AKMaunr56srVZerPpKDvSi/JHzJX03XzJX03XfmX2fn+h52V/b+X6hRRRXlnrHifxL/5HKb/AK4x/wAq7r4Y/wDIoD/r4f8ApXC/Ev8A5HKb/rjH/Ku5+GH/ACKA/wCvh/6V6lf/AHSPyPJw/wDvkvmdnRRRXlnrGV4mZU8K6sWIA+xyjJ9ShArh/hEfl1ge8P8A7PXUfEB2j8D6kUYgkRjI9DIoP6GuW+ERG7WFzyRCf/Q67qa/2Sb81+hwVX/tcF5P9T06iiiuE7wrhfiv/wAitbf9fqf+gPXdVwvxX/5Fa2/6/U/9Aet8L/Gic+L/AIMjx6vYfhR/yK1z/wBfr/8AoCV49XsHwob/AIpm6X0vGP8A44n+Ferj/wCCeTl/8Y7uiiivDPeCiiigDBs/EElz4y1HQ2gVY7WFJFlDcsSFJyP+BD8vet6vNJdaj0X4sagzwmX7V5FsAGxt3LH83vjFel1vWp8vK0t0jChU5+ZN6pv8wooorA3CvBPHE5uPGmpuVAxIEwP9lQv9K9Z8O
+IZ9Z1bXLKaCNBp9z5UbIT8y5Yc57/J+vtXkPjD/kb9V/6+Gr0sDBwqtPex5eYTU6MXHa/+ZiUUUV6x44UUUUAdP8Pf+R507/tr/wCinr3SvnXQdWfQtbtdSjjWRoWOUbuCCp/HBOK9P8N/EeHV9V+wXtutsZnIgkD/AC4/hVs/xdsjqe1eXjqE5y54rRI9bL69OEeST1bPMvEn/I06v/1+zf8AoZrMrT8Sf8jTq/8A1+zf+hmsyvRp/AjzKnxsKKKKsg1PDRI8U6QR/wA/kI/8fFfQ9fO/hv8A5GnSP+v2H/0MV9EV5OY/FE9nLPgl6hRRRXmnphRXNeCfEF14j0e4vLtIkdblo1WMEALtUjr3+aulqpwcJOL3REJqcVJbMK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIaks6OiiigAooooAKKKpQ6xplxqMunQ6lZyX0QzJbJOpkQe6g5FAF2iq19qFlplq11qF5b2lupAaW4kEaD6knFI2p2Cad/aLX1stjt3/AGkyqItvruzjHvQBarnPG/8AyAbb/sK6d/6WQ1uWd9aajapdWN1BdW7/AHJYJA6N9CODWH43/wCQDbf9hXTv/SyGgDo6KKKACuF0s/8AF4ta/wCvJf5Q13VcHpoz8ZNY9rNf/QYq6KG0/T9Uc+I3h/i/RneUUUVznQFeB+M9Yi1vxPc3VvzAuIo2/vBeM/icke2K98r5kr0suinKUux5eZzajGPcmuLqa6aNp5C5jjWJSeyqMAfgKhoor1rWPIbue6fD3/kRtO/7a/8Ao166euY+Hv8AyI2nf9tf/Rr109fOV/4svVn02H/hR9F+RmeJP+RW1f8A68pv/QDXzvX0R4k/5FbV/wDrym/9ANfO9ell3wSPMzP44hRRRXonlntXwzu47jwhHCm7fbyuj59SdwI9sN+hrsa8k+GGvpZ38mjzKdt22+JxjCsFOc/UAY9/rXqd9eQ6fYz3lw4SGFC7MRnArwcXTcazXc+iwlRSoJ9j5sr6br5kr6brqzL7Pz/Q48r+38v1CiiivLPWPE/iX/yOU3/XGP8AlXc/DD/kUP8At4f+lcH8SHLeNboH+FIwP++Af613fwv/AORR/wC3l/5CvVxH+6R+R5GH/wB8l8ztKKKK8o9c5j4hf8iNqP8A2y/9GpXCfCy9aDxJNa5/d3EB4/2lIIP5bvzru/iF/wAiNqP/AGy/9GpXjGk6pc6LqcOoWmzzoSdocZByCCCPoTXqYSHPhpR7v9EeTjKns8TGXZL82fR1FeVab8StX1HXtNtWt7WKGa4SKQIpJYMQvUnjrmvVa4KtGdJpS6noUa8Kybh0CuF+K/8AyK1t/wBfqf8AoD13VcL8V/8AkVrb/r9T/wBAeqwv8aJOL/gyPHq9V+El0X0/U7TaAIpUk3Z67gRj/wAc/WvKq7v4XavFZa1Pp0i/8fwXY+eAyBjg/UE/l717GMi5UZWPGwUlGvG57BRRRXgH0IUUUUAeNeJ/+Ss/9vVt/wCgx17LXjXif/krH/b1bf8AoMdey124v4KfocOD+Op6hRRRXEdxwfgVv+Kq8XL3N5n/AMfk/wAa878Yf8jfqv8A18NXZeENTjs/iJr9pM8ccdzPMQznBLrIcAc+hb8hXBa7creeINRuEk8xJbmRkbOcqWOPwxivaoRaryfkjw8RJOhGPmzPoooruPPCiiigApVYo4ZThgcg0lFAy1qV19u1W8u+P387y8Dj5mJ/rVWiiklZWBu7uFFFFMRp+G/+Rp0j/r9h/wDQxX0RXzv4b/5GnSP+v2H/ANDFfRFeTmPxRPZyz4JeoUUUV5p6Z5z8MNVsLXRHsp7uKO5nvW8qJmwW+RP8MV6NXzRFNJDJHJE5R423ow6g8c/pX0hZSvNYW8sn33iVm47kZNd+Oo8kufuefl9bnhyNbE9c543/AOQDbf8AYV07/wBLIa6Ouc8b/wDIBtv+wrp3/pZDXAegdHRRRQAUU
UUAI2dp243Y4zXkukfY/wDhE/AX2fyv7Z/tOPztuPO8zD/at3fpv3Z9vavW6pQ6PpdvqMuow6bZx30oxJcpAokce7AZNAHM67cWV34y8ITyTQTaa/2vy5NwaJrjYoTB6Z2+dj8a5eD7Nvs93lf8I/8A8JhNs6eVjyn247bftGcdt2K9Nk0XSptNGmy6ZZPYDpatApiHOfuYx156VI2mWD6d/ZzWNs1jt2fZjEpi2+m3GMe1AHN+E/J/4Snxd9i2fYPtkOPK+553kr5uMcZ+7n3z3qt4z0u8SG3vW13UHtzq+nn7CyQeSM3cQxkR7+Ov3v04rsLOxtNOtUtbG1gtbdPuRQRhEX6AcCsPxv8A8gG2/wCwrp3/AKWQ0AdHRRRQBgeI9Av9aaA2eu3OmrGGDLCDh8464Zf6159oXh6+g8fX+lHV5obiOJne6g+9IDtbnPruBPXkd+tewVwtnC8Xxi1B2AxLZB1+mEX+amuzD1pKMo9LHFiKMXOMurZpf8Irqf8A0Nep/wDjtH/CK6n/ANDXqf8A47XUUVh7af8ASRv7CH9NnL/8Irqf/Q16n/47XhlfTdfMlell83Lmv5fqeZmNOMOW3n+gUUUV6J5h6t4O8P3174VsriHxBfWkb78QxY2rh2HH1xn8a3f+EV1P/oa9T/8AHaX4e/8AIjad/wBtf/Rr109eDWrTVSS830R9FQowdKLfZdWcRrXhrUYNB1GZ/E2ozJHays0b4w4Ck4PseleN19EeJP8AkVtX/wCvKb/0A187134CblF3POzGChKNgooorvPOLemabc6vqMNhZoHnmJCgsAOASTk+gBNd9rfgjWLLSbq6n8ST3NvBaktG2/5sEHZgsRt4zn2HFW/hPYBbK9v5LcBncRxTFeSAPmAPpnH5e1egXtpDf2U1pcKWhmQo4Bxwa8vE4uUavKtkevhcHGVHmluz5rr3FPC+ptGrHxXqfIB/hrw6vpmL/VJ/uirzCbjy28/0M8uhGfNfy/U5n/hFdT/6GvU//HaP+EV1P/oa9T/8drqKK8320/6SPT9hD+mzwHxjay2Xiq8tp7uW7kTZmaX7zZRTz+eK6vwBo17qegTSwa5eWSJcsnlQ42n5VOefr+lYPxFiePxvfMy4Eixsp9RsUfzBruPhT/yK1x/1+P8A+gJXqV5tYaMl5HlUIJ4qUX5mh/wiuqf9DXqX5LR/wi2q/wDQ16l/3ytdTRXl+2n/AEker7CH9N/5nnHjPw9qNp4XurmbxFe3UURQtBIoCvlgOcHsSD36V5XXunxC/wCRG1H/ALZf+jUrybwjpMOt+J7SxuVka3Ys0mw44VSeT2BIA/GvUwdX9y5S6f5Hk42l+/jCPVL8ylosTT69p0KStC8l1GqyJ1QlgMj3HWvZf+EV1P8A6GvU/wDx2oo/h3odvqVle2qzwPayLJsEhYOVOQTnJ6gdK62uXE4pTacPyOzC4R001U/Bs5f/AIRXU/8Aoa9T/wDHa5T4gaLeaboME1xrd5fI10qiOfGAdrHPHfjH416nXC/Ff/kVrb/r9T/0B6jDVZOrFP8AJF4mjBUZNfmzx6tHRNEvNf1JbGyC+YVLFnJCoo7kgHjoPxFZ1ekfCOBGu9VuCBvjSNAcc4YsTz/wEV61eo6dNyR4+HpqpVUGXYPh5rkeniL/AISmeJxHt8iPeYwOy53Dj/gP4Vd/4QbXPLUf8JrqIYdfv4/9GV3NFeK8VVfX8Ee4sJSXT8X/AJnD/wDCDa2XyfGupbc5wN/T/v5T28FayZldfGepBQc7Tu9f9/H6V2tFL6zU7/gv8h/VaXb8X/meB+J7W503xjcwPfzXNxHJGwuX4ckqrA9e2QPw7V6p/wAIrqf/AENep/8Ajtee/ES0ltvHEkz42XKxSJj0ACHPvlT+le1V1YmrJU6bXVf5HJhaUXUqJ9H3fmcv/wAIrqf/AENep/8AjtH/AAiup/8AQ16n/wCO11FFcftp/wBJHb7CH9Nnz8+i3+qeK
7zTbTfdXIuJQ0jnGQGOXY9v/r1jOjRuyOpV1OGVhgg+hr1PwPaO/j/xLeAjZFJLEw75eUkf+gGvPvEn/I0av/1+zf8AoZr2qVVym4dkjw6tFRpqfdszKKKK6TlCiiigAooq1plhLqmp21jCCZJ5AgIXOM9Tj0AyT7Ck2krsaTbsirRVvVLQafq15ZK5dbed4gxGN21iM/pVShO6ugas7MKKKKYi9osTXGu6fCkrQvJcxosidUJYDI9xXsv/AAiup/8AQ16n/wCO14/4b/5GjSP+v2H/ANDFfRFeZj6koyVj1supxlGVzl/+EV1P/oa9T/8AHaZJ4T1R42UeLNTBIIB44/LFdXRXn+3n/SR6HsIf03/mfMlfSOmTJcaVZzxnKSQI6nGMgqCK+fLPR9S1FUezsZ50eTyg6ISu7g4J6DgjrX0HplodP0qzsi+828CRbsY3bVAz+ld+YtNRV9Tz8sUk5O2harnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGvLPWOjooooAKKKKACiiigAooooAK5zxv/wAgG2/7Cunf+lkNdHXOeN/+QDbf9hXTv/SyGgDo6KKKACuShA/4Wvce2lD/ANGLXW1yUH/JVrn/ALBI/wDRi1tR+16GNb7PqjraKKKxNgryzxB8MZEu7d9DLvBK+2VJXBMOT94HjKgduvHfPHqdFa0a06TvExrUIVlaZ856vo19od0ltfxCKVk3hdwPy7iM8f7pqhXdfFf/AJGm2/68k/8AQ3rha96jNzpqT6nz9eCp1HFdD3T4e/8AIjad/wBtf/Rr109cx8Pf+RG07/tr/wCjXrp68Gv/ABZerPocP/Cj6L8jM8Sf8itq/wD15Tf+gGvnevojxJ/yK2r/APXlN/6Aa+d69LLvgkeZmfxxCinwwyXE8cEKF5ZGCIo6sScAVoa/pp0jV3sGULJFFF5gDZ+cxqW5+pNehzK/Kebyvl5uh7D8Pf8AkRtO/wC2v/o166esfwpbxW3hPSY4V2qbWNyP9phuY/iSTWxXzlZ3qSfmz6airU4ryR8yV9Mp/q1+gr5mr6aAwAK9DMvs/P8AQ83K/t/L9RaKKK8s9Y8Y+KH/ACN3/bsn8zXXfCn/AJFa4/6/H/8AQErkvij/AMjav/Xsn82rrfhT/wAitcf9fj/+gJXq1v8AdI/I8mj/AL5L5nc0UUV5R6xzHxC/5EbUf+2X/o1K5D4S2aSalqN6Sd8MSRKMcfOSSfr8n6mux8fpv8EakM44jP5SKa534S2lxFa6ncyQusE5iETkYD7d+ceuMiu+m7YSXr/kefVjfGQ9P8z0eiiiuA9AK4X4r/8AIrW3/X6n/oD13VcL8V/+RWtv+v1P/QHrfC/xonPi/wCDI8er1v4TW0a6FfXQH7yS58tj7KoI/wDQzXklew/Cj/kVrn/r9f8A9ASvVx38Fnk5ev36O6ooorwz3gooooA4f4pKv/CPWTYG4XyDOOcbHruK4f4pf8i3Z/8AX8n/AKA9dxW8/wCDD5/oc9P+NP5fqFFFFYHQcL4F/wCRp8X/APX6P/Q5a868ZWhsvGGqRFw5acy5Ax9/58fhuxXr3h7w7No2sa5eSzxyJqFwJY1UHKjLHn3+fH4e9eWfEL/kedR/7Zf+ikr1sLNSxErbWX6Hj4qDjh48293+pzFFatj4evtQ0a+1WKIm2tByRyWORkY9lO4n2rKr0VJO6XQ81xaSb6hRRRTJNXw3pg1jxHY2DjMcsn7wZxlANzc/QGvYPDvgfTPDl293C809wQVV5SPkU9gAOvv/ACrzH4e/8jzp3/bX/wBFPXuleVmFWanyJ6NHsZdShKHO1qmfO/iT/kadX/6/Zv8A0M1mVq+JlZPFWrBlKn7ZKcEY4LkisqvSp/AjyqnxsKKKKsg0/Df/ACNOkf8AX7D/AOhivoivnfw3/wAjTpH/AF+w/wDoYr6Irycx+KJ7OWfBL1CiiivNPTOF+FH/ACK1z/1+v/6Ald1XD
/C1Qnhu8UdBfOP/ABxK7it8V/Gkc+E/gxCuc8b/APIBtv8AsK6d/wClkNdHXOeN/wDkA23/AGFdO/8ASyGsDoOjooooAKKKKACiiigAooooAK5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIaAOjooooAKrCwtRqJ1AQqLsxeSZe5TOcfnVmimm0JpPcKKKKQwooooA8j+LAH9vWRwMm1xn/gTVwFegfFn/AJDtj/17f+zGvP6+gwn8GJ85jP48j3fwAoXwRpoH91z/AORGrpK5vwF/yJOm/wC6/wD6G1dJXiVv4svVnvUP4UfRGZ4k/wCRW1f/AK8pv/QDXzvX0R4k/wCRW1f/AK8pv/QDXzvXpZd8EjzMz+OJ0fgTTW1PxfYrhvLt2+0uVI4Ccjr23bR+NSfEL/kedR/7Zf8AopK2vhNaO+s394CvlxW4iYd8uwI/D5D+lVfiRot9Br93q8kYFlO8Ucb7hkt5eMY/4Af0rRVF9aab6GLpv6oml1/zPUfDf/IraR/15Q/+gCtOszw3/wAitpH/AF5Q/wDoArTrxp/Ez26fwL0PnLWUWPXdQRFCqtzIAoGABuNfRtfOeuf8jBqX/X1L/wChmvoyvQzD4YfP9Dzsu+Kp8v1CiiivNPUPGvil/wAjYn/Xqn82rq/hT/yK1z/1+v8A+gJXK/FP/kbIv+vRP/QmrqvhT/yK1z/1+v8A+gJXqVv9zj8jyaP++y+Z3VFFFeWesYvi62W68I6rGzFQLZpMj1Qbh/KqXw9/5EbTv+2v/o161PEn/Irav/15Tf8AoBrL+Hv/ACI2nf8AbX/0a9dCf+zv1/RnM1/tCf8Ad/VHT0UUVznSFcL8V/8AkVrb/r9T/wBAeu6rhfiv/wAitbf9fqf+gPW+F/jROfF/wZHj1ew/Cj/kVrn/AK/X/wDQEryBEaR1RFLOxAVVGST6CvefB3h9/DeiG0kn82SWUzMdu3aSAMf+O16eYSSpcvVnl5dBurzLZHQUUUV4p7gUUUUAZmt6FZ+ILOK1vfM8uOZZhsbByMjH0wSK06KKbk2rCUUncKKKKQwryXWdI/tj4uyWc0TNBKY2fqAUES5OR9MfWvWqyo9Djj8US64JmLyWotzFjgfMDnP4Dit6FX2bk/I58RS9qorzuYPw3thZ6Nqdru3iHUpY92MbsKgzivKvEGlNomu3enNIshhYYZRgEEBh+hFfQNrZW1kJRbQpEJZDLJtGNznqT715f8TfDUsN2/iCKQyRTsiTR7TmMhQoOfQ7cc9yOueOvC1067v9r8zjxeHaoRt9n8jzuiinRxvLIscaM7uQqqoyST0AFeseQdL8Pf8AkedO/wC2v/op690rzz4e+ErvS501i62p59qyCB1IdGL9fb5VHv8ANXodeHjqkZ1fd6HvYCnKFL3urueYfFfSgHtNYEvJAtjFt9Nzbs/pXmde6+N/Dt14k0iK2tJYklim83EhIDDaRjI+teHNBKtslwUIid2RW9WUKSP/AB5fzrvwNRSpJX1R52PpuNVytoyOiiiu04TT8N/8jTpH/X7D/wChivoivnfw3/yNOkf9fsP/AKGK+iK8nMfiiezlnwS9QooorzT0ynp2l2WkwSQ2MCwxySNKyqTyx6nn6CrlFFNtt3YkklZBXOeN/wDkA23/AGFdO/8ASyGujrnPG/8AyAbb/sK6d/6WQ0hnR0UUUAFFFUdT1rStFjjk1XU7KwSQ7Ua6nWIMfQFiM0AXqK5PWfH2iW/h7VL3R9Y0vUr20tXmS3gu0kJI4BIU527iuT702wuda0bxXYaRqmrHVItRs5pVdoEjMMsRTcF2AZQiTjOSNvU5oA66iuY1281K68T6d4f02/bT/NtZry4uUiR5AiMiKqhwVGWkySQeF96wZtZ8UX/hy0Wye7e+tNZlsdQm06GEySQxiUb1WbKLkiIn3JAoA9FrnPG//IBtv+wrp3/pZDU/hTUU1HR2YXt7dTQzPDOb6GOKa
OQdUdUVVBAI6DkEHJrE8Z6/ZyQ2+lrDqAuBq+nje2nziHi7iP8ArSmz/wAe56daAO3ooooAKKKKACiiigAooooA8v8Ai7Ggl0iQKN7CZS3cgbMD9T+deaV6f8XR8ujn3m/9krzCvewX8CPz/M+ex38eXy/I938Bf8iRpv8Auv8A+htXSVzfgH/kSNN/3X/9Daukrxq38SXqz3KH8KPojM8Sf8itq/8A15Tf+gGvnevojxJ/yK2r/wDXlN/6Aa+d69LLvgkeZmfxxPWPhLaomkahdgtvlnERHbCrkf8AoZ/SrPxX/wCRWtv+v1P/AEB6b8KP+Rau/wDr8b/0BKm+KUEkvhNHQZWG6R356Day/wA2Fc7f+2a9zdL/AGLTsdRolvLaaDp1tOu2WG1ijdc5wwUAj86vUUVwt3dz0ErKx86a7/yMOpf9fUv/AKGa+i6+dNd/5GHU/wDr7l/9DNfRdejj/hh/XY8zLviqfL9QooorzT1Dxz4qf8jXD/16J/6E9dT8Kf8AkVrn/r9f/wBASuX+Kv8AyNUH/Xmn/ob11Hwp/wCRWuf+v1//AEBK9Sr/ALnH5Hk0f99l8zuqKKK8s9Yoa3CbnQNRgUgGS1lQE9sqRWP8Pf8AkRtO/wC2v/o166cgEEEZB6iobW1t7K3S3tYY4YUztjjUKoycnge5rRT/AHbh53M3D94p+VvyJqKKKzNArhfiv/yK1t/1+p/6A9d1XC/Ff/kVrb/r9T/0B63wv8aJz4v+DI838KWtxdeKdN+zwvL5VzFLJsXO1A65Y+gFfQVeZ/CSzGzU75kQklIUf+IdSw+h+X8q9MrbH1Oary9jHL6fLS5u4UUUVxHcFFFFABRRRQAUUUUAFFFFABRRRQByvirweviK80yRTDFFBKTcjGGkQ44BHfjH41f0bwnpGiRwfZ7VJJ4d225lRTIcknkgD1x9K26K0dabioX0MlRgpudtWFFFFZmoVxGo/Dy2PhaTS7CdhKtw11E8oByxXbsz2BAH4jNdvRWkKsqbvFmdSlCorSR5J4a+G13c3M512Bre3VCihJRv38YIxkYAz1rOufhv4gi1A28NuksBfC3HmKFC56kZyOOTgGvbKK6Vj6vM2cry+jypHl3hf4dajZeIYrvUzCLe0kDoEckysOVI9ADg8+mMV6jRRXPWrSqu8joo0IUY8sQooorI2CiiigArnPG//IBtv+wrp3/pZDXR1znjf/kA23/YV07/ANLIaAOjooooAKa8aSAB0VgP7wzTqKAM/VdFstX0i80y4iCwXcLQyGMAMAwxkH171l6X4bv4dXj1TV9ZGo3Vvata2pW1EKxqxUuzDc25zsXJ4HHAGa6SigDmbvw3qc/9lX0etomuWMLwPePZho7hX27w0QYYyUVhhuCO9OtPDmpaTo0VppOsRx3RnkuLq4urTzhcSSMWYlQ67eTxg8DA5rpKKAMjw9oZ0O2uvNu3u7u8uWurmdkCB5CFXhR91QqqAMnp1NU/G/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACiiigAooooAKKKKAPM/i79zR/rN/7JXmFevfFiKM+HLOYoDIt2FVscgFGJH44H5V5DXu4F3oo8DHq1d/I938A/8AIkab/uv/AOhtXSVzfgH/AJEjTf8Adf8A9GNXSV49b+LL1Z7VD+FH0Rm+IlLeGdWUDJNnMBj/AHDXztX031r5y1nT30rWbyxcDMMpUYPUdj+WK78tkveiedmcX7sj1L4UD/imbo/9Pjf+gJXW6vpNtremyWF4H8mQqTsbB4IPX8K5X4V/8irP/wBfj/8AoKV3FceJbVeTXc7cMk6EU+wUUUVznSfOuvgjxHqgIwRdy/8AoZr6Kr538Sf8jTq//X7N/wChmvoivSx/ww/rseZl/wAVT+u4UUUV5p6Z478Vv+Rpt/8ArzT/ANDeuo+FP/IrXP8A1+v/AOgJVb4jeFbvU3/ti1YyGCBIvsyRlnf5zyMf7
36GtX4c6XeaV4ZaO9gaGSa4aVY3GGClVHI7H5TxXo1KkXhEk9TzKdOSxjbWh11FFFecemFFFFABRRRQAVy3xFRH8EXzMqko0bKSPunzFGR+BI/GuprJ8TaRJrvh+602KVYnm2YdhkDDBv6VpRko1It90Z1ouVOSXZlHwT4dl8OaI0FxIj3E0hlcxklRwAAM47D9a6SkVdqKvoMUtTObnJyfUdOChFRXQKKKKksKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv/wAgG2/7Cunf+lkNdHXOeN/+QDbf9hXTv/SyGgDo6KKKACiiigAooooAKKKKACuc8b/8gG2/7Cunf+lkNdHXOeN/+QDbf9hXTv8A0shoA6OiiigAooooAKKKKACiiigDjfibZTXnhIyRAEW06zOD124ZeP8AvoH6ZrxavpplDqVYAqRggjg18y16+XTvBx7fqeNmULTU+/6HvHgJSvgnTQwwdrn83Y10dYPgr/kTdL/64/1Nb1eZW/iS9WepR/hx9EFeMfE+xFr4s+0LvIuoEkJI43D5MD8FU/jXs9cV8T9N+1+GBdqBvs5Q5J67W+UgfiVP4Vtg6nJWXnoY42nz0X5ajPhX/wAirP8A9fj/APoKV3Fcn8OLeOHwVaSIMNO8kjnPU7yv8lFdZWeJd6svU0wytRj6BRRRWJufO/iT/kadX/6/Zv8A0M19EV87+JP+Rp1f/r9m/wDQzX0RXo4/4Kf9djzMv+Op/XcKKKK849MKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuc8b/8AIBtv+wrp3/pZDXR1znjf/kA23/YV07/0shoA6OiiigAooooAKKKKACiiigArnPG//IBtv+wrp3/pZDXR1znjf/kA23/YV07/ANLIaAOjooooAKKKKACiiigAooooAK+cdZt1tNc1C2XG2G5kjGBgYDEdO1fR1fP3i20uLPxXqa3ETRmS5klTcPvIzEgj1Fejlz96SPMzNe5FntnhiNI/CukqigA2cTYHqVBP6mtWobS1jsrKC0hBEUEaxpk5O1RgfyqauCTvJs9GCtFIKxvFtst14R1WNyQBbPJx6oNw/UVs1FdW0V5aTWs67oZo2jcZxlSMH9DRCXLJPsE480XHuc78Pf8AkRtO/wC2v/o166eqmm6dbaTp8NjZoUgiBCqWJPJJPJ9yat06klKbkurFSi4QjF9EFFFFQWeC3dtHe/EWe0mBMU+rNG4BwdrS4P8AOveq8qt9Mur7xA89tbNKLbxK7SMo+4m4Ek+3y/yr1Wu3GT5uVdkcGChy8z7sKKKK4jvCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACiisrVdLv7+WN7PX77TFVcMlvDA4c+p8yNj+WKAJ9Y1e10PS5dQvC/lRlVCxrud2Zgqqo7sWIAHvVbRfEMGsz3dqbS7sb202Ga1u1UOquDtYbWZSpweQTyCK5/xBoGsQ6Olx/auoa5JaX1rei3ligRisUoZwnlomWK5IBzyoxTrG8mn8R614q/svU0sYdNitoYXtWS4uWRpJG2RNhv4woyBk5xQBs654mh0G7sobjT7+aO6migFxBGpijaRwi7iWHcjoCcVJrPiGDR7i1tBZ3d9e3QdorWzRWcqmNzHcyqFGVGSRyQBmsH4gX5/s7TIItP1S4kOoWd2RbWEs2yOOdHfcUUgEAHg8mn6hdyWvinSvE66fqM+ny6dNaSLFZyNNAzPG6losbwDtYHjg4zQB0uj6ta65pcOoWZfyZdw2yLtZGUlWV
h2IYEEeorK8b/8AIBtv+wrp3/pZDR4Jsrq00OaW8t3tpr6+ub37O/3olllZ1VvfaRkepNZfjPQLOOG31RZtQNwdX087G1Ccw83cQ/1RfZ/47x160AdvRRRQAUUUUAFFFFABRRRQAV498ToZJ/GNtDGu6SS2jVB0yS7AV7DXm/ja0mvfiDoMECbpCqNj2VySfwAJrrwUuWrfyZx46PNSt5o9IooorkOwKKKKACiiigAooooAYkUcW7y41Te25toxuPqfen0UUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAVznjf8A5ANt/wBhXTv/AEshro65zxv/AMgG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFc543/wCQDbf9hXTv/SyGujrnPG//ACAbb/sK6d/6WQ0AdHRRRQAUUUUAFFFFABRRRQAVBJZWst5DeSW8bXMIKxylfmQHrg1PRQnYTSe4UUUUDCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuc8b/8gG2/7Cunf+lkNdHXOeN/+QDbf9hXTv8A0shoA6OiiigAooooAKKKKACiiigAqjq+kWeuae1jfJI0DOkn7uVo2DIwdSGUgghlB4Par1FAHOf8IRpX/P1rn/g8vP8A47R/whGlf8/Wuf8Ag8vP/jtdHRQBxGqeDI01HRVsrnXjbveMt7jWrs4i8iUjOZeP3gj5H8s1qf8ACEaV/wA/Wuf+Dy8/+O0mr+NLHSL26t2stQulso1lvprWIOlqjDIL5YE8DdhQxA5xXRRyJNEkkbB0cBlZTkEHoRQBz3/CEaV/z9a5/wCDy8/+O0f8IRpX/P1rn/g8vP8A47XR0UAc5/whGlf8/Wuf+Dy8/wDjtH/CEaV/z9a5/wCDy8/+O10dFAHOf8IRpX/P1rn/AIPLz/47R/whGlf8/Wuf+Dy8/wDjtdHRQBzn/CEaV/z9a5/4PLz/AOO0f8IRpX/P1rn/AIPLz/47XR0UAc5/whGlf8/Wuf8Ag8vP/jtH/CEaV/z9a5/4PLz/AOO1PqHiaHTddsNLn0+/P22UQRXSxr5IcozhSSwOcI3QGq+reNbDSb27t3s7+5jsUWS+uLaINHaKwyC+WBPy/MQoYgcmgDN8ReC4ovDOqyaTc68dSW0lNqF1q7YmXYdmAZcHnHBrSXwRpe0Zutczjn/ieXn/AMdqTVvF1ppl59lhsr/UZltxdSrYxq/lQkkB2JYdcNgDJODgUXfi+xhTT/sNvd6rLqEBureGxRWYwgDMh3MoC/Mo5OSTgA0AM/4QjSv+frXP/B5ef/HaP+EI0r/n61z/AMHl5/8AHaWTxnpv9l6be2kN3fPqTFbW1t4x5zlQS4IYqF24IbcRgjFaOi6za67p/wBrtVlTbI8MsMy7ZIpFOGRh2IP+I4oAzf8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OigDnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha0dW1aTTDCsWk6hqDy7vls0Q7QMfeLsqjrxzk8+lZ7eM9N/sO21SOK7lNzcG0itEi/ftOCwaPaSAGBRs5IACk5xQAn/CEaV/z9a5/4PLz/AOO1l6B4Mjk06VtUudeFwLy5VN2tXa/uhO4i6S/88wnPfvzWqvjTTf7Gu9Rmhu4HtJxazWckQ89ZmKhYwoJBLb1xgkHcOaIPGWnGx1S5vYbrTn0tA93b3
aDzEVgSrDYWDBsEDBPII60AJ/whGlf8/Wuf+Dy8/wDjtH/CEaV/z9a5/wCDy8/+O0/T/F1ndy3UN5aXulT21v8Aa3iv0VCYef3gKswwMHIzkdwKTR/GFpq99BaGx1Cye6gNxaG8iVBcxjGWTDHpuU7Ww2DnFADf+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OigDnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OigDnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa3L25azs5Lhbaa5ZBkQwAF39hkgfmRXPnxxp8EN+dRsr/Triyjjle1uI1MjrIxRNmxmVtzDaAD164oAk/wCEI0r/AJ+tc/8AB5ef/Hay4PBkZ8UX8ctzr39mLZ27QH+2rvHml5vM583J+URcf4mta28Y6e9vqcl9Bd6XJpsIuLqG9RQ6xEEhxsZgwO1hwScgjrS6Z4ttL+6ltbmyvtMnS3+1iO/jVC8OcFwVZhgHGQcEZGRQAz/hCNK/5+tc/wDB5ef/AB2j/hCNK/5+tc/8Hl5/8do0jxnZave2tsLLULQXsTTWUt1EES6QYJKYYkcEHDBTjnFMsfHGnX19bQpa30drdzPBaX8kSiC4kXOVU7t3O1sEqAccE0AP/wCEI0r/AJ+tc/8AB5ef/HaP+EI0r/n61z/weXn/AMdro6KAOc/4QjSv+frXP/B5ef8Ax2j/AIQjSv8An61z/wAHl5/8dro6KAOc/wCEI0r/AJ+tc/8AB5ef/HaP+EI0r/n61z/weXn/AMdro6KAOc/4QjSv+frXP/B5ef8Ax2j/AIQjSv8An61z/wAHl5/8dro6w9W8Vafo+s6bpUyTy3V/IsaiFQRFuztZySMAkEDqTg8cGgCD/hCNK/5+tc/8Hl5/8drL1TwZGmo6Ktlc68bd7xlvca1dnEXkSkZzLx+8EfI/lmtFvHGnLqLW5tb77It2LFtR8pfs4nzt2E7t33vl3bdueM0aj4407Tb26hktb6W2snSO9voYlMFqzAEByWDHAZSdoOAecUAP/wCEI0r/AJ+tc/8AB5ef/HaP+EI0r/n61z/weXn/AMdpdY8Y2ekXtxa/YdQvWtYRcXj2cSuttGc4L5YE5Ck4UMcDOKfqPi2zs5raC0s73VJ7i3+1rHYIrkQ9pCWZRg54GcnnANAEf/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O1s6ZqNrq+mW2o2Uvm2tzGJI3xjIPt2PtVqgDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OigDnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdpP+EG0YywvJJq03kypMiT6vdSpvRgykq0hBwwB5HaukooAKKKKACiiigAooooAKKKKACiiigAooooA891Q32kX/i+3XSL+9bWlWSxe2gMiM5gWEo7DiPBTOWwMNXaaLZPpuhadYSMHe2to4WYdyqgE/pV6igAooooAKKKKACiiigAooooA4jxxqXk6z4cjTTtVufsepLdzva6dNMix+TMmdyKQTuZeOvNZ1/Je2A8XWkejajdvrwE1g8dsxVi9ukWyRsYi2smTvxwfXivSKKAOAtku/BetXMs2nX+ow3Wl2kMUllbtNmaAOpRsD5c7lIY4HXJqpoum33gqTw/c31jd3USaGLC4+wwNO0EwcOBtXJKnLDI4+UZxmvSqKAPM9O07UtCTw5rd3pt26Ry6i91bW8RmltxdSeah2LknbtCnbnBY103gu1uY7XVb65tpbX+0tSlu4oJl2ukZCou4diQm7HUbuea6aigAooooAKKKKACiiigDmfGWv3+j2ttb6bY3ktzeMU+1Q2Utylooxl3WNSSe
flXuepABrDexisNM8MalpNlqlza6Vfyy3Mc1rIt1J5scqPKY3UMzb5NxwOQTivQqKAPMrrTtS1D+1PEkOmXYQ6zZ3sNnJGUnlhgVEZthwQx+YhTgnaPWl1fTdR8U/8ACS6pZaddxI9nZwWkN1EYJLloJWmb5XwQDuCAkDPPavTKKAPOtTs7zxrf6pNZ2F7ZQHw/c6cj30DQM80xU4CtgkLs5PT5uCatafNd6/4j8OTDSb+xTSbeZrtru3aJRI0YjEaE/f8A4jlcrhRzzXd0UAFFFFABRRRQAUUUUAVdRu1sbCW5eG5mVMZS2jMkhBIGQq8nGc8c8V5bNot5PdX99oun6xPYwyWN2RqiyfabiSCfe0cZm/eFdmcBuN3Tqa9cooA801rTdQ8YP4jv7DT7u3jfSYrO1S9haB7iVZGlI2uAQv3VyQAST2qzfQXfjbVnlttPv7CCHRbyzMl9btATNcbAFAblgoQksMjkYJr0KigDz3TTe61qXhOE6TqFidGjke9e5gMaK/kNCERjxJkuTlcjC9eapaVaajLo3hTws+lX0N1o95C93cSQMsAjg3YZZPuvv+XAUk/Mc4xXp9FABRRRQAUUUUAFFFFABXmGueGfFVvqFncW97p921xrkd00v9nStJGArhPMIlx5aLhcAL65yTn0+igDy97PUR4dm8GDS743b6u0i3fkN9n8hrv7R5pl+7kKcbc7tw6VJq0Go2um+MPDqaTfXNzrc8rWU8UDNAyzxqhLyD5U2ENndjgDGc16ZRQB5/dfbPDereIl/srUNQXVLaH7G9rbtKrSJF5RjcjhOQDlsDDHnio9NtLzwVqOnTXdhfX0H9gWuns9jbtOyTQFsgqvIDb+D0+XkivRKKAOf8E6bdaT4O060vY/KugrSSR5z5Zd2fbn23Y/CugoooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAP//Z", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import torch\n", + "from speechbrain.augment.time_domain import DropChunk\n", + "\n", + "dropper = DropChunk(drop_length_low=2000, drop_length_high=3000, drop_count_low=5, drop_count_high=10)\n", + "length = torch.ones(1)\n", + "dropped_signal = dropper(clean, length)\n", + "\n", + "plt.figure(1)\n", + "plt.title(\"Signal after Drop Chunk\")\n", + "plt.plot(dropped_signal.squeeze())\n", + "plt.show()\n", + "\n", + "Audio(dropped_signal,rate=16000)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1lGnxwYZw-4e" + }, + "source": [ + "In the example above, we exaggerated a bit to make the effect more evident. The number of zeros can be managed by playing with the following parameters:\n", + "\n", + "* **drop_length_low** and **drop_length_high** , that determines the maximum and minimum length of the random chunk of zeros.\n", + "* **drop_count_low** and **drop_count_high** , that affect the number of random chunks to add into the original signal\n", + "\n", + "The length vector is needed because we can process in parallel batches of signals with different lengths. The length vector contains relative lengths for each sentence composing the batch (e.g, for two batches we can have lenght=[0.8 1.0] where 1.0 is the length of the longest sentence in the batch). In this case, we have a batch composed of a single sentence, and the relative length is thus length=[1.0]." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "610wUR_KyaEA" + }, + "source": [ + "## 3. Frequency Dropout\n", + "Drop Freq instead of adding zeros in the time domain, it adds zeros in the **frequency domain**. This can be achieved by filtering the original signal with **band-stop filters** randomly selected. 
Similarly to drop chunk, the intuition is that the neural network should work well even when some frequency channels are missing.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 472 + }, + "executionInfo": { + "elapsed": 2551, + "status": "ok", + "timestamp": 1704406077531, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "LN3zqEjM8F-N", + "outputId": "b2c39ac3-1501-4a1e-92c5-f9514c43fd5a" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0, 0.5, 'Frequency')" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/
AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeB+KQB8RvEgAH/AB8w/wDpPDXvtfP/AIswPiR4kOMn7TCP/JaGtaKvI4cyV8Ox12iyTKyhclFyce1U75IxaxEIOn51et0MgXcCGHQ+tUdSBCR98Z/CpoO01DsfNQ1mmZ0KjzkwACXAH513cUKxwq6hcMoB461wsKo06Fh/Ev8AOu+hibyI8N04GRXJncrKGvc70ryOe1O2U3IKqOV5OKq+UB8oXt1xWxc4edmBBK/KxHrVFyuQoGG6H61ph6rdNRPPqfEyiLcf888/hUotVKZCZPpjpUigYO7f/jVu
EDAO44I44rapWaRKbZmxWqvIFOODz7VbkhjQFQoHHp0HrVpEEYclAy9vWo5hvOFA5657+1ZOs5y8hvbUpKgCqFwPU010i3HaQW7g1PLFGvy5zIOdg9KaykR/6vOe4rZST1J1Ma5RdxI6mt0RqdBhO1cnk/WsS5Q5UHgittFB8P2uFK8kH0xx/jWuKelP1/RnZN3pIzmUGN/lHCdPxqpEikSYGMLnpz2q62FEnYbarQ4/enqdn9RW8HoyKT91iIoYJ0/KnGMDOAOnccVMi5RAuMgA4qXyg5AGD/Sk52ZMqlmRRwA7RtB/CptiJlTgEdgKtwQgn5gMLz+GOtUblispC8Anr61jGftJWMtZMU7cdAQRy2KQJgg7RjsMVGrbjz29KsKm9huP196qXug1bQiZFzkAYzzxXW2cMX2VS+0DaMlhXOSRnj5cLngV1Wm4NvGWToteLm9S1GLXc6sGuadmPkgBkEYAwvPSuO1KApcOMDArttsgJyyjqRXG6tITJISRj0rDJJydRryRti1Zxa7ml5B+xWz7F4X/ABrCuYv3rLtUjPYdK6cEHRYc4xt49qwJPnbdnivRwFRtyv0b/M46nuNNdkUFRXPAA6HGKDCCAMAfhV7y9knzY68D2pyQjeFxyGwfpXoustxe1d9DLFoM4wCf5UGAKT8o+hFbLxKpAIy5GPwqEouDxkdvaiOIuV9Yl1MZgsbg4AB45FIyDJz+VXp4kBGeD0qicbsDqDzXVGXMjspT51dDRwp+UflS7BkEgYPp2owAvSggAcsSMdKs13AICcjB5pABkjAHbFSYIOc1HkYAxjHegE7lzTlBvkQAc5zn6Vfa2jLklRwT2rP05guoxOSc8/yNau8sT9Sa4cQ2qmnb/M4MVpNW7FrSUT7V80YIHOQOldJJEEXYsS7CMYx0rntIybk4O0Z/OulUkEYfOcAgjNfLZvJqujqwKvBkSrFjzAmD0AI/CuW1jb9sZSAW+ldRKQgC7iSD0rmtcci4J47VrlH8e/dE4vZLzMWeNNuRt4HHtQqAspwOB6U+UlUBxnHFLFkonTOOlfVX905uZ8g0LzOdo4A7fSq7IvyttGee1XsA+cR/dB/UVVlAKLx604P+vkXSlr/XYRIxnkCrHlDZwB+IpkQGOB2HFaAiXyyckkdqzq1OVmVWbuY+xQ+3A69KsWygXSAAZz6VHOMTNgcUtsxN7GB13VrLWDfkbu8o38i1druuZfkHBNV541yCVGMZ6Vpzrm6uWZcAMT9eaoXJzgdK56M7pIwhJ81is0eccD8qAgAGQv5dakduQOaa5BI456V03Zum2rEexQvQdc1KFXPIXnsRSKcoCR1HSpQBmlJinJkbRgovGdoP86ryIMHgc+1aDABcjjis5yzFlBwT0p03cqhJyZZ09PllAxgr3Fen/B1dmka6uMY1X/23hrzLTdzPIuOdteo/CNdum6+B21X/ANt4K5cS/eaO7AN/W5J9v8j0OiiiuY9sKKKKACiiigAooooAKKKKACvn/wAWbT8SfEYJ/wCXmH/0nhr6Ar5+8Wkf8LJ8Rgfe+0w/+k8NbUfiOLMP4DLJfY+0HBVRx7YrM1FiyRrycg5q5dSf6SQB/CvP4VTvyfLi+bscCow0bSjLv/kfM0/jRQtSPtUQAIxIOPxr0iDJtSx6bcjPY9K86t2/0mHpneuPzr0aEFbJflUgg5A7c15fEb0p+p6tDWb9DDuFkGFDDJX5iB1P0rMkI85j17E1rXMvlzknBzkGsidFWY4+6cnit8HdrU8qra+gqHGfnwM5xUySEKSAucZ+tV1XLZG3I457VKEPlsNuDjtXTNLqZoQS7pGy7Z7nsaTfk5xggfL7e9JFExxyP930psiFPlY8Dg+pNUlG9kA7ziHJRRyMFmPNOUb0OJQB3qpKOfnY+yimh9oxtIFaeyutAsVrwKshwwOM1tEh9AtkU4ZQSawrvBYDP0rYDj+w4iDjAwaeJjeNP1/RnU9KSMudh5xxj7uK
ihH72Xn+DH8qV/mLED86WPhpD22/4V2JWjYpaQt/XQsxNtjBAHCirYdWdCR3zxVOLBi3cdBz6VK2RyCfwNc04ps5ZbmiJ0TJB249KzJiZJCSM89aYxcewP61Im0KCSc+lRTpKnqg13GKpC9QB2q1bupwQMkngdqr5HfG4eh4FMhl2ZwcAdSauUXJMLN6mlNsIUBvnzXS2LqsEAAbpkt6npiuQF0DHtQAc9a6m1YtbQr0AXr714Wa0n7KKfd/kdWDbjNstDmYgc7iSdx6CuN1QFrqTjrkfSuuWZfJ5yX+7+NcfqUu64fIOaWSwkqstDXFSvy2N2TMeiwIRn5eDn3rJicMx3YxjpWtPKTp8EYUEeWPyrCw/mNsA44rswUbxnfu/wAzkrO7XoiRmDbsA596TBbkMQScfgKVFABZuhP6010KrhMfhxXarXsjAtMiMMqTnGKhK79wznipVJI5PQdKYT8x9x1FZRuhspyxbiOufestowsr45JPNbEzEnO4nmsibJkJHHrXoUG2jrwjd2hi5JX0oy27HAHrR2BGcUuQSPT0rpO0FHzkknNJztbPFOPJJHHPekJ6jGcnGKQrliwXN7HyMYP8q2UVSWXjGQKxtMX/AE6IZ55/rWuJMFzkZ3Vw4q7nZdv1OHF/GXrKMC87A9veuiPOD3Bwea5zSmD3jMR07nrW2y5X92+WJzz2r5nMo3qpN9DowjtBsVw0kZC5B65P1rmdabdOAyj5cAkfSuoHyjBLfOMVzOssDM6gcjjNa5Q/31hYrZMyyN6KT36UoUiTHYUg4RQRyOaTJycAcV9McnkTRceb64xj8agkQMfSpIx94k5zShecn8qS0dxJ8ruNRdpPH4VaWXfGR2/u1ATj2zQCV6cewqJLm3JeupVmbEj8c5xSWuft0Q980kpG9ic5zxz7Uto3+kx5wDmuh/A/Q7FpT+X6G5OqtLL3O4kZrHlJxkdTV+UlJpAWyVyM1TmUsBt/GuTDrlRywfvalYD52bsaQ8YGelS7Qqrkc0mwHJwPrXZc6VNDYz8i46e9SoAyknBGORSBB8v1qXy9gPPbHSok0ZzkugJjy8cYx0H1rPmBSUnPXpgVcztA+n9aqSt+8PGcGrprVmmHTUmWtNG0y467ea9O+EBJ0nXiev8Aan/tvBXmNlkmXjA216d8HznSdeP/AFFf/beCuXEbs7sv1xUn5f5Ho1FFFcx7gUUUUAFNkkSGJ5ZXVI0UszMcBQOpNOrN8Q6fLq3hnVdNhYJLd2c0CMTjDOhUH8zQBW0fxGNakVoNJ1KKzkTzIb2eNEilXsQN28ZByNyiqmn+ONO1G+tYUtb6K2vZHisr6WJRBcuoJIQhiwyFYjcACBxmuU8L6OU1LQ007R9S0yaGzki1qa4ieMTMYwoBZuJW8z5gy5AAPIzipNJt9RuNL8IeGn0m+t7nRbmJr2eSBlgVIEZQUkPyvvO3AXPBOcYoA6ceONOOoi3+y332Q3f2Eaj5S/ZzPnbszu3fe+Xdt27uM0X/AI407T765he1vpLWzlSG8v4olMFs7YIDHdu4DKSVBAzziuVFnqP/AAji+C/7LvvtY1cSG78hvs/kC7+0eb5v3c7eNud27tTtWtdRh0jxb4YTSb6e61m7lezuI4GaApOqgs8n3U2HdkMQflGM5oA9Or588X5HxK8REf8APzD/AOk8Ne7X2mQajYCzuJLpYxg7re5kgfj/AGo2DfrXgXiGwh0/xz4gtIXnaOO5iw087zPzbxHl3JY9e59q2ofGceP/AIDJLk7rhWA4IXkfSq18R5agL0yAafcMUkAB42r0+lNuNjW0bc55zWlNW5T5mCtJNlW2H+lR/wC8P516Pab2tSzrgAYArziAf6RGOcBga9It5kFkCG6IMjHNeHxJflhZHqYZr2ju+hzt+f8ASCHI9cj+tZ+d7rnggEGruo4knZQcYyM+tUEGJME9ua7sMv3S9DyKnxsnjjxIT5effNXYo1EbMxYH0Paq8AjJ
JKv14xnmrnmfIfmBHYf41hXnJuyKgluyBlRY87CoHIPc1WlfzSB0cdBT5JBliZDuHBzwPwqo75O5fXHXrW9Km92RJj3TP+qQH1Y1XkjcKT5in2qaJ+CkrYBHIFS7bVkZY1JP41upODswRiThic9D61rkL/ZEGQenJzWZdRlX2561rMmdHgwQcjpW2IkrQ9f0OibvTRkMoG/HcZ5pkP8Ay0552/4VO4A3A9cVApAMgAydv+FdKd0XB3iy1EfkGB2HNOZwnY0kY/d8dcDg0hAz1NZaNnO7OQ8yLkY7etVpbhh93GM4odiWIGTio1U45A6960jBLVm0KcVqxY5XdtvtTupAIyD0FOjADDAGO5pFTBBDY96d0NyV9NCSPAYZ5OegrtbQKttGTxlAQK5GJVVhgV2Fr/x6Rs4A+UYB7Cvn86l7sSsK7zZKsa8gKSRkj3FcdqSBJGzkdcGuzjYmNSPlAHzZrkNSKsSewz3zXNk0pe1lf+tzTF2XK0aUjD+z4SCRiMAfnWWHVXPOD2JrUJQ6XFxyF6E9axpTu+VepPNenhFfmXmzhqatehM8oZtnG3gGm+bxg59aZt+UEjp196Y+RGeMZrrUFsRa5ehmLKq4XgHJNOcKSPujocj0xVKNiox0PrTmfHqMVk6XvaB5CzBSvDZ+asibBlbB6VcmlJHU/liqD53Z9etd1CDitTrw0WtRuQOeaCT120BVwewzS7sqRyPeug7X5DR90Y456048DHc9DSKD5ZB780KDgDPA5oBlvSx/xMIj25H6Grkx2zsAOrH+dU9Pz9tjbp1/kausN0meOp4zXHV/i38v8zgxL9/5GhorD7S27k9MHvXR2wVAHA++cDtWDo0Qe4bAHy810IcKFVVBUc59PWvls2leq4o6cGvd5mN6SbnyQDx6CuX1jYbuQgkY9/WuneTerRgnIXP3ff16VzGtlTPhAMnritMnT9tr2Fi9kkYzScACn7+nTJHFMUBSQecGl+9tIxxxz1r6uyMGkSRHk5xUjNjv+AqBBtck89qkYblHXB/Woa1M5JcwoPXn8DT8fX6CkWPPT6VIQFAyCcVm2jNszLgt57bRT7T5rpMjv09KbNzK2Bg06zOLmMDOQ3NdL+D5He/4XyNO6Obic9OTUC4OevWidy9w2ckk59KdjBHFcsVyxSOB6DJQcADOO9NwClPfA/8ArGmgZbpVrYpPQMfMp4xmglncdcEUq9vTIp0fIGcZobE2RSjAXA7dfxqlJwxOwk9DV+X7oz71QYktkGtaex1Ycs2OQ030r034OEnRtdyOf7VP/pPDXmenkMZdvPGDXpvwd/5BGvZ/6Cp/9J4a58Ru/kd+X/7zP0/yPR6KKK5T2wrP1n+1ls1fR3sVuEfc63obY6YORuXlTnB3YPTpzWhXM+Mhp8kWjW2q2kFzZ3OpJFILhiI0zHIQWGcNkgKA2RlhxwKAM3TPidpdxdS2mpQPZSQMEkuYXFzZhjwB56cL/wADC13Fc9LPZS64PCQtLV7CXTZJZoUUARrvVAhUcAMHbH+6a6GgAooooAKKKKACvn7xeP8Ai4/iM/8ATzD/AOk0NfQNfP8A4vA/4WJ4kycD7VD/AOk0NbUPjOLMP4DKjkSPwAcAZPpTL3ckES4AX2q5HCGkAAPIGD2JqDUYwpXqcCqhNOaifMU37yKEO4XUQHHzjP516BbArahzgqB61wEQ/wBIjGeriu8gZksVRee5JFeXnybjBLuehSa57vsYt3GWun9+mD09apeUPMDFhz1rXVY5ZpHOCDwAD6Vk3I8tgAOvQVrhpt+52R59SOtyxDjaw80jHtU5XcgDj5fY8mqcczADGwAetOEhIJywPqe1OdN3uSmrC3RwhD7do5wOtUWIDZU8+nanyyZJwrcjrURjOAWBx2Hqa66UOWOoA0gHI5I60+K4cfdA/GoJImA+Y7fYVFtVc7C1b8kZItRTQ26JLE561s7R/YVuwPWsaY4Az2FdDEqDw9bj+9n8OlYY
yXLGn6/ozZq9MwZVIDgDkCoIuS/TO3mrk2AHOOdv9aqRYLOfauyDvEdN3gy5EwWIc54A+tMJIHGOKhjVhgAjkdKeYmORntU8qTM3FKW4wfMSev0pD0wcjnrVgQkJjPB9KhCYJUtmqUkylNMkRlUgenanOUJz29KiEfzdcA1IkXDBeDnkmpdtyGo3vcaWO+MH+8OldpbzApFuI+5jmuQVeVCgk5610cU5SFWIOQoGK8jNaftIxXqaUqnKy+kw8x0l+7jHFcvfiESyKM4HHXgVvKd1zhmxhs4HrWHqpLTuAApB/OufLYKNZpdUh1pcyV+5cPFjFtyQFzz2rMCtJIdgBbuBV9mZrWMKuSU/XpTI1EClnXbgD5s9a7aUuRPvc5nuReUUJLDAGfxqOVMJ15xnileXALZ68c1AXYc9iDiuiEZbslIE+Zdozz3NO2Fd3JGKqrK4JB/CpBMQv3jj3rdwlfQ0cJIinJGM1TD8DPfjFW7hgx4+9VPlW6cZrop7HbQXujsDr3xTT/Dg0pfLY7U3GAf88VojdeY9ec0E5HTrSbRjJOOKTBV+OT70hWRb04KbyM5JAJ/lVmdsSkLzz0FVbBdt2gxgYP8AI1ZCFizEd+lc1S3tLvscWItz3NXRSzTs6nB9K3YZW3SxOPmY/Lj09qwdJ3xmQLtAHPI610McihNwyBjqOozXzWafxXpc1w22jASoJnCKTtUAnPf0/Kuc1QhrhmxtO49u1dC5BkLBiAQBj+tYGs4FzjJGB+FGVpKt8hYm7j8zEdGKkjjoc06JDuUDHI5p5yyEZ78io0LrKuTx2xX0920YptxaC5YpwPWlEoCKPWmzOWLe1Rtwqf1ppaK5cYpxSZbjnxwQKGmDZGfwFU1z2yPrUsUZJ6cUnCK1JlSitSB2yxPvxU1qQbyPA5qB1AYoCVOamsjtvIcnOWxzWk/gZ0SS5HbsW51C3LZ7NjmpgQ7dRmk1DBvGP+1nFIuRgd8Vxp80IvyOCWwx1y2OMUbMAHinEkN3/GkcbgOSPpVXewrirEAp6DGD1pVAUHPrwKRWLLhRnPcU/ABI6e9S2+onfqV5iMZNZsijlT90k5rUuIt0K5zz6fWs5lG8jtXTRasdmGaSLGnqUMjIMkrXp/we50nXv+wqf/SeGvNdKjLNKBwduPpXpPwe/wCQRr2f+gqf/SeGufEO8mvQ9DAO+Kl6f5Ho9FFFcx7YVz3jG4ddKt7GPT7K+bUblbTy77/ULkM25xg5HyYA7sVFdDXL+OoLO90qy0+60qLU5bu8WK1t55mij83Y7bnZckAKrngHPHFADPCPh7UPD8s0T2Hh6zs5FyV0u3eN2kyMFix5GN36V1def+CE0eG80iex0aOzuNV0U3pdLh32jdFuTDHpl1IPsa9AoAKKrrf2b30lil3A15EgkktxIDIinoxXOQD61FYaxpmqtKunajaXhhO2UW86ybD6NtJwfrQBdoqla6xpl9eT2dpqVncXUH+uhinV3j7fMoOR+NEesaZLqb6bHqVm9+gy9qs6mVR6lM5H5UAXa+fvGBx8RPEmAT/pUP8A6TQ19A18/eLiB8SPEfPP2mH/ANJ4a2ofGcWP/gMt2ERlCtvIGORjrUetQrGyoeeM5rX07bFCuxVJKjP5Vla42JFBAVR0A9K8uhWlPGW6I+ecFGmn1MNE2zoRxhhxXcwKWtRsPCrnp3riLYhryPnILr/OvQUTZaqijnrxSz6py+zXU6sPByk79jAkWRJn5UBj09PWsi7AWQfMSO1a122ZcbAWDZPNZF4MBSVAcEgmuvBXumzgl8VhYyc52Aj61IZAUOG3EduwqojjPIY/Spss424GD0A611yhqZtWHQ5ebOd7DtVkRb5MYy/duy0+2gwgLgbfRetW5ISVAb5F7IOprkq10pWRSi3qUdkSP8sbSt644qRopSOYF21MUm7MsSfrTooVO4i7YnvmspVev+b/ACGo30MC/QRycjt09K1o9x0eCND0XOKp6siq
6jO5h1PrWlZKkmlxrnonfoa6cRUvRhPzNVrDlMG5Vkc89Biq6ggNg9s/rV/UI9ssgxxgVRjHJ+lehSlzQTLpv3C3boSgfsRU4hZmB25OPwp9jGzQkjGOMH0q4IumU59F7Vx1a1pNHNK7bKbpgBSAufTvVOVWEgGBxzmtYxdcKEHr3qjNGDIQE5B6DvTo1LsIvlZWXLsCB+JqyIhty2SOyigRqHUu2TnhRV1VIYiFBk9XNVVq22Bu+xCkDgBmwoz0rS3HYiZxx+marRRR5ABeV89fSpHYrIVJxt6/nXBVfPJLsOOiuToWS6kdh8pI2+9YmpSBpnPcZFbably+MqBn6fSucv23SvwevWtMDG9VvyNErtI6IKW06MEbGCjFZ8yvL82RzxWgdwsI8ngoMD1rMI3O38POBWWG3k/NkVOhV2FQR1OaRlG3PI7CrfkhgdmBgcD3ps9uVQnnd+ld6qq9iLmeU3LhsZ9KQLgBeR7GrcMWUxjrU6WpbofzFayrKO5p7R7IzPLwT7+lVnXDEdeMGtl7Ug8gVkzoUmbtk5rWlUU9jehU5pWZDnk4HFL7YzSI24nqPrSk/KNozzya6Dse9hRg8Y7cUEjAIzmkPQBR05HNNUbfXNISRb045vos853de3BrSWEnJ5PJ59KzdOGL6Lnn5uP+AmtxInaMMCQDnp2rgxU+Wfy/zOHFfGrdh+lxgTud2Dg9e1aMRZEwTkknBrNsYcXBIcYPU1eBZG3Bg/cCvIxS5qj1JpuyJv3nmksueOueKzNbxv3t0wBj8K0BJ0B3EfXNZOsqgZRj3+lGCj+/Vy5u6t5mQXILD1p4PAwAM9CaaoGM8VMoyB04r35NIiTSI/K3t834+9O8kMPpVgKOOevaniL6f4Vk6tjL2jKoiHvUipkjauf5VPs96dHF5mONw9TwKiVXS7FzNmRcKROwxT7H/j8gUDq1T36COZsjOeMCo7Hb/aVuAD96uhyvRb8v0O2MuanbyL99F5U8ueTu4xUAbp6AVdvSZLuX5TgHIrNO5Cec5rmoXlTV9zksm2icEFun5Ggden51X8znHfFOD9K15GJwZPvwvt70x5Ny9OnpTN3ymoXfg96Iw1HGF2PkmbYAp5x0/GqTZLZbgg1bSMsAfaqkq7WdTnJreFlojrocqbSNHSW2ySgD+DFekfCAg6VrxH/QVP8A6TwV5lpRKySZ7rXpnwfGNJ17/sKn/wBJ4K466tNv0O3L1bFSXl/kejUUUVge4FYXiy2e60hIo9HudTfzlZUtblbeWEgEiRXZlwQcDg559M1u1zPji+i0/RYJJb3WbQPdJGraREkkzMQwC4ZW4J9BnOKAMPwJaw6RrcmlP4f1SwuVsQY59Rvo7g+SjBRHHtY4UFskAccZ6iu21W4u7TSbu4sLQ3d5HEzQ24YL5j44XJ4GTXnui65p+na7LNLa+NdT1VbfYPt9jlooWYE7VVVGCyjJx/COa9OoA8Mv31O1uvEEUejavFqVz4bme5uZxCHeQs5aTCSNhTjYoGSMKMYGa62/+x/8JJo3/CK+R5n9hXmPsuP9TiPyc47b/u/8C969B+y2/wBrN35EX2kx+UZtg37M527uuM84qvYaPpmlNK2nadZ2ZmO6U28Cx7z6ttAyfrQB53pP9m/Zfhp/ZHkfa8fP5eN/k/ZX87f3/wBZszn+LHeqmn/Zf+EO8K7PK/tz+3o/NxjzvO89vtG7v9zzM57fhXp9ro+l2N5PeWmm2dvdT/66eKBUeTv8zAZP40R6PpcWpvqUem2aX7jD3SwKJWHoXxk/nQA7UY9QltCumXNtbXORiS5gaZMdxtDof1rwDxBHfR+OvEC6hPBPdC5i3yQQmJG/cRYwpZiOMfxH+lfRVfPvi9j/AMLH8SDHH2mE/wDkvDW1D4zix/8AAZo6dJ+6XzN2ckADvVHWCpkJBLHHStC3UqAAwUECsnWVcODjvjj0riwyTxN0fOK7SRRtl23cJA/jXP516TCy+QAR1HDd
q85tVxPFz1cdfrXoEMyrZLnCjvmuDiSLl7Ox6OCmlKV+xjXkaLM+3LNkt07elY91GCOR/u+9dExEplKvzk846c1mXcALR4ZQM468kVrgq/LaMtzgrQ15kY8cOWwSwPtWrbWHybmAQY+93p8UIjOBIoPoasqy4wmSR/e6V0YjFSlpEzjG7vIlto0T/Vx7Dj77d6eWjLnYN0ndz0FQGTJ+eXzP9haRwSRufaP+eS9685wbleT/AK/P9Do5rKyGyC3yWYNK/oBTjcxogJtSPwGaVhP8oRY419T1qvMsmcfaUY+ma1hGM7Jv8X+hm246ox9SkDTNgYB5FXbGbZaQKehHGKy77c05z+dW7UuIYgemK9qrTToxRm9IJiakQ80hX+6KzY1/eMCe3NWrtyHl4421VifLkkkAr09K6KMeWnY0hfkbN3RkDWzkAA5wM1cKKAMq6+wPNV9FVWtHPXnpnrWgEiyPnZR7gk14eJqWryBRuit5IxlUOf7znp9Kz7iLy24Zl55zyTW8nk9VDP8A72QB+dZ95IqzZG1z0yegpYbESc7WJqU0le5mrEyyAqoUE9T1NaqQoFzO+FAyEFUo3i84cmWTPA7CtMHcS0UYZj1Zq2xVSWi2JppPcTzDGBtiCID171RnnP7xgd3PK/jU04Py75y7eg6dfas52Cu+SOSQaMPST94VST2NFJSIJCN2M5wK5+7JMjZPOTkVrxuRF8pJAwDWNMD5r5+vSu3BwUZyZVN3aOqC50mEHkKnA75rJ8tircYbOa6Exr/ZqFcFljGaywApc/3sV5WErfHbu/zLrws437FeO33bT68A96bJmJwAPYZNPSQ7mRSVAJGaqSvjq3Tpkd69CEZSlqc7LEJTDHbu5zk9qkWdyu3Kt7niqkDfKOTkfxCpmYn+43070TgubUV2iZcnCvgc8d6w7+HbK5B5BNaZcgngjHqayr2RjdHjqcmujCxambULuehUXA7ZbHNMViGGMgelOzglv7x7U44Ixx716R6XqIRg7v5UAhsEDg96AMvnpQu4gjp6A9aQuhY04bb2MgkkBv5Gt6O4ZYRjA9/6GsTTR/p8Qz1DfyNauMIRwOe/evPxaUp2fl+pw4pvnT8h0UuJGBH8OQfpUzSHaSmMkEDJ71TTd5pOM5HX0p6jY2M5I6ZrnlTTdzmuSrOVwvII5yTxUOouJIUDsC460md74OOT1Ham3MaG2JBJYcY96uEIxmmOL1KCLxwDjtVpE46D+tMjKhAG7d6mjHIwB+PX8K6akmVOTbLMUW5RUhiwQfl4/MUiSFRjJ/EVIHYkcKce3T6VwScr3JVgEQ3ckA47U5QjHao3N6ngU072IYD5TjJA5qZVVI8hC3+0eKxk2UkYd9GVuXBOcf4VHp2P7Tg4/jp2oAm4LY7+tN0w41SHIH3uletr9Xfp+h0U/gNu4jJvJTzgsehqjNb5J4/PrWmw8yWXI25YkGoWOCVcAketeZRquNkc8lrcyjD7GmmE+9aDICeg/A03av0+tdqrMnmaM/yjinR25Zjxz7Vd2qM8H8eaTPJIJxj6VXtm9h87ItnlITtGffrWXPzI2fWthlwAd3PsM5rIuWxO31rXDu7ZthviJ9P5Lg9xivTvhCMaXr4/6iv/ALbwV5jY9ZMj+GvTvhAc6Vr3/YV/9t4KzxHxM9LL/wDepen+R6LRRRXMe6Fc141S3XSba7l1J9PuLS7Sa0lS3M5aYqyBPKHMm5XYbRzznjFdLXOeLkLroxhnEF6uoqbOR498Xm+VIMSDIO1lLrkchmXrQBneF3Oo61aaxqGtxX17c6YZLKG3smt41tndCzYZmJYkR5yeOOOc12lcd4O8K6po32GTWLy0mfT9PGnWsdojBRHlSzsW5LMY06AAY75rsaACiiigAooooAK+f/FylviP4jGet1CP/JeGvoCvBPFA/wCLjeJD6XUP/pNDWtJ2lc4MydsO2bNlBkD5QWwOT2FZOtpGJiFz04zV+1uPkAZjnoNv
esvWnLzHcQRjsa8nBwn9bvJngzlF00kZkBP2mMYyNw6fWu4gh8+3GW2+ma4e2/4+Iz/tD+ddhFNviJBKle1a51GUuXlNaEoqTuNliKSSqD8hJG73zzVKeM+eN2AR056VqxSZPzgHfyAapTQMZlLc9fxrz8PValaQqsE1eJCiNk/Kjc8E9alA3DBff/sLTo7cs2fKLH1B4NXBbERZZljGOSOtOriIxdrhTpSa2KQicDLhIlx1HUe1PWNiwaGPIwcyv2qzFbruJSJ2Yjq+cGkuVCAeZJx/zzWsfrHNLlX9fL/NmnsrR5mVnW2BHmytIwPQE/0pCbH/AJ92z67TUMt26kiK2AHYtUEl1dkf8s/euuFCcrXbXzt+Rg6kVsvwMvVRCsxCDAPI9qtWKK8CZYdOBms29fzJW9T/ADq3at5VtBj0r26kGqEY31MXblTI762ZJpfQYrN3bXxnrWrJMZPPLnPyj+dZpTdJgdBXRh2+W0uhpSa1T2Oj0RC9mwC8E9q0lG3703U/xACs7SJxBYsrNjODn0pz6gWbny2b2rwK9KpUrzstLlKcIxXc0Sqk4MjOcdB0rFvYbh7nZtUnHIQcD61M2oTY2gqCf7o5piyOFYPIUJOSe5q8PSqUXzOxnUqRnoiCO3aCVTLIqjPIFar4VSZJhHH2UccVTGGBKxE56s9UGaUOcoZOc5PSt5U3Xd27W9CFLlNcTwZKwwn/AH2FZErxtK7Y43HpTTeytlWYKB6UigdcdTW9HD+yu2Kcm9y9EAbdlKj5sHg9u1YtwdsjAcj1rV3MIsEckGsOY/M4J7da6MLH3pM0oLmkdkxb7IhXI3Ioxis93ZRgAEjrxWtGVawC88IP/QRis2VSiguMDoa8bDSV2muo60dmVXOxgT93FV5snqBkCp2AHynoBzmmMuYtzZyDkV6kHazOYjhGEzyMnv0qXYSPuqf90023ViACevbHFTtGQM+X+RonK0hlcj2I/Gsy5IMzHritSRWxkDFZd0MTNweua6cPubYf4itgtwAMdBQoC7gTz3zTl5Ptuof77Dsa7L9D0L62GMCANp59TT8dSe9ICMbhTdzdG/OgerL2kgHUYe5Of5GugW1Z4yfl5J+8K57SmKaimOoyR+RroY5D5BOBknLc15OP5lUTj2X6nHXtz6kS2hSULkYAw2aikhIYnIGc8ZpGuGZ8b8ZOKrbyB1596mEKj1bOV26D1jYS9Rz0qK+RxD34PBHrTlZ1bGeaddyu0SIU5x+dbrmU0EdJJmcjHYOeSec960bbBCkbfz5qOK2XYC7AEnOKDGY2KpwccDNaVJRnoi5yUtjTjjXIyTx69qsL5Yx+8X6f4VipLdLxg4FOFzPu5X6cVxTwspPcSdjZbAGN2eM8Cjy94yAW9ycVlpdzZ5yB9Kf9qbkcn8cVj9VmtmPnXVFK9jH2qQdD7morEbdUh+bneKfK5aTd6+tNtRnUYT6NmvUs1SafY0hJ2fob7IwmkOe/emtGD1zj25FWLeRZWcMBgHgimtHl8rgnHVTXhqo07PoJxTV0Q+Wv+z+IxUZjUjkL17VYMb5/i+mKZ5LnsfyrSM/Mhx8iqYRgng+wNRFQCeB09c1eaJsH5e/daieEk9+nYVvCr3ZDiVGB29+n0rInAMjVtyoduD19zWLOpEucYxXoYV3ubYfSRNp/Bk7/AC/rXpnwf40nXv8AsK/+28Fea6eADIf9mvSvg+MaTrw/6iv/ALbwVNf4n8j08uf+1S9P8j0aiiiuc90K8w+JOo2DS61pWsb5Yl0mK70yDy5CrXYNwM5Qc9IuGOOlen0UAcT4MPgz+0W/4R5pDffZz5gZrgjZlc/6zjrt6c121FRXMJuLWaASyQmRGTzIjh0yMZU9iO1AEtFcLprPp/i28i0q/wBRvtNsrKT+0Ptd09wgucqURGckh9u/cBwMrwDWdpM2o2dh4N8QSavfXNxrk0SX0Es5aFhNC0g2RnhNhAxtA4BzmgD0uivL4bzUR4es
/GR1W+N3NqyRvaGdvs5ge68jyhF90EKQd2N24Zz2o1W81F9F8UeKk1W+iutIvpktbZJ2WARwEAo0f3W34bJIJ+YYxigD1CvBvEx/4uL4l/6+Yf8A0mhr27UbyeyszPb6dc38mQPIt2jD89/3jKvH1rwjV7iW88deILieynspGuot1vOULp/o8Q5KMy89eCetXDc8/NP92fyL0KvGcq6qvU5FZ2pOPO+717Gt6HT2kIKIGbgkk4xWXq1mUlba5OM5NceFxFOVe19T5905RipNaGTA2JoyF6OMfnXRxOgdmzhjxmueiRvNjJ/vjH51shd5ZTwSeDXTjYqVhOWuhcgk3qxdsbWyvFSCUrJErMCozzTYUWMbW5yMUqRgXQCHjGBXjz5W32NY81kWkkTcTudT/d7VOOmUjOf7zdKgjUrx5wA9GHNWUgDKOZJPbtXm1nFa/wBfodtNSZG8pKkSTjHTCDnNUZ3kU/uIcBurv6/StdLabkeVHGOinOazbyGNGIlkaVuTsUdarCVabnyr+vu0+9ixFOajzP8Ar7zGnMbN+8ucn0Wqzm1IOHbI960Hil3fJa7V/wBo1E8VyRzEg/Gvo6dSKtr+KPLcX2MSdQzgg5x3q6Bi2gIB6VTnUiRt3B7irwBazt9vKlc5rvqPSP8AXQqfwIqlsrL9B/OqvQkjv1FWHJVJe/T+dQLgtj1raC0ZpT0TZr2R3wZ3bRxzVwQk4+RSag0xf3LYxnPOauiIDG6EZHoa8fEVLVGjJRuRMCg52IP1pgH/ADzHuS9WkhIPyRKg7kmoGWNpCCrSkdwOBWUZp/1/X5jcSEkM53Ss5/ur0pZIiykyShF7AdatCOUjgJGPXrTRHGpJit2dj/EehNV7VdP0/wCGFymZIluoHlqxPqajHysQSc1pTxXDod6oijnArLbljjOfWu2jPnW/6kSTTLqNiMhsEAYJrCuQB5gVeTz9a24wwt2YLyw4J9KxpGJJ3dQOa3wqtKVjag2pXOut7lBChGM7FH6CmyoznII9R71UNm4SNoxkbVyKtxKwbGOBxg9q8SUIQfPBjUnL3ZIoPD5vOGBGSaSZWG3cuSRyMY4rSEQMvmMuADt47+9SSW/JLDcQMZHvV/W0mkyfYtozIEOzC7ceh71IYAEx5TDvwc1sWthm2UhFKnOQTU505COInHriuSpmdNTaOiOCqOKZzrQHAIU81j3sRWWQEd67l7BAAOcD8z9a5vWIyLmRQOv6V3ZdmEatSyIqUJUGpM5tvQevT1ppIzuPUdasOmG49ah/vfMOp4r6JM6ISTQbBjnoaQKAcfjig4PFDoNvsPWmWn0bLml5bUImx1zn8jWt1DDaWAJrJ0hP+JpEo6c49uDW59n+85jZsEjI69a8zFySq69l+pxYlXkrFH5QSwXOfeo9oUbVwQOlW2jZG8sphWOPpVZ0Jx82APanCSZzEYY4zwSOamun+ZWb5RtH8qTawHI7jpS3Y2lcnAqrpzQ0KpV8AKD6+9S4XAxgj361FaqnlkgFiTzntU6RF228Hvz1rKbSb8hW1I1HzcBhTwgzyW/EVOkCqcFGGPTpSlFBPMn41i6qb0HylYqpA+9TPLBGQh+rGpioKdHP17VG0eR9zP8AvGtIyJKjgByPm6enFNtRm8iOcYf86kfAkPLf0ploCL6Nc9Wrqv7j9DWGxqJLskcDP3j061cEo77Sffg1VigLSP0OCQAe9WCjDIGQQO4yK8iryN2HDmRJvGRhQPo1IZgCBg/n0qJueCV6elREDaMFcVCpxe43NlrzgORu/nTTIrNyCePpUSoMfLt607yvnONoOPrRyQQXkxkjLjChF7461gXYCyN7k4FdDKh2456dhXPXm5ZHxkjNejgGm3YcE+ck0sgtPznC4/GvS/hJj+ztfx/0Ff8A23grzjRYRM0ysOi5B/OvRvhEMaZr/wD2Ff8A23grSu06sl6Hp5ev9ql6f5HolFFFZHuhRRRQAVDeW5u7Ke2E81uZo2jE
0JAePIxuUkEAjqODzU1FAHO+HPCf/CNRRW9vrepXNnGhVba4W32ZJyWJSJWLZyck8knOaj03wRp+mXtpMl3fTW9iztY2U0qtDalgQSgChjgMQNxbAPGK6aigDmU8D6dHqKTi6vjaR3Zvk04yr9nScktvA27vvEtt3bQecUXngfTr2/uJnur6O1up1uLqwjlUW88i4wzDbu52rkBgDjkGumooAK8H8Sgf8LG8SknH+lQ/+k8Ne8V4L4nOPiN4kIHP2qH/ANJ4auG55+Z/7u/kbltdgIElYoe23uKy9YkTco8vGBgL2q8rMGwrKowOtY2sNllKybgM5JryMFRj9Z5keJVm3BRZnxEi4iwP4h/OtjcXcjucjJ+uaxYCTcxAn+MfzrahkR5WVlYYzg44Jr1cYrNMwa6Fm3kIIfGRnk9yK1ogk0kYAAB7eorEjLMuwqQDx/8AWrVsdy3iBgTzweoUDvXg46Gjkt0mdeFl7yi9tDWS3bOR5bD1NSlTHEd0y474FOUAk5gJPqMYNMmBUZWGNT6mvk3Nzkk3+R9GoKEW1+pSaSAc4lm7hRz+PNTyZmiCwxCP3ao2D4OZ1X6CoJJ7aJlElxKzHoBnmvQUOdrlu2vV/wCSOFz5U+bRP0X+bKVzBmX57rGewFVWtrcMc3LknsTWyj2rD5bIlj3Kim5LKf8AQdo7ZAr0qeMnFctmrecUcM8NF6pr8WcZqUCiXAYn39avQxbrS2UdAtS6osStu8rGeuf4an08qscAOPu9K96eIbw8ZJbf5HnqPvcjMO5jKtOpXGMfzrPUDzM9xXSanPA0s5KY+QKKwOGl9BivRwlWU6d5K3/DDj7t0tjodHQtaYMQb61eMPOPKcD2qfQrVX01GKluAcCtFLKJFxul49WJNfKYzMIQxE12Z3UcHKdNS7mSsC4z5B/4HUUwUcyTBB/s1tPZqR8qyN7ZrOlsHWUhUjjHvUYfGQqS1dv6+f5BWws4LYpILfeAsbyc9TzU7pMwO6dI17ADpTxbbJBuuh16AVa8i3xxamXHcgH+db1cRFNNa/152RlToyd09P68jIdLfdxK8jg9Kz5UKSMSPmz1A6107LKhVRaKFPpjiue1CRY7twQ3XgelduBxDqy5V+af5HPiKPIv+A/1BcCA5Hzbe9YcxDscc5HWtpcTROAxxj9axphiX+devhPikZ0tzp1me3QYOQyLgY6cCofOkCZZhtBz/wDWpknmMkBUH7vIP0qPfMo2lRt5zXnQpLfS4Smy99pJ+UrgY6+tSJOV/dkAKegFZayMcIwwCuKlEiyEY5+tTPDR2sNVWdNaSobdQY2wO47VKblIz/rWGPWs62cJbqWLjHcGnPOCMic4P+zXztTCKVR9r/10Z7EMS4wXf+vQnnvcgHIYHoR3rlNUuDLPLux94gH2rblO4Ak59xxn61zeoZ8+XHQtXu5RhoQnojzsTVlUauzOc/OKjOAecdaewO7B70wdD9a+oRcdEMVjkYGRRlsggc46dqeDnsBx1oUKBjtTuacy7F3R2A1SEnAPOfyNddD5LwHJYtknI7VyOlY/tWLIGDn+RrqY5VWPaZCvbAGa8DNouVRW7L9TNySkV7t4o23KpYE9e+azW2t8pyMDOas3LbmZd55O0EiqjZL7WOQBzV4eHLFHDN3YFlH3hn5uv4VJd4YKVOcqD9KiCqzYBGMdMUTBoAAx3D+EV0WXMrbkkkKq45DbAcfL296vhQem119T1qCyVHhB+YZORtrQhst/IKHPrwa4cTWjFvmdrGtODexWI5HysPxphjbOR5nX1rXXTWIHyKSPen/2aQQdh/76rg/tCjHqdP1Oq+hiiE7jlWP1NI1vkf6tf+BGtr7CQeU/M0f2eD1SL8af9ow3v/X3h9Tn2ORniZblsDH0p1umLyJieh71p30aQSzfIDtwMisyIsL2Inu1e3Sq+1pXXb9DkaalY6S2hRot4I+f260jxkHgEcdFNLZy8FskLnPTNa+yNxkrH06m
vmMRiJUKjurnp0aKqw0djCMT7ujUCJyOjflW79lU4O2P8DQLdB1VfzrP+1I9i/qEu5iLHJ05/EVMsUhJ64x6VreXEoJMefoeajcxgcKfxNT9fc3ZRH9T5VqzJltXIz/M1y+ooyzOuBnJrtZZEAzsTj1rldVuFkuHZUXrgYr3MprVJTaa0OKvCMJJx3HeGIwLi4DnICd/xrvfhMAth4gCgADVeg/694K4HQ5FDTuwx8n+Nd98JiDp/iAgYB1X/wBt4K7pp/WZt+R35dK9d33s/wBD0KiiiqPbCiiigAooooAKKKKACiiigArwTxSwX4i+JOMn7VD/AOk0Ne914P4iVW+JviIMRj7TF/6TQ1cHZts8/M/93Zpx25nQSLHv3KOp6cVi6rbPExzgrnt6122n29s9pGZCUwMDB61U1Sxsi2cOR7Zr5nCZsoYlwadvT/gnn1cA1RVVNfecLGoE8fruFbEK4lI5Jyce1RXFpDHdR+Wj/eByQeOan81EuHHmc+mOlfQV6vtUnHseY99S3bSFBzgr05q3YvJ/aAXsExt7VnPM7wBY/kIPzMAORVvS2L3Ls4fC9x2NeRiaf7qcmuh1UJfvIpM6CN1GRmUEduaeFhJOUkcnsSf61WE5X/luPxApfOJ5+0n8AP8ACvlpUJ3uv1/yPoI1o2s/0/zLH2bzD8loo9ziopNMumxgwxr3PeniYAczyEew/wDrVE89q7ASG4Y+wf8ApU03Xi9F+Df5lTVCS1/NL8kI1m8fBvAD+FQyxuqkfbxn8KthrTr5Eh+oal8yxJw9m+fXyya1jWmneUW/+3YmcqMGrKSXzkcfqcLSMwaTeO/HWn20TPHAwUt8oxWvfQQtIfJjkX1+Q0/TbdI2VZeFQYOc59q+jeYJYVO23Q8RYRuvyX+Zy13bTb5Thj71lhDvKlce9dnqUsLtsRto74FcxdRnzG2jA9e9e1l+KlVprmVjmqRVKbinc7bw9tGkxbiV+UdPpWphc/8AHwfptrF0KR00yPLqvyjGa0fNyc+ZFnpXwmYUZPFVH5v+tj6TCVoqhBf1+ZaO0Dmdj9FrHuiqzsxjlckcA5rRWbH/AC1jqG4Incjzc44IQZxWeEvSqXa0/rsh4q1SGm/9d2UIzG7ACyUE92Aq+YZwvM6J9KhW0iSRHxMxU5HWrANrt4hmYe4aunEVVJpwTt6X/NnPQpNJ87X32/JFOSNsZN/u/wBnjmuW1EN5rknd8+K7TbA3SzYe+01z2pQK07xpA+Qc9DXq5RiUqjTX5L8jhzCjaKkn+f6mVHK5iZU+UDqMVlyDcxJ64rYdTEjAo64/2etZbqzHJVskelfU4eSu2jy4tpm75gBj65MYwKjd9zbV6gjdkc1PeNF5MAHEoUHjrUAnBc5Toa82nquZIqejtcJkQq2QQzHjjpTreHJ28Z9fbtTGkEgJYEEnqO1WI3VeQhZuOAac3JQsKNmzVjiPlKoLjjqBkUjQPj/WH8V/+tWrbsiQxjzFQsPut1qRhuHyvDXycsfKM2rf19x9BHBRlBO/9fec9LbtgjjI9K5u/jdZZM9ecGvQ2SEg+YiZ/vKK5vVbaxM7Kuc7snk17OU5nzVOVxZwYzB+ySmmjjWXLhec0hAXKsOQa1JrUebiPsRz61Ulh2yMMZOTX10K0ZHJGsnoysVKjHXFKqgAdc1KqYbkYx0phUgknjFaXuXz30LOlrnU4fx/ka6mKBtnDMuSf4c965vScnVLfpt56fQ139uoNuqlo1OOh64r5rPcU6M46br/ADN6FH20tXscvewAMP3mT3JGKzWfnI6Y/Sup1S3XapDx++BXOSIoc7uDj861y/EqtSTOLE0nTqNECgbgTy2KsXnMcY2nlRkj6VX3fOr49ttX5VebaNmEAzmu2o+WcWzFbEemsYwuHZfwyDXTWkfmqHIi5qnZadGbSNg+zcM81oxxeSiopQgCvm8yxUKsmobnq4SjKPvSWhaWLH8I/BqUREgfI3/fX/16r8j/
APaoDkdj/wB9/wD168N05dH/AF956inHt/X3FgwEnO382qKWAqpwqfiaBLz93/x7/wCvUiyA9UX8TUL2kHf+vzKfs5K39fkclqMDNM5AB59fas2KJ47qPcvOfXNdbdWYlmYhVwfesy509opElCHhucHPFfYYPMIOCg30/Q+fr4acZN9DQsLVm5UfrWn5EZJDN1GCpXNJBtWFdqZBGQQcGlZ2z/F+lfL4ivUq1G9j26NKFOC6iiJRjBHT+7QY1wOnXj5aZ5jg/eb8hSea5x8zdfQVjyVO/wDX3GnPDt/X3khiBBwR+VRNbK2ckflS+a/PLfkKPNfPU/kKqKqx2f8AX3Et03uipcWgKZUn8Frlb+2kiLbVyWOfu4rtw7EYYtj6CsXUoGaRipO33xXt5VjZwqckzzcbQjyqcTn9IjdnuQy/wcZ9ea9A+Eq7dO18f9RX/wBt4K4+wi8q4l35+5ke9dn8Kxts/EIBz/xNB/6TQV9B7XnrS+RWV61XLyf5o7+iiitD3gooooAKKKKACiiigArjNW8Sa79u13+xoLBrXQ0Uzrchy9y5jEpRCpATCFeSG5PQYrs64zVvDeu/btd/saewW11xFE73JcPbOIxEXRVBD5QLwSvI6nNAHQN4g0uHQ7bWLu9gs7G4jSRJbmRY1w4yoJJxnmvB/EXiLRJvHmv3MWq2UkMtzE0cqTqVcCCIHBBweQR+Br6CsbKHT9NtbGEEw20SQpu/uqAB+gqfy0H8C8+1NOxhiKCr0+Rux4fD490a3iSManp7YHH79f8AGkuviFpDw4XUNP3/APXZf8a9x2J/dX8qNif3V/KvN/snCc/O46mP1Wpy8ntHb5Hz3N400qd8/b7Fen/LZf8AGq7eI9G3s/8AalkSef8AXr/jX0ZsT+6v5UbE/ur+Vd0KUIK0VY5pZTGTu5s+dIvE+kJGf+JpZktjP79f8atWnjfSrMtt1GybccnMq/419A7E/ur+VGxP7q/lSqUKVROM43THDK1B3jNnhZ+IOisOb2x/7/L/AI0q/EDRu19Yf9/l/wAa9z2J/dX8qNif3V/KuT+y8J/L+LN/qk/+fj+5Hhh+IukhflvrDP8A12X/ABqI/EbT93F9p+P+uo/xr3jYn91fyo2J/dX8qI5Xg1/y7QPC1H/y9f4HhI+I2n7c/wBoWH/f1f8AGpF+I2m4B/tKw57ecv8AjXr+gaQdH06W2kZJGe8ubgMq4wJZ3kA/AOB+FaexP7q/lSeVYJ/8u0NYWqv+XrPE1+IujFRvv7DcRn/XL/jUP/CeaLvLnUrHB9J1/wAa9y2J/dX8qNif3V/KpjlGEjfljuOWGqSteo/wPnq48X6KZCE1GzIJyD5y/wCNUZfE2kmQv/aNmeMYEy/419J7E/ur+VGxP7q/lXo04QpqyRyvKYN3cmeEad440S2tVjbULFiPWdf8auD4geH8j/TrDn/puv8AjXtexP7q/lRsT+6v5V59XKsLUk5yjq/NnTDCzhFRjPReSPFf+Fg6B2vrD/v+v+NLH8Q9CQkjUNPBY8/vl/xr2nYn91fyo2J/dX8qzeS4Nq3L+LLVGqndVH9yPGj8StGCNjUNP4/6bL/jVI/EjTg3Goafj/rsv+NeweItJOteGdV0qJkikvbSW3WRlyFLoVBP51orGoUDavA9KcMmwUfsXCVGvLeq/wADxD/hY2m5/wCQnYD/ALbL/jVG78d6a8wcajY5I6iVf8a9+2J/dX8qNif3V/Kt6WW4SlLmjBGU8HOatOq2fOc/jGwmYZ1KxGT/AM9V/wAaik8S6QcganacHH+uXn9a+kdif3V/KjYn91fyrtjGEdIxsc7yiD1cmfPMviDQTbKw1izaVR3nX/Gq58S6OAB/admQf+m6/wCNfR2xP7q/lRsT+6v5VMYKKs9QeUU39pnzlH4k0ULzqdoGzz+/Xp+dJD4r0iNlJ1G1yT185f8AGvo7Yn91fyo2J/dX8qpxi73W4LKKa+0zxJfiBoWxQ99Y
lgOvnr/jSn4haACB9usvwnX/ABr2zYn91fyrM1/SDrGnRW0bJGyXltcFmXORFOkhH4hCPxry3k+Ef2fxZ2LD1F/y8f3I8iPj3QZCc6jaIBzkXCnP61l3vi7RZJcLqVmevzCZe/419CbE/ur+VGxP7q/lXTQwNCjK8EY1MB7RWnN/gfOC+JtFJ41G0DZySZl/xpj6/opkY/2nZnJ5/fL/AI19JbE/ur+VGxP7q/lXYrJ3Rh/Y9P8AmZ80LreiH72p2mDz/rl/xpv9taLuBbUrQn/ruvH619M7E/ur+VGxP7q/lV+0Y/7Ih/Mz5rtfEGjW92sn9pWfGefOX/Gujh8faEsSh9QsiV4J89f8a9x2J/dX8qNif3V/KuTFYWlire1WxrSy/wBk7wm/wPDrnx5oEqbRfWPHfz1/xrKn8YaLIdwvLIE+sy/419DbE/ur+VGxP7q/lWdDA0KCtBfix1cv9q7zm/uR84N4m0Y4P2+yBHpMv+NWZPF2j4iCanZYC4YeavX869z1TSDqGo6LcoyIun3jXDqV++DBLHgfjID+FaexP7q/lXTKEJWutjFZTBbSZ4pa/EHQFt0Rr6xG0YwZ14/WmzfETw/syLuzPHQTr/jXtuxP7q/lRsT+6v5V5v8AY2D5ublf3s7fYVOXl9pp6I8LX4g6ERk3VoPb7Sv+NA+IWgk4+1Wv/gQv+Ne6bE/ur+VGxP7q/lWn9l4T+X8WR9Un/P8Agv8AI8Mb4g6COl1aH/t4X/Gmr8RNDI4uLUfW4Wvddif3V/KjYn91fyp/2XhP5fxf+YfVJ/8APx/cv8jw5PiBoGcG7tPqbhf8aV/HXh2UEfb7MH3uF/xr3DYn91fyo2J/dX8qn+ysLe6TXzYfVJtWc/wR4ifHnh9UWN7+zIxtytyp/kaU+OvDjA51G09P+Phf8a9t2J/dX8qzJ9IM3iiw1YMgjtrO4t2j28sZHhYH8PKP51P9k4bs/vY/qsv5/wAEeRnxx4cBJ/tK0zjH/Hwv+NB8ceHFIH9pWnXHFwv+Ne37E/ur+VGxP7q/lVf2Vh/P7xfVH/N+CPDx438OdP7StOef+Phf8aU+OPDnIOpWhyMH/SF/xr2/Yn91fyo2J/dX8qP7Lw/n94vqb/m/BHhx8deHFz/xMLX5eP8AXj/GmyeNvDrAp/aFnjH/AD8L/jXuexP7q/lRsT+6v5U1lmHTur/eweDbVub8EfPn/CYaEJiyX9oAB3nX/GvQ/hNfQalp/iC6tnR4pNU+UowIOLaAdR7g16BsT+6v5UoUL0AH0rsjRhF3itSsNg1Qk5KV7i0UUVodgUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBneINU/sTw5qeq7PM+xWstwE/vFFLY/HFc5pt7rul69odrquqjUY9YglLqYEj+zzIgf5CoBKEbhhsngc11eoWMGp6bdWF0u63uoXhlXOMqwII/I1zdn4R1OKeC4vPEH2qewtJLbTn+xhPJLqF82Qbj5j4UD+EdeOaAIfFtx4hh120/s9tZTShaSNO2lW9tLJ5u5duRMDxt38Lz04rpNFvYNR0SxvLW7e8hlhVluHUK0vH3mAAAJ7gAYOeBVK90/xDKkC2Wv29viARzNLp4kLv3kXDqFJ9CGFXND0eDQNEtNLtnkeK2TYHkOWc9Sx9yST+NAGhRRRQAUUUUAFFFFABUF5FPPZyxW1ybadlwkwQPsPrg8H8anqG7S4ktJUtJo4bhlIjkkj8xVbsSuRke2RQBg+Cb6/vtHu/wC0rxrye21K7tRO0aIWWOZkXIUAZwB2rN1K+17U9a1+DStVGnx6NDHsQQJILiZozJ+8LAkJgqMLg8k5qfRfC/iDSNP1G1HiS1Z7qeW5jlj0zaYpZJfMc4aVgw5IA4xnqcVNq/hS8u9Rv7rS9ZOnLqcCwXqG2EpbaCoeMlhsfacZIYcDjigDZ0HU/wC2vD2m6qE8v7baxXGz+7vQNj9a0Kr2NlBpun21
jbLst7aJYYl9FUAAfkKsUAFFFFABRRRQAUUUUAU9Ttru7sjDZag9hMSP36RLIQO4AYEfmDWZ4H1K71jwPouo38vnXdzaJJLJtC7mI5OAAB+ArU1GK+msnTTbuG1uiRtlmgMygZ5yoZc8e9YHhzw1rWgaBYaR/b8EsVk8So8en7C8K/ejbMjctx8wxjHSgDHu9W1rTvE97NrF9rdloy38aW0kFrata+UVjAEjFDKAXLAt0GRyK9CrmNX8Narrpms77XI/7GllDvbRWW2VkDBhGZd5GOACQoOO9dPQAUUUUAFFFFABRRRQAVz3idL2C2n1JfEz6RZW0BZgttE4LDPLFwSR0G1cH35roa5bxH4a1XWtasL221i0htbMb0srqxaeMzZOJTtlTJAwADkA5PXGADY8P3V9feHdNu9Stxb301tHJcQgY2OVBIwenPbtXFad4h119O0HxNPqQlstXvY4G07yECQxTMVjKuBvLA7Cckg5PA4rto7bVPNsHm1KIiFXF2kdrtW4YgbSMsSgBycZOc1z9l4HmtZbC1k1cy6Jp10bq0sfs4V1YElFaTd8yqWyBtB4GScUAdhRRRQAUUUUAFFFFABRRRQBx3jObWNHsLzWLTxA8TJtWy037LG6TykALESQXYu2R8pGM+1dRctcDTZniXbc+SxRRzh8cD35rm9S8Ma3d+K/7bt9csQkUYSztrvTWmFtkYdlKzICzc/MRkDgcZzuyWupPcySJqaxxNaeUsQtwds2T+9yTkjoNvTjrQByngzVNUF7a2XiO+1tdUnsvM+y39tbJDI42+Y0TQrk7Sfus2cNnHcd3XOWfh3UH1211bWtXjvpbOOSO2it7T7PGhfAZmBdizYAHUAc8V0dABRRRQAUUUUAFFFFABRRRQAUUUUAFRXNxFaWs1zO4SGFGkkc/wAKgZJ/Kpapayt0+h6gtjFHLeG2kEEcoBRpNp2hs8YJxmgDJ0rxja6pfWlq2najZG+iaazku41VbhAATt2sSDgg4YKcUzT/ABxp2o31rClrfRW17I8VlfSxKILl1BJCEMWGQrEbgAQOM1yfhjSGj1vRhp9nrsYispYNQfVYnCQAoMLDv4U7wOIvl2j2FS6Tb6jcaX4Q8NPpN9b3Oi3MTXs8kDLAqQIygpIflfeduAueCc4xQB048cacdRFv9lvvshu/sI1Hyl+zmfO3Zndu+98u7bt3cZov/HGnaffXML2t9Ja2cqQ3l/FEpgtnbBAY7t3AZSSoIGecVyos9R/4RxfBf9l332sauJDd+Q32fyBd/aPN837udvG3O7d2p2rWuow6R4t8MJpN9PdazdyvZ3EcDNAUnVQWeT7qbDuyGIPyjGc0AenUVSvtMg1GwFncSXSxjB3W9zJA/H+1Gwb9a8R1mO5s/Geuafb6vra21tcRpEn9rXJ2gwRMefMyeWJ5ppXMcRXjQhzy2Pe6K8IuYLlZAItY1zBUH/kL3PHH+/Va8F1DBG6a3rgJHJ/ta55/8frOnUU7W6nE81op2s/w/wAz6Aor5xjn1BpFH9t64csBj+1bj/4uurXS3EYY6rrZyOT/AGxdcH/v5UYuvHC25+vY1p5hTneyf9fM9jorwi+tbqKZVi1nWwCP+gvc/wDxdQGO7HH9ta3ux/0Frn/4uqhVjOKkuplLNaMXZp/h/me/UV8+hb3vrWu/hqtx/wDF1KIbzZu/tvXD6f8AE1uf/jlW5Jbi/teh2f4f5nvtFeARwag7hTrWucn/AKC1zx/4/VmS3uFGBrOuZ6D/AIm1z+f+sqJVIxdhrNaLV7P8P8z3aivAxFd7VH9s62Se/wDa1z/8coeK7Qn/AInetn2/ta54/wDIlPnV7C/taj2f4f5nvlFfOU82oIx267rf/g1uP/i61hBcnSY5/wC2Nd8xhnP9rXP8vMq6r9mot9XYpZpRauk/w/zPd6K+ema92Ow1rXBhc/8AIWufX/fquk2oMrn+3Nb4Gf8AkK3H/wAXVxg2Ss2oPo/w
/wAz6Nor50WXUG2f8TvW8nr/AMTa4/8Ai6UvqAJ/4netnjp/a1xx/wCP0/ZsP7Xodn+H+Z9FUV89IL84zrWuHPpq1x/8XU2y8HXWtbGO39rXPP8A4/WbstBf2vQ7P8P8z3+ivn4/ahx/bWt5Iz/yFrnj/wAfpF+25ydZ1vb2/wCJtc8/+P0B/a9Ds/w/zPoKivn1hegjGta3jPP/ABNrnj/x+ugtdKaWFWOra2SVBP8AxOLof+1K5sTioYeKlPqbUcxpVXaKf4f5nsVFeQvpBEgQaprmRyf+Jxdf/HK56+jvoJmVNY1wKB/0Frn/AOLqcLjaeJdofiVWx9Oj8Sf9fM9/orxH7FObaGT+1dc5Xn/ib3PP/kSsm4+3RylBrOuAZ4I1a5P/ALPW1GrGq2o9DKWaUY7p/h/mew3njnQ7C/FjcnUluGkaJFGk3TeYy5JCERkPwCcrkYGela9hqK6irSR2t1FBtVo5J4vL8zPojYdcdwyr7Zr59eKe4mhmfVdXeSAlopG1O4LRkgglTv4JBI47GoZtPa4ExkvtTkNxtM+/UJz5mz7u7L847Z6V0ezZP9r0Oz/D/M+lKK+Xv+EdtTbPbE3XkSyea8X2qXYzj+Ijdgn361ZvdO/tK2W2v7zUru3QgrFPfzOqkdMAtiq9k+4/7Wodn+H+Z9MUV82RyXluiQxaxrKRIAqIuqXChQOgAD043OpZP/E81vH/AGFbj/4un7CRX9p0ez/D/M+kaK+bBd6jg51vXP8Awa3H/wAXS/adSz/yHNcHp/xNbj/4un7CQf2lS7P8P8z6Sor5t+0aln/kOa3j/sK3H/xdILrUSf8AkOa3/wCDW4/+Lo9hIP7To9n/AF8z6Tor51spL+a7WN9a1xlOf+YtcDt/v1dMN5u/5DeuAZIx/a1z/wDF1hP3JcrJea0V0f4f5nvtFeGWFvNNPtl1fXcdR/xN7n/45Wu+kqnH9pa7g9D/AGzdf/HK4K+Y0qE+SSd/l/mdFLG06seZJnrlFeRjSUILDVdcKjj/AJDF11/7+Vh6jFPbzlI9Y1tfYavcn/2pTw2Pp4ifJBO/mKtjadKPNJM94or52me/QZGt630/6C1x/wDF0gkvyy/8TvW8Y5/4mtx/8XXo8jtc5/7Xodn+H+Z9FUV87+Zffvv+J1rmFAx/xNrn2/26hM+oAAjXNb5/6itx/wDF01TbGs2ovo/w/wAz6Oor5zWXUSedb1z/AMGtx/8AF1MTf7MjW9bP/cWuf/i6Tg1uJ5vQXR/h/mfQ1FfOPnajuwdc1zr/ANBW4/8Ai6mgkvmuFVta1sgnp/a1z/8AF05U2lcf9rUOz/D/ADPoiivny5N4k8ix6xrgVScf8Ta5/wDi6hle/U8a3rg4z/yFbj/4upjFySa6i/tehe1n+H+Z9E0V85GbUe2t63/4Nrj/AOLoE2o451vW/wDwbXH/AMXV+yY/7Wodn+H+Z9G0V84+dqGM/wBua51/6Ctx/wDF1IJL8/8AMb1vH/YWuP8A4uk6TQnm1BdH+H+Z9FUV86mTUNqn+29czjn/AImtx6/79QvcakM/8TzW/wDwa3H/AMXTVJsazWi+j/D/ADPpCivnWzk1CUSbta1tsLkf8Ta5H/s9ek/Cae6m0nWkury6ujFqWxGuZ3mZV8iFsbmJOMsT+NRKPK7M3oY2nXm4RTuj0GiiipOwKKKKACiiigAooooAKKKKACvBfE7BfiN4lJJH+kxf+k0Ne9V87eNbjyviX4iXP/LxCf8AyXirWjHmlY48fTdSi4o1IpVlAOeQMEeuKpajKBHEucAdAKojUDGwAqpeX5MRPpTpYOUaifQ+dp0KkpJWLUJU3EZyQNw/nXcRyHyU5BA7V5ja3u+4RcnrnNdWmqkRADjA61z5rgp1uW3Q6JxlhZWl1NS6kUzZIHTpVIyx7VKnkisp9SYscknPOTVVr4mQtzmijl8oxSZzOM5ttI21lBU/vAPwq1E/HVeRx71zP28nt+dSDUCF
xzj61pUwMpISpVI9Doo/JQuWHvuHekmkQ5xkkjB55xWB/aTkdcA9qYdRYLk5496hYCd7tj5KlrWNp1Cpu3kAHpUbPEFzt+bGM1kHUMnLEmk/tA7MVssJPqT7Cp2H3DbivrW2rgaDbYLHPByea5C7vtrgknv0rUW/xpcKg9Bk/jWuIw0pqHk/8zonh506a030LpdVEgzn5arQsuZT6p/UVmyahgkA9eKjgvctJyfT+taxw7SYo4Kryt2NtCFRCo7AmpGCk49R1FY6XwGOTwAKmF6MjB+tRKhK5lLC1E9jdgCFh1GKq3MwWVgOeepqkmo7ScEn3NVJ73dIazp4WXPdihh5ydrGmJQQF6AdvWrAcyMOcdj7CsBb3b2OasJqA6ZP+NaTwz6FSwlSPQ2nZOFAO0EfjXTWMkUlugbptyQa4Jr8lQe2ela9tqxjiVc/UCvLzDL51aaS3KoylQlzSWjOtE7Fj8hx/e9RXK6rcBpJCBgetK2syFj8zY+tc/fX7NIcng9KzyzK5UqjlJGs5yxLUIncCZG0qL/d4NYU0gZ8gDnrVZ9Sb7EkYYgAVQF6pyvIrpwmXyp8z7tmE1Oray2NRQokzgAA4/CnKE3Lz3xWS18STyc+1AvzuJyc12PDTZH1ep2NwpEvzHg4xgVCypzuxgfdPrWY16xOc59KY16cEDIPapjhZrqCoTfQtTRqSDkfQ1VLDPXPPNV5Lwk1mm+xK+W59PSu2nSlbU7cPgqkkzZ3Db1NG4jq3GOlYx1EDg5IFNGpqy55I+laeyZ0f2dV7G4Gx06U3evQdR61kDUtx60w6ku05yMc+9L2TBZdV7HS6dIF1GN88c/yNannqxIxxkmuPsb9XnyGPyitMXvbmuSvhOefN5HHicLVhLlsdPpUwNwwXuec966ITHI3EEHj6V59a35jkJUnjsDWn/a7DG0kY59a8LH5TOtU5kFDESoLlaOpeYKoyRwelc7rEyfaCwUZ9aqPqjHJ/iPesvUb8uwJJya1y/KpUqnMxVKssRaCRYkkUKMjjFPRuF4HSsd70so60qXpB78V7jw7sS8FUsbRIzPg/wAI/mKqSsCi9R1qpHe/fzkZGPrVeW7PAFEKDTHDCVHKxrxMuPXAFXwF2ZJH4da5tLwocZOT0q0L8Yz/ABetRVw0m9CKmEqJ7E8zKs7Y7d6fbzAXkZ6/NWRNfDzTk021vx9tjwec81q6DcLeR0LA1OTmt0OrldGu7jjADEmqV1ICMDFVJb4lnOfvHP1qjJek9cnnv2rno4Vq1znhhqk3exqM4JAyKazqcc1kG+IY5zimm+PHWulUGbrL6hsh1K9BipAVzzj/AArEF8Qo61IL7kHmk6DJlgahtkqFyOmKovJkMFxVY3uVOeRjvVB9RCynqKKdBoqhgKjb0Oh06UFpBj+GvSvhNxp/iDnP/E1/9t4K8e0/UMmQjIPAr1v4NyGXRddc9Tqp/wDSeGubE0nGXMduCoSpYmV10/yPSKKKK5j2QooooAKKKKACiqFvrek3eoS6fbapZTXsWfMt47hGkTHXKg5FOg1jTLnUJdPt9Ss5b2HmW2jnVpE/3lByPxoAu0VS/tjTP7U/sv8AtKz/ALQxu+yeevm4xnOzOentRNrGmW2oxafPqVnFfTDMds86rI/0UnJ/CgC7Xzv41t/N+JXiJsDP2mEf+S8NfRFeA+K/+Sk+IyRx9ph/9J4a2oO07o5MdNwouSMq4scSYx2FV7uyIiUHgtW5clftIx3A/lVa+8sIoHJGRWlLESfKfOU8RUjJa7GHa2QS4QgDJbH1rrV0dzCr44I7dqxbcD7TH6bh/OvQLfZJbZxhQMc968/OcdUw/I4nVZ4qfvvZHETacUfDDIqpJp+18Hoea6a8CGYgjaeT9aona7L9CDW1DGTlFNnE5zpuyZjDT29Min/YTgjGMda2I1h8w7t2fxq5FCjRltwIxxmnUx8o7oaqVJdTmlsHbsD7ZpraeV4IyB1rpfKgVcocEclv
WoJTHIAyADnpjrRHHyk9FoN1ai6nOPY/3uPYU0W3bawrekiiRvlXfIT+VQSJIFJKqBXTHFNh9ZqLS5z93p+9h2x0960RZf8AEtjboMYpZuTkDntWriM6VCuSDjJGOOautXlFR9TWeJqShFN7HMtZZ3YHfvRHZbWkwPcc1psgG7vx1pIsYfv8vT8q29q7Gn1yo4vUqR2OY8/1p7aePTA9q0I9oTA64HFPJRf4uvqKydeVzneLq33MlrNx2wM04WCnDN1z0rVJj45Df0qCSdV+6o+tCrTlogWJqvRFL7COeOT+lRRWgBwB07mryy7iFHXHNLweDwP51ftJLcr6xVV02VxbAgYXvjNbEOkl41IGOM1UiChhu4BIrsLWNPIXpjaCK8vMcbOhFcoU1KtKzZzv9lgLnqc4/GsO7s9srZByK9CEUQyPlyOSBXKahGqyNyNwzWeXZhKrNpjlGWHaknuQy6eRBGAvBXNZ32BgxIWutk2fYoiCOIwPes5dgYjIJ/nW+Hxs3F6GXtJ09E9zGWwzyRSPYsvQVtuYyfLC46dKYHQr6Hr0rdYqe9hfWKl9zLFgeuBSGx6+ntW5G0ThRsBPPNOaOLICqFPH0HFZ/XZJ2aD2tTe5zj2PvWXNpwMhIGOeTXWzRgL2PzdqypVHmHFdlDEOR04bGVYPcxTpy5BwTS/YAWBGMVq/L3PFJ8noOK6faM6/r1UzG05T0WmnTgeCoPPHtWrxjjk570uABg/e+lL2jD69VXUp2Fgq3AUDl60PsPX2NTaaF/tCP8Qfyq1LtWdwO7GuarXkqnKuxxYnEVJT5rley08yzEAcd60W0d9uUIINWNH2m4YsR6EVvW6xgEnBDHj3rw8dmVSjUaXQVKk62smcwNII6sST7dKyr+xCuyleRxXebU8z5sAA4AFc7qyobpwrfd6596rAZlUq1eWXYdSm6FpxZzH2HheOaPsWc8VoM6hRzTsr1xzivd9rIn63VKCWXU00aeMknr6VpxbTkYP4mpG2jrjiodeSdifrdVPcyjYjJyKQWODnke1agwcng8de9LhcdcZ/Oj28g+t1O5gy2Q3Nkc0Wtgv2lCQAfpWjOQJjjkU61VJLlM9Cfoa2dV8lzr+t1fZ79COSyYOwPVRg1Xk0/OGGcj3reutpnnC4PJxnpUIVSOveuaGJlZM5I4qpB6MxPsGV5Aph07OSOvrmtyQKAMYPqcU0Km3tWqxErXNFj6q6mONP+7xUv9ngA5Fam0Bl44zSsQzgcEEelJ15EvG1X1Mb7EAPXiqsmnqZCSM1vyhFAx1x6e9VZNgY55zxWsKzZ0UcbVvdFKx0/b5u1duBXrPwZTZomuLjGNVP/pPDXndljMoPIwa9I+DxzpOvEf8AQV/9t4K5sRUctGdmBrzq4iXN2/yPRqKKK5D2AoorP1fVG0m0S5Gn3t6hkCutnGJHRcH5tuQSOAMLk89KANCsvxM12nhTWG0/d9tFjMbfZ18zYduPfOKNI8SaPru8abqEM0sf+shyVlj/AN6NsMv4gVqUAeX+E7i50weFbS01K0v4dTsHbyI7eNPspWIMGUqN23dhTvJJJHOeKraP9h/4Rj4d/Y/K/tb7cnm7cebu8uT7Vu79d27PfGe1emWejaXp1xNcWOm2dtPOcyyQQKjSH/aIGT+NEGj6XbahLqFvptnFezf625jgVZH/AN5gMn8aAPMB9l/4QaP/AFX/AAkP/CS+3nfaft3Pv/qs/wDAPanax9i/4RX4gfavK/tj+0X8rdjzt+2P7Lt7/wBzbjvn3r03+x9L/tT+1P7Ns/7Qxt+1+Qvm4xjG/GenvRNo+l3OoxahPptnLfQjEdy8CtIn0YjI/CgAvo9SlsAun3NrbXfGZLiBpk9xtDof1r588Sy3dt4816LUbmCa7+0RF5IITEjfuIsYUsxHGP4j/SvpCvmzx/G7fE7xCy4x58I/8l4q6MKk6lmY16cakHGQkt+N4y2eBiorm8UxKxcY7+1U/LZnOR2q
K6jYQjcMDH4V3RpQTVjxaeFpc0VctW96DOgBG7dXZwayiW4BYZxzXnFlHNHdKJGDZOeBjHtXSpHL5eccVyY/CUq1lMnHQVCa9m90WL3UA8jHd7VUW/G7O7rVaeKQvgcVALZxIWHfrWtOhTjGxhChSlG8nqbCagc/6wYqx/aYKFSB7+9YqRsF6jil8qQfSolhqT3IdGF9JGm+qZUg429gKgOoAMGDc9qznikCklce9RbXAAx+daRw1NbGscLTetzXXUMoQGAJ70jXOU/1ufxrHIfHzHp6U9S5xgGq+rwWxTwcejJJ7wKOtaH9oA6fGueMdq5+5jdmyRxV9IW/s+M54K8Vc6UGlc3rYakoQ16kr3qANlgOKiivFO/D54xVCWKYbj1x2qOJJcPhQe+BxWipxsdEcHS5HZm4l6AnXjFI14O5zWeiSbORQY2waj2cbnP9Vpc25da9GeDj6d6h+2KR8x71WKkt0qMo28jHFUqcUbQwtJGgl0iHOfxpBe4cZ5qmsbAcj6DFJ5LgE5GM5zRyRF9Wo31ZrRXS5ALZNdDFqyrCisfurgdq4o71dMg8kdvetpVdlGB26Vx4rC06iXMcOIpKlZxe50A1pVUEEE4weKwL++EjHLDPTIpAHBKkY9azrmNvM655yazwuDpUpXiRRj7SdpvY3ZL8fZUXfjCgZqhJfAZIYZqB1cRA8HjPFVFhcnIH510UqFOKLpYenLWTNH7cDht/XrQ16AD83JqkYJBnOMY601oHAGcfStPZQLWHot7mlHqG1eG+tPOo8DL5rHVHJwBnNO8uQHPp2pOhTuN4SlfcvyagB0yMc1ntfruJ3cmo5A+GyOorPlVjyMda1hSitjtw2CpGh9tUD7wPNPN/GFOWFZHkS9c89cDpTWhlKqM475FaciO36lRfU1hqEYTG7J7Uo1BcAbwW681krBcY6rwewoaCRkAXr14HWlyIPqdDudBY3w+1BlP3atSXgMmWPU1h6dFIspGQcjFWplcPx1FZSpRcrnmV8LT9typ9Ddsb5Y5chu/NbKazGqBS2cc5FcnZLIdxH5Yq4FcEjHP0rz8TgqVSV5Hn1I+ym1Fm++sJs+U89TWJqV+ruzq33hTNj5I61Qu4mL9RTw2Do05XiOilUmlNiC8X+JsEd6c14uV59qzjC46HjilWCUkepFelyRPUeFo73NFL4BiN3FOe9GMBsE9/SsyVHjxnHXmh94Kj16YFL2UXqR9TpOzRq/bAf4qeLwYHzAe/estQ6jgClffjAxnvUulEyeDpt2Jpr1Vmb5gTTrW+UTocjJNZkgO8nH40ttG32pCMHnP4Vbpx5bHZLB0vZP0N6e/BkY5xu5povABndxWfKjmTj6U4wu2eg44rNUoJWOH6rS5Vdl1r4EH5s/Wmi9HZh+FUHgkz8uCaXyJASeMewp+zgWsLQtuaAvV4570q3qZwGHFZ4tpcehzR5LdemOKXs4EvDUO5elvRgfMKpSagm/qDntUUscm3A7eoqhJDJ5rcjbWkKcUdeGwdF7s3bK+UiT5hkjGRXqnwZcSaJrjjodVP/pPDXi2nQyRiTjqBx+dey/BLI8Pa3kYP9qnj/thDXJi4JRuu5tQoQp4iTg+h6bRRRXAd4VieJZZhDp9pb6pNp0t7diBZYIEkdvkdio35C8KTuIONvTmtuub8bHTxo8H22S+jn+1IbFtPTfcC4w2PLGCCdu/ORjbuzQBlXXgvw1LqDabILt9cuLV7iHVp5WkuY9jKu5JCflKl1+UYXnpXc1xHhBJJtbmu9Qh8RzagbYxrd6rbRQxpHuBKIseACTgnjJ29eK7egAooooAKKKKACvnXxwCfiR4jwP8Al4h/9J4q+iq+fvGAX/hYfiQt/wA/UP8A6TQ1vh3aZy4yfJSuYyxNu4U8io7tG2jIPHbFdFa2nmANkbSOfao9Ss1iIXIBIzVxxkfaqHU+dhXkpKbWhy8W8zrkY5ro4i4ttvlk1npCBMmORuFddHaL9nUBQcDd
WeYYqNNRui8RL27XKtjl2VmkPynFVnDKQCproGg2yt+7wCeM1n3MWx8E/jV0cQpOxzRm4vVFFT/s0u6Tk4BA6Craxx55Un6VM0QCZPI7AVbqpPYHVXYyXZiM4JJFRlH44I9a1Ui3SZYDHoKmNqpcH+M9F9qp4iMdDWOI5dkYTRsOAMZqNPMDEAn8q6JrW3iYGWT5j0FJ5EHTyiPfsaFi49jRYyys0c3MCcEr9a040J02LK9uKddwIj98Y4rTWELpUO0Z4orYhKMX3YVa/PTVlsc9KpAY7TnFVYw258Iela80e1jn0qFIgAfpmuiNRWNqeJShsUohLgAr2pzJKT0yD7VqQwDgn8Kl8gEjIIPoO1Q66TM5Y1c10jHELKpG3r3phRjwVreNsoXAXHqTVaSMBwNvIpRxCkEcbd7GUEfOcHn1p6Ruc/KSc/lWj5algep9qnFsGUljtX19acq6W45YzyMlV+Zcj5s8VqKxwMoeBU8dsTghPl9a0PJXy1UDnoa5K+KjorHLVq+16GV85kxt75qhdghiAh5610iQIbmTcPlBB+tZl/GnmuQQMZpUMQpTsl0FSlySTKpz5YAU9KYqFPmKnJ65roFt0eyRlXDBe9UpoS3RePX3op4qMm1YPaOOltzJaQ4JI4phd/7pxzV/ycKd+M/1oaFduQRjpXSqkTRVoroZSyOD93FO85sYZc5q6YQy9MCkEI244Nae0ibuvB9DNnLZyq5/pVT5g3KHg8HFbYgAJ+n5VA0IDnOTxWkai2Omji4pWsZhJzjaaZ84H3T7VqeWgJAFL5Sd6rnRusXFdDMCv1wQKRd+/lT6YxWr5KEbccYoMS8HgkUe0QvrkexUslYSgbTgAmp9jFiQvGe1XLGMNdoOxz/I1cFvz+JxWFSuoyszhr4q1S9inZBkB+Qk1oJnIbYcjvip9Ptd0zcgir0cACkE/McgDtXm4jFRUmjinepLmMghg7bUxnrVG8yrj5a6XyUaQ7gfcAVS1WBA+e2B/Kihi4uoo2CHuPmZzR3FD8tNQurL8uOOK0NoG7FOEa8YGT616ftF2Oz6ykrWMucls/JzTGVgF+UgAVrfZwzE469aU2qkAY+lP20UXHGxikrGOu/sM1IiSFj8p/KtUWq49j2qQQICB+g7UnXiKWOj0Rz7Iykrt5z3qS03fakyvU1fmhUTEYOaltIlNzEuOS1XKquW5vLFp03puivcqfO4Q9aUbgc7ecVq3VsI5JN3BB71EIlOB7flXPGunFNHB9Y91JrYziWzyh59qDuwMKa0hCm7uPrSiJc0/bLsL6yuxmgkgjaeaXa3pWl5agYGDTHVCvHBoVZPoL6xrsZc8bmMYBqg6sWKlTz6VvyHCZA7VTaPc2ehzzW1OpoduHxTS1RBpyOwf5T0HUV6x8F8/wBha5nr/ap/9EQ155psabpAecrXpPwgAGla8B0/tU/+k8FcmJqczcfQ6cFW9piZ6dP8j0WiiiuQ9gK5fx1HG2j2k51WXTZre8SS3mgtftErSYZQiJgkk7iOAeM9s11Fcz42lsrTTbK+utWttLmtLxZbWe6XdEZdjrsYAgkFGccEEde1AFfwrfXVzdWslx4mk1KO9sPtlvA+nrBlCV+fcO4yAV/2hXXV5p8N3sJru1hPibS9UvNM0z7Fb2+nggJCGTc7liSWJWP0Ax716BqepWuj6XdaleyeXbWsTSytjOFAyeKALdFeMSePJba/1zU4vEVpc3UuhPdW9jDepNFbzBm2oqgkM6oAzEcn5j0xjqJ2u/But2UcWpX+ow3em3cs0d5cNNmWFUYOufu53MCq4XkcUAd/RXnOlNf6WfBuqPq9/eS62wiv4p5y8bmS3eUMiHiPayYG0Dg8561VsLzURoeg+LW1W+e71HVIop7Vp2MHkzTGMRrF91dgKkEDOVOSc0AeoV89eMpvL+JHiMHp9ph/9J4a961G8nsbQzW+n3N/ICB5Fs0Yc+/7xlX9a+fvEUkt/wCO9fuZ7Gez
ka5izBOULpiCIclGZeevBPWt8Pbn1OTG8vsXzGhY3UUSDdkn0qpqeoCaToRjjmrllapJFknGDg8daq6nZqr5yOlYU/Y/WPM+Yh05tjLiuQ9wo/2hXVxX6x2wAHJ6g1y8Fsv2mIAdWA/Wu6i02IwAMASB3FZZvWoU+XnR1cjnL9z2Oanvw0hIY9az7i5BIbk+1bl1pipKxyM5P5VmXFkMHPbv610YarQaTicqtGXvopreEDO4AU/7WCDtBB9adHZZPDAeoxV6DSy44GPc10VKtGGrKbpt+6rlSCbbyuQfU1ObraPk+8f4jWlBpkS/c+Zx1Bp76dEr5wGlboPSuGeMoORLpt6pGQLwR8pHuPqakF/KeoXGKuS6bEmXlkwO6ikFtZhQdp9jjrT9tQkrpXFy20MS/uAzgn8au295/oUY7heahvrdBIwH4Vas7VRbRAgAFeK6qkqfsotlScfZpLcy7u4+ZzVWO6yW54x+Va1/aqsjgYwAD0qglqN5+ldNKcHC6OilOkoWa1LFvcDyhkcVYFztAwfzqfTrJZoGI5I7VYOnrjIbA9TXHUr0VNxZyzSbukZ7XOTk/Me3tVaSb5uGPvWuLCNvukufp0qrJZbW+Uj3NVTr0r2QRcY6tFFZjuAAwPWrIuAMlst6D0oW1/ehFQnP8WOK0Y9K8xCXwiD9adavSj8Q3aT91FFbp2I3NxngCpWvzkAdhjmri2kKkKkfQ43EVWntwgZmAK5/rWCqUZu1iGrCDUMtls4FZV3dBpjgjJNbS2qiNyCMA8+1ZFxB85+tbYZ0uZ8qNKTipe8aZ1A/Z1UjouM1T+2nsxIFa508PYRnA+7nd3rOFkdrEDoe9Y0KlB3t3JaS+IrGfuATxzTZJyBnHA7VeTTyT0+hHeke0EbBTzXQq1K9kF4rWxnJNuXJ604Sk9V/KtCOzTJz17Edqf8AZ/lwYwT6ih4iF9BuceiMsy4PfiqM1yVkY4yM10K2W/Hy4+vasq8sikjH0rajWpylY3w9Smpe8jOW8Uk4/wD1U5rtVAPXNSLbIO3NIsIDAD7tdV4noc1FvRDPtQGMAE59e1ILwADJ/CpjbKDnGKT7MpOMA80rxEp0OxJYXP8ApIb0B/lWmLshTgZz1qhY2226UcHIP8q2EgHlj5Rn+dceIlBS1R52LcHU91Fe2vGjlJGefyq2NTO7DAgD3qGO3XeVxjAzUjWoVcgEnFc1T2Mpao579iVtUHHIHHWs/UL0yqPm4/nVlYF4HVuuMVFe2gaFcgZPaijGjCasioSXMubYyluQ2Tgk1KLgYA60JbL0HbvVhLMYzt/Gu+U4I6JzpdCMXA9KUT8d6uR2G4A8flUn9n4Odo+tc7r0loYOUOxQ+0DFOS4Pbir407J6AfWl+wIPlyS3oO1Q8RRE+XsYlzMBK2cjI6020uAt5DzyD61bu7PbMQw6UyxtEN/CMZy2K6ueHsm/I7Izp+zt1sWry8Ek7d+c1RF51yDWzNYgXMgC4GTjiqcliCMjkeorno1aPKkjmjKmtJIprejJwT+NO+2VIbMZ6j8RTfsQHOBW96ZfNQY03fy9sVGbsYx0qUWh9MHPahbEE8inemhp0FuQCcnB9KryXOGbJwa1xZhELFc1nz24LngDnrVU6kJPQ2o1aTlqh+nXnzSH2GK9R+Dbb9G11vXVT/6Tw15lY2wJcY5Ir0/4Pp5ek68vpqv/ALbwVzYlxu7bnbgXB4qXKun+R6NRRRXKeyFZPiG/vdOsI5rBtLErShT/AGldNBHtwTwwVstwOMdM+la1cx440iXVtLstmmwanFaXqXM9jMVXz0VWG0FvlBBYNyQDtIJwaAM7SL+STxI+sa1q3h22xaG2WCwvfM8zLhtzswX7u0hRjje3PNduyhlKsAQeoNef6J4YsdT8Rtqkngyy0vTfsZi8m4ht3a4kLKVfbGWVQqhhnOTu9AK9BoAxbnwtpd5qlzfXEO83NibCWEgCNoiSTwBnJ3EZz0qHSfCNppd4
LuW+v9Rmjtzawm+kV/JiJBKrhRnO1clsscDmugooA5rSvBVhpN7aTpeX9xFYKyWFtcShorQMMHZhQT8vyjcWwDgUlv4H0621CGdbq+a0t7lruDT3lU28MzEkso27urMQCxAJ4ArpqKACvAfFkgX4i+JAe91D/wCk0Ne/V88eNFdviT4j29PtMPf/AKd4a2oK87HHjoKdFpmrbzKgVQoPcH0rN1S4DSZXj1ojiuGxjOenBqnqEUobaxyeazoUIKte+p8zD3movYdbv+/jLHPzD+dd1DdRG1XnPv6V5tCZVmUE5wRXRwC7lh2qxC/WsM2wUayi5StY6YzeHl7utzWlljZpHCqW5Az3rNuiAUGOhwfSqjC6jkdcnuM5qu5uGwCW46ZPWpw+EUHdSOadRz3NKLYDlow3vVz7SiR4Zsj+6vWsDFzuOC2O2DUgSZRu6H681dTCRm9ZCUnHY3RdkqMgRJjgjr9Kb9oGf3eFU/ec9TWGiXT8tnHbceKcVuHkCqSw9B0rP6jTT3RTqyNdrqBSAsZdj3xUUt2+PmgGO2KosJkAzIFx2FROznrOwNVDCwvff7yHUb0Ib2YNLn26elW7a5AhhGOg/Ksi7R1b7+Se/rU8CTNEnPavRnRi6aVzSUEqakmWbm4BaT6Cqsco81uOq9ahuY5gzAmqY81T1/WtqdKPLZM2pYdSje512lSqto3zYAPNXSxYjlW9Sa5yx8/7P8vQdatCO4/gIH415NfCJ1JS5jBycXym6HRf9Y6geg71SupIfNzIox2AqiUmUnLDcRxg1Rn+0LJg53H3qaGCXNfmDnc9DVWVC4aRwqD+EVdeeEEtIxbPRR2rmo45xIGfr6k1cRZ1BPA/2jWtbCRbXvCvyaLU1JrqRlBICL6LVEzjc+SSGNVGaRnxvLYqo+/ccueta0cJGKsJJzerN1LoLHtODnANY80gaVyc9aVBLsPfNUJDIs/Pc1vQw8YybTNKVPmdr7HdFkFhGe2wEis8SIGfPfpmqLTXItsBsggY5qozT4xn9a8+hgbXvLdkynz2NZboLlVIZlPGfWq8t1uPJHHfNZw81TnPB5qF1k7cd8V2QwkE7iUObS5rQzgKMEe6+tSG4iXsy/0rGj83aRn6c04m4XqTgVTwsW9w9nZ2ua4uhkHdn04rMvLkG4bkncc1GTL1BrPuPMMpJ6itKGGjGVzahR5pasnEoDMfU8UpZMDHes9lmOADzSr5vOW5/lXZyI9B4Zb8xf8AMG/GcihZFwQo6VQPnBhg59aX98Gzn5aORC+rK25sWEg+1pnHQ/yNaYlUR45xn16fSuesBKbkHvg4rS8m4Zcr6+tceIpRc9WefiaajUtcuLOokJPAPGKek+PvNnGeh61nCGcNjPPemsk4Yn9aydCD6mHIu5oNOGb5eD7+lMuW3QlywzxnFZ4E+/GaZOZkjPpVxoJSVmXGleSVy7E6FByM/TqKlSVQeST6e1YqSylMjPP6VZjE5AOeO9aToLqy6mH5d2bsdyqjoPqKd9pjJ6N+fWsdUcsOcE1KsMuc7uPrXHLDQve5jt1NM3KluB19T0p6SxxrlnAb0WsoxyAcmkImxhAT9Kn6tFqyYk3cbfz7rhmDkDvxSafIv9owk9N1UrhZmkIzg+lR2qyi9Tnnd0zXeqUfZON+h2QpL2bd9bHWNOryS7eCGOM1CZ43zu6+orNPnB27c9qYVmz1wep964I4WK6nI231LzSJnhvzFJuT1Bql5c2PvA/jTfLmx1rZUY9xcq7l7cn+yfpSiVEJICjj8azik/fpTSswPHHuar2CfUap+ZpGUEEsc/WsueRPNOMc04+bjk5rNuFlaQkEVvRopPc6cNQUpas1rKVQ0nOPlr0v4RNv0zX29dV/9t4K8ksxLl/pXq3waz/Ymubuv9qn/wBJ4ayxMEtT0cBSUMTKz6f5HpNFFFcp7QVy/ji2+16fp8clt9tthfI9zYBlDXcYR/kVWIDkHa+3uENdRXCeOru5uI9R
htLg2M+gWcWtR3SxiRmOLhTGFPA4jIzzwxGKAK/w/s9SZtHmudKutOj03RF0+U3ShGnlzGflXOdq7G5OM7+K9DrD0Sw123m87U9fTUIHiwsS2KwlWJB3bgxzxkY963KACiiigAooooAK+ffF7AfEjxHn/n5hP/ktDX0FXgXiuES/EbxKSM4uYf8A0mhrWi0panDmDSoO4QXoH3CoPAJNUNRukd8g7RVqOwcHAXI9T2qle2qBsEEj3ooxo+1vE+bp25lzbFWCVfNUjGMjrXRR3qonykYrn4bdRIuB/EBW0lgCTz+tVjFSduc0rNc3uE4u4ZCd56k4qnLOrSAnj+lTxWO5TwBg9zThp/zorr65JNckZUYN2Zg1JldJVbkSYHpipRLEPuAl/U9KsrpiE5Vwo7g1L/ZqBcqrSN6dM1nLE0e5SpT6IpiRSP30uVPZe1KbobdsYEaH+I9atnTSUy6rGO5qpNZKh2xo0jdj2pQq0Zvf/L/IHCcdyB7m1Tk5cioXvrc9IR+VEmnOT8xVR6ZpjWKAcOufrXdBUe7YLkW5Ru5UdyevoPSrFtOqQx/Nziq01qQ2BVgWaiCEgdRXXLk5UmbTdP2aVx006y+aTjoKzGkTzAKvNb/u5OOlVvsoyTjg1VLlSsjSg6cb3ZsabcpHbHO3P8Oae17AT0K88AVWtrRniG0fjmpv7OyQTE2a4Zxo87bepyyabfYcdQiUnanzepqIXe4liAXPc9qk/s4jpFgerHpSfYQPuL5h74NEXQWwnYjaSM/M75PoKhe838O5x6CrBsWz85VQe1Daa7gnYFX1JrRTordjjy9Sv9sQDCKAfUiohImTk5GasSWMaY+cE+1RraANg8H0rWMqdroq9NbDxcLs44OKzWmDSkZ5zWsLMYwRxjGay5bVVcsM5zmqoyg27GuHdO7udD5sZtwDtyAMHHTiqLSxn0GetaCWaGMN1yo4zUUumnrsOBXn06lKLabOblfUzzKpGCR9aa0isM5HFWm0/cOB65pr2YAGV4PpXWqlPoyrxRXSRcYI4p/mqB8r/TIqxHYHbwhK9zTv7PXGfmA9xSdalfcHysqNIPWs6eRQ7HIwTW0+nkKOv1rNurIK7AryK2o1KbejN8PKEZalJJVyBnvTXkTc4OM1L9lAzgU02ikgkY9RXVeJ6KlSve43zU7kYFJ9oQjHGTTxZDnjGaQWansRii8RqdDuT6dOouwey5P6Vsrdr5f3sZORx0rKsLRTeKAPvZ/lWibMYOQcA9RXHiPZuep5uLcHUuhpuxuOPzqH7TkZJz7U8Wagk8002hHGPxpR9kjFezGLcDPOOKdcTxNEBxSC2HUjnrT57RcrxgYBqrw5kVempJlWMKR+NPEoh43Zx0qwtqCBhT74NONiDztz7g0OrDqN1YN67EK34HUfjinC9QkYBpwslyAP1FPFgM4+WocqJLdHohBeqewP1pwvQBjcFHtSGxHHIx7d6YbFeyk1P7lk/u/Mryzq0hY8+4qKGVBexEY+9VhrMAkcD2psFmGuY8L/ABV0c0OV+h0RnSUX6Gos8TluRu/nUbTJnGRj0NItoBIwweDVgWCt95M+4NcDlSj1OS1ysZl9F/CmmZfSrh05M8Bh+FILFPU/iKFWpBylPzVx3H40wyKT/jV/7Ao75+o4ppsFY8/oKpV6Q7FBplx1H4VmTSqJOv1rffTlHSP8SayLmyUO2R3NdVCrTk9Dqw0qcZe8JZyLmQ+1epfBtt2ja6fXVT/6Tw15lp9oGEoHOB2r1D4QJ5ela8gGMar/AO28FZ4mUW7I9HAuLxMrdv8AI9FooorlPaCsLWfB3h/xBd/atV01LmfyhDvZ2GUBJCnBGRlm/Ot2igDF0fwloeg3bXWmWAt5mjMZYSO2VJBxgkjqBWrc+eLWb7KIzcbG8oSkhN2ON2OcZ64qWobw3IspzZCFrvy28kTEhC+Pl3EAkDOM4GcUAc3aa3rFj4lXSNbfTrgSWMl6JbKJ4zCEZVId
WZsg7uGyPuniqOl+K9blGg6lqNvYppOuyBLeOEP51vvRpIi7ElX3BcHAXBI61P4V0TXbFrr+3bTTJpr5Sb2/hvpJJZmxgKEMKhUAJAUNx7kk1BpfhTW4hoOm6jcWL6ToUge3khLma42I0cQdSAqYDZOC2SB0oAZH4w1lreDXmgsf+Efn1EWSxhX+0Khm8lZi2dvL4O3b0PXNGoeMNZgh1jWbaCxOh6RdNbzxSK5nmVCBK6sDtXaScAg52nkZoj8H6ytvBoLT2P8Awj8Goi9WQM/2hkE3nLCVxt4fA3bug6Zo1DwfrM8OsaNbT2I0TV7prieWRnE8KuQZUVQNrbiDglhjceDigDsNQ1Sw0m0N3qV7bWdsCFMtxKsaZPQZJxXh2pXtnqvjjxDeWF3DdWkl3FsmgkDo2LeIHBHB5BH4V72QCMEDFeH64v8AxcPxNgDH2qH/ANJoaG7JnnZp/u79UXFtGOCd5BxgKKzdUtJEl5UE89O1dNaTAx5TaDgA7qytWMeOHOMEk9ya8LB4uq8TytHlV6EI0lJM56FP3sZC4G8D9a2xHndtzkHismJ8XEQPI3j+dbWRuO3r2A9c16uNk7o46eu5NDbjaQ/cUogxMFPzBRxSxSAuC3PP4Cr6QIZFKfnXhVq8qbfN1PQpUlNKxAkLE/6pW/2s1OIW2/MyqPbrVkWy7uY3z7dKsLEUT/UgH1Jrya2N7f1+J6VLBvr/AF+BnJbRsWxFI5/2gcGq1xbuFO9lhQZ9K1mcqMyzoig9RxUc8Nu6hkBlf86qjjJxmnLb7/zsiamFi46b/d+Wpy7wWwfAEkmO/JqFoYcHFvIPqDW9NFNvwsSKvvULW1yc5aPHsK9+njlZNv8AH/I8ieGaei/A5OZfn4HHp6VobB9kgGO2c0zUrdklJGNx6ir0MX+hWyAfw9a9arXTpwkv60OVRbujHYbUlyPT+dVwoLY/Kr9xGo89QDkY/nVJRiQe1dlKV4tr+tCVojZ06FTbkEEj0FXliXA+dx9RUekqjWuSSCeeP51eIA/5aD6mvn8TWftpI66VP3Eyr5CMcfO/1GKY8ChsM4jPouOavKqkf60n2WmGA7iUjAz1LVhHEWer/r+vIt0dNimsEeT5cLMc9WzSvbwjPmylj/dH/wBare3BCyXAHsuKdh8YituOxJqniJX3/r1f6CVJf1/wDKkhQriO3YAfxEVSdRvJBGR2relilx+8kUKeoFZTRIsjkjHXIJruwuIUl/TOatT5WEajyGY5OR07VjS7WJOMH0rokUfZvQAdB2rBnADtjgntXbg53lIlq1jb8mSJU67dq54q5GMkDkgDGafHL5ceJEyAi4OfYUwTn+5jnr7V406k5rY6VGMeoi26GXI4XO0545p0lohOXXbgYxUnnocqOvU1KswZSpyT6msJVaq1NVCm9CODT08pSQ3PcdqlbT1xxJn6itC1ZPIUCQD696n3kdkavKq5hWU2ejTwdJwRktpwwAQPc9qwdVtkSaQBeSeK62adB1G0+ma5jVJleaUrwQSteplFevOreWxw4+lSpx93c55ogrfjUez73HGanc5YCo8c9+Sa+yi3bU4IydhhHAx2pHTHIPPQmlBGRkUFvbgiq1LTdy3pKE6lEueOf5GtnyQNxL7Rk59Ky9Hx/akJ9c/yNdYkCtFnKgkng9+a8LM8T7Ksr9l+bNIUnVMAwoD5Zzk8A1A0bZXavv8AWtu5jjjfLuODms9lBGFI3daVDEc65jnqUuV2KgjXByuOQOaddx8qCOwqwQpOGbB3cD14pb2NSFyM5AIwa2VV86I5dGyC2gXZvZuc8Y7VKsO44wevUUsCADYjBecZPc1eEQRuQyn1XpWNau1JlwhcqpbLn74z7jmn+QMnmOrJIz99fyqM5z1TH0rn9rJvc05EiqY1253L9RUbJx95v+AjirgTnGV+gFI0fHV/+AitVWIcDLdP3hBKfQ9ajtE/02NSP4qknBW5Ydven20Z+3Qvjv2r0HK1
N+hkvisXUt8yMdp+UnpVgxD0BPpnBq5Bb8M2D8xz9KHXB7dO4rwpYvmlZdDuVC0bsplP9kjjrmozGRjg1aKjPO2jy1x0GPrVKrYl07lcRED+L8qXySGJwTx06VZVR2UdfWpAgyflHT1qJYhopUrlCSIbegBx3NYF5xKwYDg9RXVyRsRwOPYVzeoKFkcYOc9AK9PLavNJowqw5Wg0eASmYLkEAH69a9A+Ewxp/iAZz/xNev8A27wVxfhqMvNOG7IOn412/wAK12WfiFck41Xv/wBe0Fbyq3xM4drHp5ZD97zd0/zR39FFFaHuBRRRQAUUUUAFFFFABRRRQAV4Z4gYL8RPExJ4+1Q5H/bvDXudeF+ISB8RPEhJ/wCXuHj1/wBGho6M87NP93fqjZULuH7ssQBisvWXcOuQM85xWlgnDbyoKjAFYupq4JVgcdc/WvGwMU66bPHxD9yxSgbdcxdjvH863ItkkjBGGVyTzWHCu24jOcfMK2olxIc4Byce9ehjrdOxzUiWJwY9qHn+XvWlZvm5RSQMHAHrVSDYeHXp3q1aS5vANuQF6968DF+9GSS6M9LDaSi2+prqUHHmlT6HtTZSm08yP7Cnxu2PvKffFDKz53zKB7Cvl00pa/1+B9E/ejZf1+JT2H+GAcdMmnjzlxlo0HcYpzRxngTO3sDVWWzEjqRbNIw6EjpXdBxm7Sdvl/mzilGUPhV/n/kgaC2c7muWY+gYVAYrJ8kOSRxkGriwToB/o6j8f/rUP9pVD/o6f99f/WraNZp2Ur/9vJGUqSau42+TOc1K2h3gmQ8/dqzYopghDDgLUOprK+5du3PpToNwWE7toCjivonzSw0U2eKrRrPQZqEMBkn2t/APzrn9o83C96070uzS7hmsteXwpORXr4GDhS1d/wDhjmqyUpNpWOw0a3/4l6NkKCAc1fW0k2jc8ZPqFx/WjQUzpkRGPur1+laPlOW/1Sn15r4THYyaxNRJ9fI+nwmEi6EJW6eZnPAyjmRB9BWfLAS23Ekg6810XluBny0H41m3D4mYvMqr7Cng8ZNya/r8CcXhIxin/X4mfHbSq64hUAepqy9umP3tyRx0UgUAW7tn7Q7H2NWwrBfkt/z4rpr4iSae34fmc1GhFp9fx/IzvJs0cDDbieprMvNiTsCQADxmuhc3JGTAigf7Wf6VzGpZaZ3ZQDux0r0csm6tT3n+NzkxsFCOi/CxKQzRPtxnb19vSsKUBZcDpWzE6pCwRM5Oc1kTfM5Yd+cV72DTjKS6HnTa0N6WX5YeMhgP5VH5y7NhQ85PSrIwNg4wEBH1qOTAxx1P4CvNhKO1jeSe9yNZt4wDzt4NSlySDuyR3FMkhGGZCBg/LRBD7fMMinLktzIlc17GzbOwiXDL7g06TJP+rU++ajRD5IXap4pphYDHlD8DXhOMXNyv/X3o9ZSkoJW/r7iOTOAPu47Hmud1A/v5R710EisFwM8djXPX5PmyZB78V7uVr3zzcW9jMblvftTeuc+tPbdketIBwRnBzX0i2OZPQYAM8ClVfU8+tL90cjpSqpwCabY2y3pSj+1Yxk9D/I11UIIiIVVP1PWuV0sH+04T9f5GumSJmXOwNyec189m6TqK76L82dVB9ipcsTuXaMr0Ge9VGJLbcYJB59Kt3cDqwO0A53YHTNU3YBjnOSOarD2cFynNVupajShPynJHrSuzKm2UYCgDPt2pqA7wwOOOlTXqq0cZyNxAIz9K3btNRZCWjZNaRAw/Kw5PQ96uRwSejY9ulUtMZvLUFQyjqCeRW9bxhsFQ6j0HSvJx1aVGTud2FpKokU/szHBJb8qf9lPHP6VqqMD7x/EU7qB84/KvFeYz/r/hj1FgYf1/w5kfZ+fvfkKPspboZPwFapTJzu/IUx1KjOX/AAFCx8nt/X4CeDitzmrqBI5XLsQV9aoRSZvYscKW/CtLUdxkk64J549qzYebqIt68cV9XhW5Uby7foeDVSVSy7nR2jAgIACB
0Ge1XjbI394GqNrDwVwCvoRWmIXIIBwCOoavlcZNRqXjKx72Fg5Q1VyD7KP9r8RSi1X1/SrISQY5bp60FW45b864/rM/5jq+rw7FcW0YBJbH1FBjRRwwP0FTlHwfvVE0MjZ6/nVRquT96RMqaXwxK8mzHJfHsK5nVxB9oZgGGP1rpJ4JFX/E1zF9EI95kUtlu56V9Fkyjz8ykeNj27JNEmgsPMuGBIyuf5123wswbXxFg5H9q/8AttBXCaN8zXIGQFTt+Ndz8KgRZeIQev8Aav8A7bQV7PKliZv0N8qb57eT/NHoFFFFbnuhRRRQAUUUUAFFFFABRRXN6v40sdIvbq3ay1C6WyjWW+mtYg6WqMMgvlgTwN2FDEDnFAHSV4brsfm/EnxGmcA3UX/pNDXuEciTRJJGwdHAZWU5BB6EV4hr3y/EXxI4bB+1xD/yWhobdnY8/M/9317o6qwsS9snluvAAORVLVNI3nDSLjr0qe31C3igRXLAgdqbd39i0RYgk+lfDUni4Ynminv2FUWFnh0m1f1OZk08wXMZ8wH5xx+NX0G2dvlXjkc0T3Fk8oZFyRjnHTmqzFfNeQk/nX0vNUrRXPdadjwmowful15VWFfLXcxbkE4xVnT38y7YggMB3rMhnURsWx82AParlje20JcyqPmORjmuPE0JeykoxbZ0UKq9pFt2RuggHJhXJ6804O+flhQfj/8AWrPN9ZEcSMPxoF/Z4++x/Gvn3g6j+w/uf+Z7KxUF9pfh/ka6yyjGAg/Gh5bhsAXCJ6/Lmso6nYBcnJFQNqmm7+Ycn12iohl1Zu6g/u/zZrLH00rc6+9/ojZaPccvdH8KabSKT/l7f6ZrMGr2GOIuPoKeNT09sMYQffaKf1PFR2i/uQvrWGlu197Ib6xVHPzB8+tO060z5cZGNq88cCpxd6bIN+wA/QVGt7ah22Hap9K7faYmVF0uV3XWxxunQjVVS6t2uV7+3jUkIoJbqTXM3MYjlO0c+3Stq5uIhKQWJbNZs0sRlbP3SPzNfQZdGpTilK7PIxcozldaHT6JJnTYwYy3yitAtzkIwHpmsjTbu2SzRXYqQMYFXRd2mf8AXv6/er5fGYebxE5KL37M9/C14qjFcy27ouBgesZP1qvcxB3ykSg46mmG8tP+ezn8aEu7HcSzM2TwCc4rmhRqwfMov7mbSq05rlcl96/yGJbyLIrGRAoPPFXCjEYa5/IYqI3+mgZKdPUCqg1XTlOPJ/8AHRWrpYms78j08kQp0KStzrXzZaMER/5eXJ9M1gajbIHYBwSGzgCtgarYg8RD8hWdd6hZfaN4jHI9BXoZdHE06usZfgcONdCcFyyX4mYsaojbcEn71ZcgBOQAOOAK1rm9tHOFg+8cdBzVaQQDpGuRxX1GHnKOsou7PEqJbJmpcQrHFDIpAO0Ag+lRAw+YxGASefeiZXMMcrMBgYwDVfei8EgMec1w0oNx3vubzklLYncrJyvGDwCKnj2R/OzhemapRsGTduBGcfSnQXUe4FmGCeh7U50W4tLoKM1e7OqggQxKTGHyOD7U5oEwf3LfhVVL60KoRKy4HQHAqT+0LYf8vDfnXyEqWI5m7P7mfSxqUOW11+BIbSN84LKf7prA1PS/nP7wct6dvStdr+CQ7UmDeuax76ePzid3zHOea9TK44mFW7bXyPPx8qEoaJfeYU1n5cowNxBwaqyxYdifWtrdGW3bwSWHFV5YkMrknqeB6V9dSxEtpHhNW1RlhdxPf1pvQ8DgVfWAPnDAHNM+zqz53AA8fWulVoiuP0kA6nAMHnJz+BrtobaN4VIiyfWuO09VhvkZm+7nGfpXWw39sY0JlZTjGAeK+Yz/ANpKcXTT26fM9bLJU7NTKmoWm3BCHn3rDaEAkDgbenpXSXdzaSJjzmJ+tY1w1kSWVyQeOKMtrVeTlmn9xnjacOe8WvvKG4CRWz8hGM1cnCSBFUcjHP0GKgbyOCGPHqauPPDGIhj7ycn0r0as
neLinfU4oJWabJ7TTT5Csqhsjg961oVaGNVO7OKS1uLH7LGBIeB1Bp0t7YqM/aMfjXy+Jr1683GUG9ezPeoUaVKKkpK9u5KJm/vH8qUTn++PyqmNSszyLkYoGoWZ/wCXkVzvCVOtN/d/wDdYmP8AOvv/AOCXhPz9/wDSl3q4wXP4VQOoWa9bmkGq2Xa6qfqVXeMH93/AKWLhtKS+/wD4JFdW2Zn2lsdazZbYpcRuSMA4wRW0t/Zsf+PgZps0trKhHnA16eHxVelaMou22z/yPPrYelO8oyX3/wDBLduqwxjDYPvTmYZzhc1WS8txEqCcKwGOR3pWuYTnE6/lXlyoVHNuSf3P/I9CNaCikmvvX+ZY8wZHyr+dIZBx8i9fWqxuIc5M64x6UnnwjGZl/Kn9WfZ/iL267r8C15o5+UfnR5oz90fnVXz4sf65fyo8+Ln98o444p/Vn2f4i+sef5Fr5HGCiVi6hAokI8tcDvWj9phHW4Tjg8d6hllt3UgzDPsK6sHz0al7O3zOfE8lWFrq/wAjH02LbPKCAvy5OK674XqVt/EQP/QVH/pNBXMebbpOcSg8Hp6V1Pw0MZj8RmI5T+1Bg/8AbrBX02GnKVdya3RjlkUptJ7J/mjuqKKK9E9oKKKKACiiigAooooAK891Q32kX/i+3XSL+9bWlWSxe2gMiM5gWEo7DiPBTOWwMNXoVFAGNbaBG3hbT9HvJrj/AEa3iiZ7a5kgYsigffQq2OPWsaT4W+FZriW4lt9QknlIaSR9VuizEAAZPmZPAA59BXZUUCaT0Zxx+GHhY9YNRP8A3Fbr/wCOUH4XeFW62+oH66rdf/HK7GilZE+zh2Rxg+FfhMdLW/8A/Bpdf/HKd/wq7wqf+XfUP/Brdf8Axyuxoph7OHZHG/8ACrfCmMfZr/Hp/at1/wDHKT/hVfhLOfsl9/4NLr/45XZ0UB7KHZHG/wDCrPCf/Prf/wDg0uv/AI5QPhb4UHS21Af9xW6/+OV2VFAezh2Rxv8Awqzwn/z63/8A4NLr/wCOUn/Cq/Cf/Prf/wDg0uv/AI5XVahfQaZpt1f3Tbbe1heaVsZwqgkn8hWBpPiq9utTsbPVdGOnf2jA09kwuRLuCgEo4CjY+1gcDcODzxQHsodkYOgfDDRJNOlOqWmoif7ZdKobUrlf3QncRcCT/nmE57961P8AhVvhQAD7Nf8AHT/ia3X/AMcqbxL4yk0LXrPSooNM3XNs8/najqX2RBtZV2g+W+5juzjjoa6e2eWS1hedI0mZFLpG+9VbHIDYG4Z74GfQUB7OHZHJ/wDCrvCv/PvqHHH/ACFbr/45Qfhf4VIwbfUCP+wrdf8AxyuxopWQezh2Rxh+FnhMnJtb8n1OqXX/AMcoPwq8JEYNpfEf9hS6/wDjldnRTD2cOyOOHwu8KqMC31AfTVbr/wCOUf8ACr/C3/PvqH/g1uv/AI5XY0UrIPZw7I47/hV/hb/n31D/AMGt1/8AHKP+FX+Fv+eGo/8Ag1uv/jldjUF5LcQ2cslrbi5nVcpCZAm8+m49KLIPZw7I4LxH8M9Gj8Maq+k22pHUVtJWtQup3LEy7Ds4MmDzjrWiPhb4UKgta3+cc/8AE0uv/jlbPhjXJtf0ua5ubJbOeG7ntZIVm80BopChIbauQSvpVDV/Fd5aajf2ul6MdRXTIFnvXNyIioYFgkYKne+0ZwSo5HPNOwezh2RX/wCFXeFT/wAu+of+DW6/+OU1vhZ4TY5a1vyffVLr/wCOV1djewalp9tfWzb7e5iWaJvVWAIP5GrFGwezh2Rxn/Cq/CXH+iX3HT/iaXX/AMco/wCFV+Ej/wAul96/8hS6/wDjldnRQHs4dkcd/wAKw8LYx5Go49P7Vuv/AI5Tf+FWeEzjNrf8f9RS6/8AjldnRQlYPZw7I40fCzwmOlrf/wDg0uv/AI5Tf+FVeEcg/Y77I6f8TS54/wDIldpRQHs4dkccfhf4VPW31D/wa3X/AMcrL8Qf
DHRk0+F9MtdRa5+2WytjU7lj5JnQS9ZP+ee/6dua7nU5762sjJp1il7c5AWJ5xEMdyWwcY+hqt4a1n/hIvDOnax9n+z/AG2BZvK379mRnGcDP5ClZB7OHZGH/wAKu8K8/wCj6hz1/wCJrdf/ABymt8LPCbHLWt+T6nVLr/45TofGtzd+LbzQ7e10lfsl2tu32jVfLuJF2I5dIfKO4APx83JU8iuxph7OHZHGD4V+ElORaXwPtql1/wDHKX/hVnhMnP2W/wA/9hS6/wDjldlRQHs4dkcZ/wAKr8Jf8+l9/wCDS6/+OUf8Kr8Jn/l1v+P+opdf/HK7OigPZQ7I4z/hVfhPOfst/n1/tS6/+OUo+F3hQDAttQwf+ordf/HK7KigPZw7I47/AIVf4V/599Q/8Gt1/wDHKb/wqvwn/wA+t/8A+DS6/wDjldnWNrGoa3ZzY0zRIb2FYvMeSW+EHPPyqNrZOAOu0c9etAezh2RyOq/C/RE1HRVsrPUDbveMt7jUrg4i8mUjOZOP3gj5H9TWp/wqvwn/AM+t/wD+DS6/+OV0ujarBrmiWOq2yusF5Ak6K4wwDDIB9+a52y8cTXUthdSaQYtE1G6NraX32gM7MSQjNHt+VWK4B3E8jIGaA9lDshF+F3hVfu2+oD6ardf/ABykPws8KFdptr8j0/tS6/8AjldlRRYPZw7I40fCzwmOlrf/APg0uv8A45SD4VeEgci0vs/9hS6/+OV2dFAeyh2Rxv8Awqzwmf8Al1v/APwaXX/xykHwq8JKOLS+H01S6/8AjldnRQHsodkcaPhb4UHS2vx/3FLr/wCOUo+F3hUHIt9QB/7Ct1/8crsaKLB7OHZHHN8L/Cz43W+oNjpnVbo/+1Ky5/hlo48UWMcVtqX9mNZ3DXB/tO5x5weHy8nzM52mX/IFdB4g8RatoUd7fHQo59Jso/NmuPtoWVkC5YpHtIOOeCyk447Vu3F0sOny3arvVIjKF6ZAGfwpWQezh2Ryx+F3hUjBt9QP/cVuv/jlB+F3hUnm31A9+dVuv/jlTeDfFtz4rgS6NvpMdu9uspW01X7TNGzAEJInlLtOCc88EYx3rq6LIPZw7I47/hV3hX/n31D/AMGt1/8AHKP+FX+FT/y76h/4Nbr/AOOV2NFFkHs4dkcafhZ4UOc2t+c9f+Jpdc/+RKX/AIVf4Vzn7PqGf+wrdf8Axyuxop2D2cOyOMHwr8JDOLS+56/8TS65/wDIlbugeG9L8MWs9tpUMkUU8vnSeZO8pZ9oXOXJPRQOvataigahGOqQUUUUFBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGd4g0v+2/Dmp6Vv8v7bay24f+6XUrn8M1y0Fv4jv9S0m+vdD+zNolpMRH9qjb7ZcNGECxkE7Uxu5fB5HHBruqKAOc1W6v2jiB8INqLTWw8wefBtjY/ejcuwyPdQc+lWfCOk3Oh+E9M0y8kWS4toAjlCSo/2QTyQOg9hW1RQAUUUUAFFFFABRRRQAVDdzSQWks0NtJcyopZYY2UNIfQFiAD9SBU1FAHDeFpPEOl6Vq6TeFrpLh765vYI5Lu3AlEs5YJlXbBCtk5GOOtP1Kx17TNa1+fStKGoR6zDHscTpGLeZYzH+8DEEpgKcrk8EYrtqKAM/QdM/sXw9pulB/M+xWsVvv8A72xAuf0rQoooAKKKKACiiigAorDn8aeFbW4lt7jxLo0M8TlJI5L+JWRgcEEFsgg8YrQsdUstTDPYzi4iCqwmjBMThuhR8bX6fwk470ALqN1cWdk89rp81/KCALeF0Vm57F2VeOvWuW8E/wBvaP4N0fSrvw7PFcWghtZvMuocbMfNKpVmyF9DgnPFdpRQBw/iiw1PX4ZtJtvDX2d3uo3TVZJodkYVw3mqA3mb8LwNo574ruKKKACiiigAooooAKKKKACuO8Zf29e3dvpVpo17c6LLHuvprKeBJZeSPIHmSIVBHLMOcHAx
kkdjRQBl2s91Eul28OivbWrxMJVaWMGzCqNibVJDZ6fKSBiuN07w/rqadoPhmfTRHZaRexztqPnoUmihYtGFQHeGJ2A5AAweTxXo1FABRRRQAUUUUAFFFFABRRRQBw3iVdc1PxCLOfw5fXnh222SBbWe2AvZeD+8EkqkIp/hx8xGTwMHqJru+8+WJdJMkH2Qyq7ToN8uT+5K9uP4unNaNFAHGWWn6hqHjHTtXbQf7Ft7G2mhkMksTSXG/btTETMNi7d3J64wOtdnRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHJ+JI0/wCEz8G/IvN5c546/wCiy1z3ijUNRhXxvHa6hc25gOnC3aOQjyN7AMVHQZ7+vevSJLW3mmhmlgiklgYtC7IC0ZIKkqexIJHHY1FLpmnzmczWNtIbjZ52+FT5u37u7I5x2z0oA4hdEnfxte6D/b2tjTzpkV5j7c/mLMZHTcJM7guFB2A7c9u1YcOu69r9t4Ttnk3C70YXch/tN9PNzMCoP7yNGYkDnaMfeyc4r1gWtuLs3YgiFy0YjM2wbygJIXd1xkk496qT6Do9zp0WnXGk2MtjDgR20lujRJjphSMD8KAKfhEakvh2BNVuoLm6R5F82GfzgUDkKC+1dzAYUnAyQa3KhtbW3sbZLa0t4reCMYSKJAiqPYDgVNQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAf/Z", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from speechbrain.augment.time_domain import DropFreq\n", + "\n", + "dropper = DropFreq(drop_freq_count_low=5, drop_freq_count_high=8)\n", + "dropped_signal = dropper(clean)\n", + "\n", + "# Let's plot the two spectrograms\n", + "plt.subplot(211)\n", + "plt.specgram(clean.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "plt.subplot(212)\n", + "plt.specgram(dropped_signal.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5pX06BDNBpZ4" + }, + "source": [ + "The amount of frequency drop is controlled with the following parameters:\n", + "\n", + "\n", + "* **drop_count_low**/**drop_count_high**, that impact on the number of frequency bands to drop.\n", + "* **drop_freq_low**/**drop_freq_high**, that correspond to the minimum and maximum frequencies that can be dropped. \n", + "* **drop_width**, which corresponds to the width of the band to drop." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "k-Cmou5_Dq2W" + }, + "source": [ + "## 4. Clipping\n", + "Another way to remove some piece of information from a speech signal is to add clipping. 
It a form of non-linear distortions that clamps the max absolute amplitude of the signal (thus adding a saturation effect).\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 904 + }, + "executionInfo": { + "elapsed": 1181, + "status": "ok", + "timestamp": 1704406093908, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "GN_sHLQwMvQm", + "outputId": "21f3d327-becc-4933-f501-d05dda700033" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0, 0.5, 'Frequency')" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGwAi8DASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAopsj+XGz7WbaCdqjJPsKyNA8Rw6/JfxJY3tlNYyrFNFeIqtlkVwQFY8bWHXBoA2aKxdY8SQ6TewWKWN7qF9NG0wtrJFZ1jUgFzuZQBkgdck9AavaVqlrrWl2+o2Tl7a4TehKkEeoIPQg5BHYigC5RXL+MIDeX3hqxa4u4YLnU2Sb7LcyQM6i1ncDchBxuVT17VL/whGlf8/Wuf+Dy8/wDjtAHR0Vzn/CEaV/z9a5/4PLz/AOO0f8IRpX/P1rn/AIPLz/47QB0dFc5/whGlf8/Wuf8Ag8vP/jtH/CEaV/z9a5/4PLz/AOO0AdHRXOf8IRpX/P1rn/g8vP8A47TW8F6Qi7nvNbUep128H/tWgDpaK5seCdJIBF3rhB7jXbz/AOO0v/CEaV
/z9a5/4PLz/wCO0AdHRXOf8IRpX/P1rn/g8vP/AI7R/wAIRpX/AD9a5/4PLz/47QB0dFc5/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtAHR0Vzn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O0AdHRXOf8ACEaV/wA/Wuf+Dy8/+O0f8IRpX/P1rn/g8vP/AI7QB0dFc5/whGlf8/Wuf+Dy8/8AjtH/AAhGlf8AP1rn/g8vP/jtAHR0Vzn/AAhGlf8AP1rn/g8vP/jtH/CEaV/z9a5/4PLz/wCO0AdHRXOf8IRpX/P1rn/g8vP/AI7R/wAIRpX/AD9a5/4PLz/47QB0dFc5/wAIRpX/AD9a5/4PLz/47TT4L0dWCm91sMegOu3mT/5FoA6Wiuc/4QjSv+frXP8AweXn/wAdo/4QjSv+frXP/B5ef/HaAOjornP+EI0r/n61z/weXn/x2j/hCNK/5+tc/wDB5ef/AB2gDo6K5z/hCNK/5+tc/wDB5ef/AB2j/hCNK/5+tc/8Hl5/8doA6Oiuc/4QjSv+frXP/B5ef/HaP+EI0r/n61z/AMHl5/8AHaAOjornP+EI0r/n61z/AMHl5/8AHaP+EI0r/n61z/weXn/x2gDo6K5z/hCNK/5+tc/8Hl5/8do/4QjSv+frXP8AweXn/wAdoA6Oiuc/4QjSv+frXP8AweXn/wAdo/4QjSv+frXP/B5ef/HaAOjornP+EI0r/n61z/weXn/x2j/hCNK/5+tc/wDB5ef/AB2gDo6K5z/hCNK/5+tc/wDB5ef/AB2mr4L0dmIW91skdQNdvOP/ACLQB0tFc5/whGlf8/Wuf+Dy8/8AjtH/AAhGlf8AP1rn/g8vP/jtAHR0Vzn/AAhGlf8AP1rn/g8vP/jtH/CEaV/z9a5/4PLz/wCO0AdHRXOf8IRpX/P1rn/g8vP/AI7R/wAIRpX/AD9a5/4PLz/47QB0dFc5/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtAHR0Vzn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O0AdHRXOf8ACEaV/wA/Wuf+Dy8/+O0f8IRpX/P1rn/g8vP/AI7QB0dFc5/whGlf8/Wuf+Dy8/8AjtH/AAhGlf8AP1rn/g8vP/jtAHR0Vzn/AAhGlf8AP1rn/g8vP/jtH/CEaV/z9a5/4PLz/wCO0AdHRXOf8IRpX/P1rn/g8vP/AI7R/wAIRpX/AD9a5/4PLz/47QB0dFcTe6JBofiPwxJZXmq/6RqDwypPqlxOjp9lnbBWRyPvKp6dq7agAooooARmCqWIJAGcAZNcF4b1sDxN4puW0nW0ju5o7m3Mulzx+YsdtGrAFlADbkYAEgntXfUUAcXeXc2neLbXxI+l6lLY3ukrblILVpZoJA/mBXjXJGQ5GegK84rT8D6fdab4StYb2EwXMkk9w8JIJi82V5Ahx3AcA+4roaKAOI1XQLPTfFfha7gm1B5JdUlDLcahPMgzaXB4R3Kjp2Ax0rt65zxL/wAh7wj/ANhV/wD0jua6OgAooooAKKKCcDJ6UAZWv65FoWn+cUM1xKwjt4F+9K56AVjWvg/+1AL3xRK97dvyLcSFYoP9lQD+v/66TRF/4STxFP4glG6ytS1vp6nocfek/HoP/rV19buTpe7Hfr/kc8Yqs+aW3Rfr/kcZd6Jd+EwdR8PNLJZp81xpruWVl7lCeQR/n0rqdN1G21XT4b60ffDKu5T3HqD7jpVquQs1/wCEX8XNYD5dL1YmS3HaKcfeUegP+Aov7VO/xL8Qt7KSt8L/AAOvooorA6AooooAKKKKACiiigAooooAKKKKACiis7XdWi0TRri/lGfLX5E/vueFH500nJ2QpSUVdmXrusXs2opoGiFRfyLvnnIyttH6n/aPYf41FH8P9DaI/bEnvLpuXuZp33k+vBxVzwppEmm6a1xefNqV63n3TnruPRfoM4
/Ot6tpVHD3ab+fcwjTVT3qivfp2OOhur3wfqMNnqFzJdaLcNsgupTl7d+yue4967Gqmp6db6tp09jdLuhmXafUehHuDzWN4Rv7hre40bUGzf6Y/lMx/wCWkf8AA/4j/PNKX7yPP1W/+Y4/u5cnR7f5f5HSUUUVibhRRRQAUUUUAFFFFABRRRQAUUUUAFISACScAdSaWuX8XXU9y1r4dsXK3OokiVx/yygH3m/Hp+dXCHPKxFSfJG5UMt541vJY7a4ltNAhYo0sR2vdsOoB7L/P+VmXwBoyRBtNE+n3af6u5hmYsD7gnkV0dlZwafZQ2lsgSGFQiKPQVPVutJO0NF/W5mqEWr1NX/W3Y5zw/rV295NomshU1S3XcHXhbiPs6/1H/wBcDo65zxbpk1xZxapYDGpac3nQkfxr/Eh9QR2/xrX0nUodX0q2v7c/u5kDY/unuD7g5FKok1zx+fqOm2m4S+XoXKKKKyNgooooAKKKKACiiigAooooAKKKKACq19qFpplsbm9nSGEMF3t0yTgVZrkMf8JR4wbd82l6M2AP4Zbj+u3+f1rSnBSbb2RnUm4pJbs6+iiiszQ5zxL/AMh7wj/2FX/9I7mujrnPEv8AyHvCP/YVf/0jua6OgAooooAKKKKACiiigDnPEv8AyHvCP/YVf/0jua6Ouc8S/wDIe8I/9hV//SO5ro6ACiiigArnPGd9Nb6MtjaH/TdRkFrDjtu+8fwHftkV0dcnbf8AE68f3FyfmttHi8mP085/vH8Bx+Va0kr8z2Wv9fMyrN8vKt3p/n+B0Wm2EOl6bb2MAxFAgQe/qfqTzVqiis223dmiSSsgrE8V6U+raDMkGRdwET2zDqJF5GPryPxrbopxk4yUkKcVKLi+pnaDqqa1odpfrgGVPnA/hccMPzBrRrk9B/4k/ivVtEPywT4vrUdsNw4H49vausqqsVGWmxNKTlHXdaMKKKKzNAooooAKKKKACiiigAooooAK5PUx/bvjWz0v71ppii7uR2Mh+4p/n9Ca6e6uY7O0muZm2xQoZHPoAMmue8E20h0qbVrlcXWqTNcvnsp+4PpjkfWtqfuxc/kvn/wDGp70lD5v5f8ABOmooorE2CuT8R/8STxDp3iJOIGP2O9x/cY/Kx+h/pXWVR1jTY9X0e7sJMYnjKgnsex/A4NaUpKMtdupnVi5R036F6isHwfqMmo+HYRcZF1ak204PUOnHPvjB/Gt6pnFxk4voVCSnFSXUKKKKkoKKKKACiiigAooooAKKKKAEZgqlmIAAySe1cp4TVtW1HUPEsoOLlzBaA/wwocZ/Ej9Ks+Nb2WDQvsVsf8AS9RkW0iH+91P5Z/OtrT7KLTdOt7KEYjgjCL74HWtl7tO/V/kYv36lui/Ms0UUVibBXJaD/xI/FGo6C3Ftcf6bZ+gB4dR9D29q62uW8aRPaQWOvwKTNpk4dwOrRN8rj+X61tR1bh3/PoY1tEprp+XU6mimRSpNEksbBkdQysO4PQ0+sTYKKKKACiiigAooooAKKKKACiiigDI8Tat/YugXN2nM+PLgXqTI3C8d/X8KXw3pI0XQbazbmbbvmbqWkbljnvzx+FZOp/8Tnxzp+mj5rbTU+2TjsZDwg+o6/jXWVtL3YKPfX/Ixh71Ry7af5/15BRRRWJsc54l/wCQ94R/7Cr/APpHc10dc54l/wCQ94R/7Cr/APpHc10dABRRRQAEgAknAFcJp3i/WZ4tF1i7t7EaLrNysEEUauJ4Q+fKdmJ2tuwMgAY3Dk4ruyAQQRkGuF07whrMEWi6PdXFidF0a5WeCWNnM84Td5SMpG1duRkgnO0cDNAGj4y17UtEfSk0/wApEup3S4nksZrsRKI2YHy4mDcsAM+9a3h/UP7U0O2vDf2l8ZA2bi0iaKNsMRgIzMVI6EE5yD06VWkk8Upp9oY7bRpr35vtSNcSxRjn5SjbGPTqCPxpfC+i3Gi6dcLeTRS3d3dy3k5hUrGryNkqgPOBwM
nk8nvQBiat4i0TVfFXhaz0/WLC7uotUkaSGC4R3QC0uASVByMEgfjXbVzfiRQNe8IkAA/2q/b/AKc7mukoAKKKKAKmqXyaZpd1fSfdgiZ8euBwPxPFZXguwey8NwST83N4TdTMepZ+efwxVXxoTff2XoKE51C6Hmgf88k+Zv6flXUgBVCqAABgAdq2fu00u/6f0zFe9Vb7fm/6QtFFFYmwUUUUAcp4xB0+40rxAgx9huAk5H/PF/lb+n511YIIBByD3qnq1gmqaRd2L4xPEyAnsccH8Dg1m+Db97/wxambP2i3Bt5geoZOOffGD+NbP3qafbT7/wCmYr3arXfX7v6RvUUUVibBRRRQAUUUUAFFFFABRRRQBy/jeV57Cz0WFiJtTuFhOOojBy5/Dj866WKJIIUhjULGihVUdgOAK5e2/wCJt8Q7q46waTbiFPTzX5Yj8Miurrap7sYw+f3/APAsY0/elKfy+7/g3CiiisTYKKKKAOUsf+JP4/vbPpb6rELqL081eHH1Iya6uuW8bI1ra2GuRAmTTLlZGx1MbHa4/HiunR1kRXQhlYAgjuK2qe9GM/l93/AsY0vdlKHz+/8A4Nx1FFFYmwUUUUAFFFFABRRRQAUUVHcTx2ttLcSnbHEhdz6ADJoA5hv+Jx8QwvW30aDJ9POk/wDsf1FdXXMeB4JG0aXVJxi41Od7ls9lJwo+mBn8a6eta3xcq6af5/iY0dY8z66/5fgFFFFZGwVDd2sd7Zz2swzFNG0bD2IwamooTsDV9DmvBF1I2htp1wf9J02ZrST3Cn5T9MYH4V0tcon/ABKfiK6dINYttw9POj6/+O/zrq61rL3uZddf6+ZjRfu8r6af18gooorI2CiiigAooooAKKKKACmySJFE8kjBUQFmJ7AU6uc8b3ckPh17S3P+k38i2kQ9S55/TP51UI80lEipLki5diHwTG1zaX2uTKRLqdw0i56iNTtQfhzXU1BZWkdhYW9pEMRwRrGv0AxU9OpLmk2FOPJBIKKKKgs5zxL/AMh7wj/2FX/9I7mujrnPEv8AyHvCP/YVf/0jua6OgAooooAKKKKACiiigDnPEv8AyHvCP/YVf/0jua6Ouc8S/wDIe8I/9hV//SO5ro6ACiioLy6jsrGe7lP7uGNpG+gGaEr6A3bU5uw/4mvxA1C86w6ZAtrH6b25Y/Uciurrm/A9rJD4bjupx/pF/I93KfUueP0xXSVrWfv2XTQxoL3OZ9dfvCiiisjYKKKKACuU0f8A4lfjfWNMPEV4q38I9z8r/r/KurrlfFf/ABLtW0PXBwsFx9nnP/TOQYyfYf1rWjq3DujGtolPs/8AgM6qiiisjYKKKKACiiigAooooAKhurmOzs5rqU4jhjaRj7AZNTVzPjmdzoSadAcT6lcJapjsCck/TAx+NXTjzSUSKkuSDkL4GtpE8PC+nH+kahK93If948foAfxrpajt4EtraKCIYjiQIo9ABgVJRUlzScgpx5IqIUUUVBYUUUUAVtRsk1HTbmyk+5PE0ZPpkYzWP4JvXu/DFvFNxcWha1lHoyHA/TFdDXKaV/xK/HeraeeIr6Nb2Ef7X3X/ABJ5/Ctoe9CUfmYz92pGXy/r+up1dFFFYmwUUUUAFFFFABRRRQAVzXjm4kXw99hgP+kajMlpH/wI8/oCPxrpa5S9/wCJp8Q7G16w6Zbtcv6eY/yqD74wa1o/FzdtTGv8HKuuh01rbx2lpDbRDEcKLGo9gMCpaKKy3NkrBRRRQAUUUUAcv44jeHTLXV4VJl0y5SfjqUzhh+o/KumjkSaJJY2DI6hlI7g9KhvrSO/0+4s5f9XPG0bfQjFYngi7kuPDUVvP/wAfFi7Wko9ChwP0xWvxUvR/mY/DV9V+R0dFFFZGwUUUUAFFFFABRRRQAVyl7/xNPiFY2vWHTLdrl/TzG4UH3Awa6snAya5XwZ/pzatrrc/b7oiI/wDTJPlX+v5VtT0Upf1r/wAC5jV1lGHz+7
/g2OqooorE2CiiigDnPEv/ACHvCP8A2FX/APSO5ro65zxL/wAh7wj/ANhV/wD0jua6OgAooooAKKRjtUnBOBnArzHSrvUYtG8KeKX1W+mutYvIUu7eSdmgMc+7CrH91Nny4IAPynOc0Aen0VyviZp73xLoGhLd3NraXa3NxcNaymKRxEECoHXDKCZMnBB+WpPBd1dSWuq2N1cy3X9m6lLaRTzNud4wFddx7kB9ue+3nmgB/iX/AJD3hH/sKv8A+kdzXR1xGq6peXnivwtBPoWoWMaapKVuLh4Cj4tLgYASRm568gdPwrt6ACuY8czO+iw6ZCcT6lcR2y46gE5Y/TAx+NdPXP3mnXd7420+6khIsLG3d0ckYaVjjGOvAwa1otKXM+mplWTcOVddDdhhS3gjhjXbHGoRR6ADAp9FFZGoUUUUAFFFFABWV4l03+1vDl/ZBcvJESg/2xyv6gVq0U4ycWmhSipJxfUyfDOpf2t4bsLwtl3iAkP+2OG/UGtauV8Kf8S/V9c0Q8LDcfaIB/0zkGcD2H9a6qrqxSm7bEUZNwV9/wDIKKKKzNAooooAKKKKACuVuf8AiafEW0g6w6VbNM3p5j8Aflg11JIAJJwB1JrlvBQN4mp64w51G7Yxk/8APJPlX+tbU9Iyl8vv/wCBcxq6yjD5/d/wbHVUUUVibBRRRQAUUUUAFcr4u/4l+o6Jrg4Ftc+TMf8ApnIMEn6f1rqqzPEWnf2t4evrLGWkiOwf7Y5X9QK0pSUZq+xnWi5QdtzTorI8L6j/AGr4asLsnMjRBZP99flb9RWvUSi4tp9CoyUoqS6hRRRSKCiiigAooooACcDJ6Vyvgsfbm1bXW6390REf+mSfKv8AX8qveL9QOm+Fr6VM+a6eTGB1LP8AKMfnn8Ku6Lp40rRLOxAGYYlVsd2xyfzzWq92m330+7+kYv3qqXbX7/6ZfooorI2CiiigAooooAK5XTP+JX4+1WxPEWoQreRDtuHyv+JPP4V1Vcr4v/4l9/ouuDgWtz5Ux/6ZSDBJ+n9a1o6tx7r/AIYxraJT7P8A4c6qiiisjYKKKKACiiigAooooAxPF+oHTfC19MhPmunlR467n+UY/PP4Vc0TTxpWiWViAMwxKrY7tjk/nmsTxH/xMvFGhaOOUSQ304/2U+7n2JyK6qtZe7TS76/oYx96pKXbT9X+gUUUVkbBRRRQBzniX/kPeEf+wq//AKR3NdHXOeJf+Q94R/7Cr/8ApHc10dABRRRQAVzNl4H06xvraZLq+ktbSZ57SwklUwW8jZyyjbu43NgMxAzwBXTUUAYFz4UgubCwhfU9SF3YOz2+oCZTcKWyGySpVgQcYKkcD0q9oujW2haf9ktmlk3SPNLNM26SWRjlnY9yT+HYcVo0UAc54l/5D3hH/sKv/wCkdzXR1zniX/kPeEf+wq//AKR3NdHQAUUUUAFFFFABRRRQAUUUUAFFFFAHK6v/AMSzxzo+ojiK9RrGY+/3k/M/yrqq57xtZvdeF7iSH/X2hW6iPoUOSfyzWxp14mo6bbXsf3J4lkHtkZxWs/ehGXyMYe7UlHvqWaKKKyNgooooAKKKKAMLxjftp/ha9kjz50qeREB1LP8ALx78k/hWho9gul6NZ2K4/cRKhI7nHJ/E5rB1/wD4mfi7Q9IHMcLNfzj2XhP1yK6utpe7TUe+pjD3qkpdtP1YUUUVibBRRRQAUUUUAFFFFAHK+Fv+Jdrmu6IeFjnF1AP9iQZIHsDgfjXVVymtf8SzxrouqDiO6DWEx9zyn6/yrq61q6tS7r/gGNHROHZ/8EKKKKyNgooooAKKKKAOV8Rf8TLxVoWjjmONzfTj2ThPzORXVVyvh3/iZeKNd1g8xpILGA/7Kfex7E4NdVW1XS0Oy/4JjR1vPu/y0CiiisTYKKKKACiiigArM8Q6d/a3h6+scZaWI7B/tDlf1ArTopxbi00KUVJNMx/C2o/2r4ZsLtjmQxBJP95flP6itiuV8L
f8S7Xdd0U8Kk4uoB/sSDJA9gcD8a6qrqpKbtt/mZ0ZNwV99vuCiiiszUKKKKACiiqOs6gulaLeXzY/cRMy57tjgficU0m3ZCbSV2Yfh7/iZeK9d1c8xxOLGA+ycv8Arg11VYfg/T207wtZRSZ86RPOlJ6ln+bn8wPwrcrSs05u2y0+4zopqCb3ev3hRRRWRqFFFFAHOeJf+Q94R/7Cr/8ApHc10dc54l/5D3hH/sKv/wCkdzXR0AFFFFABRRRQAUUUUAc54l/5D3hH/sKv/wCkdzXR1zniX/kPeEf+wq//AKR3NdHQAUUUUAFFFFABRRRQAUUUUAFFFFADZEWWNo3UMjAqwPcGuY8Du0GnXmjysTLpl08Iz1KE5U/jk/lXU1ykn/Ep+Isb9INXtih/66x9P/HePxranrGUfn93/AuY1PdlGfy+/wD4Njq6KKKxNgooooAKKKzPEOpDSPD99fZw0UR2f754X9SKcU5NJClJRTb6GR4a/wCJl4j13WTynmizgP8Asp97HsTg11VY/hbTTpPhqxtGGJBHvkz13t8x/U4/CtirqtObttt9xnRi1BX3ev3hRRRWZqFFFFABRRRQAUUUUAc/40sXvfC12Ysie2xcxEdQyHPH4ZrV0y+TUtKtb1MbZ4lkx6ZHIq0yq6FWAKsMEHuK5fwSzWltqGhyE+ZptyyLnqY2O5D+PNbL3qTXb9f6Ri/dqp91+X9M6miiisTYKKKKACs/XdRGk6He3xIBhiLLnu3RR+ZFaFcp4w/4mF3pGgLyLy5Ek4/6ZR/Mc/X+laUoqU0nsZ1ZOMG1uaHhLTjpnhexgcHzWj82XPXc3zHP54/CtuiiplJyk5PqVCKjFRXQKKKKkoKKKKACiiigAooooA5XW/8AiWeNdF1QcR3QawmP15T9f5V1VYPjKwfUPDF2IsieAC4hI6hk5498ZH41o6RfpqmkWl8mMTxK5A7HHI/A5FbT96nGXbQxh7tSUe+v+ZdooorE2CiiigArlfGZ+3NpOhL/AMv90DKB/wA8k+Zv6flXVVymn/8AE18f6he9YNMhW0i9PMblz9RyK1o6Ny7f0vxMa2qUO/8AT/A6sAAYHAooorI2CiiigAooooA5zxL/AMh7wj/2FX/9I7mujrnPEv8AyHvCP/YVf/0jua6OgArP1XUp9NijeDSL7Ui5IKWZiynufMdBj6ZrQooA4bxR4n1T/hDddlh0HV9KlisZHS6uDBhD0JHlysdwBJHGOOtJZaTp/hrx9o9nocQgtr7Trk3cUbErJ5bRbJW9W+dhuPJ3V280MdxBJDNGskUilHRxkMpGCCO4rJ0nwro2hySSWFoySSRiIvJNJKyxjoil2JVf9kYHtQBl+MvCMvia90q4RdLlWx87MGpWpnikLhQPlDDkbTzmrvgu9ivfDMPlWFvYfZ5ZrV7e2H7pHikZG2cD5SVJH1p8vg/RZbGysvJuY4LKLyYBDezxFU4+UsrgsOB94mtSw0+00qwhsbG3jt7WFdscUYwFH/6+aAOR1WDXY/FfhZ9R1HT7i0OqS+XHb2TxOp+yXGMsZWB4yPuj8OldvXOeJf8AkPeEf+wq/wD6R3NdHQAUUUUAFFFFABRRRQAUUUUAFFFFABXNeN7aRtDXUbcZudNmW6j9wp+YfTHP4V0tMmiSeGSGVQ0cilWU9wRgirhLlkpEVIc8XEZaXMd5Zw3UJzFMiyIfYjIqauX8Eyvb2l7oc7EzaXOYgT1aM8ofyz+ldRRUjyyaCnPnipBRRRUFhXKeKf8AiZ63ougjlJJftVyP+madAfYnI/CurrlPDP8AxNte1fX25jZ/sdqf+madSPYnn862paXn2/Mxra2h3/L+tDq6KKKxNgooooAKKKKACiiigAooooAK5S9/4lHxAsrvpb6rCbaQ9vNXlSfcjAFdXWB4y0+S+8OTPb5+1WjC6gI6hk54/DNa0Wuaz2ehlWT5brdam/RVPStQj1XSbW/i+7PGHx6HuPwORVys2m
nZmiaaugooopDCuU0b/ibeNdW1XrBZKLCA9sjlz+f862df1RdG0K8v2IzFGdgPdzwo/Miq/hTTG0nw5aW8oP2hl82YnqXbk5+mcfhW0fdpuXfT/P8ArzMZ+9UUe2v+X9eRtUUUVibBRRRQAUUUUAFFFFABRRRQAhAIIIyD1Fct4OJ0+fVfD7n/AI8bgvCD/wA8X+Zf6/nXVVyeuf8AEn8X6VrI4guv9AuT255Q/n39BW1L3k4d/wA0Y1fdan2/J/0jrKKKKxNgooooAq6lfR6Zplzey/cgjZyPXA6fj0rI8FWMlp4bhmn/AOPm8ZruY+rPz/LFVfGLHUJtM8PRk5v5w8+O0KfM39PyrqlUKoVQAAMADtWz92ml3/JGK96q32/N/wBIWiiisTYKKKKACiiigDnPEv8AyHvCP/YVf/0jua6Ouc8S/wDIe8I/9hV//SO5ro6ACiiigAooooAKKKKAOc8S/wDIe8I/9hV//SO5ro65zxL/AMh7wj/2FX/9I7mujoAKKKKACiiigAooooAKKKKACiiigAooooA5PWP+JH4x0/WB8ttfD7FdegbqjH+WfQV1lZ2uaVHrejXOnyYHmp8jf3WHKn8DiqfhPVpNT0nyrvK6hZt9nukPUOvGfx6/nW0vfgpdVp/l/kYx9yo49Hr/AJ/5m7RRRWJsYPi/U5NN0CUW+TeXTC2t1HUu/HH0GTV/RdMj0fRrXT48YhjCkj+JupP4nJrAtT/wknjN7wfNp2j5ihPaSc/eP4D+hrrq2n7sVD5v+v63Mafvyc/kv1/ryCiiisTYKKKKACiiigAooooAKKKKACjrRRQByfhc/wBj6xqfhyThI3N1Z57xOeQPof611lcv4vt5rQ2fiKzQtcaa+ZUHWSE8OPw6+3NdFa3UN7aRXVu4eGVA6MO4NbVfeSqd9/X+tTGl7rdPtt6f1oTUUVW1C+g0zT5725bbDChdj/Qe56Vkk27I1bSV2c5rx/tvxTpuhJ80Fuftt56YHCKfqe3uK6yua8H2U/2S41m+XF7qj+cyn+CP+BfwH866WtKrs1BdP6ZlRTac31/LoFFFFZGwUUUUAFFFFABRRRQAUUUUAFZfiLShrWg3Vj0kdMxN/dccqfzFalFOMnFpoUoqSaZj+F9WOs6Bb3MnFwo8qdT1Ei8HP8/xrYrkUP8AwjXjVkb5dO1o7lPaO4HUf8C/mfauuq6sUpXWz1M6Um42e60/r1Ciiud8X6lNbadHp1lzqOot9ngA/hB+830A7+9TCLlJRRc5qEXJlXw5/wATrxFqfiFuYAfsdmfVFOWYfU/1rrKp6Vp0Ok6XbWEH+rgQKD6nufxOT+NXKqpJSlpsTSi4x136hRRRWZoFFFFABRRRQBzniX/kPeEf+wq//pHc10dc54l/5D3hH/sKv/6R3NdHQAUUVlarDr8ksZ0e+0y3jC/OLyzkmJPsVlTA/A0AXL+/ttMsZby7kKQRAFmCFjycDgAk8kdBUWkavY67pkWo6dMZbWUsEdo2Q5VirAqwBGCpHI7VStrnUtItLq88TarpP2WNQRLBbPbrHzyWLyOD1Hp+Nc38O/E2iDwPcyDU7Z1sJby5uhG+8xRG4lcMQMnBXketAHUax4n0jQZI4tRumjlkRpFjjheVgi9XIRSQoz944HvWnb3EN1bRXFvKksMqB45EOVZSMgg9wRXE3etadoXj271XVrlYLG/0m3WzuZAdrFHlZ4x/tESIQvU/hWt4AtLix8CaPb3ULwyrBu8pxho1JJVSOxAIGO2KAF8S/wDIe8I/9hV//SO5ro64jVdLvLPxX4Wnn13UL6N9UlC29wkARM2lwcgpGrcdOSev4129ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXKa9Z3Wi6sPEumQtKu0JqFsnWWMdHH+0P8966uirhPldyJw51Yqabqdnq9ml1YzpNEw6qeQfQjsfasHX9dluZ/7B0JxLqUw2yyqcrap3Zj2PoP/rZsXvgnQ7
25e4+zPBK/3zbyGMP9QOK09L0bT9FtzBp9qkCHlsclj7k8mrTpRfMrvyM2qslyuy81/WgaRpVvoulwWFsP3cS4LHqx7sfcmr1FFZNtu7NkklZBRRRSGFFFFABRRRQAUUUUAFFFFABRRRQAjKroUdQysMEEZBFcZaTnwTqR0+8J/sK5ctaXB5Fux5MbHsPQ/wD18dpUVzbQXlu9vcxJLC4wyOuQa0hNRunszOpBys47oVp4VgM7SoIQu4yFhtx659K495D441VIog3/AAj9nJukkIwLuQdFHqo/z2q6PAHh4SA/ZpjEDuEBnfy8/TNdHDDFbQpDBGkcSDCogwAPYVSlCGsNX+RDjOppPRfn/wAAkAwMCiiisTcKKKKACiiigAooooAKKKKACiiigAooooAztc0eHXdKlspiVLfNHIOsbjowrL8PeIJHlOja0RBq8Hy/McC4Xs6Hvn0/yOlrP1bQ9N1uERahapMF+63IZfoRyK1jNW5ZbfkZTg788N/zHatrFjotk11fTrGg+6v8Tn0UdzWL4d0+7v8AUZfEmrRGO4mXZaW7f8u8Xv8A7R7/AP18Czp/gzRNPuluktnmnT7j3EhkK/QHit+m5RimodeolCUpJz6dAooorE2CiiigAooooAKKKKAOc8S/8h7wj/2FX/8ASO5ro65zxL/yHvCP/YVf/wBI7mujoAKKKKACiiigAooooAzdZ0Kx16K3S+Fx/o0vnQvb3MkDo+1lyGjYH7rMOves7/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdrLn8GRjxRYRxXOvf2Y1ncNOf7au8eaHh8vnzcj5TLx/gK7euZn8cadb6hLA1rfNaQXS2c2orEv2eKYkAIx3burAEhSoJwSKAH/wDCEaV/z9a5/wCDy8/+O0f8IRpX/P1rn/g8vP8A47XR0UAc5/whGlf8/Wuf+Dy8/wDjtH/CEaV/z9a5/wCDy8/+O10dFAHOf8IRpX/P1rn/AIPLz/47R/whGlf8/Wuf+Dy8/wDjtdHRQBzn/CEaV/z9a5/4PLz/AOO0f8IRpX/P1rn/AIPLz/47XR0UAcT4i8FxReGdVk0m5146ktpKbULrV2xMuw7MAy4POODWkvgjS9ozda5nHP8AxPLz/wCO07Q/FZ150e20LVIrJ2kUXsxgEWULKeBKX6qQPlqPTfG+nane2kMdpfQ298zrY3s0QWG6KgkhCGJGQpI3BcgcZoAf/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtdHRQBzn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O10dFAHOf8ACEaV/wA/Wuf+Dy8/+O0f8IRpX/P1rn/g8vP/AI7XR0UAc5/whGlf8/Wuf+Dy8/8AjtH/AAhGlf8AP1rn/g8vP/jtdDI/lxs+1m2gnaoyT7CsLS/FtnqNvq001reacNKbF0L1FUoPLEm75WbjawPr7UAY8HgyM+KL+OW517+zFs7doD/bV3jzS83mc+bk/KIuP8TWp/whGlf8/Wuf+Dy8/wDjtO0jxfaarex2j2N/YSTwG5tvtsSoLiIYyy4Y4xuXIbDDI4qPS/G+n6re2kCWl/BDf7/sN3PEFiu9oJOwhiRlQWG4LkAkZoAd/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtdHRQBzn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O10dFAHOf8ACEaV/wA/Wuf+Dy8/+O0f8IRpX/P1rn/g8vP/AI7XR0UAc5/whGlf8/Wuf+Dy8/8AjtZev+DI49OibS7nXjcG8tlfbrV237ozoJesv/PMvz27c1t6h4mh03XbDS59Pvz9tlEEV0sa+SHKM4UksDnCN0BqKfxWU1270m00LVL+W0EZnlt/ICJvGV+/KpPGegoAb/whGlf8/Wuf+Dy8/wDjtH/CEaV/z9a5/wCDy8/+O1JrHiu20i/ayS
w1DULiOD7TOllEr+TFkgM25h1KtgDLHaeK2LK8t9Rsbe9tJRLbXEayxSL0ZWGQfyNAGH/whGlf8/Wuf+Dy8/8AjtH/AAhGlf8AP1rn/g8vP/jtdHRQBzn/AAhGlf8AP1rn/g8vP/jtH/CEaV/z9a5/4PLz/wCO10dFAHOf8IRpX/P1rn/g8vP/AI7R/wAIRpX/AD9a5/4PLz/47XR1R1XUW0y2SVNPvL53cIsNois2cE5O4gAcdSR29aAMr/hCNK/5+tc/8Hl5/wDHay9L8GRvqOtLe3OvC3S8VbLOtXYzF5EROMS8/vDJyf5YrWTxlpv9jX+o3EV3bGwlEFzazRfvklO3agVSQxbemMEg7hzRbeMdPe31OS+gu9Lk02EXF1DeoodYiCQ42MwYHaw4JOQR1oAb/wAIRpX/AD9a5/4PLz/47R/whGlf8/Wuf+Dy8/8AjtTaP4pt9Wv2sJLC/wBPu/IFzHDexqrSxZxvXazDgkAg4IyMjmt2gDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OigDnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OueufF0NlfCG80nVLe1a5FqL+SFRCZGbYv8AFvClsAMVxyOeaAMnVPBkaajoq2Vzrxt3vGW9xrV2cReRKRnMvH7wR8j+Wa1P+EI0r/n61z/weXn/AMdp0visjXrzSbTQtUvZLMxieaDyBGm9Qw+/KrHg9hS6x4ttdIvpLNbDUL+aCAXNyLKJX+zxEkBmyw67Wwq5Y7TxQAz/AIQjSv8An61z/wAHl5/8do/4QjSv+frXP/B5ef8Ax2t20uoL+zgvLWRZbeeNZYpF6MrDII+oNTUAc5/whGlf8/Wuf+Dy8/8AjtH/AAhGlf8AP1rn/g8vP/jtdHRQBzn/AAhGlf8AP1rn/g8vP/jtH/CEaV/z9a5/4PLz/wCO10dFAHOf8IRpX/P1rn/g8vP/AI7SN4I0vacXWuZxx/xPLz/47WpqupzaakZh0q+1BnJGy0EeVx3Jd1A/Osj/AITnT5LW0e1stQuru6kmiSwiiUTq0RxLuDMFXacAktjkYzmgCh4d8FxS+GdKk1a514ak1pEboNrV2pEuwb8gS4HOeBWn/wAIRpX/AD9a5/4PLz/47St40006NZ6hBDd3D3k5toLOKIee0y7t6FWICldjZyQBtPNaGia3ba7aSzQRzwyQTNBPb3ChZIZFxlWAJHQg5BIIIINAFK18G6Taaha3yvqU09q5kh+06nczqjFWQna8hGdrMOnet+iigAooooAKKKKACiiigAooooAK8vvbPURoWteEV0u+e7vtVllhulgYweTLP5pkaX7oKgkEE7srwOa9QooAKKKKACiiigAooooAKKKKAPMv7LsbzWtN/wCEX0DUdJuDNIdQuZLOS2TyGjcMrlsCVixUjG7BGcinaTDqF5YeDfD76TfW1xoc0T308sDLCohhaMbJD8r7yRjaTwTnFel0UAFFFFABRRRQAUUUUAIzBVLEEgDOAMmvLwZvEX/Ceabb6dq1vJrSf6FLdadPBG2LRI/mZ1AX5lI5r1GigDgMXfi3WtKZNMv9OjsbC5S5e7t2iCyyoqCNM/fx8xJXK8DnmqulR6hqFv4M0V9IvrSfQ5EkvppoCkS+VA8QCOeJNzMMbSeM5xXpNFABRRRQAUUUUAFFFFAHEeONS8nWfDkaadqtz9j1Jbud7XTppkWPyZkzuRSCdzLx15rP8YW2l6mmoNY+G9UfxJLCv2K9jsZY2WQoPLbz8BUCkjIJHQgg16FPdW9r5f2ieKLzZBHH5jhd7nooz1J9KloA4eW4vvDPirUtQutNvtQTUbC2VHsbdpf38XmBkYKPlB3ghjhevIrc8G6VcaJ4M0fTLvAuba1RJQDkK2OQD7Hj8K3KKACiiigAooooAK
xPFWt3OgaKbqy0261C6kkEMUUEDyhWOfncICwQYySAT0A5IrbooA83bTWufB0txYQ6lfalFqltqd4bqyktZLuSOSNmCJIq8bE2qBwNoGc80zWtN1Dxg/iO/sNPu7eN9Jis7VL2FoHuJVkaUja4BC/dXJABJPavS6KAONsJ5/EXjew1WPTb+ytNP0+eKRr23aFnllaM7AG5YKIySwyORgmuyoooAKKKKACiiigArz+/1c654sFlqOnazb6Tp12vlRrpdw63s6t8sjOqFRErYI5+YjccADPoFFAHnXjSy069k1JdP8O6i/ih1C2moQ2ciYlCjY4uMbQq8ZBbsRg1cuJ73w34o1m9m0u+1BNSs7fyGsrdpQZow6tG2PuA5UgtheTzxXc0UAYvhDS59E8HaNpd0Qbi1s4opcHIDBRkA+ma2qKKACiiigAooooAw/E13axWSWt5DrLRXGf3mlRzl0KkHlofnXOfocHNcRoFpqPhy90zWLnStQfTkS+tERLYyXMcck0ckUkqICxZtjBjgnkFuSa9TooA8ytNO1LTf7J8Rz6bdlBq99eT2cUZeeGG4DhG2Lkkj5SVGSNx9K6bwhBcvda9q89rPaR6nfiWCGdCjiNIo4wzKeVLFCcHnGM109FABRRRQAUUUUAIzBVLMQFAySTwBVSw1bTtVSR9O1C1vFibZI1vMsgRuuDtJwfarbKrIyuAVIwQRwRXG+FpLK08UeM1je3hgS8gYBSqqqi0hyfQAc0AdPqGq6dpMKzalqFrZRO21XuZljBPoCxHNW0dZEV0YMjDKspyCPUVxl1JYT/Ey2lvnt5LR9EZrN5SpjYmX96VJ4Py+X07e1Wvhx/yIlhtz5HmT/Zs/wDPDzn8rHts249sUAW/E2oapaT6LaaTNaQz6hem3aW6gaZUUQSyZ2q6nOYwOveo/sXjP/oPaH/4J5f/AJJrL1XS7yz8V+Fp59d1C+jfVJQtvcJAETNpcHIKRq3HTknr+NdvQBzn2Lxn/wBB7Q//AATy/wDyTR9i8Z/9B7Q//BPL/wDJNdHRQBzn2Lxn/wBB7Q//AATy/wDyTR9i8Z/9B7Q//BPL/wDJNdHXLeILi91DxBYaBYXktpuja6uZ4j8yoOFA+p/pVwhzOxE58iuTfYvGf/Qe0P8A8E8v/wAk0fYvGf8A0HtD/wDBPL/8k1D/AMInf/8AQ1at/wB9ij/hE7//AKGrVv8AvsVXJD+b8GR7Sf8AJ+KJvsXjP/oPaH/4J5f/AJJo+xeM/wDoPaH/AOCeX/5JqH/hE7//AKGrVv8AvsUf8Inf/wDQ1at/32KOSH834MPaT/k/FE32Lxn/ANB7Q/8AwTy//JNH2Lxn/wBB7Q//AATy/wDyTS+Eb+6uLS70/UJTLe6fcNBJIesi9Vb8R/KuiqJxcZcrNITU4qSOc+xeM/8AoPaH/wCCeX/5Jo+xeM/+g9of/gnl/wDkmujoqSjnPsXjP/oPaH/4J5f/AJJo+xeM/wDoPaH/AOCeX/5Jro6KAOc+xeM/+g9of/gnl/8Akmj7F4z/AOg9of8A4J5f/kmujooA5z7F4z/6D2h/+CeX/wCSaPsXjP8A6D2h/wDgnl/+Sa6OigDnPsXjP/oPaH/4J5f/AJJo+xeM/wDoPaH/AOCeX/5Jro6KAOc+xeM/+g9of/gnl/8Akmj7F4z/AOg9of8A4J5f/kmujriraLUvFWp6ldRaxd2Fjbzm2gS3ON5X7zH6k/5xVwhzXbdkjOc+WySu2aX2Lxn/ANB7Q/8AwTy//JNH2Lxn/wBB7Q//AATy/wDyTUP/AAid/wD9DVq3/fYo/wCETv8A/oatW/77FVyQ/m/Bk+0n/J+KJvsXjP8A6D2h/wDgnl/+SaPsXjP/AKD2h/8Agnl/+Sah/wCETv8A/oatW/77FU9W0LWdL0q41C08R6jPNbL5oilYFXC8kH8M01Tg3ZS/MTqzSu4fkaX2Lxn/ANB7Q/8AwTy//JNZWteD9b8RQrHq15
4YvPLVliafQXdot2MlS1wcHgfkK67Tr2PUtNtr2LhJ4lkA9MjOKtVk1Z2Zsmmro880X4f614dtbaHR9U8PWUkSxpJcRaDiW5VB0kbzuc9SRg56EVof2F49+2rP/wAJvY+WHZjb/wBiLsYHopPm7sDths+pNdnRSGcjZaP46tY5Fm8X6ZeF2JV59FIKD0GyZRj6gmm2Gi+O7PzPP8ZadfbzlftGi42ew8uZf1zXYUUAc59i8Z/9B7Q//BPL/wDJNH2Lxn/0HtD/APBPL/8AJNdHRQBzn2Lxn/0HtD/8E8v/AMk0fYvGf/Qe0P8A8E8v/wAk10dFAHOfYvGf/Qe0P/wTy/8AyTR9i8Z/9B7Q/wDwTy//ACTXR0UAc59i8Z/9B7Q//BPL/wDJNH2Lxn/0HtD/APBPL/8AJNamtakukaLd6gwDeRGWVT0Ldh+JxXO6f4c1i90+C6vPE2pRXEyCR44iAqE84A9s4rSNNOPM3ZGcqjUuVK7L32Lxn/0HtD/8E8v/AMk0fYvGf/Qe0P8A8E8v/wAk1D/wid//ANDVq3/fYo/4RO//AOhq1b/vsU+SH834Mn2k/wCT8UTfYvGf/Qe0P/wTy/8AyTR9i8Z/9B7Q/wDwTy//ACTUP/CJ3/8A0NWrf99iqrR6h4Y8QaWZ9Xu76xvZGtpBcEHY5HyEfU/1pqlF6Rlr8wdWS1lGy+RofYvGf/Qe0P8A8E8v/wAk0fYvGf8A0HtD/wDBPL/8k10dFYmxzn2Lxn/0HtD/APBPL/8AJNH2Lxn/ANB7Q/8AwTy//JNdHRQBzn2Lxn/0HtD/APBPL/8AJNH2Lxn/ANB7Q/8AwTy//JNdHRQBzn2Lxn/0HtD/APBPL/8AJNH2Lxn/ANB7Q/8AwTy//JNdHRQBzn2Lxn/0HtD/APBPL/8AJNH2Lxn/ANB7Q/8AwTy//JNdHRQBzn2Lxn/0HtD/APBPL/8AJNH2Lxn/ANB7Q/8AwTy//JNdHRQBzn2Lxn/0HtD/APBPL/8AJNH2Lxn/ANB7Q/8AwTy//JNHiq+vBLp2j6dOYLrUZSpmXlo41GWI98f1qIeFL8MD/wAJTqxwem8Vqqa5U5O1zJ1HzNRV7Ev2Lxn/ANB7Q/8AwTy//JNH2Lxn/wBB7Q//AATy/wDyTXR0VkanJSXfifS9c0SDUNR0q7tdQu2tnWDT5IXXEEsgIYzOOsYGMd662uc8S/8AIe8I/wDYVf8A9I7mujoAKKKKAEZVdSrAFSMEEcEVj2nhHw1YSmWz8O6TbyMjIXhso0JVhgjIXoRwRWzRQBQu9E0m/s4bO80uyuLWHHlQzW6OkeBgbVIwMD0q8iLGioihUUYVVGAB6ClooA5zxL/yHvCP/YVf/wBI7mujrnPEv/Ie8I/9hV//AEjua6OgAooooAK5Xw5/xMPFPiDVTyiSrZRH0CD5vzODXQ6hdrYabdXj/dgiaQ/gM1j+CLRrXwlZGTmW4BuHJ7lzkH8sVrHSnJ99P1/QxnrUiu2v6fqdDRRRWRsFFFFAHKxf8S34kTJ0i1SzDj3kj4/9B5rqq5Xxl/oc+iawOPsd6qyH0jf5W/kK6qtamsYy8vy/4FjGlpKUfO/3/wDBuFFFFZGwUUUUAFFFFABRRRQAUUUUAUdYvhpmjXt6TzBCzj3OOB+eKo+DrE6f4U0+JgfMePzXz13P83P54/CqXjpjPpdnpSE7tRvIoDj+7nJP6CuoVQqhVAAAwAO1avSkl3f5f0zFa1W+y/P+kLRRRWRsFNdFkjaNwCrAgg9wadRQBy/gV2i0e50uQkyabdSW/PUrnIP6n8q6iuVsv+Jf8RdRt+keoWiXK+m5DtP49TXVVrW+Pm76mNDSHL20/r5BRRRWRsFFFFABRRRQAUUUUAFFFFAHK+Nv9Mj0rRV/5iF4gkHrGnzN/Suq6Vyp/wCJh8SlHWPTLLP0kkP/AMTXVVrU0jGPz+//AIFjGnrKUvl93/BuFFFFZGwVzvji1e58K3MsX+utStzGfQ
ock/lmuiqOeFLi3khkGUkUow9QRg1UJcslLsRUjzxce5HY3SX2n213H9yeJZB9CM1YrmfAcznw2LKU5msJ5LV/qrZH6EV01OpHlm4hTlzQUgoooqCwooooAKKKKACiiigAoopCQoJJwByTQBy1v/xMfiPdy9Y9MtFhHtJId2fyyK6quW8DA3On32rsPm1G8kmUn+4DtUfhg11Na1tJcvbT+vmY0NYc3fX+vkFFFFZGxzniX/kPeEf+wq//AKR3NdHXOeJf+Q94R/7Cr/8ApHc10dABRRRQAUUVzNj4406+vraFLW+jtbuZ4LS/kiUQXEi5yqndu52tglQDjgmgDpqKxtd8Qpok2n240+9v7m/laKCG08vcSqFzkyOoA2qe9aFhcyXllHPNZXFlI+c29wULpgkc7GZeevBPWgDF8S/8h7wj/wBhV/8A0jua6Ouc8S/8h7wj/wBhV/8A0jua6OgAooooA5nx5M48NmziOJr+eO1T6s2f5A10UEKW8EcMYwkahFHoAMCuZ1z/AE7xr4fsOqQeZeSD6DCH8811Vaz0hFfP+vuMYa1JS9F+v6hRRRWRsFFFFAGN4rsf7R8K6lbAZYwl1H+0vzD9RVjQL7+0tAsLwnLSwKW/3sYP65rQIDAgjIPBFcv4FJt9LvNLY/Np97LAAf7ucg/qa1WtJrs/z/pGL0qp91+X9M6miiisjYKKKKACiiigAooooAKKKKAOVvv+Jh8RtNt+sen2kly3puc7R/Q11Vcr4Z/03xJ4j1Q8qbhbSM+gjGDj6kiuqrWto1Hsv+CY0dU5d2/8v0CiiisjYKKKKAOV8Uf6Fr/h3VRwEujayH/ZkGBn6YNdVXPeOLVrrwjfFOJIFE6EdihDE/kDWxp92t9p1rdr92eJZB+IBrWWtOL7XX6/qYw0qSXez/T9CzRRRWRsFFFFABRRRQAUUUUAFFFUNbvf7O0K+vM4MMDsv+9jj9cU0ruyE2krsxPBv+lz63q55+13rIh9Y4+F/ma6qsXwjZf2f4T02AjDeSJGHu3zH+dbVXWd5uxnRVqav/VwooorM1CiiigDldG/0Dxzrth0S5SO9jH/AI65/Ouqrldb/wBB8b6Bf9EuBJZyH6jKD866qtautpd1+WhjR05o9n+ev6hRRRWRsFFFFABRRRQAUUUUAFY3iy+/s7wrqVwDhhCUU/7TfKP1NbNcr4z/ANLfRdJHP2y+QuPWNOW/pWlFJzVzKs2qbsbOg2P9m6BYWZGGigUMP9rHP65rRooqJO7uzSKUUkgooopDOc8S/wDIe8I/9hV//SO5ro65zxL/AMh7wj/2FX/9I7mujoAKKKKAEYblIyRkYyK8x0q01GXRvCnhZ9KvobrR7yF7u4kgZYBHBuwyyfdff8uApJ+Y5xivT6KAOT1q50TV9M0+51rw3qN1C7O0cMmmyTPCw+X50UEqSOhx0qbwLa3lp4feO6huLeE3UzWVvctmWG3LExq2SSMDsSSBgdq6aigDiNV0Cz03xX4Wu4JtQeSXVJQy3GoTzIM2lweEdyo6dgMdK7euc8S/8h7wj/2FX/8ASO5ro6ACiikZgilmOFAySe1AHLaT/p3j3W73qlpDFZxn6/M3611Vcv4DUy6HPqLj59Qu5bg564LYH8q6ita/x27afcY0Pg5u+v3hRRRWRsFFFFABXK6d/oPxD1a16JfW0d0o91+Q11Vcrr/+heMPDuoDhZHks5D67h8o/PNa0tW491/wf0Ma2iUuzX+X6nVUUUVkbBRRRQAUUUUAFFFFABUN5crZ2U90/wByGNpG+gGf6VNXOeOrlrfwjeLH/rbjbAg9SzAEflmrpx5pKPcipLlg5dg8C2zQeErSST/W3Ja4c+pZiQfyxXR1BZWy2Vjb2qfdhiWMfQDH9KnoqS5puXcKceWCj2CiiioLCiiigCO4gS5tpYJBlJUKMPYjBrnvAczv4Wht5T+9s5ZLZ/Yqxx+hFdLXK+Hf9C8V+I9OPCtMl3GPXePmP5
4rWGtOS9H+n6mM9KkX6r9f0OqooorI2CiiigAooooAKKKKACuX8eMZdDg05CQ+oXcVsMdcFsn+VdRXLax/pvjvQrLqttHLdyD8Nqn8xWtH479tfuMa/wAFu+n3nUKoRQqjCgYAHYUtFFZGwUUUUAFFFFAHMePY2HhwX0YzLYXEVyn1DY/ka6WORZokkQ5R1DKfUGqmr2f9oaNe2eMmaB0H1IOP1rP8G3n27wjpspOWWLym9codv9K1etJeT/P/AIYxWlV+a/L/AIc3aKKKyNgooooAKKKKACiiigArlZf9P+JVunVNOsWk+jyHH/oNdVXLeFf9M1vxHqh5El4LZD/sxDHH51rT0UpeX5mNXVxj5/lqdTRRRWRsFFFFAHOeJf8AkPeEf+wq/wD6R3NdHXOeJf8AkPeEf+wq/wD6R3NdHQAUUUUAFFFFABRRRQBzniX/AJD3hH/sKv8A+kdzXR1zniX/AJD3hH/sKv8A+kdzXR0AFY3iu8+weFNTuM4IgZFPoW+Ufqa2a5bxv/pNvpWlDn7dfxo4/wBgct/StKKvNXMqztTdjY0Cz/s/w/p9oRhooEDf72Of1zWjRRUSd3dmkVypJBRRRSGFFFFABXM+PYmPhlruMZlsp47lPqrY/kTXTVU1WzGoaReWZH+vheMfUggVdOXLNMirHmg4liGVJ4I5kOUkUMp9iM0+sHwXeG98IabIT8yReU2eoKEr/St6lOPLJx7DhLmipdwoooqSgooooAKKKKACuV8U/wCma74c0vqJLs3Lj2iGefzNdVXLRf6d8S536pp1isf0dzn/ANBrWjo3LsmY1tUo92jqaKKKyNgooooAKKKKACuVvv8AQfiNplx0S/tJLY+m5DvH9BXVVyvjj/RrXTNVHH2G/jdz/sE4b+la0dZcvfQxr6Q5u1mdVRRRWRsFFFFABRRRQAUUUUAFcto/+m+PNdveq20cVpGf/HmH5iuoZgilmOABkk9q5jwGpl0S41Fgd+oXktxz1wWwP5VrDSEn8v6+4xqazjH5/wBfedRRRRWRsFFFFABRRRQAVyvg3/RbjXNKP/Lrfs6D0R+V/ka6quVi/wBB+JdwnRNRsVk+rocf+g1rT1jKPl+RjU0lGXnb7/8Ag2OqooorI2CiiigAooooAKKKKAIL25WysLi6f7sMTSH6AZ/pWJ4Gtmt/CNk0nMs4adz6lmJB/LFJ46uGg8JXccf+tuStug9SzAEflmt20t1s7KC2T7kMaxr9AMVrtS9X+X/DmO9X0X5/8MTUUUVkbBRRRQBzniX/AJD3hH/sKv8A+kdzXR1zniX/AJD3hH/sKv8A+kdzXR0AFFFFAASACScAVwmneL9Zni0XWLu3sRous3KwQRRq4nhD58p2Yna27AyABjcOTiu7IBBBGQa4XTvCGswRaLo91cWJ0XRrlZ4JY2czzhN3lIykbV25GSCc7RwM0AbXiDV9QttT0vR9JW2F9qBlfzrpWaOKOMAsxVSCxyyADI65zxUvhjWbnV7K7W+iijvrG7ks7kQk+WzrghlzyAVZTg9M45qhf6Rr90+i6wh03+29P85JId8i28scmAyhtpZT8qHODyCMc1f8MaNc6RZXbX0sUl9fXcl5cmEHy1dsAKueSAqqMnrjPFAGJq3iLRNV8VeFrPT9YsLu6i1SRpIYLhHdALS4BJUHIwSB+NdtXN+JFA17wiQAD/ar9v8Apzua6SgArl75WvfiLpkRUmOxtJLjOONzHZ+fQ11FFXCXK2yJw5kkFFFFQWFFFFABRRRQAUUUUAcr4O/0S717Sj/y7XzSIPRJBlf5GuqrlY/9B+Jcy9E1GwD/AFdDj/0GuqrWtrLm7pGNDSPL2b/r7gooorI2CiiigAooooAK5Xwd/pVzruqnn7VfsiH1RBhf5mt7VrwafpF5eZx5ELyD6gEis3wZZmy8IabGR8zxea2euXO7+tax0pt97L9f8jGWtWK7Xf6f5m9RRRWRsFFFFABRRRQAVj+KrP
7f4V1K3xkmBmUerL8w/UCtikZQylWGQRgg1UZcsk+xMo80XF9TO8P3n9oeHtPuictJAhY/7WMH9c1pVy3gNjDotzprH5tPvJbfn0DZH866mqqx5ZtImjLmppsKKKKzNAooooAKKKKAMfxXefYPCup3GcEQMin0LfKP1NT6BZ/2f4f0+0Iw0UCBv97HP65rG8b/AOk2+l6UOft1/Gjj/YHLf0rqq1elJLu/6/UxjrVb7JL+vwCiiisjYKKKKACiiigArlvFP+h694c1McBLs2zn2lXHP5GuprnPHVs0/hG8eP8A1tvtnQ+hVgSfyzWtF/vEu+n36GVdfu2101+7U6OioLO5W8sbe6T7s0ayD6EZ/rU9ZNWNU76hRRRQAUUUUAFFFFAHLeKP9M8QeHNM6hro3Tj2jXIz+Zrqa5WH/TviVcv1TT7FYvo7ndn/AL5rqq1q6KMfL89TGlq5S8/y0CiiisjYKKKKAOc8S/8AIe8I/wDYVf8A9I7mujrnPEv/ACHvCP8A2FX/APSO5ro6ACiiigAooooAKKKKAOc8S/8AIe8I/wDYVf8A9I7mujrnPEv/ACHvCP8A2FX/APSO5ro6ACiiigAooooAKKKKACiiigAooooA5XxV/oet+HNUHAjvDbOf9mUY5/KuqrnfHNs1z4RvWj/1sAWdD6FSCT+Wa27K5W9sLe6T7s0SyD6EZ/rWstacX2uv1/UxhpUku9n+n6E9FFFZGwUUUUAFFFFAHMePZW/4RlrOM4lvp4rZPqzZ/kDXSRRLBCkSDCIoVR6AcVzGv/6b4x8O6eOVieS8kHptHyn8811Vaz0hFer/AE/QxhrUk/Rfr+oUUUVkbBRRRQAUUUUAFFFFAHK6R/oXj3XLPot1FFdoP/HWP5muqrldZ/0Lx1oN70W5SW0kP4blH5muqrWrryy7r8tP0MaOnNHs/wA9f1CiiisjYKKKKACiiigDlb3/AE74j6bB1Sws5Lg+gZzsH9DXVVyvhz/TfFXiPUTyqzJaRn02D5v1xXVVrW0aj2S/z/Uxo6py7t/5foFFFFZGwUUUUAFFFFABUF7bLeWNxav92aNoz9CMf1qeihO2oNX0Oc8C3LXHhGzST/W2+6Bx6FWIA/LFdHXLeFv9D13xHpnQR3YuUHtKM8flXU1rWX7xvvr9+plQf7tJ9NPu0CiiisjUKKKKACiiqWsXn9n6Ne3mcGGB3H1AOP1ppXdkJuyuzD8G/wClTa5qp/5er9lQ+qJwv8zXU1heDbP7D4R02IjDNCJW+rnd/Wt2tKzvUdjOirU1f+rhRRRWRqFFFFAHOeJf+Q94R/7Cr/8ApHc10dc54l/5D3hH/sKv/wCkdzXR0AFFFZ+q61Z6LFHJeC7KyEqv2azmuDn3EasR+NAGhRXFeIPHdsnhbWLnSBfJe21m8sbXOm3EKKeADmSNVOCQcZ7U6yt7jw3400vTE1TUL611GyuGmW9uGmKyxGPEils7ciRgVGF6YFAHZ0VyXjLSda1K90qTTBNJawecbqCLVJbHzMhdnzx8nBB46Vp+E7+31Hw7BJbx3UXlPJBJFdzNNLHIjlHVnYkthgecnIxQBW8S/wDIe8I/9hV//SO5ro64jVdUvLzxX4Wgn0LULGNNUlK3Fw8BR8WlwMAJIzc9eQOn4V29ABRRRQAUUUUAFFFFABRRRQAUUUUAQ3dut3ZT2z/cmjaNvoRisLwLcNP4StI5P9bbF7dx6FWIA/LFdHXK+GP9C8ReI9LPAFyt2g9RIMnH0wK1jrTkvR/p+pjPSpF97r9f0OqooorI2CiiigAoooJABJOAKAOV07/TviHq911WytorVT7t85/WuqrlvAoNxpt9qrddQvZZlP8As5wB+hrqa1raT5e2hjQ1hzd9fvCiiisjYKKKKACiiigAooooA5fx4rRaFDqKDL6fdxXIx14bH9a6dWV0DKcqwyD6iqGu2X9o6Df2YGWlgdV/3scfriqvhK9/tDwpps+ct5ARj7r8p/
UVq9aS8n+ZitKr81+X/Do2qKKKyNgooooAKjuJktraWeQ4SJC7fQDNSVz3je7a08I3+zmSZRAgHcuQpH5E1UI80lHuRUlyxcuxH4DhdPCsNxKP3t5JJcv7lmOP0ArparadaLYaZa2a9IIVj/IAVZp1Jc03IKceWCj2CiiioLCiiigAooooAKKKKAOVl/0H4lwP0TUbFo/q6HP/AKDXVVyvjP8A0SXRNWHH2S+VXPpG/DfyFdVWtTWMZeX5GNPSUo+d/v8A+DcKKKKyNgooooAK5jx7Ix8OCyjOJL+5itlx6ls/yFdPXK65/p3jXQLDqkHmXkg+gwh/PNa0fjT7a/cY1/ga76fedRHGsUSRoMIihVHoBTqKKyNgooooAKKKKAOc8S/8h7wj/wBhV/8A0jua6Ouc8S/8h7wj/wBhV/8A0jua6OgAooooAr31lbalYXFjeRCW2uI2iljPRlYYI/I1kaT4Ut9KvTevqGoX90tv9mhlvJVYwxZBKrtUdSFyTknaMmt+igDCl8OTPY2VtF4i1qA2sXlGZJY2knHHMhdGBbjqADyav6PpFpoWmRafZK4hQsxaRyzuzMWZmY8kkkkn3q9RQBzniX/kPeEf+wq//pHc10dc54l/5D3hH/sKv/6R3NdHQAUUUUAFFFFABRRRQAUUUUAFFFFABXK3f+gfEewn6JqFm9ufQsh3Z/LArqq5bxyDbWOn6uvXT72OVj/sE7WH45Fa0dZcvfQxr6Q5u2v9fI6migEEZByDRWRsFFFFABWR4pvf7P8AC+pXIOGWBlU+jN8o/UiteuV8a/6WNI0cc/br1N49Y05b+laUVeauZVm1TdjX8PWX9neHdPtCMNHAu4f7RGT+pNadFFRJ8zbZpFKKSQUUUUhhRRRQAUUUUAFFFFABXK+C/wDRH1nSTx9jvnKD0jflf611Vcqn/Ev+JUi9I9Tsg31kjOP/AEGtaesZR8r/AHf8C5jU0lGXnb7/APg2OqooorI2CiiigArlfFX+ma14d0ociS7Ny4/2Yxnn65rqq5W2/wCJh8SL2brHptmkA9nc7s/lkVrR0bl2X/AMa2qUe7X+Z1VFFFZGwUUUUAFFFFABRRRQAUUUUAYni6y/tDwnqUAGWEJkUe6/MP5Vc0S9/tLQ7G8zkzQIzf72Of1zV5lDqVYZUjBB71y/gRjDpF1pbk79OvJbfnrtzkH9TWq1pNdn+f8ASMXpVT7r8v6Z1NFFFZGwUUUUAFcrpH+nePdcveqWkUdnGf8Ax5h+YrqJZFhieVzhEUsx9AK5rwHGzeH31CQYl1C5lumz15bA/l+taw0hKXy/r7jGes4x+f8AX3nT0UUVkbBRRRQAUUUUAc54l/5D3hH/ALCr/wDpHc10dc54l/5D3hH/ALCr/wDpHc10dABRRRQAUVFc3MFnbvcXU8cECDLySuFVR7k8CmWV9aalaJd2F1BdW0mdk0EgdGwcHDDg8gj8KALFFU9Q1fTNJWNtS1G0sllbbGbmdYw59BuIyatgggEEEHoRQBzviX/kPeEf+wq//pHc10dc54l/5D3hH/sKv/6R3NdHQAUUUUAFFFFABRRRQAUUUUAFFFFABWdr9j/afh+/swMtLAwQf7WMr+oFaNFOLaaaFJKSaZjeFL/+0vC2nXJOWMIRz/tL8p/UVs1yvhD/AEG/13RjwLa786IekcgyB+n611VXWSU3b+rmdFt01ff/ACCiiiszUK5Vv+Jj8SUHWPS7IsfaSQ4/9Brqq5XwX/pn9r60eft143ln1jT5V/rWtPSMpfL7/wDgXMamsox87/d/wbHVUUUVkbBRRRQAUUUUAFFFFABRRRQAVyvjH/QrnRNYHAtL0JIfSOQbWP6D866qsnxRp/8AanhnULQDLNCWQf7S/Mv6gVpSaU1cyrRbg7bmtRWX4c1D+1PDlheE5aSFd5/2hw36g1qVEk4tpmkZKSTXUKKKKQxCQoJJwBySa5fwMDc2Ooauw51G8klUn+4DtUfhg1
e8X3/9neFNQnU4kaIxJjrub5Rj88/hVzRLAaZodlZYwYYVVv8Aexz+ua1WlJvu/wAv6Ri/eqpdl+f9Mv0UUVkbBRRRQAUUUUAFFFFABRRRQAVytj/xLviJqVt0j1G1S5T03J8pH16muqrlfFf+gazoGsjhYbo20p/2JBjJ9hj9a1o6tx7r/gmNbRKXZ/8AAOqooorI2CiiigDA8a3hsvCd8UyZZkECAdSXO3j8Ca1NMsxp2lWlkuMQQrHx3wMZrA8S/wDEw8S+H9JHKiY3ko9BGPlz7E5FdVWstKcV31/QxhrUk+2n6hRRRWRsFFFFABRRRQBzniX/AJD3hH/sKv8A+kdzXR1zniX/AJD3hH/sKv8A+kdzXR0AFZWq3WuwSxjSdKsbyMrl2ub9rcqfQARPn65FatFAGPZSanfQ3EfiDSdOtoAFKiO8Nyr85+YNEgGMDHX8MVj/AA9uba38CrI88McMNzevIzOAsai5lOSewx3rqruztdQtJLW9tobm2kGHhmQOjj0IPBqhZ+GNA0+G5hstD0y2iuk8u4SG0jRZk5G1wB8w5PB9TQBz0zac/wASNQfVmtmt20SE2pnKlDH5kvnYzx/zzz7Yq/8ADvzf+Fe6J5u//j2Hl7858vJ8vr/sba2b3RdK1KKGK/0yzuo4DmJJ4FcRn/ZBHH4VdACgAAADgAUAcTqsGux+K/Cz6jqOn3FodUl8uO3snidT9kuMZYysDxkfdH4dK7esTxFo99qr6XPp17b2lzp92blGuLczI2YpIyCodD0kJ69qr/YvGf8A0HtD/wDBPL/8k0AdHRXOfYvGf/Qe0P8A8E8v/wAk1mzXHjOHxHZaR/bGhn7Taz3Pm/2TL8vltEu3H2jnPm9c8bffgA7Wiuc+xeM/+g9of/gnl/8Akmj7F4z/AOg9of8A4J5f/kmgDo6K5z7F4z/6D2h/+CeX/wCSaPsXjP8A6D2h/wDgnl/+SaAOjornPsXjP/oPaH/4J5f/AJJo+xeM/wDoPaH/AOCeX/5JoA6Oiuc+xeM/+g9of/gnl/8Akmj7F4z/AOg9of8A4J5f/kmgDo6K4vXp/Geh+HdS1b+2NDm+xWslx5X9kyrv2KWxn7QcZx1xWgtn4zKg/wBvaHyP+gPL/wDJNAD9W8O3dzrCarpWp/2ddmLyZW8gSCRc5GQeMioP7F8Wf9Dan/gujqX7F4z/AOg9of8A4J5f/kmj7F4z/wCg9of/AIJ5f/kmtVWklbT7kZOjFu+v3si/sXxZ/wBDan/gujo/sXxZ/wBDan/gujqX7F4z/wCg9of/AIJ5f/kmj7F4z/6D2h/+CeX/AOSaPbS7L7l/kL2Ee7+9/wCZWm0DxTcQSQyeLFMcilGAsEBwRg8g5FdDpenw6VpdtYQf6uBAgOOvqfxOT+NY/wBi8Z/9B7Q//BPL/wDJNH2Lxn/0HtD/APBPL/8AJNKVSUlZ/kkVGlGLuvzb/M6Oiuc+xeM/+g9of/gnl/8Akmj7F4z/AOg9of8A4J5f/kmszQ6OiuKhuPGc3iO90j+2NDH2a1gufN/smX5vMaVduPtHGPK6553e3Ol9i8Z/9B7Q/wDwTy//ACTQB0dFc59i8Z/9B7Q//BPL/wDJNH2Lxn/0HtD/APBPL/8AJNAHR0Vzn2Lxn/0HtD/8E8v/AMk0fYvGf/Qe0P8A8E8v/wAk0AdHRXOfYvGf/Qe0P/wTy/8AyTR9i8Z/9B7Q/wDwTy//ACTQB0dFc59i8Z/9B7Q//BPL/wDJNZut3HjPRrCK6/tjQ5t91b223+yZVx5syRbs/aD035x3xjigCxF4V1jTnnj0bxD9jsXlaRLdrRZPLz1AJPSpf7F8Wf8AQ2p/4Lo6l+xeM/8AoPaH/wCCeX/5Jo+xeM/+g9of/gnl/wDkmtvbye9vuX+Rj7CC2v8Ae/8AMi/sXxZ/0Nqf+C6Oj+xfFn/Q2p/4Lo6l+xeM/wDoPaH/AOCeX/5Jo+xeM/8AoPaH/wCCeX/5Jpe2l2
X3L/IPYR7v73/mVz4X1e9u7RtY18XtrbzLP5AtFj3MOmSD0rq65z7F4z/6D2h/+CeX/wCSaPsXjP8A6D2h/wDgnl/+Samc3PcuFOMNv8zo6K5z7F4z/wCg9of/AIJ5f/kmj7F4z/6D2h/+CeX/AOSags6Oiuc+xeM/+g9of/gnl/8Akms3TbjxnqN/q9r/AGxocf8AZ10ttu/smU+ZmGOXdj7Rx/rMY56Z70AdrRXOfYvGf/Qe0P8A8E8v/wAk0fYvGf8A0HtD/wDBPL/8k0AdHRXOfYvGf/Qe0P8A8E8v/wAk0fYvGf8A0HtD/wDBPL/8k0AdHRXOfYvGf/Qe0P8A8E8v/wAk0fYvGf8A0HtD/wDBPL/8k0AdHVDWdKg1rSp7C4yElXhh1VhyCPoay/sXjP8A6D2h/wDgnl/+SaPsXjP/AKD2h/8Agnl/+SaabTuhNKSsyBND8VoioPFqkKMDNghP5k807+xfFn/Q2p/4Lo6palceM9Ov9Itf7Y0OT+0bprbd/ZMo8vEMku7H2jn/AFeMcdc9q0vsXjP/AKD2h/8Agnl/+Sa09tLsvuX+Rl7CPd/e/wDMi/sXxZ/0Nqf+C6Oj+xfFn/Q2p/4Lo6l+xeM/+g9of/gnl/8Akmj7F4z/AOg9of8A4J5f/kmj20uy+5f5B7CPd/e/8x+i+H7uy1a41TU9T/tC8kiEKP5AjCIDkjAPc4roK5z7F4z/AOg9of8A4J5f/kmj7F4z/wCg9of/AIJ5f/kmolNyd2aQgoKyOjornPsXjP8A6D2h/wDgnl/+SaPsXjP/AKD2h/8Agnl/+Sako6Oiuc+xeM/+g9of/gnl/wDkmkaz8ZhSf7e0Pgf9AeX/AOSaAOkori9Bn8Z654d03Vv7Y0OH7bax3Hlf2TK2zeobGftAzjPXFaP2Lxn/ANB7Q/8AwTy//JNAB4l/5D3hH/sKv/6R3NdHXLjQvEN3rGlXeq6zps0Gn3DXAittOeJnYxSR43NM4xiQnp2rqKACiiigAooooAKKKKACiiigArCm8VeH4dcXT5bxBfK4t93kuVR3wRGZduxWPy/KWBPHHSt2vJL65hTw1r/hVyf+Egu9ZleC22nzJRJciSOUf7IQglug2EdqAPW6KKKACiiigAooooAKKKKAMHT/ABVouvTGztPtdwsm+Ms+nTrC23IYeYyBD0I689KfYeLtC1LUhp9nfCS4JcR/unVJSn3hG5AV8d9pOK4i3l0uDU9LsvBGuXV3JcTyrd2guXmiiiZHZnZTxCQ5UjG0knGDmo9GvbW/0rwFoNkCNX0qeJr63CkPaCKB0lMn93LHAz97dxmgD1aiiigAooooAKKKKAGySLFG0jnCICzH0ArL0nxNpGuQ3c1hdF47Rtk5lieLyztDc7wONpBz0wa1WZUUsxAVRkk9hXkx1nTfEQ+I+m6LqlpdX2ox/wChxQTKzTgWUanbg88gr9aAO/0fxPoWu3siabciS5MQk+aB42kiB4ZSyjemT1XI596NP8XaFquo/YLK/Ek7b/LzE6pLtOG8tyAsmO+0nFcmdRtPFWv6IPDsoc2Wm3QuGQEfZvMRFSN/7rbhnb1GwmqejX9pqdn4B0awBGp6VJG19AFIezWO2kjkEn93LMFGfvZzQB6nRRRQAUUUUAFFFFAGTdeJtJstbttHuLl0vrlgsSeRIVZiCwG8LtBwrHBPaq9/4q0S31OXSrgXc91AY2kjh06e4WMn5kJZEZQeMjntWB4+8RaNp2t+Fre91S0t5oNVW4lSWUKUi8idd5B6LkgZ9TVDxZeeH4JdV1HSPEFzD4nlhje3trO5Ym4kCAwjyRxIrZHODwx5FAHaax4p0bQZkh1G7McrRmXYkLylYwcF2CKdq/7RwPetWKWOeFJoXWSKRQyOpyGB5BB7iuDl1i08OeMtXvPEbJbJf6ba+QWGVkMfm+ZCnq2XB29Ture8C2V1p3gPQrO9Ro7mKyjV426odo+U+46fhQB0NFFFABRRRQ
AVR1XV7LRbZLi+kdEdxGgjheVmYgnAVAWJwD0Har1YnirxRYeEdFOo37DDSCGGMuE82Vs7V3HhRwSSeAATQBJB4n0a40e41ZL9FsrYss8kitGYmHVWVgGVuRwRnketN0zxFomoW99dWlyqLbfPd+dE0Dx/LkM6uFYDaOCRjA9q4aZba68HXerQalaaxcPrFpqOrf2c4ljVY5It0ahckhI0HXk7Se+Kj8QsPFkniu+8Pn7daf2JDbNJBytxIsjyGNT/ABEIccf3wKAO/wBH8T6Pr0skWnXTSSxoshjkheJih6OA6gspx94ZHvWvXD2Oq2HiT4hadfaJOtxa2emXCXU8YO1TI8RjjJ7MNjnb1HfrXcUAFFFFABRRRQAVh/8ACYaGNTGnteOk5n+zBnt5ViMucbBIV2FsjGM5zW5XnWr+K9D8R+J18PyazYWljp17GbrzrhElubiNwywxqTnaHA3N3I2juQAdPd+KtEg1h9NlF3Ne2rIWEOnTziJnX5SXRCqkqfXoTUuseKtF0G4WDUbwxStGZSqQvIUjBxvfYp2Ln+JsDg81xvjG40Wwu9YvNJ1y7g8WMEMVlbXDkzTKgEamDo6ngEkHAJ5FW7nV7Lw54u1+48QMsC6hYWxttwJE+wSB4k/vMGb7vU7xQB30ciTRJJG6vG4DKynIYHoQadWD4JsrrTvA2hWd8rLdQWMKSI3VSEHyn6dPwreoAKKKKACiiigDP1XWrLRkja8Nx+9JCLBayzscdeI1Y1Sl8Y6DFptrf/b/ADILp2jgEMLySSMudwEaqXyMHIxxjnFL4m1aw06ySC88QLoklxnyro+WD8pGQPMUr3AwRnnivPfDd5Bomq6XrGqv5elFdSt49TnVkWaR545BO+77hkCvg8A44wCBQB6IfEuhWmgWuppeRDTptsduYY2beegREUFi3BG0DIweOKu6Xq1jrVkLzT5xNCWKE7SrKwOCrKQCpB6ggGvNLFxp50TxFeK8GinXNQuRJIhVYY5hIIpGH8Kkk8np5g9a6vwVIt5eeJNVts/2dfal5lq2MLKFhiRpF9QXRue+M0AdbRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRWN4jtL6awe4sdavNNa3ikci3jhcSHGRu8yNumO2Op9sAGzRXn2mX2oW/hzwxq2qatqOpyatJZAxM8VukLyruLDyo1LKP7jEg96nufiBeW0eoXp8PO+mafqJ0+edLoeYx3hAyRlfmGWXOWGM8ZxQB3VFcjJ43OmprI17ThYzaZax3myK4Ewlicsq4YhcNuQqQeBxyRzTfDnjy31zW/7JkGnC4e3a4iNhqSXi7VIDK5UDY3zDjkHnBOKAOwooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACobuD7VZT2+7b5sbJuxnGRjNTUUAc3/wAIn/xT/hzSvtv/ACBpLV/N8r/XeSu3GM/Ln6nHvUM/gzztC1TTPt+Pt+p/2h5nk/c/fJJsxu5+5jOR1zjtXVUUAc1q/g631vUtTuLq5cQ3+nRWJjRcNGUkkkEgbPXLjAx/D3zirWj6XrFlcl9R1a1vIhGUVIbAQMTkfMzb2yeD0CjnpW3RQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB//2Q==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AlgDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oat
E/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABRRRQAVU1WybUtHvbFJ3t2ubeSETJ96MspG4e4zmrdFAHEaT4V1VdT0aXUYNItbfSreSBf7PLlrkMmzDAquxf4tuW5A54pumeEtahTQ9JvrixbR9EmEsEsTOZ7gIrLErqRtTaGBJBbJUdK7migDhB4P1n7MugefY/8I8NR+2+Zuf7QU87z/J242/f437vu9s0upeEdani1vSLO4sV0bWrgzTyyM4ngDhRKqKBtbdgkEkY3Hg4ruqKAECKFChRtAwBXy38T4g3xR8Qcf8tYf/SeKvqWvmL4lLn4neIf+u0P/pPFXThP4hpTdpXOL8kelHkj0q5to216tjo9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpR5I9KubaNtFg9oU/JHpXvfwDXb4R1Yf9RRv/RMNeIba9y+BAx4V1j/ALCjf+iIa5Mav3a9TKrO6PU6KKK8wwCiiigAooooAKKKKACiiigAooooAK+ZfiOP+Lm+Ief+W0P/AKTxV9NV80fEQZ+JniL/AK7Q/wDpPFXVhP4o07HLY96Me9Sbf85o2/5zXqj5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5o
DmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9GPepNv+c0bf8AOaA5iPHvRj3qTb/nNG3/ADmgOYjx70Y96k2/5zRt/wA5oDmI8e9e3fAr/kV9Y/7Cjf8AoiGvFdv+c17X8DP+RY1n/sKN/wCiIa48b/DXqJu56jRRRXmCCiiigAooooAKbJIkMTyyuqRopZmY4CgdSadWb4h0+XVvDOq6bCwSW7s5oEYnGGdCoP5mgCto/iMa1IrQaTqUVnInmQ3s8aJFKvYgbt4yDkblFVNP8cadqN9awpa30VteyPFZX0sSiC5dQSQhDFhkKxG4AEDjNcp4X0cpqWhpp2j6lpk0NnJFrU1xE8YmYxhQCzcSt5nzBlyAAeRnFSaTb6jcaX4Q8NPpN9b3Oi3MTXs8kDLAqQIygpIflfeduAueCc4xQB048cacdRFv9lvvshu/sI1Hyl+zmfO3Zndu+98u7bt3cZov/HGnaffXML2t9Ja2cqQ3l/FEpgtnbBAY7t3AZSSoIGecVyos9R/4RxfBf9l332sauJDd+Q32fyBd/aPN837udvG3O7d2p2rWuow6R4t8MJpN9PdazdyvZ3EcDNAUnVQWeT7qbDuyGIPyjGc0AenV80/EMf8AFzPEX/XeH/0nir6JvtMg1GwFncSXSxjB3W9zJA/H+1Gwb9a+b/GdjFp3j7XrSB53jjniw087zOcwRHl3JY9e59q6sJ/FEzDxRinYoxXqk8w3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3FGKdijFAcw3Fe0/A3/kWdZ/7Cjf+iIa8YxXs/wADv+Ra1n/sKH/0RDXHjf4a9Rp3PUKKKK8woKKKKACs/Wf7WWzV9HexW4R9zrehtjpg5G5eVOcHdg9OnNaFcz4yGnyRaNbaraQXNnc6kkUguGIjTMchBYZw2SAoDZGWHHAoAzdM+J2l3F1LaalA9lJAwSS5hcXNmGPAHnpwv/AwtdxXPSz2UuuDwkLS1ewl02SWaFFAEa71QIVHADB2x/umuhoAKKKKACiiigAr5s+II/4uV4i/67w/+k8VfSdfNvj/AP5KV4i/67w/+k8VdOE/ikz2OcxRinYoxXqmNxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxRin
YoxQFxuKMU7FGKAuNxRinYoxQFxuKMU7FGKAuNxXs3wP/5FvWf+wof/AERDXjeK9k+CH/Iua1/2FD/6Ihrkxv8ADXqXB3Z6fRRRXmGoUUUUAFc94xuHXSrexj0+yvm1G5W08u+/1C5DNucYOR8mAO7FRXQ1y/jqCzvdKstPutKi1OW7vFitbeeZoo/N2O252XJACq54BzxxQAzwj4e1Dw/LNE9h4es7ORcldLt3jdpMjBYseRjd+ldXXn/ghNHhvNInsdGjs7jVdFN6XS4d9o3Rbkwx6ZdSD7GvQKACiq639m99JYpdwNeRIJJLcSAyIp6MVzkA+tRWGsaZqrSrp2o2l4YTtlFvOsmw+jbScH60AXaKpWusaZfXk9naalZ3F1B/roYp1d4+3zKDkfjRHrGmS6m+mx6lZvfoMvarOplUepTOR+VAF2vm7x9/yUrxF/13h/8ASeKvpGvnDx6P+Lk+I/8ArvD/AOk8VdOE/iGdX4TnaKdijFeoc/MNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2inYoxQHMNop2KMUBzDaKdijFAcw2vY/gh/wAi5rX/AGFT/wCiIa8exXsPwR/5F3Wv+wqf/RENcmM+BeppSd5Hp1FFFeadAUUUUAFYXiy2e60hIo9HudTfzlZUtblbeWEgEiRXZlwQcDg559M1u1zPji+i0/RYJJb3WbQPdJGraREkkzMQwC4ZW4J9BnOKAMPwJaw6RrcmlP4f1SwuVsQY59Rvo7g+SjBRHHtY4UFskAccZ6iu21W4u7TSbu4sLQ3d5HEzQ24YL5j44XJ4GTXnui65p+na7LNLa+NdT1VbfYPt9jlooWYE7VVVGCyjJx/COa9OoA8Mv31O1uvEEUejavFqVz4bme5uZxCHeQs5aTCSNhTjYoGSMKMYGa62/wDsf/CSaN/wivkeZ/YV5j7Lj/U4j8nOO2/7v/AvevQfstv9rN35EX2kx+UZtg37M527uuM84qvYaPpmlNK2nadZ2ZmO6U28Cx7z6ttAyfrQB53pP9m/Zfhp/ZHkfa8fP5eN/k/ZX87f3/1mzOf4sd6qaf8AZf8AhDvCuzyv7c/t6PzcY87zvPb7Ru7/AHPMznt+Fen2uj6XY3k95aabZ291P/rp4oFR5O/zMBk/jRHo+lxam+pR6bZpfuMPdLAolYehfGT+dADtRj1CW0K6Zc21tc5GJLmBpkx3G0Oh/WvnPxbHeR+PNfXUJ4J7oTxb5IITEjfuIsYUsxHGP4j/AEr6Xr5y8dj/AIuR4j/6+If/AEnirpwv8QyrfAc/ijFPxRivTOPmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYoxT8UYoDmGYr2D4Jf8i9rX/YVP/oiGvIsV678E/wDkX9b/AOwqf/RENcuM+BeptQd5HptFFFeadYUUUUAFc141S3XSba7l1J9PuLS7Sa0lS3M5aYqyBPKHMm5XYbRzznjFdLXOeLkLroxhnEF6uoqbOR498Xm+VIMSDIO1l
LrkchmXrQBneF3Oo61aaxqGtxX17c6YZLKG3smt41tndCzYZmJYkR5yeOOOc12lcd4O8K6po32GTWLy0mfT9PGnWsdojBRHlSzsW5LMY06AAY75rsaACiiigAooooAK+c/HQ/4uR4j/AOviH/0nir6Mr518cD/i4/iP/r4h/wDSaGujC/xDDEu0DBxRinYoxXp3PP5huKMU7FGKLhzDcUYp2KMUXDmG4oxTsUYouHMNxRinYoxRcOYbijFOxRii4cw3FGKdijFFw5huKMU7FGKLhzDcUYp2KMUXDmG4oxTsUYouHMNxRinYoxRcOYbijFOxRii4cw3FGKdijFFw5huKMU7FGKLhzDcUYp2KMUXDmG4oxTsUYouHMNxRinYoxRcOYbijFOxRii4cw3FGKdijFFw5huKMU7FGKLhzDcUYp2KMUXDmG4oxTsUYouHMNxRinYoxRcOYbijFOxRii4cw3FGKdijFFw5huKMU7FGKLhzDcUYp2KMUXDmG4oxTsUYouHMNxRinYoxRcOYbijFOxRii4cw3FeufBT/kX9b/AOwqf/RENeS4r1r4K/8AIA1v/sKn/wBEQ1y4v4F6nThXeb9D0yiiivOO4KKKKACvMPiTqNg0utaVrG+WJdJiu9Mg8uQq12DcDOUHPSLhjjpXp9FAHE+DD4M/tFv+EeaQ332c+YGa4I2ZXP8ArOOu3pzXbUVFcwm4tZoBLJCZEZPMiOHTIxlT2I7UAS0Vwums+n+LbyLSr/Ub7TbKyk/tD7XdPcILnKlERnJIfbv3AcDK8A1naTNqNnYeDfEEmr31zca5NEl9BLOWhYTQtINkZ4TYQMbQOAc5oA9Lory+G81EeHrPxkdVvjdzaskb2hnb7OYHuvI8oRfdBCkHdjduGc9qNVvNRfRfFHipNVvorrSL6ZLW2SdlgEcBAKNH91t+GySCfmGMYoA9Qr538bj/AIuN4j/6+If/AEmhr33UbyeyszPb6dc38mQPIt2jD89/3jKvH1r578TXEt5448QTz2U9lI1xFm3nKF0xbxDkozLz14J61vhv4hzYt2pGXijFPxRivRueVzDMUYp+KMUXDmGYoxT8UYouHMMxRin4oxRcOYZijFPxRii4cwzFGKfijFFw5hmKMU/FGKLhzDMUYp+KMUXDmGYoxT8UYouHMMxRin4oxRcOYZijFPxRii4cwzFGKfijFFw5hmKMU/FGKLhzDMUYp+KMUXDmGYoxT8UYouHMMxRin4oxRcOYZijFPxRii4cwzFGKfijFFw5hmKMU/FGKLhzDMUYp+KMUXDmGYoxT8UYouHMMxRin4oxRcOYZijFPxRii4cwzFGKfijFFw5hmKMU/FGKLhzDMUYp+KMUXDmGYoxT8UYouHMMxRin4oxRcOYZijFPxRii4cwzFGKfijFFw5hmK9Y+C3/IB1v8A7Cp/9EQ15VivVvgx/wAgPXP+wqf/AEnhrmxXwL1OvBO9R+h6VRRRXAemFFFFABRRRQAVDeW5u7Ke2E81uZo2jE0JAePIxuUkEAjqODzU1FAHO+HPCf8AwjUUVvb63qVzZxoVW2uFt9mScliUiVi2cnJPJJzmo9N8Eafpl7aTJd301vYs7WNlNKrQ2pYEEoAoY4DEDcWwDxiumooA5lPA+nR6ik4ur42kd2b5NOMq/Z0nJLbwNu77xLbd20HnFF54H069v7iZ7q+jtbqdbi6sI5VFvPIuMMw27udq5AYA45BrpqKACvnzxkM/ETxJ/wBfMX/pNDX0HXz/AOL1z8Q/En/XzF/6TQ1tQdpnFj3aiYm2jbUu2jbXdzHh+0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD
2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFto21Lto20cwe0IttG2pdtG2jmD2hFtr1L4M/8gTXP+wqf/SeGvMttenfBwY0bXf8AsKn/ANJ4a58Q7xPQy6V6r9P8j0iiiiuM9kKKKKACiiigAooooAKKKKACiiigArwTxWm74g+JT/09Rf8ApNDXvdeKa5aNceO/EzKpOLyIf+SsNHtFT95nn5mm8O7d0c55ftR5ftW1/Zsn9w0f2bJ/cNP69T7nznLU7GL5ftR5ftW1/Zsn9w0f2bJ/cNH16n3DlqdjF8v2o8v2ra/s2T+4aP7Nk/uGj69T7hy1Oxi+X7UeX7Vtf2bJ/cNH9myf3DR9ep9w5anYxfL9qPL9q2v7Nk/uGj+zZP7ho+vU+4ctTsYvl+1Hl+1bX9myf3DR/Zsn9w0fXqfcOWp2MXy/ajy/atr+zZP7ho/s2T+4aPr1PuHLU7GL5ftR5ftW1/Zsn9w0f2bJ/cNH16n3DlqdjF8v2o8v2ra/s2T+4aP7Nk/uGj69T7hy1Oxi+X7UeX7Vtf2bJ/cNH9myf3DR9ep9w5anYxfL9qPL9q2v7Nk/uGj+zZP7ho+vU+4ctTsYvl+1Hl+1bX9myf3DR/Zsn9w0fXqfcOWp2MXy/ajy/atr+zZP7ho/s2T+4aPr1PuHLU7GL5ftR5ftW1/Zsn9w0f2bJ/cNH16n3DlqdjF8v2o8v2ra/s2T+4aP7Nk/uGj69T7hy1Oxi+X7UeX7Vtf2bJ/cNH9myf3DR9ep9w5anYxfL9qPL9q2v7Nk/uGj+zZP7ho+vU+4ctTsYvl+1Hl+1bX9myf3DR/Zsn9w0fXqfcOWp2MXy/ajy/atr+zZP7ho/s2T+4aPr1PuHLU7GL5ftR5ftW1/Zsn9w0f2bJ/cNH16n3DlqdjF8v2o8v2ra/s2T+4aP7Nk/uGj69T7hy1Oxi+X7UeX7Vtf2bJ/cNH9myf3DR9ep9w5anYxfL9qPL9q2v7Nk/uGj+zZP7ho+vU+4ctTsYvl+1Hl+1bX9myf3DR/Zsn9w0fXqfcOWp2MXy/ajy/atr+zZP7ho/s2T+4aPr1PuHLU7GL5ftR5ftW1/Zsn9w0f2bJ/cNH16n3DlqdjF8v2o8v2ra/s2T+4aP7Nk/uGj69T7hy1Oxi+X7UeX7Vtf2bJ/cNH9myf3DR9ep9w5anYxfL9qPL9q2v7Nk/uGj+zZP7ho+vU+4ctTsYvl+1Hl+1bX9myf3DR/Zsn9w0fXqfcOWp2MXy/avSPg+MaTrw/6iv/ALbwVyP9myf3DXafCqMw2XiGMjBGq/8AttBS+swq+7Fnp5UpKs7rp+qPQKKKKD6AKKKKACiiigAooooAKKKKACuM1bxJrv27Xf7GgsGtdDRTOtyHL3LmMSlEKkBMIV5Ibk9BiuzrjNW8N679u13+xp7BbXXEUTvclw9s4jERdFUEPlAvBK8jqc0AdA3iDS4dDttYu72CzsbiNJEluZFjXDjKgknGea8wXxn4Yi8T+JJH1bTpI576N4pBcoVdfs0K5BzyMqw+oNes2NlDp+m2tjCCYbaJIU3f3VAA/QVT0rSDp+o61cuyOuoXi3CKF+4BBFHg/jGT+NcuMwkMVS9lN2XkF2tjz3/hOvCf/QU03/wJT/Gj/hOvCf8A0FNN/wDAlP8AGvVdif3V/KjYn91fyryf9XcN/M/vDmqd19yPKv8AhOvCf/QU03/wJT/Gj/hOvCf/AEFNN/8AAlP8a9V2J/dX8qNif3V/Kj/V3DfzP7w5qndfcjyr/hOvCf8A0FNN/wDAlP8AGj/hOvCf/QU03/wJT/GvVdif3V/KjYn91fyo/
wBXcN/M/vDmqd19yPKv+E68J/8AQU03/wACU/xo/wCE68J/9BTTf/AlP8a9UaNSpG1eR6VneHdJOi+GdK0qVklksrSK3aRVwGKIFJH5Uf6u4b+Z/eHNU7r7keef8J14T/6Cmm/+BKf40f8ACdeE/wDoKab/AOBKf416rsT+6v5UbE/ur+VH+ruG/mf3hzVO6+5HlX/CdeE/+gppv/gSn+NH/CdeE/8AoKab/wCBKf416rsT+6v5UbE/ur+VH+ruG/mf3hzVO6+5HlX/AAnXhP8A6Cmm/wDgSn+NH/CdeE/+gppv/gSn+Neq7E/ur+VGxP7q/lR/q7hv5n94c1TuvuR5V/wnXhP/AKCmm/8AgSn+NH/CdeE/+gppv/gSn+Neha/pB1jToraNkjZLy2uCzLnIinSQj8QhH41p7E/ur+VH+ruG/mf3hzVO6+5HlX/CdeE/+gppv/gSn+NH/CdeE/8AoKab/wCBKf416rsT+6v5UbE/ur+VH+ruG/mf3hzVO6+5HlX/AAnXhP8A6Cmm/wDgSn+NH/CdeE/+gppv/gSn+Neq7E/ur+VGxP7q/lR/q7hv5n94c1TuvuR5V/wnXhP/AKCmm/8AgSn+NH/CdeE/+gppv/gSn+Neq7E/ur+VGxP7q/lR/q7hv5n94c1TuvuR5V/wnXhP/oKab/4Ep/jR/wAJ14T/AOgppv8A4Ep/jXoU+kGbxRYasGQR21ncW7R7eWMjwsD+HlH8609if3V/Kj/V3DfzP7w5qndfcjyr/hOvCf8A0FNN/wDAlP8AGj/hOvCf/QU03/wJT/GvVdif3V/KjYn91fyo/wBXcN/M/vDmqd19yPKv+E68J/8AQU03/wACU/xo/wCE68J/9BTTf/AlP8a9V2J/dX8qNif3V/Kj/V3DfzP7w5qndfcjyr/hOvCf/QU03/wJT/Gj/hOvCf8A0FNN/wDAlP8AGvVdif3V/KjYn91fyo/1dw38z+8Oap3X3I8q/wCE68J/9BTTf/AlP8aP+E68J/8AQU03/wACU/xr0LS9IOn6jrVy7I66heLcIoX7gEEUeD+MZP41p7E/ur+VH+ruG/mf3hzVO6+5HlX/AAnXhP8A6Cmm/wDgSn+NH/CdeE/+gppv/gSn+Neq7E/ur+VGxP7q/lR/q7hv5n94c1TuvuR5V/wnXhP/AKCmm/8AgSn+NH/CdeE/+gppv/gSn+Neq7E/ur+VGxP7q/lR/q7hv5n94c1TuvuR5V/wnXhP/oKab/4Ep/jR/wAJ14T/AOgppv8A4Ep/jXquxP7q/lSNGpUjavI9KP8AV3DfzP7w5qndfcjyv/hOvCf/AEFNN/8AAlP8aP8AhOvCf/QU03/wJT/GvQ/DuknRfDOlaVKySyWVpFbtIq4DFECkj8q0tif3V/Kj/V3DfzP7w5qndfcjyr/hOvCf/QU03/wJT/Gj/hOvCf8A0FNN/wDAlP8AGvVdif3V/KjYn91fyo/1dw38z+8Oap3X3I8q/wCE68J/9BTTf/AlP8aP+E68J/8AQU03/wACU/xr1XYn91fyo2J/dX8qP9XcN/M/vDmqd19yPKv+E68J/wDQU03/AMCU/wAaP+E68J/9BTTf/AlP8a9V2J/dX8qzNf0g6xp0VtGyRsl5bXBZlzkRTpIR+IQj8aP9XcN/M/vDmqd19yPPf+E68J/9BTTf/AlP8aP+E68J/wDQU03/AMCU/wAa9V2J/dX8qNif3V/Kj/V3DfzP7w5qndfcjyr/AITrwn/0FNN/8CU/xo/4Trwn/wBBTTf/AAJT/GvVdif3V/KjYn91fyo/1dw38z+8Oap3X3I8q/4Trwn/ANBTTf8AwJT/ABo/4Trwn/0FNN/8CU/xr1XYn91fyo2J/dX8qP8AV3DfzP7w5qndfcjyr/hOvCf/AEFNN/8AAlP8aP8AhOvCf/QU03/wJT/GvVdif3V/KsyfSDN4osNWDII7azuLdo9vLGR4WB/Dyj+dH+ruG/mf3hzVO6+5Hnv/A
AnXhP8A6Cmm/wDgSn+NH/CdeE/+gppv/gSn+Neq7E/ur+VGxP7q/lR/q7hv5n94c1TuvuR5V/wnXhP/AKCmm/8AgSn+NH/CdeE/+gppv/gSn+Neq7E/ur+VGxP7q/lR/q7hv5n94c1TuvuR5V/wnXhP/oKab/4Ep/jR/wAJ14T/AOgppv8A4Ep/jXquxP7q/lRsT+6v5Uf6u4b+Z/eHNU7r7keVf8J14T/6Cmm/+BKf41vfDa/tNSHiS7sZYpbeTVRsaJgynFtADgj3Brt9if3V/Ks3StKfTtQ1q5aRWXULxblFUY2AQRR4P4xk/jXZgsqpYOo6kG22ra/15BeT3f4GpRRRXqAFFFFABRRRQAUUUUAFFFFABXnN34g11dN1TxTFqISx0/UXt103yEKSwxzeU5L43hzhmGDgcDBr0auPufA808t1arq5TQ7y9+23Fj9nBdnLh2US7uEZxkjaTyQCM0AX/FupXtjbabaadMsF1qV/HZrcMgfyQVZ2YKeCdqNjPGSK5678Sa1o0OuaS94l7qFtcWUNneTQqpxdOEUuqAKSjBzwACAOK6LU/Dt3qti0c+rEXcN/9tsbhbdR9mwflQrn5wAWUk4JDHpVI+CTd6dqy6lqbT6lqUkUr3kMIiELREGHYhLYCkZ5JyScnmgCO01m78P67qGm67qxvbSHT11BLyWJI3QbyjoQgAPIUjAzyRzxSeGdX1y98X6rbaq3lQGxtru3svLUG2DvKu1mAyWxGpOSQCSBxVy08Jedc3134juYNYnu4EtWQ2ojhWFGLBQhZsksckknoOmKZpPgLRNE8VT63p9jZW2+2jgjhhtlTymBcs4Yd2DAHj+EdewB1NFFFAGd4g1T+xPDmp6rs8z7Fay3AT+8UUtj8cVzmm3uu6Xr2h2uq6qNRj1iCUupgSP7PMiB/kKgEoRuGGyeBzXV6hYwanpt1YXS7re6heGVc4yrAgj8jXN2fhHU4p4Li88Qfap7C0kttOf7GE8kuoXzZBuPmPhQP4R145oAdq11q2peL/7B03U20yOCwF5LPHCkjuzuyIuHBAUbGJ4yeORWTYeJNZ8SReHtOgvF067uoLqa+uYIlcj7PIISIw4Kjc7Z5BwBj3rdv/Deoy39rqmn60lrqiWn2O4mktBKk6Z3Z2bl2sG3Ec4G4gg1XHgo2Nnow0bUmtb7S45I0uJ4fOEyyYMnmKCudzANwRgigDHm1jxRe+HYorKS6kv7LWpLG+n0+GHzJYUD/Oqy5RSf3effOK6fwnqK6jpMn+m311PBO0Nx9vhjimikGDsZY1VeAQQQOQQcmoLTw5qWlaOltpesRx3j3ElzdXNzaeaLh5CWY7A67eTxg8AAc1c8P6EdFhvHmvGvL2+uDc3M7IEDOVVQFUfdUKqgDJ6dTQBsUUUUAc94s1K+tI9KsdOmW3utTvltBcFA/krseRmAPBO2MgZ4ye9M8NX+of2trWiajd/bZNOeJorpo1R5I5EyAwUBdwIYZAGRjirviDRDrdpbrFdG0u7S4S6tbgIH8uRQRypxuBVmBGRweoqlp3hvULGaa8bWRJqF5dxz3swtQqSxIu0QohY7BjHOSc59aAOdv/EOurp2t+J4dSEdlpV/JbrpvkIUliikCSFnI3hyQ5GCAMDg81Z1XVdfvJ/E95pmqLZwaD8kVsYEdbmRYVmfzCw3AHeFG0jGM89Ku3ngea5lvbVNXMeiX92Lu6sfs4Ls5IZ1WTd8qMy5I2k8nBGafq3g66vrvVTY6y1jZ6wqrqEAtxIzYQRlo33DYSgCkkN0yMGgDF1TxlqGo61Dp+ljVIYl02HUJTp1rFNMfMzgZl+QKoHTBZi3HQ13OjXsOpaJY3tvdG7imgR1uCu0yZH3ivGCe4xxWPe+FbhNVTUdB1NNMnNmtlKr23no0aElCBuXDLubByRzyDWvomk2+haJZ6Val2htYhGrOcs2OpPuTk/jQBfooooA5TVLnV9U8XyaHpuqNpkVrYJdyzRwJ
K8jyO6ouHBAUeWxOBk5HIp2m+KZpfhu/iW7hQ3FvZzTTxx8Kzxbg2PQEocfWrGr+Hr251pdX0jVV069Nt9kmMlsJ0kjDFl+XcuGUlsHJHzHINFh4WSx0tdG+2NLo32A2j2zxjfI7E75TJ1ywJyMYyc0AY9hqWu6Rqeg/wBr6qL+HWIJTLH9nSMW8qxeb+7KgEpgMMMSehzVXS9d19bPw14gvdRWe0124jjk0/yEVLdZlZoyjAbiRhQdxOcnpWzpPhK7tdQ0+41TWTqMemQNDZR/ZhEVDAKWkO4732jbkBRyeOah03wRNZS6Zbz6wbjR9JlM1jZ/ZwrqcMqB5Nx3hAxwAq9BnOKAK+srrdl4n0SxsvE+oSPf3jSS28kFsY47WMbpOREG7ogOc/MOa7isiHRNviu612a4813tUtLeLZgQIGLPznksxXPA+6BWvQAUUUUAec3fiDXV03VPFMWohLHT9Re3XTfIQpLDHN5TkvjeHOGYYOBwMGul8W6le2Ntptpp0ywXWpX8dmtwyB/JBVnZgp4J2o2M8ZIqhc+B5p5bq1XVymh3l79tuLH7OC7OXDsol3cIzjJG0nkgEZrQ1Pw7d6rYtHPqxF3Df/bbG4W3UfZsH5UK5+cAFlJOCQx6UAc7d+JNa0aHXNJe8S91C2uLKGzvJoVU4unCKXVAFJRg54ABAHFWm1TXdEvdb0qS/TVJ4tIOo2U90kcOHBZSjlQq7chSDxgE5PerZ8Em707Vl1LU2n1LUpIpXvIYREIWiIMOxCWwFIzyTkk5PNDeCn1GHVzr2qG9utSshYGW3g8hYYRuOEXc3JLEkknOBwAMUAZXg7xJe6l4ht7Qavc6hbS6ebmcX1ktq8cm5AvlAKpkQ5bJG4DC/Nzz6FXN2HhvURrdpqms6yl/LZQSQ2yw2ggA37dzP8zbmIUDjA68V0lABWd4g1T+xPDmp6rs8z7Fay3AT+8UUtj8cVo1W1Cxg1PTbqwul3W91C8Mq5xlWBBH5GgDlNNvdd0vXtDtdV1UajHrEEpdTAkf2eZED/IVAJQjcMNk8DmrOrXWral4v/sHTdTbTI4LAXks8cKSO7O7Ii4cEBRsYnjJ45FNs/COpxTwXF54g+1T2FpJbac/2MJ5JdQvmyDcfMfCgfwjrxzVm/8ADeoy39rqmn60lrqiWn2O4mktBKk6Z3Z2bl2sG3Ec4G4gg0AYVh4k1nxJF4e06C8XTru6gupr65giVyPs8ghIjDgqNztnkHAGPeoJvFeqx6TawXupxWHlavPpt/rAhXbGsYYo+GyiF/kBJBUEn2rcHgo2Nnow0bUmtb7S45I0uJ4fOEyyYMnmKCudzANwRgirVn4dvtK0L7HpuqxrfSXD3Nzd3Nr5ondyS+UDLgZIxg8BQOaAKngfxE+t/wBr2j3y6iNOuhFFfKgUXEbIrq2FwpOSRlQAQAR1rrayNA0L+xYrp5bpry+vZ/tF1csgTe+0KAFH3VCqoA56dSa16ACue8WalfWkelWOnTLb3Wp3y2guCgfyV2PIzAHgnbGQM8ZPeuhrI8QaIdbtLdYro2l3aXCXVrcBA/lyKCOVONwKswIyOD1FAFLw1f6h/a2taJqN39tk054miumjVHkjkTIDBQF3AhhkAZGOK52/8Q66una34nh1IR2WlX8luum+QhSWKKQJIWcjeHJDkYIAwODzXRad4b1CxmmvG1kSaheXcc97MLUKksSLtEKIWOwYxzknOfWqd54HmuZb21TVzHol/di7urH7OC7OSGdVk3fKjMuSNpPJwRmgCXXrnVtP8W+H2h1VxYX16baWy8iPaQIJXzvILZ3IOhFN1C51nWPFl9o+l6qdMi0+zhmeRIEkaWWVpNoO8H5AI+cYJ3dRipfEfh3WtZ1bTryy1uzs4tPm+0QxS6e0xMmx0JLCVcjDnjHXvT9R8N6jNqh1TS9aWwvZ7RbW7Y2glSUKSVZV3DawLvgksMHkHFAF3wprEmv+FdN1WaNY5rmBWlRfu
h+jY9sg4rYqjo+lW+h6NZ6Xabvs9pCsSFjliAMZJ9T1NXqACuF1TUtfvr7xLLpeqLZQ6EqrFB5COtzL5ImbzCwyFwyr8pB6nJruq5TV/B93e32qSafrRsLfV41jv4vs4kZsLs3RtuGximFyQ3QHGaAJNa8Sz2/gBNdsY0W4uobcwLJyqPOyIpPqAZAfwqHTrjWLHxTN4fvdWa/E+nG8t7qS3jR4nVwjAhAFK/OhHGeoJNXbzwx9v0i/0ia9ZdOliijs4oogrWnlgYIbncdyqwz0xik0vw9fW2q3Gr6nqqX2ovbC1hdLXyY4owdx+TcSSWwSc9hgCgCv4QutTkvvENjqepPqDWF+sMUzwpGdpgifGEAHVz7+9N8P3OrR+Mtc0rUdVe/hgtrWeHdBHH5ZkaYMBtGSPkXqTTvD3hzW9I1nUb691yzvItQlE88UWnNCd4jWMbWMrYGEHGD9abp/hvXbTxbda3NrtlNHdpHFNbrprIfLjLlAH844P7w5ODnHQUAdXRRRQAV5zd+INdXTdU8UxaiEsdP1F7ddN8hCksMc3lOS+N4c4Zhg4HAwa9Grj7nwPNPLdWq6uU0O8vfttxY/ZwXZy4dlEu7hGcZI2k8kAjNAF/xbqV7Y22m2mnTLBdalfx2a3DIH8kFWdmCngnajYzxkiqekXOsTX2v+Hp9WMl1ZCGS31E26b/LlBI3IAELAo4yABgjirup+HbvVbFo59WIu4b/7bY3C26j7Ng/KhXPzgAspJwSGPSk03w/f6emqXbapFNrWoFS921riJNi7UURB87Rycbskk80AVvCV1quq+FLoXOpmS/ju7y1S8eBMjy5nRWKKApICimaNd6pb+M7nRZtXk1e1ishPPLLDGjW0xYBEJjVR8y7mwRkbQc80aF4Z17RdJ1Gx/wCEhtZHuWnmgmTTShgmldnLEGVgwBY4Xj6mp/Cfh3VPDkP2a41Sxu7Yhncx2LxTSykgmR5GmfcTznj06AYoA6aiiigAooooAKiubiK0tZrmdwkMKNJI5/hUDJP5VLVLWVun0PUFsYo5bw20ggjlAKNJtO0NnjBOM0AZOleMbXVL60tW07UbI30TTWcl3GqrcIACdu1iQcEHDBTimaf4407Ub61hS1vora9keKyvpYlEFy6gkhCGLDIViNwAIHGa5PwxpDR63ow0+z12MRWUsGoPqsThIAUGFh38Kd4HEXy7R7CpdJt9RuNL8IeGn0m+t7nRbmJr2eSBlgVIEZQUkPyvvO3AXPBOcYoA6ceONOOoi3+y332Q3f2Eaj5S/ZzPnbszu3fe+Xdt27uM0X/jjTtPvrmF7W+ktbOVIby/iiUwWztggMd27gMpJUEDPOK5UWeo/wDCOL4L/su++1jVxIbvyG+z+QLv7R5vm/dzt4253bu1O1a11GHSPFvhhNJvp7rWbuV7O4jgZoCk6qCzyfdTYd2QxB+UYzmgD06iqV9pkGo2As7iS6WMYO63uZIH4/2o2DfrXzr41n1vSvHWs6dpesawllbyxrGjarckrmGNjyXJPLE8+tXTpupLlQH0vRXyX/a3iz/oN6t/4NLj/wCLo/tbxZ/0G9W/8Glx/wDF10fU6ndDsu6+8+tKK+S/7W8Wf9BvVv8AwaXH/wAXR/a3iz/oN6t/4NLj/wCLo+p1O6Cy7r7z60or5L/tbxZ/0G9W/wDBpcf/ABdH9reLP+g3q3/g0uP/AIuj6nU7oLLuvvPrSivkv+1vFn/Qb1b/AMGlx/8AF0f2t4s/6Derf+DS4/8Ai6PqdTugsu6+8+tKK+S/7W8Wf9BvVv8AwaXH/wAXR/a3iz/oN6t/4NLj/wCLo+p1O6Cy7r7z60or5L/tbxZ/0G9W/wDBpcf/ABdH9reLP+g3q3/g0uP/AIuj6nU7oLLuvvPrSivkv+1vFn/Qb1b/AMGlx/8AF0f2t4s/6Derf+DS4/8Ai6PqdTugsu6+8+tKK+S/7W8Wf9BvVv8AwaXH/wAXR/a3iz/oN6t/4
NLj/wCLo+p1O6Cy7r7z60or5L/tbxZ/0G9W/wDBpcf/ABdH9reLP+g3q3/g0uP/AIuj6nU7oLLuvvPrSivkv+1vFn/Qb1b/AMGlx/8AF0f2t4s/6Derf+DS4/8Ai6PqdTugsu6+8+tKK+S/7W8Wf9BvVv8AwaXH/wAXR/a3iz/oN6t/4NLj/wCLo+p1O6Cy7r7z60or5L/tbxZ/0G9W/wDBpcf/ABdH9reLP+g3q3/g0uP/AIuj6nU7oLLuvvPrSivkv+1vFn/Qb1b/AMGlx/8AF0f2t4s/6Derf+DS4/8Ai6PqdTugsu6+8+tKK+S/7W8Wf9BvVv8AwaXH/wAXR/a3iz/oN6t/4NLj/wCLo+p1O6Cy7r7z6RvPHOh2F+LG5OpLcNI0SKNJum8xlySEIjIfgE5XIwM9K17DUV1FWkjtbqKDarRyTxeX5mfRGw647hlX2zXylJe+JJpoZpdU1F5YCWidtSnLRkgglTu4JBI47GopZNdnM5mvr2T7Rt87ffzHzNv3d2W5x2z0o+p1O6Cy7r7z6+or43NhfG2ktj5nkSSea8X2qXaz/wB4jOCferF6da1K2W2v7y8uoEIKxT30zqpHTALYo+p1O6Cy7r7z7Aor5Ii1HxPBEkUOr6nHGgCqiancAKB0AAbgU/8AtbxZ/wBBvVv/AAaXH/xdH1Op3QWXdfefWlFfJf8Aa3iz/oN6t/4NLj/4uj+1vFn/AEG9W/8ABpcf/F0fU6ndBZd1959aUV8l/wBreLP+g3q3/g0uP/i6P7W8Wf8AQb1b/wAGlx/8XR9Tqd0Fl3X3n1pRXyX/AGt4s/6Derf+DS4/+Lo/tbxZ/wBBvVv/AAaXH/xdH1Op3QWXdfefWlFfJf8Aa3iz/oN6t/4NLj/4uj+1vFn/AEG9W/8ABpcf/F0fU6ndBZd1959aUV8l/wBreLP+g3q3/g0uP/i6P7W8Wf8AQb1b/wAGlx/8XR9Tqd0Fl3X3n1pRXyX/AGt4s/6Derf+DS4/+Lo/tbxZ/wBBvVv/AAaXH/xdH1Op3QWXdfefWlFfJf8Aa3iz/oN6t/4NLj/4uj+1vFn/AEG9W/8ABpcf/F0fU6ndBZd1959aUV8l/wBreLP+g3q3/g0uP/i6P7W8Wf8AQb1b/wAGlx/8XR9Tqd0Fl3X3n1pRXyX/AGt4s/6Derf+DS4/+Lo/tbxZ/wBBvVv/AAaXH/xdH1Op3QWXdfefWlFfJf8Aa3iz/oN6t/4NLj/4uj+1vFn/AEG9W/8ABpcf/F0fU6ndBZd1959aUV8l/wBreLP+g3q3/g0uP/i6P7W8Wf8AQb1b/wAGlx/8XR9Tqd0Fl3X3n1pRXyX/AGt4s/6Derf+DS4/+Lo/tbxZ/wBBvVv/AAaXH/xdH1Op3QWXdfefWlFfJf8Aa3iz/oN6t/4NLj/4uj+1vFn/AEG9W/8ABpcf/F0fU6ndBZd1959aUV8l/wBreLP+g3q3/g0uP/i69p+Cd5qF74Y1U6leXN1NHqTIrXE7ylV8mI4BYk4ySfxrOrh5U1dg7dGj0uiiisBBRRRQAUUUUAFFFFABRRRQAUUUUAFfNHxEhupPiZ4iMEwRfOhBB9fs8VfS9fM3xGtZJ/ib4hZLpogJoRtB/wCneLmunCfxBN262+Vznfsuof8AP0v60fZdQ/5+l/Wo/wCz5v8AoIP+dH9nzf8AQQf869QXMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l
1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/Wj7LqH/AD9L+tR/2fN/0EH/ADo/s+b/AKCD/nQHMv5l/wCAkn2XUP8An6X9aPsuof8AP0v61H/Z83/QQf8AOj+z5v8AoIP+dAcy/mX/AICSfZdQ/wCfpf1o+y6h/wA/S/rUf9nzf9BB/wA6P7Pm/wCgg/50BzL+Zf8AgJJ9l1D/AJ+l/WvavgYsieGdZWVgzjVGyR3/AHENeI/2fN/0EH/OvbfgXG0XhfWEaQyEao3zHv8AuIa5MZ8C9RqSfVP0Vj1KiiivNGFFFFABRRRQAUUUUAFFULfW9Ju9Ql0+21SymvYs+Zbx3CNImOuVByKdBrGmXOoS6fb6lZy3sPMttHOrSJ/vKDkfjQBdoql/bGmf2p/Zf9pWf9oY3fZPPXzcYznZnPT2om1jTLbUYtPn1KzivphmO2edVkf6KTk/hQBdr5o+IltbzfEzxE0spRhPCAN2OPs8VfS9fMvxHjsX+JviE3JIfzocYJ6fZ4vSunCfxBO/S/y3Of8AsNl/z8H/AL7NH2Gy/wCfg/8AfZqHydJ/vN+bUeTpP95vzavUFeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7
zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32aPsNl/z8H/AL7NQ+TpP95vzajydJ/vN+bUBeXef3E32Gy/5+D/AN9mj7DZf8/B/wC+zUPk6T/eb82o8nSf7zfm1AXl3n9xN9hsv+fg/wDfZo+w2X/Pwf8Avs1D5Ok/3m/NqPJ0n+835tQF5d5/cTfYbL/n4P8A32a9p+BqJH4Z1lI23INUbBzn/lhDXiHk6T/eb82r274FCJfC+sCD/V/2o23r/wA8IfWuTGfAvUab6uXzVj1KiiivNGFFFFABRRWfq+qNpNolyNPvb1DIFdbOMSOi4PzbcgkcAYXJ56UAaFZfiZrtPCmsNp+77aLGY2+zr5mw7ce+cUaR4k0fXd403UIZpY/9ZDkrLH/vRthl/ECtSgDy/wAJ3Fzpg8K2lpqVpfw6nYO3kR28afZSsQYMpUbtu7CneSSSOc8VW0f7D/wjHw7+x+V/a325PN2483d5cn2rd367t2e+M9q9Ms9G0vTria4sdNs7aec5lkggVGkP+0QMn8aINH0u21CXULfTbOK9m/1tzHAqyP8A7zAZP40AeYD7L/wg0f8Aqv8AhIf+El9vO+0/buff/VZ/4B7U7WPsX/CK/ED7V5X9sf2i/lbsedv2x/Zdvf8Aubcd8+9em/2Ppf8Aan9qf2bZ/wBoY2/a/IXzcYxjfjPT3om0fS7nUYtQn02zlvoRiO5eBWkT6MRkfhQAX0epS2AXT7m1trvjMlxA0ye42h0P6183eNJvsvj7Xo9Znt7i98+IvJDCYkYeRFjClmI4/wBo/wBK+nq+aviFN5fxL8RD7KZf38J3Af8ATvFxX
Thf4hMldW/J2Ob+3aX6LR9u0v0Wl+0/9Q9vyo+0/wDUPb8q9Mz9mu0v/AkJ9u0v0Wj7dpfotL9p/wCoe35Ufaf+oe35UB7Ndpf+BIT7dpfotH27S/RaX7T/ANQ9vyo+0/8AUPb8qA9mu0v/AAJCfbtL9Fo+3aX6LS/af+oe35Ufaf8AqHt+VAezXaX/AIEhPt2l+i0fbtL9FpftP/UPb8qPtP8A1D2/KgPZrtL/AMCQn27S/RaPt2l+i0v2n/qHt+VH2n/qHt+VAezXaX/gSE+3aX6LR9u0v0Wl+0/9Q9vyo+0/9Q9vyoD2a7S/8CQn27S/RaPt2l+i0v2n/qHt+VH2n/qHt+VAezXaX/gSE+3aX6LR9u0v0Wl+0/8AUPb8qPtP/UPb8qA9mu0v/AkJ9u0v0Wj7dpfotL9p/wCoe35Ufaf+oe35UB7Ndpf+BIT7dpfotH27S/RaX7T/ANQ9vyo+0/8AUPb8qA9mu0v/AAJCfbtL9Fo+3aX6LS/af+oe35Ufaf8AqHt+VAezXaX/AIEhPt2l+i0fbtL9FpftP/UPb8qPtP8A1D2/KgPZrtL/AMCQn27S/RaPt2l+i0v2n/qHt+VH2n/qHt+VAezXaX/gSE+3aX6LR9u0v0Wl+0/9Q9vyo+0/9Q9vyoD2a7S/8CQn27S/RaPt2l+i0v2n/qHt+VH2n/qHt+VAezXaX/gSE+3aX6LR9u0v0Wl+0/8AUPb8qPtP/UPb8qA9mu0v/AkJ9u0v0Wj7dpfotL9p/wCoe35Ufaf+oe35UB7Ndpf+BIT7dpfotH27S/RaX7T/ANQ9vyo+0/8AUPb8qA9mu0v/AAJCfbtL9Fo+3aX6LS/af+oe35Ufaf8AqHt+VAezXaX/AIEhPt2l+i0fbtL9FpftP/UPb8qPtP8A1D2/KgPZrtL/AMCQn27S/RaPt2l+i0v2n/qHt+VH2n/qHt+VAezXaX/gSE+3aX6LR9u0v0Wl+0/9Q9vyo+0/9Q9vyoD2a7S/8CQn27S/RaPt2l+i0v2n/qHt+VH2n/qHt+VAezXaX/gSE+3aX6LR9u0v0Wl+0/8AUPb8qPtP/UPb8qA9mu0v/AkJ9u0v0Wj7dpfotL9p/wCoe35Ufaf+oe35UB7Ndpf+BIT7dpfotH27S/RaX7T/ANQ9vyo+0/8AUPb8qA9mu0v/AAJCfbtL9Fo+3aX6LS/af+oe35Ufaf8AqHt+VAezXaX/AIEhPt2l+i0fbtL9FpftP/UPb8qPtP8A1D2/KgPZrtL/AMCQn27S/RaPt2l+i0v2n/qHt+VH2n/qHt+VAezXaX/gSE+3aX6LXtPwNeKTw1rLw48s6ocY/wCuENeL/af+oe35V7R8Dm3+GtZbyzHnVD8p7fuIa5cZ8C9SoRs9n83c9QooorzTQKKKKACsTxLLMIdPtLfVJtOlvbsQLLBAkjt8jsVG/IXhSdxBxt6c1t1zfjY6eNHg+2yX0c/2pDYtp6b7gXGGx5YwQTt35yMbd2aAMq68F+GpdQbTZBdvrlxavcQ6tPK0lzHsZV3JIT8pUuvyjC89K7muI8IJJNrc13qEPiObUDbGNbvVbaKGNI9wJRFjwAScE8ZO3rxXb0AFFFFABRRRQAV82fEC8lt/iV4iRLd5AZ4TlVJ/5d4q+k6+b/HwvT8SfEX2by9nnw53KTz9ni966ML/ABDOry8vvWt56HN/2pP/AM+Mv/fBo/tSf/nxl/74NSbdV9Yf++D/AI0bdV9Yf++D/jXp6HNaj2j97I/7Un/58Zf++DR/ak//AD4y/wDfBqTbqvrD/wB8H/GjbqvrD/3wf8aNAtR7R+9kf9qT/wDPjL/3waP7Un/58Zf++DUm3VfWH/vg/wCNG3VfWH/vg/40aBaj2j97I/7Un/58Zf8Avg0f2pP/AM+Mv/fBqTbqvrD/AN8H/GjbqvrD/wB8H/GjQLUe0fvZH/ak/wDz4y/98Gj+1J/+fGX/AL4NSbdV9Yf++D/jRt1X1h/74P8AjRoFq
PaP3sj/ALUn/wCfGX/vg0f2pP8A8+Mv/fBqTbqvrD/3wf8AGjbqvrD/AN8H/GjQLUe0fvZH/ak//PjL/wB8Gj+1J/8Anxl/74NSbdV9Yf8Avg/40bdV9Yf++D/jRoFqPaP3sj/tSf8A58Zf++DR/ak//PjL/wB8GpNuq+sP/fB/xo26r6w/98H/ABo0C1HtH72R/wBqT/8APjL/AN8Gj+1J/wDnxl/74NSbdV9Yf++D/jRt1X1h/wC+D/jRoFqPaP3sj/tSf/nxl/74NH9qT/8APjL/AN8GpNuq+sP/AHwf8aNuq+sP/fB/xo0C1HtH72R/2pP/AM+Mv/fBo/tSf/nxl/74NSbdV9Yf++D/AI0bdV9Yf++D/jRoFqPaP3sj/tSf/nxl/wC+DR/ak/8Az4y/98GpNuq+sP8A3wf8aNuq+sP/AHwf8aNAtR7R+9kf9qT/APPjL/3waP7Un/58Zf8Avg1Jt1X1h/74P+NG3VfWH/vg/wCNGgWo9o/eyP8AtSf/AJ8Zf++DR/ak/wDz4y/98GpNuq+sP/fB/wAaNuq+sP8A3wf8aNAtR7R+9kf9qT/8+Mv/AHwaP7Un/wCfGX/vg1Jt1X1h/wC+D/jRt1X1h/74P+NGgWo9o/eyP+1J/wDnxl/74NH9qT/8+Mv/AHwak26r6w/98H/GjbqvrD/3wf8AGjQLUe0fvZH/AGpP/wA+Mv8A3waP7Un/AOfGX/vg1Jt1X1h/74P+NG3VfWH/AL4P+NGgWo9o/eyP+1J/+fGX/vg0f2pP/wA+Mv8A3wak26r6w/8AfB/xo26r6w/98H/GjQLUe0fvZH/ak/8Az4y/98Gj+1J/+fGX/vg1Jt1X1h/74P8AjRt1X1h/74P+NGgWo9o/eyP+1J/+fGX/AL4NH9qT/wDPjL/3wak26r6w/wDfB/xo26r6w/8AfB/xo0C1HtH72R/2pP8A8+Mv/fBo/tSf/nxl/wC+DUm3VfWH/vg/40bdV9Yf++D/AI0aBaj2j97I/wC1J/8Anxl/74NH9qT/APPjL/3wak26r6w/98H/ABo26r6w/wDfB/xo0C1HtH72R/2pP/z4y/8AfBo/tSf/AJ8Zf++DUm3VfWH/AL4P+NG3VfWH/vg/40aBaj2j97I/7Un/AOfGX/vg0f2pP/z4y/8AfBqTbqvrD/3wf8aNuq+sP/fB/wAaNAtR7R+9kf8Aak//AD4y/wDfBo/tSf8A58Zf++DUm3VfWH/vg/40bdV9Yf8Avg/40aBaj2j97I/7Un/58Zf++DR/ak//AD4y/wDfBqTbqvrD/wB8H/GjbqvrD/3wf8aNAtR7R+9kf9qT/wDPjL/3waP7Un/58Zf++DUm3VfWH/vg/wCNG3VfWH/vg/40aBaj2j97I/7Un/58Zf8Avg0f2pP/AM+Mv/fBqTbqvrD/AN8H/GjbqvrD/wB8H/GjQLUe0fvZH/ak/wDz4y/98Gj+1J/+fGX/AL4NSbdV9Yf++D/jRt1X1h/74P8AjRoFqPaP3sj/ALUn/wCfGX/vg0f2pP8A8+Mv/fBqTbqvrD/3wf8AGjbqvrD/AN8H/GjQLUe0fvZH/ak//PjL/wB8GvZfgfK03hvWZGQoTqhypGCP3ENePbdV9Yf++D/jXsXwR80eHda8/Hmf2oc7Rgf6iGuXF/AvU1oqnze6l8nc9OooorzjoCiiigArl/HUcbaPaTnVZdNmt7xJLeaC1+0StJhlCImCSTuI4B4z2zXUVzPjaWytNNsr661a20ua0vFltZ7pd0Rl2OuxgCCQUZxwQR17UAV/Ct9dXN1ayXHiaTUo72w+2W8D6esGUJX59w7jIBX/AGhXXV5p8N3sJru1hPibS9UvNM0z7Fb2+nggJCGTc7liSWJWP0Ax716BqepWuj6XdaleyeXbWsTSytjOFAyeKALdFeMSePJba/1zU4vEVpc3UuhPdW9jDepNFbzBm2oqgkM6oAzEcn5j0xjqJ2u/But2UcWpX+ow3em3cs0d5
cNNmWFUYOufu53MCq4XkcUAd/RXnOlNf6WfBuqPq9/eS62wiv4p5y8bmS3eUMiHiPayYG0Dg8561VsLzURoeg+LW1W+e71HVIop7Vp2MHkzTGMRrF91dgKkEDOVOSc0AeoV84ePVuz8SfEf2dlC/aIc5Pf7PFX0JqN5PY2hmt9Pub+QEDyLZow59/3jKv6186eLFuNT8d6/dSxXOnSNcRBraZkLriCLrsZl568E9a6ML/EMa8oxheVvmrmPs1L+/H+Zo2al/fj/ADNP/syX/n8f86P7Ml/5/H/OvSujj9rS7x/8BYzZqX9+P8zRs1L+/H+Zp/8AZkv/AD+P+dH9mS/8/j/nRdB7Wl3j/wCAsZs1L+/H+Zo2al/fj/M0/wDsyX/n8f8AOj+zJf8An8f86LoPa0u8f/AWM2al/fj/ADNGzUv78f5mn/2ZL/z+P+dH9mS/8/j/AJ0XQe1pd4/+AsZs1L+/H+Zo2al/fj/M0/8AsyX/AJ/H/Oj+zJf+fx/zoug9rS7x/wDAWM2al/fj/M0bNS/vx/maf/Zkv/P4/wCdH9mS/wDP4/50XQe1pd4/+AsZs1L+/H+Zo2al/fj/ADNP/syX/n8f86P7Ml/5/H/Oi6D2tLvH/wABYzZqX9+P8zRs1L+/H+Zp/wDZkv8Az+P+dH9mS/8AP4/50XQe1pd4/wDgLGbNS/vx/maNmpf34/zNP/syX/n8f86P7Ml/5/H/ADoug9rS7x/8BYzZqX9+P8zRs1L+/H+Zp/8AZkv/AD+P+dH9mS/8/j/nRdB7Wl3j/wCAsZs1L+/H+Zo2al/fj/M0/wDsyX/n8f8AOj+zJf8An8f86LoPa0u8f/AWM2al/fj/ADNGzUv78f5mn/2ZL/z+P+dH9mS/8/j/AJ0XQe1pd4/+AsZs1L+/H+Zo2al/fj/M0/8AsyX/AJ/H/Oj+zJf+fx/zoug9rS7x/wDAWM2al/fj/M0bNS/vx/maf/Zkv/P4/wCdH9mS/wDP4/50XQe1pd4/+AsZs1L+/H+Zo2al/fj/ADNP/syX/n8f86P7Ml/5/H/Oi6D2tLvH/wABYzZqX9+P8zRs1L+/H+Zp/wDZkv8Az+P+dH9mS/8AP4/50XQe1pd4/wDgLGbNS/vx/maNmpf34/zNP/syX/n8f86P7Ml/5/H/ADoug9rS7x/8BYzZqX9+P8zRs1L+/H+Zp/8AZkv/AD+P+dH9mS/8/j/nRdB7Wl3j/wCAsZs1L+/H+Zo2al/fj/M0/wDsyX/n8f8AOj+zJf8An8f86LoPa0u8f/AWM2al/fj/ADNGzUv78f5mn/2ZL/z+P+dH9mS/8/j/AJ0XQe1pd4/+AsZs1L+/H+Zo2al/fj/M0/8AsyX/AJ/H/Oj+zJf+fx/zoug9rS7x/wDAWM2al/fj/M0bNS/vx/maf/Zkv/P4/wCdH9mS/wDP4/50XQe1pd4/+AsZs1L+/H+Zo2al/fj/ADNP/syX/n8f86P7Ml/5/H/Oi6D2tLvH/wABYzZqX9+P8zRs1L+/H+Zp/wDZkv8Az+P+dH9mS/8AP4/50XQe1pd4/wDgLGbNS/vx/maNmpf34/zNP/syX/n8f86P7Ml/5/H/ADoug9rS7x/8BYzZqX9+P8zRs1L+/H+Zp/8AZkv/AD+P+dH9mS/8/j/nRdB7Wl3j/wCAsZs1L+/H+Zo2al/fj/M0/wDsyX/n8f8AOj+zJf8An8f86LoPa0u8f/AWM2al/fj/ADNGzUv78f5mn/2ZL/z+P+dH9mS/8/j/AJ0XQe1pd4/+AsZs1L+/H+Zo2al/fj/M0/8AsyX/AJ/H/Oj+zJf+fx/zoug9rS7x/wDAWM2al/fj/M0bNS/vx/maf/Zkv/P4/wCdH9mS/wDP4/50XQe1pd4/+AsZs1L+/H+Zr2D4JCQeHtaExBk/tQ5x/wBcIa8i/syX/n8f869e+CcZh8P63GXLkaqfmPf9xDXLi2nBepvh5wlK0WvkrHptFFFeedYUUUUAFZPiG/vdO
sI5rBtLErShT/aV00Ee3BPDBWy3A4x0z6VrVzHjjSJdW0uy2abBqcVpepcz2MxVfPRVYbQW+UEFg3JAO0gnBoAztIv5JPEj6xrWreHbbFobZYLC98zzMuG3OzBfu7SFGON7c8127KGUqwBB6g15/onhix1PxG2qSeDLLS9N+xmLybiG3driQspV9sZZVCqGGc5O70Ar0GgDFufC2l3mqXN9cQ7zc2JsJYSAI2iJJPAGcncRnPSodJ8I2ml3gu5b6/1GaO3NrCb6RX8mIkEquFGc7VyWyxwOa6CigDmtK8FWGk3tpOl5f3EVgrJYW1xKGitAwwdmFBPy/KNxbAOBSW/gfTrbUIZ1ur5rS3uWu4NPeVTbwzMSSyjbu6sxALEAngCumooAK+cvHdusvxI8Rs0xQi4hGAf+neKvo2vnTxzFaP8AEfxGbg4b7RDj6fZoq6MM7VDHEScYXV/krnP/AGKP/n6b86PsUf8Az9N+dP8As+mf3h+tH2fTP7w/WvS5vX7jg9tLvL/wFDPsUf8Az9N+dH2KP/n6b86f9n0z+8P1o+z6Z/eH60c3r9we2l3l/wCAoZ9ij/5+m/Oj7FH/AM/TfnT/ALPpn94frR9n0z+8P1o5vX7g9tLvL/wFDPsUf/P0350fYo/+fpvzp/2fTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/8An6b86PsUf/P0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/wABQz7FH/z9N+dH2KP/AJ+m/On/AGfTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/wDn6b86PsUf/P0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/AMBQz7FH/wA/TfnR9ij/AOfpvzp/2fTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/+fpvzo+xR/8AP0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/wFDPsUf8Az9N+dH2KP/n6b86f9n0z+8P1o+z6Z/eH60c3r9we2l3l/wCAoZ9ij/5+m/Oj7FH/AM/TfnT/ALPpn94frR9n0z+8P1o5vX7g9tLvL/wFDPsUf/P0350fYo/+fpvzp/2fTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/8An6b86PsUf/P0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/wABQz7FH/z9N+dH2KP/AJ+m/On/AGfTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/wDn6b86PsUf/P0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/AMBQz7FH/wA/TfnR9ij/AOfpvzp/2fTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/+fpvzo+xR/8AP0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/wFDPsUf8Az9N+dH2KP/n6b86f9n0z+8P1o+z6Z/eH60c3r9we2l3l/wCAoZ9ij/5+m/Oj7FH/AM/TfnT/ALPpn94frR9n0z+8P1o5vX7g9tLvL/wFDPsUf/P0350fYo/+fpvzp/2fTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/8An6b86PsUf/P0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/wABQz7FH/z9N+dH2KP/AJ+m/On/AGfTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/wDn6b86PsUf/P0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/AMBQz7FH/wA/TfnR9ij/AOfpvzp/2fTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/+fpvzo+xR/8AP0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/wFDPsUf8Az9N+dH2KP/n6b86f9n0z+8P1o+z6Z/eH60c3r9we2l3l/wCAoZ9ij/5+m/Oj7FH/AM/TfnT/ALPpn94frR9n0z+8P1o5vX7g9tLvL/wFDPsUf/P0350fYo/+fpvzp/2fTP7w/Wj7Ppn94frRzev3B
7aXeX/gKGfYo/8An6b86PsUf/P0350/7Ppn94frR9n0z+8P1o5vX7g9tLvL/wABQz7FH/z9N+dH2KP/AJ+m/On/AGfTP7w/Wj7Ppn94frRzev3B7aXeX/gKGfYo/wDn6b869e+CaCPw9rShiwGqnk9/3ENeSfZ9M/vD9a9c+CixroGtiH/VjVTj/vxDXLi3eC9Tow1RylZt/NWPTKKKK887QooooAK5fxxbfa9P0+OS2+22wvke5sAyhruMI/yKrEByDtfb3CGuorhPHV3c3Eeow2lwbGfQLOLWo7pYxIzHFwpjCngcRkZ54YjFAFf4f2epM2jzXOlXWnR6boi6fKbpQjTy5jPyrnO1djcnGd/Feh1h6JYa7bzedqevpqEDxYWJbFYSrEg7twY54yMe9blABRRRQAUUUUAFfOnjlrUfEfxH565b7RDjjt9mir6Lr548azRx/EbxGHgMh+0xHOM/8u0Nb4b+Ic+Kv7PS/wAtDnt+n/3f0NG/T/7v6Gp/tUH/AD6H/vmj7VB/z6H/AL5r0dez+88y8u0v/AkQb9P/ALv6Gjfp/wDd/Q1P9qg/59D/AN80faoP+fQ/980a9n94Xl2l/wCBIg36f/d/Q0b9P/u/oan+1Qf8+h/75o+1Qf8APof++aNez+8Ly7S/8CRBv0/+7+ho36f/AHf0NT/aoP8An0P/AHzR9qg/59D/AN80a9n94Xl2l/4EiDfp/wDd/Q0b9P8A7v6Gp/tUH/Pof++aPtUH/Pof++aNez+8Ly7S/wDAkQb9P/u/oaN+n/3f0NT/AGqD/n0P/fNH2qD/AJ9D/wB80a9n94Xl2l/4EiDfp/8Ad/Q0b9P/ALv6Gp/tUH/Pof8Avmj7VB/z6H/vmjXs/vC8u0v/AAJEG/T/AO7+ho36f/d/Q1P9qg/59D/3zR9qg/59D/3zRr2f3heXaX/gSIN+n/3f0NG/T/7v6Gp/tUH/AD6H/vmj7VB/z6H/AL5o17P7wvLtL/wJEG/T/wC7+ho36f8A3f0NT/aoP+fQ/wDfNH2qD/n0P/fNGvZ/eF5dpf8AgSIN+n/3f0NG/T/7v6Gp/tUH/Pof++aPtUH/AD6H/vmjXs/vC8u0v/AkQb9P/u/oaN+n/wB39DU/2qD/AJ9D/wB80faoP+fQ/wDfNGvZ/eF5dpf+BIg36f8A3f0NG/T/AO7+hqf7VB/z6H/vmj7VB/z6H/vmjXs/vC8u0v8AwJEG/T/7v6Gjfp/939DU/wBqg/59D/3zR9qg/wCfQ/8AfNGvZ/eF5dpf+BIg36f/AHf0NG/T/wC7+hqf7VB/z6H/AL5o+1Qf8+h/75o17P7wvLtL/wACRBv0/wDu/oaN+n/3f0NT/aoP+fQ/980faoP+fQ/980a9n94Xl2l/4EiDfp/939DRv0/+7+hqf7VB/wA+h/75o+1Qf8+h/wC+aNez+8Ly7S/8CRBv0/8Au/oaN+n/AN39DU/2qD/n0P8A3zR9qg/59D/3zRr2f3heXaX/AIEiDfp/939DRv0/+7+hqf7VB/z6H/vmj7VB/wA+h/75o17P7wvLtL/wJEG/T/7v6Gjfp/8Ad/Q1P9qg/wCfQ/8AfNH2qD/n0P8A3zRr2f3heXaX/gSIN+n/AN39DRv0/wDu/oan+1Qf8+h/75o+1Qf8+h/75o17P7wvLtL/AMCRBv0/+7+ho36f/d/Q1P8AaoP+fQ/980faoP8An0P/AHzRr2f3heXaX/gSIN+n/wB39DRv0/8Au/oan+1Qf8+h/wC+aPtUH/Pof++aNez+8Ly7S/8AAkQb9P8A7v6Gjfp/939DU/2qD/n0P/fNH2qD/n0P/fNGvZ/eF5dpf+BIg36f/d/Q0b9P/u/oan+1Qf8APof++aPtUH/Pof8AvmjXs/vC8u0v/AkQb9P/ALv6Gjfp/wDd/Q1P9qg/59D/AN80faoP+fQ/980a9n94Xl2l/wCBIg36f/d/Q0b9P/u/oan+1Qf8+h/75o+1Qf8APof++aNez
+8Ly7S/8CRBv0/+7+ho36f/AHf0NT/aoP8An0P/AHzR9qg/59D/AN80a9n94Xl2l/4EiDfp/wDd/Q0b9P8A7v6Gp/tUH/Pof++aPtUH/Pof++aNez+8Ly7S/wDAkQb9P/u/oaN+n/3f0NT/AGqD/n0P/fNH2qD/AJ9D/wB80a9n94Xl2l/4EiDfp/8Ad/Q1658FDGdA1sxDCf2qcf8AfiGvKvtUH/Pof++a9W+C7K+ha4ypsB1U/Ljp/o8Nc2KvyL1OvCN87unt1dz0uiiiuA9AKKKKACsLWfB3h/xBd/atV01LmfyhDvZ2GUBJCnBGRlm/Ot2igDF0fwloeg3bXWmWAt5mjMZYSO2VJBxgkjqBWrc+eLWb7KIzcbG8oSkhN2ON2OcZ64qWobw3IspzZCFrvy28kTEhC+Pl3EAkDOM4GcUAc3aa3rFj4lXSNbfTrgSWMl6JbKJ4zCEZVIdWZsg7uGyPuniqOl+K9blGg6lqNvYppOuyBLeOEP51vvRpIi7ElX3BcHAXBI61P4V0TXbFrr+3bTTJpr5Sb2/hvpJJZmxgKEMKhUAJAUNx7kk1BpfhTW4hoOm6jcWL6ToUge3khLma42I0cQdSAqYDZOC2SB0oAZH4w1lreDXmgsf+Efn1EWSxhX+0Khm8lZi2dvL4O3b0PXNGoeMNZgh1jWbaCxOh6RdNbzxSK5nmVCBK6sDtXaScAg52nkZoj8H6ytvBoLT2P/CPwaiL1ZAz/aGQTecsJXG3h8Ddu6DpmjUPB+szw6xo1tPYjRNXumuJ5ZGcTwq5BlRVA2tuIOCWGNx4OKAOw1DVLDSbQ3epXttZ2wIUy3Eqxpk9BknFfPfifV7a+8c+ILrTnjvrWW4iKT258xGxbxA4Zcg4IIr6OIBGCBivn7xg10nxD8SC3RCn2mLqvf7NDW1D4zkxtvZar73YwPtsn/PnJ/37NH22T/nzk/79mrPm6j/zzj/75P8AjR5uo/8APOP/AL5P+Ndt/JfeeP7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/88
4/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs0fbZP+fOT/v2as+bqP8Azzj/AO+T/jR5uo/884/++T/jRfyX3h7n8q/8CZW+2yf8+cn/AH7NH22T/nzk/wC/Zqz5uo/884/++T/jR5uo/wDPOP8A75P+NF/JfeHufyr/AMCZW+2yf8+cn/fs16v8F3Mmha45QoTqp+UjGP8AR4a8x83Uf+ecf/fJ/wAa9Q+DZkbRddMoAf8AtU5AH/TvDWGI+H/gnbgOX2jsktOjv2PSKKKK4z1gooooAKKKKACiiigAooooAKKKKACvAvFkN3J8QvEpt8bftUWc+v2aGvfa8G8U2s0/xA8StHOUAuohj/t2hq6cuWV/zOHMHagzH+zal/s0fZtS/wBmp/7Ouv8An7P50f2ddf8AP2fzrf2y/mX3M8DnXl9zIPs2pf7NH2bUv9mp/wCzrr/n7P50f2ddf8/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/wBmp/7Ouv8An7P50f2ddf8AP2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv9mp/7Ouv+fs/nR/Z11/z9n86PbL+Zfcw515fcyD7NqX+zR9m1L/Zqf+zrr/n7P50f2ddf8/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/2an/ALOuv+fs/nR/Z11/z9n86PbL+Zfcw515fcyD7NqX+zR9m1L/AGan/s66/wCfs/nR/Z11/wA/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/2an/s66/5+z+dH9nXX/P2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv9mp/7Ouv+fs/nR/Z11/z9n86PbL+Zfcw515fcyD7NqX+zR9m1L/Zqf8As66/5+z+dH9nXX/P2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv8AZqf+zrr/AJ+z+dH9nXX/AD9n86PbL+Zfcw515fcyD7NqX+zR9m1L/Zqf+zrr/n7P50f2ddf8/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/2an/s66/5+z+dH9nXX/P2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv
9mp/wCzrr/n7P50f2ddf8/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/wBmp/7Ouv8An7P50f2ddf8AP2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv9mp/7Ouv+fs/nR/Z11/z9n86PbL+Zfcw515fcyD7NqX+zR9m1L/Zqf+zrr/n7P50f2ddf8/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/2an/ALOuv+fs/nR/Z11/z9n86PbL+Zfcw515fcyD7NqX+zR9m1L/AGan/s66/wCfs/nR/Z11/wA/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/2an/s66/5+z+dH9nXX/P2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv9mp/7Ouv+fs/nR/Z11/z9n86PbL+Zfcw515fcyD7NqX+zR9m1L/Zqf8As66/5+z+dH9nXX/P2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv8AZqf+zrr/AJ+z+dH9nXX/AD9n86PbL+Zfcw515fcyD7NqX+zR9m1L/Zqf+zrr/n7P50f2ddf8/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/2an/s66/5+z+dH9nXX/P2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv9mp/wCzrr/n7P50f2ddf8/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/wBmp/7Ouv8An7P50f2ddf8AP2fzo9sv5l9zDnXl9zIPs2pf7NH2bUv9mp/7Ouv+fs/nR/Z11/z9n86PbL+Zfcw515fcyD7NqX+zR9m1L/Zqf+zrr/n7P50f2ddf8/Z/Oj2y/mX3MOdeX3Mg+zal/s0fZtS/2an/ALOuv+fs/nR/Z11/z9n86PbL+Zfcw515fcyD7NqX+zXpXweWRNI15Zv9YNV5/wDAeCvPP7Ouv+fs/nXovwhjaLSteR33sNV5b1/0eCs6k1JWTXyR6OWSTqu1tui80ei0UUVie4FFFFABRRRQAUUUUAFFFFABRRXN6v40sdIvbq3ay1C6WyjWW+mtYg6WqMMgvlgTwN2FDEDnFAHSV4nrulpe+O/E0jXPlEXkS4xn/l1h/wAa9qjkSaJJI2Do4DKynIIPQivK7my0258ZeKXvJAsgv4wB7fZYP/r1y4zEPD0vaJv5K7+45sXSdWnyq3zdjA/4R+H/AJ/h/wB8/wD16P8AhH4f+f4f98//AF66T+ytC/57Cj+ytC/57CvJ/tqf80//AABHlfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/
nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXo/wCEfh/5/h/3z/8AXrpP7K0L/nsKP7K0L/nsKP7an/NP/wAAQfUJf3f/AAI5v/hH4f8An+H/AHz/APXrtfhTbi1svEMKvvC6r971/wBGgNZ39laF/wA9hW18OIoIP+EljtjmEaqNp/7dYP612YLMJYmo4Nyel9YpHZg8M6VRydtujudvRRRXqHpBRRRQAUUUUAFFFFABRRRQAV57qhvtIv8AxfbrpF/etrSrJYvbQGRGcwLCUdhxHgpnLYGGr0KuPufHE0E11dLpBfQ7O9+xXF99oAdXDhGYRbeUVzgncDwSAcUAa9toEbeFtP0e8muP9Gt4ome2uZIGLIoH30Ktjj1rlNO+Gej3Gsa9LqVvqbK16v2aR9TucvH9nh5J8zLfPvGTk8Y6AV13iDW/7DsoZI7Zrq6ubhLW2t1cJ5kjZwCx+6AASTzwDwayD42Npp2rPqWmPBqWmyRRPZwzCUTNKQIdjkLkMTjkDBByOKBNJ6Mi/wCFW+FP+fbUP
/Brdf8Axyj/AIVb4U/59tQ/8Gt1/wDHK1NH8QXV3qt1pOq6cmn6hBClyFjuPOjkiYkbg21eQVIII4465qDQPF6+INe1GwhsmjtbeCKe3uzJn7Sjs67guOFzG2Dk7gQeM0E+zh2RS/4Vb4U/59tQ/wDBrdf/AByj/hVvhT/n21D/AMGt1/8AHK7KigPZw7I43/hVvhT/AJ9tQ/8ABrdf/HKP+FW+FP8An21D/wAGt1/8crqdQvoNM026v7pttvawvNK2M4VQST+QrA0nxVe3Wp2NnqujHTv7RgaeyYXIl3BQCUcBRsfawOBuHB54oD2cOyKjfC7wrtOLbUM44/4mt1/8crO8O/DHRZfDOlSata6iNSa0iN0G1O5UiXYN+QJMDnPArpNX8Q3lrrMekaTpa6hfG3N1Kslz5CRx7to+ba2WY5AGOxyRVMeNTfWejHRtNa6vtUjkkS3nm8kQrHgSeYwDY2sQvAOSaA9nDsiH/hVvhT/n21D/AMGt1/8AHKP+FW+FP+fbUP8Awa3X/wAcqO/+IItPD1nqIsIIriXUW024hvr0QR20qh9xaUKwx8nBxyGB4rptFvrjUtMju7hLNGkyV+xXf2mJl7EPtXP5UB7OHZHO/wDCrfCn/PtqH/g1uv8A45R/wq3wp/z7ah/4Nbr/AOOV2VFAezh2Rxv/AAq3wp/z7ah/4Nbr/wCOUf8ACrfCn/PtqH/g1uv/AI5W34g1s6JaW7RWrXd3d3CWtrbhwnmSMCeWOdoCqxJweB0NM0HXZdVlv7O9svsWo2Eix3ECy+amGUMrK+BlSD3AOQRigPZw7I5TX/hjosenRNpdrqJuDeWyvt1O5b90Z0EvWT/nmX57dua0/wDhVvhT/n21D/wa3X/xynXnjia2lvbpNIMmiWF2LS6vvtADq4IV2WPb8yKzYJ3A8HAOKfq3jK6sbvVRY6M19Z6OqtqE4uAjLlBIVjTad5CEMQSvXAyaA9nDsiL/AIVb4U/59tQ/8Gt1/wDHKP8AhVvhT/n21D/wa3X/AMcqXWvHNtp97BZ2QsZpZYEuWkvb5bSFUckIN5ViWba2FA6Kc44rqLaSSW1hkljWOR0VnRX3hSRyA3f696A9nDsjkv8AhVvhT/n21D/wa3X/AMco/wCFW+FP+fbUP/Brdf8AxyuyooD2cOyON/4Vb4U/59tQ/wDBrdf/AByj/hVvhT/n21D/AMGt1/8AHK0dX8Q3ttrS6RpGlLqN6Lb7XMJLkQJHGWKr821ssxDYGAPlOSKtWPiOxvvCyeIQXiszbNcSCQfNGFBLAj1GCD9KA9nDsjkZ/hjoo8UWEcVrqP8AZjWdw05/tO5x5oeHy+fMyPlMvH+ArT/4Vb4U/wCfbUP/AAa3X/xyrGk+Lru61DT7fVNGOnR6nA01lJ9pEpYKAxWQbRsfad2AWHB54qHTfG817LplxPo7W+katKYbG8+0BnY4ZkLx7RsDhTghm7ZxmgPZw7Ib/wAKt8Kf8+2of+DW6/8AjlH/AAq3wp/z7ah/4Nbr/wCOVLqfiXX9K1XT7WbQtPeK/vhawtFqbmTacsXKGADhFLEbu2M11tAezh2Rxv8Awq3wp/z7ah/4Nbr/AOOUf8Kt8Kf8+2of+DW6/wDjldlRQHs4dkcb/wAKt8Kf8+2of+DW6/8AjlH/AAq3wp/z7ah/4Nbr/wCOU658cTQTXV0ukF9Ds737FcX32gB1cOEZhFt5RXOCdwPBIBxW14g1v+w7KGSO2a6urm4S1trdXCeZI2cAsfugAEk88A8GgPZw7I5DS/hjor6jrS3trqIt0vFWyzqdyMxeRETjEnP7wycn+WK0/wDhVvhT/n21D/wa3X/xypT42Npp2rPqWmPBqWmyRRPZwzCUTNKQIdjkLkMTjkDBByOKenjCa0XV49b0s2d3ptl/aBit5/PWaHDco21fmBQgggdRyQaA9nDsiv8A8Kt8Kf8APtqH/g1uv/jlH/CrfCn/AD7ah/4Nbr/45VrRvFV5f
axbadqGlxWj3lm17bPb3guAUUqCH+Vdp+dcYyDzg8V1FAezh2Rxv/CrfCn/AD7ah/4Nbr/45R/wq3wp/wA+2of+DW6/+OV2VVtQvoNM026v7pttvawvNK2M4VQST+QoD2cOyOW/4Vb4U/59tQ/8Gt1/8cpG+F3hXacW2oZxx/xNbr/45VvSfFV7danY2eq6MdO/tGBp7JhciXcFAJRwFGx9rA4G4cHnirGr+Iby11mPSNJ0tdQvjbm6lWS58hI4920fNtbLMcgDHY5IoD2cOyOb8O/DHRZfDOlSata6iNSa0iN0G1O5UiXYN+QJMDnPArS/4Vb4U/59tQ/8Gt1/8cqYeNTfWejHRtNa6vtUjkkS3nm8kQrHgSeYwDY2sQvAOSaavjZrixsRaaWz6td3ktj9hlnCLFLEGMm6TB+UBCQQCTkcc8Aezh2RH/wq3wp/z7ah/wCDW6/+OUf8Kt8Kf8+2of8Ag1uv/jlbWga6dYW9huLX7Jf2E/2e6txIJArbVcFWwNylWBBwD1GOK2KA9nDsjjf+FW+FP+fbUP8Awa3X/wAco/4Vb4U/59tQ/wDBrdf/AByuyrI8Qa2dEtLdorVru7u7hLW1tw4TzJGBPLHO0BVYk4PA6GgPZw7IxP8AhVvhT/n21D/wa3X/AMcrM1/4Y6LHp0TaXa6ibg3lsr7dTuW/dGdBL1k/55l+e3bmur0HXZdVlv7O9svsWo2Eix3ECy+amGUMrK+BlSD3AOQRism88cTW0t7dJpBk0SwuxaXV99oAdXBCuyx7fmRWbBO4Hg4BxQHs4dkN/wCFW+FP+fbUP/Brdf8Axyj/AIVb4U/59tQ/8Gt1/wDHK0dT8Q32m+JdL05tKRrHUJ/s63n2oBg/lvJ/q9pyPkI6jrTdW8R31vrMmlaPpA1K6gtlurnfciFURiwRQdrbnbY2BwOOSM0B7OHZFD/hVvhT/n21D/wa3X/xyj/hVvhT/n21D/wa3X/xyuk0fVbfXNGs9UtN32e7hWVAwwwBGcEeo6Gr1Aezh2Rxv/CrfCn/AD7ah/4Nbr/45R/wq3wp/wA+2of+DW6/+OV2Vcpq/jC7sr7VI9P0Vr+30iNZL+X7QI2XK79sa7TvYJhsEr1AzmgPZw7Ig/4Vb4U/59tQ/wDBrdf/ABysyf4Y6KPFFhHFa6j/AGY1ncNOf7TuceaHh8vnzMj5TLx/gK6/VPEFppnhqTXcPPbCJZIljHzS78BFGe7FlH41T0zxFfz6nc6VqWkLZ6lHai7hijuhMk0eSvD7VwwbAII/iHJoD2cOyM//AIVb4U/59tQ/8Gt1/wDHKP8AhVvhT/n21D/wa3X/AMcrU8Na/d61JqkF9pqWFzp90Ld40uPODZjSQHO1ezgY56daZoniG+1HX9U0m/0pLGSyihmVluhL5iyGQDICgKR5Z7nrQHs4dkZ3/CrfCn/PtqH/AINbr/45R/wq3wp/z7ah/wCDW6/+OV2VFAezh2Rxv/CrfCn/AD7ah/4Nbr/45R/wq3wp/wA+2of+DW6/+OV2Vcfc+OJoJrq6XSC+h2d79iuL77QA6uHCMwi28ornBO4HgkA4oD2cOyG/8Kt8Kf8APtqH/g1uv/jlWfBnh9PDk/iC1ggmis5NRWS282VpC6fZ4QTuYkn5g45PatPxBrf9h2UMkds11dXNwlrbW6uE8yRs4BY/dAAJJ54B4NU9P8RX17Fqlq+kCPWdOZA9mLkMkgcZRlkKj5SM9VBBUjFA1CMdkdFRWBoXiC71rw3PqI0sR3kU1xB9jS4DBnikZMByFGCV6kd6bpniC+m146Lq2lR2V29qbuEwXXno6KwVgTtUqwLLxjBzweKCjoaKKKACiiigAooooAKKKKACvObvw/rrabqnhaLTQ9jqGovcLqXnoEihkm81wUzvLjLKMDB4ORXo1FAHKa/aavqtra3cOmBLvSdWFzBbtcL/AKVEoZCQ3RSyyMQG6EDOKyLvw3rWsw65qz2a2WoXNxZTWdnNM
rHFq4dQ7ISoLkuOCQARzXoVFAHDtoGpeKdS1S81O3udEhuNOXTokSaN58by8jEqWUA8KBzxnOM0/QfC+r6V46u7+fVru6sDp8EEbSR26CQq0vybY0UgIGBGAM7upxgdrRQAUUUUAZ3iDS/7b8OanpW/y/ttrLbh/wC6XUrn8M1y0Fv4jv8AUtJvr3Q/szaJaTER/ao2+2XDRhAsZBO1MbuXweRxwa7qigDkL2HWrLxLD4itNGe9N1pqWlzZx3EavA6uzqdzEKy5dwSDngEA1m2HhvWfDcfh7UYLNdRurWC6hvraCVUJ+0SCYmMuQp2uuOSMg59q9BooA43S7LVNG0aSe50E6heX2ozXs1rBNFm23k7QDIVViFwCQepOOKu+DdJvNMt9Umu7WOyF/fvdRWUbhhbqVRcEjjJKljjjLV0tFABRRRQBz3izTb27j0q+06EXF1pl8t2LcuE85djxsoJ4B2yEjPGR1FUtHg1qDV9R1ufRykmq3UEJtTcx77a3jQjzHIJVjuJO1SeCOetddRQB5zf+H9dbTtb8MQ6aJLLVb+S4XUvPQJFFLIHkDITvLglwMAg5HI5q1qula/Zz+J7PTNLW8g1754rkzoi20jQrC/mBjuIGwMNoOc44613lFAHn8/he40TxEuo2uhprkEukxacUMkaPGY88nzCBsYEZxkjb0Oa6jwppVxofhPS9Lu5VkntbdI3ZSSoIHQE84HQewrYooAKKKKAOU1S21bS/F8muabpbanFdWCWksMc6RvG8buyNlyAVPmMDg5GBwar6b4e1CLwZJ4Su4FEdxpkyzXySAoJ5i+9An3sDeSG9K7OigDhrDTdc1fU9B/tfShYQ6PBKJZPtCSC4laLyv3YUkhMFjlgD0GKq6XoWvtZ+GvD97pwgtNCuI5JNQ89GS4WFWWMIoO4E5UncBjB616HRQBzp0y8vfHw1O6h2WGnWflWRLA+ZLKcyvgHIwqooz/eauioooAKKKKAPObvw/rrabqnhaLTQ9jqGovcLqXnoEihkm81wUzvLjLKMDB4ORW3r9pq+q2trdw6YEu9J1YXMFu1wv+lRKGQkN0UssjEBuhAzir8/jTwra3EtvceJdGhnicpJHJfxKyMDgggtkEHjFaFjqllqYZ7GcXEQVWE0YJicN0KPja/T+EnHegDibvw3rWsw65qz2a2WoXNxZTWdnNMrHFq4dQ7ISoLkuOCQARzU9xpXiLXLnW9VS2Oi3kuk/wBn2CSzI7hyWYyMULBRkqBgk8E8dK7uigDgPDHh2ew8SWt3Y+Gh4dso7R4rxPtEcn2tyV2cIxztwx3thjuxjk139FFABWd4g0v+2/Dmp6Vv8v7bay24f+6XUrn8M1o0UAcLBb+I7/UtJvr3Q/szaJaTER/ao2+2XDRhAsZBO1MbuXweRxwat3sOtWXiWHxFaaM96brTUtLmzjuI1eB1dnU7mIVly7gkHPAIBrr6KAPPrDw3rPhuPw9qMFmuo3VrBdQ31tBKqE/aJBMTGXIU7XXHJGQc+1Sx+HbhPDckmp6HNfX11qkuoG2srxYpbRnJ2lJd6fMq7QSGGcnqK7yigDk/A/hyXRI9Uu7i3+yzaldCb7MZzM0SKiooeQkl3OCzHJ5bqcZrrKKKACue8Wabe3celX2nQi4utMvluxblwnnLseNlBPAO2QkZ4yOoroaKAOR0eDWoNX1HW59HKSardQQm1NzHvtreNCPMcglWO4k7VJ4I561j3/h/XW07W/DEOmiSy1W/kuF1Lz0CRRSyB5AyE7y4JcDAIORyOa9GooA4zxd/bUuvaFJp/h28voNOvPtUk0U9ugYGGWPaA8inILg8jGO9Pu4db0rxPd61pujNqCalYwxSQC4jjeCaIuV3FiAVIkwSpJBXoc12FFAGP4U0eTQPCum6VNIsk1tAqyuv3S/Vse2ScVsUUUAFcLqmm6/Y33iWLS9LF7Drqq0U/noi20vkiFvMDHJXCq3yhj1GBXdUUAcdqWhaj
eeD7nw5b2yx/YIrQWNzLKNty8JRxkDlBujCnPrmpdMt9Wv/ABXL4gv9JfT0g082cFs88ckkrM4d2yjFQPkUDJz1ziusooA4zwmdai8Ra7Lf+HLyxt9SuluY5pbi3cIFhjj2sEkY5JQ9ARTdKbW1+IGp38/hq9gsb23t7ZZ2uLchPKMpLELIWwfMXGAT6gV2tFABRRRQAV5zd+H9dbTdU8LRaaHsdQ1F7hdS89AkUMk3muCmd5cZZRgYPByK9GooA5TX7TV9VtbW7h0wJd6TqwuYLdrhf9KiUMhIbopZZGIDdCBnFGj2+qwX+v8AiK70mWO4vFhSDTlmjaXZErYy27YGZnbjdgDHNdXRQBxHhJ9f0vQdUiuPDN1HcrcXd5bxvdW+JzJM8ixgrIdpwwBJwPrUvg6DVRf3F7ruiX0OrXUeZ7yaS3MSKD8sESpKzKgyTyOTkk5IFdlRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHJ+JI0/4TPwb8i83lznjr/ostc94o1DUYV8bx2uoXNuYDpwt2jkI8jewDFR0Ge/r3r0iS1t5poZpYIpJYGLQuyAtGSCpKnsSCRx2NRS6Zp85nM1jbSG42edvhU+bt+7uyOcds9KAOIXRJ38bXug/29rY086ZFeY+3P5izGR03CTO4LhQdgO3PbtWHDruva/beE7Z5Nwu9GF3If7TfTzczAqD+8jRmJA52jH3snOK9YFrbi7N2IIhctGIzNsG8oCSF3dcZJOPeqk+g6Pc6dFp1xpNjLYw4EdtJbo0SY6YUjA/CgCn4RGpL4dgTVbqC5ukeRfNhn84FA5CgvtXcwGFJwMkGtyobW1t7G2S2tLeK3gjGEiiQIqj2A4FTUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAf/9k=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import torch\n", + "from speechbrain.augment.time_domain import DoClip\n", + "\n", + "clipper = DoClip(clip_low=0.7, clip_high=0.7)\n", + "sinusoid = torch.sin(torch.linspace(0,20, 300))\n", + "clipped_signal = clipper(sinusoid.unsqueeze(0))\n", + "\n", + "# plots\n", + "plt.figure(1)\n", + "plt.subplot(211)\n", + "plt.plot(sinusoid)\n", + "plt.xlabel('Time')\n", + "\n", + "plt.subplot(212)\n", + "plt.plot(clipped_signal.squeeze())\n", + "plt.xlabel('Time')\n", + "\n", + "# freq domain\n", + "plt.figure(2)\n", + "plt.subplot(211)\n", + "plt.specgram(sinusoid,Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "plt.subplot(212)\n", + "plt.specgram(clipped_signal.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-Lv73YnUPhyn" + }, + "source": [ + "The amount of clipping is controlled with the parameters **clip_low** and **clip_high**, which set the lower and upper threshold over which the signal is clamped. In the frequency domain, clipping adds harmonics in the higher part of the spectrum." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xSSapLoxD2Pv" + }, + "source": [ + "## 5. 
Augmentation Combination\n", + "\n", + "Let's consider the scenario where you aim to augmentation pipeline by integrating the previously defined augmentation techniques in a stochastic way.\n", + "\n", + "This integration is facilitated by a dedicated class called `Augmenter` within the `speechbrain.augment `module.\n", + "\n", + "For simplicity, let's examine a case where we want to combine the frequency dropper and the chunk dropper:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 968 + }, + "executionInfo": { + "elapsed": 2897, + "status": "ok", + "timestamp": 1704407501395, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "-OFXNna-jEdo", + "outputId": "66739da9-e5df-4105-cec2-4f2c5284bec3" + }, + "outputs": [ + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGzAjgDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKwfF2tXWiaKsmnxwy6ldXEVpZxzA7GlkYAbsEHAG5jz0U0Ab1Fc5o3iu3ufA8fiLVCtoIYWa+CqzCGSMlZRgZPDK3HJxWrdaxYWbWC3E+w38ohtvkY+Y5UsBwOOFJ5x0oAvUVzmoePPDOlajNYXuqpHcQAGYCN2WIkZAZwpVWP
YE5PHFSXnjbw7p8GnzXWoiNNRgNxaZicmZMKeAFzn51+Xqc9KAN+iuWf4j+Eo7CO9bWFMDlgdsMjNHtOG3oF3IATyWArU1TxJo+jWEF7fX0ccFwQICgMhlJGRsVQS3HPANAGrRWCnjPw/J4euddTUVOnWpK3EnlvuiIIBDJjcDyOCM80um+MvD+r6m+nWOorNdKhkCeW6iRQcFkYgBx7qTQBu0Vxvg/4g2PimW7tyksNxFdTxxg2syoYkYgMzsoVWI5Kkgj0rQ0/x34Z1TUo9Ps9VSS4lLLF+7dUmI6hHKhX/AOAk0AdFRXMa7440HSZ7vTpdUSLUY4s7RGzLExHyb2AKoTxgMRmrng6/udV8FaJqF7L5t1c2MM00m0LudkBJwAAOT2oA26K4i217xT4hv9W/sFdFt7PTr2Sx23wleSV48bj8hARcnjhj3ramvZ49f0K1udTjtp7i3naXT44DItyyiPJEhHyhCT6bt3tQBu0Vyt58SPCNhNdxXOsIhtCyzMIZGQMvVQwUqzD+6CTx0q7q/jLQNC1D+z9Rv/KvPJWcQLDJI7ISygqFU7jlW4GSMZ6UAbtFY3/CV6H/AMI9Hry6gjaZJgJOis24k7cBQN2c8YxnNM0/xTpWvWF9LpF55slqp8xHiaOSI4JG5HAYZxxkc4oA3KKwfBOo3er+CNF1G+l827ubOOWWTaF3MVyTgAAfhW9QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRSEhRkkAe9AC0U3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHUU3en95fzo3p/eX86AHV574jGra/8RbKw0O5s4W0G3N5M93C0sfnTAoilVZfmCByDn+KvQN6f3l/Ojen95fzoA8kmstZtbHx14TvXt7m/wBS0+TVbQWkTIkjOCkiqpJIO5VOM9Xz3qze+LdJ8Q6l4Fg0yZ52j1JGnIjYCFhbyDY5I4bk8dflNepb0/vL+dG9P7y/nQB5AdZ03QvDPjXw9q6P/a9xdX0qWxhZmu0lyY5F4wVwQCf4dpzjFaGkRI+v/DEugLR6BMykj7p8qAZ/In866TX/AA5rWuveWo8VfZdIuxslto7JDKIyoDKsu7jPPJUkZrprWG3s7SG1gCpDCixxqD91QMAfkKAOC0O2gGp/Ek+UmZLra/H3h9mU4P4s35muWRbm10j4fa1careaZpsWim2e/t4El+zyMsZG8MrBVYLt3Y7AZANe170/vL+dG9P7y/nQB4zrkGlyfDnxxqdh4hutbkvIoBcXEsKxpuQgDaVRVY4IBIz0Fdr4jijh8deBEiRUVJ7uNQoxhfsr8D2+UflXY70/vL+dG9P7y/nQB5Vplwlz4c8beE7e5WLxDcXeptBaOdsjq5Yowz2II596o6O+kavFoGnXnjXVGurW4tnj0h7GJJIJoyMIwWIMoByCc4xnnFex70/vL+dG9P7y/nQB5Qms6b4ftfG+h6zG/wDad9e3dxBbmFma9ilX93swMMAPlP8Adxziu1+H3/JOfDf/AGDLf/0WK6Len95fzo3p/eX86APKfF0vw9mvtTu2vrnSvEsRePzLIzQXEkq5AwowJMkDBwcjvV6wl1W48Q/DuXWVaPVH0q+NwGXDB9kHJHY+o9a9I3p/eX86N6f3l/OgDwXU9a0yy+Bd74Tu43GvWkbR3FkYmLrIJdxlJxjafvbs4OR3OK9HjiRvjdcSlAXTw5EFYjkA3Mmf5Cn6t4T1TXZpLXU/E4l0SS4857GOyRHdA+5Yml3cqMAcKCQOTXYb0/vL+dAHk8esX2heGdYlsZxZxN4suYLi88jzRZw
NIS0gXpwcDngbqTwxeC8+IOtSxa1daxC+gri8ngSISYkb7mxFDKMkbsHnIzxXrO9P7y/nRvT+8v50Acz8N/8Akmvhz/sHw/8AoIrqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKbvT+8v50b0/vL+dADqKKKACiiigAooooAKKKKACuX8f28N14Zit7iJJoZdT09JI5FDK6m7iBBB6gg4rqK5zxv/wAgG2/7Cunf+lkNAC/8ID4O/wChV0T/AMAIv/iaP+EB8Hf9Cron/gBF/wDE10VFAHO/8ID4O/6FXRP/AAAi/wDiaP8AhAfB3/Qq6J/4ARf/ABNdFRQBzv8AwgPg7/oVdE/8AIv/AImj/hAfB3/Qq6J/4ARf/E10VFAHO/8ACA+Dv+hV0T/wAi/+Jo/4QHwd/wBCron/AIARf/E10VFAHO/8ID4O/wChV0T/AMAIv/iaP+EB8Hf9Cron/gBF/wDE10VNd1jXc7BV6ZJwKAOf/wCEB8Hf9Cron/gBF/8AE0f8ID4O/wChV0T/AMAIv/ia6KigDnf+EB8Hf9Cron/gBF/8TR/wgPg7/oVdE/8AACL/AOJroqKAOd/4QHwd/wBCron/AIARf/E0f8ID4O/6FXRP/ACL/wCJroqKAOd/4QHwd/0Kuif+AEX/AMTR/wAID4O/6FXRP/ACL/4muiooA53/AIQHwd/0Kuif+AEX/wATR/wgPg7/AKFXRP8AwAi/+JroqKAOd/4QHwd/0Kuif+AEX/xNH/CA+Dv+hV0T/wAAIv8A4muiooA53/hAfB3/AEKuif8AgBF/8TR/wgPg7/oVdE/8AIv/AImuiooA53/hAfB3/Qq6J/4ARf8AxNH/AAgPg7/oVdE/8AIv/ia6KigDnf8AhAfB3/Qq6J/4ARf/ABNH/CA+Dv8AoVdE/wDACL/4muiooA53/hAfB3/Qq6J/4ARf/E0f8ID4O/6FXRP/AAAi/wDia6KigDnf+EB8Hf8AQq6J/wCAEX/xNH/CA+Dv+hV0T/wAi/8Aia6KigDnf+EB8Hf9Cron/gBF/wDE0f8ACA+Dv+hV0T/wAi/+JroqKAOd/wCEB8Hf9Cron/gBF/8AE0f8ID4O/wChV0T/AMAIv/ia6KigDnf+EB8Hf9Cron/gBF/8TR/wgPg7/oVdE/8AACL/AOJroqKAOd/4QHwd/wBCron/AIARf/E0f8ID4O/6FXRP/ACL/wCJroqKAOd/4QHwd/0Kuif+AEX/AMTR/wAID4O/6FXRP/ACL/4muiooA53/AIQHwd/0Kuif+AEX/wATR/wgPg7/AKFXRP8AwAi/+JroqKAOd/4QHwd/0Kuif+AEX/xNH/CA+Dv+hV0T/wAAIv8A4muiooA53/hAfB3/AEKuif8AgBF/8TR/wgPg7/oVdE/8AIv/AImuiooA53/hAfB3/Qq6J/4ARf8AxNH/AAgPg7/oVdE/8AIv/ia6KigDnf8AhAfB3/Qq6J/4ARf/ABNH/CA+Dv8AoVdE/wDACL/4muiooA53/hAfB3/Qq6J/4ARf/E0f8ID4O/6FXRP/AAAi/wDia6KigDnf+EB8Hf8AQq6J/wCAEX/xNH/CA+Dv+hV0T/wAi/8Aia6KigDnf+EB8Hf9Cron/gBF/wDE0f8ACA+Dv+hV0T/wAi/+JroqKAOd/wCEB8Hf9Cron/gBF/8AE0f8ID4O/wChV0T/AMAIv/ia6KigDnf+EB8Hf9Cron/gBF/8TWJ4p8I+GtM02zvLDw/pdrcx6rp+yaC0jR1zdxA4IGRkEj8a72uc8b/8gG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/wD
SyGujrnPG/wDyAbb/ALCunf8ApZDQB0dFFFABRRRQAUUUUAFFFFABXDfFV2XwrAFYgNeIGAPUbHOD+IH5V3NcL8V/+RWtv+v1P/QHrfC/xonPi/4MvQ7qis7Qbpr3QNPnklEsz20TStkZ3FATn35rRrGSs7G8XzK6CiiikMKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACiiigAopGO1ScE4GcCvMdKu9Ri0bwp4pfVb6a61i8hS7t5J2aAxz7sKsf3U2fLggA/Kc5zQB6fRXN+PLV5vB2qXEV9fWktpaTXEb2lw0JLLGxGSvJGecZqrrllrGseDtKTTJpftJe2lnK3r2zSRgAuplT5hn1FAHXVznjf8A5ANt/wBhXTv/AEshpng66Qwahpr297bXtjcBLmG7vpLwgsisrJK5JKFSMDjBzwKzvGeqXjw29k2hagluNX08fbmeDyTi7iOcCTfz0+7+nNAHb0UUUAFFFFABRRRQAUUUUAFcL8V/+RWtv+v1P/QHruq4X4r/APIrW3/X6n/oD1vhf40Tnxf8GRS+EjZsdUXd0kjOM9Mg/wCFej15j8Iuusf9sf8A2evTqvGfx5f10JwT/cR/rqFFFFcp1BRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/9LIa6Ouc8b/8gG2/7Cunf+lkNAHR0UUUAFFFFABXM2XgfTrG+tpkur6S1tJnntLCSVTBbyNnLKNu7jc2AzEDPAFdNRQBgah4W/tDw0NDk1vVkhZWSa4EkbTTIwYFXZ0IxhuwB4HNLF4amg0SHTY/EOsBoZA0d0Gh80KF2hP9XsK/VSc963qKAMvRdBttDS5MU1xc3F1L51zc3LhpJWwFBJAAAAAAAAAx0qh43/5ANt/2FdO/9LIa6Ouc8b/8gG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFcL8V/+RWtv+v1P/QHruq4X4r/8itbf9fqf+gPW+F/jROfF/wAGRl/CL72sfSH/ANnr06vH/hQzDxNdLk7TZsSM8E70/wATXsFaY5WrMzwDvQXzCiiiuQ7AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuc8b/APIBtv8AsK6d/wClkNdHXOeN/wDkA23/AGFdO/8ASyGgDo6KKKACiiigAooooAKKKKACuc8b/wDIBtv+wrp3/pZDXR1znjf/AJANt/2FdO/9LIaAOjooooAKKKKACiiigAooooAK4X4r/wDIrW3/AF+p/wCgPXdVwvxX/wCRWtv+v1P/AEB63wv8aJz4v+DI8y0HWLjRL2W6tZvKlMDopxnJI44+oB59K7AeJLzVl8HyS3bNcHUGS48v5AxDoBkDA+636153VixuDaahbXAYqYZVkBHbBBz+le3Uoxk+a2v/AADwqVeUFy30/wCCj6Tooor50+lCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv/AMgG2/7Cunf+lkNdHXOeN/8AkA23/YV07/0shoA6OiiigAooooAKKKKACiiigArnPG//ACAbb/sK6d/6WQ10dc543/5ANt/2FdO/9LIaAOjoooo
AKKKKAMTw5q9xqx1X7QsY+y6hLbR7Bj5FxjPvzW3Xnnww1CfUJ9eklICyTpcbF6Kzl92PyH5V6HW2IhyVHEww8/aU1IKKKKxNwrhfiv8A8itbf9fqf+gPXdVwvxX/AORWtv8Ar9T/ANAet8L/ABonPi/4Mjx6iiivoT5s+gPDGv2uu6XE0dys11FDH9qAUja5XJ7eoPT0rbrzL4Q/8xn/ALYf+1K9Nr57E01TquKPpcNUdSkpMKKKKwNwooooAKKK5nxT4pbw3qGkRtHG1tdyMs7tnKKNoyMem7P4YqoQc5csdyJzjCPNLY6ais3Rtf03X4ZJdOuRKIyA6lSrLnpkH1wa0qUouLsyoyUldPQKKKKQwooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACiiigArmbHxxp19fW0KWt9Ha3czwWl/JEoguJFzlVO7dztbBKgHHBNdKw3KRkjIxkV5jpVpqMujeFPCz6VfQ3Wj3kL3dxJAywCODdhlk+6+/5cBST8xzjFAHc+IteTw5pcmoS2F9eRRqzyLaRqxRVUsWO5lAAA9aTUfEVnpuj22oyxzyC6MaW8ESBpZXf7qKM4z+OBgnOBWT4q1f7X8PNQkj0zVvO1C0nt4bYWMjzB2R1G9EBKgkdTxyPWs67lnvvD3hnVLXTdRf+x72KS5tZLSSOYqIXjYrG4DNjzAeBzg4yaAOq0XXrfW0uVSC4tbm0l8q4tbpQskTEBhnBIIIIIIJBqh43/5ANt/2FdO/9LIag8LR3F3r3iDXHtLm1tr54I7dLmIxSOsSEFyh5XJYgA4OFziqXjPQLOOG31RZtQNwdX087G1Ccw83cQ/1RfZ/47x160AdvRRRQAUyWRIYnlkYLGilmY9AB1NPrO19xF4c1N26LaSk4/3DTirtIUnZNnB/CH/mM/8AbD/2pXpteZfCH/mM/wDbD/2pXptdON/jy+X5HLgf93j8/wA2FFFFcp1hXC/Ff/kVrb/r9T/0B67quF+K/wDyK1t/1+p/6A9b4X+NE58X/BkePUUUV9CfNne/CeWQeIryEORG9oWZc8Eh1AP4bj+devV836dqd7pN19psLh4JtpXcvceh9elesar46VL3QLezaNDePDLdF2B8qN9p2n0OGzn2HrXlYzDzlU5o9f0PXwWJhCk4y6fqdxRRRXmHqhRRRQAV89+KJpZvFOqmWR3K3cqLuYnChzgD2FfQlfO/iT/kadX/AOv2b/0M16OXfHI8zM/gidf8JrgrrF/bbjiS3EmOx2sB/wCzV6zXjXwsmji8WSI7hWltXRAf4juVsD8FJ/CvZayx6tWZtl7vQQUUUVxnaFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/wDSyGujrnPG/wDyAbb/ALCunf8ApZDQB0dFFFABRRRQAUUUUAFFFFABXOeN/wDkA23/AGFdO/8ASyGujrnPG/8AyAbb/sK6d/6WQ0AdHRRRQAVheM7pbPwdqkrKWDQGLA9XIQH/AMerdrmPiF/yI2o/9sv/AEalaUVepFeaMq7tSk12ZzPwh/5jP/bD/wBqV6bXmXwh/wCYz/2w/wDalem1tjf48vl+Rjgf93j8/wA2FFZsuuWcPiCDRXMn2ueEzJhflwM9T68H8vpWlXM4tbnUpJ7BXC/Ff/kVrb/r9T/0B67quF+K/wDyK9t/1+r/AOgPW2F/jRMMX/BkePUUUV9CfNhSkljkkk4xzSUUAfSWn3a6hptreqhRbiFJQpOSAwBx+tWazPDf/IraR/15Q/8AoArTr5mStJpH1UHeKbCiiipKCvnfxJ/yNOr
/APX7N/6Ga+iK+d/En/I06v8A9fs3/oZr0cu+OR5mZ/BE1Ph7/wAjzp3/AG1/9FPXuleE/D91TxxppdgozIMk45MbAD8692qcx/ir0/zLy3+E/X9EFFFFcB6AUUUUAFFFFABRRRQAUUVS1XVbTRtPe+vXKQIQCVXJ5OBxTSbdkJtJXZdooopDCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG//ACAbb/sK6d/6WQ10dc543/5ANt/2FdO/9LIaAOjooooAKKKKACiiigAooooAK5zxv/yAbb/sK6d/6WQ10dc543/5ANt/2FdO/wDSyGgDo6KKKACuY+IZA8Dajz18r/0YtdPXEfFM48KRc9btP/QWrbDq9WPqYYl2oy9DnPhNI41m/jDHY1uGK54JDDB/U/nXrVeR/Cf/AJD19/17f+zCvXK2x38ZmOA/gI85a4lm+NiRyNlYIvLjGOi+SWx+bH869Gry6UkfHAYP8S/+k4r1GpxKtyf4UVhXfn/xMK4X4r/8iva/9fq/+gPXdVwvxX/5Fe1/6/V/9Aeowv8AGiXi/wCDI8eooor6E+bCiiigD3vwRI8vgzTGdixERXJ9AxAH5AV0Fcx8P7iKfwXYrFIrGLcjgfwtuJwfwIP4109fOVlapL1Z9PQd6UfRBRRRWRqFfOet3Ed3r+o3ERJjlupZEJGMgsSK+jK+Z5v9fJ/vH+denlq1k/Q8rM3pFeppeGf+Rq0n/r8i/wDQxX0NXzz4Z/5GrSf+vyL/ANDFfQ1TmPxxKyz4JeoUVkeIPENr4ct7ae7V2jmnERKfwAgktjuBjtWvXA4tJN7M9FSTbS3QUUUVJQUUUUAFFFFAEVzdQWdu9xczJDCnLSSMAB261xvxN1G2h8MLaM+Zrtw0QAyCFIJOfxH51a+JN1Hb+C7mN87riSONMD+IMH/kprx281W/1C2tre7upJorYEQh+SgOMjPXsPpivQweG57VOzPOxuJ5L0rbr+vwPYfhveyXvhQGWd5pI53Ri5JI6N1P1/Wuuryr4TajKuoX2mHJheL7QOfusCFOB7hh/wB8ivVa58XDlrNHRg589GLCiiiuc6QooooAKKKKACiiigAooooAKKKKACiiigAooooAKKK8z8feJNS0XxdYfY7phFDAsrQbiEdizA7gOvAHXp2rWjSdWXKjKtWVKPNI9MornPDHi6y8QwQwiVRqPkeZPCqMAuCAcE8dSOM966OonCUHyyRcJxnHmi9AoooqSgooooAKKKKACuc8b/8AIBtv+wrp3/pZDXR1znjf/kA23/YV07/0shoA6OiiigAooooAKKKKACiiigArnPG//IBtv+wrp3/pZDXR1znjf/kA23/YV07/ANLIaAOjooooAK4H4sH/AIp6zHb7UP8A0Bq76uB+LH/Iv2X/AF9D/wBAaujC/wAaJzYv+BIwfhP/AMh69/69f/Zlr1yvIvhO6jxDeIT8xtSQPoy/4167WmO/jMzy/wDgI8l1O5Np8aEkCht1xBHgn+/Gq5/8ezXrVePa8c/GKLH/AD+2n8o69hoxS92m/JBhH71Rf3mFcJ8Vz/xS9qO/21f/AEB67uvMvi8eNHH/AF2/9p1GEV60S8Y7UJHmNFFFe+fOhRRRQB678J5Q3h+8hwdyXRYntyij/wBlrvq87+Ev/IK1H/ruv/oNeiV8/i/40j6PB/wIhRRRXOdIV8zzf6+T/eP86+mK+Z5v9fJ/vH+deplv2vl+p5WafZ+f6Gl4Z/5GrSf+vyL/ANDFfQ1fPPhn/katJ/6/Iv8A0MV3nxbupUg0u1WUiORpHdAepG0KT+bVWLpe1rRh3IwdX2VCc7XsUvi1eb9U06y2Y8mBpd+eu9sYx7bP1r1K3lM9tFKRgugbHpkZr531PVrvV3tnu2DNb2626MByVXOM+p5PNe/aJMbnQNOnYAGW1icgdsqDWOLpezpQj2ub4Or7StO
S62L9FFFeeeiFFFFABRRRQBwvxX/5Fa2/6/U/9AevHq9c+LEwXQLKDB3Pdbwe2FVh/wCzCvI69zAfwUeDmH8dnUeAdZh0bxMjzRu63SfZhs/hLOuD9OK9zr5y0T/kPad/19Rf+hCvoe1vLa+h861njniyV3xsGGR15FcmYwtNSR2ZbNuDiyaiiivOPSCiiigAooooAKKKKACiiigAqK4ure0i825mjhjyF3SMFGTwBk1LXnPxW1WAafa6UpDTtL5zYYfIACBkded36VrRp+0momVer7Km5no1UtU1ex0WyN3qFwsMIIUEgksT2AHJNeM3Pj/Xrq9srl7gKLUq3lRjakhHUtjrkcY6emKpeIvE974mltpLxUQwRlQseQpJOS2D0yNo6/w12Qy+fMuZ6HFPMocr5Fqe9Wl3Bf2cV3bSCSCVQ6OO4NTVwPwq1OS60W6098kWcgKH0V8nH5hj+Nd9XFWp+zm4djto1Pa01PuFfO3iDVX1vXrvUHRU85/lVeygBV/HAFe865dQ2mh3ss0yRL5DgMzY52nAHvXzpXoZdD4pHnZnP4YnU/Dt3TxxYKrMA4kVgD94eWxwfxAP4V7lXhfw9/5HnTv+2v8A6KevdKyzH+KvT/M2y3+E/X9EFRJcwSXEluk0bTRAGSMMCyA9Mjtmpa4Kx1GG0+LmrW0xw11FGkZ/2hGhx+Iz+XvXJTp86l5K511KnI4+bsd7RRRWZqFFFFABXOeN/wDkA23/AGFdO/8ASyGujrnPG/8AyAbb/sK6d/6WQ0AdHRRRQAUUUUAIx2qTgnAzgV5jpV3qMWjeFPFL6rfTXWsXkKXdvJOzQGOfdhVj+6mz5cEAH5TnOa9PrmbLwPp1jfW0yXV9Ja2kzz2lhJKpgt5GzllG3dxubAZiBngCgCPxM0974l0DQlu7m1tLtbm4uGtZTFI4iCBUDrhlBMmTgg/LWCLfX9X0C80qxvLm4l0vXjAZWvntpJrZVD7WmT5s4cLnqduTzXU3PhSC5sLCF9T1IXdg7Pb6gJlNwpbIbJKlWBBxgqRwPSiLwrFaaOlhYapqVm4na4ku4pEaaaRs7i5dWVsk5+72GMYoAh8HXcb2t9p7QXlveWFz5dzDd3z3ZDMispWVySVKsCOmOeBWb4z1S8eG3sm0LUEtxq+nj7czweScXcRzgSb+en3f05ro9F0K20OK4EMs9xPdS+dcXNy++SZ8BckgAcAAAAAADpVDxv8A8gG2/wCwrp3/AKWQ0AdHRRRQAVwPxY/5F+y/6+h/6A1d9XnPxbudthplrsz5krybs9NoAx/4/wDpXThFetE5sY7UJGF8Kv8Akap/+vN//Q0r2OvHPhV/yNU//Xm//oSV7HWmP/jGWX/wfmeNax/yV6P/ALCFv/7JXsteD+ItQdviBd3cI8uSG9AXvhoyFB/Nc17xTxiajT9CcFJOVS3cK8y+L3/MG/7b/wDtOvTa8y+L3/MG/wC2/wD7TrPBfx4/P8jXHf7vL5fmjzGiiivePngooooA9Q+EUjGPV4yfkUwsB6E78/yFd9q2q2ui6dJfXjlYY8A7Rkkk4AA79a8/+EP/ADGf+2H/ALUpnxH1k3tteaYYAgsL23xJuzv3xSHp2xXj1aXtMU49NP0Pbo1vZYRS662/E9PVg6K6nKsMg+1LUFkyvYW7KwZWiUgg5BGBU9cD3PQWqCvmeb/Xyf7x/nX0xXzPN/r5P94/zr08t+18v1PLzT7Pz/Qv+HZEh8S6XLK6pGl1EzMxwFAYZJNXvGXiGLxLrgvIIXihjhWJA+NxAJJJxx1Y1gA4NJXoumnPn6nmKo1DkWwV7D8PfFcN/paaVc7lubKAsZDgIYlIAOexAIH4Zrx6rVjfz6dLJJA2PNieFx2ZGUgg/nn6gVniKKrQ5TTDV3RnzLY+i7W6gvbZLm1mSaGQZV0OQamrmPh7/wAiNp3/AG1/9GvXT14NSPLNx7M+hpy54KT6oKKKKgsKKKKAPN/i5KBaaVFg7me
Rge2AFH9RXllenfF7/mDf9t//AGnXmNe9gv4Efn+Z8/jv48vl+RZ064S01O0uZAxSGZJGC9SAQTivXPhb/wAinJ/19P8A+grXjVey/C3/AJFOT/r6f/0Fazx6/dX9C8vf763qdtRRRXinuhRRRQBDdXdtZQGe6uIoIgQC8rhVGfc1HY6jZ6nC81lcJPGjmNmQ5AYdRXI/FT/kVYP+vxP/AEF6q/CW6D6RqFnt5inEpbPXeuMf+OfrXSqC9h7U5XiGsR7K3Q9CooormOoKKK4G68Tx2PxPniv7s21lBaiEAlijMQr5I6A/MRn0FaU6bqXS6K5nUqxp2cursdJ4o8QR+G9Fe9dGeRm8qFQOC5BIz7cV4RqOo3WrX8t7eyeZcS43vtC5wABwOOgFdF438XL4luYI7VZY7KAEhH4LOT94gH0xj0yfWuTr2MHQ9nC8lqzxMbiPaztF+6gooorsOI9A+E11Imt31oMeVLbea3HOVYAf+hmvW68e+FH/ACNNz/15P/6Glew14eOX75nv5e/3COF+K/8AyK1t/wBfqf8AoD149XrfxXuoBoVraecn2g3KyiLPzbNrjdj0ycV5JXoYFfuTzswf75nT/D3/AJHnTv8Atr/6KevdK8L+Hv8AyPOnf9tf/RT17pXFmP8AFXp/md2W/wAJ+v6IK8T8Z31xpvxHvLy1cJPC0TIxAOD5S9jXtleF/EL/AJHnUf8Atl/6KSll6TqNPt/kGYtqkmu/+Z7hBKJ7eKVWVg6BgVOQcjPFSVzPw+Zn8D6aWYk4kGSewkYCumrkqR5ZuPY7acueCl3QUUUVBYVznjf/AJANt/2FdO/9LIa6Ouc8b/8AIBtv+wrp3/pZDQB0dFFFABRRRQAUUUUAFFFFABXOeN/+QDbf9hXTv/SyGujrnPG//IBtv+wrp3/pZDQB0dFFFABXmXxe/wCYN/23/wDadem15l8Xv+YN/wBt/wD2nXVgv48fn+RyY7/d5fL80ZPwr/5Gqf8A683/APQkr2OvDfh3I6eOLBVdlDiRWAONw8tjg+vIB/CvcWZUUsxAUDJJPAFaZgrVvkZ5c70fmfPHiT/kadX/AOv2b/0M19CxOskKSKwZWUEMDkEHvXzxr8sc3iPVJYnV43u5WR1OQwLkgg9xXvHh1i/hnSmY5Js4Sf8AvgVrjl+7gzHL3+8mjSrzL4vf8wb/ALb/APtOvTa8y+L3/MG/7b/+065sF/Hj8/yOrHf7vL5fmjzGiiivePngooooA9E+Et3s1TUbLZnzYVl3Z6bGxj8d/wClZPjxiPEusrk4M9uSP+2R/wAap+CNVn0rxTZ+QqMLqRLWQMP4Xdc49+BU3j+Rh4z1SMfdZ4mP1EYx/M1xqFsS33X6o7nO+FS7P9GeweG/+RW0j/ryh/8AQBWnWV4ZdX8K6SVYEfY4hkeoQA/rWrXjVPiZ7dP4F6BXzPN/r5P94/zr6Yr5nm/18n+8f516OW/a+X6nm5p9n5/oMooor1DyAooooA9XsL19P+CxnjGWMUkY5xjfMyZ/Ddn8K67wuzP4V0pmYsxtY8knJ+6K89tJ2l+Ct8hcMIZwgH90eajY/wDHs/jU9l4ms1bwesd+0UNsrw3YJKqG2IBu7EZJ5+teRUoualb+Z/lc9qnWUHG/8q/Ox6JrF1JY6Jf3cOPNgtpJU3DIyqkjP5VS8Japcaz4Xsr+6KmeQMHKjAJViuce+M1P4k/5FbV/+vKb/wBANcP8InYxauhY7Q0RC54BO/J/QflXPGmpUJS6po6Z1HHERh0aZ6XRRRXMdJ5l8Xv+YN/23/8AadeY16d8Xv8AmDf9t/8A2nXmNe9gv4Efn+Z89jv94l8vyQV7L8Lf+RTk/wCvp/8A0Fa8ar2L4VMW8Kzg/wAN44H/AHwh/rUY/wDg/MvL/wCN8juaKKK8Q94KKKKAOH+Kn/Iqwf8AX4n/AKC9cn8Lr8W3ieS1eRgt1Ayqo6M64YZ/4CH/ADrq/iqceFrcet4g/wD
HHrzHw0xXxTpBBIP2yEcf74r1sPHmwrT8zx8TPkxakvI+h6KKK8k9gK8L+IX/ACPOo/8AbL/0Ule6V4X8Qv8AkedR/wC2X/opK78u/iv0/wAjz8y/hL1/RnMUUUV7J4YUUUUAd18KP+Rpuf8Aryf/ANDSvYa8T+Gl21t4yhiCgi5hkiJP8IA35/8AHAPxr2yvEx6tW+R72XO9H5nzz4nkeTxTqu92bbeTKMnOAHOBWVWn4k/5GnV/+v2b/wBDNZlexT+BHiVPjZ0/w9/5HnTv+2v/AKKevdK8L+Hv/I86b/21/wDRb17pXk5j/FXp/mezlv8ACfr+iCvC/iF/yPOo/wDbL/0Ule6V4X8Qv+R51H/tl/6KSjLv4r9P8gzL+EvX9Gdz8KZVbw1dReYC6XbEpnlVKLjjsCQfyNd3Xj/wu1cWeuSaaYd/28DEm7GzYrt0xznNewVljIONZ+ZrgpqVFeWgUUUVynWFc543/wCQDbf9hXTv/SyGujrnPG//ACAbb/sK6d/6WQ0AdHRRRQAUUUUAFFFFABRRRQAVznjf/kA23/YV07/0shro65zxv/yAbb/sK6d/6WQ0AdHRRRQAV4/8UdVN3r0WnCMBbFSd2eWLhSf0Ar2CvC/iF/yPOo/9sv8A0Uld2XxTq3fRHBmMmqNl1YfD3/kedO/7a/8Aop69h8Sf8itq/wD15Tf+gGvCvDlwLXxLpk7OUVLqPcwPRdwz+ma918Sf8itq/wD15Tf+gGtccv30X/W5lgH+4mv62PnevobwywbwrpJHT7HEPyQCvnmvd/AMrzeCNNaRixCuuT6B2A/QCtcxX7tPzMcsf7xryOkrzL4vf8wb/tv/AO069NrzL4vf8wb/ALb/APtOuHBfx4/P8jvx3+7y+X5o8xooor3j54KKKKAJba4ktLqG5iOJInWRT6EHIq5rmrPrmsT6jJEsbzBdyqcgEKF4/Ks6ilyq/N1K5nbl6Hu/gCRpfBGms5yQrr+AkYD9BXSVy/w8YHwPp4HYyg/9/GrqK+dr/wAWXqz6Wh/Cj6IK+ZK+m6+ZK78t+18v1POzT7Hz/QKKKK9Q8kKKKKANyLXpLbwdLokXllbq5aSfcp3AARlcHp1VvWscyZt0ix912bP1A/wqOipjFR2LlNy3PoXxTKsPhTVmc4BtJF/EqQP1NcR8IjxrA/64/wDs9bni+dbn4YS3CElZYLdwT1ILoawvhERu1hc8kQn/ANDryIRthZ+v+R7NSV8XD0/zPTqKKK4D0DyL4sTyN4hs4C5MSWodV7BmdgT+O1fyrga7r4r/API023/Xkn/ob1wtfQYX+DE+cxf8eQV7D8KP+RWuf+v1/wD0BK8er1/4UMf+Ebu17C8Y/wDjiVnj/wCCa5f/ABjvKKKK8M94KKKKAPKfiX4gN1NPoRtwotJ4pBLuzuzGSeO33h+VcPo9x9l1uwuNu7yrmN9ucZwwOK3PiCwHjXU17kwn/wAhL/jXMRSGKVJFxlGDDPtX0GHglRSXVfofOYibddt9H+p9M0UyJ/MiSTGNyg4+tPr58+jCvAfGcjyeMdUZ2LETFcn0AAH6Cvfq+f8Axf8A8jfqv/Xw1ehl38R+h5uZ/wANepiUUUV7B4oUUUUAdP8AD3/kedO/7a/+inr3SvmuzvLjT7yK7tJminibcjqeR/8AW7Y712Phj4gaja6wF1S6kuLK4kJkDDcYy3de4AOOOgGcCvOxmGnUlzx6I9LBYqFKPJLqzmvEn/I06v8A9fs3/oZrMrT8Sf8AI06v/wBfs3/oZrMrup/AjgqfGzpvh7/yPOm/9tP/AEW1e6188+GXZPFWklWKn7ZEMg44LgH9K+hq8rMV+8T8j18sf7trzCvC/iF/yPOo/wDbL/0Ule6V4X8Qv+R51H/tl/6KSll38V+n+RWZfwl6/oyp4Ou2svGGlyqoYtOIsH0f5Cfw3Zr3+vnbw8yp4m0p3YKq3kJJJwAN4r6JqsxXvxfkRlj9yS8wori/hld
3N74cuZbq4lnk+2ON0rljjYh6n3J/Ou0rhqQ9nNx7HfSqe0gprqFc543/AOQDbf8AYV07/wBLIa6Ouc8b/wDIBtv+wrp3/pZDUGh0dFFFABRRRQAUUVzNj4406+vraFLW+jtbuZ4LS/kiUQXEi5yqndu52tglQDjgmgDpqKyta1+DRfssbW1zd3d25jt7W1UNJIQNzEbiFAAGSSQOnrVE+NNN/sOPUlhu2eS5+xLZCL/SDcAkGLaTgMME9cYGc45oA6Ouc8b/APIBtv8AsK6d/wClkNX9E1231yK48uC4tri1l8m4tblQskL4DAEAkEEMCCCQQetUPG//ACAbb/sK6d/6WQ0AdHRRRQAV4X8Qv+R51H/tl/6KSvarzUrHTwhvb23tg+dvnSqm7HXGTzXhvja9ttQ8Yahc2kyzQMUVZF6HaiqceoyDz3r0Mui/aN9LHm5lJezSvrf9GYcMnkzxy4zsYNjPXBr6C8QSJL4S1SSNldHsZWVlOQQYzgivnquy8O+LBbeHda0rU7yZlls2SzD5cK2wrsHUgHK47DB6d+zF0XPlkuhx4OuqfNGXU42vdPh7/wAiNp3/AG1/9GvXhde2eAb6zh8FafHLdQI48zKtIAR+8btU5gr0lbv/AJlZa0qrv2/VHX15l8Xv+YN/23/9p16H/aen/wDP9bf9/V/xrzb4r3Vvc/2R5E8Uu3zs7HDY+56VwYNP28fn+R6GNknQl8vzPN6KKK90+fCiiigAooooA9k+F149x4Xe3ZVAtp2RSO4OG598k/pXb15b8KLuC3XVlnuI4gTCVEjhc/fzjP4V6R/aen/8/wBbf9/V/wAa8DFwarSsfRYOadCN2Wq+ZK+kf7T0/wD5/rb/AL+r/jXzdXZlqa5r+X6nFmbT5Lef6BRRRXpnlBRRRQAUUUUAew69/wAkdi/68rT+cdcr8LbpYPFUkLuR9otmVVzwzAhv5Bq1tR8Q6Vf/AAqNlDeRi7ht7eJoHO1yysmcA/eHBORmvNoJ5radJoJXilQ5V0bBB9jXBQpOVKcHpds9GvVUasJrWyX6n0vUDXlsl19la4iFx5fm+UXG7ZnG7HpnvXz+niTW0uo7kardtLG29S8pYA4x0PHSqM13c3EzTTXEskrZBdnJJznPPvk/maxWWvrI3eZrpE3/AB1rtvr/AIjM9qp8iCMQK+f9ZhmO4eg+b9K5qiivThBQioroeVUm5ycn1CvUfhHNI0GrQFj5aNE6r6EhgT/46Pyry6uo8C+JF8Pa0ftMojsLhds52FsYBKkYGepx+NY4qDnSaW5thJqFaMnse50VlWvibQ70J9n1W0Yuu4KZQrY+h5H0q3/aen/8/wBbf9/V/wAa8Fwkt0fQqcXqmWqKq/2np/8Az/W3/f1f8aZJrOlwgGXUrNAe7TqP60csuwc0e54x8Qv+R51H/tl/6KSuYrovHVzBd+Mr+e2njmhby9skThlOI1BwR71ztfQ0P4UfRHzdfWrL1Z9E+HnaTw1pTuxZ2s4SzMcknYOTWlXK+CNVgbwbpwur2ESqjJh5FBCq7BRj6AV0H9p6f/z/AFt/39X/ABrwakGpteZ9DSmnCLv0Rar5+8XMG8XaqVII+0sOD717TqfirRdJiSS6v4sOcKIzvJ/Bc14Nqd0t9qt5eKpVZ53lCnqAzE4/Wu7L4SUnJrQ8/MqkXFRT1KtFFFeqeQFFFFABRRRQBLcztdXc1w5JeV2dsnJyTnrUVFFGw3qafhv/AJGnSP8Ar9h/9DFfRFfO3h5lTxNpTuwVVvISSTgAbxXv/wDaen/8/wBbf9/V/wAa8nMU3KNj18taUJXLVeF/EL/kedR/7Zf+ikr2r+09P/5/rb/v6v8AjXiXj6WObxrqEkTq6Hy8MpyD+7XvU5emqrv2/wAi8yadJW7/AKM5uvffBuqRar4WsJEfdJFEsMoLZYMoxz9cZ+hrwKu4+G/iKw0W9u7a/k8lLvZslb7qld3B9M7uvTiuzG0nUp3W6OH
A1VTq2ezLPgHxdZaJZR6ZcxSZurwkyggLGCqKCc+459q9ar5kr6S08MNNtQ5ywhTJ98CuPH0oxkprqduXVpTi4PoWa5zxv/yAbb/sK6d/6WQ10dc543/5ANt/2FdO/wDSyGvPPSOjooooAKKKKAEYblIyRkYyK8x0q01GXRvCnhZ9KvobrR7yF7u4kgZYBHBuwyyfdff8uApJ+Y5xivT6KAOK1W7nk1Xw34oTS9SNpAl1BcW/2VjcRCTbtcxDLEZixxk4YGsePTtShjt/EjaZdlB4il1JrIRkzrbvCYA3ljnd0fb1wT34r02igDlvCsVxc614g1uS0uLW3v5oVt47mMxyMkcYUuUPK5JOAcHAHFUfGegWccNvqizagbg6vp52NqE5h5u4h/qi+z/x3jr1rt65zxv/AMgG2/7Cunf+lkNAHR0UUUAZWr+G9I114n1KzE7xAhG3spAP+6RmvE/Ful2+jeKL2wtN3kRlSgY5IDKGxn2zivoGvEviVbiHxnPIGJM0UbkHt8u3/wBlr0cvqS5+VvSx5uY04+z5ktbnI10/h3wvHq2gazqty8yR2ULNDsIAeQKWIOQeB8v/AH11rmK95utPi0v4f3lpFEsfl6bIGwoBZvKOScdSe5rtxVZ00kt2zhwlFVG29kjwavXvBXhfRNR8I2N1d6dDNPJ5m52zk4kYDv6AV5DXunw9/wCRG07/ALa/+jXrPHycaaadtf8AMvL4xlVakr6f5Fn/AIQrw3/0CLf9f8a4D4l6Lpuj/wBl/wBn2kdv5vm79mfmxsx/M167XmXxe/5g3/bf/wBp1xYOpN1opt/0jvxtOEaEmkun5nmNFFFe2eCFFFFABRRRQB6D8NdB03V4dSk1C1juNjRqgcH5eGz+fH5V3n/CFeG/+gRb/r/jWF8LNOltvD897IRsu5cxgdcLlcn8c/lXd14eKqy9tLlke/hKMPYx5oq5g/8ACFeG/wDoEW/6/wCNeBV9N18yV1ZfOUubmd9v1OTMoRjy8qtv+gUUUV6R5YUUUUAFFFFAHoN74Q0zT/hodWKNLqEkUMwlLEBN7L8oUHHRsc59eO3n1ew69/yR2L/rytP5x1xHw7sRfeMbYvGkkcCPK6uuR0wD+DMDXFQqv2c5y1s2d+Iop1YQgrXSOVAycCnNG6NtZGVsBsEYOCMg/THNfR50ywM0MxsrfzYP9U/lLlPoccU69tI7+xubSUkJcRNExXqAwIOPzrH+0lf4fxNv7Mdvi/A+bKK6bxvoVr4d1SzsbVnZfsgd3fqzF354+gH4VzNejCanFSR5tSDhJxe6Cug8H+HB4l1r7LJI8dtHGZJnTrjoADgjJJ79ga5+vTPhEimTV5CPmUQgH2O/P8hWWJm4UnJbmuFpqpWjF7HTWnw78NWqIGsWuHUcyTSsS31AIX9Kt/8ACFeG/wDoEW/6/wCNb1FeG61R7yf3nvqhSWiivuMH/hCvDf8A0CLf9f8AGmSeBvDUqbW0mIDOflZlP5g10NFL21T+Z/eP2NP+VfceA+MtOtdK8V3tlZReVbx7Nibi2MopPJJPUmsKvQPihoMlvqY1tMGC52RyZbkSBSBx6bVH45rz+vew8+elF3PnsTDkqyVranr3grwvomo+EbG6u9Ohmnk8zc7ZycSMB39AK3/+EK8N/wDQIt/1/wAag+H6MngfTQykHEhwfQyMR+ldLXi1qs1UlaT3Z7lClTdKLcVsuhxmtfDfSdSWD7FjT2jYlzGm4SKexyeDxwfc8GvHbu2ksr2e0mAEsEjRuAcjcpwf5V9K187+JP8AkadX/wCv2b/0M13YCrOTcZO5wZjRhBKUVZszKKKK9I8sKKKKACiitbw1o767r9rYqMxs26U5xiMct+nA9yKUpKKbZUYuTUV1Mmir+uW8Vpr+pW0CbIYrqWNFznChiAPyqhQndXE1Z2NDQoI7nxDpsEyB4pbqJHU9GUuARXtv/CFeG/8AoEW/6/414r4b/wCRp0j/AK/Yf/Q
xX0RXmZhOUZR5XY9bLoRlGXMrmD/whXhv/oEW/wCv+NeQ+NbK207xdfWtpCsMEfl7UXoMxqT+pNe+V4X8Qv8AkedR/wC2X/opKjATlKo03fT/ACKzCnCNJOKtr/mcxXUeD/B9z4ju0mkUppkblZpQwySADtA65ORz0/lWDptp/aGq2dkX2C4nSLdjONzAZ/WvomwsbbTLGKztIhFBEu1VH+evfNdWMxDpR5Y7s5MFhlWlzS2R82V9HaPP9q0PT7jbt822jfbnOMqDivFvD3grUPEdmLu1mgSEXBhk3k5XAB3Y79ele4WltFZWcFrCCIoI1jQE5IVRgfyrnzCpGVop6o6ctpzjeTWjJq5zxv8A8gG2/wCwrp3/AKWQ10dc543/AOQDbf8AYV07/wBLIa8w9U6OiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACvGPih/yNw/69k/ma9nrx/4qWc0XiG3u2UeTPAFQ57qeRj/gQ/Ou3AP998jhzBfufmcKAWYKoJJOAB3r6ZZQylWAKkYIPevnnw0pfxTpIVSx+2RHAGeA4Jr6HrbMnrFGOWL3ZP0PmSvdfh8CPA2mggj/AFh5/wCujV4bNH5U0kec7GK59cGvoDwoMeE9JH/TrGf0rXMX+7S8zHLV+8b8jYrzL4vf8wb/ALb/APtOvTa8y+L3/MG/7b/+064cF/Hj8/yO/Hf7vL5fmjzGiiivePngooooAKKnsrSS/v7eziKiSeVYkLHgFiAM+3Nani2wXS/Ec9gr7xbxQR79uNxESZOO2anmXNy9S+R8vN0PWfh7/wAiNp3/AG1/9GvXT1l+G/8AkVtI/wCvKH/0AVqV87Vd6kn5s+loq1OK8kFfMlfTdfMlehlv2vl+p5uafY+f6BRRRXqHkhRRRQAUVsjwxfnwy2v5h+xA4xuO/wC+E6Y9T69jWSY8QJLn7zMuPoB/jUqUZbMqUJR3R7N4ttWsfhbJaMctBb28RPqVdB/Sud+Elsr3+p3RPzRxJGBjsxJP/oArsPHyBvA+pKegVD+Tqayvhbpv2Xw5LfMo33kpIYHqifKMjt82/wDOvIjO2Flfq/8AI9qdO+LhbZL/ADO5ooorgPQPHviv/wAjTbf9eSf+hvXC16P8W40F7pcoUB2jkUtjkgEYH6n8684r6DCO9GJ87jFavIK9f+FCKPDV24A3G8YE+oCJj+ZryCvYfhR/yK1z/wBfr/8AoCVnj/4Jpl/8Y7qiiivDPeCiiigDhfiv/wAitbf9fqf+gPXj1ew/Ff8A5Fa2/wCv1P8A0B68er28B/BPBzD+MfQnhWFYPCekopJBtY359WUE/wA616itbaKztIbWBSsUMaxoCc4UDA/QVLXjTfNJs9yC5YpBXgvji1jtPGmpxxZ2tIJDk55dQ5/VjXvVeF/EL/kedR/7Zf8AopK7cuf71+n+RwZkv3Sfn/mcxRXVeHvB0+u+HdR1GBlaeI+XDCcglhtYnP8AukgD1PauVr1ozjJtLoePKnKKTfUKKKKsg3fBljHqHi/TbeXBj8wyEEZB2KWwR6HbivcNP0bTdKMhsLGC3MhJYxoATznGfT26CvGfh7/yPOnf9tf/AEU9e6V5GYSftEr6WPay2K9m5W1v/kfO/iT/AJGnV/8Ar9m/9DNZlbfi6xubHxVqIuYmj864kmjz/EjOSCKxK9Sm7wVjyaiam0+5p+G/+Rp0j/r9h/8AQxX0RXzv4b/5GnSP+v2H/wBDFfRFeZmPxRPVyz4JeoV4X8Qv+R51H/tl/wCikr3SvD/iPbyw+NbuSRNqzJHJGc/eXYFz+akfhUZd/Ffp/kXmX8Jev+ZQ8G2hvfGGlxBwhWcS5Iz9z58fjtx+Ne/15R8JrGOXU9QvW5eCJY0BAwN5OT9fkx+Jr1elj53q8vYeXQ5aXN3OF+FH/IrXP/X6/wD6Ald1XEfC4BfDt6AMAX7jH/AErt6wxX8
aRvhf4MQrnPG//IBtv+wrp3/pZDXR1znjf/kA23/YV07/ANLIawOg6OiiigAooooAKKKKACiiigArnPG//IBtv+wrp3/pZDXR1znjf/kA23/YV07/ANLIaAOjooooAK85+Lcamw0yQj51ldQfYgZ/kK9Grzv4tf8AIL07/rs3/oNdOD/jROXG/wACRxPgZGk8aaYqDJEhb8ApJ/QV71Xhfw9/5HnTv+2v/op690rbMf4q9P8AMwy3+E/X/I+ctagFrruoW6kssVzIgJ6kBiK938Lgr4U0kH/n0iP/AI6K8a8b2qWfjPU4o87WlEpz6uoc/qxr2jw3/wAitpH/AF5Q/wDoArXGy5qMH/WxlgY8taa7f5mnXmXxe/5g3/bf/wBp16bXmXxe/wCYN/23/wDadcuC/jx+f5HXjv8Ad5fL80eY0UUV7x88FFFFAHWfDnTk1DxfC8m0paxtcFWGckYAx7hmB/CoviF/yPOo/wDbL/0Ulb/wktEfUNTvCzb4okiA7EOST/6APzNavxUtbdNAhulgjW4ku0V5Qo3MAj4BNee6tsXbyt+p6So3wd/O/wCh1vhv/kVtI/68of8A0AVp1meG/wDkVtI/68of/QBWnXkz+Jnr0/gXoFfMlfTdfMlellv2vl+p5mafY+f6BRRRXqHkhRRRQB6Yq4+BzH1Of/JmoPh54YW+K6hfRSoLSbfCjp8sm5RzyOcfKRirP/NDP8/8/Nd9oUbQ+H9NicYZLWJT9QgryKlVwhNLrJntUqSnUg30ihmv2MWpaBfWkxYI8ROVPII5H6gVlfD3/kRtO/7a/wDo166WSNZYnjcZV1Kn6GqejaVBomkwadbNI0MO7a0hBY5Ysc4A7k1xqf7pw87/AJna4fvVPya/IvUUUVkanl3xc/1+k/7sv81rzWvTvi6g/wCJRJnn96uP++K8xr3sF/Aj/XU+ex38eXy/IK9h+FH/ACK1z/1+v/6AleR21vLd3UNtAm+aZ1jjXIGWJwBk+9fQPh/QbXw7pYsrXeQzmSRnbJLkAH8MAD8PWsswmlT5erNsupt1OfojVooorxj2wooooA4f4pxSSeFrfYjNtvELbRnA2uOfxIH415j4b06LVfEdhZXAcwzSfOF4JUZJ/lX0NVUabYi7S7FpALiMFUlCDcoPJwffJ/M+tdtHF+zpuFjhr4P2tVVL/ItUUUVxHcFeZ3OkSX/xkbcoMKKly+cH5FjVeR7tgfjXplU10uzTV31VYsXjw+Sz5PKZzjHTsPyrajV9nzPurGNal7TlXZ3Od+H1qLLTdVt1jZEj1OZEDZ+6AoHX6V5b4u0yDR/FN9Y22/yY2UrvOSNyhsfrX0BXn/j/AMFvqZGqaXA8l+zqs0Ybh1xgNz3GAPpXThcQlWblpc5MXhm6CjHVo8koq7qGk6hpMipf2c1uWzt8xSA2OuD0PUU2x0u91K6ht7S2lkkmPyYU4xnBOfQdz2r1+aNr30PG5ZX5bam/8OYZJfG1k6IWWJZHcj+EbGXJ/EgfjXuNcj4Q8Ep4clW9knL3j25ilReUyW3ZHfoFH4H1rrq8PGVY1al47Hv4KjKlStLdnnHxYsLb7DZ6j5f+leYIN+T9zDNjHTrXldfQXiXw5beJtOW0uJpYvLfzEePHDYIGQeo59vrXhDWEq6RFqIyYnneA4H3WVVYZPuGOP90134GqnT5b6o87MKTVXmtoyx4b/wCRp0j/AK/Yf/QxX0RXg3gSNJfGumK6K4Ds2GGeQjEH8CAa95rnzF++l5HVli/dt+YV5X8Q9JvNY8bWFpZxF5ZbMBSeF4dycn2/rXqlFcdGq6UuZHZXoqtHlbMbwx4eh8NaT9iimaZmcySSEY3MQBwOwwBWzRRUSk5Pme5pGKilGOwyKGKBSsMSRqzFiEUDJPU8dzT6KKkoK5zxv/yAbb/sK6d/6WQ10dc543/5ANt/2FdO/wDSyGgDo6KKKACiis/Vdas9FijkvBdlZCVX7NZzXBz7iNWI/Gg
DQorivEHju2TwtrFzpAvkvbazeWNrnTbiFFPABzJGqnBIOM9qdZW9x4b8aaXpiapqF9a6jZXDTLe3DTFZYjHiRS2duRIwKjC9MCgDs6K5PXvP1XxlpegG9u7SyayuL2Y2kzQvKyPGirvUhgB5hJwecCsEQeIdd8NQWdpdXFzLpeuz29xJ/aElm9zbx+aqhpYxuzkx5wOSuaAPSq5zxv8A8gG2/wCwrp3/AKWQ07wdeRXOlXFusN5BcWN09tcw3d2906SABuJGJLKQykH0PQVk+M59dMNvFJp2nrpn9r6fi4W9czY+1xY/d+UB1wPv+/PSgDt6KKKACvO/i1/yCtO/67t/6DXolcJ8V1U+GbVsDcLxQD3AKP8A4CujCO1aJzYxXoSOV+FkEcviyR3QM0Nq7xk/wtuVc/kxH417LXj3wo/5Gm5/68n/APQ0r2Gtcf8AxjLL/wCD8zwv4hf8jzqP/bL/ANFJXuMUSQQpFEoWNFCqo6ADgCvJNc0yHWfi3Np9wzrFMyBihAPEIPGfpXr1PFyXs6cfIWDi/aVJef8AmFeZfF7/AJg3/bf/ANp16bXmXxe/5g3/AG3/APadZ4L+PH5/kaY7/d5fL80eY0UUV7x88FFFFAHrnwnijGgXsoRRI11tZsckBVIH4ZP51L8V/wDkVrb/AK/U/wDQHpvwo/5Fq7/6/G/9ASrPxPtvP8IGTdt8i4STGPvZyuP/AB7P4V4zf+2a9z3Ev9i07HU6ZaHT9Js7JnDm3gSIsBjdtUDP6Vaoorhbu7s70rKyCvmSvpuvmSvTy37Xy/U8rNPsfP8AQKKvaLGk2u6fHIivG9zGrKwyCCwyCK2fiBp40/xhdBIljhnCzRhQACCME4H+0Gr0XUSmoHmqm3Bz87HMUUUVZmez6N4fk1P4XWukTS+Q1xH5gcDdgGTzF4yO2PzrsIIhBbxwqSRGgUE+wxVDw3/yK2kf9eUP/oArTr5yrNuTT7tn09KCjFNdkgooorI1CiiigDzn4s208ljp1wkTtDC8gkcDITdtxn0zivKq+gfFtml94S1SFyQBA0gx6p84/VRXz9XtYCfNS5ex4eYw5avN3Og8F6Xc6n4oszbBSLWVLmQscYRXXOPfmve684+ElvtstTud2fMkSPbjptBOc/8AAv0r0euLHVOarbsd2Ap8tHm7hRRRXEdwUUUUAFFFFABRRRQAUUUUAFFFFAGPrfhuz1650+a7LH7FKZAmAVcHGVYHqPlH61oWVjbadapbWkKQwJnaijgZOT+pqxRVOcmlFvQlQipOSWrCiiipKCud1LwZpN7oEukwQi0jaUzo0efllwRuweowcY9OmOMdFRVRnKLvFkyhGatJHE+EPAR8O6nLf3V1HcShSkIRMAA4+Y579Rgfmc121FFOpUlUlzS3Jp0o0o8sFoFFFFQaBRRRQAUUUUAFc543/wCQDbf9hXTv/SyGujrnPG//ACAbb/sK6d/6WQ0AdHRRRQAUUUUAV76yttSsLixvIhLbXEbRSxnoysMEfkayNJ8KW+lXpvX1DUL+6W3+zQy3kqsYYsglV2qOpC5JyTtGTW/RQBz9z4ThubTTF/tTU473TkMcOorKpuGVgAwcspVt2ATleoB4pU8KxWukW2nadqmpaesLvIZoJEaSZnJZmcurBiWJOcdT2rfooAzdE0S10KzkgtnmlaaVp55533yTSNjLMfXAA4AAAAArO8b/APIBtv8AsK6d/wClkNdHXOeN/wDkA23/AGFdO/8ASyGgDo6KKKACuF+K/wDyK1t/1+p/6A9d1XE/FKCWXwnG8aFlhukeQj+FdrLn82A/Gt8L/Gj6nPiv4MvQ5T4Uf8jTc/8AXk//AKGlew15D8J4nPiO8lC/ItoVJ9CXUj+Rr16tcf8AxmZZf/BPMTEZPjg3ONuGP/gOK9Org7q2ig+MdlJGCGuLMySZPVgrr/JRXeVGJd+T/Ci8Mrc/+JhXmXxe/wCYN/23/wDadem15/8AFmKM6HY
zFQZFudqt3AKkkf8Ajo/KjBu1eIY1XoS/rqeSUV0Wn+HVufBep66WLvBIIUiAPy8oS+e/DEY+prna92M1K9uh4EoONm+oUUVPeWc1jcG3uEKShVYqQQRuUMAQe+CKq/Qm3U9Z+FKkeGLk+t42P++ErtLyzttQtmtruFJoXxuRxkHByP1Fcd8K/wDkVZ/+vx//AEFK7ivn8S2q0n5n0WFSdCKfYKKKK5zpCvmSvpuvmTvXqZb9r5fqeTmn2Pn+hp+HY2l8TaWi9Tdxf+hCvQPivpTyW9lqscYIizDM3fBOV/DO7865r4daQmp+JkleVkFmBcAAZ3kEAD2616v4l0g674fu9PVwkkigxs3QMCCM+2Rj8arE1lDERfbf5k4ai54aa77fI+eqnsrSS/v7eziKiSeVYkLHABY4Gfbmt3w/4RufEGj6heWrbp4CqQwggb2JBOSeANufx/Xb8AeHZ4fGlx9shcHTVbLAfJ5h+UDkc8FiPoDXXUrwjGVnqjjp4ecpRutGer20CWtrDboSUiRUUnrgDFS0UV88fSbBRRRQAUUUUAQXtqt7Y3Fo7FVniaMkdQGGP615Nf8Agb+xfBF/fakq/wBoRzL5RjkJAQsq8/XJP5fSvYKoa1pMGuaTPp1w8iRTbdzRkBhhg3GQfSuihXlTdr6XVznxGHjVV7a2dil4U8PReG9H+yJMZ3kcyvIV25JAA4ycYAHf1rcpAAqgDoBilrGUnKTk9zaEVCKitkFFFFSUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/9LIa6Ouc8b/8gG2/7Cunf+lkNAHR0UUUAFFFFABRRRQAUUUUAFc543/5ANt/2FdO/wDSyGujrnPG/wDyAbb/ALCunf8ApZDQB0dFFFABXMfEL/kRtR/7Zf8Ao1K6esfxXCJ/CWqoSQBau/H+yN39K0ou1SL80Z1lenJeTOC+En/IQ1P/AK5J/M16rXlXwk/5CGp/9ck/ma9VrfHfx2c+A/gL5nGX8Mp+LWlyiNzGLBssFOBzJ3/EfmK7Oiiuec+a3krHRCHJfzdwrgviv/yLlp/19j/0Bq72uD+K4/4pq0PYXij/AMcetcL/ABomeL/gSJvBmmJdfDRraHEcl/HOrucn5iWQHH0C/lXjVe8+BIDb+CtMRjyUZ+mPvOzf1rwu5gNtdzW5IYxOyEjvg4r0cJK9SovP/M8vGRtSpvy/yGxRNNMkSDLuwVR7mu5+KelNba9FqSRkRXUYV3znMi8fh8u38jWnoHw+e60fS7m9b7Lcx3HnlQuWeI7SFPoeD9Nx4re+JNhHeeEJp2yJLR1lQjvk7SD7Yb8wKU8TF14qPmvvKhhZKhNyXZr5FX4V/wDIqz/9fj/+gpXcVy/w7RV8D2BVQC5kLEDqfMYZP4AflXUV5uId6svU9PDK1GPoFFFFYm4V8yd6+m68C8VaBcaLqczyW/kW09zMLZcg5jUggjnphhXpZdJKUovqeXmcG4xkul/0Op+EluGvdUudxzHHHGF9dxJ/9l/WvVK4z4ceH5dH0eW6uUliubwgtFIu3YqlgvHXJyTz7V2dc2LmpVm0deDg4UYpnnvwm/5A+of9fA/9BFehYrnfC2iXGjT6x5scccVxevLAqHgRnpx2+ntXRVOIkp1XJFYaDhSUX0CiiisDcKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG/wDyAbb/ALCunf8ApZDXR1znjf8A5ANt/wBhXTv/AEshoA6OiiigAooooAKKKKACiiigArnPG/8AyAbb/sK6d/6WQ10dc543/wCQDbf9hXTv/SyGgDo6KKKACmTQx3EEkEyB4pFKOrdGBGCDT6KAPK/hKjLqGqZUjbGgOR0OTxXqleffDkEa14nBGCLhOP8AgUt
eg11Yx3rN+n5HLglail6/mFFFFcp1BXC/Ff8A5Fa2/wCv1P8A0B67quF+K/8AyK1t/wBfqf8AoD1vhf40Tnxf8GR1Hh5Gj8NaUjqVdbOEMrDBB2Dg14P4gjWLxJqkaDCpdyqB7BzX0VXzv4k/5GnV/wDr9m/9DNdmXu85M4sxVqcUfQsP+pj/AN0fyrL8VQR3HhPVklXKi1kcDPdVLD9QK1gAAAOgoIDAggEHgg150Zcskz05R5ouJzPw9/5EbTv+2v8A6NeunpscaRRrHGioijCqowAPQCnU6kuebl3FTjyQUeyCiiioLCqt1p1lfSwS3VrDNJbsWiaRA2w+o/IfkKtUU02thNJ6MKKKKQwooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv/yAbb/sK6d/6WQ10dc543/5ANt/2FdO/wDSyGgDo6KKKACiisrVdAttYljknutThMa7QLPUJrcH6iNgCfc0AL4g1pNB0prxoHuJWkjgggQgNLLIwRFBPAySOewyaraJr1zf6le6VqenrYajaxxzGOOfzo5In3BXV9qk8owIIGCO+ay9Y8HNFpO7SJ766vYLu3vIor/UZplcwyBtgMjNs3DIyO+M9KWyh1t9Y1jxPNorxXH2CO1stNe5j8yXYXc7nUlF3M4A5OAMmgC/4j8Q32g3FgY9KS5sri6gtpbg3QjMTSyiMYXaS2NwPb61JrWvXNjqdnpWmact/qNzHJP5ck/kxxxIVBZm2serqAADnPbFZnjhdYvbHTrbTtBub1kvLW8laKeFAgimR2T53UkkKcYGPenahDq8evaZ4mtNGmnf7FLaXWn+fEs0YZkdWDFthwUIPzfxcZxQBt+H9aj17SEvVge3kEkkM0EhBaKVGKOpI64ZTz361neN2H9hWwyM/wBq6dxn/p8hqbwjpV3pWiyC/VEvbu7nvZ4423LG0sjPsB74BAz3IrH8aeHdEigt9Wj0ewTUjq+nsbtbdBKSbuIE78Z5BI60AdtRRRQAUUUUAcN4GVl8UeLyVIBvRjI/25f8RXc0gVVLFVALHJIHU0taVZ88uYzpU/Zx5fX8wooorM0CuJ+J0D3WgWFvGQHl1CNFz0yVcV21UdT0m11ZLdLtWZbedbhNrY+dc4z7cmtKM1Cak+hlWg503FdS9Xhfj3TzaeNLxIoiq3BWZAOdxYDJ/Ft1e6VxfjuNJdU8MI6gq+oorD1G5a3wdTkqephjqXPS9H/wDtKKKK5DsCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5zxv/yAbb/sK6d/6WQ10dc543/5ANt/2FdO/wDSyGgDo6KKKACiiigAooooAKKKKACuc8b/APIBtv8AsK6d/wClkNdHXOeN/wDkA23/AGFdO/8ASyGgDo6KKKACiiigAooooAKKKKACiiigAqC4srW7eFriCOVoHEkRdc7GHQj3qeihO2wmr7hRRRQMKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArnPG//IBtv+wrp3/pZDXR1znjf/kA23/YV07/ANLIaAOjooooAKKKKACiiigAooooAKo6vpFnrmntY3ySNAzpJ+7laNgyMHUhlIIIZQeD2q9RQBzn/CEaV/z9a5/4PLz/AOO0f8IRpX/P1rn/AIPLz/47XR0UAcRqngyNNR0VbK514273jLe41q7OIvIlIzmXj94I+R/LNan/AAhGlf8AP1rn/g8vP/jtJq/jSx0i9urdrLULpbKNZb6a1iD
paowyC+WBPA3YUMQOcV0UciTRJJGwdHAZWU5BB6EUAc9/whGlf8/Wuf8Ag8vP/jtH/CEaV/z9a5/4PLz/AOO10dFAHOf8IRpX/P1rn/g8vP8A47R/whGlf8/Wuf8Ag8vP/jtdHRQBzn/CEaV/z9a5/wCDy8/+O0f8IRpX/P1rn/g8vP8A47XR0UAc5/whGlf8/Wuf+Dy8/wDjtH/CEaV/z9a5/wCDy8/+O10dFAHOf8IRpX/P1rn/AIPLz/47R/whGlf8/Wuf+Dy8/wDjtT6h4mh03XbDS59Pvz9tlEEV0sa+SHKM4UksDnCN0Bqvq3jWw0m9u7d7O/uY7FFkvri2iDR2isMgvlgT8vzEKGIHJoAzfEXguKLwzqsmk3OvHUltJTahdau2Jl2HZgGXB5xwa0l8EaXtGbrXM45/4nl5/wDHak1bxdaaZefZYbK/1GZbcXUq2Mav5UJJAdiWHXDYAyTg4FF34vsYU0/7Db3eqy6hAbq3hsUVmMIAzIdzKAvzKOTkk4ANADP+EI0r/n61z/weXn/x2j/hCNK/5+tc/wDB5ef/AB2lk8Z6b/Zem3tpDd3z6kxW1tbeMec5UEuCGKhduCG3EYIxWjous2uu6f8Aa7VZU2yPDLDMu2SKRThkYdiD/iOKAM3/AIQjSv8An61z/wAHl5/8do/4QjSv+frXP/B5ef8Ax2ujooA5z/hCNK/5+tc/8Hl5/wDHaP8AhCNK/wCfrXP/AAeXn/x2ujooA5z/AIQjSv8An61z/wAHl5/8do/4QjSv+frXP/B5ef8Ax2ujooA5z/hCNK/5+tc/8Hl5/wDHaP8AhCNK/wCfrXP/AAeXn/x2tHVtWk0wwrFpOoag8u75bNEO0DH3i7Ko68c5PPpWe3jPTf7DttUjiu5Tc3BtIrRIv37TgsGj2kgBgUbOSAApOcUAJ/whGlf8/Wuf+Dy8/wDjtZegeDI5NOlbVLnXhcC8uVTdrV2v7oTuIukv/PMJz3781qr4003+xrvUZobuB7ScWs1nJEPPWZioWMKCQS29cYJB3DmiDxlpxsdUub2G6059LQPd292g8xFYEqw2FgwbBAwTyCOtACf8IRpX/P1rn/g8vP8A47R/whGlf8/Wuf8Ag8vP/jtP0/xdZ3ct1DeWl7pU9tb/AGt4r9FQmHn94CrMMDByM5HcCk0fxhaavfQWhsdQsnuoDcWhvIlQXMYxlkwx6blO1sNg5xQA3/hCNK/5+tc/8Hl5/wDHaP8AhCNK/wCfrXP/AAeXn/x2ujooA5z/AIQjSv8An61z/wAHl5/8do/4QjSv+frXP/B5ef8Ax2ujooA5z/hCNK/5+tc/8Hl5/wDHaP8AhCNK/wCfrXP/AAeXn/x2ujooA5z/AIQjSv8An61z/wAHl5/8do/4QjSv+frXP/B5ef8Ax2ty9uWs7OS4W2muWQZEMABd/YZIH5kVz58cafBDfnUbK/064so45XtbiNTI6yMUTZsZlbcw2gA9euKAJP8AhCNK/wCfrXP/AAeXn/x2suDwZGfFF/HLc69/Zi2du0B/tq7x5pebzOfNyflEXH+JrWtvGOnvb6nJfQXelyabCLi6hvUUOsRBIcbGYMDtYcEnII60umeLbS/upbW5sr7TJ0t/tYjv41QvDnBcFWYYBxkHBGRkUAM/4QjSv+frXP8AweXn/wAdo/4QjSv+frXP/B5ef/HaNI8Z2Wr3trbCy1C0F7E01lLdRBEukGCSmGJHBBwwU45xTLHxxp19fW0KWt9Ha3czwWl/JEoguJFzlVO7dztbBKgHHBNAD/8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OigDnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigDnP+EI0r/n61z/weXn/AMdo/wCEI0r/AJ+tc/8AB5ef/Ha6OsPVvFWn6Pr
Om6VMk8t1fyLGohUERbs7WckjAJBA6k4PHBoAg/4QjSv+frXP/B5ef/Hay9U8GRpqOirZXOvG3e8Zb3GtXZxF5EpGcy8fvBHyP5ZrRbxxpy6i1ubW++yLdixbUfKX7OJ87dhO7d975d23bnjNGo+ONO029uoZLW+ltrJ0jvb6GJTBaswBAclgxwGUnaDgHnFAD/8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHaXWPGNnpF7cWv2HUL1rWEXF49nErrbRnOC+WBOQpOFDHAzin6j4ts7Oa2gtLO91Se4t/tax2CK5EPaQlmUYOeBnJ5wDQBH/whGlf8/Wuf+Dy8/8AjtH/AAhGlf8AP1rn/g8vP/jtbOmaja6vpltqNlL5trcxiSN8YyD7dj7VaoA5z/hCNK/5+tc/8Hl5/wDHaP8AhCNK/wCfrXP/AAeXn/x2ujooA5z/AIQjSv8An61z/wAHl5/8do/4QjSv+frXP/B5ef8Ax2ujooA5z/hCNK/5+tc/8Hl5/wDHaT/hBtGMsLySatN5MqTIk+r3Uqb0YMpKtIQcMAeR2rpKKACiiigAooooAKKKKACiiigAooooAKKKKAPPdUN9pF/4vt10i/vW1pVksXtoDIjOYFhKOw4jwUzlsDDV2mi2T6boWnWEjB3traOFmHcqoBP6VeooAKKKKACiiigAooooAKKKKAOI8cal5Os+HI007Vbn7HqS3c72unTTIsfkzJncikE7mXjrzWdfyXtgPF1pHo2o3b68BNYPHbMVYvbpFskbGItrJk78cH14r0iigDgLZLvwXrVzLNp1/qMN1pdpDFJZW7TZmgDqUbA+XO5SGOB1yaqaLpt94Kk8P3N9Y3d1EmhiwuPsMDTtBMHDgbVySpywyOPlGcZr0qigDzPTtO1LQk8Oa3d6bdukcuovdW1vEZpbcXUnmodi5J27Qp25wWNdN4LtbmO11W+ubaW1/tLUpbuKCZdrpGQqLuHYkJux1G7nmumooAKKKKACiiigAooooA5nxlr9/o9rbW+m2N5Lc3jFPtUNlLcpaKMZd1jUknn5V7nqQAaw3sYrDTPDGpaTZapc2ulX8stzHNayLdSebHKjymN1DM2+TccDkE4r0KigDzK607UtQ/tTxJDpl2EOs2d7DZyRlJ5YYFRGbYcEMfmIU4J2j1pdX03UfFP/AAkuqWWnXcSPZ2cFpDdRGCS5aCVpm+V8EA7ggJAzz2r0yigDzrU7O88a3+qTWdhe2UB8P3OnI99A0DPNMVOArYJC7OT0+bgmrWnzXev+I/Dkw0m/sU0m3ma7a7t2iUSNGIxGhP3/AOI5XK4Uc813dFABRRRQAUUUUAFFFFAFXUbtbGwluXhuZlTGUtozJIQSBkKvJxnPHPFeWzaLeT3V/faLp+sT2MMljdkaosn2m4kgn3tHGZv3hXZnAbjd06mvXKKAPNNa03UPGD+I7+w0+7t430mKztUvYWge4lWRpSNrgEL91ckAEk9qs30F3421Z5bbT7+wgh0W8szJfW7QEzXGwBQG5YKEJLDI5GCa9CooA89003utal4ThOk6hYnRo5HvXuYDGiv5DQhEY8SZLk5XIwvXmqWlWmoy6N4U8LPpV9DdaPeQvd3EkDLAI4N2GWT7r7/lwFJPzHOMV6fRQAUUUUAFFFFABRRRQAV5hrnhnxVb6hZ3Fve6fdtca5HdNL/Z0rSRgK4TzCJceWi4XAC+uck59PooA8vez1EeHZvBg0u+N2+rtIt35DfZ/Ia7+0eaZfu5CnG3O7cOlSatBqNrpvjDw6mk31zc63PK1lPFAzQMs8aoS8g+VNhDZ3Y4AxnNemUUAef3X2zw3q3iJf7K1DUF1S2h+xva27Sq0iReUY3I4TkA5bAwx54qPTbS88Fajp013YX19B/YFrp7PY27Tsk0BbIKryA2/g9Pl5Ir0SigDn/BOm3Wk+DtOtL2PyroK0kkec+WXdn259t2PwroKKKACiiigAooooA
KKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigD//2Q==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sdelang/env/sb312/lib/python3.12/site-packages/matplotlib/axes/_axes.py:8089: RuntimeWarning: divide by zero encountered in log10\n", + " Z = 10. * np.log10(spec)\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiorn
f+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeD+JQo+IniQFRj7VD/AOk0Ne814N4mGfiL4lH/AE9Q/wDpNDVw3POzT/d38gvo1+07iBjauPyqheWyJbxEKPmGTWndqJJIz/sgn8qp6g+LWJu2DiscNJ+4v62PnftuxlwIsc6HaOGH867e2iSNGJUYZQQK4mEZmQerj+dd4EAgR/RQK5s7lbkXf/gHbSTcmzvvCyo3hy0O0c7+3+21bGxP7q/lWT4W/wCRctf+B/8AobVsV0Uf4cfRH0ND+FH0Q3Yn91fyr58mgV5pG2jIYjOK+ha8HlXIY+5q+flZ5Oc/Y+f6GYsakhdo4PpV/wCzpHGflHAzSQQYlUDrn5qtTccep/SlWrXkoo8VK6uUokjjQPsHpTGRWkcFeKsBc7AOlExU8DtTU/eJMO4UCVjt6nFbggRdER1UAs24/pWRcnkgd63QD/YEB7lTWuLk0qfr+h070/l/kdh8LkXdqo2jpD2/66V6HsT+6v5V578Lc51XPXEP/tSvRKme59Blv+6x+f5sbsT+6v5V5d8TY0Orx/KP+PVO3+29ep15d8TRnV4x/wB
Oqf8Aob0luicz/gfNHArAkiAFRjPNWkRI12hRzTY4z8oXuc1YwEDDvWtSd9D5mUmyJlUhm2jpxTFRCwBA45p5YBlA6KM0gXnf3aktESRtEm8naM7hXUWtvHJbg7BkoP6VzkgBYH+6c111iA9unoFH8q8nN6rjSiztwceedmEsabUG0ZzzxXIagkZuCCoyBmu1kQG42jqACa5DVIj9pYnqAcVz5JUXO15GuNTTTNHyk+zW3yjBUk/kawrqFAWAUcGuiETfYYG7bawbmPbckr0Jya9LAzvKWvf82ck7pplQxqD90ZpGRQgwBzUyfP8AN7CjZuAz2ya9TmtuLns9SiIl7AYFOEC5PA9as+SRgDpSbep79K057mrrN9SBUUPggcCmkDdgAcYp7kbgT60hHWqNE+p23wpRD4nuTtH/AB5P/wChpXsGxP7q/lXkHwn/AORouvezf/0NK9hrjxHxnu4H+EN2J/dX8qNif3V/KnUVgdh836aqm6AwOQf5VPJbhpvujIPXFQ6aN19Fj3/ka1No3YHZjmt68+Spp2/zPjq1+a6NrwPAi+K7LCjhn/8ARb16/sT+6v5V5H4IG3xXZA9SZCf+/bV69WF2z3Mo/gP1/RDdif3V/KvO/ilEh/sr5R0m7f7lejV538UzxpX/AG2/9kqofEb5l/usvl+aPMpowAuAMDNARfMT5R92pbhQVx6ikX7yntjiutP3T5pS90QIuZhgdB/SqxiQSBsDjIq10+0Eeg/pUDcqv4mnB/18i4Nr+vIRY1znaKnCL5ecAE0yMc/hVkqrJn8aU5akTlqUAgzjAyDVq0VBdJ8o+9UPR/fNT24zdJ9RTqaxfoXJtomvok+1vhQNpIFU7mFHIDKCMg/lVy73Ncyn3NQT/e/CsqLajH0IjJqV0XvDiL/wkum8Di7h/wDRi177sT+6v5V4L4bGPEemepu4f/Q1r3yorbnu5S/cl6jdif3V/KuN+Jka/wDCMRfKP+Ppe3+y1dpXG/Ev/kWIv+vpf/QWrKO6OzG/7vP0PIHUMkeAMYPb3NV3UYbgVbYHYB2A/rVaRQOtd8GfN05ak+n7fLnUgfdr0n4OqE0fXVAwBqp/9J4a83sU3eZ7rXpXwhGNL14f9RX/ANt4K5q7XMz0ctf+0y9P8j0WiiisD3QooooAKKKKACiiigAooooAK8E8Tkj4k+JMf8/MX/pNDXvdeCeKGC/EXxL6/aYsf+A0NaU9zz8z/wB3ZLncUJ6MoH6VS1E4hhjHYVetPniweo6VQ1IbUi9cGsKH8ZR7HzkejM1N4uI9oBwwP6130Th4Il745rhrc5uIz/tj+ddzHEPIXB5rkz1r3L+Z3UG3J2PQvDIx4etQP9v/ANDNa1Y/hYEeHLQHr8//AKG1bFdND+FH0R9DR/hx9EFeIADe3uSa9vrxJEG9st1z/OoxDskeXmy+D5/oQQWxy7rk5Ofzp06P85A52hR+tTIxiaR+qH9KiuMsV2nlRuI9axU5SndnjNJRKJZlwB60GFgDJzyKtyoEh3kDORxVd3Kx7T3rqjNy1iZNW3Me66pW4rN/YEGR8xXpWJOCdoI71ugh9AtcddxH8q3xj0p/4v0Z0L+H/Xc7L4W5zq2fSH/2pXoleefDD/Watj0h/wDaleh0pbn0GW/7rH5/mwry74mAnV4wP+fVP/Q3r1GvMviMM63GP+nVP/Q3qb2syM0/3d+pxsSEMuBUc3+sYmrtspDYK9Dj8MVn3mTcMM4wcUUpc1Ro+a5dBp+6qdyMVM7Ybj0wPrVdD68ntVqNNzKD/CcmtZ2W4mI8ZVFHckfzrqbSIyWsa9ARzj2rmpCS2cd+K6rTXBgjzx8teHnE5KjGS3ud2AipVGmT7gz7j16VyergCWQj71dQI3yx5weRXJarnzJGJxzmufJYr2zs+xtj23GN11NwqTpEDDsuDWDOo80+9b6k/wBkRejJmsCRdzjnvXdl2kp+r/M5MV9n0RXSHEmB9KUISwz9KmVNspY
nHzZp6DJXIwd9eo6jOXci8giTgdVzUbwlgDjnvV9gFjwT82eKhYbSx6hhmohWbG9DImjywHpURJJNX5kDYPQ9aotgkdsGvQpyujqpTurHb/CjP/CT3GR/y5P/AOjEr2GvIPhT/wAjPc/9eT/+hpXr9cuI+M+hwH8H5sKKKKxOw+b9LI/tSLB7H+RrYYZBHcsc1j6ZgX0R+v8AI1qgkk/j/Orxa/e38l+p8fXeqN7wQP8AiqrM+rP/AOi3r12vIvBDZ8V2fHG98f8Aft69drNdT28o/gP1/RBXnnxRxnSs+k3/ALJXodec/FTIGlEek3/slXDWRvmX+7S+X5o85kT5B7inIPlB9hTJSVUH2xTo9xjQ45wOPxrpd+U+Y15R2MGcf7I/mKqy5CKR6n+dXjj9+e+0fzFU5D8q8cc5opP+vkVB6odECQM9cA1c2/KMdTVWPnkegrTCKIS3pzWVedmiJasxpF2zk9xU1s3+mx5/vCo7j/j4c9qW3Ob2If7QrolrD5G1rx+RpXEf+mTj1c/zqpdAAAd+KvyNuvLokcKx/rVC6IbiuOg3dJ9kZfbL/h7A8R6YO/2uH/0Na96rwXw9j/hI9M55+1w/+hrXvVXV3Peyj4JeoVxvxL/5FmL/AK+k/wDQWrsq474lf8i1D/19J/6C1ZrdHbjv93n6Hku3Ax6iqsmFDZ7VeflQenFZrqWDjNdtLU+Yo6vUu6f1kx3WvSfhH/yDdf8A+wr/AO28FeZ6aSXkA7LmvTvhONth4gHpqv8A7bQVzV/4jR6eWq2Kl6f5HoNFFFZHvBRRRQAUUVHPCtxbywOzqsiFCY3KMARjhgQQfcHIoAkoryrVpJ9A1LxNqGj3+qPBoOktuS61Ge4R7uQblysjsPkQKf8AgftWtO134N1uyji1K/1GG7027lmjvLhpsywqjB1z93O5gVXC8jigDv6K850pr/Sz4N1R9Xv7yXW2EV/FPOXjcyW7yhkQ8R7WTA2gcHnPWqtheaiND0Hxa2q3z3eo6pFFPatOxg8maYxiNYvursBUggZypyTmgD1CvAvFWP8AhY3iPP8Az9Q/+k0Ne5ajeT2NoZrfT7m/kBA8i2aMOff94yr+teD6zPLfeOvEE09lPZSNcxZt52QumLeIclGZeevBPWtKW5wZn/u7NS3RUA6A4zWbqmBGhPPXFaJ4YY7AA/lWXqWfLiz3B/pXJhVesmz5xdEULbmeLnGXH869CiTECY7cn8q8+tgDdRH0dT+tehwZ8lsHquVri4ibXJ8z0cIk5M7jwwd3h62OMcyf+htWvWN4U3DwzZ7zlsNk++41s16FFWpxXkj3KP8ACj6IK8RGcn1Un8a9urw9pSC2O2f51nXTdrHl5s7cnz/QkZ0w0XHHWoN+XTjljyfQUzbuOcjefvGgvw57gbRWcaaWx4rlcGV5btst+6CdO2aCqNFyBkU17gRv5ag5YdaDGzL8vU1pZ6X0RLZkXR/eDHvW2qiPw/bHHJyc/lWNeLsk46ZJrZk+bw7a4/H6Vvi3dUv8X6M2j/Dfp+p2Pww+/q30h/8Aaleh1558MDmTV/YQ/wDs9eh1Utz6HLf91j8/zYV5r8QiBr8IPe1Qf+PvXpVebfEAK3iGDd2toz/4+9Zz+EnM/wDd/mc1CmQ2TgkED61kXiYmxnJB5PrxWxuBOR2Gax5mzJn1JqMJfnbPm5aJWIokJ5J69ParsKliCOCTyPWqi5YYHbmr0DKMyY6DFdNduxG7JZQo2r0966KwUfZ0HqAAfwrn5kHkq3fNdHZhVtoFHYbv8/nXzmay/cRt3Z6WAj+9ZIz/ADEKcgZ6Vxmrk+dIuegrr8rHIQAfnYiuP1Xm5kPrkVeRRSqu3YrHyuo37m6GxocJHdeP5Vj+WxXGeeufxrWJCaJEuOQv9azYXLSsD90iuvCXiptfzP8AM5MRq4ryRHL98kDilBUTMe2QRSv91gCKiKlxuXglsfhXcldanMTyENICRgAVEw2g5Occ1bl
iULn1AFViv3lb0/SopyTWhUk0ULj7wIOOQazc5lf2rWnjHH6VmMoV29Sa9WhJOJtQa1R3Pwq/5Ge5/wCvJ/8A0NK9frx74U5/4Si59PsTf+hpXsNc+I+M+jwH8EKKKKxOw+bLLIvIiOMZP6GtdchC36VlWAJvox2wf5GtpV5Ye4NaYuVp/I+OrayRs+C/k8XWCY/ikOf+2b169XkvhAZ8X2HsZB/5DevWqxi7q57uUfwH6/ogrzj4rEgaSB3E3/tOvR685+KoydI/7bf+yVrT+JG+Y/7tL5fmjzqblB7mlhypXJ7Uh5UE9uKVceYB6c1v9mx8t0sSRjcJvdP6iqsnACn0q3CcLKx5yP6ioZY9xBFKDtJ/10HF2YkH6cVp9IWbqOmKz04Y+lXFcshP8HTFY11dpib1M2cZmaktP+P+In+9SzMN7n0OKbbc3sXrurq/5dv0N4/CbV0As021fvsQTWXN8xHbFbM6gSSgd2NYkx4PrmuLBu6MmveNDw4QfE2njHS8h/8AQ1r32vAPDX/I0ad/19Q/+hrXv9bVtz3sqVoS+X5BXG/Ew48LxnGcXK8f8BauyrjviV/yLMX/AF9L/wCgtWS3R2Y3/d5+h5PkvDnGOOn41QPE+O2Oa0U5TnoV/rWdIGEjc/Su2lu0fM0N2i5pRHmynHRa9L+FBzY+ID/1Ff8A22grzLTgQZP92vSfhCSdL14nr/av/tvBXNXX7xs9LLX/ALTL0/yPRaKKKyPeCiiigAooooAx7Tw1p9rpmpWDiS5h1Kaaa7M5BMpl+8DgDgLhR6ACq+k+EbTS7wXct9f6jNHbm1hN9Ir+TESCVXCjOdq5LZY4HNdBRQBzWleCrDSb20nS8v7iKwVksLa4lDRWgYYOzCgn5flG4tgHApLfwPp1tqEM63V81pb3LXcGnvKpt4ZmJJZRt3dWYgFiATwBXTUUAFeBeKSR8SvEXp9qh/8ASaGvfa8A8WHb8RvEh/6eYf8A0nhrWiryOHMdcOy1K+24ZexVf5Vn36krEe2OKsXUgEx55Kj+VUb6UmOP0FZYaDTi/wCtj5lazsitari7iXsXH869DgKixz2CkfrXnlvIBPET2df5138Ew+yjdgcE/rXncQxclT9T08NK03fsd74Wz/wjlrnqDIP/AB9q2KxvCh3eG7U+pf8A9Datmu2l/Dj6I9yh/Cj6IK8KZwGf05z+de614JMRmXnkn+tOceZo8nOHbk+f6DUYiRpB0al3bD7AZ/Gq6SEuP7g5zSFtyBCeW5NaOnqeGTlljBc/hUsN1+7J/Lis52MrYzwOKcrAAAHqeacqCcdQ1RDfSFpGPYnithH/AOJPEOyrWBeN84A7CtfzMaPCP9k1WJp3hTXn+hu1amn3O5+Fjbn1g/8AXH/2evRa82+E5OdYz6w4/wDH69JpVFaVj6PL1bDR+f5sK8z+ImP7dh9rZD/4+9emV5f8SyRrEZHX7Kn/AKG9ZtX0M80/3d+pyjXPDYPeqp2k7vU5qu023juafHyQ3at40lBXR8007XZKGwq+rGn2rF9gPrk1Bkffz04FJBJ5a4B5NEoXi7CS0NNpWkIx90HBrftT+6gkPUrs/D/IrmDOFgAGM5rfhnBtoVz/AA7q8TMaTcIpLq/yOvCTtJtmgpxOGPQDArkNSUC5YnsTXTo6mANu425Brk9RkDzsQeCKnJ6bVWX3GuMlzKKOgnAGlwOOoTNY0Unzle5zWpPMg0+BCefLxWCZQsjAHkV14Gm3CSfd/mc1d3krdkTrtO8c43Y/KnRuJNv93JNQRnJPoeaR5AEKjg138l3Y5zSDHGW/CokYOzhuo4NRiXPfoKazhySOuMcVzxpsq42RdrY7DgVkyEec3tmtCWRiwJ7VlTviQ46mvRw8X1NsPG8nY7v4Uf8AIz3P/Xk//oaV7DXjvwn/AORouR6WTf8AoaV7FWWI+M+kwP8AC+YUUUVgdh836e4N7H+P8q2I3PmNj+9+lYOnki9iPbn+ta3
nYZmH97BrbFQvP5Hx9dcszp/CDhvGNmo7Fz/5DavWa8h8EPu8XWY7gv8A+i3r16ueMeVWPcyh3ov1/RBXnHxWz/xKcdhN/wC069Hrzn4qnCaWfab/ANkrWn8SN8x/3aXy/NHnatiOP680u5fMJqJWBjQ54o3gce1dHKfLuJYjIUyemM1E2c57GmRkkyE+1KGJb2pctmHLZkoUZIHQVJC2VK/wmod23IFIH5wOlS43RNiCcYmbHc8/lS2uPt0R96ik5d2J7062f/SEI6etdDXuNeR1Wah8jfkkXzZB3bJrHl4Az3qwZTvc+1VJ26H1rkw9PkdjCN3I0vDR/wCKk03/AK+4f/Ri179XgPhk58Q6Z/19w/8Aoxa9+p1/iPfyr4Z+oVxvxLO3wxGf+nlf/QWrsq4z4mkjwvGR/wA/K/8AoLVlHdHXjf8Ad5+h5QrZQZ9KpXHzP+NTBuB9P61Wdv3hJrvpxs7nzdGNpXLunnaZc/3K9I+EP/IL17/sK/8AtvBXmVpIMyDP8Nem/CE50vXv+wr/AO28Fc1dWk2ehlq/2mXp/kei0UUVge8FFFFABRRRQAUUUUAFFFFABXz14xfb8SvES+tzCf8AyXhr6Fr5w8dSFPif4hGD/wAfEP8A6TxV0YZXnY58VDnpND7iUmcnPGB/Kq95IPJQZqtPO3m5yenSq17OywZ5yOldVOjax4FHCtyiTwS5nj/3h/Ou5WYfZYyG+6ORXm1pcFrlFIIwRzXWx3MnkldxxiuPMsN7Xl8i8bF0Jpd0ey+DjnwrZnOcmT/0Y1btYHgk7vB9gfUP/wChtW/WCVlY93DfwYei/IK+erhyJ5hnvgV9C1853bEXcoGeta0o3keZm8b8nz/QdGw2hc896SSQAnHbgVTDsp4JzmkklJwR0HJ966lS1PJVBtk8koUYB5NQea4OefSoWc7smmGclSdpFbRp2OmGH02JbiQbs55IrRE//EpjHUgVzlxcneDzWikzHT4xyMiipSuo+ptWwrjTh6nqXwjbcmrN6+T/AOz16XXmPwfOYNVP/XH/ANnr06uCt/EZ6+CVqKXr+bCvLPie2zVoj/07J/6G9ep15R8V32ajG2M/6NH/AOhvUQV5JeZlmK5qNvNHAO2MnOcnipBPhAo65xVBpjxwaabgDB716fs7nk/VnJGgZgTgHgVFHN8xJPANUPtRyQM5pnnEjAPHWmqRpHBO1ma3nbsDd/FXRWtwohTJzgYzXGRuwZct6fzrcimZYwOSK4sZh1OKRyV6botWNn7cpiCqwwDgc9q5y+mJnYDpmrSudvAI5/Ksi7kYux570sJho05OwsOnUnr0OknmzbQjP8ANZZO5ic85oe4b7PH14WqaytuOc960oUOVMmFKUrsvrLtG7PFEkgbpVCSUhNvOTQJyc9QAfzrX2XUpYZ7mmsoAwTTTOOSpzVIS528HJpu8hgBnGalUUSsP3LLzbuelZkj5kJ9DU0krY4BHNZU8zCQ9RzXRSp2PRweGu2emfCNy3iq6H/Ti3/oxK9nrxD4MSmXxTe5B4smHP+/HXt9cGKVqh7FCHJHlCiiiuc2PmOzlxeIPTNX/ADh83uaxIJCNTIGeP8KsNMyykEkjJr1J0uaVz5zEYbmmrdkzuPh+5k8YWbE/89OP+2bV7RXh/wANpC/jC0P+/wD+i3r3CuCsrTaPRyyPLTkvP9EFea/FttqaT7+d/wCyV6VXmPxgO2LSD6ed/wCyUUVeojoxqvQa9PzR5t5mFA7YpA4JbJ78VnSXZSPODx2pRMwOeelel7M8n6nK1zTWXAPvSibBI/I1mRyuQQScjvT2uSoX5T1xn0qfZGbwjvY0DLjjPNHmYGAeazzOck8mgTsF7k+tHsg+qMllmO5hS2kwFwik96zJ7hvNY5OPSiznZ72PggetaOn7p2vB/uW/L9Do5ZgJZPQZqvI+4DnoagmkPmt6d6hMzY71jClZHn08M7Jo6Lws27xHpo7/AGuH/wBGLX0DXzx
4Rfd4n07j/l6h/wDRi19D1yYhWmerl0eXnXn+gVxfxOOPCsZ/6eV/9Bau0rivikceElP/AE8L/wCgtWMPiR04xXoSR4yZeOuOP61VeTMj5OM9qY84VQT0qjcTMZCQ23IIH1r2IwOHD4W7NqzfBlPtXqfwbOdF10/9RU/+k8NeP2U5IlHPAHNeu/BRt/h/W29dVP8A6IhrjxcbK/ma4Wi4YiTfb/I9MooorhPSCiiigAoorL8TNdp4U1htP3fbRYzG32dfM2Hbj3zigCW31vSbvUJdPttUspr2LPmW8dwjSJjrlQcinQaxplzqEun2+pWct7DzLbRzq0if7yg5H41554TuLnTB4VtLTUrS/h1OwdvIjt40+ylYgwZSo3bd2FO8kkkc54qto/2H/hGPh39j8r+1vtyebtx5u7y5PtW7v13bs98Z7UAemf2xpn9qf2X/AGlZ/wBoY3fZPPXzcYznZnPT2om1jTLbUYtPn1KzivphmO2edVkf6KTk/hXmA+y/8INH/qv+Eh/4SX2877T9u59/9Vn/AIB7U7WPsX/CK/ED7V5X9sf2i/lbsedv2x/Zdvf+5tx3z70AetV85eOlB+JniE/9PEP/AKTxV9AX0mpRWAbT7a1ubvjMdxO0Ke53BHP6V87eK5L1/H2vPfwQQXZuIt8UExlRf3EWMMVUnjH8I/rXThf4hlX+B2IJY8yK30qvexhkBPrVl3CkZ9sVDdEsi+h6V3RvdHiUHJSiVbeP/SEJ65FddBBm1OOorlIX/eRn/axXZW7gW31WuPMJSSVjPMbucbnq/gcEeDtPB6gP/wChtXQVgeCf+RQsf+2n/oxq365D3cL/AAIei/IK+drkA3Mxx0avomvnq5K+fIP9qrpP3jy84+x8/wBCiY9qs3c1EsYUBKsNLkkdhUDNhi1d0WzzYOQ0xb3z/DQ8a7OKd5mImA61GVbYc1auaxcurM6aAFgce1aQjH2CLP8AdFUJ3AFaGQ2nxj2FXNuyOvESk4Qv3PS/g+CINVB/6Y/+z16dXmXwgOYdW/7Y/wDs9em15tf+Iz08H/BXz/NhXk3xZB/tCLH/AD7x/wDob16zXk/xXIGoxZ728Y/8fepp/GvVEY/+GvVHmo5PPYVUmUAfjViRwx46DrUIw/X1r149zlpXj7wyFd0+D2zTzFgoB05zT4QFJxRG/wAwLU7mkpycm0PCAlBjvXQRRhYEJ7rn9axIAOp61vRFWjQHOVWuLFN2R5GNldpCiEkuO2MisS9TazHsa6FJcKufSsG+O7issJKTk7meFdqisWCv+jrnutVMASfWrrc26D2qlI6hWPoa6aety6N3dClR5maYU7dqUnJB7GhmAU+taam6uiSJOBntUhVVOfxqKN/lzTvMwQD/AJFQ07mUlLmGthh+NZMqq8rjH3a05XAHFZrt8zEetbUz0MEmrs9B+DIC+J7sf9OT/wDoaV7dXiHwYz/wlF5n/nyf/wBDSvb68/F/xWetDr6hRRRXMWfLFtH/AMTMse5/pU8kYExIHUmooW26kv0/pVh2Blx717DvzfI8KtKXtE/7qOr+GilfGNrnuZD/AOQ3r3KvEfhwP+KwtD/v/wDotq9urzcR/EZ25e7wk/P9EFeY/GD/AFWk/wDbb/2SvTq8w+MRAg0nP/Tb/wBkpUP4iN8X/Bfy/NHkToG2g9RUgUYX3BNIuGHv1pxOdje1esccpPYfGg3E+opzqDketMQ5fA6Yp8jYU7evapd7mEubmECAfLTvK4CjpSA5GD1xUm/gKPvUnciTkZs8OZ2p1pGRcIB0B5pZn2TMTUlmwM6H1NaNvlO+U5ex+X6F2QDfIfQ1EqAjA/Cn3Uqq5z0zSA84XtWSvY82HMopm14STb4m04/9PUP/AKMWvoWvnvwmSfE+nZ6faof/AEYtfQlcGJ+M9DL7+/fy/IK4v4oc+FE/6+V/9Bau0ri/if8A8iqn/Xyv/oLVjH4kdOM/gSPDLi2WRFBHHX9apT2+5h6
q2a05T8oxVJ5MS8/dI/WvZg2cuGqTsS2UIHnKfSvXPgku3w9rQ9NVP/oiGvJbM70mx1Ir1z4Lf8gLXP8AsKn/ANEQ1yYz4fma0JN15J/1sel0UUV553BRRRQAUUUUAUrPRtL064muLHTbO2nnOZZIIFRpD/tEDJ/GiDR9LttQl1C302zivZv9bcxwKsj/AO8wGT+NXaKAKX9j6X/an9qf2bZ/2hjb9r8hfNxjGN+M9PeibR9LudRi1CfTbOW+hGI7l4FaRPoxGR+FXaKACvm/x4GPxN8Q4HH2iH/0nir6Qr538ax7/iV4jP8A08Q/+k8NdGFdqhjiJqFNtmHOrM5U5HApJs/ZlGORV6aDc+abcQhLVM9cYruVRaHhwxEXyox4Edp0GDjeDXWQlzFgfwrWFDGFmTjqwrrre13RDAwWXPSuXH1owSuTmFR1ZRSPU/A+f+EO0/PXD/8AobV0NYHgsY8J2Q9DJ/6Mat+uO99T3cL/AAIei/IK+cb4sLqbaM/NX0dXz1dQMZZX7luBWlKSjPU83NnZwfr+hkhiQfY4qLc78gdOK1JLdVj+VRnNVxEFYjHArujUi9UedCvHexUUspwfxqfcWXgVJ5QLZxUscS89KJTW4p1ovWxhXqMCAByavKjGwUHIyMCkuowXOfXFX0h/4l8ZI7YFXOokkdVbEfu4ep6H8HwRBqoP/TH/ANnr02vM/hEMR6uO48n/ANnr0yvPrfxGevgnein6/mwryX4tf8f8f/XvH/6G9etV5V8U1DapED/z7x/+hvU03aa9UZ5g7Uk/NHk7FyxCrkVHiTgKueefatFogXIUe+RSeT8p4xk16/OjnWJilsVQrHGcgDmmeW24OxI7YrTS3zgYoe3H3cehzS9ormSxiTsVl37hgcVtRK/krkYJFZ5j5QDj5hXTRWwcIoHHl5z78Vw4uuoJNnn4mXtLWM0CRowcfdrHvC2SQK6owExuqLnC54HWufu7dhI/BxjpUYStGUmRh3yTuwLN5CjHOKoyb2yAtbXkj7OhIx2qkYgJDXRTqLUqjWUW9Cnh9gyORTDv8sZFaQhzxjrTHhGOnFaKojaOJjfYoqX2AEYpzs205GOOtWxFmPOKaI+CGHSnzor28W72M2VnwQBmqMglHIXLelbcsS8Yx61VEYPUc4raM1Y9DD4mKjex23wV3HxTfFhjNm3/AKHHXuNeK/CCPZ4svMdDYt/6Gle1V5uL/iHoUpKS5kFFFFcxofKsKn+0mPPJ4/KrJDbunOadbx7r1ePX+VWXjw/TkGvXlNXseBWrrnS8kdN8Ndx8XWpYY5f/ANFtXuFeK/DtceL7X/gf/otq9qrza7vUZ3Za705Pz/RBXmHxiGYNJA/6bf8Aslen15r8Wl3LpA/67f8AslFF2qJm+Nly0W/T80eNgOjHaMjOaP3kig7cEcYq48O3JHTNPigr1edbnE8TG3MUlEiZJXtUmHO044q5LEAvSlEWUHGKnnW5k8Smr2KwRmJ46U7DLyBmrqwgjA60kkYAwBzUe0TZz/WE3Yw7jc8zcUWxYXUUY65yavSRDfnHen29uPtaNjmtnNcp6LxUVSs10I5A3m7cZ4zSjeAMjFW5Ic3BOMc1M0AyOOKy9olY894mKSRc8IMT4o0/0+0w/wDoxa+h68A8MRhPE2nEf8/UP/oxa9/rgxDvI9HLpKSm15fkFcV8UP8AkU0/6+V/9Bau1rjPicM+Fox63S/+gtWUfiR04zShI8LbdsH41nSBmORnrW/JCD8u3jFZpgwjjHOTj869iE0zjwuIjqxmmGRhOSuMHj3Few/BPI8P63nr/ap/9EQ15ZpkIDSgjjFer/BobdG10emqn/0RDXJi5XVjajVU8VNLt/kek0UUVwHeFFFFABRRRQAUUUUAFFFFABXz94vXPxG8Rn/p6h/9Joa+ga8B8Wj/AIuH4kP/AE9Q/wDpNDWtF2kcOYu1BlFkzIAPbNRXkZVIw3Q1bRSGYHqRxTdQwVjT2rS
M3zpHzVOVpIzY4s3MQH94fzrubeLZAGIzxiuOiwLmM/7YrvbdS2ncdf8A69eXnlVxjDzZ3Ul7SXojuvCC7fC9mPeT/wBGNW3WP4X/AORdtfq//obVsV0UneCfkfQUFalFeS/IK8KaMmZy3QNXuteJPIrkKOmTms60mmrHl5uk+T5/oZksRjy56HnFVXQY6cnmtC6fzNy/w1QZsYb8BXVQlJq7PB2egxwBgDtyaahJPA705uFO7vSQyqAetdP2dC+hVuYxnPvmtryl/sWBuOay7hcn2rZjTf4fgx/CSD+lYYqbUab8/wBGat81P0Ow+FChTrA94f8A2evSK87+Fy7X1f38n/2evRKmo7yufRZc74aL9fzYV5Z8UFLapHj/AJ9k/wDQ3r1OvLviccarGf8Ap1T/ANDepj8S9URmf8D5o86VApPHJpBhjjHfFTRqdhY0jDuOtehzang8+o5Bg5pzKM7vamA7m9s0q/NkDoKh9zNp3uM24dAR1YV11tGPLj28fLzXMBd8iemRXVwqQsZXrtzXk5rP3Yr1NaXvMZbqEmZSOg5rCvwolcAcnJromAMpB6uwFc/qUTJNKT94niufL5KVVt9UhVlZJeZNLGBaRkf3c1mmIscjtW1sAs0B67cf1qnapunfd93jFdtGtaMn2MGrPQhEWCSRwCajkh2xgHqRmtCba28L2qs+HPPYEVpCq3qTsU1UH5AKcYuGJ+lPiCoQfSpiA6sy9SK2lNphcy5Y8fgKqKOAxrRuAd2PTrVILkge9dlOV0d1Gfu6ndfCcY8UXP8A15N/6GlexV5B8Klx4ouf+vJv/Q0r1+uPEfGe7gXelcKKKKwOw+a7FAbxD65/lU1yoWTOO9R6eP8ASY/x/lU8uTKSfWu2b/e/I+Qqv3zp/h6oPim1bofn/wDQGr2SvH/h8ufEtq/u/wD6A1ewVxyd5s9vKf4L9f0QV5z8VF3HSPbzv/ZK9Grzv4pAE6SD/wBNv/ZKcHaRtmX+6y+X5o8uePKsPpTo4jvTHpU7IFiPsMURcSJ9K7HP3dD5z2jcStdJhh9eacMBFFSXSZb8aiIwE+lVF3iiou8EiROKUgYzjk0xW70obcxP0pNaktO9yowG5j71NbDNyg96ZgEH61PZIDex/XFXN2izonL3WSzoBcNj+9ipljznPSlvozFdsB3anxg7yT93HFcnPeCaOKVy34fQJ4k03H/P3D/6MWveK8M0JQPEWnY/5+4f/Ri17nWcnc97J/4cvUK474lY/wCEZiyP+XpP/QWrsa5D4jjPhuIf9PS/+gtUXszux3+7z9DyCQDBGKz3Qh/bJrSuPlQVRfktXo0XofO4dtElpHtVyOu2vT/g7/yCNd/7Cp/9J4a8500BvMz2TNekfCD/AJBWv/8AYVP/AKTwVz15XbR35c28TK/b/I9FooormPdCiiigAqOeeO2t5biZtsUSF3bGcADJPFSVV1L7N/ZV59scpa+Q/nMOybTuPHtmgDO0rxZpGsxtLaPdiFYfPM1xYzwRGPj5g8iKp4OeD056U/R/FWja9O8GnXhklWMS7HheItGTgOu9RuXP8S5HvXmmpRR3WnX+h+CNVu9W0+XRLmOeH7S1zHCyqoiVHOdrN867AenYYrcuNQtPF3iHTj4blEn2XSbxJ3QbRAZVjWOJj2bcpO3qNtAHWab4u0LV7/7FY34lnIZo8xOqyhThjG7ALIB32k0QeLtCuNX/ALLivw10ZGhX904jaRclkWQjYzDByoJIweOK4rR9Qs9WHgLS9Nz9v0oh76EIQ1miWzxOkg/hJdlUA9eo4qpp1zDJ4c8N+FkJOv2WsRPc220+ZEI5y8kreisoJDdDvHrQB63XgfisZ+IfiT/r6h/9Joa9x1GznvrQw2+oXNhISD59ssZce37xWX9K8F1y3msvGviGCa8nvZVuoszzhA75t4jyEVV46cAdK0pfEefmf+7v1RaWP5s9woNQaggUqO+KuwR5lWTPYAj8Kq6ohyC
eOlYUp/vkrnzaWlzNiXM8Y/2h/Ou6hzDYoB39a4aD/j5ix0LDmu5i5tg2c5GAK4892hfY7sNfmfod/wCFjnw7bH3k/wDQ2rYrE8Itv8MWp/2pP/RjVt12U1aCXkfQ0HelF+S/IK8MdcF8dSa9zrwqQbGdsnqcVNTdHlZxtD5/oVp2XGwdcYquF+bJ+6oqSSI7sZOSKc6bcRfxHmt4tRVkeEU3RpOo4JprRBeR1q3MOQiDJ9qhMEvUqRW8Z3W9ik2Qy8BfoM10UKD/AIR6AH+KucuAV+pFdFC2dAtiOwrjzC/LTt/N+jN6fwN+R1vwxADarj0h/wDaleg1578MDl9Wx6Q/+1K9CrR7n0GW/wC6x+f5sK8t+JwJ1WMD/n2T/wBDevUq8x+JAzrEY/6dU/8AQ3pXs0/NE5nph/mjglQ7DnpUSx4yPerpGUHHC1Xb5Xx/e6V1Qm3c+aUmRhMvx0p+zCbR1Jp33Co71Im1dzk8E5FEpMTkxMKoRe+a6SBwYVbP8IFc9HEWZWP96twIfs+AcAgH9a8rMEpKKua0HZtlmNfMuMn6isPWG3ykr2bBraSQC6OOzYrE1Mhbhx681z4BNV7+RrWfuK3csyP/AKGjdjHj8c4ptvs27P4lA3fiKlWEtbRMwwCuMfjUc5WGIhfvcZ963i07wXcxd1qys7beV6YIqsX5b8cVI75TA6hqjbBUHviu+EbbmJWST72exqUErwOhqE/KcngGlB+QEHI611NXNGuw665AbuetUfusasuxdsdsVA4AyfxrSmrKx0UdFZndfCv/AJGm6/68m/8AQ0r1+vHvhR/yNN0fWyb/ANDSvYa5K/xnv4BWohRRRWJ2nzdYf8fcf0P8jU45Zm/2sVFYDF5GPXP8jVtUGGU9SSRXVVlafyX6nx1Z6nS+Amb/AISe0QDgFyf++GxXr9eReBiq+KrNR1Jf9I2r12uTq2e7lH8B+v6IK88+KPXSvpN/7JXoded/FNtv9lH/AK7f+yVcdzbMv91l8vzR5wfue2aiiJMwDdKnByrD3qPbl1I+ldSejR8xF6NDZmJLenao2GFT8adL1YH2pJPurjrVx2RrHRIZ9KkjHNIqYyffNTwoBgnqaJyshTkrFSRQ2QD3qWwG27i+tRyfLM3oTUlnn7ZBgcb+ac/4b9DW75LGjqPN4xHc01cFQOxFLfnN2+PWkGPl9CK4Ifwo+hyy3L+hKP8AhItNIJ4u4R/4+te514boWP8AhItOwf8Al7h/9DWvcqGe9k/8OXqFcj8RRnw5EP8Ap5X/ANBauurkfiM23w5EfW6Uf+OtUs7sb/u8/Q8muEDQoPX/ABqgRiQjtmtWZQYFA7VlucSHP0rsw8rpnzFF7otaYu4zD1TFei/CD/kFa9/2Ff8A23grgNGwZJfQKTXoHwiGNM18f9RX/wBt4KwqyvUkvQ9PLf8AeJen+R6LRRRWZ7wUhIGMnr0pawPF0Flc6TFFe6Ld6tmceTDaDEiSbWw4fcvl4GRu3DGcd6AN+ivPbCy8e6PbXN9HPFcWka74tHvJjczsB1UXAC4Y9g28Z716FQAUUUUAFFFFABXgXiv/AJKH4kP/AE9Q/wDpNDXvteA+LgW+IXiQA/8AL1D/AOk0Na0fiODMv4D9UW7RXLJjJDgD6U3VovlXPucVb0zJhUd8dai17iVdp4215UKj+uKPqfP8lqPMc/GNtxHj++K7iMlYEAHAXdmuKX/XoP8AbH867i2y9tz0C4pZ5Kyg2dGE95v0O58I4/4Rm1x0LSn/AMiNW3WP4WUJ4ctVHQb/AP0Nq2K7qTvBPyPoqCtSivJfkFeETMQ7s33Qe9e714LM+95EYcZ5okryR5Oc7Q+f6EMLFrje/TGcGrGzIMpGWJ+WoLbEspXHHvV1ELTA9I07GprS5WeLFXEit0Q+Y+MnnmlaSJsrxkUx1muJMI2FHBzUxsFHzjG7ua53KKd5y1LSb+FGLeqBIa1VYpotuoHG01l6ouyXHoAK1oB
5mlwp2CZzXViGvZU5Pv8AoON+Vo674W/f1fP/AEx/9nr0SvPvhkMT6wP+uP8A7PXoNaSd3c+jy3/dY/P82FeZ/EbH9tR5/wCfVP8A0N69MrzX4hIJNdiU/wDPsh/8feom7K7IzT/d36nFybdgVV69cVSkYBhkcjpWjIqoQqj73pVGZF84n0Fa0Gj5pb6jM8gnrU6R+Y2MYUVBGu5gW7cVewREEQ/MRzV1ZW0QmIhJkAA+XNazOfJjUd+/41USJIogD1PWrXQoO2N1eViJKTVuhtTTVx8LA3soxwpH4msfVOZXPcZrX2bJmYdXIrB1ByZpBnuavAxvWuuyKqbJeZukhtMjK87VHSs26cueOuAa1I0EVig7FATVBkRlz3zWeGklKT82KqnZehmplUbPXrSuvGR0xipHXcTt9etNYMIwCc5NeqpXdznKpG9famhSq46ipgBjbjrTQCpCnnHNbqRpzdCLaeccZFQN97B54q6V9Op6VUkAMjeo4rSErmtKV2dt8Ksf8JVdAf8APk3/AKGlewV498Kf+Rpuj/05N/6Glew1y4j4z6PAfwUFFFFYnYfOGm/8f0YPPXH5GrqqWJ7HccfnVHTf+P8Ah/4F/I1sJHuBbupNa4mXLU+S/U+Prr3jX8ERMPF9ixb+KT/0W9ew15F4Kb/iq7Fe4eT/ANFvXrtZXb3Pbyj+A/X9EFee/FAAjSwf+m3/ALJXoVedfFRtsel/9tv/AGSqirvQ3zL/AHaXy/NHm27G6nhgq+p61CThiOueKkUAbSRya62j5lrQCu9hx160pTK49DUijBPrT1A/EVm52J5iEREc54PNTKoBBJAB4pQOB/nilWPzCC33c8A1Epdyb3M+4wJX+vFS2PF1CO5amXSAXL+3+FOsBm/t8/3hXRJ/um/L9DqWsLF+6TEsxPJzTFIyB2xVrVFCzSBBjLc1SB9emK4qL56aZzzVm0auh7f+Eh07GP8Aj7h/9DWvca8M0DH/AAkGm46/a4f/AEYte50M97J/4cvUK474lf8AIsxf9fSf+gtXY1xvxL/5FiP/AK+l/wDQWoW6O7G/7vP0PJ5ZSseRzx/WqTHcc+9WHIICVXICk+1d1NJI+apJI09IIWSUf7GK9B+EmP7O8QY/6Cv/ALbwV53pXMsv+7XoXwhOdL18/wDUV/8AbeCuOqv3sn6HoZZ/vEvT/I9FoooqD3wrB8VW+ovb6feabbtdSWF6tzJaLIEM6bHUqCSBkbwwBIBKit6uT8fxyy6RYqtlqF/bC+Q3dpYZ8yaLa+QSCPlDbW687QO9AEmkyapq/ikavPpV1pdjDZPbLFdunmTuzo24qjMAFCEAk5O88V1FcX4QtNDg1aV9M8K6ppM5gIae7hZFZdy/KCWPOcH8DXaUAFFFFABRRRQAV4F4rz/wsfxIe32mH/0mhr32vAvFbAfEXxIvrdQ/+k0Na0viODMv93Z0GlRIkIJ6lQazNcAVkVeg4rStfngjUHooz+VZOtfJIC3fmvAwabxrk2eNWf7hJIx4Dm8QHrvU/rXexLss1K9yc1wVqwN3F/vr/OvQcfuFQdCOfyquIXZ00bYBayfkdp4XOfDtqf8Af/8AQ2rXrG8KLt8N2q+hkH/j7Vs16lHSnG3ZHu0P4UfRBXgd0wLSKvUmvfK8Alj/ANIk2nJZulXpzJs8nOdofP8AQsWsYZQide9W5gQFhTqQc1HbR+RGHHLHjB9atlPKjLH77etefWq/vNDyoQ90rPMsA2DOcZpkc1wdxbG09OKmW3CsZJM5qzHNCwIGOPas5VIxXuxv5lRi29XYwNTUM0b/AN7mtK1TOkwg/d8s5qjrRG8KOlaWnuH01FPTZXXXk/q0JeYQWrR1nw2ULc6wB6Qf+z139cF8Ohi81j6Qf+z13tdMXeKfkj6HLv8Ado/P82FecePk36/FjqLVP/Q3r0evOPHob/hIIto/5dU/9Deoq/ARmf8AA+ZyLxrECe54FUbiHEgx681p+Vgl36g
cCs+ZXMjKBznNLDz13PmpKxCql5lVegYZ+lacaLCHlbqRxVUFYgFH3icVZMZuJRnhV9KK0r76IIiW8csnzyYxu4q0z8rjvkCmrKXKpEAcEZochZmB6r/jXJJuUtUapJLQniOZOeg6Vz2o581z7mt/Ydu8dFGa5y+LGd/TdXRl6vUbQ5bpHSbv9CjXuUFZkgKudvQcVplQLFCOrKCP8/hWaudzFupb+lYYay5mu5NboRMgjwF+6FzTJVYfOfugVbCB1k9cYptzEfKK44rrjV95JmXL1KMMeYjnqTkU9VPAbr3p8CZX3xxVuOPeMEc960qVeVsSVzPaPDZ7VnS/LM+PWt54Npx+VYlwu2Zs+tdGGqKTNaOkrM7P4U5/4Si6P/Tm3/oaV7BXjvwmZj4nusj/AJc3x/32lexUsR8Z9NgVakFFFFYHYfN+m8X8WevzfyNbAcqDj7ozmsjTj/pkQ7/Nj/vk1vRp+6RsezU8bJKevb/M+QqpuRpeB1B8W2j990gP/ftq9fryjwcFPiyyYf7f/otq9XqIy5tT28oVqD9f0QV5z8Vl3JpQ/wCu3/slejV518VDhdK/7bf+yVrD4kb5j/u0vl+aPNlUc+tTADCgdaiUgjPoc1KijB9eoreZ8tIlA6DuRTgi9fSnBgQM/eNSCMZznp1rllKxNiLHf1/lTkj84DP3c5FPwBz6/wAqfCvmlcfc7GolOyuNLUzLtB9ocd8U2wX/AImFuO4cVNqS+XMcdTiorFj/AGnb/wC8K7E70G/L9DeF7GtqXN1IvYHNZSvgn0rTud7XcxAyDyPrWa4wcjvXPhVamo+SMpayZqeHiD4h03H/AD9Q/wDoxa92rwfw4w/4STTh3+1Qn/yIte8VVRWZ7uTq0JeoVxnxNx/wi8ef+flf/QWrs64z4m/8isn/AF8r/wCgtUx3R3Y3/d5eh5EFzj0xVdw3z+varsSZUfSqko2yMK7oS95o+apS95ouaS37yXP9yvRfhB/yCte/7Cv/ALbwV5xpagSNj+6K9I+EQxpmv/8AYV/9t4K5a38SXyPRy7/eZen+R6JRRRWR7wVzXjUTHS7Tm8Gn/a0/tE2RcS/Z9rZxs+fG/Zu287d1dLWB4uudPtdJifUvEU+gwmcBbmGVIy7bW+TLqwwRk9M/LQBzXw81G61T+xmjN81vZ6ItvfPcJIqNc5j2gb8bmUCTJGfvDmvRK8+8IahFdeL5otN8Vah4g037CzO8zRtHBLvUAbkRQWYbsDPAVs5yMdrqth/amk3dh9pmthcxNEZoCA6AjGVJ6GgDml+IVp/aWrpJaOumafYverfCTP2hUYq2xMdNysAc8444wTZ0/wAV3X9oR2eu6SNKa4tHu7dhciYMibd6v8o2uoZTgZHXB4rl7/4c6vdajfWo1q6k0+XQmsYnlit0QNltkZWNFIVcq2QB0xkjitn+zNa8Uava3GsaWdKgtLC4t2zOkpmlmCqSmwnCAKfvYJyOBigC1pPjK5vrrSvtujNZWOsKx0+4NwHZjsMiiRNo2FkBYYLdMHBplr44muJrO6fSDHod9efY7a++0Auzliqs0W35UZhgHcTyMgZqlpek6/dyeF7DU9MW0t9AIkluhOjrcukLQp5YB3AHeWO4DGMc1WsvD+urpuj+F5dNCWOmahHcNqXnoUlhilMkYVAd4c4QHIAHJyeKAPRSQBknFeA+K8H4i+IyMH/Soef+3aGvdNR0yw1e0NrqVlb3luSGMVxEsiEjocEYrwPXtPtNM8c+ILOwtYbW1juYtkMEYRFzBETgDgckn8a1o/EcOY/wGdBYyM0ahRkHgn0xWdrTM8pz0HT3q5pcm2DB7k4qrrLLngc4rysPHlxj0Pn5O9JamPaLtvIec/Ov869IiXMAPtXnNv8A8fcH/XRc/nXosL4j2EcEZBrj4mu/Z28z0ctacpXOw8MjHh+3H+1J/wCjGrXrL8O/8gK3x6v/AOhtWpXrYd3owfkvyPapK0IryCvCPJx
PJLnPJwPxr3evF4IMu5YfLk8GpxFT2aueVm0eZwXr+hLZ2+wh5PunnB7VOYt8plbhV6DtSROZi0e0gDofWnyuGIhQ/UivDqTnKpd7/kjkjGKh/W5RmSSebC5CccirKWQVcg/pRNMkER2gEj0qv9vcRglWGeMVv++qRXJojL93Fvm1Zj6qv70/7NaGnECwhH+zzWZqUhaZ/Ruau2ThLaDuCuK9atBvDRT/AK0OaMrNs7v4fDF9rH+7B/7Uruq4X4ekNd6uR/dg/wDald1WlP4F6I+lwH+7x+f5sK868dsV8QxYXP8Aoi/+hvXotee+OGVfEEZIz/oif+hvUV/4bIzH+D8zkxG7ku4Khe3rVF32MWcYZjhQa1WZpgFAKjqao3KoXDOANp4zXPh53dpI+cqRVtCpEu1xJIeTjg9q0gCVEaDLd8VQEbSzhicJnpW1bIsKNI2MkZqsZUUUnu+w6MeZ2Hw26QKCcZPtVWZlZ36A5xUhkkaTJztBqldPhHkXqDz+dclGnJzvJ6s1qSXLZIuxvthkUjPzY/A1zl6w81h71uiT9zI+Pu8/WsC6HznPXmvQy+FqkmZyd3E6b72kQHo23+tZRypZv9rpWwAP7MjbGcJxWUEJjYt1J3VyYSS9/wBX+ZVdfD6DVDZDgnJHK/WpgSSAw7ZNPjTJBHeopyRJtAIzxmujm55WMbWVxIYt+5vu7G/OrimNo8qRk8cVWikxnH3e9IFI4RsDORioqRc3qyouy0JGG5R22n865694nbP0roYgc7WP4+tYuoxgSOfc114KSVRxHF2kmzqfhQv/ABVF03b7E3/oaV7DXj/wpP8AxU1yP+nJv/Q0r2CujEfGfS4F/ufmFFFFYnYfNuksWvoSwx97+RrpoHQQ9QQ2a5uxwLyPHo38jWyhPlBegIyD6GlmEOea/rufJVZ+/dI3/BYx4qs+c8N/6LavWK8i8Fuf+Ewsl7EOR/37avXaUFZHs5R/Afr+iCvOfisMxaX/ANtf/ZK9Grzz4pjMel/9tf8A2StIaSRtmP8Au0vl+aPM04X+dWY04zn8KrpyTxwTiriKcZz07VvVdj5ee5NGoIXjmpvKGQd3TrSwqCgOOTUjRHIYNwOo9a82dT3rDUdCPYFbPUHipEQ7cqMKOmKkVBu5wR0FLu3ERJwPUVjKo3saKJhXxMlw2fX+lN01f+JnBn+9U2oJsunHpg1Fp3/ITt/9+vWvfDu3b9BwfQ2LnIvJgFyMmsydDnIH4VrzqReSd8sTVKZC2WAx7Vw4apZL0RnUXvMk8OqR4j04kY/0qH/0Yte714boGf8AhINO4I/0uH/0Yte5V0yd2e5k/wDDl6hXG/EsZ8MRf9fSf+gtXZVyPxGGfDcXGf8ASV/9BapvZ3O7G/7vP0PKoY8jceAB0rOuP9YT71qMrLFuzj2rKl4kbPNdNB3k2fL0fiLWnjJkx/dr0f4R/wDIM1//ALCv/tvBXm+m53MPbFek/CQY07xAP+or/wC28FZ1vjZ6eWf7zL0/yPQ6KKKyPfCuZ8bahqFjZabHpuow6dPeX6WxuZ4RJGilXJyCR12gD1JA7101c74xncadaadHb2UzapdrZj7dF5sCZVnLMmRu4TAGRkkc0Ac/GfFjeK/7EHjK0kJszdb49MQmPDqu1xv4zuyDnna3pXoVcD4DuraAaLa2Wl6ZZrqWijUbkWVuIiJQ0Y5wfune2M8/Kea76gAooooAKKKKACvA/Ff/ACUPxJ/19Q/+k0Ne+V4D4sbHxG8Rgf8AP1D/AOk8Na0viODMlfDs1LREEalh93mszWG2S59Qa1IYixX0wKx9ZyHBf+9xXnYT3sTufPte6lYq2rb7iP8A3x/OvQ4QTZrnqK8+tRi4ix3cfzrvoX3Wqkdv/wBVcHEib9nY78ta5peh23hv/kAwf70n/obVq1k+GjnQbc/7Un/obVrV6uH/AIMPRfke7S+CPoFePI5dCo4ycc17DXj7sMfJ941y4/XlR5+Y/Z+
f6EjusaBY/vDioWYQAn/lo1CRrAfObO4jmm+UHcXMnVc4xXnxjFddPzfY8yTb9fyHC3XG98cnNRzeURj096SQzTOqrjZ3qGWydBkde/NbwSuueWplJ6e6jF1A4nwOgGKu2uPs0A9AKz70Fpyh6jBq3Z7jHEO4UZr26sf3MTn6HoPw2O6fV/pD/wCz131ef/DT/X6x/wBsf/Z69AqUrJH0uXf7tH5/mwrzzxwqnxFET2tE/wDQ3r0OuB8ZRCTxHFntaof/AB9658VLlpNsePV6VvM51BvIVQRnrVO7t1lnCY+UcmtYlIRhfvN0rLvXMUgA6vxXk4WpKVT3dDxa8FGOpVHzzpEnQda0TG0srLn5FAGKpxlLcZ/iJq7LKyqEj645zXTXcnJKP9eZjTSs7kdzOq7Y0rNJ3GYdqutbCFA38RPrVBt29s4wSc/nW+GUEvdM6rk3qXIWDQEdyozWBcZaZyTxk1tKAFHsRWJKMSyY7nNduDVpyYr7HXhCLGJR02A1QCAtJu5A6VrMu3S48fwxg1mDG58d8H9K8PC1HJTa7/qdVeFnH0GwuoRWI+7yKq3DliGzk81ICJZWT3Iqq4C5x0AxXp0oLmv1OOUtLEkDBVCkfK9SmMp90jA5qC3x5YjP4VKzSIOcYFXNe9oJbDvNywyDxisq/cm6kHbdWgZN5ITqME596yL5ybon3wa6MJC0/kXD3nY7D4U/8jVeegs2x/32lewV4/8ACn/kabrP/Pk3/oaV7BWuI+M+mwP8EKKKKxOw+cNOAN7H7hv5GtZQxh2g9D8tZWmjOoRH2b+RrW2t5Zx1B4qsW/3nyX5s+Prbmz4M/wCRzsfbzMf9+2r1+vHvBXHjKwB6nzP/AEW1ew1J7eUfwH6/ogrzr4qkiLSyPWXP/jlei1578UsfZ9OJ7CX/ANkqou0kdGY/7tL5fmjzmNRsU44OBU8W5jnPApkH3Np7809C2QRjHeqm73PlHuaMIGzI708oxI5GP4qgTGMj8KflwR0wetebKLvdGyeg9uHJJGMYFEQKnK1G2TIu77vAGPWrPmLFH5a9qiV0rLW5S1dzB1KUi6f3xTNNBOpwg/3s0uoMftLHtn+lN0skanCT3avYtbDO3b9Bw2udDNlZZCezGoZE3jcvQ9amz5kkqnqWLCoQWGcfjXjU7peaFLUk0ZSNf07P/P3D/wCjFr2yvF9J3HXdOz/z+Q/+hrXtFehB3iezlHwS9QrlPiCQPD8RP/Pyv/oLV1dcn8Qzjw7F/wBfK/8AoLUS2O3G/wC7z9Dy8kyEkn5ewNZNwoErD3rWeIFAzdQcisi4x57V0YXd2PlqXxE9jkNJjrjNelfCU507xAf+or/7bwV5pYE7pP8Acr0n4R/8g3X/APsK/wDtvBRW+NnqZZ/vMvT/ACPRKKKKxPfCuQ8e6jb2+nRRS2balbxTxyahYxQiV/sxWTDkH7gDJuDccoRkV19cV4q8M6/ql9qz6TcaalvqmlLp0/2sSb0wZvmXbx0m7+lAEngtNCtJprXRfDOpaSHjDvLdWbRhwuAF3sSTjPA9M4rsaw9FTxNHPs1k6QbVYsJ9jEm/fkYzu4xjP6VuUAFFFFABRRRQAV4D4sA/4WL4lJ/5+Yf/AEnhr36vAfFYLfEbxIPW6h/9J4a1o/EcGY/wH8jZhDMVCjjABrL1pASB7mtyzG2POOTWZrkarKeeq/livEwlX/a+U8SpC1JSMa2OJ4s9nX+ddr5jLaBUGWP+NcTAw+0xg8fMP512scZlt8oTntirztK8HIvB395Lsd94YBHh62z6yf8AobVr1k+Ght0C2U84Lj/x9q1q7qDvSi/JH0dHSnH0QV435Sws0m4nvg17JXjwjO8s5O0etcuNduXXv+h5+ZK/J8/0I1EjuPMGExnPvTyWmmEaj93jkimhnuCY1UqvZh3FSuRCBEnLsDj1rgk3e1tfy8zy0tL9BJbiOHaoxmqr3FwzkGMbD0NX47RQoaTk9eRU5WDbj5e
KxVelT2jzeZq6U57uxxl9xLuPBq5bD9xFj8aTWERZmAxnORU1imYFJ6AV9DOonh4yPP5deU7b4af6/WP+2P8A7PXoFcB8NkKXWsj3h/8AZ67+rvdJn02Xf7tH5/mwrgPGgZvEcQHT7Kuf++3rv64Lxlk+IIwOptU/9DeubFu1Fhjlel8zn1RYFOWJOOM1l3krI5LAZ7VriArlnYnA6GufvpJJJsuhXrgfjXBgEqlRu9zwsReMUthLf57kSOep6VtRBY0MknUjNYlrFI06M2VXI4rYMTzStkkKCAPet8dbmSb0M6F97akLCS5bkYUelZc+4SOMdGroHkjiBRcbuKxZdrXDEnADc08FUbb0sugq8Ura3Y+IAW7sx4XBzWRPjzGIrbhYNbMrKAC35isO4wsjLmvQwjvORnbax2YmH2Ha39wY+mBWazHGMYq1If8ARlYDjy1FUXcogBHNeLhqSV7dWdNebdr9iJcKzHPU5FVLlSCAOhJq0T+8CnpjrVadsgH0r1aV+a5xsW3bKEd+1SmZgBvUDnn6VDCvyMR1zxUu8EfOoHrn0qppcz0EhpKtnb+NZd2M3BNabhSQVbGD271mXH+uY5row3xGlN+8dh8Kc/8ACTXGev2J/wD0NK9grx/4VHPiu5/68m/9DSvYKdf4z6fAfwQooorE7D5y0znUofbP8jWuysFYoM4JrJ0nm/iJ68/yNbgV9rEKTyRSxsrVfkvzZ8hUV2aPgrB8X6efXef/ACG1ev15J4Pi8vxdp3HaT/0W1et0Raauj2soVqL9f0QV558UnCx6YD0Pmj/0CvQ684+LAzDpf/bX/wBkq4K8kjozFXw0l6fmjztX4OOo6VcgJJGB8p61moTsUj1rTtiRtG3gjrWldWifLyVmWRGGIwTzVhIDnPOD1pIouQQc9wKsqj5HXDDn2rx6tW2iZrCFyIQBYwpJ44BoYqibRy3vUpjO0Ak8cZ9aaVVABwzVkp3eruXy2OfvmBnZR2PNM04H+1IfTcKlvl/0mTA54NQ2Cn+04Dk/fHFe8mvq79P0M4W1OiBCzSY65qGQZcsvJ9KkZdssh6knIoZNxBHB9K8WLSdynroSaUH/ALd03cMD7XD+e9a9nrx7TUI1nTSe15D/AOhrXsNd1CXNC57OVK0ZeoVyfxDz/wAI7FgZP2lf/QWrrK5P4hf8i9Fj/n5X/wBBarlsdmN/3efoeYPkqC3FYtzzMxHatplwuWPbpWROB5jYPWujCPVny9J2kTWA3GT6V6P8ISTpev56/wBq/wDtvBXnOncNJn+5Xo3whOdL1/8A7Cv/ALbwUVvjZ6eWf7zL0/yPRaKKKxPfCiiigApkqu8LrHJ5bspCvjO09jg9afUVwkslrKkE3kzMhCS7d2xiODjvg84oA81vfGOoeH77VFTVLrVo7LTLi5lTUbJbZllQqE8vCIZEJJyQGA4O7kZ2P7T1rwvq9rb6xqh1WC7sLi4bMCRGGWEKxCbAMoQx+9kjA5OasXXgu51yYv4l1WO+jW1ntoorW0+zqolUK7HLuS2BxyAPSp9P8KXX9oR3mu6sNVa3tHtLdRbCEKj7d7P8x3OwVRkYHXA5oAydL1bX7STwvf6nqa3dvr5EctqIERbV3haZPLIG4gbCp3E5znjpVey8Qa62naP4ol1EPY6nqMdu2m+QgSKGWUxxlXA3lwShOSQeRgVs6T4NubG60oXuste2Ojqw0+3NuEZTsMamR9x3lUJUYC9cnJplr4Hmt5bO1fVzJodjefbLax+zgOrhiyq0u75kVjkDaDwMk4oA6XUdRg0u0NzcJcvGCFxbW0k78/7MalvxxXg+s3sWpeOdfuoEnWOS7i2ieB4XGLeIco4DDp3HvX0HXg/iT/ko3iT0F1D/AOk0NXB2Z5+af7u/kdDZhRjdjAUEflWLrZPnEseCPyrRtWMqLg8Dg5rJ1phLKQe1eFgabWLuzx6806KRl267rlD33D+dd3ZsIoADXC23NzGR/fFdiCd
mVPAxW2ew9pyxY8FPkk5Hofhw50OE/wC3J/6MatWsnwyc+H7Y+pf/ANDatau7Dq1GC8l+R9JSd4J+QV4+C0rMhBAz1NewV5MzF1Kx8MfWuLMXbl+fy2OHMFfl+f6DCyRR/uxllGMCnRxKqieXBcZxntT4bZIW8w/ePXmmyB53A/5Z9wa8nnUnaL06v9Dg5WldrXoivLNNK+E3KM1E0U4JPmcelWZriK3Bx2qjLqigDhvm9q7KEakkvZx0Oeo4p+9LUx9VkJnHqBzV7Tph5ESlSQwPNZuoNmVm9eauWWUtIAOle3Vgvq8YnKn1PQ/ACgXmrkdxD/7PXcVwXw3cvPq5PbyR/wCh13tKmnGCT7H0uXu+Hi/X82FcF4yk8vxBG2CT9lT/ANDeu9rgPGzrHraM3a2T/wBDescWr0mh452pX8zE3NK/Jwo6g96z7iBbi+LcbEGMU59QDKVTIJHcVWEkphIQ/vGPJrz6FCpB823T/gng1asZabjppAjhY1PyntVm4vViiwnLd8HvURVIoyT94+lZsbrvLv65/GumFGNXVrb8TFzlDbqXo1fcZZHzkd6zGYuzkH+I/wA6nkvDINqnoRUSgBmPqa7KUJQu5GUmnoi/HgRYPZRWDcsGmc9K2FcNAfXBrDlGS+fvY5rbBxtKTZcNWjsjGxs4vm4CjP5VnyKdzZ5xyK1kQHT1B6FAf/HRWa3IUDp0NeHhamsvU6K8bWKkx+QMO3FQzDH0IxU+3GFPTBqMqXjLHseK9WDSONjYVKxhs5x2qf8AduuSME8c1Xt3OAw+7zmrLIjA460VNJagiFlVemP89KybgkzP9cVqS4UZHUnFZdyMSt9a68LuXT+I7L4U8+J7o462bf8AoaV6/XkHwq/5Ge6/682/9DSvX6K/xn02A/ghRRRWJ2nzno/zajB6ZPH4Gupi2pFyQfmOfzrldJbbqCN6Z/ka6KIp5JwDmRi1c+aQvUXov1PlXK0jb8LAHxbZMOxfH/ftq9QryjwhIW8XWYHTMn/otq9XqqEXGFmexlbvSk/P9EFedfFYZi0v/tr/AOyV6LXnfxTOE0v/ALa/+yV0Q+JGmY/7tL5fmjzJcqg4J5xWjZzDYikde9QQRFhntnNOZRDkL6cVrVcZ+6fMSldmxGwU5B4FSC6YYXB+Y8H0rGju2XCk9KmW93HHNedPBu+quWqjRqs5kPBxgfrT02BdzkFqzEu2LYB5HJqXdkEuRXPLDNabFqr1KF6R9tcjpVezONUhPYuKfM5ExJ5plsf+JnB/vZr14q1Jry/Qzg9To3X942B34NNYZIKnae4qxEwl3r3U4qF1DOWHDEd6+fhPWz6HTKOl0S6eG/trTMtx9sh/9DFew149pyt/bem7iMfa4fz3ivYa9TDfwz1cr+GQVyfxCBPh+IDr9pX/ANBausrlPiCCfD8QXr9pXH/fLVrPY68b/u8/Q8xdcL8xzxWLMcSMK25VxHluWA7ViT8SjFb4PqfL0/iLFhy0n0r0f4RY/szX8dP7V/8AbeCvOtOHzS4/u16J8IP+QVr3/YU/9t4KK3xv5Hp5Z/vMvT/I9FooorI98KKKKACiiigAooooAKKKKACvCfEg/wCLheJvT7VD/wCk0Ne7V4X4hx/wsTxKD/z9Q/8ApNDTTsedmn+7v1RctpGCgL/FWdrJUyHHpVu3mWFtrHk/dzVHU0/fck1w4aFsRc8CUrwSKNt/r4/TcP511lu5ETg1ylvgTx89HH866SLiV3z1GMVWaRUrIqhK0rnpnhTP/CNWmevz/wDobVs1jeFDnw1aH/f/APQ2rZropfAvQ+pofwo+i/IK8xwkURbvXp1eX+WEy7McdcHpXjZzb3Lvv89jLE30t5jYoZJGPmDCEZ4NMvZRGvlx/exxT5LnehSIAn29KqyBbdGeRuSc89q8ylCUpqU/kjzqklGLjD5szJLbc+6QnJ5PNI0UeO9R3Essz/Ivy+oqtLBMFz83519JThJpc0rHjyavoil
c4MxH4VchOLaA98Vn3GTICeCKvKCtvbnsRivQqr3Ir+tiX8J33wx/1mr/APbH/wBnr0KvPPhdndq+f+mP/s9eh1nLc+ny3/dY/P8ANhXnfj/H9rJnp9mT/wBDevRK898dqG1hATj/AEZP/Q3rCu7Qv5r8xZj/AAGcUzbiFTrUysIAR/EalPlx8DBY9OKaAE+aTjnjNc7mpK1tD5zlsxEid/nl4x0waoS2pllLnoORg1baWWeQqq/u+oYUy4kyCkQyQMVpSc4y9fwFK3Qzlby2fH4VOnCrn6mnGyMahznJHSmRklmJHFdjlGavEzZZZcJjtg1hzEeZIfTIroYyHh45+WufuE2mQ+rZp4OXvNM2ppKR20Tf6HgdSi/qAKozr5SA984NXrcgRqe2xf5Cq1wQWA96+eoO1Ro6aqvBMzpGwx9hSMCtud3XrS3CNkMozuP6U+Xa+OeGH8q9ZPRHFbchtgVABHynrU7wDblSaLcIEEZPJ6U827ImAxPOaidT397FKOhVkjwue3Ab6Csm6UidvSt0oeuO3Ssa8H7x8+tduEneQQ0kdb8Kf+Rnuv8Arzb/ANDSvX68h+FYx4nuv+vNv/Q0r16tK/xn02X/AMEKKKKxO0+cdMH/ABMUHsf5Gt5MG3x3HSsPTMf2lEfUEfoa18F1ZemCcYpY5XqL0X6nyFR63NjwWwPjCxA/vS/+i2r12vH/AAUu3xlY89TIP/Ib17BTSVtD2so/gP1/RBXnXxTXeNJX187/ANkr0WvO/ijnfpGBk/vv/ZKqG5vmX+7S+X5o4iJ0ijVD1NROiCQxnOWGal8pSo+bl8D6U8ICo3cPjArHmSd0fLFIWhBOOlIIXVuRwatqHVsY4HenDcWwV47Vp7aQFdEkU9BTuWJ3damKscfL061H5Hy7mJBPap509WFimxO4H+Gls/8AkIR/71OcEMRj5R3pLfi9gx3eulu8H6FQ3OhtGy7t+VSFUlfPOcVVico7465OBVpdr98EdhXz1WPLLmOuDvGxJYRbdZ00+l5D/wChivXq8msEC6rp/JP+lw/+jFr1mvQwcuan8z18tVlL5BXLePRu0KAf9PK/+gtXU1zHjpd+jW4/6eR/6A9bVXaDZ1YxXoSPMpY1Gc5yawrhQsjfWumkWOJcE5J6ZrnrlcSyD/aNXgKnM2fL25ZEmn5/eY64r0T4R8aZr/8A2Ff/AG3grz3S+TNnslei/CgAWPiADp/av/ttBWlV/vGj0srX+0S9P8j0GiiioPfCiiigAooooAKKKKACiiigArwrxF/yUPxN6/aof/SaGvda8J8RZPxG8SAf8/UP/pNDVLqedmn+7v1RbjjThnxkAdaydTLeaMk5wTWqkbyN97C8VmakQZixU8ZFceEf77e58/LZFGAkypxg7x/OumikAOMdTjNc5EQJ4v8AeFbZyAxHGDWuPipNIIys7o9V8Jc+GLM/7/8A6G1bVYfhD/kVbLPo/wD6G1blXD4UfV4f+DD0X5BXlnzyEqcha9TryoSvINiqynnk15ObJ+415/oYYxr3U/MczxRDKgFhxxWdcxSXMiyMxVR1U9600tlXDSYPc/WqF1K0jeXGCOSM152Ekuf3N+7PNxEXy+99xRlmhiO1dtVXvVZSMD86nayUMC+CaiktIFBwB+de7T9j5s8yXMZU+GfOe+atjJtbc/7PSqMw2ylR36e2K0D/AMeMHrgV6NXRR/roQ/hO6+FvXV8nvD/7PXoled/C3/mLH3h/9nr0SonufT5b/usfn+bCvP8Ax0m/WEAOP9GT/wBDevQK8/8AHKu2tR7Dg/Zk/wDQ3rlxOlP7h5h/BOVKLGNzEEjpmosGc5YbQvY1MLdsfvmDDNROskjfuztHQ1yQab3+Z87JPsNMqp8sYBI9KeEjhQs2Nx55pywRw5YgbqYltJMzNI2VJ4B7U+aD62X5is+2pVuLhpMBVOKr52ybccVoXUUccWFwDkfzrNdjvLV20HGUfdWhjNNPUuphQSD
gDt7Vg3OW3r0yOtbkQ3RSMT8u2secKXJHTmujCaTZcHZpnUwTD7NGM9EXn8qkkjDAkHkjIrLRmQInqoOa0omJ2qT8wGM14tal7N80TeE+bRlMAuMkYKg8VHNGQq8YPWrvlEXA54I6UssQkfI6bSK0VdKS7EOm2itDEHRjnDdAfSnlZEXHLYHWrFtaloc8Bs8GpXgkRTnJ79KxniI87VzSNF8t7FFo2IDc89qxr9MTSDHeumMRKocY3YwPQ1j6rEqTS12YHEJ1LGdSm4e8bXwr/wCRouv+vNv/AENK9eryP4WjHii6H/Tm3/oaV65Xp1/jPocu/gIKKKKxO4+c9LGdQiPpn+RrWwzFiMjBP41kaST/AGjGM9M/+gmtcBy5KnC5IxRi/wCL8l+bPj6u5seC/m8X2B/66H/yG1evV5H4JTb4rss9RvH/AJDavXKSdz28o/gP1/RBXnXxSYI2kk+k3/tOvRa84+KuDJo4IyD53/tOtIK8jozH/dpfL80cRGd5689RU5If5s4PpVa0QuGccbTU4G7BU4OawqJKVux8shELbgME+9TB+SNnToaaiupAOSRyTT92SRsNZSabGhhclQduD1NMCtIAxJXPanliVyAR3NRt5jgMrbR6VUV8hMqyMRIy7flx1ptmP9MiJ7NxT3f94U2mmWhBvIx/tZrs/wCXb9AjuaYciV/XccCrYYHvtNU0XdIT3Bq1hSeRj1JryaqWhtBsuaeD/bOm/vM4u4eP+BrXr1eQ6cB/bOnYI/4+4f8A0Na9erowv8M9zLPhkFcx4640SH/r4X/0Fq6euY8d/wDIDh/6+F/9Bata3wM7MX/Al6HnT7cYYhj1rBvTiVsdya3ZgqqTjJx2rBumDSMcc54owG9z5Z/EiTS2y0/+7j+dejfCf/jw8Qf9hX/22grzjTEJWUr2AY+/WvRfhHn+zdfz1/tX/wBt4K3qpe1kz0ss/wB5l6f5HodFFFQe+FFFFABUVzbxXdrNbTKWimRo3AYqSpGDyORx6VLUN5aQ39lPZ3CloJ42ikVWKkqwwQCCCOD1BzQBwuh2NlH4lvNS8K2MdtpFpZy28rQDbFfXO5SNqjhtm1gX7lyMnBrJ0aytbDSvAevWRJ1fVZ4lvrjcS92JYHeUSf3sMMjP3dvGK7vRvCWk6A8Z04X0axx+WkUmo3Esar6BHcqOnpxRYeEdC0zU/wC0LSwEdwpcpmV2SIv94xoSVTPfaBmgDz+G1hXwtYeLAD/wkcuuIj3G4+Y+688loD/sCMldvQbc9aNXtYZ/DvjDxPKCdd07UZls7ncd8HlFRFGnorDGV/i3nOc16AvhHQk1j+1VsQLvzTOP3r+WJSMGQR52B/8Aaxn3ouvCOhXmrf2ncWIe5LpI371wkjpjazxg7GYYGCQSMD0oA0NRk1CKzLaZa21xc5GI7m4aFMd/mVHP6V4bqb3knjjxA+oQQQXf2uLfHBMZUX/R4sYYqpPGOw/rXvteFeIf+Si+JR/09Q/+k8NNdTzs0/3d+qNaO2kdUEeADy2fSs7WbceaQO1dBaukMe89SoH6ViasHUAEDe4JI/nXz2BrzlirdF/T+48vEUoxoprcwYUxIhP98fzrbQFi6+prIhOZ4h/tD+dbONrnHbmvaxj1SOGGup6j4Q/5Fe0A6AyD/wAiNW3WJ4ROfDFofeT/ANGNW3WkPhR9bh/4MPRfkFeZb1VMgHNem157HFHGhc/WvAz+pGCp38/0HUhKbVinHbzyM4lKlD0xVe68qzjY4PFXzdbztiwTnn6VDPY7yJpM5HavHo1mqi9touyOKrSTh+61fc5qVrieXcuNmOMioDbXOCWK1uXEkcT7R1+lUWvI33KOo68V9NRxE5RThDQ8OpSjFvmlqc9Mp3YPUEg1fIJtbcjoVqrfgCZiOjDNaEKAafbD/Zr1a0/chL+tjG14nafC4YOrfWH/ANnr0OvPfhiu19XHvD/7PXoVKTuz6bLf91j8/wA2FcD43YrrKEd
fsyf+hPXfVwXjYhdYRv8Ap2T/ANCeuTGfwvuKx38E5ZUllf5iNvemSSpDJ5aA7iM1OGlbhFHvTDHHbMxYnLHPNedGSvZ/cjwWtLr7yJLeSUkykbc54p01yceXFnOMcilEskoAjANWvLjgj56051OVrnV+yCMLr3X8zJNrK26SXGB0xVBmBZhWxLc+Ydi9DxWaYNs7D+E9K9HD1JNPn0OWpFL4R8eTaH+6wrFbjK+groVH7javQLWFcIVd/TBrswkrykVHRnQywN5cbDoFWrES5bcvXIPP0qxbAPCM9kX+QpgYdV65xXgus5Jw7HSqaVpCR/LJ5j9Q5UY9KldPKBA+n+FKT8hx1JJp6ktCS33jiuaU3e5tGKtYlt4ZDAjDHHWrBfpkHjrxTrUslqnHHf8AOrSPG2PfjpXjV67522r6s9SjRTirO2iKbx87uxrmNZXNy6+tdfOAGGOlcprJKzyMB8uCD+VerkdRyrfI4cyhyx+ZrfDH/kaLr0+xt/6Gles15N8MT/xU9yP+nNv/AENK9Zr6yr8R25Z/u6Ciiisz0D500lc6rF9D/I1uF/LDNg4BIP51jaOD/a0J7EH+Rrp1VQhOOMnP51zZlUUayur6L82fKcvN1JvB4YeMbDPQ+Z/6LavW68v8LLnxZZMOxk/9FtXqFaUZ80LnsZUrUZLz/RBXnXxSA36Rn0m/9p16LXnnxRj8w6UPQTf+yVvF2eprmX+6y+X5o4i3f9yCvY4NSxx7nyv1qGHEMQJ7nJq6kbBg6DIrkrS5W7dT5mKuKj4Ow9e9KZUJIweKeXGeetRmWMnbnnp0rlSu72NdupB5qlNyg881G29x8uMVPuUnC0xhIVygFdEWl0MmUXdRIVINR2i/8TBB/tZp0pxcFT60tsuNQhI/vV37U36Ex3NWOIl5dv3gTt+tTsFbKODnHNPgibdIcc5JWnsQOH4OO1eFOreR1xhaIaaqLrmmYzn7XCP/AB8V7DXkOnBf7a07bz/pcP8A6GK9er0cM70z1srVoyCuY8dKH0SEHp9oX/0Fq6eub8agNo0Oen2gf+gtV13am2duKV6MkecyBFGAD0rm77/XyFemeK6mQoBgVzl8MSN6Z5qctl7zPmJe7JE+grue4Hbb/jXf/CUg6f4gI/6Cv/tvBXCeHB5ks2P7o/rXd/CYMNP18MMN/anP/gNBWs3/ALRNeh6eXL9835P9D0KiiiqPcCiiigAooooAKKKKACiiigArwzX1z8RfEvtdQ/8ApNDXudeF+ICf+FjeJlHGbqHn/t3hp9Gedmn+7v1RuWMgkiO/jaazdXuATvABO3Aq1HE5ZSrlR3HrWdrBUSKFxjBrwcJSg8Vdf8MeTWnL2NmZEbYuYsf3x/OtrO4lB1OR+uaxYsfaIv8AfH862lG1898mvYxlro44bHqXhD/kV7PH/TT/ANDatysPwf8A8irZf8D/APQ2rcq4fCj63DfwYei/IK8+W2KxjLNjrzXoNeetJI8e0Ky5HWvnOIub90ovv+hpLl3khjSwW4LsVAHGabczG5iCxcj2qI2u4AStuXHIPc1MkkMOFG3NeFywi1KN5SX3HNzTknGVoxf3mXLYZlLsxye1IbKIA4xn6U6Z7iWT5UcAfrUTwXJbduYD0xXtwlUsuaaR5Mowu+WNzA1SFRIVzyKuQrm2tx229ar6mjmXODnvV+yj32sA77c49a9urUth4Nv+rHnxjeTSOr+GoxNrA94f/Z67+uJ8Aw+TdasPUQn/ANDrtq6ac1OCkj6TL044eKfn+bCuF8Z4Gsxs3T7Og/8AHmruq4nxeFbWUDDINuvX/eauXHu1Bv0LxivTsc0sqkHywCaj8gu2ZOMetWisUakqq59BVCWSWcYUNGc149FuTfJou7PGqWive1J/NhicKNuc1CbOSclmdlBIOKSKEb13kE571NLcSAYjjYjHUVrrB2pvXuyNJL39hJIIolySM/Ssd/kLgnoTz+Nai2dw53yOxHXaRVG6jUS
lePvc114SSTcea7MK8Xa9rCxnFpxywHIrBnOGYDmt1iEiZgccYrBcEPg8kd69TBLWTMuqOm5iUtuI3IvH4ColLdycZ3VLMQViHqo/lUZUmA4PP/168yG131NJb6E4n3NtPGBUscjFiCMdQKplwFL452g1I0uHVgOKzlST0SKjO3U37SdPKAOMGpWCHkNj0qlaGPygDj2p0qNklZMDHFfOVKMfauzse3Cq/ZK6uE8pTCk8DvXL6jIWmmz031uzMQoVzkjjJ/irm9Qb9/L/AL1fRZRRUZHkY2o5WR0nww/5Gm5/682/9DSvW68k+F//ACNFzz/y5t/6Glet171b4j2ct/3dBRRRWR3nzvox/wCJpBn/AGv5GuyhEf2faSMkk/rXFaVn+04scYDfyNddbqTCSX5PT2rzs7jeone2i/NnzVF2lsavh0geLLNVAx8/P/bNq9IrzHwwceK7BScn95/6LavTq2wUeWkketlzvTk/P9EFeffE/J/ssDv53/sleg15/wDE372lDuRN/wCyV1oeZf7rL5fmjiYChky2MElQKu4eKTCrlMZzVK1QGI7sZzkZ9auJIwG0qW7Zrz6/xaHzlPYcZFJGQM96jbYT2zUhAJHydacI0PGB7VgpKJo02QgqG4Az3FI5fHyJkVZ2oDkKCT1pGJXhYyR7UKprsHJoYNyP9JbIqS2jxdwkHPOc1PPGGuCdlRxHbeRqPXivV9pzU7Lt+hzfaOjgi/dZzzUT/wC0oBqa0DeUqlvmXqfWnyQsGIKE+9fMe05ajUmetyXgmiHTgP7Z0/H/AD9Q/wDoa163XllnHt1XTzsx/pUP/oYr1OvcwE+alddzvy+PKpIK53xmM6PF/wBdx/6C1dFWD4sAOlR5H/LYf+gtWmLly0JM7K6vTaPPZEJGQuR61zGo7hK6gZyxrs3+7gR9a5bVoCl4wxwcmuTKa/NUaZ83iYcjTJfCyAXFwPRP8a7n4V/8eniL/sK/+20FcP4fYpPckDHy/wCNdx8Kjmz8Q/8AYV/9toK7Wn9cqPyR35bK8/k/0O/ooorc9oKKKKACiimySLFG0jnCICzH0AoAdRWTpPibSNchu5rC6Lx2jbJzLE8XlnaG53gcbSDnpg0zR/Fei69cNb6deGWUR+aFeF498ecb03qN65/iXI5HNAGzRWJp/i7QtV1H7BZX4knbf5eYnVJdpw3luQFkx32k4oi8XaFNrH9lJfg3XmtAP3TiNpVGTGJMbC4wcqDng8UAbdeE+JG2/ETxKfS7h/8ASaGvdq8I8SYPxF8Sg97qH/0nhqonnZp/u79Ua+91O1T2FY+sqyMgz65rZ89Yn+bOdorF1WbeN/ua8jAKXtk7aHi1muW19TOt+biIHqHH866CICSRhkZUH+dc/bn/AEqL/eFbFuhjndx/GTmvRx0b/cYQdmeq+DTnwrZn3k/9GNW7WH4PAHhazA/2/wD0Nq3KqPwo+sw38GHovyCvPprgJCMKSQB0r0GuAk8tIw3oBXzfEPLzUbq+/wChrK/K7OxRKzyAgNjPPIp6WacNJgsO9K14oHy5z9KpXMt7JInlFPLyd2a8unCtP3VaCPOnKlDV3kzU3xJHxj86rNdRkHHP41BFZngnr9akXTYo0IUHk561CpYem3zSbZbqV5r3YpGNqXlhtwH3xmn6ccRQk9l4qtq6rGpLZ/dk4qW1k8tIAehXFfSuF8Kktf8AhjxL2rNs7nwW4e+1THZYf/Z66+uH+H5zeauf+uP/ALPXcV6WGhyUYx8j6DBS5qKfr+bCuQ8TRLLrADY/490/9Cauvrk/ESF9ZXHX7Ov/AKE1cOcy5cJJp22/M2qR5rK1zBe0iQbsDIrMuC3nbUB5rdNrv+/0+tU5xHBIRzXzODxfvW+JnFisN7t/hRlpaTmQEuMVqKkUC44H41Te+XzMLnjrxVkWZmBMnf0NdWJlOSTrPlRzUIxTapq7I2vFLhMEZOKxL3YLqT3NdBNYx5Vucj3rmNWRvtT
YxtzXdlLpTqfu9NDmx6qRj749k3ROvUbc1iTf64Vt2vmNC5bHXH4Vizj96R6E19DhHaconn9mbk7lVhIPylenrUQueQACAc1a8ndHAD/CvH5VE8S7iP4hxXDCULWZUlLcjSQOCP8AZqwSAQB0qqEEUi4/h4NPhLMcfnVTit1sTF9Ddto1aBDwGHSnSJMDw4xUUSsIAy43YpDLPsGcZ714DjJ1G01v1PWUkoJNDH+YAPyV71zeonFxMPU5ro3k3Lk9Rwa53UMedJ7mvbyxNTdzgxDWh0nwt/5Ge6/682/9DSvXK8l+F4H/AAlFyR/z5MP/AB9K9ar1q3xHu5a70EFFFFZHefPGkkf2rEPUH+RrqY0ZoyVYDniuU0sgarCfXI/8dNdMrS4/d4xnvXFmsW6qt2X5s+WhZbl/wtkeMLEMcndIP/Ib16pXlfhQH/hMLPf97MmMf9c2r1St6HwI9fK/4UvX9EFef/E0hW0pj287/wBkr0CvPPikNw0odv32f/HK3SuzTMv91l8vzRylpEssOe4ORVpI2U4PP0qppwMkCbeuO9bFu+3CyfePpXh4ypKEmlr5HiUIKSV9CIIuAdh54qTyEyPl6VoIkbDoeaeYI8d+K8WWOs7anpxwd1fQzBGi87etHGdoQ1eaKNW780B407Gn9aurpNi+r20bsc/dBI5JARyP8KyYX/0+L/erV1Uq8k49SMfpWVGALuADsa+pwetG73a/Q8WrpUsjprQktlTj5q1AwIwQSaxrMPsOzG8DvWuJxGhZ8/KMnAr5bMIfvNNT2sFP3NdB1uA19ZMFIxdQ9f8ArotejV57DOj3diFz81zCRx/trXoVe1kbk8PLmVtf0R6FJJSdnfb9QrD8VY/suPPTzh/Jq3KwfFrBdJjJ6CYf+gtXfjlfDzXkXVdoNnGyTquMKea5nWLkNcPweK35Zhs47jNcpfM08jkdmINefk9BKo5NHzuNqc1o3LeguGec/wCx/jXcfCs5tPEOP+gr/wC20FcBozeWs+f7mP513vwn/wCPDxB/2Ff/AG2gr15RtiJv0OjK2va2XZ/mj0GiiitD3gooooAKRmVFLMQFUZJPYUtFAHkp1nTfEQ+I+m6LqlpdX2ox/wChxQTKzTgWUanbg88gr9a0zqNp4q1/RB4dlDmy026FwyAj7N5iIqRv/dbcM7eo2E16PRQB5Zo1/aanZ+AdGsARqelSRtfQBSHs1jtpI5BJ/dyzBRn72c1Xs7mF/DOi+FFJ/wCEhttajea22nzIgl0ZXmP+wUBIbodwHevW6KAKmox6hLaFdMuba2ucjElzA0yY7jaHQ/rXhmqJeReN/EI1G4gnuluot8kEJiRv9HhxhSzEcY7n+le/V4N4n5+IniMA/wDL3Fx/27Q1UVdnnZp/u79UbCxqfnY9VH8qwtR4kYe+fwro7aAzwISSPlH8qydahAJZVzyBXj4CvFYjkb1PGr03yKfQyIV/0iM/7QrYgVt7buOeKyE5uI1/2x/OtqMM02MY5P8AKvRxj0+RzQ3PUfB//IrWfsZB/wCRGrcrD8HgjwtZg9cyf+jGrcq4/Cj63DfwYei/IK4CG0UgOWbkZIrv68/XzGAAyAR1r5viNytT5Xbf9DW0brmVx8ixJnp+VZtxcAFVjALHOBWnHZEtlnJ+tWPskCAFguR3Ir5qliKdJ3fvBUw1WstPdMIG6xnyxn60x3vQh/dr+dbMssCNjK1VnuoAuNy5PQV2UsVKbVqX5nHVw0YJ/vDk9S3yH5x83cVLbruWFu4XgVNqB3yNsQGi1haWGEnKnaCcdq+q9qvq8b6f8MfP8j9q0tTqvhyP3+seuYs/+P13lcX4Bi8qXVP9oxH/ANCrtK9GlJSgmj6HAK2HivX82Fcr4h3/ANsjYMn7On/oTV1Vctr0qxa0M4ybdcD/AIE1eZnl/qUrK+35nW7cyu7GWFnI+ZcVnSW6/aWZic1qtcMRhUyaxr2OZpCMMuR1r5LAczm1dRuYY3l
UE1eROBEo6DP0prXEhU+WoNVLe2cOu6Rm571snyY1/hroxDhRkl8bOegpVYt/CjHZrxyPMjAXuQa5+/8Amlcsed2K6+W6hZdqspJ4rlNSGWf5cZevbyeo5Td4cp5mYQUUrSuN87bEVHQ1kS4Zi2eorShyUkBXpyKzmGDyO3Svo8OlGTseam9DoxgMpB58ocVHKwCoD95jSFyDGCMZjFMJZ5GVlxtI2n14ryow1uzZsfPEWWXA+lJAMpjHzDI/GnSFlUJ1JODUluB5hyMDOfxNDk1TBJORoIkghXaM8UwmYDlBVqNZNqlULKRnNOYNg5jrw/bWk9Ez1PZXXUzpRlcjqOtc7fH95Ie4yK6uWNTnnB6kVzWpRYmcjoRj8a9vK6qcrHn4qDjZnQ/C0Y8T3X/Xm3/oaV63XkvwwyPFFyMcfYm/9DSvWq9mr8R7mW/wEFFFFZHefOuk5OpQZHPP8jXTIJUU7VBGT1rndKGdSi49f5GusiV1jHyZXk5rhzepy1F6f5ny1KPOy14VVx4ssTIMM2/j/tm1eoV5r4dYt4tsPlx9/n/tm1elVrhJOVJNns5ckqcku/6IK8++J6hhpY9RN/7JXoNeffE4HOk49Zv/AGSupFZl/usvl+aOM0wyJsdRkHrXRwSRuRnqfauc08sij5cqec+ldRYiMwqTjJ6V85nLSbk19x5WXJt2TLCohHU80eSuPvHipPLU9GoWEYHznivl3U63PfVPpYjMaAkknmo5PLVetWxEvdqjmjjCnJFOFZOSTbFOk1FuyOR1IL5zcnr/AErOtgBdRnPetq+iR5HGQOazPI8u6iANfe4SqnR5fL9D5OtFqo35m/aR88dQa0xHJuwUBXFQWEIcZzg1eDshK7cgd6+Pxtduo1HofQ4SilBOQ2FT9uscgDF1F/6GK9CrgI3JvbIbcZuYv/Q1rv6+gyBt4Z37/ojrgkpu3kFYXitS2lRgDJ84f+gtW7WP4k/5B0fGf3o/ka78xly4Wb8i5rmi0cDcxybOFFczeFo95KgZbmuznV3ThK5fU7Z342kZOa8zJsQm7SsfO5hS5Wmino53PdZ7Jn+dd38JhjT/ABAP+or/AO20FcTo0JSS5B/iTH867n4V/wDHn4h/7Co/9JoK9xyTxE0vL8jbK0va3XZ/mjv6KKK0PeCiiigAooooAKKKKACiiigArwnxCnmfEvxEvrdRf+k0Ne7V4dreB8TfER7/AGqL/wBJoad7Jtdmefmf+7v1R0trav8AZodh/hwcCqGqaa7NjPHU8VuaXMI7Ub+cgVX1O5XdkKa+DoYqvDFtR7sdbDUXhVJvXQ4l7NobmMn++O3vWkjkTv8AKeDUd3Nvu4lwcZB/WnGdVuHHpX1s5zqRXMtbHzySi9Gep+Eju8MWZ9d//obVtVieEDnwvZn/AH//AENq267IfCj6rD/wYei/IK85+2MoCiNjgda9GrzmOZNitg9BXz+fxTVO8b7/AKDqSaatKxMLmU4AVhTJI7qcjbMyDvTvtSqM7TUDasqOF8uT8q+ZhSqt3pQ/X8wnVpWtUmWFsGJBdwx9SKc2lROMkKSOelQ/2gzdA35Ug1R0faVc59BRyYxvRj58Gt0Zl/a+RIQOcnHFLpcO9YlPXbUt5c+fLjY2cZzipdO2RylyCNoxXrzrVVg/f+I8qNOm8V7vw3Ol8JIsdzfqoA+WL+b11Fcr4PkEt1qJH92L+b11VfSZcmsLBS3senQacLrbX8wrkvEe0a0rMAcW64/76autrkfEgB1xQ3T7On/oT1hnP+5y+X5lVOnqUElQHoKq30m5sKpzjtUwWIc8fnTJHRSCASenFfFUUo1FJJsKrlKnyt2KEUE7TxneVUHkY61oLZOVw8m76io2nxgBWz9KX+0Cy5CsPwrprTxFWzikjnpRoU7qTuOOnRgZAXI9q5zUrYhm56NxW+tzK+euPpWJqDSSXUi84GD09q9DKnXjWfPI4sw9i6acFYzseXH0zkgGsuU7mJx
2rUkkMayblPHtWUxzjg9K+vwyerZ4bOiu49kEEoGThQcelJhS+RgEGnXU37iGMcHYKpLK248/WvKowlKnr5nRUlFS0J58sGZW6Hip4EyQc8ZFU0cgAepqxvkCbY2w3AFOpB8vKhQkr3Z09sQkQVhT28ruBSWxRoIxJjdjBqRoIWBBAwfevh6kkqjcr7n1sIt01y2IXtYnBdVXPQ1zGq6e4dgD1ct07eldWSId2Pu+grF1K5VrgoAcjB/WvWyivWjW93VHnZlSpOnd6Mm+HERj8T3HH/Lm3/oaV6lXm3gHnxPden2Zsf8AfSV6TX2yk5JNjyvTD/NhRRRTPRPnzSsf2nAO+T/6Ca7uIJ9nUEDpzXC6UoGrQN35/ka761jQ2678bjz1r57iWSjOD8v8zw8rjzXSH6GijxRYFVxzJz/wBq9BriNIRF8RWe31f/0Bq7eu3J6nPhU/M9ShDk5l5/ogrgfiUCW0oD/pt/7JXfVw/wAQ03vpnsJc/wDjlelOXLFsyzFXw0l6fmjkLWExwAbCQR6VsWyqsCjIB7UlgqtZxBh/CDVhoUJBHGBgV8fjMV7Sbg9NTkw2HcIqa10Abs/6ynAScfvO9M8sjuKTDj+IVwNJ7NHWm10JwHycycYp4QMMMwNVhvz94YqRA3dhWM4Napm0Jp6NGXe26mdwMDnNZUkLJdxtnKlttdDLAJJGY4yaz7y2aMKwIwGzX0GBxasoN9DxcVh3dyS6mxbx7YQVOCKc0hB6E0xN5iVkOMihi4714DXNNtnsp8sEkSQS7r6yBU83MX/oYr0KvOrdj/aNiD3uYv8A0MV6LX1mRxUcO7d/8h0pXlL5BWL4mbZpsZxn98P/AEFq2qw/FOf7Mix/z2H/AKC1d2YK+FmvI1m7Rucv5pZfunmsLUmIlK7TxW0jsKzb+MyMz18tl3LTrbaHmY686W+pkaWC88nGPlzXZfCz/j08Q/8AYVH/AKTQVylopjuJjnqhxXW/DDHkeIsf9BQf+k0FfUUpc1eXojDKlaXyf5o7yiiiu09wKKKKACiiigAooooAKKK5vV/GljpF7dW7WWoXS2Uay301rEHS1RhkF8sCeBuwoYgc4oA6SvCvESkfEfxI46/aoh/5LQ17nHIk0SSRsHRwGVlOQQehFeGeI5FX4h+JQf8An7hP/ktDTV+h52af7u/VHY2IR7SMEngU67ij2FvWud+3mFAobqBjmo7vUi1t5ZbHcHNfGrKK0q3OpaNmP9pUlS5HHWxau4oxJ78GspkU3Lvznoaha+aZsk8jA/WnPJt3EdSM19FQw9SjHlk9TxqtWM3dI9a8GsH8KWTDod//AKG1btc/4IOfB9gfXzP/AEY1dBXclZWPqcN/Ah6L8grzhII8Dr+dej14udUbewB6EivJzXC1cRyKm7Wv+hjjMRCjyuave504jjApP3SmuVfVm2cNz9TVVtRYyffP5mvJhkNeXxSOKWbU18MTtw8Qp6yRVxA1JghbecD3qZdRbap3HketRPh6ovtFxzlfynYtFE/zVTKp5r4/zisSLVyqBS3JBPWmjUdreYx9fyqKWUYiDab9B1Myozs0jtPA+fP1PPX91n/x+uxrh/h/KJbjVmByD5J/9DruK+tw8XGlFM68C70E15/mwrkfEcYl1wK3/Psh/wDHnrrq4Txjd/ZddQjHNqn/AKG9cuZ0p1cNKEN9PzNMRUjTipT2uQfZIx69c9akt7WJHkbnLEE8+2KwP7ZJHUc0Lqx4GepxXy0srxji02cscwwsZJqJ1YjiAzVZWiGa5t9WIRju6e9Z76k28Zcj8TTo5BWnfmkOrnNJW5YnaebEDWVeNGt1urE/tBs/MxHHrVS7viJFy3b1ruweSTp1L36HHic0VWFuU072SLJrLlCAfWqTzF3T5ict+lSyzrkjPQ4r6KjhHSSje55FSpzu9i9MxkEUw+7jApByhI+9mp5pIzYDafu1ntcBWXB4IrKlFzjothz0ZPGSQwboeBU9qwM
m4/dOaqRXKmIA4yGqGG9VCgzyW4rSVGU1JWFGVmjujbRyCKTJyo459qeIAo4J/OufTV2RArYBxkUra4UIVsAkZFfJSyrGN2T0PfjmGG3a1NqY+XgHoeBWDfgeZu7Hcf0p66zvdvM2hV5zWdfXyeds3etell+ArUqlpI4sZiqdWPunU/D5SPEFyx6G2bH/AH0lek15l8OrlZtfnRTyLVif++0r02vooJpWkejln+7r1YUUUVR6B4Bpa41GNuwB/ka7iGJZESTJztx1rhraVYrlTmtyDV9kQAIIU4NePnWErYiUZU+h8zgMTCldVNjr9GgWPXbRxnOWHX/Yau1rzjwxqRuvE1pDxj5z/wCONXo9a5TRq0cPy1d7/wCR72Gq06sXKntf/IK4rx8R5mmg9xKP/QK7WvP/AIlyeXLpJ7Ym/wDZK9CpDni4meYO2Gk/T80MtbSM2US8/KB3p0tsABjsPWsK01rMCjI3AYI9xTZ/EJWPeApGK+LeV451XbuYrH4VU0mtbGx5bZx2pfKaueTxBKUDFEyaF8RsxxtXNdLyjG9kYLH4fzOg8o0ojNc+/iCQdFTGKiXxJK4yqJjHvQsnxsleyD6/h13OmETNyetQ3FuzIR2rHTxA5bG1cVN/bXmAqNuahZdjKc07DeMw04tGuqSLbLsxnb39aHEvOMdOPrWR/bJhKpIFAJAFSf20pBzt4OKzeX4lO/KmX9coNWu0alqJf7Ts92Nv2iL894r0avMdKv1udTtkBHFxEf8Ax9a9Or6DKKc6dKUZqzv+iOrCzjO8ou4VheK939lR7MbvOGM/7rVu1z3jK4Fto0cp7TqPzBrsxkXKhKMVds3rNRpts5YCbj7v3f1qCaF5Ijuxu9qpNrkah8kfKQD+NI+rgsV+Xgc18zDBYqMrqNjyZ4mg42bGLEVuCW9K6n4bII18SIOg1Uf+ksFcM2qB5mIxgKcmu0+Fk32i18Qy+uqD9LaAV9BhqNWFTmntYMsnB1ZJdv8AI76iiiu89oKKKKACiiigAooooAK891Q32kX/AIvt10i/vW1pVksXtoDIjOYFhKOw4jwUzlsDDV6FRQBjW2gRt4W0/R7ya4/0a3iiZ7a5kgYsigffQq2OPWsaT4W+FJrmW4lttQknlIaSR9VuizEAAZPmZPAA59BXZUUCaT0Zxx+F3hU9bfUDj/qK3X/xykPwt8KN1tr8/XVbr/45XZUUWJ9nDsjjB8KvCQ6Wl9/4NLr/AOOUv/CrfCh/5dr/AP8ABrdf/HK7KigPZQ7I5iDwDoltCsNvLrMUS/dSPWrxVH0AlqT/AIQjSv8An61z/wAHl5/8dro6KC0raI5z/hCNK/5+tc/8Hl5/8dqh/wAKt8KZz9mv8/8AYVuv/jldlRQS4xlujjP+FV+E/wDn1v8A/wAGl1/8co/4VV4S/wCfS+/8Gl1/8crqtQvoNM026v7pttvawvNK2M4VQST+QrA0nxVe3Wp2NnqujHTv7RgaeyYXIl3BQCUcBRsfawOBuHB54ouL2UOyMDQPhdocmnSnVLPUBP8AbLpVDalcL+6E7iLgSf8APMJz3781q/8ACrPCYxi1v+On/E0uv/jlTeJfGUmha9Z6VFBpm65tnn87UdS+yINrKu0Hy33Md2ccdDXT2zyyWsLzpGkzIpdI33qrY5AbA3DPfAz6CgPZQ7I5L/hVvhT/AJ9r/jj/AJCt1/8AHKD8LfChGDbagR/2Fbr/AOOV2VFAezh2Rylt8OvD9nu+ynVoN+N3laxdrnHTOJfc1Y/4QjSv+frXP/B5ef8Ax2ujooKSSVkc5/whGlf8/Wuf+Dy8/wDjtVp/hz4dunD3A1WZwNu6TWLtjj05k966yigGk9Gcb/wq7wr/AM+2of8Ag1uv/jlL/wAKu8K/8++of+DW6/8AjldjUF5LcQ2cslrbi5nVcpCZAm8+m49KVifZw7I4LxH8MdFj8Maq+k22pHUVtJWtQup3LEy7Ds4MmDzjrWgPhZ4UKgt
a3+cc/wDE0uv/AI5W14Y1ybX9LmubmyWznhu57WSFZvNAaKQoSG2rkEr6VQ1fxXeWmo39rpejHUV0yBZ71zciIqGBYJGCp3vtGcEqORzzTD2cOyK3/CrfCh/5dr//AMGt1/8AHKRvhX4SY5a0viffVLr/AOOV1djewalp9tfWzb7e5iWaJvVWAIP5GrFAezh2Rxf/AAqrwjx/od9x0/4mlz/8co/4VV4SPW0vvX/kKXP/AMcrtKKd2Hs4dkcd/wAKu8K7cfZ9Qx6f2rdf/HKb/wAKr8JHraX3H/UUuv8A45XZ0Utg9nDsjjP+FV+Eh0tL7/waXX/xyk/4VT4QyD9jvcjp/wATO54/8iV2lFO7D2cOyONPwu8Knrbah/4Nbr/45WX4g+GGipp8L6Zaai1z9stlbGpXLHyTOgl6yf8APPf9O3Nd1qc99bWRk06xS9ucgLE84iGO5LYOMfQ1W8Naz/wkXhnTtY+z/Z/tsCzeVv37MjOM4GfyFIPZw7Iwv+FW+FOf9Gv+ev8AxNbr/wCOUh+FfhJjlrS+J9Tql1/8cp0PjW5u/Ft5odva6Sv2S7W3b7Rqvl3Ei7EcukPlHcAH4+bkqeRXY0B7OHZHI2vw08NWUpktI9TgkI2lotXukJHpkSewq3/whGlf8/Wuf+Dy8/8AjtdHRQUklojnP+EI0r/n61z/AMHl5/8AHaP+EI0r/n61z/weXn/x2ujooGcX/wAKq8JZz9kvs+v9qXX/AMcpw+FnhQDAtb/B/wCopdf/AByuyooI9nDsjkYfhp4atphNBHqcUo6Omr3SsPxElW/+EI0r/n61z/weXn/x2ujrG1jUNbs5saZokN7CsXmPJLfCDnn5VG1snAHXaOevWgpRUdEjB1TwZGmo6Ktlc68bd7xlvca1dnEXkSkZzLx+8EfI/lmrl18OfD17s+1/2rcbM7fN1i7fbnrjMvHQVu6NqsGuaJY6rbK6wXkCTorjDAMMgH35rnbLxxNdS2F1JpBi0TUbo2tpffaAzsxJCM0e35VYrgHcTyMgZoBpNWY1fhb4UX7ttqA+mq3X/wAcpD8K/CZGDa35Hp/al1/8crs6KCfZw7I4z/hVfhIf8ul9/wCDS6/+OUn/AAqrwiDkWd9n/sKXP/xyu0oouHs4dkcZ/wAKr8Jf8+l9/wCDS6/+OUg+FPhFRxZ3w+mqXP8A8crtKKLh7OHZHGD4V+Ex0tb/AP8ABpdf/HKUfC3woDkW1+D/ANhW6/8AjldlRQHsodkcc3wu8Kvjdb6g2OmdVuj/AO1Ky5/hjow8UWMcVrqP9mNZ3DXB/tO5x5weHy8nzM52mX/IFdB4g8RatoUd7fHQo59Jso/NmuPtoWVkC5YpHtIOOeCyk447Vu3F0sOny3arvVIjKF6ZAGfwosHs4dkcxH8M/DMLh4otSRgQQy6tdAgjp/y0q5/whGlf8/Wuf+Dy8/8AjtV/Bvi258VwJdG30mO3e3WUraar9pmjZgCEkTyl2nBOeeCMY711dFilFLZHOf8ACEaV/wA/Wuf+Dy8/+O1DcfD7QruPy7mTWJkznbJrV2wz64MtdTRQDSejOMPwr8JnObW/Oev/ABNLrn/yJS/8Kt8KZz9m1DJ/6it1/wDHK7Kign2cOyOLHwq8IjOLS+56/wDE0uf/AI5W9oHhrS/DFrPbaVDJFFPL50nmTvKWfaFzlyT0UDr2rWooGoRWqQUUUUFBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGd4g0v+2/Dmp6Vv8AL+22stuH/ul1K5/DNctBb+I7/UtJvr3Q/szaJaTER/ao2+2XDRhAsZBO1MbuXweRxwa7qigDnNVur9o4gfCDai01sPMHnwbY2P3o3LsMj3UHPpVnwjpNzofhPTNMvJFkuLaAI5QkqP8AZBPJA6D2FbVFABRRRQAUUUUAFFFFABUN3NJBaSzQ20lzKillhjZQ0h9AWIAP1IFTUUAcN4Wk8Q6XpWr
pN4WukuHvrm9gjku7cCUSzlgmVdsEK2TkY460/UrHXtM1rX59K0oahHrMMexxOkYt5ljMf7wMQSmApyuTwRiu2ooAz9B0z+xfD2m6UH8z7FaxW+/+9sQLn9K0KKKACiiigAooooAKKw5/GnhW1uJbe48S6NDPE5SSOS/iVkYHBBBbIIPGK0LHVLLUwz2M4uIgqsJowTE4boUfG1+n8JOO9AC6jdXFnZPPa6fNfyggC3hdFZuexdlXjr1rlvBP9vaP4N0fSrvw7PFcWghtZvMuocbMfNKpVmyF9DgnPFdpRQBw/iiw1PX4ZtJtvDX2d3uo3TVZJodkYVw3mqA3mb8LwNo574ruKKKACiiigAooooAKKKKACuO8Zf29e3dvpVpo17c6LLHuvprKeBJZeSPIHmSIVBHLMOcHAxkkdjRQBl2s91Eul28OivbWrxMJVaWMGzCqNibVJDZ6fKSBiuN07w/rqadoPhmfTRHZaRexztqPnoUmihYtGFQHeGJ2A5AAweTxXo1FABRRRQAUUUUAFFFFABRRRQBw3iVdc1PxCLOfw5fXnh222SBbWe2AvZeD+8EkqkIp/hx8xGTwMHqJru+8+WJdJMkH2Qyq7ToN8uT+5K9uP4unNaNFAHGWWn6hqHjHTtXbQf7Ft7G2mhkMksTSXG/btTETMNi7d3J64wOtdnRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHJ+JI0/4TPwb8i83lznjr/ostc94o1DUYV8bx2uoXNuYDpwt2jkI8jewDFR0Ge/r3r0iS1t5poZpYIpJYGLQuyAtGSCpKnsSCRx2NRS6Zp85nM1jbSG42edvhU+bt+7uyOcds9KAOIXRJ38bXug/29rY086ZFeY+3P5izGR03CTO4LhQdgO3PbtWHDruva/beE7Z5Nwu9GF3If7TfTzczAqD+8jRmJA52jH3snOK9YFrbi7N2IIhctGIzNsG8oCSF3dcZJOPeqk+g6Pc6dFp1xpNjLYw4EdtJbo0SY6YUjA/CgCn4RGpL4dgTVbqC5ukeRfNhn84FA5CgvtXcwGFJwMkGtyobW1t7G2S2tLeK3gjGEiiQIqj2A4FTUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAH//Z", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from speechbrain.augment.time_domain import DropFreq, DropChunk\n", + "from speechbrain.augment.augmenter import Augmenter\n", + "\n", + "freq_dropper = DropFreq()\n", + "chunk_dropper = DropChunk(drop_length_low=2000, drop_length_high=3000, drop_count_low=5, drop_count_high=10)\n", + "augment = Augmenter(parallel_augment=False, concat_original=False, min_augmentations=2, max_augmentations=2,\n", + " shuffle_augmentations=False, repeat_augment=1,augmentations=[freq_dropper, chunk_dropper])\n", + "\n", + "augmented_signal, lenghts = augment(clean, lengths=torch.tensor([1.0]))\n", + "\n", + "\n", + "plt.figure(1)\n", + "plt.title(\"Augmented Signal\")\n", + "plt.plot(augmented_signal.squeeze())\n", + "plt.show()\n", + "\n", + "plt.specgram(augmented_signal.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "\n", + "\n", + "Audio(augmented_signal,rate=16000)\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IaCQ4R_VmpcW" + }, + "source": [ + "The `Augmenter` accepts augmentation techniques through the `augmentations` argument and combines them to generate the augmented output.\n", + "\n", + "Users have the flexibility to set various hyperparameters to tailor the augmentation strategy according to their preferences.\n", + "\n", + "For example, by setting `parallel_augment=False`, the selected augmentations will be applied in a sequential pipeline. 
Conversely, if you opt for `parallel_augment=True`, each selected augmentation will produce a distinct augmented signal:\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 928 + }, + "executionInfo": { + "elapsed": 3095, + "status": "ok", + "timestamp": 1704408995764, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "wdxwizg0nk85", + "outputId": "8b022b08-6856-45c4-9e18-1dea8e4aa549" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([2, 45920])\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sdelang/env/sb312/lib/python3.12/site-packages/matplotlib/axes/_axes.py:8089: RuntimeWarning: divide by zero encountered in log10\n", + " Z = 10. * np.log10(spec)\n" + ] + }, + { + "data": { + "text/plain": [ + "Text(0, 0.5, 'Frequency')" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/jpeg": 
"/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8Aoat
E/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeCeKAo+IviQbRj7VD/AOk0Ne+V4H4pGfiN4k/6+Yf/AEnhrSluefmf+7v5Db+NftO4gY2rj8qoXlsiW8RCj5hmtK7USSxn/ZB/SqeoPi1jb2OKnDSdoI+ci3z6GXbosc6HaMBh/Ou5tYo40YlRh1BArh4eZUHq4/nXfBAIEf0UCuXOpW5F3/4B3JNz5jmNSgVbosFADfNj3qEIoT7o6Vo6ooe5Q/7JxVNgNxT2zXVQqc1KJ583rYpC2U4AAp/kKcnaMrxnFTYP8NT4zCD3reVRoHOTKAjUkLtHB9KvG3SOM/KOmaSCH96o9+atTDHHqePpWNWreSigbbKUSRxoHCD0pjIrSNlePpU4XOzHSlmKngdqal7xNzEuVCysdvU46VuCBBoiOqgFm3H9KyLk8kDvW4Af7AgP8RBrTFSaVP1/Q65u9NP+uhlui7HG0fd/rVKNFUOdo9avNny5D3Cf1qtEQVkb/Z/wrog9GKk2osAFxGMCgRqBtwMAZpyAYRfyp7KDkDsOadwcrOw1bdJEAKjGeatIiIu0KOabHH90L65qfAQMO9YTnfQwnNvS5GwUhm2jpxTFRCwG0cc08sAygdAM0gXnf3aktiehG0SbydozkV1NpbxyW4bYMlB29MVzTgFgf7pzXY6eoe2T0Cr/ACrys3quFKLOrCx55WYSxptjG0ZzzxXG36RmcgqMiu4lQNdbR1ABNcdqcRFy5PUA4rnySonJryua4pcskzR8pPstsNowVJP5GsK5hTLAKODXQ+UxsLdu22sK5j23BK9Cea9LAzvKWvf82cusZIqGNAfujNIyKEGAOamTD/N7Cjy9wGe2TXp81tzTns9SgIV7KMCni3XJ4HrVnySMAdO9IV6t36VfPc29u3syuqKHwQOBTWA3YAHGKe5G4E9c0Edc9ao0T6sYqIQeB6U0xrkDA4OaBna314p4BVgT3qjS7XUZtUsRt75/OhUVcqFFSYzjHqDTVOG59eaQua6LenKpusYHOf5VPJbhpvujIPWodPXdfxj6/wAjWntG7A7Mc1x1p8tT5HBiG1O6JtLtI3uApUFQwNdMLWNbfy9
q7kTAPvXPaQxju8N0YHdXRz7gysehwPxr5fNZzddRvodWDS9m5PVjFt0MygoPlHXHtXMazHGbp8KMA11iyq8LMM9dv9K5TWCFuyi9BxVZPKTru/RBjElFcvcxZoxhcAYANARfMT5R92pbhQVx6ikX7yntjivq0/dOZS9wQIoabgdv6VVaJPMD7RxkfrVvp9oI9v6VA3Kr+JqoP+vkXTbT/rsNWNc52irARdmcAE0yMYOParDKrJn8aU5akVJ6lFUGcYGQatWiIt0nyj71Q9H981Nbj/Sk+ooqaxZpUbaZNfRJ9qfCgbSQKp3UEcu1WUEZB/EHIq7dhmuZT7moJvvfhWdJtRiZU5NNNMrPGmQCopRGp6gHNPZOmaFQBQOwre+htzabkIiQKBtGN1TeUhY8CkP3akAB579KTYpzYx1DJHgDGD/M1XdFw3Aq4R8g9AP61WkUd6IMqlLWxNpxXy51IH3a9N+DShNF11QMAaqf/SeGvNLBA3mY7rXp3wfGNJ14f9RX/wBt4K5sRuztwD/2qa8v8j0aiiiuU9sKKKKACiiigAooooAKKKKACvAPFRI+JXiQDp9oh/8ASeGvf68B8VsF+I3iT1+0w4/8Boa1o/EcOY/7uxxO4qexUD9Kp6gcQwxjnFXbT549p6jpVHUhtSL1INRR/iqPY+apr3kZqbxcR7RnDA/rXfxOHt4U745rhbc5uIyf74/nXdRQg26bTzXJnjjaF/M74tuVl2Mu9jBusHoBxVHZtG4jgDOa0roETAHqABVGQgqgz2IxSw0nyJHBVXvMrh12nFW4gMfUZqvHsIJOBirUMa5+994cVtWaSIitSKC2OXdckk5/OnTo3zkDnaFH15qaNjC0j9UPT2qK4yzLtPKjcR61mpylO72KaViiWZcAetBhYDzOeRVuVAkO8gZyOKru5CYPeuqM+b4SNjIuuCnvW2jN/YFvuHzFelYs4J2gjnNbikP4ftMddxH8q1xb0p+v6M7JfwjO/gkz/cqtCuWmI6bP6irbnmQdttVoukuB/B/UVvB6Mypv3WOiXakeepxUjoccd/5UYJijwMkAVMVO4ccEVLlrczlLW4+JCCuBUcvEjE1dtlO7BXvj9Kz7zmdgDjBxWFOXNUaJiriH7qp6jFSucHj0wKrofXr2q1GgZlB/hOTWs9Nxy0YjxlUUdzj+ddXZxGW1jTkAgZI9q5iQkvnHfius0twbePPHy14WcTkqMZLe51YFKVTlZZ3Bn3dzxXIauAJZCPvV1QjfLHBAPIrj9Vz5kjEkc5rDJYr2zs+xvjG2o3XU3ipOjW7DsuDXPzr+9PvXQKT/AGPF6Mma5+RcuOe9d2XaOfq/zOPEbx9EQJFiTA+lKEO4Z+lTKuJSxOPmzTkGWXI53V6jqGLk2ReQRJx3XNRvCWGe/er7ALHgn5s8VCw2lj1Dc1EKrYrtGRPHlgPSoiSSfetCZA2D0NUGAJHOMGu+Ero76M+ZWGjds5HOKN4Khf4gKXPyEHrRtC898VZ0XXUVMKcUzBwCepNPK9DTQc/Ke1Al3LemEf2tGAex/ka12GcjuWOayNOIF/G2PXn8DWmCSx/H+dcGJX7y/l/mcWKfvL0L2kY+0Nnq3FdQGWXIHUDNcvpA8y5YY7gg10vl48shsYIz718rnCXttXqdmAvyPsREqUQDscn865rWowL1pMnBFdLKAqA5wQ3IrmNbJ+0MRz0Nb5PrWuvMjGfCkZMi/IB6inIPlB9qZKSqj6YpYtxiQ4+bA4/GvqvsnG0+UfjBnHsP5iqsuRGpHqQfzq8cHzv90fzFU5T8qjHHOaKb/r5FUnqv66DogSBnrgZq3tG3jqarR8jI9BWkEAhLdxzWVaVmZ1HqY8i7ZyfSprZv9Njz/eFRz/69z24pYDm8iA/vCt5aw+R0bwv5GlcR4vJx/tn+dU7rAXHfir8hzeXRI4Vj/M1RucMMVy0G9L+RzR+MhfAx60188emP1pzFcgE8npTWYcV1I3jfQNm
VI9aeFzg+hpiHK7s8VMAMj65obsKTaDbgY9RVSTChs9qvPyoPtWbIu4OM0UtSsPq9S7p/V/QrXpvwh/5Bevf9hX/23gry/TCS0gHZc16l8JRt07xAP+or/wC28Fc2J+Ox6GAVsXJeX+R6HRRRXOe4FFFFABRRUc8K3FvLA7OqyIUJjcowBGOGBBB9wcigCSivKtWkn0DUvE2oaPf6o8Gg6S25LrUZ7hHu5BuXKyOw+RAp/wCB+1a07Xfg3W7KOLUr/UYbvTbuWaO8uGmzLCqMHXP3c7mBVcLyOKAO/orznSmv9LPg3VH1e/vJdbYRX8U85eNzJbvKGRDxHtZMDaBwec9aq2F5qI0PQfFrarfPd6jqkUU9q07GDyZpjGI1i+6uwFSCBnKnJOaAPUK+f/FuP+Fj+I8/8/UP/pNDXuuo3k9jaGa30+5v5AQPItmjDn3/AHjKv614Hrk8t9471+eeynspGuYs285QumLeIclGZeevBPWtaPxHDmOlBmlbIqAHgHGaztTwI1Y89cVoHhhjsAD+VZepf6uLJ6g/0rHDK9VNnzcPiSKFtlpohnGXH869FijxbpjnHJ/KvPLYA3UR9HU/rXosGfIfB6plfauDiFtKFj06CTmzFkVt0zO2TvJGfTNZsnFyR/CDkVfufM2IhP7zYMn1NUJMCUqe/FbYba55dXcFRSOCBzzViIELjuvAPqKqopLnnjOanWYhD1yB/Wtqib0RnEnZ0w0XHHWoC+XTjljyfQUzZuPUbz96gtw57j5RUxppbDcrgyvLdNlj5QTp2zQVRouQMimvcCN/KUHLDk0GNmX5eprSzVr6IGZF2f3gx71tqqx+HrU46kn+VY14uyTjpkmtuT5vDlpj8fpW2KelL/F+jOnR0vl+pkythnQDGUzn8arQMd8q+kf9RU1wf3pH+zioYB+9l9AmP5V2RXuhBLkZct8nB7bF/lVhVKOu45ycCq0T7Y1A7KP5VeUq7pu7HP4iuWq2mc0viLMCZDZOCQQPrWVeJ++xnJzyfXitfcDyOwzWRM2ZCT3JrDC352yk7WsRRKepPXp7VchUsQRwSeR61VXLcDtzV2BlGZMdBiuis3YUndksoUbV/Wul05V+zIPUAA/hmucmjHkq3fNdNYhVtbdR2G7/AD+dfO5tL9xG3dnbgI/vGStJlyFOQM9K4nVyfOkUnpXZDbHKQoPzuRXGaqd1zL75FVkUUqrt2RpjJXcb9zd3Y0GEjuvH8qxfLYr1565/GtglU0KFcchf61mQvulYfwkV24O8VNr+Z/mcdfVx9ERyj58jpSgqJmPbIIof7rAEZqMoWAZeMtj8K7krrU5yeQhpAcYAFRMNoOTkDmrcsShc+qgGqxX7yt6fpWdOSa0G0UbjqCDjkGszOZn9BWtNGOP0rMZAjse5NenRasduFkrNCEZApCCw60gzuGOlG47wg64zWx1WfQeWBfaKYV4J7g8+9KijcWA5Jx+VJnKn1oBabFiyz9siI4wCf0Nay5Cs1ZdgD9ujHbB/ka2lXJYe4NcOKlaXyODFfGvQsaWxiuwAPxrpRkEAnPI4rnbIH7XkfwiuiOGCnHzE4NfK5tZ1U/I68D8DIZxkls5XOa5nXsibjuBXSTcwEJx3P51zWsyCSZRjsP5V05On7VeVycU1oZk3KAeppYcqVGe1IeUBPbilGPMA9K+m6WOP7NiRBuE3un9RVWXgBT6VbiOFlY85H9RUMse4gilB2kwhK0tRsHT8q0+kJbt0xVBAAx9Ktq5ZCf4emKyrK7uRN3dzOmAMzU20/wCP+LP96lnYB3I7HFNtub2LPUNkV0/Yfodcf4b9P0Nq6AWafav3mIJrMm+Yjtiti4UebMB3Y1izHg+tceEd0c0fjISQXbjlTx+VBIO09qB/rWJ9OKbjoPQ13nVYegHlr6YqUtjJxkgdKiTAVQKmQZ+bviokZT3uPBLw5xjI6fjWeeJ8dsc1pLynPQj+tZsgYSvz9KKW7ReG3ki3pRH
mynHRa9R+Epzp2vn/AKiv/tvBXlunAjzP92vTvg+SdJ17PX+1P/beCubEL32zvwH+9y9P8j0aiiiuc9wKKKKACiiigDHtPDWn2umalYOJLmHUppprszkEymX7wOAOAuFHoAKr6T4RtNLvBdy31/qM0dubWE30iv5MRIJVcKM52rktljgc10FFAHNaV4KsNJvbSdLy/uIrBWSwtriUNFaBhg7MKCfl+Ubi2AcCkt/A+nW2oQzrdXzWlvctdwae8qm3hmYkllG3d1ZiAWIBPAFdNRQAV8/+KyR8S/EXp9ph/wDSeGvoCvn/AMW8fEfxI3f7TCP/ACXhrWj8Rw5h/AZPK224ZexVf5VnX6ZWJu2Dir12wE2O5Vf5VQv2by4uOAOKjDLWL/rY+ah/EKNoh+2RqehkGPzr0e22/YARnG0j9a88t2AuIfUOp/WvRrdgLRfdSR+deXxG3an6nq4Z3m79jHuFLS7f7owaxZHDSMx9625XHnkHuDWJNHi4cepNbYHaz7HmV9wWXadg6kcVKrjZnsRz+dRKwDHgZ7VJhWiYg9RmuuaXYwRFGW8xpB/FS52/QAH8abESTkj5RQRlVj9ck1o1qAEiMFz+FSw3J8s/pxVRwZG/2RxSqRgAHqeaqVNNaj2Kl9IxkYnoTxWyjH+xoh2C1h3gwwUdhWz/AMwSDH9008Sly015/odL/hIx5nzKT7UkWfMlJ6bRj9KJOC2e2KWI/vJfTaP6V1/ZKXwaf1sTxn92F77RVlSWZd3bkVBGQYz7gYp5HXYSTXPNXZyy3LLXHDY9eaqkKTu9Tmo2k28DqaljTOHPQUlBQVws0rgDhV9SafaZfYD2OTTAVIMgPsKbA/ljaDyaJK8XYOhfaRpCCPuA4NdFaDEMEh6lSn4f5Fc00oWAKMZyK6SBt1pAv+zmvCzOL9nFW6v8jrwb95stL8s4Y9AMCuN1JQt05PYmuwTb9nD5425Brj9TYNcOc1GSp+2l9xrjNom5cJjSbdx1CZ/WsSJ/nK9zmtyeRBpsC5/5Z4rnmcLKw7iu3L05Qkn3f5nLiLcyt2X5DwFO/rjdiljYOB6ZJoiTJPvzTZDtjI6V37uxzl0Z25b8KiX5mYN16GnBs8jpimN85bscAVgkMgkQq2Ow4FZEmPOb2ya1pi24E9jWROcSEDqa9DDXOnCq8mNQfdPpRjB3Dqc/zoI24oPLgdq6jt6igYfHakYBQx+tK/Dn0zmjP3vc0CV9yzpzZvI/x/lWxGx8xh/tfpWLpik3sJ7c/wBa2Q+C7dw2DXn4te/by/U48UrT0L9k+dQ2D8a3t4yc/wB4AVz+mHN4V7gitaRWiVm6sTnB9a+ZzCmpVlHyOjCycabZOiD5j3x/WuV1lSbl2HbGPyrqFRtjA8blBrnNbwHYj7wHStcpdsQwxXwxMlTiOP6807K+YTSLzEhpCwU49Rx9a+nOK12SRkAyemKjYHPHeliBLSE98f0pQCWyOnNLZi2YoXkgdBUkLZQj+E03pkCkB5wOlS9UTuVJlxM+O55/Ki1H+nRN2zSSrl3Yk8H+lOtG3XCEdK6H8HyOy/7v5fobckg86Qd2yax5hgDPerxZhI5I5xVO542+prkw8eV2OWm7yIAf16UxjyR3qZQNgppXg8c12J6nSpK42PhVB65qeNjsPrimBelSAMARjjbwaiTuZzaY5DlBn0qjcgs/481bHQZ9P61UkIEhJ9aqmrM0oK0my1p+VMuem2vTPg/xpOvf9hX/ANt4K8ys2y0uP7tenfCH/kF69j/oK/8AtvBXNiNzuy//AHqXp/kei0UUVzHuhRRRQAUUUUAFFFFABRRRQAV8/eLT/wAXK8Rrjj7TCf8AyXhr6Br598WnHxM8Rf8AXzD/AOk8NbUPiOLHr9wwuv8Aj7Y5/hXj8KragwMUKj0P9Knuv+PoEdwufyqveqBFGT1AIoorWH9dD5mHxop25/0qHjguOfxr0pFBs0I4KivNrb/j6i9Aw/nXpEGGt+D
nC8ivG4k/5dvsethrc7XkYV6wWX0IJrNuTmUFeuKvX/zTEg9ySKz35dGHdWrqwkbQT8jyarvJioyFiDgHOB78VMIcK+OhHFMihVpdxxxVqJGw+eijA960qzS2IirleFAQEPGD19ajuAVdgFwScCrjQbIlwRuzzUN23KsBnbiphU5p3Q3Gy1KEpx8i/mKh2MvPNXHRY13tjJPFVHmJBGDXfTldaCjfoVbpvnHGS36VrByNBj45BwKxrlsMWxn0rYMbHR4CGwCOlPEJWhfudMlamjJPO7PfmnJj94R/d/wppUqJMnJ/pSQnmQ9iuf5V09CrXi7f1sW4T+5zjkAVK43LlePpUUXCbu2BxT+QOvUVzyWtzlluNdMAHqTTwxCBcd6OmN34VFJKqcd6FeWgleWg/cM4HAHNQowXc7HoaYJeAOuRmkxuwueO9aqFtzZQtuWEfcBk/wAQNdhZgeTE2cjbtA+vNcZENzj0yK7WzXECgdgCPyrw86soRNsMrVADnyioHAYj8K5DUCTcuO2c12ix5wcYGTn3NcdqCkOzdxms8nlH2krDxCacbm3MA9pbgf8APIHP51j+WHduOc/nWtISthB7RdfqTWYgIbcT06j1rpwl1GVu7/M5au6FX5E3+9JKm/t0P50+TBUoCBkCm7iycHBzXSr7mRKmQCu3jHBqMnLFhxkYx71YjKmJckbuabIijDLjnt71kpe9qVbQoykkZIxzWVKMuSeua15s7eR/FWTP/rD9a9LDnRhdyLBKjJ5xQCTgEY7UoBzyeKOPyrpO24DHzc5yaQj5se+aANqg9eaXnYT3oDYtaZzfxD0z/WtIyKDJ0+8f51m6ZxqMX0P8jV2UBZn92P8AOuKsk6tvL9WcWKXvmno5xdSM3bvXQBxKAowxGM+1c9on7y6O7oQMg10NmgDvxjGQT618xm1o1XJ7pI6cGm4qPcCWXyxyc55rmNYyblx711CkyTdMKhxz3rl9bVlvGweOT+tXlD/f2e9hYte6mu5m4+Uc8YpuA24Hj0prNhQKUkcn2r6ixx2ZJGRt69OtOUbQR2PemRbcn0PJp5bHy1DWpD3EbI4HPvQeRge/NIDyQeuBzS4Jyo4460AU5eHYZpbRgl1GvbNMnbbOQfSnWw3XaD3610P4Pkdtv3evY1bg7Z5h6E1VdQwGexqzeHFxcHrgk4quFJB571x0vhTOLZ3ISCqqNvBPJ9KQDg1LKSqqp5z3pCPlAA61unoWpaDQBlfrUzjaPwqMbd68d8052PmY9qT1ZL1ZC3T3x/WqkvLuDx3q7KMKD3x/WqjKN5J5JramzqoPqWbFcGY9cgmvTPg9/wAgjXv+wqf/AEnhrzKwJDTAnsa9N+Dpzo+u/wDYVP8A6Tw1y4jd/I7sv/3mXp/kej0UUVzHuBRRRQAUUVl+Jmu08Kaw2n7vtosZjb7OvmbDtx75xQBLb63pN3qEun22qWU17FnzLeO4RpEx1yoORToNY0y51CXT7fUrOW9h5lto51aRP95Qcj8a888J3Fzpg8K2lpqVpfw6nYO3kR28afZSsQYMpUbtu7CneSSSOc8VW0f7D/wjHw7+x+V/a325PN2483d5cn2rd367t2e+M9qAPTP7Y0z+1P7L/tKz/tDG77J56+bjGc7M56e1E2saZbajFp8+pWcV9MMx2zzqsj/RScn8K8wH2X/hBo/9V/wkP/CS+3nfaft3Pv8A6rP/AAD2p2sfYv8AhFfiB9q8r+2P7Rfyt2PO37Y/su3v/c247596APWq+ffF4z8SPEZP/PzD/wCk8Ne630mpRWAbT7a1ubvjMdxO0Ke53BHP6V4D4ie9k8c+IHv4IILs3MW+OCYyov8Ao8WMMVUnjH8I/rW1D4zjx/8ABZLcZM6MO4Gfyqtf5ZR6EnNTyvtK7vRcflUN2peJG7E8VpS0cT5im7TTK9sM3Uf+8P516LYxlLNm/jbqf5V5zA2J4vXeBXplqQLNc90FeDxNJqEEup62Ejeq2+x
zN9tS6b/aXJ+tZyZ3qG6LkVe1XAlPvk/hVBCfMJ9V4rvwq/cp+R5FT42WFjk3nBG2r8DFYCH6jrVKIS7zgDBOf0rSyrRY/OubEy2TKpLqVWZljeQ9xkVTBYqIyfmJBNWJJyzsP4AKqOdsu/ua3oxdtURJhKhnkGfuCmSwoIzjr9akWQmF1XqP51GbWURMWHf1rpi7aN2sSZUq8jPrWx8w0m35G3FZE5xkemK12AOjQg9gK3xH2PU6Z3cFcy353E9dtRw4w47bf8KmYZDY9KijXBc/7NdC2Ki/daLEWWUY+7gZpZGZSKIuUx6gUhOTzWf2jB/EOMgY89hmqU+Dg+9TuwLcdhUKjeMn1rSCtqbUly+8Ng5l2noAal6EY6tSxIFNNRiGBbtVN3ZUnzN2J0ONoHXNdvaEJaRMepjBrjLeP5hnqa7O3RXgiznKIK+dztpxin3Kwr992LGSxdfbcK43VDtlb0bJrtEJVFJx92uN1ZSzYPUZrkyR/vpI1xtvduaMjk6fGD0KDH51liQCUg9DWq0ZbTYfdf61jTcBs9c16uEUXzLzZ59S916EhOZ89himbmHGexzTSp4J6MaG4QnvXYoogs2/zEE9FB/wqdtqMPQYYfliqcJIXJ71IXK7c9B1+lYTg3IL2EmdXUYH8Wax5SDK+O1aE8g/h7VmN95j6mu7Dwsjpwy1bI92Bmg7uTSAEL75qUkbCT0rpO56bEYPyqD0PSnNw2B0J5pF/wBUR6HigNlB65oBrUuaUP8AiYRntk/yq1Pxct6Ek1V084v4vx/katyfNOR7n+dclT+Lfy/zOHEP3/kaOiti4cHueK6SAkwxMONzc5rnNJjAud3fOK6eOIeUm77y5P518rnEoqr/AF2Z1YFNp2/rYYcq5x0Lf0rl9XkJvJsngEY/KupmKi3z6/44rldbj2SsfUVWTWdbX0FjVokYjsSAPwp/GAPUE0xF3AU5l5Rj2FfXPsYO2xJEoBPoRUjtjgfhUEZJkOPu4qRzheOp6Vm1qYyj7w4dMHrT+eAOtR4zweuKlAJAA61MiGZtypedvapLHP2qMdwRmmzjZMxNPsj/AKRG3q1byf7v5He3+6+RpXB/0iYnpuNQIc5FPu3DXDgdN1NUc4FcsFaCOBjZdxApFPycdac+4delM7kdjVrYa2FwMqR6mkUFnVj6U4DIA9GzTlHShuwXsQzAkLj0/rVKbf0U8g1oTfdGP881Rdvn9jW1J6HTh2yxYj5ps9cV6Z8G8jRddz1/tU/+k8NeaWPPm+pU16Z8Hv8AkEa9/wBhU/8ApPDXPiHq/kehl7/2mXp/kej0UUVynthRRRQAUUUUAUrPRtL064muLHTbO2nnOZZIIFRpD/tEDJ/GiDR9LttQl1C302zivZv9bcxwKsj/AO8wGT+NXaKAKX9j6X/an9qf2bZ/2hjb9r8hfNxjGN+M9PeibR9LudRi1CfTbOW+hGI7l4FaRPoxGR+FXaKACvn/AMXf8lG8Sf8AXzD/AOk0NfQFfPvi7P8AwsjxIP8Ap5h/9J4a2ofGcWYfwGVbobZgeuVXj8KJ3Bs4vUZ/pS3KFpc5/hH8qS4QLZRE9SDmtI29z+uh81GzsVITi5TjowNekQSL9iDZ6IK84tyPtEee7gV31qM223OQy9PTBrxeIoKSp36M9HDTcZu3YxNQPmXMiY4BwD/OqCKBIMnpj9KuXpIupB6E5P41QyRORXZho/ukvI8uo7yZpW04BbdjOeKsMAFZlP3ucVTgaMICSOOKlkLJFuBLZ4ArmqQvPQuMtCFyp3RqclSM1TkJJJA+6cCrN1FiIsrYYnnHeqY4bYW56120UrXRmyW1xF94/nVt7lZEZVwcVnnmTOeCMYqxboOcnv8A0oqwi/fe4k2Zd2mJMetazx50e3YfSsq8bMpPpxWqGP8AY8BJ4Ga2r35ab8/0N3/DRmOMbh7VXyR5gH93/CppGzlvaoIx88hPTb/hXXFaF01ZalyPBgwOuBQ2Dx6
06EBo/cqMU3GcduhrLqzn6sryH5yFqNFP61IQA57mkJwufetk9DqTsrIkXJkUdhzmk2AsKfEpIHqaGQoMZ5qL6mN9bFiM4biuwst32JGYYOB/KuK8zYUxzkgV20ILQxopwPKB4/Cvn86XuwXmb4NWk2PjHmqj5IKAggf59q5PUmO4swx1z7V1gfbE6IMnbnIrjr4udysCeOaxyaLdWT6F4xq0Ua+8tpMZA+ZQSB61iSneQp4PU1rAlrCHBwMYP5VlFcSkE5x3r08JFRcvVnHN3a9BR9wE9Fpj58s547CplG7K465pkg+T2FdcXqZrcbGTgLTyxHBH/wCqmKfk3D6UgfcDu4OMU2rsLEEzggYqk65IP41cmAwMd6qnn61109EdtDRXQ0Y5Pv8A0pA2QR7UAYOO3WkOBgdzWh02BVwmCe2aVQMj2FO7YpCMA4ouK9yzp4P21D25/lV0hmbOMEMao6ef9MiA98/kaus2x8ZyQTXJV/ifL/M4sTfnNjRV33EmOoFbzsd4x0xz7Vz+iTBLp1x171vR4MbndnYD+PFfKZmmsQ2+yOvCfw7IRnRtybvnC52+2a5rXT+/XHPQEfhXSIoMrApyUHzVzWrRFbxstkCtspsq/wAicU3ypsxwQhYDqDQRvCt6cU1/lJJ9adFk+wr6rpcwe3MKnyuxFSMeBx9Ka+EUnvTgRsXP4VL7mTd9SRUzx3qRj5agLyQKSMcZzk9aJWAUnvWD1djLdmbMS0rginWhBuIwOzUkuN+fen2qYvU9DXXL4H6He2vZ28izM3+kELzzmn85HpzTJeLokDIzU7p8wA6YrmbskcUtLET5X6VHyW9qfKCD1zSAEMDjrVLYFsC9R9RTohgAA5IpqA9x/EKfGvGQeR1pSYMjk4UD61nt8xOexrRmHGKzHYrGxxkgnj1relqjrwyvsXbAhzJtOcda9M+DvGj69/2FT/6Tw15lpIzJMOny9fzr074PjGla8P8AqKn/ANJ4a5sR8TXod+AVsVNeX+R6NRRRXMe2FFFFABRRRQAUUUUAFFFFABXz/wCLv+Sj+Iz/ANPUP/pNDX0BXz/4v/5KJ4kP/TzD/wCk0NbUPjOLMP4DKsjZkUDvgVFe7vLjUnjmpU3Bm9SOKTUCCkSVpHScUfM0/jRShBN1CP8AbH869AtT5cAbBOAQK4GIgXMbejivQbYltNyOv/168fiB+7C+1z0aGs/RGBdruupOPvAVUEX7xSe+RWjcktckntWcZCMY6kVvh3JwSXY8yp8TLMVuhDdMd6kU87SDtU8flUEKtg4+64zVgyKQqfgazqXvbccbWK0+VJZuV+8BVGQD7/8AFVu6lEgZB90cVQZ8YP4Cu7Dxdrk7vQe7BQMfU01JmzwTg1Exwp3dDRDMmD14rp5PdK5PduQ3Wcn3Oa3Cg/sK3PHXNYlyMkelbcYL+HrfH8JIP6Vhi3pT9f0Zs9aRjTcK4HXHWoYgSWBPG2rUoG5x/s5qrFkNIPauyLvEqm/cZei+WEeoFRNuC/e6UkcmUH60jPgNioUWmYqLUhg4yTyTxScN8pHfFOjB2Fj+FI3qOtX1NepPG21s+9OfBbd2xVYNuanK28MB0B5qHDW5m4O9x2cOgP8AertbVjsiw38GK4sL5kiegIrrYAVERQjO3Jrx84ipQivU2oStItW0mJmVvTmuc1EoJpB3bJ+lbjY84+ruFrnNRRknlZvvE8VzZbTXtm+6QYiTcVHzL8mFs4gvQrmspgxYtmtUgCyRT/dx/WqVqu+dgfu9q9ChLljJnLLcVBhmJH3SajkXEQB6lc1amZWDgdqrSNv6+hFaQbbuTsQJyuwfWl2A7iaSLCEH0qVsMrMvUit5OzB7lCbIOOwFVFbIDGrlxndj061SAJwO2a7Kex6FDWI7t+FNYA7T3FPK8fhUYOQCe1WjaPdDl5P0pSeCaQNgn86RcFwo6gUBbqXdOVRfRnHc/wAqnuTslJx/FVTTWzcRnvlv5GrMpJlJPrX
NNfvfkcddWqWZq6MN8jMPaty2fHno3XcB9axdB+87HpW7FCWuQ/8ACR81fM5nJe1kpdvyNcNF6W7krTKkzIByEBz+Nc3qh8yZ2z/y0P6ZreEZSaYvj5j8uPTArA1WPZcf73NLK4xVXTewYtycde5iSKWVh3yDmnRqcpj05qR1CxH2FERxIn0r6hy905+e8SO64IAPfmlDAItJdqS34/0qI8BPYVUVeKKjFSgi3HIQetDOGGT3qsrDr3NKr7mJ+mKlw1uS6WtyBzlmz0zVi2YG6jHvVbqD9anslBvYz74rSfwM6ZpcjLc4C3LYHAbFWF+Y/wAqhvlMV4w9Xp8ZO7n7uOK43rBM4JIjdSH68Y6UBTtBPrmnkfMcUjNhBmnd2Fcci7lIxjvSKAAx96eGJRj+VM6njueanURFKRg1mvw+Pc1fuDtSqEhOWx1rrorQ7cMi1ZZRZCOu2vTvg8c6Rrx/6ip/9J4a8003DeZnslel/B//AJBOvf8AYVP/AKTw1z4h6s78v/3qV+3+R6NRRRXMe4FFFFABUc88dtby3EzbYokLu2M4AGSeKkqrqX2b+yrz7Y5S18h/OYdk2ncePbNAGdpXizSNZjaW0e7EKw+eZrixngiMfHzB5EVTwc8Hpz0p+j+KtG16d4NOvDJKsYl2PC8RaMnAdd6jcuf4lyPevNNSijutOv8AQ/BGq3erafLolzHPD9pa5jhZVURKjnO1m+ddgPTsMVuXGoWni7xDpx8NyiT7LpN4k7oNogMqxrHEx7NuUnb1G2gDrNN8XaFq9/8AYrG/Es5DNHmJ1WUKcMY3YBZAO+0miDxdoVxq/wDZcV+GujI0K/unEbSLksiyEbGYYOVBJGDxxXFaPqFnqw8BaXpuft+lEPfQhCGs0S2eJ0kH8JLsqgHr1HFVNOuYZPDnhvwshJ1+y1iJ7m22nzIhHOXklb0VlBIbod49aAPW6+f/ABdz8RPEn/X1D/6TQ17rqNnPfWhht9QubCQkHz7ZYy49v3isv6V4Fr9vNZeNvEEE15PeyrdRZnnCB3zbxHkIqrxnHAHStaPxnDmP8B/InWP5s9woNQaggUqO+KuwR5lV/YAj8Kq6ohyCSR0qKU/3qR8xBapmbEuZ4x/tD+dd7ATDYIo6H1rhIT/pMQHILDmu6hGbZWz1GAK4s81UL7XPQpX5n6FNVV7qWRvu4x+VY1wQjZHStaKQkvvXbyf51k3hUuuDwKMImqjTOKrZpD0ncEbQMU/Hykj7zVAkoA6DI6U8DYhYk8jiumUbMyuRTOuNoPOMVXC/Nk/dUU94juxk5Ip7ptxH3PNdMWoqyBMpujSdehNNaILyOtW5h/AgyR6VAYJepUitoz07GsZv0Ipei/QZrooVH/COQA/xGucuMj6kV0MT58P2pHauPHp8tO3836M2X8NvyMibARsf3f61Tj5Zz7VZnIO4Z6rVWH+P0xXdTXukUl7jFjDDkfdODTmViDjpipIxkqoHGKk2fNzxkYxTc7MJVNRiodhz0qFI9uR2zV0jKA44XrVdvlfH97pUxm2RGbd0MCZfjpT9mE2jqTTvuFQOtSJtXcxPU5FEpMJSYmFQKvfNdLbuDCrZ/hArnI4iWVj/AHq3Qh+z4BIBAOfxry8elJRVx03Z6FqNfMucn6isLWH3zMU7Ng1tJLi7OOzYrE1MhbmQetc+ATVe77F1HordyzJJ/oiN2MePxyRTLcpt2fxKBu/EVKsJa2iZuAVxj8ajn2wxsq/e4z71vFp3gu5g77sru237vTBFVi5y34093ymB1DUxiCAe+K74RsSkVkk+9nsalBK8DoahPy/MeAaUH5AQeOtdDVzolFPVDrrkZ7mqGdrGrTsXfHbH5VWcAZP41pTVlY6aCsuVjiTuwPXFMOec+vFGMktS9qs2Wg4Ln8KYMo3t3p+7CZHXFB++OOCKBJvqWLE5vEx6H+RqwOWZvfFQWIAvYwO+f/QTVxUGGU9SeK5qjtP5L9TixD94vaVJIpc
RgHHJrpkKldmeGU5/Hiud0vAkkUdSK2o3DQYB+YYx718zmkeapoa4WVkSSxDzycnGwKPzrA1lf9JyOqjBrbL7TtJyRg1h65lbkt24H6U8sTVZJvp/kPE2cdO5kHlPbNRRFjMA3Tmpwcqw96i25dSPpX0yejOeL0aYk7E7vTtUbDaqfjTperA+1JJ91ccmrjsjWGiS/rYZ9PSpIxzSKm3J981PCgHJ6miUrIVSaSKcqK2QCetS2A2XcX1qOX5Z29CaktM/bIMDI34NOfwP0Nm37O3Sxpajzesw7mmLgqB6inXxzduB6k0gxx6EVxQ0px9DgkIcBsg9OKbIm/ax7c0pAL8HpR6CrJDfhNrcA8VLhWyvcc1HEhI3MO1SMQCT0zxUS3shMr3CB4UHY/41nkYkI7ZrVmAMKgdqzHOJCD9K6aDujrw7eqLWmLu84eqYr0n4Pf8AII17/sKn/wBJ4a870bBkl9ApNei/B8Y0rXv+wqf/AEnhrnrv35L0PRy//eZen+R6NRRRWB7gUhIGMnr0pawPF0Flc6TFFe6Ld6tmceTDaDEiSbWw4fcvl4GRu3DGcd6AN+ivPbCy8e6PbXN9HPFcWka74tHvJjczsB1UXAC4Y9g28Z716FQAUUUUAFFFFABXz/4u4+IniQ/9PUP/AKTQ19AV8++MQW+IfiMA4/0qH/0mhrah8ZxZh/BZZtFcumMkOAPpTdWiwq9upxVzS8mFR3xwai1/iZdp42158Kr+uKHqfOKFqfMc8g23EeP74ruoyUt0AU4C7s1xK/65B/tiu6tcvaknoFxUZ5K0YNnVh/el8jFaYi4lG35cjB9c1k3KN5oBbocfWtudUWXGOAaxb58yDHUmujBNN6LocUk1IdGUzhiM1IWIUs33RUCRq3zHGaf5gkQoQcD1rplHUyGwsWn3v0xnBqxsyDKR8xPFQW+JZSuOKuohaYHpGmeDUVZcrKtcSO2VD5j4yeeaVpImyuBkdqY4muJMI2FHBzU5sFADDG7ua53KKadR6jSb2MS9UCQ+3StVWKaJbqBxtNZmqLskx7AVqwjzNLhTsEzmunENezpyff8AQ119mYkxIY/SoFJVWOO3T8at3igSNVZe/wDu/wBa74O8bmlN+4WYAfzFTYBYMeMim267oFYdcA1aWAMR024GB71zVJpN3OaXxMY+Nm1R164qnIwDDI5HStCRVQhV/i9KozIvnE+gootBC19RueQT1qdY/MbGMKKgjXcQW7VdwRGET7xHNVUlbRDn2EQ5cADjNazOfIjVe/GfxqokSRRAEfMatdCg7bd1eZiJKTVugQ0uPhYG/lGOFI/E1j6n80znuM1sbNkzMOrkVg6g5M0g9zVYKN6vMuyNHfReZukhtLjK87VHSsy6feffANasaCKwQfwlATWeyIy++azw0kpSfmyavQzkBCNnk9aHXjI9MVIy5J2/nSMGCAE9TXqp63M763KpG9eelNClVx1FTADG3HWmgFSFPOOa2ubKXQi2nnHGRVdgA2088VeK88dT0qnIAZGx1HFXB3N6MrsYSAxHTmlGDxTNuTn86dgkj2rQ6GOwCMe1IwIAPvQp5+opMnA5oFbUtad/x/RgnPXH5GrqqWJ7HccVR03/AI/4QevzfyNa6JkFu6k1xYiXLP5L9TixXxol0tGW4duTxxWpbk+USTg5qjpbBZnzyQDV8AAL6EnIrw8ZK9Rp+Q6Xwpgr7pdzN1Hes7WhmcZ6bR/KtIwAOWONuBj86y9bYBdwIz0pYOzxEXHsVUT5LPuYm7Aanhgq46nrUJOGI654qRQBtJGSa+jaIlFWArvYcdetKY8rjuDUijBPrTlA/KocjNzZEIj1zweamVQCCSADQBwP88U5UEhBb7ueAaiUu5Dk3uZ1xgSt+lS2PFzCO5amXKAXL57f4U6wGb+3z/eFbyf7p+h270/kaF0mJZj3zUankDtireqKFnlCDGW5qkD+WK5KT56aZxNdCXCljjGaReCc8nPFNXG
eOtL/AD6iqsSO3FQB7U2Vg8fHBpCSFyeajY/KacY63GkNllKx5HPH9aoudxB96svggL2xVc4Un2rpppI7aCSXmaWkELJKP9jFejfCL/kGa/j/AKCv/tvBXm2l8yy/7tej/B850nXv+wqf/SeGuOsv3jfoduXf7zL0/wAj0aiiisT3QrB8VW+ovb6feabbtdSWF6tzJaLIEM6bHUqCSBkbwwBIBKit6uT8fxyy6RYqtlqF/bC+Q3dpYZ8yaLa+QSCPlDbW687QO9AEmkyapq/ikavPpV1pdjDZPbLFdunmTuzo24qjMAFCEAk5O88V1FcX4QtNDg1aV9M8K6ppM5gIae7hZFZdy/KCWPOcH8DXaUAFFFFABRRRQAV8/eLs/wDCyPEZ7faYf/SeGvoGvn7xcwHxH8Rr3NzD/wCk8NbUPiOLMP4DOg0mJEhDHqVBrL1wKroq9AMVp2nzwRqOyjP5Vka1lJct35rxMGm8a5NnhVH+5SSMeA5vEU9d6n9a7+FfLsVKdCTn9a4C1YG7j/31/nXoeP3CoOhHP5U+IXZ00dOEjrJ+RgXaAygnqXBP5VjXiAFJO5zmtO7BaXywT8p/lWXdoURVPRWrvwStbU82T98jjDMT6VM5Hl7V61VR3B+UAiplj7gklu1d01rdkSVmW7aPcoROtXJ1ICwJ1YHNMtYvIjDjljxg+tWynlRlm++1eXWq/vNP6ZpGOhWedLcbBnOM1HHNcHeWxtI44qdbcK3mSZzVqKaBgQMce1ZyqRivdjccYtvV2Oe1NQzRv/e5rTtUzpEIP3fLOao60wMgUdO1aOnuH01VPTy66q8n9WhLzKitLGJfrtkkx6CqcYyzDtitDURiaQ+wqhFkk5HavTou9NMdN+4zSs+IST0UA1c8kZAXtzVewjDQ7/wNXBGyKqrye+fSvPrz992Zg1qQvGsQJ7ngVRuIcSDHrzWn5WCXfqBwKz5Q/mMoHOc1VCd3uLYiVS8yqvQMM/StGNFhDyt1PSq2VjAUfePFWTGbiUZ4VfSnWlffRAncS3jlk+eTGN3FWmflcd8gU1ZS7IkQBwRmhyFnYN1XP865JNylqi1otCeE5l57Y21z2o582Q+5roNh27x0UZrnL4sZn9N1b4BJ1G0aR+JHSb/9BjX1QVlyAq529BxWmygWCEfeZQR/n8KzVzuYt1Lf0rHC2XM13M6vS5EyCPAX7oXNMlVh8x+6BVtUEiyeuMU25iPlFe1dUanvJMzt1KMUeYjnrninqpOA3XvUkK5GO/arUce8YI571pUq8rYNtsz2j+bPas6X5Znx13ZreeDacflWLcIFmbPrW+HqKTOnDO0mmQDOT79KXgKDTImJPzDHPFK+WUY7GuvqdrWth2NqqPQ0yPf944x2+lOZd3ftikyemPaga2LWm/8AH/Fn/a/ka2FcqDjpzmsjTsC9jHf5gP8Avk1vRJ+6Rsd8NXm4ySU9e3+Zw4nWa9CKwOLhmHVjtNacT7pTnoDVazVGuQ49KsSKrj5T1FeXiZKVS3kTTulcny7AK2OM1h6xDyB2zWsCykAcg1m603zp64GfyowSca6SKm7q/Ux1Uc+tTDG1QOvaolIK59DUqLwfXqK92RnNkoHQHqRShF6+lODAgepqQIM5z061zuVjEix39f5U5I/Nxn7ueKfgDn1/lT4lMpGPudiKiU7K4IzbtB57jvim2Cf8TC2HcOKm1JfLmOOpxUNkxOp2+B/EK6k70G/L9Dsp35DW1L/j7kXsDmstX5PpWlc72vJjjIOT+NZzjByB1rnwqtTUfJHPvJjgwPTrSg8DPWodw3Y74zTlbtXS4jcSQkhSaiY5BB6U/oKgk75oihwjqAUnb6YqtIrfvP73ar0SZUewqnMNsjCtYPWx0UZe80W9JY+ZLn+5/jXpfwe/5BOvf9hU/wDpPDXmelqA7YP8Ir0z4PjGla9/2Ff/AG3grkxHxv5HdgP97lbt/kejUUUVznuBXNeNRMdLtObwaf8Aa0/
tE2RcS/Z9rZxs+fG/Zu287d1dLWB4uudPtdJifUvEU+gwmcBbmGVIy7bW+TLqwwRk9M/LQBzXw81G61T+xmjN81vZ6ItvfPcJIqNc5j2gb8bmUCTJGfvDmvRK8+8IahFdeL5otN8Vah4g037CzO8zRtHBLvUAbkRQWYbsDPAVs5yMdrqth/amk3dh9pmthcxNEZoCA6AjGVJ6GgDml+IVp/aWrpJaOumafYverfCTP2hUYq2xMdNysAc8444wTZ0/xXdf2hHZ67pI0pri0e7t2FyJgyJt3q/yja6hlOBkdcHiuXv/AIc6vdajfWo1q6k0+XQmsYnlit0QNltkZWNFIVcq2QB0xkjitn+zNa8Uava3GsaWdKgtLC4t2zOkpmlmCqSmwnCAKfvYJyOBigC1pPjK5vrrSvtujNZWOsKx0+4NwHZjsMiiRNo2FkBYYLdMHBplr44muJrO6fSDHod9efY7a++0Auzliqs0W35UZhgHcTyMgZqlpek6/dyeF7DU9MW0t9AIkluhOjrcukLQp5YB3AHeWO4DGMc1WsvD+urpuj+F5dNCWOmahHcNqXnoUlhilMkYVAd4c4QHIAHJyeKAPRSQBknFfP3i7B+I/iMjB/0mHn/t2hr3fUdMsNXtDa6lZW95bkhjFcRLIhI6HBGK8A8Q6faaZ478QWdhaw2trHcxbIYIwiLmCInAHA5JP41tQ+M4sf8AwGb1jKzRoFGQRgn0rN1pmkmOeg6e9XdLl2QEHuTiqmtMu75RzivOoR5cY9D52/uLUyLRNt5Dzn51/nXpcKZtwT6V5tb/APH3B/10XP516RDJiLYRwRkGuHie79nbzPTwLTk3I5u5Q/aHb1crWReqXANb+oFUeTaASCWwKxbj+P0J+Wu7ATbSkeVWXLNlBCVOAuau28BVTITn0FQwqBwwyRWvZ2+Rl/ujnmuzFVlCJCXNKyLFlblCHl+6eee1WGi3ytK3CL0HakhkMxaPaVA6H1p0sgYrAh/3iK+fqTnKprv+SO2MYqH9blCaOSechc7OORVpLEKuQfrxRNPHBEdoBb0FV/7QkEYYqwzxj0ro/fVIpQ0Rl+7i/e1Zj6qv70j+7V/TmAsIV77eazNRkLzOezc1dsn2W0HcFcV61aDeHin/AFoZJ2VytqgxO/0FZics2PStTU2DzSf7o/nWbEAJD7CuvDfwlcqD0ZtaQmYWyeOBVv50A+XJPWoNFG+0kXGCT1q2JcpvZCD6GvJxEn7aSsK2hXEcjku4Khe3rVF32MWcYZjgA1qszTAKFKjqaoXKoXDMMbTxmtKE7u0kZzSRWiXa4kc8nHB7VogEqI4xlu5FUBG0k6sThM9DW3bRpCjSvjJGeaeLqKKT3Y6ceZj4LdIFBONx9qqTsjSSdA2cVIZJGk3EnaG/pVO6cBHkXqDzj61y0KcnO8nqy5yVrJF2KTbDIpGfmx+BrnLxh5jD3rdEn7iR8crz9a5+5HzHPUZrvwELTkwTu0dQcNo9uej7en41kHI3N79K2gP+JXG2M4j4rHCExsW653Vy4OS9/wBX+Yq61XohFDAhxnOOV+tSgkkBh2yakjjyQfXioZyRLtAIzxXRzc0rGVrIIYvM3N93Y1XVMTRZUjJ44qrFJgnH3e9NCnojYGcjFRUi5vVjTsSsN6jJxtP51z97xO1dBEpztY/j61jajGN78cgmurBu02jWi7TTZnIvO7tilRgPlPegNgAD61Gpy4avV3PQs5XuSHiTr7YoIpW+UbjzSA7mHpk0heZY0w51CInp838jXTQOiwDkENmuascC8jAHZv5GtlCfKC5wCNwPoa8zHQ55L+u5x4iXv38iaD5WOD2FTbyASeAFzVKKQiZx2KVNLueM7WwCuMVyTh72pzp6EqTNtUnpn9KqawRJGrr3zzSrIT+76cdaZegC2jTcCRz+FXSpqNWLHGTMtOF+nWrMacZzVdOp44JxVtFOM56dq9Go7DqMmRQccc1L5XIO7p1FOhUFAcc
n9KkaI5DBunX3rz51PesZpEewK2ex4qRIztyowo6YqRUG75sEHgU7duIiTgeorGVRvYpRMK9JkuGz6/0pmmrnU4M/3qmv02XTj05qPTj/AMTO3/38V6l74d27fodFN+7Y17rIvJgFyMmsyZDnIH4Vr3CkXsnfLE1SmQtlgMe1cWGnZL0Rzy0kzPKknJXHHWkIweOnrVnHOCtNK+1d6mNTIOccHNIFJPrUmzsOOafHHye9NySRXOkPhjz8x4AHSs65/wBYT6mtRlZYs5x7VlzcSNnmii7ybLw/xXLOnjmTH92vTPhB/wAgrXv+wr/7bwV5lpucsPbFenfCIY0zXx/1Ff8A23grHEfEz0su/wB6l6f5HolFFFc57oVzPjbUNQsbLTY9N1GHTp7y/S2NzPCJI0Uq5OQSOu0AepIHeumrnfGM7jTrTTo7eymbVLtbMfbovNgTKs5ZkyN3CYAyMkjmgDn4z4sbxX/Yg8ZWkhNmbrfHpiEx4dV2uN/Gd2Qc87W9K9CrgfAd1bQDRbWy0vTLNdS0UajciytxERKGjHOD9072xnn5TzXfUAFFFFABRRRQAV4B4u/5KJ4k9ftUP/pNDXv9fPvi9sfEfxGB/wA/MP8A6Tw1tQ+M4swV6DNKzRFjUsPu/MKzdYbZLn1BrUgiLFfTAzWNrOQ4L/3uK48J72I3PnIq9lYq2zbriPj+Mfzr0eBSbJCT8w6V53ajFxEB3cfzr0CCTdaIR2//AFV53Eib9nbuehgGlOXoZs8SGeR2GTyP1rGukAMfoeRW48YcTerE/wA6zbyFmdAv3Vcg0YGrZpN/1Y5MRDqVba3jchmHP1rTjbdH5a8A8VVjtieR0PvVpiFjAT73StcRPne9/wBDOmuVErusUYWP7w4qFmFuCf8Alo9CRLAfPfO4jn603yw7i5k/hzjFc8YxXW6/N9jVtv1HLbrje+OTmo5/KIx6e9NkM87qq42d6imsXQbh1781vTSuueWplJ6e6jE1A4nwOABirtqR9ngHoBWfejM5Q9Rg1btNxjiHcKM17VSP7mJEvgQy7bLyE/3R/OqceN7n2qa6crJLn7oXNV4MFyw6MtbUo2gVCPuNm/oxK2jgdc5zV0sJcMQQOwNVtFI+xSN6GtPylkcf3Rgj618/iqijXm2aRi5RIkG8hEBGetU7y3WW4CY+UDJ/CtclIRhfvN0rKvnMUoA6vxWWFqSlU93QdaCjHUqD57hIk6A1pGNppWXP7tQBiqUbJbDP8RNXppmRQkfUjnNdFdycko/15mVNKzuR3U6rtjT9KyydxmHarzWohQN/ET61QbdvbPQk5/Ot8MoJe6Z1G76lyFg1uR3KjNYNxlpnJPGTWyoAQexFYsoxLJjuc124RWlJlU3qdeEIsIlHQoDWeqAtJu5A6VsOu3SY8fwxA1lLjc+3vg/pXiYSo5KbXd/mb14crj6DYXUIrEfdORVW4kLMGzk81LkSysnuRVRwFzjoBivTpQXNfqcjelh8DBQFP3XqYxmP7pGByKgt8GMRn8KlZpEHOMCrmve0EO87LDIPBH51l37E3Mg7bqv+ZuJ2dRyc+9ZN85N0ceuDW+FhaZtRXNKxVXh39AcCkKYGffNHqTTjkhcfjXonoNtMTOWKGlQKIxgYGKb96TPcdaRcyBiOh6UDa0LmnYN6nuG/ka1lDGEqD0OVrK01Qb+L6MP0Na21vLOOoPFefin+8+79TgxPxjE/1xGeMEU+KRnCgHrnP5VChCy4PVqlUBd2OvUVjJHMNckPkduD/OoLt9yb/oP0qYfMwVupyf0pbpFFmWGflwKqLUZK5UdykgGxTjg4FWI9zHPYdajg+5tPfmnoWyCOnetpu9wluaMIXZkd6eUYkcjHVqgTHUfh9afmQEdMHrXnSi76DTJG4c5PGMCkhBU5X86jbJkXd93gDHrVnzFhj8te1RK6Vlrca1dzC1KUi6frzio9NBOqQg/3s06/Y/aGPbP9KbpZP9qQk92r1rWw79P
0OiHwXOgnysshPZjUMibxuXgHrU2RLJMp6lywqEFhnH415FO6XmjCW5TcYPI600KG6DA6VYYn+KkGGHHTvXYpuxkVxGDwBzUiKkbHjkin/KcgdaYQqsSM5IxVczegCEmQkk/L2BrJuFAlb61rPFlQzdQcisi5x55NdOGtd2OjD/ET2OQ0mOuM16d8IjnTNfP/AFFf/beCvMbAndJ/uV6b8If+QXr/AP2Ff/beCoxHxHp5d/vUvT/I9FooornPdCuQ8e6jb2+nRRS2balbxTxyahYxQiV/sxWTDkH7gDJuDccoRkV19cV4q8M6/ql9qz6TcaalvqmlLp0/2sSb0wZvmXbx0m7+lAEngtNCtJprXRfDOpaSHjDvLdWbRhwuAF3sSTjPA9M4rsaw9FTxNHPs1k6QbVYsJ9jEm/fkYzu4xjP6VuUAFFFFABRRRQAV8/eLwP8AhY/iUn/n5h/9J4a+ga+fvFw3fEfxGPW6h/8ASeGtqHxnFj/4DNeAOzKFHGADWXrSAsB7mtyyG2LOMk1ma7GqTHnqv5YryMJV/wBq5TwJRtTUjGtzieLPZ1/nXb+Yy2YWMZY/41w8DA3MYPHzD+ddtHGZbbKE54xiqztRvByNcNzXkl2K0blBMT1Bbj8ap3MrF4lIAyeatyx4llizyR+frVKbLTdM46Vz4dRcub+tjKq2lYeHl3kKuVzwaeIxDmUsSTzg0yOUxlkKZ2ngnvUyRkne5O3rg1c3byX5kxVxiiSRx5gwmN2fenktNMI1H7oDkimh3uCY1UqvZh3FSyEQgRJzIwOMdazk3e1tfy8yktL9AmuI4QqgjNVHubhnIMY2Hoa0I7NAoaUgnryKnK2+zHy8VisRSpuyjzeZr7Kct3Y4q94l3HrVy3A8iIj8aNYRFmYLjrkVLYx5hUnoBX0EqidCMjja05TNvP8AWOPUVVjXB49MVdvYyk8nfBFUhlWJ65rtpO8FY1p/DY6PRGK2TFh8oxz/ADrR8jkYY8c1Q0b5rVwfu5rQjgkjBG9myc5r5vFu1eetncuCuloKqLApyxJxxmsu9lZZCXAz2rYW3K5Z3JwOhrnb6SSSbLoV64H41WASqVG73JrpqKWwlv8APciRz1PStyILFGZJOpGcGsK1ika4RmBVcitrynnlbJKoCAPet8da6TehnRvuiBhJdNyMKvp9ayp9wkcY6NXQySxRAom3dx0rDl2tOxJwA1PBVG29LLoKtFR63ZJEALd2Y8Ag5rJmxvY+tbULBrVlZQAW/MViXGFdlzXfhXecgprVHaCf/QNjf3Bj6YFZjMduMAValJ+yqwHHlKKoPIUQAjmvGwtJK7XVmtabdk+xEuFZjnqciqtwCCAOhJqySPMCn7uOtVpmyAcdK9alfmucosDZQjv2qQzMAA4HXn6VFCvykjrnipd4I+dceuaqaXNsIaSrZ2/jWXdrm4J7AVpuFONpxz+dZtwP3zHNb4fc3oaTKhBKAkc//XpQxO4rye1IxJJAHbFLGuBXaeg9tQB+b8OaXOGIxwKZICiLjk9DT8ZJ96BNK1y3pnOpw/8AAv5GtdlYKxXnBNZWkj/iYRcev8jW2FfaxCk8kfWvLxkrVfkvzZx4jWSKkfJ3Afw5FKf9aT6VKkLJNGMdRgj0pkinzGIHBJrPmTkc1iGNyZOR9aS4fZBg9GakUN5rDbzUV8D5Jzwev0reMU5ocVeSREr5Bx1HSrcJJIwPlPWs5D+7Uj1rStj90Y49a0rKyLqRsywIwxGCcGrKQHOecHrSRRcgg57gVZSNwR1ww/KvJq1baJhCFyIQBYwpPTgGhiqJsBy3vUpjO0KSeOM+tIyqgA4ZqyU7vVl8tjnr1gZ2UdQeaZp4P9qQ+m4VNfKPtEmBzwahsFP9pwHJ++OK9xP9w/T9CqduVo6EELNIB1zxUMoy5ZOT6VIy7ZpCOSTkf4UMm4gg4PcV48Wk7mb10K5WTuvGKYQw5x7H6Vcwy4+UkHrUZ+YZK456e1axqEO
JTYqQRnHamg88cnFTlRzuXA9ajIAJ2jqOtdEZLYga+SgLcVi3QJmYjtW0wwuWPasmcDzGwetdeGerOjDO0yWwAYyfSvTPg+SdJ17PX+1f/beCvNNO4Mmf7lel/B/nSde/7Cp/9J4KnEfEz08u/wB6l6f5Ho1FFFcx7oUUUUAFMlV3hdY5PLdlIV8Z2nscHrT6iuElktZUgm8mZkISXbu2MRwcd8HnFAHmt74x1Dw/faoqapdatHZaZcXMqajZLbMsqFQnl4RDIhJOSAwHB3cjOx/aeteF9XtbfWNUOqwXdhcXDZgSIwywhWITYBlCGP3skYHJzVi68F3OuTF/Euqx30a2s9tFFa2n2dVEqhXY5dyWwOOQB6VPp/hS6/tCO813VhqrW9o9pbqLYQhUfbvZ/mO52CqMjA64HNAGTpera/aSeF7/AFPU1u7fXyI5bUQIi2rvC0yeWQNxA2FTuJznPHSq9l4g11tO0fxRLqIex1PUY7dtN8hAkUMspjjKuBvLglCckg8jArZ0nwbc2N1pQvdZa9sdHVhp9ubcIynYY1Mj7jvKoSowF65OTTLXwPNby2dq+rmTQ7G8+2W1j9nAdXDFlVpd3zIrHIG0HgZJxQB0uo6jBpdobm4S5eMELi2tpJ35/wBmNS344rwTXL2LUvHWvXcCTrHJdxbRPA8LjFvEOUcBh07j3r6GrwTxR/yUfxH6C6h/9Joa0pbnBmX+7s6CyCjG7GAqkflWHrhPnkseCPyrStXaVFweBwc+lZGtuJZiD6V4+BptYttniVZp0kvMzLdd1yh77h/Ou9sWWKAA9f8A69cFbf8AHzGR/fFdkGOwFTxxWueQ9oowZeGnyTch06gyPJty29gD7ZqhcFUnCgfjWnA4d3B/zmsyX550Hrk152Fb5nF9EOva3MuoqMhPzD5qkUtNlMEL0yabFDHIQ569OtWCS0eyLhugJrWpJJ2X/DGcItq7/wCHELRwx/uxllGML1p0UKqguJsFxnGeozTobVIW8xvvHrz3psoe4kA/5Z9wa5uZSdovTq/0NuVpXa16IryzTSvhNyjNRNFOCT5nHpVqe5itwR6elZ8uqqAOG+b2rtoRqSS9nDQ56jin7z1MjVJC0+e4HNXdOmAgiUrkMDzWbqDAys/rzVyyJS0gA6CvaqwX1eMTJv3Ex+pIPNmYegNYTEiQenTFbMrmT7QCf4R/Oslv9aF7Gt8InGHK+n+RdF6s6nQI99mc9CavvM0BVSCxJPI7VQ0NmXTpQD838NTteo5LkH0r57EU5TxM9Lq/9f15GqmowWtn/wAEtbnlfk4UdQe9Z1zAtxfE8eWgxinSaiGUqmQSO4qqsshhKof3jHk1eHoVIPm26f8ABM6lWMtNx08gjcLGv3T2q1cXywxYTlsc4PeodiRRlj941mxuu8u/rn8a6o0YVdWtvxMeeUdupejV9xlkfOR3rNZi7OQf4j/OppLxpBtU9DUKgBmPqa66UJRu5ENmhHgRYPZRWFcsGmY9611kDQHHXBFYcwyXz97HNbYSNpSbNqOsjszGxsovm4CjP5VnSqdzZ5A5FbCIDpyg9CgP/jorLbkKB06GvEws23L1KrxtYqTH5Qw7cVDMMfQjFTbcYU9MGoyC6bj2PFerB2OUSJSsYbOcVP8Au3UEjBPHNVraQlQw+6c5FWWRGBI60VNJajIWVV6f59Kyrkkyv+VacpCAepIWs25GJW+tdWH3ubYf4iBACQ3TmmtlWfHrTh97B6bqHPzyL3rr6nctxDzj3pSDs689M0AYOTTQ+VFAWuXtHy2pQ/U/yNdTDtSHkhvmIPtzXKaQ+3UEb0z/ACNdFEyeSeDmRi1eNmcHKovl+phVdpkh2iZm/KqsjoeFx0JHPSopZzvIXoTiqofavuamlh3uzklK5YGDKpHfrUWoR/uN3cmo45CCT6H+lTXkoe3j56AZrdRlGpGwo6MylyqDjPOK0bSUbEUjr3qvBGWXPbNOKiHIX04rpq2n7pp
OSk7GxGwVsjovSpBdsMLg/MevpWNHdsuFz0qZb3ccc1588I76q5PNJGszmQ8HGB+tPj2BdzkFqy0u2LYB56mpA+QS5rCWGaVtilU1uUb3H2xyOlQWZxqkJ7FxTpnIlJNMtj/xM4P97Neslak15foOn19DpHXEj4Hfg0xhuIKnacdKswsJRIvdDgVAyh3LrwxHevAhLWz6FyjpdDcsByc1GWyM4/CnHzAckjGOajJY89/6VvFGbYhIdSGXjpVdvvbVGOOtWWYbfmBI6VDJuJwnBx3rem7GbInXC/Mc8VjTHEjCtmVcR5blsdqxZ8CYY969HC9TbD/EWLDlpPpXpnwgx/ZWvY6f2r/7bwV5rp/3pcf3a9J+D3/II13/ALCn/tvBU1/ifyPTy7/epen+R6NRRRXOe6FFFFABRRRQAUUUUAFFFFABXgnin/koniXPT7TD/wCk8Ne914F4qYD4jeJQf+fmH/0mhrSl8RwZmv8AZ38i3bTlFwv8VUNXZTKcHjGKLa5EZ+bGe1U745kOSeazoUOWvzHzkXe0WR27YmT03D+ddVbTEQuGrj4GCyRjPRh/Ot6K4w7tnjHSnmFH2ljSUvZzubMUgQRg/eI/Wo5lHmjH8RyPpWcLxAodmxtbNSLd7zCWIDDPH5V5P1WcZcyH7ZNWZcjg3uWXO0+9aA8uCAt3ArIW7eKUqFBXJ5/CrCNj53Y7Tyc1zV6M5W5np+ZtSqRjstSzCkk7ESfcIyMU2/uFhQRx/exxUEupKVEcJBb29Kps4jDSyNyecHtRSws5TUpqy6IJ1oqPLHfuVnhZ23SHk9eaRoowOpqnPemVjs6dOKrs0mO/519BChNpXdjh5SK4YNI35flVyFsW0Hrisud8yA9MdatrJiKH0IrsnD3UjepTagiZnws30H86pAgyk+1SNJ+7k/D+dUxIBIwzxirpwtcqjSbTOlsLgw2eewANMeZHOATiqcEwWEemMGpNqtjBrz3RipuT6nLJvZkjSZIVepqZJBACD941DuSMZyCe1Rxyku5kGFBG0+vFDhzLbQS7luNHk+eTt0xVGSEySFuwqSS7Z2KqBt7EUye4CjauCcYqqcJxfqPW+hXVxGzYqZW4GfrUBjIwT360JJlj6V1SjfYtxvqi8ThQvsaxpmHmSH8DWqJQ0eOM7axbg4MhHdqMNHVm2EjeTR3UUn+h7R12L+qgVnz/ALlAe+cVLbzAICTxsXP5CqtxOrHsRmvFoUnGbM6k7pETv8x9hTScQHPXrVeZzyV5J6U+SQP0PBFekoaIxt1JLc4AB6GpXiAXKmq8LKY9pOCaDvRcDJ71MotyAWUARknp/F+FZl1kTsT0FaDSNtyBkkdKzbt8l66qCaZ0YZe+RoRn8aSRj5hI9eah8wLxTXcmQEdDwa6+XU9JUm5XJg2ULe9NPyPmoSfvAHqfyp2e2e4qrF+zsaGmn/iYKPY/yNbyMDbY7jpXO6e4F+jZ65/lWoZNynB6GvOxVPmmvl+p5mL0mvQcJQHAHTcc1DvAHPemg/MeajDgL9KpU0c6jcn4MmPakuE4Ve2MVEsoYg+9SXMnzrj0o5WpIai1JFiJ1jjVT1NRuqltpzkiouCn3vvU7duGT94DFRyWdybDBAQxx0oCMG+tOErA4xS+aCe3PStLyKbkKm9fTNO3Fj8x5pnmHgY571HgsMkkGp5b6sm19xHYhs9qLRh/aEfs2ajZ+cdvWkhkC3cWO7Vq43g15HRGLs/Q6W0l/eSP6HipCUmfeCelZcU5R2x61ZWRXwwbBHYV41Sg1LmMlPSxYKsOlIFbuBmoTKyYI5oFye4GetTyStoHMiQhgDuAxUb7j0xSC6IOWA20x595woGKuMJJ6oltEcigA561i3AAc49a2W2opy3PvWJcH94w969PCdTXDK8izYN/rMdcV6X8IP8AkFa9/wBhX/23gry/TpMmX2XivUfhD/yCte/7Cv8A7bwUsQrSZ6mXx5cXJeX+R6LRRRXMe4FFFFA
BRRRQAUUUUAFFFFABXzz4yl2fEnxIP+niH/0nhr6Gr508bIzfE3xHjp58I/8AJeKujDJOepz4qKdJpkSyjOSelUb67O9fUn1qykR2Ybn1qlewEuCewNddNR5jwMNGn7XUjhuQZR838Y71tR3A559utc7DAVlQ991aixsMnOBVVoRZtjaVO6sy/FcKVcNggnvTln2Srk5A6VVSAkEdutHltvAJziudwg2zznCF9zTjvlK/NjI96DqDS5jHA9c1nBCGOQeuKmjgdvuggnuRWEqFJaslq3UupMkR3ZBbGKp3N000nUhemM1I0BxkkEiqxhYsNvfJzTpwhfm6hC3UaZY4/QU1rpdvaopYBkbyDUTxqB2rrjCLOmFOm7XZXuJcuMdzVkT5ghHtVCRfm6d6sCNvs0ePSt5RVkehUpw5Yoc1wDFLg1UM4wD1NKyFY5fc/wCFVRG4Ld+aqMUdNGjCz/rsblrKDHktweMVN5jDGGNUrOE+Vk/dz0q35EhOQ1c81G55FaMFUeo9WIOWfI96Hn804VsYprRELhjmokt2ySvA71KUdzNRg9Wyfz1QcYJoVl+8xH41BIgQE4yakW3cj5jx6UWigcIJXuE12MYH86gE4U4zSyxLjgVUaJ92c9K0hGNjppUqbVjRFwF+lZU1wHl255JHGatpGxiJPpWYYv3qSY5yM1dOKTZ04OlTUpM6kXg8sDPYDrTHmzk/pVEIwIB9MiplRy2M8YxXL7KMdUeXKlFa3HGYEfTNMM/Ax07UfZzu47ij7PlRgcY4q1yDXs0OVzyQ1OFy2MFTSxWxI9DmpTZuq8881nKUL2ZDcLkDTEr12571m3koDtzWsbQsqgjgdqzL63AdyV6DFbUZQb0OrByp+0M03KsDyPY5pn2oBQC3zAnvUclsyEY6Z6Yqu1swd2JzzXXofSQpUmtzQWdVAJYc+9NFzgHnJ64qm0ErYG/p04pkkEyOh3cE4PHaiw1QpvqbWmzhrpWznAP8q0muCp3AZHpWFo0TreyIxyO35VtGFyCAawqJc2p4uPp041/khRcZyQaY0gLZDcelMEBClc1Ayt0WkoroYwpQb0ZOs20Yz71JNcAFCT2FVfKJHrRcRn5cjO2q5U2X7KnKaLInIQsrbsHIGalafPzA/hVWGA+XxwccU5I22hc1LjEynCnfQmW5B6j61IJ0IBBHtVcWrYPPWgWxU4AwO1TywIcKL2ZYNwB0xmo2uC3TioTCx6cUvlMo601GI1TpoGuEyRkVHbzZukz2bioHgbcx7ZogRvtSLWnKrM7FRp8js+hrG4Idj71KLjP3Xx64NUQjFyD1NPEbR8YyT1Nc7hE82VKBdN0yjOSfbNJ9sPU9frVYhiKZsPHPtUqlAhUodS2L3cMEdfej7Zzhf51U8pyOG5zmlEZU/Wn7KBTpUyw0+c5asm8uQjt8wyc4GauSIx4GayL2LM3I5GQDW1KCTO7AUYOerL2nThvOCt2Az+det/Bs7tG105z/AMTU/wDpPDXjmkwYWYjpxx+dewfBYFdC1wHr/ap/9EQ1hi0rHfRhGOLny9v8j0uiiiuA9AKKKKACorm3iu7Wa2mUtFMjRuAxUlSMHkcjj0qWoby0hv7KezuFLQTxtFIqsVJVhggEEEcHqDmgDhdDsbKPxLeal4VsY7bSLSzlt5WgG2K+udykbVHDbNrAv3LkZODWTo1la2GleA9esiTq+qzxLfXG4l7sSwO8ok/vYYZGfu7eMV3ejeEtJ0B4zpwvo1jj8tIpNRuJY1X0CO5UdPTiiw8I6Fpmp/2haWAjuFLlMyuyRF/vGNCSqZ77QM0Aefw2sK+FrDxYAf8AhI5dcRHuNx8x9155LQH/AGBGSu3oNuetGr2sM/h3xh4nlBOu6dqMy2dzuO+DyioijT0VhjK/xbznOa9AXwjoSax/aq2IF35pnH71/LEpGDII87A/+1jPvRdeEdCvNW/tO4sQ9yXSRv3rhJHTG1njB2MwwMEgkYHpQBoajJq
EVmW0y1tri5yMR3Nw0KY7/MqOf0rwHXvtk3jvxA+oQQQXRuYt8cExlRf9Hixhiqk8Y7D+tfRNeB+KFz8R/Evr9ph/9J4a1ou0rnDmLth2ySLTmmXCj361n6laBZWUDheK6y2ZII9x6soH6ViavCUfbj5mBY152Exs54jle39XPn5U/ZxU09Tmo4MOpP8AeFaSwByQe9QxDM0YP94fzrVMQVyR/DzXrV61mkTWqSk0xi24Uc9MUfZQ1104bir0EfmyA9h0q2LLy5M87zzj3NeVPGcjs3qTGk5K6KMVpE2Cw56danEWU2qPYVf/ALLWVt2WGeeDWjFbRQQ59B3rysRmcI2s7vsdtHAzlvojAh0eQyHzMbD6Gq95BHbKUUHiugM/m/LHgnP6VDNpi8Ty5yMnHailmM1UTru3kE8InH91r5nGNaPMAcfSmNp0mMsB6jmujunjjkKDr9KoNcq25R1HXivoKWMqzV0tDjcpQ0TObmh56dDirghzbQt6inXygSEj+IVciixYW3+7XdOtaMWa1KrdNMx5ogI5PTioEjxIo7GtOeLCSj6fzqogPmgEcCt4TujelWbg/wCuhfs4PMgKn1xU/wBmlXAGPerGnWzSWzFRyeat/OAdyjOa8uriWqjSOGV22zNgspW/12OPQ0kyEHbH16VqLHNIcBRg9aR4Y7UnJOW9azWKfNrv2E03qZsdiSMvj86ZIrSNtUcdK0R5s2AgBBq6YYrePnrSli3B+9q+w1Fy1Of+wMqMzj361S2Zcr+NbksxlfYoGDxVNoAJSO1dlKvL7Y1VabK4h/cZ7dayZE2sxPQc10qx5iIA6Cse4jIdiBxg1vQq3bRvhazUnfqab2pMSMB/CKdHbbmwB0rXtY1eLnsi/wAhUeARkdd2K8r623ePY53B2KCwbCWYfx44qVrPYWA6Cr5UbOnJJNSqmYWLfeNYSxctylSuVYdO822RwOme9PeFo1ywzt5OPSta1R0tYzgYxz+dXI2iYDPfjpXk1synGT0uk2d1PBRklrZ2MB7MMBIo+ua57Urf/SHGO9d3cRDeCtcprKhZ5W7cg16OU411Z/IxxFH2Mk0cvLF83Hc1B5K4baOSSf1q+4yQKh8rk+5NfVRkdlKs7FTygflPbmnSRgqcjpVlYwRigqCdp6mq5jX27uGmQg6inHUEfpWx5BXJA4BNVdJjzqkXpz/I10ojATcBwCQfzry8ZifZ1EvL/M83Ft1J38jnTAyyjd0NR+SuQ2O2K25YN0mccbs1RZAEB7VVPEcxze0kikINufrS3EYUgnpVtkY8gfxAU67gDYHtmtFW95XGqr5k2U4k3RAjtUqW/mHjqKlRfLiBP1NXFtvmEqc8VnUrcpLm23YoiJ1IU9utKRzjHWtA8cEDPemFoycf0rFV2+hDM1kY/d/GmmBm9MVoHBOFH1ppikIyq1qqxSm1sZTLtfaaZbwj7aox1OatyDbOVPrS28Y+3RY/vV0upaLfkdEarUWu6Jo7bfI4x0bA+tSGHbww5rSt7Yl5TjvlfensoXiQYOK8uWL96yMHFtXZjmFDxg0026gY7VqGONjik+zpjGT6VaxNibSM9bRe3X60v2VVPP8AOtJbRCMAnrmpBaRq5ck5xioljF3K5ZMx3jHIArEvoizsVrrpVXBC1g3q4ZuOAea7cHX5mbYabp1CHRoCzzg9Mf416j8HRjR9dx/0FT/6Tw1574ej86WXH90f1r0X4Rgrpmvg9f7V/wDbeCpxFS9WUe1j1cE28XNvt/keiUUUVgeyFFFFABRRRQAUUUUAFFFFABXg/iUZ+I3iU+l1D/6TQ17xXgfih8fEbxKucZuYef8At3hq6auzz8zV8O/kbdg6yRfPj5TWZrFwGO8AE4wKVPMMqFJCq45A71T1aRfNUAjGDXm4bDJYnm7/AIHgyqOVNRM5GxcxY/vD+da5O4lR1OR+uaxEYefHn+8P51qLIBITnua9XEwu0ZVNLF+CQK684VWya2Yj5ss
ZI7k/yrnopFEOeCcZ+ta1ncHz4VzndwfbNeFjqLacl0udGGqJOz8jVEcqSFVUlexqdbUqg3M2OpzUcd4AMMOR6moZb+STdGqMvYNXzbhiJyslbzPaU6EFdu/kSvNbWqmR2VQOM4pl3cfaogsOG+lZ5t2lKiaTcgXlT3PrVtJ7e2GCyA/Wup4aEGpxvKa+4wVeU04u0YszpdPDS72Y5Pag2MIB6Z+lQ3N9NI5KI2AM8d6rGe5IDYcZ7V7dOliHFc0rHlynSTdlczdThUSFc9KtRLm3t17betUb6Rnk3c5PUelX7T57eEZ6LmvXqKSoxv8A1oYfZM+c5M69hiqS8P8AStG8TZLMvcgH9aywdrnJz3rtoO8Lo2orRo6XSJdtoBjJ6/hV8ywk5JGDwKz9IKJabmA9PwrQ2QtjG3HavAxSj7aTaYRvbQcLiMqfLwT7VGtv5jZk4we9P/cQrkBc1QuLySQYTK89jWdKnKT/AHenmwnJL4i/5sEThRtzmq/2N7hmZnYAnOKrRLl1Ltk571PJqBBCRIWB7g1p7KcHalq+5HPGXxbE8lvFEmeM/SsZ/lLgnoT/ADq2vnysHctt9DVS4x5pGep5rqw0HFtSd2Z1GnsrFhDi04+8ByKxJzhmHXrWp5m2NsHtisV3/ekN2A59a9DCwacmaUFzO/Y6jJiQtuI3RqcfgKiUnuf9qmzSBkjwf4R/Ko2fMJIPNcEIO2vUyb1LouNzFTgYFSxyksQeOoFZvnL9/jOM1I10BIpXke1Zyw99EilUaOos7mPylUkYNTOsZyQ2PTFZVnJGYQMgEdKJTKGLLMcelfOzwd6zs7HrRxVqaurlm4l2YQngd65bUZC882em81qz3DFBkksvX/arn72U+dJ9a9/K8LyO5wVqjqysii/36ZjkfXNI74cGo3YtnBxg19GkdMYMUMQwwM9aQsdobHzYzimocHBPIpVYMc9hxV2NuWxpaMc6lATx1/ka7KAR/ZtjEZYk/rXC6ZKRqKcEbQf5GurgYm3z5nOOPavAzei5TTvbb9TlqS5KlrdBb6VInI4CjvWYSjfLxwM067kyoDHJzk1VZwpyD2Iq8NR5YLucM5c0rkwcLkH+9/SnXo4TB+8oqm8mR79akkl3ojbs4UDFdHs2pJitYsQFDJ8+MElcGrnzwy7VXKYzms23IaM5ODnINW0ujjawz2zWFam29ATtoWDLGSOme9RsYs9uuKZ5kZPAGaXMZ7jNYqHL3G3ceCgPAGe9NZ2x8iZFODx9eDmk85VHyrke1CTvsBlT/wDHw2akt0xdxc980TbWnJwKbFJtuYx716OrhZdgT7HT28X7nOeaikYY+cAECktZiYlAPK9felkOWO5OvrXzvK1UfMdLacVYgPll8ZAOMil2pgfP14H1p5Ee7O0A4oCxkA8Y7fWt+b1M7DQgbgOc5qYIoJJbPHSmKFJ4IHvU6mMDlgayqSZpGKK8i5B2qCPWub1AkSOoXOSa6t5E2YVR+FctqKkXbenNejlkm5u6FZKaLvhZAtxcjPIj/wAa7/4TnNh4gz/0Ff8A23grzvQZ/KmuWxjjH869C+Ebb9N19vXVf/beCtakX9ZnJ+R6eAleu11s/wBD0Oiiime0FFFFABRRTZJFijaRzhEBZj6AUAOorJ0nxNpGuQ3c1hdF47Rtk5lieLyztDc7wONpBz0waZo/ivRdeuGt9OvDLKI/NCvC8e+PON6b1G9c/wAS5HI5oA2aKxNP8XaFquo/YLK/Ek7b/LzE6pLtOG8tyAsmO+0nFEXi7QptY/spL8G681oB+6cRtKoyYxJjYXGDlQc8HigDbr588YSBPiR4j/6+Yf8A0nhr6Dr5y8b5/wCFn+IgO88P/pPFW+GV5nNi4KdJpls3xjIAzk4FZuo3BABOSQDT0B3ZP3sCq16Cw/CtqVKEZ6HzVCK9okyCGYtJGC3zBh/OtYPubr0zXNRhheIR1JANays4ckY5610VaaZ2YzDpNWfQ0FnwoA9
cfhV+wumW4BLZweKxIo3bkVasw/nbh6ZFcVejCUGjz5RUHdPY69LmGUbyRk9eabcahFGhxyR6GudRQWJ9aRlZmP6V4yyynzavQ0+tztZIvS6nK52ISCec0IrSsGmYMKoJuDkKPmFDyXAAyRjvXb9XjH3adkYc7bvI3lkhSPORx71C15Fgj+tYYDsoK/d75pPLIU1nHAQTu5Fus9hLxkEjY781LZXACRg9AMVm3BYtg9qICw2j+HHFeo6KdPlZXs/cvc0L+RXmlYf3AKw2k/e8HtU907AsfUYrJJdbnPbvXRh6SjGx6GDw/MnJs7TSWD2g3HjAq5sI+6RWTpyMbJMdMgVoBZj0xivGxEP3kmmeZJ2k0SuoxlyDis+e6WNztB/CrEkbsCGxzWZJHtlYDtWmGpxe7uTo3qSrcSGRWycelakIhhQZwPxrHRmJ+WpfJmcZlx+BrWtSUtL2DZmpJfxr8oB596xrmUGZ8HkmnSx4IPcVl3RdZS3pWmFw0Iv3TajD2srNl4yExsM9s1lzTDzgp71PGZPJ3PjOOcVmShzcr0wDXdTgk2ehhMPHmkmzpJLjiMZ7Cm/asZA6VUaN2KZ7Cm+WwrFUo2OFUYW3LYnB4z2p5mAwo6Vm7Shyeg4p4aQ4zim6SKeHW6Z0EMx8hSrYYUNezrwzk85rNiaRY+1OLzHHIxXE8NHmd7HLZrS5be63jOcN61j3s3zyc9Tmp2yWIrOvUyDn6110KUYvQ6sLTTqasg847irHOTxTVmwCCec1VkDCQelRSK+Ce1d3Kj6COHiy/wCapO7+LFKr4IAPHQ1nDzNrEY3YoBlDDGOaOUv6srWubmnyj+0F57H+Vbgnl2fI+AOK5PTBJ9sLHG7Fb0bSBccVx4iknK54eY0eWro+iJJZic7jk1AZs4/OoZtwZj371CQ+4kY6VUaasZU6EWr3LRnHmAEHkYpfN249BVPec4PWnXAZVB7Gq5FsaewjdRNGJi0XynBBqZZgOG71n224ooHXAzVry25z6VhOCvZnHUgoyaZcUx9RTvkBByPzqlGnGADUgjPftWDpq+5i0kWgYx3GPrSiWNVwB+tUmi3AHnHUU1F25AzxS9kn1HZWHSsgdumapxz/AOlxjvnrUd1xKx71Cud8ZHUmu2FNcp3UqC5Lvqjoobto3DqTgE5A71qi8ilQE8fU1zUBden3iKt7GY/rXmYjCQk7s5OZw0RrGWEnt+dKrRYwMYHvWUYmxSBJBuA71j9VjbSQvaM2kaHkAjnnrUcjwrjJHHTmsfa6HcKicPIeeoNOOCV78w/aX0NhryJBxz9DWHqNyrTM1DRlQ3vyay7pWlfd2U13YXCwhLmTN8PBVJ6vQ0dLnBeb/dr0z4PndpOvH/qKn/0nhryKw3IZvQ16x8FyToWuE/8AQVP/AKTw1eJgk3L0PVwdJQxU2u3+R6XRRRXGeuFFFFABSMyopZiAqjJJ7ClooA8lOs6b4iHxH03RdUtLq+1GP/Q4oJlZpwLKNTtweeQV+taZ1G08Va/og8OyhzZabdC4ZAR9m8xEVI3/ALrbhnb1GwmvR6KAPLNGv7TU7PwDo1gCNT0qSNr6AKQ9msdtJHIJP7uWYKM/ezmq9ncwv4Z0XwopP/CQ22tRvNbbT5kQS6MrzH/YKAkN0O4DvXrdFAFTUY9QltCumXNtbXORiS5gaZMdxtDof1r5+8RRXcfjrxANRngnuxcxb5IITEjfuIsYUsxHGP4j/Svoyvn/AMWru+I/iMf9PUP/AKTQ1tQdpnFj3ag2NFuoXLcZANZ17EcYI6iuptLETwKW7KO3tWZq1uA5IHGcdK58NjIyrcl9T5yMZU7TZz0UH7xMjnNaEdvhjn14qJFHnov+0K1YoSZsEHqf5V3V63Kh1qspNFeCMhtuOO9T2sTC8UquQAM/TvVmKNdhZfmO7afb1q3YW4Go4P3SuK82vilGMm+xnCDlJLuP/sWOUs29huHarcOlRRADJO0Y5q0kEqEqAxA
PBqQ2ckgKlmXI6183Vx9R6OpoerDCx6Q1M6aKFCcAZzjpWZNCzTxx7R82T+VdTbaUqKgdy5CgZI61bNnbphiqZHQkVnHOqdB2jeRr/ZVSqru0TjZdKldflUj6VA2lXCxMCh/Ouvmlt0fZlc+lVZ5oihA259K7KGb4iVvd0OapgKUL+8cLdWzqfmGGHGKfDB8sefStO/UySOVjzinW9n50UQ+78uenSvo3i7UlKWh53vP3Ec9cxHDZHIqkIssQBkiumu7BtzkA4x1x1rH8nZKciu2hiIzjdM6aNdwTizptF08XWmpuyCMHir50qTkAHGc9ak0KNl0qMouTtFaglYDlMH0r4nG4+vHETUHpc7aOFpTpqUt2Y50dmVg24ZHrWe1hHDKykk4HeunaSVuFi61i6hYTPLj5lBGSa0wOOqzm41ZJGWKwsIRvTTZR2wpgADJOOlMayllyVH61dttPxIm5y3PcVtuII0/h/KunEZh7CSVP3mzGjhHUTctEjj20u4JBdCFHPWsi5gAZt397Fd3LPE6FV2kmuV1CL52+XGXr08ux1SrJqorGVSCoyXK7mf5eV9qoSQAtnvitiKM7HGOlUnQq3I7V7NOpq0OhWcW7F4w/d4/gFRtDgKPWrzRlTECPvIKaY2dmUrjnArjjWOW7RUe2yG4ojticDHtV1oSihMknNS28Q8w56f41LxDUbjTlsM/s5jGGUEnHSoWsbgDOzknnmuhjtZdisiFhjNDRTYb9weMY968tZk72umb/AFd2vqc4bcsCCMOvUVlXkeS478iuylto92CQr9cY6iuev7cLM5HTpXpYLFqpIUL0Z3Zzzwk9R9KRYBtwfWrxQmVRt4prxlZWAGcmvY5z01iW1YofZ1iQk+pNOjtVCDr1zVySPO7jORTgnA+lHPoU8VJrci0+D/Tk45wR/Otz7JLt3IucmqWmx7tQj47kD8q6iK3lRVAiLKckt6c15eOxXs5r+u55+JcqtS/kc7NZvGvK8mq7R8j1xW9eKxYbkx2rKdM5zwcc+1XQrucbs51NxdiqsPcjmpriH92mR1UVKsfIGe1S3NuwEZ5xhf5Vo6vvLUftG3cqW0DGNSoyRW3axblBZR71Ss43UBgpKnrXUWCRGBdwXLe1eXmWLdON7XLpwdapqyklvCccDn2p5tIiD/hWx9niPTH5UotY/UflXzssyW92egsC/IxGt4YyoP8AFwOKgljiUEADP0rpPskXfB9Miop7eAKchfyqqWaRcktWKeAkk3ocDeRqJWB9aqQoPNT03Yror23RpZOAOfSs42oW4jC19fQxKlT+R5qm4pxLVtZiQ7TkEGtVdLdUG0Z+tW9Os1deeD9KvCN4sqFLY7181i8zl7Rxg9jsoYO8eaXUxPsU4xmMe9BtJsDEY68/StzDkj931puJMA+V/Fj8K51mU+yNPqUfMxTZSd4xioHsJznbGOtdHtbnMdJtbOBHTjmk10QngYvqzlZ9Ol2HcuKw7uFoyxx1Nd7PbySxE7CDXMajYucggjJz0r3MtzD2jtJo5Z03Qkn0MfTYi32jI5C5/nXp/wAHFK6NroPX+1T/AOk8NcFpVttkuAe6f416F8JBt07Xx/1Ff/beCu+tV5puK8vyPRy+fPiZNdV/keh0UUVie2FFFFABRRRQAUUUUAFFFFABXgviRPM+JniJfW5i/wDSaGveq8K17A+J3iI9/tUX/pNDVRdrtdmcGZf7uzpLWyc2sJj4+XBwKz9V0py2M8AZPHWug0mfy7Rd/OQKr6rdLnIU88V8Nh8ZiI4xxj3ZFbC0HhFNvXQ4SSyaG5jJ/vjt71oISJ3+U8cii7m33ka4OMg/rSmdUuHHpX18qk6kVzLWx8899yaRo7e1R0jyXPIHvzVnTwZLtiD90bqo5Z19gc/hVzSZGiuZDglBgAD8a4cRFqhO25vRadWN9jcjujgZjOaQ3rlsCJqXfGTkiniaNecGvlnGF78h7ylK1uccJpmAADCo5bW7nZS
twUAPIx1qb7WijO01XbWFRwvlyflUU4V+a9Kmvw/U0nKja1WbJ10sFgzkFvUipTpMbL0XP0qD+0nboGH4Ug1Z0faVc59BU8mOfUtTwK3RkajZfZ5cAZyccCjSbfesKH720Zqxe3QnlxsbOM8iptN2Rys5BGwYFezOvWWD9/4rHkxpUnivd+G5Q1eMRrtRevpXKXKhHbPWurv7lXlHXqc1zN0m+VnPUZH4V7uTuUaaUzgxTj7VtbHX6BKE02PIzwK0jJGXzgVl6EiDTIyw6qK0wkJ9Pzr5LMFBYqo9dz3cK5+witCRJUB+7VTUHLttRDkjqKshYR/+umSvGrAgEnpxXLRajUUops2q3lT5W0jMgtJzcRsXIUHkYrQXTWK4kcN9RQ1xtwArZPtR/aTOMqrj8K661XFVWnBWOelTw1O6m7inTIlXIC5+lczqdntZySOG4ro1upnJ64+lYWomSS7lXBwMEce1ehlEsRGs1Ulc48wVF006cbGVs8uPOMkkA1myfMScdq1HkaNZNyk49qzGOccHpX1+Hvq2ePHRnQ3cIW3t5AOcKKjKKz5GMg1LdzfuII14bYKpLK+48/WvLoxnKnd+ZpUcVLQknXeHZT34qxBDlge2RVRGwAPerO+QJtjbB4Aqqily8qZMWr3Z1NqBFEFIzUjeV3ApbYo1vGJPvYwakNvAwOQOfevgqlRKo3K+/Q+vp026a5bFaWyglHmBFLAYrldW0x1lYA9XLdO3pXYHEG7H3fQVhapdK1wyDORgk/jXsZNia8a1ou6PNzKlS9nfaRx8sPluMjvVd0O9j74rWuE8ydvTcMVRnTEhA9TX3lGrzJXPDhNp2KqrhgO/emnAf2qdVwxz1NR7eWU10Jmyldl3SgDqUAGOp/ka7yEILZF2g8c1wmkIBqtufr/I16FaRRm2XfjceetfIcS1FGcG/wCtz0MvhzydjD1REG0bBznmsB4wWJx1Fddq0UQRRjv61y74R2GOgroyivz0U0cGOp8lVoqgD7Qq44xirt2AY41HXj+VUju2pz8+ev41fETNIGPpn8K9Sq7NSb2ORbWJ7WAxwD5CQRnpW1axIsKA4DdRS6eiNZRBh1UGrLQRkggcgYHNfKYzG+0m4PTU9nDYbliprUaEOfv04I3H7yk8ojuKTa4/iFcDd9mdiVt0TBDnl/pTxCjjDEGq4D55YYqVFbuwrCaa1UjaDT3iY19aIbhwMDmsmS2Md5G2cqW21001sJJWc4yazb21eMK6kYDZr6LA43RQb1tY8XFYZpuaWhsW8G2AFSAwPNOLMOoJoj3mFWQgZXmhi4718+25TbbuewkoxVkAlOR8hpPtHC/uzycfSmlmFJvNV7NPoTztdR/n5B/dng4o875iNh4Gc1HuNLuY9Kfs49he0l3JPMLr9081z2p5ExTaeK6BGYdTWXqMZkdnruy2Sp19tDlxyc6V+pi6XGXnkzx8uTXb/CgYsfEAxj/iaf8AttBXI2amO4m56xnFdj8LsfZfEOP+goP/AEmgr6eE+atLtZGeUq1T5P8ANHe0UUV0HvBRRRQAUUUUAFFFFABRRXN6v40sdIvbq3ay1C6WyjWW+mtYg6WqMMgvlgTwN2FDEDnFAHSV4N4lUj4keI3HX7VCP/JeGvd45EmiSSNg6OAyspyCD0IrwnxNIq/ETxID/wA/UJ/8loauF76Hn5pf6u7eR2VgEeziBJ4FOvIothbnmua/tAwoqhuoGOaiu9TZ7Xyy3uDmvj1k9eVfnUtGzl/tKkqPI462Ll5DGJPfg1kMgNy7856GoXvWmbJPIwP1pzSbdxHUjNfR0MPOjHlk9Tx6tRTldKxat5Mxy56NjFa+itGxmHq1c5HNshGPXJp8d+IiNrYbjis8XgpVoShHS5dCv7Oak1ex2UlrGR1P50qW8Y7n865oa0Sg57e9A1dz0PP1NeL/AGTi7Wcj0fr9BO/KdYIowO9J+6U1yL6uxThufqaqPqT787z+ZpQ4fry+KZb
zWC+GB3gkiFPWWI1wa6kwQnecD3NTLqTbVO88j1qJ8OT/AJio5yl9k7ZoYn+aqRRBM+P84rBh1kpGFLckE9TTBqe1/MY46/lUUsnxEG036Dq5lRnZqI+5QF2dvvZ5rLmUG4Ze2KddXym4IDZDHNUZZwGZyeCMGvqcLh5xSv2PEqPmlojtNHhSWwTOeOBV9bGL1PXPWuWsNTMFsqLggYq6utnIHHIr5rF5bi3WnKm9Gz18PjMPGnGM1qjd+xRjueuetS21pCjyNzlyCefbFc6dbJ9OaRdYPAz1OOtcssqxsotOR0RzDCxkmonYCOEDNVVaIZ9q5h9YbYx3fd9zWa+puXBLkfiaKHD1ed+aRdXOqatywO6E0QNY988S3ZasL+0Wz8zEcetU7y+PmLlj045rvweRTpVL83Q4sTmntocqial9JFuNZUoQD61TeYu6/MTk1JJMuSM8A4r6OjhXSSje55NSTm72L0zGURTDpjaKQfMhI+9mp5pIzYAKRlcdqz2nCspB4IrOknONkthS0ZPGSQwboeBU9qwMuT905qpFcKYgDgENUMN6sZQZ53ce9XKjKakrBF2Z3xtY5RFJk5UZHPtipBbhRwT+dc2msukYVsA4yKc2vFSFbAJGR1r5GWU4xuyeh78cww27WpuTHysA9DwPrXPago8zf2O4/pUia3vdvNChV5zWZfXyGbYG7mvTy7AVqVS0kcOMxMKsfdGhDuLHpuGKrSR5nkPvxThdq+1QeQeaSSRd5APQ5NfQQjOL1PMZVMZOfXNMaIvIR/DyGqzHKgPJ680gkCkZxya6FOXYpSaY7SkCahER0UH+RrvIYVlSOTJzsx1rg7eVYroHNbsGs+XCACCFODXhZ1hK2IcZU9zuweJhTb9p1Nu+s0MPfnjrWDdWKeYeuMetST600gK8YHNZU2pNKMnGTXPl2DxdNWkx4yvRqSvBCvbKjLjOPrWkQqxxL6pWK1zkYJqe4ukYwFX5RMEV6tWhUm4p+Zwxklc7i0s4zYwrz8qgdaWW1AAxngetYFnrhMCjI3AYI9xTJ/EbLHvUKRj3r5J5VjnWdu59AsfhfZpNa2NvymyR2pfJaubTxFMVBKJk/WlXxK7HG1c/jXU8oxvZGCx2H8zovJalETD/APXXOP4ilHRE6e9Rr4lmcZVEx+NCyfGtbIPr2H6XOoETNyetQ3FuzIRxisRPEUhbG1cfjU51zzQVAXNQstxtOadgeMw0otamyqSLbJ5eM7O/rQ4m5xjpx9axv7caAqkgUAkBak/txSCTt4OKzeXYpO/KmX9coNWu0aOJ938O3b+tAEuFzj3rNOtoASSuBSNrSKQCRknFWsDif5CfrVH+Zml++wchc5/Sg+dhtuOny/Ws0a0hIGV5oOtIDjK5o+o4n+QX1qj/ADM0x5/Gdv3f1qCeGSSI78bu+Kotr8aBskfKQD+NNfWckr8vA5rSngcVGSko2JniaDjZtjFhK3J3ehrrfhpGIo/EaDoNUH/pLBXBPqoediMYCnP9K7j4VTfaLPxDL66oP0toBXvUKNWE+ae1h5XKLqyS7f5Hf0UUV2HuBRRRQAUUUUAFFFFABXnuqG+0i/8AF9uukX962tKsli9tAZEZzAsJR2HEeCmctgYavQqKAMa20CNvC2n6PeTXH+jW8UTPbXMkDFkUD76FWxx61iy/CzwpPcy3M1tfyTykGSR9UuizEAAZPmZPAA59BXZ0UCaT0Zxp+FvhQ9ba/OP+ordf/HKQ/Cvwm3W1vz9dUuv/AI5XZ0UE+zh2Rxf/AAqnwiP+XO+/8Glz/wDHKX/hVnhM/wDLrf8A/g0uv/jldnRRcPZQ7I4z/hVfhLGPsl9j/sKXX/xyk/4VT4R/5873/wAGdz/8crtKKd2Hs4dkcX/wqrwj/wA+d9/4NLn/AOOUo+FfhIdLS+/8Gl1/8crs6KVw9nDsji/+FVeEv+fS+/8ABpc//HKP+FU+Ef8Anzvf/Bnc/wDxyur1C+g0zTbq/um
229rC80rYzhVBJP5CsDSfFV7danY2eq6MdO/tGBp7JhciXcFAJRwFGx9rA4G4cHnindh7OHZHP6B8LdCk06U6pZX4n+2XKqG1K4X90J3EXAk/55hOe/fmtX/hVfhIY/0S+46f8TS6/wDjlT+JfGUmha9Z6VFBpm65tnn87UdS+yINrKu0Hy33Md2ccdDXT2zyyWsLzpGkzIpdI33qrY5AbA3DPfAz6ClcPZw7I5H/AIVX4T/59b/0/wCQpdf/ABylPws8JkYNrfn/ALil1/8AHK7KigPZw7I4s/CnwiTk2d8T6/2pc/8AxykPwo8IEYNlekf9hO5/+OV2tFO7D2cOyOMHws8Jr0tb8fTVLr/45S/8Kt8Kf8+1/wD+DS6/+OV2VFIPZQ7I43/hVnhT/n1v/wDwaXX/AMco/wCFW+FP+fa//wDBrdf/AByuyqC8luIbOWS1txczquUhMgTefTcelAeyh2RwPiP4YaJH4Y1V9JtdROoraStahdTuWJl2HZwZMHnHWtEfCrwmVBa0vs45/wCJpc//ABytrwxrk2v6XNc3NktnPDdz2skKzeaA0UhQkNtXIJX0qhq/iu8tNRv7XS9GOorpkCz3rm5ERUMCwSMFTvfaM4JUcjnmgPZw7Iq/8Kr8Jn/l1v8A/wAGl1/8cpG+FPhFjlrO+J99Uuf/AI5XWWN7BqWn219bNvt7mJZom9VYAg/kasUXD2cOyOL/AOFUeEOP9DveOn/Ezuf/AI5R/wAKp8In/lzvfX/kJ3P/AMcrtKKd2Hs4dkcb/wAKt8Kbdv2a/wAen9q3X/xym/8ACqvCJ/5c77/waXP/AMcrtKKWwezh2Rxf/CqvCI/5dL7/AMGlz/8AHKT/AIVP4QyD9ivcjp/xM7nj/wAiV2tFO7D2cOyONPws8KHra3//AINLr/45WV4g+F+iJp8LaZaag1z9stlbGpXDHyTOgl6ydPL3/TtzXd6nPfW1kZNOsUvbnICxPOIhjuS2DjH0NVvDWs/8JF4Z07WPs/2f7bAs3lb9+zIzjOBn8hSD2cOyMH/hVfhPn/Rb/nr/AMTS6/8AjlIfhT4RY5azvifU6pc//HKfD41ubvxbeaHb2ukr9ku1t2+0ar5dxIuxHLpD5R3AB+Pm5KnkV2NFw9nDsjix8KPCAORZ3oPtqdz/APHKP+FVeESc/ZL7/wAGlz/8crtKKd2Hs4dkcX/wqnwj/wA+d7/4M7n/AOOUf8Kp8In/AJc73/wZ3P8A8crtKKLsPZw7I4v/AIVT4Rzn7He59f7Tuf8A45Sj4V+EgMC1vsf9hS6/+OV2dFK4ezh2Rxn/AAqzwn/z63//AINLr/45Sf8ACqfCP/Pnff8Ag0uf/jldpWNrGoa3ZzY0zRIb2FYvMeSW+EHPPyqNrZOAOu0c9etFw9nDsjkNV+FuhJqOirZWV+bd7xlvcajcHEXkykZzJx+8EfI/qa1P+FU+Ef8Anzvv/Bpc/wDxyum0bVYNc0Sx1W2V1gvIEnRXGGAYZAPvzXO2Xjia6lsLqTSDFomo3RtbS++0BnZiSEZo9vyqxXAO4nkZAzTuw9nDsho+FnhNfu2t+Ppql1/8cpD8KvCRGDaXxHp/al1/8crs6KQezh2Rxf8AwqrwiP8Al0vv/Bpc/wDxygfCjwgDkWd7/wCDO5/+OV2lFO7D2cOyOL/4VV4R/wCfS+/8Glz/APHKB8KPCA6Wd6Ppqdz/APHK7Sii7D2cOyOMHwq8JDpaX3/g0uv/AI5QPhZ4TByLW/B/7Cl1/wDHK7OikHsodkca3ws8KPjdbX7Y6Z1S6P8A7UrLn+GGiDxRYxxWmo/2Y1ncNcH+0rnHnB4fLyfMznaZf8gV0PiDxFq2hR3t8dCjn0myj82a4+2hZWQLlike0g454LKTjjtW7cXSw6fLdqu9UiMoXpkAZ/CgPZw7I5Q/CzwmRg2t/wD+DS6/+OUH4WeEycm1vz/3FLr/AOOVY8G+LbnxXAl0bfSY7d7dZStpqv2
maNmAISRPKXacE554IxjvXV0B7OHZHGf8Ks8J/wDPrf8A/g0uv/jlL/wqzwmf+XW//wDBpdf/AByuyooD2UOyOMPwq8JHObS+Oev/ABNLrn/yJS/8Ks8J5z9lv8/9hS6/+OV2VFAezh2RxY+FPhEZxZ3vPX/iZ3P/AMcre8P+GtL8MWk1tpMEkUU8vnSCSd5Sz7QucuSeigfhWtRRcahFapBRRRQUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAZ3iDS/7b8OanpW/y/ttrLbh/7pdSufwzXLQW/iO/1LSb690P7M2iWkxEf2qNvtlw0YQLGQTtTG7l8HkccGu6ooA5zVbq/aOIHwg2otNbDzB58G2Nj96Ny7DI91Bz6VZ8I6Tc6H4T0zTLyRZLi2gCOUJKj/ZBPJA6D2FbVFABRRRQAUUUUAFFFFABUN3NJBaSzQ20lzKillhjZQ0h9AWIAP1IFTUUAcN4Wk8Q6XpWrpN4WukuHvrm9gjku7cCUSzlgmVdsEK2TkY460/UrHXtM1rX59K0oahHrMMexxOkYt5ljMf7wMQSmApyuTwRiu2ooAz9B0z+xfD2m6UH8z7FaxW+/wDvbEC5/StCiigAooooAKKKKACisOfxp4VtbiW3uPEujQzxOUkjkv4lZGBwQQWyCDxitCx1Sy1MM9jOLiIKrCaMExOG6FHxtfp/CTjvQAuo3VxZ2Tz2unzX8oIAt4XRWbnsXZV469a5bwT/AG9o/g3R9Ku/Ds8VxaCG1m8y6hxsx80qlWbIX0OCc8V2lFAHD+KLDU9fhm0m28NfZ3e6jdNVkmh2RhXDeaoDeZvwvA2jnviu4oooAKKKKACiiigAooooAK47xl/b17d2+lWmjXtzosse6+msp4Ell5I8geZIhUEcsw5wcDGSR2NFAGXaz3US6Xbw6K9tavEwlVpYwbMKo2JtUkNnp8pIGK43TvD+upp2g+GZ9NEdlpF7HO2o+ehSaKFi0YVAd4YnYDkADB5PFejUUAFFFFABRRRQAUUUUAFFFFAHDeJV1zU/EIs5/Dl9eeHbbZIFtZ7YC9l4P7wSSqQin+HHzEZPAweomu77z5Yl0kyQfZDKrtOg3y5P7kr24/i6c1o0UAcZZafqGoeMdO1dtB/sW3sbaaGQySxNJcb9u1MRMw2Lt3cnrjA612dFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAcn4kjT/AITPwb8i83lznjr/AKLLXPeKNQ1GFfG8drqFzbmA6cLdo5CPI3sAxUdBnv6969IktbeaaGaWCKSWBi0LsgLRkgqSp7EgkcdjUUumafOZzNY20huNnnb4VPm7fu7sjnHbPSgDiF0Sd/G17oP9va2NPOmRXmPtz+YsxkdNwkzuC4UHYDtz27Vhw67r2v23hO2eTcLvRhdyH+03083MwKg/vI0ZiQOdox97JzivWBa24uzdiCIXLRiMzbBvKAkhd3XGSTj3qpPoOj3OnRadcaTYy2MOBHbSW6NEmOmFIwPwoAp+ERqS+HYE1W6gubpHkXzYZ/OBQOQoL7V3MBhScDJBrcqG1tbextktrS3it4IxhIokCKo9gOBU1ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB/9k=", + "text/plain": 
[ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oat
E/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeB+KAo+IviQbRj7VD/AOk8Ne+14F4qGfiP4k/6+Yf/AEnhrSl8RwZn/u7+Qy/jX7TuIGNq4/KqV1bokMJCj5uTWjdr5ksZ/wBlSfyqpfNm3iPbtSw8naCPm4t8+h7n4dRP+EY0n5R/x5w9v9gVpbE/ur+VZ3hz/kWNJ/684f8A0AVp1L3PsKfwL0G7E/ur+Vcb8SkX/hHbf5R/x9Dt/wBM3rtK434kjPh23H/T2P8A0W9C3MMd/u8/Q8fFspwMCn+QpydoyvGamwf4amxmEHvXTKo0fKOpLuUVjUkLtHB9KvfZ0jQ/KOmaSCH96oHrzVqbjj1PH0rGrVvJRQNtjNGiQa5pBCjm/t//AEate97E/ur+VeEaV/yH9Gx0+3Qf+jVr3mibukz38o/gy9f0Q3Yn91fyry74mxqdYhIUAi2Xt/tvXqdeY/EoE6tHjr9lX/0J6zTs0a5p/u/zR566LscbRwuf1qlGiqHO0epq8SfKkPfZVaIgrI3+z/hXowejPEpNqLABcIABQI1A24GAM05AMRr+VPZQcqOw5p3Bys7DVt0kQAqMZ5q0iIi7Qo5pscZ+UL3OanwEDDvWE530Oec29LkbBSGbaOnFMVELAbRxzTywDKB0AzSBed/96ktiVsRtEm8naM5Fep/DpVLagdg/1cHUf9dK8wcAsD/dOa9T+HpBN9j/AJ42/wD7Urnry96PzPQy13xEfn+TO22J/dX8qNif3V/KnUVJ9MeReL40Pi/UQVGN6Z4/6YrXF3MKZYBRwa7nxfGW8Vakw6b48/8AfpK4q5j23BK9Cea0w8v3j1Pk67axEn5v8yoY0B+6M0jIoQYA5qZPn+b2FGzcBntk13c1tw57OzKAhXsBgU4W6ZPA9ateSRgDp3pCvVu/Sr5zf27ezK6ookwQOBTWA3bQBxinuRuBPrSEdc+tUaJ9WNVEIJwPSvY/hKi/8IrdDaOL1+3+xHXjQztP14r2f
4SgjwrdZ6/bW/8AQI6xxPwHZhP4p3WxP7q/lRsT+6v5U6ivOPVG7E/ur+VGxP7q/lTqKAMLxdGh8NXI2j78Xb/pqtcKLWNLfy9qhkTAPvXeeLTjw3cH/ppD/wCjVrhp9wZWPQ4HHrXi5o5e0hFM8zFpe0ba6L82RrboZlBQfKOuPauZ1iOM3T4UcGusWVXhZhnrt/pXJ6wQt2UXoOKeUSk67v0R5mLSUVy9z0b4XIn/AAjNz8o/4/G7f7CV22xP7q/lXGfDAbfDVz/1+N/6AldrXuT+JnuYL/d4eg3Yn91fyrmviAijwPqXyjpH2/6aLXT1zPxBz/wg2pY64j/9GLSh8SNa/wDCl6P8jwlY1znaKsBF8vOBk0yMYOParDKrJn8a75vU+XqT1KKoM4wMg1atERbpPlH3qh6P75qaAf6UnuRTqaxZpUldMmvok+1PhR8pIFVLqCOXaGUEZB/EHIq5dhmuZT7moJvvfhWdJtRiY05NNNMrPGmQCBSiNSOQDmnsmcZ60KgCj0FbX0N+bTchESBANoxuqbykLHgUn8NSgA/N36UmxTm+5G6hkjwOOf51XdBhuBVxh8g9AP61WkUDrTgyqMtbE2nFfLnUgfdr074NKE0XXVAwBqp/9J4a8zsE3eZjuor0/wCDwxpGvD/qK/8AtvBXLiHqztwEv9qmvL/I9GooorlPaCiiigAooooAKKKKACiiigArwDxWSPiV4kx0+0Q/+k8Ne/14B4sYL8R/Enr9phx/4DQ1rR+I4cx/3dik7mU+qgfpVTUDiGGMdquWnzx7T1HSqWojakXqc1NH+Ko9j5mn8SPd/Dn/ACLGk/8AXnD/AOgCtOszw5/yLGk/9eUP/oArTqZbn2NP4F6BXG/Elgvh61J6fa1/9AeuyrjviQu7w/bA/wDP2v8A6A9C8zDHf7vP0PLQ67TirUIGPqM1Xj2YJOOKtwoufvfeHFRWaSPkorUihtjlnXJyc/nSzo3zkDnaF/Hmpo2MLSP1Qjj2qK4yzLg8qNxHrWSlKU9SmlYNH/5D+kg/8/8AB/6MWvea8P0yIDWdKbGD9vt//Ri17hXS5KUU0e/lH8KXr+gV5d8THxrUSjqbVf8A0N69Rry34mYOu247m2TH/fb0lujXNP8Ad36o4P8Agkz/AHKrQrlpiOhT+oq255kH+zVaLpLgfwf1Fd0Xozwab91jol2pHnqcVI6Ht3/lRgmKPAyQBUxU7hxwRUuWtzOUne4+JCGXFRy/fYmrlsp3cr3x+lULz/XsAehxWFOXNUaIirsQ/dVO5GKlc4PHpgfWq6H169qtRpuZQf4Tk1rPTcclZiPGVRR3OP516f8ADz71/wD9c4P/AGpXmkhJfOO+BXpnw85+3H1ig/nJXHUbco38ztyz/eI/P8mdxRRRTPqDy7xap/t/U2A/5aR5/wC/SVxU6jzT712vitj/AMJDqqY4LRnP/bJK4qRcuOe9Z4a6qTv3Pksbb20vV/myBIsSYH0pQhJGR7VKq4lLHjDZpyDJXI53V2uZzOTZH5BD8DquajeLcM4571fYBY8E/NnioWG0seobmojVbDmaMiePLAHtURJJNaEyhsHoaoMASOcYNd0JXR6FGpzKwwbinI5xXsvwk/5FS5z1+2t/6BHXjmfkIPWvY/hKMeFrvn/l+b/0XHWeJ/hno4N3qne0UUV556oUUUUAYfjBN/hi6X1eL/0atccGWXIHJUZrtPFQz4duB/txf+jFrh/Lx5ZDEYIz714OcWcops8/EXVX5L82RkqUQDscn865rWowL5nyeR/9eulmAVAc4IbpXMa2T57Ec9DWmT61rrzPJxeyR6P8MBjwzP8A9fbf+gJXa1xXwwz/AMIzcZ/5+2/9ASu1r3J/Ez3MF/u8fQK5j4hkjwJqZHXEf/oxa6euZ+IOP+EG1LPTEf8A6MWnT+NGtf8AhS9GeIRAlRnrgGre35eOtVo+RkegNaIRREW7iuirKzPkar94yJF2zk+lTWzf6
bHn+8KjuP8AXue3FEBzeRAf3q3esPkdNrwv5GncR4vJx/tn+dU7rAXHfir8hzeXRI4Vj/M1RucNx3rloN6X7I5o/wAQhfAwM80188emP1pzMuVBPJ6UjMMV1I3jfQTZlSPWnhckexpiHKbs8VMAMj65obsKTaDbgYPpVSQhQ2e1XnGVz7VmyLvDjPWilqVh9XqXdP6vjoVr074Q/wDIK17/ALCv/tvBXl2mklpAOy5r1P4SDbp2vj/qK/8AtvBXNifisd+AVsXJeX+R6HRRRXMe4FFFFABRRUc8K3FvLA7OqyIUJjcowBGOGBBB9wcigCSivKtWkn0DUvE2oaPf6o8Gg6S25LrUZ7hHu5BuXKyOw+RAp/4H7VrTtd+Ddbso4tSv9Rhu9Nu5Zo7y4abMsKowdc/dzuYFVwvI4oA7+ivOdKa/0s+DdUfV7+8l1thFfxTzl43Mlu8oZEPEe1kwNoHB5z1qrYXmojQ9B8Wtqt893qOqRRT2rTsYPJmmMYjWL7q7AVIIGcqck5oA9Qr5/wDF2P8AhY/iPP8Az8w/+k0Ne66jeT2NoZrfT7m/kBA8i2aMOff94yr+teB65PLfePNfnnsp7KRrmLNvOULpiCIclGZeevBPWtaPxHDmLtQZo26KgU8A4zWdqIG1Ceck1oHIYY7AA/lWXqGdsOTyc/0rLDq9W581D4kj3jw7/wAixpP/AF5w/wDoArTrM8O/8ixpP/XnD/6AK06b3PsKfwL0CuP+I3GgWp/6e1/9Aeuwrj/iP/yALYHvdj/0W9Iwx3+7z9DzBUUjjAyeasRAhcd14B9RVVFJY46ZzU6zEIfUD+tRUTeiPkkTtImGi4461AXy6ccseT6Cmbdxzn5z96kLcOe4G0VMaaWw27lrTS7eJ9MO4+WLy3wPfzVr3SvDNLkH/CQaXGOv223Of+2i17nW32UfQZR/Cl6/oFeX/EoAa3CxHItVwf8Agb16hXl/xMXOrxe1sn5b3pLdGuaf7u/VHASthnTGMpnP41WgY75V9I/6iprg/vSP9nFQwD97L7Jj+VehFe6eJTSUGXLfJ2ntsX+VWApR13HOTgVWifbGAOyj+VXlKu6bux/UVzVW0zln8RZgTIbJwSCB9aybtMTYzkg8n14rXDA8jsM1kzNmTJ7k1hhb87ZSdtiKJT1J69ParkKkkEcEnBHrmqq5YYHbmrsDKMyY6DFdFZuwpu7JJQo2qMfWvR/h+u37bg5Bih/nJXnc0Y8lW75r0fwCAq3YH/PKH+clee5e9D5nfli/2hf10Z2dFFFdB9MeW+Lzt1zVCP78Y/8AISVx3lsV689c/jXX+MJAuu6oh/vxn/yGlcrC+6VgfukVjRvFzfmfJY3WtL1f5sjl++SOlKCBMx7ZBFD/AHWAIqMqWAZeMtj8K6UrrU5CeQhpAcYAFRMNoOTnHNWpYlC59QAarlfvK3p+lRTkmtBtFG46gg45BrM3Zmcela00Y4/SsxkCOx7k816VFqx3YWSs0IeQK9j+Ev8AyK13/wBfz/8AouOvGhncMdK9l+En/IrXf/X8/wD6LjpYn+Gepg1aqd7RRRXnHqhRRRQBjeKjt8OXJ9Gj/wDRi1w4yCATnkcV3His48N3JH96P/0YtcOcNtOPmJwa8LNv4kfQ87E/xfkvzZDOMktnK5zXM69kTcdwK6SbmDCcdzXNazIJJlHPQfyrbKE/aryPJxLV0ek/DA58NXP/AF+N/wCgJXa1xXwvGPDNx/19t/6AldrXsz+Jnu4L/d4egVzPxB58DalxniP/ANGLXTVzXj8Z8EaiB/0z/wDRi0Q+JGmI0pS9H+R4lB09uK0ukLN1HTFUEGGPpVtXLITn5OmK3ras+RqO7uZ0wBmam2n/AB/xE/3qWdgHc+hxTbbm9iz1DZrp+w/Q64/w36fobV0As0+0feYgmsyb5iO2K151AkmA7saxpj8p9a48K7o5YfGQkgu3HKng/hQSDtPagf61ifTim46D0Ndx1pD0A
Ma+mKlLYycZIHSokwFUCpkGfm74qZGc97jwxeHOCMjp+NZ54uMdsc1pJyvPQj+tZsgYSvz34opbtFYbeSLelEebKcdFr1H4SHOna+f+or/7bwV5bpwI8z/dr0/4PEnSNez1/tT/ANt4K5sSvfbO/Af73L0/yPRqKKK5j2wooooAKKKKAMe08Nafa6ZqVg4kuYdSmmmuzOQTKZfvA4A4C4UegAqvpPhG00u8F3LfX+ozR25tYTfSK/kxEglVwoznauS2WOBzXQUUAc1pXgqw0m9tJ0vL+4isFZLC2uJQ0VoGGDswoJ+X5RuLYBwKS38D6dbahDOt1fNaW9y13Bp7yqbeGZiSWUbd3VmIBYgE8AV01FABXz/4sJHxL8Ren2mH/wBJ4a+gK+f/ABbx8R/Ejd/tMI/8l4a2o/EcWYfwGSyttuGXsVX+VZ98nELdgDir12wE2O5Vf5VRvmYrFxwOlRhlrF/1sfMQ+M948O/8ixpP/XnD/wCgCtOs3w7/AMixpP8A15w/+gCtKpe59jT+BegVx3xJ/wCRet/+vof+i3rsa4/4kDd4etx/09D/ANFvSMMd/u8/Q8sWXadg6kcVMrjZn1HP51ErAMeBntUmFaJiD1FOaR8iRRk7zIP4qUHb9AM/jTYyS2SPlFIRlVT1yTVtagXdHXHiHST630H/AKMWveK8E0UlvEmlnsL6AD/v4te90T2R9Dk/8KXr+gV5j8SDjWY/+vRf/Qnr06vLviZ/yGoj6Wi/+hPWdrtG2af7u/VHncz5lJ9qbFnzJT22jH6UScFie2KWI/vJPTaP6V6X2TxV8Gn9bE8Z/dhe+0VZUlmXd25FQRnMZ9wMU8jrtOTXPNXZyy3LDXHDY/GqxAJ3epzUbSY4HU1LGmcOegpKCgritZXAHCr6k0+1JfYD2OTTBtILg57CmwP5Y2jqaJK8XYLaF9pGkIP8IOK9G+HxJN/n/nnDj85K82aULAFHXNelfD//AJfT6xQf+1K4ZK046dzvyz/eF/XRnbUUUVqfTnlHjBf+Kk1Nx1DR/wDotK5OJvnI7nNdf4uZV8RamCerxj/yElcWzhZWHcVNFOTmj5HFr99K3d/mOAU7xzjdinRkOB6ZJpIkyT780kh2xkdK33djlLgzty34VEvzFg3XpTg2eR0xTW+ct2OAOKwSGQSIVbHYcCsiTHnN7ZrWmLbhnsayJziQgdTXoYe51YRXkxqD7p9K9j+Ev/IrXZ9b5/8A0COvHMbcV7L8J/8AkVrrH/P63/oEdVifgPXwj/end0UUV556wUUUUAYvi048NXP+/F/6MWuH3jnP94AV3XikZ8O3A/24v/Ri153IrRKz/wAR5wa8bM4qdSKfY8vGScal/JfqTIgO499v9a5bWVJuXYdsY/KuoVG2sDxuANc5rWA7EfeA6U8pdsQzzcT8MT0T4Xf8ixPn/n7b/wBASu2rifhd/wAivOfW7b/0BK7avbn8TPdwf8CIVzfj0Z8Faj/2z/8ARi10lc747/5EzUP+2f8A6MWpjuisT/An6P8AI8UCjkDoKkhbKkfwmm9MgUgPJA6V1PVHyD1Kc4xM2O55/Kltf+P6Ju1JKuXdiTwf6U60bdcIR0rd/B8jtv8Au/l+htSSDzpB3bJrIlGAM1dLMHckc4qpc8bfU1y4ePK7HLTd5EAP/wBamE8kd6mUDYKaR145rrT1OlSVxsfCqD1zU8bHyz64pgXGKkAYAjHG3g1Mncio0xynKDPpVG5yz/jVsdBnrj+tVJCBKSfWnTWpdBWm2i1p+VMuem2vTfg/xpOvf9hX/wBt4K8xs2y0uP7ten/CH/kFa9/2Ff8A23grmxG53Zf/AL1L0/yPRaKKK5j3AooooAKKKKACiiigAooooAK+ffFx/wCLleIlxx9phP8A5Lw19BV8+eLmx8TPEQ/6eYf/AEnhrah8Rx49fuGF1/x9Mc/wrx+FV75gUgUe/wDSprr/AI+QR3C5/Kq92oCRnuOKdFfB/XQ+Y
h8Z714d/wCRY0n/AK84f/QBWnWZ4d/5FjSf+vOH/wBAFadYvc+wp/AvQK474k5/4R63x/z9D/0W9djXHfEg48PW/wD19D/0W9JGGO/3efoeXoVLEHAOcD34qYRfK2OhHFMihDS7jjirUSth85IUYFZ1JpbHyaVyCFAQEPY/nUdwCrsAuCTgVcaDZEuCN2eagu25VgM7aUKnNO6Bqy1F0fH/AAkelAdr2D/0Yte814VpEYTX9KJxlr6D/wBGLXutdEndI9/J/wCFL1/QK8t+JrbdXT1+yp/6G9epV5X8TQW12IA4/wBET/0N6mKu0bZp/u/zR52ed2e/NOTH7wj+7/hTSpUSZOT1/CkhPMh7Fc/yr0uh41rxdv62LcJ/c5xyAKlcbhlePpUUXCZ7YHFP5A69RXPJa3OSXxDXTGD1Jp4YhAuO9HTG7n0qKSVU470K8tASctB+RnAGAOahRgmXbsaYJeAOuRmkxuAUnjv71qoW3NlTtuWEfcBk/wAQNesfD48XuP8AnlD/ADkrySIbnHpkV618Phhr8dhHB/7PXDiUvaR+Z1ZerYlf10Z29FFFSfSnkvjAFvFWpr2DRn/yElcp5Yd247/nXV+MMjxTqjA9DH/6KSuWQENuJ4HUUoOzlY+Pxf8AGn6v8xV+VN/vRKu/t0NPkwV2DuBTNxZOuDmrTe5zkyZAK44x1qMnLFhxkYx71YjKmJckZ5pkiKMMuOe3vWSl71mO2hSlJIyRjmsqUZck9c1rTZ28j+Ksqf8A1h+tejhzqwu7IsEryecV7J8JP+RUuf8Ar9b/ANAjrxwA5yTxXsnwm/5Fa7/6/n/9AjqsT8B7GD/ineUUUV556oUUUUAYviw48NXR94//AEYtcEHEoCjDEYz7V3/igA+HbkHoWj/9GLXA2aAO/GMZBPrXi5q0mpPdI8zFpuul3X+YHcuwcnJPNcxrGTcuPeuoUmSbphUOOe9cvrSst4xB45P608of7+z3seZil7qa7no/wu/5Fi4/6+2/9ASu2rivhf8A8izcf9fbf+gJXa17k/iZ7uD/AIEfQK53x3/yJmof9s//AEYtdFXOeO/+RM1D/tn/AOjFqY7orE/wJ+j/ACPFWyOBz70dsD86QHkg9cDmlwTlRxx1rrPkSnNw7DNLaMEuo17Zpk7bZyD6U62Aa7Qe/Wuh/B8jut+717GrcHbPN6AmqrqGA9RVm7OLi4PUAk4quASDz3rjpfCmcK0dyEgqFG3gk5PpQBwTUkpKqoPOe9IR8oA71unoaKWg0AZX61M42j8KjGA68d8052PmY9ql6sl6shbp74/rVSXl3B471dkGFB74/rVRlG8k8k8VtTZ1UGtyzYrgzHrkE16b8Hv+QRr3/YVP/pPDXmViSGmBPY16b8HOdH13/sKn/wBJ4a5sRuzuy/8A3mXp/kej0UUVynthRRRQAUUVl+Jmu08Kaw2n7vtosZjb7OvmbDtx75xQBLb63pN3qEun22qWU17FnzLeO4RpEx1yoORToNY0y51CXT7fUrOW9h5lto51aRP95Qcj8a888J3Fzpg8K2lpqVpfw6nYO3kR28afZSsQYMpUbtu7CneSSSOc8VW0f7D/AMIx8O/sflf2t9uTzduPN3eXJ9q3d+u7dnvjPagD0z+2NM/tT+y/7Ss/7Qxu+yeevm4xnOzOentRNrGmW2oxafPqVnFfTDMds86rI/0UnJ/CvMB9l/4QaP8A1X/CQ/8ACS+3nfaft3Pv/qs/8A9qdrH2L/hFfiB9q8r+2P7Rfyt2PO37Y/su3v8A3NuO+fegD1qvnzxgM/EjxGT/AM/MP/pPDXu19JqUVgG0+2tbm74zHcTtCnudwRz+leAeInvZPHOvvfwQQXZuYt8cExlRf3EWMMVUnjH8I/rW2H+M48d/BZLcZM6MOhAz+VV7wklD2zU0r7du70XH5VDdZMaOfu5yK0p6OJ8xT+JHvnh3/kWNJ/684f8A0AVp1meHf+RY0n/rzh/9AFadc
z3PsKfwL0CuO+JAzoFr/wBfan/xx67GuO+JBx4ftsdTdqP/ABx6SMMd/u8/Q80WOTeSCMVfgYrAQ/UdapRCXfwBgnP6Vo5VosfnXHiZbJny9NdSszMqPIe4yKpgsVEZPzEgmrEk5Z2H8AFVHO2Xf3PSt6MXbVGcmXNLy/iPSW7LfQD/AMiLXuteF6I4Ot6Yo7X0H/oxa90rd7JHv5P/AApeoV5X8Tf+Q/bn/p0X/wBDevVK8t+JyhtYiz2tU/8AQ3oi9UbZp/u/zR54/O4nrtqOHGHXtt/wqZhkHHpUUa4Mh/2a9BbHixfutFiPLKP7uBmlkYqRRD9zHqBSE5PNZ9TB/EOMgY89hmqU+Dg/7WandgW47dahUbxk+taQVtTakuX3hsHMu3sAalxgjHVqWJNp4pqMQwLU27suT5m2idONoHXNet/D8H/TSe8UH/s9eUW8fzZPU1614CGPtg/6YwfzkrzcVL97Beprl2uJX9dGdnRRRSPpDyLxiSPFmqj+E+V+flpXL7wJSD0NdZ4xjL+JdTx/fjH/AJCSuPm4DZ65p0rSk0fIYtfv5er/ADJCQZ89himbmHGexzTSDkE9GNB4Qmt1E5yzB8xBPRQf8KnbajD0GGH5YqnCSFye9SFyu3PQfyrCcG5BcSZ1deB/FmsaUgyvjqK0JnH8Pas1vvsfU124eNkdWFjq2R7sDNey/CT/AJFW6PrfP/6BHXjIBVfxr2f4THPha6/6/W/9Fx1eJ/hns4T+Lod5RRRXnHqBRRRQBjeK/wDkXLn/AHo//Ri1wcBJhiZeNzc5rvPFf/IuXP8AvRf+jFrh44h5SbvvKSePevDzeSTjf+tzzcSm6+nb9RhyrnHQtz+Vcvq8hN5NnoCAPyrqZiot8+vH64rldaj8uVj61WTWdW79DzMYtEj0f4Xf8ixP/wBfbf8AoCV21cT8Lf8AkVpv+vpv/QErtq92p8TPcwf8CIVznj3P/CFahjr+7/8ARi10dc34+z/whOo46/u//Ri0o/Ei8T/Bn6P8jxQHjB60/ngDrUeM8HrUoBIAHWupnyMjNuVLzt7U+xz9qjB6gjNJP8kzE0+yP+kRt6tXRL+H8jvb/c/I0rg/6RMT6nNQIc5FPu3DXDgdN3NNA5wK5YL3Eef0Gy7iBzSKfk46059w69KZjk+hq1sUthcDKkeppACzqx9KcBkAehzTkHSi9hXsRTg/Lj0qhNv6KeQa0ZfujHXH9aou3z+x/nWtJ6HVhmWLFfmmz6V6Z8GsjRddz1/tU/8ApPDXmljz5vqVNem/B3/kEa9/2FT/AOk8Nc+Ie/yO/L3/ALTL0/yPR6KKK5T2wooooAKKKKAKVno2l6dcTXFjptnbTznMskECo0h/2iBk/jRBo+l22oS6hb6bZxXs3+tuY4FWR/8AeYDJ/GrtFAFL+x9L/tT+1P7Ns/7Qxt+1+Qvm4xjG/GenvRNo+l3OoxahPptnLfQjEdy8CtIn0YjI/CrtFABXz94u/wCSjeI/+vmH/wBJoa+ga+ffF+f+FkeJB/08w/8ApPDW1D4zix/8BlS6+WYHrlV4/ClmcNaRDuM8flRcJulz/sj+VEyBbWH1PWtY2tH+uh80mtD3rw7/AMixpP8A15w/+gCtKs3w7/yLGk/9ecP/AKAK0q5XufX0/gXoFcf8RsDQbUnoLtf/AEB67CuN+JQz4etv+vtf/QHoRhjv93n6HnltOAW3AZzxVlgArMpzu5xVKBo9gJIyOM1K5ZIsgls8AVxVIXnofLRloROVO6NTkqRmqchJJIH3TgVZuosRFlbDE8471THDbC3PWu2ila6M2X9BXZrulDPP22D/ANGLXvNeC6Mc+JNKOePtsAx/20Wvequp0Z9Bk/8ACl6hXl3xMBbWoQP+fVP/AEN69Rry/wCJTY1uH/r1T/0N6hbo2zT/AHd+qPPnGNw9qr5x5gH90f0qaRidze1QRj55Cem3/CvRitDxKastS4mDDgdSBQ2CcetOhAaP0
yoxTcZx271j1Zz9WV5CN5C1Gin9akIAc9zSE4UnpzW62OpOysiRcmRR2HOaTYCwp8SkgDuaGTYMZ5qL62MebWxYjOG4r1P4fFiL0sMfuof5yV5P5mwpjnJAr1zwJjN5j/njB/7UrzsRpVh53OvLI/v0/wCtmdlRRRTPpTynxhn/AISHVCOodDj/ALZJXFykuQp4PU11/jEN/wAJVqJDEAsg/wDIS1yJGJSCc4706CSlJnyOKf7+fq/zF/gyf4aY+fLOeOoFTKN2Vx1zUcg+X2FbxepzJ6iRk4C9qeWI4I//AFUxT8m4fSkD7gd3Bxim1dhYgmcFRiqTrlgauTAYGO9VTz9a6qeiO7D6K6GjHJ9/6V7D8JOfCt3/ANfz/wDouOvHAMHH417J8JRjwrdf9fzf+i46jE/wz1MH/F+R3tFFFeeeqFFFFAGN4rGfDlyP9uL/ANGLXBux3jHQjn2rvPFZ2+HLgns8X/oxa4KPBjkO7OwH8eK8TNF78W+x5mL/AItvJfqIzo+6Pd86rnb7ZrmddOZlx7A/lXSooMrApyU+9XNatEVvGy2QKvKbKv8AI8vEt8qbPSPhgMeGbgf9Pbf+gJXa1xXww/5Fq4/6+2/9ASu1r2p/Ez3cF/u8PQK5vx7n/hCtQx1/d/8Aoxa6Suc8ef8AIl6h/wBs/wD0YtEfiReJ/gz9H+R4sqZ471Ix8tQF5IFEY4znJpJGABPU1u3d2Pj92ZsxLSuCKdaEG4jA7NSS48zPvT7VMXqehrql8D9D0G17O3kWZW/fkLz3p/cenNMk4uiQOM1Oy/MAOBiuduyRwy0SInyv0pgyW6cU6UEHrn2pACGBx1qlsNbAO31FOiGFAByRTUB7jowp8a8ZzyOtKTEyOThQO/NZ7fMT7GtGUcAd6zHYrGxxkgn8a2pao68LrsXbAhjJtOcda9M+Dn/IH13/ALCp/wDSeGvMdJGZJh0+Xr+deofB/jSdeH/UVP8A6Tw1z4j4mvQ78ArYqa8v8j0aiiiuU9sKKKKACiiigAooooAKKKKACvn7xeP+Lj+Iz/09Q/8ApNDX0DXz94v/AOSi+JD/ANPMP/pPDW1D4zizD+AyrIxMigd8Co7wsFiB6VIm4M3qRxSX5ysSjqK1jpNI+Zp/Gj3nw7/yLGk/9ecP/oArTrN8O/8AIsaT/wBecP8A6AK0q5XufYU/gXoFcf8AEfnQLUet2o/8ceuwrjfiVn/hHrUjqLxf/QHoWphjv93n6HnUVuhDdMVIp52kfKp4/KoIVbBx91hn8asGUEKn4GuWpe9tz5VWsVZ8qSzHK/eAqlIB97HzVbupfM3IPuiqDPjB/AV20Iu1ybXehpaMR/wkWkAf8/sBP/fxa97r5/0LP/CSaTnvfQf+jFr6AqqqtY+gyhWpS9Qry74lc63GPS1jP/j716jXmHxJjLazGw7WqZ/77esb2Zrmn+7v1R51NwrgdcVDDklgTkbatSgbnH+zmqsWQ0n0r0ou8TxabvBl6L5YR7Com3BfvdKSOTKDH40jPgN+tQotMxUWpDBxz1J4o4b5SO+KWMHYWNI3qOtX1NepPG21s+9OfBbd2xVcPualVt4YdgcGocdbmbg73HZw6A/3hXrnw/bc19z0igH/AKMryQL5kiegIr1n4eIFfUSO8cB/9GVx4m3PD5nZl7/2iPz/ACZ3FFFFQfSHkfjRiPFGpAf3oz/5CSuRYMWLZrrvGRH/AAlOpqe5j/8ARSVzNqu6dgfu9qcJcvMz5DFfx5+r/MVRhmJGNpNRyLiIA9SuaszMrbwO1V5CH6+hFODbdzm6kCcrsHfmnbAdxNNiwhB9KlbDBmXqRW8nZlPcoTcH2AqojZAarlxndj061RAztHbNddPY9ChrEf2/CvY/hL/yKtz/ANfrf+i468dK8fhXsPwkOfCl1/1+uP8AxxKzxH8M9DBfxDvaKKK889YKKKKAMPxeM+GLof7UX/oxa89tXI89G67gPrXoniz/AJFu5z/fi
/8ARi159FCWuVfjaR81eVmMo7S7fqeRjU/bprt/mSNMqTNGByEByPrXN6mfMmc5/wCWh/TNb4jKTTFyOT8v0wK5/VY9lx/vc1OVxiqunY83FOTWvc9H+GH/ACLM/wD19t/6AldrXFfC8Y8M3A9Ltv8A0BK7WvZn8TPfwX+7x9Arm/Hpx4K1E/8AXP8A9GLXSVzXj/jwRqP0j/8ARi0Q+JGmI/gz9H+R4tHIfWhnDDJ71WVh17mlD7mJPtXdya3PlnS1uQOcs2emasWzA3UY96rcEH61PZKDexn3xWk/hZ0zS5GW5wFuWx0DYqdfmzUV8pju2Hq9Pjzu5+7jiuN6wTOCS0RG6kP14xQFO0E+uaeR8xx2pGbCDNO7JuKi5U8YoUABj708MSjfpTOp47nmp1ERSkYNZrjD49zV+4O1KoSEktjrXXRWh3YVaXLNllFkI67a9P8Ag8c6Rrx/6ip/9J4a80035vMz2TNemfB7/kE69/2FT/6Tw1z4h6s7sv8A96lft/kejUUUVynuBRRRQAVHPPHbW8txM22KJC7tjOABknipKq6l9m/sq8+2OUtfIfzmHZNp3Hj2zQBnaV4s0jWY2ltHuxCsPnma4sZ4IjHx8weRFU8HPB6c9Kfo/irRteneDTrwySrGJdjwvEWjJwHXeo3Ln+Jcj3rzTUoo7rTr/Q/BGq3erafLolzHPD9pa5jhZVURKjnO1m+ddgPTsMVuXGoWni7xDpx8NyiT7LpN4k7oNogMqxrHEx7NuUnb1G2gDrNN8XaFq9/9isb8SzkM0eYnVZQpwxjdgFkA77SaIPF2hXGr/wBlxX4a6MjQr+6cRtIuSyLIRsZhg5UEkYPHFcVo+oWerDwFpem5+36UQ99CEIazRLZ4nSQfwkuyqAevUcVU065hk8OeG/CyEnX7LWInubbafMiEc5eSVvRWUEhuh3j1oA9br5/8XjPxE8Sf9fUP/pNDXuuo2c99aGG31C5sJCQfPtljLj2/eKy/pXgPiC3msvG3iCGa8nvZVuoszzhA75t4jyEVV4zjgDpW1D4zhzD+A/kTqmWz3Cg1BfptZB3xV2CPMqv2wAR+FVdTjJYEkjpzUU5/vUj5iG6Z7p4d/wCRZ0n/AK84f/QBWlWb4d/5FnSf+vOH/wBAFaVS9z7Kn8C9ArjfiUdvh22J7Xa/+gPXZVxvxKx/wjtvnp9rH/ot6FuYY7/d5+h5mk7gjaBTsfKSD8zVAkoUHgZ7VIAUQtk8jilKNmfIkMzLjYOuMVAF+bJ+6op7xHdjJyRTnTbiP+I810RairIpOxPoIJ8SaSx6fboMf9/Fr6ArwTSDjxLo6qOPtsGT/wBtFr3upqu6TPocpd6T9Qrzf4iYOqop72yD/wAfevSK80+Ih/4nUZ9LVD/4+9c01dfNGuZ/7uzz6bARsen9apx/ec+1WZyDu56rVWE/f9MV6dNe6eDSXuMWMMOR904NOZWIOOmKkjGSqgcYxUuwbueMjFDlZhKpqRqh2nPSokj25HbNXCMoDjgdart8r4/vdKUZNkQm3dDAmX46U/ZhNo6k077hUDrUiYXcxPU5FDkwlNiYVQq9816p8O2D/biP+ecA/WSvLY4yWVj/AHq9R+HQAbUAP7kP85K4q9uePzOrLn/tMfn+TO6oooqT6c8h8att8U6pn1jA/wC/SVg25Tbs/iUDd+Irf8Ypv8XakD0zH/6KSsGfbDGyrjdxmobTfL1PkMT/AB5+r/MrO237vTBFVy5y34kU92ymB1DVG2CAe+OldkI2MEiukn3s9jUoJXgdDUJ+U5PANKD8gIOR1roaudEop6oW65Ge561R+65q1IxdsdsVWcAZOfetKasrHTQVo8rHFjuwPXFexfCTP/CLXmf+f9//AECOvGsZJbNezfCX/kVbr/r+f/0BKyxP8M9DBq1T5HeUUUV556oUUUUAYXjI7fCt4fRov/Ri1wyFWXZk4ZTn8eK7nxku7wrdr6tEP/Ii157G+YMA8jGPf
FeVmUOblPIxkrV/l+rJJYx55OTjYFH51g6yo+05HVRg1tl9p2k5IxWHreVuS3bj+VLLE1WSb6f5Hm4izjp3PRfhh/yLNx/19t/6AldrXF/DH/kWrj/r7b/0BK7SvYn8TPfwX+7w9ArmviB/yI+pfSP/ANGLXS1zXxA/5EfUvpH/AOjFoh8SNMR/Cl6P8jwn6elSRjmkVNuT75qeFAOT1NehKVkfNVJpIpyoHyBnrUtgNl3F9ajk+WdvQmpLTP2uDA434NVP4H6G0m/Z26WNLUeb1iO5pikFQPUUt8c3b455NAxx6EVxQ/hx9Dz5DTgNkduKSRN+0ntzSkAvwelHoKsnYN2E2txnipMK2V7jmmRISNzDtUjEAk9M1Et7IGV7hA0KDsf8azyMSEds1qzAGFQD0rMc4kOfpXTRd0dWHk7NFnTF3ecPVMV6X8Hf+QRrv/YVP/pPDXnWj4MkvoFJr0b4PjGk68P+oqf/AEnhrmry9+S9D0cvf+1S9P8AI9GooorA9wKQkDGT16UtYHi6CyudJiivdFu9WzOPJhtBiRJNrYcPuXy8DI3bhjOO9AG/RXnthZePdHtrm+jniuLSNd8Wj3kxuZ2A6qLgBcMewbeM969CoAKKKKACiiigAr5/8X8fETxIf+nqH/0mhr6Ar598Ygt8RPEYBx/pUP8A6TQ1tQ+M4sf/AAWWLRXZ0xkhwB9KTVYsBM8dTirWl5MQA644NRa/xKNp421wQqN4tQ9T5tQ/d8x7J4d/5FnSv+vOH/0AVpVmeHP+RY0n/rzh/wDQBWnXU9z66n8C9ArjPiX/AMi3b/8AX0P/AEW9dnXGfEz/AJFuD/r6H/ot6I7mGN/3efoeWRlM4YjNSliFLH7oqukat8x61J5gkQoQQBWko6nyI2Fi0+9+mM4NT7MgykfMTxUNviWUrjj3q6iFpgekaZ4qKsuVjtqT6Tb7Nc0tz1N9B/6MWvcq8P0x2fX9MP8ACL63H/kRa9wqU24rmPoco/hS9f0CvMPiSxXWogB1tF/9CevT681+Iib9biX/AKdVP/jz0pNLVmuaf7u/U81mJDH6VApKqxx26fjVu8UB2qsvf/d/rXpwd43PFpv3CzAD+lT4BYMeARTLdd0CsOuAatLAGI6bcDA965qk0nqcs37zGPjZtUdetU5GAYZHI6VoSBUIVf4vSqMyL5xPoKKLQU7X1G55BPWp1j3tjGFFQRruIJ7VdwRGEX7xHNVUlbRDm7aCIcuAB8ua9K+HRy+pf7kP85K8/SNIogCMk16B8OgBJqWOmyH+clcUpKU1Y7MtX+0x+f5M7uiiiqPqDyjxev8AxUeqsByHj/8ARaVyty5dvwFdZ4rbHinVVPTMZ/8AIaVzDIjLnvmsqbtUbf8AWh8ji/40vV/mzOTIQ55PWh14yPTFSMuSdtIwYIAT1Negnrc5763KpG9famhSq46ipgBjbjrTQCpCnnHNa3N1LSxFtPOOMiq7ABtp54q8V546npVOQAyN6jitIO5vRldjCQGI969j+Ev/ACK13/1/P/6AleNbcnP517N8JRjwrdf9fr/+gR1nif4Z6eDt7Q7yiiivPPVCiiigDC8Y5/4Ra7x13Rf+jFrzW3J8kknBzXpfi848MXRP9+L/ANGLXmgAAX0JORXDi3sjw8y/jp+X6sVX3S7mbqO9Z2tDM4z02j+VaRgAcsfu4GPzrL1t8LuBGelRg7OvFx7HnTT5bPueifDD/kWbj/r7b/0BK7WuK+GH/Isz/wDX23/oCV2tenP4mfR4L/d4egVzfj4Z8E6j9I//AEYtdJXO+Ov+RM1D/tn/AOjFpR3ReJ0oz9H+R4gIiO/B5qYKAQWIGeBQBx/npTlTzCC33c8A11Sl3PkXJvczrjAlf1zxUtjxcwjuWplygFy3tTrEZv7cf7QreT/dP0O3el8v0NC5TEsx6nNRqeQO2KtaooWeUIMZaqYPr6VyUnz00ziaJMKWOMZoXAJzyc8U1cZ44NH8+oqrE
j9xUAGmytuj44NISQuTzUTH5TzTjHW40hJZdseRzx/WqLncQferLkEBO2KrnCk+xrpppI7aCSXmaWkELJKP9jFejfCL/kGa/j/oKn/0ngrzbS+ZZP8Adr0j4PnOk68f+oqf/SeGuKuvfb9Dsy7/AHmXp/kejUUUVie6FYPiq31F7fT7zTbdrqSwvVuZLRZAhnTY6lQSQMjeGAJAJUVvVyfj+OWXSLFVstQv7YXyG7tLDPmTRbXyCQR8oba3XnaB3oAk0mTVNX8UjV59KutLsYbJ7ZYrt08yd2dG3FUZgAoQgEnJ3niuori/CFpocGrSvpnhXVNJnMBDT3cLIrLuX5QSx5zg/ga7SgAooooAKKKKACvn7xdn/hZHiM9vtMP/AKTw19A18/eL3A+I/iNe5uYf/SeGtqHxnFj/AOAzf0iJEhDHqVBrK1wKroi9AMVqWnzwRqD0UZ/KsjWgVly38XNeNg03jXJs8Go/3KSR7R4d/wCRZ0n/AK84f/QBWlWb4d/5FnSf+vOH/wBAFaVeo9z6in8C9ArjPiZx4bg/6+h/6LeuzrjPiZ/yLUH/AF9D/wBFvRHcwxv+7z9DyeMMxOOlSuRs2r1qsjuD8oyKlVO4JJaumS1uz5OSsy3bR7gEXrVydSAsCdSDmo7aPyIw4+8eMGrZTyoyzffb1ry61X95p/TLjHQNPKrrelRjqL23J/7+LXtleKadAU1nTHb7xv7f/wBGLXtddFNpw0Peym/s5X7/AKBXm/xAXdriZ+79jXP/AH09ekV5546wdcUHvZr/AOhPUVnaFzbMleh8zzW/XbI4HoKpx8sw7YrQ1EYmkPsKoRZJORg4r1KLvTTPBpv3GaNnxCSf4QDV3yRkBe3NV7CPdDv/AANWxGyBQvJ759K8+tP33ZnO1qQvGsQJ7ngVSnhxIMevNaXlYJd+oHAqhKrF2UDnOauhPXcWqZEql5lVegYZ+laMaLCHlbqRxVbKxgKPvHirJjNxKM8IvpRWlffRBe4lvFLJ88mMbuK9C+HZBl1PH92H+clcKspdkSEA4IzXcfDoEXWsA+kP/tSueLcp66Hfly/2iNvP8md5RRRWx9OeVeLv+Rl1T3aP/wBFpXJyAq529K6vxfn/AISTU8f3o8fXy0rlFzuYt1LVnT0lJnyGM/jS9X+bI2QR4A+6FzTJQwyx+6BVtUDrJ64xTbmI+UV7VvGp7yTOfzKMSZiOepPFPVTwG696fCuRjvVqOPeMEYPerqVbNjbbZQaP5s9qzpvlmfH97NbrwbTj8qxrlAszZ9a3w9RSZ0YaXvNMgGeT69K9k+E3/Iq3P/X6/wD6AleMxMSfmGOeK9m+Exz4Vuv+v1//AEBKvE/AexhVatY7yiiivPPWCiiigDB8ZjPhS8A9Y/8A0YteZxPulwegr0/xdz4Zus/3ov8A0YteXyKrj5T1FceJs2os8LM/4yfl+rJsyMArEcZ6ViaxDyB2zWqCykAcg1na03zoO4Az+VLBJxrpI86Tukz0f4X/APIsT/8AX23/AKAldrXE/C//AJFif/r7b/0BK7au+fxM+kwX8CPoFc945/5E2/8A+2f/AKMWuhrn/G3/ACKF/n/pn/6MWpReK/gT9H+R4xj9f5U5I/Nxn7van4A59afEvmkY+52NaSnZXPjkZt2g89x3xTbBP+JhbA9Q4qbUlMcxx1yKhsmP9p2/+8K6U70W/L9Dtp35DW1L/j7kXsGzWWr8n0rSud7Xkxxwcn8aznGDkd6wwytBR8kc6s5McGB6dcUoPTPWodw3Y74zTlNdDiNxJCSFJqJjkEHpin9FqGToc04ocI6iBc49MVWkVv3nr2q9EmVH0qnN8sjCtIPWx00Ze+0W9JY+ZLn+5/jXpfwe/wCQRr3/AGFT/wCk8NeZ6WoDtg9VGa9N+D/Gk69/2Ff/AG3grkxHxv5HbgLfW527f5Ho1FFFc57YVzXjUTHS7Tm8Gn/a0/tE2RcS/Z9rZxs+fG/Zu287d1dLW
B4uudPtdJifUvEU+gwmcBbmGVIy7bW+TLqwwRk9M/LQBzXw81G61T+xmjN81vZ6ItvfPcJIqNc5j2gb8bmUCTJGfvDmvRK8+8IahFdeL5otN8Vah4g037CzO8zRtHBLvUAbkRQWYbsDPAVs5yMdrqth/amk3dh9pmthcxNEZoCA6AjGVJ6GgDml+IVp/aWrpJaOumafYverfCTP2hUYq2xMdNysAc8444wTZ0/xXdf2hHZ67pI0pri0e7t2FyJgyJt3q/yja6hlOBkdcHiuXv8A4c6vdajfWo1q6k0+XQmsYnlit0QNltkZWNFIVcq2QB0xkjitn+zNa8Uava3GsaWdKgtLC4t2zOkpmlmCqSmwnCAKfvYJyOBigC1pPjK5vrrSvtujNZWOsKx0+4NwHZjsMiiRNo2FkBYYLdMHBplr44muJrO6fSDHod9efY7a++0Auzliqs0W35UZhgHcTyMgZqlpek6/dyeF7DU9MW0t9AIkluhOjrcukLQp5YB3AHeWO4DGMc1WsvD+urpuj+F5dNCWOmahHcNqXnoUlhilMkYVAd4c4QHIAHJyeKAPRSQBknFfPvi/B+I/iIjB/wBJh5/7d4a941HTLDV7Q2upWVveW5IYxXESyISOhwRivn/xFp9ppnjvxBZ2FrDa2sdxFshgjCIuYIicAcDkk/jW1D4zjx/8Fm9Yys0ShRkEYJ9MVna0zPMTj7vT3q5pcuyDB7k4qrrDL5g2jnFefQjy4t6Hzd/cWp7V4d/5FnSf+vOH/wBAFaVZvh3/AJFnSv8Arzh/9AFaVdb3PrafwL0CuM+Jgz4agH/T0P8A0W9dnXG/Ev8A5Fy3x/z9D/0W9EdzDG/7vP0PIkJBwFzV23gKgyE544FQwqo4YZIrXs7fIy/3RzzTxNZQifK/E7InsrfYQ8v3SM4ParDRb5WlbhFzgdqSGTzi0e0gDofWnyyBisCf8CIrwKk5yqa7/kjsjGKh/W5Ba7m17TWA+T7db/8Ao1a9nryC3KDU9MRcZ+32+cf9dVr1+vWw0uaktOv+R62WK0JeoV5z48OPEcQ/6dF/9DevRq80+IDhPE1vnvaJ/wChvWlRXg0VmbtQ+aOF1QYnfjsKzE5Y49K09TYPNJ/uj+dZ0YAkPsK7sN/CVzwYP3WbOkJmFsnjgVb+dAPlyTUOijfayDGCT1q2JcpvZCD6GvJxEn7aSJsVxHI5LuCoXt61RZyjFnGCxwBWqzNMAoBUdTVC5VC4ZhgKeM1rQnd2kjKSRVjXa4dz1xwe1aQBKiOMZbviqAjaScMeFz0NbdtGkKNK+MkZp4uoopPdjhHmY+C3SBVJxuPtXWfD1g15rH0h/nJXGGSRpNxJ2hv6V2Pw5INxqx9RD/OSufDRlzuUndtHo4Fr6xBLz/JneUUUV2n0Z5Z4vGfEGpevmR/+ikrkDkbm9+ldl4sA/t/VGxkhk/8ARSVyAQmNieud1Y0Ze9P1Pksav30vV/mIoYEOM5xyv1qUEsQGHbJqSNMkH1qGckS7QCO2avm5pWOW1kEMPmbm+7sb86uK0TRZUjJ44qrFJjOPu96QAjhG4zkYqKkXJ6spOxKw3KB02n865+94nbNb8QOdrH8T3rH1GMb39QTXVg3abRrQdpq5nIvO7tivZPhJ/wAipdf9fz/+gJXjgbAAH1r2L4Rnd4Uuz/0/P/6BHXbifgPbwd3Vud9RRRXnHqhRRRQBg+Mzt8KXh9DH/wCjFryreQCTwAua9T8b/wDIoX31j/8ARi15NLueM7WwCuKynFNo+ezZ2rr0/VkqSkqpPTPX2qprBEkaOvfPNKshP7vpx1qO9AFtGm4Ejn8KqlTUasWeZGTPTPhdx4Wm/wCvpv8A0BK7auK+GH/Isz/9fbf+gJXa1rP4mfV4P+BEKwPGv/Io33/bP/0Ytb9YHjT/AJFK9/7Z/wDoxahlYr+BP0f5HkOwK2ex4qVIztyowo6YqRUG75sEYwKXduIiTgeorCVRvY+RSMO9J
kuGz6/0pmmr/wATODP9+pr9Nly49Oaj07/kJ25z/HXqX/cO3b9Dppv3LGtdZF5MAvGTWbMhzkD8K17hSL2TvliapTIWywGPauLDTsl6I5npJlDac5Ixx1ppyD7etWcc4K0wqPTiu5TKUyEHjIOaQKSfWpNnYcc0+NME96pySK50h8MfG48ADpWdc/6wnvmtRlZYs5x7VlzcSNnmlRd5Nl4bWbZZsBkyY/u16b8IP+QVr3/YV/8AbeCvMdOzlh7Yr074QjGl6+P+or/7bwVjiPiZ6OX/AO9y9P8AI9FooornPdCuZ8bahqFjZabHpuow6dPeX6WxuZ4RJGilXJyCR12gD1JA7101c74xncadaadHb2UzapdrZj7dF5sCZVnLMmRu4TAGRkkc0Ac/GfFjeK/7EHjK0kJszdb49MQmPDqu1xv4zuyDnna3pXoVcD4DuraAaLa2Wl6ZZrqWijUbkWVuIiJQ0Y5wfune2M8/Kea76gAooooAKKKKACvn/wAXf8lE8Sf9fUP/AKTw19AV8++L2x8SPEYH/PzD/wCk8NbUPjOLMFegzRs0RY1LD7vzCs3VTsnHvWnBEWK+mBmsjV8iUF/73FcmF1xG583FXsj3Xw7/AMizpX/XnD/6AK0qzfDv/Is6T/15w/8AoArSrZ7n11P4F6BXH/EcZ8P22f8An7H/AKA9dhXI/EMbtDtB/wBPa/8AoD0m7K5hjf8Ad5+h5lbW6OQzDmtONt0flrwDxVaO2J5HQ+9WmYLGAn3ulediJ873v+h8zTVlqSu6xRhY/vD5ahZhbgn/AJaPQkawHz3zuI5+tN8oO4uZM/LnGK54xivT832NW2/UmsodmqaY56tf2/8A6MWvZK8Zs5Hk1nS842/brf8A9GLXs1enh7+z97v/AJHsZXb2crdwrzD4i4/4Sa3z2tE/9DevT68u+I2f+Ent8f8APov/AKG9bdCs1/3f5o4e6bLSk/3R/OqcePMY9yKmunKvLn7oXNV4MFyw6Mtd1ONoHgwj7jZv6MSLVwPvZzmrhYS4Yggdgar6MQLKRvQ1p+Ssjj+6MEfWvn8VUUa8my4xckRIN5CKCM9apXlustwEx8oGT+FbBKQjC/ebpWVfMYZQB956ywtSUqnu6BWgox1Kg+e4SJOgPNaJiaaVlz+7UAYNUo2S2GR94mr00zIoSPqRzmumu5OSUf68zKnazuR3U6LtjT9K674aHMurfSH+clcY1qIUDfxH3rsvhmD5urE9T5X85K2w6go+6deBbeLjfz/JnoFFFFdB9MeX+KQzeJNTAPG+P/0Ulc0qAvJu5A6Cup8T5/t3VmH8LRn/AMhpXMrjc5Xvg/pXBGT5527/AOR8rjI/vX6v82NhdQisR905FVbhyzBs5OTUmRLMye5FVHAXOOgGK7KUFzX6nG3pYkgYKApHyvUxjaP7pGOtV4MGMRn8KlZpEHOMCrmve0EP87LDIPBFZd8xa5kHbdV/zCxITqOT+NZF65N0cdzg1vhoWmbUFzTsVl4d/QHAr2P4SLt8LXf/AF/P/wCi468b9Sa9l+E3/IrXX/X63/ouOunE/Ae5hP4vyO8ooorzz1QooooA5/xtz4RvR7x/+jFrx+KRn2gHGc5/KvYPG+f+EQvsdcx/+jFrx5QF3Y69RTsmj5zN/wCOvT9WI5IfI4xwagu33Jv+g/Sph8zBW6nJoukUWbNzxgVcWoyVzzI7o9O+F/8AyK82P+fpv/QErta4r4Yf8ixP/wBfTf8AoCV2tTP4mfWYL/d4+gVheMQD4VvQenyf+hrW7WD4zOPCd6f+uf8A6MWolsy8T/Bn6P8AI8obhzkjGMCiEFTlaibJkXd93gDHrVrzFhj8tew71zSulZa3Pklq7mFqUpF0/XnFR6aCdUhB/vZp1+x+0Me2f6U3SyRqcLHu1eta2Hfp+h0w+C50E+VmkJ7MagkTeNycA9anyJZZlPUuWFQgsM4x715FO6XmjmluU2GDyOtNChug46GrD
Mf4qbww46d67VN2MyARg8Ac1IipGxOOSKf8rZA60whVYkZyRinzN6ANJMhJJ+XsDWVcKBKw961niyoZuoORWRcY88munD76HRhviJ7HIaTHXGa9P+ERzpmvn/qK/wDtvBXl9gTuk/3a9P8AhB/yCte/7Cv/ALbwVGI+I9PL/wDepen+R6LRRRXMe6Fch491G3t9Oiils21K3injk1CxihEr/ZismHIP3AGTcG45QjIrr64rxV4Z1/VL7Vn0m401LfVNKXTp/tYk3pgzfMu3jpN39KAJPBaaFaTTWui+GdS0kPGHeW6s2jDhcALvYknGeB6ZxXY1h6KniaOfZrJ0g2qxYT7GJN+/IxndxjGf0rcoAKKKKACiiigAr598Xgf8LH8SE/8APzD/AOk8NfQVfPvi8FviP4jHrcw/+k8NbUPjOLH/AMBmtAGZlCjjABrM1lfnH1Jrcshtizjk9KzdcjVJzz1Xp6YrysJV/wBq5T5+UbU1I9j8O/8AIs6T/wBecP8A6AK0qzfD3/Is6V/15w/+gCtKvQe59VT+BegVyPxC/wCQJZ/9fi/+gPXXVyPxDz/Ydpjr9sX/ANAeplszDG/7vL0PPt8u8hVBXPB/CpBGIcyljk84NMjlaPchTO08H1qVIzne5O3rg15s3byX5nzUVcYokkceYMJjOfepCWmmEaj91jkimh3uCY1UqvZh3FSyEQARJzIwOPWs5N3tbX8vMpLS/Qmt2T+1tLReovrf/wBGLXr9eQ2dt5Wo6azHLG+t+v8A10WvXq7sG4ul7rvq/wBD28tT5JX7hXl/xGH/ABU1v6fZEz/329eoV5r8QEDeI4Se1ov/AKG9dEpcqbHmivh/mjzq85kceoqrGuDx2GKu3sZSeT2IqkMqxPXNejSd4Kx4dP4bHR6IxWyYsPlGOf51o+RkjazfLzWfo3zWrg/dzWhHBJGMb2bJzXzmLdq09bMqGq2HLGsCnLEnHGayr2VlkJYDPatgW5XLSMSAOhrnr6SSSbLoV64H41WBSnUbvciumopbCQfPciRz1PStuJVijMknUjPNYdrFI06MQVXIrZ8p55WySqAgD0Nb4210m9CKV+hAwkum5GFHcfWuy+GoIl1YH/pl/OSuakliiBjTbu44FdL8NjuuNYP/AFx/nJSws3K+lkduCilioa3ev5M7+iiius+kPM/E0qr4g1ZCeS0fH/bNK5hmO3GK6DxZx4k1Rsc5jH/kNK5p5CiAEc1xRpr2kmur/Q+Uxkm6sk+7/NkS4V2OepyKq3AIIHYk1aJHmBT0x1qtM2QD6V30viucfUWBsofXtUhmYAbwOvP0qKJflJHXPFS7wR8649c+lVNLmE9xpKtnb+NZd2ubgnsBWm4U42nHNZtwP3zHNb4fc3w+kyoQSgJHPf8AOvZfhMQfCt0R/wA/z/8AoCV40xJJAHbFeyfCQY8KXQ/6fn/9AStcT/DPcwf8Q72iiivPPVCiiigDnvHH/In331j/APRi14+f9aT6V7F41G7wlej1MX/oxa8fkU72IHBJppq9j5zOP469P1ZDG58zkfWkuH2wYPRmpFDeaw281FfA+Tjoev0raMU5o8yCvJI9Y+GP/Isz/wDX23/oCV2lcT8LefCsv/X0f/QErtqyn8TPrMErUIhWF4xXf4VvV9fL/wDQ1rdrF8Wjd4YvB67P/Q1rObtFs0xGtGfo/wAjygQBIwpJ44BoYqibByw9alMZ2hSTxxn1prKqADhmrhU7vV3PlOWxz96wMzKOSDzTNPB/tSH03Cpr5f8ASJNo5ODUNgp/tOA5I+ccV7if7h+n6GlO3K0dCCFmkx1zUEoy5ZeT6VKy7ZpCOSTkUjJvII4PpXjxaTuYvXQgKyd04xUZDDnHsfpVzDLj5cg1GRuGSuOentWsahDiVGKkEZx2poPJxycVOVHO5cD1qM4BO0dR1roUkSNfJUFuKxboZmYjtW0wwuWPbpWTOAZGwetdWGerOjCu0yWwG4yfSvTfg
/k6Tr2ev9q/+28FeZ6fwZM/3K9M+D/Ok69/2FT/AOk8NTiN2epl/wDvUvT/ACPRqKKK5j3AooooAKZKrvC6xyeW7KQr4ztPY4PWn1FcJLJaypBN5MzIQku3dsYjg474POKAPNb3xjqHh++1RU1S61aOy0y4uZU1GyW2ZZUKhPLwiGRCSckBgODu5Gdj+09a8L6va2+saodVgu7C4uGzAkRhlhCsQmwDKEMfvZIwOTmrF14LudcmL+JdVjvo1tZ7aKK1tPs6qJVCuxy7ktgccgD0qfT/AApdf2hHea7qw1Vre0e0t1FsIQqPt3s/zHc7BVGRgdcDmgDJ0vVtftJPC9/qeprd2+vkRy2ogRFtXeFpk8sgbiBsKncTnOeOlV7LxBrrado/iiXUQ9jqeox27ab5CBIoZZTHGVcDeXBKE5JB5GBWzpPg25sbrShe6y17Y6OrDT7c24RlOwxqZH3HeVQlRgL1ycmmWvgea3ls7V9XMmh2N59strH7OA6uGLKrS7vmRWOQNoPAyTigDpdR1GDS7Q3Nwly8YIXFtbSTvz/sxqW/HFeB65exal46167gSdY5LqLas8DwuMW8Q5RwGHTuPevoevA/FH/JSPEfoLqH/wBJoa0pbnBmTth2b9kFGN2MBVI/KsXW2IuCWOQRwK0bRmlRcHgcHPpWTrT+ZOc9RXkYGm1i22eHUknSS8z2rw9/yLOlf9ecP/oArSrN8O/8izpX/XnD/wCgCtKvUe59RT+BegVyfxAx/Ytpn/n7X/0B66yuU8frv0a0H/T2v/oD1E/hZhjP4EjgEZCfmA3VIpaXKYKr05psUEcpDnr061YJLR7IuG6AmvHqTSdl/wAMfPQi2tRpaOGP90AWUYwtPihVUFxNguM4z1FPhtUgbzWHzHrz3pkoeeQD/ln3Brn51J2i9Or/AEN+VpXa16ILSd5dY08EEL9tt8Z/66LXr9eR27xjV9MjXr9tg/8ARi165XsYO3sdFbX/ACPSy6/LK7vqFebePnC+JrdSM7rRef8AgT16TXmHxEYjxNb4PH2RD/4+9dEo80Wh5o7Yf5o4/UkHmzEegNYTFhID26VsyuZPtAP90fzrKYfvQtduEvGHK+n+R4VF6s6jQI99mc9CavNM0DKpBYknkVR0NmXT5AD838NTteo5LkHPSvnsRCU8TO6ur/1/XkaqSjBa2f8AwS1ueWTkkKOoPes65gW4viRjYgxinPqO5CqZBI7iqqySmEqh/eMeTWlChUg+bbp/wTKpUjLTcWeQI4WNfuntVq4vlhi2py3fB71FsSKMsfvH0rNjdd5Z/XP410xowq6tbfiZczjt1L0avuMsj5yO9dn8L23Pq5/65fzkrgJLxpBtU9CK7/4XddV+kP8AOSumMJRTcjry5/7TH5/kz0Oiiig+oPLvFcZbxJqLZwA8ef8Av2lcvKp3NnnHIrrfEy7vEGqg9NyH/wAhJXLtyFA6dDXFCb9rJdmfKYyP72Xq/wAypMflDDtxUMwx9CMVNtxhT0waYVLxlj2PFd8HY4hkSlYw2c4qf5HXJGCeOagt3OAw+73qwyIwJHWip8Wo+pCVUdKyrkkyv+VakmFAx1JxWZcjErfWunD7m2H+IgQAkN05r2P4UDHhi7H/AE/P/wCgJXjgHOD03cV7J8Kf+RZvB/0/P/6LjrTEfAezg/43yO6ooorgPYCiiigDA8aNt8J3jHs0R/8AIi15LJJGeFx0JHtXrHjkZ8HXw9TH/wCjFrxkNtX3NHs1LU+dzd/vl6fqycYMqkHr1qPUI/3G7uTUcchUk57/ANKmvJQ9vH7AZq1GUakbHlx0Z6Z8LhjwtMP+npv/AEBK7auK+GH/ACLM/wD19t/6AldrSn8TPrMF/AiFYfi87fC14Qcf6v8A9DWtyuf8bNt8IXzenl/+jFqGrqxeJ0oz9H+R5izmQ8HGB+tPj2BdzkFqzEu2LYB5HJqTfkEua5ZYdpW2Pk1U1uUbw/6Y5XpUFocapD6Fx
T5XIlJNMtj/AMTOAf7Wa9ZK1JryLp9fQ6J1xK+B3ODTWGTkHa2OlWYWEokXuhwKgdAz7hwxHevBhPWz6DlHS6GZYDk5phbIzj8KcRIDnIx3qM7jz3/pW8UjNiEh1IZTjpVdvvbVGOOtWWbC/MDiopAxOE4OO9bU3YhkLrhfmIPFY0xxIwrZlXCZblsVjT8TDFehhdbm2H+Jk9hy0n0r034QY/srXsdP7V/9t4K8008fNLj+7mvSfg9/yCNd/wCwp/7bwVNf4n8j08u/3qXp/kej0UUVznuhRRRQAUUUUAFFFFABRRRQAV4L4oH/ABcTxN6faYf/AEnhr3qvB/EuP+Fi+JR/08w/+k0NXDRnn5p/u7+RZtpGVcL/ABVQ1cqZ+PTFWLaZYW2sfmP3QapakuJsk1zYeFsRc+c5rxSPcfD3/Is6V/15w/8AoArSrN8Pf8izpX/XnD/6AK0q3e59hT+BegVy3jxS2k2YHe8X/wBAeuprmPHBxpVn/wBfa/8AoD1lVdqcn5Myxf8ABkcLHaeYxYZwfer+2OCAnuBVZTKkrBU+TsfwqYRhB5jscdSDXzNeUpNcz07HkUUop2Wo2GCSVj5gwhGeKZfSiNRHH97HFSS3XmIUhALe3pVOQLbIzysck557U6MJSmpT+SFUlGMOWHzZV0+Fl8Q6XI33jewZ/wC/i17ZXiWmTNN4h004+X7ZBg/9tFr22vp4c3IuY6sotySt3CvK/iSceJrf/r0T/wBDevVK8p+JZx4ntv8ArzT/ANDetIK7Nc0/3d+qOMLYWb6D+dUhgysfQVaY/JL+H86pj5ZGHY967Ka0Z4FJaM6DT5jDafQA1G88cjFQTwKjtnxAMc4wCParIijYgg4rzZKMZyk+pk23oQM24hU61KrCAEfxGpm8uPgYLHpTABH80vHPGahzUla2gWsCQu43y9umKoS2xlkLnO0cjmrbSyzyFVX931DCmXEmQUiGSBitKTnGXr+APTYzlbYzY/CvSvhb01P/AHYT+slefGzMah2zkivQfhac/wBqfSH+cldM5Rkro7cvf+1R+f5M9EooorE+oPNPEpP/AAkOqgdd8ePxiQVzFwvlICOucGum8QkDxVqZPTdH/wCikrnrggsPrXmptYiS/rY+YxavOT83+ZnSNhjnsKawK2/zdetLcIxIZR94n8qfLtfHPDDH5V6KeiPPIrcEYBHynrU7wDblSaLcIEEZPJpxtmRMBiec1E6nv72GloVpI8LnseG/Csq6UidiegrbKHrjtyKyLwfO+fWuzDTvI0oO0ypH1/GvY/hV/wAizd/9fz/+gJXjbccds8mvY/hOc+F7o/8AT6//AKBHW+I+C57WCX76/kd3RRRXAewFFFFAHPeOf+RPvvrF/wCjFrxfdgc969n8cnHg6+J7GL/0YteLoAqBeuPWtaex87m/8Zen6sdtBkwaS5h+6vYjFAcEhjwAalvD+9ULyQB/Kqu1NI8tXTPUPhku3w1cD/p7b/0BK7SuN+Guf+EduM/8/bf+gJXZVjJ3Z9Zgf93h6BXO+OhnwbqA/wCuf/oxa6KsDxqM+Eb4f9c//Ri0r21LxX8Cfo/yPFkV1PQU7JJO7rUxVjjjp1qPyfl3MSCe1a86e58cVGJ3A/w0tp/yEI/96nPkMRj5R3ptvxewY7uK3esH6G0HodDZtmSR/fipGVJZCcnOKqROUd8DnJwPWribX74YdhXgVY8suZFwd1YYYT2pBC/cc1L5ZUDBJoyw7c9aj2j6D5UQGKTByB1NRvGx7DGKthmByV4pjAyN0wMdq0jUd9SXBGbLGoznOTWJcKFdsetdLKscS4LZJ6Zrn7lcSuP9o162Cqc1wpe7IfYZ/eY64r0r4Qf8grXv+wr/AO28Feb6ZyZgeyV6V8JQBp3iADp/av8A7bwVVd++0ell3+9S9P8AI9DooorE94KKKKACiiigAooooAKKKKACvBvE3/JRPE3/AF8w/wDpNDXvNeC+Jsn4j+JAP
+fqH/0mhq6e55+af7u/kTxxpkM+MgDk1l6iSZlBPJrUSN5G64XjNZuoEG43FTxkVjhn+93ufOLoe5+Hv+RZ0r/rzh/9AFaVZvh7/kWtK/684f8A0AVpVo9z7Cn8C9ArmPHLBdJtCf8An7H/AKA9dPXL+Ol36Rag9Pta/wDoD1lWt7OV+xli/wCDKxxUVwQNrLkg1KN8vByFqCF42UMww3TmpRK0q+WgKk96+dqRs7pWPFhK61Y5nihXKBSw44rNuopLqRZHJRB1U961EtUX5pMHufrWfdytK3lxAjkjPatMJJc/uavuycQny+99xFpxj/t/S1jI4vYc4/66LXtdeKaVbeTrumkkFjeQf+jFr2uvoKduRWdzuyi/JK/cK8o+Jg/4qe2P/Tmn/ob16vXlXxM/5GOD/r0T/wBDetYbmuaf7u/VHCn7s3PYcVWPIB7irPG2T1OKrY5P1rth1PCp9TY0/b5G5sYOAaveRnAVsAelU9NQPAQelXVim4w/TrXkYiVqj1sZJeQGNYxucgkdM1Fg3B+YbQvY96mW3fH75gwzUUiySN+6O0dDWUWm9/mDQ0yqnyxqCR6U8JFChZsbjzTlgjhyxxupiW0kzM0jZUngGqcovrZfmFmVLi4aTAC8V3vwvGG1Ue0P85K4u6iijiwuAcjvXafDA5fVj7Q/zkrppyjKHurQ68uTWKjfz/JnodFFFM+oPLPFD7fFWp8/xxf+i0rJkiDAkHkjIq34ycjxfqCg4y0f/otKpRMTtUn5lXGa4cRBxlzo+VrSTrTi+7/MpgFxkjBUHio5oyFXjB61dMRFyOchh0pZoRJJkdNpFUq6TXY5uR2K0EQdGOQG6A08rIigctgdas2toWgyCA2eDUjwSopzk9+lZTxEedq5apS5b2KDxsQG55xxWPfIRNIMd66cwkqjYI3YwPQ1jarEqTS8d66sFiE6lhOLg0zAODnPc8V7D8Jf+RWux6Xz/wDoEdeQlQG5HQ16/wDCcY8LXf8A1/P/AOgJXrYj+GezgX+8O8ooorgPYCiiigDnPHf/ACJl/wD9s/8A0YteNuBliPTpXsvjoZ8G3w9TF/6MWvGHYKVx61rTPnc3/jr0/VjMDaR7f1qW4OzYx/urz+FIFGDS3WGChhxx/KrveSPLTTZ6r8NDnw5Of+ntv/QErsq4z4Z/8i1N/wBfTf8AoCV2dYS3PrMD/u8PQKwPGpx4Svj/ANc//Ri1v1geNBnwlff9s/8A0YtJl4r+BP0f5HkJclQduD1NRgNIAxJXPanliVyAQepqNvMcBgcD0oij44ruxEhXbxjrTbQD7XGT/e4pzP8AvCuD9abaHN3GP9rNdX2H6GsdjRDkSvzzuOKuBge+01TRd0hOeQatYUnkY9Sa8uqloKJJ8wYHzM4HSmmRsjk8HP8A9ahxwdrAHFQ/MAAWyfWsoxTKbsTGZtpOCfak8xi2Pu4FRqHAOWzzShW3kluPSq5IoXMwfbjDEMetYN6cSNjuTW7MFVc4yQO1YN0wZ2OOe1d+C3uVD4yTS2Baf/dx/OvS/hJ/yDtf/wCwr/7bwV5npiErKV7AMa9K+EOf7L1/P/QV/wDbeCtK699s9TL/APepen+R6LRRRWJ7oUUUUAFRXNvFd2s1tMpaKZGjcBipKkYPI5HHpUtQ3lpDf2U9ncKWgnjaKRVYqSrDBAIII4PUHNAHC6HY2UfiW81LwrYx22kWlnLbytANsV9c7lI2qOG2bWBfuXIycGsnRrK1sNK8B69ZEnV9VniW+uNxL3Ylgd5RJ/ewwyM/d28Yru9G8JaToDxnThfRrHH5aRSajcSxqvoEdyo6enFFh4R0LTNT/tC0sBHcKXKZldkiL/eMaElUz32gZoA8/htYV8LWHiwA/wDCRy64iPcbj5j7rzyWgP8AsCMldvQbc9aNXtYZ/DvjDxPKCdd07UZls7ncd8HlFRFGnorDGV/i3nOc16AvhHQk1j+1VsQLvzTOP3r+WJSMGQR52B/9r
Gfei68I6Feat/adxYh7kukjfvXCSOmNrPGDsZhgYJBIwPSgDQ1GTUIrMtplrbXFzkYjubhoUx3+ZUc/pXhOrveSeOfED6hBBBd/aot8cExlRf8AR4sYYqpPGP4R/WvoGvBvEv8AyUbxKP8Ap5h/9J4auG55+af7s/kakdrLIqCLGDy2fSs3WbcCbA7V0do6Qx726sqj9KwtYEigAgeY4LEV4eBrzlirdF+Pf7jx69KMaSa3PX/D3/ItaV/15w/+gCtKs3w9/wAi1pX/AF5w/wDoArSr2nufS0vgXoFcx44/5BNqPW6A/wDHHrp65vxoM6XagdftIx/3w9Y13alJ+RnilelI4hLaKRvMPUjHWrQZY4wFBz0FNFkHbIJweetX4oI4Y9xJ49a+PxOJhpdt+RwUKEneyt5lGK2uJWcTFSh6YqtdiKxjbg8fjWi15vO2LBOeeO1QXFgHInlyPbPFOjXkqi9ton0QqtKLh+61fcxNLeWbxFprn/V/a4Mf9/BXtleS2xjGt6ci9ReQf+jFr1qvrcNU9pTTSsXlkeWM1e+oV5T8SznxPbj/AKc0/wDQ3r1avLfiMAfE8BP/AD5p/wChvXTF21LzT/d36o4CT5UkP0/nUKkFwOxqzPHhZh9P51VTPmqOwrug7xPDp6xf9dDd01WeAgYwCfz7VeVpUA3EZ71V0uJmtmZRycGrxkIGWAzXg4iV6sktSIrS4xUlmcbiNveo5JUhk8tAdxGanVpW4RR70wxR2zMWJy3PNZRkr2l9yG1pdfeRJbyTEmUjbnIp01yf9XFnOMdKUTSTACIA1b8uOCPnr70TqcrXOrvogjG6937zIa0lbdJLjA6Yrt/hfy2q/SH+clcjNdeYdi4weOldj8M0EcmqqOmIf5yV30Zycff0OjL0lio28/yZ6BRRRWh9OeReMo2bxbqTDs0X/otKz4V3PuXqSD+lb/iFQ3ivVc9mj/8ARSVihhjcvXOK8+vWbk4W2Plq8LVpS83+YR/LJ5j9Q5UY9KlePyQQPp+fSlyPLOOpJNSKS8JLgbziuKc3e44xWxNbQSGCNxjjr+dWC/TIPHXinWhZLSPjjBz+dXI5Imx78dK8TEYiXO21dJvY9WjQTirO2iKLxjO/sa5XWVzdOvqa7OcBWG3pXI62dtzIwHy8g/lXq5HUcqt/I4sxhypW7nOyj5vxr134Uf8AIr3X/X8//oCV5HIeRXrfwo48L3f/AF/P/wCgJX2Fb+GaZd/ER3dFFFcR7gUUUUAc945/5E+9/wB6L/0aleMmMHB9sV7R41Xd4SvB6tF/6MWvHHACA9qqMraHzmb/AMden6siClQc9MjFPuwN4z6U5gTggcZApb2LdgewNUpe+rnlo9O+Gf8AyLU//X03/oCV2dcd8NQR4bmz/wA/R/8AQErsah7n1uB/3eHoFYXjHA8KXpP+x/6GtbtYXjHH/CK3uf8AY/8AQ1qJbM0xP8Gfo/yPITKpTcoPPNRNvcfL0qxuUnCj60xhIVyqilF26HxzKTsA5Uio7Vf9PjH+1mlkOJyppbdcahCR/ersekH6Fw0NWKEmSUr94E7c+tTuFfKODnHOKfbxNukbHOSV96kYqDh+DivEnVvI0jCyKpjUHjrjFMaPp7c/jUxVGPU0eSoXGTVqdtyXEaI2x8pGacYcn5umKkSAAcE9c1IIVBLEms5VkupagVJAiDAB4Fc5fZEzlemeK6iUoAQK52+GHb0B5r0svlqwi+WoibQVLSXA7bf8a9E+ERB0zXyP+gr/AO28Fef+HP3ksuP7g/rXoHwjDDTNfDDDf2pz/wCA8FXVf7+a9D1MAv8AaZPy/wAj0SiiipPbCiiigAooooAKKKKACiiigArwjxIufiN4lPpdQ/8ApNDXu9eDeJSf+Fj+JV6ZuYef+3eGqgefmn+7P5G5YSCSIiTjaazNYnDNuwMkYFWo4XLKUcqO49aztYKiZAuNuDXj4SlH61zLr+B4lWcnSUWey+Hv+Ra0r
/rzh/8AQBWlWd4f/wCRb0r/AK84v/QBWjXrvc+opfAvQK5/xcu6wtM9roH/AMceugrA8Vn/AEKzGOt0B/449cuN0w8/Riq6w+78zlRFKjlVUleoNTLasqDc7Y6nNOSZkG0oSR3pksssilQrLnvXwTnVk7aLzIUKUVfV+RG8tvbAuzKAOMmm3UxuoQsIyPaoTabgBM+9QOQe5qdJIIcKNucV0uEItTheUl9xzqc5JxlaMX95nW1qYtb0+QkkteQZB7fvFr1qvK7WSSXVrItGyqLuAgn/AK6LXqlfYZfKTo+/vf8ARCwCiubl2CvMviEu7xPbjHH2Rcn/AIG9em15z45j8zxNCPS0Q49fneuqpLlg2GZq9C3mjzufO6dewxz+NUlGH+lbWoWpjmmXHVQ3T3rG27XPNd2GqKcLr+tDwIaXizo9HkK2mAMnqPpWgZIt2TjnpVbRo1WyDFQSePwrQWO3kAYbCO1fN4upFV5Oz3N6cW4rUiEqlT5YBPtUYty7ZkyMVbKwxKSqrn2rPmkmnGFDRkGsqN5N8mi7sKlo/FqT+bBE4Ubc5xUBspLglmdlBOcUkUA3rvYMc96nluJAMRRswx1Fa6wlam9e7I0krz2ElgihTJIz9K6X4aDE+rqTnHlfzkrllsriQ75HYqedpFdb8O1CXesDviHP/kSuzByV3Hmuzpwaf1mDtZa/kzvKKKK7T6I8n8VuU8V6qc45j/8ARaVhKW6knGd3+fzra8XkHxbqK/7cef8Av0lYxUm3bB5/+vXPNJSfmfJYn+NP1f5k4n3MVPGBUscrFyDx1A96plwFMmOdoJFSNLh1YDiuaVJPRIhTaOis7hPJAJGDUzBDkq2PTFULMx+SAcZ7U+WN8krLgY4FfNVKEfbOzse3Cs/ZK6uFxMUwpPA71ympSl55s9N5renYhArtkjgk/wAVc1fsfPl/3q+jyigoyPKxVRzaRmv9/ivXfhR/yK91/wBfz/8AoCV5C/DA56V6/wDCn/kV7o/9Pr/+gJX0Nf8Ahnfl/wDFXod1RRRXCe2FFFFAHP8AjZtnhK9b0MR/8iLXkJKvheOBXrnjr/kTr/6x/wDoxa8gfCfMOuCMUWPnM3/jr0/Vi7wuQe7cflT70cJg/eUVWYllyRz1+lTSNviQ5ydoGPwp8tpJnl9D1H4cnPh64/6+m/8AQErr64/4b/8AIuTZ/wCfo/8AoCV2FI+twP8Au8PQKwvGX/IqXv8A2z/9DWt2sTxcN3he8H+5/wChrUzdotmmJ1oz9H+R5OCobAAzxmkcvj5I8irO1AchQSeDSEleFiOK4lU12PlOUwbgf6S2RUlsmLuEg981PPGGuG+Wo4ji7jUevFenz3p2XYyvrY6S3i/c5B5qKT/aUA4qayDGFE3fMvBPrUkkLBiChbPfFfMe05arUmelyXgmjPIXd1xxQFXGd3HQVd8kZ5joEC4+4K2+sxsZexZTVAeA561MsYDE7icjpVhbcZ+7j3qVYkAyQDWVTEroaQoMz5IyeiZHrXMahuEjqFzkmu0k4XAj68VyuqwMl4wxxya9TKa/NNpmNWHJJMl8KoFubgekf+Nd78KP+PHxDn/oK/8AtvBXB+H28u4uSBjK/wCNd58Jzmw8QE/9BX/23grqmn9aqPyR6OXy5q3yf6HoNFFFaHtBRRRQAUUU2SRYo2kc4RAWY+gFADqKydJ8TaRrkN3NYXReO0bZOZYni8s7Q3O8DjaQc9MGmaP4r0XXrhrfTrwyyiPzQrwvHvjzjem9RvXP8S5HI5oA2aKxNP8AF2harqP2CyvxJO2/y8xOqS7ThvLcgLJjvtJxRF4u0KbWP7KS/BuvNaAfunEbSqMmMSY2Fxg5UHPB4oA268F8Tnb8RfEh9LqH/wBJ4a96rwTxRg/EbxID3uof/SeGtKe55+Z/7u/kau91bap6gfyrI1hSkkYz65rXE6wv82fuj+VY+pS+awY9zivNwKkqydtDwajVj2zw9/yLWlf9ecP/A
KAK0qzvD/8AyLel/wDXnF/6AK0a73ufV0vgXoFYPiv/AI8bTHX7SMf98PW9WD4rA+wWpP8ADchvyR65Md/u1T0Yqvw/d+Zgo8bDLDmobm6VEOFJ+lPS3ikbzOcn3pJxHEpPPFfnkFT9otG/Iubqezey8zOK3EoIDYzzyKkSyQYaXBYd6V71APlBz9KoXUt9LInklPLJO7NezThWqe6rQX3HlTlShq7yZroY/tFiExn7ZB0P/TRa9Ery2wgMV9Yk9Tdwd/8ApotepV7+UU1ToSUXf3n+SOvDTc221bb9Qrz3xkwHiyIkdLNP/Q3r0KvOPHDiPxZAT0azQf8Aj7134iPNSkvIjMHalfzRzWqMjyznv5YFcyw3S4FamoSESTbTy1ZCMBICfpXTgKXs6Vv62PnruUpSO30GFTpyOwHKgVox6bBGgVFAUdOareH4hLpSA9Cq/wAq1PIkBIGMV8Lj8TJYmpFTtqe9hcOnRjJxuU5LOFBuwMjpzWVcs3n7UU8j0roTZ7/v9PrVK4EVvKQM1WCxi5rayZni8K+W9uVGRHZzmUMXHrWukcMC44H41RfUF83Cg8deKsiyM4Jk7+hrrxUpySdZ8qOWhGKbVJXZG16hcJtIycV0HgEAX+skd/J/nJWLNYRZVjnK471s+AARqGtA9P3OP/Ild2UypSk3S7fqb0o1I4mHtPP8mdzRRRXuHsHj3jNtvjLUP96PI/7ZJWILk5AAIBzW94tiEnjbUSexj/8ARSVivENxH8Q4qJyhzWZ8hik/bTfm/wAxiSB8j/Z/OpycEAfdFVQgikH+zwakgLMcfnUzit1sYJm/aRq9uh4DDpTpUmByJBjFRRK4tgyY3AUhluNg3bc96+ecZOo2mt+p6iklBJojk+YAPyy965vUTi4mHqc10Ukm5QT94cGudv8AHnSe5Ne3liam7nHVaujLJ5II4Jr2H4Uf8itdf9fr/wDoCV5C+AwP4V678KP+RWuv+v5//QEr2a/8M9PAO9VHd0UUVwnthRRRQBzvjn/kTr7PrH/6MWvH+rqPYmvYvGwDeEb0HoTF/wCjFrx5v9Yx7AZqk+h85nH8den6saQd/PQjFNOYyQf4efzpySfOFbpj+tPvlyBjoQM/lTTtJRZ5aR6h8ODnw7Of+npv/QErsK434ac+GZT63J/9ASuyqGraH1uC/wB3h6BWR4nXd4dugf8AY/8AQxWvWZ4gAbRLgHp8v/oQrHEy5aM32T/I3qK8GvI80EUanO3rS8Z2hDV9oY1bvzSh404wa+X+t3V0mzx/q9tG0jm7sJHJKCOR3/CsmFv9Pi/3q1dVZXluB3JGPyFZUYAu4QPWvqsHrRu92v0PInZTaXmdPZklsqcfNmtcOCACpJ9aw7FX2HZjzAO9bIuBHGzSZ+UZOBXyuYwftPd1PXwU/d10D5Wwdh5OKcqJ/d9qlW4jdVxnkZHFNMkfHX5jivN5p7WaO7ljvdMFWMggrxUcgiUcCldo3Ug5x0qtI6LnrwK0pU5SfUzqTSXQSSdVxhTzXL6xch7l+DxXQTTDZx3GelcnfM08jkdmINfT5PQSm5NHkYqpzNJvQtaE4Z5z/sV3vwo5sfEOP+gr/wC28FeeaM3lLPn+5ivQfhJ/yDdf/wCwr/7bwV6s42rzfodeWte3aXZ/mj0Oiiig90KKKKACkZlRSzEBVGST2FLRQB5KdZ03xEPiPpui6paXV9qMf+hxQTKzTgWUanbg88gr9a0zqNp4q1/RB4dlDmy026FwyAj7N5iIqRv/AHW3DO3qNhNej0UAeWaNf2mp2fgHRrAEanpUkbX0AUh7NY7aSOQSf3cswUZ+9nNV7O5hfwzovhRSf+Ehttajea22nzIgl0ZXmP8AsFASG6HcB3r1uigCpqMeoS2hXTLm2trnIxJcwNMmO42h0P614RrCXkXjjxANRngnuluot8kEJiRv9HixhSzEcY/iP9K+gq8C8V8/EbxGM/8AL1D/AOk0NaU1dnn5mr4dm
ssanMjHqo/lWFf/AOvI75z+FdJa25uLeNiSMKv8qydYhCy7lHcCvLwNeKruF9TwqtNqClbQ9l8P/wDIt6X/ANecX/oArRrO8P8A/It6X/16Rf8AoArRr0XufVUvgj6BWB4sbbYWh7faRn/vh636wPFoBsLUHvcgf+OPXJjbfVp37MVX4NP61ObjhYD5ScfWnx2ShyxZjnnmq8bzoSuwkDofWph5rHHIz3r4Gp7RN+8tSabptL3WPlSFM/4Vl3NwAVSMAuc4FasViS2WkJGO9WfsdumCwUkdyKKWJpUXd3kVUwtWstLRMO0886hYGRAB9rg6f9dFr1GuBkMX2yyVMZF3B0/66LXfV9jktb22HcrW1f5Izo0vZTlG99v1CvN/HabvFFu3dbRcD/gb16RXnfjiIy+JoQCRi0Q5H++9elWly022YZkr0bLujz663B5fXOfxqgMH61uXlmxaUgHHHOOtYwjKTHPvXZhakZQ0PnI+7dM9A8PIX0mLb/dBrR/0kMAEGO9Z3h92j0mLaMnaP5Vri6HdQD6V+a5g5fW6llfVn1eDUPYQu2tCILcMPmQCs2W2X7U7Mxye1azXLEEKmTWJfRztIRhlyOo7VeX8zm1dRuZY7kUE1eROBCmMgflTXuZCp8pQap21s4kXdKzc963G8iNf4RXTiXCjJL42c9BSqxb+FIxWa+cjzIwE7kGug+HbFrzWN3X9z/OSs6W7gZSqspJ4xWl8Pjm+1njH+p/9qV7mUVJTcuaHLoZ04KOJhaXNv+TO6ooor2z2DybxSceM9U9f3f8A6KSsWVsBAfvMa2PFpI8Y6lxwWjGf+2SVhktJIysuNpAU/hXPOP7y58liX+9mvN/mOniLLLge4ogXKYx83I/GnSFlUJySTg1JbAeacjAzn8TWbk1TMUk5GkiSLAu0Z4phMwHKCrcay7VKxllIzmnMHwcx14Xt7Sd0men7K66mXMNy5HUfernb0/vJD3GRXWyxqc84PUiuZ1KICaQjoePxr3MrqqUrHFWjyyTZjuP8a9f+FH/IrXX/AF+v/wCgJXkbA+aq449a9f8AhYNvhm7H/T8//oCV7dd+4ejgJfvUvI7iiiiuE9wKKKKAMDxt/wAije/WP/0YteOMw3D1xg17H40OPCd6feP/ANGLXjDEtyw2nGSPSqirs+czj+OvT9WCKWbc3HFWr5Q0EY9VH8qrr94D2NT3atth64CqP0pS/iRPMWx6Z8Nv+RZk/wCvg/8AoCV2Nch8ORjw0/8A18H/ANASuvpbn1mC/wB3h6BWbrwzotwD/s/+hCtKs/WxnR5x/u/+hCubGO2GqP8Auv8AI6WrqxxBiQMSSfmqOTy0Q81cEK85aop44whyRXwdOsnJJtnPUotRbSRxupBfPbk9f6VnWwAuYzn+Ktu/iR5ZBkDmsvyPLuYgDX6BhaidHl8v0PlaitNnQ2cfOVzkGtVYpd2Cg2Y61X06AOuc4NaAdkJXbkDvXxuPxDdVxjq0e/g6CVNSl1IsEdQKD0zgVLvJx8lJuOB8neuDnfY7OVdyFhwc9KhZGPQZq7nIOUGKTPogrSFZx6EToqXUx7qOXYcIK5e8LRlyygEnmu2uFkeM4jrlNUtnY4KkZOa+oybEKTtKx4mOpcsk+hT0g73us9kz/OvQPhGMabr4/wCor/7bwVwmjQ7JLnP8SY/nXffCj/jx8Qf9hT/22gr2pyTrSS8vyOnLLOu2u3+R6DRRRQe6FFFFABRRRQAUUUUAFFFFABXgviRN/wATPES+tzF/6TQ171XhOv4HxO8Qn/p6i/8ASaGqi7Xa7M4My/3dnTWlo/2WHyzj5cNgVnatpcjMQGxxnpW/pE4jtBv5yBiq+q3SlshTzxXw2HxeIhjHGPdkVsNQeEU29dDutBG3w7pintaRD/xwVoVR0X/kBaf/ANe0f/oIq9X26d9Tvp/AvQK57xeD/Z1rjqLkH/xx66Gue8Xv5enWjelyP/QHrnxd/YTt2ZNe3s3c5
mO7+XJQ5pDfNniJqYXjJyRTxNGozg18O6cL35DnVSdrc5Mt1KcAKwqOWK7nK7Z2QdxjrTvtaKM7TUDayqOF8uT8qzp0a170qf6/mazq0rWqzZZS1aG5s2d9xN3AM/8AbRa9CrzqK8Nxd2YwwH2uDqP+mi16LX1WRKosPL2m/M/yRVN03J+z2sv1CuI8UReb4piH/Ton/ob129cf4i2p4kWVv4bRQP8Avp66s0k44SbW4q8VLlT2ujkdXQRLtRevpXKXICSN611OoXKvKOvU5rmbpC8rMeoyPwqsn5o00pnzWLcXVbWx2Xh+dU0yMHngVpNJGXJwOayNBRBpkZYdVFagSH2/OvkswhBYqo9dz3sLOfsIrQmSZAegqpqEu9sIhyR2qcLEP/10yWSNWBAJPTiuSioxqqUU2bVXKVPlbSM2G3uGuIm3lVDcjHWtJbGQriSTd9RUbXG3ACtk+1L/AGkzjKq4/CuutPE1bOCS+456McPTuptscdMiUZAXI56VpeBY/K1DWF74h/8AalZK3Uz564+lbXgrJ1HVie6Qf+1K9TJXWVaUasr6B+5dam6atq/yZ2NFFFfTnoHknjIEeKNRcDOJYsj/ALZpWUQrPkYyDWr4wcr4q1JR1Lx4/wC/SVgLK248/WsJwcmz5LEtKvP1f5k8+WDOp6HirEEfzA54yKoo+AB71Z3yCPbG2DwBWNSD5eVGUJK92dXaERwhWGakYw9wtJbFHtoxJ97bg81I1vA4IIBz718FUlFVW5XWvQ+vpxk6aUbEMlpE6mRVXPQ1ymrac6yOAerlunb0rriRBux930FYWq3SvclBnIwSfxr2Mmr141vd1R5mZU6Xs7vRnITReW4yO9erfC7/AJFu7/6/X/8AQErza4TzJ29AwxXpnwzXb4eux/0+v/6AlfbupzQ1OHK5fv7eTO0ooorI+kCiiigDA8af8ine/WP/ANGLXjrplicdRXsnjH/kVbz6x/8Aoxa8gchHYY6Cle0tD53N/wCMvT9WVx/x8KBwMYq7d4Mcajrx/KqB3bU5+bPX8av+WzSBj6Z/CirZSjJ9Dy1tY9M+Hq7PDsg/6bn/ANASurrlvAP/ACAJP+u5/wDQErqamm7xTPrcH/Aj6BWbr2f7FuMHB+X/ANCFaVZniHP9h3OOvy/+hCssWr4ea8n+RvN2izjQHycycY4p4jDDDMDVUCTP3hipUDd2FfCTptapnNCaejRkX1spuHAwOcmsiSBkvI2zkbttdJNbiSVnOMms29tXjCuCMBs19HgcWrKDetrHi4rDtNyS0ubdtFtgBU4Ydac0rDOQTTI95iVkIGV5oZnHevnnHmm29T2VLlgkhwnOR8hoNzwv7s8nH0qIuw60m8mqVGL6E+1l3JvtGQfkPBxR5+GI2Hpmod5o3HtT9hHsL20u5P5xdfuHmue1RiJSu08Vuo7DvWXqMZkdnruyzlp19tDlx150t9TH0pS88nb5cmu2+FP/AB5eIP8AsK/+20FchZqYribnrGcV2Pwtx9l8Q4/6Cg/9JoK+ohLmrS7WRllK9/5P80d9RRRXQe8FFFFABRRRQAUUUUAFFFc3q/jSx0i9urdrLULpbKNZb6a1iDpaowyC+WBPA3YUMQOcUAdJXg3iZSPiR4jcdftUI/8AJeGvd45EmiSSNg6OAyspyCD0IrwjxPIq/ETxID/z9Qn/AMloauF76Hn5nf6u7eR2VgEeziBzwKdeRR7C3rXNDUDCiqG7DFNm1NniWIt1Iwc18h/Y9d1vaKWjZyf2lTVLkcdbHr+j/wDIDsP+vaP/ANBFXaoaGc6Bpp9bWL/0EVfr6tKyPYp/AvQKwPFgBsLQHp9pH/oD1v1zHjuTydDif0nz/wCQ3rHEwc6M4rqmRiJKNNyfQ5qS1jb1/OnJbxj1/OuZGtEqOe3vQNXduh5r5/8AsnF2s5HjfXqCd+U60Rxgd6T9yprkH1dynDc/Wqrai5kzvP5mlDIK8vimW81gvhgd0GQ3d
iF6/bIP/Ri16HXjPh67afVLRSxI+1Qd/wDpotezV7GW4R4SnKk3fX9Ed+DxHt7ztbb9QrjPFIB1xc9rZD+TPXZ15747uha65CxOP9GX/wBCatMdSlVoOEd2XjKip0+Z9zl7lAXZ2+9nBrMmUG4Ze2OaddXymcgNwxzVCWcBi5PBGDW+FoTilfsfLTfM9Edro8KS2CZzxwKvrYxA9T1z1rlrDU2gtljXBAxVxdcbIHHNfN4vLcW605QejZ62HxmHjTjGa1RvfYo/U9c9altrSFJJG5y5BPPtiuc/tssB05oXWTkDPU461ySyrGyi05HRHMMLGSaidgI4VXNVVaIZ9q5h9YbYx3fd9zWa+puXBLkfiaKHD1eV+aRdXOqenLA7oTQg1peEyp1PVCvTy4P5yV5v/aLZ+ZiOPWu2+HM5nuNWJOcLCP8A0ZXq5ZlEsJW9o3fQmlmP1mrCFrb/AJM7yiiivfPUPIPGYP8AwluosOzx/wDopKxByhI+9mt/xkQPEmqk9ni/9FJXMNOFZcHgips5t2PkMWrV5+r/ADJ4ySCG6HgVPasDLuP3TmqkVwpiAOMhqhhvFQoM8luPelKjKakrGMbpnfG1jlEUuTlRxz7YqQW4UcE/nXNx6y6IFbAOMinNr5UhWwCRkda+RllOMbsndHvRzDDb21NyY+VgHoeBXPago8zf67j+lSJre9280KFXnNZl9fIZtgbua9PLsBWo1LSRw4vEwqx90aEO4sem4Yr0v4cDGhXn/X6//oCV5aLtX2qDyDzXqXw4wdAuiP8An8b/ANASveUZR+IMr/3j5M7Ciiig+lCiiigDD8Xjd4YulPdoh/5EWvL7qxTzCecY9a9O8ZP5fhW7b0aL/wBGrXkE2ovKM8c1hOlVnUTg9Dwc1cVVSa6L82K9sqMuM4+taLBVjiX1SsY3ORgmpp7pGMO1+UTBFVVo1JuKfmeTGVrnrfguMRaLIg6Cb/2Ra6OuZ8DSGbQGc9TLz/3wtdNSw6apRUtz63DW9lGwVmeIBnRLgD/Z/wDQhWnWL4snNt4YvJgASoQ8/wC+tGIi50pRju0/yNKslGDb2SOO8lqURMK5x/Eco6ImMe9RL4lmcZVEI/GvnVk+Na2R4316hukzqBEzcnrUVzbsyEdqxE8RSbsbUxU51zzQVAXNQstxtOadgeMw0otamyqSLap5eN2zv60SCbnGOnH1rG/txoCEkCgE4FP/ALcUgk7eDisv7OxSd+VMv65Qatdo0sT7ui7dv60gEuBnGe9Zp1tFBJIwKG1pFIBIyTirWBxP8hH1qj/MzSHnYOduc/pQfOw20Dp8v1rN/tpDjkc0HWkBxlc0/qOI/kD61R/mZpjz+Pu/d/WoJ4JJYTvxu9qoNr8a7skfKQD+NI+s5Yr8vA5q6eBxUZJqNiZ4ii42bZGsJW5O70Ndf8NIxFH4jQdBqg/9JYK4F9VDzsRjAU5ruPhVN9osvEMvrqn8raAV71GjVhNSntYrK5R9rJLt/kegUUUV1nuBRRRQAUUUUAFFFFABXnuqG+0i/wDF9uukX962tKsli9tAZEZzAsJR2HEeCmctgYavQqKAMa20CNvC2n6PeTXH+jW8UTPbXMkDFkUD76FWxx61iy/CzwnPcy3M1tfyTykGSR9UuizEAAZPmZPAA59BXZ0UCaT0Zxp+FvhQ9ba/OP8AqK3X/wAco/4VZ4T4/wBFv+On/E0uv/jldlRQT7KHZHNR+BNHijWOO41tEUBVVdbvAAB0AHm07/hCNK/5+tc/8Hl5/wDHa6Oigs5z/hCNK/5+tc/8Hl5/8dqG4+H2hXcfl3L6xMmc7ZNau2GenQy+5rqaKBNJ6M4v/hVPhH/nzvv/AAaXP/xylHwr8JDpaX3/AINLr/45XZ0UXJ9nDsji/wDhVXhH/nzvv/Bpc/8Axyj/AIVT4R/5873/AMGdz/8AHK6vUL6DTNNur+6bbb2sLzStjOFUEk/kKwNJ8VXt1qdjZ6rox07+0YGns
mFyJdwUAlHAUbH2sDgbhweeKd2Hs4dkYfhv4Z6OljJJfW2pRXS3tz5ZGp3KERrO/kn5ZP7gQg9e/Wuh/wCEI0r/AJ+tc/8AB5ef/HareJfGUmha9Z6VFBpm65tnn87UdS+yINrKu0Hy33Md2ccdDXT2zyyWsLzpGkzIpdI33qrY5AbA3DPfAz6CkUopbIwf+EI0r/n61z/weXn/AMdqrc/Dfw5eOHul1SdgMAy6vdscenMldbRQDSejOLPwp8Ik5Nnek/8AYTuf/jlIfhR4QIwbK9I/7Cdz/wDHK7Windk+zh2Rxg+FfhNelrfj6apdf/HKX/hVnhP/AJ9b/wD8Gl1/8crsqKQeyh2Rxv8Awqzwn/z63/8A4NLr/wCOUf8ACrfCn/Ptf/8Ag0uv/jldlUF5LcQ2cslrbi5nVcpCZAm8+m49KA9lDsjgfEfww0SPwxqr6TaaidRW0la1C6ncsTLsOzgyYPOOtaA+FXhIqC1pfZxz/wATS5/+OVt+GNcm1/S5rm5sls54bue1khWbzQGikKEhtq5BK+lUNX8V3lpqN/a6Xox1FdMgWe9c3IiKhgWCRgqd77RnBKjkc80B7OHZFX/hVfhM/wDLrf8A/g0uv/jlWLb4c+HrIubT+1YC+N3laxdrux0ziXnqa6OxvYNS0+2vrZt9vcxLNE3qrAEH8jVigahFO6Rzn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O10dFBRyE3wz8M3MjyTxalK743M+rXTFsDHJMnPAFRf8Kp8In/lzvv/AAaXP/xyu0ooIdOD1aOL/wCFVeER/wAud9/4NLn/AOOUn/Cp/B+QfsV7x0/4mdzx/wCRK7Windh7OHZHGn4WeEz1tb//AMGl1/8AHKyvEHwv0RNPhbTLPUGuftlsrY1K4Y+SZ0EvWT/nnv8Ap25ru9TnvrayMmnWKXtzkBYnnEQx3JbBxj6Gq3hrWf8AhIvDOnax9n+z/bYFm8rfv2ZGcZwM/kKQezh2Rg/8Kr8Jc/6Lfc9f+Jpdf/HKQ/CnwixybO+J9Tqlz/8AHKfD41ubvxbeaHb2ukr9ku1t2+0ar5dxIuxHLpD5R3AB+Pm5KnkV2NFw9nDsjix8KPCAORZXoPtqdz/8cq3bfD3QbOMx2ravAhO4rFrN2oJ9cCX2FdTRRcahFO6Rzn/CEaV/z9a5/wCDy8/+O0f8IRpX/P1rn/g8vP8A47XR0UFHOf8ACEaV/wA/Wuf+Dy8/+O0f8IRpX/P1rn/g8vP/AI7XR0UAcxP4B0S5iMU8usyxtjKSa1eMDg5HBl9apf8ACqfCP/Pne/8Agzuf/jldpWNrGoa3ZzY0zRIb2FYvMeSW+EHPPyqNrZOAOu0c9etFyXCL3RyGq/CzQk1HRVsrK/Nu94y3uNRuDiLyZSM5k4/eCPkf1Nan/CqfCP8Az53v/gzuf/jldNo2qwa5oljqtsrrBeQJOiuMMAwyAffmudsvHE11LYXUmkGLRNRuja2l99oDOzEkIzR7flViuAdxPIyBmndi9nDsixB8P9CtY/Lt5NYhTOdsetXaj8hLUv8AwhGlf8/Wuf8Ag8vP/jtdHRSKStojnP8AhCNK/wCfrXP/AAeXn/x2o5/AOiXMLQzy6zLE33kk1q8ZT9QZa6eigbV9GcX/AMKq8I/8+d9/4NLn/wCOUg+FHhBRgWV6Ppqdz/8AHK7Windkezh2RxY+FXhEdLS+/wDBpc//ABylHws8Jg5Frfg/9hS6/wDjldnRSD2UOyONb4WeE3ILWt+cdM6pdf8Axysuf4YaIPFFjHFaaj/ZjWdw1wf7SucecHh8vJ8zOdpl/wAgV0PiDxFq2hR3t8dCjn0myj82a4+2hZWQLlike0g454LKTjjtW7cXSw6fLdqu9UiMoXpkAZ/CgPZw7I5Q/Cvwmetrf/8Ag0uv/jlB+FnhMnJtb89/+Qpdf/HKseDfFtz4rgS6NvpMdu9uspW01X7TNGzAEJInlLtOC
c88EYx3rq6A9nDsjjP+FWeE/wDn1v8A/wAGl1/8co/4VZ4T/wCfW/8A/Bpdf/HK7OigPZQ7I4s/Crwic5tL456/8TS55/8AIlL/AMKs8J5z9lv8/wDYUuv/AI5XZ0UB7OHZHF/8Ko8IDOLO956/8TO5/wDjlb3h/wANaV4YtJrbSYJIop5fOkEk7yln2hc5ck9FA/Ctaii41CK1SCiiigoKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAzvEGl/wBt+HNT0rf5f221ltw/90upXP4ZrloLfxHf6lpN9e6H9mbRLSYiP7VG32y4aMIFjIJ2pjdy+DyOODXdUUAc5qt1ftHED4QbUWmth5g8+DbGx+9G5dhke6g59Ks+EdJudD8J6Zpl5IslxbQBHKElR/sgnkgdB7CtqigAooooAKKKKACiiigAqG7mkgtJZobaS5lRSywxsoaQ+gLEAH6kCpqKAOG8LSeIdL0rV0m8LXSXD31zewRyXduBKJZywTKu2CFbJyMcdafqVjr2ma1r8+laUNQj1mGPY4nSMW8yxmP94GIJTAU5XJ4IxXbUUAZ+g6Z/Yvh7TdKD+Z9itYrff/e2IFz+laFFFABRRRQAUUUUAFFYc/jTwra3EtvceJdGhnicpJHJfxKyMDgggtkEHjFaFjqllqYZ7GcXEQVWE0YJicN0KPja/T+EnHegBdRurizsnntdPmv5QQBbwuis3PYuyrx161y3gn+3tH8G6PpV34dniuLQQ2s3mXUONmPmlUqzZC+hwTniu0ooA4fxRYanr8M2k23hr7O73Ubpqsk0OyMK4bzVAbzN+F4G0c98V3FFFABRRRQAUUUUAFFFFABXHeMv7evbu30q00a9udFlj3X01lPAksvJHkDzJEKgjlmHODgYySOxooAy7We6iXS7eHRXtrV4mEqtLGDZhVGxNqkhs9PlJAxXG6d4f11NO0HwzPpojstIvY521Hz0KTRQsWjCoDvDE7AcgAYPJ4r0aigAooooAKKKKACiiigAooooA4bxKuuan4hFnP4cvrzw7bbJAtrPbAXsvB/eCSVSEU/w4+YjJ4GD1E13fefLEukmSD7IZVdp0G+XJ/cle3H8XTmtGigDjLLT9Q1Dxjp2rtoP9i29jbTQyGSWJpLjft2piJmGxdu7k9cYHWuzoooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDk/Ekaf8Jn4N+Reby5zx1/0WWue8UahqMK+N47XULm3MB04W7RyEeRvYBio6DPf1716RJa2800M0sEUksDFoXZAWjJBUlT2JBI47GopdM0+czmaxtpDcbPO3wqfN2/d3ZHOO2elAHELok7+Nr3Qf7e1saedMivMfbn8xZjI6bhJncFwoOwHbnt2rDh13XtftvCds8m4XejC7kP9pvp5uZgVB/eRozEgc7Rj72TnFesC1txdm7EEQuWjEZm2DeUBJC7uuMknHvVSfQdHudOi0640mxlsYcCO2kt0aJMdMKRgfhQBT8IjUl8OwJqt1Bc3SPIvmwz+cCgchQX2ruYDCk4GSDW5UNra29jbJbWlvFbwRjCRRIEVR7AcCpqACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigD/2Q==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "augment = Augmenter(parallel_augment=True, concat_original=False, min_augmentations=2, max_augmentations=2,\n", + " shuffle_augmentations=False, repeat_augment=1,augmentations=[freq_dropper, chunk_dropper])\n", + "\n", + "augmented_signal, lenghts = augment(clean, lengths=torch.tensor([1.0]))\n", + "\n", + "# We here have two signals, once for each augmentation\n", + "print(augmented_signal.shape)\n", + "\n", + "plt.figure(1)\n", + "plt.specgram(augmented_signal[0],Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "plt.figure(2)\n", + "plt.specgram(augmented_signal[1],Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "waJD1W-MsYQd" + }, + "source": [ + "The option `concat_original` aurgment can be used to concatenate the original signal in the output batch." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "executionInfo": { + "elapsed": 4355, + "status": "ok", + "timestamp": 1704409158813, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "MpdbxIyUsfrF", + "outputId": "039d1bbd-5e7e-4808-eba7-aa067d402d48" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([3, 45920])\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sdelang/env/sb312/lib/python3.12/site-packages/matplotlib/axes/_axes.py:8089: RuntimeWarning: divide by zero encountered in log10\n", + " Z = 10. 
* np.log10(spec)\n" + ] + }, + { + "data": { + "text/plain": [ + "Text(0, 0.5, 'Frequency')" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWi
f+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeB+KAo+IviQbRj7VD/AOk8Ne+14F4qGfiP4k/6+Yf/AEnhrSl8RwZn/u7+Qy/jX7TuIGNq4/KqN5bIlvEQo+YVpXa+ZLGf9lSfyqnfvi1jbtg4pYeTtBHzcW/aaGXbosc6HaOGH867m1iSNGJUYdQQK4eEEyoPVx/MV3wQCBH9FArlzqVuRd/+Ad1m58xzGpQKt0WCgBvmx71CEUL90dK0dUUNcp/u8VTYDds9s11UJ81KJ583rYpC2U4GBT/IU5O0ZXjNTYP8NTYzCD3reVRoHUl3KKxqSF2jg+lXvs6RoflHTNJBD+9UD15q1Nxx6nj6VjVq3kooG2ylGkcah9g9KYyKzsCvH0qcDOwDpSzFTwO1NS94m5iXKgSsdvU46VuCBBoiOqgFm3H9KyLo8kDvW4Af7Atz3INaYqTSp+v6HZUd6af9dDLdF2ONo4XP61SjRVDnaPU1eJPlSHvsqtEQVkb/AGf8K6IPRipNqLABcIABQI1A24GAM05AMRr+VPZQcqOw5p3Bys7DVt0kQAqMZ5q0iIi7Qo5pscZ+UL3OanwEDDvWE530Oec29LkbBSGbaOnFMVELAbRxzTywDKB0AzSBed/96ktiVsRtEm8naM5FdRaW0cluG2DJQZ/DFc24BYH+6c12Gnqr2yegVf5V5Wb1XClFo6sMueVmLLGgWMbRu3c8Vxt+kZnIKjIruJUDXW0dQATXHanERcuT1AOK58kqJya8rmuJXLJM0PKT7NbDaMFST+RrDuYUywCjg10PlN9gt24xtrCuY9twSvQnmvTwU7ylr3/NnKrxkioY0B+6M0jIoQYA5qZPn+b2FGzcBntk16XNbc157OzKAhXsBgU4W6Z
PA9ateSRgDp3pCvVu/Sr5zf27ezK6ookwQOBTWA3bQBxinuRuBPrSEdc+tUaJ9WNVEIJwPSmmNcgYHBzQM7T9eKeAVYE96Zeq6jNqliNvfP50KirlQoqTG7H1zTVOG5655oC90W9OVTdYwOc/yqeS3DTfdGQah08br+P8f5GtPaN2B2Y5rjrT5anyPPxEnGd12JtLtI3uApUYDA10wtY0t/L2qGRcA+9c9pDGO7wejA5ro59wZWPQ4H418xmspuuo30OjBpcjk9WRrboZlBQfKOuPauZ1iOM3T4UcGusWVXhZhnrt/pXJ6wQt2UXoOKrKJSdd36IMWkorl7mNNGAFwBgA0BF8xPlGdtS3Cgpj1HNIoyyntjivqk/dMFL3BAihphgdv6VWaJPMD7RxkCrXT7QR14/pUD8qv4mnD+vuNKbaf9dhqxrnO0VYCL5ecDJpkYwce1WGVWTP40pvUzqT1KKoM4wMg1atERbpPlH3qh6P75qaAf6UnuRTqaxZpUldMmvok+1PhR8pIFVLqCOXaGUEZB/EHIq5dhmuZT7moJvvfhWdJtRiY05NNNMrPGmQCBSiNSOQDmnsmcZ60KgCj0FbX0N+bTchESBANoxuqbykLHgUn8NSgA/N36UmxTm+5G6hkjwOOf51XdBhuBVxh8g9AP61WkUDrTgyqMtbE2nFfLnUgfdr074NKE0XXVAwBqp/9J4a8zsE3eZjuor0/wCDwxpGvD/qK/8AtvBXLiHqztwEv9qmvL/I9GooorlPaCiiigAooooAKKKKACiiigArwDxWSPiV4kx0+0Q/+k8Ne/14B4sYL8R/Enr9phx/4DQ1rR+I4cx/3dik7mU+qgfpVPUDiGGMdqu2nzx7T1HSqWojakXqQamj/FUex81TXvozE8wXEe0A4YH9a7+Fw9vCnfHNcNb83EZP94fzruooR9nTaea5M7cbQv5nem3KyXQy76MG6x2A4qjs2jcegGc1o3QImAPUKBmqUhBVBnsRSw8nyJHn1F7zK4ddpxVqEDH1Garx7MEnHFW4UXP3vvDit6zSREVqRQ2xyzrk5OfzpZ0b5yBztC/jzU0bGFpH6oRx7VFcZZlweVG4j1rJSlKepTSsUizLgD1oMLAeZz0q1KgSHeRzkVAzkJg966oz5vhJ2Me66pW4jN/YFvuHzFelYs4LbQRzmtxSH8P2mOu4j+Va4p6U/X9Gdk/4SM7+CTP9yq0K5aYjoU/qKtueZB/s1Wi6S4H8H9RW8XozOm/dY6JdqR56nFSOh7d/5UYJijwMkAVMVO4ccEVLlrczlJ3uPiQhlxUcv32Jq5bKd3K98fpVC8/17AHocVhTlzVGiIq7EP3VTuRipXODx6YH1quh9evarUabmUH+E5Naz03HJWYjxlUUdzj+ddXZxGW0jTkAgZI9q5iQkvnHfArrNLcG3jzx8leHnE5KjGS3udWCSlUsyxuDPu7niuQ1fAmkI+9XViJ8scEA8iuP1XPmSMTjnNc+SxXtnZ9jbGNvluupvFSdHt2HZcGufnUeafeugXP9jxejJmufkXLjnvXfl+jn6v8AM5K+8fRECRYkwPpShCSMj2qVVxKWPGGzTkGSuRzur03MxcmyPyCH4HVc1G8W4Zxz3q+wCx4J+bPFQsNpY9Q3NRGq2HM0ZE8eWAPaoiSSa0JlDYPQ1QYAkc4wa7oSuj0KNTmVhg3FORzil3gqF/iApc/IQetG0Lz3xWhvddRUwpxTMHAJ6k08r0NNBz8p7UAu5b0wj+1owD2P8jWuwzkdyxrI04gX8Zx68/ga0wSWP4/zrgxK/eX8v8zgxb95ehe0jH2hg3VuK6gMsuQOoGa5fSB5l0R7gg10nl48shiMEZx3r5XOEvbK71OvAXUH2IyVKIB2OT+dc1rUYF8z5PI/+vXSzAKgOcEN0rmNbJ89iOehrfJ9a115kYvZIyZE+QD1FOQfKD7CmSkqoPtili3GJDj5sDj8a+p6HK0+W4/GDOB6D+lVZgR
GpA7kH86vHH7712j+YqnKTtUY45zTpvX+uxVJ6r+ug6IEqM9cA1b2/Lx1qtHyMj0BrRCKIi3cVlVlZmVV+8ZEi7ZyfSprZv8ATY8/3hUdx/r3PbiiA5vIgP71bvWHyOm14X8jTuI8Xk4/2z/Oqd1gLjvxV+Q5vLokcKx/mao3OG471y0G9L9kc0f4hC+BgZ5pr549MfrTmZcqCeT0pGYYrqRvG+gmzKketPC5I9jTEOU3Z4qYAZH1zQ3YUm0G3AwfSqkhChs9qvOMrn2rNkXeHGetFLUrD6vUu6f1fHQrXp3wh/5BWvf9hX/23gry7TSS0gHZc16n8JBt07Xx/wBRX/23grmxPxWO/AK2LkvL/I9DooormPcCiiigAooqOeFbi3lgdnVZEKExuUYAjHDAgg+4ORQBJRXlWrST6BqXibUNHv8AVHg0HSW3JdajPcI93INy5WR2HyIFP/A/atadrvwbrdlHFqV/qMN3pt3LNHeXDTZlhVGDrn7udzAquF5HFAHf0V5zpTX+lnwbqj6vf3kutsIr+KecvG5kt3lDIh4j2smBtA4POetVbC81EaHoPi1tVvnu9R1SKKe1adjB5M0xjEaxfdXYCpBAzlTknNAHqFfP/i7H/Cx/Eef+fmH/ANJoa911G8nsbQzW+n3N/ICB5Fs0Yc+/7xlX9a8D1yeW+8ea/PPZT2UjXMWbecoXTEEQ5KMy89eCeta0fiOHMXagzRt0VAp4Bxms3U8CNWPPXFaJyGGOwAP5Vl6j9yLJ6g/0rLDK9VNnzVP4kihbZaaLnGXH869FijxbpjnHJ/KvPLcA3UR7B1P616LBnyHweqZX2rg4hbSh8z06KTmzEkVt0zO2TvJXPpWdJxckfwg5FX7nzNqIT+82cn3qhJgSlT9K3w21zy6m4KikcYGTzViIELjuvAPqKqopLHHTOanWYhD6gf1rWom9EQidpEw0XHHWoC+XTjljyfQUzbuOc/OfvUhbhz3A2ipjTS2G3cVleW6bLHywnTtmgqjRcgZFMe4Eb+WoOWHWjy2YfL1NaWatfRCZk3R/eDHvW2qqnh61OOSSf5VjXi7JOOmSa2pPm8OWmPfP0rbFPSl/i/RnVo6X9dzKlbDOmMZTOfxqtAx3yr6R/wBRU1wf3pH+zioYB+9l9kx/KuuK90dNJQZct8nae2xf5VYClHXcc5OBVaJ9sYA7KP5VeUq7pu7H9RXNVbTOWfxFmBMhsnBIIH1rJu0xNjOSDyfXitcMDyOwzWTM2ZMnuTWGFvztlJ22IolPUnr09quQqSQRwScEeuaqrlhgduauwMozJjoMV0Vm7Cm7sklCjaox9a6XTlH2ZB6gAH8M1zs0Y8lW75rprEKtrbqOw3f5/Ovns2l+4jbuzswC/eMlaTLkKcgZ6VxGrE+dIpPSuyG2OUgA/O5FcbqvzXMvvkU8iilVduyNcW7uN+5u7saFCQeq/wD1qxfLYr1565/GtglU0KFcchf61mQvulYH7pFduEulNr+Z/mcVfVx9ERy/fJHSlBAmY9sgih/usARUZUsAy8ZbH4V3JXWpgTyENIDjAAqJhtByc45q1LEoXPqADVcr95W9P0qKck1oNoo3HUEHHINZm7Mzj0rWmjHH6VmMgR2PcnmvSotWO7CyVmhDyBSEFs80gzuGOlG75wg64zWx12fQeXBfaKYV4JzyDz70qKNxbHJOPypM5U+tAlo9CxZZ+2REcYBP6GtZc7WasuwB+3R+mD/I1tKuSw9wa4cVK0jz8X8a9CxpbGK7Cgc+tdKMggE55HFc5ZA/a8j+EV0ZwwU4+YnBr5bNbOqn5HVgfgZDOMktnK5zXM69kTcdwK6SbmDCcdzXNazIJJlHPQfyroyhP2q8icS1dGZNyg9zSxZBUZ7Uh5UE9uKUY8wD0r6bpY5fs2JEG4Te6f1FVZOAFPpVuI4WVjzkf1FQyx7iCKUHaTCErS1Eg6e3FaXSFm6jpiqCDDH0q2rlkJz8nTFZVtWRUd3
czpgDM1NtP+P+In+9SzsA7n0OKbbc3sWeobNdP2H6HXH+G/T9DaugFmn2j7zEE1mTfMR2xWvOoEkwHdjWNMflPrXHhXdHLD4yEkF245U8H8KCQdp7UD/WsT6cU3HQehruOtIegBjX0xUpbGTjJA6VEmAqgVMgz83fFTIznvceGLw5wRkdPxrPPFxjtjmtJOV56Ef1rNkDCV+e/FFLdorDbyRb0ojzZTjoteo/CQ507Xz/ANRX/wBt4K8t04EeZ/u16f8AB4k6Rr2ev9qf+28Fc2JXvtnfgP8Ae5en+R6NRRRXMe2FFFFABRRRQBj2nhrT7XTNSsHElzDqU0012ZyCZTL94HAHAXCj0AFV9J8I2ml3gu5b6/1GaO3NrCb6RX8mIkEquFGc7VyWyxwOa6CigDmtK8FWGk3tpOl5f3EVgrJYW1xKGitAwwdmFBPy/KNxbAOBSW/gfTrbUIZ1ur5rS3uWu4NPeVTbwzMSSyjbu6sxALEAngCumooAK+f/ABYSPiX4i9PtMP8A6Tw19AV8/wDi3j4j+JG7/aYR/wCS8NbUfiOLMP4DJZW23DL2Kr/Ks+/TKxN22nFXrtgJsdyq/wAqoX7MY4+OAOKjDJ3i/wCtj5mH8Qo2qEXkanoZBivR7bb9gyM42kfrXnluwFxD6h1P616NbsBaL05UkfnXl8Rt2p+p6uHd5u/Yx7hS0u30GDWLIwaRmPfNbcrjzyD3BrEljxcMPUmtsDtZ9jy624iy7TsHUjiplcbM+o5/OolYBjwM9qkwrRMQeorsmkYkUZO8yD+KlB2/QDP402Mktkj5RSEZVU9ck1bWoCkiMFz+FSw3J2H9OKqOpkbP8I4pVIwAD35qpU01qPYqX0jGRj2J4rZRj/Y0Q7BeKw7wYYKOwrZ/5gkGP7pp4lLlprz/AEOmX8JGRM+ZSfamxZ8yU9tox+lEnBYntiliP7yT02j+ldf2S18Gn9bE8Z/dhe+0VZUlmXd25FQRnMZ9wMU8jrtOTXPNXZyy3LDXHDY/GqxAJ3epzUbSY4HU1LGmcOegpKCgritZXAHCr6k0+1JfYD2OTTBtILg57CmwP5Y2jqaJK8XYLaF9pGkIP8IOK6K0GIbeU9Suz8P8iuaaULAFHXNdJA260gX/AGc14WZxfs4q3V/kdWD+JstL8s4Y9AMCuN1JALpyexNdgm37OHzxtyDXH6mwa4c5qMlX76X3G2L2ibdwmNJt3HUJn9axYm+cjuc1uTyINNgXP/LMCueZwsrDuK7sAnKEk+7/ADOSulzK3ZDgFO8c43Yp0ZDgemSaSJMk+/NJIdsZHSu7d2MC4M7ct+FRL8xYN16U4NnkdMU1vnLdjgDisEhkEiFWx2HArIkx5ze2a1pi24Z7Gsic4kIHU16GHudWEV5Mag+6fSjH8Q6nP86MbcUHlwB0rpO3qGCH9qGAUMfrSvw59M5oz973NAK+5Z05s3kfvn+VbEbHzGHv+lYumKTewntz/WtgPgue4bBrgxS9/wCX6nDilapoX7J/+JhsH41v7xk5/vACuf0w7rzb3BrWkVolZ/4jzg181mEFKso+RvhZOMGyZEB3Hvt/rXLaypNy7DtjH5V1Co21geNwBrnNawHYj7wHStcpdsQx4n4YmSpxHH9eadlfMJpFx5aGkLBTj1HH1r6Y47XZJGQDJ6YqNgc5HeiJSWkJ4zj+lOAJbPalsxbMUKOQOgqSFsqR/Cab0yBSA8kDpUvVEPUpzjEzY7nn8qW1/wCP6Ju1JKuXdiTwf6U60bdcIR0rd/B8jtv+7+X6G1JIPOkHdsmsiUYAzV0swdyRziqlzxt9TXLh48rsctN3kQA//WphPJHeplA2CmkdeOa609TpUlcbHwqg9c1PGx8s+uKYFxipAGAIxxt4NTJ3IqNMcpygz6VRucs/41bHQZ64/rVSQgSkn1p01qXQVptotaflTLnptr034P8AGk69/wBhX/23grzGzbLS4/u16f8ACH/kFa9/2Ff/AG3
grmxG53Zf/vUvT/I9FooormPcCiiigAooooAKKKKACiiigAr598XH/i5XiJccfaYT/wCS8NfQVfPni5sfEzxEP+nmH/0nhrah8Rx49fuGF1/x9Mc/wrx+FVtQYeVCo9D/AEqe6/4+QR3C5/Kq16oESE9QCKdFfB/XQ+Yp/wARFS3P+kw8cFxz+NekooNnHjgqK83tv+PqL0DD+dekW+Gt+D0XkV43Ef8Ay7Z6uHtzteRg3rBZTjggms25OZQV9KvX/wA0xIPckiqD8ujDup49a68JG0EzyajvJghUsQcA5wPfiphF8rY6EcUyKENLuOOKtRK2HzkhRgVdSaWxKVyCFAQEPY/nUdwCrsAuCTgVcaDZEuCN2eagu25VgM7aUKnNO6Bqy1KMpx8i/mKh2MvOTVxkWNd7YyelVHmJBGCK7qcrrQI36FW5b5x6t+lawcjQY89QcCsa5bDFup7VrmNjo8B3cEdKeIStC/c6Zq1NGUed2e/NOTH7wj+7/hTSpUSZOT1/CkhPMh7Fc/yrp6F2vF2/rYtwn9znHIAqVxuGV4+lRRcJntgcU/kDr1Fc8lrc5JfENdMYPUmnhiEC470dMbufSopJVTjvQry0BJy0H5GcAYA5qFGCZduxpgl4A65GaTG4BSeO/vWqhbc2VO25YR9wGT/EDXYWQHkxNnI27cfXmuMiG5x6ZFdpZriBQOgUEflXiZzZQia4dWqChz5W0DgMR+FchqBJuXHQZzXaLHkA4wMnPua47UFIdm7jIrLJ5R9pKw66acbm1MA9pbgf88gc/nWR5Yd247/nWtJlbCD1EXX6k1loCG3E8DqK6sI2oyt3f5nLV3FX5U3+9Eq7+3Q0+TBXYO4FM3Fk64Oa6E3uZEyZAK44x1qMnLFhxkYx71YjKmJckZ5pkiKMMuOe3vWSl71mO2hSlJIyRjmsqUZck9c1rTZ28j+Ksqf/AFh+tejhzqwu7IsEryecUKc4BGO1KAc5J4o4/Kuo7bgMfNznJpCPmx75oA2qD15pedhPegNmWtM5v4h6Z/rWkXUF+R94/wA6zdM/5CMXuD/I1dlAWZx6sa4qyTq28v1OHFL3zT0c4upC3bHNb4cSgKMMRjPtXP6J892d3QgZBrobNAHfjGMgn1r5nNbRquT3SR0YRNrl7gdy7Byck81zGsZNy4966hSZJumFQ4571y+tKy3jEHjk/rVZQ/39nvYMUvdTXczMDaOeMUmA24Hj0PpTWOFAp2RyfavqLHJZj4yNvX605RtBHY0yLbk+/WnlsfL+VQ1qRJa2EbI4HPvR2wPzpAeSD1wOaXBOVHHHWgRTm4dhmltGCXUa9s0ydts5B9KdbANdoPfrXQ/g+R3W/d69jVuDtnm9ATVV1DAeoqzdnFxcHqAScVXAJB571x0vhTOFaO5CQVCjbwScn0oA4JqSUlVUHnPekI+UAd63T0NFLQaAMr9amcbR+FRjAdeO+ac7HzMe1S9WS9WQt098f1qpLy7g8d6uyDCg98f1qoyjeSeSeK2ps6qDW5ZsVwZj1yCa9N+D3/II17/sKn/0nhrzKxJDTAnsa9N+DnOj67/2FT/6Tw1zYjdndl/+8y9P8j0eiiiuU9sKKKKACiisvxM12nhTWG0/d9tFjMbfZ18zYduPfOKAJbfW9Ju9Ql0+21SymvYs+Zbx3CNImOuVByKdBrGmXOoS6fb6lZy3sPMttHOrSJ/vKDkfjXnnhO4udMHhW0tNStL+HU7B28iO3jT7KViDBlKjdt3YU7ySSRzniq2j/Yf+EY+Hf2Pyv7W+3J5u3Hm7vLk+1bu/Xduz3xntQB6Z/bGmf2p/Zf8AaVn/AGhjd9k89fNxjOdmc9PaibWNMttRi0+fUrOK+mGY7Z51WR/opOT+FeYD7L/wg0f+q/4SH/hJfbzvtP27n3/1Wf8AgHtTtY+xf8Ir8QPtXlf2x/aL+Vux52/bH9l29/7m3HfPvQB61Xz54wGfiR4jJ/5+Yf8A0nhr3a+k1KK
wDafbWtzd8ZjuJ2hT3O4I5/SvAPET3snjnX3v4IILs3MW+OCYyov7iLGGKqTxj+Ef1rbD/GceO/gsluMmdGHQgZ/Kqt/llHoSc1Ylfbt3ei4/KobsF4kbsTxWlLRxPmKbtNMr2wzdR/7w/nXotjGUs2b+Nup/lXnMDYni9d4FemWpAsxnugrwuJZNQgl1PVwsb1W32OZvsJdN/tDJ+tZyZ3qG6LmruqYEp98n8KoITvJ9RxXfhl+5T8jyKnxssrHJvJBGKvwMVgIfqOtUohLv4AwTn9K0cq0WPzrnxMtky6a6lZmZUeQ9xkVTBYqIyfmJBNWJJyzsP4AKqOdsu/uelb0Yu2qM5MJUM7jP3BUcsKCM46/WpVkJhdV6j+eajNrKImLDv610RdnZu1hGVKmSM+tbGW/sm35+XbWROcZHpitdgDo0OfQGt8R9j1OmprBXMt+dxPXbUcOMOvbb/hUzDIOPSoo1wZD/ALNdC2Li/daLEeWUf3cDNLIxUiiH7mPUCkJyeaz6mD+IcZAx57DNUp8HB/2s1O7Atx261Co3jJ9a0gram1JcvvDYOZdvYA1LjBGOrUsSbTxTUYhgWpt3ZcnzNtE6cbQOua7e0IS0iJ6mMGuMt4/myeprs7ZFeCLOcogr57O2uWKfcMM/fdixksXX23CuN1Q7ZW9Gya7RCVRSccrXG6qpZsHqM1yZI/30kaYy3u3NGRydPjB6FBj86y94EpB6GtRo2bTIR6r/AFrHm4DZ65r1cIk+ZebOCotVckJBnz2GKZuYcZ7HNNIOQT0Y0HhCa7FEgswfMQT0UH/Cp22ow9Bhh+WKpwkhcnvUhcrtz0H8qwnBuQXEmdXXgfxZrGlIMr46itCZx/D2rNb77H1NduHjZHVhY6tke7AzQd3J9aQAqv41KSNhJrqO9u2xGD8qg9D0pzcMAOhPNIv+qI7g8UBsoPXNAmtS5pY/4mEZ7ZP8qtT8XLehJP61V087b+I/X+Rq3J805Huf51yVP4t/L/M4cQ/f+Ro6KxFw+e54rpICTDEy8bm5zXN6TEBc7u+cV1EcQ8pN33lJPHvXy2cSiqv9dmdGCTadv62GHKucdC3P5Vy+ryE3k2egIA/KupmKi3z68friuV1qPy5WPrVZNZ1bv0DGLRIxHYkKPwp/GAPUE0xFDD3pzLzGx6gV9Y+xi7bEsKgE+hFPdscD8KgjJMhx93FSOcLx1PSoa1MZR97UcDxg9afzwB1qPGeD1qUAkADrUMiRm3Kl529qfY5+1Rg9QRmkn+SZiafZH/SI29Wrol/D+R3t/ufkaVwf9ImJ9TmoEOcin3bhrhwOm7mmgc4FcsF7iPP6DZdxA5pFPycdac+4delMxyfQ1a2KWwuBlSPU0gBZ1Y+lOAyAPQ5pyDpRewr2Ipwflx6VQm39FPINaMv3Rjrj+tUXb5/Y/wA61pPQ6sMyxYr802fSvTPg1kaLruev9qn/ANJ4a80sefN9Spr034O/8gjXv+wqf/SeGufEPf5Hfl7/ANpl6f5Ho9FFFcp7YUUUUAFFFFAFKz0bS9OuJrix02ztp5zmWSCBUaQ/7RAyfxog0fS7bUJdQt9Ns4r2b/W3McCrI/8AvMBk/jV2igCl/Y+l/wBqf2p/Ztn/AGhjb9r8hfNxjGN+M9PeibR9LudRi1CfTbOW+hGI7l4FaRPoxGR+FXaKACvn7xd/yUbxH/18w/8ApNDX0DXz74vz/wALI8SD/p5h/wDSeGtqHxnFj/4DKl18swPXKrx+FE7g2cXqM/0pbhN0uf8AZH8qS4QLZxE9SOa1jb3f66HzUbOxUhOLlOP4ga9IgkT7EGz0QV5xbkfaI892ArvrUZttuchl6emDXi8QwUlC/Rno0JuM3bsYmoHzLmRMdDgH+dUEUCQZPTH6VcvSRdSD0Jyfxqhkiciu3DR/dpLseXPWTNG2nALbgM54qywAVmU53c4qlA0ewEkZHGalcskWQS2eAK5akLz0KjLQicqd0anJUjN
U5CSSQPunAqzdRYiLK2GJ5x3qmOG2FuetdtFK10ZsltcRY3H86ttcrIjKuKzzy+c8YxirFug5ye/9KKsE/fe4XZl3iYkx61rPHnR7dgayrxsyk+nFaqsf7HgJPAravflpvz/Q3f8ADRmOMbh7VXzjzAP7o/pU0jE7m9qgjHzyE9Nv+FdcVoaU1ZalxMGHA6kChsE49adCA0fplRim4zjt3rHqzn6sryEbyFqNFP61IQA57mkJwpPTmt1sdSdlZEi5MijsOc0mwFhT4lJAHc0MmwYzzUX1sY82tixGcNxXX2Qb7CjMMHA/lXF+ZsKY5yQK7WEFoY0U4HlA/wAq+fzpe7FeZtg1aTZJGPNVGzgoCCP8/SuT1JjuLMMdc+1dXv2xOiDJ25yK4++LncrAnjmscni/ayfQrF291GvvLaTGQPmUEgetYkpLkKeD1NawJawhwcDBH6VlEYlIJzjvXqYSKi5erOSb1XoL/Bk/w0x8+Wc8dQKmUbsrjrmo5B8vsK6ovUzT1EjJwF7U8sRwR/8Aqpin5Nw+lIH3A7uDjFNq7CxBM4KjFUnXLA1cmAwMd6qnn611U9Ed2H0V0NGOT7/0pA2QR7UgGDj8aDgYHc1qdNkCrtTB9M0KBkewp3G2gjaDgc0rhe5Y08H7Yh7ZP8qvEMzZxghjVHTz/pkSjrzn8jV1mKPjOSCa5av8T5f5nDib85saKu+4kxyQK3XY7xjoRz7VgaJNsu3XHXvW9HgxyHdnYCPrxXymZprENvsjpwn8OyEZ0fdHu+dVzt9s1zOunMy49gfyrpUUGVgU5KfermtWiK3jZbIFbZTZV/kTiW+VNmOCFLAdQaMbwG9KR/lJJ9aWIE+wr6rpcye3MKnyuxFSMeBx9Ka+EUmngjYv6VD7mT11HqmeO9SMfLUBeSBRGOM5yaSRgAT1NYt3djHdmbMS0rginWhBuIwOzUkuPMz70+1TF6noa6pfA/Q9Btezt5FmVv35C896f3HpzTJOLokDjNTsvzADgYrnbskcMtEiJ8r9KYMlunFOlBB659qQAhgcdapbDWwDt9RTohhQAckU1Ae46MKfGvGc8jrSkxMjk4UDvzWe3zE+xrRlHAHesx2KxscZIJ/GtqWqOvC67F2wIYybTnHWvTPg5/yB9d/7Cp/9J4a8x0kZkmHT5ev516h8H+NJ14f9RU/+k8Nc+I+Jr0O/AK2KmvL/ACPRqKKK5T2wooooAKKKKACiiigAooooAK+fvF4/4uP4jP8A09Q/+k0NfQNfP3i//koviQ/9PMP/AKTw1tQ+M4sw/gMqyMTIoHfAqK93eXGpPHNSpuDN6kcUmoEFIkrWOk4o+Zp/GijECbqEf7Q/nXoNqfLgDYJwCK4GIgXMZ9HFeg2xLabkdf8A69ePn792F9rno0dZ/IwLsFrqTI+8BVMRfvFJ75FaNyS1yxPas8yEYx3Fb4dtwSXY8yfxMsxW6EN0xUinnaR8qnj8qghVsHH3WGfxqwZQQqfgaipe9txq1irPlSWY5X7wFUpAPvY+ard1L5m5B90VQZ8YP4Cu2hF2uTa70HuwUDH1NNSVs/KTg1Exwp3d6IZkwevFdPJ7pfJ7tyG6znr1Oa3Cg/sK3PHXNYtyMkelbUYL+HrfH8JIP6VhinZU/X9GbPWkY03CuB1xUMOSWBORtq1KBucf7OaqxZDSfSuyLvEqm7wZei+WEewqJtwX73Skjkygx+NIz4DfrUKLTMVFqQwcc9SeKOG+UjviljB2FjSN6jrV9TXqTxttbPvTnwW3dsVXD7mpVbeGHYHBqHHW5m4O9x2cOgP94V2tqx8uLDfwYriwvmSJ6AiutgBURFDzt5zXj5vFShFeprRlaRZtpMTMrenNc5qJUTSDuc1uNjzj6u4Wud1GNknlZsbieK58tgvbN90hV3eKj5l+T5bKIL0K5rKYMWLZrUIAskU/3cf1qnarunYH7vau+hLljJnM9xVGGYkY2k1HIuIgD1K5qzMytvA7VXk
Ifr6EVpBtu5HUgTldg7807YDuJpsWEIPpUrYYMy9SK3k7Mp7lCbg+wFVEbIDVcuM7senWqIGdo7Zrrp7HoUNYj+34Uxhnae4qQrx+FRgggE9qtG0e6HLyee1KTwTTVbBNC4Lhe4FAW1uXNOVft0Zx3P8AKrFyQshOP4qqaY2biM98n+RqzKSZST61zTX735HHXVqtmaujDfKzD2rctXI89G67sfWsXQfvO3at2KEtcq/G0j5q+bzOUfayUu35F4eL0t3JGmVJmjA5CA5H1rm9TPmTOc/8tD+ma3xGUmmLkcn5fpgVz+qx7Lj/AHuaWVxiqunYMU5Na9zFkUsrDvwc06NTuTHpUjqFiPsKIjiRPpX07l7pi53iR3WQQAeM0oYbEz1pLtSW/GojwE9gacVeKKjFSgkW45D60M4YZPeqysOvc0ofcxJ9qXJrcl0tbkDnLNnpmrFswN1GPeq3BB+tT2Sg3sZ98VpP4WdM0uRlucBblsdA2KnX5s1FfKY7th6vT487ufu44rjesEzgktERupD9eMUBTtBPrmnkfMcdqRmwgzTuybiouVPGKFAAY+9PDEo36UzqeO55qdREUpGDWa4w+Pc1fuDtSqEhJLY6110Vod2FWlyzZZRZCOu2vT/g8c6Rrx/6ip/9J4a80035vMz2TNemfB7/AJBOvf8AYVP/AKTw1z4h6s7sv/3qV+3+R6NRRRXKe4FFFFABUc88dtby3EzbYokLu2M4AGSeKkqrqX2b+yrz7Y5S18h/OYdk2ncePbNAGdpXizSNZjaW0e7EKw+eZrixngiMfHzB5EVTwc8Hpz0p+j+KtG16d4NOvDJKsYl2PC8RaMnAdd6jcuf4lyPevNNSijutOv8AQ/BGq3erafLolzHPD9pa5jhZVURKjnO1m+ddgPTsMVuXGoWni7xDpx8NyiT7LpN4k7oNogMqxrHEx7NuUnb1G2gDrNN8XaFq9/8AYrG/Es5DNHmJ1WUKcMY3YBZAO+0miDxdoVxq/wDZcV+GujI0K/unEbSLksiyEbGYYOVBJGDxxXFaPqFnqw8BaXpuft+lEPfQhCGs0S2eJ0kH8JLsqgHr1HFVNOuYZPDnhvwshJ1+y1iJ7m22nzIhHOXklb0VlBIbod49aAPW6+f/ABeM/ETxJ/19Q/8ApNDXuuo2c99aGG31C5sJCQfPtljLj2/eKy/pXgPiC3msvG3iCGa8nvZVuoszzhA75t4jyEVV4zjgDpW1D4zhzD+A/kTqmWz3Cg1BfoFKjvirsEeZVftgAj8Kq6ohyCeOhqKU/wB6kfMQ3TM2Jczxj/aH8672AmGwRR39a4OE/wCkxY6Fgc13cI3WytnqMAVxZ5qoJ7XPQpt8z9Ckqq91LI33cVj3BCNkdK1opCS+9dvJz+dZN4VLrg8CjCJqbTOKpZpD0ncEbQKdj5SQfmaoElCg8DPapACiFsnkcV1SjZmJDMy42DrjFQBfmyfuqKe8R3YyckU5024j/iPNdEWoqyKTsVHRpPoTTWiC8jrVuUfwIMn2qAwS9SpFbRnp2NYzfexFLwF+gzXRRKP+EcgB/iNc5cZX6kV0MT58P2pHauPHp8tO3836M1/5dt+RkTYCNj0/rVOP7zn2qzOQd3PVaqwn7/piu6mvdJpL3GLGGHI+6cGnMrEHHTFSRjJVQOMYqXYN3PGRihyswlU1I1Q7TnpUSR7cjtmrhGUBxwOtV2+V8f3ulKMmyITbuhgTL8dKfswm0dSad9wqB1qRMLuYnqcihyYSmxMKoVe+a6S3cGFWz/CBXOxxksrH+9W7sP2bAJAIBz+NeZj0pKKuKDs9CzGvmXOT9RWFrD75mKdmwa20lxdnHZsViamQtzIPWufAJqvr2NJvRW7lmST/AERD2MePxyRTLcpt2fxKBu/EVIsJa2iZuAVx+tMn2wxsq43cZreLTvBdzB+ZWdtv3emCKrlzlvxIp7tlMDqGqNsEA98dK7oRsJIrpJ97PY1KCV4HQ1CflOT
wDSg/ICDkda6GrnRKKeqFuuRnuetUfuuatSMXbHbFVnAGTn3rSmrKx00FaPKxxY7sD1xTDnnPrxRjJLZpas2Wg4Lnn0pgyje3en7sJkelB/1nTgikJN9SexObxMeh/kasjks3vUFkAL2MDvn+Rq4qjDKepJxXNUdp/JfqcOIfvfIvaVJIpdYwDjk5rpkKsuzPDA5/Hiuc0vAkkUckitqN90GAfmGMe9fNZnHmqaGuGlZEksY88nJxsCj86wdZUfacjqowa2y+07SckYrD1vK3Jbtx/KnliarJN9P8gxFnHTuZB+57ZqKIsZgG6VMDkMPembcupH0r6ZPRmMXo0xszE7s9O1RsAqpj3p0vVgfakk+6uOtWtkaw0S/roM+npUkY5pFTbk++anhQDk9TSlKyFUmkinKgfIGetS2A2XcX1qOT5Z29CaktM/a4MDjfg1U/gfobSb9nbpY0tR5vWI7mmKQVA9RS3xzdvjnk0DHHoRXFD+HH0PPkNOA2R24pJE37Se3NKQC/B6Uegqydg3YTa3GeKkwrZXuOaZEhI3MO1SMQCT0zUS3sgZXuEDQoOx/xrPIxIR2zWrMAYVAPSsxziQ5+ldNF3R1YeTs0WdMXd5w9UxXpfwd/5BGu/wDYVP8A6Tw151o+DJL6BSa9G+D4xpOvD/qKn/0nhrmry9+S9D0cvf8AtUvT/I9GooorA9wKQkDGT16UtYHi6CyudJiivdFu9WzOPJhtBiRJNrYcPuXy8DI3bhjOO9AG/RXnthZePdHtrm+jniuLSNd8Wj3kxuZ2A6qLgBcMewbeM969CoAKKKKACiiigAr5/wDF/HxE8SH/AKeof/SaGvoCvn3xiC3xE8RgHH+lQ/8ApNDW1D4zix/8Fli0V2dMZIcAfSm6tFhVzweTirel5MQA644NR69xMu08ba4IVH9bUPU+bUbU+Y55BtuI8f3xXcxkrboAuQF3ZrilH75B/tCu5tcva5PQJiozuVowbOqj70vkYrzH7RKNp25GD65rJuVbzQC3Tj61uXCxpLjHAIrFvWzIMdTXRg2m9F0OOSfNYWMpnDEZqUsQpY/dFV0jVvmPWpPMEiFCCAK6ZR1MhsLFp979MZwan2ZBlI+YniobfEspXHHvV1ELTA9I0zxUVZcrHbUSO3VP3j4z15oaSJsrgZHamuJriTCNhRxzUxsFA3DG7vXO5RTvN6lWb2MW9UCQ/pWqrFNFt1A42mszVF2SY9ABWrCPM0uFOwTOa6a7/d05Pv8Aoa/8uzEmJDH6VApKqxx26fjVu8UB2qsvf/d/rXfB3jc1pv3CzAD+lT4BYMeARTLdd0CsOuAatLAGI6bcDA965qk0nqcs37zGPjZtUdetU5GAYZHI6VoSBUIVf4vSqMyL5xPoKKLQU7X1G55BPWp1j3tjGFFQRruIJ7VdwRGEX7xHNVUlbRDm7aCIcuAB8ua1mc+RGqjrxn8appGkUQBGSatjgoO23d+tebiJKTVugQ0uPhYG+lGOFI/E1j6n80znuM1sbNkzMOrkVg6g5M0g9zTwUb1brsjTW6XmbhIbS4yvO1R0+tZty5dvwFakaeVYIP4SgJqgyIy575qMNJKUn5sip0M5MhDnk9aHXjI9MVIy5J20jBggBPU16ietyL63KpG9famhSq46ipgBjbjrTQCpCnnHNa3N1LSxFtPOOMiq7ABtp54q8V546npVOQAyN6jitIO5vRldjCQGI96UYPFM25Ofzp2Mke1aHS0OwCMe1IwIAPvQp5+opMkgc9aRNnctad/x/RgnPUj8jV1VJJ7HccVR03/j/hB6/N/I1rpHuBbupNcWIlyz+S/U4sV8ZLpaMtw7cnjitS3J8kknBzVDS2CzOD1ANXwAAvoScivFxjvUafkKltcVX3S7mbqO9Z2tDM4z02j+VaRgAcsfu4GPzrL1t8LuBGelLB2deLj2Kmny2fcxC2N1PDBRxyetQk/MR1zxUigDaSMk19E0KUVYCu9hkdetKUy
vToakUYJ9acoH5VDkZubIhER34PNTBQCCxAzwKAOP89KcqeYQW+7ngGolLuQ5N7mdcYEr+ueKlseLmEdy1MuUAuW9qdYjN/bj/aFbyf7p+h270vl+hoXKYlmPU5qNTyB2xVrVFCzyhBjLVTB9fSuSk+emmcTRJhSxxjNC4BOeTnimrjPHBo/n1FVYkfuKgA02Vt0fHBpCSFyeaiY/KeacY63GkJLLtjyOeP61Rc7iD71ZcggJ2xVc4Un2NdNNJHbQSS8zS0ghZJR/sYr0b4Rf8gzX8f8AQVP/AKTwV5tpfMsn+7XpHwfOdJ14/wDUVP8A6Tw1xV177fodmXf7zL0/yPRqKKKxPdCsHxVb6i9vp95ptu11JYXq3MlosgQzpsdSoJIGRvDAEgEqK3q5Px/HLLpFiq2WoX9sL5Dd2lhnzJotr5BII+UNtbrztA70ASaTJqmr+KRq8+lXWl2MNk9ssV26eZO7OjbiqMwAUIQCTk7zxXUVxfhC00ODVpX0zwrqmkzmAhp7uFkVl3L8oJY85wfwNdpQAUUUUAFFFFABXz94uz/wsjxGe32mH/0nhr6Br5+8XuB8R/Ea9zcw/wDpPDW1D4zix/8AAZv6REiQhj1Kg1la4FV0VegGK1LT54I1B6KM/lWRrWUl3N35rxsGm8a5Nng1H+5SSMeA5vEU9d6n9a7+FfLsVKdCTnP41wFqwa7i/wB9f516Hj9wqDoRz+VHEDs6aOrCrWT8jAu0BkBPUsCfyrFvEAKSdznNal2paXywT8p/lWXdoURVP8LV6GCVranmt++MjDMTjpUrkbNq9arI7g/KMipVTuCSWrtktbsiSsy3bR7gEXrVydSAsCdSDmo7aPyIw4+8eMGrZTyoyzffb1ry61X95p/TLjHQrPOsA2DOcZqOOa4O4tjaenFTrbhW8yTrVqKaFgQMce1RKpGK92Nxxi3u7HP6koZo3/vc1pWqZ0mEH7uw5qlrLDzAo6dq0dPcNpqqemyumvJ/V4S8y4r3bGJfrtkcD0FU4+WYdsVoaiMTSH2FUIsknIwcV6dF3pplU37jNGz4hJP8IBq75IyAvbmq9hHuh3/gatiNkCheT3z6V59afvuzOdrUheNYgT3PAqlPDiQY9ea0vKwS79QOBVCVWLsoHOc1dCeu4tUyJVLzKq9Awz9K0Y0WEPK3Ujiq2VjAUfePFWTGbiUZ4RfSitK++iC9xLeKWT55MY3cVaZ+Vx3yBTVlLsiQgHBGaRyFnYNwV/xrkk3KWqK2WhYhOZeegxtrntRz5kh9zW/sO3eOijNc5fFvOfjjdW+ASdRtGsL8yOk35sY19UFZkgKudvStNlAsEI+8ygj/AD+FZi53MW6lqxw1vea7mVXoRsgjwB90LmmShhlj90CraoHWT1xim3MR8or2rrjU95JkeZRiTMRz1J4p6qeA3XvT4VyMd6tRx7xgjB71dSrZsbbbKDR/NntWdN8sz4/vZrdeDacflWNcoFmbPrW+HqKTOjDS95pkAzyfXpS8BQfzpkTEn5hjnilfLqMdAa6+p3te9Ydjaqj0P6UyPf8AeOMdvpTmXf8AlikyeBj2oBbFrTT/AKfFnr838jWuGKg46DOaydPwL2Md/mA/75Nb0Uf7pGA9mrzcZJKev9bnBitZr0IbA4uGbux2mtSJ90uD0FVbNUa5D+1WZFVx8p6ivLxElKpbyIhdK5NmRgFYjjPSsTWIeQO2a1QWUgDkGs7Wm+dB3AGfyp4JONdJFyd0mY6qOfWphjaoHWolIIz6HNSqvB9ete5IibJQOg7kUoRevpTgwIHqaeEHBz061zuVjAjx+v8AKnJH5uM/d7U/AHPrT4l80jH3OxqJTsrgjNu0HnuO+KbYJ/xMLYHqHFTakpjmOOuRUNkx/tO3/wB4V0p3ot+X6HbTvyGtqX/H3IvYNmstX5PpWlc72vJjjg5P41nOMHI71hhlaCj5I51ZyY4MD064pQemetQ7hux3xmnKa6HEbiSEkKT
UTHIIPTFP6LUMnQ5pxQ4R1EC5x6YqtIrfvPXtV6JMqPpVOb5ZGFaQetjpoy99ot6Sx8yXP9z/ABr0v4Pf8gjXv+wqf/SeGvM9LUB2weqjNem/B/jSde/7Cv8A7bwVyYj438jtwFvrc7dv8j0aiiiuc9sK5rxqJjpdpzeDT/taf2ibIuJfs+1s42fPjfs3bedu6ulrA8XXOn2ukxPqXiKfQYTOAtzDKkZdtrfJl1YYIyemfloA5r4eajdap/YzRm+a3s9EW3vnuEkVGucx7QN+NzKBJkjP3hzXolefeENQiuvF80Wm+KtQ8Qab9hZneZo2jgl3qANyIoLMN2BngK2c5GO11Ww/tTSbuw+0zWwuYmiM0BAdARjKk9DQBzS/EK0/tLV0ktHXTNPsXvVvhJn7QqMVbYmOm5WAOecccYJs6f4ruv7Qjs9d0kaU1xaPd27C5EwZE271f5RtdQynAyOuDxXL3/w51e61G+tRrV1Jp8uhNYxPLFbogbLbIysaKQq5VsgDpjJHFbP9ma14o1e1uNY0s6VBaWFxbtmdJTNLMFUlNhOEAU/ewTkcDFAFrSfGVzfXWlfbdGaysdYVjp9wbgOzHYZFEibRsLICwwW6YODTLXxxNcTWd0+kGPQ768+x2199oBdnLFVZotvyozDAO4nkZAzVLS9J1+7k8L2Gp6Ytpb6ARJLdCdHW5dIWhTywDuAO8sdwGMY5qtZeH9dXTdH8Ly6aEsdM1CO4bUvPQpLDFKZIwqA7w5wgOQAOTk8UAeikgDJOK+ffF+D8R/ERGD/pMPP/AG7w17xqOmWGr2htdSsre8tyQxiuIlkQkdDgjFfP/iLT7TTPHfiCzsLWG1tY7iLZDBGERcwRE4A4HJJ/GtqHxnHj/wCCzesZWaJQoyCME+mKzdaZpJjnoOnvV3S5dkGD3JxVTWWXdlRyBXBQjy4t6Hzd/cWpkWibbyHnPzqP1r0qFM24J9K82t/+PuD/AK6Ln869JhkxFsI4IyDXBxNzfu7eZ6mCac25HNXKf6S7erlaybxS4Brf1AqrybQCQS2BWLcfx+hPy134GbaUjyaq5ZlBCQcBc1dt4CoMhOeOBUMKqOGGSK17O3yMv90c8114msoRJ+J2RPZW+wh5fukZwe1WGi3ytK3CLnA7UkMnnFo9pAHQ+tPlkDFYE/4ERXgVJzlU13/JHZGMVD+tyhNHJPOQuQhxyKtJYhVyCfyomnS3iO0AsPQ1W/tCQRhirDPGPSt/31SK5NEZfu4v3tWZGqL+9YddtX9OYCwhX/Z5rM1GQvM57NzV2ycJbQdwVxXrVoN4eKf9aGadkVtUGJ347CsxOWOPStPU2DzSf7o/nWdGAJD7CuvDfwlcuD91mzpCZhbJ44FW/nQD5ck1Doo32sgxgk9atiXKb2Qg+hrycRJ+2kibFcRyOS7gqF7etUWcoxZxgscAVqszTAKAVHU1QuVQuGYYCnjNa0J3dpIykkVY12uHc9ccHtWkASojjGW74qgI2knDHhc9DW3bRpCjSvjJGaeLqKKT3Y4R5mPgt0gVScbj7VTnZGkk6Bs4qUySNJuJO0N/SqV0wCPIvUHn865KNOTneT1ZpOStZIuxSbYZFIz82PwNc7eMPMYe9bgk/cSPj7vP1rAuR8xz15rvwMLTkwi7yR05G7R7c9H2/wBayDkbm9+lbQA/sqNsZxHxWQEJjYnrndXLg5fH6v8AMVdbeiEUMCHGc45X61KCWIDDtk1JGmSD61DOSJdoBHbNdHNzSsY2sghh8zc33djfnVxWiaLKkZPHFVYpMZx93vSAEcI3GcjFRUi5PVlJ2JWG5QOm0/nXP3vE7ZrfiBztY/ie9Y+oxje/qCa6sG7TaNaDtNXM5F53dsUqNj5T3oDYAA+tRqcuGr1Nz0rOV7kh4k6+2KCKVuBuPNIDuYemTSJ8yxphzqERPT5v5GulgdFg6ghs1zdjgXkYA7Mf0NbCE+UFzwRkH0Nebjoc8l/Xc4sTL94n5E0
HysSD2FTbyASeAFzVKJysrjsUqeXc8Z2tgFcVyTh72pzp6EqSkqpPTPX2qprBEkaOvfPNKshP7vpx1qO9AFtGm4Ejn8KulTUasWOMmZicL9OtWY04zmq6ck8cE4q2inGc9O1ejUZVVkqLnHFTeUMg7unWnQqCoOOTT2iIIbdwOo9a8+VT3rGSRHsCtnseKlSM7cqMKOmKkVBu+bBGMCl3biIk4HqKxlUb2KSMO9JkuGz6/wBKZpq/8TODP9+pr9Nly49Oaj07/kJ25z/HXqX/AHDt2/Q6ab9yxrXWReTALxk1mzIc5A/Cte4Ui9k75YmqUyFssBj2riw07JeiOZ6SZQ2nOSMcdaacg+3rVnHOCtMKj04ruUylMhB4yDmkCkn1qTZ2HHNPjTBPeqckiudIfDHxuPAA6VnXP+sJ75rUZWWLOce1Zc3EjZ5pUXeTZeG1m2WbAZMmP7tem/CD/kFa9/2Ff/beCvMdOzlh7Yr074QjGl6+P+or/wC28FY4j4mejl/+9y9P8j0Wiiiuc90K5nxtqGoWNlpsem6jDp095fpbG5nhEkaKVcnIJHXaAPUkDvXTVzvjGdxp1pp0dvZTNql2tmPt0XmwJlWcsyZG7hMAZGSRzQBz8Z8WN4r/ALEHjK0kJszdb49MQmPDqu1xv4zuyDnna3pXoVcD4DuraAaLa2Wl6ZZrqWijUbkWVuIiJQ0Y5wfune2M8/Kea76gAooooAKKKKACvn/xd/yUTxJ/19Q/+k8NfQFfPvi9sfEjxGB/z8w/+k8NbUPjOLMFegzRs0RY1LD7vzCszV28uXPqDWpBEWK+mBmsfWchwX/vcVyYXXEbnzkVeyZVtm33EfH8Y/nXo8Ck2SZPzDpXnVqMTxAd3H869Bgk3WiEf57V53EibVO3c9DAtKcvQzZ4kM8jsMnkfrWLdIB5fHB5FbrxhxMO7E/zrMvImZ0C/dVyD+lPBVLNJv8Aqxx4iPUq21ujkMw5rTjbdH5a8A8VWjtieR0PvVpmCxgJ97pWmInzve/6GdNWWpK7rFGFj+8PlqFmFuCf+Wj0JGsB8987iOfrTfKDuLmTPy5xiueMYr0/N9jVtv1HLbJje+OTmop/KYY9PekkM87qq42d6hmsXQbh1+tdEErrnlqZSenuoxdQOJ8DoBgVdtSPs8A9AKz70bpih6jBq1abjHEO4UZr2akf3USZfw0Numy0pP8AdH86px48xj3Iqa6cq8ufuhc1XgwXLDoy1tTjaBcI+42b+jEi1cD72c5q4WEuGIIHYGq+jECykb0NafkrI4/ujBH1r5/FVFGvJsuMXJESDeQigjPWqV5brLcBMfKBk/hWwSkIwv3m6VlXzGGUAfeessLUlKp7ugVoKMdSoPnuEiToDzWiYmmlZc/u1AGDVKNkthkfeJq9NMyKEj6kc5rpruTklH+vMyp2s7kd1Oi7Y0/SswncZh2q81qIUDfxH3qg27e2cYJOfzrfDKCXukVG29S5CwaAjuQM1gz5aZyTxmtlQAg9iKxZRiWTHc5rswitKTLpPU68IRYRKOhQGs9UBeTdyB0FbDrt0mPH8MQNZS43OV74P6V4mEqOSm13f5mteFnH0GwuoRWI+6ciqtw5Zg2cnJqTIlmZPciqjgLnHQDFenSgua/U5W9LEkDBQFI+V6mMbR/dIx1qvBgxiM/hUrNIg5xgVc172gh/nZYZB4IrLvmLXMg7bqv+YWJCdRyfxrIvXJujjucGt8NC0zaguadisvDv6A4FIUwM/iKPUmnHJC/rXonottMTOW2GhAojGBgYpPvPnuOtIuZAxHQ9KBtaFzTgDeJ7hv5GtZQxhK56H5aytNGb+L6MP0Nau1thx1B4rz8S/f8Au/U8/FfGNUfviO2CKdFIz7QDjOc/lUSHbLg9WqRQF3Y69RWMkcwjkh8jjHBqC7fcm/6D9KmHzMFbqcmi6RRZs3PGBVRajJXKjuiogGxTjg4FTR7mPXgUyD7m09+aehbII6dDWs3
uEnqaEIXZkd6eUYkcjHVqgTA5H4fWn5cEdMHrXnyi76AmSNw5yRjGBRCCpytRNkyLu+7wBj1q15iwx+WvYd6iV0rLW5S1dzC1KUi6frzio9NBOqQg/wB7NOv2P2hj2z/Sm6WSNThY92r1rWw79P0OmHwXOgnys0hPZjUEibxuTgHrU+RLLMp6lywqEFhnGPevIp3S80c0tymwweR1poUN0HHQ1YZj/FTeGHHTvXapuxmQCMHgDmpEVI2JxyRT/lbIHWmEKrEjOSMU+ZvQBpJkJJPy9gayrhQJWHvWs8WVDN1ByKyLjHnk104ffQ6MN8RPY5DSY64zXp/wiOdM18/9RX/23gry+wJ3Sf7ten/CD/kFa9/2Ff8A23gqMR8R6eX/AO9S9P8AI9FooormPdCuQ8e6jb2+nRRS2balbxTxyahYxQiV/sxWTDkH7gDJuDccoRkV19cV4q8M6/ql9qz6TcaalvqmlLp0/wBrEm9MGb5l28dJu/pQBJ4LTQrSaa10XwzqWkh4w7y3Vm0YcLgBd7Ek4zwPTOK7GsPRU8TRz7NZOkG1WLCfYxJv35GM7uMYz+lblABRRRQAUUUUAFfPvi8D/hY/iQn/AJ+Yf/SeGvoKvn3xeC3xH8Rj1uYf/SeGtqHxnFj/AOAzWgDMyhRxgA1l6ygLAe5rdshtizjk9KzNdjVJjz1X8sV5WEq/7Vynz7jampGNbnE8Wezr/Ou28xlswkYyx5/WuIgYG5jB4+YV20cRltsoTkYxinnSjeDkbYfmu0uxWRygmJ6gtx+NU7mVjJEpAGTzVuaPEssWecfn61Smy03TOOlc+HUXLm/rYxqtpWHb5d5CqCueD+FSCMQ5lLHJ5waZHK0e5Cmdp4PrUqRnO9ydvXBq5u3kvzJirjFEkjjzBhMZz71IS00wjUfusckU0O9wTGqlV7MO4qWQiACJOZGBx61nJu9ra/l5lJaX6CTXEcIVRjOaqPc3DOQYxsPQ1ox2aBQ0pBPXkVOVt9mPl4rFYilTekebzNfZTlu7HFXvEm49auW4HkRfrRq6IszAY65FS2KboFJ6AV9BKonQjI5JL3UjNvOZHHqKqxrg8dhirt7GUnk9iKpDKsT1zXbSd4KxtT+Gx0eiMVsmLD5Rjn+daPkZI2s3y81n6N81q4P3c1oRwSRjG9myc185i3atPWzKhqthyxrApyxJxxmsq9lZZCWAz2rYFuVy0jEgDoa56+kkkmy6FeuB+NVgUp1G73IrpqKWwkHz3Ikc9T0rbiVYozJJ1IzzWHaxSNOjEFVyK2fKeeVskqgIA9DW+NtdJvQilfoQMJLpuRhR3H1rLn3CRxjo1dDJLFEDGm3dxwKw5drXDEngNTwdRtvSy6CqxUet2SRAC3dmPCkGsmbHmMc9a2oWDWzKygAt+YrEuMK7Lmu7Cu85BTWqO0E/+gbGx9wY+mBWWzHbjFWpSfsqsBx5SiqDyFEAI5rx8LSSu11ZpWm3a/YiXCuxz1ORVW4BBA7EmrRI8wKemOtVpmyAfSvVpfFc5uosDZQ+vapDMwA3gdefpUUS/KSOueKl3gj51x659KqaXMJ7jSVbO38ay7tc3BPYCtNwpxtOOazbgfvmOa3w+5vh9JlQglASOe/50oYncV5I6UjEkkAdsUsa4H1rtPSe2oA/N+HNKDhjx8opkgKKuOT0NPAyTnvSE0rXLem86lD+P8jWuysFYqM4JrK0kf6fFn3/AJGtsI+1iFJ5I+tebi5Wq/JfmzhxOskVI+TuA/hyKU/60n0qVImSaMAZyMEelMkU+YxA4JNZcyctDlsQxufM5H1pLh9sGD0ZqRQ3msNvNRXwPk46Hr9K3jFOaKgrySIlfg46jpVuEkkYHynrWch/dqR61pW5xtGOPWtKysi6sbFgRgkYJwasJAc55wetJFFyDnPcCrSRvkdcMOfavKq1baJihG5EIAkYUk8cA0MVRNg5YetSmM7QpJ44z601lVABwzVip3eruXy2OfvWBmZ
RyQeaZp4P9qQ+m4VNfL/pEm0cnBqGwU/2nAckfOOK9xP9w/T9DSnblaOhBCzSY65qCUZcsvJ9KlZds0hHJJyKRk3kEcH0rx4tJ3MXroQFZO6cYqMhhzj2P0q5hlx8uQajI3DJXHPT2rWNQhxKjFSCM47U0Hk45OKnKjncuB61GcAnaOo610KSJGvkqC3FYt0MzMR2raYYXLHt0rJnAMjYPWurDPVnRhXaZLYDcZPpXpvwfydJ17PX+1f/AG3grzPT+DJn+5Xpnwf50nXv+wqf/SeGpxG7PUy//epen+R6NRRRXMe4FFFFABTJVd4XWOTy3ZSFfGdp7HB60+orhJZLWVIJvJmZCEl27tjEcHHfB5xQB5re+MdQ8P32qKmqXWrR2WmXFzKmo2S2zLKhUJ5eEQyISTkgMBwd3Izsf2nrXhfV7W31jVDqsF3YXFw2YEiMMsIViE2AZQhj97JGByc1YuvBdzrkxfxLqsd9GtrPbRRWtp9nVRKoV2OXclsDjkAelT6f4Uuv7QjvNd1Yaq1vaPaW6i2EIVH272f5judgqjIwOuBzQBk6Xq2v2knhe/1PU1u7fXyI5bUQIi2rvC0yeWQNxA2FTuJznPHSq9l4g11tO0fxRLqIex1PUY7dtN8hAkUMspjjKuBvLglCckg8jArZ0nwbc2N1pQvdZa9sdHVhp9ubcIynYY1Mj7jvKoSowF65OTTLXwPNby2dq+rmTQ7G8+2W1j9nAdXDFlVpd3zIrHIG0HgZJxQB0uo6jBpdobm4S5eMELi2tpJ35/2Y1LfjivA9cvYtS8da9dwJOscl1FtWeB4XGLeIco4DDp3HvX0PXgfij/kpHiP0F1D/AOk0NaUtzgzJ2w7N+yCjG7GAqkflWJrhPnkseCOnpWlaM0qLg8Dg59KyNbYSzEH0ryMDTaxbbPDqSTpJeZmW677lD33D+dd5YsIoAD1/+vXB23/HzGR/fFdmC2zcp44rXPIe0UYPY0w8+SbkLOoMjyBctvYZ9s1QuCqThQv41pwOHdwf85rMlG+dB65Jrz8K3zOMuiCulbmXUVGQn5gN1SKWlymCq9OabFBHKQ569OtWCS0eyLhugJrSpNJ2X/DGcItrUaWjhj/dAFlGMLT4oVVBcTYLjOM9RT4bVIG81h8x6896ZKHnkA/5Z9wa5+dSdovTq/0N+VpXa16IrSzTSvhNwGaieKcEnzDj0xVue5itwQO3pWfLqgAHDc+1dlCNSSXs4aHPU5U/eepkapIWnz3A5q7p0w8iJSuQwPNZ1+2ZWb15q3ZZS1gAPAr2qsF7CMTJu0Ex+pIPNmI9AawmLCQHt0rZlcyfaAf7o/nWUw/eha3wl4w5X0/yLovVnUaBHvsznoTV5pmgZVILEk8iqOhsy6fIAfm/hqdr1HJcg56V89iISniZ3V1f+v68jVSUYLWz/wCCWtzyyckhR1B71nXMC3F8SMbEGMU59R3IVTIJHcVVWSUwlUP7xjya0oUKkHzbdP8AgmVSpGWm4s8gRwsa/dParVxfLDFtTlu+D3qLYkUZY/ePpWbG67yz+ufxrpjRhV1a2/Ey5nHbqXo1fcZZHzkd6zWYuzkHHzH+dTSXjSDap6EVEoAZj6muulBxu5ENl6PAiweyisO5YNKx6Vrq4aA+uDWHMM78/exg1rhI2lJs2oayOzMbGxh+b5Qq5/Ks2VTubPOORWyiA6ao7FAf/HRWW3IUDp0NeLhal3L1KrxtYqTH5Qw7cVDMMfQjFTbcYU9MGmFS8ZY9jxXqwdjlGRKVjDZzip/kdckYJ45qC3c4DD7verDIjAkdaKnxaj6kJVR0rKuSTK/5VqSYUDHUnFZlyMSt9a6cPubYf4iBACQ3TmmtlWfHrxTgOcHpu4of78i967Op3rcQ8496Ug7OvPTNAGDk00NkCgN9i9pGW1KD6n+RrqYdqQ8kN8xB9ua5TSW26gh9M/yNdFCU8k4BzIxavGzKDlUXy/U56ztMedolZvyqrI6Hhcd
CR7UyWZt5C9CSKqB9q+5qaVB7s5JSuTjBlUg9etR6hH+43dyajjkKknPf+lTXkoe3j9gM1uoyjUjYUdGZSZVBxnnFaFpKNiKR1qGCMsM9s5pzKIcgenFdNRqfumlSSk7GvG4U5B4Wni7YYXB+Y9fSseO6ZcLnpUy3m445rz54R31VybyiarOZDwcYH60+PYF3OQWrMS7YtgHkcmpN+QS5rCWHaVthqprco3h/0xyvSoLQ41SH0LinyuRKSaZbH/iZwD/azXrJWpNeRdPr6HROuJXwO5waawycg7Wx0qzCwlEi90OBUDoGfcOGI714MJ62fQco6XQzLAcnNMLZGcfhTiJAc5GO9Rncee/9K3ikZsQkOpDKcdKrt97aoxx1qyzYX5gcVFIGJwnBx3ram7EMhdcL8xB4rGmOJGFbMq4TLctisafiYYr0MLrc2w/xMnsOWk+lem/CDH9la9jp/av/ALbwV5pp4+aXH93Nek/B7/kEa7/2FP8A23gqa/xP5Hp5d/vUvT/I9HooornPdCiiigAooooAKKKKACiiigArwXxQP+LieJvT7TD/AOk8Ne9V4P4lx/wsXxKP+nmH/wBJoauGjPPzT/d38izbSMq4X+Ks/WCplOOmMVZtplhbax+Y/dBqlqSfvuSa5sPC2IufO83upFK2/wBenpuH866y2ciF1Ncpb4E8fPRh/Ouji4ldweoxjtTzOKlZGkZWlcvxMFCA9W/nUc0Q81cfxHIqCOQOAQeUfcfapUm3GAH72CQPyrxnTlGXMjdTUo2Y6O08xiwzg+9X9scEBPcCqymVJWCp8nY/hUwjCDzHY46kGuOvKUmuZ6djoopRTstRsMEkrHzBhCM8Uy+lEaiOP72OKkluvMQpCAW9vSqcgW2RnlY5Jzz2p0YSlNSn8kKpKMYcsPmzLkttzbpCcnrg01oYwveo7iWWZ/kX5faq8kEoGfmr6WnCTS5pWPKdr6FK5wZSPwq3CcW0B9qo3GTIrHgirqjbbwHsRiu+ovdii5r3ENLYWb6D+dUhgysfQVaY/JL+H86pj5ZGHY96umtGVSWjOg0+Yw2n0ANRvPHIxUE8Co7Z8QDHOMAj2qyIo2IIOK82SjGcpPqZNt6EDNuIVOtSqwgBH8RqZvLj4GCx6UwAR/NLxzxmoc1JWtoFrAkLuN8vbpiqEtsZZC5ztHI5q20ss8hVV/d9QwplxJkFIhkgYrSk5xl6/gD02M5W2M2PwqdeFGfrTjZmNQ7ZyRTEJLEkcV1uSkroUtSyVwmO2DWJMRvkPpkV0EZDw8c/LWBcJt8w+rZp4R6tM3w9ubU7aFs2W1epRf1AFULhfKQEdc4NX7YgRqx6eWv8hVW4ILD618/QdqjRdVXijOkbDHPYU1gVt/m69aW4RiQyj7xP5U+Xa+OeGGPyr1k9EchFbgjAI+U9aneAbcqTRbhAgjJ5NONsyJgMTzmonU9/ew0tCtJHhc9jw34VlXSkTsT0FbZQ9cduRWReD53z612Yad5GlB2mVI+v40khJkOOueaG447Z5NIwy4I6Hiu49BLW4gOUJ96aV2Pkd6dtyGA7mkCY46jjmqLui5pg/wCJii+x/ka3kwbbHcdKw9Nx/aUZ7nI/Q1r4Lqy9ME4xXl4xXqL0X6nBiX7yfkQB8SADpu5qHdgc96lWP5zyahUBUC9dvrVxt0OYdtBkwaS5h+6vYjFAcEhjwAalvD+9ULyQB/Ki7U0hq6Y6JljjVT1NROi+ZsOcsKk8tcD5uWx+FP2AqN3DYxWfMk7iKgtSCcdKQQsG6datKGVsY4HelG4tgrx2q/ayHzMgRXU9BTskk7utTFWOOOnWo/J+XcxIJ7UudPckqMTuB/hpbT/kIR/71OfIYjHyjvTbfi9gx3cVu9YP0NoPQ6GzbMkj+/FSMqSyE5OcVUico74HOTgetXE2v3ww7CvAqx5ZcyLg7qwwwntSCF+45qXyyoGCTRlh2561HtH0HyogMUmDkDqajeNj2GMVbDM
DkrxTGBkbpgY7VpGo76kuCM2WNRnOcmsS4UK7Y9a6WVY4lwWyT0zXP3K4lcf7Rr1sFU5rhS92Q+wz+8x1xXpXwg/5BWvf9hX/ANt4K830zkzA9kr0r4SgDTvEAHT+1f8A23gqq799o9LLv96l6f5HodFFFYnvBRRRQAUUUUAFFFFABRRRQAV4N4m/5KJ4m/6+Yf8A0mhr3mvBfE2T8R/EgH/P1D/6TQ1dPc8/NP8Ad38ieONMhnxkAcmsrUi3mjJOeTWqkbyN1wvGazNRIMxYqeMgVjhX+93ufOLdFKAkyJxg7x/OulilAOMdTj9K5yIgTRf7wrbOQGI4wa0x0VJpFylZ3LVo6qshIHJqTKx3EbDBHOPpUVsm9CMc4HPrSxjM6I4+6K8qaXNJ/wBbFRbsi/FcEDay5INSjfLwchagheNlDMMN05qUStKvloCpPevNqRs7pWOyErrVjmeKFcoFLDjis26ikupFkclEHVT3rUS1Rfmkwe5+tZ93K0reXECOSM9q0wklz+5q+7JxCfL733FCWaGI7V2/hVV71WUjA/Op2sVDAvgmopLWFQcAfnXu0/Y+bPOd+plT4Z8575q2Mm1tz0+XpVKbiUqO/T2xV8/8eUHrgV6FTRRLn8CKp+7Nz2HFVjyAe4qzxtk9Tiq2OT9a1h1NKfU2NP2+RubGDgGr3kZwFbAHpVPTUDwEHpV1YpuMP0615GIlao9bGSXkBjWMbnIJHTNRYNwfmG0L2Peplt3x++YMM1FIskjfujtHQ1lFpvf5g0NMqp8sagkelPCRQoWbG4805YI4cscbqYltJMzNI2VJ4BqnKL62X5hZlS4uGkwAvFQZ2vtxxWhdRRRxYXAOR3rOcneWrsoOMo+6tCGnfUtp8oJBwB29qw7rL+YmcZHWtqIZikJPy7ayZwC5I6c1vhtJM1w7tK51EEw+zIM9EX8elSSRBgSDyRkVlozIET1UHNaUTE7VJ+ZVxmvGrUvZvmiOM+bRlMAuMkYKg8VHNGQq8YPWrpiIuRzkMOlLNCJJMjptIrRV0muxHI7FaCIOjHIDdAaeVkRQOWwOtWbW0LQZBAbPBqR4JUU5ye/Ssp4iPO1ctUpct7FB42IDc844rHvkImkGO9dOYSVRsEbsYHoaxtViVJpeO9dWCxCdSwnFwaZgHBznueKaTgY9CealKgNyOhpmF+YEd691M7otDgAoBpo6474zSEnA5701s4z0J4ppDjG5e0oZv4T/AL38jWsQzbiOME1kaQW/tCNSeRnP/fJrXw+8lSQuSCK87FfxfkvzZx4pe+V2ywyGx3prgZJHp0qXy/3bZ6gcVA7BSuPWiLu9DlQzA2ke39aluDs2Mf7q8/hSBRg0t1hgoYccfyqr3kik02OjO89eeoqckPhs4PpVa1VnDOONpqYDdgrxzWU0uYTVnYELbgME+9Sh+SCnToaaiuCAckjnNSbgcjYayk02CGFyVB24PU1GA0gDElc9qeWJXIBB6mo28xwGBwPSqihFd2IkK7eMdabaAfa4yf73FOZ/3hXB+tNtDm7jH+1mur7D9DWOxohyJX553HFXAwPfaapou6QnPINWsKTyMepNeXVS0FEk+YMD5mcDpTTI2RyeDn/61Djg7WAOKh+YAAtk+tZRimU3YmMzbScE+1J5jFsfdwKjUOActnmlCtvJLcelVyRQuZg+3GGIY9awb04kbHcmt2YKq5xkgdqwbpgzscc9q78FvcqHxkmlsC0/+7j+del/CT/kHa//ANhX/wBt4K8z0xCVlK9gGNelfCHP9l6/n/oK/wDtvBWlde+2epl/+9S9P8j0WiiisT3QooooAKiubeK7tZraZS0UyNG4DFSVIweRyOPSpahvLSG/sp7O4UtBPG0UiqxUlWGCAQQRweoOaAOF0Oxso/Et5qXhWxjttItLOW3laAbYr653KRtUcNs2sC/cuRk4NZOjWVrYaV4D16yJOr6rPEt9cbiXuxLA7yiT+9hhkZ+7t4xXd6N
4S0nQHjOnC+jWOPy0ik1G4ljVfQI7lR09OKLDwjoWman/AGhaWAjuFLlMyuyRF/vGNCSqZ77QM0Aefw2sK+FrDxYAf+Ejl1xEe43HzH3XnktAf9gRkrt6DbnrRq9rDP4d8YeJ5QTrunajMtnc7jvg8oqIo09FYYyv8W85zmvQF8I6Emsf2qtiBd+aZx+9fyxKRgyCPOwP/tYz70XXhHQrzVv7TuLEPcl0kb964SR0xtZ4wdjMMDBIJGB6UAaGoyahFZltMtba4ucjEdzcNCmO/wAyo5/SvCdXe8k8c+IH1CCCC7+1Rb44JjKi/wCjxYwxVSeMfwj+tfQNeDeJf+SjeJR/08w/+k8NXDc8/NP92fyNSO1lkVBFjB5bPpWbrNuBMQOgro7R0hj3t1ZVH6VhauJFwGHzuCxH868PA15yxVui/Hv9x49elGFJNbmBCmJEz/fH8620BYup7msmE5njB/vD+dbBG1zjtzXtYt6pHI23uXYRs47AUpG67IHcYFMhzLICfujkVaFt5cqnndj+deFUkoSd3rY6oRclpsPS2ikbzD1Ix1q0GWOMBQc9BTRZB2yCcHnrV+KCOGPcSePWvGxOJhpdt+R6dChJ3sreZRitriVnExUoemKrXYisY24PH41otebztiwTnnjtUFxYByJ5cj2zxTo15KovbaJ9EKrSi4futX3OXla4nl3LjZjjIqBra5xlsVu3MkUTlB1+lUGvY3yo6jrxX1FHETlFOENDxZ01F2b1OfmHzc9ckVeIJtoCOhWq1+AJmI6NzV6FANPth/s16dWfuxf9bClrBMzZPlSQ/T+dQqQXA7GrM8eFmH0/nVVM+ao7CumDvE1p6xf9dDd01WeAgYwCfz7VeVpUA3EZ71V0uJmtmZRycGrxkIGWAzXg4iV6sktSIrS4xUlmcbiNveo5JUhk8tAdxGanVpW4RR70wxR2zMWJy3PNZRkr2l9yG1pdfeRJbyTEmUjbnIp01yf9XFnOMdKUTSTACIA1b8uOCPnr70TqcrXOrvogjG6937zIa0lbdJLjA6YqixBZhWvNdeYdi4weOlZph2zMP4e1ejh6kmvf0MZJJ6D48m0P91qxm4yvoK31H7javQLzWJcKVdz2wa6sLK8pGtF62Ogmgby43HQKtWIV3PuXqSD+lWbUB4Rnsi/yFRhhjcvXOK8N1nJOFtivZpWkEfyyeY/UOVGPSpXj8kED6fn0pcjyzjqSTUikvCS4G84rmnN3ubRitia2gkMEbjHHX86sF+mQeOvFOtCyWkfHGDn86uRyRNj346V4mIxEudtq6Tex6tGgnFWdtEUXjGd/Y1yusrm6dfU12c4CsNvSuR1s7bmRgPl5B/KvVyOo5Vb+RxZjDlSt3OdlHzfjUJUfMe5NTyHkVX2nJ9ya+3jsYU9g4PBpZMEfSkCg/X/Cg4J2nqao06lzSVzqsf0P8jW4X8oM2DgEg/nWPo6n+1ofTB/ka6dVUITjI3EH868bMKijVV1fRfmznqx5mY7BhOuehNQNGpwfbFakse+TIHAas9wAmexqqVTmWhxtWZEFKg56ZGKfdgbxn0pzAnBA4yBS3sW7A9ga1UvfVwQW7/ucr2PNSRxlnyv1qGL9zGM9zk1eSJgwkTkVjVlyt26jSuxY3IOxuvelMqEkYPFPLjPPWozLETtzz06VypXd7F7dSAyqU3KDzzUTb3Hy9KsblJwo+tMYSFcqorpi7dDNlJ2AcqRUdqv+nxj/AGs0shxOVNLbrjUISP71dj0g/QuGhqxQkySlfvAnbn1qdwr5Rwc45xT7eJt0jY5ySvvUjFQcPwcV4k6t5GkYWRVMag8dcYpjR9Pbn8amKox6mjyVC4yatTtuS4jRG2PlIzTjDk/N0xUiQADgnrmpBCoJYk1nKsl1LUCpIEQYAPArnL7Imcr0zxXUSlACBXO3ww7egPNell8tWEXy1ETaCpaS4Hbb/jXonwiIOma+R/0Ff/beCvP/AA5+8llx/cH9a9A
+EYYaZr4YYb+1Of8AwHgq6r/fzXoepgF/tMn5f5HolFFFSe2FFFFABRRRQAUUUUAFFFFABXhHiRc/EbxKfS6h/wDSaGvd68G8Sk/8LH8Sr0zcw8/9u8NVA8/NP92fyNywkEkREnG01maxcBjvABO3Aq1HC5ZSjlR3HrWdrJUSqFxjBrx8JSh9a5l1/A8SpOXslFmQjYuYv98fzrZzuJQdTkfrmsaLH2iL/fH862FG2TPck17GLtdHO+hZt32uvOFVsmtmI+bLGT2JJ/SsSLCwFjycZrXs3/fxLnOeCPTNfPY+N05Lpc7cJLVRfWxfEUqOVVSV6g1MtqyoNztjqc05JmQbShJHemSyyyKVCsue9fMOdWTtovM99QpRV9X5Eby29sC7MoA4yabdTG6hCwjI9qhNpuAEz71A5B7mp0kghwo25xXS4Qi1OF5SX3HOpzknGVoxf3mRLp+ZS7McntSNYwgEjGfXFOne5mlysbgDn61E9vdFtwZwPTFe7CVSy5ppHkSULvljcwNUhAlKZ5FW4kzbWw7betV9SRzNnBz3rQso/MtoB325x617dWpy0INv+rHGlfRGLPndOvYY5/GqSjD/AEra1C1Mc0y46qG6e9Y23a55r0MNUU4XX9aFw0vFnR6PIVtMAZPUfStAyRbsnHPSq2jRqtkGKgk8fhWgsdvIAw2Edq+bxdSKrydnub04txWpEJVKnywCfaoxbl2zJkYq2VhiUlVXPtWfNJNOMKGjINZUbyb5NF3YVLR+LUn82CJwo25zioDZSXBLM7KCc4pIoBvXewY571PLcSAYijZhjqK11hK1N692RpJXnsJLBFCmSRn6ViuNpcE9Cf51rLZXEh3yOxU87SKz7qNRKV4zu5rrwkkm4812Y1k97WQ6P/j045YDkVhTnDMOvFbjEJEzA9sfWsJwQ5B5I716eDWsmFPc6XmJS24jdGvH4ColLdSTjO7/AD+dSzHckXuo/lUZUm3bB5/+vXmw2u+pMtycT7mKnjAqWOVi5B46ge9Uy4CmTHO0EipGlw6sBxWcqSeiRam0dFZ3CeSASMGpmCHJVsemKoWZj8kA4z2p8sb5JWXAxwK+aqUI+2dnY9uFZ+yV1cLiYphSeB3rlNSlLzzZ6bzW9OxCBXbJHBJ/irmr9j58v+9X0eUUFGR5WKqObSM1/v8AFR4559c09+GBz0phy2ccc19Qi47EYYhhgZ65oLHAbHzYzinKuDzQMEbgM9qo2ui/oxP9pwE8df5Gu0gEf2bYSMkk/rXE6Vn+048dg38jXXWyEwli/JHHtXzmdQvNO9tvzZmnae3QgvpUic9AvrWWSr4XjgVaveVAJ3EcmqT4TLDrgjFXhYJU13OCrK8hd4XIPduPyp96OEwfvKKrMSy5I56/SppG3xIc5O0DH4V1ctpJkdB8BQyZfGCSoBq7h4pMKu5MZzVK1RTEd3XOQT61cSVwNpUt2zXNX+LQuA4yISOBnvUbeWT2zUhC5HydacI0P8I9qwUlEuzZCCobAAzxmkcvj5I8irO1AchQSeDSEleFiOKFU12DlMG4H+ktkVJbJi7hIPfNTzxhrhvlqOI4u41HrxXp896dl2Mr62Okt4v3OQeaik/2lAOKmsgxhRN3zLwT61JJCwYgoWz3xXzHtOWq1Jnpcl4JozyF3dccUBVxndx0FXfJGeY6BAuPuCtvrMbGXsWU1QHgOetTLGAxO4nI6VYW3Gfu496lWJAMkA1lUxK6GkKDM+SMnomR61zGobhI6hc5JrtJOFwI+vFcrqsDJeMMccmvUymvzTaZjVhySTJfCqBbm4HpH/jXe/Cj/jx8Q5/6Cv8A7bwVwfh9vLuLkgYyv+Nd58Jzmw8QE/8AQV/9t4K6pp/Wqj8kejl8uat8n+h6DRRRWh7QUUUUAFFFNkkWKNpHOEQFmPoBQA6isnSfE2ka5DdzWF0XjtG2TmWJ4vLO0NzvA42kHPTBpmj+K9F164a3068Msoj80K8Lx74843p
vUb1z/EuRyOaANmisTT/F2harqP2CyvxJO2/y8xOqS7ThvLcgLJjvtJxRF4u0KbWP7KS/BuvNaAfunEbSqMmMSY2Fxg5UHPB4oA268F8Tnb8RfEh9LqH/ANJ4a96rwTxRg/EbxID3uof/AEnhrSnuefmf+7v5GrvdW2qeoH8qx9ZVkdBnnnNbAnWF/mz90fyrF1WbzBvPXJrzcCpe2TtoeDNrbqZ8HNxED1Dj+db8QEkjDIJUH+dYFuR9qj/3hWvboY7h3H8ZINehjY3+4iTs0WI2DoFHc8/StTTsfa/RicCs2C3IIZO/8q1LWIG8jfsuXrwsdKPJJJ9GdGFT54vzRvo8bDLDmobm6VEOFJ+lPS3ikbzOcn3pJxHEpPPFfGwVP2i0b8j6ibqezey8zOK3EoIDYzzyKkSyQYaXBYd6V71APlBz9KoXUt9LInklPLJO7NezThWqe6rQX3HlTlShq7yZr74Ui4I/OqjXcXOOfxqvDZHgt1+tSrpcMUZC55OTzWapYam3zSbZTqV6i92KRh6p5YfcB98Zp+nHEcBPZeKrauqxgls/uycVLaS+WsAPQrivp3C+FSWv/DHi3tVbY/VGR5Zz38sCuZYbpcCtTUJCJJtp5ashGAkBP0r1MBS9nSt/WwruUpSO30GFTpyOwHKgVox6bBGgVFAUdOareH4hLpSA9Cq/yrU8iQEgYxXwuPxMliakVO2p72Fw6dGMnG5Tks4UG7AyOnNZVyzeftRTyPSuhNnv+/0+tUrgRW8pAzVYLGLmtrJmeLwr5b25UZEdnOZQxceta6RwwLjgfjVF9QXzcKDx14qyLIzgmTv6GuvFSnJJ1nyo5aEYptUldkbXqFwm0jJxWFfbBdye5ro5rCLKsc5XHeuV1ZGF22Pu5ruyl0pz/d6aGGNjUStUHsm6N16gLu/GsWb/AFwratfMaGQnGM4/CsacfvD7E19DhXaUonHT3NudiqwEHgqMj1qIXJyAAQDmrXk7o4Af4V/mKheIbiP4hxXFCUGrMiSe4xJA+R/s/nU5OCAPuiqoQRSD/Z4NSQFmOPzq5xW62Emb9pGr26HgMOlOlSYHIkGMVFEri2DJjcBSGW42Ddtz3r55xk6jaa36nqKSUEmiOT5gA/LL3rm9ROLiYepzXRSSblBP3hwa52/x50nuTXt5Ympu5x1WroyyeSCOCaQHqPepHwGB/CoxgA/U19AjoTug3buMc4zSooHA6DikUYGe5FKMnGKBvsi7pRB1aIeuf5GupijZoiVYDnArldLIGqw++R/46a6VGlx+7xjJ614eaRbqK3ZfqZSaTRRug37wZ+bkA1V5LqPYmrNyXWb5sZLfpVdv9YzdsZrWj8JxS3GkHfz0IxTTmMkH+Hn86cknzhW6Y/rT75cgY6EDP5VqnaSiwSLdpEssOeMg5FWo42U4IJ+lVNOVpYE2da2rZ9mEkHzH0FeTjKkqcmlr5HVRpqVr6EKxrtBKHnipPs8eR8vStOOONh0PNONvFjvx714Usws7ao9OODur6GUIo1OdvWl4ztCGr7Qxq3fmlDxpxg1X1u6uk2L6vbRtI5u7CRySgjkd/wAKyYW/0+L/AHq1dVZXluB3JGPyFZUYAu4QPWvqsHrRu92v0PInZTaXmdPZklsqcfNmtcOCACpJ9aw7FX2HZjzAO9bIuBHGzSZ+UZOBXyuYwftPd1PXwU/d10D5Wwdh5OKcqJ/d9qlW4jdVxnkZHFNMkfHX5jivN5p7WaO7ljvdMFWMggrxUcgiUcCldo3Ug5x0qtI6LnrwK0pU5SfUzqTSXQSSdVxhTzXL6xch7l+DxXQTTDZx3GelcnfM08jkdmINfT5PQSm5NHkYqpzNJvQtaE4Z5z/sV3vwo5sfEOP+gr/7bwV55ozeUs+f7mK9B+En/IN1/wD7Cv8A7bwV6s42rzfodeWte3aXZ/mj0Oiiig90KKKKACkZlRSzEBVGST2FLRQB5KdZ03xEPiPpui6paXV9qMf+hxQ
TKzTgWUanbg88gr9a0zqNp4q1/RB4dlDmy026FwyAj7N5iIqRv/dbcM7eo2E16PRQB5Zo1/aanZ+AdGsARqelSRtfQBSHs1jtpI5BJ/dyzBRn72c1Xs7mF/DOi+FFJ/4SG21qN5rbafMiCXRleY/7BQEhuh3Ad69booAqajHqEtoV0y5tra5yMSXMDTJjuNodD+teEawl5F448QDUZ4J7pbqLfJBCYkb/AEeLGFLMRxj+I/0r6CrwLxXz8RvEYz/y9Q/+k0NaU1dnn5mr4dmssanMjHqo/lWDqPEjD3z+FdLa25uLeNiSMKv8qyNahCuWUdwteXga8fbuDep4VSm1FTtoY8S/v4z/ALQrYt1bzH3evFZKczxr0+YfzrYiDNPjGBk/yr0sW9PkY7tF6ykC5U/SrNjMRqKqPubOtUovlUsOeQD7etW7BP8AiY4PTZjNeBiox5ajfY66Epc0Eu5uxwsB8pOPrT47JQ5YsxzzzVeN50JXYSB0PrUw81jjkZ718pU9om/eWp9FTdNpe6x8qQpn/Csu5uACqRgFznArVisSWy0hIx3qz9jt0wWCkjuRRSxNKi7u8iqmFq1lpaJz4N3tz5Yz9aY8l+EP7pfzrbmlt0bblc1TnvLcLjcuT0Fd1HFym1al+DOOrhYwTvUOQ1IySN84w3cVLbJuELdwvAqbUTvkYogP0otIGlhh6qdoJx2r6z2q+rxvp/wx8/yt1GlqY91uDy+uc/jVAYP1rcvLNi0pAOOOcdaxhGUmOfevVwtSMoaEx926Z6B4eQvpMW3+6DWj/pIYAIMd6zvD7tHpMW0ZO0fyrXF0O6gH0r81zBy+t1LK+rPq8GoewhdtaEQW4YfMgFZstsv2p2Zjk9q1muWIIVMmsS+jnaQjDLkdR2q8v5nNq6jcyx3IoJq8icCFMZA/KmvcyFT5Sg1TtrZxIu6Vm571uN5Ea/wiunEuFGSXxs56ClVi38KRis185HmRgJ3INc5fndM5Y87sV2Ut3AylVZSTxiuR1IZd/lxl69zJ6spTfNDlPNx0FG1pXG+dtiKjoayJQGYtntWlDko4K9ORWewweR26V9FQSjJ2OGm9TohgMpB58ocfhUcrYCA/eY0hcgxAjGYxTCWkkZWXG0gKfwrzIw1uxtjp4iyy4HuKIFymMfNyPxp0hZVCckk4NSWwHmnIwM5/E0OTVMEk5GkiSLAu0Z4phMwHKCrcay7VKxllIzmnMHwcx14Xt7Sd0men7K66mXMNy5HUfernb0/vJD3GRXWyxqc84PUiuZ1KICaQjoePxr3MrqqUrHFWjyyTZjuP8aUDK4NOYHzVXHHrTHyrsAM819Dc1Tukho+VeaciYUGlkGdwHpSg8Ae1FxuWmhZ0sf8AEzh/H+RrpIxMinYoK5PWue0oZ1KLjuf5GuthSRY1/dkrySfxrws1qctRen+ZPLzSMe8jfGXGHYZxVRmG4euMGta/fLjKgDHWsViW5YbTjJHpWmEk5wTZx1FaTSBFLNubjirV8oaCMeqj+VV1+8B7Gp7tW2w9cBVH6VtL+JES2HaYZEKOoyD1rpreSOQgnGT7VzOns6AYXKnv6V1dgIjCpOMt0rws6aXvNfcehgE5SsmWVRCOp5o8hcD5jwak8tT0brSrCMD5ycV8q6vXmPeVPpYiMSBiST81RyeWiHmrghXnLVFPHGEOSKdOsnJJtiqUWotpI43Ugvntyev9KzrYAXMZz/FW3fxI8sgyBzWX5Hl3MQBr9AwtROjy+X6HytRWmzobOPnK5yDWqsUu7BQbMdar6dAHXOcGtAOyErtyB3r43H4huq4x1aPfwdBKmpS6kWCOoFB6ZwKl3k4+Sk3HA+TvXBzvsdnKu5Cw4OelQsjHoM1dzkHKDFJn0QVpCs49CJ0VLqY91HLsOEFcveFoy5ZQCTzXbXCyPGcR1ymqWzscFSMnNfUZNiFJ2lY8THUuWSfQp6Qd73WeyZ/nXoHwjGNN18f9RX/23grhNGh2SXO
f4kx/Ou++FH/Hj4g/7Cn/ALbQV7U5J1pJeX5HTllnXbXb/I9BooooPdCiiigAooooAKKKKACiiigArwXxIm/4meIl9bmL/wBJoa96rwnX8D4neIT/ANPUX/pNDVRdrtdmcGZf7uzprS0f7LD5Zx8uGwKz9V0uQtjPueK3tInEdoN/OQMVX1W6XOQp9K+Gw+LxEcY4x7sithqDwim3rocK9m0NzGT/AHx2960kcidxtPBzUd3Nvu41wcZB/WnNOqXDj04r7Ccp1IrmWtj57Z7lm4mW3tEdY9284IHbPNWdPLSXbEHoN1Z2WdOvAOfwq5pMjRXMhwSq4AA/GvPxFPloStub0Z3qxvsdBHd/Lkoc0hvmzxE1MLxk5Ip4mjUZwa+WdOF78h76qTtbnJlupTgBWFRyxXc5XbOyDuMdad9rRRnaagbWVRwvlyflWdOjWvelT/X8zWdWla1WbLK6czEGRwx9SKc2kROMlVJHPSoP7SdugcfhSDV3R9pVzn0FHs8c3oxqeCWjRk6hZ/ZpSBzk44FO0qHesKk87ealvrrz5cbHzjPIqbTNkcpcgjYMCvZnWqrB+/8AFY8mFOm8V7vw3KWroIl2ovX0rlLkBJG9a6nULlXlHXqc1zN0heVmPUZH4V7uT80aaUzgxbi6ra2Oy8PzqmmRg88CtJpIy5OBzWRoKINMjLDqorUCQ+3518lmEILFVHrue9hZz9hFaEyTID0FVNQl3thEOSO1ThYh/wDrpkskasCASenFclFRjVUops2quUqfK2kZsNvcNcRNvKqG5GOtaS2MhXEkm76io2uNuAFbJ9qX+0mcZVXH4V11p4mrZwSX3HPRjh6d1NtjjpkSjIC5HPSuZ1S1Ks5J6NxXRLdTPnrj6Vg6i0kl3KnOBgjj2r0coeIjWaqSucWY+xdNOmrGZjy484zkgGsyQ7mJx2rTeRo1k3A8e1ZjHOOD0r6/D31bPHjudFeR7Le3lAycKCB6UwhWfIxkGn3c37iCNeG2CqSytuPP1ry6MJSp6+ZpUaUtCefLBnU9DxViCP5gc8ZFUUfAA96s75BHtjbB4AqqkHy8qFCSvdnV2hEcIVhmpGMPcLSWxR7aMSfe24PNSNbwOCCAc+9fBVJRVVuV1r0Pr6cZOmlGxDJaROpkVVz0Ncpq2nOsjgHq5bp29K64kQbsfd9BWFqt0r3JQZyMEn8a9jJq9eNb3dUeZmVOl7O70ZyE0XluMjvVd1O9j74rVuE8ydvQMMVRnTDke5r72jU5krnhwnZldVwQOp700nD47VKi4Y56mo9vLCt0zZO7L2lYOpwAepP6Gu+hCC2UEA8c1wOkKBqtu3fn+Rr0K0iQ2y78bjz1r5DiWSjODf8AW56GXx5pNLt1MXU0jG35BzmufdMsTjqK63VoogijHf1rl3wjsMdBXRlFbnopo4MfT5KrRXH/AB8KBwMYq7d4Mcajrx/KqB3bU5+bPX8av+WzSBj6Z/CvUq2UoyfQ5FtYs2sJjgUbCQR6Vt2qqsCDIB7U3T0RrKLcOqg1ZaBMgj+EYHNfKY3FqpNwlpqe1hsO4RU1roKu7P8ArKcBJx+970zyyO4pNsg/iFee0ns19x1ptbplgB8nMnGOKeIwwwzA1VAkz94YqVA3dhWE6bWqZtCaejRkX1spuHAwOcmsiSBkvI2zkbttdJNbiSVnOMms29tXjCuCMBs19HgcWrKDetrHi4rDtNyS0ubdtFtgBU4Ydac0rDOQTTI95iVkIGV5oZnHevnnHmm29T2VLlgkhwnOR8hoNzwv7s8nH0qIuw60m8mqVGL6E+1l3JvtGQfkPBxR5+GI2Hpmod5o3HtT9hHsL20u5P5xdfuHmue1RiJSu08Vuo7DvWXqMZkdnruyzlp19tDlx150t9TH0pS88nb5cmu2+FP/AB5eIP8AsK/+20FchZqYribnrGcV2Pwtx9l8Q4/6Cg/9JoK+ohLmrS7WRllK9/5P80d9RRRXQe8FFFFABRRRQAU
UUUAFFFc3q/jSx0i9urdrLULpbKNZb6a1iDpaowyC+WBPA3YUMQOcUAdJXg3iZSPiR4jcdftUI/8AJeGvd45EmiSSNg6OAyspyCD0IrwjxPIq/ETxID/z9Qn/AMloauF76Hn5nf6u7eR2VgEeziBzwKdeRRbC3rXNDUDCiqG7DFRXeps9r5ZbHcHNfILJ68q/OpaNnJ/aVNUeRx1sW72GMScdeDWQyKbl35z0NRPetM2SemB196c0m3cR1IzX0lDDzox5ZPU8erNSldKxat5Mxy56HGK19FaNjMPVq5yObZCMeuTT478REbWw3FZYvBSrQlCOly6Fb2c1K17HYyWsbev505LeMev51zI1olRz296Bq7t0PNeL/ZOLtZyPR+vUE78p1ojjA70n7lTXIPq7lOG5+tVW1FzJnefzNKGQV5fFMt5rBfDA7wSRCnrLET/9auDGpOEJ3nA9zUy6k21TuPI9aifDk/5io5zb7J2zQxP81USiec+P84rCh1kpGFLckE9aYNTKt5jH1/KopZPiINpv0CrmVGdmo2HXKAuzt97ODWZMoNwy9sc066vlM5AbhjmqEs4DFyeCMGvqcLQnFK/Y8ab5nojtdHhSWwTOeOBV9bGIHqeuetctYam0Fssa4IGKuLrjZA45r5vF5bi3WnKD0bPWw+Mw8acYzWqN77FH6nrnrUttaQpJI3OXIJ59sVzn9tlgOnNC6ycgZ6nHWuSWVY2UWnI6I5hhYyTUTsBHCq5qqrRDPtXMPrDbGO77vuazX1Ny4Jcj8TRQ4eryvzSLq51T05YHdCaEGse+eJbwt7Vg/wBotn5mI49aqXd8fNXLHpxzXoYPIp0ql+bocWJzT20OVRNO/ki3HFZcuwD61TeYu6/MTk1JJMuSM9DivoqOFdJKN7nlVJOTvYvTMZRFMPu4wKaOUJH3s1YmkjNgApHy9az2nCsuDwRWdJOcbJbCluTxkkEN0PAqe1YGXcfunNVIrhTEAcZDVDDeKhQZ5Lce9XKjKakrBG6Z3xtY5RFLk5Ucc+2KkFuFHBP51zcesuiBWwDjIpza+VIVsAkZHWvkZZTjG7J3R70cww29tTcmPlYB6HgVz2oKPM3+u4/pUia3vdvNChV5zWZfXyGbYG7mvTy7AVqNS0kcOLxMKsfdGhDuLHpuGKqyR5nkPvxTxdq+1QeQeaSSRd5APQ5NfQQjOL1PNZVMZOfXNMaIvIR/DghqsxyIDyevIpokCkZxya6FKXYpSaY/S0EeoREdFB/ka7yGFZUjkyc7Mda4O3lWO6BzW5BrPlwgAghTivCznCVsQ4yp7ndg8TCm37TY3L6yQw55yeOtYN1Yp5h64x61JPrTSArxgc1lTai8oycZNYZdg8XTVpMMXXo1JXghXtlRlxnH1rRYKscS+qVjG5yME1NPdIxh2vyiYIr1atGpNxT8zijK1zubSzjNjCvPyqO9LLagAYzwPWsCz1wmBRkbgMEe4pk/iRlTeoUjHvXyTyrHOs7dz31j8L7NJrU2/KbJFL5LVzaeIpigYomTQviVycbUz+NdLyjG9kYLG4fzOj8lqURMK5x/Eco6ImMe9RL4lmcZVEI/GmsnxrWyD69Q3SZ1AiZuT1qK5t2ZCO1YieIpN2NqYqc655oKgLmoWW42nNOwPGYaUWtTZVJFtU8vG7Z39aJBNzjHTj61jf240BCSBQCcCn/24pBJ28HFZf2dik78qZf1yg1a7RpYn3dF27f1pAJcDOM96zTraKCSRgUNrSKQCRknFWsDif5CPrVH+ZmkPOwc7c5/Sg+dhtoHT5frWb/bSHHI5oOtIDjK5p/UcR/IH1qj/MzTHn8fd+7+tQTwSSwnfjd7VQbX413ZI+UgH8aR9ZyxX5eBzV08DioyTUbEzxFFxs2yNYStyd3oa6/4aRiKPxGg6DVB/wCksFcC+qh52IxgKc13Hwqm+0WXiGX11T+VtAK96jRqwmpT2sVlco+1kl2/yPQKKKK6z3AooooAKKK
KACiiigArz3VDfaRf+L7ddIv71taVZLF7aAyIzmBYSjsOI8FM5bAw1ehUUAY1toEbeFtP0e8muP8ARreKJntrmSBiyKB99CrY49axZfhZ4TnuZbma2v5J5SDJI+qXRZiAAMnzMngAc+grs6KBNJ6M40/C3woettfnH/UVuv8A45SH4V+Ez1tb8/XVLr/45XZ0UE+zh2Rxf/CqfCI6Wd7/AODO5/8AjlL/AMKr8Jn/AJdb/wD8Gl1/8crs6KLh7OHZHGf8Kq8JYx9kvsf9hS6/+OUn/CqfCH/Pne/+DO5/+OV2lFO7D2cOyOL/AOFU+Ef+fO+/8Glz/wDHKUfCvwkOlpff+DS6/wDjldnRSuHs4dkcX/wqrwj/AM+d9/4NLn/45R/wqnwj/wA+d7/4M7n/AOOV1eoX0GmabdX90223tYXmlbGcKoJJ/IVgaT4qvbrU7Gz1XRjp39owNPZMLkS7goBKOAo2PtYHA3Dg88U7sPZw7I5/QPhboUmnSnVLK/E/2y5VQ2pXC/uhO4i4En/PMJz3781q/wDCqvCQx/ol9x/1FLr/AOOVP4l8ZSaFr1npUUGmbrm2efztR1L7Ig2sq7QfLfcx3Zxx0NdPbPLJawvOkaTMil0jfeqtjkBsDcM98DPoKVw9nDsjkf8AhVfhL/n1vvT/AJCl1/8AHKD8LPCZGDa3+P8AsKXX/wAcrs6KA9nDsjiz8KfCJOTZ3pP/AGE7n/45SH4UeECMGyvSP+wnc/8Axyu1op3Yezh2Rxg+FfhNelrfj6apdf8Axyl/4VZ4T/59b/8A8Gl1/wDHK7KikHsodkcb/wAKs8J/8+t//wCDS6/+OUf8Kt8Kf8+1/wD+DS6/+OV2VQXktxDZyyWtuLmdVykJkCbz6bj0oD2UOyOB8R/DDRI/DGqvpNpqJ1FbSVrULqdyxMuw7ODJg8461oD4VeEioLWl9nHP/E0uf/jlbfhjXJtf0ua5ubJbOeG7ntZIVm80BopChIbauQSvpVDV/Fd5aajf2ul6MdRXTIFnvXNyIioYFgkYKne+0ZwSo5HPNAezh2RV/wCFV+Ez/wAut/8A+DS6/wDjlIfhT4RY5azvSffU7n/45XWWN7BqWn219bNvt7mJZom9VYAg/kasUXD2cOyOL/4VR4Q4/wBCveOn/Ezuf/jlH/CqPCB/5c731/5Cdz/8crtKKd2Hs4dkcb/wq3wpjH2a/wAen9q3X/xym/8ACqfCJ/5c77/waXP/AMcrtKKQezh2Rxf/AAqrwiP+XO+/8Glz/wDHKT/hU/g/IP2K946f8TO54/8AIldrRTuw9nDsjjT8LPCZ62t//wCDS6/+OVleIPhfoiafC2mWeoNc/bLZWxqVwx8kzoJesn/PPf8ATtzXd6nPfW1kZNOsUvbnICxPOIhjuS2DjH0NVvDWs/8ACReGdO1j7P8AZ/tsCzeVv37MjOM4GfyFIPZw7Iwf+FV+Euf9Fvuev/E0uv8A45SH4U+EWOTZ3xPqdUuf/jlPh8a3N34tvNDt7XSV+yXa27faNV8u4kXYjl0h8o7gA/HzclTyK7Gi4ezh2RxY+FHhAHIsr0H21O5/+OUf8Kq8I5z9jvv/AAaXP/xyu0op3Yezh2Rxf/CqPCH/AD53v/gzuf8A45R/wqnwj/z53v8A4M7n/wCOV2lFF2Hs4dkcX/wqnwjnP2O9z6/2nc//AByl/wCFV+EgMC0vsf8AYUuv/jldnRRdh7OHZHGf8Ks8J/8APrf/APg0uv8A45Sf8Kp8I/8APne/+DO5/wDjldpWNrGoa3ZzY0zRIb2FYvMeSW+EHPPyqNrZOAOu0c9etK4ezh2RyGq/CzQk1HRVsrK/Nu94y3uNRuDiLyZSM5k4/eCPkf1Nan/CqfCP/Pne/wDgzuf/AI5XTaNqsGuaJY6rbK6wXkCTorjDAMMgH35rnbLxxNdS2F1JpBi0TUbo2tpffaAzsxJCM0e35VYrgHcTyMgZp3Yezh2QxfhZ4TX7trf
j6apdf/HKQ/CrwiRg2l8R6f2pc/8Axyu0opB7OHZHF/8ACqvCI/5c77/waXP/AMcpP+FUeEAciyvc/wDYTuf/AI5Xa0U7sPZw7I4v/hVXhH/nzvv/AAaXP/xykHwo8IKMCyvR9NTuf/jldrRRdh7OHZHFj4VeER0tL7/waXP/AMcpR8LPCYORa34P/YUuv/jldnRSD2UOyONb4WeE3ILWt+cdM6pdf/HKy5/hhog8UWMcVpqP9mNZ3DXB/tK5x5weHy8nzM52mX/IFdD4g8RatoUd7fHQo59Jso/NmuPtoWVkC5YpHtIOOeCyk447Vu3F0sOny3arvVIjKF6ZAGfwoD2cOyOUPwr8Jnra3/8A4NLr/wCOUH4WeEycm1vz3/5Cl1/8cqx4N8W3PiuBLo2+kx2726ylbTVftM0bMAQkieUu04JzzwRjHeuroD2cOyOM/wCFWeE/+fW//wDBpdf/AByj/hVnhP8A59b/AP8ABpdf/HK7OigPZQ7I4s/Crwic5tL456/8TS55/wDIlL/wqzwnnP2W/wA/9hS6/wDjldnRQHs4dkcX/wAKo8IDOLO956/8TO5/+OVveH/DWleGLSa20mCSKKeXzpBJO8pZ9oXOXJPRQPwrWoouNQitUgooooKCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAM7xBpf9t+HNT0rf5f221ltw/90upXP4ZrloLfxHf6lpN9e6H9mbRLSYiP7VG32y4aMIFjIJ2pjdy+DyOODXdUUAc5qt1ftHED4QbUWmth5g8+DbGx+9G5dhke6g59Ks+EdJudD8J6Zpl5IslxbQBHKElR/sgnkgdB7CtqigAooooAKKKKACiiigAqG7mkgtJZobaS5lRSywxsoaQ+gLEAH6kCpqKAOG8LSeIdL0rV0m8LXSXD31zewRyXduBKJZywTKu2CFbJyMcdafqVjr2ma1r8+laUNQj1mGPY4nSMW8yxmP8AeBiCUwFOVyeCMV21FAGfoOmf2L4e03Sg/mfYrWK33/3tiBc/pWhRRQAUUUUAFFFFABRWHP408K2txLb3HiXRoZ4nKSRyX8SsjA4IILZBB4xWhY6pZamGexnFxEFVhNGCYnDdCj42v0/hJx3oAXUbq4s7J57XT5r+UEAW8LorNz2Lsq8detct4J/t7R/Buj6Vd+HZ4ri0ENrN5l1DjZj5pVKs2QvocE54rtKKAOH8UWGp6/DNpNt4a+zu91G6arJNDsjCuG81QG8zfheBtHPfFdxRRQAUUUUAFFFFABRRRQAVx3jL+3r27t9KtNGvbnRZY919NZTwJLLyR5A8yRCoI5Zhzg4GMkjsaKAMu1nuol0u3h0V7a1eJhKrSxg2YVRsTapIbPT5SQMVxuneH9dTTtB8Mz6aI7LSL2OdtR89Ck0ULFowqA7wxOwHIAGDyeK9GooAKKKKACiiigAooooAKKKKAOG8Srrmp+IRZz+HL688O22yQLaz2wF7Lwf3gklUhFP8OPmIyeBg9RNd33nyxLpJkg+yGVXadBvlyf3JXtx/F05rRooA4yy0/UNQ8Y6dq7aD/YtvY200MhkliaS437dqYiZhsXbu5PXGB1rs6KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA5PxJGn/CZ+DfkXm8uc8df9FlrnvFGoajCvjeO11C5tzAdOFu0chHkb2AYqOgz39e9ekSWtvNNDNLBFJLAxaF2QFoyQVJU9iQSOOxqKXTNPnM5msbaQ3Gzzt8Knzdv3d2RzjtnpQBxC6JO/ja90H+3tbGnnTIrzH25/MWYyOm4SZ3BcKDsB257dqw4dd17X7bwnbPJuF3owu5D/ab6ebmYFQf3kaMxIHO0Y+9k5xXrAtbcXZuxBELloxGZtg3lASQu7rj
JJx71Un0HR7nTotOuNJsZbGHAjtpLdGiTHTCkYH4UAU/CI1JfDsCardQXN0jyL5sM/nAoHIUF9q7mAwpOBkg1uVDa2tvY2yW1pbxW8EYwkUSBFUewHAqagAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA//9k=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oat
E/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeDeJgo+IviQbRj7VD/AOk0Ne9V4L4nGfiL4kH/AE9Q/wDpNDVw3PPzT/d38gv4l+07yBjauPyqheWyJbxEKPmGTWldqJJIz/sgn8qp6g/+ixN2wcVnhpP3F/Wx859vQy4EWOdDtHDD+ddxbRJGjEqMMoIriYRmVB6uP513gQCBH9FArlzuVuRd/wDgHbTTcnI5nU4FW6LBQAw3Y96hEahB8o6Vo6moe5T/AHTj9KpsBu2e2a6aFTmpRucFTSTRSFspwABTxApydoyvGamwcfLU4GYQe9dEqrQuZsoCJSQu0cH0q99nSOM/KOBmkgg/eqB1zzVqbjj1P6VhVrXkooN1cpRJHGgfYPSmMitI4K8fSrG3OzHSiYqeB2pqXvCMS4UCVjt6nHStwQINERlUAs24/pWRcnkgd63AD/YEB/iKmtcXJpU/X9DpetP+vIy3RdjDaOFz+tUo0UBztHrV458qQ99n9arREFZG/wBn+oropvRk02+VhhcIMDmgRqAVwOBmnIBhF/KnMoPyjsOapsTlbQRYEkUAqMZ5q0iJGu0KOabHGflC9zmp8BAw71z1J30MpSbI2VSGbA6cUxUQsBtGRzTywDKB0UZpAv8AH/epLREkTQpvJ2jORXU2tvHJbg7BkoO30rm3ALBv7pzXX2AD2yegUfyryc3quFKLOzBx552YSom1BtGc88Vx+oJGbggqMgZrtpEBudo6gAmuQ1SI/aWJ6gHFYZJNc7XkbY1NNM0PKT7NbfKMFST+RrDuoUywAHBroRE32GBu22sK5j23JK9Cea9LAzvKWvf82ckrpplQxqG+6M0jIoQYA5qVMP8AN7Cl8vcBntk16fNbcOez1KIhXqFGBThAuTwPWrPkkYA6d6aV5J79K05zb2zezIFRQ+CBwKaQN2ABxinuRuBPrQR1zVFp9RiohB4HpSeWuQMDg5oGdp+vFOAwQT3pl7dRu1SxG3vmhUVcqFFPx
nH1zTVPPPrzQF7ot6cim6AwOc/yqeS3DTfdGQfSotOBa/j/AB/ka0to3YHZjmuKtPlqadv8zjrNqV0TaZaRvcBSoIDA10gto1g8vaoZFwDWBpLGO7w3Rgc10U+4MrHocD8a+XzWc3XUb6HdglH2blbUjW3QyrlR8o649q5rWI4zcthRxXVLIrxMwz121yusELdFF6DiqyeUniHfohY1JQXL3MaaMYXAGADQEXzE+UfdqWdQVx6imqMsvpivrE/dOFS90Ai5m4HQf0qsYk8wPtHcVa6faCPQf0qBuVX8TTg/6+RcG1/XkNWNc/dFWAi7M4AJpkY5x7VYZFZM8+tKctSaktSiEGcYGQatWiILpPlH3qh6P75qa3H+lJ7kU6msWXN3RNfRJ9qfCgbSRVS5hSTAZQRkH8quXe5rmU+5qCb734VlRbUY+hnGTUk0VnjTIGBSiNT1A5pzJnGaVVAUegre+hrzabkIiUKBtGM1N5SljwPSk/hqRQDz36UNilJjHUMkfAxg/wAzVd0XDcDrVxh8gHoP61WkUA80QZVKWtibTivlzqQPu16V8HFCaPrqgYA1U/8ApPDXm9igbzMd1r0v4QDGla8P+or/AO28Fc1dq7O/Ln/tMvT/ACPRaKKK5z3QooooAKKKKACiiigAooooAK8D8UEj4k+JMf8APxF/6TQ175XgfilgvxG8S+v2mHH/AIDQ1pS3ODM/93ZJncUJ6MoH6VT1E4ghjHYVdtPni2nqOlUdSG1IvXmsaH8ZR7Hzcd0zNTeLiPaM4YH9a72Jw9vEvfHNcPbnNxGT/fH867mOIfZ1wea5c8a9y/md1FtyduxmXsYNzg9AOKpbNo3HoBnNaF0CJQD2UCqUjAqg9iKnDyfIkcdVe8yuHXacVbiAx9Rmq8ZjIJOOKtwov97qOK2rNJGcFqQwWxBd1ycnP506dG+cgc7Qo+vNTRsYmkbqp/SorjLFcHlRuI9azU5SndltJRKJZlwo9aDCQDJz0q3KgSDfgZyKru5VMHoa6ozctYmTVjIuuqZrbVm/sGDcPmK9KxZwTtBHOa3AQ/h+0x13EfyrbFvSn/i/RnT/AMu/67mf/BJn+5/WqsK5M2Omz+oq25GZB/sVWi6S4H8H9RW8Ho/kZQejHRLtSPPU4p7xnGR3/lS4JijwMkAVMVO4ccEVLlrcht3uSRIQVwKil/1jE1dtlO7lehx+lZ95zcMM4wcVhTlzVGgSEP3VTuRipXbB49MCq6E9+varUaBmUH+E5Naz03ExHjKoo7kj+ddVaRGS1jXkAjkj2rmZCS+7HfArq9McG3jzx8teFnE5KjGS3uduASlUaZPuDPu7niuS1cASyEferqVjfLHBx1FcjqufMkYnHOawyWK9s7Psb45tqN11N0qTpEDAdFwawJ1Hmn3rfUk6RF6MmawJFy45713Zfo5+r/M48T9n0RAkOJMD6UqoSwz9KmVMSlicYbNOQZK5HO//AAr03UZzkXkFZOBnK5qN4SwzjnvWgwCx4J+btUDAKWPUMM1EKrYbGTPHlgD2qIkkmr8yhsHoetUWwSOcYNehTldHXSndWGDdt5HOKXeCAv8AEBS5+UjvSYA59q0NvUVcKcU0ZwCepNOK9DTQc/Ke1ALuW9MIGqxgHsf5GtdhkEdyxzWRppAv4z9efwNagOSfx/nXnYpfvL+S/U5cQ9UXdJx57Z6txXShhJkDqBmuZ0n95csPQgg10flj5CGI5Gfevls4S9truehl7fIxmVKqB2Of1rmtaiAvTJk8iuklAVAc4IauZ1snz2xz3rbJ9a915meN+FIypE+QD1FKg+UH2pspKqPpili3GJDj5sDj8a+rfwnn2fKPxgzgeg/mKqy5CKR6n+dXTj99/uj+YqpIflUY45zRSev9diqb1Q6IEgZ64FW9vy8dTVaM5BI9BWkEUQlu45rKtLlZE9WY8i7ZyfSprZv9Njz/AHhUc/8Ar3NEBzeRAf3hXRLWHyN7XjfyNO4j/
wBLnHq5/nVO6wBjvxV+Rs3l1kcKx/maoXWG4zXJQb0T7Iw+2RPgY9aa4PHpj9acxXIBPJ6U1mGK60aRvoGzKketOC5wc9DTFOV3Z4qcAZH1zSk7Ck2g24GPUVUkwobParz8qD04rNddwcZp0tS6Cu9S7p/WT3WvSvhH/wAgzX/+wr/7bwV5lppJaQDsua9P+E422HiAf9RX/wBt4K5q/wAbR6OXK2Kl6f5HoVFFFYnvBRRRQAUUVHPCtxbywOzqsiFCY3KMARjhgQQfcHIoAkoryrVpJ9A1LxNqGj3+qPBoOktuS61Ge4R7uQblysjsPkQKf+B+1a07Xfg3W7KOLUr/AFGG7027lmjvLhpsywqjB1z93O5gVXC8jigDv6K850pr/Sz4N1R9Xv7yXW2EV/FPOXjcyW7yhkQ8R7WTA2gcHnPWqtheaiND0Hxa2q3z3eo6pFFPatOxg8maYxiNYvursBUggZypyTmgD1CvAfFhH/CxvEf/AF9Q/wDpNDXueo3k9jaGa30+5v5AQPItmjDn3/eMq/rXgWu3Mt5471+eeynspGuYs285QumLeIclGZeevBPWtaPxHDmP+7s1bdVQDoOM1namQI0J564qyZdrD6AfpWbqDEomTnrWOHpv2qkz5qGskitbkmaIZxlx/OvQol2wJg9OT+Ved25H2mL2cH9a9At5f3ZyeNuRXFn8W1C3mejRspu5myAl5mds/NkZ7Cs2Q4uSv8IOfwqzdTkuUHUDk+tUHkHmEHqeK1w1NpXZ5tSSbJlVWHBA55FTxEhSMHKHAPqDVBWO8gHjOasLckLnPQVtUptkp2LjSx4aLjjrUBly6fLyx5PoKqk5Ytn5z1NKZsBj3AwKmNC2w3O5O26Wdhu/dhenagiNoucZFVzciMlRnLDk0wliuF71apvToS2Ubo/vBj3rbXCaBbHucn+VYN0drjHTk1syODoFqPrW2KjdUl5/ozptal/XcoSvhmX1TrVeFvnlH+x/UUs8n7wgf3cVDC37yT2TH8q64x90IQ9xsv25JIPbav8AKrKgq43HOTgVSil2xqB2UVbWVZHQnse/rXNVi7nPJal+AZ3ZODggfWsq8XE3XJzyfwrQ84Z47DNZc0mZMnuTWOGhJTbKTvsJEpPJPXp7Vdh+Ygjgk4PvWer54HbmrsMig7/7ororJ2FJO+palCjavFdHp4AtkHqAM1y8pBiV++a6SycLbQDPQZr5/NIt0Ul3OvAtKo2y40vzEKcgZ6VxerMfOkXPSuoSVRI+AeWI/WuS1N91zKfqKrJaPJVenRGmKnz8vqdB5mNDiIP8PH8qx9rbcZ59fxq+ZVXRYl7hcfrWdHNmRl7YrswkHFTa/mZyVndr0Q6UjfkdKcGUTMSRjIIqFnwGHrUed2T6nFdip3WpgXZHDSA9gKhPyqQxzg5/CnyKBz2KgVAWzuU9KmEdNAZXnIyCDWcWzK49KvzAcZ/Cs5sK7HuTivRorQ7cMlZjzyBQcsOtRB/mHpR5mTgda2sdPs2Tlxu2imEcZ7g0xSNxPvik35UkUWBQtsXLIkXkZHYE/oa1VPylv0rHsmP2yPnsf5VrK3Ue4rhxK944cSrTXoX9McxXIAHNdGGwQCc8jj0rl7OTF1n+6K6DzVba44Zjg18zmlLmqJ26HTg6lotDpjlixbK5ziub1xsSjHcDmtu4l/cEKef6Vz2rTCR0x6D+VdGU0mqiYsTNSdijMQUH1pYSVKAntURbKAntxShhvA9K+j5fdscvL7tiwhyJs90/qKqycKAfSponwJCehH9ailGWBFKCtJhDSWo+A8e3FaecQs3bpistGAJHarQlJU8/JjpWVaDkyJ73Kc2DM1JaH/Tos/3qbK43sR64plu/+lxexrp5fca8jpjF8j9DduiqzT4H32IJrMmO4jtitGZh5kvuxrImkx9TmuTCx0MIJyloDEFjxyvT8qQkHB7VGGzI2fSm7ug9DXaonSoE6YMa+mKk3/N746VXRgqqo
6CpEIz5nfFTKJnOG7LO4vF6cf1qgeJ8dsc1cV9yZ7EVmyM298HB7UUlq0Vh43bRo6Uw82U4wAtem/CY50/xAf8AqK/+28FeWae2PMz/AHa9P+D5LaTrxPX+1P8A23grmxEffcj0MvX+1S9P8j0aiiiuc9wKKKKACiiigDHtPDWn2umalYOJLmHUppprszkEymX7wOAOAuFHoAKr6T4RtNLvBdy31/qM0dubWE30iv5MRIJVcKM52rktljgc10FFAHNaV4KsNJvbSdLy/uIrBWSwtriUNFaBhg7MKCfl+Ubi2AcCkt/A+nW2oQzrdXzWlvctdwae8qm3hmYkllG3d1ZiAWIBPAFdNRQAV88+MZdnxM8RD1uYf/SeGvoavnDxyWHxQ8REdPPh/wDSeKujDK87GGJgp0mmSvPiVhnqB/Kqd5ISiemKinlYS47kDFQXbv5CnFdVOkk0fPUMP78X3G28mLlFHdhmu4gnxZYY8bcfrXntszfaYyR/EK6tZJFiUeozXNmNBVOVGmPXspq3Yknm+cL3AxWfJKGkLUjyMH56nmqb7g59DV0aKijjpUuZ6l1bgAEDrT1mBUn161nbmBHFOywU1q6SNHh0XBcYct2NHnAH2HzGqSs+0ZFIxfaeOafskP6ur2LpmAy7dBUsd2u3P9KypHYrz0qOOQqDu7nih0E0X9UTjcmvLoNlh0+laSXG7Tol7Ktc7es56DpWijv/AGdHx1WqqUk4xN6+GSpQt1Yk048w0kcuHkP+z/hWa7vlwByCPyqSJ33yAj+GtuRJHT9UUYf15GpHKDHz12irAl3Yz25rLjZjGMDtUm51OfaspU0ziqYZNmobrANVzICd3bOao7yM0JuPzdqUaSiJYVR1uXhKAoPrToZiwUDueaz8tkE9ulJBMwJH8XpTdNNFPCpxbRsNdb8KOlbVrd4iiOf4dorlPOwn1OK1IncRRqO3NcOKw0ZRSOScHS1RvJdDzlLHoMVzF25MxJ9TVt5JMZA9qxb6VjM/pSwWFVOTaLw8ZVZ2OinkAsoG9FrPSfk46miaR/ssY9FxWdvZSSetbUaFotPuTTo86ZoebyR70LLnA98iqKs7YDDB60FnXqOe1beyRp9XWxqC4Yjk8VGJd2c9elVN7kZxxSMzc461KpIzVAneUBuemdtZkkgMzAnpmp5ZHyPlFY1y0m99v3iK6KcLHp4LDKTZeSTBWjfhtx681nmR1VVxwRjNDPMNiquV7kntWvKeh9V1NES4fFIX2hsVRLuWJApDJJ8/HTpRyiWGRs2Uv+lJ/ntWmkx3H61zumGQTKWHODWmHflsdyK56tNNnlY3DJVbLsbNtOBcc9Ola4vA2efutgVy9s7GUHvVxvMIyeMc15mIwkZy1POd6bsjYWbduJPzY/rWFqcoMu4dqnV5ACfUVk3zsGY1phMOo1Ll0E6lRJjxKNkee5pd48zPaqAZti0BnANej7M73hl3NFJQC4PTFRmU556Gqkbvliwxmk3Sf3eM0vZq4lhkmXgwGQOlSRyZQoehrPDOM4HWmiWUA4XK4yD70Oncl4bmW5NM+JCB64pLaT/TovSqE7vvZselLaO5uowBketaOHunb9WtSb8v0OkeYeY5Hck1nSSDbk96RncO3HQVUnLnAUdetY06SRwYfD+9qydZQaYZuTj1qs28qMDoaYVkLHjrzW/Kjvjh49WXYp1ZVPOc4qwk42E+1ZibwoyMHPSpgZFBG3tUygmZ1cPFsvpNuQEdMVSuJhvyO9Q73DDj+H+tVZ5WEjFhgKacYJM0oYRKd0bNjLzJnsteq/Bs50XXT/1FT/6Tw143YyO3ncdsfzr2H4LHdoOtn/qKn/0RDXJi42VzXD0uTEyfl/kel0UUVwHohRRRQAUUUUAFFFFABRRRQAV87eNU3fE3xF/18Q/+k8NfRNfPfi8f8XM8RE9PtEP/AKTw1vh3aZy4yXLRbRQnhHnYxzgVXvEHloK0rlB9oGOuB/KoLuILEmcZG
RXRTqfCfN0qrUomVbw/vo+P4h/Ou1S0DWYyMMB6e9crbpm5j9Nw/nXoFvArwA8HC9K83OcT7LkZ1VE68/kcjdQ7ZCMcg1Wlh3MPpW1fRAzEgc5yaoMmSvuprooV7xTPP5nB2RUEQzjHNSfZsg8dRmrcVupck4qxHbMFc9cDA/OnPEpBzyexlRw5O0rwOlJJAckFcZPpWu1qBGoGNwPNQXC5IIHTilHE80tA55J3MeSPc+wLkY60w2mDn9K1XhWJd2Bk1UcsyngiuqFXm2NoV5bIzriIFgMdetXvJA0uPnBxVecYOcZrW+z7tJgbjkU6tTlUb9zarUfJHyZgGHBb5e2c06OPhyBztqyImRGDHJ65oiXG8/7P+Fb8+hu690xkUWYcYwcDFTNEGHTkVNGoWPdjoBxTvLIyc9axdTU45Vm5XKrwDAwM5pBEfu7cAHrV3Zt61DK4XIFCm3oEa0noiDYMnjioVjEYZupqwD8oB5OM0bNxA7DrWidjZVHHRkaJkDI5zXQ21tmJTjJ9PTiseJMsPqK621t/3XAx8orzMxxHs4oxn+9lYyfszFehHzelYV5BmVvzruRbfKMjpwfeuTvYdhbuRkVnl+LVSbQRTozT7k0kGYoTjgx5/nVIQAk8VuzRf6DCQOkf86z1jxknqK3oV7xfqY8zgUvJwm7HNK0G7t3q86A/L7CmbMr171sqz3F7WRW8kgcLkYpvlFhyME1pxqrRrwM80kkK4BXGTgVmsRrZgqkjJeHA59ay5YBvORzmugljIXkd6y548yH1Brso1LndhK7TZQNuu3n0pPLK7BtyCcE+lXBFzzyPSl29a35zv+ssqeUCDgdTQYfmxjvVry9qg+9KIz5Z55o5g9u11I9PiH2xF9jWgUALD3NR6fGP7Qi45II/Q1dkQLK2PU1z1Knv28jz8XVvUv5C6fb5lYdSK1Y7XemMZK9eKr6PEXvAT90D5h61v2cIy4I6Egn1rxMwxTpyflYwhTdWV2Y/2M/IcEZJOMdKxb+3HnMM5Ga7FIvMm3YwFOMGud1a3CXDBR3JqsBi3OryvsHI6TUkYvlAAHpxSLFgtkdeasMvAB+tKQMHHpXt85r7Z2K6RgjjmneTg5xwf0qxEijgAYpzDnGOD+lS56kSrO+hVMXzYxwKPJB7YFWQuODzwOad5eflHTFL2ge2ZkTQjzGFFtHsukwOM1ZnULOfcUtsA19Gm3gjOe1bc/und7Z+yfoTzxbZJcDPXioWgBWtG6TZPcHqBniofLyK54VPdTPPjWaRRNvtXpTRDhelX3jKoAec0gj+XpWiqaGqxLsUxCMrx3qR4cA8Z4qxtG5Rj3pWU+Zj1FLnZLrybM8w4GMdv61VlgG5geR3rXljwoxwSKqtEMndg54rWFQ66GJe4yzgAEuO4r1f4MLt0PXB/wBRU/8ApPDXmVkmGlB98V6f8G+NG13/ALCp/wDSeGuXFSurHTgqjliZX7f5HpFFFFcR64UUUUAFFFZfiZrtPCmsNp+77aLGY2+zr5mw7ce+cUAS2+t6Td6hLp9tqllNexZ8y3juEaRMdcqDkU6DWNMudQl0+31KzlvYeZbaOdWkT/eUHI/GvPPCdxc6YPCtpaalaX8Op2Dt5EdvGn2UrEGDKVG7buwp3kkkjnPFVtH+w/8ACMfDv7H5X9rfbk83bjzd3lyfat3fru3Z74z2oA9M/tjTP7U/sv8AtKz/ALQxu+yeevm4xnOzOentRNrGmW2oxafPqVnFfTDMds86rI/0UnJ/CvMB9l/4QaP/AFX/AAkP/CS+3nfaft3Pv/qs/wDAPanax9i/4RX4gfavK/tj+0X8rdjzt+2P7Lt7/wBzbjvn3oA9ar5+8XqG+I3iPP8Az8w/+k8Ne6X0mpRWAbT7a1ubvjMdxO0Ke53BHP6V4H4ge9k8ceIHv4IILs3MW+OCYyov+jxYwxVSeMfwj+tbUPjOLMP4DJJ1Pnqw7gZ/Kq98CVU9smrMr7Sob0XH5VDdq
XiRux6U6Ts4ny8H7yZVtlzdR5/vD+dehWkW20J/iPWvPoDieL13ivSbbAtBnuteHxLNxjCx62CjzVHfsczfALcnPUrzWcgIcBuQM/lWhqm0Sn3yfwqihPmE+q8V3Yb+Cn5HlVfjZKsL7iQRjpV6DKwEP1XrVaIS7zgDBOf0rR+VosH8a5sRN6Jl0o9SkwKo8h/iGRVQBsCM/eJBNW5JizkfwAVUc7Zd/c9K3o3tqZzt0GyRGeQZ+4KZLDGIzj+dTLJmF0XqP55qI2kvlMWHf1rojKzs3axJkypyPrWwR/xKbc9gKypzjI9MVrMobRoQewBroxL+D1N5O8NTKcZDZ67aZCAA47Bf8KnYZB+lRRrguf8AZrpT0HGXusmiUlQf4cDNLJlSKdDymPUCmk5PNZX94xb94UsGPPYVTlA4+uasOwLcdhzUKjeMn1rSCtqa0/d1GQDMu0+9SbcEY/iPNPiQKaahIYFqpu7KlK7bRKgxtA65rtLUBLSJm6mMGuQt4/mBPU12VuivDFnPyIK+ezua5YrzNcJrN2JNpYuvtkVyOpgpK3o2TXZoSqKT3FchqylmweozXJks37aSNcbFJRZec/6BGD0KA1mbgJTnoa1mjLabCPVax5uA2eua9TCNPmXmzgqrVegpwZ89gRTDuzjtyDSlT8pP8RNI3EZPeuxGRYgUMQT/AAg1OyojA46YYfliqsBITJ71IXK4z0HX6VhOLchpkcrK6g475rIlwZX9q0Z3H8Pas1vvMfU13YeNkb0N2yLOBn0oKkjNKAQv41ISNhNdVzscrbEY+6voaUjDYHQnmhf9UR6Gjd8g9c0hdS1pa51CM+5/lVqdQLlvck1X044v4vx/katyDdOR7n+dclV/vr+X+ZyV37xd0XPnuG7nj6V0cK/uoyvG5qwNKiAud3fOK6WOEGNN2dy5P518tm9RKr/XY7MDFyVyIqQ/HTdz+Vczqzf6VL6DFdVMVEGfX/HFcvrceyVj61WTyvW19BY2NkkYT8gCn4HHuCaRF3CnMvKMeoFfWt9DlbWw6JACfQ1I5xwKij5c4+7ipHOF46npWb+IykveADsetP2nhRTQM8HrUoBIAHWok7ENmZcJvmYU+zB+0xr6EZomG2ZjT7L/AI+I2Pdq6JP938jtcn7L5F+44nmJ/vGoV5yBUt2+64cDpu5pijnA965IfAjiYyRW45+tIv3eKe+4cnpTMcn3rRPQaegYGVI9aaqEurHqBUgAIA7hs05B0x1ocrBzWIJlOFxVKVWzx1BrRmHyjH+eapO3z+1bUnodFCTJrJfmlz2FelfBwY0bXQf+gqf/AEnhrzixGfN91Nek/B//AJBWvf8AYVP/AKTw1z13q16Ho5a/9pl6f5Ho1FFFc57oUUUUAFFFFAFKz0bS9OuJrix02ztp5zmWSCBUaQ/7RAyfxog0fS7bUJdQt9Ns4r2b/W3McCrI/wDvMBk/jV2igCl/Y+l/2p/an9m2f9oY2/a/IXzcYxjfjPT3om0fS7nUYtQn02zlvoRiO5eBWkT6MRkfhV2igArwHxZ/yUXxJ/19Q/8ApNDXv1eAeLM/8LH8SD/p5h/9J4a1o/EcOY/wGVrobZs9cqvH4UTyA2cXquc/pS3KFpc5yNo/lSXCBbKInqRzVRt7n9dD5latFSE/6SnH8QP616NDIv2MNnogrzq3I+0R57uBXe2ozb7c53L09Oa8XiKCkoX6HpYObjN27GLfnzLiRMdDgf1qgigSDJ6Y/Srl6SLpx6Zyfxqhkidh6114ZP2SS7Hm1H77NK2mALZAznj6VYIAVmU53c4qnA0YQEkZHGalcskWQS2eAK5qkLz0NIy93Uicqd0YOSpGapSEsSwH3TtFWbmLERZWwxPOO9VFOG2FuetdtFK10Yy3JbUCL7x/OrTXKyIyrg1nn5pM54IxirFug557/wBKKsIv35bgpPYy7xMSYx1rWePOjwMDWVeNmUn04rVVj/Y8BJ4ArbEX5ab8/wBDX
7BmOMbh7VXzjzAP7v8AhUsjE5PtUKD55Cem3/Cu2K0CC0LiYMGB1wKGwTj1p0ADRnt8oxTMZx26Vh1Zi9yvIfnIWmIp/WpCAHPc0hOFz05roT0sbp6WRIo/eAfwjvSbAWFOiUkAdzSshQYzzWd9bGd9SzEcPxXW2e77EjMMHA/lXGeZsKY5yQK7SIboY0U4Hl5r5/Ol7sPM68CrSbJIx5qo2cFAQR/n6VyupMclmGOufaup3bYmVBk7c5FchfFzuUgnjmscni3Vk+heNatFGvvLaVGQPmAyB61iTHeVU8Hqa1gSbCEA44xWUVxMQTnHevTwaUXL1ZxVXe3oKPuAkcJTHz5Zzx1AqZRuyuODmmSj5fYV1RepkJGTgL2p7MQMEfX6VGpym4fSkD7gd3Bxim1d3AgmcEDFUnGTmrkwGBjvVU8/WuynotDqo6K6GjHJ96QNkEe1GMHFBxwO5rU6LCKuEx7ZpVAyPYU7tSEYBxQK9yzpw/01D25/lV0hmfOMEMapaef9MiA98/kauMxR8ZyQTXHWv7T5f5nLX+I2NGXdPJjkitxmO4Y6Y59qwdFmCXTrjr3rcTBR+c7Afx4r5PM01iG32R34N/u7IRmRtybvnC5x7Zrm9dOZlxz0B/KukQAytlOSo+aub1aMi8bLZArfKeVV/kRjG+RMxgQhYdwaXG8K3pxTX+Uk+9OiBP0r6vpc43tcFG12IqVjwP0pr4VSe9OBGxf0qH3M276kipnjvUjHy1CryQOaIxxnOT1pJWAUnvXO3eViDNmJaVwadaEG4jHo1JJjfn3p9qmLxPTNdkvgfodl1yfIszN/pBC885p/OR6c1HLxdEgcZqw6fMAOBiuZuyRyMhfK/SmclunFOlBB659qACGXjrVLYFsC84+op0Q+UAHJFNQHuP4hT414yDyOtKT3BkUnCgfWqLcn8a0JgMAd6zWYqjHGSD+db0dUdOH1LlgQzSbT0616T8HuNJ17/sKn/wBJ4a810kZkmHT5etel/CEY0vXx/wBRU/8ApPDXPXfvNeh6OXK2KkvL/I9FooorA90KKKKACiiigAooooAKKKKACvAfFn/JRvEZ/wCnqH/0mhr36vAfFv8AyUPxIf8Ap6h/9Joa1o/EcGZfwGV5CTIoHfAqO93eXEpPHNSJkM3qRxSaiQUiSqhpOKPmafxIpQgm6hH+2P5131sdkAOM4BFcHEQLmM+jiu/tyW07I6//AF68jiB+7C/c9LC6yfoYV0N1zJkfeAqp5X7xSfcVo3BLXJJ7VnmQgDHUitcO5OCS7Hn1EuZliKBCG6Y71Kp52kcKePyqCFW5x91hn8asGQEKg/Gs6l723KjaxVnypLNyv3gKoygffH3j0q5dSiQMg+6Kz3kxhvwFd2Hi7XMpb6D5GAAA+ppsczZ+UnBqJjhSW6GiGZcHrxXVye73Dl0uQ3Od2fU5rcKD+wrc++axbkZI9K2owX8PwY/hyD+lYYt+7T9f0ZutafyMabhXA6+tQxZ+YHkbatSgbpB/s5qrFkM/0rtg7xFB+4y9F8sI9QKhbcF+9nFJHJlBj8aGfAOKzUWmZWdxg7k8k8UnB+UjvinRg7Cx/Cmt6jrWnU06liNtrZ96c+C27tiqwbc3tmnK28MOwPNQ4a3IcGLnDoD/AHq7W2Y7IsH+DFcYF8yRPQEV1sAKiIp128142cRUoRXqdOGlaRYt5MTMrenNc9qJQSyDu2TW22POPq7AVzuoxss8rNjJPH0rmy2C9s33SHiZNxS8y/JhbOIL0K5rKYMWLZrVIAskU/3cf1qlaruncH7vGK78PLljJ/1ucs1docgwzEjAUmopExCAepXNWpmDBwO1VpCH6+hFaU227kPQgTldg7807YDuJ+lNi2oQfSpWwysy9WFdEnZiKE2QcdgKqK2QGq5cZ3Y9OtUgM4HvXZT+E7KPwj+34UwgHae4p5HH4UwHIB9K0RtHuOXn8KUng00NgmhcFwO4FArdS5pygX0Z9z/Kp
7ohZScfxGqumtm4jPfn+RqxMSZiT61yzX775HNWXv2ZqaMN8jMPaty2fHno3Xdj61jaD1du1bkcJa4D8bSOa+ZzOS9rJS/qx1YWLsrdyRplWVkA5Cg5/Guc1M+ZKxz/AMtD+ma3hGVlmL45Py49MVgaogScf7XNLK4xVXTsLGOTjr3MSRSysPcHNPjU7kHtk1I6hYj7CiE4kT6GvqHL3dDj5rxI7rggDpmlDDYuaS6XLfjUR4CfSqirxRSScUW45cdT70M4YZPeqysOvc0qvuYk/hUunrchwIXOWbPTNT2rA3UY96rdQfrU9koN7GffFa1PgZ0yXustzgLctjoGxVgfNnP4VDfKYrxh6vT487ufu44rhesE/I42iORSH68Y6UBTtBPrmnkfMcdqGbCDNVd6IQqLuUjoetIoADN708MSjH8qZ1PHc81F3qBFKRg+tZr8Pj3NX7k7Uqg5OWx1rtorQ6cOi1Z5QSFeu2vSvhAc6Vrx/wCoqf8A0ngrzjTfm8zP9yvSPhB/yCtf/wCwqf8A0ngrnrv3mj0ct/3mV+3+R6LRRRWB7wUUUUAFRzzx21vLcTNtiiQu7YzgAZJ4qSqupfZv7KvPtjlLXyH85h2Tadx49s0AZ2leLNI1mNpbR7sQrD55muLGeCIx8fMHkRVPBzwenPSn6P4q0bXp3g068MkqxiXY8LxFoycB13qNy5/iXI96801KKO606/0PwRqt3q2ny6Jcxzw/aWuY4WVVESo5ztZvnXYD07DFblxqFp4u8Q6cfDcok+y6TeJO6DaIDKsaxxMezblJ29RtoA6zTfF2havf/YrG/Es5DNHmJ1WUKcMY3YBZAO+0miDxdoVxq/8AZcV+GujI0K/unEbSLksiyEbGYYOVBJGDxxXFaPqFnqw8BaXpuft+lEPfQhCGs0S2eJ0kH8JLsqgHr1HFVNOuYZPDnhvwshJ1+y1iJ7m22nzIhHOXklb0VlBIbod49aAPW68C8Wc/EPxJ/wBfUP8A6TQ17lqNnPfWhht9QubCQkHz7ZYy49v3isv6V4JrtvNZeNfEME15PeyrdRZnnCB3zbxHkIqrxnHAHStaXxHBmX8B+qLSx5Oe4UGq+oIFKjvirsEZMiydsAEfhVXVEOQSSBxWFKf75K58zFdTNiXM8Y/2h/Ou8hJhsUUdDXCwn/SYsdCw5ruYRm2DZzkYArjz3aF9jvw9+Z+hTVVe5lkPTFZFwQjZHStWKQktuXbyf51lXhUumDwOtLCJ+0aZy1rNJjkncEbQKfj5SR1aoElAB4GR0qQLsQtk8iumUbMxTIZ3XGwHnGKrBfmyfuqKkeI78ZOSKe6bcRfxHmumLUVZElN0aTr0JprRBRkZzVuYdEQZNQGCXqVYVvGenYtSZFLwF+gzXRQqP+EegB/irm7jK/U10ULZ0C2I7Vx5gny07fzfozeHwN+Rjz4CN67f61Uj5Zz7VZnIO7nqtVYv4/TFd1Ne6ZwXusVAw5HQ4pzKxBx0xUkYJKqBxipNnzc9xj8qcp2YpT1I1Q7DnpUSR4yPerpGUBxwvWq7fK+P73SpjNu5MZPUYEy/HSn7MJtHUml+4VHepEwu5ieCcjNEpMHJhhUCL3zXSQODCrA/wgVzscRZlY/3q3dh+z4BIBAP615eYJSUVc0otptlmNfMuMn6isPWGLysU7Ng1tJJi6OOzYrE1Mhblx61zYBNV7+RpVfuq3csySf6GjdjHj8c4ptuV27P4lA3fiKkWEtbRMwwCuP1pk22GNguN3Gfet4tO8F3MXfdlZ22529MEVWLnLfjT3fKYHUNUbYIB74rvhG25kV0k657GpQSvAPBqA/KcngGnA/ICDkda6XG5rJX1Qt1yN3c9apfdY1Zdi7Y7Yqu4AyfxrWmrKx0UdFZikndgUznnPrxS4ySaWrNloKFz+FN+43t3p27C59qD9/2NBKb6liw5vEx6H+RqccszepqKxAF7GB3z/I1bVBhlPUnIrlqytP5L9TlrvUva
VI6l1QA45Oa6RCrLszwRz+Nc/peBJIo6kVsxtmDAPIxivmMzjzVNDqwkrRHyRjzicnGwKPzrB1lf9IDDqowa2y5U7SeRisTW8rclu3A/SnliarpN9AxVnDTuY55T2zUURJmAbpzU4OVYe9R7cupH0r6hPRo4ovRpiTMTu9O1RsMKn40sv3mH0of7i461cdkax0SGfT0qSMc0ipjJ981PCgGCeppSlZCnJWKcihsgE9amsBsu4vrUcnyzN6E1JaZ+1wYGRvwaqf8N+hq2+Sxo6jzesR3NMUgqB2xTr85u3xzyaQY+X0IrhhpSj6HJLcaQA2R24pJE37Se3IpSAX4PSj0FV5khvwm1uAeOKlwp3L3HNRxISNzDtUjEAk9M8VMt7ICvcIGhQdj/jWeRiQjtmtaYAwqAelZbnEhB+ldNCV0dFF7otaYu7zh6pivRvg//wAgnXv+wqf/AEngrz/RsGSX0Ck16D8IRjS9fH/UVP8A6TwVhWl+8kvQ9LLf95l6f5HotFFFZHvBSEgYyevSlrA8XQWVzpMUV7ot3q2Zx5MNoMSJJtbDh9y+XgZG7cMZx3oA36K89sLLx7o9tc30c8VxaRrvi0e8mNzOwHVRcALhj2DbxnvXoVABRRRQAUUUUAFeA+LOPiH4kP8A09Q/+k0Ne/V4B4vBb4heJAD/AMvUP/pNDWtH4jgzH+A/kWrRXLJjJDgD6U3VovlXt1OKt6ZkwqO+Ki17/WrtPG2vMhUf1xR9T5/ktS5jn0G24jx/fFdyhK26gLwF3Zril/16D/bFdxbZe256BcVOeSsoNnRhfeb9DHaYi4lG35cjB9c1k3St5oBbpx9a251RZMY4BrFvmzIMdTW+CaclZdDkqJpjoimcMRnpUhYhSx+6KgjjVvmPWpPMEkZQghR/SumUddDIZCxa43v0xnBqxsyDKRliflqC2xLKVxxV1ELTDtGmeDUVpcrGlcSO2VD5j4JznmlaSJsrgZFNcTXEmEbCjg5qU2CgBhjd3Nc7lFO83qWk38KMW9UCQ/pWqrFNFt1A42mszVF2S49ABWtAPM0uFOwTNdWIf7qnJ9/0KjfkMOYkMfpUCkqGOO3T8at3igSNVZe/+7/WvQpu8UxwfulmAfqKnwCwc8ZFMtl3QK464q0IAxHTbgYHvXLUmlJ3MXuNk27Aqjr1xVKRhuGRyK0JAqEKo+96VRmRfOJ9BSoNAt9RmeQT1qwsfmNjGFFQRruYFu1XcERhE+8etXVlbRAxEOZAAPlzWsznyY1Xvxn8aqJEkUQBAyatdCg7bd1eXiJKTVuhpTTVx8LA30oxwpH4msfVPmmc9xmtjZsmZh1cisHUHJmkGe5qsDG9W67Iqd7JeZukhtMjK87VHSsy6feeBzgGtSNBFYIP4SgJqiyIy++azw0kpSfmxVU7L0M1MqjZ5PWh14yPTFSOuSdvr1pGDCMAnqa9VSu7mFyoRvX2pApVcdRUoAxtx1pACpCnnHNbqRqpdCHaeccZFQtgHB54q4V546npVSQAyN6jitIO5tSldjDjcaUYPFNxk5pcZIrQ3Y7AIxSMCAD70A8/UUZOBzSJtqWdO/4/o8nPUj8jV5VLE9juOPzqjpv/AB/xA/7X8jWuke4Fu6k1w4mXLU+S/U56694k0tGW4dsk8cVpwE+USTg5qlpbASuD1GavAABfQk5FeHjJXqNPyNKPwpiq+6XcW6jvWdrQzMAem0fyrRMAD7jjbjIrM1twE3AjJ4owdniI8o6qfJZ9zE3YDU4MFXjk9aiJwxHXPFSKANpIyTX0jRztaCFd7Djr1pSmVx6GpVGCfWnKB+VZudiOZkQiI5zweamVQCCxAzxQAcD/ADxTljEhBb7ueAaiUu4m7mdcYErfWpbHi6hHctTLlALl/b/Cn2Azf24/2hXRJ/um/L9DrWsPkX7pMSzE8nNRqeQO2Kt6ooWeQIMZbmqQOOvTFcVF89NM5ZKzaJcKWOCM0i4Gc8nPFNXGcjg0v8+1V
YgduKgDrxTZW3x8cGkJIXLc1ExO004x1uNCSylY8j0/rVFzuIPvVl8EBe1VzhSfauumkkdVFJGlpBCySj/YxXofwkx/Z3iDH/QV/wDbeCvOdL5ll/3a9E+EBzpevn/qK/8AtvBXHVX7yT9D0Ms/3iXp/kei0UUVme8FYPiq31F7fT7zTbdrqSwvVuZLRZAhnTY6lQSQMjeGAJAJUVvVyfj+OWXSLFVstQv7YXyG7tLDPmTRbXyCQR8oba3XnaB3oAk0mTVNX8UjV59KutLsYbJ7ZYrt08yd2dG3FUZgAoQgEnJ3niuori/CFpocGrSvpnhXVNJnMBDT3cLIrLuX5QSx5zg/ga7SgAooooAKKKKACvAfFef+Fj+JD2+0w/8ApNDXv1eA+LGA+I3iRe5uof8A0mhrWj8Rw5j/ALuzoNKiRIQT1Kg1l64ArIq9BxWnafPBGo7KM/lWTrWUky3fmvCwabxrk2eJVf7hJIx4Dm8QHrvU/rXexLss1KdCTnNcFasDdxf76/zr0HH7hUHQjn8qfELs6aN8CtZPyMK6QNICepYE1jXqAFJO5zmtS6UtL5YJ+U/yrKu0KIqn+Fq78ErW1PPqP3iOMMxPpU0jDy9q9elVUkcH5RkVMsfUgks3au6cdbsyehctYwyhU696tzggLCnUg5plrH5EYf8AiPGDVsp5UZY/favJrVf3l1/TN4Q90rPMsA2DOcZqOOa4O4tjaenFTrbhW8yTrVmKaFgQMce1RKpGK92Nxxi29XY5/U1DNG/97mtO1TOkwg/d8s5qjrLDzAo6dq0dPcNpqKemyuqvJ/VoS8wgtWjEv12yPj0FUoxlmHbFaOpDE0h9hVCPJJyO1epQd6SZMdIs0rM4hJPRcGrnlDIC9uar6fGGh3/gauBGRVVeT3z6V51efvuzISIWjWIE9zwKpXEGJBj8a0vKwS79QOBWfMrmRlA5zmqw89dxSViJVLzKq9Awz9K0o41hDyt1I4qrlYgqj7x4q0YzcSjPCJ6U60r76IIjbeOWT55MYzxVpn5XHfIpqylyqRAHBGaHIWdgeCv+Nccm5S1RorJaE8RzIc9ulc9qOfNkPua6DYdu8dFGa5y+LGZ+ON1dGXpOo2inukdHv/0GNfVBWZICrnb24rTZQLFCPvMoI/z+FZq53MW6luaxw1lzNdyavQjZBHgD7oXNRyqwyx+6BVxUDrJ64xTbmI+UV7GuuNX3kmZW6lCGPMRz1JyKeqk4DfjUkC5Hv2q1HGHGCMHoa0qVeVsW7M9o/mz2rOl+WZ8etbzwbTj8qxbhAszZ9a3w1RSZtQ0lZkAzz707gKDTI2JPzDHPFK2WUexrs6nW1rYXG1QPQ0xN33jjHb6U5l3flikyen4UAti1pv8Ax/xZ6/N/I1sq5UHHQZz+dY+n8XsY7/MB/wB8mt6JP3SNjvhq8zGySnr2/wAzmrK8iKwP+kM3djtNacT7pTnoKrWaq1wHHpViRVcfKeorysTJTqW8h001G5Nl2AVscZ6ViaxF0HbNawLKQByDWbrLfMg7gDP5U8CnGurFVXeNzGVRz61MAMKB1qJSCufQ1Mi8H1617sjmkSgdM9SKUIvXuKcGBUf3jUgjGQc8jrXM5WIIsY59f5U5I/Oxn7vUU8ADn1p8S+aVx9zsRUSnZXGkZt2g+0OO+KbYL/xMLYdw4qbUl8uZsdTiobFj/adv/vCutO9Bvy/Q6IXsa2pf8fbr2BzWWr8n0rSud7XcxAyDyPrWa4wcjvXPhVamo+SMXrJjgw7dacD0z1qHcN2O+M0qmulxE4kpJCk1ExyCD0xT+gqCTvRFDirsApO30xVZw3z+var0SZUfSqko2yMK1hK7sb0pe80W9Jb97Ln+5Xo/wg/5BWvf9hX/ANt4K820tQJGwf4RmvSfhCMaXr//AGFf/beCuWv8b+R6OXf71L0/yPRaKKKxPdCua8aiY6Xac3g0/wC1p/aJsi4l+z7WzjZ8+N+zdt527q6WsDxdc
6fa6TE+peIp9BhM4C3MMqRl22t8mXVhgjJ6Z+WgDmvh5qN1qn9jNGb5rez0Rbe+e4SRUa5zHtA343MoEmSM/eHNeiV594Q1CK68XzRab4q1DxBpv2Fmd5mjaOCXeoA3Iigsw3YGeArZzkY7XVbD+1NJu7D7TNbC5iaIzQEB0BGMqT0NAHNL8QrT+0tXSS0ddM0+xe9W+EmftCoxVtiY6blYA55xxxgmzp/iu6/tCOz13SRpTXFo93bsLkTBkTbvV/lG11DKcDI64PFcvf8Aw51e61G+tRrV1Jp8uhNYxPLFbogbLbIysaKQq5VsgDpjJHFbP9ma14o1e1uNY0s6VBaWFxbtmdJTNLMFUlNhOEAU/ewTkcDFAFrSfGVzfXWlfbdGaysdYVjp9wbgOzHYZFEibRsLICwwW6YODTLXxxNcTWd0+kGPQ768+x2199oBdnLFVZotvyozDAO4nkZAzVLS9J1+7k8L2Gp6Ytpb6ARJLdCdHW5dIWhTywDuAO8sdwGMY5qtZeH9dXTdH8Ly6aEsdM1CO4bUvPQpLDFKZIwqA7w5wgOQAOTk8UAeikgDJOK8A8WYPxG8RkYP+lQ8/wDbtDXuuo6ZYavaG11Kyt7y3JDGK4iWRCR0OCMV4Fr+n2mmeOfEFnp9rDa2sdzFshgjCIuYIicAcDkk/jWtH4jhzH+AzoLGVmiUKMg8Ejtis3WmLzHPQdPerulybLfB7k4qprTLngc4rzMPHlxj0Pn270lqZFou28h5z86/zr0iJMwAn0rzi3/4+4P+ui/zr0eF8R7COCMg1xcTX/d28z0cuacpXOduU/0h29XK1k3qFwDW7flVaTaASCWwKx7no/oT8tdmAm2oyPMrrlk0Z6EqcBc1dt4Co8zJPHAqGFVHDDJBrXs4CRlvuj1rtxVdQiZQjzOyJ7K32EPJ9084ParDRb5TK3Cr0Hakifzi0e0gDofWnSuGKwIfqRXz9Sc5VLvf8kehGMVD+tyjMkk82FyE45FWUsgq5BP5UTTJBEdoBb2qv9vcRhirDPGPSt/31SKUNEZ/u4t82rMjVR+9I/u1f04gWEI77eazNSkLzOezc1dsnCW0HfK4r1q0G8NFP+tDni7O5W1QYnf0wKzE5Zselamptvlk/wB0fzrNjAEh+ldmF/gq4J7mzpCZhbJ44FXPnQD5ck1Boo32kgxgk9atiTKb2Qg+hryMRJ+2krFpe6iARu5LuCoXt61Qd9jFnGGY4UGtVmaYBQCo6mqNyqFwzgAKeM1ph53dpIznFdCrEu1xJIeTjg9q0gCVEcYy3fFZ4jaWcMThM9DW3bRrCjSPjJGaeMqKKT3YUo8zHQW6QKCcZPtVWZlaSToGzipC8jSbiTtBqndPhHkXqDzj61y0acnO8nqzWpJctki7E+IZFIz82Pwrm7xh5rD3reEmIZHxyvP1rn7kfOc9ea9DAQtUkyJO7idP97SLc9H29PxrJOV3MfXpWyAP7MjbriPgVkhCY2Ldc7q5MJJe/wCr/MddfD6CKGyHBO7HK/WpQSSFYdsmnxpkgjvUU5Il2gEZ4zW/NzysY2sriQxeZub7uxquqY2iypGTxxVWKTGcfd70gUjhG4zkYqakXN6sqLsiVhuUZ42n865694nbNdBGDnax/E96xtRjAkc+5rrwUkqjRUHaSbM9F5z2xQjY+U96AcAAUwHLhq9Xc7LXvceeH/TFBFKeBuNAO4j05oJ8yxph/wCJhET0+b+RrpYHRYeow2a5qxwLyPHo38jWyhPlBc4BG4H0NeXj4c81/XcwqytMmg+ViQewqbeQCTwAuapRSFZnHYpU0u54ztbGVxXHOHvamUZaEqSttU9s/pVTWCJIkZe+eaVZCT5fI460y+AFtGm4Ejn8KulTUasWHO7GUnC/zqzGnGc1AnJPHBOKtopxuz07V6VV2Jm9SaNcheOam8oZBz06ilhUFQccmpGiOVIbp1HrXnTqe9YFHQj2BWz1B4qVIztyowo6Yp6oN
3zcjoKduDERJwPUVhKo3sWomFfEyXDZ9cfpTNNX/iZwZ/vVNfpsunHpg1Hpx/4mduf9uvWvfDu3b9C4PSxr3OReTALkZNZk6HOQPwrXnBF5J3yxNUpkLZcDHtXDhqlkvRGU9JMzypzyuOOtIeD7etWcc4K0xk9BXepk8xCM44OaaFJbPWpdnYcc0+NOT3qnNJD5rD4Y8jcTgAdKz7j/AFjH3rTZWWLdnHtWXLxI2eaKDvJsuh8RZ08cyY/u16T8IuNM1/8A7Cv/ALbwV5tp2dzD2xXpXwkGNN18f9RX/wBt4Kyr/Gz08t/3qXp/keh0UUVie+Fcz421DULGy02PTdRh06e8v0tjczwiSNFKuTkEjrtAHqSB3rpq53xjO406006O3spm1S7WzH26LzYEyrOWZMjdwmAMjJI5oA5+M+LG8V/2IPGVpITZm63x6YhMeHVdrjfxndkHPO1vSvQq4HwHdW0A0W1stL0yzXUtFGo3IsrcREShoxzg/dO9sZ5+U8131ABRRRQAUUUUAFeBeLP+Sh+JP+vqH/0mhr32vAPFrY+I3iP/AK+of/SeGtaPxHBmSvQZp2aIsalh935qzNYbZL9Qa1YYyxX0wM1jazkOC/TdxXBhPexN7nz9vdSsVbZt1xHx/GP516JCCbNc9R0rz21GJ4gO7j+dd/C+61Ujt/8Aqrz+JE37Ox35c0pS9DOniQzSOwyeRWPdIB5fHB5FbbxhhMO7E/zrOu4mZ0C/dVyKWCqWaTf9WObEwvrYq29vG53MOfrWlG26PYvAPHNVktieR396tMwWMBPvdK1xE+d73/QzpR5USu6xRhY/vdKhYiAE/wDLRqEjWA+c2dxHNN8sO4uZP4c4xXPGMV10/N9jZtv1/IUW643vjJOajmMRGPT3pJDNM6quNneoprJ0G4de/NbwSuueWplJ6e6jE1A4nwOgGBV21x9ngHoBWfegmcoeowat2m4xxDuFGa9urH9zEwfwoZdtl5Sf7o/nVOPHmMfap7tyskufu7c1Wh5csOhWtqUbQHFe62b+jEi0cDrnNXCwlwxBA7A1X0bH2J29DWl5SyMP7owR9a+exdRRrzb7nRTi5RRGg3kKoIz1qleW6yzhMfKBk/hWuSkIwv3m6Vl3rmKVQOr8VlhakpVPd0KrQUY6lUfPOkSdB1rQMbTSsuf3agDFU4yluMj7xNXZZWVQkfXHOa6K7k5JR/rzMaaVncjuZ1XbGn6Vmk7zMO1XWthCgbncfeqLbt7Z6EnP51vhlBL3TOq5X1LcLBoCO5UZrBuMtM5J4ya2lACD2IrElGJZMdzmu3BpKcmCex14QixiUdNgNZ6oC0m7kDoK12XbpceP4Yway1xufHfB/SvEwtRyU2u/6nTXhZx9BIXUIrEfdORVW4csQc561JkSysnuRVRwFzj7oGK9KlBc1+pySk7WJIGCqFI+V6lMbJ90jHUVBb4MYjP4VKzSIOcYFXNe87ErYeJcsMg8YrKv2JuZB23VoeZuJCdRyc+9ZF65N0T74NdGFhafyNKfvSsVV4d/Y4FBTAz+NJ6mnnJC16Z2PQbnLbTSoFCDAxxSZ3Pnv3pFy4JHQ9KAtoXNOAN6nuG/ka1lDGEqD0OVrJ00Zv4vow/Q1rbW8s4xkHivOxb/AHn3fqctf4hif64jPGCKfFIzhQDjrn8qhQ7ZsH7zCpVAXdjr1FYTRghJCQ+Rxj5T/Oq9425N/wBB+lTD5mCt1OTx9KW7RRZlh/DgVUGoyiNaspIBsU44OBViLcx68DrUcH3Np781IhbIIxjvW03e4nuaEIXZkd6eUYkcjHVqgTGMj8KflwR0wetedKLvoaJ6EjcOcnjGBSRAqcrUbZMi7vu8AfWrPmLFH5a9qzldKy1uUtXcwtSlIun98VHpoJ1OEH+9mnX7H7Sx7Z/pTdLJ/tOEnu1exa2Gfp+hcNrnQTZWWQnsxqGRN43LxnrU2fMklB6liwqEFhnH4149O6XmiJalRxg4I600K
G6DA6VYYn+LpTRhhx0712KbsY2IBGpOAOakRUicnHJFP+Vs4zmmFVVyRncRiq5m9GA0kyE5Py+hrJuFAlYe9a7xZQM3Y5GKx7jHnk11YXd2NaPxE9jkNJjrjNel/CQ507Xz/wBRX/23grzSwJ3Sf7telfCL/kGa/wD9hX/23gqa/wAbPTy3/eZen+R6JRRRWJ74VyHj3Ube306KKWzbUreKeOTULGKESv8AZismHIP3AGTcG45QjIrr64rxV4Z1/VL7Vn0m401LfVNKXTp/tYk3pgzfMu3jpN39KAJPBaaFaTTWui+GdS0kPGHeW6s2jDhcALvYknGeB6ZxXY1h6KniaOfZrJ0g2qxYT7GJN+/IxndxjGf0rcoAKKKKACiiigArwDxYB/wsXxKT/wA/MP8A6Tw17/XgPiwE/EbxGPW6h/8ASeGtaPxHDmP8B/I14AzFQo4wAay9aTLAe5rcsxtizjk9KzdcjVJTz1X8sV42Eq/7VynhVIWpKRjW5xPFns6/zrtfMZbQKgyx/wAa4mBgbmMHj5h/Ou1jjMtuChOe2KrO0rwcjTCc15JdisjlBMT1Bbj8aqXMrb41IAyeatSx4kkizyR/+uqcuWmzjOOlc2HUXLmIqtpco/fLvIVcrng08RiLMhY5PODTI5THuQrnaeD61KkZJ3OTt64NVN28l+Yo6/1sMUSO48wYTGc+9PJaaYRqP3eOSKQM9wTGqlV7MO4qSQiECJOXYHGOtRJu9ra/l5lJaX6BLcRwhVBGc1Ve4uGcgxgIehq/HaKFDSYJ68ipysG3Hy8Viq9Knoo83maulOW7scXfcS7jwauW4/cRY/GjWERZmAx1yKlsUzApPQCvoZ1E8PGRwNfZM28/1jj1FVUXB49Ku3sZWeTvgiqQyrE+tdtF3grFQ2aOj0RitkxYfKMc/wA60fJyRhm45rP0b5rVgfu5rQjgkjGN7Nk5r5vFu1ebvZ3OimrxWgqosCnLEnHGazLyVlclgM9q1hAVyzsTgdDWBfSSSTZdCvXH508ClOo3e5OIvGKWwlvl7kSOep6VtRBY4zJJ1IzzWHawyNOjMCq5HFbPlPNK2SVUED61vjrcyTehnQvvYhYSXLcjCj0+tZc+4SOMdGroJJI4gUXbu44FYku1p2JOAGp4Ko23pZdBV4pW1uySLi3dmPCkHNZE2PMY+tbcLBrZlZQAW/MVh3GFkZc16GEd5yIS2sdmJv8AQdrY+4MfTArNLHbjFWZD/oysOnlqKou5RACOa8XC0krtdWdFebdr9iJcIzHPU5BqpcgggdiTVon94FP3cdarztkA+letSvzXORhbtlCO/apTMwA3qOvP0qGFflYjrnipd4I+dQPXPpVTS5thCFlbO38ayrsZuCewrTcKSNrY5HTvWbcD98xJrow3xGlL4ioQSgJHP/16UNncV59KCckgDtilRcCu47XtqAPzfhzS5wx44FMcFFXHJ708DJPvQJpWuWtM51KH8f5GtdlYKxXnBNZWlD/iYRfj/I1uBX2sQpPJH1rysbK1Vei/NnPVV2VIsMdwHO3IpSf3pPpUqRFJowB1GCPSmSKd7EDgk1lzJyMLWRBG583kDjrRcvtgwejNSKG85vlOc/0qK+B8nng9fpW8YpzQR1diFX4OOo6VbgYkjA+U9azkPyKR61p2xxtG3gjrWtdWiOUbMsCMEjBODVhIDnPOD1pIouQQc9wKspG+R1ww59q8irVtomaQhciEIWMKc8cA0MVRNo5b3qUxnaASeOM+tNZVQAcM1Yqd93ctxsc/fMDOyjqDzTNOB/tSH03Cpr5R9pkwOTg1DYKf7TgOSPnHFe8n/s79P0JhazOiBCzSAdc1BIMuWXk+lSMNs0hHOTkUFNxBBwfSvFi0ncHroQFZO68Y5qMhhzj2P0q5hlx8pINRn5hkrjnp7VrGoQ4lRipBGcdqYCM4HJAqwVHO5cD1qM4BO0dutdEZIzaGPlly3FY1zkzMRW0wwuWPbpWRO
B5jY712YV6s1ou0iWwAYyfSvSfhCSdL14nr/av/ALbwV5vp3Bkz/cr0j4Q86Xr/AP2Ff/beCiv8TPTy3/eZen+R6LRRRWB74UUUUAFMlV3hdY5PLdlIV8Z2nscHrT6iuElktZUgm8mZkISXbu2MRwcd8HnFAHmt74x1Dw/faoqapdatHZaZcXMqajZLbMsqFQnl4RDIhJOSAwHB3cjOx/aeteF9XtbfWNUOqwXdhcXDZgSIwywhWITYBlCGP3skYHJzVi68F3OuTF/Euqx30a2s9tFFa2n2dVEqhXY5dyWwOOQB6VPp/hS6/tCO813VhqrW9o9pbqLYQhUfbvZ/mO52CqMjA64HNAGTpera/aSeF7/U9TW7t9fIjltRAiLau8LTJ5ZA3EDYVO4nOc8dKr2XiDXW07R/FEuoh7HU9Rjt203yECRQyymOMq4G8uCUJySDyMCtnSfBtzY3WlC91lr2x0dWGn25twjKdhjUyPuO8qhKjAXrk5NMtfA81vLZ2r6uZNDsbz7ZbWP2cB1cMWVWl3fMiscgbQeBknFAHS6jqMGl2hubhLl4wQuLa2knfn/ZjUt+OK8G1q9i1Lxzr93Ak6xyXcW1Z4HhcYt4hyjgMOnce9fQleD+Jf8Ako/iQ9hdQ/8ApNDV09Gefmf+7s6CzCjG7GAoI/KsTWyfOJY8EVpWrGVFweBwc1ka0wlmIavEwNNrF3Z41aadFLzMu3Xfcoe+4fzrvLJligANcJbc3MZH98V2QJ2blPAxWuew9pyxZWDnySchZlBkd9uTuYZ9s1RuCqThQPxrShcMzg/5zWbKN8yD1zXnYVtScX0RpXStzLqKjIT8w+anqzS5TBC9MmkihjkIc9enWrBJaPZHw3Ymtak0nZf8MZwi2tRpZIk/djLKMYFOjiVVE8uC4zjPanw2yQt5jD5j15pkoedwP+WfcGubnUnaL06v9DblaV2teiK8s00r4TcBmo2inBJ8zj0qzPcRW4IHaqEuqKAOG+b2rtoxqSS9nDQ56jin70tTI1WQtOPUDmrunTDyIlK5DA81nag2ZWb15q5ZZS0gA6V7dWC+rxicyelx2pIPMmI9Aaw2JEg9K2ZXMn2kH+6P51kMP3u2t8HeMOV9P8ioNXbOp0GPdZHPQmrzTNCyqQWJJ6VQ0RmGnyAH5u1WDeI5LkHPSvncTCU8TPS6udMJqNNa2f8AwSzuaV+ThR1B71n3MC3F8W42IMYpz6gGTamQSO4qsskphKof3jHk1dChUg+bbp/wTOrVjLTcWaQI4CL909qs3F6sMWE5bvg1HsSKMk/eNZkbrvLv65/GumFGFXVrb8TFzlDbqX41fcZZHzkd6zGYuzkH+I/zqaS8aQbV9RUSgBmPqa7KUJQu5GUmnoi/HgRYPZRWFdMGmc1sK4aA+uCKwpRnfn72Oa2wcbSk2a09Wdn5ZNlF83AUZ/Ks6VTubPOORWuiA6co7FAf/HRWY3IAHToa8TC1NZeptXjaxVmPyBh24qGYY+hGKm24wp6YNRlS8e49jx9K9WDscbGwqVjDZzjtU/7uRclQM8c1Xt3OAw+73FWWRGBxSqaS1GiEqq9P8+lZNwSZX+uK1ZcKOOpOKy7kYlb6114bcul8RAoBIbpzSNkM+PWlA5wem6h/vuveu3qda3EPOPenEHZ159aQDByaaGyBQG5e0jLalD9T/I11MO1IuSD8xz+dcrpLbdQQ+mf5GuiiKeSeDmRi1eJmkHKovRfqTKVpDztErN+VVZHQ8LjoSPao5Zm3kL0JIqoH2r7mopUHuzmlO+hYGDKpHcc1FqEf7jd6nn3pkchUk56H+lS3koe3j57DNdCjKNSNiIsyVyqDjPOK0rOYbEUjr3qCCIsM9s5pzKIchfTiuqq1P3RyldmxGwVsg8Cni6YYXB+Y9fSsaO7ZcKT0qdb3ccc1508I76q5SqNGqzmQ8HGB+tPj2AbnIJrLS7JbAPI5NShsglyK55YZrTYtVepQvcfbHI6VB
ZnGqQ+hcU+ZyJSTTLY/8TOD/ezXsJWpNeX6EQep0br+8bjvwaYw3HKnacdKsxMJd691OBUDKGfcOGI718/CWtn0OiUdLoblgOTmmFsjOPwpSJAc5GO9MO7r3/pW8UjJsaSGUhl46VA33tqjHHWrLN8p3A4qGTcSQnBx3rem7ESIZFwvzHPFY03EjCtqVcR5blsdqxZsCUYr0cJrcdL4ixYctJ9K9I+EWP7M1/HT+1f/AG3grzjTx80uP7tejfCD/kFa9/2FP/beCit8T+R6eWf7zL0/yPRaKKKxPfCiiigAooooAKKKKACiiigArwjxKP8Ai4fib0+1Q/8ApNDXu9eFeIsf8LE8Sj/p6h/9JoaqLsedmn+7v1RbtpGCgL/FWfrJUyHHpirVvMsLbWPzH7oNUdTT99yTXFh4WxFz5+UrwSKVt/r4/TcP511du5ETg9K5W3wJ4+ejD+ddHFxK756jGO1PNIqVky6MrSuXo2ChAep/nTJox5q4/iORUEbh8EHlG3GpElDGEH72CQK8V05RldHVzqUbMdHa+YxYZwfer21IYSe4FV1MiSMFT5exqYRhB5jscdSDXJXk5Ncz0OmklFOy1GxQySsfM4QjPBpl7KI1Ecf3scU+S53oUiwT7elVZAtujPI3JOee1OlCUpqU/khVJRjG0PmzMkttzlpCcnrzTWhjx3qO4llmf5F+X1FVpYJgufm/OvpacJNLmlY8eTV9EUrnBlI/CrkJxbQeuKz7jJkBPBH86vqCtvbnsRiu+qvciv62B/ChhbCz/QfzqkMGUn2qyx+SX8P51UHyyMOx71rSWj/rsOC0Z0GnymK0+gBpjTxyMVBPAqO1fEAxzjAP0qwIo2IIP6V5U1GNSUn1C7asQs24hU61KrCAEfxGpT5cfAwWPSmgBPml454zUOakrW0DlsEcTuN8vGOmKz5bUyylzkKORg1baWWeQqq/u+oYU24kyCkQ3EDFa0nOMvX8BO3QzVby2bH4VOnCqD9TTjZmNQ5zkjpTIySxJHFdjlGSvEhlorhMdsGsOYjfIfTNdDGQ8PHPy1z9wm0yH1bNGDd5NM2pW5jtoWzZ7R1KL+oAqhOvlICOucGr9uQI1Pby1/kKq3BBYD3r5+g7VGjoqq8EzPkbDnPYU1lItzu69aW4RidyjO4/pT5dr454YYH4V6qeiOKxDbAqACPlPWp3gG3K5/OltwgQRk8npTjbMiYDE85qZ1Pf3sUo6FWSPC57HhvoKyrpSJ29K3Ch6+3IrHvB8759a7MJO8hw0kVE6/jQ5PmHH40jccdqRhlwR0Nej1udSWtxAcqT70hXaxI707GdwHc0gXt1HHNMu5b0wf8AEwQex/ka30wbfHcdKw9Mx/aUZ9QR+hrXwXVl6YPGK8rGq9Rei/U5qr1uQCTEgA6buag3YHPfipVi+duTUKgLGF67fWqjbocw4qDJg+lF1D9xe2MUiuCVZuADU14371QvJAHFO7U0hruOidIo1U9TUTook8s5ywzUnlKVHzHLYH0p4QFRu4fGKy5kncCmLUgkDp9aQQsrcjg1aUOrYxwO9OAYtgrx2q/bSDUrokinOBTskk7uvtUxVjjjp1qPyTt3MSCe1LnT1YrFRidwP8NLZ/8AIQj/AN6nPkMRj5R3ptvxew47uK6G7wfoXA6G0bLu3vxUhVJXyCc4qrExRnx1ycD1q2u1++GHYV89Vjyycjpg7qwwwntSCJ+4Gal8sqBgk0ZYduetR7R9CuVdSAxPjkDrUbxsewxVvcwOSvFMYGRumB7VpGo76kuCM6WNRnOcmsO4UK7Y9a6aRY4lwWyT0zXPXK4lkH+0a9jA1OZsyS5ZD9Pz+8x1xXo3wi40zX/+wr/7bwV53pfLTZ7JXo/wnAFh4gA6f2r/AO28FaVX+8aPSyxf7TL0/wAj0Giiisz3wooooAKKKKACiiigAooooAK8J8Rj/i4fib/r6h/9Joa92rwjxHk/EbxIB/z9Q/8ApNDVRPOzT
/d36otRxpkM+MgDk1lakW80Ak55NaiRvI3X5eMiszUiDMWKnjIFcuEf77e5889kUoCTKnGDvH866WKUA4x1OK5yIgTRH/aFbZyAxHGDWuPipNIalZ3Ldq6hZCQOtPJVJ42GCBnFR2yb0Ixzgc+tLGMzIjDoK8iaXNJ/1sbxb5UXY7ggbSuSDUo3y8HIWoInRhlh83vUolaRdiAqT3rzqkbO6VjshK61Y5nihXKgFhxxWdcxSXMiSMxRB1U9600tVXDSYPc/WqF1K0jeXECOSM1phJLn9zfuycRF8vvfcUJZoYjtXbVZ71WUjA/OpmslDZfBNRyWkCg4A/Ovdp+x82eZLmMmfDPkHvmrYybW3PT5elUphtlKjv0PpitA/wDHlB64FejV0Uf66Ca90qfwTc9hx+NVjyAe9WeNsvviq2OT9a2h1KgbOnYMG5uh4Oau+RnAVsYqppiB4CD0q4sU3GH6da8XEStUlrYcVpsBjWMbmIJHTNR4M5+YbQvY1Ktu2P3zBuaidXkb92do6Gs4tN7/ADG15DTKqfLGASOOKkCRwoWbG480qwRw5YgbqYltJMzNI2UJ4BpuUH1svzFZ/Mq3Fw0mAF4qvnbJtxxWhdRRRxYXAPH86znY7y1dtBxlH3VoZTTT1LifKpIOAO3tWFdZfeucZHWtuIbonJPy7ax5wC5I6c1vhNJs0pu0kzqYJh9mjGeiL/SpJIwwJzyRkVmIzIETnlQc1pRMTtUn5gMZrxa1L2b5omsJ82jKQBcZIwVzxUcyEKvGD1q6YiLgc8MOn86dNCJJMjptIrVV0pLsQ6baKsMQdCcgN0B9KkKyIgHLYHWrFtaFoM8Bs8GpHglRTnJ79KxniI87VzSNF8t7FFo2IDc89qxr9CJpBjvXTmIlUOMbsYHoaxtViVJpa68DiE6liKkHD3jBODn3PFNJwMenepCoDcjoaZgfMCO9fQJmyaHABcU0dcd8ZpDnA+tNbOM9CeKEhpF7Shm/hP8AvfyNa2GYswyME1kaRu/tCJSemc/ka2AJN7FThckEfjXmYv8Ai/JfmzCutSu2WXIbb3pjgZYr6HiphH+6bPUDiq7sFK47miGr0OYZ/AR7Z/Wpbg7NjHrtXn8KQKMHilu8MFDDjj+VXe80NDozvPXnqKnJD/NnB9KrWiM4ZxxtNTgbsFTjmsaiSlbsGwIzbwME+9ShySRsxjoabGrggHJPXNSbskjYaym02UhhclQduD1NR7WkAYkrntTyxK5AIPU1G3mOAwO0elVFfITK0jkSMu35cdabZj/S4s9m4pzv+8K7Tj1ptoQbyMf7Wa6/+Xb9AiaQciV/XccVcDA99pqmi5kJ7g1ZAUnkY45JryqqWhrBsk+YMD5hOB0ppdsjk8HP/wBahxwdrAHFRfMAAWyfWsYxTLbsTGVgCcE+1J5jFscrUahwDls80BW3klhg1XJFC5mD7cfMdx61g3pxI2O5NbswVVJxk4rBumDSMccg8V6GA3uR9pEulkFrj/dx/OvR/hP/AMeHiD/sK/8AttBXm2mISspXsAT79a9G+Eef7N1/PX+1f/beCt6yXtGz1Mt/3mXp/keiUUUVme8FFFFABUVzbxXdrNbTKWimRo3AYqSpGDyORx6VLUN5aQ39lPZ3CloJ42ikVWKkqwwQCCCOD1BzQBwuh2NlH4lvNS8K2MdtpFpZy28rQDbFfXO5SNqjhtm1gX7lyMnBrJ0aytbDSvAevWRJ1fVZ4lvrjcS92JYHeUSf3sMMjP3dvGK7vRvCWk6A8Z04X0axx+WkUmo3Esar6BHcqOnpxRYeEdC0zU/7QtLAR3ClymZXZIi/3jGhJVM99oGaAPP4bWFfC1h4sAP/AAkcuuIj3G4+Y+688loD/sCMldvQbc9aNXtYZ/DvjDxPKCdd07UZls7ncd8HlFRFGnorDGV/i3nOc16AvhHQk1j+1VsQLvzTOP3r+WJSMGQR52B/9rGfei68I6Feat/adxYh7kukjfvXC
SOmNrPGDsZhgYJBIwPSgDQ1GTUIrMtplrbXFzkYjubhoUx3+ZUc/pXhmqPeSeOPED6hBBBd/a4t8cExlRf9Hixhiqk8Y/hH9a9+rwnxF/yUXxKP+nqH/wBJ4apdTzs0/wB3fqjVjtpHVBFjB5bPpWdrNuPNwOgrobV0hj3k8lQP0rE1cOoAIG9wWI/nXz+BrzlirdF/T+48rEUoxoprcwIUxImf74/nW4gLF19TWPCczxA/3h/OtrG1zjtzXtYyWqRxR11ZciGzAHQClIzdYHcYpkR82QE/dHIq0LfZKDzux/OvBqSUJO+9juhFyWmw5LeORt56kY61ZDKkYCg56Cmiz3NkE4PPWr0UMcUe4549a8jEYiOl235HpUKEtdLeZSit55GcSlSh6YqvdeVZRscHj05q+bvedsWCc8/SoZ7EOfOlz9M8U6NZqovbaLsgq0k4futX3OZla4nlLKRsxxkVCba5wSxWty4kiifaOv0qi15G+5R1HXivpqOInJLkhoeHUpxTactTnZgd3PUHBq+QTa25HQrVa/AE7EdGGavwoBp1sP8AZr1as/chL+tjK14mZINqSfh/OoVILgetWZ48LMPp/OqqZ81R2FdVN3iyofCze01S9uQOgJ/PtV1WlQDcRnvVXS42NqzL1PIq95hAywGa+fxMr1ZLc0pr3UxipLK/zEbR1pkkqQyeWgO4jNTK0rcIo96YY47YsWJy3PNYxkr2l9yLa0uvvIkt5JSTKQVzninTXJ/1cWc4x0pRNJKAIgDVvy44I+evvROpytc6v2QRhde795ktaytuklxgdMVQYgswrXmufMOxcYPFZpg2zMP4T0r0sPUk0+fQ5qiS+EfHk2h/usKxm4yvoK6BR+42r0C1hzoVd/TBrrwkrykVDR2Ohlgby42HQKv9KnhXL7l6kg/pVi2AeEZ7Iv8AIU0MMbl65xXgyrOScOx0Kml7wkfyyeY/UOVGPSpHTyQQPp/hS5Gw46kk1IpLwkt944rmlN3ubRitiW3hkMCOMcdasF+mQeOvFOtSyWqHHHf86tI8bY9+OleNXry522r2bPUo0U4qztoim8fO7sa5fWVzdOvqa7CcAMMdK5TWiVuJGA+XBBP4V6uR1HKt8jhzKHLH5nOyj5vxqEqMMe5NTSHJFQ7Tk+5Nfcx2OCnsJ14NLJgj6UBQaDydvrVGnUuaSM6rF9D/ACNbhfygzYOASD+dY2jqf7Wh9Of5GunVVCE443EH868PMaijWV1fRfmyXHmMhgwnXP3SartGpKn2xWpLHvkyBwGqg4ATPY1VGrzbHLKNiEKUBLdMjFPvAN4z6U9gTggfxAUt7FvAH0Naqfvq5NtBLd/3OV7HBqWOPe+V+vNQw4hiB9Tk1eSNgwdORWNaXK3bqVFXFR8HY3XvSmVCSMHinFxnnGajMsZO3PPTpXIld3sa7LchMoKblB55qJvMcZXGKsblJwv40xhIVygFdMWl0MmUXdRIVNR2i/8AEwjH+1mllOLgqfWltlxqEJH96u56U36CjuascRMku37wJ2/Wp2CvlHBzjmnwRNukOOckr71IxA4fg4rwp1byOqMLRK3lqOnUDFMZOntz+NTFUY8E0nkrtxk1op23ZLiNEbY+UjNOMWThuhFSLCAOCeuak8lcliTWUqtnuWqdypIEUYAPArnL7/XOR0zxXUylACBXOXwxI3pnmvTy6XvMzfuzRPoKlpLgdtv+Nd/8JSDp2vkf9BX/ANt4K4Pw5+8lmx/dH9a734Shhp2vhhhv7U5/8BoK1qP/AGia9D08uX79vyf6HoVFFFM9wKKKKACiiigAooooAKKKKACvC/EC5+IviU+l1D/6TQ17pXhXiEn/AIWN4lXpm6h5/wC3eGmup52af7u/VG5YyCSIh+NprM1i4BO8AE7cCrccTllKvtGOR61nawVEihcYwa8PCUovFXXX8DyK05exszHjbFzFj++P51tZ3EoOpyP1zWNFj7RF/vj+dbKja+e5J
r2MZa6OSOxYgfa68/KrZNbEZ82WMnsSaxosLAWPJxmtS0f9/EoOc9R6Zr53Hx0cl0uejhJaqL62L4ilRyqqSvY1KLVlQbmb1OackrKNpQkjvTZZZXUqFZc96+bc6knbReZ7yjTir6sieW3tgXYqB0yabczG5iCxc/Soja7gBKwZccg9zUySQw4Ubc10csItSjeUl9xz805JxlaMX95lS2GZS7McntSGyiAJGM/SnzPcTS5VHAH61E8FyTuDMB6Yr3ISqWXNNI8mUYXfLG5gapCokK55FW4UzbW47betV9TRzKDg571oWUe+2gH+znHrXt1alsPBt/1Y4Iq8nExrjO6dccDHP41RUYf6VtahbGOWYY6qG6e9Y23a55969HC1FOF1/WgkuW8WdHo8hFmABk9fwq+Xj3ZOOelV9HjVbIMRkn+VXwlu4DDaR2r5nF1IqvN2e520oycFqRrKpB8sAmo/ILtmTIxVorDEp2quewFUJZJpxhQ0ZBrOjeTfJou7HUtFe9qTebDE4Ubc5xUJs5JyWZ2UEg4pIoRvXewY+9TS3EgGI42Ix1Faawlam9e7I0krz2ElgihXJI/KsdxsLgnoTz+Naq2dw53yOxHB2kVQuo1EpXj73NdeEkk3HmuzCvF2vaw6Pi045YDkVgznDMOtbjEJEzA44xWE4IfB5I716uCWsmZrdHTDMSltxG6NePwFRKWHJJxndUkxBSP3A/lUZUmA4PP/ANevMhtd9S5b6E4n3MVPGBUscrFyCMdQKplwFMmOdoJFStLh1YDispUk9Eiozsb9pOnkgHGDUrBDyGx6VStDH5QBxntT5UfJKyYGOBXzlShH2zs7HtwrP2SuriTylPlJ4HeuX1KUvNNnpvrdmYhQrnJHGf71c3qDfv5f96vosooqMjycbUc7IzH+/wAUzHPPrTn+9nPSmHLZx619UjKOwwMQwwM9aQk4BxzjpT1XBwaQcjIGe1Ua3Rf0Yn+04Cff+Rrs4RH9n2kjJJP61xWlZ/tOP2DfyNddboTCSX5PT2r5vO43qJ3tovzZdKVpbEF7KkTnoF9azCVf5eOBmrV6MqATuPU1SfCfMOuCMVWFppQXc460ryFDhCQe7cflTr4fKmD94CqzEsmcYbr9KmkbzIUOcnaBj8K6uW0lIyvo0PgKGTLkYJK4q9h4pMKu5MZzVK1RTEd2M5yCfWriSsBtKlu2a5a/xaGtPYUyISOBnvUbbCe2c1KQMj5OtOEaHjAz2rBSUS7NkI2huAM8Zprl8fImRVnagOQoJPWkYleFjOPahVNdg5dDBuB/pLZGKkto8XcJBzzmp54w1w3yVHEdt5Go9eK9Xn5qdl2/Q5/tHR28X7rIPNRyf7SgHFS2YYwqu7leD70+SFgxBQtnvivl/actVps9XkvBNFEhd3XFAVcZ3e1XPJBP+roEC4+7+FbfWI2M/YsqKgPAc9amCAMTuJ46VOtuM/dx71KIlA6A1lUxK6GkKDM+RCQcLketczqO4SOoGck12cnC4EfWuW1WApeMMcHJr1cpr81RpnNiIcjTJfCyAXFwPSP/ABruvhX/AMefiH/sK/8AttBXC+H22T3JA/h/xrufhUc2fiH/ALCv/ttBXZJP63Ufkj0Mtlep8n+h6BRRRWx7QUUUUAFFFNkkWKNpHOEQFmPoBQA6isnSfE2ka5DdzWF0XjtG2TmWJ4vLO0NzvA42kHPTBpmj+K9F164a3068Msoj80K8Lx74843pvUb1z/EuRyOaANmisTT/ABdoWq6j9gsr8STtv8vMTqku04by3ICyY77ScUReLtCm1j+ykvwbrzWgH7pxG0qjJjEmNhcYOVBzweKANuvCPErbfiJ4lPpdw/8ApNDXu9eD+JcH4i+JAe91D/6Tw1UNzzs0/wB3fqjX3up2qew/lWPrKsjIM885rY89Yn+bOdorF1WbeN/ua8rAKXtk7aHiVmuW19TPt+biIHqHH866CLEjtyCVB/nXP25H2qI/7QrXt
08ud3H8ZINehjo3+4xi7MsRsHQKO55+laen4+1ejHis+G3IIZe/X6VpW0YN2j9hl68LGyjySSfRnZhYy54vzRuI8bDLDmorm5CIcKePSnpbxyNv5yfemzeXGCeeK+QgqftFo35H083U9m9l5lAieQEBsZ55FPSzQYaTBYd6VrxQOM5+lUrmS9kkTyivl5O7NetThWn7qtBfceXOVKGrvJmrviSPjH51Va6jOcc/jUEVmeCev1qRdNijQhc8nJ5qFSw9Nvmk2y3UrzXuxSMXU/LD7gPvjNP044ihJ7LxVbV1WMEtn5CcYqW1l8tIAehXFfSuF8Kktf8AhjxL2rNsdqbI8s57+WBXMkbpeK1tQkIebaeWrHRgJAT9K9bL6Xs6X9djGcuebkdtocKnTkZh1UCtCPToUUKigKOnNVtBiEulID0Kr/KtPyXBKjGK+Fx+JksTUipW1PosLh06MW430KklpEg3YGR05rLuC3nbUB5Fb5tN/wB/p9apTiKCQjmqweL963xMjFYZ8t/hRlx2k5kDFx61qLHDAuOB+NU3v18zCg8deKsizMwJk7+hrrxMpySdZ8qOahGKbVJXZG14hcJgjJxWHe7BdSe5roZrGLKtg5HvXL6sjC6bGNua7cpdKdT93poc2OVSMffHsm6J16gLn8axJv8AXCtu18xoXLY64/CsacfvSPQmvosI7TlE4Fumbc7lVgYH5So49ajFzyAAQDmrPk7o4Af4V4/KoniG4jHzDiuGEoWsxyT3GJIHBH+zU5OCAPu1VCCKRf8AZ4NPhZmOPzqpxW62Ji+hvWsatAh4DDpTpEmByHGKiiVhbqy/exSGWfYM7c96+fcZOo2mt+p6yklBJoY/zAB+WXvXN6icXMw9TmuieQMuT1HBrnr/AB50nuc17eWJqbucGIa0MonkgjqaAeo96e+AwP4VGMAH619EthrVBu3fXFKqgcDoKRRjnuRS8nGKBvyLukkHVoh65/ka6mJGaMlWA5IFcrpZA1WH3yP/AB010qNLj93jGT1rwc1i3UVuy/NlRstylchv3gz83IBqpyXUexNWrguk3zYyW/Sq7cyMw6YzWlH4TjnuNIO/J5BGKYf3RIP8Jz+dPST5wrdMZ/WnX6khcdDjP5Vsn7yi+pNtLly0iWSHPGQcirMcbKcEZ+lVNOUyQJt64rZt32YST73tXkYypKnKSWvkdtCmpWvoRBF2glDzxUn2ePI+XpWhGkbDoeaeYI8d+K8SWPs7ao9OODur6GWIo15C9aXjO0IavNFGrd+aUPGnGDT+tXV0mxfV7aNpHO3QSOSUEcismFv9Pi9N1auqlXknHckf0rLjAF3AB2NfVYPWjd7tfoeLVsqjSOmtCS2VP8VaoYEYIJNYlkr7Dsx5gHetgTiOMs+flGTgV8rmEP3mmp7WCn7mugnytg7DzxTlRP7vtUqzxuo4PIz0ppkj46/McV5/NPazR3csd7pgqxnII4pkgjXoKHaNlIOcdKgkdFz14FaU4Sk+pFSaS6DZJ1XGFPNczrFyGuH4PFb8sw2cema5S+Zp5JCOzEGvpsnoJVHJo8bGVOa0blrQmDPOf9j/ABrufhUc2fiEj/oK/wDttBXAaM3lrPn+5j+dd78Jv+Qf4g/7Cv8A7bQV60o2xE36HRlbXtWl2f5o9Coooqz3gooooAKRmVFLMQFUZJPYUtFAHkp1nTfEQ+I+m6LqlpdX2ox/6HFBMrNOBZRqduDzyCv1rTOo2nirX9EHh2UObLTboXDICPs3mIipG/8AdbcM7eo2E16PRQB5Zo1/aanZ+AdGsARqelSRtfQBSHs1jtpI5BJ/dyzBRn72c1Xs7mF/DOi+FFJ/4SG21qN5rbafMiCXRleY/wCwUBIbodwHevW6KAKmox6hLaFdMuba2ucjElzA0yY7jaHQ/rXheqpeReN/EI1GeCe6W6i3yQQmJG/0eLGFLMRxjuf6V7/Xgvijn4i+IwD/AMvUP/pNDVwV2edmn+7v5
Gwsan52PVR/KsHUeJGHvn8K6S1gNxAjEkYUfyrJ1qEBiyr3Arx8BXisRyN6ni1qb5FPoY8K/wCkRn/aFbNurb23DoeKyE5njXP8Q/nWzEGabGMcn+VeljHp8jnjq0XbOQL8p+lWLOUi/UcbNnWqcXyqWA7gH29atWSf8TAg9NuM14GJjHlm32O+hKXNBLubccJH3ScfWnx2ahyxZjnnmoI3mUldhIHepR5rHGCM96+Xqe0TfvI+hp+zaXusdIkSAnj8qzbi4AKrGAWOcCtSOyJbLOSMd6sfZIEALBSR3IopYinSd37xVTDVay0tEwQbrbnyxn60x5L0If3S/nWzLLAjYytVZ7qALjcuT0FdlLFSm1al+ZxVcNGCf7w5LUt8jfOMN1IqW2TcIW7heBU2oHfIxRAaLWEywQnlTtBOO1fVe1X1eN9P+GPn+R+0aWpj3e4NN65z+NZ4wc+tb15ZsTKcHHHPrWKI9kxz716+FqxlDQxa5W7nfaAhfSotv90GtA/aAwAQY71n6C7R6VFtGTtH8q1RcgcFRn0r82x7l9aqWV9WfX4NQ+rwu7aEYWdh8yYrOkt1+0uzMc1qNcMRhUyaxr2OdpCMMuR19KvAczm1dRuZ47lUE1eROBCvYflTWuJCp8pQaqW9s4dd0jNz3rabyI1/hFdGIcKMkvjZz0FKrFv4UYzNeuR5kYC9yDXPX53SuWP8WK7CW6hZdoZSTxXJ6kMu/wAuMvXuZPUcpvmhynmZhBRStK4nnbYio6GseUBmLZ7VpQ5KSAr05FZ7jB5HbpX0WHSjJ2POTZ0YwGUg8+UOPwqKVgAgP3mNBchowRjMYphZnkZWXG0gKfXivKjDW7NWx08RZZcD6UkAymP4hkfjTpCyqE6knBqW3A8w5GBnP4mhyapgknI0ESQQrtGeKYTMBygq1Gsu1SqFlIzmnMGwcx14ftrSd0mep7K66mbKMrkdR1rnb4/vZD3GRXWSxqSecHqRXNalEBNIR0PH417mV1U5WPPxMOVpsx3H+NAGVpzA+aq44pr5V2AGa+jvclO+g0fKvNORMKDSuM5x6Uo6Ae1F9Bt6FnSx/wATOH8f5GulQSop2KCMnrXO6UP+JlFx6/yNdZErrGP3eRyc14ObVOWovT/M0px5mZN5G+MuMO3NVGYBgO+MGtW+fLjK4GOtYrEtyw2nGSPSrwjc4Js5ayUZOwqKWbc3GBVm+UNBH7qP5VAn3gPY1Ndq2yHrgKo/StpP95EhfCx2mGRNjqMg9a6WCSNyDxk+1c3p5dFHy5U85rqLHyjEpOMnpXh5y0m5Nfcenlycna5YVEPc80vkrj7x4p/lqejYzSrDgD5zxXyzq9bnvKn5ERjQMSSeajk8tF61bEK85ao5o0CnJFOnWTkk2xTotRbsjkNSC+c3J6/0rPtgBdRnPVq2r6JHlkGec1l+R5d1EBzX3uFqp0eXy/Q+TqxamzoLSPnI6g1qCOTdjYNuKr2EIdc5wavh2QlduQO9fH46u3Uaj0PocJRSgnIiwRjgUHp0qXeT/BSbjgfJ3rh5n2OzlXciYcHPSoWQnoM1cznOVFJn0UVpCs49CJUk+pk3Mcuw4QVzF4Wj3llAJPNdpOruhwlctqds78bSMnNfT5NiE3aVjw8fS5Wmino53PdZ7Jn+dd78JRjT9fH/AFFf/beCuH0aHZJcg90x/Ou7+FX/AB5+If8AsKD/ANJoK9uUk680vL8jbK7e1uuz/NHf0UUVZ7wUUUUAFFFFABRRRQAUUUUAFeEeIk8z4l+Il9bqL/0mhr3evDdcwPib4iPf7VF/6TQ072Ta7Hn5n/u79UdLa2r/AGaHYf4cHAqhqmmuWxnjr0rc0qcR2o385FQapcrnIU18JQxdeGMaj3Y62GovCqTeuhw72bQ3MZP98dvetJXInf5TxzTLubfdxLg4yD+tOM6rcOPSvrZznUiuZa2PndIvRlieZbe1R1j3b2wQO2asWBZ7piD0GRVDLOvXgHP4V
b0qRo7iQ4JUYAA/GuDEU7UJW3OmjO9WN9jdS645Q5pDetniJqbvjJziniWNecGvl3The/Ie8qk7W5yUXMpwArCmSx3U5G2dkHfil+1Kozg1A2rqj7fLk/Kop0qzd6UP1/M0nVpWtUmyyunsSC7hj6kU5tJicZIUkc9Kh/tBmHAYfhSDVXR9pVzn0FHJjL6MfPglujLv7T7PIQOcnHApdLh3rEp67ealvbnz5cbGzjOSKl07ZHKXwRtGBXrzrVVg/f8AiPLjTpvFe78NynqqiJdqL19K5W6ASRvWupvrlWlHXrzXNXaF5Wc9RkfhXu5PzRppTPNxri6ja2Ov0GZU0yIHngVomSMuTgc1k6GiDTYyw6qK0wkR9Pzr5PHwgsVUeu7Pfwkp+witNiVJUB6Cq19IWbCqckdqmCxDn+tMkdFIIGe3Fc1FRjUUkmzeq5Sp8rdjPit52niO8hQ3Ix1rQWycrh5N31FRtcYwArZPtS/2iWHyq4/CumtPE1bOKSOelHD07qTuOOnRgZAXI9q5zU7YhnOejcVvrdTPnrj6Viag0kl1InOBgjj2r0cpdeNZ88rnFmPsXTTgrGbjy484zkgGsuU7mJx2rUkkaNZNynj2rLYk44OMV9dhr6tnhs6K8j2QW8oGThQcelNIUvkdQafdTfuIYxwdgqikr7jz9a8ujCUqevmdFSUVLQnnywZlPQ8VYgTJB7ZFUkfAA9TVjfII9sbYPAFOpB8vKhQkr3Z1FriOIKwzUjGLuBSWxRreMSfexg0828DAggc+9fDVJRVRuV9z66EW6a5bET2sTguqrnoa5fVdOcOwB6uW6dvSurJEO7H3fQVi6ncq1wUGcjBz+Netk9etGt7uqPNzKlSdO70ZyE0XlOMjvVeRfnY++K1rhPMnb0DDFULhMSMB6mvvaNXmSvufOp2ZWUcgdfWkJw+O1SquGOepqPbyy10Jmiepe0rB1OAD1P8A6Ca7uEJ9nUEDpzXCaSoGq27f738jXoFrGht1343HnrXyHEslGcH5f5nq5ZHmbSMbUkQBfkHOawZEy5PqK6rVI4gijHf1rmnIR2HotbZTV56KaOLH0+Sq0Vh/x8qvQYxV67wY41HXj+VZ53bU5+fPX8av+WzOGPpn8K9OtZOMn0OOOzRZtYTHABsJBFbVsqrAoyAe1NsFVrOLcP4QasNCmQRxgYFfK4zFe0m4PTU9zDYdwiprXQBuz/rKcBJx+870zyyO4pNrj+IVwtJ7NfcdabXQsAPk5k+lPEYYYZgaqgPn7wxUqBu7CsJwa1TNoTT3RlXtupncDAOaypIGS7jYnI3ba6GWASSsxxk1nXls0YVgRgNmvocDi1ZQb6WPFxWHd3JLqbNvHtgBU4Ip7SEckE0xN5iVkOMihi4718+48022eypcsEkOE5yPkNBufu/uzycfSoi7Um8mr9jF9CfayXUm8/g/IetJ54BI2Hpmodxpdx7Uexj2D2su5N5pZcbTzWDqTESldp4rbR2FZmoRmR2eu7LbU622hyY686W5kaWC88nGPlzXZfCv/j08Q/8AYVH/AKTQVylmpjuJvdDiuu+F+Ps/iLH/AEFB/wCk0FfUUpc1eXojDKlafyf5o7yiiiuw90KKKKACiiigAooooAKKK5vV/GljpF7dW7WWoXS2Uay301rEHS1RhkF8sCeBuwoYgc4oA6SvCfESkfEfxI46/aoh/wCS0Ne6RyJNEkkbB0cBlZTkEHoRXhfiSRV+IfiQH/n7hP8A5LQ1Ub9Dzs0/3d+qOxsQj2kYJPAp13FHsLetc79vMKKobqBjmorvUy1t5ZbHcHNfGrKK0q/OpaNmH9pUlS5HHWxbvIoxJ78GslkU3Lvznoaha+aZsk8jA/WnvJt3EckjNfRUMPUox5ZPU8arUU3dKxat5Mxy5+6cYrV0do2Mw9WrnY5tkII7nJp8d+IiNrYbg4rPFYKVaEoR6l0K6pzUmr2Ove2jPr+dKtvGO5/OucGskqOaBqzN0
PP1rxv7JxVrOR6X1+he/KdUI4wKT90DXJvqzFOG5+pqq2ouX++fzNKGQV5fFIqWa018MDuQ8QpyyRVw41JghO84HvU66k21TvPI9aifD019oqOcr+U7JoYn+aqRVBK+P84rDh1crGFLckE9aaNS2tvY46/lUUsoxEG036DqZlRnZqI64QF2dvvZwazJlBuGXtjmnXV8vnkBvvHNUZZwGZyeCMGvqMLh5pK/Y8SrJSeh2WkxJLYpnPHSry2UfqeuetcxYakYbZUXBAwKujWjkDjmvm8Xl2LdaTg9Gz2MPjMOqcVNao2/scY7nrnrUtvaxI8jc5cgnn2xXP8A9sk+nNC6ueBkcnFcssrxji02dEcwwsZJqJ1YjiAqurRDNc0+rkIx3fd96zn1Nt4y5/M06OQVp35pFVc5pq3LE7YSxA1k3rRrd7qxP7Rb+JscetU7u+/eLlu3HNd2DyOdOpfm6HFic0VWHKomnfSRZNZkoQDp1qk8xd0+YnJqSWdckZ4BxX0VHCuklG9zyalRzd7F+ZjIIpR93GBTRyhI+9mp5pIzYAKR8tZ7XAVlweCKzpJzjZLYJ6MsRkkMG6HgVPasDLk/dOapxXKmIA43BqhhvVQoM8luPerlRlNSSQoys0d2baOQRSZOVHHPtUggAHBP51zyawyIFbGcZFK2ulCFbAJGRXyUsqxjdk7o9+OYYbdrU2pj5WAeh4FYN+o8zd6hj+lPTWt7t5m0KvOazb6+Tztgbua9LL8BWpVLSWpxYzEwqx90QIdxY9NwxVWSPM8p7dqcLtX2qDyDzSSSLvIB6HJr6CEZxep5bKpjJz65pjRF5CP4eQ1WY5UB5PXkUgkCkZxya6VOXYSbQ7S0CahGR0UH+RruYYllRJMnO3HWuGt5Vjugc1uQaxsiABBC8GvCzrCVsRKMqe56OBxMKbftDZvbNDF35461iXVknmHOcY9akn1hnBXjjmsqbUmlXPGTXPl2DxVNWkx4zEUKjvBCvaqjKBnA960SFWOJfVaxjdcYJqee6RjDtflEwRXqVaFSbin5nFGajc7W1tIzZRLz8oA60stsABjPA9awrTWswKMjcBgj3FMn8RFY94CkYz3r5R5XjnVdu576x+F9mk1rY2vLbJHajymrnU8QylAxRMmlXxIxONq5/Gul5RjeyMVj8P5nQeSacIyK51/EMo6ImMVGviSVxlUQj8aayfGtXshfX8OtrnTCJm5PWori3ZkI7Vip4gctjauKnOt+YCo25FZrLcZTmnYbxmGnFo2FSRbZPLxnb39aRxNzjHTj61kf20YSqSBQCQBUn9tKQc7eDis3l2KTvypl/XKDVrtGhibd0G3b+tIBLxnHvWc2tIoJJGBQ2sopAJGScVawOJ/kJ+tUf5jR/fYPAzn9KD5uG246cfWs4awmRyvNB1hAcEjNH1HEfyC+tUf5jSHnf7P3f1qCeF5Ijuxu74qk2uxqHyR8pAP40j6wCxX5eBzWlPBYqMk1GxM8TQcbNsjWIrcHd6Gur+GqCNPEiDoNVH/pLBXCtqgediCMBTk12vwsm+0WniGX11UfpbQCvfw9GrCpzT7Blk4OrJLt/kd9RRRXce2FFFFABRRRQAUUUUAFee6ob7SL/wAX266Rf3ra0qyWL20BkRnMCwlHYcR4KZy2Bhq9CooAxrbQI28Lafo95Ncf6NbxRM9tcyQMWRQPvoVbHHrWNJ8LPCk1zLcTW2oSTykNJI+qXRZiAAMnzMngAc+grsqKBNJ6M44/C7wqetvqBx/1Fbr/AOOUh+FnhRutrfn66pdf/HK7Kign2cOyOLHwq8JDpaX3/g0uv/jlO/4Vb4UP/Ltf/wDg0uv/AI5XZUUB7KHZHGf8Kr8J4x9lv8en9qXX/wAcpP8AhVXhH/nzvv8AwaXP/wAcrtKKA9lDsjjP+FVeEv8An0vv/Bpdf/HKB8LPCY6Wt+P+4pdf/HK7OigPZQ7I4z/hVfhL/n0vv/Bpdf8Axyk/4VV4R/59L
7/waXP/AMcrq9QvoNM026v7pttvawvNK2M4VQST+QrA0nxVe3Wp2NnqujHTv7RgaeyYXIl3BQCUcBRsfawOBuHB54ouHsodkYGgfC7Q5NOlOqWeoCf7ZdKobUrhf3QncRcCT/nmE579+a1P+FWeExj/AEW/46f8TS6/+OVP4l8ZSaFr1npUUGmbrm2efztR1L7Ig2sq7QfLfcx3Zxx0NdPbPLJawvOkaTMil0jfeqtjkBsDcM98DPoKA9lDsjkv+FWeFP8An1v+OP8AkKXX/wAcoPwt8KEYNtfn/uK3X/xyuyooD2cOyOLPwq8JE5NpfE+v9qXX/wAcoPwp8IkYNnekf9hO5/8AjldpRTuw9nDsjjR8LfCi9La/H01S6/8AjlH/AAq3wp/z7ah/4Nbr/wCOV2VFIPZw7I43/hVvhT/n21D/AMGt1/8AHKX/AIVd4V/59tQ/8Gt1/wDHK7GoLyW4hs5ZLW3FzOq5SEyBN59Nx6UWD2cOyOC8R/DHRY/DGqvpNrqJ1FbSVrULqdyxMuw7ODJg/NjrWgPhX4TKgtaX+cc/8TS6/wDjlbXhjXJtf0ua5ubJbOeG7ntZIVm80BopChIbauQSvpVDV/Fd5aajf2ul6MdRXTIFnvXNyIioYFgkYKne+0ZwSo5HPNAezh2RW/4VZ4UPW2v/APwaXX/xykb4VeEmOWtL4n31S6/+OV1djewalp9tfWzb7e5iWaJvVWAIP5GrFAezh2Rxf/CqfCPH+h33HT/iaXP/AMco/wCFVeET/wAud96/8hS5/wDjldpRTuw9nDsjjv8AhV3hXGPs+oY9P7Vuv/jlN/4VV4SP/Lpff+DS6/8AjldnRS2D2cOyOM/4VX4SH/Lpff8Ag0uv/jlJ/wAKo8IZB+xXvHT/AImdzx/5ErtKKd2Hs4dkcafhb4UPW21D/wAGt1/8crL8QfDDRE0+FtMtNRa5+2WytjUrlj5JnQS9ZOnl7/p25rutTnvrayMmnWKXtzkBYnnEQx3JbBxj6Gq3hrWf+Ei8M6drH2f7P9tgWbyt+/ZkZxnAz+QpB7OHZGF/wqzwpz/ot/z1/wCJpdf/ABykPwq8JMctaXxPqdUuv/jlOh8a3N34tvNDt7XSV+yXa27faNV8u4kXYjl0h8o7gA/HzclTyK7GgPZw7I4sfCnwiDkWd8D7apc//HKX/hVfhInP2S+/8Gl1/wDHK7Oii4ezh2Rxf/CqfCP/AD533/g0uf8A45R/wqrwif8Al0vv/Bpc/wDxyu0op3Yezh2Rxf8AwqrwjnP2S+z6/wBqXP8A8cpw+FnhMDAtb/B/6il1/wDHK7KikHs4dkcb/wAKt8Kf8+1//wCDW6/+OU3/AIVV4S/59L7/AMGlz/8AHK7SsbWNQ1uzmxpmiQ3sKxeY8kt8IOeflUbWycAddo569aA9lDsjkdV+FuhJqOirZWWoG3e8Zb3GpXBxF5MpGcycfvBHyP6mtP8A4VV4S/59L7/waXP/AMcrptG1WDXNEsdVtldYLyBJ0VxhgGGQD781ztl44mupbC6k0gxaJqN0bW0vvtAZ2YkhGaPb8qsVwDuJ5GQM07sPZQ7Iavwt8KL922vx9NVuv/jlIfhX4SIwbS+I9P7Uuv8A45XZ0Ug9nDsjjP8AhVfhIf8ALpff+DS6/wDjlJ/wqnwiDkWd9n/sKXP/AMcrtKKdw9nDsjjP+FV+Ej/y6X3/AINLr/45SD4U+EVGBZ3w+mqXP/xyu0oouHs4dkcYPhX4SHS0vv8AwaXX/wAcpR8LfCgORbX4P/YVuv8A45XZUUg9lDsjjW+F3hR8brbUDjpnVbr/AOOVlz/DHRR4osY4rXUf7MazuGuD/adzjzg8Pl5PmZztMv8AkCuh8QeItW0KO9vjoUc+k2UfmzXH20LKyBcsUj2kHHPBZSccdq3bi6WHT5btV3qkRlC9MgDP4UB7OHZHKn4W+FCMG2v8f9hW6/8AjlB+FvhQ9ba/Pf8A5Ct1/wDHKn8G+
LbnxXAl0bfSY7d7dZStpqv2maNmAISRPKXacE554IxjvXV0B7OHZHG/8Kt8Kf8APtf/APg1uv8A45R/wq3woT/x7X//AINbr/45XZUUB7OHZHGH4V+EjnNpfHPX/iaXXP8A5Epf+FW+FM5+zX+f+wrdf/HK7KigPZw7I4sfCnwiM4s77nr/AMTS5/8Ajlb2geGtL8MWs9tpUEkUU8vnSeZO8pZ9oXOXJPRQOvataii41CK1SCiiigoKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAzvEGl/wBt+HNT0rf5f221ltw/90upXP4ZrloLfxHf6lpN9e6H9mbRLSYiP7VG32y4aMIFjIJ2pjdy+DyOODXdUUAc5qt1ftHED4QbUWmth5g8+DbGx+9G5dhke6g59Ks+EdJudD8J6Zpl5IslxbQBHKElR/sgnkgdB7CtqigAooooAKKKKACiiigAqG7mkgtJZobaS5lRSywxsoaQ+gLEAH6kCpqKAOG8LSeIdL0rV0m8LXSXD31zewRyXduBKJZywTKu2CFbJyMcdafqVjr2ma1r8+laUNQj1mGPY4nSMW8yxmP94GIJTAU5XJ4IxXbUUAZ+g6Z/Yvh7TdKD+Z9itYrff/e2IFz+laFFFABRRRQAUUUUAFFYc/jTwra3EtvceJdGhnicpJHJfxKyMDgggtkEHjFaFjqllqYZ7GcXEQVWE0YJicN0KPja/T+EnHegBdRurizsnntdPmv5QQBbwuis3PYuyrx161y3gn+3tH8G6PpV34dniuLQQ2s3mXUONmPmlUqzZC+hwTniu0ooA4fxRYanr8M2k23hr7O73Ubpqsk0OyMK4bzVAbzN+F4G0c98V3FFFABRRRQAUUUUAFFFFABXHeMv7evbu30q00a9udFlj3X01lPAksvJHkDzJEKgjlmHODgYySOxooAy7We6iXS7eHRXtrV4mEqtLGDZhVGxNqkhs9PlJAxXG6d4f11NO0HwzPpojstIvY521Hz0KTRQsWjCoDvDE7AcgAYPJ4r0aigAooooAKKKKACiiigAooooA4bxKuuan4hFnP4cvrzw7bbJAtrPbAXsvB/eCSVSEU/w4+YjJ4GD1E13fefLEukmSD7IZVdp0G+XJ/cle3H8XTmtGigDjLLT9Q1Dxjp2rtoP9i29jbTQyGSWJpLjft2piJmGxdu7k9cYHWuzoooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDk/Ekaf8Jn4N+Reby5zx1/0WWue8UahqMK+N47XULm3MB04W7RyEeRvYBio6DPf1716RJa2800M0sEUksDFoXZAWjJBUlT2JBI47GopdM0+czmaxtpDcbPO3wqfN2/d3ZHOO2elAHELok7+Nr3Qf7e1saedMivMfbn8xZjI6bhJncFwoOwHbnt2rDh13XtftvCds8m4XejC7kP9pvp5uZgVB/eRozEgc7Rj72TnFesC1txdm7EEQuWjEZm2DeUBJC7uuMknHvVSfQdHudOi0640mxlsYcCO2kt0aJMdMKRgfhQBT8IjUl8OwJqt1Bc3SPIvmwz+cCgchQX2ruYDCk4GSDW5UNra29jbJbWlvFbwRjCRRIEVR7AcCpqACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigD/2
Q==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oat
E/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeB+KAo+IviQbRj7VD/AOk8Ne+14F4qGfiP4k/6+Yf/AEnhrSl8RwZn/u7+Qy/jX7TuIGNq4/KvTfhrCieGZcKOblj0/wBhK82u18yWM/7Kk/lXpvw4OfDcp/6eWx/3wlZQb9nFHlZY/wDafvOt2J/dX8qNif3V/KnUUz6QbsT+6v5UbE/ur+VOooA+b3gVn6Cl8hTk7RleM1MQc/L6CpsZhB710uo1Y+K55FFY1JC7RwfSr32dI0Pyjpmkgh/eqB681am449Tx9Kyq1byUUDbZSjSONQ+welMZFZ2BXj6VOBnYB0pZip4Hampe8TcxLlQJWO3qcdK3BAg0RHVQCzbj+lZF0eSB3rcAP9gW57kGtMVJpU/X9DsqO9NP+uhlui7HG0cLn9apRoqhztHqavEnypD32VWiIKyN/s/4V0QejFSbUWdX8PEU+LLD5R95/wD0U9e17E/ur+VeK/DsY8V2A/2n/wDRT17ZXHW+NntZZ/Cl6/ohuxP7q/lXlfxMCjxFbLtHNovb/bevVq8s+JZA8Qwev2Rf/Q3qIbjzX/d36o4hgpDNtHTivZfASqfBdgdo6y9v+mjV40WAZQOgGa9n8Cf8ibY/WX/0a1aVPhPOyj+M/T9UdDsT+6v5UbE/ur+VOorE+iPOvFSJ/b0/yjO5D0/2BXnF+kZnIKjIr0rxIofxLcqOoCE/98ivPNTiIuXJ6gHFceXSSxNSPr+bPm8bpV5vNmh5SfZrYbRgqSfyNYdzCmWAUcGuh8pvsFu3GNtYVzHtuCV6E812YKd5S17/AJs4leMkVDGgP3RmkZFCDAHNTJ8/zewo2bgM9smvS5rbmvPZ2ZQEK9gMCnC3TJ4HrVrySMAdO9IV6t36VfOb+3b2Z3nwpjUaxOCo/wCPV+3+2les7E/ur+VeUfCrnWrj/r2f/wBDSvWa86r8bPWy7+C/Vjdif3V/KsrxQi/8IlrPyj/jxn7f9M2rXrK8U
f8AIpaz/wBeM/8A6Lapj8SO57HzjtUsRt75/OhUVcoFHNSY3Y+uaap+bnrnmvWPCvdH0X4ZRP8AhFNH+Uf8eMPb/YFamxP7q/lWb4a/5FXR/wDryh/9AFaleTL4me5H4UN2J/dX8qNif3V/KnUVJR5hfQI2t3hKDi4l5x/tGuQ1iOM3T4UcGu21FgNQv/8Ar5kH5sa4nWCFuyi9BxXk5bJyxc/n+Z81jFZK3cxpowAuAMAGgIvmJ8oztqW4UFMeo5pFGWU9scV9On7pzqXuCBFDTDA7f0qs0SeYH2jjIFWun2gjrx/SoH5VfxNOH9fcaU20/wCuw1Y1znaKsBF8vOBk0yMYOParDKrJn8aU3qZ1J6lFUGcYGQatWiIt0nyj71Q9H981NAP9KT3Ip1NYs0qSumTX0Sfanwo+UkCql1BHLtDKCMg/iDkVcuwzXMp9zUE33vwrOk2oxMacmmmmVnjTIBApRGpHIBzT2TOM9aFQBR6Ctr6G/NpuQiJAgG0Y3VN5SFjwKT+GpQAfm79KTYpzfcjdQyR4HHP86rugw3Aq4w+QegH9arSKB1pwZVGWtibTivlzqQPu16d8GlCaLrqgYA1U/wDpPDXmdgm7zMd1Fen/AAeGNI14f9RX/wBt4K5cQ9WduAl/tU15f5Ho1FFFcp7QUUUUAFFFFABRRRQAUUUUAFeAeKyR8SvEmOn2iH/0nhr3+vAPFjBfiP4k9ftMOP8AwGhrWj8Rw5j/ALuxSdzKfVQP0r0/4cDHhuVR2uWH/jiV5jafPHtPUdK9O+HIx4enz/z9N/6AlY9eXsePlf8AvHyZ19FFFM+mCiiigD57RgI8HrVuEDH1GarRbdvzY4xmrkKLn733hxU1mlc+IitSKG2OWdcnJz+dLOjfOQOdoX8eamjYwtI/VCOPaorjLMuDyo3EetZKUpT1KaVikWZcAetBhYDzOelWpUCQ7yOcioGchMHvXVGfN8JOxj3XVK3EZv7At9w+Yr0rFnBbaCOc1uKQ/h+0x13EfyrXFPSn6/ozsn/CRnfwSZ/uVWhXLTEdCn9RVtzzIP8AZqtF0lwP4P6it4vRmdN+6zqvh4u3xVp3uX/9FPXtdeL+AP8AkatN/wCB/wDop69orkqu8me5lbvSk/N/oFeUfE8f8VFbN/06L/6G9er15J8U/wDkYrXn/l0X/wBDeikryLzNXofNHGH7qp3IxXtXgIk+C7An1l/9GtXiSH169q9u8CDHgyw+sv8A6NatK2iPPyrSu15fqjo6KKK5z6A4PxDgeILk9/lH/jq155q+BNIR96u/8RI7eILw84+TB/4Ated6rnzJGJxzmvNy6K+uVHfr+p87mGs0rdWbxUnR7dh2XBrn51Hmn3roFz/Y8XoyZrn5Fy45710Zfo5+r/M4a+8fRECRYkwPpShCSMj2qVVxKWPGGzTkGSuRzur03MxcmyPyCH4HVc1G8W4Zxz3q+wCx4J+bPFQsNpY9Q3NRGq2HM0dd8Lht1ycf9Orf+hpXq1eWfDIf8T6c+to3/oaV6nWc3eTPpcrd8P8ANhWV4n/5FPWf+vGf/wBFmtWsrxP/AMilrP8A14z/APotqI/Ej0JbM+dUwpxTADgMepNPK9DTQcjaeMV6x4K7n0b4a/5FXR/+vKH/ANAFalZfhn/kVNH/AOvGH/0AVqV5Et2e7D4UFFFFIo811Qg6pdAdRcSZ/wC+jXH61GBfM+TyP/r11urYGo3bZ5FzJ/6Ea5DWyfPYjnoa8nLF/tUref5nzeNfTzMmRPkA9RTkHyg+wpkpKqD7YpYtxiQ4+bA4/GvpehxtPluPxgzgeg/pVWYERqQO5B/Orxx++9do/mKpyk7VGOOc06b1/rsVSeq/roOiBKjPXANW9vy8darR8jI9Aa0QiiIt3FZVZWZlVfvGRIu2cn0qa2b/AE2PP94VHcf69z24ogObyID+9W71h8jpteF/I07iPF5OP9s/zqndYC478VfkOby6JHCsf5mqNzhuO
9ctBvS/ZHNH+IQvgYGeaa+ePTH605mXKgnk9KRmGK6kbxvoJsypHrTwuSPY0xDlN2eKmAGR9c0N2FJtBtwMH0qpIQobParzjK59qzZF3hxnrRS1Kw+r1Lun9Xx0K16d8If+QVr3/YV/9t4K8u00ktIB2XNep/CQbdO18f8AUV/9t4K5sT8VjvwCti5Ly/yPQ6KKK5j3AooooAKKKjnhW4t5YHZ1WRChMblGAIxwwIIPuDkUASUV5Vq0k+gal4m1DR7/AFR4NB0ltyXWoz3CPdyDcuVkdh8iBT/wP2rWna78G63ZRxalf6jDd6bdyzR3lw02ZYVRg65+7ncwKrheRxQB39Fec6U1/pZ8G6o+r395LrbCK/innLxuZLd5QyIeI9rJgbQODznrVWwvNRGh6D4tbVb57vUdUiintWnYweTNMYxGsX3V2AqQQM5U5JzQB6hXz/4ux/wsfxHn/n5h/wDSaGvddRvJ7G0M1vp9zfyAgeRbNGHPv+8ZV/WvA9cnlvvHmvzz2U9lI1zFm3nKF0xBEOSjMvPXgnrWtH4jhzF2oM0bdFQKeAcZr0j4d/8AIAuOet03/oCV5schhjsAD+Vej/DjP/CPXGT/AMvbf+gJXNBXbkeRlf8AvCXkzsKKKK0PpQooooA+f40DIDkDOM1ZiBC47rwD6iqkat07df0qwsxCH1A/rU1U3oj4hE7SJhouOOtQF8unHLHk+gpm3cc5+c/epC3DnuBtFRGmlsNu4rK8t02WPlhOnbNBVGi5AyKY9wI38tQcsOtHlsw+Xqa0s1a+iEzJuj+8GPettVVPD1qcckk/yrGvF2ScdMk1tSfN4ctMe+fpW2KelL/F+jOrR0v67mVK2GdMYymc/jVaBjvlX0j/AKiprg/vSP8AZxUMA/ey+yY/lXXFe6OmkoM674fknxbp/od3/op69qrxb4f/API2acPTd/6Kevaa46vxHs5V/Bl6/ogryX4pjPiK0x2tV/8AQnr1qvJ/ih/yMdr/ANei/wDob0UviLzN2ofM4eJT1J69PavbfAfPguw+sv8A6MavFFywwO3Ne1+AxjwZYD3l/wDRjVpWeh5+VO9d+n6o6Oiiiuc+gOD8Ruf7euVHYL/6Ctea6sT50ik9K9H8REJ4iuMjJcoB/wB8CvONV+a5l98ivOyxWxlX1/Vnz2Pd6i9Wbu7GhQkHqv8A9asXy2K9eeufxrYJVNChXHIX+tZkL7pWB+6RW2EulNr+Z/mefX1cfREcv3yR0pQQJmPbIIof7rAEVGVLAMvGWx+FdyV1qYE8hDSA4wAKiYbQcnOOatSxKFz6gA1XK/eVvT9KinJNaDaOv+Gn/Ien/wCvVv8A0NK9Sry74agDX5wP+fRv/Q0r1Gm9z6XKv93XqwrL8Tf8iprH/XjN/wCgGtSsvxN/yKusf9eM3/oBpx+JHoS+FnzoWBfaKZt6nPOefelRRuLY5Jx+VJklTjrXrHhLR6H0d4Z/5FTR/wDrxh/9AFalZXhn/kVNH/68Yf8A0AVq15MviZ7sfhQUUUVJR5fqqn+1L1icr9okOP8AgRrkteyJuO4FdbrGTfXoXr9olJ/77NcjrMgkmUc9B/KvMy6/1lvzZ8zirJmZNyg9zSxZBUZ7Uh5UE9uKUY8wD0r6LpY5Ps2JEG4Te6f1FVZOAFPpVuI4WVjzkf1FQyx7iCKUHaTCErS1Eg6e3FaXSFm6jpiqCDDH0q2rlkJz8nTFZVtWRUd3czpgDM1NtP8Aj/iJ/vUs7AO59Dim23N7FnqGzXT9h+h1x/hv0/Q2roBZp9o+8xBNZk3zEdsVrzqBJMB3Y1jTH5T61x4V3Ryw+MhJBduOVPB/CgkHae1A/wBaxPpxTcdB6Gu460h6AGNfTFSlsZOMkDpUSYCqBUyDPzd8VMjOe9x4YvDnBGR0/Gs88XGO2Oa0k5XnoR/Ws2QMJX578UUt2isNvJFvSiPNlOOi16j8JDnTtfP/AFFf/beCvLdOBHmf7ten/B4k6
Rr2ev8Aan/tvBXNiV77Z34D/e5en+R6NRRRXMe2FFFFABRRRQBj2nhrT7XTNSsHElzDqU0012ZyCZTL94HAHAXCj0AFV9J8I2ml3gu5b6/1GaO3NrCb6RX8mIkEquFGc7VyWyxwOa6CigDmtK8FWGk3tpOl5f3EVgrJYW1xKGitAwwdmFBPy/KNxbAOBSW/gfTrbUIZ1ur5rS3uWu4NPeVTbwzMSSyjbu6sxALEAngCumooAK+f/FhI+JfiL0+0w/8ApPDX0BXz/wCLePiP4kbv9phH/kvDW1H4jizD+AyWVttwy9iq/wAq9M+HAx4cm97lj/44leY3bATY7lV/lXpvw3OfDk3/AF9N/wCgJXPBe6mePln+8/edhRRRVH0oUUUUAfPccrKAh+8RU6uNmfUc/nUKPyDgZxxUuFaJiD1FOaV9UfDkUZO8yD+KlB2/QDP402Mktkj5RSEZVU9ck1bWoCkiMFz+FSw3J2H9OKqOpkbP8I4pVIwAD35qpU01qPYqX0jGRj2J4rZRj/Y0Q7BeKw7wYYKOwrZ/5gkGP7pp4lLlprz/AEOmX8JGRM+ZSfamxZ8yU9tox+lEnBYntiliP7yT02j+ldf2S18Gn9bHWfD/AP5GzTh7v/6Kevaq8W+H5z4ssPq//op69prhq/EexlX8GXr+iCvKPidg+JLUelop/wDH3r1evK/iWoPiS2Y9rRf/AEJ6UHZlZp/u79UcSDhV9Sa9n8Cf8iZYfWX/ANGtXjAKkFwfYV7P4D/5Eyw+sv8A6NarqbHnZR/Hfp+qOjooorE+iOA8QHHia5Y9AFA/75WvOtSQC6cnsTXo3iMAa5eMT/d/9AWvOdTYNcOc152W/wC91X5/qz5zMPjS82bdwmNJt3HUJn9axYm+cjuc1uTyINNgXP8AyzArnmcLKw7iujAJyhJPu/zOGulzK3ZDgFO8c43Yp0ZDgemSaSJMk+/NJIdsZHSu7d2MC4M7ct+FRL8xYN16U4NnkdMU1vnLdjgDisEhnXfDcbfEdyvpaMP/AB9K9Rry74b5/wCEiuMj/l0b/wBDSvUatn0uVf7uvVhWX4l/5FXWP+vKb/0A1qVleJjjwprB/wCnGb/0A04/Ej0JfCz50wQ/tQVADGlfhz6ZzRu+97mvVPA13Porwz/yKmj/APXjD/6AK1ay/DP/ACKuj/8AXjD/AOgCtSvKl8TPfj8KCiiipKPMbxQ2p6gT1+0TD/yI1cdrKk3LsO2MflXZaghGqagOga4kIP8AwM1yWtYDsR94DpXl5dL/AGyfzPmsXsvUyVOI4/rzTsr5hNIuPLQ0hYKceo4+tfRHFa7JIyAZPTFRsDnI70RKS0hPGcf0pwBLZ7UtmLZihRyB0FSQtlSP4TTemQKQHkgdKl6oh6lOcYmbHc8/lS2v/H9E3aklXLuxJ4P9KdaNuuEI6Vu/g+R23/d/L9DakkHnSDu2TWRKMAZq6WYO5I5xVS542+prlw8eV2OWm7yIAf8A61MJ5I71MoGwU0jrxzXWnqdKkrjY+FUHrmp42Pln1xTAuMVIAwBGONvBqZO5FRpjlOUGfSqNzln/ABq2Ogz1x/WqkhAlJPrTprUugrTbRa0/KmXPTbXpvwf40nXv+wr/AO28FeY2bZaXH92vT/hD/wAgrXv+wr/7bwVzYjc7sv8A96l6f5HotFFFcx7gUUUUAFFFFABRRRQAUUUUAFfPvi4/8XK8RLjj7TCf/JeGvoKvnzxc2PiZ4iH/AE8w/wDpPDW1D4jjx6/cMLr/AI+mOf4V4/CvU/hv/wAi5P8A9fTf+gJXlV1/x8gjuFz+VeqfDUY8Nzj/AKem/wDQErO3uR9Dxss/3n5M7GiiipPpQooooA+fYWUgA4Bzge/FTiL5Wx0I4qK2gBILEcCrkSth85IUYFRWmk9D4lK5BCgICHsfzqO4BV2AXBJwKuNBsiXBG7PNQXbcqwGdtTCpzTugastSjKcfIv5iodjLzk1cZFjXe2MnpVR5i
QRgiu6nK60CN+hVuW+cerfpWsHI0GPPUHArGuWwxbqe1a5jY6PAd3BHSniErQv3OmatTRlHndnvzTkx+8I/u/4U0qVEmTk9fwpITzIexXP8q6ehdrxdv62Ov+Hxz4r076v/AOinr2qvFfh+MeLNP+r/APop69qrgq/Gz18q/hS9X+SCvKviaSPENsPW1X/0N69Vryb4pPs8Q2xJ/wCXRcf99vSpq8rF5mr0LeaOMyM4AwBzXtPgT/kTLD6y/wDo1q8NEvAHXIzXuXgP/kTLD6y/+jWratGyODK4ONd37fqjo6KKK5j3zzvxQ5/t66XHGU/9AFecagSblx0Gc16V4kj3eIblu2V/9AWvONQUh2buMiuPLWvrNRLv+rPmsbdVde7NqYB7S3A/55A5/Osjyw7tx3/OtaTK2EHqIuv1JrLQENuJ4HUVthG1GVu7/M4au4q/Km/3olXf26GnyYK7B3Apm4snXBzXQm9zImTIBXHGOtRk5YsOMjGPerEZUxLkjPNMkRRhlxz296yUvesx20Or+GxJ1+ckY/0Vv/Q0r1CvL/hvn/hILjP/AD6v/wChpXqFWfS5V/u69WFZXif/AJFPWf8Arxn/APQDWrWV4n/5FPWf+vGf/wBANVH4kejLZnzoMfNznJpCPmx75oA2qD70v8BPUivWPA2Z9GeGv+RV0f8A68of/QBWpWV4Z/5FTR/+vGD/ANAFateTL4me9H4UFFFFSUeZ6qSNWuQOhuJP/QjXHaxk3Lj3rsL5jJrd72VLiQfX5jXH60rLeMQeOT+teVlumLknvr+Z81jNVfzMzA2jnjFJgNuB49D6U1jhQKdkcn2r6Sxx2Y+Mjb1+tOUbQR2NMi25Pv1p5bHy/lUNakSWthGyOBz70dsD86QHkg9cDmlwTlRxx1oEU5uHYZpbRgl1GvbNMnbbOQfSnWwDXaD3610P4Pkd1v3evY1bg7Z5vQE1VdQwHqKs3ZxcXB6gEnFVwCQee9cdL4UzhWjuQkFQo28EnJ9KAOCaklJVVB5z3pCPlAHet09DRS0GgDK/WpnG0fhUYwHXjvmnOx8zHtUvVkvVkLdPfH9aqS8u4PHersgwoPfH9aqMo3knknitqbOqg1uWbFcGY9cgmvTfg9/yCNe/7Cp/9J4a8ysSQ0wJ7GvTfg5zo+u/9hU/+k8Nc2I3Z3Zf/vMvT/I9HooorlPbCiiigAoorL8TNdp4U1htP3fbRYzG32dfM2Hbj3zigCW31vSbvUJdPttUspr2LPmW8dwjSJjrlQcinQaxplzqEun2+pWct7DzLbRzq0if7yg5H41554TuLnTB4VtLTUrS/h1OwdvIjt40+ylYgwZSo3bd2FO8kkkc54qto/2H/hGPh39j8r+1vtyebtx5u7y5PtW7v13bs98Z7UAemf2xpn9qf2X/AGlZ/wBoY3fZPPXzcYznZnPT2om1jTLbUYtPn1KzivphmO2edVkf6KTk/hXmA+y/8INH/qv+Eh/4SX2877T9u59/9Vn/AIB7U7WPsX/CK/ED7V5X9sf2i/lbsedv2x/Zdvf+5tx3z70AetV8+eMBn4keIyf+fmH/ANJ4a92vpNSisA2n21rc3fGY7idoU9zuCOf0rwDxE97J45197+CCC7NzFvjgmMqL+4ixhiqk8Y/hH9a2w/xnHjv4LJbjJnRh0IGfyr1b4bHPh24P/T23/oCV5PK+3bu9Fx+VerfDM7vDdwfW7bH/AHwlKS9xHjZX/vC9GdnRRRWR9KFFFFAHgMUcnBBGMVowMVgIfqOtUYRJu+UDBOf0rSyrRY/OuPEyvZM+MprqVmZlR5D3GRVMFiojJ+YkE1YknLOw/gAqo52y7+56VvRi7aoiTCVDO4z9wVHLCgjOOv1qVZCYXVeo/nmozayiJiw7+tdEXZ2btYRlSpkjPrWxlv7Jt+fl21kTnGR6YrXYA6NDn0BrfEfY9TpqawVzLfncT121HDjDr22/4VMwyDj0qKNcGQ/7NdC2Li/da
Ou+H3PizTz2y3/op69qrxX4ff8AI2WH1b/0U9e1Vw1vjZ7OV/wpev6IK8f+LWP+EgtM/wDPsv8A6E9ewV5H8VlDeIbXPa1X/wBCenQ+NGuPdqV/M8/g5l2noM17z4D/AORMsPrL/wCjGrw2FNpr3LwH/wAiZYfWX/0Y1bYl3OHAS5sQ7dv1R0dFFFch7ZwPiIltfvE9kI/75Fec6odsrejZNek+IW269c/Rf/QRXnGqqWbB6jNeZlj/ANsq+v6s+dzBLnV+7NGRydPjB6FBj86y94EpB6GtRo2bTIR6r/WsebgNnrmu/CJPmXmzzai1VyQkGfPYYpm5hxnsc00g5BPRjQeEJrsUSCzB8xBPRQf8KnbajD0GGH5YqnCSFye9SFyu3PQfyrCcG5Bc7P4cMG8Q3BHe1f8A9DSvT68t+GoH/CQXDDvat/6GlepUNW0Ppcq/3derCsrxR/yKWs/9eM//AKLatWsnxR/yKOtf9eE//otqcPiR6L2PnPPyqD0J4p5yGAHQmmr/AKojuDRu+QHvmvWPBtqfRvhn/kVNH/68Yf8A0AVqVl+Gf+RU0f8A68Yf/QBWpXky+Jnux+FBRRRUlHmV/ldWvcdDdSZ/76Nchq8hN5NnoCAPyrs9Zwt9eH1nf/0M1xetR+XKx9a8nLGpYmTfmvxPm8bFq0fMxHYkKPwp/GAPUE0xFDD3pzLzGx6gV9Q+xg7bEsKgE+hFPdscD8KgjJMhx93FSOcLx1PSoa1MZR97UcDxg9afzwB1qPGeD1qUAkADrUMiRm3Kl529qfY5+1Rg9QRmkn+SZiafZH/SI29Wrol/D+R3t/ufkaVwf9ImJ9TmoEOcin3bhrhwOm7mmgc4FcsF7iPP6DZdxA5pFPycdac+4delMxyfQ1a2KWwuBlSPU0gBZ1Y+lOAyAPQ5pyDpRewr2Ipwflx6VQm39FPINaMv3Rjrj+tUXb5/Y/zrWk9DqwzLFivzTZ9K9M+DWRouu56/2qf/AEnhrzSx5831KmvTfg7/AMgjXv8AsKn/ANJ4a58Q9/kd+Xv/AGmXp/kej0UUVynthRRRQAUUUUAUrPRtL064muLHTbO2nnOZZIIFRpD/ALRAyfxog0fS7bUJdQt9Ns4r2b/W3McCrI/+8wGT+NXaKAKX9j6X/an9qf2bZ/2hjb9r8hfNxjGN+M9PeibR9LudRi1CfTbOW+hGI7l4FaRPoxGR+FXaKACvn7xd/wAlG8R/9fMP/pNDX0DXz74vz/wsjxIP+nmH/wBJ4a2ofGcWP/gMqXXyzA9cqvH4V6x8MGDeGJsdrph/44leUXCbpc/7I/lXrHwxXb4XlHf7SxP/AHwlObXs0eTljXtl6M7SiiisD6MKKKKAPCbSdQMHHt+VWmACsynO7nFUoGjEYJxkcVK5ZIsgls8AVyVIXnofGRloROVO6NTkqRmqchJJIH3TgVZuosRFlbDE8471THDbC3PWuyila6M2S2uIsbj+dW2uVkRlXFZ55fOeMYxVi3Qc5Pf+lFWCfvvcLsy7xMSY9a1njzo9uwNZV42ZSfTitVWP9jwEngVtXvy035/obv8AhozHGNw9qr5x5gH90f0qaRidze1QRj55Cem3/CuuK0NKastTsfh/j/hKtP8AXLf+inr2mvFfh/8A8jZp57Hd/wCinr2quGr8TPZyr+DL1f5IK8j+Koz4itP+vVf/AEJ69cryP4qnHiC1P/Tqv/ob06Pxo0zH+D80cQufMUdhzmvcPAf/ACJlh9Zf/RrV4jEpIHqa9u8B8+DLD6y/+jGrSvscGWP9+/T9UdHRRRXMe8cD4jXf4guDk/JtGPqi155qTHcWYY659q9B8SyEa5fKo5wnI/3BXnN8XO5WBPHNefl0X9aqN9/1Z83mDTqJebNfeW0mMgfMoJA9axJSXIU8Hqa1gS1hDg4GCP0rKIxKQTnHeu7CRUXL1ZwTeq9Bf4Mn+GmPnyznjqBUyjdlcdc1HIPl9hXVF6maeokZOAvan
liOCP8A9VMU/JuH0pA+4HdwcYptXYWO4+GRB1ycj/n1b/0NK9Tryv4Y4/tyfHT7K3/oaV6pWEviZ9Nlf+7/ADYVk+KP+RR1r/rwn/8ARbVrVleKP+RS1n/rxn/9FtRD4keg9j5zRdseCe2aVAMj2FLxtoxgHFeqeA3c+i/DX/Iq6P8A9eUP/oArUrL8Nf8AIq6P/wBeUP8A6AK1K8mW7Peh8KCiiikUeZaq6Pq1/Hu+dZXO323muP105mXHsD+VdffgHXL/AOTkzSDd/wADNchq0RW8bLZAry8uSWKkvU+Zxbe77mOCFLAdQaMbwG9KR/lJJ9aWIE+wr6bpcxe3MKnyuxFSMeBx9Ka+EUmngjYv6VD7mT11HqmeO9SMfLUBeSBRGOM5yaSRgAT1NYt3djHdmbMS0rginWhBuIwOzUkuPMz70+1TF6noa6pfA/Q9Btezt5FmVv35C896f3HpzTJOLokDjNTsvzADgYrnbskcMtEiJ8r9KYMlunFOlBB659qQAhgcdapbDWwDt9RTohhQAckU1Ae46MKfGvGc8jrSkxMjk4UDvzWe3zE+xrRlHAHesx2KxscZIJ/GtqWqOvC67F2wIYybTnHWvTPg5/yB9d/7Cp/9J4a8x0kZkmHT5ev516h8H+NJ14f9RU/+k8Nc+I+Jr0O/AK2KmvL/ACPRqKKK5T2wooooAKKKKACiiigAooooAK+fvF4/4uP4jP8A09Q/+k0NfQNfP3i//koviQ/9PMP/AKTw1tQ+M4sw/gMqyMTIoHfAr1n4aAjw3Pk/8vTf+gJXkybgzepHFet/Dj/kXJva6b/0BKVTRJHj5X/vC9GdhRRRWR9KFFFFAHg1vCjxknGMc1Mp52kfKp4/Kq9sGKZX7rj/AOtVkyghU/A1zVL8zW58WtirPlSWY5X7wFUpAPvY+ard1L5m5B90VQZ8YP4Cu2hF2uTa70HuwUDH1NNSVs/KTg1Exwp3d6IZkwevFdPJ7pfJ7tyG6znr1Oa3Cg/sK3PHXNYtyMkelbUYL+HrfH8JIP6VhinZU/X9GbPWkY03CuB1xUMOSWBORtq1KBucf7OaqxZDSfSuyLvEqm7wZ2Xw/GPFOnD0L/8Aop69orxT4eNu8V2Hsz/+inr2uuGqrTZ7GVpqlK/d/kgryT4qAHxDag97Vf8A0N69bryT4q5/4SC1I6i1X/0N6dH40aZj/B+aOLjba2fevbPAfHguw+sv/oxq8ND7mr3LwJ/yJlh9Zf8A0Y1aV1ZHBlkbV36fqjo6KKK5j3jzzxBIB4pvVPTCZ/74FcJqJUTSDuc12XijH/CUXg7u0a/+OCuH1GNknlZsbieK48JTSxU3fc+Xxsm5teb/ADL8ny2UQXoVzWUwYsWzWoQBZIp/u4/rVO1XdOwP3e1dVCXLGTOJ7iqMMxIxtJqORcRAHqVzVmZlbeB2qvIQ/X0IrSDbdyOpAnK7B35p2wHcTTYsIQfSpWwwZl6kVvJ2ZT3Oy+GAxrc4/wCnVv8A0NK9Uryz4Zf8h24HpasP/H0r1OsJfEz6XK/93+bCsrxP/wAinrP/AF4z/wDotq1ayvE//Ip6z/14z/8AotqI/Ej0JbM+dF5P0pS2ATTVfBNC4LhR1Ar1TwLa3Po3w1z4V0f/AK8of/QBWpWX4a/5FXR/+vKH/wBAFaleTLdnvQ+FBRRRSKPMNRnVdZv0wciZzn/toa5PUz5kznP/AC0P6Zrp79Nus6kzfxXMm36bjXL6rHsuP97mvMwMYrEytufL4tya17mLIpZWHfg5p0ancmPSpHULEfYURHEifSvo3L3TBzvEjusggA8ZpQw2JnrSXakt+NRHgJ7A04q8UVGKlBItxyH1oZwwye9VlYde5pQ+5iT7UuTW5Lpa3IHOWbPTNWLZgbqMe9VuCD9anslBvYz74rSfws6ZpcjLc4C3LY6BsVOvzZqK+Ux3bD1enx53c/dxxXG9YJnBJaIjdSH68YoCnaCfXNPI+Y47UjNhBmndk
3FRcqeMUKAAx96eGJRv0pnU8dzzU6iIpSMGs1xh8e5q/cHalUJCSWx1rrorQ7sKtLlmyyiyEddten/B450jXj/1FT/6Tw15ppvzeZnsma9M+D3/ACCde/7Cp/8ASeGufEPVndl/+9Sv2/yPRqKKK5T3AooooAKjnnjtreW4mbbFEhd2xnAAyTxUlVdS+zf2VefbHKWvkP5zDsm07jx7ZoAztK8WaRrMbS2j3YhWHzzNcWM8ERj4+YPIiqeDng9OelP0fxVo2vTvBp14ZJVjEux4XiLRk4DrvUblz/EuR715pqUUd1p1/ofgjVbvVtPl0S5jnh+0tcxwsqqIlRznazfOuwHp2GK3LjULTxd4h04+G5RJ9l0m8Sd0G0QGVY1jiY9m3KTt6jbQB1mm+LtC1e/+xWN+JZyGaPMTqsoU4YxuwCyAd9pNEHi7QrjV/wCy4r8NdGRoV/dOI2kXJZFkI2MwwcqCSMHjiuK0fULPVh4C0vTc/b9KIe+hCENZols8TpIP4SXZVAPXqOKqadcwyeHPDfhZCTr9lrET3NttPmRCOcvJK3orKCQ3Q7x60Aet18/+Lxn4ieJP+vqH/wBJoa911GznvrQw2+oXNhISD59ssZce37xWX9K8B8QW81l428QQzXk97Kt1FmecIHfNvEeQiqvGccAdK2ofGcOYfwH8idUy2e4UGvVPh0u3w/OP+npv/QErzGCPMqv2wAR+Feo/D8Y0Gf8A6+W/9BSudzvKx4+VL/aL+TOroooqj6YKKKKAPAUlddoUDGKdj5SQfmaoVlC8YGRTwCiFsnkcUSjY+HIZmXGwdcYqAL82T91RT3iO7GTkinOm3Ef8R5roi1FWRSdio6NJ9Caa0QXkdatyj+BBk+1QGCXqVIraM9OxrGb72IpeAv0Ga6KJR/wjkAP8RrnLjK/UiuhifPh+1I7Vx49Plp2/m/Rmv/LtvyMibARsen9apx/ec+1WZyDu56rVWE/f9MV3U17pNJe4zq/h0MeLbI9izEf9+nr26vFfh7/yNlj9X/8ART17VXJWd5s93LHelJ+f6IK8l+KYz4htf+vVf/Q3r1qvJvikceIrUetoo/8AH3pUvjQ8y/gfNHBhMvx0r3LwJ/yJlh9Zf/RjV4l9wqB1r27wJz4NsfrL/wCjGrWu7pHDljvXfp+qOiooormPePMvEqb/ABdeZ7GMj/vha4rWH3zMU7Ng12fid9vi+89jH/6AtcZqZC3Mg9a58Mn9abPlsU/3krfzP8yzJJ/oiHsY8fjkimW5Tbs/iUDd+IqRYS1tEzcArj9aZPthjZVxu4zVxad4Lucb8ys7bfu9MEVXLnLfiRT3bKYHUNUbYIB746V3QjYSRXST72exqUErwOhqE/KcngGlB+QEHI610NXOiUU9UehfDI512c/9Orf+hpXqdeVfC47tanP/AE7P/wChpXqtcE1aTPdytWw/zYVleJ/+RT1n/rxn/wDQDWrWV4n/AORT1n/rxn/9ANEfiR6EtmfOoXPPpTBlG4/Gn7sLn26UH/WexFeqeAm+p9F+GTnwro5/6cYf/QBWpWX4a48K6P8A9eUP/oArUrypbs96HwoKKKKko8s1SPOt3zkn/XSAf99muZ1lR9pyOqjBrpNSk/4m9+O4uZP/AEM1zWt5W5LduP5V5uCT+tNPzPlsTZrTuZB+57ZqKIsZgG6VMDkMPembcupH0r6JPRmMXo0xszE7s9O1RsAqpj3p0vVgfakk+6uOtWtkaw0S/roM+npUkY5pFTbk++anhQDk9TSlKyFUmkinKgfIGetS2A2XcX1qOT5Z29CaktM/a4MDjfg1U/gfobSb9nbpY0tR5vWI7mmKQVA9RS3xzdvjnk0DHHoRXFD+HH0PPkNOA2R24pJE37Se3NKQC/B6Uegqydg3YTa3GeKkwrZXuOaZEhI3MO1SMQCT0zUS3sgZXuEDQoOx/wAazyMSEds1qzAGFQD0rMc4kOfpXTRd0dWHk7NFnTF3ecPVM
V6X8Hf+QRrv/YVP/pPDXnWj4MkvoFJr0b4PjGk68P8AqKn/ANJ4a5q8vfkvQ9HL3/tUvT/I9GooorA9wKQkDGT16UtYHi6CyudJiivdFu9WzOPJhtBiRJNrYcPuXy8DI3bhjOO9AG/RXnthZePdHtrm+jniuLSNd8Wj3kxuZ2A6qLgBcMewbeM969CoAKKKKACiiigAr5/8X8fETxIf+nqH/wBJoa+gK+ffGILfETxGAcf6VD/6TQ1tQ+M4sf8AwWWLRXZ0xkhwB9K9S8CKV0S4B/5+WOP+ArXm2l5MQA644Nen+DQBpVxjp9oP/oK153tL4nkPLyuH7zmOiooorrPoQooooA+ekZN3OOlSFiFLH7oqBY1JyetP8wSIUIIArWUddD4cbCxafe/TGcGp9mQZSPmJ4qG3xLKVxx71dRC0wPSNM8VFWXKx21Ejt1T94+M9eaGkibK4GR2pria4kwjYUcc1MbBQNwxu71zuUU7zepVm9jFvVAkP6VqqxTRbdQONprM1RdkmPQAVqwjzNLhTsEzmumu/3dOT7/oa/wDLsxJiQx+lQKSqscdun41bvFAdqrL3/wB3+td8HeNzWm/cOv8Ah6P+Krsf95//AEU9e1V4t8PefE+nN6l//RT17TXDV+Nns5X/AAper/JBXkvxTwPEVqx7Wq4/77evWq8m+KIB8Q22e1ov/ob0UviKzL+B80cPnkE9a9u8Cf8AImWH1l/9GNXiMa7iCe1e3eBOfBth9Zf/AEY1a19jgyv+O15fqjo6KKK5j3zyzxM2fGl+COF8r8fkFcbqfzTOe4zXY+LBt8W3zDqxjH/kNa4jUHJmkHuajDRviG0fKYjWtJf3n+ZuEhtLjK87VHT61m3Ll2/AVqRp5Vgg/hKAmqDIjLnvmscNJKUn5s56nQzkyEOeT1odeMj0xUjLknbSMGCAE9TXqJ63Ivrcqkb19qaFKrjqKmAGNuOtNAKkKecc1rc3UtLHefCz/kMz/wDXs/8A6GlerV5X8LxjW5/+vZv/AENK9Urhn8TPfyx3ofNhWV4n58J6z/14z/8Aotq1ayvE3/Iqax/14z/+gGiPxI75bM+dsAjHtSMCAD6GhTz9RSZOF5r1T5+zufRnhr/kVdH/AOvKH/0AVqVl+Gv+RV0f/ryh/wDQBWpXky3Z78PhQUUUUijyTUyBr9/k4H2mX/0M1h60MzjPTaP5VvavDnXL92xg3EmP++zXP62+F3AjPSuDDWeKTXmfKVk9V5mIWxup4YKOOT1qEn5iOueKkUAbSRkmvoGiJRVgK72GR160pTK9OhqRRgn1pygflUORm5siERHfg81MFAILEDPAoA4/z0pyp5hBb7ueAaiUu5Dk3uZ1xgSv654qWx4uYR3LUy5QC5b2p1iM39uP9oVvJ/un6HbvS+X6GhcpiWY9Tmo1PIHbFWtUULPKEGMtVMH19K5KT56aZxNEmFLHGM0LgE55OeKauM8cGj+fUVViR+4qADTZW3R8cGkJIXJ5qJj8p5pxjrcaQksu2PI54/rVFzuIPvVlyCAnbFVzhSfY1000kdtBJLzNLSCFklH+xivRvhF/yDNfx/0FT/6TwV5tpfMsn+7XpHwfOdJ14/8AUVP/AKTw1xV177fodmXf7zL0/wAj0aiiisT3QrB8VW+ovb6feabbtdSWF6tzJaLIEM6bHUqCSBkbwwBIBKit6uT8fxyy6RYqtlqF/bC+Q3dpYZ8yaLa+QSCPlDbW687QO9AEmkyapq/ikavPpV1pdjDZPbLFdunmTuzo24qjMAFCEAk5O88V1FcX4QtNDg1aV9M8K6ppM5gIae7hZFZdy/KCWPOcH8DXaUAFFFFABRRRQAV8/eLs/wDCyPEZ7faYf/SeGvoGvn7xe4HxH8Rr3NzD/wCk8NbUPjOLH/wGb+kRIkIY9SoNd/4LCrpE6r0W5I/8dWvP7T54I1B6KM/lXfeCARpFznqbpj/46teJhrvFSk2cWXv3opLudLRRRXqnt
BRRRQB86KrFj6VK5GzavWq+51c7RUip3BJLV1SXVnxLXUt20e4BF61cnUgLAnUg5qO2j8iMOPvHjBq2U8qMs3329a8utV/eaf0y4x0KzzrANgznGajjmuDuLY2npxU624VvMk61aimhYEDHHtUSqRivdjccYt7uxz+pKGaN/wC9zWlapnSYQfu7DmqWssPMCjp2rR09w2mqp6bK6a8n9XhLzLivdsYl+u2RwPQVTj5Zh2xWhqIxNIfYVQiyScjBxXp0XemmVTfuM7D4fn/iqdPX/af/ANFPXtFeM+AE/wCKn05/9px/5CevZq46nxM9rKv4MvV/kgryf4oIW8R2vp9lXP8A329esV5V8TQzeIbYKP8Al0X/ANDeim7SKzR2w/zRxCqXmVV6Bhn6V7X4HGPCFkB2aX/0a1eOZWMBR948V7H4G/5E+x+sv/o1qqo7nnZQ7136fqjoaKKKyPojy3xSP+Kvv8+sZH/fta4PUc+ZIfc13fjBG/4Si9fsoQ/+OLXAXxbzn443UYNXrSZ8tW/3iXq/zOk35sY19UFZkgKudvStNlAsEI+8ygj/AD+FZi53MW6lq58Nb3mu5yVehGyCPAH3QuaZKGGWP3QKtqgdZPXGKbcxHyivauuNT3kmR5lGJMxHPUninqp4Dde9PhXIx3q1HHvGCMHvV1Ktmxtts6z4aLt1y49Psz/+hpXqNeZ/DuPy9emHb7K3/oaV6ZWN+bU+lyv/AHderCsrxP8A8inrP/XjP/6LNatZXif/AJFPWf8Arxn/APQDVR+JHoS2Z8642qo9D+lRoWALn7v9Key7/wAsU0k42446V6x4K2Po7wyc+FNHP/TjD/6AK1Ky/DPHhTRx/wBOMP8A6AK1K8mXxM92PwoKKKKko8n1cs+s3y9hcS/+hGuX1iHkDtmug1RmGuahjvdy/wDoZrD1pvnQdwBn8q5MKnHE2XmfJ1neTfW5jqo59amGNqgdaiUgjPoc1Kq8H1617UjKbJQOg7kUoRevpTgwIHqaeEHBz061zuVjAjx+v8qckfm4z93tT8Ac+tPiXzSMfc7GolOyuCM27Qee474ptgn/ABMLYHqHFTakpjmOOuRUNkx/tO3/AN4V0p3ot+X6HbTvyGtqX/H3IvYNmstX5PpWlc72vJjjg5P41nOMHI71hhlaCj5I51ZyY4MD064pQemetQ7hux3xmnKa6HEbiSEkKTUTHIIPTFP6LUMnQ5pxQ4R1EC5x6YqtIrfvPXtV6JMqPpVOb5ZGFaQetjpoy99ot6Sx8yXP9z/GvS/g9/yCNe/7Cp/9J4a8z0tQHbB6qM16b8H+NJ17/sK/+28FcmI+N/I7cBb63O3b/I9GooornPbCua8aiY6Xac3g0/7Wn9omyLiX7PtbONnz437N23nburpawPF1zp9rpMT6l4in0GEzgLcwypGXba3yZdWGCMnpn5aAOa+Hmo3Wqf2M0Zvmt7PRFt757hJFRrnMe0DfjcygSZIz94c16JXn3hDUIrrxfNFpvirUPEGm/YWZ3maNo4Jd6gDciKCzDdgZ4CtnORjtdVsP7U0m7sPtM1sLmJojNAQHQEYypPQ0Ac0vxCtP7S1dJLR10zT7F71b4SZ+0KjFW2JjpuVgDnnHHGCbOn+K7r+0I7PXdJGlNcWj3duwuRMGRNu9X+UbXUMpwMjrg8Vy9/8ADnV7rUb61GtXUmny6E1jE8sVuiBstsjKxopCrlWyAOmMkcVs/wBma14o1e1uNY0s6VBaWFxbtmdJTNLMFUlNhOEAU/ewTkcDFAFrSfGVzfXWlfbdGaysdYVjp9wbgOzHYZFEibRsLICwwW6YODTLXxxNcTWd0+kGPQ768+x2199oBdnLFVZotvyozDAO4nkZAzVLS9J1+7k8L2Gp6Ytpb6ARJLdCdHW5dIWhTywDuAO8sdwGMY5qtZeH9dXTdH8Ly6aEsdM1CO4bUvPQpLDFKZIwqA7w5wgOQAOTk8UAeikgDJOK+ffF+D8R/
ERGD/pMPP8A27w17xqOmWGr2htdSsre8tyQxiuIlkQkdDgjFfP/AIi0+00zx34gs7C1htbWO4i2QwRhEXMEROAOBySfxrah8Zx4/wDgs3rGVmiUKMgjBPpivRvAzFtIuif+fpsfTaleaaXLsgwe5OK9L8DEHRrjHa5Yf+OrXmwjy4mWh5eWP94tTp6KKK7D6AKKKKAPnLcRIflzVu3gKgyE544FRR7SeRWtZ2+Rl/ujnmqxNbkifFJOTsieyt9hDy/dIzg9qsNFvlaVuEXOB2pIZPOLR7SAOh9afLIGKwJ/wIivAqTnKprv+SO2MYqH9blCaOSechchDjkVaSxCrkE/lRNOlvEdoBYehqt/aEgjDFWGeMelb/vqkVyaIy/dxfvasyNUX96w67av6cwFhCv+zzWZqMheZz2bmrtk4S2g7grivWrQbw8U/wCtDNOyK2qDE78dhWYnLHHpWnqbB5pP90fzrOjAEh9hXXhv4SuXB+6zsPh+CPEun88b3/8ART17NXjnw+wfEFgcc+a//oqSvY65pu82e3lX8F+r/QK8t+JThPENuT1NooA/4G9epV5b8SkU+IbZmxxarjP++9JWvqVmn+7v1RxUa7XEjnrjg9q9m8D/APIoWX+9L/6NevHBG0k4Y8Jnoa9k8E/8ijZ/70v/AKNaqm7nnZR/Hfp+qOgoooqD6I8w8WyAeI9QXAPMY/ONa8/vGHmMPeu58Ytt8S6k2Pu+Wfr+7WuDuR8xz15p4OKVSTPla7viJX7v8zpyN2j256Pt/rWQcjc3v0raAH9lRtjOI+KyAhMbE9c7q48HL4/V/mYV1t6IRQwIcZzjlfrUoJYgMO2TUkaZIPrUM5Il2gEds10c3NKxjayCGHzNzfd2N+dXFaJosqRk8cVVikxnH3e9IARwjcZyMVFSLk9WUnY7H4enPiGYelq//oaV6XXmPw6BHiGcHk/ZG5/4GlenVpFWR9Jlf+7/ADYVleJ/+RT1n/rxn/8AQDWrWV4m/wCRU1j/AK8Z/wD0A1pH4kehLZnzoeJOvtigilPA3GkB3MPTJr1TwPM+jPDP/IqaP/14w/8AoArUrL8Nf8iro/8A15Q/+gCtSvKluz3ofCgoooqSjxrWZCuvagMYAvJefbcaxdYIkjR1755rS16QnxFqUfT/AEmXn/gZrKvQBbRpuBI5/CopQ5asZHx05fvH6mYnC/TrVmNOM5qunJPHBOKtopxnPTtXo1GKqyVFzjipvKGQd3TrToVBUHHJp7REENu4HUetefKp71jJIj2BWz2PFSpGduVGFHTFSKg3fNgjGBS7txEScD1FYyqN7FJGHekyXDZ9f6UzTV/4mcGf79TX6bLlx6c1Hp3/ACE7c5/jr1L/ALh27fodNN+5Y1rrIvJgF4yazZkOcgfhWvcKReyd8sTVKZC2WAx7VxYadkvRHM9JMobTnJGOOtNOQfb1qzjnBWmFR6cV3KZSmQg8ZBzSBST61Js7DjmnxpgnvVOSRXOkPhj43HgAdKzrn/WE981qMrLFnOPasubiRs80qLvJsvDazbLNgMmTH92vTfhB/wAgrXv+wr/7bwV5jp2csPbFenfCEY0vXx/1Ff8A23grHEfEz0cv/wB7l6f5HotFFFc57oVzPjbUNQsbLTY9N1GHTp7y/S2NzPCJI0Uq5OQSOu0AepIHeumrnfGM7jTrTTo7eymbVLtbMfbovNgTKs5ZkyN3CYAyMkjmgDn4z4sbxX/Yg8ZWkhNmbrfHpiEx4dV2uN/Gd2Qc87W9K9CrgfAd1bQDRbWy0vTLNdS0UajciytxERKGjHOD9072xnn5TzXfUAFFFFABRRRQAV8/+Lv+SieJP+vqH/0nhr6Ar598Xtj4keIwP+fmH/0nhrah8ZxZgr0GaNmiLGpYfd+YV6P4CGNFuf8Ar7b/ANASvOYIixX0wM16L4AB/sS6z3u2x/3wlefTd6rdzyss/jL5nV0UUV1H0QUUUUAeB2ltG4ViP1rUjbdH5
a8A8VVhtsorDoR61bZgsYCfe6V5+Jnzy3v+h8fTVldkrusUYWP7w+WoWYW4J/5aPQkawHz3zuI5+tN8oO4uZM/LnGK5oxivT832NW2/UctsmN745Oain8phj096SQzzuqrjZ3qGaxdBuHX610QSuueWplJ6e6jF1A4nwOgGBV21I+zwD0ArPvRumKHqMGrVpuMcQ7hRmvZqR/dRJl/DQ26bLSk/3R/OqcePMY9yKmunKvLn7oXNV4MFyw6MtbU42gXCPuNnafD848QWC/8ATRz/AOQnr2OvH/AHOu2B/wCmrj/yFJXsFcMnecvU9zKv4L9X+gV5l8RIRP4it0PQWqk/99vXpteZ/ESUx6/bherWyj/x56Ur293crNP93+aOQHz3CRJ0B5r13wau3wtaKOzy/wDo1q8mjZLYZH3ia9a8HHPha0P+1L/6Maoi25eR5+Ufxn6fqjdooorQ+hPKfGBB8Uaivf8Ad/8Aota4KfLTOSeM13HjTH/CWXp9DH/6LWuGlGJZMdzmtcIrTkz5Sq/9on6v8zrwhFhEo6FAaz1QF5N3IHQVsOu3SY8fwxA1lLjc5Xvg/pXiYSo5KbXd/mKvCzj6DYXUIrEfdORVW4cswbOTk1JkSzMnuRVRwFzjoBivTpQXNfqcreliSBgoCkfK9TGNo/ukY61XgwYxGfwqVmkQc4wKua97QR2fw7bdr0x7/ZW/9DSvS68z+HLbtcmP/Tq3/oaV6ZUx0R9Nlf8Au69WFZXib/kVNY/68Zv/AEA1q1leJ/8AkUtZ/wCvGf8A9FtWkfiR6EtmfOhOWKGlQKIxgYGKbw0mfTrQmXyR0PSvWPBa0Po3w1/yKuj/APXlD/6AK1Ky/DX/ACKukf8AXlD/AOgCtSvIluz3YfCgooopFHhviAt/wkepMD/y9yj/AMeNZF2+5N/0H6Vr6+AfEmpKe93Kf/HjWddIos2bnjArSEkpI+Mn/FfqVEA2KccHAqaPcx68CmQfc2nvzT0LZBHToa1m9yJPU0IQuzI708oxI5GOrVAmByPw+tPy4I6YPWvPlF30BMkbhzkjGMCiEFTlaibJkXd93gDHrVrzFhj8tew71ErpWWtylq7mFqUpF0/XnFR6aCdUhB/vZp1+x+0Me2f6U3SyRqcLHu1eta2Hfp+h0w+C50E+VmkJ7MagkTeNycA9anyJZZlPUuWFQgsM4x715FO6XmjmluU2GDyOtNChug46GrDMf4qbww46d67VN2MyARg8Ac1IipGxOOSKf8rZA60whVYkZyRinzN6ANJMhJJ+XsDWVcKBKw961niyoZuoORWRcY88munD76HRhviJ7HIaTHXGa9P+ERzpmvn/AKiv/tvBXl9gTuk/3a9P+EH/ACCte/7Cv/tvBUYj4j08v/3qXp/kei0UUVzHuhXIePdRt7fToopbNtSt4p45NQsYoRK/2YrJhyD9wBk3BuOUIyK6+uK8VeGdf1S+1Z9JuNNS31TSl06f7WJN6YM3zLt46Td/SgCTwWmhWk01rovhnUtJDxh3lurNow4XAC72JJxngemcV2NYeip4mjn2aydINqsWE+xiTfvyMZ3cYxn9K3KACiiigAooooAK+ffF4H/Cx/EhP/PzD/6Tw19BV8++LwW+I/iMetzD/wCk8NbUPjOLH/wGa0AZmUKOMAGvRPAgxo1z/wBfTf8AoK1wlkNsWccnpXoPg1QulXCjtcn/ANBWvHoVL4hxPPy6FqikdFRRRXpHuhRRRQB4ejSjAVcrjg/hUojEOZSxyecGo4Zii7SmcdD6jFTJGc73J29cGvNqNp66fqfIRVxiiSRx5gwmM596kJaaYRqP3WOSKaHe4JjVSq9mHcVLIRABEnMjA49ayk3e1tfy8yktL9BJriOEKoxnNVHubhnIMY2Hoa0Y7NAoaUgnryKnK2+zHy8VisRSpvSPN5mvspy3djir3iTcetXLcDyIv1o1dEWZgMdcipbFN0Ck9AK+glUToRkckl7qRm3nM
jj1FVY1weOwxV29jKTyexFUhlWJ65rtpO8FY2p/DY7f4fFv7d08EceY/P8A2yevYq8f8AHOvWA/6av/AOinr2CvPfxy9T28q/gv1f6BXl/xIfZr8B4z9lXH/fb16hXlXxNP/FRWw/6dF/8AQ3pxV3Yeaf7u/VHJQfPciRz1PSvY/Bef+ETs89d0v/oxq8btYpGnRiCq5FezeD+PC9rj+/L/AOjGpTa57LseflC/fP0/VG7RRRQfQnknjMAeKdSZuADEf/Ia1xE2PMY5613fjIqfE2oKQOsf/ota4O4wrsua1wrvKR8pVX+0T9X+Z2gn/wBA2Nj7gx9MCstmO3GKtSk/ZVYDjylFUHkKIARzXj4WkldrqyK027X7ES4V2OepyKq3AIIHYk1aJHmBT0x1qtM2QD6V6tL4rnN1FgbKH17VIZmAG8Drz9KiiX5SR1zxUu8EfOuPXPpVTS5hPc7L4btnXp8dPsrY/wC+0r0+vMfhuB/bs5B4Nq2P++0r06sj6fKv93XqwrK8Tf8AIqax/wBeM/8A6Aa1ayvE3/Iqax/14z/+gGrj8SPQlsz50DfN+HNKCQxAHApsgKKuOT3pwBJPvXqngNK1z6L8Nf8AIq6P/wBeUP8A6AK1Ky/DP/IqaP8A9eMP/oArUrypbs96HwoKKKKko8M1048Uan6fapf/AEI1mXD7YMHozVpa/n/hJtSwM/6VL/6Gax74HycdD1+lbQSckfGSV6rXmRK/Bx1HSrcJJIwPlPWs5D+7Uj1rStzjaMceta1lZBVjYsCMEjBODVhIDnPOD1pIouQc57gVaSN8jrhhz7V5VWrbRMUI3IhAEjCknjgGhiqJsHLD1qUxnaFJPHGfWmsqoAOGasVO71dy+Wxz96wMzKOSDzTNPB/tSH03Cpr5f9Ik2jk4NQ2Cn+04Dkj5xxXuJ/uH6foaU7crR0IIWaTHXNQSjLll5PpUrLtmkI5JORSMm8gjg+lePFpO5i9dCArJ3TjFRkMOcex+lXMMuPlyDUZG4ZK456e1axqEOJUYqQRnHamg8nHJxU5Uc7lwPWozgE7R1HWuhSRI18lQW4rFuhmZiO1bTDC5Y9ulZM4BkbB611YZ6s6MK7TJbAbjJ9K9N+D+TpOvZ6/2r/7bwV5np/Bkz/cr0z4P86Tr3/YVP/pPDU4jdnqZf/vUvT/I9GooormPcCiiigApkqu8LrHJ5bspCvjO09jg9afUVwkslrKkE3kzMhCS7d2xiODjvg84oA81vfGOoeH77VFTVLrVo7LTLi5lTUbJbZllQqE8vCIZEJJyQGA4O7kZ2P7T1rwvq9rb6xqh1WC7sLi4bMCRGGWEKxCbAMoQx+9kjA5OasXXgu51yYv4l1WO+jW1ntoorW0+zqolUK7HLuS2BxyAPSp9P8KXX9oR3mu6sNVa3tHtLdRbCEKj7d7P8x3OwVRkYHXA5oAydL1bX7STwvf6nqa3dvr5EctqIERbV3haZPLIG4gbCp3E5znjpVey8Qa62naP4ol1EPY6nqMdu2m+QgSKGWUxxlXA3lwShOSQeRgVs6T4NubG60oXuste2Ojqw0+3NuEZTsMamR9x3lUJUYC9cnJplr4Hmt5bO1fVzJodjefbLax+zgOrhiyq0u75kVjkDaDwMk4oA6XUdRg0u0NzcJcvGCFxbW0k78/7MalvxxXgeuXsWpeOteu4EnWOS6i2rPA8LjFvEOUcBh07j3r6HrwPxR/yUjxH6C6h/wDSaGtKW5wZk7Ydm/ZBRjdjAVSPyruvBeTpVySet0x/8dWvPLRmlRcHgcHPpXoPghy+kXJP/P03/oK14uFpuOJbfY5MBNOcV6nS0UUV6x7QUUUUAeJQMjRoWHzYHWp1LS5TBVenNRWkUc0SSEclccmrZJaPZFw3QE149aaUml/wx8pTi2tRpaOGP90AWUYwtPihVUFxNguM4z1FPhtUgbzWHzHrz3pkoeeQD/ln3Brl51J2i9Or/Q35WldrXoitLNNK+
E3AZqJ4pwSfMOPTFW57mK3BA7elZ8uqAAcNz7V2UI1JJezhoc9TlT956mRqkhafPcDmrunTDyIlK5DA81nX7ZlZvXmrdllLWAA8CvaqwXsIxMm7QTH6kg82Yj0BrCYsJAe3StmVzJ9oB/uj+dZTD96FrfCXjDlfT/Iui9Wdz4AX/idac3/TR/8A0U9ev15D4Az/AGzYA/8APV8f9+pK9erjv78vU9zK/wCC/V/oFea/EC1E/iW3dsYW0Uc/7716VXmvxDeQa7CkRwxtV5/4G9TPmt7rsVmdvYa90clPIEcCNfuntXrHgo7vCdmfVpf/AEY1eWbEijJP3j1xXqHgY7vB1ifUy/8AoxqmlZrQ87Kv479P1R0VFFFan0J5N4ywPFmoZ/6Z/wDota4O5YNKx6V3Pjdh/wAJTfjuAg/8hrXAzDO/P3sYNb4SPvNny01fEz9X+Z2ZjY2MPzfKFXP5VmyqdzZ5xyK2UQHTVHYoD/46Ky25CgdOhrxcLUu5eplXjaxUmPyhh24qGYY+hGKm24wp6YNMKl4yx7HivVg7HKMiUrGGznFT/I65IwTxzUFu5wGH3e9WGRGBI60VPi1H1Ow+HAA1ucDoLZv/AENK9NrzL4cf8h24HpbN/wChLXptQj6bK/8Ad16sKyvE/wDyKes/9eM//oBrVrK8T/8AIp6z/wBeM/8A6AauPxI9CWzPnU8496U5CdeelAGDk00NkCvVPnt9j6M8Nf8AIq6P/wBeMP8A6AK1Ky/DX/Iq6P8A9eUP/oArUrypbs+gh8KCiiipKPD9cx/wk2o46/apc/8AfZrL1CP9xu7k1oa82zxJqZHX7XL/AOhGqN5KHt4/YDNVFNTi0fGT0qP1MpMqg4zzitC0lGxFI61DBGWGe2c05lEOQPTiuqo1P3R1JKTsa8bhTkHhaeLthhcH5j19Kx47plwuelTLebjjmvPnhHfVXJvKJqs5kPBxgfrT49gXc5BasxLti2AeRyak35BLmsJYdpW2GqmtyjeH/THK9KgtDjVIfQuKfK5EpJplsf8AiZwD/azXrJWpNeRdPr6HROuJXwO5waawycg7Wx0qzCwlEi90OBUDoGfcOGI714MJ62fQco6XQzLAcnNMLZGcfhTiJAc5GO9Rncee/wDSt4pGbEJDqQynHSq7fe2qMcdass2F+YHFRSBicJwcd62puxDIXXC/MQeKxpjiRhWzKuEy3LYrGn4mGK9DC63NsP8AEyew5aT6V6b8IMf2Vr2On9q/+28FeaaePmlx/dzXpPwe/wCQRrv/AGFP/beCpr/E/kenl3+9S9P8j0eiiiuc90KKKKACiiigAooooAKKKKACvBfFA/4uJ4m9PtMP/pPDXvVeD+Jcf8LF8Sj/AKeYf/SaGrhozz80/wB3fyLNtIyrhf4q9I8DEHRrjHT7U3/oK15hbTLC21j8x+6DXpngEY0S5/6+2/8AQErihC1Zs83K5fvUvU6qiiiuo+iCiiigDxy2s96KRnBAPWtHbHBAT3AqnAZUwAny4GD+FWRGEHmOxx1INfLYmUpS956Hz9BKMdFqNhgklY+YMIRnimX0ojURx/exxUkt15iFIQC3t6VTkC2yM8rHJOee1FGEpTUp/JBUlGMOWHzZlyW25t0hOT1waa0MYXvUdxLLM/yL8vtVeSCUDPzV9LThJpc0rHlO19Clc4MpH4VbhOLaA+1UbjJkVjwRV1Rtt4D2IxXfUXuxRc17iGlsLN9B/OqQwZWPoKtMfkl/D+dUx8sjDse9XTWjKpLRnb+A2YeI9NT+HLn8fKevYa8e8BsD4l08d9z/APop69hrhfxM9zKf4L9X+SCvNfiFKI9ehHc2q4/77evSq82+IJRdfgZ8f8ey/wDoT1E7W1NMz/3f5o5KOF3G+Xt0xXq3ggY8I2QH96X/ANGtXkrTSzyFVX931DCvWfBH/IoWX+9L/wCjWogpL4jzspt7d27fqjoaKKKs+hPIPHIz4svvTCf+gLXCTEb5D
6ZFegeNMN4qvxjnCf8AoC1wFwm3zD6tmujCPVo+XdvrM793+Z20LZstq9Si/qAKoXC+UgI65wav2xAjVj08tf5CqtwQWH1rwKDtUaM6qvFGdI2GOewprArb/N160twjEhlH3ifyp8u18c8MMflXrJ6I5CK3BGAR8p61O8A25Umi3CBBGTyacbZkTAYnnNROp7+9hpaHW/Dlca1N/wBerD/x5K9Lrzb4ef8AIbmHcWzZH/Akr0mqg7o+lyv/AHdeoVleJv8AkVNY/wCvGf8A9ANatZfiX/kVdY/68pv/AEA1pH4kd8vhZ85g5Qn3ppARie1O25DAdzSBO3Ucc16x4SaR9G+Gv+RV0f8A68Yf/QBWpWX4b/5FbSP+vKH/ANAFaleRLdnuw+FBRRRSKPCfEIB8TamD/wA/Uv8A6GazLmH7q9iMVp+ISB4l1IngC8l/9DNUbw/vVC8kAfyraLakrHxlTSo2u46JljjVT1NROi+ZsOcsKk8tcD5uWx+FP2AqN3DYxUcyTuZlQWpBOOlIIWDdOtWlDK2McDvSjcWwV47VftZD5mQIrqegp2SSd3WpirHHHTrUfk/LuYkE9qXOnuSVGJ3A/wANLaf8hCP/AHqc+QxGPlHem2/F7Bju4rd6wfobQeh0Nm2ZJH9+KkZUlkJyc4qpE5R3wOcnA9auJtfvhh2FeBVjyy5kXB3VhhhPakEL9xzUvllQMEmjLDtz1qPaPoPlRAYpMHIHU1G8bHsMYq2GYHJXimMDI3TAx2rSNR31JcEZssajOc5NYlwoV2x610sqxxLgtknpmufuVxK4/wBo162Cqc1wpe7IfYZ/eY64r0r4Qf8AIK17/sK/+28Feb6ZyZgeyV6V8JQBp3iADp/av/tvBVV377R6WXf71L0/yPQ6KKKxPeCiiigAooooAKKKKACiiigArwbxN/yUTxN/18w/+k0Ne814L4myfiP4kA/5+of/AEmhq6e55+af7u/kTxxpkM+MgDk16N8PcnQrnPX7W3/oCV5ykbyN1wvGa9I+H5zodzxj/S2/9BSuSm/f3PMyz+OvmdXRRRXQfRhRRRQB5BBcERKpXJAH8qnG+Xg5C1Wt2jMSFh820A5qcStKvloCpPevmasbSbirHztOV0k2OZ4oVygUsOOKzbqKS6kWRyUQdVPetRLVF+aTB7n61n3crSt5cQI5Iz2rTCSXP7mr7snEJ8vvfcUJZoYjtXb+FVXvVZSMD86naxUMC+CaiktYVBwB+de7T9j5s8536mVPhnznvmrYybW3PT5elUpuJSo79PbFXz/x5QeuBXoVNFEufwIqn7s3PYcVWPIB7irPG2T1OKrY5P1rWHU0p9Ttfh/z4jsG7l3/APRT17HXjfw/H/FSWHoHf/0U9eyVwS+Jnt5V/Bfq/wBArzX4gxiTxBACcAWqn/x569KrzT4hxyPr8Gxsf6KoP/fb1E9t7FZn/u/zRyZlVPljUEj0r1fwT/yKNl/vS/8Aoxq8tWCOHLHG6vUvBJ3eEbI+rS/+jGqKTTvynn5Smq7v2/VHQUUUVqfQnkXjT5fF1+2eB5ef+/a1wd1l/MTOMjrXeeNBnxZqRP3dqf8Aota4ecAuSOnNdGF+Jny0nbEzfm/zOogmH2ZBnoi/j0qSSIMCQeSMistGZAieqg5rSiYnapPzKuM149al7N80TCM+bRlMAuMkYKg8VHNGQq8YPWrpiIuRzkMOlLNCJJMjptIrRV0muxHI7FaCIOjHIDdAaeVkRQOWwOtWbW0LQZBAbPBqR4JUU5ye/Ssp4iPO1ctUpct7G/8AD9SNcmYjGbU/+hLXo9cD4HQrq8jYxutm49PmWu+rrw8+eFz6LLo8tBIKy/Ev/Iq6x/14zf8AoBrUrL8S/wDIq6x/15Tf+gGuiPxI7J/Cz53ACgUz1XPOM0EnA570054Pc8V6qR8+kfRvhr/kVdH/AOvKH/0AVqVl+Gv+RV0f/ryh/wDQBWpXky3Z9BD4UFFFFIo8G8Rj/
ipNV/6+pT/4+ao3B2bGP91efwrS8QgHxHqn/X3L/wChms26wwUMOOP5VvF3aPjKn8RrzHRneevPUVOSHw2cH0qtaqzhnHG01MBuwV45rOaXMZtWdgQtuAwT71KH5IKdOhpqK4IBySOc1JuByNhrKTTYIYXJUHbg9TUYDSAMSVz2p5YlcgEHqajbzHAYHA9KqKEV3YiQrt4x1ptoB9rjJ/vcU5n/AHhXB+tNtDm7jH+1mur7D9DWOxohyJX553HFXAwPfaapou6QnPINWsKTyMepNeXVS0FEk+YMD5mcDpTTI2RyeDn/AOtQ44O1gDiofmAALZPrWUYplN2JjM20nBPtSeYxbH3cCo1DgHLZ5pQrbyS3HpVckULmYPtxhiGPWsG9OJGx3JrdmCqucZIHasG6YM7HHPau/Bb3Kh8ZJpbAtP8A7uP516X8JP8AkHa//wBhX/23grzPTEJWUr2AY16V8Ic/2Xr+f+gr/wC28FaV177Z6mX/AO9S9P8AI9FooorE90KKKKACorm3iu7Wa2mUtFMjRuAxUlSMHkcjj0qWoby0hv7KezuFLQTxtFIqsVJVhggEEEcHqDmgDhdDsbKPxLeal4VsY7bSLSzlt5WgG2K+udykbVHDbNrAv3LkZODWTo1la2GleA9esiTq+qzxLfXG4l7sSwO8ok/vYYZGfu7eMV3ejeEtJ0B4zpwvo1jj8tIpNRuJY1X0CO5UdPTiiw8I6Fpmp/2haWAjuFLlMyuyRF/vGNCSqZ77QM0Aefw2sK+FrDxYAf8AhI5dcRHuNx8x9155LQH/AGBGSu3oNuetGr2sM/h3xh4nlBOu6dqMy2dzuO+DyioijT0VhjK/xbznOa9AXwjoSax/aq2IF35pnH71/LEpGDII87A/+1jPvRdeEdCvNW/tO4sQ9yXSRv3rhJHTG1njB2MwwMEgkYHpQBoajJqEVmW0y1tri5yMR3Nw0KY7/MqOf0rwnV3vJPHPiB9Qgggu/tUW+OCYyov+jxYwxVSeMfwj+tfQNeDeJf8Ako3iUf8ATzD/AOk8NXDc8/NP92fyNSO1lkVBFjB5bPpXoPgtBHpNyo6C5b/0Fa4+0dIY97dWVR+ldj4LVl0icuMO1yxP12rXh4KtOeIaey/H/hjnwVOMZxa3dzo6KKK9k9gKKKKAPIoLaKRUkI5KgdauBljjAUHPQVHFZb1UgnBAPX2rRigjhj3Enj1r4vF4mF9W35HkYahJrRW8yjFbXErOJipQ9MVWuxFYxtwePxrRa83nbFgnPPHaoLiwDkTy5HtniijXkqi9ton0QVaUXD91q+5y8rXE8u5cbMcZFQNbXOMtit25kiicoOv0qg17G+VHUdeK+oo4icopwhoeLOmouzepz8w+bnrkirxBNtAR0K1WvwBMxHRuavQoBp9sP9mvTqz92L/rYUtYJmbJ8qSH6fzqFSC4HY1Znjwsw+n86qpnzVHYV0wd4mtPWL/rodv4BYf8JFYL38yT/wBFvXsNeQeAUxr9g/q7/wDop69frz21zSt3Payn+C/X/IK838fzeXr8IGcm1XH/AH29ekV5348dE12Et/z7Lj/vp6yrfBtc0zL+B8zkUt5JiTKRtzxXqfgtQvhOzUdA0v8A6MavMBNJKAIgDXqPg4Y8LWg/2pf/AEY1Z0ZSu1L7jz8qS9s7dv1Ru0UUV0nvnkPjfP8AwlGpDsfL/wDRa1wjcZX0Fd/40yfFN+AOyZ/79rXCXClXc9sGujCS1kj5ab/2ia83+Z0E0DeXG46BVqxCu59y9SQf0qzagPCM9kX+QqMMMbl65xXhus5JwtsZezStII/lk8x+ocqMelSvH5IIH0/PpS5HlnHUkmpFJeElwN5xXNObvc2jFbE1tBIYI3GOOv51YL9Mg8deKdaFktI+OMHP51cjkibHvx0rxMRiJc7bV0m9j1aNBOKs7aIv+FVxrUhHQ27f+hLXZ1yXhwAa2wHT7O3/AKEtdbX02US5sJF+p3UY8qa8w
rL8S/8AIq6x/wBeU3/oBrUrK8Tf8iprH/XjN/6Aa9SPxIuXws+dj83BpZMNj2pAoP1oPJ29zXqHz3U+ivDX/Iq6R/15Q/8AoArUrL8Nf8itpH/XlD/6AK1K8qW7PoYfCgooopFHhfiHjxDqZPT7XJ/6Gaz7sDeM+laWvqT4h1Ijn/TZQf8Avs1QvYt2B7A1cZJSR8XU+N+oW7/ucr2PNSRxlnyv1qGL9zGM9zk1eSJgwkTkVlVlyt26kpXYsbkHY3XvSmVCSMHinlxnnrUZliJ2556dK5Uru9i9upAZVKblB55qJt7j5elWNyk4UfWmMJCuVUV0xduhmyk7AOVIqO1X/T4x/tZpZDicqaW3XGoQkf3q7HpB+hcNDVihJklK/eBO3PrU7hXyjg5xzin28TbpGxzklfepGKg4fg4rxJ1byNIwsiqY1B464xTGj6e3P41MVRj1NHkqFxk1anbclxGiNsfKRmnGHJ+bpipEgAHBPXNSCFQSxJrOVZLqWoFSQIgwAeBXOX2RM5XpniuolKAECudvhh29Aea9LL5asIvlqIm0FS0lwO23/GvRPhEQdM18j/oK/wDtvBXn/hz95LLj+4P616B8Iww0zXwww39qc/8AgPBV1X+/mvQ9TAL/AGmT8v8AI9EoooqT2wooooAKKKKACiiigAooooAK8I8SLn4jeJT6XUP/AKTQ17vXg3iUn/hY/iVembmHn/t3hqoHn5p/uz+RuWEgkiIk42mu78GyCXSZ2H/Pwf8A0Fa87jhcspRyo7j1r0LwVt/sifbjH2lun+6teNhYRWJcl1/A5cvnJzUWdJRRRXrntBRRRQB5pDFKqqFUldo5qwtqyoNztjqc0QTFYkBQnAH8qSWWWRSoVlz3r86nOrOdtEc8IUowvqyN5be2BdmUAcZNNupjdQhYRke1Qm03ACZ96gcg9zU6SQQ4Ubc4rocIRanC8pL7jBTnJOMrRi/vMiXT8yl2Y5PakaxhAJGM+uKdO9zNLlY3AHP1qJ7e6LbgzgemK92EqllzTSPIkoXfLG5gapCBKUzyKtxJm2th229ar6kjmbODnvWhZR+ZbQDvtzj1r26tTloQbf8AVjjSvojFnzunXsMc/jVJRh/pW1qFqY5plx1UN096xtu1zzXoYaopwuv60LhpeLO38A5Ou6cewd//AEU9ev15L4EXbrGm8dXf/wBFPXrVcSd5S9We7lX8F+v6IK8/8bwLLr0LMeBbLx/wJ69Arzrx8JG1yJY2K/6MvI/3nrPEJuFk7GmY2VDVdTB82CKQKNuc4r0jwj/yLNr/AL8v/oxq8vigG9d7BjnvXp3g4k+FrQkYJaXj/to1c+FjGM3Z30OHLZN1Xfs/zRu0UUV3HuHlHjMj/hJdQxywCcf9s1rgpzhmHXiu58anb4o1Fs44Qf8AkNa4VwQ5B5I71rhFaUmfK1f94n6v8zpeYlLbiN0a8fgKiUt1JOM7v8/nUsx3JF7qP5VGVJt2wef/AK9ebDa76nNLcnE+5ip4wKljlYuQeOoHvVMuApkxztBIqRpcOrAcVnKknokWptHRWdwnkgEjBqZghyVbHpiqFmY/JAOM9qfLG+SVlwMcCvmqlCPtnZ2PbhWfsldXOh8Ksf7adSc4t25/4EtdnXB+C3Y65Kj5JW1PzHv8y13lfU5fDkoKJ14SXNTv5hWV4m/5FTWP+vGf/wBANatZfiXnwrq//XlN/wCgGu+PxI6JfCz5zVyCvHrmgE5Ukc05U2nB/OhcH5sZ5xXqngNrofRXhv8A5FbSP+vKH/0AVqVmeG/+RW0j/ryh/wDQBWnXky3Z78PhQUUUUijw7XGCeIdTBPW9lx/30ap3o4TB+8oqx4hH/FQ6n3P2uUj2+Y1UkbfEhzk7QMfhTcbSUj4ur8cvUfAUMmXxgkqAau4eKTCruTGc1StUUxHd1zkE+tXElcDaVLds1zV/i0CA4yISOBnvUbeWT2zUhC5HydacI0P8I9qwUlEuzZCCobAAz
xmkcvj5I8irO1AchQSeDSEleFiOKFU12DlMG4H+ktkVJbJi7hIPfNTzxhrhvlqOI4u41HrxXp896dl2Mr62Okt4v3OQeaik/wBpQDiprIMYUTd8y8E+tSSQsGIKFs98V8x7TlqtSZ6XJeCaM8hd3XHFAVcZ3cdBV3yRnmOgQLj7grb6zGxl7FlNUB4DnrUyxgMTuJyOlWFtxn7uPepViQDJANZVMSuhpCgzPkjJ6JketcxqG4SOoXOSa7SThcCPrxXK6rAyXjDHHJr1Mpr802mY1YckkyXwqgW5uB6R/wCNd78KP+PHxDn/AKCv/tvBXB+H28u4uSBjK/413nwnObDxAT/0Ff8A23grqmn9aqPyR6OXy5q3yf6HoNFFFaHtBRRRQAUUU2SRYo2kc4RAWY+gFADqKydJ8TaRrkN3NYXReO0bZOZYni8s7Q3O8DjaQc9MGmaP4r0XXrhrfTrwyyiPzQrwvHvjzjem9RvXP8S5HI5oA2aKxNP8XaFquo/YLK/Ek7b/AC8xOqS7ThvLcgLJjvtJxRF4u0KbWP7KS/BuvNaAfunEbSqMmMSY2Fxg5UHPB4oA268F8Tnb8RfEh9LqH/0nhr3qvBPFGD8RvEgPe6h/9J4a0p7nn5n/ALu/kau91bap6gfyrv8AwKhTRZwTk/aW/wDQVrgBOsL/ADZ+6P5V3ngGXztEuW/6emH/AI6teVhYy5720scOAa9ute51VFFFeie8FFFFAHAWzxtBGSOdo70y5ulRDhSfpRb2sbxo5zkqO/tTpxHEpPPFfmlqftur12Jbqex6IzitxKCA2M88ipEskGGlwWHele9QD5Qc/SqF1LfSyJ5JTyyTuzXr04Vqnuq0F9x5c5Uoau8ma++FIuCPzqo13Fzjn8arw2R4LdfrUq6XDFGQueTk81mqWGpt80m2U6leovdikYeqeWH3AffGafpxxHAT2Xiq2rqsYJbP7snFS2kvlrAD0K4r6dwvhUlr/wAMeLe1Vtj9UZHlnPfywK5lhulwK1NQkIkm2nlqyEYCQE/SvUwFL2dK39bCu5SlI9E8ER4v9Lf1dv8A0U9eo15l4LH+laUexc/+inr02uLDScue/wDMz6HL1aj/AF2QV5546LjX4QoPNqvOP9pq9DrifF7omsRluv2df/Qmoxk+Sk3a5WOjzUrXscfHZzmUMXHrXpvhVdvh23Udnl/9GNXnb6gvm4UHjrxXonhXP/CO2+eu+X/0Y1c2ClVlUvUVtDjy+NNVWoO+n+Rs0UUV6Z7B5J42Xf4p1EHoBGR/37WuHm/1wru/GCt/wlWpHjB8sf8AkNa4WcfvD7E1rhX70kfKVf8AeJ+r/M252KrAQeCoyPWohcnIABAOateTujgB/hX+YqF4huI/iHFcUJQaszlknuMSQPkf7P51OTggD7oqqEEUg/2eDUkBZjj86ucVuthJm/aRq9uh4DDpTpUmByJBjFRRK4tgyY3AUhluNg3bc96+ecZOo2mt+p6iklBJo6HwUW/t2TcckWzZ/wC+krvq4LwQ2/WZWP3vs7A/99LXe17uE/hI9XAfwQrL8S/8itq//XlN/wCgGtSsvxL/AMirrH/XlN/6Aa6o7o65/Cz533ZOMc4pUULwOAKRRgZ7kUo5xjvXqHzb8j6I8N/8itpH/XlD/wCgCtOszw3/AMitpH/XlD/6AK068uW7PpIfCgooopFHhmvA/wDCSamexupR/wCPms05jJB/h5/OtPXHx4l1NT0+1S/+hmqN8uQMdCBn8qtP3lF9T4uovfl6lu0iWWHPGQcirUcbKcEE/SqmnK0sCbOtbVs+zCSD5j6CvJxlSVOTS18jejTUrX0IVjXaCUPPFSfZ48j5elacccbDoeacbeLHfj3rwpZhZ21R6ccHdX0MoRRqc7etLxnaENX2hjVu/NKHjTjBqvrd1dJsX1e2jaRzd2EjklBHI7/hWTC3+nxf71auqsry3A7kjH5CsqMAXcIHrX1WD1o3e7X6HkTsp
tLzOnsyS2VOPmzWuHBABUk+tYdir7Dsx5gHetkXAjjZpM/KMnAr5XMYP2nu6nr4Kfu66B8rYOw8nFOVE/u+1SrcRuq4zyMjimmSPjr8xxXm809rNHdyx3umCrGQQV4qOQRKOBSu0bqQc46VWkdFz14FaUqcpPqZ1JpLoJJOq4wp5rl9YuQ9y/B4roJphs47jPSuTvmaeRyOzEGvp8noJTcmjyMVU5mk3oWtCcM85/2K734Uc2PiHH/QV/8AbeCvPNGbylnz/cxXoPwk/wCQbr//AGFf/beCvVnG1eb9Dry1r27S7P8ANHodFFFB7oUUUUAFIzKilmICqMknsKWigDyU6zpviIfEfTdF1S0ur7UY/wDQ4oJlZpwLKNTtweeQV+taZ1G08Va/og8OyhzZabdC4ZAR9m8xEVI3/utuGdvUbCa9HooA8s0a/tNTs/AOjWAI1PSpI2voApD2ax20kcgk/u5Zgoz97Oar2dzC/hnRfCik/wDCQ22tRvNbbT5kQS6MrzH/AGCgJDdDuA7163RQBU1GPUJbQrplzbW1zkYkuYGmTHcbQ6H9a8I1hLyLxx4gGozwT3S3UW+SCExI3+jxYwpZiOMfxH+lfQVeBeK+fiN4jGf+XqH/ANJoa0pq7PPzNXw7NZY1OZGPVR/Ku8+H+f7DucjH+ltj6bVrirW3NxbxsSRhV/lXeeC1C6RcKOguW/8AQVryMLVTquF9TjwFNqqpeTOkooor0z3AooooA86ghby1wT0HepY7JQ5YsxzzzVaGSZVAEZIAGD+FTjzWOORnvX5xV9opP3kjGk6bS91sfKkKZ/wrLubgAqkYBc5wK1YrElstISMd6s/Y7dMFgpI7kUqWJpUXd3kVUwtWstLROfBu9ufLGfrTHkvwh/dL+dbc0tujbcrmqc95bhcblyegruo4uU2rUvwZx1cLGCd6hyGpGSRvnGG7ipbZNwhbuF4FTaid8jFEB+lFpA0sMPVTtBOO1fWe1X1eN9P+GPn+Vuo0tTHutweX1zn8aoDB+tbl5ZsWlIBxxzjrWMIykxz716uFqRlDQmPu3TPR/BgHn6Sf+mjf+inr0qvNvBvE+kD/AGz/AOinr0mvLwW0/wDEz6fA/wAFf10QVxXi2JX1qIsTxbr/AOhNXa1wnjZJH1aMISP9HXJH+81GPV6LV7DxjtTva5lgQpjgZ+ldv4X/AORet/8Afl/9GNXnVtbOJF3Ss3PevR/DQxoUI/25f/RjVwZZCMK8oqV9P1MMHJyndq2n+RrUUUV7h6J5F42mK+Kr9f8Arn/6LWuJlAZi2e1dp4058X6llegjI/79rXGsMHkdulb4dJN2Pkqz/wBon6v8zohgMpB58ocfhUcrYCA/eY0hcgxAjGYxTCWkkZWXG0gKfwrzIw1uzFsdPEWWXA9xRAuUxj5uR+NOkLKoTkknBqS2A805GBnP4mhyapgknI0kSRYF2jPFMJmA5QVbjWXapWMspGc05g+DmOvC9vaTukz0/ZXXU1fBBJ1qbj/l3b/0Ja76uI8HoF1iU9CYG4/4EtdvXvYGSlRuj1cFFxpWYVleJ/8AkU9Z/wCvGf8A9ANatZfiX/kVdY/68pv/AEA12x+JHTL4WfO6/Kgz24pyJhQaVxncB6Uo6Ae1elc+actD6F8Of8ivpP8A15Q/+gCtOszw5/yK+k/9eUP/AKAK068yW7PpKfwIKKKKRZ4VrqZ8Tao3/T1L/wChmoL5Q0EY9VH8qs67/wAjLqY9bqb/ANCaqt2rbYeuAqj9KG/3kT4yp8cvUdphkQo6jIPWumt5I5CCcZPtXM6ezoBhcqe/pXV2AiMKk4y3SvDzppe819x24BOUrJllUQjqeaPIXA+Y8GpPLU9G60qwjA+cnFfKur15j3lT6WIjEgYkk/NUcnloh5q4IV5y1RTxxhDkinTrJySbYqlFqLaSON1IL57cnr/Ss62AFzGc/wAVbd/EjyyDIHNZfkeXcxAGv0DC1E6PL5fofK1Fa
bOhs4+crnINaqxS7sFBsx1qvp0Adc5wa0A7ISu3IHevjcfiG6rjHVo9/B0EqalLqRYI6gUHpnAqXeTj5KTccD5O9cHO+x2cq7kLDg56VCyMegzV3OQcoMUmfRBWkKzj0InRUupj3Ucuw4QVy94WjLllAJPNdtcLI8ZxHXKapbOxwVIyc19Rk2IUnaVjxMdS5ZJ9CnpB3vdZ7Jn+degfCMY03Xx/1Ff/AG3grhNGh2SXOf4kx/Ou++FH/Hj4g/7Cn/ttBXtTknWkl5fkdOWWddtdv8j0Giiig90KKKKACiiigAooooAKKKKACvBfEib/AImeIl9bmL/0mhr3qvCdfwPid4hP/T1F/wCk0NVF2u12ZwZl/u7OmtLR/ssPlnHy4bArs/CsBt9NmQ9TOT/46tc1pE4jtBv5yBiut0CQS2czAf8ALYj9BXyOV16sswlB7anRQo0lTjNPU1aKKK+sNwooooA83iugsagoegoN82eImqNHjKKSOwqQTRqM4NfASpQ5m+S5xRqTslz2JlupTgBWFRyxXc5XbOyDuMdad9rRRnaagbWVRwvlyflWVOjWvelT/X8zWdWla1WbLK6czEGRwx9SKc2kROMlVJHPSoP7SdugcfhSDV3R9pVzn0FHs8c3oxqeCWjRk6hZ/ZpSBzk44FO0qHesKk87ealvrrz5cbHzjPIqbTNkcpcgjYMCvZnWqrB+/wDFY8mFOm8V7vw3KWroIl2ovX0rlLkBJG9a6nULlXlHXqc1zN0heVmPUZH4V7uT80aaUzgxbi6ra2O68GPm70lf+mjf+inr0yvMPBahb7Scd3b/ANFPXp9aYVJKdv5me9l7bo6/1ogrifGBYaqoUEk269P95q7auQ8UuE1aPIJJgXp/vNWOaS5cO2lfVG+IV4WvY5aG3uGuIm3lVDDIx1r0Pw4MaHCP9uX/ANGNXFNcbcAK2T7V2vhs50KE+ry/+jGrzspqTqV3KStp+phhYQhK0Xf+katFFFfQnceR+Nvl8VXzAZyYwf8Av2tcZIdzE47V2XjeTy/FWoEgkYj/APQFrjWOccHpWuHWrZ8jiP48/V/mdFeR7Le3lAycKCB6UwhWfIxkGn3c37iCNeG2CqSytuPP1ry6MJSp6+ZFRpS0J58sGdT0PFWII/mBzxkVRR8AD3qzvkEe2NsHgCqqQfLyoUJK92dXaERwhWGakYw9wtJbFHtoxJ97bg81I1vA4IIBz718FUlFVW5XWvQ+vpxk6aUbGn4bVBqrFAOYGzj/AHlrq65Tw4oTWHC9PIb/ANCWurr7TJf9zjr3/MI9QrM8SDPhbVx/05Tf+gGtOszxF/yLGrf9ec3/AKAa9eO6Cp8DPntVwQDye9ITh8dqlRcMc9TTAvzEe9eimfLKWrPoPw5/yLGk/wDXlD/6AK06zPDv/IsaT/15w/8AoArTrzpbn1NP4EFFFFIs8L13/katRHrdS/8AoZpl3gxxqOvH8qf4iGPEeoFeG+2S/wDoZpnls0gY+mfwqalrxkfGVPjkvMs2sJjgUbCQR6Vt2qqsCDIB7U3T0RrKLcOqg1ZaBMgj+EYHNfK43FqpNwlpqevhsO4RU1roKu7P+spwEnH73vTPLI7ik2yD+IV57SezX3HWm1umWAHycycY4p4jDDDMDVUCTP3hipUDd2FYTptapm0Jp6NGRfWym4cDA5yayJIGS8jbORu210k1uJJWc4yazb21eMK4IwGzX0eBxasoN62seLisO03JLS5t20W2AFThh1pzSsM5BNMj3mJWQgZXmhmcd6+eceabb1PZUuWCSHCc5HyGg3PC/uzycfSoi7DrSbyapUYvoT7WXcm+0ZB+Q8HFHn4YjYemah3mjce1P2EewvbS7k/nF1+4ea57VGIlK7TxW6jsO9ZeoxmR2eu7LOWnX20OXHXnS31MfSlLzydvlya7b4U/8eXiD/sK/wDttBXIWamK4m56xnFdj8LcfZfEOP8AoKD/ANJoK+ohLmrS7WRll
K9/5P8ANHfUUUV0HvBRRRQAUUUUAFFFFABRRXN6v40sdIvbq3ay1C6WyjWW+mtYg6WqMMgvlgTwN2FDEDnFAHSV4N4mUj4keI3HX7VCP/JeGvd45EmiSSNg6OAyspyCD0IrwjxPIq/ETxID/wA/UJ/8loauF76Hn5nf6u7eR2VgEeziBzwK7Dw8ALCXb080/wAhXlg1AwoqhuwxXo3giUzaEzk5/fH/ANBWvm8Dl9Sji/bN6O/4iweMhUtSS1SOkooor6E9EKKKKAPMvs0bRqTnkDvT0t4x6/nXPT6sYpCmRwBiohq7t0PNfMf2VipRvzaHgfXqCfwnWiOMDvSfuVNcg+ruU4bn61VbUXMmd5/M0oZBXl8Uy3msF8MDvBJEKessRP8A9auDGpOEJ3nA9zUy6k21TuPI9aifDk/5io5zb7J2zQxP81USiec+P84rCh1kpGFLckE9aYNTKt5jH1/KopZPiINpv0CrmVGdmo2HXKAuzt97ODWZMoNwy9sc066vlM5AbhjmqEs4DFyeCMGvqcLQnFK/Y8ab5nojv/COP7Q0rH/PV/8A0U9el15R4IuPM1rT4+ySN/6Kkr1eow9N01JPuz6PLZKVG/8AWyCua16NH1Rd3/PFf/Qmrpa4Txtem01eBQcboR/Nq5c2oTr4Z04bto66taNGPPNaFgRwqua3/DvGiQ4/vy/+jGry99YbYx3fd9zXpvhg7vD9ufVpP/RjV52TZfVwlWTqPdf5GVLHU8TO0Fay/wAjXooor6M6jyzxq6DxHdg9fk/9AWuUl2AfWtbx6/8AxWl4ue8f/otawZJlyRnocUQoOLuuup8hi7utP1f5l6ZjKIph93GBTRyhI+9mrE0kZsAFI+XrWe04VlweCKxpJzjZLYxluTxkkEN0PAqe1YGXcfunNVIrhTEAcZDVDDeKhQZ5Lce9XKjKakrBG6Z3xtY5RFLk5Ucc+2KkFuFHBP51zcesuiBWwDjIpza+VIVsAkZHWvkZZTjG7J3R70cww29tTt/DQ26w4/6d2/8AQlrrq8/8DX7XutT7gBtt2xj/AHlr0Cvo8uozo4dU57q534arGrDnjsFZviH/AJFnVf8Arzm/9ANaVZviHjw1qv8A15zf+gGu9bmtT4H6HghjJz65ppjLS8fdGQ1WIpU6568imiQKRnHJrp5pdj41SaPd/D3HhrSh/wBOcP8A6AK0qzvD/wDyLel/9ecX/oArRrme59lS+BegUUUUizxHXYg3iDUGP/P3N/6G1PYKscS+qVX8QzhfEOppnpdy/wDoZqpPdIxh2vyiYIrKrRnNx+Z8dJ2nI7m0s4zYwrz8qjvSy2oAGM8D1rAs9cJgUZG4DBHuKZP4kZU3qFIx718k8qxzrO3c9xY/C+zSa1NvymyRS+S1c2niKYoGKJk0L4lcnG1M/jXS8oxvZGCxuH8zo/JalETCucfxHKOiJjHvUS+JZnGVRCPxprJ8a1sg+vUN0mdQImbk9aiubdmQjtWIniKTdjamKnOueaCoC5qFluNpzTsDxmGlFrU2VSRbVPLxu2d/WiQTc4x04+tY39uNAQkgUAnAp/8AbikEnbwcVl/Z2KTvypl/XKDVrtGlifd0Xbt/WkAlwM4z3rNOtooJJGBQ2tIpAJGScVawOJ/kI+tUf5maQ87Bztzn9KD52G2gdPl+tZv9tIccjmg60gOMrmn9RxH8gfWqP8zNMefx937v61BPBJLCd+N3tVBtfjXdkj5SAfxpH1nLFfl4HNXTwOKjJNRsTPEUXGzbI1hK3J3ehrr/AIaRiKPxGg6DVB/6SwVwL6qHnYjGApzXcfCqb7RZeIZfXVP5W0Ar3qNGrCalPaxWVyj7WSXb/I9AooorrPcCiiigAooooAKKKKACvPdUN9pF/wCL7ddIv71taVZLF7aAyIzmBYSjsOI8FM5bAw1ehUUAY1toEbeFtP0e8muP9Gt4ome2uZIGLIoH30Ktjj1rFl+FnhOe5luZra/knlIMk
j6pdFmIAAyfMyeABz6CuzooE0nozjT8LfCh621+cf8AUVuv/jlWrf4f6Fax+XbyaxCmc7Y9au1GfoJa6iigShFapHOf8IRpX/P1rn/g8vP/AI7R/wAIRpX/AD9a5/4PLz/47XR0UFHOf8IRpX/P1rn/AIPLz/47R/whGlf8/Wuf+Dy8/wDjtdHRQBxjfCvwmxy1rfk+p1S6/wDjlA+FfhIdLS+/8Gl1/wDHK7Oigj2cOyOL/wCFVeEf+fO+/wDBpc//AByj/hVPhH/nzvf/AAZ3P/xyur1C+g0zTbq/um229rC80rYzhVBJP5CsDSfFV7danY2eq6MdO/tGBp7JhciXcFAJRwFGx9rA4G4cHnindh7OHZHP6B8LdCk06U6pZX4n+2XKqG1K4X90J3EXAk/55hOe/fmtX/hVXhIY/wBEvuP+opdf/HKn8S+MpNC16z0qKDTN1zbPP52o6l9kQbWVdoPlvuY7s446GuntnlktYXnSNJmRS6RvvVWxyA2BuGe+Bn0FK4ezh2RyP/Cq/CX/AD633p/yFLr/AOOUH4WeEyMG1v8AH/YUuv8A45XZ0UB7OHZHFn4U+EScmzvSf+wnc/8AxykPwo8IEYNlekf9hO5/+OV2tFO7D2cOyOQg+Gfhm1kWS3i1KF1OQ0erXSkcY6iT0J/Orn/CEaV/z9a5/wCDy8/+O10dFIpRS0Rzn/CEaV/z9a5/4PLz/wCO1WuPhx4du3D3I1WZ1GA0msXbED8ZK6yoLyW4hs5ZLW3FzOq5SEyBN59Nx6UA0nozgfEfww0SPwxqr6TaaidRW0la1C6ncsTLsOzgyYPOOtb0HgPRoIVjjm1qNR/Cmt3gAzyeBL61d8Ma5Nr+lzXNzZLZzw3c9rJCs3mgNFIUJDbVyCV9Koav4rvLTUb+10vRjqK6ZAs965uREVDAsEjBU732jOCVHI55oEoRjsib/hCNK/5+tc/8Hl5/8do/4QjSv+frXP8AweXn/wAdrbsb2DUtPtr62bfb3MSzRN6qwBB/I1YoKOPn+GHhe6nM9xBqM0zdZJNWumY9upkqP/hVHhA/8ud76/8AITuf/jldpRTuyPZwfQ43/hVvhTGPs1/j0/tW6/8AjlN/4VT4RP8Ay533/g0uf/jldpRSD2cOyOL/AOFVeER/y533/g0uf/jlJ/wqfwfkH7Fe8dP+Jnc8f+RK7Windh7OHZHGn4WeEz1tb/8A8Gl1/wDHKyvEHwv0RNPhbTLPUGuftlsrY1K4Y+SZ0EvWT/nnv+nbmu71Oe+trIyadYpe3OQFiecRDHclsHGPoareGtZ/4SLwzp2sfZ/s/wBtgWbyt+/ZkZxnAz+QpB7OHZGPbfDfw5ZuXtV1SByMFotXu1JHpxJVr/hCNK/5+tc/8Hl5/wDHaow+Nbm78W3mh29rpK/ZLtbdvtGq+XcSLsRy6Q+UdwAfj5uSp5FdjQUkkrI5z/hCNK/5+tc/8Hl5/wDHaZJ4E0aaJ4pbjWnjcFWVtbvCGB6gjzeRXTUUDOKHwo8IDpZXvH/UTuf/AI5S/wDCqfCP/Pne/wDgzuf/AI5XaUU7sj2cOyOaj8CaPFGscdxrSRoAqqut3gCgdAB5tO/4QjSv+frXP/B5ef8Ax2ujopFnOf8ACEaV/wA/Wuf+Dy8/+O0f8IRpX/P1rn/g8vP/AI7XR1jaxqGt2c2NM0SG9hWLzHklvhBzz8qja2TgDrtHPXrQByWs/DDRX1XSHtrTUZI5r1vt7nUrliY/JlOSTJn/AFgj5HP5mtL/AIVT4R/5873/AMGdz/8AHK6bRtVg1zRLHVbZXWC8gSdFcYYBhkA+/Nc7ZeOJrqWwupNIMWiajdG1tL77QGdmJIRmj2/KrFcA7ieRkDNO7I9nDshi/Czwmv3bW/H01S6/+OUh+FXhEjBtL4j0/tS5/wDjldpRSD2cOyOL/wCFVeER/wAud9/4NLn/AOOUn/CqPCAORZXuf+wnc/8Axyu1op3Yezh2Rxf/AAqrw
j/z533/AINLn/45SD4UeEFGBZXo+mp3P/xyu1oouw9nDsjix8KvCI6Wl9/4NLn/AOOUo+FnhMHItb8H/sKXX/xyuzopB7KHZHGt8LPCbkFrW/OOmdUuv/jlZc/ww0QeKLGOK01H+zGs7hrg/wBpXOPODw+Xk+ZnO0y/5ArofEHiLVtCjvb46FHPpNlH5s1x9tCysgXLFI9pBxzwWUnHHat24ulh0+W7Vd6pEZQvTIAz+FAezh2Ryh+FfhM9bW//APBpdf8Axyg/CzwmTk2t+e//ACFLr/45Vjwb4tufFcCXRt9Jjt3t1lK2mq/aZo2YAhJE8pdpwTnngjGO9dXQHs4dkcZ/wqzwn/z63/8A4NLr/wCOUf8ACrPCf/Prf/8Ag0uv/jldnRQHsodkcWfhV4ROc2l8c9f+Jpc8/wDkSl/4VZ4Tzn7Lf5/7Cl1/8crs6KA9nDsji/8AhVHhAZxZ3vPX/iZ3P/xyt7w/4a0rwxaTW2kwSRRTy+dIJJ3lLPtC5y5J6KB+Fa1FFxqEVqkFFFFBQUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBneINL/tvw5qelb/AC/ttrLbh/7pdSufwzXLQW/iO/1LSb690P7M2iWkxEf2qNvtlw0YQLGQTtTG7l8HkccGu6ooA5zVbq/aOIHwg2otNbDzB58G2Nj96Ny7DI91Bz6VZ8I6Tc6H4T0zTLyRZLi2gCOUJKj/AGQTyQOg9hW1RQAUUUUAFFFFABRRRQAVDdzSQWks0NtJcyopZYY2UNIfQFiAD9SBU1FAHDeFpPEOl6Vq6TeFrpLh765vYI5Lu3AlEs5YJlXbBCtk5GOOtP1Kx17TNa1+fStKGoR6zDHscTpGLeZYzH+8DEEpgKcrk8EYrtqKAM/QdM/sXw9pulB/M+xWsVvv/vbEC5/StCiigAooooAKKKKACisOfxp4VtbiW3uPEujQzxOUkjkv4lZGBwQQWyCDxitCx1Sy1MM9jOLiIKrCaMExOG6FHxtfp/CTjvQAuo3VxZ2Tz2unzX8oIAt4XRWbnsXZV469a5bwT/b2j+DdH0q78OzxXFoIbWbzLqHGzHzSqVZshfQ4JzxXaUUAcP4osNT1+GbSbbw19nd7qN01WSaHZGFcN5qgN5m/C8DaOe+K7iiigAooooAKKKKACiiigArjvGX9vXt3b6VaaNe3Oiyx7r6ayngSWXkjyB5kiFQRyzDnBwMZJHY0UAZdrPdRLpdvDor21q8TCVWljBswqjYm1SQ2enykgYrjdO8P66mnaD4Zn00R2WkXsc7aj56FJooWLRhUB3hidgOQAMHk8V6NRQAUUUUAFFFFABRRRQAUUUUAcN4lXXNT8Qizn8OX154dttkgW1ntgL2Xg/vBJKpCKf4cfMRk8DB6ia7vvPliXSTJB9kMqu06DfLk/uSvbj+LpzWjRQBxllp+oah4x07V20H+xbextpoZDJLE0lxv27UxEzDYu3dyeuMDrXZ0UUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQByfiSNP+Ez8G/IvN5c546/6LLXPeKNQ1GFfG8drqFzbmA6cLdo5CPI3sAxUdBnv6969IktbeaaGaWCKSWBi0LsgLRkgqSp7EgkcdjUUumafOZzNY20huNnnb4VPm7fu7sjnHbPSgDiF0Sd/G17oP9va2NPOmRXmPtz+YsxkdNwkzuC4UHYDtz27Vhw67r2v23hO2eTcLvRhdyH+03083MwKg/vI0ZiQOdox97JzivWBa24uzdiCIXLRiMzbBvKAkhd3XGSTj3qpPoOj3OnRadcaTYy2MOBHbSW6NEmOmFIwPwoAp+ERqS+HYE1W6gubpHkXzYZ/OBQOQoL7V3MBhScDJBrcqG1tbextktrS3it4IxhIo
kCKo9gOBU1ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB//2Q==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "augment = Augmenter(parallel_augment=True, concat_original=True, min_augmentations=2, max_augmentations=2,\n", + " shuffle_augmentations=False, repeat_augment=1,augmentations=[freq_dropper, chunk_dropper])\n", + "\n", + "augmented_signal, lenghts = augment(clean, lengths=torch.tensor([1.0]))\n", + "\n", + "# We here have three signals: the orignal one + 2 augmentations\n", + "print(augmented_signal.shape)\n", + "\n", + "plt.figure(1)\n", + "plt.specgram(augmented_signal[0],Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "plt.figure(2)\n", + "plt.specgram(augmented_signal[1],Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "plt.figure(3)\n", + "plt.specgram(augmented_signal[2],Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iWLE85czs_Y8" + }, + "source": [ + "By manipulating the parameters min_augmentations and max_augmentations, we can apply a variable number of augmentations:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "executionInfo": { + "elapsed": 3972, + "status": "ok", + "timestamp": 1704409340266, + "user": { + "displayName": "Mirco Ravanelli", + "userId": "06892056361698510975" + }, + "user_tz": 300 + }, + "id": "SbSkJ1gdtRyk", + "outputId": "1bd64349-17bd-4e2c-e3d2-3fb72c5d793c" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/sdelang/env/sb312/lib/python3.12/site-packages/matplotlib/axes/_axes.py:8089: RuntimeWarning: divide by zero encountered in log10\n", + " Z = 10. 
* np.log10(spec)\n" + ] + }, + { + "data": { + "text/plain": [ + "Text(0, 0.5, 'Frequency')" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWi
f+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeEeJAo+IniQFRj7VD/AOk0Ne8V4P4mGfiL4lH/AE9Q/wDpNDVw3POzT/d36oL+NftO4gY2rj8qoXlsiW8RCj5hk1qXaiSWM/7IJ/KqWoP/AKLE3bBxWGFk/cS/rQ+dfxuxl26rHOh2jhh/Ou4tokjQkqMMoIriIRumQerj+YrvQgECP6KBXNnkrci7/wDAO6im5OXY5nU4FW63BQAw3Y96hEaqn3R09K0dTQPcp/unH6VTYDcU9s10UKnNSjc4KitNlEWynACin/Z1IJ2jK8ZxUwDY+WrGCYQe9dE6rRF2zPWJWIXaODV/7MkUZ+UcDNJBBiVQPX5qtzccdyePpWNateSiikrq5RiSONA+wZ6UxkVpHBTj6VYC52Y6UTFTwO1NT94noYdwoErHb1OOlbggRdDRlUAs24/pWRcnkgd63QD/AGBAf4iprXGSaVP1/Q6d4X8v8jKdF8txtHC/1qlGigOdvvV858qQnrs/rVWIgrI3+z/UV003ozOm3ysMLhBgc0CNQCuBxzTkAwi/lTmUH5R2HNO/QTfQRbdJEAKjGeatIiRrsCjmmxxn5Qvc5NWMBAw71z1J3diHJsiZVIZto6cUxUQsBtGRzUhYBlA6KM00Lzv7tSWiJI2hTeTtGdwxXU2tvHJbg7BkoOo+lc3IAWVv7pzXX2ID2yegUfyryc3quNKLO3BR55tMJUTag2jOeeK4/UEjNwQVGQK7aRAbnaOoAJrj9UiP2lieoBxXPkdRc7XkbY6LTTNLyk+zWw2jBUk/kawbqFAWAUcGuiETfYYG4xtrBuo9tySvQnJr0sBP3pa9/wA2clS6af8AWxUMaA/dGaRkUIMAc1Knz/N7Cl8vcBn
tk16vNbcjms9SiIV7AYFOEC5PA9as+SRgDp3pCucnv0rTnube2b6ldUUPggcCmkDOABxinuRuBPrQe9UWn1GKiEH5R6UhjXIGBwc0DO0/XinAEMM96Ze3UbtUsRt70KirlQoqTGSPrmmKeefXmgV9C3pqqbsDA5z/ACqeS3DTfdGQeuKi04btQj/H+RrT2jdx2Y5rhrzcKunb/M5qt73JtMtI2uNpUEBga6MW0aweXtUFFwDWDpJMd3g9GBz9a6GbcGVj0OB+NfLZrObrqN9D0MCo+zcrEawIZVyoyo649q5vWYozcthRxXUrIrxMwz121y2skLdFB0AxVZO5PEO/RCxySpq3cxZowAuAMDNARfMT5R92pbhRt+o5pqjLL6Y4r61S9085S90AigzcDoP6VWMSeYG2juKtdPtBHoP6VA3Kr+JqoP8Ar5FwbX9eQ1Y1znaKsBF2ZwATTIxz+FWGRWTP41M5akTlqUQgzjAyDVq0RFuk+UfeqDo/vmp7cf6Un1FVU1i/QuTbJ7+JPtb4UDaSKp3MCOQGUEZB/Krl4Ga5lPuagn+9+FY0W1GPoRGTUrorPGmQMClEanqBzTmTpmlVAFHoK6L6GnNpuRCJQoG0YzUvlIWPApP4akAB579KUmKUmMdQyR4A6H+ZqB0GG4FW2HyAdgP61WkUA804MqnLUn04r5c6kDla9I+DqhNH11QMAaqf/SeGvOLBN3me616T8IRjS9eH/UV/9t4K5qzXMz0Mtf8AtMvT/I9FooorA94KKKKACiiigAooooAKKKKACvBPE5I+JPiTH/PzF/6TQ173XgnihgvxF8Snv9pix/4DQ1pT3PPzP/d2TZ3FD2ZQP0qlqJxBDGOcCr1n88W09R0qjqY2pF681z0P4yj2PnF3MxN4uI9ozhgf1rvonDQRL3xzXDW/NxGT/fH867qOIeQuDXJnrj7l/M7sO25OxmXsYNzg9AOKpbNo3EcAZzWjcgiXB6hcVRkIKoPYjFRhpPkSOSqlzMrrIu049atxAY+ozVeLYQSccGrcKL/e6jit6zSRnBakUFsQXdcnJz+dLOj/ALwgc7Qo+vNTRsYmkfqpHHtUVxliu0nKjcR61kpylUuy2kolEsy4A65oMDAGTnpVuVAkG/aM5FV3crHg9DXXCo5axMmrbmPddUrcVm/sCDcPmK9KxJwTtBHOa3VIfw/a467iP5VvjHpT/wAX6M6F/D+X6md/BJn+5/WqsK5MxHTZ/UVccjMgHTZVWHpNgfwf1Fbwej+RjB6MdEu1I89TipHjOOO/8qMExR7RkgCpyh3jjgj8qmU7O5DetySGMhlwKim4kYmrtspDcr0OP0rPvObhhnGDiuelLmqND5dBp+6qdyMVM7Ybj0wPrVaM+vXtVuNAzKP7pya1qe7uIR4yqKO5I/nXVWkZktY15AI5x7VzMhYvnHfArqtNcG3jzx8teFnE5KjGS3ud+XxTqNMsbgX3dzxXJauAJJCPvV1IjfLHnHUVyOrZ8yRiSOc1z5JFe2dn2Nse24xuupulSdIgYDouDWBcKPNOe9b6k/2RF6MmawJF3OOe9d+XNqU/V/mcmK+z6IrpDiTA+lKqEsM/Sp1TbKWJx82acgyVyOd/+Feo6jOUiMBEnAzlc1G8JYA4571oMAseCfmzxULDaWPUNzWcKzY2rGPNHlgPSoskk1fmUNg9D1NUWAJHOMGvSpy5kdFKV1YYM7eRzil3jAX+LFLn5TnrSYA574rQ206irhTim4OAT1Jp5XoaYDn5T2oQl3LmmEf2rGAex/ka12GRj1Y5rI00gX8Z+vP4GtQEszfj/OvNxa/e38l+phWeqLuk489t3Vq6UMJcjuBmuZ0nMlywHY5Bro/LA2ENjkZ96+UzhL2+u56WXN+zfYjJUqoHb/Gub1qIC8MmTyMV0koCqDnBDdK5nWyfOYjnvW+T617rzM8d8CTMmRPkAz1FKg+UH2FNlJVR9MUsW4xocc4HH419Y78p5lv
dH4wZx7D+YqrLkRqQO5/nV5sfv/8AdH8xVOQnavHHOaKTu/67FQeqHRAkDPXAq5tG0Y6ntVWPnJHoDWmEUQlvTmsq8+VoiWrMaQbZyfSprZv9Niz/AHhUdx/x8Oe1LbnN7EB/eFdEtYfI2tdfI0riP/TJx6uf51UugAAO/FX3bdeXRI4Vj/WqF0Q3FcmHbbSfZfkZfbIXwMDvTXzx6YpzFcgE8npTWYYFdaKXQNmVI9adtyR7UxDlc54qcAZH1zRJ2FJtBtwMeoqpJhQ2e1X3GVB6cVmuu4OM0UtS6Ku9S5p/WT3U16T8JP8AkG6//wBhX/23grzTTSS8gHZc16b8KBtsfEA/6iv/ALbQVz19KjR6eWq2Jl6f5HoNFFFZHvBRRRQAUUVHPCtxbywOzqsiFCY3KMARjhgQQfcHIoAkoryrVpJ9A1LxNqGj3+qPBoOktuS61Ge4R7uQblysjsPkQKf+B+1a07Xfg3W7KOLUr/UYbvTbuWaO8uGmzLCqMHXP3c7mBVcLyOKAO/orznSmv9LPg3VH1e/vJdbYRX8U85eNzJbvKGRDxHtZMDaBwec9aq2F5qI0PQfFrarfPd6jqkUU9q07GDyZpjGI1i+6uwFSCBnKnJOaAPUK8D8VY/4WN4jz/wA/UP8A6TQ17jqN5PY2hmt9Pub+QEDyLZow59/3jKv614PrE8t9468QTT2U9lI11Fm3nZC6Yt4hyUZl568E9a0p6M8/M/8Ad2atuioF6DjNZuqYEaE89cVpHhhjsBn6YrK1PPlxZOcg/wBK48Km6ykz57sjPtstPFzjLj+deiRJiBMduT+Vee2wBuovZ1P616JDnyWweqZX2ri4jbXJbzPRwaTkzHdW3TM7Z+ckZ9KzZOLkj+EHNX7jzNiKT+82cn3qjJgSlT34rTC7XPPrbiKikcYGTzVmIELjuvAPqKqopLnn5c5qdZSEJ7gf1raom9ERFomZ0w0XHHWoS+XT5eWPJ9BTAmTnI3n7xpC/DnuBtFRGmlsNybBleW7bLHygnTtmlKo0XIGRTHuBHIIlByw60GN2X5Tya1s1a+iJbMi7P70Y9621VY/D9sccnJz+VY14myTjpkmtqT5vDlrj8fpW+Ld40v8AF+jNYfw36fqZMrYZ0HGUzn8arwE7pV/6Z/1FS3B/fEf7OKhgH72X2TH8q7Yr3CIr3WXLfJIPbYv8qtKpR13HOTgVUifagA7KP5VoKVd03djn8RXJWbTM92WYUyG5wSCB9ayLxMTYzk55PrxWxuBOR2Gax52zLk9yawwl+dsueiViKJSeSevT2q7CpYgjgk8j1zVRcsMDtzV+3ZRmTHQYrprt2M1qySUKNq8detdHYKPsyD1AAP4Vz8yDyVbvmujswq20CjsN3+fzr5zNZfuI27s9PL4/vWSNJliFOQM9K4vVyfOkXPQV2A2xyEAfeYiuP1bm5kPrkVWRRSrO3YvMJXUb9zcDY0OEjuvH8qx/LYrjPPXP41rkhNEiXHRf61mQuXlYfwkV2YS8VNr+Z/mceI1cV5IZKPnyOlKComY8YyCBQ/3WAIzURQuAy8Etjn0ruSurM5upYkIaQHoAKhYbVOTnHNW5YlC565ABqsV+8rdMfpWdOSa0HJNFC4+8CDjoazc5lf2rWnjHH6VlsoV29Sea9ag04mtBrVAeQKQgt3pBncPSjd84UdcZrc6LMfuBbaKYRwTnkHmlRRuJ75x+VJ1U+tCBabFiyz9siI4xk/oa11yEZv0rKsATfxjtg/yNbaryw9wa87GStP5HPWV5E+mExXW0D8a6IZDAE55FYFmD9qyP4RXQHDBTjknBr5XNdaqfkejgPgZDMMktnK56VzWvZEox3Aro5eYcLx61zmsyB5UGOw/lXRk6ftl5XM8a1Yy5+UHuaWElSuT2pDygJ7cUq4EgHpzX1H2bHmdLEkY3CbPdP6iqknACn0q5CcLKx5yP6ioZY9xDClB2k/66Di7MbB+nFanSFm6jpis+MYY
+lXFcshOfkxjFY4hXaYr6mbOAZmpLT/j/AIs/3qJ2G9yOxxTbXm9h9d1df/Lt+htD4TbugFln2r99iCRWXN8xHbFbM6jzJcd2NYkx4PrmuHBu6M2vfISQXPHKnj8qCQcGgcyNn0puOg9DXomg9ADGPTFSFsZOMkDpUa4CqBUqDPzd8VEiJbkmS8OcY+Xp+NUDxPjtjmtFOU56FT/Os2QMJG5+lFHdoujuy5pRHmynHRa9L+FBzY+ID/1Ff/baCvM9OBHmf7tekfCEk6XrxPX+1P8A23grnrr942ellr/2mXp/kei0UUVke8FFFFABRRRQBj2nhrT7XTNSsHElzDqU0012ZyCZTL94HAHAXCj0AFV9J8I2ml3gu5b6/wBRmjtzawm+kV/JiJBKrhRnO1clsscDmugooA5rSvBVhpN7aTpeX9xFYKyWFtcShorQMMHZhQT8vyjcWwDgUlv4H0621CGdbq+a0t7lruDT3lU28MzEkso27urMQCxAJ4ArpqKACvA/FBI+JPiL0+1Q/wDpNDXvleB+KuPiL4kbv9qh/wDSeGtKW5wZl/u7LUrbbhl7FV/lWdqCZWJu2Dir124Wb3Kj+VZ+oOxji44ArnwqfNFr+tD5u/vWKNop+2RKehkH869Gt9psQRnGCP1rzy3YC4hPo6/zr0SBx9lHuCR+debxJdqn6nqYF3nK/Yy7hcy49Bg1iyOGkZjWzJIPOIPcGsWVALhxnqTWmB2s+xwYjfQVZdp2DqRxUySApnsRz+dQK4DHgZHSpcK0TEH71dc0uxzpkUZbzGlHR6dnZ9AM/jTImJbJHyDnNNPKrGT97JNW466iHErGC7fhU0N1+7P6cVRkBlf/AGRxTlIwFB6nmrlSTjqCdtipfSM0rHsTxW0jH+xoh2CVhXnDhR2rZz/xJIMf3TVYqK5Ka8/0N18FzHmfMpPtSRZ3ynttGP0pJOre2KWI/vJPTaP6V2290S+H+vIsRn92F77RVtSWZd3bBFVY2HlH3AxTyMZ2Ek9a5Zq7MXuW2ueGx681VIVm3epzUTS7eB1NSxJnDnoKlU1TVwdwBwq+rGpLQl9gPY5NRjaQZAfYU2B/LXaDyaco3i7bgjRaRpCMfcBwa37UYigkPUrs/D/IrnGmCW4AxnIroYXBtYR/s5rwczi/ZxSXV/kd+Ca5my0vyzhj0AwK5DU1AuWJ7EmusiZGthIGypUEGuR1Ng87HtipyWLVaX3GmPd4xN2dcaXAw6hM/rWLE/zle5zW1NIg0+BSefLxXPtIFlYDqBXZl8XKEk+7/M5cTbmVuy/IkUKd45xux+VLGwfH93JNJEuSffmmyMFjK9K77XdjmLwJxlvXiol+ZnDdehpQ+eR0AprHzCx6HGOK50i2yCWMq2Ow4H0rHkx5ze2a1p2YsCexxWRO2JCB1Jr08Kn1LoK8nYao6H0ox/EOtHTFBOWAHSus6eouMP7UjAAMfrStwx9M5o3dfegSvuWtObN7H+P8q2Y2PmMP9r9Kw9NB+3RHtz/WtkSYLt6Ng15uMV5/L9TCtpI0LOTN/sH41t7xzn+8MVg6a267K9wRWo4MSs3cnOK+YzCmpVVHyO7CTcabZOqD5j3x/WuX1pSbhmHbGPyrplDbWB43AGue1rAZiOoHStcobWIYsb/DRkqcRx/XmnZXzCeaapHlIaQuFOPUcfWvqbHlk0ZCmX0xmomBzkd6IgS0hPfH9KVcs2R05qbWbBjggyQOgqWBsoV/hNR9MgUgbnA6VLXMhFWcYmfHc8/lS2n/AB/RN70yUZd2J6H+lPtGzcRkdK6X/DfodP2TckkXznHdsmsaYYAz1NXi7eZISOdtUrk42+prjw0OV2MU7yIQaYTyR3qVcbFppHXjmu1PU0T1Ej4VQfWp4mOw+uKjAxj61Ku4KRjjbUT1Im7iqcoM+lUrjLP+NWgeBn0/rVVziQk+tVTVm2XR0lctaf8AKZc9NlekfCIY0vXv+wr/AO28FebWbDdLj+7
XpXwj/wCQbr//AGFf/beCuet8bPSyz/eZen+R6HRRRWJ74UUUUAFFFFABRRRQAUUUUAFeAeLGx8SPEa/9PUP/AKTw17/Xz14wkC/E3xEvrcQn/wAl4a2oK8jjx8eag0S3TZui2ew4/Cq9/IDFCoPY/wBKZcyk3AOeMD+VV7p8xIc9jVUaVuX+uh8zCLc15jbd/wDSIuP4x/OvRFINpGQcbR0rzS3l/fxj0YfzrvIbjdABnjFeXn1Fz5Guh6FOSpSafVFO8mCy5HGM1nXL5lVl9KW+lzMSDwSaptNkj/dNdGGocsUzzZycpMto6knOAc4/SpMYDAHtxVFGy+c1PHN8rZ7DFaTptbEE0O0qE44NRzttkYBfQA0zJSNXB5JptxMMqfSiMHzh5DZXCjavWoBuBzk04kcs1Qvcg5rqhB2sioxb2G3LfP7mtTzcaFH3I4rBuJvm3elaJdjpcWGwCOlOtSuoX7nRKDjBX6lYtktk8nnFKjD5z/s1WL8OT1oilyX54210OGho6TsaMTjyM9SAOKldgQCv6VSifCg54xUnm4HtWEqetzklB3JHAGD3PSpFlwgX3xVbfnG49OlNedV+oo9nfRgoN6FoyDO0cAc1DHIFJY9jVb7QPXr1NML5woP1q1S6GqoPqaCy7hy38QNdZaOvkRnORt2j8ea4eN9zjB4zg11NpNtiHOQACB6cV5Wa0OaCSKp/upmis4EWxfuhsYHpXJ6g5Ny69s10CyDaCOMkkiuYvZMuzfWs8ro8tSTQ5yc3FHRzuGtYFzz5YOayNod2PcH86syT4tYeeRHjP51QWTnOfrW+FpOMX8/zOeb5nctK+xA3vSSkP26Gq7yAoVB6ik87K8etbqm73Isy+j4BUjtTGlGSy9xjHvUSSgxqc5PrUbsE+YdPQVnGlqHkLJIWGSMVlyHLknsauSS5X05rMmkw5x6130IWOrDQbZJklRzQHHAPBqASE96C4/EV0cp2+yZY3DnnNITyR75qsJNoGT3pxlwpbvRyh7Fpmjpzf6dEPTP9a0fOA35x941i6fJi+jOexq5JJiRs9CTXJWpc1T5HFiYNTsbekSYuHZuDit0zLKoAweRn2rltKn3XPzdGHIrctZF+bnHrXz2ZYf8AeuXVWLw9RxXIXTMV2Acgk81zeruWuHHTmtcXIaUKPurXOapIwnYk55Jq8sw/LWvboFeftLJEIYbBg8Y4pAQSwJ78VWMuFAHGKd5gHOe1fQ8hj7JltHAB/WlVsAgdD0NVUk7Z6807zh90duhqXTIdN7E5bHFG7jaPfmq/m9cnk0eZxgHn1pezF7NjJW+ZhS2bhbqMHpmqs0uJjk9qW3kBuVweQa3cPcsdvsn7P5G5PJtnm9ATVWT5gPUGi4lzPN6ZJqDzDjrXLTp2SOJQe4/OEGRTQeCTTXk+T3ppbKYHet1E0UGTAjK896mZsD8KqBgGX65pzzZbFS4Nsl022DnIxnBx/Wq0jZdwTgmpJJAqhsZ4x+tVmbLknqa2hE6qMOpfseDKfY16X8H+dJ17/sK/+28FeW2cv+tGe1eofBs7tF10+uqn/wBJ4a5cRGzbO3L4tYmV+3+R6RRRRXKe2FFFFABRRWX4ma7TwprDafu+2ixmNvs6+ZsO3HvnFAEtvrek3eoS6fbapZTXsWfMt47hGkTHXKg5FOg1jTLnUJdPt9Ss5b2HmW2jnVpE/wB5Qcj8a888J3Fzpg8K2lpqVpfw6nYO3kR28afZSsQYMpUbtu7CneSSSOc8VW0f7D/wjHw7+x+V/a325PN2483d5cn2rd367t2e+M9qAPTP7Y0z+1P7L/tKz/tDG77J56+bjGc7M56e1E2saZbajFp8+pWcV9MMx2zzqsj/AEUnJ/CvMB9l/wCEGj/1X/CQ/wDCS+3nfaft3Pv/AKrP/APanax9i/4RX4gfavK/tj+0X8rdjzt+2P7Lt7/3NuO+fegD1qvnDxy5T4o+IPTz4f8A0nhr6CvpNSisA2n21rc
3fGY7idoU9zuCOf0r548UreT+Ptel1GCCC6+0RCSOCYyov7iLGGKqTxjsP6104X+IY12lB3IJZm84k9OMVDeT/us++Knliw35Uy5tt8Ck/wAVdkeW6PBpOmpRbKVvPm4TGcZFdfb3jLb85wenFcnbwbZEx13V2lvaf6MuRyVzXHmUqaUeYMfZzXIYtzOTIxqoJidvqAat3sOyRj25NVliyR9K2pcvImjlp8ijdirM2Tg1YWf5CDn3qNbR2fgfL9asCD5PYVM5QM6jh0ITcsck9OoqAzbl565zUvlk7ifwqNocNn1q4qJcOQZLMz4A6d6iYDbmrCxbgVFKbEhMkHj3q1OMdDaNSEdNjHuJMYLdB1rTiut+mwkZwVB6VSuYDzkdDWgtuBp6Z6bauo42VzqxE6bpw9TPabcGIzgimQS5DjuBU5gPPHGKjit9u/HpWl1Y2UqfK0SRzEpj2FOa5IOO1OjhO0emOaQw4NR7tzBulzMGuM4FVZLjce/JqVoi27PTGKYkIK/SqSSNqapx1IklyxQ59Kck7LkscljgVOlvg57mo1hCvn1Oaq6Zpz05XJY5dmMZ5IroILkrECM8isaK3IwK3oLXMQyONvFefi5Qsrnk4pxk1ygbsnIGc9awrub7wOea6P7HtVM9xgVgX1sC5HpmssHKm5PlIw9lUXOWZLgm2UegAqn9o+YrzzWg1qfs6HsVrOmhwhHvxXTScHdIqjyN2YvnnfgU03BCn6Uvk7QhPUnFIYf3ft1rX3TZKmSQz8ADpipPN6A9AahSHCg04xMrA/w4z+NS1Fszkqbk7DZZAR+Oayp7gLIx5rRmjJWs94AS2e5ramkj0MGqa1ZB9rCruwcfSkW53EkZ/Knm1wuAO9SmD5CcVroei5UVsRGbcgFKJiMgngnilWIeWQM9aRLcCNVGeOlGgr09ixp0xa6B9OlWpJmM5H8OagsosXS49MVcaHMpz68VjNrmPNxMqftr+RPY3GJOM1qxXTgA56nms6ztsyAAd+a2Y7PbGEPUc15mLnSUtTyqtpTbgVheFGI55PpWVf3GZCxzW5LbKIwT1/8Ar1i6nblSQOtVhZU3PQdG3tEpGQ9yxVSDwaf5x6fjTUtvNjKP0PBxUht8BfXFep7p7MnSWgqTMSRntTnuBGmeeKakIMhznIFPeDIx2NS+W5jJ0+bUQzZ6/hSi4I453YyTSiAA4PTpUgtycDHy0m4kSlSRlXFwXmbb1FPsrgfa48g7mOM06a1EcjYzin2duI5oyO7Vo2uU9CU6XsGl2/QvTS5kf3qIXGOuafMjNI231oEOevQ1iuVI8yPs1FXI3n3CkWfA71I1sByO1M8k54H1qk4midK2geZ0P50hn/eDr0qQQ9PrTlhDdKLolyporSTk49MVQa8ZmYDOVYg8dq1JrYMBmqMkIV8gc9KuDiduFlSa2JrKYkyn25r1j4Hy+f4b1qTBGdVPX/rhDXlOnwFVcHqRz9a9a+C6hdC1wD/oKn/0RDXLi2uX5m2HlD281HyPS6KKK887wooooAKKKKAKVno2l6dcTXFjptnbTznMskECo0h/2iBk/jRBo+l22oS6hb6bZxXs3+tuY4FWR/8AeYDJ/GrtFAFL+x9L/tT+1P7Ns/7Qxt+1+Qvm4xjG/GenvRNo+l3OoxahPptnLfQjEdy8CtIn0YjI/CrtFABXz74vXPxH8R/9fMP/AKTQ19BV4B4tGfiP4kH/AE8w/wDpPDW1D4jizB2oMzrpNsgx1wD+lEwH2aP2BqxcRZlz7D+VJcxhLOInrjmtY1F7p81GW3kZ8Kf6QnHcV3tuqtZg9woribfAuI893ArurZM24UfxL/WvIz2dlA7aV5zfoc3eRb7mRSOAeKprEFfk9P6VqXi4uXHoSSfxqjtxMwPeuvD1G6a9Dzp3TaJION3HU5FWGhBTI/iHSlgMYTJxxxmpJFKQ7l+bPQfWuedS8+xSjoZ7RqxZB/CarsmdxHY4rRuYAsJZThiecVT
VQrbCfeuulUurohppkcSCPk1OW3qQBTCu589ulWII155HX+lOpJfEw3ZjXUOHAPfmtJrcHTIHHXpiqd3zKT6cVpqMaRCSfYVpWm1GD8/0N226a8jIePBYe1V9mDJj+7/hVt+ct7dKiRfndj02/wCFdUZaF05tJkiJmIDvgUFOeeKsQoGiPYlRSbM4zWXPqzBzdyhIoZyB2pEixn61YKgMcCkxhc9Oa25tDoVV2shFj+YelIIgWGasRISFHc0NFsG3PPrUc+tjP2jvuLGnzV1FpbYs1LDsD+lcznaUxzlgK7KJN8UaKcDys5/KvDzeq4xj5mmGhzybZBFBvi5H3a5m/j2sxx65rsOkTog525yK5G9VwWUgnA5rLKajlUkx4mKhy2LflBtOTP3gMgVkyxlnC46c1ugbrKLtkY+nFZZTbKQecd69HC1NZerOZvlaZB5fy5I+7TXj/d5q6qbiVx1zTJEAX2FdKq62JUmVUi5x608ptyDUqjMe78KQHcDuGMDHNNzbYOTZRmUMBj1zVR4sn9a0ZlGBjvVUgEe9dNOWh2UKjS0K4QAE+9Io6jtiptvOO3WggcD1rW50e0IhEAuPbNCxj8hU+OKCuAcfWlzC9q2LYxZuk/H+VXGh3P6YOaisBm8iUe+fyNW5BtkAzyCa5qs37S3kceIk+e5e0m38yZx6Ctd4MsPQdT9KztGk23bJjg85reRFZJOh2gj9K+ZzGtKFd320NsNTU4eZTaFWjYY6Cue1aMCUAdeAfyrrET5yu3gr1rnNWh23jHOR0rTK6961mxYiHIlJGCU27gBzQI9wB9OKlcbST70sS5r6bm0uQ6jtciCYY08oSo469KlkUKtOGNi/pUOfUzdR7jFhzxTmQoqqozjqasRpx15olwq+9Ze0bdjLndzHlTMrginWyA3CD0NSyAb8+9SW0Y+2J6V1OXuM7XU9y3kLIn78hee9PMecenNSSri6OBxmp3iG4Y6YrmdSyRyOb0KTxbenNNEeW6VYlQg5Bz7UiodynHXtVKelwU3YiVM447ilSMHAFSoh7j+IVIkeRnoRSlOwnNlJ4wFA+tUjHknI6VrSoMAVmvlUY4yQTxW9KV0deHmyezQNvx2r034ODGja6P8AqKn/ANJ4a830pcyTA9Ntel/CAY0vXx/1FT/6Tw1zYiXvOPod+Xv/AGqS8v8AI9FooormPbCiiigAooooAKKKKACiiigArwLxWP8Ai4viM/8AT1D/AOk0Ne+14F4r/wCSh+JT/wBPUP8A6TQ1rS+I4My/3dkEhzKoHfAqK+3COJSeOamTcGf1I4puokFIkog7VIo+ZhuUYVzdwj/bH867+3+SAHGcAiuDhIF1GfRxXoFuS2n5HX/69ePxDL3afY9LB6yfoYV0ubl8j7wFVPJ/eKTz1BrRuDuuST2rPMhAGOpFa4eUnBJdjgqpKTLEVvGVYcY71InBCkfKp4/KoYVbBx91xn8asGQEKn4Gs6l723KjaxUmyuXbleoFUpQMbgPmNXLqUSBkH3QKznkxhvwFd+Hi7XZjPfQdIwAAH1NJHK2flyAaiY4B3d6IJkAPXiurk93a4raXIbofN9TmtwxqdCtzx1zWLcjJHpW1GC/h6DH8OQf0rDGO0af+L9Gbx1pv0/yMaYYVx3FQxA5YHptq1KBukH+zmqsWQ0n0rtg7xJg/dZdiwsI9QKicMF+90ojkygx+NIz4DY/Gs1FqTM7O4wcZPc0nB+UjvinRg7CxpG9R1rXqX1J422tn3pz4Lbu2KrBtzU5W3hh2B5rNw1uQ4sXOJIwf7wrtbYny4sH+CuMC+ZInoCK66AFViKddvNeNnKUoxXqdmEdpE9u+2Zlb05rntRKCWQd2yfpW42POPq7AVzupRss8rN1J4+lcuWQXtm+6Q8VJ8iXmX5MLZxY6Fc1ksrMxbPStbAFkin+7j+tUrVd07g/d4xXfh58sZPt/mcs1doVFwzEjAUmopUxCAepXJq3MVYOo7VWkYSd
euCK0pybdyGkiugBXYO/NO8sHeT9KbDtQgjtUzYdWZerCuiTaZJnTDBx2AqqpyAxq5cZ3bfTrVIDOB713U/hOuj8I7t+FNIB2+op5Xj8KYDkA+laI0j3HLz+FBPBNIGxmgYL47gUBbqXNOUfbozjuf5VPdEJKTj+KqumNm5jPfJ/kasTEmYk+tck1+++Rz1dJWZqaMN8jMPaty2bHnI3XcBWNoPV2PSt2OEtcB+NpHNfMZpJe2kn/AFY7MJF8qaHtKqysgHIUc/jXO6n+8lY5x+8P5DNbwjKyylyOT8v0xWBqqbJx/tc0srUVV07BjXJw17mHIpZWH05p8aksmPTNSOoWI+wohOJI/oa+qcvd0PP5roiu+GAB4zzTgw2J7dKS7Ulj9aiPAT6VUVeKLSTii1FJg8n3pWcMMnqarKw69zSq+5ial09bkOBA5yzZ6ZqxakG6jHvVbqD9anslBvYz74rWp8D9DokvdLdwAt02B0bFWFG4nP4VDfqYrxh6vUked3P3ccVwPWnGS7HM1ZkUiEP14xQEO0E+uaeR8xx2oZsIM/jVczskSKiblI6GhQArN/tU8MSjH8qZ1PHc81F3qBDLjB4rNcYfHuav3J2oKoOTlsda7aC0OigizZ5RZCOuyvSvhAc6Vrx/6iv/ALbwV51puGMmeyV6L8If+QXr/wD2FT/6TwVhWfvNHo5Z/vMr9v8AI9FooorE98KKKKACo5547a3luJm2xRIXdsZwAMk8VJVXUvs39lXn2xylr5D+cw7JtO48e2aAM7SvFmkazG0to92IVh88zXFjPBEY+PmDyIqng54PTnpT9H8VaNr07wadeGSVYxLseF4i0ZOA671G5c/xLke9eaalFHdadf6H4I1W71bT5dEuY54ftLXMcLKqiJUc52s3zrsB6dhity41C08XeIdOPhuUSfZdJvEndBtEBlWNY4mPZtyk7eo20AdZpvi7QtXv/sVjfiWchmjzE6rKFOGMbsAsgHfaTRB4u0K41f8AsuK/DXRkaFf3TiNpFyWRZCNjMMHKgkjB44ritH1Cz1YeAtL03P2/SiHvoQhDWaJbPE6SD+El2VQD16jiqmnXMMnhzw34WQk6/ZaxE9zbbT5kQjnLySt6KygkN0O8etAHrdeB+K+fiH4k/wCvqH/0mhr3HUbOe+tDDb6hc2EhIPn2yxlx7fvFZf0rwbXLeay8a+IYZrye9lW6izPOEDvm3iPIRVXjOOAOlaUviPPzP/d36otrH82e4UGq+ooFKjvirsEeZVftgAiquqocgk4HFc9Kf75K582lpczYVzPGP9ofzru4SYbFAO/rXCwf8fUWOhcc13UQzbBs5yMAVxZ9tBPY78LfmfoU1VWuZZD0xWRckI2R0rVikJ3bl28n+dZN6VLpg8CpwaftGmc1ZpxTHpO4I2gYp+PlJB5aoElABGBkdKkAKIWyeRkV1SjZ9jBMhndcbAecYqsF+bJ+6oqSSE78ZOSKc6bcRfxHk11RaikkSU3RpOvQmmtEFGR1q3MOiIMn2qA28v3mVhW8Kmm9hpsil6L9BmujhUf8I9AD/FXN3AK/U10ULZ0C2I7CuPME+Wnb+b9GdFL4JPyMefARv93+tU4+Wc+1WpyDu56r/WqkP8fpiu+kvdMofCxUDDkfdOKcykg46YqSMZKqBxjFS7Bu54yMflRKdmJy1I1Q7DnpUKx4yPQ1eIygOOF61Wb5ZMf3ulTCbdyVJjAm5+OlP2YTaOuaX7hUd6lTau52PBORRKTByYmFQIp65rpoHBhUg/wgVzkcRZlY/wB6t3Yfs+ASARn9a8rMEpKKubUG02yyi+ZcZP1FYWsvvlJTs2DW2kgF0cdA2Kw9UIW5cevNc+XpqvfyNa79zTuWpJP9DRuxjx+OcU23K7dn8SAbvxFSLCWtombgFcY/GmT7YYmC/e4z71tFp3gu5g7rVlZ22528jBFVS5y30JFSSPmPA6hqjbBAPfb/ADr0KcbLUxKySfez2NS
glRgdDUJ+U5PANKD8oIOR1rqcbmjXVDrrkBu561R+6xqy7F2x2xVdwBk/jWtNWVjejorMUk7sCmHPOfXilxkk0tWarQXbn8Kbyje3enZwufag/f8AqKBXfUsWBzeJj0P8jVgcszf7VQ2IAvYwO+f5GraoMMp4JJIrkqytP5L9Tmrbl/Sndd6oAccmukQqV254I5/Gue0vAkkA6kVtRtmDAPIxivl80jzVbo7cHK0R8kY84nJxsCj86wdZX/SAR1UYNbZcqdpOSMGsTW8rcFu3A/SnliarpPt/kGLs4adzHPKe2ahiJMwDdOanByGHvUe3LqR9K+pT0aPPi90JMxJb07VGRhUx706XhmB9qR/urjrWkdEjSOyGfT0qSPrSKmMn3zU8KAYLdTSnKyFOSsU5FDZAz1qawG28i+tRyfLO3oTUlnn7ZBgfx4NOf8N+hrd8ljS1H/j9YjuaYpBUDsRS35zduB60gx8voRXBBfuo+hzS3EICtkdRxSSpv2seo5FKQC/B6Ueg/GrXckA+E2txnjipcK25e45qOFCRuYdqkcgEnpniolvZAVrhA0KDsf8AGs8jEhHataZQYEAPSstziQ11YeV0zak+ha0tdxmHqmK9F+EH/IK17/sK/wDtvBXAaNgyS+gUmvQPhEMaZr4/6iv/ALbwVjVl+8kvQ9TLP94l6f5HolFFFZnvBSEgYyevSlrA8XQWVzpMUV7ot3q2Zx5MNoMSJJtbDh9y+XgZG7cMZx3oA36K89sLLx7o9tc30c8VxaRrvi0e8mNzOwHVRcALhj2DbxnvXoVABRRRQAUUUUAFeB+K+PiH4kP/AE9Q/wDpNDXvleA+LQW+IXiQA/8AL3D/AOk0Na0viODMv4D9UXLRXLJjJDgD6U3V4vkXt1OKt6ZkwqO+Ki17iRdp4xivJhUf1xR9TwOS1HmOejG24jx/fFdyhKwKAvAXdmuKQfv0H+2P513Ntl7bnoFxSz2VlBs6MH7za8jHaYi4lG35cjB9c1kXSN5oBY8cfWtydUWTGOAaxb98yDHU1tgWnJcq6HJWTT1HRFAcMRnGKlLEKWb7oqtHGrfMcZqTzRJGUIIUcc+1dUo66GKGwMWuN79MZwasbMgykZYnioLbEspXHHXmrqIWmB6Rpnis60uVlRVxI7ZEJkfGc55pWkibK4GR2pria4kwjYUcHNTGwUfMMbu5rncop3qS1NEm17qMO+UCU1rKxTRbdQONprL1RdkuPQAVrwDzNLhTsEzmurENOlTk+/6DjflaMKYkOfpUCkqrH26fjVu8UCRqrDv/ALv9a9Gm7wTFD4S1bg/mOKmwCyueAR0ptsu6BWHXGatLAGIzjbgYHvXJUqJSdzFptjZMbNqr164qjIwDDI5HStKRVQhVHLelUJkXzyfQUUJIez1GZ5BPWrCR+Y2MYUVXjXcwLVe2kRBEPzEc1dWVtEIRCTIAB8ua1mc+TGo79/xqokaRRAEDJ61a6FB227v1rysRJTastjemmrj4WBvpRjhSPxNY+qfNM57jNbGzZMzL1cisDUXJmkGe5q8BG9a67IdS9kvM3iQ2mRledqjpWZdOXPHXANakaCKxQdigJqiyIy9BmssNJKUn5sKybS9DMTKo2eT1pXU4yOmMVI67idvqOaawYRgE9TXrKV3c5ioRvX2pApVcdRUwA27cdaaAVIU9ua3Ui7kW0844yKgbAbB54q4V546npVSQAyN6jitISua0pXYw43EUox0puMml6ke1am47AI/ChgQB9aQHn6ijJwOetIVtS1pv/H9Hnnrj8jV1VLE9juOPzqjpv/H/AAg/7X8jWwkeQW7qT+VcGKly1Pkv1May1RJpaMtw7ZJyOK04CfKJJwc1S0tgJXB5IzV4AAL6EnIrwsZK9Vp+RtQXupiq26XczdRWbrQzKAem0fyrSMIDljjGBiszW3Aj3AjJ4pYKzxEeUdZP2bT7mHuwGp4YKvHJ61CThiOueKkUAbSRkmvpmjkaEK7
26detKY8r05BqVRgn1p6gflUOdiOYhER654PNTKoBBYgZ4pQDj/PSnLGJCpb7ueAazlPuK9zNuMCVx71LY8XUI7lqbdIBcv7f4U6w5v7f/eFdEn+6b8v0OpawsX7pMSzE8nNMU8gHpirWqKFmkCDGW5qkD69MVxUXz0kznmrSaJsKWOMZpq4BOeTnikXG7jg0fz6iqsSP3FAB7U2Vt8YxwaaWIXcTmomPynninGGtwEllKxjHPH9aosdxz71ZcggJ2xVc4Un2NdlNJI6KSSNPRyFklH+xXoHwl/5B3iDH/QV/9t4K870rmWX/AHa9C+EJzpevn/qK/wDtvBXFVX72T9D0cs/3h+n+R6LRRRUnvhWD4qt9Re30+8023a6ksL1bmS0WQIZ02OpUEkDI3hgCQCVFb1cn4/jll0ixVbLUL+2F8hu7Swz5k0W18gkEfKG2t152gd6AJNJk1TV/FI1efSrrS7GGye2WK7dPMndnRtxVGYAKEIBJyd54rqK4vwhaaHBq0r6Z4V1TSZzAQ093CyKy7l+UEsec4P4Gu0oAKKKKACiiigArwLxVn/hY3iQ9vtMP/pNDXvteBeK2A+IviRfW6h/9Joa1pfEcGZf7uzotKiRIQT1Kg1l64ArIq9BxWna/PBGo7KM/lWRrWUkBbvzXz2DTeNcmzx63+7pJGPAc3qA9d6n9a76JdlmpXoSc1wNqwN3F/vr/ADr0HH7lUHQjn8qviJ2dNG2Xr4n5GHdIGkBPUsCf1rFvUAKSdznNat0paXy8n5T/ACrJvEKRqp6K1d2BVrann1XeTGRhmJx0qaRgY9q9cYqqkjqflAIqZY+pBJZu1d842d2YFy1jDKETr3q3OpAWFOpBzTLWPyIw45Y8YPrVsp5UZY/favIrVf3l/wCrnTCHulZ51gGwZzjNRxzXB3lsbT93ip1twrGSTOasxzQsCBjj2qJVIxXuxv5lRi29XY5/VFDNE/8Ae5rTtUzpMIP3fLOaoa0w8wKOlaWnuH01FPTy66sRJ/VYS8xQXvNGHqC7ZHx6CqUYyzDtitHUhieQ+wqhHnJyO1erQd6SZmtEzSsjiEk/wgGr3kjIC9uarafGGh3/AIGrgjZFULye+a82vP8AeOzFFELxrECe54FUbiDEgx681p+Vgl36gcCs+ZXMjKBk5zV4ebvuTJWIUUvMqr0DDP0rTjjWEPK3U9KqgrEAo+8xxVoxm4lGchE9KK8776IIjbeOWT55MY3cVaZ+Vx0PFNWUuVSIA4IzQ5CzsG6r/jXHJuctV8jVWS0J4jmQ56DpXPalnzXPua6DYdu8dFGa5u/LGd+ON1dGXK9VtFT3SOk3f6DGvqgNZkgKudvQcVplQLFCOrKCP8/hWaoO5i3Ut/SsMLZczXcmt0I2QR4C/dC5qOZWHzH7oFXFQSLJ64xTbmI+UV7V2Rq+8kzFx6lCGPMRz1zxT1UnAb8akgTKgd8cVajTeMEc9DWlSrytiSuZ7x/NntWbL8sz49a33g2nH5ViXKBZmz610YWopM1paSsyAZ596dwFBpkZJPzDHpStllHsa7ep0ta2FxtUD0NNTd944x/SlZd35Yoyen4UDWxa03/kIRZ/2v5GtlXKg46DOax9O4vYx3+YD/vk1vRIfKRsezV5eOklPXt/mYVE3LQi08/6QzDqx2mtOJ90pz0FVrNUa4Dj0qxIquPlPUV5OJkp1LeRdJNRuTZdgFbHGelYmsRdB2zWsCykAcg1ma03zIPYZ/KjApxrpIdZ3hcxlUfjU4A2qB17VEpBGfQ1Ki8H16ivoJs5JEoHQHqRThGvX0pwYFR/eNSCMZBz061yOdibEQGOfX+VOSPzgCfu5yKkwBz6/wAqdEvnFcfc7EVEp2VxpamZeIPtDjvim6en/Exth3DiptTXy52x1OKisWP9p2+P7wrsTbw7fl+hvC5ralzdSL2BzWUr8nPStO53tdzEDIOT+NZrjByO9c+EVqaj5IynrJjgwPTrilB6Z61
DuG7HfGacprpcSWiQkhSaiY5Ug9MU/ovFQSd6IIcVdgFJ2+mKrOG+f1q9EmVGfSqko2yMK2hK8mjalL3mi5pDfvZc/wByvRPhD/yC9f8A+wr/AO28Fec6UoEjY/uivR/hEMaZr/8A2Ff/AG3grlrfxJfI9LLf95l6f5HolFFFZHvBXNeNRMdLtObwaf8Aa0/tE2RcS/Z9rZxs+fG/Zu287d1dLWB4uudPtdJifUvEU+gwmcBbmGVIy7bW+TLqwwRk9M/LQBzXw81G61T+xmjN81vZ6ItvfPcJIqNc5j2gb8bmUCTJGfvDmvRK8+8IahFdeL5otN8Vah4g037CzO8zRtHBLvUAbkRQWYbsDPAVs5yMdrqth/amk3dh9pmthcxNEZoCA6AjGVJ6GgDml+IVp/aWrpJaOumafYverfCTP2hUYq2xMdNysAc8444wTZ0/xXdf2hHZ67pI0pri0e7t2FyJgyJt3q/yja6hlOBkdcHiuXv/AIc6vdajfWo1q6k0+XQmsYnlit0QNltkZWNFIVcq2QB0xkjitn+zNa8Uava3GsaWdKgtLC4t2zOkpmlmCqSmwnCAKfvYJyOBigC1pPjK5vrrSvtujNZWOsKx0+4NwHZjsMiiRNo2FkBYYLdMHBplr44muJrO6fSDHod9efY7a++0Auzliqs0W35UZhgHcTyMgZqlpek6/dyeF7DU9MW0t9AIkluhOjrcukLQp5YB3AHeWO4DGMc1WsvD+urpuj+F5dNCWOmahHcNqXnoUlhilMkYVAd4c4QHIAHJyeKAPRSQBknFeBeK8H4i+IyMH/Soef8At2hr3PUdMsNXtDa6lZW95bkhjFcRLIhI6HBGK8E17T7TTPHPiCz0+1htbWO5i2QwRhEXNvETgDgckn8a1o/EcGZf7uzobGVmiTaMg8E+mKzdbYvKc9F6e9XNLk2wYPUk4qrrTL2HOK8nDx5cY9DwZO9FamPZrtvIec/Ov869IiXMAJ9K84tv+PyD/roufzr0eF8R7COCMg1x8T3fs7eZ6OWNOUrnPXKH7Q7erFayL5C4B/St6/Kq0m0ZIJbArHuej+hPy114Co2lI83ER5ZNGchKnAXNXreAopkJzxwKhgVRwwyQcVr2cGRlvujnmu3F11CJjTjzOyJ7O32EPJ9084ParBi3ymVuFXoO1JE5mLR7SAOh9adK4YrCn4kV89UnOVS73/JHpQjFQ/rcozRyTTYXITjkVZSyCrkE0TTJBEdoBPtUH26QRhirDPGK3/fVIpQ0Rl+7i3zasxtVX96R/drQ05gLCFe+3msvUpC0z+jc1esX2W0HfK4r160G8NFP+tDlhK0mytqoxO/HYVlpyzY9K1NTbfNJ/uj+dZsYAkPsK7sLpRVxX3NrR0zA2TxwKufOgHy5J61X0X57SQYwSetXBLlNzIQT2NeNiZP2815msY+6iARyOS7gqF7etUHfYxZxhmJCg1qlmmAUAqOpqjcqhcM4A2njNaYed3aSM6kVa6KsSbJBJIeTjg9q0gCVEaDJ74rPEbSzqx4TPQ1t20axIZHxkjNPGVFFJ7vsFGPM7DoLdIFBON30qrMytJJ0BzipC8jSbiTtBqndOAjyL1B5A+tclCnJzvJ6s2qSXLZIuxvthkUjPzY/A1zd6481h71vCTEEj45Xn61z90PnbPXmvRy+FqkmZTd+U6f72kW5PD7en41knK7m/wBrpWyABpkbdcR8CskITGxbqTurjwcl7/q/zLxC+H0EUMCHBOccr9alBJIDDtk5qSNMkEd6hnJEu0AjPGa6ObnlYxtZXEhh8zc33djdu9XVaNosqRk8cVVikxnH3e9IFI4RsDORioqRc3qxxaS0JWG4Af3Tj61zt7xcNmuhiBztY/ie9YuoxgSOfQmuzAySqOI07STZnovOfahGx8p70A4AFMBywavX3Om173Hnh6Uig8DceaAdxHpzQLzLGmH/AImER/3v5GumgdFh6jDZrmrDAvI8Ds38jWy
hPlBc4BG4H0NeTmEOea+X6mU52kTQfKxIPYVNvIBJ4AXNUopCszjsUqaUM8Z2tgFcVxVKfvakRloTJKdqk9M/pVLWCJIkZe+eaeshJ8vkcdajvgBbRpuBI5/CtKNNRrRYue8TJThf51ajQ4zn8KrpyTxwTiriKcbs9O3rXp1XYzluTRqDtOOf5VN5QyDu6dRSwqCgOOTUjRHIIbp19682dT3rFKOhHsCtnqDxUqRnblRhR0xT1QbucEHgU7duIiTgeorCVRvY0UTBviZLls9jj9Kbpq51ODP96ptQTZdOPTBqLTj/AMTO35/jxXsXvh3bt+gQfQ2LnIvJgFyMn/61Zk6HOQPwrXnUi8k75YmqUyFssBj2rz8NUsl6IioveZnFTnlccdaTGD7etWcc4K01k9BXoqZncgHTg5pApLZ61J5fYcc0+OPk96pzSQ7kkMeRuPAA6VnXH+sY+9ajKyw7s49qypeJGzzSw7vJsuj8RZ08cyY/u16P8I+NM1//ALCv/tvBXnGm53MPbFekfCUY07xAP+or/wC28FZ1vjZ6mWf7zL0/yPQ6KKKyPfCuZ8bahqFjZabHpuow6dPeX6WxuZ4RJGilXJyCR12gD1JA7101c74xncadaadHb2UzapdrZj7dF5sCZVnLMmRu4TAGRkkc0Ac/GfFjeK/7EHjK0kJszdb49MQmPDqu1xv4zuyDnna3pXoVcD4DuraAaLa2Wl6ZZrqWijUbkWVuIiJQ0Y5wfune2M8/Kea76gAooooAKKKKACvA/Ff/ACUPxJ/19Q/+k0Ne+V4D4sbHxG8R/wDX1D/6Tw1rS+I4MyV8O/katoirGpYfd5rM1htkufUGtWGIsV9MDNY2tZDgv/e4xXm4O0sTueA17qVipatuuI+P4x/OvRYQTZrnqK88tRi4hA7uP5138L7rVCO3/wCquDiVN+zsd+WNc0vQz5okM0jsMnkfrWPdIB5fHB5FbboGEvqxP86zruJmdAv3Vcg1OCqWaTf9WOfEwurpFW3to3IZhzWlG26PYvAPFV0tieR396sMQqAJ97pWmInzve/6GdKPKrkrusaBY/vdKiZhACf+WjUJGsB85s7iOab5YdxcydVzjFc8YxXXT832NpNv1/IUW643vjrmmTeURj096bIZp3VVxs71FLZOg3Dr35reEVdc8tTKT092OhiaicXGB0AwKu2uPs0A9AKz74EzlD1GDVuz3GOIdwozXuVY/uYnN0GXbZeUn+6P51SjxvY98VPduVkmz93bmq0OC5YdCtdFKNqYJaNnQaKSLRgOuc5q6WWXDEEDsDVbRsfYnb0NaXlLIw/ujBH1r5zF1FGvNvuddKDlBEaDeQqgjPWqV5brLOEx8oGT+Fa5KQjC9T0rLvXMUqgdX4rHC1JSqe7oVXgow1Ko+e4SJOg61oGNpZWXPyKAMVTjKWw/2iauyysqhI+pHOa6a7k5JR/rzMaSVncjuZ1XEafpWaTuMw7Vda2EKBv4ifWqDbt7Zxgk5/Ot8MoJe6Z1XJv3i5CwaAjuQM1gXGWmck8ZNbagBBjsRWHMMSyY7nNd2CSU5Mm+x2AQ/YYlHTYDVBUBaTdyB0rWZdulx4/hjBrLXG58d8H9K8LC1HJTa7v8zrxELOPoJC6hFYjociqtw5Yqc5OTUmRLKye5FVHAXOOgGK9OlBc1+pxzk7WJIGCqFx8r1MYzH90jA5qvb4MYjP4VKzSIOcYFaTT5tCVsP87LDIPGPzrJv2LXUg/2q0fM3khOowTn3rHvnJuj9cGujCQtP5Fx952Kq8O/scCgpgZ/Gk9TTzkha9Q6dhucsVNKgUIMcDFJ9589+9IuXBI6HpQO2hc07BvY/o38jWuoYwlQehyvtWRpg/0+L6MP0Na+1vLOOoPFebi3+8+S/NnPV3GIP3xHbBFPikaQIAcZzn8qhQhZsH7zCpVAXdt65yK55oxQkhIfI4x8p/nVe8fcm/6D8MVMPmYK/U5PH0pbtFFmzDPygCr
g1GcbgtSlGo8tTjg4FTxbmOc8DrTLf7m09+aehbIIxjo1bVHe6F1NGEDZkd6eUYkcjHVqgTGAR07fWn5cEdMHrXmyi73TNk9CRuGJJ4xgUkQKnK1G2TIu77vAGPWrXmLFH5a9hWcrpWWtylq7mBqcpF2/XnFR6YCdUgB/vZp2oMftLHtn+lN0sn+04Se7V7VrYZ27foEO50M3yyyE9mNQyJvG5eM9anz5skqnqWLCoAWXOPxrxKd0vNBPUqOMH5hnNNCBugwOhqwzH+LpSDDDjp3rtU3YwsVxEpOAMGnoqRMeOSO1SfK2QOtMKqjkjO4jFVzN6MLWGkmQnJGzsD61k3ChZWHvWs8WUDN1U5GKyLjHnt9a68Lu7F0viJ7DIaTHXbmvSfhKd2n+ID/1Ff8A23grzWwJ3Sf7lekfCP8A5Buv/wDYV/8AbeClW+NnqZZ/vMvT/I9EooorI98K5Dx7qNvb6dFFLZtqVvFPHJqFjFCJX+zFZMOQfuAMm4NxyhGRXX1xXirwzr+qX2rPpNxpqW+qaUunT/axJvTBm+ZdvHSbv6UASeC00K0mmtdF8M6lpIeMO8t1ZtGHC4AXexJOM8D0ziuxrD0VPE0c+zWTpBtViwn2MSb9+RjO7jGM/pW5QAUUUUAFFFFABXgPisD/AIWL4lJ/5+Yf/SeGvfq8C8Vgn4i+JB63UP8A6Tw1rS+I4My/3d/I2YAzMoUcYANZetJkj6mtyzG2LOOT0rN1yNVlbnqv5Yrw8HV/2vlPFqQtSUjFtjieLPZ1/nXbeYy2gVBlj/jXEQMDdRg8fMP512scZlgBQnPtVZ4lem5FYK/vJFdHKCYnqC3H41UuJWMkSkAZPNWpUxJJFnkjr/OqcuWmzjOOlcuHUXLmCq2lYfvl3kKuVzwaeIxFmQk5PODTI5GjLIVztPB9alRCTucnb15q5u3kvzJir/1sMUSO48wYTGc+9PJaaYRqP3eOSKQM9wTGqlR2YdxUkhEIEScuwOPU1nJu9ra/l5lJaX6BLPHCFUYzVV7i4ZyDGNh6Gr8dooUNJyevIqYrBtx8vFYqvSp6KPN5mroznu7HF33Eu49avWw/cREenJpusIizMBjJORU1gmYFJ6AV9FOonh4yPO5deUzbz/WuPUVUjXB47Cr19GUuJO+CKojKsT6130XemrDjs0dJojFbJiwwoxz/ADrR8nJGGPHNZ+jfNaMD93NaEcEkYI3FsnNfMYx2rz1s7nXSV4LQVUWBeWJOOM1mXkrI5LAZ7VrCArlnYkAdDXP30kkk2XQr1x+dVgEqlRu9ycTeMUtgtxvuhI5PXpW1EFjQySdSM81h2sMjXCMwKrkcVs+U80rZJCggfWt8dbmSb0MsPfe2pCwkuW5GFHcVlT7xI49GroZJI4gUXG7isSXa07EnADU8FUbb0sugsRFK2t2SRAC3dmPCkHNY8+DIxz1rbhYNbMrKAC35isO4wkjLmvQwjvORnbax2gm/0Ha2PuDH0xWazHbjAq1If9GVgOPLUVQdyiAHrXi4WkldrqzqrzbtfsRJhWY56nIqpcqQQB0JNWyf3gU/dx1qrO2QDjpXrUr81zjkLbtmMjv2qUzMAN6gc8/SoYV+RiOueKl3gj51x659KqaXNsJCFlbO38aybwZuCewrUcKcbWxgjp3rMuf9e5zXThviLpv3ioQSgJHP/wBelDE7iOfSgkkkAdsUqLgV3nW9tQB+b8OaUHDHjgUxwUVccnvT+pPvQJ2tctaZzqcP4/yNbDKwVivOCaydJH/Ewi/H+RrdCvsJCk8kV5ONlaqvRfmzGorlSLk7gOduRSk/vSfSpUhKTRgDqMEelMkU+YxA4JNY8yctDG1kQRuTLyB70lzJsgwejNSKG85htPX+lRX4Pk88Hr9K6IxTqIUexCr8HHUdKuQEkjj5T1rNQ/IpHrWnbHG0beMda1xCtEGrMsiMMRgnBqwkBznnB60kUXIIOe4FWVjbI64Yc+1ePVq20TN4QuR
LAEjCknjgGhiqJtHLe9SmM7QCTxxn1pGVUAHDNWKnd6u5py2OevmBnZR2PNR6cD/akPpuFTX6j7TLtHPBqHT1P9pwHJ++OK99NfV36foYwtqdGGCzSeuagkGXLLyfSpGXbLIRyScihk3EEcH0rxItRdy3roQFZO68YqMhhg49j9KuYZcfLkGozlhkrjnp7VrGoQ4lRypBGcdqYDyQOTirBUc7lwPWoyACdo6jqK6IyWxk0MfJUFuKxbkfvmI7VtsuFyx7dKx5wPMbHeu3CPVmlJ2kTaeAxk+lej/CIk6Xr5PX+1f/AG3grznTuDJ/uV6N8IjnS9f/AOwr/wC28FFb438j08s/3mXp/keiUUUVke+FFFFABTJVd4XWOTy3ZSFfGdp7HB60+orhJZLWVIJvJmZCEl27tjEcHHfB5xQB5re+MdQ8P32qKmqXWrR2WmXFzKmo2S2zLKhUJ5eEQyISTkgMBwd3Izsf2nrXhfV7W31jVDqsF3YXFw2YEiMMsIViE2AZQhj97JGByc1YuvBdzrkxfxLqsd9GtrPbRRWtp9nVRKoV2OXclsDjkAelT6f4Uuv7QjvNd1Yaq1vaPaW6i2EIVH272f5judgqjIwOuBzQBk6Xq2v2knhe/wBT1Nbu318iOW1ECItq7wtMnlkDcQNhU7ic5zx0qvZeINdbTtH8US6iHsdT1GO3bTfIQJFDLKY4yrgby4JQnJIPIwK2dJ8G3NjdaUL3WWvbHR1Yafbm3CMp2GNTI+47yqEqMBeuTk0y18DzW8tnavq5k0OxvPtltY/ZwHVwxZVaXd8yKxyBtB4GScUAdLqOowaXaG5uEuXjBC4traSd+f8AZjUt+OK8I1i9i1Lxzr93Ak6xyXcW1Z4HhcYt4hyjgMOnce9fQVeEeJP+SjeJD2F1D/6TQ1cHZnn5p/u7+R0NmFGN2MBQR+VYuuHExLHgitK1YyouDwODWRrbCWUhvSvBwFNrGXZ5FeadBIy7dd11Gf8AaH867uzYRQgH/PNcLbf8fUZH98fzrsQTs3KeBits9h7Tki9h4GfJJyHTKDI7hcncwz7ZqjOVScAD8a0oXDM4P+c1myjfMg9cmvNwrfM4vojXEJW5l1FRkJ+YDPvT1LSkpghfU0kUKSEORz061YJLR7I+G7E1pUmk7L/hiIRbWv8Aw40skUf7sZZRjAp0cSqonlwXGcE9qfDbJC3mEfMevNMkDzuB/wAs+4Nc/Om7RenV/obcrSu1r0RXlmllfCblGajaKcEnzOPSrM9xFbg47VQl1RQBw3zdOK7KEakkvZw0Oao4p+9LUx9VkLTj1A5q9psw8iJSuQwPNZuoNmVm9eau2OVtIAOle5Wgvq0Y/wBbHIn1Hamg8ydh6A1gsSHHpW3K5l+0g/3R/Oshh+9210YK8Ycr6f5IcWuZs6jQI91kc9CavtM0LKpBYknpVDRGYadIAef4asNeI5LkH0r5vEwlPEzurq52Qmo0o62ZZ3NK/Jwo6g96z7iBbi+J42IMYpz6gGQqmQSO4qsskphIQ/vGPJrShQqU3zbdP+CZ1asZabizSCNwI1PyntVm4vVhiwnLY5we9RlEijJP3jWZG672d/XP410wowq6tbfiYucobdS/Gr7jLI+cjvWYzF2cg4+Y/wA6mkvDJ8q9iKiUAMx9TXZRhKF3IxlJPRF+PCxYPOFFYN0waZzWysgaA464NYMoJ35+9jBrfBRtKTZcdWjtPLJsovm4CjP5VnSKdzZ5xyK10QHT1HYoD/46KzW5CgdO9eFhamsvU6a8bW9CpMfkDD6VDMMfQjFTbcYU9MGoypePcex4r1oO1jiY2FSsYbOcdqn/AHci5K4J45qvbOcBh93uKssiMCR1oqaS1GiFlVen+fSsi5OZn+uK1pcIAR1JxWVcjErfWuvC7lU/iIFAJDe9I2Qz49acOvPTdSP991713dToW4h5xTiPl96QDByaaGyKB7l/R8tqUH1P8jXUxbUi5IPzHP51ymk
Nt1FG9M/yNdHCU8k4BzIxavDzWDlUXov1DmtIedolZvyqrI6HhQOhI9qjlmbeQvQnFVA+1c/xGs6WHe7OedToiwMGZSO45qLUY/8AR93cnmo45CpJz0P9KmvZQ9tH9BmulRlGrGxCasZCZCDjPOK0rOYbEUjk96ggiLLkdM5pzKIdwX04rqquM/dByuzZjYKQR0FPF0wwuD8x6+lY0d4y4UnpU63oY45rzZ4N31Vy1VaNVnMh4OMD9afHsC7nILVmJdktgdRgmpA2QSxrnlhmtNjRVVuUL3i9cjp7VXszjVIfQuKfM5ExJplqf+Jnb/72a9mKtRa8v0MoO8rnSOuJG478GmMNxyDtPpVmJhLvXupwKgZAz7hwxHevnYT1s+h1SjpdDcso5OaYWyM4/ClIkB6jHemHcee/X8K3ikZNiEh1IZeOnNV2+9tUY461ZZsKdw4qKQMSQnBx3rem7GckQyLhfmIPFYk3EjCtuVcR7m5bHasSbAm4r08H1CHxFjT+Wk+lejfCPH9m6/jp/av/ALbwV53pw+aXH9yvQ/hD/wAgrXv+wp/7bwU6vxv5HpZX/vEvT/I9FooorI+gCiiigAooooAKKKKACiiigArwnxIP+LheJvT7VD/6TQ17tXhniDB+IniYH/n6h/8ASaGmnY87NP8Ad36ou20jKoC/xVm6yVMhwe2Kt28ywttY8npmqOpp++5NcGGhbE3PAnK9NIo2v+vj9Nw/nXWW7kRODXKW+BPFz0cfzrpIuJXfPUYxVZrFSsi6ErSui9GwUID1P86ZNGPMXH8RyKgjcOAQeUbJqRJdxhB+9g4rxHTlGV0dimpRsx0dr5jFhnB96vbUhhJ7gVWUyJIwC/L2NTCMIN7scdSDXJXk5Ncz0OmjFRTstRsUMkjfvBhCM8U29lEa+XH97HFPkud6FIsE+3pVSQLbozyNyTnntTpQlKalP5IVSUYw5YfNmbJbbnLSE5PXmmtFHjvUdxLLM/yL8vqKrywTBc/N+dfSU4SaXNKx40mr6Io3WDMw/D8quQnFrAe+Kz7jJkBPBFX1+W3tz2K4r0aq9yK/rYh/CMZsLP8A7o/nVEYMpPtVpj8k34fzqmPlkYdj3rWlHR/12HBaHQ6fKYrP6AGmNPHI5UE8Co7V8QDHOAAR7VZEUbEEHH4V5M1GFSUn1Ku5JIgZtxCp1qVWEAI/iNSny4+BgselNACfNLxzxmoc1JWtp+Y+WzBIncb5eMdMVny2pllLnIA5GDVxppZ5Sqr+74IYUy4kyCkQ3EDFa0nOMvX8BSt0MxW8tn2/hVhOFXP1NONkY1DtnJHSmRklmJHFdjnGavEyasWiuEx2wawpiN8h9MiuijIeHjn5a564TaZD6tmngpe9JM2glc7eJs2eB1KL+oAqhOvlICOucVftyBGpPTy1/kKrXBBIHvXztB2qNHXVV4JmdI2GOewprKVtzu69aW4RiQyjO4/pT5dr454Yfyr1k9EcNtyG2BUKCPlPWp3gG0lc0tuECCMnk9KcbdkTAZjzmonU9/ew1HQqyRYXPY8N9BWTdqRO/oK3Sh6+3IrGvB+8kz613YSd5BHSRUTr+NDn94SPxpG447etIwy4I716XW50Ja3EBypPvSEbWyO9OxncB3NIF7dRxzTLuXNMH/ExQex/ka30wbfHcdKwtMx/aUZ9cj9DWxgurLnGCcYryMcr1F6L9TCb1uQCTEgA6buar7sLz34qZYv3jcmoUAWML1K+tVFLoc7HbVMuD6Ul1D9xecEYoVwSrNwAalvW/eqF5IA4p3kppAtrj4nSKNUPU1FIiCTyznLDNSeUpUfNy+B9KeEBUbuHxgVlzJO49ymLQgnHT600Qurcjg1cUOr7ccDvSjcWwV47GtPbSEV40kU9BTslj83WpyrHHHTk1EYPl3MSCe1Tzp6sLFNydwP8NLZ/8hGP/epz5DEY+Ud6S34voMDq4rpbvB+jHDc6G0bLu3vxUjKkr5yc4qrExRnx1yc
Cra7X74I7CvnaseWXMdsHeNhhiPakET9xzUuwqBgk0ZYduetR7R9CuRdSAxPg5A61G8bHsMVb3MOSOKYwMh6YHtVxqNPUiUEZ0qKM5zk1hXChZGx6108ixxLgtknpmuduVxNIP9o17OAqczZg1yyJNPz+8x6V6H8I+NN1/wD7Cv8A7bwV59pfJmz2SvRfhSALLxCB0/tX/wBtoK1qv960ella/wBol6f5HoFFFFSe+FFFFABRRRQAUUUUAFFFFABXhfiL/kofib/r6h/9Joa90rwrxFk/EbxIAet1D/6TQ011POzT/d36otxxpkM4BIA5NZOplvNGSQcE1qpG8jdcLxkVmamQZtxU8ZFceEf77e58/PYowEmVOMHeP5100UoBxjqcVzcRAni/3hW4cgMRxg1rmEVJpBCVtUW7VwFkJA5NPyqTxsMEDOKjtl3oRjsOfWljGZlRh0FePNLnk/62OmLfKi7HOQNpXJFSDfLwchahiZGUFhhunNSiVpF2ICpPevOqRs9FY7YSutWOZ4oRlQCw44rNuYpLmRJGJVB1U9600tlXmTB7n61RupWkby4gRyRmtMJJc/ub92RiIvl977ihLPDEdqlarPeqwIwPzqZrFQ2XwTUclpAoOAPzr3qfsfNnly5zInwz5B75q4ATaW56fL0qjMMSlR+B9MVon/jxg9cCvRq6KP8AXQi3ulP+CbnsOPxqseQD3qzxtl98fzqtjk+xren1HA2tOwbfc2MHg1e8jOArYxVLTEDwFT096vLFMMYfp1rxMTK1WVnY1gtNhCix/MxBI6ZqPBnPzDaF7GpVt2x++YMM5qJ1eRv3Z2joRWUGm9/mOSfYaZVT5YwCRxxUgSOFCzY3HmnLBHDliBuqNLaSZmaRsqTwD2qnKD62X5itLtqVbi4aTACnFV87ZNuOK0LqKKOLC4ByO/vWc7HeWrtw7jKPurQxqJp6lxPlUkHAHb2rBusvvXOMjrW5EN0Tkn5dtY0+0uSOnNdGE0nIuDs0zqoJh9mjGeiLz69KkkjDAnPJGRWYjMgRPVQc1pRMTtUn5gMZrxK1L2b5om9OfMrMpAFxkjBUHio5oyFXjB61d8oi4HOQw6U6WESSZHTaRWirpSXYh020VYYg6Mc4boD6VIVkRAOWx39asW1qWgzwGz1qR4JEU5ycc8Csp4iPO1c0jRly3sUXjYgNzzjisW/QiaQe9dQYiVQ4xuxj2NY2qwqksvFdeAxCdSxnVpuC5jAODnPc8U0nAx6d6kKgNyOhpmB8wx3r6FMtMcABimjrjvjNIc4HPemtnGehPFNIaRe0oZ1CE/738jWxhm3EZGCfxrG0jd/aMSk8jOf++TWyA+8lTheQR+NeXjP4vyX5syqrUrtllJDbT1pjgZYr2B4qYR/umz1A4qu7BCuO55pQd3oc7I+ChHtn9amuW2bGPXavP4Um1cHFLd4YKGHHH8qu95oFsPjPmEYPPUCpyQ/zZwfSqtmjOGccbTxU4G7BXjmsaiSlbsNAjNuAwfrUofJIKYx0NNjV1IBySOSak3ZJGw1lNpspDC5Kg7cHqajAaQBiSue1PLErkAg9TUZ8xwGVto9KqK+QmVpHIkZdvy460yzH+mRZ/vcU95P3hQqfrTLMg3kY/wBrNdn/AC7foKO5phz5r887jj3q4GB77TVNF3SE55Bq1hSeRjjkmvIrJaG8GyT5gwO8nA6UwyNkcng5/wDrUOODtYZxUXzAAFsn1rKMUy3JomMzYJwT7UnmMWx0FRqHAOWzzQFbeSWGKfJFC5mD7cfMQx61gXxxK2O5Nb0wVVJxkgdqwLtg0jHHOeK9LL97mcviRLpRBa4/3cfzr0X4Uf8AHj4g/wCwr/7bQV5zpiErKy9gGPv1r0T4SZ/s7xBnr/av/tvBW9VL2smelln+8S9P8j0OiiipPfCiiigAqK5t4ru1mtplLRTI0bgMVJUjB5HI49KlqG8tIb+yns7hS0E8bRSKrFSVYYIBBBHB6g5
oA4XQ7Gyj8S3mpeFbGO20i0s5beVoBtivrncpG1Rw2zawL9y5GTg1k6NZWthpXgPXrIk6vqs8S31xuJe7EsDvKJP72GGRn7u3jFd3o3hLSdAeM6cL6NY4/LSKTUbiWNV9AjuVHT04osPCOhaZqf8AaFpYCO4UuUzK7JEX+8Y0JKpnvtAzQB5/Dawr4WsPFgB/4SOXXER7jcfMfdeeS0B/2BGSu3oNuetGr2sM/h3xh4nlBOu6dqMy2dzuO+DyioijT0VhjK/xbznOa9AXwjoSax/aq2IF35pnH71/LEpGDII87A/+1jPvRdeEdCvNW/tO4sQ9yXSRv3rhJHTG1njB2MwwMEgkYHpQBoajJqEVmW0y1tri5yMR3Nw0KY7/ADKjn9K8O1J7yTxx4gfUIIILv7XFvjgmMqL/AKPFjDFVJ4x2H9a98rwvxB/yUXxKP+nqH/0nhprZnnZp/u79Ua0dtI6oIsY6tn0rO1m3Hm4H8NdDaukMe89WUfyrE1YOAAQN7gkj+dfO4GvOWKt0X9P7jzMTSjGimtzAhQiRCf74/nW4gLF19TWPCczxA/3h/OtrG1zjtzXtY2WqRw09dS5ENmB2ApSM3RA7imREyyAn7o5FWRb7JAed2P514NSShJ33sd8IuS02HJbxyNvI5Ix1q0GVEAUHPQU0Wm5sgnB561dihjiTcSeK8jEYiOl235Hp0KEtdLeZTjt55GcSlSh6YqvdeVZxscHj05q+bredsWCc8/SoJ7IORNJn6Z4p0azVRe20XZCq0k4futX3Oala4nl3KRsxxmoTbXWCXK963LiSKJ9vf6VRa9jYlQeR7V9NRxFSUVyQ0PDqUoxbUpanOzD5ueoJBq+QTa25HQrVXUABOxHRhmtCFANOtv8Adr1q0/chL+tjBL3TLkG1Jfw/nUKkFwPWrM8eFmH0/nVVM+ao7Cuqm7xb/rYcfhZvaYrPbkDGAT+farytKgG4jPequlxsbVmXqeRV4yEDLAZr57EyvVklqb017qYxUllcbiNvemSSpDJ5aA7iM1MGlbhFHvTTHHbFixOTzzWMZK9pfci2tLr7yFLeSUkyY257U6a4P+riznGORThNJKAIgDVry44I+etE6nK1zq/ZBGF0+V/MyTaStuklxgdMVnswLMK2JrkSHYuMHis0wbZ2H8J6V6WGqSafPoctSKT90fHk2h/usKxW4yvpXQqP3G1egWsK4Qq7+mDXZg5XlJDjo0dFLA3lxsMYCr/Sp4l3NuHXIP6VYtgHiGeyL/IU0MMZX1xXgSrOScOx1KmlaQkfyv5j9Q5UY9KkdPKBA+n+FLkbDjqSTUiktCS33jiuaU3e5tGK2JbeGQwIwxx1qcv0yDx14p1qWS1Tjjv+dW0eNse/HSvFr13zu6vqz1aNFcqs7aIpvHzu7GuX1lc3Lr6/0rr5wAwx0rldaJWeRh93BBP4V62R1HKt8jgzOHLD5nOSj5vxqEqPmPqamkOSBUODn6k191HY8yGwnXilkwR9KAoNB5O096rqadS5pAzqsX0P8jW6X8sM2DgEg/nWLowP9rQ+mD/I11CqoQnHGTn868PMqijWV1fRfmxqPMtDHYOJ1z90moGiUlT6jFakse+TI6BqoSABA3Y06NXm2OWcbEKqUUlumRj6U+8A3rn0p7ZOCBxuA/Ci9i3gD8a1U/fVybaBbP8AuMr2ODUscW58r9ahhxDECe5yavJGwYOgyKxrS5W2upcVcWN8HYRz3pTKhJGDxTi4zz1qMyxk7c8/SuRK7vY126kJlUpuUHnmom8xx8uMe9WNyk4X8aY3mFcoBXTFpdDJoou6iQqRUdoo/tCMf7WaWU4uCp9aW2XGowkf3q79qb9CI7mtHETJKV+8CcfWpmCvlHBzjmnwRNukbHOSVqRio4fg4rwZ1byOyMLRKxjUHjrjFMaPOPz/ABqYqjHqaTyV24yatTtuyXEaI2x8uM04w5OG6VIsIA4J65qTylyWJNZSq2e5aplSQIo
wAeBXN33E8hHTPFdVKUAIFc3fjEjY6Z5r1Mtl7zMZ+7JE+grukuB22/4133wmIOn+ICOn9q/+28FcL4cHmSzY/uj+td38JwwsPEAYYb+1Of8AwGgrab/2ma9D08tX75vyf5o9Boooqz3AooooAKKKKACiiigAooooAK8M18Z+IviU+l3D/wCk0Ne514T4jbHxE8TKDgm6h5/7d4aaV0zzs0/3d+qN+xkEkXz8bTWbq9wD84AJ24FSormRWSQquOQO9UNYdRIoGMYNeHhMPH61zd/wPIrVW6PKZUbYuosf3x/OtstuJUHk5H65rCiI+0R/74/nWwrYkJ9zXr4yOqOSLsi5BJtdecKrZNa8Z82SM46EmsOJlEBbqcZrUtZSJogOc8H2zXz2OpXTkulz0cJUs7PyNIRyo5VVJXsalW2ZUGWb15pEuNowy8io5LqRwyBWX0NfONVpuySXme4nRirt3FeW3tlLuygDgk025n+0RBYsH6VTNsXwJX3KBgg9zVlZYIBjKj8a6XRhBqUbykvuMFVlJOMrRi/vM6Wx3Sl2Y5PakNlEASMZ+lMnuppZTsjfAGcjvULfaSM/OM9q9uEazS5pWPJlKld8sbmPqkIEhXPIq3Cuba2Xtt61S1FmaYHBz39q0bMb7aAei5r2qrcaEG/60OGOsmkZVwctcL2GOfxqgvD/AErWv4fLlnXHVQ361kD5XOTXo4aSlC6/rQlK10dNo8u20C4yeo+lXzJEWySMHgVS0dVWzDEAnp+FaGyFsY247V81i3H28nZ7nbSUnBAsyEHywCajEJdsyZGDUp8mJcgLntVGe4klGE3Ic1lSi5P3NF3ZVSSive1LfmwxOFG3OagNpJOSzOygnOKiij+dS7ZPvUsl6wIWKMsCOorX2coO1LfuyOeMl7+xLJBFEuSRn6VjP8pcE8gnn8avrDcSv5js23+6frVK5C+aRkZ3c114VcrcXK7MKzurpWJkOLTjlgORWDOcMy9a2S4SJiD2x9awWJD4bkgdfWvTwUdZMzWrR1QJiUtuPMa8fgKiUt6nrupZnDJHz/CP5VGTmAkHn/69eZCOl31LlLUtifc208ECpY5SWIPHUCs/zlwX4zgEipGuAHVl5HtWcsPfRIuNW2rZ0dpcJ5QUkYNSsEPIbHpWdaPGYgCRntSyiTcSspAx0r52eFXtXZ2PahiH7NXVyWeYphSeB3rl9SlLTTZ6b62J5W2AMcsvX/arnL+T9/L/AL1fQ5RhuWVzycbVc9DPf7/FM7/jQxwwOaYxLA4OMGvqUjKMdBAxBGB60FjgNjnHShRg4J5oBB5qzXQ0dGb/AImkBPHX+Rrs4RH9n2kjJJP61w+lsRqcftn+RrrLcEwljJyentXzedUuaone2i/Nl0pcsrWEvZUic9AvrWYSj4XjpmpbxsqAxyepqm7BMsD2IqsLR5YLucdafNIlEgXIPduPyp18PlTB+8BVJ5NwHHJ5+nFTSSb4kbdkhQMe9dPsmpRkZX0aJ4GQyfORgsVANXfnik2quUxnNZ1qA0RDYBzkE+tXEuCBtYE9s1zV4Ny0NabSRMZUJHTPeomMee2c0u5CegyaXEZ44zWCSj3LbuICoPAGe9IzNj5EyKkBjzkAHNIZAv3UyPahPXYLeZi3H/Hy2RUlsmLyE5zzmnzhWuCcUyF9t3Go9a9Xmbp2Xb9Dn+0dNBH+6yDzUchHVgAQOaW1cmJRu5Xr7051O4hkz718xqqj5j1d4KxWOwtjOOKMJgfP7D61PsTP3QDigRp6DHatvaIy5GQhAeA561MEAYncenSlEa9sD3qZRGBkkGsqlU0hTKsikjhc+9cxqJYSuoGck117su3CrXK6pGVvW9Oa9TKal6jTOfERUWmWvCyAXFyPRB/Wu4+FZzaeIv8AsK/+20FcJ4fk8ua5OMcY/nXdfClt1l4hP/UV/wDbeCutxf1ypJ9kehlsr1LeT/Q9Aooorc9oKKKKACiimySLFG0jnCICzH0AoAd
RWTpPibSNchu5rC6Lx2jbJzLE8XlnaG53gcbSDnpg0zR/Fei69cNb6deGWUR+aFeF498ecb03qN65/iXI5HNAGzRWJp/i7QtV1H7BZX4knbf5eYnVJdpw3luQFkx32k4oi8XaFNrH9lJfg3XmtAP3TiNpVGTGJMbC4wcqDng8UAbdeCeKXCfEXxIfS6h/9Joa97r568YyAfErxInc3EJ/8l4a1ox5pWOLMIc9Bo1XvTDgDJJwOKy9UmKsgJyec05LkeZn6VSvpvM+c1nhsOoVE7HzUG5ySZHC5MseTyGH862lffJ1GFBrmIJ1adCM9a0o7grIxB611Yig5bGtek4OxqrcAIoHc4/CtLT5z9p5bntXMxylWOelaFnNtvFfPQk152KwidOS8mZU5uE0ztEnhkG4kZ781HcX0canHJA6A1g/KZGYHhvelfy1cSHOQMV85HK6fOm235HpvMJ8tki295LL8iEgnkEinRwF+Z2DVnnUY4+mc/SqsuryOyiM4UjnIr0I4Kq1amuXzOV147ydzpxJCkfUce9QG8iIOD+tcytzLImcjmhPMRTkiiOTxi25S1HLHyeiRPqDRiViB97mpNPnAjiz2Wsa6mMmAeq0+C5CIgOcYr15YRugonIm0+dGpqUiPNMw6+WB1rn2bMvB7VLdXXzMf7wrMWT9/uPXGK7MJh/ZQsdFKlKd5s7nRQGsgXPBAFaAtwPu4Arn9LuCbHYDyAKsDULkELlcjOePyr5/E4SrKtNxdtRwrQjFKSNaSFANzY4rLubtUlIQE/SmtcTzZVyNpqviOGVjzk1eHwzh/Ed2Z1aql8KsTRtM0qtu+XritaJYYEA4H41hnUFVwBn8qY9xJNySK1rYSdWyfuomnVVPW12bsl/GpC4PPFYd5Iv2mTB5JpJC29GyOOtZV3P/AKSa3wWCjCXuhKc6zsXyzFZAWyuAQPQ1lyyDzQPWp47jKNnqRis2WX96MeterRpNNl4ek3JpnSyz4WLB/hFM+17TtHTmqBlLJHu6rTC4Bz3Nc8cNG1mYcruaCzqc/wC7g1KZlUBFHFZCSbWH05qRLksCDTlhgdOS2Opt33W6spwwpXnnXq+aw0vniiBU8Y9KkOpOyjJ6+1eW8BPmbsmjVVtLGi9xvXJOGHc1gX0n76XnqSanmuVYZOeTWZeOAzsfrXoYPDcjFTvUmkyLzPmIPOTSCQAEe9VjKNw+lMEmODXq8h6Sw90XPMB574pVOCADx3FUVmAJ9SKPOAO4dSKOQp4Z7GzpsgGpJ9D/ACNdB5krQny32kcCuQ0+TF8GHXH9K2lvXjOAflx6V5uMw7nNNHBiYuE+XyLE0rZbc2TyB9KrGXLAfjUEk2ZCx9cVCZ/nOKunQsjmVNyLhl/ec85GKTzNn0FVFuAw5pLiYYwOpAq/Za2KVGXNY2Lc74cg4IPFTJNtIVgTnvWRDOyxKVPUDNaMM6uPmzk8Vx1qLV30IacWaC+WcHHPTrTtkWQeMj3qqgTBxnmgooOR2rgdPXdlqXkWx5Q54/OlMsargLn6VQwi8c+lSxSRxjHPFEqPXVjVQgnaNXk7H61nRT/6bGOfvUXsqtPKRnqP5VTWQiZGHrzXr0aPua9V+gQp3uzrba4Ibcp4B5FaouEdeRzXI2100ZBz7nitSO/jk6549q8PGZe3K6RrRxDgrM1y8ZPSlXysduPes03MbDHNMMyfNycNxXGsHJ9zb6wjYXyiCOOfemOYUGOPbmsc3CjoTUElwucsTVwy+TfxMUsWrbGw91GnQfrXParcq87GnPdptIGaxrube5PcGvYwGAVOfMY88qz5ehqaNMu+Yn+7XoHwlO7T/EB/6iv/ALbwV5Zp0uzzh6jFenfB1t2j66f+oqf/AEnhrqrUuWpKfex6OXR5cTJdl/kej0UUVke4FFFFABSMyopZiAqjJJ7ClooA8lOs6b4iHxH03RdUtLq+1GP/AEOKCZWacCyjU7cHnkFfrWmdRtPFWv6IPDsoc2W
m3QuGQEfZvMRFSN/7rbhnb1GwmvR6KAPLNGv7TU7PwDo1gCNT0qSNr6AKQ9msdtJHIJP7uWYKM/ezmq9ncwv4Z0XwopP/AAkNtrUbzW20+ZEEujK8x/2CgJDdDuA7163RQBU1GPUJbQrplzbW1zkYkuYGmTHcbQ6H9a+e/EsN4njvXxqE8E92LmIPJBCYkb9xFjClmI4x/Ef6V9H14B4sTd8R/Ef/AF9Q/wDpNDW1B2nc48dLlotkEMG0Ac9M1WvIyABjtXR2liJIgx9B2rN1K3w7NjAzgVhQxkZ1nE+ZUZQtNnNx2x89XOQemK0BAS2TnNCx/v1H+0K0orYmXBHeu+tX5dWa168ptFKOJs4x9asW8bGbjoADV6O3AUkDJzg1YsbYC/wR8pGOlefWxaUZPsjCMZTaXcdFZZXIZufepEsFTOWY/Wr62Ui7lG7GeKk/s92GC7DPevBnj1f49DujhJfymLLDGWKjqKqPat9pjXb1zXWQaUiYLHJxjJFWWtIEwxVcjocVn/btOm+WN2bLKqkleWhyL6dKQCqnI7VD9huBC25SPxrqpXhD7QVz6VWmZWjOFGa6KWaVZWvEwqYOEb2kcZcRMrfMMHNJFFwp7ECtO8jZ3ciP6VJb2XmpGOnGele99aSppyOL3n7qMK6jJB45xWd5RMhA6iunu7A7mIBwB1xWR5G2U8V2YfERnHQ6qFd004s3NIsvPtOpHTpV5tMkJb5T+dWdGgYaahRcnANaAaQDmPn0r5TFY+oq8lBrc1p4WMoKUupkDTHAJIIz71mzWwSVuTgV0585yFEJweprLvtNkaXncoIyTirwmObnapJIzr4ZKN4JmPtXcAOSTinpYycsoJJrRt9OUSJk557ithxDGvAX8q3xGZKm1Gmr3M6WFc03J2OWeyuNuWQgD3rFngIkJbOc13Ejq6EKAc1zV9D87cYy1d2AxkptqSsJr2MrxZmqh2nAqi0B8wNzkVtRQnY/Gcc1SeMq2MdjXrU6urRpRrtN2LBhO4em2mNCcr6VomAqY8j7yCmmBmZlKkdga5lXRyqckyibY/MeaEtyTjHtWg1uVAXNSwW/7znp/jUvE2jcOeT0IFsm8vgEnHSmGylwCV6V0CWMhRWRSRihraYBv3J4/WvN/tJXsmjT2E7Xsc00LZKgfMmMiqV4mQR7V1slmgyzYVu/FYV/bbJGx06V34XGRqS0CN6Uk2c9JG2Rx7Uzyc+uRwa0DGTMo28ZprxlZDhep59q9VTPUjidkZ5g2fMSe9OW3GN2TzV54slsDOR0pRH047U/aFPFu25Bp8RFyPUg1tLbOVyFzVXToi18g29yB+VdLHZyoi4jJU5JPpXm43FKnJI8/FSlVqXXY52a2kCAMuCRVd4jn3reuoX3ZZMVmvFzzwcc1dDEc0bmEari7Mo+Rkd81LNCQicdQKtCL5gM9qluLYhUPOMD+VW6y5kinXbaZUhiYxLtGSAK0IIHKZI4HWmW0LhQwUkYFdHYxx+Su9QN3qK8/G4z2SulcmEHVlYykiHAyeaf5C85Y8Vvi1hOMBfbinC0iPp78V4ks1j2Z1rAyOcaJVKjJ+bpUMiYyoJzXVfY4eMhT6cdKintoFUnaufpVU82g2lZingJJXucHcKwdt3FQxp8yD3roLy3QyvkAc+lUGtf36Kv8q+lpYmMoHMqjiuUfb2+SFbPYCtD+yn2Ltzwc9a0tPsVdcMOeucVcFs8I2gFsdzXg4nNLTcYvVG1PCSlHml1OdOn3A6KeT60v2G4x905z610XlOSP3dHlPjPl85rH+1ZeRX1JeZzn2G4yMqcY55qJ9PuSchCccde1dR5T4P7vp+tJ5Tk48uqWbSWtkH1L1OVksJFU7lIrJuoSrE4613M9k8sZ+Ug1zl9p7jI55OelepgMwjVdm9TJwlQkm9jHsoifP8AUc16f8GlZdE1wN1/tQ/+k8NcHptth5wR1WvQvhIu3TdfX01X/wBt4K7K1VSk4+h6OXVOfES
fdf5HodFFFYHuBRRRQAUUUUAFFFFABRRRQAV4P4ij8z4meIl/6eov/SaGveK8O1vH/CzfER7/AGqL/wBJoafNyptdmefmf+7v1R0VrZMbWPZ6YOBVDU9KYtjPAHPFb+lTeXajfzkDpVfU7kZyAa+FoY3ERxbjHuwrYSg8KpPfQ4aSxMNzGf8AbHb3q8oIncbTxyKfdy77yJcHGQT+dOaZVuHHpX1sqtScVzLWx89ZJ7kkhSK2R1TluoHvU9hGZLkkHgDNVfmdfYHP4Vb0tmjuZTyUGAAPxrhxF40J23N6NnVjfY1UuCBzGc0n2p2bAiYVNvjJzinCVFGcGvmXKO/Ie6lLbnGhpWGBuFMazuZnU+eQB1GOtWPtSqM7TULasquF8uT8qiH1hu9KH5Fy9gl+8mSLpa7tzYLeuKl/suMqeFz9Kh+3u3QN+VA1J0bBVz9BUNY19TRSwa6GRqFj5EmAOpxwKTSrUskKMcsAM+9W7y58+XbsbOM5xUunbY5GcgjaMV7E8TWjg7S+I8uNClLFe78NzO1WIINqL19K5e6Ty3PHNdZfThpR9Tmuauo98rOeoyPwr3MnnKMEpnnYzl9q3E6nQnCadHxngVol0LZ21Q0NEGmxlh1UVpBIvb86+Vx8orFVHZ7nu4VSdCKuhFkUH7tVb5jI21UPI61d2xD0/OmSOikEAk9OK5qNRRqc0Vc2qwcqfK2ZcFlL56MWIAPTFXk0w7fnYMfpTmnxgBWz9KX7ezjIVh+FdNWviqjvHQwpUcNT0lqIdOiVchVz9K5vUrIKz5I+9xXRrPMx5zj6ViX/AJkl1Ipzjgjj2rvyqdeNZqcrnJmEaTppwjYyfK8uMEDnIzWZINxJx2rYkdo1k3KePaspskjIPSvrsNJu7Z4ezN+5gAggcDnC1GYwzduKsXUv7iGNeuwVSR33Hn615dHnlC78zoqcqloLNHvDsp5zxViCDJB7ZFV0OAB71ZDOE2xnB4Ap1XJLlTJha92dFbKIowpXIqRhH6CpLbY0EYf72MVIYIWz0596+HqVl7RuVz62nRbguWxUks4ZkLbFPGDXM6tprCQhT1bPTtXXHbDux909qxdSuFa4KAHIx/OvVyjFV41vdd0edmNCl7O70Zx00HlyDI71WeM72PvitqePzJ29NwxWfcJiQj3Nfd0K3Mlfc+fUnFlRUwwH500gbvarCrhjnqaj2cstdKlqWp6lvS1B1GED1P8AI13MSqLdBtzxzXFaQgGq25Hv/I139rHH9nXf1PPWvkeJKvJOHp/mepltP2jdjC1KNBt+TrmsF4ssTjqK6/VI4tijHf1rmnwjsMdFrbKcQ50U0cePo8lVoo7R9oVccYxV26UMkajGeOPwqphtkeT8+ev41oCEtIHPp+lenWlZpt7HHFXuiS2tmjgHyFgRnpWvbwxiJAwAbqM9qlsEU2cQYfwg1O1vGWDAcgYHNfL4vHc83B6ant4fCcsVNakQiH94U8RH+/TvJx3FJscfxCuB1L7M6lC26HCLnlh7U/7PG4w201GFbPUYqVEPcisZyktVI2gk94mLe2kZnYAAc1lvalLtGzld2MV0stsJJGY4yaz7y1aMKykYDZr38FjtFBvoePicK03JI04LcCAbSAwpSGHXJp6BjCrIQMiht4714Lm5Td2euoJRVkM3EH7po83gfIeTSksKTcTV2T6EXa6jfNOD+7PBxSecfNKeWcBc5p2TS5Y9Kfu9ha9wyXXoeawdSUiYptPHeugQsO9Zt/EZGZ67Muq8lbXY5sbT5qXmYmmRFp5M8fLmu0+FQ22XiEYx/wATT/22grlrRDHPNz1Q4rrfhhjyPEWP+goP/SaCvp6VRzry7WRllUbT+T/NHeUUUV1nuhRRRQAUUUUAFFFFABRRXN6v40sdIvbq3ay1C6WyjWW+mtYg6WqMMgvlgTwN2FDEDnFAHSV4X4hBHxG8SOOv2qEf+S0Ne5RyJNEkkbB0cBlZTkEHoRXhviKRU+IfiUH
/AJ+4T/5LQ01foedmn+7v1R2NiEe1jBzwKfdxR7C3rXO/bzCoUNyRxUV3qRa28stjuDmvi1lFeVbnT0bMv7SpKlyOOti3dxRiT34NZLIpuXfv0NQtfNM2SeRgfrT3k27iOSRmvoqGHqUY8snqeNVqxnK6RZt3zHLn7pxitbR2jYze7Vzsc22EEHqcmnx34iI2thjjis8VgpVoShHS5eHxCpTUnrY657aM+v505YIx6/nXODWcoDkdOP5UDVmboea8b+ycXazkel/aFC9+U6kRxik/dA1yjasxThuenU1VfUm353n06miGQV5fFIqWbU18MDuA8QpyyRVxA1NhGzbzgdeamTUiVVg5+YZHNRPh2ovtFRzlfynZNDE/zVSKp5r4/wA4rEh1cqgUtyQT1po1La29jgc/lWdPKMRBtN+hVTMqM7NIdcIC5dvvZ5rNmUG4Ze2OaW6vl88gNwxzVCW4AdnJ4Iwa+owuHqJK/Y8KrNSeh2ekxJLYpnPHAq6LKP1PXPWuZsNSMNsqLggYFXV1k5A45r5vF5di3Wk4PRs9rD43DqnFTWqNr7HGO56+tS29rEju3PzEE8+1c/8A2yTjkc0LqxyBu6nFcssrxji02dEcwwsZJqJ1nlxBc1VVohmucfViEY7vu9eTWc+ptvGXI/E1VHIK878zsOrnVJW5Yna+dEDWTeNGt1urF/tFs/M2OPWqd5fYkXLc44ruweRzp1L36HHic1VWFlE072SLJFZkoQDp1qi8xd0+YnLetSyzrkjPAOK+ho4R0ko3uePUq87vYvzMZBFMPu4wKaOUJH3s1PNJH9gGCPlrPa4CsuDwRWdKLmrJbDnZMsRkkMG6HgVPasDJk/dOapxXKmIA4BDVDDeqhQZ5LcVcqE5qSsTGVmjuzbRyCOTJyBxz7U8QKo4z+dc8mrsiBWwDjIpza4UIVsAkZFfJSyrGN2T0PoY5hht2tTZlPlYB6HgVhX6jzN3Y7j+lPXWd7t5m0KvOazb6+Tztgb1r0suwFalUtJHDjMVTqx90QIdxY9NwxVWWLM8p7dqct4r7UU8g80ksqhyAehya+ghGcXqeVKxUMZOfXNNaIvIR/DyGqzFKgPJ68imiUKRnGSa6lOSexKdh+lRhNQjI6KD/ACNdxDCsqJJk52461w9vKsV0DmtyDWAkQAIIU4NeDnWFrYiUZU9z0sBiYUrqobN7aIYu/PHWsS6sk3nOcY9akn1hnBXjjmsqbUmlXPGTXPl2DxdNWkysZiKFR3ihXtVRlxnA960TtWOJfVaxjdZGCannuoyYSr8omCK9WrQqTcU/M4YTjFOx2lraRmyiXn5RjrSy2wAGOw9aw7TWswKMjcBgj3FMn8QlY94CkY96+TeV451XbufQrH4RU0mtbGz5bZxR5TVzyeIZSgYomT9aVfEjMcbVz+NdLyjG9kYLH4bzOg8k0ojNc9J4hkHRV6VEviSV+VRCPxoWT41q9kL+0MMu50wiZuT1qK4t2ZCO1YyeIHLY2rip/wC2vMBUbcioWXYynJOxTxuGnG1zXVHW2XZjO3v60jiXnGOnH1rIOtGAqkgUAkKvvUn9tKQTkcHFZvLsUnflTL+u0GrXaNDE27+HG39aQCXAzj3rPOsoASSMCkbWUUgEjJOKtYHE/wAhH1qh/MaI87BzjOf0oPm4bbjpx9az/wC2EJAyOaDrCA4yM0fUcR/IH1qj/MaIE3HT7v61DNC8kR34z7VRfXYow5JHyEA/j/8ArpH1cFivy8Dmrp4LFRkpKNiZ4mg42cmMWIrcEt6V1Pw3QRr4kQdBqo/9JYK4dtVDzsRjAU5Ndl8LZvtFr4hl9dVH6W0Ar6HC0asKnNPt/kTls4OpJR7f5HfUUUV6B7QUUUUAFFFFABRRRQAV57qhvtIv/F9uukX962tKsli9tAZEZzAsJR2HEeCmctgYavQqKAMa20CNvC2n6PeTXH+jW8UTPbXMkDFkUD76FWxx61jSfC3wpNcy3EttqEk
8pDSSPqt0WYgADJ8zJ4AHPoK7KigTSejOOPwv8Knrb6gcf9RW6/8AjlIfhb4UbrbX5+uq3X/xyuyopWJ9nDsjjB8K/CQ6Wl9/4NLr/wCOUv8Awq3wof8Al21D/wAGt1/8crsqKYeyh2Rxv/CrPCeMfZb/AB6f2pdf/HKb/wAKq8JZz9kvv/Bpdf8Axyu0ooD2UOyOM/4VX4SH/Lpff+DS6/8AjlKPhZ4UHS1v/wDwaXX/AMcrsqKA9lDsjjP+FV+E/wDn1v8A/wAGl1/8co/4VV4S/wCfS+/8Gl1/8crqtQvoNM026v7pttvawvNK2M4VQST+QrA0nxVe3Wp2NnqujHTv7RgaeyYXIl3BQCUcBRsfawOBuHB54oD2UOyMDQPhdocmmzHVLPUBP9sulUNqVwuYhM4i4En/ADzCc9+9ao+FnhMAAWt/gdP+Jpdcf+RKm8S+MpNC16z0qKDTN1zbPP52o6l9kQbWVdoPlvuY7s446GuntnlktYXnSNJmRS6RvvVWxyA2BuGe+Bn0FAezh2RyX/CrfCn/AD7X/HH/ACFbr/45Sn4XeFSMG21Aj/sK3X/xyuxoosHs4dkcYfhX4SJybS+J9Tql1/8AHKQ/CnwiRg2d8f8AuKXP/wAcrtKKd2Hs4dkcaPhb4UUYFtfj6ardf/HKX/hV3hX/AJ99Q/8ABrdf/HK7GilYPZw7I47/AIVd4V/599Q/8Gt1/wDHKP8AhV3hX/n31D/wa3X/AMcrsagvJbiGzlktbcXM6rlITIE3n03HpSsg9nDsjgvEfwy0WPwxqr6TbakdRW0la1C6ncsTLsOzgyYPOOtaA+FnhQqC1rf5xz/xNLr/AOOVteGNcm1/S5rm5sls54bue1khWbzQGikKEhtq5BK+lUNX8V3lpqN/a6Xox1FdMgWe9c3IiKhgWCRgqd77RnBKjkc80w9nDsit/wAKt8KHrbX/AP4Nbr/45SN8K/CTHLWl8T76pdf/AByursb2DUtPtr62bfb3MSzRN6qwBB/I1YoD2cOyOL/4VV4R4/0S+46f8TS5/wDjlH/CqvCR/wCXS+9f+Qpc/wDxyu0op3Yezh2Rx3/Cr/CuMfZ9Qx6f2rdf/HKafhX4SPW0vuP+opdf/HK7OiktA9nDsjjP+FV+Eh0tb7/waXX/AMcpP+FU+Ecg/Y73I6f8TO54/wDIldpRRcPZw7I44/C7wqetvqH/AINbr/45WV4g+GGipp8L6Za6i1z9stlbGp3LHyTOgl6yf889/wBO3Nd1qc99bWRk06xS9ucgLE84iGO5LYOMfQ1W8Naz/wAJF4Z07WPs/wBn+2wLN5W/fsyM4zgZ/IUB7OHZGF/wq3wpz/o1/wA9f+Jrdc/+RKQ/Cvwkxy1pfE+p1S6/+OU6Hxrc3fi280O3tdJX7Jdrbt9o1Xy7iRdiOXSHyjuAD8fNyVPIrsaA9nDsjix8KvCKnItL4H21S5/+OUv/AAqvwmTk2t/n/sKXX/xyuzoouHs4dkcX/wAKq8Jf8+l9/wCDS5/+OUv/AAqrwkf+XS+/8Gl1/wDHK7Oii4ezh2Rxn/CqvCWc/ZL7Pr/al1/8cpR8LfCgGBbX+D/1FLr/AOOV2VFAezh2Rxv/AAq7wp/z7ah/4Nbr/wCOUn/CqvCX/Ppff+DS6/8AjldnWNrGoa3ZzY0zRIb2FYvMeSW+EHPPyqNrZOAOu0c9etAeyh2RyOq/C7Q01HRRZWeoG3e8Zb3GpXBxF5MpGcycfvBHyP6mtT/hVXhL/n0vv/Bpdf8Axyul0bVYNc0Sx1W2V1gvIEnRXGGAYZAPvzXO2Xjia6lsLqTSDFomo3RtbS++0BnZiSEZo9vyqxXAO4nkZAzRcPZQ7Iavwu8KL9221AfTVbr/AOOUh+FnhMjBtb8j0/tS6/8AjldnRQHs4dkcZ/wqvwmP+XW//wDBpdf/ABykHwq8Ig5FpfZ/7Clz/wDHK7SigPZw7I4z/hVfhP8A59b/AP8ABpdf/HKQfCr
wivSzvh9NUuf/AI5XaUUXD2cOyOMHws8Jjpa3/wD4NLr/AOOUo+FvhQHIttQB/wCwrdf/AByuyooD2cOyONb4XeFXxvttQbByM6rdHH/kSsyf4ZaMPFFjHFa6l/ZjWdw1wf7TucecHh8vJ8zOdpl/yBXQeIPEWraFHe3x0KOfSbKPzZrj7aFlZAuWKR7SDjngspOOO1btxdLDp8t2q71SIyhemQBn8KLB7OHZHKn4W+FCMG21D/wa3X/xyg/C3woTk22oHvzqt1/8cqfwb4tufFcCXRt9Jjt3t1lK2mq/aZo2YAhJE8pdpwTnngjGO9dXQHs4dkcb/wAKu8Kf8+2of+DW6/8AjlH/AAq7wof+XbUP/Brdf/HK7Kiiwezh2Rxh+FfhNs5tb85651S65/8AIlL/AMKu8K5z9m1DJ/6it1/8crsqKA9nDsjix8KvCQzi0vuev/E0uf8A45W9oHhrS/DFrPbaVDJFFPL50nmTvKWfaFzlyT0UDr2rWooGoRWqQUUUUFBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGd4g0v+2/Dmp6Vv8v7bay24f8Aul1K5/DNctBb+I7/AFLSb690P7M2iWkxEf2qNvtlw0YQLGQTtTG7l8HkccGu6ooA5zVbq/aOIHwg2otNbDzB58G2Nj96Ny7DI91Bz6VZ8I6Tc6H4T0zTLyRZLi2gCOUJKj/ZBPJA6D2FbVFABRRRQAUUUUAFFFFABUN3NJBaSzQ20lzKillhjZQ0h9AWIAP1IFTUUAcN4Wk8Q6XpWrpN4WukuHvrm9gjku7cCUSzlgmVdsEK2TkY460/UrHXtM1rX59K0oahHrMMexxOkYt5ljMf7wMQSmApyuTwRiu2ooAz9B0z+xfD2m6UH8z7FaxW+/8AvbEC5/StCiigAooooAKKKKACisOfxp4VtbiW3uPEujQzxOUkjkv4lZGBwQQWyCDxitCx1Sy1MM9jOLiIKrCaMExOG6FHxtfp/CTjvQAuo3VxZ2Tz2unzX8oIAt4XRWbnsXZV469a5bwT/b2j+DdH0q78OzxXFoIbWbzLqHGzHzSqVZshfQ4JzxXaUUAcP4osNT1+GbSbbw19nd7qN01WSaHZGFcN5qgN5m/C8DaOe+K7iiigAooooAKKKKACiiigArjvGX9vXt3b6VaaNe3Oiyx7r6ayngSWXkjyB5kiFQRyzDnBwMZJHY0UAZdrPdRLpdvDor21q8TCVWljBswqjYm1SQ2enykgYrjdO8P66mnaD4Zn00R2WkXsc7aj56FJooWLRhUB3hidgOQAMHk8V6NRQAUUUUAFFFFABRRRQAUUUUAcN4lXXNT8Qizn8OX154dttkgW1ntgL2Xg/vBJKpCKf4cfMRk8DB6ia7vvPliXSTJB9kMqu06DfLk/uSvbj+LpzWjRQBxllp+oah4x07V20H+xbextpoZDJLE0lxv27UxEzDYu3dyeuMDrXZ0UUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQByfiSNP+Ez8G/IvN5c546/6LLXPeKNQ1GFfG8drqFzbmA6cLdo5CPI3sAxUdBnv6969IktbeaaGaWCKSWBi0LsgLRkgqSp7EgkcdjUUumafOZzNY20huNnnb4VPm7fu7sjnHbPSgDiF0Sd/G17oP9va2NPOmRXmPtz+YsxkdNwkzuC4UHYDtz27Vhw67r2v23hO2eTcLvRhdyH+03083MwKg/vI0ZiQOdox97JzivWBa24uzdiCIXLRiMzbBvKAkhd3XGSTj3qpPoOj3OnRadcaTYy2MOBHbSW6NEmOmFIwPwoAp+ERqS+HYE1W6gubpHkXzYZ/OBQOQoL7V3MBhScDJBrcqG1tbextktrS3it4IxhIokCKo9gOBU1ABRRR
QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB//Z", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oat
E/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFfP3i4KPiP4kGB/x8w/+k8NfQVfPHjRsfErxGP+niH/ANJ4a3w6vM5MbHmotEF0q+fkgYCj+VU7mJFhTCjmp7h9zgewqtdTjyVPpxXVSi1Y+foRlzRsVoAscynA+8K6m2VEDEgYYDFchBKWlUHruFdOJP3a+gAFZ42DlZGuYRcZJvco3iIJiwUYPP410vw8Vf8AhL7L5R/H2/6ZvXM3bBpB7dK6b4dnPjGz/wCB/wDot6JJ+yIw9+an6r8z2vYn91fyo2J/dX8qdRXEfUjdif3V/KuS+IqIPDAO0f64dv8AYeuvrkfiNx4X/wC23/sj0mc2M/gS9DxpAiqG2imkKXYFRim7+gokcY4r07anznK7m/8ADmNG+INo5UZEcozj1Q17tsT+6v5V4X8NnB8e2g/2JP8A0A17tXLifjPoMFf2bv3/AEQ3Yn91fyo2J/dX8qdRXOdhznjlF/4Q2/8AlH/LPt/00WvAVVMFMDjmvf8Ax1/yJuof9s//AEYtfPrHawI79a7cJszycWm69vJfmyVYUdB8oxnmrKhEXaFHNVlYgcdc1NvxnP4V0STZ59RSehIdpycDpTVVCRwOOaiMhDKB90Dmmh8MW9eKSiSqbsSmNAxO0ZyK24I43iztHKisIuMj61s28mY129gM/SuXFJ8qMK6aSudp4BiT/hKZG2jP2Fx0/wBuOvTNif3V/KvNPh827xNKf+nN/wD0NK9NrkjflVz38sd8OvVjdif3V/KvNPi8im30sbR/y17f7lem15p8Xf8AUaWf+u3/ALJWtH40a43+A/l+aPKiiA/dGaawVUGAMUgk3HPtSbiV5x1OPpXqWPNUWtyHavUAUohUk8DPWjACgDoadv7j6VZu2+gKF8zBA4FNOM4AHFRtIPMz3FN8wcmnYtU3uTKqEHgelNKLkDA4OarCf5W9jT1l5yf4qLGnsZIm+UsRt716b8FYkW88QAKMeXa9veavLjJyP
rXqfwTO658QN6pa/wA5qwxK/dM2oRakz1rYn91fyo2J/dX8qdRXlHSN2J/dX8qNif3V/KnUUAN2J/dX8q80+LaLjSPlHSft/wBc69NrzL4vHCaSfQTf+061o/Gjlxv8B/L80eWyqu0cDjNIFTzE4H3aZM4x7kUiuu5fYV6dtDy1B8pOAoMvA7f0qqUXzA+0cZH60/fgzFepx/SoXl+Vc+5pxRdKnK+n9aEyqmc7RxUyhTHnaMmqsT9M9cVZLDbmlJEVYtOxAAuenINWbZUFwvA65qn5mGJ96khk/fqRTlG6Lq05OL9C7dIhnOAOMiq1xGjkAqMZp9zKTM2O4qGaTnj0qIRaSMaMJLlB0Q4BA4r1j4RIptNSyAf9V295K8iZwwBPXP8ASvXfg9/x46h9Iv5yVlilamddKLVWF+7/ACZ6VsT+6v5UbE/ur+VOorzj1jxr4tqo8S2mFH/Hovb/AG3rzxwuG4FeifF048R2v/Xmv/ob15q74OD1PNerh1+7R5ji3Vl6l2wZTHMu3tXq3wWUJoOuKOg1U/8AoiGvI7Jsh8dNoxXrnwXOdC1w/wDUVP8A6IhrHFqy+ZWGjy4ifp/kel0UUVwHohRRRQAUUUUAFFFFABRRRQAV86eNkY/E7xGw/wCe8I/8l4q+i6+fPGA/4uP4kP8A08Qj/wAl4a3w7tM5sZPkotmT5bMwbsarXsTGMLjiti3h3LtqrfQkInrXVCquex89RrtVUYVtbtHPGOeGz1rpljYooPTFZcEY85D/ALQ/nXXx2atCuP5VljsSqdrmuPqSrSXkc3NAfMGM4xXT/DqPZ4xs/ff/AOi3rNubbbLjHQYrb8CLt8X2H/bQH/v21Z+356ehhh6svawg+6/M9kooornPrArkfiON3hbA7y/+yPXXVyfxDGfDaj/puP8A0BqUnZHNjNKEvQ8QKHIA60hgIUnJzWpLAEi3Y5z6VCYzjpwa9GNZPVHzaxLNX4aIV8e2Z/6Zy/8AoBr3mvD/AIdpt8c2R/2Jf/QDXuFc2Jd53PoMDPnpt+f6IKKKKwO053x1/wAibf8A/bP/ANGLXz81uRkjJr6C8c/8idf/APbP/wBGLXh/kHcPftXXhp8qZ4uPrezr/JfmyokB4qKWPEhOTn0rXhgO7G3v+lVLiL96wx3xW8Kt5WOCniG5lDYxYAdOhqQxt8oH0qwkeDirCQAkDrtqpVEi54mxRaBhgDrkZrXtoHZFxnNV2iYnOMc4rodOhHkqWHAXrXDjcTyU7nPObrWibvw3DDxFLuGD9kf/ANDSvUa858DQGHxI5Ixus5P/AEOOvRq54TVSKkj3ssi40LPuFea/FwboNLHr53/slelV5z8Vl3JpIx3m/wDZK2pO00zTHu2Hk/T80eSiDLY/CkFt83U+laKwbGYnuc0qxE7fl5zz7V3+2PCeLfQofZgRxnFMNsTnjpWu0QVcY57VG0eOg6ipVe5McXIwZoWyFA9c1VaFyecj8a6B4hgZHPeqJjGcY6GuiNS56mHxraMv7O2OOlBjfgH0rS8vAxihogozjJxV850rFmbFCY+ASc5PJr1v4Irtl10H+5bfzlrzcwjg4r0z4Lrsn11c5wtsP1lrDEyvSf8AXU1o4j2kj1miiivKOkKKKKACvMvi8MxaUPaf/wBp16bXmnxbGV0ge03/ALTrWj/ERy412oN+n5o8ilg3Jgk8inLCARyegq3LFtUHrwRUqRAqvGTivSdTQ8l4pqO5TSEh5V9s1VmgIVSM5FbIjBaXHZaqyR/KO9Eal2FHFPmv/WxTSEtgHNWPs5IxzmrEceecdhV0Qjyycc9aidWzM62LaZzbwlbluucdKngjP2pM9M4q1PF+/Y49KW3izex+5rVz9251yxPNTv5CzQMZmx61VuYuMZPIrbeHE857A1TuI1I5471jTq3scVHEtSSMtoMYIzXsHweXbY6iP+uX85K8wKA4GK9T+Ewxbal9Iv5yVGKleB3Ua7qVoJ93+TPSK
KKK889g8b+Ly7vEdr/15r/6G9eXXSYRzk4Fer/FVN3iS3z0Fmv/AKG9ebyW4fep716mGl7iR5ca6hXkn3I9OQ7Wz6V7B8GF26Hrg/6ip/8ASeGvLNPjy0gxyADXrHwgXbpWvD01X/23grHFyvoPD1efFyXl/kei0UUVwnphRRRQAUUVHPCtxbywOzqsiFCY3KMARjhgQQfcHIoAkoryrVpJ9A1LxNqGj3+qPBoOktuS61Ge4R7uQblysjsPkQKf+B+1a07Xfg3W7KOLUr/UYbvTbuWaO8uGmzLCqMHXP3c7mBVcLyOKAO/orznSmv8ASz4N1R9Xv7yXW2EV/FPOXjcyW7yhkQ8R7WTA2gcHnPWqtheaiND0Hxa2q3z3eo6pFFPatOxg8maYxiNYvursBUggZypyTmgD1CvAPFgB+I/iPP8Az9Q/+k0Ne6ajeT2NoZrfT7m/kBA8i2aMOff94yr+teDazNJfeOvEE1xZT2UjXMWbecoXTFvEOSjMvPXgnrWtJ2lc4MydsOy7DCq44GcZrP1JQsanHJzWsRhhjsBn8qy9SHyRZ6kH+lc+Gk3VTZ83FWkjMtl3TRDGMuB+td/HbgQJtHTk/lXDW4Buoj6Op/WvQ7cfum9CuRXFxBVceSx6OHiqk3cwJYwWkZhzuJGav+DU2+NrPH3cyH/yG1VLxXOAD8+3rV7wgu3xnYD08zP/AH7atcLK6OWkrYiHqvzPXKKKK6z6wK5bx8M6BED0+0D/ANBauprlPiCCfD0eOv2gf+gNUVPhZzYz+BL0PLfJMkzZ+4Bmjy0aI5UZFPknEcgiAOWHUUvlMwGOppczSV9D5KxoeAwB45scDjZL/wCgGvaa8b8EJs8c2eOm2X/0A17JW83d3Po8pf7h+v8AkFFFFQemc944/wCROv8A/tn/AOjFrx0RFWUNznivYvHHHg6//wC2f/oxa8oXDum7sf1FDk4o+dzf+OvT9WOghxnd1xgVl3cf7449f6Vt8E5HYZrImOZPqTUYabc2zzX7trFeOPjJq3FFkDA5J5qFVLcDtzV6BgAX9OK6K02kS3d6jZI1GBjjPWugsY0+zoMAhsDP4Z/pWRNEBAp75rorKNFtrdQOg3D/AD+NeBmde1FW7s7cFTvUZr+E8f8ACTMAelpJ/wChx13dcN4VQL4mcjvayf8AoaV3Nd2XtPDQt/Wp7+EVoP1CvPviehkGmAdcSn/0CvQa4T4ijM2ljttm/wDaddbfKrkZj/u0vl+aPOJI/m3AcCl2hZWPUccVO4wrAEVFsLgMvGWxz6U1O61PlQdQ0gJGMCo3j2g9x1rRlhULn1ABqts+8rc/4VnTrJrQbi0Zky8g/jWdjMz8dK2poxwcVmMgV29Sa9OhO6OnDztdEWzIApCmaeM7hjpSZ+cL3xmt7nTzPoHBfAr0j4Ortute/wB22/8AatecqoySPWvR/g9zda9/u23/ALVrKt/Df9dTrwD/AHtl2PU6KKK849kKKKKACvNviwM/2T9Jv/adek15x8VRltI+k3/tOrp/EjizB2w0n6fmjzaZfkAx1NLGuCvPapCMoCe3FKoAkxjpXTzaWPmOZ2sJGmfNP+x/Wq0q4wuO1XoflWVjzx/UVFLHuIIpQn7zuOM7SIYU/pWjsCwlsZPTFVUGGPpVxW3IW/h6YrKtJtkSfM7mVMgMrcU21UC9iJ7tUs5AZz6Gm2y7r2LPXdXTf3H6HTGT5PkalygEsoA+8xBrNlUNwR0rdnQCSUDuxrFm6H61x4SfMjBaSKxABYDt0r1D4Uc2+on1EX85K8xA/ek+1enfCkYh1L6Rfzkror/Cejgn/tEP66M9GooorkPozyb4oLu8SQen2Jf/AEN688Yfv8Y4xXo/xLGfEsOen2Jf/Q3rzuRSJWyfpXbQelj5utL/AGmoixpqjzJDj+GvTvhJ/wAg3X/+wr/7bwV5pp6keZ/u16T8IM/2Vr2ev9q/+28FY19Zs2y53xUn5f5HotFFFYHvB
RRRQAUUUUAY9p4a0+10zUrBxJcw6lNNNdmcgmUy/eBwBwFwo9ABVfSfCNppd4LuW+v9Rmjtzawm+kV/JiJBKrhRnO1clsscDmugooA5rSvBVhpN7aTpeX9xFYKyWFtcShorQMMHZhQT8vyjcWwDgUlv4H0621CGdbq+a0t7lruDT3lU28MzEkso27urMQCxAJ4ArpqKACvA/FBI+JPiLHT7VD/6TQ175Xgfirj4i+JG7/aof/SeGtKW5wZl/u7LMrbbhl7FV/lWfqCZWI9sHFXrtgs2O5UfyqhqDMY4uOAOK58KnzRf9bHzf2rFC1U/bI1PQyDH516Lb7fsWRnGCP1rz63bFxD6h1P616HAwFqvuDj8683iRu1P1PUwTvOV+xlXClpdvoMGpfB7hvGdkfUyf+i2qORx5xB7g0/wjHs8b2JHQtL/AOi2rTAdn2Odf7xB/wB5fmeu0UUV6R9OFcr4/bb4fjPpcD/0Fq6quU+IChvDyA95x/6A1RU+HU5sZ/Al6HlxKxAufwqWG5/dk/lxVORTK+eijinKVwFB6nmqlTUo67nyKdtje8EOW8dWpPQrJj/vg17HXjPgb5fHFivokg/8cNezVclax9HlP8B+v6IKKKKk9Q53x0ceDb8n/pn/AOjFryNSWZd3bkV6744/5E6//wC2f/oxa8dI5Owk07XR85m/8den6sstcZDY9eaqkKzbu5OajaTbwOpqaKPOHPQU1BU1c8t3YgOFX1Y0+0y+wHscmmAqQZAfYU2B/LG0Hk05K8XbcEaDO0hBH3QcGt+1GIoJD1K7Pw/yK51pQsAAxnIrooGzawj/AGc14OZxfs4q3V/kd+CfvNm94Wz/AMJIc9Psj4/77Su4rhvCRB11SDkGzc/+PR13Nd2XaYaK9fzZ7+F+B+oVwXxIP7zSx6ib/wBkrva8++JzbW0o+03/ALTrttfQyzL/AHaXy/NHBBVO8c43Y/KnRkOB/dyTSRJlj780kh2xkdKnd2Pli4M4y3rUa/MXDdehpwbPI6YpjfOW7HAHFc6RbIJIyjY7DgfSseTHnN7ZrXnLbgT2NY8xxIQOpNenhb9S6OsmNUdD6UY/iHWjGMUp5YDtXWdHUMYevRvhAMXevf7tt/7Vrzl+HPpnNejfCD/j7173W2/9q1nV/hv+up25f/F+R6lRRRXnnuBRRRQAV5z8VRzpOOoE3/tOvRq85+KhI/sojrtm/wDadXT+JHFmP+7S+X5o88U4jj+vNLlfMJ5pFx5SGgsFOPUcfWtbXPlSSMhTJ6YqNgc5HeiJSWkJ4zj+lOAJbI6c0tmwYoQZIHQVJA2UK/wmmnjIFIDyQOlS/eQipOoEz47nn8qW0H+nRN702Vcu7E9D/Sn2hzcRkdK6X/DfodF/dNuSRfOcd2yaxphgDPWrxZhI5I521TueNvqa48NHldjJO8iAGvTPhX/qtT+kX85K82UfIK9K+FgxHqf0i/nJW9Z+6d+Af+0x+f5M9EooormPpjyr4l/8jJB/15r/AOhvXn1xln/Gu++Jv/IzW/8A15L/AOhvXAyHEhJ9a7KC6nzGI0xU2WtP+Uy56bK9I+EQxpevD/qK/wDtvBXm1mctLj+7XpXwk/5Buv8A/YV/9t4KxrfGzoyz/eZen+R6HRRRWJ74UUUUAFFFFABRRRQAUUUUAFeBeKj/AMXI8RjHH2qH/wBJ4a99rwHxUcfErxFn/n5h/wDSaGtaXxHBmX+7sddf8fbHd/CvHpxVfUWBihUeh/pU13/x9AjuFz+VV75QIoyeoBFZ0FrD+uh80n75Utj/AKVDxwXX+dejIoNohHBArzm2/wCPqL0DL/OvRoMNBweg5FePxJ/y7Z62B+KS8jEvGxL6EE1d8KMp8Z6dzyfMP/kNqzr75pcg9ySKveEMHxhprY5/eD/yG1dGDiuWLOOD/wBpj/iX5nrlFFFd59UFcp8QAT4ejA6m4H/oLV1dcr4+ONAiPpcD/wBBaoqfCzmxn8CfoeUzH
HyJ+YqDy3Xnmrrosa72xkmqbzFgRgitaUm17p8jY6DwIc+N7LPUpJ/6Aa9mrxbwEc+OLM/7MmP++DXtNOorM+kyn+A/X9EFFFFQemc944/5E+//AO2f/oxa8ekAZcrx9K9h8cDPg+//AO2f/oxa8c5A4PUVUT5zOP469P1Y14wAD1Jp4chAuO9GMY3c9hUUsqpx3q1eWh5Su9B+4bsAAKOahjcIWduxpgl4A65Gabjdhc59a2ULaMtR7llH3AZP8QNdfaY8mNs8bcY/WuNhG5x6ZFdnaLiFQOgAx+VeFnVlCJ14Je+za8HNnxE64wFtZAP++0rva4TwkoXxGeME2smf++467ut8C06EWvP8z38Emqbv3CuA+Jib30r2E3/sld/XBfEg4fSz6Cb/ANkrqbaWhGZf7tL5fmjg1+RA+O9JKm8dOh/Ony4KlARyBTdxZOuDmsk38R8v5EqZAK7eMcGoyctuHBIxj3qxGVMS5I3c0yRFGGXHPb3rJS96zKa0KUxJGSMc1kyDLnPXNa82dvI/irJm++frXqYYqj8RHgleTQD2xigA55NKcV1nSAx83OcmvRvhAMXmvf7tt/7VrzcDaAfevR/hAf8ATdeB/wCedt/Oas638N/11O7L/wCN8j1OiiivPPbCiiigArzn4p/f0keon/8AadejV5z8VB82kn0E3/tOrh8RxZj/ALtL5fmjzzA2jnjFNwG3A8elIzYUDNKSOT7VvY+WJI2G3r9aco2gjse9Rxbcn0PWpC2Pl/Ks5LUliNkcDn3o6jA9+aQHkg9cDmlwTlRxx1oApy8OwzS2jBbqMds0yc7ZyD6U62G67Qe9dL+B+h1Je58jWuDtuJuOhNVJFDKPUHirV4cXFweoGeKrBSQee9cVH4UzmejISCoUbeCevpXpPws/1epfSL+cledS5VVU85716N8Lvual/uw/zkrSo7xO7Ln/ALTH5/kz0KiiisT6k8l+KA/4qW35/wCXJP8A0N64GXl2B4rv/ih/yMlue/2NP/Q3rgmX5znnNdtHY+ZxDtiZlmxXBmPX5Sa9I+EH/IK17/sK/wDtvBXm9icNMCexr0f4P86Tr3/YV/8AbeCsa3xP5G+Wf7zL0/yPRqKKKwPfCiiigAoorL8TNdp4U1htP3fbRYzG32dfM2Hbj3zigCW31vSbvUJdPttUspr2LPmW8dwjSJjrlQcinQaxplzqEun2+pWct7DzLbRzq0if7yg5H41554TuLnTB4VtLTUrS/h1OwdvIjt40+ylYgwZSo3bd2FO8kkkc54qto/2H/hGPh39j8r+1vtyebtx5u7y5PtW7v13bs98Z7UAemf2xpn9qf2X/AGlZ/wBoY3fZPPXzcYznZnPT2om1jTLbUYtPn1KzivphmO2edVkf6KTk/hXmA+y/8INH/qv+Eh/4SX2877T9u59/9Vn/AIB7U7WPsX/CK/ED7V5X9sf2i/lbsedv2x/Zdvf+5tx3z70AetV4B4sGfiN4jJ/5+of/AEmhr3K+k1KKwDafbWtzd8ZjuJ2hT3O4I5/SvBdee9k8b+IHv4IILs3UW+OCYyov+jxYwxVSeMfwj+ta0fiOHMf4DJ7jJnRl6EDP5VWvyWVfTJqeZ9u0N6Lj8qhuwXiRux6UqWjifMRfvXK9tzdR/wC8P516HZxlLRm/ibqf5V53btieL13ivSbc4tBnuteHxNJqMEup6+XxvUbfY5y9wl03uMn61d8F5/4S6wB/hMmP+/bVn6pgSH3BP4Ve8FEnxjY+hDkf9+3ruwi/dJnDD/eo/wCJfmewUUUV1H1gVy3j7nQYR63K/wDoLV1Nct4+OPD8ZHacf+gtUVPhZzYz+BL0PL5UNw4B+4DUc0MYjJHX61KsmYXVeo/nURtZfKYsO/rSi7aN2sfJbmp4AXHjWzJ/uyf+gGvaa8Y8A/8AI62g9Fk/9ANez10VPiPo8p/gP1/RBRRRWZ6Zz3jgE+Dr8Dr+7/8ARi14xIzKRzXtHjc48
H35/wCuf/oxa8WJyea1pnzmb/x16fqxxk3HnsM1SmwSD71O7Atx2HNQgbxk+tdFNW1OCmraiQ8ybe3NSdCMdWpYk2nimoxDAtVN3Y5O7didDjaB1zXa2pC2sZPUoDXHW8eWGeprsbdFaGLOfkQV85njTUUzpwPxysbnhcn/AISjHb7HIR/33HXc1w3hUH/hI0J6/YpP/Q467mt8u/3WHz/Nnv4X4X6hXAfEtgsuk56ETf8Asld/XnfxSOP7K+k3/tOu5K7sZZl/usvl+aOGJBuM9hj9Kj3MOM9iDTSCSpPRjQ3EbHvTUUfKlq3+YqT0UH/Cp22owz0GGH5YqlAxCZPQ1KXK7c9B1+lc86bci07ISZ1dRgfxZrGlIMr47VoTyD+HtzWY33mPqa78NDlRdHVtjN2Bmg560gBC++akJGwmus6npsMB+UA9D0r0f4Qj/T9fP+xbD9Zq85H+rI9DXo/wf5uddP8AsW385azrfw3/AF1O3AfxvvPUqKKK889sKKKKACvO/ikcNpR/2Z//AGnXolec/FU8aX/uzf8AtOqhrI4sx/3aXy/NHmjsSAPwp/GAPUE0xF3CnMv3GPYV2O2x807bEkKgE+mKkdscD8KgjyZDj7uKkc4XjqelZyXvGbWo4dMHrT+eAOtR47HrUoBIAHWoloSzNuFLTNUljn7VGO4YZpJvlmYmnWR/0mI+rV0Sf7t+h13/AHZpXBP2icnpk1AhzkU+8cNcOB03c0xRzge9ccFaCucr3El3EDmvRPhacx6l9Iv5yV54+4deleifC/7mpfSL+clVL4Tty3/eY/P8mehUUUVkfVHkvxRBPiW2x/z5r/6G9cDLu7HkGvQPif8A8jHb46/ZF/8AQ3rgXb5/au2i9D5nEP8A2mfqWLEfNNnrtr0f4O5Gka7n/oKn/wBJ4a86seTL6lTXo/wg/wCQVr3/AGFT/wCk8FY1n7z+R0ZY/wDaJen+R6LRRRWB7wUUUUAFFFFAFKz0bS9OuJrix02ztp5zmWSCBUaQ/wC0QMn8aINH0u21CXULfTbOK9m/1tzHAqyP/vMBk/jV2igCl/Y+l/2p/an9m2f9oY2/a/IXzcYxjfjPT3om0fS7nUYtQn02zlvoRiO5eBWkT6MRkfhV2igArwLxX/yUTxJ/19Q/+k0Ne+14D4rz/wALG8SD/p5h/wDSeGtaXxHBmX+7sr3Q2zZ65VePwpZ5AbOL1Gf6UXKFpc542j+VJcoEsoSepBzRG3uf10PmVqypAcXMfH8QP616NDIv2MNnogrzq3I+0R57uBXe2wzb7c53L0/GvF4igpKnfoengpuM3bsY1+fMuJEwMA4Bq94KUL4vseefnH/kN6zrwkXTj0zk/jV/wZn/AITWy9P3n/otq7MKrU0vI5abviY/4l+Z7BRRRXSfWBXK/ED/AJF1Mf8APcf+gNXVVyvj/wD5F+P/AK7j/wBBaoqfCc2M/gT9Dy61Ai+8fzq21ysiMq4OKzzkyZBwCMYqxboBn5u/9KirTi/flufJxk9kavgddvji046rJ/6Aa9jrxzwQ27x1aeyyD/xw17HXRLpc+hyj+A/X9EFFFFSeoc/43OPB9+f+uf8A6MWvGGwTj1r2fxv/AMiff/8AbP8A9GLXi+M47dDWkD5zN/469P1ZXkI3kLTEU/rUhADnHJpCcLnpzXWnpocCelkSL/rFHYc0mwFhTolJAHc0rIUGM81nfWxF9SzEcPxXW2e77GjMMHA/lXGeZsKY5yQK7SIboY0U4Hl5r57Ol7sPM7cAvebNnwm27xApPX7HJ/6HHXdVwnhPA8SlR2sn/wDQ467uurAK2Hjbz/NnvYR3g/UK86+KQ3PpA/67f+069Frzz4nj9/o5z2n/APadd0XZkZj/ALtL5fmjgh9wE/wVG+fKORjsKnUbsrjg5qOUfJ7ClF6nygkZOAvans5HBH1+lRqf3e4fSgPvB3cHGKbjd3ArzOCBiqTjJzVyYDAx3qqefrXbS0Wh0
UdENGOT70gbII9qOhxQccDua1N7Aq4TH416R8IBi41z/dt//atec9q9H+EIxca5/u2//tWs6z/dv+up24B3rHqFFFFeee4FFFFABXnHxV+9pA9RP/7Tr0evOviiAZdIz/dn/wDadXB2kcWY/wC7S+X5o81BClgOoNLjeFb04pr/ACkn3p0QJ+ldT2ufM9Lgg2uxFSseBx9Ka+FUmnAjYv6VD7kPXUkVM8d6lY+WoVeSBzSRjjOcnrSSsApPeudu8rEmbMS0rgin2hBuIwOzU2T7+fen2q4vU9Ca7ZfA/Q6tOQszN/pBC885p/OR6c0yXi6YgZGasOnzADgYrlbskczIJMr9K9F+GHK6l/uxfzkrzqUEHOc+1ei/C/7mpf7sP85KUvhR25b/ALzH5/kz0Giiisz6o8o+J5/4qO3/AOvNf/Q3rz9uSfrXoHxOGfEduP8ApzX/ANDevPWYqjHGSD+ddtDY+arr/aJ27lywIZpNp6da9H+D/wDyCde/7Cp/9J4a840kZkmHT5etekfCEY0zXx/1FT/6TwVhWfvtehvlqtiZLy/yPRaKKKxPeCiiigAooooAKKKKACiiigArwLxX/wAlF8Rn/p6h/wDSaGvfa8C8V/8AJQ/Ep/6eof8A0mhrWl8RwZl/u7IZGJkUDvgVFfbhHEpPHNSpuDP6kcU3USCkSe1EHacUfMw3KUIJuoR/tj+dd9bnZADgnAIrg4iBdRn0cV39uS2n5HX/AOvXkcQPSnc9LB6yfoYd0N1zJkfeArS8GxlfF1g2e8gP/ftqpXBLXJJ7Vd8HMT4t0/0/ef8Aotq1w0m4pHPSX+0R/wAS/M9booortPqwrlPiF/yLa/8AXYf+gNXV1ynxCOPDan/psP8A0BqmexzYz+BP0PJ5GCgAfU0iTPn5ScGoWOFO7oaIJkAPXiunk93a58jZ2udJ4D/5Ha0Pqsh/8cNezV414E/5HWz9Nkn/AKAa9lrOe59HlH8B+v6IKKKKg9QwPG3PhG+z/wBM/wD0YteKNuVfvZxXtfjYZ8IXw/65/wDoxa8SZ8Bse9a0tz53Nv469P1YwdyeSeKTg/KR3xTowdhY/hTW9R1rp6nndSxG21s+9OfBfd2xVYNuanK28MOwPNZuGtyHFi5xJGD/AHhXa2zHZFg/wVxgXzJE9ARXWwAqsRTrt5rxs5ipRivU68JKzN3weT/wlLqeos3/APQ0r0CvP/CA/wCKrlb1tJP/AENK9ArbCfwInv4H+G/VhXnnxQBJ0ojsJv8A2nXodcJ8RFDXGlA9MTf+yV0OXKrizFXw0l6fmjgUGGYkYCk1HKmIQD1K5q1MQwdR2qtIQ/X0IrOm23c+WehXTldg7807YDvJpsOEII7VM2HVmXqwrok7Mkz5sg47AVUVsgNVy4zux6dapAZwPeu2n8J1UfhH9vwphGdvqKeV4/CmA5APpWiNYjl5P0r0j4Rf8fGuf7tv/wC1a82DYzXpPwh/1+t/7tv/AO1azrfAzswC/fnqFFFFeee6FFFFABXnfxQGZtI+k/8A7Tr0SvOvij/rdH+k/wD7Tqo7nFmP+6y+X5o80kUsrDvwc0+NTuQD0yakdQsR9hRCcSJ9DXS5e7ofL82hHdcED3pQw2J60l2pLH61EeAn0pxV4opJOKLcUpB6+9DOGGT3qsrDr3NKr7mJPtSdPW5LgQucs2emantWzdRj3qt1B+tT2Sg3sZ98VpU+B+h0SS5S3PhblsdA2KsL8xP6VDfKYrxgO70+PO7n7uOK4XrTjLyOVqzI5FIk65GOleh/DAELqWe4iP6yV5+R8xx2r0L4Z5xqWf7sP85KbeiO3Lf96j8/yZ39FFFSfVHlPxNx/wAJFB6/ZF/9DevPXBD49zXf/FJseIbf/r0T/wBDeuAcklsda7aC0Pma6/2ifqWrPKCQjrsr0n4QHOla8f8AqK/+28Fec6bhjJnslejfCH/kF6//ANhU/wDpPBWFZ+80b5Z/vMr9v8j0Wiiis
T3wooooAKjnnjtreW4mbbFEhd2xnAAyTxUlVdS+zf2VefbHKWvkP5zDsm07jx7ZoAztK8WaRrMbS2j3YhWHzzNcWM8ERj4+YPIiqeDng9OelP0fxVo2vTvBp14ZJVjEux4XiLRk4DrvUblz/EuR715pqUUd1p1/ofgjVbvVtPl0S5jnh+0tcxwsqqIlRznazfOuwHp2GK3LjULTxd4h04+G5RJ9l0m8Sd0G0QGVY1jiY9m3KTt6jbQB1mm+LtC1e/8AsVjfiWchmjzE6rKFOGMbsAsgHfaTRB4u0K41f+y4r8NdGRoV/dOI2kXJZFkI2MwwcqCSMHjiuK0fULPVh4C0vTc/b9KIe+hCENZols8TpIP4SXZVAPXqOKqadcwyeHPDfhZCTr9lrET3NttPmRCOcvJK3orKCQ3Q7x60Aet14H4rGfiH4k/6+of/AEmhr3HUbOe+tDDb6hc2EhIPn2yxlx7fvFZf0rwbXLeay8a+IYZrye9lW6izPOEDvm3iPIRVXjOOAOlaUviPPzP/AHd+qLSx/NnuFBqvqKBSo74q9BHmVXz2AIqrqiHIJJA4rnpT/fJXPm0tLmZCuZ4x/tD+dd3CTDYoB39a4aA/6TFjoXHNdzEM24bOcjAFcee7Qvsd2FvzP0KaqrXMrnpirHhLA8ZWAHQmT/0W1VYpCS25dvJ/nVzwpg+MtOx28z/0W1ThLqo0yadnVpv+8vzPWKKKK9I+oCuU+IJC+HFJ6Ccf+gNXV1yvxAG7w8g9Zx/6A1RP4Tmxn8CfoePOjSdehNNaIKMjrVuYfwIMn2qA28vVlIrrhU03sfJJs6PwF/yOVj/uSZ/74NezV4z4DGPGtlnukn/oBr2asp7n0WU/wH6/ogoooqD1DA8a8+Eb7/tn/wCjFrw9lJBx0xXuPjPnwnff9s//AENa8X2DdzxkYx9K0pSs2fO5s7V16fqyJUOw56VEseMj3q8RlAccL1qs3yyY/vdK1hNu55akxgTL8dKfswm0dSaX7hUDrUqbV3OTwTkU5SYOTE+VQinrmulgcGFSD/CBXORxFmVj/erdCH7PgEgEZ/WvJzBRkoq5tQbTbOl8GEnxRIf+nN//AENK9Crz3wWwbxTMB2tHH/j6V6FWuG/gxPosB/C+bCuE+In/AB86V6bJ/wD2nXd1wPxLbZ/ZpHXbMP1jrWSumh5i7YaXy/NHBO23O3pgiqpc5b8SKe75TA6hqY2CAe+3+daQjbc+UKySfez2NSglBgdDUB+U5PANOB+QEHI611ONzRrqhbrkbu561S+6xqy7F2x2xVdwBk/jWtNWVjejorMUk7sCmHPOfXilxkk0tWarQXbn8K9I+EC7Z9c+lv8A+1a83zhc+1ek/CP/AI+Nb/3bf/2rWVb+G/66nXgG/bL5np9FFFcB7oUUUUAFedfFIHzNHI7edn/xyvRa87+KR2nSj7Tf+06qG5w5j/usvl+aPODyntmooiTMA3Tmpwcqw96j25dSPpXSno0fMRe6EmYkt6dqjIwqY96WXhmB9qH+6uOtaR0SNI7IZ9PSpI+tNVMZPvmrEKAYLdTSnKyFOSsU5FDZAz1qawG27i+tRyfLM3oTUlnn7ZBgfx4NOf8ADfoa3fJY0dR5vWI7mmqQVA7EUt+c3bgc8mkGPl9CK4YaUo+hzS3EICtkHpxXofwzOf7S/wB2H+cledkAvweleh/DIYGpf7sP85KJdDsy3/eo/P8AJnoFFFFI+qPKPigm/wAQW4P/AD6p/wChvXAEYkI7V6H8TF3a9B6/ZV/9CevPnOJDXXQd1Y+YxD/2ia8y1pi7vOHqmK9F+EH/ACCte/7Cv/tvBXAaNgyS+gUmvQPhEMaZr4/6iv8A7bwVhVl+8kvQ6cs/3iXp/keiUUUVme8FISBjJ69KWsDxdBZXOkxRXui3erZnHkw2gxIkm1sOH3L5eBkbtwxnHegDforz2wsvHuj21zfRzxXFpGu+LR7yY3M7AdVFwAuGPYNvGe9eh
UAFFFFABRRRQAV4H4r/AOSh+JD/ANPUP/pNDXvleA+LQW+IXiQA/wDL3D/6TQ1rS+I4My/gP1RctFcsmMkOAPpTNXi+RfxOKt6ZkwqO+Ki17iVdp4xXkwqP64o+p4HJajzHPxjbcR4/viu4QlYFAXgLuzXFp/r0H+2P513Ftl7bJ6BcUs9lZQbN8H7zfoY7TEXEo2/LkYPrmr3g4EeM7EE9DIP/ACG1Vp1RZMY4Bq34RO7xpYH/AK6Z/wC/bVrhGnsuhFNNV4X/AJl+Z63RRRXafVBXLePs/wDCPx46+eP/AEFq6muY8df8gOH0+0Ln/vlqzqu0Gzmxf8CXoeaR26oS74yTnmlaSJsrgZHamOs1xJhGwo4OamNgoG4Y3dzXK5RTvUlqfLJNr3UXfBSgeOLTH92TH/fBr2GvIvBy7fHVmPRHH/jhr12u5O6TPeyn+A/V/kgooooPUMDxpn/hEb7H+x/6GteOYBZXPAI6V7J4zOPCd8f9z/0Na8jEAcgnG3AwPelzKL1Pnc3V669P1YyTGzaq9euKpSMNwyOR0rRkVUIVR970qjMi+cT6CroSTPL2epHnkE9asJH5jYxhRUEa7mBbtV7BEQRPvEc1dWVtEJiISZAAPlzWsznyY1Hfv+NU0iSKIAjk9at9Cg7Y3V5eIkpNW6G1NNXOj8EY/wCEqnx/z6P/AOhpXoted+CV2+J5T62jn/x9K9Erpw/8KJ9Hl38H5sK8/wDicONMx6Tf+yV6BXB/EhQzaWD6Tf8Aslat21DMv92l8vzR5qgKo2eT1odeMjpjFSOu4nb6jmkYMIwCeprVSu7nypUI3r7UgUquOoqUAbduOtIAVIU845roUi0+hDtPOOMioWwGweeKuFeeOp6VUkAMjeo4rSErm1KV2MJG4ilGOlNxk0vUj2rQ2HYBH4V6P8IgRPreT/Db/wDtWvNwefqK9J+ERzNrf+7b/wDtWsqv8NnZgP4yPT6KKK4D3QooooAK87+KPLaSD3E3/sleiV5z8VG2jSiPSb/2nVQ+I4cx/wB2l8vzR5xuwGpwYKvqetRE/MR1zxUigDaSMk11NHzLQhXewyOvWlKZXjsalUYJ9acoH5VDnYjmIhERz2PNTKoBBYgA0AcD/PFOWMSEFvu54BrOU+4r3M64wJWHvUtjxdQjuWpt0gFy/t/hTrDm/t8/3hXRJ/um/L9DqWsLF+6TEsxPJzTFIyB2xVrVFCzSBBjLc1SB9emK4qL56aZzzVpNEuFLHGM16B8Mhj+0+c8RfzkrzxcZyODivRPhl01L/dh/nJTeh2Zb/vUfn+TO/ooopH1R5P8AFCQp4hg/681/9DeuAY7jn3rvfijg+JLZfWzX/wBDeuDOFJ9jXbRSUT5nEWWIn3uaWkELJKP9jFeg/CX/AJB3iDH/AEFf/beCvO9K5ll/3a9C+EJzpevn/qK/+28FclVfvZP0N8s/3h+n+R6LRRRUnvhWD4qt9Re30+8023a6ksL1bmS0WQIZ02OpUEkDI3hgCQCVFb1cn4/jll0ixVbLUL+2F8hu7Swz5k0W18gkEfKG2t152gd6AJNJk1TV/FI1efSrrS7GGye2WK7dPMndnRtxVGYAKEIBJyd54rqK4vwhaaHBq0r6Z4V1TSZzAQ093CyKy7l+UEsec4P4Gu0oAKKKKACiiigArwLxVn/hY3iQ9vtMP/pNDXvteBeK2A+IviRe5uof/SaGtaXxHBmX+7s6HSolSEE9SoNZeuAKyKvQcVp2vzwRqOyjP5Vk618kgLd+a+fwabxrk2ePWf7hJIx4Dm8QHrvU/rXexLss1K9CTmuCtWBu4v8AfX+degY/cqg6Hr+VVxC7OmjbAL4n5GHdIDICepYE/rVvwgoHjHT2/ibzM/8Aftqz7oFpfLyflP8AKtDwavl+LtPQ9jJj/v21dmEVorU5abviI+q/M9cooortPqwrl/HgzoMajqZwP/HWrqK5rxucaJH/ANdx/wCgtWVZ2ptnPi/4M
vQ83eZYBsGc4zUcc1wd5bG09OKnW3CsZJOtWY5oWBAxx7V50qkYr3Y38z5qMW3q7FjwiP8Ais7B/wC8kh/8cNes15X4XI/4TeyUdNsh/wDHDXqlejRd4JntZUrUpev6IKKKK1PTMPxgceFb0n/Y/wDQ1rynyRkBe3Ner+LyB4WvM9Pk/wDQ1rysRsiqq8nvn0rmrStI8DNVesvT9WRNGsQJ7ngVRuIMSDHrzWl5WCXfqBwKoTK5kZQMnOavDzd9zyZKxCql5lVegYZ+laUaLCHlbqelVsrEFUfeJxVkxm4lGeFX0p1pXtfRBES3jlk+eTGM8VaZ+Vx34pqylyqRAHBGaHIWZgeqj+tccm5S1RqkktDpPAr7vFEy/wB2zf8A9DSvSK858DgDxKxHeyf/ANDjr0auyjb2asfQ5d/B+bCuB+JZI/svHpN/7JXfVwfxI+/pX/bb/wBkrR7DzL/dpfL80efMgiwF+6FzUcwYDcfugVcCB1k9cYptzEfKK44rONX3kmfL8vUowx5iOepPFPVScBuvenwLlR69qtxx7xgjB71pUq8rYkrmc0eGz2rOl+WZ8etb7wbTj8qxLhQszZ9a6cNUUma0tJWZAM8+9LwFBpsZJPzDHpStllHsa7HudLWthcbVA9DXo/wg/wBbrf8Au2//ALVrzdl3flivSfhD/rtb9lt//alZ1v4b/rqduA/jI9Qooorzz3AooooAK84+Kw3f2SPab/2nXo9ecfFU4bSPpN/7Tq6fxI4sx/3aXy/NHnCqOfWphjaoHWolIIz6GpkXg+vUVvNny0iUDoD1IpwRevpShgQM/eNSiMZBz061yylYmxEB39f5U5I/Oxn7ucin4A59f5U+JTKRj7nY1nKdlcaRmXiD7Q474ptgv/EwtvUOKm1JfLmOOpxUVix/tO3x/eFdibdBvy/Q3hexrajzdSL2BzWUr4J9K07ne13MQMg8j61muMHI71z4VWpqPkjKesmKGB6dcV6P8MPual/uxfzkrzPcN2O+M16X8Lv9VqP+7F/OStqisjsy5WxUfn+TPQqKKKxPqTyf4njPiW2/681/9DevP3DfP616H8S03eJrc9vsa/8Aob15/KNsjCuyg9LHzOIf+0zRb0hv3kuf7lei/CH/AJBev/8AYV/9t4K850tQJGx/dFej/CIY0zX/APsK/wDtvBWFb+JL5HRlv+8y9P8AI9EooorI94K5rxqJjpdpzeDT/taf2ibIuJfs+1s42fPjfs3bedu6ulrA8XXOn2ukxPqXiKfQYTOAtzDKkZdtrfJl1YYIyemfloA5r4eajdap/YzRm+a3s9EW3vnuEkVGucx7QN+NzKBJkjP3hzXolefeENQiuvF80Wm+KtQ8Qab9hZneZo2jgl3qANyIoLMN2BngK2c5GO11Ww/tTSbuw+0zWwuYmiM0BAdARjKk9DQBzS/EK0/tLV0ktHXTNPsXvVvhJn7QqMVbYmOm5WAOecccYJs6f4ruv7Qjs9d0kaU1xaPd27C5EwZE271f5RtdQynAyOuDxXL3/wAOdXutRvrUa1dSafLoTWMTyxW6IGy2yMrGikKuVbIA6YyRxWz/AGZrXijV7W41jSzpUFpYXFu2Z0lM0swVSU2E4QBT97BORwMUAWtJ8ZXN9daV9t0ZrKx1hWOn3BuA7MdhkUSJtGwsgLDBbpg4NMtfHE1xNZ3T6QY9Dvrz7HbX32gF2csVVmi2/KjMMA7ieRkDNUtL0nX7uTwvYanpi2lvoBEkt0J0dbl0haFPLAO4A7yx3AYxjmq1l4f11dN0fwvLpoSx0zUI7htS89CksMUpkjCoDvDnCA5AA5OTxQB6KSAMk4rwLxXg/EXxGRg/6VDz/wBu0Ne56jplhq9obXUrK3vLckMYriJZEJHQ4IxXgmvafaaZ458QWdhaw2trHcxbIYIwiLm3iJwBwOST+Na0fiODMv8Ad2dBZSM0ahRkEYJ9MVm60xeU56Dp71d0uTbBg9STiqmtM
ueBzivJw8eXGPQ8GTvSWpkWi7byHnPzr/OvR4lzAD7V5zbf8fkH/XRf516NC+I9hHBGQa4+Jrv2dvM9DLGnKVznrlD57t6sVq54RyfGWnHHH7zJ9P3bVHflVaTaMkEtgVY8ID/ir7M9iXx/37auvAz5opnJGNsTFf3l+Z6vRRRXpH1AVzvjIA6RFu6CcH/x1q6Kuc8aKX0eFQcf6Qv/AKC1YYn+DIxxP8KR51NHJNNhc7OORVlLIKuQf0ommSCI7QCfaoPtziMMVYZ4x6V5H76pFcmiPnf3cW+bVl/wmCPHFoPRZB/44a9XryTwhJv8c2voVcj/AL4Net17dNNQSZ6uVfwZer/JBRRRVnpmN4sAPhq7B6fJ/wChrXl3zoB8uT3r1HxUN3hu7Hrs/wDQ1rzMSZTcyEE9jXBim1P5f5niZkv3q9P1K4jdyXcFQvb1qi77GLOMMxwoNarM0wCgFR1NULlULhnAAU8Zp4ed3aSPJqRVroqxLtcSSHk44PatIAlRGgye+KoCNpZwxOEz0NbVuiwo0jYyRmnjKiik932CjHmdh0NukCgnG76VVmZWeTpnOKlLyNJk52g1SunAR5F6g84+tclCnJzvJ6s1qSXLZI6rwJn/AISeQHtZyf8AocdekV5p4Cbd4pm9rN//AENK9Lr1aatBHuZb/A+bCuB+JR2nSz/11/8AZK76uE+JC7hpp9PN/wDZKqTstSsx/wB2l8vzRwShgQ4JzjlfrUoJJAYdsmpI0yQR3qGckSbQCM8Zrl5ueVj5m1lcIYvM3N93Y3bvVxTG0WVIyeOKrRSYzj7vekCkcI2BnIxUVIub1diotJaEjDcB/sn865694uGzXQxA52sfx9axdRjAkc+5rrwMkqjiOLtJNmei859qEbHynvQDgAUwHLA16+502ve488PXpXwi/wBfrf8Au2//ALUrzY8Dca9I+EJzPrZH923/APalZVv4bOvAfxkeoUUUVwHuhRRRQAV5t8WP+YR7ib/2nXpNec/FRctpHsJv/adXT0kjizB2w0vl+aPNk4X+dWo04zn8KrpyTxwTiriKcbs9O1a1XY+WluSxqCF45qbyhkHd06inQqCgOOTUjRHIIbgdfevOnU96xSjoR7ArZ7HipEjO3KjCjpipFQbucEdBS7txEScD1FYSqN7FqJhXxMlw2exx+lM01f8AiZwZ/vVNqCbLpx6YNR6cf+Jnbn/br173w7t2/QcH0Ni5yLuUBcjJ/wDrVmToc5A/CtedSLyTvliapTIWywGPauDDVLJeiM6i1ZnFTnkY4616T8Lv9XqP0i/nJXn2OcFa9D+GQwupD/Zi/nJXZOV1Y68tf+0x+f5M9AooorM+pPMPiMm7xHAxPS0X/wBDevO7j/WE+9eh/EkH+3YGBx/oq/8Aob153LxI2ea6MPu2fL4j/eplnT+smP7tej/CP/kGa/8A9hX/ANt4K8403O5h7Yr0j4SjGneIB/1Ff/beCorfGzpyz/eZen+R6HRRRWR74VzPjbUNQsbLTY9N1GHTp7y/S2NzPCJI0Uq5OQSOu0AepIHeumrnfGM7jTrTTo7eymbVLtbMfbovNgTKs5ZkyN3CYAyMkjmgDn4z4sbxX/Yg8ZWkhNmbrfHpiEx4dV2uN/Gd2Qc87W9K9CrgfAd1bQDRbWy0vTLNdS0UajciytxERKGjHOD9072xnn5TzXfUAFFFFABRRRQAV4H4r/5KH4k/6+of/SaGvfK8B8WNj4jeI8f8/UP/AKTw1rS+I4MyV8O/katoiCNSw+7zWZrDbJc+oNakMZYr6YGax9ZyHBf+9xXm4O0sTueA17qVipbNuuI+P4x/OvRIQTZrn7wrz21GLiIDu4/nXfwvutVx/ntXDxIm/Z2O7LGlKXoZ00SGaRyMnkVY8KLt8Wad6HzMf9+2qJ0DCX1Yn+dW/Da48WWGOitIv/kNqWAn76jcXL++g/Nfmem0UUV7h9CFc143dk0SIr1+0D/0Fq6Wub8bO
qaNCW6faF/9BascR/Cehhiv4Mjghbrje+OTmo5vKIx6e9NkM0zqq42d6ilsnQbh1+teXBK655anzcnp7qLvg3/ke7MDoqSD/wAcNev1494KJHjq1U9Qj/8AoBr2GvatZI9fKf4D9f0QUUUUHqGN4qz/AMI3d46/J/6GteabllwxBx2Br0zxSceHLo/7n/oa1555SyMP7owR9a8rHTUaib7Hj5hFyqq3b9WRqN5CqCM9ap3duss4THygZNaxKQjC9W6Vl3rmKRQOr8Vy4WpKVT3dDgrwUY6lUfPOkSdB1rQMbSysufkAAxVOMpbDP8RNXZZWVQkfUjnNdNdycko/15mNJKzuMuZ1XEaVmElzMO1XmthCgb+In1qg27e2cYJOfzrfDKCXumdVyb1Ou+H+D4iZu5snz/33HXpdeZ/D8j/hJJFHazf/ANDSvTK7YfCj6HLP93+bCuJ+IKhjp+fSX/2Su2rivH/3tO+kv/slRW+BmmP/AN3l8vzRxELqEUkdDkVVuHLENnJ5qTIllZfciqjgLnb0AxWNKC5r9T5iUnaxJAwVQCPlfNTGMx/dIwKr2+DGIz+FTM0iDnGBVzXvOxK2HedlhkHgisq/ctcyDturQ8zeSE6jBOfesi+cm6P1wa6MJC0/kXH3nYqrw7+xwKCmBn8aT1Jp5yQteodOw3OW2mvSPg+AJNax022//tWvN/vPnv3r0n4QkNLrRHQrb/8AtWs638Nnbgf4yPUKKKK889wKKKKACvOfirwNKPtN/wC069Grzr4qfd0sn+7N/OOrh8SOLMf92l8vzR55Go2KccHAqxFuY5zwOtMg+5tPfmnoWyCMY6GibvdHynU0YQNmR3pxRiRyMdWqFMYyPwp+XBHTB615sou90bJ6D24Y5PGMCiIFTlajbJkXd93gDHrVnzFij8tc8ColdKy1uUtXcwdSlIun684qPTQTqcIP97NO1Bj9pY9s/wBKbpZP9pwk92r2bWwzt2/QcO50M2VlkJ7MahkTeNy8A9amBEskqnqWLCoQWGcfjXi07peaCVmU3GD8wzmu9+GuP+JlgYAWIfrJXEsT/F0ruPhwQW1LH92H+cld0JXN8uX+1R+f5M7yiiitT6g8x+IoLeIoASNv2RePfe9ed3CgSsPevRPiPGra9bseq2y4/wC+nrzq4x57fWt8O/eZ8vif96mWLHIaTHXGa9J+Epzp/iA/9RX/ANt4K81sCd0n+5XpHwj/AOQbr/8A2Ff/AG3gqa3xs6cs/wB5l6f5HolFFFZHvhXIePdRt7fToopbNtSt4p45NQsYoRK/2YrJhyD9wBk3BuOUIyK6+uK8VeGdf1S+1Z9JuNNS31TSl06f7WJN6YM3zLt46Td/SgCTwWmhWk01rovhnUtJDxh3lurNow4XAC72JJxngemcV2NYeip4mjn2aydINqsWE+xiTfvyMZ3cYxn9K3KACiiigAooooAK8B8Vgf8ACxfEpP8Az8w/+k8Ne/V4F4rBPxF8SD1uof8A0nhrWl8RwZl/u7+RswhmKhRxgA1l60gJA9zW3ZjbFnHJrN1yNVlbn+H8sV4eEq/7XY8SpC1JSMa2OJ4s9nX+ddr5jLaBUGSf8a4mBgbmMHj5h/Ou1jjMsGUJz7VeeJXg5F4K/vJFZHKCUnqC3H41e8OS/wDFW6YjcMTJx/2zaqEqYkkizyRVvw0N3jPTzjoZP/RbVjgoxdVS/rY1i2qsF5r8z1OiiivaPogrm/GsYl0eFT/z8Kf/AB1q6Sua8cFxosOwEt9oXp/utWOIv7KVjDFW9jK5wss8cICjGaqvcXDOQYxsPQ1fjtFChpOT15FTlYNuPl4rwFXpU9FHm8zw3SnPd2Kfgsf8VvaEjnbJ/wCgGvX68r8LKieOLUDGSJP/AEA16pX0VOfPFSPQypWoyXm/yQUUUVZ6Zj+KSB4cuienyf8Aoa15z5OSMMeOa9G8UgHw5dA9Pk/9DWvOY4HjyNxbJzXk492mtbaf5nkY9
XqrTp+rHKiwKcsSccZrLvJWRyWAz2rWEBXLOxOB0NYF9JJJNl0K9cD8ajAJVKjd7nmYn3YpbCW/z3Ikcnk9K2ogsaGSQ8kZrEtYpGnRmyq5HFbHlPNK2SQoIA963x1uZJvQzw997akLCS5bkYUdxWXPuEjjHRq6B5I4gUXG7isSXa07EnADU8FUbb0sugq8Ura6nU/DsY8SSn1s3/8AQ0r0+vMPh2+7xNMMYxaP+W9K9Pr0Veyue9lf+7r1YVxHxC66b9Jf/ZK7euF+Izbf7NP/AF1/9krOqrxaNMw0w0vl+aOFTCMxz1ORVS5UggDoSatE/vAp+7jrVadsgHHSlSvzXPlmFu2UI79qlMzADeAOefpUUK/IxHXPFS7wR8649c+lOaXM9BIaWVs7fxrKvBm4J7CtRwpxtbHI6d6zLj/XMSa6cN8RdN+8VCCUBI5/+vShidxHPpQSSSAO2KVFwK7zre2oA/N+HNek/CLHn64B2FuP/RleaOCirjk969L+EX+v1z6W/wD7UrOt/Df9dTswH8ZP1PT6KKK8890KKKKACvO/ikQP7Kz0xN/7Tr0SvNviznZpWDjib/2nV01eSRxZgr4aS9PzR58r8HHUdKuQEkjA+U9azUPyKR61p2xxtG3gjrVYhWifLNWZYEYZhgnmrKQHOecHrTYouQc57gVaWN8jrhh+VeRVq20TNoQuRCALGFOeOAaGKom0ct71IYztAJPHGfWkZVQAcM1Yqd3q7mjjbY56+YGdlHUHmmacD/akPpuFTXyj7TJgc8GodPU/2nAc/wAY4r3k19Xfp+hlA6IMFmkx1zUEgy7MvJ9KlZdsshHOTkUhTcQRwfSvFi0nct66EBWTuvGK7f4cgh9TyMfLD/OSuQwy4+XINdj8PCTLqeVx8sPH4yV00J80jpwCtiY/P8mdzRRRXWfSnmPxG51+DJ/5dV/9DevObnmZiO1ej/EUD/hIIST/AMui/wDob153OB5jY71vhn7zPl8Q7YqZNYDcZPpXo/wiJOl6/nr/AGr/AO28FecadwZM/wByvRvhCc6Xr/8A2Ff/AG3gpVvjfyOnLP8AeZen+R6LRRRWJ74UUUUAFMlV3hdY5PLdlIV8Z2nscHrT6iuElktZUgm8mZkISXbu2MRwcd8HnFAHmt74x1Dw/faoqapdatHZaZcXMqajZLbMsqFQnl4RDIhJOSAwHB3cjOx/aeteF9XtbfWNUOqwXdhcXDZgSIwywhWITYBlCGP3skYHJzVi68F3OuTF/Euqx30a2s9tFFa2n2dVEqhXY5dyWwOOQB6VPp/hS6/tCO813VhqrW9o9pbqLYQhUfbvZ/mO52CqMjA64HNAGTpera/aSeF7/U9TW7t9fIjltRAiLau8LTJ5ZA3EDYVO4nOc8dKr2XiDXW07R/FEuoh7HU9Rjt203yECRQyymOMq4G8uCUJySDyMCtnSfBtzY3WlC91lr2x0dWGn25twjKdhjUyPuO8qhKjAXrk5NMtfA81vLZ2r6uZNDsbz7ZbWP2cB1cMWVWl3fMiscgbQeBknFAHS6jqMGl2hubhLl4wQuLa2knfn/ZjUt+OK8I1i9i1Lxzr91Ak6xyXcW1Z4HhcYt4hyjgMOnce9fQVeEeJP+SjeJD2F1D/6TQ1cHZnn5p/u7+R0NmFGN2MBQR+VYmtnExLHgitK1YyouDwODWRrbCWUhq8HA02sXdnkV5p0UjLt13XKHvuH867uzYRQAVwtt/x8xkf3x/OuyBOzcp4GK2z2HtOWLHgZ8knIWZQZHfbk7mH4Zq14cCjxdYgLyN/P/bNqhhcMzg/5zU/hrnxVYH3kP/kNq4sA37ZRfQ6Wl7SEl1a/M9Looor6I90K57xi6ppMJbp9oH/oLV0Nc341jEmk2ynp9pX/ANBasMSk6Mr7WMcQ2qTscHLNLK+F3AZqJopwSfM49KszXEVuD7VQl1RQBw3ze1eRQjUkl7OGh89UcU/elqaPhJ9/jqzz1CSZ/wC+D
XrVeQ+DDu8c2reqyH/xw169XuQVopHp5S70X6v9Aoooqj1DJ8TDd4fuR6lP/Q1rzppmhKqQWJJ5Feh+KSw8OXRXr8mP++1rzVrxGJcg+leTjqbnVWl1b/M8nHzUai1s7fqWdzSvycKOoPes+4gW4vieNiDGKc9+GUqmQSO4qsJJTCQhHmMeTWdChUg+bbp/wTy6tWMtNx00gRwsan5T2qxPerDFhOW74NRlEijJP3j6Vmxuu9nf1z+NdMKMKurW34mLnKG3UvRq+4yyPnI71mMxdnIOPmP86nkvGk+VexFQqAGY+prspQlC7kZSaeiOy+HqbfE8h9bFh/4+lenV5l8PGDeIXI/585P/AEOOvTa0jeyufSZX/u69WFcL8Rhn+zfYSn/0Cu6rhviHzLpg7ETf+yVNT4WaZh/u0vl+aOBmPyBh24qGYY+hGKn24wp6YNRlS8ZY9jxUQdj5ZjYVKxhs5x2qf926glcE8c1Xt2OAw+73FWGRGBxRU0lqCIWVV6f59KybkkzP9cVqy4UDHUnFZdyMSt9a68LuXT+IgUAnd70jZDPj1pw689N1I/33XvXd1OhbiHnFek/CIYn1v/dt/wD2pXm4GDk16N8H23Ta57C3H/oys6v8N/11O7L/AOMeo0UUVwHuhRRRQAV5x8VuRpQ9pv8A2nXo9ecfFX7+j/Sf/wBp1dP4kcWYf7tL5fmjzVcqg4zzitKzmGxFI696ggiLDPbOacyiHcF9OKuq4z90+Xcrs2I2CsCOgp4umGBg/MevpWNHdsuFJ6VOt7uOOa86eDd9VctVWjVZzIeDjA/Wnx7AMuQTWYl2xbA6jk1LuyCWNc8sM1psWqq3M+94vXI6VBZnGqQ+hcU+ZyJiTTLU/wDEzg/3s17EVai15foZQd5HRuv7xsDvwaaw3HIO09xViJhLvXupxUDKGfcOGI7189CWtn0OuUdLoblgOTmuw+Hx3TamcY+WH+clccRIDnIx3rsfh9nztTz/AHYf5yV24Ze+bYH/AHiPz/JncUUUV3n0Z5l8RQDr8JJ/5dF4/wCBvXnUxxIwr0b4jAf25CT1+yr/AOhPXnM2BNxW2G+Jny2J/wB6mWNP5aT6V6N8I8f2Zr+On9q/+28Fed6cPmlx/cr0P4Q/8grXv+wp/wC28FKr8b+R05X/ALxL0/yPRaKKKyPoAooooAKKKKACiiigAooooAK8J8SDPxC8Ten2qH/0mhr3avDPEGP+FieJgf8An6h/9Joaadjzs0/3d+qLltIwUBf4qztZKmQ49KtW8ywttY8npmqWpp++5Jrgw0LYi54EpXppFG2/18fpuH866y3ciJwa5S3wJ4+ejj+ddHFxK756jGKrNIqVkVQlaVy9GwUID1P860vD6hfFFif7zSEf9+2rDjcOAQeUbca2PDUofxHpgP3j5mP+/bV5eHpONeL8zvpTTlFea/NHpNFFFfQHvhXN+NmK6NCR1+0DH/fLV0lc541YJosTN0E4P/jrVjiP4UjDE/wZHmkltufdITk9eaRoo8VFcSyzP8i5X1FV5YJgufm/OsacJNLmlY+Tk1fRG14HIPji1/3ZAPwQ17FXjXgTJ8bWZPGEk/8AQDXstd0lZnv5R/Afr+iCiiipPUMXxY2zwzdt6GP/ANDWvJWnjkcqCeBXrHjAZ8K3n/bP/wBDWvLBFGxBBrmq8qldngZrd1kl2/VkDNuIVOtTKwgBH8RqU+XHwMFj0pgAQbpOOeM1i5qStbQ83lswSJ3G+XjHTFZ8tqZZS5zgcjBq400s8hVV/d8EMKZcSZBSIbiBitKTnGXr+BMrdDNVvLZ8fhVhOFXP1pxsiih2zkjkVHGSWYkcV2uUZq8TNqx2Xw8BXxRMD0+xPj/vtK9PrzH4esG8SOR0+wv/AOhx16dULZXPpsr/AN3XqwrhfiKdraYR6S/+yV3VcR8Qvv6Z9Jf/AGSpqO0GaZh/u0vl+aOAkbDHPYU1gVtzu69adcIxIZRncf0p0u18c
8MP5Vinoj5e25FbAqACPlPWp3gG3K5otwoQRk8npTjblEwGY85qJ1Pf3sUo6FaSPC57HhvoKybpSJ3z0rcKHr6jkVj3g/ePn1rtwk7yCOkionX8aHP7wkfjSNxx2pGGXB9a9Lrc6EtbiA5Un3r0r4RDE+t/7tv/AO1K82xnIHc16R8IRibWxnPy2/8A7UqK38NnfgP4x6hRRRXnnuBRRRQAV5x8VF3vo6+on/8Aadej1518Uc+bo+Bk4n/9p1cNJHFmP+7S+X5o4qJ0ijVD1NROiCTyznLDNSeUpUfNy+B9KkCAqN3D4xWHMk7nyxSFoQTjp9aQQurcjg1bUOr4xwO9OG4tgrx2q/bSEVo0kU9BT+WJ3damKscccjk1H5Hy7mJBPalzp6sLFNidwP8ADS2Z/wCJhH/vU58hiMfKO9Nt+L6DHdxXQ3eD9CobnQ2jZd29+KkKpK+ec4qrE5R3x1BOBVtdr98EdhXz1WPLLmOuDurDDER0rrvAAK3Gphuu2H+clcrsKgYJNdZ4DP8ApOpg9dkP85K2wcm6h14ONsRH5/kztKKKK9U9881+Iag69CT1+yqB/wB9PXnNwoWRsetemePSg1uHdjP2Zcf99PXnFyuJZB/tGqw0/wB5JHy2M0xMn5j9Pz+8x1216J8I+NN1/wD7Cv8A7bwV57pfJmz2SvRvhQALHxCB0/tX/wBtoKdV/vGjqytf7RL0/wAj0CiiioPfCiiigAooooAKKKKACiiigArwrxF/yUPxN/19Q/8ApNDXuteD+JGP/CxvEijvdQ/+k0NVFXPOzT/d38i7HGmQz4yAOTWTqTMZBknPJrQAeRuGwoxms7UXDS5x0BFcuFi1V1Pnm9CpASZU4x84/mK6aKUA4x3xXMxOBNF/vCtkyY3EdjWuOp87SGpcruX7WRQshYDrWx4aAHivT8c8yY9v3bVg2/zofXArX8JuW8V6eDnjfn/v21edTp/v7o6sPP34LzX5nqlFFFeofThXNeNxu0SIdjOAf++Wrpa5rxxn+wkwcHzhj/vlqxr/AMNnPiv4MvQ82lmhiOBtqq98jKRx+dOa1BbLkGopLWFQSAPzqKcKS3u2fKOUjY8CEN40tSD1SQ/+OGvY68X8A/L43tlHQpJj2whr2iuuasz6DKVag/X9EFFFFSemYni7jwvef8A/9DWvL/JzgBsYr1Dxdj/hF7zPT5P/AENa8qUTDHz/AFrkrp82jPBzT+MvT9WTFFjG5iCR0zUXNwct8oWhY3I/euGGajkdnOIjgdDWUY6769zzGx5mRPljAJHYU8JHEhZsbjzzTEijiyzY3d6iSOSZ2Z2yhOQD2p8sXs7L8wuxlxcmQgKOKr7tr7ccVbuY444sLgHIrPeT591dlBKUfdWhlK99Ttfh4R/wlEqgYAsnH/j6V6hXlnw3bd4nn/68m/8AQ0r1OtLWSR9Llf8Au69WFcX4+XcdP9QJcf8AjldpXF+PWxLpo7lZf/ZKwxF/Zuxtjv8Ad5fL80cGMuMkY2g8VHMuAoxg9RU5GLgejDpSzKHk46bSBXLGdpI+ZcdBkMYdGOcN29qeRIigctgdadbQl4NwOGzT5C6KQQTjngVnKpebS1LUfduQMhIVuRnHFY1+uJpB710IQsqt0DgYHoayNTRVml+tdWCq/vLETjazMU4Oc9zTScDGenenHAbmmHb8wPrXuo3iSgBcV6N8IhibW/8Adt//AGrXmhY4H1r0r4QHMutn/Zt//atZ1l+7f9dTtwEWqyZ6hRRRXAe6FFFFABXnXxSYJJpDH0n/APadei15v8VgC+jg9MT/APtOrpq8jizH/dpfL80cTG289eeoFTswcbgcH0qjbbjucfwn9KnDbsFTjnpWVSHvHyuxIjtuAIP1qYSZJG3p0qBGYHBBJ9ak80EkbTxWU467DTFMhKghcHqajAaQBiSvtSGXK5AIPeo3eRwCrFRVRg/QGyOR8SMu35cdaZZ4+2RE9m4oaUFypFNtGH2tP
97NdfL+7foETVDkSv67jgVbDg/xbWrPU5kJB5BzVkMpbPQ968qrT2NISLOWBB3k4HSuw8AHM2pk9SsP85K4h2OPlYV2nw7zu1HccnbFz+MlGHhadzvwMr4mK9fyZ3NFFFd59Gec+Ptv9uw5xn7KuP8Avp687vTiVsdya9A+ITKuuwkj5vsq/wDob155dOGkY+nSqw0f3jZ8tjNcTL1JtLYFp/8Adx/OvRvhRzY+IP8AsK/+20Fea6aDiQj0BNej/CIk6Zr+f+gr/wC28FVWj+8bOrLP95kvL/I9EooorM98KKKKACorm3iu7Wa2mUtFMjRuAxUlSMHkcjj0qWoby0hv7KezuFLQTxtFIqsVJVhggEEEcHqDmgDhdDsbKPxLeal4VsY7bSLSzlt5WgG2K+udykbVHDbNrAv3LkZODWTo1la2GleA9esiTq+qzxLfXG4l7sSwO8ok/vYYZGfu7eMV3ejeEtJ0B4zpwvo1jj8tIpNRuJY1X0CO5UdPTiiw8I6Fpmp/2haWAjuFLlMyuyRF/vGNCSqZ77QM0Aefw2sK+FrDxYAf+Ejl1xEe43HzH3XnktAf9gRkrt6DbnrRq9rDP4d8YeJ5QTrunajMtnc7jvg8oqIo09FYYyv8W85zmvQF8I6Emsf2qtiBd+aZx+9fyxKRgyCPOwP/ALWM+9F14R0K81b+07ixD3JdJG/euEkdMbWeMHYzDAwSCRgelAGhqMmoRWZbTLW2uLnIxHc3DQpjv8yo5/SvBtalvG8c+IH1CCCC7N1FvjgmMqL/AKPFjDFVJ4x2H9a+hK+ffF8mz4keJB/08w/+k8Na0VeVjhzGPNQaRfQttUJ361R1RQJcDtU9rdKm5mPGBis2+uMs7HqSaww9KSrHzUdbJbkETfOv++K1BKTvUHk9KwUnBdAPUVeE5De2a761LmNK1KSZuwzBRgdhW34Tl3eMrIdv3n/otq44XWT1+Wum8COX8X2ZPbfj/v21ef8AVuSXOx4aT9tBPuvzPYqKKKs+uCuZ8ckLoSMeizA/+OtXTVy/j3nw5/20/wDZGrKsrwaOfFfwZeh5NPeSO/ynj6VA0kuOSKc8iJ1phnVl4/lXXCCSVonyGr1N74etnxraZ67JP/QTXtdeI/Dts+O7YDpskP8A46a9upVlaZ9NlitRfr+iCiiisj0TD8YKG8K3qnvs/wDQ1ryRbwqOf5V654tGfC95/wAA/wDQ1rxI3CknJqVTVSTTX9anz2bX9tG3b9WXzcSTSAKfl70jzx224jOT6VSF3tB2/wAqiEmCS3TOa0WG6NaHl+9uXg8k7fMRtNTTX4iXamc9OlZovMqNn8qVCAMmh4dN+8tF0D3o7kztJJ8xIqq0oJIqSS4BQqOhFZ2dspPfFdNKnpqXTp817nonwyIPiGXH/Pm//oaV6tXk3wtbdr8x/wCnNx/4+les1z1FaVj6LLFahbzYVwnxGYpJpRHX99/7JXd1558UZDGNKI/6a/8AslRy83ulZj/u0vl+aOPhm3SF2/hYqKeZNgI/AVlGcFTz1OakN1uDEnnispYV3PmFNmxDJIsKPkbQCTVoXaSBTg8+1ZFreKluqE+varKzRt0J5GK8+thvefMjohVsrJl532vn+E9K5zVZM3Dj1OKvyXQjOzPyjPNYd9PmaQ/XFduX4ZwnzMUp+0aSKkrDd9DURbhifWoHuAfu9jzUHnHefqQK+gUD0qeGlYul88d69P8Ag+wabXMelv8A+1K8jE3f6CvWPgydx1s+ot//AGpWWIjamzqw9HkqpnqtFFFeYekFFFFABXm3xYID6PnuJ/8A2nXpNeX/ABgOP7F/7b/+061oq80jlxseahJf1ucRDNsj4/GlViHyvTOTWfHMFTr1qVJij+1bSo6ux8tKk0zUjuVxg5zTjMh9eKzTOhPXk0eYMdelYvDK9yeWRbadFBIzzUTzsw+Sq3mrk80gmHatVRS6DVNkjSLuINR2zYu1H+1VWRx5nWkglIukJ
9TW/s/dZ0xoNRb8jdjbErkdQanMiv1zWYk2GZu+Sae0obPNcMqF2clmi6XI6Hmu/wDhsSU1Anrti/nJXl28jgHmvTfheSbe/J67Yv5yVM6XJZndlqf1mPz/ACZ6BRRRSPqDzD4jlV8QQMev2RR/4+9eb3bgSsR616D8T22+ILbPT7Iv/ob15reSBScng812YWGtz5ycHLFz9TT0eTJmH+zXpHwhOdL18/8AUV/9t4K8q0qYqZfoM/rXqXwcOdG13/sKn/0nhqMRC02/Q6cDDlxc15f5HpFFFFcx7QUUUUAFFFFABRRRQAUUUUAFfPni+Pf8R/Eh/wCniH/0nir6DrwTxQufiP4k/wCvmH/0nhrWi7SucOYycaDaK1vHhCGPT1qjeowGNvNdAlkGIPQd+KoamiiRduMYNRQxKlVsj5qN4PmZz6xHzkwO4rQ8ksxHI7UiIPOT3YfzrSEO1vu8kmuytWtY2rVnK1ijHCxPQ4rr/A0YTxVZepMn/otqw44gIuRzjOfSum8IRBfFGnkHlfMz/wB+2rgq4jmkoiw13Xg/Nfmeq0UUUz64K5rxyhk0AIOplA/8daulrn/GBA0iPIz++H/oLVhiZONKTRjiFelJHj8tgEY7mPPqKb9nXbgdPpWm1pPLICUbGfSlfTJcZGR7YpLGRVlKWp8k6U3siTwBD5fjq156Ryf+gmvaq8i8FQGLxrakj+CQf+OmvXa6ZT53c+gypt0Hfv8AogooopHpmN4rGfDN5/wD/wBDWvD/ACmHVK9x8U/8i5d/8A/9DFeXeRA2CQtYSxHsajut0v1PBzWDlVVu36nPJDJIP9XjNSm12D5jW48caRny0BOOgrOkt5bg948diK0p4v2muyPKlFozSq79oAHNSx2DglmZgDWhHZxh13AEk0+WGZ/uK2PYVcsXraLsK0rGbJAAMCqLw/Oa6RdMcgMx/Ais64t1SUrgcHmqoYuMnZO41z09WdR8LV26/MP+nR//AENK9Zryz4brt8SzADA+xv8A+hpXqdObu7n0eWO+Hv5sK85+KwJj0rHrL/7JXo1ef/E6MyLpig4OJT/6BUxdncvMf92l8vzR5esb7mByO1SrE2cc1cMYCbsc4BpSpZt23GDj61q69z5ZybCCJkjDlcr1J9KsgIygq3B9KvWscZh2uAQfWnvZx7cJgDtivJqYyLm1I2jQbV0YlxAd3U46/Wse6jJd+Twa6iSHB2t26H1rCvI8SyezV6eDr8zsKneEzCeHL8Z5phgwefXNX2jG7IPSmmPdnjvXrKZ60cS7GaYDkYJ61618FgVXWQeoW3/9qV5uIgGxivTPg6MSa37rb/8AtWssRK9JnXQr+0qJHqdFFFeWegFFFFABXmXxeQyf2OB/03/9p16bXnHxVXe2kAdcTf8AtOtKTtNM5MdLlw8n6fmjzCOHnd2BxirLWxJ3Lk8VYt4RsO7v0z61OkLrxgkA1pOvZny8qsm7mcIWA5Xml8o46Vp+Ucj92efaniAE42Vm8ULmkzHMDdlJNKts/wDdNbBhxjEec9aQwuScRnH0pfWx80jnJ4WWTABpY7bNxGQTWnJCTMwKHA74qONP9MjQDgnrXWq94/I1WIlay7FiK03J9aje2dSQFNbdrbbkCZwQac9jJFn5S+ec4ryXj1Gbi2ZqlNrmRz5tmDZ5r0v4Yrtgvx7R/wA5K5L7MSRmI/lXbfD9PLfUVxjCxcfjJVrFKq1E7cvhJYmLfn+R21FFFan0p5Z8TYGm123wDxarz/wJ681vISWIx0OK9b8dxGTWocLkfZlH/jzV5zqVoY52XH6VphcSvaOm+h83iJOliZS6XKWjW+6WcH0/xr074Ort0jXh6aqf/SeGuA0eM+bOAP4ev516F8Ixt0zXwf8AoK/+28FVXqc1SS9DpwE3LFSl3X+R6JRRRWJ7YUUUUAFFFNkkWKNpHOEQFmPoBQA6isnSfE2ka5DdzWF0XjtG2TmWJ4vLO0NzvA42kHPTBpmj+
K9F164a3068Msoj80K8Lx74843pvUb1z/EuRyOaANmisTT/ABdoWq6j9gsr8STtv8vMTqku04by3ICyY77ScUReLtCm1j+ykvwbrzWgH7pxG0qjJjEmNhcYOVBzweKANuvB/Exx8RPEp9LqH/0nhr3ivCPEgB+IviQHvdQ/+k8NXDc87NP93fqjRKybtqHHA/lWTrEJjdAPfNbnnLE/OfuisbVZS67z6mvLwMp+2Wmh4dVJLzMuBQZ4wR/GP51vRIsjt0yoNYlvj7VF/vCti3j2Tu4/jJzXfjfXoZJ66jkAdQo7nn6V0PhMBfFFmMclpP0jasaG2KkMnfr9K6Dw2oHiaxYdzIf/ABxq81VU60Yo68LB88W+6/M9Iooor1D6cKw/FKq+mRBunnD/ANBatysDxerNpEYXr5w/9BauXHK+Hmr20M6rtB6XOX3xLGcY/OqjXUfPGfpSQ2IGCev1qVdNhjQ4B5OTzXyK+r02+aTZ579vUWkUh/hkL/wltmyj7yuf/HDXpdebeGYwniy1H93zMfipr0mvrcE70tDXL1anL1/RBRRRXWd5meIVVtCuQ33Ttz/30K4QaZb9kHHTmu816MS6LcRt0baD/wB9CuR+zuCVGMV8tnmIlSrxUZW0/VmFSgqkruNyg9lBEMhRn61lXKO0xCAjI9K6M2Yc/N/Oqk4jgkI5rhweO962smcWKwfu3tyoxY9PmMgYsPatURxQLgAce9V3vV8zCg8deKsiyMwJk7+hrrxNacrOs7I5qFOKuqSuyobtXfYFI5xzWNeqgupMjqa6aawiyrYOR71y+rRt9qbGNtd2V1aVSpanpoc2Np1IR986j4egDxPMo7WTf+hpXp1eX/DoN/wkszN1Nm4/J0r1CveWiR6uV/7uvVhXA/EolW0vHcTD/wBArvq4X4irubTB7S/+yVMnZXZpmP8Au0vl+aOAQhuP9mpyvzcfdGOKjEYikXH8PBp8G5j/ADrCb6rY+YXY17a3ja3TIGR0p0ttIOVYAYpY428gMv3gKaXn2DO3NeA5Tc21Lr1PTtFQSaKzp8oWQZK9657UABcSjHeumd9ygn7w4Nc9f486T3Ne1lsnzu5xV7K1jI2gE8dTSAdR71M+AwP4UwYAP1r6BMpSuhnXjHavSfhEAs+tgf3bf/2rXnIGBnuRXo/wj5m1o/7Nv/7UqKr9xndgH+/R6dRRRXAe8FFFFABXnfxROH0k+03/ALTr0SvOviku5tJB6Ym/9p1UNzhzH/dpfL80cjaQJLDyOQcirKW7o2Oo68VDpyGWBNvXFbNu2zCyD5vavGxdeVOTS18jwqFJTtfQqiNQoJQ88VJ9nTIG3pWqiRsOh5pxgjx34rxJ5jrazPTjgbq90Y/lIvOzrRtyxUIfyrUaKNW6HmgPGmeDT+utq6TYvqtnZuxzV0qxySKR07/hWTCP9Oi9N1bWqFWknHqRj9KykUC7gA7GvqcHO9G76r9DxqqSqNI37VNx+U4OcitUdApBJ9ay7NH8s7MeYBxmthZgiEv2GTivmMwb59NT2MGly66ECkOA2xgCSOa6LwaoW71IgYykP85KyRNGwHB5Ga2vCUivfaiF7JD/ADetMoqSlircttGdsacYzi73/wCGZ1NFFFfVnacP4xfbq8QwTmBf/QmrzvVpg1w/B4r0Pxo+3VYx3Nuv/oTV5tfFp3cr2Yg1xYSC+tzkz5vMX+8t5k2hkM05/wBiu8+FH/Hl4hx/0Ff/AG2grgdGPlrPn+5j+dd78J/+PDxBj/oK/wDttBXVJf7RN+htlbXtml2f5o9Boooqz3gooooAKRmVFLMQFUZJPYUtFAHkp1nTfEQ+I+m6LqlpdX2ox/6HFBMrNOBZRqduDzyCv1rTOo2nirX9EHh2UObLTboXDICPs3mIipG/91twzt6jYTXo9FAHlmjX9pqdn4B0awBGp6VJG19AFIezWO2kjkEn93LMFGfvZzVezuYX8M6L4UUn/hIbbWo3mttp8yIJd
GV5j/sFASG6HcB3r1uigCpqMeoS2hXTLm2trnIxJcwNMmO42h0P614ZqiXkXjfxCNRngnuluot8kEJiRv8AR4cYUsxHGP4j/Svfq8G8T8/ETxGAf+XuL/0mhqoq552af7u/VGysan52PVQf0rC1HiRh75/CujtoDPAjEkYUfyrJ1qEAllXPIFeNgK8ViORvU8avTfs1LoY8K5uIz/tCtmBW3tuGOeKyE5uI1/2x/OtqMMZsYxyf5V6WNenyOanqy5aSBflP0rT8NyMfF1ggHyYc5/4A1ZEXyqWHPIB9vWtfwymPFtkewD/+gNXl0IxWIud9FvmgvNfmem0UUV7J9IFYXiwsNJQqMnzRj/vlq3ay9dCmziD4x5o6/wC6a4sylyYScuyE4c65e5wYN1jPljNMd70If3Y/OtmWWBGxlc1UnuoAuNy5PQV8ZSxUptWpfmclXDRgneoV/CzO3i613jB2vn/vk16bXnHhtg3i23IAxh+f+AmvR6+1wTvS2t/wxGXq1OXq/wBAooorrO8oa0CdJmA65X/0IVyP+kBgAgx3rrtaJXSZiBkjb/6EK5YXIHBUZr4ziK/1qNlf3f1ZcVF7uxHtnI+ZcVnSW6/aWZic1qNcMRhUzWPepM0hGGXI6ivNwHM5tXUbnJjeVQTV5E4ESjoPyprXEhU+WoNVLe3cOu6Rjz3rZbyY1/hH4V0YhwoyS+NmFBSqxb+FGOzXjkeZGAvcg1z9/wDNK5Y/xYrrpbqFl2qykniuV1IZZ/lxl69zJ6rlN3hynl5hBRStLmOm+HxB8RyY/wCfN/8A0NK9Mry/4c/8jFLnr9kf/wBDSvUK+gSsrHpZX/u69WFcN8RCA2mDufNA/wDHK7muH+IYy+mDviXH/jlTU+FmmYf7tL5fmjip4iyy4H0ogGUxj5gSPxp0hZVCdSTg1JbgCQ5GBnP4mvOcmqZ88knI0FSQQrtGeKYTMBygq1Gsu1SqFlIzmnMGwcx14ftrSd0mep7K66mbKMrkdutc7fH97Ie4yK6yWNTnnB6kVzWpRATOR0Ix+Ne5ldVOVjzsVBxszHYf40AZXmnEHzVXHFNfKuwAzX0l+hknfQaOF5r0f4RjEutf7tv/AO1K87cZ3Y9K9F+EoxNrX+7b/wDtSoqv92zvy53rr5nptFFFcR9CFFFFABXnfxQAL6Sp7rP/AO069Erz34nDM2kewn/9p047nFmP+7S+X5o43TC6bHUZB610kEkbkE4yfauc08sij5cqe/pXT2XlmJScZPSvm85aTcmvuPKy5NuyZZVEI6nmjyVx948Gn+Wp6N1pRCMD5zxXzDqdbnvKn0sRGNAxJJ5pknlqp5q15S92qOaOMKckU4Vk5JNsJ0mot2RyWpBfObk9f6VnWoH2qM571tX0SPI4yM5rM8jy7qIA195hKqdHl8v0Pkq0Wqjfmb9rHzkdQa01jk3fdG3FV7CEOM5wavh2QlduQO9fH42u3Uaj0PosJRSgmyPBHUCtnwkMX+pHHBSH/wBnrK3k/wAFa/hQk3+o5XGEhx+b11ZHJvF/JnRKKUo27/ozqKKKK+zNzh/Giu2px7VB/wBHH/oTV55elo95KgZavS/Fis2oJtXP7gfzavP9TtnfjaRk5ry8PWX1ucX3Pnszp+9zFLR/me6z2TP867v4TjFh4gH/AFFf/baCuK0aEpJcg/xJj+ddx8K/+PTxD/2FR/6TQV6DkniJpeX5F5Wl7W67P80d/RRRWh7wUUUUAFFFFABRRRQAUUUUAFeFeIE8z4l+Il9bqL/0mhr3WvDtawPib4iPf7VF/wCk0NF7RbXY8/M/93+aOltbV/s0Ow4+XBwKo6nprs2M8dTxW3pcwjtRv5yKg1O5XOQD6V8HQxVeGLaj3ZVbDUXhVJvXQ4h7NobmMn++O3vWkrkTv8p4ORTLubfdxrtOMg/rTjMq3Dj0r62c51IrmWtj51Wi9H1LE8ywWqOse7e2CB2zWx4Ubf4ntT1ALnP/AABqw
Ms64zwDn8K2/Bu7/hJoB/CNwH/fDVjSpqLXe/6o6qEnKtD1R6hRRRXqn0wVjeJUeTTo1RtrGYc/8BatmsXxNN5GmxyYJxMOn+61cWY831WfLvYUuW3vbHNLYEkF3DH6U5tKicZIXI56VD/aDN0DflSDVHR9pVz+FfCcmMvoyefBrdE+g2v2fxPb++//ANBNd7XC6Jc+f4mthtYHDnJH+ya7qvtMnc3hr1N7/ojLDqCUuTa4UUUV6h0GfrZC6RMT0BX/ANCFcg0kZcnArrteAOjTg9Pl/wDQhXHBIj/+uvkOIIx+sxbv8P6slylsiVJUB6VWvpCzEKpzjtUwWIc8fnTJHRSCBk9OK8SilGopJNk1XKVPlbsUIoJ2niO8hQeRjrV9bKQrh3z+FMafGAFbP0pf7QLDKq4/CumtPEVLOKSOelGhTupO4p06MDIC5HtXO6lbEM3PRq31uZWz1x9KxL9pJLmRecDBHHtXo5U68az55HHmHsXTTgrGr8PkKeJpc97N/wD0NK9NrzXwFuHiWUN1+xyf+hpXpVfXQd4pnRlith/mwrivH4zJpn0l/wDZK7WuH+IbFW0zHpL/AOyVNZXg0jTHu2Hl8vzRxk+WDMp6Gp4UyQe2RVNHIAHqasb5Am2NsHgCvOqQfLyo+dhJXuzprYhIgrCpGMXcCkttjQRiT72MHmntBCwIIGD718NUklUblc+uhGTprlsRPbROC6qM9DXMarp7h2APVy3TtXVEiHdj7voKxdSuVa4KAHIwf1r1sor1o1vd1R5uZUqTp3ejORmi8pxkd6ryKd7n3xWtcJ5szegYYqhcJiRgPU197Qq8yV9z5xOzKyjBA6nvXonwm/4+Nb/3bf8A9qV5+q4Y56mvQPhOMXOtj/Zt/wD2pW03eLPRy13xC9D0yiiiuQ+kCiiigArgPiUu6XSvpN/7Trv64X4hpun0v2WY/wDoFTKXLFs4sxV8NL5fmjk7WExwAbCQRWxbKqwqMgHtTbBVa0iDD+EGrDQpkEYGBgV8djMV7Sbg9NTlw2HcIqS7Cjdn/WUoEnH7zvTfLI7ikw4/iFcLSezR1ptdCcB8nMnaniMMMMwNVhvz94YqRA3dhWE4Napm0Jp6NGXe26mdwMDmsqSFku42zkbttdDLAJJGY4yazry3ZArAjAbNfQYHFqyg30seLisO7uSXU2bePbCCpwac0jAkkE0xN5iVkIGRzQxcd68Brmm29T2U+WKSHCc5HyGtvwm++81A4x8kP83rALtW74QObzUT/sQ/zevYyamlik0ujI525xTfX9GdVRRRX151HKeJWI1JFxnMI/m1cVqTESFdp4rtPErFdWjx08hf/Qmrk7+MyOz18tdQzGbZ5mPTlT0MjSwWnk4x8ua7H4Wf8eviHjH/ABNR/wCk0FcraKY7ibnrGcV1nwwx5HiLH/QUH/pNBXuUZc2Il6I5sqVpfJ/mjvKKKK7T3AooooAKKKKACiiigAoorm9X8aWOkXt1btZahdLZRrLfTWsQdLVGGQXywJ4G7ChiBzigDpK8K8QqR8R/Ejjr9qiH/ktDXucciTRJJGwdHAZWU5BB6EV4b4ikVPiH4lB/5+4T/wCS0NNX6HnZp/u79UdhYhGtYwSeBT7uKPYW9a537eYVChucDFRXepFrbyy2PQ5r4xZRWlW509GzH+0qSpcjjrYt3cUYk9+DWSyKbl35z0NRNfNM2SeRgfrTnk27iOSRmvoqGHqUY8snqeNVqxm7pFq3fMcufunGK6PwcVbXIiP77/8AoDVyEc22EEc85NdL4HmEniGBQf7x/wDHGonQalzea/NG+DqfvoLzPU6KKK9A+pCsXxNj+z4c9POH/oLVtVzXjmTy9BVs4/ff+yNXNjKftKEoLqjOtPkg5djG3xCnLJFXEjUmCE7zge9TLqLFVO88jjmvmJ8PVF9o8yOcr+U9A0dE/taB16nd/wCgmusrzXwhqPm6/aQM2S6uR+CmvSq9vJ8LPDUZU593+
SPRoYiNePPFWCiiivVNjO10btGnHqU/9CFcatnH79c9a7DxA23Q7g+m3/0IV5sNZOQOOa+bzfDV62IXsntFfmzkxFejSkvaI2vscfqeuetS29rEjyNz8xBPPtWB/bJbGMc0i6scgZ6nFeTLK8Y4tNmUcwwsZJqJ1YjiAzVZWiGa5x9WIRju+771nPqTbxlyPxNOjkFad+Zjq5zSVuWJ2vmxA1lXjRrdbqxP7QbPzMRx61UvL7Eq5bnHFd2DySdOpe/Q48TmiqwsonbeEGVvFb7f+fOT/wBDjrv68t+HMvm+Jpuc4s3/APQ0r1Kvp6NL2VOMOx25dPno83dsK4P4jZ83SwP7s3/sld5XA/EuURNpRPfzgP8Axyrkm1ZDzH/dpfL80cVGSQwboeBU9qwMmT905qpFcqYgDjIaoYb1UKDPJbiuaVCU1JWPmYys0d0baOQRSZOVHHPtTxAFHBP51z6auyIFbAOMilbXNhCtgEjIr5KWVYxuyd0fQRzDDbtamzKfLwD0PArCvwPM3djuP6U9dZ3u3mbQq85rOvr5PO2bu5r0svwFalUtJHDjMVTqx90aEO4sem4YqrJFmeU9u1PF2r7UB5B5pskqhyAehya9+EZxep5cmiqY2IPrmu9+FnF7rg9Ft/8A2rXExSoDyevIrtvhac32t/8AXO2/nLXWpNqSf9andlf+8L0Z6TRRRWR9OFFFFABXE+PsefpgPdZh+sddtXA/EmQRNpbE44m/9kqKkXKLijizB2w0n6fmiO1tIzZRLz8oA60stsABjsPWsO01rMCjI3AYI9xTJvEJWPeApGM18U8rxzqu3czWPwqppNa2Nny2zjtR5TVzyeIJSgYomTSr4jZjjC5/Gul5RjeyMFj8P5nQeUaURmuefxBIOirjFRr4klcZVEx+NNZPjZK9kH9oYddzpREzcnrUVxbsyEdqxk8QOWxtXFTnWvMBUbcis1l2MpyTsN4zDTja5rqki2y7MZ29/WkcTc4x04+tZP8AbJhKpIAATgU/+2lIJJXg4rN5fiU78qZf1yg1a7RoYm3dsbf1roPCAb7Vf7sZ8uHp9Xrjm1lACSRgV1vgqYTy3z/7EX85K9HLcPWpYhSnGy1/IqjWpzqxUXd/8BnXUUUV9Kekcj4o8z+1Y9uMeQPz3NXOzwvJEd2M+1a3jXUFstVi3EAGBT/48wrm31cFivy8DmvmcThcQ8TKcI6X3PKxNeipOMmRrEVuDn0rqvhsgjXxIo6DVR/6SwVwzaoHnYjGApya7P4WzfaLXxDL66qP0toBXr4ajVhU5p7WMstnB1JKPb/I76iiiu89oKKKKACiiigAooooAK891Q32kX/i+3XSL+9bWlWSxe2gMiM5gWEo7DiPBTOWwMNXoVFAGNbaBG3hbT9HvJrj/RreKJntrmSBiyKB99CrY49axpPhb4UmuZbiW21CSeUhpJH1W6LMQABk+Zk8ADn0FdlRQJpPRnHH4X+FT1t9QOP+ordf/HKQ/C3wo3W2vz9dVuv/AI5XZUUWJ9nDsjjB8KvCQ6Wl9/4NLr/45S/8Kt8KH/l2v/8Awa3X/wAcrsqKA9lDsjjf+FWeE8Y+y3+PT+1Lr/45Ulv8NPDNrKJLePU4ZB0aPV7pT+YkrrqKAVOC1SRzn/CEaV/z9a5/4PLz/wCO0f8ACEaV/wA/Wuf+Dy8/+O10dFBZzn/CEaV/z9a5/wCDy8/+O1FP8P8AQrqPy7iTWJkznbJrV2wz9DLW/qF9Bpmm3V/dNtt7WF5pWxnCqCSfyFYGk+Kr261Oxs9V0Y6d/aMDT2TC5Eu4KASjgKNj7WBwNw4PPFAmk9GYGgfC/Q5NOlOqWeoCf7ZcqobUrhf3QncRcCT/AJ5hOe/fmtX/AIVZ4TGP9Fv+On/E0uv/AI5U3iXxlJoWvWelRQaZuubZ5/O1HUvsiDayrtB8t9zHdnHHQ109s8slrC86RpMyKXSN96q2OQGwNwz3wM+goJ9lDsjlofhp4Ztpl
mgj1OKVchXTVrpSM9cESVb/AOEI0r/n61z/AMHl5/8AHa6OigpRUdEjnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OigZzMngPRpozHLPrToeqtrd4Qfw82qn/CrvCv/PvqH/g1uv8A45XY0UrIlxjLdHHf8Ku8K/8APvqH/g1uv/jlH/CrvCv/AD76h/4Nbr/45XY1BeS3ENnLJa24uZ1XKQmQJvPpuPSiyF7OHZHBeI/hlosfhjVX0m21I6itpK1qF1O5YmXYdnBkwecda0B8LPChUFrW/wA45/4ml1/8cra8Ma5Nr+lzXNzZLZzw3c9rJCs3mgNFIUJDbVyCV9Koav4rvLTUb+10vRjqK6ZAs965uREVDAsEjBU732jOCVHI55ph7OHZFb/hVvhQ9ba//wDBrdf/ABykb4V+EmOWtL4n31S6/wDjldXY3sGpafbX1s2+3uYlmib1VgCD+RqxQHs4dkcjbfDXw1ZSGS1j1OCQrtLRavdKcemRJ04FW/8AhCNK/wCfrXP/AAeXn/x2ujooKSS0Rzn/AAhGlf8AP1rn/g8vP/jtV7r4c+Hr7Z9r/tW42Z2+brF2+3PXGZeK6uigGk1ZnGf8Kr8JDpa33/g0uv8A45Sf8Kp8I5B+x3uR0/4mdzx/5ErtKKLk+zh2Rxx+F3hU9bbUP/Brdf8AxysrxB8MNFTT4W0y11Frn7ZbK2NTuWPkmdBL1k6eXv8Ap25rutTnvrayMmnWKXtzkBYnnEQx3JbBxj6Gq3hrWf8AhIvDOnax9n+z/bYFm8rfv2ZGcZwM/kKA9nDsjC/4Vb4U5/0a/wCev/E1uuf/ACJSH4V+EmOWtL4n1OqXX/xynQ+Nbm78W3mh29rpK/ZLtbdvtGq+XcSLsRy6Q+UdwAfj5uSp5FdjQHs4dkcWPhV4RByLS+B9tUuf/jlL/wAKr8Jk5+y3+T/1FLr/AOOV2dFFw9nDsji/+FVeEv8An0vv/Bpc/wDxyrFr8OPDtiztaDVbdpMBzFrF2m7GcZxJzjJ/OusoouNQindI5z/hCNK/5+tc/wDB5ef/AB2j/hCNK/5+tc/8Hl5/8dro6KCjnP8AhCNK/wCfrXP/AAeXn/x2j/hCNK/5+tc/8Hl5/wDHa6OsbWNQ1uzmxpmiQ3sKxeY8kt8IOeflUbWycAddo569aAMHVPBkaajoq2Vzrxt3vGW9xrV2cReRKRnMvH7wR8j+WauXXw58PXuz7X/atxszt83WLt9ueuMy+wrd0bVYNc0Sx1W2V1gvIEnRXGGAYZAPvzXO2Xjia6lsLqTSDFomo3RtbS++0BnZiSEZo9vyqxXAO4nkZAzQJpNWY1fhb4UX7ttqA+mq3X/xykPwr8JkYNrfken9qXX/AMcrs6KCfZw7I4z/AIVX4TH/AC63/wD4NLr/AOOUg+FXhEHIs77P/YUuf/jldpRRcPZw7I4z/hVfhP8A59b/AP8ABpdf/HKQfCrwiowLO+H01S5/+OV2lFFw9nDsjjB8LPCY6Wt//wCDS6/+OUo+FvhQHIttQB/7Ct1/8crsqKA9lDsjjm+F3hV8brfUGx0zqt0f/alZc/wx0YeKLGOK11H+zGs7hrg/2nc484PD5eT5mc7TL/kCug8QeItW0KO9vjoUc+k2UfmzXH20LKyBcsUj2kHHPBZSccdq3bi6WHT5btV3qkRlC9MgDP4UWD2cOyOVPwt8KEYNtf8A/g1uv/jlWLf4eaBabvszavDuxny9Zu1zj6S+5/Ok8G+LbnxXAl0bfSY7d7dZStpqv2maNmAISRPKXacE554IxjvXV0WGoRWqRzn/AAhGlf8AP1rn/g8vP/jtH/CEaV/z9a5/4PLz/wCO10dFBRyVz8N/Dl6wa6XVJ2AwDLq92xx6cyVF/wAKu8KZz9m1DP8A2Fbr/wCOV2VFBDpxerRxY+FXhIZxaX3PX/iaXP8A8cre0Dw1pfhi1nttKhkiinl86TzJ3lLPtC5y5J6KB
17VrUUDUIrVIKKKKCgooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDO8QaX/bfhzU9K3+X9ttZbcP8A3S6lc/hmuWgt/Ed/qWk317of2ZtEtJiI/tUbfbLhowgWMgnamN3L4PI44Nd1RQBzmq3V+0cQPhBtRaa2HmDz4NsbH70bl2GR7qDn0qz4R0m50PwnpmmXkiyXFtAEcoSVH+yCeSB0HsK2qKACiiigAooooAKKKKACobuaSC0lmhtpLmVFLLDGyhpD6AsQAfqQKmooA4bwtJ4h0vStXSbwtdJcPfXN7BHJd24EolnLBMq7YIVsnIxx1p+pWOvaZrWvz6VpQ1CPWYY9jidIxbzLGY/3gYglMBTlcngjFdtRQBn6Dpn9i+HtN0oP5n2K1it9/wDe2IFz+laFFFABRRRQAUUUUAFFYc/jTwra3EtvceJdGhnicpJHJfxKyMDgggtkEHjFaFjqllqYZ7GcXEQVWE0YJicN0KPja/T+EnHegBdRurizsnntdPmv5QQBbwuis3PYuyrx161y3gn+3tH8G6PpV34dniuLQQ2s3mXUONmPmlUqzZC+hwTniu0ooA4fxRYanr8M2k23hr7O73Ubpqsk0OyMK4bzVAbzN+F4G0c98V3FFFABRRRQAUUUUAFFFFABXHeMv7evbu30q00a9udFlj3X01lPAksvJHkDzJEKgjlmHODgYySOxooAy7We6iXS7eHRXtrV4mEqtLGDZhVGxNqkhs9PlJAxXG6d4f11NO0HwzPpojstIvY521Hz0KTRQsWjCoDvDE7AcgAYPJ4r0aigAooooAKKKKACiiigAooooA4bxKuuan4hFnP4cvrzw7bbJAtrPbAXsvB/eCSVSEU/w4+YjJ4GD1E13fefLEukmSD7IZVdp0G+XJ/cle3H8XTmtGigDjLLT9Q1Dxjp2rtoP9i29jbTQyGSWJpLjft2piJmGxdu7k9cYHWuzoooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDk/Ekaf8ACZ+DfkXm8uc8df8ARZa57xRqGowr43jtdQubcwHThbtHIR5G9gGKjoM9/XvXpElrbzTQzSwRSSwMWhdkBaMkFSVPYkEjjsail0zT5zOZrG2kNxs87fCp83b93dkc47Z6UAcQuiTv42vdB/t7Wxp50yK8x9ufzFmMjpuEmdwXCg7Adue3asOHXde1+28J2zybhd6MLuQ/2m+nm5mBUH95GjMSBztGPvZOcV6wLW3F2bsQRC5aMRmbYN5QEkLu64ySce9VJ9B0e506LTrjSbGWxhwI7aS3Rokx0wpGB+FAFPwiNSXw7Amq3UFzdI8i+bDP5wKByFBfau5gMKTgZINblQ2trb2NsltaW8VvBGMJFEgRVHsBwKmoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAP/2Q==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQEAZABkAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG2AkQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooqC8ne2sbieKBp5Io2dYU+9IQMhR7npQBPRXA+GfG99q2q2dvNc6RexXFq9xcLp4cPpzKAdk2WbkkleQhyDx1xJpni3Wpk0PVr63sV0fW5hFBFEHE9uHVmiZ2J2vuCgEALgsOtAHdUVwg8Yaz9mXX/Isf+EeOo/YvLw/2gJ53keduzt+/zs2/d75pdS8Xa1BHrer2dvYto2i3BhnikVzPOECmVkYHau3JABBztPIzQB3VFVL/AFXT9KsvtmoX1taWuQPOuJRGmT0GScVkf8J94O/6GrRf/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oat
E/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FUf8J94O/6GrRP/AAPi/wDiqAOiornf+E+8Hf8AQ1aJ/wCB8X/xVH/CfeDv+hq0T/wPi/8AiqAOiornf+E+8Hf9DVon/gfF/wDFUf8ACfeDv+hq0T/wPi/+KoA6Kiud/wCE+8Hf9DVon/gfF/8AFUf8J94O/wChq0T/AMD4v/iqAOiornf+E+8Hf9DVon/gfF/8VR/wn3g7/oatE/8AA+L/AOKoA6Kiud/4T7wd/wBDVon/AIHxf/FUf8J94O/6GrRP/A+L/wCKoA6Kiud/4T7wd/0NWif+B8X/AMVR/wAJ94O/6GrRP/A+L/4qgDoqK53/AIT7wd/0NWif+B8X/wAVR/wn3g7/AKGrRP8AwPi/+KoA6Kiud/4T7wd/0NWif+B8X/xVH/CfeDv+hq0T/wAD4v8A4qgDoqK53/hPvB3/AENWif8AgfF/8VR/wn3g7/oatE/8D4v/AIqgDoqK53/hPvB3/Q1aJ/4Hxf8AxVH/AAn3g7/oatE/8D4v/iqAOiornf8AhPvB3/Q1aJ/4Hxf/ABVH/CfeDv8AoatE/wDA+L/4qgDoqK53/hPvB3/Q1aJ/4Hxf/FVq6ZrGma1bvcaVqFrfQo+xpLaZZFDYBwSpPOCPzoAu0UUUAFFFFABVTVbJtS0e9sUne3a5t5IRMn3oyykbh7jOat0UAcRpPhXVV1PRpdRg0i1t9Kt5IF/s8uWuQybMMCq7F/i25bkDnim6Z4S1qFND0m+uLFtH0SYSwSxM5nuAissSupG1NoYEkFslR0ruaKAOEHg/Wfsy6B59j/wjw1H7b5m5/tBTzvP8nbjb9/jfu+72zS6l4R1qeLW9Is7ixXRtauDNPLIzieAOFEqooG1t2CQSRjceDiu6ooAQIoUKFG0DAFeB+KAo+IviQbRj7VD/AOk8Ne+14F4qGfiP4k/6+Yf/AEnhrSl8RwZn/u7+Qy/jX7TuIGNq4/KqN5bIlvEQo+YVpXa+ZLGf9lSfyqnfvi1jbtg4pYeTtBHzcW/aaGXbosc6HaOGH867m1iSNGJUYdQQK4eEEyoPVx/MV3wQCBH9FArlzqVuRd/+Ad1m58xzGpQKt0WCgBvmx71CEUL90dK0dUUNcp/u8VTYDds9s11UJ81KJ583rYpC2U4GBT/IU5O0ZXjNTYP8NTYzCD3reVRoHUl3KKxqSF2jg+lXvs6RoflHTNJBD+9UD15q1Nxx6nj6VjVq3kooG2ylGkcah9g9KYyKzsCvH0qcDOwDpSzFTwO1NS94m5iXKgSsdvU46VuCBBoiOqgFm3H9KyLo8kDvW4Af7Atz3INaYqTSp+v6HZUd6af9dDLdF2ONo4XP61SjRVDnaPU1eJPlSHvsqtEQVkb/AGf8K6IPRipNqLABcIABQI1A24GAM05AMRr+VPZQcqOw5p3Bys7DVt0kQAqMZ5q0iIi7Qo5pscZ+UL3OanwEDDvWE530Oec29LkbBSGbaOnFMVELAbRxzTywDKB0AzSBed/96ktiVsRtEm8naM5FdRaW0cluG2DJQZ/DFc24BYH+6c12Gnqr2yegVf5V5Wb1XClFo6sMueVmLLGgWMbRu3c8Vxt+kZnIKjIruJUDXW0dQATXHanERcuT1AOK58kqJya8rmuJXLJM0PKT7NbDaMFST+RrDuYUywCjg10PlN9gt24xtrCuY9twSvQnmvTwU7ylr3/NnKrxkioY0B+6M0jIoQYA5qZPn+b2FGzcBntk16XNbc157OzKAhXsBgU4W6ZPA9ateSRgDp3pCvVu/Sr5zf27ezK6ookwQOBTWA3bQBxinuRuBPrSEdc+tUaJ9WNVEIJwPSmmNcgYHBzQM7T9eKeAVYE96Zeq6jNqliNvfP50KirlQoqTG
7H1zTVOG5655oC90W9OVTdYwOc/yqeS3DTfdGQah08br+P8f5GtPaN2B2Y5rjrT5anyPPxEnGd12JtLtI3uApUYDA10wtY0t/L2qGRcA+9c9pDGO7wejA5ro59wZWPQ4H418xmspuuo30OjBpcjk9WRrboZlBQfKOuPauZ1iOM3T4UcGusWVXhZhnrt/pXJ6wQt2UXoOKrKJSdd36IMWkorl7mNNGAFwBgA0BF8xPlGdtS3Cgpj1HNIoyyntjivqk/dMFL3BAihphgdv6VWaJPMD7RxkCrXT7QR14/pUD8qv4mnD+vuNKbaf9dhqxrnO0VYCL5ecDJpkYwce1WGVWTP40pvUzqT1KKoM4wMg1atERbpPlH3qh6P75qaAf6UnuRTqaxZpUldMmvok+1PhR8pIFVLqCOXaGUEZB/EHIq5dhmuZT7moJvvfhWdJtRiY05NNNMrPGmQCBSiNSOQDmnsmcZ60KgCj0FbX0N+bTchESBANoxuqbykLHgUn8NSgA/N36UmxTm+5G6hkjwOOf51XdBhuBVxh8g9AP61WkUDrTgyqMtbE2nFfLnUgfdr074NKE0XXVAwBqp/9J4a8zsE3eZjuor0/wCDwxpGvD/qK/8AtvBXLiHqztwEv9qmvL/I9GooorlPaCiiigAooooAKKKKACiiigArwDxWSPiV4kx0+0Q/+k8Ne/14B4sYL8R/Enr9phx/4DQ1rR+I4cx/3dik7mU+qgfpVPUDiGGMdqu2nzx7T1HSqWojakXqQamj/FUex81TXvozE8wXEe0A4YH9a7+Fw9vCnfHNcNb83EZP94fzruooR9nTaea5M7cbQv5nem3KyXQy76MG6x2A4qjs2jcegGc1o3QImAPUKBmqUhBVBnsRSw8nyJHn1F7zK4ddpxVqEDH1Garx7MEnHFW4UXP3vvDit6zSREVqRQ2xyzrk5OfzpZ0b5yBztC/jzU0bGFpH6oRx7VFcZZlweVG4j1rJSlKepTSsUizLgD1oMLAeZz0q1KgSHeRzkVAzkJg966oz5vhJ2Me66pW4jN/YFvuHzFelYs4LbQRzmtxSH8P2mOu4j+Va4p6U/X9Gdk/4SM7+CTP9yq0K5aYjoU/qKtueZB/s1Wi6S4H8H9RW8XozOm/dY6JdqR56nFSOh7d/5UYJijwMkAVMVO4ccEVLlrczlJ3uPiQhlxUcv32Jq5bKd3K98fpVC8/17AHocVhTlzVGiIq7EP3VTuRipXODx6YH1quh9evarUabmUH+E5Naz03HJWYjxlUUdzj+ddXZxGW0jTkAgZI9q5iQkvnHfArrNLcG3jzx8leHnE5KjGS3udWCSlUsyxuDPu7niuQ1fAmkI+9XViJ8scEA8iuP1XPmSMTjnNc+SxXtnZ9jbGNvluupvFSdHt2HZcGufnUeafeugXP9jxejJmufkXLjnvXfl+jn6v8AM5K+8fRECRYkwPpShCSMj2qVVxKWPGGzTkGSuRzur03MxcmyPyCH4HVc1G8W4Zxz3q+wCx4J+bPFQsNpY9Q3NRGq2HM0ZE8eWAPaoiSSa0JlDYPQ1QYAkc4wa7oSuj0KNTmVhg3FORzil3gqF/iApc/IQetG0Lz3xWhvddRUwpxTMHAJ6k08r0NNBz8p7UAu5b0wj+1owD2P8jWuwzkdyxrI04gX8Zx68/ga0wSWP4/zrgxK/eX8v8zgxb95ehe0jH2hg3VuK6gMsuQOoGa5fSB5l0R7gg10nl48shiMEZx3r5XOEvbK71OvAXUH2IyVKIB2OT+dc1rUYF8z5PI/+vXSzAKgOcEN0rmNbJ89iOehrfJ9a115kYvZIyZE+QD1FOQfKD7CmSkqoPtili3GJDj5sDj8a+p6HK0+W4/GDOB6D+lVZgRGpA7kH86vHH7712j+YqnKTtUY45zTpvX+uxVJ6r+ug6IEqM9cA1b2/Lx1qtHyMj0BrRCKIi3cVlVlZmVV+8ZEi7ZyfSprZv8ATY8/3hUdx/r3PbiiA5vIg
P71bvWHyOm14X8jTuI8Xk4/2z/Oqd1gLjvxV+Q5vLokcKx/mao3OG471y0G9L9kc0f4hC+BgZ5pr549MfrTmZcqCeT0pGYYrqRvG+gmzKketPC5I9jTEOU3Z4qYAZH1zQ3YUm0G3AwfSqkhChs9qvOMrn2rNkXeHGetFLUrD6vUu6f1fHQrXp3wh/5BWvf9hX/23gry7TSS0gHZc16n8JBt07Xx/wBRX/23grmxPxWO/AK2LkvL/I9DooormPcCiiigAooqOeFbi3lgdnVZEKExuUYAjHDAgg+4ORQBJRXlWrST6BqXibUNHv8AVHg0HSW3JdajPcI93INy5WR2HyIFP/A/atadrvwbrdlHFqV/qMN3pt3LNHeXDTZlhVGDrn7udzAquF5HFAHf0V5zpTX+lnwbqj6vf3kutsIr+KecvG5kt3lDIh4j2smBtA4POetVbC81EaHoPi1tVvnu9R1SKKe1adjB5M0xjEaxfdXYCpBAzlTknNAHqFfP/i7H/Cx/Eef+fmH/ANJoa911G8nsbQzW+n3N/ICB5Fs0Yc+/7xlX9a8D1yeW+8ea/PPZT2UjXMWbecoXTEEQ5KMy89eCeta0fiOHMXagzRt0VAp4Bxms3U8CNWPPXFaJyGGOwAP5Vl6j9yLJ6g/0rLDK9VNnzVP4kihbZaaLnGXH869FijxbpjnHJ/KvPLcA3UR7B1P616LBnyHweqZX2rg4hbSh8z06KTmzEkVt0zO2TvJXPpWdJxckfwg5FX7nzNqIT+82cn3qhJgSlT9K3w21zy6m4KikcYGTzViIELjuvAPqKqopLHHTOanWYhD6gf1rWom9EQidpEw0XHHWoC+XTjljyfQUzbuOc/OfvUhbhz3A2ipjTS2G3cVleW6bLHywnTtmgqjRcgZFMe4Eb+WoOWHWjy2YfL1NaWatfRCZk3R/eDHvW2qqnh61OOSSf5VjXi7JOOmSa2pPm8OWmPfP0rbFPSl/i/RnVo6X9dzKlbDOmMZTOfxqtAx3yr6R/wBRU1wf3pH+zioYB+9l9kx/KuuK90dNJQZct8nae2xf5VYClHXcc5OBVaJ9sYA7KP5VeUq7pu7H9RXNVbTOWfxFmBMhsnBIIH1rJu0xNjOSDyfXitcMDyOwzWTM2ZMnuTWGFvztlJ22IolPUnr09quQqSQRwScEeuaqrlhgduauwMozJjoMV0Vm7Cm7sklCjaox9a6XTlH2ZB6gAH8M1zs0Y8lW75rprEKtrbqOw3f5/Ovns2l+4jbuzswC/eMlaTLkKcgZ6VxGrE+dIpPSuyG2OUgA/O5FcbqvzXMvvkU8iilVduyNcW7uN+5u7saFCQeq/wD1qxfLYr1565/GtglU0KFcchf61mQvulYH7pFduEulNr+Z/mcVfVx9ERy/fJHSlBAmY9sgih/usARUZUsAy8ZbH4V3JXWpgTyENIDjAAqJhtByc45q1LEoXPqADVcr95W9P0qKck1oNoo3HUEHHINZm7Mzj0rWmjHH6VmMgR2PcnmvSotWO7CyVmhDyBSEFs80gzuGOlG75wg64zWx12fQeXBfaKYV4JzyDz70qKNxbHJOPypM5U+tAlo9CxZZ+2REcYBP6GtZc7WasuwB+3R+mD/I1tKuSw9wa4cVK0jz8X8a9CxpbGK7Cgc+tdKMggE55HFc5ZA/a8j+EV0ZwwU4+YnBr5bNbOqn5HVgfgZDOMktnK5zXM69kTcdwK6SbmDCcdzXNazIJJlHPQfyroyhP2q8icS1dGZNyg9zSxZBUZ7Uh5UE9uKUY8wD0r6bpY5fs2JEG4Te6f1FVZOAFPpVuI4WVjzkf1FQyx7iCKUHaTCErS1Eg6e3FaXSFm6jpiqCDDH0q2rlkJz8nTFZVtWRUd3czpgDM1NtP+P+In+9SzsA7n0OKbbc3sWeobNdP2H6HXH+G/T9DaugFmn2j7zEE1mTfMR2xWvOoEkwHdjWNMflPrXHhXdHLD4yEkF245U8H8KCQdp7UD/Ws
T6cU3HQehruOtIegBjX0xUpbGTjJA6VEmAqgVMgz83fFTIznvceGLw5wRkdPxrPPFxjtjmtJOV56Ef1rNkDCV+e/FFLdorDbyRb0ojzZTjoteo/CQ507Xz/ANRX/wBt4K8t04EeZ/u16f8AB4k6Rr2ev9qf+28Fc2JXvtnfgP8Ae5en+R6NRRRXMe2FFFFABRRRQBj2nhrT7XTNSsHElzDqU0012ZyCZTL94HAHAXCj0AFV9J8I2ml3gu5b6/1GaO3NrCb6RX8mIkEquFGc7VyWyxwOa6CigDmtK8FWGk3tpOl5f3EVgrJYW1xKGitAwwdmFBPy/KNxbAOBSW/gfTrbUIZ1ur5rS3uWu4NPeVTbwzMSSyjbu6sxALEAngCumooAK+f/ABYSPiX4i9PtMP8A6Tw19AV8/wDi3j4j+JG7/aYR/wCS8NbUfiOLMP4DJZW23DL2Kr/Ks+/TKxN22nFXrtgJsdyq/wAqoX7MY4+OAOKjDJ3i/wCtj5mH8Qo2qEXkanoZBivR7bb9gyM42kfrXnluwFxD6h1P616NbsBaL05UkfnXl8Rt2p+p6uHd5u/Yx7hS0u30GDWLIwaRmPfNbcrjzyD3BrEljxcMPUmtsDtZ9jy624iy7TsHUjiplcbM+o5/OolYBjwM9qkwrRMQeorsmkYkUZO8yD+KlB2/QDP402Mktkj5RSEZVU9ck1bWoCkiMFz+FSw3J2H9OKqOpkbP8I4pVIwAD35qpU01qPYqX0jGRj2J4rZRj/Y0Q7BeKw7wYYKOwrZ/5gkGP7pp4lLlprz/AEOmX8JGRM+ZSfamxZ8yU9tox+lEnBYntiliP7yT02j+ldf2S18Gn9bE8Z/dhe+0VZUlmXd25FQRnMZ9wMU8jrtOTXPNXZyy3LDXHDY/GqxAJ3epzUbSY4HU1LGmcOegpKCgritZXAHCr6k0+1JfYD2OTTBtILg57CmwP5Y2jqaJK8XYLaF9pGkIP8IOK6K0GIbeU9Suz8P8iuaaULAFHXNdJA260gX/AGc14WZxfs4q3V/kdWD+JstL8s4Y9AMCuN1JALpyexNdgm37OHzxtyDXH6mwa4c5qMlX76X3G2L2ibdwmNJt3HUJn9axYm+cjuc1uTyINNgXP/LMCueZwsrDuK7sAnKEk+7/ADOSulzK3ZDgFO8c43Yp0ZDgemSaSJMk+/NJIdsZHSu7d2MC4M7ct+FRL8xYN16U4NnkdMU1vnLdjgDisEhkEiFWx2HArIkx5ze2a1pi24Z7Gsic4kIHU16GHudWEV5Mag+6fSjH8Q6nP86MbcUHlwB0rpO3qGCH9qGAUMfrSvw59M5oz973NAK+5Z05s3kfvn+VbEbHzGHv+lYumKTewntz/WtgPgue4bBrgxS9/wCX6nDilapoX7J/+JhsH41v7xk5/vACuf0w7rzb3BrWkVolZ/4jzg181mEFKso+RvhZOMGyZEB3Hvt/rXLaypNy7DtjH5V1Co21geNwBrnNawHYj7wHStcpdsQx4n4YmSpxHH9eadlfMJpFx5aGkLBTj1HH1r6Y47XZJGQDJ6YqNgc5HeiJSWkJ4zj+lOAJbPalsxbMUKOQOgqSFsqR/Cab0yBSA8kDpUvVEPUpzjEzY7nn8qW1/wCP6Ju1JKuXdiTwf6U60bdcIR0rd/B8jtv+7+X6G1JIPOkHdsmsiUYAzV0swdyRziqlzxt9TXLh48rsctN3kQA//WphPJHeplA2CmkdeOa609TpUlcbHwqg9c1PGx8s+uKYFxipAGAIxxt4NTJ3IqNMcpygz6VRucs/41bHQZ64/rVSQgSkn1p01qXQVptotaflTLnptr034P8AGk69/wBhX/23grzGzbLS4/u16f8ACH/kFa9/2Ff/AG3grmxG53Zf/vUvT/I9FooormPcCiiigAooooAKKKKACiiigAr598XH/i5XiJccfaYT/wCS8NfQVfPni5sfEzxEP+nmH/0nhrah8Rx49fuGF1/x9Mc/wrx+F
VtQYeVCo9D/AEqe6/4+QR3C5/Kq16oESE9QCKdFfB/XQ+Yp/wARFS3P+kw8cFxz+NekooNnHjgqK83tv+PqL0DD+dekW+Gt+D0XkV43Ef8Ay7Z6uHtzteRg3rBZTjggms25OZQV9KvX/wA0xIPckiqD8ujDup49a68JG0EzyajvJghUsQcA5wPfiphF8rY6EcUyKENLuOOKtRK2HzkhRgVdSaWxKVyCFAQEPY/nUdwCrsAuCTgVcaDZEuCN2eagu25VgM7aUKnNO6Bqy1KMpx8i/mKh2MvOTVxkWNd7YyelVHmJBGCK7qcrrQI36FW5b5x6t+lawcjQY89QcCsa5bDFup7VrmNjo8B3cEdKeIStC/c6Zq1NGUed2e/NOTH7wj+7/hTSpUSZOT1/CkhPMh7Fc/yrp6F2vF2/rYtwn9znHIAqVxuGV4+lRRcJntgcU/kDr1Fc8lrc5JfENdMYPUmnhiEC470dMbufSopJVTjvQry0BJy0H5GcAYA5qFGCZduxpgl4A65GaTG4BSeO/vWqhbc2VO25YR9wGT/EDXYWQHkxNnI27cfXmuMiG5x6ZFdpZriBQOgUEflXiZzZQia4dWqChz5W0DgMR+FchqBJuXHQZzXaLHkA4wMnPua47UFIdm7jIrLJ5R9pKw66acbm1MA9pbgf88gc/nWR5Yd247/nWtJlbCD1EXX6k1loCG3E8DqK6sI2oyt3f5nLV3FX5U3+9Eq7+3Q0+TBXYO4FM3Fk64Oa6E3uZEyZAK44x1qMnLFhxkYx71YjKmJckZ5pkiKMMuOe3vWSl71mO2hSlJIyRjmsqUZck9c1rTZ28j+Ksqf/AFh+tejhzqwu7IsEryecUKc4BGO1KAc5J4o4/Kuo7bgMfNznJpCPmx75oA2qD15pedhPegNmWtM5v4h6Z/rWkXUF+R94/wA6zdM/5CMXuD/I1dlAWZx6sa4qyTq28v1OHFL3zT0c4upC3bHNb4cSgKMMRjPtXP6J892d3QgZBrobNAHfjGMgn1r5nNbRquT3SR0YRNrl7gdy7Byck81zGsZNy4966hSZJumFQ4571y+tKy3jEHjk/rVZQ/39nvYMUvdTXczMDaOeMUmA24Hj0PpTWOFAp2RyfavqLHJZj4yNvX605RtBHY0yLbk+/WnlsfL+VQ1qRJa2EbI4HPvR2wPzpAeSD1wOaXBOVHHHWgRTm4dhmltGCXUa9s0ydts5B9KdbANdoPfrXQ/g+R3W/d69jVuDtnm9ATVV1DAeoqzdnFxcHqAScVXAJB571x0vhTOFaO5CQVCjbwScn0oA4JqSUlVUHnPekI+UAd63T0NFLQaAMr9amcbR+FRjAdeO+ac7HzMe1S9WS9WQt098f1qpLy7g8d6uyDCg98f1qoyjeSeSeK2ps6qDW5ZsVwZj1yCa9N+D3/II17/sKn/0nhrzKxJDTAnsa9N+DnOj67/2FT/6Tw1zYjdndl/+8y9P8j0eiiiuU9sKKKKACiisvxM12nhTWG0/d9tFjMbfZ18zYduPfOKAJbfW9Ju9Ql0+21SymvYs+Zbx3CNImOuVByKdBrGmXOoS6fb6lZy3sPMttHOrSJ/vKDkfjXnnhO4udMHhW0tNStL+HU7B28iO3jT7KViDBlKjdt3YU7ySSRzniq2j/Yf+EY+Hf2Pyv7W+3J5u3Hm7vLk+1bu/Xduz3xntQB6Z/bGmf2p/Zf8AaVn/AGhjd9k89fNxjOdmc9PaibWNMttRi0+fUrOK+mGY7Z51WR/opOT+FeYD7L/wg0f+q/4SH/hJfbzvtP27n3/1Wf8AgHtTtY+xf8Ir8QPtXlf2x/aL+Vux52/bH9l29/7m3HfPvQB61Xz54wGfiR4jJ/5+Yf8A0nhr3a+k1KKwDafbWtzd8ZjuJ2hT3O4I5/SvAPET3snjnX3v4IILs3MW+OCYyov7iLGGKqTxj+Ef1rbD/GceO/gsluMmdGHQgZ/Kqt/llHoSc1Ylfbt3ei4/KobsF4kbs
TxWlLRxPmKbtNMr2wzdR/7w/nXotjGUs2b+Nup/lXnMDYni9d4FemWpAsxnugrwuJZNQgl1PVwsb1W32OZvsJdN/tDJ+tZyZ3qG6LmruqYEp98n8KoITvJ9RxXfhl+5T8jyKnxssrHJvJBGKvwMVgIfqOtUohLv4AwTn9K0cq0WPzrnxMtky6a6lZmZUeQ9xkVTBYqIyfmJBNWJJyzsP4AKqOdsu/uelb0Yu2qM5MJUM7jP3BUcsKCM46/WpVkJhdV6j+eajNrKImLDv610RdnZu1hGVKmSM+tbGW/sm35+XbWROcZHpitdgDo0OfQGt8R9j1OmprBXMt+dxPXbUcOMOvbb/hUzDIOPSoo1wZD/ALNdC2Li/daLEeWUf3cDNLIxUiiH7mPUCkJyeaz6mD+IcZAx57DNUp8HB/2s1O7Atx261Co3jJ9a0gram1JcvvDYOZdvYA1LjBGOrUsSbTxTUYhgWpt3ZcnzNtE6cbQOua7e0IS0iJ6mMGuMt4/myeprs7ZFeCLOcogr57O2uWKfcMM/fdixksXX23CuN1Q7ZW9Gya7RCVRSccrXG6qpZsHqM1yZI/30kaYy3u3NGRydPjB6FBj86y94EpB6GtRo2bTIR6r/AFrHm4DZ65r1cIk+ZebOCotVckJBnz2GKZuYcZ7HNNIOQT0Y0HhCa7FEgswfMQT0UH/Cp22ow9Bhh+WKpwkhcnvUhcrtz0H8qwnBuQXEmdXXgfxZrGlIMr46itCZx/D2rNb77H1NduHjZHVhY6tke7AzQd3J9aQAqv41KSNhJrqO9u2xGD8qg9D0pzcMAOhPNIv+qI7g8UBsoPXNAmtS5pY/4mEZ7ZP8qtT8XLehJP61V087b+I/X+Rq3J805Huf51yVP4t/L/M4cQ/f+Ro6KxFw+e54rpICTDEy8bm5zXN6TEBc7u+cV1EcQ8pN33lJPHvXy2cSiqv9dmdGCTadv62GHKucdC3P5Vy+ryE3k2egIA/KupmKi3z68friuV1qPy5WPrVZNZ1bv0DGLRIxHYkKPwp/GAPUE0xFDD3pzLzGx6gV9Y+xi7bEsKgE+hFPdscD8KgjJMhx93FSOcLx1PSoa1MZR97UcDxg9afzwB1qPGeD1qUAkADrUMiRm3Kl529qfY5+1Rg9QRmkn+SZiafZH/SI29Wrol/D+R3t/ufkaVwf9ImJ9TmoEOcin3bhrhwOm7mmgc4FcsF7iPP6DZdxA5pFPycdac+4delMxyfQ1a2KWwuBlSPU0gBZ1Y+lOAyAPQ5pyDpRewr2Ipwflx6VQm39FPINaMv3Rjrj+tUXb5/Y/wA61pPQ6sMyxYr802fSvTPg1kaLruev9qn/ANJ4a80sefN9Spr034O/8gjXv+wqf/SeGufEPf5Hfl7/ANpl6f5Ho9FFFcp7YUUUUAFFFFAFKz0bS9OuJrix02ztp5zmWSCBUaQ/7RAyfxog0fS7bUJdQt9Ns4r2b/W3McCrI/8AvMBk/jV2igCl/Y+l/wBqf2p/Ztn/AGhjb9r8hfNxjGN+M9PeibR9LudRi1CfTbOW+hGI7l4FaRPoxGR+FXaKACvn7xd/yUbxH/18w/8ApNDX0DXz74vz/wALI8SD/p5h/wDSeGtqHxnFj/4DKl18swPXKrx+FE7g2cXqM/0pbhN0uf8AZH8qS4QLZxE9SOa1jb3f66HzUbOxUhOLlOP4ga9IgkT7EGz0QV5xbkfaI892ArvrUZttuchl6emDXi8QwUlC/Rno0JuM3bsYmoHzLmRMdDgH+dUEUCQZPTH6VcvSRdSD0Jyfxqhkiciu3DR/dpLseXPWTNG2nALbgM54qywAVmU53c4qlA0ewEkZHGalcskWQS2eAK5akLz0KjLQicqd0anJUjNU5CSSQPunAqzdRYiLK2GJ5x3qmOG2FuetdtFK10ZsltcRY3H86ttcrIjKuKzzy+c8YxirFug5ye/9KKsE/fe4XZl3iYkx61rPHnR7dgayrxsyk+nFaqsf7
HgJPAravflpvz/Q3f8ADRmOMbh7VXzjzAP7o/pU0jE7m9qgjHzyE9Nv+FdcVoaU1ZalxMGHA6kChsE49adCA0fplRim4zjt3rHqzn6sryEbyFqNFP61IQA57mkJwpPTmt1sdSdlZEi5MijsOc0mwFhT4lJAHc0MmwYzzUX1sY82tixGcNxXX2Qb7CjMMHA/lXF+ZsKY5yQK7WEFoY0U4HlA/wAq+fzpe7FeZtg1aTZJGPNVGzgoCCP8/SuT1JjuLMMdc+1dXv2xOiDJ25yK4++LncrAnjmscni/ayfQrF291GvvLaTGQPmUEgetYkpLkKeD1NawJawhwcDBH6VlEYlIJzjvXqYSKi5erOSb1XoL/Bk/w0x8+Wc8dQKmUbsrjrmo5B8vsK6ovUzT1EjJwF7U8sRwR/8Aqpin5Nw+lIH3A7uDjFNq7CxBM4KjFUnXLA1cmAwMd6qnn611U9Ed2H0V0NGOT7/0pA2QR7UgGDj8aDgYHc1qdNkCrtTB9M0KBkewp3G2gjaDgc0rhe5Y08H7Yh7ZP8qvEMzZxghjVHTz/pkSjrzn8jV1mKPjOSCa5av8T5f5nDib85saKu+4kxyQK3XY7xjoRz7VgaJNsu3XHXvW9HgxyHdnYCPrxXymZprENvsjpwn8OyEZ0fdHu+dVzt9s1zOunMy49gfyrpUUGVgU5KfermtWiK3jZbIFbZTZV/kTiW+VNmOCFLAdQaMbwG9KR/lJJ9aWIE+wr6rpcye3MKnyuxFSMeBx9Ka+EUmngjYv6VD7mT11HqmeO9SMfLUBeSBRGOM5yaSRgAT1NYt3djHdmbMS0rginWhBuIwOzUkuPMz70+1TF6noa6pfA/Q9Btezt5FmVv35C896f3HpzTJOLokDjNTsvzADgYrnbskcMtEiJ8r9KYMlunFOlBB659qQAhgcdapbDWwDt9RTohhQAckU1Ae46MKfGvGc8jrSkxMjk4UDvzWe3zE+xrRlHAHesx2KxscZIJ/GtqWqOvC67F2wIYybTnHWvTPg5/yB9d/7Cp/9J4a8x0kZkmHT5ev516h8H+NJ14f9RU/+k8Nc+I+Jr0O/AK2KmvL/ACPRqKKK5T2wooooAKKKKACiiigAooooAK+fvF4/4uP4jP8A09Q/+k0NfQNfP3i//koviQ/9PMP/AKTw1tQ+M4sw/gMqyMTIoHfAqK93eXGpPHNSpuDN6kcUmoEFIkrWOk4o+Zp/GijECbqEf7Q/nXoNqfLgDYJwCK4GIgXMZ9HFeg2xLabkdf8A69ePn792F9rno0dZ/IwLsFrqTI+8BVMRfvFJ75FaNyS1yxPas8yEYx3Fb4dtwSXY8yfxMsxW6EN0xUinnaR8qnj8qghVsHH3WGfxqwZQQqfgaipe9txq1irPlSWY5X7wFUpAPvY+ard1L5m5B90VQZ8YP4Cu2hF2uTa70HuwUDH1NNSVs/KTg1Exwp3d6IZkwevFdPJ7pfJ7tyG6znr1Oa3Cg/sK3PHXNYtyMkelbUYL+HrfH8JIP6VhinZU/X9GbPWkY03CuB1xUMOSWBORtq1KBucf7OaqxZDSfSuyLvEqm7wZei+WEewqJtwX73Skjkygx+NIz4DfrUKLTMVFqQwcc9SeKOG+UjviljB2FjSN6jrV9TXqTxttbPvTnwW3dsVXD7mpVbeGHYHBqHHW5m4O9x2cOgP94V2tqx8uLDfwYriwvmSJ6AiutgBURFDzt5zXj5vFShFeprRlaRZtpMTMrenNc5qJUTSDuc1uNjzj6u4Wud1GNknlZsbieK58tgvbN90hV3eKj5l+T5bKIL0K5rKYMWLZrUIAskU/3cf1qnarunYH7vau+hLljJnM9xVGGYkY2k1HIuIgD1K5qzMytvA7VXkIfr6EVpBtu5HUgTldg7807YDuJpsWEIPpUrYYMy9SK3k7Mp7lCbg+wFVEbIDVcuM7senWqIGdo7Zrrp7HoUNYj+34Uxhnae4qQrx+FRgggE9qtG0e6HLye
e1KTwTTVbBNC4Lhe4FAW1uXNOVft0Zx3P8AKrFyQshOP4qqaY2biM98n+RqzKSZST61zTX735HHXVqtmaujDfKzD2rctXI89G67sfWsXQfvO3at2KEtcq/G0j5q+bzOUfayUu35F4eL0t3JGmVJmjA5CA5H1rm9TPmTOc/8tD+ma3xGUmmLkcn5fpgVz+qx7Lj/AHuaWVxiqunYMU5Na9zFkUsrDvwc06NTuTHpUjqFiPsKIjiRPpX07l7pi53iR3WQQAeM0oYbEz1pLtSW/GojwE9gacVeKKjFSgkW45D60M4YZPeqysOvc0ofcxJ9qXJrcl0tbkDnLNnpmrFswN1GPeq3BB+tT2Sg3sZ98VpP4WdM0uRlucBblsdA2KnX5s1FfKY7th6vT487ufu44rjesEzgktERupD9eMUBTtBPrmnkfMcdqRmwgzTuybiouVPGKFAAY+9PDEo36UzqeO55qdREUpGDWa4w+Pc1fuDtSqEhJLY6110Vod2FWlyzZZRZCOu2vT/g8c6Rrx/6ip/9J4a80035vMz2TNemfB7/AJBOvf8AYVP/AKTw1z4h6s7sv/3qV+3+R6NRRRXKe4FFFFABUc88dtby3EzbYokLu2M4AGSeKkqrqX2b+yrz7Y5S18h/OYdk2ncePbNAGdpXizSNZjaW0e7EKw+eZrixngiMfHzB5EVTwc8Hpz0p+j+KtG16d4NOvDJKsYl2PC8RaMnAdd6jcuf4lyPevNNSijutOv8AQ/BGq3erafLolzHPD9pa5jhZVURKjnO1m+ddgPTsMVuXGoWni7xDpx8NyiT7LpN4k7oNogMqxrHEx7NuUnb1G2gDrNN8XaFq9/8AYrG/Es5DNHmJ1WUKcMY3YBZAO+0miDxdoVxq/wDZcV+GujI0K/unEbSLksiyEbGYYOVBJGDxxXFaPqFnqw8BaXpuft+lEPfQhCGs0S2eJ0kH8JLsqgHr1HFVNOuYZPDnhvwshJ1+y1iJ7m22nzIhHOXklb0VlBIbod49aAPW6+f/ABeM/ETxJ/19Q/8ApNDXuuo2c99aGG31C5sJCQfPtljLj2/eKy/pXgPiC3msvG3iCGa8nvZVuoszzhA75t4jyEVV4zjgDpW1D4zhzD+A/kTqmWz3Cg1BfoFKjvirsEeZVftgAj8Kq6ohyCeOhqKU/wB6kfMQ3TM2Jczxj/aH8672AmGwRR39a4OE/wCkxY6Fgc13cI3WytnqMAVxZ5qoJ7XPQpt8z9Ckqq91LI33cVj3BCNkdK1opCS+9dvJz+dZN4VLrg8CjCJqbTOKpZpD0ncEbQKdj5SQfmaoElCg8DPapACiFsnkcV1SjZmJDMy42DrjFQBfmyfuqKe8R3YyckU5024j/iPNdEWoqyKTsVHRpPoTTWiC8jrVuUfwIMn2qAwS9SpFbRnp2NYzfexFLwF+gzXRRKP+EcgB/iNc5cZX6kV0MT58P2pHauPHp8tO3836M1/5dt+RkTYCNj0/rVOP7zn2qzOQd3PVaqwn7/piu6mvdJpL3GLGGHI+6cGnMrEHHTFSRjJVQOMYqXYN3PGRihyswlU1I1Q7TnpUSR7cjtmrhGUBxwOtV2+V8f3ulKMmyITbuhgTL8dKfswm0dSad9wqB1qRMLuYnqcihyYSmxMKoVe+a6S3cGFWz/CBXOxxksrH+9W7sP2bAJAIBz+NeZj0pKKuKDs9CzGvmXOT9RWFrD75mKdmwa20lxdnHZsViamQtzIPWufAJqvr2NJvRW7lmST/AERD2MePxyRTLcpt2fxKBu/EVIsJa2iZuAVx+tMn2wxsq43cZreLTvBdzB+ZWdtv3emCKrlzlvxIp7tlMDqGqNsEA98dK7oRsJIrpJ97PY1KCV4HQ1CflOTwDSg/ICDkda6GrnRKKeqFuuRnuetUfuuatSMXbHbFVnAGTn3rSmrKx00FaPKxxY7sD1xTDnnPrxRjJLZpas2Wg4Lnn0pgyje3en7sJkelB/1nTgikJN9Se
xObxMeh/kasjks3vUFkAL2MDvn+Rq4qjDKepJxXNUdp/JfqcOIfvfIvaVJIpdYwDjk5rpkKsuzPDA5/Hiuc0vAkkUckitqN90GAfmGMe9fNZnHmqaGuGlZEksY88nJxsCj86wdZUfacjqowa2y+07SckYrD1vK3Jbtx/KnliarJN9P8gxFnHTuZB+57ZqKIsZgG6VMDkMPembcupH0r6ZPRmMXo0xszE7s9O1RsAqpj3p0vVgfakk+6uOtWtkaw0S/roM+npUkY5pFTbk++anhQDk9TSlKyFUmkinKgfIGetS2A2XcX1qOT5Z29CaktM/a4MDjfg1U/gfobSb9nbpY0tR5vWI7mmKQVA9RS3xzdvjnk0DHHoRXFD+HH0PPkNOA2R24pJE37Se3NKQC/B6Uegqydg3YTa3GeKkwrZXuOaZEhI3MO1SMQCT0zUS3sgZXuEDQoOx/xrPIxIR2zWrMAYVAPSsxziQ5+ldNF3R1YeTs0WdMXd5w9UxXpfwd/5BGu/wDYVP8A6Tw151o+DJL6BSa9G+D4xpOvD/qKn/0nhrmry9+S9D0cvf8AtUvT/I9GooorA9wKQkDGT16UtYHi6CyudJiivdFu9WzOPJhtBiRJNrYcPuXy8DI3bhjOO9AG/RXnthZePdHtrm+jniuLSNd8Wj3kxuZ2A6qLgBcMewbeM969CoAKKKKACiiigAr5/wDF/HxE8SH/AKeof/SaGvoCvn3xiC3xE8RgHH+lQ/8ApNDW1D4zix/8Fli0V2dMZIcAfSm6tFhVzweTirel5MQA644NR69xMu08ba4IVH9bUPU+bUbU+Y55BtuI8f3xXcxkrboAuQF3ZrilH75B/tCu5tcva5PQJiozuVowbOqj70vkYrzH7RKNp25GD65rJuVbzQC3Tj61uXCxpLjHAIrFvWzIMdTXRg2m9F0OOSfNYWMpnDEZqUsQpY/dFV0jVvmPWpPMEiFCCAK6ZR1MhsLFp979MZwan2ZBlI+YniobfEspXHHvV1ELTA9I0zxUVZcrHbUSO3VP3j4z15oaSJsrgZHamuJriTCNhRxzUxsFA3DG7vXO5RTvN6lWb2MW9UCQ/pWqrFNFt1A42mszVF2SY9ABWrCPM0uFOwTOa6a7/d05Pv8Aoa/8uzEmJDH6VApKqxx26fjVu8UB2qsvf/d/rXfB3jc1pv3CzAD+lT4BYMeARTLdd0CsOuAatLAGI6bcDA965qk0nqcs37zGPjZtUdetU5GAYZHI6VoSBUIVf4vSqMyL5xPoKKLQU7X1G55BPWp1j3tjGFFQRruIJ7VdwRGEX7xHNVUlbRDm7aCIcuAB8ua1mc+RGqjrxn8appGkUQBGSatjgoO23d+tebiJKTVugQ0uPhYG+lGOFI/E1j6n80znuM1sbNkzMOrkVg6g5M0g9zTwUb1brsjTW6XmbhIbS4yvO1R0+tZty5dvwFakaeVYIP4SgJqgyIy575qMNJKUn5sip0M5MhDnk9aHXjI9MVIy5J20jBggBPU16ietyL63KpG9famhSq46ipgBjbjrTQCpCnnHNa3N1LSxFtPOOMiq7ABtp54q8V546npVOQAyN6jitIO5vRldjCQGI96UYPFM25Ofzp2Mke1aHS0OwCMe1IwIAPvQp5+opMkgc9aRNnctad/x/RgnPUj8jV1VJJ7HccVR03/j/hB6/N/I1rpHuBbupNcWIlyz+S/U4sV8ZLpaMtw7cnjitS3J8kknBzVDS2CzOD1ANXwAAvoScivFxjvUafkKltcVX3S7mbqO9Z2tDM4z02j+VaRgAcsfu4GPzrL1t8LuBGelLB2deLj2Kmny2fcxC2N1PDBRxyetQk/MR1zxUigDaSMk19E0KUVYCu9hkdetKUyvToakUYJ9acoH5VDkZubIhER34PNTBQCCxAzwKAOP89KcqeYQW+7ngGolLuQ5N7mdcYEr+ueKlseLmEdy1MuUAuW9qdYjN/bj/aFbyf7p+h270vl+hoXKY
lmPU5qNTyB2xVrVFCzyhBjLVTB9fSuSk+emmcTRJhSxxjNC4BOeTnimrjPHBo/n1FVYkfuKgA02Vt0fHBpCSFyeaiY/KeacY63GkJLLtjyOeP61Rc7iD71ZcggJ2xVc4Un2NdNNJHbQSS8zS0ghZJR/sYr0b4Rf8gzX8f8AQVP/AKTwV5tpfMsn+7XpHwfOdJ14/wDUVP8A6Tw1xV177fodmXf7zL0/yPRqKKKxPdCsHxVb6i9vp95ptu11JYXq3MlosgQzpsdSoJIGRvDAEgEqK3q5Px/HLLpFiq2WoX9sL5Dd2lhnzJotr5BII+UNtbrztA70ASaTJqmr+KRq8+lXWl2MNk9ssV26eZO7OjbiqMwAUIQCTk7zxXUVxfhC00ODVpX0zwrqmkzmAhp7uFkVl3L8oJY85wfwNdpQAUUUUAFFFFABXz94uz/wsjxGe32mH/0nhr6Br5+8XuB8R/Ea9zcw/wDpPDW1D4zix/8AAZv6REiQhj1Kg1la4FV0VegGK1LT54I1B6KM/lWRrWUl3N35rxsGm8a5Nng1H+5SSMeA5vEU9d6n9a7+FfLsVKdCTnP41wFqwa7i/wB9f516Hj9wqDoRz+VHEDs6aOrCrWT8jAu0BkBPUsCfyrFvEAKSdznNal2paXywT8p/lWXdoURVP8LV6GCVranmt++MjDMTjpUrkbNq9arI7g/KMipVTuCSWrtktbsiSsy3bR7gEXrVydSAsCdSDmo7aPyIw4+8eMGrZTyoyzffb1ry61X95p/TLjHQrPOsA2DOcZqOOa4O4tjaenFTrbhW8yTrVqKaFgQMce1RKpGK92Nxxi3u7HP6koZo3/vc1pWqZ0mEH7uw5qlrLDzAo6dq0dPcNpqqemyumvJ/V4S8y4r3bGJfrtkcD0FU4+WYdsVoaiMTSH2FUIsknIwcV6dF3pplU37jNGz4hJP8IBq75IyAvbmq9hHuh3/gatiNkCheT3z6V59afvuzOdrUheNYgT3PAqlPDiQY9ea0vKwS79QOBVCVWLsoHOc1dCeu4tUyJVLzKq9Awz9K0Y0WEPK3Ujiq2VjAUfePFWTGbiUZ4RfSitK++iC9xLeKWT55MY3cVaZ+Vx3yBTVlLsiQgHBGaRyFnYNwV/xrkk3KWqK2WhYhOZeegxtrntRz5kh9zW/sO3eOijNc5fFvOfjjdW+ASdRtGsL8yOk35sY19UFZkgKudvStNlAsEI+8ygj/AD+FZi53MW6lqxw1vea7mVXoRsgjwB90LmmShhlj90CraoHWT1xim3MR8or2rrjU95JkeZRiTMRz1J4p6qeA3XvT4VyMd6tRx7xgjB71dSrZsbbbKDR/NntWdN8sz4/vZrdeDacflWNcoFmbPrW+HqKTOjDS95pkAzyfXpS8BQfzpkTEn5hjnilfLqMdAa6+p3te9Ydjaqj0P6UyPf8AeOMdvpTmXf8AlikyeBj2oBbFrTT/AKfFnr838jWuGKg46DOaydPwL2Md/mA/75Nb0Uf7pGA9mrzcZJKev9bnBitZr0IbA4uGbux2mtSJ90uD0FVbNUa5D+1WZFVx8p6ivLxElKpbyIhdK5NmRgFYjjPSsTWIeQO2a1QWUgDkGs7Wm+dB3AGfyp4JONdJFyd0mY6qOfWphjaoHWolIIz6HNSqvB9ete5IibJQOg7kUoRevpTgwIHqaeEHBz061zuVjAjx+v8AKnJH5uM/d7U/AHPrT4l80jH3OxqJTsrgjNu0HnuO+KbYJ/xMLYHqHFTakpjmOOuRUNkx/tO3/wB4V0p3ot+X6HbTvyGtqX/H3IvYNmstX5PpWlc72vJjjg5P41nOMHI71hhlaCj5I51ZyY4MD064pQemetQ7hux3xmnKa6HEbiSEkKTUTHIIPTFP6LUMnQ5pxQ4R1EC5x6YqtIrfvPXtV6JMqPpVOb5ZGFaQetjpoy99ot6Sx8yXP9z/ABr0v4Pf8gjXv+wqf/SeGvM9LUB2weqjNem/B/jSde/7C
v8A7bwVyYj438jtwFvrc7dv8j0aiiiuc9sK5rxqJjpdpzeDT/taf2ibIuJfs+1s42fPjfs3bedu6ulrA8XXOn2ukxPqXiKfQYTOAtzDKkZdtrfJl1YYIyemfloA5r4eajdap/YzRm+a3s9EW3vnuEkVGucx7QN+NzKBJkjP3hzXolefeENQiuvF80Wm+KtQ8Qab9hZneZo2jgl3qANyIoLMN2BngK2c5GO11Ww/tTSbuw+0zWwuYmiM0BAdARjKk9DQBzS/EK0/tLV0ktHXTNPsXvVvhJn7QqMVbYmOm5WAOecccYJs6f4ruv7Qjs9d0kaU1xaPd27C5EwZE271f5RtdQynAyOuDxXL3/w51e61G+tRrV1Jp8uhNYxPLFbogbLbIysaKQq5VsgDpjJHFbP9ma14o1e1uNY0s6VBaWFxbtmdJTNLMFUlNhOEAU/ewTkcDFAFrSfGVzfXWlfbdGaysdYVjp9wbgOzHYZFEibRsLICwwW6YODTLXxxNcTWd0+kGPQ768+x2199oBdnLFVZotvyozDAO4nkZAzVLS9J1+7k8L2Gp6Ytpb6ARJLdCdHW5dIWhTywDuAO8sdwGMY5qtZeH9dXTdH8Ly6aEsdM1CO4bUvPQpLDFKZIwqA7w5wgOQAOTk8UAeikgDJOK+ffF+D8R/ERGD/pMPP/AG7w17xqOmWGr2htdSsre8tyQxiuIlkQkdDgjFfP/iLT7TTPHfiCzsLWG1tY7iLZDBGERcwRE4A4HJJ/GtqHxnHj/wCCzesZWaJQoyCME+mKzdaZpJjnoOnvV3S5dkGD3JxVTWWXdlRyBXBQjy4t6Hzd/cWpkWibbyHnPzqP1r0qFM24J9K82t/+PuD/AK6Ln869JhkxFsI4IyDXBxNzfu7eZ6mCac25HNXKf6S7erlaybxS4Brf1AqrybQCQS2BWLcfx+hPy134GbaUjyaq5ZlBCQcBc1dt4CoMhOeOBUMKqOGGSK17O3yMv90c8114msoRJ+J2RPZW+wh5fukZwe1WGi3ytK3CLnA7UkMnnFo9pAHQ+tPlkDFYE/4ERXgVJzlU13/JHZGMVD+tyhNHJPOQuQhxyKtJYhVyCfyomnS3iO0AsPQ1W/tCQRhirDPGPSt/31SK5NEZfu4v3tWZGqL+9YddtX9OYCwhX/Z5rM1GQvM57NzV2ycJbQdwVxXrVoN4eKf9aGadkVtUGJ347CsxOWOPStPU2DzSf7o/nWdGAJD7CuvDfwlcuD91mzpCZhbJ44FW/nQD5ck1Doo32sgxgk9atiXKb2Qg+hrycRJ+2kibFcRyOS7gqF7etUWcoxZxgscAVqszTAKAVHU1QuVQuGYYCnjNa0J3dpIykkVY12uHc9ccHtWkASojjGW74qgI2knDHhc9DW3bRpCjSvjJGaeLqKKT3Y4R5mPgt0gVScbj7VTnZGkk6Bs4qUySNJuJO0N/SqV0wCPIvUHn865KNOTneT1ZpOStZIuxSbYZFIz82PwNc7eMPMYe9bgk/cSPj7vP1rAuR8xz15rvwMLTkwi7yR05G7R7c9H2/wBayDkbm9+lbQA/sqNsZxHxWQEJjYnrndXLg5fH6v8AMVdbeiEUMCHGc45X61KCWIDDtk1JGmSD61DOSJdoBHbNdHNzSsY2sghh8zc33djfnVxWiaLKkZPHFVYpMZx93vSAEcI3GcjFRUi5PVlJ2JWG5QOm0/nXP3vE7ZrfiBztY/ie9Y+oxje/qCa6sG7TaNaDtNXM5F53dsUqNj5T3oDYAA+tRqcuGr1Nz0rOV7kh4k6+2KCKVuBuPNIDuYemTSJ8yxphzqERPT5v5GulgdFg6ghs1zdjgXkYA7Mf0NbCE+UFzwRkH0Nebjoc8l/Xc4sTL94n5E0HysSD2FTbyASeAFzVKJysrjsUqeXc8Z2tgFcVyTh72pzp6EqSkqpPTPX2qprBEkaOvfPNKshP7vpx1qO9AFtGm4Ejn8KulTUasWOMmZicL9OtWY04zmq6c
k8cE4q2inGc9O1ejUZVVkqLnHFTeUMg7unWnQqCoOOTT2iIIbdwOo9a8+VT3rGSRHsCtnseKlSM7cqMKOmKkVBu+bBGMCl3biIk4HqKxlUb2KSMO9JkuGz6/wBKZpq/8TODP9+pr9Nly49Oaj07/kJ25z/HXqX/AHDt2/Q6ab9yxrXWReTALxk1mzIc5A/Cte4Ui9k75YmqUyFssBj2riw07JeiOZ6SZQ2nOSMcdaacg+3rVnHOCtMKj04ruUylMhB4yDmkCkn1qTZ2HHNPjTBPeqckiudIfDHxuPAA6VnXP+sJ75rUZWWLOce1Zc3EjZ5pUXeTZeG1m2WbAZMmP7tem/CD/kFa9/2Ff/beCvMdOzlh7Yr074QjGl6+P+or/wC28FY4j4mejl/+9y9P8j0Wiiiuc90K5nxtqGoWNlpsem6jDp095fpbG5nhEkaKVcnIJHXaAPUkDvXTVzvjGdxp1pp0dvZTNql2tmPt0XmwJlWcsyZG7hMAZGSRzQBz8Z8WN4r/ALEHjK0kJszdb49MQmPDqu1xv4zuyDnna3pXoVcD4DuraAaLa2Wl6ZZrqWijUbkWVuIiJQ0Y5wfune2M8/Kea76gAooooAKKKKACvn/xd/yUTxJ/19Q/+k8NfQFfPvi9sfEjxGB/z8w/+k8NbUPjOLMFegzRs0RY1LD7vzCszV28uXPqDWpBEWK+mBmsfWchwX/vcVyYXXEbnzkVeyZVtm33EfH8Y/nXo8Ck2SZPzDpXnVqMTxAd3H869Bgk3WiEf57V53EibVO3c9DAtKcvQzZ4kM8jsMnkfrWLdIB5fHB5FbrxhxMO7E/zrMvImZ0C/dVyD+lPBVLNJv8Aqxx4iPUq21ujkMw5rTjbdH5a8A8VWjtieR0PvVpmCxgJ97pWmInzve/6GdNWWpK7rFGFj+8PlqFmFuCf+Wj0JGsB8987iOfrTfKDuLmTPy5xiueMYr0/N9jVtv1HLbJje+OTmop/KYY9PekkM87qq42d6hmsXQbh1+tdEErrnlqZSenuoxdQOJ8DoBgVdtSPs8A9AKz70bpih6jBq1abjHEO4UZr2akf3USZfw0Numy0pP8AdH86px48xj3Iqa6cq8ufuhc1XgwXLDoy1tTjaBcI+42b+jEi1cD72c5q4WEuGIIHYGq+jECykb0NafkrI4/ujBH1r5/FVFGvJsuMXJESDeQigjPWqV5brLcBMfKBk/hWwSkIwv3m6VlXzGGUAfeessLUlKp7ugVoKMdSoPnuEiToDzWiYmmlZc/u1AGDVKNkthkfeJq9NMyKEj6kc5rpruTklH+vMyp2s7kd1Oi7Y0/SswncZh2q81qIUDfxH3qg27e2cYJOfzrfDKCXukVG29S5CwaAjuQM1gz5aZyTxmtlQAg9iKxZRiWTHc5rswitKTLpPU68IRYRKOhQGs9UBeTdyB0FbDrt0mPH8MQNZS43OV74P6V4mEqOSm13f5mteFnH0GwuoRWI+6ciqtw5Zg2cnJqTIlmZPciqjgLnHQDFenSgua/U5W9LEkDBQFI+V6mMbR/dIx1qvBgxiM/hUrNIg5xgVc172gh/nZYZB4IrLvmLXMg7bqv+YWJCdRyfxrIvXJujjucGt8NC0zaguadisvDv6A4FIUwM/iKPUmnHJC/rXonottMTOW2GhAojGBgYpPvPnuOtIuZAxHQ9KBtaFzTgDeJ7hv5GtZQxhK56H5aytNGb+L6MP0Nau1thx1B4rz8S/f8Au/U8/FfGNUfviO2CKdFIz7QDjOc/lUSHbLg9WqRQF3Y69RWMkcwjkh8jjHBqC7fcm/6D9KmHzMFbqcmi6RRZs3PGBVRajJXKjuiogGxTjg4FTR7mPXgUyD7m09+aehbII6dDWs3uEnqaEIXZkd6eUYkcjHVqgTA5H4fWn5cEdMHrXnyi76AmSNw5yRjGBRCCpytRNkyLu+7wBj1q15iwx+WvYd6iV0rLW5S1dzC1KUi6frzio9NBOqQg/wB7N
Ov2P2hj2z/Sm6WSNThY92r1rWw79P0OmHwXOgnys0hPZjUEibxuTgHrU+RLLMp6lywqEFhnGPevIp3S80c0tymwweR1poUN0HHQ1YZj/FTeGHHTvXapuxmQCMHgDmpEVI2JxyRT/lbIHWmEKrEjOSMU+ZvQBpJkJJPy9gayrhQJWHvWs8WVDN1ByKyLjHnk104ffQ6MN8RPY5DSY64zXp/wiOdM18/9RX/23gry+wJ3Sf7ten/CD/kFa9/2Ff8A23gqMR8R6eX/AO9S9P8AI9FooormPdCuQ8e6jb2+nRRS2balbxTxyahYxQiV/sxWTDkH7gDJuDccoRkV19cV4q8M6/ql9qz6TcaalvqmlLp0/wBrEm9MGb5l28dJu/pQBJ4LTQrSaa10XwzqWkh4w7y3Vm0YcLgBd7Ek4zwPTOK7GsPRU8TRz7NZOkG1WLCfYxJv35GM7uMYz+lblABRRRQAUUUUAFfPvi8D/hY/iQn/AJ+Yf/SeGvoKvn3xeC3xH8Rj1uYf/SeGtqHxnFj/AOAzWgDMyhRxgA1l6ygLAe5rdshtizjk9KzNdjVJjz1X8sV5WEq/7Vynz7jampGNbnE8Wezr/Ou28xlswkYyx5/WuIgYG5jB4+YV20cRltsoTkYxinnSjeDkbYfmu0uxWRygmJ6gtx+NU7mVjJEpAGTzVuaPEssWecfn61Smy03TOOlc+HUXLm/rYxqtpWHb5d5CqCueD+FSCMQ5lLHJ5waZHK0e5Cmdp4PrUqRnO9ydvXBq5u3kvzJirjFEkjjzBhMZz71IS00wjUfusckU0O9wTGqlV7MO4qWQiACJOZGBx61nJu9ra/l5lJaX6CTXEcIVRjOaqPc3DOQYxsPQ1ox2aBQ0pBPXkVOVt9mPl4rFYilTekebzNfZTlu7HFXvEm49auW4HkRfrRq6IszAY65FS2KboFJ6AV9BKonQjI5JL3UjNvOZHHqKqxrg8dhirt7GUnk9iKpDKsT1zXbSd4KxtT+Gx0eiMVsmLD5Rjn+daPkZI2s3y81n6N81q4P3c1oRwSRjG9myc185i3atPWzKhqthyxrApyxJxxmsq9lZZCWAz2rYFuVy0jEgDoa56+kkkmy6FeuB+NVgUp1G73IrpqKWwkHz3Ikc9T0rbiVYozJJ1IzzWHaxSNOjEFVyK2fKeeVskqgIA9DW+NtdJvQilfoQMJLpuRhR3H1rLn3CRxjo1dDJLFEDGm3dxwKw5drXDEngNTwdRtvSy6CqxUet2SRAC3dmPCkGsmbHmMc9a2oWDWzKygAt+YrEuMK7Lmu7Cu85BTWqO0E/+gbGx9wY+mBWWzHbjFWpSfsqsBx5SiqDyFEAI5rx8LSSu11ZpWm3a/YiXCuxz1ORVW4BBA7EmrRI8wKemOtVpmyAfSvVpfFc5uosDZQ+vapDMwA3gdefpUUS/KSOueKl3gj51x659KqaXMJ7jSVbO38ay7tc3BPYCtNwpxtOOazbgfvmOa3w+5vh9JlQglASOe/50oYncV5I6UjEkkAdsUsa4H1rtPSe2oA/N+HNKDhjx8opkgKKuOT0NPAyTnvSE0rXLem86lD+P8jWuysFYqM4JrK0kf6fFn3/AJGtsI+1iFJ5I+tebi5Wq/JfmzhxOskVI+TuA/hyKU/60n0qVImSaMAZyMEelMkU+YxA4JNZcyctDlsQxufM5H1pLh9sGD0ZqRQ3msNvNRXwPk46Hr9K3jFOaKgrySIlfg46jpVuEkkYHynrWch/dqR61pW5xtGOPWtKysi6sbFgRgkYJwasJAc55wetJFFyDnPcCrSRvkdcMOfavKq1baJihG5EIAkYUk8cA0MVRNg5YetSmM7QpJ44z601lVABwzVip3eruXy2OfvWBmZRyQeaZp4P9qQ+m4VNfL/pEm0cnBqGwU/2nAckfOOK9xP9w/T9DSnblaOhBCzSY65qCUZcsvJ9KlZds0hHJJyKRk3kEcH0rx4tJ3MXroQFZO6cYqMhhzj2P
0q5hlx8uQajI3DJXHPT2rWNQhxKjFSCM47U0Hk45OKnKjncuB61GcAnaOo610KSJGvkqC3FYt0MzMR2raYYXLHt0rJnAMjYPWurDPVnRhXaZLYDcZPpXpvwfydJ17PX+1f/AG3grzPT+DJn+5Xpnwf50nXv+wqf/SeGpxG7PUy//epen+R6NRRRXMe4FFFFABTJVd4XWOTy3ZSFfGdp7HB60+orhJZLWVIJvJmZCEl27tjEcHHfB5xQB5re+MdQ8P32qKmqXWrR2WmXFzKmo2S2zLKhUJ5eEQyISTkgMBwd3Izsf2nrXhfV7W31jVDqsF3YXFw2YEiMMsIViE2AZQhj97JGByc1YuvBdzrkxfxLqsd9GtrPbRRWtp9nVRKoV2OXclsDjkAelT6f4Uuv7QjvNd1Yaq1vaPaW6i2EIVH272f5judgqjIwOuBzQBk6Xq2v2knhe/1PU1u7fXyI5bUQIi2rvC0yeWQNxA2FTuJznPHSq9l4g11tO0fxRLqIex1PUY7dtN8hAkUMspjjKuBvLglCckg8jArZ0nwbc2N1pQvdZa9sdHVhp9ubcIynYY1Mj7jvKoSowF65OTTLXwPNby2dq+rmTQ7G8+2W1j9nAdXDFlVpd3zIrHIG0HgZJxQB0uo6jBpdobm4S5eMELi2tpJ35/2Y1LfjivA9cvYtS8da9dwJOscl1FtWeB4XGLeIco4DDp3HvX0PXgfij/kpHiP0F1D/AOk0NaUtzgzJ2w7N+yCjG7GAqkflWJrhPnkseCOnpWlaM0qLg8Dg59KyNbYSzEH0ryMDTaxbbPDqSTpJeZmW677lD33D+dd5YsIoAD1/+vXB23/HzGR/fFdmC2zcp44rXPIe0UYPY0w8+SbkLOoMjyBctvYZ9s1QuCqThQv41pwOHdwf85rMlG+dB65Jrz8K3zOMuiCulbmXUVGQn5gN1SKWlymCq9OabFBHKQ569OtWCS0eyLhugJrSpNJ2X/DGcItrUaWjhj/dAFlGMLT4oVVBcTYLjOM9RT4bVIG81h8x6896ZKHnkA/5Z9wa5+dSdovTq/0N+VpXa16IrSzTSvhNwGaieKcEnzDj0xVue5itwQO3pWfLqgAHDc+1dlCNSSXs4aHPU5U/eepkapIWnz3A5q7p0w8iJSuQwPNZ1+2ZWb15q3ZZS1gAPAr2qsF7CMTJu0Ex+pIPNmI9AawmLCQHt0rZlcyfaAf7o/nWUw/eha3wl4w5X0/yLovVnUaBHvsznoTV5pmgZVILEk8iqOhsy6fIAfm/hqdr1HJcg56V89iISniZ3V1f+v68jVSUYLWz/wCCWtzyyckhR1B71nXMC3F8SMbEGMU59R3IVTIJHcVVWSUwlUP7xjya0oUKkHzbdP8AgmVSpGWm4s8gRwsa/dParVxfLDFtTlu+D3qLYkUZY/ePpWbG67yz+ufxrpjRhV1a2/Ey5nHbqXo1fcZZHzkd6zWYuzkHHzH+dTSXjSDap6EVEoAZj6muulBxu5ENl6PAiweyisO5YNKx6Vrq4aA+uDWHMM78/exg1rhI2lJs2oayOzMbGxh+b5Qq5/Ks2VTubPOORWyiA6ao7FAf/HRWW3IUDp0NeLhal3L1KrxtYqTH5Qw7cVDMMfQjFTbcYU9MGmFS8ZY9jxXqwdjlGRKVjDZzip/kdckYJ45qC3c4DD7verDIjAkdaKnxaj6kJVR0rKuSTK/5VqSYUDHUnFZlyMSt9a6cPubYf4iBACQ3TmmtlWfHrxTgOcHpu4of78i967Op3rcQ8496Ug7OvPTNAGDk00NkCgN9i9pGW1KD6n+RrqYdqQ8kN8xB9ua5TSW26gh9M/yNdFCU8k4BzIxavGzKDlUXy/U56ztMedolZvyqrI6HhcdCR7UyWZt5C9CSKqB9q+5qaVB7s5JSuTjBlUg9etR6hH+43dyajjkKknPf+lTXkoe3j9gM1uoyjUjYUdGZSZVBxnnFaFpKNiKR1qGCMsM9s5pzKIcgenFdN
RqfumlSSk7GvG4U5B4Wni7YYXB+Y9fSseO6ZcLnpUy3m445rz54R31VybyiarOZDwcYH60+PYF3OQWrMS7YtgHkcmpN+QS5rCWHaVthqprco3h/0xyvSoLQ41SH0LinyuRKSaZbH/iZwD/azXrJWpNeRdPr6HROuJXwO5waawycg7Wx0qzCwlEi90OBUDoGfcOGI714MJ62fQco6XQzLAcnNMLZGcfhTiJAc5GO9Rncee/9K3ikZsQkOpDKcdKrt97aoxx1qyzYX5gcVFIGJwnBx3ram7EMhdcL8xB4rGmOJGFbMq4TLctisafiYYr0MLrc2w/xMnsOWk+lem/CDH9la9jp/av/ALbwV5pp4+aXH93Nek/B7/kEa7/2FP8A23gqa/xP5Hp5d/vUvT/I9HooornPdCiiigAooooAKKKKACiiigArwXxQP+LieJvT7TD/AOk8Ne9V4P4lx/wsXxKP+nmH/wBJoauGjPPzT/d38izbSMq4X+Ks/WCplOOmMVZtplhbax+Y/dBqlqSfvuSa5sPC2IufO83upFK2/wBenpuH866y2ciF1Ncpb4E8fPRh/Ouji4ldweoxjtTzOKlZGkZWlcvxMFCA9W/nUc0Q81cfxHIqCOQOAQeUfcfapUm3GAH72CQPyrxnTlGXMjdTUo2Y6O08xiwzg+9X9scEBPcCqymVJWCp8nY/hUwjCDzHY46kGuOvKUmuZ6djoopRTstRsMEkrHzBhCM8Uy+lEaiOP72OKkluvMQpCAW9vSqcgW2RnlY5Jzz2p0YSlNSn8kKpKMYcsPmzLkttzbpCcnrg01oYwveo7iWWZ/kX5faq8kEoGfmr6WnCTS5pWPKdr6FK5wZSPwq3CcW0B9qo3GTIrHgirqjbbwHsRiu+ovdii5r3ENLYWb6D+dUhgysfQVaY/JL+H86pj5ZGHY96umtGVSWjOg0+Yw2n0ANRvPHIxUE8Co7Z8QDHOMAj2qyIo2IIOK82SjGcpPqZNt6EDNuIVOtSqwgBH8RqZvLj4GCx6UwAR/NLxzxmoc1JWtoFrAkLuN8vbpiqEtsZZC5ztHI5q20ss8hVV/d9QwplxJkFIhkgYrSk5xl6/gD02M5W2M2PwqdeFGfrTjZmNQ7ZyRTEJLEkcV1uSkroUtSyVwmO2DWJMRvkPpkV0EZDw8c/LWBcJt8w+rZp4R6tM3w9ubU7aFs2W1epRf1AFULhfKQEdc4NX7YgRqx6eWv8hVW4ILD618/QdqjRdVXijOkbDHPYU1gVt/m69aW4RiQyj7xP5U+Xa+OeGGPyr1k9EchFbgjAI+U9aneAbcqTRbhAgjJ5NONsyJgMTzmonU9/ew0tCtJHhc9jw34VlXSkTsT0FbZQ9cduRWReD53z612Yad5GlB2mVI+v40khJkOOueaG447Z5NIwy4I6Hiu49BLW4gOUJ96aV2Pkd6dtyGA7mkCY46jjmqLui5pg/wCJii+x/ka3kwbbHcdKw9Nx/aUZ7nI/Q1r4Lqy9ME4xXl4xXqL0X6nBiX7yfkQB8SADpu5qHdgc96lWP5zyahUBUC9dvrVxt0OYdtBkwaS5h+6vYjFAcEhjwAalvD+9ULyQB/Ki7U0hq6Y6JljjVT1NROi+ZsOcsKk8tcD5uWx+FP2AqN3DYxWfMk7iKgtSCcdKQQsG6datKGVsY4HelG4tgrx2q/ayHzMgRXU9BTskk7utTFWOOOnWo/J+XcxIJ7UudPckqMTuB/hpbT/kIR/71OfIYjHyjvTbfi9gx3cVu9YP0NoPQ6GzbMkj+/FSMqSyE5OcVUico74HOTgetXE2v3ww7CvAqx5ZcyLg7qwwwntSCF+45qXyyoGCTRlh2561HtH0HyogMUmDkDqajeNj2GMVbDMDkrxTGBkbpgY7VpGo76kuCM2WNRnOcmsS4UK7Y9a6WVY4lwWyT0zXP3K4lcf7Rr1sFU5rhS92Q+wz+8x1xXpXwg/5BWvf9hX/ANt4K830zkzA9kr0r4SgD
TvEAHT+1f8A23gqq799o9LLv96l6f5HodFFFYnvBRRRQAUUUUAFFFFABRRRQAV4N4m/5KJ4m/6+Yf8A0mhr3mvBfE2T8R/EgH/P1D/6TQ1dPc8/NP8Ad38ieONMhnxkAcmsrUi3mjJOeTWqkbyN1wvGazNRIMxYqeMgVjhX+93ufOLdFKAkyJxg7x/OulilAOMdTj9K5yIgTRf7wrbOQGI4wa0x0VJpFylZ3LVo6qshIHJqTKx3EbDBHOPpUVsm9CMc4HPrSxjM6I4+6K8qaXNJ/wBbFRbsi/FcEDay5INSjfLwchagheNlDMMN05qUStKvloCpPevNqRs7pWOyErrVjmeKFcoFLDjis26ikupFkclEHVT3rUS1Rfmkwe5+tZ93K0reXECOSM9q0wklz+5q+7JxCfL733FCWaGI7V2/hVV71WUjA/Op2sVDAvgmopLWFQcAfnXu0/Y+bPOd+plT4Z8575q2Mm1tz0+XpVKbiUqO/T2xV8/8eUHrgV6FTRRLn8CKp+7Nz2HFVjyAe4qzxtk9Tiq2OT9a1h1NKfU2NP2+RubGDgGr3kZwFbAHpVPTUDwEHpV1YpuMP0615GIlao9bGSXkBjWMbnIJHTNRYNwfmG0L2Peplt3x++YMM1FIskjfujtHQ1lFpvf5g0NMqp8sagkelPCRQoWbG4805YI4cscbqYltJMzNI2VJ4BqnKL62X5hZlS4uGkwAvFQZ2vtxxWhdRRRxYXAOR3rOcneWrsoOMo+6tCGnfUtp8oJBwB29qw7rL+YmcZHWtqIZikJPy7ayZwC5I6c1vhtJM1w7tK51EEw+zIM9EX8elSSRBgSDyRkVlozIET1UHNaUTE7VJ+ZVxmvGrUvZvmiOM+bRlMAuMkYKg8VHNGQq8YPWrpiIuRzkMOlLNCJJMjptIrRV0muxHI7FaCIOjHIDdAaeVkRQOWwOtWbW0LQZBAbPBqR4JUU5ye/Ssp4iPO1ctUpct7FB42IDc844rHvkImkGO9dOYSVRsEbsYHoaxtViVJpeO9dWCxCdSwnFwaZgHBznueKaTgY9CealKgNyOhpmF+YEd691M7otDgAoBpo6474zSEnA5701s4z0J4ppDjG5e0oZv4T/AL38jWsQzbiOME1kaQW/tCNSeRnP/fJrXw+8lSQuSCK87FfxfkvzZx4pe+V2ywyGx3prgZJHp0qXy/3bZ6gcVA7BSuPWiLu9DlQzA2ke39aluDs2Mf7q8/hSBRg0t1hgoYccfyqr3kik02OjO89eeoqckPhs4PpVa1VnDOONpqYDdgrxzWU0uYTVnYELbgME+9Sh+SCnToaaiuCAckjnNSbgcjYayk02CGFyVB24PU1GA0gDElc9qeWJXIBB6mo28xwGBwPSqihFd2IkK7eMdabaAfa4yf73FOZ/3hXB+tNtDm7jH+1mur7D9DWOxohyJX553HFXAwPfaapou6QnPINWsKTyMepNeXVS0FEk+YMD5mcDpTTI2RyeDn/61Djg7WAOKh+YAAtk+tZRimU3YmMzbScE+1J5jFsfdwKjUOActnmlCtvJLcelVyRQuZg+3GGIY9awb04kbHcmt2YKq5xkgdqwbpgzscc9q78FvcqHxkmlsC0/+7j+del/CT/kHa//ANhX/wBt4K8z0xCVlK9gGNelfCHP9l6/n/oK/wDtvBWlde+2epl/+9S9P8j0WiiisT3QooooAKiubeK7tZraZS0UyNG4DFSVIweRyOPSpahvLSG/sp7O4UtBPG0UiqxUlWGCAQQRweoOaAOF0Oxso/Et5qXhWxjttItLOW3laAbYr653KRtUcNs2sC/cuRk4NZOjWVrYaV4D16yJOr6rPEt9cbiXuxLA7yiT+9hhkZ+7t4xXd6N4S0nQHjOnC+jWOPy0ik1G4ljVfQI7lR09OKLDwjoWman/AGhaWAjuFLlMyuyRF/vGNCSqZ77QM0Aefw2sK+FrDxYAf+Ejl1xEe43HzH3XnktAf9gRkrt6D
bnrRq9rDP4d8YeJ5QTrunajMtnc7jvg8oqIo09FYYyv8W85zmvQF8I6Emsf2qtiBd+aZx+9fyxKRgyCPOwP/tYz70XXhHQrzVv7TuLEPcl0kb964SR0xtZ4wdjMMDBIJGB6UAaGoyahFZltMtba4ucjEdzcNCmO/wAyo5/SvCdXe8k8c+IH1CCCC7+1Rb44JjKi/wCjxYwxVSeMfwj+tfQNeDeJf+SjeJR/08w/+k8NXDc8/NP92fyNSO1lkVBFjB5bPpWbrNuBMQOgro7R0hj3t1ZVH6VhauJFwGHzuCxH868PA15yxVui/Hv9x49elGFJNbmBCmJEz/fH8620BYup7msmE5njB/vD+dbBG1zjtzXtYt6pHI23uXYRs47AUpG67IHcYFMhzLICfujkVaFt5cqnndj+deFUkoSd3rY6oRclpsPS2ikbzD1Ix1q0GWOMBQc9BTRZB2yCcHnrV+KCOGPcSePWvGxOJhpdt+R6dChJ3sreZRitriVnExUoemKrXYisY24PH41otebztiwTnnjtUFxYByJ5cj2zxTo15KovbaJ9EKrSi4futX3OXla4nl3LjZjjIqBra5xlsVu3MkUTlB1+lUGvY3yo6jrxX1FHETlFOENDxZ01F2b1OfmHzc9ckVeIJtoCOhWq1+AJmI6NzV6FANPth/s16dWfuxf9bClrBMzZPlSQ/T+dQqQXA7GrM8eFmH0/nVVM+ao7CumDvE1p6xf9dDd01WeAgYwCfz7VeVpUA3EZ71V0uJmtmZRycGrxkIGWAzXg4iV6sktSIrS4xUlmcbiNveo5JUhk8tAdxGanVpW4RR70wxR2zMWJy3PNZRkr2l9yG1pdfeRJbyTEmUjbnIp01yf9XFnOMdKUTSTACIA1b8uOCPnr70TqcrXOrvogjG6937zIa0lbdJLjA6YqixBZhWvNdeYdi4weOlZph2zMP4e1ejh6kmvf0MZJJ6D48m0P91qxm4yvoK31H7javQLzWJcKVdz2wa6sLK8pGtF62Ogmgby43HQKtWIV3PuXqSD+lWbUB4Rnsi/yFRhhjcvXOK8N1nJOFtivZpWkEfyyeY/UOVGPSpXj8kED6fn0pcjyzjqSTUikvCS4G84rmnN3ubRitia2gkMEbjHHX86sF+mQeOvFOtCyWkfHGDn86uRyRNj346V4mIxEudtq6Tex6tGgnFWdtEUXjGd/Y1yusrm6dfU12c4CsNvSuR1s7bmRgPl5B/KvVyOo5Vb+RxZjDlSt3OdlHzfjUJUfMe5NTyHkVX2nJ9ya+3jsYU9g4PBpZMEfSkCg/X/Cg4J2nqao06lzSVzqsf0P8jW4X8oM2DgEg/nWPo6n+1ofTB/ka6dVUITjI3EH868bMKijVV1fRfmznqx5mY7BhOuehNQNGpwfbFakse+TIHAas9wAmexqqVTmWhxtWZEFKg56ZGKfdgbxn0pzAnBA4yBS3sW7A9ga1UvfVwQW7/ucr2PNSRxlnyv1qGL9zGM9zk1eSJgwkTkVjVlyt26jSuxY3IOxuvelMqEkYPFPLjPPWozLETtzz06VypXd7F7dSAyqU3KDzzUTb3Hy9KsblJwo+tMYSFcqorpi7dDNlJ2AcqRUdqv+nxj/AGs0shxOVNLbrjUISP71dj0g/QuGhqxQkySlfvAnbn1qdwr5Rwc45xT7eJt0jY5ySvvUjFQcPwcV4k6t5GkYWRVMag8dcYpjR9Pbn8amKox6mjyVC4yatTtuS4jRG2PlIzTjDk/N0xUiQADgnrmpBCoJYk1nKsl1LUCpIEQYAPArnL7Imcr0zxXUSlACBXO3ww7egPNell8tWEXy1ETaCpaS4Hbb/jXonwiIOma+R/0Ff/beCvP/AA5+8llx/cH9a9A+EYYaZr4YYb+1Of8AwHgq6r/fzXoepgF/tMn5f5HolFFFSe2FFFFABRRRQAUUUUAFFFFABXhHiRc/EbxKfS6h/wDSaGvd68G8Sk/8LH8Sr0zcw8/9u8NVA
8/NP92fyNywkEkREnG01maxcBjvABO3Aq1HC5ZSjlR3HrWdrJUSqFxjBrx8JSh9a5l1/A8SpOXslFmQjYuYv98fzrZzuJQdTkfrmsaLH2iL/fH862FG2TPck17GLtdHO+hZt32uvOFVsmtmI+bLGT2JJ/SsSLCwFjycZrXs3/fxLnOeCPTNfPY+N05Lpc7cJLVRfWxfEUqOVVSV6g1MtqyoNztjqc05JmQbShJHemSyyyKVCsue9fMOdWTtovM99QpRV9X5Eby29sC7MoA4yabdTG6hCwjI9qhNpuAEz71A5B7mp0kghwo25xXS4Qi1OF5SX3HOpzknGVoxf3mRLp+ZS7McntSNYwgEjGfXFOne5mlysbgDn61E9vdFtwZwPTFe7CVSy5ppHkSULvljcwNUhAlKZ5FW4kzbWw7betV9SRzNnBz3rQso/MtoB325x617dWpy0INv+rHGlfRGLPndOvYY5/GqSjD/AEra1C1Mc0y46qG6e9Y23a55r0MNUU4XX9aFw0vFnR6PIVtMAZPUfStAyRbsnHPSq2jRqtkGKgk8fhWgsdvIAw2Edq+bxdSKrydnub04txWpEJVKnywCfaoxbl2zJkYq2VhiUlVXPtWfNJNOMKGjINZUbyb5NF3YVLR+LUn82CJwo25zioDZSXBLM7KCc4pIoBvXewY571PLcSAYijZhjqK11hK1N692RpJXnsJLBFCmSRn6ViuNpcE9Cf51rLZXEh3yOxU87SKz7qNRKV4zu5rrwkkm4812Y1k97WQ6P/j045YDkVhTnDMOvFbjEJEzA9sfWsJwQ5B5I716eDWsmFPc6XmJS24jdGvH4ColLdSTjO7/AD+dSzHckXuo/lUZUm3bB5/+vXmw2u+pMtycT7mKnjAqWOVi5B46ge9Uy4CmTHO0EipGlw6sBxWcqSeiRam0dFZ3CeSASMGpmCHJVsemKoWZj8kA4z2p8sb5JWXAxwK+aqUI+2dnY9uFZ+yV1cLiYphSeB3rlNSlLzzZ6bzW9OxCBXbJHBJ/irmr9j58v+9X0eUUFGR5WKqObSM1/v8AFR4559c09+GBz0phy2ccc19Qi47EYYhhgZ65oLHAbHzYzinKuDzQMEbgM9qo2ui/oxP9pwE8df5Gu0gEf2bYSMkk/rXE6Vn+048dg38jXXWyEwli/JHHtXzmdQvNO9tvzZmnae3QgvpUic9AvrWWSr4XjgVaveVAJ3EcmqT4TLDrgjFXhYJU13OCrK8hd4XIPduPyp96OEwfvKKrMSy5I56/SppG3xIc5O0DH4V1ctpJkdB8BQyZfGCSoBq7h4pMKu5MZzVK1RTEd3XOQT61cSVwNpUt2zXNX+LQuA4yISOBnvUbeWT2zUhC5HydacI0P8I9qwUlEuzZCCobAAzxmkcvj5I8irO1AchQSeDSEleFiOKFU12DlMG4H+ktkVJbJi7hIPfNTzxhrhvlqOI4u41HrxXp896dl2Mr62Okt4v3OQeaik/2lAOKmsgxhRN3zLwT61JJCwYgoWz3xXzHtOWq1Jnpcl4JozyF3dccUBVxndx0FXfJGeY6BAuPuCtvrMbGXsWU1QHgOetTLGAxO4nI6VYW3Gfu496lWJAMkA1lUxK6GkKDM+SMnomR61zGobhI6hc5JrtJOFwI+vFcrqsDJeMMccmvUymvzTaZjVhySTJfCqBbm4HpH/jXe/Cj/jx8Q5/6Cv8A7bwVwfh9vLuLkgYyv+Nd58Jzmw8QE/8AQV/9t4K6pp/Wqj8kejl8uat8n+h6DRRRWh7QUUUUAFFFNkkWKNpHOEQFmPoBQA6isnSfE2ka5DdzWF0XjtG2TmWJ4vLO0NzvA42kHPTBpmj+K9F164a3068Msoj80K8Lx74843pvUb1z/EuRyOaANmisTT/F2harqP2CyvxJO2/y8xOqS7ThvLcgLJjvtJxRF4u0KbWP7KS/BuvNaAfunEbSqMmMSY2Fxg5UHPB4oA268F8Tnb8RfEh9LqH/A
NJ4a96rwTxRg/EbxID3uof/AEnhrSnuefmf+7v5GrvdW2qeoH8qx9ZVkdBnnnNbAnWF/mz90fyrF1WbzBvPXJrzcCpe2TtoeDNrbqZ8HNxED1Dj+db8QEkjDIJUH+dYFuR9qj/3hWvboY7h3H8ZINehjY3+4iTs0WI2DoFHc8/StTTsfa/RicCs2C3IIZO/8q1LWIG8jfsuXrwsdKPJJJ9GdGFT54vzRvo8bDLDmobm6VEOFJ+lPS3ikbzOcn3pJxHEpPPFfGwVP2i0b8j6ibqezey8zOK3EoIDYzzyKkSyQYaXBYd6V71APlBz9KoXUt9LInklPLJO7NezThWqe6rQX3HlTlShq7yZr74Ui4I/OqjXcXOOfxqvDZHgt1+tSrpcMUZC55OTzWapYam3zSbZTqV6i92KRh6p5YfcB98Zp+nHEcBPZeKrauqxgls/uycVLaS+WsAPQrivp3C+FSWv/DHi3tVbY/VGR5Zz38sCuZYbpcCtTUJCJJtp5ashGAkBP0r1MBS9nSt/WwruUpSO30GFTpyOwHKgVox6bBGgVFAUdOareH4hLpSA9Cq/yrU8iQEgYxXwuPxMliakVO2p72Fw6dGMnG5Tks4UG7AyOnNZVyzeftRTyPSuhNnv+/0+tUrgRW8pAzVYLGLmtrJmeLwr5b25UZEdnOZQxceta6RwwLjgfjVF9QXzcKDx14qyLIzgmTv6GuvFSnJJ1nyo5aEYptUldkbXqFwm0jJxWFfbBdye5ro5rCLKsc5XHeuV1ZGF22Pu5ruyl0pz/d6aGGNjUStUHsm6N16gLu/GsWb/AFwratfMaGQnGM4/CsacfvD7E19DhXaUonHT3NudiqwEHgqMj1qIXJyAAQDmrXk7o4Af4V/mKheIbiP4hxXFCUGrMiSe4xJA+R/s/nU5OCAPuiqoQRSD/Z4NSQFmOPzq5xW62Emb9pGr26HgMOlOlSYHIkGMVFEri2DJjcBSGW42Ddtz3r55xk6jaa36nqKSUEmiOT5gA/LL3rm9ROLiYepzXRSSblBP3hwa52/x50nuTXt5Ympu5x1WroyyeSCOCaQHqPepHwGB/CoxgA/U19AjoTug3buMc4zSooHA6DikUYGe5FKMnGKBvsi7pRB1aIeuf5GupijZoiVYDnArldLIGqw++R/46a6VGlx+7xjJ614eaRbqK3ZfqZSaTRRug37wZ+bkA1V5LqPYmrNyXWb5sZLfpVdv9YzdsZrWj8JxS3GkHfz0IxTTmMkH+Hn86cknzhW6Y/rT75cgY6EDP5VqnaSiwSLdpEssOeMg5FWo42U4IJ+lVNOVpYE2da2rZ9mEkHzH0FeTjKkqcmlr5HVRpqVr6EKxrtBKHnipPs8eR8vStOOONh0PNONvFjvx714Usws7ao9OODur6GUIo1OdvWl4ztCGr7Qxq3fmlDxpxg1X1u6uk2L6vbRtI5u7CRySgjkd/wAKyYW/0+L/AHq1dVZXluB3JGPyFZUYAu4QPWvqsHrRu92v0PInZTaXmdPZklsqcfNmtcOCACpJ9aw7FX2HZjzAO9bIuBHGzSZ+UZOBXyuYwftPd1PXwU/d10D5Wwdh5OKcqJ/d9qlW4jdVxnkZHFNMkfHX5jivN5p7WaO7ljvdMFWMggrxUcgiUcCldo3Ug5x0qtI6LnrwK0pU5SfUzqTSXQSSdVxhTzXL6xch7l+DxXQTTDZx3GelcnfM08jkdmINfT5PQSm5NHkYqpzNJvQtaE4Z5z/sV3vwo5sfEOP+gr/7bwV55ozeUs+f7mK9B+En/IN1/wD7Cv8A7bwV6s42rzfodeWte3aXZ/mj0Oiiig90KKKKACkZlRSzEBVGST2FLRQB5KdZ03xEPiPpui6paXV9qMf+hxQTKzTgWUanbg88gr9a0zqNp4q1/RB4dlDmy026FwyAj7N5iIqRv/dbcM7eo2E16PRQB5Zo1/aanZ+AdGsARqelSRtfQBSHs1jtpI5BJ/dyzBRn72c1Xs7mF
/DOi+FFJ/4SG21qN5rbafMiCXRleY/7BQEhuh3Ad69booAqajHqEtoV0y5tra5yMSXMDTJjuNodD+teEawl5F448QDUZ4J7pbqLfJBCYkb/AEeLGFLMRxj+I/0r6CrwLxXz8RvEYz/y9Q/+k0NaU1dnn5mr4dmssanMjHqo/lWDqPEjD3z+FdLa25uLeNiSMKv8qyNahCuWUdwteXga8fbuDep4VSm1FTtoY8S/v4z/ALQrYt1bzH3evFZKczxr0+YfzrYiDNPjGBk/yr0sW9PkY7tF6ykC5U/SrNjMRqKqPubOtUovlUsOeQD7etW7BP8AiY4PTZjNeBiox5ajfY66Epc0Eu5uxwsB8pOPrT47JQ5YsxzzzVeN50JXYSB0PrUw81jjkZ718pU9om/eWp9FTdNpe6x8qQpn/Csu5uACqRgFznArVisSWy0hIx3qz9jt0wWCkjuRRSxNKi7u8iqmFq1lpaJz4N3tz5Yz9aY8l+EP7pfzrbmlt0bblc1TnvLcLjcuT0Fd1HFym1al+DOOrhYwTvUOQ1IySN84w3cVLbJuELdwvAqbUTvkYogP0otIGlhh6qdoJx2r6z2q+rxvp/wx8/yt1GlqY91uDy+uc/jVAYP1rcvLNi0pAOOOcdaxhGUmOfevVwtSMoaEx926Z6B4eQvpMW3+6DWj/pIYAIMd6zvD7tHpMW0ZO0fyrXF0O6gH0r81zBy+t1LK+rPq8GoewhdtaEQW4YfMgFZstsv2p2Zjk9q1muWIIVMmsS+jnaQjDLkdR2q8v5nNq6jcyx3IoJq8icCFMZA/KmvcyFT5Sg1TtrZxIu6Vm571uN5Ea/wiunEuFGSXxs56ClVi38KRis185HmRgJ3INc5fndM5Y87sV2Ut3AylVZSTxiuR1IZd/lxl69zJ6spTfNDlPNx0FG1pXG+dtiKjoayJQGYtntWlDko4K9ORWewweR26V9FQSjJ2OGm9TohgMpB58ocfhUcrYCA/eY0hcgxAjGYxTCWkkZWXG0gKfwrzIw1uxtjp4iyy4HuKIFymMfNyPxp0hZVCckk4NSWwHmnIwM5/E0OTVMEk5GkiSLAu0Z4phMwHKCrcay7VKxllIzmnMHwcx14Xt7Sd0men7K66mXMNy5HUfernb0/vJD3GRXWyxqc84PUiuZ1KICaQjoePxr3MrqqUrHFWjyyTZjuP8aUDK4NOYHzVXHHrTHyrsAM819Dc1Tukho+VeaciYUGlkGdwHpSg8Ae1FxuWmhZ0sf8AEzh/H+RrpIxMinYoK5PWue0oZ1KLjuf5GuthSRY1/dkrySfxrws1qctRen+ZPLzSMe8jfGXGHYZxVRmG4euMGta/fLjKgDHWsViW5YbTjJHpWmEk5wTZx1FaTSBFLNubjirV8oaCMeqj+VV1+8B7Gp7tW2w9cBVH6VtL+JES2HaYZEKOoyD1rpreSOQgnGT7VzOns6AYXKnv6V1dgIjCpOMt0rws6aXvNfcehgE5SsmWVRCOp5o8hcD5jwak8tT0brSrCMD5ycV8q6vXmPeVPpYiMSBiST81RyeWiHmrghXnLVFPHGEOSKdOsnJJtiqUWotpI43Ugvntyev9KzrYAXMZz/FW3fxI8sgyBzWX5Hl3MQBr9AwtROjy+X6HytRWmzobOPnK5yDWqsUu7BQbMdar6dAHXOcGtAOyErtyB3r43H4huq4x1aPfwdBKmpS6kWCOoFB6ZwKl3k4+Sk3HA+TvXBzvsdnKu5Cw4OelQsjHoM1dzkHKDFJn0QVpCs49CJ0VLqY91HLsOEFcveFoy5ZQCTzXbXCyPGcR1ymqWzscFSMnNfUZNiFJ2lY8THUuWSfQp6Qd73WeyZ/nXoHwjGNN18f9RX/23grhNGh2SXOf4kx/Ou++FH/Hj4g/7Cn/ALbQV7U5J1pJeX5HTllnXbXb/I9BooooPdCiiigAooooAKKKKACiiigArwXxIm/4meIl9bmL/wBJoa96rwnX8D4neIT/ANPUX
/pNDVRdrtdmcGZf7uzprS0f7LD5Zx8uGwKz9V0uQtjPueK3tInEdoN/OQMVX1W6XOQp9K+Gw+LxEcY4x7sithqDwim3rocK9m0NzGT/AHx2960kcidxtPBzUd3Nvu41wcZB/WnNOqXDj04r7Ccp1IrmWtj57Z7lm4mW3tEdY9284IHbPNWdPLSXbEHoN1Z2WdOvAOfwq5pMjRXMhwSq4AA/GvPxFPloStub0Z3qxvsdBHd/Lkoc0hvmzxE1MLxk5Ip4mjUZwa+WdOF78h76qTtbnJlupTgBWFRyxXc5XbOyDuMdad9rRRnaagbWVRwvlyflWdOjWvelT/X8zWdWla1WbLK6czEGRwx9SKc2kROMlVJHPSoP7SdugcfhSDV3R9pVzn0FHs8c3oxqeCWjRk6hZ/ZpSBzk44FO0qHesKk87ealvrrz5cbHzjPIqbTNkcpcgjYMCvZnWqrB+/8AFY8mFOm8V7vw3KWroIl2ovX0rlLkBJG9a6nULlXlHXqc1zN0heVmPUZH4V7uT80aaUzgxbi6ra2Oy8PzqmmRg88CtJpIy5OBzWRoKINMjLDqorUCQ+3518lmEILFVHrue9hZz9hFaEyTID0FVNQl3thEOSO1ThYh/wDrpkskasCASenFclFRjVUops2quUqfK2kZsNvcNcRNvKqG5GOtaS2MhXEkm76io2uNuAFbJ9qX+0mcZVXH4V11p4mrZwSX3HPRjh6d1NtjjpkSjIC5HPSuZ1S1Ks5J6NxXRLdTPnrj6Vg6i0kl3KnOBgjj2r0coeIjWaqSucWY+xdNOmrGZjy484zkgGsyQ7mJx2rTeRo1k3A8e1ZjHOOD0r6/D31bPHjudFeR7Le3lAycKCB6UwhWfIxkGn3c37iCNeG2CqSytuPP1ry6MJSp6+ZpUaUtCefLBnU9DxViCP5gc8ZFUUfAA96s75BHtjbB4AqqkHy8qFCSvdnV2hEcIVhmpGMPcLSWxR7aMSfe24PNSNbwOCCAc+9fBVJRVVuV1r0Pr6cZOmlGxDJaROpkVVz0Ncpq2nOsjgHq5bp29K64kQbsfd9BWFqt0r3JQZyMEn8a9jJq9eNb3dUeZmVOl7O70ZyE0XluMjvVd1O9j74rVuE8ydvQMMVRnTDke5r72jU5krnhwnZldVwQOp700nD47VKi4Y56mo9vLCt0zZO7L2lYOpwAepP6Gu+hCC2UEA8c1wOkKBqtu3fn+Rr0K0iQ2y78bjz1r5DiWSjODf8AW56GXx5pNLt1MXU0jG35BzmufdMsTjqK63VoogijHf1rl3wjsMdBXRlFbnopo4MfT5KrRXH/AB8KBwMYq7d4Mcajrx/KqB3bU5+bPX8av+WzSBj6Z/CvUq2UoyfQ5FtYs2sJjgUbCQR6Vt2qqsCDIB7U3T0RrKLcOqg1ZaBMgj+EYHNfKY3FqpNwlpqe1hsO4RU1roKu7P8ArKcBJx+970zyyO4pNsg/iFee0ns19x1ptbplgB8nMnGOKeIwwwzA1VAkz94YqVA3dhWE6bWqZtCaejRkX1spuHAwOcmsiSBkvI2zkbttdJNbiSVnOMms29tXjCuCMBs19HgcWrKDetrHi4rDtNyS0ubdtFtgBU4Ydac0rDOQTTI95iVkIGV5oZnHevnnHmm29T2VLlgkhwnOR8hoNzwv7s8nH0qIuw60m8mqVGL6E+1l3JvtGQfkPBxR5+GI2Hpmod5o3HtT9hHsL20u5P5xdfuHmue1RiJSu08Vuo7DvWXqMZkdnruyzlp19tDlx150t9TH0pS88nb5cmu2+FP/AB5eIP8AsK/+20FchZqYribnrGcV2Pwtx9l8Q4/6Cg/9JoK+ohLmrS7WRllK9/5P80d9RRRXQe8FFFFABRRRQAUUUUAFFFc3q/jSx0i9urdrLULpbKNZb6a1iDpaowyC+WBPA3YUMQOcUAdJXg3iZSPiR4jcdftUI/8AJeGvd45EmiSSNg6OAyspyCD0IrwjxPIq/ETxID/z9
Qn/AMloauF76Hn5nf6u7eR2VgEeziBzwKdeRRbC3rXNDUDCiqG7DFRXeps9r5ZbHcHNfILJ68q/OpaNnJ/aVNUeRx1sW72GMScdeDWQyKbl35z0NRPetM2SemB196c0m3cR1IzX0lDDzox5ZPU8erNSldKxat5Mxy56HGK19FaNjMPVq5yObZCMeuTT478REbWw3FZYvBSrQlCOly6Fb2c1K17HYyWsbev505LeMev51zI1olRz296Bq7t0PNeL/ZOLtZyPR+vUE78p1ojjA70n7lTXIPq7lOG5+tVW1FzJnefzNKGQV5fFMt5rBfDA7wSRCnrLET/9auDGpOEJ3nA9zUy6k21TuPI9aifDk/5io5zb7J2zQxP81USiec+P84rCh1kpGFLckE9aYNTKt5jH1/KopZPiINpv0CrmVGdmo2HXKAuzt97ODWZMoNwy9sc066vlM5AbhjmqEs4DFyeCMGvqcLQnFK/Y8ab5nojtdHhSWwTOeOBV9bGIHqeuetctYam0Fssa4IGKuLrjZA45r5vF5bi3WnKD0bPWw+Mw8acYzWqN77FH6nrnrUttaQpJI3OXIJ59sVzn9tlgOnNC6ycgZ6nHWuSWVY2UWnI6I5hhYyTUTsBHCq5qqrRDPtXMPrDbGO77vuazX1Ny4Jcj8TRQ4eryvzSLq51T05YHdCaEGse+eJbwt7Vg/wBotn5mI49aqXd8fNXLHpxzXoYPIp0ql+bocWJzT20OVRNO/ki3HFZcuwD61TeYu6/MTk1JJMuSM9DivoqOFdJKN7nlVJOTvYvTMZRFMPu4wKaOUJH3s1YmkjNgApHy9az2nCsuDwRWdJOcbJbCluTxkkEN0PAqe1YGXcfunNVIrhTEAcZDVDDeKhQZ5Lce9XKjKakrBG6Z3xtY5RFLk5Ucc+2KkFuFHBP51zcesuiBWwDjIpza+VIVsAkZHWvkZZTjG7J3R70cww29tTcmPlYB6HgVz2oKPM3+u4/pUia3vdvNChV5zWZfXyGbYG7mvTy7AVqNS0kcOLxMKsfdGhDuLHpuGKqyR5nkPvxTxdq+1QeQeaSSRd5APQ5NfQQjOL1PNZVMZOfXNMaIvIR/DghqsxyIDyevIpokCkZxya6FKXYpSaY/S0EeoREdFB/ka7yGFZUjkyc7Mda4O3lWO6BzW5BrPlwgAghTivCznCVsQ4yp7ndg8TCm37TY3L6yQw55yeOtYN1Yp5h64x61JPrTSArxgc1lTai8oycZNYZdg8XTVpMMXXo1JXghXtlRlxnH1rRYKscS+qVjG5yME1NPdIxh2vyiYIr1atGpNxT8zijK1zubSzjNjCvPyqO9LLagAYzwPWsCz1wmBRkbgMEe4pk/iRlTeoUjHvXyTyrHOs7dz31j8L7NJrU2/KbJFL5LVzaeIpigYomTQviVycbUz+NdLyjG9kYLG4fzOj8lqURMK5x/Eco6ImMe9RL4lmcZVEI/GmsnxrWyD69Q3SZ1AiZuT1qK5t2ZCO1YieIpN2NqYqc655oKgLmoWW42nNOwPGYaUWtTZVJFtU8vG7Z39aJBNzjHTj61jf240BCSBQCcCn/24pBJ28HFZf2dik78qZf1yg1a7RpYn3dF27f1pAJcDOM96zTraKCSRgUNrSKQCRknFWsDif5CPrVH+ZmkPOwc7c5/Sg+dhtoHT5frWb/bSHHI5oOtIDjK5p/UcR/IH1qj/MzTHn8fd+7+tQTwSSwnfjd7VQbX413ZI+UgH8aR9ZyxX5eBzV08DioyTUbEzxFFxs2yNYStyd3oa6/4aRiKPxGg6DVB/wCksFcC+qh52IxgKc13Hwqm+0WXiGX11T+VtAK96jRqwmpT2sVlco+1kl2/yPQKKKK6z3AooooAKKKKACiiigArz3VDfaRf+L7ddIv71taVZLF7aAyIzmBYSjsOI8FM5bAw1ehUUAY1toEbeFtP0e8muP8ARreKJntrmSBiyKB99CrY49axZfhZ4TnuZbma2v5J5
SDJI+qXRZiAAMnzMngAc+grs6KBNJ6M40/C3woettfnH/UVuv8A45SH4V+Ez1tb8/XVLr/45XZ0UE+zh2Rxf/CqfCI6Wd7/AODO5/8AjlL/AMKr8Jn/AJdb/wD8Gl1/8crs6KLh7OHZHGf8Kq8JYx9kvsf9hS6/+OUn/CqfCH/Pne/+DO5/+OV2lFO7D2cOyOL/AOFU+Ef+fO+/8Glz/wDHKUfCvwkOlpff+DS6/wDjldnRSuHs4dkcX/wqrwj/AM+d9/4NLn/45R/wqnwj/wA+d7/4M7n/AOOV1eoX0GmabdX90223tYXmlbGcKoJJ/IVgaT4qvbrU7Gz1XRjp39owNPZMLkS7goBKOAo2PtYHA3Dg88U7sPZw7I5/QPhboUmnSnVLK/E/2y5VQ2pXC/uhO4i4En/PMJz3781q/wDCqvCQx/ol9x/1FLr/AOOVP4l8ZSaFr1npUUGmbrm2efztR1L7Ig2sq7QfLfcx3Zxx0NdPbPLJawvOkaTMil0jfeqtjkBsDcM98DPoKVw9nDsjkf8AhVfhL/n1vvT/AJCl1/8AHKD8LPCZGDa3+P8AsKXX/wAcrs6KA9nDsjiz8KfCJOTZ3pP/AGE7n/45SH4UeECMGyvSP+wnc/8Axyu1op3Yezh2Rxg+FfhNelrfj6apdf8Axyl/4VZ4T/59b/8A8Gl1/wDHK7KikHsodkcb/wAKs8J/8+t//wCDS6/+OUf8Kt8Kf8+1/wD+DS6/+OV2VQXktxDZyyWtuLmdVykJkCbz6bj0oD2UOyOB8R/DDRI/DGqvpNpqJ1FbSVrULqdyxMuw7ODJg8461oD4VeEioLWl9nHP/E0uf/jlbfhjXJtf0ua5ubJbOeG7ntZIVm80BopChIbauQSvpVDV/Fd5aajf2ul6MdRXTIFnvXNyIioYFgkYKne+0ZwSo5HPNAezh2RV/wCFV+Ez/wAut/8A+DS6/wDjlIfhT4RY5azvSffU7n/45XWWN7BqWn219bNvt7mJZom9VYAg/kasUXD2cOyOL/4VR4Q4/wBCveOn/Ezuf/jlH/CqPCB/5c731/5Cdz/8crtKKd2Hs4dkcb/wq3wpjH2a/wAen9q3X/xym/8ACqfCJ/5c77/waXP/AMcrtKKQezh2Rxf/AAqrwiP+XO+/8Glz/wDHKT/hU/g/IP2K946f8TO54/8AIldrRTuw9nDsjjT8LPCZ62t//wCDS6/+OVleIPhfoiafC2mWeoNc/bLZWxqVwx8kzoJesn/PPf8ATtzXd6nPfW1kZNOsUvbnICxPOIhjuS2DjH0NVvDWs/8ACReGdO1j7P8AZ/tsCzeVv37MjOM4GfyFIPZw7Iwf+FV+Euf9Fvuev/E0uv8A45SH4U+EWOTZ3xPqdUuf/jlPh8a3N34tvNDt7XSV+yXa27faNV8u4kXYjl0h8o7gA/HzclTyK7Gi4ezh2RxY+FHhAHIsr0H21O5/+OUf8Kq8I5z9jvv/AAaXP/xyu0op3Yezh2Rxf/CqPCH/AD53v/gzuf8A45R/wqnwj/z53v8A4M7n/wCOV2lFF2Hs4dkcX/wqnwjnP2O9z6/2nc//AByl/wCFV+EgMC0vsf8AYUuv/jldnRRdh7OHZHGf8Ks8J/8APrf/APg0uv8A45Sf8Kp8I/8APne/+DO5/wDjldpWNrGoa3ZzY0zRIb2FYvMeSW+EHPPyqNrZOAOu0c9etK4ezh2RyGq/CzQk1HRVsrK/Nu94y3uNRuDiLyZSM5k4/eCPkf1Nan/CqfCP/Pne/wDgzuf/AI5XTaNqsGuaJY6rbK6wXkCTorjDAMMgH35rnbLxxNdS2F1JpBi0TUbo2tpffaAzsxJCM0e35VYrgHcTyMgZp3Yezh2QxfhZ4TX7trfj6apdf/HKQ/CrwiRg2l8R6f2pc/8Axyu0opB7OHZHF/8ACqvCI/5c77/waXP/AMcpP+FUeEAciyvc/wDYTuf/AI5Xa0U7sPZw7I4v/hVXhH/nzvv/AAaXP
/xykHwo8IKMCyvR9NTuf/jldrRRdh7OHZHFj4VeER0tL7/waXP/AMcpR8LPCYORa34P/YUuv/jldnRSD2UOyONb4WeE3ILWt+cdM6pdf/HKy5/hhog8UWMcVpqP9mNZ3DXB/tK5x5weHy8nzM52mX/IFdD4g8RatoUd7fHQo59Jso/NmuPtoWVkC5YpHtIOOeCyk447Vu3F0sOny3arvVIjKF6ZAGfwoD2cOyOUPwr8Jnra3/8A4NLr/wCOUH4WeEycm1vz3/5Cl1/8cqx4N8W3PiuBLo2+kx2726ylbTVftM0bMAQkieUu04JzzwRjHeuroD2cOyOM/wCFWeE/+fW//wDBpdf/AByj/hVnhP8A59b/AP8ABpdf/HK7OigPZQ7I4s/Crwic5tL456/8TS55/wDIlL/wqzwnnP2W/wA/9hS6/wDjldnRQHs4dkcX/wAKo8IDOLO956/8TO5/+OVveH/DWleGLSa20mCSKKeXzpBJO8pZ9oXOXJPRQPwrWoouNQitUgooooKCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAM7xBpf9t+HNT0rf5f221ltw/90upXP4ZrloLfxHf6lpN9e6H9mbRLSYiP7VG32y4aMIFjIJ2pjdy+DyOODXdUUAc5qt1ftHED4QbUWmth5g8+DbGx+9G5dhke6g59Ks+EdJudD8J6Zpl5IslxbQBHKElR/sgnkgdB7CtqigAooooAKKKKACiiigAqG7mkgtJZobaS5lRSywxsoaQ+gLEAH6kCpqKAOG8LSeIdL0rV0m8LXSXD31zewRyXduBKJZywTKu2CFbJyMcdafqVjr2ma1r8+laUNQj1mGPY4nSMW8yxmP8AeBiCUwFOVyeCMV21FAGfoOmf2L4e03Sg/mfYrWK33/3tiBc/pWhRRQAUUUUAFFFFABRWHP408K2txLb3HiXRoZ4nKSRyX8SsjA4IILZBB4xWhY6pZamGexnFxEFVhNGCYnDdCj42v0/hJx3oAXUbq4s7J57XT5r+UEAW8LorNz2Lsq8detct4J/t7R/Buj6Vd+HZ4ri0ENrN5l1DjZj5pVKs2QvocE54rtKKAOH8UWGp6/DNpNt4a+zu91G6arJNDsjCuG81QG8zfheBtHPfFdxRRQAUUUUAFFFFABRRRQAVx3jL+3r27t9KtNGvbnRZY919NZTwJLLyR5A8yRCoI5Zhzg4GMkjsaKAMu1nuol0u3h0V7a1eJhKrSxg2YVRsTapIbPT5SQMVxuneH9dTTtB8Mz6aI7LSL2OdtR89Ck0ULFowqA7wxOwHIAGDyeK9GooAKKKKACiiigAooooAKKKKAOG8Srrmp+IRZz+HL688O22yQLaz2wF7Lwf3gklUhFP8OPmIyeBg9RNd33nyxLpJkg+yGVXadBvlyf3JXtx/F05rRooA4yy0/UNQ8Y6dq7aD/YtvY200MhkliaS437dqYiZhsXbu5PXGB1rs6KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA5PxJGn/CZ+DfkXm8uc8df9FlrnvFGoajCvjeO11C5tzAdOFu0chHkb2AYqOgz39e9ekSWtvNNDNLBFJLAxaF2QFoyQVJU9iQSOOxqKXTNPnM5msbaQ3Gzzt8Knzdv3d2RzjtnpQBxC6JO/ja90H+3tbGnnTIrzH25/MWYyOm4SZ3BcKDsB257dqw4dd17X7bwnbPJuF3owu5D/ab6ebmYFQf3kaMxIHO0Y+9k5xXrAtbcXZuxBELloxGZtg3lASQu7rjJJx71Un0HR7nTotOuNJsZbGHAjtpLdGiTHTCkYH4UAU/CI1JfDsCardQXN0jyL5sM/nAoHIUF9q7mAwpOBkg1uVDa2tvY2yW1pbxW8EYwkUSBFUewHAqag
AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA//9k=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "augment = Augmenter(parallel_augment=False, concat_original=False, min_augmentations=0, max_augmentations=2,\n", + " shuffle_augmentations=False, repeat_augment=1,augmentations=[freq_dropper, chunk_dropper])\n", + "\n", + "augmented_signal1, lenghts = augment(clean, lengths=torch.tensor([1.0]))\n", + "augmented_signal2, lenghts = augment(clean, lengths=torch.tensor([1.0]))\n", + "augmented_signal3, lenghts = augment(clean, lengths=torch.tensor([1.0]))\n", + "\n", + "plt.figure(1)\n", + "plt.specgram(augmented_signal1.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "plt.figure(2)\n", + "plt.specgram(augmented_signal2.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "\n", + "\n", + "plt.figure(3)\n", + "plt.specgram(augmented_signal3.squeeze(),Fs=16000)\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ka-6_HZwtsIw" + }, + "source": [ + "In this instance, we have set `min_augmentations=0` and `max_augmentations=2` with `parallel_augment=False` and `concat_original=False`. Consequently, the output may contain a random number of augmentations applied in a sequential pipeline, varying between 0 (no augmentation) and 2 (e.g., drop frequency + drop chunk).\n", + "\n", + "By incorporating `shuffle=True`, the order of augmentation application can be randomized (by default, it follows the order provided in the augmentation list). 
Additionally, the augmentation pipeline can be repeated multiple times, generating several augmented signals that are concatenated within the same batch.\n", + "\n", + "For further details on the arguments, please refer to the documentation of the `Augmenter` class in `speechbrain.augment`.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XxwM1vXZdh5f" + }, + "source": [ + "## References\n", + "[1] Daniel S. Park, William Chan, Yu Zhang, Chung-Cheng Chiu, Barret Zoph, Ekin D. Cubuk, Quoc V. Le, *SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition*, Proc. Interspeech 2019, [ArXiv](https://arxiv.org/abs/1904.08779)\n", + "\n", + "[2] Mirco Ravanelli, Jianyuan Zhong, Santiago Pascual, Pawel Swietojanski, Joao Monteiro, Jan Trmal, Yoshua Bengio:\n", + "*Multi-Task Self-Supervised Learning for Robust Speech Recognition*. Proc. of ICASSP 2020 [ArXiv]()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " 
archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "16mUGH_A3UHZZOx2Vn69hy6fkPAM8CnA4", + "timestamp": 1612452409993 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/preprocessing/speech-features.ipynb b/docs/tutorials/preprocessing/speech-features.ipynb new file mode 100644 index 0000000000..ede4a85883 --- /dev/null +++ b/docs/tutorials/preprocessing/speech-features.ipynb @@ -0,0 +1,469 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/preprocessing/speech-features.ipynb)\n", + "to execute or view/download this notebook on\n", + 
"[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/preprocessing/speech-features.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7d8HzInf8-j2" + }, + "source": [ + "# Speech Features\n", + "\n", + "Speech is a very **high-dimensional** signal. For instance, when the sampling frequency is 16 kHz, we have 16000 samples for each second. Working with such very high dimensional data can be critical from a machine learning perspective. The goal of feature extraction is to find **more compact** ways to represent speech.\n", + "\n", + "The study of proper speech features was a very active field for research some years ago. With the advent of deep learning, however, the trend is to feed neural networks with **simple features**. We then leave the network itself discovering higher-level representations.\n", + "\n", + "In this tutorial, we will describe the two most popular speech features:\n", + "\n", + "* Filter Banks (FBANKs)\n", + "* Mel-Frequency Cepstral Coefficients (MFCCs)\n", + "\n", + "We will then mention common techniques to add context information.\n", + "\n", + "\n", + "## 1. Filter Banks (FBANKs)\n", + "FBANKs are time-frequency representations computed by applying a **set of filters** to the spectrogram of a speech signal. 
Please, [take a look at this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/fourier-transform-and-spectrograms.html) for a detailed overview on Fourier transform and spectrograms.\n", + "\n", + "First of all, let's download some speech signals and install SpeechBrain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PiZVuhfB7bN5" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "\n", + "# Clone SpeechBrain repository\n", + "!git clone https://github.com/speechbrain/speechbrain/\n", + "%cd /content/speechbrain/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "is5-1Vuq86b6" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.dropbox.com/s/u8qyvuyie2op286/spk1_snt1.wav" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Cc_06vdTENJz" + }, + "source": [ + "Let's now compute the spectrogram of a speech signal:\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PopykIe5EUpf" + }, + "outputs": [], + "source": [ + "import torch\n", + "import matplotlib.pyplot as plt\n", + "from speechbrain.dataio.dataio import read_audio\n", + "from speechbrain.processing.features import STFT\n", + "\n", + "signal = read_audio('spk1_snt1.wav').unsqueeze(0) # [batch, time]\n", + "\n", + "compute_STFT = STFT(sample_rate=16000, win_length=25, hop_length=10, n_fft=400)\n", + "signal_STFT = compute_STFT(signal)\n", + "\n", + "spectrogram = signal_STFT.pow(2).sum(-1) # Power spectrogram\n", + "spectrogram = spectrogram.squeeze(0).transpose(0,1)\n", + "spectrogram = torch.log(spectrogram)\n", + "\n", + "plt.imshow(spectrogram.squeeze(0), cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + 
"plt.ylabel('Frequency')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WSXyUVmdJNwg" + }, + "source": [ + "One way to compress the signal is to average the spectrogram over the frequency axis. This is done with a set of filters:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F5KHdgsEJqYL" + }, + "source": [ + "![fbanks.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAs8AAAHECAYAAADGXcfpAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH5QICFygG98MFEAAAAB1pVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVBkLmUHAAAgAElEQVR42uy9aYyl13nf+TvnvNtdat+6q6u36pVLk6JEiaK12JIly5YsTxxMEMMINMEEmUyAIEZm+ZKBDWOM8QCJ10wGMxMljm1Y0sSOl1hDyzItSpZEUabYZJNN9kL2vlZ1de117323c575cN5bdau6m6ISfyLuHyjUXd7lPOs595zz/F8lIsK7EJ//rd9CKcWN69f5xV/4hW3fffoLK0xeMtw8UPCZb1nsbMZiFtPKI64NGx6ey0lDxamHhJE4RzvFy+uDHNlwjGSOr+81/NJfrqNFcWfA8OXHY37ydMbpXRGHFks2YsVI23J4ruDukOb3H6vzmbMZ3zsQM7FhmVkuuT4acvagY+8i/MOvbOC04tUDEX/xvohmITx5pqRWCKMtx/ygYaWuOXSnILAwuWKZHzF86ccCjp8z2BBuH7DEt0PWIs2jd0oujRjaoUJ2Z4zdCJBSoQSemwn58FzJe65lZKFisRkQWmEj0dQyx0fOd7gzFvCdQzEnruW8sTvi1HjIgZalVgp7Vy0P3cj46pGE98wVhCJ883CdyQ3LV6YDPnc+Y62muXbAMTGnGdwQaoUwkFqeP5iwHCmemC94djbih4cWEYFzWZP6fMTnXlxntaH5/AfrNFPFiTslZyYCLg5p9uicXGn234BPvdnBWOH/faLBt0cN//SNjDsPpxwfXeHa93ZzcwKCtuLJGwXf3R/z+qTm41dKvjMV8PR8ycNzBQ5hfMNyt2n47PdaDK87vncsZt9CyeyNDJWtUV/4GoJw+cTfIXSatYZm+tYqAzdeohg+wuUje5m6eZehW28BDsM8mrMI7+H6Iz/B2J1F4oVXUFrxhc99liOXc/bdKfmPHxrgP33Y8OFXQRBOzsQMp46ZlZJzkyEfuZQyPxjw5w8FfHZgjhfa46SF5uk3Hf/ga2scPvky7V0H+Ll/PMvsfMkv/OsvItTRtNDMkZsnKBsT6LSNRDXOPT7LXx2v8SNnO0wsW4pA046hNAoUfPnJBmmoePRGzkuzCUMtSzMXzu6KMCI8dj3DapgfDFhsBsxNOT7zYs53D8Y0c8gC+N4ezYeuWCY2LFNrJXmgaMUaLTCybjm7J+bGcMArY4b/fuZNPr94iA9fsHzsXIcsUFyeDDk5E5EF8JOvdxjbsHznSI2v7Y04sVRyZVzx1FXLzGLB7z5cpxHBUL3DeVvj77+a8vRbGeemA756vMbfeaXNW6MBzz+hefI8jGxYXp+OeGYy5Fe/tc6V8YB2pHj6Yspiw7DUNFyfFdTdiKlWwfmpkLmaYXykxdjFmPOjhvfdLvnqoZCfOp/zV/tCfvpsh6/N1jiyYnll2vAj07fYG7V4oz3CjSKmuNlk/6rlpbGAz51u88KhGGMV77+W8t19Nc7tVswuOGaPzfPtjXEuSsR//WbB4YW
Crx9NeN/1nJd2xwSR5ehty8L7WyiEAs3X0iEeX1B88PA1jtQ2eGZpD6fzBgfmDJ881yaNNOPrlitjEQrHj51qc25fRGEU2gk3RwIGMuH6aMCzByJ+9nSHNydCzu5R/OiZgkN3C7SF28OGc7tjVmdy9r8ZcHYsIK0Ln3k9IyqFNFD8ydEaE7nj4YWSpBCeuJLyp5+B/acj9i1ZUJDkwlJD88Z0xHOHAx7RLX7yL+BuU/O1ozWmWpbhts8Tr+8KaI+VtJXip14qaWTC6T0R9Vw4cifnpf0xL08GzLQtEgkffrPk8csp/+JjQxzYcHzibIeFIc3RWwVzw4ZvH044NWX4mddS9iwV/LvHB3htDP7hmyV5ILz3SsZvfqLBRJTyyGsh56cMt+saHPzw9YJjc17WPFSs1jS1XPjKown/7LFT/MbVR5m5bDh72NG8E9LalfP+83D8SsF/eKxOooDdGeZ2zL5ly9cPBXxq8A7nsyaNqzUiJ7wyYnj6Tsm5g5CsahaNZm/H8Q++vc65PRGXDsInXyj4w/c1GMgcExtCO4SvzUR8+krGzHLJG3sinILjtwte3RvRDhXFoGP/LcvRWyWDqWMj1twYDbk5ZDhxK+f/fqLBT1/IuDpqOPTQPC9e2sVDdy0PzeW8uStk9k7BG9MRAhSB4qnLHY5fK7gxEfD80RppoDg6l/Pt2RqXJ4RmqvjYhYLzk4aPvpVxbSxgsGP5nYfqfPZazqUxw9NXMr6zP+YjlzIiK5z/aMbza6P8o5c2qBXC/KDhU6daXB0L+aW/3WSjDDi2annPjYLQCs8eSZhdtjx9OWWo7diIFXcHDPVc+PZszN9/YZ1aKtwZCTgzHXJ9JOQvpkL+2/EbfGtjnHgxItfCp8+nPHot48ZEwOSy5S8+mPDPf3eV8SsXKJOI5tJ3SQefgjzh5EN7eOqVb3L36JPEmaBURHLrLIuHjrE6GHLwzCUuPXyQoTXH6MIi0d1ztPc8gdIJy0OGV2cjNIqBluPKVMC3Dib85v9zhY3hGhf21pkfMTRTx+CG49DNkjKAtYZm76WbuKRJuHCR0F5ldd+nQCdcmQ5II8VInnPkO19FqPHij/0Yp6dDBtqON3eHDKSO28MBaah435WUVmz45uGYF8cCJq3w5PgCB5IN8q/u5sBiwQuHEn7/YMQHliwTHWFi3fLsI4YfuuB4+mLKV08kXN3rYDnmw9dyVhPFzSGDAkY6jnaoWKgZ/slfrRKVwiuzCQtNw6Vhw60Jx94wg8yQzIcMpcJ7rmU4LbwwW2Os5XjmqOGfPN/ho2dTjlxcIVq6RTk8wQvvm2LPnRKl4JXZhI+9ss7QnTnilVfRtPm9v/f3ODtZ43/443nirOT2rgbNlmX8/Eu0Zh7ml392hp9+scVyQ3NxOuTorYLVpuYDZ1L+5U+PsBYqdq0L8QfneWV+F7uGNvhiZ5R/++wKxsH8sGF4w/GdQwnZIy0u5HWe1Gscfa7Glx5roHmX4uyZM/zs3/27fOXZZ+mjjz766KOPPvroo4+/CbxrB8+/9iu/gjGGv/7Wt/pW7qOPPvroo48++uijP3h+EH71138DgDiO+xbuo48++uijjz766ONvDMG7UagvfOGL/NEf/9Hm++e/+c3N1+sbG32r99FHH3300UcfffTRHzx3MbN3hn/xv/8yi0tLiHObn1+8dImXX3kF+NG+5fvoo48++uijjz766A+eAf7xP/rvOH78OHcWFpicmNj8/Od//ud57rmv875fOXPPOQpABAGUdD+Rni+3IDtey7bPZesET2aAvc81ei+iANVz9lYb7oWSHSd2z1Fb/3u/715VqoOt2t5+1dsetSW17Ghjr5xKwO04RoBc3dvebnvUvYpCVe2SHedZ/PW5j5672hW1dcx9buvlle3t3nlB2fFfVX9bN5dNu2we10UBKLWjZbLNC3Z6Ra8gUlnlfj6lenWkdii5+9/tlG17G7y+5b5y42S78+yw1f2D415F9srkdsr
wNhw+2+4j2y/vdoRe7/Wdqvxc3dsG2dHY7rW6oezu41PfT/c7Y8ntOH7b+x6f7tXBg9Rgu9ev/pyoTZ3JfXx0p042Lajkvvd4gMnuCahyZ8w8yM5y7+tN/5Kt3HSPzO7edvfKsjM+Rd2bX+mJz02d9PqB7Mh9st3/XI8dy/tY3b6Nb+zMXapHCer79ENKwAqYHbmrV07XoyYlb9/PdPsohaLY8b3b4bvb8tV9nMDtvO79fEbUNidWO+J1K4ffvz/ova+7T5tEVX3jfdq9rb+5j3/I/dosoJzPb12buerbbvy7++QgV/V7m42kMlxv9u6Re1tfxr1BruyONve8LtVWO5XI5ulK7rXDfaz/wNx0P2dUPXEg8vY5sLyfq8i9/S477Nebn3d2QyL3Xu+B+Vepe/NKj/NsqVvuGStt5fytFqpye0z1jgd69ScP0N39lOnU9vPflXuef+LHf/y+n79y6lUmdu0izXNA0JnGakWZG1rGsDhgGG1m5IEitoITjRWD6hgSgTJ2dEKFclBqRSdWJFaYagtWQ+SE1aZBEkcz8wG8VjdMp0JYCpmBrC7kgcLVHNrCQAorg4b1piGLNM0UStF0Ys163VBqqBVCM3OEDjYaBiVgnBBmmlYdGoUwsSaMdxzjqcMBoRPiEiTXJAXEpTCcC0dblsFMMAKtmqYVK4yDvOYILaSR95CBQlgZMYROIHBEVvx5Fu5OGaYKIXLC3bGAjgEt8IENC0ZIa0KGojDQyIUiUKw3NEXi21YGCqdg3UbkacTUGpTaX0MBj6xbjrQcrRpYI8ROGO/AvnXH7o6jHSsCCzNtHw0biWKNkKWsRmihnsNwLnRqmoYVdqcOY2EiF3KjKDVkkY8YV903rSvmpwPuTgTc3ROi8pTO4C7ycAYt0BrQNLIMGwekQ2PMPdaknhUQG4pkgLuPTSDUEMYAIbBCGQVYU8ORMNQRskSxMaBZHtPUU0UtFxq5YzhzTLcsGwMKSRxpoEgTRehgOa+hco04xdqg4s50QD48zPJ0zIENy56WRahj4xEcTSzTQEx7pEExmKDKknpWMNZxaIFOTXPrWOz1NqBp1zWiPD1SVPj2dO0jDYtreN/dSDTR7oy1Oqgqiwxljs6QJRzJmekIUx3HYOZYHQhYmtRkgdoc6GwkClcvCR0ELqKew0AuZLFifdDQyB2xhQMtR1wIrbpPTdoIaQJ7Oo596yXDHceT65ZEl5SiCYE0hrAUrIEosjgNw4Uw3YKBzJEUwnTb8enVcjPxtWKNcj4uQiskKcTDBa2aRuqOxMHouqKRC0HgaOSO6Y4QlY5BK6w3NZO5wzhhUByljVhuNVktY3IbEDnoNOBwx4GCWinkDYd2UAZwbN0ymjla7YSgUBwxOatNzXpNMdV2dGJFEQtJAdYoSlFsFDGtMqRuIRBhtVNncW2A2oahkSuK2PdBQ6klDxVFAPVKL2no47wwkE44f6+mYjb1+iljYSoofI6paUoNrTEYzBzja4o0BpVYxlNhbQw6kaJV00RKsDWhMLA4bFgd0EysabJQk0Zwd8JwdzxgvaFJSmFf21FrabJY0Yk0sRUmOo7ACXHpGMqEwQ44p8lCReCEVqLQozkrwwobQIxQK6GRsUml1nTCUOrQIiyPGNYGNYH1efpwy9EohDzUPNy2iFKkoaLQijxQnFi1HFwRktJhUQQiJCJoJ+SRYnEsoJNoVpuGxXEDsWMuG6Qjmsh6mjYU2NJT2aFgyAppCMfiDs74/Bla2MgSMhswmlpGU8eAE5JSiApFHkHNgRFoJ4pG7mik3l9n1y2dUJGPl6w0NO9pWcY73tfzGMZSYWHUVJMKCh05hnIhsrA0auiEmnYC5YBFOTjett4/CmG5SBgohY0BPxoZyIVSQziaowWKusMqRREqlkYNaaBISkGUohMoTKk4vG6JrKMVeJvtbjmKJiglrNUVM21HGiqimmWtblifEEaWNSc2LLVC2Gho1ke9LQMRRlKY1iXGQitWlMb
H6t4Ny2Dm+6oigOUBQ2iFkUxo1zR3jisWJkOWhzV2wLKndOQbMeMt2NO2DJWCcUI9F7QDLUIj9XloZd8gzsQ4hlk8NIBoGDEWpxoUsaKINIuThvbkEMuTAYEV0BFFqAjKAp1tkNeHuH0sYWHaUIQQlzDZymjkFi3w1ErJ2q6IpV0hyyOGPPS6XJwIWBnRrIxrcqPQWQunLaCxTBBkG1gjdOoK0VDrZNX0S4MkdwymlsAJy0OGWu5zTm78GMUpGB3IGDeW/W2LWQ/JVxPi0uEUpBE8FORQc0y0LYOZpSHCRk0Rl8LMhqOZbQ10i1hIY0EbPybIEmFvy6JFSGuKlUHDRk1RbxQkyjFsCgrn+wOr/NjEakWnBlkEH16xDKeWtKZRtiQfagIBZV1TK4SlQ4rxliVKN2iP1NjYuwuhxkjHsatVYrIOIo562sJpEB1x4wM1Ztcsq8OaZu4YTB3OwGDqh/nTG47IwkTbEixHDKWC6wQg0MiEwdSxVg9wGtIBIUtDdm8IzVXvbxOZe3cOnp/5sz+757Nf/83f5F//H/+K//Nf/Sbt0hI4iFYCcgOrqwmXoogvTEWM7F5kuW4Y6Djy0rDkAspbMY1SWBty3Goootx3DreGAoZajiO3LJ3AG/qlXQF2xLJnqSSw8NpUxOE7JQOpY6WmWBgXbtc05WgBacD0vOP16Yg3J0MW64bpeaFTGG4PGs5NBrQizdi6ZdeKJUmF85Mh2glxLtSWDRcmNRPLlqNvCEduFxxcKCm10MyFsbalWIsYWofhjmPfQsHjN0v2rJQEVjg3HnB5UBOWwsqEo5l5TmkcjK5YnjkY0sgEapZaCjNLBbXc8SfHY3Ytl9Ry4YuHYy7WNJF1PH0xR0WOuQlhRQWsRYrxdctqojgzGdIaceRasR5rUq34TmeQOwtNjrwFrQCi0kfnB84VPHkh5cKwoRNDM4OD14QTb1mOzOVcHo2o5cIjt3KUU1wYCfgTafL/LUzSyB3jyzC9Yrk6YhhrWWbveg7Z/auO5USRG1hqBCig0ApthZujhv/wcJ2/PJTw9SM1TPsO83sfZfHI+whLuLgrZOLWEuuNOgt7j/Fb79vL1M0WTsW0x/bw7ceP48JRymQ/UPoBYC0mjydwaoiZmyV3G4bL4wHPzUYMLhom1i27Vhz7lkoeu5Lx8h5FOVowN2i4PqKIC8Wzy6OEqwE2NTw/o3nmWJ3VyQO8dGiEJ86nPH4pw4bjZMOz2HCabPBxhGFu7d7F2ugQ2m0weXOZY9dygkK4PWL43ScbAJzfFXJ91JAbxV9Phgy2HFOrluVEU2qFncqwkwUDHeH8eMTQiUW+MWWQUmGcMLNoubGvJDm0ypHbjuPXM6YXS57fE/G144aVusFWGebsSEC2K6NROq7ndUbXFLtXShYHAk7tDZle8ny0Jy5nDLUcl8ZCBEiikstjikM3HO89m3JwruDpcx1Go5x1CVACCyMw0HZkiaIx3CYLFFNrlkOXYc9yyWjL8ci1jM+cbKGriYYLQwYjMNhxNHNh+LZm6MAaFyc0+UTGUOrYe06za6VkICmYWik5fscykDqmUuGNqZB9y5ZGIUw5y+lOg29cneYbnSEW2wm1Urg2JRy/XSAKRtcdaxMlQekHE+97M2f/Qsn5uRHqK4YTyRrPTAacmYh46GbOjUHD2rAwtCG0Q8WaDXhtfYDTnSa1QpGU8NLtCb5+bi9jZ2PGlxR3Bh1K4ODtktXEsFZTjK5b2rHmzmBAZIXVmuHaQzknpyJe3mU4frtgbN2yOijMRhvUc8el8YgsVJw/bNl7t2D2vHB7SJGN5uxbcLxxCK4OB1wZDYmVY2OkZKmm+d19Ma/vjjl8SrHUMMwPBDxzJOLPDiWcnooYbjsevWYZuRay0NDcHDIMp8LRmzlxKYx0HDPLll23Fa7QLDcM9cxxZkyhZ9d5bZ8hawgDThhuCVNLglXQSjRTWcm+Bc8H/MIBw9npgCQXVkJ4/HLB5LJltaZ577W
MUGBuwNCKFGt1zUdfyXn8JRhpW3IDNRGGrSNwjuWa4dmDMddGAl7aFfHMkQA3WPDv7u7jsgtpZo6pOT//2m5FjK44BMVoxzE/oPihoVtI4mhkjqgD5+8McauVMHun4OB8zkgujLQsjVXNalNoln6C4tZwwNSKZeI2WA0fONPmet2wcCTjmcmQD1zIOHqjIA/hxoBm70LO7x1KKLX/URAPFOxesAy2HM8fDJlrGq6PGtb25BgnPHUpYyXRTGw4vr46wtS68Nq0j+vdyyVpoNAHVgkcLE1a8kCxUtd86UDCfF0z3HI4DVeaCskU772Q0+w4zjUNSSE8/lbK4m4oQuHlccOJKxnXGwEDoy1OTYVcOCbsORnwkXMdxlctZyZDXjsEGzVDkgv75uFo2CHK4cpwwEZkGEmF91xImblTkuSOjdjwjemIRurYu1hwfTjkix/WfOFYjWcOBHR2pexPS5YvDLP/muLxaxl71iy13DGxbKnlQpzD+B1YbGpePTxDWR+iCPby4sN7kRjG72a4kd2sDmrWGhF/fTDi5sxevnuwRr3tcMkAa4mivrJKuH6JtV0H+MITI3zjaI2NxDDQdjzyyl32zBcYJ3zy6+ucPjDEd2cbvLgvYaWhGVpzPDeb8PreiFP7E1o1jUlvUAQ5aEM+/hDx/DXSUHFz2GC1YuqtBRQl5cAUw0uWmXnf5/zJTMTEskWJsFxTzA0EFAZ27V5iX5Dy+JWc6GydtVNjjLS9DReGFI/VV3FjOcdu5OxdLBmRktdHDUMtx+MXMsaWNFoLomB5RLg+DEFs2bNUsDTinwUQWLg9HPLcdMTZkYCxyTVi5dgXt1i3AQuJJg8U9VToRJpb446FpuLjf52yf67k9qjGpG3WxiYhaHAt0YwuOb73Xs1D5zo0r13m9tQYV46fwDHOgWsZj17KqN+9hbiSyfOXcEYhwSD/20fH+KFTbc7OxEwuWfbP5xRaMTNXoICHL6Y0csex6xm1FweYue1ozddRFnYtWfbfLnltMiALNLemLYuLDY5eUIyfimmmjkOL5btz28Yv/q+/xBe++CWyLEPE8Tu//dv8+q//OlcuXeLV117bYuFQW0vjursAIAotO5YpqiVCLfduoeguI3eP1bJ9JUcLiNramqClmmGt1mpEg7ZsdurdrRNKtj7rXarWsn2pZnMLg1aI87MBvVs5tq2mqHuv1f31pHuXjKprJ7K5b2BTBwoIez5v9CzZdJdHde+ydXWOlntXRwIl/jJ6+wqUaL9y06tv0dWf2tq6IltNQwNGyTbd6N5lpwctbfW0NRIhEDDira9E0OKq5R8BpfySkfhZd9EKVcnnz7l3k4jqfq5AV6ugYXfJSymc6vqQwsjWCpwWP2sb9TQyED/jqBC0eN9x+n4bGKRnGVBt+Ud1qUi27KR6l6B7ttio7ft+vC1dtbVAbV/V7S7hde0fAuGO9UHds0am1dalNX4VRCpFilaVX8umf2uhktXLIlptk1btDNZtS7wKqXTszNZ3Wu6NDbpbJ6TH56prbG4T6La7u8WrumQkQqGFqDdkepc21fb3XTl1pWwRqIufddxcRt6xXUNXdlI98RNUQdybn7ox17vTQ/cshWpRGIRA1Ga+6F327OYw3d0apLs5SVWyb8UcqK32iPhz9ZZvheL9VleyuG5s9oS96O02680HW3L5vGzvE7w7TW96ckdv/lSy5aOqZ5uWM15G7rMdSFVxpyo5okr2RInfjqF27HtQ7Ni20+sAYCrZN3Nxz1YW1bNVa9NXKkcQrbzOKh/p+uZmH4Milt4mbK36GNkub6+PoLwfSaW37VtE7t3aV5MdfUV1sLdhFcuVz3S3z3Tb2+0Yu+1Bg6viXnfvv3m9rTv7EPHCOb19u0BY9cFU/ULNCYngddFtfzeO9dYy/85tD0p6YkTE53PB77np2Rygq+0WZufuyaojUyKEsqVLFLhw634uVGiR6m9Lx0F1f91jv80IFnfPliox2/e9dXVQl60
ctak3QKS70uqVrXa0XyqHFLW13cJIz/aonjyhRGF2bv/RW60OZSs3CAqp2qFkWyhsjoOcqXKebN/y19Wrdt18qSuZXBUT3TioNr6qLcdouCpvCZv9q9eV2jy3m+O7+VNp2dYf9I5jlNo6tnvMu2bm+aM/+onN141anYHmAONj40yMT3Jn/g7/5vOfR2svrjGmv9u9jz766KOPPvroo48fGO+amefp3dMArK6u8vM//7/wox//+OZ3aZpy6NBs39p99NFHH3300UcfffwX4V0z83zo0CFEhH/+C7+wbeAMkCRJ39J99NFHH3300UcfffwX410z8/zZT/84n/nMZ1heX/8bu+b9KOl2smghP9j15D+jHb17gO6h75EfvCHywObfh35J5B2QMt3nHvI295X760W9bXvVDyQX/xlHvzP7qAeco77/XeT7t1N+UEHe0bHqnZ1yfwa7vxHbP6jR9+hH5Pu2VX4gad/GwA9wOnnAe3kbUUR9f3HlHcqlHnRT9fYifV/N7ZBffhDNyn10LD+gn8gP6LfvJCDfRh71gEvIf2bieCd+8WCdyg8u0gPypLwj9cq2faoPUu07MYm8w9z1feOv50t1H1nlbRKhvNNcoB4s0/3qgLYdv6OfVe9QOHlQ4t6xh/f721x9fx2rt79m+Q4NIW/XQb1tGMqD23m/WFT3Uus+2JfUA236fUYp7ziTyTvwn3eaFt41M88ffOop9s7Ocub0aaZ2T/Mvf/XXuHT58v1/MVi/5bzUCqcVWQISwgCO1AbYxGE1NAvBFDCZCYfalrGsKvpCNot62nVNlkBpQNUcA6WQGUUWK/IA6rWCIvAUPOOZUM/xVSNWEzjPu5xX1GDGeXNZIxRG0cw9Bd5GU5NGijz01FN55Om9jAWML6IwAmlSbYZXkEeQlFUxmxNiC6VRxNbTZjkFVismUkfoBLGKdkPRiTVF4Cu8HdAJfXFRbH0xosBmWzXQKHxVURr4gp6ghLhQDJkSqzw1Vh77pFVXjuHSb9ZvKsuI8qwnA7ljf+po1RTtAa/LUkMYeDqYqVwYqP4CB0UNilARlcKoEkoFu5VlwHqKp07kKXG0+MK10HkqrSyEmnbEViiNIos0kROsUtRKx9GWZTj3tFboiLDMySNDUKQMdVqYzjpRukqUdxjNBVWk6LJAO8dwZx1dtlHWASWmSAnzFtpZNvY0sLqiBhzQTKWOWkWZZMRTJlkN9UKRZIo88bRMg4UwGFjqpRAJxHarEKSZ+4KTTl2TDtVQNvcDT6Uo6wFJ3iIoc3TZxtiUMoAs1kSlMNOyRGVJUgrWKBILB1JHEXp6rIHCU34lHUWYe1q/Wim0XYh2numhk2jyUBEWmiwPsd1iPqUwsUOZrRgxArETnFPUrGDahsnUU+eFlV95f/QFLWms6NQ0VsFIBvXSXyMqHZ26opk7kqynyEwUpfJsLTrXtOqe5iyNPBWSb6vGGugMC3kEJhA6iaqomhSRCHFLoxXUtSV0nsIusp7tpTCeiaAboyrgiIwAACAASURBVC6EPISNmiIoYLgDI5nDVKSngfOUYXnk6QkB4lwRWpjoOELrdW+AzChUZhgphahWkoW+qLJWgKlbX1xTaMZyIagqaYtAcFWOSkofgw3tSCNf0Z4bn+PSRNNqaJzy1HWdRKPxdJljQempyAwkBZjM5yRE2GgaTKE3BxKlUkRV4WUumk6kyCPPDpHkCqth2Hp926pYTJT3Y6fARY489Lm21IpOE9qhwmmfT/IACg15TTAOAu2wofcNBaRFgLGKWuHlt7qi3gsV7bqiXoovUhXQTmGBNIGG8zoCyKvCrcOZQ5TgIqHVUOSht22n5uVA+Twb24rWrBRi6xjJfXnUkLaels5BYH1+cQYS69ueJt7xQyWs5nW0VbRripHcXyf07J++rw6lun9PcSNUtlK0Y0+npx3syhy1jmJ34W3v87QvNC4DmEkdUSnUSmGoDVHp6duSErK6ol4IttQ4BbGVKv5A4wu
hRzOpCiqFVs1QiKdjS3JFa1CxPuwpSo3zOToNFSPGIoqK/lOTCKSx7ycamTBayuZoNHRQlgaFUIin4mwUjtJAFkNYKKzxBWTWgBSaZul9bqAQxiqasTxUpA3vz1OZt21hNIETolTTKIXBAkyhiS2eCtAKw6ljd9uRhf4+g5mjmZWMppYiVKyNa4pQk47WGUoL2sMRxuaoLCUqCsBT+0WlQ4lQBj4HhQLt0RAhoogNQ7kjLoWspghLwQWGoMxp5o6wtMSlP7deOuqFUEaKvR3LYJox3GnTSFsoKX2lI1U/Q4cwXWO006ae53SGIiDAxiGiISk8xeeJDZ/XIyc0CiGLPHNSZgOc1d43MkdcOAoD7Yb2PmsNYa6InBA6QVtFLbAIfrwwlDqUEvK6Z4OxoohzX/AflD7/rg/7WrLR3DFYCnlhqOeKLA+w+ELJ0Ar5oBCX4tm1Iv9ZoxCGsgKbBIRFG5WtM5l2UGWKzjXGlohSJNkaSacFWD+uKQVdZOiihXIFxlpEaY5vtEmKnEbhY7iRZQzkltAKxnp6zpFMiErZpF2MS2Gm8jeAqdT58U6lN5/XfO7UAuYXf/EXf/HdM/v8aZ79y6/xP/9P/yP/5vOf59//9r/nZ37mZ2jU65vHzM3N8dLZcXJRzA/CzJrj9HFNp+EY1I7LaZ2RyQ6yEjO1IrRtyH91aoMPvpkR5IbrewXRhqN3SkqjeP5gwvN7hMlcoWY3mHkr4K92R4ykijTUjD11m/OdYZTTPHyrYHAVbo4EJGuaUmkC4G7DMJQ6z3cZG67uEdat4eNvpizXDd94KGYxCTDAwfmCWyMhX3qixt4lx5Vpy8xdxdi644+O1Di4bmmHhu/tNTx9KWdhOODwnZLxdcvl8YADdwv2LZRcHws5ORnwsyfbNArhrcGEk4eEpTBCiaJV11wcDHmjFrAnznj0slCG3qtOT4V86EJObIWVRshSTbNUVzw2V1Dr+KQ3PbvCjY06Cs2ZI5Z4PWTf+BozFwIWm5rBoZTjSYtgPuZTr7Q4eKfguYdqnH0E4jVPx7N8JEeXik+dynnkWsaB+QLRiuc+BOMLAbsXC24chzkd8FhtnT23Ar46E3FpwHBwww9Qxjf8AP3cSMjLU5qP2DWOnRW+eyhhJVY8NpdTXxWOXezw3tcKGlY4MFfSLBOGrl7m/IkDHDn51+y/9DpR/i0GF1+iudSipo8wdeklwkxhUBx95ctoXkS5BMUGwaqiuXoSbQP+4NMfIXLClx9r8NZ0wMdPpQTA7mXfto1EezYJCRi/o3j1qPDYRWFiXWjOrrLnYkg7UtRSzUTL8cNvdJhctNwd1rxwOGHQjrDv/BuI0khtkNXJBnvPfJtkZZVAXiRsCSff+zA3BgzvuZLz+KsdJm4vkNcaXJ0KGV9zHL5dsDxoeO+bOY3MsV7XDM1p4mXDUtMwuW754uQQwy3FY7dLTk3HtGJNvGo4WwwQizC7UNKOFK+93xEHjnApYGzD09/dHdPMJSGzC47htwLefylFNAxvOE5Ox+xfKnltd8T0esmliYgvHw0Zbisev2pRTjG1Yfmhsyl/+oEmn3i5zc1ajfO7NGvWMLkunJgrMA7mVMILB0I0mr/cH5JoxZmpCIx/MMELH7LcDSPy4YK0jBhvOZ4/mHB0vmDyDc21A5oDoyuYq3Vm75YMtS1RS7EwGFDPhekVy1vTEdenLMuE/OGukI9dKdl3FY7MF7x0ANxGwOOLJWeGQjqh4s+PBJyYE5I1zWPXM6YXLc3McWm3IU+EvxwJeeJ8wGQq7H5onrOdYbTA+JpQe3QZd6tOmkY8cTNnbVjRqQtrTcEEjh89nTO5Znl9ImRmco25osFA6nhrKqIwikvjAZfHQpJCyALNnx9O2NVs80Jc528Pz2Ov1MkDiFJN7XbI9GrJaiPgd44lHL2l2bdcsFI3nBqJmBlsoZdivlePeWE4oBxyfOjNkrBjmBvQTGRwaxQOLDpW6ppA/A/py4MG2dVhIY1
56yAEqeHNI44X45jRUti3Yjk5ExM7eP5hx5Frio29JUYLUwtwem/ArdUGs4vCvjslV2YE6WgOLJdcmIpYGBeOXXWMb1iSUjgzHtMsFFdHPJXoh853yGLFyX0JkxuOw4uWK5OG9mTBa2MhSak4PF/ye++LWQs1deN45JrlyHyJEf/DYveKZf/dklP7Qj45cpuTCxOMp8KJuRyF4uwezf4l4eh8ybcOJxiBjfGSzy9OcWJF+Ma+gI9fyomtnyx58lpGJ1RceNgy72JGc2FpUMgKTSyehWJu0PC9mYihHA7Nl8wsldRuhQziJwbyQDM/4GncAoH3X80IHIy2LSdec8wslfzB43VmFyzfOa55/5slr9fq7G4JswsFt4cDdq9aLuzW7FtyPHzNYoCwhN96rEGpFUcvC9FGwLNPaP7TRJ2fOdNhYTCgHSluDAbsnVzjsiTMrMP5yZAAxXJN8+jtnNqyQhnDzVHNU1dyikBxztRpOrgaB/zY6ZTZuYLLuyJ+/7GYIzcUBxYLskjz5YMJ4wuGR29nXB0N+OQbHQ7OF4iB28MB545o5uOIH7qYohHOT4Ucn8tZLmtMthzDHYiWA8bWhQN3CwZTxydOtjh+JeeZJ5vsmyv4wNmMA5cWGWsFvLkn4pd/vMbH33Dc3jPBU8/d4Jsf3stDr7xOmJ6ntj5Ae2SI8RXHrlurXNzfpJ4Ko4sZrVrAi7MjHL/S4a3jM+xeKBluOV7fG/LecxlFLWH04hmG3BjjC0tEZcJ3H6nx6JWMvXdK5ocMP/3MCgfOXObw2TeYvHkGwzKt8SdJVm8Rdi4QcJLmylkOvNVi7Da88uRB9lyc4+7BR8nriumbHeqZ4kdeSRlvOW6OhUysW144EDOYCV9tDBCvRXziXJvDN3Os0twcDXlhNqTZhhdVk9lbig+91WGoJbw0XWNyeoOHXlMculmgLZw9qlnZlTN2PeK7jYCPX7HUCsdiHBJbxReO1XhosWB61TKQCa/rJsdvCDc3mtysG0Zzx+EFy8LDKcffgGtDCelMhx950fH+0x32vzHH3X2j7Dr/LaL2aY6c6aDXl3lr3wHe8+oNjLNM3vwDRq8tAo6iPkMjCxib/yuizlWMrGDYDUbxka+eZnh+nXxggonlNkdOXmDPnCUiYHClRWgDhjuOiVVLIHBuOmR61TIo8Pi1jMGOY3LVsjBsODVU4/G5go2aZs+q5eZwsMko9K7CZz/7k/w3n/scp14+yec//3l+8qd+6r5LH730aZ4yReFQBBWfkRb/q56KDqwM/K/VnQgrypkuDZCoLWomXa0TqeqYLs2W2qSAkc3771xf0gDaz7pETjapbPwss5A42aRx8jRq0JCtxR/D9qcOdqlXnNqintJsUf8oBbHzswGb1EZ46pudy12eRUht0v/Adpoj0X7WrStbd5bl3mWzaibbKFygCKrZiU0Ktere1lQzVFVbjduivtpc8hM/i5gIJK6Xvkptzl5Fsp22ysgWFZaLwDa791Gb21U83ZTBEuKJ4xL/PqjocXSX8ynCk7QZ/zm6eq8IndukvAoFXLCDImyTvszPugSykyatSze35b+u8sXAbVHrdXWhEBwBnkwrQNCbM8AosDWFGN/2TapDU9EGdeWvfAPNpu8mlVM45R9kYCqaKK22EXJt0uxtWx7r8UVl/Kxr9/OutrrXMIJ/SAV+Rs9VFFOusr3TapOqqcui1JUD7e2sxVNWmaqdXX8OZSveg+qzoKvTsGdZsOvflW60bPdd47ydxirOIqnaWVQ+LT2z7gPVbHXXpi7ooflDeepAI5v27bZLep/Ep2WTbhC2qNh6qbsEtUnT1s1tRiByW8RXXZpC48D2kmLpahWrun7tHsortqjHlKeoDGWLvqnbVN27Uiu9dGqKqCfnRq6iwuxZ3VX4HLTJodaj8KBri200dmqTQm7bkwnVloz0UBRu6qxqrBaIq3wjlcybFIbdlZQeHXfzv6OXvlNte1qZqK18B7Cn0k/i8D9
u7/M4v0B25NbqqYSmmw+r723gV0id2pKnlyJ1k8pRdY/zeu72SaL9Q0s03NsOtus2ruJPlEK0UHMwVs2wb/Yxjs0cqXr6gK48Emz1D11d6up/0KVY075/jKTHf6r/ordmAZ3yeRM8XVzgujJvUft1+Umlh5qOntc2ULhqll+Lb5+LNC7wNIDDFWWsdoKra7/Kg/YZopuvdA8tm2zt8wgrSlKF+NxUUQSiK+pbpX0/p9UmhZrrGQ/YAYVNdJW3A7bzH5rqL8YSIIlGi6vo36qVMK1wBsq6/0+1ktelP42Vt73txkPV70VVnEeqS3nZQ+PXpbczPr95Or8tasFev1fAsPP9gNN+RV13qXj1Voz5/lZtUpt2r+cChdS9L/h+K8RhfGLS9GxBiiv96M34q4gOq8+qJyljEK0rCkqFjTQ23tK/M2pztXTzSYQ9dKtSxdtmnGu1+bBNLe+ybRtd/LOf+7nN1z/8kY/wF3/+5/2d7X300UcfffTRRx99/I1Av9sFHBoc5Nnnnutbuo8++uijjz766KOP/uD5neCjH/pQ39J99NFHH3300UcfffQHz+8Em4/j7qOPPvroo48++uijj/7g+QdDWW0SN+KLA+LSF5x5GjdQVuFCQaod4lo8zUq3EM7iCwi18wUdwxYCJ+hCETioW8EG1Zb/3KCpKNuireI+JRBoX4RSVhv7XUVbZxw0CvGURbGnE4qsbBZ/FKFvK0BN+WJEXRUwGefprBoVHU5sfVFBESp05CgCTdHzLHmFpx1CPK1UbIXACUa8HA7BoSgqOqfYCsO5pzITpQi1o1k6L7OunmGvhdJqahXppHaKxApBroisL96yonFWVUUiXganFEr5wqi8YvZwqE2ZbVXoENiqKE4r6gU0rfiCQbNVmOkUmNDT1DjlaaRqVghKVRU++KKT0EIgDmsUShyxLQhsDs6iKIlsDpSV1cPqdUFovQFcGGDD3kpSAUo0WfUuJLa+wqBeeqo9a6hosNhsiwgE1heN1QpvTxt4+qYs6BZuCVkIiLepKE/RpUWQIECMQZUZpsjwjJ9uM8wjC/XSoWyBdgVKHGFZUC9KktJRRltFUVvFHLJZ2GLEU8Y1y6qIynQLkWTTd7O4ov8rQJU+FlCQR96nI+uLaYzzNigCX7BYL6uCIO2PLavilMCBjksCJ7jQ++pIURA4R6l9W2IH2gilER/TFX2RqqiwXFUsaqSiqCv8db0PbecYD52nT5JSo52QJ1R+X9mpKtINbZe6ERqlbDL4KhGaSmhaTyNWL30ZS9At6q0qYK32BYld249a8X5oBZ0b4ooiTwwUzuAMm3ZWWjapsJ34mBBAGXBOeZrLqrg5cJWtAigjKCKIjfeJSIBC+yLHyFPbGedjylhfoOwC54t/qrgr0b7YSwlDyqGrfNSl9AvE/89DhVQFamVFeSlV4WKzqHyKreKxPFJVrvG0YgpPedktEm2UPt6NCE4pIusNZiMf7xZFaRRl4At+xXjqNqe9D4OnUTPifbSoCs8C5e9ZGu9zia18GUUZsOmHCl98Jfiiq9wZlHgKL0+/5fsN0y24rYo/IwtN3GYhY9kTW4WpilLF02p2q+2Mls2iy9IonPbFtaXx8dKlCC1CT9VWhFVxU0VFWFR6UE6wgSIUr89G6XOKAhyeCtWILyCLrJcvjzy1omgYLKFWKN+vCbgqr1rl6cxsVcflnPLlyJHDBA6nvE7KHvpV47wSDb4IsJt7bZXnu/FngCzRXs6qUNlqxUDh+ySreuQTaJSOqKId7dp0sBAahacj9dSePv8HVgiswzjn+yIRlMur73JqZc7urMBUVG02VsRFipLC51JnPT2ZLdBFRq1ICW2OciWhzWgUHZTNicqCwPpj64UFEbKaRolD26IqOPOBGVU5JBTQZY6yRZVJfNmkdr6PUZQIQ0jV91CWBNYfp8sc7QRtC4KyIHBeH/WKgk1RjU2qflCqQkLYymO28tXEivd5/GtVKrSrKPkEtPVjo26
JXremMbK+A1OV3yvZKrYTVREcaJ9wiwBURbkauS3uc6riVGMzFL7PlcB3fI0i87oRB9jqr0DblKDSmSVBEPLEYEONmBCkJHA+3ynnUNbTKiprCZwlEB9beeLHSWE17imDKpcoT11ZN9b3T8rHZLd4+l1FVfdOMDc3x5dWxggzGG85js+VDKwoXh+Ief9Vy95FIWtFcKjNUpqQK8Un32jTSB0vPBLz+kjAeTHcSTQ/cj3HasXhBWF62dG4HTC5bhlfh2/Mhpy4XZC82WC5bnhpMuL6Xsd7rjgWBgzDHUd0aJ3nhxLWCfjEmx2uj4f8X7MxH7gj/K1XW3RizR89GfLBNy3HbuccullydTLk2wciTtwqKbVi175Vps5G7FotuTsSMLtQMNpy7F52PHMk4YPXMuolfOlojacfvcFX3DjGKFZqhrVY8dTVjO/N1iiN4vBN4dGbOeMtiwVmVh03RxTzgeHCQMjHLqU8ejXn6C1LMxeujwUMza7znlOKPWuWW0MBk2uOdG/B6ZURPnopZamh0VnABy9nTF0wHLxTcGUs5Gw9Yvh6TKHh6YspYQnfOpzQHG+xslHjP+6J2BvnzNuIXWtwZSzEGd/HFGnI5LplI1EcvuaYbAknB2qoiRzbCVlMYKYl7D92h92vRbw2HfHeWzmNUvHQOcf4muXyVMiBuyUfPdXh4KU7XD04zPjcNQ6cfYXRuavodoeQV5m8vkggF1GsYzmK5svAPANrxwjL28zPPsmdXUNM3ngZWMPXbV9F8TUcR+lMf5TBlmFpwLB/ocABpycjfuJUm91LJZ3Y/zBJI834huWlfTGfeDVnpO14/bjioZcDfu2RGoNaaFj445mYf/qNRaKsxZV9TZ4+3WFow6LjOq3hOsNzf0h99SSaJYQGmg1KDlDTkzz62h2G5l6hvngHlWVM3bzK8bN3mZmP+MOfGqS+6gchKMWlyYg9yyUauDES8vCtgoENePJqxp2BkEv7obmu2LdkyWLNWqT48nTC3KThqdcdnSziwFKJEnh5X8z0siVJNRsHcx4763lQv3G0xokbOVPrlk6k+MpszDenIuabBikUH7qRM/rDt4jeGODq8ZKPvOL48T89yWAr5Dc+OcHfei2laGii/RsEd2Nfo+1gct0hWmFEsR5p0kBzeL7g3z4R87FTgik0zRVDsxBquePsVMhjN3Meul5QiuFGu/n/s/dmP5It953fJyLOlntV1l69d3Xf7rs0eRdRpEgOhxJnqCH1Io/kN/nNmDEg/QO2JYwNYYABDM+LX2xgAMPAAIYhjUTaxmgkURRFiTt1xXt5l769V3d115ZZuWeeLSL8EJlZWdXd99IezYMv8ws0uirzZJxYfvGLOJH1+/zYbOf8r7/k0/YDXn+cEvuw1jNYAUEG5a5kv6T4yr2YNLQs5JrlluXhdXj9juV/34r4zfdj4qLjk2/ta54sKD7xKOODswGlxJJJgdSC1/Y1L+6mhLmlcjvgUjNnqa/ZeTHnj/JFVgLNl/8uJvMlja2MO6MiRWV4oH3OxYZKbPneC4p+q8hoM6EhIpprmk/ey3hY99k5p9lbhe+uhHxxbZ/cSuJGkcqdiP2y4g9fUNyXPl96mHBUVmy0Ne2KonM5wWsHvLvqc1CQHGQBWy3DaCPjc+UjwoclCpnlxd2MYmYp5LDQh//tYsSVtiELBd9b93noSVYM/MK9jOuPDI2q4nFFUvBz+trnJ+fh199O2Fv0uPrIMXlt7HE/8llODBcPDDtrkhd2c/ZqHuf2YLvmEW+NKO97vFMJGAWSUdkSZoI3X4HHC4IwFlw80iwODf/m0xVeOsz4w08GHEU+Fri83OHdQZU7K7AYCy4earJAcqvo0S1BIRecaTvudrOs8LXlp5c1t+Mq1VjyhdsJSwOHtIpSQWgsvVDy7hXLnu/xygPL1pkuzW7EqGgZKYUQgu2yRHiOdnO/7NPyFJtnOgySgNVal7ARcrad8fWLIY2S25wupJa/uhLxhQ8
SDmqC/+laAREpvnNBsjByDyrbSx7vL/o8WoF/eDPl5mZANTUUE8vWY83iyHB73aOYC3arHpsdTbsoWRjAnUWfP7no8dN1jxdahtd2Ura2LetdTa8g+WEtoh3B6sjyypOUN68p1lpwFEcMlODsVpNyLebOqMSNA027IBkFktW+plH0ONPVVBLDTy4r1juw1nYPH/eXPTwLP1kJeKWR8/VrBXQg+OA8XN+19CPJV94acO1RxttXQt5bCzgsS1aa8MtvD9lo5XzjlRJ/eD7gVx8kfOV7A67tpBhPsNHO2a96KAOvPEq58dYTFo+GxNUS17dTNj/4c7prl9j84C/YunOfX/vGkFpWYqELj65GvP5X/wY/f4ygibIeBGdZu/PXRINv8cKtm6w+uYc3OGR5/0ds3fs/8fMGS4+HLHQMpl7nF/7iPulCkT/+XJVP/+1PKLUO2C8vstDu8ub1Rb784xH7Sx5n9mI27v4RXv4IQw0jy0ibIrMCMu6j+BHt8/8lhc7/jKCHyhQFe4EgaVI5vIn1L1Db/juqh7sU5Co/vF7g82+NWO4afvhKyKv3Ux5VfPZLghcbmqOyophbLhym1AaWv3zZ55O7ms/eHnFY89hs5vgIeoOQT99JeOd8QCGzNKKAJPFZGFl264IXmg4FW0kMWkpaWwlR2+dcS/PvX4qIcndwsNHVbGy16CQB314MeOOWoBZrrBQ0z2o+9abl3G6XJJKs3P+3SPstBC06Z/8pQa/Bi+//kGh4C6uLKP4cUAi2KbdvUWpKPNvk4PLnKLfe4U//8a/g6yKhX6Pc+ibF9BxSlSg23sNLGjQubLF4/yeUhx7CL3PnTMDXfiniV95KWGtrRqHkoOoxCgWNmuLHayGfuP6Ex70qmRA0S4rFoUZL8fN58mwRCCHGaPjjJyg55jmdeB46kcxGjLEvghoTRJadsrLsmJslsfiT84Yx10Qh8JjJIjZBZwGzYK/KBB81wS3N3F/I4yd0hDiZBWl8jDahPAnBGOgiTmQ/UqeeGpniqsQUWTfFU42TV4gxZmpSsFDHn7Ucn6SL2dLEpLcm6C/Gn5u5j7DIGWTbNJOiEFPklxi/PotDOzEeYwTP5N7i1DifPBE+1fbJB/yZ/tPjryUmA69dCgExvVgixmkFJgP1zHoRHf8sj+81fWofnz5O2m0nhihO1Q9IhUCMWU8p9mSqSXX8GWfTCmnVtMbTZkoQoTvKE0ZMjqtcucFsGcxwyY4LsONWifE1Yqaqk3FaZjyek3IsJ64/NQmnvWidFeEJQVGAh5hiuMSUXDbpK+HwT0zaO7luFv03azv2xNdsQshpm2ZtZoownBmngnCmwanrJlbg2imZuFE7xoABZBwTlk7YpT0xtGP/MzN5Jmiv8UlnhamZTftjastiti+Z6ZOTAyeFRQpLOHNXOZ69AiggCCbtmfE9k9997LFdTNtzjJSzM/NACMjEZDzGGK0J/kqIk9nbxnZfmNRYiOl9rHDfJokTluzKl2MfO5nLzjeIE5kZ1XiwJvebUKeC8Tddk/ZJ7MwUOr6TFKc9ijMOM+MuxISn6ZzliX6R4thq7LR9Jz2uFWI67ye2YjkuJxx/dyTGdfFmDLE2bkcwO85jyJc8ZXTTMR1jJsVs8kFxvN6UhKB4Yk5N3OD4tPHU3J92ixXTNklx7MPlDHJRzKBMxdR27VMZAaPxWKopBs3OEMgm9iFm5qoYr5vjengglHjK54jxwmnHiX4QYMe8S2EE5GNfKI49uJerqf1NILBWOx/r5R5SuwkrjcTLA3eldkeUQgBF91k56Sgzs19AjDFqdozakzy1ksgZexGSiccUTHB543uNOXDCzNiaOt57CHFiqGf82fFgTn3tTOpBMR5AOTNmE8SqObXaTemSWKy1U98yu7ZbBMHUB4z7e2Y+uNN2b2zpxzalsknbZ6+WSCvd2I0n3HR9Ou40hJzcbSZTsnV9JsSx35xg+qYzdoI/ncnUaU/58p/LzfNcc80111x
zzTXXXHP9f9HP3ebZWjsf9bnmmmuuueaaa6655pvnj9JgMOC//m9/dz7qc80111xzzTXXXHPNN88fpX/y1a/yX/zWb81Hfa655pprrrnmmmuu+eb5w/Qnf/qnCCF4+aUXkeNAj9k/4FAWx+PhOFgFjv/43I4DHCQuaGs2ZktMyhLP6NhJPMkYU2XHCJ5JoISydhqbJix4M+GK4xixadmT18bRDuOAE3E6vsxdMw4imsS+TYIWZnPSi5ngJWGP2+Ty27s888qOA0XGH5gEE7ogENeQCR5OjpFewlqEmdxjJnBo3DJpp2SmaeDB5P7TTrPHwYKTIuzs+4yDqibBimOUmDR2WtZs+2Zz0jtE2MnILXHc6xgxO7qTO55+bYIeM9MxseMQzVkAmplE0EzaJwTKTjBcx0F5EzzVBCc1DdoECsaiJuEOMwGt08AnO73FxJrHLztMAbP/CQAAIABJREFUlpFijPmZaaU/G/owwRPaaV/O2pKYiUydvD+9pzgO5rEzYzmJDHIBTy5QCeHGwEgX8PJUL1s7tYfJ69Ycz7FjA52G7xyXYe0J+zgR4Dd+w7P2OMAVO22HOD3SdoLqm1TLheGYSbDSrM+w9oTtTsJSamM7VMaVI2b6ixOBOcf9acXJgGLGNivtjD+wLiDMsyDM8VjJU/PD4QPHNmJPdPE4WGc8QOO+UBxj1KZzxh77CmlceyeBP8KMo/3EzDwdtz8cByhPPuuN7WrWR1pcEPUkys8CwtjpfJenxmX2ZyNPegJpj8fTipl2iuP+lrM2NQ58sjP+wM4EXyrssX/A4bcmA1I0k/GyUz9pZ/rfjn2e5ThoVvKMPxUUxwFIk3HFOjrBxC9K6zBmk6BXO47mniAO5TSw1p7ALkprT4ydkafWuckaN9MuaSf9fzx3jDjpA+SpoL9pMPds+4zDuiJcv9mxrzfqeBzNZF2SM6HY40A+NfZ/chJcN+OzTvgfebIudrwenHKE07XvWfNtXIux9VmENQhrZpBo9mTYm7XjwD2mn7NqtufH5TApw12j7HFgqfONduwT3DoppvWQMw7eTvcjAMLo6TWT+StO+H9xYp8w3YeY473A1BUIh/3j9PpxylYmAX0TZKSd2ICw+MzuR1zfSHMcmDq79xDjsmaDt62cbmOeWlU5MU+PV3ArTq29Mx5XWGdVcjx+YrJwGIuVzsMJe2wxduLPZ/tMnPQrRgjU8bbQrduC6Zr6c4GqGwyHfPnLv8o3vvHn9Ho9ftiqoP0cW9Sc37VknsQT8OpOyoMVj+01i98JKI7gTFez0tM8WvFoFj0GnqIVCb78KCUvO/xQKbbsVBXfXvPZ6hmMFFRjy7mWdixhA52SojwUvL3k8/puSjW27J2zBMrytg35yr2EOJQkBUk9tqz1NU8WfH645rM2hMSXXDzMubnp84P1gPXY8J0Nn0LH59qOJsrBKIGXww8vhLy4l1HSloOyj6hqGiVFclRgYWRpX8hIYp/XdzOWBhojBeVYY4WgG0riuuXQ87h6mLE+NAwiBaGhPhBUckuUGG4t+1Qzy/1yyOffTvnBlYjPfxCz0dbkWrLeNWgFtZHmwZblhYeGJ0seW3sZQgmqiSX2Bd9e8vin78ecO8gZliQfrAQUsTwUHueihNKhTzGFYuYYi5WR4aebAeePNH/9oo/yLF94J+G1xymX9yzV1HFzNzuasOERDQVaClolj1cfJmy2cs4f5iAFO3WPT2yn1A9b5MUy1cMHhMkOuxdfY6H9XSSHpIXP4OX3SIuv4mWHCP4a6EP+Esr8iGjY4tGLa5y5vc/ji1+lNDBI833A0qv/I/7oSy/yibsZ3YpEWcsokrzyKGWvrtBKMChKBqHg+k5KKbEUc8tGWxOHgnoHVtuasid543GGMoLuWsZv/Chj6fFDlveHNDYWWG4OKbSaSFkk6v2UtPQ6MnuM5Ba5/xKHW59i8ckOg+V1onaX3voG7752kfXdDjKPESrgyv2AazuadlWy2DfcXQt
oFyXJVsb6Q8e+9LXlfEMjhOVeLWB5YOguGe4WfFQtY9v6LFrDZ+8mtCuKR3WPJ3VJ+VKb3bREXIZP3Mr5DxcjhkVFJbEsDs2Y/ym4teEThJqBllzKci41NI/yEnIoGfmSX7irOXP/O1gV8a1fOsuvvjWilBm+sVrkjUc5t1YDMl9wUPFY7+Ysjgw/OOvzwpFmvat5qWHIheDuks/ZtmbrIEMrQSWxnGlphBA0KwojBe+t+VRHcPVQc3U3I8wt37wQohcNsfXY7Oa8/jAh1JaVrma9qekUFTcvKT7zTs6vvzPi0mFGbWi5+kSz1tY8WPdZ6hveXw/49O2YB8s+g0hytpUjrOWvX1NU+oqLhxnKwrvLBSoxvPowQwK7NY/H0mdhIebV23CjmXP+KGdhoGlWfB7Woaw0eezxS7cz3l0NeekgY3vRZ4BivSV4UxTYS4r84sOccmI5uhrzxfoByV6F23XF3SWP9b6hG0n+rhQy8BS/uJNyuZNzvmtY7Wu+tRJybVsiU8XfnFG8fKBZ7Wm2l3wa53NeeWw529U8PA9DT3KxbzkowloPwsxyVJHsV+H6Y0G7AithwtldwWv3UzZbOd+6EnG5pWmUJRs9jWcsI19y4UjTDyXf2ggoSMt6dcAdW+Rcz7LR01zZzxkGkptrPgOjqCSCrieox5ZBJGkVJN1lQ9eT9I0ijX2iWFJJBfFaxov3Db2SIvIMV/cNO1UFSvDNCwGpLznXzrnxyFAYSRaH8LguubnhcbFhCHPDUdlDAD+qBRhp+eSuJot9Kn1orhhMLilpy4Kfc78qubUgqY0kB5Gg7GkYebx8B4aeoh0pfvl+zObAsN7ThNryrXMhr+6nRBlEUvD5OzGXW4bh+YzL23B7yWO/Ivjsdkag4Q9eirhxmPPtCyGffJJyUFdkSiByOLyccbcUsHShA42IxaHmelPzym7O5f2cjaOc71wtUIs1jQ1oS5/QWlb6hqXYchR5XD7MGVYtlw81zTXDopdx5n2Htbz/kuFhEPHaw4RKavmTl31W+oKDksd+QXKxo9mvKFb7mtrA0H0pJReKf/hOyuJAo5CcPdJs1z02OzmX9nJubymiAfjW8v4lyWffTWlXFd+6UqBk3Mb7te2EhYHh8bJPIbF8ayvi194a8uLDlEqrg7CavY0Ftu48pjR4iyAe4eV3kOyg9AO85O8Iht+ncvQ9/Pg+aelzeFkKxkdkdToXK5Sb38SwgiDh9uufZ2X3b4AG8ATJNl72kFLnPkbXsEqydfcO1e73kNyjkDVRaZGKXObcnR+z3NihfvhD/LRBa/PX8HKFlx0i2UZlb2Opo7iPF38XqffpL30Voev4wx5+/ASB4GjzCoVhig1LvHljnSd1n+uPUrQnGRQkZ5o5Z7qavUWfUu7W0R+cD/nMnZh7Z32+vxJytaVJfcHOoseFo5xOSVFIDRcOcxo1xaMln25BcbadEW+mRB2PJzVJJYVqYignlhE+yXpGpalolhTdqmGI4rMPEqKmQiYet6oeLx1p/uRySFVb3iwX+NR2zgsPu6g0xR82EewAku0rXyEPSghVIBr9kKPVVykNvovgFtABMhAVess3qDT/b1T+iPrRCguHPaLuI7z8Lhln+MvfWGHjSY2w2yAaNBHeEsPlNf7d56qcb+R8+5WAas9tlLsFSeoJukXF7RWfMz3DX1QqkCle3sv4/jmPjZ7lbzf8n4+T5//sN3+T3/2932VtdRUAXxpsYDDB8QlzMbUUU4OWkASQpw6qX8ocJHwQSaSBIAclBJXUEo+B8tJC7Em6kSQfQ8LLCdNTp0LqYO5BJugUJIXUsUwNgkBauuPTSaWhnLsEBVoKEk+QI0g9t3kAyKRg4AkyJWiHkjT2picCYeaeznoFQZBbKokl8QRZCEpYBqOAyFrSoiGXgtrIQdijzFLI3JNwqiAJLakSRLllYWgIc4eDSnwHo5fGbXw9bUlzRSk2jDxJbWiIMkM0EtRi45KTZBbtu6fS2Jc
oA8XEUI1dpftj7lMhM1QSQ5orhDdOYAOEmYO6K+PqhHWgdYBuSZAEgtrIsNHWLLUt1dhQSxz8nZ6HEa4dsQ/VkSHKLFFqqY40uRojtKxBaZA6Q5CQlkpIBu47Bq/oTp/8CoIcl3xkhDQjBD2iQQPt5YBHXFnHyoKb1Fh0EHFY9fC1SxSgrAPWLw4Mse8Siugxo6ocW8LMOv63dk/HlaED/dcTy+JAE+WWKobck6g0pdYbkXsCpQ0qzfDGxwZGVcYP1F2sDEmLFfz+kNzzsFKhCxG9SgkrFcIalM5Z7+UsDQzKQpA7OxoGgqxkKWSGTLnEIlFqKWQWmbvTxySExBNY3xKPn8qjzJ029gNBN5KIYkYcuCQO5Z5gWFR0C46bq9XxuaLCJa7IBETjE6bRMHSnFLnAKhAMESYlMlAeGdeXuYcEBqEgGyde8DUUMkvsOYi/tLDUd0cjiSfwjaWUOBssJwZpx4krcMkDepEkSgWFzI1HlBlGoWRYcHMvytxGItBQTCxRcpxIo5hazh5pqkNLbWCo9zWFzJ1hpZ4gUYIotTNJBtxc6ZchVUwTphgtKWRQGkEu3dzTmaKkDJUhLA6s62sDUQq5N06sJKE2sPRCQSmxCO2+oYo09LKAfu5RTixKW0RoWPYSImNpFSXdwjhJhYWRUa6M1FAfWaojx+ceWIEYOD7vQeSSCYS5QQtII8e1DrUhDyxSuCRGOS4Ri5HHR2NRAlJafOm+7akPNJWRYehLgsz5wcnpZpC7b+mkhV7o7ukJy9BziVWi3FJILFoKjBHkVqKFYOQJ0vE1iedQVtqzxFIwTD18YwkzGBUtnnYnvwVjKScQB4JRIOgVBCNfIizUO5bKwFLKLP1Q0Cq5NoXjZDlWQGpd/XxjsbFyySqES1KBFEQYhoGgFzl/mAnIkAgDhZ5E4vzr8sBQH1pKqftsrNzGJ8wsC7GlPjQs9Qy+5/yuFoJMQWnkkmL1iy7BT3t8n9x39TRAUrB0ixJb1ggLxcyyOLAsDQxRaiimlkEoyKXABJZIO7s04+nq52698TBUE8itq/dCbCkmhrQAg8DVtRIb2mXpErNoiH2BMoLBOHGNMhB5mjyAel8TZZbyyM3bbLzWSdyhb5i7pDSDgjtVzT1JogSFHPqh83/+2FaUgaHvDlyKsZ1+S6Q0eLFLxqFGXQQxggGSFirbx0v3KLZ3gSHGKwCB85NpTlopAQmTZFjDchmIxz5/iOAITzcpdPbBZsgsY3Wni6CPoEuYtpA6pd43eMM+5WaLYv8QQUZWrGO9AJeIK0HSGK85Bi/dBzQ6rGGkjxoNwaZjfy/c55RPv6DQk+Q+xlIeucRF9aHzY6k/PkwqKzzjEvcY6/CFo0AyDI73MBOfBW4tQDjfZn1LlDj7T6fJ0yAYCfKi8+lB7hJupUoQpoaoKykkxyfKrbIkl4KhUWTK7T2kNtgpCBJSX5JGRXRUAmLycAL77QEjIEaIIVlUIogbQMzCYZdoNMSL+whyRJbRXhDoQhkQhL0e1gvRXkCzolDa9UE8XouFdb52GEh64+Qp+2mAloIoNSSB6+9e+HPAef6b736PvcdP+K/+2T+b/5HOXHPNNddcc80111z/UfI+7g38yj/5VXrd7nyk55prrrnmmmuuueb6j9bH+uT5v/v93+e3f+d35qM811xzzTXXXHPNNdffiz62J8+9Xo//4V/9KwaDwXyU55prrrnmmmuuueb6e9HH9uT5V7/yFf7Dn/7pz3bxLKro9AuWCdHG/SqgD0/BhyyQiqeLniLbZl87/cMsDUeAfW4lmUFzHX/2BDqMk3nnJ6/p2XLFyffsKWaYfeqHZ9ZmiiJjXP5xBewz2zkFD41xT9aKE3WVUxSMZYTD45gxgmf2Oj3ONT9B4cAMKmuM2zqNxZr9514TaOHwdqdGbHyNPvGaMM9o/zHk6Bm9Y44RP1OY1LPtxs7YyfFFT/HnXIiKcIEgmNl2ixPXcQJcdTwwUyQanMT+GMuHDrk
93eoxbkqI8Rgd97sRJ/vSYYvsU2VPkXhmBsU17nVz2kzt05XLZ8Y2Fs+3Uy1m7Pt5nT+22QlSboI0miC1JhVKPsSFiP8XvmmKEJv54CxGcYq+s083356c+sf/P2PgzBijZmfoV7mAdMb5zyLApvCrMT/KPNVIcaIes+0RJ43quA7j/7PZyfjUcJ5CRE3c3SxKk9mxnwEDilO3tfbUPD+JKZvUSXMaXff0GM1OQ8PTfmVSL2lPeoAMSE59fjJ3zXRsnr6neY7vntxXC/HMaWlPtX3yYm6P/aSwz54CT091cWI9Yba+M0uhPuVrHbLSnvCi9jnrXj4ub3YMeMp3nPQVJ+3mGGk5HR9xsu8m907FDPbT8pQfP76lfo7jm3Ee5tlzbBI8yFOgzdOewfL83YCdmWPiOb78dNnPfv1Ee619pq3ksz7yo13+M+eu/YiWmlN++qkeEG6uPL36TtB94jk1szOzeLaPTvqpiX8wz2lAjnjuvDtxqTieQ5N5ZviYour++Otf53s/+AH//b/4F0+9t7e3xzcOFjnSiq722DowPNhQbLYNq23Nj86F7BQVL/lD9NDjcVGSbeQ0fJ9RIKnFhle6mpW+wdcWYVz0ajG3vNjRFLRlZ1nRjRQLQ832OYm2iifrcCQ9wkpGOYbL+xkHFY+9kuTigWR9ZIhyy1FJ4QFHvqRTFuxXJRfajoIQGsujuode04SBZliAs02LLkCz4LHS1xQTy0HVIw0FhdSyv6i40My50DScaWsqCQw8Ra0Lldiy1DeEqeUH130SqSimlqWhYXvJZ0HmnNk33F7zSXzB1X3NWjenkLjo60IOSSD5zM2YgoW1dk6/Iri/6ZFISauoWBwa7tV9tvY0iwNDdaj5u08q2tJnsKLxA8uX302ptzV//EaJemIJUsHLR4ZzHc3akeOpFnNHRVgYGtb7hvOHOUoK1luGKzs5h4uKc4c5i0NDLTFkUhLkjvKhDCz3DS89SjnTyKgMDcUUqonl5tmAlx8kZAWfQucJfrYNLCPFAio7Ipfn8fMf8/j8GywcfRO4O15i1seM0tt4IqbUqNBb2aJ6eIAyu1jOYcQ1ilmZ9f0WXp5h/JBR4LaSb14I2ehoyomhOrT8+xsRl5uaw6rHzpJH6gnON3K+ey4gQowjpjWrQ0OQS87v9QmGI37wi2e5duuIxkaFQBeIum9hbRFrqkiOwEYIW0KIKvsbNX7yqRIXHgmMF1Dfb9LaWKO7skixnzAqheSew7UtjhwR5lHNxx9IoszyYMmnmlrKicEqwdVGhq8hk5JzHcMvPta80MgZRI4OY6QjobxXClAjnxf3M9ZbmmZV8l5VISua98s+vXWo9nH2EsPa0FEnolzQKEhMYDnbMlzsGc5/8CO0V+fw3Fku7OcoCzUBQQaNisdhSVLILJeaGdJCKARbBzkPzijqfUeEaJUUm52c/brHMBDsFyUfrHr0SorHlzV3Fjy09TjT19zYSXl4WbHUtFSNpVVUYARaCbb2Mx4tedRGbp5pIbh/yfLqe4bzjYy9ZY92SVFILb623NwMWOlrlgeGC4cZ2hMcld2c+8lmgIfgceRxqa25txpwb8ljcWR4cTdDAbEPtcwSe5LiwEWCR5ljNH/nis9yOcECZSzXtjW11M35g6qikML7VcUXn6S82sipDwzfP+9zVFCMUOyOiiwlllpq6EaCwtqIJBSU+5KX9zPMmCoU5eB7giuNnE5BYZXg0w9TmhVFfn1AcxRR7buN+VFBsda1aCu5utbmno0oFHMyregV4eqB4aCo6IWwNBCstjV+bpGeoFFR6PWUalPQKSp2yoprDbfcpaFkKTZoK4gzn0/s5ryz4nGxnfPDSwHRUkKSK0oxbAwM79c9uktQih1RZb0LLzVylhKDQHC2k6MMLPQES32NBBb7htWB4eam4nHo8cZBxsVmTpC7xfnhksd2VbJaHpENA5SFVlGSeRLpwz94nLHSs7x3SbDSsUgEL+9qlgaaxjJc3rW
0QkWOICrklAaSi4eW5Z7lsKJYGBmWBobFoeHyQU63ILm/7PPG45RSbLi/4nPjUYIe0yiCoeDHZwKi3HKlpXlS8zhzpkf1sUc1tyAEC0PDwshyVFQcLhvM0KcvJJcfQpjDmVbOUk9z/6Ji5chyd9WnXZT4GipD6EWSf3A/YaVvqCaW9bYm0oK1Vs7jms9tFfLCA6iNDDdXAxZ6UI0NqSd496zHagc+qCteO8i5epBxrqNZ6bk+HQSS1UOo9B254f0Nn9WeYXtFoQO4vpOR+QKVw8LQUM7gyk5Opyz5YCPg5f2MFw4zyolhpaUZFhWPlhUXu4bL+24uZgVFWoqIo5D6YZfCaBfNCoo+aeE1VN7BfRnfAXygCHYNQ51h/RxpuUqh/5CwfxPLhTHZv0OpHZMVfhGVt4AlHAmigKWOp4d42V0kDaACLKBFjUdbF1nbuYXiLoKEXF1Hmoyw9x7S7iHoAiEQYCkjOAAUSelLyMRH5T0EOXmwweHGGbQvESqiUY9YGhiEgPcvhOwsemy2clY6mvfPhdQHmlEg+ORuxpUnGT+9FDCKJK8/yrAInlQVW4c5YwgUe4selaGhXVJsrwtKKcRKojJFPTaUU0f7aRQ9Htcla12LiiVKwLmO5vqBwywuDAydoqJoLRs9w81Vj5cPci71NJcaOS886DGsFWmsVKge9RAojLpKYEJKrXsE6dsIagTJIZBhuQRsIfCwYgU/aSHYwcprCB0jGWEpABGmvEGxLyg1myjbwHjLPLy4xErf0CtKCkZQ7xqUgU5BUsos984LZCY539IsGKglBiEETxYUWMHGQH/8Tp4HwyG/89u/zf/1ta8995rMCvrW49D4xJ7gcV1QGxrHY/YlPauoVhKMgEdVj3cvwU7NY+RL1vqGG08yCpmhMBQEqUMg1UeGl3bdc9RBTdIoS/qhZHtdMIgUWcnSjCTlMOWwqqj1LcUDRd8ornRzmiW3QSmlDqy+s6BoFQQ+bgNoJDxeUIxCwXKQMqxrKl5OIbM8WpXcWfIpJsZtxmLDgyWHorEe1FqWFx9mbB1kLLc1xaZHdWTIlMPNLfYN99YkRwWJZ6DagX4BOnWodw2+BpULCpnFy91mdKOtKSaGlY5moW/45L2EYmKIC47d2o9c+z0NDBVawJmjjFwIHp2BViTJq5pVlU1xVB8s+WwcGoo9wfXDlDMP4XwjH6O7DMXU4BnLK9spax3NtZ2c9YYhyCy9gmSjmXNtJ+Xq44xiahxyTUIpNZxv5qy1NeuNlPJQc2E348aDhJ+cDci9ogPko4ERlcN90tJVoIxIY6DDUS0C7o2fdjWCh8658i6bN7+PQSER2NwHVrGcxxvAjfttVNxmpdGlEBvKsWEUCO5WHU+4EFtW2pqvXw6xCHIluL/q0Y8kCz3Du8s+nnE4uLWu5qW7hjtrAXGxiMwTniz6eElMs76I8QNAodIWmrNABWWOKO89QpcW6BYkP3hhE197LLVzQNJaXueoXoU0JglhoWfoFiSX91I22pru0C2g0loeVxXtkqQyMlxsZpxv5Kw1LSsDw8au5RM7CS/tZDypKaSBcmxZ7hpuj8pkCs40HLJteWDZLkl2lwV/ezbgvQtuA7A4sKy0Lee7mswIGkWHTuqVYPXIEBcUkCKs5spORpBblnqG1+6nLiGDsBwUFYGGUuLQf1f3MtbbOQ9XBVo5ZFkxs/gG7q94HJUVByXFd84F/HQz4P5Zw90Vx/ysxpqNo5x7Zz38HK4c5JApjIRmURKm1uG2jGV3wSOXYJUhyKEYOxRYHIrpybKwDjV2+cA9dGy2coLcIRjfX/MptyX3FxWdouJh3WPoCYqZoTY0hJnDh212coK2R+K7b06MgETBYc2y4qUILMUox9OWK3sZtYFhuWdY7sG7ZcWL+xmffJRSSg23lxTNPOT9UZV2IFiIDSsDy8O6h7ccs+KlVDJLkLtvLQJtXbnNnMW+RgBrfUtlZGiWPCorAzfXx6gnNZJstgzSwJV
Clw8WJEcr47JyQSkxBJmga33aVYcDRcDV/ZRmSVIqpxghaBUkHV/ia0ttqNkcaBZGllGnQCETrPY079YUsS+5s6JYCBPC8alTObW8ueazv+hQa1FfstmyvHCYstnRVFLLRkdTazre+pmjnNWWoRIbruxn7CwKhkKw0nMHE8K6zWGnKGlHgmoh4UHdoxsprBTESrARa158nFNMDAd1h1RbOhK88CRj40iTKdja1xQTQc8TlKWGocfGkTuQ0RZqsePkr3RzLu86eynk7ugrSiy5hMrIUkwsK0/kmNHvNjcAjxY8rhb75FJwuZnRqCgW+obNI7cxGlmByiQPhmUqsaGQGVY6OStdzaN1hyss5JYHiz4yVmx2cwIDF5s5C33N5f2M5Z7m7L5mraMRrYAPRmWicR3TocfC0NAuSrQU+AaSwCG+LjU1q92cK7sZC32DlxvqO4qNPU3mgZ87zKK0Dnf2YEUxjCTLTUN1ZFlra648zMeJXAQFbdnaz7hwmNEqOd9T72laJcWN7YQwt9xf99lZq9GoVwkzS6YUUMSKMlAiLbwMlMcb3AwHzqzhpW20WGZYOsOo4FPc3cOlMguAkLUHb2GpkRZfA5aB1fEakeNnPcLhPoqd8VlvBYgQZPRKCoEZryMJWp2j0NrGM7eQ7I+/5yoi6WBZH9enCLqKVRFCZFgUJqqT+tBeqJAUywSZ5cxhTrssub3m0x1jClc6ufOJuSX2JZ94kBBmlmEgWBtZ1jo5UW4YeA7RGKUGP7N8sB5QTNy875RhFEnswMM3sNkxSOv2Pwdlj1YJqvuSxBcsjvn2Lz9OGQYOFxhoy1bLEGq3LpcTw43HCZ5xCWpG5TJ7F7eAC8ACiwdHRBrCXgNIKHd3gBpQBdaxnEdYTdDrY1nCoesSlO0iibEsE+R9Lt0xpB5YP0LSR+icZkVx435CtyB58X7GwlBPD7GLqeVgySWwCbTl6kHGat/QCyWhgYOy4lzrY7h5/vJXv8p/83u/R31xcf5HOXPNNddcc80111xz/b3qY7V5/stvf5tRr89v//N/Ph/Zueaaa6655pprrrn+3vWxom3857/xG9y/d28+qnPNNddcc80111xz/SfRx+bk+ff/5b/kt37rt6hUKvNRnWuuueaaa6655prrP4k+FifP/X6f//Ff/2uaBwc/0/Wn6HPHSKHncXxOoZCeWeDkRzGDPrEzsJ5nUFfs0/SmqQwgxfOrMfuL+ZB2nsBdzfx8CpDzTFzN83QS/SROvD6LxrIfVq9nvG/Es9+YwOPEqcZanoMIszPvjTll9hkVmMUknYTr2ZOPl2YWRzcL6REfYWWnmIDPwNVN+vJ5uLWMpxFMT3eRGON5noaY2VnQl3gaQeSqJZ5qij0BDTppr89rpvjMazv3AAAgAElEQVQQWz7RddgprupZxU3xXOJ5xj97sQsMex6uzZ4q4FmgpwlG7HltPI0CE8/ri+fMEfEc3NPTiMxjTNuHTpxTPwv7fB8Az/cxs6+bGRO1zxpz+wzAlv1wn/MsL2GfQXU0pz/yHFTa7OvCPqNd9iMaLZ5Rn2eO88nxMDP9/Cz8F/bY9/GM8RT2xJR8ur3PM3N7sp32I8Yz5xT+7aPAdOJDyrMfsabY5/hSe7L40/VPT4+DfbrNszZhxxTOKa7uGeuv/RD7Eqc4j+L0emlPz4LT/2ZRnmKK3Xy+wZ3uHMNJvNqs7XwYP9OcKk8zQX9OUGziGeuM+Fkm/alrn+WHnwfM+zD/ZO1HI+4+qlr2Z7rCPN9RPGtlNmCFeMo4T9I1xYm1yp6y32eV/7HYPH/xS1/i3/3BH6CU+pmur/cFiz3NdijIPcFGx9JeFKx2oaQtlweGPA1clHRqELFHlFtGBdhfdLi6w5JiITb0qqCLBtGHZllSHhkuNzVd32HnwhEUUoMaCawy5EYy8kFZy0rfYDOfQBsXPS8FsScopYbQhyiGQgqRNhgETyoeYQ4JhtHIJ1eSSmrx+paWhMwTGCFQ1uJ
pQSm1mIKmFFuymkEOFctdzUpfc1BRnDlKKCYuWvbykUGksDh0OC8DBAOBwLLWy+mHHply3Nxbmz7LPUOYO3qEl2sMHiNf4Gew0raQakqp4bAiKWjwx2xnJNSPBFl1xHCgWOwarIVuSXKjkVFILYdnc2TTRVLnnuWoKIk9wbAIeUWz1rakPkgN/ZojkUwcp59ZmgsOkbe36BGVU6oPJbWhYRgKBkWPxorHuYc5SluKuUUlQ0KVgXXsyM7SJoF1y5DDD/VZav/teMrUAR/DWSStsUV1kGxT7nwXxUMgxKg6wmg6iwWqOqW3WCKKNYMooN43vHKUYwV0K5LKKOc3d7JxOyyXmjkLPYNvLF84yFgeWYLMUIoN9dRS0BDGA4wIWB7keMkT6s0OQV/RL9cp9Y+Qtg4METSRLCGT61R6IZutEDlqMTh3lrrOqHX2yeIAL2kj9TJZoFjsGxZbXUZhleuNnDMtTSExrA8coWUYSAqpRRpLomC1p1Fjx1kbOhJCt+zQasUUrg00tcSS+ILYF+xUFJ9r5fhF6CpLNZMU04xC7tjYZzuaCedzEAiKKVRGhnojxRJiPZ8gtTSvG4btgDCzvL+q6EeCtgfKOBpBoyopxZbUE6Bc2dWRoRprMgnrXdeuUArUkcUIxSj1CQXECka+ow5c2NOEueH2WkApdUjJCdO6mBpK42h0AeRGoKWlU3FG+XDRI1GCy/sZsQ9GChIPWmXJzoqj3iSeIBOgJVxt5/QjQZhb1gaGZklyUFOUE0O2aFBNQSbhsCApJ5b9KkRaUI0FvcDDHyqGSpAEGZWR2+yXY0sh1bzezdFCEAcwCiQFZSgPJIsxyJ7mXDtnEEoKmUR3QxaHgsVWTpRZ7m9KelJwfc9h01JP4C2m1Fs+7ZIi8+AgLlJKLb0KjBJBIcoJc0uuBNtxkWtdw0LmHhSFceQKEWquNS1RamlUFJWhwShBrwRxGhAFknYJNoeae0sKBQQ5NCNBLmGjr/Fzw+stzWFF4eWW4SiggySKXIS/spBpiTSW1Hf0kFxK6gPNUUnRLEmEcDQOYeF+3eOaGKHuCF5sakSkkBYeLju0XME6+yppS9T0sEVNZ9Gy2NXEnqArIPYFnrGstaG7AEtHmkJq8LVga19QTiy+sQgBKhUY35Aqwe1lj4OiZGVg6a0bjozk2o6lEhsudjWpL8gCwdZRTqMmOVhyD7l7FcUX9lKqQ0uUW/xixs2sRLkqWdjXGOn6axRISqkhtYrV2LBXdMQlIcHTFmUs0UjQLbrdRS6hWRLURrDRzymklk5VQu78bu5BjKCQGza9jE5VUh0ZRiVLO5X0fMFq31BROeVE8Forx9eWMIduUdApKKS0Y1a84MGSx2Wbc3GM8dzoaIra8O55n82m5u3NgJf3UzpFRaYyksAdEcSB4GDB4+pOhj9o4efLeNpyZ92nMnKEo726ojbS1PoGwgKGCkfnl1l7GOIlQ3qV60iTUhgMicuXCGJBY+kCplpHaEE46PPmVy7xxp+8h5EeVq6g8gGWMlnoY1hFIBEskfubtIsXKKYxhdE+Wp3j5tWL1DsBK3sxy80Gg9oy5c4yiIi4UqLSHGBZwsoMkBzVXmChP4QspF/5AuXeN/GTR3ijNsL2SYJ1rBdSbR2h7AA/WaBTCjl7mNMrSpqrFl+5dcPPYXGoyZSgHQmsgFEo2OgYaoOM2tDQLhki7db+dkkxDCT3q5LPBIJeQbJ1qFnqw6NVwVB71IdQiQ25dGQWT1kqiaFb9GiVYbkPqQfbCx6XD3KCzG18o8yyMTCUEoeHSz2BtQapLeWBRosiwhaQuo1MYyweIDEoYJk8WsGPNUbVQGdI8wQYOHyg2UcQYikh6CCI2V23bOzsoZWPVhVE1qU81Hi5Ya2rKY4M/YLlYCFgaWBQxrLUFRwUHf3opb2MYSho1j2qI4uspjyoq////9nG1772ddbW1vnSr/zKz/yZ84/hc++nXDtyzMNr25p75y3tkmIpttw4yBn
sFvCNZbNvSNsR1djSrME759zC8f6q5zYC65CcTREWbq/6rHQ1n74Zc3Uv5/aKT7ktqA40fsdDeJYkVzTHDuvKbkoyUhRT6EaSTEEvEiwODEUDa11DdSgoxw5X96N1j3JsCKQhbUXEWrHaM5zZNyBhGAqOyo5vHCSw3NXohZSllqG/mmGU4MrDlK39lPsLirONnHpPk/mC12/lLA0N55sZ+RheXjoUKAvXdjOWBpZRIMmV4GsvFri54aOFYK2dE8YZqYB2URIOLFcfaM43cj55L+GdtYBa7BYxad3m4cxdwcpGl4UDyRs/dU/2u0seX7w5YmFgGFxI8XKLlo7/eafucXvJ50eXFT+94dBPvYJC5bC/7h4atACtBMVBzvsbAecOcw5LHuLCiPWjnDONnGZVcbjo85c3Smhr8HKojwxBr0ll/8H4rDXj0fVXQKcIDD53gF0u3P1fxpvnTeAShhvA9tiiHqD4K5Z2fg/FHwAVsuIFpM3Y3lwkKy2ys7lEtZXRqCku7Gd85k6MlfB42SfMLL/+/SFaus3OG7dibtyPiVLLP/rpiK3dlIsHGSsdzcsPUj51c0S5c0CuimwdpATJj7n4wb+lfPBHHF656NjTPAQawN+i+D5+95CNnSOu30vw29s0VgNU2mfj7pucfe8dgv59VJ4Th5KLexnn7+xy7VHK598b8bmfDjm/n7N1kDH0Ba2yY5gHuWUYKa7tpg6xJQWbjZzNVk6/6ri3Rghe28s4f5TTiySdkuRb6wG/fCfh4g5sDC1ru5KFvkMshpnhxnbC527FnGvlRBrqHVhraS580MVSQoclij3DrV/KePt8SDk2/OG1gL2K5EHBIR9TT/LupqIysgzC8c7ICDZamjNtzTCQXNtJOdfIuXaQ8dm7KcpaHvdLhMLS8SXNsmIUCN54M6U6sPzZhZDFgeZiM+NsK0dLWBoYVls5YWqRVpBoRabgwZqPRvCdcyHfuFKgU1R0S27D0StItlc8vn8pcn1VkAzHD86/8Chjr+pRSSwvHGR8sKx4+2yI0jA869BtiRS8U/foFCRvb4a8dSGg0JXs6JDioc/hIKBTlhQTgxWw2s25dJjy2Sc5iS9oFxWdgqLk5ZxrGy49hlf3Ml59kLDc09QHluRRibP3FL9wL6YyNLx9WfJ/XA1IfMGjmqIbCsJzfc63ch4sewx8wfdbyywNLY9XLffqioXKiFJsGIXwV51VXn5iOXNfjb8MEihjoZrzqdsZlQ68vx7QKUgST/B4Ce4OijRKkvsrcK2R85dbIT/ZDCimlrs1xUjBtb2MUmL5wt2Yt9Y9CrHk8KjEA6EY1RxO08eSxAplLIOi24gmvuDifs7C0PDBig/WHVoIa/mzCwHZtRFhBp/+IGerp/G05a8vFfizqxG3Nj1GAdQSy8KtiLSe8/CM5UIj40IzY6EvaBcluRRsbcP2GcvZo5zFgeFsI+fTP8lZ7eQUcksgLF7XR1c0/VDwtSsh7y4qOoFk73rCvf+HvfcMsiu5DjS/zLzu2fIWKHjTQKMN29B7M5IoUhRH0lBuzezGaufPGo1iY2JHEbuM+Tf7Z3dWLmZjZhkz2hA1osSl3MRQJLtJ0TTZbKIbQAPdjYavQnnzqp6/92bm/sj76r0qFNBostlsxr4TgcCrqvvuTXPyZN6Teb4z45FvWqY2Uh671aIaSRqh5PFX21yeDLhwxEcLODvp8/HnG3z4YpNi01AYbfLF2gjnpjwCR1ik7QvmBz1G64Z1HXCgoklz7sUlSCy5tnVM5zXB/LAHFpoeXBnzEAZOzzv04dKYoJJz2MpW4BbAA7HhWK7O9SnBRkGxMWa4PuTx3X0B63nFeBQztqV559WYXGIpNgxzQz5/fyTi4kxAqhzS7qmjIQuDijO3YsYrmtOzCQ9fSfiTh0MqBcXnH8wxO+IzO+LTjCT1okBISyUnuTAdcGQuobB0lXzDoSz/06kcW3nFRkFxdiak5QvGNgzkB0jFOFdOHseIPF5
9hdmTH2blwIfRPMH88U/SKnyAc4ffzSsnD6ARFJeX+NefeAewHx2FtAonMGo/Wo7SzIWkHEczCRyiUXqQq6cfozp9BiP204zezx9/6ON8/8n3I2yeYxdfZvnwUeAw2huhMjyOYBnNEbR/AO2f5vwTH6dRfhxLiesPfAaok9/8JkH61yhusTU6gY5K7HvlCjOXn6GwvsKtUY+BmmF+xOPlkwY5nTCyqcm1LTPr7uX4ZtnDSNgsSB64nvC+F5vMrDrGdzkBq+DqmM+1MZ8vTQVsFCULA4q3X0w4tJRQH0u5NuT0e3JT4xnLUFOjfMNkJWUrlFzY7zj/9VDyjZmARiQotTSllmGgoTmxrBmtanwNjUCCdZ8nlhK0GsIygh/fxK9XMl6zhyEi5QiVyU9hGSfxpknFQTyezfB+jyLNeQTzWBSKOSQvcvaQYeLaWZIwIM5NoPRtJtcTglhz4mbMsdsJhxYTbg96HF5O8VPLzKzg/JDimwd8Di6nDDQNrwwq9m8Y9g9t8e0D3k+357nRaPBb/+S/5cqrr76u71kBVoodWzyS3Rnx7J2fbTcTmLQ9bv3erUTRzVgl2J3VT+w8AbLHVrG4x/aG2nXsQuyxB7lHgrcsC2G2ySPcFv8dW+xC3HVfx+7aw5HsOiwvdm5L2oywbsXeh+o7W3EWd63Y3grPtqPszmMErq2dB0nae28vIbqZCjtZz+yuLUJp77yD7ekbcUe2PoGD5t9t40vgOJyq5zq742iBsDu3iKzc1XWy81lghdjeZrJyp05ZkXnahejZ9pfZc71d2tEpl9zuY/dc0bMdJXdcL3aNj92f9zpBccdWvNi5d2ZFJwtid+u7o1PbRwVE94tWiDvG58426vZT5+cg61/Z2yN27y7bLpoQ2GzQdp6lxJ161dtXvWNW7KmAO3/lYVF7bC12ytrJftjddt45NqXdqY+i5/fdv9+pjezWtzuMw87C2t5xLnbqXEdLOoXq2kqxQ9s8YbfHnUDsqLUSvf289x6r3FUsueM4mMC3LiFMr/3tLavsuZfaw47cza7KXY2rejO2ip2/9+xOu9s7JnrHqLA7bbDtPYYgxV2PrnnWbttqYYXr+z3Gnqua7dq0TE+tFNtZ6oJdtk7sVed7HCu8Y/oTe+gZOzOBCrvziIRnu8fAeuvR6RqF3dHv29dnR8k69qHT7jnbc9TD7tYXu23fdt9P7NaZOw8dImzPUQnr/qnOzJBd6Nve8b3zmIbYdfRC9KbJtJbQWuT2kR+Z/e3O73buLazZHvuubJ2e9rpG1vaOlo7tEDvmOburj3fPl1buddzFdu1KJ7Ox3Hsds5cy9Oqdv6cdFrsyCIodCtephxWC+xvBYo/Prl1kT4+J3a0tyOZbdszRVjjdDHbZp05v+db+dHueP/WL/5B/+S//N8r9IMG+9KUvfelLX/rSl768CfJTu3j+3rPPsrKyxD/+L/7zfi/2pS996Utf+tKXvvTlTZGf2mMbn/70p3nuuef6PdiXvvSlL33pS1/60pc3TX4qPc+/+7/8r3z6H/4S01NTb9g97a4TSzs/3HExu0/F7nntHogW+5rl2HXfu/zttVBwewFvev9ueh9g7w8Qs+PzXQoj7tV024Q3sRObdo+n7T4j/pp9s1fD3aNP9vrV3TFke0IO71IYcc/y2V1nQl9P3/cepLd3fe69WWpW7L7Zva/fcbS3R8Hu2Q13wRXZ++jC+5KeIrd395u9C1DQ3v/4290ye/X6a41ps8u23Ou616PO9rVsgtjV/uL+x8OeY+8+vmLuo9y7EXP3c9/XHM/23jpuX+N2e2LgxOttLntfqrrj/z3Oyptd7WjF69fb17rA4qhJ9kcehbtO+e5x/t9wf33Q0Q17F6Sr3r6f+KHawN5Tj3rigXZ/73U3z90tsr1vLdnjOnEXuy52zgNZBMcOZOzum9pdY1G8nlr1TP73PI0s7l837R4N/iPPDXfcfw/lvA+E3l52obMK+KnzPC8tL/OHf/g
HrK+s/ND38I1DxXm2G7ggM5ajEC4wzXhgspEvhcOmdbpA+4JcZ+FgBdYKtL8zOEVLgZQ2O4zvjr/nLATGInU3qC4wGWLNOFpETlu0EhjhcEgeWVk9S2TcM6WGSFtyBqxyB/v9LJgFYykYi9HuWaMpBFYT6ZTAGJS1BNoylroD76nvym4U5E0n2E5Q0u5/k52/j3Q32KGYldVkJ+iNEogsYCcNXTCal/F3oRuAoyPXptJavFRQMJbAWEwWQCOt+5s1cpubqjv3zvpNZu1lssghpSFV3bdAI8DrWGzPAUO1B9pz9Uz97rXah5y2WeCN3Q4YUQaE6RBTewMAi0Ap+53tGT5hNpy6NEyhUwQaTycgBJ7RzqgJF4zhG0NgXP9jIc0JpAE/a7fUFxhtMUrgGVAGfGNc8IZwASTCaHzTWcxrwCBNmpUp3TVjGoRJ8LNyKWOx0nONAwgMwhiSQJBrZfU1xiHopAtS1NIFHKYepJ4gxdVFS9emajuIzXYnaB9EBhLVngsAkRmmTJmdgVaInntkb3c6C5B0QaYCTQDWoLD4qXC4LwuPpJa8dkE5ngUTWiLr7mcU+Nr1Tep3B2nqCaTOfpeN/2FtiTS0hdN3YbOxbC0jGTEm1BYhXNBIztjt4BjPWoq6G0oUWMtIagl1FnKajS8dCETqbIPJgqEGTXeJraXYbldl3TVGgjBdA140DnWmsjHp45B1oYEoe06aE9gUtAKR9AQoZmONDB1pMxtjhXCLE9mzuJUuqDLUlnJW0cACAVjt6DlGCExmw+i0ubVoXMR953metaSe0+fIWFJP4mc61gnGk5DhNiGQTt8jnQXwZDqiJXjS4QK1cmO6M9yx3XCqXOqem+vhvnsGUiXwM7SgzXROZDpnceXDOPtmZGYTPeHCgbOAtW6fO5CLNcK1hXT2zwgwvquH0hnWLXBjumOnpAUp7PazjYBJadnQjidsrUBmY8co4eys3flypKxrj6HU3rH4VNmaS2fjx7UXkMBU6vTHMwKreoIZszY2qltfTSeoymJUVlYFpBZp3Bj1DXhaoIyzD5F2ZSsbi2/cz8I6W58K1wc2G+y+cTZYK0Eus9cd/KiRrj8O6I7uWazqEuxDY8kbh9gLs7mlE67qGTdvGemuy2lnl4xyZZHW4OsEbIogRekUZdJsDkgcASJyyEGr3MJuKM0wmlKB0dtzgzRpz4u6RWrtbK0xpH6IwFAyFt+abOFqs2cphG2j0iR7NdBYaxHC4JkEYY3TSd0h/uvt/6XRYBPA2XQrXH+lvptXSimoVLi1TM+c6Wc2Iy1mL2up6M61GdIT4WzrUDbfRtrZ1US6UkqcLjmcrKu1yuap0Fh8I7btWykLJCZbR9mOTmatF2VzmAWsLxEIVx8MUmd8awJEVk8XRGkQJAh62yXdfv2UaDrU84JOnAtDJ9gs+ltYi/G6C3YrBEHnTUkIPGspGBjSNkM2CoY1jhqmYViD+uxnP/vZn5aFszGGD33ko3zxC3/G1A/pdV5cXGTsz0MmVlKQgu8eDnh4PkG3ffKxYa7k8dhszNKjMc/m8sxUDRyss9EOiZGkVnLlkOUDFxIQcKkccTsNeHGfYrAhOLyaoj3B7z+c59jUFnM4XurapOGdlzTjq4JDy4ZazmG8vFTSKsITV9t85eEcn/5eg795j8fGAJwrhXzwRsyh1YTJJxYoXC5glGDolsc7rrc5sGSoHYg5OAv711PedqXF5LrmI99v8I6LLY7PJvzM3zY5dvE8D3/nZU5cXKbc9jl6W/COSy1CK/jSkwWeORTRmtD8l39do9w0XDgU8vEfNLi0L6CekyhjefJqTAfyMF3RfOV4xEDbcGglxXge+dgS+4LPfzjP+8+1ODGb8P2TEZfHfd4222ZmLeX//aRi3os4vpQwcingQy+0OH2jzfkjIbnYcHghRQh4ZqjMI3NtKgXFF0/myGMZbFsOL1jyqx77Kpqzh0NG6pqpecuLBwIOL6eUGoZGpLYXPWc/qAk
uFHh1KmBpSHL2YMRGSTGzknBg1fCn7yryj75epbR8BV8vo9KrCG5STN9DYfU/IViiFX4QT38ZaNIIf5vLT/5XxN5JipVFJAY4D7wduA5+HswSUMFLLiO5yPStDdqDDzF98Qf4zRbfeXIf7/vBOoevrbNvI2B0w7FTP/ObZf77rzXYv5pye9Tj6QfyNELBalnxD55rcOxajenL1wkbJSrlhPGFpwnsBaauriF4GXgOWGJw6WWgiuACsJUt7D2w0xQaX+DA9a8haVHeOEwyOIaNxvAay3jcQG1O8L//8gFOLCVMX79FbmOdsh3k9qjPjXGP7x6N2MpJzp1QbEQBX90XcmxLMz/kM7Gpmc5wXFbAszN5vjsqGTlUZ3jWo+0L/u2ZkJObhidWEsaqmolqylpJsVoQDLUsKyXF0RWHupvYSLmwL+JfHwop+oIHFxJydUn8wD5GXv4G5XiaA88WKKaOY/rLX2tQTmF92I2N6z9X4/j3fI4uJnzp3TkevegWC199IMdYTVMPJU8dCTmzmPB/fCDiQ5cTzu8L+MzZOodvw7kTgrdfSXn0RsxL+30eu9KmYNzC9zf+psLpGykHb6zwwNUUKT3mJjyeuNxmcMNx4AXw5KUWD87GnFqI3eJRC/avpfz5JwXXvTwr+1PKNcnhpZQHlxLmBxWltuXbB3weu53w0qRPqWVp+RLPQnHBI/Xg+mHLz78Qc2IhQQrBas5jsmF4+JrmzHzMQMsyvZbytY9aTM3nlcmQM3MJc6M+zUAQJZbBhub8UIQopAxtCk7Nx6yXFBs5xfwBi2lBUwriUBKlhiOzhqObbjKcqBj4QJXGD4a4PuqxXHT4uem6IUyhWJPkE/hKPs+wtlwc9Cmkmk9caPJnj4e8/3LM/nXD75/O8YkXUx6/1iafWCY3U6bWNdfGPZajgEf2r2AWc5xcMLR9MPtabBTgQi7k0ZF1bvkB6zmfy/sVj12LsdJhFI+vaawv+MXn2uzbSBG+pB5Ipqua4Sp88WjIdMvy6I025w4GFGPL9GbKoeWUzYIkUgI7G7JVkKwUPaIUmofaFFY9Dq9rvEQgrWBmXfPojTbDK5K69dnKK2aHPAYbmkuTAfMnYx552eA1FLPDPt8+HDLecJzb28MecwOKeFhT3JKMVgQvjvh8et81zI0Bxuuay6bA+y612b+qeepMxOK44NhtTSot5YalUpQETcHCoMevfLdKPoFrEx5beY+l/YZgLUAPxUR1xVpJkSrJN05LptfhM99qMFbXLA0GNAJJqGGwYalHkvMHQp497PHorZixhqHuO2KBe4kWjGxYnjqdZ7yqOT6f0A4E+1ZT1LrPSAX++G05PnE2odg2/NyFBseWEqauubwDf/lIjpsPpIiGT+VEG9MIeN/FFloJvnso5NG5mK+9HVrG55FbCd88EfHR8w0+8v0WmwOKWkny6pDPgwsxRxcTHr0UM7Xq2OtHVlIe/MFLWOWhoxyBDRiqGzZKkg+/0OSRa23WCx6+aDN96TKh+Rb7rv8pkq8jeZ7RxYuU19aQ1BhaOo+Kqzz/wCne/fQmmwcV4/Nf5z3fGMRLv8vm8PsYWP97tB1C2hrlyudpB6dQJkbyA4L2JaZuP0Ou4vM3/80nOfXdFzhcneTES4uIpI2XLjK8/FVi3oZvvkq58ocIriKJEDpE6Rc4cP0afqtCmh9n4tYXEXwTuAQsAIsUatcJ60+h+AaCJqvjb6coilyd8Hjk1Zj91wX+omKjJMml8OVHCuTbltGmYWYt5S9/NeU2eXKxW/T+uwcL7HtwHW7nqOQl7361RRBJBluGd77S4oG5hEsHQr58IOJArslGJBmoCn7v0RzvmU8IGooPXWhycC1lM/SZH/B4firkY+MLNBbzpJ5gveARe4JvH/U5s5AyumX4+NfmkEmNzbFBzh7NcWj+BkH7CwgkMtYYjlE5UaK49n+hqONV9+Pbb+Hp76P4D8At4DKCZ4AcsIrg28BVoM2ZCzW3SG9/j8r
U+8lXr5HoQc6dGWewYQkTy/yox1DNELUtt8Z8ji8m+FLy8XNNxjc1/+4dJf7rb1U5spww9KrP224kP13HNv7t5z7H/v37ePTRR3+0RbjsvmUruzPTTC+OStldWzeZy96z3S0eiUUJu42w6Ughu2kvCsdKkT3bbgNVzDYaRWReMvf2LqzDXHUQXtZ0sS0dT6BRrhK9SDEjBToU6EhgPIEpCoyUmAwybpTA+AKdc9/zej0+ntj2xBu1E3djZLfOnXbroqS6+z9e5oC1sgdxk5XPtz1t6rk34V5vyjbmSexCdW17wbgThSM7XrNdSJ3Od6Xd4dlSptu/nophfDwAACAASURBVAXj9+4zuQe4eqrsVbnrTxDGIK1GmN2HYSw7z6F4zjWHnwHewQoJUm7X02TefpO9gRdspw+zvs/KLDPPr/UEdsfuhstxZDvlRGXPDemi9cSuQxdRZlxUVm+bJYbpuLLEDj22UjqPZ7bFp9hZtlLW77vxgR3cmtyxNWgpZ2g506NHvceeOp6z3VusKvue8/gZQDldL7C9M6LzzjPXGcvKZF5VKbbbEHZikzqehqBnn7zTJ6JnG3ob+dTxhoVuEW08gQm7R5CMFF1vRjaGtOfK1YuE6oyDDtZrGz+4rT12eyfGZo0jMy+xyPrSyJ2IqQ720fbYNs/a7d2cbS21dzmKs2u3dSfAEGxnp4kuXg/Z2S3q2jLRg2vrYMh6UXDKdne0pLWuH7dxUV28pRVgsspt28is9p7tfHY20zfs2Hbv6JBRXY+rzfTbCgj3PE7X3Vw3UiBkt17bGCt2ISM7fSq79kf24Pw6dbXZvQKTbbCLvXGPzrkpu3rXwX7aLpLwjqybPfZ697GQ3TZU9njOjdeth9xx7M52EXPZjsL2o3rK4O/C1/XqnrI9c6zK7Fzm/Qys8whL67jotrO1u60bbux6GaKv8zwTduc5ny6+z/hZXTpzlOq2gMnmRJHtPhlPdHVeysxm5jObGWX/FN38ccr1gRDbiDOd721n1dPa4a4zC152P4myxrWl5+x+9xo/u48HDG4/c6c9V519rJ7RKHtsfT6z6ZIOtVR26uu7eVbaHhvSgwv0TRcB2HliZ82yjT7s9K/q2ii5y37snr87fd/ZbTI9gDhBd23VQfsav4vPVTvm0C6w0q0xMkeQ7Pw+AArZtXLXPB71zIFedy9m2xaKO44Vdsbdts6J7rjyrHWeduV2U433U3Rso1qt8s9/93eZvXGDvvSlL33pS1/60pe+9OUnIT81nudf/bVf4//8V/+KKIr6vdaXvvSlL33pS1/60pf+4vlucvHSJW7evMmvfeYz/R7rS1/60pe+9KUvfenLT0x+Ko5t/OzP/ixPP/30j+XeVryZNenNWbz3X+/9ix9DcX7IR70hRRNvzLPsG1JG++Ou7T2bwN73M8Ub0FP2h+qi+wH0va4n2J+sXXm9yDz7Q7X/G13dt9Jd9ripvc9nvtl9L378evRmPOt+O9Pex3WvhTt9o6si3rDqCn7SYt+Uu4kfe1ntfXx+I+ovfixz9k92gnnLe57/h9/5HT75qU9z7OjRN11ddyxstvOb34OsmwWu2Z7run/b4/liZweIXd9R91A/uyuQo/ce4j60WHBnwNZdG0fco9yiG+i21/d2PGcXI1Kw9/WdAERhXXDIHdfaHuwlu2Loem9n96iO2NkG7FmanYi3bvhUZrptLz2453vm3isHiwte3NGe2efQ2jtuafeqpLibpopdQ7rzILXnlNj9RveTlb36KO64/e4WujNQsJetbHfpb09gktjJhO0wb3vjbfZsyT07/bWnmrv+PgsCU3sZw0532C7STOz+W/ZD7992B3VtB27ZO7DqWcjbHa/VOyovdrWPFXe2++7Wsrv1y96DRL7rb72Bt/KOSrCzw7bHbPeP5i7mw+5h0/y7dM7dXvd2B/iITiDpHt+Qu65XWRDm7r7vtU8dfGgvD1fsuOf9veKKLBhJ9LStFXvcUXDHHLF
XUOdeAYZ39Ml9vCGa11olZ8FxHdyg3Yu7LLpz3Ru2RrH3P2Yle+n/LgsuuktshyHtqaZ4Ha/TomsDdheygzXd+4Zil95a7pJJ4t4Oth0/2/tsoddnF8WutrW7X0/2yGkheuz57jHVG2i9swo7jbrc1RRW7FVWyZ40fbuXVph71rCDaNiTTS33HJZ7riV65S2NqpudneWf/vZv87Wv/B1CvDFvYYuLiyw+V+alCR8tYT2nmNlISTzB+19q8+SrLTYGFPkFj0dux9QjyXcGcrxSVDyylfDxF9roxGegZVgYlWyUBZ96ocUDC5rDqyl/9GSRalmRjmteNREPiCYPvmyYWLE884DP4WXNyYWYz703R9NXPLwQc7vk897LTQ4uaUbqhgevaK4WIt57dJ7Dz/kcn49p3Cgisgjpx6+1+fzjRX7xbJ3Dr1oev9zisRdjbj1qOPRynZGrr/CNdx9kfDMF5ZFfeQGfV4AxvvneM2wVFVMrKWcPhxRjS6QN77yY8vhLdbYKHpWSZP+qZn7Y48qox+89muMjN2M2CorVksd33qb42Attvnw8x+1xn+dP+HzxdIGfudhiM684tJwSe5L/5x1FRlqGrx+K2F8zPHbWMLGlWS0rPna2welztyDME4eKR16NubIv4Nh8wlDDUC0qfuVrNd59rU1Jw75KirYCIQXDWynvvdhiqyR5x8UWD11LKMaGtbLi4kzAwrhi/6pm+qpko6z448d9JmuSB2/H3B6VrA15nL4Rc2QxwReC2WOHGKwokD4q/Y8Etc+j5eMgE3z9B2j1aYQdwDdzjNxeorz1BSRfxSFyFLAENDMjexA4AYwBKwg2iTYvo2hjdY5TiyO0iiE6KNDyBX/y/jIzGymf+H7M3KSHEfDP/8EgJhB8fcZndVTx0QtNFsYjgtRSXvoe+XSQenmYXP0s7egE1uZQ1jEtE/FPUKxjRYIOf4l24Tfx22cRDCNYB16hrX4doQK8WoWbh2dYmJ5ieEFjByc4sOFxe8yjLEZZOjTF4qDHv/ilAdpK8aFLDXKp5fis4aUpn/dea/H4lTbaE8yspHz+8SJpTrBRlLTfv8HmRpGt9ZAD1ZSpTc2xNcNQw/CVt0vqBLx4yGP/mqXpCb5yKEAaOLCl+e8+WOajcwlPnQwZkYLJmuGJ6zGjTcOh734ZyPHtdz3O5EZC6kv8BMbXDdWC4OC65szNNqsbRWY2NNNrKS1fkUssTx+KGDxZYb2W59RizLHVlBenQ8K2ZF815T2vttm3phnfSgkSj/e80uJtl1s89nyFWzM5Tl6L+dDXXsTfvIYQAX5lEW9ziT/9x8f42NNNxqqawarhX3xkgP/suzUG6oaZuTrffqjAk5fbnLrVJvYF//FQiZMrmvKGZLqS8tjVNjOrKUpApeCxFkkujvtoBe+5FnNiKaH488s8lY7y8mHLB84Zvn8wZKxuuDLms3EkJreqOLieML6l+fvjOcbqhtUkohUq8olBGfjBTMB7rrR48FbM2KZmvGEY2BLcHvJ4YCnhbx/Ms7LPcPi65KMXmxzcSHnhQMiXHg54z9WEzZykFilWioqV5TyVvOCb+2G6Agcrmj8+GmIK8PyUQUrJdKHJ0Zsw0nZUjYfmYz7wYps0ENRyksujPhGCqaomHxsSX/LM0Yh/fyriyaWU70QFPvBqzGJZ8Y2DPo++IpiLPN61kPLQDwTveiXmwfmYcg0ST6AlvHREksaKB1dSHpqLWRlQmOk2H3424ZnjEYNNy4sTkk9faOJZmKho/uSRAge3NJ97uMCXDoc8VNF89UDA3+4L+ehsm0YombopOLCeMjvs8aUjPo8tGWqhI6kcXUpZHZbceqTFYjvig1eaxJ6k3g4Yq2suHvL41ozgiVnDU0dDpCf4q5MRq0Ow0PD5lZeb6JmYfMWjMgiP/cCxeC+MefzCuSY3xz2qBcXX90f8wrkml/cFlJqWR67F3Jrw+Q8P5HhsKWFu2ONvPyoorUqmbihqecGSDXh
8IcY3lrNTAaXBJtQdimuwbsEXXBlXmMc3+X44yNnjHu+71OKDF1o0I8mX3yUZ2ZAcXEt52/WYZ49GlFuGyc2UfNsytaEpN01GBxE8eyLkN55pcGQp5ehiwuKQo0eUG4ZnjuWojBrGCm2u2ALHr1g2c5ITCwlHF2KefiDPQAueHwn5+Qsx+9YTXjgUcWA9ZWY5ZXRT8/iVNh95ocnUZkorkOxfSpHA4+crnH7+m5C2efWxJ5i8vcJYLeDxZy7zyCtt/v3PTfDui3UG2pahpQoDG3+RrcJiYAVYBDYRrGRIuKs889jP8cGz8+SazzMy/3ngLEHzL4Er5Op/h0CxMvgp2kciCitP45vbaI4Bowi2gAFAcPIHf4ayL1Fa2SBqfBvfnkNyDYiICw/jJy9lc0kTkNw8+VsMrj2P4Hvo4GFk0kbZF4DbOMLEDPDBnp9jDB8k3BI0BycRCI5dXaddCqnmBZPrmldPKC4WIo5uxIxvGk7Pxhx7TnFsMWV1QNEKBOcfVbxQG+CjV1oM1w0nb8c8MJ/yF4/k+eArLYLUspmX5NsKtekzuQYXpnxM0fChl2IeudXm5GzCcw9E/N4TIXEoma4agmsR33pY8q6XE3ztHEhPXIsZbmo+cLaFF9e4dOYgh17a5OSL3ydsfw/BPG35GXz7EoIWhfW/AqrABJ79LpBgeBzBU2jxa0gu4fIwrABvy1Z778RyEEGN597zW+yb/T75rW8grCT1D/Hld0+xMqw4fzSk5QumNgz1SPLQjTbagxsTAWPVFGnhgaWUobrm7x7KM7OuOXk7fuse2zDG8DMf/zh//Vd//YYtnHvfIpRxoO7et3qtQGQ4I+OB0O7NxaObvEIrB5XvfdPSyt3LSJdApJO4Ichew6x0iJOO19FIkUHkHei/44nTfpbuInA3N1Zuo44IulmDjHSJCowUmMCifYHOuTdk6wmMVHgZ67j7LuWMmMoSKHS9wi7JgfbZiaLqeJgtjGf16bzHdRK6BBZCDZG0lDIPnJVdf2Zg3JeizAujgwzUbjPAeiAhQ1WZDEKvpdjG+aSRIMkJl+REkaG6MjRTRrLRnusPI9hGV3UQXzpw1chnSXE6CDJl3Suz8QQ2sSijMzdLJ2/QcI/7czTLlmNwfvAOHqeIy2vXzNpW7toM7fX8Rtveax0JRI9HIcySY6Q5sY2UGjQZCs44tJNDBnb608MqkXnbugi97it8J7mLA8cLne7yZqjuz8Il/1A939eByDzENkvEIihlgHyT4cqyvCpO/7K2twIKxmGotASbKZPMMF5WiG10WCehj2+6SlUwXa/k0SwZidfjpd7GIOE5PGTm5be99Cbp9MRKgQ16sG0S0FlbZvrfSfqiTHfXpJOcwZVVbOMgddEl7TGhII08VEP1PFThGeuQWVnyCIfGE9sYpg4qL1VuLIvtRBQZykx1EqGIbe9Y2KlfVk6hXdKCwLgyesb24MNEF91FDwZKgtA9HjsyxKV0nizjdVGRLqkL6Kw9jCe2E4QUzU5vkwRElpioYLve0Xy2s5LrJFvC2dJO0hWLQAcdFGQXhSl7vJie7SQrgUB0Ey2FWdsK4ZLvmMCis60OIzvYK7FdPqtEZhNc4XTWDx27baTIEG8Zrsu6BDk6c2DlrGWwo592J6prwHZxc8pmibAAZVxikU7SmM410jobhHAJbDptlHQwoMqV0WQYNtOTiMKh22yWJqKLsgS7jWYMrd32yIbGQcs6+DYhuuPPsz07QD1zibAuAU9kQBhXnzTIUKZZRTrXyqwsRrotCit2oieldUnEOm0levymvXhGhe1BL7r+2sb8dRCAvV5Q4fpAB248aNXF/nVQdhofkCjMNg5R5xQ6lC6xT1Zuh5z09/Beetnvc0DgMGvWw+LjkHBBZsvXgSFAIjGZF7rTQyDQu7zGhcweBzjcWoBL6LGX+1+65CfgrrO9o7dX9O4Vk+sn2WmrDP2Yfb0XJ9nBXOpCtgbKxoRn3Jql03ZGOJvoW7v
DJ286yLYMA9vZiO0kTpPZ9NNBDwoPAt3BGXYSZrG9znI6abBSoIWHyhZYrh07SMGwp65RzxzXaQt7F2+1a5du8rBoe69KGYuX2dTOjqHI8HQ7dhNxiX9sTwI2I8Vb99jGv/nc5zh96hSPPfY2+tKXvvSlL33pS1/60pe3grwlPc/NZpP/+Z/9M+ZmZ/s91Je+9KUvfelLX/rSl7eMvCU9z7/6G7/B7//BH5LL5fo91Je+9KUvfelLX/rSl/7i+W4yOzfHq6+8wq995h/1e6cvfelLX/rSl770pS/9xfO95B3veCd/+Vd/9WN9xtVxj9uDilQKhuuGRAnC1KK0pe0LXtonWRp0YKLGWEwuhv1Vy/CWIZcYPO0OpxdaloJxwQmedgfx922maCGItWRq3bKS+qRSMNAwREFKoWVIPEEUpfieodQy7FtPKDQtqwOS9bJkcl1z6mab8Qs+45WUak6yNigZr6SoMKXcMBxadZGr58d9GoFAGgtNRbS1hbRNEiVohpL5IUlj9ABJcAhDwItTPn5q8RtbHFpwwWSnbrXZv5SilWS9pIgSWCtJlsuSNICpiuHcZMBaQTI7oDh42zCxoREenJxt07KSY0spxaZhpKIpNQxawMim5tBiyunZNrEniGLLiWsNyk1DvppgPZ9mINnKS6yASDusT7FhGKxqlLEMbxqGq5rhquboXMyx2zHjFU2YWAarhnytzvKAopqXzI4oosSyOOyelXqCiUrKidspYWI4fDvm4csxh28lLA8pNgsSPzEYCbMzg0i9AJSBFGF9hF3GnWwq4gIWYgRtXNRvCxhAiyeACVwAyTCGE8SFM6xPncGyj1bhNKkaIi4c4PaRaVRqePGQ5OvHQiolyeim5oXDHq1AcHXMo+0LRloGa2GkZji4pKnlJC1foERAmh/Ba6xiZQGtJl2gg1fCMgmMIsXXgOtgN5Dps6j0HC445RbQACy3j06gWouIuMHQ0gVGl5/HhGWq5YhStc3+uRpBCs1AkPqCB24mjG+mhIklVYLBmmGgaSi2DI1QspmTCCwHV1KWS4pc2zB02efUXEzJWIZqhnLTUCsoyg3D0LpgbCtlat0QpIa6L4klHF5KaXuCgxU3xqYrKQdXEyY3UwSWYrWJJCbJjXN71GNuzGdwvYkwlmvjHk1f0gokUWyYWk9JJISpY3Q19iWM1g1L9RxLJUEqYCuUDDU1ww1D6glGK5qNEctWXnB0IabUMKyWFVp5LoAxSRFJm9bIOMujReJikfbIOAeupbRCwfyIRz2UfPTVFgsjHq/sC4gDj2O3ExJPsDTosV6QTFUsl8oeVkA1L5gb8og9QVsJPG2pK0HdFwy0XJDoQEOzulBirQArOCJLlFo2cwoJHLphmdpIKbYszVAys5ZSjQQrOclgQzNcN2zlJE0lmBv0WBmQzA95CAuxD0cXE6yA/esJXtN9J98yNEIXpNhCEHswWtVsRoLVAUtS0FwvSMoYtkJJ3ZecXDMMhG3yLYkINQMiZaRmOLoQM9BykTZWwLlJj8qYYcSPUdYFgC0OeKwWFPNjlkhahhuaUzdSlIGFvGSsbrhWlhxfSPEM5NpZgKuAdgiVgmRtXDMetJiupKTCkkrBQllSWnE6O17VlFqG0YYLSD0/4+xPZCyVnHTBrj40AsH+dc27r7cptA3zBYm00PIE1RFNoQlR27BWEMxNW9YLkmYOmi2fXOIC4UotQy6x1CKJMDCUzRuPXG8zuZESYTi4ojle0+TblgcupTx6tcXR85KJima0pmkKSBX4KeTahpn1lEYoKLYstVBye0gxUtUcWk25PuzR9AVKaDbLML7p2mmi7kgryljaPlS0j7Tw1JGQy5M+ExuayU3NfBIxVEvZv6ypRYowsdwa9hjaEBTbhpWypBkIcollfUjQloLFccHKgGKlJFkpKTbykql1jdKWZiB46nBE05cslRX1nMQImFo1hEseaV6jNJTGN6kFkiC2HF+IEUAoNGMbbp5TBm4
OeqyWFM8dDGkGgkSBp2F8tc21cZ/z+0NIXNCYpIZEov0cqWfxkjm8eJlyQxPWthhcfJH85lmggWaSxQNPoNVxDI8DHpYSLsAvZHLxPNgEi0R7UzjY6BHIgsYt4/hpDdVUwDgQUpuYwcjhbB4pYMhh8bEMIrbnjSqQwzKMjKucP/EOLCeyfxG5hmFx6mGghjQ3soi8FnAIGMzmozVc8FuI5SiCTaSYo7xeYWxuFXTCZkHywkTA2GrK/jnN4bWE/auacsMwO+xRrhuagXQUjZzkwILh+GKMsFAPJZWiIkgsuQSujvssFxUro4JcYhhoapSBYmwYr0LbE0yspcS+WwsdrlrKLcvh5YRi0xCmkPhuvIaJpdQwFJqGtg8vTw4wNjeHileQNFieOgYYpL2EFtPMHTue9UmJ+vA7M6JJLqNHFRFcBCbpIOuMGKcy/i5gA7gB3GDi9guAwDCGJWDh0AAzKyljFcOpi2uMVVKWBxXSWKLYzWulpmG1qKiHgtnDlnokGd/U3Bj2WC/Kt9bi+X/8nd/hE5/6BY6/wUznOxbPUz63hj1SBeM1TexBlDhSQNsXXDqiWBp2aIbGVEy+Dac3NeOblii2+NpFYBZblgGrsThygBFwrKJJs8XzwTVY0YFbcNQ1uSim3LTEviCXT/E9TblhObyeUGoaVkYly0Me02uaj91oM30+ZGo9pZJXrI4opjc0KpcyWDecXEkItOX8TEAjEi5KuuWR39xEmiapgkYkuD2uaIweIQlOYAg5NxMQZIvn44suAvXRmzGHFlNST7I6IIliy8qAx/KgJPEtxzcs52YClkset4Y8Ts5p9q2nKA8enk1oC8GDSwmlhkMYlesGrQSHN1NOL8a851ab2INCbHjoRoOhuqGwGWO8gGak2ChIRxBJDBIYaBpGtgxKw1jFMF4zjNU0j87FnJlrM7OREsWW0WzxvDCq2Coobk74RInl9qgiF0PiC6YrmnfOagqx5eR8zDtfbXPmVszCqMdGUeEnGiPg1vFRVDqfLYITsAHCLrhFsy0jiIAEsW38GsAIVn4Ehw4aB8YwPEGSfxcbU+8AjtKOniT1xkgKx5g9chCVGi6c8Xj6vTnWBxVTlZTnH/RphIJXJwPavmSsZRDWYdtOLiZUC5J2KFFEJIUJ/PoyQpTR3oxbPKuhrAzTKPt14CaCDaR+Dr99Llv03wLqbnfnxD5UcxGZNBhbfIGp+R9gomG2hvMM1FscndsiSKEeSVJf8MSNmP2bmiixJMq9cA41LeWmoRFJNvOO2PDAcsxyWVGILWMvB7zrVkzZGEZqmsGmYasgGWwaxtYE01uamVVDlFjqgSCWgscWY1qB4PRGSqAtR9ZTTq+mHKikWAHFrTqCNklukpsTHjf3BwxXmggL1/f7tAJJKxDkYsvMWkrsCYLEcbkbBxOma5rFep7FkiBVgq2cZKRhGG+4F+ipdc3aJGwVBafmYwbqmtUhD+0HKAtekiCSNo3haRanSiTFEq2RfRy9ltAMBbMTHvW84mcut5if9HjpWEg79HjwVkLbg8URxVpZcXBD8+KgQktLNS+ZG/NpByKbeCw1Jah6guGWo4oMNgyL82VWC4JV6xbPuQQqOYkATl6HI5WUUsvQiATHVxKqkWQlJxlqGLfozUsanuDGqMfSoMftUQ8BtH04sxhjBBxZS/GbgpGGptA21EOJr6EhBW1fMF7VbOYkq8PQLhmuliVjVlMNBY1Q8PhqykiuxVBToiLNsEgY30o5Mx8z2DRZtL/g+ZmA9SnDhB+jtCPsLA67BdLclCUvYbyqef815yC4XVLsr1quDUqenEtQxpKPTRb5L2iHlrWSYHWfYcZvcqiSkkrXx/PDisElyXBVM7WlKbc0kw33UnL+iE8jlATGsp6X+NaSKks9lDy0lvIL19sUW4a5gkRaS8sXVMcTxpqWQmxYLgnmDhnWiopGAZpNn1Lq5oFS21BsW2o5hbSWsdQQaMv7r7WYXk8poTm1YjhWc23
9toua911pceacYGYjZXxLEwtXhzCxFFqWE2spjZyb1Ks5ydyYYnxT8+RKyrUx9yLgScPGkGBq0714HKppp7u6s3gO8DT8/amQl/YH7FtL2b9huJ3kGKtqjixpqjlJmFiujfmMrkkGmpblIUkzFOQTy/qYoCklS1OwPKRYLiuWBhSrRcX+tRRfWxqh4BvHI+qRZHHAYytbPB9YsuQWfHQxdc6R6S1qkSBKLI/PtwEIpWF6PWWg7l44bo14LA1Knj2ZoxlKUuVoWZOrLa4eCHj+WAhJAoCyVZSUpEGB1DN47Vt4rWWGGpZcdYvRhfMUN58F6hhmWDn4MYz3OIYP4EgbA0AJyHFw4QWESQGF8WYAg+V0tnguAZOESQ3ZUMB+IKI6cQQtR7JFbgGbLcRhBMFWz/xRwDKKTKpcPPFzwGPZvwKFmmZ54n1AHamvZvkDWjgE6mi2MF/OFs8BlocQVPDsLINr60wuLIGOqZQlZ/f7TK8mHL6dcnq1zcHVlIGG4eaEx1Ate0FOYTMvOT5neHIhQVpLNddxpFmKieXqdMDyoMfyhCQXW8otg6eh0LZMbTqn4741TTuQeKnlUBUGG5ZTiwnlpiGXQhwIjHQvYAMNQ7GZOStPDTN56xaytYS0dZanzwAaz15Ey4PMHn8we2EYpj70SQxnssXzGlBE2hezxbMPaIzYR2X8l4F1BNeBG+y79RwgMUwCAbcPDXF0KWVyPeWxl9eY3EhYHPFQBqLYLZQHGprVkqIWSW6cNmzlHVr0+oTPWlm9dQIGX37lMn/xhT/n5o3rP/Zn3SUdxpsiuxHoe17AHjkJ7lKJHwXid2d2RfvDP0vsdV/xw7evuHvlO2lLuslr7F37l/to6+5ncQ9N+WHrYe+53WN/qKeJe1zcC4cS99nA4q7N8voKdmdilfseiK+jnjsSeOy64LVumVHrdujSG2pQ7jJefqw50cTe41rs0aniNYbBD1PdN3W7U7xx5RFvQB/fj42+Z1/1/N6K+xiq9+o7+9rt25sqRuy455s0E4o34Gs/cvpD8SZp+v2OMvsG3GPvqUf+CGP8fpOFvNGKIe7R/mLP2tyZp/fOBGjiDe1m8VY5tmGM4Rc//Ys8/dTXkPItn/SwL33pS1/60pe+9KUv/z+Vt8RK9d/835/jySef5NixY/0e6Utf+tKXvvSlL33py1tWfuLHNtrtNv/T7/xTNjc3+73Rl770pS996Utf+tKXt7T8xD3Pv/Krv8of/NEf9XuiL33pS1/60pe+9KUvFSJ8jwAAIABJREFU/cXzveTGjRtcOHeO3/z1X39Tn+tpCFIyvJwgVoKmL9jISVIlSIwgFQIjBKYts2hl993NyEXEV0KHQ8vXDFpCIgVJwSCsRVlLLrEEqcH2BM1FNUGgLbES5DehWLVUQ4GfuGARmYLKOyyXsi7cztfub+VNjTSW0qYljB0KrhZIchVNogQIi59ahE2ANpELXCZf16i4jdAxkhZDFY2wIEyMjBvk2hZpoBYKVDumVNNI66L8w9hS9wT5tiFILFpaosQQtS35agM738QICNc1xbpBGkuuafHaCVrA0GZKIwfFumF4rUVhs4FMquSrMVK3sULgJ5YgsawEiqjtEEedULetUNCWkGtqCg1DMxBELY32BF6rRVSvI9IWUdu10VAlJWpbBm5XCep1ylsJfmIpNg0DNYPXbuM3a4T1OsVqG2ksst0i8QS5hsYSUC0PAlEWyduiOlhCkGI8j8rwELAFtHGR1xGOYGEBjcUHFhH6KkF9HouP0psgA0TSorBVZytUFGqWwYqjfAigtGm4WVQUm4ZaKCg2NeW6Q/4VG5ZUOsSPkZB4PoIEmca0CzmE2UDaCg5H18w0LQup9BSOCtLGRXxbIKGwMYugmZW9ATQRZg2/2UIagdRNRLuKBZS2JBKMcHSHQsvpnrAu9GilIIkSHCbOh3LdoLTTqdgTDFZc33naUqppEuVQRRbwUkvqQ5BacrGjFESxIyFIY0l
ylkLT4mnYLELb9zBeAYugWDcUGobEkzR9yVYg0ALqgUQLWC4qPOMoA4WWpbQGYWxpCDf2o9iSaxt8ban5UGpohLHE0kWf1wPBVqQAMqydoOEpTBCy7EsMINKUpgeJEqzlFXNFRey55q9EEpFa1iKH8gOHOquFDg11ZMtReSxu3AKunyVMVR2+zAiIlUNz5WJDrmVROLRisWlQxlKuaWLlMHfVSLKSlzQCSSIFUli0dEE+qQJl3XheiyQ4gh9R7CgYsSeIA7DK1T1Wgpov8FOLahg2IomRgloAfgwygcBAsWbJNZ1xlBZyWwJhQMYCry637V+QOj1WxqEo/aYgbAp8A35qiTKUnYglOb+NMo5usRVJ2tJdM1k1mY1zWr6Sl2wWLHUlybctA2uWwoqg2DBIAdI6G+Zn+lgPBC1PUGg4UkeMQBlLrg25xJJvWXybXZ/p91JeERm7HXwVNB3CtIPUTBPpymPBz5BwJgv+awTu+YWGxejMrhlo+oJi3VKqO+KAMm58e9rhUjcj14ZF62yql7r+sZDppyVqWwIHS8JPHXrP1xZ/SxA1LJ62WOnGkSmmNEOBLy3WCDYChxOrBu65UWzINd24UMaNTzJr0Qxd3ZV24eRKW8Kmu3dqJU1PIC2kSuBrS9gpS2oJ1lOC2OlgLRSUGpq2EuRjS7nixmCt6dB5LV8QJA7JF9XdnOAlmsFNTWm1SX6ryWBNs5GTVHOKxYLCIik0NQM1DVLSGCxi8PDaCdIkyNRmtlBQquusTh5uw10DS0TVRbDx9nUOb2aAHJAgabqfbRXQCGIgQqsBLB7W81FJA+2VAYFXr4JcB9pYNFDJ7HIj+xdnyy5nW3RUYGBjhfpAyc0hIkR7klxjPitjK/teCmw6Jcul2f0FlhqWBToYUmFqGCGphwH5psWraGTSxOLsb65ap9hIERYS5fpVWLcmUcYijc2oOBArSKSzS1u+i6rLNywN340jE2rKdUOpYQgTS+zBQkHRCNwclmsbwtQSe4JC3aHp/MSQSFjPKWcjAZlYrBAZBlYT1Vaz+VRhfEXU6qygLKpdwfgBlhzaG0D7I1m/NbMrCkCNoDmfzXsxkABrGJlDUAHqFNeXCesbhLVN0C2Uhqhptm2FZyyrBUmu7ZCXft1RpWJPUGhqtkKJ+uxnP/vZn9Ti+dTp03znO99hoFx+0565uLjI1fODFGuafRXNUlmxUXC4nfWyRy6xfH06wjeSqS3Nis1hJRzecBzJcwcCjq6kfPVgyCcvNhnfsMwPOj5v/cka0WxI25dMVC0zFc3zkwGPZrgWr+ZxYFXz6mTAQ6/q/4+9Nw2y7LgOM7/MvPe+rfaqruqlet/RjW7sBEAQgAiSokiKpChatiVbCockj6xQKCYcGs/wz0Q4PD8cHv0Yj1eZM/KMPBMhaSSOaJkUxUWEQRAAwQax9L5Wd9fatVe99W6Z8yPzvneregEggpvmnYiKqlfvvXszT548mfdkni85fi3lxSNlHp5oUYwNNwcC+h9aZ/9LksgTNAuC3bcSrm8P+PBrDYwSHJxI2Tnb5KVjFeb7PX7t2zUmRz12LEQoFINzZ/HTSUzvSYyE+yaaFOcn8Vrz+ExypHUIrQzDN8+iGnVk7ygDVc3rBwsceeMKO6oFprYWHbBB8lf7fT51OkQFhrkhxYF5zVPnGzz4yjk++1bIxO5+Pnuqwb7ZFD9OGaql9M/NcmP3ED/1ep0vfsLj5/+yyePfu8H2qTkK1TOMV0corc4Q9Q5Qin2aJcGXHyrzs6/UePO+IoXYOuALe3yCGI5O1Ni6mPDK/RXe9/oSE7sr7D99icFbM3jNWVTPHm4Ne/zMK+uUY/jkX7xB/9ISu9YqoDxGqpqTEyEjk9eoLF6jf2GVHSuwtKWPvWfPcv7kTk68WadYXeKth3+NHZPTCP4KmOWtJ/8V4xOnCQcrnHriV9l7+V8BM0CM4eMoc8VNqOfQHMLjz/Cbf0H/0goJD1JsvUzU9yj+yk22Ti/
zwjN7OXkx5r7JhJV+hQCOXU/4rZ8Z5B+dqnHqSMATF0OOz8UcnIs5djNieotP4sFQ1bC0pcLY9GuItMjsoXG2TP9fqHTGTfgvAjUyBjWDJWjOAQvAM8AkcJPtkzNIYhSrCC4B8/jRAoX6NkQwSHHhTYLaAjf37aWnqbk+FmCAlw6VeO5Mg2pFstyjGKlq/vDxCk9fanFj1GetonjmfJNCbGgUJDe2+Hz2lRoHZhNCXzK+lHBr0KMntCjDLY2UCwcUg+uCXdWUreuaLdWUtaLkyHTM9x6HJ15L0Z7gpWcEO6cKDK+V0MJnMC6wfypheajA5W0Fvn2ozHAj4c19RR66GvIfPtrPo1dCjkwmFFPNtquKYqj5v4+XeHBR89zZJgMtjZ/AVw8V+bXnq/Q1Dd98tMQD52Iu7Qy4vCNg70KM1PDCiRIEHttrZf63Xx1g50zKvotTnD26lWqv5L88XOY/nSxzcj5h51LKHzxT4f4bEadOFFitKMbWUk4dKPL6eIETMxHPXA95c0fAQKg5OhVTjAzndxRo+fCzZ5tUEKyVJIknKKawbS2llErqw4Ytq4anrrSolSTPXGhxemeBRklwadzjmwd8fKlYV5JKIaGvavnM58d9woJgouJxedDj+EKCloI9CyleYrg25lMdgYU+OzkZrmpePFDivlsRR1c0f3mwxLFbMX9yIuDkTYFqSdaKik+/HnL/Ysz8gMdIM2XvNcFCryKoKw5chb6mphIZaoGdfO9aTBgJDaLhMbQk6AsNh2YiBhua6SGfRaP4O3snKbxe4fpYwOvjARd7JR+8GfL09ZCRdc3UsM9oNeX3H+khPBRxQxf56bMtnngr5dA5wcHZmHNHPfZOpigk21dSBuopf/j+MqlRPHutReLBqe1FHr0Z4kvBWDVlz0rC5JDi0ELKQNM+XP37R3s4WNXsWrFIUho+1ZJg52rKkfmU1yolTszHLAwJyhp6azDUSGkUJd85pPjg2Yg9SylnBguM1jSHZxNe31/g/psJj14NGWqleCkUE9i6nBJ5gudPVNg7H3Nrp+TRizFD1YRT+4toz9rD4emI0fUUXxsSz6L2Zgc8jtyK6F3x2b6s2XMr5cWjRcZXU3h8mfN+D8VCwq044HKvYl9Nc3bE430zCeOLCanyODwXU4gNgw1NOTS8vLfAwg7NrlugjcVLliLNlmXN3KDHdDEg9gTjKwk3Rny2r6fsWkjQQrBlPeUzF1skAZzbFjAz6PGZ79Z56UCR9020OHYzZXQ95dulIQ4vxDTKgp2LCcXUMFAXPHWuxeDyGgeXFR95eYKdN2cZLA7xxYd7WO+RfO3+Eh8+02LbSsKOVU1/COeP72HrxCKFpIQX1gkShd94k1SOsbW1Fa+5ioxaSEJgGsVLDM/NodIYywu+ieA7WMzc+4DTyLQXQQEvfQ2YRLANGCHs+SwiSkgHdlFe+h6toWcJGhcpriQE6dfdRNBH8RqCOReMmXOT3F5gG5peauNPceL0H3D2fT/H1hunSIKtrIzcx56r/waYAFaR9ANXgbMwVoHREJamgT0IXkHyppsgjqHiNaqVvbz+6B4euBxxcjlhx+ULrIwMcvRSg7Fr5xmOKpzb10NfQ+Nri849v73AnqUYZaCQwNwWi48th/DdPQXODfm8fzJk27zm+UMFVoqKntEGP/OtmB3rKT2hRiL4jx/so1qSPDERMtLUHJ6KOXWkyIlrMQ9cjTBKMDWs+Pb+EtVeySNXQqKCYPvMHIXkOhAzvPQKcINUHqUx8Bh9LZ/yysvAIv56L82RvajQJ+z7IFHvT1Oo/Wcsus8DjqDMLXpXzjj9zZOhWqPi38NP/l8ES4zfnGFwYZ7BhSlkVCft3UUphp6WZqiqWeqX/LtnS3z2tRZLfYriisepk5Kgobh/KuLbB0s/usjzP/qt3+Zjn/oUO8fHf2SRbw3vgCclbv9T5KAohjuihIR4B3iYzfgoF7F4B4Qj3uGlbUHE3VlHxnQ+dhvRRdz
++25Qs9vAZwIb8nJ1MjL33ibFmLfDmuX0JEymcsEdEXUu2CCM2FAdsYl1lC9vR+e3g+M2qEC8XUFvV5jY8PsO5bEqIs7hl7L37txs7wzp06nOncos3rFFibe5j3kndrjBwN9Od5v+uxm7Zkz7hRG5lsw+mNmK2WhrQpjbbLujZ3EHe9zY7zv1EBuVkusTcnNNzF10Kd5FpxYdPJS4i6/acF9x98vf8bW4vS03XwcD0r1Qm3pG1h/vZBB3Qh52VGvu4jjegU/b5L/M5v7/NvU2d/pL3Fkn+u2Gg3sg5kxWV3MP/OM9iJLiXmPQ2yhqM9rNmM6YpO+k7rcbsMymMUN0+tmdbF7kxpa8j8++atr9N3s/1wAib2Fmkz2bnC/e2HZs8rW3NeY9eoK4m/95R0rfrE3zNr3u9kYXdx5g71Kgt0eqindQytveNR2b70wHNo2M4u5qNbeNru+yM7+DYcHcc8x9O57kO8C4ijvNEpy9mvy48yNKGDx99ixf/tKfc/3ate7Gma50pStd6UpXutKVrvzEyI8k8vyJT3yC57/5zdsiPl3pSle60pWudKUrXelKd/Kck3/7e7/HU+9/ir179nS135WudKUrXelKV7rSlZ8o+aFu20iShP/hn/wTVpaXu5rvSle60pWudKUrXenKT5z8UCPPf/+Xf5l/+S//JUqpH2mltQAtBDLKJTzkdqI3pMVeZVkP0lgEEQaUtltNVpzmpIVwoIUgVQaZgknshnKlDYm7n0ztj8DigRK3ZSWS9vtGQCoEWjk0kCNOICCUFlNkEUgCIxVVT1FT9hqxtNlPIjKIBEBgpES7DfIi1RiHxhEavNggXFqA1AavBZFUGKVA+kRSEUpJTQluegIjLK6mKe33tbDXRyoSAUaofLYASEWkfIxQyPbmfNVJ8pASIyVGKYyQpEKw6OoYK1vuVFhsl4oMMrUXD6UAKUml5zJgLApLC0ikREuFkR4I2zgy1Bhh30+Fcv83GKkwUpEoDyMUkQwwysMgSZTnKmHLHUsfUGjlESkfcomKVqfZT2YwKTblQrn3JUZ4aOmhlWfLKQRoawPCWKCR8axdhNLaAYBoirZdKpfFoZqd9CgtPdeFZfteG1KCBNgsbJ17DzamuGn3GYNqpnjzne/HUlrdKoszki5jQqagEos1esuzbWWwbSUMbdScxtkrAhWbDbl2wkAiBQ3V6Qeew5BZNJ6g4YFMDamwWCVpDMJhw7RLhDXYz0euPxqXwDXlCdsvnE0mwr5e8STzDvuVCFvOeemwlM7ujIBY2Os6QiVNKUglyMSwqiSxdLg39/mWFNzwbK/SwGXXbxCmXR/jdB9JW5aGtIl42TUQkLr7JBKaSpBi7ym0QSb2M57DoYXKYsJiadFmqYBbUhAL208zTJ0g6wMwpwQNKUhkxxqsLu21E2lcPSExzldJmHTtlE9ite8JYiFoqY7uWtLeQwuLRBPa6jpDtaUuCVLnEsMsMsti/7QyG96rCWH9bC5HWAtYkzYRVGvbT7TzrQZoerbOnZ5s6wHW/8nU+nmBaNfXCNDYcmf5pevS6ji7fuxsI7O1ZZX5H4caFLYtVEI7ky3z+RqLfsynOhthsX9+3aASY79nhx0aSiAdhrOlbJ0zH95OdNYWqWdtxtYlS6CrKntflCFWGVATItH5WzjlGGP7mnR2JrXFuGpp+700pq0bT9vvNxzmVRrT9l0qMR1cn9PHqhKsOxu95fSl3DXq7rVxtp8INzIZg0iNHceUAumRCovqiySsSpCRzYJUrsxaOH/m/JRNQkyRJkHFBpFoLPKt7nyedD8pFmlmcr7SOg7T9psdvw4eRngWVSd9N3/Qzg9nHk6yMT2S9v+NGxsMHkbZ8cWOM7Y8Wnnus1Fu1uKuo0QuE1PmfHnYGQuUInJjrHRUBK08jJAYYXWKm58YrN1oLGpSO38UOnuSqZ33NJ0vwfkbO1YLtPOx0kDQNO1+bpz/zGwxyc00EyFYVZbgol2f2zguhba
dpMIoz41znbFVSx+knasY6WFpVwmWZCJz47HJjW/58VLn9Gfb32tqvNAQVC2qTgvBFc/iYaW2KL1WNtYJgRHih4eqm5+f53/8p/+U/+P3f/9Hutd5bm6Ohe/0MrSWMvPhGrP1ChLBrbJgoGXoCzUHqilFLRhfjvnOmM9SSRIWJINNzZ/t83nmRszJlYSe0HDqQJFUCY7ORPizJZ6/z2f79gaVacXEiI/w4bGJkPPbi8wc0+y7llJKDCevRzSKkiNzEY1AsmU1RSnBwDkfP4GvP1BmtUfy8KWIo1MRRgiKIQwsrTJ041u8/3yN900VWO/xOHozpNBscOF4ibG1JYLaeQqtnQR4VJbnaQ0MUKl+AUGJc8ce4cXjRY5faKLidd54aB+vHS3w6b84Tan+EoX119l/dZ0jcx4PzEp+5kaMnxj6mob7ZhK+dzTgo68usW36y4hmSjnYyej5l1ndMkCxmbAwEjB648vsv/ItVEPwLx4/yq989RqV2pfRZgBl5jGtIoX4PIW1SVQ1ZGvUz3OXGwij2Du5TjHx2L5oWbdffqzCQGw5ncevLBIsTrLzylvo4jBeOIukRaC2s+/6LMW5t+hfeB3PLKERvPzBY+yebNDTNIxdeA0vniUa2kK58Z/xmnOMT9zAN9/j8PmvU15/C8kVtt5cRyIQXAeW2HntJeAUfv0s+y9+EcQyEAAFJK8Bl7AIuFUkCTwwBr19sHwdRQzcIGjexDPP45mrBOoRtqzHXNtW4L6JmDMHihybjPidb9eoNDX9TcP+2QiM4Mqn6gxdDqgFkms7A45fDfnnnxrg4y++CUYxPH0awdeAGcevnIKT20H70FgArwAfug+mViEJgSJwyzmWAeAlYDewA+jjO89+nP/zMyM8cbqJjGpsW1MUKONrOLWvxJlRj0cmQ759tMx/OFFgR8Pwu19aZWRdM9/v8a+fLvHspYi1Hsn1MZ+Bpub+6yFbFhv8Lz83wgMTIRNjPkYIEil48nyT4zcSXt1T4mOn63zh/jJjDc2JyZDj12MOXodXDxe5/0ZEaVXRCgRFWaG3oanEHsVaFQ+f3Qsh0hc0A8laWfH0mQZ/71Sd0dWUQgxzY4r7rzQ4OBvyC1dCPnKxabnAdc3LR8v89NUmj52rMjg3x9k9IzxwM2J0NeHwXMzW5ZQ4EDx7ukZfE26OF/i5F1rsuzCBCcoov0yjIDk4H/Nbr9Zp+pLDkzG/8r0GtwY9mlLx3Fstdk2t8VcP9bHaK/jVb1TZeyvGV4JrowEfe61BkML58YC9SwnHb0T0xppjsxHVouQr95foi+DsCU1hKeD8qMdnTzU4PhUxM+wxXE+Z2KnwaoJPXwzZvpLy1gnBkauGpYrHllpK5EuKqeAXT9c5uK559GrIQr9ioJEytpqy3KsIjWL3gmZ0TfPWjoC9fVXuO2/YsZLy3GRIKuCre4vsX9cEKYS+5FNv1Dk8G3NwPibyBYM1zd61hBNzEdtXUl44UqInNexaTkmVoF4WDNU057YFHFiIqRUE21ZTXj1Y5Oqwxz94tU5wtszoiubWgGKy3+Nnr4dc2SU5eS2hWZAsDwuO3ozZouF7lTL7R9YZnPF54kKTkarG13BoKkYawUpF0tcyGCGJAslKWXF63Of6SMCHrrTwtWFkPeXCwykHr8D0fSkPn005uy1gqKnZnsCFYUVvbNi3mLBjNaU3gchT7FqK+NiVFmNrmkpDMLAOY9WUi9sCXt4dIHw4NpWwVpIEQjDU1MwOeayWFdvWU3YuJIQFwfmHBJ/7wCAPLaW8eqhIy5eM1FIevxTy5Q8W+eArLQ4txNR6FZGEWlnRX9f0Nwzn9hQYqFvE3U+daSKEnUQG2jC+rikmhupiLwenNRPDHiN1GEwF5cQwWIoZXBccngqZGfKZHPG5NaC4utWnL9QMhJpI+zQDydSA4plLTUqR4aXDZYZrCY9OhpycDLm4LWC0pvnIGw2ef1+ZWxWPB69FbF1OkBieu9Ti8RshN7f
4fOJ8k/665tT+IgdnE47fihlZSxldTdk31WLHYsLVbQH7FhKWh8vsunqD0trz+Ok5/MKDFA0cmo340OUWl3cVOPLaBJcOD7PeF3D4wjTKwProNvxUoKpLqLjB/IGf5vSBIkGhH+mN8q1nP8TuGzeQ6Yt2MsczVMc/SHH9K1iM3BXgIWAJQYhmP4IYwTzwMnALP2yh+DZB/RyGgKXxh+ld/kMQvUiuAldIeQjJi8AwFhcaAOcdEm8WyXnKK9eBU+y4/jUES6hU079y2k0Itzp/vQv4HjAI6yuwaxTmVhEsYznSGsPfRnAewTzFZsL4fJnywhyvvH+cY2+8wcitlxFNMKU+CisLfPfR3SQKRqqao1MR3z5aJPQEx6citq6kjC+ljKxpXjhU5vktPr9+tk7oCXoig0CyPAifeCFm/3TM7JDHSkXxvz7XxwevtpDA+ErCY5db9NY1f/Jghc+81rBtU5JsXUt5ZDZCjygee7NFXBRsvTmLZy4CRWL1JMpcR+rvUKqtU1l+3Y5tbCMtHKCy9goqvULQ+DZB9XXi4t9GJbPAcwi+DswC/W4yvRXDfQhu4kcvuus8Cyza8xJo0io9wBc/upu/eLSHf/2BXh6ZiShH8Mtvthiqar5yssIDN0IeuJoyVNdsW05Yq8gf3raNQ0eOcOH8+R+LJME28iwffck9q0jTCaIm7r0MXxVlwVPdiRxkaCCRGlIhMGJjdEHknuiy1xsi3m1skUG4UFcqwHOhuuxeInsiJ0W2DDKxT45S20dC3X7WNfZz7QKI9tNW9oSfhZCMi7bJGBeNTpGxtpGuFAead/fWHfQSLsZmoyC6ozBh3xMktI87aX/efki0X1vMu9S5t3WOcmeySJLTQ6pB2PrnCmLr2wbip53IsIsgyXZkIFNIauvZjhOGzrFpJHoTBq/ZjsxCYyNiakO0wkV5RclxvXS7jtaK7I+NottobPakb6PkBlFy9dZZ1KMjGR4nyvcfk+bu7/5uowmd7jfgecgX3pWp3bpoN6lt9xMXdRBuZSQrqxZQdxF9mYB0GMJajplmEM5WXZQ5hzjKbF4YG8U2zsZjh8uSrj/JOBeB1J3Wzlcnw5NJOshBIUBGIAudHi+s6XQie6rTL6UWFm1odLu+0vXxzIZV6qJxCETsomJeJ0YltUDFuSW9qIM6kroTIWxHDk0Ou2hMO0Qj6UTvhQui2UiP9SuZ/o2xUUKTXzUzOd8lXD8THaScdCsYMu8b2IiQzKJLqbBRXeGuKXNd/DZclFtpaLeJdqts2nT6rzEbLM+Qb0OR843OWNh4vSwanL+A1CZPLnT/6/jnrE4ZZlDoXKQ95+cz+xadZmj/lpndCbFBz5kNShcxlzmfnq1gqA3cNtqHT4g7rP82ZaePGXdAi0yFVQWdsudbTWA6Nm9yNuXC5tLpy7jIfNbQd0IZCjI/a9rjXVYvk+t22WpLdk+pszGwE7EzeRs2NnopUldW3fEhWRu2V6tchFu0B1QB2nTGmtw97b06fsEIgdAZxHQzelK4FSm30uBe5yPNZgPC7E6+UuR8ZprzuUnOV+cNND+jyJcnzf1OciuDoYuYmk1+GTaun+Qvp3OllM5XWV3JRLt+k32lMy5i9CYb79jpBt/nosJR5n9dNxZmox/Lphgt0VkRyfdFvWllH7fKITbUMIeFk9mSX5obO/O2mh9XY4yQuUhyymZUYMcemhvmIfnOnAo7/tSl6KyQJqJ9GFjmj4XoYFB/KNs2/uF/8xv81m//t2wdG+tulOlKV7rSla50pStd6cpPrPzAI88XLl7kG3/1Da5cutTVdle60pWudKUrXelKV36i5Qceef7Upz7NF//si12mc1e60pWudKUrXelKV7qT53vJ733+8zz00EMcP3ZfV9Nd6UpXutKVrnSlK135iZcf2LaNMAz53Oc+x/Tk5I9dpS2exWJIUgmxsX9nyYKyjVayiSwN94gRO2QQuI31icXJCe0SIYzdUO/RSUbJ8E1G2Dw
ymdLegJ4lWyQOfZIKuyNfJjbxS2MRPMIl9KWFXCVcIpi9v03uEyntZDGZ6nYiRx7bYpOTQKQaSdpG4NFOqLMb6rMEJ5u4YlCxQXudBJZsq78RWbKZ5dh1turbuhi56VEtFRhlM2MkNsNKpGCUsEkldBIRjLA4JJu8YmxSo3YJF1liCRoZ2uS2TiKBvW4qs4QvkC5Ro5NckCX7bcRnk2I5AAAgAElEQVT+GIcSskkGGqixIQHBZIkg2ftxvlEgSnPXF9hs6PH2PVLltWkTaGOTwdzlMpyZzJF02vAeKZC6Y4udcoBNkgjtPYVoJ4S0DfI2BBOb6m5RTMbZq00/SZCJbidMJVnSmtOokdDMJaMZLEbKJo5ZFFqau10oc0l/ziazhL1a7nPGIb0y/GOG1cpwVjIyyNDgYRGGRsgOSEpYpFw7tSaHNTOOB5mlqTqCE6HEJTjZPrfqUIIpAk9kyZ04G6eT9WhA1TSqZfGQUhmHuBMOz9dJ8rQJVxb9uKo6/V87DKVN1DOEDu8mHc5NynxbWZ8lDYQiS4K1CZDCs1go8qt7ueROYWxbpCLzSbadtLCIOJwPafuGzHREx7dleLSGa0e0TRCSLlEsS6IRGJtcaTpJZVmCmUw6/88SXxPn8wzQklnbm3b/z5CViWRD0ltWxQylmPWbzC+jrV9p4wq1TeZuJ/XRwedZ26WN5BTG+vpMR4lLXMt8oDQG0k69pdObNA5VmmZdzyXAxh1v4uW6njDchkvVwiLiLOrO0JK2NxqHUhR02ko1LBosFR18mMnlmNnxzPYTDdQleKKTOpYl7WXJUFIb6z5EJ1k2SxyMhGgno+tUONxqNj7YixmHLvSyHuPaog0rTTt1NVhsn02E7SQSGqT1dbG1fxlmidcCI2QHSyoMsVQYJUmlwqDt9YRES9nGmILto7STHwWJ9Ogk57UQxBip2Ih+6/Q7g9zYqTYkpDm8XIZBE6rDndwgMZb+ABtwou2kOI+NyYTxprGJzv/aAICETsKjzH1OWOysuFOi6e0jgHF+VgNe1IEWCE3bT5LretncKPufdAnC2VCTujlD1tZV5WzHYUeNI8KE0qFm2/OTrGPcKb4bAgYtFJ1kfFcimbXlndrHjlmCGPCzGQ1tAAAKgSaSglDCisiQq4LUz41HdBB8bXzsDwpV9/N/6xf4737nd3jooQd/rCbOc3NzxP+1h2PXQur1EvvmE47MRexbSUHDUq/i5Z0BP/96na1rCbtamr11w40Bj0+91eAXLrXQKVzZGvD14yVqvmRuT0L/qmS1okgPNvlya5AC0PIFu1Y1+xZiUDAXeKyVFcemIgyC83sCjkxGHJ6JGV1YZ9s63BwNeOFkmaGaZv9czOyoRzE0/OUTFU5eCSnEKYX1CWTaQBeGOXOgD6EECyNFru0s8cCrbyA0XDn5AYZXInSxB9WqETSvodnJC48fY7lHcWS5zMTBnSAEAw3NtlWQ9TqGCpo+ajsOUm7GLA4EfP1khT99uMjTFyPKLdi63GT85gsYRkmK/fSsv0V5eRVkARH0Ull+DcENwuITPFGvMDw9h5deBF1AUMPXswjqXH7wMwzOLHLpxE7Gzp3m1GO72DHb4K0jFTwNqRI8MhkxUE2plz2md1RY3TrG8Mwium87pAIttvDVj+xmtOlRK/XRU53m0vHnGJm/zJb6IIvDffhhyKuP7ePAldcpNF8FWtS2/jpebQXJuuuIbwFzSM4imAXmgRbNyj/Ej284x1WDraNQ84EqHN0FjxwC4UP/AOyswMMH4BtngfcBF5g5+C/oXf4jZ31rDM6vUF65yta1Itf2bOGpc2v0rbbw66sUY8N6b4Ebewv0VjW1RolSyxAVJR99vcHENp9nb0aMXr9Oa+gofvQa0pwDHiLlaSTDUJiBPSMwuQwigCePwIWrEB0GvptzMgeBFtCPYRcpe2hs2cN6j+Lk+Srf+qmTxD29zGzx2Tcb4wMTuxRPXAiZ2uLzi+dafORsg4Gq5sq
ugLQk+c1X6nxnX5GBhuGRiZDhuqavqSk3EvqQnN5doDfU3Or3CFJDf0tz7EbMo7MhKxXJq3sDHrsesW8+QeqUbbMrHF1U9NU0EhhZ1/w/H+hFFXy++WiZ0XXJ2OU3KC9ewuvZzWA95alrIcMrmsi3Dt5LU7YvRHipoFnx2LLcZGg9JAk8elqG+2cj1kqKSmQYmZjgsYWY4cWQ0ZUmqAKFyOAZQWV+Ek8UWe/12XpzGqTiSx/bxzcerjA96lGtSE5cCzk2EzE4NUGh1aA62EejIDkymdB34zyPrhR5+npKqamZH1CUErh/JkIYwcVdAc+db6K04dqOgIevhJRDw5ldAfWCohIb9k/DrT0JD18z7FtIWemR/PFTFcZXUu6/mVBODIXE0Aoks4WAIBXsX4iZ71c8eDOiv5ESehareX1rwLbllG+cKNMbGZ4/XGK5R1FMIUgM1w8ajp6WXNxRIAwEqxWJ0lDwJeFYzPq2mE++GhN7gmZRsNyrqJckqRT86YNlPCEoR4bzezyePtNiraw4/YTmSqXCllrKgaUYpeHYTMR6WbLc4/OJs3X2z6ac3+lTSAzrFUXsCT54sUUQC1oFQSEx1D1JkMKNLT7NsuSZUykaQeoLWkXJ+QMFdCp440GPj3yvRbUimRzzGa5rvnXIh1RybCHh6UtN1sqKnlDz2HlNEBkG1yTzPR4PTkc8f6jE9rWEPm2Y3m54/WDA1iXD+e0FZrYIpJD42jA95FEvSi6MFbi4TbH36DwvmV52q5ChBcmZpxKascd9UwkXtgcsbzEM1zTF0PDvfqqXmaDAtR7Fg0sJQw3N2FrCFx6vcGAu4elLEXsvXKev3iIuljm1v8TV0YByYvjDzwb87Asho2spBxZiRlY1jaJEAus9imJi+O5YkT8+4POJKyFPXbVouJWKZK0IO5cFXiDYOxPxpUcK3DqYcLFQ4shSwnJFsdJjebmXhj1+/nSDHYspk8MewXPzHHvB58HrEYXI8Kc/63PE1HjixYihpuG7e4ocmo3QUvAXT/ZwfCKkUZD8pyd72b+UUC9JIl/w6uEST5+z+LsrOwPG55sMTl5nT9jHVx4ps9onqW3pZ2y+hRefx1/qwfSNcfzN02y/coZDl1+nkLzFtlsJ6yPbuXx4kANn/5K+pdcxrTKF8AKxPMqffHI3nrF89G0Tkxy69Of44V8BIan3D7h18GcYuvktpP4asIQlL+1zgZMEyQwCBVx2E659QAFDC8EpBHX6lt4CXkaa88Ccm6AdQzDhfOz9JOJTKL7pJu57gfMk/F0E/QheBh4B1onFkxjGkSwDE8CMu94TdlyaXQDGgOPudwPJJTsmoRBU8cM10B7f+NgxHv/2GSQNYjkOMkWmklceHmf/XMz4UszoQoPmYEAhhv/4bC89iaGQwo0xn94oZU9ocZXVsqSQwP6FiKmHIo6eBWkEX32gzLUtHmFFMFrTPDIR8ueP9rB9LaW3aXh8LqKvlvKd+0o8eink2ESVA9dneejcdYrzKwyEAqMMQWsKzQ5Uuur0NuXaYw+NngdQURU/eQktHgAaCN5EMI8Xn8WiV1+lg2E1WJ53CcEaMb+N4nmnrxDYyqkP/GNGJkucfd9xnrxQpz8R7EwNX3ugwo2HUp56NaIYGy6cgH03DIP1lHKoubAzoBybH8y2jfMXL3Lm7Bl+6Rf/7o9luF1q8BIQocBPDF5iXysXgV5XAj+2kQU/MXipfRryU0OhbqPNqRQsezaCGDsYvBYCKQ2LWeTNweBxEQCjRTuCaFy0QBp34ERqy2EENDyLSPFSQ6Tsk3nLE+0DD0Q7HmbakHEtBLEBkaSAIpXKRpKy3+7JNJGdJ79YeTZ6pQ0I5aKush0BtBFgW4Z5d3+VGIe/cTE84yLaRGB0G7MGCUYICnW9KcKZRYY1qbILH4mymLBYCneYgWhHUPyWaQP7EyHdd9wBGcI+6beUjRh3rqcQGLyWdtEVQ8vLnk6btiQy2BjGInTlWnWTSoc
F8/2NT7Qyh35TEpQCT4EvbfBAZRB2BcSkXpCLImgENQQNZJiilcBvaGSqEalGpNa2kix6Goo2Jiqo26f2oOEwQ1J1ljscuB88i3ZqHw5iHEcua/9wc0+gA92RHSSZEETKJxXSgvK1O8AkdwSA39L4UScSgTEU1wyxO0zDi407EML2HS+CSHWizVlUVCYQ1HV7MaWzIiNQqSFomjaaSmnDmmcPGbGHKwhEGiOI8FLwUvCbuhPJc+3mRU5nCHcQRQeP5beywwEEAk2wliITjYp0O7JozTazdRDaduCmklR9SahEO2LphQaRJm1UlGhHchOCqqZQteF346L0fmjaB70Eoa1r5CKPyvmdDA/mxQYhjYvYWV9Vc4EuFZn2QU7Z6o1x/icVrt4uoqRSG61V2tBSdjUpdL+zfqeljahn/09dn/RSh/hUhiChHYHUucMR1pVoH5Cgc6jNyLORfi1s/QTgxx3EYBDZOusMJWZo+xyV2MNbyCHlsu957iCRRFl/HDrbSFTn4J5E2uiYzuG5VNJpH7/hDsEKbV29xLiDIaxeUwnV3IFRsXKHkrjXWRs2lUBIQyRc5FbaeicClDH2QArZwadNepJQCBt3dNFuT7tonQC/ZpBpioxTVAJa2UNCEgmrvi2nSsGLN+PqrP5CYSPO0kDQMviRdquKbpVIZghMSD1By302O/AFd2iRn2i3wiQQvrb9zR3kFXkCIWzU3QtNu22MtCtBmTRUB9kqjR3nZGoPAEqzld80wW8aQk8QK4iVclFhgzAxKjV49RilQ/y0AYR49djWVclOkMMkNtqoFA2nS+2WVLyo6nyhsIedSA9BmIsCsymS2aKDHSW36oiboLXcJC8EVsgON+msgdlDwrQMNq5SYtzhVp67rxs7pMptCjC5iHV2uFzSvqZb58ZyodPc+BpiDxTJI+DcmJCtImQrDqnBd6tC8771+dnBP8q4g33c6pAWdq4kZQfOFyra8xSNXQ1reJ2VrqBpfV6sbL/3IoNfTyjMxYg0RcXaHdwi3PwjfyKSG69FNn5Xc2NWhvpbcf+r5vSdn3MkGIJcm1r9RSrASI9EevhNgx8bvMSOMQ2vswKerWFIY/URu3HsPd+2EUURTzz5JJPXr/9kbWAR7/6z4i5fFz+A+4vvu9B3+qa59zU2Lde8u1uK97S873UT3vsqYvPK3XtQkntZiNj0y/ww1PXO9zi9TRXzuxl+0I36fd9CZLDSu1xXvLsb3bZAK8TtyuHdX/PdfO4d6178YNrm7VUmfqjuQfwAvi/eweu7N7W557fFj6ie77iv/8gK9HY+9G56FO9xYd6pRYi3UaL58VOsuPuc5vufn9z5g1lAyLTZ/+IH3JvFe6ptsWlDyXsmv/Gbv8n/9M/+Gb29vXSlK13pSle60pWudKUrf5PkPY08X75yhW9+85v875//fFezXelKV7rSla50pStd+Rsn72nk+TOf+Qxf+MIXukznrnSlK13pSle60pWudCfP95J///nPs//gQR48efLHvtKdbf8md057B83WErndSTm0UX4PugbqorNrWBjTPsQ9yjB3DsclXEJShrrKsEAW3+KSkrRNnGgjtszGMpjcefJ2Q32KbGmMEA5dlN9Rlbqz2UGGWa2S9kUz3FyGzdNCIBKDIEWSItAWkZfD3KRug1KaSwAih5gRaNDGJqFswKDhCp8/cz521bKJFxn2Jkvuy3AwMjRtXFIHoeXuZVziFqadgOlS/Bw+T3faTziUXhtqlrJB0Z2Wz5XRJYi0eWHuu7kEDMsHdNkv7U1jmU4iQDu0TqfN8nrJEGgisgkZQtu2SFwyUZaApW+zXd1OXuv8P8umEp0ymHw7ZXVK6GD6kna5LBjRJom01eaqlSUg1fIYrMyGItO2IYO1rcwWszqI1CalSG06SXQO65XtSBOp07joAKLyJpTVNyWXiJZ0kvjafdr9IeMOhinrY1I7zJfDZ3U+n29zkHEHtZV1cJFoZGyTZTvJnaZttwa
b5CtS4xJnTRvVJrTZ4EuEdmhFvRFTli9T/idxCDyN2JCEJ/RGf9YuszNdoWmX0UCbLbapd1rzzbWp5nY8U4bx1Dh8nNgAi8rpoYNzamPqzAaAZQenltU13ZjkmOExBQa9yYSzJKesErGwiW1pDiGVhzjm7yc6ltVOHMxj0vK2lGEbs2vGomPb+SJJk2u7LBnWJQxmOsr2eXZSzkxnvBE2Wa/uEIUZyjGSHR1m+WltdFjmUXJ6li4JVqamkxjpdIPI10049JxF8KUihyCTHSSdyTCUmWE4OzYChDAb+lrscIImV9e835BpR2NZP8zbrUpy48SG/mzxpcJ0fLQReRxZx+8KIBVykx9OEakhlplN54N6mk7S4J2mQdkYko0Jca4Fk9z4lfev5Hy93lRGARvuI9rvdWqcodc2o+cSOomM5MaX7F61XLmS3FjTQY9CgtI6Nz51UJJg2iOTMR3f38ZFStHxTXqT7eVsspX1EdpTofbcZMNu8E1b10WUT6zM6z3XY4SADchAs2lM3YwYzBJAl7AYVpkb/9w8ARBGo7MxcxNeOKtf1o9lYtq+R5j3CFXXbDb5uU9/mm98/esUC4Uf64nz3NwcjRcrbFlIefVowFrJZ0st5XsHSvQ3NCO1lGdmInprmlYguLQtoFEQlBMYbGoGqymL/R5jtYT7V1IqseHMVo/5csD4oSVWb/RxrGo4tBizay3l3FafXasJexZiiqmjaYwZLo4UuX864vy+Av0NTVIImBkNWOiTTA15RJ5g32JMvSgRAuZHFPtnYq7tKtFjxknKO/mzT4yxbybhxQcqNAqC/bditl2fJazcx7X9I0yOF3jlgTJ7FnyMHoekh+89up2nLjcpNQ1ferzCExea7Fxo8ub9/RTUVv7gV44xGI7ywmO9eAZavuTbB4r0jLR45FzKkVsxrx7r4fHvfgtJi8kD72dgdpGbJ56iNjzEpf1ltk8u4iVN6gMPQaGHpFDCX/MwlJEk/Jvf/TSPffU6xWYv9e2HGJu5itdo0BuM0agEVGI4tzvgjz5d5tilmK3zddb6CzSKglhJSmoAEbeYOrSdvtWIf/6ZcT55ao2exVlM3Ed/ax2tdoBSzI/1Mn7hLLtWpxD1YQTDCHoxaS+6tAUvXMIwhGAdzccQ3MJmUG8H7iNofRebzRsAT8L6OdcBq3Dr/VCegtMVWJmBI2NwbRbmDLf2/2N6Vq4wsPAGMI3hGQQDaA4gmMdE/YTDuxicm+O7j28l0CVqfT77Ji6zZ8Ug0wDPwL7ZhJ5Q4yWG2BdsXdII2U91uJ+epa8gzU0gRnINWIbGLccaLcCBYfAFXLgO6TAwTMrfQbIDwzbgMDDG4q4PcuPwAXZMz7O3WqC8vMrK1n5uDSi2rKWs90gG6poPTUU0jeDmfo9nX28ReYI/f66Po9dChmopO+YT9q4lXNnhM7qSMrqaMrQOp4+U+NbjBb68uwKBYLrPY70smdnh8dDlFuu9ii98oJeVouLD55r01y1pW/sFSo2YesVjcsynv65ZH1AcmokZDg1f/kAvx2/W6Fm7QtR/nNV+ycR2H6Xhjz/cx8MXqgzM3uTMY9sghTcPF4kLHhcOldl3s0kQS66MB0wPeUigTw5SmZ3h1af2sHNigVcf3EJPZBheipg4MMR3HurjwUsRlw8Psj7Yy1cfqFCKDNvWEvYtWGzb1DafsXWfW7uGWBjwubI9oC80bJ1dIRwYZqXP581DBV49UmJlFChqCnXBK4dLPHK5xfyA4uZIQEFrbg16nB0v8MhUxLOXW1Ramhu9RZ66HHLhoMeVsSLX+hT3zSdsWUs5vyNgraxI72sy369YHUm5WSpSTAwH52LWP77GmbCfx6+2CH1BtSyZHPJpFCXxnibDc4pyBCemQqYqAcUE9i3GlCPNUD3l5QMljs5FjK4IylWP+25E3BrwqJVthv7++RgELPVbdFvsC3Yvpaz2KiqR5tJAkdVexfvPh+D4zJfGA649YthxwzAx5nN
yImJ+UPHH7+9lbDVlpKEZaKRoKYg9yXLF47X7fNCSqS2Kv/VGgxMTIdvWUq5s89m5lFBMDcu9kuf3l3n8ckhvS7PUr5ge9Li4QzFaM3z0XJO+usZP4fV9RY5ORtzcHuDFhkqsGaynDEWG0zsL9BjN+64m9DYlM/2KvpZmuaQopYb5XsVsn2KsprlySLN7Afy5Ah+5mnJ42jC6knKuv8LOJc3+WzHaE6wGHhe2exyfinliKWFHLeW5uZi39vmcmIi4tD3gsemYmSGfooCDb91kaecwF++rUGwZpgZ9DizEDFYtOWWtR/GNx3rYPRfxlSdLnDnu8fhbEYXYUJAwPRIgC5Jtaykj6ylferTIc1diHr4Zcng6pB5I/uuBMh85E3NwWdPwBXsWY2YHJQsVj+l+wfaG4ca2gD0LMXNxL0NLhlpF4cewcx1OnNNsmanRLBd5a2+BEzdC5oYVYSCY2BlQamneOAFjy4KL4wUGaynlGMYXY/7oQ/0MraaMVqEnUWivyPywoqelef6BCo9eSSmt1dAM0xO2kLGA1FhqEVNo9nFj/24eO/U8hea3AIHSIbWhp5nft5OHppfZfSthsCYoLE7imWtuYvUk6B4KSyGNIUGx8TU6jzkN4H5ujv8y/etnWNj9W1TWXsFi5z6O4C2HUyu68aHpJrECiyntp9nzKH40AUwgCPD0NWDazUTqQIxi3VHn6+69RZSuoZgAzmMpGs9g2IHgmpsMFjD8MoKrGB4m7Pt1vPBl4ASGk2jvCK2e46gwpVQaZ8v1FZpjj/Hix/ax/81zpOWtvH58C0O1lGIMPS0YbsFcv+LCmM8T10NaBcnEqM/R6YgHpizN6M3dRZIy7J+O2bKimBzwmR332LJimO33+MSFBvdPxUyPeKwPgNSSazsCBFBpaqJAML6U0r+4QNg/jN+oocIFXvrAPg6f+S8IriFIqY59GCMexY+uYIki+wnCmwjewiLqtmL5NAPAsNPdgmuDFLiflE+CWGHu0L+lZ2kCj+8AfcDTwBk078cv7kJSZGzmEn61wdn7RqmVJFuaKf2rkqGaZm7QoxJL9k0n/OlHy6xXFH4E17f4703k+WOf/CT/8+/+Lv19fT8xIXdhLNoo9OwTR92zTy5+aqhUU/ecImhKQSotsi6Vncz2QmQoNTR+rImB+UCgiylEkkKsKcT2Mw1lDz8JIihENlLRDARrgSIINeu+IFIQK0noOcQSsC5EO2KdKHtTLaDhS1K/hPFKzBd8VAIrgaDlCYLQIeeCEqmAWiCZLyq0UOhCBeP7GKBctRGdlYLECw2Feko1UCRekddKFVp+gaWCJPSke/IE34VFSg3DcpA94YfESgGKll8iVj4NX2KkD/gYIZ2efedkPAyKc709gMRvRhivQLDSBAyFFqRKEoSGpid4s2K5sfYwDBfflpB6RRsn9gOQgosFiUwMKkrA9/HXQvAL4KI7KmpQmq1ipI+WBQwBIgrRXsGhggJXtpLrrCk2HaCIZXtmsawC7cNIrLVAGNPG5kgJtyx4PSpU7Pu85r5fcE4vADSSxB0akFD1JInnEUtBcaFFaT5y+ClDEIEfOexYajFBxi+SesriAdsoownrvOMU6jEUJRQVxIl72E4BH02l3RaaIoaAxKvQDIp41Yjyio2WKG0RVJ62aB4/MfRN2YNUkFBo2Sf36ZK0iLiWLVvPmm6jBoPIRpXqnmS1ILjgSxq+oKUETU8wV7DtmwhYDCQRHYQZQCplG5GUuoUDL7H4tWKomS5leMK0HRVt+oJICa6XbLhJhCENX5EoQd0XtDzJmi/xYo10KMhUCmJPEHsB6IRq4IO2k4hUgEo0oe+zXFAELU3T92j5irovkNh28UOL6av5AiN9Ys9DC3v4RugJULJ96FItkCwWJPVAEiuLkKr7tFFgqbDYrZZDYRZDTaWqKcQGEQuKkaEaCFZ8SUIHOxUqq9s0MGhliH1YLEi0w49RTql7wtqRQ8IBNHyB8lI8dzBGIYI
4tNHuQqRRGvwE1nwIYkOhYfAbAl/jUJ22XkFk7Ua5w0kyn9TwLeRQR7a+XtoJ31Z9SbVksXHVQCK0bd8zRet7AreCoFxEMpaC9QCaDqtWXk8pNg3FprHINGPwIsuIr3rSobVsOC1y5VTGUGxppLFlqTsMZ6IcvtAdflUKNTXP3r9UMxRbTlcunJwKqPmSujs4RfsGLwVqiko1pbhuX+vY6lyl4Ltg2UIg0UDPWkqpaehZ09QCgZdCy5OUG5qmZ9sUo0mEIFQQJJ2Db4o1W+fQE8wWJYkSzBYkSyWBH9v7FRON7/RsD8UxNDxBIdSUGppy1eLzIiEorkOppUmF7e9aQCisnkNPsFyQFnO45qGFbV8joLJoKC5a3bdXVI39jkph2fVzoawvWQ+svoPYRpJvFKRDLwp74g+2XZSG+YJEu/EEwFtvudW/bAyy2LFUQml52U2k7OQ39UvEnk/PbERxPcWPoHMQiY9lABs8XSf15Kb1lmXAo6F6AUFS6CVDwxmKmybLLfeTSWB9rcgwcjU3Zszmoqc1N8bccO8pNwFsYM8YWMRiU037ehbFZjFuKSUgQlMgKfa56wYgihhZIlVFQFBaSUF6pF6Z1WIRSBz6L3f4lJQU6xat6esO2jGSAj+BStUeQBMpQcuzvqS4ZM+xWPclQWz1Vq4Zik1N6PpDU8FyQRB59uAo3/l2oZP2oTSShMVAIVjBHljTIvWKpKVeV2eHpqUKrOVWTkV7TLW6a+Qi/W6ck4qw0O/0dzE3pocgfPxGipE+xdUaMonQbrWnGIHXhMQTNJUgWLf9ZrooqQbWZ7Y88f1Pnl/97ndZWVzk7//SL/1E7lsRd9nSkX//vaBAbV6qbC9ZvAdlF7fdo7OsJu5UMXH7vUWu2ws2Lk+yaclF3FFT73zTjLxNaRshQ9lLdaf7mNv1eDsgaDMoSmy6h7lLa7+Tls1fy7yDjVDqjoU3myu74fLiHRqZ2HTjTYg98w6N822MuH2o3h28hdx0D3OPa0g622+EuYP93c59u6fRC3P3qklz536yASj1dvgls7GJxD36rBHvRtXmdrSZufc2M3O3Zc+39QvmbdtZvBM7EBu7q7nHdrjNr94ORCXM7Uu56l5f3uDLbm8Qcxdlinv4+Tvpe3Od7xC1WFcAACAASURBVOxK76xf8zaDh7jDve5V3jv5ZO4wpuT74x1bYEOdxAbbNuIONiY6nkXc0yXeHXsq3uXr27bpbG7vdzXyuiX/d52HJVw/3TxWmHc5M7jTSH232YW8g6d615q7x0fuZUB3/765hz8Q9/RL4q8xdxL3wDyKu/Wed7BJV97jxveGbJq7oIm/L9pGkiR86EMfYvLmTbrSla50pStd6UpXutKVv+nyfUWef+M3f5P//nOfo7+/v6vJrnSlK13pSle60pWu/I2Xv3bkeXJqim98/etMXLvW1WJXutKVrnSlK13pSlf+fyF/7cjzxz/+Cf7kT/70J7jqZgMNq43mcpIhWXQOFdbeA2k27ifr7FszHXCboI1a2rxvS4uNcCyhLVqrfU/YgIZp77zKf8nhr4TugFqkxmKy0s77ab4+2nQQTeT22uW2DsW5umNu3zVnsDij9qsc+mYj328jls2CtjQCi3HK+Hsi6aCIZGIQSQ5zlbWJcSg3Vx6RZNgie5lWhlUyxmHBTLv++YJ1rqFd3Tq4oI6mMmXEbKzUnfZJ53dBJbltajp3bZ37rG7/r72XTufxS53Li7SzoVckFucF2f6rux0Fqzu6v+uWvGwn7MYdwCanw7xNb95vm+HHbBkzZJG5I5Io+19W7vgOG/DzuCOzSb0yNps6w8aaJHmsnXvf3KGpzKbNrtk9M3vYjLlrf73dV0SnLxk24LTyCEuz6R55a5LpRrtpg6xExyfksXX5n3zLmU37jZuicz+T8y+J6CD0OuowxJu+30asbbKgDInHbb0oV5bNg4nO13mjTWcYOjbpJd+u+WZr5NBRmzMMxKayb86DaOO3cri
pdhKqyECeG5F3my+VuSgt2ICN5A7eINNxuukaZnM52dgXxKYKpeJ25GKG58wjA9tIwAyFqjPknm3fdv/UmzyQq28oNnZsYUwGMu3cO7cRO71tzNu01zsbJ4zpeFSTQwC6vpaI2+3otgbN4RvblDznUzMcqvj/2HuvYMmS887vl5nnnPJ1ve3b97af7pnpsRgQHAxAAARALj1XDAGkIpZiQCGRIYnL3QhxuW98WJKxehAjqGBoxaW40pJY7lIUvQG5MMTADGYw3nRPe3NvX2/KVx2TmXrIU3Wrbt8eA4BcIKK+iI6uW1XH5Jdffpl1Mv+/1HYgQ4m71iGbgT584FI2zdXE7CPlRF8ONn3/RCqqNQM52g7k8u7ruM+pOr1Gf6vqmj7Q13SPjxiERJoDfQf3aAU90GBfIO8Htkz268V0/djLeX3oUNONSzEQp+ZgLB6SJw+r1G4v0xUli/5i6W4s2l7cDwRAL8fqvn74oB/7W1y/r+2BCD0Q7AzWoz2wFr4fs9oNbXv3yNGJb78RVN2//93fY2t7i1/4pz//HTdkXl9fJ/lCnlgLnn6fx8wGFGKDEIJLcz4nN2K0gksPKGY3DKORodwxVPOK3ZJipqa5NelxZC9x6vqspJ5TeFowVpfEocd0lHBuJea541mO1gwb44rjmwkbswIvguNbGqVhfi+hFFtWJn2eeSLHdskDARMtA0owW9HcfhCOXTfEgaDcNlgpWJ/26OQkaxMey0d8FnYS1iZ8dscU998yXD01xSuP55AdwUPrEdM7mu3JAONlIReQSMHMjqZZltx/rUZlNMuNpQxzmwmXTmXBE7QyEusJCi3DZx7M05KSj70Scmwj5lijytyN59EsItU4mWqd9uQ87YxCAjOrHYycJx6ZxU9ARTGf/f45dhYmmaiWWfTaTF7O0Zpc5LNPjTNfVZjcKE9/zzhv3ufjteClsxnCnKAQSRa2Da/el+P2lM9EXfPq+TzH7xiyicT4PrmSz9k7MdWpPE9/eI5j61nWj45SaEOrmKdc7SDjGlcefQBfjmCYYvV9C0xefgNjyyhexMgnEHYHgQ/U0OL9SF4HSjjl9TRO0VvE4YNKQAMae5BosO+BretQD0CfJKh7+EkjjboqUCT2n8LIcYyZYOP0/fg2Q5JVjHU6CBvgG8i3OggUf/6DMzzyZotMZPnqh3xq2YBiwzJ98wbSxASNHYL2Gwi2cLieXeBxYJnI+1GUuQptDVuTUL2DUxlPI9hA0ATWEXQQRKwuPsb8zdfItAWXHphl5vYd4vIISMnsrsYowXjd0RtuzvkcqyTM30n4zPsLnNqIaQeCC6ezjNY1iRJoXzC7k/DiAzkyiWF9TBH5itnQ0g4kie9S2EzNcPZOxAvncxRC1wZPbCd4xhEP/vMHi2RDw+6Ih5YwXTFcm/OZrrr2c6KRcOrNFVQ7oTl5ksS3FCPL5rjitcWAk5sJ85stZCaPtpI7Ux7r4x7LUz5FC5kQvvJgnmJoGK8brITpO5sUvJjiTot4bAI/ThjdqbAxU+LEXkIQGZ4/n2N+I2F90mNpW7NXljx0M6IQWjoZQSGCW7M+nraMdwznblZ48ZFJxuqKZk7SzgqWqg7Ge245YbRmqZYVsxXNbkny3Mks9RFF6EmuzCseuxFzZDfhzaMZPGs5fztiZcrHWMF712IqRUUusWSMJaMtS7uGQk3itSVjDcFM01BuG6ZqgtM34dhawptHA9qBYK+gqOYFD68k7Cifh1YjRhuGidiyUEnYLCuUgXLL0MopBDDeNFyd93hgOeLmmEc+gblqwhsnskht2Sl5TDed6n60bajmFMtTHlMNw960YbwimK5r/MTyylIGGQumdyyjHcPcnib2BSUBOzOCqV1DKyMQCI5UEl5aynB9XDLVcAzrU5sJmcQSBoK9kiLMOqpEpaiYbxlGm4ZrR30aGUkrIxkPLfetO3++cSZDqAS1x9scuSTxSOkYKYFlfcwnr2GibpiqGayAak5yfCe
hmZEEGsptQykynN5MGAuhXIdsbCl2LKc2Yixw8WjAXslh+yJPkNOW89sxzUBS7LiBTTsjWKwZ5rY0WyOKakkSe5JiaDi3HBMXy+yO+ozVDXljOXsnwirBxoTixjmYX4NbRwIWqpqFqmFuw/DCYz7juxZSQsjuqMfCbsJ0YokXQsZXJVYIvn5fluMVzf13ItqB4KXFgKXdhI4vqGckvoWsspxbjZnfTbg97fBjE3VNsWN4+USOk+sxrYLH1piP8SCTwGcfyaM03DxlKTYFJ/cM2SZU84rIF3jCEiQQZSSeTUEbnqRS8qgUFXslRaDhvVciNuaneOYDS5TDIq8/MkmeEkGSxYsqXHn4AzTzBZZufhHBBWJ+FInl6vmHCIOAqdWNnhB7a34E4R1Bhou0Jx5lY3GB0e3XCMLrKL2K4aMI6sApYJcsHkFnhKBRx9MvACMIGlhyCNaAECN/EmsXEIwAeRL1wxh7ivVjZxjd+RIwBhTSvmAey1Mpdq0EtKmNfJAgfBDBOnAOR046i0OxPQW8AZQQXEk/30IwiuAigixeGIEZS3O8T23qcVSSw4gin/6Jozx2ocn63CizTU2x5rG2NMPeSIYggbVpj1xsWJ4PmGgZztUS8i1NPedi44mrIcuzHq1A8vT9AUd3DLEvyIWWakGhlWBxK+HPH8jy4cshgbaEvuTGhM+J7YRjOzH5juWN8wFNoRDSYjMBNxbzFIxHdrfCqKgzunMbmCT2zrG+dIqszuLXtgiLD+JHnXQQnQU2cLjAGeqT7yXTirHMI1BpP/sYcNthDO060j5CpnUppXkcTQfcTSwnELFH0NpF6hZJYZoXz0+zV5CUO5qje5rxqmZrXHFsO6bQsbx2LMvitub4Vkw9+w3QNur1Or/4i/8L/9e//e3v2GfOfmxpZSQ7Iw7lE0vBSENztezhaYiU4OKcRFrLzK5msqqRBlZKirYvqGUVmcii0l/tpZYgl0BrK4u0kE80ExXNrbJiqprw1VGPWEKUsQSJZWLTMNZyA4CpnYTlEcXvnMxyYdKj4wumqppcYjFCUplwx5QaDp3lJZblcY/NsgcJ/OdZn8k9zU5OcmlSEedHWR7Pc2tKEPqSpeUEqS2VQoZmocjUtqYdCDKRZWYnIejENLMenRQXJDtwY9zDAjtFp3mv+IJW7JGNLCM1zdkXq+kvsDLl7SoohR9bEiXIti02UyYpLYL08eIEFRu+eHyG1xdmiIpzPPT7e+jcNGG+xPNzAe3SCHFhnJcXfL5+NKCSl6yVFeU2XBr3aWV9Vkc9lsc8rICXjmawXoZiwxJm8zx8M0IrRW1klC+dmELnxtgtF7DSw08sNlNEYFifHKddniYszLM9N+6SpSwjuEniLaZJcwwISPLTOLTQaNpwMzgO50j62zPF2HXqQA3LItR3sSHABLnOZSCHpZw+NdlC549gsmV0/giVsRn8JKGdzzJzq4mw4CUC62WR2vDs0YCRvQSlLVcWFDcnfKwAv7mFiNtk97ZS1qVDLrlf3uMAhOK4G9A3OrDS5Ra0gBySF4EKgssI1hBUaWV9xjevI2zC5ohC2Ih8M6HYNuQ7liC2ZEM3a9HICBauxfgx/MVMwPxGwk7R47U5n72CIvQF5bomiOCNmYCdoiL2BKWq5eROQpSi6YyAfBs6GcHlaZ/pSsJYy9AJoJ1xTyP+01KGjZGA3YLC0w6FJrAkKe7s3MWQoBpiKGGFJRNaxvc0lYIkVILNsocNAsa2HFpQS8HKqOPyrkwEdALByojCT9xgvZaXIAQzb26DTRivuKddMg4xEhZvxsS+YH3EDShHWpbxmqaVlZQahkLHoDS0sordgsTTltlNzcRGm5cXxkk8d/5saDl2J6bQsExuGgptw0TNtUsjYLmguDjls1OQVHMOHxYklr28ZKyhKTfddXxjObsSUc1JanlJuWUohpbxq4L5ZcPYtmSyqclGhk4gGX0p4PSNiFzH0goEsXIowkYGZm4IjBRMVhKUgaO
bCTO7mmZWYaTz92jLuMFfZGlkJdJCVTiUZKlluTjp0/EFQWIZaenehjuRJ/nCmEe5ZfGVZrsoXUxFlr2sxDZcu56oGyJfUOgY7l+N2S0LlIVa1mHdxquG7bygLRyOMIgFWkE7cNdQBjaLkk4giDw4vhrRyEpujPmEnlPxz28ZZnc1UsPFSY+9nMKfaNPxBYWmJRu7TZdCX1APBON1TbFl8RNLsWPwtaXUMhQii7WQjyzljntv8aah3DDkIreBzuSeTvGkluslSccXKGMZaWrO3IrZKChiAUHicHHHlhOC2OJpy1ZBoXG4N5Mr4RmfoCkYrVuObiaMVTXFlqGal1ydF0zUEr46n2Fu23D0lnu2fmVWkoktR/ZiMtry5pgi8gRnr0XEYwmJcBixVyd8lrZiRuqGQFuWC4pYCbKRwbMwEkIjK5ioOQSrEVDPSbx006PVosICe6WAek4yWdXUs5KnZwJagWRr3LJTUJy44n4wedrFslZQzUsmGoa2L9goe2yPZNgrKgSCelYyUdcIArYmjvKV47PUyyNcPDJOozSBzk5iKbExMZE+U6wCdXT+KJYcOyNZOr5A6BARh0gdUR2doFU+QzRyH2H5JLvjRxBs4UdvAll05nj6gGQSWKZUeQPNFNnwQjrwyiC4gKWYTtyHJPkzaJaAWWCcJHucOHeCaqGc9hXjdDF3MIMWp9N8HQBtWpkxNGfS687iUHVlYBrLaeAqgjh9Oj2RPk+9nj4AWUYlFzHeokOwktApTiFUhiQo8/uLGUxQpJb3mLsWExem2CsXKLXdU+GVUY9qXrE6qhivah5+rUM2dk9e86FhvGbYKiragWS55GZBl0e83gZBGEGhbfl6QTqcb4q3jJFkQ8vMjiYTWy7Me1Tyks2yYmuswK0Jn2bJ4eiOXFoFFJZxtD9DIx9g/CzGnyfKnO753ZU9BipYPFqFE1hGMSwCU2lfOAusIPk8gga5vc0UR5vDUsAxuXNAQqa1iRduARrrZ+j40uEgY5ioaPej3BeMVQ3KQrYNhZZhtGYoduy7X/P8gz/yI/xvv/7rjI5+54oErbWIdMqrf56h/5eEtAd2TALkIYip/ikOIQfpJ929j3L27u8enALN2gMAFnH31LM4sHxEiL5d4PrW4AygheSBJSd9SDF7AJU2cE7uAdgRYNXg1M6hU3AHTGF7u3E5DpXzpTrg8y7PtTvZovrvzfZPK9pD76H/+MNmB0U6DyMOlFC87bKMe61yEgem4A479kDU9C1BgT4EnBj0RXfuWA5Mk/btImgPnzMTHIaN6vfoIAqpyxfmwPRsf5wOXEHuH9c9TB6YanXv2cGpacHghL7Y9+pBXJcAcvaQ9tY/Wyzv4fID8YsQA7cgbXfntEFEXu9b3iEsRw6g+g7spHbPZQ1iP3cMLC8Rb70YSN6LwHQQHSUOz0V3TUemjrb3eFzSfd/2IwffmhR4ly/6dzO8F97qIIpKHEL+sgfz3T1y9L0WJR1Ed8pDEJFW9MMdxWBufIulKXfVcH8bOYQW2d+exb3qsG9Jzr3Yqf3ou/7v9ed9dcDv8sDym95eqXLwQp49EKuH0kHtW2IoD/pepOcdqIh7YM8O7gDZW57Xl0uEtb3MJW3/so3DVqi9Ra2lx3aXCu7nYXmI4w/2qu90tavtLZU49LPBxaIHkJL2HpF32OKlXkY7gEzcf507sAbo4M603V2We/EnD2lMfctV4XBkYjDYxO9Kwt2+v9uPy3uW7a2hs9xr6dBdi6581/KFOKRO32awwtt05d/Imue//Ou/odNq81Of/OR39ELvTiccrnYf2tCGNrShDW1oQxvau7Z3/OQ5imN+8pP/NbVq9Tu6wFeuXKVoHkaqYeUPbWhDG9rQhja0oQ3t3dk7fvL8l3/91/zqr/3ad3Rhn3n2WSrVKlLJYc0PbWhDG9rQhja0oQ3t72/wLKXit//dv/uOLWgcx/xXP/6PeezRR8hlswNrZrt26Cqjd/C9/g8O2zDSfhP
3bd/hSXo4pYPHHtiu1b7Fde7CRx2GlXp7Ps0Azgt7j9u/i4Vm7/Jj/3H3XgJq372v7d0v39lmwm9zskOd8S7r195dd/YbuSN7V41+y8y+2+9YDl+RZw8p+r0azwHsnH2H9fqWMX6oh+wAis4evgHzWxzff31x1zrau2JDiLvam3kH57/rc/vW92bfQV3t14t9y2Pe6prvNl4G48QeCuWybxNbb1vIAwjFb/l93+MDcY+YOxgJ4m0r2d6dVw+tt0OO4e7Vsgf/enft+R5bnx/SsOw9+s935nz7FnntrWvCHublg8e9i+267TeVHd8+igZ9K74xdx2aBO0A6bP/hf2Gz/8OPz+gl7lLw/U2Z3j3USPe0R3bty2BeMcOeMeD5x/9oR+kmM/zH//gD74jB8//9J//c376Z/5blHLq/+0RhWhJ1scVq+OeUyFvR8Qe6KWYqV1BLa9YnfAodiwTTU0zECQSju1E3Jz3WJtWVKdgtGPwjMEImGpopqpuEX4hsihjeXgrJvagLZwitZETjDcNyloiT1AMXc3slAWZ2JKLnCLZSJjZcCr47VFFtaSolCRTtYSdBSi1NY/uJqxOepzYijmxnSC1IR9bjmxaFndjKiWJ36wwtrdHYB25QxpQcZsgASMs+VBzam2XYmWT82shYy3DZF1TLQkaeckD2zHndhLKrYROBpKMwjKG9vO0ymWkaZBt7DGxuUwu1LSKeXbm8khtuXwsh7CWJ1baFCJDO6tYOTnFtZOj3J7zaRfhzqTHhVMZspHFakmYFTywFnFqI2GhkmAkHN+OObcRsT7hcW4tZGvaI/Y09bxkZdJjZcahwR5faWN0TNMTCOMoKXsTReLiPIWORhi49UCG8e0QQwaPTSBBJas4IsUaYPGi6+zzQEk/U+5vlQdCHH5IAfPActrCGjiU3SbQBnZwZI42qrOODPfAOjJD0G6Ra4c0S3leui+LTDR3jhSpjeU5vxrRKHrsjnscWzEs7MaMVipI28APl9OWm0mvX8ByHNiCiVGCzjV4cBE+dh6+ZxQWJ4EmhnmcYnsvLdsO0GJq4yKCbeK8x0w1QiYdcvVdxio12hmLtAYvcb7Pxi6GjIIPbkYEiaXtC47uJsSeo2fsFRXKWu7biFie8WhlBYXIkkksWW05sxWztKfRniDyBefvRMxXNMXQcGvWZ3PUI/IFo5GlVpRcWfB5cylAK7hvo8VktcNYI0ElDlBcXZgliCMaBYlRbi3a4ysh5chiEWyP+dw54jNb0Txyp87jdzoU2pZAw0OrESc2KkSBZKrSIiwW0NksUu9S2lphbO0SKtwGCyoK2RpTnF+pM7p9h8l6wkjLcH4tpFaSKA2NvKS8t8lMNWF6r87E9hoiCXlk+QZBq4724Mhuh5Vpn9lqQrFeJxNGFDuWa0cDrh8JeHg9YrxpOLkd897bMVkNjayk1DHM1jTKWOYqCfnI5Y1jOzHrZdXL8UFiKYSGmXrisGpZycJughGO0BEGgvmK5kglYaKRcH49ZrSpWagk7JUUtbwk8aDYNlhgZcZjdcLlyES5jnCqrtkuK+YiQyG07JYVgbZoCUY65nPkCXKxZaqR8OR2QiE0lKuwWfZoZiWNnGQksnR8gafhxlGfZlawNukx1jKcW9UY6cSP03V3LyhLLoZYQnPEcGU+4PZZydq44vIRn44viZVAaXdcpSAJfafyL4aG9bIiiC0jbcNIaKjnJLW9HNLC5phie9Sj1DEUO05hvzrh8Hy1vEPgBQncnvIotzRWQCa27BYktYITbYW+oJKXLE94xJ7AN4aTOwlP3IlYHVU93+Qiw9GqZresuHokYO2opZYTtAPBeMsggcqI+97yXJbtUcXGhGJtNiWHWMGR3QanNmPO3TZkYyeW7gSuP2lmJeN7oCUsT3gUQ8NYx1LNOyqJv+ezN+LKlNfOD9nIsDztERhL6AmkdWK2k7sxJ7c0k3VNJxDsFhz9xBFnNA+vhVw4keHNpQybI4qdkmJtTnC
i6tCVIxXBjQmPREGiBJmky/d1RA2sJZtYyi1LsaPZHHW+m65rF+9Ri3JLc3on5oXzOfLGsjnuuX0LGMFK2J70MARAjB/fJCzNM7XXJB8miKhJks2468QGv9PAb97Gb+4yc+tVYAfLONBGJutpK9pN8/cmkqvA7bQP2AZmEfkGTI/A/BTKu47kFl0Kk4gMnbExxqubWO4D6un5YmAdaa/gBG0PAop8uE5cKJJ4C8AqjE+A3ALWYfYOPHQGMikmdaTjfl6fzsCDx0E13T2aNoKY9cVzeMZDJDF783netxLhtfaY2N3h9ftzbE5l2SsrpmqaMBBM1zRrEz6TNUOp3qLQqJGNLaW2RaSi9KN7CWNNTUunewIoekSaQmgIYssjFY1noJmV7BUVZzdi5isJhY5htKl57FbMbE3TyAqaOclcVZNvdWhOjmOFojbxAIk3i9ARo9U9MjvLSH0Hv9nAUgIqaZ8qgSqCFbL1TV5/9BSCmziEXZz62YexIhCj9MUUKVh2/WNK2xCsYD0JGFqTM7SLOcaaCfPVNE+1DJnIUOoYCh2Ha81oSz60RB5cn/bfnWDwy1/8Ij/3sz9Lu93+jho437h5kz/70z/l137lVwBoB5Jrkz52z+fVWZ+XjgQ0M5KnroS0Akl4vsniLcv6iOLZ+YCJumFhJ2E779A+D92I+cKZLC8fVywvGWZrBj8xRMDiVsziikZZy2jb4BvLRy51aGcENeGTCy07ZcXcdozU0Akkkw2DBq5MCQptV3GBBqPg2OuWWMGl2YDVMcWdcY9jawnX7oOpquG7roS8ejTD42+GPH4lRCUR5Y7l7JsJ5y9FrE54BFsrLFy9TTaGbGgde7ldJRtaNIbRaszjf7vM+PJlPvqVOrN7CQsbCddHFdtlxVPXQr77asjUbkQzJ+gUPTTz6NwolalJlN2itHKHhavPU2pEVCZHuH56FC82/IdHy0hr+IHPVyg3DI2CzwsPLPKZJ+Z47kyO5gS8cCzD//lEgWLDYkNBvSD54Asd3vN6xAPLIVrBw2+GPPVCm9eOBDz5fJurSz5tFbE9Inn5aMBLxzL4EXzfF2uYpMPrUiJ0iDJwe3GMaPwMEzWN1IKvfqDA/EtNtJfHM1eAGM+8hGAHeN41uviP0gFyJ22w2zhNcQcmyzim5ET62WkEn02fU95JG/nraSO/nA6yqwThBfx4GWENY5WE7O4upa0au5Oj/LPvKuDHhhfPTrA1PcqHnm2xO+pz64jP+S9pzl3tMHN1BcUWXvwsKLDk0iQ8is6cBt6AxUkynT+Apx6EH/4ofO8cnD4CbKPV6bQDuJ52DBeBHY7c+v+QXKNd9jl2q4nUNcp3bjF7ZY1awSK1xWs3ee1oQLFlybcNiS/42Ksd8h1LLSe5/0ZE6AtqecntCQ9fw/tebfPCUoZKXlFuGvKRoRhZnrwc8tBySOi7ZPuRZ5qcuR0x2jR89WTA5Vmfti+YbRo2RhV/cjbH7z5YIPIE3/3MLks3a8ytd8gkGmEta8dOkm132CkpEk+gtOVHvtJkcs9ghODmTJaXTmc4sRLx0c9t8UOfrTNe1WRi+MhzLR776hqtnOTY1QqtsVGSQpFArzC+9hIzq3+FH14BBF6zwbW5gA9/ZpO5669wbLXD9F7CR77UdO1MW1bLiunrVzh5u8PSlU2WLr6ObDf43s/8HbnOBrES3HexzvPHspxciZlc2SJXazFW1/z+2Sx/fCbLD7zcYn434bHLIT/2pRbFjmG3rJisaU7eifESy8nbCaWO27zhoWsdvjbh9+gEudgyVtecWIt55GqH2wXF2RsRSMffbWUFZ1Yizt6KWNxN+PCrHWZ2DedWQm5O+qyXHTJzvOYGrF8+4fHKkUwPb4eApY2Y69M+83XNaMtwc8onF1liJUiUQ8y1AkGpbVjcSfjQtQ6jLcPkHcmlCY/domSnqBhvanYyDm/3+XNZ9gqKF49mmN3RPPlCSCIFsRAcW3O4QRtYyi2oK9idTvj
DM1m+8l2C1+Yz/B9nsjQzktBz57MCVsYdo/bidMBYw/D0tE++Y5naM0xWDZslyebKCJ6FN+d9rs/6jNc0Y3WDh4yMYgAAIABJREFU0panF5xf10cUWkI+tnxmMctMXWOAYmi4Pu6xUZZg3Y+c9RGPPz2aIfIFQQKPXgv5sa+3eWkuIJZu0FlqG86sRNyY9PjTM1leuR+2yopaQTG/myCN5fa0pNQyvHi6wI1ZjzeO+LxxLEMjcIPas69t88SrHd7/tKbUdli5Wk4yVtdUSoq5O5ZICb64GDDe0MzUNRtlhTTgLWe4Oa1AQS6yXClJim3LV475BNrSyLg6tMby2I2IRy9HLG7EVPKKG6kv/MQyvZPw0a81+X8eKfBHD+a5PBNwbdLntVOSx1YjQl8yfUfx6YUMsXI/DrKRASmIhGCrrEBAIbTM7MaMVQ2XZnw6nmBpPebknQi/WWV6J+bx6x1+4cNFim3L5XkfkWg001gFNxaCNB+28eKnaU6c5djlXUbrMUpXCAs5hDaUmppMY4eg/SyZjWVmV38LuJo+ANlB6a/h0GiX04HWKyh+F/hc2gdcxHIMOXoFjk/BiUVU8Bco/jZ9oltExprazAxz1y8TB0/iEKc305z7HJJPAwEJHwZ8ytVXaJfHiIOzwJfh6CwErwPPIY5+HZ44D9O7wBjM7blB+oNFePwByKwCLyLNHoI2V04+iW8CZNxh+cwIn/hak0z9NkcuXeM/fk+Zy0sFbk36LK05VvHx1YTXjvgsrsVMrlaZWNmk1HLoTJkq4s5dj5jdSdjWCizEUuBpSyOjGKtrCh3DU3ccZnGnKLk97vPdFzqcvh0zUTXM72g+/tkWp1YitoseewXJ6dsxI9t1KrNHsCpga+GDRKVTyLjJzOVl8pXnUfo5gs4ahglgBXgpfVh0B8GXKO5e4o/f+wSSvwVeTB9cXXL99Ow40MIzv4ngq8AUgpcQXMKxur+MyYJAszd7nOp4ibmdhDMrMaNNw+xuQqltGa9qRqsJWkExspSbhnZG8NtLmXfPef7VX/01fuZTn/qOGjx//OMf5y/+/M8Hpphk+oReWodQwzrUkACE2Z9uVf3TDn1UG5WeQx6ccBGH4KDk3ai4ftTQQZTQweUY3c+6U3y93RCFww6p9N770XNGglXda0qHJxKD01YDKK+M49QYnx6iRrGPC+o+AepN0R2c5BAi5Sn2AX4EZLrTNF56fWtR1uJZx8nuli1jbY/C1r1/5OAObFalCME+RJ9kvy4QYL27p17crmMm9bNw51D07TII/difQUTRIdNC5uD0msU97+yHJh12nn2WlcMCCZCuHqbt4DdcDNme7/YxWOLwSLE2dRjpIN8O7Py1v5PVYWX09t/rIssQoAaxYvJAsbtxLvpWKPRjiKwUPXzZXUuM+sLRSHrxKQ+JL89C0PWFL7BS9CGv9qdk+2vSqr57og/bFQisL3rYLKvABu4Ji0Mw9u9EJdMMIHsoL2lBBwKLdDEqwHiir9bdcV1cXBe21wVtiXRXTtm/e5rYbyt+6tfuznYcwEIaMbhMq1sP3sFp8z6smWJ/Z9EBVGUaLl3fWyEGcFK9HGj7I7d7vBhYliCsfcuJ1N6ueHI/Rg4SGrstqBtnRu6fwci7p0+lFQQ2raFBEuAB7JrtoUe75+6PQZEGnLSDuD0A396dm/0Dy4PkIcsuRF8Y2TRP340Ldbk76MfY2btxbrLPXwNYNk9glXCxfojvu+1T2TR3H0BuHYaAlAyiEkU3TuRgfPTHmPEEWWvxU/TbwTZ8166f4l5LgcTgfYn9yrQpR7XcXx/2XpP2aa99sM8bCOIu+C7DPixTsM9Q6M8c2b7o7Mv3tptj/fRfXxEtB87bn3M9+ndC7EaX6L7uHatSx9u7Bw62L+f3RYawZiAGXWxILNJl+XTX134kperrj6wSvbZhD/XbYBDaA/jEbu4wfW3Mpv2yEQdySx/STliDsOlOjrLfT/JAvdJXT25jI+d71Z/
p+24801enqu9zxT6o0u6P3Q7kqx6/98DSQy+N83dlP/ez/wPPPPMMr7722nfEwPknPvlJPvDhD/PIww8ztKENbWhDG9rQhja0oX0z5n0jB33us5/lI9/7vdy4dg2lvn2Zb1/+6ld57pmvcfvWzWFND21oQxva0IY2tKEN7Zu2b4jZdurkST75iU/wP/78L3zbFkxrzT/+8R/n2tUrw1oe2tCGNrShDW1oQxvaf7nBM8D/+q//NZ/567/kjQsXvi0L9i/+5b/kU5/6FL7vD2t5aEMb2tCGNrShDW1o3xLzvpmDX3z+eY6fPk1le9thTb5N7ObNm/zO7/wOu9vbh35uBMSpejIrQSXQDgRaOCRRpuMhBNRzEt9AoiD2BYW2pZ114qGRlsZLnHqi2DHkI6ckDj1J5GlMV1CUXq+Zl6kIQzhFuC+JPKdIb2QFI21LYCyJFIS+Q9a1MoJ2VpGNLInv2Izl0DgBSSQJYg2CVHXv8DFWCDKxQSsPFcdkQx9BgpVZ1suKkY4h9gBjyEYxnaxHoVNB2Bqg0Uria7eAPh9apLW0A0kuMgiT4HdiUII4W0QkHYJmBWgi5B5CxwggaDfJN9qgY7KJRSYdTKzJxGWCdpNyM2EkX8Qzgpk9TegLipGl40nGmoaRlkbEFi+yZHWInyi8TgOZaKZriiDuUOxYpDbkmlVm6obxpiFoNfBabYxRzHVCjOehkoRcO6Kd8xHWYqVlrB5jpUSQAA3apSly9RaOrGFxyBuFU12X+n5japyyywMse9NjjG3upO/r9Jig7zwRlLIQWogMlozzlYloZRXZJEKkkuaj1QgVtSl2ciTSIc8KLUOhk5CJEzzdwQoPrXJ42mBTaZq7ZgImk/4cFrBQcmISkYrpqiFOrNYEcun9ZXHq5CJOVFFDJZYkIzGZIiK0YBI8DbHnrpRPEYphRmKEpZ0R+Nr2BDLKgGecrCJRoKVgrGUQ1hIGoIWjE3TbhTLuHF2hRtsXjDUNxdBgpWtX0jiMlZeWTXaaICxWW0ROgI3JRhGRJym02vixABsQhB2U7aCiCsXWGON1Dz8KkVENJX3axQzNmmBkr45MKozWNlFxDWEn8MI6lhpOiKKBBvlOC6E7jNT3UPEugoh8GOOHGtnpYMkho4ixRgtBk6C1i9IOdyVx76mkQa7VRHuSqWZCOyOxQuFFDYqtLOO1HB0L2chSbht8bfGThPWxLFHGhVSc4v0qZUW5YwgSSzaCUktTSP0WxI7vSiDIxFCuRvhhi3zbw48V9cAniA0GQaCdyKsTOAxdPSuYxCHtdEruyKVIPGkgzEIiBVEGkrymWZMEiaaVkQTakRsS35AoQSJcHHQCQb4DYZCKmpSl7Us6nqMXldoGLQVzuzFB7ARPiYTYd9eU1hJ7Ll9qXN5GgE1cXMZGooylrJ2qRxrbw9S1i5YggUzicpqf0gKUtuQji59YROCoItnY4hmLry2djKDcMYzVNLnYEHuSMJCESpBPDJ52uLuOL8gECfWioKFde8gmhpKVdDKCRlYSKYG0hommIQwEccYhSpW1xAFkfY1IhPt+TiJxpKaJ2r7v86Gh1DYoTzopVeJygJdY8qFxfuwYQk/Q8R1SVVjwNChh0MLS8aGZFSReissLLRjnl9m2QRjLVF1jEvBS3dloy+DHlnZOoKUTVxUi8BNolADrCEbawGTLMNIx7OUVEZJWRmCyGr8hONZxeUAApY5lN3Z1HnmW/K7zeeS7ayhrHbK10yGIFBaNalcJQofELHUSmr6P9Xwa43ny7Q75sIDxA1ScSg8tJJ7Cr+8haKDCGsQCv7mFineBBkJuge7m6naa52MggxVFhI0gL6EVsS9SUxCEUM5A4Lt865S9OJKRQbBHUN8BIoSt9PUPufRafpqPG71rqqTthHS5LOQzYAJQgbtGJoBsARY1FAswUYRsFpTC6jkEt9P7b1Ns7CFMAXRCEFn8MHb3gc9spUUuNJQ7BqwbN3jaMtFw/0e+JBM6xGS
hYym3DNWCo+5kYs3DtQRlHApTasjGhlzk0LcWl3dGmzFTDdeWlHbjpWxk8aIEiU8xNJQ7GmEsIm6TaQqscPhEoetIalhbxNIBOgjaqXDPQBC4vhQfJ7bcY7aynvq1/1lwGRpR33sCR87qWst9ktQBSewJgiQmG/m0fdePtbISkbaB2HP5sBhq9ooSKwTzoUX98i//8i9/o4PUXC7Hrdu3ee211/jAU099WwyckyThPe99L3/1l3/J7OzsXZ+vr6+z9VyJC9MBZzdjzq3HaCV4dc7naEXzb8/nOH7FMYN/76EcT95JyMWGGzMB77sZ8fJ9Pmdvx8xVDMfXNGevaE5sJFgl+av7A2aaroktT3isjXgsVDQZY/nygxmm9iwTDc3TZ3LM1DXLUx63Jz1eWsrw/lsRk01BLja0coqlzZivnsnz7IkMYy3Lpftg6Y7lA6+1UcZyfSTLE5c7eBrGG5pM4hiEI7WQkVrMy/cVOf/qCiVdIr/zJro4yS9+6iQff6XFjYWAU1c2Gal1uHRmghMX/gAvuYigzMbCWZQFrQSljutcXjiW5eHbIUvXNxm/8wa18hLV0SNMbXyFUuUCkmdR9g2gTGv8EaauPs/crYuI0GdjcZrzL38dL1rDFOY59sbXWLp1h8XaKJMNyZMvh/y/j+b40M2IlxYy/ORXGjxysUbSUWRagjMXr5KzWUaWn6VUfYmHLoSUtmpMbmj8JGbx+t/w+Gu7nL68w8j262QbL4OZ4uRmg87kDIXdHWauX+Wlh5cYa7gByfkvrZOUCmT3nkfyWS4+/ivMrPw6jntscYiiMmCw/FCKt1FpghqDh0qwss4f/uBv8vAbnwZmgGtuQDo+Ae1r7vuFNjx8FGQIe3tofgTF5xFmlGff9wgnLr4G1lKZnuWnnt4lv36JibjE2mSWZ09nObmuue/iJiPVBuW16xhvgtZ4kVzzebT3OErfQFBNB+RHEdyA42PwofeANrAwD7sV+JPPQ2JSksA4DrP3AFDD8MNAFsEqkkfYmZ5FZebIVG4hdAzFBa7PByys1RCZHNnIcGfOp1mQvHYkYKamuTnjs7CTMNLUlFqWlUmP2T3HET27EnNz2md3zCMRgvvWEhLlBk2hL9gqezx002Hu/tMjeX7s5Tan12NCX3BkVxMGEusLJtqW99wOmbn2efzObUS7hZQl/NZNirWAWyemeeDrFwgSxa35Mu/98itMbX4GP7zAzKrl1MY0I3euk20+jdds8/n/5j62jM97nvkcQfwZFm99Aa9TISqdp7j150j+BkEeQR3Bq4xtn8BrrXL81tcJ2p9FAOXGPGOrN8g1LnP53Fnuf+Eyp69ukNFfIF97GS9+HkEHpZsIniHT0kysJTSmF3n0SsIXzuc5eaPF+PbTzN7Z4cnVKf7RmxGze5qRjmFx0zK1tsM/+4k5Nk5LTt9KaOYUrUDwH54s8VNfqpMLLUsbCQ9vJ5xYjZmuGGb2EqYqCQjBsdWIjz6zRmnzEtN3tskkOa4dK3H2So1CCHsjPkEMt2Z8KkXJHz6Y5eGNhFOrDm3nWWgFHsIKRtuaOycM+V3J7WPQPNfhdpxnJLL83ekMj6xEnF6N+fJ3CQoVxbVxj/m64ZWFgFJsuTwb4BnL1pQFrbg87bBVP/xai3pR8t/9RY1Cx/DqiSxzlYTlSZ98ZDEpHm9rVPHiTJb33Q5ZGfEIY8+h3xScWtbUCooTu5r5SoKw8H8/VWBzMeSxiwaJYKKhuTrtc2Q35pFbMVZAtaCoPtDh/teg2LE8cCtiek9ze8bnoVsRH7oSMtZw6NEXF7OslhTntzVLWzHnb0d8/nSe2dM7PDOeJ5y1nL5qyMaWZkFRig3PnsyhsCxtJZzZTPjao4oXz0iOrwqObCd88T0B03NVWlt5akXBWskn8eBjL7d5z9WEpfWY0aZlac0xxUPPPVh54Fodz0qysWJ6N6JW9Mkay6XZDDltqRYknobpasLXziuObMMbS4pbBZ+jNeef45s
JmRiuTfl84rkmo03D6Q3NfZsxCMFuQfGx11uUW5avP+Fz7LahmZdkLMztJbz+pOWJr1QJOpY/frjI//SlBke3Y5anPF4cy9DJCfwTLcZuezy8HXN0R9PMSR6+HtLJSjaKimsLkh94vo1nBRujivG6YXXK45EbHe5/4Q2KUQZr9yhtf5l8dZQfe9VncbnG6myOo7sBnzs/xwe+9Aad8Wmm9jbIND8PHCEpfIjGWJbZa59G8QJBI0YkAcXKv8ePvgY8j7JPp7neQ7CH4/O3gFm0/x6ksXAugI3d9GFD0Q3MF5rw2Ek3yLUWNhrQzAC3EayguEVhbx1BDWU+h0PVRRg+guAlyM1AEiFp4xjQO3jtRTwD4uw2PHIanrew5MHZBTh9AhZG4fyS+8yTcP4MAOHLH8CLv+Ae1HCL6Y061i7iNe+Qs3PMrqwSdC6g6PDoKzDVzjNTcbjauV1NoC2PXo0oNmPWJwNGdxu8/NA073s9ZLya8JnHCzx2ocH0TotzVcluWTHaMCzsJlgJD9yKyHcsL57I8r2vNDl/YYuH32wy2VRkYsuFUxmC0DK3vMbOVJmphuWJN0Kk1hQ3nqNUeQOdXcJmJyiv/xHKfgXMPB6vA19Lc/CbiOweHJuBnXqvD5O8xENXvoLUJ4Fb6YOrLPAo1F5NX4u+B1+t9O8VIEAlTWCJa/efZeniGuWkyM6IohBaLp7MpCwUR8QZr7qHlr/xwTJvzAd8362Ib3qf6n/zm7/Jr/6rf4W19tti8PzJf/JP+L7v+/63pWv0NvYy+8ASIQQdIbApw0iKfayLQxKlT3RSxJ3buap7dPfE/VQe0fu/H83VRd910Wka4X6Vp0ghK3rbM92FLep+B9tFsA1igdwFuowkiyMUOai+HaDd2N4TGtG/D5HYxxTJvt2ARIrGcf7q3pfbQGNgDy0LwlhkbHs7e3WvJ3BPOmTcxdPYfVRfirfpoo66pDWRnkCkO+bJ0BVYKNy5tEWEFhHZFNOTPiVT7ooC9x0r+srndV+6cxrRj7eBu/fuOsgXTHGA4iCurlvJ6U9Waw4wlEQvKkzvbPv3K2zfN2QfRifdWKH/ju5CTR28zKFv9jOCDiL69ivaCtl3lNiPtwNnsYfcwwCyLEVb9dxm3ef2QECLvjYpe/G8j38TVvS1s/0928TANQXCDO6uJa1FYBDdHw52/3grLCZFa4FBajtYbwfiQCRpvMdmwKNCDzY7KWzffZqB+8VahLZ3+U1gEemT0S7mq7u7qbAQ9vhO+xc2Yt8HvTxAD6rX8zdCIP30CWOyj7qUvbKn9y76dlNMK9mmWMf+KBGyP4cdQGh141eILt2ph9McpLmJNM+JHurN9souBnluA392W4fop1wdsjNsipuTYiB/iMNaderALm6vH8UmrMO89RChYh/RJ4QYxLl1EaX9G0b2odZEev7u9wbajdwvqBH7kd07v+2LjYGyu/Ylrd1vm2mc2ENyUy8S0zL0AGdiP35UX5npi0XTnzLS6/XjIhPrBhn9OMv+eunvS/bRfiLN/eKu7wsr9tuWcHlcCFCecG0sjT0rBMKYg71wr7yub3NPve2BHSz3N8Dqy9kHc6Y4mCfTmxd9zrur3zD7+LO+HGIH+o/+XqAPKyr7Al70XV/s9wm94/s/7/Yjui9nWZHeS5q7dF++2A/9/UwrxCFjjf3iKjvoigHaXC/OLUoOjnVEL0c7ZOS+J9OM0zvXgbFE+nf3O8iDvtaoSL5FX3evvwfrqxsXvSHCAVzkgK96Oct+c8s2uvaHf/THvO/JJ3n2mWf+iw6cv/bcc7z03HNcu3p1uCBnaEMb2tCGNrShDW1o33KT34qTfP/HP4ZUit/9vU//Fy3M93/841y6dGlYq0Mb2tCGNrShDW1oQ/t7Me9bdaK/+au/4uz9D/AD/+j7mZiY+AcvyL/4pV/iv//Zn8X7NuZOD21oQxva0IY2tKENbTh4BqBcLvMb//tv8D0f/givv/rKP2gh7qyu8m9
+67eo7OwMa3RoQxva0IY2tKENbWh/bya/lSf7iR//caamJvmTP/uzf7ACJEnCUx/8IF/8whfeFS6vK4ITONSKMG7Rft7SlRgAEKce6moUusvZEyH25YDpZ93l7lqCVRYh7L4IztATpXRFG+lydZQZFDLIVEdg5L64UFiH/iI9RxejYqS7Ry3cPyMEFonUBoFJhZwWrCZJNBiTCuicmECapM8nYmBdfVdAI7VNBR8Wi8UIgbQW7Q0K7Wwq2hLd73kCqbXbtx6L0LonCuieG2sJtCuj0iCNAZM4kUBXOWj0wHWMlKk4UGPROM/vf8dKJ5aMjEjlBgaljfNPXzl7/k+6KCL6xBz7/4xM2K9d07sVmcTpdzQoD7wDwoSevqT7XkJP5GKdj7oiiJ5AwTpxi9QGabWrJ0OPOLCv2EoYFMik9ZziutAakgSS1LkEqS8GRTXaV73XRgoMDtlolYRU6CVNKuroE1RgQer9dtSNvaQvJIzoxrPtKUe6IkthbO981oKRglDvty8tXBvrXr/XbtL6T2Wr7nU3lqQTjSqd+q0rDlSyFzfd91RiUeag/wzC6J4KxkkKu/Xe7+9+YWGCJUHpGCF0+p5N43E/xsBilHLRZyxCx67tCdJrCHft1CGi63NjGEkMJKmIKhW3JLET/PbHjDDpP3BtzablMRabtlVjnUhad0VpZr+ejAAhjCuxEMSi5xVIxdLWiH3xjHbIQgtY7fJYt757kp+0GpJeHnXCMsOg4FkZwJhebjPCIfH6Y8kId+9dUaRKk2M3tw7IXC0I7XxkUqGfQRCZfTVct80Z2xXo7Uu84jT2u+XWfUIhsPua7FTg1xNIpmUV1qZ+67t3uuJX2xNcdvsEaSxCO3xjzzcmzbVmHwdpsL366uZ83dfHSOP6H9NXRqv3+wyXd23veLp+SusEk/rY9in80mNdm0zzdJ8OVmAoGucTk4p+dSoetH31J7rxlrZeI7sxY3ufJ1L0+sz95GDSMBfutTGpmNCkeTDtN/rFyjZB6EHhr0OTmoOjgPSV7nuWaNJ2bCAxONJS3/eFAGMhsRCbe4wwDHeLEDVQGHRe7960yzFJt2Myrt9L0v4v7kuOUrr8rvfLPyhMdLkqTrpCUdkTgXezUzfXuri2A3HRFV7rNKi7cIBE7ot7bU/g71ziBO2uwi3W3ba1qTCYNI5xuYhuP7D/zw7Uh+7lz94dW9K6YLCP7fnZHlqvLiv0C2a7VzJpX6hROsHEPRBt7xQmFWbH1vWJAoi07QnavylU3WH2iU98gg+8//380i/90j/I4Pnn/uef54H7z/EzP/3T7+j76+vrbD9b4vaIx7nViPGmpdwxTLcNi9sJRU/w6oJHNhFcK3lcmPZ5aDPmyF7C0R3N353Kcd96zBfO53lgOcJKGGkZjBRcnw6QElYeSOgsRGTHO1QbOU5vRBzZM6yP/v/svXewZcl52Pfr7pNufjnNmzdxJ+5s5CIuQBBgXFAwIIpBtGzZLpcsUgwS/6BNl10FUmWZrJJEw2ZJImW5QItmyS6QYAJJcYlAgCDCcrGLzWF20pvw8rvvxpO623/0ue/d92Y2ICzAsu9X9Wrm3ntOn3O6v/66z9f9/T7FqZsZL84HlDBcPiU5dk0z3zb0Akm7JBnvG85eT2lHit95V4BNFLNdF1V65e0J9z5umehYyrkhTOFzd5f4rXfUCKTg1ozHkfWcxA85vNJk7NZXac8sUNt4Di+5zndf6jN+7SWm+iH9msfYzc8xvXETL7sCHKJff5C02sAoHDNWQZjDB57ocXg1pbp+Az9f5tL5CyxdusHKibsZW/sksArU6Nd+nPLmZXy9jGSd6+ffw31P/RVh/2mkXqe+tYWnryNQCDVLlEvKnRY1v8TytMePf7HD0WefprT1PKGsszNXZXyjSbBzGc9eAxSWCs+86wJLL3yeqP8MkidQNFG0EdxwKxFn3kPt5ho//5OneOfL2zS2nmRyM+Bj33cY3wrG4wCVWlT+JF7
+10zd+muk3YTKJEzXod0DDgPHERxmbfERqq0/LoxrGxplWG1z+tYlvHgHWIHTR2FxCi5dLTqsgawH1odbbTApkhiA9th7UQYm128RT58nDSTb42Umbt7EzzSB3+Cey13mLr3A6uIC5TSgNTeDApQoE3aexNMvYZlG0HaTBVLgMvRiWJiATzwJV5bhiWdh5SSG92GpoKMLqPyJ4vmmeeahv8fcjaexTHL17Du5NhdyadFnMp1EhBN0aiFHrm/hd5tk1XEsgigxzG5pztzK2KwpMl/w7LGAfiR4ZS5gcSunFluuz3iMdQ0TBWJLWsHylMdURzPZMUTa0i5JFtdzXjoZct9K6ugSoeTJt0o+ebTKsc2MC9dT1sYU8y3D4isvsT7/EKWeQSZ9enNn8VvrlEWV8tZXCHvXWVzLkKKKl72AYJnNpe9BmAoXL8wxf/WLgGJxdY65VkBj9SKSVeAGgi38boiylxE8h6CJYAVoI61HHpxBso6wjwPTyMTH5zEkz7O4skXUv440m4UePgHMYJkDmSDsM6we/3vUty8j7TjltS9x1601fOoE/c+SeaeIdlbYXjpCqbnD1GaL7niJsatP832vwMMXFdbC7I4mSi0/8mSfuSs36FerzNy4RR7WqDVbVDs9bi6VWXr6c1T7EG5cxeuts3b2ArX1l7ly6hyLGzkXj1WY2ehTSwRPnohoVhWXpjyOHG0Rb5fIAvj376py7kbOn5+OePcrMRtlj3Xl89DVGKElSSfk0iLMbcIPP9njgUsJcSDZCQJakWRlXBBoiVbw2CnFRAvO30pp9ODKhMdU13B4J+f8csqxtZylFy5SSgy9RoWbkx6fOFditqvZKUs26h6tsmK+a5jsaOJQcbyZc6KpqbUFnoadsmS2o1nczPG15eiO5sJly5XxwOERq4LvupiwMeZx5mbKS4fdff7JbIV3XE45upZTiw2dkuJXvqfO2y/HNHqWS4sBXzkWcbOhOL2WY+qa6TUIteGzR0qUJxM8LGmmWFq2/IcHI95g9bLOAAAgAElEQVR2NadZcUzYjaqiH0kObWlWqz5HjrXhWkgltVw84jEz0+X4l0O+96mYY+s5L84F3L2ccuxmyl+fjbjnq1dpjzfwNTx3NGCj6vHAxZiXT9TYHPPolz2mWppG33KopfnTeyosHzece0XTjSR3rxqm2oZqBpGGM7cy+oFkedrn0Jbm2FbOhUspT94XMHvDcGw9I9Dw4pzPyZWMhW3N1YmA7ari8ZMBP/b5LuXE8nK9xDu/sk7Y2mIhaLC0lvPikRBhwfcExzZzVm3E8VXN88cUb30hZWkjZ2VCUY4tn7/g8/BzGfW+5cVDAdNtzRfuLrNRkXz3Y9tUNpfBCoLeKso2Wb/rYSqba4Qbz1D2Fxi/8ix3rT+BimGiDeXNZ5D2cWAGv2MJ2y2UuY7gJQz34dnHENzA8gCC60AXw3+FYJ5+/SeQyRySp4AO0lwBurCxAvwtHJnjGlCF+w/By5vw9DW4ugHNRRyq1ABLWGYRfAnYAhZwaDSL5AaaH0emn92d2DkU6jUkNxD2Gej24MQUvPAkbO3AzTpEO/CnT8GFJVhdh0YVPvsE3NzAW3kKcocrLZgYeIlHzjz/7h+f5P4ne1g5S1o7geq3uHh2jmos+aX/YpIj6xnP3xVS62jGt1tUY4uXxpSpU+llPPqOGuMdw/FbKUGS8qfvmmR6K2N5wmc8sbyyGPCOZ/q0S5JWQxLllvmb2ygNn3zXFM26YqOmKGXgqZD51Ral3CfxBU+fL3H0pWVAkU7cTe4JKs2/AjSSZ4GLuDwE28CGe3PbamG5D8FFHOs5Lp57BziDw/UNXqO7wDyGDyCQGN6G8R8kD9+Hl8cY+UG0vQfFFaY2rnDx3nt48myDiaZGWtiqStoVSbOi+I/vCJjowHhHc6xvqOSWpOCtf1OlFEV8+J/+Uz74Qz/0pk+cH3v8cT7154/ykV/91a/pvIHn0Xk6wcsdlN0z4GeWWLm
EAQJY9Zw3zdMWL7fowkPX9cWu91da97syzoPW80H7FqUMmXKYuCAZeGwH3glBUsDqw8zuwrcsAi9zbz19v3j7kQKVCfqhxUr3Bhem7g2o7wk2Q0nPE8S+xEiHvgu62k2oxMB31Kd2rY/KY4JOjpYCSY+g1ys6vYf1/F3vgCq8GNJApW0IEoM0znOspUBlOZkXDb0hCqwfIrO08NJpMuURbXcRpAgy/LRXTPKcN0HlziNcSgy5FJQ7Bi9OUMRIzK4rSZik8Bo4V0ni+UibIG0HBz/vFx0pAQy58sBYVkqDRDUZQT+j7Tu+sJXSYXykqxtlrrkLhRICOfTG6mD2uVceepvN3Bs/HtH2+l5HDT0IVGFk5Z73MdVD3ol24QH1UJ2i3qSHsBatCu+XNigN5Z7B6yZoKUFItHJAfmFVcR/twiOihrzaFmxxf+t9aLVhu4PjmEaujUU41BM8Ej8q3sZ9MuWRS0HPExgVYJWHFYIgcSsWqvBcSe36TNR36C1lnK7GnqDnu4mMLbxIAH5qUYX3K1XOc6m0+8M6D0eqBKXYFvUjiH3YLpJqhEnhGSvK0yrAConQGusFYA1ebhHkSBMTbSWFh955jAce37TINirQlLY1QWIGgKei/hKk6RdeIF3oVQLkCHqgBj7PvOitaaF3PUqtljtexAwzRZ3Xx62MaD9AkCOMRtIl2uoVno4chETm/YJgZfCSvMBEpVRbGdWm89J5RQKL6o7Gy3LXX3Nd9FWDzLVLZqH7eEmGzBMEmdMfLLlSBJklVRJhLEEGmRJF4ihBIDWZFCRKcD10S1+xFHjaeQeNce2rchCJJPfc6lelY4hiZ5P8dI9rNvA4twLngfI0+Il19qdI4iGNJUgtKstQuSbIDLm0dHyJLjyRiSfIJPi50wVlHKs2yFzb26In2CGPdhRbwq5LiGWE8yZHid31zBY5VuhZN8n1tfM+WwltX2KK1cVcOjurhUsCZKXdHUdysWs9d1cr277AM5ZcukRGuXKJS4R19RYItwrm6sCihCXMoNS1u/eHdc+aKIGX5s6ja5zHThd1mipB4glyJZHaJamIYkPXlyR+YWaloNSzBNrip053Bp76QV8MU4unXXIMYSBI2a1TgXtmlbk26PqCUt8lS0IXiZqMptR3STFyKZyeapdMw2QSaVwbSOPuUUuB0u67IHXjYqrcmNwJhCtDm91Vx8GKjFF+sXoZ46cWkadEOx3AEPQzhNkbIyRdZJbsW/FzSUlyHGRs4GOMAB8TVrBEQ8d3nC23SXGM2lt98hR0c2jH0O8N2Y/BxE0W58dD9hkcVz8cOlbjElSJ4viW8zIbCyJ1198xECew3nPu3jx3YPNWF3oJyJ3i/IEHOkEQYxE0I5eAxHgh1nNM6kHCq9WSJJOC2BusFrrVOBAEqfPJ9n3hbL5wvyeeQGmnf0Y6vXaJ2cDXFHMdZwO6viTxZLEiCVpJvFTjaXdu4ssCtScdp3XXizyoi0Hb9dyk2BZj227iqkH7ZYWd9rgtmRmyaDsPQYiVEcYruWNFqUhalhN0d0iVpOcPra4CmXC6uB24upIGSn1LkLnVK/lmTGr/8U//NF95/HG++Nhjb+rk+f2PPMInH310tPlmJCMZyUhGMpKRjGQk3xKRb1bBn//c5/g7P/RD9OP4TSn/53/hF/jP/v7fZ2lpadSKIxnJSEYykpGMZCQj+ZaI92YVfPjwYf7bn/95/ut/+A/5vz760W9q2c1mk1//jd9ge3191IIjGclIRjKSkYxkJCP5lol8Mwv/6Z/6Kb7y2GM8/cwz37Qysyzj3gce4DOf/CRSfr23b4fSRu6naFixF4W8Gws7FMi5F9s6IF8MPu8vww7lpxT7iBSu/OFMlwfjRHfLGP5y9/92/8G4vXv7riluO+R22ZfWczhadZDG+wCZYej43bS7wH7ShTjwFAc/7xErhC6IIQfTNQ9qd5BO2e4/fzdV574nvEPErdjbNbXvFhhq233lHIzC3mvnvUj
gHO54fXug3ofSjIqB1hyMCBZ31Euw+1I+77s1e5uWDD3/q7X2XqTx7c8n9tEgEEPpmfcryz7SxiAae7jv3Kmad48Z+v9uLQxIDuL2tri9be6QPNceuJLdO8pye8pcYfaTL/bubbD30NypF+7/fFv9DyLDb7vandtD3J4m1r4BW8UBm3ObLTiYDlm8mo7ZXYKEFbeXajlgl+6QedgMd6Ah+2j3qxsGSAubZAY26lWeezfz+BDhQRy0vXb/8cO/CfvqvelOn+0duteANiAKMsxAGwZxLQNC0v6Th1pb7NVfMqTThqEUx0PEGGH39+dX0wO7l0P8jrozKGe4vIFFFtbeZhqGxxZzpzHGHrTaYv/Y9bq6bvf0oaDw3G4z2VcPw5nndaErw237ej1FDDfY/iTwdzjaHLDRHBirzGtYs1ez29zBvg/X2MGe9TrliVcZV8RQXux9pw7GgGz3XgZVKLQjYIjU7pur5Pv0q6grO9TiZqiPHaAnmTv0XTtsk4ZIKeJgO9r9/8eCyHiNOn8VW3zHOrW4oEGG/r2TVTC3a9Cdu3ZBCjn4tT1A93mT5BN/+Ic88sgj37wJ+T/5J3zoQx/i/vvu+7rL6NQkM7FmfVKxMaZYPwIzHRfFPtsxvG01I6kY6tpyuq/pRsoN/AIeXksxAqZ7mu2Gi8hMfMF433C8pREWDq8Kgo0AgK1x2KkqpIVyYqjHhvfdTCinFmMEazMKaS0SKOWGya5mtp0w2TV4icQqt1F/Y0ywnYfs1BQ7DRfocfWYj5aCd6+mHNnJObWeUu73qcQdwiwGLKWOCxgDi2IdSYyyhurWBhYfQxXLKXImUXGP6uYlptZeZnL9JRY3bjHfylB5ikx7JI0ZNFPMbt5AxH3qnS6Wu4G3AzN4rTXAkPrnsUwzvnmlULTDWOr0K6ewTNOZOsfF+8d5+Z4KWanMWE8T5Q4NJrRGU8MEJVZmPDaPTLB14hiaRV5++C3oYJbZ7XYR9JEDVSwnyLkfy0lgifrOBijFP7i8RZj5gMKUqsSRxADtisRLW/jdLlAHpoD/FJo9WLbF5wawCVxmfOslYA54CJiGlZ7rRl4Op2ZxEdUUgYQR8N3AvPt3IwS+BzgPHMUyRl6vMBGvOEOVxS7wMxKsnjtEUhvnyoMe6BzoI7DcOhrSLUty36M9FmFZxLKAxQcaGB7EBVbUwB93ASfSQCfBBVhsILiOpInMNopnMUCLQ+sXSUrn0cwx1u6w2EwY6xmyALpVn2pnE9nZIq2PkfqwOaG4dI+PMIJeJHjxbEA1NigjmO1oFtqaWmzIPEEpM1y8K2S7odAS2jXFmc2Mya5hqqNZ3Mm5sJ4x1ctZ2Mnph4LEFzR6hvFtmO0bgtyyOe1xajMnkxZDDYSkPznN8oMLLkBVRSSRRxqeAiSSTdAZljoQEbXa3DwWMre1hWWBfu1uhDaszHr0J06ydeh7gbPAOCBIS2eASVwwTx1HzThDZ2oWRABUAYVkh1ceegQXGNQGKkizjosQt0CIZAOpm4Bm/MZFkuAUXnsVi4+gS9B8BegVk9UytVaTPPTxuttUWptY6og8we83mWnFtOqSOBBobTFhhcbOFiLuYTzw0xhpDJPNHcBDdTcBD4NPmBoMk9TjlCRUzO3kxJWAXqS4Nuez2fBYamnGbgT0A8H6lGKuZ0kDmE0MSSAoZ4a3rKckgUArxwqYbVqaFUWQu4HyxpJHOTXMdzR/azlltqM5vZXz3hsZR3fcy2ekLXMdzVZFMt7VrN4lqPRz8to4QufcOBTQ9yUTYUocCGbbmvWGZKxviD3BZk1RTy1bDYlWks2aYmVC0apA7kk2JxRxIAi0pRYbprqGuY5mccdhCINi5mCkYKuuWOpbbk37rE0rXj4d0i0LzrU1/Uhy8a6AnZpipmdY6GhaZcnkjkViuTHn45Ut7dyn0ws5vA6N2PDIzYzj2znzrZwL6ylHmjmdkuSV4yHHdnLGrvls1T20hLXIZ2OnxnQ7d4QjoJG
4CUyvJDnSTFG9LUr9Hr2ypBtJtiuKPFDUEsPWmGRrXNKsKTp1QZRZHlpNefCmplOWLJ9ybzBJ4HB6cx3NbCvnaDPnnrWMqY4myCzdMkzsGPBgc8IFxoYWZlsxXm7Yqip8Y3loLSPzoZxaytbi9bpI3aMXSRdYGQrWJjyOb2f0SpL33ExIQ8H8Diwv+MSBm4mUU8NUB+JAcmPO1UUps7z7RsIj13aQYU53Yoatk2MIcnLmKTVTRBYDEeVWExsoIGH9xBLrpyeRdIspTRODIlfTRf8tF0SdGLjfOZz807hgvha9ynmCzWUUaWEHTNH3A+Duom/vFN/1YaUMzVYxWZ3EUTWOATPFdxrXO6bJgu/EkY1OAxKPW8AHi5lIDNxyYwqRO9doWNuCNAR+FGjCjS2wMVy7Bde24fJNuNaFqzsQ54U9fzvw/UCD6+fuIR2vcHjVsnxvlc2FOk//cImVc3NMtXOiNOGHr8RYAQs7Gk9AWiqxcriCjqqszCrW5z3mWprNmmR70mdnqsyFtZTJnubUttOZWs/QHJMkobPZ0kDcqNOaqhBlhs2G4tJdPpmCblnRbZTZqQpuLPpUeppcjdOvLfHku2qE7WZhV48XbZbQXPox4GQxhh0t5jHjRR3PAu8p6m3wSjcD/Aggicv/JZAhcPqiqaDSFYyYxHIBobtYGRbtlzKzs0OUuUDkKLesjzuK1HRHc2FDs1OXZL7A15ZbUz6LrfzNnzwfP36cH3j/+/k3v/7r33BZTz/zDJ/8s0f557/yK99QOVt1wcxOzuVJxZVxj0tLMLueU44tcxs597yY0q9ZqplloWPYKivnrZFw33MxWgmmmzm3Gh5bFTeYjbcMR1czsLD4kiRcDrHASxOC1aqLA6/1DY224a2P9aj0DHmieGHOTaylsZRjw3QzZ+56l8ltDR2FUJY4ELw07nE1i9ioKdZrCgw8fihAS7jvxZjDNzJOv5hQ3u5Q3WkS9XpIDJWVLVy0qUFxFUEfL7XU15exhFgaJOokuZhB9rcZW/0CM1cfZerapzjyxAscupqg0hgVt0kaM+T+LIvPPoM0LRorLRLvLBn3AtME+SWEyohnz6CZZerqlxDkZCxhmaA7e5qcabbmzvG541N89niNLKoxseWIG+79X2NkDR1WeGk64PrMNDcWTpB5S/z52QfJx+Y59OwWBr/oRBVyeYJ45h4yTpBylPErl7Ge4gd/8yalNHQvCVGDTsmRSJpVidfdRtnNosOO060+DGa7YElOFJPnZxF8nMrO/1F0zGJStbnlOmzYgqUp95sdMD8jNPcD02juAx2S8iBwHMsSME5flhlffdm90KQ9pLW0Q8HFQ0eIa1N8+UyI0DmSNkJYXpoL2al45IHPVr1EzmGycB7jK6BBVj1fTNiqcLzh7kVqWE8Kg/Msgs8juImXXy6exQDrLL74WXoz58mjecZvbbN0qcNEUxP7kp2KT+3adbz+LeLKJIkSLI97fO5UgDTQKkk+fleJRlcjjGB2XbO4ltHouMlzuW/54yMRt4rJ8/qY4tTlhOmtnNl1zeEbGWdfSJjZ1BxazmiXJP1AMrGjmVgWTLc1UWZ5ZdLj1NWExBMY2UAKyc74LF89egQvScAr04980unTWCQ+txA6A8aAMuXtTZ5fLDP33C1ylmjPnUclmotTis7UCa4ceQeWE4VhVrSjuwodiApDPknqnWZragEoATUEAsEanz71nUCreNGqIHgeuFQY9BKCiwiuAZpK649JJu7Csxex+Ci28c1fuIFZCKysUL+0Qhb6eOl1apdvost1ZNwjbK4zv9xhve7RDQTaggnrTFy5gUxbpJ7A63XwtGH2uXXAx7PXsXhYEVFqG3R9ikYzpleSHLqZ0isF7JQlfzkfcLWhWFrNGH8yoBVKLk8LZtuGOJBMdQ3dUFLNDBdejumGilRJpIW5Vcn1miJKnef2C4s+1b5hYSvj7V/qM7edc3I55f6nMo6uZEgDUQpzzZyLVclUS3P
xqKSxnWLKU4g05fcWAnYiyUQU0wolc1uaZxseEy1NPxBcbyjqPc2X6x6ZhBfqiscnfFbq0PfgyoRPP5QE2jLWNky3NAubOQs3neMjLCb6uYSrYx5zbcNT0z6XpwL+7HiJnZLk+LrTx08fibhVkUzu5Cxs5qxWJdO3LL61PD4f0oxgPQ9Z7ZRYumRpdAxveTrhxNWUQxsZF15MOHIrZaMi+cxSxLFrGWOfD7jcUORS8HTk8+Jqg7l1Ta1rsALGehqEpVVVHLsSo+xNys02zbKkXZbcqEniQFFvG66P+dwY81ipKbbHFOW+4Z4X+9z9uGajrPiju5wPshs4P+z8Vs6hNc2RGxkXnouZ2XBOo52KZOammzwvT3iO4avh0PUYL9Vcryu83HLhWfdCU+0ZysbixTso22Qnkmgl6ISSl6d8ji+nNEuStzzeoxtKFlY1T8wH9IrJcyW2TG5BNxA8ORu4l+3E8ta/7PGej68hdcxWZYGb8xMIUvLyAtUbKbLfwVCmurqODT0EXZYXjnH50DSCrWLyfBmrBKY2gxVjRb98DuiQcxqwxOUTgI9gjeahcwTmCazYxnLMOWWiEg6Nerzo21eLyXQbni5B76abSFMDXgYWC6dLs6B6CIw3SXvyoWJyewSQCL5IxkMMiEmCp4vzAiBxk+dnLwEBsfd2Z0ueesVd92OPw6cvwx99BvprsLNcOAlmgPOkvAWo85XFu0mrdZae03z1RINr02P8328r8/LiIWZuJkTtPu/7kzZGwqHlDJVDXC5zca6MCeu8POVzaSpg7lbGtTGPG2M+62Ml7n4qZmZbc3w5JUoM9VbOSkPRCyXjLY0ylk69zvZYjVJiuT7m86cnfBJf0ip5NBtlNioeT88F1NuGvDZFKzrGvz1fJ9q8haVGzrGiThOeO/Q24FAxhs0VLySlwjExheECUGYPUTdDf+LdgGDt8HcDcdEWZQw1pH0GHTXI5BkETWxQImcKQZ/pl1eJYoOvLVFseWXcpx9JZjc0Jy5qVusesS/xc8tnpj2WVrI3b8/zsPzaRz7C1PQ0P/ojP8L4+PjXXc77H3mET33qU3jeN3bbogB/i6ElP1tsS7AFhmV4RWB4YcXIPXe+OLBCage/yb11CnVgcRwBxts7Rw4vewiXQMPK/ds2xG45B/bcDBK6DI6XQ9sudolr8sB6kNhbZ7xtcW5wolcoqsTKYulSiN0tFM5PLm5fXkIOJQWxQ3c9WPYeLJc7z40a1L280wqz+10O4O3W4hUJKKy6w5KZ3YP97bZl9dVX0PfqYO+ebl/eGdxY5VW2o9y5cDkEHty/fFh8b4d3TImhLRCDJBP7l/TU7ql2N0nDqy5z2TttI/GKP3FgsWmgWYV+Dunerm6L4hyxpyfK7m178IcvNNg6JPbXxWCZboCbs8K9iO77E0OaIgbL4GLovNsXi12inOHlfvOqS6HSOsyjq38ztGdtUKdmqFbMHZcKb9cRgbdPHwatpQ7o0OCzzx6OaXCfw4ilvaQ5zjgMtZXYQ6ft2y9RLOWKIWNlvcGeJbGvbXcPsfuaDM/ubdMY2DBp77zNYfC7YL8dHHzhDScPUezaIiv3mseyv72lHd4WNrxtQ+xvP7F3bwCR3atxz7rEDAc13A4vT8s7LJoX15ZF8plBXQySdnjs1ZcdrjvhrisBIZxFdHos3HMXiD2n386Yy8IBg7K7z6AsKGH3TM1QWwlbnF+0q7C378qRg1oaMk1WuroXQNnu3+pih7b4GblnewfXGq7f3TFNCGfTis+3LdUPjR67dSqdThq1lwRKWbt/vBzWmcEjeKBLYtdu7ibu2NVjhuxnofvGFjZXHNiNOry8L4t+Ym/buubsgbrDcv7Ahovb7PV+TbrDGHubPba7tljsG9nlbdsNB9u7hDHsoe+GbYV34B7MvvFGFuOlVRSr2u5lTxT6JwzYcG++Y4sEJ8OQVlnUtRza3moL5KOVe3MZsbv3Ruzbpjr42re3b+FT+6rJElr
usMUTh6u9LZnM7Rsq929rsq+yZaeYj9ihbTp2aN7jiX1tIA/O64aKiwbJgL4Vk+cgCPijT3yCd7373V93Gf/jhz/MIx/4ACdPnhztVB/JSEYykpGMZCQjGcm3ReS36kLvfvhhFo8c4SO/9mtf87mbm5v8+r/+1/yv//JfjlpsJCMZyUhGMpKRjGQk/9+fPAP83sc+xq/88i+zurb2NZ137u67+fSnP00QBKMWG8lIRjKSkYxkJCMZyf8/Js9RFPEbv/Fv+eCHPvSGz/mpn/kZfvhHfozz5869+Tdov/Hv7NdxPfEGzn09yNmrnmAP7vE6uJdzGFPzWuXZA+UNF3EnpM/eXiXxBu7T3uFJ92MCb2PivIECxRtsjzsdJXh12NXQZ2vfUGlvXJFe+6qv/zzidRTSvqF7ue2T/fpq8Y6q8moPZV/zqztcawinaN/ovb3eDbwWEun16umNtOBrXfM1ynm1/vmal7J37sN37KV76mPfSDf7GrrkHcMN7GvYmK9R795wP/majPMbUMADRvl1VPoNFfNG9Nh+M56L1zAPfON9/Ws58U4Qt6+pfPFGSnz1m7kdbfm19N9vlkJ9IyK+plHn1UyOfb35jH39Nnw1Oz4MCn1jY9obrSfzRkee17C/r2F2X+PMb+nkGeAHH/kBAj/g9//gD1/32BdffIk/+IM/4H/7X7652zVqfUuQW4K8yEqfu+iKfijohS7Xe7VvUZ7GeNALBUkg2Zj0iDJDs+HQdd1IoKUgDiVJAN2ypFzJKGWWyZ6h2pQIA6kv2JpQ9MoCZSxGCFo15bawG8H6pKIfScZiTS0xID2khenEMNs1TPQM47HhUMfQnBIO6RW6iWGkXXlaCvolSa/h8tjvLFQx1Lj1HWOk9Wlak4cxYhwtanTHy2TlaQzjdGYn2To6QevwGAgPQ22X32xFUWbFJ66WaNUlzWNjRcSrz9ZchZXz4zSXpsnkcQwK7dXo1D02HpzB0CAtz9CeHWdnfhGlNVbVWZ9xkeaxL9maEGSBoJw6LFZrcYr+xBithsdErFk9Icg8QWe2TqOv2Z4OSIMSnUNzaHGYJDxPWptieyakOztOd2YMqyqk5QjRbyPyGBDEkWK8Z9iZlERJhsj6WGo4qsJ5VNLDRT3XcQEZPVwEtMURFuoF+myAfyuDV4LQhwUfmrkL9pI+sAZMI9jGhUfkGOYZBIsFmcu6uXF2Htnv0K0oNscUmQfdqmBhW9OcCgGfMEkZ7+ZU4wQ/6VLrrCFIEHkJodtAHxWvFfesoZtAO4a5OmgF81Wo1Ye6fYBD6/nFs7Tw4jYWhUj7SJ1Ti3OiJKVdlZiwRDw+iUSglQuCmm0XtAIFi22NwFJNDdXUEGWWbslh7DplSWkQgCUFx5uuwyW+oNlwxJhSkpIFgjgUrE4rosyQ+hBqy9FWTjk1LLRz6r2Y8V6OMDlB0iHqt5lvbaL6WwgyVhZ8ulWJoUYu5jBRmfWTh4EGwuaMd10w0PbRSeJAYsKQ6a4m9d0EfHvxKL363WgmUdoHzmF5iKT2FuAscaPOzpgE4WGZorl4BsM0R7dXcAgrh/5zhI/JQo8ywNKeuZsBUsnrbQE+eThFHs6S+2OARJiEpFEHkxMkCaDYOjnGysk68USNLChhfEUpMYz3cxSa7QmfZNzpq7QWXSqzdrgMSJqHZzBinP5UA+uHGGmwno/QObkSoFM8bQgzyz3bOSd3ciqpoZIaTu7kHN3RHOpq2lVJIzbkBUmhlFo8Y9Hjmq4vSD1Y6GpaNUmnKpnqOUBsmFnC3DLRMwjrbGgvlKzOql3dqGcWIwXTTYs0hiSUoBRnuhrd0FTakk5J4mnL2ZYu8G2CvGboRpKKdVipRuqCi0/vDJCXllBbwsyyNakQ1uJpSxxINsY8jHIOi2rq7lFaV0amIAkt7YqkHlu2Gop+3T177AsSX5JJQbvs8KSdsqCSgUwks7FhqqfxtNKjmaUAACAASURBVKVddvjRqZ4hyhzmr5JZ5no
GpS25Ehxvayqp4e6dnKNtDRY2JxXaE0hj6Zck21OSNPIcXrQgqkx3NKeaGs9AP5J4GjLPYehUbulUJb2KQBiIa3CkCYF2GLjMF4S5JcgszQlFlBqiNCHKLBtTHnEgWJ3z6JQlvZJgruP6m9Ka2a5hsmfwjCX1BEFuaPSMCxSzmol+ztqMx2xHc2InpxYbzm7nlFLLRE9TKup6bcajmhiUKQI0DYwlDm/ZKUs8nSOzPkHSppw1qfYcBFikfWxBVTBBxYWtx20AlIF6T2MoORvIWYTpoPpbWFsr7F0Jh/7cRKDxsj6OqFSjsrUMbCLsDnF1ETgNWYYjMQVY7ipsZ634y/bsqDDgV4Hu0JTKw+Uy6BB2V4tz/MIGNBw2T5aK46JijIgxnAObFMQnH4919sJSZfFsUfH/2u51oIclwnhloMx8ZwOlcyb7OfM7Gj+HvO/RrCtyT9GtB7Qr7oWhlBo8A1JbJro5SSDYnhM0xyXVxHB2O6OcGnLP2XRlLDt1hZEOs9uaEaS+JMot5cSydkyxNemRhgJlLZM9S5gbpHV9t5wapLGUEoOMe0T5Ng9ubmGlAjIkraKtQibbzV0bCp3CpjaLOunhKCgP4LB1KdDH714HGpRbNxhgOi0CxRrQQ2XbYFMgQOQ9kvqkq1Pr0+hbdmqODNUKBZXEoAxoKZjt5oSZoV2VHOs6m6A+/OEPf/hbPYF+5JFHeP/738/P/dzPIcSrvx2+/R3v4Hc+9jEW5ue/addeWVlBfqICGfR818C3gpDz11MuTQZcn/IJtSXowfJRF+26LRXTPcMXT4S85+k+/+GtNRaamufn3ADcDxzC5C8uBCwe3uHIE5LDGzl2K+S5BY/Z2PDVRZ9mJHjfV2O6ZcnvPlSGXOGngt85HhFY+K7nY+66nhFpjzgU3JjxObGSc/p6Si+SHF6zfPUuwfyGJfUFaw3FzI4h9gXlzNIqSUxQYqLr84V7pzh2MeU3fvI8py7XuTh1lJkNgVbjvHj3CcrMoHKfF8+d4slTs/Rq4yxe38bYGp5dRZCg/cNcOb7Edj0kLZd5cSFg5cgURy52EMLjz995ik/es4D0p5nKjhI2r5FMnOHpk1M89e55Tn85YXvpHMtLC1yfP8rh5SamMs1H3ztJ7knWqwojBKXMug6uJDcX5iEY55WFgCM3c3797/qcel7QrTeY2tI8c6zK2JbPtbNHqe+MsXH4rfTr83zh7hrCn6I1Psl4x2Nzos7E2mNYHeHlm1w5egatPL54d8j7/3SFcu85tK2iCOiN/R2i3tMFE3KhMFI3oQI2SxEcBe4i5zxK/B7QwDKGOFuFhWmYVfDlFThUgsQgep9C8/1IPgVUEHKepPwAXnYVyPBbdTx2+P0Pfi/3PvZFHnvwHJ88W2G2nRMHkvu+lPCVczVOvLBGmComWwETq9tErctMrH4SSYbmEL59FGghzaOFoe/BeguupPDQPLwUw3ceg0jBcq8wSoukvAXFCvA40ETGS+hoDL97HSPLjPU8yls7/OVDU9x13bJxaIlybLg55bFRV5x+OWd+S7My6RH2YaxvMMCDL6YIbXluKWSrplgd8+j5krlWTrsk+cFPd9gc82jWJC/NBRy/kTJ3Y42N6TpXZnz+u/fU+LtP9EkCSZgZvuP5GGXgzOWExRdvUu/7RDuvEHW2iXbaHL70HEH2IsZW+dX/5DSzHcvsqiaZPkNWHuNfPHyW9z11HUFKxZ4gajf5y7edAwP1LODQLc3KlEIjeOnoSUz5LNWsRNAzpJXT9MfexfKphyl3D3F16QhfOBtx37OXEDT4y3f9KItXLHe98Cc4DqxGsIrhLgTh3ssMMZ9/8B9x7OqXgRQv0RgmaB16K3k0RU5KmFwBM87aXe+kdnOZKE4Recqfff9DfOb0NIf6FWwcYSs+0gjueqWHMil/dWGMqm5Q3d4kaUyBX+Pj75jkgeeaPPbQWeY3fW4eOUqtk9MrS5TyUZ02qwsTzNy8hQ4jQqM4spJ
x/FZGo2sIcjh7JeXo9ZyZHc1fnStx99WUlbri6rjP9z3ZY23CY+WtCU8nNbRneN9zfW5O+cS+YLJp6ZQkh7Zy5jc1423HG37uUEgnlPxPD0ac6MJLUx6H24ZDOzkPfCVjarvPxmSZWqdP0qiy/HDM7OMRn10K+b7n+5y8ldOPBJ86GxDN9NhKIiIDZ1YztC9pB4K//aU+Ckh8ybFbGWMdw/95b5kTTU2ja7k+6fEHd5VZ6mTc/0oKCC7POlb+mdWMVih56agErTh9I+X37y7TOpIQbvukvmSjotgsS3LfvUT89pmAe9YsXe3xwPWUh5+NEVbwqXMlvuOVhJPLGeXE8lfnS9R7ljPXE+a2DZfnfd75TJ+pHcN023B0PWesb/mLsyXKiSEOBIkneO5wwFxXsHDlUVTS46v33Mvias69FxOmm4aXlnyMlGzWFfNNTalveXnB5+a0ZH7T8PkHFe/8omWiq6kklicWA+5dTjl9Nee331XlvY+1GNvcwgYVfvutVabbht+7r4IVltyTPPBywtTqJhJBrxJx/oqbyLaqkuPLCbGSLC6vovQm1WyR3/rOMR75YpfF9ZzDqzn3PeUm5kaCr0ErwadPlfiBx/sIAc8dDhjrGxY3c2Z3NBfnfN7+lVX87WWi1lVqnWXK7SlUcgtlNska06i8RTp+FJF1CLMn3Avx7P0sXcmQ/ct45stYfhzJk3i6RRbdg8grwBaCxxBcBpbws6vAOIbDlPr/M/A0giaXz/0DJlY8hP08Dk26QNL4Abzk/3GTasJiMtcG3g4nbsJEHTYvM2AGO8b/JQTXCNIdLMcQeMVk7wSCfw9njkCaQ7JUTMT/ms1j/znl5ueg4wEh0j5RlOcPZRY5AqwDS8WEPUOwivZ/gLRxAr+/xcL1i4h8lql+jUMvp6QlxacXG2xXFA9czrk+V+bqtEeYw11XE5CCUgqzKwnrkyH/7kMBLenxg5/vcs9TCb7AcdTLHlM7mkfvqTDT0vzs95Y5HmVETcmRtZzZTc3//qESmyrYJXKEseAtL8UgBdcnPE5dz1me9bjn5ZjK2rNU4he5/6+3yWoNguRZBJexzCK4yeTaU0OT5BvAGQSfAW8GzCUEz9Cvfhg/vQlcB7aQ+ncxvJdK5zeBKknlFCrrovgtYBMvnsIRTyKU3WD1+Nupb1wjD09QS6p85kKZ+WbOP39bjb/9dI/xruHqhMfbn40ppfDpC2XOX8+w3rfB8wwwOzPDL/z3/wPv/Z7vedVjPvxLv8Q7H34nD9x//zd/d0ZBUDuYkU8ylFFKHgDKWAgKZNsuKGZ324XdPd8WDCYjh5B0OByPGloDGMYBlY3dxXGZoVVoK/ajvKxy5cihLD1W7FFiBvc5jDQLjEUZjTSmQHCZXTSXMBZpnRdAMsjotx/35dBkBV7ODqBbrmzPWgJt8OwQxsu4a/vF9aVx11HG7NaVZ/YQOKq4ZysHn83utayEsPBQDDBPyrjMhNJohNVImyOwDoNkTJHVqMDyoHYrR1iLEe5ZrOfAPHtIMo1Bsj8dlxxKjDWMMpN7C31m/xYRhr8DBiA3rBiqV0C5p/eNu46wdheRpQATiN32s1JglSgwUgOvxgAb6A15IYZ2Yvli/7Ydc6ecanvl2WEMkxgg60Sho8O4KPfZDHETh9GNAz1VRWYpcYDiZgLXHMLu4e6MEoVfxTI5vAIphMNcSXeMDUSBKCzwTlJhdrFwktAOcH8GrEZYS20IQTdAZnlD6LyBBxLBbh+hwCEKaxBGu/KMQVrjkFpFnXh2kJVwsDrhWk/cYTlQWTN0jLfbLg7BONQWRZ3YAlenrMU3zlMjijo1Ys9+STvA6u1dMtjFAlrEICPnMMZrgHEUe0gt7bukJ7sIMwXac3o3QEnKAudmB1ko7R780MgCiTWEO3MNvfcsA32YKZ7FG9qRYTyxL1uaEe5GrBykd3J9FpxOCuP+HSANzeDfYZ5n8SzBAeRoMLQ/Zdfe7mYRc88wKNs3Fmn
Frl0VQ3g5AZQp8HnC9dNBXxjYeSv3eHy26FeD+9CqwKJ6BdKP/ci2ga3dw7JJV1TRNoNy9+XDE/vvU9hBve1hxwZt7A+IiUX7BGYPPamKMvSgPoewdnb4eaTYN6Z6ttAd5Y7PS0Nj4eC69vaMbVbiMIx2cD+yGAnU7gzFDrP87rBwbtVe5tM9NJ13IBOpKjy3e3bclTlZ9Eu/sNOW/YjVg1kHhxBxpkgjibrDVoqBt9nevlvWHEiTiSjQdMPnR6+x58Ie2H1rh57fK3CFAlMSu3MENWTPB5jO3X7KHt7Qtw79aKXAlIvzB2POoBwLk8Z5/XdRk8LpkWfsvqyEA53Z7bO7g4nAotC77XSwHmvsR/UVK6dmMPaVwAy3lwTCwibW9s3R3OfBeCHYlwp1sKVEDmzqAZNZ2KgBvtIW48a3ZfIM8LM//Y+4euUKf/IfH73tt4uvvMJHP/pR/s2/+leMZCQjGclIRjKSkYxkJH9TxPt2XvzyK68wPTPDlcuXqVQqu9+/57u+i0984hNEUTRqoZGMZCQjGclIRjKSkfyNEfntvoFf/MVf5L/5yZ/c/fxTP/OzfOCDH+TeCxdGrTOSkYxkJCMZyUhGMpK/UeJ9u2/gJ3/iJ/jVj3yE5194gSiK+N3f+Rg3b9wYtcxIRjKSkYxkJCMZyUj+xon8m3ATv/uxj/GBD3yA7/v+7+fjH//4m369zHNBL8pCmEM5s+S+2xgvhSWOBLrYJG+BoAg6KKeGXMFYrPGNRSuH3RLFRvhyavH7gtR3WC9fWyZjSymz1BKHVTJKkCtBqKGWueA437oALe0JehVBHEKvIijnDqlnhUMNVVPDRN/ia7fBPVPu2t2SJPOgHwk6VUG3IlHGYhFU+wU6Suf0xyN0KUQaSEJBGvkuz72ETkUSVwN6ExFaNtBiDO2HZJ5AaRfsEuaGctwvNvcLKmnGRKIxwpIFFiNL9Gs+VkCuIYsC2jUXAGAkGCXJPUmUG4LckCpIfdCyCFCRkBWBS1HuiCKZceiYXLl2M1JgfYdiygOfbsWjUxEEucNVWeE29yujsbvBGhJpIC5JKkkRfIVBkAE5aA34WMYGGgJo0AaLX3xOELShNEjUU4V+CnHm6qPkQaLB6OL3XnFeDoDQPSwhIMmiAItHNe4DhnKmme5nVFKDrw3S5C58MQyxSoAxCJ2BzYvyEgQ9d4/o4jsFRUAou4FvGrLBc8wCU1g8BAlF+BSgETYvggqLQDwlML5HNcmxUpJ7g/p3Z8WhRBfBm5G2rj8VMRiedWgsI9zvYW7x9F4gk6ctyhTtJMEoRT90qMVyEZnfr7ngDGmhW3HXQkikzhFojOcXIZvWtY81TMc5YZ6T1gL6ZYdKKlmLxcOKkMyHLPIIM4da61XlblCkspZAu/u1UpGWgyLCSuLnOVnkIYHMd0GB1vp42mL9gFw1gDpazAARWTAGBGh/liyaBSpU0j5QAWpYQnRYRuV9hE7JyxUcBcXDy1JMGIDO6U+VqaQJ43GKn6cIBZ2qIveFa1vhAtlyz0V0SaMxEmqJJi57RFmOEWo3QEcg0FK68wTkSqF05tphEPwpBJnn2lni2mq8byhlLmixllmssATaohJJpA3VzBIHAmVc/SW+IFeQepAUCMKsCCCLQ0E9czpSztyxmeeuaYXAMwajPCQWP5akoWA8NQgc7lMIHA4xcedHmcONRrmlmrlIH6/ofnEoSAJBLbfEJUE/cnZZAkkgyZXTw1pq8HMIc4tWDuXmG9fWRgrCRBBoS+aDCjWhBl3oeiO2SOP02xT1KLBYIAndX65cUJXAjRdGFEHpxfHSWnLP2b4oMwUC0rVtkFs8bRiMRFYU/cda+mVBlLlnL2WWfpVi3Cn6aCCox278CLUbM7Q3CJC1lHJTBDlKulVJpF39lTIDAuKyIPcEWeihlbMJsgjq0kVMl2dcIJ/Fo1dSjMdmEF9Y6CWkgdgN5pTG6Q4CUs/pjK9dnw9y4/g0UhGPRxjhAnHTkl/
YLEVc8sFIZJaSVEMGgWRZ4AKyBQPbmwDG2VszCBrW7AV++zhqBoUtdHWsfY8o7gD9opwQQxmVtxkEo+2FaPpACkqAHkRf5sV1UvYC2NLi+7y4fmF7fa+4N12UJfFNvB+4znRR0QNcnS3GFIrzYBAwLUyMzPtYAjf+hgpMRhK6OYIylkrqAmLjSNAP3dQvjsQugdlI1xfHepZGDMLq3UC+XhW6FYfIbSQaz1jqiaUau35ocf29Frt5jpUQh64faeEQd9pz9nZAYBnYb0GGyLPd+rMiKOylNzRFLRXPrsDbm7YKk2IoF/UxiEaMizr1EDov2mAwF8gRpLvt6OkEkOhAkSuopxphYCkxxJHctXtJIMh9QSNxONZvG6ruoMzOzPDR3/xNMq355X/2z97Ua62srPBny5OMd9wAdP5aSjUx3Br38LD0A8Ef3xfw/7L3pkGyHdeB3pd511q7et9evx14Kx4eCEAkAZIQRWoZjUiKHmlMTYQd4/HI4Qn9mFBYDmtGlhQ2bf+Rxw6P+GdEUTMehxdZJkVKQ814JJICBYAkAGIHHt6+9Outeq297pbpH3mr6la/fiRIkaAiXCeio7ur7pJ5zsmTefPm+bLYlmzNQLNj82A9Yn5XceZORGQLLl4PcBR8+ZRPs2RxZjUCoanUYPqyzcuHPXylObwZc245ZnEr4XBVUexoKq2ElSmH2Ja870qX29MOsQVVXzITJFw65BFJwZ+9x+XJSyHT9YSWJzm+EXP2Tsijb0SUUpzRCyd9jlcjfuPJEifbiqcf8Hn1uEM973B8NWKuukNlp8TETovZOxv8vx95iDFVwQsVLz7oY1sFchHcWHD5g8eLHOkWeO3UPPPdE3THz9Mcn+XNEx6n70Rsly0ObXQ5843nENhoaXP4Wp0ztyU3juZoe4LpYII//vFZih3FjbxHJSnz2acqPLIcsFOSzNShVsphC0Eh0Pzx+TxSCg5vx+QDzXZZcmfKYWEn4eTtkG+c9fmThRIXqhEtT7IxbhE4gtmWwg2gVinzpcdLvHbE40NvdJhoKLbGLXKRZHxji6Q0hwxbEIckhSX+2c9O8nefbXH4dhWvexvBawh20eEJ4lyRoPgwbvcK8JxpgEmEeGAedq4BNwx67r2noFYlCX4WWf8qXGnBfAmmXXj5OnS2ARvJGxgmZQfFw9jJ00TivVi0uX7hw4xttDn51ttYaouZrVl+7PkGC7uKckczvrHOymyFisqhXYndVHh7b2Enmwi+DVxCcAfBdhrol4FjgEHhoUIYy8H6dcjl4eoO3cJ/T5h7BNlpY/MmylZIZQErSL2I8iYQShCUFmiX8kR+jrMvrdMeH+PyYQ+B4NJRFzeCbx33eWDdYLhKHcVrxzxsBaeXQ/xIY8eaawsuk03FVCNmYSehnROcuR3hBwolJLdnbOZrCu3kee58jrYjsWKY7Sj++MkcRzYSztyO+PTfrvDYnZCJWoy/vYKTXKNdOY+MI5xknYQ5hI545A2Xme0aLz9+mJdPlOl6Fod3E+bvbBJ4x3jp4RlUrsypmxG5QPOHHylzbDXATQyTeKypKHU0Qvq8/eAMcxsd4nyZyvImV88cZqyt+DePF/no11bRlIiLU9jOBI3JB/F3fDZO/wcUt2rcPP9JJjbWWT33D1he+iDTKx3m1r6B4CKJOEdYnGf7+EUmb34dp93i6sX3MX2nSSymKO7ssXPkAcqbr/HVn/swH/7SWzz02i7l9XXiyjif+8kJsODM1W0Sr8CdOY9mXnJkuYYIQrZmyzz6coOvPzHL4y+sEhTHaBZsxnfqKH+MerlAqdGiPl1g1yuwcOsWzYkJNscNc76TE9ya86gVJIe2EibrCeduBBzZiLgz4/DY9YB8pCi1FVuhz0SgOLMa8tWTPk9eDSi3FF85nSd0JE1Psl2WXFlwafqS5QmHrx1y+dCdmIVazJGtmM+czXOsoyjHmrk9QWWvRW28Qq0giXddvnLK5xe/3WayqXjulE+xo1j
c1czekRzZjhgLNQ9fDxAC5poGszfRTNgtWrx62KNbsHjkRsDvPVlga8zhbsV0kG9OOnzwWpdCoJloGLbt+Tshz53wOLqmmN+NuT3lcHXS5pHLivm9mBfOSw4d2mH8ms962eLYTsxjdyL2chblRJNIOF41XONvn/BRluEwCzTbJYtECLquZLKR0MpJym0zkRJZkluHLWRXcHQjJrIEzx/xOLkVMbWrOHV1j0LtMhDx6vn3cHI1JtfVfPk9eX7mhS5+pJjeS/jTn3QIYpuWK1GJ5O1DHh95IWBpM+b4WoQfar78hMP7346Z2lNoCeNtcMjzuZ8sc+5uhB8pyh1F05d86RGfpV0NTgGJwxtHXc7ciajnJcvTDg+sKhJLM1brIGKb/+oXTvOP/12dJH0g2ilL/EhzZckgDFfHLSodhZXA0c2Iy0su2hYcX4vZrNg89naXlXGbpT2Ll88uMl+1cIO7vPXYo0xu1RCRz18+dYGTl9dxutf5+lNPcuLaG4DLK088xvFLG3idpxFcN5McdIm9n4AIbLaQvApsAArFTyDxgC0EbwA7gM3a5CkWl/8NgjfTwdWH6ZSfxG/+qhlg8ZE0rk+kg7HX4dQcbNWgqdPr7AJVEH46mLuGYCydTKkC18Cy4NwRuLINBMRcRBau4jZeR8S9gV6J7vh/id39Uzg2A3UBqo5BtpEOCDX4ZYhB6A52uEy7+CjS2ubG4nmmby/z0nsOMbMb8/aSy1NvdThUjfn8B8o8f9zl4TsRt2YdphqKYjOgWfK5tujw8GsJj1wOma7uoNFErsdn/06O52fy5IFPPN0idgWLLXjq2ZDj6xHNnOTGvMuFNyIm6wlX5h2eedDlyrjN+29GLG1EvHLC45FrIeVWQiI1Y5vXif3DuPEVrLiW9m0h7dyTuLGbPhiEwDqG53wJ8hUY96DZNANrfYJg/BRO58sY3vMWgivAOSCPTKqp7c8DV4AJBLUUHeiR262h8Vl74Az1osPjr7ZJXIvxRPONszmeeq3DmWsBz1zIETiSJ17rMNFIeOWY9zdj5vmtS5fY3NqitrPD1tbWu7NWRQxQbEL0ySl9XFiPBiOEQJBiStKDZHquRGSANgIhDI9FZkkyKftEC23uI0QfZ6Uz9CgzISDSZ2mNm/kcASI9X6dP/YYOkz7dC92HYLm953Fh7ivsFClkD47v02L6tCqNI83MjuwpQkqyCG6RYtNEnHEZH4QnshfKkqJMmXpoo6FvzSkyY4fe7FjvqB66bh9NcPBfOsuTBRj17CcAITXZK6RzhkOMwHuuOfTT1zIDxFi6KUDfhnrwwCvJIOEEDJcMgUSk07Miva6Ie4xEgSgLRAa1NVR3sV8LEpHFyx20o1bGJr0JaaGNL4t9zV4M3Y2B07kaIfTAT3WvXINCGh8dtv1QMfb9JTJopGwDFBlXkEP+k/EKva+9ZnlCJQFWilTq+3AW45jWQw5bWWf0pdO2MLQvpaWH28u+v0WWE5nauXcVkSpO9hhq/eKKdHPKgeZFOgUsMtbQQiA8gZCmYZrZe9FX/FA5U6VIW6dtdeD5Q37Uj0Ep5pKeznoa1SkUbfBush83skbKxEw3Y4cBVEr327joz9WJjO1M+WXGt4bjQy9e6jRmD8rcu2ifkJUpSy9o9nQjBPhpnXr1skSWiDisRyEGcVfva0b3xCG5v80MefpwWwGDJ+TePVdFeuNhn0rPFsPRQKSsR5vhECP3xUYpBig/cW8RB/8Jo4/9EbofirRmf3gYHCvSt3hmiLN/EagW+2KCHsaqDtCYDNl6OO4PjCEzb1HEvhoZfxdkI4i+R8v6gFif1ZxCaH+4Ipp0FpR998wEk6F7yFR7+oB+4B5FkmHxIZSXKY/OcEAzE81D0XEQq/fHdaH3eawY1shBYL3+mMQS/QN6ba4PeRP0+ymRtbnItJ305rLXx2d2FzxIA/d+qjMz7VlR6YaCgysN9/BZ7+2NyuRQbe85NrvLs8j0wSIzHrEwyEqhweq
9ZdU/+jXPcRzz0Y9+lD//878giiPOnD/P5vr6aEHNSEYykpGMZCQjGclI/sbJj3zm+dd+/df5O7/4i5w7e4aLFy7woaee4n/8Z//TyDIjGclIRjKSkYxkJCP5Gyc/0pnnarXK//OHf8jd5eX+Z5//wz9kanqaf/gP/1MqY2MjC41kJCMZyUhGMpKRjORvjPxIZ56f+vCH+cIXvnDP55/7gz/gk5/85Mg6IxnJSEYykpGMZCQjGQ2eAX71136N4w8+yI89/vg9333iYx+jG4b80QED6x+EmOQ83V9U38/v09kjTG6ZUrq/mFxjFs/3lqtrrdN8QIM/653TS3jr4bv6CSFpfplOEUVC67Qs4OhBEqEpi0brXrKhQInBcv9+EqHSoDW+ziRspcg33U9pS6+DWcivh/RgkHwgEGrwXRJrVIpcQqdr9FOdDSVMpAcIldY/85XoJVz2PhYCJYaTUrIJGCqTpDNUz1QnA5xW73/RT2TrFSXp6Q1QiSAJQNHP9OzXQwuR6iebOGDwdUNJaENJDOJgZ3FTI/dQb8NpaPv+12QTx5SdSVvrJeakeh+cpQ2aTAq0EPdpsvvvySB5sX9r1feHA1OHtDbfaWM0g9wSaeLbsK1kT8e9RLK+7UUmudBcS4mBvgZ21P0sL9X3K1NmnbYhUuygpTRJqjIlRD/ZqqcLbQu0LfpIpd59dS9xRYO2httOr/3Rv2bvWD2kj0QItErRcPSScDTaydhQgHbkPeHUoLN6CKtMtpQwSVb9JEmtMz6s0TpJdT5UYnN1MdCjVvRtkz1U9xIsxcB/erFAAUks+tfoXV3BIA7ojOeJewNnInq6p68TlT1W9xJLB1mhSpp66qx+0dhq4Fu6Xzfdz5PqxQ4l6VdS6oF/9OK3Etm4zrAPHWKG5gAAIABJREFU9sqU+nY/N04IE6MZ+G3vu2xKmM4kIfcSPHvXVmncF/1EwGGdGZ/V6TU0Qoi+//f7EnFAi9a675NKD+ee9yOo1sPxUmf6nH48G9xjYJtB21dk9TzoY9S+RL+eXge5e0ZfCp3GYUFOD/tC71pJ+ncyyBQdJE2mcUOqXtsV/SRHLQdxuWcAlemf++WATANgX/xmX8aiHDp/8Lm4z0hBgUgy5+h7Y22SbYBZ4oC4tyz3FipjQbEvi08OeiydTbUTB41mhsvc83MxiCPZdiEZ9M+9vlABST8uGc0qJfq+JZVGqEEM7Pl8Xw0ZW/UTyDPF18KggXU6NgLAkoMYYQ3sM4hWA3310QDZivTqq9SwHu0D+kP2+4gycawfy9RQvJAZv9Qi6zOZpMsfBaruhW9/m9/+rd/m+W88lyEJDMvHfu7n+OQnP8l/9su/jOd5P7B7r6+vEz9dZHYz5kg1Jhcors+7hkGroFq22Z2E6T3Bk5dDPnAjYH4n4c/P5RjvaP73pzwObWsmG4oScGE55MxySOAI6jnJ6dWYQqR56bxEhhaTDYWyBPWC5MGViP/z0QKehseuB0w0DILpsTsBH7wZYGlYG7NZ2k64cDum5Uv8AL5+Lkc9Z1HuKJxEc+2Qy4uPOHzg9YCJuqLoCk5uRFS6GpTgX5/LcbEac+r6NnszFSwhcGKL//tDU+SVYdpeWvJp+/DvnvCZ21SMhZrINkzS/+uXbKq5HBPNhEJX8+YxDzfWHF6u4TWqoAXB9FHsdh2rs0dJl5lbr+O062zNVLi05HJsI2JuN2Ft3KYcKt485qNcyURd0/FNI4l9yURbcfZOyNUFh7vTDtUxm9MrIVoKvnIxx9Uph6muppa3mGom3Jp2qXQUX7uQw000O0Wb5w97PLwc8syHCpy9EqKk4KtPTvL5n55i88gkS3tTtAo+n3tvkfeuhETlPFMbEm1ZyKSLsuYRiY0XPovQVzBonMeAPMwmJpU8mDINtezCyiZCNyBXg4+9D95chZt3IZwEloA1BoxKjWQbKKA4hEZy+cQpxlWJOw9OMbmyTeJNYDW26IxPYochQlgEhRxOYlFZWUV
GAbXFU9SmKpR36sB1DM4oABYwGKRdDEKpaRy9tgtxCToBtCrIIGHj2PuobP4lgm2EqqdIH4Hi/ZA4OMltIiZwkwQpPASK186MGWZ5rLC0QEl4YD3kxpzLXzxq8/CNmK2yhRcbluvWmEXXk8zWjO/8xbk8778WENqGD/3SSY/IETx/1KfSVfzbJ4o8eqVLPtA8shyxPmEx1tQ8cj1EWbDYSHjhVI6NGY9bx6d48O0NwrGjJIUJwsIxrp09wvriIlM1QbvkE3g+dgJfeyhHpa04caPG9oklbiz4KCmYbCTcWrA5uRJTLdk8+5hHy5c0ChYPXLoNscVb58qcuLLJ//bxJcYTm8M3N2iOlYltwdm3N3npbz/E5KYgcATXllyqC9MoGybvbrNx5BgzK98i16xS2m6BmDYDRH2V5uR7aJaLVFYV3dlp3PoyvppBdi3CiWN47bfJtVeICqeZ2ujgBgEkEbg5OpVxVqccjlYjWiWfl0/nAc3p221KzS5xeYJSvcHdxTKllqbSSPC6bW4cHqM5keeVBws4ieaLH5zg/W8ELB/xmarbvHi2xNqEzf/wVJmL1Yij1cjwj9uKy0dcUDDeTGj7knyo+eIH8pTaxtaxKzl/K8QVhu1sa5hvJMzvJYZyEWk6juBfnMvxH7/S5mQtoTpmM9NIKHY1h0PFQi1GanATjYPF+rjDF99bNDHVt3BjzR88nudnLnURGh5YjTi+EXFlzmW6adBqX76QZ6qtCS3B8WrEKydc1ks2F28F5APFyZ2ERAgqHcWR3YSFRsJUQ/HCgz5zezFebBi3gSvJxZrdgsVLhxyeuBOym5Mc3YrZmJJUOzneezmiOm7zyM2A507m+PDlLrbSBLak0lFUx21Wx23G2wltT3BjxuHbDzg8sJoglebSkouyBNdmXW4etXhj1uPCzQiEoNxRtHKSnNKsTVrszGuuzhe5cClGxjEUj9PyLbSE4xsJq1MWazM2iSWwu5L1p1q85eVxA8lsI6HYUVyfc1GW2c9gugVnbkfkQs3dKZvf/0iJcqyJJTx3wsdPBz/PvsfB7wqOVBNiW/DymRyVVsLMTsLGhM3ahI2XKJZnHXyRI58UONG1mNwNeO6RPK8f9amOSZZnHL7xswmvLHl8ayLHg9sxTU8yv5vwzOkcc3sJC7sxpbam0lRMNhWJJdgpWawcKnH4ZoB0CxSrd3BUlerSeRbWq9jxa9w9dpHFW22ETrCc45Q3t3HCV4GrRPxjLN6kMfkTRMU5cs2XUZxAsAz8DIJXUDyS0lfywBiwQy5aRyqF4QRbwC3s4JsIJtE8huTbQAODB50AVmGvDvUI9DwwDufH4cgkrHVAtzFsaJ8Bqg44NA1JiKp+CEGM5HmIJuBMDqo1YJqEn8IJvgKcQjTfQKmPIYjTfuVQOly9CMkV4DBQBiLscBOZBPjdOZqLcxRCB6kEVw45eBacvNmkm3dY3IkpdxSvHPPZmpA0CzZfeLxA4ppxytLdPZ7+4DQqcaiXLB6/FHN6PWKpGrNwc5lyvctUxyMXabQQ/N5PjXFyI6LcVtyYdwhcwXhHk48Fh/ZiEPDAaoxU8OZRF0sL4vIExa1dbFXjzcd+mtnVm8BNrKSG4CZwC8N+btEa/w3c7rcgbkP7bNrfhQi9jN19CcEOhjWWhyeOwd1LoMfTB4uN1Ga7mD0SughWaE5+lLh8BKfZIdfcwbFmsRQktmRmL2FxJ2ZuO6FelESOYH3c5vRyRGxL7k7b7/7McxzHfOLjH+fprz+NlPe//ezMDJ/+9Kf5uU984gdeBi820Ph8YGY/lBwgTxIxwPQUm4qxpiLf1TQ8QWgJdouCwJFYiabYVRQ7Cj/USLMXCW6oyQfKQPrTDVTMbLDADzW3fUloSQpdhR2b+xUCxVhbIVU6myeg0FFmxkJDy5O0XNHfQKTrSBo5SaFtBtPFrsJJwA8NY/dqziKwBwwnLSVa2LQ8Scc1G1JENnQcyXbRQmjIB6YDAEF1TLCTl8S
WwI00Td/oyI6TwcyI5Zgn3DAk39X4QYJQCXZiOkzPtBcD55eCjitou5bBVynzkw/NRgW2gsCRdG1BmML1E0sQOOYagZ3+rQ1cP7YEuzlJaBu7tW2j372iNBsWSMFe3mW95LJbyBF7eZQUdKX5ruu6aMtDyxxgmSdtBTJp0Odn4pnGKAS4Vhq0LDPToJQJokJAyYdGDM1ewPUOeMptDJ52sQwK3vXp5vJoLKPHODKIKaVAWshEo6RERGZjkMTLE+Xy+67fQ+jZ6UDaHtyzm5YnjEAJpK4T23ZavyQNxGH/GkIlCEKESrCS2Dx1C0HXSWd6pcBOzJxCsWN46LW8TAH8xjaBI+im/uUHGifWNDyJlRgbdx1B3beILUHHMbbcKkq89NhyW6GEINfVuJHZLKLcUtRyFrWczV7eQ2ODtMBySXIlOn6ejl9ACImSFjIBO9bUfUliCbSwiB2XRKTtR5pyFlsJoSXYy0nanqTrCOwoQiRmwCCA3ZxF4Dh4QYQSAi8wOmnl8v3NhUJH0vLzZiMXrDSWJLjtGl6jjbY8EB6CgMS2iaWF1U5I3ByQ4AQxWnpox0cQYAUNlOtTaIf9GUakRAuBo8CJNV3Xou6btupFCiEkWto4UULomA2DehunJBJC16GR6n0772HH6UZR0qHrmVjxWtEiskyM6m000k5jDpjBrdCanYIkTB+yAbxQ44dmow8lIR8o/FBhJ7q/MU7LN5sTlTuqPzknNRQDE7cS0dvgQxJZgppv4aT87UQKbhUlbrrBQy7QFDqaWAic2MwG1V2JFuaNm0zMxiSxpO+v5ZbCjc2kQS7U5ENNZEHdl6alx+lGHWmZIwu6DuS7ZjDXi1HdyMKLzDFOAm1HUOgqvNjM1MWW8fHe5FhoC5q+RcszG4xIjfF/KWi4Fg1fUPcFbph5EyBMvQPbbOyyk7dRaZzyg3RjEwH5rmmDoWMeaL0OxAUzYE9kuvmIMDbsOkY/xZbu60wL2ClaND1p2osnCGyJEoKWb+KzFmYjrlpeYql0UyNp9Nx1BYEtCF0bHI9ix8zctX1B05e0XEnDF9TKUCtIap5EpWVX0pTLUib++6GxUy5I3zIJaPkuCA87UgilAPNQhyUQBAitUqCrxO+q/tshI+W0D7FRjgvEaPJpfC4iaKSbX7npj5mcs6M2fa4nAmggWMdsuJEfTEz046aCVjfdJMU2x5VdyKf9Rh+5ltDfeAsBeRuCCIPAczB8aAdcm8HUdQHYMeWOQwTFtKy9DV4EZuOQOP1MAAmSBkLHuGFM4rh4sWlzlobYMvE6F5g9JwA6jqDhW7Q8i0bOousYuzhhTN13CBybyILxWkKlkZALFDKK8IKIQteMewC2C4Np88g2bcAPzCY+Ko2ThU6CsqDrmv499vLprLqkkyul9UqQurdRTac/Ax07lVTHCWinPzElqCNYzbxDs6Hggqjte+MbpH930r9bKMdFOT4gcdqddPM58xY+H2jKzQSZjt3sWBNZoh/TpNbv/uD5n/7Xv8kv/dIvcfzo0e967D/4+3+foNPl81/60miBzUhGMpKRjGQkIxnJSH7k8q7SNlZXV/lf/9W/ZON74Dh/8xvPUSwW+cTeHrZtjyw2kpGMZCQjGclIRjKSH5m8qzPPH/7IR/jiF7/4PZ/333z60/zn/+gfjaw1kpGMZCQjGclIRjKS/38Mnn/5V36F8+ce4v3ve9/3fO5/8au/ypf+9E+pVqsji41kJCMZyUhGMpKRjORHJu/KOohXXnudP/3CF1hfW/u+r/HcM8/wviee4PKlSziO89cvlCZN+tHDOJ80V0CJ4eXm5mPdRw2JNKFCqMExkGKU0uN7aCDBIHEQATpNyBFaEwv6GDyhMgCdDIopSctmJSYhRgxhuegnFgptEj36DBk1jLsSypRXJAZfJfW9WLgskigrA7RfYsA26R7zPQiYbGmTbk+a9JgylpQQQ9CZ/vXUADGV3V++h0UScYroyxqAFImT3qN3fCwN4ij
JYJX6dkltpAVsZsvSJ97IFK3Te44MGML/9BFLGc7SPVrbj7jLSu+7JD1dGXwevXSSJIOV0/uZiUbCHjJnP66on+aaOSdNSunVtFd2IdCiV8dw3zkDH+mh/BAgoxR1pVPkX0avPd1mf4vk3sfxOIMRk0lWKz10oxj4HT0UoRjyB5V+rsSg7kMW0Kmv6+H2c4DXDftapp3r1GeFxiDqMn5FYj432L0UWJXmFfWSr3pl00O22d+Yeii/gZ/IaH95Vd8PRGQwhTrSJoEzxUNl6FmpO2pkMrAfOtW1Gui054XhPpv10JlZvCSKPkKuF1dExl36aLRM+zqIlti7z6bItPl9MVhnWkivrMk+Ilc3o06R9b0MCnO/J0cZtOcg5uhhv0qTl/v1y1wgFqIfy3vlijJxBiDaFxJ6KK9+6tI+PSoxHNd70RRAJhqRGDsLnSFo6Uz8Ti/ca0e9Vq76/qyJMnEwi+yTyUBfvTJGGdslGepVH02nh8OdyChaJj1Eq/FTmegBgi5TOJGi+lT/2uKePk4cwCjVpP1UBkmqMu1YZRRkcgWzsWHgjKLfgYh98VIySOi7HwIu25n2CtcAKgcce9CUZJwN4vuc8X6IvF473o9LldmO/YD+RhzY9khjfpxVbw/PmYkN/TiRtonsuEPvHx/0dauRSqCstHbi3p5vKE71Y9UAPdornx66U3JA6BaYpMsu96L+uNd57o0s7IfhaiRiyL6iP57p+7zSiIg+VrUfc8S7gKrTWnPh4QvcvXMHy7K+7+tMTkxw7eZNnn76aX7qJ3/y+77O+vo6t18tEwpYn7Jp5iVdRzJdN9n3ty/G1Dsep9YjTq7HfZUmvmSjZNN1JMe2ElaOC07cUTxzNsfSVkwh0NRLksAX5AJNrWBRc23qBYmSsDpuU4oUMzGoCy2WLgluz7tMBoqF7YRiV/FnF/LEUvCeWwFSw9UFl/m9hFIMNU/ylTM+tXGLjitpORI3FhS7Cm3ByoRD2xW8+YBEKIv33o04samR0sWKjZN/7qfLVLrw0gmPE9WI9YpNfrHN9G0JAmp5SdeVvDnvcO5WwlQjIbIFuyWLZt7CsiW7c+NcOTGL5eYoBLBzaAo3FHz5b41z+koLlSsQ+II78w4PrkRMdkw2+tqEhZ0isNqe4M8eznN+LTJYp46hArx41uX0coSbwBvnXKZ3FKuzFssVydJ8m+mbgqVaQiJgtqW4vOgyVU+Y7SqObkRMNxIW12I2x2z+j/cXGe/Asa2IVkkyuaeY9+Ch2yGxBTM1gb/3LSQvIlnFpgooQv/nseTLkHSBJtgRtBTEAegStJsmHZ8YzhyG00dg3IPVBgRNmMpBwYN2A1jEoOQk7dzP48Qdtg+/B9srMrG5Ta7bZfX4ArgVcrvbvH3hCG4scBJBs2gytivVbV574gizG13KW9/A7V4BrqUBpoEheJxAOFUo5uDMIuzsoZM5hNU02AHdQbCLF6zidre5++gvkF/3kdwBLCRTBKWLWGELmy3scA8ZaL7+oSWKHZjfTZjd2OHmUoGbcy5nbwfcmXYoRHB0I0FiCBbPnveYrBvajBObjOQxBdO1hNgSfOmxAlPthPGmQtuCvYLF+bWIXFvz9Pk8D90KcIFKW+HEmlZOMtFQRJ5gbi/m2GbM/K2bdMYXcaIACRQaNV48V+HwpmJ7wqXtW2gp+OqZHEerEeeu71CfGmenZJGLDK1gY9yiXbSY34nxFUzvJUzXEgqxxdZsgUOrVcob2+SK0xxeabC9UCJwHSptRaVrUwg0O2WHjQmLSksx0VTMr65TrF3FiV3yzW8CLgnzSBkQuC5udAVL23hBiAxcLLWDDARvPHGG2esrNGZnye/cJHYO05xZwtYW33piEk+UuHu8wp2FPK884HN4PeKvHs6jBKxMO5y9ExL5Dq+dLrC4ERPmXJQQ3Djq4ycWtZLL/E5MuauwY835jYCZlSqFSFHcbWC7ecpdhViCOLTIJ4pvn/CZbCXcmXIZ6yiEEHz
tYp5D2zFvz7k8fCfk1qLN3G7CUjXi+Sck27aHHxvyhtLQ8iwKkUZqzXvrCYtbMU+f8plqKxZ2EzbLFhtli/fcDih3NEfXY+oFg4jCE0zXEuoFSWALHt9LWKrGbI5blNoKP9JsjdnslSSlrmZzzuJwNWa6kTBVT7i66PLkzYDxukG/ubGhxXz1mMcD2zEvHnH5sZsBk13NjXmHes6i83iTeiNHoau4dVjy4EbCpSMSEUsW9xLmGgkPrcdM7yrGA83iVsSxpsKJNNcOOQTKYvOwZs92cGPN+ZWQZ886BNLi+pTFya2EQzsJf/mgD8LQheqe4KlrAVYCX3g4h2cJvn3Uo5mXfGPe5fiO4pkFl194ISDxirz08Byvn3d5Y97jwZWQ27MOM7WE2b2EVx7wsTqS97+lWNpOmGwlHFuLaBUk6+M2V4+7eF2NqzVdV5KLNLOh5qWjLifXI+Y6Jq62PcndkkMkJOdXQhwFLx71OLVqaBerkzbrZYsrR1wiKbGBu0suL5/LcfRuxHbF5u6M5MRqzKUf02x3cpR3JTNjHZZuCY7sROQDzc0Zl82yRTHSXJt3KQaKwBfYkaaZlzhCY9k2U6u3sYIEizq2s0hx8yqWfp5C7OF1y3Tmx4jEhPmc54FVLLpAgfbYBTbn55hYvYXkCoIbgI3iMBYbRPYjWOorwCpQw9AeekO+HIZ40QLy1KZ/Ar/9HGHh17Gi54AZYBMoYkgcCZwumsmX2wE0WsCDKfWhS5D/ZWz1LOgYigXY7SC6CyCuQEGDI6EEOHk4PoncuI1gCyFroF0EVeAunJmDo3mDz6ivovgggjuZiZ8WMI5iDieq4Te3cIKI6TAPQrN0p0biexxa28XVHlhQ7irmdhPuzjh84HKXjQmLwytdxkKIbAtXaSbrisAVbFZsKh2NEC5vnSzxxhMWdeFwfitifismF2q2yhaRLTiynbA8buFrmN6NKXc0yzMO//5inodvdFi69Sb5xrOGKFI6Tam6hmAbxWkEL2UeOrpYQQ7ECbR+1KD9iI2tJiago4GymQp+z5LRzc094PH0uLvAwxhU4BxwFriDFSucdpPImac1WyS/c5t2eZ7NSRvtGGpIdcLm+Y9IFm9pjm3FSGB5xiYfvgu0jd/4zd/kU5/6FK7r/rWv9T//zu/wR5//PDdv3fprXSe0BbWcxd2KzVbRxk40xbZpNN2JCEJBIVAUuqqPVZquJWwWJLm2ILQlt6ck4/WEq+M2bU+QDzRWArt5y2C39qDhCe6M2TQ8yW7Oop6TzOzF6KkALQSbZYu5rZhiK8GNNK9UrBTzpoktw42WSrNYjZBa88dzLt+acan5EkLBVtHgp0odzVZB0nYla+OCsXTGMPQLeF2QiSSxbKol2M5bvDznUmorGp5kzO4azmps0HUNX9JJbLxIE9kpqkzDbl5QHcuxPjHDralpGr5N4hZolCrIJOHl2QJgUW4oys2EtTEbDSxUY3IpuqrjCtqeIJaCt6ccxmsJgSOp5SVOAhsVQaWlqPuSy7M2U3sJpVjxZlmgJ2MEMLcVk0iY245YLVvkIs2hrRgpYP5ubBBSruDNCQs3MhikOxM2QsCJlYjxujIzqW4RqfeAZYR+EXgbSIj8B8Dx08DYgFZsZtRlC/Ch1cGggUJYnADPg+lxmMgBbZiPYbGcBuFCOkvhEhQOIwhoj8+SCxR20GJss8PW5Dxdv4AQsFVyCFyHxDIE0tgysxN3Jio4YUBh7yYGa9RNg2Qb2AMmwbfMrRbHwZdACewwnSqoA5cp7H0N6LA1f5JEz6UdhAPskuTKaJHHiddxojXs9jbXpnO4oWasrhjbaRM4kq2iRS40uKlKzfhZqaOQGt6acQyqK50nsRNYrEYkwvz98oyD1mAlmolWwmbRZnEtIrThrUkbSxsk0ng9JrIM3jDf1UzUYsaaitk9M32WWAIZhcg4obJRY6Nk0XVt2r5tMElAwxUEUqAtGy0MtkwqaOYkHVeyVbIodhRz1Zh
yU1FqJQS5Io2Cx9zNHWTS4fBaTLERsDtWIrYEE7WEOFdmcq1LwxPsFCwKHU2ppShVmwgalNa30oeaLiARuk1s+YDCba+Rr++ACHBqeyhyrI8XkDogciVgoZwKkeehLI9r0xXqxTIblRIrYxabZYM5e2vaoeMI7pYtEilp+zlujzsgbdx0gmt5wqOVyyG1ptBSzOwkOAkcvx1gB00mqy2ssMP0TsLUruJwN6Sat6nnLG5MGqxl2xFEtqDhCa5P2AbDlyLkGnlJsWMecnYmNZsFScuTgw2dtEHJ5ULNA3cj7ERzq2yT7ypygaLhSxquYKypKXUU5aai7UpKbc3SRoQTGVxULGFpPcaNNV3blEdJEyPrviSwBG6S4IWackthKbASwZHVCD/SfXRcIUh4s2ywnHdKJuZP7cSsjVns5QSNhZBIGiRbowAzewm3JyWRNCi42Q3F0bsh+a5iYdOwko8tG2RZPSdQGlamJdsFCz/SjNcVyxUTT2u2wZYWOopqTrKbtwhsaNmSQxsxsSV4bcZhuWLx6oRNtWDxStFCBoLXSxLtFInyU2yUbW7M2rw245BYgqYnyHcU5YaikZfYKy7Hb0bM7sSMtRSTtYR8R7FVsLg04xDYgnrOopaXFDqao3dDrk045ALN4qaZm+y6AhEIulL0cYTbvkQkmo4nDO7Ukbw1a7ObN6jTq1Mub806BK6FFyhiB8Zait2ZhG7TxW0KJhzD6Z6smYvaGlZLNjs5yd2K8bu2J/rzqXYM25Ux3PoOaDPpNr7VSnGiW8zcuQS2TZAfQ3U0UjfSOCeBbwIFtNDUcx6KPIKVdCC2gqYC3CZxpjAM4NX0uzSu90vRG0wHdEpTQExYPJP5XqbHeGaiZVFCmMBqkKLzepzhBkHhJDjCDOYiBbsh0AGxB2UPiiEEMRwqwqEc8DqIFsgVMzDkdRP7D7mw4Jjf3EYxb65DO405BosqRIi/V8XbW8Fv7HHsbkixZZCohVbCzEaTQkcxvRdTbpmxjhXDfDVmqyDRUrKwFiClwTI6kcHiNn1B4OVRjstWyebqIclqxeLkrYBCx6AnbWVwl5M1g3pr+bL/LmC7aHF1wkHGmkJ1DclVtNCUmgpFGcijmEj7NdK6aZz4ZZS9iOIosDKYbZ/2MGi/nDHHoQJESfrZWLq4IjZ9JLbpw5kAEtzuW7jdq8SOTydfwG3dIbEELV8QOhIhYLNg8faSxI5hcSMhsAV7eUmloX64yzZWVlf5zGc+Q2139wezxsS2+fKf/AmPPPooe9vbf61r3fPSRBz8AuWg1wci85pVfIfzv8PH33eZD/rdr48+4EXOAW+HBN97WQcvpcV33JPpnmvd50Cp7/O12Ff2d3CP/TsN7i9f9jWp/u4lHmzhiLi/EjngXfE70Uf2E/HObCAO9FrxDn3s3ld74vtwPC0OfrHJffR8v9IfpEJxz1H6/mbhu+/Z9R0s9h18N3NfcfASix67VX/XRvP9tLB7lSr2/aC/l5ii79W8+C5B5HvQ7/4PxTtwQfFdVCMOCHTvqK5CHFAI8Y7qIr4vO4n71l0coBfxDvxP3ueYg84V3yl09Q8S393vhfi++ifxnfR4n75G36d531fjB11QfLdYJt5hT3ZQ6fX36v0/4Ff07ySi7V+lIN6ZtQ4yVHbZyH3Upu8TK8RBcV+IfTs93j+2iPv35PexrHhHrfZ7VPA7vtb9+q0f2sxzHMd89Kd+iq985Sv33UXw+5FzZ8/y8Y99nN/9zGcYyUhGMpKRjGQkIxnJSN5N+aENnn/rv/3vePyxx3j80Ud/4Nf+l5/7ff7pP/knI+uNZCQjGclIRjKSkYzkXZUfyrIUBj8QAAAgAElEQVSNt95+m3/9r/6Au3fu/FAKbVkW/+Kzn+WJD3yQ5575q5EVRzKSkYxkJCMZyUhG8q7ID2Xm+cefeopnn3nmh1rwv/epT6GB3/v93/+ezxVDFJNhvFX/Py36x4kMvaRHbFJigLoSGXJMDyM3QKF
lJLtmcf/axgw6q1/G4dMOXPYk9q277SP2tE5xWgaBBCnyiQzaaEgnug9xiXsYKJVF1w0qYyhYwuDrUiRdkqm7UPfWO0PqQygI77eUJ4MsM/+KnjnoEYmyqJ2h8zLYo+4+BM/wYd9p4aWAJOMgYY9Jlr1bj/vEvuVYaQ3V/vVVatiA/SVeAwRdD/qn76sScd9vdJ9JpofXnikNKoukSw3bxzdFmDW8qo8MypZA77cJwwjH/euRuyIDAuph1TI4KrUPh5W9xH4CoCCDI1P7ay0ybUv0cV8DpJbONO19+Lr7oM2GsFnc2wbvdRXRL5++p1XuR2NlGHDotJ1kWWD70Uuir68hgpcY8r4U53XPqcPFeIfr8vU+O2R1IYY8fxgBJ8hgBNMYlvUbkSFH7a+lEt+lMPf5rB+PU/WqfTGg56M9DFevTkk/fgzHpeG2lI09Awxor7biHjKW7h+3/3f/Opn+IxL3wrVQug81uxe5l95Z6QG2kWx/Y9rDfqyYZhAnARJ5cH+xT3V9/SXigHXdGQxfIugjNyXDULes7vq36eUKZXGsWb/u21QMoRC5B3Qq7okFg/W7+h7nH+A57xPr74lEB0VhPcDfDZ0T31u+AxcEc/CaYEgTC7NdhQalBgpI9vc7g/hzT79yYPkHjVgP1TsTS9PfSb+vFUOxst+n68xY54D6ZrGRQ+1BDC/RDvttcP969l5mFfcJXOLeumZ1qg/ylf3xeP/neii20oPXiUE/kwz1LT/EwfNv/fZv8wu/+IscOXz4hz7y//f/9s/47d/6Laqbm9/TecsLktuHLZq+ZL1iYytN4AoaOUEndvBiiGYiGgXJtQdt1icksQXjXcXjGyFTjYQza4pcqHlkI+TVhxzWpyy8SLNTstgpW6wcEky1FXmlCXOY7PC8RceThJs5lIRCV7FbtqgVJcvzDh9fC7lQjdDAnVmH2BHYMTiJphgqfn4z5EwjxlGaekngJppLxxxiC+q+pDojWNyGqUCxMe5w+azLTkVw7aSHtiR/92ZMpaM4WU+4M2uTjxT1psFe3ZhzmGkmHN6L+fHlkFrJYm3KZnNRsrAXM9NMaPmSRMDpvYjJvT1iR3DtlIeyJB+91aU5kWd7XHLphIcX6H4HcnvJ5ehOTDlQ7JQtvn3e42dWAppF2R9o1nOCOJbENnR8wUMrMUJBzov42Y2EheuSQ7sxlZZicS+m1NVUAkXHFbR8SbMsqI9J1mcsEk/wK1e6PLwRooCzqx3cUNHMScb2qhyq3gYdpQExwVAn2sAW3t7r8PAMPLgESxPw/mPw0BHI2UALhEVr/AJ4ZdjtwN0N2KnDsRl44Ahs1cFzMBnYHWAbxUN47ZsIWvidgFy7iwza7CyWcWJNsVbFUjVmGgpLQ+zYaASlVoS2baZbCru1gSBB46dl7mIweDm0nINmbIgftTZcOIWgBvNH4L0PQHkhLU+IZpqNYz7bZ5eI7U+gWUBbD+LWriF0m92Jh0gYR6NYaCk6ecnOlAXS4kQ14OJahBNrCh1NIVD9QW6lrfiPrnSZaCsq7YS9ssXWuMGJXTrmcnfB5sn1kEbeYBHdGOO3JUOMSSxBddLCUpqOL9mpWJTbimZOcPlhi8TqDVQc/OYWMuwS5FyssM4Hbq3ihzHVGUPQ2B2zsSTU8wIsj7Zv7rk+abNXMCFvopWgJLiJYrLWpJUX1IqSN066xI6LoE0rJ5BhFy9UbI1bBK5AxiF2Z5tKO+bk2ha1osQLE7Rlm3Aq3DTStrDYRoY74E2imULJYyjGuPbEIp3KIZJCkYlGhEaTa9YRJCTFceygy9a0w8mtiFys8WKNpeCRlZBioHh8PWKyrZjuKN581KPjSwJXENkWuW5I1xO0cpKbiw5taRmOr4TdssSKA7qVCiJRKMcFrQkcKO0J2iVYnraJpGB9wuLWrERq6HqC8a6ikRc8vh7ixZpc1zDqI0tw5rbm7GZEZy6h3FZMtBU2muV5m+VZm8mmInQEUUGRSEPc8WL
N42sRgWuYwZENbqSw4ojIFjTzEploOo6k6wo2Ji3G25pWXlIvScY6inJgCACnNmJiW9D2JVsTNqerEQLNZCthvKOojltsjdk82krIhZqprmGt3zruMl9XIAS3u2N4sWK6mXBxJcKNYboB842EnZLEj4y/Xz7isjkpaRQkSsDdY5LJlmaulbBUVUy0E+brCX6kObOtaLiCim1oSk4CH9yIKMaKRh4WGwpLaS4fdnlyM8aNwc0l7I0pznYVuUjzn6yEtMoWa4dzLO4lPHQ34lgzYX3GYqaesHLEJdeocXElYKdgMdmImdsNKXQScu0OC3sxU62E9y4bu3VcgRtrnFgTOpBPKQoLtZhKW7NVsTi7HfPB1YCxtjKDdk9RK0hiWzBfTzi9FfG+1YhjuxH5ULNdlJzYjmkWJIkwBBUv0hy+LcFSzDcSplcsWj5cPeoSS8PTbRc0gSM4thOzV5S4kWZqZ4dD2wFb4xYze032jh5F2zkEXZRlI9lKB411RNCk0LhNsb2O1cfGGfKCEg8Q5AvMb15F0sRQFspAE4tbwA5u+0XgUNpebXNdrwKnjoBjAZPgFIAZSvXLwBy5jW+msXQPeAzNWQP0fuIBKPiwpdO4HwK7oAxlLLf5gsGquQ5stEyf4+7BB4/BtgRfwFgOpitwfAkKwsTzcwsg2uaeH3rc9JfFPGx34PEL2KfrqT7aGNJGAkxhxXsIGmCBThKc+i1mtraxWpuU9u6ipURqxdxum0ZOMl5r8tSNDYqtFot7MfVxj3bJYbwZUegkLJ9wqRcsXjzjceuoT2LZeJFiogo7RYkGdisWdqyZaCrygWKqkXB6M+bhlYDZvQ5WrIkswd+73GZyZweLKtBB6iq5nVewuQU0sLkKnADppvb0QXaxg7dxeDOtowtMwFQx7b8DiM4Y/dY66fe30bhoFjC4uii12zIwjWYG8BBejnwrRpHDCgI6jqDUjJjcbXBku8sTlxN2xi1EnND1TPvpePIHO3heXVvjf/nd3+Uz//yfvyvT5qVSic9+9rP8h5/6pe/pvJeXBC8cs9gqWlyedrATaPqS7aLDeuhRCDTBQsROQfJXD9q8NekQSMFUM+HhK10WNxNOv5FQ6CguXAv4o3MuV6Yd/EBzd8xmpWLz6hHDpnVjRasA+VizVbRo5ATdW0UiG8bairsVm+qYzauLLo9c6vLwNTM//MqiS+hoE3QjzVhb89iNgMXNGCeGjTFwYs0XjnlEUrCdk1yelyytCGaaiqtTNl85kWNl3OHZoz6xLXnym12mGwlHNiNeXfQodjVre0WUEPzVksfcTsyxlYgfezlgbdzi8ozLrUOCo6uG4bibl2gLjt4NmLmxRmTBX57wUbbFE8822BsrcXfC5ssnchRTbqiS8PKSy4nbEaWmYm3c5nfO5Hjvq222S9YQwiaMJYElqecsLrxuEHpFN+Ti5YilpyXH7sZM7SYcWYmoNBLGGgktz6CfdsuCzYrkyqxL6Ar+1leaXHy7gwYuvFIj143ZyVtM3LrO0ddfBhWgsNIGlU8Dzw1s/UU4Og/nF+HoBJw6DKeXwLOBbbAcVhdOwdEyLNfhW5dhpQpLs4axvLaVGTzvAbdIOEeu/U0EO5RqHUq7TazuHsszU7gdTWX1OpbeZL6aYMeCrmuTCKjsBWjbZn47xmlfT8taSH+3gVkgT5zPg+7CRBFu1ODscZCrcHEJHjoG71/MDObnuH6syO3jR9ib/nFgiaB8DK/7DJIaKycukshJ/j/23jzKsqM88PxF3PVtua+Vte+SSltpR0hsAhpE26YxGLe3MWOw2/acmWmvM90zGNzuOaenB9zGeIw93W6fobEBGzBgJCEkBAKtSCpRpSrVvmYtmZV7vu3eGxHzR8RbMitrE8KIOe87553Keu/euLF+EfeL+H4fZKyezljIS04M+hjPZ+v+MjfvqRKlhtKCpquqm1at/jnF2x4qMzCTMTCtOdPjccwxYR9eF7N7VcDOl2tMlDykgVy
iKfsw0SVJPUEq4dhggJ8ZZvMeJ3s9+hYUswXJwzt8G7DEAITkJo8jaotUChFeNskdX99HvppyZDika0Ez3usjhOF8SaDDPLM5iUZwcDDkbMm3hKPpDCUgTjVD49PM5SXnuzy+tCkmi2IEC8wWBLJaJqoZjg0EzAcCL6nizx2jfyZh67PHOVfyiSop+IFV9EEDyTmFxwkk4+hoEBihntuE8vp4ass6FgbXk+a7GThXBTSFc1MIUrJCP3GlwqmBmO1HEvJ1Ta5mUY83v1ihq6y5ZV+VkemM/rmMr16TY85h4pJAUpivUo0EM3nJ02M55j2LjMw8wXhPgF+rsNg9AFphghgB1CJJ1xmY6IJdqwISDw4Mhjy1ysdTUI4FfYuK6aLHrS9ViRNDftGgpEWwXfus4ZqjdRbHMnrnNQNzGR6aF8YCvj8SMHI+oxIKFouaxJPM5z1yieGWfTUqscRXhiSwWEIvqVEPBNMl+yJVCSXlWHKkP6B/NmOmIJnstn2jZ9Ey5K85kFoMWyw5NuBz3aEaAsHQdEbfnOJwf8CJPp/1M4qCm9QzCU9tiCxKUcJT5V6iVLNqUnHDixlxahg6D2PTKSd6fOLE0LWoeXB9zNGhgPNdHlrC99dL+qcUq2dSNh42DMxnrJ1IydU1G08oZmJJr5eQSyBMDTsP1CgkmomSYK3T5V9YF3HD8bp9SSpozvbDmkVNvm64a3eN6a6Ag8NF1o2n3PBiwthUxsvDIasmM/asDilOTHDzrgrHunxGJxLWnCjTPa8pzcyx7lSdkRnFLS9WydcN5ViSqxvi1KLnijVDlGrWnczon1Mc6vO45mSd2/ZULWpMg8krJrp8aj6sPpey43CNnXvqbDqZUKwoxrsDNhxLmC5Z40r/tDUsrd8lMaFFiY7u9pmLJY9tiEl9G4BmpmSoRJLN43Umuy0Cc/TQOOsPznOg4DN6co6zY5vQYcnqZz9GcNRZfCfw0hkKp79Hafag+77oFsF50ngrta4Sa/Y9jmTK6fguLOv328BxJP8Zy+IXWEydspjRm9fCgLGL55ECMEzp/DeAETzzcbcwO4rlBW+FAQ23bIBSHs4aLDO6Ahxz6RoC82ewfhDCGNSUzeuak3ahntQhL6G3YLGnG9dB7MOqAmwZhXDGLhBvvRYWU+guwoEF2LkVNp90i+fzWLxqnYx+JGeRTKECBVFCNPc9Vu89QZAco+fUHpABUmnWHl1gJi8ZPDHF6x88Qu/kNOvOZJzviZkrRgxNpJQWFC+siZksefz59jzPrM2j/IB8Yhg8KjjdY4PWjfd6hKlhaEZRqhpGJxXXHalzy/NVVp+YI0w1aSB4x9cXGD5+2rXZPJLDxHOfB57Eoli/Bqxx82hg2y4/j+CLwOfd4jmy7dPX5do2AbZAPoDKrPv9addWI8DzzuB0AngOGHTzZwRBnvx0hqFItFimHEt6ztcYPH6eDccq3PWA4mSvj5coKpF9kV7Mi1dv8ZxlGW++7z4eefhhpPwni/rNu+6/n0xlfOFLX7qqsyrSXLiVuBIiSC7fbhRtW3/uE7Tf566XZmW810pbO63rxZLoau0Xtkd+a//FM0ufbcRSCmU7Vs9Ilm7LsDR+UaM8xgVfapZdLIXHGM8Z2dzzAbTfSs9vy3vz+ct2sI0UKxN0Gn+3/y7dC2hjC0iIJVstS7fa3TaM39rad5hQuyHUxJBdDG7mtUX6W/ncTHPb3UUobG6DXbAt12iFViSrZju2xzYSbVi8i1XKEoCPuPw+/JL94vat0sZxiuX7+nJJnmjbnm+1g2j2oQueLsAEbf2z7VSL36gFccHJh4v3gbb+6S054mRW3rpt617CrLzfL8zKB3bat+bDiwxSYVbQEpdFfYkV8yHN0m3qlbck29p9WZCxpg66zNahZIXgmJfJrVzWbst3QM0KWKtGlNYlgTldnXlt9y7vzkZcWFTao7Euh+21j/d2lJlctnUsxLLxujIhTbalI9v7sVx2NK6tw3rLWkq
2ZbW9XZr9qr31l5XZtOtEIVY+pbKszEaKVjTI5hazHZfNIzKNgdUWna9xH8vHYbs+FK15wCyP0NimDExzcmmNRiNWHlftHUuwMpKvOR+5/BpPOH0hVgBdXhhh1aw0dppHtuRFdHKjh5sL7zNm6VmbJT3EW/mIxEpHMpbkybvI/HCRszMrpWOWL0aWD8hLITLFshmgNXYabWD7iGibisxSfW6sLpZLjr5cHDPYjNwsxZK+qAOxLMJi+yzBxSeYC3rQ5Vd6rXi03tK2WDLDNarRLEX5SfsxwQqa3LyKxzb+949+lLe86U3c+kOga1xOvvaVr/DBX/kVjDGdU+wd6UhHOtKRjnSkIx35ocmrsnh+ef9+/ubTn+bjH/vYj6QQpVKJ3/qt3+KDv/qrnRbtSEc60pGOdKQjHenIa3vx/La3vpWHHnroVQnB/Url93/v9/jCF7/EocOHO63akY50pCMd6UhHOtKR1+bi+T9+7GO85e1vZ+uWLT/agkjJ0UMHuefee0nT9OoTaJx1YulJ0DaYySVvbf59sbNr7j/iIkktwYK1nfdsx321X9jEynHh8dzmuc+LhfM1V1ctF/yn7UCeXnYm6oLIysuPqLYddTIXyfuSuhfLkXoXIsTMJcKPL03/UufCLsQTcZFjQOaiR8tWYhOaq6pcoS9s65XTNFwMq7Qk31q33Wc5Q+YSdbHSMTODuOAM4AVnX00bBlBcWEeapZjAJbAgsQxntqxTXFF31UuxY43Sts6Zu7G5pBju7HnzzKlwOMfWAc/lEKzG981z95ds5hW+1OYi+sQ0873iTyuMFXGReljpu3aEpjAWGSaU+02vPLxTVkBKLcuJMBfJUOMM60rjxlyIQWSFyL7CrKA3V0DFYS6t2oxYOfdmGd2sRZw0Lezmknwv6wVmBceZtuq6EAe6FOclVpwHzIonXpeC1MSKza3FMmrmytr/AlXSng+9bIy2N+2lMHcXe9qlhoVpq0cjliI5m1jStu/MEj22ogJe8qe5pK5fhu+8aHVdTvushKC7VB7NygP6Cpqr1UBmucK7KhGNPt7eh8Slx2NDp6diRZjp0sxf0DdMs//rFdvgUqHRs7axZy6uHC9y9tys+Lxs6X2Xcx2izZfg1Vo8z87O8gcf/jCf+uQnXxNvAt3d3fzmb/4m/+O//teXvM6TUEwgUhYBVYsF830epjcjTASjNU3Pgp1Uu2sQeoJ65FGPbXVVI4tTy3yBrwyryzad0GGlGhxloaErsbzlUBl0aFjMSypF6/3e4NEq5+yR+vYz3ytJfcvSrMWCmRGfxS67UFVFTSnRjJYNsTLcWNVIA8M1+28h1QzUNCY0aB9SzxIvqg7RVY0F5W6oB9bJJJcZ4szQmxqkgSS0B/vjzHblrgokvu3X1dh60mgpHUdaMVZWCG2odHnMDHss9DRdaCgXJNWcoJgYtARPG3prml+YS62jpUPw1SJBpUswWDeUEk1v3SKtAmUole1ojTKD9mChR1LNQz2UFFJDIdF01zVKCALVYo5Kbcs1WFVIlaGlIKcMGEvJFQpM07E1hdGiGw51iELI50ApSFKo1kHp5vgupWWLJYo8SFMo16GewGLFlj1zPM5c2LZs1G45Al59ATBkHhhhEEY3h+LsqMdil6RaECSxREVRm4rrx9AN9GA9hUeBEWQ9BXzINFw3DN0lEDnoKsBAL+QiYAAYRTHE2FxKd2WWMJkBMvzyLNY7vEKhNg/CQ2Cv6atU6K9WqBcChEqQRlPLC7oyTTExxInCVwblCYK0Rqm+QJgs0lfLLAIyFhSVYbiqKaWG0YpCScFct09PYvAM5DLNmpqmXBBUc5KFPkm5JEl9QTUWFKvWuUtLy5bVYYTxPPy0DiiE1syu8hmuKIvpFlBKNatrGrKUQmroq2UMVRWzw5JAGeI0IU4rREmVak9Ed3WRruoib15I0H6AoE6puojxfKo5STHVKA+82hweZaLqNDKzdbMw5JMWIgw+6LqbBLpIGQF
C/GSRBl977tYe+moWjya0JkoTECFJV4xB4iVlaqWALLCIuHJRUMtZZx5P2/Hpa/A0VHOS/ort+xLrHjM/GlIPBKkvSEIwvmF+SDI35JHLNEZKapFgfjTm/PaY2WGf2SHJYpfH6qom8QWFzJBLDXcuKAqJJp8a8qkh84RDSUoS37KF67Edn8qD2I1VLaA6osmlmlVVTT20TmDddUEtFCx2Q3fNYg7LeYtAmxv0iNIM7QnKBZv+Yl+DOQy1UFItSXKZoRYIlGcd++YGfDLPXhdoQz61ZBIloR6CZ2zf8xSkke1Hq2qaUBnWLirqkZ0QMw8yCw2glpMsFCWRgkpOsqqi8Y2hXJD0poZMgqegkpcII/G1pYUYt9IOMjseFro8+hJt/Y6MoZYTVHOCfGoYqtpyGGBNoi2TWoKfCYaqEDhLRDlv22u210MaQy61eLtQWX29pmJ1TZgm3LKQUstZB+Zqzs4XfpZSSDVhZvCVnW9yqUZJWChJyrGgXJRoDwqJYU1FESpNNS8sBScQDJcNtVAwO+yRBBYrqaWd7/KpobeuyWWGas62Zdn96xlDf9U+FwOJL+mrWyysAAqJoBoKqrGklGhySYbM6kidsbGikEmNQnUBr271k1+eRrEG2OD0ZeJ0aoR1Aqu2FjZJjXx1Flh01zhncEL3iVwaDWqRW0RnjunvSZtePufuz9rSb7xOzNvPYME6l6mGOWsYWNO2GMzZ50kBIgC22TSDoG09p62ho/EyaFxeMuXymUIUwVgPFAvwxvUWWRc4xB4592+ApNKcM6SpItLEItt8H4MkDbow0rd9zpeMlSt2DjIaFVp9V49cf1YJnqoxUq7Sk6T81HxKKdEIA/WiwFfga9sXe+p2jpwd9CgmdnFfqmcgBFnkk8SCgWqK1AYjAgwjWFLGKle3jbb0gRp0SeAOW67RAogilpgiXRumkM/DcAkKRdiuIYohbeBcG+2buuuHXL3cDpQwhGQMUCt44AuMjMFoqjlBGkp0EFhXRmUYrWQIlRJmmkKiiZV55YvnNE25+dZb+eZjj/1Ij2ssl9//3d/lwQce4PCljm8EhlWnDEPzGaVEc2jU469viMlumGNo3OP1L1W47iFJmBmu2a9JYsPegYBDI3ZBfWww4NBIwFRR0l3RvG6XIp9o+hY0QwsZqQ++EuQSw7YTCaLm0bugqPRoHl0fsWe9YLroEaRQ9yUVX+Bpw0zRY6ro8fC2PIdKHnO+x9GhgP98W4lvbQupRJLF1SlbTybctavO8EzGW5636Kg791chkaw9n7HzcJW0lLHQZZjp8qj7gr1jIVoK9oyEfPsaj8M9kpovWD2dsep8xnVTGUFmONXrUfclq86nhMpwzS7FRI9Fp+0ZDfGVoZzzEEqRm5/nDU+UCWtV9qyN+eubi3xjUx7j2wlmz+qQY0MBG8YTkkAQ1ww37avx6/91FiWhd16x8WzC7qGI726HG0/U2Xoi4cYDNSqRoHdRs+1JiWcMI1OKSih4fEvM/lURZ3s81k5kbBpPufZYnXnPozhnX2JSIK4bVp+uc+ezZYKZCZJAMno+RbKIJEHMCNLh0A2wabh5HfRp4GUY7IN1q+DAOTh+Gr6zD2YXrfKNPUYO/SNsGIS+CI6dhhdehsPj8OhTVqGfX7BKdFM3UEGg3CCeQhhFvPAEUGfWsVNlVsMQojF87uYiT2+O2L02YnywQLl32DlFV6jlbyMT1wGbgBtIuRvF7fjp40AR5hbg3ltg+2YYHIbN62HnjTAyiJbXAjtZGHsdb/3YBNd886t0Tf03BNMEySeARxA8wbrd30F7RSQTbP/uF7n9wee46Rt7GR/qJpw+T75S49DqgA1nU7YeT1h9qkqpbNnMxfHDbH/6uwwefJpbX5ynq6I5OOIzNqd43QtVtp+sc8+uCpVQ8FfX5NlyOiFX1YxMKV73UpXvro3Ztyrg4R0hj2wNmSp6HBoJWHfAs/0u8sALSIqrSKMivUcOIyjjUeHLN/Twxu+
UQUAtkGw4o7jtpQS/Ms3mExk3v7DA63ZV+chtMd2LhtUHTjK870VGv/8yB9eNcP13nuKap57nA385Tr1UQnKO7c88SRYXOTTss+VEwnwkiee/guRJ+o//v4Ts4o5HXuKxa0tMDI+iRR9BdgwIMWxj/Lq7MfSQn3wOUJjM8I9vuYmb99Ts4r+aMHT4DFlxgLOrLU4wd+YFjo71MdHlcXzQ56XVEQdGQxYDQZDB6V6fYkWTqxv2j3rc8v06lViQZeBryUM7+hjv8zlX8jnXJ8mKmoevzfHlHTlWnc8wYZ7xPo8nto/xsXvG+LubCnzuxhxf3Zrj9bvqPF/wWDelWD2d8jNfK7P+TMqasylrzmbM5gVzBY/DwwEnewIWI8mxgQDPQDmSrPmej2cMtUBy5I4ao9OaO/bUGO/zqUSCLUfhaK/HU9fDtcfqSG3YNRZxvuTx8PV5Ro6fpxJHPLc64lyXx2M7JeXQLrAODPg8tzFgzdmU8V6fxViSSfjMTXkWchZ32FXRrJ5UdJc15Vhwui+gUDXctbtCbznj3CBEqeHuPVX65xVve3SRYwMBSggOFgQLPdbgcWA45OubcgwsKL4/GnD3ixXimuGl1SEbpjImI4+eBcW+0ZCZhZB83XCm16ceCDwDPQuWKf/wloAdpxJyQhEqODASsG8kYu1Exs2HEw72BygPbjmdshhLqqHETIfcuFeRExnSCJ7cYNF4n722gJca1pzOyNcN/QuKODG85aEF8CQDx07yi38/y+mBCOWHHBgNUcLQMzBQPWEAACAASURBVDnFxlMp/XOKUkWT+TB2VlELJY9tyvHgcMj310RUQ8Hmkwl37a7RP6fZOxoxU/KYLElu/17G/qGA/+f2mIluGx+hEkkG5jLWn8644WiNVZMZ+0YDpgsez45FzBQkUQo370/pLmukMkwUJTuO1Rnv90EZ1p6BvUMB+0dCth1LWHVyDt8cx1+Y4Q3fWyRcPMb65x8lVF8EniY3/ynObn4LGW8GPHwmgXPUe/vcoupFGuQGX51i7KW/Q/AN4LRbkBWAfnvtcLddePEosAPLzMogq1hDSS4EnoItw3DvOKybxyLOGsGlAuBvEfwtbFkNvm8NKd55YAeGndZokdOwYwQ294PvwWg3hnttnlZ3O2tmBc6nUE2gVreL6FTDRAVmFiEXA7MwPAA3X2NRdu+6x/7b2+0W8f3uZaILj2ebc05Q30dYG0cxiMmVgJjZVdtJCiXqoSSJIt76xd3IrIJMqyz29HNoxOdUv48SAn9xgvz4Ee758l5ufGGWD31ujk1nEqJ6xqn1PqWyplgz5OuaG/fVUJ7gCzcW2XI0RSDYfGgRLWC2p4uzvQGve2qGIKmjij1USq8HVlHvvtm13XnXjnngEHSHHLj1Z2x9b18Nm8bcS0lo6987befpt47APWvhLVUYHLQYZxahO0DwOGLwKGztwaIFNae2/SyGTRgGqI7cxvGRAkoGpKVVeCph/3DIRF9MvWsIaQTd8xlveHyOsDrNwEzGlhMZQ9PqlS+e/+2HP8w73/lObvsR0DUuaVX2PL70xS/yjvvvv4Qd3rQwKlhrRKhBOLaQkWAcp6mB5loecUu0RUEzXmuLcgkGRly4vR2Ypfi75QHqBAZHgms+J9TG4t9MWyRCz72gSpcH2bZd7TBvwixD0bmy+m2YlWYEoGXYt+ZWt2zDjl1wXEKgXdltugbfmAu3KZsoImc9jETbsy3+yW+i+uw1jbrV3tLtF+nqT7blXa+Ez2psNXnYN/6LbkG77/SyKETG/V9KCOSysyZh+16OXVSLNozOJdF10ELlsOR4QHv/kKYdJ9fa8m8/AGF7m3aKvK0c2lgrSBO51PYxGl1sWGEakMWc+7vR80xzEtJCYjzZQhaKRrRA28+MaN/OsnVgkBivhQkz7lr7sVdGpg2fJez3nmtfz7QQjCsi7jBL6tlgr9f+UvyfkY0jFnY8Gw+6THv0Nduu0mDzLCQ632KuadeeYkm
0UL+t7iRayGVtJZp5bB6VoMmIxDOmWQe2itvxh+7atuiM0lx4bKtdbxmPVuAz0aq39o/nxnx715cYQvd949+GDmnoLd3APbadXRGmTTe16RWBwXitTfYGqtPItu1N6fSPFm1lMC28pRStMrvv2pGhnjFNXdI+XsQyXbZktDl90l587QqgvdZ3S/CaTpc16lM7/bEEP+qwbrJBmTRto7hN17dj70TbEQUtbb+74KifMEsiL3quzsNlURWbui9sHRnTUQtB2p62aUOHtR9D8YwNHSJd3o1szGW2fzQjNjp9HJmluMfm3CFbc0CjvzWvkcvUqrPCt/52USql0wcuAp72GrA0r23Mhe75SyPOtbTAchCe7z4roD1NW8jJlc4MNcdie8fylq82rKXULD+n0JbHFY4ZNQF/7Ri8lU7RNZCD7chR7XR600q90gGuFYGAy44btWXZk00F0hzf7ZhaKTC+xPgCFbb63wXrAbfu8F0ft2m36UMczrRxiLIZSbGBkms/zGf1oLzoMQ2WhrBsnpRZfizGby2WmsdH2qLuGr0UCOzG/BJcoQATitYarx3reLVy+MgRPv/Zz/KffkR0jcvJDddfz+23385/+sQnVvy9Wq10Trt3pCMd6UhHOtKRjnTkquUVLZ7vf9e7+NznP4/v+6/Zgn3qz/+cf/9Hf0SWZSsZnjvSkY50pCMd6UhHOtKRH/7i+U8/+UluueUWbt258zVdsEI+zyc++UlueI3nsyMd6UhHOtKRjnSkI/8/XTxPT0/zkY98hL/41Kd+LAr3vve8h7Vr1vAnf/qnV3yPucz/r/jG5d+Zq0yv7b6V0GjiMs++gDjTdubHXGH2L5rZy3CmLoAtmass/EWoNBdDJYl/sh51MVzORSpMXBrcZK6+ly3LByvU86X4RyvhfMzl87BimO/LdHQu3WfNK67/y/9yqf6QXFWqr0Z/uZKmXNoGl8q/WK5LVhpa5hJ/r+C/YK6sd62oy8wlimlW0mVXoX9WxNxdbcOZH6zlzBWme9H8X6J9Loq8bDvVe0XZN+aKC7P8Sv0qVJdZWetfsiLFZevUvIIxZq6wB+t/kpnin07MlY2kiwxWA86f5iJzijGXn4na3WqW9cuLr1XEVZbjKvv9RdMRl73jQjTmhQP+qhbP115/PV/6h3+gkM//2LwdPPiP/8gffvQPOTcx0fwuziymzghB4kOgoSc1ZKmklLUqKfUFOYdpCoz17ZEGPIcJSiNQnkBqQxo6BwgMqS+IMkPmfBUyD9LAYl2ktJi0JBKkoVWSgbZ5yXxB5tILnHJNQ0FvYhFuoTKEaeN7SRoI0jbEUjF1TjVAIYU4M875BWpFQRIIfAP51NB+TN7Thu5EWeRSJEgi69yVupjuaSgs0smVpZaX6ChEh/4SPm/jyP9gYpF9SKgULO4oDQS1vGjyPOs9kPnW3U0JiJS0KC0BaSSo9EmUtBgcAySxoFq0jhVJCLWcJZQYYetWO1+DalFSz9kypIGwCB7pU+73nBONJPMLVMd8hGpzEmw4Y4D1mq7V7f+zDOoNB4NBB7/taqHrGu+gSQaUbFqJQ9UlaplCj9zEKBEY8llGGgm0F6PxSQOL2PMMlBJFoJTFIaYpBh8dBO59dwwYQgceS7wl4hCC0CL2ukPrVJIkoDKE8YEA7UnKwz4tSni3+zsGDFrkQci2CcZg/IAwS9ChjzCGONNkzplM+5I0FMRp5hSnRSYJrZHGkM9aDlAG0J5tm7FEE2aaTNoxECvrGNtQvgOJpp6T+MrQm2hU0HIiwtWfDj2LYBKRxW95tn/WQ0GgjeOOa7IIMIpAK26raHJZitAKUBjPI85qQOacLwVeVrflDnx0IIiVwddQSKpYL6MY6zDog5TkM4PnHE5VGNvvgTT2ySg5pyeDIKVYz5pOgGkhpN4VoqLAOt8So0WOWq7l+JiE1ulK+5D5kPZrktg69hQzQz2yjlrdqcWzKQmqaMd05gmSQFrcmyeoF4RzyoJK0UM
FGoO9d6xu0VIjGcSpJp8ZpDEOn2kRdb6GSpekkpfEymHhjKHSK6nlJJknqOXtuM20T5xZpJpyTntZYJCBBtVyxPa17fO51KB8ifZs2cNMU6jb++LMUEp1U0cGyo6pemzrPg2sPtDCIkFVQ/96Vs8iBLGCrrodK5kvUNLi/ULXdgVt9XrDOS52vPV8ZlBewynZECmLwKznbD48afub59jQusHDDSCfgZKCOLW/J845NAkFkTL0JBY7F2V2rKSeIDIQK0NfYoiUpiuxeVZtzpDK1V0aWl1ZL4WYILD17kMW2DYwnofxPBYHLc6vXrQ4Ty1te4bKsDrRGA9qeVveUmLzk0tduaVAYuuoK4V6TlLL2b6UhrY9ig5jWMisPo4cui8JLe4ObJ66E0OgDbVYUg8kRto2CpTB1wbt21lEeYHVL7LhoBc0dZGfJU5vNXSY7xx/QyylwQAli2OjgflUbXoyD6y3NIusoeOy1pJJGYsnzbrtc1N37DNqOAm3O207l1mlLKq0XoPIp4m20xoS95zUt/NBVQFWv1DP7H04B8BU2U/m8lPXdl5JDVCw1+q2xWySWv1OPy00WwwU22ZjgSZPdThPZdAnzRWsbtEKT1k9LZSmNhKj4oha0fYdzzGZVRiQliI3JzialW8591Fq5+eeVNu2Du06o7duMBLKfRItbD+RDq9ZL0rnICucDs6BbsTlGGrNO7kAjCCql10TKRezoFH3Cvrztq6UgnradD5vOh5WDXi+rb+sgYr1CZKKa0uFER5BmiFUHYxBhR6RMiwO+CyuktQKknpOksQSIyWe1hgMaSDw/uAP/uAPrmQR+tu/+7ts2LiJX/+1H78Q2Ndddx2/9du/wy/94i9y9uxZph4osnUSJrs9nhkNufdAnQ0TiuO1EvccrFJIrCJ8fkPMG16y6J6pPknYn3LjboWPZZweXR1SCSVBanh2XcwNJ+vMliTPrYnZNG6YzUsG5xWPbswxU/TYclJxeq3Bn/M51eNTj6wv6Y6TCftGA073eChfkkvsRGI8wUSPx9u+V2b1hGZwQdNzWpKEgm/eGIOWVELJySGPoTlN/4Jh1YyiHkpGzmrWnlHkUoMWgr+8MyY0kq6apm9a8O2hgBunMnIp3HKozrZDCVEG/+HuErODHqEyPLIm5K0Hajy1OWb3qpDzRY/+iubF1RHbpovM9hapxpKh8wmnhnMcHQ4wwD97vsL6ScWZHp9P3V4kRlD3BV+9Ls+tx+tIDQ+9D87U8hQzzf5Rn9q85D3PV0gC2LMu5j++Lea645rBsuZst8/RwYBn1sUUUsPLoyEvjUXcdqhONZTsXR0wl/dZP5Hxkbd0c6LoUdQgpGT09HmSQg8f/PlR3nC4xtjR84xfezufvbefex//Oo5rA71FODJhB1hm4IuPQk3DVB0mTzt7xS8j1Atk3IEcOg8qhfGzkOuFU2eA24AJmJuwSmxqEsihuRPJHjQ7SUtb8BZnEJQZO+bx2H1jrJ3sIlxUPLlzNbVIMjaZ8c8eOkvf2TPs2zbE9u+9jDEBM+tvIDc1RWXsnzM3vJMkLyjOHQOO2OfedwvcsRN273MYhQDOTcDBo4gjq1GMcnLL9fzX+0d54+OPAI9Szf8qQfoAsAWoMLf6g0iREFVfBEoYEVIf2MzIwSdYGN1CYbbC2OEZvn99P4Waphb5PLEt4q3fPIpfmaW++ibi+e+hxSjTA130zGsODQdcN54igCPDAQ/tyPOBR+cZmVfsWx0yNp2xcTzlxFDI6EzGM2ty/Oxji3z9xjw7jyS8/sUqezaGaE+w4cgkWWmAcHGW+dFhglpMfXALudRjviCZLnj81V1Fbj1aZ3hSMXbsIHvvXMXIgVP0Tye881uSDfteJqqdQFIj6d3A2IFv4nEMQ45a/yZ6jz2O5DxzfTtZ7O5mbFrQN1tn6+5PIzhHxs0Y+tB0kXWvoX8hR+98ircww/jQRnoXJwHJM298I563keLcAkF2Fk+X2fBCLzLOEWT
w8qYBDq4bICdKlGOfrqREeXQz/+UNRTZOZsSp4btb82BgquQjjGDv++c5PdvF5rMZ+Yrh6W057t5bZef+OrNdkqe25pi/u0LXIZ/PrY8YUgJjYN9gyNSQ4OYDmskBn995aw/hcJ2BCclbnqtw764qCyVJdwa3H65xzbEEAZwaDDjT63FgOGTVnOL331jkH9ZF/NKLFe58uU4iBR99d56jPRF5I9g7HDAyr/nbkT7+5dMLbDiTcWooYLw34LlrBAP9NarjeXacTSlWNUkkWHs+Y92phEDEzHYFHBoL2HmkTjTns39Mct/uOqOzGS8PREQG1py3L6d/fEuRn9xX5cQqyZ/ekOO2Uxm3769zfNhny8mUFzdZzNvgnKZvUbH6tCJScGA0oJjAwJwiVHahN9UdcP3xlFjBRLfHwKIinxjWT2Ys5CT5usXPjU3Z71/cnuO2vVUe3hRzpjvgmrMpYWaY7PLZOJHx7MaYwTmLsNtwENZPZnzp+jzddcO+0ZDXHahx47E6/fMa38BiTvLs6pDBsubel6vcsSthaD5jzWRKLRQ8sDnmXfuqxAk8dlOewTnFkeGAmaLP9GAf/bUiZ/s8Do6GIODYcMjWs5JanOd1HxzinUfqfPXNEXc9nxBnhj3rQuLMcPPphBMDkqm8x41HUq7ZM83IVIqPx+51EbN5ydi0YrLkcduBlM9vy/HyqohvbAlYO6cREu74fp2960NuOFznyFhAKcg42B1xvM9ntuSx6WzKUxtj3vPkIt0Vw797c4GHB0KuI+GJ3pD37qqw+UzKYimid/wMuzbfyKk1EetOzONl8xgiJN8HeinMlZFMYFn3dbtQzt+CrGZI5oHjaN7HC3e/l8HJBbzsaSzKrQRUMNzKzMgvkDv/GbcIzbAou6pdWJc17D8Esz8HnIHsHPR3wUCXXRhPnnXPbTCjuyBO4Rv7Ye9u2LYOJp9DMAvqHJgMZjTMboJTB2B+HDho55mT5+DFc/bZdQXnKnB2EQZzsPeQjRswPmEjk9x/M+w+AL0liCO7WHx6F3z+EeCngF3uxeBukuh2UBpJGZDMDL+Dv7/vRr67fYCiv4rRyQS/tkBpaha/XieoneIz77uHvnofX769wFzBY+PpFCNA5bo5sXaYwZnEchz9HM9vjlk/kdE9qemuaW48UOO5LTGVSPLojjw/8d1FklDwJ/d1c9vRjL3rIzaOZ/iZ4TvXdrFqVhCV5wkXj2IYwksOI9hPyq/i8YRtrzs3wXSV7oOftfNzBUjqsDjlXhSm4F13wNwijE/C4y/CLddCTwn27oLyPOgcXNMD56ZgqgIcBVZRmN1DEr2ZQB1gbuSNjB08Qr7yTYwc4uTmTeTqmg/8iz4euy6iq+ZzpsdnpitgYNGje3aexa48z2yNr9zy/N8+/Wn+7z/9xI/l2ZT73/lOypUyDzz4IP/4wANNwH4DbmOEhfwL96bdhEZpZ/WRSyPdNawLUjvsjhDO8mTtSx7WgmEJPtbC4RvTxPbYt3NrJW5s3zURP2bpHqenLTJKey10mzDOYmOsdU3qlkWiQVzSXhtCT0CPdggWh/AKlm1TKBccRTgsnjCQMy3LdKBteTxXFmHMEuRNOw5J+a18FrWxoCEDOd3CHNlAD63IQ0bY+mnUQ49ygTHcHb4xRA6TZPPT2n/2TAt/VDAGqe31shE5zkCPbuVVGk3URPy0h2F0b62B5xRRA1PnkGXNmHV6hV1A2Wbd8NpORbWs22LZ4QKT85DaBUkxBq8NE6YKAu1L+2RPtqHPDMIoi9tZvgXW2GLzpH1LlwI8z+L22kCKsW6h6FoWdJeY1m0IvtaxAo1nEY9SYCLZisBmnF3Vky4SoQv44vpdo01p26EIjbGW5DbckfZa7B+BQbtdEgTooA3v6LY6LLKsheMzXguvFLr7GpAk4SxEWgpUn0CH7ehBg6Fh0RcO5eU38yFcFE0jhbNkSdf+bbFHG0g3DLL5m7WoSt2++yDR3a0K8Yy16NkgOY2
yaKK2dvXcmJE0dr0Egat77dnAKQiLkGog4oSyfajYhvzzTQv9JoCStuNECNCBQLmdJSWthV97ookSk21blMPa0Od2fJRnx19J2z4lmshP8IUNFGLkUmSeWEYU06K1I9HWKmgh0H4LY6YcBks6PdoIQqVlCzXajpJrXOM5fJWWoILW8a/GWLS4RdGMstZEdbp5wDikXCPLFqnm0H6eAzxqlnZyV++Nshunu32XdgMLqJxxsDG/eE2cp21PLVu7LaJt/DQwdL5u9SEac4Exbag4i0Vbo1t9oIEYk24sK0808aVGgI4FOhBNlF4Tx+rmlEZbF50lUQAqsNdqrwWN87VDojbmEGPnBSOgx0Cv078tRGIL9+jrRr81yw6xNPBzcoUjGqbt2JPB08rpIpZYYW3gkIyl8EJ/hW19Nybbj97ppYi8Zh6kAOm2mXVjh9FnKdRRt/0/1yqLDFr5EBJy0unr9vK6YwNhsPTEge9b/Y5uy49u00+NetWERhNpjaddo2HRcw1LbWDs7keoDdLNrU3wqGlZ2k0bok37FtumAhekyd2v3KZATrf6WmNt4DfaUyyvF5o7f0vFsfF82XaPm1u1cXOcXFpPpjVnt1B+oq3f5Fxfc4HqAtnc3RAOJSqUoU8ZAoff9VzwGuOqTOqrOLbx7p/+aT70q//qx/Zw95e/9CV++QMfYP/Bg0jp0ZGOdKQjHelIRzrSkY5crVzx4vnP/uRP+MYjD/Py/v0/lgUdHhrid377t4mCkFwcd1q+Ix3pSEc60pGOdKQjP7zFM8BXvvIV3vrWt/3YFlZpzZlzZzut3pGOdKQjHelIRzrSkR/+4vn6667jn//kT/Dff+hDP3YFffa55/jjj3+cP/zwhzut3pGOdKQjHelIRzrSkR/+4hngTz7+cZ5+6ikefuSRH5tClstlfurd7+ZL//APCOfIpJyjidfmA+C7g/INxz7PGHv0Xlo0lFAApokTsw4RAi1sQtbBpuWApoT9LXDpAKAFQlikkWeEw9RZpxFhWh/PWMenUBuHmqLpgNVwktHLHE4aSLAsaLmLZNIipzzn0ONp60gXNJ1P7AF5Le39Pc6RCFq4psA5zPjaIpNC7RwCwaJbRMMVreW0ZTmI1tFQuFj3PpBG0jrjOIcb5RwQutwzs1CgPOd45GCKDUcgaWweAuccpF2ZlS+a1+Qz6HZl1dKi1FQoKCrjnLpsW4aq4WBSBELnOFcCb7QNYWQsuqjppKBoOTnQ5ozQwCbJtmElsBi4PAJLB1C+5RlaxJ+PceifLBIYKfG1RTZ5bexfacBIiYr8pkMNKnP1kbm8uDP8StkPWKeTTDnETwOvJJBaubLbMkmdXagOnEOJJgTpObxZw/3O5sEztk9moWg6ZVmckbvKE02cXaDbnaOExSC5Kg1My7krdG1UcPiwnDJ4SjnHQHePtI5/yJbTYpZrOTcJA92Zttg7CQjpHFNbTnlCa1f/Dec967Ri/BDTZGXHzilN2d9NywnHIDEioOmIZGwfNMJ3DlK2vnxt+4mKLVIPPDsmI3u9xvbZzCHMjIAk78aGWErnNo26V9aZSzuHM8/1c7BOv9LYhAyCuM2nSgCesigz2hzBpBufuukw68aVtOMzC5yznrEOd7EyRLrleNYY9562YzN0Q8N3fqfKs+lL58CrEEhjmvpM4pznAkHmkHDSWEe3xli3Dmv2Scpz9e1BnxvnwkBsWo5tplG2hm5rOkBa3aI86yjXqDfr9A2ZJ5yjs0E4dFrTAds5K3naOg/5yvZJaUD6uvlMI0Sbsyp4UjedgJvzjWk4vLryappOcrrNV0xgnewE1sG8gZjz3d86dDpNQJoXqIK9NvOd65i0/aygLO5TamP1q7T5lK5LN56rfJoow0Y6nrYOpZ6r37Chl5yiF21jueHsJwwYz+qIBlbQOiRafeBpCFy5S6qlq+2VHp7rLxb9GDgUnefGT8O5z8Ni6kKMJ1C+j3XEs1hIJQVG+k43h0DBfXAOuo2
jm91O740uWxI5R9/GYkEpN0f0uGv81shUbSO1qWsa+iFsAyHHWEf0yKWRc7/laTr8KVo63OkMq+QU6NjmQSk7Rxnb0ErGLHWIs07QWlikpgpE08Etja1+0U2fS1vPoTJN5K3fNpZU04lYksUWM6fcekMFLR3W0F+egyxkoV272PnBNMeGr7Stf61cW7a0W8vxUdjyaePqMbYELG1cvRfdoNQug27ubTi7t5XLov1cn4kKro9oUFlLrzunbhV4zbVUl3Hzllt/CAMqBOVZ9KeAK0fVNadXKfnp97yHN77pTfzmb/wGYRi+phfOWZax8/bb+eAHP8j73/tezp49yzdP9XOoJ2DLZEo9lozMKRYiye2Ha9Qiyb7RkMEFzcB8hvIFh4cDuiqa7nOCbWcSehcNpYqmb0ZRqmieWR9Ru2WB4QMBQ3OKPatD6r7gueGAsbKmu6450euxbSLjaC7CjCnOlwPu37PIHfvrHFjl8+9vLfLG0ymlumbLuYyhOcXt+6tsO5FyZsDn6Y0xQwsZp/oCDg4G9MwbXlrnMTBv2DsYghTsHgq4eTzhF9/VxVtPZNQDyaPb8txyIqFrUZBPNDcdTUhDwVSPx9bJlK1nU6qx5KW1IYGGa89mFOuGWiC540jCjuN1xiYzTgwH3HAi4ehQxNufK7N/dcCGM3XWvbiHqTVjPLcxphoKfATXjCf0LmpKVU0ADM8pFvoNw/kav35HP/eeSTnek+OZawzHwzxj9YRf+FaZWiz5i/tz7B8IeM/jCWtmMya6Pbrrhm9vynHPgSp37q2xeipF+ZAGkpdXhXzhHsm7vpOx0C24/XCdt+2qkk8Nh9f4LHQXeX5Lkf/uq/OMrwpYNxkTmoCtxxT5ua9RK/0yMisiTn+DOv+BT/zc/8xdD/2hU5I5qJ11StdDUAUkkr0wnIP5CkwB6vXAi8CdwPPAJkBR6/o9RH07Hg8DId+7+3+id8HnWzdvJxdvpDA3SaHWxV/d18f153vYtucsXWnEmsmM4sQ4xvfRcRde3MfzN21k/eFThLX9ROU9nNy+jrUvfRjYS8YOZO4UTM3D+DisHrae2X/9GHznBfACmP4WWe4++k+eYuvxYaL5I0j24esHnfLuA0pUCm+gHhryC7Psev3Pkte9nB/qpuvsDOT7kEkFHYSMTvj87etK/M2tBX7uu2X6JqfJ+taAThFVyd6bt/Hxd3SzZSLj+uMJT27OkU80396e49pTKTMlyZoJxbaTCbs3x9x8oM6G0yn71ke8/5sLXHss47bv19j20jEiJZkYyPPsuoi7Din2b+onFgUKc4tkuSIffe9a3vPtCkLaBeYHPn2cHfvKnF9dpK8aMHS0QrzwIpgcUU0RVp5lcusbCesxOoyIK8+g2MR33vQu1pycwqtMUl77bnJnT1CcOUvx/EmCxTkCvR/IU+u+lzObd6LzI4ioiNCCXVtzFL1hBs+eQqpZDDGjB7up9xZ57Nb1DNe3IkSByaFBPvWTvSyEglosmSt5PLUlZmw6I8kVuO7Xh7l9OqOUaraeSnlpXcRiLKlEkrXnU2bPdLF7VUR3XfPAdXluO1xjMScYmNf82X3d3HCqxpF6kUpOsO18Rk4ZCnVN5gluOpiwe13EhrMZezZETHfD259LuO5oysE1IQs5QT0QzcX88+tjPnNPSNkLGF7UPHI3vP1ZxUjNvvTc+XKdF9eFPLJdcM9+w117a1x3x6mRoAAAIABJREFUvE6ppkliSU5ZdNvYtCbIDGUR8J1cgWsXE84NS9acUSSR5Ey3xwM3FpkseXgSIgXXHk8oVjWRElQjwWTJY7zb59h6uOVAypObY+47UKN/QRMkUFR2AbBmOuNcT0CgDHFieGBHnjQUHF4dUo88nlkV8eiWiNlBQVwX5BNDlBi+ui1m91DAQsnj+lMpvRXNv31dibtPJ3bxIGCu2+dNz1co1Ayrz2ZEqeHwqpCNt59CHigyU/Q4PORx4/GEzIeFyOOa+44we6qLsAa
1SHDN6ZRybDnZq6Y11VCwZiJjYF6TRoLHNvm87mhCvm6Ia/DYGwN27E+pFSQjC4oXN0bceKjGt7bnOfz2RernC4xMZ9z/3j6O3Z1xxwuGr19fINYGLSXf2xJz06mUeiwoTEv+6J4SWxcVlZxk3USKkdC9YHh8Yw7tw+o5j1B5TJckH3pHF7/xRJm/uS/ijn0pZ7t9rjuTsnfMp3ugxuYDhjUTGef6fQQwNKvoWdCUhccztwnu2JNx28EanoCuqqIcCSo5Ser7lNZU2P59ybtfqFCsG6ZLHvnE0JUW6a9J8kpQLBu+dt/tbD61iJ/Oo7ibqbXXUZjbRV3eSxas4/yauzm1dpgTG7YQmu0U515GMMSD99xJ7A8zNH4U7d/Dqe0f5Nzauxg48w3i8nm3WM7I+E0kBzl27V/QM/llLGt4HnjSLlznynDiVjj6AEzXSPldPCaAbcABYA4m+iA9DNwGUweAKkr+lHsBuBPMYaCG4meQbMBwI4JFDO9EcArN+xHsBV2HchVePAheEbJ77QLam4FDB6jM/AbB45+GR78F3/J50YwyMnmKh+7/P9hy4CvAIBAhVJ4z297C8U2vp2uunwfv2cimcylZJPhffyLmZx+fZ2qwj+I8eAtnULletuzW/PFPDzE8r7hrd40zwz6ZFEx1eTy3KUYGBT539wCDi4r/ckeRm8YTnnqDT2lSMFv0GB8IKEeS648mnBzw+ftbirz/8UW+cnuBf/HwFIvdARO9Hre9MENh/Hv49QlO7HwLfWeeRnIKmELSg51QF+HUDCxUgR3AG2BhD5RnMOwk4+fweBaOTsPum+Hk08AIDGgwCr53FoIUsi6YmgK9E9jLE7/w+4zuuh7J3+GZrwFlvPQmovKzCOZ55N53cHB1jmos6coMt72ccHBVxC1HLFLy7+7uJUdEOSeJ6+bqLc8AQ0NDfOQjH+Ff/vzPv+atzh/6tV/j+h07+F9+53ea3wlsYJKGlaXxXQO100C1GYfxaWCHjCeab/RNa7ADiqMFQrfwRMJYWLy1HNrXON0wzeqWFSUNnLVMmyUoPOVZi1TDihy4N3rPWetwaCNp7Pu4p41FXBnoUy0EXNTASjVIOl6rzI0AJcI4iwHWuqSd4VT5NAMsaBpWGY1y1g+brucsS623T/s2ai1kyll87Eu5oD/TCG0zl1MWbdMIBoOBQmbxdipsWbc0WOu9qxPl27QbKKq8ctgzbd8Os9jm1XeW6lBp0pK9nkZ9hA4BpVI74ABJRsG0x6BrtzrTZm1oBBJp4YGWXm9NOkKnCDIaGBzPIZJCrZoWXxVCXllcmcoJa+nz2pA8jfYxCiMbCJ7QIbV6nDWjAZb3oRC1rOJdof3dsygeYQyaAFVswyzR31a+dgSbwVMpOITR8khNOrA7EwMuiARCIHTDIq+RWpNXLYteI2iJ38CLuf6jA5xlQpCFFsmV5iyqKysKVN7D+LJl9WpYMbV2VjJDSbVjIAVpvyTrlQjhAleEwgYqEQ3rjOcQcrpNI2h8nTXRVEJntg8La+0yntdqb62QWlmrtLEDJ9A4i3ab9adbOsu7vVZojcAQK9OsD2lswCaLnDT0Zdoh8pwV041j2chmaHeyPO3Gtmwh4PLK4seET9Oa1yieEdaSHOn2/UYX1KPNumJo4OlsYKhu5ay/2OdpTzhLtGmOT6+hE72GBd724cYOmmpDnQWOBeerNuussfizWLf1C6dvlUezniS0LNvOEm4cRlBJqz8aGDrhLJ2+s+r62lqTI22RmbH7nrZdraK2fbq5i+f0dBMPZ2wAFuUJq0/d7p7RonmNp1s63BrHZLMNjbOymzYknnBzS+bZMkfNKJCmiXxr6NLm+PFsuYJM4GkL87wxM5Qy+8yoscvo0GHab8xnMJzZvoOzyhthdWWkTXOOaQz1IWfhj1VrJyRz7dqE4TkLcnPusgZia7GWdhfUQDMoTaO8ys0nWSiauwSN4B0mbOwKGEKlmrs+EuWsxtbqKLRGaGV
3A3TmdKrVwZ4BTyuLPzMKqVKkTt3+TeAsy8rpZ2V13RLkXQ9LMXON4EzZMp3PMqtvA7vW0C+qaSm397ZQcqIZvGUpzpIlwV1081FCZjTZ0vj4Lp2gid5rzVfCKHydIVTWDMJmBGxs6BrThiTVhqxXkm/0fb+xA27ndV8ZfK3JaTt3x27nN1Q0+1lDRynP4gdz2q4fIg06Es0dXx0JZ3GWrr3Esh1b07azINrqUDTrVeCCqpR8Wqg73cLWLSH1tXaKoyxzde6s2YRuTrXPCowmcLteSkq349LSkZFu7eA3UMSvSP6H3/gNTp08yXefeOI1u3B+9LHHePQb3+Czn/lM54BORzrSkY50pCMd6UhHfmDxf5Cbv/zlL7Njxw5OHD9OqVR6TRXs+V27eO/73sdTTz7ZaeWOdKQjHelIRzrSkY68KiJ/kJvHVq3i//y/Ps5d99zzmipUpVLhXfe/i68/+CBbNm3qtHJHOtKRjnSkIx3pSEd+9ItngF/55V9i/dp1/MVf/uVrplDve//7+Ve//q+4ZefOi15jriQhc/HvxWVuWen3Fb8zV5Anc2XlblyWiFeYjLnyOjYXLdGl0jJkQlw6XcHy4NcrJrc0OKu5omyYFRtDvIIeJn6AznP528RFkzA/0Lgw4mrSc+FLucLGMBe5wLwKOW/QNlb6/hWnLa6oLsQPWP/CLL21PaT9D1PMVY99c4n2/EG6o7nqEWZetQsvfoFe9qsWP2A/Na10L5fIpX7Wr6SrmbbLzZXWj7hIu5tXtfMJc3mNKcwl+qK4TKe8IHFzBb3fvMLOpX8II/MqVmniakbSyteqS4x9c9F5YuVkzeWqfUkbr/CjEFdd92Z5aPRLrsR+uLpWvhqJfOkLf8/v/97vsbCw8CNfOH/4ox/l/Pnz/G//5t9c9BrPOQsWapYIkTjv8kzCmYKkb0EhjaEWSRZi+f+x9+ZRlh1ngecv4t779pf5cl+qsrKydtWmkkprydptyxgv2JIXbBhg2nSPB2jOTPccaJozpwd6gOHQ7m7+aIObxsAwQwPGC8iWF7RZsmSppFKVal+zKjMr9z3fepeI+ePGfe++rJIlY0k2zPvOeSfzvXdfLF988UXciPv9gsWMxPYh4WvW0pLlrORKp81S1qLmCHRCU/ZtlAVpV9O9FlC1RHhOvBn4B5Z9FtMSJcKwgd5yGK29nLPIVTUjSyFCLFMNUXi5iqpjgIopidRQSUgW05K2siLjKjwhmGqTSAWZmmJg2WclI7lxxmc2J1jISfJVxUpakqsoOlcDXEfgJiHpa/IVTdLTlJMyrHt7iGpJ+Bon0BSKocH6tsC1IVtR9C35JDxNe0lhV0pgOcwUQp5SWzW83gpgtMfG9jWZmqKcEDiuwA0sBsoB1YQg8AU+klxNkXSjdoHuRcXgQkDnakDSDcvh2dBZUigBriOw/fDaqiNIe5otE4q2igo/s8NgE8fXJFxNpqLoXFUkajU6lqqAT2ApEuUpFG2IYBxBkRBhU6FrLeqcPuASBm8EhAEGGigD1dDhlCvmuznz+zIhtaKKNggkwZrp+Ckya1VWcpJ8WVNKSQInheMrqg6sZiSB7VBKS6oOzHVnWWlLs5izWEsLHE8z25UmfNJqlszqCVPGKoK5ELdTrkA6BXMLkEmDbQMdUK4BChlcQVDGKV9BsIZmEE03YQCKQtOO5VexfIUiQ2a1yHy7w1paguVQSQg0imIuyXzBIldVuBZUUoKV9jSllI1r2VTybZSTkqolWE5L2tcChqZqIbHG9DepwyApoSBfVqxlBJ4V9kFpgo2qiTB0LrBCCkTvSgBCspKS1BKSSiaJ5ZYYmVhGejMkS5Nk18YRXgnpFsktLyO8MkL5gE0pm0b6iwiqOJXpMPDPXSOw21gtdJNfqyK8MmDjW4KlrjbKbXnAQ6gapfwQiixC+cx12CEeS4c4I19CJSlZbc/hJ3twMz2U0yHBwgo0zw8nmerKsJq1yJUUNVuQchW2HyLZKgm
BbwmG1wIyrkZrqDmCfFmxmAqvrTph0E/WVfgWyHSAHWgSXhhw1b8YIDXUbEFHWVEoKRaTYRobFvz6oBIY+pb2LJxAU00I5vImuDGA6S5BMSVpKwe0r2l8GeLl+mchX1Fka4pACMopQb6i6FmBtrIikOBZIUYt44b4tmJScrVgUU5K0p6mpxiQL2tqCNZSkkxNka6FPi9bVeRLiraywjM+p5qCUiIMmMtXFckquLZgISMpJiWraclyVpDww2F1NRPWY7zTYiUjqVmCtXZNuqqxAk3FEQwuBWycCfW8lg79xdByQGcpoFBWuJbAl7BjwWctJbGND4qCCyspgWuJ0JemNHohyWoebF/TXVTMtod4MVtp/Jkk+WJYtnxVY/uatKtAa6pOGIC3mpe4lqBzNWBkPmA1LZkfBM8S9MwqAhH6a9+CsiNYyloEEoo1By2gmBYMz/v0T2muFGym84KltORqh00pKanZIV6raohmri3IVcJ6zrVZKCGoZDTFtOTCgMNyTjLTbtFXUiQ9zcBc2B6BFDiBJlfVUJUkvZBospYRoY4sEfoxaSBtQajf+VzYTqspyWR76BcsE9Tp2oKxLot5UyfPcag5so4M61iuQaDwUiOU8z041SpKdLDW0Y5QHk6tgtSaiU4bLA/IElgFupd9Eq5Ck8e32wBNujiPoo0g0UsIj/WBaaCTRGWZCH8X+utuoGbeL6DpR8kdCJaAEthFMzZkjd8XQPSZBJaJB8Bp0ijSKFEAvHAMYdVc78amtWEAYzhXnA/fq/DpWulegoxlyqfIlmYAj46FRZNHjsDux0sWSFQq2L4mSGTI1hQJT9Gxqth+OQAZ2qe2BDO9bcx35Qhsi56VgPaSIlN2KayE8xNLhdg/X8KG2QAn0ORrinJKYFXDEs+2WZwvWMznJI6vqTqCQklhB5ru5QDXsZgt2NgBEPioZB4v2UZqbYYG1k8CFRSDaPoJcXQAa4CNkpvNVHkZwSSaXvCqRvd+OPwulkKUX08OBrthUyfk00bPkvzMFQRXYlPfCjJYROOgSdG+UiNb0XSsKoZmfaSGtWQ4N0xVXAZmXLJrNXLFKoXF4vePqrvuDFxKtu7Ywf/0L/4FP/+pT/3QJs6f/7M/4zOf+QwvPPccyWTyutdMT09z4kwHFQR3n66iHVjKWkgFaQ8e25HiEy8WcW3BxQ0JFrIWR4YS7LvqkvY1C3mb8W6bv78hjZsUZDzN7BbFeZlh3xWfXRMe6UDznc0pOlzFyIJPKSm452yVx3enqSQk1azPQ696DM94HNuWYs8Vlz5XYQFbZj2sQLBp1kdZ4QD68tYUThBOcl/ZmODdpyokfc0Lm1KcGhJsWoCbr7jsnXB5eWuanzhW4Uu3pFnKO9w6WuNCn8OBKy57xlxGN9pMDgqEZ3HoXJWuouLMhgRVR/LdrSk2LoWoOl8K7jhbRSrBbMHm2OYk952ssOuqR76qGVjwyU9cRGUKfPn+ARwNI7M+Y70Ou8dd/uid7dx/ooKQcGowycBCwGQiyebVEDi54NiMd1vcetEjWxUMLAdkaoqOFc2+yx77L9VIe+Ekc7rDYfO8h2cLkp6mc00xX7CYbbcZWAq4+5jLlnmPxTaL8S6HvpWAQkkTCMHIpMfQbEB+cY7+2RIiKFPsTNMz+iiB3ETC/wqCEpBBs5G8vZ/C3H8zzqxMiP9ZBkZMJz8JVGCwF85cMFZ1EdhknOB24CSau9DWFmz/OyadYVLuRo7s62X/ZZfzmxy6KgmsQPGNW7N0rSl6SjZHtidxAji1vZNSW46TwwmQ0LMc8Ny+Dvaevoytv0L73HeNA7iKZDx0GqtF+LG74BsvwjvvgFfHYH4XrBwHasjgOIKtZFaOIJgk4EEkvQgcYJmAO5B+ChsLPEnbUonH7x1hNSfZdnmNsU1ZCisrnL9hI0dHUmyb9nhyd4qRFcVaW46aY4PvsDrQwdkNab69I2S0fuLpFfaeX6XNtSnlJJd7HHrXApI+dK2GeKu
JHhstBZf7HHZOuFgK5joseuaKrBXSvLoly11nKrSXFU/sydFTgWJbmv4LR9l3dplk+RVyK6fpmD8J1QRWdZXO2Xms2graSiJdn4s7t7Jh/CkEs6TXpgmsAZzSKG5hhFP7b+OGs9MklicIZJK1/o2c2NsHmRw9V08hVI3Le99Px9QKBPC379vOlklFpuKzmneY7AzRTkt9nbTV+il1bWWqL82VfoeUq/k3H+qkx0pTTUs6l32mOix2T3j4lqCcFLhOeKObccK+EDJTNYWy5sv7ktxx2WMla+FLQU8xQEnBzI0uNxyH7tWA7lXFhkWPxbzFkc1JHjhVZsNSwFf2ZXj4pRIHLrvMd9h4lqBQDHjyhiQ1z+ZdZypUEpKv3Zhlw0qAHQi+drekY0Fyy8UarrAY77TZuBxw0xmfgWUfbcFyxqKrpNiw6KMsh91XXVxzA5DwDStfwrn+JK+MJLE0dJYVA0XFpvmAsx0JEkqz74pL2tOM9jjccrHKzjGPtKdZyUnm2i3O75CsYONo2LjkowNJxoNH9yTJeLCatRnbKBmY19haU8pI0jV49KYMgSM432lT3uxx8zGFLwWvbEzwkSNl7jlWJe1pDu9Ikasqti77tLuanVMuC21h9P39l6u8NJJi17RLKS1DfzLtc3p7Ak8LPFvw/H6bgaMpLm6DrVcUwws+z21Ps3E5pLVUx3Jsm/SoJCTdxYC2SkjsSPi6TiS5MmIhXMnu8Roji4pXRlJM3uWz8aTFvnMui+2S1Ywk62pGe0NbW01LTskc2xZ9yhnJgycqbL0Ef3woz/HNITJhtM8BIaglJIWy4sqQYCrnMLIQsO9yjYk+m6f2ZRhYDDhyANJrDs/tSdFRVjyzN8OmZcWtF6tsnFGM9jmspCUjcx5aCuaFw71nqmyeCnhmX4pNsz6eA8dGknjCYmFAseec4uJAgnMDDot5m6WU4MjmBBVHkC7U6B2TBFLw1RuTVB2bXbMujnIo5hwCW9C+UqJ/oYq9Ns/q0I8xM7iZgYtHUXIzx2+/mQ2j50jUaqz1beCP72vn3afOk1lYpZa6nb5KlnS5SmK1Rim/CzeTYsP5J1AMUe2+i0TpWQTjSBaAgzjlIrY6bxY/xoF3A98F+oDLaN6Lm3qEhP9N4DjkA6i1me/Pm7FhxbwvI3QNyJvXFWAXvriVwO7FVucI0aYK6DD+95iZvJfDyXWQBs6YybkNrGC7x2FrDyyugO6hsPosUGVgYhuCo8DNVPMPUewcon1yCq+tE5koYCtoq2g2zvjceN4niUMtIcnUAr553yZm+jsYXJEUygEDi4pNYwv0Lgv8pE1gSc4NJugu+tz/SgXfgZluG4TALgkKpYBvHsjype1ppCN48HiZs5uSbJvx2LgQMDzjUcnYPHVjmt3jLvnFedyOjVTyBXpHn0CwahamJsJFC/EBJF3mPIEZszB1F67zDuzgCWAaySqKu5HVZ0x7jcLmPByfghsGYVM77NsKt2yGlSJMXQBcui6dR3Da3NAkgAVsrw8lutGk6S4XCJJZtk767LzqMT7g8PyWBPecqTE8tsjWqz4dSyt0LBbpH596c1aeAR750IdI5/P8t89//ocycS4Wi/zyL/0S45cvv/HgRdG8sF8/7kI2fybX7SJJogNSGrsDIrbtp6+zBajN4RD1jQazwBn9H8HpdfRhnFYWwxrJKP0I1B3bvYgOm9Dmzl/EMExRGWLJ1ssZfWatU0aIymo8oBFhoeqHEZhKy3p6jQTt6+z0RHUV8UcT1m3daClCnJNs1DOetli/rSQMK140cErxTZ26boUAK94wltmusptKqKV+vQdt1n0u1tVQv/a2UoT4irVDdHXUxpFNSbPVJWkG+Zg1ehqQf0EdpF83GPka5Y5Ss82quK4fetJ0nWhgqOpli227ReiwJlONHfDTMF8RIrcE6ISo4+TWb8vG7bCxd6gbj7DoZhuQTduN0mzlNQ5SaG4PUU9E1FFSokmbItpWlM0PGUT4LF2
vTQMHJfS124siPD6lcZ1utGtOh4dr1PtAVCLjgyLdRUgpEbN1WzfSF00PQlzbdxp9RNT7foSJ+15PLcmYT7Bih6g0bdPKRrrx/qzXt+E631L3RfVyCaRo1LfRCKLR71nnBnXMX8fQdWLdI3SRvm3daOXID0b9S5sDYOLIOCVEvXz1+lrRoUDXPmbQZKvG70a6aFK1df3xgGuttH4IVnzM0da1+dbHg5ieojo5OurdYGt9jadq8sPxJwKMTTvmN1I3eoyW3/tROKGvHauiAkZpiZjnWf8ol60bvfKazffIpg1CrN4DdLMSrHWPCDTsKMRbIngNv23RfMDVdfeqY3nKdb6e6/j/19rUj7fH+hHgtcac7/WIgrjGl2GwcXVvEUteyxAN2tTv65jZBs5ufZL1pG0Rs5bYsKbDhV8ZfxooOqxIiuY5VLx/XdPq4jqPW8i672v2KPo6Rikb40v8Vc/DXlex5opGNhP1pfX+TDvE5ibiB6NtrJdHv/QlduzezTsffJDhTZvetolztVrl0N1389k//ENa0pKWtKQlLWlJS1rSkrdK5JuZWC6X49tPPsn+AwfMMbdvvWit+cCHP8zw5s188uMfb7VoS1rSkpa0pCUtaUlL/nFMngG2bd3KJz7+cf6P3/z3b0sFPvOf/hPVcpkvf+ELrdZsSUta0pKWtKQlLWnJWyr2W5HoZ//Lf6G9vZ1f/J8/TXd391tW+ImrV/nN3/gNlpeW3rQ03/B6uXgb8tM/QPl+aCLekkv/6Yv+R5mf/lGokeBt7oyvj0ESumXR30uX+s1oGv32Wp74J+oJ3pI8xQ/ahv9/HgfEW295+m1MUv8oWvz31vgbKZ58qwr09Le/ze133onneW9J+sVikTvuuJPHHnvs+1eWgECEGLhACgITGCd9TU0IhApxS4EUCNWI8vMFBqEjUPWgoRBTpURzYB9a1wNFou88wzCON1RgAvDCv+Fj8UKZM+M9XcfVKfMwf7keQBGmZUUBWaZeytTtoiUoW2F6gRSN8hlLqUrqeUUIppoU9cAehQnSCMAKNJ6groswAEciAoVVDPCMFQUCXJOuLxqBJIH5XioQgUaoMIBgVYa/ET7IQCMDbQJDTLlMcZVoBPdYXvg3EMKwWUWTe4k4q1KZ/IwuUWHlhdLmPHsJ0V8CwEfgoYV1HYuJWi2iwwYm8EHHvg/TCP+a98Iy6ftABaFUzCZEXSeBCeyyajQFRmkhQj0qE8ikQdRZo1F5iOXpQRCAKof0DRF18XiQhqiXyaojkjQhzgeQArRG6gC0wJMC15KgNQpZt31fhm1QMrYVSIGyZBjwafrEq5ZgWQqkivpF2J+0CH8v6sEtYPnN7S8VWG5oLwhNYBB2aChJ0RQ0qqXd5OWkQTlFAZJaSECh6u3hhd8LWZ/IacKyhbULA4OUECghECgkPkpILPx6EKT0wJ7XWD4Nm9Vg1TRWVWNXdT1Y7bIU+Ma3YOxXBjq0UULMmdAN/4QGGYSvlXq/bNRbAYHQzcF6JujQF40grWo8AE+F6YXXNHxYhO7E9HlPhlYRlV0R/i6eRzxQNDAmZqmwPnZZx3yIqvuSqIWkCntEFDga929gfIEPttfo//X+opt9KrG0w7KFAVC+sUMlDBJRh0FHnrE7bfyHFg1f4Rv/WQ+CM+0kTPcSOrTTRjlDnxkFeIbjSthnpQ8iCNOL7F+b9o9CsEVAPYA66qkyCNNXUqOMzcdZyaH+NEpARYYBX5HvBQi08alGvYH5LhpHMHWI7DBKsyrD9mti/cbi4CLfLyM71aIpENLyNXbQCB5VMZuLxiRfCIIo/FZoo08R9kgh6ni6ek8WAunruu7ifVxEjl7Ium3U/YFtoYVlbFYhAm36vkDir5sy+aY/WTFf6tMcGBj9G/P5Xjx4MO6To2u82BRLIPAROho34ml762Znwbqb83VsaRWVO47Bs5r8vJbSBN+aYEpPY3kNI5K+ro+jSgikMj4sHsetNJYbC8yMYqBFI2B
TmDmOKwWVmGpc0/ci2xV+2O+UkCagUdPwEJ7RdzjGhuOvtW7Minx4pBcZq2/kLEWjjayw/mgNfnyMXD8TjsYIoz9hGduV9bHXjfkkEQUgKg1KvTmouutJf38/Fy9f5k/+7M/56CMPv/npb9zI//arv8LHHnnk+/rd9PQ0s6/k2DztcvfxCj1rAdma5tTGJJ/bn+FjZyukA82eMZfuZZ8/vilLJSO484LL3ScqFMqKSlKylAvZpRtXfHaeUxzuTJG0YGjJ5/Imm4kOhxumfWqOZP+Yy9Vui86yIuvDuS6LbTOaLdMeQ3MeXWsB2696aAv6ln1eHUkSWILP3tvO0EpAoaLoLAVoNB88UsbSsJSzqDmSI/kE94y77JjyaC9r+lZ89l5x+cTzFW66WsNC0L/i070asJqVzOVtKtrmQy8U6VpTvLI1yXSnw55xl30TNc4NJulb8cm4oCzB8ZEkX7opy4ePlhnrcdh3yaWY9Bg49SpTuw/whQ8M8sjja0z32Bw8X2PblEfG1eQ8RSktyZU1SaURluIbt1uM9gsWnQRHN9j8+peLbJ/xuPCTS1wuFpjtsplts0n5mlJaMrAYUEpJkr6mazXg3IYEn789x01XPYbnfLrWAqpJSd+Sj6Ug5WpmOm2UEFzptzk74JBpEN7EAAAgAElEQVTQkPQVfqaNRLXGak8v/We/juQwlv4mISLnXqCPw+/7CLuf+RqC44Ts4yqwBGwFRs11J2Dndjg5DnoHMAlsRHMQOAAMIJhF8yC29/fAKH7iTl469K9RqTZqiRBT1bWqkMLi9JYUuRIcHUlyenMC3w7ZuQOLAYVywPCsz5M3Zrjv5SLjAwkG1trIrHydEJ93yJTrdmAK3n0L3HsHfPT90NMFf/ssLL8cc+Rpw7m8TNV5P7//L3+GA0ctbH+Z0QO/TNf0YaS/ipffwcmbdtF3eY1d03DDyUn8nk0Mnfk6TmUZJ7GJLVOKrZfm2TNvs2nWY8/JGXonZ8m5FYJkjptOV/hfv1Xk/UeqvLwnzfClSYqFDF872Ebfms+7jpS5sDGB1CEP/au3ZZjN25wfTrLzqsep4QS//WAb91+xQdi0VzSTPTZd5YB7nh/FbWtjLSsZGDtGQv0+ggEUKQTjnLrtwyScHrLLL2AxiV2bZ3n4bjaf+RaCUUq5D6Edi1T5MG7iZmTJRxe6UCJgqq+PnO+SWV6hmi+w4+hprCDH3L676Bud5MvvvYOR5RzTnSlODSf4/Q924WXC2crlfodcVXFkZ5Zv3ZzmD+5ro6eiuOlSjU89W2LLrMcTN2boLCkGl3xe3JGimhIEQvLb9+TYvehjaRhcDDg3kOBij8Oz25O8/0yNY5uSbF7wcSV4NmyZ82kfsxlcDth21aOWEFwcTHB+wKF/LWAtLXEC2DvjMbgYYCl4am+ab2xLceu4S7HDpiBdtl4NuNif4KLBWR7ZkuLAGcW+qy5zBYv2smIhbzO936VvXFLMSC71JKk5gkJFIYBNcyGj/pldaUopyTc+EnDfM4qsqzm6OUXfasDlToecq9g+7XFkJMXOOY+JLoeROZeBpQApYPOsT+dSjUcPtXF6Y4L/+1Ca4auwdcGnZ82nkhAc3W6za8JnoVcy2uXQXdScHxLsHAs4MphASslaRnLgSg3fFlTSFg+84DHdYYEUbF7yWUtbPHh4hc5llysbU5zrd5jLW9x7usLAkk8xY3G1w2ZgWZHzNE/vSNLuataSkolehw3TPks5i+dutfnQUzW6SgF/uTXPO89X2XHVo7sY8GsPtlPps/jAC2UOb0/RvRbQvRpwZihB36qiLCwu79TccDHgZF+KQlXh+PDFW3Lsm6iRv2ize9Ll+JYkAphtt0PeflLyuX1p3j1a44FzFb51Q4adcx7veX6F6c4Ezx+w2XtFMN5mkekrcehoQN+qz0zBpm8Ont6W4C9HUhya9SklBH+1M8XDxyrsHNV87mCWg5M+26dcfFtQSkh2T3hYSvNf39HGreM
un7s9w6ZVTaK3wqGXfLKuZjFv8djNOapJwdC8T9ZTyDWbzmLA4RsttlxV3HG2SldZ89xgkmpCMpe0SFZsJu4ucraW42dfLHLLyRrpmqaclrSv+eQmL/LyO3ax8eJZpE6w1F2gc3IKjc3jd21lz6USyep3Sbj9HN7fzaHTS2QXjuG4y6SWFjh32w30jlZY2ryRoXN/gaDG5Rs/RNvMKLY7geAogfwJtM6SUF8B5lDcgmAB+A4wCMwSsobvRgcBkjUEE3D7Zpi8CoYh3zgXwANSaD5ksGgXgFXcxCOsDr+D3PxTCL4ELAIFXH4a8JC8aNK4kxCV12YmdvsJec8r4ftdm+DqPOhLhIxojeBJYBVBDRXcQmbpLFKtkJ2bZbWrj8+/u5OpbpuRaZ+2oscTN2d57KY0Wxc0SU/x5VtzqEQ48Uy4mo5SwIkdBf74nXkyvmYpa9G1pugoefTM1fj2gTzzeZtaQnLb2Ro7plyyUlAa9HnouQrvOLxGsc3mSp9NoaL4+h05Hv52kf4LZ9CZblY6srTPzCDdDHPbD2JVhnH8UeA8ln4awTmj93nARvAsdvAy0Btyrr0pJI1zDkDD4hrkslAswZPHw+v27QyZz2cuwvAQLCxBfyeUykAF2Ae8gMRHsoRdmcJrP0DPueNklxYYrLQjEXSvBASZDAKLaq6NWq6NoL3nrVt5BvjM7/4u586c4omnnnpT0/30L/wiH334Yf71L//yP2yZ3mDWlAxfER6oTTdWAsK7f1FfXdGEUPHo7jhabQhv7cwdnV53YqC5Kaoj5NYjnQy2CQTKapQrwiulVXi3o2KrAKqJrCaasWxg6gRBFpQtrouRQjS+k7HVLmWJ+t1ktNIrtcZRIboljifTQiC0wtEa7TRWVgzPPdSdbl5ltXTjZeuwLlqGyxRWhJ+KIc+iskZ6ERqyStdxfOtPQtKCJjySZVahGqi1CL4U3amnY3fwGkspNPa6O9To+xgeR63H0enYqnT8M4PG0QpLB40VMuLorBCrJHUDr9Wod2ifVn3VA9DxVY51x9fV74xVDAe3/sksJ3RIWpGMXSeVIo7di8oaJAQqJZpWOLQM7VXZgsCJEEjC4OjMqkcC/LwgyAijQdGo77q+BeGBEpZu4NgsDR3G/oVYhxVLrscNZWmg5ERs9TiGsNOq3u5CR/W2Yhg86qspaA1SGvqRbOxYWBJbh+0cId2yWmOp2DaeaceEhrzSdVRckA51ZsWwe3F/kY/h/xDRCqomYVYMZWytTJg+VUc0Rjs7NPxFA08Z22bUkFQxH0gzRqqOtoth3OKIt/rqbnydTTfyiPqto5p9nRaNdSIlmhGaIrZjFl1vGZ2mFdeg8KymsuqmR2QilKfEoMpMSZVF/bAqHflMGVuZMyvWSkZYy2Z0n6PjOE9dV4aljR+VDUyXlsa2VXiQU9S3ReTbDTYtrve4Pm2zM4PdjBmN1zNRx9iJsDwItNVoy2g8Iz7mxD4XKrbjE3kvG1Kx3dLm01gFtkkrr6IzXZshflH/rWNMZbPdKMtgCGN7XyLaIcTs6FrNY1uIhDT1W7fyLOOb5+vpoQZZGSHYwro6Jj1FEyKujvNL0IyjZF2Pi48FZlVTXwNGje2EqnX+N47KdGJXq3W/faOPh70Wdi98r2MYNkeDHXPvljY2rUPDjMbjeKpSa1LqWphcY5dLx+YSZjw2tVRpU1PTb2zTT5S49lRXoZVpk0g3WRqHpohYe2bDa1R0XZJrek8AOMbfSxkbExu7XGZLI6bfRGw+YBkdSbSUKEugLNHQaN2ewjHCfisnz7Zt8/STTzI8PMzKygpC/ODP7vzpn/85f/2Fv2Zuerr1eFRLWtKSlrSkJS1pSUveVpFvdQYdHR386q/9Gj/5yU/+wGm9cuwYv/qrv8rLL774pkzEW9KSlrSkJS1pSUta0pIfqckzwL/5lV/hW48/zneef/4fnIbv+7zr3e/mq48+yvDwcKvlWtKSlrSkJS1pSUta8raL/XZ
kIoTg8oULbN6yhTOnT9PzD8DX/fgHPsgv/eIvcvOBA61Wa0lLWtKSlrSkJS1pyQ9F5NuVUT6f5y//4r/z0Hve833/9p9/+tMsLszzv//6r78pZQniCDAayLYaDVRRFGyhBNSsBh5OBg0Um2fQRIFBvzXSD+NAowfMwQRymO9rsoH3EYQomXpwh0nb8jQ1E59gmTxdg0WyfJA1QU3AnAkUCfFDuo7/0oSMKStoYJesmvmtHwUDgqiG2LAo3k0JgeXT9Ex91aD4XBNgo0wwR4S3U0Lix3BZUjWDdvwYwqkqBTUJy1LgWeFnnqXR2mC7lDY4JI1V03VEljIxGosxVB+IOnbH8jW2FwZZWUEDZxSVJ3pvlTUNNE4sKpEqnhWPxoxKH9DACUVRMOH1DVzcGuCjieN0ou98EC4BAssDuwxWpRHgJ0z7ShPEIwMTgKYbJagYewmMXYTlL9GMq8NgekywhBBQ9U3ZXZqxdgKEoGTb4fWAkjH0kbBQ0gqDWYTBAtUDEIO6vVLHBTZsJQrQCbGLol5uoZSxrwZW0TcBLEpIXCu012VLEgiBJyWjVhjQZ5dD1JJQBsOkNFYQllnXXVj0v0IqE/RHzbxUI4CkHtwZvmQQIHRg8ETSBK6F14Z2EwbeaAHCV2HAW6DDPqPr2kSJELcVoaCkWodsMtf5JpDWKocBZZbXKJWIxSlFgcOR3n3RCCCTXoiX0oQYSemDvayvAZZGSDPLD/tHIKAsw/99EeIELVfXg+niNmcFuh4o7UlBIDXSwyCtVIhGU+swWHFD0LGgopgfkEHDPyghwvxVw7cKFbLWVCxQUWD6iG5YsYr5yiXLIOlkqPOaCLGAqml0C4OxtNZ1XyV9jS8NNpEG2lJ6Dd+sDH7TN9hCqRuBiHU/ZPCA8eDTwIJVS9SxmZi+7QsRIktF2P4hujHyT5qqFAbt18B1RvWNxpayKXNA6E99w6XzhQjzBErGZ1tBpMewTWommM/yQn+5bN77UnBFxrCfAjwZlpkgxIpKvzFhCKSxPa8Zk9oYA0L78WVoP5ZrsIRSUJGCQIbf+ZamLBvBpnZRY/kNrGEgG0iy8PPwOy/WtkJrgxRrxpAFBgunpEWERWsErVXNjy0aeLhi3YfEHBr1gHEhGj5Xiubo7rpvLQI1E1wd/xy0tIwvrs8EaGBGI7ydWlePWOCcND6+7s/jY1nEHozaMAh9td/AJdYxsYHpiwY/GBjbEyrsHyJo9MnAjNkNuxDXYnlpRsrG5zuRDUT2HwV+ikAZu5YxX12N6aTWGKviepBNoN+YPsthVKQUdX1jRThaz3zugw7W+YS4HqkHFwqDuCtbAt+MHSKaQ7lhX3jLUHXXky1btvCFL34Rz/O4+aab3tBvvvq1x/ijP/qvHHvllTflOefp6Wl6/irJpimftaykc03hOoK/vyHD3gWfbbM+3WsBlYSkqxhwYNonq6G7FJCraL6zJ8XQfMDQos/gqs/2SY++5YDhcsCFvgQj8x5bJnzGexzcZDix3LDo8eKWFLumXS512tw8FtC3qji30SFbUzxxY4ae1YBEABsXfDbO+fy7D+T56CtVFtstHr0xzQ1THnecq9JR0vzFoTam31XmvqcUD5+pMFdwOLXHwq4JqklJrqYYmgk4cmuCY71JaglJoaL4v95bYGLI5vJuj93nNKWk4NynlrnvS5LzW5N0rvqkPM3f3ZLj1gtVslXF5T6Hd56oUCgq7jhZQjuCrpl5kqWLeIUt3HZ4idzsKHkKKAsW2iwuDToMLgTMtkk2z/lsXPDpXQpYtRPcc9LnpgmXO8dc3vf0IkPzHslzbVwadDgylGClXTI+YNG9ovnO7jRL7ZKtUz47xj2e2Zrg554vk5CaYkZSTQj+7qYsVwZspjocDm9LsVKwOdGfYPu0x6W+JI7WLOUsCmWFsDJ86d42tk0XSJdGCXE4i0CWK/s+zd5n/rNhBJ8ljMItANuB24A54CSQhIU5lP5nCL4K7Mfjl1Bsxub
PUdyC5CyCNNAOdFFt+wj5NY8vPNDNHz3scKIzw46FAARMddr8/d4MK2mJo+DIUBIpNd1rCt8WbL3qc9exKtqRdBQDju3oYPuF8ygOAo8DeROxXYKL81Ccg8AP6RKb2uHAFrjnIEwuwcoCcBNwFKlu4q7nn8XxTiGoUpieI6AXCEiWz9E3Nkt1aC+piRexK7PMbNpCfnaCWtsNPPbOAYYWXdouX8DOFFCWoG2pCHYC7DTnNmfony1iVdewNeSrFiLVznhfglpKkvA1t55x6SoqbN9n16vjZHWOZ3dY/OfPTLBhfIJd55b4Z4/PMj/Uzmd/vMAL25LkbMGd3zhKqauLp2/t4P5vnDCIqatACYGDYhunb7wdTQrH2Ue1cBBnZYbk8iwaGwuX5Y13YlUSSFdx5tY7sdMD9J49QXppmbyfIbV8FmpJ/uo9Ixw6fBjJKNLfwNUt/SR9qGQdhqddzm5Ksnnep5YQHLxQY2TaQwOfuSdHpeBwy+Uqea2pWYKONcVKzqKzGDDVafPYhzRXZJqjBzU7RzVT3QmwBH3LAZaGZKCZ6HSYz9r0FhVb5jy6VwMe25dl6uYy7dMOHbWAl7ckEQh+4ZM5bptU1JIWltKspi1WU5KuFcWx7QkWC5L2iuaWCZfHDmR44ESFzlXBf7w3T0JKSinB5kWfsU6bHTMe39qZot3TbJrzGV7w8ReTnLxF88JABs+yuONSlYWMzaMH07wy6HDThMdy3qLkCEbtDDdfqbGSlYx3OaQ9jaU1t1+s8eK+NGc6HfZNumRcxVcO5tg+4zHXZeNYgJJ8aU+KHUs+x3uTiE6Pjnl4fleKpCdwsdgy6zGatPH6Az74XJUff9FFOYJNK4oHj5U5MFpj44KPHcATI0kOTIcov3JCIjQ8cLxK/+Q8372ll20TLo6GC30JpjpskJq/vifF17Y6HBr1yPia/hXF/is12iqKcwOhbxGWYDyboKMSoAQU8zbDSz6FokJq+NSzJe4/UWGlzeJP35Fl55TPy5tT3HOyyk3na3hJ+IU7Ozi04GMryFUUlwaTvOdoGW3BmQ1JuooB5aQkX1bkq6Ef2zblcc+4y94xj2xNcfO4y9Ypl/aKZvNEkYLvcL7P5iNHy6z4Kc712mxZ9DnfmyTjado8wX1XPb54IMXzO5L87NEq+67UGJoNuGM+xKUm/HBiv33G48zGJJ4leOBclb94yGHZT7B1JeDAiYBTmxKstEkSGhZykr1jLp4jmOq0+ZOdGXatBfQuwK0XqnzufXm2TXocnHTZueghXZs7L1UZOJtg76KPkoLBBZ/P/Xg7kz0ONx2fI7E2xdDFMRwmULKTP3vvMLcdKyPUIgPVHjLLk0i/yMXdd7Jj0qNvqkqqeAxIUUvdQM/VIgnvFdoWXsBnO15mP5mSj1UqsdZzL+nS3yH1OLXUx7H9l/H4KWy+CPSYidUgIS4th+Aill4wvrYMY2MGU1czEy9Bqe1fIWofRtCG5KsmjQqwGRkMkV18mvN7PkrX3NeBThQfZmHX3aTmV7A4TYg63USIH/XNa86MU1nQGRi7APoh8zko/gcEF4HesNxqAyCY27YXql28cFc/qZri+KYkQ0s+xazNV27L8tCrFaa6bHxLQEpwtd0m7Wnaq4qV9iSnhxI8sTnJR14s8Y6TFZbbLbpWA7LFEpN9GcY3CPpmwdGKlKu5NJAgsyy579gatufy1C3tDM8EBJZgYN6nc00xO9RB18WzVLsGWO1qJzs/Q27hKrX2DWjVjxV0A/0IMsBuXPsTWOopM0FeDm9MPKNvmQHdDdwKXODwe/+UDSefhZlzwG7oB/7kGzA+B/fsg++8BB9+ALJJuDwNZAjPNhgEtuFzAIt55gduJl9TzI4Mo6Xg/sfPMDC3TLm9wHKbxRffkePspgTFrHh7HtuIy1e++EU2bNrExz/2MbKZzPe8tlgs8lOf/CQXLl54U8ugLHP37TcfMBDd3ESYlfDa5hWMCCelZAPsr4UgsET97ln
FUE8i9rs6TH8dIslWDUiNJixbmwrTEQYrg/lcqxD9og0GKXDCtGyDxarjo6JDRXQDoZVWGjuOXopWieV6NJO5gzXlVTZoHwK7gamLvtQZATVpcFDX1pUIQyUNxskOdRdYoByQUqBTIY7MNmi8SB9OhPoydbcjLE4MP2crSKjwptNR4MaRbzoO7AmV4GgMtkwRRxGFqLbkus2Y9aB6xzhLEUMMRavTzQii5mNbFNqWWDrEb2WUbhwyQThQKauB9YljwRAQRKg4aVBXxNE+cVi/BQmnccetVPgK1DrWfrQqkqg7fx1H8WGZ99G1OgYEUgZrZNpfipiaQoNr0qBooBrriL46CtJ878g6astvE4hiuMKgzR1/UmtcbVb9zMqaJPwdfrRbYNVXFxuHeCiz+hnqSMQOSIg+D3dqFErKcCXGpt4ydhzIH8M4xssR9WUtDUWJEFMmLINDQzchySJEn6M0gYKE0nU9xbFk8T2R+rkKooFxi3Zewv6t2ajWnWqoG1g32/QJ1yAGHWX8l4S8alwXtbOm4XMiX6cNZi+hGge/SMK+l1LNVi+EbuoB0Zs6ns7g2jSQ1I3V28gnpWMr6A2UYQydF/kTQt+sEqLuYwKDUIzQbNfbWlWysRqrYwfuSK1NHSGnG6tn2owXdUwn1wLE4thMJcBPivrhULZq4Be1BG03LybGx5Zw7BBNh9HE09cy9MMRqlNZwtiiDnFgxn6VFSrb0qIpH2XaM6dBBbE+aEFgiSa8oJJga7MWa5vxKjaG2sbG9Loyxtszyi+y88Cg/JQ5PEtZzdjKpNKhf5FmdzPq1waXRx3N17AwoTXaFs3nmWiNciK/4JhtVRXDKkZGm6hjTEMn6azrgTSuw6ZhAVZjtTIqhwrMiq8iQt810grHB6mb8aYNTFu8DHGfbtOESY0djtK4fr3HUAbXaVCGUsQwsAYpaDWwsBF2M1rjrl8b2YHTjCXUsV2W+O5SfJG8Pp8KGn0n7G9mFdeMvdFoER1sJer1V+FuZdOYFau7pmnFXaqAa5CypEBEEwbZwNY1FTiudxHbEQ4VoDKysVquw7mRoxrH0rytkslk+MM/+APufeABtH7tYxlX19Y4cPAgv/sffo+uzs4f3oMt/wShHi1OSUuz/yhUKV5Hy+JN1L34x20pLcv7EdNhzH7FP2rLahlmS1pyPZE/jEw/+sgjZFMpfvN3fuc1r/nwIx/hI498hE/93M+1WqklLWlJS1rSkpa0pCU/EmL/sDJ++qmn6O3t5RMf/Rjbtm5p+u6zf/iH1KoVfvv//PetFmpJS1rSkpa0pCUtaUlr8gxw+PBh3vXQQ5w7c6b+2ekzZ/j1f/tvuXDhQqt1WtKSlrSkJS1pSUta8iMl8oeZ+fDwMDceOMC/+43fBODK2BiH3vEOvvHNb9JRKLxl+WpABGDVtEEvxQKZMA/NB40z0SO8SwN5FgYBySAKONINJJzJIDBYowiMolT4iHzNILoi1FKEsQKaAv4qsSAWLxYLEeHXogAcLRpBGBHqrh64uO4celdEQU8GG2RwRMpg98LyaLwYjiYQoq6vSHcRDkcLiShrgyqSIZpMhHgbJQS+DPFOSkg0DaySL0O8n0DUsWA1EeKVQjRWiIZyDZIpyrdm8FMRQkdHqDTz3jNYpLLB4nkRKixqS0y+3voADWFQbbGIh0auNFBGa0AZ8GKItHjoH+bziPWnGtdEqClTtyj1KLAzxF0JqrIZWyYihF01CmKIoXjW5R0F1tQDBoWoI+XCBGNBEVEgnInqFgT1VxRVIXxtAjiUSV0hfWX6SYg2Cu1AhH3CxMooQFY0shZhBzFIwQaSESI0o6jbYkmY8ovmegUitI8QTyVC5JLGROiFCKqwHrV6UKBV1Ui3gYuUBOY7FaLkXI3AD/t6GWQQBp6EeLDwf1UPUAJZVUg3tCVZo47di/qOCMI+EAhBIAUlg3b0JXXcYlRnaczDl4KaCZZyDTpTKoME0802UA+jMf096vuKMP0
Fo9OoTFo0o6Oi94qwTNF3rjERNxYJp42viPpWhKzzRQOZFw8aKkd50+wDZQyTKVSjnwaxGNM64jKKaa2XSTT5yci3BaLh37TQdR1EZYwQW1E5iiLUXxDpbh1eMY7civ+uGkOvsd73Rm2iRDgu6GbUXoQWFUob1KcwqtV1vUXB1ELF/LiIBVoTIvNEbHwQdZRgI4BJx10VYd1c0dCXULqOjBRaG/uIdKAb9Vax30R41SDSWcP24nnKOvpM1/2YNkH0yOYye1IgfRrBVzT8dxSIG7W9LyLfEyCjADytG8hLgiYfHZmiqAdT+wilEG484EyZMgmTRszghYz5xMD4+BKNYPBI/EZaTYF6fti6QhgfqggJEbXY+OGZ9pGEBI6a6ZeWqUGwzj9HaQc0R3vHw1XjSNQqDdRdNA6J+vitY33Oi/kKHRuPm3IRIU623tYxBG4UL0585FGN+DXh6Xp+Uul639cGqSi9yO6Mz40FwccxkKHv8q5TZx1D9lHXfaiD9TpszFcaM97gNcbQaNyjjlaM0ormftrM44R6m1F115MPvO99fPwTn+DhRx7h/e97P7/3e/+Bdz344FuW3/T0NOKbGf5mf47fe0+WtLBwk2BpyUKb5P4TFcZ7bP7mthx9JUXFFvztzRke251keFVRS0gu9SW40JfgqwcT9BahmJZ85WCKlZTF5kWfjqKiUA344t0O73+pSsKD5G0rbD1sccuYy+6rLhkvdAZHR1IsZSWb5z1e3Zygb1lxfkOCvjK0VxTfOJDkJ79bZq7NZvuUy5UNNi/sdAiuJjlk8Fi9a4qBGUVbWbFx3me036GjpOibVjy7P8tSRnLDpMeeaY+/vSFF95TDpkWfrTM+fa+k2HO5xv6LHlUHfuv+Aj///Br7Rl2O7Eyxd7RG2oUnbk5z8MQShclxrOIKlpolu3CJr/3M7Ww/fJGuyVFSOoe0Euwa99h6ZZl955fZeOEiGy5d5MK+ER7+dpEdky4HTyzQXbXxqNBz6QkmRrbzE9+t8K5TFU6MpHjkuTL3vzDH7a9cYc+YZmxDmk1TLndeculdUhzZluSeI0UsBEk0Lw2n+PhzRfaPu9xytsIjz5XYftVlZNEjV9V86Y4Md5yu0bFao7MEYzcU2HB+GkENyAHjFGa/Blw0TrNqHOJOXH6WWvvNOLWjrPT9CiL4AJafRvJXwP8IfBcLsJgATiI5DHwImMFnD5IaVG0cD+YHexiYEtx41aVnJcT3lJKSR29Pka4IPv2tFQ6O1Vhts0GEDmam08Zz4C8fypOraDrXFB1TaRz1DIKzCKrAjHHIVbgwDk8fhr97Ano6obcT5hbh8Dj4m4GXwgGEswiWqcmfQoktLGweYmLnHcx37abNq2JVLB599zZGZh1wsriZNoK2YR4/NEC5INl5waPU00fVsRkanWR8Sy+ndnQgLYeOouYv393FzECOscE0Aji+K8n/e28bWsOWWY/+5YC2kqZrvohVXSWr8xy65NFx5Tx2ZQ5ZLWNVFlnpGyIZaD74Yok7D09jFyd55bY9XO202T7nkF6dBrag2c/Mjp9jue9GtIZ/9dPdyKSgq6TJLQX8zU/fw7mRHUNsaFMAACAASURBVOw8M8PjD97M1KYudG4rli/5rZ/KMVIpUFjxuHTDRtpq7ehUmisj7WyZWCRZneHXf/cB/mRbjlq7xTMPWhw86uOg2Tbpcs+rVZ49lOXub55kw9g0D59O8t6jVQ6dKrF9POCjn+ziw6cqIKCwqvitj2Tx59J88HiZ3aOaYkpy2+UaT94DlxMZJm72GXUyLHZrlJJMFmxuv1Al7WkqKYuhSxZb5jz6FgMqScnz+1L8L18vsZKRbJ7z6FlRrGUkg8s+I7M+ZwaTDM/47Jh0KZQUwws+O656tJcDVFpScQQ/+XyR6T6bVzcmODDuce/ZCoOLAWeHEox1O+R8zYEzAb1FRc6FwUWfSwMJNPCxwyXylTCq/4UtKQJf0F9SfPHWHJNtFssZi29
utdgzq9k3WiXvazYtBMznLO64VCNd05y9O+DQsx5JV1MshBuinTXNyFVFd1GxkrLoXwvYP1HDdQQTPQk2zMADxytsmfT421uz7JlwOTvk4Pia8V4bW8FdYy5P7s/wzmNl8jXN9mmX/eerCBEw25VhvmCR8uDJGxKs5CXFpMXZfsHPPOexa8plotsmV1UMLvp8/aYsV9OS+89UefLGLNsfmOCXewbYWtU8s1NyrjtFu6/5+p4MP3a0zImdKXwEt4+6pDxFJS25/VyVtazF6aEE9027HB9w2Dbns2HBZ+eES1tZMd7tMNZt89/vSJP2BUkzaT/fn2BowWf3mEvZEby4PUX/qiJfUWBbdF08g2rvpeAFjEx7nOtz+Lt9Fh97sUo1LVnNWIzMefStKiwlOD8o6C1JBDDRY7NzwmMtI3ETkkDCfzyU56cOl9g25aGkYDGR5Gy3xaErLp0lxWfvz5AJBC9tTbH/So2EEmyc95jutPnYsTKVlGSmYHPDeI0dEz6/81CGTz5fYa5gM75B8BPfKTO4EPD/3JXn3jNhO95yMcQMntrRRl4O8tLBEQbHK6h0J/3lNJMjPdipYdyEQ/vkaSQJxrbsoL2kWCg4dE/WKPc8wPj2YS7v7GHD6AkEknL/vVgrS1huhcO37mfD7PMkqieBjdj+OLCCxVFgF9X0zyP8vUiWCHGjlwEbN/nPsYLDBM6dSOURIuS2mZdHwu3g5B0fo2M2QAabUfYeUNsRbOGVQz9D//iTdM99DZ+fBPYgOU1u/oQZOwbMWHSREI8ngU7gBjOZjybkXcA+4CUCfhGLJ4ABFB9HsZfi4I3YlSr5+cvodCcDS5JLgxmynmJwwSftw65JF9+W3HayQqGs2TXm0rOm6CoGLOUlmQooS/MvH1+jnAlt4fS2BPkK9M6uMtebY6rNoafo4yYlqZrm8+9N0j8D7zjj8uTt7YxMBxSKAc/vTnO1P8HWCY/OpRqWtvnjH++nq6xJpvpJL8wztXUnntVHbnUMwVlCLN0Alq4S8CCKB5A8aybSQ8CNoEfx+TkkU0Ab/RdqwD2U+raQSB6BTBJm3BBQXl6G5ZVwLHx6HNgKbADOm7HeMTOJAn77Lka3tPPnDxa4cdSlMHaKIN3Nt+7uYXDWp/YTK4izaa702j/clWeAVCrFY1/9Kvfeey979+3jo488/PYtu+tmPBTr7kUad/Trbwh1465av4GMRPN943WXwq+TzfXe8z2yVOvuUd+QiGbMUv2gjPXK0K9XqcaSSX1lSqz72fqF0nU/j1YxoYG3iaehrpfmdfQSYZxeq/kQ19Pi98A7xKkw1yXEvH5Iub7OusH1c33td/FVvcZt9PoUxDWr3a+96SSuLdx61ejXsTzRyHO9Ga1bE0e85u9fq01F4zAC0cDCfa9OJF5Ld7G9gfVtK1iHeYuVf32GTfgw1tljtOommvvAG7GLN2iJr5mIfj1dvoaJrO9/r+djrt9M4nWN3n6jXUVcm5wWr6206+pP8w+T2MrztX7x+v6kjpuM2Yb4PpSor1fP9Rg7/caNR79G99Sv0fbN27HNiejXcXHi+9S3er3/RXNG4nu6Yv1GhtvXNpr/j703DZIkue47f+5x5p11X31WX9Pdc2EGc2BwDUASJCABkECQBAWslgsI2l1+EW3NRJFLyZYijTKJlyjJRIPJltSKyzUuwVNcCIAIkiAxAIjBDObu6fusuyor7ysu9/0QUVWZWVU93QMMAEr5H2ubrMwID/fnz597Rsb7ueYuz7+TyL7/Z2LfC75eHIngdg3a+YVY3LXf3emlxa44+do13X2ppO5Kv0YE1HffJftWRO+3MrijNdl3xWMbW3rxxZfwPQ8n5X7brnm3E5TQt3OAb6/EXY5B8QZeXNzNRcW+K9l9vznou72muINW67uxkn6Nc8VdNHyf0vUeixj9OoLDHdXldmfp17t8es0J6e59S+xeQIm7M4jY55jBiewOe37/mLFrlS7uDLX3LQ0A+7T
uW0DwE/v0wRsV//S3xA538uXs9ZtevNZ38TucKMTrOfY7Oc/cSbtvt9h4rRAl3piGirvy9O+kVb9DnSzuchyK76ZW69cZQL65un7HF8/Pv/giP/NPf4aXX36Zr37lKzzz7LMMNdRQQw011FBDDTXUd6O+44vnD37gA/znP/5jpiYn+a+f/zwf/OAHh70y1FBDDTXUUEMNNdRw8Tyot7ztHXzsY3+fxx97DID5o0f5yEc/yo/86I8Oe2aooYYaaqihhhpqqOHieUv/84//OK5r8y9+4ef73v/VX/xFnvn6M3zmc597Q6+v0XS3Hm6JEiRW8mcvvih+sYVU0n27z4eDz6f2Jowk2J3tcsQW1oudBB8SfNBAGVvHbWGBNP14pV5IjtjCafUku2hEjIcRuz9r9dR5GxFEfFzfM7diJ4dERPslQ8QoOp0kJohwINmw59gY0SP6ntPvS6cQYjutYjvZYQuzRoJD68ExbZURCYGH6EPx7JQfl9ftSXjcxhL2oop2tQt2sESD7+8YSfc9oKz3OU73f771OKxiu1+6YgcftH10D8ZNbNthKyluvzoPpIuKHfv1P6DYXx96fHtQQmuIdF/SphKxf/Wiu7aPD2J/UaK/+L48nz4MX8xz3CbpJSNMbI0yPWjRnTEYX1vRmyorEryX14M52zk5weJt2TXBgHUH7Bj7se6rdyhE39gctPzWGNjOaOrJ0Nrx7bjz/YHURQE9iMf+spXoz+USyTjViH0StTRS94xDvYOqFNtozX4/Vn3owJ2PZB/GrbddeuDaehtHJwbdKumZoHfoK0Dt4Kx6n6XuxcX1+SA9SZh7SPVURSiNiJIYSA+8Su/4KD2Itt4k4O069cwFvef6W38k56jkYF/suO9eycGiJ16IcCfuqa3RHCXXUf2J6Fv9teX36J0+VKIX6zXgD7rHtvTbPsbX9c4hA8lmYgeS1u8p/faP6EXs7RGDe8tOcJN7BZlwEB2oe+qq2TVh9D9rrfeIE7pnTMbRQsdg1IEIvdXKsHdW78lYVT29GPZYoRdZt4VHS/BnQrAXWk3vkT6923sHR81gjNfsnSDeE1+Q20n3WzbYjoNbfSQEe+Zd6oG5R4h9s2DUQHzyEz/YQtJtI1i34kuyDukd99LfsY3oif29iD7Rt9rZ7af9tdlJ2O4f8PTYLhq4Tr+Nt8aG3qPVYiDn9DuCqvvs5z/Pf/jUp3jm6acRe2Rqf/RjH+V9730vH//EJ0i539okwtXVVTaezvHypMUjiwHChpU3e0RNk4eveqQDzacfzjDR1oRmnOxfyRicLEW4EZgaTi375DzFZFMx0VDcGrdYSxmsjgsmGhoMwVwpYrSt6VoGV+cMJhYkhxYjjqyGXD9ggdKsjJvcmDD5nnMdIhPW8yaRCaEVf6d59ZBFpiN5+HqXkys+lZxJ25EIBF3b4O3nO3zlnhRLExbjtZAXT7o8+XyTuVKXv7w/y/xqwOcfzpINQ8ppk1NLPic2Qpp5QSVjkgk0JxcDXD/k4hGXkaYiykjGWooTiz6tlKTlCP7D2/M8uBxwZLVLdmMhdtdUGjN4lZH6BIXSl5Es87kn38Wp5YB0eQ2pBJH0cRtXCNMzTN/6OrnNMusHD1CodXC8kGyljNu9QHGjSb5yBaepWDs0zpkrm0zffAo/f5zU5nPUxw8xe/FVrt4zzezFK6jsCIVWSGFzk9mq4oElxaHlGrmGR3U0RbG0gdmskY5izNS9i00C22S8pHjlbIr7v/oSqc6FJJj6SQB0kqB4AmijeRLBdULxOFZ4FakreJnTmF2FEX0OwTJQQ1AjEB/Cz8xjBReJ0UYFoIUgJGKO7uT9aKfIi6dyZDqKhmuwNC1RQnBj1uLd57q85YrHgZIiNOKgcGgjYrweUc1ILh2wybcUWgiKHUUmdEk1ngU2idF6R4BNNB9H8GLynXgWVj3IKfirS9BogD5OjF06AGSAEKEPI/UFlu55kEI5IB0IUqU1vEKRhYMTjHQMVub
yjDckhtem2DU4tOrx9dNplkdNuq7gwEqbat6llTH5hfcWMF3Jm690WSsaVLMS3xKkuorLBxwYDxAdg4laxMqUSWgZLM3luTXrcvGAzbFNm4WjM1w8PUs+ytPOpHFDzfSmwt1Y5Nq98xxcbWPZNgpJYT2kOXU/yjlMvZDjN38gjzQFD93wOboecuyVS5iez9cfPsC9V+tML75KsZmhOpHj/EGHjK85UFWMNjTp0KHjmrx0IsOhkuT5UxkeOHcNy19k0j/LM/MZppsRp2+GHF0JWZiwOLwUkG02GGtEjC69Qu3AGXIrL+BlBdLvkl67wRPVFEcX6xSaIZlWB8N0OD9p865LXdqu5NaoydmlgNE6HNmI+MaMw0Q9xrnNVENCQ1DoRqQ9ePqMzVsveExXIr58b5piK6KUNnnyXJtaRuI5Bs8eTzHSjpisRqR8xR89lOFANeJL96UwNHQciRsqRhqKiwccHr3WRQAT1ZBS3uT4esD0ZsT/90iGRtrgCydsIlPw5mseNydNlosWi6MxqikwBJP1iLURg8/fl+KpKYuTjQjPkXzxgMWIr3nzosfZ9YhjGyG+Kfg/H8lgC8Hp1YAj6wFTlYgLxy1Ovxpxa9LixrSFVJq2I7gxaqENQdeU3LfgcWwlpJGWXJuwOL4e8OC1Ll1XkvcUkSE4d8Dh5VMW0ZvqvFTIcvpmyEhHoaTgpUMOha7iTx/McnpF8dT9Gb563GWsFfH0YYepkSZHrgrWR0zeed7DswXnDjpkPIVvS8oZgyeudil4iqfOpHnGTfP9+XVudXM8fj0gFYEdaVwFRzaCZOEgSPuataLJRD0e01/9AUE5dDi8GZIKFbPViNLbOuhVh/NHbL5x2GEzY+AZgqWcyWwzop42mKpHXJ+wMYGrMzYP3vQwtGakpZi9tYxwiyxPZ/BNQaVgEFiSezYUZ28FuJFmPW/y1/caPDPpcN9qQC1l8PANn6tTFsWO4uBGiOcIDpZCnAC6aYP5UkjXlTRcyZeOuzHuby3EjaBxIuDNLyruXfSZXws4vtRhtOJz+ZDLXCnk2pTFjRmTkaZmuhxythQhhODmuMVyzuTRax7jtYiirzi6GpBrK8oFA4kgMODiIZuXDjk8+uIqRrfB+XsOUGwrDKWpZQzGl1bpjhykNjLGlUM2b3rmHEZoEOVmqeYtjr76KlZH8OoDjzF18ybXHjrO6NIyTnoa2RWkOt+gnfkIjcm34jTKCDZp5f9H7O5VhArRTMQ3hfCIxIcwogUEC/zO3/pfuP/S1+hkfpjKgffjVrMIRrh6/48wWlU4nYgwPYvZuYUg5OJjb+XYy5/BCp5FUEVQQ7CKwAQsIs4AEsELwAIxmm4+idE34vlpqggzRSg3gMVkfvERXCPm2y8hqWJ1X8UMz8VzUJTCaq0z4WcJbYvj1xpM3LxIsRkwVuuSqlR47oFxuo4g11JcOGQzXlccWQ2ZqHQoLF7HK47QtSWXZm3uv+aRr7WoFTIUOor5tZBTSwEamGxqHrniM9GIyHU1Mxtdnr0nTceWPHaxy40Zi0JHUSumma1rJsqKv/uxcX7oYpv82gIvPHKEg1fX0UwjafDs4x8n18nx5+98hGPX1jH0eRRvR9ACrrHF7o+Xzm8iEvMYfBWz+yLSW4NqE8KD8fzYLsdw/HoE3XvQzBGKhzC4nMz7DhGPENoH+MLbjzC7GVItGkw1IiyySOlgCZObkxYjiwZWU+Bb4tt/5/nGzVv82I/9GJ/97Gf3XDgDjI+N8c9/7uf40R/9e29IHUJD0DEEB+oRoQGNMYU2NONNhZKwkDOxIk3LkQSmwPE14y2FZwokmmJbk+soJmsaM9K0bYlQAt+Eti1puxLX18yVNL4F1YzEqJiYEWQ6Gt+M2x0YgrYtGKtHRMmN2ZZroAQYGippScoDJ9CM1eLrayDb0Rga7EBTyhpU0xKh4kV+qqsoVkNKOQOh4msIqalkJHaoObke4Siou/HCOOXFAPpmWmJGmrGmQgF
GoHADRWgInh23cLwEAq/C+G654SBoU7zZQFJDUmV11MYKNIbXTe6WKoTuoE2LYn0Zy6sQJswq2wuxugGCgGKjhOVtYLfbZDsKxwswwk0wLIygih1qzKBO15EYXotcN/4K6LY9xso+xxcD3G6I64UoU4AOkJGH0w3JdCOO3fCIZHwno5OS5Kp1BD4xQMtIFptm8n8XMNFMbt+TFFEdsJJAqhHUk2/FK/G3UFkgskYBOxmMfnJfqRUPTCeLMl0CQ2AqCExBOSvjLyeu5NhSyKH1ECsCU0HK16S6ESkvvu1VzRi4XrKwBiIn3VPnKKmzRjHbczfChWobWh1Yr4GKkroBpJL2hEg6SMr4jo3pB1hRfCdYWzYG4DsWzawbYx2VotCMGC+FbOYlm1mDjpNstpJs5vL5OZu1osFI4tOeKWnbAiO5ixSmNB1L4JuCRlrScU0auRT1tEE5axDZadrZPOVCntDNggDX09ghyMCjlcuRbQbkOwrPNlAyT+CMo+wRlICL0xb1lODIeki2q3DbTUATCkh34w0MiqU2QgnKuXh8z26q+E6e5SA1bBYdkBaREW8qJIg4cCFM+l9QqCZ3/gyBFYJUESOlEKE6BK6L4VcAD5SP2W1w9oqH2wlwuwGOF3BwMyISYCW/1HiWxA40EyXNWCuipQyEhnRHU+hqbKXxrXijobYryHY0bqDZzBqxXSOwAzAUhAaUshKpNFaySUctJfFNWBizaLoSzxT4hsQK43tVYw2FlpBtazK+xlBx3VbzBl1LsJCVVFyJFWlCQ+BZgvWs3L7zGZiCVkqwlDdZcCWG0jQdQcWOp5ecp5mrKpxQE0l4JSfZSMvYzz1FylfJrWlBy5WEyYZPAmg4gk5yMyHla7Idtb2xVMqP/SI0YLwev9mxBUujBq3JiLWiRGoYaUYEFtRTEt8UXJi2iEyTRlqykjMIzHjjGMNROIHGiTSup+J22fFmTx1HYmiYqUcoIeiYguuBy5zdpekIZmqKTBDbwgkVvimxgmSUhorAEKR8jRKC8gR4psANNDlfY4Wa7rjGMwWbWUk5E9fTjARl18A3BG1LkPFjfnfTldTTkkIrtoWhwGl1iSwXLeL+6zoSJ9JM1RRmBGlPoYHVguDZgokdgusJ0p6m7sa+Ffc7ZLoKx9eMdeP5sJPYoGUKUqFGJhuvZGVEpquYqkZkO5pcU+F247uyZgSBEZ/bcQR2CMdWQgIj7i+l418trVAzVwpxgnjjCz+Z48wIylmDSlqCMJChh29LDBUjKyMz/uE8stJIBY2UJF1vgbBAmiAEmVoNcKnnJ5Bhi1Y+DYSkfYUSKcAmsqbxU7Mgskl5MwhdR6DQ5IA0kEGLWYSuAoob41OAJLQP46VPoBgBxmhl57C7AdqwiezC9u8KrfwI6dZ1BI1kobuBoNLzG3g+ieFNdpjOWzG6Fv/tmJBxks+Wk1i0nhzfRXALwSZmcA2pl+MbI8rH8JuMlgNSnibVjnCbNdK1Lvm6jwy6NNKSri2wopg3byhwfcg3Qqx2GyPZKCcEzOSGuxPGX9iyHUWxrrAUHFzXTNYitBRk25p0O6KejsdtsRp/+QtNSTftMF5VOJ7i5cMWoe3itBrUchbgoBkDXGr5OUKrwPL4RDLXOWjGk9flpP2byT3gIlpkEFQwwnWIQui0k/lOg9+Nz6t2gDyQRpvjyedG8suEi5ZpSjkjsYEmMiB0c2hpkG0pWimBvSpxQoWp9Ld38dxqtXjHO97Bv//3v8780aO3PfaTn/gEpc0Sf/KZz3zL67G9AZLYfTt+/zP2eGvgZ8T9SGli14d692d7/DbS+5ne66czwfaPNXdQ474P7gYxZ+nbnGDs/E4i9F6F9P9UJfZkk+5QgPW+jdkDqNbD1b3tIzqy/2dBvc+DPN/UU093crrWu82sdyPA9sLz3RHCbo/HavZl8O4qXt+RO/TWTe5zWanvbDTty15mN7VODzRrP8PLvcbUYBWTCwv9WoOlp2xjD1PqPVx
B77amHti4cr9HD+6GlqXvLlrtFH+HJ/aa2dD9fS16Y+hecXWPZwJ722wNfnYncWufOu5rU733kBms42vhB8XrsPPuOLWbCCb2Get35AL6tSsg9nhHEI9N6+6fcdzt4ndrEPEajvkaReg7d4vd7+q77Lj9JuRdfahvc+prgS7vEqrcV6Tco8y93hO75r+d5wZvs/nEfnjH2+Ds9B5PpYg9/P9Omy8GH/t5DduInb2h79CnXyfsu6ed5rdz8fzDH/kIP/KjH+GHfvBDd3T8H//RH/HQQw+zsryEYRgMNdRQQw011FBDDTXUd1LftjvPv/6pT1HaKPFL//Jf3vE5B+bm+Oc//3M8+Mgjw54aaqihhhpqqKGGGuq/j8Xz5/70T/mpn/5pPvtf7v4RjP/1H/5DJsbG+MVf+ZVhbw011FBDDTXUUEMN9d/24nmjVOJ/+OhHuXT+PGNjY6+rjL/4whf45V/6JTZKpWGPDTXUUEMNNdRQQw313+7i+W+//wP82q/9G6anp7+pcn7rt36LD3yLdh/MdBVHSgHj9YiJekS5niLXAivSrBVMploRaq7L1SnJpXELz9GU8gbT1QgriDOxD2yEHF0NmKlEzK/7TOaapFsxGaPYVCyOm+TaiqlKRM0y6NiCVw/YdFzItzRSC2p5wWhTITXU0gareYPIgPWCQaGlqOQ1rh+xPGrScQW5dnJsymCkFeF2NWcWfU6s+ExXQuZXfbqOQEuD+bWAlKe4Z8nj2FLI0fWAq7M2Y9UYl2eHMYGjkZIoQ7KeN2i5As+EbFcRWpK2I1kaMzm5GTBRC0nX6rTGJvBG8ljeVcDFCq4BK0Cb+6+v0UxLlGWCjhCRQGNidjeADcBn4uaLWI1b2K1LGGEV6KLcEWCD5sQEZ85forjxCpoSVv0VBAtM3noawSJzV15E6jqY0HENVmaKiKCDDAN826BcdBnZqGM162jDJrQMnPJ17Pp1JpZXka1l5i/fBC1RjKPIA3VgmpBHgRlgDcgkJI0ihniF0ngaUEgPDHWFGCMEfvb7AQ9DX8T2LrFw6G1EnAUifPcoigJBZhoQXDrocmQ9REvIdDW5tualIw7TlTjTXAFXZ0w285KOI1gbiV9v5gx8EyIpqGQNxsoNnM0lFEdZm/8QcA9x6sIkgueAApydhw+cgQ+chdECHB0lxtMtJMeXgAowieYisMD01S+RrbyM4Qc8e/YQVmOVw4s1MrU6k6U2zZRAdhqYfhvpdxirKw6WAtK+ojbiUqyUmFmr8TPPNHnwWgcrUGzmTWbLIV1T0LEEZxY8TlxRHCqF1LISy1eEUrA6YrKel8xWQupZydUDFtenTJbHbfKNAM8W3Jg2aY86zF3/GlbrVYqVDuNrKxj6edKVL2E1LpEvLfLQVR+vqCg2I6oZAy1MgmyW2c2QbNNH4SJDj9mFVe670WWy1GS1YOD4mq8fdykVJPMrHkQdji+FREYWCJE64omLHSYaMVng2kGb+eUWtZxEaIHh+0haOIECWqTLy9jNMpI2KqhgtKpEIsKur3FwzeP9z5XItzWupzm76FHPSI4t+0xWIuwAFgqC85MWF+cMxhsRr8yZ1DKSexYCGilBYMZM6bYjyXmKdNvn8FKVSMBsJcbblbMxSWKuGmGHGukrlICmK9gsxISPe296jDYiOlaczDtVjSjnYkrFgXJIxZUcK0XYYUyryHcUrq+YrEfMlgOkgkuTFuenHKounGpGjLQVWsA9GwFnl32uTJiMNCNujpmg4MnFgGJHYUQxtaRUNHnklRr50joz5ZCpekQtbWBF4GcUVqgZaUdIramlJKWcJJPQMEoFg+vTNq1UTC5aGJXgC9bLWeZXIhSwkTcwQ5hsRLie5ljJxwoUpxZ9DlYjurbg7HJAftmk0Im4/0aAGcJGNo75dqRZHDFIBQoz0ixNmNiBwgrhXKfAWFMx1owYaUfkugo03Jw0WZg0Wc9Jbo7bVFOSTEdxa8Jk8oZkpBXTaK6PW9RTksJ
li2pO8uqkhQYKI92Yi+xEzFZCTq4G5FuK+TWfVBATSjIdxXg9YnXEQNkZzh9OU2goCu0I21O0bEEkYaMoabmC1VHBwXXNuxZ9ZioB9y94vDBnc3bRZ6IW4ZsxWaFrSTquwAk11YzkpUM2UsPRzYCuI5ipRYw2Qry6zVQtYqLssZ6T2JUlAkMz1opxcoEhKDYU63mDW5MmtXTsj2aCfG25gmrWIDQEz827vHDEoZqR2KHi1qSJlnDPqo8IPJST49K0RbrRJrO2yGglRoEKpQksOLHoEWVGMVQN2SxT3CghdZPa9BT5LoTpcaYvX0XqNjIMcb1VIIXTvUy2dJEgdQIwcdovI9Q14AbKzlEbPwmM4GfyxGQjnydefgnNHIZnsV50kPgoRpi+8de4tSvcODKKUJrQmCVwppm99HwSbw8ARcBGkyOS8yjGkFxFsITmXmAW2CJMrLHNoK5WYaWc1GGOHfa0Eb8n4+MV9wARggBDVxEywqovMrewSDdjYtDg6rEirUwahEmuHZH2NMX1dR44sH32eQAAIABJREFUf52pUoNQhkSig0mZbxx3mVpc5uErZTw7ZtZfn7ToWnEM6jiCC4ctbD8mtJw7Y2D6CmUYTFUiTi352KHG9SKsUFBLC8r5mOz1v32jhVurYIhVTp17EeXkEARAi/mLf06kAk5eLyH1InAIwYVk7jLR3IMSx4B1BBcw9AqakWQunE7st7kNmFIiC3TQ3AKuYaiLxB/aKHmcWyePUJoeI9tVCK05uB5QrAZYIWjDZGncpJqO40jLMXjqkPPGLZ611nzik59kdnaGj330m0fO/cB73sPMzAz/5t/9u2+6rGxHc+96yFRVMVGJ2KylyTfjYLhakEy3NeqAxyvTBl+dsujYkrW8wUw5xAo02a7i8GrIsWWfmc2IY8sB89kGuQakAsVIPeLmpEWxFTFbjti0DDqO4IVDDh1HUGxGmBHUCjDeUhgKyhmDpYJJYApWRkyKTUWpoEn7isUxk7a7hSbSVNKSYisi7SkevO5zz62AA5sR99zyaackCJOTSwGpbsR9N31O3Qo4sRJw8YDNVDni+EqAHcaYu1raIDIN1osGrZSkY0myXUVkxhi1G+MWj274TFUiMtUKndEpuoUCpn8RcDGDS8TonCZv+sYKzYxJZFkgFDKSgI3hryWBwGdy5evY7WvYrZeRYRXoEDojCFapTk5x7wvnGSk/h2AVp/UcghtMrvwVBjc5dOtZJDW0BV3XZOngCMJrISMfzzUpj6WYWK5itapo0yE0BW79Ck7zClPXlzE6C5x4/jpoA8UkmgIxCugggfMkcDBpSwbBEjCCqZ5mbTQNaIwuGOF54CQg6ObfGy+e1cvY3Ze5MfdeIuOBePGcOoGmQJCdRQvBuWMu84sBkRRkuxH5puaZ4y5z6yGuH4PZLx602SgYtF3B8pjFRsGklDfwTEFoCDZzkvG1Om79ForjrB75WFIXE5hA8ufx4vnUIfi+B+F7H4LxIhwcSdp2BTiWBKB1YBLJC8B1Jhb/kHT9GaQf8jtnjuK0l5m/WiNbqTK13KKekchODbPTxPDaTNRCjqyFpD1NZSzNyNIGB29W+PHPNnjs5TaOr9goGMyWQlqmpG1LHrzicfJcxNHVkGpWYgcQSlgcMVktmhzYjBfVFw85XJ6xuTXlkK/FE/blOZP2qMvM0l9gt19gpNRh7NYSZvQV0vX/jNO8QGHjBm+54OMVNcWGppyRaGESZgocXAvJ1Tw0GYi6HLy0xEOvdphZbrBUNHG6mi/el2KtaHJiwUNEHU4u+D2LZ8X7XmgzWQ3JtxWX5m2OX2lSz5sIJFbXR9DA6UYImqTrt7Bbm/Hi2Stjdsrx4rm1zOGVDv/gv6xTaCvSnub+ax6VnMGpGzG/2fbh6ojBc9MWLx00mapFfO2wSTlrcN/1gHo6ZmcrDS1HkOtGpFoe89cqRCJe9IaGoJSXBJbgUDXCCcAOQcn4i/pa0cD14U2XPcaqiq4
dL55nKyGbeROh4fBmSDklePNqgBNqahlJvq1I+5rZesTBUoihNC9NW7wwY1FKCU40E+SnEDyxGvDQgs+5aZvRRsSlSQsBfM9Nj/FOhBnFnJ31UZO3fKFCYW2Ng+shU9UgXjyH4OU0bhgj/KSCWtZgvWCQ8xS+KVgrmlyctWimYkTotTGJ8iQrm3lOLIQgYDVvYoaaiXpIytOcWg2wA8XZax5HKiEdR/LmxYDCTZNiS/PopXjCX8+bzFbiGw23Rk2yocaK4NakhROCEwq+1B5hvKkYq0eMtiLyHY1EcHXK5vqUxWrR5OqURTUtyXQ1N6Ytpi4JxloRoSE4N2FRzUiKr1hs5iXPztgoBMVxD08KlKuY2wy555bPSFNxYiUg5StSvo4Xz5WIpQkTbed48VSasbqi2IiRe23bIJTxfNJKCRbGDI6taJ686TG3GfHwNY+nDjk8eN1jqhoRWAIloWvHyEA30FSyBl874mAozZm1kLYDc5WQiXpEt+owXY6Y3PBYykrs+k26pmasoTDDeGyPNBQrhcQG2XjBZUXx4rqRlpTzksCAL590efoeN44LPlyattACzi75GKGPsvOcm7XJVlvkN24wvtZAECAU+Jbk9NUuYWYcqSoYzU1GV9YQqkF1aoZiWxOlx5m9dRFJCyMMcZtLQAa7e55s9RxB5ky8eO58A6EvAJdQdo7K6GlgHC9bRBACPu989uvAQQzPYrnoIPCJxChTy1/EbVzgyj2jCK0JzTlCd46Zm1+LF3IcBkaIsWsFQuM4EeNILiK4QcRDSZyeTBbFy8kCOYJ6GTbWk8XzgWQlE7CNK3UMYIGIe5NzAqQug4ww2wscvHSTTsZAUuPiqRFauRxCWhRaikxXM7q8zENfv8L0SoPICAlFC0Nv8tSpNDM3Fnn86Q08R6CFwdUZG8+W24vnF+dtnAAcX/PCIxZOoFCGyXQ54sw1HzvQZDyNFQjqGYONgoEVaT7+l03cahlDL3P2wteJ3HxS9yZHbn6aSAWcvbSOoW8Bx5A8k9yoM1A8gDbvSfjWzyXHjBPjYqeIkRjrkN0ijhRAewguIjiPoV4kxgO6KOMk14/Osz47Tq4TIbTg8HLASNnHikBLi1uTFuWsgRnFX/p+Zz71xtE2/tUv/zIvvPACX3nqqW9Zmb/5G7/BmbNn+aEPf5jZmZlvbnHfh3vb+ft2pJl90Uii/72+vebEzgY3e+4fNLATm9iDoLI3OEfsoPK2diYT/Ti73n8xmkvvuQvULtv0XFEygKPZroVgFzdMitsQaAYBZbtBV1u7p+0Axwb/yT1tsvsyYu/fWFRvD9xuV8D+E8U+ZYv+nhvona0d2XZaNFj0YFX32l1tV8f3YZ0GnSXpLU0fFu/22qq7sW3f9FbpEoQS/RSk3qtvIet07NxaCrRJ/+ZYAz6J3I13lAMeuYXUEgPjY6eNcgfxpWVS92RHLLnH+NxzzIoert1WycnOeWIfVzAG+k7exm32ZQ0mY9Ls2QFOJjsTCrErBsnkmG0P27NdW7FA3H7o3cVnYj+qlhC7dgWUPf8GaVy9sUOid/XDds+bsR30fpUaGCdb50m9x8+nIjbpLmTWQL36diwU/e/3jt29otfWa3OgHdtjAk2kxa5zBkOm0TtObheS5F4ds2OPLVswGOIG6t/XbtET38Ud/Dy9C2E22J9ix0fF7vG8xx22feOA3CNIyl0d3FOU3MsB+lFnGnF7pF2fsW/HKpP7BGixbYM7wdKJXWXuxYy7Ay5hz2JD7Hl8L7L0djbYDVSUPXP7nvPRHtbQezRyrxlWyf3WA2JnZIjeudXYw0aS/SGKewVyyV7cxG3k78CaqW+MDcypb8jieWl5mV/+pV+itL7+LS23WCjw+7//Bzz62GMs3Ly57yYrQw011FBDDTXUUEMN9UboW/7YRqvV4pFHHuV3f/d335AKP/H4Y3zoBz/MT/6TfzLsvaGGGmqooYYaaqih/uYunsMw5C1vfzs/8tG/x/e8611
vWKX/7b/+Vf7T//WfKA3pG0MNNdRQQw011FBD/U1dPP/jn/xJThw7zr/+xV98wyv++3/0hzzy6KPDHhxqqKGGGmqooYYa6m/e4vmLX/oSv/vpT/MHv/fpb0vF3/HWt/Ku7/s+/qdP/IO7PlfJneQsMwQpNb4pY3SbY+CZ0PINCg3FqK/xDUGxqeLMSwFtW+BZcUZy0xFx1njDxDehZUu6jkAqjZICx9eMNiMqKRlnGLuSSsZAmWD7kPIUN0ZNpIZQCtKeYqoakfIVo1Vop8R2QoihNE6oKbYVQmiE0jjdgLYrMH2fbDvE6nooKWi7cfZLuq0IbEGuHTJdDjFCD2UI7EhTaEWEyTP4RqQJzPj6dqhouJK1nEHNkdRzceKFUAFWq5E8Wh9jvJQsABbgok2bSMZuVXcdvJSFnykMWN9HU0HTQNAEFEZYBwRuYw1BIznO2j5DkwEMIrMAmHRMgVCaTDsEYbCWsTG0wPU0oWWjpIFnS4wgQtl5QLAxXUAj0LaLl88QpnLE2cqSzsgIzWIajQU4KEaIzCkiYxQwSHcqgI9MkHwxdghkd6FnCKXJ1zYQYgPwkGEtLk/E/VBoKizfp9iMaDkSJ4TpzQANrBRMlAAnUERGjLIzVJwxZ4WafEthh5qRRkQjHbN3lJXCbQUEqQIaBy2KwHRsNsuEdApcBza7EERsJUhq2oltTeLMZpsY2aOALsoQHKw20JgYYQW0Ah1jxUI7zXreRUmDTCeinJUYkUYqgbbTaGniW3EegmdLxuoRLVeQ8uP6RxJ8Q6BE7OuNlERoiGxBrhtn4LdtwfRGk9lSB9dXEIXkNtcYX1/EaS0CVaCMDG4hxCbQIUYQNUFXMaKQTAPMdoNivRUnqoU+rqdZHHfjpCEB6Ih6WiLCgGxXYUQhc+sVsi0f3xL4VowXQysgRPpN7EadYr1DutlgvNRARAHZZhPpV5NUFS8hyzTQtBF4QB0rbCJ0CdOrAQ3szgYyrJCqbzBSa5Cu18h3Igg8EJBvK6bKIZlKyNhaSLGpyFQUuU5M5vFNQWAIPFOwkTZwA01kCIywwVilTqEZkW8rCi2NVJp0V5FvR0xVQsZrEdlu/FlgEvdtGJMfqhmJEoJcJ8Iz44TQVKjJdTT5WkQkBaGEjh27tRkSUybqEcW2IlONGKuEGFojtUbq2H/HayGRFKS9OHJkOnEc3MwaBIYg2w5AK7RhIEMfJ4gpQwD5pibtKVq2ZLFo0nYE1ZSkawoCCRtjOylSVVeS62gMKyYR2T7YfjyWPFtQaMd+KDXIyMcKArIdlcQYqKcT/FZCm4hkPFeku5qRliISsJmRaA1mqMh2FamuppySaBG3zw40dqgIjDiWSq3xjdgOkQHVtEQLCMy43pmuopaReIYg11GMVkLGGxGyIUmFmiI+RghWEJOWMs0udhDPAXHyrWak5rGZMRmrBbTtGFFoRJD2Fakgtp/Q4BhR3G9JblvXkowlNJFAxvOMbwpKOYONnMQONSkvLiOS8Vw12lC4ftzOtK8xPR/Tq5HrBnG86rZjiJoBhgapwDeg4QjqqRiPmPY0htSUcgYNV1J3DfKd2J6ZjsIIA4qViNFGRKod4OUyaNNipBkhlAIEtZSNMlNoGfsEQGTEsThyU0jVRJvZGEsqBPW0hcAHQkTkxzGUUSIxEuMrg1riRV3AB7oItYrbqgAWpre5PS957ij1wijgMVKrAC0kZaAFKLItRStlgTSQYTuJsZkk/mZRjCAwEbqGIErisIMgRDFKrTDWE5sPJ/8vEGPsHMAhtA4TkzdETyy3gDIxRcIAAoSuAjGZxAw1GsFItYbZ7SCiJplahXR1AyFLCMrIoIFUGhln83FwZSWOsSog1eqgZYyZWylKSlmDatYg21K0nTieFyqaji2ppmJMYmTEa6ZQgmez7YueLWinJdqwUWTQSGTYQdBO5iKFFaxh+gtADcUmkAUsNHMIOqDKSZ/YxMjZFpCP5+iUDRNpME1gCqE7iZ22EH+
Z5DoeEMfTliPp2JLIgHraoOUacU2EZiMbJ09akSY0BA+XA4yf/dmf/dlvdiG7sLjI+977Pv7yi1983RuhvB79rfe9j5/6qZ/i9OnTzM/P39E5q6ur1L+SowsUWgo70nzjXhOza3B90uTiuEUtB5Wuy98+51MI4cKYyftfbnNoPWB5zMS3JIaGm7MWG0WTA6WQJZ3hr0462EqQ0XFwyXYUh9dD0hr+5N40J8ohGPDyYZt8VxFoybG1kH/8vQXetejzwpzN33mhxePnu4w2IhAm547ZzJZi5F2hqVCGYKQdUS2a3H+lzch6ha89NMabXlhkrCHIbK7TKozw/EmHUwtdDM/g+kmDtz1d4b4bEXajxPmTY4QSnrjo0chKJiqKxWkLrTXTdcWBUsi5oza/8USWayMGrbmQ7/+az/jyZVL1JWojc+QrX6KTfzfaPILpnweO0B1/C2sTDqNVn+cfnUQLB9wZ8qXzCG6hOYTkCoKX4oGKFy+egzgAZauLSG4kg8RMBkWFiEeQ1PAK34votvnGffMc2IiYXW1hhIr/531zPHKxS74F9dEcbqvJ8twEY2t1wvwhrNZNfu8D7+aBlxcIR4+wevgAhlkkU/0KUGbxkb/PxcNHOXrlPAJByHvwxt9DYM9hdy4yUj+PoIoZPp8Ep0Wgjt3562TxfADNKaZKX8dQTwMNLL9FJE4R5Q4ghMFMWZGvlJmqm/zpm7KcXvR55IqHlvCHb81x/y0PgM2iyf1XfOrpmLVras3pJZ+0p7n3ps+1w0UOX7uIN3aSkaUmjekC6UqN0H0nMjyFKL4Ij56AJx6DTAZ+9S/AqEMpD1xPFnRbi+ciMVsjTYzsC2hM/ACPv3QdGXWwvFeIzEMgBFEqR5At8Nn3TnL6WshYTfPHb81y702fQlNjmhkwHDbGLNIdxeKMw6nFgJfmHY6tB2Q8jWdLNgoG2a5mZdRgcdLi+HLA+bMu916MMW0XD9j88B9c4E1XOqhUhvG1DWZvfoYjt75Cuv55BC8Ci1jdVzH0KnAeqCBwkHqNxuwjuPUUx59/jrlyA60EZtcjLUf5p++f5INfu462LMyww9OPn2D+/HXITzKxtskTzzxPrpuhMp4hstN89b4UZy8u43T/GqM9Qqayxsyqz9jyTU5criMin/GVDdz2c4TpY1idL2K3FxIcUhuJheA8dqAw+GvsjoHBK7jNBUxvnUL5OjOLmrH1G2T0NJmNZerTY7ha8z2XPX7giseHnu1ydC3gQEdzfCUgMAXXJk1yHc3Xjjn86YzN3321zVTNY2T5qxy+4VFUY+Q8OLAZL3gdpXnkkseJ9ZAHr/p0XcGZhQDfEqS6XfJVn6ceyHN5xmasHaM3mwmD17ckj13t8sQ1j6tTJl1b8tIRi4P1gPmlkPsWfI5vhmSV5p23Ar7vXIdS3qDpCNIBnFz2OVIN8WxJ1lekfDi6FvAnD2fYyBscXw24/0oDq1UF08XwuwSZHB1H0HUkB9c1p5YDnjrp8v/e6zLfUHzhuAsaXAUvPqmZuhHz8j93Os2Z9QjvcMCpy5qziz7TlYjVUZN6zuSt5zqkPahmJSfO38T1IsJ0moUxEzvS/Pm9NsfXItbzBuW8wWreZKyteOC6x1hbsVQwuDRjx18GI5hqKabams8csXnPpS5zlRBTa5SAW6MW9y/4dBzJzTGLQ5shhXbE7z+U4cGlgLWCiRsqRrqaLz9gcXhDce9Nj0eXA06tBSxHDkIKHh8rcebLJlOVkHbK4Oyrt/CyOeZXIoQURCLkzCub/Nd3T/Lep8qcn8/w1TMuh0oRqVAz3oiYK0c0XMnG6ZBiSTJZVuTaioVJi/dc6nJoI+TWhEm2q7k8a/HUiRTPHTN52wWPuXLEetFkqhGjCYu+5uFLPrmOYnHS4vGvL5ArP01GTpNtrOE0Glw7foTQFKwXDGwFF6csyhmTzZzBWy51yHXh6gnJ81MOBoLVUZP7Fn0OboacXgjIVWucXpXML/kUy3XWDh4
kHZpI02D++jrSb/DUu89wZBmiVI6v3Z/l9PUunZRJpnyZ5oHHyGw+hZd7mG6+QGgKXj2W4fiVryFoEzGJGZTxnTcTOfPoyMHpXECwQIzwXEJQwwhWyTY0gjRO5+VkUa1YOfBJrh8+ztziOQ4sLmHol5BcBm6gOURGn+XG4QKTpTZu4xkEqSTGShTHibgPkzqGug6kEFwGDiMwCYzH+MbZxzm8/GdACsXfSbCpR4HHkjh9isbkO3Gbf5EslNMQVoBRJFVi1N0sUELqV5E0gRGkewi7fpODNzdwmxFW9wrja6uMlc5hqi8juY700oTZo1jdELtzk7e8fAFD1dEqR77cIMyOcfFIiv/7bS6BaVIfNbj3ms/1aYtGRnL/KwGVnMkrb7ZxGpqUpzEVXJm1MDQcXAtpZCTaEFw85XD8WgfhGTGzO2hjcoF4T4iQlHcB13sKuIjgEnA2uVHycSTnkPrV5MvOIeAcgjqK9yL4Ahw5CA8cARP08qNI/RQwmtz88oF3AteANSLzCZ6/7wwbRYOVgslMXfHiMQcpJOP1kMDQ/Mp7CpyuKO6/2uWVIzYfPNf95mkbQRBw7/338+lP/x4nT5z4tt42Nw2Dv/ziFzl95gybGxuY5p01Z5sWJPZ8dwfCltz9GwSb9FE+9A5ibpuqsguDsvuV7nlhDLDohNB9yDIx8KIXfTd4jOg9aAB9JrY4LELsxl4NIo562iJEf1v7+DO6r+I9lRGI12Rk9SLvtkrdzWraD/q1fc9J7HFcn5EESvSiwsQAxkkkmK+9sUD7o30kvVy22Nb9bYovqLd9Zq8+28Yk9pkvLkPowb4dwO/0IYj2QPPsgT7cD3643Q/idqNG9PV37Ec9duvBg8nBbr6dK/TYUAQgZH+R/X2z9VoNtGHAZ1S/30rR72V60E5h71gTe6IJt3GCeqePbg9/1NtXFD31FMRYRhH1u+tO18fsskF4m9j21/iYoOdTQXwzqg97Oehzuqc3dw2s3ai8vXxoyyy9/ip0P7Cx38/FPgE4KVbv9K0QYgcbxY6tY1CV6C97DyTatn31gMvsE0T0YP+K1xj/Yseftoa7FFvzhNips6Bv7O7p870EMZJfGHs4fGJP24memL7jG1qIPcK33tMEvXFHsLf7arF/BNd7zEm3i5QMxAgxEGt0H1pP9GFdRd98slfUEuhBfuPWuOxhxYpep9YD89Vtov1ef2nd4wiKXbXSYscJdqPj9nNGMTAT6tvUbQ8W4Z7xfK/4JBARCGMH5Sf64IMD2NCwp1QldmZdwb5tEL1eInR/zOiJO/qukKqD7dd7eqUeZP9q9lnk7M0j3cFX6j5b6D3s/E0/tvHDH/kIP/GP/hHf/73f8x157mR2Zoaf/umf5uOf/OTwIZyhhhpqqKGGGmqood5QfVN3nv/3f/bP+Mbzz/NHf/AH39FG/NRP/iRH5+e5fPkyJ77Nd7+HGmqooYYaaqihhvrvR6/7zvPnPv95fvu3f5vrly9/VzTkz/7sz3j4kUdot9vDXh1qqKGGGmqooYYa6rtn8dxut/nwhz/MM08/jWEY3xUNOTY/z69/6lM8/ra3DXt1qKGGGmqooYYaaqjvjsWz7/s8+e5386u/9mtMTU5+VzXmYx/5CAdmZ/nN//gfX/NY3ZOIEr8RPyLeJc5nrSW5AFprfN37XLvoz6NRcSFax+CYUCcPmuuk8OREX4j+B9CV2H6Uv5oc2k4SGbQWaAVKaTwNajBxp+9vjVLJdYKtGiaELUSMWtE7x6I1WseALzGQ1KbV1qP+cYUiQKFROilPbCUU9LTD10C00+atpInkOqIvQaT3nyK0A3ofzN/5v+o5Um1D1rYap7WIr6V2zIAWqGZvB9OX1aPjyqC8nU5XIko+S/px23pbL7fqptCE2/Xeeb1Vaw9l7JE4JkB7QLhTfl+OhAYV9ruKSuq75R16y6xqy6ZJcpLWEMZ20ElDt23U6zC6J0Fm28NjLF38tyI06kmbeuot90uE0+i
ArcERXyLYItppVJTUNUys1ZMERM840giUjt/31U77I72VdKNAxfXrd3i1RxQTO++L/jGaOCEA5a03Dbbtte1bW0ksYrc39maJaSN5P9TEgYGd+ib+Opiko7eTWQaco+9KW1lo8XhDbPV9XDHFjh1VkrSoErhi/3V0n513PhOx/2x3m+7rXgUEW7bvSdjaNk1Pk9RWPlZATxv6U2x2pSENuFMwmGsVJf7j6yRrJ/H37QvHoDGADgI/KSvqieN1HXeLp+O415eYxE5Cr0hsqrtbte6JUKo/aUpp3TNXiL75Y6uNbXrqq3aGm97ui63kOEG0lcand8Z7sHVtIXYlgmult/ttOzonDdsaWzqpZ//w70k81Dt93peC2jsedb/XCojnlYHR1zdlAMjdScY7l4+v5em4n8IeH9EJqHQrdm/PL1s+6Pc4ynbsSD6TyVsB6C4DngZxEErKiBgYD3FFtNSIQKP9HrsSokRtK7htzxHxHKSIDA9NhJIaJfrHbm/8FCoZJELT+1+vNfVgTN0a4WInBirZ6DtOyQiNl5wR9c9VKGIM7D7X28LzCrUdZ/ZKuhNCoaNk8u+NgUr3rTl8PTiX6d1p21qDL3Z8bmtox84e19oTaEujzAhh6MTWPlpU2Z0IrrfbuBM/1a42bDtwRI/HicTlZY+9Vc941tulbY1tgdiey+q6t+8EQou7R9V94EMfYnJiin/1L37hu/LbwA99+MM8+eST/MRP/ASWZe36/P/4+Z/nVOcdpNoKLQUdW3L+oIU8VefQRZP3nu/wgy93eP+5Lg9f9vBdwYOrAQdLIWujJhlPU89ItIRqxuCPv9fi1E3F/8/emwdbdpyHfb/uPttd377MvmKAGQzWAUACFEBCFCmCpChStEOFsq2tEpWjRJYSySol/yguxrHkUsWJE8lkUXHslBKFiWRTpkhYIriJAkGCILaZwQCzL2/f7rv7Wbo7f/S59903mAEGEimRVferuvXuO+fcc3r5+us+X/f361LHEpcVH36+TbVteP4h8JsK7UvKHcP+lubcrM+OmqYcG/7t28vcO5+ybznj7TVNpWPY2zZcmPR59IUGX3xbmY/8xTLHFwW71zJm1zQXpj3O7Ql48JUuXz1R4AOffYbWaJVdV6GyeZavvecYB0+9htIFfuPjM3zsyyt8+94qTzx1CZV0iK0HwrI+XeLxU12qTcPZXT7ac3zPz/xIyMMnEzwDZ3eFjMWWlSlBtOaxr2EoJ5ryRpdy/TzKnqR2+D6eeeBuDr+2SbfyDpZ3zlDpGHztIy0sTHhU24bySsuxVelgmUVQBEL+/L/4VQ5/6zRJ6VFU2iIVd6GYo1v8EKRl0ugoKiuSFI9CWoIkozt7O2OJQgswNCmubfJHj+/h2GLKH35ohD2LKSMtw9jSAlIn2KiEbG1yZLNKFozwv/3UPsYSy67LMfXp/ZRq5xm5usbMeglLBS+7gqQNbY9C+wsGgl4NAAAgAElEQVQYHkKyjuZHkCwCO7Dcj+AKDnsDnYn/iQvHHmRq/ixQyQem41w9/H6eenSalUmfuVmfPfNt5ndWsB6MNw31kuT33l/lQ8+1GG1aEk+wfyVjct3w2t6AqZrmzEzAaGz54sMRu5cND37zecLkWzRHJ7h4/BD7X/wTJIuoTCF4HvQcvNyEPQGMj8LF0/Cd14ArQAmj/iHSGiwPIlgk8d7LNx7/NfZeWENwnquHPsD4/FVWDz1AZf0qhhFk3MKWppif9qg2NKdvK5KFium6ZqRl+ePHK/hI5naEPHmizEgCn/txn/ElwTtPNih3YaxpePLRMvNjgrsuJ9w2n7KrlnHsuRd5+OVFjrx6hpH1Fne/+kf4+nmi1ueZmf88fvwXwALwPI7xXIWREYjngCUoRnDXIVh4FUipzzxAsa2YWfgaKqnj66toMcnq7lnesZIxe/UZ/Owc3eo91Ccn2HXxJTxvnGizRiH5M4r1RSrdceJihWtTPve++CLKvIbiNeb3vYvx1W+
guMilO3+YxQM7mLnSRPFFgvZzCM6QFg6isheBwzik4TxwFMFfItjI6wEgQfASll34fJ3K+ip+0mRs4TI7u2MszRR55GsvU+4airVV9mxKTh4q4mfw2++osjMx3LOQcu96ypH5lNvOnEPFdbJoF2GrRkGOMlrvkPoeylpGm4bZ+WWSYolDFxcIKCCsYGQzRmCQvs+JKwnNgkR6oBGsjHisHo95+7OadiToRiKnqEgefqXLf/hJxduf0cxPeSxM+Dx2qku1ZVgaUwghmGxqZjY0Xz5e5PyIYqZtaJYVd15O2NE2/MGxIj96psv+uU1W9kzRGRnlcz80jQndYKpdkNwxn7J7VfOZh4pMFRPGNwQ/eq7LXUtudHX8FYunYb2q+Nlnm9x1LeHeswZpYMeaplWQXJ1R7FzNWB9RNCPF+Kpmx8VLfPHvH+e2swlfv7NAO5CcmvXoFBWzmxrfQBbA03sjji2nVDqGb9xWYP9aSiWx1EY97r/Y4VvHfN55WRNYWBnzeG1PSCuQ7FnNOH415rmDEY+c7zLecizticQwvalZrSi0FBxcSrltWXP0SkyQQZhZJhqGU/t8LuyUfOALlrtfXiAOBFopxi9dQFcnmVhYYnTuNQoxePUVjl28QFdO86WHqtx1LWGsYfC0pes7jN+5PQFji4q9SxnNgkNGXpv0KKaGRkly34WYZkFy9FrCC4cj6krxH46E/N3n2+xfTzGe4I7LCZ+/t8RD5zo0ywrpSe586c9Q9mXa1QeorJ9CMcdTjzzIatXjvksxh5ZS3n65y1hiuftqzNte6YCSNH2Pd1xKGG0bZmuaPz5R4pGzXZanfcbqPi/cFnLg7DWe/JG97F/KaBUVaSCJKxWa07s4eqHNMycm+eqDFXZtaPYuZSxNR4wvrVJefxKo8tmPPsrcZMDxCylnbvM5dvI1usEJXnjHMWR0gNPHd/Llx3awoztBtJECB1jd9dOU6y/iuMo+a3t+Dq99FEzAs4//JDuvCkZrX2HX8mk8fR7FGaDM5tgRou4GcfF+zu65k9K+jNnnvoBllEwcQov9eKwgyUij2zBqCk9fxDKBQJOoH8azr6DMMnvXa0i9xNKd/w3V5c8CZ4GAucO/TKF2B8p+B5lZZBYiuAw0gP1YPgIcwKgJWqN3EnXPYrgTqHDm/g8RWIOVUzRn7qCwtojHEoIYQwXJEuDR2Pn3+c4947z80AT7z5eQcUY8eoJTD95GtelxdVeR//PREe6Z0/hG8NjpDpdmfJ7bF7FzM2OsYUh8QdSElw6EfPN4kQPLKVFm2agqpjcMzYJgadxjz2LKv/3xnZy5Yz8re45Smz7A7BWDVg+zuf8fUtz4XN7HtpwHimngPTkW8FxuXx/EYQDncLztuusX1mpwbh6WOwhzKR/4ajL1XyPtGdLCLDIbQ9BA6lF2do+RBYqHznVph47pvDCuyHzJ1R0B77kYc9tcSrVpWBlXLI2pt+Z5/t//9f/Bxtoan/nD/+v71pUeBAGf+vTv84EPfvCG57/0xacGHVT5664A1ds0xeJ1LUEXVOYg717mNhswgj7izQjnpYqVw5wJC0Jbt+mKcXBwLUDn51RmMSJHLRlo5kB3acFL3XuOMpZMOkRepgReYgli91thLUa6+ymdv9ga91rttdybZyZFz6dHTTrEnJYCL9YIa9xbVP5m5aXWvSHnaZQWt/lI3wsCyvSQXwIterg3idBZvwwzKZ0rT3oY6fKOAGkH8HAiv6b/xqgAQer7+cUeW3AzAVK571Ll95buf2uxUiG06HsJRc5VtVLQVVuoNJlphxrMU+rFBiskHek2NMCAFe650maotsYKNeAG00CMdW7K/K9Lh+2ls/eWK30ydV0eACsliRJkUqCl4yVZIVBm6y285bn67DtR3d4H9KhVOq+LROZ10s5c+oRBS4noe8E1kLrKbWeQaHe470GO87dvLy9/lXvRFYkKtt7IczejEWr7233uKZQaUunqVuUv7l0lMBIyIYi
V081UObi8lzoovsx1W+O85iqzqAxUmuHXMlQnRRqDF3dwHM4mDnzfzPMW098UQMr8e+ZcNar3v/MmuHrPcH6+rO8Pdu3MndtCexnnxbeuDAUpMjV91JfoucRIcn1O3WYGUqKlHHBxuA0SHJLLDBzvfTeufvrHdJ5P502RJnHpNSkqd6uq2G0KIY3FS3O0lrVsKNDS2QWvh6bruQmFQGTabXSgB9B6uDoA8FLTcxj2m6TUrnx6HtKeWbTSbXhiB9CX0rhP1xPb0HdSbyHipHWbOAlAK0Fbir53tWdnkwGOppESpKCrJEZuedSUpd/Ge0g22cu3AS/3UGoBXuzKyetsIR57PkRp3DVW9GblIFUSafqObhCQyC2umbAQS9GjBva9d/22mtvh3l8jXNuwwj1PWNfePe1srcVtRtX3dglXJyq1zm5Cv630zosuSDOAw8Rt8iKMQWYZ0jgb6NdTyPOotCt32Z+lytOZOTSiFU5/jBDY3Dap3HHq6a2Zg/XeBlrZVtoysdU/CAPC6txe0rdDWjp7oLTTUT92afIyV1Zuk5qtvlIa6OabtJi8g81yF3qSH7d9LKbACImXWrQUxHkdum6qhxxtu5mKfFOf3u8BrFJoKTFSoZWko6TTPSGxeBgZbLPhSAVCYZEk0nevjqaLSjpbNhcwIu+jhBwg5KW9abxtYDOLwCo5MJ3Ts2nONsk4AyRa+fn9nRZr6WGE15/9tNtQdBKBS6frZ9TANJtAS+U8sNLDSHXdVPYWF9RKRSYFqZJb/bBSaCHdyEK4cpW53ZE6t7pSbKO/SeM2wuqorb7MDpDibN5uOlLSUR6p8tC9shYeRgXXeZzFQH62bP22usptab8PJ4VsayMUZ8C8HPWpB35rUPl4yE+3dN3gNnrJBPiJ7bddad246pYHz+vr6/zSL/1XfOVLX/q+X4vy8Z/8GMLz+Vef+tTrzt1+++1orYcLdoYylKEMZShDGcpQhvKW5ZZRdU/++VP80LveRRAEPxAZ+/yffJa9+/fznh/5EQ4N7D4YhAE2e7NdG4YylKEMZShDGcpQhjKU18ste57/zk98mKe/+lX+9MknfyAyFkURTz75JG9/29u2HT944NCw1ocylKEMZShDGcpQhvK9HTwHvs9TX/4yP/PTP81LL7/8A5G5+++9lw995CP8s9/+7f6xV189M1y2MZShDGUoQxnKUIYylO/t4BngwRMn+H8/8xmeeOKJH5gMfvJ3f5d/9lu/xdVr1wBotdrYWCNji0xc4JLNg/8MLrCgH0CCC4LK8gAWZfJF43nQjMiDTbAWLaGheqvh3Z9e8IfSlkwKmvmi+kzCBemCO0xv9YjO4SkCEIpEKBcwJiRWSIyQaCFJpQtOaHl5IJtQ/fSl+QJ/FwyhBs65IAsrBFapft6kcSFSErf0Ps4jbJRxAR0A3VxDXJBkb8W/W3yvpSCV+a+1xSIReZASxvbLx90pZStgygVLubRprOiHyLlzPTaWMQgyhLaI/HgvGMPkZYJw6ZaZ3SpL0wugssheEFQePNNSOcZK2zy4q51XmczBfgPINgT0Ay8ELqo5DwIh6+fF9K/JcKSNBIgxQvWIRS4sI9liPwkLMnHBCNK4gCGV4dKbB1toAQ25FfAjtpGFRB+9tRWA1tkKglUuAAs5UCiDER30AiacXvVMgRHKBZz08yiQmDzgRqDivDzNVnCYFnnJSReQaaSkpdx9rVQY4QJd2spnQ3mkykcrHy2VqwflonVdkEsvk3og3T2oVc6d2oIKDbS3XuBOLyCmFzxikbjIVzMQWOKC0Lb02ZHmBtBFQCp6EWIO7+eCgnpYwF64irkubXKgjVwXWNq/ZkC/Bq7LNRMr8rYsJDKz/Qi3ftCczIO18npwbVW4YEhtwBpkIz+XOT1Tna0iszmCydkA10ZkzirT+SmTY9NMP+A5DyC2LngVIM7/ygH9lNoiegHVcqtVkB/LcsyiMLCZ21qR48gMkq6ULugtc2npIbCuSWh
4W0g1nT9X5/YUk2cpT3MmRT+Q2GrrAt2EyAOtbW7bZW5j2YYzE2Yr8Nn2bb27x2A+GLTfuPsk23pUS5pfa4RLbyZFP1i4bxvyspapzdu6IJWCzX6QZR5cngzqe663qUFa11aM5xErRSZVvz2meXl2pSSWkjQPsouloq0Emej1QzLvB6ElBW0Bca8/M1uamwhQKf1Aw35wtBjQ58zZBC23LI4eND15mZIH2Qtraebntdx+fSYGAs+ts/NCb+Hg0oE6EBak3Qqg7gXoA1vB9IZ+QGrPwti+HdC53euwDayXWiTZgAHuZSYeuE4AmyAytJRY2WvTGVb2rGuvf+gFgSZ9e+DqOB6wbelAusyW3eL6NKT9/tRuC55z9xB5oLUVuY3LG5Do91/pgE1NUKkbD2VSIDLXH6pN00fLgdPxbh5IZwUYCanYCgyUiavXXiqldh8siGwryLd3ryT/rRYD9rlvirOBPPZsejrQT/fGE5sDfXI28Em223WZB1X261rnfanYAjr0kI9yS6+soD+WkDoPeH+rqLr9+/eztLzM//Iv/yU/+bGPucjs7+e3Ayl59NFH+eCHPsR/+Yu/yP/wiU+w420/x+ceGuXFgxFn94RM1jSF1YD7zyfsW04pdywjbadwt8+3ObzYZWUk5Ju3F7g06zPaMSyOKhbGfYRWHNjQvP1Ui/tWMlZLkkZBcWXKp9yA9YrizN6I9z/X4r3nOqyUJD/0apePXo3ZtZyxfzHl6rRiaY9i16LBjvs8/Od/xrGFF7m2+xgHT36DtDzB9MVT7KyljOoC+0+9wKNXFiisLVFqPE05bSFSyaG5vyBszxF2vszHL56icuUF9qz7yNQSNC7jpYZi+wXqMwcYW2tw8kiFo9dS5ic9fA0H1i3tUDI35fNn9xSYqWlOzGW8MO3zxOk2R185RZBcRbKGoEG4PsGuDYHXjLl6x53MXFvk1N0T+KllveJxfrfPva+1CGvzJNW7SIM9ZMUjyG5KFj7MnoVV1PoIfnwSCAEPqybQ3iRe0qU7dRu16T0s75hk4fBuxtZqyAyiTpORpYuUm/OQwlRhkpcOhUxtakZbhkIacml2gsnVJi++bQc7Ly5y5cghxufneGAlARnSHPeZWmySFh/Fb38VP1uiMXYcFY+jzCI6OIynv4FkDQfnEnTLP41KQLGGw+NEwAxeS9CeOELUqNKceRdhPWTujnez8+LzZMFBzu4OqMaWbiVCGVgc96iXJH/woyWOzmm6oeNCfPG+IrUpyXRNc2w+YWYtZncM1yY9sJKRjmH/uUU8ewZrJ3nt7Xdw8KWXXCfNfTRmHyfafBJowZWLsHcG/vwUbC7A3XfC0gravBPFk2jxISSfRZkOO9ZKRN2TwDyjS1WkTtHViMbEIaqrL3Dm/new49wrtKZm+Z0PjXJkKWV53HOUECGYjDUHFpocPfUsDy7ArsuLPHbmMvtfW6A8f46Jpc8ysvJtHr3073nihc+z79WvMH31C8xe+Tbf/IUf4uA3/z+Syn0UWv8Gw0FgCUEVeAk4SKPys4TlZ+H4bdAyUFDQHHeGTxrYNQHXZoFVRjeWmZw7i7V7+OaJ97NrYZnFOx6h3MoY21gjanybNJxgaf8D7L14iah9ERONszY+xejGOQSbbE7dS7lpOLy4iLcJcfFugvQalWYTZdaw7CfSY5R1ifO3jzF77RsYTiD4Nl66CawB6zhayDpZ9A5U9jSWhxHM5y9s48B5JCHwKoIS83seYqR+DWlG2H/tNGFjgdMn7mUkLiOER6ukWKsojrc0r86E7K5lnNkdkgaCB585g/YqbO47ig2r/Lufq+DXPZ67p8A3jodcvM3nrguS5XGP8bk5lvZOESWW9fGQa7uKNIsOW3ZwKUNmluma5tXdPjsWBCoVfP2uAoujHpkUvP/lNqWO5Y5rFpMKnjscMVXXWAmXd/hkSjDStsxsalJPEFjY1zTceTXltsWE1ari/K6AaSt416k2nRGfsNNi+vxL3LU
0TyUe4Y/fW+bEqwkIwcSm4e+d63J8znBuMuBdpzuUYsuT95X42NNN9q1pvnS0wA+f7LA+63Pk1WVun29Q6UimF9d5+vEJPvD1NgIox5b6uGL/uWvsrGdc2jlCObV8+2DAxTHDoTXBwrikFUnC2LI6IajGsDrusWNTM9Yy3L6QMt7UeJmlVvSQVlBKLQeXMg6vpFya9JmpZ8xuGna0Mu684lB3J/eFPPpql42qRzMS7F9NWRvx2LOS0QoVV2+XqI5g/3yK8gQfPt1hYtOy47Uvkqox/uLdOzn6wgLLe2YZX3sZL1nh9NvezsbOGSbmFyl2XuGexRX2XVpgLK6y+8plDi01mVrrcP+505w4e4XDC4a7Lixz77lFHrm4xniryM61mOkrFxnRVaobm7xtBWa7hl94qU2YWZqB5LaFjGpTUxZweb/Pfc+cZPdqg6j+NII5pD5BkDwLdHju42/njpcsd11KuLTTpxAbppqGWkmxf67F+oTHn99dYudGxheOF5luGh67GlPqGA4spCBhMrZEccbehqVcq7M8WeL8Tp8z+0O6BYFvJN85EnHickypa5nYtMxPKQpihtLGVbqFI+jqDlIl2bcYc2BhGX+jS1adYbK2AbLI7qUNlneWOPbieQrNc3RnHiBqvYqRx/GyMuDRnHmC9X0V1qf2sL67yoFT55G8hmWEJPgxlDYIEqLOGu3J/xbkNJOr80xdfg7VXmL58Acp1y7QmjkCyQz/z995H0fPrFLbfwRrZlFdg2IeTy9y7c6PUl1ZIAv2IXWB8uICkpeAMcCjNfI40vhE3RdQWYykATwEzGI5juACghQrJmhNn0C1Z/na+x5lx0KBUiy5uG+MqQ1Npxhy/o4dTF4Z57UfOsHElQUkK6SF9/Cldx5lz3LG0csJSzMVRGGWf/2f7mRq07BrLaM2FjKmLY+d6zLSMRxczOiGMN02zGwYur7gmfdKFv2IILNcG/MZiw1zsz6Tm5qVCcX//c4Kkw3DznXNdMfw4OUYqS3Hzi1TqF1BR9OcPjbDnkt/CtwJ7AaOACHPP/wLzFzLgCKC8xj+EywBRj2KtA0sd2N5mNb0b6Ba9yOR+QtLE/wiKj2DYA6RfgTFN4HjCBZpFI8x0RWUm5Zv3lmimFp2bWj8DBYmFfsXM77wSJn9iyl/+FiFpw5Gf7UdBv/pJz7BWq3Gx//BP/iB8D6/45FH+OhP/AS/+Ev/iF/7x/8YKkWuRpL5ULISCjwNsiMIUofPcZ5X99tCw1Coa4RwKLem79w/sXIYFi8FowRRx1Da0P03MW0kWgoST9IIBGHXUlnRCANRy1Jd1oSxxc9fPmMvT4cASZPiyjqpkvhxE2UMSncIGilh1+AlHSqvtnG+tDVU6jxzUW0R6CKYo3rmGoIVCktthDAImyBNhtQtZGIR2tD1BGHHYITzPhdbhkQJ2p6gFkgEEHVsH8fkdVNEjuoCgcxSoprDuaWehxenxJ7so4oS6fB7WIvxC2gVYbwQi4/2CxSudLGECFpbb/4i6L/6WemT+QUSzyPxQ6xyaCSZpqhuB9VxG30Um4aWL/DTHjZH0lYBwgq6fo7FUwqRZZSXU1TmMFUis+ighPO/17FKYkQ44EnQudfTYYd0VAb8/E22h3zzkdSRxmK9EB2UsEQkXgEVt/C71uGrgNjf8lOmUjAfOeRS1xPESlALFY1QoiWELYufGFc/soc63PJoiVSjPTngufTJotLW2/mlZejEkOZv7AV/YFKkhRU+ECPYJNxo9v0wvm648jApqRchSIhVgMy6SANnIzdzkSjnxcskBInFiw3hRovKUkpQT6ic6hCtdpCmheAqggWiK2eIrp1GxueA00gu0oo8YCNHNS3kSECTl20MKLKwAJ6E0IeSzFFiamviTIi8Xiyqs4TUy1jhUfcLWBSZ8pHG4nedZ8LmMzNBPc7bkCZTPXxfluPQNIW1BJRER0VA4mWt3GPhoRI3eOoEHg5fGA54yU0+S5H07+eO+QO
zFtfPVCSkgdMtkWqi1QaShNiTfaSi0s5rVuhYujkCKvYEHS+fNZEKozys8NiIHFKqGbi2vBY6DJWWjtXW8wpmSuTPcPcLYmcD/czNhAVtyBRs+pKu5zy6hZbjKBdXHD6zqyRePrvW9l2deLl3RkuBn9r+J2pbUuU8q2FmUcY6LFZm8JI2xfkWfmxo+A4b1fMEVRc0pTWDxmGjvAyaviCMIWob6sLhs6wAv5NRqGX4GQSdjFgIwo51yFGde6qxRCspqRKEsSWWPfcztD2HvRQ5HbDruXz5qfPyB4kljG2O3NtyBnupJewY503PD0ZtSxC7vKeKbee9zKVFGed1i32Bka4fCLuGyorOUYQthNW0A6f7RggECZKU2PdIvCDHDTYpXW0SrbYJupaw1iWqpfjdjNLVFqWrLaJaSmklprQcU5qPCWKXH5kkBIlBZZryqqHYMlTWHIJOW4EfOw9x1HW21mvEhJtx3yvoPKl5+1LG1Xdi+3rmJ2bAqyeIPWc/Vn3nGS+23PkgcTNCXmJBCQoNjcw0WkCa979dzyFAYyUIYpsjM125aC8EFFZ6+Il7t7ZAtJ5i8UAogmaKsBA1M9fXp64srQyRposRBSBwzhwUqa9I/QhtJdbK/hSOjnrXWSQddFTGSB+/3cVfqQOGzIsQaKynsF7IcqGUz9r6aFGgh5KDFolf7KcdfBSt3C7n16SmPwvh+mCTO52C/NPpz4wa5WGCAo2wgJUBfmpIlUDgUHodP8CKkG5Q7HuyrYpoBBKVWaK6IfEkxgtZCL18Tsw9u9AxFNqWKDEOQaghil1b1lLQjKDtO7SgEdBV0FEix1tKFiLXxwkLUdcQtdx9/DjPkxAkqud1DwY+ik5YwPT/F/l3hfGK/f7YUiANqxhK/d+5WWSB40GnCFRud33ntZcZfkcjtevbBBCk+Sggx7LWAokRUPMFc6H4qw2ePc/jS//xP/LSCy/wR3/8734gBtC/88//OV/76leYmZlG5MzIAezg9plUbjCzeoPTYvDAgAde3ORW19/X3uyZvSm5t5xLcV2ixY1TMbgb1E0eIux1abxJgdjXfbmZ2NeX3ZuWmL1xHsWblu6bFpG4/vs2VuQbl+6tnbG3VEtvuWp5vb7d9AfirefkraZOvLF63EAf5cBH3Poz7S0p2bYEiDfJs33DYhC39KzXtwRxi2X3vZ+x22YN7F9D1wZtAje3WzfNuXgDtRQ3/vWt6JV4I1X/a1SBeJNj9mb6Ib7L1SxuxdbZN2gG4sY2YdtH3LjPe8OKuNn9/yo6am+5EsSNsv5mdShu0ueJG100sK5kIF3iltRHfBdbtP2uKJB4g6oUt9hvb7Md4o313PLmdSFu2JbeqGwt310Rt376DS4Vlr/a4BkczeIrX/4yP/9zP8fGxsYPxAA6iWOef+EFhjKUoQxlKEMZylCGMpS/isi/zo+np6b45O9/mtuPHWN5ZeX7NpMbGxs8/OijPProY3zkx398WOtDGcpQhjKUoQxlKEP5mx88A3zsox/lE5/4BO973/cvgeOxxx/nruN38clP/qthjQ9lKEMZylCGMpShDOVvb/AM8J///M9z6PAhfu3Xf/37KnOLS0s8+LaHOXHiBJ/6vd8lzHdHdPuW56iV3sU9TJHO0U09bNgAds4Aab7Oq4cr2r46autWPeSTyLFWDkElc7zPViAMAjSSWAiHxRvAZ2nhsCo2x4fR/63IcSv5fbfhsV6/gM3miC0XuCT7WCLTxzeJPiqmv5t8HuNkgUV1/co03f/kpdHnPNneFvJ6oDzRDhVn7XXllCPo+hiaXjidyPMltu7b/5HjxvRTlGOvTI6hYrBOrM0hNWYrgidH1jl0nEUkvatzLNFWbeUPNAPp6kGysoHvLnDTCoFIXI4Epl/m/bVgA+vGeojCdv53EHZmDLnuDdRgjkd0ILQtRJuRkkF8m6tfh8nrI/VMntZe4GCvbMWW/mzV6fb6kcbddwvp5tJuc/3vpd0g8/s5TB1
SOp0U169t5nW6qXu6KeWASRLb0mV7wVzkbUDcbHXt9tbo0Edb/1shriurXv328GU56qiPvurpbA+RlG3pwza9HFx8mQw8s3Wdmb2RtRjAKAn3v0uLa4B2YC1lz2YI22urW6ixwXuLtjsv8gC6LEfC3chQiRw/J7OtOsEOqKtxQXZpHhzagxhKs4WT6mHutvTV3aeXVpurl7AuyE6mW/jFXr1cX456oB3bAQJoOlDtiehZEWjla3Btfx2vzP8XxEI4ZF2+vtf0c5HnP91aByvsVt33EHzXp0PaHqrR9s9va+K2h+iy21qYua7v6bWf3n16fUYPt+fssuy310xIhDFs3VVgkAPts1d+PYagxCrXNi3S5Vk4jFrPPhmx1VZdPyP7z3I4UEWWH0O4gCndK0t74zbtMOMnHPgAACAASURBVIGij7K0uS4MxhiIHFdn7Ovha1LbPKg9L8s8cGuwLxZsYe3s9WvxezUmttbsunsMoihFXsayr0Nb6r+F7nTcxdzmSZH3V/n9DdtTnqNpt/Vz/cxJsJZkW18stqdJ5MHE22xzjiDFOhspepqkB+xRxnYtdO1MxA4GYHNboXMdEYO93OCC5H4fLhCx68sMLvDYsoWU7MNCrzPBPRyd6Y0B+vW/1Z57+m+3pVgMYPlceW/ptGQLO6cHIqd0Xm9yoApEH+a3Zev1VnlZ27e39rr4MDtQh1pspXfbqCePldMIalK8dVTdzeTHPvhBfvlXfgUvCHjwxInvi8Hzo489xvF77+H3P/lJZN45Ly4u8szlCdY9yW01TTk1DkuUwXRdc25/yAt3F1iZ8nnxaMTtl1ooLXjhSJFqannoasJiWbG7pjm+lFIvKB4412VypUG7FDE/6VFIDdNtw3jd8MqugJmG5sSrNarXXmPahGB8Sp2M9XGfna++wt7mJe5dWufg5VWOLl2jMv91BOcYn1/G5xTl1VX87AoyTbh26Ai7rjyDx1w+mPoOghaKBRzyah4XUWqAMRTz+PFFJJdRpongFKPNlHAjYcSfYXRpjok4oRUW2L+aOcplKLk46dOuwg+f7PL3rnawCUwuX8VPLhKrR5G2TGfiEaxfIC1O0Jwcpbq6gR+NsFmUvHxHgTAx+ErSmJzAhmVMECDxCRvnEWmMEeM899572HmuTiYnSUaOIWIQuoM0dfDLLO8apxQLUk8yPbdCuzpOVFunNTlKdyogaFT5s3fv5MhCyo4NFx0+u9YlKnhEcUroFSm0NdIvo7wSwmQkhSKFxHLhYIVX7w85/PKLWKpkIw/SmN5NZe0U9R3HKNS/gZH3kdlDwBhpZR/t0b0s7tvP2PJJLEfYnPpppDVEDc1fvucIh19Zoj67h8mFb+EnG3TG72Rl0sM34Bs4sJgx3jWMti23dQxTG5p9Gxl+ZgmVpRhpruwIiMc9dl2qszBb4A/eOcKYSDh6PuNzH9zJA986j8xgurvO0vQDjK5cIB57iOLCaZATaPWfOXOwrwkPH4Un3gkzY/DMaZRIgAsoW8v1RLOx872EjWkkp9D+AYw3Q9Du8sIDh9l7do5qUidsLVGf3MtIwePIXMZkU3N4MWXnSo2di+cZWzpLmLxC0H4BP13A4zk8TiK5CJwBNvJBvWaLvVnjQG0FufoifncJeAVJhmAOcOQYy/sJW3Xk1HnIDFzdgGYTTJx3Kgo2U4gzXOT5HmCVLLybmaRD0GizOXOQ0fmLyNYmynSw2RRRp40kIAv3IOOMxdumKbV2Ibpj+J06ujRJfWICTwSoNCMLH+Dy4TsZW2nTLd6FVSBR+MJnZPl0jo16lcbIYcJ4AehQ2/3LRPVnUVmKw+89ALwGjOYvXXUMP4bgO8AeCt0iIquysesg5c0zbOx5mIl6E2F9RJaghOXrx8vMjXu843JMkBpuX8145ELM+HyTdGI/tXLI195X4c5zKV85XmTvUsps04ARHLmWIq2l2NFkhRKbZcnJwyHNUPC1d/s89FzM5GqdU4cq7FnOOLk/5PS
+iPUJyROnO0y0NBenAx441+XF2yIae6C6Av/+sSKzdU2UOPrG0w8p7n0lo1ZV1EqKSseQvmeD0msFGkXJU+/zqJmA2xcTDs632XP+RUQUEdQa+Fxjcc8d1EYjDs9lBMZy4NIiftJF+hFHGppaJIl9wZF6RrFpmV5roydD7nlxjtHMo3rxOwTtiwQbl4nalzmedhh/8TyiMEagBftWNiivvUIWFPjGg7s598MpYiUgxGNx3L0lXB3x2Ji23H3FcGDFldvMpqbaMSxNeKyXFCuzHvuXMnzjBl/3XkqQGXzx7gJ71jOev7PA7ZcStCfYqCom2oZWJJhuGe6/llBpWdqRpBNKbp9L2F/TTK9q6iXJzGbM5PwlRpcvEMRn8JOEA90W6zv3Ue5KqvPfRrJKtRswubZE1FggicoE2WUU66zt3M3Y4mfwu6/gdxp4fAXFBfxOShB/DT89D91JxhaeprheI0jOEG0+h9eqUVg7w57GRaavXGTH/DfZu3COkStXCVobjGWWA6vLjCyfxKgqKruAYJ52+d1E3W8g0Iz7b2fvuXXSQpFXDgbccTnh7O6AI4spY0trLM1W+eMHSoxYg9mZcPs5qHYNnUiyc03z9XuLvHIkopIqsqLk3KEy0kgu7PAxStAsSCpdy7GVlFJs6IaCHXOriCBktNZF1deJRw+TlkosjStmNjIW95dpVaeorK8jLRTWrhA0l5i0HlHLI+icx4hxwsZZ/PRU3o8mhGuGtQdvp3qpxb6lU8SlwxTqZ6jfcQ+JnqDYOg8sAyEr+97FtZ0FKomH16yhqLNw4E4m5s4Sbl6jsfs49y4+S2lpBT+zRM2rXLjvbiYXvgW0iJqTtGbeRnntL3KaxjowhhYfR3KaYkdg5RReMkenfASrdkM2g+YuJE0Eq1j2k4qD1Gd28tSPTXPvmYTJi99GtTcoRrNI67Fe8gjLkskLLxFKj7BRR5mX2Zw4QXNsiiNnr/LtvzvBkRfalNfm2W9L+Knl2t6QzApSH3ataXzjaD0bFbfPwcnjPuWmZeea5fR0SKYEi7ssj5xOOLKSIlPLyqiiXVQ8cDFmYlWzscfjjudPU5RVSmvr+Mk1VvcdYNfieQqNJ4ENUvFTxKNH8bqGMNhHcW0BHd2Gyr6DogF4kEkkLyFoIbHIZkAyfTuyBZJ1d83RIpTKUNtE0gUu4fjQEt/M0B4/TBz57GkadmxkaARRYlgbUUzUNUVrCBNLa0Txwbnku+N5BigUCjz9l3/JP/nN3+Rbzz77tzpoXl5e5t4TJ7j3vvv5N5/+NJ7nbTufSIGyEKUWP3MYFYN7o14sKl4b8bhSkrxW9vqw9q4vKHQto6sZWgiCrqW6oQkTQ9SxYHX/jcfTUKlZosTS9iVBZvG0RsU1RpYThAA/1RgJXrLJ2PllJp9dp3q+zsS3V/NGs0TAc8ACnn0NwTrS1nLMUjsfjGQ4JNYicA1Yyv9u4hizIv//fN55rwArFGqXUGmdyqbFb3WoLHXxMyg3DGFsUdbhkzYjQaGt2XEqcRueSAHE6FIVQwUTjWCVjw6Lzj8mIGoZMgm1gkRqaIaKelQm8Xy052OFRGKQtgNKsVKuAEXwQ0xYxuIGCpYMsoRUKIdr0riNZJR7E01VQOKHCOuxEkmKLUOx7bwyShv82GI8RaHlEHJhAngFwG3A4KWW9Shkvqxcw8JH2oA0KAOQeSr3UJYwQQVLBEKSRSM0yuN52ZboFqfRKiCsN1goFZGdLklUIWqsAprAWDw3dnEIuthS2TSUWoaJOU2x41B7HpZSxyAyy1xZsVKSCGvJpOR02SNVDqX49MgIECBJqb4yR6s4CngYv4jPJiaokpRmsKIMmy0YqcDEGJRKTpHVK7lX+uv5m3mLJPQwsgJIrLBYz0N1E+qFAItPcWXNobGsZmxDE8WGUtNSrhuKtZjKlQ2CxnKuk88CV4GXgW8B5/LOZXnAU9ID89fxznweWAG+hMO8zeW6u5CXcRnJKqgA2hnoFLL
ewFm723U7bGGHHILPCkV5uYZFOcB9t4XMYnoIqqC+CVKiCxVEpklRZMUxdDiClzRBCNIgxHoRQgvS0iSbpXEsBWyhnM+uZIRd8mc2AUGiZN/T0S6N5Xm4mP/18nyrvA0LDOWeT5Kg3cKKgNTzEaSkYYXiWp5PqwnbGY1AUg8Eo7UMpWGkphld1djAx/hFjBRcrHhU5g0Xyx5+bKluGrzE6Z+nLXg+UjvM3UYk6fqSsyMOK6nijHYgcvwXLBYEC0XJ6ErGSN2hLKWBjUjSKjpX0lLRocMy5XZNuFIRBJklUYKuL/AzMNWETELXgysjDodZ6hiEMUSbq4hE52XURktBqbmFuwtaXWSWEnZhYiHtI/PGVzSpAi/WFFsG1e1SWNMoNlCs4HMNyRLTX1/Bt8sE7QwvNVRWusgc69UMBRujhiAFryvYDN3s3wVfMF8QFFvGIUy1JUgtQQrNwOWrEUgqLffSAIIoNqgMWr7zZL1a8fAMpMrhr8pNS9cXhG3D+IKh0DH4mSX2HH6wOm8pNTWxJyg1NOFqg2hzCYjxsjXGX1qmGxbxY4OwXaBDaXGNwtI6kKCll7edTTIvA04CzyFYAF4ETiJYzNvlaQQJPicJ9AKCJeBbKBbxOMfI3DlKjcuEjZco1V4h5CpeukZpqc3omU0E9Xy6wvnijCf6L8ejJw3FtQ6ZtHSVJEigHUhKTYPUKVbCZgDNUOD7Gi+fxcik26ziakFyoaJohR6xL1mJfDfrkOMDO75D3Y2sabzM9d0qTQhji9QaCLFekG+c5bCpjSCgGZURxk2xeKnbq6B6pQXSc/7EpAt0kVzNyzHD5zIEAi82VM+tkYYVwCcNimSq59lMAUXqSZqhj/GCHLkpyJTz+Urm0V7IxJlrSNp4mw1k2qRe6LX/hLBZJ4lG8/6/tzlIgTScBARechVBCsInU0V06Pol7Y0OzNgFQIgWcL5SpFw3COpIapTqGUa6jdMCIRA0iDY6uac7IVOKILWEG23WqpKoqfFabSbnNDK1rBUVOkcrehkECWi1Nd+xWJKkSlBZdvjVjieIfYufQqVmHJJRCgqpQx6q1GIUBLU6haZG6gxBRuwpKnOruT4tYwqjJNEolgLFdTclq71Kbk/PA0m+GUyc9zNNlF7HBkVMUKaP8wt9qAZ5WZ3MbfZVwOC1Gm5lgBKMrGpKm67Ne3prhUCl4XB/xY5hYjn97g2eASYnJvjSU0/x4Q9/+G9t4Ky15r1PPMGPvvdH+dTN1jjb6yearvtre5Ot9gaTw9tRPfbN6FqvW0JxYxZe70mDU3JbDMgbLckQr7vHza+7/iOvOyVujoISDOxSB7fKgNpOWHozbFtv1yPe4nMGLxc3m0UcmBK3r7v1rSG87Bsct/1pv16erwelvW62/CaEtv6iCnvztVXK2oHlOjdmTAlr36AI5esffhPe4BuAwwbycD0eUQ3omLrBMozrxXtruv4mKMntWRXb9OOWdMra1+vm9S10cFr2OtyVeMPVcW8VQWVvinHatrzqJu32rdDa5MDc+Y2sx/VYMsFNyFUDO2r2lz9dt2SkN+1+S8TE647ZN8LBvQ4/JvsLsOxAxuxN6uxWkX7b91m9eZpvCt68UT7yHRl7NqC3/OT66eU3R3wO5kneoD1ex0jf1j4V23fGFK9vv2+4bCo/pm6gR5Zbtu7yJvp1w+5kcFPfN0DuvT7FYmsZ2Ouul29hZat4/TPsG9nQm5fdzWmc9g3SchOsnuV1uxHebOzxhof6S6Fuwe7a69vXdfq9zUjcwEaJm41bbqQzNysTcYM0DZSRvVk/eNPHMjBE2db/fVcHzwBHjx7lH/3Kr3DnXXfR6XT+RgfO8wsL3HfiQe677z5+63/8p/jXeZyHMpShDGUoQxnKUIYylL+OyO/FTX/9V3+Vx9/9bj7+Uz/1N5qZJ554gsff/Tif/uQnhzU7lKEMZShDGcpQhjKUH4zBM8D/+i/+BfMLC/zepz71Pc/E/MICh2+/nRM
nHuR//p3fQSk1rNmhDGUoQxnKUIYylKH84AyeAT73J3/CP/nN/56vfO1r37NnxHHMBz74QX72Z36G3//0rQ3UjYRSYhnrGILMogW0Q0HXB2Us1a4hzCzFxAUcJKFCS0GYuv3bpbUUEkOQmRzHYsmUR73sEHDSQCcU/escusUFCmkl2awouqHCz0w/0NAtssmwqrdXvU9v8ZglwhIiEARpL+gqcMEDA/u8QwRUcetIQyy9RfU9fFkIZKSBRIgMGTcRWQdpModoUdAqSAqJYbqlma1rMuVge4VOC4yHpYTIEpJShXpZsTIakgSKVAoQimZR4mtLtaPxM4s0DgvWjQSdgkLqNN+P3hKXI1c+SmH8ABJNd7yUr33yyXyfMNNIbdFKgFTILMUikNrgpW7xZCOSaAmZJ4gyi8jifJ2WpFmUZL5PvayYn/BpFQM2qxJpLMYTjLZ0Xn4SoWO82GGB/HYDCBE2pTVSoTtSRXQSpDEU2m2gQKc6jfYUGB+QTDQbGKVYnfBzTQtYG/PQ+RpAiytfacHPDEkph+lIQSrd+sYwsUSpxUst3VDRDSAwlnLXknqCPc0Mi0e3MooACq0NIEWmLSwSoSUyayBsB5pdaLa2MId7xqEkgUN5+nYAPmG8jmAT8BEmROoOFkEx1hgVYWWIJcKPY0a6GaknCFODSpp48RouaKaOC3BJgTb4AoSPC3yRuR4OYgnTXL8NUOzrtLu+RC+wzgXeZKA1dDOwOr8X/faBBhgBCliKQAFpumBcEJo0GqwhK0QYQnRUBDw2xgqsjwSkUUSQQSdSbMyWMLJAu+DRCQXdKG+DUhIY4QJVhMAqFyCkMo2hSmPvlEuR9vL0BUT1TSDEqPGB/Hs0J3fn+QsQtIEArapY4ZFFBddmiBBGYLzArRE2Gmks4x1NIYV6RSGtJdAWL02pVwtsViTd0OmQlq4tNAoqt2WGLG/fnYJPO5IOvWWgkBhm6wZlXOCzMhapM0Y7mrGOZaTr1gwqbRnpOuCVry2llsHLYLauST1BO3KrLMuxWxcrgNiXtANB2gnYGFFsjHhEXeEoCb7AT7P83lluzxRJ4GxjmFqKcYLM2g4DpjVBklCJDaXYUIxTwswijGa01UBmHUQaI+iS+UUALAarBIIU44GX9QITDVZ4FBJLoZEjwKSlEhu0hInMMt6xlHL7neTBauDWO6YeVLqGMEf89ZCkQlhmm5pSYhjvGDbLgjhwwZRa5WvLLQhriAN3z0qs8eImnZKkVVAoA9IYrB+iCyOAjxEjgCHstEh8hVW94FODoAY0EUbjgqDa+O1O3sZ6mC/yNtbMv6f579q4oPNWfn0DF4zbxJLk17cwRMAmSq8jbBsdeegK+b09VNYFfCyuzRjlobKYsY5GZQkj7RZ+EmM8nzAx7K5pxlsGm0pKiSEJBIXE0CwJyrFhrGPwtcXLLNV2h0LccX2KthQyi2dcX+wnKUHmoGpSg0w6mDCkXfQwQjDS7tKOJGFqSENBGkQ0yxFWKrKwDMaSBR6WwHFC81XnVkxhKQMpQatJXJQINF7LtWkv6eDpNjaP2TBU8JKYTkWwWfFpzI4AgrDTRksXqC5TF5AIGUgQZIQpeV9fBBRe0gIiNBOAR1KcQocu2NjSRqV1sD5KS0QqMX4JYQVJaRSoYEQF64ekvqAQ2xzn10HQJY4kRkKQZcjUIuggTEZ7OgIyvLRBkFqUaTBe2yDzAa3xkl5/YPqxEc2iZGNEsVlW1MqSWkmSeeBlrvwyz7WRsfYWGtbXFmuhWXB2QQLlVsdBBJI2WaAwhPja5rbAla3ULYznYRGopI4gQ2WNvExmc31t5f1BgAsgrCPb60jbzo8BzQQaSX7fwVibKoKY9VHF8riiEzrsbb3sqD6FxAU21isuYDKTrh1/TwfPU1NTfO5P/5Sf+OhHuXDx4vfE43z3ffdz/4kT/He/8RsIcWtBZu1AcNdiwrtOttlRz+h4gq/u9bg4FVBMDA++1mXHuub4tRjtR8z
viKhHkj0rKZXO/8/eewdbcqUFnr9z0l77vKtX3pdUkkqm1FIbSU03RmphGrswAzsMO7AEAww72NkAepZgdyd6YWJZZomYGSbwMUHAMEBjuxvUXi21pJJKVVKVyr169by53qQ7Z/84mffe91TqVqM2Eez9Im7VfffmzTx5zvd95+SX+f0+RSHUHFmJmErRaHYM6+NlPnrGI7HAjxSX5128CI5sRgQWiFiRuCWa5RIfvcfn+h6fyc02UrUAibJcJDXi8hwwC0wAxdQtz5uX8JlarSIISNiDZBWYSbcFmAceACaBWSLuB8aAm8BVEmaALWqTObS7jbf2Am54Abuzja00LV/y/EGf44sR3/50k//pL5tspwpz4rnzWGqEmFPY7XVWD5/hr86M8KvfMc/aVIm6L9BegeePukxUEx58ucNsJcELNLENl+ZdLh4s4FRWiJ1ZIGLpxAEOLTQJSwXCkSl0u8lrDxw3C2km2J6eZm65SakRUM8LlOuT39xAWw75RkDpVoi2Ez6816WZk1SLFvMrbeytG2ZasT2eOeZTmZrkw/fled/3jvPiqQk+crZAoRnTzsO7Pt5CM4XAxd5eZvz6JSBidP2jwDRCbfCXx+/myqkH8GoreO02By9cRnGUy3c8SadcQgRjgMV7//rThKUiv/1NE+lUNc3vv3OUasEmSTnUFw+6uGHEeCVk6ZhhabY9wVbepm1L9i5o9q3FjG7F3Nhf5tqMw0SguOOVhK2y5F/+RRUliywcfzcABy79EbCJW3kRRRErsvHan0Co8/CxV+GvnwXHMa/H3wGHyyR8v7l4KX0zminGV/9fbPXnwBQ6nsIOXkNZkhPXOnTHDhKUDhLlDjN57QbveLbC5qhkz3pIaeUZ3MbvI6kg+TP6VI2nYVrCwTFM5rgPlFIH10wv5qqpfgfA0XQSnk0X4SfSv4soRszCvtWFzS1QHSCXTvSeuRKOEjQPozmG4iRwADu8jIwraCy8sIWII+pzB4nsvdTnTqDFOH/ytUf43W+YZ3vfAWa3FK8dyvNH7ztNOHqEl4+OcXWPzfX9ebRtE7seUzWH7uQZlO2S5MpUxvPkGgGhf4bPve8JwKFcl+l5TTK28rvADO384+n5G/TU5x78kfT8prF4CZikUz5L4ozSmD9Mrq2IxAG8yCIYnUEqsLoNSq0u7znX4dBGyIfuzJELNVPVhLHVdT7+9oN85L4C1+ds9q6EtHzB/krMp475zK9HHFmOqZYk5w57XDoyxsuHXOp5iR9pTt6K+L7/FpDvKETSZbyl8Zs13v18k3dd6vDA5QCAkZbibZe62IlmspFw5rMxM9WE//GjLdZLDhcP20QWnLgGoW0my4UJh0vzHmvnp/gvDxb5rQeLHHhNcnop5PqEw8xSDUmb3OY2Nmsou8jCdJ6JesLe9YSjr6zgt19CKo3TbTNx8xZ3Xg05fbXLnS9vML+RYHcaPPaRT+G3X8bZuoHkKvWp4+nFV0hU1Eg26RahvF1FW9IsCK0RDi+FHP2YgxYax4s5e1XR9DVnN2Pecz7i1GJIxxWsl22c2CzYIinYKNvcd7HLzHZsFm/KJGS5ieY7P93k9I2QR17p8NQdea7POFRKkqYncaOUuxJGLE7beJHmzIUmYwsvcOGUy2eP+hQ6CrvbIZg8QG3v29BM0h19AEHIngvnWJvwiPNj6WK4g83vIHkGp7sFfBT4JJMLl4BlcGrG1rwEcgHwGfA1sIzNfwWeRvIXCJ6BkQrwN8DzwJ+jrWsYes7foTiIxe/idv4LbnKeztQI24cSBFtAiUL1KpopwOb6/gJhoUxp4QaPPt0iv7HCu/7uJcZvXCUqjbNvOeT/+P0q7326Q7iR49T1kPUpm0PLMc+c9Hn4SsAj5ztMbCeMbGse/ORFDp+7yNsuBkzVYvZtRIy2NKVmyORyhYmaRoQd8u0Yd/sirel9nD85RjsnePDTi7xywGfvQoelSZvNfft5+oEDKK9Mc88ZRALVyXFiuQc
ZtXsBqcB5P5F7N7DN3o88z8KRPJIGYyt/S8I+Rl57lbGNcyRMAhaRc5aJ6ze5dEbyt2fH+djb3wY47Ln6CkHuCKAoL15BcMP0mQWSKrMbGs0EiiNocpRvPU/CPjpT7wHGWJn/RppjDlBCcB6387egx/BaNnbLpzt+EplYrBx5BzFn6Y7cTzK6l40Jl6OLIW0vQfIakle5MV+glRPMbtTw1xUWr+G061x8+ACwwejGJ5jbjHHCz/Ce3/wTaiWBjFqUVq4SWYKjCxEygsgWfOKUx39+e5G/vifPf7+rwJ/dUeBWwWKyYYKB26OClRGLtz9vajwoCeM1Q8N69rDg1oSNJwR3f/gq2vHJr79Eda5IYs8zvhFisZ6ua1zs4DmC3AQKi1z940i2cLofB4oEhX+G4FUkf5degEwDN7H4EPmt38OKX04/C+G1ZbhxM10flXoXLQlnESzyO4+U+T8fH+W1PQ4dF566I8/NaZuDqxFNT/LHZwrUCpKGb+HEX+bFM8D9957hd37rt3jiiSe+5Pt+4okn+M7v/A7+42/8xhf5S5EFGtJoQgoHl4OFCDRCp1D9NONUDySOC/36DGItxUBSp+gXQskiy+naXgmBlrA7vXZnBno/7bOXcS1EWkChd0TemK4xeGNhMOtW9+kaWvdamp2XSlPlZfrKMmdFPJjdqsjKdSiZFiDI4i9pxGl3i3TaflS/EIISEqvXL2YrtQOQIHoFGLKWZtn7Qujez7Lj9wokqD5+XUnRg7SX0uhu1vdaiLSH+pQKkeiB0TMbBkIihDSfZbqB7BVdyPpcBqZvldXPeFe7oBY7Rk/2h3CwwEI2ZBphgsZkNASB7EVGTMulynpe9XLxzXsg0pDonu6YTHkQWIMaZ/R8QEeyUzLHzIq9mCilSAuMZHdcBiH/fV3TA0V9duvo7YgTAx2xU2N2vde73vf3KZADLAKR6vTOTG1T2KFfYEhJ2Sv4kJUPUClNoFc8aGB8zdt+0Rc9QFZR6edCi53bIxC77FBJa9e5DVANZDb4WWGJgeOluidTver7KaPnSqY+Ii08kRX5ED27MdsoIXp0mowwIen7wowqYnyATlFN4vXEhLRQhlS65zsR9BaSWR8poVFperoWGqn6BI/bMWN0ipt4g5JP/cIXMiPLaGRoSp/0bF+8ASFGD+hNZlsDKixTkopMC8jI3vj2jTOjYUh9m8R80S+wYml6/kftImykNaj6c4nSZo5J+9ncOBMDxZZSe4/1jv3o27yDrOjPLlvLUBCK23jnZJdNBun/WeR60JbULrqFQOsBn5v5YZ2WZtEaEap0G3M8K7XC7Pyz/sxsapD+ImOFjA1CtTc29Id5AFmrFwAAIABJREFUB41D67Tf6ME0dAoI0WKn79QDRcfE6zRNotOOFqHubStTrKRI9EAxo9TOte4XH5Oi569FNmDq9UytzK9nPkmo1I/LAfLJAEKj59d0fy4XANIUdhFiYF7SeqD4kB5YR2S+0ZCDMlsVWvV8utVJUpURCKV6/Z5JkhYeUql6KrmT5qHEoD29nkSUbdurjaX0DoqLYNDvDu5DDZRCYaBYnH7dHNPfLkOy7C7Hk420TAubpWuJtBmJzOwxs82dtvZlXzwDPPm+93H2wbfxvm/6ZpIkeesR5+VlZvbs4f4HzvJLH/jA8BnnoQxlKEMZylCGMpShfEVEfqUO9Lu//Vtsbm7wk2+xhHen0+GJJ5/k537mZ/jN//QfhyM4lKEMZShDGcpQhjKUf3yLZ4CPP/UU/+2P/oi//Ou/+Qf9fnVtjVN33snbHnqIf/XjPz4cvaEMZShDGcpQhjKUofzjXTx7rsuHPvQhfuAH/jlLy8tf1G9b7TaPP/44//MP/zC/8eu/Phy5oQxlKEMZylCGMpSh/ONePAPcdfo0v/Irv8KZ++6j0Wy+qd+srK5y4NAhHnjwbfzsT/0UUr7FZmvdSxIxD+2LXlLaYOpSlnIhsgfvBb3kD0gTb/TOZK8sCTF76Fw
zkAWhJbE2SX9KZPuRqJxE2QaHlSUPmOSlrEB4+nmv5KYgzsk07UKQWOlvRJZgqMzvJCR2P+GolymiFGiFSRJJEwDTxCPTL7pXYlezK7+ENFNHmwQOmaQJIb0+0L0+Jk1Eiq00+SdN7skSJYVKDE4sS5zMko+E7CcApoMh06QlcxBzrjpNegrTJL+4l48o0mP1k5YsDVVlsn6FSveZmISC2Mv6XfcSPZRIk8eQ5HTaTgCdJhaQJTNqRJo8kZXptmKV9pRI00v6+pYlHqheYhXEYmeCodAmmTCWJjGjn9iR9aEZL6MvWSKIylKpBjXX/K8UJPFAUkbSH6MdSXgKgUrPWxs8XJp0qbIkmzS5yWCxVHr8ZEBT0iwsK8vGEruOkenjYDlu4I3aTmKSQjSgrV3bZpIYnU+Tq2InsyGzrdCqn7wr+lleUhk9V4DWBkEpE923hdQelOgnsyYDZyKUIokhtiRSKW6XvJvYxh5BpImCoq8L/fS3/vmmiaJJzoxtoiEO0mFMHVAvkU2kvouBpLMBjUAZCoQYMDup+nok+6Y4cG5p8tdgMtxApuCO5C4y1yaQSd9eZTp8ilRXtEk2lQosLUh6OaBpAqUldyRiZkmwxoekOpbi+jQCkcTG0hKFUBm60+gAVmz0O8n0U/X0XKdJXSIdKyUESZpwldmjSNVYaI2lswRP871CEEvB7lq9JinTIE/NqRtfolIdy+YEoTJtZUAf+4l8MkkQma/RGt1raz9ZSZMYXcv8rJTs9NSprhIP7DvpKwyAnSXzKnC1eS+lgeDsWBqodB8itfG+nxE6bWtvRuonY8nefCl6SZBYMj33gcQvZfByff+YoWBTBKzI5h5tkv30QLJp2jda616Su5kapLHlVN/MbzRCGxSj2DFHZXPdYB+nfr8336TtVbo3N5tz2W27pp+kypJNzXjo1PfcriS37s0Y/e+y+ceMoejprWYgcTjbn5CpXzJ9qWzZW5skg0mA2ZFSuxdKoXsdpvr2xk6/Tq/vZW8d1BubgVZrjRlHldmLMH2e2r6SxjZ6yN7UvlS6PkrsLCE96cEXQZPYA3OMNsapbIHC4vXlto2fDXMMpCCrXT4Wdiagp+uvAa/eSw7EYIuVMAmVWppEXiX6+7c+8IEPfOCrsYButdv8Xx/8IN/3vd/7ebdtNps88thj/C//6if45V/6397ysVdXV1k8N8LbL3bwI03bE4wGmnreYk9dcXg94saMw7HliJVxm1KgmN1M+Mt7C0xGmlxH8ZE783zTJzc4d1cJJWCqnTDS0DQKkn0bMfkuPHPUY6qR0HUFYx3NqUXFlRMT/NdHSpy9FvH8dym6rTyHr8MffMd9VGenmezOUBsvk6vaRMV3srXnHeQqk4Sjd6ISH6EcnCjCiuAPv+cJDl+fJM49wOKxd1GsTtEcfRin7RE7j6Hdk0Q5WDz5BOOrC0AdSQm4jtcNsKJlJFdpj3wrfvMjlGs+5aYmJwwf9chyxOaoxcaojZaS+Ru3UF4REUZEU0cort9if7fEgZpirKbQFhQCm1ZeUuwobCVoFAQz2wm/8cQoD13ucnSxxcjKEkKF2GxQqGuKW5s47Q6yGyG1oFzfZnN+nuJWk2vH55hZWmdj7wTzSx1q4zkKrSrnHz6Box206xKURtlr2YSu5BffM8Kxtubw9U2cKAQ7z2cfLDNeTbj3WsiJWLE04XDnYsDexSp7Kl0Kayt86hseZP+lFdAe7fkT+PVb1CffTWPqHbjNPMerLzO1dgMrbOK0r9Ga2I9mmtFKlUJHISybuDSC3/ocsbePY+uayaVnicQxfuKfnuR7XmhzajGkWjR0h5GW5tIhn7EtCKXgTx4scMdKSClU3JpyKDc01+4QPDeV49a0Q7sEX/d8h6UJm9PPv4QXvcD2wZPUvTHGtl8k8s/gxFcJc/dDbGHxSeAK8HXQvAkjGv70E7BZh+kRxNUucAUrzCO4lk7nJeAmEovtmbdTqF/FawVoUeDF+/eBX6K
8vsXynVPMXW0wtfgxLHUNwceRXMYg5lIs3ZF5ODEHJR8sD7ZrGL7pHHAKOIDBCs2mnxcxrM4D6aK6A2wRu9+LnawSW0exWk+h9fcieDbdRwXNdyF4mTD3w9jxszSL70PbZV49+y5G18dJ/FmUU6Sw9QrKnkS6JZzaIvX5feTrLVrTo2wXLWw0I7WQfdcXOHWjRm6rwtbsNHPbCXtv1Vibn2Bso8L5EyO8eiTH3LaF12lS2rjBnz9xjE+9Z5xv+NPP4NfOA4eBBgnfhKTNlTsfYaJyGRlKtvb8cwqNc4x07iVXv0Ai7ibxDmElm4T+O/Fa14i9fVw9Osm5t82x7yb83hMjPHW2yPENh+ceGGVyW+EqjSVhpppwfKGN1+nSHC0x3lTUihZbIzaHViP8BI5vxOzZjFkft7GU5sBGzOV9Hm97pUsu0tyctrnzRsiHzha4uc/j1KJCOw5jS8tEhTzbox7VogUCJuqGTVwtW8xVYpwILhz2GG0qxtqKl4443HMlYryt2Bi12C7bHNqIKQUJ5Y6iFGuUJbg05XBiI2KzZPPoc1usHz5Oc3KU8sYGQlXwvTtYG7M4fGMTt3EDW11k88B7GV16laXTJ5m99Fesl6eZWD9PrvoR7KCGxZ8guIGtXwVewmtuIHgWQQsZnUTqC6wdeC9TV59DhDFCSZ559C7+8uwId18PePaQzxMvdtm/EVFAcM9iyF0LIYWO5u8e9nlh3qYUwO88VOTRywHK0hxdjqkVJNdnHE4uR1w85JmFhSsYqyk+c8rnHZc7nF4IEcB4M6Fasjh/xOOeSw0qYz4X9rvMbXeZXn2KufVLHFlYpbyxiNtYQltFcluvIpMWGwcfoFhZxNHPUqoV8FvnEEBzzzfjN34bqGGxgOE0CyRX4MAYzI/D+hU4NQOuC9UNeO9pCCOobcPX3GuY7Hkf3nc3rFag1YDJaUTrJoyNwPQsVu1ZEG2giuIx3OC/U6i0kIFEW/NILYEVwCWX3EWzVCCncmxPOCipCcamcYOITz40x1QlYOLmi4xsV3hwOc/Ydo35imCk2uHAUpXxzRr5SFItu1w44nH80otYrGPJaQrKY7yRMFmNGVk8T1Aa57P3jnH0WoPEzYEs4NQqTFUCyh2Ju73G3GYVr9Vmz1oFy8ozVe1SWN9EuEXs+jrCAruj2BorUWo32Nr3neSrG6wdP0th0/DYR9sCqy1QjLF6+F5K26skzAFFLNpExTvpTszjdSxOX+twx4UX8dvPIiNF4h+GpAw6wmIBzT6iwkFkECKdMUQ7QjGDIKI5exwR54hGJug6+/GCLYLifhx1F073HDBGwlHCkWNEpf3osEqu+xly9RbX7zyL9kdwYs1H7y+RizV3XlrHb/4Z0OWl0+/n9POfw6+9Rjh2lML2n2Mlm4xszuC1/xrBJrl6Hkv9PYINKpNfx9jmq1TmT/D8naPcfX6Zye06rszz2h6HtiexhOa7n20z006YaBmE7wvHczx8NWBPTTG/EbP0rS0W4jK1osRSgrkWnLgVsTLvcuT8Ff70B+5g7kqZyfVPImKXFx4+zb4br3D5nn/J+HqM0NdxmwWc5DLVM2fpNh8gF37azBPRfqy0lkB14qf44296P6cvbSOTS0AXzRkQJWL5Niz9d5haF7V0zssBY0heBeDpB59Eakk+USSO4PRixMfvyJOLNKuzNrnY4DeX5iQHVpKvfOQ5k3/7C79AvdnkB37wB99wm83NTY4cPcZjjz3Gj//Yj37Jjm0nmlyo0/fgRxonMVE5J9YEtsBOTOQvtgRWDIEtiCwTreg4EitK6DoSKzFXIxJwI90rDBILYQDl2hxPYNFxPLZyEjfUtIvQciTa8dnMF2h6HsrJkdg2WvokXpkwVyYRBZSTQ+CitY0Vx2gctvNFEq9A4pTp5EZIRIHYKaFxSUSJxM6jLQi9cqowEsPGBStuI2giqKKdHIImXr2D243Id0xRAC9Qveh4ZKdXdumVubY87G5IrqMoNRI
TaVKgheEfIvrBKiuBzZzEjjVeoAbiLgqn3sFpdxGJQoQxSI1X6xI5LkgT0dNaEVsSv6uIbQskdByXxLZJLJvEsii3TGGDSwWLpmu2scIIISCxDXKm0FKUmorAFvhdc/x8K0bGMU0/D8i0oIID2CRugcQvo6RPrlbDazaAGIsGiWWjbA+nGWBHsYmESxtBiERRrkRpxMBizRW4ocIPBqBBQtBxJV7bIPcqnkRojZ1oAlsiFXQ9WPUEHVsgpMBW5jysqIsgQChF4Hnm6tr1gABs03azIA3MQraTQKsN16tQ6YJlYQojCASddDyyewtdoEXs+EhC7G4btKbt2kS2A8LA771WhNT19BhNYImMOQsCCg44lnkVbPoorAxi77OzEFB2z8dO/w5N1NbKpw7QG3B2pL8TCPIm8uKaxXcic2hh0faKKJlDWy5a2thRC6Q02DUSEwmT4EUmQtV1JAkavxlQrAaIJMbSxp7dICGybUQS03UEdU+aiL/WOJ0u1ZzNesEjt95J226lulQAHDp2zhSswSZ2i4CF31EYwL+HFn7vN4IYkUDgWNRyOTSCTV+ynreJbJumayE1eCHkIhOLslMUoR1r3MhETTKIvxdCoZ30IyZC4AWa0DI2YMXpnZIEGp6k4ltoaWMlmIiu1liJKeJj7h6ZmEtgC5zIDHXbMdG2XDchlmAp06+hJYgs8EONk4CTmPeW0jQcMRA9E0RugdhOCxVhbEWnd3mECoHIFN0IAyLHxVJ1Yq2QqotkDUkrvXCrARtABclW+ncnVe8ILaQpDBGbQkMt12bDkzgxxFKQbylygSbf1fiBeW+ltth0jN+/lJdYicZOo9RKmjtHbqRpepLQFr0bjV1bkOtqch2dMqKNpjfcFDunTV/GEgQBhc1NCpUaVthE0EIkMTJombsptp1GQlu47TZSmzBx4pgCF8YWawNR6DXwbPAtoAWeA5Zt7NRL7ZMI8h74EjwJvge59M5U3jZ+wnahZAM30uN30NjIoIpdbxhdF24aUzbO3+0mBtdpOSQSlGWhbc8UrXJNFNNpt3DaHcaqCTJOKDQVVqwoNiPcIMaJEpQQtFxh7ALja71A4YbpPBt10ELQdjL8nEbbDiJK8DsRbqgQWpOrdRFJjN8IEInG7ypQJjqKTpBRiEYSSQuBTeSPmKJKbh6Nbx457cSm6JN2iZwcWaGN1ArRwkLbDn5DU2gr8hstIEQSIBAo6Q9EPM32YIOKjT8TpnCYsj2UbYqeJbkCMonQ0iHxs4JnacEU20M7PjpJELSwOy0C1yexLTO/OOaupx3HZMVyYiGwOx0kzRQ7lyBo4G1Eqf9uY0f1dH5opXc3EhLboesIZJTgBqEp7JLezUVAqZGQDzR+YO6ydF1BoWHsx0kgKCgarqTtmsIouY4pftN2jb1Xch6J9HDCJgJF2zH+vZMbQ4s8EKY2ERO7PolbGLjLEqfrG4e4MMJGYRRl5dLvA3Ta14kuDMwz8cBdd5naTYRI12qhLejaknxb0XYFgW1eXqTM3W35VXpsY1Ce+uhHeeqpp/hP//k3X/fd+sYG7/nar+Xf/K//hv/wa782fMBmKEMZylCGMpShDGUoX3X5qi6ec7kcLzz3HD/7Mz/NwsJC7/OtrS1O3XEnb3/nu/jxH/3R4SgNZShDGcpQhjKUoQxluHgGKJdK/M7v/R6PvPvd1BsN6vU673zkEX7yX/9rfuP/GUachzKUoQxlKEMZylCGMlw875D3Pf44P/hDP8S9997P/N69fM173svP/ezPDEdnKEMZylCGMpShDGUow8Xz7eSffvd3s7a2gnQc/sOv/d9f1mNpMLiUNOlDA7Ewj5FLNYgFMtgULQRRikVTQhAApJgjk54henkaSgikNvvLMD0ZIsqKNd0Uc5IhZUSGWBMg4hSfFSusVlo/XSusmgKlTVZpil3qwdCSDNMjDM4Hg4iRidki6Z1J9pC8xiSXDNaG7yPgVNoqofroNPOZQrYzvl+KwUkGOX3
pf4npQ6EzxFcfabWTeafoH00hBhBlfeRe+m/WlykmqocSQmAlBqIUC0FbYjBYA21LhEAkZttYiF6/o0HUTTuSFMMmBpBtspPiulQfsyYyhI8YQGql6J0eRk/tUrZMl9ImyxRZNQjLSQbapBhAJglBICEU7MIdCRIhe2i2PsIoS4DYhYUTt2lQDz01aBVZvw+gfQSo7FhapYhAOWhJ6f/d/j6F0aUMc9XHaHXT7TNdzBJVwnSbKP0+1c/sOL1mWjvaq1P3pTOsVgYJyxCSPdxUgggUVjPV+Uy/BobM/Eb3dKWHEmMXzi1rU/pdBHQy7FfPzgbHNhujAfTTDrzV4LgYRF52rB7yMkOmpTg1qXRPzWSUnmi6fZIiFvVuNUzMb2SSIaPMvhOZ/c68erg22ImgHMD6ZX8btGR/u8y+dIpey3BZvban/8disF8NCkuJvhFoLVJcVZ9VmfV7Iqye99A9vRe7dJtd+mnsQgnrdVNepgOh6J+byvzDgHproVNnanQryfonEr1xUWRjJno+L0OcykSnSVr9OaMHitS3a/POv/UOHRJ9hKmQb/DbYMD+OgNuQr1+QhT0OXrZNlLQZ9eliVk9tKVkN5Kx/78yyEQhTRL5gFdF6x3jbDq7v4VU2uA0dZ+FlgjZ9x+avt308Kapn9Z6AGWa2r02OEOEQAQp3k8MoPLijEuY4fjY0deZHpt5ViGTBEnSP86gLxUguxoZZ/agdw2L7vcPsbGndHYWDGD/UnSi7vk3Mw8ITW/+7mMAxYA+qx46VwxgJAfHKdmx/W2Gb4cOCbS0erY+AKvs/S00RGJAz9PPsrVPktm7ZT5LevjKtMXSHC+Uoq/fA8ager4zWyuk45ct4HrrmYQsyTzqjVl7x7kYvGAf29dPOOyPpRKGfpWt9USGBx3wE5kPk+qrhKrbLc1mkxMnT/J93//9XL10idn5ee4+ffrLcqzV1VVWnysRWILKqOTapMNopPEV3Hsz4C/O+BxfS9i7GWELOHPhGoVqFa9Yxg01d16tsS+S7H1tkdFYceVAiTMXN2k6Np+6u8CjLzW5dMhnvpkwuxVjAYErEbbmqbMFTmzEOLGmajn4NcHBDU0Zi0PrEa+c8NgctdC5HH/4/mnuuZzw9Lsn+cQjU9xxJeHvv/4gBxZBkac1PcmeTc2NA2ViJZhduoRwc8TuJO2pPWxOjzC6vIynHfL1i8AYStyB4Bqx8+0ItYqgidMGwRIwhgxqSH8abIeNSYt6XlLuKpwIppouL3/tFHtefRVihRVVuXrHYTo5m9ATzG0ECC356Nk8iSuxlebgSgO/q/jM3UUeuhTQLUjG15uIJEJg8+IP3c3scytcfuAuRmohrckZCpVL5LoV3HYTuzCP3+rSGikxurGG39pCqBzFSNMpSSaWNzh37xyzVcXxpTrfvq45citidGmFq3cdwFEuhQQu3yO444UNxltdmmWfXASzW4rn/4nL+EKO6WqFXK1BPHqQxM9jNRM+9W0nGNnQ5LSH7NSx9DLrJx8iv9nAjluIsEht7wx2LKjMjeE3t/HaN7h16D5s7VDaepHI24tzYA/TlYTDi00iz2Z1zKZdMDQXR0EtJ9mYF+xfU0SOoBwo8oHm2oTL110JOLUZM9dQ3P1awNaI5Mjlq9j6cxTimJHVTbzoGaxgC0EdJ1xBso25vLuVOpAtaHWgug2RBc0WPLoH7toLV5fQST119PsxqKkQNxrDijdJ2APCZbTbZXx9gWB8DyOVFUrb13GSbRQOkgtmYcK3oZyHkeoFqJWhvgQrChaXIT4BdEn4fiRbJOIhJAHd3Ndjx1dJxDciucD5e76dmbXXUDyJ4HnsyDFOThWQfBpBF0NV2A+0EUTAAnbgIphAiBxX7r2PPWsRXmRgxE59hbB0ks+9ex+feHCMe85dx45C7EaN7sQMyxM2Zy83mFu4jKJEY59HcfMaauIIN6ctJitdCp0u7XKRyVqLQ6trSIqEOUl+e43ZWHDfrRrlDRs
7epau/0+w42ep7z+NX7tKOd7Eq1dAuzTHzlCqvozsOEiuICghhEaqG+j4IFrl2Dx4nMVZlzsWIxIJV+Zdrs06fP2LHXKJZnnSZnHaYaqeILRgeZ/L7KYmcF0WZxzuWgzIR5pDKzGLUw6WNpSLT54tcNeVgItHXGaqCTaaiVpCOda0PEleaY6tR5Sbmhv7XfYsbLGxb4bX5j3KHcW1OZeJRsLBjZh6QdIoShIBn7ojTzFUhLZgX11x7FaIH2le2etxx3JILtS0Pcn+zZhnDvsc3k6YCRS5UONHmns+16LUuUihVsHrrqMoUD+yj7mlOiOr1wmKM3jdvyfX2EbLeUaan8Zph+TVOl74KuAguIyhbHTMKzcKcUo/8R1kvARcpdSs4XYvo5hE0qE2c5DNMZ89lZjDjYSjyxGhK2kVJUoK7MQsX164W7KOy+H9VbxNn6mu4tRyRCwEr7wTDl/TbJUtyl3FVsni7hsB5XrIqwd83nG+jhPG/NU7RhitxaxOWxTzEXc9fYOcsticzDPXCJm+9VHAIbaOpRhpDy09hI6x9DJ2UsBKAqz4HFILBNeBFWR3Fjv+JIZEo9MLzIjI+xms6FNQ6aA734DIL8J2HTplODYGtzagHsHpfbCwhl6bRpzMwzNLEJUg6kL0GIxtmQVo9ThMBGApZFhJ+9sHGghVAw4hWEFZh1k/cBdTS5dZ3TvH5PoW+UaNTmkUW3p4aOxEkG9EyDji5tE95CKX+ojD+rSHo1yCQg6JR+QIDi2tUNiqEzsHqM/tI9dVdHyBrcFtRTSnx7i2J89cQ1Jcu4XyR7Dr61hBha29e6mdmGDsxiu8/OgZRhsSt1XFUpqwNMKNY+OMtDycxhpOuIpV3IvfuoxMXIQoIp08MpFoJ8/W/Bif+pq9bO6d5ODVTfz2NVNnQTgk9h6IIz78nXu5sCfHkeUIv7KNoxbQTNIeuwPtF/FbF4Acl+9/DCV9Knv3UqwnvHLmADfunmL/axew2y2i8hzK8SitvoAXvIYX2KzNzzG2toZmEkkdqxMgwi5+dBGhGsTyFLlEc/XIBOUmrEzZHFuNmFlZw+n+MZBQ5BFGtz6CwKI6fy+l9Q8DEYIZJJ8EYhRvR/JZIMYPZ5CBw9qhE3TyFkeubSGikJdPz+DF8JlTDt/9bJsjSxESTTsnafuCYxsxM9sx0w3FgfWI8Yrk0A3NZC3Bj+Hp4z73Xgs4vrhNcb3C3qDD5NIiVhLRHTnJ4v4JDl95hUJQwW++hqDIrWNfT3n7Olbkszp/mIm1mwiKJPIE6DxKngZnjH2thInFm2geQHIDzUmkXsRWdQSLGERqGzgErKXBjjpQZHv+SR66GXLXYogfa2oFiad1D8V5Y8pBCMHeSsyFfd5XP/K8tLzMfQ88wM/97M/y6//+33Pu3Dl+7Ed/lL9/6qkv2zHdGKq+pJK32M5ZCAXltmK0pnhuzMEPFZaCXFdT3qjidOrMrUU4saZYC9i3GEHSZWylhbIE+VqHSCiWc5JCI6bhSyY34xR5ZGDgWwWbq6O2waU5EmtdotDErsfURsxoVbFUstgqSBr5Ip+YziOV4NrYGM9NFUgcnyvj4yjHR3kFRhoKbbnU8h4qEcgkwApC4lyRyCvQKBSQSUx+q55ebZZJZBmwif1pDPZGIbmEQeq0sNgi3wpxIk0lbxFZgnxHYaU4sPWxIpIOVreOIKTuW0SOoOsIcu0YgWClaLOVl4SuYKQSYEUJforva3kWOsXwKHKsTZUBxXZxFG07RF4OqRoUKqsI2hSbCoQ00dokwN+qgbYob3SINNjtDptFh0JbMbHa4cT5LmMVUwxku5gjtiUTazEbYwIZdRnfaFPopEU/pMXSlEXs5pm4WUdLm8Q1jGvlFLk1ljdXyk4BZftAl3ZpBHBxujVkrAj8PAhB1/MhCQBB03d7kRlta/asRSgBbhDhBwb7t523sJN+dDmvTdQgtgzaytKaUFlMbsbMrUTMrMd4AbhRdsXdJL96lUJ
7CaghuJo6hPMIVumjCa+ZaO+NdRMjbYdwswLTEqbHIJfxLpNUHxJgC7ezan4nBCKOGFuuU9hcI/Ty5FZruOE6BkDvDkSrJ4n9GRPx6mhYXIHlNnQrGI6zQ8IsYBHLCaBIVJ4FfBI9A1gsFSZT3ZhJ27+YRhS66XlfwmDqwKDultJIzg2gjBU3qBRLlCrKYLkUSNUiKU5wa6TM+QnTL/52Bak7eIFB1Y1UQ/z6FkradD0LSYt8BA3fFOPJtbp0PZeRrQ7T16pooQkdB9DMXa2y96U6yioCkri1AzibAAAgAElEQVQ0BUjCtMhJYWMJobppP4WAjcVaansRQgdAjIybKPKErktoCyY2YkJX4Caamm2iIMWGopKTVHxJoWuK9SyWHWLbxQ6h6UrGtxWjtYRcRxFbptdiKVgrW7ihZq1oUWwpg0oMNeNbirYnmKgmTG7FxJakkbdASgLHouMKvFCznZN0HUGxobASaHqSriNZL0ganiS2BNPrCYWO8XkdR1BuJNhpIL7YUlQdKLUSxuuJ0fVQoSyLXGWFXGUrvRhySeyY8cUWtqqQOEWgidd9FuXaFDbPAYJ8czGd/Oz0QjGhh04s5QCDJ6QUA88Dq+SqnwZuIegiiPG7CbmuIhEwux5RbCdYWuMlmq5lEGixJdCWiYbr8YhyV9P0BaO1hK4j2B7TjDQULdfgNFueoNhSOFGCVJpiPUIkCQtli0BKuq4gR4IM2pS3u3jdLMLZNhEu10+LXzkgQhA2ggCvsZnizUIEm+mEv4HTXTV9IJyBKLuiW9gHUQe6LWASghg6XeMbtIY4vUskJSQR1KWJ+rZis79uDIyb77sKKEPOBttL+zNJL1aaCDbR2Gm0Nk9gS/Jb67R8C78Z4LSML0lsn9FqZO5kecbXNlyLxHIIHEk95xC5HpHrgzA+cnKpgcZG5UaJXA8nStKopEB7BSLbAg2R52G1GimOMEbqBl3Xol4aQdJkvVRCWR52u4UdRMRegUrBQ3lFrKiDpRs4iTTBg2oF5fm43RjlFcDN0fFcLo+PsjQyit/spP4oBhmTeCPIsMv1CYvrIzaJlD3kqcZB2w6Jm0fQBApUShN0bJt6eQItBJvlcZZGRxAEONEmSjqQgBuuY0cbuNUtOp4FjKApIqhh6U2sbhU7Wk/vxuYpbDap+UZnvVBTaKk0ot0EOkysNtIASZQWTzN3AUUvSpsYvSMEAvK1BcBgMr0Ag45MEmo5SbGtqORgdiUm39aUWhonUtR9yfhWTL6jGa0njDQU4y9LpjZiRuoKv6vZ9iVOohlf6QCS2ZequMEWCpc4V0zvjViU1hfStuVolsYBG2etTZCT6ZxSRAkXJUrE9gx2lLDnZhuhrZTBbad3I6sI1smQdsZG/NRftNPzjRipJ+y9FTG9GuN3NF1XMlZPcGLINxIiKQgswUhVs1i2vrqL5263y4njx/mu/+G7+emf+ikApqemePn8eb75W76FMAy/bMcWb/ZDIQZuC97mLhW3ufu6a2fiCxxj9+2VwUcIhB54/2aae5v2f/5txOfrkdvcQhyouvQP7ugv1B7xxe/zdnfCv8CPhH6D7/Ttbp++4Yi+uRMV4ovpktv/9g0VSbz5vnvToyF2fKS/Ajb4hcdN/AOPoG/zjJr4AiN7O50Qt3kKRuzuqn+gUYi3PHKf7+a/uL1zuK3qiC80HuItapp48wYtXqfXXwp9eIM+2PWR+AK7Em/GhYo321LxRX4m3qLbFbwFh/QlnXnFF9VM8RaPIb4If7PTksTtp4U3NXOJL2Cl4k13iv4Hjs5OhRVvSfc+z9OAA67ytj7mTa4AvrQLu7cyz76x2/mqLZ4vvfYaZ+67j1/4wAf4pQ/84o7v5mZn+eAHP8i7Hn30y7qAHspQhjKUoQxlKEMZylC+GPmqLJ7jOObsAw/wje9/Pz/9kz95221+6F/8Cw4cOsTjTz45HKWhDGUoQxnKUIYylKH8/3PxfOnSJY6dPMnP//wv8MFf/uX
Pu+0f/sEfEHQDfuVXf3U4UkMZylCGMpShDGUoQ/mqi/2VPFi32+X+++/nf/93/44f+5EfeVO/+cTHnqJUKvHEE09w6uTJL0k7dIqGMygckeKTDIgsw4IJ3cfJIQRKSIMtGcAqaSFTXJ3BJhn0ikkC0yJ7XtlgooSCWPZBKTp7PlcZfBkpVkumGJcAeo83xQPINxlkDCU9gKMZODfY+Yx2Dy+UpB+o/o534F4UYJInB9EzGXTHoK52PkOa4ac0BrOnPXOOya7nxJMBNFUf/ab76LkejkggUCmixyTSiGTgGa8eekn03icDaLX+s+O6R7PRIh0PTYpf2okeY8dLIlJEYA8x1nvQLRl42kz3+10PPoWmBqhkOtWbtF2q/ww7AwiyDBP2RnqqBxBaaHYikHoHGxzfwb/16zSjpxNKg1b0n2MH0oQtnT4TJ9L26Qw3l/avGNCX12vfbY63Aw3URwXpng6az1/fD9HAecJOvJ6+zfF20rgy/JFQpo+bZEi5fswg6elNZk+pTpKiFoXYpQe3eepQ7H5geACb1Tv37Ms00ahvEWlLVU9n1S4d6A4Q+4R+fQ9L1ce+keIxRWrEWXOiDKeWNTNFzZFg8FrStEIM7msAW9dH0A2kBKQ4ykEflPm8zP6F1j2fEu/Kv9iBc9vRn8af6gE/onmdse7SDf0G/w+KGtiX6adkwN+ToqmSAVRV1oeJSHF1ml4nih1su3QUBzBmij6rLsP4ZS89gCzVA/qokZn1Dejd7nPu2+3OswwxibRqp0/q6aYe0NfbPBUv2OUPBkNsemAfapf9iR223fPfiBQZamxLJgP7TDRC91mxMiZFH2p03Md5ikjvmMtE1MekCT0wD2YjEO/0Db2ZbgD5mIHgRIqr7X8u+mffQxWK23q328266nXbxAZHpwfnYb1jbt2pUrt89OvQhP2+1rt8vdAardL55nUIUnbZ0OB3ycB+9K61gXmJrD1a9fpKA83BdY7SPbxttoZB77TQbK5MdmBr+/omUYjkdk+C989J9CZc40elGvSVO/GWO3U5S4yXA+e2U9e1TjGnu3tP3KY39Vcw8nz+wgXuuucMP/+Lv/imF87GzgV/8+GP8Mhjj3HhlVe+JG05uBlxaDvmyHrEAytd7lkKePxyk6PrIb/wcptSpNmatDiyHmKFVbz2eQ7ffJrT117A23iOqaXnkEmL/NZzPPrK8+S2rzKzfp3vubCNvX2Vs1eusHejQ71onMhn7vQ5d6fLt77S4cBWTC5U7K8kuDFcO+jQcQUdHyabMS8e8WnnBadammYBElvwNde6SCE5tB3z4jsnaI2PUS9Z2GFAuaPxtEBTAuXiBF0KtU22R2xAEoxPAmWWDnwddrKA5jSRvQfw0BwlkfcCRSJ5BAi4OeczWq0x2kmIcpLJekK5rbl0b57j1zaBkDg/hSBh32aH0WZCLlBcP53HCTt8+0uLPHTlBvtWNhBRFyfocO+tkMl6xFQjpjkzSTw6T1Qsctdz12jOjXNs4XnaY1OU1y5QnbkXSYuwdAKvtsTagTLl6jZaWkhdA6VwqjeZ3ljGita57/pNRqpbyG4Dp9PCb26RlMaZr8Ws7nOYXF/kkfN1hAqxq4sErmC8ldActzl41cJOYmS7ihVW8Tcvce77BFFxhEfOv8LY1jIiqEMiiayzjK5tEOU8gsIktrpJoV6lNZIDwIo0sTjBdKXFxrSHpozTXObBq2sc3IypTuaoFwWrYzaNgiQXarbGLSrjFncvxPiRwok1zZwkcASFQOFHmnJbcXw1xEpiakVJIkcxWcNZ1r0FFNGcAVpo9mIyiieBE0AJGMdkJ+8118uFHBzaBw+fROQnU+/go3kCKKTowg3qe+foTo7idK6T2NPkGhWsZJvQOkCr/CBWmrltHF+E02gDBzCZzCeBKeBeDB7oTiyW0UyjpU/CEawwQXMs3c8E960uAGUsbgGjVOa+BQhTOoUAHkjP10mv+08POEIPkQQEBcn63jyxH+E
3LwA2y3t9TtQi/tnFKtX9x1FOCUEXJ4p49PINSlsLRKU5nCRgZHOTyDpAbmuRO26u4dY2sCtLFDoR2sljBdtIFRPbEoSL1WnjNq7gNS8CAtnZBDSFzRbQAiaIrYeAiGJtE5jg+te8FxhDU0DGa2jGCEqHUE6OG/MW+7djEgMRYKae8BMvtqmWJaEtuDnnIITGUjDeSjixFnHpVI5ypFAlid3tMr9yg0YROjlBrSRplSVPXmzz6mGXu5dCYkdwc94hshTXjzh87OE8N+dsxmtdVg447KnERIUiS3MOt6Yd/FjzjpsBI21NbUQw0lJslyWtnODkRsRMPWG2ZvzErVmbzVGLmUZMbAkW9jhc2u/ixJp3rIRoy0z8bmyIRl53FYuFgQt1wfjKefLxZ7GilyhWPgtsIVjCb34cQ1hZApYxxIla6tXn0sXju6HSSv9OwLXTCdMjyLto4eBwEUmN+ZuvcN9yh5mG4tIBl61RmwPrHe5bCHASTaVsURmRbCiXezcSJi/msfMxa4cVjZxkYa+Nu+GwPG0ROoI9tZgHFgOEArfT5PRqSHPMwW03eWyhy5GNJg9ea3H6Uhcr2UQJl9OLNeZvnUvPYwuv82kcnsPhRdz2i1hRBVhHJm2scMssMcRxNHmgiUVKzNFNNE9iCBj3Utg+B60Q3TpjaDzLdWjPAg24vgpblvnd9VVY9xByGy4vATNAFZIWihwECVRqxodoDQ1h/MjpfWhmUh1vILmEpoDWs8wuPUNU2Mf88nWu3zVJOD5Jvh1x4U4PSUyh3WTp2CSL9x1hqqXp+vDpR3yaOcmlYy5/9m05bhy0KbbaXLt/Dm0ViPMjdF3B8+8oMrpdp1aWVGZGCVyX2VqCk2giZ4Rbsy5K5oic/Whg9tY6AFPVLs2yg7Jd6tNjWMLm8n6HhUM+nfEDaCys1gqCBpUT0zRmRri1P4/VrSEbq3ihptRVZrErLQQRgoTEnUXlJgimDzK/qhFC44eGC90qPU577GHqEx7r+3IkYg8Jo+xdr1PAY6IRsj1TZt92zMlbHRQFEsbMZVDORVFMgxUxY402gi665/tXqc/kUYzS9d5J7eA8tQN7OH39CoX6OrYyiMpusZjqf4xdXwE2gQZdoYBRtLibhInUr86mF185YIbK7F0k+SPEFlzf77K1b5L63hmUJZhpxPzccy2m6jHrU5LFOZtX93k0fMnFMy5eqNkas3rzVmgLamXJeCvhnYsB69M2VphQOzCPjCsIAq596xleOznCWKAQdJCspm1vMbW+DNjE9h468w6xOMGFt3891x44QGKNIZI2bmUBZ3uR7ZnDONwEClhc4f9j782DJDvOA79f5rvqrurqe2Z67gMzOHkBIMELFClS5EIESYmi1o5YWbJo7/oPy6tdSStTDO6GN1ZaRthyrNcbtlcnSVmWRJEUBVEESRwEQBwzOAaDOTD3TE9P30ed7870H+9VdVV3zwCgCK4UUV9ET9VUvXovX+aXX34v8/t+CVki+XY0d6b6vQPNgXRcjLoucQkorcz38O81Y/WYwARDg2/CUDvuPkD82Jzn99x3H7/02V/m11OqxhuR+955L3/19a/z4Z/8yR9JWcZWIkaWI8YWY6auh0xdCTh8vM3EXMQdR12cULOWl4zPhciojuQlRq88yY5Tz2PyLOXF5xDaxeBZDj11DCe6SHnxCrc9s4YZXGLX8xcZnvVpZCRCC741YXFswuLIcZfRpQg71IysxJix5nLVxJfgmYJSS/HsqEXblmTqirYjUYZg/wUPgWR0Oeb5ySJ+rkArI5FhSM5VmBqUyCK0geEFZJZXqGUNwCDKVoAs06UDCK6ixQRRtoTAAjFClNsOZIgLYwgCLpdsCkt1sq4msATlhibrK86OOmx7YQVBhMqUgJiRBZ98S2H7mktjDkbgcdtjM+x7bpqRK6td53lqJqRUiyiuKdxciTg7TJiz2fHQeZrVAuMXj+LmyuQap2iM7AQ8otJ2zNY1Fgs5cks
1tDQwqCOIsdQM5StzGCyz66VrFJbWEEETy20nmKFskepqzGLZpDgzy96nWmgVYnCdwJIUmopWzqD6ssSIQqRuYKoaVvAqLx7QRHae/Y+8SmF2Dum10VoSlHdRuLpMZBmE2TwG18it1HCzTjKrEgrC7DjlpTYrRQtFDot59h5fYmwxpp5zaGYliwWDpiNwAsVawWAlb7DjUowTghVD2xEElsAJFXaoybuaidkIGcfUHUnk5FPnMSRhHifYHSW3AS6aodQQ5lPjkEmd1ywwlBiKbBZGh2HHKOzNpb3CRrMrNZ7TwCKtSgU/k8fgMqpQwVlrIFgjLI/iju5GMJsaHxtNI/1/OS3fcOqwj6XO/hgG82hKYNpEjCP9GBhDMg8UmbhwDsghuA7kWaweSO9zocdBkqnjbKT/X3ee0SFBVrJccojMEDM6AUjmSzYj8zG3Hm3QqEykmKwAM4w48PQ1smvThPkKhvYpzKwQ50fJzM6x7ewyVmMVM54l2w7Qpo1kDaFiIkMisDDwMTiPoU4BGqO9AmiyS+3UkSkQZnYgCMmuLAMFruw4lD7wOClCKU9YGkVbWa5VDEaW42QVS0O5objrOZd6VhIagpmy2Z0ALLQVk/MR54ZtMp6ihsD0fIYvXqNlQ9sS1LOSli04/LzH2VGLndciIgnXKiax1JytmHxzKsO1skmh5jNXNqjWFaGdYTpvcK1sYIWavRcDcl7iNOZcRS0r8R3JxEpEuRlTqcfkXMVsyWA1Jym3YmIpuFY2OT1sYSrNzushSiToTjPWSK0xWUj1bX3TguKl4xjhkwiew4y+nw74VxB8A7iaPtzNpo5bPdWNcnqOKQhbqe7FYBrpeW08RxM7EfAS0KA6e5qd0x6VesyJqkUtazA657HrQoCpNPWsZC0rmYktJpcjCq/YGFbM/FjCxr5SsbCWDOZLBqEBQ7WY3VfDZEMrr8322Qg3a2K6LfZdDBibbbHn1Tbbj/sYrKK0zdT5FtXpl9L7WAa+DXwfOIrkRSRrwCIi9jHiWjKjbXacnHbqdCf4yphdJFjHkYTVq+K0z5+GeC19kK7DK3PJUgQxHJuDlgPZWXj2anrMMtBCYaeIu0ZiT1QHcVeCiUrSl2mkfycAm1hWqCw+jcoOM3L5CleGy4TZMk4r4vSohfAjMo0W14dKzIyMU2qAbwsem3Jo24KzIxZ/uM9hesgk0/C4MloFI0ts5wksydHJDPnFBo2MoFEoEJomQ/UYI9IoK8fLloV2HOLSCAhN+fwyoCnXfNpZA2Va1PMFwORc1WamYuFVxgCJ6c4BbWqVEu1CkbmKgwxqmNESZqjJ+slGN8nsZLLRU1wYQtkF/OIY1YVkcdMOFDJWNEdvwR3eQyNrsVSyiZwhNAWGrzWxA5v8SkC9kGd0OWb7RQ9FJnWYIbSs1FFO2qm41gZ8NGbquM9Ty5lAgfbkPhrFKvVSlakT58msLiEV2KEmyGRSWxlh6CUSPneTSALkiMxtxEYp7UPF9FgbKFEv7UAVJ4il5tKQyVqxSLM4RCwSu/TW73uUahFLBYPZssXJYZOmLTm5zcQKNcs5AxnHNB2DyEjQn8WmYs90wHLRQEQxzWI1HdcDruzayfmRAvlWZyO3hfT+XcrXFhAYxPkqbsUgNic5ue0WLg+PorMFZOxiMo0VzbCWG0uZzg5wFnAIrKl0vCgBlXRiJ9uzCihwQsjNLaYbsiU2ttxQRBKEEvhSkPfSzbjEj8F5fvGll9i+cyef+63P8xu/+qs/9Hneee+9/Nxnfp6f/cxniOP47xi2IdZ3rUt3rtPp5lNablgV7z5jdIxweqDofL7+p631Hdq0XJ/rt3W6wC17wiw6S+C6fyFF6o3X7i+T2IhRE5tXKPSG5d2eRdQN21npTQtNorus13+EYDNST/csV3d2eEpmlgQYom93va04PskSZVIx66+6bzlHbAndWj+ZkhuXNjfWldgcXdARo78tQHbrXyP7l3D0xuVM0f99z72KnvJ
ro+czvfWdaLlxAWvDQVJs0Wav9RlbfK833M+N1iLFBl0R/aEqWy7ZvnF4mdjE+pJ917shSvAmC6n9ei/XzyZBmxtAVWJ9KVPoLTqTcWPd2tyKkq2RgRtBl3pDGTeXvzcMQgtQxo3uj+6yaS8RqveK3TAKI917cosVeUtvCILoDTnT6zZI936/IQhH936+wTx196OUrxev2LG3Bh1Wa/KZ3VPPclM9v9bienf5ueccOt2N0egJx+qzt70hVN3wmfXAit5oHd2za2UHYbjp97I/WFJ3x5HOGa2eh0NjS5u39T2KGxg3vUWYyw1wf0JuPkZs1WH1xm0RN8zDyXSJXqQhiZtRq6I7ZiT6YvRctbBhwX6jLvXboS1MYe9/pdgyCIC+HtsfCtfV4V7793rQcWLzd32YWd1TjzKx693+Ll8jOET0ohu3skC6W+ds6P831xO9xRi7oS/p/rrq9nuzHyRp6P7+viWhblN3Xx+3BMkD9Vbl1FsCbfV6yGtv+2m2DOsSW/o8YpM97auCrUmWiDd75nl1dZWf+MAH+Pe/8+/5F7/6z//O5/vi7/w2p8+c4Z/9yq8MotUHMpCBDGQgAxnIQAbyY5c3zXl+4aWXeNs77uY3fvM3+a9+/jM/msJKyZOPP84jDz/MX/zl1watN5CBDGQgAxnIQAYykB+rvGm0jQ9/6EP8uy9+kV/6J//kR3reSrnM80ePsnPnTn7mk58YtOBABjKQgQxkIAMZyEB+bPIjn3l+5tlnGRkb4zf/58/x3/7CLyCE+JEXulQq8b/+7u8ytXs3y8vLP/R5bhZ7qPv/2erXPa8bsVz9EovN4LDe0/T+MtyAbkqAKqI/xHTTBdKYJ9WDpREb43M7VxBb3+2m0GLRg2kRfcCb/hKn6JoeHNtmrE4PXq73lXXA2DoGqz9WSfdg6W4cu9cbnye6eDvdt0Xoxug20ROz3YvV64kXU93SbdYNNqOMOvXW13o30f8u0U7fRHtUByu36Yuev5gbbyvbi0rbopxiq9/019mNFa+rtVvoRO9fLxpIQ5S+xmz4rp9xpH8I06G2qG99w81xxZZ9MUGQbWjzvjjLjXV383ramFWgxY03Jlfixpamtxyil2qVmiDVbVfdd3xHi1XPffcCqcJ1olpPq4kU87iFTehttd4YX00P7m/93uNuf9Gbyr65jm4U16u30JFeVpy4uZnunFFsjqUUW/eQvubuaq9IYWN6vQS6o3c9dSGUXq9n0X89fWNrsKnQ+jX3NtY3eK+2GJ9uVn/0cx63HKi2yJnoDECdHA8hNujzuu3u1l+KU+vobS8qUpGkTupN/a5TYrFpm259E43ZPGqtj6Wqr3578WadPJ1+XexmloTrLSlU/zWi3vvXeguLo/t8j27OAv0QuvXcJrXh+lvoQaprfeOq2Nx39A0iyfWWfU71tX3HJHZRlhvvT+s+6G23vtXmYau/r4st7pHNY4LYbB10X4ZUT0RzN5dFsSWSse/carMFirYa7DfoohY/2pnn2bk5fvqBB/jjP/5jPvqRj7ypXv8v/sIvcHV6mg99+MO8cOzYG/ptJCEbaK5uN6msxLRyguqqRIsk89dSmm11RbNoYFaGyK1FJBgk0TUURrRAktncIGIKEBj1ZTQZJE2swKXqlggRvK0Wc/diSNYPEVpR8gzW8hKNZqQZU/EinEAx2tT83HWP7Y0A18hQbLW5a9FnpBnjZw0iCaPNGCtS+I4gdhzygeLk27OM1cYJchmK1+sYfsjelQUEEZERAk12NKfRTCDwMcIQkGhRJEnCKRPkNdmGR9l3MRqrDK0q9i4VsVsheSPP/uUwHRAyKNsCYsKMiZIgLBivK5Z35Jh4UWExR1x3ia0yKlNkz/I1nKVVDEcTm4LsikdsX0WyTHnBAtYoLF0EPIqrV4EM1upVtLYZX7oGOsJam0EQ4rQuIqhjprQJJ7KImiFCuERaIIKAxYM7sFyDbTUXSY1MYwm7PQ+xyaHFy7Sr4wzNtjD
9VZzlZUw9j8JBssq9l6+SqUeYrDD7rr1Ujsb4+SKYCfJKRgpDLwM+9e0lCvV5/FwV4bcRZowOI7JKAw4Ru5HNGk5hCbswhqFhrBlTcRVrJYOJesTMlMB1JHYITpT0VCNKnmrbGcnamMnIckSuaTLWilG5PDRzwCF8o4gTP8vc3W9l/LmFdGiZ73kubpDgh/YDj9LlXK6uwcIStFxYaqfHr6C4FYmVDl8xpcVLyLiAJot06wnmkxyG18IIO3ijncA7Uz2CBIdXIiEhlFKi6nCaJe6gybF4b5WRFx1mb8kxefwQjQMwdqpGY+wOigvH0WxHcIWh+vwGx8FP+6GVvu88+7skODPF1NI8xTUfTTbpkyywbfUaUoxguDWyLYHw2hisUahNo2XqdeoY010AIi6/ZQgZW+z6wRUELTQZnIWr+MOjROYkdrtG1i4gVB1BE1gjydp2kXGdhO+6AoQoqpjhHBoTtzpFceUiU/Nn0zquklBR2jgrM5juGgdW95INInzbQEvYXo9o5iUFX5EJodqOKbuKwBQsjJnsmg7ZuxYiVcjhRojRrqGsMnnXA1lgWz1iohniuDXuWlJk3RYTdQO9XMDymhysZ/lHsz7bGxEijpmqeWTcGKe5xK3LFsbcKEgotupoIoJMBTOCW5cDcs2kCartgIzncn18iJG2wog0taKFE0Vsa0bkFiEXaJaHTJoZycKEJDetGV1ViDQ5TtJM6g/Sh7FOwlsj1dsSCTWmyHri9o5Uzw6T0DdKJPSJKDlWKaitOxSZsI3hdXRnCUHI8Oolpnce4EgtSgZdwwKtCQ1BLGF+1OT9yyGH1iImahHLyya1WLBSleyoR4w3YzI6Rmif0tocyqxiBzGtIZtKYxFLCaS/RmSC0VrDEIooV0EQYgRNwrxM9bmS6oRIy+cBtbQ/N5BcSz/XiDBM71uRUAMUkEVRTftEE7gdOA4EaDGE0DPpsZMkxIX9ab22k2NbyyDbxNyJwXPANgxW0/LsBOZgKAOzqWs7NYGo+BCPwv274UwDzi6TCS6k/UEBAXsWz+OseBi+yf61Mpl2C2dlholVC5WtsDpsYruKWAvKXszwcov/8apkqgYycpmsRQlJqbWEE47ztsVVwmKO4WadUOTJ+TGm1hihixG02T0kkW4DLQzssIwMPRRFQttiqNZAek28osALBG9ZDthWa2BFCoWDQaLQvs4w4oWM19ooo0hoZ7GDiJKvGE9yBG4AACAASURBVGvGXLh3mFse34lXGmVxX57sikGQEWyrNbl/XmN6NcLqMKYGqRTZEMxmgDLLnPlYgb2PwNqQwfCcR7nRpJkfJd808Ys7mL+txPBlSaMkEO/ezbYn62hZxmg3gLW0TXYCr1L22ghc6tvyOLUGtl/H1AuISDLajLBDldCnxwqwbRLjpRmSYIMVqiuXgBZGuIQQmdRLHCaWw5hKAofJu01iI0lY3rcWsTxs0A4leV+RDTStjCBwDNaKktCAfU3FVCOigiITxoy4itmdNnaosSONE2oWxyQFX+P5MdJdJe/atMv7cRqzjCy08MwSlVaNiClM6ij2IjmNqZeAFloaVGoRRC32rq5h+Tm0FJgsEzCBSY0hd5qEPnMrBieTMcMywXfTfnMLfn47mdZh4N3Ak0CW4dVraDvH6Xdl8VogPU2zIGllJeOrMQdXY0q+5uqYSXbY/dHNPB89doy777mXf/nrv/6mO84d+cJv/RY7d+7k137jN97Q7wJLUGwpntqRwYxguWCghSC0DJoZidXW7D0fMFcxaRQm0gG7QQf5AiEGx4BVYJ7I2g5CYHvn0BQxmMVxa0zMx+Q9xW2XQ971jEt5tU15qc7oSkzLSZ6gp+YixhYDKistdl5weeAbdfadbnHNEIxcXeHer15g29lFWnmbQAp2zIY4bkA9Z+DmcpRXY37vcJ7ZiZ1c3D5BaGqk8jjy1EkEAaHtAbPsuPgQsbUTaGG2W4CJkiNAFs0YXlEDNUZqa5jyHBMXvsF
t33uB7NxZKtcXuf2RGcxYocgT2RkEEW7eJjQEIlZsvxhwcrKIgYfkGBZPYoazCBly+JnvknG/RmHtP1Be+t8w1W/geP87gkcoLv4ZgosMzf8JgjpD179FTIlM+zsokWPn2e8h4hBL/zWKBob+vxHMIPhbBGcQfA2L45j6OJZ3EalXOTFWop632H90GZPL5NdO4kRPo5Hc9chfsjocUz63yOT5v8UJ/gzBk2jyCK5x3797mFzzewjO8+qh7eggpJUfQkkTjYXVVmQaJ4EWM8OjDF19kcr8MoZawQxmMcJ5ipEGcgSVw1j6Mvm5E2T8BEW3czFiYiniwrDF3gsBQTVkLWdgh4qsn3BEM4HCAFbyku/sy/LKNofrVYcdcxFRvgIU8JzbWdz3fmAvv3vw3Qh+QLLlx1OJkQHgNDBFzFtTh6SR6O+Jc3D0ZZhfhvn51HC+RGx30HazwDLl+T+isPQ0ihKmP4sRB8RiCKu9gOWGXQcmEO9Jf+eg2UvMrSimCMVBlDlFUDqMN3SA0NpDzE5eOrydqDLFU/unqO96Cy8eug/Nbs6NvRMYImQfMMro9KOs02x0avjaxCgSTF/HMVoETiA5y+GHj7Pz+DGUTJwswXPsffExTDfAal6hfPkKRrSE4BKVuUfBUAmJouVi8iKCJt/aN8F3Du3D4hVMLqFFCTv6a+yVa/jVPWSXr1BYW0Qyg+AkcIEEp7aCyQwJh/VVwENZk1jBE4DF6vghYJp93/494ArgoakgWCbrPoLFi9z+0hqF1TaxlCgDDpwPWCgZVGuKfFsxsRIxvhLTyghe3O4AcMfLLtJrcegFF6s5TVQeY2ihQQTsvRSw62yL4swM7/qb65Suz7LnxWnueXSZzMoih4+3+eCjTfZOh8jI58ALNQqLLbKNM7ztO2d4/xNNtAEj568xduY81ZWAQiPmnmca7JoJOXLaZ3K6yeSZa9QLkm3zEaPLMfWsQcFV7J0OeeexNpWa5vS4xWLZ5NE9kqWcZHQtTpCZWBhMI1hFciV9GHJSB2w61d0hEg7DVFcnNLuBOppbgEvpw8iF1F7nE2dztd3VE6d5HcFVEgTWkwj+konzf8HlUYvbznrEEmLTBi3wLIFvCp6etHnHyx53nvI4ctbn4MmY3SfgzJjNgasBd5z0eMcTdd710DTjFx8hW18kv7jIUj7DxOlTVM7PYEWXcZXG8i6TbT6H1aoDbSx3gdDW6YPnKHA1oWdRSx3pUwgeBy4h+BMEp9Lxp4OFjAnZntZTkWhoW+qIXyXmLak9aKNk4mDHbEOzHWij2U6QqQI1Iu5KHGm1SmPijrS+JhGcT/FP24C/gaFCUreHI9i7E1E9DUdK8M63wu1VBE8j+O2k3DpG0ubQc18i3/gOVnCCO18OyDfmyUTfZd+LL7LjZI0LoxZZTxNpg/GlmLc/fJWf+eIlDh1fRbbq7D/dwmAOZ/k0+VbEu/7yIn6pwvYT11EaKqs+Y4shdquGFS8gWwKTOcz2lYS3rGvEsoqbsanMrGL5C8zZglrW4N5n2+x/aQ6n5aFFLn1QMWjZBQoLbfa+eJU4N0QwfoBsM2B0NebguYA/uHUHQekQa5NHODU1xGrJZi0nOfDiPO//iyWc+WnaQ9uxQ4Hph5Tqgm0XG8S5Uf6He24FYXN5xMJo1Bg/c4LFkoHEpjlxiEdu2Y/Ukuslk2O3HSHiEFFmFLs2h+Aikgv41gEgy9DsHIJV5rZXyC3PMHbxYQTPYoQvMjUXknXDRF+qQ3DXXgRfhYMmcIzxi/8fMIfkBIa+kOrHGFF1O2ARydupTM8RWJJIwK1nPV6dsDgxaTFUV+RbyZ4EzazBdNWibUkOXvc5eNnnvV93KTYCJuYiXtiZJe/G5DxN1tO8MukwVI8ZWguR+iqVK5dY3nE7oT3Kzq8vkKmHjJy9RjB0CMUU3tBdQAGDM8ACWjqMvhIg9Ty3P3GFvRcDsASSM7jjuxG4DM99HcEMtcm7SJCpLbBsEgTjEhG
3sprfQ2gfxsu8P7UpWbYd/x4qU+HfvqXIy5M2QsN8OcHJWhHcdtZlZDni2DaH4rb6j2bmeWl5mQceeID/+J/+E5/8+Md/rHEnf/LlL3Po8GHe85738sDHPvoGYja2BgF18EMdzt/W64CdmQ/Rt9TTB2kRPcugkhRZliytaLlhM6d0iUgboGwBpuj+TpmiDwmDXF9q7JTV0euYl/VlDdmP5cJaL+eG5SjRi8/qopOs9XsUoG2xvuHbVit6MsUSdSEuRk/ohZkOkpl0tqOcOkJWOnDo9D2ASS88av21g6zK9tyP7HmV3fqXWic7LnXRgZ3vRHIeDdroMKM696l72jUN/1A9uz3pzTogOuUTN8C1dbBYYgP7SiR1pQ2xvkQnNocpCA2G1l0EUN9ObqQ7dKHJ6Y31Jdh6J770M8MAQ/bvjJfuLtmv407yec+SX3cPrr5d5bZaIE12zUvqQCGQ3V2zDJ0suBlaI5RGpveRvPaew0wdoRuspfdFnyV/qg8l2akPc72zGDLd4qpfR3v7r6k1ptA9Tnuv/vXWT69ebWx/2VNWs6eeOsi1jcvFKSzNEH1LndpYx9cJ0YN9JPVrRPIbQbLDZxcrmDZvx/YgBTorwU/3h3QE0hNoE5SVojUF3T6TbMYhUGZaRJnebxqeoMzOuRN7pqVYNx0bljqVkSzRS538Wbof87Y5DG5jZKG4QdBb74K9fB0hNLLntcOztVKU4dZhEWbatbQUKCm69Sl77botUI6AoKfv6XTnwFTfRO91xY3Qhz3xMx1bxfomL8lrvIV+bVxGlzepr3402PoyttiwoK83h9D04i21Bm2lq98ddJ25YdVApHWcrC4osxPWYaKQaCvpE7oH86ntNFzIFBjpWNntawKUle5eaIi+KkvCQWQ6PsreAIiuLU3aeB1Fp40UHyi2CBmSIr3G+jaTHaytne6010Gldb0AU6IyAlpi01irpUAoRSUdo4XWiDR8ZX1j3MQmdsyr1L3BCWKLvtDBbHbsc4++SLEFWtDp4eLZG2xY73Gbw9NU6g/JHvUUPfZfdHbXlALtiOT5N+3zveOWQa8NS8bmToiK7iD8eupd9OmsTOsy9ZdM0YOVlBt8nQ2YW71VSJhGqE7ohuyOdTlFF5nXFzInEx/OSBXu7zzz/ORTT3H4yBE+91uf51MPPvimxDjfTHK5HI899hi/+Iv/DT945plBFPtABjKQgQxkIAMZyEDeNPk7zTxfvnKFT33yk3z5y1/mwx/60H+xm9i3Zw/fefhhPvKRjzA3Ozto1YEMZCADGchABjKQgbwp8kPPPD/73FHuufdefu1f/av/oo5zR+66804++0//KW97x92EYXjD455++ulBqw9kIAMZyEAGMpCBDOTH5zyfOnOGTzz4cb7yla/wq3+Pdvv7N5//PHv37+NnP3PjTVk+97nP9e46uS5923Gy4YAbYbi2OvZmorvbaL6hn/Ueu0XZ1zFK4qa4nv7P9A2OE70AupsU5PWIeEP39/pPL17H9+KH/O0bPV7/UNWzGfa3jqzTW51jAzZxa6iQ/iF7zhbb3L7WufTrOE6/gWNv/KMfiejX/KYfn6hf9zn069SpG/UF8Zq6dyNo20aM1+YtbW9y/puGEG+xj/wm4yNuGlX7Wrq+tTZvFTuvblLw12PtxBY95XUxSl+XLtxwB3nRX5f6dVf+zWrstfRHvKF+pl9zVHij3VC/8W/Fa/1SbDlG9X3Vi5F8nequt3QAblCSrZpBvc6K6elLiX3Xm9CVb9z6idenTb38yc6nWv+oDOem78TrcGz0a+qu+CGLpn+oYXyTDdI3UIstPnrDzvOTTz3F/fffz7/5X/4tH/zAB/7ePQ386Ze/zPXpa/z+H/7hlt+rOGZ1m6SdE2xvx0idBMOvjJvMT5n4juh+trrTwIgVUEUzSswIMEYgdwMjQBXFPrRhoayhFOOWQVEktjKsDBuYCiqewog1ftbEy9u0SpJm3iAXajJBQGxIAsfEzZm
YcYz0mtxfqyOiABl5iDDACgMqbgvtCGQUMN4MAYUm4u7VNQqtZYYaC9jRKmiFoImkhe2lhAU8hKoDPjJopP2/guGuobCRcQyE5IM1hGoCfjfRTroNtOkkiWYoYjNJXHC8FZwgwIoiMu0lxpqLGKyiGQUCJKtY7aUU59UiSSYJWOcCV+ji0wjS93Ukq0ADO14EaljBIgnGqs46NcJL/wISNBMIfGKryER9lmJjFrs1B9SQrAGt9Pcu1eYydrCUnich0SbfTZJkOrQSUxALBCFW7CH9FqBRpkMS7VQg79aTZMgoQOIi8AGHvJucV6gARQ6tJWboYkUaJRKWbz5UWJFm25omGyXvPUfiRDpBJbZjrBhyYWK5WwWJl5Ggk3YywhpW6KMY4s7mKklGvJ3Wh5vWkwU0kSz317EXQKMNDRd0gYRMkEHGa+kxO0jIBiL9f4CkTVjIEebKKFFEm52koA7mKIemCJgoI4vGRgsLUES2hfTaeENFYtum2FKgFZ4tcXOCghugyOPYKj1fhGIcv7ADEDS2jad13gY5hMRLHaE11pNkXBK6QIJhNL1G+tkEUMf2m90BT3QRXS5xJo9UTYx4hYRw0GJvY5mp2grgocgg9SrQxI6XsFuzGHoVK1hL69hO9dDo6m/S3xL2dlKnFtAi35xNr+uQEDkaqa4FaCpJPUchRrtOtrVExvcQUUDF83G8BrbfouIqSp5ipB2wt17DCQKM9hqmW8dxa0g8ZOgjA4+xVowTePgFA3SMiDTtSg4RRdQmbBCSZskgG0SU3Abtqo0WGq9gIXAxdIPYhrwXog2DOJtHBC6NEQPpt3G8BkqCjGO05VBp+2SDmGykmWpGZANFPtCUPQ8RR0y2Y4ZdxYib6HXRj9M63o2b30PMJKG1E8Vu1rbfRkKguDPViSJJsnC++3/NDqCCJp/Wv5PWu8V6ElDnVQOHwbZBOj0PPS3GmmsUPY+yr1icMmiWDHxLkIk0hxsRVtxJxtZ4tiATakpeTM5X2H4bo13DaK8CPpa7ggxWKXjLSAKCSgZBQMn10mS5kDhjAxKBlyTSYqU6nAFLkiD3Cuk93c56olgJCJG00zro6LINKKRbT++3mtpQgaKC0C3WE4Dd9HwmRpjgV0VPJrjZrqfvfGAISh2bsh2yNmQFxDFkM1DMgSmhkIN8lgR/l5RbI9HC6NpnQRPTb2J6q4BHWMgj/TbVVh0ZRTiRwo7TaaDAQ8QBUT4Dsd+1abZXwwhWsVvJ3g45t4ZdW8BqryXjlzAotmtIXAzq2O1VlFFA6DZFr4UZNIiNHEN+m6zfol6WCBUh3QZhpohiEshSdNsoKQhyDkJr2nlJc9Sh6AcYocstrZC524uEloGd2vSM7ybjbhygDZPAFiihaReNBIcpJI2qzf5QI+KAXBihjAR9m/eDJHFaxUy0ImJDUG3VGG54tMZyEAcY3bHKx4hWWGdrtyk3F7H9WtcGQkymvUZoSaSKwLSgWICRHBQyMFRJ7ZDX565L2kg38RekWgVc6lWZ0Mm8iKqryISagu/jZQQLu00iQxAagkZO4tkSJ0wSQP2sgeesJwZLpXEiRSZSOKEm78VIQgQKQ8VI7WKpGtXmMjKoIZu1BPvbqqX9ILGv0q9hRjFCh0i/iYhDZOgCGQy3g2/MAyXMVhvFWKLjfjs9xygAre05RBQjokS3veEjgEW7ZHcnIj1HUC9I7EjjO1DyFBKNqcFuGBhf+MIXvvB6HdNXz53joz/1U3zpS1/iZz/1qb+fU+lS8uCDD/LgJz7Bhz/8YSbGx/u+/+IXv0jrs7/C2HXBW19NDForK3l6f5a/OZynGCjGajFOoPm/PpHhp//mGkYgCcVtBGO3od0ic0feT3GhjSBLa8enQJqEQ7sRnk9UnoDIoj22jz96b4m3XfTJ+ZpqQ1EvONSKOV4+4PD8lMO7T3nsujBLu1Rmrexwfdhmx/U6mcVjvOXFeWToY+kZiBSOL5m4OM35W6fY/colti3axJZ
CeE3e+91nqC48zOjME9j+cygOYvESgovkVgwEJxKnVJ9C4GHEiXPdHv8I2caX8fK3IwMTx/s+5dVlBK8iaKDs20Ba2NFz+MPvQiiF415jYeoI5aWzFFaOYXsTGKFi7OrvM3n1KILHaA99HNt7GsG3MNQzCGrAubRjr5CgmDSa+xBMo8kgWEiN+vcQHAUuIPXDCF5B6u8AswhOpwbiAgmebDk18jUU2xDUaWz/AHtP/XcUV/8AI/4m8AqCF4BrqbOyyvi5q1j6ceBkOiAtIJjGtz+FGX8rPf8k57e9h/3nz5Npz2P7S0CWsDKG7c4QsZvhhYsoKsjAw+QY0MYv3EXl4vmkXDHE2Qlk6OHU27hDI1wbNrFizcRaxI5Fxdh5DQZU2pqTO20mV2JG6jFvPeHjZ8C3BJaCMxM2gS04eHmZXP3bGPoqVmM73tBu7nzpzwm4F4NLJGivqwjq6b2dTDF2hbTO/ITUdfYizMwBh9Lvskh9DJhF8b50ULcRBAhCBNdZ2v0RWpVxbFnGyzvkVx8DxlFMoDJlInsSEfpElb0Iz0cbFYx4jsWxg1SXjnH2jg+Qi6E0L3Fcj2/eXWa0FXPLqTkMzyawFIU1H0GEVzjIq7cfYOLaKzzynp/iwLnvJ+3y7tsQV19IB9SX0rLngcupc7UPyGPXr2DwPULj0xj6S2RXJwGJMksY8TySvwJi6pM/S37ta1jRCeBhJBb7T8+x//R5BC8SmW/BUH+K4BhSP4oZPoShruG4y2mefAX4WxLc0aVUpzwSJrFC6EeAWxA8Sa7+NRKk3X2pnp9N8VgtAvvnMOOjKH8vjvscpZWnyK2NImKPsZkWpbkTZNdaOM4Yu67H7Dm7wG1HXyLbNsitPY4dXCG3NovBdfBtjGCV8eY4Q9NXuHhkkrFrF5FugZO372D7lUv87YcOc+jsGk+9bZzbTy+z7czzvHT37QzPN7m0q8r2qw8hOc/1I+9j1yurmFrSHp4kN3uOJ+/fy5Gnj5JbmaE5tov8yhJRaZTJc4sUPEneMzlw1qdScym1Dba/egmkoBDYlJsxASb750Le9fwKTutp6qMfZfrg28kHw6yNHEHnD/LNd93Hnaeusjb8j8i4z3X1NGZv6hIP05r8aZzmDIF5K6b6Qfrwu5Za+nJqZ+LuQ2DMLyK3nUmcSH81dUBn2HFuiqFFQaWV43c+WiUXaZ7fYXNoNuT+593EmWkqcp7m0dtzHJoJGKkrbrniMnTpFTLuC5jqaQTL2P4JTHWR8upJBFmmD9/F8OzTlFpVlPYw4yus7HgXheWLSGqEpYOY9VUM0oe3KtCcBMaAZdrVX8Nyv5PO692H4JWUPZ5FMAPGfQj9KuBixgXgRXznfsz4GWAeb+jj2O5X0zr5SSRfS53tQ0j9XOrEH0RwDBDYPmlfWiTkAYw75mH1NATvhHszsLiEdl3Ep34KVpYgiuCjPwH1GjxxCA4vwJJBUPwY0l/GUM+kNjhANoaw9B8DC1w/9DNUp08yefUKyqjy3OEhPvRCDauxiuVfRUvB8o5dFOdnsMOXkayQqS1j6u+RaVxC2fuoXn8Wx/9/sFse+CNEJc3k5RMpwvQFso0G3ui7yTafoHxdY8XP0R6/m8nLVynPzvLt+w9wx7EzOMErLO+6G+nswWguMjTr0ihOUB/Okm+0OX1olMtTBd72gytYa4scOGfwa7+0j9unY3ItjRKCvS+fA1NiuA2Uk+fKriEKzZDjUzmqboSpbJ49XEbMa95x/DSGmUPGikxrmmK7hLKyWL7LzjM+zbJm//EnGDtt8/Jbppi6eAaDJxGspZNLx4AcijEkP2D0whkc/zIwlz6oWOSXXeZ3HqFQmyGz6xTc91ZYmofRCuwchrOXSVCmo8AOBMcQrGCGFQTPIziNpMLD730XriW597kFiqGNEoI7j17i7MER/vknC3zolZATexyennKwEXzwhTZCaOaqNvNVk8ASlNuK0RWfrBsTWga7ZiOGllwyzRdRsgz
OCIWVJ3HCM0xcPYOhX8JUs4TiEE78fZL9AiSCeczAQxpTWO3TWPpVYnMKu3YKgYntLyMI0YwiKGD7K/j5d2KFJ5GhgWQNxSEgz9++723c9crjGBoibuXl93+IifPznL71Th4/lGF7MyY04YWdGW6ZDWg7krtfcVmpmJzZbjNx2nz9M89Xrl7lnrvv5gv/+l/zkx/84N/rWJTR0VG++tWvcv9P/ATT1671fzc+jqPA6KDC0ul8S2uyuh9Pl1HryxGis4SoVfq0nyJOdNwTjtGZ+lcINJZKThBLuqimDnrM0HQRT50lHal1FxWnMNZ3FBISLUU645s8FcW26D7WKWGmswrZ9JXu05ruoo46eJoeRJdKEHHJJXvxSEbPbE0vBm3jQrCdlF92ZjUySRm6SK58OmPUmVnZiPbSGyKIes/jpNfNsI6VyrCOberFOBk950hmXhKjUEiPybOOTJI917B6fmeDVj1PuR3ST4LI0X1oKJ3O+ps9a0IpyqwXGydEek5AGn2rjR00WJxJ3if6odcRhRZ9+DojxXytn8ECIVP8mZ3uAaXpRfWkc0k995m2u92LIuxdKMymn6ktwhmMBEmU4uUENw9p6g3+SfpU+ns0ykrBXek9aSOZIxM9OiGJMdL+Y+oOnitFY3X107xJuI7Rcy8Wuqsj9NSL0dNeHf0y6SDM1uuno4+ldMUps6GO2XDdXv5VpqeOyun5dY+uJ6jFBDvYwUsls6gJTlGiLYFOZ/NiQ6CMBC+nMBLdSftCf1+XxKYAQ/ShBBMEU4pbSttASYnGTGZXRWc5UnTrV/WgozQy1cOe+k93plN2x6ZBnEnwd1qCthLEnTIESkKc6rsyk74l4zjBFGqFUDFCxVgqTh8+4r7gNNGrZ0qxGa91s9cUnah628tO6tGSaFOQVYmmSJ2sEMW26AuPkTqZaezYbi1kWu92j4216aDlOpixpC1hHXFJalc2lG9jnICOt7gPY32JXIu+z9dDBay076me/q+5EZazv1+wrqeqB9moNmDxdE87KL1ev/TiK3tn1o2uXRdpv1MddF8fqlOmfUKnmE/Zc560fjUplrK4fq+A6l4n7cM6WXnQItVXrdP3sotR013bplILJPt2+5OAoVL0qRSokqCgVFq+NByjZ2xG6+4umkZPyIShdGLXketoR0DLnnE9J1JNMFBCYnTDPMyeNstusO/ZHnvUKXmKfO3Qz9R62yRtZWwRy2D2fJ4FZLf82kw2S9EpLlBoTTVex9xaeh2p2W3BDSEPKh3zlEywvOsDouixJx2b2LH11oaxSPbsYmnSxd314SrX8XOii2GUfX3M7NFPgULqaL1H9kQ/mR1MKMn991bV63aeH/r2t6kMDfHff/az/EOQ973nPfz+f/7PfGTDhi2W5bzxuJ+BDGQgAxnIQAYykIEMhDfgPP+zX/5ltk1O8qlPfxrX8/5B3NyDH/84t995Jz/z6U/3fe567qDlBzKQgQxkIAMZyEAG8uY5zwAP/fVfMzs3x2c+8/P/YG7wT7/yFc6fv8jnPv95AJYWFwYTzwMZyEAGMpCBDGQgA3nzneehoSGeePRRao06P/HhD9NsNv9B3OTTTz3Bl7/0JR597DGcjIOUYtDyAxnIQAYykIEMZCADeXOdZwDDMPirr3+dcqnMAw8+iP4HMI2bzWZ56KGH+Pl//I/5nd/+baoig5dLE0KM5FUjCKUmMgSxFIS2QEQCLU0iu0Bjd5oIZ1hpyLkgNooEVhL8H1qgDRMRR4RZh1bRIBslyDs/K4hMQbsKrYJMEglMlSQQGAaBIwhNCC2BW5FpcpOkvS1JAIztDKgYoUKyQYDQEUbkIyMfI3YR2ifBZiUIuATlZZEgs9z0VfX8Jck4MvIBhQgDYgNAEWZy6TFJxQgVJMdEPjJIEGhm5KeJLD5m2MIImnSQZhAiQ48kkaOTmFBIy1MgSeTIEdsj6fGqp3whvfvMb04M7E2c6U0AiFNMnI/pNdPzdZJiOgk1nWS
cDL1JXIphkoRCjVA+CT0hSUxxIp0kbCLR2ECMFpo4nyco5NE4+KUMfiWT3pcAKVHSJMonsfVuycYdzaIMk8AUBJbEcySBKYhloh+dRLxslGB+lBDp55APdYISC2Iiq5PkqQGTMGei7N4EIBMYZj2xspMs0dvdMxB2krDMDzi0EQAAIABJREFUtK46aLVOqljYU8fZ9LtMt1+EdqLPneQfJR3QnSSgTiIXoJN2NVQAgBUGoGKMOECokKEgwRQJpdF0Em8TbJaIPTJ+GwjIBy6YqR7kMjBegmoRZDbBL5WzXV0QKTYu0QeFUG1AEeVSckjs9umFqRo9fcdM79VDpzg8oTvYP6dHnzv6laCj1vtaJ6FJdvtYcnzv70Hjs56Y46TlTDCHUnnd/iCiAGUZiChBOkVlGztw0UKDDhO9jzohdAKBRqcYvDDrYKgQjcBUSTKURmCpRKezoUJLgRJ0282JQgLHSNopvT8r9NMEVyO5gm2TCyI6SZGxBGWaBBkDhMTPGUQm+Jkk0c7NJ8dFlkQLEvxipLFi3U3OFjpExooga2KoCKFjCmGIJo9UAWAT5EcBiZJ5NDlAEORlWlf01HnHTvS2UdI3BC5IM+1D/RMoypBoCeVQE1hQiDWZWBPJlJ8vksRvAFOBFWu8XJLApdNE2wQ3aNBJAtUYGHF6Lb2e/ChVT9KxIdIE9ERf1/tm0g9k4Kf/L6THJDYs6S+gtc16gm9CFjHioKurMvS7tkng9tiJgPUkrSBJaE5T7jpJhRIfMjYIMymPbYG0ILbThpPpbSgIo6R8IjmHCFzQFr3JtYJOuWzM0Ev/b4JWjHoeQim0aRBZWVBgRgEi7CBZM2lipoK0H0v8tJ0DhA4QsUIQohhKj/Mx/AQFKXUyDsoUgwcRhSBEZRyUaSNjn9gQaCzCfIKoM+IYpIEZa0ylCbIWGk1sSAqhxi3IpP+gUaZF4Jj4JQeEQWwKpI7JRiFGlGDpYimohIluGnGINhPd0SKFFyCQKsSIwq7dsMMASQhEaCZYT4IXPX8dPGMVKBCLESBJzO0mccapbikNUvbZ+0SvOu+9njE2IB8GmAhEHGNEyf0kfRZKYQJFHgoitrsRVT9Gao2blXiOoF1O+pibk7j5JEHTzQiEUigzSQAVQJQi+3Q3mRoSfKxLLLJpsn5MgkM1II6Icw4gUIaJQBI4RdpjJRRZgmwRv1xJxq54vS51mowoiCgEQY++xzh+Mm7EhqAaaDKRxo4hSpMErVijOuAHEp/xDaHqOuI4Dj/36Z/lz/78z/kP/8d/5FOf/CTZbPbvtQM9NjrK0vIKf/3QQ3y0+Un+5i6Tu85HzA0btB3JSsHg5W0WTgxFX7NQNpm3HO65KJmbOsS/+K9v4eNPzBFVJlmr2JRn55k/+F7O7cxTbZtcnrQpeybO8jSn3nonf/D+Eu855RJagt+7p8iwq/ijB02+O5LnjtmI9u1t7nlCE2RzPHUkg+tIzmyzuXCvwx1PrIBp83/+/L28/YLN0q4DlGbPYKkrTFxpY4bzOI0Z8mtnybS/i9THEVwCrgNZBC+g5N1I/Q0kx0nwZRYJJk4hGAFqWO4igssYcYO1HYcoLn+bV/d/htGlx1FiCi3HMeOngVWMlsQOHkZQI7cSIPU5BN/ECL9BtvWNBJuUlsEMI1znY1jxw8AuIj6CpE5k3IPQAi2nOHP444wt/H7q+E6T8G4vJka6OgGjBdg3DpYDdibBT0Qdgx+SoMBIf9dI8URnsFt/leK/NOuZu5CwoMfRvJWQ2zA4DoQsTv1LsnWB4CxSLxHIj2HqWWJuperuQnp10JJw5ABGe5rGZJ7mtiOc3n2EEbfC8bcc4NKu7ew6X0fSICjdRlCsMLN3hOrMIl//2Ns5fnCKQwsZjh7O8f2DWc6O2xgCqq2Ies5Aaii3FLtmQ2ZHzQTr1VaYMdx+1mfPySvsnrf4yn0lPvjsHBn3MTT7OXfnu4nKVSrXnyZmDwZLaV17JJguN73/AMw
CVDPQ3gNqOnWKh0iwfG0S7N8UUEMwlw7Q44Tybgz9DCH3M71vP6d3ZpCGzWrRZuLqYjIkb/sgIuUNGyyDbyIIMPUsgja5Rh2BpLywiuW55JZmMNrL7F+uUmwtUZyvY+mzNIplKmtPAUNY0f/L0MI3EPoC264+hTwylfBKP3E/7NsBu8ehlIF3HIZ9VXjlLOAjeJkg+wBW9EcIMY/UjwM1zr/1s4zO/DmGehXFvgT3dNjAufSHCK4nurtrEmoLCI4iOAk5Hxk8ltbHHcBVYCdwCphMEYuPw1ABvIswOgp7xmExJOEBhpArQ3gB9k3Bag0IERwHexhsAdFtwKtIrYnt27DCJ5FcAQyMeI2ZXe9h5PrTSFqcvvsebjn6l6xU91KZOYHBElZ4EpE6kxpFZNyCpZ/m5J0PsO3qSYTMYwYmxdWXaWcOk7MVdqtJIa6Q8UJOHB7ilrMrWO1FRq+FHHv7Ht7+5AsY+iQQU5rOQmYUlS0jsQhLw+w+PoOIfZSZ5fK+CRzyvLQ/x44FzXfurgLwym6HHYvwzfcVueNkjdO7xwkzgqmFiGyo2LYcM7pUx2mfwvQ8TDXJM+8Y48jRFzFaTfZemMcvHya39l0kOX5w3//ErssvsbT3Z5CqhIzn+Nb993Lrqa8Sy7sw9TESxNp4agevs86xNWH/GGLlYXjHQbi6kI6KyYOx4p00R3YS5PLsmw149PYc9571ODQdcHHSpu1Iqk1F05JcGbe465JPta74zjsK3HGmjT+0B6d1jdXtD5BtXESxC0FEaO3FVhWc5ml0NIQVzf3/7L15tOTHVef5ifhtub58+1av9l1Vkkq7JVnWZtkYCwtj0z6GBmyWmfbQwzlzepieoc90D9gcoOkGBk7T09A2gw3u9tgYvIFXkC3LkmWtpVpV66tX9fY998zfL2L+iPhl/l7WK1m2LIvpyftOnnyZ+fvFcuPeGxE3fvd7Dea93I9fOYmgzNzW2+mb/yaCvwHWoVq1i6DnAR+nqRGcQvMj9poMihuRfBnQVAfei1/9hO3rIDCF0C8hKBgHSfMxBHuBqoWjc2DXEGLlFGSzsGsQsXQOtgRQUgZGpE9CbR3pnYKH74apdSiehne+yaBLPKvgXTcbqMvlVdi3E778BKw9CcM9MDONW1+lMvRGgnLV2pcCkmUEZ4EDZJcvIZiimTuCqEbc9cwzOKGkOjDC3Pge+mYXya6cxQunEMzQTN9B5IziRo8DJ5DRSSTPAucRXMbRWZy6RHKJpV0/R3blMwiex619FcEkgqMISvi1GdxoAUGT7WdzzO7ZgdAOfdNHmd9+mNQ6nLn+ACPza+TmpqkOjpOrCtwGnN3ey9Zz08zs3Eq2ovjwnXlunqwzsqZo5Aq8tGuAp24bZdc5wflDGfY8e46xK3Ok1orMbhvhzLjLm5+rkl95jvRqleLobqTXj5KCtb4cmboiNXeMzNoskduPG15haGYWVz+F4CUWt/0mfilt+eijGUEyB6yiuJFq/48iq2nOH3o3fQtnmNp1kN7VS6T6XoCUC595BkbyMDYAR1+EwIdcHVF/ArwM6AaC0wmH1Sm2nT3E9M5xdh99kUy1zti5K+ggSzXfgxM67Fxocv8XZ7j/acENF2pkak0+9cYeTo55fPYeh9tORXzx+gyLfR5HXmrwZ/f18MDTM8xs6aVnroyMSlzZc4DB6c+jxR5gxeaFWEZylKVtbydVKiH1aRrZNxG5DjrUzOwZp3dukeLY7XjFZc7ddD9fv+d69h3LcPzWWzl/+Hq2nMrih88i8JFcIPTuQDn9OOoU289KXDVvHTYhg1MvEIkDPH3zVm45X2PrYkShHPHlW9IcvNJk/1SDhi9ZzDtMD7sbth7fF33m059m957dPPLII//ovc+/+3u/z0c+8mHe97M/BwIcITYgpQlAiDZ8jLS7q/j7XLzLM1+0bpLWNyniTD5aIIQwe217mfVJ4glIibZPrwWWYi+U2E1h7OXHAoMI3fKUicgA5xm
EJonARbQ8s3FL3ASkV2qTYY6vk/ZdtLuUgGkRiT90fL002y6zH0OQQbQgzkw7BBIZbfJojBaJumOvWwwjk+RIizFteKQNicb0Jv3Rtm42+Y1rXG+hhVqeO4FQbQYIGcP+iPYVwoxHGxRNtOCo4k9x8bGoGERE2+8YCkxvBK2KUcrEhpsNW0UgEJ4pJ9knsSGjlvVcbMgytln2O7mJN1p0yEgCTlAlx4tW/zvTbgnd6n2rdS0+xnBaUQJKThuHlu5IeiUsPwUBUmUAjRP1tB0sItZBA3/W7o5ot1uDwOIqxacMQljuOBvGX+g4GYVMlKOMbGqROK3ZLFOd1TFlO6Q2jkVbVGXH9+EmEum0RvAqWYplTIBUcoMEiE3g8mJbJKy+CQkbMo2Jq/UihqsUgEjqbiRaYFnC6osO2sPfqkO0ZZjEGYawSmCtl5UhYyM3Bp/otgnoLCz5v5WXzXOIJmVbbtR/gRWga9kQK6sJOyiEuMaVovW7EMkqNmmV7LS6JDC8dLuvrVlCJGRMtO1GAoJStGaHzesUrdMP7NyQnFFoQ4PpxP8tnYhlzjofRFKGN8kCpxM6aaHgWnPQhraJDRnopJYJWymQDdHCBBN2bhYqYWPURrgysQFO0t3IidYk6iKsp1a0vO9tnZGZhPbothUXneZQJOWhbdgDoTeIqNQb/cHG1BgbJWPZkh0m2/JAJ+dIHc+3Zr4XCVkWL5sC0nJYiM0T6kWbzItath1MWiScTfZ/lZBtHcPSJuYBAeQFohekb7sr7PlLDLMqRMIeWB7qDn3aJEWnwOlkVoddtLzWcb0d11wjC6q5RySuEjhRrOvaHKgINuH1xra8qsWz53l8/KMfZWBwiN379rGwsPCPbtFcrVb5l7/2a/yHP/pDPvGJT3Do0HXdh3W61KUudalLXepSl7r0fZH8QRTyV5/6JD/93vfy9ocfZm1t7R9N54rFIu9697v56le/xhe+8AXuv/fe7oh3qUtd6lKXutSlLnXp9V08O1LyG7/+6xy64QYOHT7Mpamp171j586dY99116GU4pmnvs2h67oe5y51qUtd6lKXutSlLv0jWDzH9Gd/+qf8yq/8Cvc/8ACXp6dft0790R//MXffcw8f/D9+nb/727/tjnKXutSlLnWpS13qUpd+IOT+oAv8X371V5mdn+emI0d4/rnn2LJlyw+tM/V6nQ/91m/zkf/8p3zsYx/joQcf3PQ6LUyueoTYEEMjlG7tJrQw0CQGxs5AiekEhBgIlBBEjjA527VGO8LAPzm6/dw/ibgAZYIKZCIPfOQIpDKhNCp+lt6RaFci7QP6WoCWFleP5MPwicCuVtBdnIA9jl5KQrbFnyPa8G3mXSgTTRD3rRWZg7YBQ+1AHH1V4Ije5LPaUK9u5ahPBjwlIediyDUbjRRpEyQihA0U0x39+W7U2UaXGD5Ku6IVbS+0QjueDWYz5WvhonFQwkBYSSmJIoHCjLOwYxjDzEndrktj5CTupdRm3DXawNsoex+mSi0NpJCy8qJEO25Kg5UtEFrhKDYEWckE/JUOJNRNO5T0kMqhHUxjg0aV7hgvy0vXQOzR6AjgigXCBvzF8mzjYttygkYLGV+GJhkMae+146sTUoSOENoG5LX6ohJXJSJ3HMdEogja7/H3roaUgc0zAAtx20Ur2EmqKKEXBtILKQx7WoyXV/Omkx908igOognYNPuS3kwm4zo7ytWqVaZyPUQoLYyc1XoLARjDzilPIpqOtQMJ+DMEEkXkSKSOg3NEItCzHUojbcBXFMhEk2NIPmtnhHnFNkogUIGD1tJCd2mkimVCo5xEj7WO48AQuv2bRmMjGY39QSOV7b+FOzOQh0Z+Wr+pyPBBa1xt4OhEy9ZZAZQ2GE4nIqSE2GQcTB+1lFfND8n3ZBCYE8evCXAjK6taoIRjoNaQaOmgWx21kJiOhAijB6o9/Rr5j/XIYfNAxlhu5dW2VrX7GHkeTpN231rBfjqhC+2gL2PIEnx
xYxsgNtYVj1McTReIdqSvFGacYnluzR+qJafCzksaNwaIbAfBaRLzkClTJ3ighWuMpDDz7Ub4UbHhpYSD1NLyxNloP1pzpg0ORhCKjmDLOB5Qt8c+bqMiDmAUiTWEkb5ImgC0SLbiH5ExWqeJNCSSNthPG6hII3NmPYHaGFCrpSDKSJyiREuJVia4UujIlKc6Q7IlWkgjVtKx8aiyDR0awwnGa4NWIGZC7n3HCHdNG9hCT1hkxAhHRy27KbRC2fnKVRbmUsfjZmBWHaVxtMCN2mutmC+uUnYdFdsWmRhvlWiXDQzVytq3OBi8bePicdAWji+2ZVq0QUGN7jnIyNiw5NygWzJieIg2vI/HX2D6qMTG9YarNY7+PqHqvhu99S1voX9ggPe//+e5/4EHGB0Zec0XzqVSiZ/52Z/lyW8/xf/zif/K3Xfdtel1s7OzHD/Vz6GzTVylWc06OBr8UDNQUfSWIwZKEXN9Lv91b4q3XGgSuZK1jGRi3eHROwrccC7ErRR59uYJPnJnnkKomViMeHFvhoFqnu+8RzBTz1CoKdazAjcb4dckK07AgZmQ/TMh2TlJOXD4i7vyPHC8SjUQfHNfij0XI/rqOY5eP8rWRUU9m+LiaEAuKuA3M2glafbtAqeH0sBW0qWLwCCaHQh6KQ88gl89huBpBPNAA1J52FGA5bLV+IvAFeAsMAdcIVV6ARlVSNdvwG+cAZZA70ZyHOglTO/FDadpchtKZnD1WQvh1cRoWQHYBywCTaTyEUSE4nq0HGZu19vpWX6SiOsQaLJ1D7/2LTTvRvAcmocRvAi8BxrPQakXViZh3YOqhrCMgTyKOgxnkxYsBRoTLZzBYDeXgcMY2LZDKHkbxeG3cGXXNnrnmkj6cGvw0pG7GZ4+BZSRpFjY9hCrozfw7RsKLI33cW7HGO//hTHe8WKKvrmL+OsO/fMZZidyDK00mbi0RFBaRDFEZXCcyZ0D7Dh2lmZmkN4oz4GzReZGU3zjYIa7T9coZSR9FUU5LTg14fOlQykGq5qgAY8dSHPPsRoreYdaSrBccBidKeOvTZLzBhm/dJmg8W1gjPzaMXoWvolsZnjsze9k27l5IMPRW97NyGwKrXdTT/8khHuQyoP6MdBjwGXLszLQC3fuglsOwLnjKHYhdNnyuQ/BAJIqii2cPrCb1RFBugQ1TzJx+RxudIJG+gilvjzVnCS3folG/jrQAket2nEpsj58B+nyRUp9Y0i5StCYJlU6RrZ4Bb/xOWT0PJnqozjRWaQ4C3sdGAxg+zhUG/Bjt8NKEe66EbZNQLlioN/efA/ccTMELuydgNkV5LqD4DLsToHnQHmE3OocbnMVeA7JMdiWgxu3ga9h6wCMDMD+LXDqgul7rgfGc7BcApGDWJdYI0bnMJCIBZiIWHPvIVg9btZopRhO0e6QiKDpQCNNjEuL2g3RFSvTRaCIUOu2zFGO3fNL9E1KUvUqbmMFQZXCyjpeY5p0+SxNdzenjtzMwLSiOnwLyuvHr00h9SqaYXrWSlzae5j+K1eQfg5ZrSGEJigu4tZXEalhvNUFSPWRbbp86/ZtbJuusjTcy/BiGdQAzeAQRB7P376buUGPvnXFF2/PMFz0eOyOMRqFPoKm5NM/mubBxytMbk9T8R3+7sYMQ8WIvopiJSMZLrqU8i5BqPnakQx9JUU5LQm0R//cUQTfRDS2sWXmOdK1czTz1xFUv4JbXwSySE4ysJLHrz9OUH4Ur3YBqR9jy+xXCWoXkPoZhFeEQ1thYR7uOgDDAzA9A27eLBZ2D8OVktlsZX1YcYBDwDBzu3+CbNVAQzZ9j7++Ncu9J2usZSSVtMSLIFfV9JYVO+ab9JUURw+kePDLU6SKC0S9g8yObmXgykkidydnD91FrjoKOMhqHScs8cV7H2DfhRNUCrcidIhXm6E68BCZxWmcpkbyGPBWa4+HrB3tt3p4BVgADhi7zIS1vXWc+rq18yFfvf932H3hr4EMET+HHD8
GxTqaNGJ/HpbsBvPOHXCmCr0OvGEvHJ825uDIVpiahFQBtvXC4iy88SbYMQiLK3D3jUbvRhzYMgRPHoWRflhYgS/PQ7EEq/MQDgMap55F6UEkdRR3sr7lZlLFZ1mceB9+qQ+pVyHM4ag1JBVkWEPSy8pAnv6FKVw1zeyet5FbnqaRv4ljN4+zZfIMJodAvMDygR40B1na8SDBag2/soITYSHPtmMwkvcBA9SzD6DEMDKCT//ErUwsavy6ILV+hald+xm+dBnXCXjh0CDbzl5kYWKCnnLEqe0B150rEVRLnNkzysURlzcfrTE16nJqh8taxmFq2CNTUYyvKXqKiky5TmVgmFrvAP/lbX3smWqw/UoJWYtY23aQ+cE0V0ZSFPNpChUo5lx65hdY3Laf//kX9vOj36mwsG07jqjjVy/hVy8TeTfihnMIloECglm0PMTC1vs4dd0ozewWhqcXEaEg03CR6iIp7zn4znEoH4C9Gp54EcplCLJmw1QL4Z+8CQ5ug9Pn4I374bYdcGUZp/okw/PrBPUpnMYxHDXNiRvu5fSONPc9XWJwdo7U8gUQLhKXpf4Uh880GClG9K0Jao7k9rM1mo5geLXO7c9cRuX6eeJwmq1zgmZ2hLVChuHprwOnEazQdO4F3YskxK8uszx+F6k1QTO7E798AS+6wNzOuxi8fImlsf30z38GV/WQivrJ1yRP7s+RK2u2TC/ghPOcuOEhBuYq1LM7kazghgtUh+5hvW8P/noViAi97chI8akH9/LAC1W+eHuW3dNN+iqa41t83nCqarC4hWDHXMi2hegH73mO6Rd//ucplsrce999PPOd77B7167XbOE8MzPDLXfcwe6dO3n+mae/6/WhhGxNt3ZQBmZGk25qPLtjjCSELjRdiRtpUk1N6LoU0w5OBFo41D3JesahGhgA8ErgoLwUjWyIXjOO4kgK/EibnWdTkGqCG2mCiqDuwnrGIWiYfVTdFdCE0A+opHzGViIankvdBeWn0DKFEE20F6CbCh2kaCcT8YEUkddj/X7FtpdNSsgkvcIVO4nXrWeyiduoGP9wFNmFQZywJDR12J2ySVCgEt6eJCRa0FrQthK16ACkIMz02jJTCFzcZgwBlrP3x7jNOYisN6VaS+xIFRuTneiX8TjH4P+KNgRUACKFCrKEfsrWl0Y26jRSebvoVggahF4PzSBDJZC42qeOpFnwUH4KWW+idUSgFU3XwQ9DUs3QtshHC0HDd3GrIbU+l3Rdk28q5lxJzZNkqxrXdqXpme/KaUnNlSahjifwmprIsWcErhFQp1knX42sN834c1LldWAdTZZKJmOxP6Ga7UOTRxChdR+CVeN9VuEGb3ELKD/rQ08aZBMit8MLELUOqCJpPeXCesSxSSq0IpIO2rcg945rky5AnKxF+WkEEUpKkAZb022s201OEajiNWbtOLiQToPrQMHKVDplPjsSUqm2JzqThnweCjmD5Rc4SMqm6sCFagj4BOVYH6p2Q5kzOKeBC2nPLLACr8MrHXtmPKsz0E6mEieW6QVXolIZtAqtd1h28BlzkpLwdJBI+GCoYZNYGFD/Wq4X8HEbjZa8+0WTuMWvF2kELrUgCzqNCrII3bRy0UBRwK/UaHgBRMp6UhwkITJsIHSEE2qEikiFGu24lDNpENJ4x6SDElk0WYSoUvMcg2wmBKWUQ+S6lDIpGn6TdD1iLScIGpq6J5FoimnZ8sjJCJTrtBzfxZS0J27Q9OKTtDJSN8itFBE0bEKWCkJXbeKRiGDdJMxxm1FrPHpWNAY3exZUGvK+GZeeAKodJ0+OlddaCFnPjlFg5oMgjaBoTgA1NF2BE2milIUv07p1IpSrmROgmi/INZr2tETQSGeRYY3QH6aRzqGdCjRr9kTPoRykrcc6g9R143X0cmYD00oOkU2cIMb61UjY7KDDCy2RutS6vpztTTgV8lZ+rRxm3PZpYOBaz3RkT2y08WZmfTMnaAkpaZIcCQHZFKS
svjkSsi5EITSb4LlQqUHR2uZqWz+cqEZILwa4NYsKDCxkmM6DqtrTsAgI7clD03jvReyHjQjTWTPe0qOWiuHpfNpJXmK4OocwyKDxceo1BCnbDo9kwi0tcyAkmnWKaR9NwyTOaZ38arxQUUuZMVHSnAyHriBoKpCSUEIoBT3ViIbrUk5BLjC2W1RAOQKvodFSolyPyPMopiVuqM1JkPAI/YDQEUZnlEbokNAxMK+RH1DNp8HxiXyTxAUEbrNI03Nb8iJiW0WKZpCm5ptkRW40D8LDa4RETmRsz2LV8E2EsF5p2+b4BDGXaZ9G9ASQD8CTCCrk1latJpUARS1waXiCfF3hNkJEVEcq45WOHEG+EVGpC3IVWM2YjadUmkjCQLVBMSepeYLI8ZDSbTuDqQAeWqRtsisPt1ElDAIghRYeQodIXUU5bstzL6ji1ev4DQ2OQ90RaAXaNfJeT+WAACXc1gm2dnyiILCJWbAJVJrUXEnQ0FQDYwtzVUXVMyfIyq4TM3btKF9Lb/D/9Cv/I3/0h3/I/Q88wGOPP/6a1PGnH/kIt9x2G//yV3+Vbzz6aPdBnC51qUtd6lKXutSlLr1m5L7WFfzMT/80kdY8/KM/yktnzjAyPPwDKbfZbPLb/+7f8Ue///v8+cc+xtve+tbuaHapS13qUpe61KUuden/24tngPf9039KNpXiwMGDfOQjf8Y7H3nHqyqvXC7zzz7wAZ4/+iKf/fznecPtt3dHsktd6lKXutSlLnWpS/9tLJ4BfvLd7+bAgQPcc889jH3pS9/3gnd1bY1D19/A6MgwLz7/XHcEu9SlLnWpS13qUpe69EMj+cOs7PrDh/mLj3+cd/74O/mPf/In3/P9H/vLj3P4+uv557/8P/D0U0+9+gapqyFpkhTaYBOhNsIXCdW+JkrcG8OjCLGxKJW4RyTgrJLwdElkMKmASCAiU6iIEZws1I2ITKxH3MqNwXQW5qmFStHo6GASoq6DGVddpzc2DgtZo3T7/1aQU3x/LfG/hblCJOBeOt95hZ9fCemO9jcT7W5ucq3hUQyx1R5NkYC8MaGRK7RhDQlbI20hc9rBmDGinhlvsSmHVYccJJswXUreAAAgAElEQVTUAvhTgBCtsf9uPNKbCrFuQfu0XxslHIQZT603qaMNIaRt/6MkFFAS5kpKG+hjeZeAkGpLkegASouvU9cYzqjdLq2haSCJWnB11ySxCbtEQjf0RlkRogMK8RrG4mV/T9apr2FUNoNZ7GyPCfzTm8q12qC7RsZUwh7pDW3VnVBcyfLExuoNYlibB0JpZEO34PGSdlBGCcA0fTWU02ZNFwmIt1YTIr2JPYheRs6v0Rdi/E9egUzYyL9Y9uP6wnY7w/gO+1MMYyp0G+2vE4Gw9Vm15bWt4zqJKJfgMzaQtFMnY9td75AP8TI2z4Q7X3uuS+i3bsPCtfQr0ht1bVMWivbrqrqSHWzQhk2kZQv0Bog5c49oXRGRwLlMwKS2dbYNP9ZM8ClMlJO0NirRjo5XZN5V3Ke4rtimaG17s7lV5bsgUgrMHC5DjVBsgD4TTaDZnic23JXQyQirH1GnnRCJ2jaBLmzdt5k+iJeZKzs7pDrKTkB8IohactCWL6H0BrRabfW91fe4tLBj3bWhf53rENXR54QsbrDroqWILU41N+tbckyFlcr2d6EtJkraqZbOb1zYvSZQdS9H+/bu5eGHH+a973kPb7jrLnZs3/5d74mU4l/97/+GD/76v+HDH/kIv/D+9yOE+L7qn52dZfGpPLKmeWa3z/VTTXJVRehInt0RMLFsjOpqziHnK+45XuPZPSmG1yIT1xpqzm3x2TnTYLU/S0+k8SIIlCJoavqLiov9Aal1QaGiSDWht6R5ab9kHZeGhD3zISt5h9k+l72LIcOrERfGPR496DO+rihmJRfuq3AmyONoeOGgR9+6RvsuS6N5Tu/uYWUoIF91WcsO00zvQHtbaAYTpMqnkaELpBHMEfGLSH0
MxgvG5hwahSADvf3geiZSmhQGtukiRLtBhAhdphk8gBvNABm0LqDcbbx083UsjA/gRVvJlFe4vOsR8itpoB9BkdB7O0I1LJTOskWjFnjNWbxqA0EDgYuMriD0JRtVW0dQRTNikSEEBrKpQRuFo4KBw4snmCRutAJ6bD8cDIJHDrjZljFGqf+9uNUa9cIOSjmf/tllloevI6jWUPmtFOZPEMmdaJ3HDas4KkuxkGHf+TVGFqvcMyvZdXoSJ6oydWQfheklgnCBzPoiTrWMCBVH7z7A8FJElE7hhev41ZAw20+wPMfyUA+BhonFkHSo+eL+FNtXI+Z6HE5OSPbNRvhKk4lgYD3i8cMZds42efxgisNnagTlaeo9Q/QszONHx2z/Fi2vhmnk9zM4PUXoj1DvG6N/5hLKGaSZH2dhbJzcSojAQ7AELAMRinsRnIMHbobBPvjOKRjOIkpLoAvAbiJvgsjdghudw3N2MjpbZGS+gis9RiY/g2AOEW0hqP4DmfUncJuncBpn8cLngROIzDyyeQ53fBm3fhJ36yKuOIMjJyGaQqhJYC9wwWy80gWIGnB4K83zR3BuFLDYhLfdDuMDMD4KL52HLz8Lnobd22FsFI4eNygcL5wHXYfmFQgVrFrEAr1iZGpi0KBz7BmBc7NwJQ+rV2AxDf0ClipQLUK9CWP9sN4wyAJM2I1iDDeXx0D9FUCs46wv4DTWjcEWMZZvDQMj2YsIl4ER2hBka0DKwIhRAcaBFBEPEPljCG8H6VIdQRPtpJFRkcXte0kVAW2QMF46so1s02d5OEdhpcp6YZRM6SKVwhHc+gpBGJEtn0c7eWP80/2IaBWQRNlRBIoXr+8laEKmrpGOx+R4ir6q5PzuPp49Mogo5LkwnieSUE2bqPPnDwcMrioqgWRqxGXvlZCdlxvMD3isZRxeHPeZWIsoVBTTAy79pYhTW31Gl0MGK4qBYsTFEY8n96Z403c+BZxGqDKSKpX8PfjVSaSaQnABKCN4CUGA4KRZleg16PXtSrfPACk0mgZycLYMWwoGUWC5bhAhAJoNKEcQ1aBYIqq/A8kU09seobBc4ekjE4zPFnnqxl52rESk64q/vDfN3acaLBYcLo14uGgqKUmuqhhYD+lZLSGaNZASv1rHqzaZ3reHi+M5sqGg2Jclt7RG6OaZvn4re0+epFrYxunDw4xOzrM8to+5sQwDC5eRPA1U0dxELXgDXjTJ4pZfIFXsQXDRypsH1BBMAKfs4nqntZWKnsYqPQ2TnEyqJdjRhFIJ0WzCkQm4NAd9Hty0E05cgAOjcHgXnJ2F8Swc2Gbkf88IjOTgzLSBp1tYgadPwO2H4dFTcGEG9o7DVy5BtglSUJs8gtt8CghZKfwL/GaV+fGHyBWnkEwjWEdGGrc+hVQjyJqg2neQU9fvYGiuzsmbDyHkAEHoUM1nKfekyZQkfm2JiF7mJ7YyPDNFqWeIb7zpTsbnR5DRKJpRNNtZHLuFKDPISn+OfEmyPLILvOtwqgKJg2Ibmh4ib5ClbaP4NcFgeZaeUpqZ8Qx9CxUytXWCUh2V6ydXaZArNqn2DuCFmkwjxA8VQanMX751K0GkueFCg9Wcw3rK4fBkgy9fn2FiJSRf1RzdF1DNeSwMpihlHEppiRaaMztT1PozRG6A1DA15HJgskp+eZkwlSFVb3Jm3yDraZfD84qFsSzDMzVELYekhtZ9BtmCSeAMkEOxEzesI4NRUg2o5H3WerPkV2so7zIp7wSsrBsUlTEBl+ZB5wzqSl1CtBVuH4T5Epy4Akd2wC03wOOTUFoH8kTcSGlgN0H1KVLNPP2refpnL4ISOI0qizt3MzecoX+1SXZxkhPXjTC2GLFlqcH4pWnOb+9jYq5MbmGNT751goGSYs+lNYLiDPWePvoWjDxrdiLVAsIiiIX+dbiNMrKZwo0WcaJJBEX8psIvKfzm8/i1FxFRDreR4+QNw+yYi8g2FTNb0wwuBVzePsrWi0ep9Gx
DUsevzxDKCdxKmWr/CF65Dk4WlCYfDDO4FiE84yx4cn+Kc8MOd52sMXJ5isndfUgtyFTVD9fzHNOB/fv59N/8De9617v4D3/8H1/22kqlwgc+8AE+85m/5jOf/Sw/9va3v+r6UxYK7NSAT09Fkasq3EgzU3BxFDQ8QSRgS6WJF8KlPpfeYkQlJeldV7w04qL9DOm6Ztdsk8iBYlqSqWm0BGfBpbca4UbgNzT9K5rVPuO1Ww2k8Sl4gtWMZGK+SeQIimnJYk6wnnKY6neZ3N7g1GjAUk4yNeiwnHdYzWdY7O9lcijF9GAO5bqsjuyiNLCDZmGCRs923NoVYAAYxADT74cwMhBDAwEMF2C8B7YVoC9nF1+undCrCLWGllnARfm9dpHmIcM6kTfOwuAgs4MDlIb2AyPMbz+IZtzeX0O5e9Hk7AKtjGQRqZfILZ8EUkiKaFykWrT1XrQL5Iu2zedtncWERyP2CqfsBJLcOMUwT37id9++ttiF9TCV3gMo7bV2p5FIUxrZDsInWwHIoMQoWgSki/OkymVyNc3QYoXRxTK3H6uRKhfRjsNyfwEhi/TMz+OvLyBrJbRwuTI8hAwV6ZqmkQ5woiZSaWS9jABGl0MydcXYUsipgoMWgqYjKKUM9FU5JRlYi2h4gsl+Fy/UXOj3aHo+gpB0pYGMQtvnhl3MGR70rRloH+3kydQ1WrtEshflp1kbHEcxbPlRS3jcxs2mpDcPuazxdA83QVu4KZFBOQGRtwXJAoNLVSYulehfLpOrKiQzQA2vvki6+DzpyrMIJnHUtxH8PYIXIJgEzuOnjkJuEr9wCrdwHnqugDdtFwG9tPC6UxZOKZdCrQ9DTxb8FPT2wPYtEAQwswAXrhi5rtcNhF2kwDdwg6SXzeJ0pQbNGOZrAZiBLT0wloNCFs4uw5wHl9dgOoJqHXIpKzMlkzwmZa0oBcsTPyF7dfO21sQvn0XgQCMEJ07SEMP8Ze14Bfa7jG2PD62kDRlAotiB8obILYdoLwCh0V4ehKQ00I+mD0UBqUJqnkOp0Mda1gUhqPRPIFCEhQEEmsLcPA7ryGYVLX20mwah0BaaS7sBKxlBOZD0r0eUs2kqgaCeznJlaIDT430sDvRTTAlqnmC21yVfVZzY4hE0FOWU4HKfy8Rlm7gETSgNpHXdFdR8Qd0TVAPJQt5BKsG22SbZiqLkS54dD6ydWEdyEliimd+C25y2nshpBDPAGoJpYB1EHViBHt/sY8iBF7Q9Vo5joC6rDTMhxn6ouTUjX6UyrK+g2A6UWRneRXp5nsm+DKgmV/pcdk83qXuCYxMeubKi7gmmBjyWexxWMg6RFIzPNawXViIbNTKlGogMa729rKUFxUyKYr4HoTWh22sTJbkox2NmqIAmRc13Wejts5sxF4O/Pk7d3QnkWR0/CIzZDVvK6nuMZx+fOHotGzBx4QkIUmZTyUkDvehJc18uDW4d+iVk0+BXoScD+RwMeZBPGV0bz8F4xvwWJ9UoV2B9FZohnF+Ds4sGRnQ2grUyNBs00lusnmmK+evQskBxcIuFLK0jmMJbnwdSpFaX0UJRLexkZngMLTLMjk5Q6h82OZsizWpfP5E/QGb5Cs1ggFImYGhykdWhrRzdcYhqdg+RswMttqLZQml4BC1gdWAI5eUpDu6g0nsIRT/Qh6YHRQ6hFOWeAmEqz65jU/j1OqtZH+X2UpiZAaGQWjC8WEal0rjKwMwOrjRRjnHWnB100UKQamjSdXMS3L+muFKQREJQDgQXhj2mB3LMFzxWcw6F9ZByWnJ8PMPloV7KgUQoKKckhVKEXymZhBxBhpWcR3YlpJrLUQw8hMqj2AoEyLCEJmvtyAVjO7RHZm2OwnqE39SUs1lWC73G801onAgoYxObofW8p4z81oWdu4FK3biKAx9GhkFYGENKKAYoD2wDVhiYOcfEzDpeaREZRWgcytkcKz0+qVqILC2zkpP0FyMmFprkF1ZQjjBJsSLFiRGfTEM
hlcKtrOA0FVpkLbzggN1shWiyhN4YqbVVlEzjNpaso6FO78wZwCG38hywjNeYIze3xMyAx8hKSK6mmB0sEGZGCaUHNIk8l0iaNYCsV/ArRaq9Q2iyaOGghcO+yw2EhvGFJnUXpnpdVjJQdyGztErVg6Yj8EL9w3vmuZMeuO8+lhYWKBQK+OkUv/i+913lTW40GuzavYfevl5OnTjRfcimS13qUpe61KUudalLryvJ17sBX/na1/jQBz/E7/zu7274/hOf/CR79u3jl/67X+Lk8ePdkepSl7rUpS51qUtd6tLrTu7r3YDbb72VU8ePMT4xQaQ0/+p//Zf869/4EP/+d37LBBc+8kh3lLrUpS51qUtd6lKXutRdPMeUTqd54okneOePv5PZ6St8+atf5W8++1keevDB165SfdU/LxOXufEb/T2V/4Nu7yv5/pVW/l2CLrW+Zln6mmWIV8EA8SqZp7/7vdcCQviBD9jL169feYu/fyHRm3VYf8+1Xo38IF7mm46eKfu/1huBA7TeiDrwvch+fG8SFUDrH+JY6u9JjV6t3GrxKnT4NRRx/Vrz9VXXol+Vhr0srkosw/qHxU39vd8nfngq8GrL0j/I5uhXYhc7vtf6alXU19ZH/d2mk5czw9+L+X5VN+jv0phXwJfvmzZByBGvtD/6+xY+/Wrk/xqDea0xft0f24jpwL59rK2t8pEPf5hbb7vjNV04CyBywNUmZ72SgsiFdGRgy0IbJ+FGCSYJcw+AF2mUgDAFyjEB4CoxUJEQRBb2JlY0GYGjDSyWciCK86U7plyhwVOgXIikINJmaCKvXY7QELmiDW+H+WyMubLQVCYASeEDLjoe4hj2RqnEwiMJC2NgaISFoNMItCPQpGw5DlHKAa1xlUaoEHBwVGxtnA7xkh0i1wmX5mCCYJK0GSxTEormWtubOOgqLtO1L6fjekmYai/QtbRwNSpC4wHSQiNJUBFOFIFWCBWRhO2RGrRjAne0J9Gu5bmFWBIqQujIQA65AlzZgrnSnpGVnhhhCygk4Kq0r1GyvWDKRHE73Rg4McGfAE0aCFoBSdqVFiJLoH0HLQXKBY1j+eFiAteG2n1SykITSfBcyHjtMVURQpvgJBGFKF/Y79o8EYSJNjmYIKYc0APDWRjtN8Gpoz2Qz0BvFgZy0JuBVvCLpVC15FWE9vveAKRj4OmEDSoc64OBXnBckNIEuUhp2uDad2HHX8m2LChtBkBFtp2qLa9RjEkp29d2TjZaYwJbteWlhc9Lyqu61qYlHj/ZYd2l4RWe4YU2wTRohYhUS1eF1minLb+u0kgd4VidNnIqN2wkNAolJEJplCfMWAORa6DsXGnsUhKK00oCaaWRSuNq3UKC0xg7BeBadmmBCTbV8f/GrinbTaHbK05lr9XClL+RD761K1GCb07CNsRwXsIEc+rkmAhj40LbRy0t7pTEBNy5mABNCQxZOyeRKkIjCFSEltJAjGkDJZa1eulocJWBStOOEZ/IhSgjibIOypMoX6KktPzRSG2B44REO7IFTaYFyMSm0W1BgrnEkGDWouOoOCgwsH1IzmDxPamr5Upv4vzQHRvX+KWU5Zc28h9DnCllza6FsowDEwXQ57ehIq3OGXtgvpPajJWjo44FkdzQR+UKXKWICg5OLCPCwJs6SqGFvV7H0G4KqTVBPOcooytKeEZfLFuUJzfABGpclHCtXTfXxbB0Qmszh1n4PZWShDkJkUZ5hp9axjwTIAX5SOMr3ZJ1x7KsL9R42swgjjLjHM/bSgocjW27QVaUaNNvO3crB5RrZCgWdVdrokASZeL5rHOBKVq8DX3R8kcIYcdGY+0mZgylaMvaaBayvpExx4GeFKTS4PsJbLm4/AgZNVufRbx+0DFsZoQbhcgoRBDhRVaHlYEb9SNlgoqlJKXsGkJbIFhbj8az86/LRjjdCKETdpqs5YDokCtTLpggz9g+uSpsyZGO7YktTypl529QaYlyjI6HqRhfT5O
LzHihtYnFBcK0/OFD1W1Gn/nc5/iRt72N9/7UT/G5z36W3/v93+PkyZP8yGuQcnt2dpbVb+Z5fFuKvcshT+1KkW5qHjsS8N5/KHFuzOPDt2R548UGt55o0FdSlLISBDx6MMueuQY3XmhQyfh87WdDJhtZ3nC6xpV+l96SYmBd8ek35lhJS8aXQ9ayDp7S5JYkfVXFsS0+KQ3/910BP/5Mjed3BCwWHMaXQ6R2+NaNgiXP58l6LwWl+dsbXIYXJWNrEWPLio/dmWO+12PfbIP+ouLTN2e56WyZ3ovPsTQ+RqrURKlxJvcfpn/xCuWxtxGU/gvMLMHyKJx5CaaAyWlYXraCJ4FjQAHBM0h1lkrPbcxvvR/Xv5Gmv52wZxeffXAH4/Mhtz51loHpL1FLXc/4uXNIzqPZi+AKMnSQvAhsx8ByuWgUghN2Uvg8cBOKXsL0DTjhCeJIc8F5oAyFCPZsh8EB2N4DeQd6C7Ayg0FlaNKGqHOAHcCofd2OgQfbSt25GVefA3qo9N9KFAzx8QdG2LYSMXLpRZ7aMsJEKUVm7gXKg7cQ+j04NY1y86Srxxi5tIpXOYdXO08jtxN/bRrtFPDopTQ0Rn7hJHMHDnBpxyEKUY7PvqHA3c/M0jv1FPm146hojL9/8wTZqJ+hZUXDFTx3JEMj1OSVYPtySMMTvONbNaYHXMJAMHdXlXAx4NSYzxvO1LjzWA3fc6n3bSNb0eiwhB8+A+yhHtxHqfd+lnbcxsDUZZojB5nZOszo+aOgspy+6QYiL+CLd2Y5MD2Av162NnEvld7349eeAhagVDKwRb6A+26FiX44epKGfDdB+Bd4zS8AF/FqJzl69yNsO/l35OZXkXwB/Cpi6AXY5sC2PNy0Fe7aA2+9HR55Izx8L/zEQ/CWN8Hb74cH7oa7b4W7b4Eju+HW3XD081BbMIuERhG0CytVnOJX4IE74D0PwS1HjCHu74fr9sN7H4af+UlIBXDoIPTkjUE8fwFGemByHe7bZ2C2ZB/cOQqXZiCbh+O9cOo5CG8FHsOgZtwOk4/Ceo9drDRhrQLZLJSW7OL2CuBz7E3/muHJvwIOAy8aaD1SGPi5MVBF+78HFC1ShAB2AasY2LsrdhFXBgZY3fK7eOUIV38Nt3mBoFTErc/gqEka2YCg+hip1X2UB3sJihVc5hhf2s7Iua8wfukEVOv0LT5OxE6iwE66XgM3PMmVHW+if/7bPP6mW9h7+qsot4fv3HyQFCl0XXLd6VVmRzIsFCTntgT0rUeEruAtT1TYdqVCf6m9UBYCRhYiRtYids6EPLc7xfB6SKEqqQSCp3eluHB9k8d6s2wrGpk/MNXkS9dnuO94jfl+h3JKcnxrwP0nahw8/eetA9AmP0um9NvA01aXn0fxBgSngUkoDMCuXsgEsGvIoGr01WFuDhiBKydB7YNLz8LiLohSGFSKd9ixOABAzf0X+OqjwG4GZ1ZR9HD41GUuHdxDEEoOXGxSygh2zCoGyxHDKxE75kJWMg7P7Awo1BWTowFfvnmIyxOjhKkCk7uG8PwhVvMOb/yHC/Qur9LI9ZCOXGa3DBKhmJg8zfKWXfg1zcj0GRq9u9j94jlcfRSBj0EVydAceIig9HX6Zs4CJbQ4TCP1AG543CwMuB7Jt4HrWJr4aTLrf4NBTNgF9XlorpvNcaMKy/NGlof64dyC2RyPFuCZczDWa1YFX33SoCs8ehouLsKd++HzL4ATwv23wacehzddB/t2wvGT8NabYesYTJ6Em/cAguDpT1ubXCdTPIJUgv65/wTkLWzpLA3vzbhqBckUobOXr71pBw998RwfftcN3HG6yuxgwNCaZno4xf6jFygNj+A2fdLF46TFdlLF5yjMBRy+lCVYmSJT/89IneX4ne9i1zMnKPcOUsm4LA33MDFdppL1KSw+R7H3jSzuvoHUehmvfonLE7uZ7y8wdmUOr7jC6CKoVArRjDh3/W7+r4cnePBrk5y4ZRdBQzA35DJ
6YZJmbhjp+Nz5YsiemQbrOYfTEy4HLjfZMRPyY1+tsGsuRLmabFWzZanJqW0B+6YaPLk3zZufqXDz6TpNX7B7ukFPWdFT0QxdvkQz18vT1xVY6ssyOeyRlnD4Qp3tF0o8ecs4zx/cwb7TIS4zKHrtHDsHHETSoOnt5S/etp/RhRCtBT6SlMhTmP4O4qYizC5D8yF4QwPW+uCuPviFn4QdefjO1+CfvAUevAsODMMD9xg4z79/FBZTwHEcniO38lfADgQ1VDiGr05YT6NH3+wc2y/+IUHpEzhcYe8pycrYTkZe+iKummLfyTr5pUmaI9czth5x5MVFgtVncblIZegWehaOE3IIgYtii11T1AjzNxJUP4ujHbuU7mVl28+TXnuCeuY2vObj1pnRh6BJX303uWKFbxzp4cFHV1kedDj09F8T0YtXT3P+psMMXzqFG00RpneTnznDwsFbySwv89Qbb2QpL5npd/n7twsK0w5nbw9531/VufnoDE59gaH1flYKHr/ySP/r/9jGh3773/Jvf+s3+fOPfpQff8c7EELwza9/nf0HDnL02DG+9qUvvSb1ehhw/0AZ+2EmG+M96Y2sB8Y1u9PYq+zZXVbkYKBKIrPDRG4EAZdKt3ajdvPS8sRIjKc6q8xnV5udqxYQSTNJuVqTs1j+eaWtR9hQoDceUfutZByOrU/Znb1KeGVIeHCcxEt2eG+x3pnAgIcr2l5UrfAtCLryJBofoVUiGUbSA+1s4uv37PeZDm80m3jk3LZXpLX51In2io77kh7rtoc7hs9qwfQrRaB0y4Pra+NV0bjW+2x2w9p+Z7y95j15vKcd7E5YILQynhatzMkCsZfXs54Dc62OnaLKngXEThUBUSBacuIo49mWtpthIKCizRiIDs9TFCHCyLTdkdY7qUz92vRfYjyHQm8EoTcePsvPwHpvlfU+KWW9chbSiB7rDcyZBD4tnjhxB6w3N75fGxi5SJkFThiaV+ypSn4XRRD6Vx/z+bL9VFkUtWVBa3tP09wfe86iyHrIkt62hPc3loVWcpXYu+51nIbojacnQlx1kiJjD2/L+6c3OaqUiTK9Dk+KvsqLKKLQekN8wLP8laAdA4GHa05JdLttUSBQVs5MfX7LQ2sOuUw7zD3Wu2d103h+lDmdcGnhsctWLwRhCiJtPISq7dxBOSatgLLsEh2+RUcJ0iqRt0QkfO069rrZ+zfYoBgSsLyJbjsJs6HbyXLURu/b1adgMcxl1LILMozsBkm3ZCzCRWplkjrEh3X21E855vQGq89SaxwtCJTCU0bvHa2sjklURtokR3GfVfvRG2s/YteiDiRUZYfMxe0OzPih7OmP7nhQKtrg8TW3JzI8uIlZSSaS+CSSGRnPseVXrwfr8cmFs/GAunUKI9qPq8RjAfakqWY9iLEe9SRODWUigYmVQa1RaYEf28fEYwPKtddr4yAxJy4S7QmijMAtCuuNl0htvdRxV9H2FJaWV1xoZcoT0syWsR4Jc2ogrJfe0ZqMTd4TXyMhUZ4mzInWKXQ8t2sJYb9ECePF1tLc056xBMoRRIG2pzUCJbUtR7bqlval7OmM8sBRGk9HrTln87N0bU48lNWr5Fyh9caTXa1szhNrV3HbNjS22UolpmfXjqXT4emNT2rjOS8P9AE+kT0pNXOoQ+S4yNDcH7mCyBd4OBse1WwnzFEd9jReU0R2jovsr+qq0+7IM3rlADpl7KXGJ06GJvVGe6JxWl5tx/I30u3TNQFEvmjprPJN+f3qdXxso1ar8b/92q/x8b/4KJ/61Kd45yOPtKDqgiDgheefA6X4Zx/4AFprutSlLnWpS13qUpe61KXXm143z/ONR47QaDR48ehRcrncVb8XCgW+8LnPceMtt3D3PffyrW9+oztaXepSl7rUpS51qUtdel3ph+55/tJXvsKuPXt46C1v4fy5c5sunGNKpVKcPn6c/r4C737PeygWi90R61KXutSlLnWpS13q0v8/Fs+
/+3t/wHvf8x4+9Ju/yf/5B39wVUbBa9EnP/lJLl+e5qURb5AAACAASURBVN777u+OWJe61KUudalLXepSl/7bXjzX63V+44Mf5E//5D/x5x/7GD/1nvfgyFdedTqV4luPfZ3rDh/irW97G8VS6dU3SscPqNtH4BOxdULoa2M2ijb8XPth+fZvSabGYR06EbcRwzm5CXgotA1ISEIiJtogOtut47I1clOsRmGhfRIwTviJ0uIAmyT0zWbBeLavUmzyS7suNnCBjs9JiLlrvSfrl61+bohDTP52VV0xRFoybFN03NPRFER7/H2xCRinuKqmNnxfLCtiA0ulbseXtULXEp83IJ7RAU1MIjYNNh17HfPlqlZtTnH/pN5E5XUiGGNTQevEFxa0gzo6ONMpAhaScUMHk591glFCJIIDnc1lSyf6HQc8CblZhzd+0Jt16lrys4n8CtnxtUzo1rXug41Ba2KT3+RVuqjlpoxs8+sqMNxkEGhnIFksTMn79OZCmBxr3Y5VE8mm6I5aNoV2t4FSHb1uvSfjzVr2t5MvrwSnXW8CV+50jEeyZO8aLUpqiLhalEVinJPomfpqjRNcgx9creQb8NA1m7ZpI/Rhp3HQG+TuKh7qDQ29umyxCT87lVhcg+/iWvdsNo7ffdkR81e2VFu35jdh269jOY51QHb2WLSu3ch6vVFPxdWDYOZxsUn/Nclge/0ygN+ChEzE1yfKjwN8RYcO6872dIAObLbuuIZmtdrt2KA4w16dCC5O2P2WveXq4NGWfl1z9knwSFxjvm9/1nZ0kr/rTeywEly751p32FnRCszdKN/iGiK4WUaKBAThVfNFx7V6E5mx67cfClTdbbffwRNPfItvP/kENx058n2VIYTg4be/nS/83Rf5jQ9+kH/+y7/8ij3XSZqdnWXyhQK5suKWczW0A7N9Lg88VeUbh7KMrYTkb1tltphHSxhcjzi+zaeccnAVnBn1+fd357lnss5fTgwwWFPsmW1SSTn4kabpCoZXInBg92xI0NB85o40x7cEHLzSQEnJZ474/PdfrvKpBwKuPxcysBbx+Rsy1PbU+TxZRrNN/l/23jtIkuO+8/1kua723ePt7sx6j12sARZYAASwBGhBJ+kodw/CSaTuJEXoXgSlR5l7jDvpThJPek8vBHlR1Ek6SqLEIwGSIEEQBEkYgruLBRZY72bH+56etuUy3x9Zsw4LWlAk4/obMdE91eUy85e//GVW5ScPdMxwwXK5KB22zUse3prkwHTA+R6bbWMeGSXYceQSG6YdMstlhAxp5PLkZkeZ3nIHQyc/BwgMrwdTXmRh6G9ILf0FkvsR2MAIIf8eg2fQWLndwGkUb0IwgYi2INN7OL8qyxO7O2n3LLac9qlmDbovnsVSo5iRj8H5OBCtAk8AGxA8AxRQ7EWwCsH/Au4GzgAegqcxMDDDTwDdcWC/Gc3QOwiMwvQczM/A9AwsDsLSOeDNwHGuEDsC4CAh9/Ir//7XUd23kTYKZBe+QjP/RhLep/GcH8GMpqi17wFDUE9bbHt5Erd+jP65NEbg8SsfOMj9z02RWn4FgzlsVWKp83YUNid330Hv+EtETj+Xtm3lq3sGsIDjXQn6Ztppmxjj9OY+Cg2fg587xYu3rKVzPkQEHgY1/ubgDvK+xE8IBmcj2mYjSgWNJhzrsrlpxGfDqM+Ocx6ebfDIcJbBuYhVCyFrJ0N6ZuvMdidony3TTCXJLzyJUF8iNO/Ajs7hes+TXIgwfI8oXaQ4Nk690Ea6cpTuifOYQZbhcQebKQxlMbXuneTmRplcsw/XM5H+AYypOcTxw7ClP0YYLsPCFGYwhmR/jNFygAl6L15EMYTFh7TdvGMtHDkKB3bCUyfhnp3wD4/B3bfA73wCfuwe+PgjcN/d8PBfwzvfDA/9Kvzig/DPj8JP/Ahs6oE33QYHdsDxcSgmYTHUMdHeDVDIwfw8rF8Hu3fB6lXQ0w2pFAwOaEbpqTN6lvj4NMw
swrpu7eB3rYJkA6oezJZgoQOMV0CFwK3AEWBPbLsOUFjxWhD2Qa0dqAA2kp0Ieukcs4GAgAcw+Ryamd2ORjOmgADFQwiOE5q3YnQA9ToalfYSjdR/wg6+CuxDkUYwwqHbf47ltpvonvgiMIqgC8HfosiTaF4ETtDoeAuF6Ydp9LyBRPUjJJafB1xMvkCU3I0VfopG57vJzv0DMz2D5Be/gCAkXcthRYfpmd+M5f09KlrN6ouPky4tsjAwyMDxw3z+rk3c99VFdpwI6KhL+mZCIkvRSNt0zHv80VvbmCiaDC2E/P2dKYbmFEsZE9dX/Pzbigx5iheHEzy+1uG+wxFFH57vd3jfE8sUq5L2msRGMdVmMtFus3nCZ98Jn3Tp9+OOk49JBTiPpky8CFgIynH+D4Bnw/w5WL4DRr5GtDCEsTRGxAcx+BfgfuBlYB2KNQguMrX6w2TLjwIbURTj83Vi4HBm709jqGGaXTnsekQyytJRNhCRz98dLPDeL1Z4fpvLbN5CCZjosGiagoluG98UnBpIMF4w2TQZ8NKQw9rpgFOrHVbNmziLk8wO9tA2PYtFgkYmRdf4BUK7g7HBFGsuPIfpdWLVLxHRjYkJTFPt/kWyMx8GpmL/uYDgGS6ueZC2hSeBWWo9HyBRfRwwsCpbMPlCbMtjoKrAbcBh8E2I1gFNqAmod8KQCZdmYVHCntVwdhSmlmDHECxVobEA990OSwswmYJ374FnXoHj0/CT98PHPgmnF+CBO+DJ5+DZ4/CW2+CZ5+JIw8VkHsElYAhJEYNTwB6C7Bps7wL/+Eu/wI6vPcm60z4J75PcfOwMmRmftmWFszhBz9g5Ev6XSC1dZGH4XnILn8Bd/iSCHp55w0E2vvAxEsHfATMsdT7EqtP/A5OXSC8ousYn6R0/RqJyiORChMl5Fjv38sQteXa8eB6/fT2T/Tm2nlrkE2/fSYfqZKHLJr9YxaqN0T61SCrqp3/iBfrG/oXC7ARdY4sIFdLMdXFpOEPdAGEIHt2fZtuIz0inzcB8SKZSYao7yb/cmuG2k01ODzi8+5EpLNumljbZd2yJRtqh4RqUshaZpuSLN6UZWkrz2P52dp7zOD1ok2lKfNtg47kymcmjmG4v/7gvxxufu8Ts8K1YHjjhScBjZui/kFn6GqYc4+aXnqRj1uU9P7mFX/zKHPbyNJEcx7pdwr4tsLkM73knvHM/dBZhyyYY7IfBPNy2F0wbvvq89lX1OjxxBBpLoOpxJ9RD0zTAkg0ierA4g6IdQQ2Dp9BEjE5MjpGf8Wh07iNyeknWv4wUqzi3dTXJZkTnyCFs+QSQ4oP/7o289eljCHwM6hjUgcMInsVuPglMAykkgwhC3OWjNLL3kaidBLpRdFDp+3FE3WRsTT89p59l88k5RHWB4vwRDJYIxFoS0St0j40wtu3dpGcDPv6OvWwcD0jWPabWrKatLPnULRkm2i3e+qhHUkoaTZc3HKuTK5eZXbuVp7cneXJrmp///PL3duT5y1/9Kms3bGDXzbu4eOECHR0d39X5HMfh4//wMe656w3cdPNuRsfGvrNAHI2Fk5bGwZgowoTAlipeK2EFF6axYivflQAnUnRHulebuK6rsoJhiuLzSkMjaxISXBmP4AqNqQsdvSjLSsfPlSCkbjaEEkTSuIZv3x4RY8KuAN6lqRf9UOLawQmhImQMGpdC85w04NyIFwdYGXMMr+peXY1/iXvfMVotISWGVESuBtirFaD85YVJVkbTnGtGgcU1i6LIq0bcElxZsEBdc/0rn1cvfLIykhfdoDsYIQhZFQUkIv/yQhEixn5pJNvKCIEe4ZOWZltLR6PIclEYdzz1NVW8SIpAYqrw8rUMGZGQEYbUCyEIqVCWiPNdELgmZqQQUYxFwsARSgPzle7pRk68yIKpR7YiQ6OFQldD8h30ghQrKCRlXBmh0Z8riLgriyToBV7iMrFXRpINIhIoQxCmYlSUBEOGGn0lFagwXuBEQkr
jfLDixUiUAaRie7kaMZWOb6Rbb5PyqhEC49rRof64k5NI6N9Trv5sa9eBrmlCFCPrglB/l1etPKiuGnUzLf3/1Xi6yzjDq0ZC5JVFVvRnjM4zxHWjwiucMxHbVeI6LyG5fmGfKwsImWgcZHSDoY7LoLcr9xPKa0dAwvCyza/gmUylMGXAtSiofGyPDlfmdmcw1AqmqxDv68ZDU3Zs94n4iVdCj66aGvUUJlaQkQJIo4QTj7xpjF2UEARZjeGKVtbEUAppCpJSxVhMSEXaH1pKL+jTFSqcSP9elPECIrFPi2wuL0JFjOU0lSIyRXwN+6o8d6/zESu4NvGqOs9lRJ+IO9ErZWZeM1RsqCDedv0CTJGuCyrUSMs4nSuLxySlRlSt3K/gCnrPjhePMaVGWgmhEaOXH4zEdUEvDiO0v44rsTJWRrT14ikanSav3J8M0Vgw4yr7dGPEFnE9jnj1YlTyqicZcT6YxpXzmlfloWNeGQW1LK5FmnIt8lFdVfel1OfM2vEIZWx7Ul731MS9qvxW7jG6PHzqhLpMQtcGkiiSSGykoe1UYqNb10Rsz3Zs6wIzklzBZ1qgIhSpy35QYhOR0P8bGvdoKHUZ6Spi/6BsQSIKETLSSE6h71OaJjKx0v5kiHDj+4kRaDJeMCg+pzL0NhGPghoKEpGuF0IqwpyBMgSRES+YtZIjsU1pjKjCkRJp6HFa3b6vPFu2kcYKklbphXOuQs9pn7GysEgOicmaKF5MaeXJmZQaHRoK/elfhflc8b+RvOL/LUv7ZhHzc284Jm5d50vFVX5L11+JA3JlkTErRrESxxDW5afh2ejqeimuSk8mbmdsrqAOtZ8QYRjDNWN8ndSoT0NpzGxg2SjLjBeLu7KoUIRzuQ1MxIugSVNoTKWp67YtIUprvyXtK+g/Q0XYUvu60P0evrbxh3/0x/zoe97Dr//ar/Hnf/qn39Eo8Wvp4Yf/iAN33sHB7+Xy3S211FJLLbXUUksttXSdXvfg2fd9/vvv/z7/3x/+P/zJn/0ZDz34IJb1+hLxTNPkj//wD3nTW97KmnXrOHf+/Ld1vIyiVsm31FJLLbXUUksttfRt63XnPN9zz72Mjo/xzNNPMzgw8D29+f/3D36f/v4+7rvvPg4dOkR7W9u3dFzT8/T7ki211FJLLbXUUksttfRt6HUbeX7+0CHWb9rEwKpBLl28+D0PnAEMw+BXP/AB/s2P/zg337yHwy+80CrRllpqqaWWWmqppZZ+sIPnP/nzv+Sd73gH//GXf5m//Zu/eV3fb/5W9N9+67f44K/9X7zjgQe4MDLyzQ9QV7Ap1wFYXhuycz2p5fp91bU8J3UN0urKcUpA9A0ucXlqkbiWxuRxY8Dba93ijTeq63765qghJa6sOP+NznVFV3Lmu1tU/ercuAFO6brt8nJ6xGtfWYgbpFlchQF6NaqOb2Afr0Z2XY+CUlfgbldN+hPqtctI3oAOpFbOfcO8/iZ1TXy722/AdLpcDvKbX/PqtMnrTxP/6Mvr6oy68hfIV1cQIb5pMr91ie/iGPlN9rnaLsU3yBxxw3tRr0Mib3gO8Q1ghoJX2eirquB1KVPX+aboRsV/lZkYN3Bc6rUc6zevdd+4jn/T8n6NvJDX1tFrjriKkniju1LXW4a6vvJeu+21QW/ixmTFb3Df6jW3qCuTZq+5/nXIQyWvS9R1EwWViveJJ8BGEhork2FDTa1R6jofwbfTsl5jM69GsvHqhvc169ONy/myfV+FRLumzIR4DYTla9/vNS3Q9XxSXsMFXI9ZlN/KZdWrzPTGvuhKukJxg1Oq1/L14hvUjavLMrrumoobo2fVq+9IvTr2urqOyWscxvXnlN+5f4/4JnYnvmkeXQZCyssMhcux2XeFqvN9nz/9sz/j9373d/nwf/8wDz34IKZpfl96AXt276ZSq/O+n/s57r77bvr7+m643/T0NKPPJcn7FsMzAUlPsnY8oJI1cANJrqH44qok6bLBWN5
i27iPMAUzeZNUoFhKmrywDt74sk//QsimyYBkoDgy7PLsBoeGaXJ82GHLqM/T65MsZk3GixbnumD7WEgiUtz3fIP1cyF1y2Qyb1FoSkIL1p+XpEPBgVM+2QmboTEw8oq7Xva574U6q8ohp/oc9p9uMtpjs+vQOSq5DAlfkaiOItxu3OUTGH4WxxtF4DO+bTeFmU9heecwohIwhaAAnANKCBaAJnr2/iST/W8lWzkDKkOp9066zk/QPwNZr8nYYJK1L79IkBAETh+WH2IwA8wCp4ESMI3Y1AW9NjCKqJ2CgRwsXwB8NOpmBT8FUIv/XwS69DmUAJXg4bf/PnvOlFBmDVQDga+7EYUM7NkAow24qxsxvMRA8QSr7BfIJ1/CTMxgTD2FkCMYahZBgN24gFtZJt3MosyIdPkkSqax5CidtSF6Jo4RZjKIUICKsMMFpNOBlymSnV/E8kaxVTtdcyFtix7HN6TYdewFDFnDMdtpHz+M7U2TW5rEboBQkjDVx+pLJoNLigurHUIB9YTBn9ya4U0nm0SGoJw2WMyYrJ4K+eCbM7z7qM+X1yfZPhFwsdumo6LovjhFavlFIIPTOIHgKEKdA07HLsdCoBC+JEpmcSvnsMJPI3geO0iQrCySqCzg+ONE7hqS5RMk6ocot+8gU3kGwQlEOAfzKZg8C9MlqK8DNU0jcwuLPbdj1zqRiXWY4SeBlxFUoNeErUPQDKC9oIkd6weg4cOWtbC4CPffBU8cgjv3woa1OoAe6oOeDlg9CN2dkM1AMqWJGCkbUhYkBcwtwKUl+PphOHwCPv04HDwAzz4PJ86C14QnvwLnL8Jv/DFUlqDRANuCF8/CTAkqIVxqwOKEDsx70pBxoZqA5AyEddjaBuVaPOM8iG1zGpiMP3VLVy0eJNF8HBhBcTsGSwhOQsLSs9bv3AiXxuG2zTB3HIaziHQdsaTgTZtBzcKqIsbks4h+G6JpsJchCMktPkP73DKERUz1QlwfT6OpBRGCM5j+yxjyNIYfYcijwBIwjuASQs4j1AXMoIYhn8LxZrFCXSeNaBKhjmD6xzDkGQR14BRCjZFbWsRtHqdn1mB0aJi2+UWquTR2BIc2JvmL27LcdcajfUmyZjagtxTROyPZNBZgAk6oKHqQCyWNhKBQU3TUIuxIsXo+ZGAxZLZo8sjmJAOViJOrXVbPhnSUQ6Qp6Jj9CGxaBfOrgPVxmvpifxABPbGP2Au8jOIeBIeAEEkXBtMIbkLwLFAFlmN/MohgDrs5iRVV0ISGEMEMfvourOACiUoWp7qM3VA8vW8jnYFJJSWQpkk5bbD1YpOGa3FsKMEr/Ql665IzBYs7TzbY95JP/1zAYpvFrvNN1l0K6JyapGvK4O/uKrD3+AwntvWQCk1kKHlme56dx09ieU3aFmqkmufwjK3YYQ2TTyM4Akxg+sdAbotxn6PABQRV3MbXscMxoIHpH8GQs0AVwSlEpwt1CdYy3DoMs/MaebJ3EMY92JTRGMpzCja3wdZBeGkS9q+DwR44egn2rIOsC7s2we379LDam2+BN90FAx3wnoNw/xtgzw5dl+9/A+zcBLftgFt3Q8aGrhzsXA+LIwTeZgxpIAhY7tiK4a8hSnVjNJp0TT6LXduAEc1jqucASa1jL+nyI5jRKEr0YDAHBDSLu0iVngFS+NYO8guLRNlVJOrPAwZWEGDIM8AYgkMYHMLgCHAeVC+COgnvY/ROPUO2ZhDZnbTPPE6qMkbntENx/jTJGphNwAywg+cxrZvAWsKtP08ktmMyhhIZ7MYChflDFOcapOsmg5M2xYakmjJoq0Y8szXDaLdNYnOVtvMWQ5MehcnTWKGBsFJ0zSzzkbu7+OJWl5FOm3rKYNc5j+fWOaybDBmc8Zhrt0g3FWvHfAZGvkSpe4ByeydP3CN46xM1LK+GVyjgVusIfKzmccwoCXwFmEBQYtOYor1i4tTPIyKJuXUWtqyD4+dg9w5IuvCfH4ahXpibh796Crb
1wb98Fp54BrJpeOEVwpP7MKKvoSgy0/dBnNoGTPUyUKCReSNBdhVKtTG2/iba5w+h6EDgASEKi4XBd1LNFwmSSTKloyiyvP3Bnbzva2XObSzQM/EykGLN2CJty0cQzDC19u0kqglM+TKwQKXw70g0n0UZu4jMfkx5iLFtB7CjHhK1VxCMAT7LHXtILp0nU17E9b+MoZI8d/deVl28QGR3Y0VTCMZp5LsRooPU8mnal1Lkyucwwgbn161CmnCx2+ZCj8GamZBsQ3JqIMG+U03+6UAb8+0Ox1YnWD0fsHk8+O7eeX7b29/OyVOnePKLX2T9unXf92H0D/3mb3Dgtv088MADfO7zn+em7dtfszNnRQo7UqQDRaEpmRUWbqgwI4Ufc8JqrkAaAjeURKbA8BSRIQgd3Zx2VyWZpkQBTUdQLggqaYNq2sQNFOWsgahDJASerWNCJ1QMVSPSniTTVMxkBZEhsENFoaro9SQdVUU1NMlFirYwIu9J1lZDrFBhKEUqUPiWQIQRqAgNDg8wQ4UgxAq8y70wP2sDEba3FKNd6lxBvNXi/cL4T+G7KTRWJkCa4PpN7CAgMsFLCJLLdSrtSZDZuIET8bF1NL6rDOlucEEsNPR2twjMofFCK+iYJit8V/1dorE0yyA18qbcvR3Ni57HwLjqGBMKWf29TSGMOoMsaG5UqgrpCCO6COQQqgwkcBqLSAqkGwENlxjXFmLQpLcWIiIPZaZRmAgEZlAnTBqYUqFUAiuaxY4isn6ICCShJTCjBoiApB9hNaoImqTLEZIUYKAMl34/QEqLwBb4CQOlFJeyBk6gkTdVy6CeEhgSRM6gs6qdsR3BTN4gcAzcwMf0axhhwAo6TbB0Of+0w/IxfA8lBFbQRFDVjNiggggycboaMfJK4TYWWbRSCOoIAo2OWwzjACRG0jGFNB1Cpxslm0ADWEasdLZcqdFGKVcXac7ViKNMSv+fsMC2IYi79H09sFTWThugt1tjkTIZ8HxwHOjJQ6UE9RiHVWnAbBN6I5ge1wHz8rLOhmoNFkuQSkKzBiuLJ9kWLHvaznIBVGRsZwIsBUaMnlJ1/fAtQ7zoyootR3FaVRzEukCEtApAMx7HyiGY0cevjHC05fWxxTQYE5Bs02UTmpqrOjYLuSyGegHsITAWQbmASb60AJTwze44gPfjYC+I7ynACmpAFTMox8+i7BjV5GPICuBjBktAk4RXidPgIGQF8DCDamwvVaCOUBa5pRJQpWN2mZEtJqiIyNQ4ylrKYKnTBmHQVY8ILYEbKHprEelA0ogMEpGirx5pJKaCnC8xY5xTwdP4Lc81aKYMAkvQSAicUOFEEJrxSGvaje0tGfuH1FUjSCvIymx87x3AQrz/yvjRyujUih+pXcZaOV4l3hZdfj6lTI1RcxoeKjJQoUulmEIuNJEmGs0XaBRdIpB4tsFyxsSYD/AtyHiStkASNGK0VaTbEDsMyPshi23avjxbELoJbK9GzdULUVihR7bR1HYiNRLSiMsDGpjBOSLui+1sLk6/RbJxmpUpSmZQuuwHDCpgZLTNiAiKKTAWdKcrn9L1OePqwJiGrmP5jM5T24ZcRtu+bUI6oTu16ZRmq6/vgbYirBnQndyOdti1BRYWobMdtm6AXAra22BVD9Rr+tzOcZArnZWIyG5DikyMjnPpnCzhCxeh5tHc/yaRY2MFZcAgMvQjWkF01dCshVJJMks16n09cXlb2EElLncvro9BbD+ZeLBF4fg1umctXWeUJFcuAWkK1SZWs4KgDaSLEiYGNVw/IrJNoInCxqBJqCRWs0GyvkRAG4Ed0OFHNMUVxG25aBMYkEqGRIZN1pMI38MMA9K+XtRoOWeyVDAxbMVyxSDflDSzBsmxEEtKDCkwI0XOk9hehTBhE1kGQVqhhI0VNWk4NooMkMLxSnHdaAARBhU2jZcJUh0QrbSXSvvIIIyJcgacr+uBhjCEyRo0mzBWhqlF/X2uivI6Yj9kE1rr4nN
pFKwy80grgxQhXjaPwEeRQ2PlAiBB6GaJbAthrCBpQ6JOGyeUNHIpVhY3Gh4rI2gAEj9VQLLMCvY0Mnu1rQs79nd1vGwGWXJi3+chMDWaT4W4lRoGVaTyqHSk9RI6IonAQxCgTDCkxtsW6x4i8hBYBJZBGOixaN/RKFkzpoYaEhY7XZxlSS1pkArAjtR39trGSy+/zMYtW3CTSUZHRn4gAucVHbz3Xv7Lb/029957L8888+yrfq/X6t/gMWZLLbXUUksttdRSSy29tr7t4PmvPvo/eNtb38r73/c+Pv5P//Sv/n7zt6Kffehn+MQ//zPves+7+fRjj13z29//4z9+g/U2W2qppZZaaqmlllpq6XUInoMg4CMf/Sj/92/+Br/127/N//nLv0zCcX5gE3bnnXfy53/5lzz4b/8tX3v++cvbTUO87tzpllpqqaWWWmqppZZawfM1evKpL/NLv/ALfPazn+X/+Omf/qFI3Dvf9jY++9nP8uY3v5m/+KuPAPDQgw/iJBKtkm+ppZZaaqmlllpq6dvWtzwEm8/nyOXzPPfcs2zcsJ7ED0kAum/vXj7/+OO844EHKBYLrF2z5todXsc3OBTf41dY1A/tyb8Die99fv4w5I36/l9EfTfFcDWGDq6db/Ctntf8Icn4a7hT6ls47f9+r4+J1z3V6l/5LD8E82W+3dcS1Y0Ygt9v2/whqBvqe2kx/7vOy/rhSfe3HDzfum8fjz76KD/yIz/GY499jn/42P/Edd0fjgB6zx4effRR3vGOd/Jrv/HreMkfo7scspAxObLa4V2H6ygBuZrmFDqBpmfkGhJDKbrmIwacgGxTsZQ0WO80sCJQhmKqzSLdkMxZgh1nfRaLFoVaRLYu6SuFzOcMlpOCHWcDDAVuoMhWI2ypyNYk6wIf15e81GOz64LP/kNNepsRU10mviPYdkIRmoLQFPzduiS3Ha7ROVtns2VisEB2ycUOAwSTUKs+nAAAIABJREFU2LV2YAIrdBDMAxMUx46iZ3JPojFOApiCtnZYrEA2C0EAzbWQHyVfOhvv75FcqmNFL2DU+zk7vI/ei2NE6TacxiSmGotniZ9A46WqaAxdA1IJzQa1LWhLQqkGdgIV7I6RTAPAJWAdcBbYiEbn2WiyQBJYZu/Rv0ZwDlQVhUQ4kZ6Z39cOnW1wxzoYWqVXi2zLa2JEvQnjT4OT0ROFYxSfYi8IcGvPYHoGigkMIhRtpBZPI5jBaoYYKkIZGYScI3STIGBsaJDBKYVVm0IZBtLuYOvL06wQS9zyRcJ0L8qQmPUakZPB8A3G+go4RpJsU2IFCteTGBHsnwgQQKopudRhYkZQTwru+8IyvXNw8xlN9d5wySewBNLUr0eVO3JY9X6cIIl+aFQjNLchog4wTAx5FLtxCqHiMjRNiF5EUKKZfxOp8mksbxlFFciQXnoSVIBkIwZJ6JSwZhvMLcOFUaCB7T1FtnQWU5QRYYSXHsJuXMSQ6DJIJcFNQFc7WI4maixWYc0wNCMY7Id33Am9PZqo8fxRSNh6/1IF2vNw5BSkTDjyCsxVYWEaqh7IJHhliNrBNCB0NZZufEpj5Q6dAr8C00uaFnB6CiU3IdQRoBOYgeU+4GTsCSRYhqaCJAywDThR0na6tlOTChTQaGps3vERKBRhsAssF6fdA+NWeGUUUjOQkvB1H+6/Tc9S37AW3iqhHsIbNkPC1cSDHcDGYTh+AdYPaSLBofOwvh8mq7BnGKarqMokRqeAsEfnQ1s/yi0gIgvsjTDhw6pNMFuHttVwchb626ERweIydBZgbBQ2dsPoss5bL9IIv842GFuEjhzML6IJBRFwCihjqGF6L32cRC1BbnY9piqy8VSZg8u9zBZStM0t0SjkSHoSo7JEMnIw8xkMKVg1HeAnBKvPLKFwcCXUci6pZoSXMHGbku3nPLJ1Sb0tIuVJsnXJpS4bTBtW98LxMWhIFO0oChjXYC1BYSOwgJj0EDpQMmDbAOqVc4h
t6+DcAqSLMNgGL56HLUU4MaXTmQQaaaCM5X8amETI7RiUaWSHWH/mEkLlcOuCTGmUdbKAVVmkd9Tk1ihPybVZtySJMi69402WCnnyMwvka2tI10OkYYBSRAmT2w5NIVSDgamI7OwIphex43gGoRaotW3C8S2cpsVsf5rVF2pAe2yjvcAMIvcy7L0Njp3TdfGuTXBhGsZCuHMAzi1rYsL6PrgwD8Or4OYMXDoP2zeDnYfRGdi1Hdx5cGqwfy848zDswk3rwU/ArrXQ2wVuRlM2hAXtOW3/t+6FtoIm26wZgvIySAn5HEzNaN9iW9rnFvKaKZ1NQ3cXBCGCl3R9ox23OoUZdSIaqxDMAqMIsUi5rZf2BYjoI1k5BvhIVoFSSIoo4ZAsnQBcpvt3ky+lqGeTuM0lIImXuRnfHSQ7P0bEdkwmWCFzQBEhjoBqj+9jAujHCp4HpomsnZjy8wjmMQOFIIESZ4FLuMtfAFVG00xmgCpCNTHElB7UEQ6i6TG6yWb1uRnalruZzVvkqxFuU9H+lEGhImlaijwB0315CtNTnB8scsvROqZySUbQOx+SroesP+/TPrUAgcFCLk9xWZKdHwfqZJYmEBEcfCTDVH+BpO+SqDcRhCjycX5OoukzuTj8nAfWEqW6MesTUG3CyASMTsLMHLxyDqjB3CKcHgFV1dsmSkACqg2YXsKgfjmgTTabROkCVAYIzbXMdRbomF3i1IYuLnRbbKKAwtTtP1MosYH04hkSlocpLeA4gjpvf3w3lVSG7ktlIjqZHN5E78gyqDqRuIn87CksuaTrORkSjTldnvIohlEGShTHZzH9QhxzjCLFLtzyBY1tFRdBeShc2ucligAj+nLs6xaxmwuY4SiKJsq0EaqGKcu0lxrYkUlnOaJ7Hk6vE7ivGPTNhXgJwe6XfGxHx3+uJ0n46ttD1e3ZvZszp09y8L772H7TTj7z6CNs2LDhhyKA3rplK+97//s4deo0HSmDPgWlrMHL61x+8ukq0oBcXdJwwA4FniXoWJYYQHcpop4QJD3FaJfFtkQNO0ohLcF0u0X3QohvCXaPBHx8rUXfiCTXkAyUQkbbEywnBW8/GhDYAtdX5GuS0BbkmpKBBclyyuBSr0mxIrllXAdWpqWYzZsMLChGeiwiAz5+c4r/9KUFjKBBOkpgUCJTNhHCw1TTqEoBwTS2H8Tolyk6R0/EwXAzxl6lgFnIt8HiCGR6oFGB5gbIvkBx/KKuWPikK3Vs/yQqrDI2cDdbP3uMoGsYd/Y0FufjynKKFRSU5rRO6eC53gTXBDOtHb+bQAX7ETwG3AycAYbjRmNjfJ4rKByocd+RjwASIQvaKToScklY0wndbdDVBoN90NUJxYJ25H4Ajz0TB8/NOLAva+ciFMnKMZJYwDQmTSSdpBcuIJhDNCUKl8jowGCCwE0CMDm8it5FhbOk0+oXsmw/swREGKpOsjRGs30Poe3gNi8hk1mIYHJ1G9m6IDFbwwoVrq9wAsUt0wEoSDcl9YSBoaCZNHj/C8s00kluH2mylDXZMuUznzdRVgIQLHfkyI+twsGN82iekN0Y1DGNZUx5CrMu0XxgB5SpObCEeLlB0uV5DG85RhRmyZeOAjkku/X52kZh91Y4cQkuPAe4JJpHSTTPANMQZWnmD2D5UyAD6OyAdBKSDvR0auxcf69uZNevga4OWDUI73oL9HTD+CScOAW5NFQqMDkDvZ1w5LBuqB89AqYDzbk4fV3QWADWgKjo4PnSmG68ZQQvnIJMEj43Ajtz8OIscBswAuzTjUp5EDgcdxolJEzIJ6HX1ceeOAWOBcM9OsCVSuP0TBPOzEJ3B+xaB65Lsi+A3F1QfQzRMa/Z1l8P4L5b4ehx2LoeBrrhY1+CN94MzQYMrdY4v3wOPvsUbF0HuzbDl05pJnblPBzYAS+eQkzMYG1KgN8PahyGVyHa8uAFkNwI/mm4eYsOwofXwbkZGM7DooD6LPT3w9gFWLcTZk5C3wC
UK+CV9PexE1BcDfOzcT1rxg3QEiYbGbpwDOjHqdr4GYMtx8+y4Zzk6wc2Mnx2CZHMkGhG5EslIjePJTMIBUNzAYtZix2nF5BOEmVa1PIGRujz8tosbTXJ7eM+RqRotklSniTTUCxmTI1LWz8IyZegUQP2oyigUXyrdAcIA4Ed+4cStLXpjvGSCZuHEa9cgk1rNWorX9AItRe/ABtvR5y4oMveCaFRBErYzUNoHKePyQxhtsim08cprVmHG0jyM+dIzfViqWkGyhG9I50oYSMNmx1OAqVCztyaZdPxGdrqq0nVQ5ppjb8LHIN7XplEqAaD8xHZ6TGksNg53o1QJerFHEYpACwW+9MMXWjEHb1a7AenMTLn4a53wVQJ5pZg/83gvajTt38X1A6Dkde8Ze85uG2D7lB8saI5zGsG4bkjcMdeGJ6AsUl4y92w7rxm/B7YD6v6IZeF9Wvh/ntgfELbaqMB//QJeOPdUCrp+nbzTs1WlxKKRQh8XT+sOHguFvTodS4Dfd0QhljRK/FAyBZSlYvAaqiW0AU3hUmJcnEL7QsCRT/JyikgQNENSqEoICmQLJ0DXOb79pNrTFLrzJA5fwZIESTfQq2YJTv/GJJbMflM3NrbQBuGOqv94OXguYTtzQEVpJnH9j4FVDCC1YCrme1MkSp//bK/sFTM01YephoFhlAigfB8xldbbHyhRFu1kxOrbYo1SedSxOCMSWhElF0QhMyuLrDr8fN8ff8Q9zxfIegyiQzoWwxJNgJ2ToC7uECUyrGYs0g2Q7ILUwgaZEvTpEoR750s8uLtuyjUI/pOXLwqeK7F9Tgd25HEYAEEBIlezPo41D2YmIKL45qdf/ysrvdLZThyBmQdzo/AmO6UUGvCVAWDJiDigR6PMNUGlSGk2MR8T5HukQnObt7MxYIJ5OMnxU3tf8UGMgvnMVhEkQQuYhDym49d4PzOmxk4V0HSzdTqvfSMPAfUicQ2CjPnY7tZBtpJ1BcAG0Odx4iaOngem6NZHIrrzDSKJKnyOGBjKs19Vrh0LOq4xIpeBvJAg0S9hGQCgYcyEhjUQJboWG4SCZfOckjFNTh7i2DteYP+UoDnwC1jHpWMYLbdiNvx74C24TgOj3/uczz00EPce/Agf/jwwz/wgfNypcKP/NiP8rd/9/ccvOee1ss6LbXUUksttdRSSy19R/qOOM+u6/LBX/0V/tvv/R4f/p3f4T9+4AMsVyo/kAk8fvw4q4aHQRicO32KVasGW6XeUksttdRSSy211NK/XvC8op9673v51COP8PRXv8q73/MeZufmfqAS9/7/8B+47c47+duP/g2feeRTrdJuqaWWWmqppZZaaun7FzwD7N61i0Nf+xqZXJ4tW7fyyivHv++JGhkZYc2GjTz/tedZnJ3l7W97a6ukW2qppZZaaqmlllr6/gfPK/qXf/wHfu93f5eDbzzIf/2d3/2+JejDf/AH7D9wgPf/3M/y7DNPY5pmq5RbaqmlllpqqaWWWnpd9LottWeaJg/9zM+wa9cuHnj7A7z8ysv8ycMPU8jn/1USMr+wwEM/+7O8cPgw//PvP8bdd93xmvtWkzCbFmSbgmw9wpCKhK9o2oKaI0g2FYGjSHmQ8CQCRc01SHoRY0mTjikDIaEcY0uEBNuTmAryixGZqkIKsAOFHUGqqS7jC0MDfFNQSRjUs4p8FaxI0TUdYkmF5wiSDZ/QsPFsg8AMCQVYEaydDRBRiBE0kEYRaILwQYVAgLQcCJtAPUY7eWh0DOgZyIGmDXgSkkk08iEBbSk987aYAlLQzMN8E6FOA3VQPrmlGZRps2jZ9BGgUTIrTMYUeta+r02qXIemB02pZ2MToWc8j8b7N+J+WzU+R3XFiriCGpJX7hmh94+U3lxvQsXX22p1TW5wXVha1sg6y4KOJBQMzRWuh6iFGZRKcAXXV4vPv4AkjUmIwkUm8hBNAU1Mv4pd90ktF6g7DrawCZMWShiY1IncNIZXQZpJlGF
ihAFB1oXQwqRBpjxHsulihpDwFE1HkKk0yS0KKo5BYAsSgSJVlzh1DxF4eFaKZFMRSh+rUcV2U6B0/jl1H0NU4/yMABPDmANR1Lgh/Dhtts5OEQIuoDD9Eb2/nAYaKBwEIZFVgLBypQyEAFPoskja0HAhbUJHF5gGhu/DkgN9DlRrGhOYyemZ+KapP11Xz763TAhDjdWKIggD/VsYIwWrNbgkYakKUoBQIA00oiwiNFNYEbqsfAmE+pjFsqZiLDfAMCEloBbGNlQBEkRWHjN04vK+Sk0Hlmsxxg/IJaDsaeqD42g6RdODdEqju4opjeQKI00nSKUgXYBMBtJpuGmLJhAUi5o6YJgaFZfPatxdKqmvN5CFZFrjvJTSlIxcDkRKH+smwM5pJF/KBcfW95DNQFjWn05K75dJazRfR17v72ahbQkSCRhI6vvNJvU+qRQkK/q4nKPzP+/ovFYKyj7YCQiWdF2nhgKELCEIEMY8maU2QtNCCghNsAyDpmOx5BokGxKnUSOFjyErECpCXKzmLJAh6UmWXYPumSUMYWN4LkYINVuQW6xCytH37ViAj0pU9H356fh+QDPfZxEdaXCTYFchkwCR1f5rXUaXx4ZekCbk83DTsCZvbOvUtidtfa0wC74BZRXXAw/TX0CIOlZzGkP6KOYQQoFaRBGA8BA4COGgqQwm7vIkQlXIluoElkHVFiSFwwXXYjsR0koQGRIIYra1rsOWt4wSWUDhVpso04KoqevbYAoSg7rc24qwdgDas9DRDsO94OSgo0OTNVI2tLdBMnvF/ixT28kKfzidhmIbTM/pPM5l9T5CXEFMrtT5hHPle7GoP4PgymeprOt2s6nPJ6XG101Mg+/rel73dNmZVuyHLDQ1Rcaf1fgzAiVwvAVA4GVyWFUF2JwcKrBxxKeey+A0EyjfJzLTuNUZRFTBbkiEXAJsItPE9OvxdcposkYKKYoYSvuLuY4CnfMzcYbU4/ZEIVQQ35dCUSZ0O3CaQdwuGfFvgtBSWGEZmCW0s3huDhG6GIEgO7+IYBmnXiFTK7KUEQxUGxhewHwxi42+RqpUBjxyi0sYfo1sxaXpmiSbEXXbpOaa9KuImmmTbEoWk9pivFQGu26iDAtBRHJ5kYQHRtQAIhQOkEZiE7ohQhYQSEzfwVA1ZOQg7RTUG1ASSNZjND1YrgIFTdUI0G1E2QNtlYhKA2lYKBxMKVDYaOpVRGRmmehMk2hEIAS5ckBbBNJKIpXUPlVHWWhSTimOD4w4/+eQhkAaAowEqfIUiCqoCKEqgCSyM1iBpf09PuCgEDFBLNTtmJrX8Q8OiAqCJcLEKizPRpGkVsgA4KfSJOvB5ZgjMi1EFLJCnJciiaGmMJtzCKNAuuaAgFQZ7FChDPAtgemVMewklkzg2QIpwPzQhz70odcziO3t6eH+N72J//WpT/HRj36Uvfv20d3V9T0NnD/5yCM89NBDZHM5/vojH+GWfXtfc9/p6Wn+mTxnOi0iYbFvxGfbuSahY3B8dYKxdovekuL4RsV9h312nqni2xZP7UszPBHwqwdz/NwnJfmm5Iu70+w71UQIQU9T0r4s2XkhZPVsiO8Ikj6UciadNUXO08F5wxJ01SRf3+wydo/P0IuCbFPxlmMebqCY77TpGZvnzHCOkW6bhIT5nMmWMZ+3nW1SnF8gUbrAcs8Q+bnPIqSFEHWEGqNavBO38UkEVWA9gufi4GkOWK0NurcdlsvwY3fBoWNwx25450F4/Cvw4zthSx5yLuL0aWzvc0CIIMPqsRn81HY+f+8gN514AngMwaU4iNseX8MD+mF8TLN3yy4sl3RAEyoExy47LG34K5idRhz0DKOZlWvQHOjeeP+ULrxIaZZwrQJREsoSGnOwWNJByxPPwp7tcP4c7FwH29fCjnWQMhBnn8JQLwKzaJRZCagiWMAgADwCcSv1vttJl34bQYRVTZIvHaNt3uXUTWvonqwzP7ydRCBJll+gPHAbVlUR5IZ
RySx2aZKp7etITNikvOfpnThDcaaOYeZwRJLjQwl2Hh5haKbJE7d2UU0Z9JUibj/RpGdsHKO6yIWNfQxPhgxfmiU7dRpXuTiNADO4gFvL4TYOITgc51kCU5bwsveTaDwe55lE8z4roGrANqCGWzsG9GD5R4BZBCEwi5f9JRzvM8BJ6EjB2n7NeT29CEOdsLgKNir4N/fDLTuwx45hjNvwU7fAl4/C+38C1q/S7OVsGgoFWLNaO6dcVgeBCUc3wBNTOoAeGdON8YkL8JmjMDoO5+chEhAVgQNAL5XMe3H9z+gA2HMgXICedn3d8QU4PQ2+gh4HTtfR3O0GYOJlfgHbOwaMxXYWq7wLZg7rIOziAmwegK/PQE8W7rgZjp3V99nVBkeX4aF7wHU0A/W9D2g8330HNOaupxve/Wa4507YsxN27oAdW+EN+3WQk3Rh3Ro4/Ar89I/C0gIcuFU3Zvfuhy0bIGPAzz8I5y/Au+7Rgcnm9dCow55tsPsmGJuAu2+HjAkDfTA8oIOX/dthoQRvu1Nzrwe7YccGHbwM92im9U3rdYfgpvW6A7JjDTgm9LZBVxFGp6DYBY0zcR30kezECQ5jEGGFh+mZWmR+cBeNZIJ6IsSUML62g2e3prjn0DLF8Vfom/walj+N0QxomIK2uceRDJKTGZ7ak+Lex54j5Uc8v3aAvSebnF1vcecXjmJuPgMHb4OXL8DsBKKnhpEYg1oPGl9ZiMPn43D/ftjfD74H9+zDuLVHM473DsKOzXD3rXDgZrh1N9x7O+zdCft2wC07Yf8OSDagN6s7W6N1DBIIJnFqTUw1S6ryAsnqcxi8gKnOYHABgxOYnMDkFJY8hxWexArn6Zg6iaGW6ZvJMT3Qw+mhBJ3LCf7z+9p525MXiXJ9VHMOxfkjCALqmR5S1UOklkt4uR241VfIzbeh0haW9xWd9z97AO69DX7qPfC2N8G9B+DB98Jdt8O73gw/8RZtP/fu1+i5e98AfUVtfwf2w5efhgd/UtvgF74ED/0U7Pj/2XvzMDmO68DzF5FZd98HgG4AjfsgCBIkARIgeIuUeIiUxEuHSdlryWNZ9I4Om9bKmt312DsjW/bYM7K0tmVb9tiilhIpyZJNEhQp8b4EkrjvqxvoRnej7+quriuP2D8isiqrunFRJKXxV+/7qqsrj8jIiBcvIl/G+8Va6OmGa6/W2MhUUg+q29s0PhH0w219vXGn2bBqhR40957U31LCoz/UujgxCV/7O7jlPfDsi/DtH8B1m+G1N+HYSVi5CLbvh5EsGp82EbLxE8a+uyg20zi5Exjh2IWfpm3wFaCRBz/769z9/Db2r7+aumKcxNRWiqmrmNO/BbswRWJ8nIj3OtDO2PzNtPbvw3IHsThu9HctxchvY/uvAL389H1/wqoDPUaX8sAg0IDw5iHYAxQQDDO68LdITfzIOHAWlJw22fo5RAvPIOkl0/LrnFy2mYjdiu1OsnTf60Tcw6QmXVqc+fzLlfV8YMtuIpODfOdDK1hzPEfjyGHaTg5iMcDCnmHs6VPMyTVjixhLejPsXtXAm6tiXLbtMDtuWUXroMe/XF7HHS8dZmzpClLDDl68Bas4xdzBgzQMnyLqnkIygk8XknoccR3jXR/Hs1ahkpuxMhNEnJMoN4XbvIjo6LfhyHFc5//GWrQDXtoG2UsgMQhHHeASGDwAxFHyBsTgazjx9ShrHZb7ErACN3IJ0vNxrfl88+7LeO/rY1j5DAvGbdYOgJIxPBEj6rwCnEKoDIJjwAEzTkgCx5AUGVh0G6lsHstLMb/3USw1ChxBkkBgkWu6nmhuq06Hueah6BIELwIOHjcTyz+LoAeYi1TdSH+cyY47SEyexGcBezbdSiLn46YaaBh9xOjeHPJ1a7HdSYSycRKLoRjF9p8hPjVFdNKj1ZnLjpUJlvYouoZdlBBMJySLd/+MiGMz0NlCz9wIywaKb9+0jbBcuOYCnnriCZavXMk1V1/Nm9u2vSO
D5mw2y3/58pf5jU98gptvv53HfvQjLlq79uwnKv0cJBQVC9oE/5ZWVBOUVkQLNsUUpRXPRHhhJnOAkuWDz7QAUfU6OuHzyhmpOuGcaktUpS4qb/xM56nwIcb7WJUpUZEZeY55eJtedIjQP7LqtxAgQvkJr2qnCOVXhPIvZikfNeM4XbfqnOoRFV5FLZTJ8IFSHyHCWatOSIbu64xXlrOUu5olQSt0/Gw6cgalrVg5L3xcaL04dYa6mjX7IuTlnk3xq5RfVJ97Oj072+/qXapSZ6qPV2rmzalZVhFUSg9oK8qrelXFqpXcSvqpytcJzlVV6Sh19iYcPuBsK8WpqnZfKksxi37Jkq2r1pqS3as4v2ysVIXNEBXNQc1q5GbLQ1h/w+37NN/V9RSurxk2TlQ1utk+VugjK+2HEFodVXUu1TnYYzFLm6rSB9RMezbrsaEd1fetfg57q8L9oKk/S1bZsjM1NXEGTZ3ZLmxV7lvFafVTVNqCKttR2VOpM9RFOY/ivPpULb7Zrkw7EKF2INUMYz6rbRSo8l5VZerUmfIhZinNsC6EytbUnZhhAMRZ7Mjp+zwlZhkizDhKhtrM6QYx4gz1ElZCdZr0y2kITjfUEWfpu8Xs/fRphkhv+7SN2eSfvvlNvn/77dx6++386v3389/+9E/ftrT37tvHgw8+yNDICN/93ve48frra5NwalKTmtSkJjWpSU1q8o6KfKcvcPedd3Jw716++/DDXL7xyp+bB+37Pj/bupXNmzeTqK/nza1bawPnmtSkJjWpSU1qUpOa/PsYPAM0Nzezbft21l28lmuuuZafPvvsW0pnOpvls5/7HHfffQ9/+/d/zw+++91aDdakJjWpSU1qUpOa1OTf1+AZoL2tjb//u7/j/Xd8gLvuvJNHvv99/GCO4DnI0PAwK1as4Mkf/5iDB/bzkXvuqdVeTWpSk5rUpCY1qUlN/n0OngP58v/zh3zroYf4wu/9Hp/97GfP6Zw/+cpXWLt2LQ888AA7d+wglUr9XHnQE/z11G/fbFE++H5VYIn5NwgEKMdw6J2ub85V+vxS0Iqv8ByzT/9EeTqeyEfgucqkFYTM6OOURykAxPf1pHzlV02Xd0M3AaGoRxWKO1Eog9qZ9eaF0NdJxOFUTmO46qY1EcHzdYapClhBJ+f7XpBbkxlZnSHzHeDpyue7EVfnTgyHa+I0NQSVwQLBhH5RmXzpMFXGb82WlAIsP1Refih/ph6EIsTaKQWUCROcFtTTjCAeU5el4Ckx8978Ivieqki6FF8W6F04csQX5awLFarfWQKOFKa+HRR5ylrgo8ihcAEfTzrmuFDdCr9cX0rpuveNXgVFFa5in3JUT8abUT06KU/rkWOUvuhpHSu4+ttXmpyignxagIcvvFKb0Ql6ofZoLuKjG8WMOq4M4lOltJxQXQfX06g2Jb2ZwVZBgI0X0kGFvheFvq+KQMlwcJeotB/K5NUv6s2FovltysXzNWpRAQXT9jxHX3s8b64149bK5VEdGCaqysKrDhxTswRA+qVoNyWKFeUQtk6leEbllba6rq9NSelIP1RX5Uy7XrmtOV5g3/xyYDZoaorlwbSvsWdRBbEIxCUkshD1oOjqMgnuI1QNujGdpv1XbxPh3T5Khm2aqlL28HY/dJ/lziHoO5RQjDoh86LKtlgJhWMXKPcKAl+afqMUFOrr9nGmvFfXf3j/eLZsI6dzM20gVekF1/INUlQpo6Mm+LXgGGpbqFx8o/8qZI+UPyPIXZU6qsCwuebjMHsArk5gyPVRCN1nEu7jVKiO9C8vI4yN81AUUGSN7Sy3R8/zSnnwZbaqPh18kTM1qu2FL4ZMmj5BMGC4bftB32qgA6VYRnwcV5eDCsyUCvcr5b5BVSmj65eGDfgIHBWKmkSTaJXll1Xd9ktlUdL7VnzeAAAgAElEQVTF4Bwf8nFf23VBqE9UMwN0/dMHBYYsaeVGM3ZRGX1+0P15mcr+X/dDs40jfJTvavt
bbXeDX9IFynY2nPtqg+jLdFjpyjriG7slvCrbXtYfX/ihdMtphHVPDy1EqF83xea+A6i6c5FVK1fyuc9+lv/9c5/jK3/2Z/z2Aw9g2zNjFwuFApdv3MgTjz3GSy+/zF133kkkEvm5rj04OEj/y60U8harBh0u7inQNuHyw6sb+MG6BBt6HRKOYigaZf3xAlJYpPKKfZ0RLjtW5JqTRWIK1h7Js3zIoXXS56eb4zyxJklLQelzW23+/j0NNBUUYymb6/flePV2jx8urWNCSY4sifG+bVmmsglsD9b0OMwZmebZa+rZPy/KmgGbf74uzuYjDjs2Wsi8xRV7c0RdODbfYl5/LxOdSZpOvczUnKuJZUYQ7COSXY7kKSCBZKtRwFUgx0GNQaoFVnSCSsBNl8KHb4OP3QbXXwM3r9MYpEUL4Os/1AbPk6DqgWYEw6jiAi7d/00ErxgjeB0a69MPzAVG0EiaSTSmaDUwisbkTfD6h7/Ggj1PkW35a6K554FL0eig1QQII40VG0bzHfOUkXa+RpLELVjQCCoHzUIzpWNRONwN8Qgc6YamBhgZh7ltGv3VOQd2HoN1yzS7d2oZ0AjUAYfRPEoXW3UTT//MXD/OROdtJKdeQbo+w/MuoCXnUTfcR2riaQQZZCHOnis2QDRFy9EXiLpHyCRX0TLaR8T/VwST+HThRFLsWNPCLU++jts2H+HmGels4slLU9ywN8vSPQexs330XLwRX0K6XpKrS9GQcbGLLpFMD75sJD93LSrjEFFPoqOYdWc51XgFUcdGegvwuRaJZ+7hPeQbPoVdSCBQ7Lzgc8wbHkDQB3QBcSL5PuAocCWM7oI3DsNwBtrqoGcK2nOwuEUPZFJJGB2FFQ2wfi3cexNcsAqmM7D/MIxPaHPz+E/h5TfgL5+FkQPwhe/Anhfhd/8SDhyFZ/thVzf0tYM7TK7xG0QKvfQteoDG9B5gLYIDxIs7jV50gJvROtbdD8NRaPRgQmk288JG6O1Ds0V9oJVIodvo0/sQpcFOBOjD4xPIkeOQ2YzoewqYB4VRGBrUbOSiC3t6wMvBZcs1cu+Ki/XD5X//Btx6I7y+Qw84ek5otFxHh0Z6CaEZ6o0NcPAwXLUJNq7XCLArL4e5c+GC1brcVq2AW27U6LDVy2DdRbC4C5YthqsuhXlzNILxuVfg8nXw1EvQVKcxfweOwYWrdHledyV0H9f6vvconDilsX+PbYcPXgXfehwuWQmPDUNTAQaH4HAa2uuhtxFy/UAz6fYvEc/uQuICB/HZgOQFHPtetl2ymIu2P0/bwGvk2tZTtC1ufex58q1d1I8+jiBPrvEmYoWnSBR7gOMosYLEaDcb3uwhyutE8yfZdLiV1r6XWH74NSIMwdxDunzyaWhKwZUr4aq1cFELXLMBbrwEPvAe6GyD22+Ea66EEydg0wYYHoPte2DlMvjOE3DVBnj0X2HFEkin4dQQtLfDi6/C0sVwqFvXbcGBnqPAHHyuZWzxB4jkolhewCEeBS4A6oGYsRM2cDkwgseNSI4gcOlbfj0L9h5g2dFu6ide58PPdGO5Lj1r1tJxdB/SURQbl9E48m/8+OYvsPLIj4lmphFMYvu9nFi8lJbR72l7+dl74c7bNbN6fByWLNZ85mBAHPSRdXVltNzSxVo/Ae66TR8vBNxxs24boHnQjQ16+4GD0DFPs5n/bYvWw/0HtK52LYA/+HONmzveCzd9FD75K/DKz+CJ52DTZfDGdtjyIqxeAidOalqO78HOA7D7CMQkHD6Bk7kLy3/D2KmbgE2mj1gD5HDEjdj0ASdoGxwHBoBxPvriKQRTtA5KkultSMaQxSKCYXKxS4l6gwhG8djIFx+4iJWZFpoHJIXGm7AKrUgEkwuvJZH+FpBj9aH9wB6gnZMr/4aG0a1AEje+Gcu1ObX409RNHKJuYj8QZWLBV7EmW4E2JEPECsdMXxdH5pbRkEsQHz6BlUvjyRZQUSL+QeLp41z7+gj4AoH
L8qkWmsfSKDeK074amckjcMh1bCLZf4CGnETFUqQKcPnuk8hcjs6BIvVeigYfVh06TuOplxDAv33oRqItS2g+cQSLDLuvu425PScpJlfi+W1If4qGiVexp2HHhgv4xMev4N6D84lPbiM2/RLkdoDrY7EaEjtgYBSKttYxt4hGmx4CmkA5CI5huS62q4ABBA146lLs3Dj7Nq0j5cGPrm/i4oEosckhfDvOP97TxcY3D2L5O83D0m8iOGXKbiHQisbFHWXuyddJTE0x3bqIeOZftf3lEIJx4BTR7DPAeuB1NLZ2rhnXOICDZKk5VjC46qvUj+wFJiimNhPLDCOZYu6JSdpPPUPDyJOmD2kE1hMpDCNUlqmGjSTHXyfqPwu45Op+FT/eRCHZwg83pXj/tizNUx62gqUn0sTTR7BcRSI6l7Ypn5+sT777nuewHDt0iI/fdx8rV63i4ar5yw8/8ggXrl3L+vXr2btvH6tXrXpbry1mc8hWbRVndV+Hj6sGwYiKp01O84w3K8hInZ78pWbdIM7gvT1NpisSF5WeKf9sJSfOs6QD332V9/iM+TxDuYsz5CHMDxTngS2b/UKV550VxWSeaq2ZuDhR7RWcrfzF+WjsLEpzBv/B+beGsykp51YHJQ+dqHb5VSWoznCHYlYv1fndrzh7/Z4VbyTOHfslxCzlGNL/8IfTNe4z3N8MXRKz6/zpqlaps+q/mjUZdVorIMpcs4prVaiOUufWEkWozGZDtM2aZXV+al/prj9P21B1z0r3H6Kq3GbPkODMPQLnqWdn+/8tpKXOw/7MpkvirfHxxDnbPVGZZRHqh2f0L7P00GJ2vT5rYQlxmuoTFeMHFdaDWcpHnO7t6OlsuZp9bDEjvwLyZ61wcS4N7zTHh7yw59XcxDlc50x5U+d1vDp94Z5V96tbsFCzn/eOourORb7yx3/MpZddxn/4xCfYuXsvX/6j/8xvf/7zPPQP/8DX/+Zv+LX77qtNrqlJTWpSk5rUpCY1qckvhdi/DJn46L33Mm/OHO7/+Md59ZWXGB0Z4cmnnuKqK6+s1VBNalKTmtSkJjWpSU1qg+dqGRwaYnx0lNGmFk729bF2zZpa7dSkJjWpSU1qUpOa1OSXSuQvOgPDIyN8+oEH+P0vfpGv/9VfsWfXDh588EEuuPBC/uc//3OthmpSk5rUpCY1qUlNavJLI79Qz/PQ0BCrV6+ms6uLA/v2ETORxf/pS1/i1ttu4/rrr2fL00/z3W996229btESjMhyIJ9Q4AlBv2UmiytwpMA3k+JtF1yhQ0P80Nxxy9B4HAmuOTbY7woNq8iZxxPPggkJC6QgY5XPK8HchEAJgWtQNyNCX9OVkJMSX1goofCkXkxeBQFYQdCRCj8LBbP5AxRRWu/LTcKpUchlNQ7KcWcGmVhW+bHKEgaXZQF5lJTmBh006SEDxM1vm0qOXs5M0XeAKcDBlRruo0SALgrSwvwO8hyeoO+H9gt9GSFAKk03sNDfMvgWYFs675YFlizfU3CPKDTFI6z+AZPNQ0fcB2Ws0UquKXdEACjSxwS4nuC+VUWgTBGbIggoSp0He1yhIvqehA+2oxC+RnnZRVAxnbp0Qbg+VsGUoW8hi0HQQxgD6GvMkggHVHjAEOCikGhyScEc4pn68MzHqSpzR2fMErp+7ZQu06BcZUETSyxpylvqMhGB/gTblT7HMvulSd8y9yB9U7VZUyeB7niV9V3SJ6PvVkjvg/1WGD04hia+NIXOC+PqgnQnQmk7Rl9MWlIA04ZwYPTHsvR9OW6lPgWsphmBeqK8/ZwIQaHzpSEY2DbkCwaTp8B19P+eD7m8xoY5rkEDehozlivA8Sl9L0zp3zgGXTYJgz4UHcimYarBlEkOSOI7lmkDwccGBL6wyFsRgxS08awIjoygsFHSRiERKHwrQhngJVHSQgjL6KD++DL47er0FWVKiRRlnQoXSRjJJkQZ4ReUuZSaHxXQKCzL6Ko0tiBUfzLQIdM
mcFDCQnieyVMRTarxdHmRNXkvGj0KMFp5IKbtcSl4yi/dqy8kSlgh+yVxhKQChYmHL0M2SIbalGVVBnKXdErN3F6hc6qybERgs0J6VuozTFrSXCtoy5YFEdu0N1OGwpSrZeuyDso0oIDYlj7XtkD6xg4WzMersllZk0+PchB7tqJcNMPPo4y2K5rWKhBockVaCm2qlIPrBiF6Dr6wQnWVM9ct4ovA7idL9ahRbxk0jUHjEwWe6bcy5vxBNClEIYv6HiQOSkVMHt2S/bJx8fCQCqyCj+24qKJC4CJwkb7+X+YN9rBe16NULh4KoYJ+R9+/QuBYEk8oLGOr9RjABaWwPLcUbi3xcIXU+i1EKG+qpJtCyrJO2uH+V4XspNL2D9ekUUQoH4mLEhrkUzSBk/a0h5cyQcDKM+U7YdIZJCBXwXyqAw/Lup8N2UA/5Nf1zXcYfxv0EwXd95Xa2FTItpRtl/4/iybnqFKaegwiTV9ojlMuvgAnQNSWaK763n3lYRX1dl+IXwyqDuAbf/t3fOjOO/m9L3yBhx96aAaqrmPePG659VYefeQRvv3ww1x62Xrmzmn/ua87ODjI2n+0uGNXjmxc0jrpE3chIuBXdmYZbLLJJiSrhl06Rz2KUcH+JTHW9hZY3DtFFIuphEXTtOLZy5NccNwhVhCM1Vl0pl2W9buMN1g0OD7X7clx7aECRQvqhmxWjimuPZbn3jemGWy22HQwTyEimT/qEZscZrStiY4pn2X7e1kk6lh9okhDQXLL84doObYXK9rCokNPIlQBIWwSmR+jrBVECi8CQwiSCN4ABKKhERKXQPQg/PnnNUT6dz8O//lzcN2lsGwJXHgBHO2G9lY41qNRZK9vg+d2wYQP3jBwA/AUrv0hot5LUMKcXcyb1/wOnSeeRuOHFoZKeRq4D0E3sBqHj2ExyvzdAsExItmTCJJmoLMc6EFj6/LAHNOI55l0NhilXwz0Q7YFTh6EE+2w/3U41AXbnoG9C2DnFnjjInj1e7A7C0cPwwvdkBmH/mEQERhIg1qEz60mOrobjaPK6PS5zjT0E8SmWnC5CIsJOvp2IbKCiHMYiDA+/wPE0z3svmQVC/vGSU304tGK27CQ5EgfFsdwuY5r/+KjfPLxHlb0bUW4Mb75H9YxPx1nKiX5yeU2nrS5sM8h4tXznds6uai7iGcJXrgkSXsxxR/+xhKW5TuxZTtPb25i7f6tSHUSxU0U4g9iu0fYe8E9zO/tRdCPQOCzmL4L/oi6kSyykOfU6huIpBey79IulhwZx+V6FE1IBA6XY5EBTpp67YJcCiYGUPwqIvM0HBtDvTmIeGMX3sFPIseehOlJbYgm09A/AL0DMDUNB7vh1Dhs7wZnCp7dBRODsHsYGIexLm1UVRdgo7geWbAQtJBpXEhDegRoQ+MRx0OdrzFyhSk9qEkLYBREAo72GH2pJ5f8GspdDzQhyRr6wRoEw+YhcBmSCUYXPEhycocxppdA5gD0zIUDO+F4Anf8k8hcP0wdgYd2wTypcXAXroR9ByEzDck4NDfCoi441g0vvAx79sFPXoTFC8ApQmcH3HIf/NqHIZ8vD/Cqpbsb2lphcgp27obNG+GTX4Bf+SAMD2vMnfLgkos0n/2KS2DxIkjF4drNcPAQfPhD0NYA990GH/sQ3H49vO8GuG4D3PweuPVSuPVquO9OuHgJ3H0rXNwK126A9hiJ5j3QfQA4BqSR7AHGsP3nWHH0u9jeCwhep27sMeae/A4WPyE5vgXBcUAQzT0M9OGLxQjexHbzWP7TWIwBbwA5IrlRLF5lsvUjRHM/QyzKQW4aTgzAc4dg31E4PABL2vUgfzoHF6zUGMT5HZqLnYhBNKbxag11cOiYthEHDusHlZFR+MnLMDEBew/Cs2+Ck4WX9kDPCOzeS67xH0EtJt96MXV9u+hds4FEOoXvL8GN34F0JekFD2BNzgEWolhDbs7NRKeHKdRfjiquwGKQxuFBBBJfxFHCJsK
rSIo0jhVI5J5gYuF7iY8NYvk76JxaRCyTBaYR5HDYxJyRh0zbc2HjSo2a27VH2+PpafiT/wHvuQZyOXj2BVixXN/X4aPQ3gb9/VAoaNzhsW5obtYPVXv26f1TGTh0RGPoTvRCNqvRds++oJF3fSf19QoF+M734eRJ7Vx59EewexDaY/Cjp/VD2vgo/PnDsGEFPPYsPPQMLGqFP/ouHOqFwVOw9Qj+2KVEvEngGvINn0EWfByxGosiIHC5gWJkEellzaTGjuJyCfnGO7ALIBijGLkUp3UpFGIofw5COuSSVxPJFbA5QiH+Po5esYkrjxfobY/w/HtXsXDAJTU+TCG6gYbRp9FYymbT9xxCcR2FhstJjlsIoNBwDVauiOUswJNXgteMxEMW6hhdfhlOwwpy9ZuITdaTb/0MMteOYh77Nl9A82iOE2vXsv+iBbRmkgivDdduw3Yn+affv4FlR+ro60gx2FHP925bgpOMEY3Nh8RiRlvjuI3z2bu2lZfWJ7lszwh+LMmbG5cyZyrOSIuNY0HHyRzSjWDRS9dIG1G3jmeuWcuSIzE6j75Mrv06pFPk2fddwpLDI1h0s3vD+1h1eJRff20XcqqIF1tAMVlHNL8HxUcRPA99PVCcrx8KigPA9cYGTqC4CyUKCLJ4/AY+i5GcwOc9TM/fzM4rVjBv2CPmKpYPO3QMFXjkU8uYe0JxrDPCsok6UE3IwjV4ooNs52/gydsYXfR+6kafQCNhG8zAtkBi6knA5WdX/zELTvzAPKzGzGD2ReAO4GkErnEGXYDG3nZxcvkaYsV+mvtPoCjixj9Eavx/IkgjeAGLA8bev4Di0wj2ASvwWIAA4oVeJDvoXfWHNI6+gnASFNovJlKYJtOaZGl/AS8qyUclw00J6vx2nMaF/OSGRhwFMcf/xXie12/cxJHDhzh+7BhNTU2nPe7Sdet46sknuf9Xf5Ubrr+O5557jrUXXvhzX196IJ0yNkl7AZX2/FFeTyN4GvKMo6z0NBL4SAPHh6+fd8JMOhFsL2qwjHRBegrpgVU0D/VeiEyjFMJXCKk9AqKo9NOrp592A1at8F0gYvApfpV31qvysAoQXtkbaIW8aIE3UPmVi334ftlLaDxI2hspQ14A/aTmSSv0lCipBFhZpfLTHiuJKOUvF3rKDHtjwt5TEUq7yjNXOr5Y9a1CT6UuTJunb8cr71bhPFUjjbzQ02qQvwBz5Jg6MItEBF50MNT02ZyJgh5znMx52v8h9b0JpdXFFeX7dUXZgexJ7d+YtKS5ltbD8tIpEmXp5utX4AoN3D/wlKNASJRlGcSRMHUjqVwBxQ+VuwjVvfHWqiJM6rTwVXmxE9czC56Yj2MWshCeuU7RGEuP2YFGQTlW13O4/sMeqmBBCWbRB+0dU1KivOr7oCI9X1ZvI+TxxugH+n6covbsBm2j4Mz08PlmcQ/f1wO+MFJveOTsRinwUger50hpFr0Q5cUoAs9pyUtYhbuT4XYuQ+3evIGJhLywFd7YIN2wh942D7MKSJtyiYUeZsILIAVenXEgZYo98PIXQh4sv+Q9UuUlJkILM5lFYqbdcpn4fug+jbfVtKmS59QzC0IUipBKaK990dGrWAlff7tmcRpX646SxgYKaRYx0d4oFW47IuyFx3irgrxYxs5r3RSlBUP0fYrASyalabMK4VYtsIM05UNZh0of8zZhcrrsZQ4voBKssBRe0SHsifZDC7n4/sz9nmfKzlxHmbcZKK3HOfNmzTWL0wRtYSynyz+bg0zO3EsG8vGSLValdhR430WVB7G6rwCM11AE7VWIUHlXoV+RKEti5bSnuChDtk0G/ZQwdj5k50To2iLwNmLq3WwPFrsqvTnQb0sgeOMrzSJrEk+YfIby5hgvvhKgpKAgJZ4Q2oYLZbzKepsjRbk/Cum4qOgDFdLT4w7d30rNYTd9tCfDSFyB5frY4x5e0th9US4HUdF/V73Rq+gLwmVXLi9PaneTNOsZCYV5mxJ
YcZ1GUJZKWrrfkVbIXsiQB3gaSJi3uuH8BG8IZJU3vKwziqCzNG8khGXOqTPn5EvedIUVhsaG7LwX6ie90hsaEfSnFYueyVJ5K3Ppd3XO8/d+8AOWLF/B2jVrON7dfcaBcyDxWIzvffe7fOn//L94780387Wvf/28lvU+l7ekZ9x2zief4yniHUr/tOeKM3fYFYPttzMfp7vQuyHi58z+mVjB4hdxJ+9CuYrzK1dxBr15J9vR25KOeItZFOdgQMRp2tJb0V/xi2k3s9qQM33O1D7E+dWYmPkAes7tXLw1M/julOH5JCHObds5ldFbuNY55edtvN932Wqer0qInyMX72rpqHdf5cW5ZOQsZ4t3JDfny7oW5zYCCO141zzPH/mV+/nxlsf562/8LffedeesKwqeSX7ns5/hg3fczpVXXsnjTzzJk088Rk1qUpOa1KQmNalJTWrybso77nl+7oUX2bj5KvpP9vH4E1v42IfvPe+BcyDLli5l586dWJbkik2beG3r1loN1qQmNalJTWpSk5rU5N/H4Pkf/umfuOfuu9i4aRNPPP4YV1256edOs2PePB595Lts3LyZD9xxB498//u1WqxJTWpSk5rUpCY1qcm7Iu/ItI2T/f18+ctf5ic/fYb/8Zdf4/6PffRtTT+ZSPC1v/gLLlyzht/73d/l0MGD/N6DDxKLRs/ticHTE1c8EyMTBG8F24JPgKbzTABAKbjPV+Xpdr7e5Arwg0cRE8OhhNDT7s18d9/g7kQowMwX5d++KAcOKAG+kHjCQgVBCaEAljKqThp8T3WAlwI/p+kEngfpvA7+UCYgxLbNpCxZGWgkZdWEn+BGg8CLaaDZ5DcU9FMRhEAo+MMEScxA6VUH6wXfQRBAgFBz0AFLCcoBSGFxqtIIZ90EH0izvxSD4YeC56rzHgQfBoFTinIQWTjYQJ/vCRH8p6O1Szg4UwcSE6ijEPj6WF/X+3QoQFAHjBrdEUEIqGJYBKUiQphEEwxhgh00ricU/Iqng0/xjA7rID/fIJA0isk3qXqhco0AUTRqaLqqjHV5KBHCiAUIO0uG/g8aimeibN1QmamyfpbKuDx3VpUwggH6qRpxFRwfCoaSVP6uqCkV2jcNjKCjvRvLwUgz9EDXbSmAKGj04UC9cOCeCG3HBLBN5k078suBblBG3FUjxUpBcCatIKZjfFrv85QJ7DWBdb4J8vXN7yCYTKGD51TomPBxrlcOsAsCIIOAMcc1QXtBmYWDePyqz4yGxuwzPE83GbkatUloXq3SeiNCOiVnsVFhPGVwviCETgzqxavcXwoCkiU7VelHUlQGQ1LWVYOjE44yWK5ycGA5DMsL2UtlkHWyyoZWf0wgZRhRp4y+OF4lvg/KaLkwWjQcRBoOJrWssq5aVqUOVwScVn0LE0xO2T5VSMVvNYsNzpdtrg7DrLBPUvimzw1vd0N2IpR6KY+6nKSvg69VqK8Wxr6Wbfi0yUPIHpewlW6pb5COQvp+6VxpgjKFB6KgSkHuGjXn4xk99YXQAYMIhAruxQehEJ4JJFcGKhDEbRuzJ1wNAxAKpAmWDbC4wgvfvV/qE6Wn0bVBnSij416paXmh/iGwIwHi0Z+lnpxZ+tDwNcMB6K7pZ8y9m/+VANeopSvMWEiU0xFhs13S9QIaGFAs5cedgXAsBwWKGXZE22a/Kpi3sn+oHGOoim2ywmfsB31OCN8YZDfQWh+h4RIhhLEv3wFU3fYdO7jl5pvJ5PM89cQTXLlp4zs28t9w2WV89CMf4TOf/x2+9a1/5lO/+ZuIswRADA4O8pOROVx0KE9bxufVmyQNQwLLh7ir6Eh7dKY9OsdcGnI+C4c9Fo+4KCGIO5JTTTY9HVFWnSgw1GwRcxR1BUVcKdaecMgkJHuWxOgcc7n4SIHJeskbq+I4lmBe2mPpkMPBrii2hLnDHgvHPPYtiTG/f4zWnGDl3m3E0n1Mzl3Cwt3PM7/nKNF8hLj7LGKqjgg/QnKYRKY
X2IFd6EZwCuYnENPH4D9+ENFWB2M2fP8L8OUvQGsrrFkM12yG+Z2wuAsWLtCYut37YHwcvvJ3YPvwJ/8CbRLGeo1CDgIO0jsJRMlHP4flWQj6md9zCEHUKOMhM/iKAL0IJoAEPguJMArsRXASGDGqnDEDGcsM2NrRbF4buNzgxZaQa7icQuQ+tt56N10Hx/B4L5IRNJczQb7+17CL21FsRHAUWAfsRtGFyB0DlsHwlK78qQlQSRSbsHgejcYZDQ3WGxjv+q8k0nsBQb7xVtz6Dn783nWsOngUmz4gy2TH+0n178dWE8zLNePFG+lbvpTXrliBpWwaVRtyKka2ZT4bVCOtmQixyWHy7at5Y00TuYRgVV+RywZctq+Mc/WOLF6snoc/EuPigx5dgwUydRbNGZ+7juRpGC0Sm54iEk0wr3cfvr0O4eeJFN8A+ujoV7ixxdjebnzRSXb++9m9IkljtplU5hD1w4coNK6kzlfEcxFOrr6AXHMd9SN9HF13M42nUiiWUIy/B+kl+Le7v0jH4E3E86/hik8iWGIitZcjVQ8qW0AcS8Ib2+DNIXhmO7zuwv4C9Pgw4EN+FWRbUXQiWIzG4C1Gowg7gBTQieAUEgfBKMk0CCIMrroXX83F8iwsbwpoNYavFY0sajKGMgbFMTLtf0o0uxWfBRTja5GeRPgekiweF2AxQr7u/RQaf4veFbfRfOoUyfRhBAfQqMJJNDZxDMUHEezCYi/QB4NZWD8X+obhuR0wPgY/3QZP7gaR1bi6hnr41yehsR4eewYe/E14bStcvgEOHoQNF0NjA3zxv8I1G+HhRymRAf7Ln8H1V8P/9ygs6NDtcd5cPdC6bDlcuPliHqUAACAASURBVBomxuHKKyBf1G23pUkPdtdfqrFil67TfN1lSzTuLhqFtWv0Q/LSxZBMQuc86Jin87p8qcbdLVms89XeBgs7oK8fco5+dookdBF3tUBHG8TqoLMFZBKyU+URATnzmTIPufUI1Wvad/Dge8oMZmxjAzys7BIs9sDIdmhogOeO6GP8OZC/ELb9DI72wcETUG/DX/0Y6hW8/AY88yaMDMHDFkzthKe64MRrsG0JnHoDhsbhxZOwuA73XxRyaD9sG4KRJpgcQKmPEc3uwFdziUz1IslQNzyMdH38+nbyLQuITE7gx5sRxQKWn6ZYfzF+NIqcttm/eR0q2UI03QJ+nDevvpbO4/2AjySJ4ASWewxIkpzYhuXvBwSx7CQwgOAQmjFdj+BVoAVYCF0ebN8Dr22H9RfDPz0C77saJjPw7Udh1XLYdwD+dYvG9v30Wdh7QNf5P3xL4xMjNnz9b7U++D786Vd1/aYn4f/9psaSdh+HHz4BdSk4ekyj/pIJGDgF6Sl9zKFuWNQGDSn48S648gKd3utHYe0CiEfhwEloikFdAobHTJtWSCTZ5v8E+QIyX0DRyuiqBdSN7kKR5MCGD9K9qo3lOw9jeXmK8fX4sSSDiy8mlq5HiQh+/VyEn0c4LhGvH+XVUZyzFJG3OHrZ5cw/OUUqp2jLKLpGXcYb4zRNSLDjZNuv4MSKW6gb60R6RSwUg10foLF/iJ3X30DjRDOxMY0o3X3NRp6+cT6LhxPEJzO8ePt7qZtS7F0W4+X1DXRNdRLPZOhZsw4Vb6aj7wSRyUnqs3m6ThwlMpmne92FpHI2VtbizRXzIWYzf8TjyPwIK045NOQVUineXJOga9DlmStSTMUFfa02c7M2L19Wx/qDRTxb8ObqOPPGPdoHRnAbu4jkBnDiy9hyXTu3vHacwa52GiZ6iaQHEI7HvEKKyGQaz15C03QRIWLEc7uRToaeiy4h6gmS6f0oFhr0Zx+wBLgM2AWsxeNmJC8gWAQsRnAM6DKozwK9F36a1MQEbVOCyZTN/ME07cPDSNelIx0hnZBcMFBkpCnKgiN7gQhO8xIGFjaTzCsmOmO09D1ibMS95OfdSN+Fl9N0cieQoev4AIIEyCFNdWIIiCE4Yfr
moI8eBwYRdNM0tge72GPsdxtO7ApsZwDBNjTXuQD0msfiEXO+QLIfgWWOGSc53ITFGFIdxkonmFi4isa8T9tYkYbxCeoLEdrSEwy3NZJL2VxyNE3DtGTxcPHtm7aRzeX471/9Kre9//3c/2u/xivPP8+cOXPecdd5R0cHe3ZsZ82aNSxZtpznX3zxrOdM23r4FnEUhYj2HgYFYTuKSFFhu9ppYbkQzQdPVcYjbA6WvnkC8cFyFLZjMO1S77M87WEsWvo8y9PpFq3y85TtQEHqjiiSV0SmcoCj4e/kiRRyCE93UuUFRybNx0EzQh2wFdgTEItCxHhfEnHN8oxEND80EtHeh2hU/+952huVK8D2Ps1QnThFeaENYRQWNOxcoeIJNOFQIRgLPcUVqcScTYaeYIsEIPOyZzG8MEeQRoBQi5Q+SkTwZJxsJKF/E4USZs7Ct4OFGazQdkLezhAUvuTxtMx9FUKeH+P9t6Nl774VQVk26Ugs9ITu4VtRhNLYn8i0B0rgRCJMRyJ4EpARfCIoYZGY9A3qSOPiQNe/7UA8rYwuaQRSwdZPwrYDlnkbER/1S5hEyw28lcEbFg2jt8kYL7T29Cgrqp+QrQgCH8k0SkjsnA/CwrMsPKkh8q5t63K1IviRGErYTEUSeFHt6fdkIlTmETMYMtpbHIf+NKRHjTEK4+iC+rDNJ2K+A4B9UNdeSR+kQUy5VlTXg4yE0gk8BmqGp9iz4+VrChVaQEWWXq55IoZvJynacTOIy1K5kIIKed89Y3jz+v+I1G1kPA0jGTgxCsUhmMoar60Do6Zc+schGoFsXg9g8gXd3jxPs3CVD+m0xqp5Hhw+rs/LmTdDjmMW/fAgHqvEypUW/oiEFqewy21aStPGZXm7DP0fjeg0bFsfF/6ORstvEaCMq4xIvVCMbRbAiFhVnsEAP1cIeX4yIQ9gsKBIaIEMfONlNG/CPC/0Rimos0mYmNYDx1weCmMaATgwAfvGYTQDOQUTWW1kp/L62mM5SGd1vXo+yssbD1ympC8qeLuiFJKCblsq8JTqxV2UlAjlm/JQ+CZORwmbomXhyQgqEkVhUbCjoXdXAZYxY+5lnPJCDDmzLwdm0YyyjbJ13Q+Pa12SUj+wWTbkczA0ous8Mw1Do7rc0pOa4+z70D+kWc2OA8f79YNTsQjHTpoFdArQO6j/z+VgbFIfky9qrF/wxsJgU/H88gC86On6D16pBgumoLRn3ApQZLLk3fUj8VL7VljGBmo77FoRCpaFLOqX34FddG1jI8IecVHydRqPu41j28iij/QU0SJEsnoRK2XZICS+FcOJxFEyWrILrh1Fui7FSBRfRk1fKnAsm/GIjW8WAcrZekEjx4KpiMSzgt8RfMvGniqCp7AKLpF0EeEpHMtCCf3Bh6JlYfkaaxYp6jfLSghytkAqyNoa++YCrpRMRywijn4zGIwdNNHOLvWNWUsSG/dwrQANp21TNBugUyNYWdfgd/UbW0+aPBFGyqoKu6h1OlrSXlVaXMQr9dGuFUP6PpGCX8bh5fS1opMKXwpieQzKznjGpVXCsJa8u2ayg5IRHCtW9lBzKjQGcIzdkMahIUJvELLmMwIMmHY0HnqrY5XfeITXBigtiOWYcUkZfarLUQBZBDnT/+o3A9L1sF2FXfDxpcQXEE372C5ECm/TtI1sNss1113H2Pg4Lzz3HCtWrHhX554kEgn+8Zvf5KFvf5t7776HL37p9/mdz33u7Ul8ltkFs/q2w69nz0PUrHQVUfkiTMz2ipSqgWfoQHUeN1dKbhb25mkL4nwLjtO8vuW0Uy5E1T6hZj9WnPWGzxU7J87xTsRpkxFq5rXVmRIVs6c8QyfU2cpZnOWeReWYc7a8qtC0GaXKC5SdVZneKr7vXOpMnb8eqjPvFOecL8msOCMhZqn4M81YCC9JKkO2Ijwd6p0ou3MulLfZSJ7t97nCu6pxfaI8VQhRHtyLM9RRRactZrd9b9HSndlKVnPqZ1O
O09jHCm531eqvM3Rntu2haRtBOYU54KI6DfE21/359hXqzLvUaWy9OEsKSp2z2s9aG+di9ip0SJ018dMdKtTsMyXPeDlmuX4wXektaW71zc+yYuoMPT2HPFdN2SlltZKTy7lxJqunX6hzrNWztTsqHH9KiDObLzFz5PWW5N8ef5yuJUtYt24dB/bufdcHzoFEo1E+8eu/ztf/6q/5xje+wWc+/3nGJyaoSU1qUpOa1KQmNalJTd4u+bk8zx/52MfY8vjjbNmyhU2bNmEFwQu/QPnwPXdx5aYruOvee7nu2mvZvn37L0W+alKTmtSkJjWpSU1q8r++vCXP82tbt7Lu0ks5NTBAd3c3V1111S/VAHXhggW8/uqrXHXttbTMmcNPfvrTiv2FQqFW8zWpSU1qUpOa1KQmNXnnB89f+oM/4EMf/CB33X03P/rRj2htbf2lvbm//vrX+cGjj3LPPffwHz/3+dL2ou/qOZ+huUbCV7PORQpEhUhKYSCK2VRKQyPmMFCy8nwmZYhUSpSncmlsHSVskyghXSihUipQXjPm8QRXLkLW1cEdrqeDlJzpMqpmtvmXpTlLs8wDmjE5yy9/e6rqnGrsWHCeX3VcGIMVTOgPY8gqxcx0LpWNL2afK6nOC5MVxvYwy3ZFNXIPMHiicK1RwvCVCFYY7KACPDPPS5T1RuCbwE8T06bKiENDwCrpg6jCQlUC2BQz0WBe5TwtZYJbS/WiKm5LhfRZ+DPrOtDRSrzcbEhCr0o/gmCNYtW3Ezo+CDCrRqBVXaeiCEL6d5bJiLocqucrB2VQVSZnWmI7hInSuLcgX6qc/wAFF0bE5b3QcZQDsBQwniujLgMMmGswUWG0XOlm1EwlqN6nFKdpPFVLKFe1h2rUXkXbd8rGb9bmVV1ffqiOw+3bBFyWsJMFZmKzquu5QEXgaSkAtHrSvvkjwvohZtGZantWvhkRQkiW5maaKhdKo7Y08lEjzYJ0S3M5PYPiDOZKltqnP4vNc0L58maxO6EyCe5NqUqEX4A1FMyMhfBD9+uFru/5M48J61n4GqrSTlRsD+Im1Gz1MFszErPUc9l+qgqbKiqDe5SxdadLO1CvQJX9SoUXeVXCvYpQ25/NoqICGxzMd6WEC1Wi3M8GdllUqK+qtNVKhXBxM+/ap4zFLaVvxgsVBRrEnwQovxJmtDJqw4Bvy/kUs9jMs05xFqcZBqpKQ6JCsUdGf4SnNG7PV7NF2YT68Kr0hW/2Fap0X8xi56ttjZqlL/dDMTteVU48KvGHqiptMWN7yfSVuidVGi+GTz9nVN3wyAgfve8+XnnxRf7bn/85D3zqU8Tj8V/6p4OlS5Zw86238vW//CpbnvoJh48e4+qe9ahIlOEmm8YpRd6WHFkYoynrk05a5GISqWDX8hhrerIUohapfBFfWOxbGuPSniJCwZGFUdonPDoGRhluS7FrRYJLDueYN+2zsr9AaiLNG2sbac/6JHKKbLPiogNZMklJ1HaYPwiHF0dYc2g/qZF+9q1fybzuo9icxLYWkUpvRTKC5Z9C0IeQYwj1JhrxpuCW5XBqGD7zYbjiQmhOwafuhw/eCh++CVYtg0QCduyBoWF4c6fGHe3YBSf7Yfd+OHBUR9D/9FlYOh+O9sJIG/jjwAJA8bNrv8GC488AV3Bo3Y3M6X8JwU5gFdnmDxLJb0OxBME+NJLsFLAK6AHiQATBKNCGxox1AXPwuQhBPa6oR5YiZ6fQxIN+BH3YxSGixW46uw8RdfYZRvFxdNRtGquYRqhuII9gBB1NO4yOZnfM9W0gae5nNWAhOEw5IrcB2AC4WFOLsf1TKBI4sRVIJ8fC9DGKiVXEp3wE0yinhcy8xUQnHU6uWETUtREKOtMePXNspuot5o7ksIp5UrKZ5HSWAxd1cWJBE4c646STEkvAipMOXRmP+SfSHF1az975Saxml2VHFadaI3QM5/ElpCYzWPkMcZFEZApEnS3AwRL2a6rxfyM5/Zq+f9X
C4dXrWNF9mLrRUcY7lhNL57EcHxGJMTa3hZaRMVKTQ9i5HFves461ewbwI1H8RBuT7UtoK8ZoOXGc3guvJFGox7fbsIuDCIYokxMSwDqDE9oIpBnr+C3imVMU6z+CXeymd8Vv0DDWTb75o0TyJ8knbkK6UZzoOoQXoxhbh+XZFKOXILwI0+3rsbMuEx1Lieey+GqcWOFnuFyDZCuwGTgAxNDoMx0lHsk1IdVOQBFxprG8YQQOgqMIGikmLkZZdcTGjxCx68nXzcMqTmN5rxrDOpcwCUaX64XQkIN4HSxqheXzYc8AXLEUNqzQSJ1lnbB5A7S0QEMdLF8GjQloa4HWZo2Gy2SgpRkWzoerLoMF83RHfsEqqK+HtauhY65Gzy1bor875kHeRIG3tcDoGHQtgIEhjRED3a4b6qF/QCPnuo/D3HZwipp+0NgAL76q0/zZ65BKwvAo7NwN8Tj87UPQMQe2PKfxe4On4KFBjXnzbwXvlG6Hk9MwWoBJC8byMOWBusl0fPOAtYBPvv7/wC7uRbEewRSKZUy13k8sZwPLgSYKqXuwnDyCIq5Yj8UI2CdgXosmQXA/uaY7Efl2JG/q7s19D+LkdpjuhMIwHClCYRT6p8CNwNRRBlZ+jPqRZ8zDfS9MFsFfA/k+1Hgc6Wv74HO9sVtJBHkyc28n1zCXgcVLSeXqeOmWlbRkkuxbkSTup3CaGinIGOOdi9h2+Vy6Tkwy1DWf4eYETRmf8fZ6kn6c9sF9ZNsWUJ9+Do/lKFwkI3jWBqQaxLc6GVx8P/UTj+GK64EWBAdwxA1YpNEIxiNwxWI4NQZbc3DlAk1lGUvDisVw4qQmZnTMhV37YE6bruOdezWOsLEBtm6HSy/S+MS9B2HNKrAFTKT1MZNpPU5auAAOHdX1P7cd3twNSxZqvdhxENYsB8/VqMDVS8HLwcg4LOqEsTGYnobGFBwYgIiAvGMIJ7axDRFUfg0CDzfWhvQKRAoeFFP4ogORbKezdx/FSAu2Z2MV0gwvWUTd5CjRomS6fS6p/h3gC5zGJqK5E7ixBfiJFpTrcWjVXESLR+uBboqNLby5PkVy0qfr8E6sQpa/+vRFbHzjJPUj3Ywv3UByfIjYVAbLizG4cD4N05JkupdCwzKOL5vD+p4cbcf7EQUfv6GNofYozZMevi1YfWQc5UpeubqF+oxicEEDc06ewqtrAWHRe0EXkw0J5g6M49XVk7RixAuKupzPaJNF65RHx5hP1FEsGM8x0hjFAuaPucQ86Bz1WDLm0DQwQL4uxVijTaqoaBvJILOTKNdm18bVXNhfpG58imQuTSQziqXSDC2+jPrBHixvCCVTWMUx/HgTVn6Uqc5NqEiKuvQ0semXgAYEPWjsmzD2rh9oRnAKQTdQb9pGD4XUepRVh+UeITLdSqZtAdF8nu1rG0gVJE4yyeFlDZxqjfDqJUku31MgkxLM7TsIuKQ7VpNNRWjp66Ox73Ui3otGNxZy4KK7mN/rkkuuJZ4ZMP19PagD5iFzfsj5shBYgUbZStP/CNOvrwayeJGbcBpWY00LQ88ZMH17Cp87gK4Sr1uftwSNw7sYRdKMPYr4rKV7xRImU5KYC/2dCU7Mj1NIRhlujTCdsIi5gqmUpWkc5zoIffb5F3jmqafYsmUL99x55/9S7vVL163j8SeeYM+uHUxMjNFRjDIdk6RjgroxyEcEIykLzxbkI1C0BZ6EgXoL4WmcWLSoPXLTcUnLqIdrCVxLe21iU3lsH/qaLFI5aB/2aB51Ea7DRMKiruATKyocfOI5l1jBx/I9bE8xnpQ09Y4j/WnSdRGzkEaO+HQRQQbIIFWf6ayOUcarudDaBFFLM1oXzoX2Jt3xrloBF63WnbOUMDGpcU+7D2gs0eApGBzSfOfJab0PX6PqIgrcACWWAGxOtKw2v1NMppIIM9BVxHATHUbhG82AJlI6FsbNoDnAMcXMILYOqEdZTUAdfjxFGWlVDA2ER5H+MSx/O6nsViB
ttjsELEfpbwU8BH2mwR01z7sBRq9AGX/XYIxFgCgLvGURM6iPYLlDRmsS2tPgFmg/Nkwx3owiBVhY+RzFeCM+CXJxvdiM7ShaRn1yUclYnYVnRZFFj/pJF8v1GG1sZqQhimsJJuMW40lJMqvo7HawPI+ppI2Vkww3aYwRCizPx5UK2/UQvks86+HbCTMo7Ck9JBQTrQjVX/KiTcWgqXcC258in2gBEkg3i51zyMfjxKdzRCdyKGwG6izjpYmipEUx2UzzmI8s5piqX4AfjePFmk29FdB8Wv3U79Nm6qsFcMnFu4A4bnwR4JOp70Jg4yYXoIjgJTvwqceLtePTgB9twaMBP96CIoUbb8YXCe1lEQJX2kABRbPRjYZQqw68CRLLGwB0e5H+QaQ6iSSHYBpB3uTfwvKGiU8XKSbaUXbc6JCgjNADwYDRi1Ql8q1B1z3JOMxp1YOHWATq63Qba23W/3e0/f/svXmcXUd94PutOsvde++WWrtkLZZly5ZteRW2MbZsg7FJgIAhECBMnIWEBALz4rw35A3bZN5jsjMTQliSxwSGQCCADWGxWYyNkW3Zsqxdrb3327e7736Wmj+qzr2nr27LMhgH3uf+Pp/WuTpLnVp+v1/VqeVbGi2WzTY3vUilDHt5ow6vp0dfd11Yv1Ynp69HYybTKUgldO+0ozFZOI7ZSCXa4MLSDZ2oRy1s6ZGPEGPTeY0mm87rxniprFm+xSL886MwPw+PHNUYtMIszPrgKxRLQRnMoO+BVzPm4kMQEDJofEPW2E2SemYdkEDRrRtPoot6YrXRjQGgiyCxAkRW56Mw/kC5pnc0JGAZXnItgd1rfIhCqT4YP6bfdXAayobJXpnUH9t+mXKux1SWU6A88Mu6Mj4+iwijTR5CFFnjK/KAh5fI4Se7mc/2EToZTvX34Dsu+bRNLZUjcG1qTopKpo8zfSkIFaV0l96sQkpK6TShm6EnP0otmUYwgcIlJAUoQplDIyrTlLIrgUlC0YOiD6iiRMo0WDKAwdN5PtQCjTpMJmFssrmJzplJrT+lui7LVFKj/Go1rT9jk7rcB/phKq91YaBf4+wsqTnk5arGlkqpw02noVTVZpBOQslMaUwldF5bFuRSMDWv9SrjwrTh5lvoe2r+Wb3+VjCn024ngBB3voqSOUInR6IW0n1mEs9JEVpZpJqnmrCxK2WQEs9NYQXjCOUROgajZkgSynGpOjCflVjePAqYzlmECmQ4jwyL/GBZArdaQaoqtVQ3IUkS/rTpiaeBtgwSGXylWHrKQ1arKBJk5j3KSUm6okhVFI7ng7CZzFjUHMlMTrOskTbKdilmM3iWgDAgTCTomwlwfN2LKkNwAkWqqnA96Bv3qLqCVDUkVwzpKobIUDEwEWBXKyA0yjYEkBbSq4JIM92VZHDUAwGJQsn0fnvUUhlkMIekDCpEqrLp0bcJkn1YBrWr/ZoXGwksmsYjQMl0JCnzW5d/aLkG9ReQqE/huy4iCJjLWFQTDvVEgnwuyVxKcqzXxm4MFgkkHoHroBBYtSqp2gnjU/UmKfOpXtxxl1pmtalXMD446iVOxXxy0rQtcuZ3HLeX1fdZXSjLNb6nKzZy6BBaQyj6zbOYDpck4KEa+wVoX6ekQ9UR1B1B3ZbMZhNMZy3m0y7lhKTqCuquhW/pzWHOu/F83TVXc+X27dxyyy3sevzxX6jG84GDB3nnH7wL3/e59pprsKQ8e0RDtR/QaHtOtqHHRd39Ij583Eo5WcisUc1BofjVNi9fBO8U53Kq2HhC6/BvhC2yWrBGZ+3u1X6Mpzk01Qb1pc4XF/N8JD5W3Mpwbg3Pbol7645hYvGhqLaFL84xrHz20NpzgZpUbIhNtAtGxkaczju3ZJupCaLlV+vYbrspO22gQKplmPt5YYXUIvMH4rqiWibm/DTSboqOResObsIMQavGEKf66WeyqZbpJUrxnGPZqg2Cr93ObeqFyBPOxkvFz2UNT9eKYc0QbWIvFh2
QXbyo2w2RqvPS6rPGRhe15XPOGeD5WFPTc8SmZC2wR9UGQxn3FEbHzjHPoHldnXuOTesUuwU7AJ6rbmiZZiHEeef74tkknmNKxvMU1VKJnl0zxpyQPEd9tFBTRYtvstv43nOlRbUi/zjHTMbnyqXFTJ+FXZUqNqVPycUCFCxga581NUM9xxTFxeric/1ezHc3/ZOITT1snd763DkUzbX5WeEz1Tmmn5yHH2tTT4s2QQieR+N5xfLlfO2rX+Ud73gHr3jFK3jPH/3RL0TD+T9/8IO8/OUvx/M8HnjgAXZcdx0d6UhHOtKRjnSkIx3pyE8iz2vBYDab5fd/7/f4zD/9E/d/5Su8bOdO9u3f/3OZsH379/PKV72Kf/jUp7j3t36Lf/nnz7P5wgs7Jd6RjnSkIx3pSEc60pEXp/EcyS0vfSkPPPAA6zds4Pbbb+fP/vIvf64S9Sfvfz+33X473V09fO3++3nvu9/dKemOdKQjHelIRzrSkY781PITb5KyauVK/vZv/oaPXnYZf/qhD/H0U0/xoQ9+kOGlS//dErP/wEH+w72/wfjoKB/+8Id54z33tL9RAGEcMRSHmTSxOWrBltCqQaeKSEphNN84Rq4KzfwkEZt+HMZpPCyc1qjMSxSxZxr/xMRCL/JwBFhzIFJQrunV9V4ArtLHVlEK6iaCgWIB7Sw015TSi5UwC1PiaSZEqZBA1pAhhKFCxXBNOv+UATU159kp/Ja5aFE6g1iuxxOqGrCns+c9NnFLzXCjaxWaODRYgLkiRFFC0zUsQllFLiDVhea987HZc83yWDgXzjxhqwaKTojmnHOlJIrQkMhiqLoo3mGkQs1UhmrhNuT1qIhilCJhdOTsqXSmbOK8xSjuqlkykWIppbcd1dPhmy9QsXSLMPacwbarFmxhKCuIMEAv4FAoUUQoRShrC3VGKI1uRDXyZYFRtGhZ07jMO1WTCaWkiqlAHPwUxMrMZwH6KqZTSjTXBCgRiw8C7NC8S4IVgAogNIsa7EC/JlqjYsXWEtTDBXFeYNxh2ETPNVQtbHFAsXPxawvwYK1rGeLKxNkou8ZaB7OIMIyh9OLYPQXMVo1jir9bc7QUi+lb6/91XiJmzboIL1YWtJnj3W4+c8u1hm7GcYHt4hCLYeRgz+oLivBu8fS1+MZIfyqimeXRZaEaFqJiZaAimxeq7Zz3+PMxy2nodvO5WH5Ejkmvc4qVewxhWDe/azHdC2JoxJrXTJvnN8u6oWexPPZj+lmLhVHymyZV9Jrni74pWnOPUiACmA0hK8GWWi9Dk1vK5J1UxpcqlA1KKpQKF9QAoBr+sakeqokII6qPVcPD+7G0BLESEkBBqYbvFKFCteiOApSlmvlh7kOoBfWyToMO2YtXGbGVStqvRq5NNdUgUAvdcmDi3+DWnq3WjTUZoclnoysiekRpPyisWJvDwiy4XYhcUzX0+vzG2XBBXbMAc4skwik2LWKh7UYY1VYzjM4p31Re5kkVxts2IYKo3RAueL0yyFrRqM9sgxcsmusloI7nlrC9AlLptEb1Tmu5ahvztR83dYISIaHQRA0ZZo1/a84XD6UydZpuwzRCDBZWKUKh89W0ms8bVbeYbL/iCm699Vb++Ytf5O/+9m9JZ3NcuvWSF73hfN9/+k/84bvfzVVXX83f/o//wY033ND2vrGxMQo/zFF0BLWEYHA25KKTHlcdKXFqwOXH65NsG6mz4WQFaUuW5uuMDUiGTp3myAX9bDjuMbbcYd3Th9h0fISUbzGyYSl1S3DN4RpHVjmsGhlDqhCr7vN/H7sarQAAIABJREFU3b2Ut37rJKGdoi8QDD/zNEOFeXIVm+4Th1g1ehqndhKLk2S9NJnZJ6inapS7Lic992kEBeAovHQIsTSEN98Ff3wvvOXlcM8vwx/9LqxZrXF0d7wM+vs0ZaNU1quqJ6fgwgvgff8v3Psmjas7Mwa+D3/+SfiN18E/fgl+6w2Qn4c5DxIVmO8GLgW62HT
iczy17fdYdvoRlp34AsqykWpar8ytLkNZS7DDzwF59ErWkGrmPTje54EygvXGQPvwnJuwwofwEzdjeU8hEIR+HQsPOIEgZaz+FLBZp50LgSlgI15iMzJ4BME2YJyAX0PyQxSvRbCXgHcheQzF2xBM4zu/iwiXAkVGLrqPvskvotFQTwG3AU/hpf4Gy/8YsAXJt4EKNecVJMu78LMX4FZHOLNsA32TB9j98pey/ODTHFm/GdseoG/eJ1mc5xs3DnDpY4dYUXJZM3KG3MzXIMySmH+CijPA529byaXH6qzK+9iBwg2glJP0zQXIUDGc97jydMAlB2p0T5XZty6DEBZrDhzDndtPZWAttXQWZ+YAbvh1NLpnOYhhnGIWizyQwRebWT75QxxvHNufIzVXpd67HoSF9D2OXzhE36mAMJtFelXC7mFWnp5F4HFg6zrmuyyWHR1hdMNK6sJlcHwGp3AEpfqweJojV/0BqcqrCdUZXP8ohU2vJjX9JCc3XsvA3PdxSgq3vJfZZRsZPP4QgbOG9My3qeZWkS48jpfpw6mMYKkJ/PRqnOoIlZ5VJEv7Obm0jyXTj1LtGmbg+L/heg5SPYylKsBJYBLFzca9ZYCLgOPAkNGVOno19WkEOcCjnryCVPGzOLU9SL6BW/oOufwD2P2HoDwDr7oehl0Y7oXX3QFr+6GnGy7OwK3Xwt4SpGrw/T1w3XrYdQhWDsJ1WzX94B+/DJtWw+e+ApdeBO/+M9j1lLbHngycPA2rlsOXvgpbLtSYr7FxWLEcPv0/4aor4IF/g3UGU/fdh2HrxfBXfw933QYf+Shcsx327IXRMdhwAfyffwpvewP81SfghmsMdnIvrF0Nf/Exjar8Pz6s7f0b39aovLoHX7gfXnEL/NGn4QNvho99Bq7fojFo334KktMwfwbBQTRucsbY84ypiKpA0hBJLgVmqLmvZXbo9XSPf4+QbVg8TMCtWBwkWcwjmDVlUsGuTAH9CKYI0tuwvB+DfVQTN2a7EAyTKD2KFdYQPANsQ7ILjdVahkZQ1WkSdKYBl97RvYbqUzPxTAIlSIPwAgQzgGJu9ZUkZ3ejkYcTOLNLmNjQxfBpyVd3DnHb9w4z39vDUHGG7GxI74mj5PJH6Joe5cJjp0CkyRVmSfgOyw7vpqvmkhn9JhaPkZkZRbIXicRmL7AHKzgM7EcGB+iZeAjBUWy1B8le4DS2Gjd+6AhccYEmJn35R/C2HdCbg+8/A2+6C6YLMDUNd74Mdu2GzWth21Z415/B775BYwy/dD+8/Y26rL/7MPza68F1YM+zcMctmrhxehRuvxmOHof+bq17P34CLr8ILr8MPv9V+NU7Nfrw0SfgVTfpsE+Pwl03aqJMWIfbr9Hkjo3L4WXb9EfmhmXQnwHLhkSIVXoUix/h1B/GZj82D2KHx3D8Z0jPPoLFYdxqN279SQQOPRP7SJV/gF2vkC4cw+IZKgM3kZl4FMlppO+TKD6GW9vH2tPPsOLIg9jeYVIzY2w5epCVIz/GCb6Nop9X7z1M95n7kYxjV5fj1o8S0gU4zA2tZMlYiacvv5ieuYDHLu9l2w+f4alrNzFYkEwO93HRMycYPvpV1p18EmfOJ1E5ymVHdzF0copVp57ArY8gy1VkrcTgxBGkuxKnWiUxO0W2MEnfVJEv3racrnKIEJCuhOy6Ls3gFDy0Nc2pQZsrDteQwPLRAqeHM/SfGmN6+QCrJmqk5ys8snmIPduXc8FomtOXdHHBo89yYvMFuJbD4a0XsPT4HrL5kzxx1c0MTmUJ3AqB00tm7mt4zgb+5e2bufzBIyQKTyLZRa3rHuzaA6aBuhNBBhgn5B4EPcAkITtNp8g0vn079USCZPUwnnM1menjjGy9gFUTHseGHDJVxd51CfauT7Jm1OP0sM3UsM2Wp44zN7iRgxct4eHNaS57No9SA9jhIwT2W5Fhmu9cew3rZhxSxRpBuIZ67grc6iSwgmrmD3C8eXzeAGym0vV
GQvtaPvQHb2H96DVk84PAteRX/gapuTkEPjXnZbilEdzge8AoU2s/RmoujVBLUWotc4Pbya+6mzPLb2Vw7CCeuBLLoOwEHmObbsMpreTIxevJ1BNIFIWcxad25iikbcopTWCbyUg+vrOb0eUW3aXwJ5u20SoXb9nCv91/Pzfv3Ml/fM8f8vvvfg/T+fyL0mg+cPAgl11+OV/8X/+Lv/qbv+bvP/YxLli37pzPhKK5iYQQ4HiKRFmj6OqWRIYK21cavwKEQn+ZBBKcmiKQAsvzSeRrGidmWZo8WA6pWwIZhI1ejjFbYtVDhAInFKA8nFIdy1MI38OdqxkGoYdd83TvTQMiXjeVQU33RthAIqnRV+mUPmaz2kk2eo9bvsPCUKOupmbAcaFc0Q3nIIDxKX1tdh6SCf15a0lwm/gWsLEqZ6jZCcDDYgJk9B3uIZQPlmO+FKNeJwFuMtYjHPUKSULh6nuEbfA58U/woOV3xA2NMDYSZQmTX5bpCEiYr13HHKPPbdfcnzDXBJ6dNHkqTHgaBRY6STQfWppjHaSNbDAlQwOyD/EcHf9QQmjZWIFCBiEVRyD8AKemcOtBo0dcUEaFIVWjV05dYZlk+pZobEbg1ENSpRC3DCLUuhhKgfQ0r1oJGevBCoiTSEQjTZpK4lTmTd4GWJ6HsiI0WATxlyhpgRQ4frO307ckgRRG121DHVAI/EZ+e06S0E2hhKXzwXIBD8+yQMyavJ8nlBaSAkpYCOZQttCMbAEad+fHes8FiIBQmg01lI+kjAii7qiyCbeMaKAQpdGLqGvAifVCe7GevqhM58wH2Lj+MBMlbaOurUdzbKntx7G1PTlC24YwOJR8yWDCatrWHFsfD89oe8oX9P/zM3C6ADOzZrMiYyvz883ev4iGMDevj+WKHvEJQz2SJIRGSFqWRo5JqVFzvunRnc7ra/NF3Vjx/GZPY2FOXxvL64/nYtmEHejftg2FeY1By89qe/dDjSuTUeNzjubmJpHOhbFe6kqj+0VJl8BNIaihcNFoQW23mp8axPqKa83eYSGa4anI34TAvNGNCCFYaulXivfg+ybcmZZ7hI67JUwvlhnpkcTsBCR1QqGw/ZCSY5Eoa7t2fN1TRs1HUsOq10jma3rwwfOxvBBZ97B8ZbCdNSyKpgctatzXTT7WgCKSCZOf0+ZjIGLMV3V+Jgz9xPe1ToJGwLmu9tXKoAs9TyPjLBtmCvq6ZWncaDJpcHee9ucIvWlWVD8ope/3fP2M4+jfrqP1olrTNmBb5jkTtpQayyil/nMdfd51jK1Y2h6caGQUNApt1tjcnPkAK5o0zwAVZOAjqOsSUWUERQQ1pKoAPkpYhturdVFfL+OW5rGqJgzKpPJFnPq88RMBmVMlhPHj0vdjfdJmRCVU1CzX4DAFIvCpWxYIC4XAqfjY4TyJ8jwoD0mdZL6IVa3ilosmLj4CH7tWRQZ6VE+EIZbnYVV9KsY1hUL3udZtveGXZwndRgjBChUyUGZzLX10vBDphdRsm5JjGz8LIvAJpCSUksDW9mWHZWq2o3VBKHNvEZCUXYn0o70OQpTl0NwUxDZ/wtSNxpaJELUCISVKGuKJJZFeHV8KHF8RStNPLAV1W2AFiootqEuzoZll40tBNaJ7Wdo3a/SdpTelkRIRCkKRQMmE8Qsuga0xdYok4KKsJMpNciaRwpf6nMAlcJON+g5pIVTd2JJP4KbNuxwTHwffTVJ3U7ruU3Zs0oUisF2UlcCXFtI03UIJ864uK88ShAICKZhxBWVHEAhemMZzJB/5L/+FL33py/zgB9/nrrvu5vEnd/9MG87vfu97uenmm7n77rv5t29+k1e98pWdiTgd6UhHOtKRjnSkIx35mYl8oQO8+qrt7PrRo6xbv56dt97Kp//xH6lWqy/oOw4fPszaCy7g6/ffz+c++zn+7/e9j1UrV3ZKsyMd6UhHOtKRjnSkI79YjedI/vFTn+Qz//MzfOADH+BXXvc68i/ANI5
6vc67/vA9XHPddfzmvffy0IMPcsOO6zul2JGOdKQjHelIRzrSkV/sxjPA7Tt3cujAASq1GqvXrOHxxx8nXLDq/Pxl3/79bNi4iW984+s8/P0f8B/f+14GBwc7JdiRjnSkIx3pSEc60pH/fzSeI/nm17/OJz7xCW674w5++7d/h7lo8cx5SLFY5Lfe8Q6u37GD9773PfzokUfYtGnjTxchYZa/CIEvzOIpIfGEpCqbW2tHCwtDIVEIAqEn7PuNLVNDiK5JiUISCGme1/eEUuhFR0KYxVrRNWkWgTUXfun/WygkSjNo0AtPipAvwsEpqNWaC0hUDP1VrcbWy4gmci46+maBVq2u0TZxxFH0QVP1QHkGXRTfslfhi2jbY5rpa2wTHcfb6WuBFZ2LL3ATKLPtql7Ipf/04k3J4tuG0nifkqIFPRTflju+CXa44Dkdbyt2r4qdt2Pnw6aCIAilvjfaQj0QerFaICRSU3YQITp/Gsiilu2EpV5goIQgiBZbtO5SKnSeKLNAzRdS39PY7l0adFGUH9VYeoXREw8lo0VzARDqxZVKIEK9sDGUsqHfAIGMFhNGNqHjEDbIRwbRaNIUSgslrEZ5hSY/QqO/zXIy5WvKWUmr+f/oPnNOSX0uMItVlLRRNOPVXAgYNOLRLH8vVtayRV/EIvfX9MK90Cz+m61ArWIW7PnaRmqxxayR3cpYgTSQjt5Ce5yfBq8MdbMot+7pxVX1ejMekb1F5+qe8SdKPycFzBZ1/tTMPZ6vF3EBHDWLCGdnzdpX855QwVxRh1WZM4sBPWPvoV6YCDBW1M8Xy/reWh0K9WYcFuRXnB9Wj5039iYtQmkW7izYGp2WY2RDlvktaaDtwpDGQmPCWJkRi4PU5aYzrPksESLRo4krNHlWCRb6AiEX+KiG75EWvqThf8PI3hQN/6yMniohtQ0Q2VHcz3kxXwcNZN85twhulYoO0g+gXtJlUq9rvQC9MNDg4CiMmSwLYd7cG4Z64R/mfNlMmfQDqBh9D2ILTCtVo78hjM829bhoFukGga5zVBQnTyPJfF+HHwRQifTYh6qv65C26Mg2/hxiPkzbawPl2agj4nWNOEvH1IKtvOP3qWZdJSUCRShtkNKELfBkdJ/O01CIWP2m0XFhm/dq2JnVUq9oHy8A39T3ymzNrgDpx7Z/N+0P7VO1DklfL6RTmOXm0UJuGfla3W6IfLNCEDTqMNHwl4IAT4KyrFjenl1XLqwfJfEN6kNpoSzZqHsEAaG0CJG6zSTAF1CT4Em9LNeTsplnAqomLdq+lYlfoNtPCORstCA8aiMoU09EZRo0ytCTll5YSdi0zwVbbMcws8JqpFEQ6DxDmroqtt19ZJsGNxgKkKFC+prC6AuoS1NXmzL0RbP+/qlRdecrWy66iN/5nd/hgx/8IB/60Ie4++676e/vX/T+IAh4/IknuOrqq5manmb3449z4w034LruTxWPsbEx6t/J4NYVwzMB23Z/i66TzyJrISvzz3LZVDdDE2WSxSLZwKX35NP05UeQIsfSiQNkp2YZmjlJsniI0tBWcmOH8JYL1hycxk7PsvZQnkz+BO7sEQgcfnlilK4z+0mIIdKVPNnJw/iZ5aSn9pIIHwD6sTiG4GHS84eRah+OPU1mcATuXg2/fBXc83J4y2vhPW+Ha7fD8mUwOADptK4Ek0m4dIt2aF05SCXhWw/Bpg3wqf8PrtgG12zTZI5rt8PcLFx+KWxYpdF2l2+B0XF4yZXg12BlHzx7DNQYcAqBw7L5EazqcSCPDAroVdMFpFJIfxrhnDJaVgbmccsjwGlgBYG8Gal2A6M4wQFgBMsvAs8Cp7A4heCkWTUfhRHxm6dMBVkCJrHrFQTHgcuA/VikgcNmVX0eizJwilrmDmxvN/hLsXgQQR+9Y7uMq9kNZNGYszmc2mk0Cm0WWArMMbn6BnKFXVgVUCJJxq+TrDxDX6FMoniIXKWHPVuWkbYCwlKFi0cPkZwuc+aCYYa
OfwvBUSSzCCRC9TO2dgWbR6ZYM3KK42v62XTao5iSlNKSTFWQnThFauYIqdkJwswgq8ZO0ltOkMyP4YSHsMsl5vuW0pPfi1SPEcjfRKo9CE4gGKTUdTsEa3GD7xOylLq1Falg784rGdp3hsLK1Uwv62fN0cNkpg8iAokMqyyd2UNqbh+ELnZqBcuPPU165klsOcBMbw8JJUnNjeGqB4ET9I8eIDm7DzuYQPAMmckTwAjd1VES+ePAGeA06dmTWMEItnccOIRbGQEO49RHkGo/cAKn8iyCZ0kVjyLUAXpLz2L5++ia3oNgH5JIh6KPyM0IjkI6D1t64QIBL70MVlrQXYAz41pn+hRUTsJqhdN7BmYOwEuWwIQFL90IL7savApcd6m2lTf/Mlx7BQz0w4a1sHwJXHkpzM6Bl4fJGZiswMw8zJbh0nXw+D6NortgGO68VXNFL7kI3nA3XLgKlvTDK2/X5778ALzpHpic1PbZ3aVt9dJLNFbs1pvgxEkYXgqbN2kc2X94IzzwTXjH2+HwCOy4Fvp6NBXht+6Bxx6HN7wantmnkWOJBCwfhp03anTdm14L//I1eO0v6Ub2hRvh6m2aBvKbd8KX74d33Qvf+B7ceTMsScPBcSAFvqUb/GEKMinwbOjvper8CfbqvTBdNnZ5Esc7TLbwCIJdSEaBowhWGnTcMWA18EMUPQgOIRgB1mDXH8GzdmKtPgaXrIGDq4FHUOxAcL8pc9c0SCuQsuF118Khw/Brd0G2GwZ64Zar4eAxeN3LtB9MdcGNW+FgEW5ZDUeqsCIJxZBUoQxWzfipMwi6SZdyZCa/TyqzheUHvkXP6I9wVR/dp7+Bki6uOozFFBaHSZSP49TGSVemSPiPIssJLP4NmCZkBxAyueZSMoUf6w4PrgVOAD00SURNUpCBKevjQI/O6+0XwQ/K8Ou3wtYNcOIMvOrlmsYRKLj7dpiegRXD8O574dgJuGQLvOwGOHAItl0Kl2/V92xcD5dfoj+atlwIF28CaWvfv3SJrj9u2qGjsmwYfuUVUDC6ct2VWo92XKdRi7YN2y+HXBpWrtDhnToD11wBWzfD08/CTdfA6gHIpeCyzfDsNKwbBK+kG934QAFNOjiGIGs+//eYzoAZBKPGv4ckiqMIDgMVBJPAIZNfp4BZBDkEpxFMGF2b05QeDpn3DGL7RSTPINURarkd9I8+QmruCP3FeRIzRdbNKRIlRY8n8ZNddBWrpCZHsNR+4BSKddjsQjCDxEcwhSCBZD8WY0APyWIRp1JFOAncygEq3cvwu7pZng/AFlhK4TuSkWUO02nJZM7h6v0FMnVBslKhZ/o0iVKBJ+7YyMYf/Bjb7maJcChnBGsPTbBs6hiJmQIJu5/s6H5ID9E9vhdBgaFSEWWvIFk4ils/iKDE6MZf4sITJfqPfRuLHwP7qWfvxi3/E6SGEL7GfkIdyYzJ10kkHpJnEYzh1uZIlA4imECEcOySW1lz6GEy0zMsm50mVc/hpyyuPVbj0qNlEr7g2qePkM4fxKonsJODXHX4FOnpOYrL15EufBcrGKdm3cYSlSIzUeDRN69g1d5Zal392HMJLPIkqnuAZ7AYZWL4dqSVIjF5jGvzM/SfnEepbpzgJM48OOEBoJ9QLcf2J9HouXkS+RAneAxBnon1r6DvxC7cusvgxFM4tUNI0ghGCOjDd5cyuexCBo7vJyUH+NHWHp7akGS+x2JgXnH94QpdZcXuNQk2jnoIR3es3fl46cXpeY4kk07zw4cf5r4//mOu37GDP7rvvva9zaUSv/nbv82dd97JB97/fvbsfoq+vr4XLiIK7ECRrCrSMxM4jCNrJXKjU/SfrpEo+0g/IFVRWOEcyZk8SipS03ncapHU5AySIqGTQFLCCauk8xWEqpAbK2NRwgpnUHgMPV1AyDy2F+CUaxodIy2sYNYYvOkJYwLYp5VaFUA8C10pzfvs79UV+9Il0NennV4yqSs40Mihvj6NGJJS9xx
MTetrJ07r48CAvt7TrfFCmQz09uhert5u7dyyGUi6ukEhPWNgukHrFJ4yjqvWMDyNlZrQWKB09AVbNWn6AREWR4mkOTcHxhlqBziDRodNxsL0Yg3nivkqLZrzBdPIDWM9PlPmPWPmeEb3INiW+TavoPmwAps9xnGPozE2J00YP46FZQN1fEf3aknmUJaFWywCFTKjGpGUnClRcqXGu1mK7iMFUAE1RyDIm7TMAgEi9ElWFU7NJzVdQihIVPRXcsUxvSBeHbs6i1OZBynJjpVJlgOECIAaVjCHUqFGAyIInW6TR3NAEc/uIkzm0KxtG+VkUNgU0xmkVyewE1TdBKmJIhaziED33GXy4zrdysOtKZKTc0jyOFXdmxRYlvlSHwV8rPozwLMIVTHvPgAUseePozFck+Z4NHbMm+OkKf9T5vdu0zg+AEzhVA6YMnjalNFx09iomLJN6uuJMmQkpHzIZczverP3MSf0fRkPnFMgyxoLbbu6gZLLalvIZbS99HTrv2RCX8tldCNXCEhKs8mQgKlic7Tn6Li+nk3p51IpbY+DA7pxnEtre+rqgvEJzd+tGvRhIqF78fp6YXxc31Oq6J7mnm7NZh8a1B+0g4O68dOV0+8IAliyRNt3fx8Ui82PaEvq9xeL+vnjJ3V4Sml/MdCnMWZD/RpTOdCvsXq5rI6rj24k2RYkTE9yNjo6eNl+SBuknJw0H9BPIvie8WWngKrBY2HsVdukwjdle9rY2CECNw2phMGqaT63IqUbnMLg5hqjFBWdp5aCrqzOi2SieS6XgUwScgldJpYw4Srd8CQN9ji40viTqtbbQg04RXZOISjgqMPYczUsjpse1JK5Pw+MIsjjVKaBCQTFxse9IonCou4maOI5kxDrbW+ObEW/643eNQKlfXBXFg77ukx7uqFS0eWTTOke6L4+nTdC6vogCDVSbnBQjyAkEtDfr/UkkYDeXh1+JgPdPVpHcrmmDQz063xyXVgypLFnyaTWLdBI1O5ujaLLZnQ4KWNDUup4dnfpUY9sxtQhCV0eSMja4EQjQzXTMRKahvKsyZ9Jc943PmMC8BGcMD7GM760YHxB3pRfhPubMecC8zvigUvTITMLjKJsh1TlDJIp0lMFrHqNrtEayrJIzvuE0iJR9TXLmqjslXk+6tCJRj/0OwUhTmUWq143PZ1llCVIVRWup5AhBBKsqmImIQmFwLN0b6zj6U1lErMlBAHlpIVdn8cKFcmK0hjR0CczotGRbi1A1ks41eYoWmZ60vSq15HkAUHdTdF9smbiaPjnUW9rSpo6MsKKnjD/r5tyGDHpPI7klBnNzFNJZEgVJnHm58hMzmHXQhJVRe+ETy7vk6oqcidLgIeseyQqAb0ny4gwJHASjXpX2Sly03WE55HPJs2mJBahlTZ2csKU8ynq6TQKiaDCwDMFnFIdJbR92V6ZBtc/0CNXGm9n44TjRm8q1BMpbPI4xQqJuUldH6M3TNGbp7iEwkLikSgr5hOSiZSk5ArSpZDcXEiirig6EseHdE13Qacq6sVtPAMkEgl+7x3v4JFHH+Uzn/kMF11yCVPTuqHneR7PPPMMK1atYteuXRwbGeHe3/gNpBQ/m8gIzh5ejA+VLzay1u6kONf/xSJhiEVm0bRG4qdJo3jeHxaLT5sQ5zh/PkOSLDKkK9q847me4zzz9LwL6fkozILBInFWMbVJR6uuiXProhLt9PN8Ck48RwrF4i8X51Oeiw2fno+OtP7J5ziKc5epOp+yEuf/YJvd4tqGK1rs6ly3K3V++iZ+Qps9l39qF45o2d0M8VM4T7HINI1z6VzsqNrdo56nvrf5r3oeaRDnE+9zpfVnVDe1K5/n5WafT7zUC3LLT5T/510/nkvP2t17bv1r7DAqF8s3cX4+87yuvwhq0voq8Ry+c9H6d5E6TC3iV+I78La6xLOquoXvEc9hbwtMU4g2yiWe02ZEY3vA59ciEIsUrXqx5zy3kwvWruWJJ57gda9
5DZds3cpH/uzPeMvb3satO3fyl3/+Fzz5+OOkUik60pGOdKQjHelIRzrSkZ8Xsf89Xz7Q3899991HKp3hT973PsIgYHRsjK5crlMyHelIRzrSkY50pCMd+bkT+e/58v0HDvDmt7yFv/7o3/DR//7fefkrX8m1113HJz716U7JdKQjHelIRzrSkY50pNN4BvB8n7/9+CfYvn07Z0bHODEywpve+EY+/9nP8prXv553/cHv84Y3v4VarfYzeb8yGJlQxJAtIpoLJfWEU4OjU7TicmjcF6FkIoQRIkIYNecux9FsqoHtauJ4VGOeX4heBFHTK/FnDFZKKb0Y45zzJeOTcsysnAhFF+GwonAU5mgQUY3wTTRknKEWLW4BvRiD2LlWXnccbbUQU6QWjXvQ8o6Q9lineBzi1+L3R+8150LR8vy5MFxnZWgMhWMOjTI1+SBFA2vUwAGJqDwj9FbQyKcAg0ASgsA8E0ILxsj8qdj8ZRWlq6lRzUjF42s18IkKYZB10ds1PkjF8kCg0XXxcKS3MH/UWfP22+VXuEh5LGp9i+R9XMdVm/vCs4OJ8ik0KLZIfIM+qgXgBXrhV6g0jisIFwYfn1MnRRPzGM1rNqi+5ksDg7mr60V/kZ2FYdOegqCJlRPCYCKj+KqFdhrdF8TsILo/OoYxFF4jzcFCO/eCWJaZvIgwdxEqKlTN+yNsWVhrIsfiaTVYLarG5qo+SgkTX9+8P0KzBbHaWKQvAAAgAElEQVTyiaPg4JwTdRvzr9UCv7qgvJUpS2VQfYHSix59H/y6QfjVtZ/zApiu6nOBQQ4SaISaKuln6rWFPkfEsGiiZS6+FC26F2ERF5sjSszXtdPnsMX3xdJe85u4QYKmLkVIOWJ+Ov5cu/mni9UPkT6f9ZyMnW8xsvj9UTyEqUNQBo9nzkU8LxHzB43yDdt6Ap1fXnv9OOv/XiwvW3UuHr7fkvd+M52N8lMtv9vHbiF2MKpfvNjzYZtnzC9h8HJKY+kCoTG3ddHeNoIYKlJFaLp4/ESEV4wwbn5LG6JdGcTaNa0+qJGPrb45jns1yD4pYzVHhNqN6jSdPhHE8tSgHpHirKIXikY9uLB9sLA9oGL1ShwZubA+8pu610DeNfUiaos18YNB83lTD6sGwpJGXR0K0VyWoTS2LhQQiGZt/KKh6iL54SOP8HvvfCdff+B+PvThD/PnH/nIgusvveEGdtxwI//65X/l7z7+cVasXM36C9a9YO8fGxtj+HMO/YUay07tIVU8CRwFNYTFNIRF5jJddE/sxpkbx61/D4sJaqnlJCuPEThrsMJpJE/hzNURZMlUnyBRGCEzuRdLjWD5zyKwgSxO+Ah2/RSpuTzpwuNInsGtnkEOjUD5DPKGHNyYgVoV3v5K2LYRVvfA774RvvVD2HkTrFsLExOwehWcOq2Rc1LqldiOA8eOQ08PFOY0mcNxNFWgvx/Wrm6uqI4wdpUKrFqh77Ntjbjafwgu3gxf+Y5mf+4rwCXLYE5CUINr1sP0JH74bqT6IXAlMAcpB27ug2N58OZA9MLdV8DBEggb8JCbHMTAPEzNgrDQK5clbN8AYwEkshqRt24NzNXgsmUw5cNlfTA6oVFW07Pw8u3aKG/eCiskbOiDS4dguBeu3ggr+mDrMkg6OFf1ghhH7OiHdSkYDGBLN5RnIefCvF6drdiMYBy4S+sBFlAgW7CQ6hCwFBkGWMEpBGMcv+Q19Ezsww7zZKxBekdPkivvR3p1pCqSrU5hVxSKNShWUnPWIJUim84xdHSEVOUQXe4AvbN1uuoex5akWTteZ99lPSw7mkfJBNX+ZaSnDqBkkkpSMrviQrL500wPX4g7N4vrfw8ryKApFRaCComKwKqX8UUCm+M43jyB3U/CSdA1vg9hdZEuz+EWqzjqJLNLL+b4dX0MHX4QKFBccgnfvnkTFz29G0Edgixe9zKyJZ9U4RkkD8GqHpgVNAgErgXpAahXgBTINGT7dANF5LTX6er
WnFjZrRtCMqMbs+leU2dG+hCao/FWMm0cvaWJARctg4k8uAEs7YOhHo1anJ2DdSuhvwempnSD6eZN0JWBGy6DVUNw2SaNNVuZg/WrYLAP5ud1ZTRb1LitqUnY5WuKxP0Pwvo18PR+qJoG2MlxuOoCWLMU1q+E7RfDxRthySBcfJH2y9u2amrB2jWaYDM0qBulg4MwvESTC5JJbXOOo+kcS4c0uaCn21AzLFi2VFMNVq3Q5wf6NCEhldRH19U2nc1qG89moM88b9ua3pFKauxeT7emIWQzmr7Q16fft+0Sjca7eD1ctFk3SE8chbKnG55rc5Cvwl3b4PApuOcmEnNTcGUatq2DZVm480ZN7rn2Uk1nqHqQtKCSRq+ajyrnGVM9FtEzBSvAOEF4BfaGY5r+8dQRYAyJIadcvRJOFGFjP9yxDS5ZDa+6A5IO/Mrduhy2bYHbb4aeBPzqr2i9eu2tcMVW6BXwa68BCvCqm2Hn1bCyB954B6Sq0OMipmpYXgWBT8/EESTPAidx/FngOLaaQNEDDCN4lLp9L1BFqgDBXiSB9hlbliC7a4hEgVyyiJg+rMusXNDv37ZGI+JCC7wikIaL18BECdgO5OHX74CX7dB6dMcGuOUl2g9ffCGsWa0Rg6tXasLG3n0aaZjLwWe/CNdfrXXiK1+H66+BQgHOjGmk3GO7NFVjaBCeeVaTM7JZePhRHVZXF3z7Ibhwkw7j6T2wZbN+98QUXHm5rnsmJjXy9OgxTYG5/mo4MqIb+Rdtgk9/CS4y9fTJUa0XXVJ/wOZS0J+DqZkmjaM2iGASQR7FyxCcpEnWKMUaRzWjLz6Klxh6Rg3YhuIiBI8By4AhNC0iA1xifHkvIRejGEbwI5yKxGMTFo9RHrgZlRrEnh3H612KDEK8ZAIZgDOfx+IMUEGS0brKLXjONqxwjn1XvYrB04+iuJS5JTfhlA9hM0d5cD3O/DTK7SdpdzFwajcZzyI/kGPDyG7WzpzgglmL7WMnyE5VQHkkKr724V6VAWscd7oP5bgkiqP01gqkZn2EEtQzKezKLAn/adyZAnPLtiNrG7CCk8gyBEkblIdUFiMbtzN07Diu91VgFFYM4SYDmE/BlRk4rgAHVnXB7DY0YUMCa0yezwBLUaxA8DRW0Etv4SR2vYTkGJbvYxeLiFwXQyf24yW6GRx/nBPrLyBT7MKtn8Cdm8HLDpKc28/+yy9k+dF/BuYJwh2Mr+yi//QB+p0q6ZN17NocTm0SL7ESK/ARjAEpLG89dl1QWL2Gnqld2OoEVlBEMo0gQFCi3HM3slZDqTSh6EGSYHL5ZWTmfwikScwksMI6TnASSYGQTUAKwREslcdSJTJzM8i6S2j38MTFfWwb98hVQkIE60brlBOSNeWAbUeL9PuC4VLI8KT/4vU8B0HA+97/AXbu3EmgFHv37OHNv/qrbe/dcd21PPjtb7L1sm289tW/xAf/9P8hCIIXLC65qYCeKY/u0WNo3MkEgjIaTXaEqlNHMoIb7AUeBQ6jKAFHwPWRVIET2DyOUhbOxI+BH4O6H41o+z6QR4YVfZ6DwIPAPwOP6GP2MCQ86JmEJQ4MZmDlsK5k+3p0o/eJEd3g7enS3E6AmUKzN8AzX+z5mUZvKI6jfw8M6OPa1bqSzmS0s5VSV56plG6UpJK6grUt/fv4tAbuI6E/DUuSIOvQ2wWORalnlXFs3doAnTL0K7Cl/gpMu7qhkkho7B2ziNxh6Ddfy3akchUYyEF/ApYk9Pv6MuAI6E/BoA29Cf2uvrTeiWRJLwxYMDQAfZ5uBA2lYbAXlvToBsRgDvq6oG8K+lwYqMBQF/T6MJCF3jIMZYyjFgROxsRn0KRrHihjBd8BJg12a9449yIzXUuMoznEwPFpUpPjOMWjSCoI5snmD6JI4dsDhNYAntMPKqT/aJHU/AyCMwwdniM1W6FnskrVFlghHOsx+SkcAmkhKCLrJeq2y3zXMkARCIt6VxTf/aaxaQNHkDy
GZJQgmUGjhw4QWpLs8SKSGdzCPNnxCB1Ww0ukGe3tNhXOHPWEy1O9LhZVwMGqV0jWFHaA7rVjEvoSOo7Ma6eccWA4beLjQJcLwyldxinH6JCr39nj6oZwl6P/v8Q1cZcmvDoNRKFQ0GXrV+FAtwt9WbAmNWorLfWHWVcWigbj1tutz2cr0JPTiMeBXm1Lw0ManTXQpRuS6ZT+CwIoVrVNlErwdB1mZuGrP9A9mWMF3auWcMCtQ09WxyOR0GEODGjcXDKhw+3q0r+Hl2rslzA9tSuW6SzKZg0KztYNFdB8XTA2aOv0LRnS51avMqo5qOOYMagwpXTDB5r3ZrPNBnlf78Lne3v0NdfVjVzXhfXrdFhr12i8XX+fRr0lLP09k0pAOtT5KkPdGEw9ov3AcL+2tWVLYLAblg7o46AN6YRpJE+Zivi0KeMJmui5w7oRGtZ0XGwL7IPm/sdhSUp//FCHlAtLB3WnwLJhGB7UH/vDw7BiuW5YLhvWDcvBAX1cvgyG+2DNKu0PVi7XHyJL+3WDcjAHvVnIzqJZ8yD4qGlEjAJf1Q1avoMSEJAFJqgNLCdMRNjNIhrPV9V20XUUshNIsVtfX5IwDThgIKXv6U80B3x7s+a36ewYGtDxTmdg5ZD2zemUTmMyqf96uvW9QdDUn0PHYj1Dk/pYrjRHLR7f0+yxHhvX+gUaYxht9HNqVJdBNCIR1SGuq98rhK5r0mltu1N5revpNORnoTsH80Xdw29JvWlKEOgyTACOC/0ZnS8pG4YUkDP5fQSfIZrI1tD4glKj0ax/11EMGJ8hgB4Cek05uCa8qNexlwgrF5DDk/3GxzxE6HQBZ/ATOcJEDqnmCZ1kc5RDSvNBb5u/gu53tgfx+oZRZBjtWQ5YhHRTzQ0RuvMI8gSuRi6KMKSr4JOdPkZubB7PlvQfPcbwnoOseLrAuodO6g1wPI1yU5ZDKARLfnQIUllQCqc8Sf+BCc2Ptx3CRAqrMgOMkQj3UOoept6/AgixmCB0HELHBZKULbDqReAQuLMwlAP7eyD6NMbRTek8606avIo29nFpIhQVTazrPG7p702e7gbO4IRHyMyWSedPIgjoHt/PZCqLn+vHYp5U/TCBtJDkyWdM24A6QgWUXIXNJEu+ewoRBDjFWSQl/Fxfs12BRbJawPKrlLL9xi6PIhnT4Rg+upfT/jN0sgRujpAeiql+kyZJ0jtiynMfMEsg+wiFbdJ4FBkeJTP/GAob6Ss8C4ZGPXKFACdQJGsKGSiGT3v0jdVZespjaNzTG6q8GA3nbz/4ILfcdhv/8oUv8Pef/CRf+/KXz+u5j330r/nkp/+BL3z+c7zirrt48qmnXpgImR58Ff9PWxSRjBlr673xnfWi3bVk7BgPo3UHLtkcE1ciNvMgPqxLy9DTuUZBz5ODo9Q5htDj72zHZlEtrz8fHpFqyWzRHimlzjG6f1b84ulQLaNlLRibdu98zukEUQpt2mH0RHyHtdZh3tj9gsVGImN4rFi5CdUuIwRCnTVBo82Mq/jUonhcW4pSnDvNYlEFW8SAno/OqXOVb5vw1HmqmGARTJlaqC+t+ds6fL1g2kbsnGijP6p1es1i9vUc538uZBEjVO0KSi4+q+Y59ST+Wy4ygzBiIi9m/6rF9mNTWRQLp53Ep/WolrKKT4s6y1eLc8xmVO3Tpji7TlBtdHkxnJ5q0asFfk2d29/KNii7OEoxmoJ0rike4jlweOfSnXb20Q4rtiD/W3zcOXVHPC/P1JoQ0fADMd0y0+HOL1Rx1hQW0apL6mwX335aj0BZZ79T0P5+tSiuT55jCl+cvhhvo9hNn6h+Eq6j02KnrVMu5OJ2r9qXzFk5oM6u/5r6JVrquMVtszU3xII4qTb1ptUSkjibFLxAFcWLM+f5vffdx1133klffz9P736S173mNc/r+df88i/xxK5dWLbNjuuu4/5vfIOOdKQjHelIRzrSkY505N9DfmaN5y995Stcc/0OfvC97/P
Jf/gHvvC5z/1U4X3ty1/mL/76r/nNe+/lne98J2dGRzul15GOdKQjHelIRzrSkV/8xvOb3vpW3vSGN3DV1VfxvYce5Fde/eoXJNy3v/WtHDl0iAe/+10u2ryZfQcOdEqwIx3pSEc60pGOdKQjv5iN5y/9679yxfarOHrkCJ/9/Of5y//237DtF3YfFsdxeHr3bu774z/mxhtu4D+///2Uy+XnF0gD23X2DF4Vm1OkFmzbbeb4CBEDqbTOo4njUlqBK3H0W2Buq0LNYG/mq01sXN0sBFSeXsSgMLgp2s+fbDuv9BxzmeLYq2jOlm+QSBGCK46wic2TUkHjBA1klR+YCzEmTWCwUoQaHRaFGWG0IuBLLWzir6LzQQgVrzlP1exdTxjqeEbILc8gnWp1Hb+6SUOlru+p1JsIscCgn+qtaLXWPGs3ibOJ7mniDcOWuX0LsVQL5kqZuWHqrC1iDe4ODJYo/kY9n1AJCFvuPzteMV2MbbEd13ElF85D1Jihhfg9X4gFeroA1wemPGO4wCCIIdZMOQcGgRiVfRgdw+Y9RM/F/zwWYKCisqSqg6/7JmvrLTqvmuXQljrYZjv0yJ4UWocWpE8sDDvCcMXnCspzbGEuxOJ2+vMoKuYPPINLq1Z1/nieySdj554pt7hNxX/XAwh9FmK8Wn83EW0qntdxnJsX6UsdAk+/1zcovegYxlCbkQ7G0Zu+Qfd5fjOOftA81jyo+m3880LE5AKdqsXnX7ZgMkNiuEQ/9tvkoR/pvmr6srPmbUdpietQm3nbYcxv1bwYXtBfqN/QzC9ookqjMOJY0wYNNFbPhG1+R1jDxm+Th2HQzOdIH3xj/5HPJtT6ES70s4p29ZZqo6rPtSV26/MRVk00yrOBl40wZfE6UbTiBv2FHjn27AJ7P8vHiwX3hAt8cevvNtPfF2ytrdr4+nh+xJF74ux8UiFU/GY9H9k0QDmOAvY4Gxkbj5W/IN9VvE2EjNUpoom0E615oa+EQjbzqU0b6yz0oFhsu/XY84pYfESbdlg7PGD7BRsiqr+j6mBBmRuScfACoupee889fOS//lde94Y38qmPf5zNF174M/X5119/PW9729v49V//df7u4x/n1978ZpLJ5HM+NzY2Rv9nSqRPH8JWR01WPovA0c1mfwLX8nAre9GrOvNAFemtx7KexKplEZxBLC1AsYIggbg1hFMTuvL91WthSRes7EK8pB+RyWvE1t1bwFZw5RZ4/R0wNwuvvw1uvRG+H8A7X65XlXdl4bqroTADd90MmzfqVc093XoVvZQGaWUaPratlSab1cdodbSIkTeEWLgwSgi9ktuyNHWjK6eRV6tWwPI+6O+GtQNw5CSMViCswEsugycP4m7bgHDG4JbNsG2pRkjtvAFSoSYUjHpwzTrYsESv8hVJ2HEJbF6nmbL9Cb1i/xUvgfmKfs+mlXDFJiiWYPtFGll35UbIJmHjatiwGq7eAquWw8Y1GiM22A+XXawd+R23aCf+0h0aw3fbTdpwdt6gbWXdKo0yA1gxCE+OwKXr4OR6RJhHMA+sAqZheTfMRySIOhqDVDZ6sAlHKdJze1BswfInkRQAF8EpBCH17BqsOoiwjFRlLDWPE4xhV4s46jAgsOt1ZLWEU5winZlj8HienDVK17iFCKsoJ8XskEPX1BGU00N27ghSTSHCFN2F3VjBLvQy9mh1ehq9UhqkP4fgGOAjgiGsegWlUjjBNH6ynzDdh1N9ElHtpisxRtepJ4ErEYT0ZXyWHT7N2OpLcOgnMXOKZOE0lppCcAhu3AKFYZg/qd971w6NpOpJaZLL5uX8b/beO1qSqzr0/lVV5+7bfXPO9869d3LQjDSaLI2kQRplBEhEBwFGtvH7/MDYa/lhePjZGPOM/Z79vHjGmI/wARJCQigHhCSkQXlGI03O6eYcOlbV98euulXdt+/MCEYw8qq9Vq+urq46dcLe+5yqOvt3KAtDYzVUl8nvynKhqjTWQGMCmmq
gpgK666UTXdkg1JTVPeKgr7kUpjJw41rh9378BmmvO2+CZ16Du26CvYdgwyoh0JREYSotNJaBcZjKSmT/IR26K4QkEA5B/5BQZnRddL+zTXBelyyVNPoGoDYOx3YLMaIiAccHoaUKXj8M00lY3A5TMzAxJbSN8jI4dFRQXWf6oKMNXn5NaBumKeQNv08+gQAMDYvtHrfQkrouAwtNExxYNCqdm/3AIZOR/9Jp2ZdKOfZu2/PYmNh8Lic+wX0zkc3K+TbS0j1IMAyn47fJDYkS6KiDDctheQ9cskhQaXVlcPlqidRf0i0Ui1BA0Hx6Dro6hKxQWwE9LbCgEmqisLwNIn5oLoeOBpiZgmtWwb4jsLIbtd+HsiEkhIcjozLQXdUJ3Q2wsA1u2QTb1sLaVXLNBR2C9mtqkLasKBeSSDwmdR6PCYWkrFQoJI0WvaijVfa3Nkk6iRJYsRhqo4LFa/DBmmboqoWNS2HTGqEfXbMWIgZql4rSqeLvqUeLHYPONEqr5SIqw9BeC4NTsLoTDg3AB7ZAeYno3i1XQHOdUEw662EqB1dfAi01MD0BmzuFBLRmGVx6iZRv/0HBwqmK6EQwKO1YYpFeAn4hi/j9QpSxiUrxmNSNoojOJ+Ki29WVko7PIrH4/RAMQHW1tH0kJHhEn0/Oq6qUY2MxQSKqqpxXXSXXbqgTwonPZ1FMamCwT/CHne0wNSmYxLYmoaWs6JH6qErA+mUQ8kFtBHrq4cwYamstyshhGcBd1gyaD8oigiAcm4HagAz0TB8KZ4Bx8MdQjGkUDsDieql/BqztDPiGQfGjmiqa2WdR9UfR9DIU3kDJtOOfTEoaOZiqShDtP46WnCaQPYnKCabj1xBInwKGMI1OfKkRNHMQPdFBRe/jKKj4pw0C6QPANIEp0IxjaJkpfFMTqOYRtGyWgG+G2PBOYBhfNodmnkTTsvhySfyTM5j4MPwhfLkhjGwIX3IIX+40PsbJxdpQp0fRMsP4jFEU0kAOU6klNDKGYqTYe/UKavYfRs0No5LFSLQTHxlE038OCxrgA9vgwBm4rAHetxX8WdizG/7qk1CjwpsH4S/uEALS8BB8/DrBwDb4UTZ3QmcZLK6FpggcPmYNSqfxTfrwGYcITSVRjb1o4VoqT+xFMXehMI0/mUU196DFWyg78ySCfKwlqI0QmdyHQgyfMYzKBCqDKCkfmvkWChPAIjBymFqEyMQpAukR4CASuJhDKCgqynQpipFGLykjNLMflTMEzAkCKZioX0dwcgTwo7AXiKKaCgoqCqeAJCblKBgYtIMS4a1FlegRldrhHG91BFl6OMNkqUrLqSMoBBgpixBPGexbGPr1l+f+4T338Dd/8zfU1dVx/09+whVbtvzGHppUVFRw6OBBvvDFL7Kgu5svfelL/MEnPnHup9fJYYL6zxDUVyPQi8JPge0o5m6iw49Ztx1Vgr0aO4o/1welwyhjD1qDsGroOyDcye6NsOtlzGQJyspOOB6C6STKwhwoVXD0MCxpgNQELO6Azetg/2HYeCksWkj6488Q/OFW4WbOTMMlq+Dv/jd89g8djFZVpXR8VZVOJxi00Ec2tqo04RQyHJZvTXN1lFanWlFudZYJ+QCsXSNpXrERXnldBj0/eBaUHChZGaymQ6iNb0CoBpZVQMByoKtXwuCQDECGjoqDXd4Fx04Lt7ajWfJ4ohfGRqTDvmQp/PQp6GyRPFSWQe8g9HRBSRwaS2HvQWE0h0Jy/UxGkFWRKMzMQFszTExKfU5MwIbLBcd05QZ4JAlbNsDjT4mjHxuHN/dCSyM8+DPoWkHu+Qp8vAKEELxbCdSXwGk/swvWMI7CODBJjhYqT34PyJHxX0sw+9fAUnRWoPEzYA3pxHoCU72o9AJZ1NwkMERAfwk4CWxDMR9EMdtQ0OnZEcGkmq6XT5CM3IoyM05oZIBj7U3UHv0hwXQTweSPmEp0UDL1Kv70g9ZTghFs7ByUIui9nahm2nIsETQ60Qy
VtLIen/kwhq8TI1IOoxNEk28Q++UZQCdDF+Hx11jzyFFMyjlW1U2PmaXsxL8AaUwaoC4O9dVQWYJ55gUUdLkhOnASEkHoy0BnHUxOCGN8Igs1CZjICRrMCEEoDYTlSWZFCcykoKVe0IhtTTA8DMt64OSgdLinhmDj5fC9J2DNKtG1S1fCvY9CayPs3g8rFsL9e+GqJkHJTU7BwBjs9cH2DPziCNyYgAPHYVO16EtpXAYqL+2ExdbA90gvbG6A778K61bJAH1PBtaUwbFTEIrIQOR4Pzz9EixfJAOM3fvgthxMTckA9uEn4JIVEI2I7dkDUxDdrKwQpnpLixyfTMoxp3tlMJPNym/TlEFzICCD5mBQji18QDAyKgPxXE7qp3Dw7PfLeeFw/qDZMBz8HQgSbR1wrEauqesymC2JSTm7OiXtpkYZlGkqdC+A4RFBrMVjYqMmwtA+oVj4PVNuHqIx6D0tg+L7noK2BpQ9D0DVBwXJWROGI2lorYGGarHTDZcLR7uyUuy3ssK5MSkrc+pg5XLZt2yp4xvt49ZXzu0AgkGpFxVhMSdnoL1Hnro3N4iPCe2CJQtRkr+A0BAE2lAju8F3wkJ+1oH/NGjVcqM1cALaGuHhN2QgfPQERAOwcpH1EEwX3903BMu6JB81x2BpHEK1Us/NTfL59t2S9+pqJ891tc72oh7nSfSVm5zt9WvlOxaTPJqmPHyxpa3V2V66xDlv9SrnKfTiRbKdSIhe2XVZXyfbHe3OgimLemTb54NVS8Uf19fBgUPyHQ5JP9rcCGd65aa1w8pDdQYSjfDyEZSaN+HQOMQV6KiGoGtRq4kMNMVgYhRl5kGgXB5yVOxD6RsFNGg2YOQkjGWgJQh7dgkqUxtGGfuhNdi7AtiPwjcACKUfBXrQaSAwfYjxaBPVU7vQKUPlJDDJaOUaIhM7UEiiMY6q7wVKaTg2BKRQeJlg+nXpNzDw5fYAcRQzhaqHgCiaeYTaAyXAcTnH2C3Xn1gIlGFSgmF0o8caUJIKPn0QjdMoDGCSQg9sxmdO4s+dANIYlKAySMnAfkDHoILnF3Sx5ImfWn4/Su3+ARQ1JQ84Wqpg9TL48Q7oLoH1l0pb3DMFW9YDz8EDJeJnQ6/BMQ3WrYbTZ1COn4Tu5TA4DEotDA6iPDaOIBo1grleYBwtdzdQTeP+NhQeQ1CUHfj0RwAfrS+vspSuBpXDlJ8+DAyjUAnmYQRPN0DAAHgN4XM3o7IL1SwlOvFt4HpkXQMsHTgILCWoP0eOpejBCKr5KjBGbPSH6HycoYaVxM8clHULOAFUobAbheXY5DNT9aEYM/K83kgRzJgcrvazdE+aU+V+gmkTPQN1B96gb8E6ZoIqNUNZHm0J/XrTNt6z/Xo+fued3PnxT3D/j3/8Gx042xKJRPi7L3+ZF3fs4LOf+Qyr1qw5j2kbCnMRdBZ/dhY1pxZ9TSBP+HzW034LZZeHSnK/GjPnvgJ2H2P9VmYxOK50VHXu6+l39PWtmZ9ndyeclw/Vybthzp0CMu/UEHPufttBGgWrHBZFUBVs268d8z7G3Hq2EzUKcGVm4SsbZZ5XOYWInIDr1X4IB3PjK/KazY0q9OPgfvy48YXmLBLRfU3T+g/rybbiSod5XlnafFI1/2NSRJ+12bzLizYnDyqmhWPykY8OK1jlLIJp03oAACAASURBVA+vaObPesnDBxa0veFeDaugPdzYRnf6huvVfN5rVtN55Ze3GqBVzoCaP0PNfjXovq6qyGDQdL0i1NQis9tM5yZ0ltBU4EI1LT/9olNIiky9UZRfzW4Lz1POsqLfuey/0J5nbazId7FVSt3ta5oFSDtzLj4uT92V4n7IKOI/5p22VuS4Yn5odnqBvU8p+L/A/9g2ZKqO/yuYzpZXGPeUiFl/hqv+3NM1ik2mVM7PV5/v9oVIo9h23lSSgtVr5/sUm4Zi2japzLV/w40601x+1eULzcI0XEg6AuQh6mbT8Ln8qz1NUyMfxVY
4ZUIrmF6gWX2AfU7Q5YP9Lp/q9sluvv18GL5C9Cizv51eSp3No2bmr+ppKkrRKZd59opr6pp7ClSe7y20Hze+165/32wdG7Pls48LuspqZ0Z19X+F4y03Ns49FTJcZKpOAdoyr94i1uQVc079Odedb3qrYq2AaONjbUCtK19KfqnOWwzD4O4f3Ut7ZyfhUJDnn3+eP77rU+c1ZeKdEkVRaG9v5/SpUyxdtIiW9nbu/tGPvBntnnjiiSeeeOKJJ55cUHnb0zauue46XnnpJf7lX/6F2269laA9deAikHg8zr//+79z/wMPcOedd/Lt736PB++/z2tlTzzxxBNPPPHEE08uiJz3k+fDR47S0NhIaUkJL7/0Eh+6446LauA8ezfg83HbrbeyY8cOcpk0S1es5Imnfua1tCeeeOKJJ5544oknv7nB874D+xkbHeWy9etZ0Nl50RdsYXc3997zI2655VZu/8D7+dM/+5zr32LYkkJxzeXKkwJEkY1UMVzzagvngulm/lxByEcVzVlmmt8u6irv2gVoN9Msvsy3PVfQxpfNopd0Z9tG1uXccyeNfEyTac4/bw6zyNLAZ1kSuei8adfcvLxKdy+5PB+ybq56zJX85YjNef8/27lzddScNwPF1kku+D7nlNf8ss43FTNfN1yYMN2QtnSjy3JGPkrM/k93YcYyhrPtbvOMXjDH1rYPl63Z6RouJJ5h5M/po2De6Wz+iy3vXmCDxZawNc9hK4Xzjd8tuLo55Sy2VLdZRL2KHFc4Z1l3zS/Om+texFYx526/Y6IUmUdd5Nqmefa51LNxLbau6o6uFkOC6sbcNA3z7c1XflfpUmH9FjqYInV/Po7WKPDZBkX6CfMsjts+/3zGAvPlyDxHx2C+rdTkVzH86dnrwzxb2rP9n148bsC0MIN2nWb0/E5gzvx2k/Nf0ts8SznOp2TKOctonuO/c7eRMqdF7TnjRtFrO/3xeU/b2P6e9/CFL/013/j613n+2Wf57Gf/jPWXr72ozTcajfDf/+q/sWL5cr72D19l4xVX8Nk//a8sSvYidIIJIAhdTTCWkih9TIky7m6AXWPQUgldXTAQhJMJWLMAxiehKgwfbodcUHBIN1wFkwa0NEkkt64LWknzg67BiqUSNR6NCPJn83qJYg6HUf5Lm3S88RInOn/pQqcgmvabq7RgQFBEmYzcWl3WJQEsyRTcvEYwc6GQIJV0XRjL0aiUdXgcLlkmUdjlJVBeAe0d0N0qeKjN64RKoCrQswAUTSLVQ2GplxzQ2iropNIIBENSlz6/kA4MQyK2A0HIJCTqv2eBRHu3tcixne0STd/RZkV3t0vUfyIu0d+hIHzkViiLoW4zYHiBWElFM+TKYEEpRIYFq6UnIOeH3WkIm6gMQJUKagBfYBimSyCTRZkcBjMI6XFCoycQEsaUpV9JUJNQB2glYM7AlCnpp7NI1LYBpNAyQ6j0oeiTVA2mgHF8mRPADIHcKFpWB1Kg+KCtCoZSkFQhCyarUdiPVGIOyJAqaSI0eRAfA0ASX2oAsj4ghe43ULOgYjDWXEPViYiV5yQxBQIT/Vb+48CQ4OhUDT64BMY+KvlYsQRqG0DPiNMtjUM2Le0wpUN5WG6WUmlQ/ODTwfAJVq4kBFVNUJcQqkNZXHSqvRmSfiE5KD6htNx5qxBiPn27RO5/7H3S7llTEHgVtVDiF9vbcxj8qkTsV1UK4ioahtY6GJuAoF9IAoYhdlWaEERiJCi6ueUy8GvCo11SKraeKJHyxWMW4jcK0zNCmOnugJExsdvJKSGHGLrg7Gpq8gNvS2LybdNxVNWxd5uU47Z1G1lnY+bs7zwnF7HS0pxObjbo0To/UOS8woG+qkIsKvsrykBR4fQZqfd4XD611ULU8PvFtiIRsaeqChksxmKCsDt+EmprBWNXUSn1OTUj9REAPv4+icZ5/3a53oblUB2H2GFByE1NCmqutFQoG9VVUk+ZTD695FfvGGBgUAgjqio
6NzIGPZ2iM7mcYPiOnoY1K4WC1NwgaMS2ZqGY1FULP7c0DosXQFaFxQvhLs2iCa0QVn1lhbRtMCh+88r14t8aG2BiRugvyxZKPdtUlWWLnLye6RVyhWGhQEMhsRdNlbzbOMJCTGFhwPfZxE1j0nVHB22CC0je7Lim/gFpn3RG2t3vFz2MWvqgqfL/xKSQVkoTgmksL5M+rqxUCC6RMCxqhLpKWJSStCJhGJyB6qjobSoI4aD0SzMKtJXCuAILm6FuFFIZ6GgRKsd0CfS0w8EBCRQui8EL04If7U9ZqGLNGvpIkJvKMKBQMnEakyAqI0xVNlEydIbo1CmE1KGhMAVMAipqdsYaWrUgBCWbY54GuoGj1oOTFhQOYVCLygGgwvLNU5iUAT5y1JGORQnMDJFTylFN6TN0tRTVOI1vahhT9YExia42oQf9BJNTmEoE1TyOqZbTMpISn04DKlPifzQTFjXB0m7BEH74aiEblcRg2WL4zCfFDy7sgj+8XsYwy5fAtCq4w2BQdKu12fKPYbHzzldgyg8zOUsPNchGIW2iMAhNIcjVQG9KCEVtFSjZMVDKIGVCIC19QCYMp4ekz9EAfRIYQEhSSQsNO4aqDUKVHwYHrTabBhYAexGU7AgaAwQmyjCpQ+EkghgeJj54gpwWQ9NTKGSBccBgsrKakqEZuY45ja60Sr2baTqH4kzn/IQnYXV/mvBwP/VBBYUhwsN9VER1gmMZVvfPvL05z5/90/+Hj3zwDv7kv36GW2+5mds/9GH+6X9+9aK/B7715hu5cssmPv/FL/KvX/86V03utwYFAzKQW3sd9A7DE6/KoK++Bt6zGWXiMVjbBrddDzvfgO9XwvuugBOnhE16x63iVEZGYfs1KL29sGyJoF1CQWhvkwHGyqWweYMoW1+f4IgWL5x1bv6/rhJn19jg3Oldu9XZ/nWnx6hvIy40XgKrVkreAgZs2yiG8+QL8Du3wZv7xEmuWCoO8fhJGQxcWSZGeulqJ63JSUEm2bJxff61rtma/3vb1rdftuamfEzTlZvle/NG+d6yce45l62GI8dR696CA2tlgNfZKqiq9lboPgBDI9I5zMzAmSGoiaGmHhHWr+ZH8++A6SqYnEE9/SyEYrDnDQIzryLouB7gBRRUwW511UOoDtKnYTSLMnYQemdgYthC6awhkHsR+CUYJ6k/pAMKPv1FCNcSyB62Fs+ZEae0uhNO9MHQFBwySMY/QGTim5jkUPADR+irX0Xr/ifRzBFgkGD6CblZZJR0OEUAAzWrs79nifA5GUBhgvIRk/D4I8Ae4DoUvgX1FiruczegqOo7a7A32PphfS+3EGRftN4erb10rv4cPSaDoh0vCdpxbEzsKTssTr+sFH78JGxYIe2Ry0HA4tTqugwUj54Um/75c6L/rQl4/jVor4H9/cINfnUvtFbAiTPQOwC//yF4401obYFjJ+BnO6XT2XsAGhudBSXAwY0tXyq66fc7A+K2Vgc/aQ98bNxkxBogx2JznxzZODN7gOxum2Bg7nn2/4U35H6/DHgPHobODhkonTgpiLqJCUHUxUugvFwGcYNDMjCMx6GnWx4MaKog5F54UVj1h4/KjbimSR0tXgj/+g342B1w34OwaR386AH44C2wazckQnDztfDQ43Jseyvcc5/cjMdLxM/amM1fR6qr4Kmfwy03wA/vhw1r4b/8Hdx+qwyGX3wVPvwBaNoK+x6Af/q6+Jcnfw6rlsPR45J3Axlwb1on3PDVq+ADt8K/fQtuu1mO7R8UDvjJ0zA6DrctkDJv3SR87Lu+BP/+EWm/kVHRkRuvdfL6k4fhrjtFj/oHoKVZ0Ho26396WvTErTdvV+wbSZC2tfUulXJ0Z2RUBvEAz70g5Rsdk+vH44IUrKmRGzDNB0sWwfO/lAcWTY0WR71G6n5mRvTF75MbkkRcmP7prCBS952AukXyACVdIn1yyRSMarC4EcbL4JrVcGZAbso2Xgohv/jHKzeKHWs
WIvXFE7B8Abx5UvCJFg8/G5rEnwqg8kvAT93RftLqIoLGs5zq+G8sHDpA2cA9mKQR2tEx4ABQhTZ5BsigswaNV3FIDlmy2mr8+inAT9q/llD2dTKBJYQyD2OwEoVhFMbRlVY0c4RU1QrGEzpNh+5nuuxaoqOPAn1kQ2sJzRwkMPMq2VA3SuoIqfj1zEQCVCWHyZVU4J+4DyO8gHVPDAEDpCq2Ehl5FTWThmAO1vbA9qtg1Qrhhts+4MZ6uPE6y++0wLarZHvFcnjPVWL74xMyhlm7Bo6ckJvHxT3ws18KjnR4Uh5MREKQqoY3h1H9P4Hll0K2E3pfhqoGWLcEZfQtCDeIvkQAMwQzZTDzLGQVqAjA8T4UXgG1Eox+FCLAXtSyw9DRiDL4KBAFDmPwPlQeBMZQeAuFQwRTe8lxHT7+3bopeo7KY68zVXot4cndaPo08Crg43Tbn9Mz9HVgCMU0mKi5g7L+J8F4i3VPRYBKdFZz+//1ofAS8dEhYIaykUFKRypRSHH7d+Jvn7ZRW1vLD7/3Xf7hH/+R5575Oes2buTpZ5656AfQpaWl/K+vfY3NmzdhEsFByARcryd8rrc6hmByZl8HMxeRNrvKVpFteyUm3f0q2TU1Qdfnrhp4sby6MwtwNoaF85oPeWMWefVeOGXjnfwUq8P5UFWGa5qBvSqYYea3m/s1s4GDUHKjkQzN9V7HvgfVgBgOvsjSMRtvZZoOesmwsTw21siN90ngYI+Clh76rX2q8zpOd/B7iqHPmYai5iHobERQAFBd0CMF1Sx4vaba6Ea7XMECvJF58X1seyvEkLl1YA7+sWCKlaY4+KbCwWbeq3zrya2m5R/jRmAWw42dD27tYnjbPouqLDIFreh0jQIE4Sy+rsj3rE8xXPhB3VnlbtYnGgX1/7berJ97Wpr9pNb+HdKc1VxnyxiwfAVzcZiF2DvdWnlzzsp9Zn4f4F4hUdflLYldZnvg615B0P22QVEucEWca2aLUnzbHmirSr5Nue3DmA9basyDBCzEiyoFiz8WTO/QXTri9tdmsVURTfHB7qLl4e/EDyumYn3b9e8nH43mRuVh+Vc3ylSxzrXWtLMRcqYxOz1AyVvFDxTDXj9Wc4634GizaZuuaUbuaSkWPs0IFqD17Czrlm2dDRdrngVPaRQZ7xgFKMrZ6VjWjYnhwtDOtptqHacUYG79rnp1jcXcyFXTprgFZvVCyZsK4nd93H7JB4SKgAA1q7+z201FMQynryWCjaDNoVnXDbvGi3ItE9+vznn+0O238+gjj7Bp0xbed9tt3P7BD74rZmJtu/pqgoEgnnjiiSeeeOKJJ5548nbl13r/Wl1VxZf/x5e4/yc/YWhwkI6ubh5+5BHMizzgwefzeS3viSeeeOKJJ5544snbH0deiEQ2rFvHfffdxwM//Skf++jHWLZyBU89/rhXu5544oknnnjiiSee/KeSCxb5UxKL8aE77mDnrp2UJRJUVFXx3e99j5lk0qtlTzzxxBNPPPHEE0+8wXMxaaiv50f33MP9993HZz/7Wa6+5hr27d9/kRXbhJACLeVQXQJlCUEUrayDxmoJHIpGhORQXScRxFWVsLxFcDvV1RI5HAwKiaIkKtuxmEQQh0PyO5NxAj5sFJBPc35nslZ2XNNcUmlXNgsCT0xTopvnO64wANE+t/C4dHrucfa2nc9IBFZ0Wmgqn9AK7OAQn0+wXBOTzjmaJuV2p/ebROzN29QFQRK5nJPnUAhiEYkUz+WkjOk0lJZJvZXGpZ7rKyFryP+6KRQD3ZAI+XQOIgHhGvt9SNCBiSB3rGCIgJYfdKlpcnzACv7DRIIQokggib3fJX5VkF/4JcLZsAK6Qj7ARNWnYPZ8CYDwZXJYB7muowJx1FwKxUgBKuH0NAZBdKUcyBBIjVjHZoAUlMeEUGEj1orpqlv/CnWvcBrXfMf9OhLwi16GQ4LG8vuEvlFnYc6yWaFrZLIWOk9xkJI2Ys4O5iovlTY
Oh0U/JjOwvFn0v7oSSqukXuMxIQcoinxjQsQnQTrJlKNvZ2ORu3XTXRf2eXmcdEt3UynHju1AHvv46Rn5b2ra+S+TkX32f6mUi6mdzb9uNOIEh8UsUo6NKNNcvsu2dRuV59OEtmGa4hPtNrHTCoWk/ios1F1lhYW8tPxoaUJ8DQhZw55aF4+7AjovoD9JxCVvDXVSL5VlFmEoIHYOsKFFylRdKeSBykqhB9lItrIKyess5s/yjzYRJBCQutBUp6/w++R/TZU6XFAvuub3O32DOwiv3MpLNudg+hRcOELt7Hzxwr7A/j015ey39cOtY25yh2H7Nmt/vMTRAztPwaDTd5QlJJ1YVOrSrtN4ifQrZWVSxmBA9CCdlmNjUZhOAQGYSouPjaswnYGKEFREIGlIf+tThfYQDkkdpDJiq5pP8hsOiQ4piuhhPCIVFw5CawVKZRhqMhLfTQ5IoqhTQJrwzJj4PdLWx2/91jGpto4zMUkhAWa2z7UDulVABzMHmGhMAGH0QBTTCkrLhcKATiakMhMWUgd6mqmaBGCgGDOYhAEN1ZgEDEyfNqtiWm5K8m2q6LbapGfADICSkTxHw1L3xfxMof8t1BlVFazd5JQQduIl0k7V5bJfUyEelbY2rSpqLBU9SWVAC0i6Chb1xxSMZSgoaRuGEFYCGqR1QaHa/Z4/YPWfJpRFLLu3AzHLEFyd6RxDSOqMCSuNSuu/DIYSBDNlGY34qkBqGpl0IcGiaiZnpaNb56UxUVFJW32gbqWbA1KYpDGUErQvfOELX3gnxiwtzc18+o//mB0vvsRffO7PyOk67R2dxN3ost+C9PX1UffN+4RH+MHtMjC65TpYtxo2r4W1S+HFXcKS/f33wQ1XC2pn+RK4cZtgk9ashMvXCMuyrlY+tTXQUC/OtaJcHMTuPcI/Pn5CkDCqKh2G3QkcOCTonmzW6Sz27BVklO20VFVYpwG/nPfjB2HJQtizzxnM2Mclk/mDdZDBto0xyllcxn0H5Lpujqedht8v3zXV8P4bYOUKGBqCxV3SkZ3pFWzNvQ/Do8/D8m5BMoXDco7bIEOh3/7g2cYw2TcMA4NS5pmk5NcfhNtuhAcfh9//sHzfcRvc9wh89P3wua/Dlz8FDz4Ld70X9h+FRZ2w/xh0tcBjO+HSTnjpFLSUwkg9lIxDZsLKQAyW1kAyA1VlMDIhTmf/INRGBfdHDriMTPhyfLlnLU804jhiPQj1EUgEZUC4qFk6j6kkNFTAweNouVF01qPxPDAEBPFn2vBlT2GyxGJAC4Ynx+X4s/egmUeBeqpPpphJdDBddTmRif9FeOJxVBqBHcAkbFkD3/wfgvRSVUe3XnxFUG/uTtjdmU9Nie7ZejffcbaO/jpSUmINTvyCDDvTBxsuh4lRQYXt2Ck3Oaf7ZcGAngWCyaqtga/8H/jBQ9DZLHz1jZcLnm1Bh9xI3/sw/H//LPjCG7bC2sWwaz9s3wrPviCIxvufhsuWw6uvCcv3wcclnTO9Uja/P9/e7AGyXS+6LnU7OSl20z8gZZqYlEGVYQguLBaF13aKr9m7XwYlqRSMj8vA5OlnBDX3k4dhYbfU7cnTcvP/1M/lv9d3ic/K5uDUaXkgYJ9fW+O0TVODfFdXSR7DrkF0ZYU1+Gyw7s9Cjr3X1sq+RELKq6pyfZ8Pli6WjnjZErG/Naukc+5sh9YmwUMu7IKqKrleW4tTX/YA7UJIR5vkZ92l8G//L2zbJOVcskjwfKoK790ufm3VcsnPiqXCvQ0FYdFCwZC2Nku5G+odNODCbmuAnpD6DFiDxHrh+rOoW+q6qgq2XyP9RSIu+xTFsQXDkLQUBQ4dkWupqoVZDDiDV7dOFbKabb8OFn4sJMf85CEHl3roiLSPrkNvv3PzYA8+k0lhbts3bAs6ZH82I+WzsYoB6yagpVkQppesgJ4uSa+nS/Y31Mt2Xz801sPqlfB/vwfXXSGs4a/cC00xeGu/1F97FJ4+CFcvhbpyeGY
MPrJBWORfvhd+b7vwx7/zOHxou+TnkefgkkVy3V/sgc2LoacZduwRfNvNV6LWAy2jEJ2AXB9Mvopm7gB6SQzuQOEMIPhOWAo8CqRJVn6e0PSfA2GLHVwH7LcGX4vIhhbiyx0AxsFsQuVNNOOnwDr66zYSm0gBVZzu3kR88BX2LlvNjrYAl+x5ECUT5In3bqV712OouV2gbEDXgvhyd6OQYaLqBjB0gsmj+DJPoDCErl7JYH2CssEX8GWOo7MMTT2FTie+T0REf2Kxuf28m2IzNi6DXk1z6BqhkNj/d74Pv/dhabtnn4cNFgf/WC9cuhguWwbP7YWwCb9zg9j9wy9Cex0cPw2XLZJ0BkegrUlwn8Nj0D8CqzpkXYAz/bBpGRwclkHykkbo74NoDm7aLP5671F5+KGvR+FBawA9DaQxWIdCHyp3W4Pim4FfAMOM1d1Oyei/WYPnBcAg8UETxbphghr86XoU+hA+92lgBF27HM3cicI+67gyucHiDArDTNf9Du8osDUQCPAf3/g3HnvsMX5wzz1ce+213PfTn15kT6FdSJw8NI/rKaxiIXnsTqVQ+eYkqcxN76xPBYqtLDePaOr5pQ3zH3fOKlFcdVDwxN79xCOo/erX+G21NUqRcuHggAr/yNlPXyw9wMwvs2mlZxTWn3L2bMypN/eqW/O0rQL5qzY5H2UWP+dcWzHVs2TBvvNWUAwLXKfIyyiFqOvpt+upsHvgfD66Op8OvhP6orhQirPouIKsmlgIOSV/1UFdh4wLQ2gWq3vFQTDllUFx3gygOPtV5dcvz7lsu5jfKHZ9t765dVMpoouKUtx/zdeGyjmyVZimavtTpaCtXMe428btgy+k3iju6ytOnu22tt+mufXe7fNV1SnLfPV3PvqvqWcv33xtcK42OlddzZff+fqVoitpznMNVc3vP4vpVV56LgPNWueaqnwUE3SX7hiuk3yKY3PzZUkvwLfN5quw7BpYT3oVM2gd6HM5A7eDD8/tU+xtl305/tg3J2NK4SkoeecrFoZOMW1fbR1u+SBlFpvnNkWlIJ3z9LXKPPqhnkU33X1hHunUQtLN2V9gW3nXNc/RZSsFh/kLMl7YH+K8DVAgP7RPQTWVvE7BmG+MgFLQeTjHKO/EtI1ismrVKt547TU2X3Elf/SpT/FHn/40Bw8d8ibNeOKJJ5544oknnnjyrhL1N3mx//2P/8BDDz3E/oMHufa66/j7f/xHrwU88cQTTzzxxBNPPPEGz/PJiuXLeeKRR/j4Jz7BP//TP7H9xpvYd+CA1xKeeOKJJ5544oknnniD5/nkc5/5DA89+CDl5eVcc801fO4v/sJrDU888cQTTzzxxBNPvMHzfLJk8WK+863/4B++9jV++sADrLhkNW+9teedv3DUCnbz+6wlzlVZy900LVxQwIlaLpRCzBbMPzlfU4UsYCOI3PgfkICU2UAw17750gcnatYOXimGp3Ojh4ph7Oz0ckVQYtnc3HLmrOhtVZXr+/1C/9ADEmHuvs7FurqkHSSiKoKUUu3yWPUdDDpYHQVQLaROk0/KXKo5QUTuoAfN1TYBNT82QbOCkUwTchY2TLc+BpARpBxAdpYkoFj7AgiCJwT4QNcEyqErEgST0yXNWR01MLGROyrgRzFzyIX0WVM30YAcpmLjf2SvYugos2mZ2Lg7SoIQC+TroH2cT5tfT4v9PlvbXDCPZtmFjdYKBi07D4DP+gQD0o42TSAeg1qLFmAYDsbOp8m+lnppy4DrPN3SC5ukoVn64g/It43wsgJ8TMMVdGjbVs5la7bd2PZn13GhHbrPMw2n/uzjimL/CvzTbNrz+IJCbN58+wrt/XzOc2OxslmnPHa9u4MJ3fXyTvgVO82QpSO27rjxgHa+7XZxn+vO3zsppvn2A1CLBo+7/rOpTLqeX9/u65jm2e2zMNDQfZiqnQWZZzi2l8uBz7Id04Rqn9hQIghhn9hRhU/axx+AUosu4tOgxO/qwwPg84s
eBwIWnUqFWp9DAYn7QfXnk0r8fsj5oS4m16mJQZl1bJUP4kEoVSERhZCGYhqQ8EFEg4gCAUMoELEAhEBRsuI7fQpmULdcufhclSz4JZBNMwWB5tPTBPQ0qCYKWQLZNA6WLQdKbva3msugmlmL8mEHsWVQsxkcXF4OFB0lmHNIN+f0r2f5P+hC3QX8TjBoJGD5Rz+U+KQOFUV8SlyDoF/qxbaRnO74KPdYxDRB8zu2F7DADLj8pe2bNAUngF1C9ky7nlx9mRwTAhQ03WoPAq6+0a5f+Qh5Q8+vD9PuNwOYhUGhKJDLvnOourcjixYu5KabbyaZSvGHd93FoaNHuWH79nfkWn19fdQN9sKSbti6CXr74LLVsGe/NNqihXD5EkGzPP8iLF2Un8DIiCCF5gzMikSllibgO3fDtqvESG1smn1MIi4DXTemK5HI56nanEp7X1OjGL193OkzTjrjE4Kl+ca3BQE0OiZMUk2DQ4cFR5TJSD7iJfDWXsH8JJNOHu57GBZ1wcHDgv4Kh+G7P4TbbpLrNtXDiuWwainccYPUTyTiYPLSaccxXgwUDveNjapKeUIWC7i8XPjFkQisXCIovsU9gmXauEYYsDddwb4JygAAIABJREFUI3VeXykYpEwaqsqlbsNBcW7lcdjTC1ctA21CcMu5nKDrFEOc/O6j4lj2noQzgzBVCqPHgWZgnOc3fJ7GUyl8uZcxWYdCEIMN6MGVqHojTOqCXZsqheH9cKoP+gLQPwypcSCIigHsBhZjsBktdzcKJ1CYAaoBhZzvcvzG04zWLyc8+Sqm0gJU48/sJjB1BlXZBSEdchrUArddBh+7CTrbnLo8fkIwUJUVczFZbiKB31/cLgqPs/XlQkgkImnX18p29wJoa4NLFsO2LYKb7OoQ3JbfJ8f1dMJVG2DLejhyDC5ZKSi31lbBQn7kvaIDPV2C12ppgm0bJY3LLhEc1NZ1gmTatkUQbAvaRcd6++X3sT6UspjoxeCQYOjuuV/QbYNDMjiLRuA7PxDb/cULsKATdh8QPmrAD08+K/n5xnfhig2CqqurFfs+eUqu8/hTcNka2PWmYDVTFi83EZe8NDfCI0/DJctgcFBsPBKBnz0jiLZkUtB3wSDsPyg+I5m0ONkBZ18xDKFt+7Z+pFLia3w+2PmG5PX4CcEt+nzw2FNShy+8KGnU1znYM0WFU2ek8w4G5foXmhtvD4xXLpO29fvFB+zZ67CaT56WdnzmWWhrlXPO9EpZzvTJOX7fO+u77JtAN6rvbHUxH+XCtsdsVvq3QABOnxbcoKZJ2zQ2zO1/3H2Wm44SCDiDM5/PsWNNE0a+nU9FcRCMigIHDojNRMLw6FPwh78revTabvidm2FJG7Q1wDWb4LoroaMWVi6F66+BFfWwermswdBdD1duljwsa4ctG+HpZ+HmbbI/l4Prr5B+vq0ZfAZ89lOC3HzjTUGTNtbI+g13fhCWdML7tsOCCuiphQ/eCHWlsKEGli2AeAT/VRqsbILqILT5oHQSWqth3RIoy6Kpe2FwHywpRW0bgZI0LKiF43sIRw6iLRhDmTxAdPoXaKk3KR97ge7+pwnUz6AOv0x973P4Zw4AWRQOopq7gF5ghODMIcLTj6DqO1FYCuzBp+8jOvEUqnEcGEZlF2r9ANr6CfjYjdDa4rS7u03dNzP2+KKQKgbQ0e4Mwhsa5Obq2En45Efl5mTdZRAw4YbLxQ52HoSrLoOlneJ7ZmYEOfnQLyGdggUt8OJbQkqJBGHXGdjYIw8oXt0H63qgpwl2HRX2emsVPPY6pCahvQGG+4A4MAosw1CH0MxTwIA1QE4BZ4DlwBChqbdQzCFgFaj9YNYiSLpxBEE3gsoB4AjScaeBWgx/Ak3/OamSz2H6StCyJxF+tA404ps5gY+LRBobGvirv/xLPvD+9/PBO+6gvrGRH919N5dddhnahXaa0ZAsNBEOO0+SUtaCA6GgMJoDAQGEz+dwiz3tKhwABIPWYDv
s3Gm5HZO9eEOekwzMdZ7u8sei+cfZ57ufkgwOOXm100hnnDtDOz179Uf7qY9pCnMRZJBo29d0UupqekY6vlgU9Jx0NLYxFj4Bv1jwdYVO3x7szdapVY9lZda3xTOttBZsqKqUQUAiLk4mGBAQv1+T+gm6Bn4Bv8XM1+QJdNgHQeu/ZEY+0/ZTAtUydLn+dLAEk1EEgxO27pwjmFrMHmFad8qa8J3tO+eM/aQia6WXAYKYxFAZsfZNAVVAFlMNAVPkwiHrjl1w/5o5gWoG5UmJasHig35xcIlYvvO132yEgsXt4Fx2Md9xF6Kt7SdqUctO4iXOIC8cchYNCIVk0OjzyeBIUeTYjDVIi0asgWUYSFjldXHL7fRtsfH19jH2Ihz2ggMZl53aT1zHxq0nr1mnDkZH5Xsm6fxn27W9QNLEpPPUzra32QVUMs4DGHtDLfAjMynnzZpq5Xdq2vEFti3bi+AYhpNeOj3/G6/ZN19Z15NtM7886YxzzqS1yNL0jMMu9vkcHGc6XfxN34V+8lxRLtt2Hu0FblRVWMbu+nG/Ochm39knz7OILnPuTer5DroL33Ta7VIScxbJsd+SZLJz1wkotE/3trtfcm8X69vsPsbWq3BY6nlyUur/xEnxK+WlkJyR/MVj8iAjERN7LE1AmbXQSsAvNhaNSBoJa/GamaQs3JGIi6+ORKWfMgxn0ZbShJTDXrhlalqumygR5nk8Iu1emrAWbwmAPyrjhugMEIFoUOorYkpZY2GIjEFgXHxuRIHgNERVOZYZNEYhpIE2jZaeBKYJTE0SyIShrg4YJDI0aflrvzWQm559s6jpw8AJ6+mn3/L7o/iyo7bzkYF2MAhh62bFbs+z6Y277Qp9cSLu+KtYFMbG5PrlpY4PjUWcB3gz1uI44aA8nbZv3oeTMGUt7pTJSb5MA1K65Zf94isi1ptBFHlLYJgwavV3Ac0a8MasOghaCxONWm9rLT/NxOyTZ1UfsJ/kWP2dH5jBQbFmrcE2rvNVTC0NJDECccjaC5/5rPMCqIz9dqdtFJOeri527NjBX3/pS9x8yy185GMfY2RkxJtg44knnnjiiSeeeOLJb13UizFTwUCA3/vd3+XI4cMcP3mSltZWvv+DHzBtP3nxxBNPPPHEE0888cQTb/CcL7FYjOefeYZvfetb/OVf/RW3vf8D7HjpJa/VPPHEE0888cQTTzzxBs/zyXtvvZUDe/bgDwa5bts2/ufXvsb4+LjXep544oknnnjiiSeeeIPnYqJpGg/8+F6+/Z3v8O3vfpfrrr+eu3/8418tsazuQiZZAR+aK7jJRqgUC37LW+L8PILi0tmz///rBpwUwwFli1zTMOYWwDxLMI7hqpt02lnL/lz5vVhRdb+q2PrgjlB2Y8F0w6rvnBXYZUJGh1QWJtMwloWZDDAjQZYkZRsr8MrC7OiKAimpY9PC4pgoriYyrHOsa6HLb2MaCRI0Xe1roMwGQ1jBf7MoH8X1Ie9bruvSiYxRoPDv5jY257eZPHs33559n0vcAW+FqMpZ/JniXLYQ55Rx4+isg1KZ/HK4dVMvQN0VXhMkYMcd5Ou+nlnEP5iu+iv0icXq6GzVZs5zzUIknWk65f1Ni/u6RgFqz73PmMc+3lVmcZ79wYW8jt3uCuI/7R+ZnBOcqhtO8KphOihZG33mDoh1B7lmc/m25NaxTNY51942TOca9pjAsPBodnBvzrKXjBW8q1h9Yjoj/00mrYBdE8ZTwLScP5GU3wrih7OmlGs8Bcmsk/9cTspO2vLXpqQxGwhu+/EpV/9hWttW3zKLqjMhZ1j5fydsw3ACxnXDARUY1jXJOGOQdFaOSVpB67mM7B+y4Ay6hZWbtX93AK4bH2f5ZZOCfW7RrY/hOsZAAuYFFyq6YNdXzvovh4NmNZ00TKsPNlVMUxB55mye5NiLAlX3dqS7q4tPffKTvPLaa3zlb/+WPfv2s2XTJsLh8Hmd39fXR10kIuitBe3wzAtw+Rr
49kOwrEOMIhaD6kqJdG9ugkNHBE1WXiZRuHZE/e43BQ8FMDHhRC2PjztYnoZaiRh2dzamKRHzIRffUlUdJbQjXk1TFNXGAR09JtHB7ohYmzOrqhCweKUtTUKP8LlYtuGwdT3NiWqPRmW/qjpptjZKtGwwKN9TU7B4IRw/Kei2QBAM3SmfogiNwsYWudO6mGVyUvI8MyPfIyNSF0PD8n3ylODE3ngTGurh4cdh5XJ46TXB8/3o53DTFggHBBsWNWHLWnjyVbhyFSxvg/dsEFzdFZfCki5oqYF1K2D7FbBlCVy2GNa0QmWCiuwg8SX9sFhDiY5DWw61U0ftyqC8Jw43tMEVi2BdC9y8VVBpl3fBe68QDFtqCk4fhuZSuLYTZVsFNMdg02rYuBB6ImCMop3ZCxwlkB5EM5Ioy6tRN8WhdBSFIWgJwMkhcTDXLhX9bW8VhNWszoXyqTAX+82PjUvz+0Wf7Sj0YyegtVnoNE0NQlg5flL03U3GcZMCikkuJzo/PCzR5/sPQnWV2HMgIPt6z6DU1ggKLxwS2yuLy38TE7IvnZHo9LYWsdWZJARVGRAnU5BKCoFg4QKp/2xWfNLIiEPRqauVfYYulIFIBF55XZBTR49LOess31ZeLvmprZH8V5RbtAvLb0Qikg9NtVjWmpQpXgJHjkoZT/c6OMy39kpab+6VMqTTQjKIRqVOOtqE/BEISDpT0+JTDhyU89IZSTuZku+SmPiWmIX48/kuTOdv60Umk49T9Fu+MhazcHmK6IrPJ3Uas6gzJTHZH7Ls4EL5O10XAks4LPpn9yP2oNHn4uYWXrO3z0XQyMixmYzUqd/vkC8URagtNprUNB0alKpImxsGDI8426m01E0qJW0aCMy9cSt6E+XC5OVyck3TtPQqJDpUWSG+pbcPutqhpxsGBoTmsGQhNDWJr25qEFxkNie4xURcfHVXp5S7skLK0Vgn1yktlWMqK63+LATtzXKtYFB8Wk21lKu8TMYDzU3SHyQScr1QEJYvlmu8+BpcuV7QioePwS3bBW+aTsKnf1/6ze+Pwp9vg0uXQnkC/uDDsLAFHnxa8HsNCcHh3boRqoLw5hmor4IbL4XF7bBmKcTD0m9s3ywoxK1r4a6PwJVr4RM3w23vgc5muLQMNi2GVW3wsdukLzl0GmIVsKoFtlwuSD43bcMWGytbKFNTzhhmvnYMBAXvWlYGFRVCJEnE5VNTLYSpZBrefzNELfb2nR+CFV3Q0QTXb4O4CscH4NIlEASa6wT/GgpAdbnYVEUJLGkRmsYuBbQ0dNdCczWMTUE6AkyiUIvSGoRbroLaMhjToaMe2iKYIciVr0NbXSZ+bkMz3HmN1FXUD1esBn8Q+rPwvisEmZdKw+pG1JkBlPETBBYH8C1Lody6AOW6dlhTBlfXwtrIxYOqe7vyr//8z9z23vfyla98lQ2bN/OpP/gD/uiuu87v5JpqC0ETFeeiqfBGH9yuwOi4GFwgIAYI0jmpKtCWj3Xp7YelS6ynOa6nQTbiCcTgZ5/zuxyejXzy+fIXSyjmUO1OY3QsPz1wmNOqKh0wQHubM7C2JRF3nrBr1v7yMicP7roBcUg+y2F2dcIDDwuT1mfIPtux2neZ9g3FhcYKvlNiP53PZCGmSIetKOI8K8qlE2tsgMFh0YXBERkkpDOiN/0T4jxGx4X9W1MhdTeeheoKaY+WRmuAUC91OTRiLchRI3oyFRdHNj1OxbFdUFEL0ThMHRfUj38KJaJDU60MeGy9qa6y9HJUUHqRMNTsA3qhqhvqQ9CkgFEleQmHIDIMJw3geSCKf2YQAqVQaUJtWp6KTI9Aacy6Gx+UDszncz0dwtGNd827NVXa2u4s3Lrq7tATCdFpv4tVe75P2u3B9UwSKoCBIeFLp9OiK4qColuDl/FxGRiC6EE26yAlkzNik7ouutXXL/lJZ8S/RMKiT20t8m0vYDCTdGy1sV7OL03IANkfk5sD91O7hjrxXVVVDi7PxrX
5XIuF2D7D7R/sgdaEhfGcnIIaSx9tRObUlPNwKJPJtzd33ZfEZNBvY/ymp2UQZC/i4fM5OLsL9bbDPdBzozxtf5nLObhK07SwnLq0la1PCQtpFQ5deH1NJoEyqx9JzX3INl89TE3NvUHI6Va9h/MfyuRcC764cZNhF1LVxpiapvM2I5ezeOTM/1aDed7Murdt/R8bh+pqR7/q65wbuEjEwsaVSBuUWIi6WFRuYjTrIZDNv7alsUH0UNPEhuz6UFXhdNv9ZWe75L80IbpWWiqfM73S/pom6yY0Nkh+ZpLix0tiUgetLZJu2W6x9dO90Bd3bnx1XfpN0xR/sLxL9pWXQmsT9A0AGlSFobZSbC0UkDppaYC6GkhEoKEaujvFzro75ZqjYxY/udTa3wHxKJSEIe4TtF91VT5aM+9mXz97n1jMh87qSEjqQVHE7k1TbGN8XLYrKmBkQm5EGhugbwQ6WgXBGeqVG5+aajAUQQpWJqRfDYegptxC1ilQVy7lGJsEYrJATjAg/dPJAZiIAH0oZi2UK9BUJ/mri0J1CYR1TD1MTqvGX2cCL0FZF7TXS5319cs5/cOg9Qvvu7xUFsupCaH0nYLjBkT3oVQ0QkuHtNGoKvmd9r17pm0Uk61XXMFjjzzERz/6Uf77F77A5iuvYmxsDE888cQTTzzxxBNPPHlHnssA/O2Xv/yuLsTnPvMZHnviCdrbW1m2fAV/9/d/77WsJ5544oknnnjiiSfv0OD5b//2XV+QlcuX8x/f+Abf/s63+epXvsKChQvp6+/3WtgTTzzxxBNPPPHEkws3eH7hxRf59J/8yX+aAm3ZtImdu3bxgdtuY9myZfzl5z/vLa7iiSeeeOKJJ5544smFGTx/9MMf5guf//x/qkI11Nfz11/6Evv27uUb3/wmjY1NHDh4EGMWW2NYHywMDkCuOKKuEMmkKC66V0FAxOwxhccX2XZPwlfVuf+7fxd+KHL9Yr+L5bFwe74y2oQP1QqcyqsTm6qhnD2twm3lHGUqLN/Zyvnrfgrbcj5xI8BM00IhmTCdke+cRUixMUdmxvltuIK0bPSSbhRBI+mCF3LvN6xz7I/pQjLlpWM4GCd02bavaehzz5lF2IEgeYz867j/N114w/nqr7A9irXPfG12Lt1RzlO/zqX7eXqtOPamus+3vvUiyDa3fRbat3t/IcrwbHkorAvVtqv5/iv2UR07dR9n77MLVRRDZ85vU4X+qFhbFKLr4OxoObffy8uvq35nfU7BPretnkv/iu6jOFqvGCWjmC8+m6+Es+97O//POfYs+qacxR/OZwO/Sj5V1ZWPea57tjTPWa+mY4uz+9Wz9JvK/HZ5Pu1WmKfC/9QCm3JjIAt9hY2Zs1GOk2Y+/m6OnbnSs3crGSdN08z397rLf7vHJm6/7u4HkjkniPOsPpS3N4Y4mz0Xa083ytEOTjVdSMC8MRb51FTTFPvPC0a10zedOprFyymQc/WNWd2dmHRhpgv/adeljSJUAdV0xoI5q+/NGTImdPef7vMNA9/l69bh871roRtnlfLycvpOneLvv/pVrty6lRtuuJGbb74JOjslwtbvg9YG+b7jcon8BYm2TaVgzyEH7/b8a7BqBRw+Sv+JNmpuLIH+QXh9l+zfc0Daa/1a2X/8DFy6Ena9CSVRWNgDh48KXufSS+CXr8B7tko+7v0p3HydRPceOAjr1gpWanxCInPf2iuImAOHJC/dnTA4KASIkhjEIlBbCzvfEEydrciptCDx4iXS4Lv2wLrVElXbPyjnZiz0XmMDPPyE5Ol0L+zZDyuWwAOPw+/eLpGmigIHD8GZAYhEMZIlqFs6BYW1ZhU8+jO59sqlcPg4xEIQicLREWgsIWnWEg6PQ2oGghEI+UTJJ1OC5IqEBW0TDkm9KxYFY2ISKsvhVK9QKIZHBLN18LAghl5/U/4fnYaWWszjoyiVARgYhrIKGEuBOS11HQ5Cfa0MhnVdsEOLe/JpKSDHplISEaw
ocM0VEmV/3Vb5/oNtEjVcUS5Yoys3QVUFfPIGabOKcqnTeAzKyoWKMToKobBgEG06w+SURBJXV0gZfH4r2luVKO94TLBKtbXScRiGIMiwKAuRsETJV5cDi6ChQvTDRq+VxCQqvXIIpmbgxb2YybUoPA6ZeljQIoio8lKoKIVXD5GKfYxQ6kdQW+0Mpg4fEepDOi111b1ASCMVZfDKXqiJw8nT0va7hqC7RLBv00lB+T31InTWO0jHI6fhuivg5dehp1Pa6uQwLGsTwsnQiFAhOtvg4Z1w9RJ46VUp76oVsO+Ag4FLpeDlnbB+jURCJ5MQT8CjT8H2q+Enj8L1V8Oho5ivT6A0G7BlHRw5DsuWij4lU4JUW7oov2POZgVz1NUJ9z4I61djjpgoE6egp0tsq69fKDg2RaCjVb4Hh6T+ykph11uwfCnsOyRt8sobwkEti8Geg7Bqidj7y2fk+P40vPYCZl0jyvQ4VCRg13G4ZAp8h4QlPjgEK2fgzbfAFwa/CokSQUVFQkLTKLOQXyBtZbdnqUXSiEVF5x99BtqbIOQX37V+NZzuEzvJAuUlou+Dg0IlsAkH1ZVOXS3osPZVOzecmYxc80yfbD/4pOhqPAY/e0HSPHla6AKv7oatGzB2JVGXjAgx4Knn4Ppr4JWd6DUL0VKn4fQIbFkNv3gJLl0h5+eiUBsSv7l2DezcCZFqSI+TqahDO/wW2tVb4ee/gMsuEft+6nnYvlWoNbouNvuTRwRD9urrmLEESnsTPPAk3HQ1vP6G2MCGtXDvQ3D9VaIfL7wsfvOXrwhtoboSXtspaMupKXh1F2xeD3v3CVEiHoc398DCbrGn13bBhsthbAxOnJZjfv48RMOSz8kJuWlXNSH4HDsptJOeBeLPdu+V+o3FHFJGTbXorWkKWWJqSvqDslKpo/Jy0bfjJ0SPd74FigGXrhas4OkzQkz45Uvi7xZ2w4uviJ4bmqC++gclrY3rMN86iF5dh68iAk88DSuWio+KRqV9BgZh1XJ4fpcgYWtrxCa2XQnpFDx9CLbFpD6e2wGNjaD74ZoqOHwKFE0oD4/vhptLoa4CfvYcrFwmhIpQSGgtui72HAqKfXS0k3l4gEDJIViyCCYmMWd8KE0JeOARqLcoKj6/6OCTz0odlybg6Anpv7raxbbDIWmHA0nBwJaUwtVb5FrxBPxu2PG51RZBIxYFJQAzKeg3QDkK69aIfZJCn3ofmtaPcbIcdbUGa1dBc4PU+aZV0NUhOE3DkLYzTNGriUmLhKQ7hI8bLoH9R+Fov2Btx8YEG3nJCnhrP6xZCceOW7jJuBA7hkck3VwOjg4KxafZwgOm085xIFSc0oTYyxt7YdlC0bV9hwQ1B9LP6RZWcUG7lD8UEh2srRH/XVkOnXXivw9Pw+gpWB2CIzoTSxcQ33MAWjXYewSaakAdAz3L/1/euQdXUV9x/PPb3fvIhbxJQkICBEpCAHmYBAiPEqQBKZ0RKaNY7GChrVVLp0TbmbYjVasiKEhRgyKUFmGqmKQDifKyKEYLgnlIikiQ8goESIXwCAESs/3j7Gb33txk/NdpZjL33p3d3+P8zvme89tz5vsz6zVUbgSk9YJzl4W27sQ1GNFf4p6oSPGbNQ0QZaB6J+DnJiQnCr1nZn+Zm80qYuhQXgOtCeJb+6dBfrvD/tLDD8MzYFC6+NSYGPEThgHNzRilxcWUFhf/X7xm3/DX9Vxruc60tWvlQkuLAJXHAyOyJFhoaxMFaWqCimow28QpbCqHhfOh+iCnn46T4PnUWag/C9mjYOsuuHYZxo0R4Nn9iQTPZTthYD8YkiVOrqJSwL2kDL43Sfp+oBAaC0QpS96B8XlQWQOnTsN9P4Sd7wMGNJ6HM+fFuOqOCWin95P2ExIEtH5wpygFSHsnTglFT+stWLIaNiyXgG3vAXnu8lUx+LRUeGqlOIEvT8Dqv8HiRbDwjzDvHit4RhzIR1WAhlk/BCb0h/IdAsA/WghtBpS
shLXFMDBZQOXFOngsk2Yzn4ikw3C2HuJSIDlSFLHmPPS8CbdlCkAl9BKaIl0X4/7PcRg5HLbtEQ7H6n9DznD4UxH8ZgHM+TXcNx1qjsPcqfBaDUyNg3U7YFYBlDaA5yD0SxFDuvdOCVbbTThQKUGaTS9ovzHwesRQYqJlHLnZYpw5owRYskdYwXOs0ChF9pT7c28XIOubKs44JVkodpqvi055vTI/TZOg71KT9BUTJSCmlICLTdcVHSUGn9JbntF1h/vVptrSdegVC6RBkl+C7bRU0W+/X9qPipKANBCF2ZKFYjsQD31ToF9f6dvvh+3VNMbmkta6Q6iILl4Sp33wkIz1v1/JtZ/OkwD2tqFQVArTsqBoI6x7HlZ9CgvT4e2tUFEF+8rgidehcKZsynrFwZpSmDIONpXAIz+BfZWw7gAsnyuBR8UBuGuqOI5fvQtVA+GNzSLzUSMkkI6NhqQkaGiEOQ/DB5tF3ucvyJwWPQNTJsL8Qji2F7a9R/vvNfT7j8KkPAk6CiYLbdtgi5orY5B82lR1ra3wWa0Ezw/9Aba8BrVAdRkUzpd1P1VvBc9RdupLPs+clXZ0HXbtgR/fCx9/Kna3rEgCvvED4OV34dn5cPQ4vNoCQ2Ogug2eWYo5ay6qqU64Y5/bD4U5gjv+ANTWw4O3YEsZ9OgNly7CgGSoOgKzCyAlDUYMc+YUG+voS6QreFYKlr8Ok3KF7mppMXy4GnZ+IDp91RTe8oljZU6DMx2Ks5hosWHTFCyyA2r77c4N64Clk6dFH59cBZNGwrBM+O2LMD1fsCwhXrBmwwu0rfTj/cUVyMuBpUUwNhs2bKYtewF64zb4836oWQm/WwLrV8D2XXClD9yRCG8Uy5xLSiB1PDTUcjOvAN+6V9CnTIZXN4ojj42GpX+B6ZOFY7j5usxl3uNw13R4sxgyciAlERY8B9MnwT/KhQM9LxfmPAEXxkkQs2SV4Oaat+CRuTKXnbudIGfJy5A/AfYdgMkTJTCq2Csb0ObrsOltmDhOAsyPP4OxObBmI8zIl83gydMWpWGbbIo/PwKffAGLUgVb3q+QjXR0pAR3rbccfm+fT7i/v7oo9GgD0yVozR4lFKy1hwQvXimBkckwJleC5PozUHAHlJbLumdlQuk7MHMGfO2HPj3k5cX6NyFvNPyritbcfIxYPzz9EqxYLC+lkhKlvcNHRe6L34KV90sgVf4efL9A9GJZFYy2NhNL18HsGXD8GkybAB8dEX3KHg5P/RNGD5Xg+cmX4Ofz4MvjgucnT0uAd8WiET14CAakc2tRPd5pW2Tu9Wcwj/lRSQF4YDEs+6Xg+s1W0dvHi+Ch2TAkAz7cLzzAI4eJbCMiZMOx+yrohvjN+DjR75goGB6QtbfPQoiOlg0NXjkwpbINDh+DR39m0QI203J5DD21Mm5+EUtEgRJfnZgg9jU0Q3AlLTWYf7lHQMZh41RigviKIRnQcAH21EJcFMy6CGv/Lr5581bIGSmbtsie8J2BojNHj8natrRAeR1MiBR/03RZdDcQEKo9Qxe/F+EX3Xid2kD8AAADI0lEQVR+DTz7qGygSrbBPTOsDXq8jNXjceYSCAh9bnu75avihN/a74OKq8A5GJwMu29Rd3s/cnZVwsPxsGM/PDgT1HlobcX8/GvUd32QHAOJjZCWAJcaoO84sQub5nPbOUg3UAkRqPYmSBwMWZYc0/qILvl84o/r2qAtXt5Ap/aRzd6NG2Jvhi5YnTlIxm+3b5rQfB1jzPgJzL777m8UfL6wYgWPFRZ+qwPoqVMLQvPywW8c3WkW3VWaEDA6UhRKD0nB2t8NFzesRw9OT9rpKI/hKH24FImuBaePUHKt3WpL15z0j67Jf2hK1J0i0jQr66HAazgpwI4UqWsMhseZS4QnOBVsutJaHl3m6tFcc7FTarrM0+eRe5QCJQcvKFM5MjVcB6oorYu0dGiKyH7GbtfqC0uuUT4ZS4RXxogHvDpgZQ98ftCMzjI
LSsWFpOdMOuuH6f5N5xOvOu7r5j/oWbPzKWum2fVJeKFlJWY3/dL5OdWV7ttqE/a6ctbLvd4KkbFb9wLWd3dGy2utu261EemTzmw70XVnXTVNdLUjfao7h1i4Sy600DIDgscVEQhOVRq63OAzOqeKu0tP2u17LQzQTHGeKiQNGpaDVwUH47b9+r1w02P99nUuZ7DsUmka+C35Yn3q1kEmynBwx2PZkWFYBxhpoHd71F/wzwjLXg3LXuw10XUwTMtm3CUq37BpFZJOD/ilj45yAHd6WUpRlBufdJcuaTb/tteFsZYN+zRHR2x5a5ZsNCUbSBBc6uhXD1MWYT3v1V1643PWroNT2MWha7etwuGKu6TAVQLhLhmxdUNZ+Amio6ElOkpz2rFtsMNXhMpRdV86Z0+8Y5xGsC/TtM5+qmPdXGtryVtpmtOsbrj6ssbXcYKvHr6UwNCClUbXg0/91ZSDF6Gno7p0J2yJIG7frMBrr6u/s4/WdPEnmsvXdmXT0Jnr2o3p4f4MdxmIy8EowmO22UUpYVhf4T79U3PWAmeduiwNxXpGhZSydeCbCpa3vXl2Hy5kCyBceVjQNTdIBPerugEWFRa6VIhMXCftmmFkFuq7QxsM66PDy/1/DN8E7BHYQvIAAAAASUVORK5CYII=)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FzrBe84lJuv1" + }, + "source": [ + "From the spectrogram, we can notice that most of the energy is concentrated in the **lower part of the spectrum**. It is convenient to allocate more filters in the lower part of the spectrum and fewer filters for high-frequency components. This is what the mel filter banks are doing.\n", + "\n", + "Each filter is **triangular** and has a response of 1 at the center frequency. The response decreases linearly towards 0 till it reaches the center frequencies of the two adjacent filters (see figure). There is thus some **overlap** between adjacent filters.\n", + "\n", + "The filters are designed to be equally-spaced in the mel-frequency domain. It is possible to pass from the linear frequency domain to the mel one (and vice-versa) with the following non-linear transformations:\n", + "\n", + "$ m=2595log10(1+f/700)$\n", + "\n", + "$f=700(10m/2595−1)$,\n", + "\n", + "where $m$ is the mel frequency component and $f$ the standard frequency one (in Hz). The mel-frequency domain is compressed with a logarithm. The result is that filters equally spaced in the mel-domain will not be equally spaced in the target linear domain. 
We indeed have more filters in the lower part of the spectrum and fewer in the higher as desired.\n", + "\n", + "Let's now compute FBANKs using SpeechBrain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0xW3t0ywPsNa" + }, + "outputs": [], + "source": [ + "from speechbrain.processing.features import spectral_magnitude\n", + "from speechbrain.processing.features import Filterbank\n", + "\n", + "compute_fbanks = Filterbank(n_mels=40)\n", + "\n", + "STFT = compute_STFT(signal)\n", + "mag = spectral_magnitude(STFT)\n", + "fbanks = compute_fbanks(mag)\n", + "\n", + "print(STFT.shape)\n", + "print(mag.shape)\n", + "print(fbanks.shape)\n", + "\n", + "plt.imshow(fbanks.squeeze(0).t(), cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VRjJ9HaSRcuc" + }, + "source": [ + "Normally, 40 or 80 FBANKs are computed. As you can observe from the shapes, the dimensionality of the time axis is the same. The dimensionality of the frequency axis, instead, has been reduced. You can see FBANKs just as a simple way to **compress** the rich information embedded in the spectrogram.\n", + "\n", + "The SpeechBrain implementation of the filterbanks is designed to support different shapes of the filters (triangular, rectangular, gaussian). Moreover, when freeze=False, the filters are not frozen and can be tuned during training.\n", + "\n", + "To make the computation of FBANKs easier, we created a lobe that performs all the needed steps in a single function:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "S0cqrIjpSKOQ" + }, + "source": [ + "The SpeechBrain implementation of the filterbanks is designed to support different shapes for the filters (triangular, rectangular, gaussian). Moreover, when `freeze=False`, the filters are not frozen and can be tuned during training." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Jb0yE3ZyUgNN" + }, + "source": [ + "To make the computation of FBANKs easier, we created a lobe that performs all the needed steps in a single function:\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oSq0Sfc_UqDE" + }, + "outputs": [], + "source": [ + "from speechbrain.lobes.features import Fbank\n", + "fbank_maker = Fbank()\n", + "fbanks = fbank_maker(signal)\n", + "\n", + "plt.imshow(fbanks.squeeze(0).t(), cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()\n", + "\n", + "# Zoom of first 80 steps\n", + "plt.imshow(fbanks.squeeze(0).t()[:,0:80], cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QIs_l3UfTES6" + }, + "source": [ + "## 2. Mel-Frequency Cepstral Coefficients (MFCCs)\n", + "MFCCs are computed by applying a Discrete Cosine Transform (DCT) on top of the FBANKs. 
The DCT is a transformation that decorrelates the features and can be used to further compress them.\n", + "\n", + "To make the computation of MFCCs easier, we created a lobe for that:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2TgofojnW_mo" + }, + "outputs": [], + "source": [ + "from speechbrain.lobes.features import MFCC\n", + "mfcc_maker = MFCC(n_mfcc=13, deltas=False, context=False)\n", + "mfccs = mfcc_maker(signal)\n", + "\n", + "plt.imshow(mfccs.squeeze(0).t(), cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()\n", + "\n", + "#Zoom of the first 25 steps\n", + "plt.imshow(mfccs.squeeze(0).t()[:,0:25], cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1LAYg7YpXAzZ" + }, + "source": [ + "In the past, working on decorrelated features was essential. Past machine learning techniques such as Gaussian Mixture Models (GMMs) were not suitable to model correlated data. Deep Neural Networks, instead, can work very well with **correlated data**, and FBANKs are nowadays the preferred choice.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Eeqnb_c2fFLs" + }, + "source": [ + "## 3. Context Information\n", + "Proper management of local context is essential to most speech processing tasks. The dominant solution in the past was about setting a “hand-crafted” context with the following approaches:\n", + "\n", + "* Derivatives \n", + "* Context windows\n", + "\n", + "### 3.1 Derivatives\n", + "The idea behind derivatives is to introduce a local context by simply computing the **difference** with adjacent features. 
The derivatives are often computed with MFCC coefficients:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "mqZKFjQ0hydd" + }, + "outputs": [], + "source": [ + "from speechbrain.lobes.features import MFCC\n", + "mfcc_maker = MFCC(n_mfcc=13, deltas=True, context=False)\n", + "mfccs_with_deltas = mfcc_maker(signal)\n", + "\n", + "print(mfccs.shape)\n", + "print(mfccs_with_deltas.shape)\n", + "\n", + "plt.imshow(mfccs_with_deltas.squeeze(0).t(), cmap='hot', interpolation='nearest', origin='lower')\n", + "plt.xlabel('Time')\n", + "plt.ylabel('Frequency')\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qs5h8Z4iiEeZ" + }, + "source": [ + "The first and second-order derivatives are called delta and delta-delta coefficients and are concatenated with the static coefficients. In the examples, the dimensionality is thus 39 (13 statics, 13 deltas, 13 delta-deltas)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "scbtvj-BjZUh" + }, + "source": [ + "### 3.2 Context Windows\n", + "Context windows add a local context by simply **concatenating** multiple consecutive features. The result is a bigger feature vector that is better \"aware\" of the local information.\n", + "\n", + "Let's see an example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FIwV2I6ckQb1" + }, + "outputs": [], + "source": [ + "from speechbrain.lobes.features import MFCC\n", + "mfcc_maker = MFCC(n_mfcc=13,\n", + " deltas=True,\n", + " context=True,\n", + " left_frames=5,\n", + " right_frames=5)\n", + "mfccs_with_context = mfcc_maker(signal)\n", + "\n", + "print(mfccs.shape)\n", + "print(mfccs_with_deltas.shape)\n", + "print(mfccs_with_context.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iNfhmNtmk1Pr" + }, + "source": [ + "In this case, we concatenate the current frame with 5 past and 5 future frames. 
The total dimensionality is thus $39 * (5+5+1)= 429$" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mPoAwpV8lpmL" + }, + "source": [ + "Instead of using the aforementioned solutions, the current trend is to use static features and progressively add a **learnable context** through the **receptive field** of a **Convolutional Neural Network** (CNN). CNNs are often used in the early layers of the neural speech processing system to derive robust and context-aware representations." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TGKN8Vspm3Zn" + }, + "source": [ + "## 4. Other Features\n", + "A recent trend is to feed the neural network with the **raw data**. It is getting quite common to feed neural networks with the **spectrogram** directly or even with the **STFT**. It is also possible to feed the neural network with the **raw time-domain samples** directly. This is made easier with properly designed networks such as SincNet. SincNet uses a parametrized convolutional layer called SincConv that learns from the raw samples. SincNet is described in [this tutorial](add link)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w2_iy_smtCsS" + }, + "source": [ + "## References\n", + "[1] P. Mermelstein (1976), \"Distance measures for speech recognition, psychological and instrumental,\" in Pattern Recognition and Artificial Intelligence. [pdf (Web Archive)](https://web.archive.org/web/20200714014004/http://www.haskins.yale.edu/sr/SR047/SR047_07.pdf)\n", + "\n", + "[2] X. Huang, A. Acero (Author), H.-W. Hon, \"Spoken Language Processing: A Guide to Theory, Algorithm and System Development Paperback – 2001\n", + "\n", + "[3] \n", + "\n", + "[4] M. Ravanelli, M. 
Omologo, \"Automatic context window composition for distant speech recognition\", Speech Communication, 2018 [ArXiv](https://arxiv.org/abs/1805.10498)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" 
+ ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1iH1Tvgrsi7nD_uxh5jEj7DAXaL4gdjze", + "timestamp": 1612452449642 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/preprocessing/voice-analysis.ipynb b/docs/tutorials/preprocessing/voice-analysis.ipynb new file mode 100644 index 0000000000..648f1d8ae2 --- /dev/null +++ b/docs/tutorials/preprocessing/voice-analysis.ipynb @@ -0,0 +1,1269 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6d3c5d00", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/preprocessing/voice-analysis.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/preprocessing/voice-analysis.ipynb)" + ] + }, + { + "cell_type": "markdown", + "id": "781dfbec-4093-4ca2-af57-41471236ea5b", + "metadata": {}, + "source": [ + "# Analyzing Vocal Features for Pathology\n", + "\n", + "This notebook goes through a simple voice analysis of a few speech samples. If you are new to speech feature extraction, we recommend reading through [Aalto Speech Processing Ch. 3 Basic Representations](https://speechprocessingbook.aalto.fi/Representations/Representations.html) before going through the notebook to understand the background and theory behind the signal processing techniques used here. 
Throughout the tutorial, there are several mentions of the PRAAT software, which is an early open-source software used to compute some of these measures, and against which we compare our results from time to time. You can find out more here:\n", + "\n", + "* [https://www.fon.hum.uva.nl/praat/](https://www.fon.hum.uva.nl/praat/)\n", + "\n", + "As a sample vocalization for demonstration purposes, we first download a public sample from a person with Parkinson's disease and cut to just the sustained phonation." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "eefb490d-6199-49a3-b974-1a2b6c323811", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget -O BL01_ENSS.wav \"https://data.mendeley.com/public-files/datasets/9dz247gnyb/files/5183cb8f-77ff-418d-a4cc-e2eca48cb6a5/file_downloaded\"\n", + "!ffmpeg -ss 18 -t 3.5 -y -i BL01_ENSS.wav phonation.wav" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "e5919e03-40fb-457e-9d03-8b196f2fcd99", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "import torch\n", + "import torchaudio\n", + "import matplotlib.pyplot as plt\n", + "from speechbrain.processing.vocal_features import *" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9e595d2d-79c0-41f6-8a25-89c3f4c55da5", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABI8AAADZCAYAAACpbRD7AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAA84pJREFUeJzsfXec3MT5/iPt7lXfuZ97wTaYYsAYgzG9mB56CAFCJ+FHvgQIhFASeqihl0DoJKGFGqrBBhtsbAy4gY177/bZvt626PeH9I5Go9GsdLfnO5t5Ph97b1ejmVej0WjeZ95iWJZlQUNDQ0NDQ0NDQ0NDQ0NDQ0NDQwKzrQXQ0NDQ0NDQ0NDQ0NDQ0NDQ0Gi/0OSRhoaGhoaGhoaGhoaGhoaGhkYgNHmkoaGhoaGhoaGhoaGhoaGhoREITR5paGhoaGhoaGhoaGhoaGhoaARCk0caGhoaGhoaGhoaGhoaGhoaGoHQ5JGGhoaGhoaGhoaGhoaGhoaGRiA0eaShoaGhoaGhoaGhoaGhoaGhEQhNHmloaGhoaGhoaGhoaGhoaGhoBCLe1gK0d2QyGaxduxYlJSUwDKOtxdHQ0NDQ0NDQ0NDQ0NDQ0NDICSzLQnV1NXr37g3TDLYv0uRRFqxduxb9+vVrazE0NDQ0NDQ0NDQ0NDQ0NDQ0WgWrVq1C3759A49r8igLSkpKANgdWVpa2sbSaGhoaGhoaGhoaGhoaGhoaOQGVVVV6NevH+M+gqDJoywgV7XS0lJNHmloaGhoaGhoaGhoaGhoaOxwyBamRwfM1tDQ0NDQ0NDQ0NDQ0NDQ0NAIhCaPNDQ0NDQ0NDQ0NDQ0NDQ0NDQCockjDQ0NDQ0NDQ0NDQ0NDQ0NDY1AaPJIQ0NDYwfF3R/Pa2sRNDQ0NDQ0NDQ0NDR2AGjySENDQyMiahpTSGesthYjK575amlbi6ChoaGhoaGhoaGhsQNAk0caGhoaEXHkAxPx/uw1bS2GhoaGhoaGhoaGhobGNoEmjzQ0NDQiorymEfVNmbYWQ0NDQ0NDQ0NDQ0NDY5tAk0caGho/G2ypbcpJPYZhAADmrauCZbV/9zUNDQ0NDQ0NDQ0NDY2WQJNHGhoaOzwufPFb/G/WGoy4cxzWVtTnrN7jH52E2qZ0zuprDTQk0/j7p/PbWgwNDQ0NDQ0NDQ0Nje0YmjzS0NDY4fHD6kqscUijXAW6tmDX094tj6rqk3hywpK2FkNDQ0NDQ0NDQ0NDYzuGJo80NDR2eBjtvD4NDQ0NDQ0NDQ0NDY32jO2OPHryyScxcOBAFBQUYNSoUfj2229Dnff666/DMAyceuqprSughoaGRnuCZro0NDQ0NDQ0NDQ0NFqI7Yo8euONN3DNNdfg1ltvxYwZM7D33nvj2GOPxcaNG5XnLV++HH/6059wyCGHbCNJNTQ02htay7ss19WmMxbOfuabwOPfLN2c4xY1NDQ0NDQ0NDQ0NDTU2K7Io4ceegi//e1vcdFFF2H33XfH008/jaKiIrzwwguB56TTaZx77rm4/fbbMWjQoG0orYaGRnuB0QrWN61FRqUyGUxVEES/VhBLhDOfnpJLkTQ0NDQ0NDQ0NDQ0fubYbsijpqYmTJ8+HWPGjGG/maaJMWPGYOrUqYHn3XHHHSgrK8Mll1wSqp3GxkZUVVV5/mloaGgAQH1TGrWNqVYho3KJ75ZvbWsRNDQ0NDQ0NDQ0NDR2IMTbWoCwKC8vRzqdRo8ePTy/9+jRA/Pny9NQT548Gc8//zxmzZoVup177rkHt99+e0tE1dDQaMdoicXQHR/OxYaqxtwJI4HhBCmqaUxh5eY67N67NCf1aWhoaGhoaGhoaGhoNBfbjeVRVFRXV+O8887Ds88+i27duoU+78Ybb0RlZSX7t2rVqlaUUkNDY9sgmEBZubkOd330U6ha6hzLo22Bj39chxMem8S+VzUkkck0n/n6cuEmXPfm7FyIpqGhoaGhoaGhoaHxM8N2Qx5169YNsVgMGzZs8Py
+YcMG9OzZ01d+yZIlWL58OU466STE43HE43H861//wvvvv494PI4lS5ZI28nPz0dpaannn4aGxo6LhRuq8eykZaHK8hQU0TiWBUxeVJ4zeYJc4va9cxwmLFAnB1Bh4fpqjJ2zvtnntzYWb6zBNW/MamsxNLYBNtc0omYbkbAaGhoaGhoaGhq5wXZDHuXl5WHffffF559/zn7LZDL4/PPPMXr0aF/5XXfdFT/++CNmzZrF/p188sk44ogjMGvWLPTr129biq+hodHGsAL81ZpjyyO6gv3m+WnNqEUOElPkkJJpC02pTIR6WimiN4eollDVDUlU1iWlx5aX1+KdmWtyIZZGO8eFL36Hhz5b2NZi5ByV9clt8txpaGhoaGhoaLQFthvyCACuueYaPPvss3j55Zcxb948XH755aitrcVFF10EADj//PNx4403AgAKCgowbNgwz79OnTqhpKQEw4YNQ15eXlteioaGhoNTnpgceOzJCYtRWS8nG6JAtOYZ99MGecEQ2F5UQ0ZCtVLIo/nrq7DrLWNhWVZohfm293/CFa/NkB5r70HINXKHZDqDzA5Isux9+2eYtmxLW4uhoaGhoaGhodEq2K7Io7POOgsPPPAAbrnlFgwfPhyzZs3C2LFjWRDtlStXYt26dW0spYaGRhTMXl0ZeOzvny7Aqi11za571ZY6zF3rrd+Chd/+6/tm1WfIGI5W1oEbkmk0JNM5q6+irgk1jSnUNaVaZCVR05BCUyqDp75cgv97VU4IiUhlMpGspzSC8cj4hXh+cjh3y22BWasqQssjfY52EOjxraGhoaGhobGjYrvJtka44oorcMUVV0iPTZw4UXnuSy+9lHuBNDQ02i2em7SUpa0P4kmaTaDkwHJiY1UDDMNA95J8t1qHjSIF+9r/zkZRXkxo2sqqgAdJd+nL32O3XqX49zcr8PblB2LfAZ2bJTs1v66iAcvKwxF8Oy5lsO3x7bIt6FKcB2CnthYFAPD14nI8N2kpLjm4fcijoaGhoaGhoaGRW2xXlkcaGho/Twy+6eNmn2shmLTY45axua0w26mW5Qmufd1bP+Dm9+Yoz6lqSKK2KXpw4SBSrCmdQcqJVdSYarlFU1QjkiBSawc2RtHYjrGhqgGzV1WELr/jOeNpaGhoaGhoaNjQ5JGGhka7hmUB6WamqM9mnVPbFI08UdWWSmeyklFN6YwnuPa2UDS3BSejgwTveDjtH1+3av25HDPTlm5ukXurCm98twq/+3fz3Fw1NDQ0NDQ0NHYkaPJIQ0OjWdhS2+T7LZOxgyf/tLYKqXT7iP1BSqrFvsvLba5pxIaqhsA6qhq8gbtZfc5factCbVMa6YwVKci3yG+F0alDlQnZ/g+rK0KWlGN7MhhaV1mPrZJxuz2itS21Zq6siFTeMMKPOQPBZdMZC4s2VEdq++o3ZuHN6asjnRMFmhv1YnNNI5LtZH7X0NDQ0NDQ2HbQ5JGGhkazMOLOcb7fLv3X93h+8jKc8NgkzFsXTQGMilQ6E9rtKoyifceHP+Gq12dKj01eXI69bvsMgE0Wqar7+Md1OOjeL+RyRKBa+JLNUV6DYzx5v5/8RHQLk09+XIfm0EaGgmGI0jfNxUUvfodHxrevFPFbapvQkExj4A0fRT63PXEaubp/P62twtEPfxX9xFZieIKuasXm2gAx2tNdaR0c/veJeGdGy8m6F79ehv98syIHEmloaGhoaGhsC2jySENDIxQ2Vjcgk7Ew8m8uaWRZFlZvrUNDMo3NNY1YXl4baL3TXFiCirxgfTV2u3ksHvtiMS59ueXuJNkskgCgMensssuSrQnnNaYyqGmMHqMoDHgS7JiHvwx5TjilPgp5cfkr/uxq/5q6HD8qMucB7cNKqb2p9iPuHNestPUGIpj6tAHKaxqzEimTF5X7MgnS8z5nTSXGzlnfavJFgewqDvv7RDSlMpgwf+M2l6etUduUQjLd/MFnWRYaU2lMXLAJkxZtyqFkGhoaGho
aGq0JTR5paGiEwv53fY5vlm5GeY3r9rOsvBYH3zcBz09ehtOfmuJhB0TSJ1eoakiiPplGRV0TNlU3KsvyvAnpsc2RSsa/ZKtv+oqtmCUE2qV6ttQ24evF5aGtFOg8vs2FG2qU5wT1f67cnageIqceGrcQkxa3b0WwvaaI3xZWV9saI/82HjMDAk3TbfjN89Mwd22VtMz7s9fi/rHzQ7WlcoNrKVRDZtXWOlz00nee33g5bnj7h9YRqo3R0udo/LyNOOz+iTpIvoaGhoaGxnYGTR5paGiERkoIXE3fkukMs87JtddGS+uzrJYr5yIJFUbpeezzRXh64hKfLADwxfyNOPe5ab66g9ps77D7ePuIDbM9yLi9QuzbjCLQvVu2+TdkTUU95qypbHVSsLlj5vXvVuVWkHYEWZeETWxQ3ZDE+hxbqGpoaGhoaGi0PjR5pKGhocS0pZsxd63aHYnQ0jg9MvDVfPjD2kjnRiGNrBDlZceDrIdag6OIoiMzK6V2xpYEWqQx6ypLSTq0BNsRH5cVYcZCZV3SF+i9tSCTJywxurskS2GYcfvylOW48jV5nLJcY8ricl8g73b2aG0zyG5rKp3B4Js+Vp6XzlgYeMNHvvu/saqh3SRY0NDQ0NDQ0AiGJo80NDSkmL2qAo+MX4h7x87HS18vB+AnRHgdgLlWtaJMV7zqVRSjWBwQadEcMoUnjdwYSd4sbtLzmsFWkHgylzv3e0vijbh/R7XIWldZz53bDBh2+89NWoqFG6oxbelmRhRRffd+Mh/Xt6K7T2u5U7YGPpi9Fptrgl0zs13Lla/PxG3vz821WIFozri0LKCuyY57dOSDE71jPcQgy+XdfPrLJaiok2fju/X9uXhjB7YkainC8L2y2F6WBex/9+cYP29DK0jVdtjvrvFtLYKGhoaGhkbOockjDQ0NKaav2Iqnv1zSbGsNC8D6yrZ3TbBgKUmcUPoui+8Toj1FhduSuPhDMywyNlY34LR/uNnXXp22EtOWbmbfR98jzyJn93H2ziGy6u6P5+G75Vtw1jPfoKbJG1x8bWUD1lTUy05vMdqrK2CQXH94bSamr9ja7HqT6QxSaQvvzlytJKF4nPDopMBjW2qbsgaDn7Z0s89CRwbZs7B0kzyDWRBybel47yfzsbTcK0Nkl7jth5vc5jDYp+H5Dtgu0TtC8Gxy3csWj09DQ0NDQ2N7hCaPNDQ0pJAHibaEMmrF6oB7Ps+lSI4M3k8VDCOaUplNT6yq97sARZGnuWgO6THuJ3snP4pYFXVJzFxZgSmLy/H0l0vwzFdL8MWCjaioawokxdorITM6YOy19D6d9PjkllWwDUH35o9vzMaMlRWhzvlpnTyANQBc9OK3ePCzBcrzb31/btb062Es3sLep1y7ZcqrC6J9JZY0Pxf2yLJwwQvfojGVzl5WVQ3393nPf9symdoBeNe9TMbKefZRDQ0NDQ2NtoQmjzQ0NHyoduKkRNXLWiNwrUo5bE5rzcq25nwuLa+FZVkwYLjua0FqZYCLhr9u71VQfSoFu7XV06lLN+PfU10CYPgd47BAYU3iBsxujkugWFdurm5dK1m9/bgmXPyvKMjFJV/1utzSLJdjJZkOjkfVGhnPsj7f2yC7o6e5kBPOW9NXt64gbQzqhy8XbkJDMnysIp/bczting++T25V2RLMWl2BUXfnfgNFQ0NDQ0OjraDJIw0NDQ+21DZhz9s+87qEBJSVLf3bQxBZy7KwtdaOXbJoY43zW/ZzZLjytZl4ecryZsnRHnQj8bKaK1MqrbY6U9Wb5nbgA8dSQAUPjVsYWsZthcr6JP43a03O67UsC0khcLDYL+mM5SF3v1y4yZHHH0y+pVkGffIpjkX27iKLvWZLE3z+Y58vamGtQhsRhfzTm7Nz2n57waxVFZi6xHZjFbukpeRdWwf4X701966yrRX4X4VkOoPVW+sCjzeldGByDQ0NDY3mQ5NHGhpthOqGZItN/lsDYrplXnnNthT
2xiCJtnD+bvkWbKltwqPjW674/bC6EvvcOQ4LHWsZOcnll0+mAM9bV4VVnGLhc/9pgX4Q3jUn/HkDb/jI8/1zIRBtlNsS9dKC6v5q4SaMuvvzZhFXLSUC+PucKzJv7tpKXPX6rNxUBlfx/mTOehz90JfKLGkfzF6Lw/8+EYZhwLKAC1741hOXyld3DpTxTMYltWQkX2VdUEY3eYfn6j4YMKRjLizhuLG6AddJiJ43v9eBsWV48etleGT8Qi8p2Yzh1eIYdBqBmLhgEw6+b0Lg8V3++sk2lEZDQ0NDY0eDJo80NNoI5zw7Df+YsAQ3vvOjJ5jthPkb21CqoFhH4c9v7g70mU9PxbLyGjw83qv4+Xa4WZaz4HZSDgEmc13yX4ta3lWSXVw+llK2q7369ZmYu1bh6iT0tyzbWhj85rlpuOa/s3y/PzNpaeg6mhO/yeDC4AZBJCS3JXa60Zs+XCbJsvKogZpbzn5c9OK3vvTklfVJLN9chz+8PhO3vDdHel59Mo3NtfKMYD45PRn73Cv/RkE2idhc04j/fr8KZzw1RXp8XWU99r7js9D1ifARo8JndUMyq7VElGdlnzs+Y1Zw6yoa8KbPxczCdW/5s/2FdUPdkcmPoG4+/O8TQl23qr/E+94aiOqatmB9dYtiFrWF5WlbWW5paGhoaPw8oMkjDY1tCN6aoLohiYZUGq99uxLrKhtwpZMd66KXvmsr8XywEN5qBxCU1VaRiJfBbu+KV2fg4x/XSeXg5axPeq28gpRWEWFjegTFM3pv1losLw92I8gVJi8uxzszgl2plm2ORpBEQVaLtAASwz7mz7zUWggifY54YGKz6mtMpVHXpM4+FoQJCzYh6bgCimMnlc4gHdCpUftJVs2vn/km1LnrKuux79/GY0tdU6Bbj8edUdLYzJVutrj3Z6/FxcL8lk3ZPfPpqXhucnYCNKiaL+ZvwFMTl+CFycvw/fIt2FqXZBnjaFxOW7oZb3y3Uno+P3bbU4yetoKsm5dvbt78FuXdkgtEdU27/D/T8cLkZS1ud+7aSny5MHeZ5NZV1mPVltZ/p2hoaGhoaIjQ5JGGxjbC0k012Os2YYeeWye/P9sfs6QtwOxIPO5q3gW9yjom12v/bPV9vbjcZzkiXoNhGHjmq+wKaBC5oFIZm3O9c1oYdDmSdZdT9OvF5c1qK1uf2FZYZA2Wra62h2WBuYvylj+WZaGirgkvfb0MF7/0Hb5auAlLN9VI66Bxf//YBbjs39NbVd6gPssFiRRY1rmfvMVYUEB06ougcXLaP1yLpdVb6zDDIZPCylNRlwwkcMNwOVMWb8ab01fhn18twaRF5Y6sXnz20wY2P0itY0LKuqPD804IcKVNZ9x4czxqGlMB85bl+/bLACu3bY4WT1h2Bf/9bhXu+2R+i8Uh3PvJ/MC4Wprg1NDQ0NBoTWjySENjG4FcqSYu2IiHxi20Y5Y4x9qLpfmJj03y/mAJCloWOXkFUrymjVUNuPGdH1smYAgsK68NlWkr6FLenbkaK4WddHE9Lt0xD6FiUp9MWLAx8BxZLbnSB6obU2wc5go8iRRYJoRFWphr/OTHdXjmqyVhRQtsY+hfx2LOmkoM+Ysb/2PhhhoMv2Mc1lU1YOmmGtzw9g94d6Y6KHZdUwpV9UHxfrKD7n+UO0LXUN+UbrUMY1FibJGyakmkEe+paRioqEs6RFRQ2641VhirNL7VTdWNEss251NSSxj3QzuuUvh+FovWN6Xxr6nLQ5+/rbGxugEvfR3BusYxRw1yH/5kzjoccI8/w9iwWz/Fsw5B55L6kuot4PsVW/0H2ghh73xDctvFL1SNWk0daWhoaGi0JjR5pKGxjUCLuukrtuKt71c1O7V5a2Lu2ir5wj7AxSts7Jc9b/0Uq7bW4bVvXdeQ+qY09r9rvLfeMHEzuL9lysef35rNAuaG6V+xyB/fmI3JIax0Zq6qcOSRt8HX2xzyJ8m5AwVdRmMqjUfGL4zkwlDbmHI
V6hZoGs2xOgvbXlC5yYvLc2ChZwspujGyLGfCNUxdslkRc8doJfomO7bUhYt7RAK2VqB0ulUf/bg+dNnapnROsj4ZAFZtqbet3wDsd9d4uQtVyPhksjLMBbaZMq6pqMct/5vbzLNbH3PWVOK2D34KVVYV3YzmwcZkBo0B9/a/39vxpWQBycPEstvWEK9TTEbAY9ebx2Lxxmrv+a3I5LSEhNfQ0NDQ0GgumkUeVVRU4LnnnsONN96ILVu2AABmzJiBNWtyn7pYQ2NHwJJNNdwOOIQ//Kioa8KLUXaDWwkWLK81EVlKRCS9qhv9cWGaUhlsrG6UlPZLkQ2euCRZKS3vNcgUnbhpIJnOoFYiN+EDh8RQdoV4z0mCgHPmravCV05sjJemBN9/On9rbRKPjF+EvwYEV5aIAsOwLTRyDar/tvfn+votSoDpLSGDQYvYWtuEFTmK68S7fpz97DdYvlnuFtlSNCc9eZS+FF1Y9r1zXEi55NY7hMq6JCtDx2oCssMF1fP2DJtIaAlVILMqykhiai0tr5Uq1bJ4RkorqxAyXfqv70OUkiOZzrTYpTUqmhv8XbSaVfUbzQf0/lBZhrb1fopoQWRZFq7972yU12SfM8PGx2sptGuahsb2jUUbqvHPL5tvRa2h0ZaITB798MMP2GWXXXDffffhgQceQEVFBQDgnXfewY033phr+TQ0dggc9eCX0t/dTDPeFfPijTW4PeRucK4h210W1/PM8qgla9iQ577yjTyQrSiLp2pmWUMKob/Ug4510hMTFvsqMk0DL09ZjlOf/NrbFlOYXeEXbqh2jinFdJrwx44yYCtXlmXhze9X466P5gEAs8xoSmXwyRyy6vB2Wipjl0nEgjtTJta4nzZkFzYCmCIJCy9NWe4ngDwBs+V1kBIbJajt719x4w09M2kpznv+29Dnhhl+fJkgpd6y7OD3Yua0lkC0WlvOx/TK0pfigvQjFkzeCp2lzfe8Cz/sfcdnmL260hHHjStGz8fLU1dI61URFec9P01SPrwlSvC4khe6b+x8POX0FX/uusrw4y/XPMcPqyvxi8cn57jWbYOgvpi/vgp73Pqp9Ni6yoY2J4tE7HrzWPY3zfNvz1iN9SFcoUVoikdDQ0OG71dsxT05jIOmobEtEZk8uuaaa3DhhRdi0aJFKCgoYL+fcMIJ+Oqrr3IqnIbGjgXXV8iCemHZXjYWLUFQ1S4zZT/jfonQTnDZd4S4M2JqZ0BQSmXuVNzfF7xgZ3t64zu/6wTJbxq2W93WuqS/fgEXvijPjicqvQs3VGPyIrk73Mi/jcfEhZuQymRcgsppk08VLQYGD5ceO7hQSxQ3lzQA+8zqtsb141/enZNVvmz4OISrlLf9YGQTg1fqRdfO4x+dhNcl4ykbLOEzCIc/MDFcPZblWZBGmUZG3T0e3y3f4tTjPy66OhJZFmWukpVdvNEOSj4p4NmI2kZgHZ76DMxeVYF5a6sA2OQfHb/q9Vm+c5szQt+avroZZ8nx6rSVaEimcXbILHmEyvokxmcjioW+nbu2EpV1wVZkPgLcd9wL3j1RHFcyq8/2RiZFcvfclrIHkaWKZ6W9uclraPxc0U6W+BoazUJk8ui7777DZZdd5vu9T58+WL8+2kJeQ+PnCD5YrGj63xo497nsCkeGz6okiYAsLjpV1gBBC1TVNcoIoSBUc+4xMlKnKZVhv/PBdwlrKryWBflx02nbLRQzo7/apRZQ3N+vf7uKWZOJ/VeftGPA/GvqCknMFbeW+8Z6d6ooG9b6quBdcT7jXFSEHpeWOli6yootDDYori8qsl2TjNDJFrDZzggWPWCuSpnbUut3k0mlMx7BPhOIAb66tRXR0pJvqGrERQ4R+smcddIysjhefN/UNqn7QBYAO5vLJbU5/I7PPFZBizfWMGJCdFXzNur+uWCDG5NmypLN7O9vlm2GiLcd8oedHnHgutmw7BOXbKrB5/OCiZzD/z4hsJGb3v0RG6oaMHWpLWc
mZND76Su2RHajO+WJr/H+D964YpmMhZenLEedc3/l8c6yy6TKZBdkhduWiDpjPj/Zm9FTnHNvfm8OS5bQEmjFU2NHRW1jSjlPbg9oSKYjv381NLYnRCaP8vPzUVVV5ft94cKF6N69e06E0tDYnnDaP77G5ppGqdsFD1/GLqUyn7vl4deL/YoRj/WVDRh008csPfJSx7qlMZn2WUiFiW8hlhWRTGfQkExL0zl/v3xr1nrv/MjrzkftPDlhMeqb0pi1qsLX19KsZs6JRB6luADVhmHgwXEL0eSkdKfzVdmfXneCgX8+z68cGAZZ5njl2FLbxOo0Q8RcEUFKc8wMnspVSt3Zz3qJxeYobvw4fvTzRR65RCwrr8VsJ9C42ybV4/3kcZrjPthitdIw8O2yLdK6vHGzJD9KyvIunmHv2/z1VahRxNIiyJ7bM56eijckwYZlOPDeL5SuNnVNKdz2vjeQM8kls74Jgugaygciv+r1md6yzqeqq3gXSB4VdUlPLK0xD32JiQs2OW275eoVBNanc8MrJddyqdBZ/cJwuPPD7K7FlN3w/VlrcdO7wdkmpYG+AzDopo9DlYsSz+jrxeUYO0e+ATh5cTlufX8uFm6sxnfOHC3G6woKki2Xy/38Yr49X5J7JX8vWyM2W1Q8N3kZAPUGBOG9WcHB/C3Y1zh/XXVgmTAY+tdPsGhjTeBcTfe8MZWWxG9qUdMaGq2OH9dU4pKXmx83rj3g/dlrcfB9X7S1GBoarYbI5NHJJ5+MO+64A8mk485hGFi5ciWuv/56nHHGGTkXUMSTTz6JgQMHoqCgAKNGjcK33wbHuHj22WdxyCGHoHPnzujcuTPGjBmjLK+h0RzMXFmBrXVNSrcLEW7gafruBSmnc9dW4ksngHJU3Dd2PnMJsSwrcLea0ipTeuQ/v/UDALlbVxhkW58+9vkiXPTidx5XnEWOnHw2NhHkspWhpFiCFcTfP12A4x+Vu86qFs3xmD0NNqUz7B5e+Zqt9FKXhVHCXnCUjPnr/eS6XYd/l72SS/NOxk4uUUVl/cL//pUZjnz2sXgISylZLKvWAi/x/PVVLJBsZTPT2oc0tAiFpeXyoNpR0tPLyoUl3o57ZBI+/sFWlE//x5SsbfH81crNtdjqZFmTjQu/C5H7i1h8c00TXpqyPJTMAFBe04TGFK+Mel33vL/aWLXFq2SHMX5bxFkH+ct7f7A8f9vfxJhBzApR0abs+ebJzMr6JNKWhf8Kc+LSTfZYUmU7JPdMwrx1VVhXWY9/frkElfVJvDJNHh9qW+AfExezK39r+mq8EJCkIS0GIYeBOwTi7PyAeGOquZO/vxSHjVraXNOI/YRsnNsavHz8s3TQvV7FULz/RNq2xlzbmHITObw9fTVWBpCOd3zwE66OQAKLmLOmEuc8G81NUkOjpdghrOqs7GuWoHdhmI0lDY22RmTy6MEHH0RNTQ3KyspQX1+Pww47DEOGDEFJSQnuuuuu1pCR4Y033sA111yDW2+9FTNmzMDee++NY489Fhs3ys2AJ06ciLPPPhsTJkzA1KlT0a9fPxxzzDE6K5xGzhFG0VwpLDDDvCT/+90q3BsxqN5t78/FnR/+hKcmLsG8dS6REXa3mldK0xmLuSqEAW+FEhQLo6IuiQqBRDjOIX34F+qlwu7TsQ9nj6km7t4zq4gssgJcqnZeZiGjVBiQy5tlAR/+4HX/UVluBVkeySzU5jv3lawaqE0ZhxTFXa2q3s2KFLS77tbr/i0SMkdwxOBxj0zCI+MXsnpFRNkNb+nCMsz5MlImqAtdF8/s9T43aSlL9b3a6VsiTRub4fLG47J/T5f+7s2UmBu4dUarcU1FPXvGVNZwLIC9tM1gyCwZeXjIZuGPMMRfOmOxGGgiKFaUDKL1zBWvzsCLXy/HPZ/Mx8rNdR5yiWTMZKysAdgbkmn86AQtby7uH7vA9xv1xNg565lbbJjnZpOTjUwdYy17P9/uWMO1tpFMfVNaGdtJhEr
0Q+6f4PnOz38Evg8/m7ve876hzQrAtggMUiDFjYk/vTUbXy9xN63GzlnPAvtXNaRQxbl4r9hci0mLw29wra2ox5Qlm1HXlFJmHW1PSKYz+IvCum9HAr1Lfm54b+aOob8FvdOGBSQX0NBoT4hMHnXs2BHjxo3DBx98gMceewxXXHEFPv74Y3z55ZcoLi5uDRkZHnroIfz2t7/FRRddhN133x1PP/00ioqK8MILL0jLv/LKK/j973+P4cOHY9ddd8Vzzz2HTCaDzz//vFXl1NCQ4W/OTu3kxeUhAgs7n82IU7Nicy3bCXWtV8KfLxZtSKZ95vhUnzz2hbr+jGUhJsw8snPGh/R7b0imfQSb6EojD/DtNftKpoItOaKkVCciR1QaDcOvpBrg77W3Tb6MiKSj1K2rsHe4yfJINl5YH7B2/GXuclwB3xaC/Fqw8HSWdLKSEFkA7MXtBMclJZ1lG25ziDTYDzvZ8doDglw5B97wUSCJMXNlBfv7Mce9jxA2Lk0QeeHKYguzeGOwawzdkyiYskSudP60tgo/rK4IVcfB932B92ba84iczLU/E44LZpQ4aBnLwn+/lweopnqVcZHgH7/iUxLWOkzZRkj686kvl+C3WcbE1CWbcdITk/Hi18tw/9iADQanue+Xb8HEgFg7svnAAPD//jMdtU02aWAqLjDb/eH73T+3+et905mDxLmoOUhnLLw3cw1e/HoZzn/BtoxaubkOJz8xGY+MX4jzX8jiat5MupostYK67Xf/no4NVQ0Y4mzovD/bfb/e/v5P+MOrM6TnreYs+dg7mDv+90/n471Za5zj3t4+59lpuPa/s0JfA42LW/83N5Iba1vhje9WojGVwSvTViKTsZTuqxptg3WV9bg5S4y7bGveq9+YlUOJco+1FfU7iPmUhkYwIpNHhIMPPhi///3v8ec//xljxozJpUxSNDU1Yfr06Z62TNPEmDFjMHXq1FB11NXVIZlMokuXLoFlGhsbUVVV5fmn0XJc9fpMfDpXB1QHgIq6JmysboQF3n1NvVu7oaoh62JoS20TJi3aJF3MuySIxVzZjnvkK5z1T8mzo1DaguKRREHGsgKVEWVQbYktk2FEi7WhQlOUVOsy0sz5pGuLexgyuVUR/1WloIkgq4TXvwt28xPbUNVOygvFq1nrkFK8vNlSiMvqv85xgVSNlprGFPb923jXnsUCZnFEC+FRgXAREYYg5bv4zKe9Y182prPdEZZxjvtNNh4v/890Zpkhu82ba7yEkyxAuPj8yy6X4iFNd1xQZW5rF70kzw4oAwV4J6tJkfp8cuJiqfWKDPz9Ud0rIl/TGUuqvMt23KPGcpEF+5aBXFF5ZDIWKuq898uA4XG3k7Xlzp3BoGObqhvZM5gNP66pZC7Hvradz/98swL/mCgngcPMOqqp6ZD7vFY3FPNt8qJyDLzhI5zixCsD/ISgql6VC3NY1DSkcPUbs3D7Bz/ha8fqZlNNI35YXYnGVIa504ZBc4L2f6aIsWUYBrMcBex3c0MyjYZU2hM3zHuOuj1R8ea/rqmoZ/3/w+oKnP6Pr6ECnVqfTKM+mcLqrXXNSgqwrXD9267F0Yc/rsMRD0zE5ppG37O6o2FbWCDd8PYPeG7S0uwFs2DF5jr8+xu1m257yTTcHNQ3pXHgvV+E44624+vU0IiHKfTYY4+FrvDKK69stjAqlJeXI51Oo0ePHp7fe/Togfnzw7n1XH/99ejdu7eS7Lrnnntw++23t0hWDT++XLgJQ3uWtLUYOcXKzXU+QmzVljqsqajHAYO6AgBOfmIybjx+N08ZqXWI8F0sc+Jjk3DJwYNw+eGDpbJkMhYmzN+Ia9+cjcOHdg98AZfXNGHMQ19i+b0nYv56ubIj7tCrrIuiKG1Tl1CmoGhEidgmjx8C3Day97CkfkWbbvyT7HK77mdujePnbcBb01eje0l+oJyi25qqLQruTZ90jmkA2Zb3KqKSlIOfnBTmsqKrt3pdA2cKAbC
F1rJIY2d3EiFm7WIka9bacoSsSpr9uam6ET+tq0JpQZwpghnLQjpjIZnOYMbKraiqT+KTOesxqFsxqzroOmg+4ZXKi1/63tMmg6QScoGSPV//F2DNACBQKVwYQIgQoj7FYR57Io++XlLOrJCyna8is8M62YlElWHYcX3+coI9f9O8OH7eBvy//3jdBJPpDI4O4VbrC7SepT8ueOFbnLR3b1v+oAtQWkRl73BfcgGZ26avb9y/ieig0yij3T+/8pNVzdl0+HbZFuy/U/CGnxIhxltdUwp7OK4i3phe3uv8XYBrqAr8+sC39SH089Slm3HRi9/huGE9AQD//X4V9h/YBQO7uRb9PiJSdHGWtONp0/lcX9mAGSsr8O7M1ahpSOG80QOzXsvB903AvafviV/v3z9r2bZGbWMK66sacOXrM9GlOB+Pn70PAODSl7/Dcxfs18bShcfGqgaUlRZkLTd7VQXu+WQeXv/d6FaRY87aymZloBWxI/Ml05Zuxl59O3l+m7hgI/LjMYwe3LVthNLQaCWEsjx6+OGHPf9uuukmXH311bjttttw22234eqrr8ZNN92ERx55pJXFbT7uvfdevP7663j33XdRUBA8Gd94442orKxk/1atal7QYA0vDETfHW5v+GbpZjzDLYhnrtqKuz6eB8BdlL35/Spc+9/ZrMwPqytZRitZpq4gEkZULtIZCxnLwi8en+TbSUulM1njGdFCNZvrBn8triyuxF8u3Cgtw8u9hQX0tX8jl4p3HV/1tGVJY/MA0XadGhW7xmI9Kq8plUIj9lcYt7V4zH8RbzkuGLJ4RhT/ye0Tx+VB0UbSiRqeEi5M6WYR4BbHg6xmSgvjjrz+0kc9+CUAMAu26gbbtUXuMqdozEG5Y5GzJCCQNeC9f3xWPB7Zxs7AGz5SjhkRsnGxsZqsD+zGlpbXOG0b+PuntgXOgfd+gRe/Xoazn/0GN73zo+tSGYKAXKboAx8U1ztLSej5sevNY7M05beca45LrCoIPF0OPT8d8t29rSCCQ+WWKiKydZLwneYCey72HnteYqEUKICK7OH/NuyECesrs6d8NgA8On4Ri3cTVOZfU5crXRr5tumcbC6nIvJi/uyVBJmrVTbMXRt8TVEgjrkpS8phGEAybTG5KK5ekIsfAGUGQxHUjzMklpQi6hrTaExlmCzXv/2DJ56RCN56656P5yGVzrAYagCkcYqoD6qcOfuL+ZvwSUCGPfcaDM+zM/CGj9qVBdJt788NHKOZjD3WjnpwIqav2ILxjlVc1DiSrQWZFSOP/e8OF2JjbUU9vlkaHHetuVhXWR/ostwcNCcMQ3vHy1OWI5OxcNYz/uDyz01ahle/XYlRd4/HtKVu9tQrX5uJzxxiedWWOtvVTUNjO0Io8mjZsmXs31133YXhw4dj3rx52LJlC7Zs2YJ58+ZhxIgRuPPOO1tN0G7duiEWi2HDBq8Z8IYNG9CzZ0/luQ888ADuvfdefPbZZ9hrr72UZfPz81FaWur5p9Fy7AgvjSlLNuOFycvZ95Zek2VZLNsRuSFREE3KelPXmIZluZYEc9ZU+Xy+RQJBpSRRWmRVMFYxvg9/mf/5ZqXnmAwfOUGiH/jMVqZJsSKlImNZgYtpaurDH4JTHjOrD4UbULMQInsVk0FRTVDg66Aav3Ky6RnK87wgxYwUWkZ0hOgLtWugjbw43St/GervZ75amrVJd4c8u5L/2rSVnvobkmm8M8Mm3fjAvTKLufnrq1DflM4aZFhGnooKt4wgoas84dHJnt+Z25rQCVX1SWyobIBhGD7SWNVfLR3KREBSfKWWEvZ+YyfLk5HLMAw/waqsz8haJu5YGxUkYnZZ/n44n5QuPlubI+4cxzvuZZVLesw5xDIvSiYcIlDldYf9MUvfiQSaUPrJCYs9VoC/eHySz9Xx9g9+wlRByXzp6+Xs73JJ7LHL/v29z4pINU4TcftoKiNJQKA4LwjfLN2MgTd8hKZmuCer3g0y99L
VW20l7t9TVwS66fDBsPe89VN8/OM6aTnAHVdVDUlfn4njiO8b1x3WLSNmSyXX1Ovf/hH//GopkgJZt3hTDcRbQCUo5lrQe/zlKcuxeJNDjMPPe0YlFFsTL01Z7htr4mUt2VTrcQ1++sslsCw7KUQmY/kszrYFLMvClws3Yfgd43JW59g56/G/WS0PKE3hDd6ftRb/z7G4y+UdT2eswOe5Oe/AM5+ewm3sbFvc+v5cFvIgyEJ9Q1WjJ9PspEWbWJKRP7/1A+5pBplpWRbqmraPQPYaOx4ixzy6+eab8fjjj2Po0KHst6FDh+Lhhx/GX//615wKxyMvLw/77ruvJ9g1Bb8ePTrYVPP+++/HnXfeibFjx2LkyJGtJp+GH2sq6rNmwNmeYJuEy1+hYnYUy7ICd4FlC1ratTjuEXvnc/46W0GevnIrFmyohmG4O54TF2xCUyqDyvok/jdrDUcg8LJ6F59EUs1caS84V20N3ulwU8UHv8YrnMC9tcLLiz/l22W2kpJwSKP8hENIKBaedOTJCTKXBy/czE2B1bnnNsPiKvhHNUymbAbvvMvkCbQKl/xOZ/vayPGaXmWpxqcyb422Xp22Etc4VnwqZeWH1RU47pFJ+GL+RqwVrAJo4avaKZfFciKFiTINugtTWw6KhRN07Y99sRgWbEsi14rQ/qs5LpsA8Mq07DFgqO4wTTSHWOLP2VLTJHWTCVOvalyx4O9wXaDEy7nNyciVrc0ttU2YuGBTVrnCuFPRMyt7TlXx0lQkQba2wsaJsuv1FpizpoplOfTIA2DCgo2sD1dwWUCXbqr1ybd6az3Ka8K/x4n8E8kMoHljjsjiqoZki7LLUdP0DlTJsnhTDf720TzpsTR3YnVjKjhYObwWXD85GTLXVHjdfsO8mwDbbU5F2Mue+amctQPgf/dawnnVDUmsqajHre/PxZvft73V/X53jQ9FGvpfg1bWOXBDVSMOuvcL/Pf7VTj5cXX8p9bATjd+HIq0WltRH0hkiskw3p+9Bm9NX426ppRynZW1zcoGjHnoS9/vlXVJWJaFGSu3Ss4Kj0fHL8T/+890/PPLJex5BOw147yAcAoqfLd8K7bWhs+cmAu8M2M1c+EP8hwIi+asCL6YvxH736WTP2m0DSKTR+vWrUMq5Wc70+m0zyoo17jmmmvw7LPP4uWXX8a8efNw+eWXo7a2FhdddBEA4Pzzz8eNN97Iyt933324+eab8cILL2DgwIFYv3491q9fj5qamqAmNHKIC1/4Fo99Ye9w0eQ4/qcNuOCFb1n8m/OeV2c7aU+gxciiDdXYKATPvOFtb2DgjJU9wLDs1U6kji/zj2F4Ag2+NGUZTnvya1z1+iyfdYis3n9Ntc8lK6WwC9YgUGriG5wglRPmb8T6gICi5CtPJJIqSClddnFezHdMlDnKJYRZR8nqa06MDrpe2Zl+Rdv9hcg23h0hGySb+/42BaMk2TUFuZNEsYSQt+0nNoPLer/f4WQnBNSE48lCzKSPf1yHdY6rz1jHNFxFvLD7xTWxZFMtkukMU9bOedaep0gxXrKp1nOOrCvEeFkMMjKQWfkFd+q/py4PPMbqcT6jEFRh3JjcwP7ub5f+63u8P3utxPLIf69Sws6s7FkLY8l5n6OoJ3yuof4KiXCktmXkurd9+fc0i2Vlf5fFvslm8ea2q26zOfBel71pQ2iSXDtgxzTjM3wBYM+MXQ+RkLZlWTgx7VaI/JNZHomShIrJRGdawElPBL9TZe802TijdyEhyd070bImKBOd57s0KGCgmMx69yRnffCys7EjnirKvnJLLTY6GTyjvJXmr69CdYNcsZ63zpsQ5vnJy3DGP6b4ysme/22BTdWNgUTzp3PXsyDmsnk428iia6pqSGJtCNfQtsIPqyvxt4/mYXl5rW+uDrofh94/ER/PCbaIaw4MAHvf8RlmrNyK050xcvB9X0Srw7kpW+qasKGqAU9OWMzWPQBwzyfz8CJn2RqEVVvqlDH
8ch1EnCe4UukMUukMrvnvbEyhOJ4hLKt9wewV7TWm0jjigYn4YXUFZq7ciofGLfQlZKhPpn2b1kH424c/4Zb/qbPchcGqLXVKrwDA7p+qgPnm54qGZBoXONk+dxREJo+OOuooXHbZZZgxw31wp0+fjssvv7zVs66dddZZeOCBB3DLLbdg+PDhmDVrFsaOHcuCaK9cuRLr1rkT5lNPPYWmpib88pe/RK9evdi/Bx54oFXl/LninRmr8e5MN70uWcs8xWV5Wb21DlOXbmZxgCYtKm/RDklrw7Isz+JyQ1Ujfvfv6XhOiHFBJvCU0UX1AmxO+l/xjMZkhsUt+LezGK7izGJpYUQvWHq5UV//b1bwC0AWlycIpCCQhYbsVhJpREq6aK0kA7lNySAGDZWhWVYVClIlClz3M//JMisxwoNOKnrxPNVoSQuLFuk1RFA1VOSWK4+wCFIIGKX7VNYoaeHY+soGP5notPb7V2Zg9D3eRW1cEewz6Jhs3FM8HteaKLBadj2i21pz4476ni0Z2amwjhHx4Y/2HPD0l+Gz6ISy4JMUeWeG7UphKMqoRN7gWH596mSvMsk6SeHqOX6eXdZVxt1CYhwPy4LPuoaKf+S4JamuPSgOFxBtV9k/98r/9rXBHTvo3i88LhJhzgeAdQGxfBZucDfbRCs6GejeqPqkOQiax258x97A2OnGj1Fe08gIRkBucShaTI7823i3jVBKoPq7SlYeWxyrbHr+3vjOdQlnLmNR2PyAIsc9Mom5xYjHRItOA4ZPdsrSZhje6zr+0UnZhWkmFm2olsZs4nHZv6czl29ClBHneY9Z9qbDd8tzHzeoObjoRV7RtK/q758twO0f/IQttU242MmYebNDCLgWzfZneU0jW5MB0TZpsxEvvLvnaoUFu4j3Z6+VxvvkYcCdYyrrk5i9qgIPfbbAQzAB9pik8AisPlj4fF54A4ZTOCK6IZlWjrfT/jEFlXVJfLlwE+788Cf82dksFgeca7Xvr8NDbAbMn0kWvsLCsvJa/GPCEjw8fhEe+3wRflTEs8uGVVvrsGJzXfaCWTBx4SZc9foszFy51Uc8E96duQZHPfglVm6uixQfbkdGYzKDL4W5antHZPLohRdeQM+ePTFy5Ejk5+cjPz8f+++/P3r06IHnnnuuNWT04IorrsCKFSvQ2NiIadOmYdSoUezYxIkT8dJLL7Hvy5cvh2VZvn+33XZbq8v5c0IqncG/v1mBj39ch49/XI8J8zeyhcXctVVsMWdZlnTSbEpnPARS0KTUFjj5ia9x7MNfYdWWOjax2ymk5UpBXVMahoHAtMgA92JRmKaI/SRaEjw4biFb6FJGpCWb3EX+p0KKYHpRk+WRmCVOVjYMyBJDlYmDWQk49WZbFAL+OE4y+VTKXBgSRNzhlVsehUc49znv9zCZ9+T1WN7PMOcEyOA9JpBR0rLOAqkZJKhKThV5JB474J7PfXGzVP1PC01ZmbhgKea6Q/oLExEaBX7Do+z9JnucIsUZCeG+NsOJmaJKQR4qrpu4gJaI+aJgXREGVE1jMs2sNQgZNj6Dx//dTjIDGWF7wzs/+sqL8X6IsKTdXVX3q+YrAl/inw5hQIF7RZLDguuu9cBnNqn8jeOC9Jd3bdk/nG2PaVmQ9fOFXc5UxsI3S7d4xrQ4h0SB8hzhXeM9z/s9zAwiXTOkMjjywYlYtaUOr33rjo1l5bV4auISfDB7LQbe8BFGcQGHQ83PINkzWeWj51Fm5edaHXpJThkoRhQp4os21GCTY2FU3ZDE3LWVeGv66qzzxkPj/C6KhFenETGlrEI6X1TUNWFNRb1X8Ue4dZrKSmH8TxsCNw6PfvgrHzkgA22g0f0K2lAAgjd1qF+fmrgkJ/GCsmFTdWPWvpuwYJP/GXO+bqltYrErx0vIEn6z4rf/+h7nPT+NbdLe8/E8NKUyGHHnOExbuhkzV25FVUOyxZboKtBa88rXZvpiBapavX/sfJzy5Nd4acpyzFy
51bORKxP3jg9+wiUvf59VHrKwnM25wD7w6QJcxlmUUiyhc551g2BPWVKOC174FuU1Tez5FN8t2brxjg9+QjpjYUttk+/a566tZIkreEvdoHsTZf3VnLWaiM01jUg6xOE9n8zHM18tRUVdEyzLwsPc3NOYyqCirgnXvjkL93/aPgLTtzlyYGHc3hB5Jdy9e3d8/PHHmD9/Pt588028+eabmDdvHj7++GOUlZW1howa7RzVDSnc/N4cAAbWVdbjope+870c7UnQex5NwOc8+w2em7wUM1duxRWvzmjVHa0w4EmYH9dUYuWWOhxy/wR8/KP9ElTtKuYnYli9tR41igCqKoh1qywc6EWaEtwqVBYvtOAtKYj7yjQHRAyRRYasbUYsOR91Tdn9/GVuINGIHOGlHuoc/29BwYDDKNVS4iXEIk0WlyKwbAjF3Y1/FVyGlW1GfBYVfOvfCIQfDxlxsk5wNVBdF8UmkBFUND4nOAvyWoUVXYxZvHh/l5V1x4r39+a6KrG+835IQY+cqk9U7qMiaAzJrjNMBkf2PCus8nxgHSg5FKpNob+U417Whv1J82yY+F/ZsFawmJu0yLaAIusZlWvu9Y6LMAVonuNkIatvSqO8polZhwDAgvXe9+8Pqys8sjYm04FkmNylUCgjP9UDMVg3f17QsyErS0X4GEq7/PUTLN1U63OPoHs+bZk31k8QxOD7dO00XGX3nMgiyp4kI3oVQ9cHIqSLHFftAs5l++6P5+PExybjjg/mOpY/wXg1REy0MJaksvsvWpionoVD7v+CKd9XvDozsNyl//peGrScNpeC1llNqQyLFXSn49q8522fOef4YXiXH1J3YgvyNWpUXPV68PVurGrAQfd+gf3uGo+LXrQth6oU60RaJ81UZOojeYPcEsf9tAE/rbXngtmrK/DPr5bi1WkrsKW2CQ+PX4gXv16Ofe4Yx+Yht14J8ev7w0VlfRKvTluJSyXkDU/K/MOJZZmxLDsGmOW9DzNWVrDqNwkumu/OXMNcPWUgF7IgWJaFtRX1GH3PF7j8P7ZMy8prcf4L36IumUZVQxJPTliMWasqsPstn6K8pjGwzqB1jWxe4/vyha+XuRsFzs9kVVvbmGbrnN/9azqrR7w3BGrjqYlL8LgTAF8FPrg2rYeibEgd9+gkvDJthWfTYfgd4zB5cTkeFdpPCu/enxMakmmsr2xoNxkdWwvRt1Ed7LLLLjj55JNx8sknY5dddsmlTBrbGfggrXPWuIvWhRtquEWGXebh8QvZhPKwE8xz9dZ6bK1LYll5LT4MsdvUWpi3rgqrt9bhqAe/xOOfL2I7vb7YF856h896Ru+KfMfdKkwAVdW8qjJOIhBpFCbehkgeqSwo/ApMsBSu5VFwfeLOrOqFRX0jKyMu+pSb3+KxZr7EWrKYlJ0qWvSoFmlqNxVS5rMTTeGCAXvlcTNMBSsbYSyQouxoqorSeOCLkGKisnojxJ3xLmuCngWy4COFJozsKhdFNl4DMrJ5ymZtye+6Jw3I7nyaIeYYdo6KVKFPxXgIQ9DSXKW6U4H9oyCsLOG7t00v6aO6n6oxnEzR+YFF1CSBc5A2JWTTH6Xr/mqRbdqeDDGnUza6grwYFm+ssRVgkeQLkPmxLxar+8ORmdxARFdhOTHu/S4LBszml8CW/fVR21e+Zj/vlGUMAK51AurzVsEAmpWZjYf4vuRBspc7bmeysRPUt7LfKQspuWrL3Ggty3Vz42XgEUYRFEtQPdNXbMVf3/sRD41bKCH43NZkFqmVdV7iYtWWeux+y6dS8jAb0hkLe9z6qa+tPW4Zy8rc8eFcDP3rWNnpoSASe7x1R70isQKPuWsrMXlROW57fy4mLypn94bCAfAbkISqhhSLR0bj63EnJmg6YyGdsVDVkGT38ZdPTwUAX+wifp6kW/7EhMUAvBtz5IZL5YlcpMDt6YxN4KQztovUUQ9O9MlM70J+jItj6LO567H37Z9hwfoqjJ+3AeU1jTj
jqSn4zzcr8Kt/2tdAJN+yzbaV5DdcIHcDhjdrmNPAPCLAHYKpsj6JlU5w/+krgt0LT31SHvx8aXktDrzXdmcnF6JZq7biq4Wb2NX9Y8JifO+4LhIR7YtHaLhrH5KHXHvpfi4rt3/fzD2zFP9VXK/QXMXf13VVTuxTrt8nLSpnVsKLN1azIzNWbpUGMK9tTKHJsQIyDG+G0oPvm4BUOoPBN30My7KU/UlocDYcMpZNmNM4EOdaFtdKYl31c8C1/52N3/37ezz95RKs2FyLWasqdkgSLbL5wcUXX6w8/sILLzRbGI3tFCqliHtoLNhZushaZaKz05+x3OxdMlTWJdGxKJELSZX44xuzMGqnLgCAl6cuF0xsLZ8SIlusqWKriAtnpcJpeMuoFuupbJoC3EWGL8aBZLctyjxHFkdxSdBhHyJsRjSFiJcRpp4wJFyY+sS4NWGg3L2LIIiqTWqCYgLkKoUy1RsmHpk4Vj31BNUv+U3ttmZ/ysz9Y4aBNCylMqzKgEcxWsg9qclResNYqqjHlyWUzT56VASTyCeo5oRIpF0EclGVQdD97i8jLpBDBbB35JK16UtBruoLeD+joknhxkhQWSLSEXrHyfqb+oPSiauswujsgrhNHvGxw0n5DbJg4n8NIhIAsPga1735g3PMe31hrM1aasUh9tNmx62QSCTAtRwiVz5yPZRleiOEmcNZsHUZeeTc67emr3a+y2SH55iqTXFzJYig3VjdiEH5wUt2kVxWge/byYvLMXmx3LoB4EkWA399j2Ls2Oc3JNPY+47PsPzeEwHAE9dDJc9UiUVHbWMKf/90AftOrqUWLNQ2pWFZFuaurUJdYzDBIyO3/GVokcW17ZAu78xYg4d+NTz4ZAevTluJ2asrsGJzHXp1LMBvnp+Gb248ih0/6sEvWZ9IZRC+3/XRPFTWJzF+3gbcccoeAFzLG3Guk9kG0xBaU1GPPp0KAbjJBWgIE8FKc0s6Y2HxRpv0uFXIXklYX9Xgk5UsH992xj/F0/rKsZA5659TsWRTrYfkfWXaCq/UXKVVDUnsfsun+PYvR3kukC9j0Crc+Y2ePcAfo4muScRRD37pa/+xz23SLWPBsdw0JO8zRwbJs0zhKc54yg4gTq7Enzmuesl0hm2I/+C4yclcI9+fvRbnjx7gF5pr7N2Za/DuzDVYdNfxGPPQV/jNAf3dYoaB5eW16NmxgG0q/PqZb3DM7j3w4LiF2KVHB1ZWdJGubkzhjKem4sfbjsHaigYM7Vnil0PA1tokvllqrzn5Ndmt/5vDSPV0xlJuovNYtaUO/boUhSrb3rGltonFn33x6+WYumQz3rjsAAD2s7NX346obkhht16lbSlmixHZ8mjr1q2efxs3bsQXX3yBd955BxUVFa0gokZ7Br9IVS2Q0pmMzyeflLVUJoPXvl3pW4TRzuXed3wWKqVpc/CXd3/E8vJaWJaF+eurlUqAq0AB//xKHmRWaV1gZS8TZlHO6oOXxFKdSiSAmBVCJkoUGQhuhrHgc92MLdnrD2NNpXaBEpWnMMpOcJklzoLEClFWRZL54i00M+ZRUH0qqKxk1PSH+JtwDYo2g8ST/R5m3M1aVeH7TZXdjsD6WVJIdPEixVN1/3xEkPR6vG2HsZhTIUxWM7HtXCv5UpKGWaDRdz/cmCTec6RtCG3JlFAWjJyVDSYuM26jijYV8lg0dwafHwWyJGSipQtZbahiKZGxJ78xQDv8SutOsk5SlCErBXnGtOaT8VG6kJqm5yfOsnX61wKkaH/ubEiFsTxSzVvUNyoLMJJDOoeHeNcHQU6EeuckWT+G2TgQ5wOV7G4Z51NShi+bzliejEKqmFqUMIWeu4ZkGnvc+im7j16Z7c+axhR+8fhk5CeC1RbeAiNoLecjTZ1y5C6XzlgY+bdxgW00JNPy/pdcJ39PvK5M3s8NVQ3YUNWAyvqk+/4JmJiPlJAghOK8GFZt9QZGpnqaGHmU9slGmLjAfn5oM4pi3PD
zI2UuvfbN2Z5z6fJkxK0lDFzqi+rGFB7/wiZw3pu5Rn4O1W/4g7mrUF7TyJL18EH0ZSBrI88Yh/eZo2MbqhrQkMwEuDp75ZNtJv9TSFDBE3/stxAUNxGwZPV2+AMTGbEHABX1TYy4SnHzGcVVyghj8L1Za3HiY/6QIQ3JNKobkh6J+HmRYvIBNsE1b61LJn30wzo7Y2LGwktfL8PtH8xFOmMhk/EmIjrk/gmtmjhp9da6nOmQlmX5QiYAwJ/enI0J8zdi6tLNbE5xrZ/tMr9/ZQYO//vENg/NkgtEJo/effddz78PP/wQS5cuxVlnnYUDDjigNWTUaMfY6caP2d+qhRKfeYUmqwbnxZSWvGymLCnH0L+OZeasQ/86Fj+ursTYOes8uw4txSvTVuLwByZiq7MrzO+MV1NgZ8FqQbWjFkkRU/wm7kaqdqtF8ki6U+9bfFrSdni4illgEYaYIuuRiDBdpFKawgRbjGJ1EEYecQcl1HUqCIUwymqY6wxlwRGhjG/BHaZzFC5SQcqnDBGKyppXl3E+o4w9lYsWtekq8LIFs5fgcOto3iLJRx6pyJAQhElQvR4IwoezPPJXo7IECYJL/kiOiW2q6mnhmjTIRdRbpmVtEOjdQhZEKhJddYszIW5/QwiCxSVAxcaDz1FtErTkXpAiJouZx5Qj55rC7HiHeSfIMsZRVxBBpfKaFZVAaRDxEHKp4hkS0sJNl7vTBR9zywgEk8Ttliy+eKvTwTd97HHJoThEl//Hzvj6w+oKjPzbOJzAKU4ZC5i8qJzJTlYjspgxNIeQm5/smaN05GIWNu/1CdclHM9YlhNDzPIptLNXVTDl2xB6USz7zdLNGHX3eFTWJbGltkkg37xrtsaUS0Z8MNt2fbPEstI1oI9hwdJNtUIZ+5PGK7miycbiPR/PZ7IDNkmQbWoLM/UFEXb8PbzbaRvCWOffueKc9yMX9FrE/HXVjDR6SkhgI/Zl2uKCccN7b27/wLbKovhHm2ubPFZVPMQujbI5eNXrs3zyyfpWvOUbq70JL+744Cc2FhscwoTqOez+Cayc7xkRKr7mv7OwZFMN7h+7AJe8/H2oOJ8x0/CNyf3uGo+vl5Tjtg9+wtQlmzH4po9x0UvfMQLlt/+y42RZcK3Iwmwe14RIvPPW9NV4ddpKHHzfBHwwex0OvX8CJszfiKe/DE5oJEMynWFWrT+srvRl86W26FoIXy0sh2F4kwTlaq3Q1mh2zCNPJaaJa665Bg8//HAuqtPYDrCputE1Fw2xIJEFRqTStGDmz3/Gsezhd0fqmlL46Mf1eHv6ajQk0/hm6WY8+Jlt5kwv3Obi/BfsVKbkv1/XlGI7BCSVmO7ecy1hAiiHkMNnIhsiUEpaUHZlO0pBJIMqUKxINKnkJSVaToh5X8aqjlBZEgS1LYOYqlq+mxu+Pt+5ocoEL/ZUz0qUDZgo6VtVBIpvoUpjS6Uwhwi0GsWNrqUZX5Snc0pOWDmkZAGb6+gzmDgLaikMGSI9TwykrlTg1TLYx4TnUlURyRCKyMmu5IQjNINJCF8GNVVfhCBSmksGE6KsB2XX48bTsz/FHUuZHO58GuUZA25yXLyaY2FKUBGrQd+D5MkG6ltykW6Q7CATyUP91tKYR/ReVJHfsjVNNshj+YnPhqxvRUInO2TPYXM2d2RrBGY55BSmZCIxSeGl5TX4w2szMXdtFcprmljMHcAeR795fhprsy5pK1leCxAbtCZc6mQXlPUBtb+svBbpjIWKuiSLQ/TPr2yFkZTOuU7AeVIKxcdpwoKNOJKLA7SxusFDXFoBKyO6f5X1SZTXNOGW9+dgxJ3jPDGrxOeHj5FErn+yLJEifMNJMVdSKADmkikhRmm809hOslh1BrbUNPnKA/xaVSGoSKoEixt0iuc3On/KEr+7JR3jszCKkM2r89dXA4ZLEm10rOAoW9wkJx6d++6X1ev0l9KDIfy8K6tHNvdO5oJqv/D1MqQtS7rGXFvpEk2XOkT
H+J/suHbpjAXDsK1OP/phHd6ZsQYrNteiMZVGbWPKt6aWwTQMjuxz26cxR8/P8s21WLnZtpAb57T/5ULb6q2qIYkhf/kEP66uxMPjFuK+sfPx4tfLfG0Nu/VTNCTTeGjcQp81ajKdwbrKenw+bwM+cWKGNabSWLmlDrd9MNcTzHpZeS2mLC7HMQ9/CRFNqQx+/8p07PyXT3DY/RNQUdfE+oFfj9WwIP/OtbM+sMfVyU+4gd75fhn5t3GtanHVmsgJeQQAS5YsQSrVvAxTGtsfePPk1RX2JKAy4+R3AsVHRfYSk01UazlTwaMe/BJ3fvgT/v3NCgDAH16bifKaRrw7czUzT1y8scY3Ub8weZnU354Cfb/jmM6qCJgosQV4iMGII5EVkrIiWURFpLIHKJ4t3ZX316cgG4I9h/z1tbDMRsH8PYy1RySXwRALfBVhpdydb6ZlSljI5ZK3qeoT1e4YQdwNV8qVvYj8PKFPZXAJ4ODzffUFc0dsMKsCSdO1+4y5mnnPmewhNMcwz6PbdnjIFjriDnGYtsLFPLIhn4staVlpPQoSiqAMTh/QZliICkBScT1pYeyFcgNWta045jdayG6FSmiuO2S0APp2WbKOVVm3skDZTge21G1N5YJDB1OKtOGsqKBpqsh0qkdl3ZfrN4NKAQ6DRRttCyQiP2RDKJW28MHstcwliofl3CZSfkVXRQD4j7PGo7g5KtKO4tcVchnrCBQw+MB7PwcApkCS8kqd+6QTfHprbRLLN9ch47jZ7H/X58p5gvpC7D665RQAm2uKIRHzVyyGGthQ1ejLvCY+T9LhJbwLaN6QEaM0PkkZT6YyqGpIIZnOYKxDoohwrYgUhEmIucp3jlM4mc7gzemr7LEqVPDBD8GbxpQAIwoMABMX2CQRBd72u5+rrtOtBwja+BDOUaxJwpDEBgymX/GihSXQn3KscN6asRrJtIW3pq/CF47771/enYNXpq30WM4EobYxhc21TSETVPhx8Us2mfWsQxLPWLkVT01cgu+WbcHctVV48/tVmL++CgNv+Ag3vmPH4nvmq6V47PNFqKxP4hSHoLn69Zn4fN4GjL7nC49nxQuTlzly2d/PfuYbzFtXhSMemIjfPD8NCzfU4JHx3tAqDak0I8brkmmc/o8puM2JD/b4F4vx8Y/rUNeUwjAnyL94gdT6Vi6mL2+5Wl7j9pfMZbc9I3LA7Guuucbz3bIsrFu3Dh999BEuuOCCnAmm0b7BLzCISBrvZGbhIZs+MsKikMXD4EpTMFCayAHgj2/MxiE7d4Nh2P7BlJ2EMH3FVvzxjdn44xuzsfTuEzDmoS8x6c9HoHenQmyqbsRTExfjiwUbccrefTCkrAO6dcgLvD5+0Uh/sUDZzXDB4OsJ488cBswSSrAWkZkjNwUsdGVEmPiLencoO2HiO6eFVkVR6lHV1xzzUTIJD+OGoyL8WBnFeUy+FmoMlGZ5EYvblP2ei7Ko4JKCIRZKqnqyN6VEuF317NceZsdX3H2UtU2kuCksOkNZwUnaFIk4dYDx7NfgWq4oCjkIk23NV6/yt/DPj5TwE76HUbgVepVyTg5DekciXqTkvvMpzOlyslNUGFUEU+tARf4xazzFPWHfQ7ThEjDBfcwstZyTlBaTESZ+Vd+GyWgYREy3tEwYSN3WgkxAOIjj000WYUC82ieEeDWqzJcL1vuV+aRz3+762Buri6/l/rELPMdcUt7wdQwRjBQ0WNZvFDCaMuOKzzelHX/fsWa/9s3Z6FiY8NVHbmsUMJopv5Ns5Ve0IOJBfUzPSCJmYmud17JH3MTIWJbPVcclGWgsyt7B8jWa7BmmdQ0RdGKfyxDKCk4cdmE2QODKsnBDDW7+31xffEMVQVIkIRC5yh25hJeErGjAIyMbf2E2iMW+DEU+cwhn3WQHOx/QtShrWXIDpayiW2qbMGOlHRpknWOpJJ4vE3n/u8bb8mX8ZWRuikED533ei8Sw6zEAXPfWD+xn2uxf68S
JemfGGsxeXYmf1lbhvVlrcewePVl71F80tun71KWbmesc3csF66sxdclmjB7cFeN+2oBRg7q412zZVo/Up+/OXI1EzMThQ7tzZcJP1G98vwoA8Oe3fsCoQV1w/ds/KoPstzdEtjyaOXOm598PP9g39MEHH8QjjzySa/k0tgNQSlDZwkG1EHIzINmff3xjNitDblD3f+oNdDdpUblHua6oS7IX8WX/ns7Kvf6d+2De8/E8LC2vwctTV2DVlnrMWLkV+901nk2M2eAupmyBm2955MoNqHeVxZ5UKT61Tlwo5m4jedHTYoAWNNOcgIgtXaj6sh4pyobaPQ3TNxEst4jsUZE07Hv26hjCZpHwtZlFBtlvLXEvkWFDVfYdjubsFCqRIyJMtt4IZYFBcWskty0oM5WswuAU3P7fgu6bMnaOoj6ad8j0O8yis7mEmq+M4tkN8xwF9rGsjRDElxjXQwWVRUeU8ZirmEdSKzVh7hb7ggcbnoJy2VKoExDYCKUoKp5HiiPIiGxlmyFYFQei67ay3hAdFoVgiqIwyMnXEM+GWCaMXDKi3PcCkp3n/UPVE7T26FBg70PL3Naogvy4X5kf+bfxThHv2kpqwSRkwJPJRWtQ93Xjv0CqOz/hlUe8N7RWenfmGrYBQxZWlmW7jMs2kT6ZY1sqrHLSuMssCsT5NGYaTCFmZYRPGcK4A1sBZWVjUUxsoCpLYPH/FANFPFsZc4s+FcQLHauVZN4TTxs7x28xxaymhYEmf+69400WQJ4gEt6qWIgqUBEKeC1rQ3m+0AuqZ5g24VWB6GWkrohaIRZdmBn00fGLAo996wQxl8ENAG5/ktXfFa/Z8dXcMWn4Mr1KN5Kdz/nrq5lLrhi/SCxLdf7+lRmBZWTjiTYdKOTKtGVbIq1n2gsiWx5NmDChNeTQ2I6wakudPIVtjixqstVJbmn0EMomaNrF2VDdgKmTN+PIXcvYMSKNZOlp46bhuza2WDG8371y2ohipaMEs1JQLYLtT1p0UCyI5pJbBBYDIIQSJyLMLngohVbVj825PMk5/rhILes3ImXUAWMF5VCxU0hjSpW2O1cIdmsMoWgJ50gRRftsJpRkSojzXKXXq4iGQZhA0gYrG1wPs06SKn72b+urGgLL+M4JUUa5GBU6TnadJA9rU1HfbEm2PHae74fsslPabnnRYEVUbDMKgSaDanyJ7avc8PzJD2SChJcrCqIQJiqoyFcR8yXWKL56HKjGKSPfhH5sLqJYfYaZJ1TViaKGmUtUCDUNK251YAwOyTkkF1lZquolYsc0gu9lmPgfaWap7odoDaZyUyZXMUZcKdqmezLHiTFIa1DZJpJYi9TVV5BFtfmgyvYo/qQiXJhFCJtbJORRgEW1eu7z9qP8/OjPo/K1FIKcoTmBYt7wYCSP0JbSfZmRRhIrGlavpfzOt61q01LMY+/PCnbVI6LkByeQeJgZnZ4DssSTEaLNWQvxZ1DQcYLhkDoPC25igCvzj6sr2d9vCkmSyGWYgpyLMQN5S6c1DvHrrlMl+ptACBJxbCmW3tTGtKUuyeULmC49z/4kV8Bcbw5vK0S2PDryyCNRUVHh+72qqgpHHnlkLmTSaOc45P4JLCtGNqgtSLJPbbIim53Ag7TTceXrM31laBJxCSauTueTfGB5qLKPiXXLECa4dChlW0AYf1jyzZ2/LrqvtwxLHDa8piHY31kMuitdBDWD+whlVRGB1lJZCrWUNBJBsRVUikJVQ9JpWyKP8F1UzoGWK0fZ2hQXt9JzmiNCCOIkcpUh5GiO25WqLDuicF1xF1HRO2rVFn8qWHFRF+a6ZfWI50dbGIYpFYawCl70RwE9G82NwRMOweMySpwCFSGUEe6F+Oktm11BiQKmxKjKhGgiijtdGLjKVfAmEWvLpxgH1xvJqkhxjKpZuCH7WojaVAXMVrXVnL6UKfOii7vspicFzVptaWGXJYsVad861YV5Z9HGnKws1UzuNbIyZLEO4XmSga3
HQlhpujG1vOnuw2wQhZnrVM85u0wFqaiy9nPfWd73kWxdRv0ftB6QIZwLmvOpsCzznRNqzglui5Z8svWrGCeUILU7EgmBEMSrKpGGuLkb9dGmhAeysXf92/axi1/6LnR9JKsYRoRHRuivMJva/Jgmy5pQRLvhksyNqYwyniQL/i64K9c7VlD/m7WWxUFj1yIRgc2Vzsf/Zq0JJa8Bf3Y5/hpUIDI9lbHwTyfOUzKd8brttWNEJo8mTpyIpiZ/1P2GhgZMmjRJcobGjoKmVAYVdfKMC4DcbLW5u7WqMjHDO9nJyBJxZ+E3z09z63R+lKX8DRM4VZVGPpSyHWHx2hyFWiVfcyAjLwjfLPWalsp80NdUeBXYMMq0eI4MzU3tHoRcbwCo6iNrItmtyjUxFAoBTcp+Fi2hZjhBPMUg5TJUhwi82HwE95sbh0VyFrP08pYJE1ybEEbJV4FKRsm8oSpZJzHnD4KKYPJbuagmOCrjP1QlLODDXGWuHgNXwZIdE5TpiCBlMsymA0H2fG8SrbeYfP7zxZ1r9TsnN+OJsjpGyp4YumTA+U4Fonu7qi165mYpLNzCQLVLHQVhkgr4FHWFQhzNAimYbFCeJxRSKstOUUY6KAh3ZQZBQVapghdh0yhMJskoJKwYN4XWjnIiOPu1uM+wlb2MwvLI77YWTFSJ54fJ/BfUDg8xnk2uEn6EIShUbdG7tFqx+SlCtt5my3ZkJyFEQlImnrveCEZLe5CuOYz+IN5bmfWZ3zoye32y9blL8ocnkWTBuilWGlkekSUS1X/1G7NYWebKqrDqFgNcU5u/+udUf1FuvC/YUB1qo5sHlSc9NsVtbFfUJXHlazODK2xHCO22RrGNAOCnn37C+vWuH2k6ncbYsWPRp0+f3Eqn0a6wy18/Qf8uRYHHVROvvHzz5DCFyUCWfWOOk4ZV1j6dV93gz+imUgLCKAhhFmeNyfCKXVgLr7bG1KV2BjsV0dRSRc1XX26qQYWTCSHXlE0Y8kAMksljtmN6vC3gk1WyeyRi7tqqwGMiyBVSZalBhOEKJ4VrWES5b2ozcsvzKW/Le4wWGSolP4qclIY6DFSKfJR66N6oECqGklNmc23wmCYoFWT2mRuigs1JijYppokMYd5T6k0H76K1XjL/UwplMXaPDP6g+yGUadUCN+vZLpZHeDZbankkjst564Lnm81OWbL6bCly5cJHUPaEMPDDuK21lNRSXZ44r6isKMJk+8pVggxRnjD1hLM8ym7Z5qa3t9eaqkDNbsDgYEJNtACT9pHl+QhlNauy0hBJJNkmY1APRJk/pIHahWvJFeSbQfYns0IJsd4OR2bQJ20yBd9XVq+kHpH8b66bapQ5Sk0A07rHRlJipS9aS6nfT8FtuZZnIbxOFMfSQvDrdAhSiuqTOSHQMZKr3tkgXaTQv+icqO85eh5Jj+XJurjz2+aaRiwtr8V+A7v4K2gnCE0eDR8+HIZhwDAMqXtaYWEhHn/88ZwKp9H+kMs04s1doNFZtCMpM7NcsTnYTJLOl5FOYXyeVVD1DynHFOdBpbAs27R9kEbNQa4UmVy5m5Gi3RCB1AuDMJv0bWJlFAI+94YWQrQ8aS5kGU4IYXaAZNZRodIdC20QARyG3KVzUgrXyeakad0SgqRRIcx8JpJQufLPD5WJK0JTMmXOz4f6KwxDVLY0hW6U19yEBZShKbgMvbfCkPG5JkGiIFex2sKQ1GGsHglhNm/YDrTEOpmgitdEoHiEatdv9XePXEb2MmEQwoCQ/aGyohDHqWwtFkXWlg5X11rH+ykDHVIplW69dmlSrKOQu6pYRexT2rfe5ztM/DO1BZP3HOmmgUBmqQgOgkiqhIE6U1h2Ap0gtdxm/ea9Z2Ggstzy/S75jVw+VcRCGIu3MHqWS3hkLaqE2JKMVAyKiSmVSxEiINJy16lHtUZ2nw37jyaJ1ZQ/u10waapKOhSEMFZ5Hnmc8mSMwIfUoIxyM1ZW4Lf/+r5dZ18LTR4
tW7YMlmVh0KBB+Pbbb9G9u5ueLi8vD2VlZYjFFKkRNbZbNKUybDJTLYKizmHhdnSzWwLJJpeCRPBYNBSTkkqkMNfX3pTt9gRSdlUuaVHMbMMs3qMgbPa9sAhj0dFe0D4pLD9UC4kwFi8yuEpE+Bksytgj5Ve1i9UWiDJXEcmiWlNFyRiimt9oJy5Xcykh1/V5kX3sRCJTQghL7qK5uqyW0kxb6/yWvO0RawPST/NQvoaa0eEqi1xf9Yp77z6HKoWKFNgIwWg853u/u9xRdsW6NTdDwlyPSHqoY1CSFYT3uwzEibmxyVQyeD9VMY9UZcSA2TL50sK9Vl3D9yu2AlBnsQo6OwxZEGX+CPN4NXckiYp/mDAOLkkWfIy5K9HvkrLiJob0vjqNLCunDW5JmxEuPkxZVZkw7ozuOKNzFISJokyUzSe3Hv8x0XrLjX2k2ERSEe+MKA/WKUW5XItFmXzZyWVyp+PJzQUb7LXibe/bAca/mL8BI/p3xtg56/Hr/fsH1tkWCE0eDRgwAACQyXWgEY12j3s+mYe1jsKvXCBK/TuzP4RRQTWaChJItfOkyq4hBlfznBciINL2ooS3BaK4PKigjNGi0SyILztaaO4oUD67bCESvEgnbKzafgjB7RUUX6elc2mYHdVcIcxGSJQ4PFGIOJX1GykoKsKeeiWKq6MK20v2GFksDRcKy4gcyyHON2HqV8VxoXg8K7cEv29Vx4LkUClx1FtSVyhBMVMhzJowXLBlsiBQlfF+V2eK9b4bVJYJYSyG3N+swLZ9FkOKtry1NR/NseYm67qfHLdS1dgieX9SuKBGs3oOJihovKksfqNsCEex9KGQA7JrEDdOZJatud4QUM7JISyhyE2MEXshBAyTHVAujvf+qYPJez/TAdkCZfXzEMkxZcbqMGSpFVyGqpYlcyLyMe5kwLv4pe/x9G/2xQ3v/Lh9kkfvv/8+jj/+eCQSCbz//vvKsieffHJOBNNoX1jkZBRRpVlVPSjS8qFiCAXv2qhYYuZPKlk9ULOyhY4qrlGY3cjtZeHcXrG9xHja0UBWEdvj8FXFnyCo4qWIl0wWHdK2IpjAt3fk0gW5NZDrLIitiVw7h7VuYHkvohBVYaB6ftoTVK5uSq45x+PSF3MtRPUVik28KC58qqaIeCRFOEyAZZnsLY3JFQWRLDacT5UlOmGDs2ngWqhkJ49cmSTKasZ7r1VyVytSelvC2GnplF7LAoHbFX0xf2Ng281BmI0/KqO6H+sq6x25/MeiBHcWjFKk96qy3n7WiPCKEutRFZpChWjjOHvhJYo53s3mZ3+oLI8IYSwppaSYQNLIIN6TMMQqyZyUZYrzWU3526Tzw1ghhnnGXAIzuExMkq6M1rIx7kRVkqq2RCjy6NRTT8X69etRVlaGU089NbCcYRhIp3MbN0Sj7dGlKI/tSkZNgRsme5kKsuZc5tb+lJFAjEEOwUR7zmtmzKMw/aOh0d4xeXF5W4uwzZHr+E7bC8RMie0NLXVLbWlMqCioqA9W5relHBq5gWptooqD1ByIyt+2JE1b2laY9U6UJloao2vBBu+coXRFE+Z9lSstWdNMdGKSqQhSkdAJF7unZQQbES65sh78evFmz/e2iM1YoyDQk+nsfUthA1TkChGkVKK9hIuIEvJA6QwU4nEKE6OLdKxlIcaXaqREmS8YqSvR4UT3VDegfXZLNJXVoMorhSuUFa5FZnDhmGSuEy2PAOCGd37M3mAbIBR5xLuqabe1nx+6leQ3+9yWWx75fxMDjqncEaTEUjMzqoXxuZ/dwhTBGhoa2xa0uJiyZHOWkhoacqis36JYgmi0D2zLe0aKP2WKq8sxOaVCS13Jt6VVUZRYh66FUHCZmQ4BFMWSJAzEJlUyUJy+MF3Ulnsb7dWiXmWBR8fCqKxRM7y2J1RJskYTwliVhhn/Yay7wyDKRgrpW6qsgGLsMJUuGCpeEwv2rZLLW1aGMPOiLESK0l2unUFiOKWh4UWezL5
OgqibRmHSG6tSj5oKJtlkk0CwVZK8XcWxXPsmaGhotDlylRVKQ0NDo6XYnshGinejcotRxdsSoVpiybLjBoGIoWqFYt1a8Fk0KawSxXNUaEuX/vKa7dd6Msr4a29QhGBlCThU8c+iQBVIPQpCZScNQbyo3EmJhJq0yLaUX701mPwTH60wlqNhLDJVxgS0IaAikWKSm0uXKrvmt6avxty1lVnl2lYIZXn02GOPha7wyiuvbLYwGu0PqXQm9K6DNOZRxPLuefaZsocoI/inqpjppMKMUSqT0vJIQ0NDQ0NDQ0MjV2gtV71cuxiGwQYnHsy0ZbYyHoYMzJVlh8aOhR01DMZWhQUSPQsspm2IuWGtIlMyPX9Rst2GaTOMMUFk7zcr+LwF66vwpzdnY9k9J7TYvTcXCEUePfzww6EqMwxDk0c7GHb+6ye49OCdQpWVPScqM0XVS5WeXVn6dBYZXxnU0v6UWSWpyCMV099e/KE1NDQ0NDQ0NHYEJJ112uYdID5YcyxZN+gsnho/I6iydtOzQElOZHGMWhuqvCjkVhyG71aR4jKXYdMEkJYneqI+G/fTBpTXNOGcUW2bfS0UebRs2bLWlkOjncKy2ubFpkprTPKoWF2WulERqV8GVcwjHfhUQ0NDQ0NDQyMaVHvloVxdNDQ0fnZQBU7PNSiemor0IYOGMB45KndeGSiItixIOFlk/bSuCo+MX4Rf7tsXhgEkQoaVyTVCkUdBEFOma+yYkLGgMuTS8jhU0EhFez846WVlZJDKd1wPZQ0NDQ0NDQ2N3KG9Bl3W0NDQ4BFG/5R5xbQUFERbZvRAv9Q7sl39xkx075CPSw8ZhJ4dC7Y5idSs1p5//nkMGzYMBQUFKCgowLBhw/Dcc8/lWjYpnnzySQwcOBAFBQUYNWoUvv32W2X5N998E7vuuisKCgqw55574uOPP94mcu5IqG9KK925CNsyxWxYFCSiDXHNHWloaGhoaGho5A5Rd+E1NDQ02gIrt2TPvtcaFlGDuhWjKC8mdZtrStmkUXVjComYgfKaJmytS+KQ+ydg1qoKfL98CyzLwuxVFWhKZbC+FcgtHpHJo1tuuQVXXXUVTjrpJLz55pt48803cdJJJ+GPf/wjbrnlltaQkeGNN97ANddcg1tvvRUzZszA3nvvjWOPPRYbN26Ulp8yZQrOPvtsXHLJJZg5cyZOPfVUnHrqqZgzZ06ryrkjYNWWOrw3cw0AoD6Z9kSGLy2QG6xt65hAYbID9OtSFKlOVeA1DQ0NDQ0NDQ0NDQ0NDY1cIG4amL26EnVNaZTXeN14e5Tmo64pjaK8GKobUsiLmSgtSLBskslUBje88yO21DbhlCe/xjszVuOAez5vVXkjk0dPPfUUnn32Wdxzzz04+eSTcfLJJ+Oee+7BM888g3/84x+tISPDQw89hN/+9re46KKLsPvuu+Ppp59GUVERXnjhBWn5Rx99FMcddxyuu+467LbbbrjzzjsxYsQIPPHEE60qZ3uDZVnYWJ2dFKmsTyKZzuCcZ7/BxAUbcc1/ZwGwLY94k7iGdpoZIk9itlejg1xraGhoaGhoaGhoaGhotDPEY16/ly7FeehYmMDArkXIi5toSBJ5lERe3ERhXowF56+sT2LVljos2GDHbNpS1/rxeSPHPEomkxg5cqTv93333RepVOsp6k1NTZg+fTpuvPFG9ptpmhgzZgymTp0qPWfq1Km45pprPL8de+yxeO+99wLbaWxsRGOjy/pVVdkR37fUNiEV274CJq+rrMc1b8xmAwoA/t9hg3H5YYPRsSgBy7JgGAYsy0J1YwpnPDUFJ+7ZC1OWbMaBg7uiIBFDQzKNOkYe2WZz7TWtaElB3JetI0p6Rg0NDQ0NDQ0NDQ0NDQ2NbYFEzPRkasyLmRjUsxhFeXEsLa9BfTKNgkQMNQ0pFOXFPZnEN1Q1oDGVwXfLtqK0II5ZKyvQrUM+KuqamI6fzliIm2bW2CxV9cGZ8HhEJo/OO+88PPX
UU3jooYc8vz/zzDM499xzo1YXGuXl5Uin0+jRo4fn9x49emD+/PnSc9avXy8tv379+sB27rnnHtx+++2+3x/6bAHyizo0Q/K2w8IN1ViwoRqdixL47I+HobymEcc/OglPf7mElTlgUBfs2rMUL01ZDgB49PNFAOyI7p2L8lDTmEJ9Mo1ETD7iEjEDyTZIpbhrzxIWGZ+QHzex/05d8O0y253tzH374s3pqz1lTMOfba2kII5qbaGkoaGhoaGhoaGhoaHRrtFW+mdrgJI77dO/E2aurMCArkVYuaUOu/YsQV7MRL3jtlbTmEKnogTKaxpRlBdDSX4cK7fUo1NRAt8t34IRAzpj5qoKpDMZjHnoSxy6c3fAsDO5pVWpxh001IUzuGhWtrXnn38en332GQ444AAAwLRp07By5Uqcf/75HksfkWDaHnDjjTd6rqGqqgr9+vXD307bE6WlpW0oWcvRvSQf8+44Dr9+9hvMXlUBAPhm6RZ8s9QbO2i3XqWYs6YKnYsTiJkGahtTSMRM7DewM5ZuqsVefTtiwoJNOH5YT8xYuRUbqnKbZnWnbsVYVq4OrrhX344e8uiQnbth8cYamAYwaqcumLZsi88M8LBdumPVljos5eo+Y0RffPZTMJmooaGhoaGhoaHx80RbbjDuSAqyhkYuYRMuO8azUVIQR2V9El2L8wEAhXkxJNMZpDIWivJiWF+ZRFlpATZVN2KnbsWoqG9Ch/w4SgsTWLmlFgO7FmPxxhqcM6o/Ji7YhLtOG4YeJQUYs3uPLC17UVVVhacuzl4uMnk0Z84cjBgxAgCwZIltwdKtWzd069bNE4jayHHO827duiEWi2HDhg2e3zds2ICePXtKz+nZs2ek8gCQn5+P/Pz8lgvcTlGYF8P//u8gAEA6Y6EplcFD4xZg3wFd0JhKY11lAzrkx/HX9+Zg736d0LEwgU3VjehWEkM6Y8ECWPyjeMxEzDAwckBnfL9ia4vkuvignfDC18sAAIO7F6O8uhHVimj2MdMb36h7h3ws3FANywJ6dSyAYcAT5DsvZiIRMzCoewcsLa/FpQfvhOcmL0NxfgztMEmchoaGhoaGhoZGO4FhYJuvF+OmiWRanTo8P26isZ2Gk9DQaC2YOeYZtjUuOmggXvx6Ofvet3Mh8/LJj5tIpm0dvTg/jnou5lGnogTWVtQDJUDHwgRWbqnDzmUlmLWqAnv36wQAOHfUgFaVPTJ5NGHChNaQIyvy8vKw77774vPPP8epp54KAMhkMvj8889xxRVXSM8ZPXo0Pv/8c1x99dXst3HjxmH06NHbQOL2j5hpoDAvhr+cuLvn97Fz1gEAGpNpdCnOQ2PKjnmUzljIWBYjjxKmgVTGQjxm4Oz9++O1b1c2W5aOhQn2t2XZXPJvD9kJz05a5inXs7QA+QkTcdM7acRMAxnLPtc0DFiWbaYHABeMHoDXvlsF3tkz7lyDaRjIOKuBc0f1xyvTmn8NGhoaGhoaGhoa7RNxZ93aHCRME02yPNrNRK+OBViXJcNvImYgWxiSvJgmjwiy0BQaOybM7Zs7YkmeBnQtQsqxLiSjh7x4DMlUBsl0BqUF+ahpTKFDfhzVjSl0Kc5DbVMKDak0epTmY8qSzThiaBkAO6TLyxfv3+qyR8621pa45ppr8Oyzz+Lll1/GvHnzcPnll6O2thYXXXQRAOD888/3BNS+6qqrMHbsWDz44IOYP38+brvtNnz//feBZJOGjTG79cCFBw5kjGfGsgd5KuME3XKY0XjMJl7ipolORTb5c9VROzerTd7FLG3ZJBURPFceOYQd61KcZxNDzgM2rE+pK4tDbhFHRNZJRHzZsD+JfIqbth9oj9J8j6WShobG9o+CxHb1itPQ0NDQaEW0p3VeQpIhWERePJa9nnhwPVGMMzrkNyuSSbtC3NTv/Ki49uhdAAD3nbFnG0sSDbnkCM8Y0ddnlNDayHOe2zG79YBlWbA444z8uE1UJ9MWivPjSKYtlBQ
kYFlAp6I81DWmUdOYRllJARpTGezTvxMAoFNRAoft0r3VZY/8lDU0NODvf/87TjjhBIwcORIjRozw/GtNnHXWWXjggQdwyy23YPjw4Zg1axbGjh3LgmKvXLkS69atY+UPPPBAvPrqq3jmmWew995746233sJ7772HYcOGtaqc2zviMRO3nbwHGpJplDgvk3jMJlksy+u2RmQST8aEwS/26uVt0zlvj96lyFiwLZyoTu4Fm7EsWLDYAuBIh22NmyazIHKvwz0/nbE8L1E6FovZVkpdi/N952toaGzfiOXYrJm3kNSwsVO34rYWoc3Qt3NhW4uwTZCnUE7bEtu514KGg22ptDWHPKJz+DXiqcN7B5bfu29HAMDZ+/dX1psf4rkKSlbDQ9V/Ufq2R+n2H7KjPZGD2wsMYcO9rZHNCEH2PAaVCYMhZR1gwcqpDviXE3YDAPzu0EGe38tK8jGkzE6+xbx4YiYylm04EWeWR0QeZVCYZ5crKbD18U6FCdQ2pVDTkESxo6N3KsrD3aftifwQZHMuEHmkXHLJJbj//vsxYMAA/OIXv8App5zi+dfauOKKK7BixQo0NjZi2rRpGDVqFDs2ceJEvPTSS57yZ555JhYsWIDGxkbMmTMHJ5xwQqvLuKMgmbFQlG8PxLjJu625RFEqYw92YvtjkhfdDcfv6vtNfEYTnPmezcC6pBE/CRCBRQ9YgiuTcVzWiI4mf9g8iUzsPMdtzTS3vS+7hoZG6yLXC0m9MPXj59wnP5dr371X+0wWsq13ijWaB9VzsnNZB3Qpzgs8TkQMKU4tRZQRQ2LTOOOXiIc7G5eicti1OI/FHdnH+QxCGFI2jHWSqkyYOYrIA6pn9KCuWc9pr4ibBi48cGBbixGIMIThtkRJfpzFKA5DVG4LZJODjqt0trDvhg75cXQqTABWeHfHAV2LspahZ6qfs8FEc0KnogQO2bkbAJ48svXQdMZCvmMtX5iw4/FSKBaAI4+KEshYQG1jGh2Yjm7gnFFqsjqXiDyKP/zwQ7z33nt46qmncNttt+HWW2/1/NPYcbCpuhE9SgoA2O4fLnHjWB6ZJjIZ222N3l2008+/fGQPsSUYHNJkQDGIbPKIyB+78t8eshPSzrGYYJUUN223NRguGx2XWC6JMtmkkwUDBps4Lj14JwDATSf4Sa/2BL1w1tBQQ/bsNwf0qOlHzo9cW3dtT9iW1x5lvs/1OG2v75oYt0urkRtEJUQvP3xw1jJ/4EIPiLCtyYMxcmAXDCnrEIn0USHKHiEfG/OoXctgWRauO3aoc8xJre0ohVcdtTM6FibQs2MBC5MgZvwVEc5trWVlaI5S3dYEF+IhbJu5Rq6m0ljMyDqG2/KV1Z7m0qE9SpCfcC1VWrIZkssxky3hFo1Tnjw6cU+vN8uRu9rkbp7iGevfpQjpjGXHyY0g328PGZS1TEwwcDjXIXYMGExu1xDDtjxKZSzkxez7UZxnf1pwCW9yKy3Ksz/XVzVgaXktJl9/BPbp3znCFbQcke92nz59UFJS0hqyaLQzvHTRfjhzZD8AQH48hlSGYhG5LHXashCLGexhpwdmIOfKQL/t5ewgAUBGiO1H5pKmYSCTIbc1co9zXn4muc5Z3IPpEkQ2CeQuDsSHF3AnGzpmk1X0YrUP9u5kM8VEku3R2911/fNxQwP76/aT9wg81howI0z03TrYO3u5fmmGqU+7+mi0FXKVjYMWRtt7do/mQqmctKPF8LZGlDm4pYjSz7m+J9mU4LYCs3jejp/LspLsrkIl2zAWTdS+DOPqoSpiAbAUBUxnQzBXz1oUC/MEt8nYsdDe7e9cZK+l2Caqs77skB9HxlFEM8I6MwgqxZYQhmxQWbOwjVaFS5K4no5CBISZa8Lcuqjj7u+/3Ev6e9w0s7ZHfdoW00ZrvDNu+YWd9CiqG3XGssN5ZIjsDBgjYcZgz9KCSG0/cObegcey3ZdEzMSY3cqQsSymkx04xDZYIHex/XfqAgDIT5j
sOTvL0WcJlzoGCTDcOeh+Z1xRWdn8HGYdKBo40Pg2OAMHes7iMQOWZSGVdi2Pipw537Ispl+TLsU/c4ZhoG/nom2+DotMHj344IO4/vrrsWLFitaQR6Md4fChZcycOC/uWh7lxdwBn87YsYlMgTzi34n08Jw/eiD7jR6ee0+3A7S5lkfuTlScc48D7IcvnbE8TCwfayljeVOpii9E/tESYzQZDmlF18WfV5znLtz6drbNFffmiDDCtn4RRXnZtpbSG0aGn7FuqbENoBqCYcYe7fCokCdxof05QRlTIwKxMDCEuff2hFwPB1V9UcZeruf7tghCG+Z6xTXC9gSaU3p1tJWuvSRrCkIYhTPMPBYGUee4bGRMp6KEclc/6PyLDhoIwM2em6sxrbZz8sJdz9obpbY89rFETFxD2jFLTENNhvEIM3fym59kFS+Ctx4R3fviMe8mrFQOQdENQ2oRQhFDikJ3n2brAFFJlSDrlJiZ3XKF6QbtdF6TgSz8ZJfWu5M9h6hiBYmb2789ZCdYsO8fjVYaB6SXie6MKgztGc2oRDZn9yjNR8fCBAzBznCQY4xA5wzv14mFUSFChY7R+KfvO3Urxhn79gEAnODE2n3u/JEAyFjBa3RALR821A46LSO5qF/UBJhXj+TnEtJ/XQsqC03pDGoaU6hyUivK5nMyyijKi6F/lyI8de4IHyG2rRD5yRk5ciQaGhowaNAglJSUoEuXLp5/GjsWaODnx02kMhmP5RHFQYqZJnuY6IE1DQO7OXESZMG0xZ2ZBGcebFHA7Jh3ZyfmuKZZFtjT6znmuJ+pLI8I7Dwiljg2WGSMDQM4flhPzzWce8AAX53bevnqWk+FLxtGxihrtDAv/Gwv8lxCZ9dqfWzLXfAwUBGYYRSOnh2z75g1x/KoLfTZMHFBmvM4qvo4ymK4VGGFSIvAMPINzxJLJCxaOjXlSqHt7uxuqlTOKC3liuTs1sGWK4ySqyI/CEURCI4wl8DWG9sheUR6qylZH4kIcz9zdc/DEnEx08CgbsXMakGG3h0LUJwX95Epp+/Th/1tH3PbfNBRyA5wQh8YFJcyR7c4iuURHxaBXNHYWpcLdgu4IRdIXvotTP0AsP9Ar/5Ep/JrmmF9vM8Yi/1pGiy26GNn7+MpwzZ2FbKIISLCPO9UXZj1naof+jjWMlHvb1DxuGlmnStjgmK/LUD3UXYf9g7xPiP3SNn5cdO7PpHNBWIwdDZe4Y5X0odoTSR6f6ggu3+P/nq477c+jmcHyfjHMbuwY7RRTwQvnX/NMXaZ644dCsOwx2naiVFERBOzQmUkja2X8oYB7L7H3bI24eu6krmxcv3XLoYvUI2fmFCPyfQ11zKR3ocPfLYQ1Q0pAHasIwAo5FzT6BitkQ4a0g0T/3Q4jt+zFw524idta0TWtM4++2ysWbMGd999Nx5//HE8/PDDnn8aOxboxZAfjyGdoWjwXJwhy36A6BHyPCDMFNI/odFiIi6wsvTi5YNi8y5maSfbGtXErIPyY2hMZRzLI2+7MvKIscGGW7fIvvPtJgTLA+mksY1Nj6g7w+wK04TY0he9iPZmeZRr94WuikCezUEY8dq7B0YkcnEbXItqvIaRtcB5WavSm+ZxRHJYtIWVUkwy1/rKNGOAqZTzKFYfqrJ0aEQI3/1cWZq0lPxp6dgj0D3J5t4ThH+cO8LTZpQguTI89CtbgXffMbkhL8L0dxSlVFQYtgUGhcguGGZjh+65KjYjIUy/hYnvFmZMhiXiOhflwTRdRUgG0Rr835fsDwD41X790LEwgV+N7MvCBpAS2a+LbZ0oWqOr+iDKfBAltgndxy7FeewaaLXrZgPm5XQsOahsFrGojh6l+b7A20z55FzIxHFucjL0YeEWvGtUGhYqAiAmkARh3hFUJtwaMPu9yxURH48ZWZn2MO/JXIPi1Mier1tP2j3r+QmBhOAh6lH8dY0c0NlzPoNBwZg5bw02J9llf3+Ebe3UvUOwa+0dp+z
hVOeXqyDh3yw40bH+ofu+dz+XEGWWUI48jMBx5OnsPIcx03bzsq2mvLqkq6u518fIXKcLeP3PsrzGA1SGnjuq74BBXXDQkG6ea6VjvLvgQY77nPvcufoyAI+OWpTn3+yjdolYqqhL4u0ZqwEAyXQGy+890ZGzbRWFyOTRlClT8Oabb+L666/HhRdeiAsuuMDzT2PHRH7CRDqTsd3WmJ+mu3AzhYUQb+abEB6eg4Z05ayDxN0b/iEm4sq7+8OyqnHndXL80D0kkIQ5dgkib2BAA/DtFrm7mmDX4l6f++gMKeuAuCmbOlsXZogXYIy7BiDc7nWUOSlc2dz2zK4K81jx3rUUUSboMFYfURZlLUVrLYyi9Mm2WJy1tLtop0c1ZppjeZQri7soMcMKnDStqn4nscQAkyrI6uOTHARBNL1WEd1GBGWkpeOKLGpa+qypePtIJHyI61ERSxRIs7+jeItty9xQVOOdLKHCxEsJUx8hzG0j2aNY1G6rWGQHDOrCLKpVEJUwGUxB9jDEqrJNSaFCQXmTVeMqO145snUpWQMFxTy67LBBLKYKKXh8gPO+nQvRrUO+rwwfRJbkyGTU9zjondSxMIFXf2tnZKZnI4xLGW9Jnxc3UZwfZ9cpWh7x1m+WQ3KlhbXkyXv39rWxU7dizurM9GUqdq1ITGaBIY4RfgPR4P62zzOE78FjkeqleE5h5iPKGNVSYpkRHpL7S/FsZDGYgsYdr48EtsXds1zggEHZvW7IskzWX1H6UFaUf1YAl5Q0DOC0EbaVn0gu29Y2tqWcJYxtaovCdOzSowSHOptrg7p7yXOKdSQbXjLDATIqED1DBnUrZpZ74jWLm/1E9pA3CuAPLUDjPcZZ+oh6Y4zrJxpNzPIo7n2+uxbn+0hhOvanY+xxeszuPZjsblvePuAtocT1Ub8uhUwOIo8GdC3C/Wfshad/MwKDu3dAe0Fk8mjXXXdFfX19a8ii0Y6RFzOREiyJEhzTSw8Tv/AT3cDoswP3IhbNR+0Xr7cu3uwvnbEQNw2knKePJkRypcmLm2hMpp3zvHV7rJKYxVLcbctpl7nleVzpvIRWgp8MLXW0/ijuZYQwa2G2QA1hOmkKnzKYwj0Mgyg7yblCocL1QRyHLUUU5TLUznuOyoRBrtUpEitKn4iB9FsDSqIkxPm0O2aaRiBRw5Pb2dpkcnELnZaA5q9uit0/Qr7CNJ5A9yTKOJM9TzR3quYfcdGv6jc2/yhWJRT4P4wyogItzFoa8kLtMml/dlC4edL1qOQgBUoWq+X/HWbvDNM7+a5T9/R8J+RL3HkNxfgU39lh3BaorIrcj/LchLO2IQU5d/OLShHMi8dCWu94P2UQ+5jvm8HdvTE+VNdH40tW5qWL9rPlcA7xI+g0x3WMLH5EhTrbPM/WPYIS/6uRfW258uLMrUS02slz1ozkLuLNQCSQMiHIxG4SC+FDd+mOuGmgMBFDYSKGe8+wn40zQ8QISQhrPz5OiSGsu6isYRhsY9O9XhsnCeRRImZgQNcixE0DT54zAjHT8MXfoe7P4xLSuIQjmHwkC3PfEcg3kRSUgc7Zu29H9OlUiJhpMAJZxNO/sa0c9+rbyXuRCrjWhJK2FWTloTt397TJQ5wN/8+xkhnWu6PCpU145kI8zAO6FuFwJ/7NmN1scuAI5zsFZT5luP0skZWPiKK8GM52sm3J5rUwMYV4mYkIFY9R3YPLOrDvGfZc2ccuGG2H3DDAWQY69ZjC2GZZtDmS5s/HerNQ86RIkMxHO/02oGsRJ4/3OT9uWE9GSIt6mmgIQImVDMN1JyWXeDEWmUd/CyKP4HYCXYdoeWRK5oCEUKYgEQt0f2O9Y1nsvUBBscfsVkaHYAD4+oYjcfCQbvj82sPw5XVH4Ff79cNxw3qxNVd7QOSl07333otrr70WEydOxObNm1FVVeX5p7FjIi9usodC3HHhFz3eh1uMIcQdE2IeMaLGdCcOqpV/0NMZC4V5MdQ32QQRWSX
R4risJB8bqxsBuCaBdH5lfRKNqYznN7awN3iyy5kQJEyx1MTccdkP2tFylcgISrfimDhZq1484ksl1A5QFKUy1I55FEPx7AgTGyJMV3cJ4ZIWRScJo+yo6nOJr/BtKttqBmmprC/COBYJ09bMhqQaD2FkLUjEMGa3HogZBkvD/MKFIz1lRMujKNYxqpIkuqo63gw7G8IE9nbdDbLXx86RtB1GyRfJI5UyHYZoDDWPRSAdWm55pBh7CHP/s8tx4GB7h182jVImUN87VlAYSwv8pKhrteBvW1Q8wyg3KheyKKQ+2/2O8IxFeWdlq1b1ujIQ8hqEOBwyxISxTH3cp1Mh/u7E/RHvp+wyHz5ruHMsWHmTubQd4aSyFhVp9lwr+rRXxwLH3cPdNCO3qwsOHMjOd11LbDBCJG7CgME29GQbe3EPKaPOtvbHo3fx1A8AFx04EKZphxwwDftejOjfibkdDupejIsP2gkA8M/z9vXUR/PoqEFdWCrvtLMGZOtSiasXy14lWikJsh+5axkyFlnJ2MdFgta1SDN9zyq/sUllRKsRFTkpgr1jYiYOHtINMdNgxPSLF9oEJJGqng1XAP06Z0+CoHoXxhXvLMOwYwV1KZYQWcJzSmTWiP6dAp9xkZAsyg/eiCRrsZ3LStgzcZwT+/Rqh3C9xwksTaIHZakz4H/eecjCYIjWovwGsBinUbRu2bmsBPsO6OwhYqi+44b1YvVYEMarU5+PuDVdkkZcz/M6xqidvMQ7zX8H72yPqfy46TMqoGDvhkFZuF39MC7oYvy6Mm1ZdhIlpyy942gzkPWzwet29k+kN9KcYhNoFqub7y96Lg3w3ilw5CLyiKsnQLelXstY7lqHLEN/ua9NaDelMsiLm+jTqRDxmNmuLI1ERCaPjjvuOEydOhVHHXUUysrK0LlzZ3Tu3BmdOnVC586dW0NGjTbGQUO6okO+G/QwIWGDGVsbc3fxxewURMrwTDBbOHGEiGgamODMB1NCZHzaQc533DUSMRMpZzbpUOAljzZWN+KndVWe88iKhV/g+GMeSYJpO59jdivz+emKGTGiLIbdcxTKEZPPK68Mce4aspWNohi752Qvk1IFRYiAMApIFLe1MLJHIfxCuQ8q6uOzCuYCUXbwwyCSO4nQdhgLj+aSXHlxdRDeEf07KY8P7l6Mvp3t3VbqKrJAomwaNIfQbmyU62npuOhNASZDEmGAWkG2YGFw9+JQxK/MbemKI4bYMofY0c6LexWDMJaPqkw/YeqhMqrYNGECLYsBbGV1Ku8JrV0VRURrD9XOPM2iQ8r8C0qfOwbNlc4fJQVxfHzlIQD87oZy0sH5DEHWBcngOSYQEuFcrbO3SYv3KBZk2Z4j1dvKMMK+N+xPVVnx+eHnSjcQstwFiUfnIifbkEQB9bmiccfc9Zx8M1DVTxcdNJBZCZBSSTHjSDEjpZQPIE015jmEDpFPvOW2aP1ASptszOzpBJCmaxjcvQN+4cRUgWFfkz2vu5+GYVsjxRzlGXDnKbIwOX1EHwzqVoz9BnbhrInkrj28VSplhcuwNay3Tw92YqbwFk2GYR8X51G+HSIjRasw1p9xw6f4i/dTHfPIrZfkodIkx81OOnh+XHQvyQ9nlSjZeCJSyjfuuH4wDQNx05S+Y0S3NTZXxUxp/B3AXe+PHNgZvTsWIBEzWSDroT1si8mPrjwYAHDCnj0dmd22MiLByZ6r4PmbroMeXen8KHmuz3cshNwydH3uWKTNLt+8zz97Ge9znhd3y2Yy9pxCa3RmVSexWmPkkSMPrY34tunvB4VjfD8QqO/yYl69kNelRLKUf29nHC8U0UihMxfChORyyWunTS5cCf1OBgKi25o3pAqVd+QT4ywZnN7L5gf7M+mwz3zw/zxurqQyQQRke0NkKSdMmIAJEybgiy++8PybMGECHn300daQUaON8cqlBziuZvZ30Q81bhquib5D2MS4hxFwywFCcDL2EPIPKDHc8LSTFzeRTGc
8BJNLHsnOtzzHjtujJ5fRx/AciznElF2HuCjgg2l72eide5TYO2dcxoJjnd0JQhRrGIKqqEgyKC1xhB2JMIFvw8hJZcKQK+m0lbVMGLgvkuA2zRDXSSDZVenDVS98EcqMVCGIOX6XJypkp7gLiez19QqRccy959nlEQlT1XX3dtpubjrwfMXL1jA48/oAkPWhyS3SAXtRs7sT3+T4YT2xV9+OzN8/VJDQUISJd9EnoigvhqvH7OypTwUKPqkar8m0rTJFCXZPt6ZDfhxHOSbWYYiFYb1tecK4m7HdYEkQSbGM2trMkVURgyxMoGWZZeK9Z+zllUd1TwQFUgZRUTcNA387dRh27VnC0gmz+c6pb2iPEqaEWkI9vCWCWK+40JX15V9P3M1zTFwAyyCSpLLxLiq9YebKMNOg6KITBlnnwyyWR1GSTqjKktJD2aby4gZO36cPYg7RAXDvbwUJr3IzC2N1IpILYc4hy3IPUeIco408OmYAvmfBDmpseHbzfWtCbjwRKXOZEFSaArvTpfcsLfC4jMVMgyV0IULEVibJ2sm9HgC49mhbGb/iiCGA4VXG08K6VCTmePJPtM6gMhcfPNAjr12Hfb+pPpaSHW4ZQ+gT+uxUTGnK/ZZHIjmpup98vYw8EtZ4YsBm6j/DMHDGiL7SesU68uMmc79mcedYfX45SRb+txH9O3kUeQJ/P4Iu9YlzbPe3Id07sHfguaNskkbcKGJuvXCnBDcZj+D2RNcJwyUvPR2hntfk5Jj3OxU5cmgZOzagKwWX98rsPjOu7FTGtWS2s2Xnx000JjOe66B5RhbDlp5TItt4IpOOdXHe9554s+JzLsw75PbJ64eiiyjTzUwTyYyFWMxAMuOVnWKPupZEnKcLEUM+tzW/WzjfFskrPtcJwTqJSC37fG8Z6mM+Qxy9V6nayw8fjIMGt032tKiIvFo/7LDDPP9GjBiBBQsW4LrrrsNVV13VGjJqtAPwpIwYDJJPjckrYmk2+9GEy00SxPKKEwhnspgWyB+eISfQg0kLFp5YorIsponhEjz0gJOi0iE/jtrGlEdOj+URY7fhOUYvFg8rLfQdfSdGPAxMw2AKgghxcSAGWuQhLmxy7bYWBjmzPIL3WmQQFRkVqIgqVXsURUZVRkzLLEOYRV6QVYbsHJU1g1hfj9Ls5FGYuFkEkYhUjjv24s1arRT8To24W8fLEARX+eLL2juFlMVmcPcO6N4h37M4C4LPDSTEMxekZHoUSVM9Fkf078RMt1XypTMWy1gSBDokXq9p+OeJoHq6dchjsSDCWMExRS5Ef2Uj0o7atUwZaLx3p4Ks8sgO0ePHk6NBxErGsrBrzxLl+Auycs2Lm+yZpPfs3890iCvDXegGKTNsvnHajsdchers/ft7r4HrywMGdWUKG12fWEYEPzYuOmigVBHyzaOKPmEKTIh5JtszIas3O3cU/L6yLVeCz2XPTXZx0JjK4Jjde6CspAB79umImGniyN3KYJoGu5+nOs8PH1jZ32bwPRKtY3gwKwo2BuUKMQ+KteLG9wFEti2P38jLyN24EjGTuQBmLCJcwOrmr4ueCb4eCvLtU/QNr+IZo740XGKEtcldIP3Fu5qQfCQLLWPEaxH7i19nikQTTypalpesoWuhlOw9SguwT/9OHisgMY7egYO6sfpFqxFx7RfG3Z/ILD4BDH2KFmr2/xZihjvun3Hc/96+fDQA4PR9bFLpqF3L0LO0APlx00egsaxbfTuyayR3QsMh1miOHdS92I4SYRi+59RzPwIeUpY91+n/GKen8GOTv26eZHTvp7ffZPHj7nNibL144X6MvOTP4SHbEKUx9OqldsB3XlcSY/iI87Uts5fcJYsj911hr83zEzE0ptKe+qgM741A63jmUuZzqXXbEj1LZM+G2B8GHFIXfis/0UU0ETeQSmdsnTDt7Qtyg+ODZLuEDpXxz3XiPabb5CYb4q/PkUuML2a4JBuL70bGC8N64tOrD0VezGB
WSKKV0e8OHYy9nee/vaPZ9lFfffUVLrjgAvTq1QsPPPAAjjzySHzzzTe5lE2jHYEnZUSz4jj34ipI+B8i32RjeFlhgGN3DY6hZlH03WxILpvuPZ/a5RcPNKkUcQGWibRKZ7zHOuQnUFWf9F6XJ0YTvai8izGadGRmzQTTNFBaEPdk46IXQhDiMQOjB3eVHxMWBWKgRcA1CRZfLqGU3hArcSoRJp5ROkSZMBAXGzJEcb0LE5fCFBYHKiiV3ijknaItGpPkW84C/0rOCUP2iKlLywKCZPLyhYrtFJJgALiFQUj2SAxAzFsekdsBwTDc+xwE2mU1Ta+pfsyws7bQ4t7gFsnZ6rPryH7tTJkIKmD4iZeg+gzD4O65WkYxq4msLsBtu8mJFcfvAme/twa3AMs+dlIZ250uZgTXGeYZIajaogC2YazCZL/REdMEbj1pD+n5GctiATAD2/CRR65Fh6vM2H9QumQDkoWu6GZG48DT73bmqOMdy1iZdYxh0M6993x1ljz7szGVQbcO+dJ+8ylZgbWFJwhtucLPq/yzrULQ68o0kDXmkZj1SHUJhuFaVhyxa5lNXjuKu2nY9/2U4b099Uhd00KQR7JjIllDCrGMAHvWsYJ7kQvAbcE2E2AW5s4JvBtKxrIQN01fvKCYc4203uMVM9+axXTJHqrnD0fa1pgJwf2ELC7oN5qv6G/DmZN8VhC0luKSrBD5ZMvgniMqlzJiyLW6dwg6iaUDueIZgCfmEdXDu8yx8SQ8Gzxhwvet3ab3UzZuibDh5WPvQqe8a7nib5vWvuIajzZzzxllx3KhDTqZHkH17dmnEwA7phKtlen5IGJz1E5dfOnlRTdcIr4MA7jFcbUTXawM+ElicX3pzlWu5wHTAoR5lu9ZscyQsg6Ix0zlO5B/PvPjpifpgL8tf3KehLAB5w3e7t1gcC0M7fhDBZzlEYuPJLqbmy4BI4Yv8cztwnuJuVxz7ylxk53XZygOmJitkGSmzX4ijXgXMNIBaf7hkxyJhG8+p1Pa12IyQkfULUUSGwCXPMpveZRmJJv9SZtYxflxDO1ZguL8OBqcpE4kR2lBnLkfby+IRB6tX78e9957L3beeWeceeaZKC0tRWNjI9577z3ce++92G+//VpLTo02Rm1TCk3pDIryYthS1wQAKCtxdkZjbkA/PshYRniI+JhHohmh13TRG2SRpUzkrJPEBV7vToXo16UQmYzrG08POCmbvN88TU4UsMx2ifMy4nGJvL4XDLmrGfyi0/tyiJnu+bRwD1Lc+nUpxO69Sj2El4gwyhu9rP1lA6uNRA5EiQXUKUKqcdViO4z1VBTXL1GxlSGMmwW/gAtsi/VtsDzZrFAAf5YIlUUHUwIUbeb5TJlVfRtcjwhxLEXtmxuO3zWgNPDEOft4vvO7N2I/UFBWpayGrVRQvAf7PDgWAO5i2qkNeTET+YkY/nSMHTTzd4IrhavM2d+bG+B9uLMDJfaPqi+Z/IqLPnxod2fXNbCIb+x0LsrDzmUdQDEoeDmCUkDTwh/glfzgNpPpDFNG7j5tmFKubAR3NqLPNPzyiCmIZQyH2C6v2InIWPZCVE02iAtUOwAoP2bcZBL++YEWuuxdJVg48vM/I4a48VSYiHmuiXb6xfeFym2N1Na0Y2Ui67ew980um32uFHd8lZsiPiXAwP8dMRgjB3TGu78/0Fc+aKuD+lB1Bbzyxn+KKCvJx12n7cn6m+Ybe9y6hLU4j8rmErYmkdwj0c2pH5elyfKtaVwSv2dpAQzDwE0n2PMwuXbzfZnJeBUqMduXa1VkIC24liRM0ya7TW5DUHBb8xCqlsUCUtttUT3e+dMjjwHHMtB0iSDDVay9RAb1hf1HQrA84uM2iRuJvmePk8EN/Ot9X1PbZDEUMw0fEWqa9k2KmwbXllgP2Hdxs2LkgC6e77Jn75Ff2+/SXzrp3Hkyi+rOMLLA6wrLZ4ATiUgZYUXPjqvUw1OfadjWqnkxgyP17Hvsjnv
DtTxyyvz3stGevohzmfyIXNypW5HTf95NXx4iUcG/A92g7t6yoqUnLxdPXsRMQ0ny8v1EYonxlbxukU5ZZgnlnQdt/clrrZeIe8vETdt6Jz8RQ4NjeUR6kT9WoVtPWiBOZOQKQfa+VFkYWpaFRMzgYjDZZUmHIyKmIZlGQV4MdU0pRiiRhwmtafm5QpRLvL646Qbels1j9Mm8YjLefuLXE9QWbbh1Kc7DpD8fwdp+/Ox92IZTLGZg57IOGD24K779yxhsTwhNHp100kkYOnQofvjhBzzyyCNYu3YtHn/88daUTaMdYebKCgC2T/mWWps8opgQCc7fg9/BEM0cRZNKwF38JsQFh+EyuMxtjbP2IdBDW5wfx6Q/H4m05S7WacFCQfIsuC8Eqtv1A3ZNCV33KKcNE75Fi/gSMg3XjFZcL/JfidQR3x/kP3zE0DLfy1GEO2nb3/kJmsXJcH4j1zcqEcbyKBR55HyqbIqomhMdH/Bs1e7WqzSLJYBXTmkZYfdUBRWh4y5Gg1/4rlzOuFa05d+dDC6jUrB9wW4V1xBGrjzh2VJbKXmPqe6n3PJBDpn1gJjylt+JE60geHdQ8dhxzMrC3y7tupLS5lGiDVeJ5okl07B3Eu2Fkl32iKFlnnpFYi+IXOHlkvXP/b/cy6PAZLO6cVUM9fjfrVcpLFhqQhTesdOlOA93njrMWQh72wjiFXjZw4yDZDpjz9GmN/YUjzCxiqj1MFZFfJmHfzVcWkb1W0whK0AxQYKl5Pund8cCxEwnaCinvJGCzLuL0KuBLBzce+IdI/x3A2Tx4R7r27nQI70drNn0EYOybF0MTgUjB3RGUG/EFPOUrzrhGmTwE5jZ32v8nE6khuiKA7guUiLIeisMUUVFZCU75MfRwbGuIILDMFyLCdd1yPQrqZK2VSSuqBDv2acjs1r1rWn4uB3MTcy5Dp+yzBM6VMaph1uf2Bt2fuvjWMwdJ5RJjdZksh1/IiZVmZ7oM8MpgWThxAJnszKUeMVbn4+44gmsDDx9QqsfU3j2eBlEFx26aOobyqRmB4U2PWUMbpwaglwuKWlgr74dPf1JTdGmhtIyzTl2wKCurIxpGGy+AOALSMzXQ2tqN0SFDfH55K1xslqYmXzICwOxmOFde1jepDnidcW5a3Ddpxx5+A1ooS9c6zPRetCdd8W4XL51Hdw5nycmafzxZQE3W5vnCnzEJvWP24ei/iRuQhgGHHJXslHP9Xc6Y1seNVA8Hud9QgRMAecCRnWLniR8LNKkoPPlCX1p949gBcRlPiNClbxGqF86OVY5RB79Yq/e+H+HDsKrvz0Ap+7Tx9MWrWn7di7CYbt0x/mjB+DX+/fH/jt1QUEihqfOHeGL75ZMW+w9S31KpJQrHwAit9IZz/n83JdMWTh9nz7YtVcJptxwJBIx00Pad+2Qj87FeXj1t6NQnBfDuGsOg2G476LtBaGl/eSTT3DJJZfg9ttvx4knnohYTJ3hRmPHAgVtzYubzOSOZ2/ZAoRTcMTUjt6dJDjH7E9vxgqaGG2Qfyo/wVKZgoSJUs4dLJ2Bk/0NzMSZZ6LtXe09mXJKxJIsnhK/e8iYfueYzBQ+KObRqJ26sjIi6/7p1YcCAG4/ZQ/WNxa9HAOYGXHi4ycdfpFRlBfDmSNts+EwylsY1yrKqKJSiCj4chEXa2pIWQfEDAN3niq3KCCEUfiUbmYhyhBYXymURPbSU1xvGIXIl5ZZsdAP4xYhLuRk52Tb/QY48lR4CcpAbRDJqgz+LRAdKusbIjj5cScSVeTHbh/zns+b+4pjd2DX4kBXE0rvbLuL2P3EkyYxw2BziRM2g7OMNLhdP/ucfZysbkxPYPc88NIZZL1Du9IiwRRkKWcY/t08GUxaWKoGrqRN6hPZoj+wGraoyl62f5ci1wxdKCYu0sJYHqkDVft/81utyc7z/pqNaPcugf3gF+C9OhWyneI4R0qJCqNhcDGPBDn8yrRz3ARboPIEh0iUuAo3Pb/
27wlFf7ttOJY5kqLi2FV1mzvnquZBV2EB1BZ84j2j9zA/f3kUYst7X19xXMzvPWNPNhcEQXzPyuT56MqDAcMljYgsJcsmZiUW87uIxk0D547q721TuFeya+ffhzKFXmxDnOP8yrI7dgIJHaYMmh4ygNownbnEcr6L6y9+LNM9cUkZbxm6dFGxjjuTO1n20Pi0LHgtOJzzeTLR9My97vrRtcCAp2+9z5ygRAsWCgaIePFbmRF48o2OUFt3OZaZ9AzzVrO+zQbTTpEue57EhCruGHTLi+SD91kht0Tqk4zTj95xZ1KfG34CxhTqi5leQlKcsyy491CUh12DU4bkEi3I+fMJ4ruK9Rd378VYQ6I1KNVrcP1mcusIXl6ZPkAwuTlefPZkeoFoJZPOWCzOHfV3UiA8bGsbb8wjMRaP6wHhuof5LNE4PSTpWNvQOOCtwmlTXCStSQejsWxnzLbPr3d0TRpHpQ55dPDO3XD8nr0won9ndCnOwxFDu6Mo3/Ui6dWxAPsN7IKXL94fR+3WA0cMLcN/LxuNIWUdcPyevZCImfj2L0ehttGuf3i/jhg5sAse/fVw7C+EhOhcnIf5dx4Hw7D7K2YarpeKcN/OGNEX5x84AA+dNRx79O7IMuXKcODgbsp3XHtHaPJo8uTJqK6uxr777otRo0bhiSeeQHl5eWvKptGO0L0kH306FaIgEfORR2Qmyv+W4tzHRPNLg/uNwGdysCz3hQi4/tP8Yof+zoub+OG2Y1k96UzGnqxN11SaFE/Lshs/Z1R/lDnBSDsWJjD5+iM8bDKBV9pEIoyZ8XPKmpghgII6jhzYmfn1i3WL5pNMTu56RfhM2blVo/sCpHrt7xQvQ7XwVQUuJjeiaxyFmyDLdPXu7w8CAPzzPNsKiu63aRpIC33Mw6K3bhDo2lQpZ43gaxDBm6QH1RNmZzuMpZaPvHM+OuTH8ZcTdvO0oSSPxOx53mEoyOVpSgoxA4fqUuhlSi/0MO5+sgUijz16l6KbE2eJv27xpcyf7o8r4n+uWD0mHIsh98DTvxnhOc9j8cWdb5qGZ9HJKx600OHbpF1EMb6FOl5M8D3n0ybzZYLGhwEjaxk6ZlnZLI+8f5gGmDWWO7f4rTa81+aXXdXm4UPLXFcO2NaY1zrzjSqdc5D8qiK8Enf2/v2YvKL8ImTkkeqZIXeHILAdccPglFNXiQN4q0C+XvtTjNnCKzUd8uMwDAN/HLML69OYaXBzo8E+aV43De8uucqqhe8Duga7Nv8F+0gVxaxER1T3WNz1VlnlUTV8IFw+TbrYFm3eEChI9C49yPrRwG5OFkYRpzu74IWM7JY819y9sK0a+TnKibMGb+wpnkAsdlw4xl9zqHPMrrekwCXR72UWDfZ5skQLlqDEUWbJg3fuZlv6wD++xA1AXhl03VlcWe11nHuM4pLETQMwiMi2PAST35WM1g/wZXwSiVVeWc5YLtmdiJlOv3rJ+CALIcNR+HkLPmf5yPqNERM+wlC2XjR9ZWA5G50wpGsaigPFE/a0kUpW/0TK8GOZwN8zIm7O3NebFS0hPD+u9Zs7/wUFZ+b7j+DPWOw+70SKZwQi0eMKablzIf2W4Cwh7f73rrdllkf07hFJRstD5wubxXS+6J4Ht4/F4NqiGyHJZY8Xty3emnO/gZ3Rr0uh5/mitskV3mMxJLRR25hicwBBXPc2pjLIc9w1Lcu+7x2dTTaai/Mdi6PTR/TBr0b2w5+O2QX9uhR6rovWfLbVndNrpM8J4z6PixvUmCISkeZcgyMPPaIz6ybAviO8BVNNQ4r190FDuqI4LyZ1NX7xov2RH4/h8KHdETMNTL3xKF8ZEWUlBRjYrQgXH7QTrjhyZ1x++GCcMrwPykoKcPywnsiPm8yToyARs/s9L46EaSLl6JaUVW5wtw44ee/e2LNvR5a9b0dHaPLogAMOwLPPPot169bhsssuw+uvv47evXsjk8lg3LhxqK6ubk05NdoYZSUF+PqGI3H
fGXvh/44YAoCLbyRZ5KQzGfbiYJnV+BexQJCIxJK9K2MfEyPSm4bf7Yzwu0MH445T9sD9v9wLB+/cHU+cs49reZTxxp9YdNfxAGzzxnjMZZPddmjS5v194blOWkzQS5x+61lagD8fGxy3xffy4XcjhDSyL1+8v+dccbHCW7Hw/roGV2YvLpNFEFTxfU7cs5en7X0HdEb3knz07Ohn1qlMJ/5l5SwKhC525aaFYqB09rE+nQpRmIjhdMdHP6ieMISOq6QE1xOG0BF3cqVlhJc71darYwEO2aWbpw2V0pQQFEmVNYpq99utzx0v/DkyUBpvCqqt6mHRKkV2TV25BbDYto8c89TtfErIP7EfbOXeu7CmbBYeqxoDHlN9+i4qerQoJGXCc41Ofb2EeGOq/j/ZCdwcRFTQ7rysT3wwuPGfxcvIgnqciWQET565u9WunPJ2IsgOfu6zC3UoiOMwJwB6GJKAIJr7q66PtyolUJwX2fnib44OLMXOZR2YwumLp+TAtcgi1wB3l5uaYgqYoMgCfhcA6p8T9+xp3y/YfWpbEojuE/5dfFK6qH9p51RF2IvWaaWFcZZCWrxO0a1LBrq2XXp0CCxLZNaArsWeemXzICmDv+KscC0mr1eu0/bpwyxC3PPdsWe/U+XvDAA4z8n2eIbzfupclMDRu/cAALx/hb2pQjF4TOcGeeYY+m6Q2yw88tE9teEdF0ft5rrPuhtt9uf9v9zLUxbwxzy69hg7TX2H/DiTAUI94nPMu60x4sA5mBcz0LNjASynrTtP2QMD6X6ZFDDbzTrmhjnwkxXMRY5ZuIhkjysnTwjxfUvWH54U7BzRBHiJ8AQX5Ngw3PWjGPeEubWw95hbH/Wxu0lIZVwrBhoDbOxy5JlNcrqbs2JbpJTHuLEs3qs9+3a025KQPb6A/Wzsmaw+gevxxK/L8Ite+OdHnvgmlzTffTbdPmpKZewynNsSb5XFrxNFrwWCa7Hrjhci3fg1UaBlv/iucUgjfoz63o/cfc1Y3nHInmWnbH48xlzKxOeULGvo3ntltj9rGlMsBpBoudStg7sRl3b0kkzGwt/P3JtlYqVz9+jdEfsP7IIjhpbhlOF9cMWRO2NImRseAAD6dynGhQcOdC2CuTZFK/hE3ESToyA1CeQRvcMKEjG2+Zgfj+GyQwchETPRu2MB6hrTKM6L4+jdeuLgId0QNw3UNqVxzdG7IG4aeOXSA2AYBvbp31l+4wC8dNH+gcdk2LVnKW45aXff70/9Zl8YhoExztwNAH85cTc8fs4+SDjxcV+8aD8U58Ww7J4T0LEogcfO3idS29s7IjvZFRcX4+KLL8bkyZPx448/4tprr8W9996LsrIynHzyya0ho0Y7wtCeJWzBxiyPTE7pcj6llkfcQlnMduEGHbQnPdpdOHr3Hj7rlpKCOKoaUjhnVH9fOuYhZR2wV99OOG2fvtipWzF+sVdvNnHt0rMEB3IZzPiXzq/3649Hfj3cUxe/W2QJCwbeUsP+yZttTWaG7IXhqceTScGS78zSi0EaWBFe+UzDYIH6bJm8LzsZRIUfAJsQ3YDm9u98LKVf72cvyl//3QEe+fgXNS2UMuLqhesN0V3AV4Z7iR66c3dpmXCWQvZnSliAyuoJa8kBBC9GPHL5lE83GGQYFwwx5lGYHXcVfJZHirKFjg/4gYPte89fLqXo/eyPh3pkV1mcvP+Hg31WQa7sweNWJMz46xRrol18FTFFBBOfYYbcSOJcYFdS3MR0tKI8lOFFRoqJOGCQbSLdLSDLHRFIfP2y8ZrnU2AUd9KZz0zTcFMX+4t4Pvm2RZJZ9azR+WHc1kzDdW0wDIq7IT4T2euhdvl5kSxriACVzZ0k6959O9nfA2Tk5ZDt+FNw4SN2LWNz2kl79ZbKyc9XTDnNeN3WmKUdyQmwh08kytwEFCYjHm0XLXdMU9n+XYqYi4Ub+8XJ9mXaVoHkcs0r1UR4sn5jfWP3RWEihtuEDHRiWmcVyKW
clBzZWE7EDOzUrRhnjuzL6t13QGePAk8gspvWEYmYaVu7GP7xXZCI+eJU8WmYab7KBBjQuvOyie4l+TAMe8MDgGcsG9wnH3ON/52PeSSfI8HkojJBzyIfF47Axwayr9P+THPZ/sT3tbvucd9RbnZbb6ecMaIvxv3xMJy4Zy+cPLw3zhs9kFlMkAslJRvxyuW36CE3W7aJJ1oKsT5xy7jWZQZzAfS8b7h1ncyKJSFYHpH1k5txybFsd8ZVAZf5VAyu7b7/DFZ3KmOxDQt+DU1wg2rLNsW8/U8kFJ3HtzmwazG7pyTPv5wNSdet3GtFyrcpWtjy62JxfIjX4FqhAYDFXKUAv/t7ftxEYyrtsTyivuLHgh3/yi2TFzPRsTCBfp2LMKJ/J+edTRYsDolhmvj+r2PQo7QA8+6wXZDE5RrJJVqJ5cVsUoS3PDJZvxm+76lMxn6muH7zJCow3ZhirvWf23OUxZC6QFzT1jSk0KHAa3lE94os0vbq2xG79izBPafvhROcmKOE0oI4ThluW8n89/+NRhC+ufEo5MVN3HbyHizmEU+KsYDnzmdxXgxFeXEUJFwrQlqXkHXR7r1K0cFZRxbmxXCjY3U/5caj0LNjAQ4c3BVXjdkZJ+zZC1/fcCRO2rsXrjxqZ3XMvW2EspIClJUU4L3fH4Rf79cPRwwtczYms7/TdkS06I4MHToU999/P1avXo3XXnstVzJpbCfgM6u5Lxr7c2DXYhzkEAziJMgHzYNwrHNRAltrk/aPhp0iNhEzWWplALju2KG48MABuPu0PT0mj0EwDAO/2KsXfjWyH+44RR5zp1+XIuw3sItT3v6N3/EVFwOezG8QLY+8GWvoMvk5xhLq4QkDtvghv3mns2jnMsYRbbacbvplPksBbxEmLpJlcO9BHlvAW8Lix2veD891uztJpMAICy/u2sQ2ySXANICbf+HfCaBj9oLQT6yISnMYssc1QZYtzKkeb/0y0CEx+CaPIAsmfjEr210TQUqA31TeX1aWnUmEGDBW3W+uzKJ8XR1ik3a3/ASnv14iBwzJNYhKgexYGPJCZvHiur259RjcJ7VnmmCxjnjLBstRIti8JlHcAWAUF4SU8H9HDAYAfPiHg6klALaVigjDcK2f7Prpd//1vnnZaBjcsWz3kQJmXy24obK2hU/STWKGwS0avdfN418X7y8d27Ky7r0mooOUave+s1TLBp0TeHkYNaiLZ1wBrhXL6MFu/DkA6FKc75m3AW6scqISMS6ONVk/P+pkMDLAW6Vawnn2Jz0nfTsV2Qqe6Q+SGxfmBRiudarfksD+pPmWFBRSnMl9B7CtR/1ZhOzx6yrajgxch1922CBPm4ZwT0xOkSWQdSzftUft6g00T/WwIPemgS7FedIxQ5a19Cz3LC1AOuO6YhJJsVO3Yjxxzgh2Xp9OhehYmGCWd1R1h3w3NTj1LYGlnjdpvnLjUonwWBALm0D8/bTJIvt5jTsEB2XMYgG0OaX5OJalle8v77NOirWsT0VLRJoDvGXc96K9dnA3CFwrD7sMESbF+XHUNNquJY2pDHo7VpeAPWYK82K49JBBOITb7CHXte4d8tGUyiA/bjrWFG5wXsB1maF1g8fNTIhlI7Nqz2RcIi7hBKS2CVV46gWAdNq7iUl/8wk4LMArg+VdP+zeqxQn7tkLNxy/K249aQ9ccvBOzL1RFr8mncl4rMwM7hjdF3L3F62KxPUOXSfAzdmmUNZ01zxpS7z3btv22DRZPSrXxbRledY9VB9bB3jW0HBCFxB55CXsUukMOhfloXtJPoqdsdCYyiAeM9l4K3TCZpQWxFFVn2Ttzr71GAzsVox3fn8Qs8LKi9nBiwm0+VqYF0OX4jxPEGOATz7gvU6yiIrH/HFRxbLxmPMMcs+OadhEqbvGonhhXOYx7vlkhH7AxoAFC12L8/DXE3djgc7pXtF67vzRA3HP6Xvh6N17YHB379rCMAz2jpJh+b0nojg/zpL7AMCZI/v
id4cO8pKzXJsx08B5owfimfP2xZfXHcGCWNN80bU4D4WJGG46YTfcc/qemPTnI9gmC+E3BwzA3533PAD0KC1gIUvaE/p3LWLr3Z8zckLnxWIxnHrqqXj//fdzUZ3GdoK8uIkbj9+V7c4D9qTYr0shTt2nDx761XDs078TY8ljwuQJ+F+qvzlggB1QEt6F+eNn74P9BnbG3aftiX0HdMG+ThrSsOAXkCr89cTdMNTJ7CST1+8bz8dAAfstbhrcot+9DjE+gBh0zoDflSAjvOh36+kSLYDc5NuAvQAQSQZlkGPn2E7dirndO+eYj6ywJeWJNdEMnidgyNJBzLpC13DAoK6uO5BQhgLYAe4CToRICKjIHjqfFi+qXd0wrmT05hZ3o8tK8vGoY80mI1F+f/hgD8noD+rob5NiT/mtUNSKTBBct7XgmEcyYqlfl0KUFMRxyM7kcgfP+ZSpgi2IDYO1NaxPqXMNRJ75r0AkB/i+EAkh7/MlyOwosrzsbHHNK02G/x7HDCe+j+EuqnmlUHwuxYVkWUk+yhzrg0sO3gkAmMUcr6zaMvk73ue6IiHCCB0LE6yursV5nvtIMQIoBhspQORmJMOFBw1kZU/au7dLJphuzKOdy2ie9C8jKFA5yTy0Z6nnGnjEPPOEBVEp7lqchz36dATgtQIJwv8dMcRrYQB3XPhij3DnufeCPt2jQaS0LDOcyVVkz+WuwjnGcS3i42Ids3sPFCRMRviQ8m5wbXhlNpiCIY5B3gqSrO74OdPrAkJxXHj3Cfu9RcQunZeIGfjzcUNZHWIfUNt0jnh3/GS3gQOdzSXCXafuyY4B9j0e0LVI6oLpxgMDTtizJzoUxEFZ6v5ywm44cHBXfHDFwciPm4ysYEo6ONdAR9J/OpaTBtd3hEScuz5uXNHG1ehBXfHxlYd4r8++Cx7rAkI8Znr6SSQASPnlrcSoLQOGb6oXLQB4iMGwEzHDdg0y/FYj9EwRicnPcaKFOCnzVx65M353yCAM6FqEplQGU5xYIxP+dLhfGAfz77TDBTx01nA0pjLIj8fwlxN2w5kj++H+M/ZillqdivIw+9Zj2BrDNFzSSAz+yx45w7tWo3lLjF9kX4vhI1N498xE3PSEXMg45GQ6k8HNv9gdg7p1YC6ugK3sPnnuCAzoWowhZR1w8y92Z+RRSlhr2GnS3eectzLzWLzDjQ9lX59/beAGzPb2hfs+4iyFOKssHvy8ahpuHC67frcPSHaqn/qHzZlOmRLBMqa0II4uxXkeyyMxHtSv9++PT64+BM9dMBJXj7E3NRpTacRNwyWP8mKobUrhuuOG4uCd5clbenUswNCeHZCImWhKp1kf8fj1fv3w9v8bjZKCOJdRTL425fWXNDf+ChKu3sM2TJ37Smte1r8xw5PRNp0R5m+n3MFDuuHBX+3t0TnouabrfO6C/XDtMUNx6SGD0F2wWJbFNssFDhjUFcfs0ROdChNsHUfXN6BrMRb97Xjb+qtLEXqUFniytOXHTYwe3BUzbzkapmlbYPXrUvSztdjZUdD2tmAa2yUGdy+GaQCXHTYYBYmYJ6PZpD8fycq9c/mBzCqGVyT54IhL7z4BiZiJTkUJFOXFMaBrMc4fPcCTnhsAenUsxDlCppFc49JDBqFbh3y8cukoz0ucYvUw0seTjpZe9O7bwjQNjysJLWR8ixVB4YyZfGYRWgR5ZSQCgU8FLsYCgCGa+9oKptQCRFjY22QYLX+9Lzd+Uc0sj7jvfBmXFHAtCsQFq2/HUHjpAsD1x9nXSzs1vAwvXriffb6wgFZaXjhlyOxclaUmTJY62ZG/nLAbepQWMOLL5Ma+fS32Thi5lvBtqogq2onxBaSWnGIInzzIkuL0EbbbR78uhegf8EL//eGDmcxUX4d8e2eRdr5EZZxibPAkDsUcechJiS4u5j2xiwQ5+G9iP/HXzlzJOEKOrAXEul0CDuw7r4SQ2yev0JGibXDjXpSHrivh7FTGDH9cHZVlFZ+
+l7Jk8f0jG4skHykcPLlCVhj3nLYXq5csYviqivNizMKnp5NQoKy0gCN07F37mGGgV8cCnOEEX5WtV1nwfabI0HX6ZTe4TuCDRfPPoBgrR+3+5h8XQcFtZZAR0KJCRujbudD3gLnPCR/bz/7jcudZSnByGAYRTDRWLM9YFOu166M502v9QPKRa4fr+mKwTQ1ptjVugMZMr4sFYCuXRHjS+RMdguD8AwZ4roVXZPk6Abu9nbrZbu8k+6Q/HwGAs65xzonHyIXPZO6GRICWFCScILRExLiu0WnLPqc4P+Yl503XJYvvE8C1lDAMlzyiNkW3Neq3fC7ZBYupwvWbe+/Brp2vhx/nNN/Y1o6OlZhpcu8f+BAUE4UH/UL3rLQggaqGpGctIo7t7iX5GFLWwaMAu1YGdhl6D+3ZtyMOHNINn159qMedke5xNvQozcewPqUYs3sP7NKjBL/arx/LvgTYa5aSgjhbY9G6iQLyyqyv+A06Gh/xmGmnszd4iz6TBeYV5wdquzGVQUlBHMfu0RNH7dYDPToWoLohhUsO3gmdi/Pwu0Pt53nB345TXieRDbxbI5F05G5ILfvWjYbEqoi9910XZLeMdy5IcNbtYuxOVpaIIceiLxbjYh4FjBNyvxrSvQN26laMv506DMP7dcKSu0+AaRp45rx92fg5Z9QAvH/FQehUmIe8mO1CNtyJPUgWZgWJGMpKbOIhETPRuSjB/qb7/KuR/fDor/fBafv0ZWsrsnwjDOxWjCN3tcNdJJmVk3dnz3ZlNPHChfuxTNKi1R49e1cftQv+c8koXHjgQIwc0BmH7NwNxflxRoLSfaBz7GDzXuI1bhqe+2BZFkYN6spiytH1DereAacM78M2eP553r7MQooSFXUsTHg8Ln6641h07ZCP3x8+2E3k00r4/NrDcPqIvvjxtmPQsTCB5feeyK5bxEsX7YeiRAwL/nY8DMMI5SWisf1Ak0cazcLn1x7OJryjdi1jCwcxeB3vE0ovkoI8N2MbvfgKEjHMuuUYdt4dpwzDMGe3uS1w0JBunjgNYowm3iWIsrOIlke0CKBj/MtbFkSc6qU0mylhYSMuCkYP7uoQRJBYHnl3LullJlP0xJg7FtxsMz7TWXGnD7wZq5xoshehRJ552+bdrkghCtoBMpw2TG5MkWmtP11uduVSvDYeUVyjaCzwilGHgjhMw99vbNzA/e53XaQyLpg1ns8KJfh6Vcd6OOTAsD4dcfCQbujWIR9H794DpgFc5FidPP0bezf+TAo0yykpRCiICgyBFE0+loK4g+nuvPvE8ylChsEFl4a3Lz2EpvPJm4iTYsZISNGc3yGY+HTHRHbybgUwSLm05Gbl7B67MiTTGc+YFpV8Xvk5dBdvHC97HnHTFO/pzIf89ZIbHCNVDNeqguDOLW69dH28jciArsUYRVZ+zjV3Lc5jZJThjOdYzHvfeMujO0+xY90kTMMz97t9BB888wRcUsu19PCPe2Xgf9OVlSCLu0fwue/QfMM1wY8VQrcO+ShKxHzkrMGdLyY/4GUEwDZDYiafvYrkFJ4BuH2YzlhO5lNHiWZyunMRyWwHzPa7x9AYtudVp6wzT/N9z/eb3Xf237TrTQHo+TlO7JME21yy+8WQHJO5IKUy9nXQ++0mJ0bGvy/Z397N58ZDOgM3ax17p7nPDx2jZ9ijcHPvMwuifO6YIQKZ3ud/HLOLR3HizxNdU1wrXrDrM+ASvxTnynXN5KxlhPcu4CevZfM8/VTkzMf/77DBePis4R5rZXpG6TrPHz0AL164n2d+F0nKIWUdcNo+btKKgkQsVDwrEeeNHsiysvK4lQtie+wePfHBFQfjNwcMwAlO4o7GZAYncrFc+GeX+rkpnWFWOwmT+te9hjiXYZePf0gK8aNnDcdxw3ri6xuOxBG7luHKo3bGG787AJcdNtgnr8q9hqzv5995PHp1KsCZ+/ZFIuZmHyMCkQZPWUk+BnUv9hJDhvf+8+smvh6+jBj
QmKxiAO8mIwAuCYLJCGA6FmRh26kwgV6dCnD7KcNw7TFD8ZsDBqBHaQE7fswePdGztAAfXHEwYqaBorw4XrhoP/zx6F3QrUO+Z1OkKM/ffzNvOQa79Chh66Mz9+2L3p0K2YYVADx5zgj07VzkOxcAdu7RgcUAagrI8ltSkJBaHu3drxPbIC7Mi6FzcR7+74ghOHBIN/z7klGsXkKfToU4aEhXlBYkUJgXw347dUEvJ5mM5fRZfsxup0txHprSGfzr4v1x7B49sVO3Yt8aiOaoY/foifx4DFeP2Rk9OxbgzlP9YTfIyvvPx+2qtpDPAToV5SERMz2ZHYNw+NCyZs0JGtsHNHmk0WKQKeLye09kfsUqFCZiqGuSm5O2J/QsLcAnVx1i+zs7kyC9g+LcCzktKNKA/XLnYyXA8pq9+nzO6cVsGCx2g8+ayAFziykt8JFSLEuF4c2sYzgagirmyB8p/onlyiW6ojFXPLgLcTF4q3+B4y6204JvV9/ObrY2sobwxfHwkBZeBVRc0IjKuQxU35jd7EwK8Zjhy94mKtyq+uiIZdlxHOhvXjnhlWcKUmyTZX6CSjRT9pwvKlgcSeK7TonCQeAtl2ixaKurrrLT2cmWF6SckPIpa8NdQPZg7bhEEyefYXhOpqx+osg8ASMq8vy9IXl4pcsQzhdVPX7nn1d7bSXajR1mAGx82i4/XsKWt0QAbMXVtgAJjvXFWjPIZUseO+2AQV1YcgDDAPZ34rNdMHqg5xoMw94p5u+HP6Mjl22KK+dxaYIrD81dfJ9QEHH7N+DKI+0MnJR9UST2xPHLg1wceHc6OH1NpJ6oCMW4HdaHz7KtpRIx73keRZvGhSAXX4buvdzCyzsf0LmyxTGrB65Ls0vO2N8fP2cE+nYuRI/SAmY5w6cj54lMvj3AjcFRUhBHtbPrTvXThgVvwUXXbcD/TiArU97ChFx8DPBj2fDMc3xf+t5hkph0VLZv50K7X/l+Z/OB9zu5FdFzxMO2PDJct1TTcMY9F3DcMNwy4KwCDMNDiIp9zD+Hz18w0vtOMezrczNvua5HfAwPuuf240ObK05fGDyRxxN1VL/rasUTTdT3dCfETHv8+E8JG07FjnLes2MB9ujd0RO/hbrAdV225frzcUNxzB492W88+nUpwsNnDUdr4aKDdmJ/FyRi2LNvR5y0d28cu0dPFCZiaEyl8aQTiqB3xwLPM0vjM5l2A3/zhGhJfhwlBXEkYq5lSjqTwYybj/ZsfpLlaSmnKHftkM82RsLi3d8fhLISe7OmW4d8/P3MvfGHo3bGa789gJEXRCACdt9+ce3hGNanIw4f2l1ISGP/tXuvUnxx7WGM+OU3CsW4nO787z5HKWEdxpOw7LkxbLdRZpXntE199Iu9emH8NYcpr900DezZ190E7pAf91igPOukQv/pjmDLLUo2wMfDIZy4V6/A+/H0b/bFZYfa8dnETMoiLjtsEPp1KcKVR9lWSP/7v4OwR+9SZhWpwqxbjsaQsg545dIDcMPxu+If547Avy7eH8cN64l7Tt8THQsTSMRM5v561sh++PI6t94JfzqcEUCEk/fu7YnzevWYXVBSkMB5jpVnEAzDQI/SYB1sosKdVEMjCjR5pLHNUFaSj39dvD/2H9gFp+3TB89fMNITz6a9wTAM7NarFKcM7818sMWYRzHTDgDIB9C1LPvF7bU88qb6JGJINDl3A/IanCk1yQNv2wZZE5l+UgrCzqXhklwiSAYxcx3gLnhFUsZO0+rufPF9IypmtMgm5ZAHZXMjqUT3P76+o3cvY9/FxZTowqTa8KBjiZiBvft2RMw0cKyzSBbblLnAiPC5+BluQGII98w0bP99di+5MrxJO39t/DH3PtD1e2XwyCX5i+C6FblKMSMfLEHZUZBaviWZ4S3TsTDhXK8/0CJttprcOf0dBUy8HF4pEJVVL0lgF6KUt67FjMzNA24ZeJVJN14G51ZgkNWBlzR1ySOvS2EiRvEP/NZ
0fgs+1wqEJ8mYqwz4QK8GRjgB7T1Bvw16fuzP/o65u4ww4dNY8zLxZRnh4VjP0JwiWjXyhBA/B/DzD7tOyTh97oKRXB+4sWhkbmu/3s+21OEtYYjQn37z0e51Gu718nKJJBTgJ0DdZ8qV0SU0TOcc91kXr0m08OLHbsayCZRhvUvdHX64ZXjLGbGn6Hry4zE0pTN46FfDcepwW8FodCx5SZEaUtYBh+zcjcngBmF24+AN7t4B+w3s7H3mnfeJadoXICPjfc8RZwlIz4sIIjb6dylyLaK8xg9cff7rTrP3pntOzHStSWgTh7JY8cQy1ccsj8BlqOJkp3qJ+M9YFo7azU3V7AZy59Jgc1Z+E687ghtDhkscsnWBu3YwuM+48J3mmrhpMOsHmcUe1ZsQxnTHwgQLJuy+kwxMu+kodi4f74cgurycMrwPhvXpiOl/HYNB3Yox+1bbMnzRXcejLfH5tYfhl/v2Y9+n3HgUunXIw6DuxQA3lmnTzwBYwGIDwOFDu+Pbm8agV8cCdC5K4Oz9+2NI9xKfNUlrokN+HGWlBThnVH/sO6AzLjt0kK/9k/bujVtP2oO5tgFey/dB3TtgSFkH7NOvs2ec0waiOy5c619y0yM3Ln6NZpf1jsUvrj0cR+1WhnNG9UfMtOOzFefHWcaylgY0PppLhR4Eco+OCiJBf7FXL5ZxMQg3Hr8bunXIZ1k5AbDYPNnQictkWJCIMTdxADh7//4soHSC20zONtbOGz0QRwgJBcJi2k1jAo8NDOlOqqGRDdsNebRlyxace+65KC0tRadOnXDJJZegpqZGWf4Pf/gDhg4disLCQvTv3x9XXnklKisrt6HUGoT5dx6HeMzEobt0x4FDuuHGE3bDUbv18Ey87RWH7tIdh+7SHflxkxFCHlPgjIVhvTsy8iWVsRemjDxyFDHe8shvRWF/xgyK2WBwcZG8jylPlNiKAbjFtbtYTMTcbCFknRQzDZwpvIzP2s/rlmTvmDp/C7ubfPBk2n3ld9V58Eq+bVHgklsDHJLA7SP4Ftvi9fYsLZRaTPBlVMGmCSz4qOEGtBVLiwq3eA881+l89upYyFz+SGkUyS0+eC6vKHvbdOu+w3EDEscJmWqLFlI8ZNYV7Pp85JhXiQVkFgZehZu/V0HkCO1wk1sOXbesbYBT3IU7QoQcL0exJJU3tfHSRfv5ZBfd5pjSatLutNseBS22r8G1xKBnjhRQwFVoE5ybK0AxNTKeaxdTN/OkigWB3DXc3W+epOAtDU3ufPeZtcfDobt08/QBr+zzhAIA/OHIIV7LI/Y88mXpH1ljwanfNTXhCQHe8oVXTiiu0tQb7dh4ZAJPcWLc2DJeV7lB3YtRWuiNu8P3aUE85mmLf478MY9cObnu9oC/FzwBwf/GxyNxfxf72x3zFMOEudky0tbwxOMhyxmvPPYnpbQePbgrBjmZdEQrvJP37o37f7k3yMWKrOp40u/0EX3xyK/3YfMgANY2KY/uPTbcewx42iK53GfE8D2/dL0sDTzc51kk2Km+prSFvLgJctMDwFJvU510LbYVScZOz53OeEgl9tybBrMEZPOzMA7s+SoDA/Akd7j/l3sxcoysg1mcKhPSe8UsmKhv2THuPhj8/MNZJDnX56437M8jdy3DHr3tIMwsCHycyAH78/GzRzD3rhRncUGuytTXRCCIcRxFdHVcjGh9I4Ym2Nbo3anQExcJAPbo3RFfXHs4W4+UleTb5DvXp6ZpYPTgrjAMA4V5Mdx+8h64+/Q9cc/pe3qsY9oCO/coQcw0MPf2Y33H+Hk6nbGYWx1gB5l+8Fd7O2s9g5UBvGtUgDKB2fecz0LGl3Gt67xj8u7T9kTMNPCMYykk9n97xhPnjGCxhdoKvxrZD3HTYM+uDN/+5ajAYxoa7Q3bDXl07rnnYu7cuRg3bhw+/PBDfPXVV/jd734XWH7t2rVYu3YtHnjgAcyZMwcvvfQSxo4di0suuWQbSq1B2BGCpU2/+Wg
cvkt3XHboICRiJhbddTwjhP5x7giWUYl2vJjbmrPI3H9gF/byoBe8z62DC+pJZVi6U0cOURl0LXvgMUW3d2ZdciZjueQUj2uPsXdbWCpQ0o7BW4l4lZOYSZZM7iJWjB3C++lbltcV7wUn0LXHpNpy3Q54uOleadHvKmi84uLtE/jw8sX7AwDuO2Mv5xfDrU9Y/IvuagouCl072ATooO7FrsLN1cHLFTPAdr+pHXYNHAHWjXN/81ynaWDUTl1Y+lVROff8xu2mEy48cKBQBsxagJR15mbByEtvPbxyLAZ1dwPJO0qdSUoxfGWZuxgbL/DhkkN2co75M7s87lit8feaZC/hAuDybkGyPhPjipCMfEY7Xskn0k8kzkQCM26644vGNLM4FAg5m2Cga6DrdN0HeHkNw0sSe/vSDbzM9zP/SX3Bu+kdNKSbHeNJINYAngiFJx4LP2fxd4BkjXlkd8/L49J88/1FfckT6aTAiJkIi7jdXdEd0iU+DK6M/SnLHEgQf5I98oawYlLVY4CLe8PJQWQYb11lW3Ja7N6S5dEHVxzM6i103qFn7dcP952+F/t92T0nYGC3Yky5wSbjeOWyQ37cJmA40k/MyENkIuC+IzyuVPBaesmIS+oLV+H09gmLp2e5gbwZBFKXer4plbF37bl5hnfLZpZHhh38NpnKIG6atrUfp/wW5sUwvF8n9q4mQidm+q2e8uMmmtJ2BjBewf7VyH6MHCOrHdN03QMJxflxDHJimNAmjGh5BLhWfgbczHb8uCXZ6VmhAOu3nbwHc2ehPqH7Sffo4J27sdh7YqBgQoJzBUwLc9L2jNNH9MHFB+2EcdcchjP27Yujd7cDJx8+1I4nRwGuAZsobGsiTESxxAWrOD/OMgYG3c9z9u+PkY41qkuoeud9OwafvZFDMYDSae+9H9itCGfv1x9FebEdYs3eXnDCnr1gGAY+crIyykCujRoa2wPa18wZgHnz5mHs2LF47rnnMGrUKBx88MF4/PHH8frrr2Pt2rXSc4YNG4a3334bJ510EgYPHowjjzwSd911Fz744AOkUinpORoaKnTIj8MwDNzoBO20zVGdnUrDja3Q5CxiveQJcM0xQ/G7QwfjxQv3Y+564iKbdkePH9YT+zqLAXGxL1ot0KKYt0gA7MUCH4w5QwteZ7H4jJOemOCmNeUUcu/msMftg7JKsF11XxBw51zDjbHiy2jBKSeptFfpFcs46ocybpNIxJiGm72KsveR9QIRJdL6JEpvEK471gnEDC7YquXGWwCAUYO6sGvhr49XPikWkm1tAo/26lfQvOfz0okEAH95Iwd29hxjpICjuNBYBTjiUCQ6iHww/AFVWdYhgWyUBY61iR2OCOSugkigkQOo39zfqG5meWQaLN4I1c2Tfy6p4u0Pr6k+Bcy2fyM3U8BriWFfu/tJZfn+ca2uDCYfy3IjKt7sPrgWTWD1gbks8CSbzP3TPcZbDIlkmdsndtpp93yySvS5Mzr97rWO8BIEfMBsdm9Nh4QQx6nhV9gZ2eIo3DxpR7+7/W//RvG4ANdtjSezRIKCWXAIChXgd70UCXMeHssjQ24Fyap2xrwB9/mgvmTEGEfSuHF0yPIIzCLiumOHol+XIiz82/EY0LXYk+aerlO2u/7a7w7A7w8fjAsPGoiuxXmIx0yf1QDNgwA3DgQCzs5a5fQPe46884w3VpUXvZzEBoV5cXR05l+esLLb8I6H0sI4enUsQCzmvjdSaTfLlh0TyC6cFzPQ5KQ+T6a9z26H/Dje+7+DUJiwFWLmImi6Y4TuWX48hsaknRmr3nEFJIixk5iLoeHOXKUFCXzxp8PRv0sxRu3UFc9fMBLXHzfUc70kly/rGv8dhjcguPD+GdanFJ2KyBLIO9/wSIkThYMXLtwP540eoCyzPeLAwd1w4l69WJyZU/fpg0TM9MRQ2t7wmwMGYN8BXbBbr1KWNv7/t3fn8VHU9//AXzOzV5I9cl+QAwgkIUDCTVD
ucKlUalWgrQIe/dICNdVapa1gsYrWX78etdVvay1a79ZS+5CKWhH9qlgEpGrrF49SrJZDVAgEubLz+2PmM/OZ2d1kE8jJ6/l4YJLdmdnPzs6uM+99v98ftxSfBo+m4h8rp6M4KxU/PLvSOrZHlmbisW+MsXqHBf0eRHUd2UEfNE3BuP7GZ8l7N86E36OhNDsN06ryrew1IiK3bhE82rhxI9LT0zFihD0rQ11dHVRVxV//+tekt3PgwAGEw2F4PIkb3h09ehQNDQ2Of0SJZKb5rN4iwtETTY4Tuaw0H/rl2LXGkypyrXI9udcRACslfumU/ji3phceuXwMhhZnOLYvTiT95jdDIpAjZyuJ20UmiCivMzKGjPu9Vk8mY3tfHDNOzHXptpiZiKSLQWvmGvPx7IwJ5zjjZUOIAJZHOvE9YZb4iNTqSea3he6LCpEhA9gX7jPNaZ7jlWFZGVz2VZ31X3GhrQAY0juC35gZUbEZYbEn5uLbTE/M+Oznr8Do8yF6tYhAgiNjxNyemA1MDtgl6tPjDlTIF7rub5AdQRnXi+QO9MmBCfcMVWIroqRSU+0eXmKzItApLmisOn8ltjeWXQqFGO5v6xUgJvhjPV/FzqaLyZRT7Ivg2D5j5rJyQMRcPxrVrX2rqXZgySiL0K2fNUXpyEpz9lOQZ3py71Oxv+LNSul+PymQA1fGY3vNEjq5R438mCIjUFXk/jBKzDLis8DKMNSdGT6Qfhj3OferqthZRZlpXnu/SceuGmd7cvBFlfa/GKddTqRYWWPyrHdyQFrwas7XU7Fec6Ao0wioWM3MXe/rH88ejCWTyqQ9bi+rKHbAWRDHhMjsUJXY49HuWWZ+firOfnhiHVFKI8YjT37QOyPF0UR1sTlGEQRLlpjWubIgDI+mwqepMdu4c+5QqxGrrktlmtL+9mjG6/3S1ZNQnufcJ/bnnf26y9Gj4sxUXDvT+MLlO1P749fzR+Lurw+zMmjsoLCxfE7IjwF5QXz/rEr894U18KpGg3AAVs8WwHhv+D0qemekSGVrKpqiRvDHq6n46ugSa/l5o4qxrn48+uUEURAJOII+OSE/Xrp6ktGMvClqzmTnDB6F/B7MH1vqCBrJnxOyGYPyce/8EcgNB6z/18shGhGotfYXjMC7yLRSFGfpojsw9NTScdZsTtYyrrKzdfXjrAxVt/L8kJXpIJ8ziOxc6nqevmJci9kp4jPjsnF9rdtSfBpG981CVWEYQ3pH8NyV43HhiCJs/uFUhANe/PbS0QA6vxyRiLqP1k0b0El2796N3Fxn8zCPx4PMzEzs3r07qW3s27cPN9xwQ7OlbgCwatUq/OhHP2rzWOn0Mr0qD3WV9rF5x9waVJsp8sLUgXnWzF4ykRED2CfOX6opdNTf1/YzpiVdMLYUAZ/o1WMsnCIHj8xMCXEieNTMfpKnYDUeRyo1cn3jWNsvC5t+MAW7DxzB0RNRXHDPxpj+QzGZR9LVrjyd728vHYXcUACbvj8Ff3jjY+sk252BkRsK4P5LRuGtj/bjRJMOn8e+7/tnVeKF7Z84M5h090W+8fMb4/vi3pd3xJb0SY/p9bgv2O0yEgBI9WkJy8HiZSHc8/XhqLhuneNCyZjGXbF6KYl9ZWV9OC78nRkSYnzl+SHsbjgCRTnuaC4ttiUCCfI45eGJC2R3mRYQG1wRF7Ki/4YcsBKvvTWVthx8aIrfx8jjWtYjZbJEdeM4zjd7b7j37azqAlT3TneMU14/JnNJer7iOIht2m48QY+mOJrJy9t29GpRxHbsY92jqtKsbcZ9N355MDJSvVgxy+hJJZcKyWVrsNax33N+jyr13bIDc3LQVtynKYr1GimKEZyTM/jk11NkP4oMFveMjvK43E2Zxd9WoBY2XWwfivVTBBMAo9fXf/YfMZe197/cC8aR5eV6b8lZfmKqeHlZ8f5RFWeQfc6IIjy2+d9WBlq85uEhvxfAF1ZGnDsQPVDqQaEowPWzBlrNTB1lb9J
+BoBzhhTi0dc/tN6PMjHttDjm3QFZOdtSk/Zrk25/ASCX15xKPznfKHe7SmoMW12UDgCYN6oIeWG/NUOZCNoBwJll2WiK6nFLWeT3sThmZV5NsQJ8fo8Gv8fIEBHcmUzDijPw7HcmWPfnhv04eMTIFj9+wi7bmTOyCAGvhmtmVOCldz/BtIF52L7nII5HdVw3owIeTXVkY/k8KjI9PmuWsOVPvu0IOBZnpWLmoHyUZKXCp6nIDDr7MaqqguElGXjtn59Ksyjax1tLHJlHmp15FPQbTXYvHFEEBcDvt3wEBcAic0r4ey8e0WzPPQD4+phi5Ef8jmBnRX7i/irChu9ORHFmKnasOgsAMGFATstPhLqNvy2fZv3enbOviKhr6dTg0bXXXotbbrml2WXeeeedk36choYGnH322Rg4cCCuv/76ZpddtmwZrrzySse6RUVFzaxBpzORPi+cW2NM+V5jnpCLZeKdXM4bZWSj/O/3JiE76Mcdc2tQEEmxvlGUXf+lqpjbRPBo/thSfN54DJc9sBlNUR0PXz7aSElW7bR3uf5dBI3EBaj9zbGC3FDA+narJCsVoYAXaT4j1b+uMtfR1yWqA7eePwSKAqx9a5fUkBgY1984Cc0NB6xSGlVRrCli5aaOEwbk4J1dDUY2hKJYZSj2xb7zIk7+ote6WJVKCoyfsMdpPpZPyoIR2xXbswIoJrmhrRinm7tkzFpCgVUWJDf9BYBjTUZQr8ksEZQ3K373eVQr+8Eqw3JcoEnPIU5wS9wnB1nqKnPxl3f2Wsso1rLSdqE4y8tcy9gBIRUnmqLwapojECYTASdHz62o7jiOxfMQ2xXlf8b2jA2Kb1LlMqxD5vTkjtIq16CtwJIj4GWXvAD27GhiZik5EBDVYzOPACOTIyfot0pK4xHlJHZGmn0MHjsRxfYf27MVyceMHYS1b1JVOLJTmnQd6Sle7DdnU3JMsQw7OCx+elSjb8qymRV2YFW1Z6QS+0I0CHcE3az9bh937oCOGIM4/OSSRDn4EK+RvR1Esv82Aqqwg3nip6JAU1VHELU4KxV1lXkx2RXycRHVdbx49UQriCD3oXNriupYYF5gbb1uKu58/j3r+D56PGo9LmAH+uTns+ZbY/HlX7xqBVg8ZrmVEdQ31o869qUqHWNmA301tvn8qSQCY0vNKallq8w+SrOH9jLHZB8PXk1FohYo8meJdYyYt979tWG47S/vJgx+LJtZgcw0nxXUWn7OwJhl7vn6cCiKgsc2/xsnolF8zywDmzHILqsRk1ps2vEZQgGPFcRuzqBeEbO8yX4NS7PTWpyRSM70FUHXZGgq8I+V083fjcksFCgYXpKJ4WZ5LgBrGvnRfY0vjuoG5uHVD/bF3eaTi88AYGTQAcC6+vExy7gnyJBx9qWeLZIaO4MuEdHJ6tTg0VVXXYUFCxY0u0zfvn2Rn5+PvXv3Om4/ceIEPvvsM+Tn5ydY03Dw4EHMmDEDoVAIa9asgdfb/Iep3++H39/8tI5ELamvM77ZfcVsYtocMR2oCDwla1hJOu6/ZBQGmGUESyaVYWSfTAwzy9zi9UwQZXEAYvoPuT33nQnwagrOHmKcpN873yjpev/Gmdiy83PUVeY6ZnARV7/u5tMBr4YUn2aedBu3RV19H/ubU0v/Y1dDzCxW8uZ03e6/Yvxt/IxXfiX+FmV68sW82K7IdJAvjAHnBbvYb27uAI6iiItKOVsD1jf5gFFeV54fwrce3Gr1W7KDT8ZvBZEAqovS8dHnX1iPZX9D7hyf5nq+8njk2cliysAcQSM7SKYozmm7gdhAjFwCKRrYRnVX9o20LADMry1FVtD5uSrKwOJdfIlDMzfkh1czXpemqI5vTy5Dvtk/Rc4aSNS0XZTSeFVj1rf3zSb3dZW5jlImBXAcV1FdR5rfzuwLp3jxtTElqClKxwUjEn+ZcP8loxDwanj4stE4amZJiP0FAA1HjlvLvnj1RPz7sy/MfSxmW5NfIyP4Jpc
QRXXg6hnl0HXgv81ZdgC5nNTuyaTrwPs3GRkF/zWhH46eaLLGo+twHHu6bpfITSrPkQKsxl6VM/kUsc+k2wQ54CiXrakKUBgJuMrWnO9HoxTQmTkESBliciaT+Zj3zrdL2a3tmq+5KAUsyUqT7nN+Hsq+OGaXKWWm+RzZbkdOxAZ7RYBMPKWmqI6Xrp6EVJ+G6qJ0s4+b0fRZlFtZnzcwMrNEiZc4huP1s+po4v9Dck+hRCoLwjElsPKXJaP7ZkFT1bj9eADjuASMhtQAcIk56YRMlH3dOW8ohpdkOoJGbqKXYDLEY75341lJrwMY5crpKV6sfvVf0vuo+f20bflURFK8UomZgpygH0sml8UsK7IZZf1ygri4tiTm9mrpS6pEbjVnNyQiIjoVOjV4lJOTg5ycltNka2trsX//fmzZsgXDhxtNftevX49oNIrRo0cnXK+hoQHTp0+H3+/Hn/70JwQC7GZPHavXKZ4idP1VRjr/98+qQKrP40gzFyfigmh8CriyIMyLE/esa26J+mt4zG9FxTejgPHt8MjS+NkYc0cWY9aQQvzhjY+RHw7gkU0fxjz2lMo8TKnMw7TbXrSCWlYJklRqIoIy4orFPfOPnElT3TuCf+5rtLI+3BeMVtAmTlaEnOFgjMFe79bzh+Dq378ZU+IjeraIC3cRjFAV+yK6PD9kZGVZWRZyGY/xU/QrWPvmrvg9fMxtzq8twdv/abD2jft5iV1cnJka0xTYyhZwzLZmX8DL6yuufev+5t1tw3cnOqYHBoBpVfGD/HJ2nJOx3b45Qbz745lY9OAWRHWj6bybXKJlZzpJwTHFLrUTGQn3zh+JzxqPWWMwlrX3S1QHbp8zFACwYtZAaGbJirF84lloxPtxbFk2XvvnpwCMRvsHj5zAbXOqrbI8ACjJSsOhoyfM/it21o28R62G2ebvuq5bwWH3PlAUZ6mk+7WxZiyzsuqcwbJbvjIEmqrgNwtHYc0bH5n70e55ZARLzHI6KfPI0XxaCnzJgRXxuaOpdpDUfv/ZY5TLusRjyse+u5F9PHZ/JGcg5kypyXS89Q8fczVINgOpIb8nJtAV8Ko4fKzJsZ0TUR3FZuP2JxefgV+/vEMKspr95KTPBefzMV6vC0cUITfUNb68kl9jt6+NNrJmn75inHWci0CtMyBu3B4v+N5aXzJ7JHW2ivwwKvLDuH/jTiNDMUFgTCYCYMKMQflQFMX64qgleeEAVp47qE3jJSIiOpW6RYe0yspKzJgxA5dffjk2bdqEV155BUuWLMHcuXNRWGicUHz88ceoqKjApk2bABiBo2nTpqGxsRG//vWv0dDQgN27d2P37t1WyQhRd9PXLNFIpifGl6oLoSgKplTkoiCSgiG9I9CksoiWMo9aY8agfGQF/fjVxbGZAD6Piow0Hy49s4+VxZTosTXVnkJY14Ghxel2A1vYF5GCu1+GmElIUxX4PZrVD0peRg4aWQ1s4bxQcpfbyBfI7tm13NdFcjmcWNd9EdYrPQXhFC/k6gp3xpachRLbCFwEfeC4PZ5wiiemj5E7IGSXCMU2tpa/LQeAjFQfsoI+64L35WsmOcq4SrPTpHXi/y9GZCnJpTEy+fAQr5P7iJGDDLqu428rplkzx9jlmio8qtFzxT2rUGaaD69eO9lxMW89flS3gqdy5kprjO6TiTeum4phJRkozUrFl4f2tt6/QlVhBBuXTXFlfNn7QFPkrLDEWSly2ZrYN+5l3YFkEZwRj5WRZl/gypf/OuJk4kmBaVHyKDMya9wNoZ0lPqqioCI/5Ch7soIrVl8vZ/akO4gaT7xjGQAevMz+osndHPY3C0ZiUC9njxiPZmSrbfpBHcb3z3bcN7umF1adN9hx7Lo/07yaguNNUePYMzP0jGbrIqiiWL+rirH+18eUJAy0djSPqib8XLnxy3a/Pvn1M8o/3YH4ljOYuiNxfCXb80h2YTPZi0RERF1Zt2iYDQAPPfQQlixZgilTpkBVVXz
lK1/BnXfead1//PhxbN++HYcPHwYAbN261ZqJrazMmRq8Y8cOlJaWdtjYiTrDecOMXge/NmcQ+9OSM/Gth7bgyHHRf8O+2Kmvi+2B0RZTB8Y2Bo8ncfAIdl8kXceabxk9Hbb/eAbue/lfdk8U82Rdt9ZT0DsjBecP6421b+5ynNi7MzDkE31dLslxZAE5Mw3koI07COHuySSCPnZ5il3iM7LUKKt4cskZ8KoqXnr3E2t99/WV/Kc8LkfDbJHFocSuuGRyGa54dJsVzBLP1/28xIWs2J67MTUAvPDdidYyc0YW4bxhvbD61X+hd0YKemck/vZclH4loirxM4/cr9m3JpVZQUQ3EVSJpBglye/fOBMeTcV7N86ER1XQPy+I9f+318r+kBWmp6DhyHGk+jT0zQkixavht5eOcvQsaytFUZCR5rMa3zYnFPAinOLFNTMqMLqPkdGn6zq+NrpEyjZL3A9HZFdpqv23uyG+oij4y5UTcOCLY6g1y4n0BIFkcTz5vSqOn4hax11VYVgKfDgDq7+6eIS131TFGTQV5XRinT98aywUxejRIjLARABNNd/f7hKzeGWlwus/qLN+l0vs4u2uOSOKUJyZik0/mGLdNqkiN2Y5n6bg+ImoFZB+9BtjrPsK01NQmJ5iNDc2H8/9uSDPandCCoiLkj/RiB0AUv0ehANdqz/JTecNwvt7D7W4XHVROn63qBYHDh+PKf/UYb9+qxeObL/BdoKsoB9pPg/C5ox2REREp4NuEzzKzMzEww8/nPD+0tJSu2kqgIkTJzr+JiK7DwgAu/+Qrls9mjpKouBRRX4YvdKNYIR8oez3aK6gDBzLqAoQDnil/ihi6nMl5iJaECU5qpHg4GggLJqRy9lD1UXp+Md/DiS80FYUALruLIdTnFka4iLDbtasWMGe2Myj2KCK6irlcfcbkW6ynoN4nvHGLLYn/7OylACMMoNdfbLT8J/9X1j7QlO1FoMiXx1djJqidLx5/bSEy3g1e98IP5s3FKVZaY4eSvGCOTkhP1aeW4XyvBCOStNqi9I0kV3i92goiBiZXvGIMpTO9L0Z5YhG7cw5wNj/dQPzEI3qVlZRvP+l1VXmQVEU3PyVIXjlfaOxbqKSwrJcI/PpkW+McfT4SfT/ylXnDYauA4+9/iFSfZpVNigHcioLQihMDzimkRalbXajbPO9awbC5NI7d+aRnPUngmLiMd0N3oUcs9Tr5vMGW+9nOZtLdovZnLmlaa+9mopjTfb6Y6QyXUFT7c+IJlcjN/Ee93s0K/vRapgNuxG7oiiYN7II59Z0jbIsITcUwD8/aWxxuYBXw8jSTOw68AVKs1OhqarVmFsE8AFgYnlsgK47++VFw6EqSsLybiIiop6o2wSPiOjkleeF8PnhY/jf9/YhGtVx57yh1kVOR6ksCCMzzYdZcXpY/D+zuect6/4v5oJWgdSI2rwgkcu65KBKeooPexqOQlONINlkKbPAzvSxS3JE6Yjwk/OHYNxPXnBcyGal+ax+P8tm2jODWdtVROaR3ZBYrJuoaiPeLGlC0O+JKa9xlq3BkdnhlhcOYGRphqOPjjyTl7w9ecpwq/m0ruPxRbXW9nJCftxuTnOdjJvM0pbmMipu+coQ/N3s2yTEOy7cHv+vWgS8Gi6uLQUAR/+teG6bU9Nsn5zO5n4PXjV1AEIB43/PqqogO+hPOH7RNNpYxj4+Wpq1Sw5UueOrVYURLJrQz3rt3GWyI0szrcfyezTkhuxt3bdgBFTFWa4kgtbxypfEW2DqwDz0Sk/BroYj1vtRzorzmDNUAUCiybTmjirGoaMnkBPyJwygJcvrUXE8TraabEaV3bz5RJPzsb5UXYgzyrIQMAPfa9/cZe4D437RkFqBEfAMJzFDWEdLlO0XT7xZQlszG1l3w2wjIiI6HXW9sxUiajffntIfK2ZVYXJFLgrTU/Cl6sK4Mw+1p6evGIfy/BB+Nm9os8u5L2gVM3rkmN3JXMYuMzP/Vu1
Mh6iu474FI53bgXFhKjfoFRkBABwX7uKnMfW50ZNJbk5ux38U6786dFQUhOD3GKUpiS6g/B57um53ptG6+nGYN6rYcZtz1is7+8I5Bbrxu1FOMtZZVufqESX6yribklf3jljZHIJXU61pvE+VrKC/Tcdfa2ZVAoy+W8lM391VLJ3S3xFQuvWC6mb7WgmVBSEsPKPUzLxL/vHcQZay3CCujRMgFRZPKks4nskVeVJTbfs4i+p2QEiW4tMwu6YQhekpqBuYZ5fpufosqa7tJdodQb8Hy2ZWGgG05mM/zUpP8SKc0vz3ayk+zQrCucvWfB4VBZEUZKT5rIbJ7tkdvVriwHJXUFOUjq3XTW3z+nIQnYiIiLo/Zh4RnYbkYEpX5S43G1acgQVnlDoyeaJW8MhZzqUoRqNsuUzPzQpGKWbzaamBsDVrlfTzSFRHwKs5SocuGlMSE5xJ83ugqSq+Mb6PtW7ixrODrCCBexF5hh4rSCZNyR7v73hGlmbg8PEmvPTuJ/Zzh72e2G+KAqT6PPB7VDy55MzEGzzFkgmKUHL7qSw3hBWzqnD5A5tbVbbdlgydloIe150z0CojtXqQxVnJ79Fw+1w7kGwsoqCyIIxIihepogm+9P5OZl8YZXBtzzy6YERRUllwAPDS1ZOQF2l5ljRdlK0pRslaqs+DG2Z33Vm0FEWxStDaIqrr1jFARERE3R+DR0TU5agK4HeVBYwozcSI0kw0Hj1hXYQ2NxtZNGpPKx+PYpatuZtiA87p28Xfum5kTYmsJAC4YfYgbPzgU3N7RsbRn68Y52gULfd+cZOzS5K9IBbTkBstlWIzj0aUZOD5/9tr/b3gDCOI9ZN1263yNTmbQzxPBcDtczu+vKs8P4QfnFXZsQ/aDSWYuC6uNJ+GYy2UXMnaEjyK15NL1ifbmKXunZUz8OBrO63ASUv6ZKch1efBjN7GrGO3maWSl43r63g/JjO+k5lQUlMVpPljT5E2Lpscc1txVstTrmekepHi06yx//CcgQCMZuk9VTTJ15yIiIi6BwaPiKjL+cfKGQl7SsgXdFbmkTW9t/m3mXF025wa5LrKr+SyNd0sW1PgbMIrtqNIQZaorseUcsnbmzAgB/1ygjHf1MvNhZuT6Hr4qqkDkJ7qtZ6XIv1UFCDk9zjWveurw1C5fF3cbYlgmyLtJ7mvTDDOxXJ7y0zz4fLxfTv8cbub1lyEy1Opt+SVaycjO9j67JJkg4wpPk2aSa3l94K7f5X4HDijLBsfm03b5RLT5sZ3Mj2PEonX2ycZr31/Cnyaiof++iEAO7jWUz37nfEoCAe6dFkeERERtQ6DR0TU5STTjHTluVUozw9ZfxtTRNtZRLoODC/JSLwBRUwl7Ww+LdYHjJ4fc0YU4ciJpsQZTObP6VX5ce+XZ1trTqLL4aVT+kvjco5PURSc2T8bn5rTnRu3JX4M9zMQZWtpfg8iqW0vT6H211KmjyxexkwivdLbFgxpTTBLzH54sm2neqWn4J2VM7Dv0NGE70dh/IAcpCeYYa8ziCzDW83Z3nq6AXnGZ/OlZzIwTERE1FMweERE3ZKYaUtQFTljKH7Wwd9/NB07Pz1sLAPFUUojZ0UoitErqE92Gl7/12dQm+JPkw6IxtS18e8EsPCM0qQutJO5FheNiBUz6KPAeJ45QT/SU73Yf/h4wnXnjChCf3OqdgDIC9uzc/XLCaJfTjDRqtQFpHi1LjUteDDgwZJJZUktaxynwKBekZOe3THFp6EoMxVFmc2Xig0rzsCw4maCx50k/TQL0p7ZP7uzh0BERESnSNc5EyUiOgma4sw8ihc8SvN7HAEiHUZTbQWK+dPeltiOWDZRCUzAq2FkaeLZv8b1z2mxdOO/L2x5Nq1/3nQWAGDB2FIzgGSU6ek6kBX04f6Fo5pd/5bzh2CodDH91+/XQVMVpHDK6W5hSkUuXv9BXWcPw+LVVHx3enlSy6qq0by6qjCCslwGKYmIiIi6IwaPiKhHMKb
yNn4XDa7jyUj1oSI/hMJIAP1ygphamYeqwrCjpEb0PxIzsGnNzNqWjJZKjs4b1rvFAJNobj1+QI6jbE0HkuolI/zlygnwmxksXk3FTy+sSW5F6lSqqiDShcqwWmNsvyxcZzaIJiIiIqLuiWVrRNSt/WPldACibK3lTKH8SADr6scDAC6SSt8CXg1eM4IkN5QWP09m5iYAmFie0+z9relpI2ZHM5p+t67/jDvzI5l+TEQnoyw3hLLcUMsLEhEREVGXxcwjIurWUn1GDFye1awtwZ7rZ1Uhw5wpTVGMrCN7Snt7prK2Wt1CWVlrYjjFWamYO6rYyDzS9Zh11181oQ0jJCIiIiIiio/BIyLqEVRFkTKF0OJsTDHruyIwmlkGJ/ohNbXDtN+y1mQe+T0a+mSn2VOgq4o1W5uiAH3Z/JqIiIiIiE4hlq0RUY9gBHuMAMqUyjz0zmh+NqaWqFID7sG9Igic5CxR7UEBoCO5mdqIiIiIiIjaisEjIuoRVEVBv5w0PLX0TAzqFWnzNN0PXz7a2p5IRpo3qvhUDfOUUpTW9zwiIiIiIiJqLZatEVGPcP2sKng0FYN6RU5qO2P7ZQMwSt/kPkod4V83n92q5VVrtjU7+6idq+uIiIiIiOg0xOAREfUIxVknV6bmNrwko8tn9Khmw2xNGieDR0REREREdKqxbI2IKI4lk/tjT8MReLWuG2M/Z0gBvjjehHDAi5yQH08tPRMBb9cdLxERERERdU+KfrLzT/dwDQ0NiEQiOHDgAMLhcGcPh4g6WFNUh6Z27QwkIiIiIiKitkg25sGvqImImsHAERERERERne4YPCIiIiIiIiIiooQYPCIiIiIiIiIiooQYPCIiIiIiIiIiooQYPCIiIiIiIiIiooQ8nT2Ark5MRtfQ0NDJIyEiIiIiIiIiOnVErEPEPhJh8KgFn376KQCgqKiok0dCRERERERERHTqHTx4EJFIJOH9DB61IDMzEwDw4YcfNrsjiU5WQ0MDioqK8O9//xvhcLizh0M9GI816ig81qij8FijjsJjjToKjzXqKLqu4+DBgygsLGx2OQaPWqCqRluoSCTCNy11iHA4zGONOgSPNeooPNaoo/BYo47CY406Co816gjJJMqwYTYRERERERERESXE4BERERERERERESXE4FEL/H4/VqxYAb/f39lDoR6Oxxp1FB5r1FF4rFFH4bFGHYXHGnUUHmvU1Sh6S/OxERERERERERHRaYuZR0RERERERERElBCDR0RERERERERElBCDR0RERERERERElBCDR0RERERERERElBCDRwB+/vOfo7S0FIFAAKNHj8amTZuaXf53v/sdKioqEAgEMHjwYPz5z3/uoJFSd9eaY2316tVQFMXxLxAIdOBoqTt66aWXMGvWLBQWFkJRFPzxj39scZ0NGzZg2LBh8Pv9KCsrw+rVq9t9nNT9tfZY27BhQ8xnmqIo2L17d8cMmLqtVatWYeTIkQiFQsjNzcXs2bOxffv2Ftfj+Rq1VluONZ6vUVvcfffdGDJkCMLhMMLhMGpra/H00083uw4/06iznfbBo8ceewxXXnklVqxYga1bt6K6uhrTp0/H3r174y7/6quvYt68ebj00kvxxhtvYPbs2Zg9ezbefvvtDh45dTetPdYAIBwOY9euXda/nTt3duCIqTtqbGxEdXU1fv7znye1/I4dO3D22Wdj0qRJ2LZtG+rr63HZZZfhmWeeaeeRUnfX2mNN2L59u+NzLTc3t51GSD3Fiy++iMWLF+O1117Dc889h+PHj2PatGlobGxMuA7P16gt2nKsATxfo9br3bs3br75ZmzZsgWbN2/G5MmTce655+Lvf/973OX5mUZdgaLrut7Zg+hMo0ePxsiRI3HXXXcBAKLRKIqKirB06VJce+21McvPmTMHjY2NeOqpp6zbxowZg5qaGtxzzz0dNm7qflp7rK1evRr19fXYv39/B4+UegpFUbBmzRrMnj074TLXXHM
N1q5d6zj5mDt3Lvbv349169Z1wCipJ0jmWNuwYQMmTZqEzz//HOnp6R02Nup5PvnkE+Tm5uLFF1/E+PHj4y7D8zU6FZI51ni+RqdKZmYmbr31Vlx66aUx9/EzjbqC0zrz6NixY9iyZQvq6uqs21RVRV1dHTZu3Bh3nY0bNzqWB4Dp06cnXJ4IaNuxBgCHDh1CSUkJioqKmv02gqit+JlGHa2mpgYFBQWYOnUqXnnllc4eDnVDBw4cAGBcaCXCzzY6FZI51gCer9HJaWpqwqOPPorGxkbU1tbGXYafadQVnNbBo3379qGpqQl5eXmO2/Py8hL2YNi9e3erlicC2naslZeX47777sOTTz6JBx98ENFoFGPHjsVHH33UEUOm00Siz7SGhgZ88cUXnTQq6okKCgpwzz334IknnsATTzyBoqIiTJw4EVu3bu3soVE3Eo1GUV9fjzPOOAODBg1KuBzP1+hkJXus8XyN2uqtt95CMBiE3+/HokWLsGbNGgwcODDusvxMo67A09kDIKL4amtrHd8+jB07FpWVlfif//kf3HDDDZ04MiKi1isvL0d5ebn199ixY/HBBx/gtttuw29/+9tOHBl1J4sXL8bbb7+Nl19+ubOHQj1csscaz9eorcrLy7Ft2zYcOHAAv//97zF//ny8+OKLCQNIRJ3ttM48ys7OhqZp2LNnj+P2PXv2ID8/P+46+fn5rVqeCGjbsebm9XoxdOhQvP/+++0xRDpNJfpMC4fDSElJ6aRR0eli1KhR/EyjpC1ZsgRPPfUUXnjhBfTu3bvZZXm+RiejNceaG8/XKFk+nw9lZWUYPnw4Vq1aherqatxxxx1xl+VnGnUFp3XwyOfzYfjw4Xj++eet26LRKJ5//vmE9aa1tbWO5QHgueeeS7g8EdC2Y82tqakJb731FgoKCtprmHQa4mcadaZt27bxM41apOs6lixZgjVr1mD9+vXo06dPi+vws43aoi3HmhvP16itotEojh49Gvc+fqZRV3Dal61deeWVmD9/PkaMGIFRo0bh9ttvR2NjIxYuXAgAuPjii9GrVy+sWrUKAHDFFVdgwoQJ+OlPf4qzzz4bjz76KDZv3oxf/vKXnfk0qBto7bG2cuVKjBkzBmVlZdi/fz9uvfVW7Ny5E5dddllnPg3q4g4dOuT4tnPHjh3Ytm0bMjMzUVxcjGXLluHjjz/GAw88AABYtGgR7rrrLnzve9/DJZdcgvXr1+Pxxx/H2rVrO+spUDfR2mPt9ttvR58+fVBVVYUjR47g3nvvxfr16/Hss8921lOgbmLx4sV4+OGH8eSTTyIUClk9PiKRiJUhyfM1OhXacqzxfI3aYtmyZZg5cyaKi4tx8OBBPPzww9iwYQOeeeYZAPxMoy5KJ/1nP/uZXlxcrPt8Pn3UqFH6a6+9Zt03YcIEff78+Y7lH3/8cX3AgAG6z+fTq6qq9LVr13bwiKm7as2xVl9fby2bl5enn3XWWfrWrVs7YdTUnbzwwgs6gJh/4tiaP3++PmHChJh1ampqdJ/Pp/ft21f/zW9+0+Hjpu6ntcfaLbfcovfr108PBAJ6ZmamPnHiRH39+vWdM3jqVuIdZwAcn1U8X6NToS3HGs/XqC0uueQSvaSkRPf5fHpOTo4+ZcoU/dlnn7Xu52cadUWKrut6RwariIiIiIiIiIio+zitex4REREREREREVHzGDwiIiIiIiIiIqKEGDwiIiIiIiIiIqKEGDwiIiIiIiIiIqKEGDwiIiIiIiIiIqKEGDwiIiIiIiIiIqKEGDwiIiIiIiIiIqKEGDwiIiIiOkkLFizA7NmzO/xxV69eDUVRoCgK6uvrk1pnwYIF1jp//OMf23V8RERE1DN4OnsARERERF2ZoijN3r9ixQrccccd0HW9g0bkFA6HsX37dqSlpSW1/B133IGbb74ZBQUF7TwyIiIi6ikYPCIiIiJqxq5du6zfH3vsMSxfvhzbt2+3bgsGgwgGg50xNABGcCs/Pz/
p5SORCCKRSDuOiIiIiHoalq0RERERNSM/P9/6F4lErGCN+BcMBmPK1iZOnIilS5eivr4eGRkZyMvLw69+9Ss0NjZi4cKFCIVCKCsrw9NPP+14rLfffhszZ85EMBhEXl4eLrroIuzbt6/VY/7FL36B/v37IxAIIC8vD+eff/7J7gYiIiI6jTF4RERERNQO7r//fmRnZ2PTpk1YunQpvvnNb+KCCy7A2LFjsXXrVkybNg0XXXQRDh8+DADYv38/Jk+ejKFDh2Lz5s1Yt24d9uzZgwsvvLBVj7t582Z8+9vfxsqVK7F9+3asW7cO48ePb4+nSERERKcJlq0RERERtYPq6mr88Ic/BAAsW7YMN998M7Kzs3H55ZcDAJYvX467774bb775JsaMGYO77roLQ4cOxU033WRt47777kNRURHeffddDBgwIKnH/fDDD5GWloZzzjkHoVAIJSUlGDp06Kl/gkRERHTaYOYRERERUTsYMmSI9bumacjKysLgwYOt2/Ly8gAAe/fuBQD87W9/wwsvvGD1UAoGg6ioqAAAfPDBB0k/7tSpU1FSUoK+ffvioosuwkMPPWRlNxERERG1BYNHRERERO3A6/U6/lYUxXGbmMUtGo0CAA4dOoRZs2Zh27Ztjn/vvfdeq8rOQqEQtm7dikceeQQFBQVYvnw5qqursX///pN/UkRERHRaYtkaERERURcwbNgwPPHEEygtLYXHc3KnaB6PB3V1dairq8OKFSuQnp6O9evX47zzzjtFoyUiIqLTCTOPiIiIiLqAxYsX47PPPsO8efPw+uuv44MPPsAzzzyDhQsXoqmpKentPPXUU7jzzjuxbds27Ny5Ew888ACi0SjKy8vbcfRERETUkzF4RERERNQFFBYW4pVXXkFTUxOmTZuGwYMHo76+Hunp6VDV5E/Z0tPT8Yc//AGTJ09GZWUl7rnnHjzyyCOoqqpqx9ETERFRT6bouq539iCIiIiIqPVWr16N+vr6NvUzUhQFa9aswezZs0/5uIiIiKhnYeYRERERUTd24MABBINBXHPNNUktv2jRIgSDwXYeFREREfUkzDwiIiIi6qYOHjyIPXv2ADDK1bKzs1tcZ+/evWhoaAAAFBQUIC0trV3HSERERN0fg0dERERERERERJQQy9aIiIiIiIiIiCghBo+IiIiIiIiIiCghBo+IiIiIiIiIiCghBo+IiIiIiIiIiCghBo+IiIiIiIiIiCghBo+IiIiIiIiIiCghBo+IiIiIiIiIiCghBo+IiIiIiIiIiCghBo+IiIiIiIiIiCih/w+p8Mq8dCmpwgAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Load sustained phonation recording from QPN dataset\n", + "audio_filename = \"phonation.wav\"\n", + "audio, sample_rate = torchaudio.load(audio_filename)\n", + "\n", + "# Parameters for all figures in the document\n", + "plt.rcParams[\"figure.figsize\"] = (14,2)\n", + "plt.rcParams['axes.xmargin'] = 0\n", + "\n", + "# Downsample to reduce number of points on the plot\n", + "downsample_factor = 20\n", + "xs = torch.arange(len(audio[0,::downsample_factor])) / sample_rate * downsample_factor\n", + "plt.plot(xs, audio[0,::downsample_factor], linewidth=.5)\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Amplitude\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "98a89d0d-317f-4988-887e-832ed89acd4c", + "metadata": {}, + "source": [ + "## Intro\n", + "\n", + "To understand how this speech analysis works, we'll step through the vocal feature computation functions, including:\n", + "\n", + "* `speechbrain.processing.vocal_features.compute_autocorrelation_features`\n", + "* `speechbrain.processing.vocal_features.compute_periodic_features`\n", + "* `speechbrain.processing.vocal_features.compute_gne`\n", + "\n", + "These different ways of approaching vocal features can give different indications of potential vocal pathology, which we will\n", + "explain as we go through the tutorial. We first just set up basic parameters that apply to all sections." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "7ac82d0a-5e40-4359-bfd6-eb5a8a8e883f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Samples between (the start of) one frame and the next: 441\n", + "Samples contained within each frame: 2205\n", + "Number of samples in the maximum lag: 551\n", + "Number of samples in the minimum lag: 147\n" + ] + } + ], + "source": [ + "# Frequency of human speech is usually between 75-300 Hz\n", + "min_f0_Hz = 80\n", + "max_f0_Hz = 300\n", + "\n", + "# A new window of 0.05 second length every 0.01 seconds\n", + "step_size = 0.01\n", + "window_size = 0.05\n", + "\n", + "# Do some number manipulations\n", + "step_samples = int(step_size * sample_rate)\n", + "window_samples = int(window_size * sample_rate)\n", + "max_lag = int(sample_rate / min_f0_Hz)\n", + "min_lag = int(sample_rate / max_f0_Hz)\n", + "\n", + "print(\"Samples between (the start of) one frame and the next:\", step_samples)\n", + "print(\"Samples contained within each frame:\", window_samples)\n", + "print(\"Number of samples in the maximum lag:\", max_lag)\n", + "print(\"Number of samples in the minimum lag:\", min_lag)" + ] + }, + { + "cell_type": "markdown", + "id": "3d9ccc23-e941-4e0f-9fd7-e0a0bed6012c", + "metadata": {}, + "source": [ + "Our sanity check for these values is that at least one complete period will be contained in the frame (for minimum Hz) and at most 8 for maximum Hz." + ] + }, + { + "cell_type": "markdown", + "id": "0c55a4b8-dfca-41d0-8e27-c96ca0018b94", + "metadata": {}, + "source": [ + "## Compute autocorrelation and related features\n", + "\n", + "The first set of features, autocorrelation features, pick up on signs of pathology\n", + "as a result of breathy / noisy / irregular phonations. 
The cause is usually inadequate control over some aspect\n", + "of phonation, such as the vocal cords, causing the periods to become less regular.\n", + "\n", + "Autocorrelation is the cross-correlation of a signal with itself at each lag from min_lag to max_lag.\n", + "The min lag and max lag correspond to the extremes of the human vocal range -- to reduce the chance of\n", + "false peaks at a frequency that can't possibly correspond with human vocalization.\n", + "For all periodic/harmonic signals (including phonations), there are peaks at regular lag intervals\n", + "corresponding to the period of the signal. The harmonicity is the ratio of the strongest peak against\n", + "the theoretical maximum which occurs when the lag is zero (as the signal perfectly lines up with itself at lag 0).\n", + "\n", + "The harmonicity is a useful measure of pathology, as any irregularities will show up as a reduction in the\n", + "cross-correlation score of the signal against its time-shifted version.\n", + "\n", + "For animations which may be helpful for understanding the concept of autocorrelation, see the following:\n", + "* [https://tahull.github.io/blog/2020/08/acf-animated](https://tahull.github.io/blog/2020/08/acf-animated)\n", + "* [https://github.com/chautruonglong/Fundamental-Frequency](https://github.com/chautruonglong/Fundamental-Frequency)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f56a4150-ef25-46c6-a5d0-2608c31b4d12", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABI0AAADUCAYAAAARBtMYAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs/Xvwbcl1F4Z/Vnfvvc8538e9d0ZXkke2lFiAnTgUUAk4OH6pkO0II5cM5uEUASXEUI552TghBIgsoKIidoEJtsGpCjJJREQc/EriFBaxHAcIwTiGArvsWC7Jtl4zmpn7+H6/57H37l6/P9Za3b33Od87I/yQ4LfX1J3vPq+9+7F69Vqf9WhiZsZCCy200EILLbTQQgsttNBCCy200EILVeQ+0Q1YaKGFFlpooYUWWmihhRZaaKGFFlrok48W0GihhRZaaKGFFlpooYUWWmihhRZaaKEjWkCjhRZaaKGFFlpooYUWWmihhRZaaKGFjmgBjRZaaKGFFlpooYUWWmihhRZaaKGFFjqiBTRaaKGFFlpooYUWWmihhRZaaKGFFlroiBbQaKGFFlpooYUWWmihhRZaaKGFFlpooSNaQKOFFlpooYUWWmihhRZaaKGFFlpooYWOaAGNFlpooYUWWmihhRZaaKGFFlpooYUWOqIFNFpooYUWWmihhRZaaKGFFlpooYUWWuiIFtBooYUWWmihhRZa6F8S+qEf+iEQEX7oh37oF/W+RIRv+IZv+EW950ILLbTQQgst9MlPC2i00EILLbTQQgt9UtK3fdu3gYjw2Z/92b+g+3z/93//Ani8DFrGaaGFFlpooYUWmtMCGi200EILLbTQQp+U9K53vQv/yr/yr+Af/sN/iPe9733/3Pf5/u//frz97W//RWzZv5z0pHHa7Xb4U3/qT/0yt2ihhRZaaKGFFvpE0wIaLbTQQgsttNBCn3T0/ve/H3//7/99/IW/8Bdw//59vOtd7/pEN+mXlLbb7cn3x3FE3/e/zK05ptVqhRDCJ7oZCy200EILLbTQLzMtoNFCCy200EILLfRJR+9617tw7949fOmXfim+4iu+4gg0uq12zwc+8AEQEb7jO74DAPDWt74V3/qt3wpA6vLYP6Obmxv8sT/2x/Bpn/Zp6LoOn/EZn4Fv+qZvAjMftel/+B/+B/yG3/AbsNlscO/ePXz+538+fuAHfmDynW/7tm/DZ33WZ6HrOjzzzDP4mq/5Gjx8+HDynS/8wi/Ev/Fv/Bv40R/9UXz+538+NpsN/vP//D/Pbf+mb/omfPM3fzNe//rXo+s6/MRP/AQA4Cd/8ifxFV/xFXjqqaewWq3wb/1b/xa+7/u+7yXH8v/6v/4v/Pbf/tvx2te+Fl3X4dM+7dPwtV/7tdjtdvk7LzVOp2oa/diP/Rje9KY34fLyEufn5/hNv+k34R/8g38w+c53fMd3gIjw9/7e38PXfd3X4f79+zg7O8OXf/mX42Mf+9hLtn2hhRZaaKGFFvrE0uIyWmihhRZaaKGFPunoXe96F37rb/2taNsWX/mVX4m/8lf+Cn7kR34Ev/7X//qP6z5/4A/8AXz4wx/Ge97zHvz3//1/P/mMmfFlX/ZleO9734vf9/t+H37tr/21+Nt/+2/jP/lP/hN86EMfwl/8i38xf/ftb387vuEbvgGf8zmfgz/zZ/4M2rbF//P//D/4wR/8QXzxF38xAOAbvuEb8Pa3vx1vfOMb8dVf/dX4qZ/6qdzuv/f3/h6apsn3e+GFF/CmN70Jv+t3/S787t/9u/GqV70qf/bOd74T+/0ev//3/350XYennnoKP/7jP45/59/5d/Ca17wG/9l/9p/h7OwM/9P/9D/hLW95C/7W3/pb+PIv//Jbx+A7v/M7sd1u8dVf/dV4+umn8Q//4T/EX/7Lfxkf/OAH8Z3f+Z0vOU6n6Md//MfxeZ/3ebi8vMR/+p/+p2iaBt/+7d+OL/zCL8T/+X/+n0d1qP7QH/pDuHfvHt7
2trfhAx/4AL75m78Zf/AP/kH8zb/5N1/yWQsttNBCCy200CeQeKGFFlpooYUWWuiTiP7RP/pHDIDf8573MDNzSok/9VM/lf/IH/kj+Tvvfe97GQC/973vnfz2/e9/PwPgd77znfm9r/mar+FTKs/3fM/3MAD+c3/uz03e/4qv+AomIn7f+97HzMw//dM/zc45/vIv/3KOMU6+m1JiZubnnnuO27blL/7iL55851u+5VsYAP+1v/bX8ntf8AVfwAD4r/7Vv3qy7ZeXl/zcc89NPvtNv+k38a/+1b+a9/v95Nmf8zmfw7/yV/7KJ47Ldrs96vs73vEOJiL+2Z/92ZccJ2ZmAPy2t70tv37LW97Cbdvyz/zMz+T3PvzhD/PFxQV//ud/fn7vne98JwPgN77xjXmsmJm/9mu/lr33/PDhw5PPW2ihhRZaaKGFPjloSU9baKGFFlpooYU+qehd73oXXvWqV+ENb3gDAEmN+p2/83fi3e9+N2KMv2jP+f7v/3547/GH//Afnrz/x/7YHwMz43//3/93AMD3fM/3IKWE/+K/+C/g3FR1shSuv/N3/g76vscf/aN/dPKdr/qqr8Ll5SX+t//tf5v8rus6/Af/wX9wsl2/7bf9Nty/fz+/fvHFF/GDP/iD+B2/43fg6uoKzz//PJ5//nm88MIL+JIv+RL89E//ND70oQ/d2s/1ep2vb25u8Pzzz+NzPudzwMz4sR/7sScN0UmKMeIHfuAH8Ja3vAWf/umfnt//lE/5FPx7/96/h7/7d/8uHj9+PPnN7//9v3+S7vZ5n/d5iDHiZ3/2Zz/u5y+00EILLbTQQr98tIBGCy200EILLbTQJw3FGPHud78bb3jDG/D+978f73vf+/C+970Pn/3Zn41nn30W/8f/8X/8oj3rZ3/2Z/HMM8/g4uJi8v6/9q/9a/lzAPiZn/kZOOfwr//r//oT7wUAn/EZnzF5v21bfPqnf/oROPKa17wGbduevNe/+q/+q5PX73vf+8DM+NN/+k/j/v37k39ve9vbAADPPffcrW37uZ/7Obz1rW/FU089hfPzc9y/fx9f8AVfAAB49OjRrb+7jT72sY9hu90e9RWQsUsp4ed//ucn77/2ta+dvL537x4A4MGDBx/38xdaaKGFFlpooV8+WmoaLbTQQgsttNBCnzT0gz/4g/jIRz6Cd7/73Xj3u9999Pm73vUufPEXf/EkaqWmX8xIpF9KqqN/XuqzlBIA4Ou//uvxJV/yJSd/8yt+xa84+X6MEV/0RV+EF198EX/8j/9xfOZnfibOzs7woQ99CG9961vzvX+pyXt/8n0+UXB8oYUWWmihhRb65KEFNFpooYUWWmihhT5p6F3vehde+cpX5pO8avqu7/oufPd3fzf+6l/9qzlSZX4y2al0p9sApte97nX4O3/n7+Dq6moSbfSTP/mT+XMAeP3rX4+UEn7iJ34Cv/bX/tpb7wUAP/VTPzVJ2er7Hu9///vxxje+8ZYevzTZ/Zqm+bjv80//6T/F//f//X/463/9r+P3/J7fk99/z3vec/Td28ZpTvfv38dms8FP/dRPHX32kz/5k3DO4dM+7dM+rnYutNBCCy200EKfnLSkpy200EILLbTQQp8UtNvt8F3f9V34Lb/lt+ArvuIrjv79wT/4B3F1dYXv+77vw+te9zp47/HDP/zDk3t827d929F9z87OABwDTL/5N/9mxBjxLd/yLZP3/+Jf/IsgIrzpTW8CALzlLW+Bcw5/5s/8maPIHIuUeeMb34i2bfFf/9f/9SR65r/9b/9bPHr0CF/6pV/6zzcoAF75ylfiC7/wC/Ht3/7t+MhHPnL0+ZOOrrcIn7pNzIy/9Jf+0tF3bxunU/f84i/+Ynzv934vPvCBD+T3n332WfyNv/E38Lmf+7m4vLx84j0WWmihhRZaaKF/MWiJNFpooYUWWmihhT4p6Pu+7/twdXWFL/uyLzv5+b/9b//buH//Pt71rnfhd/7O34n
f/tt/O/7yX/7LICK8/vWvx//6v/6vJ2v7/Jv/5r8JAPjDf/gP40u+5Evgvcfv+l2/C29+85vxhje8AX/yT/5JfOADH8Cv+TW/Bj/wAz+A7/3e78Uf/aN/FK9//esBSOrXn/yTfxJ/9s/+WXze530efutv/a3oug4/8iM/gmeeeQbveMc7cP/+ffyJP/En8Pa3vx3/7r/77+LLvuzL8FM/9VP4tm/7Nvz6X//r8bt/9+/+BY3Nt37rt+JzP/dz8at/9a/GV33VV+HTP/3T8eyzz+L//r//b3zwgx/EP/kn/+Tk7z7zMz8Tr3/96/H1X//1+NCHPoTLy0v8rb/1t07WErptnE7Rn/tzfw7vec978Lmf+7n4j//j/xghBHz7t387DocD/qv/6r/6BfV1oYUWWmihhRb6JKJP3MFtCy200EILLbTQQoXe/OY382q14pubm1u/89a3vpWbpuHnn3+eP/axj/Fv+22/jTebDd+7d4//wB/4A/zP/tk/YwD8zne+M/9mHEf+Q3/oD/H9+/eZiCbHyl9dXfHXfu3X8jPPPMNN0/Cv/JW/kr/xG79xcjy80V/7a3+Nf92v+3XcdR3fu3ePv+ALvoDf8573TL7zLd/yLfyZn/mZ3DQNv+pVr+Kv/uqv5gcPHky+8wVf8AX8WZ/1WUf3f//7388A+Bu/8RtP9v1nfuZn+Pf8nt/Dr371q7lpGn7Na17Dv+W3/Bb+n//n/zl/573vfS8D4Pe+9735vZ/4iZ/gN77xjXx+fs6veMUr+Ku+6qv4n/yTf/JxjRMAftvb3jZpz//7//6//CVf8iV8fn7Om82G3/CGN/Df//t/f/Kdd77znQyAf+RHfmTy/ql2LrTQQgsttNBCn3xEzEsFwoUWWmihhRZaaKGFFlpooYUWWmihhaa01DRaaKGFFlpooYUWWmihhRZaaKGFFlroiBbQaKGFFlpooYUWWmihhRZaaKGFFlpooSNaQKOFFlpooYUWWmihhRZaaKGFFlpooYWO6BMKGv3wD/8w3vzmN+OZZ54BEeF7vud7nvj97/qu78IXfdEX4f79+7i8vMRv/I2/EX/7b//tyXe+4Ru+AUQ0+feZn/mZv4S9WGihhRZaaKGFFlpooYUWWmihhRb6l48+oaDRzc0Nfs2v+TX41m/91pf1/R/+4R/GF33RF+H7v//78aM/+qN4wxvegDe/+c34sR/7scn3PuuzPgsf+chH8r+/+3f/7i9F8xdaaKGFFlpooYUWWmihhRZaaKGF/qWl8Il8+Jve9Ca86U1vetnf/+Zv/ubJ6//yv/wv8b3f+734X/6X/wW/7tf9uvx+CAGvfvWrf7GaudBCCy200EILLbTQQgsttNBCCy30/3f0CQWNfqGUUsLV1RWeeuqpyfs//dM/jWeeeQar1Qq/8Tf+RrzjHe/Aa1/72lvvczgccDgcJvd98cUX8fTTT4OIfsnav9BCCy200EILLbTQQgsttNBCCy30y0nMjKurKzzzzDNw7skJaP9Cg0bf9E3fhOvra/yO3/E78nuf/dmfje/4ju/AZ3zGZ+AjH/kI3v72t+PzPu/z8M/+2T/DxcXFyfu84x3vwNvf/vZfrmYvtNBCCy200EILLbTQQgsttNBCC31C6ed//ufxqZ/6qU/8DjEz/zK154lERPju7/5uvOUtb3lZ3/8bf+Nv4Ku+6qvwvd/7vXjjG9946/cePnyI173udfgLf+Ev4Pf9vt938jvzSKNHjx7hta99LV73x/80XLc63d78P6PpMDJORCgxTr37ksT2LAKY5NJvgfYR4EbAH4DmJsENQH8XuHot0N8DkAA3EmgkuB5YPQ90jxjE0+amhpACwA6ILTCuCeyBFBipA9jbs7UDDFAiUNLrEaAoPaMIUAKIpa25D07vA4A9g/V50Ofk+/vqOQS50dGA1DeWtoCra3vf+jn7josyNmBptxvL+zTKz3Pf7PHltqcncf65zhVI+s1OXnNgcFNdh2l3yG5iz2V
rC4FYxhdJ5zaSjL2+T6M+WucAmM01T5qaX+d2OvmUnV3rZ3PgWdtEUZ83Mnyv7yXARQaijSNPp/CUtKHpGJykasEdzQFXn9RzVl+fagKdaE/NWlReM9HkM7kv53GQ91jm5rb2k7X9ls4yT9p+8h4o9znVt9xuR8JjBHCQ9cyka70p6zE5OuJRuOraXlujTAaxPCjPeQ+4Qa79wHC9fMcNDDdwlgkUWRs/7ReT8B1I2+RmY2Y/ma+LU333+jUHJF/4OQWAvYxLCshrL7VA8iz9bIDU6rVjsMompJmcGa3vsgZNnrhIwg/1GrH1aeslcVm3SWQ4dJ2Qrm3Y/Nr2XO8dT+LrzK/QdUvTNVyNJ6rvT28y+0j7Js99CZlS32LGrzbPuS0Vj922LrL8sOdHzvOe51/bNW//8c3q9tBx36sOkMqS+ftH9wLArvAuO8p8xZ4QW1T8V/FiqzzoGNwKz7ED4PSv7k2ZzwbADZT5xWV5y3CD7mnGZ7XsP9U9Qhnram7mPCZ/y01s/U3mPxVZOB+fLDOp2v/cTP40hOTlOra6JxKAgCyzQFXDImVZgij6DVjHROUPjSKDbCzcwHB5v5cxK/x8Yn4nPFnthZVcKvwsYznZK+r9067zGFTjrr8h1nHU8c4yw+SHvnaRi97CPOH/6QTXE3a6nblNVGTika7iRX5O9JmaB+x6LoercbhVbzqx9uoxrH8314lOyX1A5HlSXTWZnuWEp1IDaXRgcJsAz3CO4UICebkhOZZlYewx0zMnbWeAo0NKDpwADA7Ye5nHkeB63QeUR4/WZ7WO8t88nly+k05/P4+VbUm+jLfIFSp7XpZHxpcs1wFln2sgercDOCThc8cgz4CTB3Ii5T/9W+nadk3af7I9UvdGF8UGoSiyyo1Fltd9rMehHvepzcL5O7af5P3eIe8rZe0VGWPjkwImazvbH9XelDx0vfOEl6FDMJcdNKotkUQWuZ6ybhR6Wdciu7ms+cRTfjaGr/ZxdlM5amty0g+vfax19XodwuStjpHZWvO9ePbbiT4SUek9Rd7K/NoccrkGqv2hao89L+t7KoNsTwjV/hBKOzkA7FUHtX0SEP7M+mKlkFR8ObWpqGoXTdtYyxkuv53zqIwJTfiy1lOeSJU8Zc+ZL2sZm209RuFFUlnWJXAQ2dWsB4QuovERT623uFwd4CghUEJwCTERnr85w3M3F+hHD74O4Ect0BtTyLPSYY8P/Pk/i4cPH+LOnTtPbP6/kJFG7373u/Ef/Uf/Eb7zO7/ziYARANy9exe/6lf9Krzvfe+79Ttd16HruqP3fbeCW62O7UrjpVMKttJtRvBtitzkt7MbUnVh93WVUKbAIKghcs5wFwy6UEbvSZTPA0C7ysCpb94AaFQhagFaA3B63wagOWigiisRgERwbrpW7fsmuIFqQ3echRcIgGdQkEVPjsGey/NqZXEyOCgDYZsXQwy4aA9Gvq6NMWJpH+kG6AhZUSAP0IkVkQ1pm4PaoJ2AK3xaKQtFMHCbwI2sVBcYrjlG2JhL/9gEH4sSkyJlxZkHAg3ykAx4ZcWEJkJsshGf6F/dp+R5YnjPeZkgzxLDl+AGAAd9bmSgRzaKYSDBXEGfjS3UwJgakeWB1pbJ705QNihqQX7qmfX9bxmTiaKs13MFVpQf5atUFP7SX9ECJgpxDchQmRbiW6aoHg9T/HNDb/m+bcoOiAECHDkgdmqgkm7CplA6nXPj44of8jo0xcoUpkiArjl3ILiD8uWe4JUf0DNoDwFFRoBqAIlPdJhEJaxl60TnqL8/Mcx0vXgxzmEKc1uAo2KoA6ljUbgcZBNWoIjaCN9GkAO8Z/gQQQSkRIjRgRng5JBGB46iSPNI4CjrjceyPqkGkCoD15RHAZmANM74p1a4rM88u64U6DJANOWrWjmpDbhKUT4a33rQqzU0aVOazcUtPFjms2pTDRSpsghM18PkniwyZWJETwyqWgHXRyYIv9nvX44i9yQ6xaP1nuA
FKBKlToEiVX7jSgFbD8SW83VqFZD0DOoSqI2yj+o+CABpIKTBIyWAegc+OFAE0mjrS/nrAHB2HnAFYt/e9sIPldy138z7PpE3MxAvPeE51fpMQZ9j4JAa87FDNga45bw3Iug/AFQ5qzjpHsgCqKWBQImQRmRDnUaAD5RlM3rO4+MGADN+yk3WPkzWkNP5NX41g8bm3/atOV/b/l8Z86wGen6GrcPMy0U+ZMNsLK95qPaX7Pji4/2uWtB5Xl3lIHClXbYGDaAz/rT9oTj0Kj7Ia4rKGJ6Y//ovn/ps9vpoT9R5z3tB1jEJU6HFVXvld9yygv+iX5pMD4HRtgnOi1G1agYEl0DEcMRwYDAIiSkDR/Y3N1fbNESPwxiQmDD0hP4gewRGAh+87gUABtkjBEjRdQuAdY4Z+rdaUwYymYNQ9D+Za+aKN1Xm53lzCgwEAE7AstRw4UU1uhHMISLAkG8EKPIuwQeGcwnOMYJP8C6BmTBGh5hkD0zJISXIXp3KHPGoeyMDiA48iI7AA0R+jQDHsj7ByuOV7ObEx7x8i2E/lzOogPmal1ODDEiLHqDjkAFenl57AJ4zD5IBEzCZNOPhBGB0iKMDEsA9ZXnNZnslgE0fsjU+zgBCG8d6XdQASyCQOp3RAGiLEyy1OF6rlc4mdldliwTT8ziDY/O1WdsfFMXeMN0Gg/VDrk1mpVH5FyYb6k7p+jHnEUEcBAZgBoCbsk9yx6WdTdI5AZxPAmgCcI4L8Kv7mbWdEwnEqNfyvuhrUBmGVNZ3zWPMRc5wtCAJmuga+R66LjMf1zw60UF0LO2ntW2oQC0IBZxl3TNUv6Amwa8i0DBCiDjbRHRdROeAp9YJd9sIRwle5VlMwD54PHINMAYkbhAPHdi7yTrKvoyXUY7nXzjQ6H/8H/9H/If/4X+Id7/73fjSL/3Sl/z+9fU1fuZnfgb//r//73/8D9PFd1KROvFWPdw0CeC6fSL4CV8xxprcfP69mQJXe4XBKMqNrzZVnt4rtoTUYiJQa2XHnlMLNzcUg8jFSvmq712j/r6OsKk3MlUUdbGQT1mAEWGiyRXlQXZt2Txp+h7RZL7yYq08GpQFQDVeqIQzbAGjACcaAZUjpqp+ZRDANmQS8AseIGJQEAFHjtE0EaEd4YjRhog2yDWAiYKSmPJru06JMEaPmOQ69gFxdFmR5lGV6SjKiil2L2kwkQomm2+v4J2Nv6t3aL0cSf4l8aq5XQGu/J6yIkAVbxwZFzZ15tGsxj1PaQW+TQCsemNEue/EY2V/Z4892lgxu0b5vCiyXBlWlO/vBjVETPkZq+cm+W5WAKjqU+atGQhUt6VmbZre48i4nvfPFH/HsrYbluuOFSBhMVjVSCOn/0g8sOZ1JWL9Jw9z+p4pkSkJcBJ3AcPBCz9sCX4nCpPfA2Era84PyGBSNohqQ//Uuj2emjI/tu+5EilpRnvycj2uINFDnhFXVRTRKgKt9DN0I3wb4Rxj1QxYtz28Y7Q+ovMDHDHG5DCkgKj9PowBY3JI7DCMXgElQhwckoJLKToB1hjAKMYEGOKRHOzaxoFy1FLNPxPFw/5W8h2gWkSeBLcna2fOS/MBrse8WlMTb3esPr9NtkyU1vJ6HnWZsswssndym4QMThvYVgNxBo7nCKS8H6kSGDkrsHlMJ+M56wDNBqT6fB45I94/4T2LnkkNMCpQlFpG3CTZcz2DuwQEMdJcG+GCGGTrbsCqHUDE8E68hAzCfgjYDQ1Schj6gH4fhKdGQjx4YFQP/t483CSRxTY+9fzUXbI9DIU3bpvDqvdZkGZwA2UujuT6TAaZ4p8BM3MWdQloEsgx2m5ECBFEjCYkeBfVl8B5mx+jwxB9lj/j4CXaYyCMhwCOMgZxpwZOBPyhRAD4QfmpjuLBrA8VLxr4Z9EKqUFR8jMvc9EJ9DfZI26OMhtnz2XNubJws7PLIjVUT3G5vSIzbK+xiA2ABCCdGSuTaJQ851TWnkUcGLiu7U2h9C8
1BuIBk+hvA7gAIBbey88vHHNaeM94LH8/G6/Wdj7+/Vze1LIs6PpygG8jmnYEOUYXIrp2gHcJqzDivDmg8RGti9j4Hq0TnnMQQ4sBRHZIujEnJqTZImEG9rHBLjYY2WE3NHh8WGGIHsPo0R8CxlH2w9Q78CjMITxJRRZVETo2b2Tyy3SI/BllR8JEN3Kqs5sTpGXRrx2AVtYXHMN5hveCTASfEJoIp9ddGEX2uIRVECDNU0LnRgSXkJjQp4AheQHJksOYfNZPU3JgCJDWj/KdGD36wYuOMHjEvQePTva/fdkD3Tjtrzl6a3l9FI1kcnw+Dk1Zl7Udwy0LUGRj0qa8Bg2UJifzDwKcS/Ahwan+410qn53wRDIT+lHmnhMhHoL0NxHc3iFtHRAB3wNJnfcUVR4ZgFQ7RMAw+62OZI0tkJriBIud9Vt1u8rZl9dQlmUMNKr3qV3iVM+nqn8AsiM9sTrGGBJZ14vOh0igQQCyDCblyE6UaDMDlUyXmNtaZIAXZ1A+dWo/BQY6AUi8E34NQQDe4CO8l2CJ4GQfze3WhyWuQE4mRFanHwu4m9SmTMll0Iiz8oT8XQDgSLL3sqxZrqLOswKvn03tD5roKgBPbKKpU0HXMCAOXNU9bCyYAKegkWsi2hBx1h1w1vXo3Ii77R53m63wrDZodA4rPyD4iMgOo2PEl5LJL0GfUNDo+vp6EgH0/ve/H//4H/9jPPXUU3jta1+LP/En/gQ+9KEP4b/77/47AJKS9nt/7+/FX/pLfwmf/dmfjY9+9KMAgPV6nUOqvv7rvx5vfvOb8brXvQ4f/vCH8ba3vQ3ee3zlV37lx99AY4oTSjVQhPf8I6q+VKINyo2qT8utJ3Jo+o0jQ5nr76kia+i6Gd2uACii1BTl1kImTKlIjQgjUxTMA5+fW29mqjCQocn2/txwITXcNOw1NUUwQIEidhAwJcimRrqxkdPIqSpc2BY4oEIsKcASCUwOBTAiMHE2GrKie5uBc4uRZQhwjsjw07ZLSKRFSenGElI2vEMQoeYoCTjkBQE+a3qcNT08Jaz9gE0Y4CBGgjSRsoATbxchcnnvEGXjjuxwM7TYD+rpih6jbtYpOfF4qeCbMOkJgUEKGtgGGUKCU++bt43Uho+ljeOoykEipN4LaDA6uJ7gd5VHYqDidaiNYCrNkhDqomTntASNeMk8ZSj8XKHM80pTL0CqDOq675XCfqTYTtZa9QxXMY8qb2D1bvdOvmohyVnxu015N17jwlfVeNRgUTFSubpGdeO6//b7YsTAMahNQCiGWaOgZQgJrY9ZKWrUSPMa2iqbT+ELgnzmiJGYcIgCnIzJ4+rQ4aZvkaLDsG0wbBsgEvyWEG9EUfQ9EHYoClNP2divw5vnANJkbvI4UpWKQJXypEa7AtPjRgHqwMAmAp14mVerAW0rivJ5e8BZK2vyojngIuzFwHAD1m6Ap4SBPfapQWSHIXlsYyuKM3vsxwZ98ohMOIyVYh09RgWTxugxjnKdzBubSICkwYmyURmMtXFuc2y8fJzCVsbnVFpMvXbyuqp46yQAWT+3TkVOEIUxK0wntsd6nuzaAQwzbCwFSZVYIMvTI4o6RiyKqutJAfGieBNXciYbIXptabtHBhjUAJsKxFqUzC6mToUaTGhK9EzqGOMmyfrrEuhsFEXPJ3TdgBAigkvYtAO6MCJQxJ12h4vmIGuSEjxFMBOuxw6PxxXG5HEztMUoHTwOhwZjdMDoMO698hCmRul8z6tkySTl3AGzYajkIE9+Cx03kz3Z8J3rSfkZOq9qsLmQENoI8gnBJ6y7Hm2ICC7ivD1gHUZ4SmjdKMa8tsEe3ceAQwpZ/mzHFmNy6MeAm0Mr0RCDx7gTo51GYNw7uF7Gx/eYGas4GqsM8KgOkyMSc7q+GTg2dqyOFtlL4TWtB5U+o3usd/Iwi2wBQYxMM2KSygdNe4q9BzRawx10X2Uo2EzT9QlMIzJQ1t80ukhTIg3Qaqq50ggUIom
C9k2aOQ4k6jIlWZMcqciyua5R0yl5XvOZ8VrtMDRH4gTJnv2cinMjNBFNEPB/3fQi013CJgy4aPYIlLDxPS7DDq2LaGnEmT+goVj2ObDqXA4JVK5VF6u7s08NtqlFZI+rscODfoNDCtiPAiAdojgTDn2DIapzb/Q5Ui5Zuheg8l/XlEUoMc3AEpqCJb6AkdxKygo5hmtTdoJ07YBO9/vGmx7K6PyIlR/EOeJGbHyP4BJaGrH2PVqKBTSiiMQO+9SgZ4/EDj17DMkX/RSytx1SwC42iEzoq/V5GAKu9x36MSCNDuM+YMgyS/QDQNdjneo2iSqtxuAUeOa5RBQ5CF/rmFAX4RqJnmrbEW07it5DElVF0CgrJ/BgcBGdH+Eciz6k0Rt1RFpNkQm7scE+NhiTw03f4vrQiT60Dxi2DTg6xD0hbl12FuXoSEtd08h8Awjz2nUVUNTKnMeOEdfa1zaJU0LXMdnaIRR542SNeC/j0PiIoPxgwEsW+ypEYnLK/8AwehyGRt6LlAF7REIaFBhN0idUKYo5aq7ej7IjXubLnHlokjgSgthQq1UvjnaXsGl6dOpk7/yYwV5LxTI5YXvFmBwGVpuInTj51L4yHS0xZecfq21jfiL7HPod0+XEaS/XGVjSvwawiY1Qri0CPQPH4/GeOYkErMkzWCMBXYiiRzQRXRhx2e1x0R0ENGq2uNdsQWCRXUwY2GPlBzQ+IiaPpHZdqh99m218C31CQaN/9I/+Ed7whjfk11/3dV8HAPi9v/f34ju+4zvwkY98BD/3cz+XP/9v/pv/BuM44mu+5mvwNV/zNfl9+z4AfPCDH8RXfuVX4oUXXsD9+/fxuZ/7ufgH/+Af4P79+x93+7KHvyaaDvaRZ2X2JabZVnNk61aRCPZn9tDaPrQv5ZBv/Ty2RahIlAtL2F0ORQS4AWIjBm5tqAqgYwsYOdQ+N9IUbPPsmpEXMSEmFKNfPR4m4FKTJNTQAeQTqEkgJ4i+CyLEJBxWNjsTziK8dKEyZZQ4I8jRIToWpYWcoLOpalBiBZJEgOaxNuWfoYK2eGuYUMKbVZEi9db4IIg3eRG0bSOGp6eETpVf7xJWfkTrR3hibHyPtRfj88LvxSilhLXrsXE9PJIIM52QBELUTThBvF4MEgNVN+6RPa7GFbaxRWSHfQzYxyYLRBGKKshUEJrXdsJbBDiwtltArs6NaFyEIyCQCGwbK1Ok9mODbRSFYNs3eLRfox89Yu8xbFvEwQmv9JWwnETeVEasbu7iiZUNAwTxfnjb8JCBLBAmSmxW9qFegzoM1fi3Wj+1R6lagWXZ6prNyj6q53HZTDgRxt5j7IN600hyhZOmhKqHuPbSZmOjChPPIanWt2p8rL2WskK6TiTaRza5ucAnkvBdpwrRqh3Fk0gJF+0e5wqQrNyAtUbSBEoIJAqEt2sUBSkDSJD7RnY4pICBAwb2eOFwhgfDBkPyeLhb4+FuLUDJtsFw1YBHB78njDcSCeBGiUIyhSlHAHABkk4ZvdlT7kt0kXjcNBKgY4wbBb7bBD4fgdZCeQ9ouwGti7i72uGi2aNxCXebLS7DHoGirE+/Q6CEjkasXA8PxsAeBw6IkIijHTcYVHHepg59Cogg7GJbjNpUAN59FGMiMaGPHocYcuRgr8Bvii5HDpoyUodIZ+FVpeBODLUaVHWFr8xgha6jCV+7epCrnSo/k3I0mUU7sgHyXLUDs1vM+Nfa4TWsnMzbrZElTgHqOTPH5Ep0pUaWRDXAeHA5LRCD01RdAQVybQkzrnOUKY7S2ay5qIbyFIhSGyiTmlgdI64UNOgicDbCBcaqG3C52aFtRnR+xJ12j3UY0LiIi7DH2vVoXMIdv8W538ND1p6nBGbgKq3wOMqaejyu8MJwJkbZKADSIXkMg4Al/RAEfBg80ihMwBW4M5mPPAca2u/SJN3CvLW1jLXrmi8t/D+ZkVfdw1WyO/iERvf11o84a3s0apTdbXdipDpZexsvsqm
lES2Nkz2LARxSg0Nq8lq7jh1G9tiOLR4OaxxiwH4MeLRfYz80iNGh3weMQwAiIR6c7ElMAvKPZR2dcnxlg8ZDgPdOAHirh+NcgiPAm+ebGI2LCD7pdUJwxTALCoSZnCVijMkL6KwGmgDPouPs+waDAg3xEBAHqZsziVysI6cVRLI9L6em5H0HIhe0rg85hm8k6i1HeKkhuWpGrILsD/YPAEaWaK+kYPh+EDmXDae8H0/lyaRGUL209L45yhXCl3V0B2G2F9v3VUf0xFiHAesgetZls8edRuT4mT/g0u/QUMTKDfm6QZQ1SFHFZgEnJ6ARxOisJRODsOcGu9QgwuEqrvBid45DCtjGFg+GDfZRnAk3Q4d9DEhJ5zaKPheTQ9S5SuyyIzTFKsXLwCUzPm2OCaJDqD7QtiO8Ri5u2j6n3V20e1w0B93vBShyxOoQ6eGJ0dGAjetF16OItRsQIDzQ0oig+mmve319bU7OrBemBrukOmlqcBNbjOxxM7Z44XCGXWxwGAOu9ivsxwBODuPgEEevUR+qL4LABiAZT8f6upLLBtwGBloGe5E9oY7mbAd07YBALDpQ02sajwEO4hBrSCMdKWLlRWfyEH3Iqw5kelJNiR1uUquRZx4P+zUeDhsMyeHxfoWH2w2G6BEPHuNNgzQ60YP2lNOMXY8SoWOOoWrPYQWKUgcBflcJvBG7JHQj2tUo+2uWLciOX3tvFUY0XgDBlR/QedHzBKQfq1J3BTw1sGUfG9wMbbYvdkPAqHLr0AeJAE2E2HvEQQXoQMDo1K5Fsc1cAUgoJHFsesnEMOfKKoy40+2wbkRnuwx7bEKvYOaAjgw0ighztBxiBxnIKXqbA8MJoJlCXuODOvwYJbsj87W+f4gevep1Mem99LMcsaSfWaZItlmTw1DrLQeP1FtkxekxMd629FGnqXmhGbHuBnTNgFUYcLfb4bLZC2gUtrj0OxCAES47OC1aMMvTXyB9QkGjL/zCL8ST6nAbEGT0Qz/0Qy95z3e/+92/wFYVohkoZDSzQcv37XOavmvgENVfqm6WwQxUoMYJKvaBtGASYj0CNLIi1cUIhqZPgSWsMbWlA1kBbriARjXaaaH/eSwKyj/ZQZ3yvXr6k0YRmTJt0Q6ui4ADvIbGkioGbdA8TCcIsrcoF1UKmC0cVhZkH30OUR9Gj95rxIv3SMTZg88Qbx05wJFsQsQQ79pMQQRpWK+m7rguwnfSxiZIqLN3jC6MOG8PAhj5EedBFOAmhzrLRrNxPVYapbBxvWzQSDhzB5w5NdppxIoGeEWGDSs3743xTUQRTns1ViMcrmOHLbfZC7RPDRIc+iQGblI3Y1JgwUKwM4spc8umEeGRMpggCruCCbpD19FQ29jiJq0wssPDYY3nDhfYxwbbocWD3Qa7sUGKhKEPVS0YNYJ1MwNJupOEnVpagnjDyCkopxFagZJudtIK8/rktaEAyqiKN2egUXqcFU0FQMwzURslpogCyECKo5SVfNlIgZFNwSdcDx2u+06U/SFg34snJkX1KiZV8HRNewt7VlCnCWJgANOoLkcW6YMcNm4bf6NtzxuWyRkdDmeGi6a6XOhmGyjiTtjh0gto2dGAlRvg1FgN0KgjfW1KtG6nk3EwxXFkj4E9XuzO8DBu0KeA5zYXeO5wgUMMeLjb4IXNGfrokXYB43WTa3GFLeXikf6gMixxBpUAFM+5GT6214YqTFuBIvYArxLSWQS3okifne/RtmL83F9f47KVDfYVzTXuhJ2Mid8JUARZq2ck67NBQqeGXWTCoErGCId9EgBpZIc9S4pChMMuteg5yPikkK93sRUDQ9eqeGOdrJmxFUMsORzGBmOqlRGqPGByHVnTAkETY6wYUcI/wcscOlf43a5tHdXGIOv/an6yNTUmLwC2gjgSFEYTI9EWkfEJKv41vml9zOt45cXIE9BaFPP51tcnL95bduijGGCHKIrgYQyaIilK2TgKc6Reok3AAAaNBLQweqv
7Zq+nlmAGh4qQLHvpJLWuKSkg1EXQKsJ5xmbV4+JshyZE3Gn2ePX6Mc7DASs34G7YYu3FSD1zB6xoEKPWHbChPoOzXuXMNrW45g4jO1ylFR6MZ+g54CZ2eHHYZD56cNhgpwD+bmjQ696YVGk1mWDAT3ARjcqfOsKwpIGxBClWMtJTyvVeonpujRfH5JT/bJcAGleiGlo3Yu2lrys/4CLs0dGIzg2467fZcD93hyyPGjMEZtMzsEcPn/fCbZLxuUkdHo5n2HPAduzwsf4cN6PwyqPDGlv1kB8OTQbY0uA0+g8T8JM0QhMknt2mlf2oDRGbrs/RYuLBVYeRF0eLRWe0TlPQKebrQBFtFdXidW8Z2OGQhMcHlihGSwW6GlbYWvTCocN2aDQl1mEcfa7PkdSJBptz5eUMyHiJLCUnelbbqvHoEtZhmHjwOzWWz3yPs3DI7XSKso5ZvxAw/Grs0EeRczG5vCcVr30xwE6RtdETZ1AtuJTHzfYk28Mne6Ma/tbejT8gUMKl3+GO36JR59y52yMgoaOIjRsQIDKwU13ClruBRhr0qtd0pPICQM8eB90TrlOHR80aBw7YpRYPxw323KBPAY/HFfZJZPxubLJTYWQvOi1KRAezyLwhBiQWJ1is9wE1VCWtLOa9/qztBRDQyMXzIBFUd8IWd8JO93tZcx4CBq1IQKOGYtZDAyWJMhLtGYHYzAcMrAAaSPc7Bbkq0EjGpEGCOE22Kr+u4wrPry6wVWDlhcMZrmOHmHRMYgAzsoOAMXMOJ3WoWDRHKo5Q5yXsKPgk4JkXneqs6dGGUYD5doezIHLmTrPDZdhlfTeormPXIn9GdFSDRgkGI9reKTwjfCh7f6O6gMfD1QYPxw0GDnj+cIbnNhc4pIDrQ4eHZ2v0Y0AcHMaDRF7RSKCDKxE6CiSpCl8ihFcJaSVgb7se0G16eO3r5Wovzl9dR+bwC6rDBM1umNgoXuRtZzq/bXsqQEbVcRIT9qnFVVyJDE4BN2MrvJo8bkYBRmNy2PatAoJin40KCELnTmRscWS1IUo0nEtYNwPudHtxLvgeT7fXOA89WhpxJ2xx5g4Z6Mw2is7jfK8Y2Wf9K6psTQqmiO6q+hx7tRmgjnrja5cjdg4pZGfFqGBU/q7Ku8ikepJFMHl9z2E7yJik6LDbt9gfGq2HqXW/QAVssEkwwDwkeA1UaJsRm7bHuu2x8QPuNjvcbbdoKWbQSGRT0L0yoHPDFDSq7JEjofYy6F+4mkafEKLjsT0a6xogtA9p+t1bf8Nls3pSGyYvzBtmYX5BN7fsTQJqbZ4ZejIQToBGmgtsKQzOnsI5VHTelpz6gEqZNiTc6oasI2itwEsb0XZDDousc6g7P+Yc6pUfs+LgNSRU8ql99tofYhDPDRP2QyOKVCIMY0DvxIOPkaRd6q1IjorXgqsx8NJvciyKfyfew/VqwGZ1gPeMTehx3vQILuIs9LjbbNE68Vpd+D1aN2ZDoCMBitYq1Ao4NMKBsSIxRB0xGhBaorwpJxTjLf/jcp0ADAxEVdx3weOQPT8+e4EG9hg4qKJTwqoVRssbnpEjRoBsNk4VK4s08VR+U4NGB422GOHwKK7x3OoSe25xNa7w0cMlbkbxSFwPXVYIxNh1QAYEZRzOmj57CNd+yFFZrRux0g0uUESn12JcpQyA5fWl4ZgRskmZIT/pOyF7jsxQKkBaAZFIN1unymo9BoNGekV2eDiu8XDcYGSP67HF42Et6RJRjN2YaGKQBZewDuJZ9ySh850bs3fLNngDckgNaguhD5TQ0JjB1FTNrynpThU/S3U5dwdJs4Jcn2n0jCmIIkrK+NjrvHfZUqepYj0qL47s8Dg8wg1LFNzz3QWeW1/ikAKeXV/ig+t72MUG1/sOD882OIwB3DsMN0FqHAyS0ugGUZZ8D9BQgfaTMG2UYp8auZhWjHgm4fntasDZ+QG+jbhoDnjV2RXOmwM2vsendA9xN+zQ0oin/DUuvRgSZ27
AhoZsRKwIEj0AQiACwSL+IhgstU35gAiRTQMIo87FgR0GBS0HeIx6fWBNq4HDngP25o3lBjdRFOueQ/ZU5hRVndtUKShDKvc1Ax4qL2sFsXEVz6gS6ZGy0mgydi4PjJeS8tPAAX0ShWvQqAhrTwZl6+1Bn2MGX6NGnQOj8wM65d+Nk3kRkDJm5b2mXo0wibIMeDyusVcDeztKVNeYHG7GFvtRgbghYD80MlaDx9AH2CECbB7QiFzIver0RFmzvS7/tZOEnHj+GnV8rLsB590BwSfcbbd49eYxNn7AnbDFM+1DXLg9Ghpx4Q7ZaFvTqMYZsHKMVbX27JEHHrDnLSKAbQq4Ci0GeGxTi4dxkw3UF4ZzbJPK27HDPknfRxaDM4O9Kle6KuKgIantUiIJeSJ/MpClRlPmOY2EHVmMCoDUqBe50dKIzo1ZFq3VUdLRiHO3l+dSxBn1apwx1pTQGhgAWYM1X5nMGXRP7Nlhr8brnhtcpQ4De9ykDs+P57hJMhYv9mdi7CSPR/0KN0OHmCSVtB+D8HoFvhqYRgR0YcSmkYiwdehxr91i7SVa7MxLWoBHUqNLU38qg6ahiMb2UhQDtY7etLWfIIbNLjWy5tjjUdwIiMQeD4cNrsYVxiRgc4ksLkBu0ugV1vkwcCU4qVsTXETjkgCZXoyJM3/Ie+7KmSMh4cz1OPMHmBQycHlgX2RZavAorvW1GkpQQx8l5cPkhbloDEwynjH51ahOEDQqqDboM9iGAkIHNeqdGsEWuX3uDrhwBwQktJSwUl0ygNAR4EAq432uVlSnqnKlWyRw1s1qZhwRMfKIBGDvBtzwFiMT9hxwFVY46Bxep5XILMjc9qnJEQ4j+xxNbs6uQwoSiQoB4UzW15EPph9ZStmdZidOS4oauXhAoIhLt8e522cnSKu8GIjR6vr2AFoFhxwRAkQnFV3JgXSGZL9j3QMjIpcxyusThIGheihhzx4RhG1q8aA5U6C3xQurc9xEXa+xVV4Wh6elFElEi+6lGp3LbE4MATI8SfSqg0QxnreHnLp0EfbY+AGNRmFceJE7l04cZ07BBg8+WpOBEjoU4MWf0I3yfgdRUXrd+yMcHqcOj5MALC+253h2dYmeAx70Gzy3vhCAaWhwdehwGMWx2h8aAbGtRmk0Q0wBbMdoVgOaldhOd1Z73Ftv0biIy2aPp9ob4QloeQFdJ0H/egUHG009XLte7RKRyxbZWfO4OcYksi5gqzK254Cb1KFX3eA6Co8PyePxYBkQhN3QZlstsoDbUxnLWPsRZ62AnGehx1PtDdZuwMYf8IpwnfeLSyfRuQ4Cxjemw5gOXxnRwq8OA1vmBlU6k8MAr/uYgkYqBUzHsusMGnGTHfEmm2v5ZvvioJFN5jQ0sOrx0GEXW/TR40G3waP9GjER+kNAr/qJgGpFLpou4jS7xXvJGjhvDjhrBCC/22xxL2zR0IhLv8OF24MBbW9THDCUJs47GyPT6ycM/RK0gEZPIK69jkq183F2eeIG5TeT782Y+ySdQgJNoQWQj6j2kLo+jsVDZqAlcU7Bgv4se0cZpRgvQUOwoeBYpTCDQA7CyFYXQ74kaW/60irys2PwukLCNz3ateRNb9p+EqEjSop4SQwJDaq0WIioKa2CcjfoWRbhNrbYqqd+O7S4HsTo2g8BWy8e1xQdxj6JkKpSGWxMyQYlsBSEc4z1usdqLbnd91ZbPLUSgXzhD7jTbMUb6g+4F27Q0aAK8EFCeClhTWNWEFfEaFRhawHI4TFihAYENdicbsoGDNli5qNpN1ZInFSJAXoeMWAQgASiUIuwpHxoGVDMsFrxq0nYiLOiEADYSbtqDlQsa4rjAQcAiYHr0OA14REOqiB9rLvQDUVS6PapAavQFgPX0qFE2Tv3B5z5QwbbNu6QQ4Vrb4+NbZ02VS8R2xDqyCwDVWpwyO5nSkAZqeI5qj1KVH1m4er2nKu0wlVcYYR40x7HdVaqDQywtpoRcZ6NjYgz36O
jAaTCPXt1s2IsKYJNHQWkbS8blpABDOZdMmVhRRFtvgY6VRA9CJ5I+1b/N1WgK3GAAj+KAmm8uOc99rzDyIRX+y0eNA/Rs8ezzSVetXosxu3hDB/e3ME2ttj3DR7vVjgMATw4xG2DsZf8eL+vandYUVACkh1PqqmvaSV53mE9YHPew4WEy26PV51diccq7PCpqwe4E3ZYux6v9I9xx+/QEOOCBmycqAorMvCW4OBUTSaQvpd5P4uPojybMs1qWAw8IrJGpEEAJlZgaVCPlChdLkdr7S3tTb20AhYVT5fNrXmZDbQU71YBRs1zKrxUwEUDSXNEmXqSc6TRbBeq5QazeRtngLT2JR39GhPA067N0LO0EKdgeqdgurVzrruMcDgoQGuAwIEDRgVPDkkiL6/iKq+367HD9dhV+4MAdKVIq9Zk0Iico1rYxFl5LqkzkOKbjkGUsG5GAbpdwt12h6c7UdifDtd4pn2Itetx4Q54hb+R1A8Aa8doINGVDQyQBAI8PLncd9L1FZkxIoHB6N2IHQ+IzDjA4zo9xsACQD5sN9glARuvU4eDGqWRRTEGoBFlMlsrN2BFFWikhkPZH4qH2sBymyMAOWKEQTkM3jiGdMwCCtDdkBi2HrInrigigBEI6IjQACAiNPDwKmRsDc4pIunaA0YkDArk9gqwjSAc2ONReIgdBxy4wYPuDNepw5ACHowbXMWV1AQcW2xjmw1x65evQNaN73FHa+Bs3AFPhRup/6KpTWKEF5DelPRGZbRFtJixWZ2unHks6f6d1PDYs8cIMUBvUpejix/FNa7SKutAlgbUc5U6wbpW2VLgZP4sErrRyKcLLwaYRboJP4jDqMkOruLssnkFND1NZZOBdQZ6jVz4oU65j3DKi8cQtXFTTpGGgEa29xcAswDd1qYsWyB81WX9S9abAxBAaMiDQAqMlLU2M58ma7D+jmlk+VukYJLmiw1gXHJE1D1gywcBOOGxS2JkRnUYWFrXkCMcZHxMXxFDvERIjHDZQWR7go1PQxGNG3HpdppilnBO5iRinLmINWmdHpgeKuvLq0Mk73u13n+08iaTVWmqFXEB2BiMkRkDBiQGDn7Afb9DrxGCj5oVtqnFCIetyqwILbZtjlC7ZkmF3ytQOSaXo7VyVA0kcvVO2OVI+TteUl8bRFz6Pc5IotDOaMTGjSrXkAEHNYmEr0DwVPRfl6e91pLqrkt/IwRE3KYBO75BBOFRuMKrm0cY2ONht8Gzq0tN3ZM6WLsokVZX/Uoi9FmKao9JjCwBzyVK+KLb47w9oHEJr2ivcb+7QucEMHg6yB5U7/e1g9VkkwFlHY2S4gWgpYQGte0nlNhKUhhI71VXcSqXHAYFkA4s5QoexTVuouy/11HAEgGUi4OhdnBtfC+AnhN76qlwjbXuUXf9DmvqEZSXu4qXi+5a4x5Fo0+Q9WgVS+oDCQUw0kxf1bfkd+X3darawC5HuI7QSKOskwkZwJZ0LfeVXvdoXCufB3ykvYPnu3MB2PYdrvYrrRPlcrS0dEXuHEJE24xSA7Dpcd4ccNns895014tteuH2OHMHkWbaISboXmZOPBzNsXbyZdMCGr0MOmFnnxzj/N6JSaHJZ4xJTtqpe77EJFIC3MBSv6FnuB5wI0txx0oLJp80DNCBGyfgEFCFPeqpZg1L/vvs+UmbCpJFJm+UPrLXekWNPMtvRkHCfcLleo87awmZvGz2uNvspMCcG3OBWfEojdnTbEi4eZccJBVml1ocNA3kJna40bD066bD42GNQQ2Ex0E8ikN0OLSNAkhSuDnFCkVTIdxoalTwCU+tt7i7kiiiV3ZXeHX7OCtYd/wWLY3Y0IA7bqdIt3hHG0pwIDQkCooY+k4UYKgCTCaIykYbOSFiGlY5YRO9IirbuCeHoEZFR5bIINNiRmwNOtXb+untTj6xNpbvletTlMCILP63uxzxlLvGyMCer/FMeIw9O4zwuNF0nQwawcncVl6eteux1gitliJ
WpjiqwSHKYQmgE+NmCqaZ9zLVr6Fpk6oT2vcVa636qH8Jk367E98B7LRjBUvSDrsgQNKOA260xoFESMh1ifCC1gzo0Wpklyh+GlGEEipf4b8aiVFAPOu79bn29Flby/fFOPU6qx4EV+0cNPutvcr3VEWQ9Uu18iz3cxq15HGuivS56/GUKs2v8Du8srlCzx4vdOf40OoetqnF1bDCc/sL3MQWhyHg0W6NXd/KWt0HjL1WSbQCggQgpJxuG7oRbTfCecblao+nNzdow4inmxt86voBLvwe536PTwmPcOH3WCHinh9wRhGegBZODYkyZgYA9crXqMbWxtFGTtaL8EijK52h8rQyRVjHLIGzAjNixMj2HqHnosgMsJDooqzJvYoycwoYlT6UyAIBWWvgRkFhTL2mJU7tmIqcshRZTUeCxeIpaHkEupg3thh2ppz7qi0NCaBup+86THkSOh+D8uDIhB079BDj6cAegypqWzWuLVVJ9gcBkx6PK4lUigFbBZaG5HDQtB9U/bC06Dqd1SI2Whc1SotxEaQ2XaCEu2GLp8OVevl3uO9v1OBmXDhGBwFFggKSc9kcwRh5zH02metAaOAAktOVNxbpgYSD6xGZ0YOwTVv0Kl93Gu1XRwTNI4hajfIRGZuyjC3PN9C88IfJHaDInKyAV/OWgXkUA9UDaHStyDoyYFYiGyjzEmfGi7pqpvxAWmdP1lsLl/VrA9gEuI24624wsKR+XYcr7DUS9ypVxmpsseM2n45loJGlkTliBf92ui+NuHT7HBnVUUKAAhOV0emJ7IRklREuyw3KY1X2W2aVDUABnsGILGB8r2vwJtfQmXq+ey5psJb2xHldydw2VOr3BERsnEZ4QcAhizoJxND67miqObS2E4CIiFH1mAF7bNMNRpT6iyW9o8gJSWtyE5k6J29RRKr7tVUUyBEv2jWVvbLJ4y481tCxRBHjPhWZzMdRRAQBVAwwqO9A1TuO5DnC76KPAQIenCuAFDHi4AZ16InzIOq1RYxnOQpzKrhSLwhUUmegtS71eQbEBoo4U4DPkziFJHKI0JJDq05Kc4KUsShjwswKEHAVUVR0SXN42ro1sAlUxoSIKr2K8l5ogMoFFVDthvfodV+RiCyNKGKf04ZKxLxG6mpKpEW6CGhUovdWbsCl26FzAxqNNutoRCDGhiJWqqd36iSivC5rfVfI6bo9Rbftl4nKmG0oYVQH7123xdNuhxGE69TiU5qH6DlI9NUo0Ve72ODheCZ1QtlhO0pqIwCV27KP3m22uNvs0LgRrwjXeGXzGC2NOHc9Lt0OjUUVVfuvm6wdzrJcnNmk8sspl8x5o/DAyIye+wzASLoiNEXfY1CA87rai7epwza1OaLHZFPtSFpTn6PA1jRkGdsQ45wiWmJ4InRwCBQmc0YVD9rc2JWBl2ZzlUwOdXZm3SxNZMBcPknfBdhn1uh6HOs9FtFksm9UoHyAx5WCpPvU4I7f4bn2Eofk8bHmAh9rzjEkh/3QYKflLWpnVhMiVs2IJkRJQ2x2uNvssXEH3PFbXPotAhLO3R5rNwjA7CS1M5KrIjNRaR///LSARk8gYlYAZiomngQOye9uu+H0oo5iovrjeZEIu7Vpavpl9hJWTUnSNUCSnsaak58LXXoGOIGbhNS68mxTWpqUj1at+2EpaMwKRNUFFq1Wkud8MkHwCeebPc7WcqTpK7pr3F/doHGiTN9rbrJnc+WGjIjX0SSdRu1k7yYUNFIlKbLDdVb8BMF9GCR3+Lppcd6sMSSJ9NgObQ5ltSOyAWSDwCryrxupQfDq1eOM3L8qPMKrm0foaMTGDTh3PQISVsTYkHhJxYPl4HWrFKVazMfayB6RRDlh8UgNbBEKokCcYie7lQl6M1SL96MAU9mIhYUVn4aG5Jan35+EY1eKVBa8me3MoCnPXJFDR6bwMO64nURZMOEASd0Roa2hn8SqbLNuXpzTEhoSQ0lCowkeTQbNirp9O9X9qF9Px/c
4muY2OrXEa1hgJEa0aADuceBDNnD7yqAvUU6i1OmBPGioRPuYx69+bu4zlfE/9nJN/z9vcwJn8GJAwphKhEziYvwZ4CZRLvbbGXhhMo6ABlbzgHK0DgFYE2GtIPOGDrjrDhgYeOyv8apwJeH7cYXn1pcCAMcWz+0vcDWu0EePR/s1tkOrhaLFA5MjPbzU5DhrD7joDmhcxNPtDT5l9Qhr3+Mpf4PXtA8kJY9G3PM9NjQigLB2Dh1CViAs1aznhIHFRD2ww968uiiGT6CUo7XECDYQhGQOURuJRak2oziYuk1T/hFFPeU2RU4TGHnOx8fzw/n7NdDocpuoep/UkJxGtZxaBfUOZIYEmLP3zr5zaqubG5lm6CGDl67wO5WVeKolxeyAys+k0ZYJAwZEBdwOfJPX246DeETZ5bpAVktqmzoBdVPI0TkF/KIcxl+M0pKi1dKYPbln7iApIIiqsO+lfgoxzi2iCHV6I+NQRYmKV9pSjkVG6hDBpEZHwJokI64hhxZe59WhI87gykARUWdm4AGjjpcdpDCZkwoMKCDOExwJKDLHUdFDMv/iGEjO1zQ32UuEoq29McUMmAwZLLJ0zOO219ESoRpfp3sxgTTFVFb4yMBdHDCwALT7dIMD5DQgMUSLeWW8bDXdDGDbqOHZAlg7GT9XgYDW13p/eql9VocRyPeqDLbK+DTDMzJw4AN6v88psRa5KFFJlPtRagdxBvosTUuiT2UPkvhfkds2jrWpXK/PeS+SMkIE44JS3gOF76aOG5NZ+XUFHNV3lnQxAygJTS27Kp3GVaM7l1+1vBwl10NBWQUvAPQKkAhg4SagpxlWjQKC1q4yJuV7gUjBUNJUZg8Hh4YYDbk8h6ZPyZ6bdOzSZEyszWKUqoGax4qOxtOiLWQ/EiCkUR70lYSvjf/ECYO+igpU2r6fdVIm9CjrTypdyvNaTXGTvss+aGPidK3Xa1LaIqPlQFiT/CIRY5PlODByzDIr6ryYfmy646ApRRKV53MKeJ1G22gUUWNpvyT7NAFoyWmEvzlIp5Fkx8TZocvV/NmcnvqdU36F9ruhAIbotucke9be7fE07zWN0eNaU473qcGjZKnXGkXLjfKlAacJd/wuF3K/43Z4yu3QUEKndsk0QwD6+7JKiglJGSgiXeclUbMehfL/REV21+BiAuteLDJ9r4CgRCJq/TMAkS2tHrkUhgPQ0Yi1pfkSY61ZGh4CeprD85TzncHQrVB6WO05OVqVaNYTnUOq5vOEQCp3KusF1neu71buWdY0Z91kBGGX9thrGvKFO+AVzTX2HHAn7HDW9DikgEeHFV70Gy0uLqm+zJIifdb2aH3EeXPAnWaPO2GLjetx6Xe4dLucpbGiEZGknMuBgjhobgUk/vloAY2eSKc2zIrquai+WhtuR9N1Yv5OPiG/KSkxtzZnfj9CLjJmJ2N4n8DkMLQOKSZBjEh/TCingx1bm+Bw7B0iJyld5CWfeLXq0WlhxVeur/HUaovWjXhle4X7zWOpfO9KGF1AzEd6ehX2XgVIoyg5UDZrMQr2mi9MuEkNtiyI/JVf42GQlKDruMLDRgCkgxZqG7TY4E6PyM6GAIn37bKRAnmdG/BM+wivVKDoFX6L+/5GBbJsQB4S6dPkrbDMS2KJUDAEu4d5sQgHtmgCAb923GQk2vJsUWY5Ky658COK4dKg1AdZqTdOolSQvXEWWnsrb81YRxSKAhwIqm6RBVPl3drZqGCXzZqwgoMnAUFaVQ7MEE7KQDX45LJiUYzs2rA1AwNiqyIh5TSgut0z1q8MnakBMzWCU6WsqdGtm8BUiTseK6CKiiAFDvWpLQHnlsKBAkzUqq0pmKa01CrvHEyYRI5xuW89jsBpkWCeQkljZAyQOd5rhIYVdDavnvGjGR0WyWKvJ5ElIFgefKOpJheux1mu10JSFwjI6XAM4NLtcddLBNKWG7wmPsReQ5uf6y5xlVbYxwbP9+d4PK7yqWN9lG1KTvsY1eO
2w71Wov+eCjf4lOYh1m7AhevxCrfD2ilQRJKaYON4QERkYMeMgyqo29RgyyHXXtimVtMpJFqOATSIFdAtRcRDFeYdcth0SUcReWEGbh15MU0L9LoGPBw6i4M/Ma9F8asVoLIeaPb/qUFVdqQpf98WlXh8rycKkydQfd+E4vkbwYjJVLES0VavCwM1TEZ05BWMqIxvllSlqPfu0VcgIGGv4eeS/lfS7A5aDFPaZU8tRkhZAZDUUkSNKpVUmBWNcLo3rMlSj0z2OozMuE4JI0qU1AA3iZJKmmJ20CNLLS3VgbGhXpwVBkap99WU6WysOotrKXIDOC0fAUwMhCmnTXlhYiAxNBUH1eyQ8rc7+n1ixsgxy6LaKOz17wiZG9vXbT4soiKn1qHUGmmqKKlW645IvRbdo8kMNqeRFkALnw2EkRJGjHLNY25XPV4lElV4ryVLW4WmEcpYGxAv/UrFsOQC5JZ4KQN7Tz2PJiCezxEdCjYTAcRYsc87YB0NkjDm1Asbe5ulUocFVYRXHXE6lRN5D2IDW6wy4lQW2P5tkWOZqo1pun9O4bJTe2uJNiTV/aqxPvrVlCRCq0R3DAwcdI56OC1YLcWrJQpCT6PV1D/Rt0TrEN4aNcUHms5raeMlmrOOihaAN6nMJwVuTB7IPAr5k+NjY2t9sTk8ltFmEUz1HKc8k3TOekQkBgYw+goQMjBqZIceLqcf91p7aKzkotXYiiCNSJO0LwPQG9VGZK5sfTJaTaUPMB1RADaJFJH2GgAOoDrcp3aC8GS9RIyIPOb1ZP2Yp5S1Gv1V9tWyZ9oYDoiIqeh6dS3Rmp9qh5pFmcjruiSArQ7WPd5KUhQnmieR14DUjjpT4GXAgD2L42MAYZsCepV7xpeFJAL2jHpsLPWQGGfOojoJoRpTmvxS22rRmFmXTBiMj1hcG/aLWtLXNb/MgevgckRi0VEBc0yK7SPpwwMOMBtjHpVKxhtkUZoupylP9GHwZN4E0DyWB54L0OyqdWjtR75GdV10m4kdUv3G5x8o35wAYuq1yowcaZcA9BR1LEasKOGev8GBA87pgItwwD4FfCxcoPVRapqNPtdoXIcBl60UB7/b7HAv3OBekAMk7rgdzt0BHinXeYrsRA9FCbwwHeYXgxbQ6OOll6k4n5yeW+aMZ/cl03Ce9FwGKDKcHdc4SnoaVdoDkZ68FESdiY1HjMUAhQpr1yRQkzQNrYAXclJBlBaRAlF6z7aVk0MaH3FvtcV5K8DLp3SPcL+VaJ1XhCvc91daJHrEuZ7WYMK1hHMX4VR754saD1EIFPvfU8SeD4ggXKc97vobOdYzdXgc1higNWW0aNuYrLikz54Jq5lzN0jRwJZGvMo/xivCNRokXDrGHWfhmyWipxg3rOkBjJRYFWBSb6nTegri7b7hUn9AUida2ClnlrqVa+5Uiost9Do3uVNlJp9+QHbazJhPZvFaU4Ly/U7wG9u11UkR8CDCaW6yqElWoNH4yQwp8Q6IEb2hEZfuoAAb4Sx74CR1x0lHJh7C2jgZOGFAArN4AoccCYOM1pvXKSrj2mY9XSKnUrxMlFe5x1W6jdxLFQsu3+PZb0r6itaOUWVpTQlr9Ww1ROhUMa8939ZP21wl6syU25K2FFHlXXNRSCIs4sXqRJQ6Mi73ooyq1XaycNldavJx8depk2gLLehXz3N9hK7VWLB7cTUGotQnrN2Qi8A/5W9wx28RKOGO2+OOO6ixC5xp1MtGvUiyng942vUYABzY41PCI+xYTh95YTyXuiMQsHefAoiQi7R6yDHl98JW6hW4A57ykkLSEeG8AorEeBXQ7CYBB5Y6E49ShxuVCY/TGldxnVObrjUff9T6IAxCS2MuFtso7zdaMH5Nfa4VYIVvhRdL6Hw7KYhb0i6D1hIwL7t4j4t/YGIcTCLSTgNLFiOYFTl9vyiKJZrDeI5n/+y+lZmTwS9RxouiVa9nrgyd2sjLEWwsyret6QFO0xKQgUojuxKATta
bFGwtUTINXAbfAhxatZDX1ncWz/6o6YYjYk51E3mCyqAvz64LnU7SRAk57aipPPu6IgBI1Np1Ei/1jgMexEZD9wOuUpdTrHcsoeqJna7PBgzkUHIPOQHKilyeuwPuuZ0eRS/pFg1ZYV+nbbHUr2m0YuEN6+s0RcfAjmlEhM2TKemUo9uomveaT2oeMseDzHmpgTOwx1bHoOeAa62HM2oBaNsPy75THcZAYrjWRbU3rtcCryPOdB22xDijqOlNFoUhK6ajgLUZq8ojeaxmhkPpk3wpMmOXxhyhMSg/J+1rSTtysk9xOW3KeNxq7NX7i0UBOQWcW5UbHhJtJlGNpNESrhQrrvrxUlRLixz9kqNvCgAl18ojoFx0nKp/QdvkUQC6nP48091ORVxN4aPp+zXgWUd6xCzZUDmhMImW2SfCXvWvLTfYVql8liq+T01OXR00omNU47xEdJTTpKy+UqMHZ1jxZAJjo/UXg3r5L1yPRufOitxbNLFpAr66togX4zkjr+DK8bhNx88AeAHHY4702DFkHBjYcYtdBZRbGuPAvpy2q9dyLHnIunJ9spTpm52TWqStFU5GXY9OTkq04sqNrkuvtbLOaZCUTpI6gnXEntdxaOAmY0EVwFbzhtWSyiNX7U+ZfzjllNWeRc+U+osOBy46ZIZKuPxW+q4ONR0vW7tWQNl0x8RiH7XQPZ6EN9Y5/ZOxdkn39wJodygpxxGMwY05+nvAvqpLqmMBkQddlVJmOqatESsZMdn7dbFEG4fc34qvZiIkr3mqSkIAOSrV0nBNF/EqlwAFSMnmqQLTqzbVeo3L+zjlPcnae1DbQIA05AimUednTkEjBAmWDTLC0vHqvbzoVNqG7KAobTMgFii8Nbc4nkQZAAfDkUdHsj4Dely6AwY4rCni3O9x4CAHZriIXWqxHZt86MHaD7hsBDS643d4KtxoXV05XOOM+pySaKUHLOne/i/62QRkuBWPeClaQKOPk+rMsZfCj/JXaw2LZ+9VX7aP69Sx2a6qH8qX2ZEUoE6kx9wTuJF0NHIM51IGdiIIQxOBOBXKIIZv9Eg/rd3g9OSQmrzTI8+d1Ha4aPdYhQErN+IV3TXuNDusaMCrm0e4H67Q0oi7XlJT5KhTSWEx4eCrJegmynfZRGvQqKOUN8m1T7oBMM7dHneSAEhbDrjxrXpO9NQdOD15R0EjEoDFCitfavGwhhLuuh533KDKmsdKhXuEghpg9CxRCpGlMNyWQw6dva6KVl7FVT715yp25WjgUdJx5Khij15PhxAhZaokJjU16mNnWz9mA/XM93LSCcmJOLWSU58OdsyTItHNUDLAwKIr9knyqcW4L8dRGghDJPnjZ76HhxQZfEW4wkpPoXrK70VZAPQktrLpmBC2mg0MYJejAUg9gVMlR0Aen9MTWdvMKAIdKiwbRHgqp++VYtnlNKiIAgbYpj+t0WLh0a76jHQjZS16Kt7HC7/HpdsLeOZGnGltpgbiVbJ68aaYjJDoBzu+9qDFBK2/5mm3ArOsm6MBOvVpFgVc5Amgl7R/VpT0JnU5FedqXGVQZNATCa0Aa0xVpBGXkyEMsALKxuNICk+2XhSlp9sb3G2kcPwrwhVeoTVe7rk9ns6ADrAh8aoHAtY6cyNH3HPXGHRNPfaPNfLHab0D2abaqnDymRtwQWI8rohwRqLQGD8wgAMzrhUo2nPAi2kthYI54IXxHI/iGkMKeDiu8XiUU++2scWNFk6ux6HxUaOcpKjsSo8QD05OIbFaNwailVMrohYgH9C5AvDaCWK52LsaI8K/df0BzuNt8sBSTGT+p1uFVYGxGknG0+VEI+GtVBu4VWHjsqbKKytaT7CaRAVimRrXNDl5xN6zNWbr2xTwPTdquFjBV1+UVVhEowB0QRXyTVUPbKXAgEX+tFQikxoUEKWjoubU4O006mMa5TTvnzhOaqPEDFdRbHdaw+Q6BTxIctrV47TCs8OdXCT00bjGTmtW7KIdva3RdEk
MV09yqowH445G0zVOlMVXBHHGrGnApd/peki4oB5drtVVUjLMEWN8YX0UsMPS5CifMCNyw06bMaDHqYe4dh7kXSTzaD3Xtm9Y3REp9q4n23GD67hCr8WB7aCEqGPSxwJcp7wvWvQeY+VHrL04n1ZuwHk4wCtf3NExWbkBd91WgF0knLtBgH2CnF7KyDWmPNUaB5R3ORezH5BwYPGc9+xww5IKNqpjKBfq5aDpM6VwfA1+2X5SIosNhCORAXoMfK55V9UeWqnxuXEJK+ozEGH1E+v0IDPizACzORegsKQk9WypQCJzrRT6wOX0Rzv9K4GKx1qN47W2t1EA007fClVarNUFQm7naQCpNm5t3K29JfqZMsA9Zn7VlBfl1+vUllqXSQ6lGOGwT23WuQ6pwU0UvWzQU+hGNvNODS5KWLkxF0NvKOZiy55Kkf8zX1JUz/wB9/yNROBobZZGDxxoJ5H0KUej1DUaS+RxnfoPBUOmMgewFDYrlq+13tTR8SitcKO671Vc4SqtkBSktTUphaUNKPLY6cEyg9Z+G1M5wTNq7SA76dXpPte6eHTSa+fGopPSqMXWhY/v+RspUo2IC+Nr/V6rGs2k+DSZnVAi8Gw0Ak0PkDEgadREyciMvZaDGJlwU9kDN6nDNtcyq07146IfWwFjc6D1KWR90GShPNPl9bFSJ5qd3nfmpObduTvgMoOLCeeqEwdMI0Y7VxzTdbSiwWVFppcacAObXcI41CmHtc4GA9EkdXtAVVgd5owsNd1qqg+okBT9qCeylQwRT4QWRc9vqrQ3i9YEaqdx/f/ihDdnxkH30gMTHicrHu9wzaLHipyygvKF5NmlkH99kI456yZOeJXEVnMx61Mz/Uu+c7oOKmbPt4taj7A1bbPbEdASIYLhwhYbd8DADiuS0xB3SU6hfjBu0Kcgp3SHfVXw/Bp33RYtSWr82hUXOAA4Lrkr038znKjGGV4KzKhoAY2eSJVHpNKOT+E5JzCg0x/cMjkWcU/645MgIAG5YBIDlBgUCRQlwshFfQ+Q1DTHUkQrDBidwzhKqlYdoUFgNK1UZ3dOTiJofYRz1VG7aihtghw1u3IDnmq3euTsgPvhCneDpIq8wm/xlNshUMKGCBsCHHkEFFS8DENB0abKey0kKzRa58FyX5klRetM84V7Tti7Hlacrdew96jelMgO5VSYpAXyRqyyQin1eUjb0SOC1Wuz1c15mwIepVUOc34U11KUMgU8jmvs9Ojjx+MqF7W7GVpsRzmCcj822A8BzHI0rhU9IwWG8thUgI8jYQxHLHVd1LBY+RFtUAHpohyxTXralksTQWJjzKDMX8YHSQvDmoJ7iAGD5tNGbafxqKUJdH7EKohAvtPscL+7kvQgv8crm8cKxEVs9EhlE84SYE85JUFqVHW4TnIC2c7Sg9iOIFfgL7lsXGRvz6QOiPKyi1mBsYgyQvHyAgLC9KmKqtF7lkgjUegNOLCZMOCsVYXSU8K9sBWwhCIu/D6nYLY05uLe1l7r95blZLmRPa5jh70qMH02QrQIZCqnrJTTcSgbJBlQ1FYbsMCgyXzejHKkrRyBKke9JhBidAoUQaMKqVxz4RPwjJNY5EvwSWsMJVyu9jhrewGNumvc767R0Yj7zWM80zyUjc/1eNrtsaKIpgKQWk0BAUQZvssH9CxHh0q9F3ms1VGwIo4ronwSjv07MHDDCSMD19zgY3GDG+Wpjw539HjoBs8fzvFwWGNMHld9J0BRcuhHj34IeTzMsJOIzVjkqo9wToxZOcq6yMngBLQ0HjFQt1a4O6dH3bpRT2OyQwGGzL91Ws48Rcf8nlYbALBokGKglpN5aGLEGkAsRXPL6Wt2OpvtC6Y81YpYfcJRDVaaEppBqspLa6d4JWhUTVJeTHp6la5vM1ZNYYMaIZvQq2Et4IkcxR3zqU+y1/T5xKByitLUM2tpIlb3LcwKodt+xPn/031JjO6oKS+iqEcANyngcWoxwOFBPMNHhzv
YpRYPxzU+vL+Lqyi1uq6HDocY5KjdGDBGTQ/RdSiyjPPee9b2uFjt0biUj1TuvETX3A03CiD1uBe22FCvUW969DusHpNQrObnoCf2mfK9Z5MtvpwQoyf5GfBszgOTt7Z7WAQsTCaD9IjikOe5TyGDZYcUcDO0GbDeDXJMc0pyrPaYXJY3rA4yGw8iAW/bIOuo8+UEu40fcNnoKWe+11POpBDuPX+DcydHkF+4HhuNzrXDLGr5YTLIotAOeiKd7U0P0kZPcvW4SavcRzvdVeS4RDcX0KhEKGRnB5f913Sq4AQYPfN9Bp8v/Q4bdWpduh3O9GTRjmJOA8oe5rzHFs3KovykvqDXFCWHXQp67LREvR30ZKq+AvsOCuoyStokQcE6VxWu9TsFtkqKBMGiyQ0g4SrFY7KVTNJ+SnSa10hti3624+mFZ0e4yZ4Z4fA4rgUoYoercYVHg1wfovBfTFLrYz9KsdmoPBcNNFIdzBGj9RHBV3JcHammUzhirP2As6AnloU97gUpy9DRKPqPHn8uhZljjo5rzKlVjWkp7s3ZKD8ervLOCOUzjRh+nFbYshw68uJ4hkdR9rZH4xqPRwWNkhe9h+U4+4MehT4mj0NUgCQ5DKNcMxNSkr/OMZowovFy4lXjI4Ieme6p2pvMftB9bq3OlbNwwNONnorlBtzzNzlKa0N69DvV0bmYXDcktcSKAV8SFwuYKA5IObVOQESL8nyU1rjWU24fj2sF0sppdEXOy9o0mWf6Ya/gsO2Z9YmLiak4bvWkqvNwwIUX0OhO2OIpfyOng1GPu36rTjQBkARIJKzhNNIKcFVh6rIXCUhU2yU7BZd2HHCdGl0/VjC90ueAfOKoRZtZFFmWTRniNW4T2R6Q8sm/KzpO0Q9QkFWdOWuNtDNw28rlWuROgf6Ml0sU2IEJ18lhALBNLV6MG9xwhz55PIybHCXfc6P68XQ/srqDDgXAlHVW+lHbgDS5rk9m5GyviM4leo9ZA7ZfnLLT7Tu+WtuTenKQtGkP4JzEGTkygXAFT4w9ezwOa1yO5zhwEHnixZ46d3s8Ha5x6fYISFi7iA5WE1OAdbJr3XtkLRf9bq7L538vkxbQ6CVJ4ItT6YBHQFH9Bs/+nvgRzWcvo1HTO59kTtJIIweJNvKqIHigFMGW6KAujAjJYWg0koFLQ4gYXTNg3Ymxsg6ipHuX0KpH3ZMYN5eKdq5cj1eEa5x7qXJ/z21x4fYIBFxSwrkzr0oBigrQVvL+ay+YKep2mkVGakn+1l5Bew2SUN6WNBWBSk57gqSzMVuhP1EGLR3OUuNaDV3Ps6Df2zNjx5IW9Ti1eBBX6OFxFdd4frzIJx48GM4krD4FPB467McGY/K4GVrsB4mM6fuAYfRihI4OaVDFmDEpLj5hqJo59DNLEYTVq/IJPojB6H2SCDEAzonAO2ZQiwawl6q8MiEmyopCjC6fNMepRA9YNA8I8D4ihARyYtzcXe/Q+hEXzQGv7B7jLPRoSU6eW7khC2dPCcwWcSC584/HtSg57LAdW9yMdsKRx34MClw59KrgSX2N2sBF5mWvhnyOmtNxtN8ApEXmDChS8MmANANIKmFrT5BpUrDESUHmi3aPy26P4BIuwh73NNpm5QbxQlLMAjyp8mHe9YE9rscO+6jHrMaAIWlamRZwZ9ajSpMUgUzssiJnSm4GGQ00su8p8DGMHuOo3qXBIQ1etXRSoUHTjYOr94ATwkee1wcGa1Tj9WoN3wmQ8uz6ApdrCbV95eoKH13dwcoPeMpL7aGN67FxI+65PdbqdTsjSS8KAC7JTXLlp4Wep3MOAAdO2LLUtLlKDZ6PG+xYQNwPDvfwKG6wHVt8dH+JR8MaffR4vF/jphegaDh4jH0AJwIGAnR91mMyesY+iIwmx1IHzimf6TogYl2HalA7uxaeMSW7cVFApxy1JOBrcCJn7Sha8+yLwpWKp58UIM6Kjaz1VEc1WHSaGvKm6FqEmYE6Y7Jj2aeGeol
cQwVIyxq2EPy8AqkCjXRd1or1oM+Qo5M9ejVW9jHgoEcMm8GSRaGu28ZFAaddQudHnDcHdAraXoQ91n7IxvW5Kuln7iDGNSRt5EwVx0aNNtsDGpTUqkpE5j2pfi3gLWl6m9Sm26YGIxwexg2eHy8kgm04x4f2d7GNLa77Ds9vz7EdGsToMPQhH6nLoxN+Y8jpgFHljmMpe0KMbTfiwWoD5xnrtsdFdynH7voel+0enZeolKeaG5z5Ho0ql3J8ejmhUvihnEq0SyVd+qDAfFReEI+6XO9TyMCzyd6ZCMhyFkAGtc0o7dUQHZJHP4rRFaNDPwhwxtEhDrrX6DhwpNkTWDdrdaCEBAoMECOEAuS2YcSmHRBcwioMuNvuNBJ6wNPNDS6CgBx3/Q0uvESG2rHO2QjQfgwaDRchp/88jBscOOAmdni+PxfDK3ncTGR3BfJHp4duFKMyOyR4tv9CDuRovTjtjN8bF6UWZLMTZx2NuBe2+cS+lYKkli6VU2IpIWg0QOJyyuLAXhxa2rfrVCK8trHNEX99Cjn69JACDlEiji3KhhQssfE0gM4iSixKyiIqg8qtgJLWVcvvHO2gQJEZ5z177LjNaWSSMiW82SdJZ0wKTg5J9IirYYUr1R1u+hbXQyfyPXoBQhIhJTnW2vQajrJHWmkG20ddENkNEidBlvUK6hLEWbBuBnjH2ARZk42esHjmxYESKGHlSvrySo3aEjFaaiWRylSbQwPli2Qqa8PSywYFd18cz3AT5UjvF/sNHg8SOXutDhGTr1GB6pgoXyfVS2GOo0hTHUAfTY3UMQUxnO77UDA371PVPhd8ygDvJgy42+3Q+QFrP4jMUmfAudtrdFLKkVoG6loKXKtRI2bwW225UmpAeMEiiQ8cZN9PLfoU8GDcSLoPe1yNHa6HrkTAznRJBuQQDtax0rHL+nEqUTn2HhEjuISg+/omDFg3Aijeafa4197IevY7vKK50jTbAXf8Np9eeEGDRsYAHUrkdJ3mvWcBViID19zicRVZ9yiu82lzk5ICuurGpAW2kzoMUlVDjkvEsYyDPNPrni+g0agnXWvqoTpvGnXgWB2wMz0N2VHCmkZ14NjJjLkYROFlyOlriQlbbvFAHfE3qcNzwwWu0wqHFPBw2OBm1NIesZzEVstvO+G0dtR5jVS1Ay4ccZZLotOoE44wAceD6lbGf40b87Nc1f5aPa5tEVvfpoe0elLypRtw4QZY7Tw5zQ+4hz0cjRiYcO1WuPR79OwRoCeMI2HjBjzl9jhzvUZ2SrkXs3ELeCTZL73VJzN97BYc4wSefystoNGTSKTh9D1+QhTRbWgdVZ/zLRM015VO3dYE+aQRABMhed1gPEBeUsy8T2g1rSI6h77xOU0BulGbR/O8PYjh2xxwp9mhcRKCfO73ei0I+doN6GjAU36LczogkJwMsKYEpzVdGsXIRVktJ1jUocdWx8WOb7QTgezoVqCEBwq4E3NhuTr8UY6g92pkOt3nTMgaYGBH2ioAeGK4Ba1P2HHCyIQHqctA0YPxDB8bL3DgBo+GNZ7rL7CLDfaxwePDSsJ5o8Oub7NyEg8BcXBimA9Ojg5PAEWCGxWETIoN6abMdcOqBmZZTqwn48l7g2f0XhnBcRXrzEUJshvNAQAuSisYoAwgAIgkr41f9S9TaUsMjL6RD3fdCg/XG5BPWLcDPrS+gy6I1+WsOYjnCRa1pqBRUg9hIlwPHa57SZnqR4/DoIZkrBQ8VWY4KzI0XW/aLgPVADUCqoIbGfxKUOOkUoq4Gqc8RieAE4Ie+y7/HnTnCKsRziWs2gFnbZ8N3I16wU3xMANqO7Q54uAwNBiiRnXVYJ0qs8wEmCIHTIEeVXJR8UfReso40UA6pwANhDCi8F+yn9YMh6mcOiHXmAAOLIC1A1IXMHYMeEa/7vDi+gzOM55fn+OjZ5dofcTT7TVes76DM9/jwu/wqiARaWsa8ZTf6ylFwIYcVrmIpEOrkygnz8g6PjC
jR0Jk4IoDHsQOPTu8GM/wweGepCcMa3xwdxcPhzUOY8DD7QY3fSt8tQuIhyDjciBQT3Cp1Ieb8AMA9qWvIH3t5PVoJ0nm9cllHebCQKJwk4JOTj22zkn0IDmtReaTRhaK3LMac2ZQOAKCK15eR6WGV22016l1CWIssX7HIlzs/Ro0rcnmXwBSAaY8CVg6jYpkBVnlHVG6SwpcSkVhGaNXYwVIo0McBcDkil8zYG5j1aQMknfNAB+Snnw5oPUjgou4bA7YBDFcL8MeF5q2dOH3uPQ7NbS1Hg5SLqjsdbeZp7HWdWjsvT03WQm7ipL+MrDHi8M5nusvsI8Bjw9rPLc9x34MGA8B/XWLcfASEdwTaJR+Wi1CQNeg8Zk6gUBA7DyGVQt4xr7t8Hi1Bln0cDsgeAFI7nR7rLxEAZ8FSVnOURG6C5pXOTFhGzXCiwmHKMCARZQOqUQcGNCTmBCjzOOciIphyzqPDEKKIr+zYT4WYAi9U9lDoAHwUfZDslykGQPaOsvrTgtL9Z5xaBLgAAoJj9oEcglNSHh21aMJEvVw2e6xacSIv9dscdFoOrHvsXH9JBUcgKTusNSbuh47PBg2OKSA3djiwWGN3Sgg4GEIKrulrwLmA5xcltd8tLec0P6cgBRwKg9ChPPiAFm3A1oFkS7bPc4aScldezE6nRo9BiLXoK5FRRhotI0a4cUGeMm878eQI1nH2lkRBXBhkDpgxLDqwohN08saDD2e6rYZvN34g0RbQJyOIQNIMXvqK80jG+7WxoOmTB1SkPYaaBQ1Ik2BZ0uf6vU6JsJuaLEdGola6wMGdQRM+C8RYPoXiz5mahKrzGYCxlBkulh3Rc8i3Wd9kLIOTkHLta7JoI6A4AQo6+prBbxLJLQ0pKTFlGjOEslpzGMOLeT57FnW78N+jZuxxRg9rg4dtr2ePtoHjL3wKFSnUCVc9j4ueqnpei6W8an1ntSwyiZGdLIW8zq1CXWiA4AA8imDSyFEPL86ExkeRtzp9lgHjRgNPVZeo/8s7Q1i9DcagbQicVZ7svRuqa1ktQcTJBL9YZRIwEOSdODtKDz/qF/jehQn0a6XaGsDcKcxXWYzqLxjZGCtdvRmZ2IqcB6pXkjEaJqI0CiA1PY478S+umz2eKqTKNEz3+NekJS9tZOTXy36aqUp7DUlkERN6yFAj+IGD+IZeva41nSmIRXQyGrQ2eSMLOmYJdrM5ZIEOXK/2n8JmKx7A7InDi4nvHqm676hiMuw01pzSfW7PgNNrWYdGPjCkMhXOw37JnZ4YTzHLrW4iR2eO5zjelxhiB6P+w67scmO1Do615xcjY85Gq51Ea2WFLB+2CET3kmEtmRmVOAtlePpS6aCHsDg6jVZ2m/OaMD2QlbHlGW0JHHmKKB+31/hntb/vKARFxYZRRH3nJT3PncjztMBI1yO7CJwjk6z06ZLeQJSe5s0+tCpjRV07y9BAXOql/nLoQU0egJJSBdEANqblU47Qe3mSET12akopeOHlYfc+nUDAjKPirKuNkYW9I4Y7Dgbr+swaA2duqCthp1SkkiRToCip5ptLmR95g6466XgltQJ2BVU3CWsKKEUaAy57VEV1Z6THG8KSTPpQbk+ggh6yaW1a8sjjpCsXas640kKy9lJFWuS9DRPQMte8oJzUbZyQkheTDQtscj6f8kFlgr/IwgvxoCHSbxdHx3v4KPjHRxSwAvDGZ47XOKQAq76Di/uziSFa/Q47BsxwqIDHxx4dGKcHxz8oAbCAJAqKhSRi5VTvVpJFeNTc55ZhDJgJN/nokzPAKdKjpXbzKWDfWYAgilSiQqYUBk0mT9JDJukilVqPYZVAw6Mvkm43qxB6qlrmhHBp1wrwDnOCumYBBAaTLFJqtwNrig1I2VjwscCbNVgWz1W2cDI48C5jwaMUKr6NZ8HrsdqCkxR7rsCCATEFWPoRFnYdgkPOgUAKi94nfqVIiEOXsChSGADFFWBhSlvqsgRKp6p2555Yjr
vmfJ8EtxYeM5FwHQR+bzqeMUXJ0yb8g3VZHOkowNSS0iN8GRcOYzrBuwZL6wbPDpfw/mEZ1cXeO78El0YcRl2ePXqsdSFcAe8unmES7/TjXOPc9dr4WNJZRPDF/kUuC0HXGuNghfjGZ4dLqWI9nCOn9/dw9W4wk3f4vmbc1z3HXhwGG4apIMHIsHvCeEgvOUGguuRDVcbq3pMc/043anZUbUOATiVL9X6zOsQyBGgICA5IKpiDY1aYgVIssJtk5Anwt5HdgoAyIcTwKaSq5D0VIFByn9INAVoEoFrQ/3EmqqBWDh9XbUn/7Qyjgu4CwU6qYAG0eSiAOh5HZ4CDLwYK9AxO7QpR3m5JuaIgK4d0DaSRrJpemwaiZA9DwdcNhIJKHXYDlqo1uptaHlTNdgSS22lklLkshK+S02ugXI1Cig5ssPjwwoP9huJaNgH7K87xMHB9QR/49D2yme9AEUZICnDmMc9+Qo0ah1SpzKt8Rg6WVO9Z2zbCHiJtnm+GxCC1BtcNQMaTasxBRjQyAIFgPrR4zBqSlr0k8iLpIAekkVDqdw02XuKqPTFZCYlTAEyBaoFlKUMWLsRgAXFmszCbA1VMp09Ien4sGekoEVXg/BJ8owYGPuuA4IAjS+sBoRGgNazrpfoEEpY+QGrULzHBn72VerOfmxw1XcYo8cw6H4/igzh3pVxqRwtRUYTbKnkJXxCuWMPAeBJeHxUMJ4c47qNoEaA2rYbpIwAMdpQohXNiLNUIatnWEdRjEnStMzY2g8Bo0aCjaMAuWCJsDCHCteRXyYHCPAhIrQR5BNWzYiPdedoNUV3rSUMnBpttr4aNShNtpncyKk0rKcbxpAjCbajGMdjcjiM0nYBvb2m9YujJUfP9B6pFwcM9Q6k4KSrQZGZfM/OMdWnQJT1m6xbVQ45NiCJBFga1XF2CAnbNmaHQPACdNclBSTKNGmqG2cDFlRHGhkwX+rGTXhF3x2S06h2hzF63Bxa9EMQ/WIfEA+yWbmDOI2Iix41cRipXlTrpBOdz2STA1KgI6eJ8Ua2RZ2No+oF6lCJgXHoWiCIzHrYbbLMasNYIm+9AaCQeolaS6rzA1Z+zGCkFeTOoBETDqnBo0FOYB2Sx3XfYTcGxOiwPzQ4DI1E+/dVtDVmfdX+FKebCKNaXpvNNZHfVd+ZgENg7JU3btqV6oUJq3bE+eqA4CPWfsCddi8ORt/j6eZ6Fn3Vz1Q6wuO4xlWUiKlHwxoPho0AiGOLq77L0Y7zqBKGOHIsBThnFyRd61zpAdVDzbEFqPNInVo2V04jrFa67hsnqYh1WuKZ73XeBAir628CwD41uebr9djhxf4M+9RgNwY82G2wHQQAPRwaDJWTiVMlm3RNOp/UISfZFxL5hRIRS3ranwJhBLGTDSiy+rF2naOOXMplB3J0v7GA6lwTtYlKaRcZB3HmdG7EC80jvKK5RksRr/RXuK+HL7UkoJAESQw4oxFpdt9AknIfdPTknyh5EXIiYq/1X61mZJ+CRBnmVMypZKHZM16KFtDoZdKJvR5AGex52NfLmYTJT17urJki5YGkFS85Akwi2LgVo8I7USRaN2LlR0Egg9VUL6ijo4TLdo+nmi1aP+J+c1XVINnjnt9KEVdinLuIDgyvEUUBDQCgPoHlwAkHBYpuUsANy5GdhxTkqHkFiawYqh3raTVnCmhUnyQTce4Ogl5DCsuJUSk1jTbqsbG6SXYCW50YV0cfWQrbyMCNpqH17PDh8RzPakTRhw538aHDPexjwKPDGi/sznCIHn3fYLdtRckaHHAQo58iwfeUlWDfixeVWBRjytEdPAUpqnllR5PXR7xSrW5RqmkKNM15qFbm7a1TG2UFnEzaWH9mz7S2ekLSI5VSQ4hdMfDSKgig5CGe4IlHSq9VISWuIj2Y4AYB2TCJ+ijK3qSdM8s+G+0TZYZKv6vfkp2nOlfkayPullhOiQaQsY8tqWHHSK1H7ALYaRSYGrtQJQ1Qo7EnMaQ
SBKyIJwDFWqlLDFcdc3VkXFPFH/Z2ORJEfpuKQkip6td0CEsfJ30uH7INTm3MEZCCKJUgQuwIsQXgGXHt0N8ooLjq8Hi7hgsJm7bHRzZ3sGokje+Z1SPcaXZYux6vah7jrr+pUkhGSPh5KYb+OK3xKG7Qs8cL/Tk+fLiLbWrxeL/CszcXUqOo9+ivOoy9F4BiS2jUcPcH4TUxXFl4rlaara+23iwN2ACz2qi1tVjNRxmr6ju6vnkO/Pr6O1P+LePO+d7JAUkVORv/yVzmdUyT1xlErNfRzECgOWvM2lRA6uO1YSeQTNZV9YzM46qQm1wEqu/UzTEeCzU46QQ0UFkzBjFeDl0CLCKpFS8vOUbXjFi1euJProFTiplbzTPzIOY6TzliS1N8WNLpLLVu1zfYDY0o4vuAfteAowPtCf7awY8CRIZd4TMaWf6ekjvKL5ZqzkTwja4jNdhSy5kPU+MBBwyBMbQtOANpCeRTBkEyoGjpOAwBgwatpRcBaBQGRWj0gfCON0BnNm8TquVGtXZMls2BWDnxlct8R5zekyZrpJIzKnttvzQgV4AX5RPPiK3XsWTsV40cEOIYV12EU94IQYyKuiYGA9M0nsFj3HtwcsBAcDsnRniiY2dQ7Wix63m/Tmwpmcczv1fz3HpwAEbHGLoWNw1LRJKXNL1yqm1JE7L0KUtFNwBZIlkJSJKmzAoU0UiVs4Jy21095xUYkJqAfcuARnrdrFYK3iY0IcKrpz8oQOKAfG1guLGNRSNa9MBgQFZ06EcxdlIiSWO0yI95aqe2WcB/J/MxyGvbRycg7Ym5Mfl8iufMOQJYJKm0PgVksI8DY2hCiTKtok5IQSeCzpWr0iGpaguV981QP+IVFcopEcZBotoRCWnv1dlGcHvd5xJUn6JqT5jJn2pMTq7Dmkf9bEzm1ma1z2VdzNv4iOxmL5G5Y9cUmRUsClf42lmqt0aXWg2lOqXb6iZZRG1iqU9107cSIZkchoOmA0cCH3x2RtJACMN0b8w8YF2p9GYbN2FdmoDbE/068wwL+K8RkakN6NsEdhCn6moF8hJ9teokOq3TiNF1kBTbc029nu9NV2OXyzdc9x2ueknB7IeAfd/k0gWnVFfWlGBzYMqap9KH+jeV7pI761jTE4VH85xZdKQGKqwaqbXqibFuegH7SGs++QLSW7Sy1BzTwxCGBo/7FfoxYBw9drsGwxBkjatTvuZlG/dkupFnQNOXS3uLbDQdqpR1KLKzvF8i/HJUIRW+PBrXW0CjfIARMdYaFd26iAerDZ7vbtDRgKu2w07rFt3zBzzt9mhIDtJZu6lqZ8+Q4AjpvhTDl2ydgUtNul1qsNU09IOmHEeNhp3Ps6qFL5sW0OhJZIx0Wn4DOClfp+/Xgsg+MWGUsVZkhRk4MYFzjjThX2+IdbN1oZuA7ZyARjForRBGrj/hKeGy2UmRTTfgleEKn9I8xNr1OKcRd92g3n6HtR75WndLipdJEeoRhMcp4Dp5KTiX1ngcVxgh+de71GrxY1+dTEA5P92KptoRlh6Wi5pw7vfYaN78pdtJkTlI9fgLBZMEqR3zqRR2PKQNmcmZnlmjFjxejCtcpRZ7bvCzh6fxoeEe9rHBR3eX+Mj2En0M2O1bbLcdxuhABwfsvCiLoxifpKkGrjdFC3ADT7yr6mSbCmdTUPSa54rCLQt5wh9zvpjxTf3M+fOp/t3kc76V5+XZNDF2k5dIE87GjXqDHZCCKAr2QIt+zUo2iwe+GPBqXDHn4u61wZnbNmkfT+6vu0EZW5gSwPm6NpbnoFEZO57OFVTB9UWBjjnCBkiNKkZqxKRgNy8bc1bkFMhxoxhRmI2JgTuE2fua05mNa5q2zT6o217LCPs9qt/KfY4ByPr+p3gugx8QQ84URL+HjgkhbQnjjSiPaeWwvwngAOzaiMdna/g2Yd0MeG5zgbO2x9oPeHX3CHfbLRpKOHOy7hmkMqRBhHjZXhw2GDjg4X6N53bn2I8Nhn3
A9nolQFFPcDcO7UHWZ9iTApKA7wUoIgZoRJ6Dmh9YI4rKtXw4H3fgCWuy/pzqa+VPix60Ma/vO7l/sS7m0YVM1ddr/p1fz4yFI2/pk9a8q65tfZ38sralljM1aJQ4A7ZzuTgHQ83wSL6Mf2oop+impsiZ2DpwgAK5jLEVpXHfJDzqUokA0FNCpbB5yoqd9ciK/2eAhYvXNmpqHRKQNKoBCaC9g985IAL+IEARjbK2w173AQV+J3000VDNI3vKciONEpVT5Anl+U+BslGWgTSS1K2U0yOtU/ovUeH3WeSrgQVZ3hpIbzI6QpwJT+CTmufst6bcZxmX5RwX3pjPOeEoko8dVddcXU/HLo+hymT21X7kxYAbtDJrHxgIqQ7yFqojPgeCOzhp8wj4vUZu2l5le9IkcuPEPJ+iWo4a+FU5JGyeja9T45C0GjAHLhFXFgkD5QEDkmsZkIQnoVHEfizRJS5ikp40ARJS1UadgxS4ioBjDCsP1qhjCuIkymCWs5TclIGQuuKDrTFAopySGrWIWmdHI7lodMVwHyWVeCJDWEEi07lGZPk+6Vc9JzOZPQXzZ3LWyZfq7zhf5g1O162DAOr13mhgm+rlk0JqkzZUitkp8VrJc5lPGSeKkKhZ1aF8X/Y5p3JoPgbz8TipA832I5O1DJr0b0LzfclVYGhTQN3UuCznOKixDwC+4t8apPAJFFKu1er0uk6J5VGjzZRvqHcyRtWYkK1py/yqhjzv6/r+KcfpES/V45b7TnlNy3qhvG9x45HaANaIwn0n68WHiBdXo0ZfJayaAW0o6VAGkO2HgP1YUjAPloI5iCNgEiE1Nw5ytJlGQFYRkbUecKTLaOena4KRqnnqDSR1jOsmVZkGUQAlkoi7RqPsZJjkvkPU+nfJYRgd+kOTnfK891JnMpFE6o6VrmJjXzu1KrmIbKPwxIEHoDjhgEk9s/o7mHxH+3dqWGtDA9X3CVkONo2A6Y2PuOk7PL86Q+dG3Kxa7FYtVm7AM+ExQiPFxjeO0IE0yr6c9Cr/OJd0GZgxQjJ5dqw1qzhgmzpsY6cHj1iE6fQk5H9eWkCjJ5HO0hECfQudVKM5/2/CgMZotbCayB+ufl49IAtxB1kUanSQlk0XZtU8YktP8z3s+Fo7zckijTwlXIY97gapV/RUuMHTfosNDVgTcOkIDQXYUa5yBD3nYxEHJjxKDtd6isUL8RwPo4RMPooSDWCnpuxjk0/1GbXgL3PJuxd7VnJxHSEfV21hjmstvHYZFDSiiEunNSsQNY1Oj6+GFTtDtdg0zUCPkj1wwHPjJR7GDXapwc9tn8aH9nclR/xmgwfXG0k923vwzgPRgXrA70hrE1mkiG7WAxdFzIABQA2lE6yhipgJ6qM5nzOUfuBmr+26bGx8Ull6sqIwY+wjpUpezCNNANkcU4/KuEH2yJkiPwEeKoWUeBrpIelTPI2KqaJkToNGdTtxtJbs7akRy3n9nRrLk/fVvzktiwCvCoEoB6XvtdIAA3DY+ovsjXZV9EHmk7rv82uezRWVi2M5VRhqLrfmkWq1QmBghhWjzqlJtmPW82g3jDxRuCUKjZH2gN/JmKWOMN5Q9jr2mwBuGPs24vpsBd+N6ELEs5sLXLT7XDNj7QcAyHXEIhOu+w6P+xXG6HDYt9jedIjRgfYO7tqjGURJ9FuNXkuAO7CMva1PPZatNmLlDcpjYsosmXI6NzDn0zDnndt4qQaf9G9+fXIj0dtVY1y/PvW8uSHwkjKBjx9dt+kl2zd/dr3eUPPyMb+f4lHA1lExPFJgMV6drjU12lJjYItea6pkajxSyzm9bbSjXMwjOQfqWQfV5LXWr7AULdJ6V64Hml4GxR/MeSDOAn/gLMd8XwEmCSC2eS/yNO8DBLCmnIMgci8WWVsUdjoyVk3eJgXUynyJIK/H2VJUs4zNQBEfRQdl0EflcL7lLfOfeayWZaz9QJn3I2MLmER6GFBY5G2RQVPer/YlTQ2xyAaT0Wz
g4pxPAoO9x5zKmFR7vAJq7sAV4K/9zHvTbAxOD1HuR37vKKqlGM638bhFWR0Z6VTk8KQNiY7nto72qqOkuICDp0EjQmptPRLiyhwpKHXuzKg0AHMCbFUjUUc6JNGrrN6Vr0AuNwKIsg5Ptb2A//Y+Z/3rSQbxBMw/5SQAZgAQijycfd/lz6hae8g8On/eEV9Ue0H+e0qG6zwJGF05LA3MHDhHaLsqtX1ii9zyCNT7+4y3HFvb+KgfTwSQFGTKoBNVAFuV4i5jWpVccMhjz54zMB41gjA33uY3EtxAUiMt2h4v/CXXOiZjNSb1kJwa9yx2+Og94FgW1uBE5o0K8M8R2bqXpdZrmm2DXZdyRKTT9E+x6SB2HQNx9HqQDqQ2XC9AkYvQCMjjNk70tSyTaRJ1dnIc8nWtw3PhceMBQhWFxlL7SgHBvmHph4MAf76O8JFHpKgRUAyJej24XJrCHxQoSuroqEsHWLMnfCJg7kRGVn3iMqFTPsf09XyCDTg+2vpu2Quz3qzPGTXjwvuEYQh41HdoXcQQpeD/yg0YOo/G9ThzPe5CahF7JNjWa4fCRM3kiQB61gM64HGT2lwI/jp1cjKzpdRrWuKpuoQfLy2g0RMo18Gu5UXFUDU4Wa+9yT2q/xcb7jatu9BECHN1kTcsRvKKGEcJ8yWGFMJ2tq4FOGmdnODEXgonsraBmQR40aPC167HXXfAHTdiTREtOTRE8CAkSOFZOW4YuEqEHcuJBR+L53gY1+g54GPDBV4czzGwx+NhpScWSNjxIYWqSKsGKbJFPZURY/2/HWPtKWETBkkncAkXYZNPprrwe1yGXalToUXXpFq/5ENLZSR53sAeN6nTYnkNnu0v8GA8wz4GfPT6Dp7dnkv9gusG/U0Ljg5+LzVQZCMi+L1sOs4iigwo0s3IlGSqlWybe1MsKgbh2d9bab6ZTZQIniqsqbxP898c8dXxc+q2MAFiOdnzaNIWS72w/kkf5ct1NMpk81JjxMbKZSPGxrPy3gIlwqZeC7e1f96do82eT7x3+1jkMdB+y8Ypm6YbGdyrQqDRNtnTVPW9ft40PaNKWUk12FcBjZOUxhlo9tKiZNJ2MzpFgebyvt1LP58oBKoklHvNxmfOc1QAmTSIQc2OkXaA72QzTw0hrlmUqNajPwtIHWMXErZnHUI35gKjbZDigEP0UphVvWz9PiAlAYroxsNFkkiPrUV6QAz4rEzreGfDicvcVKHOPJlzHYNakh8p8jx7Xa7zeqvee/KU8eTP0ViDjtbUEY/NbjX5LLePX748sOfkNmAqi6rfH92rkknGu3Xq5EtGZJjyTaYUUvbkJ19qbBhgK8CARmiQApRNHa0DAReq6I7JhFSKdR43NjlVRWcMlZE2MlzP+doryECJc0pavpcNnSmUeWCrJc2yZ7lU9Aqq2ll7SQ10KnoBzeRt0T0McJ4Y2nPQqI4OqkD6knI1i0Kd81gVjTSZ91PzXMsWms5FrVfVa4pRyZl5A+rhVDkmcpnzujYQBkQloqd+IFD2HZXVOb1QQcEMfJrsBnRvmsoBzO4LVLK4+pMNEh24Cb9X+2lSQAZAAWkA1OD/xJibzU0dqTtJ9a722dqmqn9vhjuga22gnErp7FrXp0XbHKXdzteaPiPzUx3pFgs4dCvIZTzLNlcVbyi/ztVoW0cMZHA1t1Hvx0TTPYGrKeNK3traAyb7ZRlHqq5fmk79dDJO1b0KaCRtzk5KNj30hOOwuufEjpm8z0V/098UuTS9nqyZJ/ZMQCZXjXXN41NQbvZ+dkzSJLIuO+Rq+ZyQgZM6EtAARYqzyO3jIX5yR+Z790vNawU41EBYiSSkHJHNHohdHVWoJR6qMSMAGFHKGQwWPUVVxOhLtKnWoee6Sm531cUjfa/ILq6+PwG9M3AM2X+1HxahX9aK3iHZvqp6m81hhESLZ5lQ1v2kjX7KJ9Pak/NJmjLtyTmv7z3Xj0/RbXqL9dOxZlxIzb3t6LDvG3gfgSS
lWzo/glhS+M79Ab2/QUePMVDMOgCgha6hhctB6NlhhKSlXacOV2mNAwdcjStcRanvtRsbOdFTD204VQz746EFNHoS6WxxWSdFiwEm0na+OR1dv8yN4+RNJk/hIiQrQUH2HiBFvrRwV+tGdG4QJRQCxOTjXwE0mvp14XZYuwEbN6AFo9HO9iz1EQYGtgwMIOxSwPPxDFepxY5bfLS/gxfjGfoU8MLhDA+GjRzvODRSxIzrI8SlHzaMjOp1NY6WQmDFyK6bEZ3mxl6FHhs9ReTc9zjXY2hbO2IRWhRSe5lPE4CcDnMV5bjZQwx4fn+Oh/0aQ/S4ul7j6mYFHgm09fA3Pntx/L54vLLxmWbjX4fdzxSuotRXG2MNLBCmAvrUuuZq466NGx1TUXj0fdtQSasb1Ip6dU3l58ftBbK3ZKLg1u2xW6XpXwOZbqNa2cuRNHW/AFHCT21wT1hMc2W3/P4EVHuLsJ98PPcS6ntZJlTGgkX7mVGXDaJKZky80joORSEs/TUDLF9PGvjksZ100dpQK2t1yHjmxWqzJTrmS5rdT/tk411HFcgGL432iYBBgWovURkWJRL3YvinBhi3TrzXwaPfeOxXmjbU6BHbAHgkPQ5YC3weHFwSoKiAupCUIJWNokxz4bNaka7HyJd+p2yYVWMyH/7aoODpeHA1Z5Of1jx+UnHj0+9PZ/QJXt6XwxQnUk9vWU6n9rSTbZqsN5PoT7h/PTYvtS8ycnQOESSiDfLCWZoSISviUC+2KKpiaJhXOzlo7RgqSuyJIctGFk/5pY74c2PtueZS0NnS0Gwtc5Gxcy90jqoJU6O7dixMxv0U+IRyT/vwpFirNM8MDsHaXMa5BriO6r4xUEehlPmx65m8OmoIl4bW8igbU2Vc7HpiQN7Wr+pZuV+VzPWV7C51ycrph5PxszHR/rrExTivADcbr4m8fsJ+MtEh519U+VHqT5XP6/adlMk2jrVhQ5ORzuMzB2uP5umWbYUBGf8I1S9I1qTOE40lZbSMKU3Htwa6rZ2zsavBugmYyVDjX+X4HPA6sV5rOWfr3bategwnUQoatY/qO/ka5XUe3GpMj8b6ZJtOjPPs/VOjf7TegAKwaf9d5FsPy7B1aeMwN4KnPFXkT9YRZvvNUYdn7T61/ifrUi+4vs+Jsa7XfS0Xc42pej70GTmKe1bL8VQtp3mXTrVn/vlL7rB1n0/Mp4yl6kMOAhLpOjqKljXr3IAWIEeWAVXE1HztVO2Yz92EF0/JrJejQpyiSibVc5W01hwABU0qQWhyiqu2R6rmEJPrWm+e8AsD5Mp1lqW2picb5e3tfymaB4yUD15iPBzggkbfe5bUwsEh+oBHDPTs0foRDcTGPQs9+hDQUo9zJ0VsclSWgkZy6qSARVFBIykHI6DR47jG42GFQ2ywG1s57GB0QFwijX5Jqdp/KmHC1YenObFepyKIqDD5Lc+i2YdHyqA924SOGXnVBoaEHB1lBbhaN2KtoJEnRkjRtiEwCAFSEHTtBqzcAE8jEqy4FmNQvWjLDV6MK+y4wU3q8JHhLh7GNfaxwbOHS7zYn2FIDo/3a1z1nSyM0WMYQinG+FIIJ5XFYbmgVnzsMIZ8CsUuNFgNHRwlPA4DNmGdTxCx42YNhiJIJJOdsDAkj+uhkxMWosOj3Ro3hxZpdEg3DfgmSFHrnfyzCIV8ulIsKUVHSgtkQwNDPYI6OROBipwLfKQYz+f7FNWb3jydyU7bqqIo5DPK7PMkBizentKOY8/1rB2YtUPbhVpJ4OrntfJzyjNrz4fwJ538ym2LiY82xfJ7wi3m1PEt54oMMF3DT9o86nE53UhMACzjC9Zn1F939R34FqHwBOVEv1p4zLzDpIoYJqk/rBv6Ua2Q2rCyjd4eYnNe15+KDJcLq1YGqoI4IAZ7gu/NkBcwyep3jFs9NYpUqVIPt5tFevieRHkepEaRtUFAXRQjmGdjZP2q0tBq8KF4NFGlGFZTWa/BvBblxhPFrUp
LFSC3jFd1Hu7J9XFkpE+bP2kOUfXOiS+/lL40p/lelNtTN7h+1Mt5QK3szdfay5B5x+2T+baouZIeyRL5YKHyFfBXz2cGJmz5z9bxxOiz92rQKJbreaRGlmu69hiYRVvQJP0qg0bAxLte8y1bxMWkfZxP57odqKn6d0r21vJKdYi5HJ381gqJzCeEy2cn9wvS/1WRMCVNhTQKqKRcpQrQnuyP1uZqTqSNsta4Wo91H7OqPKDsA072mDn/2djWY1KeV+456Vp1MQdHcnTR/P35GNXzoy9LGs2Jehp5Hc3Wvcm1E/cva/aWvbD6fg105XEFtMYQxFBTXsrAVaUvTPWIaZsnbarXWm3kzx1LNa9P9IvjWlsZkES1n9XXJg9ykeuqvafmpnp9cv+/rR/GM/M9A7PrWfsn41N9Nn1GAcZORVbltto40HQccsFqwvF+78oYTgHP020s48H5FK4pUI1Sk2o2DqedI8hyQtrFx22cr1tz4s72ZmvjfKkdPXYmt045cXn+fv2VeX/zD4oe4FC+MwUvaVKPJzVlT7B0bABH4Fedeki32KM5XRLAJM3ulv5njUOFUP71bNBOzlvVHwDHxdNtn6v6frRu6j5GHK+R29anvWVd5CwKb+WDJ9Jt4/RyvlPzKBFSgpykbKBZ9HKQgGswguBDxMZdYN3IYR2pBTrX48If4LTcilkwCU6XktQDjnAYksfDuBHQKAXJ8hlW6KPUwDLQSAD/mplf7mAUWkCjl0u10lRz3m0C75YXJ4X67B4nlYlTnH7iPeMDOTesHMFqX/bOTR4XKEnxaIo5f3KvaOZOj7Xu2eMqrfDseInrtMLN2OHDh7t4MGzQR48Xdmd4fFjJsYj7Br0VZ7NjxW8RLvlvZWVPKtzrUc9EjDg6OB8EQAoBOy8FTbsguZ8GkuUTOlCEn4FFKckJHbu+QT8GpOjQ7xqMdgz3jUPYSdFL3xPcAcV7MYpQzqh4PX+2Bn2ZqxyRAUw9hHOjtN78qrV8cn7nG24qOczHRky52dSgOTEXmCp1t0WZ1AZoPQ7ZO2CKzEhlmZw6qSvfq2xOc09p/vrRxn1qIXD+8pFHvmK+cpIeym5CJ8a73pT0mSe9bdbfChiYR0edpmn9odNKCJ28nLyebKacP8pX2ub6xKF0BBShbPQWYVN58iaK27xZ1WZfh8S7kcCDtMmNJCCrzoszb5FGZ7ACSG4oqSNSSLsUk2RPE4XCnudGzh7XDOpWdVnqsa0Vzdq4KVEN+plH9obl8cljPO33RGHP17WhQyeVIeEZqhQ/Xbdc1g3AqiDeUrjwSTIV1XydfP8JvDW/tSmNtdLFVO5/2950imbrvJaRk/ZOf1LGUMcEqOTYXBnUNV9k04yHJ6kQtzSzku+TaJJ6nnm21q0Zlvak95+cimX13TTqqa7DcKr229zwnNQY0nB+VPyTf1PJ3NvkfWnw9JKc3Hcin6r/EVCcPzrmPJ8LeXKR49pnBmTd1QZpLnKuQNqJvXEir2t+rPcgrethkVOOGFYUNo+PrbfSoUpX0DbfMki2LHNB+nq/tD7a68k+UsZh8tn8cZnHSzvn9fvqva2MNed9rdyPZ6+PZffRHlunH89lprW9dnBVe1Cup6mNPI56mo7BdN9CNnaPolWr/eUk0FIJOOEtex5K1NOclwh5L6wBEgATXeyk7JyNW+Z9a1Oc/aTmUeO9Gtyp57Z+6AmZmx8/H4f6M2AqQ2w/O5UKNtsPa4Dt6ORCmj277t98eJgmMnqSRjhPMcz94OlY2H1ne1gdKXWrPjYbk3pwCq8f8/lkEHF8/6PvZD4oH+RIOMZEV5F0skoGzflEO81DxbNWZB1FJh5RzR+nPs99ORFlf8TXU5l/8lmz8Z3YFVU7WPsDVPyUr4/3uerrQjX4We9PNag75+O8v+D2ff3kPjV9/fJ4/PT9Jz8j+Z61iQDAq72mMlB0TY9IBOc9Hvk12jaiCwOQSqqaJwmG8LPcw1wDGIQ+BTwYBTTqU8D
jcY3roUMfPQ5DQBx8tsezfK3683L6ZLSARk8iEw58ek3aV4A5b1UK0+zDW/ag4/dmytyEuETAkIYo5nzmxHBgOJKTxxqKaLUiIDEj6oqye8oJZSnv5Qf2eJRaEIDHaYWPxQvsU4NHcY0PH+7h8bjCdmzx7PYCj/oV4uix3bU4HBrYsYjoHcy4MD6fCmku+eQASn2H6poA1lM4mABOJEYpATE6DN6DCDgM5dhcgqSygaC1k+TBdoRrSnLk7NgHDdUj0M7J6SiJ4Hda0NSii+wY7goMYBk0uT6xkZiCdCTgagXGDNFZePxknm9juFqpqhSQWAFFkw3r1GY8Z6j5JlqHJNOJfmKmCMXagKdppIceLU8McJV+M1EGZt6+UhOp2hiqMa5/T7PBm4fonjbs9P71T7MEPfGs6rP64WQKQZ0rn4UyT4zMyVhr/442TzrxrNyeE+2tN2vIM8tzKH+3BkUMNKr5EsBEgcxtxPS9+vn5ucoDqeq7GwAXZIH4AeCqkCzGClQZVZnRVDJrp+upHGVs66VWumE8p+/XtVgqmnpNqSrEiaKU1SedwDbyMv95HddKE6rro/fp+P2sRMqXcog527gQjmuNEKapi3XHTitVcyX7WAGmmRyu5rX+3fxZTFV/TrTplMJR3XcqW6q9sea/ek3WVANAiSayeJqaciLtDsobWWZQOTJ3Ltdu6dORZ/aWvtZpVsmAIl+Bnw5VdNGs+GsNWtrY1fzOhWcMZOSqqLEjAqrTyUqHZ7GVkzmhyetT+82xEVHeOB0tUTbFearP3GjP4NlENlExWOs9qBqTumETB0FCOSUqSV0MlwAkBa61ng9s3LS5E6N9+qIapwpgdmXsJnt8lpPVvoXjPvDs/vVc578VX7tUXlNdC69qLsx0ncs/LnNM1bMZuDXKJPMyMJWLJ/aq6cPK38k6JF1Dt1Et06v5uFXnnq/bvLYx4ZnJPkdVP2YA0gQgOSUPZ8+eNN3mDAAnDeipeBIk16x9tPG2YL38uLnMqSLk8qPr79RjQ6Xvdux4rpljMihY32e1Ck9F7tBsPCrQ6NZ9p6ZqH64jMKc1qniSWuUMVJms8eN+ZoCXTm8VNdlanIBfJo8m8va0HJzz+Sm9bPI+MAHMpAC5tNeNAPQUt0k9S923TKES8NWcDHVB8BM1Jk+0dwKWzOUPlfV+23fm8t5405wEpPK07DPIbS8HR1RRnZB1Mdnv5/vvE+Yxnyw7n8P5fJ7g3XKT6q/9I5zUFVA+nv7WxuNl6AHzm+W1rnKBncxzds4MLjsq94cWD3ZrNKGF55RT1QIldG5AIAmIsJq9jFIbuE8BD8cNHo9r9Mnjcb/Cdd9KLeG+QRodeHT5oIG6/Xxi7p9EC2j08dJ8Y85X01G/LVJwrqhO9JRTE8ezr6vGkwIhtoBzuiFZ3n1ATutyTgChOtLI4oxYn+qQwAAGlgC4HZei0S+M5/hwfxc3qcPjYY0Pb+9InuQQ8Phmje2hlUiinRewKClgMNLRojy54On4H1cGm+igssI5OVloxEiREJ1IDOcYzgfMJ4aZ9ChOAEmOb7WjXNE7KZgXCW6vRzkmyEk3elRpDbzIvNFEMJ1SNG4P8a2u7UhR4Fgxvo1O8QxXbQTNNqKyWc8N3SMyKVkJjicKYWuSKUesG+RYitfZKXKUqKoHMWsLqo3d4TglCLbJ0Eu2ZaJcJdxi0MyeWykLpxThepOefMfua38naVk1kEGT8PH6vvNCp0d1FAiTDT6DaJiNwZGCTeW96h5T/sOkAPAROAT7Lk/7fmKMamBkohQ2Em1ECUgDw3sDYhmeqIA8piwmhmPKKUZutLmveBHlWfm6jvywt2cA7STlJaBEWVXjYNfZ2FA5U4+JATkTg43rz060r/6Ogbr1us1jxzBHwDS6hKbPyHPN07nPMqoMRA3CnASKTs3tKZr3o1KMS5vmi6ysrdO8PJ3bW/lLGzgHqA3Eqou9umjfK1Ft03myQar6VXXyJRXCqmm5PwQ
wqpRP5bE6Si62yKC4FTk1PpsYZjU4ac9TeQqg1C9kwE7LsTFwHjkdlDXiDyjjNO/Ak+TqxLA4JR/n41fxxmTy5rKlVv7rSMdqTI4M+ttkfS1/K9Ao5ZO3ANeUPciKBOeo4cjZoXX6JDeaeK5TBbKnOqW8lqN1f218UT6bz8Hkesaved9IU5kwOR0st10PNTjFv7N1Pi0oX3hgUkh2bqQZwDdve/WsAmTxjB9mXT3Rxok+AJRIS4ea1Z7Mk6ja647le9YrrX+nQKN5Y26jeou1OYKszeRQdEdCBnWTq2RYdfAFz3UV/ctMR/KoTKm8YOPTeq5qoEhr9JXC79O5rdfnafCMy3dsTk7sRacG5wgA1fHJJzZGkZkWAcjqcMoyPu8vfPK5R45HfTSAAnjWfE0nnGWzNKk8DrP75W69xGsAE30kmf7LQBpF9yGG1GV0mOpAk1qHRXZbmh/7+mTaY7nNmPM1Hffr1Lqh6RicAg7mus0kQroCkCx9WjqP6ZzVPF61g2btK3+ncmqiu84dLfZ7lx8xkatzsBXz16hez69na3MSUXsbr+jr+bhO9FgdIwLEbhocEBl9H3BzaOGjRBZ1IWIdBzQuYqUnhzsNBKlPQbdavQ+GDa6GldTt7TvcHFrE5DD0ARgUMBqlQUfy+OOgBTR6Ehlj0XR8J2kuvxBSYZx56VTx4Fmx7aIwm7GOogyZMsHl+54SwolkZ4ZsTo4YEXLsn2PGPjXYphYjPJ7vz/Fzu6dwHVe4PrT42M0FbvoWaXAYrxukQ5B2GPCiG0LOs60FgS8V9eFIC89qf7Myw1KfQ68BApztkCQFIonkeyokEknto9IxnZsEwApv6/GNVqXfHSiDHP5AGSiqT1uwjflIoL4cBUQ3qXwEcCXs6or+HzdgZBOnf0saWhHeWcCfCh19Ah31dX49a0PtEU0z0CjNTrCw079qI3jCG1UR5lORWKcAukmbqr7NU0hO74RVn+t71cK+buOJ52XFKBaDbQoaVbw0H+eZt/rIY1J9z65rA8zaeTSvUyFV+jABbHmiTJ/qszyTT451/t2c36qTbtyoPMAaNeT1/V7aYccAy0lE1QaWJNXCCrqfnOsZMZBTXgBo4WPKUQz5dBKHiQKdj7J2KptCNTcemBzNqh0/WcjaGlGvsyOFqayHsm7LfSYKc1X8tVYiJ+s+o7vVc2xujOa8m+eZpp9Vxtm0v9VbldKY2zRRrG5fY7lds/pRc0OnVnInxLNxq2tWnDwFTObJjtuW92W1HCmw+ZrkRSWbTlGa9UP4hPSEGDVWQwGK5GTAwmepQa7PNXecnASlY0nnrE9uE4Ck8A6P9pqR7HQdYOL1nxgOc5mTeYCOZd9cVtpbM+WbZt+ZyndM9smJIRBQ9sO6MPlta76etxnAOknfq/egfAS5RD6KU8uK5VN5lBUMnke+VimGKZQxS1prZAL82W9mPH2rQWp9qsCvCbicU3o4HzlNsPcJRyls9dzUz7X2VnMwBevoeK5uMa6tzbXMkjZO19gEEKrXXYWCcD2fPGn6dOwIU6CzbpO+Pk4zxlGfQJXTbravzlWkW8RR+ZwhexUgJxbHat5cxYsGJjFABiABqFOZ65tP3jtqAJW1Vq+xuoh8KH1PAWVvy/3lcm39njuPTukBJ8Cso9GZ7BOoIgGrAsdJsyQ0MjlpVE6W41l3rcaq1jVmz57o5tVc1wDvZO+vnERHshfVvW5Zs2V8OH+v9Hm6dsHqBAukNoaA/BY1a2OS+52qR7I+y+Sr9bsGTuoacFVk3Vx/P9WXl63nG1/O9BakKtUpv8+TOazXd54yql7XfKlrvD79LB3tFbX8KkKv1ksn41c/v9I1efa9us9VScLTcmo+TE/in6P+zcaaIWmtTIiDRAW5lHDtOjwII7axQeMi1n5AcJY9NMKT1jdiO+DJ4WG/xvWwwpAcbg4tDn2DGB249xIoYRHuZiPN9vCXS59Q0OiHf/iH8Y3f+I340R/9UXzkIx/Bd3/
3d+Mtb3nLE3/zQz/0Q/i6r/s6/PiP/zg+7dM+DX/qT/0pvPWtb82fv+Md78B3fdd34Sd/8iexXq/xOZ/zOfjzf/7P4zM+4zM+7vZZUen5RlF/fqxnMuar79R8FIacfTpfWademjSZR8QoY8sx9ZKi5jT9TGQNwUHrLegNGXLk3zVWIACP4wovjmfoWU4W+/mbe5IbeQi4vlqj74Mc9Xjj8P8j799jdn2uumD8s+a67+fZ3357RqAE2kJTAopQI1iCRaihoSlq0pAIxANnFAQFqmDrL5CgGEQD+HIIxVhoSFAogiEcLCUFMU0wCAEiIaAmTThIq5aXHr577+e5r2vW+8fMWrPWmpnrvp/97fvS8ptk7+e6r2sOa9as06xZM3O4Sep4kVUEa9TZCJISUWLCqLnCYBBKsAxLYOZqqdc+t1Oy2w0jdi+uKpaCG92/mQnpROX3ViOKarjocosGu1VYZgVqFiWlz6ko194Tzv651usUjhtWQ0VRQtk+CgkwNYQp3mul2RQ7JxCsEQbA3tYwNd6zUZBuFaka7DKhW6ErLu66z2DwjA4fdiG6RvAqXFZJRIMrKLyuv7EeBBxQqcy9k0/mrBpx3IKpOXHVaGj5jN01nkABLuLA38CGPiKIfL0OD9aIAgC3UoXLxrczMljh19eG3mhjNSLySnrVeDpCDablSGXL6VZwtdxAJ3ple2NT6mSMJweHoVWJwJLDtOVclHwldCRX2sJM2uuzXF+dgFydRkJzPr5a/jLsNi33DZNx0HctskHpwTmQvKHpadggfETjsS1rEAqeKLy3K3Qj+mdXlZucW8NYYRmk2Ypm52TXSamZzNnuMLxTzUQu0tZwSm7iYQ5Mz7Y8mwmJGQPp/ACP7bff2hcnJOJM4BoBLNtC8jWbb0YnOFnWtpE53GaU2wIByJkgjU4azeiV8MPITjZjW3X+zmKH0wUU3hsd5Ogu0kCQWZx8v/05atzkn8HvMBm86G9ZNTVnPpUIR+s0avZJNg4kcV43Wjb4MX1ozuUWuWEnMZ1dEBwTbsJpcSSPlr90CyYgZ1eVyST5K8SFHmDkih0b+WnlgFksyGHrlt2ea/uk9ptTYlVmQfguyDNLF4G/CKh2i+Acu1FSUW4orgWW0VhVuuIDvF6VumJ/JjQ32y3QvptJcwJo4SF/spHp9lp0lfsGP4rfPVkk2Sx/6dlgzVFUZBO7SfhoMahzGDj7oeXfHSv7zuqGXDJQZrDeMFnkpNqIEmkEKM8qDnKdM8SoLGkn9EGjKJOJGjYLRsq3xnk/wu/UPrJy0PxuY2hpAGoPpVN7Xg61P1vdpicOJLOlLc6jyliQdwhKhKssvnTOQQOvG1P0Ml15ik0mq0eg/VJdHBe4ZKwtvcdIzoDL0r9wY5+ZC/iFPh6eeWfHy+ol3RaqfWHvvFZUt/5afmRTV0f7oR+d4yjqUmNzO92Lqne47KDhdQEx4WE64j3Ha9zmA47LhlM+4JD6SKONEzYmrHnBO2/v4YnbEl304OYKp5sD8kagurMGEtk2snX29G5If6ROoyeeeAIvetGL8IVf+IX4zM/8zLP53/rWt+Iv/aW/hC/90i/FD/zAD+DNb34zvviLvxgf8iEfgpe//OUAgJ//+Z/Hl3/5l+PP/bk/h3Vd8Y/+0T/Cp3/6p+M3fuM38Pjjj98NQKv05Ocucg21aiJrh/djM9uca6qQ8gw04+uIcsPQYgRUBuiKQUs5ELo4jsbtMhM2JBADD/kKJz6AAfzB6XH8r9un4eF2xP/94Cl4x3sex/3bK+SbBdu7j6CbBWkFlvttW5fdnxyFEyXPfNNOup6aDttvikTbkHkUZ5EYNXUrQ3RkpFtqESECu1QRVtkAo5Cr4i3XYrIzUiCGSmrPEk1V3nFjTuc5sWPCHoexfxFz4bwe5oA/l3m/PjuBGRoQgzrF+CvOoSIFaSuTczVu12AQCHiGTjplsFTjxk0w2K2GKWMqTGSYpHVlpE06g6DrL+vziN+1Xoa
u7KOe36R05c438vU3hWccjYBZ9ed+ImfO/WpXcHj6mU56k5nE1MPlh2na2XG9bPDLG5rTZyWd0JftYQxkqgc9kkZkcWpyS7aQgM2+f+mT8IQxgNytWAfCJs6hBchX1ZEkkUYa6dHozD7zgQuMwpuCd6UnqvReDcKId3Q/+w/B6WRDuX2UQWvX0w45g6b/HsbI0q4+s9Kfe7+n02xbUoVdCR51Xo2lAR/FSZ49w25GfrnhLjplBV9J+RDOGVBu8pM+hK1uEdej7itM5BcSjKNbt6RVvdycRlx1dNUVR3b97RrlslAixncJ+WelP7K6LbeV+SJvyTnVWh/J9yPqNheFgTapNLTBddzGCBrIdNOOld1WTyoOFuvA7vEhbTjj3Rn53GCwztdj4yen+40DqWz3avrSTtLs7T9yDbaTJ1VvxS3orr9oeJ3hzrWt/TBjqNF0Be6mX4xzyfGk6Ye0D7ioXetkkH65SNTw3p5DUsaAABsZUie+Cm+UT0FWOafS4Le0ZaMk2uIcPC2Z3xJF22icvYyL4xB/RxkwFerynbV/7CKIACz+QoRZlKmbYwzwsre44yba6hThGvkI5TUWOzTioauzNDyd4+hVzZfgBs3ur7qPxVmSCVzPwLRyvKN9WVABHK073Fl7JMHJMxcVWB1FHlcVyXv6T5JV+RZ/BJTTZQzihB+z7L6oi1JcFqsFD2mjYj+KnFrNBRk2WseMm42Sygupo0jllJVHAo7Yi1E+R/sgCUoZlhYd3gfRRQqvPAfHqLPlAk513KxNcGh94MG4OTkVkm7WUfpoY+K+1bwRsJlj1M0lXB94zC9Wdwa+i048VLpHprJ9kcuZvQ9PR6w54ZAXbDnhkDIS5fqXy1m9OWHjhDUnvPvmuszVN8LtzRH54VKiq+Q4Fit3YGC5Y/ojdRq94hWvwCte8YqL87/2ta/FR3zER+BbvuVbAAB/8k/+SbzlLW/Bt33bt6nT6I1vfKMr8/rXvx4f9EEfhF/+5V/Gp3zKpzw6sHuKn0ImK2AuEa4zohu8okwAcTEgbkx4o0y46lYrPbwMJeKImSptEpjLFfQryjLvhmPZG8kJ/+fmcfyvh0/Dg+2I99y/h/tP3MPN7QF0k5CeWLDcFuNleViuzHZCQhg9CqPK5HbCrL9nipuKcaJrsBbPM0rnZrxYr6qet2LOXXErF5KM8MoL2mqNCmr2Hv3UBJoaw+oM4Da5T9CJuoWc7eAbo8oNeCwUkeRenSO2UV2lnPNdkv/m8G4FrjgMcjm4GDIpW8kbkcYIiKDrarOuhrHZnsZ+gmEcJyRwsf7qcGhXNH2/TJ3mG8X+AmOfruEv3qhFtG2EvBqlb6NGTDvdirTQjDyjGe5q1JhQXL1hkCHmynjoqWImlTJU6yNbRI2DcMvhpdpEDttN1J4XAgvvHaisKmYUp9FSefBUjB5ZXUuHdgNflrM7IDRTjVmzIpUPpIaxPUemPENXz8U5VCb0bFblDG0tXP5VnDUipxrpyI22rExSPJvfO3rC3TpVHQOQ/nGRd6ObdQTPzlnljBkLi/kc5IczkqWqmTMgwO4mNtGgHCUF1fOapWuJenMTu0HbsvUFjHZeXpQteuYP6UKAyP64ZRQA9NZDNYpD4+FnXMG1t2AKzZXJCWO7bjSWr40uOGTlb5L+ipEqDv/czrJARnEcVTy0q6xZI3DbpIurzmv4mtKGkbFuMl5lUjdWaN+aCJ0QeiUI0Y8AjNPa6EJCJ488wj2/NN6DyoNOdtWtWszUIh/VqVZAkzMXwdCz+BRso5vaOPsJuI1c1Ak5YPQ9LovkDH1t0avso3h18skdXfuIDIP+0ZhLn5yTy2yfV3pg75xxcrGmHCa3J3N+X5joq5wSWGXcrPwCHO4VLdYpogsq8JGzZN9X+AEX6d1N0ALup8+VMKM55H6qbVTHnwuseuslw0Vb+EhS6vuP8Rh27KYy0yxehkUQJoMTspUbfOz0zQhx7aJ/MalnVCdX+CrNFMdJc6q
MIm+bnpT3cPTunGpAc4wKjUtkp4lY4arv1Rm6o3eGiQbPVteJaGJAt9tvKNHWDOSVkY6l7ykXO8g5t+vcJFV5Htt2ty9LhKvwsEYamegywcusL0Yn6LPV7xDZKrhvstdH7Q70MiK9N/KbLjA4pxG3LWkib0UnWWe2rYqpHb7NqEQicE5s5Y7GjZxy7wdYtLaUFgzP8n1kLFF7KGfuJoAZ27LgZj1gZcKRM5iBJZWLnhKVy64yE26rQ2nNCQ8eXuFhvbk8P1yAm6XcLHxD5Xbh6juwthYBU1U+S+9XZxr9wi/8Al72spe5dy9/+cvxVV/1VdMy73znOwEAz372s6d5bm5ucHNzo7/f9a53AaiEK8S3N97BhpklAjzhiGMnlB/KckN4qigPhZg5AVQFMh+BtDAOaSvXz1fJWg7NKl7JDMKJD7jlBcxle9pNPmDjhD+4fQr+4MFT8HA7luvo7x+A2wPotjiK0m3b4kUrHPwKfFjRgnlWY9JOFJTxekS67RnniNsZT21rWjM6B2WqwAXE0CjvZMuBGLzWUZSNI8NGKIhjSSei9VmM4wZnEF4zwmEY4WdeRsID9Iyn2vlxfRRQGCWjez+GrYV6mo9cBZ6Efy7FMakGpLk1qiNymTyKkSORMFT6RKngkeo3Ghk9ztgzd7XM0BDHo9bVum01R8RFUaLikOJM4K2enZWLs0QVqjiT4Pttbw9k01/Ie7R+O/ohOOePlfadTjP9USdTzdkuV2qOojJ+DU41FIB9rSLtJONKEcOM63lGC4DM5WyQBbqdkQ919XwrSi3Vm9XSiZBD6DMAyNlXINSziwDI2TEm0mi7MlvPjmiGo43+W9AcRfWWRp3EO6SO+hxokMK3keEgFXb4bfLJOmKbgWVlhZGPlg+djCwfolHmJk4UJrVTz4+ATaE9gZvHOLJ1V3mlXTE0LryuF3mNyMzwGjIqfzUnreCJNmo3Y5lIo7SR237cDhIO2wVjPwaGYAyhd2dkHNtq6HbFhf4XBuozUf3rovyohtDLdmuqHaiEmIBy603FszgyzDhICD7ipKuWibaF6md7rp44raM+tpMqYo+Tc+NuyhK1fndynJqKk4k0A1Vvs9KeRtHGdqMeAooe0hsJy1ZZ3Tp7rA4lloi0xn9uQiM4SNzO1dBosYo/N4nh5tivTvq91MCW/qHxmaF51Z+VxmMEkp1EA4E/ja2gNqPaLW182xbpSnN24WIUlcpUb+ur0XDU2tUoOMAvzNnn6NAc8B4AExHpz+FzDqRk6NbQtXseIZ8BPc2KwzfUb7MhHNKgoRsAdgWGbb/lzhYGRrek1e766qNc7OSR6LM6piO63FHhwnfOnlIC3df9Z9+JzDJONTCX83ck+nY7Qz8q46h777ohUUbRGUowEVdojlHhDwtzoAUadLI773Cit7TvCXXrYhl/Xkvfs0ZEkkZb65Y2ezasrXaB8kLn0JZn5fUz/ZO80odBPnvjsYrfMG9VWjZj5aJi2YzXRG943cptDqlzMvZyKsIroHA9F1dI15Hw+LgJXwFp9zsRLr8pZCfu3mu/wu/dxND5Q94I65pqMEdxGqXErorMhNO21JvBCbcPjzjdLoVPbhakh2XxPt0CS92exgsV+XAJPJP0fuU0etvb3oYP/uAPdu8++IM/GO9617vw4MEDPPbYY+5bzhlf9VVfhZe85CX403/6T0/r/aZv+iZ8wzd8Q/9hQuQX4doKYP3JZcWISkAjd5nP12lDh+UqTRWq3IQkUAyPjRNWpBpdtODECzbjKMpIuL9e4YntCmtOeOfNY3j3w2vcrAfkBwfg4VKur72l4hWX7VzWCBDhIwa08YTr1dlui1c46FarkUksj/XVBUwXJ2JdeSNUgSrQRd65fc/swtJ15cZESakCUqNFnts7B7IawAZGObNpz4Ci8Lv7Oxda+meywhSL7Jm6YdelKyVKEag4kWfjtHORPwq/GKrlA1mnUZhsNOeHX51ucImWmPZA6+r7AMeRfd8
bQxdnC2v/dAJWb+dj1NWOTM5g1P7KmEr/qlIcOofg8VBoamAE7nXb2oNdvjpxJbTo85lyH9u4tpnyXWCtFXOqinCR5xL9URxI1Ql9gN64lg4oZyIBuuquTmh7sGVdhfJOI9YzjSRqTY1Eg2tLc7CTK6CNVTR09D0pHY4MCYcQw3c0zGecbUGTD8+PCPBx/DaSFfBwOJlio0dGfTBDWX4GS2hiBPrf3GjQ4EQcCFokrj6IGmDAOhPUaWt4rEw2ChGTcQZkc85Y3NPvDqrn9tw53AQc0b+A34p8MFuVFlbHAhYGHXJ1ABfZphJK/7MNwI1th1Mr80wG1lsaq9wxN/D0tMFB/3l5NHwWeTujJ/va0lWQZwqCkW3uDMNcHGQkaGBCc45XXEURP4Op6iB2E1TUcw0NbQwch9Y20agskR8xQpEMbqyTfyYlLXFZ5uUCI5tnIOjP3CJ47ZZNtf+kuiFPsncaRRvGOGgUpzNnQzZDIhHmWcaQGy6FLwH3TiMrHR4ivGYcRPZXHLeIXNMPwE+Uax0e1wJHw1sbe3J5XPEI30Tmu6EdRnCyofcd+W4rC4sQ3SSfap9NlBXseM7GMPRVn0UGRXydSc6REmUFuAkGqTpxle8MWoydJHQNhK3bNHQmuVZi1KSV0ar7AXfUAeAZP8gXddjDuo+iEDKvw7MeNVB5nTIBB3M7orlApEVEtrPpFH0iBm0kVZ2XKH8cGv3HKFGpZ6hXYPAi7YVtXQHT5f9Iy/LbRYqZw96NjI0wWNPKXy7ATha44oF21dE7i7qcpUir8rRH9hbejt4H+YLScipC/5LCnreEdUtIIjP4gJRYbR8GkHPCaV3KYdc1uojlwOuHqZw1XCPYUt0VVIKUK53XhV5RXZem9yun0V3Tl3/5l+PXf/3X8Za3vGU332te8xq86lWv0t/vete78NznPtfTQdT17qHPE98xjGwaWqWh/hkRCmNuxTOtofeVQVIGEuQg7GacleiipBFFD/MRD/MRGye8e73Gu0/XOOUF77m5xs3NFW5PC+h2KaHHcg6A3Xssggme0e1+6jxwvGj+mUKfInLymixDhtsdhDNJbkOpH+UQSG7iv/Kl2z9rzy6yIdHO222NLe1PUXBlldgB2hSyPLttcvVbZ4yYuo3ybYY9dcaSNfLZRKX4rQDs4asoYoMq7Y/9JrnjeMjEoBrpAIphrXQvhbxVZiN/Zo4Tbzw1p41P/l3c9nBu9dfCNKhunL0KXy2b6v59Gd9aT483NuM07q9MPtRhVssOnUYd8POkW+tyubITVREhm21++uwNyDkirJIVukQZf2Pg85Ja1FGCbhlSB1JGjTqq9Cph6ICLXLS3wvCBndPIHThstzc6HvH8ymbyqH3OpBEcFrutn+QNBokSib8VP9zqMUPl6fIO2vsCY36YIh9cUE3LwnMQB/U4ejd5qMLRb00SAWjGqhJlcR7WvwKHo9+afSONLLFOAruN250fVdtQvTbqk9C33SIjxu3CujiiWx1rtAlJxKntCgrcIiMEXqW/HICg+Dfwv5mU2A4MZWTHB2jwWVkEOLkm4+XgkCrM+Oozwckv275oxWYHiSMQLoJTZVDEh6X7yWKIbg1nqA6SusQRI9ELEJAFb5an7SJRjS4SWa2LGwKHo21ovX4UyhgZlQw1Dgllwmwmb3YBBpndlr0YnUQwv0dMKtGg0pcRDUQ8jvShyjUGUnX0SUScqvfCj+LIIIEXUAfNDExpt5GH3a6NsaMowhrr5tau3f7nD/kP9uPguUOHxWE8ckGGKvIr9vtuT0HtoipMPRzsjqGes+1EHor6TnjA/Y4CcQKzHLLvHM8yNkFPKsxoNG7gE7lFOTiTLK/arVtKl3hEOqEORyMnogXdK3DTvpOlrW0dT428Kv+58+nkWAc51L/elAjZnl3H1m3lMttK9ViH2HeDJ4qyNHx3HWL/s+tvLFZ/6NmssDq2jy4ckajQRSkS+IkN7sP428UfsHHKTPrgTacLCNy
myONmrFtNA3mq/Why2NK0yCOg6Ki8LWBk5Gqny8JurreCc07YTgl5TcXeeZiQTqkswj4kLA8LnpI5Cy8DJdpeov7i2F6QLnIayXatu6SnP/3pdy5zLj3nOc/B29/+dvfu7W9/O57+9Kd3UUZf8RVfgZ/4iZ/Af/pP/wkf9mEftlvv9fU1rq+vu/cOpzMGGqWpIixRCLMye3RsHRYgLmd6yIo6GtPkehC2RGWsnHCbD9iQcMPNUfRgO+LBdoWVE95zusK7b+/htCU8uL3CerMgr0shwJXcHlWgtSnMYz38eq1wgjsXiGkcXeQYXvo9mwwNDBumprDse/ubiUGJjJJnNRZV1hBcCGs2z3omg/K3GSxbgWvfanfTV+eFN4o7dM9OXrr+kTlHRkLJiWBXl6zB1a3yWGK2Rl6AtSHGwMXttZvEpFZdZ4RWAeUx7lE0Wp31B3tTIItOdTdYLKw0/DFPtngw0MfnBxX8OKVHbXVlJDYURxUpdqLVJ3GQGVhIjNtzys5sP6tjzFlW9NEioexETaq1xkVceRy1LBE8KqNaH8kq/5S1rbyQrrbJ+UbIQDpWp5HIBnEGuMNbjUNanNN224gq8wles3lp+2nO5NDnaDRaHrRGQ+e8pda+NSwMXErFwaAtNHJufAN18XmKiKnR4l79o0L77zyrDuqZttk+dKWonBGhZ+soD5AwaEl1e6zbMgroltFGV4NzfwwYViwC2NkKw+pMAHHZKkxN0ur5JsaAZt0WRXqrJ8QYDnpA4TGLBE7G2lXlcxQwkjVGjndOPKdHzCvnxB6Vn5yTxyWsXmSTbIvV59y2NsPw4dwmEKAsnxlekv6qbqy/q0wfRu05uNnIs0H01AAsHWt9YTRftBdcIWmz/Y249lv4yQCONsE+50yOcNvnqOtHdRqd4UQQozkCjKJw56IAfsuLrTPA5HSqnfQ7+zPoe9NFO7buXDTd1kdtaxTgncij+uJ70Wnyty5GUpUN8k1vWTbjupdcVILtYywf7WHlMa+vp3rc6fsyTvpcy13qNFLYrdOGUJyJwS7oo8FC/2xdo35kOHoa4sD8Lrj3dOL6bt6rbS6yOOYZJduXkX6QvgNd5JfcAs25bl2USL2VPJ3asB7pkz0MWmhQ+G6PxqQqixKqfT7XTxE5Vs5aHJj6teIq+Fw0ErcuDduxP824S/1keFv1uf0dqoz858713Js/mm40wCsaROYNQffycnT+kmazPFkDDngj5I0ATiAqjiIirs6ksh0NGwGnBKxUt6S1y6mWh+V2Yr2Bsx4lgwRkiQ6tHRix0l66yGn0zGc+009czyQiwn/7b/8NL3jBC+4Ayvn0SZ/0Sfipn/op9+5nfuZn8Emf9En6m5nxd//u38W///f/Hv/xP/5HfMRHfMQjt2d4ZJ5BGw4ZwzcrG/X1jFZdZvIEb8sJs5i/2Eg9kxuXA69PvGCtUUYP8xFbTri/XeGJ9QobJzxxusYTt1dYt4Tb2wPyKQFrIUY9Gwio4fe1adnWRQhnAZm9w/EQPgS45TmuIEyFiRiITQgQeeE8tJcITbiK4WL3u0vzZgud7hEfSKDpKkR4LsPYjCsCipFiDzEdrZgYmPSTU0yVJvRcAjLKOjiQiD1ORs+hL9HgGvYxKMIS0VEdOzUUmOo32xY5GKqlqAqkV+Q8HdRRagqTpCO2XXkX6uPQZvsw2F4Wk1EG3VlJNHUR+2ak72zRu7dZrm97Xrk/g0lX2gETSUNehpiojX7lMcJQYV5Yt465A8xr9I/6LiXyIlPdNiIGU1GSlMsta1nOSzPywN6M4s4oWNhHFFmZM6Ld2B8934ZaSDxTz59G7sjEXc8BqcVJJ/SNNwH0ZwzoqnQrb6PWmhF3iTpvPNZYK9BPrMZOZEdJVohN3wuMvdxUXrskCVwypgP+morB2eQJxkEhYKu3tJ6DBYkmM/ImTBwU20N5yd4548aNzVZH6Dio00r4b2vtk4kuKs+e/wiGXmoxSn4C1hz3ht6DvL0
oKejVQa10JARfdYqtV+kBaBF09pB4Kzvr7/psHUU28geCE1QdGQ5a7ZLVbVXnKc/Z2xBT4yM9Kw5AZ/WPSLiT6dAgG6WlWNbYB9Ex1dqdpM6hww7X+ikFxeycOuy7M2RNm9/8jXrfRWL1+nlef2tGzxmxRe5Cnx1vt9+uXvtedJiV8SLzzXl57tD47Ie6s4vCe658CKAcF5GrTE8AFlLe4PpcvvXyc8Sr3XavmC+WseQQI4VGfCR9n73PtYmIk1EiQwIa1Q/dSgVQi4SR/rjtdNTxWPnLrk/ubDdxcg1SBLf0gzx9xAgPWPmLzsZ3LQ0UFEt/dZxRtt2BS1+FduMRFqlV6raiHsgsXvFE/hk6ifA5dBp6kPmH/JYie/xodJCzraq9NzzjqfJp59QV3ebodcJ3GNAlt/zOXhWwLM8GfrGOH13jl/EKfY19b45fCjx5Xt86u1BhESES+p1rG5lKBFGIZuWNgDWVi2YygW6pXOaQgeWG9DB1dRoxykUzZuFVd0I8Yrp4e9q/+3f/bvcwaUnMjM/4jM+4qM73vOc9+B//43/o77e+9a341V/9VTz72c/G8573PLzmNa/B7/3e7+H7v//7AQBf+qVfiu/8zu/E137t1+ILv/AL8bM/+7N4wxvegJ/8yZ/UOr78y78c/+bf/Bv82I/9GJ72tKfhbW97GwDgGc94RheNdLYvNGCmHRpp5pL/YeVPxxyWIR2hhxYsI1mmCd5worItTU5azxCn0YKb7YgHW7nG74n1CvfXco7Rg9MRD28PWLeE7XYB1hJhJKuyiotlgA+C3ztszy6i5gn3TN4El1PWRok7IaAM2xDghLNEERnHiYVP/9YwfrY6ytTtDn60gsQZVd54sn0qMFuFbfvUnt0V9HZSupcsDoSm7B5uxXlzINmIhq5PFjdR4GbfR6c0hR6kbXAJUZftGQnlJjVZabOHDIODnS5KkZzjBAOF3hneA/yUvxxwdaF0dAQW2zawhWzWCCgTO+5gGbclz2FQWH7GwcK87xGmmF+NBOroD5YPLX1nQxP22Sh468jkhfRwVT5Qm7QduJwrQvVcJnMeiJ7TxPWsI5lgH6kdIi5Gr/J7LVOdUW1iiH6sHQ2TixzSg8oBv6rnDphtePDyBy7qo03e0XgP8EZVAuQa77JCSAq7v7mIta/N0xbH+gKajhPaGCkQcTRKXcSGwOTziDFlX3eyM7bH6CfUA1g4wDHiLzLPgjeHooTS2IIWYTpoz4ulyoWurw2W4WH6tX/2zCVek8q0RmfUX1NuJ7haFZmtnUJbbQKmq/hsnEks/Z3AZ+D0nXad78Shrcs6q9x5PrHMSM6xcd7JGVMGD+6w28HWMd8FQuO7YmuQOI8Wrg5Zaltkq9wROIk6hA9Bdy8Z6nSP28O0vI2MsvpzQuMNb+z6FCeZXLc8tg4EXgjAN51teM7A1OCjZkc6u4zcBFPfj+yImRi5iw6bpdqmSpiZrjPDSZnKAh1gFgKgB8XHG13VDrUwSl2TPsSD8ekgi2YlUr3YZIXe5TDfFh1udEjsk+0rqv4weGgwRBqjpq/Fhq4OoHN6vdN57N8PU5SNQLNLAY22KTghnT/I+TvqBE+mTMVf16jINjJ12zxuzPrtSeqENjQQ7XQnZ/S9l5ONpxpcOhTU9LoeWI3SV7slSLfsgcM2URlnKjaCXeibnVMnzzl8s7RrcKCXRNg+ys8RXzmbB9o/XawnwDlSSPoX6tCxk1ELLj+LdxfpbWhxo8DHg35EWjV02eZE1J4ljx3mgd7j2metSxbFZQHf8WUAQdqWBcE6Z435y81vFacblQPTk9E1KDjAKSHViz/kNvNyrIPckgYsN4zlxsi1Wj4d6m+NCN0X36N0kdPo+c9/Pj7lUz4FH/ABH3BRpS94wQtwPB7P5vulX/ol/MW/+Bf1t5wr9Hmf93l4/etfj9///d/Hb//2b+v3j/iIj8BP/uRP4qu/+qvxf/1f/xc+7MM+DP/6X/9rvPzlL9c
83/3d3w0AeOlLX+ra+r7v+z58/ud//kXw7yYZ6x3lt2eHd8UuVaoystU4Kud4kA6+MFAWwczAVqOL7ucrrHnBEya66P56hfunK6yZ8PD2WCKMtgRelxLuVsN2hSEoARlciZ2dsHQRANbxIuhSBVQQ486WqMLACQIb5WBxqkxrFK8cUg2o8uEgxBxehYGt0DdCm01+vRY7COCR99s7v3yUQlzZUqFtV5cvSQK7gLU0Ic5G8brbRSiUi/UZ3Ci+ZVXO4cGn1gbVc2SonSlzrHDJb7uNQxSIVkTNyFYjxyr0Hv8WZtcPgUsU+DlpGJRvMwiCQJdv0VAAuptKhgejWziNgQLQcIKhfbUNnVGK3XNUwtnSnxgjhqbFQOL4LhoeMAaAUcbmVo98RKUHBlfaZyLgUAEwBwMXnDDqsmzJW7etseKEGlKcog+/LQ6FbjSKo63IEKNtu2XoDW7FMDF9tH3vjAzy/XdGk30uffMyktqZS4lAup2O2tY6NCNjPLGw1kYYcwHM8ks3Ae+NlmHSPrRqh+MgVUX+1kLmvaW1sAI8pG/9J7zGjr8YhpbEkWQUjzvE2W55sFaTaS7iVj+PcC7ViCMhA7wV4UsrgU6pnQV48jTnri03k7PqB1K5rvJdDj0latG9iZC3uv06cdmap3RDHmY7HlY+xIlIxL8vHHQx5nxo8AOn+9vNpnbl2znPBgsXI9gUFtX9aNdPJyoy+VBlCpUtEiQRihS20JmzVWQCzoDjnRYNQG1RTXAo+t7os+GERvsSIsqsA5rMZDtV+ajOZtY+E1r+LkVn0Wj7sXWmAy0SOugKtzgWdI7buh9hMX2dJ55/3ilP7ju87VgPwydAt2g0+6L1hybb0xw6w7hpPF1qUTVZaa78o0OT+ZxFbgnflorIRuWMukiAnBc1QUGDychRexFAsk4CG21lxlPsgqjz3PtRGsDDhh7sxRV6UHOl4byhRspDD5pvdlPF8sj5EPptf9sFW6dbVLYEnES+ZYQI6wkdTHDBQg+1T7lG3+et0QbMTo0Yuap6iv08BIau2dKoHXPjCAVTN77Kz6s5lzbK1QH9AfA2jtn6L/0lNN0EQBfLSlnzXvknjJPoAjNWSn8ruXlTuxU10OaAb4c2mbwP8yNn25n+616B5PPYmx0vScr/op/M4oXCJLgBit1MSaPqFB9biy4qziEgnQo+l1uUG/jq++W2lgWrTSG2CBv876n/UbrIafTWt771DlUCv/7rv35Rvpe+9KWTA21Lev3rXz8s8yu/8ivTMnv13TXJQpQSl/uIOaYvBWEiBKey0TJIRpnshIOwqV5JzFy2qK2ccMrl1rSH20Gji+6fjrh/Opar+k4HbLcLck6AGrnkV/ijQIsMONgipkJYIxyE8IF4OKkIMr2ZTfopfTYwxBUeloOtdYXR5xsKhcCsinvrvTbCzQlhvSKUzDP6Z2NEeg85e6UchnjXvLK4F6cRAD8ptQIznAV0zjiLcAXFovLfOKnygZrBtAD5SgwmVueBGvayUi9MJbQs0S/RaB1N4BURAlSgPTU65NnzcOeos/2zPCb6LigmS08SYVMMBQnBZnWedo4rMVSMItcJlVXkkk/LeLgawAYm+W3htngE9LayaBx2/bVK2H4zqxZOiarTiJCPQD4yQIR8RdhyOeeFcwajhNxSvYlIytMiGgzF6BAHkhO+zQAh87pb8Zdr2TPKFtscJu25yE2q5yal1cimQHPSZBzHfuJscOGMFSObnAPAPrdtnfpMBfkzY0YHyij8ztHoDOD2vuftPtDfb4+wWw5MNEQH24AwA+90Z4vYVfGB3mwGHrfVanMwcZP13KLN6m8CKR7l0T0DbYtZQ6CnsYAV4S97ixxnKo4ioNDbKVX9TEg3zcBLt+R1oHOQBPTBy1ihE0s/YsTTwazi17O92gQsjAM8zju5GvN2iZz8a5O8Usjpfvlh5U828kd1PzndX/I1dO/KPOlesnIY5aD8qh/5yDXygzTyEVRlEpM
nPJL2ql7VRQxqeso+y9gZx0u3Cm51PNuGQj+M/moRC+yelf+cfimydpisvnS8R20yYlfw3TP82XID/afRJHbB6g5HWbiJWfx0iY6ufwkokaNucQSOzpxto8/cOQqi3u/ak5+m32kB8tFMCrfGvy0KN4wnwTu0FR+tUbLCijwIfmGv0aJeWiNyRuzSbpHI9NXY3D4yPkRtUocGn8yirXMgKU9WR8NRbCWqNy4XPlRnCtAiKU3b3XOgkc5RVOm+OYqo6n6DC7vwa3E6GfcRtTLgIv95AVKNPKMDI18VnOiN15KXofLaXdZjCTCZViOM1uEeInEajTd9K7df65ibvnXBDlYmYaCDzFmvvJE6YuRML0DGsMdXoz+zwC4wVpmrdlp9Ryfj/JUtVwEfHS9Z3hFdaOlykM/+dXLO1SXP/hgA11X2+UXGlyNcLB7hz/2kyvdrrdA479MKpBvSm4aXm4IXYmC5ZY06Wm4Yy20bX6592Fbh6UffovZeuT3tD//wD/HMZz7zvVHV+1RSeqTwcpbuoCulrk72RaE8pUg0xYNKbAyIp6uEMRNO9RyjU17woG5J28yWtJwTTrcL8mkpZ4usZIwh8oxIAScqVNjBprb3zADZqrOrCoPkrkTmXrEBjmmtR98dtp2KA0MVloYIe3itkPBM3pDvBBmjbWVxqxTwxr9zGrEzYEQpx0gqyhdwblTaBGj0gk6S4A039aSTE44uBeJTBWINBas8LUjWOBCDIAF8IORTGRekGnWiq00MfwKd4IpUkQ0diiN6kDqodkT6GMZ76DCzxlLEr8XF4L3WUwvL5MTTYou6cvxMQkuNt1z/RitFEZY4HnFsR2M6UMoXOYoA6FXVBv8jp1E2ztt8qgZ0Amhl0FaM5rwlZM4ln0QHLnXKkMyq/8Kw14ZP9+2j4Uq3vGwArUkncekEnUyU2yar8j013tWDAis/kuFPbWtiWETcc+Q3KWfoUicSALBUXtH3pkzY+tYMGRk06nlax4jMs//bGcSGSRr5SN1ehrI7P01wYKwj0UMIWwQEZDtxkYl2hMl2R3FHQOIqV6iT72y2IMk2P8Wdi0ISwe8PbeaqREmRggHViWKBPydsoxZddCLQbVJ9tzyEcRpZo9figUeN9ROwQCflHYHq5RNM1KJqIPI2IhSI570MV2wjAkY6SGHUEh5bduxzyWMXicDQw++jnuzk0SA5x23y/+hYnUiJkddyZhpSic7GVjrOC5eV/+AsdDyh+qjgLJ3IRC4auSq2DpqMbfgO+OsQRW5S0S+K1d8HIMv5ONXBXMZg4lw2eJTVfIVdtgIyXD+SXVQQOy32QWAUvB+ogx0IXe5sj/Z1PqlsFcUJfLe4Y3Sb8pd1lmSAwP2Wl4Fz0tMfz8evOvcYqLd/cqNBG2lkonB1kQmCK+71CAC37WWAH4cX6a9E0thFkJmTgAGKt1zFOjmUcQPUw2L7xIqfRsfJ4CSvzZFGh+ZA0gjK2oZ7ntG4vBZ9YhagRaakldRRFBfbfYSZdMS8M/3bSzayKsuYp9LvXJ2IdAAyyxZjAnI7D9TeNOv6ODqPSWTO2mRROgFTJ0x9Ftqw49u1Z/k38vposYtaX8Xx5xyjCX1SnQDj1KXm2DLyR+20E4dI3Z5XYW0iQzO6KGf7ZL/Z51gHMJ5X7PBl98o4BulI7eZfWWgG6UIzCNCzjaj2f21OtOWGiqOoOo3Sys2+PZXn5Zax3LKihKvNlCTCOU/G5YJ0Z6fRN3/zN+PDP/zD8dmf/dkAgM/6rM/Cj/zIj+A5z3kOfuqnfgovetGLHg2S98U0Igw1igff7Gs+k2lUBhgTXKsUagTLypowTAbUqEc7CPs2L3iwHXGbD7i/luiiLSfc3B5xeypb0vJpqRFG0G0bGv0gAETFpowTAA5hhkU4G+9+NsJAnEaV0J0RifpXI5usYdI83GWi3lZ4yDiQ2O4xj5Efdmzlr1GQ6rlnGKEGZ4w0r3iBszNa1FHEw1We8nc84BR+a9J
x4OY8A7yjKBlcBYdeZ5zENi1sCM82ex0HURj5UGlvgd7qx4n0KvQoqO2k34ZQD429gDfXmSjwrXFhVwf2+mv+jt7Z/NaQYyorjGL8pKUpAzWYomAWurFOI+ucjCt+MN8EJitajD2huLBjh4ZHAM2ZGeraW2XrnoNoa3go/JUOQN4KDeaNsOXKmysATkVuHcpWEVk9R53gSb06oSduXTI0wwD0LKx6OKAYUumWdHVmuYVO6NRRxCirVisA5rJ6Y1elI521Zlui8F5xwY4+JK9zsgbjKwmdJj95dCHUpl22247M+EdH0HRVeTbuMVUDii0slqYJE/5qQHXtxQnbiN7R4076ndwNevbZONEXagZspSsWWajXEkuUAJvDy9t5C/ZPA8oAltvZPLS2aCI6UTHqqi49PEQ7c+BUaA0wMi7KW9OuylgYWSIGoNnaQSuaw1ad2OFCBFN/56yLE2cZjwlduDGfyVhT3kcu+L43fc9eTo1kvcGNPM9WwWWSxqlOqOqiBq11tV8dbI1fnTdc5SU1h/JGNfzfOL+Crmr4PY9Hi08XFa3ywNI1wIcqZwET4cpwkYARR4a+usUYibyIC182Gm61/MkKrzpuE5rOizoe6GmD/EM3+ZrJjXO6yvQLKHpOZbqh8Wh/WYetGzPE9vpB1G3GqHyYDU7EgURoDiSRWc5p1GRNdIpMnSSiEE1fdBGWy5hJFAatFQ/Sr9jH0XvFbVv4HCVdeOXwTvpiz9dMNcJCeHJDs5skUrLisU3QWaNDhB86XFWYgSBbMrltaDa6qDxzbxsFmeP6fYaHAZE3JWOqDl6RyVTlUTqUyGuR4cVWLnKaD+y3LVl5JOO9UYs6ORV9o2N+MpF21p6RMczVwRAXYqV7Vuda2hN1noAssBKcDdP0k3f82Wi8yOfiGLbjpjJH54ZQmzVppBF329PaWPG4L+ZvgctEQCUzvCrsDE5s2frszp4zyTpiLc2U+Vm1ia+gN//mI9Vn9joMRjRsUL2T1nLIteClOI0EV6wOpOWWkW5zWVCtW9Y5kTqNiOFdFEM7bpzu7DR67Wtfix/4gR8AUG4u+5mf+Rn8h//wH/CGN7wBX/M1X4M3velNd63yfTZRJWxHhPZ5IEho8GST1sUhBw3yjWrPDIAaIZ28grUh3pkJp7zgNh9wsx3wcD3iwW1xGt3eHnC6PRTn0O0CuqXi8DHbNrrWCe0WAPljVoPiJDitpKFzaS0rrSLAhbibA6n9ttu3IO2R8RIbQyofqG6FQd1HbFZ4xJtbPfyj83+sZoghvroakcMKsURDZaOcRfFWAzhuT2uREwZXBm9x9M8qLGOgiUC3wnF3YjcTEOcMNJvVKPG8VIdJPaC1GOxlHPLJwCmTGGvwAcaANYrfTCKmTqPQn6gcLlkNmK5gmnej9pTslxrdVp/dYY/LQLEYA1aVJPv3+s2setkJdZxADY1LY3t00R0W/wMFh9i2/W4NDTs5WJoiLUZSPRi8Ri2W0NhCrHLmSM5JnUV8LAYTgXXfPAFuQMrhwhXXG7Xr0zcCncq3ImfaWSnptskkF120+lWrtE1wbdOElnoDi/qxMQalyqdAJ9YQk3y2bWfYhDOV4th0/GJ/x3yT7kKKW1lT+d4aVMMaDD117crfMNEusLDDn5tEV/y0CZg3YCWSsTnlQsSfjIHUpTfYVSBHOt72hwXhIrPKc9FzJprtpj3HlUC/6t9obpiMcWrxLnJWFgqy2Z4mzhGRtXFypW1PHBxxMunG0f604zMiIENjzinORn+yb3+oGy0ssW30/KX6fmt2AG2otoBxIBHMWUcoUw0TBWsPEm6LXaRnRsSIKbdgxOxxupcsr9fxthNttVVStW3cSn+ld5U7flwcnwvMNholLnBx6FNwvCg9BFzzSl4+DPQRh99D3RVw5eyQyQKKz8OuT9Y2s9HTri5He9zqsvVaHWn7ZM5U40ygzMp7nI1838x7G0mTPL6iHtnDWxwTF7G3slsEkede7hv5M+u
3LRPSntOoPNtti/UslSp3KUNtRF7Rttsa3eicoQT/rALC8qyhZecYrXOOjduzjaDLpYJdmXNJMgtDOUPPKhRbRGSORl4vFb7a53xEixKV6COBgwGCn5+VyBLrNPJ83OSSWZQ/wW3rEh3EVo9bmW6eOZU+NdlEfsyC3CrPE4e25T2RsVHeZnYLfenEbnuaX/wcKIgoFw2fRSdQx4MI7wwtDu0FSZGPhESp8gMBeYVuZaUVoGOtf4E6kCwsaW0HXKcTcHjY8LLc1ufMuihFXBxG6cQAM3hJekwIbQmO3wV1d6D1OzuN3va2t+G5z30uAOAnfuIn8Fmf9Vn49E//dHz4h384PvETP/Gu1b1PJzvmcaJ2rpzSbVSE5vvEDurHj81DAiQ0OR+gF8MIA9pJR0aJNMIG3K4HPDwd8PD2WFb/bw/g27IlLd3Ww7XkYMq1wd0psvjbKlRUY8R4wkWQ2dC5tLZn2oClGtXIVShkswoAAweJ0iEVrvnIyCeJNKKymkFoDCjOi7CNqJ11Q9oP6/G2KxO0cW9YqdHLTvk0w4Sd4OgUUhhX931kPMVkBCURwBl6YKKLdtCxIl/1zNBHq9cWiNltvWXViHUMyqpKqSxbZ52lH9PXlNlHGkVHnBg4I+HmhH3fzmhCMxyLUV/3jCWpfymTws4BYBRnbMNNMMx2KEc/tq/RcBvJIhr01dJdeAb8FkSbuvfscUIGT0IHxIbuGGok6f7pJHzTlCPldkZN3rgdsqhndxRg1Hayt8BtVA7sz3XSftvCd5vTCG5LkDU63GGYcUvaHY1Fe4RHcXYFQjI0yaBmZAFuAqZOEZFJln/R6hpOEGW8BkaLpwH2W4Ds32HnooHlDXmrxzq+GdCyrsQJH3Cf3wFECMZoP6luk+gq42TLH+DPELF5JFrX6jXroIx9AnSbAAG6baktMBinUXVUprWeKyC6w2xZjpMTJxZNo2Q+2gkYb1CDneyk9GBwQq2MlR8tOraNg5OxFqYRjUS6RvimOtDUxeavoT/vUOz1ToR9lNjQpoyzTuATkDZq26VXtC3sZmuo46WMsdPIOv7ELrBOI4EzRmeM4A8yozli2Y+f2QJSohdqGTnP0NhFsV5tW3SpjYqy9kydoFkbCKg0rFEqTV+IDdZ4ysiviEsFg3o5hgB35Amn/7g9W5xafRQWYKguBMxloe+Xe597foipOEGoLqbWyNkaBZqz8GFz6IJY6VD43J6zol2P9svAhnH4ycUhMnSYRTyY8q4vo28GR+63lEFfhoxuouo8A6EcBs5C6zXSITVbvVvQDfKrO1OGWmOOTixdC98yt216ovvXipPIqwMceEQN8kpWgp5NU2xiswMi177KQcSpRA3m1Ti3V9P3uOBY4dDtZbpA1vqabH9tlJ3he12st/IYjT712fTXOo2mfB+/Wb08omHLtyJzqkyVKCIXacRhoc/u3ujGydsPdnxcnwYyK+pfmDnV0IE56ZPTbYGXt42QT4VOtqtKA4TiUBQHkqlWxjmJTfEAWGQb2g2beXVGWrluo96QTkUB5wODcwIv1MbfzEHumu7sNHrWs56F3/md38Fzn/tcvPGNb8Q3fuM3FoQwY9u2M6XfzxIJzcyURxvcKEw1OxnGtEV3Bmuk913NluEZvi4uCjozATnhhg7YOOF2PeDm9ojTzaEc3HmzALepOIpuquNIvfPkhUltmhP0ylcrWAoc1YCWEMIMLGJAV4/xckJxDK310C7xJhunkT/TyOBdhRc5JZNPVI3A4jByBqHc4FS9+BJamcxqj+2HD2etHlxjSBHzJPploFTDuDhhRPaleeFojH22M8xNBgylP0ab0HKJ1KKu7cuSkX2+PZZJfTWe6gRclHpaoc6EkdPITVwApQnJE5UCsR8zAYZQ2zF9FINu2Bn/4A3RYd7YVu0zt37JKprta4TVOsJUqRs+dvRj+21lkHwihVzh6mlLngcrabbzs/ehutb3QTJGANBufSHm5jTK4sSlMoGr0Vm
0UjujRs44QAvx13pl4r5SDck30UV1gre46CKjJF1kgHcUOcPRKnnTtw5/UScEOw9CG5YPqY5jJuPsRaOfFcaaCnCMDCD73hYLRmHkty7N5IHAps88bnunvs4BOoJvliqOgNrnFAxV4yjQSJwFSHal02wHsWdGuS1BVV5NU+TTSsudwb42g66FlRceEGeNw0tA15TGCCpXxSGijpJc+06sB/CW/hqCtONhHdVG9g5p3L63VZ2jgdH4h7EH+vNiZhPbabQD2rBRpc3C49Si0DYuOp8AOpA+81JsgVrYyRnnfDCr4GKUO8O76yP3YzjCEQn8ZXKt8tvoDXfY7Foiesv7Rr9sygFGXlqeYzjbxU66fJQU1KlnnWK2j6Xdxoe5Rvxp20M2MifTBXk20pEObsGhvMPkXeUz1auGrn059uVGssnY2Q4m1yMz1uJkrHhJmVReZiNzKHlcueMDLD46uds3bvtnj3VQW1Xy7PTBdeZcIjgbcydbRV+9jMDQP6HQdGLj/JdFRhi6RpNfnS1FcJN256wVpwKz0/fRoeJsMADDYyJGKWaLOkP6yqXOouOrjCYgq31MepyGRhKucUHEaYXmADLb7HT3xsY+WscuvG6NJ2RXh8Iq42nozXXJLjZ3C1wUdDEb2GXM+kUmrVDoNRsnV134ExnVnH3iTLJ0PV/47N5Z2yzQkvKZ+a5VEGvhztEUcBThcU4Zw9u6NTOVxaat7pTJB+hZoLYMyXy52hPHB9VRtHFxGq256qlcZTaDThm0Fn9MyguYgZzLYmvB4R4H76c7O40+8zM/E3/tr/01fORHfiTe8Y534BWveAUA4Fd+5Vfwwhe+8JEBeV9NhWSMxBtRJ4cHKmUsPVuC1b/BGBtTPqExW6TAcRECI1Xq3XICE+G0LdhOC/LtUrZ23KRyYGcuE65FzwEx4ay1KVFcsnLi+gEjoKqBZfdYLrf+GdVJtJy4rZBUogcbpxG4GQEA7B7Ssge4Hux1SOooygvVMDwTXVQnCnmlegWmiX4x/VMjyUQalfC+JoTFQTSdeFF4tkofDXdDfu2Ul5dknaG0l2ZCzTe3r/kH31n/myTpwwZ1tsqEr3SH+jYZxvCqWdl/d9kHBtRsEts5HW2WASsRoe/fAEfOWWMNW4IacmdXCM/2lcdGHtmsPS/O6ZIuG3OcG2ce0yL7vggPp1wOgWwrX21v9baVyT0v5fB03U56lDMOWKNyAOhEHUALx+ayBbY4pMWQao4id16HmVQYUV3AHziRx4jBeOxstsm42/YgNE9wMrU5lwKJxjqtYdO/crB24xXlyrACX1VzxA7FyW4aRdZ0bU+SbU+2Rws+3OqonUAHZ9IwMikc6H2JDRUjIDUiY22TtiSGrnMssHbbjm8etHFODooN22inTMCkD+WqXunTGLlzJ85lY2Jtmb2sM7rrnCoDWpb3wwW3WeKGnwQuW4YqzUhklkRl6aTHrh7Lo8VPrrf0iIMlbDt3Osb2MQLM6PGltFzPbRPCrmpSHGB2YoZ1HNUzO2PD6tdRBK/7DTj7Jjpe7OTcTgCTMuVlKS52zDNOdGDon6ROr47sJ5tEngz1Z2Wy+md3oYSEDpoxWbbZlmfhTwBt0i3tmzG376aTWkFb6Kt1SFvHmcXB2T7E9xU/btumtT/4DIJDdLujH7TFRYpbf/SZHZ17vLUGrf1obfVhRPeA3h897ShMWPwzkvA1l61rqJErtMLMZdAWySRyJ9Sr8zOxdWzkyGQHRHQg+UUCoVMjd1zXuL1n6GIXCODMSpt6LhOgTjCgp/cRjlykUZ0TdvBy6bPVVUCNZhf5Y/RM02uD8a02heUpCt892lnxE6NaXWdQ2pO2ndOIm4xPW52jUglmSOZ8I3EgWdneIo1KwMXhfq6LoYzlJpf5M3O9dCaXtsRpJHYGERJT2Z5mFkUeJd3ZafRt3/Zt+PAP/3D8zu/8Dv75P//neOpTnwoA+P3f/338nb/zdx4NivfVZBVl/e0orNOVXkj
6t4aQVdFHgdpqY9sGt6YhN8McEvIVytXVqiDMQbx1kni7JTAI67pgPS3gmwRshHSbQPXsheUWSDdG8IjwiYJ6ZGABbgWr3FDEJdLoFs05dOK2ArsyllPWyKKSn2skjxgt0ml2OARInUYAwIeMtKQyEV2onXeUqEYaFeNQohpAcgYE+yGtAlmcV3YPdAvfD4PbKTIg3ng0Ungd/ly91BOQbbKjRw/P2TSrPgpKMh8MLep4BDhGK4KAUc4QijwDh2sbqnza+4GxeUm/Pbj98z7a71xhNHZ3x622HzWXGlIx3yidwYEzOveMx7NpYMjI35EiqnwFKsqPncHUzp/JJzTePcDdKCbnYEkEgY3uUONJIjo2VgdSMxzZwaM4EIMnWA0uitLghcMkxtnMjLlRvpPEucYAiD1tzwyZoCp8vgDTMO29H/TB4oAegU9GttVeRg6/LXzWaWLtRDJ0HZ1JztCzv60+O8MTkcY1MlZprL3Xc4uEbozxz7GPtquM6hTj9mwjWQawFf3LioPipCePi9hW4FOddA3yjoesdeJODkQtRm1SMc5ieA7YO/tpSlsGj+DqPCIA1glDZw5plecQeeTrDmV38H4u6WKZ6xM5h63KLWdUYuogdPQr0UymH0C/Wj9cvR/wveQT5+ms7fOAXVB+Ip9j/qHM7GQztXxU5Lpkt/ag3I5IaoCfSQHXrdIGmGzb9n3ioc1YnqltZwvJLdLY6BH7bYSrCf4i+Q0zjIyRWZmdsaXqeCQU2m2LbTTEQ29rt0Y7XlWnbhs3tw3N0ZUHcg9tLk/MMPrt4JLxIb3vQ6KvJWqP19Z/G3Fl+ylnvlobqEQHhq3PnV1mdFVwdFu61z8DO4cIGkknfZH36kBS3Uotix030xc3NuY4DznsWmDv7EppR/FLKoc1yKD2f6Y3mcziJ5nOxyQ0WhFKqOM2kfOeFg39SX8AYIM6RMstgqXifGAsB3s+XHlPK7dIo5VxuF+2nlFmpNutbLXk6jDK9XndgLUIfCKq8+XUnHBPwnF0Z6fR8XjEP/gH/6B7/9Vf/dV3b/39IEUBERnKKSZbyCo5BJI0ymVC055pLePpSjvp6fmAIdYVyDlhzQmZgTUv2DJplFG6SeCNkG7kcK3ydzFOI3GWaNtogqwzkMQAqcIonWKkkey35ErohQnSWp1GuUYaVcEG8ZCrUdZjSG67YVA9VK4KLDnwq65cpEMq8mShGmlUFLBsiWn9q/XaVQqNOgrtCzND7VGIo8h72I3gHISRD5XTXQ2TR2D4kfHXAbJroBl6swaKM0ibwLcwuypYDAZ4nOjYwikhAJ0CGqY4XIPOdn0KvLprMcR6hf6BocIewdThd2gsYtjX4arNqI1Je934G/k1qjYmR7cWB7bvIxwAznArq+isq2rZXsd7kG0j5JxGdrJjD7NMG9frZqErcXGLo6Uz4eF5uLHpr+sjOdlsi9gLAiKuZmmI61khU/8ubV2SBvTPs2+1jQl6dlMH4iX0PGl/CMuAxgC4eYAfY/Zjnc70O9TdGbqW5kUG2DJGfrG0t4dfodVMZuLZ6Fj/7hmqJrMJcDiPV5OP5WEiJ7tiM9xFuTrKO+MZoz+YaapLPD/ysE+iwx1u9/g+4tOOe21D9L5d1Bj19yJeiTDXTpW2W5/IjI32wXX0TBsRTxPZYX3XdvHS4mdot7C3l2YLSRfBG/I0ORHoMtLPqM4BvzhQoic3RKO61V1XrvzXRFflO5lM23zmh4vWsTLdRKJrdC2x9TmN0wCvka6nPD2qYyaj2GwxvKPeGfIvh2+oODyjH7qIda2Hndxwzw4Yjx87DrHOUeryn6FjQrMj7NYgG0UIwG1J2z0PU89U81E5Q9yiND5yvEz7E9sM1erCjdhYhifFOe9uWpzVb+B2RwUYZ0sUpEwRcDTkcmmQxIGVW7nRIq6TbVNh2PqlClUWPglloW9GixZ0S4ubkRlrRTARckK78ZtIaYE2mUeXOfTyYCuRRDmDThtorcjLzWm
ErXwHACyp/JO2A70YErwo3dlpBAD/83/+T7zlLW/B//pf/wtZAKvp7/29v/coVb5/pKniOMN4VufMhDP710MSJrRDYo+MfGTdUqMEeQBSylhSBnI522jNC/KWgNuEdEM10gjlauBcQt+Wh9y2c2xGsSrFw50vYgG1zhY9wb06jdKJNXROnUbiKNL9qdxWS92VhewUuCrl2jgRihNNyi4M3qiuzlA5XJeKJ5cyIaUSpZQOXM43kj5YHJrQWRdVoMZ/2MdrD2atN0ixPrd8aqTGcTfjP10sNPnViDXl2ofBe5suoc+BIcqDgi4kWIS+PMutVoymsAZJI+LMeSR2O5I1noqiNwJ61l9DJ7uGjRw2t2eAzj81gWvPxhHnKeCiBLrKUqMDe+ij/QsYBWm/RWAGynCUXF8n9Df8vaNNvHHW0wMD/sDdyl8Khq4uFsWoDqSFPF5SKM9wW0ntllp3XkvsUlXGUi87B++kv4Yv2cA+mZ8GBPV17fz0ht4ww6SNC8ZqBNc5+rdwRf2119SuLrTtU3gelbNycfB3OEEIutT+GE+mhuB1ddnf3gi3DZpIggR3ngkMzbn2MsykAm0bntArA4wgR2MfAx64Fp117NxkzDlZEfA2qdOBNbBEefTejEEcSza4ds7bYYPwk3WHq4a7uAgU6WMvsdVBdkxH/erBG9JSHLsGRrN2ehq/RDDcIU3opIN9MB5k/wqe4yJKpN0Lk12wbP8q0uxhsTN5MtOT8ZnCe3ESucnoCEC0hQSU/E52V9nc0aOA3fEjGzoyuh8x3yRZu8HiYSBzRn05m4VpzHs7eqOVNTYCovzEvmyT14oSno7tNHpiYj91dGLg7eAKcHQAdhnHr7tD3bXeYO/FcgDGZ5HtENcAHzyiLYsfr868bpGfqp9q+2TLmPPhYjsNiFZncGq5MbSyaELHblGh3i4ORt2FUyB+FInp5n/ywnTMOZ60kOQvGURWUqIS0SgyvD4nhjrYkruYw+BwY40solNGengqh1xzBk5biTCC1FmBzln2QQLrBhyWArM6lqA45mgXnUl3dhq9/vWvx9/+238bV1dX+IAP+AA97BQoxPLHymkUmOicvJ4aCJcImQHD9nkYWDIoodw0JKMn+5m5+lBA2HJxFt2uC27XA/g2IVenEW2EdIN6xlA5gf1gI41W7plWDF4j2BgivBoz2Gv/6FQPuWbU6KKar0YYQQTeRRxtFA4acxSBUQVFZiCnYmcnQsqp7BfeCleUg/XK7yQefeuIEMPHt1pC+4Sx9DDVem6SbqMhPdxMnUbmQL+9San8nereOzD0I6fOgOnfuyQGRBU+Gvaa6yG0IhzzZJXYKEaPK8DdChPPKpnB5JQ8hd99X1098fes73GsGKBM3b55rn3nhL7vdrJBk/7C0JrBlb6Xd7GPsa9iuF7Sp/A8s7dt29YYlqucy3MzEMnIJr3qW2mjVGBvWQFVZT86WFXqq20MHUUGJ2zoBwDsbVk5OnVTb0wJPYuc64V8ny7h1Y4mwxiMhm2a/y7JGmAzA9pmn9AG0GyTjk5GgE+M9D50nbryxKxXwtrIHgDNMUnt2cqk1rzVHQPY9sZj0hWOLw387gwl1Qkmai6003giKAFH0zSB0/Qt8q/57cL57dgTWuTsnszZoRPfaMgTxze2b7sjY4xyFl6LNPIyzqmRAY3G/GQ6T8DkMKk5/DHaulsMQo+jIaHY8WHTlwCv1zMD59Go75P+DGW3BYkA572wZQJza5fsll9dFOAm45Vfe7nsKhwBbH8RNDpQZbeFW+Al/7d872VJ5BcRGEpn0k+ST5UvXOfHupHq/3cSzYEehEwptW9s85Efc8djI/kDzKMbDQxknveSc95KuzBtjcrYurNRQJnH7YZny0aaf9afkN/JsIQpT4NC/U7WhAtERnJslgY0P5JX6ng9a1zARLiylxsGnpH9rtkMbUV5ZZ1G0udoi8zY2OFdL8c50xdbfrbQZ/tg559AjfRBkz2aWWQRAYl
BvNuD6WsJIjrL0yOZGXQ2ZwPHVp+pnuGr0WJ5yN+0le1mJdhiAz08gU5bcQptGyBOI9L/yjfmIge3rTqLqG2p5/PkO0t3dhp93dd9Hb7+678er3nNa5DSJS7s998UGVJ/ThA+ssskvz5ORuqiswSZgFxES1oJ6UTlIDWu3xjlZPZMYCbkTNi2hNNpAa0JdEtYbgBsZTuaHEx9uLGRRu0UftfvqIysnWH3o671QC5GO6+oPs/OK2rEzm3iYPFBaKH6Ihwk5Lwa3HLKPaEYDAX+yiiJkGCcRpnMQdijg9rIrdCUyWeBK9czknTCfzDPzmlEzhHizuaRpoyBSK1LBrEDYRSeXZFHlQKDOoaOo2jkyA+JNAPVm3y4RZSYPeaunDH0zl65OjAKXQpwOaN7ogw6g2eC32E7pl5/IxepUmduDqSuGuOkUIcZ+X7rb5h3FjYRNhamgIuzRuBO/kuMX0bDg3WQlUlFAVTOGRKni0ZBMMzKozcyyK7mW3gr3ytdWf6RZ6EbGWOCTiCy4F1pTmA3Tl2bMumZccKzQ34UfHRyJDyz+RNpdpQnfpulWZ4LYNxzEDt9d1c6ifwl72Q85BuFb/Bt6bkiqDqyXt0skywnW6zjSIFjzGh86hSb6GIX8Un+vTpedFEBkFsCu4O4bdvCF7n2Faz4Fho2l7N4eAk10oGVNp0NMtAn2g9xkho5o+8l355TJNQJmfLYcZ/pLIvH2Kdqy1BqlyhEvhtOnGEz2bpMRnuuRGh/6FTTTOR1lZHPDXc05nsnowa0KL9zeMdoEWdsALTy7kzqbAOqY+5+t2ctM5JjTVS321KZm9yvIQlk+knn+h7bcMwJteP61fwgP5zOrHxo6VXGnMoHjdoTlBqUCBRKDlz1kfGSj0A/a7/byve+71bS8thxKnxLDhfO9pzwiOBgdha0l6ODcegy9r9d3bKYSA23KtsFHvO3tc09mdCkkUl0tqeTwbmjMRm+czwRs+2Nl6WrWknxFQQ+Hjy7auy7zE53UBybgZy1Fzlx5EMtR+4dV4ImHoLUl48/R4VmOlbpgBHlk9dH1I+ZyMa6ZazJJ3R5aQRwx8y+w3t9Z6MXpA7uB738SdRkOQGoC556thJXB1I91Lo1Xp1K1WmEdQMe3pS/OYPFaUQoq/Xik5GoI6IaTMFm/i3/LlAig3Rnp9H9+/fxOZ/zOX/sHUZAIYbZpRSjvC7tCWp5NaNf06bj78SghUuU0ZGwXTNoKRJY95AeASzl9jSicoNI3hJoJRxOJcKIthJdJDeblUijdki1RAfFlRu5alWZo0p7d81p9ZzaZ4HPO4yghgZXLUJBg3EuiNW1thqNYA9xtHjlWn/Ze00gSmizjVT3EJeOkRoX8aDJKimTKIsqGHQLGrnoIjcRtQ4Pu+UlrvZwM0yUDKzCsMaOVbCi+BALG/ifbNqjYwNXNyGocFECsownw92A1ykr8g43RBwa48f13dbBBn/A7qr0qF/jFfFB1mjIjBRvRjnkDqgOBwwr6/on9GFXseW36bs2L3rJ9HMI10xJ19+WBvctgwuSwRtJPxSuxtdscWLGzjsWAzDRwNB8LktHq+LsdU454VVqSt/hN+JBDJoZbmd0BFM/+m9qKILnuntCvx09zpLANnqv387DCMAd0HxJcqH2Fhb7eyLLonNEDVfJLD8kOi3CZLcRSoYdHAPnx1fzWJwa2ukmbKnJtWFkSl+962/HrzGZekRDxm25MxkNauPjHR/YcRrN6YQH75wxPeyoL9utmA/48Kx6G8BEAguqfWGXWPdoWXFALUohbDd0EaJBRju4VTeR2kqd7CWEl1BHhNIaUzmUWbJa/pwhx/LdwNnSvhmUjOoiQ/+V7xjUy+rQ99n7KM+HNLRXbeA5pekoUySvyts+jSbh7nkIAJpuMM++4km5WRrJ67Pykxo9Bjnj8BPBqTbcKII25rPPl7DOXt8AoWtqFRp5ZUHZje69YMLbLQ7unDHqqmR0Fz7oGD9CsvRYukP
1mRvLWzvDAsOh7QldRjS5ZzNoZPebhagdresR+zlsPLx3fO9orifARtPU0bLNo7fwVSHFaHNGK1f3dvE1GGky2F62WIeRla1eD9itqxUULvBSQl1Qp3bsQT2HSI9r2Wrk0ZaL04gzeN2Am1vwutY8GziXPe20LPXsoqCr9ZgX+Tfo3h3SnZ1GX/RFX4Qf/uEfxqtf/epHb/X9JNGAQadKNaSL9MMFSt9FZtQXcoq9Gpd1WxkY4K1EF93mRW9M226T3pa2iNPoFtVpxMWB9LAeSm1uM5ODHhkoThS359ICjwbLxurAIrkRjdEI1iEAXnBZRDDrCk/ZS501ExPXCTlNx0IYVCQG5boqBgLMQd9lkGslicp5UQy/EiuCwU1AqVPOTekHoGy3raERoiV6LdVQZIWlKjTbtimy68AcpZm2Gf2OVSseS95CK2iCiQxAoX/dim18tjh1wjjUw/6TM4Z20shhNMwH+L3dsQ5bXiIJCLpS3sFrFWCYtA0djRTG1KCzm8jKzwDrSFm3bQTmW/17gYibp0jHCTVipPKgKHkya0sDuJrBFAyo2EzlTa0mOOUif7ZirOegdG1DZFkPj6Mb9vTmDURT/xShE8LaKXNnfd/Jg/48kHE+32BxxNcXHX+F4PoJvXa/nxShmfqoqQOuBlm1IY08nmyJiCkatyNYHR8bPWEmJMNtDCMar9F4YDnEHY0vrQw5hyuRhUMmMew9kndWJpnf3XMoN3MMTeW1TcrfcP0lsItQvcSh535z+CwsNtKXOzJdzi/snRNo+LKLGndJM8M9yr4Lqt9zzg3PSZk9x/KBjuxCgGum2nduFVu/efnecO4HcUiyaj9gP1k9eSYLEGS8fTbRXk7vILw3QJJ7DnJRzUzLGPLfOb0wAD6O7U6R0XwiiuyZs8z+7XhE3scBizI+5pfnO9gWWq2N9JrU3xoZZImybACns/ECzc3khG3yEp28V88wf6RTKf8oNFTzFf1AWtWdyrtCuIw3d/E40McTWgcmOsXRF1da5ovxPCefHaR0upE6eRrrlu9Cz0woC+xyvAUBtFIb3y2XG9DEeSROonUFbk/g9aTfOOdi/zKDcKjwkNrEscO6biJ8P9NFk3Rnp9E3fdM34S//5b+MN77xjfjYj/1YHI9H9/1bv/Vb71rl+3ASbO5L6wkdj79H/WF+nLWxGOCtzsg3Kmd6rL3iovoXQNnOtiVgJdCpOoo2NpFG9fkmq9NIbjNTR0llDNmWZcO1rTIAzI1j1ZDwRq9ILWPMRMQR2gQsF58xwC0iiCoSpV7LoAIbgOGhyWLcJAJy9fJLZFHNx0zja6V3DDN17GUAqeI+k8tvhaO9ErRFTZm6Imqkj2q01oBT61gwzwSMJwbn0swgvGsKknioZ0YKINRh83arHwNDBsB8giFtIuStjTiDeGIJRMVNQHP82fxiO86Uauy7gcNt8Ur+c9cna1TE9xfC6wzlQT8uNvBGNBsVkqFjcYSWNaF2oL/Cqo5nTHFoz3p07erzGHqhEZYJKo1xRXECb3FtjATr1I0OXitbOvgehbfeK2mg0+5SNtIJYJjp8tSFvo/0qqWJkHfXCBe+Rhsv9e4Zfjtn+MYJ+XSCPuh6pwcin4sBFw51p63Xq16JmPZsnovpKVz7PRu2iJ9RPjvsJp/bLhrBtvLF8pXRhzSTTaHp6RjMdMBemtlme0Us2gd6wzkHB9tStF8ZY3o2ee4S6efgeW8l9uSiB9ia71E3jR0ToZAy6KR7Zw1jNL4SejN0ORoDh39Dfw6kETBWDnHpS5d3j18u6lDfppNXd0z9di8YBy28fI2ytitrmdw2Mnk2ZS2eLrmspORvGc5FwPBZvPu6tbpRfwNtuAtdrN1RBamWv2R8zvFwR5cWTu4BZ2B0HuDZ9Ijy4YzKHDczosH4HtBdIsN6ovgTeZShZ2Q5vpZDw0OZqR67a7oUfyowyxY1TlynwgRaEvIhl6Mt1ty2sZHAX29Bk21o61aijE4bmMt
h15wzKNetabodx5zrUdsuP6nJSgPeXUjhkZxGP/3TP42P+qiPqrC05mhipL/fJvLo7IyU+nL0/qLQ4YvhqH8SQIdcblA7EvI1Aws0zI24XFdtwxGxEbAScCKk23JrWtJIIwYyY7nNSLdbXfHMddWzHlC7UD0Nn1HCo4tFyGl0k4IYfVaRshfKowMa7Cydm20vVZBzUhWOGnqd1eNLygmdADJNlXmENHjhUHAD3d3+ZAwUBsqZDIMdnHb12N46p50dNmo+J0AOZuzOnNDtTOTLnJMKIxtu+o3rY280zPijGyb0XRW8RqeP3Z9sy0wNNYyNpFn/XSDIGWUydapEQyPCFuprkTZoW2wAd0ihjVTaFavWuAiwKMzsfzsjxE3MGtNNjeZZsuOj9KYWTXkdoyaUEFrEkSh+svBPoi1ce9YYIDinNAHtNjvL/wQ/eY7G2gYkcVpZ/ACI21pHzwj1D/XA5N0lqD8rsga0KM5RvYGmYyrsNz7KD1PnAEqVl4P2hn2YyJyuP4HGR3KL3G+e1jFNEyOzG8uaz8kx+SATC4tfoSf2dGavUda+1ULDyMiJDDzXKbkqWA4Z12qt3jXgd4d0U3jYoe2ZrI5XZNvbJ7tbFwd9G+qGCKOljYEu4kDDsQ47ZCqLVL5JhYGQFZ9sxhqur52uGOgSoM/zXkmBH0ckziYfYGCrONBrsRnt6IFRsjSqcjq+hLcfqvzuzvQzf0d9KtfTG7tUcccdLfm/7N65/o7aGckRtLFSGEXEDvmmq3b+Ieq3M2mqMy3NxcUj9xzkLTAYX08cbjQvMfg6+fb/cjJ4dOi0eLXjX+liiKOaSD8Mxs/aKQEp552/hrekzOiIB819hjAGcq/U6bL0Pzg8D+rds5lHcv+ss5DCPwOrGzMDv1tskONRgHY0hm3L2JwdwCO4op4b5TP9jU2pCSxGaH0nYpAXgHMxHHip822gzNvFpmTZscP18Ot6lhEzONezjRIDfDBjXQ3Reo5v+4cu3VW13Nlp9C3f8i343u/9Xnz+53/+XYu+/6ULMDmzs3eVQCgrr92+UpvdKo6asZw/RMCJ2kSQyzvOhI0Tck7ARqBTAq1AOpXzi4rTiJFuy1aydJv1Sj9s3K7wS4QSOsN6g5gobrew7BiIx4aOKOe9NAhDJUi5nbJquFGLMJJnwL+PKeJ21gQDXLmftmqgiBIy50OoAonnGNW23FhV42tqoIzAtcaTuVHEO5C45RH87OmVgQBUeGffrcYJxtzZrQnB0G57kn25s8bWzKjZw+UZuDqdMap3hJcRTMaU4vay/DG40n7b9uP4DdrojOP4bWYUShQPo78e+cK+mU52z238uf02bfQhse1GB5UTFp4gE1gas/InCFNxQw1py9BnjFaycJCdHBn4S9Otb95hRNqGbVeMhJnSjnic6ZVL05DMGZ0DcndBI9LZKMveRwCggUkbcB5eT+XRbiScoSe/QhzejxoewHdu1bqLCBVYcvX9ElrkqREiqjstndWINqq8SdugH5YmRnRE6Ps16addsRWYXR21LRs9p9WFdrvrry9JYZxGcsHdwDjri5GXlq51ktbhhM+PvdTHaAtK3MZbm5HxpQEOIs1GurR9irSp5ecRLPG962bon8Am22QdmFLfYPxGQ9rkYouMi+M3dBwL/Rg53W4mM40Ruu10ADrbbRQsCKBEjk5C0rrIDeXXhucoN4YT3AtkibWNdaFTs/q+jHSS0vWAz1XusOFRDvDMZJZ8d33lMY1i/Ns90+DxEv0W4ZrwpEQit3raeFwicjo6kXNfpd5RJWb+ci7S0bVlYI4vyzgNlOmZes/2kaqUGNEJ0I5tuOD8Jwd25IMZSxvBw+G3vHMLJSMdFVKUY3oOrdZVKzEyVO00yLP0YeDMFv65VBE4gEjxKoyo+FX9YJWSgjq2/Uy+4iQnsFw9vSRQqlEAKfvCnOs/7sa2tEP1qJWk/zhRcyCZ5mdyZi/d2Wl0fX2Nl7zkJXct9n6dot0BwAv
7CyoY0ctMmDqajgPKBM6GYIVGZSvLwqDEWIiRiRsTbUA61S1pG6qjqDqNThl0qlf6VccRwHoSO6d6GxlRCa1jqmf/WIkQ4I5MO/OUExzhU1VkiiAzgdzztk+TO+x2xzrq8FzlQq5nK+mhqyodWjRIdSCRMb5n2+/0GloxsnLr4yWyzDln7GHbBHM2DvXX00uZSb1TA9UZDT5E157Z4Q5Trf/2tshFJXrJatwwhQ45JUF3IxXXfqzXtnXnSs3EOfSTp+9HFlls/8wZLYMxdBOXYGy6SKDRmO8k3bYKeBr1esorewPHvOLzbdskdRYVbzU2OhzbZ4tHNzmKkYDsy1oZXBR2yTx0Jk3Gezopj3btIxHzIO2QlksXtDeLgnNFJ3nOtT3tuxUdZ2RWt6o7lGuXA6fRBLYuOf9P6L7CrjphVJehK7ulW/R14xMDZEW21G3hvCSa2cIueImOTvvXnp1nP/tKLyfLTlZZXgPcpQl2W+p0QcDwh8VHwc8cqrMTQJUf6PW9wEqY66wAr5Ox9vuIRvf0zuD7qHkAbrs1GMWZwrHApYIAzv6KEzOnO2YyPdTfOWUDreliWOflhtPPtps0+K5lMBqHcJ6eo0vuysc6pCM97knhiA7a2VlDMQlcVt0Ifhwfy3EP2fAxWqGefiZnTE1o1PeXx8B2gA9ehXF8UrsvBvZZlyz+GX7SHpFiEDyLvtJ355qMGYJM7Z5HMu7JpjpWeuufhfsMX2qeKKum7cyr6dq7oI/KL4Z3HOzy0oyPizSqc1hvs7EbGKszzuHd02mFRtsOC5MCXygitNfdDBflDtB2G7joIHJ4mKiKamMmdRjRspTAgkUcR6k4p2wZxdE+Hmy6s9PoK7/yK/Ed3/Ed+PZv//a7Fn2/S0NcRrnM3etSdqDrHiVZJgKV29NoYeCQkY+pKZGtNMgJyCBkLhFH2AhUzzMqTiNWB1KJLgLolMs5RnItn3jk65X1VOu1BpQaIqNOzjrrBBGP87ooCG5Cz538HjTxFHk0/u4M3UmeKDjrxKDYA2zGhFoVpp6R0Wono62PlnEnUUeDvnbh2saIdQdJIzwHPMQ2hkbtdFxbIQtPab/Ro21LuzJTrntpRFcD/tNsu7Rxpt5z7XSS1/DpmfYGRQc/doA6Y7wMq7Bj6Z53DqKG+X2uDTI0H+gBA2NW6d0qzhGdXWIgSrZo3Km4CrJG6+yx6M4diYdgi9waKAXrINKaqYlLdSyHZu0qOpnyCPn+P09TIg1poucuAX2X3C8c81m5uMAwdDxYPgrfrWE5bd80MXLCOKcLj8uqoWtpTOmXA4ysMMkijrbJ6MdibxCcvC+NyHl+jveD3O6RcHdZ5HCucHAX9ThzFk31kdF3w8naHZI6n2k8toAZ3xEYI9kbZUfEyQzJ53hhwKvzFfb229lvF7RhIzDsSv5e1A1Xguro0+lMGusMyz/RhrEqxbK60O+M32wecpd8l1cVTo5lZokQF/tbGxF+yyz2m/w+Q6tDv5/ADTRAjCK51Gk+k4+w9c7kpU2z97KYIj+1Ii9ALnUmDcSd53uTSUWalT4U6QUAAQAASURBVHkjmKOusLIYmAy0bz7WzgaG2Jxra1CZ3RGs/TXy2R3DEenNwHuxbAn8pb6Wc3wwkYOAGWfhK92qzefrHcEqOAkLDC7SyOpWtedkzkaNLs60fxfH5lAGVhkpA0cDApTLVrpbzajITpLooETlfESRjR0A9X2qTqNlKf+q04gP5R804qgH99J0Z6fRL/7iL+Jnf/Zn8RM/8RP4mI/5mO4g7B/90R99BDDeR1MUQiF5Bg08N5MMM0o0jOfq1ReF4fJaJASdEtIN6YnrqtBXAjRyrRBaOSdBHEX1hrRTbk6jtTqNdN9krSwt885P+rBHhc6QHwngwOR69SDgmMoqwiYW5btYTdJGb9Vqy85YITc0Tu6qw0gmfq0NIjb1tQcfXdH6Og0FHhhfTtgbYaJ/Qtttax73SsRGgXTwohu3LkIswus
ym3pJrihmp8xK2zSUd10a0tBlES+uhGpZdDRwWQWXwSHt2OakzZ42AigX9GlX6V+aLG8qfXH/bkoHgyqDglSeITgnCqdG0A1Xhp8jXQmswtYjp9NEb2o1xjCYkKxvdCS7DP/IiqW8nJFTnIfZyLwh7CHizjm6Lx3zu+afpbu2d5dPj8r37B9m1cxsX6CnU5cxyFV5nqFiakhO6G3Ku1GPkSF7iRJlqnTnlJ3hDe4MyM52OId3w2dOHhDKpKS3c316FLlq27YPRmYIKJ1OjPw84kUOLLEH4x3osoxlq9ydn4PLb+abvhZVNZBD743UReHeQV5EW8U5/G2ddjam38ennUmZVg79OUZAZ5tZfEztmWDbxHxKW0zmuekk2ZYo5XWLmXzXsertS4VL+hYdYIP+dX2Jfdjra4VR33PAuFVzM/qztGdh5ApfrZNrfbvOhJFMtTyk9cqHyfh242Z1L4fmyePU1NOBuQO3k9dWtlzKLBZ/MCrG4IRMvxzeIXqAyjk2ZKsb3HhKoR5zW5a1W+7E6BboHT04lEkDO8TB7OTyRKjZvg3akfHpbNczdB2jzS9yCJH+twtT17ZZiG38NgCQ0Xb3sDnLUPsz7lQbHgJTOT6GKAFLUkcRLQtwWMq2tMNSnEZLAi9G1sR/F6Y7O42e+cxn4jM/8zPvWuz9M1liHCE1jOnI6dDlmzmQLuBrIiAtDF4YOBD4WA9bNmWXK0Y6ZByWEkOcmMsh2RuD1hpdtKH8XWso31ZPaLcOGvkr0UVnuu9yDISHK6QGR0SgtFmFQnAaOQZUJaSqDMXIrsq9huvySIqLMDAKvfwr+0prVb3AMc3b45d63Ti3DEUHay4RpgTY67/Ldd3m+6Q+i2oNVSb4LQc1Ssy2Lf3fTTx5ju/Y1FuteqqGHhk86jlQF6SzDqI9vgT6cFGnBH2hi26l2fmuESjWkKrjqXrnEr0d8lxyw8gYoKCUY/tKZuQJKLYn9NopxfrHGVXjPATo1s7GiSazvA/GIVPpQZRvrg9GiXdbIa1x4AwRbzy4+mL/zO8YCeF2DV0wPg4OjOGdHi4/AnOGk7vSyrDyu5V55CYvaK9NTgzljHg/GKq6khngO2fvXQpjP56GrmybExhLt/x4NzHSjMey7YfmDvta2TCqdIfGefRykveR0iV0YXRRpxesODVG9GjRZdjOiNcELisC7ki8HCYftuJz52C5b0NC7K2HR9q2ckmZu+j+YXljd8X6xBFzQT0xUtpGXrqLRKIJNoP/DnTL4QfVmw591F+1Z5xt1go3PdAmii66llofGXDnUDpevSvso3Iz4Tbk+3F7UxHD1Z6xul/thCh8p9VbUQ7nLBvp6xlgrpKQ7RHxqXAb+F2ze6udF7RzDi5xlJOln0p/HGhO6rB05qL0XMU1s0TiXALfhX0q+c5kDIvO3I3jpL0wBvJT1klKVKzR8fXK+XJMS0Vm7bvDaf3sSGhqZE3gIfNioN9lrLz8CG1VxxJtXHb3MErgxpb9Fjudg0ejszqLZOCWBFoOoMNStqIdD8DxUOTN0TqNUjv/9qIwsj7d2Wn0fd/3fXduZC9913d9F/7Fv/gXeNvb3oYXvehF+I7v+A68+MUvHuZ96Utfip//+Z/v3n/GZ3wGfvInfxIA8Pa3vx3/8B/+Q7zpTW/CH/7hH+JTPuVT8B3f8R34yI/8yDvDJlsKp2jd4RfHD/FQD/1ghS2N6C/Aw0Bq/zghbG8qwOZM2HLCZiKN5FYWcRrRyo5AnYNG+iZCTDWhA9cJDA7lgELXHJnGWYK2sORjx1jyt3MyGY+KHqrI3M4eqo4SdSq5FSGqZ/7Uv+IoStS2diXjQEpo+Qnj1TA0fOzotLkxHckiGMZRIHXlJoZxe6aLjRSFnwG3ltrB6BUCsE8XFyn0jk4m6ZwRafsvD9P+mm9TqymUGDKomo59+3u6VcY29FlX+i9W4BEW816MskG+1mBEas0yoLcmcyqkDH8m0gQOToPOuAhEGMUPT3R
d3c1gipMPZ1TZMZBqJziYjpUxmJtBULPm8tLjJMLqqvI8YmilGzcHw6Oo+MtSJ1e6j5PPl9LmJWUHDfDg20g0BPZsfBgMNrbjNEmNNgKA3Ri296NIhq4Kbj8dS3U4JIXbn3/XCMDJW9OOXQQ5K3cuTXvjP8i3935P/kyDsJWnKg8am2d4HbWV+fVP5LFhf851MJSLNHc27fH2qIG9NvZ4Z1Z+kr2DYFRXOBKgOA1IZdJIH/KFstvKZG9bzXX2KF0SdTSX72hb7RyvNYftKOpt11O44xRx+mpW/lyayZxHLL+brCySQVecjBlqz+Hp1svjLcCT80BFqmu0V0yKz8txGRcMLw7KeRJydeh0YIDYRHKxwELm2bddoosaTbd6m33a/Cikv7XJnfG/dHHXpYFiJlNRd3xAfd7DpfHVKr+1bXNV/lTcAfW6+rrQIo4ZbXbq8N9vu0t75bnBVZxBBA2PNXNh3U5Xb3sD1/n4los9ueVyU5qdm4sMVrlYtpsRoUQYHcRRlIDjAhyXco6ROI0ONdIoXtI046dJurPT6L2ZfuiHfgivetWr8NrXvhaf+ImfiH/5L/8lXv7yl+O3fuu38EEf9EFd/h/90R/F7e2t/n7HO96BF73oRfirf/WvAihK6pWvfCWOxyN+7Md+DE9/+tPxrd/6rXjZy16G3/iN38Djjz9+NwCrd492qOScYdYLziYlqNXgx4zI858IgEzIW5GMtBLSiYDVN05VSCRiJJiVOUZxFK31SsKtbEkDcyXQKJlKw9bh0A7mKs/DCaBNxuC1faEE8CaMb5UuK2OLECgFxEoxSBkqllaXC+uPTozUDBJejHNoka1V9ZueDUTunCDtt1X+2BfCzqiXsjYfm2wieIRG4g1XGP/u2pJna5xYZT1SXiPYDXzyXJRts1SmDgPbfhyznXaGMmxkn0VjafZt0L+hvrpIeHordMYFDt+EuRKKY78Hyx7LjcYdY6NVYdqB3in2EYzGaJTrsoEBXUYDsWuK3F50WWWx28FmfZXPLI4my7dAU5CX8mqkU8ufxmB2clX5dYcnZ/ADHZ108L0X0tn9+xfWM9V3d4F3xpcToC7ii9iEoVFvpEbEoxt//TSR4a6aCV+1JtqP0Sad0g4N8eBuUssAbUbejiYR9XkGy13TORz0AF8gi0Nfd/kQZuyNnunOBJRG89hSm0UadZnvyLcXl7HlztG+1e+x/olM7crdkVdmMHhZH5wnDMwOi24gjmV3Px71Rb3Yw+qvKa5mOmGHR2fvpD7Xv0xGH6HJEEuLYqOe0dMuemqmi22ZwfgRBs1cYuvM0owPZt3hhgfV9xYnBt7u3ei3abdb8Jn1YRLt1dV1aeIBjUY4hzJ98H2njWF9o9/VnrZ0Fmmus6VUrprvdoEro1xeZOxzuz3O1nlXp0qXRkRqbCb5beHYS3Es1UHZ2V+lQ4KrpvtHSnWg1/bk6l6K/a1tM2AuxxBks0YyQhzSDHUIFV3PdU6O4jzSHUAZzFwDMEq9lBLaGUZ1a9rVAXw81uiiA/iqRRrlY3EY8dJuEh8tpFySLnIa/dk/+2fx5je/Gc961rMuqvSTP/mT8UM/9EP40A/90N183/qt34ov+ZIvwRd8wRcAAF772tfiJ3/yJ/G93/u9ePWrX93lf/azn+1+/+AP/iCe8pSnqNPov//3/47//J//M379138dH/MxHwMA+O7v/m485znPwb/9t/8WX/zFX3wR/GeTMNmAEWKaEWg7FmswWlUXCROovEiMtDBwYPCRkK9zIRZTLh8ZWBhUt3fIbV0l2ki8mS3KqETxVGJ0ZyQYuAiQfeVyyHEhPvk+MHq55CUW7y+XW9mUwetx0kxoW9LaM9dqZSLKQMlLl1xRL0xprenWF67wg4C8hOd6SJi9mcxFIBkcWAO9G2fb9MhQOCO8OgFZvw+jkEYpKtGRUh0ZUwFel5yA9IqJbMRbgMMZgXtK88I0rGPPaNKxGNDpk0kThdjpTjMGF8FQiH2
a7WwdA4UwHINRHQp8sJLYvIn0p4a2wcfM4BmNUzVIm5FOPoJyp59A4EHLt4R61Wj9nc73fciv8llkSuTDkQF9Adyz32d55Iyx9f9ZCjLjIrDuWKajgYjnmCxtym/lS56XizIijomFfSRbB7TTwzb5MJKLKHzQboURHgn8NYJRYSEH7yVpKEP3xmwqa7GPL0zezfSO9DdX+VB5sHfejvu1B8NdUjeRsTDutRsLWHrhLntH8/ptj2Ei7Y/yX6J7tB7Wdw7PQJXPsXNjeGKzkTacU79zGg1o+Ayu92ydXUeNsbXUOTmgLTsp7badDOCZOYo6WEL/zo33jHd2b1417/to9J38QL3lscJkj4uwNmqA+RIHZhe5Pzd8GoizuvbKxzpsPZcoopHt8mSTpT/zd7hALK9GeifAJE4iWWwY0mksO5Ldg7olSb7Z8QUNbkLPO1BZ0qF+BAtjMA/icf0T+psuULyX7CnvdAZSXUglrmMgZxcRgK3Cn6Vz5beeKZxzizayUUcA6jk1JcrosJTnwwIcj8VxlBL4agFfHcCJkK+SOo3ygZodfI5XJukip9Gv/uqv4td+7dc6p81e/pubm908t7e3+OVf/mW85jWv0XcpJbzsZS/DL/zCL1zUzute9zp8zud8jkYQSZv37t1zdV5fX+Mtb3nL1Gl0c3Pj4H3Xu94FoAjf3QnaJQrYMvJQgbP90cpNlBAlo8EylbN7VJCXCKS8Jd2exnaSI+cXVQcScvFqum1pAGw0kYsuqievc40yUgfSSBELPOIQylRufRMnVuLG9HIOUTBW2Nzk5m51s6HSdiJABo/urCLoNjROMB5XIB+b95UPxXEEALygMZeUA9pqmDV0rIEzM4xFCMbVgR0jWauxxpr9pqH57JrUXxcYUfLcKYqRAhnAJHB4o9KPiYPnHCwXprnBNO5H16Z8e5IKQ0L0pfGpDXjOWIwsOJlgXuR0i+MZ6XTPQBzhJNJf/RsnbC0suDcOhu2Rz9O2BqDx/cTI61arzVbStmd7zqujNHToDoyLGOGgK7Ajw2iURjxgO2lpeb+Ku6dL6T2oo2nDO8bkXdo4myaGYPd9RqdAj/BIlzP+HMmWwEfT6J4I98AGmE507NXm+kzKI8M04e+LV+Av6PewvpkcHsFg8aa/J8KzFrSyQR3KRu8Ijs72y4jri+lvhscI8sRmc1kmtDWUuQi0fo53B7R/Dp5e79i2ycMRI0m5VECDevba8PhsdkEnf9NkrPb6Felz9HtCw2T7J85JwNs3pv34Lto95TX1bUYdM0qj8Y9ZIh2N+DU8j8pMI3BDsk6jjjYGdNrR4sw+GuFnlnbwcrZsqEPruUDRXmRv7oiwSfMdXY/4d0gDI/qxcDinEfd1z+ohQ7ODNl0fBrKEXAbgUufirB2Bb6jTDbF2NteovxObZiq/9mTO6Ju1YRnIW+WTXI6GKYsdDF6BtJZnICOBys1oW4buMWKujqIMcN0RVDtF9bBrpAQcDqBlAR8W8PURuD4WR9H1UralJcJ2lZCvyllGXJ1GbovaOR4I6eLtaZ/2aZ+2v0fZJIrbUAbp//yf/4Nt2/DBH/zB7v0Hf/AH4zd/8zfPlv/FX/xF/Pqv/zpe97rX6buP/uiPxvOe9zy85jWvwfd8z/fg8ccfx7d927fhd3/3d/H7v//707q+6Zu+Cd/wDd8w6Ej9s9vtczODNjjDVSrhvkuMGQa4XrvHtjJleC7bHBOX7WkabYTGvOEMo6EjyzqNkkQYVUdRjcYpHssKizkXyFUlh2hXIaaHe2VGWkXB+tBJdYAIrAJ7ZnD23zQ/91EZBd5U4NMDwCrsR9OPIyEfivGSD1BnEi/lX6kLfqvaIs/BCJgpFqsU06QMMBWonXHSPc/5baZY9oz+PWVkK+7Ct0WuZV/5I68whzR0NFnlPYM7lr2wvX1gajWzsQltdYr5UdsPhpE3QNjns22NjDLCOek1NmBGStKs/tu/HZBnjJwmK8zAngPS4jM6de1
5CSn031m39U3kzaBYyT7rX+pxEpqYwm6zjnhhj04uVfR3MAguatfy3A4up2VmbezoPfk7NPAEEAvCJeMhz5Gn7Mp3rHDGR+fkWOQJSZHO7KcaISz8FeXsWUN3T87sjO/QuSM4GOmPmX4RPprkKd94F5ZSGTdnGQPY2pA0h60/LDzK4ZnuOGtzRfhMmXPbK7qa4jjb5wFdWlkjFYxUsatqJHPPpMgfwyi93NNdXOXvRMCM72Y0KvIann7099mOhPwjuT74psVN/ySyDzA0ZvPUPnncjSodwBD7NKKDkSwL9U5tikjfg3bG+OA+r+3rSMdH+Gbydk8On8PJoMzQaRTqmqYIb3xn4BrVebHj95K0IwMuXXzqkrPLoNuau35G+bcnq2MTOziOPDGUJ0F/aSTVjIYtKJEH7XsA9vYyfX8XmnjE5G1fajKEy+3lYtfyyshrcSJJ4AVlRuIFvNUt6VuZu+qAEEEOv66HGAEpgY7HEmVUnUZ8T6KLluIoIiBfJWxXpa18QG8HX8IzJl3kNHrrW996eY01fdiHfdidy9wlve51r8PHfuzHukOzj8cjfvRHfxRf9EVfhGc/+9lYlgUve9nL8IpXvGLX4fWa17wGr3rVq/T3u971Ljz3uc8tPyIyJ9q6EH0jb8eb0YAZVGuJf2rIECMtGVgYeUnIS71hSyYtXP5kpvIvUzs0rnxoDiP1ALMBuxJnqn8XcRSZ0LaDcbbUbV3qNAqC3xl6mZFE6WRG3tgJNgXCCpLMJjy/XU0o2+pK3aU/Wkwe5CDrZJxc9eT4fEwaprcdCflY+p6PlakINdLI9g/9M8F5bO1kQ/4yUA4tl3dhi0zn+4v05Qw08z0q4XNpz0AeGWbnFIcTkDCTfnJGVtcOteLWUDlrFA7L9n85lpn1geKVrZelbg989mOzGwYchHSnoPvGQh3s34/KjnDkyvB8nCV1RhU5utP31gjIk+dgHIxg7WmJvBNpZiBKHcFg5mRo6oJII50QS31pMF4jg0dwYyI1Z6uuw35HOOK7CZ+erT+mc3ku4b3J+44mLyqzk29P30bDNxqdIxqd5B/BpfAFg8r1Mcp2palWedN/5oe0f0Yuev0HnajB6kKgRX3Efu3h14KyIztGkyUmdt+7trpvPI++igw9HfP6IQO6mFS3rmo1erscG57EfKxHsjh+80Cc14V77YW2p3o40rJ9H22Ac21alox5RjIktEX2WfJlE+0V+Mkv+gVYR7Iq6mwnu83ziN9s+ZisbnO0a/NMyke9pk4yMnQ2kCczXM/gDbpqmmb0IJ8jbowMughv0QYZ0XiUvfEg/iDPOhwYRhvm54EcsuMWQbpE7+2VH6UBTEDAYfw9kPWPnAYyoIuM5Tn8kaU0v71AYTMZZ0iZ0eUlfDcAysoQAMFmwj7eTVvT6DGLk0inBm+Sb68Pd4rmPJN6x3PhmbS1eVE6td/LLYGXXJ5R828E5FzOLaoBCsRLuQALgG47Twl8VZxGfEjI9w4lwigR8nXdkpbKbprtKE4j0rN7Lx7PkC5yGj3/+c9/tNp30p/4E38Cy7Lg7W9/u3v/9re/Hc95znN2yz7xxBP4wR/8Qfzjf/yPu28f//Efj1/91V/FO9/5Ttze3uIDP/AD8Ymf+In4hE/4hGl919fXuL6+7t4ToxfYkTvliXpZO2JoX53NOTomMzRtGIrAzSjlyJT1TCMa0IXmY7T9YPCEuJgInUON0DmkEqFzKIS4XRVHDAjI4mDRunxbYuQmPdSTyrlK1giJjA/WkHwwI0mYJaCOr6bcRZmxQXo7q8hGF+WFitdVnEZXxVnEhOo0qmWW0i8xZFx00VLbGkxIh0ZLahKum7jOiaN/NzJQ9pTn4DdfkKc8G9oefHcTIlFSAHSf7i7foExC7PtoUF8A367zY/g7Wjh3TBYR0XBUZTjou23vEqdNfG8MQmliXH5iXVzSXqjGPrNtNBoCOrFjh5OouIeJbP6W167yuknKCEypw51jVIEzfNn
oa4CjgTFd6mcPYx13ETXlPRu0T8Z+xAvG4hkaNOfG7K6GzqVyYu/99F3j5TsbmNP2DcDR8RKemxE6oDkzbsM6pII44RrJwk6+D2SYqV/pZMIL0V7wzdUFH1koqWHuo8WDKYGMdIz0I/yOf5X2O312xhm0h5/Y3gDk7lnkaY08ZG6Ryayylo0MDrQS5H5Hnzs40hIj3TnKvqu8Soap7LZ0MdIzQLPX9pKje1NHBCdW5WS3edaITx7LZB78lpaj7LfPjtag9BLtJzbPtmyHiTCO1onivk/Ke6ezPeiZ3TbI2Jdugu8qRD/pJYeiHjZT99k0wdHQwRvBHP4NuijKzih/2IzzFP4mDHseGfdpyEbRDtjTZwN+jWbS2XoGdXL4Pa7cVnuh/Ku49nBVR/genNGUcXwLxZl1tHdpxIf6zTd6kd0q7VtemskKezi37YKF6Yy8HNJex5s7MM/qGaVBPRbeLtLVbieXSCMuTiP5nQ/Ackj1wgsG5QW0JSRmLOsGXstZRpQS9MbwGtTBSyqHXRun0XZvKfPcK3EUlR00+UCAiTSyc9ddG32Q/shuT7u6usLHf/zH481vfjNe+cpXAgByznjzm9+Mr/iKr9gt+8M//MO4ubnB3/gbf2Oa5xnPeAaAcjj2L/3SL+Gf/JN/8l6B2/pZPKnX84/MG8nSCa/Ocglp9Hog/IszoxhMLAL+wOXAbJTtaSrgovGnJ7vX53r7EC/lJHZ1FB0S+EDIh7Ivsngqm9OICWAhxMjkta8uXK8yU9KQSfaMLvipwkacRoXJ2tXeNurIRbaYG9Paod0lokg8rNt1OxBsuyqeWGEocSDJ9rQSrcDDSKPSX279pvDe4XtiRIzSDgPLkBls9YWiUItZu0oHjex9R7BfuSkl68i8qD0xVkZGjr4bKF995sn7vk2a4OiixPaP6Z/pb7sKdqdy2oN3/I2sAUwATTTcBbuCGwy1Dyx9ADzzum/UBtzaMUxwiOm+oSszTWpUUouqEDxfYGg6OnKybkBbQ/k6aCSwlx7cP4DdGYmXpJlxtkfL3XJca3/6+9xS6Ll+7747LyfGrGBo+Wzevpj+rLSpdGrpmGM+862DMfBdavDZv3K5hL6bejONHcDmd+SRETwiR4TWpEzd6s2u8OBHNGx35GEDN9CflbvDZ8nDVTaZPPU7jX7Htjl2w0RG198AgzMcDjlPKmDCbvyoN772dc0I3p00G46L6xnSRS9zL0qx2ERmdc/VhnQXvGhEd6PFzsHCsQ5v0zXZXn872jNwiO1knjVPsAGmkXKGRi/a+ihw1L5Ln5mNrSWTP81s+jVmyOHioYf7jN0yGvOQ35kaousGbc7spksinlgXRaKNA+g15+fgHn17EjwS2dhluWC8L4rYi2J5ZkNE+81WtzcGYRx0AU5pK8o2Yx/Z5nQsTMOWJxntWBAHXG1u197wPzm+u0Sezc40ymEc3JYLbq8CnnnW7BBXaDp0BmuUTXtpIL+mfMtwkcFtmzlAR9LtauLQocxgWoqaXcvA0ZqRUjnPiLcFelGVdRpdH3Sevj22YH0s1e1phO0IIBGyBECkOr+V83rDpQOXpj8ypxEAvOpVr8Lnfd7n4RM+4RPw4he/GP/yX/5LPPHEE3qb2ud+7ufiQz/0Q/FN3/RNrtzrXvc6vPKVr8QHfMAHdHX+8A//MD7wAz8Qz3ve8/Bf/+t/xVd+5Vfila98JT790z/9yQE7YCC3IjTLHg2jkCwdUnyJXgimehB221JBlRgrc2yEnBM2TshM3mAmoJ1XVAkQUoUhxGM5aCsf6wFah+JkKVE5xQmzXhcPJgjqSFJ4jU0nYLqQ31wih4SpRysIMUS4HSQGPdzNOZBsOxCmKICo06gegl0cRSVPviL1vuYjigOMAF54cKZR6VBzkLFGOJT3xhCgln/XILWKIuBA65l8cr9mE8/u94QaL1G2g3es/1GjdRHWsckOrgHMNUJu9F5+06CsFtlbMiD3x/eCGszDGoKCs7Z
iW/FGU1LAeGHYwTrpR31HESfy3nWCw+8AtoPBjJH0Q2Gllrn2bxTV4A1Eq40v0bqz1NMOm+2Od6tqwgd7tLeX2BCNF/rjevfqnhjvowsrh/WMUB0tOas74th3qckr2st2hqfuwnt7iS623kqF0cHA5hmmqnM1dvxT5baTMxTzTmq1jgs1WhtftXxSAzuZ4Q1dnvfhruy2R5NWxhpapInTiKwsNrjoZVSQ2QK6sZvYjV85u4iBcsmH1ivPxnYZpjvqtugENO8vTufo+lHEonUYhbouqi7KhFHBoM/0MUY36IJIO1fKOw7C7+zluNpjsU1JliUGul7fk/3NYbzGf3k0jhU3na3NKDSm/TK0mRpeok7Ufk1srlGk0ShfqWYA754tF3Fiv494epTitxGdiExj/51tgYHajVV1UYB3TXu0tDMGXXnq3w3TOdyjjtlQZ86ex7Rb+tQh1+kyyePOe9RoHatDmtNTx2+urlqDUzk5yo8xfoJtQrZbjB3n84CAyMqE9ncUNDWyVUv9AxsqvtqjgYnMciI2Eryg0vbTbO9NqxySDaQDgQ92brsgrVXbbQwcitOI5OZx3UFTd85cLXoz2qpOo7ol7arCKpc/kXcaOd1/h/RH6jT67M/+bPzv//2/8fVf//V429vehj/zZ/4M3vjGN+rh2L/927+NlJIr81u/9Vt4y1vegje96U3DOn//938fr3rVq/D2t78dH/IhH4LP/dzPxdd93dc9KTidkApy7yKc78nKM8Iz0ni2itteqSv/EiOljEQZTCXqiCvzyUHQhajNCq2cYUQop7BfLdWhkrBdJw13W+/JuUbAdo12fZ+NyhkIZGGWFjJJyObGoanTiKEOIdpapFKJOiLoTWyZOsbXbSpAPY8JCmvZnobax3aOUT4yuDrCiqOI9bkIsCoN4m/ps3MamW/WOI1djcJuloaE1gwFsnn2HEjv5WTMzQ6s8nwBAHZCIr/rj/Hko+TvI2t8v3dbtu2ZcmyVb6xd35NR4rWMPJv+NkUWzjhTjYIwduz6LPnKUWMRBy2/6Y6H18JXfzQHVzEE7dGqbJ7L8letRDtCvh3bD9dhDxDN8tscBp/OUHLaeSddovgvMIpc/lmdneE0NgTtGLvXkU/lO/VlLgGzWxngoJucoBmkOFbduA6yT8cSXX8v0nlDft0xZIVaxdHLlZYZbvJ3ifxxMgboI/v0ww4O1eFsHB+1bo70IPUa2PeNBKgeab9nQmpGODtjZejROqqd/LU04nBj88FHQQ5krJM4dcw69a+ywJacyGWLW6dDsD9eIxjv4iyawhHSRVXGxb15obPUzN1D/enpayoS4mAYWcjW+ZlDHvlko3Lq5BW4ALVxeMn/ZRv9F7/d1d5hr/mGWcD+Qp/WpSBcBzAD9Vw87mDdLTN43zkl4vPMWaTPezIU0sio4cthVVjm1ZWe7BHB3O7SSgNtzkaQB08elieZ9sZ+z3kXyzq5PECeCkODm0yN3jMKjXY6r+BGaJXrwdAdeu9KkxAxwp4PhxnrH7byoMGpzuZaqZMPlhSM7tmLjHM7gKTftu4RrOzL3ikZWESz2UYsHADazhgG+IAyn2VocIPeCiq7cMBI2wF8LIiSG/BAcE6jTYI6FsL6WMJ6jwCzg6bslIEefu2ijKyBdgccEF96Jdr/H6V3vetdeMYznoEXfP0/Rbp3z30LtD3+MdPXZ+T3rn2bGOkZJ6Rn3QIHxvbwgPX+AXkrMWbiqbx+yi2e9uz7uPeUW5zuH3D/fz4VN//3PRzew3jG/7jFU3/nVAjQXmGfqGxJI4CPCdvVAlSCXB9LyAshXwHrY/Ww6IWK0+iISsT1n+BBl5dimCSUcSgaHYCXoQj5t1aGtra/3p5Q7xLBOI2MgytBD7wu4XpsIqZQtvtJWav4NaKI/YHXVhnIb/PcGbBxQjzbahJpYc84hmlX39/VkIovHqnQJIlim351eGz94j7aBuH9TpPnTwmzeXcM6dJiUzIA2mRVMoT
fgI9AkoZaDa7tOEmzjiKi0F+7mr+T4vYd6YPAxV2kUdya4CcaXepo1Ixhx8xngO3a2aeZDiaL/8l2oAbIwIAb1Wu3B42qkb+2r86YmvAqMOTPR+JbC2+FmUfv95Jpx/LYXr6Lqtsx1PrM+wb+tKrIb6M2LgGkw8FkfPba5v65KUX4sbX5JUpY3uUBvKOo1Uvw6+o4837kNIq0jUCnIrNg8+8ky9NV3lh5pGcXyZa0IF893AYHAqfVH+c8FaMqe3DnLzrenggKqzd2K78g3XXMNY+xO7TMzmBx/Bto2k1kgba1y+YBhtu6Lk22aIzirn8vcswILKBBv9pzd/bZCOZL+IuA7lyhPb5wYmunP5GmCZPnHVna9SfIzJHe3zXcdt4NZMe0LE+e45jcZTwuTefsgEvLuPG24xhpoVWs8tLJxZpxNCYZKiPJPHfw1jp0CBhDernErGtVjnmwKzgbr8iHlvcmuO4imyLv68+BzcfnVUAP4wVGzsiRdSa7jKGbyzKQbqmccZSBw33G8X75fniQcXwi6y3jegi9HLuSzJm8Szmj9/QUwnqvfNuObX4OgpsLtwAP1uf88CHe+g3/P7zzne/E05/+9N3+/JFGGr3Pp4GwNxHjc1lK/qMrMxPgZ98XYJqBVSuTlR1xwlQDlGuZFmWEEqJ2qNf2yS1qgN6OhlQcRdv1Ur2YJbqID4UI13vQ7WnbNepWLnbX0/t+cGNcFkYhz8wBTxYH7hamzdZDvQMqDEa84SxLSJ7b11kcRhpdtHAfVaTCgTtaUOEuA0y1fwqM3apgpIwaWwMjRpFx5ln6xtVQlsgQ0qD+MV5HSmrU/kXS0D9TfH+uDfe6KkKwopAEf7V/GgtD83BbW+l7w56AgS1OeBR2bvmi08UbAqE+FFnBxIAZv/Zc/slW5nH5sdpvziADh0zSNLxZtoGJTImGim/LNxGMwQorSXWyvEOTGnbGZ0b6oXjlt1CfNUa6yXt9ZwX0iKeNgUa2DpvNOYfR4SE+O1RSXOfm9r/luwtYcJdvz/CAjJXN1Fbto+IbtHUhSBdloJ2Vf+KefYKh5/kQF8gv0rrH4DXCHdrCQaG788wYEyRQoNla89b0t24zF31i5b298eRRbj8Z6ZrAbHZBRNWZAO1ombS48nqsb9Q2jJyxziJG04fGtgFkiNootBVn8udPxevaTb8uRVXHTu6lQQC1DyKunZB2/Q3lRw3NkgN8YINcXN9EFu4lHjzbyauANHLSV/29W9cYSsWv4o3QtuSIbjE2wlBhxPaETAZ64c4BZmOVW2hZ4KoR/Tq5nZUx9RG1l2wnDS4fNSTZ75fKvlG+OG4jR1KEefocD3s3cuEcoi2fDBlx8AwD77S+QbKk6Q+qbXXehS7qmDjZNLQPvDE3bSLqEsmoN8tW+Si3R+5VZscxyEaKeXYAk6imwpvs6S7S4IwEKz/omJ2xK0S1aBajaiK4d1WHFqbWjwsGnQyLxb+D1GwFrjLC3DBdo4wpl6NStrWe38sEyglprdXnRqNMpHP6LGcLL8D6WDmvV3fQHGuzHf018ioZsDsGMd3ZafSsZz3Lh20KLES4d+8eXvjCF+LzP//z9VyiPzbpQqSOaCc6ikZ6rauk44IysuWcgbLtDEs1WqxSPGYsS8YxbUBKoKU5dfhQzimiVIiQcyWgJdXbxIqjaLtH6jTa7pEeEC3PvAD5qkXolPoHvanbxvRQwVz64UITYxel21W5q6OIUa/ehXcUybcosKgxi4bjidPowO3ZnF0E2ZKmQLRxcErEKhxrMGd5JGXOjiKs0aIGy0BBjZQwwRumooBUCAiwAUZM0q5iPlOBNQIEz5W4R91WG8caBiOAzJV/Q8fDVKgPGjybLjSuMKBXlbqWofux1j6PIsqc8ObqQBLQwqr/XZOLaGv01UU1RHqcwRtTNJLqmV9snjVfGsPfnbfW1X8mKX5DX7J/Z/fRuwZGbegBjWRub/TwFL9T4zO2hmAqgOlke2RU0M4tSk9
mi4wAp89nso7at+nJwjJtdAzjmOQ44Odc3aGyS/bFnEnDGkaRF/VZjfpYi1UhRu7TBr96bPWMlEnUtj9b/Tbrwplux8DX8lJouhD8VAR0+m8CB8fnwIey6GWe9bdZUCFTFxO1w7eV96ByRsnL8uRAvZ5Nl+jCGR5cPZOyfMGwxfF9b8kGS7cXps5OsTcfDRcb/Lh17UU54Nox5aVZF8VqFmOCreOSgclFvAdnyXQcztUb8ygs3PJEPbCX7HgP9QaanXVhnRQ7MhqPqCPj+Lm+CAwzGINN5PrUFgG7cgO4hpFfe/2QvtjKZ7arZufhfNbRyayerl4CkZFBqb1vcoou4+Po0OOKD5WRxj6ZwUjhvdUnI/BHH0a0TubHhE6HoMRxcroA2mfNa/VlWLRXG2wAbww+OJsuWSAHjNIMqCWM8RR/E+oaf90eyyhzZ2Ygl10821U5JHsF3I3jyMUxKM4iEOqt4NA5+fYYYb1GmdfW+Xm3Bhnt4EdQKXd2Gn391389/uk//ad4xStegRe/+MUAgF/8xV/EG9/4Rnz5l3853vrWt+LLvuzLsK4rvuRLvuTuEL0PJXsrPTDmD5vXMYulKkvvO7JMsxhGcoNOjJQYlDI4pWJEcqVEsxKj21lESCTWLVrqNGKukUYEPpbb0eRmsfVe0rN/1ntSTs4xArAwtiPAh2oIx1VPbvDqgbaZlLl4nF3R4zyzgg82ApLJbW9zgpM0izEsCw7kmsF2EFhzHpUyBhLrwR8l1dtUfxrjVQdi0MmgoB3spmijI+q2JnWrGdJvuyJ/Tg4GAiRbMAqWWTLGrF2hnhrQ2u4cOPbA+PHcS0/Snr5T3bYPu30Nih+DIsYAsw4jW1zyTeGZwWsnstZgtiv4OeRx8AYDMMIixrvymeEB4zxR2thLBimjlZFhHy1+5SBBRonc4NY/O2E4C4aJdnDO6WCw6HgRSqiwmdSwRM10hnMd15HBf6HdctFEZvT7Ueo8l/dReO6cYTpra/Z8LmnevhGnW91na63aAuF5xlPD/pDSDMXyIXLWtm1prZxlwI3G9gyScZe792M6LZnOiOo76ZgmWxpu/UIQBR0fxkTsKyMb9HbUqvuQjDxKE8fsRZOI0L2Zs8Xpe4zxMaIZ7KsNqavT5dJPqeYOfKARNiO47pIYnn4DMLsTtl6tXyy3KDgAL6L9CKv8zvb9pILRivwIRgOHizgzZS++1VSqJy6RSvrbtzecLEe4wm+K36vc0XyWLi2u5LuzT9DjxunHYDcYG86OYyMWupgOdvsV7Z4LUodKIyc6Z2iEYVhW5HzrPyfbby74uVR2ChzybKOLMnp9E+GKciTKLBgcDnjX4dfQgNMbXTvhDNeR7JWAgNB2pFMy//m5cBzm1vE7O40uyWc7bOifYoOOD2J50w5Vx5H0I3HdCQTkVPJtuZ7968rBOI3Kzh9xGq2PcdkBlAA+skYa2fLlEilq+jW3ei9Nd3YaveUtb8E3fuM34ku/9Evd++/5nu/Bm970JvzIj/wIPu7jPg7f/u3f/n7vNIoMFu3IKZ1xPwbcPXiB1UdIciPUCkdmArYEEGHbErBSmSAN2k7ESMSVGNGcRlepXv9XzwWietL6sUYaXRPWx+SaehNddDDRRQngQ43QEQaqjGO987qtho1CrV3i+GyNIcAJFZZ+qULjziAgyReUuD4bxxYvIUJCso3ClkcCiAe3b1iBoq8rThDqPePwqk3494on8n10yplcmZFtc3YiwQHeWRkz5rrP1uF0UOiSMOJO0HKXra9lYHTspVneKdO2ZwqvdycNltcvUe4Dhd5p0UsM/5GxLJMyoyjiSs9Y2VIPp4XNGEZqDBFqxB413ttLWg87Z68AOSIbNwmqh+FrHzbT3zgZH+l32+/cRzcOZUu4NZHjpQTAgIfDCnnEwXsz3YUf3lsw3MFAOyuPbLIy9VK8aRmjRyd5xhMW8tVz/9cavU6mD+p0E66uLLkIIxsFpHSWDJ0FfTbFwTlZb1M
nxy48EW6nbYcHo6fdpN1cYtHhYKAXosNWHUZGB3HltaYzL+jHSC4YuPXbRBSf0x0jOho5juZ6X955O6MrP/s2le+DNOML+RzHZpZ28vT4GOu1TvZKhnNjKvJf6gs6YMSj0mC0syLvTjvk8nNf/lwyYzyMIoxmjhmnjtcky0Bu6fuK2Kgbp+NrbZSOt2J/A50G+6bItQFi7qobQr4p7DMbM9o3Nr+T0xfYXdZRRNRvlyVjI81SpDErLwAXXaTHfcS8jmZaZcU5YYpEXMv4xz6OonZntAjs2zlj0Pp+Rhzb9gJvOHp7lHSBODFZdSy1TJXJ0UYkGFzbxc9qu7by9VUqQRl2XHVbmuajFvxgL3JagO0esN2rC7VXDD7kBoTQ80ptemYXVO+Q7uw0+umf/ml88zd/c/f+0z7t0/D3//7fBwB8xmd8Bl796lffter37WQZCv0zsM+/Z/Rw/32wmnBYMo5XK+iQcQtGzqSnqsshoHTMOBw2HNOGvCygA4MPxdmTr8oB1sVp1Iy17YrKWUU10mi7hl5NX57Lli57FhAvaAdF205bxazP49vNCsDcFIpFBmsQriIlnrbPtk2LxPo8ZNio7KIRpwYs+X6Ytly+2YBGCWKE4egQ8BH8tav+W+xXlU6X8n0Hr5XEM4E9w69IxhhpJAbBTBlf8s42FPJNjYoLkDA1FmNzlxoeA9qNym3qfJy1HZ+DYNkdI81jADE02zmNzsEUYJlOaqwxIs/ByOzKd21QMGwY7myACK8YvWbFzR2Mn8M3K59i/029fivDoO/BAO6MfNKq9hUGQj0x3yVM3SmhyfOTSXeyqM5UNeCFKT+b9q2xptlnuB3pk1jXCC4Hhx1EU03kKfs8+jZLIS/FFUXAXYtbjERyv8/x1FTWXSJ/HrWM7Vf8PeCrXUfRoJ2x04g6WTPFz6yPk3ZHk+5LbTmEsh2NsRFrExngI3jDt11ZOkgRjicjH2Z2yx1hsc8dvU7ho+Gjq2vGjzuyv6OZiY63Tt05aM3+iU1NRfaOTTGsIMrRGa2OcB14LeJrmBJ6HagV9GMy0n8jPenSo9LkOdp+BJkXt36dk+kugtjaRgNbqKvKCQ0P1HT+cYmesdHgALpdIbZvE5y17GNknR3nWfEJr+85LTV75M85eJfptSjPJu039m4ZuugrI0NI8U7z/pux0V0wUt5ULn2UYA11GtUjVrbHGPk6F2flMYOOWSvSYymIwJxqsAmXqKM7pjs7jZ797Gfjx3/8x/HVX/3V7v2P//iP49nPfjYA4IknnsDTnva0OwPzPpeMYr84/97Hum1hKNcuaCTnhFUijdYEXhN4C8v4huEYUAdPiRQCtqtUDbXWer6SSCNUR1GJNMrHUkYOiNazi5wCrT+c97t2dy+8c0cA2L9DlFaH0u7kT+odtNdNHLlkpuDFb32hXlCPhPZMcVnlPSnTGYJRCNt3mvc80cydRP75coPNFFC4qDduR/3YgYNm38K7qaNjUGa3zkuN3jP594zLjubk9V0NowuMu+E4R7ozzzO5NjIAgDFdTvtO8FFHA0Nx11BOgJwT1DlVBkavbA9S55A7NB+N5zLG/GoBsvxpcT3oy0WOokvSpfmDrLg4Paoh/mTSpM2RnOnezaqMtBCM8lE+u+Kr3xww8DQ1gG8I+0wPzAxw216sd3L7p5Wl6kw130Z9muLykncjPtsrP0tR1iDgR1ZPI6+ZfJ0j3vKdbN0JusbpIAv8hF/ipH9/ASjAu5dmcnmPzrRAgG0mZyZ65lxUy67+vPv8Yb/9Ea/GZia4uHhBI/KnfX/GHhvpum7hI7732e+c9ujnrEOlK+CfR3S16wwIem7XVJvo8qG+G1Q0XPwctLVrs4107Eg+38HW2uOXzvE4g+scjDO70PKN/o7CHOPxjGM4SMMFvtT3+eIo7D0etW2O5FTo6xDtezw/wN9URo7KmPy7c6KLE03rd/qoNqZn6ka4TNvlYqYy18wEUN2m1tkC9cYzTlzm5/X
Wb35sA6rTaDluSIcMApAzIedSAaM4jEq1tQ8j+2Mn3dlp9HVf93X4si/7Mvzcz/2cnmn0X/7Lf8FP/dRP4bWvfS0A4Gd+5mfwqZ/6qXet+n0/TQQdgPmHYMXZbI549+o2ZcuhtjzmfAKIGEtiHKgciE0LAwcGH4DtWA7K0q0bABhUnElHAPUKP9kXWQ7ZYnOQdDP2AAD1kC4JpyPAH84WJ12SUns/FGx3mFUTX3aTlv0bz0ZSR9FW/gHUrkUEfHSG3UfMvq+ziYNXPrwjbI0xXP+2jvpnju9HyjP2H9gxGs0+5IuVInlhZhRGB98s8YXwRoOd63/R2Dln+GPQnk1klNokT2c42XEbGEczOHYnp+49z7e/zBT5JbToCraGd1e09wxHfabuvdYzU/6mXjmrJE4KZ8awcwgJT7J5L3iQa0tH5S/Az+5KdMh757TDc0NatHz4JNMuuI/awF5/BrR7Nkx6QkN7k2uhxUui3aJsmRqWIicvpZ8on2J9IY8LKgyGptvmeQmNXSALYxrqANu3C9sb8Wd7zy6P1hv1RhxDoDmUJf//mzw56lN4132L33mO0+HcYSR7J/Quf7t+7vX1HE3cEU820msXvgthmuJqoNdKfnb5hmVm7UVbJQESLVLOoWl9uuu5RB3PXKDrL7aZaubOThBbYddmCnBF2GKycjU60iwEl8DsbMcL8lvYBnbDbv6Z3J0VCZl7OW7k/l2TtRF38gC9DuvSjgzaq1fq1iNFBvXOIq936aaJYteOfTnr0658ntg79veu7o9ySB4p1H2JnXJhGtrJYsfWHToAumgvrtk0wggo5w4vpl5ZMJIgkGob5Otcbv9eGOmxDcv1BkqM42HD8VAm6OuWsG0JmQk5H7CdCgDiRqAZrU3SnZ1GX/IlX4I/9af+FL7zO78TP/qjPwoA+KiP+ij8/M//PP78n//zAKDb1P7YJUtE5P70hqhl7onAmJaxbZh/x8OG6+vbsj2NuNhhW5HkUjQdM5a04ZAylpRBhwwcM7AS8nW5DS3e7rIdS7QRk4kuSijey8UQOTeOS3b7Vjg3xId1CqLaeSVOObLnYmsQdrgY4bCebyT4052iAZfe0UM6yVTnEJM6jQgo3+XZ9qk6mboJKthPXGcTCTtBsP2icd93Vw6DxXFJePJe+O7QYLNZOqFojPdJpNHZlU9pbwD/KLqr5b9AiY9oYOf7RSn0Sw5Sl7Ecbicw5Wy7rn8Wnu43D+nK1RHBrAbk0HgNRrP2R/ZmTxTv+Dd545tKY+15jIfhZE8M0Z1V3pHR4mmIPb04vm/lJdrSlh0mB0cT+mcN34kYs9/OJuMNny8w0J0Ufg+HB+acDLh7/T4NnTORRuUFC0DxwPUGrJ/QePoFUAw1R5toeUZwjfgl9CnSjP/Nvk/ulikMU9cf7ScF3sB4rK08mLyf5XdlXJ93Jp+ztoOMIW7jeNb5H+WIbnEth8w7x1+0ISDvglHWdXCHrC/h5RF+Ig0MGrrLBF2znJW9FPruv3fPMV2iCyd4jPDFQ38VXhm/kH+3TYfPQD9Rb2IHt3v9GegdKULErYOVH6XYtMqR7FDdxP4Mr1lFO2k3Mi3qP/PsyoYyfSfMZ4snI2PjBNOdp2lhmbWz0+YlaWpP7bR5ycT/4ui8PfxP2m867AwgAztpGD0Ty+zX6sozETg3Hu1okj2tj/RCJ9ODnJvaetonI6NH8mQCe0zWkescvMnwLM3LT2X1LM9eeVt2wCs6xZUtaoxw0yfUMSQ3Ddsb0XVeUf+y3P69MPgqg4+MtGQc7604Xq9IKeP6sOFqWQEAt9uC2/WAzITTRtiWBSxnGj8CI97ZaQQAL3nJS/CSl7zkUYq+fyVL1PFTEAYN9cZwrV9pQpRdvXsEzsVjyLdHUM443RywPTwgy/Y0CYNbEnIuXkUxvrAwcAC4nlEEJkeQ+Vj+tWcbXdTgUiJjDM4RoSZ4Bieyc6LuKm6m9tPhIiit9j1KaCBugeN6I5xTpAx/wK0
6ioC0UT3jCc1pZPtl+1TrknOkXIQDxvnVuIydFEFhojIo9leE30R4OaFuhDgZR9rQyWDLYee3a8Q/d4p2YIgpZ+wKbt41CAstccBpVOQN8F0jYYaHvSIj2F1fqU1M5Xcwtrr+h77qeLHvV3ue4Yj3x3PvvcJIBnbWd/MV/DgJ2EfkePVFvpFTmgLDXmTIqL9D3IioyCiG+4h+RrhzBo6HgQQ2VgmDEX+fW+m10RYXGdpNZPZZZvg/Zwuco/9e1J5Pl/CU0K08z8ZDMxMsAvajidjQkqFrc8bWVC7twTXoX09zYTx3+xVoyx4ib6KLSG+SaF0uf/kiGG26xGE+igKK/bHytn8fad80EmDUUTVyhOs2ZwLKTXFyYmdCMbi55R/S5szJOkmu/Z3k8WvG2fbVVfpeShSqc/YBW4JApYrw7smlixYNomOz+8s6mZpNzs85JNXOqu8sbe6Wt/2oBBcXLh281vFsL06x+m8EIsFHpTM0qlVtmBEvxeeaIi3116pfKD/t74kdB0T93jJE2iM4FhvbpraPgUeGZS7llzBuT4q+TVkG/Jp++NY1taefg96nuKAdcBFhigsGTb/BP9syA7hjvQpm4hJdsne+TpWxlgado8jQ+dCeiviLdpzMU7v3tk8D+FTuVfhqW1x5rwU2lPHcJauBLtobV+3frI5BIjSYiGqgreCHAfFsya3eAIClXWbsklyCUZ1KcgkVLRnpqpxdtKSMxx67xb2rExbKuD6suF5KpNHD9YAHibFlAk4Jt4dc2t9qx/gcwnx6JKeRpIcPH+L29ta9e/rTn/5kqnyfSh2hRGFzidWxo3BcsgrAvNLyYiwRFz9DYtDCINGmlfLTIeOwlIOw15SwHDak4wbaEvJVwnaPNFRcGsuH5jTipd6QRgEQo6xJonDkxiJ3/o8pFmAHoWyvC160rp/G4HB4HDwzczUoyDMljCDLHsa0GthXqLNHt6dZZY/QN+a6hc0Lzs44yGecRUa4dcLVZI80MVNCxGEL02A7gCsjv2N7Ab8zxXS5zhaCIP8uwjHsF/fGmOmLE+qmrq798H5Xeds0Mq70PTUaVRqmtoXTKP6urjBu1jgcruRwoKVLQ6WDEtc+GMOYapQUpWJMiHMEqRmOegNifUNWK7txLD+cAWvlgQWkGhAtYqudi9Ub7PKDtejI8HTvR4aBzTvBHUv7gE7g24HvBqWGKRisRszMMdVNyN0kiDu4LzZSJox40arsTr2Xtc8e93dJsb+uDcOzDhDSP2x+AtzoROSrGqfkIwGNrhQHaKvG8J6DZ/LOfePpN5ekMeEv/d1ozxn1ZNBgecrSmUa7mj6YtnfHZlcOm/F18jbKI5MHoUxsJ+gQ53hYzAe1ARo+Oid8gNfiYLYQgQpjxImRLHOEOX2zw7PRBp/Vd8ZWJ8ZUTo0yax8GvO/5pYeL7bsYDcDoI27UjGscxGz0iZSxWy4u7Mo0Dcd6kjfQi3UO2Vv2eDG0ZSJcd51GUaHVcaKt4qAiiJjB9YbiIZ2O+gXfL1L7tpUd6bPdZMbQNWv6NV7AGeAh4iDAQFxFUO0zy0Kq043Frpvq61kfIuyTb42vAj+f40sa17sLmv2Y+3EG4OYeRfePj4KICwn22dMldXl0rEbwUXju9Cd6XMhrRrtkaWJ7K00y3NxSm5VhMG3b7Vj6TF6GaL+i3IfpK4nObO/d2UETOrUym2ffDN40WgiNnndpV3he5GDoQ8SB9mMplTmZIxdPEZcAkCODDhkpMa6uVyzHsrPoqdcP8fjVCYky7i0rrpcVDOCYrpASY8sJ+bTgwXKFLTO8t/TydGen0f379/G1X/u1eMMb3oB3vOMd3fdt2x4JkPfVpLQxsCemIe42X6RI6h70595EhqjcnnZ9XMsBVwxsWwInLykoMRYqnscllXONlkMGjgCuGds99u5MqqevV0eRiy4aKEXKADJVx0v9Zs7/cf2UuqzgIvMPgK6YEcoEXA/BDSiSdzFlKmV4MJmtQoy
YXHQRbe2K46TnGMHnMQq6/ebWFyM/CY3JFcSEFmIYhZ25NtkdTBeVmikq8toZwAIrQY0TzRPTwFCxQzJTIl6JRs3dGuocMR0M/kWnNBlwESHyLjhVMJuUjJuBczQM2jybLKs4A8tYePWAe1A1Euv7bjVFPllxkEP/7ITHbZ+aOCE7jVcbD2MZw4KLkUzVKVKe8wJPm2qoUKgrtKaGrBTgRl8MPfjWToK6Veuwrc9/v2CgDG2OVsStEdLVaMeIoM4zJCAHw6w1YsYxXBsuNMuBlpHlIgRWh3HnxO346MxZY7Y/g6gUa+x08jPUd27F/+Jzzy75HkGKdDzkUVZhS+EzyS9RlvK+8iTVTI4PIpDK0uw/XdiXoX6ysl+AILTDLInKJRVLe5+XQXlZ2GCAMxU2DU4dFh5UZWFQMurOwKa5pL+dA8vSx4Wy1fFa1Yn5QCp3eCmXcQCiJ70Mam1z09G59jUTIDIHghcpyhjRFtkXUX7sdsQX6fI7W4ea/I19GdSnLMHt4x6P2j5Eref4hVyxoCsIfoJqxsrSr+mX9ocwneyKnpz2e9InALogJg4ZWXcU/Az7FuBu2ztIeS/au6OIKdu3IdyG9jTSPJeFSXA9M3Mzi1+58Wrpm++rHe9Cu1RkmOmnwKST1whSp/vtM3V9VPxIngTHn0Nb0PTfyXDhSTScMApO1GxSAhN5fhk52NTZEQK7tX0sn4VLBDqH4vCZ9gHjKk/kOSwg01YAIGJzzXkLoHSyE01tlbFtC2rgIu9HDs121k3gSQumHedDuLQmZh7Qt6hKmUfJGNsFdmSo7TuMepu1Y3iQje3Zyx1MeVNvEyO4SN3YP+kHzN+9CL2YX2mO0elA1zUOMNu/AzRoNSTRRNC5cMEHF2dRYqSFka7KIdeHlPHY9S2ujiVI5BnXD/H48QYLMa7Tiqu0gkE4UAYRY80Lbg8L0pJBOQGJH8lvdGen0dd8zdfg537u5/Dd3/3d+Jt/82/iu77ru/B7v/d7+J7v+R78s3/2z+4OwftwErpwxDNKA8KICq196BW5tuW0X8sg7W854fZ0ADhjPS3YbpeyPa1mIAJ4a5KDCFiWjMNhA2dGvjogXwNuNRKot6IJMxqgM+l2LcoAre3g6LSiCQ3xpFsFGxWuhNhZIZHkVjbDIAs3GASeiKyI+yzIq9wqSx0MSARSi5Iyh18zmuMlSBdnKKiSp7Z6AlankJ0wNmE0ksjwRkpCMY4tzroCUp+MCWCdK6qo7Pa7DH9DjXESTo16S7sGlt1VNlmBiGc5ZdbolNiWMzAGXaVWbRWoRZEQV4FqgIkHoDc+ZVN/nahPFMNuisaS/iVf/ALBa9FhfzCRjqe8b6u6NYpFngcKfQq0U7BmYlZpTg1oMTZMpJE3HMMNZoas7VgqzVV6SxvavmkmZ1wBAzo0VnFxqJxBapAvesjg7HB28608m/d25TmhTVYXgx/btKX1akD5ycMOXxgZpHlchGJBSsMPtQnCThqukI6MXivbgmid8UibaJNZSe/zuR9OnhogopyJwMmTLe+ePWU0OT3oK9DLr1m6RL+fk5nwtOfaD9tWy1mBhb7yARrdK86SOOYy+dRJyVZ1TAZSRtMLdZHkbJ9G3VT6Q8FzbvX4CCT/zFXm7l5KoXRHcBN4tQUI+VB5j4oDO07uXRI8CIyCB+FBs81cZJFOfDrZ08M6hN18099Bt1ld3mRTk8kjhzaFeuKERiN6jHNav++kDuYBLbr3MHIf5GyVCPvUaRSdEmGy/kiJ2yKflaPSyU63otGW9slcU60HyVY+tAuVwzMJg2xV/cetYdrKUQeovJm3JttJjkCA1wmqO0w/YMfc0rh7zw0stjAF3KONgeJhNDYRV0EfIuIh8I2lX9H3DXYYnjRnM4r8sn21dY9SZ3iYPgZdru8x6m+jU8evZJ5h8Gb7H+Czstbp+42r47DQBa8itxkgoedmN3Rd1Tap0aXITFlwsDLS2m8OQI+jfJjwdMSrHWvluyJ
LVY66+Z93jDq5KCQ+kjOhTxrdbedIAktcYFeaZZdfxzn2acRj5neEV7+h0aqlV80b9aH8nMhLh3/73c6FE7ebyhODDlwCQpaMq+sTlkPGcdnwtOsbPHY8FafR8QGedrxBIsYVrTimDcyEVGcRp7zg/nIsF2Rtpb5HSXd2Gv34j/84vv/7vx8vfelL8QVf8AX4C3/hL+CFL3whnv/85+MHfuAH8Nf/+l9/JEDen1Ln3GFEOdbyAtCvIdM5pR8qKWWobU9Lh6znBJXQPy6hasuGq7QhL4Srw4rr44qNF9xeZ6z32MFtGU2FojIEQ7d9VaOMNrRDot3taQIgnMIRZZ1l21t9lw+ipLnc7lb7AXUgtT7tpkzVUcbgrRpV4rHIMummEfo7eMHGaIjZOkFiFJ7NMzJEg6CzBgubkHynE0N7zmkkytY5jaCKyE1crYHi4DfPQYANDcQRPqwymRjsnSd/h1eUAOOQV8DO2pzRmHG/53Q0NFZ2DLFWCPu4jXVNhYTcPMjeCBniIbTddUSrNHxoJmqLRBQZZQ00Za2K3MAdt9KE9sUYEuM2bSjbPpnUeCKmZlgobUQLbNytYR5r4FnnF9WoDctjpk9+C1yLKLJ5cphgdONmjWEuwIycQN1EJ/sbJhvvkOKjGGKBd6XrbAAY0hmN+XbPOIxpUK91gHfypPMSTCqn8t9o0rwLUzDu7KSpy3emH5cmDgKvqzfws6MloTkIvZHTh44ul3b5BB/Me6OL3UKATEpFxkr00cqODnedIpEOZnpGnCt2S6XmMfSg7U1osrZjnbltglCdQxUvOURcOYe2hdvKnZHeyYy0Nly0rfTcO3Kj/Jbq9/hoQKsOPjN5c3JUaEDqJyUnN1b628kQWexiMw4eF6M0gl1w73VEgFflHw37PsVDiDTq+BwYlttNFg+Ad54F2nayXp3/xf5UulKaY79YGiOjtPEJ7JXnlP4qzWEzkfgZSovOTpOqo2wF3A3EbjGGTRkLhoVZ+h4jhq1OF54afauLuZZXR/bG1K4T29P21civ5kizffTbScc6KLwwtlB0JMSFo5Hu7xwn4bngiAf0MIGDS791kb3SgESdpRO396cmu7rt7FKdg6P0iQk1KrV8K4sNZpzFbonwGn7M9Xp2AEN617JmzHWctjqGYrfIXFD6bu0eie6dmAfRMTSyuWxggaVRKa+HSS9tbHXM4nhFWs1oyLIszoMyUVdw6J+T2YafbTVuPFoGq+85oRxuXfuGOkdOS65zfcbhsOHe1QnH44brpUYXHW5wTBueeXyApx5ukJBxTBsOlKstw8gg3OYF7zlc11vVMyhRheVuAvnOTqM/+IM/wAte8AIA5fyiP/iDPwAAfPInfzK+7Mu+7K7VvU8nob1u0mwEFg0/AHaKO7Ol7HBZBotExoQSNZQyjocSmsaZcEoMWaFnyQgggcs/YhyXDdeHFSsz1qsNfK8SEhsAqCkKuVksKifHIKFDdguaZQI9VJsANgdsY6mHeVXBLKF35ZrBrB5QcZCN8CaI4UzIqUpXwUMGdP9HXblnWZEkBnM9u4Whf31HMRY8l6aJIQNwvzqwDBRTJ8iMcHOrGUWIA1YJ+0gqO24Rxk6oGUXl+j/CQ1AmsqpCmcuK28g4D+06dOnLR0X6oP6ZoXFpssbIQAkD6Paxe+eitbKMPFDBEqrkUmY6ZqP2B6nUHyaxZrLqDA1nLJr+hsmO54vKnzYSYSNdhUorqtNIDCaTZzWrjc6xyZ2ybR0K7QM+KmoJ23sOpr+H5jDLVkE7Zc3ufYn6EFmFsgrNfqLSnF/kJgLqVAfK5EbfDyYO5tk5B6KBYvFQkTBkyW6cJmM4RDKGvNnBEZ6lgUl1tfPmpeUlobNRvti+tL1n7Nl8bqzY1yfN3VUeWLB0wkU9H8lKcJ10Rx4T458rbeUrqNNIjUaCj4Kt0XvqcDRRv2QcJLvy6Nz4o9Fxm8hIoWacuMgAbY8cbl17Rq8gGV5NjVfFaSR2gXOeBfn
r+GNr/Va9E7bPz7bSuwm86XMv78xfO3GxeJRzRoDOCd2VB+Z9CmMwXhhCc5jZMYn9iGMQV/cNTA4+oVE7gVN5wq0+C3tVaFPHEmGX7rD3zdoQ0Z6wKfSDnaOINdodBy52JwEktifQ/hKgkbEBJqImdDiT2tK8EfJaAdiAfEqFf3PRediKzBb9ABhZb+SZvndjbvSs6J2RTDU85nVehbnDT6nUjjmSwZP8lraqvtNn95cUdmztWRZBipzCRGaRlymxbgzGPPCq8t2A97zsbTa44m7kcCCDA9NOAMvLWmP30kpq96QTIZ1I8ZFOqAto4dgM28egI1V2HkijUuUs2rYIwT6yT8rLc6qL9Yoj9tFUhrYdnmVsZb7BhSbFxtMggjqO6jQKtoOzB0y/PH9WB5I4jBYoDaozpY4VzLihOsKIuJ6jZphE2jcOHTcHZoBHg8u1FsGByFtDrw4/DFg7ttkn5N4pTpQua8al9ZcWBpYMpOI0Wo4bUmJcH1Y85eqEe8cTrpcVz7y6j6cebnCVNjzjcB9PW26QKGNBxoEyMgiZgRMfcKADrpYVS8pIVe49Srqz0+gFL3gB3vrWt+J5z3sePvqjPxpveMMb8OIXvxg//uM/jmc+85mPBMT7alIenuC2NzrHWs++jUWGVZOoYNMOlzOMdHvaWramcS6ZRZkRgERcHExcvJHrccUJjJvrDbzmuvfUELI1BjYqCo4JnBmcSJ0r6hwSwCOfkRh+9fnIyFd1C9qBwdVpRAsDh9wippbiRU1UnlNV3pS4KW8Yo0yYnMuWvW1LhVfXhEypOIVkm151gmVAV0A4+cOsVZBU5SnPflWQW/vBGIpjpzmNsNbnRcIORTD0jjHZ9uYEWRVOXJWsKikjxBENjSC095JfbYnKpM9P1SCSldx0qr/jeVd7qw4RZwOcTp01gXxbgdbOk5oUhn53eJDhjauCQZmQLW9hH9EPA2Q7FlPE0yxZI1ImsTKRPbCJ8oM3KJXezcS1/u6MJ6swq5Gk0UUnMYwIaSWkW+jKlBgaXUSacbC4s1EqXgC0sHKYflWjoxhSJXIhHxsvyzMnNhP1Ko8k/FeUtfDkgavxUX9XAcimz3bCgM0/kzEuYA1jDecODiTdakRjA9Lachz+DsYedVg6Hrb0M6KhAc6Vbw1Mzml0CY9Zg9s+RxkL9nwCwUvgNcN7JU+Aay+a5EnKB2uAu9V86yg6tL7pQonoQHEiHMzhlldZVxUp5UJzQKExobONsK2VsbfKb/U5rY3OhMYaXuJA8678mdNacJpavM5oMo4zVT5cGk6y4gHFPrC6MaHJHjJtKA+i6sLKWwEnMkFvF19QuyEVTTd1k1Urgg2NTiMUUHWmlbnyzZYfRbMYWdrod3zTq670A17vWJ0DOLruHLTUZGZ7HmzvMHKRgeY8GJCTn0WbdgWpAxtpmmbfTERG5zRngd3AuDBoKYKDlhKVT8RIdbWeqr15WDISFXkvf/dAIUMomQlbLoO7bgtO64LMhLwR1nUBb4V/tzWpzYbqWACarFdbwcovjT5F4XdBpXVyViDZjC3ieC5Gz4VJNxnHEKWCGxIZRFB8UWJYm5uBYmO7SXgBhjMhi51ddwFw1W04NR1Hwp9h0WVXVhuHlToirG4Th5Cl5cqX6pioOBEng7vROaE5BUUGob1rdrpnMK6vuM7HmAGsCeupDE46Aek2taijW1J7XRdbZWzN+IposM4wto6iI7fnQ1mI93LJ8EMdZ1mgL7qGtW/J2HhEk/5lAq+pjmfpI7jS60ZuPqAyLDrmrU6w9Hpo4wTZQSP2V6XBRWgRQq8FsJTKfFdg175wmz1ytd8Y9a/Qr/lWnhuQuuZb8ypdc+kfEGxCodGoD+W9WUC05wqr7jiwOoqwMFJ1bi9LCRZZlox7hxVPvSpb0h5LJzzreB9PPz7EkTY8c7mPpy4PQahzaTAyE9ZlwS0f8ZA23FtOWJYNacsApT5A4IJ0Z6fRF3zBF+DXfu3X8Kmf+ql
49atfjb/yV/4KvvM7vxOn0wnf+q3fetfq3rfTJci8o+GpgiBU7apho+xNxpSKkqNDBnjDlgmUE1CFOwAcDhuOacUVrUAC7h1OyCAsyHhwtSKtaxX24fwBASwROCW9up5zuQo3Ecq5AFTPtVga0FZh5SPrSn2+YvBVZfojIx234jlNWZX4UpkiiUJPWxEOVRiU/ZiN2QFCNgJg3RJOW1HW25qwHhbkTEWAH1IxLDcCL6ltEQnCWfqvUVBWwKrQZc2nytY46kDN+aMRUmScX/XdspTDyQllLA8pq5DW4ajCrgk5+V0EV65CbNuKwwwM5FwdiFW4Z7M61U8cBskYvVaZECbRXtUgyHVCA7OyRiczQTaT6OEKoTV6AOj5AgAc/5Hphv4VAjQvjaAm+7vr7xwVbPPYtihmqArAnC8So0a6ugnDicNZMeLwIEqxfeKYT1acZMVVaPlgJ6hiGJXVmWTpVumZZVu90rhs+uRcVjFQeS9vqdDdKWG9LQDQLSHdJF1p1BU3u/KohgbaZI4H/TMKzu3tt4bUoUZu1H7zlVmtOuYymUgAHTYsS+nncqg8WScRx2VTx7vwJ3Pju8yENSd93jIhc3Fc5y21fMaxlDcCZ+FVapOITMBKjU/VkRaIwuACOb6rRk7gm84RHGhOKxkSkHlnaZnNZMfmGSVLs7ZdMeTtJGama41RpgaonVCpQ2kcyVXgbfm9U9ejwcI9Uo0WRucIWbiF1x+KIa9nElw1Q52W6ohMjMMhI6USVXt12HA8bFUfZiwVyI2T0plOSqts39ZFnUrbWhdJZHKmjkfTETusZlHAfQtj2g2HMaIdDpU5bZ1+1V6dDjpxqfLnUFY9U+JizC4ZiVBXQ3MpLhMBlIl6zgRGxYPw4VZwJHppOxV9SDJZlcUV3SoUVvlt/wK9tshgmZTWjNGppbKJmz1gHBnqiEb7rc1WnLI4w6xDWhxeK+qCHlwUo3s2fel0jF2xtxFtQquATtJA5ZmWXEhFV/ADSVSYR44jckj13x3JsclfcUL6X+2L4Ki2OUqpOj5g5LjI9Kv6fEwl8n6hwmdXy4qFuE20zIR5tPO2oLJ82Dhh5UJ/N9sBD7Yjtlx49uF6KAuaOeG0LpVOi37QCWzlV8GB4tHab8LP0n9rQ1l5KpNsQnGWpWZvpqU4gVJqC7KpXpQjOxgOKWv/D2nT56W+B6B6jQFkrnoOUD0IQPvPaAu64lxb1wVbpee8LsiVJ9nqwzDppiBrlFaEX4THgGbrmMgNtWPrbVOi79PC1bbl5iATJ5lxRKRKw4kGUWhodMkATtui8vp0KvKamZBPCdttkU20EnCb2lY9cfibf26sRXeL0+hoooWOucyvqJx5k465yZ0wHwFxkatL1TsiY1OjfXW86H+FB8S22XLCSewbGdtq4+S1Ocx4S3XRHjonaMgydGudlodcbNCUsRyz0urxsCkNHgzNHihXeBmHevkT1TFKVX92cyeuekP6FN63YNomZ+R95oTN6J1seDjLfBqFlqVedUYFW5DXBF5LXY6mlzKOoHLg9XLYQAk4LBuujysOS8ZjhxMeP97i8eMNHksnPOPwAM84PMCRVjwjPWhOo9qLDYRbPuABH5GQcZ3WikdGTgw2tHxpurPT6Ku/+qv1+WUvexl+8zd/E7/8y7+MF77whfi4j/u4OwPwfpE4GI7ttfutOjJYW7NhsQozliHziQEVbEvKoJSxUfKwcCmhAoAyDmnDvWUFACw44P7VCVf50CY0mboG80bIa5XGlJDrORvbhhJ1JLc9mU7JpIwJZZJWPaTpKiNdlVWd43HD8bipsjpWR9FCua72FCUuezFBKIKgIjQbAbBxm6TdbgfcbAsyF+fRzelQvm8Jp1MzrPPJrALYPfHG2KOlTCoTlZWppa5UiTIBMRZinWASqvebyu0IKTXBVYQYtI9LykhgXC0brtJatg6mcup9ghiPIqxqf+1zFWCnvGj/b7cFp7yAUSaxqxgmXIyVJrxKWZp
QolWaBBhDgZsjIaTWHuG0Lbg9HcrkeUtYLd6tV13hMIQfFIgoOLmes3NeiVE5gMkZX6a9rr9utjjAh/4XFW8DWx2YW+kzA7qyqKsKI2NajTzTDzvZnwBkYbYh8m6CZ9k5OCpT5bfjUieosE6RRqdAexYHUVKcsxrNDFI6zEx4uB5wuxXeuz0d8PB0AOdCC+tNoQesCbhJujLVotPqipuuVPlJkMpUmfAA7ly0fATyVTWkDlkn6rRkLPWmiZQYV8cVh6U4pa8PK66qc+h6WZUnr5cV18KfVPmTGBsTtjpB2JAMHwpPJmQQ1rx4o7nKqTUvOFV+2WSyi/q8LYVHMxXnrxgrISK06QtydCg0bumCnOMPZrLq3+8lXT02E5p+hXlWkfCtwFN5ODU+TqmtCoo87XSnMbjEOaAyrfJarjJdnObIVLdIELDBbZcQWWSdShUtFVA0Qyp2rbJ2m2jX9wfW1VGhOT208mqtN5kW3iuTs4zrw4pjylioGIL30qlMcKmElQPAiYt8z0y4yQc83I7YcqG3h1ublN6uh0JzudBcrtEPOm4GfpGjs0WB6UHWYUwcbVg7QoSkjilaFIPInEPWScq9w1oWiirvHZeiD49pwyFtpbzQBspEXfhw5YTbip81L7jZig5a84KH6wHrVnCxrgtWWVypzm0wmg1k6MB1A9VRsrTIw0W2zxN0Egq0Z0KdnFceS3WMRcZKNIvKVTITl4ETet2KnOFcFsjU6SBRaEBxMrjJtR3zRssSNUl1UiI6YlnMhLpOlmVRSxwvKcArsihXuZgHciDqpGYbc/dasjZdg35ybmsOdEp1UikOj6u04XqpMj2tuLec9IDYe8uKBWVcrtKqduaCNp7ax9qeN9NF/1WnEQp/PrFdY6s0+WC7wiknrFx4da10esrNNsu5d7zIe3EyqfyDkb8VDhslUhwAouMzDkvR8VfLpg6zAzXn0IGy6raFyhmoZYwzjikjoeVbKFe7O+lkO3MqdGnsVEbhyVNekFF03IlLv2/zggfrUfXjzXrAqeo9kWMA2uQaQc5kGBpvOiBVXSK7FpZla7S8ZLXPr1T2FkehyBnLk2r3AA4/I4dinLhJH4u8TniwHfBwPSIz4cHpiAenqyKb1gWn24PKnu0kUWgErNRu+gwOf1n0oasMHIs8OR43HK7WMr+qjgWB09psYtfLnEMWwmSuRWacm77m2q82zqecqo1XZPBtlU05lzFf69idtkV3f+ScGv0atJHqfpEzZV57TBlXh+LYOJDVD8XhcRTYqYcdQHGWGB7eqtLO1V4tTqNqy9U5ZU/HAmqjv1UWcGo9YtdJfa6uystWpgvf51zs49vqUBSbDwxdUILojWqzXx22ctj1suHxwy2efnyApx1u8JTlFs86PIFnLfdxoA1PSw/xeLpxcnNDwsN0hSfSNRIY1+mkskIWdi5Q+S7d2WkU0/Of/3w8//nPx+/+7u/ib/2tv4V/9a/+1ZOt8n0mVZpuv/VphGZSe36YBkVo/ql/VxX0aVtAqRCnAJnECKgMeJWqEmBGXpIK//tXV7jhg67WiaAW6LlOgtelEvRSvJF5Kyt2m4bWGuAJZeWwGiTpquy/pMS4d7Xi+qoo63uHFY8dT0hVcV2pwirCWVd+VIlbpxF0UibGojD9w60a01xXe9ZjUeZbUUzCqGWVtiliSWrsUfFqH5RRV1xrxMHWhFXKOsFcKONIZlXGCN4DlT4lI+ASZVxTnaBCJqUrZNef9XCXnhvliSL4TryokfYwH3Fbx3PlBSeZ1BpjRgRhpDlHhxV2Ebht4tLCHAFofQBwygtu8wG5jsET63UxGLYF99crjf4qKxMN72yoXgxDMVQB6Ep7VOICo6xeygqQ7ZXizRhi2neTP6FNaJS5FBeNH6zhKsYDYNsgddxlrlFveTHGf9JJmHU+6aoIQWlEJtgycRbjVCdNZoKXkLW+sSSCtqGTscpv91LZCy0TsyPVCAeI06iNuU6CZOQJhhaAjav
RgIT721VZaeWEJ9YrpYf7pyu85/Yaa11xvbk9FENiI2zVuYitGEyo0RJUHUgQ+SsOCtk+SigrigcGp7r14Kps6zkctjJRrxOex4+3xVGWyvO95YSFMh5fynNCwcm9VOTUNa24rs8HNPxkJmyoRi7EGE4dT97yQXlv5UVXotUBUI0vpZn6Xhxwp2z4RSblhr6B4iRRajRGDhm+cKum1FbfEtp7Uro0NRiCqmanc9KrwaXw9a5ondAEPhJ6F/4uhuFWabpMWKJ82iqOxCATx6SdrOpKdm4rohqBkqlGY5L+1smXOAykrzobGzFVc7YQUCfdddVzaZFC14cNjx1vsaSM62XF047lvIEDbXhsOamuu5dOuKo64rF0W5xGlQcXFLfViZci30G4yUc8yFfqKHmwXSnt3N+OSk+31YFpV1ClhzKJt3rKRvE0RDTHjx1T+dwiDoxxHMa/6MSqx1PTk4dU+l7eZTy23Kp+vJdOONKq9sqBNpWJJG1DdBtwyge1Z265RHoITp7YrnCbizPt4XbUCc7tJpPVstBiZbTVFSprqU5oKuzlTIjSt4OlZaqTdlTdn4wdkJo9o3hHm8zlOinRCUaVJwpvnXjfbAfc5ga7i3ZkUgeqjI11CIrdJY5aO3m8Sm1CLRPJVG2x67R6pxH8Cn5xkhwMDcyWpuYO5iYrgpwyOneWVLYAuEqr0pnI9IUyrqj0Y0Ghw2s6KZ8daXVjIi4Zscf0r7G3hRaLnVUW7W74oPx5qs+n+v1hPhbnJgin3PSDjLEsDApfieNFeG3j6uQ0dofST9XxxzqhTmDcW07Vji6TRFkEOVDRZwnNaURo7xc0ezWh1CV2IaPoPXEa2WfVT5WOhV6LTXoo0Q75gPvbFW5VZl2rw/eUm560OOGq64C6uKIOq7q4UmlH7LmFsnEO1efKc/fMwtA9wUnVOTbSbEHl9XqYsNg+qeJB6FQUh/Rd7IC18u6DfIX725XaQ+9e72HlhIfrEU+sV8Ue2orDf+MyR1nXBVvd9pTrAjcBxXFdx1qiUlOSqJNT1TUnPOVw0vGKi37y90rnJRlXVPRSlLfWqZsrncqC2A0fdH5xW2l544SbKmtzfRY5dcoJa14UY8JHdmHyWB28xYm7FZ1QbdOnLLe4SoU2r9NJ7TGhVxkrcRr5sbGOTeM0AqntpoufMDwm8srIObXlEOZXaosZmwTNgSRt3VQ5vuaEJ05XuL8ey8L/uhQHUjA6yq6b0r/rZcVTr4od+9TDQzzz8ABPOz7EU9Itnnm4j2dUp9FT6QZPSbeunpUT7qdr3EsnAFBZv9RoxG1Xuo7Tk3YaSXrHO96B173udX+snEZVDzfvKNrzKM38RVrfXjkxWOtz54CiQkjHZUNaNhAfkHPGRqSGQAtvF+OEwVVxHmjBU69usCLpBHfNZQZm97BuOWHdtm67lz3HQye2dbK7HDYNMX/s6oTr4wkLMR4/3uDxq1scKOOx5YSnHG7V0SICWX7LZOJIa/Ego01c7QRVJmPFQZLwIB/xIBcGfLgVxSRK96GGCxfjSzzFwugJFV/VUfHY4YTHquB9bDnhKcttEa5pUyVzTMboqAaJCGrphyhlFWq04VCV8BWtuCJR1ln7Woa9TQ6dwWL+rrxgq0Lplg84oSnotbgTyuqAMXTsVGw4HzLGgcDVPPdNWVrj4FSNo4zivHoiX+PEC27yEe9e7+G2Kk8x3HWCUetoTomC06u6qnyotFFgYlVkcribGNrWIBaclb6a1TsxJxnO+9sMUs+VzlEDKHy2PVF8MmoP8xEP87FO2A64yUddiRAnga1boumUTuozGbics8iMj8Au+a3BZnsieZZq7FyLMY0yMbum1dBfm5jJJM/2GeY30FaCxWgWenvIRzzk0vcn8j28Z7vGygvevd3DH54ew4kXPFiv8O7TNU55wWmTlccaLXGqIfx1ki/RW9CVRy7byswqopxNISsxS8q4t5zwtKubutJ8wjMOD3FvKcbG05YHeKz
Kw6ekW+XjayrbeYWHr6itQh7qSDPKqpUa8mjGgfBgRuPDhp9U+WXBqTo/1up0EmeSOH43JHXE2gjDyHtCX/JbpjGNfqDjrxGRQmPVMHZ0NtBMUm/pbzO+msHkV9lsIgOHPEu4tDjThe+PVRam6riMWnJDMTrFWXCT60on2gQs10mJRF22FV+qzu3FOZZkgiaOJYRWrdOr8a6JIAE0SpSIcW9ZVW88ZbnF0w8PcUwb7qUTnr48wDWtONBWnSKb6oFDfb42OqHhzkzAQLjlpfJXefeQr/T7/XxVJmdVBpWJLJQ3ozxZjN4RWeGpDM5BLOMoyU5e1eAejP9SndsHNH1/oA3XVWceaMM98jg5Vnm0VHkf06byvfGRTNge5mN9V6I+xIF7P18r3TzYjspjQicyOZDWEoq+QR3nqyqjj1QnNFU/qq1CQd+jyVVri4k8jjpFeF9WxsUhvSHhYT7UsU14mI+4qbCf8qKTczuJaU48cSyUcV2IjZ0lq/abbllQpx41nryqtk6DtTq56pgXeyQpfEJzSkXOF9nLCZvJO9IaXSZY56WvQWoUW+JoYL+mE+7RWpx+1VmUkHHQCWdxNixGNlEHlejY2HKlRW46ofCkoUU+tgk1H6vdnXBbHQuWd5qMFZxWXYE2EVV5W+0ZmXQLnkY6XhYpr2mtdNlk76H+FjtrqZCIztOo8zpy0rYsVmc0W65J7fJ9rbTs+fOA+/lK393frnHLpdXbfKj9LXb+WnXbNtQ7HifCq2LP3FtOar+Kk2EB4166VRu+OOyr06jyZOH75kBaILZnk5l78zuhAYHxIRe7cEORP+/eitPoQb7Cu9Z7dRHpgPtbjb6q85dVnMIssrstWKY6R5GFr6cebvDUw4063B9fynOqThSla8NTupgNWcwufTxWmSW8KCmj8e7KizrpVyTccos6uuFjdZj5xexbPhgZ2/ioLQgXOWMdvI+nG4XtsXSrjq5rozMlUpBk3IRjqcGfK38KvUY+Uh1mbB13DhLaPGLlhBWL2mVtUb45lkobi84N1vqcucnxlRPeeXoM717vYeOE++sR99crtek2dYa2cb+3rNWmXfHU5SGedXwCzzg8xGPpFs9ansAz0gMcKOMpVeY1niRslPCefMJjdAJTcawvNbKXyc+hLk3vNafRH8dE3OaaIhjdFPARED6wz/tvwVkk/9phfSVzC19FCY2vBNyiXgrhXWPFbV5wczyo0WU9wNbIF8LNDGy5eEHt/k1GCQldkolkOKwaev/U4w0eP9xioQ1PP9zgaYeH1Qlzi6ek4jQ6QMJhc/1dt6eJUKvGpviCRWCJMjqhKN7MRTg/yFdVcB1wf7tWY/phjXxQgcyLQ3OCrLKVFYmnLjd4fLmpkQg3eDwVIXysilcMx2tq4c3NAGkTdVHKbaJUQ58BLFSYjgSPKONWVvB6EilOikZ/5TgmwQmwwk5k23P0mFtyikTYDK/qoJDfiBE5DY5mHAC3WPDg/2nv36P3O6r6cPy155zneb8/1wSIuQghgtGAVBNugVCXgCIstdbUVUX/AMTLWraITdNVNT+7oGg1WNqKFTRqi3ipii2iq6BRGy6tirWAadWlsfB14WWZQCr5XN/v53nOmf37Y2bP7D1nzvN+fwL5fATmBZ+8z3OuM3v27L1nz549fpHWz4ZBTIcRDqtoyIuAziyeHTeLNFiIjgyMaaApiiUr+Kkjw9Irr6/X38t1t8ZpCX2PcZwoj7Guy4rjbHc0jIJR7xIPSim18SHGO0HNmCAr91TGWIs86M5GckrMXhFC8h15RgY5DnEGzTgQdF3z83rwWBNHgc+yUbFiwibKlj3usR8NinN+B2f8bhzc7uDUeAQbH5yLZ+Ns48Z3ODeEyICRQz6INNPq89IHcfA6hHDz3W6Ac3GgvtgPirNb4fJ+DztuwA6FQfsubdDTiGNuHYxpMHZpwILEkctYRP7vAcTN5eBAEtik+p3wf+ixjLBuXFbthtRMlKLehD6hv6g
ZKTXoGiAGs4tOkBiqb3qrOJDIGOylI1YPuuRYjGBtPOa23KaQAkbYwY0MDLVzu+Q/GRDL9/WgUPhdnOlpRnQilxijmi0c4ZIzWhx0os/W3GcHi19gpQYk+7pP+uyg23iXZhiNnCDdH7Ic7FU/WrocMXrErXGsW6NDiBo67vaT8/GYW0edFvphH+u+UDqhVzqBVH8bGRgijw1MWEfeGRCcSCOCobnPfZpMEMNU+Ec7rcVkTg4OxIgg5FlaUryh88tk16Sdga1LUXFchzvyAAwx2sOnwXqgiegB0YcSDZrfx/E/0g8BwghgiOfGKH9EL+1zj01yvCxSxJY+Fp6xuokgS3NEPs85ubIDFGnQnaI2kw2T7YA8UTKNVtAyYwAlx8xaJoOifbPmQK2N0i9jrKcMaGRwJO2LRHcZMMbcFlHPLmkMDoTYN0VWiLNFt7/mK5FZge4zMkvar9D/JUqnkZVZ9vuaR7Wc6SGOFGBBHguIDQH0oNgeQKfkuiMCwRkdpxE4uJ75wzPDR24cMWLDnHhxDcLIEoHi4iSDmvRLbSyD2jzZNXCHTYxq9chO2eS8ik4jab8OKmoEoT2l3ZY0Rt1m7bpse3LQc5SCeOM/Uv1Q7EqfOFantTO0YmCMV8ZEg2Cn7kenwgCHVXSe5Qgdl2R8dkAqB7Hi98zjlJw+yWkUdb12QotjXo4XMcom1zWU11Hms+QwIzMES3xXwxDrGxxIDpvYxvsxp8wYJ9fO+SUGhEj9836ZJsH3o90sTuGRXXLeJ6eRW6eJr2NuhRPdProoo47SOuknkTUw5bZ2dHBuR12MMGap8b/IpjHycuB3sVWyo1v08j4vjLNQHIJc2CqiH5Yx+k+Wjx6JbdjDY0dWaYCxIE66oiOxj6PjUJU5y6mU3g8ydoKxy7Q+y/U1/BzfFng59tvIm6KT9DhL7DpJYyCO1kCTQItTyyM4449gYIczwy7ORofiauzTBDTUt3fdBicW+1i6EcfdPq5YnE127eVuDyfcfgh6II+d6ADN/U7kwYAFuRj5ympp9cH2X4nmNDokyoEzAFT6WBU1X1A6r0a+YpQljV/eH0PeHAdJ5joGyKekbWGJ1aBmcAHJSLygDptFB6JoYKlZNpl9Cp/OgxO5TwamIly7uLysj1FNR7s1drvgBT7R7+N4FwZwJ9y+EWpHaJ1mNXol3DpzrGd+sqE6pMEDotModMgVx44WDUVxIG24w4qz0yiEVXamHo44GIFuQA+P411YE9qRx1Fa45hbw0UjN0QfBOG6JB8UcDT2xcDtoqINQi2YIuG3S2aJo2yiiHI+HP/IgDX/9czpt093IJ0/LMJgJZed0v8QzatpeTyiwcTAgBFrXiUjfgVnDCYxMLVwFgUNIIaXxrxQUXklIzLSVq1MOpBW5b9Ux+K4fFdpFOgBnD4vyifwIrBhmY2gYChAz7jlwY/URyI9hJ+cGkhoBZ/OFYZLTRaV9dDPdiBI+pWOHLrIgdo4BFlelONt9Na8ODBjiPywxoB1NKD32WHPdxgB7PMC5/0CA8KMlUSnrX2Hs343OBjZ4bxfYhVzr8lsLQF5ORPCbOGROKt41K1wstvDIg7qTrj9aDCG2ZdlHKTuEmMBxEEFoYs836NDR+EbTvF+2TulT3HlmFn6IUNMa3EyIQ4upI+GgQXiPcHRxKyNHD3wgvoOIt8RLI9n+Wz5hKPRmaNG0kCg1qAzjKXLpY0v/e0SEiFQlicZeHHQ0sHyaolgrGajb0i/ZaY+9z0ZmMkgRAYkKRoTOTpDnE55kFvSMA97szM9z24uVHTQjjJ6d+BxxA3RUQLsIkwUOBB6pRMcujhgzTxXNoHmpZE9xtjjRvYY4tBs5CCDxIErAxepk+YheXcZXVHlB9V2Qgd9MRvKMEsa5QEtr0M0AFJdO1D8TeiiNCJY3Tjte/I367ysAwOtRvaxP41Y8zo4cBlYR/5IvIE8EB2jjNbDq9DmcXACj6WK6FgqG0VsgFRHqW+indapsVY
01fihHsH+EqeYj3QdADPwHsSxALVcB3mJoAyqi+aI/GijSbpYxz7WRfqk1KMj7UjI/CnlA7JjM+XkVoMvTdNyeDKnt/L3WNFxXndru8UpPdeTy/YYZQss14XUOw/ScrWzACj3T22PeXCgCYfjEUPqJ6ltoQeySq4iDFAHtjJ+2gORJiuDHcrJhupBaeKjg0NPYsmJNUdRplk9Z3VehU+JVW0t5F6tG32kQagTY8Njqv+aw7kwuM76ZVT6LQ3u44So9NXstIzRdFATctAOaU4RZX2kEUU53NNc+4djhyyrhV5bwaLvZSKJk5NtzQPW2I+ymrCKEVQbyLiEkk2kl7aPyfaO4xWE8YpE0O3SgKMUVnXIuCRP/tZ1vO4/PVHSv0EmT52nut96zrwcbJjMy6KLg4x1aTJ7kFUPsI5jKScQyr5IMpaTjO0Q7bTYRovUj20PFl0ClDKGlZ1ieVePkLy6r6y7TFHkPstG55UyzTijWM4R1nE8MLDDGb+DczEy64zfxZnxCAaEFA9nx900USYRiTu0wfFuFVIsuBWu7E/juNvHDo24zK1wLKY4WRKwjA7eEYwBCJuzxonqZYyMleWJebXFheGSO43e+MY34nWvex3uv/9+3HjjjfjhH/5h3HzzzdV73/zmN+PlL3+5Obezs4P9/X1z7o//+I/xnd/5nXjPe96DYRjweZ/3eXjrW9+Kxz/+8RdUNiY8vGgiIHFTUC7mVB1UGF+cn5O/EtmToj84M6fnGK/CeWCeE4aNaa2tI05LadYx0kgnZCZlUIoRIp1cQjclDFTCs4+5FXbdJjqK9lKEzhHahOiiKNB2ouNFFF1pbBFBKTaLEUOazR+wSYbUhh3WnYvGYYhwkPDJHOYdZhXHZDjkGfcdt8Eyhu0epU0y+HfI40iKdiEsoljqiNCjD28iO8icGiM1QaQjiuadO9MhRB5gpDNzmuwCeXZqYl6Ay4lkIMzJcBri/FsY4AzJGNJvJUUnPYhwysG2zbCxpZ1iat7UKFqeOTzpssKJdY1GwxhrHgZxQ9W46szgcVrfaTlrta6VZq5mhINads4YlLO1L2i+X2TPmDGeNuyxdkM0njZYIQwsgvHUYYh9dc8vYvRECOFecR+Nji72WxuFtqQBu9Fgklm2nhhLeBwhn6I3luTQU6BzHx1mNSpqA4LhJ0aEbRua1H+2L6qPaSpqeuuBhzVGyvun17QhU3JJrbxlXSrFnH6PuaBPeUcN27+d6TfP1bZ+YqwphwHEaA0G+pgcKevkaBoQHE1ivA2cByQyc1jWRnREWS7tbBFDXQYkS8pRDAtyadDRkTMcl/VqpqJH7mFadxAIfXx6QTIcRupjiPUPzvup03JuYKcN7/Cbso1xgfJw7hulzKoP0OQdtfJmR4R+t40LsYM5pvw2kcPMHAZwkIitIR2LDWG/n6NQCDIxlJ1aPVy8TsnpV2r+mtw9EDSlg9arwXnqVdmzG1oGa6XM0mUJkwdCP0S+zD3Qke2vuk51jZOHVKOapCp1BU+eKM/bsmpHW+LSQl7VtGFpedXluy6jHjzqvlfSYR7B6anygiZmjG80tlE+nwezpS2YZYNn+7v6bcrTe13iRzJ9ZFs9pjQJR74sF2f7fGrl2m8Y+zT9J9aDMrU9ZKIly3FA6Re2dJPJFeFxkXGaZ0L0FEUndbYlCdYh7bZSxdJ4TifmczLYqr2AIPQcOdRmAGOIzm2PARteK2cLkoNFnDC2PCFCVaKilwh6J+gjhz5NB9r2mfZNTjwiPKNpZeunjgunYdJbrKOw2TjMRoxJxuoVE6T+hbaJ9QChpy61UZfajYp2KzW2PSSU7TU/pmFwGufP9xXrbKq+rejTevIwONhGjBixxwNWHNr4HC9wzoex6Vm/kyKQQtqCkO5hx4WAi0VcrveY7hyO0hoLYhynEUeIQURmcnhDgHTcDsFe0c4iSWfzcHBop9FXf/VXb73+0EMPXfDH3/KWt+D222/HXXfdhWc961l4/et
fjxe96EW47777cOWVV1afOXnyJO677770uzTWP/ShD+ELv/AL8U3f9E14zWteg5MnT+KP/uiPsLu7e8HlEw6fCA4lCDWowsemdFx5ShKlzH2DEDcyY5PTCBx2sCIfDCe9jWIfE0/q5JLBXRPySIjTaMM9gBBqv5ty9gxxPbSEMualYhLeHBxC65TP5yitsRsTDR6hAUdogCNghxg74CTMe5Lur6NwkAwDYF7JZW+wzN6IEcgYokNpxIANy2ydDBAozlDnxQ86smMZw+MdgJ1k/AfDfwGXZrJyyecEcpw5SU6DyuCGQ+IxKXue4TQtnxpfG0RCIiNskQ1XV5Tt0MZq8WWtDPQgTZdKDIjs+KAUqdGRwyLN8dmCzBm02xwZWk3lkOhprgt5Y0kD21Z2sD4dqrB13E7Kw/ndlAe9PZyKJjpA+1TeqctXnteKavrEHLxxPuT2ZIyc+S/MBOr78rHmhymf5gGVKPsFERYI8riPs5sEwpIcduKAV2YepU8M8HH2CljzCgMkMsCl6C3JGwEgJuuWpSxh1p8IWADYiYZHT4QFFjGXR5Yzpi9Gx2aY6VR9kq2TQdNB8l9I3+tAaUZPRxiWfUR+B+e4MuSVAW4MPROJcAGDzgvAHM/pK8a1RiU7H75cXPy3dJZIhEVZLv0lkXGk5MykFEknW+NOvqHbX+TaCImuzc/qQastB2LEQvjViZFNWZ+V9fBgrKPTnBGi8eT7XhnaadCvv4XgrFiQDPIp6KNIj2RUA+jRF21EVWoeNADSZbf3zMucugs20zoc+yhD9GAjDjDSbHV0fih5VL4x9SNS+g+5XTqlpRONyKWlf7modfrUUOqQso6ad7TzV4pcDjBqMl7bPKUzqovcFdpXmet0uPbR9djWb8s36P4itNfPSpmDXVdzNR6Ovg8Xpe5OfV36nlo6lvWORHZqGVBaXcJbeWxBxTU5DnZPdtJoJ44egGvbMX2nYheZwT1ZWZm/bW0bAk1oIZNXgDgXp7YEqygRbVuJXMpOnAwdhZZlsi2X0E3bqIlXKE+Q9dG2FmRb0J6w9pqizwE00fQsnxE7XfpnyUf6mUw3+550haV3lPpBO/FC5BtAWMaChnZTTmEgOpbsmEHTJrRBdvY6ohk3ka2XdV74oo5ST4+5uBOa/NdFeSu8bL/MQNjtzby/tGP1+1UEHDRfWyfVCG9kLE+M9e32/7Qe5ZNlnefuntGsVN6l+iRlu/coj9FxyFjxCqtuH54J5/k8zvkzaRm+5DDUeQB3aMRJ2sTxNmGXCAvqVF+TfuxTi1q5He1SbS9oBj8EDu00uuyyyw68/tKXvvTwXwbw7/7dv8O3fMu3pOihu+66C+94xzvwpje9Cd/1Xd9VfYaIcPXVV8++87u/+7vx5V/+5fjX//pfp3Of/dmffUHlmn5UHUYCG+HGcouivLqBFQcS2w6WR6kFczNMEu7ACDEk2TM8KEQcuZibxPkwK+Yk/IxTYrEdtwn3I2w/GnboCOsrAeCIy/mGdmmTlmj1NGIRZ/Z75KTPHSQnyIgOwA55LGNo7FKicojQozOOomyQl11xavwbJBJZIw0ARgoDTyZORkMSvIxkRoi4KRVdpwSvg1OKP5dTlCriIHdQxsiQBsE5v4JeRiHOq5B7A9jAxWUROaxcmMjuCJbJodfzS64EvYY/lD0u5UKZN2EerP7q/CQhR4tOGjdtno4kSXAcwFMIkQzRHTJDUC73sTRNAwfIbKUy6mIfE6MuKNVSqZeCnpNRA7IGXpKLnI9ZPYnJufljPRMd1lmLU4HQQ0dN6XdnJSozs1I/O6uYv6edODbsP4eqWwpo4yeHtw8p35WLO7pIjpiYq4bjuvX0jM5Zo7+ne2zOjRJyBsQE0nGJzg4FebSMik07jrtIwV0AejZOjNkB2ZiSEH+hu8yId1DL7GjqHOJI1xUGjMkxFd4tOSfWMRR8HXMKyJKlDXIOrrwGX3JBSUJJnUg455ySZYdCI+m/ObpSLdGCzi0
hxvhURmnKJ1lK0/4t9Cu5oTR6k/OVpzynob/plF0059ivfTcZkZx52g5Qyv5o3yw5SaTP9UAlN0mekZQBjcidMDiRuyqVe5jIbtggr1YchiIDM9bwUdYD+0yp/0mehxDar3dvcckxmuW95HoISSsX8DGXXlg2tIA4bSkuM8gOSB05VBQ66S09kJAlJNI+uf/ottEDNau5JzIWc+0sEziSl8ul47xcLMpLbSdR7ks6z12f+iFizihJkhqiv/IScr0UNTv7Aq1qy6+ztM66KR/LErKxwr8l3VD9q/QNSztK/UL7Sq6ZjnKEkK5H0qupX9ohUTYdWX3L6lwtJ3U766VmeoiZZVReMmKjlkrZVB9yzUG+H/4v5bBO4HxfIJ7m17CULx4zYRNp7VlyP+Zl4z6el7wjWdbFJXsx91OW3/F6ku+Z/6TNFiLbk+0T3qj1lp4snQ5VNS1KaTy9X8v0vBwO2KToOsk9mekgqQI8crqHkJcm2wE52TwguVsCTcaUs7OLudCyngql6Ug2NLCrEzqECVnpk4vIy4BMuszzD9I3MtVqzurAMznyW0fmDZwnmmXiTGhYUjrrxnIZeKaj7stCn5xHEFEGxQkuyNJksRGd6kc51txEk86gdIT55ByUKHd7l+73edmkfWewoercaO2OrPvFQSa/zYoLFdWlJ1TKNSRWb+QyjhhT5P4An+RvmFTM4wSuvEtkKJD1o1wrI37Ts5W61+yvbVZP7Up2Out3IOYWCrkUd8gnB+EJHrFyQ+zDIU+oBykbM+j9XQcsItfIMlwNBs+UVcohFv7DM4MO7TT6yZ/8yYfx+nms12u8//3vxx133JHOOefwghe8AO9973tnnzt79iyuu+46eO/xtKc9Dd///d+PpzzlKQAA7z3e8Y534Du+4zvwohe9CL//+7+PJzzhCbjjjjtw6623zr5ztVphtVql36dPnwaQG9n4dWbaIgiOeLEm7yNsGByZm6uPqe8xA4MnOB+2lN9swvIrR4wxbhW/kcTVCMyxdAOOujU8CEsaVa6HkGyWiHHMrXDc7aelZifdftxuOng2ZYZ/AVEKIbJAwrZ76pLRGgyIcm6lQgdoQ0UFxM7QzgofG7KoyW6Eavq8namwRm/+pSMyNuyj0gkKZxMHrxuE9ali/K7jlqJhIJp381lx3ip3rRJYruM2pOIwkgSSQdhqAy0Pocq8GjpJZ6cSjOodtrZtV5uMVs5fCiYS4q4XXdwlg5IDqSxT2r41LhUKy4MCzxyhTdqBQxxbuS5CayQ+3bDDOprNevcNSbankx+a/Eix/1iF4RO9JE9W5g2V90E5xRglTcLbbH4ZbRzkXTZkdxLZjWKnuvNWHhCHvE/B1RKM2SLxo7QHUyp9Tpyct+YV5Nwx2UEh90niQdmxRfr9WrYwV9/meH+mD01oo3kmbM2bnciybXgXoxDDslSPI26DYxSWri6JcYQ2aeC7RB7s93DB2CbCrlL1ZSRpbdZ+hE+D9g0z9tljRFC8+yyJ8wl7vEiJlM/zMuZCI6x4kRKYrzkkqNQ0BHICc0m8KYMK2aJY75xU24VOdoMqDW4575JxrRJVU0UOxL/iYNZ9itUZz5a3tZHrK/1eeH5qiOXkmaUs0vfp57LD0X5fO0jESTDAASoitHybJFFOOWVU4viF0JQQd3/JDoSc06OW2Dw7DXI9dflrAwmGjg4KDoS8rGCl+toe72CIOuCc30m7zKxiwuqUxFvtljTGPIV5hy3EpKehH+3QkPqUJNnuYpTsDsl22So/TeSfVH62OVPEAZ8GjMiD6Cxzsnmp5YFud+GH/I3wIw9KsxwXGS5Lxcc4eZUTm7ucxyPZMEgJ3MVRLZsmLJIOCrlMlpLfI8rhHjoxsCRIHmMeORs5oSFOoTDADHnqxBGxivreR7mi6yhyOW/UjSRTU78wx+F+Saot/SzzO2OBvNvRMm0aguQcS+1AyqlTyEg92JXB1pDaFKktdC6ZHOWp7JP43p58mkyU1AN
6N7I84Mq01YnNS7tQZLoun+R9S3IDWa5oG0D0oeQvW8d22KgNKkr+0/bXJupikXNiN/RKpoe6eSDZFCJ/RrPrpkQDOLBpJ2fe7RPPaZoaZJMWnP5TnOe8lCnUnZItseIu7kymbc8o64s+yYkmebc2Sbwe+D8mnCbZ2XiM7aw28FA6Ied9i7vfRj5ZYIy7XwU5vUMjFsgJrHX+Myp4SPhHRzfJ/VpWi63uE02Q+uoaMjEkOe+6xEM1K9n2XyULjVzMdlPS8bEuYfwUdyFUduEiJp7vogxaYOpIk/FN6RjLjmsf+7BMWiNujjDVXXryz8OZXGgFy1WRpRTUhix5jJGXleWUIz1ynqGQtzA7k2rfkYj3UD+YhPL7kHx0svNgnsyr526ztpROpu90XaDsKhJr3/Kctrkkqv5CYMclmSba+d+BsIiSfEEOu5SdgGHy1EOcpGLD9NpJTzKCLZe05okbGZ+KDAREFvNUUB8Clyyn0YMPPohxHHHVVVeZ81dddRX+5E/+pPrMDTfcgDe96U34gi/4Apw6dQr/5t/8GzznOc/BH/3RH+Fxj3scPvKRj+Ds2bN47Wtfi3/1r/4VfuAHfgB33303vvqrvxrvete78NznPrf63jvvvBOvec1rZsua6Fn4Nco0FjpabiL0a41S9NyJ8Zo4PDRu33kcWQzoFiM2o0fnGKMPAl2yoe8uNnHnoCHtILRLGwCApzwQ2qDD4DoQOCSsdntYkMdxt8ZJt447Z4RlH9Lxc3JBG3qpzQAZokuuBVkaUOZaKPNkpPOwA0N5eweKM3CydtNFJa7TTEOVKP5XE5XzQe5YPq7BZWwwYhPXG+9zmCUeAay4w54PxuIqbR3apW1+ZaewcBwS2+ltJ1e+T9v8hi2goxJn2UKVYHcpywINyB1cBpkivGXXtzQQVYPVvI2yHfTrL9ScBF4lHwYk2WymaZqJc9lpFLb7XEdjPSRtk6i0ZdxpTowKcWroHV/2uceKF8mIW3Oftmwd4rb1IaF53s5SlHlZQ6GPfCttEa/uT1vdch7IGmNeDar1rLDQIBsHjCNugyNdSJi+6wYccevgVEDYdc8RA2k2HWnQJLuoyBbZadAk22WrwbW0CSCOxnBe8o+ltiFxo4XnxXGw8dlppLeZ1tszCy0CrWAcJjWk3aRcdBr1cYkqxZ0H4/avx7uQpLqnEUdojRNuhT4Oeo+IEYkQudiTzEjmJQ8pcgJ22eQQlSoDWDNhn8MgY8U9zvEuNnGQftbvRgdujzPjDvbjtr/nx+A0Gtlhf1yEHbZi8kHZrj21P4dlv4u0Hjw6jeLa8D5Gg0i/07nh0u5FpI3puItWdDL1Lg8Y8zbT2iGkHaPZAQUA2jmcuJ7zgEqMWjPQisZzPlY8zrlPifjUTlKRNTWpog3w7GTIgzpxFImTYOOjA1PdI7pS3i3OaXEGhNx5YcAiiR1la+m8O85ojzEi72qlZ4NziUn+w7mvl4OKHIEZd69C3sHzPC/TlspnxiMhvNwvcHo8krZ7lx08PSjsUBrbJm9bHwdaFHjjSBd3ZSOfZGwX67YbHbE9jTjq1qq+YVMH6aNpYFW0Qdp5Cy4NoPTufSlROOcthn3FbNZSOCdkJvUuJbujHNt4mWhxaZcgBjB4maxQ7QGEPAwIA9MF5fyLYRn+kBy34rQOuR82aVLlSNxiW5xvy7gFud5mWkM70zdxm2lxNpznZdopaC2Ja9VEi+Fl1RfzAFOoliHll/4vuSK7xNdDzN0WbDtCGIz3St+72Cdz0ns23K2dcqJnh9gDpE/K4Fg7DERHu6RrooMkpiNY0BBorZwDYod0plx5wFlCyqh3KCodPZq+g3Z+qDqteBG3BHd5gxS2ThE5Ft0n/THbC4Fai8K2Sru4Kp27TKkcQjtl2VQ6nbJO6CHyPdtD0pM0RUT21xwaBKg+KTsoLpJzes8vsR/tqX2/SLZVtX9G2TN4l64LT2u70FFIXbGIskn4M5eVY94su9JBy6Wj3To
6bcMuk4vocJOd30j3dSBNyshxH3WQ5iUto/PumoF/VsnOcpEOwQZfRdtc+C7ngM3cKJNqWXbqJNzZQSwyXDuNKPJGqHvYqTDkX/Rxd7B1cp4to7wO/XtM/NGT2kZe9ROdDD84BHO0dKifQ66FdVx7uNjvXdRz04mAks+0bOlgdzPOGxipjY2IsUTuOz3l3VLFEa7fDyClIBC9uuIeA4KM2uNlmujbj8dpZzLRFZT1UJd2ZJXd4WyuXqGKnCP1W65lWzpHFWpZVlJpro+md0FtQgCOK3NCS+0SsJuiFV1atrmg6fvkiNJ/p9GXEl03gNMmPRuR97LDXWH3XygueSLsC8Ett9yCW265Jf1+znOegyc/+cn4sR/7MXzv934vvA/G0ld91Vfhn/7TfwoAuOmmm/A7v/M7uOuuu2adRnfccQduv/329Pv06dO49tpri7sO8vzkUGHNRVWHU1Ub6BOlNyr8GbzDagjBjZuxw2bs4D2F3dM4CNjRO6XskRR7EMgyaEdSxA6M424/Rhd5HHceJ4lDJBE5LNGlcH+diMwuLxpTrpLs/ea4JbwOCaXE3OLXLZfGyOBGOhsQvLQyuAyzhWH22HHcGSp5522Su9pGqhL2KOtL15LfhAnn2eF8dPqc9Uuc5Z0QleCXOON3McSBwJkxbCG+4Q57YxgsDL7D3hgdSEzYHxZp8Lkeu+D84DBYGLwyKGVGlQ6adYozVTJ4o7gFOSmHjPqbnSXWBaWFhHYSyOCSGUmwSPm000ie7J1H34XWWnYjjvQbdC4oyGP9GksXDN2lCwYwwIr/tNESBlNi4CWnGqvd+xCdKD4v58tODqWEYr2l7hQVQ6hjHt54JnhfN+TFUaKPi+aAc3EQTYzdbsBOH3dOcgN2u00a+C0iDfSgfYxba6YoNN8Hgy0atyPHQb4S7Lm8lJLesxi6im+y0aMGK9Ke8flxDHQ0NFQDZXDmC9XcufKJ1rE9XTQU+jHkWyPGbr/Bbh/ocKxf4eQiRjF2K1zW72HhrHMxbE2+H2YiEXaf2okyS0eqyY58YiTJtuMr2cY2RhCdGo9i7Xvs+QVODUeDM9d3ODvsYN/3GL3D3rDAagyOos3YYT3GQYV3GL2zNEFeAhzqHXhAlpvJ+SRjlTGizwv/60SE4gxJkXFxWZ92V8oAWjuEU162GLUkDZSNRLujknYQjwVvlXKghPkerENas4gYocJDxlkRvz/4PEAdfBishHLkcms52LmQh89RNKy7IRnny7iBQ3CqDFi6IRnpOzFH347bpJ3NxEjXjjvtdMsO16yPkvMtzeR3ceARZNaIvOvJwOH49GY3ybGzm2XQ0+ywGTuMqh+nNlH1di7zx7IfUj9adgOO9NFR5Hx0kATeOdKtAx0g8na6Tbp2Bmw4bILhQRh8pwbUlJz0Iotl0mBUDh3NnbqthJ9CX41tG+X4yHHQxQ4bL/olOs848EzmRcuFMiFGyDkbKQ5K+84nB9KyG7JzsRvSpMqRbhP1UaDbjnI0STSA5uMx6nZxssuOrGvf49y4TI6u4PB3Scam6Fy20aBQMlbrYvnVUSi/OKQX0Rnv4LHbDZH/QzTnjhuSo1ovkU2ypdCzoruzsySUUxwM4kBK0a6i+6HsAJCZjFm67KBbugHHupDf0pFXDgAZKNmIyqncULZfjDyUvrb2fXLgycDRR2fSKM465TTa91Gmg7Ae+zQR4IUX5RteTxjJoD8OFGVCrvPJeZudRkrvIcglaY+lG3Ck2+RIozihpgeMOhJVuEB4T/NF3ozGOv40dJTewA574zLtxHV+XGI1BntqNfZxJ1Ll1OUc5S7yznt7rHd1kyWUfRd2TJZ+mGSnGrRb3eaxcD7RSnY77d2Io906LmdXPEMSKaJ4Jjkf8iYYyTEFjnXKEZv74iiKzrPkcPALrKKNuVYTt1qCaXkptAguxMA3rPqFmZhhTHS83lxo2Q1pQm0nTixm5/Y6RU8ecZEmcUJkUTi0g1Mly6YVL6IOCnZ
kcBSWUURkyip9HVCT1hM6ZI7UzhXha2OrRJkT9I+ewMkReCJjJR+udqYAId/sBh049vO9OAGz5h7nxp20C/beGJ19DOM0kgkF4ZOFE/mTJ7Yn9lgxka0dn3ncaZ2WtdUbWedpqulJXE506KJD+ahbhYj7GKRxnDboiXGEPI5RjI6GwyImBC+/KOPu0DfV6hh45FQMIfp5w2Irhwk6mSj4eHDJnEZXXHEFuq7DAw88YM4/8MADW3MWaSwWCzz1qU/FBz/4wfTOvu/xeZ/3eea+Jz/5yfit3/qt2ffs7OxgZ2dncp4p/NND5nlfz5xTyZ4mjsY55YsHNiHnf8bJoo0sUXgUOvCSBiyjUtuJhrLkIGEAIwUFTGCcoMC4HTGOEGEn7jjUIef4AQBJ6RmicnQSW8YGIbpoxQ4bzuGE4glPyj2WW89860GN1K3sqEuVX2knDjQdwk4Cy6hkdH4QIXvpx2MgJdwdQdjjDisOW32f8kdwJm4Dfno8gofGoxi4w7lxB6eH4DTaGxY4O+xg44OjaH+IS9K8w3qIziFPGIYw+GQm+CEM2sEE9gDioF0MSSmsKOCtoCDYQEEowVmHQRn9pnnIGB+s/jCSgRAagbLDJF6Tb6eyuuAwAAGu8+j7EeSCsbXsB3RxueTCRSNA10+M7DiA2gxx0C6DiFEGGAQWA48J7Mk0pFbxMrCe0LHCABw7NnNxTbdJeU334VhvIobrPbo+Gpedx6KPUQ0uRIIRZQcUA4E3xi45gsbRJYONhe66vvG8bgcWeaAYnHT7YNqe6fnEf7reFTocBGLAAUwMcgB6D+qiE6n36Pq4dHExYncZHIq73YBji1UY9HYDjvf7SUad7PdSdMCJLhxL8n0xnobo6JZZZT1oF0fu+XGJU5sQ3bEae5zZ7GA19hhHFxy5Q+CzzabDOEZDcCD40WXe13xGub7UceYxx/m8y/2OSHjP9kWRz+k+p5w+8RpILze0DZSXdsSoUmXgmGVI8T96AA9k/oManKd+xeXXLFwqN9t+rCH9UcmZlIw9fTvwfHIS+DBASc+XTlrpY51PDuGu83HThyBrOvJwLjiuFy70vaWLDkwEJ5MeaMsAvAOnCC9pB6FBir7SA62ow8Spshp77I9hZnd/6LE3BMN2NfTYWy+C83FwGDZdlGcEHsn2vSSDKfVncpwii7t+hFvkuvb9GCaInE8O2t754EiLjmxxopWTENoJvfEhwkcG3eLc8UzmeBxVbpN4PrRnnU+S7ILIMi27kWVulOlZNuX+xtqMEp5zCPJG0YeUDiLiRBdxYi/6MQ1sd/ohOmUCrZZRLveUB8HSJAAw+pD7jaPjbH+Mg/MoQwbvwD7Qymv9pGX3XGeqQDY5EfmQnNNx4xOph/C4OLB7KqIN02BOORyk/WJbi1MoOW8ZyX7Rekrqk2fzY5RA5LHdfogO3QFH+03iO5kkkkgEHYGZIhQVb2oHbRmRtvZ9cjiGaFkpu1oW6PNEy2rso4OWMIwOw9glnSv1MXoWua2CTZXL5jof5axMEMRralJq0cUoUbG5uzE5/TvSEwlx+RVJpJJEAto+mvgBWR6JXE0yNfYRn6L0Aq1kknJkwnrosY56Tuig65ocCT7LZdY2YDpvC+Vc1IHIOi+Vn9RAWXSZdiA5j2Xsk2ILiKNUyyztnMzR82FLdpHXyflAekIu8PTK98kxshqjc5yDjF5HmbcZuyTnDN2VntM6S/qP3O9LOqo+op3b0qcXSl4vXHBoOxecG7tuSBsXHenWWEZa7CTnf17ayIw0+OfoJC0nW8Wxrx0DyZSNfcnrOiWdbZ1G2RmonCi1iS/kMae0zdKNyWm6dGGsJg7TTjud4nsH5H4vTvqBO6x9h3PDMtYrTC4HvaUnGPKyOVDYCbx3ytGonJt2Mtk6OlMP0w4m5DQAOsJQQ+iY+qaiu0t0E5oEe+WYW+Fot0aPEZd153FZt4cFjbjMrfAotx9TfHgcc9lBUwvWSH8556sKubsIe+y
wF3N17nFwKO7zMskIbfdFsX5oXDKn0XK5xNOf/nTcc889Kd+Q9x733HMPvu3bvu1Q7xjHEX/wB3+AL//yL0/vfOYzn2l2VwOAP/3TP8V111134YVUht0EYufMjNL1WZ65cGBDyaAwZtXruih4+xGj8+i7EcwUjEg3onOMo4uQW2THDclbLSF64tEHxMscnEa7NKR16ZKHnRGcREPc3cZHx1BYaxlyhaw5eHzP+z6t9d+r5Aph2DXUOWQ7CmEluIReujPL7LGsH991YcmdzFLs0CYILOQZoUxqTt8Sh5t4XAd2OOd3sBdDzj82HMNDw1EM7PDQ5ige2hzBwB3ObxY4t4mCbOiwHwcF3lMYFMSBJg/BmIQHaHDAGOpFIwFj7pzkpzzDttAzCIIx+XOEP6jGpkoFcOV0cawFBxXOGWNUy7cdAMdgAsaOMfQcs3gy0HugiwJYnEtF3djHgQTCoB1DHLSPFP4BIA977JETyZfj1kQThrEFyhGB1DPOLkLVu6QN6XNqrAcX60+A7xljz+F3x0DPyaESnAxsBtShY7mYvCHwBcZYFk8glVlV2iHwTG4nXU41yZHLWbRroJuqq+a/wjA8EIpPmTjRgXvAd+GDY4/ID4y9BeP0wgOO0fUj+mUY+Pb9iN3FJjgZ3YhjyzV2uiCvTiz2caQL/XsnGh2MMJMoA4d9v8D+mGegzm52UiTmufUyDCxGh/W6xzA4wBP8xoEHF2g5ONAQCEgj0ElfFT7DlN+lrpB6JzoAPjmLKu1hjjnLdGkg3Yb6PXP0V8/MO3CkcPlc+GvPMVfuKYuu5My049nv5mPVYVj9Vk5L079ndK13Qb6w0Kzj5FSRYyIG9WGgBwp6sotRkF0X9KQjpAGLGLx6YJfqyXYgqwebeoA6jB020Rk0Dh02mw7sEXTAqgt1HAFaO9AIuMhnKdhC+iIVfNYJnzH8YoFhEW5cdaGOUneKjjSK/So4S+IAVyYSFEHToIeRdJc4ptOERpS/DErtJM9Q1GuTdjbtn3mJpJ2TzAnPuei4NvKo1Emi40gdR/WALvS96PcHCy84TjqJCEEHOSTHvugh4Q0gD+wmPOdz5MU4BuefOP38pks6HtoJmBzzW+gzB6VLE1+Lk6LPPO56DxcH7S46mgArA/TAlVX/T9Gq4gzSExTGiYLUV/WxOMeBOEmknJnLxRAivuIgrYuOOHHUiTNAL2ut5cEafV4mNXqJSAu8Ks6gMLGkImPE9uLg+Pdj5OUxOmmZbJvU5B2TlcPEUbbEqjtO10l0O+IEiTi0I2+lSFMlV9Ix5ciNbaBIb/mVnO+xrBzbU5y63rswSTlGPTc4cJwEYbGnUn/ThgyMfCb1eyLiCfCd0nuuuKmmv2K/BIItRJ3th+JU6RzDuegAdXlw7yg77jrFVx2F1BziGJUcfaMPETcSkb4ZujRxOwwySQTw6CLPwNTB0j2eEV0qfYGRHEaTvq70eJrQJXG2CZ9E3RTrvejH1G/Eud25GDHq8pJIIgY4LN8aYvS9REiHiFGXJiM1r0ijEpCcwDoKPXGpOjbyJF5MOdPE8UK6nVSELDhEpEXduhQnDmUnjjhtxKkijs+pk77D3tCneoX2DEQWZx0BaeKAEHW/y/Tt1PdqjrA88ZYspurxXL/Vk3M6f508K39luasjxtF+jd0Ydfeo/jwuX5zHgkZc0Z/Blf1p7NCAE26DR/EaO2kJcmZVVv8VeMjOv2Gp3znf4TwvsIkrZfb8MqVMkahYu7nO4XFJl6fdfvvteNnLXoZnPOMZuPnmm/H6178e586dS7upvfSlL8VjH/tY3HnnnQCA7/me78Gzn/1sXH/99XjooYfwute9Dh/+8Ifxzd/8zemd//yf/3O8+MUvxhd90Rfh+c9/Pu6++2781//6X/Hud7/7wgtYyFcDNrdUnpUWOUQkEW1pNGlRH9b8h+VpLgqJoDR65zF0Dp1j7LvgUU87kbCsJc27oYViEbwKHZYyjgiJZB0Ql5eFqJw
NgsdS1kee8bspFPSM300h3Of9Evt+GXaL8Tk/jay9TrpKdTAuKCSdWsK1wzIDWX6Qk+5KEmZZfiD1pGh56m/p5MqrmIdoYIcz4y7O+xA59LHNUZzaHMHgHc6ud3BmvROMl02P1boPBsrg4NfRcBwRnUPBqHYDgoOIYY9HIEWasmINqazwwGEYRUkP8/wcPxYSwci92nltzJf3FWXlmPmOnQx2okHfh9+IRr3XEk9eoQYO3UCgAck5QmoAb50oRTkrYL2tlP6meo7KOho6FC8u645Q7+w0olB3Arjn4DiRQY2EvYkOlu+OuS40UuINGtUgqhhYkZLwum2qDrJanfX3oyU0+6zII1IndP2zNZyMSO7iP1A6BgF+wfCLfLy/jMZUxziz9EAXDO/FckDXB4V/ZLnBso/hzd2IhfNq0B4Mv/XYYS3Ly4YO63VYdsaDw7ju4EcKfLRygcYe6DaKzwbAJadu5jMziJU+KX+d/KZpfz2o75YyPnloNT3n35We1eVRv6s3b+v3whN+5h59e6V8hb1dGYip64mPc78GYBx0s5ZLdNYJ/X0czEn/yk5LxhidlkOHOPBGdmITVHRkiF4hp+ZXlRzVM/J50EDJWcBMYSA2xIYYCLQJPOYGgttEnhrjsfCWz3U3pFPZmH3qOwTfh3qJw5Jj/ThOInkC4Dg41ZRDMzkkpRE4NqLQ11OSsfDIzuno6HFaTqnj8DqabSvdd0zbKllm5K9u94K3cl9T7S86RxxIhIkjN92nnW89knNp03GgYzCM8oSGhprQoBGggZJD320o00LkBrKzP9VpDlqOym/K5bNyJutVROfpINmvnXVcmw5Xk0Uc6pX1C6W+T37aplTURXgOpCeJQuQJLYIcD1GBKvpLJo2AFB2YIyZzsXQEUFo+5SlH6HmkgT4YsR+SrUfSpdHmEjsifSTXyzZAplmSJZRpntsk3Ox15vQu6DFpj0CDoi0K0IEMksuTyx5+pOg8kUtjpgNtgkwiDjrODeEZrdvkfbkINKXLXNEIYOU00jILuYi27Fp/hlwSYXLFAYM45Uj9JaSILwDZAQyR1zEZufCVsG1y5hD8SIk/WB3LZGSaRJO+UJZXfpe6U9PPyC5VcdX2WmeyA7zIGZE/aWIx6qaO4fox9B+yTrUyMi85TEeHMTpGvThKK4MIHT0ldZBGM+ZuSY94TNDn2Z5P7RPbD0oGgNF1nPq+OAcJgCzzBzgtIWUOEa2bGH3vPWHcBFtOIsL1Ko2kNxQPkUTDRX0vzkhrwmY5hFgWS6v6PTr9TCKhthUAQ08dfd5FZ6CLzsFljCC9bLmPk4s9LN2IK5fHcGa5i1034PLuHPb6MyEXFvISV0DUe3a8OQQRuGEXNzhwOMvLmLKhw1m/k3J4rmJ6BlmlpHXQYXFJnUYvfvGL8dGPfhSvetWrcP/99+Omm27C3XffnZJj//mf/zmcy8kXP/axj+FbvuVbcP/99+NRj3oUnv70p+N3fud3zHK0f/AP/gHuuusu3Hnnnfj2b/923HDDDXjrW9+KL/zCL7zwAtY60cwt0wt00KPb360NKQ5SLCUfc5KALnjaezdiJ4Z+Huk32O02KRG23pUjMF8UPuAw6xc/tgnmIFaqE6y4w3nuMURHy2l/BPvcY+0XODUeCQmhucPpITuN9sacK0TC+VlKO0OsLHyC0BGhkpI+R4+1hEDudpu45CBEGokzSSdrTNFFiOvY4zp4Wd8sjrWzm52Ql8g7nFnt4Ox6B94T1use69UizMJuHHgdrHQaALemoHRGhEFoNCjDMdKx084A7dZVFU+DUCAbgReCQsFZo4CNsQ7MG02zxlTte1DGLRAHc0HjJ2dJukZTg0IGDslxgjRAIc+BVozqwMMaN5UOqhW/hlH0PDGSDqy/NpAcJ4eZDPLSILYja/BDt0f468ZcBhrZDio5H6dyym/AGjCqXLMOx8L4L+uUZrdRO5ZOqVLTV+7nwuAOgx6hDwUnIgXa+AXiAJjhl5G
WHbC/48F9UPpndjxcHOi7zifFzzr/yejgJWJrCH00908XBw2A20TnkI+O3MhT2ZHLavBn+UDXSwasEweK0LFyevvYoDQ2p4/UmlQr+QMVfvn9khcYF9zvZ3+Xgw49bsrr1HIfrvG1fl7xmB3MIckUHf3FHRX9kJPBzn2WU3JeHC5lv6FiMJXKCZHt4ZqRWUOYMAAHHnMDgrwaOfOcDDY0f8mndP2UDOEuOKVh6kqm34b7pICVPqzrpWhOMdJRO+ZFFqfjFN2Z5RGY6/JEtyvUM8jftPcpQ52zvDGOIjVAtfKFTP3FgDDRf8rBK3xRHfiWOrfkyzHLD/KqbYUHtD6p8L4h04zMCPVR7Vo6zPTkTMWRMdGvpPSOrldy0JJxJJQ2QnrVjCzkjpUcB3jBga4UHEo1Z0B2ABQv1wNXphyBK5FcUtYxl9mpiFzDo6Pqnx5pMmbaEAW9pD2MnJGJIZEjlGlt+FL1Pbm/9oHYIDxbGKqeTn1NeEo52UnTZDJhGZ8/gA6ztkFZLKFJdFpqOVOrbnmPnmjjyBs1B1R6hgAQm34gjpdR093oMkoTuGJnGXmtnYvaeV/Ky0rnnUyq6utGZ031euo7QHL2pn4uxx2HqHWZbC0dkvJa7eBNjl9K/WCq0AC7sVDms1IfTcpd/C6Rr7Npt0H1CXGmAjBLr9My/tiGkopBnH0pCn9QjmCRw0JUKUdnozR9opsqS6WOJLUo5WcN22bUjOzUL8h1pORUDpHBXR+cgmeWuzi6cxS9G3Fmdwfn/BK7bsBj+rPY8wscicn1JeF+h5wXLSdM92CEKDTJAXeWd3DO74SgjnEXZ8cdrGJ0/iCpQWY78XYQ88PJn/2pjdOnT+Oyyy7DE171fXC7uwDqpOUKf0zuqTxcNSj0CS14+9ApqGMce/R5nHjMOXSL0ST3k7wGvfO4YnkWTz5+P67cOY1d2uAx/VmccPtq55C4PA05Uarqu1hzj/2Yhf+cX+Kh8RhW3GPPL/Gx4WhILud7PLQ5gr1xgSGuO90fQtjbOiboZgRnjaybDvJUGYpR6lI6BiRMMG3f6XJopqxVdcQxL4Eks/Qpl4UmneRwsEk9XVz/HdbGjt5hb7PAaghRROv9BTbrPgiwtQNWLg42CbSmpIDdOhvbk4Fo3DfWaSVVzjCXykQp5gvqw6VSE4VWKLbSYVWVf4eRAgSjWFmU5MTggqnjXMcQmuhIrOA0gqIbpzLbGbMtVmD1e8VgRyvM9Coufuuy5/eWAxJfRlwR8sCWMP2ut86gFHUUeScYh+oeuVYMSmYHJAdB+E05QdJgXK6nAdjU6TflAfV9Qh4AIdMEQI5GIhQOpBiR1CM4IJfxWIwnKYsZ3FKIAIgGYRjMUXIUTfonc3DWmQHGtD10XZioGKRNbbIJtB7gmfPltS3vqkIMaXV8Ie+a8P02uaC/WSnWtK6ZT2uGdRmNQsxbvy8DuFK2CA+VkSXJ8VL0SQCVQXedxJo+k2Ov/iZeiryleDHco5xGKPqwyA+y5UpOVsR69FFLmj5p6aMjb0yfLOuk6a77kpSLc53gQ7RGrrftIzVno/0GT3USAOOwj2UOZadCh1DRbooHFM8b/VI5Z50wZKKRDqSV0GNkFQWr5TUmclrTQ5cpkaqUl7A8DlW/0mntlcM0vUPLAU2Dsk5yUtkHtq5FvHfxAlYTAUFeZ74Nx5wHwdKODmGQDOSonaLuE2ebdvpoO0A5SEwEnOFjGGdJGsyVbXwgn6h6QLWBfkbuSb+tA3rqZJhpmC00r9LH2ATI+kwmLKM8mkxEHYB0S03XS9FFrgodDsJh5FTZD2vHit6Jzkp2G94uHIrV4yjfJuUs+bOmx4rjQ026VPS14bNYnxARGesoEadQ95RlER6A5ncyVeJJIXXZqV7mmdP6vSWYMrEmOlbqpPpUcioX9QFT7t9x9UaeuCDbbul7aiJARbkn5+RBvDojHyq1VB9Vt+s+KreVsiVFA3NIYREnSBc7A/qdsLz3Ubv
nccXRc1h2Ax6zPIerd06FnT/j7nthl94wjpfdGeWYIXmDwzj3rN/BWR825/h/m+N4cHMCK9/jo+eP46/OXhZyMJ5eYHhwF36/g9/fx5+95rtx6tQpnDx5ciu5Pql2T7voIPNnermwAWdfwRXdVXtA9XRCZDZlwITQvQ6jizuSjHHnGRd2SxnZ54SBsLvWyO8xSttwTInZwjamgdlO+yMpGfRHNyew7xc4Py7xsfXRkMF+7HFmvYP9oYf3DqtNHxPMxgiAMU/fme0so3eZIvHK5LHJYUQ6pDn87l3OU7HoRiziril6BzG9RrdM6hmcRJQTVsed58Z1hzHmPMHKBWeRD8sKknNIlhzIoEAtOXBDVtDGUSSKG1Zx29kXpOgFSW5dVd6JmFO+SoNRr76nlIkMitPxIQwIg7JMEhVlCqHqVDGKq9AGkC9opcsrETeAXcoyp9ZmTlvBzodT9qoerOLqk1MIopRIGZqRPnq5AStyaEM3dEQzqDRGfGpPzksJLsDHn5WlagxdXsoRU2EmMIZ8U1DQHPushIFL+5N6f72ZC8Fo+B2JVj5FIMWBR3QC+EUxcHZCH80zuq9BRarBGM1pAJ+eYfs85HoWVFLXZIwkI8860HSb5qrbKLa5Y8sTit/Ld84YdfLXGLmTe2aEiC7TpPyVR7Zg8nz6zXVZM6ED1+moPqCXnCbjMPJsGlx3ZHjL8FnFaVS+V5fb0EXYQtqsxn+es4NFORmyY4nV+yPUAIq7GKmJ8C7vY73G+E8TkjClU9HMk8nmku9UPfLAg40DO8llfU+tXSc8zhOaGZ5TZZI+pWVRdp6FumvZJHW90AnSPLlhZ8S3TmhoWmmZoaOLkhwXWs4wMNmfEx0f669lj9GnqqwmmqqUA/qds8So8DYwaZ+yCkxKp6UBLqVjxOOJsyXpQ1Un+2rDl4ZHTZ9SbaKPtW6cyHRLE01bc5xoq+SJq1wHkvMLKNpGt/NcXYp2KIlBM9cnOiTWu4wErE226efLyR/zubIOQgfdX7ySRRfQ/ybYxq+pbUjRFtbxa+iuHhU6p4k3xQ/ShwHD+/q7wPSdM8W3z+s2LNu1IJfR15q3COBenNpkl14TTEQkU0Wmb60Xmd9cq0PxzKSP1upknqdUtom8cgCDlANJ5F3l24yJ49g4+4qChT5Z6A3po9qpu62+h4Gm45zsNEzNRn5o+8T3nJadb4aQXsE5DxpDjuBFN4bNW9hht9tg6UYcdWv0blS77IXd+I7QBgsawAi7fso4/ozfxblxJ64EOoIzw07YUXhcxA0c6ksZD4PmNNqGmrAteH2W7MUztXfP3TJ5Z2S+Lq6H7PoxRNF0YR3oogu7WfTkcbxf4WgXtvSTbVAFI+dN6CWUzYOw55dpm8OHxqN4cDiBlV/g1GYXH1mdwN64xP6wwEP7u9gfFhhHh9Uq7EQED/CmC+tNGTFkUksmJCHBZdgiwTiTQHptLNLaeFCIOpLfnfPoJSEk5YSQwUEUPug9xbWbwdkWdqkKa5z9JiYK9ACtHbAJy83chkDrUBxX5j+Jyw9CuPrUiBSH0kSh6wauMYs6x5i5p7y3YlCYv8ZgmZkhPkhhHMDkBORxNhANN5ZmrH9jRrnWBxrqfFKKPHnnrPJL7+fD3VdC01oqG70nBIC9RC5lDa7X408GpOU3deST1M+XDof4Qw88t5W5rALng+T0KutY/UfWWCSp3AFGlWqnyXKU+D3hLSYGd5QMC99TctJ4kxMrO2omkQwm8kjOayODC0O7KJNcR35v2A2OTB9KLKCNNfWMbpvqsg+o7+nzXN5TlCe91g7Uk4GfPlpek/OHYJSyP809Uj1/Ac7Xyj2Z7jy5ZspEWXjayJLM1zYnDCWjdC5SKTxTK9+Wpata3kqUiT6vZG55f+UzYfef2CmSE8X0e8p0Mg+XMnbGQafqMFePSV/Q/UUPtLTzXj9rfk9pl86XRSKAODuDCARmDucA0y90X0jXKqjWL37fEiKXJp0tiEy6TqU
sK2VLWV/zTs5yT+qtjlPt1b3Vcsn52cpTfm5ORh9kk1buM44pieRzYTJNNkJwQ9Z3ZikgIS/l0vpFo5SRamLIynfFn1rGasfdtqgao3uQJ+mUPIDsnU6knJr5n3wj8a96Xyq/lF3LA4+6fJD6o3henTfPSR0n9OEpTfQ74iEV9C8j1hKvRmVHuv0411fTdIKiDlQ7P1NX+162vKfKnuR47bHCkVtz4hiZpftbSR97Sx1b6mv44aCclE4iCZEm0uzyXGEyWB3PB5QPxf3C/2XZy/tTn0iPTfuq/q0MpckEq5IF+TfZSF/1PUu36bfM/SKXYj+mESkthM65mcp8IdiiU0s9OAXZ+kt7OgYNFByExGAfczZ1DvsI8rTrfZg4ArDTjVh2A451wWm0pAHHuhV2XHQauTDOZ4SNYiQFy7mYx2hgh1PDEZzZ7IYk48Mi7B44urgz9UHcM0VzGm1DTckpA+BABaXfgxnZWDnP6qTk/KAubundjei7MSy18gAzmS1XQ9+iFFnkIxN5BANbtipdc4cN9/AgnPFHcDpuWf3/Nidw//ok9sYFTq+P4KN7x7A/LLDe9NjbW4aIooGAVQfeuKDEN5STykbFD8CEBnJKAAfYJINyzPYaOG21SxTXM6ukimu1Rj5tl8yUOhunRIoI62Q3YakceYA2DjSoxJYxisg4iiRfkQgucQjFwaqegS35IBlLJR9AhFsWKFnAIZ07CCTy2SPZwySGnBglYikw5XsA8JzzwSgWmirs8h5dp2QkRWMLhfOj9r3y27G8rDoFIycrzIlpiwFItfNMlRxjRlnPQVtIyuA0v2efs2WZnAegZ5WJOSs5begZpUQzhhZvKUx8kWpPk0hWZoepONYJrku+rBgUckwcecFTuqiXCZB2ihGAIQ6aHcLOLCTHYjCRzTtjDAqeGBemLCUdajTSy+hU3Yyx1tGk7Sd8nIiL4AyvlCvYU4F/WV6n66Vfqtq2NiilCTNnI8nQVx1vY8Xy/duv8XyfvpB3QtFc9bNDQQgosi6di1FwVIkoAcy58LsU3IUxqM/J+dQ/D+h3bL8t9Uv9XJXFT6KkaNJmZfn0Ujejg5SsD78rhdQyUv9OultFGGl6UHTsCG8R8mBS1w9FUxJZkpI6n+wAMssKfex7ANQ906rMT1AoPvVFPQ7M/lqiksdJ2G4b65Z6Y3Kt8vBM26jHZmRZ5WRpd9L02uQzhR2Q3uEinxHAzPCgFJXKHrnPDVpnUN2uKeo7O4FkoqVhbC7Dm3N5tnR/m6GLdULnMpfycpLgVorJSqRI+VQZjd4ry67LvO1ceq+ijfrWlI62sDUenNPrDELpWK9Gt83A1EHkRFHmVNZIQOMgnnmhnriqlkPru7IPbYHsoCZ01vQ4rDoyRdW8K3rd2/bSUZv54TCOk7plR4jYZflacuTpSs4VVvOyamNT5uJdc/bBxFE+kavyUl2vgteSXWlz0IVzM/q3psLUO015VdszRVoXfFgWsVbXbefndIGBrnPUkxJF5pjDbyKEoGIHOMbQ9SGv5+DhOARHLPsRSzdiv++xcB5LGrDqe+y4ER2NOBJ3Smdkp5FnwtkxOI1Gdji12cWZuLvw3rBIOwmaXZQvAM1ptA3CaDO01TPPB83AQ79GRv3Q55WCNsxNYM8AEYYxbCk9UNgyb4jL0/rohe6Ysen66CTKy9EGdAAYm6iEPBNWHLbfG+Dw0HAMfzMcw5o7PLg6gb/evwz74wJnV0t87NxRrDY9eOMwnl8ER9EIuBUlx0vaiUjX1wi+EFWQdpbSnmEJPSdK9wXJms+HmSHRzgzvKOmPTM8w4Oa4fTA80rajFHe6cXFdPK0p5TdxG5vENDuHlOCPil8LMEY0oLrcvkkIcmrRLIy1ElYCJTmRAGvAoDhWfJSN4SI3TlJMNEkgbZ7boqAZmORNqBp88i15r8xEeyDtTBHpxgd8N0UL1L4x+XRp/dhjq8TCzHXtXfo1076rNBFN21BeZww6dSwfqzd
fzRqh8ib9hXDHAYps7h4bLkym76XlIEAerKZjKH5Q79LFVcagjqxLy8XkeBA+pTRjLP8cGOxj/pT4Xpf6iHxA02ebtifLr8bgLH6b62SUvMmtIrsV1VA1hsn2t3RNIhao6KvSRyj9ZjE2WTkRt1VbF6nWV5PhWPQzfc+W+gHKeRVfxrUybelj1TIe1mY5rLFXfDo5yynXXs+CTo1Nrn+jKoeKS4bXsjzPy1xi34v3Wh6z/TDxMKtvxT6W6S78wkrRCF+peswZtJXfua/nE7a7MUJEkP1eZhNVkNoHiv44iQKbc56VelHRJH1BLckJAwVSsjnImKSriuWCodTzjRy6ZpYRNRTdo9L3pu8Nx0KUfNfW6IhE96K8+md6JU34PJVNqZcsM8jQuhYplPhb3ScDM4r3UXqeK9+wKOtKBW+la5x/T6LQUr0Unyk9UNbDLD2T5dpQ51V9J+1ZHsv32Z42g+y5SUZtO6h3yQunbW7pUNo+qZza8eWKulRszYmDXf81ywpLoTmFdmyZNov2QTqnIqNm66TfS/lichRsK4v0WbmvaMfaJNiknQ/6hnpG9HaqTzpHIITNNlK/q0UUE6c6JVuNwnljj9FUV6T6VMpqaMXSP7XgPKCiyRZB4l+EYs1GVRtouus+qRPI68/rdiveU9MfSXfMyZk5eVLeA9UelWvm+dp9lfImWajKR4SQ4D8yQMjNiTDm2ngMmw7kHfYpBomMIxbdiME79M5j4QaM7LDsBnTEWLkNFjSCoSONCOfHJfbGJUZPOLMJGzwN3mG17jEMIYWM2UHwAtCcRofBFqMxnVP3VIWPeZ9Ij/q3KB9CInBAjL4bcWSxQb8Iy9OGLkiN3o3Y6WK4Wr/G0g1YYIxZ1RGji+LuYQj/9nzYgm9gh/83HMP/2xzHyvd4cHUcD+4dw/7QY7W/wP65HWw2XXC0nO/SlrPdOiSIBiMl4TPEIZjtgzk6FYIiopjoVoSRLCegZHhMDZsohRGEphZ86ajYijUnyo3RRKNEF8Wye7XbjSh0PUOkXy5OrbKd52AUEU0VMWAHCJp28XhGBmch50kdIytfJdC1wAeQnSgVTBxFeqaxLJ+iD3FUihzpqxMOq3Xls0rFKAGydZ8z1kzBVZk4HnCs61wjVd6bv5mNjWo59KPiLNMGHdN2JVrSufr+uQ+q4+Ib1Yg3IOcxAgoDJPNiGTo8a5BImYv+YXMMcYo8dDEHWDrWiQ2ZrVNEL+uhsqKWBhNDQxsQZf10zqlktKj6mTwctaV502IY2TBj/NplSnnHllrehXTM+X4rh+qCZ0IlQxOq3jg76z+HmiNMfswZUpNvzOdrqZVrQl9WNNJGbHHfxPCb6/9sf1yIQVij88QJJPymk3UbPkPmMemfJV9D0VqWqEb656XQMgGikjVLtG0pg2pyR58/0IAkWyYAU34gdWssR6XNM02ybizpY2aia3xq9FrWh0Ifkc3OJFHmYCwDiaZltU0OLYKNmHY0qUs6Ru13peClvoHVy2VOPzPwTjxAmd9nUJeRSg9U5KeWG6nuUNfU8aEQbbjq4ETLfvV70iAFTc2kavzvRLYUk3Ombob/1Lv1/eq7pgz6vJY3jBxhL9GlafLM2mCGb3V1peuwqlM8QVJvTiQ1dDJ1LTa4ONDJoHUFTel2IU5+Y3Mle0j6pJVfgZ+p4PFCp2QS5GOpOqsTmg6wbVzaOludLfpdRhCXBapUPukkthsleAr52RhwnkGy/DE6uql4BzHALuqk5IGN33TSoRAmxsu6VMpm5JSSX+aZrXXSZajwMmASVE/0gy6/4nHdPqmdGDFAIPL/Fpst83u9r5f1mLBLcaKMOtsKYcKZPlEtjylXeIEeH/HowAODmDG4LgSIsGx4RdFp1IHhsPDBabTvFjYRdow0Oj8sQrJrdji/WeD8eoHRO2w2PcYhLImj0cExzVVhFs1ptA3SyCVz6VsqgmvCr5XnANtYE8UR/xOM0LClKcXdxDritBbRc0767Ck
kfN74Huu49KxjH/s4YcV9WJLGhHN+B3t+iQ13+JvNMTy4Po712ONj+0dxem8X66HHuN/Bn+9BGwe3IXR7Me+PB9x66mwphbTjtCNk8Lqqe2Q2kKNUCf74KGxdkB5mZiMKklBppeQ08SS7fhTUZneljdr9bC66SAtD0X06FFQbwbXGlmdKZVsaaBVDpcTEt6MMFSHHnHK2daH8u6RX7f2AMda5uGbKIQrBK8WfnEacFGYtvw2X3ytn4UXx62/OSTdtUCQlVwl7rvRV+w0y1w+jUE3yzZrjroRWJKgc629VWNwonVpY/pwhpWdTJ7RW5aqUcWI4Frw4ocOY+YEGDrsPxr7XbZAMKp043vBoNLTLuqV6KAMkzDIVOy31eec6k1BSD85d8Y7SCJmra3Fco38ZlQPkviHHJu+SGcQW9JDn1bt0Gao6RJW91n8u3GlUto08VpSpBq37yjIR5uXczLfTQAMwUZQm/1lJP1avLZxvUo8UibIFmpZZdqsEwITgJJIdAh3SjoCTnfh0HyvpoHjO1j2W1kTBMvyANFgTh32gCXL7TOQBJm2xrb61i1PeVzeXMrYiW7LBT9W+d6jBqupfZQ6c0OfIyBm7U6fQ1DJx6TTKsoVmnRGm+nPtuQ1Gr5M6tvolOMayg1A/W0XBr1VazziQjNNdrtVen/oYz9xQr2s65sp5+VnK39r7FB+X8n2O1ydtCJj8J1Ubobg2iS4x/JevU6d39JrJ91fqEHMuF2YyOS3l0wNqnVdKRxPrfl7rk5o+Ja0uAJMJBuGPMU+mTR3dcn+hMw/Su1TQQbe37qtFJLXR8fovqmSvVFLdT6o88aXWHsryyHuy4yaT1L2or6qTmdRS9oze5EFPDk70bkkf/XtbfTWP+tw+Ys/IxNhcTi3dTkKrybdNfyVTzukEYD5/oByG7S9SPub6ebmfK+fnZJSU31wq5QmVfRJIUZgUCywJ/ccQyTs6h80mbt4Ul2l3jrFxHTwTetfDgdG7MfgDgLABlg+RRvvDAvtj2BV8tVlgbx02rfKb8I89hRU4DwPNaXQAqkyHyBe6A2qFGS9Wdbk2YPV3tnyfPIEo7Py18R3YB+HjOeTs4ciNIVtR+LJE4ozssEFYsnY+Jrwe2eHsuINzfgeD7/Cx9VF8bHUUa9/hzGoH+/sLbIYe2HfAKjiMwr+8c1i5lAscbAujeC8QyXGULADOxE6SuSCYFrRqltVuvSmDVioUFIwQqArUUnGWSlX9NUKiFGqSrLv8hn5PnShTaKWsaGCVtZJkM0alPFtzVrC+Ya4MevA2cRqRMeQn9NZKt5wNqwzaJzNCNZhvUMX4KupX1Dm9pnLOHGslkwx5a/xoYz+VTdVj1qE4c1wr4ySSSjWVqd9hvld8h4u2nx3AKUPB0CEOzNyQ84S5AfCbTKewvJWNkZ0Ma47ys+CZiTEc+5rvSR1Ln8v3TWakdMRVEX1VzRmwxdDQmDN0S2dQ/q0ikJIhVvmmeeeW7bG1AVOJNCr70YH9qpAxqZzld9PvKLBn+k4pUw/jGDC00/RRObJ0lInue4mmqQ4EoNgcoKjffEFsHcxyTuUo4h7JkZm3UcYkuWlV5qDS/qV8N45qMn3HjVx3Gqk6WOdMbquJLporXzpZ/K3QyugS3X+NblTfmJNNGlr+aloVzpWk5wcoOUN56WyK+CPz3rJMpZwQ5/WsE1DV/7AD7okjQfqdjj5MDgey0YoFTaYvj5drs/YEmPwixT2GTyfKBYUtZfVRVffKOa2r5vio4MWqzVXySynfy2uYtlN1QkQXZwsf6igwGYwyI+TjFFlvbIJC3lfpsyWxvUaiCdn21PaUljnb+tc22qSZvm3EQLrHtHshk4UWfkR2QIhTFyLf1fe26R/92ZKvgYmNMDmu0aGG2vVSZhb1Lu3AlPPVIy3XT2OSkj7pPfKySrRqPPZqM5FJJE5ZVt0/ynopOkzsHFW3qT2fxwDmvjK6rkY/CgfaKZbqouqbkoOrsYHkx53
wqK4XIyd6ZnvLnOOI1e9Sfs1mRqj1I2CqQ2QMqB1h+vtxoyYgjO+HsYND3uXMOUZHHoOnsDEUwgZRjkLE/sgOY9wVbTX0WA8hQGSz6bBZ92BP4CHk9wVTWCZ3GDlToDmNtmCb4ObJ9Zq2qb0UYlbDGv+cbmD1Pq3QHDF6GrFwIzyFKCIA6J3H0oVwtYUbQ0I1AB6EDXchrxF3cRu+XQzscGaMW/J5h4fWR3F6vYv12GFvvcSw7uEHlyKMSAZ+ozJQtYArlG5NUOvtFk1nrxFWXsfKiUQH0JWzEkIsnyWgbQNtSE2uoaJYtOItjoMA5qnilrqKZ7n89jZUBJGua6pz8bt6vjw+6JsEpKD9LQrVziLFczJ7IoO4uZkUrcgcCkURb6rNJpRlrdSzjMKaracux8z5WrcuByrGOami1kqnkbxvMrtJZT3Z9o85Ra/LsqWuc4b1Nky21i1pM6E5AZJTxMzsU1q+Oon4G1g5dDUdC55JZcp1YEcqsX6ccVPnc+SHorE2oLvIZ+U9JW20PKpFjm0lonq2NHrTwIEmiTKrBrKht4pSrfSp/NGCZ0rjEZX6VupQypjt/YvMn7nv5nacKXtZhophnZbnTs7rYy6cRllPlNFTBxajoJvhJVmS1iE6jcKxX4guYcN/xkgv6mravjDQDS2U7NX9zTxTq8e2KAz1tzrBYW+Z5z/9zlIGOVbH6nsTYtRRlXmGLmQmtLQ+SpFGHHgj6S31LuvYJDtLrGQLlI1Qo2lZxK310e2l+HXicIgOLxQDzIm+Nx9AsnWqTqNKPUz/LHVAjf9KXi3KM9tmxe+y3CUfpTJKvQxfqXYyk0+cny3rcoG8Z1DU1UQCqlyXxrFWk/UTUKLNLO/UZFGkS8odKuc7ntBQnjPvK47NM6kgB3HzjFxl1e+4sJMknQFgJ34LPVOdTNGl0vxa8kOt3ybeOoTw19+ao1sqSOQBkdc+OMnkOKX10JFGHjAJ01l9SLeZ1AM0jaIWGwiYd8Sj+K37QdG/E2mqMjbfl9pNy68ZZ42BKk/p8E3ypyJ7ZaxlbIf4rlKeaAck6/bhfJtpUFZkqMmsmXqUx6ENOL+e6uU1409GcuTw6OBHhmekzVicYzjqMPgORDlABBTK6jksTWMGhqELm1Yxwa8d/DocY8wrcMK3aCIXDkJzGm2BmQmdvSn+JXuuZmBN+M4YYhVpnpQkJ68kg+CZ4u5pLnkYPYfs+3tugX2/wMovAAQvJDGw4Q6nhyM4PR7BwA5nh12cG5bY+A6n1zs4u1piGDusV30IXxscaHDACEBtKy/1IDetDyN2bAmZlK2zxWjWAo5glPnkRYQgfPSa3m0whod6KcWXEZKxyo5Mu7F2finDZFpeJIGVBJ3Ln+SOrcJSzg8h3pyjjFIB8smqIVMwU9VxqaXjHGoGmjnm6fXyW2lr6KzoUYbc6hnRgofSa7SBJ3RT9JWHa7Mnk7oY42JL51WEO8h4mkArVXEUIda1rHvRTnZAgsKQz/wzVeg8NXjLJqo0edVITu1rleVWpFtzOUgzqNnNiZIR4WWQFiPQ3DpH+0l+tGA8ceIb4wxQZdMDGK92WhKZI0ZyzVHE0j8nzqTIV52qW9V4ooNpVNKqIiBTfUrjC9PfW6OWinNby6GLXuOt2jNFueeMyANRfGfisNIDwMoL07I81acm/YuL5QClI7cYtEk9dP+cRG9VSFDNTad5LvJVWpLmeOI0yvWt9CNdT/moRJYwsrxVMi7JV1+hQ8k/iu7TCRElc+XebXxiCFNAt695VyHf1DXzbI2/Kt+ZqCnVpxJdWOQPpWMtZ0R2m2/oeusJDacH5Go32EKOp7oqWhgUenQyANOywfD7lJcPcnxW+73m3VT2QucCMAMz/WLVdxJNtcwuZVpZvkqbVcut6akH+QWt8/nCxpyzF9Q5y1as+G+bNMj1MMvT9O5SxkYAdERG1cFWQ6GiJ+patWmKZFB1T23bZQIfsmqH4NuZ+ys6DxB+zXKstBH
LCYD0TIWntBwvy2AcDmWUSnSepTGJlPsw9Ci/k44rRGFEe4FV/VRd9fJ9nf9ztP27Nkk1GZcUdo/u41bG5nLWHEUTU3mu34ojrLxWRhqVkZMAZvlHyyUtS0nkLSfdau5Rkw+p2KlMlPtnRe6wKQ/nsss7zI32M4X4Nn9LuVXWr4She6Qbg8AjYRwI1Dkwc1hOFutLxCanr7zIc4hUYgb86DAOkZAbAtYu6w81oX9om1ahOY22QSu/WoNXzhsPpr6v+BH0AdnTlY4rxoJsN08ITNNjRNeFntmRxyKubdztNvHYgxEjjdhh7XucGXbx0HA0OI02Ozg7LDF6h7PrHeyvFxhGB7/po8OI4gA4l5IJiBuxwQMhBDeWW8pvhFoXFFY545+EGFU6DS4guqhEfL40lLVABcWyx++K8K4qYZmh0XVKikmVX54zTiNOnmbtNLLlJcMPlQyP0+NplWd/XTDmLJi5dlCzCaydRl4rzFxHM0grBarLtE7brMfzJc/MQmipyjVbH7lhxpisHmvlFxmGtRDWObV0BElZFm2wmLqGOiY66IGHrnutTHJYk1fq2CaQJ1snoGI9yINTYrK+X8lK1kuFZO00BweSWyA7kETGxNk3UWIuDvhKo9oMaDodXZTlC4uRrGVRMhw5OSFTpBEQ+nlJ71SnUHZOSwam9Jls3V4jIWvdoOnOWc6X9NR/5bU1fkrl2lIObVhpA8b8rbltlExV3zZlmcOcISXfUu2Zrc3iHay+PSdPorFlckjoaKSKo2k620/1+szQbTLoTsY7h8mSyIfhmPPgbdbQJVPH1Jd9OJjNaRXvkUGKOEvStZraKaJJ8gCDrRPP8EnmDdtEqlzlwMS0ea53dlhNaWHkUK0PKGYyVTOymbOT18MO2sqIRs1PIr9quqloZ9H3qS5ajisen0I3fPyP5u+i7rM87otn0gCT0jnz9Zq9kmwV2KgcPYFT0bki11OZRkoOk/nltVM6T34XMO1Q2FyTaKNUl/AyY2seFlWGqv8054RvmIoIhvzXJO8v23vu/XKpJsepQh8X+nByTgs9Splb/Ygm1cxN2+Q9bb+HY7+c2EZJpnOijXE4qBUDh3G2WTs9191G5bDhnzl9fSBEZlaeN7K3iI5NCcFLZ/9ENhXlmfA68gR9tGeM86XQuVYe2HqnSfxUd8oizPRn9Rbdf7n8TZbXS5qUdKz09dJRxNGBBKj+DVj6K52Z5Wopc1H9zcXvUoSX9FOqaMoDJW9s6XdG50m/IAI7F3ItOo68XFPmOo0DZVtiCI4ncMxDvKZ0T9WhdwFoTqMLxAUTeYvSmuioyruT8meELfPGDuMQr1FURg7o4yhLIpEkm/q+X2Lte6x8j1PDETy0PoKNdzi3WeJ8dBrtrRYhwsi7sN5xoBC6JsarMpi00VyNnkgOFrZCbc4wKenjk7i6MIVfQZrJJYbknygHJ6wFCrExpLRHf7oelRNd0m9l2GhnB804O5gZ2piatH/NWJhINXU4R68LZdpSCNfAyixlGEcNK0OcS6M80VtOIhnmSZHFYyJOdKd4PhVt4k1Qbz2kwi+relg6yfboSUhH7z4zJWNanElZ8VBS/GkAJnVVAw9QrKNyYpi6H1TGslOpgVnJHlxrt3TxkJ0vGhGZ7tkAZCCv02aARgL3gee97KSmnUblwE6KL7ykZqDMzJpEcUz6LRd5jLQRmXkLnWqDVBvKBoh8X8to3Q5lPy2tDfXW/GNqyORZ64L2PD224oBQ+17toXR7MmZm6qGfNvKJquUxqBhQ6Wb9bWmPeI+EWttvS19DljPa+JEyKQM8HyPP5gJ2oB37rWmqA+qiDevq8p7IV8mZ4ABe+NSXSfd1XUc1aAqyROmFJGLZlC9J0XQfIw3IpI4zdbLOurqzg4vfut1mOc30g9yWpOiW5DrUNXmn0g8iV1GeTxe3wPAJ7KB0JFiHIqfimgFC4lFYR0RyMKM+uVHUfVsZzceEv/XvVC6RRWx+m/wnHO2mLbQ
xywKLfmh5QE9cVN4j3456rtxIhIo+S/KQqVOdFsY6ULLBRHuLMwvI8j3dpwSkrqOpQOW4KmMV/02UJ7IJJHxe2Ylv8kmRXel45gGByJj0Wz0wO/lknZlbB65cHLI6WRtsl+Xdxuf6mnqeEZalZ73COVK94Ot8fiaBuH635mvDM1keG1s9lVE7xCt13gZ5XwFmNvyTbAfl0CaGjUw3TqMtG8jIa0U2xbpO0zmovjADnuWNYmdbqYe9JT1oSOABu3Osur8glbFcTNmhdKuSvzJpKDK2wmMUV5Kkrhk9uUamTKs7uZier/A860pTcV7Xp3as5V3hGKQx7yTOg4v9OkcZAUq3AWm8kd4b60EDhaXqPtjZtKns6FzqgUOiOY0OQKLnIQRIGTUjfw/Fp7WbCiZ0FBJfdc7n/kIhOZaLd0onlCVsK99jzy+xP/Y4s9nBqfUuBu9wfr3E/maB0RM2qx7jOmRap02MDJBQUtV5fa+Eu66jCKjUyeNlPRtbm8UnRZ8thsRWiAAtO2jZOVzY6lK2VUxL0gqLxc6+cT4XZ3GSISXKQhlaJjrEsYrEKioTC5uKWZZX1WtyrM7R5D6uXJsh5GEFxeTxrMQNic0MRKEs4zOTLaDlEiE71ojzIEOdJyh6ouClSVnne1xtB+SttCibLtYn6cSYeE5InSOuwvlqks/UeWOdxDkkzjIg0kFogDTQIt2PDttHJlWKSpXzAFX6RbIX54zE0qFB6k+N35iTkmMPcB86FHmAB0o77/kBFacRGfnJpI1hhNk1OTbGU2UQFPtw7p9IPBYG9Lm+nA4IWQhm6lX7pokKq99L5kD3FxwINge0fdxcuzYps+K12j0HvJvNd7Ign31FSSfAOhNmvm9IlWbS4hWP7PT06ppXBlJa0hN5qTqLXfytlSXpmSzzq4O06DRKfCUOSS3X5IXKsOUU/srBEJSQhWJSQ5D1Bpt7WDlBTCShrpN2Wpe6WcneVOaSHjXHtZJb6ZRuc80bczOmrAxizg8YvXFomZc6sjGyWSLPgGxwS/WVMjaDKRfep20CaffcrrB03FIsXWc5aXWjKq/mgaRfeFruYhKnClWnMsdiXX6h0lax38nnxB4SXhanJ3L/krpJtJyRHUX3N6d1eStL1tNMvtznKvWCfqE6lvKgcs823bqF/7YuhzPv5O3yu3wu/p1ELig7wkReJB6t8GNmOVXw9AFbTi2DDqMbJnWel+2GVozMM9pm8kiGSHVXtZKHFE3ELkh0U/Rh7UgrysbGjsX0eEYvVOtYY5airimS1CPkmxHelOW1gJm4Mt8onUTlZju1Nqt0Z/mb+o7qqwCmvGrey1rU5ud120T+orK9KuxhHMCiW03/TgWs9u9JRG61zHPXp/LTyCF1r9EPtXdnka0LF4pOiraUP00c7AAiAo9Bf6bd5IB4g0TkZ91V3RBHchDHYxedRhMHew9jYhwGzWm0DXThBK2hlMWH+nTZEQmg6CDqyUcHkocD0LsRy7gkLSxP8+jIY8Md1r7H+XGJvXGBs5sdnFmF5Ner9QKrTcio7tcdeO2CERWjjNKsLCFFFTGQZ3KkThVBnRmT6x1qYuyoPquNIXVvnUj5+7ksWxSCDADLGTRS1wuHVxKmJhx6xjApCmeUg1LWyZGS6mskzKSOc7/tLAHn/E9SRgZA0TQqiTmnBMsySPjj3HUq/gLGWCBjIW6BkEg5SkKdyvdHmm4R1NXvEWBqQuZPFVYOUxT4Qeknwe84t6PmB8dZsM8h9ulUljRIC06M7CgCUgQS1PEhZFMmf7iZOdCBQ0ViIbQ2qRCgmHGcqxLpA+PAzBoyRBohONsWlAf4Qx7cp7XWMlsnjytjL886aYdQ+K6ZPSkN16psyn3VWAnSN42RI3QrKl5G0AHCLAVx9HPFteKeGTEGlOboXIPU2nRmwF/9zjbZXT05U5BavZKltP1z6e3xP2lXvciIdpc9SvLVRAOKc0gvmwSlKBNdhWphCjmr9U6qt54gSJG
SmEYIKkPAisYZWSH3z/JxUXiJ7D1I7tb6gZYzhS6l8vvyZ8YJqNt4VlYLHUR2ikOEkB0TkDYWWbWlWlr+m28qgguPABO9a7p1QZN07HjqDNvCy5N+SsVvTJt3qoNZ/ZnaCjYHxxZboizcNoftrF0QlaDwRmwbGbgkuwMAJHdkoi/nz1bKNzklVVHyvXQAHN4eU8XXNFJloVLWa/JM2sSW0ZRfl1v44hC6uor4bHWpS9mHq7qt+PYcn+n6Jj1XnlO0M48KLxTfE7u3lFFAjvItyhIiKov2AaCd41udyKbf5uPZHGMH0SY9lstkNonRddb9m8zD1fbXDiqOUY95Wa06ZlTzGwGwY4Aar5TV1ZEpB/FkpU6H2SAHyLot22/WMV7KAM7C3ny/jITJjn3Y8ZXAF++OE7qK0g+/Lx4GNP3JW5UWDK+RKvdEX4ByHj5ZkVMuY5T3jEibYrgBaQdjJqSxre8p5zu7ADSn0QWiagwcAqVCST/nOrAyzCRUezP0OLdeouMenfPoXYgwWnQjuNugdx5r32HwDgM7bLjDnl/g7LCDvWGBc6sdnN3fwegdhnWHcdMFYbVxMVEWMlOynYdhQspnZJWidOwsTFiVfSLc2YbJlbMGteTBhogFnXSCRHJIntlcjlKByiyrElKmHvG+ctZK6lYUKZWJKQ5CSCkTMlVPP0rBUFNWc4JNW+AmFJsK40lxKlWeBSyBS2OA1Z855o0XiDChcS5Hvc8UshAlZTkqHJNJQx9WOiBNXsyGDGWqW0Je/rwNxsknt7NqW63nYtsHe0nznlYCVjFOBh3yjqpVEo4rXaFe9uJzIboor33Wf8uw1/Rs4TSqQvcNik61Qi4Yxxdz+gYzAX0RKeIrLyauGEnbDbOtMLJJ1zEeqwiVSWVrP4mVXCmvaTIKHaYyiOT3zKeqp6ttQsYo3QYyBzPPlPUpL217tnjuQptJ3h+aiSc6M41Tk+OPzHFeGkCxX2GmXevlrbYHoepIMZEntQ8U/Q6MOGAoeK583Mz21787Kf9hCV2WXd6n61jjT7K/SZWPtFwroCOgdDQmCy2QaZLayoTx15iwEMLMqnyqjrUspxfCB/qdhW0hst+80rybDrheqYsqQ635JyiXSG2d9ZwxNoyNUryn6Eu1cubXKD4R/o268UADmuyxya2k2nOyNKj2XoZ1CJmlz5TqVEZHmKiFmo2my6n1ESGnb9B2WVmv2m/ziaLO5i9PaFR5wbT9J/WhSXvPOdWs2CFzPHEmVNuqPD+ju1XEi9yQciIVdZhEi0HRTpfDXszQ6TAmPD95dXzljDKL9cgRcCIs+OB2kyX9yWlkI/aMbTZXvup71an4jhQBWNxfo2UJIpHJXGm36f2TyK3YZqXjj2ZsFYIS2ZzJmeqj342ZTQWkaJQtr8P2w/Ie4a0KeWdOyKvrH9BOIqMuGXEjKCR5K3VzA0zy9PKb4TxyjlBJBcEwydOBsLvrhaI5jQ4Dzfdkzxv9d0AHqr2XYZnbfJLyXxCj70YcWWzQL0JUUe88CIyF89jphpgQO0Qhhc5GWI0x0mhYYG+9xGp/EXIXrR38JkQXuU1YlhYECqWwyEm9CDYvUU0Z6Dpo+ciVnQG0spZOMZdIsaRNogvmk985BKUNmFwSZSTUbKihNE4ptOaUxpxAKg2OJPRQN762QcvJyfp9VR9Vxyp/biunKe8WRo6Nw8X35R8BOcF1WfbJd2lygauFrBSjZnDI9+eiHw6CjmBQ52zZ0qeKH7Vvcr0qHJVijFwKLEfZ4Vk8ZP1+M++cfEMPzCgNUHXeEJs7pjBQSuVcq57hAcq86eJ6bA7X9NLDFIXBYfvYHCmiokPi9UlUTnmsy1ce15xeZf6miczR9Kh8p1b3xHTauaVkQqoD2b7rlFFDuc1Jz9oDB9dX3Ti7vBCY9Gl7SxR4pUEpNx3CoVWF7tcz/ah6ovKNdEqsKEoUy+fTezjXd2YJbfU7h4isqamBUq6Z5Vb6t3YOSQJ9wMwuz1Z
cBqJFn0v36EHXXBvpeisBzeJsiYN81o2v9WT6VvEuxb8mMqakSbxuI8KERpEmczKpVjcqyqkmkya58Grq28jtQ+hknt5XW/Guy12lx9zLDc+pS6XzpAKr8hT/61MzdTDyvnSa12Sjfm5ShUKPHAYzMs44JjwUfWj2GVtuUsfqutid8b1V27Ooa+pe+rtpqVDQ3dQh91Hl8J0sjZqBlmc5ahzqb9FuZgJQG7u57KbtAPtXt1dJA3O/KplpU5EXSi5BaILcP+W3XpFglvVVFVZ6f9nvZ7NIpTJXBuO6fiU9DK3KOhYou5WTYpJNQO6U6imXuIpNlFQWW3kntluKlrW8XG3Lst5SJ8mzKbqmqEt1sl+zmdE5lI63RTYZpGJXNnYQqPLrT5b1LfmyvGfKn6obFvJzq1+9KLteMpZ+z9W/5iwo3geGzeelyw91XiVI15vGpF0IzXeRdyVmgAYOTiMgbCCzCDKKO7am0iHRnEZbQJODKWoE3zZOLb27c+9IwtHFxo2JWyW/SeqzFHMdUViS5qJEYAAjE1a+x/lhif1Nj/Wqx7jfw48E2ji4dZAUNARGLJUEiSDRf6WCSshoJWE6ss8dYrIzwJi/N7dDiFHcc3Qsciilbbh1nhOCSeKdw5tjaLUuulLOB3rehU663pXr+Xh7lNWhoBWmiapiK8hN2yR1dDiDRZRWrR6VsnChBAOtZdmQEpalNtLvLw2Twyjt4hHzjUIxmDIf9M6KEJ7trEJ7uSV+3zqytr//oICQ2SY7jLKL38u5X2AMkomBqA3HxKd1w2pSFml/nVcoyi9QWHIXwmHJ5GsyHo6yXICyZipWgBxmy0aVf+7Y1om8vW4NksLoVJ+wAiD/Zb0kNF1j019T/4h9JLGYXoIX7zPNfFCbs/pTMy71+UOhIuvjOalj0gkHKsqpTJm0Ww3pMRbfqpFr+p5Ed/WyMlrGFKDynfCOmXeLbGFzxvBLSv7PsoMJomNI6VjtKNLbTcu7amJStQE5ysaq005a9Qxh2iQHyVVSdTIyjHMZlIwzEzD5cXNu0k9VX+MURURGNpFxqqnmmONd6V+AyXGW+peiT8pf5jTBNZT+kMkDua3mBCppOhsVcMh+V07I6XPxX5YvBZ9OqlKhPZAHoRUZSbo9JK9K4cw0onim6x5q4D15qH46DPhoel3/1rpMfs/Je7mvjGwXGpR1NfKz+HbsB3nTFwZGJHmfd9zj0G8r/XPbsh+ddmDunrmJHbMciYt2M+etrtD0q7bjNvlhZDQyfRwlPmYX6pU3kFHVmKvn3Dfjx0xXLv7O7rpZtYEO+N6kzkXZRS6KY8xR2Gwj1hsyFnEcZHf8EKn3BdlOkAgkw2syjlK2TtIpZT10HfUObZXy1/hK+TsRJruU3k85OMnuRqfroM8VZU58JmkJTH/V5Y/HaemZvReo8GjZdrqvlXpxRn7V9F9qW/WuuXGi8eNq2SV8UNZPyx22dU1L0jzgNkjOItOems6ep5FGAHwPgOMmVT1MWxwWzWm0DaUQOcT9DxeGwTRTi2IjwjB22Nv06OHQO4/ejXAU1oo68ugdYeM7DBz+bbjD/rDA+WGB9dBjXPXAXgcac3SRMJ44jXQ9ZLcQGdxlRRCMVNK3lwqWKYXIEeJfcRSNgZGFvslrqna9qQnwJF6Vp7dMvG23aCQ1cM2KW2+bC5ffpfKTbVceWqnW6i736Os1z/9cZy3OUXla1T0fU/qg1DkdT5alqfcpQzPdVdajUsT0biAOWuS7auegZBCUu/EUFUq0ysoNyELTVn4GSSBXljCV92w1PmbKV/3YdOchmV0UXjO3175TGWBQ7b6aZtNMONdIQM4bBGwZjKlXaQN70h5FuUpejIZF2va+Y3BHsd8xsOBkMJEazJF8gIr3a+GiT7O6kSnTUYwnJYOgjSY9MJLfylGdHNuGJqRO6rrTpIkmfcv0TznmaCwi95Mkj6b8y4n4U7pPCSN/i35jjCuq0HjLO2t9ipSj6zDlqZV
P81ztXnWr3DSJrozlK2llduCciw4qzlOtIFSpmjgShP0U/7G3jiKKyd7BAOkdArWjSPhxjgbKQM10ACTiiDuk5aDCW0KXmo9l66RArR0LgVSbQJqUsYTmQ5lIEjqY3czkGDCDDD07XvCrtgmyjpffDHRq5r/jfOwR9WTWnbm8+eXGOaQH4ZOBqCqQ0R0V2V1C627NdGmZPJnk/Vm/0FTPpToUx1wp+4QXKSXeTZN+op9L2R+Lmnlh/vtbi6cvzvDPNvVmXqjk3OzW7eWkJBfHwPQeU+BpwcyyD7GHxPYZVX91SMtX9ZK7cuBZm1yeHFfqTsBkEH5gXcrzylbVTrVa/QvRULE9c101DbKNSGbjiuRIOkiv1OoPoFyGmB0Oih7l7qzlphvb2rsGLfuSU4zUjpqZN6iLOsJxYIQu/tU28tZ6FvUTXZPaSq3YAEwbktZBmm412V0ci6y3u4ZSukamzTmKpXw9vVLLRS7LXumP3vKxuU+3oXr3ZCyt66Rl1TadNam/kslJ/hb3lvwSy1mWgbUc15ekvkCySZNjaMz1D04jnjqN9LtSpBGHoJAxFGhchIJzB/jlVO0dBs1ptAWz9DygYz+Mdtj6LXYAOcZOP+DkzgqLxYA+Lk9zxFjQiJ1ug548jnSb5EwCAxvvsD/02Gx6+HUHt++AkeA2IaM6NFOWdlMHeE+Z0fU61i53InG2aGaX90oyrsS4wvhxjSV5wI2c12jqmR+t7HS5tNc3KWc5jqF32oHkooc1KinulEJ3VgAkOVAYSXqmc864KAebB3rO9TMlDmIiJbjy3/gmXSe5PvOOue/WnAmTZ0XoxTYQg8H3uTxha3OaClr9LdPmVFV4cn9VPqt36sH5pO6Vb5tyzJxPj3NxOg3U5FuhriaCpKZYEp+Q5RPYv9M12xVM6sf2guZBwCYK3GIsGj4onq9C7+TRAV76Xk/wfTCMuGew5xwF2HPOyzW3NXCtzZITKA7ePQF6oC6DT0beYpuRt7HVylac26XRAhTykOoDI3tL+ls1TGRp70x0pNkFrnheF2bSnyudQs+a59/qvsTzheWiy59uIitLomE4KeMcTP/NhuIs75d1K2iYvl3sGMS1Y0KmXXqX6DER+pzeT3LfhBbFCe04YhVdNMruo4h6lpJh7zaY8mJ6xxZxXPQLrbOSnovn7NbSVE9CLXI1fQDztK+d0HJV8QNgedNOvMQLSv4kI7lwFFm+oIN5RJeJONgASfdHuSM6SO9uJxMc2vki3ygdQMm5nPlXR0uXg6DSFtiKyItTmcF5YJ3Kq3U8m/urKPWJlKfmQAdN7DfyFeeDfr3SfyUPTPSprq++VJP7tTpsOZ90E6tB14ycKR0Gup0OtNn0d5U85Kj3dJ+cOpCCfM+yn+brrGl0aHmP1E6pjuMWOcMz52s0Kb5X/S1l0/KaCjpEGe47JDmdHbyY2K4TXjkUDaY8a3S8tgk0DxQ0OfSAWulr6Z9m/NExvNSxj/JInGV93mJ91gbS9Yp6JdRJL1NSxwwlSyk5H+DDGIzGejWqtrKR9coRVvR7O96gfAxYHSR9sOhn5Q6nVUdRuVqlnMwsvjHRmxXenNRRQ52bvb98rsaP6n0M4QvF84qXTb2Vo4gGOcfoNkiJrWnkaaQR5FnlWBInk4/l6Ajjsi7TD0JzGm2BSWjJckxVWTl5Vt00qzfLTqoEljEgXJhNHIkw+CBxPRFGdiBijNHg6GXXNN9hcB023GM19tgfFiHp9b5Ddz4YBm4THEfCoBL5YwUggmc8GqK+lwExxy2CY3Epkykl0+bI2Im5YxZ3L8ecvy0OJM7MDaAuzKVs8u1ySVrHFacRwS+QlHfy/CMblxNohRMFrj6eU6pGWWkjRe8couozq5hK4VNe1gpZC2soYa7u07RLr58ReKWBtO378r1sFGmnHIKy1MsEtUEp30y0pu2zCXO00sJdhclOBL1+pKZkimMqr1W+q5WnpkMyIHQ7QNXL8IZWolPHzWx5q0p
tamXVvleeN98oeXRbpJFg4jSifCx9ryf4gXMk0iLP+qclbBN+1YI0NGLKeSLh2GMemAcFK8eFQygOgsDWeNL3bDWSa3KiRM0AKfupzEKi6DuEidGc30UFXVTxNG8hH88ZaLluZJ9RYP1Nsk4YdvpaQSalxyayu5QvemA3U5YcUaHpk+V9lkFq2/syutGFj+tZ8LJOZuZPdXzTpbTQ1JElia8iv8lkzEDBwItOJDconlNGr6FRibKPA7avTeRtcV3L24qeqsr3A2SeoVP6RiHY5+SPyPQ4sJvqyekzkzLWyqf4Mul4oUmPbAv0CBHIQIg66lS9tGDV9JFoG5/bMpVdObzMzLnm+ZnymuMqTQt90mm5wSgnSGYHMLpMHMup9aw5Jnu+HIjKKzUvJn4jc21ersCWu3xnBdv0tdaXE4eQbseSFij5j/M7a/KzKEtpA/k4GLSRNDCR7bYNbf0NLSrnjIBUsqisqxlQe3VN1d+8ula/Gn2Q6TMtqCpv4mXKvCyyKfZT10E5clHna63/5nRfUY9JqgtxROjEwAVfT/ijVq/DwOXyljawi3X3fXQUyQR3ciAhRolKpViNq3KFaUSwdxiF0yhO0BeT74kX5Pyg+rFmp0J+TCYFpBhVW4UKXazeR/ojilZ68l3aBzARNmk5Heuy85TH52SslF/Vz9gxJW/JvfKj5Lny2Rl5W5MzqtqVcUJ+nlQ7J6fRyGbs3K05jte5vjwNSM4kknvGrNvYOfgujrtlrH4B/N6cRttQMmMpbC8EZN/Fk/dUxWA4PYYZQx7D8jP2gCcCOx+YwjE6doAHBgrL0tY+/ht6rDY9/KYDrxy6fcTlacgOHa+ZClEoU3auRCanIsxW59tLjwoTe7WWMhrMbmDlNAoCIHtUs1c0bLWNafihkMQo6xDqycqAlo6YB64MPwDeUYqEEUWet+62TWGM+nJ2tGIUEtgo59IZML2/aHLzm6fXK8o5GWrxWM88bhWOcr58ZUVhbhtATw1HZbCLUumQB3nxXPmuPNMJu1RIG0OxDNt4wfyeE+zpxvq5A+mO4nulw6yiDAz9S0NFGbqlsTpntOqxjf47Z3TPzrhW3msMSXl+xpjON1ilzGIkIfQ1t6EU7UfLSLOOwBuV/6oPRhMDFzbjpownGfCIXJPwbR3mq40qsyx2jH14W/+kKekn7DHpY4UBLDQq+EQ7AUpDp3x3dUawKMzsoLvsQ4cxFoim/QrF8bbHy7LId2fkoimSGKVlny4NWN3foo7KtGVMZvYKeQlDazZ0FV0dyhovGMd2XgbpRoA2wlNk8g+UTiNLk2lDmJB49enSeNcDFNfRhC7FS7fLg0OgHFCkxyrfSu/Vx2rSZdZppJ43fXJbmZJNkGnCBLg+2yvcIcgaIC5bkzpEHpFycW5nO1NPW5xGQJWv51DTW1CyAWRlQxpcx4G1mz47JUz8lNalnGUlADuAVvUtnUZpq1GdHiCWK/HBtrLouur6b+Mh+WRRnwnPzunVxFuFDpt5Rssn61DgSVvqhLjJERJlSnaQkHUgabtZORnKutflKuU/Rf0nzpJKRAaY63281HdFv53lZd1X9akkF7J8ZUJynDBR1PeF/AISX5v3VPhFvmXIxDN0ACqpMqRuKlKjoMGMOKuj1PHKFnQuR9+7jnIkvkRkJzroBNo595XhV184imSCTKf8KCZetZ3jRraRRjxDY7ls5JLNXVRGvNbsHC0rSmKa8ZWWTaM+ZmO/JSeSn+nT5WdKW0U7tpUMm5YxM0Ap17QNUT4yp+cSChmQgh7kcsxhpe1YNwJuI2NlRrcJS87ADOcBCB0kCT8H2siYnsZ8DDj4jkB94B3dXw6L5jQ6FHKjcvXsFKYNdIfk+ZtY3yvHHQNLD/QeR3dX+IwjZ7Gz2IRoRjCIQhLsnkIS7CNug6Ubw28wvCdshg68cXArQr8HIEb6uA2soQDVQcBWqLkg7JLxNedsUR3bDQANnI7D9+IaS3Eg+Rh1JE4j8SQzpkp
OBBwAMyhIkUbZyNJefO6AcSC4LsxGU5xtLDtwqgMQZ4+z86p0GgH2eGJ0RC8vAGO8lAZQXYmzvbe8pyLsykGVvVYR3DR9bbpkhMiMoSHvhaIhSRSJKse2mTWpY+TBbOyxpacx3syjU0PCKLnK99KD098TWs8SKP8VXjO8lM4X98vPYqA0N9ggxQcHlkXKk06w4bPZAVytrgXfbZ31l+8agynnDUtGUjSc/JAdixKBlJauKTqCpsufJiHMOkfaEK4l40kbGrUZdTFGYO/ZZoAcCrpfUSbyZFCb+KQY5BcGc4k54y7fYIpi2m12cFA8N6kLFceuqOM2lLxWk5fltbIMlT5dRmWl2VqU52EH1+bYJqXlRIgthq4QIvIToPmHos4T/sxL0kTPTSYiCjpNR0Hqly6rcRopvacGYIZ2mi9U2WfpX7QFlad1WSblLoo/1+Yi4yq6VcpZLU+NVoXu0/kMfZz8AsVoaUlYrJwwk3V8Ou9S6VwW+pXRHXOyuwbhZ+G1sq8lWarqlGyuYtIu8W7xiTn7RJUfgB2kmTrlAaYeBOmcM7pcofwzfUdha1RD9QFVBvV7qqfYOvFmeLzmNJpMGJTROfp7qdyKDmqCjAmg5CBhK+/LgXet7qUOmYOqv3WWZDlz6IiM2b5ascEL6OdM24pcRpZNiSYDlMwqbMTSGVrKmC18UkapS51dtOfFXkiRLVoWFvWj+ul5lDaQ1KMLA3ShAYnTyBVOfuM8qxdgEi1t0nzk40wHLpxGqC5PK9stfVLrXipzGmX5VY00Kp63H8x/tYM3OYPkfLELmHEgaadR+d6yXVRZalFH1fs1LQhZR8zIWytbeKITzLPJVonLE/U7gOjogXUUxWO38XlFjvR1smUM9i3HcjAQnUbsOnQLB+8lcCPbM4fF3wqn0Rvf+Ea87nWvw/33348bb7wRP/zDP4ybb765eu8v/dIv4fu///vxwQ9+EJvNBp/zOZ+Df/bP/hle8pKXpHv+5b/8l/iFX/gF/MVf/AWWyyWe/vSn4/u+7/vwrGc964LKxagwh23fzG+0he7lBaozuRHahLD9bSwEe8L+ZoGHVkfQ+yUcMVzk0gV5LLsBHTHGzmHXbeDgsRp7rIcew9ABG4duRej2kMLdXPRWipAhth0pRAeo7fl6oBZSKvUgIIUQEgO0iYzJ4qTi1Pmz04iTY0nKkhRV+luQMwkimcURA5GQcxpR8tyL0A5LpYCxp5TXQJxipi0mglcp3BmDsOZMMgKkFB6KZjWDeDKzNSMM8+CajHDMhkkhlGrGmkbNeCiuy3vMTJL2nIvBLsqzENaTahQD9nljr97DuJj5tEZLvY7It8xeO8jgB2J9HadjazjQ9Dmph7SzdiBVeEjKMeGToixZKZadRb1Pficew0x9p4ZiuncLTYTWNgkkB2czhRl/L8aiQ9j6M95nltXo2Rd5r+qDYhA65TQy67+1oZEcSGz7sxr0JeVb6aMX6kCanzlWPKqcGuJolWeNsUjIg8qC1qaYcr1mPLGSJSUvmJdsQWFgcZS9c+WqvpfDf6qz/GWZ9GPGaEVengZMDNXJYDs9Q/Z5qHsmcoNs4RVtgWJ7bj0w07uZiNNIO4rEYDeyrTAmNCpyS9PdOo2yMe9dUd85GV+LPpiTC7qMxfu2fSNVpXz/QYN5+a3fZcpTKVwRDefjUg+JcBC7wMlSNWQ5pB1GLOWV71dm7ZONcpjJpG2IhM6yOz5WDJpNnpRi0JbKXWsDVOgr5S2cQxP5Kvf44llhAqXTbT8qDYYpShk5JzOl3Ol0KZ8LHiknXwyfaXtqju9SZABP79Hf1fVQ8iNt+CA0ibJKy520QYS2iWp1TzK+Imd1neR4xlkytafmBK36dGErTHQKivMA0kSnipCRlwlP8yj1zktqA624cPiToZHQw+i7mq7hst65Hae7N4cHTL8t61j7rbFFFoq8kX6a0mZoB5I4Gp26r7YDsq52OSGmI29GVV81OeFUvc3ExUzZ029
dR1K8rOslk2I1XQygdCInFaLkiXEaMbKjOulVuyJlIr+K8ZjxwUz6FFv+0nWE/W2fVeOoStuk76Y+k/V+tUwz8lLL3pTCRRxFHiDv4dY+Rw55TvUPUXqR8D46ixCjjHwoCHeAW3QgT3CDi2P1MuRxOy650+gtb3kLbr/9dtx111141rOehde//vV40YtehPvuuw9XXnnl5P5HP/rR+O7v/m486UlPwnK5xNvf/na8/OUvx5VXXokXvehFAIDP/dzPxRve8AY88YlPxN7eHn7wB38QL3zhC/HBD34Qn/EZn3H4whmDsTi37VJNuRQwArB4JnUqophQEyAQOjCW3ZAcRL0bQUBMih2SXy/diI58jrT2hHFwoI0DrQj9PseweTZ5hcRJYwyoDmnAF47VUrAZZ0vyZHN0FBVOIzDH8xwNMgaNPhliSQiw+g1kpYRAFxZCEZlII+1ACk6iuDRmcPBdiGagBXIkhJr50UojzdYYgyleqymSYgBQGjlWwZbH9dmcrUqMimMtfCgMbjgW1ux8p1DdwnauLpWyJ9alyDZRqfBYGkbzinBqwHGuvzZekM9NlKkoLqmOMTQOLwwn7XAQKPQvKPpuUwi6DtUBRoVvtpZL1a+a8LZE4ivGlAdVecr7y3MzZUkyh3POM5ak1JEvaIzGhwtRGNlpRNZppPklvncSem4MJqjBuTUWE13VzI91Tk95rerkPQQ0/+k+R5Rz6JCv9AndR43RUulw+mylP6XTB7SrHB/EOoan9csvgOdMOQ5TrvQd299zZEjZ19RxZYlfbdbaRAKS/nxFLhZ9c+LoTrO5bGZ8TdSbr8i2st5byMqK7iaqZqQ06CIHSHL5rU6jORlUK1OmyvR0+f6azTQj12wfm49oOJyT09KF9KBNLxsaABvRiKLtC9pIxLDImdrgvFrHOSFZP230cyELyoGZ0avqvhpJctl4mpeR7W9d9yAvVcRKrD+Qy1Py4Wz95upb1DkVucJopTzT9SP9W6U2mJ0EMHzP6Z6ag610uJsyGblEYZIXosNU/k/dV8VJKb+L+svxgXK2rNNEt4WbjAOheM7UZVvfm7MHq+/ibAck3UdgH2RToonSgTzR9zPyPRWqXgfpo1LGib3gY/kOGX11GFvQ6Hh1nKJyEOs3qrp2XMimOTpUypQcXgjOBJ1zrLR15K/Pds5keRrU9wCUhqSVS8UEsI400vrVlH87/2onSapH6eyTfl1GUClnaOr36t266PZEdhylR2bkcr7OODDSCKqvTSYVsu6f5V+jazhHw48MGnygi/dwmzFFDkk0UXDEUlgLCQQnkbS798lpRB3BLTswdylyC8q5dRgQ85zr+eLgWc96Fp75zGfiDW94AwDAe49rr70Wr3zlK/Fd3/Vdh3rH0572NHzFV3wFvvd7v7d6/fTp07jsssvw3/7bf8OXfMmXHPg+uf8Jr/4+uN3dqVBRhlHNRtK3bMNEcZZwgD86wh/z6PoRj3vUx3DdY/4Gu4tNfIyj/glL1AjA0g040e1jxw148Pxx/P791+LPHroC7hzh+P/X4chfUVwepjyZySsZCxOVVlj/T5AlXl5yAxBMSKk2mLQgcwOnndHcKE4jgEafl6TJ2svSYRSNnKy0KxQWw8mp4y4QM0Qauby0buFSmX1POd+RjmpQDVMKJVLkMSU5jDIr3m0erxg2xljZ9rwRakoIFkKtqoAO5WXAlO6loSanlNKZRCVoxX8YA6hyvoqaoTV3/ZECRcWpv1czigU1Q9ccc75+WMlM5qlD3J4N5Wq5trTBoQwp3ebSv1JfQ55l00u0Uug66iHqypAHYKOJZBmFMg5rEQCTfqUNl0pdq9cq/D+pe3p4/rztF4QD+8iWj1Ztkbl2miv/lnadG9jMPbq1HKVBd4g+PulLmla13+W5Gm1FX8w9XyuK4aXs5DBLYXT0WjKGlXMSQLV/adt6Th7KZW24T/7WltxV6qG+dUGO8snLJkXWp6cXOR/MDuorzx2oF4o
2tDk2Konn9T0ltFzQMoRt26aB+Wwdt2BmkJLkeKlLdBsD07adFUj5b90pUtRFy9iKDK1N2pS691Bm0GHk5DYQJnSe5evqec7PFPUFYJa+TN6tymBkk9gCosPkHpE129rQtLe6V18rUWs/9dte21IPYEJLK6vYnpt7psSEPjTtn+rYOCDk+ZruKX/r+pU2u+j8rTbBDA49NC70t5Rdyqn4wURhazugIs9rdSyjpe3y0SKSF8j1VtHWh4k0qiE5Q3W9yrYq+oGhiaWYkqlsZaxy/Ok6aQe+aV/tkDH9PveHibwhsrxZg5FL2VbYSislL9IkUTyu6rs0kZQdRLKkLB2PHjREJ5D3wGYExugE0o7qrgM6cRpxzHXEgPeg0YMB+GM78CeOgJcd9j6jw5nreqxPOIz7+/jQa/9/OHXqFE6ePDlTuYBLGmm0Xq/x/ve/H3fccUc655zDC17wArz3ve898Hlmxjvf+U7cd999+IEf+IHZb/z4j/84LrvsMtx4443Ve1arFVarVfp96tQpAIBf7U+Zo1RAcqrKSNuETnFzRbAzAZ49/MBA73GKCX/JS/R9D0ecIop68li4EY48dp3HuCAc6RjnznuszgwYz6yBswQ+DfDpmEhtYPDogxGgonx0R09LvKIXkxfWaaSTGotuoZHBUZDxwPBx6Zn3HlDhdjyEJN4pvE4LjTT7kztUuFahpziNgDi7I0qb0u+QdDc7jbhzMURd5wYoJDQXn6spdUyNg/SjNOpmoAVcJuQW+V0xlvTjctPklrIuh9WH296jUfNU1xT+FppUX32AQOeDXnqI737C8HBoeqHlmhcbDwvZMCxfOBOjVTEuzbvKW7TiVbP/AAA1sKPYX4jUeXmB6OtiWV+aMZOZEpEhno3YmMVBtK8ZyRdC8Nr7NT3Kc1RQ/TC8Ub6vVj4uDw7LdJX7D/Hox8uTW1H5/lbjbzJwmX/XVj96Ualy4MFpgGLVVrUfVfoQ6WulcVnwYbI35K+qV42nap/UJw/VXpqOc0xwIbKM7Q89iJi8qmJsb0VJDz14KZZxV8tc2DTm+8wHF+MTIpQrry3zBdHBn9J8ZcSADDbSf2Y+bequ7pv7cGXJJRW/9ccsL9uvb539v4C+OulHqoyzTqNyIFp7b62syS4sBpnlMQpdOBnUqqIf1mYqZdSBD2w5P9fWBy1vKz9aEaqTvrdNVmn9RurERCCGNqsVr9qP5zDh2drNVCXurBO30p6pLk4VR2yh8l26bAw1QZF5lIu/+hlxgObIHen7ZGjPFZrqqtQdYjS5dpg+quVO0p3SRjzv/KwutWdAL1GTiDIA1kE24aWDoPqwfp4wmXCyFYvlSdF/2hmfV7AAiKtsQp1CHqMwLmaP4CiKTiMMY2xvDxqG5DQy/a7vguMIiEZIpJWKNPLdAL9k8Ogw7vUY9xbwfQde7cfHDlZel9Rp9OCDD2IcR1x11VXm/FVXXYU/+ZM/mX3u1KlTeOxjH4vVaoWu6/AjP/Ij+NIv/VJzz9vf/nZ83dd9Hc6fP49rrrkGv/mbv4krrrii+r4777wTr3nNaybnP/zaeuTSpcKHL3UBGhoaGhoaGhoaGhoaGhoaPiVw5swZXHbZZVvvueQ5jR4OTpw4gXvvvRdnz57FPffcg9tvvx1PfOIT8bznPS/d8/znPx/33nsvHnzwQfzET/wEvvZrvxb/83/+z2qepDvuuAO33357+u29x9/8zd/gMY95DOiwy3gaGj5OnD59Gtdeey3+4i/+4sAQwYaGTwQazzVcbDSea7jYaDzXcLHReK7hYqLxW8PDBTPjzJkz+MzP/MwD772kTqMrrrgCXdfhgQceMOcfeOABXH311bPPOedw/fXXAwBuuukm/PEf/zHuvPNO4zQ6duwYrr/+elx//fV49rOfjc/5nM/Bf/yP/9EshRPs7OxgZ2fHnLv88ssffsUaGj4OnDx5sgn9houKxnMNFxuN5xouNhrPNVxsNJ5ruJho/NbwcHBQhJH
AHXzLI4flcomnP/3puOeee9I57z3uuece3HLLLYd+j/fe5CR6uPc0NDQ0NDQ0NDQ0NDQ0NDQ0NARc8uVpt99+O172spfhGc94Bm6++Wa8/vWvx7lz5/Dyl78cAPDSl74Uj33sY3HnnXcCCPmHnvGMZ+CzP/uzsVqt8Ku/+qv4mZ/5Gfzoj/4oAODcuXP4vu/7Pvz9v//3cc011+DBBx/EG9/4RvzVX/0VvuZrvuaS1bOhoaGhoaGhoaGhoaGhoaHhkwmX3Gn04he/GB/96Efxqle9Cvfffz9uuukm3H333Sk59p//+Z/DuRwQde7cOfzjf/yP8Zd/+Zc4cuQInvSkJ+Fnf/Zn8eIXvxgA0HUd/uRP/gQ/9VM/hQcffBCPecxj8MxnPhP/43/8DzzlKU+5JHVsaDgMdnZ28OpXv3qyVLKh4ZFC47mGi43Gcw0XG43nGi42Gs81XEw0fmu4GCA+zB5rDQ0NDQ0NDQ0NDQ0NDQ0NDQ2fVrikOY0aGhoaGhoaGhoaGhoaGhoaGv52ojmNGhoaGhoaGhoaGhoaGhoaGhomaE6jhoaGhoaGhoaGhoaGhoaGhoYJmtOooaGhoaGhoaGhoaGhoaGhoWGC5jRqaLiIeOMb34jP+qzPwu7uLp71rGfh937v92bvffOb3wwiMv92d3cvYmkbPtnx3//7f8dXfuVX4jM/8zNBRPjlX/7lA59597vfjac97WnY2dnB9ddfjze/+c2PeDkbPnVwoTz37ne/eyLniAj333//xSlwwyc17rzzTjzzmc/EiRMncOWVV+LWW2/Ffffdd+Bz//k//2c86UlPwu7uLj7/8z8fv/qrv3oRStvwqYCHw3PNnmt4uPjRH/1RfMEXfAFOnjyJkydP4pZbbsGv/dqvbX2mybeGRwLNadTQcJHwlre8Bbfffjte/epX4wMf+ABuvPFGvOhFL8JHPvKR2WdOnjyJv/7rv07/PvzhD1/EEjd8suPcuXO48cYb8cY3vvFQ9//Zn/0ZvuIrvgLPf/7zce+99+K2227DN3/zN+PXf/3XH+GSNnyq4EJ5TnDfffcZWXfllVc+QiVs+FTCe97zHrziFa/A7/7u7+I3f/M3sdls8MIXvhDnzp2bfeZ3fud38PVf//X4pm/6Jvz+7/8+br31Vtx66634wz/8w4tY8oZPVjwcngOaPdfw8PC4xz0Or33ta/H+978f73vf+/DFX/zF+Kqv+ir80R/9UfX+Jt8aHikQM/OlLkRDw6cDnvWsZ+GZz3wm3vCGNwAAvPe49tpr8cpXvhLf9V3fNbn/zW9+M2677TY89NBDF7mkDZ+KICK87W1vw6233jp7z3d+53fiHe94hzEuvu7rvg4PPfQQ7r777otQyoZPJRyG59797nfj+c9/Pj72sY/h8ssvv2hla/jUxEc/+lFceeWVeM973oMv+qIvqt7z4he/GOfOncPb3/72dO7Zz342brrpJtx1110Xq6gNnyI4DM81e67hE4lHP/rReN3rXodv+qZvmlxr8q3hkUKLNGpouAhYr9d4//vfjxe84AXpnHMOL3jBC/De97539rmzZ8/iuuuuw7XXXrt1ZqGh4ROB9773vYZHAeBFL3rRVh5taPhE4KabbsI111yDL/3SL8Vv//ZvX+riNHyS4tSpUwDCoGoOTc41fCJxGJ4Dmj3X8PFjHEf8wi/8As6dO4dbbrmlek+Tbw2PFJrTqKHhIuDBBx/EOI646qqrzPmrrrpqNnfHDTfcgDe96U34lV/5Ffzsz/4svPd4znOeg7/8y7+8GEVu+DTE/fffX+XR06dPY29v7xKVquFTGddccw3uuusuvPWtb8Vb3/pWXHvttXje856HD3zgA5e6aA2fZPDe47bbbsPf/bt/F3/n7/yd2fvm5FzLo9VwoTgszzV7ruHjwR/8wR/g+PHj2NnZwbd+67fibW97Gz7v8z6vem+Tbw2PFPpLXYCGhoY6brnlFjOT8JznPAdPfvKT8WM/9mP43u/
93ktYsoaGhoZPDG644QbccMMN6fdznvMcfOhDH8IP/uAP4md+5mcuYckaPtnwile8An/4h3+I3/qt37rURWn4NMFhea7Zcw0fD2644Qbce++9OHXqFP7Lf/kveNnLXob3vOc9s46jhoZHAi3SqKHhIuCKK65A13V44IEHzPkHHngAV1999aHesVgs8NSnPhUf/OAHH4kiNjTg6quvrvLoyZMnceTIkUtUqoZPN9x8881NzjVcEL7t274Nb3/72/Gud70Lj3vc47beOyfnDquLGxqAC+O5Es2ea7gQLJdLXH/99Xj605+OO++8EzfeeCN+6Id+qHpvk28NjxSa06ih4SJguVzi6U9/Ou655550znuPe+65Z3ZdcolxHPEHf/AHuOaaax6pYjZ8muOWW24xPAoAv/mbv3loHm1o+ETg3nvvbXKu4VBgZnzbt30b3va2t+Gd73wnnvCEJxz4TJNzDR8PHg7PlWj2XMPHA+89VqtV9VqTbw2PFNrytIaGi4Tbb78dL3vZy/CMZzwDN998M17/+tfj3LlzePnLXw4AeOlLX4rHPvaxuPPOOwEA3/M934NnP/vZuP766/HQQw/hda97HT784Q/jm7/5my9lNRo+iXD27Fkzk/lnf/ZnuPfee/HoRz8aj3/843HHHXfgr/7qr/DTP/3TAIBv/dZvxRve8AZ8x3d8B77xG78R73znO/GLv/iLeMc73nGpqtDwSYYL5bnXv/71eMITnoCnPOUp2N/fx3/4D/8B73znO/Ebv/Ebl6oKDZ9EeMUrXoGf+7mfw6/8yq/gxIkTKW/HZZddlqIjS936T/7JP8Fzn/tc/Nt/+2/xFV/xFfiFX/gFvO9978OP//iPX7J6NHzy4OHwXLPnGh4u7rjjDnzZl30ZHv/4x+PMmTP4uZ/7Obz73e/Gr//6rwNo8q3hIoIbGhouGn74h3+YH//4x/N2GpHfAAAHLklEQVRyueSbb76Zf/d3fzdde+5zn8sve9nL0u/bbrst3XvVVVfxl3/5l/MHPvCBS1Dqhk9WvOtd72IAk3/CZy972cv4uc997uSZm266iZfLJT/xiU/kn/zJn7zo5W745MWF8twP/MAP8Gd/9mfz7u4uP/rRj+bnPe95/M53vvPSFL7hkw41XgNg5FapW5mZf/EXf5E/93M/l5fLJT/lKU/hd7zjHRe34A2ftHg4PNfsuYaHi2/8xm/k6667jpfLJX/GZ3wGf8mXfAn/xm/8Rrre5FvDxQIxM19MJ1VDQ0NDQ0NDQ0NDQ0NDQ0NDw99+tJxGDQ0NDQ0NDQ0NDQ0NDQ0NDQ0TNKdRQ0NDQ0NDQ0NDQ0NDQ0NDQ8MEzWnU0NDQ0NDQ0NDQ0NDQ0NDQ0DBBcxo1NDQ0NDQ0NDQ0NDQ0NDQ0NEzQnEYNDQ0NDQ0NDQ0NDQ0NDQ0NDRM0p1FDQ0NDQ0NDQ0NDQ0NDQ0NDwwTNadTQ0NDQ0NDQ0NDQ0NDQ0NDQMEFzGjU0NDQ0NDQ0NDQ0NDQ0NDQ0TNCcRg0NDQ0NDQ0NDwPf8A3fgFtvvfWif/fNb34ziAhEhNtuu+1Qz3zDN3xDeuaXf/mXH9HyNTQ0NDQ0NHzqoL/UBWhoaGhoaGho+NsGItp6/dWvfjV+6Id+CMx8kUpkcfLkSdx33304duzYoe7/oR/6Ibz2ta/FNddc8wiXrKGhoaGhoeFTCc1p1NDQ0NDQ0NBQ4K//+q/T8Vve8ha86lWvwn333ZfOHT9+HMePH78URQMQnFpXX331oe+/7LLLcNlllz2CJWpoaGhoaGj4VERbntbQ0NDQ0NDQUODqq69O/y677LLkpJF/x48fnyxPe97znodXvvKVuO222/CoRz0KV111FX7iJ34C586dw8tf/nKcOHEC119/PX7t137NfOsP//AP8WVf9mU4fvw4rrrqKrzkJS/Bgw8+eMFl/pEf+RF8zud8DnZ3d3HVVVfhH/7Df/jxkqGhoaGhoaHh0xzNadTQ0NDQ0NDQ8AnCT/3UT+G
KK67A7/3e7+GVr3wl/tE/+kf4mq/5GjznOc/BBz7wAbzwhS/ES17yEpw/fx4A8NBDD+GLv/iL8dSnPhXve9/7cPfdd+OBBx7A137t117Qd9/3vvfh27/92/E93/M9uO+++3D33Xfji77oix6JKjY0NDQ0NDR8GqEtT2toaGhoaGho+AThxhtvxL/4F/8CAHDHHXfgta99La644gp8y7d8CwDgVa96FX70R38U/+f//B88+9nPxhve8AY89alPxfd///end7zpTW/Ctddeiz/90z/F537u5x7qu3/+53+OY8eO4e/9vb+HEydO4LrrrsNTn/rUT3wFGxoaGhoaGj6t0CKNGhoaGhoaGho+QfiCL/iCdNx1HR7zmMfg8z//89O5q666CgDwkY98BADwv//3/8a73vWulCPp+PHjeNKTngQA+NCHPnTo737pl34prrvuOjzxiU/ES17yEvyn//SfUjRTQ0NDQ0NDQ8PDRXMaNTQ0NDQ0NDR8grBYLMxvIjLnZFc27z0A4OzZs/jKr/xK3Hvvvebf//2///eClpedOHECH/jAB/DzP//zuOaaa/CqV70KN954Ix566KGPv1INDQ0NDQ0Nn7Zoy9MaGhoaGhoaGi4Rnva0p+Gtb30rPuuzPgt9//GZZX3f4wUveAFe8IIX4NWvfjUuv/xyvPOd78RXf/VXf4JK29DQ0NDQ0PDphhZp1NDQ0NDQ0NBwifCKV7wCf/M3f4Ov//qvx//6X/8LH/rQh/Drv/7rePnLX45xHA/9nre//e349//+3+Pee+/Fhz/8Yfz0T/80vPe44YYbHsHSNzQ0NDQ0NHyqozmNGhoaGhoaGhouET7zMz8Tv/3bv41xHPHCF74Qn//5n4/bbrsNl19+OZw7vJl2+eWX45d+6ZfwxV/8xXjyk5+Mu+66Cz//8z+PpzzlKY9g6RsaGhoaGho+1UHMzJe6EA0NDQ0NDQ0NDYfDm9/8Ztx2220PK18REeFtb3sbbr311k94uRoaGhoaGho+9dAijRoaGhoaGhoaPslw6tQpHD9+HN/5nd95qPu/9Vu/FcePH3+ES9XQ0NDQ0NDwqYYWadTQ0NDQ0NDQ8EmEM2fO4IEHHgAQlqVdccUVBz7zkY98BKdPnwYAXHPNNTh27NgjWsaGhoaGhoaGTw00p1FDQ0NDQ0NDQ0NDQ0NDQ0NDwwRteVpDQ0NDQ0NDQ0NDQ0NDQ0NDwwTNadTQ0NDQ0NDQ0NDQ0NDQ0NDQMEFzGjU0NDQ0NDQ0NDQ0NDQ0NDQ0TNCcRg0NDQ0NDQ0NDQ0NDQ0NDQ0NEzSnUUNDQ0NDQ0NDQ0NDQ0NDQ0PDBM1p1NDQ0NDQ0NDQ0NDQ0NDQ0NAwQXMaNTQ0NDQ0NDQ0NDQ0NDQ0NDRM0JxGDQ0NDQ0NDQ0NDQ0NDQ0NDQ0T/P8Bh3FqhnIk19sAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABMQAAADvCAYAAAAdFAgcAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAfIxJREFUeJzt3Xd8FGX+B/DPbE92s+mdUBM60pSqFEVCsSCi4p2KKLYTFdGfineK9bg7GxYEK1jgQAXEghRpngIiTamhGAgtIYXUTbbO74/Znc2m7m422YR83q/XvnZ39pmZZ2ZnZ2e+832eEURRFEFERERERERERNRKKIJdASIiIiIiIiIioqbEgBgREREREREREbUqDIgREREREREREVGrwoAYERERERERERG1KgyIERERERERERFRq8KAGBERERERERERtSoMiBERERERERERUavCgBgREREREREREbUqDIgREREREREREVGrwoAYERERtXibN2+GIAjYvHlzQKcrCAKee+65gE7Tl3lPnz49KPP216JFiyAIAk6cOBHsqhARERHViQExIiKiZuTdd9+FIAgYOHBgg6e1evXqoAVzWhKup6b33HPPQRCEGh8LFixolHnyeyYiIqLKVMGuABEREbktXrwY7du3x44dO3Ds2DGkpqb6Pa3Vq1dj3rx5DALUo671VF5eDpWKh0uNZf78+TAYDB7DAhEMrgl/D0RERFQZj/CIiIiaiczMTGzduhUrVqzAfffdh8WLF2P27NnBrlajMplMCA0NrTbcZrPB4XBAo9EEoVZuOp0uqPO/2E2aNAkxMTHBrkaDlJWVQa/XB7saRERE5CM2mSQiImomFi9ejMjISIwfPx6TJk3C4sWLq5Wpra+sEydOQBAELFq0CABw5513Yt68eQDg0RzNpaysDI899hhSUlKg1WrRpUsXvPrqqxBFsdo8P//8cwwYMAChoaGIjIzEsGHDsG7dOo8y7777Lnr06AGtVoukpCQ8+OCDKCws9CgzYsQI9OzZE7t27cKwYcMQGhqKp59+Wq77q6++irlz56JTp07QarU4ePAgAODw4cOYNGkSoqKioNPpcOmll+Kbb76pd33+73//w0033YS2bdtCq9UiJSUFjz76KMrLy+Uy9a2nmvoQ27NnD8aOHQuj0QiDwYCrrroK27dv9yjj6kvrl19+wcyZMxEbGwu9Xo8bbrgBubm59da9ssWLF6NLly7Q6XTo378/fvrpJ4/PT548ib/97W/o0qULQkJCEB0djZtuuqlaP15WqxXPP/880tLSoNPpEB0djcsvvxzr16/3KOft+j5w4ACuvPJKhISEoE2bNnjppZfgcDh8Wrb6fP755+jfvz9CQkIQFRWFyZMn49SpUx5lGvo9e/ubck3HYDDg+PHjGDduHMLCwvDXv/4VAOBwODB37lz06NEDOp0O8fHxuO+++3DhwgWP6e7cuRPp6emIiYlBSEgIOnTogLvuuitQq4yIiIi8xAwxIiKiZmLx4sWYOHEiNBoNbr31VsyfPx+//fYbLrvsMp+ndd999+Hs2bNYv349PvvsM4/PRFHEddddh02bNuHuu+9Gnz59sHbtWvzf//0fzpw5gzfeeEMu+/zzz+O5557DkCFD8MILL0Cj0eDXX3/Fxo0bMXr0aABSf1DPP/88Ro0ahQceeAAZGRly3X/55Reo1Wp5evn5+Rg7diwmT56M2267DfHx8fJnCxcuREVFBe69915otVpERUXhwIEDGDp0KJKTk/HUU09Br9fjiy++wIQJE7B8+XLccMMNta6DL7/8EiaTCQ888ACio6OxY8cOvP322zh9+jS+/PLLetdTTQ4cOIArrrgCRqMRTzzxBNRqNd577z2MGDECW7Zsqdbc76GHHkJkZCRmz5
6NEydOYO7cuZg+fTqWLVtW77wAYMuWLVi2bBkefvhhaLVavPvuuxgzZgx27NiBnj17AgB+++03bN26FZMnT0abNm1w4sQJzJ8/HyNGjMDBgwflDLznnnsOc+bMwbRp0zBgwAAUFxdj586d2L17N66++mp5+bxZ39nZ2Rg5ciRsNptc7v3330dISIhXy+VSUFDg8V6pVCIyMhIA8PLLL+OZZ57BzTffjGnTpiE3Nxdvv/02hg0bhj179iAiIgJA43zPdbHZbEhPT8fll1+OV199VV6/9913HxYtWoSpU6fi4YcfRmZmJt555x3s2bNH/h2cP38eo0ePRmxsLJ566ilERETgxIkTWLFiRYPqRERERH4QiYiIKOh27twpAhDXr18viqIoOhwOsU2bNuIjjzziUW7Tpk0iAHHTpk0ewzMzM0UA4sKFC+VhDz74oFjTX/3XX38tAhBfeuklj+GTJk0SBUEQjx07JoqiKB49elRUKBTiDTfcINrtdo+yDodDFEVRPH/+vKjRaMTRo0d7lHnnnXdEAOLHH38sDxs+fLgIQFywYEGNdTcajeL58+c9PrvqqqvEXr16iRUVFR7zHjJkiJiWllbnejGZTNWWfc6cOaIgCOLJkyfrXU+iKIoAxNmzZ8vvJ0yYIGo0GvH48ePysLNnz4phYWHisGHD5GELFy4UAYijRo2S15UoiuKjjz4qKpVKsbCwsMb5VZ03AHHnzp3ysJMnT4o6nU684YYb6lzObdu2iQDETz/9VB7Wu3dvcfz48XXO09v1PWPGDBGA+Ouvv8rDzp8/L4aHh4sAxMzMzDrnM3v2bHn5Kj/atWsniqIonjhxQlQqleLLL7/sMd6+fftElUrlMbyh37Mvv6kpU6aIAMSnnnrKo+z//vc/EYC4ePFij+Fr1qzxGL5y5UoRgPjbb7/VvnKIiIioSbDJJBERUTOwePFixMfHY+TIkQCkZl233HILli5dCrvdHtB5rV69GkqlEg8//LDH8MceewyiKOKHH34AAHz99ddwOBx49tlnoVB4HjK4mpv9+OOPsFgsmDFjhkeZe+65B0ajEd9//73HeFqtFlOnTq2xXjfeeCNiY2Pl9wUFBdi4cSNuvvlmlJSUIC8vD3l5ecjPz0d6ejqOHj2KM2fO1LqclbOVysrKkJeXhyFDhkAURezZs6euVVQju92OdevWYcKECejYsaM8PDExEX/5y1/w888/o7i42GOce++916MJ5hVXXAG73Y6TJ096Nc/Bgwejf//+8vu2bdvi+uuvx9q1a+XtovJyWq1W5OfnIzU1FREREdi9e7f8WUREBA4cOICjR4/WOC9f1vfq1asxaNAgDBgwQB4/NjZWbj7oreXLl2P9+vXyw9VMeMWKFXA4HLj55pvleuTl5SEhIQFpaWnYtGmTPI1Af8/eeOCBBzzef/nllwgPD8fVV1/tUd/+/fvDYDDI9XVltX333XewWq2NUjciIiLyDptMEhERBZndbsfSpUsxcuRIZGZmysMHDhyI1157DRs2bJCbJwbCyZMnkZSUhLCwMI/h3bp1kz8HgOPHj0OhUKB79+51TgsAunTp4jFco9GgY8eO1QI/ycnJtXaU36FDB4/3x44dgyiKeOaZZ/DMM8/UOM758+eRnJxc42dZWVl49tln8c0331Trx6moqKjWZapNbm4uTCZTtWUFpHXncDhw6tQp9OjRQx7etm1bj3Ku5oCu+hQVFXn0daXRaBAVFSW/T0tLqzavzp07w2QyITc3FwkJCSgvL8ecOXOwcOFCnDlzxqMfuMrL+cILL+D6669H586d0bNnT4wZMwa33347LrnkEgC+re+TJ0/WeDfImtZNXYYNG1Zjp/pHjx6FKIo1Lj8Aj2a4gf6e66NSqdCmTZtq9S0qKkJcXFyN45w/fx4AMHz4cNx44414/vnn8cYbb2DEiBGYMGEC/vKXv0Cr1Qa8rkRERFQ7BsSIiIiCbOPGjTh37hyWLl2KpUuXVvt88e
LFckCscrZRZYHOImssdfUxVfUzVwftjz/+ONLT02scJzU1tcbhdrsdV199NQoKCvDkk0+ia9eu0Ov1OHPmDO68886Ad/5eG6VSWeNwV9DqkUcewSeffCIPHz58eLXO3evz0EMPYeHChZgxYwYGDx6M8PBwCIKAyZMneyznsGHDcPz4caxatQrr1q3Dhx9+iDfeeAMLFizAtGnTGrS+A83hcEAQBPzwww81rkODwQAgMN+zr78prVZbLWPS4XAgLi6uxhthAJAzHwVBwFdffYXt27fj22+/xdq1a3HXXXfhtddew/bt2+XlIiIiosbHgBgREVGQLV68GHFxcfJd8CpbsWIFVq5ciQULFiAkJETOMKp6B8eamuDVdqLfrl07/PjjjygpKfHIEjt8+LD8OQB06tQJDocDBw8eRJ8+fWqdFgBkZGR4NCO0WCzIzMzEqFGjalnq+rmmp1arfZ7Ovn37cOTIEXzyySe444475OFV76gI1L6eqoqNjUVoaCgyMjKqfXb48GEoFAqkpKT4VM8nnngCt912m/ze9f261NS88ciRIwgNDZWDLF999RWmTJmC1157TS5TUVFRbRsBgKioKEydOhVTp05FaWkphg0bhueeew7Tpk3zaX23a9euxrrVtG780alTJ4iiiA4dOqBz5861lgvE9+zLb6qu+v74448YOnSoVzcWGDRoEAYNGoSXX34ZS5YswV//+lcsXboU06ZN83qeRERE1DDsQ4yIiCiIysvLsWLFClxzzTWYNGlStcf06dNRUlKCb775BoAUiFAqlfjpp588pvPuu+9Wm7ZerwdQ/UR/3LhxsNvteOeddzyGv/HGGxAEAWPHjgUATJgwAQqFAi+88EK1TBtXhtOoUaOg0Wjw1ltveTTV++ijj1BUVITx48f7sVYkcXFxGDFiBN577z2cO3eu2ue5ubm1juvKKqpcJ1EU8eabb1YrW9t6qmmao0ePxqpVq3DixAl5eE5ODpYsWYLLL78cRqOxzmlU1b17d4waNUp+VO4vDAC2bdvm0Q/YqVOnsGrVKowePVpeRqVS6bGcAPD2229Xy3DKz8/3eG8wGJCamgqz2QzAt/U9btw4bN++HTt27PD4vLYMKV9NnDgRSqUSzz//fLVlE0VRXpZAfM++/KZqc/PNN8Nut+PFF1+s9pnNZpPneeHChWrL4wo2u74HIiIiahrMECMiIgqib775BiUlJbjuuutq/HzQoEGIjY3F4sWLccsttyA8PBw33XQT3n77bQiCgE6dOuG7776T+yiqzBVcefjhh5Geng6lUonJkyfj2muvxciRI/H3v/8dJ06cQO/evbFu3TqsWrUKM2bMQKdOnQBIzeP+/ve/48UXX8QVV1yBiRMnQqvV4rfffkNSUhLmzJmD2NhYzJo1C88//zzGjBmD6667DhkZGXj33Xdx2WWXeWQ/+WPevHm4/PLL0atXL9xzzz3o2LEjcnJysG3bNpw+fRq///57jeN17doVnTp1wuOPP44zZ87AaDRi+fLl1fqYqms91eSll17C+vXrcfnll+Nvf/sbVCoV3nvvPZjNZvznP/9p0LLWpGfPnkhPT8fDDz8MrVYrB2mef/55ucw111yDzz77DOHh4ejevTu2bduGH3/8EdHR0R7T6t69O0aMGIH+/fsjKioKO3fuxFdffYXp06fLZbxd30888QQ+++wzjBkzBo888gj0ej3ef/99tGvXDn/88UeDl7tTp0546aWXMGvWLJw4cQITJkxAWFgYMjMzsXLlStx77714/PHHA/I9+/Kbqs3w4cNx3333Yc6cOdi7dy9Gjx4NtVqNo0eP4ssvv8Sbb76JSZMm4ZNPPsG7776LG264AZ06dUJJSQk++OADGI1GjBs3rsHrjYiIiHzQxHe1JCIiokquvfZaUafTiWVlZbWWufPOO0W1Wi3m5eWJoiiKubm54o033iiGhoaKkZGR4n333Sfu379fBCAuXLhQHs9ms4kPPfSQGBsbKw
qCIFb+2y8pKREfffRRMSkpSVSr1WJaWpr4yiuviA6Ho9r8P/74Y7Fv376iVqsVIyMjxeHDh4vr16/3KPPOO++IXbt2FdVqtRgfHy8+8MAD4oULFzzKDB8+XOzRo0e16WdmZooAxFdeeaXG5T9+/Lh4xx13iAkJCaJarRaTk5PFa665Rvzqq6/kMps2bRIBiJs2bZKHHTx4UBw1apRoMBjEmJgY8Z577hF///13n9YTAHH27Nke9dm9e7eYnp4uGgwGMTQ0VBw5cqS4detWjzILFy4UAYi//fabx/Ca6lkbAOKDDz4ofv7552JaWpqo1WrFvn37Vhv3woUL4tSpU8WYmBjRYDCI6enp4uHDh8V27dqJU6ZMkcu99NJL4oABA8SIiAgxJCRE7Nq1q/jyyy+LFovF5/UtiqL4xx9/iMOHDxd1Op2YnJwsvvjii+JHH30kAhAzMzPrXLbZs2eLAMTc3Nw6yy1fvly8/PLLRb1eL+r1erFr167igw8+KGZkZMhlAvE9e/ubmjJliqjX62ut7/vvvy/2799fDAkJEcPCwsRevXqJTzzxhHj27FlRFKVt59ZbbxXbtm0rarVaMS4uTrzmmmvEnTt31rkeiIiIKPAEUaySt01ERERERERERHQRYx9iRERERERERETUqjAgRkRERERERERErQoDYkRERERERERE1KowIEZERERERERERK0KA2JERERERERERNSqMCBGREREREREREStiirYFWiObDYb9uzZg/j4eCgUjBkSERERERER0cXB4XAgJycHffv2hUrVesNCrXfJ67Bnzx4MGDAg2NUgIiIiIiIiImoUO3bswGWXXRbsagQNA2I1iI+PByBtHImJiUGuDRERERERERFRYJw7dw4DBgyQYx+tFQNiNXA1k0xMTESbNm2CXBsiIiIiIiIiosBq7V1EBXXpT/30E1Zcey3mJyXhVUHA0a+/rnecrM2b8Wm/fnhDq8WHqanYv2hRtTJ75s3D++3b4w2dDp8PHIhzO3YEvvJERERERERERNQiBTUgZi0rQ1zv3hg1b55X5QszM7Fi/HikjByJO/buRb8ZM7B22jRkrl0rlzm8bBk2z5yJwbNn4/bduxHXuze+Sk9H2fnzjbUYRERERERERETUggS1yWTHsWPRcexYr8v/vmABwjt0wMjXXgMARHfrhjM//4xdb7yBDunpAICdr7+OXvfcg15TpwIArl6wAH9+/z32f/wxBj71VOAXojGJDuD0/4CK/PrLRvcEojo3fp1aALvVCmtZGSCK0EZEQBCEYFepGofdjtKzZ2G+cAGCUglBqYRCqYSgUkGhVAIALKWlsJaUwGoyISQ6GobkZOiioupdHlEUAVGUngHYzWaU5+ejPC8P5qIiCIIAhUoFhUolzc/1UKuh1Gqh0moh1HKnEZVWC01YmM/LKzocMBcXQ6FUQm0weLUM1rIyWEtLIToc8jJVXT7R4QAcDqmM6+EsJ8/D+Vz5vSAIUGq10vLqdPLryvUSHQ7YrVY4bDaINhsElQrq0FC/tqeqdRIdDlhKSlBRWAi72ez+Dqp8J/J2oVRK72uZt+hwwFpeDkEQoAoJ8XubFx0OlOfnw5SbC9HhgEqng0qnAwQBDue6UKjVCI2LgzokxK95tBQOux3FJ0/CYbVCbTBAExYGhUoFh80mrwvXsyiK0roKCYFKp4OgUMgP1/bmy3xFh8P9G678XMtwVWgoVFqtPA1RFGG3WGApKYG1tBSWkhI4rFYoNBqotFqPZ6VWC6VG47F9uX5b1rIyWIqLpfFtNveyVHn2WE7XcIVC3n5UISFQajR1LrfNbIa1rExebwq1WlqXAdh/iw4HbGYz7GaztA1X3u85f2eCIEi/eYtFfjiczyHR0X7t9/zhsNsBQP4f8JXocEjbZOX9YQ37SHtFBcrz81GRnw9LaSm04eHQRUcjJCoKuuhoj+0JqL4PawwW5/bmsFql9V/pWRRFaMPDoY2IgC4iot7tyR
8Ou93v9V6ZzWyGpaRE+v2VlEjbtVIp/cdqNAiJiUFITExA5lWZ3WqV56tQKqX9UaV9EtVNFEU4bDbp9282Q7TbERId7dW6E0UR1tJS2MxmqENDW906d9hsqLhwQT5uhUIBY0oKtOHhwa4a+cBusUChVjfL8yZfOOx26X/QeTyl1Gh8+s9wHQuXZWfDXFgItcEAbUSE9B8UHh7wfTcFX4vqQ+zctm1oN2qUx7D26enYNGMGAOmHnLNrFwbOmiV/LigUaDtqFM5u21brdM1mM8xms/y+pKQksBX319GVwLeTvCur1gP3ZwMaQ8Cr4bDbkbNrFwoyMgBIB8QKtRqhsbEITUhASFQUzMXFqHAGXcorPVceZikuhtVkgs1kkk+sFWo1lM5n107YXFwMc2EhrKWlUOv10BiN0BiN7oOLKieEDotFOnErLYW1rAwOq1Wuu1KjgT4xEfrERHlnpouIQFhKCsI7dICxfXtEde2KkKgoeRxLaSnO790LU06OdEDuDLCV5+VJy1NlmRQqlXxSKZ9cajSA8+DK4wTaZkN5Xh5KsrKkE0wfKTUaqPV66URWrYbDZpPWaXm5x3I3JrXBAENSkvzQJyUhNDYWDqsVVpMJ1tJSlJ47h9IzZ1B69iwqCgpgKSmRA1qCQgGN0Qh9fDwiUlMRmZYGtcGAC0eP4sKRIyg+eRLmoiKIzhPDpqTUaOSDYld9q36ujYyELioKushI6CIjodbr5W3WUlIC0Xky6rDbYTOZ5O0SoiifgNvM5hqnXy9BgMIVHHMGyhwWC2wVFR5l1KGhUIWGQq3XQ63XS0GtSkHSygFFu9kMW0UFbCYTKgoKpKCLF9R6PQxJSYjp2RMxl1yC6O7dERobK62bqCjoExKgVKurjWc1maR9Q0GBdPDlDMZW5Ofj/N69yP39dxQePw5zURHMRUWwlZdDazRWW++6qCiEd+iA6G7dENWtG3QRETXW01JSIm1bR4/ClJsrTSsiAhqDAdbycjnwasrNhen8eZiys1Fw+DDyDx2Crbzc12+oZs4AUeWHSqeDOiwMmrAwKWB84QIqLlyAvdJ/kS8UajW0RqMcbPV5/yII0vbvcDTKvkQXFQVj27YIS0mBoFRK672sDBWug86ioupVUiqhMRigCgmRgzkApP14VBS0ERHSNlxRIQW8Kipgq6iQt2m787XdYql/8RWKOrd91/amT0iANjwcGqMRIbGxCG/fHuHt2yOsbVuExsZK/1WCAIfNBlNuLsrOncOFI0dQkJGBwuPHYTWZIDr/C6wmkxxwdD1by8qg1GgQkZaG6G7dEBofD9P58yg7dw4VBQVSgNF5wg04A6jOk1HT+fMoz8vz+jdcF7VeD11kJBw2W437MIVaLT+7Amy2igop8Or6b9do3P/xGo3023OuO9HhgK28HLbycvnEw1pa6nX9VCEh0n+6M0Dmeh2Zlob4/v0R378/dFFRUr3Ky1Fy5gwuZGSgICMDxSdPSusqNxemvDxYS0pgKS2V6q5SyfvNyg+H3e6xXbmeXUE0QaWCaLfLwef6CAoFQuPiEBofD31CAkLj4wGHA6bcXJTn5cFqMknTdW6XtvJyWE0m2Csq5KCm/JtwPte1ncvbjTNIpg4N9fgdig4HIjp1QlSXLojo1AmAdExtq6iAKScHpefOoezcOSi1WoRERyMkJgZqg0HeHsyFhe79bKUWGQqlEpqwMGicJ5Jao1F6bTRCbTDI/1UApOMk5zGX/NoZKLeaTLCWlcHmfLZbLPIFLVVoKAyJiTC0aQNDYiJMeXko+vNPFGVmwlJcLAe4BIUCSmeQXhAEeZ8hf69mc7XfjlKjgbFdO4R36AC186KIUq2GpbRUPh6sKChAeX5+te9dFRoqL59Kq5WWy2KBw2aD2mCQttvISMT06IGkIUOQNGQIjCkpXm3/FYWFOPLVVzj544/ytmwuKoI+IQHhHTsivH17KNRqaf9nNqM8Lw+lZ8+i7Nw5OJyBvpCYGCjUapQ7tz
lLSYm8fuRgakgI1Ho9wtu3R2TnzohITYW9okI+xrtw5AjyDhzAhYyMGrc/TVgYwtq2lff9ofHxsJaWouLCBfk/r+LCBZgLC+Gw2apdcIEgQBsejsjUVESkpSGsTRtpnYaEQKHRyPtSu8UCS3ExzMXFsJaWQhsRIR2fJiZCqdVKx1uiCEtJCcrz8mDKy4NKp5OOX3r1gj4uTroIZDKh5NQpnNqyBae3bEHOrl2wmc3ScZ0oIjQ2FmEpKQhr0wYhMTHyeYUqNFTe3wmCIG/DgiBA4zzvUIWEoKKwEBUFBTA7L4Z6/MbOnkXZ2bMod35uLiyUAuixsdK+IjZWfq3S6eRjFktxsfyfWPkREh2NhAEDEBoTU+/2lL1zJzY9+ijO/PyzvL9QurZZ5/mLNjwcIdHR0EVFSRdJK188cl68gCi6LwZWvjBY6fdd+TcuOhzQVAo2KSodM7rOE0Lj4yGKIkw5OTDl5MBqMnkEuCylpfKFP1dA1uOY2EmhUsm/SbVeX+21aLejLDsbZdnZMJ0/X+c5iNpggDY83H38UVEhLbvzuFwVEgJdRAQ04eEwtmuHzhMnouM110BjCPw5OgWGIIr+nJUF3quCgOtXrkTahAm1lvmoc2f0nDrVI+D15+rVWDF+PB4xmWC+cAELkpPxl61bkTR4sFxmyxNP4NSWLbjt119rnO5zzz2H559/vtrwU6dOBbdT/V/nAD8/DYTGAxGptZc7t03KJrv7GBDRKWCz//OHH7Dvgw+QtWkTzIWFAZtucxQaH4+orl1RnpuLgsOHA3JCUR+FWg1dVJR0UGuzVbuioQkLg8ZohCokRD5gaci8QqKjoXUGDVzBOXm+zj8z18FTs+EMALleS0/O5yp/uPKjUpaL84U8OdcwVyAo2Muq1Gig1OkgOr971/cfbLrISAhKpXyiAEA+8fE2wABBgD4hAYbkZNjNZlQ4TxxqOlAJBKVGI2UiOQMF9koBEr+nqdVCFRIiBTpr+F5cJ/4A5BPV5sYVeFOq1bA7TzAdFovXQVmFSiVlxzkDHxDFahclXNlrHsMcjkb7rpuKHMD2YTlcFy0qCgv9C3o3EUGp9AhsWIqK5JP7oG7HzsCsHExzPkMUYS4uhqW4OHh185EqJASasDCo9Xo50Gw3m1FeUNCo24Yc2PbjohsFlyYsDIbkZIS1aQNdVJR8gdUVVFZqNCjKzMSf333n3f9wE1OFhkJjMEgZYwUFwa6O15RabdCPBxuDoFQiZcQIdJ40CV1uvtnj4j8AlOXk4H9PP439Cxc26/+rYAmJiYEuMlK6eFdYCJvJ5Pe0VCEh6HTddRj41FOI69PHq3HO792Lspwc6XzQ+XC1WKiaxe2v06dPIyUlJfgxjyBrURlijWXWrFmYOXOm/P7MmTPo3r17EGvkZHZeMe/2F2DE67WXm58AmHIAi/dXV+tz9OuvsWriRHkHqQ0PR1y/flA4m9LZzWYpmyInBxUXLkhXy50H17roaIQ6n0NiYqQrCtHR0IaHS1dbnVcma7oaKDoc0hXk8HApg8N1Bb24WDpIrxwUcV5FUjhPQNR6PTQGg/xaFEUp2n/2rJyBYHYe9JdkZaHoxAkU/fknSk6dkq88uBiSk+UrbEqNBqrQUHk5XMsUEh0NTXg4xEop9pWfBYWi+tV0lQraiAgp0yAx0ae0W5vZjLLsbCkbzGKB3XU123W1TK2u3qQJ0km7JizM6xToumLk1rIylJ07h9KzZ92PM2dQnpcHpVYrXw3Vx8fDkJwMQ3KydBXNeXXYYbdLV/GKiqSri86sMGtZGSLT0hDZuTPCO3ZESFQUNOHhfjdR9JYoinBYrR5ZJa4MyKpNGB0Wi3Q1s6DA49laViY35dEajdIVQlcWkPPgUK3XAwqFvJ2rdDpoIyLkLI+qdXKdPDnsdilAarfLmSCuYa7gmUKjkbcBuJqaOq+iu66o2yoqPJvuVXqtrNTUz9WUp6bMrsr1sxQXw5Sbi+KTJ5G3bx9y//gDBR
kZHuvGYbWizJlVUJUrQKvQaOR1ojYYEHvJJYjt3RvR3bpJGUDh4VDpdFIGXqWryRUXLqA8Lw+FR48i/9AhlJ45I1+pRA2ZRiGxsYjq3BmhCQmwOpuqWktLpavfzu8n1HUVNi4OEampiOnRA+EdO0KhVEIURdgqKqT17fw9Vw6+Vt2W5MyNSlkcHg+7HbaKCrlJFQQBushIaCMjoTEYqjVB9GiKWKlJomv+tvJyOVggKBTugyZnBkdt36Not8PmDJDJ+y1nBqIrU6ZqU2JfiKIoZ3iUnj2L4qwslJ4+LWcxqvV6aCMjoU9IgD4+XsqWq5S56Mres5pM8m9KFEVYiorkbU2hUkHpvJrtaqKp1GrdmQ6VPlPqdFAolfLFh8qZu6LNJp9wuoIwcvP1khLp/yIzU87EMBcVwZSTg6ITJ1B84gSKT52CzWRyb4eQgvYhsbGITE1FZJcuiExLg8ZolJtByxnQYWFS5ozzIoiltBQFhw+j4PBhlOflQR8fD31iIkKio2G3WOSsYFdzVUGphC4yEqFxcQiJjYU6NLTGrESPYXU0v5YzvgsKoNRoPPZhos0Gu9XqzsxwZsWoncFoV7Piqk0e7WazvO+3FBdDUKnk70YXFQVDYiL0CQn1Nql3/YdUODMozIWF0vdRWIjyvDzk7d+P7J07UXDokBzYc30PUV26IKpLF4R36iRlHsTFISQmRj7RUIWESJlYzn1n5f2oQqmstk0pnc3iXPtn129PExZW52/PlSnuykZwPQSlUsoCiYmR1rermbQr69e5jmv7XtV6PTTO4LdrPq7MMlc2XuXXDpvNfbzkcKDw2DEpg+7ECQhKpfxbCI2Lgz4pCfqEBDisVilLPjdXzkx32GxQhYZK/+FpaTAkJ8vfocNmk/dNru9ezv41meR1LAgCBGf2VeXWA0qt1p3N4Tx+VOv1UGo0cnaNtbQUpWfPouTUKZSeO4eQmBhEdOyI8A4doIuMlLtEkDMZy8vlLgEq7ztc5VxdRwBAyZkzKPrzTxSfPAmryeT+v9LrpeNB50Pnamqs07mXy9kiwpXZ5+qWQqFUwlJaCnNhIUy5ucjZtQtnt27F+b17YSkpkX/79Ynp2RNdbr4ZEamp0nGW0YjSc+fk+ooOh7w8uqgoOWNKoVTKLR0cVitCnNuc1miUMx+tzm3EVl4u7f/+/FPOclWHhsrTCu/YETE9eiC6Rw8YU1I8tnlXtlVxVpb8bHKe3LuyvLXOjG9tRASUlS66VH4uz8tD4bFjuHD0qHwM7NrXVs5UdWWgqvR6VBQUyMf+dmemFgQBGoNBPk+xlpQgd98+FB4/7hEMU+l0SBw8GCnDhyN56FBowsPl5TLl5KDk1CmUnD4tZ3q5Mtnlfb8oyucOosMh759s5eVSRqszu1ne5py/MUNyMvSJiQiNiZEzKh1Wq3Su5cwGK8/Nhcn523NljWmMRmkeFy6gvKBA+m8sKEDJ6dMoOHQIWRs2IGvDBmydPRujP/gAqdddB1EUcWjJEmx86CFUXLgAAOh+++0Y8txzUOv18vK4lkNQKKR9rPOiiaBQVA/WOo+Lajrmca0T+VHpOMrq/C24Woa4jgHMhYVSdnRODgRBkLJpnccJrmxLV/KAK0gkP+v1Un2cWbZ2s9kzy7SG14JCIR2LODN2Q+Piqh0L261W+fs0FxVJWWfO7xGCIP8v2srL5XLnduxAxrJlKDx+HBnLliFj2TJ0vukmDJk9G9HdutXYtPr83r34adYsnFizptbfvzY8HHF9+yK+f38kDR6M1Ouvr/U/h+rXojLElg4bhrh+/XDl3LnysH0LF2LTjBl4uKgIdosFc0NDcd1XX3lMZ/WUKTAXFuKGVau8qkuziZauvx/44z1g8HPAkNm1l/soDSg8Bkz+GUge2uDZnt2+HV+MHAlbRQW6Tp6M/jNmIL5//zpPrFpye3NLSQnyDx1CweHD0EVGIv7SS2FITAx2tYhaLNHhgCkvDyWnTqHs7FkodTp3QD
kqyqt+5Hxhdp5kuU52AMgnAq4mVUSNzWoyyU2PGqufKPKO1XnhyHWxqCUfo1Dr4AoglZ45g5LTp2EuLq6xWZo6NBRpN9yA2EsuCXaVLwqWsjKU5+a6u5hoQD+szU3h8eM4snw59n/8sdztTc+pU2EuLMTRlSsBAHF9+2LUvHkeLasosERRRM6uXdj5+us4vHSpRzae68JsaGws9AkJUKhUOLFuHQApSz26e3dYnE1CraWltXblEd2jB0a+/jrajx7tU92aTcwjyFpUKDFx8GBkrl7tMezk+vXyj1ip0SC+f39kbdggB8REhwNZGzag7/TpTV3dhnNliOki6i6ndrZJtjS877MLx45h5bXXwlZRgQ7jxmHcZ5/VG3Fu6X8cmrAwJA4YgMQBA4JdFaKLgqBQQB8XB31cHNC/f6PPT2s0Qms0Nvp8iOqiDg2Fum3bYFeDIGWs4SK/8QddXNShoXIWIzUdjV4PjV4f7Go0iohOnTDgiSfQ7+GH8fMzz2Dna69JzSMhZeoPfvZZDHjyyTpbBVDDCYKAhEsvxTVLlmDgrFn45dlncWzVKkAU3X1p5uYi/+BBeZyukydj6IsvIjLVs8skV7+exSdPImfXLuTs2oXDS5ci/8ABfJWejg7jxmHsokUIjY1t6sVs0YIaELOUlqLw2DH5fVFmJs7v3St3vvvTrFkoPXMG4z79FADQ+/77seedd7DliSfQ8667kLVxIzK++AITv/9ensalM2fihylTEH/ppUgcMAC75s6FtawMPZ13nWxRzIXSs6aeu7S4OtK3NqzJpK2iAivGjUN5Xh7i+/fHtcuWMf2SiIiIiIioBVLpdBjxyivodO21WH/ffdAYjRj9/vuI69072FVrdWJ79cKElSvd3WY4765sys1FWXY2KvLz0WbYsFr7GVOoVNA5Wz7E9e6NXnfdhctfegnbXngBe955B5mrV2PjI4/gmiVLmnbBWrigRjuyd+7EFyNHyu83O/vx6jFlCsYuWoSyc+dQnJUlfx7RoQMmfv89Nj/6KHa/+SYMbdog/cMP0SE9XS7T9ZZbYMrNxS/PPgtTdjZi+/TBpDVroI+Pb7oFCxRXQEwbUXc5jfN28A3sQ+z4t9/iwtGjCI2Px8TvvuPdMIiIiIiIiFq4lGHDcNehQ8GuBgFyf5QIQCaXLjISI994A11uuQVLBg/G4f/+F5c9/jji+/ULQE1bh6AGxNqOGIHH6+jCbOyiRTWOc8eePXVOt9/06ejXEptIVuVrk8kGZogd/OwzAFL7cn1CQoOmRURERERERESNK2nQIHT7619xaPFi/PTUU7jJ2RcZ1a/6rQ2o+fC2yWQA+hAz5eYi84cfAADdb7vN7+kQERERERERUdMZ+uKLUGo0OLl+PU6sXx/s6rQYDIg1Z95miAWgD7HDy5bBYbMhrm9fxPTo4fd0iIiIiIiIiKjpRHTogD5/+xsA4Kcnn4TocAS5Ri0DA2LNld0K2EzS63o71W94H2KHPv8cAND99tv9ngYRERERERERNb2Bf/87NEYjzu/Zg8PLlgW7Oi0CA2LNlSs7DAC0xrrLNrDJZMGRIzj3668QFAp0u/VWv6ZBRERERERERMERGhODfo88AgA4umJFkGvTMjAg1ly5+g9TGwBFPfc+aGCn+ged2WHtRo9mZ/pERERERERELVDiZZcBAIr+/DPINWkZGBBrrlwBMW1E/WUb0IeYKIpyc8kebC5JRERERERE1CIZO3QAABRlZga5Ji0DA2LNlbcd6gMN6kMsa+NGFGVmQm0wIHXCBJ/HJyIiIiIiIqLgC2/fHgBQceECKgoLg1qXloABsebKlSFWX4f6QIP6ENv+8ssAgB5TpkAdGurz+EREREREREQUfBqDAaFxcQCYJeYNBsSaK18yxPzsQ+z0zz/j1KZNUKjVGPDkk77Vj4iIiIiIiIialXA2m/QaA2J1cdiCN29fMsRcTSZ9DIhtf/FFAEDPqVNhTEnxaVwiIiIiIiIial
4YEPMeA2J1sZUFb96uDDFfOtWvo8nkhWPHsO7ee3Fi3ToAwLlff8WJdeugUKkwcNasBlaWiIiIiIiIiIJNDojxTpP1UgW7As2ZYLcEb+byXSZ96EPMWgaIDkDwjHOKoog1d96JM7/8gj8++ACdrr0W5uJiAED3O+6QO94jIiIiIiIiopYrvGNHAMwQ8wYDYnWxVwRv3nJALKL+sq4MMQCwmjzfA8hcswZnfvkFCrUaEEUc//ZbAICgUDA7jIiIiIiIiOgiwSaT3mOTyToIdnPwZu5Lp/qqUACC9LpKP2Kiw4Gf//53AEC/hx/GlD/+QPv0dADAJffei8jU1ABVmIiIiIiIiIiCqXJATHQ4glyb5o0ZYnUJakCsUHr2plN9QZCywiwl0kOfIH90ZMUKnN+zB2qDAQOeegqhMTG48YcfUHL6NMKSkxun7kRERERERETU5MJSUiAoFLCbzSjLzoYhKSnYVWq2mCFWhxaTIQa4+xGzuDPEHHY7fnn2WQBA/0cfRWhMDABAEAQYnT8SIiIiIiIiIro4KNVqhLVtC4DNJuvDiEhdWkqGGABowqTnSk0mDy1ejIJDh6CLjMRljz0W2PoRERERERERUbPjajZZyDtN1okBsboEMyBmcWaIedOpPlApQ6xEHrTvww8BAJc+/ji04V4G1oiIiIiIiIioxWLH+t5hH2J1EIJ1l0lRdDeZ1HqbIeYMiDkzxKwmE85u3w4A6HLTTYGuIRERERERERE1QxEdOwIIYEDs1znA0RVAwWFAFQIkDQGG/RuI6uIus2wEcHqL53iX3AdcvcD9vjgL+PEB4NQmKamnxxTgijmAIjihKQbE6hKsDDFrKSA67wbhc4aYFBA7u3UrHFYrDMnJiOCdJImIiIiIiIhaBTlDLFBNJk9vAfo8CCRcBjhswM9PA1+NBqYeBNR6d7le9wBDX3C/V4W6XzvswMrxQGgCcOtWoOwc8MMdgEINXPHPwNTTRwyI1SFonepXFErPSg2g0nk3TpU+xLI2bQIAtB05EoIgBLiCRERERERERNQcBbzJ5I1rPN+PWQTMjwNydgFthrmHq0MBfULN0zi5Dsg/CEz6EdDHA+gDDH0R+OlJYMhzUvyjibEPsboEKyBWuUN9b4NZVfoQO+UMiKWMHBngyhERERERERFRc+UKiJWcPg27xVJruZKSEhQXF8sPs9nLGIiriyddlOfwQ4uBeTHAop7A/2YBVpP7s7PbgJhezmCYU/t0wFIM5B3wbr4BxoBYHYKWISZvXBHej1OpDzFLaSmyf/sNAANiRERERERERK1JaHw8VCEhgCii+OTJWst1794d4eHh8mPOnDn1T1x0AJtnAElDgZie7uHd/gKM+xy4eRMwcBZw8DNg9W3uz8uygdB4z2m53puyvV+4AGKTybo0hwwxb7maTFpKcebnn+Gw2WBs1w4RzsgwEREREREREV38BEFAeIcOyD94EEWZmYhMS/P4XJW5CgBw8OBBJCcny8O1Wm39E9/wIJC3H5j8s+fwS+51v47tBegTgS+vAgqPAxGd/F6WxsQMsToELUPM4rrDZIT347iaTFpL5P7DmB1GRERERERE1PrU2o+Yw47Qn6YDAMLCwmA0GuVHvQGxDdOB499JWWBhbeoumzhQei48Jj3rEwBTjmcZ1/vQWvoda2R+BcSWDh+OA59+Cmt5eUAqsWfePLzfvj3e0Onw+cCBOLdjR+3zHjECrwpCtcfy8ePlMj/ceWe1z78aM8b3itkr/FmchnN1qu9Lk8lKd5k8ValDfSIiIiIiIiJqXcI7dgQAFFa906Sl2PeJiaIUDDu2Erh5IxDuRUu083ulZ32i9Jw0GMjbB5jOu8ucXA9ojEB0d9/rFAB+NZmM69sXmx9/HBseeghdbr4Zve6+G0mDBvlVgcPLlmHzzJkYtWABEgcOxO65c/FVejruysiAPi6uWvnrV6yAo1KncOX5+fikd290uekmj3Ltx4zB2IUL5fdKb1L/qr
LX3vlco3JliPnUZFIKiDnKC5GzaxcAZogRERERERERtUa1Zoi5umjyxYYHgcNLgOtXSd01lTn7/NKEA+oQqVnkoSVAx3GALhrI/QPY/Kh0B8rYS6Sy7UZLga/VtwPD/iP1G/bzP4A+DwIqP+I1AeBXQOzKuXMx4tVXceybb3Dgk0+wdNgwRKSmotddd6H77bdDHx9f/0Scdr7+Onrdcw96TZ0KALh6wQL8+f332P/xxxj41FPVyodEed7F4PDSpVCHhqJzlYCYSquFPqFhaXeCPTAZcD5zZYj50mTS2YeYOf8cRIcDEZ06wZiSEvCqEREREREREVHzFt6+PQBU71TfdRM/X/w+X3r+YoTn8PSFQM87AYUGyPoR2D0XsJYBYSlA2o3AoH+4yyqUwA3fAT8+APx3MKDWA92nAENf8L0+AeJ3p/oKlQqdJ05E54kTUXb+PP54/3388swz+N/TT6PjuHHo9/DDaHvllXVOw26xIGfXLgycNUseJigUaDtqFM5u2+ZVPfZ99BG6Tp4MjV7vMfzU5s2YFxcHXWQk2l55JS5/6SWEREfXOA2z2exxe9GSkhJXBb2qQ8C5IrZaHzLEnE0m7cW5AJgdRkRERERERNRa6ZzJRObCQs8P/MkQe0ys+3NjCnDLlvqnY2wHTFzt+/wbSYM71T+3Ywe2zp6Nna+9hpC4OAycNQshMTFYcc012Pz443WOW56XB9Fur5ZRpo+PR1l2/bfdPLdjB/L270evadM8hncYMwZjP/0UN2/YgGH//jdObdmC5WPHwmG31zidOXPmeNxqtHt3qf2qEKw+xMz+d6ovmqX2wOw/jIiIiIiIiKh10kZEAADMRVUywvzJELtI+ZUhVnb+PA5+9hn2L1yIwqNH0fHaa3HNf/+L9unpEAQBANDjzjuxfMwYjHj11YBWuLJ9H32EmF69kDhggMfwrpMny69je/VC7CWX4MNOnXBq82a0u+qqatOZNWsWZs6cKb8/c+aMFBQL1l0m/ckQc/YhpoBU54TLLgtwpYiIiIiIiIioJdCGS/GEgGSIXaT8Coi916YNIjp1Qs+77kLPO+9EaGxstTKxl1xSb1AmJCYGglKJshzPW2+W5eTU2/+XpawMh5cuxdAX6m9vGtGxI0JiYlB47FiNATGtVutxe9HiYinLSrAFKSBm8SNDzNmHmEYj3UDAdUcJIiIiIiIiImpdXBlidrMZtooKqHQ66QMGxGR+BcRu3rABba64os4yWqMRt2zaVGcZpUaD+P79kbVhA9ImTAAAiA4HsjZsQN/p0+sc98iXX8JuNqP7bbfVW9+S06dRnp8PfWJivWU9BCtDzNWpvi7C+3GcTSbVGiC6a2colMqAV4uIiIiIiIiImj+t0QgIAiCKMBcVVQqIscmki199iP0yezYqqqbdATAXF2NZPR3pV3XpzJn444MPsP+TT5B/6BDWP/AArGVl6Om86+TqO+7AT5U63XfZ99FHSJ0woVpH+ZbSUmz+v//D2e3bUXTiBE5u2ICvr78ekampaJ+e7lPdBEeQM8Q0vneqDwCx3dMCXCEiIiIiIiIiaikEhUIKiqFKs0lmiMn8yhA7vWUL7Jbqd2C0VVTgzP/+59O0ut5yC0y5ufjl2Wdhys5GbJ8+mLRmjdzRfnFWFgSFZ9yuICMDZ37+GZPWras2PUGpRN4ff+DAJ5/AXFgIQ1IS2o8ejaEvvghVpWaRXgl2H2K+ZIipdHCIAhSCiJhuHRqjVkRERERERETUQmjCw2EuKvLsWJ8ZYjKfAmK5f/wBABBFEfkHD8JU6U6QDrsdJ9asgSE52edK9Js+Hf1qaSI5efPmasOiunTB42LNt/1Uh4Rg0tq1PtehRsEIiNnMgM15d0tfMsQEATarAhqNHdGpKY1TNyIiIiIiIiJqEXQRESjJymKGWC18Coh90qcPBEGAIAj4ooamkaqQEFz19tsBq1ywCcEIiLmaS0IAtEavRxNFEZYKBzQaILKdj32lERERER
EREdFFxdWxvkeXV8wQk/kUELsnMxMQRXzQsSNu27EDIZXuLqnUaBAaF3dxdeZur2j6ebo61NeEAYL3XbyVnjkDS4UIgxEwJkY1Tt2IiIiIiIiIqEXQhkutziweTSYLg1OZZsingFh4u3YAgMcdjkapTHMj2Kv3k9boXBli2gifRss/dAg6Z0KbUgxS32dERERERERE1CzUnCFWWFPRVsnrgNixb75Bh7FjoVSrceybb+osm3rddQ2uWLMQzAwxXzrUB5B/8CBiXXEwS0kga0RERERERERELYwrIGZmk8kaeR0Q+3rCBDyQnQ19XBy+njCh1nKCIOAxuz0QdQs6wWEFRIdPTRcbzJUh5kuH+gAKDh1ChNU1jdLA1omIiIiIiIiIWhRXk0n5LpOi2LIyxFZN9H2cqxcAoXFeFfU6IFa5mWRraTIJQLrrozqk6ebnyhDztcnkwYNo47q5pJUBMSIiIiIiIqLWrFqGmK0ccNiCVh+fHfsa6HIzoPIyJnN4iZQgFOiAWKtlr2jagJjch5hvGWL5Bw/CEu+aBptMEhEREREREbVmuqoBMWd2mCgIAMRgVMl3V77ldYALR77yadJ+tQXc8PDD2P3WW9WG737nHWycMcOfSTZftibuR8yVvuhDhpgpNxfl+fmwuu4BwAwxIiIiIiIiolZNU7XJpLP/MFHtWwJO0Ny8CdBFeV9+4g+AIdnr4n4FxI4uX46koUOrDU8eMgRHvvItItfsNXXH+q4O7rRGr0fJP3gQAKAIiZAGsA8xIiIiIiIiolattgwxaMKCUR3fpQwHFD40bGxzOaDSel3cr4BYeX6+3DlbZRqjEeV5ef5Msvlq6gwxVzBL7f0Gmn/okDRKZII0gBliRERERERERK1atT7EXE0mNd4n4DQbX1wJbH2++vCKC9JnfvArIBaRmorMNWuqDc/84QeEd+zoV0WaraYOiFnLpGe13utRXBli2tg20gD2IUZERERERETUqlW7y6TcZLIFBsRObQb2vgN8PcEdNwEAuwU4vcWvSfrVqf6lM2diw/TpKM/NRdsrpUjcyQ0bsPO113Dl3Ll+VaTZauomkzbnF6sxeD2KKyAWktQeKAMzxIiIiIiIiIhaOVeGmKWkBA6bDYqWnCEGAJN+BH68D1gyCJjwLRDevkGT8ysg1uuuu2A3m7H95Zex7cUXAQDh7dvj6vnz0eOOOxpUoWYnaE0m688Qc9hsOP7tt8jZtQsAoE9JBQ6DfYgRERERERERtXKVu7oyFxcjxJUhpm0hnepXZUgEbt4CrJ0KLL4MuPZLIKqb35PzKyAGAH0eeAB9HngAptxcqEJCoDF4n9HUojR1hpgXTSZLz57Fgc8+w+/z56P45EkAgC4yEmGduksBMSubTBIRERERERG1ZkqNBqqQENjKy2EuKkKIK0PMhz7Lmw1BkJ5VWmD8EmD7S8DyMcBlT/o9Sb8DYi6hsbENnUTz1uR9iLkyxDwDjKIo4tjXX+P3997DyfXrITocAICQ6Ghcct996PO3v0EtnJIKM0OMiIiIiIiIqNXTRkRIAbHCwkp3mWyBTSZF0fP9oH9I2WFrpvg9Sa8DYp/264ebN2yALjISn/bt647O1eCO3bv9rlCz00w61c/+7TesmjhRfp88dCh63n03uk6eDHVIiDQw74JzGgyIEREREREREbV22ogIlJ075wyIOZtMalpghtg9mUBIjOewzjcCUV2AnF1+TdLrgFjq9ddDqdVKrydM8GtmLVLQmkx6ZogVZ2UBAKK6dMHE779HRKdO1cd1bdQMiBERERERERG1eh53mnQUAmihd5k0tqt5eExP6eEHrwNiQ2bPrvH1RS9oTSY9M8QsztukRqSm1hwMA9xBNFsF4LABiga3iCUiIiIiIiKiFsp1p0lzYSGgdWWItaCA2KqJ9ZcBgOtX+DzpBkVM7BYLTOfPy/1ZuRjbtm3IZJuXpswQs1sBu0V6XSUgVlFYCMC9MdeoclaZpRTQ1VGWiI
iIiIiIiC5qusoBsYhCAC0sIFb1jpiHlwAdr3W3kGsAvwJiBUeOYO3dd+Ps1q0ew0VRhCAIeMxub3DFmo2mzBBzNZcEAI1nk0mzM0Os8m1Tq1FqpKwwh03KNGNAjIiIiIiIiKjV8mgyGeJnhtivc4CjK4CCw4AqBEgaAgz7t9R/l4utAtj8GJCxFLCbgfbpwFXvAvp4d5niLODHB4BTm6SEnh5TgCvm1N26bcxCz/dHvgKG/QeI6OjbMtTAr4DYmqlToVCpcMN338GQmFhnB/stXjACYgqVFNyqxOJNQEwQpChpxQXAUtJYtSQiIiIiIiKiFsCjyWRkIQA/OtU/vQXo8yCQcJmUgPPz08BXo4GpB92t2zY/Cvz5PXDtl1JW14bpwDcTgVt/kT532IGV44HQBODWrUDZOeCHOwCFGrjinwFZVl/5FRA7v3cvbt+1C9Fduwa6Ps1PUzaZrKVDfcCdIaapKyDmGpcBMSIiIiIiIqJWzx0QK3DHHDT1xBWqunGN5/sxi4D5cdLdHdsMk+5eue8jYPwSoO2VUpn0hcCibsDZ7UDSIODkOiD/IDDpR2fWWB9g6IvAT08CQ56rlhTUFBT+jBTdvTvK8/ICXZfmqUkzxGruUB/wsskkABjaSM8ZywJZMyIiIiIiIiJqYVwBMUeZO4YjqqUMsZKSEhQXF8sPs9ns3UTNUnwCuijpOWcX4LACbUe5y0R3BcLaAue2Se/PbgNienk2oWyfDliKgbwD/ixag/kVEBv+73/jpyeeQNbmzSjPz4e5uNjjcVEJSoZYAwJig5+Rnne/CeT+EcjaEREREREREVEL4oohOEwF0gC1Xu6zq3v37ggPD5cfc+bMqX+CogPYPANIGgrE9JSGlWVLGV5V+zHXx0ufucqExnt+7npvyq59fse+8XyIDiBrQ/XhfvCryeQXo6So35dXXeUx3N9O9ffMm4ffXnkFZdnZiO3dG1e9/TYSBwyosez+RYuwZupUj2FKrRaPVrgDV6Io4pfZs7Hvgw9gLixE0tChuHr+fESmpflULwBByhCr3mTSqz7EAKDDWCDtRuDocqmzusn/AwS/4p5ERERERERE1IK5MsRQfsE1QP7s4MGDSE5OdpfVauuf4IYHgbz9wOSfA1fJuqyaUH3Y+vs83wsCMNP3mzv6FRC7ZdMmf0ar0eFly7B55kyMWrAAiQMHYvfcufgqPR13ZWRAHxdX4zgaoxF3Z2S4B1Tp1H/Hf/6DPW+9hbGffILwDh3w8zPP4Kv0dEw9eBAqnc63CgajU/2GZIgBwIg3gBNrgLNbgf2LgF53BbCSRERERERERNQS6JwBMcHqbM2ndccUwsLCYDT6cMfJDdOB498Bk38Cwtq4h+sTALsFqCj0zBIry5E+c5XJ3uE5PVOO9ByaUPs8H3N4Xz8f+RUQSxk+PGAV2Pn66+h1zz3o5cz6unrBAvz5/ffY//HHGPjUUzWOIwgC9Ak1rzBRFLF77lwM+sc/kHr99QCAcZ9+infj43Hs66/RdfJk3yrYlE0mLc4MMU0DOtUHAGMKMOR5YMvjwE9PSG154/tJ7XVVPgYEiYiIiIiIiKhFcsUQBKvzxnuVMsS8JorAxoeAYyuBmzcD4R08P4/vL90tMmsD0PlGaVhBBlCSBSQOlt4nDQZ+fRkwnQdCnclPJ9cDGiMQ3d33OgWAXwExAKgoLMS+jz5CwaFDAIDoHj3Q6667vMtgcrJbLMjZtQsDZ82ShwkKBdqOGoWz27bVOp6ltBTvtWsHOByI69cPV/zzn4jp0QMAUJSZibLsbLQb5e7MTRsejsSBA3F227YaA2Jms9mj87iSkkp3aGwGGWKiKPqWIQYAfR8GDnwC5O0DfrzfPdzVfFKpBQY+DQz6R0NrTURERERERETNkCtDTCE64w1aH+8wCUjNJA8vAa5fBWjC3P2CacIBdYg0zV53A5tnSh3ta43AhoekYFjSIKlsu9FS4Gv17cCw/0j9hv38D6DPg4Cqlq
aax76RuoVSqr2r55+rgZSRUp284FfnUtk7d+LDTp2w6403UFFQgIqCAux6/XV82KkTcnbv9no65Xl5EO126OM9O1bTx8ejLLvmTtWiunTBmI8/xg2rVmHc559DdDiwZMgQlJw+DQDyeKFVphlaxzTnzJnj0ZFc9+6VopPNoFN9W0UFHFYrAB8CYko1MGkdMGCWtOHpoqXhokN62MqBX54B9r4bqNoTERERERERUTPi6kNMqxFdA3yfyO/zpTtLfjECWJDofmQsc5cZ8QbQ8Rrg2xuBpcOkJpLXr3B/rlACN3wnPf93MLD6NqD7HcDQF2qf7zc3AOZC7+v5/WSg7JzXxf3KENv06KPodN11SP/gAyhU0iQcNhvWTpuGTTNmYPJPP/kzWa8kDR6MpMGD3e+HDMHCbt3w+3vv4fIXX/RrmrNmzcLMmTPl92fOnHEHxZpBp/quDvUhCNAYqjenrJU+Abjin9JrUQTK8wHRJr3//T1g23NS2mNYCtDp2obVnYiIiIiIiIiaFVVICBQqFbQhzliAPwGxx0QvZqQDRs2THrUxtgMmrvZ+vqIIrLlTauHmDR/jN34FxHJ27sToSsEwAFCoVLjsiSfw+aWXej2dkJgYCEolynJyPIaX5eTU2kdYVUq1GnF9+6Lw2DEAkMcz5eTAkJgolzPl5CCuT58ap6HVaj3uplBcXOz+sBlkiMnNJY1GCAo/7xgpCEBojPv94GeBklPA/o+A7yYDN28CEmu+sycRERERERERtTyCIEAbEQGtLk8a4E+TyWDpMcW38t3+KvVJ5iW/AmIaoxElWVmI7trVY3jJqVPQhIV5PR2lRoP4/v2RtWED0iZMAACIDgeyNmxA3+nTvZqGw25H3r596DBuHAAgvEMH6BMScHLDBjkAZi4uxrlff0WfBx7wum6yZpAh5lOH+t4SBGDUfKD0NHBiLfDFSODKt4Ced1W7aycRERERERERtUzaiAhoQ1wBsYig1sUnYxY26uT9SjfqcsstWHv33Ti8bBmKT51C8alTOLx0KdZNm4aut97q07QunTkTf3zwAfZ/8gnyDx3C+gcegLWsDD2dd51cfccd+KlSp/tbX3gBJ9atQ+GffyJn926svu02FJ88iV7TpgGQop/9ZszA9pdewrFvvkHuvn344Y47YEhKQqoz6OaTZtCpvs8d6ntLqQau/VLqY8xmAtZNA76/VWobTEREREREREQtnjY8HFqd/CaodWlO/MoQG/HqqxAEAT/ccQccNqkdqkKtRp8HHsAV//qXT9PqesstMOXm4pdnn4UpOxuxffpg0po1ckf7xVlZHs0EzRcuYO0998CUnQ1tZCTi+/fHrVu3IqZSR/gDnngC1rIyrLv3XpgLC5F8+eW4cc0aqHS6avOvV3NqMhnogBgg3SHixh+A314Ffvm71Cne6Z+AwbOlbDFv7+ZARERERERERM2OlCEmvwlmVZoVQRRFL3pHq5nVZELh8eMAgIhOnaAODQ1YxYLp9OnTSElJQdFLgNFoBB5qooypL68Gsn4Exn0utX112vfRR1g7bRo6jh+Pid9913jzP/crsPqvQKH0nSIiFbj0cSDhMiCqm9e3LiUiIiIiIiKi5mHVpEkYlLIc8W0ATFyN0+peSElJwalTp9CmTZtgVy9o/MoQc1GHhiK2V69A1aV5CkaGmKoJM8QqSxwITDkA/PE+sP1FoPAY8OP90meCAojuDqROBLrcDMT0aNy6EBEREREREVGDacPDPTPEHMGsTfPhV0DMVlGB3W+/jVObNsF0/jxEh+favGP37oBUrlmwWwDRIQWEGpurU31NzZ3qayMiGr8OKi3Q7yGg553AnreBk+uB3H1ART6Qt196bH8BiO4BdLlFekR1bvx6EREREREREZHPpLtMym+A8mDWpvnwKyC29u67cWLdOnSeNAkJAwZAuNjvSmgzN01zwdr6ECssBNAEGWKVacKAgU9LD1EEyrKBrA1SH2Mn1gL5B4Ctz0qP2D
7O4NjNQETHpqtjZaIIlJwCTOfrLysogfAOgC6i0atFREREREREVKtTm4G97wIVF+ovqzFI3RnF9ADi+gHR3byahS4iHFqt8402HChvYSlipjxg/8fAuW1SbAIA9AlA0hCgx51AaKxfk/UrIHb8u+9w4+rVSB461K+Ztjj2iuAGxJwZYpqmDIhVJgiAIRHofpv0qLgAHPsayPhC6vMsd6/0+HkWEH8pkHy51LzS9dBFBr5ODhtwagtwdAWQ8xuQf8idYectQ5KU6dZ5EtB9ipQdR0RERERERNSYRBHI2ii1vjr9k48jf+1+2fYq6YZ4ba6oc4wQYwgUNucbbQSAAh/nGUTZvwHL0wFVKNBuFBDpbKFmygF2vwXs+Bdw41og4VKfJ+1XQMyQnAxNWJg/o7YooqAA4ABsTdSPmCugo66lyWSwAmJV6SKBnlOlR3m+FJTK+AI4tRHI2Sk9KtMnSIGxqO5SBDu6O2BoIwXavOWwSZ39FxwCcv8AMlcD5XmeZRRqIDS+/unazVImWelZ6XFyPbDtRWDAU9KdNXnzAKKmIYrS7/noCqDwqI8jC0B8fyDtBinjk4iIiIiouRNF4OQ6YNsLwNmt0jClBuh5N5DsRcJReT6Qf1BqsXVuu9SKK2uDlCllbFfraB0VRwAADocAhaqFne9ufAjofBMwakH1c31RlPo93/gQ8JdtPk/ar4DYiNdew5Ynn8TVCxYgvF3tK73FU+oAmJqmY33RAVhN0utaMsSaTUCsspBo4JJ7pIfpPPDn90DePueP9KDUjLEs29nkcmNg562Llk6G240GYnpKd8VUqr0b11wkZZWd+RnY/QZQehrYOB3431NAx/FA2kSg3dWNk91G1FqZ8qT9Q8Ehaf9wYo37rrb+OLwE2PKY1Gw77QYg9QZpX3CxN+Mn71UUSttbwWHpUXauYdPTRUnNFKK7ATGXsOk9ERH5ryzbff7nLXWodwkA1PyYzgPHvgH2fQBk75CGqXRAr3uBy54AwpJ9n2bxSSk7at9HUnDNFWCrgdH5XGbSIKylbT+5vwPpi2re7gUB6Pco8HlfvybtV0As4dJLYa+owIcdO0IVGgql2jMIMb2gBaXf1UFUaAGYmiZDzFYOQJReV+lU39KcA2KVhcZJWWOVWUqkkxBXgCz/kBTNLs/1ceKCFPF2nYgkXw6kjAAUft4oVRsOJA2SHn2nS+2Rf/uPtFPJWCY9ACm7LaorENcX6HSdNF9/59kaiCJQesa734w2AgiNafQqUZBdOAocWyU1sz67FfJ+zkWlA9qP8f23ZS2Xrq6d/sndbHvrbCkwnnqDFNROHNA0N0Sh4HL1IVlw2DP4lX9ISqVvLIISSBkubW8dxwPG9jxBaY7Mxc5AvHObKPemr1EFENZW+v+P6io1zahybNbkrCbgwhGgIENaluITgGivf7yQGPdy6BMBOLdRdaj0vqHbrLUMKG1goLmykGhejCzP9+xHSGO4eAMgokNqrSEfN4pAyWn3ftzsRX9KlQlKIHGgdMxuSAp4dRvMbgXy/pCOiY59Ld2szB8ao/t3LT+6ARGdvE8OCBRRlM7rXN9ZweHqrXjqJQBhbdzLEdNTOj5sjqpts3WoKHAfm5z5BTj7izQ+AKhCgN4PAJc+LnVN5C9jO2DUfGDA08Cf3wEOS61FLxw7hj1vv4NCWyImzvZ/lkERmiAFEaO71vx59g5pP+kHQRRFsf5inr4YNQolWVnoeffd0MdX30H3nDLFr8o0F6dPn0ZKSgouvJ6ACEc2cNtuIN6/iKPXTOeB+c4vcabd4yTuw9RUFB4/jlt//rn19NsWDKIDyN4pNd86tlI68KxKFwW0TwdiegFRXYDwyn88CsDYVjrIbE2sJiBnN3DcGfQoPOb9uLpoaT2Gpbi3eaVWCmpEdXFm/WkapdoNUlEIXHCelJScRrUgjy80RulkK6qrtP0EKoCjDpOuNDV1QMhWITWDPP6NtD3kH/D8PCJVOtiJ6ioFrNqPad
iJpikP+PNb4OhKKUBmN7s/0ydK04/uLm1PMb2A8Pb+zysQRIcUvHH1GSmK0v6/4LC0TXlzY5AWR5C27cgu0veg9fHijrXMGQg4DBSdcAcBHFag8E9pvbnWZ00Mye5triG/MVGUMsxcGY7FJz0/V+ulZYzpIf1PdBjXOCf2tgop0OzaZgqPe273DSEopMBeVBdpWRry2xRU0oF6U/fPKTqk7eTEGikYf2qTtK00VFiKMzjWpVKAKcF9DKwKlcoolA2ou/MmRpVPLF3/NVW3t0BwbbOVT6ojUuv/zhx2qZnO8VXAyR8Dt/25hMY5129SzUEghVo68XftUyI7N69jr/IC5/eWIT2XnHKfBNdGdLgDQRX51T/XGKVlrfxdhdcR/DAk+76vbUwOuxT8kS9aZLi3b1sj3eoucSCQNNS5vpzrLiS2aQOLhX9Kx0OnNjv/w45L3cC4CAopMOILW3nt25NCJW0XUV08WxzpE9y/9ZAY/9eBwwYUZVbfR3nTEbwv1Hrp+C31eql/rIYG7x12qTVQXccKLpYS9/ZZkuVe13aL9H/b0G02vr90Ie2Se6R9XRM6//vv+LRPH4TGx+Nv2dlyzOPUqVNo06ZNk9bFZ3vmSa1CLrnPuU044yZlOVJz0X0fAMNfBfr8zedJ+xUQmxsair9s24a43r19nmFLIAfE3myHCOtJ4NatQNLgxp1p4Z/AR52kA6pHPH+s82JjUZ6Xhyn79iG2Z8/GrQe5ydlth6QD6uPf1nyQUlVYSqWDNOdz1aBPQ9nM0p9qgeuA6whgLgzMtL1lKZHmW5LlOVxQ1n9gKoq+3wSBfKcKBSLTPLdD12tNAPqBFEVp+zv+DXB6sztgUTlAqFABKSOB1AnSFduwRvzDtZQAmWukoHbm99L7qqK6SfXoOB6I7Q1ojdXLeMt18lp6xj3MVi4FhQsygKI/3Qe+ol3K6LhwtPEO/lszhQqISJMO9qO7ua8yB2pbr0nhn86r/Cul4EDlkxxXnZKvkO4AJZ+062ucVK0sxZ4njgUZzsBIA4LwTUlQSH38RaR5nvQZkt3rxN//R3ORO+BQ+bnwaPUr92EpzsB4V2ne9c3LbpV+v3K2g5dZ7UqttEzG9j5mkzuzqwsO17zfcnE12Y3q4swEqSdw5RHEPeR5DGM1eZdh5g21PjDHNw05NmiKY6/K5G2khm3Q51YQNVAb3AEAa1n9AbWahMZ7rg/Xc3iHxmnt4ArqVb6wU3JKOn7+87va14tC5bl/CI1zX8TwNTPOUgKcWCvtk2uijXAHs1HDdJUad7DVm9+YB1HKjHJtC2d+rjkDTBMmdcviyi729cKJ6xwg/5BnYKrgcBCPrQVpu3IFHmsLZtfGYZOOkQoOA3k1tCRyBYQNyajxe6tvujX9LzRE1W22NmqDO4Ad09OZTR687qaKTpzABx06QKXTYUZ5ecsKiAHA4WVSV0c5u6QgJyBdhIrvD/SfCXS52a/J+hUQ+7RfP4x6910kDRrk10ybO9fGUfBOZ0RWHAFu2gi0Hdm4M83dB3x6ifQn8IC7iYcoinhDq4XDasW9WVkwpqQ0bj2odg6b9Od25mf3n13RCfdBisNS94GsKkQKToTE+X+VQ7RL8yw+4d/BUWMJiZH6ckudAHQY490JqNXkznIwZbuHW0qlQNuFDOkqlCNAB+yBpNa7s7rC2zfswNJ18FRwWErBDhRLUfUT9Mr0ic5MgAakpBdl1pwRqI1wHuxNcGbJRPg/D3/ZzNIVo3O/ujMs8g9UXyf6BOd68PEKbUWBtJ3W9ZuvjUItHdy56CLdmSfByOprbHartK1ccAbvfe0vRaV1BrucWbly9oqrKX1XILxj0zcTqaxyAOXcr1KQuGp2ZCBpI9wnHv4E2mpjt7iX40IDTyBs5YDNy+/al/9HW7lUt7qawyrU0l2vUydIGQZRXbyudo3kjJ/KJ5+HpOEuluLAZKIJCml7rpwJ5MrsCGQ3A3aLFNStmolW9Kd3/7sRHYFO10
vrN7pH4LJuXBfb6mp25Tp+cAWg6rpY6dq2KgeFDEmo96Ta4coGOeKZhekK+lTN9KlKDvp2lQIF3uyfQuMr/aYrXVi0mZ0XWg57boeVj0ErE+11XyR1Zdh5Exz2hihKTZHru+CjCXO2sKiyXXu7fnxRek66AVfePs9j9qa+mCAogTbD3Bfh5KB8I2SpiZUC6xeOupvOuTLTXcea5iL/5yEIns3JK2eWBuqmZKIInN/tvOC0SvovDcQ5T9Vjr9q49hnVji0U0jG/a5ttgV3oVFy4gHeiogAAj5rNOHf+fMsKiLnYre7/h5CYBu8//AqInVi3Dluffx6Xv/wyYnv1gqJKH2JaYwOuuDcDckBsfi9Elu0DJq4GOoxt3Jme3Q78d7D0A5v2pzzYWl6ON0OlP8WHiopa/Lq96Jnyql8pdDVpCcSBcmWaMM8DvNBY+HTVpKGUWvdBJvsCa34qByGqbo+BbJan1EgZYB3GuQ/2QhsQ9G1M5iIpg+z4KukmH4HoX0pQOJtNOZtJVW6uUDXQ5jpBaqEHUuSjwuPSXYxdJ6+Fx6QghC/kk3lXk58gNfvxR+Xmf9WyJU+6+8MqPOb//6PcDKhKFkxDL1T4w+HMAi3IkJrm+Hp4HRIjZTd6BH3JK+X5tWQLNmDb8oYqtOYMrObQ55y5yP0bu5ABFBxxX5RozCxlhcqZ1eUMtKkNQPvRzn54rwjuhQtruTuwWFFLf9euJvryRW8fL8q6MpkiuwCxl0gXi0OiGlz1Vs0VEL6QAZh8zL4UBOexV1fpAlorP/Zy2O14XSWtg7+dP48Cs7nlBMQK/5SOnxvh2MevgNirCmknJ1SpkCiKEAQBj9mbYUaHD1wBsfz3L0VU8U7guhXSHcwa08kfga+ulq6cTPlDHlyWnY35iVK76cdsNgiKiyxzoLVw2KQ/1gsZDWzaKLhPqi/WDlap8VUUSgd8RZmAWMcV7vpojNLNLRqrSVpjc50wFP7p+3pQ6YGozjx5JWooX/8fBZWUnRTZuXn1kUTNT+Vty6NJoxcdfiuUQFg7Z3AjzTMLMzS+5Wb0yn2VZXh3gwlvaSOk3yQv+BA1a2+GhcFaWoq7jx5FmU7XcgJiryuB+8+5+1379hbgyrfcfYk1gF97rFs2bWrwjFsEhfMkpynuMunq5K9Kswez6w6TRiODYS2ZQgVEpkoPomDTRUgd2icOCHZNgksbDiRcJj2IKDj4/0iNpfK21XF8sGvTPAjOG0AZ2wa7JkQUBNrwcFhLS6UYg64B3aY0tao5XJmrAeucgEza54CY3WrFthdewNULFiAyLS0glWiuRKVzI7E3RUDM2QliLQExTTivghIRERERERGR77Th4Sg9cwaWoiIo4hueXXUx8DnlSKlWI/ePP+oveDFQaaTnprgjmJwh5tnngJwhxoAYEREREREREfnBFVNwxRhaDEFAtb6yA9R1kF9NJrvfdhv2ffQRhv3rXwGpRHMlZ4g1hyaTDIgRERERERERkR80lQJiAbovaNMQRWDNndJN3QCpBd/6+6vfZfv6FT5P2q+AmMNmw76PP8bJH39EfP/+UOs9KzLy9df9mWzzo3RmiDVFk0mLs8lklbvSWBgQIyIiIiIiIqIG0BqNAABLcXGQa+KjHlM833e7LWCT9isglrd/P+L79QMAXDhyxPPDi+mud8wQIyIiIiIiIqIWrsU2mRyzsNEmzbtM1kFs0rtMslN9IiIiIiIiIgo8TUsNiDUinzvVb1Uqt1FtbPV0qq+LiGj8OhARERERERHRRafFZog1Ir8yxAAge+dOZHzxBYqzsuCwWDw+u36F752ZNUfNqVN9ZogRERERERERkT8aFBA7/RPw2ytAzi6g7Bxw3UogbYL78zV3Agc+8RynfTpw4xr3+/ICYONDwJ/fAoICSLsRGPlmtX7Um5JfGWKHly7FkiFDkH/oEI6tXAm71Yq8AweQtXHjxRW4UTVhp/pyk0l2qk9EREREREREgeOKKVj8CYhZy4
DY3sBV82ov034McP8592P8fz0/X/1XIP8AMGk9MOE7Kci2/l7f6xJAfmWIbf/nPzHyjTfQ98EH8WZYGK58802Ed+iA9ffdB31iYqDrGDTNIUOsorAQAANiREREREREROSfBmWIdRgrPeqi1AL6hJo/yz8EnFgD/PU3IOFSadiVbwMrxgHDXwUMSb7XKQD8yhArPH4cHcePBwAoNRpYy8ogCAL6P/oo/nj//YBWMKgUzgyxpgiIWWruVJ8ZYkRERERERETUEDV1ql9SUoLi4mL5YTab/Z/B6c3Au3HAx12AHx8AyvPdn53dBmgj3MEwAGg3Smo6ee5X/+fZQH4FxHSRkbCUlAAADMnJyNu/HwBgLiyE1WQKXO2CzZUh1pSd6mtq7lSfATEiIiIiIiIi8kdNGWLdu3dHeHi4/JgzZ45/E28/BhjzKXDTBuCKfwOntgArxgIOu/S5KRsIjfMcR6ECdFFAWbZ/8wwAv5pMthk2DCfXr0dsr17octNN2PjII8jauBEn169Hu6uu8nl6e+bNw2+vvIKy7GzE9u6Nq95+G4kDBtRY9o8PPsCBTz+Vg3Dx/fvjin/+06P8D3feiQOfeHbo1j49HZPWrIEvRNddJpukyWTNGWLsVJ+IiIiIiIiIGqKmgNjBgweRnJzsLqPV+jfxrpPdr2N7AbGXAB91Ak5tBtr5HiNqKn4FxK565x3YK6Qg0aC//x0KtRpnt25F2o03YvA//uHTtA4vW4bNM2di1IIFSBw4ELvnzsVX6em4KyMD+ri4auVPbd6MrrfeiuQhQ6DU6bDj3//GV6NH484DBxBW6YtsP2YMxi5cKL9X+vPFugJiTZkhVqlTfVEUmSFGRERERERERA3iiinYTCbYbTYAQFhYGIxGY+BnFtERCIkBCo9JAbHQBMB03rOMwwZUFNTe71gT8CkgZi4uBgAoVCooDAb5fZ+//Q19/vY3vyqw8/XX0euee9Br6lQAwNULFuDP77/H/o8/xsCnnqpWfvzixR7v0z/8EEeXL0fWhg3occcd8nCVVgt9QsNWrKhqygyx6p3q2yoq4LBaATAgRkRERERERET+0VQKfFmdXWA1mpLTUh9ieudNF5MGA+ZCIGcXEN9fGpa1ERAdQOLAxq1LHXwKiL0dEQFBEOot95jd7tX07BYLcnbtwsBZs+RhgkKBtqNG4ey2bV5Nw2YywWG1QhcV5TH81ObNmBcXB11kJNpeeSUuf+klhERH1zgNs9ns0XlciWvjaKoMMbsFcEiBr8oBMfl2qIIAjcFQw4hERERERERERHVTqtVQhYbCZjLJyU1es5RK2V4uxZnA+b1SH2C6KGDb80DajVK2V+Fx4KcngMhUoH26VD66m9TP2Lp7gFELpPjHxulSU8sg3WES8DEgdsumTfJrURSxYtw4pH/4IQyVmir6ojwvD6LdDn18vMdwfXw8Cg4f9moaW558EvqkJLQbNUoe1mHMGKRNnIjwDh1QePw4/vf001g+diz+sm0bFEpltWnMmTMHzz//fLXhoqKJMsRc2WGAR0BMbi5pNEJQ+HX/AyIiIiIiIiIiaMPDYTOZ5Jskei1nJ/DFSPf7zTOl5x5TgKvmA7l/AAc+kbLADElAu9HA0BcBVaWuq8YtloJgX14l3V0y7UbgyrcavEwN4VNALGX4cI/3glKJxEGDENGxY0Ar5a1f//UvZCxdils2b4ZKp5OHd53s7tAttlcvxF5yCT7s1AmnNm+usdP/WbNmYebMmfL7M2fOoHv37u67TDZ2QMzi7FBfoQaUGnkwO9QnIiIiIiIiokDQGo0oO3fO94BYygjgMbH2zyetrX8aIVHA+CW+zbeRBTXtKCQmBoJSibKcHI/hZTk59fb/9durr2LHv/6FSevWIfaSS+osG9GxI0JiYlB47FiNn2u1WhiNRvkRFhYGABBdwanGbjLpyhDTeDaLZIf6RERERERERBQIrmQbi69NJi9SQQ2IKTUaxPfvj6wNG+RhosOBrA
0bkDR4cK3j7fjPf7DtxRdx45o1SLj00nrnU3L6NMrz86FPTPSxgk2UIWZzBsRUeo/BDIgRERERERERUSC4YguN3ql+C9HwgJgXnezX5dKZM/HHBx9g/yefIP/QIax/4AFYy8rQ03nXydV33IGfKnW6/+u//41fnnkGYz7+GOHt26MsOxtl2dmwlErNDi2lpdj8f/+Hs9u3o+jECZzcsAFfX389IlNT0T493bfKudq7OqyAw7sbBfjF1WRS7RkQszAgRkREREREREQB4Iot+Nyp/kXKpz7EVk2c6PHeXlGBH++/H2q9ZyDn+hUrvJ5m11tugSk3F788+yxM2dmI7dMHk9askTvaL87K8uhQ/vf582G3WPDNpEke0xk8ezaGPvccBKUSeX/8gQOffAJzYSEMSUloP3o0hr74IlRaLXwhKiuVt5sBRahP41dlc97Jslo9amkyeWzVKgBARKdODZovEREREREREbVucoaYM6GotfMpIFa1c/dut90WkEr0mz4d/aZPr/GzyZs3e7y/98SJOqelDgnBpLVedOjmDUWlwJWtAlD7HxArPnUKy4YPh7mwEGM/+QSdrr3W/aG1eoZY3oEDOP7tt4AgoM+DD/o9XyIiIiIiIiIi9iHmyaeA2NiFCxurHs2TQgUISkC019uxvrW8HOqQkBo/qygsxIpx41CUmQkAWHnddRj49NMY+sILUCiV7gwxtTtDbMd//gMASJs4EVGdOwdgYYiIiIiIiIiotZKbTLIPMQBB7lS/RVDV37H+vo8/xttGI74YNQqFx497fGa3WLBq4kTk7d8PfWIiLrn3XgDAr//8J768+mqc37u3UkBMyhArzsrC4SXS7UgHPPlkYJeHiIiIiIiIiFodV0DMwoAYAB8zxFolpU4KWNWSIXZqyxasv+8+OGw2ZG3YgEU9e2Lwc88heehQXDhyBEeWL8epTZugNhhw4+rViOvTBykjRmDttGk4tWkTPu3bF2Mf7IYeHSBniO18/XU4bDa0vfJKJF52WRMuLBERERERERFdjBgQ88SAWH3kDLHyah8VZmbimxtvhMNmQ+r118NaVoaTP/6I/z31lEc5hUqF65cvR1yfPgCAbrfeioRLL8XW557D4aVLUXzsENABOPjFSuRuisUfH3wAABhQZTpERERERERERP7Qsg8xDwyI1cOCcGhwBo6zO6GI7+8eXlKCr6+7DuX5+Yjv3x/jlyyBKiQEBz79FFufew4QRUR16YLIzp3R5eab0eaKKzymG5mWhvGLF2Pws8+iaOEEAIdRklOI3xZKfYfF9+uHdqNGNd2CEhEREREREdFFiwExTwyI1eH83r04uTIDQ68Gzn3+BPTGUYjo1Alntm7FmqlTceHIEegTEjBh1SqoQ6U7UPacMgU9p0zxeh5RXbog6sorgH2H0eH6m3DeICD3998x4vXXIQhCYy0aEREREREREbUiGjaZ9MCAWB3WTpuGeNGOoVcDCXHF+GBQH7QdewMOfv45IIrQJyZiwqpVCEtObtiMrKUAgLj+g3HtvY8GoOZERERERERERG7sQ8wT7zJZh7KcHCC6G2z6DlCqgJSUUhz87DNAFNFjyhRMPXAgMJ3ey3eZNDR8WkREREREREREVbgCYlaTKcg1aR4YEKuDJiwME77+GqqefwEADLy5GxIHDsQN332HsYsWQRcZGZgZFZ+UnkPjAjM9IiIiIiIiIqJKNEZjsKvQrLDJZB1GzZuHqM6dgfCJwK8vIybkBP76v1xArQ/cTOxWoOCQ9Dr2ksBNl4iIiIiIiIjISaXVQqnVAmZzsKvSLDBDrA5tR46UXsT1BYztAVs5cGJtYGdy4Qhgt0jNJY3tAjttIiIiIiIiIiInV7NJYkDMO4IApE2UXh9ZHthp5+2TnmN6AQK/DiIiIiIiIiJqHAyIuTEC4620G6XnP78DbAFML8z9Q3qO7RW4aRIRERERERERVaFhQEzGgJi3kgYB+gTAUiwFxQJFzhBj/2FERE
RERERE1HiYIebGgJi3BAXQ8y7p9c9PS/1+BQIzxIiIiIiIiIioCTAg5saAmC8uexIIjZM6wv99fsOnZy4CSrKk1zEMiBERERERERFR42FAzI0BMV9ojcDQl6TXW58DyvMbNr1cZ3PJsBRAF9mwaRERERERERER1YEBMTcGxHzV8y4g9hLAXCgFxRqi8h0miYiIiIiIiIgaETvVd2NAzFcKJTBirvT69/lA/kH/p5Xn6j+MHeoTERERERERUeNihpgbA2L+aDsSSJ0AiHbgl2f9n04uM8SIiIiIiIiIqGkwIObGgJi/hr4EQACOLnffKdIXouhuMskMMSIiIiIiIiJqZAyIuTEg5q+YHkDnm6TX21/0ffySLMBSDCjUQGSXwNaNiIiIiIiIiKgKvwJip38CVl4LLEgCXhOAo197fi6KUuu5BYnAmyHAl6OAC0c9y5QXAN//FXjbCLwTAay9G7CU+rsYAcGAWEMMfkZ6PvKVu/mjt1xZZdHdAKU6sPUiIiIiIiIiIqpCYzT6PpK1DIjtDVw1r+bPf/sPsOctYNQC4C+/Amo9sDwdsFW4y6z+K5B/AJi0HpjwnRRkW3+vfwsRIAyINURMT/+zxOQ7TLK5JBERERERERE1Pr8yxDqMBS5/CUi7ofpnogjsngsM/AeQer3UJdTYT4HSs8Cxr6Uy+YeAE2uA0R8CiQOBNpcDV74NHF4qlQsSVdDmfLEY9Axw5Evpse8jQBfp3Xgn10vP7FCfiIiIiIiIiJpA5YBYSUkJiouL3Z9ptdBqtb5NsCgTKMsG2o2qPBMp8HV2G9B1svSsjQASLnWXaTcKEBTAuV9rDrQ1gWYRENszbx5+e+UVlGVnI7Z3b1z19ttIHDCg1vIZX36JX555BkUnTiAyLQ3D/v1vdBw3Tv5cFEX8Mns29n3wAcyFhUgaOhRXz5+PyLS0wFc+thfQeZLUbHLdND/GZ4YYERERERERETU+TaWAWPfu3T0+mz17Np577jnfJliWLT2HxnsOD413f2bKBkLjPD9XqABdlLtMEAQ9IHZ42TJsnjkToxYsQOLAgdg9dy6+Sk/HXRkZ0MfFVSt/ZutWfHfrrbhizhx0uuYaHFqyBF9PmIDbd+9GbM+eAIAd//kP9rz1FsZ+8gnCO3TAz888g6/S0zH14EGodLrAL8Sw/0idwVlKfBsvoiPQ9srA14eIiIiIiIiIqAqVToe4Pn2AvXtx8OBBJCcny5/5nB3WwgU9ILbz9dfR65570GvqVADA1QsW4M/vv8f+jz/GwKeeqlZ+95tvosOYMRjwf/8HALj8xRdxcv167H3nHVy9YAFEUcTuuXMx6B//QOr11wMAxn36Kd6Nj8exr79G18mTA78Q4R2AG38I/HSJiIiIiIiIiAJEEARM/PZbPJKSgrCwMBj96WS/Mn2C9GzKAQyJ7uGmHCC2j/Q6NAEwnfccz2EDKgrc4wdBUDvVt1ssyNm1C+1GuduaCgoF2o4ahbPbttU4ztlt2zzKA0D79HS5fFFmJsqysz3KaMPDkThwYK3TNJvNKC4ulh8lJT5mehERERERERERtTbhHaSgVtYG9zBzsdQ3WNJg6X3SYMBcCOTscpfJ2giIDqmvsSAJakCsPC8Pot0OfbxnW1N9fDzKsmtuR1qWnY3QKuVDK5V3PddVpqo5c+YgPDxcflRtR0tERERERERE1CpZSoHze6UHABRnSq+LswBBAPrNALa/BBz7BsjdB/xwB2BIAlInSOWjuwHtxwDr7gHO7QDO/AJsnC51uG9ICsoiAc2gyWRzMGvWLMycOVN+f+bMGQbFiIiIiIiIiIhydgJfjHS/3+yMn/SYAoxZBFz2BGAtA9bfK2WCJV8OTFwDqCr14T5usRQE+/Iq6e6SaTcCV77VlEtRTVADYiExMRCUSpTl5HgML8vJgT6h5nak+oQEmKqUN1Uq73o25eTAkJjoUSauT58ap1n11qKVbztKRERERERERNRqpYwAHhNr/1wQgK
EvSI/ahEQB45cEvGoNEdSAmFKjQXz//sjasAFpEyYAAESHA1kbNqDv9Ok1jpM0eDBObtiA/jNmyMNOrl+PpMFS29TwDh2gT0jAyQ0b5ACYubgY5379FX0eeMCrejkcDgDAuXPn/FswIiIiIiIiIqJmyBXrcMU+WqugN5m8dOZM/DBlCuIvvRSJAwZg19y5sJaVoafzrpOr77gDhuRkDJszBwDQ75FHsGz4cPz22mvoOH48Di9diuydO3H1++8DkO6Y0G/GDGx/6SVEpqUhvEMH/PLMMzAkJSHVGXSrz6lTpwAAAwYMCPwCExEREREREREFWU5ODtq2bRvsagRN0ANiXW+5BabcXPzy7LMwZWcjtk8fTFqzRu5ovzgrC4LC3fd/8pAhGL9kCX7+xz/w89NPIyItDRO+/hqxPXvKZQY88QSsZWVYd++9MBcWIvnyy3HjmjVQ6XTV5l+Tbt26AQD279+P8PDwAC4tUc1KSkrQvXt3HDx4EGFhYcGuDrUS3O6oqXGbo6bGbY6aGrc5CgZud+Qrh8OBnJwc9O3bN9hVCSpBFMU6GoK2TsXFxQgPD0dRURGMRmOwq0OtALc5CgZud9TUuM1RU+M2R02N2xwFA7c7Iv8o6i9CRERERERERER08WBAjIiIiIiIiIiIWhUGxGqg1Woxe/ZsaLXaYFeFWglucxQM3O6oqXGbo6bGbY6aGrc5CgZud0T+YR9iRERERERERETUqjBDjIiIiIiIiIiIWhUGxIiIiIiIiIiIqFVhQIyIiIiIiIiIiFoVBsSIiIiIiIiIiKhVabUBsXnz5qF9+/bQ6XQYOHAgduzYUWf5L7/8El27doVOp0OvXr2wevXqJqopXSx82eYWLVoEQRA8HjqdrglrSy3dTz/9hGuvvRZJSUkQBAFff/11veNs3rwZ/fr1g1arRWpqKhYtWtTo9aSLh6/b3ObNm6vt5wRBQHZ2dtNUmFq8OXPm4LLLLkNYWBji4uIwYcIEZGRk1Dsej+nIX/5sczymo4aaP38+LrnkEhiNRhiNRgwePBg//PBDneNwP0fknVYZEFu2bBlmzpyJ2bNnY/fu3ejduzfS09Nx/vz5Gstv3boVt956K+6++27s2bMHEyZMwIQJE7B///4mrjm1VL5ucwBgNBpx7tw5+XHy5MkmrDG1dGVlZejduzfmzZvnVfnMzEyMHz8eI0eOxN69ezFjxgxMmzYNa9eubeSa0sXC123OJSMjw2NfFxcX10g1pIvNli1b8OCDD2L79u1Yv349rFYrRo8ejbKyslrH4TEdNYQ/2xzAYzpqmDZt2uBf//oXdu3ahZ07d+LKK6/E9ddfjwMHDtRYnvs5Iu8JoiiKwa5EUxs4cCAuu+wyvPPOOwAAh8OBlJQUPPTQQ3jqqaeqlb/llltQVlaG7777Th42aNAg9OnTBwsWLGiyelPL5es2t2jRIsyYMQOFhYVNXFO6GAmCgJUrV2LChAm1lnnyySfx/fffexwsTZ48GYWFhVizZk0T1JIuJt5sc5s3b8bIkSNx4cIFRERENFnd6OKVm5uLuLg4bNmyBcOGDauxDI/pKJC82eZ4TEeNISoqCq+88gruvvvuap9xP0fkvVaXIWaxWLBr1y6MGjVKHqZQKDBq1Chs27atxnG2bdvmUR4A0tPTay1PVJk/2xwAlJaWol27dkhJSanzKhBRIHA/R8HSp08fJCYm4uqrr8Yvv/wS7OpQC1ZUVARAOlGsDfd1FEjebHMAj+kocOx2O5YuXYqysjIMHjy4xjLczxF5r9UFxPLy8mC32xEfH+8xPD4+vtZ+S7Kzs30qT1SZP9tcly5d8PHHH2PVqlX4/PPP4XA4MGTIEJw+fbopqkytUG37ueLiYpSXlwepVnQxS0xMxIIFC7B8+XIsX74cKSkpGDFiBHbv3h3sqlEL5HA4MGPGDAwdOhQ9e/astRyP6ShQvN3meExHgbBv3z4YDAZotVrcf//9WLlyJbp3715jWe7niL
ynCnYFiKi6wYMHe1z1GTJkCLp164b33nsPL774YhBrRkQUGF26dEGXLl3k90OGDMHx48fxxhtv4LPPPgtizaglevDBB7F//378/PPPwa4KtRLebnM8pqNA6NKlC/bu3YuioiJ89dVXmDJlCrZs2VJrUIyIvNPqMsRiYmKgVCqRk5PjMTwnJwcJCQk1jpOQkOBTeaLK/NnmqlKr1ejbty+OHTvWGFUkqnU/ZzQaERISEqRaUWszYMAA7ufIZ9OnT8d3332HTZs2oU2bNnWW5TEdBYIv21xVPKYjf2g0GqSmpqJ///6YM2cOevfujTfffLPGstzPEXmv1QXENBoN+vfvjw0bNsjDHA4HNmzYUGs77MGDB3uUB4D169fXWp6oMn+2uarsdjv27duHxMTExqomtXLcz1FzsHfvXu7nyGuiKGL69OlYuXIlNm7ciA4dOtQ7Dvd11BD+bHNV8ZiOAsHhcMBsNtf4GfdzRN5rlU0mZ86ciSlTpuDSSy/FgAEDMHfuXJSVlWHq1KkAgDvuuAPJycmYM2cOAOCRRx7B8OHD8dprr2H8+PFYunQpdu7ciffffz+Yi0EtiK/b3AsvvIBBgwYhNTUVhYWFeOWVV3Dy5ElMmzYtmItBLUhpaanH1efMzEzs3bsXUVFRaNu2LWbNmoUzZ87g008/BQDcf//9eOedd/DEE0/grrvuwsaNG/HFF1/g+++/D9YiUAvj6zY3d+5cdOjQAT169EBFRQU+/PBDbNy4EevWrQvWIlAL8+CDD2LJkiVYtWoVwsLC5P5xwsPD5cxWHtNRIPmzzfGYjhpq1qxZGDt2LNq2bYuSkhIsWbIEmzdvxtq1awFwP0fUIGIr9fbbb4tt27YVNRqNOGDAAHH79u3yZ8OHDxenTJniUf6LL74QO3fuLGo0GrFHjx7i999/38Q1ppbOl21uxowZctn4+Hhx3Lhx4u7du4NQa2qpNm3aJAKo9nBtZ1OmTBGHDx9ebZw+ffqIGo1G7Nixo7hw4cImrze1XL5uc//+97/FTp06iTqdToyKihJHjBghbty4MTiVpxappu0NgMe+i8d0FEj+bHM8pqOGuuuuu8R27dqJGo1GjI2NFa+66ipx3bp18ufczxH5TxBFUWzKABwREREREREREVEwtbo+xIiIiIiIiIiIqHVjQIyIiIiIiIiIiFoVBsSIiIiIiIiIiKhVYUCMiIiIiIiIiIhaFQbEiIiIiIiIiIioVWFAjIiIiIiIiIiIWhUGxIiIiIiIiIiIqFVhQIyIiIiIiIiIiFoVBsSIiIiI/HDnnXdiwoQJTT7fRYsWQRAECIKAGTNmeDXOnXfeKY/z9ddfN2r9iIiIiFoCVbArQERERNTcCIJQ5+ezZ8/Gm2++CVEUm6hGnoxGIzIyMqDX670q/+abb+Jf//oXEhMTG7lmRERERC0DA2JEREREVZw7d05+vWzZMjz77LPIyMiQhxkMBhgMhmBUDYAUsEtISPC6fHh4OMLDwxuxRkREREQtC5tMEhEREVWRkJAgP8LDw+UAlOthMBiqNZkcMWIEHnroIcyYMQORkZGIj4/HBx98gLKyMkydOhVhYWFITU3FDz/84DGv/fv3Y+zYsTAYDIiPj8ftt9+OvLw8n+v87rvvIi0tDTqdDvHx8Zg0aVJDVwMRERHRRYsBMSIiIqIA+eSTTxATE4MdO3bgoYcewgMPPICbbroJQ4YMwe7duzF69GjcfvvtMJlMAIDCwkJceeWV6Nu3L3bu3Ik1a9YgJycHN998s0/z3blzJx5++GG88MILyMjIwJo1azBs2LDGWEQiIiKiiwKbTBIREREFSO/evfGPf/wDADBr1iz861//QkxMDO655x4AwLPPPov58+fjjz/+wKBBg/DOO++gb9+++Oc//ylP4+OPP0ZKSgqOHDmCzp07ezXfrKws6PV6XHPNNQgLC0O7du3Qt2/fwC8gERER0UWCGWJEREREAXLJJZfIr5VKJaKjo9GrVy95WH
x8PADg/PnzAIDff/8dmzZtkvskMxgM6Nq1KwDg+PHjXs/36quvRrt27dCxY0fcfvvtWLx4sZyFRkRERETVMSBGREREFCBqtdrjvSAIHsNcd690OBwAgNLSUlx77bXYu3evx+Po0aM+NXkMCwvD7t278d///heJiYl49tln0bt3bxQWFjZ8oYiIiIguQmwySURERBQk/fr1w/Lly9G+fXuoVA07LFOpVBg1ahRGjRqF2bNnIyIiAhs3bsTEiRMDVFsiIiKiiwczxIiIiIiC5MEHH0RBQQFuvfVW/Pbbbzh+/DjWrl2LqVOnwm63ez2d7777Dm+99Rb27t2LkydP4tNPP4XD4UCXLl0asfZERERELRcDYkRERERBkpSUhF9++QV2ux2jR49Gr169MGPGDERERECh8P4wLSIiAitWrMCVV16Jbt26YcGCBfjvf/+LHj16NGLtiYiIiFouQRRFMdiVICIiIiLvLFq0CDNmzPCrfzBBELBy5UpMmDAh4PUiIiIiakmYIUZERETUwhQVFcFgMODJJ5/0qvz9998Pg8HQyLUiIiIiajmYIUZERETUgpSUlCAnJweA1FQyJiam3nHOnz+P4uJiAEBiYiL0en2j1pGIiIiouWNAjIiIiIiIiIiIWhU2mSQiIiIiIiIiolaFATEiIiIiIiIiImpVGBAjIiIiIiIiIqJWhQExIiIiIiIiIiJqVRgQIyIiIiIiIiKiVoUBMSIiIiIiIiIialUYECMiIiIiIiIiolaFATEiIiIiIiIiImpV/h8W2+XlUbWg2QAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Split into frames, and compute autocorrelation for each frame\n", + "frames = audio.unfold(-1, window_samples, step_samples)\n", + "autocorrelation = autocorrelate(frames)\n", + "\n", + "# Use autocorrelation maxima to compute harmonicity (max) and lags (index) corresponding to estimated period\n", + "harmonicity, lags = autocorrelation[:, :, min_lag:max_lag].max(dim=-1)\n", + "lags = torch.nn.functional.pad(lags, pad=(3, 3))\n", + "\n", + "# Take the median of 7 frames to avoid short octave jumps\n", + "best_lags, _ = lags.unfold(-1, 7, 1).median(dim=-1)\n", + "\n", + "# Re-add the min_lag back in after previous step removed it\n", + "best_lags = best_lags + min_lag\n", + "estimated_f0 = sample_rate / best_lags\n", + "\n", + "xs = torch.arange(len(harmonicity[0])) * step_size\n", + "\n", + "# Show autocorrelation from min lag to max lag\n", + "plt.imshow(autocorrelation[0, :, min_lag:max_lag].transpose(0,1), aspect=0.1, origin=\"lower\")\n", + "plt.title(\"Autocorrelation\")\n", + "plt.ylabel(\"Lag [ms]\")\n", + "plt.xlabel(\"Time [s]\")\n", + "xticks = (torch.arange(1, 7) / 2 / step_size).int().tolist()\n", + "plt.xticks(xticks, xs[xticks].tolist())\n", + "yticks = torch.linspace(0, max_lag - min_lag, 5).int()\n", + "plt.yticks(yticks.tolist(), ((yticks + min_lag) / step_samples).numpy().round(decimals=2))\n", + "plt.show()\n", + "\n", + "# Show autocorrelation-based features, harmonicity (usually represented in log scale as HNR) and f0\n", + "fig, ax1 = plt.subplots()\n", + "ax1.set_title(\"Autocorrelation-based Features\")\n", + "ax1.set_ylabel(\"Harmonicity\", color=\"darkred\")\n", + "ax1.set_xlabel(\"Time [s]\")\n", + "ax1.plot(xs, harmonicity[0], color=\"darkred\")\n", + "ax1.tick_params(axis=\"y\", labelcolor=\"darkred\")\n", + "ax2 = ax1.twinx()\n", + "ax2.set_ylabel(\"F0 [Hz]\", color=\"darkorange\")\n", + "ax2.plot(xs, estimated_f0[0], color=\"darkorange\")\n", + 
"ax2.tick_params(axis=\"y\", labelcolor=\"darkorange\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "604c2859-5414-4576-bca6-8d2d6714d968", + "metadata": {}, + "source": [ + "To measure Harmonicity-to-Noise ratio (HNR), we need an estimate of noise, in addition to our earlier estimate of harmonicity.\n", + "According to the following publication, we can estimate the noise as just 1 - harmonicity, as any reductions in harmonicity are due to noise.\n", + "\n", + "See \"Harmonic to Noise Ratio Measurement - Selection of Window and Length\"\n", + "By J. Fernandez, F. Teixeira, V. Guedes, A. Junior, and J. P. Teixeira\n", + "\n", + "We have found, however, that the HNR is dominated by the noise term, so for\n", + "efficiency and simplicity, we just take -10 * log(noise) as the HNR value (in decibels).\n", + "\n", + "The max value for HNR is 30 dB, enforced by clamping the noise to a minimum of 10^-3." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "732c593a-5be8-4bbf-b2e6-be43b7d061ce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average HNR: 23.8\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABH8AAADZCAYAAAC5IZxiAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAklxJREFUeJzs3XdcFHf6B/DPbKcuvRfBggqKvfca0zSxpWu6iSlefndJzOWS3F3u0i+9F40xMVFjSbPEXlEBC4ggvffOwvb5/TE7A8gCu7CwLD7v14tXIuwuX2B3dub5PoVhWZYFIYQQQgghhBBCCOmXRPZeACGEEEIIIYQQQgjpORT8IYQQQgghhBBCCOnHKPhDCCGEEEIIIYQQ0o9R8IcQQgghhBBCCCGkH6PgDyGEEEIIIYQQQkg/RsEfQgghhBBCCCGEkH6Mgj+EEEIIIYQQQggh/RgFfwghhBBCCCGEEEL6MYm9F9DTjEYjioqK4ObmBoZh7L0cQgghhBBCCCGEEJtgWRb19fUICgqCSNR+fk+/D/4UFRUhNDTU3ssghBBCCCGEEEII6RH5+fkICQlp9+v9Pvjj5uYGgPtFuLu723k1hBBCCCGEEEIIIbZRV1eH0NBQIfbRnn4f/OFLvdzd3Sn4QwghhBBCCCGEkH6nszY3dm34/Omnn2LkyJFCYGby5MnYs2eP8HW1Wo21a9fC29sbrq6uWLp0KUpLS+24YkIIIYQQQgghhBDHYtfgT0hICF5//XUkJCQgPj4ec+bMweLFi3H58mUAwF/+8hf8+uuv2LZtG44ePYqioiLcfvvt9lwyIYQQQgghhBBCiENhWJZl7b2Ilry8vPDWW29h2bJl8PX1xQ8//IBly5YBAFJTUzFs2DCcPn0akyZNsujx6urqoFQqUVtbS2VfxK50BiMuF9VhZLASIlH3Js/Vq3WIy6qCwWgEACikYkwb5AOJ2K7xXEIIIXZSp9ahsLoJwwL77rlOZYMGFQ1aRAV03JOgL2rQ6JFdrsKIEKW9l0IIIcQK9WodCvr4+2N3WRrz6DNXigaDAT/++CNUKhUmT56MhIQE6HQ6zJs3T7jN0KFDERYWhtOnT7f7OBqNBnV1da0+COkL3jtwFUs+Pokd5wu7/VjrdyTh4U3xWLM5EWs2J2L1hnP44GC6DVZJCCHEET277RIWvX8cpzMr7b0Us2oatbjlwxO48YPjyKlQ2Xs5VvvnL5dxy0cn8MOZPHsvhZBe8c9fL+PZ7RfRx/IECLHaP3YlY9H7x3EivcLeS7E7uwd/kpKS4OrqCrlcjjVr1mDnzp0YPnw4SkpKIJPJ4OHh0er2/v7+KCkpaffxXnvtNSiVSuGDxryTvuKPJO55G59T1e3His+pBgBEB7ljpGkX8ovjWSirU3f7sfu6E+kV+PRIJvQGo72XQkivMhpZ3PbJScx95wjUOoO9l0P6EK3eiCNXywAAe5OL7byatliWxQs7k1BUq4bByOJsdvffB3vbKVNQ7a19qaht1Nl5NYT0rNI6NTaczMHW+AIU1jTZezmEdMs503XTrxeL7LwS+7N78CcqKgoXLlzAmTNn8Nhjj2HVqlVISUnp8uOtX78etbW1wkd+fr4NV0tI1xRUNyLbtNOZ3c0dz5pGLUpMQZ4tj0zC7rVTMSbMA2qdEe8e6N/ZPwdSSrF6w1m8sTcVW87S7qslimuboNVToKw/SCmuw/m8GmSWq3Aqk3avSLOkwhqoddzr/Fgf3NncnlAgbIAAQFJhrR1XY71qlVa4AK5u1OF9yrQl/dyF/Brh/wuqKfhDHJdaZ0BRLfccPpRWBqPx+s5ks3vwRyaTYdCgQRg7dixee+01xMbG4v3330dAQAC0Wi1qampa3b60tBQBAQHtPp5cLhemh9F4d9IbjEYWqzecxV1fxkHXTjZKyzTD7gZ/UkvqAQDBHk5wV0jBMAzW3zgMALA1Ph8ZZQ3devy+6lRGBR7/IRF600H7/YPpUGn0dl5V35aYV43
Jrx3CP3Yl23spdqU3GPH2vjQk5FbbeyndcvRqufD/B6+UdekxWJalFP5+KC6rOZMmu0KF/KpGO66mtdxKFV75hRvkMSbMAwBwycGCP5eLuBYCCil32rzpdE6/fa8lBGgd/OlLxxNCrJVb2Qj+tKe8XoPkIsd6/7E1uwd/rmU0GqHRaDB27FhIpVIcPHhQ+FpaWhry8vIwefJkO66QkNZK69U4klaOU5mVOHil1Oxtjmc0B3/K6jXdClqkmYI/Q1s0zBw/wAvzhvnDYGTx9r60Lj92X8SyLOKyKvHQpnho9UbMG+aPcG9nVDRo8fWJbHsvr087aQo67rxQiDr19VumcDC1DB8dzsALO5LsvZRuaRn8OZRaZnUQp6imCWP+/Scmv3YIL+xMwuHUMmj0VD7WH1xbRnUsvbydW/YulmXxzNaLUGkNmBDhhbeXxwIArhTXOVRG4mXTxcLcof6YO9QPeiOL//ze9Sx1Qvq6C3k1wv9T5g9xZNkVrQP1Xd086y/sGvxZv349jh07hpycHCQlJWH9+vU4cuQI7r77biiVSjz44IN45plncPjwYSQkJOD+++/H5MmTLZ70RUhvaPmm+L2ZRpBGI4tTGa3T8LuT/ZNawu1ADg1sPS3l2RuiIGKAvZdLkJjn2BkOAHCpoAYv7U7GzLeO4I4v4tCoNWDaIB98dNdo/HVBFADg86OZqGzQ2HmlfVdGOfeGp9Ubsf+y+cDk9YDftUwrrUe1Smvn1XRNvVqHRFPmkkTEoLhWLWQBWurPlFJUN+pQUqfGD2fycP/Gc7j1w5PXdWCwP9AbjEIvuRtHcJnRx672jeDP+fwaJORWQyEV4d2VoxDh4wJ3hQRavRFXS617/tpTsinzZ3iQO/5+0zBIRAwOp5XjeB8JshFiSwYji0sFNcK/86sp84c4rizTNZezTAyA2zy7ntk1+FNWVob77rsPUVFRmDt3Ls6dO4d9+/Zh/vz5AIB3330XN998M5YuXYoZM2YgICAAO3bssOeSCWmjZTrs8fSKNoGdlOI6VDfq4CqXINbUnDmnsjvBH+6EOSqgdUnjEH83LBsbAgAOP/krs7wByz49jU2nc5FX1QipmMFNIwPxxX1joZCKcdOIQMQEu0OlNeCjwxn2Xi4AIKu8AQV97AQps7x5t+OX67jJXUltcyP0eAct/TqVWQm9kUWEjwtmDvEFYP0JzDlTgODW2CDcMykMHs5SpJXW45mfLl73NfCOLKW4DiqtAe4KCR6aHgkAOJVR2W4Zcm/akVgAAFgUE4hgDycwDIORIR4AHKvvz2XTWmOClYj0dcXK8dwwkb3J7Q8gIcRRZZY3QKVtzgotqKLMH+K4ssu5ay7+GimpsBal18GAnPbYNfjz9ddfIycnBxqNBmVlZThw4IAQ+AEAhUKBjz/+GFVVVVCpVNixY0eH/X4IsYf8a94Ur21EfNxUejMp0huD/LhsHf5AZC2jkcVVM2VfvMdnDQLDAEfSyh1ylC7vmxPZ0BqMiA1R4sv7xuHCSwvw8V1j4CyTAABEIgbP38D1Odocl2v3evRqlRa3fnQSSz4+iSZt3yijMRpZZJY1PwdOZlQIWVJqnQGPbIrH37ZdHyNci1u8yZ+zwbQ9e+BLvmYM9sGcYX4ArAv+sCwr/Ox3TgjDq0tGYNMDEyCTiHDgSik+7iNBVGK9M6Z+PxMivBAb4gEPZynqNXpcbNGzwx60eiN+u8RNHrttdLDw+ZhgbhPkUoFjBH8aNHph5zg6iNt0mRDhBQBIL6W+P6T/4Uu+3OTcOVdf29gixBr8pvz4AV6IDfUAABy+jrN/+lzPH0IcDZ8OO8p0QNkWn99qDPOJDO6ibdogb0T6ugDoetlXQXUTVFoDZGIRInxc2nx9gI8LZpmyAjadzu3S9+hpZXVqfHU8q92ATbVKi59Nu8XPLxqG+cP94WI6AWlp2mAfTB3kDZ2BxcZTOT255E6dy6lCg0aPigYt9l3uGzvBRbVNaNIZIBExGB7
oDoORxR9J3IXYBwfTsT+lFNsSClBW3//L5lpm/nQU/PnP7ylY8fnpHi2Dyq1U4bvTOdBbkZXBsiyOpnHHkZlRvpgdxQV/EvOqUWVhGVtBdRNK6zSQiBjhWDUyxAOvLokBAPzvwFUcTuu/J0NGI4v4nCqHbxLPsiz+9WsKXvnlspCtdcbU72dihDfEIgbTBvkAsH/p1+G0MtQ06uDnJsdU05oAYKQpAzapsMZOK7POlWKu5CtQqYCPqxwAl2kLcGXYPRVA1xuM2Bqfj7LreIea2Md5U+B4QTS34V5cp3aoHl2EtMRfc0X4uGDuUO786cB13PeHgj+EdBMfxLh3UjiClApUN+qwJ5m7yFbrDDiXw5WZTBvsKwRssrtY9sX3+xno5wqp2PzLd9WUAQCAbQn5fe5CJ7+qEbd/egqv/n4F8/53FB8cTG8VKAOAH87mQa0zYnigOyZFenX4eA9N40octicUtHmc3pTQoscSH7iyN34SzQAfF9w+htt1/+ViES4V1ODzY1nC7VJMvSz6s5bBn6SCWrPZWefzqvHl8Wycza7CrvOFPbIOlmWx9odE/GP3ZXwXZ3lwNrNchcKaJsjEIkyK9EaQhxOGBbqDZYGjVy07gYnP5QIE0cFKOJnq3gFgxbhQ3D0xDCwLPPRtPO7+Kg4bT2YjIbcKv1wswocH0/G//Wlo1PatY4k1UkvqsPzz01j22Wn8Y7djT747mVGJb05mY+OpHHxxPAtGY3NGF5+NMsO0AWDvke98ydeS0cEQixjh8yNMmT9pJfUO0XA82VTyxWf9AECkrwvEIgZ1aj1K63omgL7lXD6e3X4J/7ftYo88PiHt4Sd9zRvmB7lEBJYFimup9Is4ntpGHSpNm2QRPi6Ya8qcPplRYdfrBnui4A8h3cQ3fB7g44w7J4QB4LJuGjR6nM2uglZvRKBSgYG+Lhjg3b3MH37S1zAzJV+8GaYgU71aj52mi1idwYhvTmRjrykoZQ/ZFSqs/Pw0CqqboJCKoNEb8b8/r2Lhe8dwOrMSAFcmsOl0DgDgwWkRYBimg0fkLnJCPJ1Q26TDr3bsaZOQ0xz8OZFR0SdOkvjgzyBfV9w8MggMA5zLqcaTW87DYGTB/2pTivt38MdgZIXabiepGHoji/P5bfv+vL2/eUre9oSeCeCdz69BciH3+/7pXL7FGQN8ydeECC+h9HHOUO4C39KpFXwQeny4Z5uvvXTLcMwZ6geDkcXJjEq88msKln56Gk9tOY93/ryKDw5l4IsWAUNHodUb8fqeVNz8wQkkmHo92bsUqru+OtH8d3h7Xxq2JeSjtkkHF5lYCE5MH8xl2VwqqEFNo30anNc0aoWyRD74zAvxdIKnsxQ6Ayu8p/Vl/Gs2OkgpfE4uEQubOWk91Lj6kGl66ImMChTW2P89hVwfGrV6pJk2GkeHeSLE0wlA2xYHhDgCfrPd310OF7kEwwPdEeCuQJPOgNNZlXZenX1Q8If0eX25J4nOYBQu9EM9nbFyfCgkIgbn82ow4pV9eOrH8wCAaYN8wDAMBvg4AwBqGnVdmjrU3Oy5/eCPSMTg3knhAIBNp3NQXq/B3V+dwb9+S8GazYl4cVdSr6fvZpY3YOXnp1FUq8ZAXxcc/dtsvH/HKPi5yZFb2Yi7v4rDx4cz8HtSEUrrNPBxlePm2MBOH1csYnDXRC7gttnMpLXeoNEbcMm0Mxzq5QSWBXYk9kzmiDUyTX2lBvm5IkCpwPgBXFZAbmUjvF1keGQGlzXFjzDuryobNNAbWYhFDOaY0n3PZbcO/pzMqMDJjEpIxQwkIgaXCmp75KL0uxalmKkl9RY3vOWDP3yjZwCYM9QfAFfaY0ljXz5AOW5A22w6uUSMb1aPx5G/zsLfbxyGCQO8EKhUYPwATyGQ8NO5fKtK1fqCT45k4LOjmdAbWaEUKq+q0eF+Dl5GWT2OpJWDYYDJkd7QG1ms35EEABg7wAsSUzZooNIJQ/xdYWS5wIE9/HqpGDoDi2G
B7hh6zXAChmEwwtT02RH6/vDHSL5XES/KVPp1tQeOFeoWFyYsC+zsIxmljshgZHG1tL5Pn0v2JcmFdTCy3MVygFKBUC/uvJX6/hBHxI9554P1DMNgtmnz7KSds2PthYI/pE9jWRYPfRuP2W8fQUMfK2ECgOIaNYwsIJeI4Osmh5+7As/dMBTBHlwQoKaR6x0y23TR6SyTIMBdAaBrpV982VdHwR8AWDYuBM4yMa6WNmD+u0dxNrsKTlIxGAbYHJeHu76MQ1l97/UR+PdvKSir12BogBt+enQy/N0VWDwqGIf+OgvLxobAyAJv7UvDs9svAQDumxwOuUTcyaNyVowLhVTM4GJ+jZCe35uSC+ug1Rvh5SLDk7MHA+BKv+x9opnJZ/74uQLgJjzx/r0kBtMHcW9+/b3sq8hU8uXnJhfKCFv2/WFZFm/t47J+7p4YLgSIbF2+V9Ggwe+m5rfDArmL4Z/O5Xd6P7XOgDOmi8CZUc3Bn1GhHvBykaFOrReyWtpT26gTshPGDWib+cMb4OOCh2dEYuuayTi9fi62rZmCr1aNg5eLDMW16j41HrW8XtNho3ejkcVW0+/3lVuGY9MDEyCXiKAzsEK2pqP5+kQOAGD+MH98ds9YBCkV4Ie0TYxoHdSbZnp9882gexsfrFh6TdYPb0Qw9xpI6uPBH7XOgHTTsbRl2RfQ3PenJzJ/4nOqodY1Bym3J9j/PcVRvbAjCQvePYY/kizrx8eyLJ7bfgnrd1y6Ln/nF0yZsXxvOCHzh4I/xAHxA3YifFyFz40L594vE/M6Pnfqryj4Q/q0Y+kVOJhahuwKFeL74JQe/s0wxNNJKFF6eEYkTj4/B2dfmIsv7h2L9+8YhUUxzVPq+OiztdO41DqDUC7GXzy2x10hFVLtaxp1iPR1wa9PTsPXq8bBTS5BfG41Vnx2uksZQMW1TZj51mG8uTfVots3aQ04ZSrr+uDO0ULDTABwlUvw9vJYvLl0pHBhJpOIcLcpm8cSPq5y3BDDZQlttqCPiq1P5hJNF95jwjxx48hAOEnFyCpXCQ0T7SWj/Jrgz6ggTIjwwoPTInDjiEAMC+QuXHIqG/tkYNVWSkyZeQFKBcZHNL/h89kfB6+U4UJ+DZykYjw+e6AwCnRHYqFNM0R+OpcvTLB78SZuUt0vF4pa9R8yl8FzJrsKGlPp6GC/5pMXsYgRmrt3NrUiIY87dkb6uLR6/VlCLhFj+Tjud/K9hdl1pzMrcSK9oscunLR6I2775CRmvHUYb+9LM/t3isuuRFGtGm4KCe6YEAaRiGnuueaAkxCrVFqhh85D0yOhdJbivTtGg2+lc21/ND7I11lgsCcUVDciMa8GIqZ10LmlEcEeACBkTfZVaSX1MBhZeLnIEKhUtPpaVAD3erzaA8GfY+lctt+NIwLgLBMjp7LRLn9LR3epoAY/xXNB4DgLSzzSyxrwU3w+tpzNR6Jp6tX1hO/3MyqUO4aEevKZP44ZNCfXN35SY2SLITljTOXvyYV1DtF3ztYo+EP6tJbjhy/3wQwFfueZT4ttyc9dgQXRAVg8KrhV75qILk78yihrgJEFPJyl8HPr/ALukekDEenrgltjg7B77VQM8nPFnKH+2P3EVLgrJMipbERyF0p+dp4vRG5lI745mW3RWPO47Epo9UYEezi1unhtacX4UOx8fCrmDvXDK7dEw9vKC9R7TMGi3ReKOpzUVFqnxvx3j2H1hrM2uzDlG+mOG+AJV7kEN5gCfdviC1BSq0ZcVqVV48VZlsX3Z3K7daJfpdIKU6D4CXPuCim2PjoZ/7h5OADA21UuZKGl9uO+P8WmzJ9ApQJD/NzgrpCgUWtASnEdyurUeG3PFQDA6qkD4OemwOyhfvB2kaGiQSNcgHWXwcjiB1Pg5N7JAzA50hshnk6o1+ix93IxWJbF50czEf3SPnx6JLPVfYUpX0N82/TA4ke+H+wk+HNOKPlqP+unI3eZepkdSy9HXmXHu79FNU249+s
zuOfrM1jx+eke2Vk7cKUUBdVNYFngo8MZuOurM0JfJ95OU+nlzSMDoZByWYT8ayGz3PHGc38flwuN3ogRwUqMN/0dJ0R44aO7xuD5RUMxJqz133as6eQ2taSu14O7/AXziGAl/NwVZm/DT/y6Wlrfp5tu8u+R0UHubV5/fObP1dJ6YfKarfCT2hZGB+DGEdzmRk/1IuuvWJbFq79fEf6dXmZZkK5lkGjn+evvd86PeY8N5V6jIabgT0eZloT0VS0nffEGeDvDy0UGrcHYJ68texoFf0ifFZ9ThbPZzRfN9ijp6UzLzB9LRXSx6TPf72dogFunjZABIMzbGYf+bxY+uHM03BRS4fORvq7CVJjELgQYjqRyJ6VqndGifhL8xesMMxevLQ0PcsfXq8cLPXysMSHCC0P8XdGkMwilHtdS6wx45LsEZJQ14EhaOXI6uYi1BMuySMitAdB8scVnjmw5m4dJrx3EHV/EYflnp9tc1Lfn96Ri/H1nMh79LgGGLl5Q8M2egz2chAbB5gw3lTH056bP/KSvAHcniESM0PNmw8kc3PThCWSWq+DlIsOjph5IUrEIi0dxWXP8xVZ+VSN+u1SEL49l4dXfUvDXbRetOh4dSi1DYU0TPJyluHlkIEQiBsvHhgIAtpzNx993JeO1PanQGriG5y0vJPlpXi37/fCmD/aFWMQgo6yhw6AMnzVprt+PJcK9XTB9sA9YFthyruPsn/jcauhN6z+XU43bPzmFZ7dftOnF8Y+m1/i0QT5wkYlxNrsKN75/HFmmoE6T1oA/krgSu9vHhAj3c9TMn4LqRnxr6hf10PTWjfBvHBGINTMHtjm2+rsrEOLpBCPLTbLrTReFzAGPdm/DjU2XwWBkLe59ZQ/8hcG1/X4A7nUhl4ig1hmRZ8ML49I6NVJL6sEw3Gucf0/5/VKxRRsuhLM/pbTVOST/vtiZlqWSv10qvq4yA8rq1SiqVYNhgJGmvlyhXtz5LWX+OA61ziBsAPYHRiOLjw9nYNyrB7D7guU9NVmWbQ7++DYHfxiGwWjT+1NXroMcHQV/SJ/1ielimS9b6UqWSk/j3wz5tFhLdPUChJ++cG3zzK4YbdolPm9lSnNto67VWPM/UzqvoTfXrNbWGIbBA1MjAADvH0xHRUPr0bssy+KFHUmtJv0cTet+/5K8qkZUNGggE4uE8cWTI72FRqBiEYNgD+7E6Y29qfj+TOdlaV8dzwbA9YixJmOoJf4kd2A7mVa84abywf7c96dl5g8AofH1zvOFKK/XYIi/K7avmQwPZ5lwH/5i68+UUkx74xCmv3kYT/xwHv/54wq+OpGN7QkFeOz7hE4vxFiWxcmMCrxuyi5aOS5UyEJZNi4EDAOcza7CD2fywDCATCxCca1amEaWX9WIzHIVxCIGU0wNi1tSOkmFLJBDqaVm16DRG3DR1FdlnJlJX5a6eyLXRH7rufwOy0X5QMPNIwOxwvQzbo0vsFnj4YLqRhw3ZWT997YR+O2p6Rga4IZKlRaPf58Itc6A/SklUGkNCPVyavUzR5pq/rPKez74ozcY8cimeDzz04UuZxlq9UZ8ciQD8/53FBUNGgR7OAlZIJbgf/beLhfij7OxHQR/GIYRNiH+vjOpw4xNa7Esa7NsIj4L4tp+PwB3fB/szz2nbNn3h8/6GRGshJeLDBMGeCHUi8sU3G/Bey5pnvQHAPdPHQAAqGjQdjpog2VZnMnmMn8kIgY1jTocTrVNBqgjSC/lzh0GeLvAVc5tHPGZP2X1mj6dpUearfwiDjPfOtxppq4jqG3S4ZHvEvDWvjRUNGjwzv6rFm8mldVr0Kg1QCxi2lyn8aVf1l4H9QcU/CF9UkpRHQ6llkHEAG8vjwXAjZmsbbTdCaItdFT21Z4BLXr+WHNRcKW480lfluJLBKwtyTiWXg6DkYVcwh06Dl4p6zA7JbdShewKFSQiBlMGeXd9wRZYPi4U0UHuqFfr8cae1v2IvjqejR3nCyEWMZg
3zDQlqYtd/rPKG4SdQP6iKibYXbioF4kY7H5iKo4/Oxup/74BJ5+fg8dnDQQAvLgrucOR9Am51UK9PQDsMWUvWIsvaxnk20nw53rK/DEFfya0aIx788hA7Hx8KiKv+T0ND3JHdJC70BxYImIQG+qBW2OD8MiMSAQqFcivasIHh9Lb/b5JBbW444s43P3VGWSWq6B0kuLeyeHC14M9nDB9MBcQVUhF+PyesbhxBFcy+JupMTRfdjY61ANKJynM4RtUH0ozf4FyqaAWWr0R3i6yVmnP1po3zA/+7nJUqrTYd7n9C1C+5Gf+cH+8uSwW95kmD/J9N7prW3wBWBaYMtAbYd7OiPBxwaYHJsDHVYbUknq88stl/Gwq+bp9dIhNSm674mRmJfanlGKHKchorWqVFjd/eBxv7k2DWmfEhAFe2PTgBEjFlp+2jTUT/GFZFodSS3us4b/OYBQ2ajoK/gDAizcNh7+7HFdLG/D45kSLptZZ4j+/X8HIV/Zb3OOlPRfza5BSXAepmGl13GhpSA9M/OLfm/gNE5GIwe2juYD05rjc67IJsbV+OJOL7AoVfFxl+L8FUcImTEYnJZ+Z5Q2oaNBCLhHhHtOx63oq/eKzJwe2yJLwdJbCRcad3xTWUPZPX2cwskgqqEG9Wo/3Dly193K6Jau8Abd+dAIHrpRCJhFBIRUhr6oRZy3cGOU3ekI9nSCTtH7vHB3mAeD6bPpMwR/SJ316lMv6uXFEIEaFeghlVZeL+1b2T34XMn/CvJwhYgCV1mD2ouBIWhmOp5cLkW2dwYh//Zoi7JyPMJN+bq3YUCXEIgbFtWphVL0l+Mayd00Mg7tCgkqVtsOSAn4Hc0y4J9wV5i9ebUUsYvCvxTEAgG0JBUjIrYbRyOKTIxlCX5cXbxqG/1swBADXlNbadO4/koox552juPmDE8ivakS86aJq7DUZFQqpGKFezsKF2t8WRuGeSWFgWeAvP11o983m6xNZAJqzw/ZeLulSuUzGNZO+2sNn/qSW1Dvs+OvOFNdxz28+82dMmAf+Mm8IXr99BD68czRc5ObL4t6/YxSevSEKmx6YgIsvL8DutVPxwZ2j8cKNw4Tn2ZfHsoQJfC01aPS466s4nMmugkwswuopA7D/LzOEHVTe+kVDsWRUELY+OhkLogNw00iuOe6eJO7vfsyCrDl+5HtcZiVU1/R2SS+tx7ofLwAAJkV6W1Qu2h6JWCSUUB1uJ2tOrTMgxXThzweYV4znytv2Xy7pdhq6wcgKpXgrTY8LcP3V3r9jNBiGKwnjf2+3XzNpim/4WFKnbvO7srXd55tT09MtLDdpaXtCAa6WNsDLRYZ3lsfip0cnYWAnwdxrjTVNNDmfVyME6X+5WIQHNsbjto9PtemTxGNZFv+39SLm/+9oq7IZS3A9fIxwU0iEEuf2BHk44etV4+EsE+NERgX+vjOp24GNapUWm+JyoTUY8Z/fr3Tr8b4+wWVh3jIyCH5u5nsXRdl44pfByOJEenOpNG/F+FDIJCKcy6nG7gvtbyDY0qnMCrx34Cq2nM3D4bQyh7nw1+gN+NiUOb5u3hC4yiVChhaf2dKeOFPJ19hwT9wxgTvGHEot6zRjqL/INF0st9wQYRiG+v44kNomnTAFcueFwlYN6Q+klGLVN2cd5u/44q5k5FY2ItjDCT+vmYLbRnPv6dviLQvI8hs9A8xsfMWGeEDEwOrroP6Agj+kx+VXNeLjwxlo1Fp2sl1U04TfL3EnN4/PGgQAiAniAh6XC/tOhoJa1xy84WuiLSGTiIRMoaxrdqCPXS3H6g3ncO/XZzHr7SP47Ggm7vgiDt+c5E5C184eaLb3gLWcZRIMNWUQJZp61nTGYGRxxHRRtWB4gDC+/s8U8+UmQO+UfLU0NtxTKNn5x65kPPjtOby5Nw1GFlg1ORyrpwzA0AA3+LnJ0aQzID7H8og/y7L46BDXgDy9rAG3fXJSCIbxF1ntYRgG/7o1BjdEB0BvZPHJ4bb
9f/KrGrE3mcuo+OCO0XBTSFBapxFKgKxhafAnzMsZLjIxtHpjm+dif2A0siit5V6jfOYPwzB4et5g3DEhrMNgyCA/Nzw+axBmDPFtEyCaP9wfC6P9oTdy5YTXBugOp5ahXq1HiKcTjvxtFl65NRr+ZhrfDgt0x3t3jBZ6K0wf7AM3uQQldWqcya7CyYy2I96vNdDXBWFeztAaWvfgisuqxNJPT6GwpgkRPi54ftHQDn5TluFLzC62M8nuclEtdAYWPq4yIWAfHaTEiGAldAYWO89bXqtvzomMChTWNEHpJMXC6IBWX5s6yAdPzRks/HtcuCfCrwk+eDjL4OXClff1ZPZPk9bQKjsqvQuBgeOmv+XjswZi6diQLgXuogLc4CqXoEGjR1pJPViWxZfHuQBzYU0TVn1zFvVmyq1+vVSMnxMLkF7WgDu/jMNXx7MsDqJczDdl/YR4QCTqfM0xwUp8dBc3tWxrfAF2WdHPwZxtCc1liUmFtR1mqXWkqKYJv5syLx+YFtHu7YYENDd9toXkwlpUN+rgJpe06pkU7OGEp+Zw50P//i0FNY09G4wwGFk8uikB7x1Ix/odSbh/wzlMf+OQQ5QI7zJl2wUqFVgxjgvg8FmwnTV95rPFJkZ4Y2iAO4YHchmgv3UxC9fR8FnDkddcLPPnuPnU96dL9iaXIMPChuPdVaVq3lRmWeB/+7nsnzNZlXj8+0QcvVouBLZtpaC6EZtO59i0J9mlghqcyqyERMTgp0cnYUSIEstMvRL/SCq2aJBBdgX3fDaX9ewilwhtNFpeBxXXNvX78kYK/pAe98LOJLy1Lw0fHsro/MYAtsbnw8hyo2v5spSYYO6/fanvT4Gp2bObXNJuSUZ7Bni3HfduMLL47x9choqI4frJvL4nFQm51XBTSPDFvWPxt4Xdv4Dj8TvzlvaDuFRQgyqVFm5yCcYN8MT84VzGQXvBH42+ecR7bwV/AOD5RUPhppAgpbgOh9PKIZeI8ObSkXjl1mgwDAOGYYQdVT44ZYkz2VVIKa6DQirC0AA3VDRohX4y12b+mCMSMfjbDVEAgIOppW12Xr49lQMjyzWxHRGixHxTedofSdZdvDRq9cIObcvU7fbWNKwf9/2patRCazCCYdDuzn1XvXJrNFxkYiTm1eCHs62bIO9J5i4UbokNQpCH5YFhhVQsvK7++8cVNGj08HKRCcFvcxiGEUq/DqeWwWhk8e2pHNz39VnUqfUYG+6Jnx+bYlVpanv4IFVmucpsjxa+dn50mGerYAWf/fPTubxuZWL8ZGo2fdvoYKHMsqWn5g7G9MFcb6Q7J5hvHM9f1PRksPPAlVKoWpwEX7Uy80etM+Csqe8IXxrYFWIRI6S2J+RWIT63GsmFdZBLRPBxlSO1pB5rNie06uFUp9bh37+lAOCmoxmM3MSkJ7act+iEmA8M8tO8LDFnqD8emcGVxh680vVebEYji+9NU/WGmDI93t5/tUuN8789lQODkcWkSK8ON1z4zJ+sclWHvbAs0ajV44ODXCnplEHebUr8HpkxEEP8XVGp0grnCj0lq7wB9Ro95BIR5gz1g5+bHEYW3Q7O9TSjkcUXx7gA5wNTI4RSDz7zp6Omz1y/Hy7zZ1Ikt6HDZw/uTLw+Sr/4Mplr+wWGCOPeHSNjpC85klaGNZsT8PCmhF4p2axSce/NbnIJGIbLIN91vhCPfJcArSnD+8+UUput5WRGBW7+8ARe2n0Znx21bLCJJT43vY5vjQ0Snn9jwjww0NcFTTqDkCTQkWwzY95bGhPuAaC59Gvf5RJMff0Q5r5zVMgg7o8o+EN6VGFNk7AbvS2+40ahABcA4ac1tTx5jzadfPWliV/5VdwFdoiXs9W7snwUuuWUk58TCpBaUg93hQQnnpuD124fgZEh3Fjf356chgXX7HR317UHvc7wWS7Th/hAKhZh5hBfSMUMsipUZk+oEnKq0ag1wMdVLpQX9QYfVzmeu4ELkkX4uGDX2qlYMT6
01d+ID0ZZc3Dnd0puHxOC7Y9NwWxTNkakrwt83SwbTT/Q11WYmvRdXHPz53q1Tphg9KBpl5kfGb8nqdiqN2n+5M3TWQpv187X1Z/7/vD9fnxc5W3qvbsrUOmEvy7kgnnvHUgXLowbtXqhQeiNMZY35+XxDX35Y8OMwT6dZlDwwZ8DV8pw55dxePmXy9AajFgUE4DvH5ooZLt0l4+rXMjoSSpoeyzmjyV8wIF3a2wQ5BIRrpY2tOppZY3SOrUQaG5Z8tWSWMTgq1XjsGvt1DYlXzyh4X4PNn3mp5GEe3MnrBmdlJpcKzGvGmqdEb5uciGI0VV8YDo+txrfCMewYGy8fzxcZGKczOB2gytNTfLf/fMqyus1iPBxwR9PTcc/b42GRMTg90vFeHn35U6/38WCGgCd9/u51owhXNCuO82pT2RUILeyEW5yCb57cCKUTlJklDVgVycZZ2V1atz60Qnc+UUcUorqoNLohYDuQ9MiO7xvoFIBN7kEeiOLrArr/s4texzlVzXi9k9O4WBqGaRiBvdNHtDm9jKJCP+9bQQALkuquz2NOtJyytk3q8fjn7dGA+B23O3dc6ijYN7B1DJklqvgppAIZVsAl8kJdBz8ya5QobxeA5lEJDx/b40Ngojhepm1l/HYXzRpDSgylb9ce7HMH/cLqijzx1pbTf3usitUQmZVT+Izf4YEuGGJaXLpup8uoLZJh9hQDyikIhTWNAl9RLuKZVlsOJmN+745ixpTP9btCQU2meyZW6kSel4+MrP5GMwwDJabsvm2WlD6xZcxRviYfx9t2f+0okGD9TuSYGS5a9f7vjmLZ7dfRG1T3+o1awsU/CHd0qjVd9ik8ecErjknwE1aOHCl/RIhgGtwWlSrhodz67R+fuc7q0LV470aLFXQhTHvPH7i0Pdn8rDxZDZUGj3e3p8GgNu9DvJwwp0TwvDLE9Owbc2UNuULtsAf9C4X1Vq0o3vY1FB2dhR3oemmkGLyQO6E3Vz2D59VM2NI5xevtnbPpHDs/8sM7Hl6upDZ0tK0QT5gGK7XDR8g6EhupUp47j4wdQBc5RJ8ed84vLlsJD66c4xVa1s9ZQAA4Kdz+WjSGsCyLF79jcvyiPR1EQJTM4b4wkUmRlGtWpjWZAlLS754vTXx6+CVUjy3/ZJNp/p05tpJX7Z2z6RwBCkVqGjQCCVNR9PK0aQzIMTTSchYtMb0IVzpF6+jki/exEgvOMvEqGjQ4Ex2FZykYvzz1mh8fNcYsxky3cGXopgL4vCZP/yxhad0kuImU1Bry9k8/JlSiju/iMPYf/+JSwVtH8ecr45nQWdgMS7c0+xrmieXiDEq1KPdgDzfy8LaC3VLVau0OGI6Vv5lHtdf7GpZvVUXzCdMDX+541T3jp3jTCWpx66WCyVQ90+NQEywEp/eMxYSEYMDV0ox/91j+PhwBr49lQMA+Oet0VBIxVg1ZQC+Xj0eIoZr2s1vzpjTqNUL5U8djXk3Z1Soh9CHrqu9ZTabAupLx4bA312Bx0yN9t87eLXdjSe1zoCHv0vApYJanM6qxC0fncDqDWdRr9YjwsdFCKy2h2EYofQrzcKmzyzLYs13CRj89z2Y9N+DWPn5aSz++CRSS+rh4yrDlocnYaqZ6X4AMG6AF+6ayG2M/X1nUpvzr+/icvHIpniLy+zbw28G8FPOZkb5QiEVoaC6SQgM2UNyYS2mvXEID2+KN9un7otjXObB3RPD4daizyD/flhcqzZb6gg09/sZHeohHDf93BVYbLqAfn5H6993VnkD3v3zar+5OMyuUIFlueP1tRsGfOaopZk/+VWN+OxoJu79+gze3pfW58pobBGgsES1SosDKc3ZjIdSuz9ltjOVpv5Uns4yrJs3GBLT+XeYlzO+WTUO0wZx5xQdtWxo0Og77ANpMLL4+65k/PPXFBiMLG4fHQw3hQSFNU02CUp/eTwLRhaYHeXbZsLx7aODIRYxSMit7jCYVlTThOwKFRimeZPzWsJ
1UGEdntt+CVUqLYYGuGH1lAHCpNJ7vjpj94C3rVHwh3RZaZ0ac94+ihlvHjZb725s0ZyTf+P94Uxem9u19NNZ7sTy2rR+Xzc5/N3lYFngSh/JUOhKs2fejSMCsGYmd2L6yq8puPfrMyir1yDMy7nVRKCeFOblDB9XGXQGFpc7Kacrq1MLmQizoppPhvkSFXPjZ6+dWNLbhvi7tXvh6+kiQ6yphIWfqNSRjadywLLcz8LvIErEIqwYF9rum0p7ZkX5IczLGbVNOuy6UIjv4nLxU3w+RAzwyi3RQqBMIRVjjqn0y9KpX0Yji59N6en8FJrOtMz86ak3OLXOgGe3X8JP8fnCKPveUGLaxQww02/HFqRikdAP5MtjWTAaWfxh6tt044jALl24yyVizI/2F/5tSdmPXNJcLjY50hv71s3AqikDeiToyl/UX7sLXlzbhOJaNcQixmzJD1/6tTW+AA9visfprEpUqrRCiUZHqlVaoZxnranvSVcJmT89VPb1e1Ix9EYWwwPdsTA6AAwD1DTqUNFgeY8WPlt2WjsBAGuMCuOaWlY3ck1Apw/2EY4NM4b4YsfjUzA0wA1VKi3e2sf1R7tpRGCrZsMzh/jimflcIOsfu5Pbfb9ILqyDkeVeb+Z6XHXEWSYRAg3xFk5yaam4tkkI0N9tCo6smjwAvm5y5Fc1mZ02x7Is/rb9Ei7m15g2nPxhMLI4Z+oF98BUy15DwsQvC/v+7EgsxF5TII7v71Wl0iIm2B2/PDEN4wZ03EPuuRuGwttFhsxyFX5sUXJ6tZSbdrc/pbTDCztL8H9jfnPAWSYRNn74slZbsfRCvLCmCQ9sPIfiWi4L8P2DractJuZV41xONaRiRhjvzlM6SeFnytBtL/uHH/E+KbL1ZNK/3zQMHs5SXCmuE96/MsoasOLz03j/YDre/dO2E5UOp5Zh4bvHOs10T8yrRm5l2+PYS7uTceP7x1HRYN2UQT4gPtDXpc17F7/J2VnPn9I6NZZ9egrT3zyM1/ek4nh6BT46nIEbPzjeraw+W8kqb8BD357DwL//gT96oY/T7guFQqkVACEruCfxzcm9XWQI93bBXxdGYVSoBzbePx7ernIs4Fs2XGl73n4+rxpPbTmPUf/cjwXvHjM70EWrN+LpH8/jhzN5EDHcIJV3VsTillhuYAV/3ddVFQ0aoaHzo6brpJb83BWYZXp/6qjxMx9oGxPm2W72c7i3M7xcZNAajELW5TsrYvHKrdHY9uhkuMolSCqsbdVPsT+g4A/pspd3X0ZJnRrFtWqs/Px0mzeqszlVyKtqhKtcgk/vHgOG4dOyzZ90l9drhJO3O8a37dcQzTd97iO9SZrHvFuf+cMwDJ67IQpPmi5k+PHIz90wFHKJbXfqO1rDaD7lsZOmz/xO9sgQZasSp3nDuJPBi/k1rZqvNWj0whSkydecSPUVlvb9qVPrhN3uBzto/GkpsYjBfaYA3/sH0vHPX7n+Gs8vGtrqggsAboxpHv1tSXO7L45n4Xh6BRRSkZBh1Jkh/m4QixhUqbQoaWf6T3ftvlAo7EZtjsvttV3Ans78AYA7JoTBTSFBVoUKv14qwiHTMWxRTNfLNPmxzuMHeMLHgtI9APjvbSPw82NT8P1DExHm3f3+Pu3hyyEuXpOxw2f9DA1wg7Os7QS1iRFeQg8qpZMUK8ZxP+P+lFLUNna8c77hZDYatQZEB7kLJ31dxa8hq1zVI8FOvuRryeggOMnECDPtmHfWaJZXrdIKgfZpg7sf/HFt0dQS4PqgtDQyxAO/PDENz8wfAqmYgdJJihdvHtbmcR6fNQhzhvpBozfisc2JZrMd+IBgbGjXhhKYG01vqS1nuV6BEyO8MNgUjHGSiYX32A8PprdpRvrBwQz8erEIEhGDT+8ei8/vHYcN94/HID9XDAt0x1LT8IDODAvkvh/f7LojFQ0a/Pt37pj/1NzB2Pn4FLy7MhZvL4/FtkenWNQjTOkkxbp5XHPzdw+ko06
tA8uy+JdpFx4ALlmRLXotlmWF86zoFv3GmkuRS2z22nlu+yXE/ms/EnI7DvjVNumw+puzKKvXwN+dOyZ+dDgDJ00XZXVqHd7ex2VP3zY62GzwsaO+PyzL4owp82diZOvgm4+rHC/eNBwA8N6Bqzh6tRx3fxUnBHR/TiiwaUb62/vTkFZaj+/P5LZ7m4yyBiz/7DRWfH66VTZSSa0a38XlIqW4Dp8dsa7/SpaZSV88vudKlUrb4c/6wcF0xOdWQ8QAUwd5468LhsDPTY6schWWfXYKn9uwJ4w1VBo9/v1bCha8ewwHrpSBZbks1J623bQZx5+Pncup6vHsZ/5cy8uVC3ismTkQu9ZOFf6uc4b5gWG4YD0/5aq4tglLPz2F2z45hV8uFpnKWFVY+ukpvLM/TcicbNIa8Oh38fjtUjGkYgYf3TUGD02PBMMwwrCVP5KL282us8SmUznQ6I2IDfXAxAjzgXB+M2lzXG67QU6+VUVH2ZsMw2BMizL1p+cOFo554wZ4CT8TnxHbX1Dwh3TJ3uRi7L1cAomIwdAAN1Q36nDnl3Gt+sfwda43jwzEYH83zDDtYG85az5tfHtCAfRGFqPDPBAV0DZrIca0K9gTfX+SC2vx+dFMqy5K803pr13J/AG4g87/LYgSSgMmRXrhxhG27evTmZb1rh3hRzvPjmp9EA1UOiHYwwlGFrjUIhPgUkENWBYIUirg10NZF93FZySdSK8w20Mgs7wB7x9Ix5KPT0KlNWCwn6vQTLa7lo8LhZNUjJI6NQxGFktGBeHh6W17S8yK8oOPqxyFNU14ZFN8h6PpE/OqhZPfV26JFi6AOqOQioVJKD0xTY9l2VaTJapU2m5PfbIUX9IXoLQ+QGspV7kE907ignkv7kyGSmtAkFJhddlLS9MG++DHRybho7ssLyl0kUswNtyzx0sso4PcIRYxKK3TtCqZTMw13++HxzAMNt4/AZ/dMwan18/BG0tHIsrfDVq9Eb920LixXq3DRtOJ19rZg7pdBhXm7QwRwwWoy63cGe9MdoUK53KqwTAQdkEH+7W94CytU+Po1XKzF9CnMivBstz9rM2eac8405S2SB8Xs5mYMokIT80djFPPz8Wfz8xAoJnXi0jE4H8rYhHi6YS8qka8fyC9zW0udLHfj7BOU4maNVMYAS5zZJvpfOOeSa0zZ+8YH4YQTyeU1Wuw6XSO8Pn9l0vw7gEuY+M/t8Vg8kBuk2J2lB8OPDMTfzw1zWwQ0xy+ROtMdmWbix6jkW1VPsFN6tJhWKA7npwzCKPDPHHb6BAsGxsCJ5nlGz93TAhDpK8LqlRafHYkE3+mlLbane6snLKsTt1uWUdxrRo1jTpIRIwQMAG4iyiZRISsChWuWtnHypyE3Gr8FJ+PerUeazYnorSdzQet3ohHv4tHelkD/N3l2Pn4VNwxPhQsy/Uy2XouH/PeOSpMBnpkhvk+TYM76PuTWa5CSZ0aMrGoTdkqACwdE4xpg3yg0Rux6puzKK3TYIi/K8K9nVGv0dvsPS2jrF4IvJ3Nbj8gtu9yCQxGFqV1GhxNa97A+j2pWGi1sPlMLsrqLd/QESZ9mRkUoXSSwl3BvR7y2yn9atIa8MsF7li+8f4J+P6hSXhizmD8+ZeZuH1MMFgWeGtfWrdLErvi9T2p+PpENvRGVphaeTa7qkc3oq4U1yG5sA5SMYOn5g5GpI8L9EYWJ9N7Noukig/+OJvPdvFxlQvP8QMppdAZjFj7fSIScqshE4uwdEwIfnpkEhaPCoKRBT48lIHol/diyN/3IOaVfTicVg6FVISvVo0XehQCXLnkQF8XqHXGLmdV1TbqsMH0fr9mRmS77/fzh/ljRLASDRo93jvQNvOuSWsQjodzh3Vcustn+sWGKIWKDB5fiXEwtQx5lf2n2TkFf4jVapt0eMnU+HHNzIHYumYyxoV7ol6tx11fxuGjQ+moUmmxxzSlaLlph5dv4Lw9oXXjZ6ORRXppPX40TXK500z
WD9Ci6bONM39YlsXTP57Ha3tS8fqeVIvvxzd87u4UnafnDcbhv87Ctw9M6PaFjbX4iHdiXnW7O3lavRHHTW9Ws81E0Ee3eAwe3w9ktJmTqL4iNkQJT2cpapt0OHhNL6rPj2Zi7jtH8e6Bq8gqV0EmEeGFm4bZ7O+jdJIKDWljgt3x+tKRZh/bSSbGN6vHwUUmxqnMSjy95YLZQFVtkw5PbTkPvZHFTSMD222I255oU2+apB4IrJ7IqMDV0gY4y8R4ai63W/31iexeqaHujcwfAFg9dQBkYhHqTTuii7pY8tXSpEhvm13825KzTCKUubTs+3Oef82Htv+aD/Vyxg0xgXCWSVrtFP58zSSdls+NzXF5qFPrMdDXBTfYoOm9XCIWdrGzbNz0+c293PvHrCG+QgCFLxNtWRK09vtErPrmrNkSSKHky0aBZoALiIwK9cArt0Z3GBz0dZN3OBXPw1mGV5fEAAB+OJsrNInm8Zk/o0wltdbiM39SS+osynTknc2pQnGtGm4KCRa0KJkEuMDWOtMGy6dHM1Gn1iGvshH/t+0iAOD+qQOw0sw5hzWv34G+rojwcYHOwAr9mgDuuDz9zcMY/a8/sfb7RPxvfxp2XyiCiAHeWDqizTQva0jFIqxfxGVofX0iW8ggXWj6+ZML69oN7hxPL8eE/x4U7nMtPvgwyM+1Vem0m0KKGabnZXdLv1iWxet7mqebltdrsGZzgtkNjl8vFiEuqwqucgk2rJ6AIA8nvHxLNIb4u6K8XoNnf76EMlOT8u8enCi85q7Ftx9INxP84UtVpgzyNlsuzjAM/nNbDBRS7m8W6eOC7x+aJDTm3nQ6xybvaXzwBOACUte+xngte8e0PH7+cpG7v0wsglpnxOdHOy+r5QmTvsxk/gDA0EC+LNN8cHbf5RLUa/QI8XRqVbKqdJbineWxCFQqoDeyXW763x18Sd+rS2Kw9dHJ8HOTQ6M3CpsWPYF/Ts0d6g8vF5lw/tzTfX+E4E8Hgx6aWzaU4q19aUjMq4GbQoJ9f5mBd1bEYmKkN96/YzQ+ums0PJ2l0BlYaA1GGIwsvFxk+O7BiW02Erj3dO7cs6ulX1+fyEK9Wo8of7dWfV+vJRIx+PtN3PHvhzN5SL+m5PZ0VgU0eiOCPZyEiYztuWdSON5cOhIb7p8AyTXH5NZDWnK69DP1RRT8IVZ7fU8qyuo1iPRxwRNzBsFdIcWmBydg5hBfqHVGvL3/Kma+eRhNOgMifVyECPPcYdy40IoGLe78Mg6rvjmLu7+Kw5hX/8T8d48ht7IRLjIxbhppfkIOP241vbTeptH6y0V1Qkf4jadyhDTijtQ26YS09640fL5WhI9Lr5V7tTQyxAMS0y5+e00243Or0KDRw8dVhpFmRt42Zw/VCJ+7YPr/7mQ/9DSJWIQ7TAHJb042X4RVNmjwnmlXe/pgH7y9PBbn/j6vTdZTdz2/aCheuWU4vr1/QodNeUeGeODL+8ZBJhZh7+USPLP1ArJMO3Qsy+L3S8VY/NEJFFQ3IcTTCa/dPsLqwMOIYL6k0vbBH37C0IpxoXh4egRc5RJklDV0Wm5nC3wZW0APB3/83BStpkv1dgZfbxtlKuvhT+K1eqMQOBwTbnnAd/HoIIhFDM7n1Qi7zodSSzHxvwcx4pV9WPDuUXx8OAMA8NisQTbLauqJvj9ns6uwJ7kEIgZ4flFz2RQ/rSvdlCmRX9WIeNMFx5v7UttMTTuRwb0ubJVlyK3BDbvWTm1TVtoVM4f4YkSwEmqdsdVxs6JBg4LqJjAMEGPFmPeWApQKIZPUXK+J9vAXvDdEB5h9H71tdDAG+bmiplGHTw5n4vEfElCv1mNMmAdeuLFtiVtXtJy4x9t9oRCFNU2o1+jxe1IxPjjEPZcfnBaBkV0MkLU0b5gfJkR4QaM3orCmCX5ucry9PBaucgmadAazQQ4AwmTJn87lC/1BWhL6/ZjpZ3eDaYIhv7nXVQeulOFcTjUUUhG2PDwJ7go
JzufV4JVf2k6U4wOi900OF9bkJBPjo7vGwFkmFjIr9jw9XcjgMqc5+NP6QlGrN2J7Qtsps9cK93bBR3eOwbKxIfj+4YnwdZNzGVtSMa6WNghj4ruKZVnsMgV/+EPdOTOBliqVttVm28ErZahp1CKvshEX82sgYoDXl3JT4TbH5aLMgnJulmWF84qBZjJ/AGC6KaBzop3MlW2m3+GysSFtjtUMwwi9rKzN7OsuvcEoHOtnRfmCYRghONVTvVx0BqMwZZDfAOePEYfTynu04XTVNWVf5vDBn5MZFULfvbeWxQrvjbybRwbh9Pq5OPHcbJx6fg7i1s/F6fVzhKE117p9TDBEDPe8TSrghslYGhStVmnxzckcAMBf5g/u9P1+UqQ3Fgz3h5EF/vvHlVZfO2g6Ds8e6tvp+bBCKsaK8aHtBsuuHdLSH1iW09of6FSArvcvrvub5MJa7Dp7FU4AXr91BBRQAzrAmQE23jMcvycV4829qSivb4ATgJWjQsHouVQ5KYD7xvng48OZSMltvWvkKRVhZIgSd08Ih4tIA+ja7nYEObMIdDagplGHtPxixHaww2yN3xPS4QQ15BIRNHojXtwWh11rp0HpJG1zW5Zlsf9yKd49kAYnqOHtKmt3vY7AiQHGBslwqaAWp1LzsGJc24yREyk5cIIa8wd5Q2RoBK459o0LlsEJalzJKwar5U4eruQXwwlajAmScq+9PmrVOB98dywFF7OKkJJXhOGBSmw6lgboVBgf7I5N90ab3ji0gM7yhq2WcBMDqyf4AdADuo53uaeEO+Hj5UOw7qcL2H8hC/svZCEqwA1SMYNkU6lWiKsUn66MgrvY+rWO9JfACWpkFJba9O+VWdaAuLR8ODPA/eN94SbW4t6x3th4Khebj6dgVqTtp9jxWJZFTW01nGBEkLOhx5+Hj0z2x97zmQjyUGB0gKxPP++7a0ygDLugRmpeMaALxfGUUoj1jQh0lmKAO2vxz+6nAOYPdsXRtHL8cu4qpg/2xTOb46ExZYbmq7njyWAvBRZHK232O43yZnAWauSXlgG67vckMxpZvPlrApygxopxIYjyFglrHeIlghPUKCgrB3Qq/HkxC04wXYwZgGd/PIXta6bARS5BXqUKFVXVcBMxmBii6JPPIQbAU9OD8NSPpdh2Kg2PTA6A0kmK709chRPUiPRx7tIxiDclTIHfaqpxIasQ0wd0nlWrMxhxOCkbTtBhSYz554gYwLOzQ/D0Txfw7VEuuBDoLMXHy6MgNTYB7Q+1sdj8wS744YQacWm5MGgGQsQAO85wv5P7JofDRS7G0avlcFdI8ZdZwTb52zIA/rEgDCs+5wIGf18wGG5iLcYFyXAmuwGXc4swzKf1ea9aZ0Bcah6cYAAMwO5zV7H6mj5QGYWlcIIasf5t37/nD3KFm0iDvFI1sotKEdFOlkhH9AYj3t9zHk5Q45HJEZgYqsBHy4dgzeZE7DqbjrkDXTHPdHHKsiwSMvLhBC1mDHBqtZ4hXiIcfmo8wMCUJcmdj7ZnsCcDJ6hRWa1Gk6pOKLM7lFwCVUMdwtxkmDPQpcO/zbzBrpg3eBAAI6BTQSkBlsd6Ylt8AX46mYpJoaOs/n3wLuVXo7yqCl5SMRYM98fui0W4kFWAG6JaZy4cTymEglUjKsANLMviamkD9pzPRF2TDk5QY0qkN26L8cD2OAXO59Xgq8NJeOHG4R1+77JaNYxaFVxFDMLcYPZ3MCPCCZ9AjcTMAujVQ1plSRRUNSIxoxDODLB8pKfZ+08OlePARTUuZRcCuqCu/ZK6IK+8ARJDE9xlYgQ5c3+3GRHO2HNejfj0fGCudZnSljiWUopGVR1CXGWYGeFsOqeUw0emg6pBjZS8YmFD29YaVbVwggY+Ml27z+WBHgyG+4qQXc5dn62aEs49z8zcXgEgxBUATEEc1tDu68zfiXuNHL9agRUfHQDAZaE9Pntgq+bNeoMRazYnoFFrwL8XR2Ognxs2HE2DQdO
AMYFuWDjE/FqutX5eKE6n5iEuLR+nrvhjyiAfsCyLU6m5cIIG8we5dvtYOyvSBUO8GORXNeDXhHSz10l9hoU/K8P2t/ll16irq4NSqUTtq0AfzKAnhBBCCCGEEEII6ZI6NaB8EaitrYW7e/uTiKnsixBCCCGEEEIIIaQfu37KvtYUAR1EwUjnXv3tMr4/k4+pg7zx1arxdlvHCzuSsPN8IcaEeWDzQxO71Vj17zuTsCOxEMvHheBfi7lGlp8fzRR6vrQkYrh61ifmDO6TjVi7imVZzPvfURTVqPHRXaMxd1hzw8zv43Lx6u9XMH6AJzY9OLHdx3hrXyq+OZGD5eNC4CITY+OpXNwxIQQv3xLTGz9Ct638/HSr0bj/WxmLRTHme0/1V3/bdhG/XSrG03MHYc2sQd1+vNSSOtz28Sm4yMSIe2FuqxTxpIIarPg8DhIRg31/mWHReGNrHUkrxWObz2NogBt2rp1q88e/3t371Rmhd42LTIxtayZ3qQQEaH5viQpww7cPTDBbcmtLGr0Bt350AnmVTbhvcjjWd6Hvy/m8anx3Ohd/ppRCb2QR5KHA709NN9u/a83mBBxNK0eErzOyyxtbHU+/PJaFjw5lYNwAT8yP9se8Yf7wcZV3+2fsaXqDETd9eBx5lVyvuHsmheGFG7vfFN9gZDH5vwdRr9Hj58cnY3hg+6URTVoDpr1xCI1aA354eAJGh5nvQ9FbfrlYiOe2JyHCxxnl9Vo0aPTYcP84TIq0Xf8mSxTVNGHuO0chETE49+I84Tmp1Rsx7Y1DqFfr8d2DE/DLxUJsiy/EzSMD8dbyWABAXFYF7t8QjxAvBf78y6x2v8fm0zn4zx+pkIoZfP/wRIwI9uh0XUkFNXjo23jUqfUYGaLEpgcntOrRxLIsFr53DPlVTXhv5SgsjAnAk1sScSClDOvmDW5VNtJVL/+SjK3nCjB5oBdeXTICW87m4avj2Zg22Adf3jeuy4+7NT4fL+++jIkRXtj4wASr738ivRwPb0qAh7MUx56dDalYhLnvHEFRjRpfrRqLqYO4fl3838fLRYrjz86BSMQIxxeAG8rxyd3NUyIrGzR4e99V7LrA9Z9xNg2RuLZtwn//SMF3p/Nw/9RwPHtD+8fDH8/l4Z+/pGBsuAc2PzQJAHA6swIPbIyHm0KCY8/O7rCH4aPfxePY1QqsXzQU95l6qfS0Z7ddxK+Xits8h/7zewo2x+Vh5fgQvHKr7c5Vf71YhGe3X0K4txP2PD2j1TGxtE6N2W8fAcsCvz45VWhOXlzThPnvHoPByHZrPQVVjZj/7jHIJSKcf2l+rw+RMYe/puKfM2V1asx55ygMRha7n5iK3y8V4YtjXP+4z+4dg5lDrO+vWV6vwVNbEnEhv/k8/pVbo60eftKRRe8fQ05FIzasHodJA3v3mG6xujrgxc5LKq+f4I/UhfsgXVKt0uL7xCo0QYHVM2Ps+rt86oZR2JFcjZN5auy7Wi80ILSWWmfAL5dr0QQFbhwzSPiZHp03EvNiI7Hvcgn2JZcguagO0wf7YP2iYWZH0Ds6BsC0YeH49nQu/kxvwNyRzWNS/8xoQBMUmDIsvMO/+YgBQWg6UYKzBRqu2SQUiAkPdpjX3N3Th+PMlvMAuDGnC2IHNndcvE4MCQlA06VqXCjR2+Tvdq6gHE1QYFy4DySK1q+bEREuGD0wGKcyK/FlXCleviW629/vWvkNYjRBAS8PT4d5HjqSYeGBOJ7LXfi/fvsoRAT5d3KP9j21aDQigwKwKCYAyg4mlNiKXAq8sHg8Vn1zFl/GleLW8YMRHWR5/4U/korx+PcX+UfDxAgvvHTLcCiczW8whfv7oimtHinlRgAK7hhrek4+PHcEHp47ops/Ue+TSIG1C0bhqS3nsXRMCNbfOhKMDY6ZYgBRYQE4nl6B+EIthoe1/9o9eKUIlVopQjzdMSoyBLDzhc70YQOgYdKRUmEEIEG4tzsmDA7r9feSQB9nuLi6o6J
Bi5QKA8aEcc/Lk1llKFNL4OPqgtEDQyB3csem+ErsvlyL9Yul8HKRIbmsFE1QIDLQv8Pj5t3Th+NErhp7L5fg8a1p+O3J6R0GbRNyq7F6YzLqNRKMDffFl/ePh1zR+vYMgNkxEfj8WBZ+S63DvNhIHM1qRBMUmDAk1CbH8fkjB+LbcxU4lNmIWe+fg1TEoAkKLJ0Y1a3HHzEgCE3IREKRFkaxs9XN6XddzuDWMTIMUtP75ciIYGSeL8SZfA2mDuPWdiA9F01QYPLQEIjkXLD95rGDsDeNa2K9cFRkq5/D29MFr90xCXdMrcGLu5KRVFiLr8+U44PIkFbfP7WSRRMUCPP36/D3MCUqHE3IQly+Bg1GOVxkYrx3tAhNUOD22LB2j4G8kRHB2He1Aafz1bivl96XUyoNaIICEYGtf7aJQ8LwZVwZjmQ12vQcYVdyDZqgwILYgWBkrTdE/L1dMGP4AOy9XILPT5XireXccIhv4/PRYOSC/j+cr8LjC8QdTl5sT6VWhyYo4OmiaPO97WXx+CF442A+TuSqkVnDYt/lajQY5Rgb7okhIQEYEhKA6cMHoFKlxcwuTvT09XLBhkdm4/mfL2HXhSIwDDAjOhyQ2m5jMcjHB1cqypBeA0zqq+eUUssaUlPZF7HId3G5aNIZMDzQHVMHdb9BZncEKBV4ZDoXoHh9T2qrsfHWOJxahnqNHkFKBSZc07l+oK8rHp81CLufmIb0Vxdh4/0T+mXgh8c3VzxwpUyYQtCkNeB0Jjcec46ZEe8t8RO/0krrcck09WeUaQS8I1gUEyCMA187axDE11ngB2ge955so4lf/JSSceHmd+Mfm8XtwP141vzEme76xbTTOTSw/75u7WlhdAAkIgarpwzA4lHBnd+hA+4KKe6aGAbPXgj88GYO8cVNIwJhZIF/7Eq2avrKlrN5ALjj4u9PTcNPj07uMHg0uMWoWYbhjjf9wa2xQUj8x3y8vXykzSaxAc3HDHOTjlrix2LfEhvUJ3a4PV1kwrh6AFg5PtSmvxdLMQwjTBO72GKs9l7ThK4bYvwhFjEYEaJETLA7tAYjtsVzk5r4Me+dBUMZhsEby0YixNMJ+VVNWPNdQrtTWE9mVOC+r8+gXqPHhAgvfPvABLgrzAeKbjC9Ng5dKcX5vGrUqfVwk0uEiZTdNW2wD3atnYpJkV7Q6o1QaQ3wdZNj7rDuTfMc7OcKhVSEeo0e2ZXWN5jlp9vNa5F5zU9UOpvTPEWMHxM+t8U52bxh/gj1ckKgUtHq/i3FhnrgpVu4ps9Hr5ZDb2h93sxP+orsJHszzNsZ4d7O0BtZxGVWYntCAc5kV0EhFWGNBZlZEyKaX9u90XLWaGSRYZp6x097400a6A2xiEFOZSPyqxpt8v1qGrU4ls5lYd0Saz4D45GZ3PXLrguFKK1To1GrF95TPJyl0OqN+OZETpe+Pz/pqzffSzsToFQI03K3nsvHtnhuDHzLrJxxA7w6HO1uCYVUjHdXjsKbS0fivZWjEKi0bUb5QNPzJ7OdKYqOhII/pFNqnQHfnsoBADw6M7JPnGQ9MnMgfFzlyKlsxM+JBV16DD4N9pZRQR2eoNnj5K23TYzwhqtcgooGjRC8OZVZAY3eiGAPJwz26/iEwM9dgRBPJ7Asl1rurpAgwruPRsbNkIpF+Gb1eLy9PLbVyO7rCX+yX1DdhJrG7gdj4k0nrOMHmJ/KN22QD2KC3dGkM+Db0znd/n4tncupwrmcakjFDO6dNMCmj004Y8M9kfKvG/DKrbbP2uot/7h5OFxkYiTm1WCr6eK3Mw0aPeKyuKD4izcNsyhjqOXxc3y4F/z6Udmwl4vM5ucE4yO4Y8aZrMp2LxDr1TocMZW63NrORZY98GXTYhGDZWNCOrl1z4k1BX/4cma9wYj9KVzwp2VJ810TwgEAr+1JxapvzuKM6bkdbWbM+7WUTlJ8ds9YuMolOJ1ViUe+S4BG3zoA9EdSMe7fcA4qrQHTBvlg4/3
j4Spvv+ggNsQDAe4KqLQGvLkvDQAwMdKrVdlwd40K9cCWhydh4/3jsSgmAP+9bQSk3Xx8iVgkHAsuFdRYdV+t3oicSi740HKTcYLpdXA+rwZavREnMyqQVaGCVMxg2uDmshOFVIw9T8/An8/MhEsHv9vRoR5QOklR26TD+RZBQbXOgMIaLoszsp0x7y3xY9J3XywSRmz/Zd4QhHp1Pp1vRLASMrEIFQ0a5FZ2HHDR6o04erUcOkPXx/EV1jRBrTNCJhYh7Jr1ucolGB3qAYALUNrCnuQS6Awshga4tQr6tzQmzBPjB3hCZ2Cx4WQOdiQWorZJh3BvZ7yxdCQAruVCnbqD0XXtqOTHvPeh4A8ArDAFejacykF2hQouMjFuGmH71goMw2DF+NBub0iZE+nDvTayKvreFE5rUfCHdGpzXC4qVVoEezjhxh54sXaFq1yCR2Zw40ktPWlv6WRGBfZdLgUALOmBg4SjkUlEmDmEqyk/kFKKgupG/Mf0pj57qK9FJ/d89g/A7TI5WtBsWKA7lo0N6RPBTXtQOkkR7s2dHPHj47uqsKYJRbVqiEVMuxlgDMMIO4XfnspBo7bjcffW+ORwBgBg2dgQBCj7z4V2XyOTOPYpRIBSgb/MHwIAeGNvaruZCy2dSC+HzsAiwsel011y3sAWwZ9FI/pH1k9PGhPmCZlYhLJ6jXBRfK0jaeXQGoyI8HHB0D6Ulbt4VBBCPJ2wavIAuwb5RoZygYiLpkDEiYwKVDfq4OksxcSI5mzM28cEmzKnuIyQolo1AGC4BcEfAIgJVmLD/ePhJBXj2NVyrP3+PDLLG5CYV43Pj2Zi7Q+J0BqMuHFEAL5ePQ7Oso67TYhEjJD9czab20CY0gP9NRiGwawoP3x6z1jMH971ktWW+Oyklv0DLZFbqYLByMJVLkFAi+fMQF9XeLnIoNEb8dSW87jn6zMAgNlRfnC7JnPKVS7pMKgGcAEq/jzvsCmDCACyK1RgWcBdIYG3BUGD6abA068Xi1DdqMPQADc8MC3Cop9VIRUj1vTcbJnRdC2WZfHklkSs+uYs3tl/tcPH7CiDiM/6ifBxMRtAnGoKZG1LKIDBiuzP9vDZiLeO6jgg/cgM7tzn+zO5+PoE1+9m9ZQBmD/MH0P8XVGv0eO707lWf38+i9qSv2NvmjPUDz6ucqFS45bYoA4DlX0R/37PZ8k5Msc+cyM9rkqlxQcHuebHT8wZ1O3dEVtaMjoYYhGD83k1yLTixVjbpMNft3H9Gu6aGIZhgdQIHICQ9rzrQiGWfnoKWeUqBCkVeHSGZU0Wx7S4yB8dZj7bg/RtMaady+6WfvFZP9FB7h2e7C+KCUS4tzOqG3XYes76IK45l4tqcTitHCIGFj93yfVr9ZQBCPZwQnWjDvsul3R6+wNXuIumzkphW3KVSzAxwgteLjLcNLJvbKD0ZQqpGKNMO/J8Jsq1+L/Vgmj/PhWwD1Q64cRzc4QSG3vhM3+yylX456+X8cimBADAguEBrS6CFVIxPrxzNI78dRYenBYBNwX3XA2wInA1foAXvlo1DjKJCAeulGLuO0dx+yen8NqeVLAsd5714Z1jWjV37sgN15RF8hfofR0f1LA2+JNuClAM9HVp9VxmGAbjTGWEey+XgGWBpWNChObcXTF7KBf8OdQi+MP/f1SAm0WvpckDfYQ2VgwD/MfKzKlxpnK2+A6CPxtP5QgbtN+dzmk3G7lKpcXcd45i1TdnzQaBhJIvf/OB+mVjuSElCbnV+OhQhsU/gzmldWrEZXPHq1tGdhz8mTvUD5G+LqhX65FdoYKbXILl47gyUX5TbMPJbIs2JFqq7INlXwCXXb90bPNG+wobNmLuLQNNWXFcNpl1f5e+pu9cyZM+6X9/pqFOrcewQHesGNe3Xqx+bgrMMO1A7EwstPh+r/xyGcW1aoR7O+PvXZjy0l/NjvKDiOHKfkrrNBji74qfH59iUSovAIxp0euAT6UljkXo+1PY3eBPx/1
+eGIRgwdNO4Y/W/EaPpxahs+OZrbpWwAAnx7JBADcNDIIA3wcp/SQ2IdELMKysVx5Dt+LoD1GIyvsmM+1IvgDAN89OBHHnp3dpSae16OJkdyxI85M8EejNwglX93tE9FfebnIEOrF9bzYcDIHWoMR0wf74G83RJm9fbi3C/5x83AkvbIQPz062eqA2tRBPvj83rEIcFfAVS5BiKcTRgQr8fcbh+E/S2Ks6qM3foCXkLng4yrHkHYu3Psavs/S5aJas+9N7WnuSdM2g226KVPH312Ob1aPwzsrYrs1DXHmED8wDJBaUo+imiY0avVC5sndE8Mtegylk1TY4LtrQlirPleW4HtsttfT62J+jVBO5iqXQKU1tJsF8/qeK8iqUOHo1XLh99hSehnXCHtQO1maoV7O+PcSbrLW+wevCtlm1sooa8Bft10Ey3IboZ2dN4tEDB6e3jxcZcX4UCFz65bYIAR7OKGioXnz3VJVKg2Avpf5A3DPFWeZGGPCPBzyGsHLRQalkxQsy2XLOTIK/pB2XSmuww9nuCZkL98yvE82wV1qOmnfeb7Qooadv18qxs7zhRAxwP9WjHK4tMOe5Okiw8QIrpn3uHBPbHt0ilUN04YFusPXTc7VUTtQs2fSjE9b55t+dtW5Tvr9tHTjiECIGCCpsBZ5nfQAAACVRo8nfkjE63tS8ZapJwQvp0KFP5KKAQCP2WAsMLk+8MGfExkVHTb+vFhQg0qVFm5yibB7bSmZRNRpWQZpNimSey86k13VZkf/VEYlGjR6+LnJMcp0wU3ammR6Px/o64INq8dj0wMT4OMq77HvNzvKD3EvzEXyPxfixHNz8OuT0/DwDOv7RIpFDBZEc6VYUwZ696nMro5EeLvATS6BWmcUsnks0V5DYgC4c3woNj0wAX8+MxNzhna/PM3LRSZceB9OK8OWs/moUmkR7u2Mm63ISnx1SQz+b/4Q/P0m6zdQx4R7QsRwF9DrdyShSducRVHbpMPaHxKhM7C4IToA/7mNC8xsOJXT6nYAlzm0tUXA/s8rpW2+F/+7HdxBAPH2MSG4fUwwjCzw9I/nrep5WKXSYv2OS1jw7lEcT6/gMo4tPPe4bXQwgj2c4CQVY3WLsfdSsQjPLxoKAPjkSCb2JhdbsR6uT5CXS8+9zrsq3NsFx5+djc0PTXSY13RLDMMIPbGyyh07+ENnItehJ7ecx4GUUrgpJHB3kmKAtzPeWhbbKk2QZVn8+7cUGFluMgl/ItbXzBvmD3eFBIU1TYjLqsQUM+nBLMvibHYVvj+Thz2mg+hjswZavVtxPXhj6UicyKjA7WOCoZBalqLNk4pF2PHYFGgNRng4971dB9I5vmFldoUKdWpduxNZOlLbpENaKbfbNtaC4I+PqxyTB3rjZEYlfk8qFqaAteePpGKoTCeBnx/LwogQJW4eGYSyOjWe3HIeRhaYHeVrcc8KQkK9nDF1EPcc3J5QIPQBuhZfHjFjiK/D9zvq68aEeUIqZlBcq0Z+VRPCvJt30vnGxQui/R2ut1xveumW4bh1VBAmRXr3qZJ9S/xl/hBIxSI8NC2y8xv3ESIRg5hgJU5nVeJSQY3FLQX4QJG5wRoSsQgzTNk/tjJnqB8S82qwN7kEV03v1Y/NHGhVU+1hge5dbpmgdJLi2RuG4o29qdhyNg8JuVV4Ys5gnM6swN7kElQ36hDq5YQ3lo2Ei0yMt/enIb+qCT+dy8PqqVymsN5gxIu7kgEAAe4KlNSp8WdKKR6fNUj4PizLCr9bc4G1lv69OAbn82qQXaHCys/j8OjMSKFE98+UUvycUACxSIR3V8YK/ZbUOgPu++aM0CNx/nB/PLswqt1Gz9dSSMXY/cRUNGkNbTKFbokNwoX8Gnx9IhvPbL2ICB9XiyYO85k/Xi5dzw7rSd49GHzuDZE+rjifV+PwfX8seqXX1dVZ/UH6ptI6NX69WIQmnQFl9RpklDXgwJUyvLbnSqvb7U0uwanMSsgkIrzQh0u
jFFIxbjZN+thuZupXo1aP5Z+dxsov4vDLxSLoDCxmRfni6bnmT+6vd2HezrhrYpjVgR9eqJczBlrYBJX0PV4uMgR7cNleLRtCWuN8XjVYFgj3dra4xIVvJM9n7XRkWwL3Oucndzy7/RJ2XyjE4o9PIqmwFp7OUjy/qO8es0jfxJc1b++g8efBLvT7IV3jJBMLZTR8Hw0AMBhZ/JnC7fAvGE4lXx1xU0gxfbCvwwV+AK6s/1+LY1oF/RzByBDr+v4YjKxwIdlZgMJWZpuOX8fTK1Bap0GgUoHbe3ky3ZqZA7H5wYnwdZPjamkDntpyHlvO5qO6UQd/dzk+vXsslE5SSMQioTnyl8ezhclfG0/lILWkHp7OUmx8YDwA4EJ+Dcrq1cL3KK/XoF6th4jhGj53xEUuwYd3joarXIK00no8s/Uipr5+GJP+exBP/HAeh9PKceBKKR7bnCg0Lf7nrylILqyDp7MU29ZMxpf3jbM48MPzcZW3WyK2ftFQTBnojUatAY98F4/axs6nf1UJ074cO8jSVw30455H1vSZ7Yssekfw8PCAp6enxR9eXl7Iysrq6bWTLuDr54cGuOH3p6bh3ZVc47it8QVC87XCmiY8vyMJAPDI9EiLe77Yy1LTaO69ySVQaVpPDNqTVIL43GoopCLcOSEUvz4xDRvvn0C7toS04xZTMPWFHUm4UmxZIL+2USekZFva76elhdEBFpV+5VaqcDa7CgwD/PDwREwdxJ0YPf3jBRTXqjHQ1wW71k61aIeMkJYWRgcIWaTmxv4W1zYhpbgODAPMirLtTjwxj59K1bLvT2JeNSoatHBTSPpsRjK5fvEBS0uDPwXVjdDojZBJRL12rj080B3+7s3BgUdnRNrlnHjqIB/seXo65g/3h5+bHHdNDMP3D03EyefmIMZUgg4Ay8eGwMdVjsKaJiz99BRu+uA43jSVfD+/aCiGBrgjNkQJlm0O0APNGVXh3i4WNRuPCVbi6N9m4a8LhsDfXY6KBo0QjHpwWgScZWKcyKjA8zsu4eeEAmw5mweGAd6/YzTGW1kGbAmJWISP7hqDYA8n5FY24tOjmZ3ep6qPjnrvLyJ9TBO/HLznj8VlX9u3b4eXV+dPbpZlceONN3ZrUaTn8CdR0wb5IDpIieggJeIyq/BTfD7+vjMZu5+Yiqe2nEdtkw4jQ5R4au5gO6+4c2PCPBHh44LsChX2JpcIfYAAYE8ylx7+yIyBeKadVH5CSLP/WzAElwpqcCqzEg99G49da6fC1639XaSE3Crc89VZsGAxZ6gfrpZyJ1yW9PvhWVr69bMp62faIB+EeDrjwzvH4JYPT6CwpgnTB/vgo7vGdKsZJrl+KaRiLBkdjE2nc7E1Pr9NqQVf8jU61MPhU9cdxaRIb3xyJBNnspqbsO4zvafPHepHmzikz+Ezf1JL6qDRGzoNOvA9aSJ9XHqtrybDMJgd5Ycfz+XDx1WGOyaE9cr3NcfHVY4v7xvX4W0UUjEemh6B1/ektgqqTYjwwvKxXMbm/OH+uFhQiwMppbjT9PN01EupPd6ucjwxZzAenTkQx66WQyoWYcpAb0jEIkwb5IOHNsVjR2Ihdp7nBlQ8PXewzcvyWvJykeH/FgzBM1sv4nQ7kw95OoMRdWq9cD9iewNb9PxhWdYhexcBFgZ/wsPDMWPGDHh7W7bLEhkZCam08xPw1157DTt27EBqaiqcnJwwZcoUvPHGG4iKap5GoFar8X//93/48ccfodFosHDhQnzyySfw9+9+47PrUZzpJGrywOa/5fOLhmJ/SgnSSuux+KOTSCuth5tcgo/uHOMQJ1cMw2DpmGC8vf8qtpzNE4I/DRo9jqVzE0EWxVB6OCGWkIpF+OTuMbjtk1PIrlDh0e/i8cPDk8yWAhbWNOHR7xLQZBp7+UdS86hsaxvi3jgiECczKvFHO8Efo5EVJoItN5XoeLnIsPPxKUjMq8a8Yf5W9Swg5ForxoVi0+lc7L9citxKFcK
9uRM9lUaPL49x2cxzh9G5R28ZG+4JsYhBYU0T8qsaEahUYL+p5IumfJG+KMTTCZ7OUlQ36pBaXI/YTqYaCf1+rCwX6q5VUwYgPrcaT88d3OUy/9700LQIBCoVYFlA6SyFp7MMQwPchJ5f84b74+39V3EiowKNWj2cZZIuBX94UrGozbF+9lA/vLokBut3JIFlud5vT83p+Q1yPqvocmEtmrQGOMnM/72qTVk/IgbwoE2wHhHm7QyxiEGDRo+yeg383R1zeqdFZ8rZ2dkWB34AIDk5GaGhnY8FP3r0KNauXYu4uDj8+eef0Ol0WLBgAVSq5nSqv/zlL/j111+xbds2HD16FEVFRbj99tstXgtpVlKrRnaFCiIGGB/RfGHm6SLDelNfH75R6xvLRjpUrfWKcaGQiBjE51YjxTSp6HBqGbR6IwZ4O2MolYEQYjEPZxm+WjUO7goJEvNq8O6Bq21u06jV4+Fv41HRoMWwQHf8/NhkPDZrIAb5uWJhtL+wQ2Kpzkq/TmVWorCmCe4KCRYMbz4p83NX4IaYQAr8kG6LCVZiQoQXtAYjHtmUIJQRv/LLZeRUNiJIqcA9Fo5DJt3nIpcIEwg/OZKBRe8fR15VI+QSEWZS6R3pgxiGaVH6VdPp7YUARS/3ShwW6I4Dz8wUyrz7OolYhMWjgrFkdDBmR/lhVKhHq6BVlL8bQr2coNEbcexqBapUWpwx9Qoz10i7q+6cEIb/3BaD20YH472Vo3ql4XyIpxP83eXQG1lc7OA5VWWaUubpLKNG+D1ELhEj1JPri+nIfX/sera8d+9erF69GtHR0YiNjcXGjRuRl5eHhIQEAEBtbS2+/vpr/O9//8OcOXMwduxYbNiwAadOnUJcXJw9l+6Q+JKvmGBlmyk+y8aEYIIpunzvpHChAauj8HNXYKEpu+e7uBwAXA8gALghJtBhU/MIsZeBvq54eznXE+zr49lIK6kXvmY0svjrtotIKa6Dt4sMX943FmPDvfDcDUNx4JmZ+PzecVa/5vjSL8B88/ZtCfkAgFtHBTnETiVxTB/cMRq+bnKkldbjr9su4rdLRdiWUACGAf63chSUzrSj2pv4vj5bzuYjvawBHs5SvL08Fs4yGlZL+qZYU+nXhfzO+/50JzuFNGMYBvOHcdcAnx7JwIJ3j+JqaQPkEhEmRNi2H8/dE8Px7spRvVZaxTCM0EMxIbe63dtVNZiCP1Ty1aMiTYFaRx73blXwp76+HgkJCWho4A5WiYmJuO+++7B8+XJ8//333V5MbS13oOR7CyUkJECn02HevHnCbYYOHYqwsDCcPn2629/vesMHf8w1SRSJGHxx31h8evcYvHzL8N5emk3cN4nbkd11vghldWocTuN6NFDJFyFdsyA6AAuG+0NvZPGPXclgWRZGI4sXdyfjj6QSSMUMPr93LEI8bZMleNMIbhfyg4PpWPbpKRxIKcWOxALc/slJ7L5QBABYNrbzrFJCuipAqcBn94yBVMxgT3IJnv7xAgDg8VkDqcGwHcwbxk0mkktEeGzWQBz922yHyVYg1ye+5PnXi0WtNk2uxbKsEPwZ7E/Bn+6ab8oIvlhQi4oGLYb4u2Lro5Ntdn5iT2PDuR6K/GAecyqp2XOviPRp7vvjqCzeOjl27BhuvvlmNDQ0wNPTE1u2bMGyZcsQHBwMsViMHTt2oLGxEQ8//HCXFmI0GrFu3TpMnToVMTExAICSkhLIZDJ4eHi0uq2/vz9KSkrMPAqg0Wig0WiEf9PY+WZ8s7DJ7ZzAejjLsMjBMn5amhDhhSh/N6SV1mPdTxfQqDUg2MNJaMBHCLHey7dG43h6Bc7mVGFbfAEScqvxU3w+GAZ4a1ms1b19OrJiXAjSSuqw5Ww+4nOr8dCmeOFrEhGD1VMGCLuqhPSUseFe+OetMXhhZxIMRhaxoR5YN48GBtjDuAFe+O3JafBzk8PPQfsrkOvL9ME+mDPUD4dSy7DupwvYvXaq2f6ZpXUaNGj0EIs
YDPC2rkyatDV+ADf8Ja+qEY/PGogn5gyyaMqXIxhnGqCRkFsNo5E1W9ZVbSr78qbgT48aaMrSuy7Kvl588UUsX74c+fn5WLduHVauXIknnngCV65cQXJyMv75z3/i448/7vJC1q5di+TkZPz4449dfgyAayKtVCqFD0t6D10PimqakFvZCLGIEQ4i/Q3DMLhnMpf9cyqTC3QtjA6gki9CuiHYwwnr5nFNDZ/fcQk/xedDxADvrhiFJaODbfq9JGIR/rk4Bieem41HZ0bCXSFBsIcT/rpgCE6tn4MXbx5Or2fSK+6aGIZ18wZjTJgHPrhjFKTUU8puYoKVFPghDoNhGLy+dAQ8naW4UlyH98z0zAOA9DIuKyjc29khhqv0dRKxCDsfn4KzL8zF/y2I6jeBH4Dr0eQkFaNOrUdGO0GHSir76hVC5k/FdRD8uXTpEv72t78hODgYzz33HOrq6rBy5Urh63fccQcyMzO7tIgnnngCv/32Gw4fPoyQkOYx3QEBAdBqtaipqWl1+9LSUgQEmC/lWb9+PWpra4WP/Pz8Lq2pv2nZ78dN0X97Ftw2Ohiu8uaEtkUjqOSLkO56YFoEovzdYGQBsYjB+3eMtnngpyU/dwXWLxqGS68sxMnn5+CJOYPh50YXf6R3rZs3BDsenypM/SKEEEv4uSnw39tGAAA+O5qJc2bKdezV7Lk/83CWwdtVbu9l2JxULMIo0+S4+BzzfX+qVJT50xv4nj8F1U1QmybdOhqLgz91dXVCLx6ZTAZnZ2e4uTVPUHJzc0NjY9sJLR1hWRZPPPEEdu7ciUOHDiEiIqLV18eOHQupVIqDBw8Kn0tLS0NeXh4mT55s9jHlcjnc3d1bfZCW/X5s2/isr3GVS7B0DHdR6ucmx9iw/pnlREhvkopFeP/OUZg71A+f3TOWem4QQgghHVg0IhC3jw6GkQVWfn4aKz4/jW9OZCOrvAEsy7YY807BH9I5vmojPtd83x9+2hf1/OlZPq4yuCskYFnH7ftjcc8fhmFapdtf+++uWLt2LX744Qfs3r0bbm5uQh8fpVIJJycnKJVKPPjgg3jmmWfg5eUFd3d3PPnkk5g8eTImTZrUre99vems309/8ujMgbhSXI9lY0No3CEhNjI0wB1frx5v72UQQgghDuGVxdEorlXjdFYlzmZX4Wx2Ff71G4SLR4AmfRHL8E2f25v4xU/7ouBPz2IYBtFBSpzOqsTFghoMD3K8JBOLgz8sy2Lu3LmQSLi7NDY24pZbboFMxj3J9Hq91d/8008/BQDMmjWr1ec3bNiA1atXAwDeffddiEQiLF26FBqNBgsXLsQnn3xi9fe6npXVq5Ff1QQRA5s2Z+2rgjycsHWN+cwwQgghhBBCepq7Qootj0xCflUj9qeUYv/lEpzPr0GduvmaaXggDTEgnRsT7gmGAXIrG1Fer4GvW+vytiqa9tVrxg3wxOmsSsTnVOPOCWH2Xo7VLA7+vPzyy63+vXjx4ja3Wbp0qVXfnOXD3h1QKBT4+OOPu9VM+nrHp6WFejm36odDCCGEEEII6TmhXs54cFoEHpwWAa3eiKul9bhYUAMPJxmiAtw6fwBy3XNXSBHl74bUknok5FbhhpjW05mp7Kv3jBGysMyX4JlTVqeGh7OsTzR373LwhziO3Eou+ENNKwkhhBBCCLEPmUSEmGAlYoIp44dYZ9wAT6SW1CM+p7pV8IdlWVRT5k+vGWPqJ5tT2YiKBg18OmkynlxYi1s+OoFgDye8uiQGs6L8emOZ7bJ/+In0uOwKrhF3hLeznVdCCCGEEEIIIcQa48K51h0Jea37/tQ06qA3ctU0ns4U/OlpSicphpgatbfXg6ml/SmlYFluQtjqDefw1JbzKKlV9/Qy22VR5s/o0aMtbu6cmJjYrQUR26PMH0IIIYQQQghxTKPDPAAAl4vqoNEbIJeIAQBJhbUAgDAvZyikYnst77oyNtwLV0sbkJhbjYXRAR3e9nRmBQBgTJg
HLuTX4JeLRfjlYhFGBCsxK8oXi0cF92rjd4syf5YsWYLFixdj8eLFWLhwITIzMyGXyzFr1izMmjULCoUCmZmZWLhwYU+vl3RBdgUX/InwoeAPIYQQQgghhDiSMC9neLnIoNUbcaW4Xvh8oikTiJ8IRnoe/7uO7yTzp1Grx/m8GgDAeytHY/faaRg/gLtvUmEtPjyUgZs/PI6MsvoOHsW2LMr8adnv56GHHsJTTz2Ff//7321uk5+fb9vVkW5jWRa5lVzZVziVfRFCCCGEEEKIQ2EYBqNCPXAotQzn86oxKtQDQHPp0RhTZhDpeeNMwZ+kgtpWWVjXOpdTDb2RRbCHE0K9nBDGOGPbmikoq1fj2NUKbDqdg0sFtfjb9kvYvmYKxCLLKq26w+qeP9u2bcN9993X5vP33HMPfv75Z5ssithOWb0GTToDxCIGIZ4U/CGEEEIIIYQQRzPaFPDhs0mMRhYXTP8/hjJ/ek24tzO8XWTQGoxINpXdmXPKVPI1dZB3qxY6fm4KLBsbgs/vHQs3uQTn82qw4WR2j68b6ELwx8nJCSdPnmzz+ZMnT0KhUNhkUcR2ckwlX8EeTn1ivBwhhBBCCCGEEOuMNk2aOp/PZfuklzWgXqOHs0yMKH83ey7tusIwjFD61VHT51MZlQCAKQN9zH49UOmEv980DADw1r40oVVLT7J41Dtv3bp1eOyxx5CYmIgJEyYAAM6cOYNvvvkG//jHP2y+QNI9OaZmzwOo3w8hhBBCCCGEOKSRoUowDJBf1YSKBo0QeBgV6gGJmDb5e9O4AZ7Yn1KK+JxqPDKj7ddrG3VILuKygiYP9G73cVaOD8Wvl4pwMqMSz22/hB8fmQRRD5Z/WR38ef755xEZGYn3338fmzdvBgAMGzYMGzZswIoVK2y+QNI9OaZ+PwOo3w8hhBBCCCGEOCR3hRSDfF2RXtaAC3k1QvCHmj33Pv53nphXDZZl20xGj8uuBMsCA31d4O/efnUUwzB4/faRWPjeMZzNqcKJjArMGOLbY+vuUohwxYoVOHnyJKqqqlBVVYWTJ09S4KeP4su+BtCYd0IIIYQQQghxWPzI9/P51Tifxzd7puBPb4sJVkImFqGiQYtDqWXQGYytvn46s+OSr5ZCvZyxeFQwAGBPcrHtF9sC5Yf1c0Lmjw9l/hBCCCGEEEKIo+L7/hy8UoYs0yb/aJr01evkEjFiQ5UAgAe/jcfof/2JRzbFC0EfvtnzlA5Kvlq6aUQgAGDf5VLorwkk2ZJFwR8vLy9UVFRY/KBhYWHIzc3t8qKIbXBj3inzhxBCCCGEEEIcHT/iPbWkHgBXVuThLLPjiq5fr9wajZtGBsLTWYoGjR77U0px55dxuPOLOFwtbQDDAJMiLQv+TIr0gqezFFUqLc5kV/XYmi3q+VNTU4M9e/ZAqVRa9KCVlZUwGAzdWhjpvvJ6DRq1BogY0Jh3QgghhBBCCHFgQ/zd4CwTo1HLXWtTvx/7iQ5S4uO7xsBoZHG5qA5b4/Px47k8nM7isn+GB7rD08WywJxELMLC6AD8eC4fvycVY+qgzsvFusLihs+rVq3qkQWQnsOPiwvxdKYx74QQQgghhBDiwMQiBrEhHkKAgfr92J9IxGBEiBIjQpR4ZEYk3juQjj+SinHXxDCrHufGEYH48Vw+9iWX4N+LYyDugalfFgV/jMaeqzsjPSfX1O8nnCZ9EUIIIYQQQojDGxXWHPyhzJ++JdTLGe+siMU7K2Ktvu/kgd7wcJaiUqXFmexKi5pFW4vSQfqxbFO/nwgf6vdDCCGEEEIIIY5utKnvj7tCgoG+rvZdDLEZqViEBcP9AQB/JPXM1C8K/vRjfLPncGr2TAghhBBCCCEOb1aUH+4YH4qXbomGqAdKg4j93Gia+rU3uRQGI2vzx7e45w9xPNkVXNlXBI15J4QQQgghhBCHJ5OI8PrSkfZeBukBUwb6wF0hQUWDBvE5VZho4bQwS1HmTz9FY94JIYQQQgghhBD
HIJOIMGOILwAgPrfa5o9PwZ9+isa8E0IIIYQQQgghjmNEsBIAcLmo1uaPbbPgT2JiIm6++WZbPRzpJn7Me7CnE415J4QQQgghhBBC+rgYIfhTZ/PHtioqsG/fPvz1r3/FCy+8gKysLABAamoqlixZgvHjx9NI+D4ko7wBADCIOsATQgghhBBCCCF9XnSQOwAgt7IRtU06mz62xcGfr7/+GosWLcLGjRvxxhtvYNKkSdi8eTMmT56MgIAAJCcn448//rDp4kjXpZeagj9+FPwhhBBCCCGEEEL6Og9nGYI9nAAAKTbO/rE4+PP+++/jjTfeQEVFBbZu3YqKigp88sknSEpKwmeffYZhw4bZdGGkezJNmT+D/dzsvBJCCCGEEEIIIYRYIiaYy/6xdd8fi4M/mZmZWL58OQDg9ttvh0QiwVtvvYWQkBCbLojYBp/5M5AyfwghhBBCCCGEEIcQHdQzfX8sDv40NTXB2ZmbGsUwDORyOQIDA226GGIb9WodSurUAKjsixBCCCGEEEIIcRR85k9yoW0zfyTW3Pirr76CqysXTNDr9di4cSN8fHxa3eapp56y3epIl2SUcVk/fm5yKJ2kdl4NIYQQQgghhBBCLBFjyvzJLG9Ak9YAJ5nYJo9rcfAnLCwMX375pfDvgIAAfPfdd61uwzAMBX/6AD74M9ifsn4IIYQQQgghhBBH4eeugI+rHBUNGlwpqcOYME+bPK7FwZ+cnBybfEPS82jMOyGEEEIIIYQQ4phigt1xJK0clwtrbRb8sbjnD3EcGTTmnRBCCCGEEEIIcUgxPdD02eLMn02bNll0u/vuu6/LiyG2IWT+0Jh3QgghhBBCCCHEoUQHmZo+23Dcu8XBn6effrrdrzEMA5VKBb1eT8EfO1PrDMiragRAmT+EEEIIIYQQQoijiQnmMn/SSuqh1Rshk3S/aMviR6iurjb7kZKSghUrVoBlWcyfP7/bCyLdk1WuAssCHs5S+LjK7L0cQgghhBBCCCGEWCHE0wnuCgl0BhbpZfU2ecwuh4/q6+vx4osvYsiQIbhw4QL27duHvXv32mRRpOv4J8YgX1cwDGPn1RBCCCGEEEIIIcQaDMMgmu/7U2ibvj9WB390Oh3+97//ISIiAtu2bcOGDRsQFxeH2bNn22RBpHsyacw7IYQQQgghhBDi0IYFcn1/rpbaJvPH4p4/LMti06ZNeOmll6DX6/Hf//4XDz74IMRisU0WQmwj3RT8GUhj3gkhhBBCCCGEEIcU7u0MAMivbrTJ41mc+TNy5Eg8/vjjuPPOO5GQkIA77rgDKpUKdXV1rT6scezYMdxyyy0ICgoCwzDYtWtXq6+zLIuXXnoJgYGBcHJywrx585Cenm7V97jeZAiZPzTpixBCCCGEEEIIcUShXk4AgLyqJps8nsXBn8uXL6OpqQlvvvkmgoOD4enp2erDw8MDnp6eVn1zlUqF2NhYfPzxx2a//uabb+KDDz7AZ599hjNnzsDFxQULFy6EWq226vtcL3QGI3IqVQBo0hchhBBCCCGEEOKoQj25zJ+CqkawLNvtx7O47Ovw4cPd/mbXWrRoERYtWmT2ayzL4r333sOLL76IxYsXAwA2bdoEf39/7Nq1C3fccYfN1+PocisboTOwcJGJEaRU2Hs5hBBCCCGEEEII6YIQU/CnXqNHbZMOHs7dm+ZtcfBn5syZ3fpG1srOzkZJSQnmzZsnfE6pVGLixIk4ffp0u8EfjUYDjUYj/NvaUjRHxjeCGuhHk74IIYQQQgghhBBH5SQTw9dNjvJ6DfKqGnsv+GNpEMXd3b3Li2mppKQEAODv79/q8/7+/sLXzHnttdfwz3/+0yZrcDS/XiwCAIwJs678jhBCCCGEEEIIIX1LqKcTyus1yK9qwsgQj249lsXBHw8Pjw6zSViWBcMwMBgM3VpQd61fvx7PPPOM8O+6ujqEhobacUW9o6RWjf0ppQCAOyeE2Xk1hBBCCCGEEEII6Y4wL2ck5tU
gr6r7E7+61POHZVnceOON+OqrrxAcHNztRZgTEBAAACgtLUVgYKDw+dLSUowaNard+8nlcsjl8h5ZU1+25WweDEYWEwZ4ISqAJn0RQgghhBBCCCGOLNTLduPeu9zzRywWY9KkSYiMjOz2IsyJiIhAQEAADh48KAR76urqcObMGTz22GM98j0dlc5gxI/n8gAA90wOt/NqCCGEEEIIIYQQ0l38xK/83sz86QkNDQ3IyMgQ/p2dnY0LFy7Ay8sLYWFhWLduHV599VUMHjwYERER+Mc//oGgoCAsWbLEfovuAzR6A1KL6xETrIRYxOBASilK6zTwcZXhhugAey+PEEIIIYQQQggh3SRk/jh68Cc+Ph6zZ88W/s336lm1ahU2btyIZ599FiqVCo888ghqamowbdo07N27FwrF9T3G/OktF7D3cglGhijx6pIYbD6TCwC4Y3wYZBKRnVdHCCGEEEIIIYSQ7gr1cgIAFNY0wWBkIRZ1fao3w7Is25U7urm54dKlS4iIiOjyN+8NdXV1UCqVqK2ttdkkMns6nVmJO7+ME/7NMADLAiIGOP7cHAR7ONlxdYQQQgghhBBCCLEFg5FF1It7oDeyOPX8HASZud63NOZhcebP7bff3urfarUaa9asgYuLS6vP79ixw9KHJFYyGln8948rAIAlo4LAMAx2ni8EAMwZ6k+BH0IIIYQQQgghpJ8QixgEezoht7IReVWNZoM/lrI4+KNUKlv9+5577unyNyVd8+ulIiQV1sJFJsbfbxoOXzc5Vo4PxZ6kYjw8o2cabxNCCCGEEEIIIcQ+Qj2dkVvZiPyqRkyK9O7y41gc/NmwYUOXvwnpPrXOgDf3pgEA1swcCF83bpz9pEjvbj0BCCGEEEIIIYQQ0jfxfX/yq5u69TjUHdhBbDqdg8KaJvi7y/HQdMryIYQQQgghhBBC+jtbTfyi4I8D0BuM+Op4NgDgmflD4CQT23lFhBBCCCGEEEII6WmhnhT8uW4cz6hAWb0GXi4y3DY6xN7LIYQQQgghhBBCSC8QMn+qKfjT721PKAAA3BobBJmE/mSEEEIIIYQQQsj1IMwU/Cmt00CtM3T5cSiS0MfVNurw5+VSAMCysZT1QwghhBBCCCGEXC88naVwMbV+KehG02cK/vRxv1wqgtZgxNAAN0QHudt7OYQQQgghhBBCCOklDMPYpPSLgj99HF/ytWxsCBiGsfNqCCGEEEIIIYQQ0ptsMfGLgj99WHppPS7m10AiYrBkdLC9l0MIIYQQQgghhJBeZouJXxT86cO2J3JZP7Oi/ODjKrfzagghhBBCCCGEENLbwr254E92BQV/+h2DkcWu84UAqNEzIYQQQgghhBByvRrs5woASC+r7/JjUPCnjzqeXo7SOg08naWYM9TP3sshhBBCCCGEEEKIHQz2dwMA5FU1oknbtXHvFPzpo/hGz4tHBUMmoT8TIYQQQgghhBByPfJxlcHLRQaWBTLKGrr0GBRV6INqG3XYn1IKgEq+CCGEEEIIIYSQ6xnDMBjiz5V+pZV2rfSLgj990K+XiqDVGzE0wA3RQe72Xg4hhBBCCCGEEELsaIip9Cudgj/9B1/ytWxsCBiGsfNqCCGEEEIIIYQQYk988Icyf/qJjLIGXMivgVjEYPGoYHsvhxBCCCGEEEIIIXYWFcAFf66WUPCnX/g5kcv6mTXEF75ucjuvhhBCCCGEEEIIIfY2xI8L/hTVqlGv1ll9f4mtF0Qs16jVI7eyEbmVKuSY/vtHUgkAavRMCCGEEEIIIYQQjtJZCn93OUrrNLha2oCx4Z5W3Z+CP3aQUdaAB789h9zKRrNf93OTY84wv15eFSGEEEIIIYQQQvqqIf5uKK3TIL20noI/jmDn+QIh8OPhLEW4twsGeDsj3NsFET7OmBjhDblEbOdVEkIIIYQQQgghpK8Y4u+G4+kVXWr6TMEfOziXXQ0A+M9tMbh7YridV0MIIYQQQgghhJC+Lso08etqF4I/1PC
5l6l1BlzIrwEATBnoY9/FEEIIIYQQQgghxCEM9ncFAFwtbbD6vhT86WWXCmqhNRjh4yrHAG9ney+HEEIIIYQQQgghDmCwKfOnvF6DKpXWqvtS8KeXncupAgBMiPAEwzB2Xg0hhBBCCCGEEEIcgatcghBPJwDWl35R8KeXnck2BX8GeNl5JYQQQgghhBBCCHEkQ0zZP+kU/Om7DEYWiblcs+fxERT8IYQQQgghhBBCiOX44I+1E78o+NOLrhTXoUGjh5tcgqEB7vZeDiGEEEIIIYQQQhxIVADX9PlKMQV/+iy+5GvsAE+IRdTvhxBCCCGEEEIIIZYbF85VEV3Ir7Gq6TMFf3rROb7fD5V8EUIIIYQQQgghxEqhXs4YFugOg5HFgSulFt+Pgj+9hGXZ5klf1OyZEEIIIYQQQgghXXBDdAAAYP/lEovvQ8GfXpJVoUKlSguZRIQRIUp7L4cQQgghhBBCCCEOaGGMPwDgWHoFGjR6i+4j6ckFOSKWZVGl0kLpJIVE3DY2ptYZkFZSj8tFdQCAcG9nhHk5I1CpMHt7ACiqacIXR7MAAKNCPSCXiHvuByCEEEIIIYQQQki/FeXvhgHezsipbMTJ9AqL7nPdBH9SimqRdaUGjVo9QjydEOLpDBe5BMW1TSiuUSOnUoWLBbW4VFCDmkYdRAzg56aAv7scDMNAqzdCrTcgr7IReiNr9nu4ySVQOkvh4SyFh5MMSmcpahq1OJVZCdZ0lxmDfXrxpyaEEEIIIYQQQkh/wjAMFkYH4PNjWRb3/XGI4M/HH3+Mt956CyUlJYiNjcWHH36ICRMmWPUYKz6Pg0jubPHtjSxQUqdGSZ26zdc8naWICVZCImKQW9WIgqomaA1G1Gv0qNfoUVDd1OY+kyK9sGxsKJaMCrJq3YQQQgghhBBCCCEtLYzhgj/H0sstun2fD/789NNPeOaZZ/DZZ59h4sSJeO+997Bw4UKkpaXBz8/P4sdxV0gwepAP3J2kKKxuQkF1Exq1egQoFQhSOiHYwwkxIUrEhigxxN8NdWodimvUKK1TQ8QwkEpEkIlFCPN2RpBSAYZpHtVuMLKobdKhplGLmiYdaht1qGnSoqZRB5YF5g/3R6iX5YEnQgghhBBCCCGEkPaMCvGAn5scJRWNFt2eYVnWfA1THzFx4kSMHz8eH330EQDAaDQiNDQUTz75JJ5//vlO719XVwelUomamhooldRomRBCCCGEEEIIIY7vH7uS8e3RK8h/bwVqa2vh7u7e7m379LQvrVaLhIQEzJs3T/icSCTCvHnzcPr0abP30Wg0qKura/UBoFWmDiGEEEIIIYQQQogjW2ga+W6JPh38qaiogMFggL+/f6vP+/v7o6TE/Dz71157DUqlUvgIDQ3tjaUSQgghhBBCCCGE9JqJkV4I83Ky6LZ9OvjTFevXr0dtba3wkZ+fb+8lEUIIIYQQQgghhNiUVCzC709Nt+i2fbrhs4+PD8RiMUpLW48uKy0tRUCA+fQmuVwOuVzeG8sjhBBCCCGEEEIIsRtLW9z06cwfmUyGsWPH4uDBg8LnjEYjDh48iMmTJ9txZYQQQgghhBBCCCGOoU9n/gDAM888g1WrVmHcuHGYMGEC3nvvPahUKtx///32XhohhBBCCCGEEEJIn9fngz8rV65EeXk5XnrpJZSUlGDUqFHYu3dvmybQhBBCCCGEEEIIIaQthmVZ1t6L6Em1tbXw8PBAfn5+hzPvCSGEEEIIIYQQQhxJXV0dQkNDUVNTA6VS2e7t+nzmT3dVVlYCAI18J4QQQgghhBBCSL9UX19/fQd/vLy8AAB5eXkd/iIIsRU+8krZZqS30HOO2AM970hvo+cc6W30nCO9jZ5zpCtYlkV9fT2CgoI6vF2/D/6IRNxAM6VSSS8g0qvc3d3pOUd6FT3niD3Q8470NnrOkd5GzznS2+g5R6xlSaJLnx71TgghhBBCCCGEEEK6h4I/hBBCCCGEEEIIIf1Yvw/+yOVyvPz
yy5DL5fZeCrlO0HOO9DZ6zhF7oOcd6W30nCO9jZ5zpLfRc470pH4/6p0QQgghhBBCCCHketbvM38IIYQQQgghhBBCrmcU/CGEEEIIIYQQQgjpxyj4QwghhBBCCCGEENKPUfCHEEIIIYQQQgghpB/rF8Gfjz/+GAMGDIBCocDEiRNx9uzZDm+/bds2DB06FAqFAiNGjMAff/zRSysl/YU1z7mNGzeCYZhWHwqFohdXSxzdsWPHcMsttyAoKAgMw2DXrl2d3ufIkSMYM2YM5HI5Bg0ahI0bN/b4Okn/Ye1z7siRI22OcwzDoKSkpHcWTBzea6+9hvHjx8PNzQ1+fn5YsmQJ0tLSOr0fndORrurKc47O6Uh3ffrppxg5ciTc3d3h7u6OyZMnY8+ePR3eh45zxFYcPvjz008/4ZlnnsHLL7+MxMRExMbGYuHChSgrKzN7+1OnTuHOO+/Egw8+iPPnz2PJkiVYsmQJkpOTe3nlxFFZ+5wDAHd3dxQXFwsfubm5vbhi4uhUKhViY2Px8ccfW3T77Oxs3HTTTZg9ezYuXLiAdevW4aGHHsK+fft6eKWkv7D2OcdLS0trdazz8/ProRWS/ubo0aNYu3Yt4uLi8Oeff0Kn02HBggVQqVTt3ofO6Uh3dOU5B9A5HemekJAQvP7660hISEB8fDzmzJmDxYsX4/Lly2ZvT8c5YksOP+p94sSJGD9+PD766CMAgNFoRGhoKJ588kk8//zzbW6/cuVKqFQq/Pbbb8LnJk2ahFGjRuGzzz7rtXUTx2Xtc27jxo1Yt24dampqenmlpD9iGAY7d+7EkiVL2r3Nc889h99//73VicEdd9yBmpoa7N27txdWSfoTS55zR44cwezZs1FdXQ0PD49eWxvpv8rLy+Hn54ejR49ixowZZm9D53TElix5ztE5HekJXl5eeOutt/Dggw+2+Rod54gtOXTmj1arRUJCAubNmyd8TiQSYd68eTh9+rTZ+5w+fbrV7QFg4cKF7d6ekJa68pwDgIaGBoSHhyM0NLTD6D4htkDHOWIvo0aNQmBgIObPn4+TJ0/aeznEgdXW1gLgLoraQ8c6YkuWPOcAOqcjtmMwGPDjjz9CpVJh8uTJZm9DxzliSw4d/KmoqIDBYIC/v3+rz/v7+7fbZ6CkpMSq2xPSUleec1FRUfjmm2+we/dubN68GUajEVOmTEFBQUFvLJlch9o7ztXV1aGpqclOqyL9WWBgID777DP8/PPP+PnnnxEaGopZs2YhMTHR3ksjDshoNGLdunWYOnUqYmJi2r0dndMRW7H0OUfndMQWkpKS4OrqCrlcjjVr1mDnzp0YPny42dvScY7YksTeCyCkv5s8eXKraP6UKVMwbNgwfP755/j3v/9tx5URQohtREVFISoqSvj3lClTkJmZiXfffRffffedHVdGHNHatWuRnJyMEydO2Hsp5Dph6XOOzumILURFReHChQuora3F9u3bsWrVKhw9erTdABAhtuLQmT8+Pj4Qi8UoLS1t9fnS0lIEBASYvU9AQIBVtyekpa48564llUoxevRoZGRk9MQSCWn3OOfu7g4nJyc7rYpcbyZMmEDHOWK1J554Ar/99hsOHz6MkJCQDm9L53TEFqx5zl2LzulIV8hkMgwaNAhjx47Fa6+9htjYWLz//vtmb0vHOWJLDh38kclkGDt2LA4ePCh8zmg04uDBg+3WTU6ePLnV7QHgzz//bPf2hLTUlefctQwGA5KSkhAYGNhTyyTXOTrOkb7gwoULdJwjFmNZFk888QR27tyJQ4cOISIiotP70LGOdEdXnnPXonM6YgtGoxEajcbs1+g4R2zJ4cu+nnnmGaxatQrjxo3DhAkT8N5770GlUuH+++8HANx3330IDg7Ga6+9BgB4+umnMXPmTLzzzju46aab8OOPPyI+Ph5ffPGFPX8M4kCsfc7961//wqRJkzBo0CDU1NTgrbfeQm5uLh566CF7/hjEgTQ0NLTaVczOzsa
FCxfg5eWFsLAwrF+/HoWFhdi0aRMAYM2aNfjoo4/w7LPP4oEHHsChQ4ewdetW/P777/b6EYiDsfY599577yEiIgLR0dFQq9X46quvcOjQIezfv99ePwJxMGvXrsUPP/yA3bt3w83NTehnoVQqhYxFOqcjttSV5xyd05HuWr9+PRYtWoSwsDDU19fjhx9+wJEjR7Bv3z4AdJwjPYztBz788EM2LCyMlclk7IQJE9i4uDjhazNnzmRXrVrV6vZbt25lhwwZwspkMjY6Opr9/fffe3nFxNFZ85xbt26dcFt/f3/2xhtvZBMTE+2wauKoDh8+zAJo88E/z1atWsXOnDmzzX1GjRrFymQyNjIykt2wYUOvr5s4Lmufc2+88QY7cOBAVqFQsF5eXuysWbPYQ4cO2WfxxCGZe74BaHXsonM6Yktdec7ROR3prgceeIANDw9nZTIZ6+vry86dO5fdv3+/8HU6zpGexLAsy/ZmsIkQQgghhBBCCCGE9B6H7vlDCCGEEEIIIYQQQjpGwR9CCCGEEEIIIYSQfoyCP4QQQgghhBBCCCH9GAV/CCGEEEIIIYQQQvoxCv4QQgghhBBCCCGE9GMU/CGEEEIIIYQQQgjpxyj4QwghhBBCCCGEENKPUfCHEEIIIde91atXY8mSJb3+fTdu3AiGYcAwDNatW2fRfVavXi3cZ9euXT26PkIIIYT0DxJ7L4AQQgghpCcxDNPh119++WW8//77YFm2l1bUmru7O9LS0uDi4mLR7d9//328/vrrCAwM7OGVEUIIIaS/oOAPIYQQQvq14uJi4f9/+uknvPTSS0hLSxM+5+rqCldXV3ssDQAXnAoICLD49kqlEkqlsgdXRAghhJD+hsq+CCGEENKvBQQECB9KpVIItvAfrq6ubcq+Zs2ahSeffBLr1q2Dp6cn/P398eWXX0KlUuH++++Hm5sbBg0ahD179rT6XsnJyVi0aBFcXV3h7++Pe++9FxUVFVav+ZNPPsHgwYOhUCjg7++PZcuWdffXQAghhJDrGAV/CCGEEELM+Pbbb+Hj44OzZ8/iySefxGOPPYbly5djypQpSExMxIIFC3DvvfeisbERAFBTU4M5c+Zg9OjRiI+Px969e1FaWooVK1ZY9X3j4+Px1FNP4V//+hfS0tKwd+9ezJgxoyd+REIIIYRcJ6jsixBCCCHEjNjYWLz44osAgP9v7/5BqmrjOIB/b9rkvXBBIQzihoMZcg2bmuPmlBBtDQ4OQuDiFi62ibMINYUIcadqcXGxqUmIq6Nxh9q6g/hvVd/h5RWE4PUtRd7j5wMHznk4f37Pdvjye86ZnZ3NwsJC+vr6MjU1lSSZm5vLmzdvsrW1lUePHmVpaSmjo6OZn58/vce7d+9y586dbG9vZ3Bw8FzP/fHjR3p6evL06dNUKpXUarWMjo5e/AQBgGtD5w8AwC+MjIyc7nd1daW3tzf1ev107NatW0mSTqeTJNnc3Mznz59PvyFULpczNDSUJGm32+d+7pMnT1Kr1TIwMJCJiYm8f//+tLsIAOB3CH8AAH7h5s2bZ45LpdKZsX/+InZ8fJwkOTw8zPj4eFqt1pnt27dv/2nZVqVSydevX9NsNtPf35+5ubk8ePAgu7u7fz4pAOBasuwLAOACPHz4MB8+fMjdu3fT3f1nr1jd3d1pNBppNBp5/fp1qtVq1tfX8/z58wuqFgC4TnT+AABcgOnp6ezs7OTFixfZ2NhIu93O2tpaJicnc3R0dO77rK6uZnFxMa1WK9+/f8/KykqOj49z7969S6weACgy4Q8AwAW4fft2vnz5kqOjo4yNjaVer2dmZibVajU3bpz/latarebjx495/Phx7t+/n7dv36bZbGZ4ePgSqwcAiqx0cnJyctVFAABcR8vLy5mZmfmt7/mUSqV8+vQpz549u/C6AIBi0fkDAHCF9vb2Ui6X8+rVq3Od//Lly5TL5UuuCgAoEp0/AABX5ODgID9//kzy93Kvvr6+f72
m0+lkf38/SdLf35+enp5LrREA+P8T/gAAAAAUmGVfAAAAAAUm/AEAAAAoMOEPAAAAQIEJfwAAAAAKTPgDAAAAUGDCHwAAAIACE/4AAAAAFJjwBwAAAKDAhD8AAAAABfYXB6jPVmx54/cAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "noise = torch.clamp(1 - harmonicity, min=10**-3)\n", + "hnr = -10 * torch.log10(noise)\n", + "\n", + "plt.plot(xs, hnr[0])\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"HNR [dB]\")\n", + "\n", + "# PRAAT uses harmonicity to determine voicing in this scenario\n", + "avg_hnr = hnr[harmonicity > 0.45].mean().numpy().round(decimals=1)\n", + "plt.axhline(y=avg_hnr, color=\"darkorange\")\n", + "print(\"Average HNR:\", avg_hnr)" + ] + }, + { + "cell_type": "markdown", + "id": "9230ac2d-fe79-4fdb-9125-546ee8872ff6", + "metadata": {}, + "source": [ + "## Computing period-based feats\n", + "\n", + "Pathologies can also sometimes appear in the voice as irregularities in the period. Two features that have been used very commonly\n", + "in the speech diagnosis literature are *jitter* and *shimmer*.\n", + "\n", + "You can find a good explanation here:\n", + "\n", + "[https://speechprocessingbook.aalto.fi/Representations/Jitter_and_shimmer.html](https://speechprocessingbook.aalto.fi/Representations/Jitter_and_shimmer.html)\n", + "\n", + "The most basic explanation is that jitter is a measure of the fluctuation in period *length* and shimmer is a measure of the\n", + "fluctuation in period *peak amplitude*.\n", + "\n", + "In order to understand how this works, it is helpful to visualize a single frame. Let's use the frame #200, at 2.00 seconds into the recording.\n", + "\n", + "We'll start by identifying the period peaks and masking everything not within 20% of the period peaks, so as to avoid jumping from the beginning to the middle of the period." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a9e28fb8-2d7a-4c93-9d1c-6324dfe4fa9b", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABI8AAADvCAYAAAB7TvRLAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnXd4HNX5tp+Z7X3Vu3svuGIDNhgMmO4QQk9o4QMCIQnwC0kgCSUQSuiE3lMgEBN6wBDAYAPGxr3bsi1Zlqwube878/1xZma1Vtspq0XSua/LF2J3dnW0Z+eU57zv8zI8z/OgUCgUCoVCoVAoFAqFQqFQeoDNdQMoFAqFQqFQKBQKhUKhUCjfX6h4RKFQKBQKhUKhUCgUCoVC6RUqHlEoFAqFQqFQKBQKhUKhUHqFikcUCoVCoVAoFAqFQqFQKJReoeIRhUKhUCgUCoVCoVAoFAqlV6h4RKFQKBQKhUKhUCgUCoVC6RUqHlEoFAqFQqFQKBQKhUKhUHqFikcUCoVCoVAoFAqFQqFQKJReoeIRhUKhUCgUCoVCoVAoFAqlV6h4RKFQKBQKRTZffPEFGIbBm2++mdXfM2rUKFx++eVZ/R1yeeWVV8AwDGprazV7z+OPPx7HH3+86vepra0FwzB45ZVXVL9XVx544AGMGTMGOp0OM2fO1PS9KRQKhUKhfP+h4hGFQqFQKIMMUbxgGAZfffVVt+d5nkdVVRUYhsGZZ56ZgxZShhKffPIJfvOb32DBggV4+eWXcc899+S6SZpw8OBB3HnnnZg3bx7y8vJQWFiI448/Hp9++mmP13s8Hlx99dUoKiqCzWbDCSecgA0bNvR47XvvvYfZs2fDbDZjxIgRuP3225FIJLL551AoFAqFklX0uW4AhUKhUCgUZZjNZrz22mtYuHBh2uNffvkl6uvrYTKZctSyoc0ll1yCCy+88Hv5+Y4cORLhcBgGg0Gz9/z888/BsixefPFFGI1Gzd4317z77ru4//77cfbZZ+Oyyy5DIpHA3//+d5x88sl46aWXcMUVV0jXchyHM844A5s3b8bNN9+MwsJCPPXUUzj++OOxfv16jB8/Xrr2o48+wtlnn43jjz8ef/3rX7F161bcfffdaGlpwdNPP52LP5VCoVAoFNVQ8YhCoVAolEHK6aefjmXLluHxxx+HXp+a0l977TXMmTMHbW1tOWzd0CMYDMJms0Gn00Gn0+W6OT3CMAzMZrOm79nS0gKLxdKvcMRxHGKxmOa/P1uccMIJqKurQ2FhofTYz372M8ycORO33XZbmnj05ptv4ptvvsGyZctw7rnnAgDOP/98TJgwAbfffjtee+016dpf//rXOOKII/DJJ59I96XT6cQ999yDX/3qV5g0adIA/YUUCoVCoWgHTVujUCgUCmWQctFFF6G9vR3/+9//pMdisRjefPNNXHzxxT2+5sEHH8QxxxyDgoICWCwWzJkzp0ffov/9739YuHAh3G437HY7Jk6ciFtvvbXP9kSjUZx55plwuVz45ptvABBB4dFHH8XUqVNhNptRUlKCa665Bp2dnWmv5Xked999NyorK2G1WnHCCSdg+/btGX0Oos/Pgw8+iEceeQQjR46ExWLBokWLsG3btm7X79q1C+eeey7y8/NhNpsxd+5cvPfee2nXiKmBX375Ja677joUFxejsrIy7bnDPY+eeuopTJ06FSaTCeXl5fj5z38Oj8fT7fc/99xzGDt2LCwWC+bNm4dVq1b1+Hf99a9/xdSpU2G1WpGXl4e5c+emiRR9fRZdPY8uv/xy2O12NDQ04Oyzz4bdbkdRURF+/etfI5lM9vl
+DMPg5ZdfRjAYlFIlxfdmGAbXX389Xn31VenvXr58OYDMv2fieyxbtgxTpkyBxWLB0Ucfja1btwIAnn32WYwbNw5msxnHH398jz5Ta9aswamnngqXywWr1YpFixbh66+/7vPvAoCpU6emCUcAYDKZcPrpp6O+vh5+v196/M0330RJSQnOOecc6bGioiKcf/75ePfddxGNRgEAO3bswI4dO3D11VenCbrXXXcdeJ7PukcYhUKhUCjZgopHFAqFQqEMUkaNGoWjjz4a//rXv6THPvroI3i9Xlx44YU9vuaxxx7DrFmz8Kc//Qn33HMP9Ho9zjvvPPz3v/+Vrtm+fTvOPPNMRKNR/OlPf8JDDz2EpUuX9rkhD4fDOOuss/DNN9/g008/xTHHHAMAuOaaa3DzzTdjwYIFeOyxx3DFFVfg1VdfxSmnnIJ4PC69/rbbbsMf//hHzJgxQzJnXrJkCYLBYMafx9///nc8/vjj+PnPf45bbrkF27Ztw+LFi9Hc3Jz2tx111FHYuXMnfve73+Ghhx6CzWbD2Wefjbfffrvbe1533XXYsWMHbrvtNvzud7/r9Xffcccd+PnPf47y8nI89NBD+NGPfoRnn30WS5YsSfs7X3zxRVxzzTUoLS3FX/7yFyxYsABLly7FwYMH097v+eefxy9/+UtMmTIFjz76KO68807MnDkTa9asyfjz6EoymcQpp5yCgoICPPjgg1i0aBEeeughPPfcc32+7h//+AeOPfZYmEwm/OMf/8A//vEPHHfccdLzn3/+OW688UZccMEFeOyxxzBq1CgAmX3PRFatWoX/+7//w2WXXYY77rgDO3fuxJlnnoknn3wSjz/+OK677jrcfPPNWL16NX7605+mvfbzzz/HcccdB5/Ph9tvvx333HMPPB4PFi9ejLVr1yr6rJqammC1WmG1WqXHNm7ciNmzZ4Nl05fO8+bNQygUwp49e6TrAGDu3Llp15WXl6OyslJ6nkKhUCiUQQdPoVAoFAplUPHyyy/zAPjvvvuOf+KJJ3iHw8GHQiGe53n+vPPO40844QSe53l+5MiR/BlnnJH2WvE6kVgsxk+bNo1fvHix9NgjjzzCA+BbW1t7bcOKFSt4APyyZct4v9/PL1q0iC8sLOQ3btwoXbNq1SoeAP/qq6+mvXb58uVpj7e0tPBGo5E/44wzeI7jpOtuvfVWHgB/2WWX9fl51NTU8AB4i8XC19fXS4+vWbOGB8DfeOON0mMnnngiP336dD4SiUiPcRzHH3PMMfz48eOlx8TPeOHChXwikUj7feJzNTU1ae1fsmQJn0wmpeueeOIJHgD/0ksv8TxPPuvi4mJ+5syZfDQala577rnneAD8okWLpMd+8IMf8FOnTu3z7+7rs3j55Zelxy677DIeAP+nP/0p7dpZs2bxc+bM6fc9L7vsMt5ms3V7HADPsiy/ffv2bs9l8j0T38NkMkmfJc/z/LPPPssD4EtLS3mfzyc9fsstt6R97hzH8ePHj+dPOeWUtO9NKBTiR48ezZ988sn9/m2HU11dzZvNZv6SSy5Je9xms/E//elPu13/3//+lwfAL1++nOd5nn/ggQd4AHxdXV23a4888kj+qKOOkt0mCoVCoVC+D9DIIwqFQqFQBjHnn38+wuEwPvjgA/j9fnzwwQe9pqwBgMVikX7u7OyE1+vFsccem1Y1yu12AyCGwhzH9fn7vV4vlixZgl27duGLL75IK+O+bNkyuFwunHzyyWhra5P+zZkzB3a7HStWrAAAfPrpp4jFYvjFL34BhmGk199www0yPgng7LPPRkVFhfT/8+bNw/z58/Hhhx8CADo6OvD555/j/PPPh9/vl9rT3t6OU045BdXV1WhoaEh7z6uuuqpffyOx/TfccENaZMpVV10Fp9MpRdusW7cOLS0t+NnPfpbmH3T55ZfD5XKlvafb7UZ9fT2+++47WZ9BX/zsZz9L+/9jjz0W+/fvV/WeixYtwpQpU7o9nsn3TOT
EE0+UIpYAYP78+QCAH/3oR3A4HN0eF9u8adMmVFdX4+KLL0Z7e7vUn8FgECeeeCJWrlzZ7/e3K6FQCOeddx4sFgvuu+++tOfC4XCPBumiv1M4HE77b2/Xis9TKBQKhTLYoIbZFAqFQqEMYoqKinDSSSfhtddeQygUQjKZlAx9e+KDDz7A3XffjU2bNkk+LQDSRJsLLrgAL7zwAv7f//t/+N3vfocTTzwR55xzDs4999xuaTs33HADIpEINm7ciKlTp6Y9V11dDa/Xi+Li4h7b0tLSAgA4cOAAAKRVrBL/try8vAw+BfT4egCYMGEC/v3vfwMA9u7dC57n8cc//hF//OMfe21TVwFq9OjR/f5esf0TJ05Me9xoNGLMmDHS8739nQaDAWPGjEl77Le//S0+/fRTzJs3D+PGjcOSJUtw8cUXY8GCBf22pyfMZjOKiorSHsvLy+vmPSWX3j6fTL5nIiNGjEj7f1FIq6qq6vFxsc3V1dUAgMsuu6zX9nm93oy+Q8lkEhdeeCF27NiBjz76COXl5WnPWyyWtL9DJBKJSM93/W9v13YV1SgUCoVCGUxQ8YhCoVAolEHOxRdfjKuuugpNTU047bTTpMihw1m1ahWWLl2K4447Dk899RTKyspgMBjw8ssvpxkxWywWrFy5EitWrMB///tfLF++HG+88QYWL16MTz75JC0S5wc/+AFef/113Hffffj73/+eJi5xHIfi4mK8+uqrPbbncDEj24hRKL/+9a9xyimn9HjNuHHj0v4/V5v9yZMnY/fu3fjggw+wfPly/Oc//8FTTz2F2267DXfeeafs98tWdbiePp9Mv2f9ta23x3meB5DqzwceeCAt4q0rdrs9kz8DV111FT744AO8+uqrWLx4cbfny8rK0NjY2O1x8TFRbCorK5MeP1z8amxsxLx58zJqD4VCoVAo3zeoeEShUCgUyiDnhz/8Ia655hp8++23eOONN3q97j//+Q/MZjM+/vjjtLSal19+udu1LMvixBNPxIknnoiHH34Y99xzD37/+99jxYoVOOmkk6Trzj77bCxZsgSXX345HA4Hnn76aem5sWPH4tNPP8WCBQv6FGFGjhwJgESSdI3AaW1tlRUZI0aidGXPnj1SSpT43gaDIe1vUIvY/t27d6e1PxaLoaamRvpdXf/OrgJFPB5HTU0NZsyYkfa+NpsNF1xwAS644ALEYjGcc845+POf/4xbbrlFSpf6PiLne6aGsWPHAgCcTqeq/rz55pvx8ssv49FHH8VFF13U4zUzZ87EqlWrwHFcmkC6Zs0aWK1WTJgwQboOICmKXYWiQ4cOob6+HldffbXidlIoFAqFkkuo5xGFQqFQKIMcu92Op59+GnfccQfOOuusXq/T6XRgGCatPHttbS3eeeedtOs6Ojq6vVbcFPeUjnPppZfi8ccfxzPPPIPf/va30uPnn38+kskk7rrrrm6vSSQSUhn7k046CQaDAX/961+lqBIAePTRR3v9W3rinXfeSfMsWrt2LdasWYPTTjsNAFBcXIzjjz8ezz77bI9RJK2trbJ+n8hJJ50Eo9GIxx9/PK39L774IrxeL8444wwApAJXUVERnnnmGcRiMem6V155RfosRNrb29P+32g0YsqUKeB5Pq162/eRTL9napkzZw7Gjh2LBx98EIFAoNvzmfTnAw88gAcffBC33norfvWrX/V63bnnnovm5ma89dZb0mNtbW1YtmwZzjrrLEkkmzp1KiZNmoTnnnsu7e9/+umnwTBMnymlFAqFQqF8n6GRRxQKhUKhDAH68n0ROeOMM/Dwww/j1FNPxcUXX4yWlhY8+eSTGDduHLZs2SJd96c//QkrV67EGWecgZEjR6KlpQVPPfUUKisrsXDhwh7f+/rrr4fP58Pvf/97uFwu3HrrrVi0aBGuueYa3Hvvvdi0aROWLFkCg8GA6upqLFu2DI899hjOPfdcFBUV4de//jXuvfdenHnmmTj99NOxceNGfPT
RRygsLMz4Mxg3bhwWLlyIa6+9FtFoFI8++igKCgrwm9/8RrrmySefxMKFCzF9+nRcddVVGDNmDJqbm7F69WrU19dj8+bNGf8+kaKiItxyyy248847ceqpp2Lp0qXYvXs3nnrqKRx55JH4yU9+AoBEPN1999245pprsHjxYlxwwQWoqanByy+/3M3zaMmSJSgtLcWCBQtQUlKCnTt34oknnsAZZ5yRZiL9fSTT75laWJbFCy+8gNNOOw1Tp07FFVdcgYqKCjQ0NGDFihVwOp14//33e33922+/jd/85jcYP348Jk+ejH/+859pz5988skoKSkBQMSjo446CldccQV27NiBwsJCPPXUU0gmk93SCB944AEsXboUS5YswYUXXoht27bhiSeewP/7f/8PkydP1uzvp1AoFAplIKHiEYVCoVAow4TFixfjxRdfxH333YcbbrgBo0ePxv3334/a2tq0Tf3SpUtRW1uLl156CW1tbSgsLMSiRYtw5513dqsK1pVbb70VXq9XEpB+/vOf45lnnsGcOXPw7LPP4tZbb4Ver8eoUaPwk5/8JM38+e6774bZbMYzzzyDFStWYP78+fjkk0+kqJ1MuPTSS8GyLB599FG0tLRg3rx5eOKJJyQfGgCYMmUK1q1bhzvvvBOvvPIK2tvbUVxcjFmzZuG2226T+YmmuOOOO1BUVIQnnngCN954I/Lz83H11VfjnnvugcFgkK67+uqrkUwm8cADD+Dmm2/G9OnT8d5773Uz8L7mmmvw6quv4uGHH0YgEEBlZSV++ctf4g9/+IPiNg4UmX7PtOD444/H6tWrcdddd+GJJ55AIBBAaWkp5s+fj2uuuabP14pCYXV1NS655JJuz69YsUISj3Q6HT788EPcfPPNePzxxxEOh3HkkUfilVde6WaUfuaZZ+Ktt97CnXfeiV/84hcoKirCrbfequr7RaFQKBRKrmH4rvHVFAqFQqFQKIOM2tpajB49Gg888AB+/etf57o5FAqFQqFQKEMO6nlEoVAoFAqFQqFQKBQKhULpFSoeUSgUCoVCoVAoFAqFQqFQeoWKRxQKhUKhUCgUCoVCoVAolF6hnkcUCoVCoVAoFAqFQqFQKJReoZFHFAqFQqFQKBQKhUKhUCiUXqHiEYVCoVAoFAqFQqFQKBQKpVf0uW7A9x2O43Do0CE4HA4wDJPr5lAoFAqFQqFQKBQKhUKhaALP8/D7/SgvLwfL9h5fRMWjfjh06BCqqqpy3QwKhUKhUCgUCoVCoVAolKxw8OBBVFZW9vo8FY/6weFwACAfpNPpzHFrKBQKhUKhUCgUCoVCoVC0wefzoaqqStI+eoOKR/0gpqo5nU4qHlEoFAqFQqFQKBQKhUIZcvRn00MNsykUCoVCoVAoFAqFQqFQKL1CxSMKhUKhUCgUCoVCoVAoFEqvUPGIQqFQKBQKhUKhUCgUCiUDmuv34cCuDbluxoBDxSMKhUKhUCgUCoVCoVAolAywP38MRr5+AloaanLdlAGFikcUCoVCoVAoFAqFQqEoJJlI5LoJlAHC52mHjYkAAOo2fJLj1gwsVDyiUCiUAcbnaUfQ78l1MygUCoVCoVAoKuA5Djv/fAzq75mBcNCf6+ZQBoD2Q/uln+NNO3PYkoGHikcUCoUygAR8nYg/OgutjywEz3G5bg6FQqFQKBQNaTywG40Hdue6GZQBIuD3YHJ8O0Zy9djxxRu5bg5lAPA17ZN+NvjqctiSgYeKRxQKhTKA1O9ejwJ4MYo7iIN7t+S6OZQBhkadUSjDh13ffYpt9y7Cvq3f5roplAEi4OtE2cvz4HrpWEQjoVw3hzIAeFoOSj/HD6zNYUsoA0WkLSUYGeK+HLZk4KHiEYVCoQwg4c4m6eemrV/krB2UgSfo9yD26Gy0PnIsjTqjUIYBk/77I0yLbkLy3V/kuimUAWLvmo8AAFYmivbG4RWRMFzxtx2SftZH2nPYEspAwQXapJ9NiUAOWzLwUPEohzTs34l
vn/x/2PS/13LdFAqFMkDEOlInVElvQw5bQhloDu5Yi0J4MIqrQ8P+HbluDmWASCYS2HbvImy57yREwsFcN4cyQHQViN2Jtj6upAwlwjWrpZ997XSOHw6EO1PikSFOPY+GA0zEI/1s4ah4RBkgmt/6LY5qXYaZX1+b66ZQcsDWle9i+z0LcaiW5sUPJzhvapHBRLw5bAlloAm2p4TDph1f5bAllIHkUO1OTItuwhGR77D5vb/mujmUAcLb0SL97NEX5rAllIFEF+6Qfg53NOawJZSBIu5tln42Jqh4NBzQRVPrdysVjygDhS2SGmzoaeTwY/rnl2JqbCua/31jrptCGUD0odR933XyoQx9Ym210s/xtv29X0gZUngbU33Nd9TmriGUAcXvaZV+tiSH1+ZiOGOIeaSfYx4qHg0LgqnIQnqvDw/0XXyO7Pzw2sNT8SiHsHxC+tnb3tTHlZShRiiQEg0MyeE16Ax39F1CmvXDzGRvuMN6U/4XTJT2/XAh1HZA+pmlgvGwIexLeZ/kcx19XEkZShjjqXuc89O1/bAgllrXDbcolOGKqcv63cLEhpU5PhWPcoiVSw02gY7mPq6kDDWa6/ZIP3OMLoctoQw0pi4hzSYa3jys0MdSmwoqIgwfkp310s+GGO334UKki3hkYyI0wnyYYO0yr7Phzhy2hDJQ6LqIR8MtCmW4Ykmmr9/9nuFjlE7Foxzi4FNfvJCHikfDiVCXcHZ3jJ5MDScsXRaWZioeDSsM8UCXn2nk0XCBDaRSV0wJ2u/DhVgwPdoo4KXRR8MBG5e6x1lqnjws0HWZ261MFPFYNIetoQwENi793g752odNFV0qHuWIRDwGJ1IhblFfax9XU4YaMX+qvwuSw0etpqRXZaDhzcMLY5dyrka6qRg2GKKp6AMLFY+GDclgetRJ2E/Fo+GAg0+N8/o4neOHA4ebZFOheOhzeIRZw6p/InBnOb795x25adAAQsWjHNG1CgcAJPy0jOtwIuZL9beNiSAWjeSwNZSBpOuEQ8ObhxdmLtXf5iQVj4YL5rhH+vnw00rK0IUPedL+P+z39HgdZegQCQVgYWLS/9PKW8MDYzLd7ybgpXu6oUwsGoGVIdFlTSgCAEyr+yccTBhH7X0EviGewkbFoxwR6EwXj3hasntYwYXSBxZfJ408Gw4k4jHYmbD0/3YmjEQ81scrKEMJSxfxyEKjzoYN1oRH+rlrVAJliBPxpP1vNODp8TLK0OHwtZyZFkQZFhxeYS3so5FHQxnxPud4Bh2GEgCAo8vavuXAzpy0a6Cg4lGOCHnTJxgmSk8nhhNMKH1iCXqoeDQc6CmUmYY3Dx9sfOp0kkadDR/sXaKNLEwMkRAVkIYDbNST9v/xEDVPHuoEPekHw10PDChDFwufHnkUDdB13VAm6CUBAAHGiqjB2e15f9O+gW7SgELFoxwR9aVHnrAx6oMwnNAdtqgM0xDXYUFQuO+DvBkh3pT2GGVok0wk0qPO+BC4ZDKHLaIMBDzHwc2nz+9+Dx3vhwOHV9ZLhmiE+VAn5E2fz630kGBYIB4MNaMAABCjUYZDmrBgPRJg7EgYXd2ej7bWDHSTBhQqHuWIWCB98di1zCNl6NPVQBUAIn4qIAwHQkIoc4CxIcDYyGNe2vfDgcBhficsw8Pvo5EIQx2ftwN6hlRgCfJm8l96WDAsMAoVFTmeAQAkw1Q8GuqIxVAaBR8UOx8aNhWYhivxWFTyueoUUpiSoU4kE4lcNouSRaJC8YMQa0fS2D3yiPEeHOgmDShUPMoRXDB9w2hI0DD24YQ5nr6IjAeogDAciPhSE06QdZDH/J10cTkMCPuJUBTj9YjwBgCp0GfK0MXf3gSACEcdbD4AIETFo2GBaIrfxBYDIN6W+7etQcP+oe2HMZyJC+lK7aYKAICe4bD9/hOw765ZqNnxXS6bRskSwS6HQEFzKQAgb9dr4O4qxrZ7F9EI4yGIeJ9H9A7w5u6RR/rw0J7jqXiUI3j
B80Y8nTBR8WhYYUsS8agFZDORDNL86OFAXOjnsM6BiM4OADCsvBfeP1Vh68q3c9k0SpYRy3QHGCv8DOl7KiIMfQKeZgCAl3UipCOCcdRH+304YBNMdNuNREgobfgfRi07Bca/n0YLJQxROHGON5dKqenTopswNrkfzSuezWXTKFkiJEQVh3kjEmayph+fqIaBSWJadBMVDYcgScG/LmZwQp8/SnpcvOfNsdwfDMaT2TuUpuJRjtBFyBevw1gOIL2EM2XoY+fJiWSrsRIAwIdp+spwICGUbo4ZHIjqyUZyUmIn3AhgymdX5LBllGwTETwQQowNIZaIR566bVjzxv1oaxraIc7DmaiPpLEEWSciehLeHm3Zi3XvPU0FhCGOXaisF7KPAACM5g6AZXgUoRN7N63MZdMo2UKopJs0u+Fl0tNZXJ7tuWiRRCSexPoDnUhyfE7bMdQQD4aCjBWcpaDb862blw90k9J4a0M9Hvu0Gr5IPKftGEpwYQ8AIGF0wT1yuvT4TsfRAABHIncBATzP4+Lnv8WEP3yEj7Y2ZuV3UPEoR+ijJPIkaCUnUrQiw/AhmUjAJSwqg/aRAAD2sJK+lKGJOOHEDQ7EzOmLDB3Dw08rrw1ZYkEiEEdYK8KCeDR3/W8wf+c98D5/Vi6bRskiMSHKKGxwIy4Yax619xHM3fA7fPf3W3PZNEoWScRjkkE+nz+22/OeXbkVj97ffAhra+h8ozW6MBGPeFsx/Pq8tOdGxvblNEX9oU9240dPf4Nfvb4xZ20YikSDZD8XZqzQF4zq9ryuNXdpqk3eCG7692Y88uke/P2b2py1Y6jBCHu2pMmF8nFHSI/zE04FALg5Tw5aRWj0RvDNvnbwPPDBFioeDSlMcQ8AIJk3BgDg4v1Ys+xBbF35bg5bRRkI/J42sAw5+eHcowEAVn8tvv3nHdi55uNcNo2SZXhBPOKMTnAFE7o9f6h608A2iDJgxEPEPDeqsyNqcKQ9NzZZg6aDe3PRLEqWSQrFMWJGNxKWorTnjj74fC6aJHGgPYgTH/oCv162OaftGIr4Pam0BXPJ+G7P59JQdWejD7/410ac/+xqtPqjOWvHUMQUJf2ucxQhbEgXj6xMFO0tDbloFgDg4+0khfaDLY3w0ygUzYgJVRTDOjscpeO6Pe8M1g5wi1LsbEpV+tzWQKt6a4VOCACB2Q2bw41vJ9yM1ZU/xfgF5wAAnAgiGgnlpG31namqvtsOZadIAxWPcoQ1QW5iS/lUAICJiWP+9rsw/fNLsX/bmlw2jZJl/J1kAg/wFuicxFxvenQDjtr7CMo++ilNZRjCsFFy3/MmFyzlU7o97z2Yu7D2f6yuxcL7P8fybdk5qRjuiGW6Y3o74obu1TnqN3020E2iDAC8kMaSMOcD7qpuz7c31w90kyTuX74L+1qDeHN9PZq8kZy1YygSEPzMgrwZ1vzybs8bQ00D3SSJr6pTnlvvbz6Us3YMRaxxEs1lcpUiZszr9nzHoX0D3SQAQCiWQH1najO7s5FWeNaKRNADAIjprCioSh0KbrKSFKbSxMGcRZxVN6f6eXcz7XOt0MfIeo61knv8qIv/gKP/3yNw5hUhxusAAJ2tuRlbD3ak7vMD7SEEo9pX/aPiUY6wcWQTaSuqgp+3pD3X/N1buWgSZYAIeogHho91wuBIT11yI4DqDSty0SzKACCdVljzMGL6sfDzFhxgK7HOeTIAIOnJ3UbyuVX7Ud8Zxs/+uSFnbRjKcBEy5icMDsQd3UWEeGNu/TA6grGsGiwOV3RhspnkLfkwFYzs9nxjde7ut11Nqc3Eyj2tOWvHUCTsI6Khn7HDnlfS7XlHtHmgmySxud4j/bz9EI1G0BJnkqQnW/NKwZXPBgA0oQi79ZMAAP6m3IhHdR0hdLU62p6liIThSFKY22N6BwpLR2CbaSb26sai4ifPAABcCMLnyY2B8v7WlCVKbXsQsQSd47XAlCBzp96WLhAzLAsPQ9LT/W05Eo8
60yOeatu1t8UZdOLRk08+iVGjRsFsNmP+/PlYu3Ztr9c+//zzOPbYY5GXl4e8vDycdNJJfV4/UPAcB5dgmGxzF8PDpn/5zK1bctEsygAR8QoGqjonzI7u5nrevd8OdJMoA4QxThZsOms+8orKkLh+Iwpu+Apxl+B9FchN1E8knsTBjlSoa1uApjJoToT0fdLogL4olcbSCRKFZPbmZlMBAE99sRez7/offvoKrQqjNYaYBwDA2grgKBnd7flgfW5Ew0g8iQPtqUVmdQs9ldaSiCAehVg7iivHSI+3g2wsCpItOWkXADT7UlFmOxqpeKQVPMfBzZNx3lFQhnnn3YwDF66A9YY18FtJcZR4+4GctK3Zlz6n17RRn1Wt4MLC3G6wg2FZTLvlS4z9/ToUlY9ChzC/t9VX56RtXe91ngcaPOE+rqZkikUQj4z2/G7P+XRkTx/syFXkUXof17Zpnz43qMSjN954AzfddBNuv/12bNiwATNmzMApp5yClpaeJ+EvvvgCF110EVasWIHVq1ejqqoKS5YsQUND7nKOASAc8sPEkHxjZ34xAnp32vOlodwMMiItvgg2H/TktA1DmZhfMFDVu+AorOz2PNOeO++T9Qc6sfihL3DHe7mNghiqmIV0VTHiLK+oDHZnHlinUHUxR6kMe1sCaf+/u4luJLWGjZHPlDc54ChPhbbvGn0pACA/XJuLZgEAlq0jEW+rqtvQ6KWLSy0xCeKR3l6EygkzUcdWYKdhCtYUEm8E3pMb75sD7aG0qkv7WulmUktiYsl2vRN6gxHr5j6ANrhRf9xfAAB58CMczM0421VIqG72I5pI5qQdQw2ftwNGhnyW7qJyMCyLkZNmw+kuQNxOiuPkyuuq+bC01Nr23PixDEUYoVpy0uROPcaS7XWbnlhT+Jr2D3i7AKDpMNGwroP2uxZYOTJ29xQAEDQQQSnmyc16XkxPNegYAMDfVtfi+AdW4Na3t2r2OwaVePTwww/jqquuwhVXXIEpU6bgmWeegdVqxUsvvdTj9a+++iquu+46zJw5E5MmTcILL7wAjuPw2We9e0tEo1H4fL60f1rj6ySRJ3FeB5vdhbCxMO35Er4NkXBuFnKHPGEseuAL/ODJr/Hx9tzl5A9luCA5kYwZ3SgfNbHb8/ZA7QC3KMWf/7sD+1uDeOWbWuxvDfT/AoosrEkynhw+4ZgLSBqTPZab1JHDT6N20tNozdEJ4hFjdmL0EQuw3TgDa92no2rBRQCAomRrTnwRoolkWo78il00fUlLbElyKm1yFcJssaHqD9sw8Xdfgc8n0SiGQG4Osxo86ZuIfXS815SkUF0xJvibzT3zahTecQBHHH8+grwZANDaMPDRhjzPp0UjJDge1c2077XA20pEeD9vgdliS3uOzRsBADAFcxON0CT0+agCKwCglkYeaYYuIqQmW7sLCQEzEY+ibbUD2SSJFqHfK9zEHqW62Y+Ve1rR4qced2pwCBWzra7ufR4zkz09589NdKlomL1wHGnH2poO1LaH8NqaOrRrlFUwaMSjWCyG9evX46STTpIeY1kWJ510ElavXp3Re4RCIcTjceTndw8zE7n33nvhcrmkf1VV3b0p1BLsJF8oL+MAw7IY8eMn8G3JRVg/72EEeAtYhkfzgd2a/95M+HpvG8JxcnLyz29zE1471OFDZKJJmtxgWBZrp92OJhRi7fQ7AQDFsdycTCU5Ps0D491N1EhTaxxCuqrFlV51yV5EFpZ5ybZurxkIDjfLpZFH2qNPkMUGa3bBZLZi6q0rMe+GfyG/lMwxViaKgN8z4O3a1xJEoksEyue7cufFMhRxcEQ8srmLAZATaVang6mA3PP2SG5SVRs85J6fUeUGQEw2aQSKdvAhDwB0M8dnWBatOjL+e5sGfo3lCycQFXxP5owk6RU0dU0bAh3kwPVwKwoAsBSRlFVXNDf3uygYHjWGbHbrO0N4+esanPv0N2mHBxT5GKJEKNbZC7s9F7MJZvm
+gV9PRxNJtAdJAR6x3+/+705c+tJaXPjst+B5vq+XU3ohHovCyhARxu7q3ucJqzDX58CGIp7kpOjxY8cXdXt+k0ZZRYNGPGpra0MymURJSbrxYElJCZqaMouQ+e1vf4vy8vI0AepwbrnlFni9XunfwYPab+TDgudNgCXlmgvLR+Koa5/BnNOvRJOeDDSdB3dp/nszoauR4pr9HQjH6GJSa8RTCs5CRMx5596E0jv2YeIJPwYAFKETAV/ngLerpi2IUJf+Xr6NRp5pSTKRgIMnizS7O33CyS8lnkf58OWkvOchYbIpd5ET8V1NfiSSHD2d0hCjIB7prK60x612l1Q0oaOpbsDbdUiIOtOxJMT5673tiMTpuK8FXDIp+Rs68krTnrOXkMij/ERuTicbhX6fWemCw6QHxyPNA4miEsEonTO5uj3lM5J1bCQH0QjimO6yGDBLEA4/2NKIHz71NR78ODeHlkOFSCfZLB5uRQEArrKxAICiHHldieLRtAoXrEYdOB648/0dWHegE29vzK2Vx2DHEvcAAAyO7pt12Mm9rg8NfL+3+onAYdSxmD8mPWhif1tQikajyEPMHgIAew+RR6yb2JHkoqLmIU8YHA+Y9CyOHNU9UEarAgmDRjxSy3333YfXX38db7/9Nsxmc6/XmUwmOJ3OtH9aExU8b0K67osKr4WcQkdacuN71DXiIJbk8F1tR07aMZQxCOIRa0+faFz5RZK5XlPNwHsOHRAc+SvzLNCzDHY3+7t54VCUE/C2g2XISY8zL73v3QUliPIGAEB748ALCI1CFMIJk8iJyY5GHy587lscfe/nmp1UDHdMSXIvGazubs916sgk728b+KhD8ZTqhInFKHWaEY4n8eyX+7G2ho79avF7O6AT7/mC9IOvgnIiHhWhMzeCsSAelbstGFNsBwBUNwfgj8QHvC1DEVaorMlb3N2ei1jIdyHhGfhNu7ihLHaYMKWcrDdW7mnFxjoPnlixlwrHKoj7iEAQMXbftBWWjwIA2JlwTg4HRZ+rMpcZIwvSU+q21NPKa2oQU5PNru7ikd5VBgAwRQc+qlwUDIudJkwu7b6X3XyQ9rsSgl7Slz5YodPruz1vLiRRxc7owItHYspaZZ4FY4tt3Z7XytNy0IhHhYWF0Ol0aG5OD6lvbm5GaWlpL68iPPjgg7jvvvvwySef4IgjjshmMzMiESCeN1FDd/Eo5hoFAGA6cmOuJn7xxguLyY+3N+FXr2/Ev9YO/IZ2qGKJk4WDwVnc7blmAxEPPQd3DGibgJTvzZQyJ46bQCbBkx7+Ej9/dQONQNMAv3BaEeAtMBhNac8xLIs2liw4vS0Dn8ogTijzRufDYtAhyfFYd6ATSY7Ha2to+qoWWDgizhpt3cd9v55EokVyUJ2j0St6Iphx5hFkofvIp3tw/rOrsaqa+h+pwSeksQR4C4ym9EOrvMIyhHkjAKDt0MDfY4cEwbjcbcHYIrLI/PlrGzD9jk9owQQNMMTJCS9r6Z7ClLST+4wNDvzmolXwvChymHBEZfexiIrGyuH9ZH8iep50xeZwSxGm7Y0Df7+LUSYlTrN0v4vsOERFBDU4eXKvW90l3Z4zuUk2iSPePqBtAlKCYanTjElljm7P76UVNhUREippBhh7j887haqquaioKaagVuVbYTXq8fylczGzyo1LjiLZDeK8r5ZBIx4ZjUbMmTMnzexaNL8++uije33dX/7yF9x1111Yvnw55s6dOxBN7RdO8LyJG7tP3LrCcQAAa2DgxZpoIilNMBfOI8rpq2vq8O6mQ7jlra3YSk8nNMGW8AAAzK7uE43fRm7weA4izxoE4bAiz4JbTpsEu4ko6v/d2ojXqHiompCPnFb4e5lwvAYiJgbbchB5JAgIlXkWTChJb993tQN/SjoUsQopixZ7981k2EzE2oQ3d+JRqcuCK48djWJHStj8iKauqiLYSTaTPrb7qW9X7xtP48AfFkmpqm4zxhal3/OvfFNL/TB
UYhTEI52t+/0uVtc0hQbeX0yMPCpymDC2yI6p5enfzWoabawYVvA44ew9H2h36Iio5B/gA6J4kkObIBqWOM04eUr62rPRF0EsMfDFGoYC4aAfTpCDIXdx9+rJ9kJyr7u4gV9HiV6WJU4zDDoW7/58Ac6ZVYGL55P93eEl3SmZEfWTPXyI7S7IAUB+OUlRdSMw4BU1DwqV1irziFB98pQSvPPzBThJuOcP9zdVyqARjwDgpptuwvPPP4+//e1v2LlzJ6699loEg0FcccUVAIBLL70Ut9xyi3T9/fffjz/+8Y946aWXMGrUKDQ1NaGpqQmBQG4nR7GsI2fuvqgQSzgXxOoHtE0ASV3hecBi0OGsGWXdnt/UxQ+JohwX7wEA2PK7f8ZcPhEPDZ0DX4WlXog8qnBbML7EgU9uPA7HjCX5vB9soebZaokIoa5BXc+psCETEY8SnQObysBxqeo7ZS4Lls6sSHu+oTOcVtKbIh+e42AXxSNn95SGhEUId/cP/GaysYuIUOay4PNfH48/nDEZAI1CUEtE9DfsIUUdALwGsqALtdYMWJsAUhxBXER2jTzqyuEVGCnyMCfJpsFo636/mwvIGJuL6pqSeGQ3gWEY/OXcI3Dd8WNxzizSJvEQiSIfS5iI7fq8ngvt+AxEPAq3D+z6vtUfBc8DepZBgc2I06aV4aJ5VbjltEkw6VnwvHabyuFG2yEi/Ad5M5yu7ve6u4gISnnwIx7TptJVpnRNWwNIcYSHL5iJuYJRvig0UOQRD5B1UUTfs3jkdOUjIthQdLYM7HpeFASr8qxpj4t+poeGW9oaAFxwwQV48MEHcdttt2HmzJnYtGkTli9fLplo19XVobEx5W7+9NNPIxaL4dxzz0VZWZn078EHH8zVnwCg77KORSPJor2Eax1wH4SuimWxwywplyK0tKd6opEQnCCfs7uwu3hkKp0IAHCFBj6suaFLrixANhV3nz0NACndTgUEdcSEdNXeJpyETTit9A9shYa2QBTxJA+WIT4Ylx49Ev938gS8fMWR0LMMYkkurbQzRT7hkB96hpzs2pzdDw3gJGOBIQeRCGLkUZmL3Pd2kx6nTCXfxbr2EL3vVRDzE3EgrO9ZPIqYBcHYO7D3fKs/igTHQ8cyKHaYMbOq+3dyVyNNaVCDTfA4Mzm6byjthbmrrtk18ggAppa78JtTJ2HWCDcAUoWLogxnnKSpWAp6Fo9S9/vAHsaJ/mZlbjNYloFRz+Lec47ANYvGokJY79F+V4a3qRYA0KYrBMN231I784rA8aQYhbdjYNOYGrocCHdlRD4RFupolT1FJINkDx/T93wQzLAsOhk3AMDXNrBCsXgfV+Wni0dlwnfAH0kgEE2o/j2DSjwCgOuvvx4HDhxANBrFmjVrMH/+fOm5L774Aq+88or0/7W1JPT68H933HHHwDe8C6Yo+eLpHN09bwqKKxHkzdAxPJrr9gxou+oPEw/uO+cIOM0pMzAqHqnH2042h3FeB4e7e158/ogpAIDyRD14LrthxDzP47U1dVi27iB4nu8y0aQGnZEFNlgMOkTiHGpo/6tCmnB68DoDAAipDMbgwGwk69pD2FjXKYnGJU4z9DoWBh2LX5w4HidMLEa5MOHQUr7qCHhJ3yd4FlZb9wWHaKppjg5sJALP813Eo5QnT7nbAoOOCIe0IotyuCARjGOmHgRDAAkrmQOY4MD2u1gcodxtho5lUOoy4+kfz8aSKSWYP5qIHXRjoQ47T8Qjaw/VePKE6poF8CIWHdj7q6vnUVdSIgKNPFICz3EoSpL72FU6qsdrkhbhfg8NrP+NuLYrd1m6PScKC/U00lAR4XZS5MJn6L6fAwCdXg+vYFUQ7Px+iEeisNDojSCRpOmKcuGCRPSPm7sfDIj49WTOD3cObOr/wcP28SJ2kx4OYT/fqMG9PujEo6GAVTBMNvXgecOwLJr0ZBPZcXBX1tuy45APd76/HS3+SJrRFgAsHF+IjbctwT+unAe
Ahjhqga+NnDh5GQdYna7b86WjJiHJM7AxEbQ3Zbfy0tqaDtz69lbc/OYWLFtXL51IVnQZdHQsg/GCBw4111MHFyL3fdzk7vF5Yx5JG7AOgIBQ1x7CyY98iR8+9Q1+//Y2AMD4ku4RUeIJ1UG6oVCFv4OIxh7G2ePppEXoe+cAm2p2BGOS10WJMyUe6VhGCns+QEVjxfCCYJzsRTxi7GTDoY8MbL/XCuLRqC5Vl06bXobnLp0rmSjTtDXlJOIx2Bny+dlc3Q+J8grLEOPJQr69aWCjjA+PPBKpFO53GoGiDL+vE1aGfLYFZaN6vshGvgv68MBGnEkiQl538UjcZNJ0RWUk2msBAGFrea/XBBiytgp6BlY8SgUEpEehFNlNMOpZJLnU4RElc9gQWaNz1u7V9URCRnJoEPMOnHgUiiWk8f3wtDUgJR5r0edUPMoBjqQHAGDN69lUz2cip9DRtuwuKniex03/3oSXv67FVX9b16NiqWMZlAqbCtG5n6KckKBC+1h3j8+bzFY0skRUbK7NbsWbD7akIlxe/qYWAPG7yrMa0q4TxUR6IqkOJkw2kpyp58gjayEJdXfFsy8evfxNDaKCaLCriYiCk0q7i0dV+WQsoFEI6ggJxsl+tue+dwpGm/lc9j2GIvEkfvaP9bjrgx1S5Y1CYTHZlZEF5L6vbad9rxR9iGwWeFvPi0y9EH1sHqAyzrub/DjYEUJNG+nTMYXdvY7EU2q6mVSOp53M80megSu/50NCqbpm88AWSOhNPBL73RdJwBeJD2ibhgLtgveNFzZY7T2P86ydjAPG2MCaJ4sHw4dHoABdRUN6vyvB2EmK23AF43u9Jqh3AwCivoETjyLxZI8HwgDAsgyq8ujaTilG4bBHvJ97ImYi4hHnH7g+39dCDoUKbEbk2Yzdni9zk718owa+R/r+L+mOx+PBm2++iX379uHmm29Gfn4+NmzYgJKSElRUVPT/BsMYnuOQx3sBBnAUdPe8AYCYtQQIAZwvu3nRu5v90sZxc71XSkvqehoJAMWCeOQNxxGJJ2E2dI+YoWRGTJg8goaeT6IBoMNUicpIE4KNewCcnrW2bO9SnnVnI6kMM7LACoZh0q6roosLTTBEyAZRjDY4HLcQ6l7Id4BLJnuMTNOKr6q7b1ZnVLq7PSYJh3SBoYqosIAI9eJ9k1dCPFBsTAQBXyfsPfkiacQ/vz2A5dvJ5tYijOWjCrqfUo0ssAFoxYEOGnmkFJMgCumcPR8UmdzkcVs8+5vJVdWtuOyltTDqWanfxxZ3r/xYIYz3NPJIOd6WehQC6GRcKNT3vMz26gtRHm9BcAA8Md7d1ID7P9qFB86bgY5QDACJPuiKzaRHvs2IjmAMDZ1hOMsMPb0VpRf8zeSwt50tQi+J6VK2gXUA7veubD5I1npTyrqnTEtisYfO8UrID9UCACzlk3q9JmJwAXEg7h+4CNN9rSRt1mUxdDsQBsjabl9rkFoSKMAcI4d8BmfPa3kASFqLgM5UlNJAsLeV7OfH9TCvAylfywZPDiKPtmzZggkTJuD+++/Hgw8+CI/HAwB466230iqdUXrG7+uEkSFmVT0ZJgMA5yCP64PZNU/dUu9N+39fhLRrUmn6BOM066XFJq3IoI6EsImMGnvPlY1YyIYimUVTRZ7nUd3cvergxB6iTyqpoaImWCJkEjG4ew5vLiipAsczMDBJdLRmr+87gzGpHPNjF84EAJQ4TThpSveJUBQO6emUOpJ+IiJEje4en7c78xDkiUjfmeVIhG/3p6KbnvxiLwBgVA8RKGLk0YE22vdKsQlpiGZ3z3O9LZ+MBU7Ok/W2vLamDhwPROIcOkMkskSsptmVVKQxneuVEuwgUb29RRgDqeqacU92xaNQLIFfvb4Jh7wR/N+/N4PnSUR5nrX7yXQl9T1STKSdjNt+U+8bSoubiEdi9kE2SXI8fvvmFky5bTl2CIeDMwVT9K7QPldOMpFAeZJU0yocNaPX62JGchi
UDA5cuuKeZiIkTCxxdDsQBiD5WR6ihwSycSSI+GvO63leB1KHxIYBTFHdWk/uc9Fq5HAqxMijXHge3XTTTbj88stRXV0NsznlkXD66adj5cqVqhs01PEKnjcB3gKztecO1rvIgtIczq54tL2BiEddxxWLQdfNaIthiKEmQBeUauEDZCBJWLov2kWSDhK9pwtkT0Bo9kXhjyagYxlMq0iJhRN68L2hiwttcCZI31vye47ONBhN6GDImaWnOXspq6JwVOG24AczK/D2dcfg39ccDZO+e6RTuTDZUNNkdUgGi6beReMOIY3F15rd0q5dRWBeKKQ2ui/xiAqHinEniVBnK6zs8XmncIDk5n1IJtRXQOmLww+Ljqh0YWxR9zWIWNa5PRijlfYUEvUQ8Shg7H2ej4vVNX3ZLZCw45BP+lkcx0fmW8Gy3TeUFXRDqZhkJ5mzI9beN5SOAtLnbt6X9YIo/153EG+sO4hQLAkAmDXCLQnDXRFTmpq8EXq/y6SxdhdMTBwR3oDSEb2nrXFmIh4xoeynpde2BXHfR7vw3iayf+hNSEiVbqdrOzkkEwkUcmQ95+xlXgcAg1AExRLLfrRZIJrAP789gL+trgUALBjb3WcP6CIY5iJt7bvvvsOzzz7b7fGKigo0NQ2sq/hgJNhOFgoe1oWeb2nAnE+8TxxZ9j7Z10rSEX6xeDwe/4zk7R45Or/HRUWJ04SatiDdRKpEHxb6tA+jNb27AqgDTKHs3U/iqcSoAitOm1aGbQ1kgXnc+O7t6poTz/N8j6cYlL7hOQ75XCfAAK7insv4AkCnvgiFCQ8CLdmLPqlpI+LRmCIiGMwa0XuKlGii3OKL0r5XQcpgsedJHQB8hgIgdgjhjuxFIvA832OY+pTy7ukM4n3fQCMOFZGIx5DH+8g9X9SzYOwuKAXHM9AxPNraGlFY2vvYoAZPKCaloX3+f4uw7kAnTpla2uP9XGAzgmFI5EJHMNbNG4fSP0kfOfiLmnoXj+AoA5oBfTC76+ZtDd5uj02t6DmxqlxKYaLikVzMnaQ6Ml/Ye/qSu5AcDJuYOPx+Dxyu3g8T1LJ8G/leHTehCDMrXfjJUSN7vN+LHWboWQYJjkezLyJ9Byj901a7FZUAGvSVGNtLeioA8FYyDugi2U1XTHI8fvLimrSD3uMm9LzXKJPMk+m9LoemumpUMHFEeQNKR07o9TqL4GlsT2S3z3mex+UvrcW6A+T3OMx6HNtLn6eizXKQtmYymeDz+bo9vmfPHhQV9b4hphBCHrKoCOh637A5hc1lts1TxVSUBWML8PvTJ6PUacZvTpnY47UlNJRdE8xhMqHr3L17g5kLSP87Y9kzWhPFo/HFDlxy9EgcNSYflx8zCtMruy8qxcijQDQBT4gaaSrB52mXKrHkC/42PREwklDXaEf2Ku3tF7zNejLLPZxiB7nvY0kOHcFY1to01LEESTSRPq/3vg8L6Q4Jb/YiETyhOILCSXRhF8+TI0d138R0NdD1UwNd2TTW7gTL8AjzRuQX9ZyqqjcY4REq8YiVOLPBwQ6yQShymDCmyI7z51bBZenZ00avY1EgmG22+Ol8rwTGS6JQErbeo1D0UnXN7Bqq7hbS001dDPFn95C+BFDxSA1FYWKYba+a1us1FpsDIZ6Mu7727I3zSY7Hulqyf/jtqRNx05KJknfp4ehYhva7QiKHdgAAPNbRfV6nsxHxKNtG6TsbfWnC0aRSBxZP6jmNUuzzRg2EhOFEW+1mAMAhXQX0hu6pvyJ2wdPYzWW3z7cf8knC0aRSBx6/aBbspp6FzIou9zmnMspQtni0dOlS/OlPf0I8ThaTDMOgrq4Ov/3tb/GjH/1IVWOGA3HhRCps7F08yhfKfDoRRDiYnfLo8SQnTRQjC2y46rgx+PbWEzGtlxMpMdy1yUsrrqnBKUSTWQp7P2F2Fo8EABRw2cuVFc30xpfY4TQb8PrVR+OOpVN7vNZs0EmnzzR1TRmN+8iE04J8WGz
dUwNFYlZyWsF5sxd9Ui9sJEcU9C8eGfWpjSSNOlROXpQIA5aSMb1ek7AKi7wsprEcFKKIihwm/O60SShxmnDt8WN7XGzYTHq4BaNNuqmQT9v+TQCAesPIPs3vRV+cYEf2IlCkUt0ZRhUUCaJxi5/O90rI8+4EABgre/dBsYqHRFmOMBf7/vdnTMbJU0pwzuwKXDSvZxFb9MSgaWvyaGuqQyVP7t/yCXP6vNYjVNz0Z1E8avJFEIwlYdAx3TxMe0IcF6ivpTx0HSRjI5Y3rs/rDA4ScWyOe7LaHlEwnD86H3/76Ty8dd0xMOh63uaLlgSHvCSjgJIZ0S3vAgDaHL1HHQGAq4iktDmYMCKh7v6yWvHlHjJ/LJlSguU3HIcTJvbuuVbqMoNhgFiCQ7vKw2DZ4tFDDz2EQCCA4uJihMNhLFq0COPGjYPD4cCf//xnVY0ZDnAB0tFxc+/hzHaHWzqdaGuszUo7DnnCSHI8THoWxRmEpUuRR/QkUhWFSSIIuUpG9npNfjk5xXAiiKDfk5V2iCfRIzMQEABqmq0W34EtAIAmc+/iAQDwTnIabcii35WY7yxuFPqja+oaRT5cMokSjkQX5Jf37osABxEOpdTWLCDe91V5Fpw7pxJrbj0Jvz219zQLWrZdOZH6rQAAj73vjUXAQKK+RJ+cbCBXPBLXBK1UPJJNZ2sjRsVJFErJhPm9XucSDokKufas+t+Iaadji+x4/tK5ePj8mb1WzKUmusrYv/odAEC1fjzyi/uuOB3QuQEAEU/2Is7E6qjlbgt0PdhQHI7oe0THeXk4AzUAAGNp73MoAJhdJCvHluyetaMlop/lkaPysWhCEazG3lPpRB/bSJyjGQUZ0ly/DzM7PwYAOI79WZ/XOl35iPHk8+9oyZ6P5T6hz2dUufu91qBjUeLQ5oBAtnjkcrnwv//9D++//z4ef/xxXH/99fjwww/x5ZdfwmbLbCM6nGGDZMJI9mGYzLAs2lnyvC9LlXcOtJPJZUQvxomHI4lH1FxNMT5PO2wM+fwKhOiynnC48hHgyWTeniXxUBSBDjdH742uvkcU+ehrVwAAQq4+xAMAesHvzBrJnlm+GKZc6sqs78VFBo08UkbjgV0wMgnEeD2KKnoPb9eLBouRLIpHwn1flW/N6Hq6mVROYeMXAACu4sg+r4sIpsoJf/buebH/KjIc74uoeKSYXe89CCOTwD7dGFSMmdLrdQVlJPrHzMTh68zOPc/zvCzhULymxR9FLJFdQ+chxYHVAIC24mP6vTRsIFkHMV/2xCO5YjEtiiIfnuNQliD7s/yRvacqAoBVqLLn5LMrHolWJCMK+p/fTXodCu0kqlwLA+XhQO3Xb8LIJLHTMAWT5p7Y57UMy6KDcQMA/G1ZFI+ELJKeil/0RLlG0aWyxSORhQsX4rrrrsNvfvMbnHTSSaoaMZyw+0hpZH1R35tIn4Eo1dkyTxUr6IzMYJABgFIXWUzSDaRyWg6QUPZ2uGC195weKNKmI2GuvixU3eI4XjJMy1w8opFHStmz4QvMDqwExzMoWnBpn9faishptDuenYVlIslJPiZitY3+KJFSVum9r4TG7V8BAGoMY2Ew9h7lacknYc7ZTGMRzbKr8jIb96V0BioeyaLp4F6MT1SD4xmMPfb8Pq8VK2+KlTizgRhRkOk9L0YetdD5XhY8x2Fk/fsAgM5Z14Jhe19imy02dIKkMHc01WalPR3BGCJxIgKVZRBpmm8zwqRnwfN0vJdDkW87AMAy+qh+r40JFTeTgSyKR8L9nun6roJ6HsmmvaUeToSQ5BmUjelbPHILVTWdCCISDmatTeL8PiLDwyHRNFsLA+XhAN9G0hS9BbMyut6vJ0JxuDM7Kek8z2O/UPhqbFFmwTsVedpU0c2o2trjjz+e8Rv+8pe/VNyYoQ7PcSiPkTDHvNEz+7w2bC4CYkDCkx3Fsq6dfOFG5Gf2haNVl9TjOUjM9VoMleijBgsAwGcsBiIHEW7X3ji
5NRBFLMlBxzI9lm7tiSoaeaSYzq9fAgCsd5+MI4/o+2TSXToKAFDItYFLJvv0SlFCsz8KjgcMOibNMLkvSoTS3dQ8VxnJurUAgM783v1PACCvgqQ3lXAtSCYS0PVRvUUp4v1blS9PNKbpDPI48M2bKAWw2zgZk/upoMbbyEFRNtMVxZPlTCspSWlrARp5JIemg9Uo55sR53WYesJF/V7fyRYgj/PD33IQmNp7iptSRDGg2GGCSd//XMIwDCrcFuxvC6LBE84ogmG4k4jHUJU8CDBA2eT++zBhKQA8ABPKXglvcZyvcGd4SEDHedk079uKQgCNbAkqLX3vo5x5RQjzRliYGNob61AxZrLm7UlyvNTvmYtHZmxt8NKKaxli9dcCANh+gj9EQoZ8IAHEvNkRj3yRBPzRBIBUdkh/TC5z4P3NPVfhlENGq9NHHnkk7f9bW1sRCoXgdrsBAB6PB1arFcXFxVQ86oO9W77GePgR53WoHD+zz2vjtlLAB8CfHR8EMW0t08ijrlWXOkNx5Nt6d5mn9Ey8hajWflvvfkciEXMJEAESndqLR2L0UKnTDH0vZnqHI24iD9LII9nke8ippG7S6f1eW1g2ChzPwMgk0dbagMLS3qtzKaFR2EyUOM0ZpasCXc3yqXikhLxO4nelH9F3+lJJ5VjEeD2MTAKH6vehfFTPlS/VcFBKV5UXeURPpOWhr/8WAOApP67/ax0kpcEYyX7kUeZpa9TnTAmNO75BGYAD+lEY10dhBBG/sQiI1CKSpQhzuf0OEIFxf1uQpqpmSEv9fpQzHKK8AUV92BGIMDYSVa4PZ/F+98iLPJIOB4UqTJmuDYYzgQZyGNxuHonKfq5lWBZtbCGq+EPwNNVkRTxq9IaR4HgYdax02N8fWpZuHw7kR8k4bSvLbG0WNRcCYSCZpZR0UfTLsxpgMWZ20DxdKIqlVjzKaOdYU1Mj/fvzn/+MmTNnYufOnejo6EBHRwd27tyJ2bNn46677lLVmKGO79MHAQCbXSfAbO07P5FxktK+xlB2FEs5ubHAYVWX6CZSEbaWDQAArrh3HwSRpIP0PxvQXjysV7Cg7JoTTyszZE48FsXIRC0AoHTS0f1ebzCa0MaQUNeOQzWat6fRK6asZd73JZLnEd1IyiUSDmJUfB8AoGzKsX1eq9Pr0agjptntdbs0bwvX5WQy47S1POp5pITSANlY2Mf274FicBHxyBbvyEpbIvGkVFklY8NsKdqQ3vNyiDUQk/QOV/9zPABELaTvk97sFEiQ630DpDwxqGCcGR0NewAAzbrijCKFdXYSaZjNsu3iAWGma7xSlxmsUIWpLUjv+Uzg20i/h519F0ERkaxI2rLjYyvu6SrzMjNJB0jkEQAaeZQhTp4ILo7C8oyuTzqIrKjzaR8EAKT8S8tkrOenlRPxqLY9BG9YuVG6bM+jP/7xj/jrX/+KiRNTytvEiRPxyCOP4A9/+IPihgx1vO3NmOn/EgCQv+Q3/V5vdJMvpzWq/ekEz/PSQDMyw/BGIJUzTxcV8gn6PZgQJuXaS2ed1u/1Ohep2GHOgnhYLzMfHkidUIRiSXTSygwZ0958UDBL1qF0RGahrp16ssgIttZq3h5xkVCaofcJkIo8aqb+J7Kp3f4tjEwSHXBmFEnUaSYpTqGm3Zq3pTVATHBZJjP/E4Aa6CohFPCigicnjVVT+k9jseWTud6VzI54JM7XNqMOLosho9dInkf+CD0skIFOqJKZdGUWMZq0Ey+UbBwSAcoOisRUJyoYZ0a4hRzyeIxlGV1vFMRia5bKtivxtDR0iVahqWuZYfWRiopscWZRKCFBKI53Zkc8kvwMZe3pyPejkUYe9UsykYAT5DO2uQozeo2haCy5PpidPhfn9vIM13MAkGczSrYF21VEH8kWjxobG5FIJLo9nkwm0dycvWohg5267d9Cx/CoZ0oxZlr/C0p7CVGzC+LaLypaA1GEYkmwTObpCwAwSijrXtuWPcO3ocq
2j56HmYnjIFOOkRNn93u9qYBsIp1R7e8pSTyScRppNugk75uDKo3WhhO+VhLm2sHkZexfFDCTRWi0TXuzdHFRmal4AKTEo45gDNFEUvM2DWU8e0gVnjrLlD7Nc0UiDpLSyrfv17wt4n1b5rLAkGG6ar7NCLOBGOjS08nMECtkBnkzXPnF/V5fPJKkMBShE0G/R/P2HJIWmJaMvQrFamuROIdAtPt6j9Iz5jA57NG7+0tkIehc5cLrsrN2ltKXaORR1kgIUWMRQRzoD6nyVjI7kUdtCjwtgZTQVEfXdxlRGCWCgL0isxS0hJvs6fQd+7LSnjqZZtkAUCHc67QQTv/4PalADmdeUUavcZSRA+OiWHa8i8U1mZzIIwA4osINANhQp3wMki0enXjiibjmmmuwYcMG6bH169fj2muvpVXX+iB4YD0AoMWWmUpdKrj3F6ETfq+2J5J17alNhFGf+VdgTCERj/ZT8Ug2hrpVAID6UT/MaBOZXzkBAFCSbATPaXvin8qHl2eGOVro/z3Nfk3bM5QJthHxyKfPz/g1cTuJOoM3G35X8jcTbqtBGieoB4o8DI1k3A+XZFadgykgJ1VmwZhRS5REHDIMI0UdHuygm8lM8AqVs9p0RRmN9a78InTACQBo3L9N8/Yo8b2xGvWwm4glJk1dyxyxUqKlsG+TdBGTUGHRHsuOWbqSvq+gIoIsmIgHAJA052V0vbuERKXl8x7EY9rfWweFPpfjaQkAE0uJR9f2Q9ktJz8U4DkOhRwxPM8vH5vRa4wlkwAArqD2B0MAUNchzywbSJV3P+SNqEphGg4EBPEoyJv7rJrblZJRJH25GB0IB7XfN4mHwZkWwhCZP4bsR1bvV27aL1s8eumll1BaWoq5c+fCZDLBZDJh3rx5KCkpwQsvvKC4IUMdppOEtkbzJmR0vdNdgDa4AQCN+7Zq2ha5ZtkiowTxoKYtoGl7OI4f8hENRUFilm0fnVlFldKRk5DgWViZKNqatA15lJsPLzKljOTK0sVF5sQ8JHIwaMwszBUAmDwSfWIKaG+iKpVyLcisyiJABAQx6oymrsmjLEDM0u1j+ve7AgBrKTmpyotkr+/lhLUDwPhissDc1UTv+0wQK2SKHheZ0GwgIoJYkVNLukYeyUFKXaOCccYUJskGw1WcWdqas2Sk8LpmzQ+JgK6eR5nf8xNKiIhQ1xFCKEajzvpDFyGn94wlswOigpIqRHgD9AyHlvq9mrdH6nOZ67sjKt0AgM0HPRq3aOjh83bAyJA9i7sws3TF/FEkIKAsfjAr93qdgvndbTVKqek76Lq+T0I+Mrb7mf4LIYi48ovhA1lrNx3Q3sfykIK0NQCYP5rU+95Y5wHHKUtLly0eFRUV4cMPP8SuXbuwbNkyLFu2DDt37sSHH36I4uL+Q7SHK2IlFdaRWWgrADQbyemVr17bBeWBDmXikRh5Utum3YmULxLHogdXYPrtn2BtTXY8H3JNOOhHBUdEhIpJczN6jcFoQhNL7qeWWu36n+d56TRSTgQCAEyrIKfjW1W69A8nOKHKQsyS+UbSXDQaAOCMaut31dXrrEpm31cKmw96Gp05fm8HygXvmxHTF2T0moIR5KSqLNmIZA/p4WpI9b28cX+qYLBIF5eZkfAQ4S9sKc34NX7bKABAvHmP5u1Jle2Wd8+Lvmg0fSkzIuEgbAwR111FmaWtlQon0y4E0dmmrUVBIJqQognkCAmFdhMK7UbwPLCnWduDwqGIIUbWQ6wtM/GI1enQJBRG6DiovbedeDgoJ7oYAI6oTFVhUrqhHC742sm9GuAt/RY/EikZQQIHHEwYPo2zSYAuB4MyD4dmVJF+X70ve9X/hgIRH4nSCeoyF48YlkWznqQme+q1v9cPeZUdDI0tssGkZxGKJSU9QC6yxSORCRMmYOnSpVi6dCkmTMgsmmY4Y42RwcLgynxBGbCTTWS8RdsFZV07STsbkZ959AGQEo+afBEENfJB+GR7Mw52hBF
LcvjX2uyYiuWazpYGsAyPMG9EXoanFADQKRgwhjQ0Tm4NRBFNcGAY+Xmyc0aSsOwt9R7NTiRfX1uHhfd/jrc3ZqdUca5hw2TC4cyZp625ykhufFFSWx+MtkAM4XgSDCP/VHJUIVmQaOl3lkhy2Fo/dBeqYrU8L2xw5WUWeVZSORYxXgcjk0BLg7beCDVC340ukjfui5uKldWt1DQ7A5gQmeuT1syjDbn8cQAAo0d7P4yDncoizkYVUo9DOQQ6yeYryTOwO9wZvcZic6AR5GChuUbblEVxM5lnNUgpiJkiCsbraofmgZ6WmONEPNLbM7/fPWYiLoaaqjVvj2hLIfd+H1dkh8WgQzCWxH6NswuGGkFBPPKwroxfY7W74AERmjoOaTvOe8NxdAgVNTOtoC1y8hQS0PDRtuxU9h4qxAJkLR+RIR4BgM9CgkCiLdpGGXIcL1U+lyse6XUsJglpqkoPBWWLRz/96U/7/EfpGbtgjiea5WUCX0AWlCaNF5RKI4/cViPybUYAqY2IWlbsakn9vLtlSFZ28bcTQ0UP48rIA0MkYiaLyqRXuxPJfS2k3yrz5PldAeREo8xlRjzJ45u9ynNlRbzhOP7wzjbUd4Zx4xubh2SIvC5GBmbG4s74NUWV5L53IgifR/3nLCJGnpQ5zTDpMzPvFhHN8mvatYs8uvnNLTjria9w69vapuV+X/C1EMPzDjbzqDOdXi+dSrdpHOZcKxwaiN51mbJgXCFKnCa0BWL4eDtdYPaHLiqmsWTmgQIAplLihegKaW+Sr8TrCkh9T2raqXiUCUGvmNZgy7g4AgC0mkmKW6BB2/v9oFS6W946DwCOHU+EkC92Z8eLaShh5cgcb3IUZPwaqTBCR43m7ZEOCWSO83odK0WXb6zzaN2sIUXIQw72gjq3rNe168hawNes7TgvCvzFDpNsoXjxpBIYdAyqWwLY20JFw95Ihsl9HtNnFmkmErcKe/5AS98XyqQtEEU8yYNlgBJHZh5MXZlcRu71nY0DJB51dnam/WtpacHnn3+Ot956Cx6PR1EjhgN5nAcAYC8oz/g1ljJhQRnW1jhXPJmQG94IABOFfPhtGqUurT+Qcnv3hOKo1XBz+n0hLEw0fn3mmwkASFiFNNCAdhEo+1rJ5DCuSN4ACBDvm1Omko3tm+vVRwp9vL0JiS5RJx9sacTVf1+Hi5//Fiv3DI1FqyFOBmadNfO+tznc6BQMdNs09ERQ6nkDpIwVdymcaA6nyRvB2xtJBYrXvzs4JL2Uwm0kktJnzFw8AgCvgSw2Ih3aReP5InG0BcjJ5CiZmwqDjsUFR5IN7qtrtBc3hhpGIY1FZ8t8M+muIF5XhRpHG0YTSTQJ95bcdMUxQoTatgbvkDzU0ZqwkNYQYOTNrREzud/Fql1aIYqGYllmOZwwiaw91tZ0aBZlPlRxcMQI1+rOfJxn8klWgcmv/Xgqikdyx3kAmC1El3ddl1O6E/cRISBslLem95vI+jnSrm2WhVLBEABcFgOOGUvE4uXbtK/uPVTgo2TvlNTLm0d5K8k6YCPa3lOHhKijEpnG+CJTygdYPHr77bfT/n3wwQfYv38/LrjgAhx11FGKGjHUCQf9sDLEdNJVlLl4ZCsgoa1alvT0R+JoF8Ib5UYeAcCsEW4A2kwujd4wmnwR6FhGUkE/2HwIj39WjS92a6vS5pKYl2wIwgZ5Ew3jIBONIazdZyGeLIxVIB4BwEXzRoBhgOXbm7Cl3qOqLV8edqr5mze34JMdzfhmXzv+39/XYXfT4K/qZkqQz1tvc8t6XZuOLN59TdpV5lCaEw8AM4X7vroloElVjsO/O//d0og9zX7sbx06J19JLxHHotbMU5UBIGYgY2EypN24L55MFik4mQSAC4+sAsMA3+7vkA4f1LB8WxMueu5bfLaTjI3/WV+Phfd/jtve1b7a2ECTSmPJXDzKLx0FAHAjgEhIu3vgkCcCngcsBh0K7UZZr503ugBGPYsD7SHsphU
2+yUSICleYZlpDUkbEY/YoLZrHildUUHk0ZhCG6ryLYglORp91A82noytFkfm6ztLMYkudke0LeEdjCak6oijZRTFEJk7kmx011HxqE/4KNlwxw3y7vWokE3A+bW918UK2GNkpqSLnDaNrFE+2aHt4cVQgo+RzzhpkPcZs1ayDjDEPJq2R2khDJEBjzzq8U1YFjfddBMeeeQRLd5uyBHwkhOpBM9mnAsPAPZ8ckO7eJ9m7vxifmOp0wyH2SD79UeOJpPLl3takVTpVSKGxk4qdWDxJDKoPvS/PXj4f3tw+cvf4bU1Q8MDKSmEK0ZNmW8mAEAv+GOZo9oZ2W0SKmlMq8g8V7srE0sd+OFMUkr+ic/VRcXsEAatm0+ZmPa4xaBDLMHh7v9qX3looDEnyabLYM/c8whIVWcThUctEDeASk4kC+0m6VTrm73qv487G9M3o3/6YAeWPLISJz38Jd7frO0JfK5gQ2TcT1oy98IAgISJ3Jt8RDtjejUnkwBZoMwXxv6V1eo2ky2+CH75r41Yvb8d1/xjPVZVt+I3/9mC+s4w/r76gCbfr1xiTQppLM7M+92ZV4QIT+bj9ibtohEk89w8CxiGkfVau0mPRRPIvPzhVm3SFfe2+HHr21ule7wtEMV3tR1DwvcsLohHcj0xWKd4SKStSHNQKN1dqeCwgGEYnHkEOeh8a4P6CMhANIGfvLAG17+2ARzH44Mth3D6Y6vw8tfap20NJNFISKq6ZZUhHrkriHhUlNRWRBBTk/NtRris8tf3oq/l3paA5KGjhg11nfjJC2ukdedQQYxC4QzyDmE5M/l8mbC2XmJq5/ejx5K9ya4mv+p93VCFiZI1Ky9TPDII6azmuEfT9oiFj5SKR2IAwSFvBJG4/GrnmohHALBv3z4kNK4OM1QI+chAEWQssjxvxBKQRiapmTv/lnqyIREd9uWyYGwhnGY9WvxRLFdpsLaxjpxuzBrhxslTup/O3/PhTnhD6qMccg0TEk2T5UUemd3kM7EnPJq0I5pISuKhGEGmhKuOI4bOX+xpVRzSHoolpIXOBUdW4YwjyHf9VyeOx8c3HAeWAVZVt0lpdoMVG0fab5YpHsVN5LuSDGi3oRCjBWdVuRW9folgrPieBuLOjkYyDl17/FjYjCl/EI4Hbn9v+5Dwv1LidwUAnJGMzYyG4tH+VmV+R10RQ9u/3a/Oh+vj7U2IJclhSILjccmLa9MWrB8O8tB5Oy+ksbgyF48YlkUbSxaZ3hbt0tQlAUGm35HI6dPJHPTRVvV9wvM8/m/ZFry2pg6/+NdGvPFdHRb9ZQXOe2Y17nh/u+r3zzVipKAYOZgpRjeZ+6waHhIB6cKhEpbOIOLRN/vaEU3I31x05a+fV+OrvW34YEsj/vHtAfzmzS3Y0ejDne/vGNQpUiF/aoy22TNfU7uLiYmunQlrGmkopawpyCoAiOg0rphsKsX1uVJ4nscv/7URX+1tw/nPrkYiyWH7IS++2ds26NNgmZgoHsn7nBnBvkAf9WjanlpJPFKWUVCZZ4VJzyKW4KQIdaV8vbcNR9/7Ge79cOeg7+euMAnyufBGeZ+xyUEOYMRDJa0Q90ZKBcM8qwEOM4lCV9LnssWjm266Ke3fjTfeiAsvvBAXXHABLrjgAtkNGA6EA2QQDjLyOtlssSHIk3K5YmlItWwRvIqOqHQrer1Rz+KKBSRf+54PdypSLEU2CJFHs0fkYWaVG3cunYqTJpdgxa+Px6RSBwLRBP69Tlu/p1zACiGuvMxNpMVJBh0bp03KwPZDPsSSHPJtRkWpSyKTSh2ocFsQS3DYrPBEaVeTHzxP0mgK7Sb89cJZWH3LYtx48gSMKLDihIkkbetfgzz6zC6EtMvZSAJAUqjOJgqPaqltC6LRG4GeZaQUNLksnUk2E5/taoEvok7UFSOPjh1XiGcvmYuzZpTjP9cejZEFVnQEY5If0mBG9LtiZd73EK7XRbUTj8QUUHFjoATxVHqrSr+7r4T
IouMmpHuE3HLaJADEpHewLjp5joOTJ4s6u0ue15VPT8aIULt2c16dCp8zADhxclczVXXz0K4mf9p88dv/bEUwRtYPf199YNBX9uLD5L5IGOWJR5Z8EsnrTGr39/M8n/I8UpC2BpB5vtBuQjiexOaDyu95nufxYRfxkRwOpNaNf/umVvF755pwgHwuId4EnT7zdGCHMw9RIdKwo0W7ua6mVbnfkcg0wQtll0rbgNr2kPQdjCU43PjvzTjj8a9w8QtrcP9y7cuWDySsIB5BppCgE/xvDHHt5nae57tEHim713UsgzFCJIpa0+wnV+xFozeCZ1fuxxd7WsFxPF5dcwCvr60b1BGmujj5jBmTvD63usm87uCyIx4pXdMxDCNZ1xxQYEUgWzzauHFj2r8tW7YAAB566CE8+uijshswHIgFPACAMCt/QPcKpSCDHdqEjW8VvEbE8stK+NmisSh1mtHgCSvOkY0lOGkTMmsE2ZRcdswovHDZXIwutOEnR5FqFP9ed3DQbiRE9HEyCbNmeZ+5zUVOop18QJO0xY2SWOeWncLQFYZhMLmMhObvVRgZJObZinm3LMugzJU6If3xUcSg980N9aoEylwSjYRgYUjot80pL/KIF/KkdRqZ7H0uVDU8clQ+rEb5njcAMKXMiXHFdsQSHD5WEXXoj8SlTe3kMicWji/EXy+ahTkj8/Hj+aTf/6OBIXuuMSXElEV5EYeshYwT+rh2iw1xrJ2uMF0VSN2rB9pDCKgw0d0uRD/+bNEY3HjSBBQ5TLj9rCm45OiRMOpY1HeGB23EYTjkh44h85XV6Zb12oiJjBEJn3bRhruayGc9XuEC02k2SD4oGw54VLXla0E0HFVATrkBUh3oVKEIw2AXjMVoBLlpDTbBaFmMWNOCBk8YgWgCepZRHHnEMAxmClGqSn0xADJeiBFwXfnRbOLpuXx7Ezwh9SlSuSASFMQjRt5nzLAsOhk3AMDXpt1cJ46tk0vlCZhdmSCU8FbrOfndYWJw13T051ftR6O3+3disKBLiEKCvBRV0QfPrKF41BqIIhBNgGWUHxIAqTmiWoV4FIgm0iKTf/HaRvzomW/w+7e34XdvbcXzq7Tz8BxodELkEStTPLK5yUG4Vvs4AOA4Xro/xyr0uQKAkYIvWq2CiqqyxaMVK1ak/fvss8/w+uuv4+qrr4ZehvI+nIiHyIAeVSAeBXRksS9W7FKDN5yqZjatXPkmwmLU4by5ZOJXGs6+s9GHWIKD22roMcR26cxymPQsqlsCgz5f2ihsInUWeZ+5XVCsjUwCkbD6csliytr0Crfq9xpXLIhHCiealHjU8+S7aEIxKtwWeEJx/PGdbYNSQAz6UsKP3SlPQBArNRlj2ohH6w6Qhdzh0R5yYBgGPxBSGT5UkcYinmiWuczIs6Wb+J49swI6lsGGOo/iqLbvC5LflU1e3+uF60XxSS3NvggaPGEwDDBVhXiUbzOixElKwu5RaKDsDcWl0+ip5S786qTx+O73J+GKBaNhNeoxfwwRKlbsGpwmvaEAGdc4noHZIrOkryAe8RpFGwKpzeQUFfO9WJVlh8pKi6uqiXj0k6NG4t/XHI3rjh+LN392DC6YR1J4Pt3ZPCjHeRFGOJnmZW4orU7hkAghJDWyfhA37lMrXDAbdP1c3TsTS8l3WOn9DqSi3WdUuSUxCgBuOX0SppQ5EUtweGdjAxJJbTZWA0lMEI8iMsUjAPAJ1XfDHdql6W47RNoztUK5eCRWVFbT5wCkSMJ5o1MHZ+OL7Zg7Mg9Jjser3w7eqHK9ICTozPLuddEHz5rUTigWo81I6pnye12MYFETebS13gsxuEjHMghEE9KhNQC8+FXNoPVUMiQF8cgsb163u8j3X89wCAW1ORDc2eSDL5KAzajDhBJ538GujBrIyKPFixfD4/F0e9zn82Hx4sWyGzAcSIY9AICYXv7pX0RPJoFEyKO6HduFSbwq39Jt0yaXBePIICh6KMlF8juq6jk
Kxmk24PTpxAvgb9/UDupFpSlJBne5FbdsdhfiPJkM/B71fggH2tVVZOiKONGIXipyEdOWppT1vMjRsQzuWDoVLAMsW1+Pj1T6a+WCsBBxGOTNskLaAcDoJKcVlrg24pHkdaYi4hBIlXD+rrZT8SJAFDF76vtipxknCr/jB09+jdfXDt4FpuR35ZBnlG8UxCOzRuLR6n1EjJhW7lJUaa0ro4STKqW+CNsPpeYgl6W7oauYrvrm+nq0B6IKW5k7osJBURgmsDp5C3nOIpT01chMtaYtiFZ/FHqW6VWkzwS1VVkA4re3tob8XQvGFWJGlRu/OXUSRhRYcfSYAliNOjT7othc7x20c70uTu53uWkNzryUoC8WV1HLd7Vk3pg3Sp5wfTjixqS6WfmGcotwCDCj0oVbTpuEY8cX4omLZ6HQbsIFRxLh8I73d2DK7R9jxSCrshsT7vcIKz/iI2Qg97tWRTFq2oKo7wxDxzKKC6IApCgKQNJi4ioEPfE7eM1xY/CzRWMxpsiGe86ZjisXEtuLf62tU/X+uSQlJMgT6WxCKrOD1y6qWK1ZtogYeaQ0mwAANguZLadPL8U9P5yGQrsJp08vxVe/PUHyylXrpZUrjEKf6y0y05KtDiR4IrV0PVBWg7imO3J0Pgw65dbVI/PJd+bAQHgeffHFF4jFuoeYRiIRrFq1SnYDhgOcmAsv05kfAOKC4JQMqw9z3KJB6oKIuKBs8IQVmVp/J5r3juh9cXOxkMLyzqZDuPu/OxW08vuBWRCPjDLFI4Zl4Rd8soIe9YsqMepM7SQDQMqVrVMw6HAcj12Hpa31xMlTSnDd8aQqyT+/1a4C0UAR9nsAAEFG/sLSJJxGW5Lq03f8kS7RHirv/cllTjjMegSiCSklRi6igNBb39921hRpIfOHd7YN2hQmye/KKU88Mglik5XT5u9eK5wAHzVGXupkT4hh8crFI/KdmVrW8/fw9OllcJj02N3sx9InvkaLL6KsoTkiElCWxgIAjI0cyOgj2ohHnwop5UePLVCcqgqkokN3NvoUCzsb6zwIx5MotBsxqTRdyDIbdFgsCMZnP/k1Ln/5u0GZqiymNTAyfVAMRhNCPInoC3i1Mc3+ThDq5o5Sd8+L3ogNHuUpRuLBxfQKF+aPKcA/rpwvVXL74ewKlDqJr2csweEPb28bVL4ocWFdHtPJn+PFohhcSJv7/ePt5IDtmLEFcCqopCxS4bbAZtQhnuQlI2a5HOwIoaYtCIYB5o7Mx+9Om4TP/+94HDkqHydPKUGh3YT2YAz/XndwUPW3SEpIkJnCJAjFTj4ILqnNGCcKNhNLlR8QAMBo4VC5TkEKk9QWSSh244IjR2DdH07CUz+eg8o8qzTGv7CqBl9Vtw26fjdxZAw0yIw2Y1gWAWEPEPZpc6+LqYFHj5G3tjyclOdRFtPWtmzZIvkb7dixQ/r/LVu2YOPGjXjxxRdRUVEhuwHDgghZMCdlGikCKfNFXgPxaL+wCZukIh9axGUxoEIoEbhbZnhrkuMl/4MF43r/8h85Kl8q4/63b2rR4h9cGwkRq7CJNMv0PgGAIEMmJ7WDTjCaQJtwkj9CYSWOrnRdVMo9PTrQEUIwloRRz/Zb/ensWWRMWX+gE7HE4Dqligoh7WEFp5JmB1n023j1AkKjl9w3Louhx2gPOehYRvJTUBrevK42VWWxJyrzrPjkxuNwzNgCJDge/90y+KpvxaKRlN+VS94EbxH8sUTxSS2iYKO0SEJXRPPdnjxMMmuLkFZR3vMcVOoy463rjsGIfCsaPGH8bXWtot+TK2JhMhcqSWPR24l4ZNKopK+UqjpeeaoqQKJM9SwDXySBQ15lc/BX1eJ8X9hjpPF1x4+DRUiv+nJP66A0UTYKPig6i/xNnF+Y50MaRB51BmOSb8nckeoij8Q1XpMvoiitLMnxUirVjB6qfDrNBrx57dH445lTAJD1xIZBFJmQFO73uALxKGkSqmqGtY1GEKM3lcIwDMa
LEWcK5/j3txB/o6PHFMBlTV9z6HWsZHvx+7e34di/rFAV1ZgLzBwRj4xWeXspZx7pG5bh4deogva3+8n7zB+tTigW5/bOUBx+BQVReJ6XxKOe1hpLBG+75dub8JMX1wy6uV0Uj4xW+eN7SAgCEItnqSHJ8VgjHA4cPVadeCQa69d3yt/HZSwezZw5E7NmzQLDMFi8eDFmzpwp/ZszZw7uvvtu3HbbbfJaPlyIkQmGk3kiBQC8kXxRmaj6wbXJR8SDUpdZ9XsBwFgpdUneBLOtwQtPKA6HSY8Z/Wxofn7COMyodCHB8fhi9+D0wXAIm0CLwy37tSEdmZyifnWLStGc0GHWqzqVEimym2DSs0hyPBo98jYUG4Sos+kVLuj7CbkcW2SD22pANMENugVGPEQWzVEF4pG1i4Cg1mSvWYjeEE941SKmPe5TkLLY6A1jf5cTyd5gGEY6nV5VPfjue7EKDwBY7TJD28W+Z8JIxNUZySY5HruFCLHeBBs5iOa7BzvVRR71lVYxvsSB3wmV197rYrI6GIiHyFwfZeWLRyaxuqZG4tHWerGyqrpoQ5NeJ6Up71I4Bn+1NyUe9cSUcif+d9NxkonyG98NviqrBmFzITetAQBCLFnnRVTO8wCwTphfxxbZUGA3qXqvQrsJRh2Z55v98tNI97YEEIolYTXqMLao5/VvZZ4VVy4cjTMEmwIx3WkwwEXJ/Z7Qy4/m5s1uAACrQVVNnuexXuj3+RpEmI5QGWH6/mZy4HOW4JF4ONcePxbHjidjQYMnjN/9Z4ui35MrzLwoJMgbW40ms1RB29+hPl2x2RdBTVsQLKM+ytBm0iNfsDNRcji0s9GPQ94IjDoW03uYc46fWISq/NS8+MKqmkGVomwB+UxMNvnju1gsSyyepYbth7zwRxJwmPWYqsLLECAFK8wGMr4fkhldmrF4VFNTg3379oHneaxduxY1NTXSv4aGBvh8Pvz0pz+V3fjhgE4Qj2BSsHjXcIJp9mq8iRRUyxqZoa0r95DN4DHjCvoVDwDgKEFdFUWHwUQkHISRISaYctNXACCiJ4vKeFDdKUWTVxAONep7lmWkBcaBDnn9L3pfzM6gZDzDMFKapdrqHwON6FMW1ckXje1CtIqRSao2S28S7vtip7qNhIiY9ihXNAaA9zYRMWDuyLxuJ5KHIy6CtzZ4B52ZalgwRozyBhiM8j53hzu1wQ6oPJ1s9UcRiXPQsYxUWUMNUtqaAvEoFEtIKYj9CVmLJhTBoGNwsCOsKKQ6VyQiZIxSksZiFsxUbRqU9A1EU1FCUzQQDUXvGyXlu73hOLYIqRULexGPACIi3HbWFLAMsL8tKInegwWTEI0gN60BAMLCHBELqI9GEE//+xLnM4VlGZS5yZqhoVP+hlLs92kVLujYviu8iibPYnTiYICPkvEsKbPCHgCwVhIVpo+p/3u7VtwaX6wufQmAtMlXMs7vbfFjZ6MPepbBadNKe7zGaTbgH1fOxze/WwyGATbXewdVZoFVEI/MNvmbdz9D+ifkU5+iKqYvTSl3qo4qB4AqFYdDT3+5DwCweFJxj96KVqMe7/58IZ68eDZ0LIMGz+CpqspzHKw8+X4q6fOIjowPcQ28i8V98JGj8vsdU/uDYRjJ96hWpml2xuLRyJEjMWrUKHAch7lz52LkyJHSv7KyMuhkmkMOJ6RS7TKrbQEAI5xi6ePqb7ImMQJBo8gjpREIYtWVYzMMp58j+CINxqprXTd/dgWRR3Ej+c5wIXXCmdZ9D6ROp+T4HiWSHD7ZQXLzF03ILLxaPLFUY+SXC5JhsgmMKzDKt9qcksmeWhPVFr+2wuGYIuVm6e8K4pGYjtgXowtscJj0iMQ5VeVjc0FUEI9CjPzPPM0DRaVRvrggL7QbVS80gNSm4pBHfhrLyj2t4HgSvVTcz3fRZtJLlZlEsXkwIIpHcZ38yCOrSyzZrl4saxXuebtJD4cGkaYTVZTvXr2vHRxP1gvl7r4/F5fFIIl
dh5f6/r5jEVNZFJxMRw1CYZSg+gMy0fxUjBZTi5i6JvdkGgA2djHL7g/xFF1tVb+BhBEijzgFfqY6GxH3THH1f68YIVTmssCoV26gK6ImPVmMOjpuQhHc1r4L85S7LVLhDDH96vtOMpGAlSHjq9kmX6gLCNkEYa/6iGoxknd2H96xcqhUGHH29d42vL/5EFgGuH7xuF6vy7cZccYRZZL/opjO/H0nFovAwBCPKrOC8T0mBAEkQuqF4r2S/Yx6kRhQ7nuU0Sjz3nvvIR6PSz/39Y/SHUOCdLbcUu3kNW4AgDGhboKJxJPwhkkflmi0iRwtRR5lvrHzR+JSTvuiDMuGiya/+1oDg873JuwnE2KAt8iuuAUASUE84lWKR+IprlZ9D6SiEOSIR6v3t6MzFEe+zZixga+YHrlvkAkInOB1psQov6vJnlofDDHySKu+F0XjmragrLDjvS1+7BBOJE+fVtbv9WyXqjHiCfZgISp4YUSh7DMXPVDE8UMpLUKqcrFDm74vcZilNJYmmZEhYsXEU6f2fBp9ONMr3ABSC+TBABchY1RCQeSRXYg4szJRxKLqTuFFo/EihzbRhqKB/X4Zc72I6G/YV9RRV0S/jB2DqN8BwAzxZFqBt6UgHqk9JAJS87E4P6tFFPyUmGZ/K/jwzBvdf9S1KCLUtAURjCZk/65cwMTJhotXYElhFMQjc1L991zs8xEa9bnSCFOe5yW/o7Nm9D/HAylfri2D5HC4a7l1m4IDYSmbQIMU1T2C36yacu1dEb8/9TKjDN/a0AAAuHDeiIwq/Ynpy1/v06a6ZLYJB7r0uV3+Pl7cA/ARDcQjYR+k1eFASjySd69ntJs9++yz0dTUhOLiYpx99tm9XscwDJIaOcgPJcRS7QYF4W5ieXfxPZQibiAtBh2cZnXlmkXECIS6jhASSS6jFLRv93cgwfEYVWDNeHFT7jLDadbDFyFpD31V6Pq+IVbcCjA2KLnVtcqLb9I4ZRHoEnkkY9ARKwCdMrU0o+8LQLwbgMEXeYSocq8zAAgyNrj5gGoBQdzkl2gUdTYi3wo9yyAcT6LJF0GZK7Moiw8E4+tFE4qQZ+v7RFLkiCoXVu9vx+Z6Ly44UnGTB5xUCWdln3mItQNcu2oPlFbBJF8rEYFlGVTkWVDTFsTBjjAq8zIbw6OJJD7fSSpGnjY9Q/GochCmscSENBa9/E2cw5UPjmfAMjx8na0oLK1S3A6t+11MeVQSifB1P35HhyOKCIMpAoXnONj4MMAoS2sQzZPF4ipqOCiJR/Kj33qiQqF4tK3Bi/1tQehYBvMyMPMtcphQ7DChxR/FriYf5miQdpdtdEJGAGOSP8ebXUKaqgYVVcWUQtGTTi1i5FF9Zxgcx4PNMGr1YEcY+1uDMOgYnDwls3FePBzeNkjG+XDACweABM/CZJY/zkcNbiAKJILqhZPqZvLd0Uo8SkWcZb6m53le8qUUfcv6Y8HYQgC78e2+9oz3jrkkFPDCDSDMG2FREATAGUj/8BqM76JNzJhePOTkIs7tWYk84jgOxcXF0s+9/aPCUc+IpdoNVrfs15qECl1mteKRFHli6rHaiRLKnGaYDSziST5jpfqbfcIp5PjMFpIAESUnCQvKwWaaHBUM0kTDNNkIefE6teKRxgICkFKs5UQe7RTSHo4clXmYraiwH+wIDaoSzqzodWZUNrGHWfJ3R1VWaBCjEEo02kgadKwk/MrxOxPTUE6cXJLxa44Qok+2NQyOhaVIIkwWdTEFxskAENaR74xaD5RU5JE2fQ8A5YIHimjCnwlf722DP5pAidOEWVWZ3fuid4cSY/acIXigKBGMWZ1OijYMqk1V9WkrHombUm84Dm8o80o8DR5ikM8ymVeGEdPWBlPkUSQchI4hUZgWBSfTqUMij7p2xJPoCBKT/Uq3NlEokngkMxrhuZX7AQBnHlGWsR/LZGmdNzj8DXUJsvZhTPLneIvggeng1f+tbQHS51rd72VuM1gGiCU
4SYjOhNX7yfp+RqW7R9+bnhA9Lbc3+AZF+XaxGEaIsYBh5YseCZMbgPpsAq5L9K92EWfyPY9aA1G0+KNgmMzT56ZVuOA06+GPJrB1EKztosJhYFiBDQEAcILfMauy8BXH8dK9rlUgwKiCLHseUZSjplS72e4GoL5kdzbSlliWkb54mYazi+JPphsIkcmlyg07c0lcmCBEwzS56ATxyBhXN8A2aywgAOmRR5mmL4khl3JOSorsJjjMenA8UDuIzHNZ0adMgYEq0CW8WaXJXjb8rsTNZH2GkQgcx2PLQfIdntlDyebemFxGPoM9zX4kB8HCUkSNcTIAxISTqqTKHPnWgGCWrqV45JLvgSJWylwypTTjU2wxLbojGIMnpK7q3EDBimksCgx0ASAgpiuqNFMVfc606nebSY9Cu1CJR8bG4mvB02JGlTvjKp+TSh1gGPI3tCqo8JULgkKEMUD86uTCCJYGBpXmyW3CRt+oY+G0aBNhLqatyRGLveE4/ruVRJpedeyYjF83oUTwNxwkKerGBLnfdTJLtgOAwy1UV2QiiMfUfc9FwTA/w4je/jDoWCmiWE4UymohDUlOCfFxxXYY9Sz80YSsg8hcEQ0K4hGUHQxxolAcUSceecNxaU2kVb+LgkSLjHFXjH4amW+FxZiZ97GOZaTviBiZ+n0mJR4p63PGLIhHcXX7185QTOrzArs2fS6t5Tsz38cBGaatPf744xm/4S9/+cuMrx0u2PkQwCgr1S6eTtj5EHiOU6R0A13KdWu4gQSI/8muJj/2twaxeFL/14sDzUSZZl+TB2nkUSLoAQDEFJgmA4BBzItX6Xklpa1p2P9i9Ik/moAnFO83Fak9EEVHMAaGQa9le3uCYRiMK7ZjY50He1sCmFQ6ONIWRZN71qysnGZM7wCiQFKFeJTkeGkDlo2UxUw3kgc7Q/BHEzDqWWmDkAkjC2ww6VlE4hwOdoQwqlB9xbCBgIuSTUVcoXgUF8QjLuxR1Q6tI1CArh4omfvyiOP27JHujF9jM+lR4jSh2RdFTVsQs0Zos1jKJkxcuB+Myr6nIdYOJJsRVZmqKt7zWvZ7Vb4VbYEYDnaEMvK1AFLRhsfI2ExajXqMLrBhf1sQOxt9KHJk5o2YS8TNRYg3waqgeIx4SGRQaZ4snkoX2I2aRZiLa4ZGb+b3+9qaDiQ5HmMKbRl/VwBgvHCoVN0yOA4JjWKFPYv8A6KuVTV9na0oKKlU3I72ILnfC+1a3u8WNHjCqO8MY+6o/q/neR6rhepfR4/J/H436FhMLnNi80EPtjZ4v/dzfEwohBJVGFXMWMmaXqcyylDsc5fFoIlJOgCpkIUnFEcknoTZ0P9YJhZRkJs6t3BcIT7e3oyv97bj+sXj5Td2AIkL43tUsXhExkC1ha/E8d1tNcCgUapfmdsMhgEicQ4dwRgyLa+RkXj0yCOPZPRmDMNQ8egwYtEITAwJ87Y65Odw25zkNUYmgUgkBLNVmQjRmAXPGwAYUygaafYfEdIWiKJdgXgAYNCmrXGCQVpcryz6xCSIhxYVefGJJCedSGrZ/2aDDqVOM5p8EexvC2JOP+KRWDGrMs+S8QmFyNgiIh7taxk8kUdGyShfmdiV0EBAaAtEwfHkpKdA04WlvNx4UTQeW2SXld+uYxmML7FjW4MPu5r83/uFpQgXVe59AwCckXxn1Bostkgignb3vdzqSzzPSxGjE0vk3QujC21o9kVR2x7ELI0qymQTvRCJwKqJNkwCcbXpin4x4kzDw4I8KzbWeWRFB4gpCTMEE+xMmVzuxP62IHY0+nBchoU1ckmkSyqLkjveYBcPidSJJu0B7UUEUTzyRxIIRBMZpSOtO0C+v/NliAhAyphdnC++75gk8UhBMRy9Hj5Y4UQIQW+bOvGoi2ioFRVuK4AO1Gd4QNQWiKHZR9KX5I7V08qJeLTtkBdnzShX0NqBIxESimEoFI9YQTwyqo4y1L7PnWY9zAZyWNfii2JEQf+jmSj
0yhWPRA+89Qc6EY4lSfRZJN5vhb5cEBdtCBQeBuqtZHwwqhzfxX1ckYbju0mvQ7GDHNI1eMIY5cxsfZ7RVTU1NRn9279/v6o/YijiF0otczwDh0uBeGR3gePJCVLAp3xBKW7yKjXKjRWRKq5l4EuxR9hAyAlvFJlYQkLZ2wKxQRPKDqQM0pIKfW9SkWfKB50mXwQcDxh02goIADBeiCLJpISzKB6JXiZyEH2PBpNptmhyL04ccuEkE1Xliwwx4qzIbtKkVLuIZKyYoQ+G2G9KKkSIixKxsshggI+RvlcqHvFC36vNkc9GBIrc6kut/ij8kQRYBhhbLE/8Gy0cTmQyv3wf0AseKKwCA10AiAlVt5Iq/TCy0e9yow0j8aQ05k/PoFR7VyTT7EHie5RKa1B2v5sF8cjKabO50HJDaTfp4RCKrDRlGH20X7hf5ZaTFueHFn9UlrdWrjBzZAw0KkhbA1JpqiGvutSddo3T1oBUOkum47woIoxQsL4Xo9O2NXjBcTxe/roGT3xejXjy+1ddOSFEHikVEowOIpqYE+rEI1EwLLRpN8YzDCNZmzT7M7vXpcgjmff66EIbylxmxJIc1h3owC1vbcHMP/0P9364U16jB4Ck1OfKBEPR71itd7E4r2t5OACkDgTlVNlTFffE87ysHLnhSMhHwjgDjAWsgnDmdBNN5eKRWIZvpMbikVi2OxPPo90qykpajDqMFvyVBlP0ESNs/sRIArnYhIocDj4InlM2kYonxZV5Vk0FBCC1ONzd1H+fVAv9P15G2pLIuKLB5YUAAGbhVNKkUDziNTDZa+5ilK8lkt+VzMij8QrEo9R3bPCIRxCqbin1voFwkq2LKe97nk+lLGrpeVSRl4o8ymT+F78jZS4LTHp5c+CYQnF+GRzikSFJ/ladWZl4lDCSflebrpiNfhfNVOsy9Dnb2xJAkuORZzXIjnidKphmb673yHpdroiH1UUjpA6JtElr0HpzUSZEH2UqHtUK96vcSFGH2YBy4XftafGjMxjDN3vbvreFMqw8uReUVNgDgBBL5jY1aaocx0ueR1r2e0WevA3l3hblc7zog7jhgAfL1h/Ene/vwIOf7MFbG+plv1e2SQp+hnGFPqZmB7nXrUl1+xgxbU1LwRAASoRoVXHt2Bc8z3ep+Cav3xmGkaKP/vntAfx7HenrZ1ful77P3xc4oXJyQq+sz42Cd7GFU7eOEQ8HCjWc1wGgQjgMllMUQZF49OKLL2LatGkwm80wm82YNm0aXnjhBSVvJZsnn3wSo0aNgtlsxvz587F27do+r1+2bBkmTZoEs9mM6dOn48MPPxyQdoqE/eQEMQjl6RZBIRA6omCCSSYS2LH6I1za8Thu0v9bM1d+ETFtrdkXRTCa6PNaMXJArt+RyKQy0TR78IhHUpU0i1vR68W8eD3DIdDFlFMOqdK92vY9AEwU/IcyMTJPCQjy+3+ssCDZ3xoYNMbJFl4QjxRU3wFSJqp6FQJCNozygdRGstUfzWhhv1c4lVSysBS/Y7sHUeQRK3jf8Aq9b3TCeKHGA8UXTiAmnNxqGYEibiRDsSS84f6jA8RIFSVzjxTZOkjEI6MQiaBXmLYmlmxnIh7FbYgnOXSEtK2+BHQt352ZYCylKpY6ZPvvzBmZBx3L4EB7SJZhb65QG41gdaW8LTkVVYvbspC2BgClokl+BqbZSY6XDivHKEgzFteHW+u9uPzltbj4hTW4/rWNst8n2yQTCdgZ8nlYncpSaqWiGAHl1RW7GifnaZjyI0UeZbih3CMdDsof+yaVOlCVb0E4nsRv/7NVevy9zYdkv1e24UUhQeHBkMVNKperrbKXjbQ1ACgWDhqbff1neDR6I/BHE9CzjLQXlMOCcWTc+3h7c9rjK3a1yH6vbMIJGSQJhd61FqFYllg8SymtWUhbA7pU1JRRBEW2eHTbbbfhV7/6Fc466ywsW7YMy5Ytw1lnnYUbb7wRt912m9y3k8Ubb7yBm266Cbf
ffjs2bNiAGTNm4JRTTkFLS89ftG+++QYXXXQRrrzySmzcuBFnn302zj77bGzbti2r7exKRIg8CumUfem6vlZJye51T/8UUz6+EJfoP8Uv9e+gwqCt8OKyGlAgKN/9LfD3SAq1soX15NLBVcYVAPSCuz6rUDwyW+2I8MTCzN/ZKuu1yUQCGx/6ARzvXQkGHKrylJ2K9oUUFdLs7zcKoVrFyVRVngVGHYtogpNdMjhX2ATxSKyYKBdJQFCRJ92UJfHIZTHAIXhf9LeZ5Hk+dSqpIOpsojBe1LQFEU18P0+gD0esusUYFObI29wAAFNCeSSC6HtDfAzkR732htmgkypvZXIqXddOrhEFRzmMLkqJR4Mhyln0QFGaxgKhEo906KCA9kAMvOBzlq/pZjJ1OplJX4jRqEoKHDjMBswRfFPe23wIr62pw/3Ld31vI1CSYTEaQdn9Lh4S6Rhe8SER0DXySNsNZZkz88ijQ54wYkkORh0rpbjKYc5I0u+PfLoHm+vJffDpzubvnV1BMJC6R20KxaOYgYjFiaDyNFUxZU1L42QAqHQL93uGEaZqoosZhsGFR47o9viGAx4kvm+pa4KfIacwJd3uFrMJQkgm+j5w74uOoJiiqq2QIK4VWzKIPBIP9EYV2hR99xaMLUz7f72QGbHugLq0bc0RBENOsf0ISUsWC18ppc0vjO8Obcd3uVGGgALx6Omnn8bzzz+Pe++9F0uXLsXSpUtx77334rnnnsNTTz0l9+1k8fDDD+Oqq67CFVdcgSlTpuCZZ56B1WrFSy+91OP1jz32GE499VTcfPPNmDx5Mu666y7Mnj0bTzzxRFbb2ZV4UCzVrlw8igqvlVuy29vRiiPb3kt7zNC+R3E7ekM8Hd7Xhx8Nz/OS55HSyKNpgm/Cmv3t2FjXiV+9vhFfVX+/yzyahI2/uBlUgl9hXvzmT/+JWf4vcLpuLSYy9bLKp2bKuGI7WIZUZ+irvGdnMCadiirxvdHrWCn6aGV1K/70/g68/HXN9zYKKRaNwKzCKB8ADDayIFVjoioKuiMzMD6UA8Mwkn/awX7SWA55IwjGktCzDEbkyz+tK3Ga4LIYkORIiPQ3e9syCqnOJTrB+4ZR6H0jGegmlfe9VK5dY+EQSEURiREGfSGmrSmJPKoSUm1DsST2twXxmzc34/rXNiAUU77oziZmnnwvlYpHrFB1S6/CTFUUDQvtRrAapimXusxgGSCa4KQT0L7YrbCyqsgFR1YBAB74eDdufXsrnv5iH5798vvpq5lKa1DoeWS1IyocEgU8ytc02TDMBkhFHiCzimu17WTOqcq3KEqTP3IUGfv8kfR7fE2N8uicbBASPEijvAEms8KqmkKaKh9WIR6JPlcapy/Jvd/Fw0Glh8OXHTNKmiMuP2YUHCY9wvGkdOj8vUE4GOKMyuZ2Zx4pAMAyvOSJq4T2LAnFJVLkUf/3umhFMVFhnxc7zWnRibefNQUAsK5WXcEIrWEFGwIo7HNRXNYzHMIh5Wu61iyN75WSeJR5lG9G1da6Eo/HMXfu3G6Pz5kzBwkVKmp/xGIxrF+/Hrfccov0GMuyOOmkk7B69eoeX7N69WrcdNNNaY+dcsopeOedd3r9PdFoFNFoaqD0+dRF6iQEwSemV15ePKp3ALFU2fdMqdv2DaYzPJnchI0s/nOl4hugN54JxhAwJuBebgC+7LnQX4Lj8T4fBmMCRryubKJdBGClKQQuDOAF4EYAul0MOLcFGlv5APYS4Py/A44SVW8jbvyNNuWVgoKsA0VcJyJ+eYunxK6PpZ//Zb4P7hXPAisUN6NHzABWmcOIJ3k4nzMBvUQ4WBIcvjBGoNcxsD2jLALq1VAMPmMC+BBYKDwWWGWAy5JpcckMsBUB570CuCpUvU3A2w5RMrIrPJU0CqGuiirtHdqI5Lu/wK3NrfiNkUfxGhOwQbvoEwD4mz+KkDGJ/HeNwMe9TyV58SS+MEZh1LMwPnmr7N/DAPhEF0XYmAT
7HGDngSgLxF0WGLS68W2FwLkvAe7up59K0CfVGSenDHRlhjk37wDe+RkQ8WFGLIEvjDGYgyzwmLYC0nMBYcz/0ACs6Pv++7Uvgl8YORStNwFb5X0HjQBWmsj4YnqaxXUJcmqXeMQImGUvX7pjzQd+9AKQP0b9e0HwQGEAk0LxSC/MEya56Yqte4C3rwbCHowT77eEtv0u9kWC4+F8zgz0c9L8584wkkYepavMwGr5p9LnAJhnDqcdEOi/ZsBvt0D1XW9xA+c8DxRqUyKaF6srGpSvrfyMDSZ4hEOiiZm9qH0f8NZVQIhsth7whhE38ij5zASs1G68vzKawNnGGCw7dMDBvjcuMyJk3LGGdcBj8jc58wB8bSH3PMsAJoMO4VgSrgzGmn4xu4AfPgMUT1b3PgDCAQ8AIMhYoHQrxwmRhrLTVDsPkLV8sA3TYuR+N4dzd78neR5vxcOACRjxpsKIHAArjDxihRzMNTr8TB9BhOdQ8E8jkEGFvz4xOYCznwJKp6t7H6gXEgxGE/y8BQ4mDH9nM9yFpZm/2NsALLsMCLbhdl8EvzVyKPrKBKzV7l6/OJbAEmMM5ur+v0/nBGJYYkzAfdAAPKbs3nyPT8BricNm0sOxRo9jjWHAAyQftUAnM925GyY7cNbjQMVsVW/DxoU+V5iObrU5keBZ6BkOQV8nrHKtLPxNwL8vxX0NdYgZORR/YQK+1q7PFyTJ/oz1AHg6s9FM9h15ySWX4Omnn8bDDz+c9vhzzz2HH//4x3LfLmPa2tqQTCZRUpK+mS8pKcGuXbt6fE1TU1OP1zc1NfX6e+69917ceeed6hsswIXJCWJCoWEyACSEBYncss2B2nUAgG3OhZhzzBLg41uAYCv5pyGFAApZABHhXw8YAIwS5x6FhywsgBEM0G3l6FH2fn3SWQPUrASOOE/V21g5MuiYHMqjfsI6B8ABMZniUYE/dV/k8R6g06O4DX1RAZDO6UPjMEPofx6K+z8fQP7h65c+vnOK6KwB9q8AZv1E1duEfJ3IBxDkzbDplS18zELEkuidJIfWr15GUfNWVAL99o1SisX3Dgv/esEKoe85KO77EvF3Aan7X12xknQ6a4C9nwJzf6rJ2xmTgveNRdkC0yoY6DrkGujufB9o3AyALMbtLIAkFH/uvZHJmC9SBpC+Cwr/ZCKNLxxS34F+vnMZ01kD7PkEOOpnqt+KSyZhZcjBk8WubL43CqKhRW7E2a4PgEPEF0aL+603pPEkg+ZVybi2J5iu7yGiYv5IoxPA7o80E48YYW2mtDAGQA6JCjkPIgEZp+57PgYa1kv/m8lcrAQHAAcLIIF+P38nAGeG1/YEgy5/B4T3yXCsyYhdH2giHkUFP9MQY5UOiuTCSGmqHnkvrP4EqP8OAGADYMvSOJ/p/a6D+vW9+D7i0WKp+LtDwj+17HxfE/FIlxBS0k3KhAQA8LMOOPgwgnKr7FV/LPW79PkonFd7Q866Qc46oM/fh9R7SN8jj7L368aOd1WLR3pBPGJMysZ3hmURZCxwIUgiFstHyXuDvZ8BB9egHMjK+G5E6nP3eTLL5lC0q3nxxRfxySef4KijjgIArFmzBnV1dbj00kvTIn0OF5gGA7fcckva3+Dz+VBVVaX8DYUThaSKRYW4IJErHuk7qgEAsfyJwFHXAqOPBWLam0+uqWnH/ct3Y2yRDQ+cO6PHa55ZuQ+fbG/GGdPLcOXC0Yp/ly8Sx3Mr9yPB8WAYYM3+Dpw0uRjXHT9O8Xt24+NbgYZ1AKc+ks7OBwEmlfOqhKjBCcTl5cUn4jFUJQ4ADNB6xssoKlEXSdMX/153EK9/dxDHTyzCLxf3vBh/ftV+fLStCUtnlOHyY5T3/5fVLdje4MNJU0rwx3e2IZ7k8fiFMyU/DlV8ejtQtxrg1PtqhAV/siBjVWyVbxW+Mw4FJnsdB7ajCMDjibNxqGgh7jl7Oli1pziH8dG2Rjy/qgZHjsrHLadN6vW6xz6rxpd7WvH
j+SPwo9mVin6XPxrHjW9sQkcwjoo8Mxo6IyhymPDMT2aDURuH8PldQO0qTfpdxKDSONkmGOhamBiikVDmaRFCSH3LyDPxT/5UfFXdhrNnluPSo0cpakdvfLu/HX/5eDdGF9rw0Hk9j/kAEIwlcMmLpKjFP66cB5tR/pJj+bZGPLeqJu0xs4HFP66cr+5k8sv7gH2fazLOA0Ao6IMoFco+WRQQK/HY5JZsjwtK2uSleMOwFG98V6/9vAjg8c+q8cWeVvzkqBE4Z1bv9/K2Q17c9u52FDtMeOYnc1T9To7nwTIM/vTBdmw66MXVx43BqVNlnNYfzqqHyCZMo34HADZG+otXsaEMs3aAA+JyxKOE0O8TTkX86Btw4fPfgueBFy+bq6l5ciCawKUvkfv47z+dB3sfkSB//u8OrK/z4GeLxmLJFHWR2wCwtcGL29/T4Lv0zeNEONJonI+GyHo8wiovhsPayBxvkFsUIyasCcYvwWum8/Hm+nqcOq0UVx+rTQSlyAur9uPDbU39ziHiGD13ZB5uPV29MAek9hUj8i149IJZyt/o26eAHe9o1u96QTzSKZzbASDIOoFkC6I+mamYYhXO8afgkuqFCEaT2q1/BVoDUVzzj/XQ6xj866qjep1jOZ7Hj19Yg2iCw+MXzZQ8stTy1xXVWLGrFefNqcRF81REgq99Dtj2JsCr73epiqpFRZ8zNrj4oLQ3kIWgI6zkjsCj8XPw/KVzNU9TvfzltfBFErjjtHHAfUv7vV72Sm7btm2YPZuoePv27QMAFBYWorCwMM2IWm51jf4oLCyETqdDc3O6K3tzczNKS3teSJSWlsq6HgBMJhNMJu3yCVnB+JI3K1tMAgCnsGS3PVgHADAWjwcYRhPVvSfyTX5s+IjHbo8Of6mal9b3oVgCTd4Int3vRSfvwi/mHAmMKFb8u5wAfj2BJC19sbsFT+/7Dk0NZlx72O9VhVUQelQOOol4TKrGYXcV9nN178QFU8VkKPNBp/VQDcqYJGK8HgWzfwDotE1Z6orTNxIb1q5H1OfEL0fMlx5fva8dWxs8OG9OFd7riGMr78QVk2cBI8oV/65FI0j6IgCwG+zYsL8dX4TH4CczRqr8KwBYhegwDSabqJBiGlKxsBQFBBMTRyQchNmS+Xu5grXktZNOwe/PPQ+sWcPUPoH8RDs2rDSgudOCW7r0++Es98Wwk8/DtRPmAiOUbSYcAO755Vzsaw1iRpULM+78BHEfjwPW6bLLQXfDRjwIwGtnzGkSxCODwsVG11THgLcjc/FIOBx4fZ8ejyfyAeTj4nEzgBHKRLveqHKGsWE5jy3tDP5cNrdXQ+6aBi828B4U2IywjVug6HfNzougeu2X8EcSuPGkCXjmy30Ix5LYb56qqLKPhE2YhzTq90iAiEdJnlHsgZKquiVTMBaq+yFvJL7pGIcNvBUnj5gEjBirqB29kahwY8PuvZjEjcA5I3pfT6yvr8UGPo6TKoqBEUeq+p3iQbRzQh421FXjI285Th2hYjMppqJrMM6LiIUxGLMKewKDA0gAcTmbi4RQ0tpViQbHEVjPdcBsYOGeuJCs+TTCDqDZFUWDJ4zNzEQsGNH7euZ/gRBq+RDs4+cDfVyXKVWFMWx4Nw74AF/xbDiVzmVbhXW/Rve76EEaVViyHUilqZoTMsUjUSx2VWFdaDw28FacVDERGKGtWGwZV4oNW7fBESzCpSPm9XrdyvVbsYE3YP6oscCI3g+S5DA6L4INH/HY2AHcVTwbDqX9vuNd8l+N7neD4GfImpWnqEb0TiAJxPwyI48EESGRNwarImRsd05cCGi4vsvneGxlA4gneDS7ZvRqel/fHsLqeDuMOhalUxcBOm3M2vMmlmLDzm2weQtxUR/ryn7Z/V/yXxUG1SKmJJmPDUoLYQAIszbS50K6qyyEwJFargQb+AlwTlig2ect0pYfw7YGH/aaJmR0vWzxaMUKjU1TMsRoNGLOnDn47LPPcPbZZwM
AOI7DZ599huuvv77H1xx99NH47LPPcMMNN0iP/e9//8PRRx89AC0miGW2GRXikfhanczTiaJ4AwDAWZFh/rxCRhRYwTJAMJZEiz8qufU/v3I//vLxLsSTJAyuyGHCMRqaNs8fXQCjnsUhbwT7WgMYp6AEfI8wwkZI5UlFwNsBt/Cz3aU88kgs3yydOmRAR8M+lAFoYYtQmUXhCEhVXKtuCSCR5KDXsTjkCePSl9YgnuTx8fZm7Ggk312xkooWzB+Tj9X727GmpgM/OUoD8UhcbGtwQhUPiqeSyhcYNrsLHM+AZXgEvO0Zi0ceTydKQRYlPznjJNiyIBwBwJRyMpE2eMJoD0R7rPoRT3LYJxhpTlJonCtS7DRL5s+zqvKwtrYDX+9rUy8eMcIkrGHkkVrjZJ1eL/kihHztKCjJUPwRNhVhPuVVoPZz74lylxn5NiM6gjHsavJjZpW7x+tEQ201fVTsMOOfV87HlgYvLjqyCquqW7HuQCe2NnjViUdiv2u0qQiHyBgXghkOVtnCTjxksDJRxKIRGE0ZepgkhJwBvUVV1aP+EE+46/oxSt+lsjhGT4hGyutqVebmiPO7htX79EJVRJ1F+TovbnACYYCTY54s9rvOhIOC2WllnlXzw1sAmD86H29tbMCq6jYsGNezKBRPcjgoVOwZrXZcFnBbjShzmdHojWBPkx9zRylcS2m0rhNJCvd7TGH5bgAwCpGGstNURbHYYMEhobx2hYLKdv0hGiHvae67fTsOkc9iSpnyzfXhFDvMqHBb0OAJY2OdB8dNKFL2Rhqu6wDAKEYVW5T/rTGjG4gCyaBMY2hBRAgIRXQsBp1U9VYrdCyDcrcFB9pDONAe6lU8EiutjS22Q6+hkDFrhBsAsKnOA47jlRd9kMZ59eKRWRKP3IrfI6qzA0n5ha8ASP3ugxV5VgMMGgtHAKmKuq3Bh631mWU4ad+CLHLTTTfh+eefx9/+9jfs3LkT1157LYLBIK644goAwKWXXppmqP2rX/0Ky5cvx0MPPYRdu3bhjjvuwLp163oVm7KBQTiR0lmVb5pZYUEi5l1mQiQcRKGQNFqs0UlAb5j0OmlzIE4ioVgC9y9PCUczq9x48bK5MOm1EzIsRh3mjyYLiS/3aFh1jRUHHXWTTVAISQ3xJhiMyqPZeCEvnpWRFx9qIWkenUYV4f0ZMiLfCotBh1iCQ62wqXjju4NS368/0Ikkx2NskU1R6d7emD+aLLzW1rRrU8Zbw01FyihfRUi7TocAQzZrQW/mi4zmmu0AgE44YctTHuXXHw6zARNKyCLm6309h19vqfcgluSQZzVourg9Zhzp+2/2alCBR6P7vStWniwwlRonA0BQWCCG/ZlvJpNCOkMYJKS5zGXG+BLtRQSGYTC9gsxLWxt6X2yIlZfUVvubUeXGJUeNhF7HYrpQdXNLhoucXmG1FQ2jQTL3hRnl33O7K3W44uuU4U0YJyLC3s6EJNRno9/F9xTFod4QN5sTS7XbTM6sckPHMmjwhKUNsyKyIBabBPFIr2JzkZQqb8n4XidJ5NE3B/xSeqhYOUdrxM37yj29fy8PdoSQ5HhYDDqUOLQzbxYF8J39fO/6RGOxmBcyARIqTNItTvKZOjiZY5kYeWSw4pCH3PtlLu37fYLwuTd6I/CG4z1ek+R47Bb6ZbKG4hEALBREyv/taO7nyj5gtRMRAMDMkTWu0oMhAEiY3OSHkDLxyMuT+bTEacqKUCxWzNva4On1GnGMn6DxPDOxxAGLQQd/NNFnBe9+0fB+N2uwnhNF5mRIwbpFFI94q+aV1kSOGkPWHt9lWOlOtngUiUTwwAMP4PTTT8fcuXMxe/bstH/Z5IILLsCDDz6I2267DTNnzsSmTZuwfPlyyRS7rq4OjY2N0vXHHHMMXnvtNTz33HOYMWMG3nzzTbzzzjuYNm1aVtvZFbFUu0FFtS1xQWKSUbK7o7keABDhDXC6tS/RfjgzKt0AgM31HgBkcZngeBT
YjNh3z+l45+cLcIRwjZYsEhY0y9YdRCKpUdqJRotLsZSrn1E3uIrlmw0xT8avSXSSlMWwVXmKWKawLCNNIOIi4pt9RMwrd6UWkEtnaOu7NGuEG0Ydi2ZfFPtaNXAM1FBESAobgLiKU0kACDBEfArLMEv31e8EADQZVHi1ZciJk8nY+/nO9MXdl3tacdMbm/D4Z3sBAEePLdC0bPgxY8mi8tv97eA4lWKfxifSXY2TTVbl/S+mPEb9mS8wIyGy2OL1Fnz12xPw2f8t0lSw74okHgljfk+IIsPYIu0WmOLv3daHaJURGkegxIRIhAirfBOn0+vhgygYyxBGBe+bf20kG/tZI9xS2WstmVjqAMMAbYEoWv09l+/meR57xMgjNZFhh2Ez6TFViHZcd0BF9FEWxGKzUBXRYFMeeZQ6JJLxvU6QPlhTl5r/shFxBgALx5Mxd0ejr9e+7yoWazneTxJEiV2NKqofs9qO83yEtIUzKP+Ou4rI+szN+5GIxzJ/oSAe1fp41HUQMSMb97vTbJDWcL199rXtQYTjSZgNrGbRZiKnH1EGAHhnY0Ov4lW/aBiBAgBWIaXYZHMrfo9UlT154xgvZB80RIiAIEZia414KL+qOv1Qnud5bK33IhBNdBGPtI1s1utYKZJ5ZbWKoAANRUObULTGbFe+j08IewFOpncxgC6RR7asiUdin4sBIP0hWzy68sor8Ze//AUjR47EmWeeiR/84Adp/7LN9ddfjwMHDiAajWLNmjWYPz+VE/nFF1/glVdeSbv+vPPOw+7duxGNRrFt2zacfvrpWW9jV6xCOKpRxZdODG21JjOfOH1tRDzqYPPAKAyhl8MRh50G7xQmmmkVLug0XEQczo9mV8Jh1mNXkx+Pf75XmzfVaNARN30hFalLQCpqzSijfDMbICJq0l6m6ndnipiesLvJh2giiU0HPQCAV346D8eOL8TCcYW4+jhtzRzNBh2OEtIgP9za2M/VGaDhibS4sEwa1U2sYeG7I0dASLSR+8Bv0yCVrx+OHS+KOB1S9FejN4yfvvId3trYgC+FU+pT1Bjc9sDMKjcsBh3agzEp0kIx4vio0eIy4PdIP6tJVw3ryYYp6ss8AkUUj2x2ByrzrLAqMKjOFDECaGtD+uf/7qYGPPzJbgSiCUlYmlahfFN9OOJcs/2QL62Mu2w0FhHiYTLXR1VEHgGplISwT8bCWdhMhngjxhTa8MTFs7NyIm016jFG2CCKY/zhNHjC8EcTMOgYjCnSdjM5dyS5n76rkXli3xWNxWIAsHDqNxcQIsx1CsSjGEhqMsMA587JzqFBod0kiYFbugjGq/e149U1BxCJJ7FfOMTRut/FdChVKYuMtuM8EyX3O6fCJD2vsBxJITXd0957FehuCGlrr3xHDm0q8ywodWVHSJghbOR7E2zFzebEUqfma/1jxxViXLEd/mhC+RpPy3Udx0kFTKwu5YfyjOCrqpdZZa+zg6wFXlhHxr9spCoCwOJJxWAYIh51TVl8duV+nPXEVzj9sVVYL3wftBaPAOBkwWj/7Y31yrMKNOr3ZCIhHQZaHW7F75Mwkz7nZaYqHti9Cdj9IQASeZStyNKqfCsq3BYkMlxTyV5ZfvDBB/jwww+xYIEy88vhhqhSi2W3lWAvIKcTLi7ziTPcQQZav07575WDGFW0pd4Dnucl8UjrMNbDybMZcffZ0/Cr1zfh6S/24oIjq9QPqBotLmNC1ZSITp14ZBRCm21xT+avCZNJhnVkP20NIPmyAAkrr24OIJ7k4bIYML7Yjn9cqcL0rh/OOqIMK/e04r3Nh/CLxePUbZgY7TaTWpRuBoCQ3k1M9ryZh22zAbIITdqzH3U2qyoPBh2DJl8E9Z1hVOVb8daGhrRN/eQyJ06bpq2IadSzOGFSET7c2oT/bKhXJ05onM4Q8nXACSDG62WZnB9OxJgPxICEP3PxKBEl843LqZ1Y0xtiBNCeZj8i8STMBh32twZwwxubwPPA+rp
OKY11uobi0ehCO2xGHYKxJPa1BpQvXjVOX0pEyCI7plMXARBiHaQSjwzBmI+HwQCI8Ebcf+4RWdtUAMC80QXY1xrEmv3t0iKf53lc9ff12NXkw5lHkHFnbJFdc2+GeaPz8dLXNfhyTyt4nlc23mch8kiqqupQYU+g4JAISVE80uNHsytx9qxyTX2mDmdSmQO7m/3Y1eTHiZNLEI4lceXfvkMolsTGOo9U93KchpGGAHDc+CLoWQa7m/3Y3xrAGCXvr3H6kk6osAeF5bsBEmnYxrhQCA+8LfUoLM2suhQfD4EBEAaJQjh9evYOCeePzsdH25qwpqYDPz+BPBaJJ/GHd7Zhf2tASpubUan9nMOyDH44qwIPfLwb/93SqKz6lob3eyjog40h7+PMU+jBBEAniEeGuLwolIgwJ/iFtLUjsvCZA8CYIjtOnFSCT3c24/3Nh/B/SyaC53k8+yUpkiVGuwHZ8VRcOrMcD36yG9safPj3uoO44EgF/a5RxFkw4IV4h1sdKiJLbeSgVReWF03V/t4fIR4Dh2DGVI2j+7pyzNgCvNGcWcSz7Jm9oqICDkf2JqehRJpKraJUu6uQpPu4EUA81nO48OHEPEQ8Chmzn7IGAFPLndCzDNoCMdR3hqXTiMll2f+uLJ1Rjvmj8xFP8nhtzQH1b6jRZCP63kQN6gQEZwkZOvK5zDeRthgZoIzugYk8miT0885GnxQJMqXMmZXT766cMq0URh2LvS0BycBPMRouLrUo3QwAETNZoCS8mZ+6GSOk71lH9vyORCxGHcYXp/oeAL4SQo3vXDoV71+/EO/8/BgY9dpHP54/l5ywv7amTqUHiigWa7OpCAsLPNGvSikJMxm7+aAM7xuh2prdnl3RHiB+SgU2I5Jc6rDgnU2HpCywrwU/qkmlDuRrWFZWxzKYWq6B75GGYjEAJIXIo7hK8SiiJ/eTnJLt0TBZZyR1JszqxbxcK44aQ9Yy39akFpmf72rBpzubUd8ZxjPCBiMbIsZxEwph0rOo6wgpjzjUWjSMx7qcTCtf5xmEsu1y7AlEw+woDLjk6JE4drzyDW0mTDjMQHlldStCMfI5vrWhHsu3///2zju8rer849+rvSXvkXjEGc5w4mwnBJJA0oyGsAKFQJkpq2GV0ZT+2hBoCymjlFWgtBBoKVDKKGUECEkIgcSZzt5xpmPHS16y9vn9ce6VrXjJlq5s2e/nefw8ku6Uj+6953zP+35fPnExPD2yg1qrQY3zRP+bL3Z3IkKnORGOOJNM0hVhVNgDgBolFw0bKktC3sbR0HS9f3r3+XhwlnxFcSaK3pJbj1UFrCH+uu4o/rP1FLadsKO0lv8GJdE40swThbENRytRWR/a+CeICKat1dl538bNlNAbun5/01hEo/ROVNnzuF1I9PJJxDPg24/NjFwBmnOZN4pPPK89wPsfRysaUO0ITh1MNmtliYRJNGlx74zBAIAnvtiPWmcXUhYVkZkUdIiek26m6nIVVQBQmnh/XOvqnE9nRsOuwOtt/kERTw1tzgWdMKXvdI/+mWeewZIlS3D8eAQG6b2cRkcdNIIXAGCydb1kqTU+GV7Gm6q6PLQHjL+O32Tcenk7ExI6tTIw+7/haGXA60LyKJATQRBw03nZAIB3N52Eyxtm5yBCnQy/mJ/sCSMnHgAS0nm6lxUNcNSHNliyePnAw5gY2RLdbSGFlZ+qbkThUX7s4VFoe4tOjem5/Df+vx2hd75aJYJVOZpKN4fXifYa+My+UB965JHezR9Oamu0os6klMU6eHx+bD3BH7ZTBiViZH+rbJ4704YkYeKAeLi8fjz91YGu7yjCkQiSwbVDCO8h7zfy37XSEfpMlUL0vjFFYYJHEIRA6prkP7SzFf+jabmRfwYFUuba8VvqkAhHIvhdfDDpU4UnHrnFyQafI/RIY6coHiXF2SJa+aY1JGPNPSW1AR+SVfvOtlhvfAQra0oYNKqAz+HKrooIEW73htqmdjJawrA
nMHHxqDP2BF5xMtEFNQZGOFWsNZrf64Hg693PgDon7+/K0e+bm8efZ11u9whHmKoDFfbC+64Nat7urprQv1ejg///0xISkNfPKsvkjMTQVDOsejUa3D7sESeF/73lJADAoFFCEICrx2dgQrY8QkZ2ohF5/Szw+Xnl3k4TQbHYIfrQ1QmmsOxAtGY+HjR24lovKd4HjeBDA9PimpmT8cj84bJFHgHAuEz+uzxQxvt128Q0tVSLLtBVPm9ggmwTxIvOH4CcJCPsDg8+3dGFlMXA9R6ep6Gz3g4AaAhzMlBj5X15vSf053pNVTmSwNcf4fw7nEqLrG1+/qDEkAM+Ov3rHz9+PJxOJ3JycmA2mxEfHx/0RzRhL+c/eCdTw2jqeoMrlEpUC3z72orQBsgKB+/M+Y3yRx9ITBb9Z/69+SQcbh+0KgWyE+Tv0AA8RzbNqkNlgxsfbz8d3s4ipFgzsfMvVVHpKmZrPOoZV/fLTxd3fFy/H/GMH9uSFB3xyGbQBIwVP9zO/bYiWba1Pebn8xmv/+04E17VtQjOUEWidDMACGL0kLox9OgTsygc6uPlT1sDmqLO9pfVobiiAW6vHyatSvbBjCAI+L8fDwMAfLT9NPaUdDEKJcKGmu4GOwCgURHe91eY+CBZ7Qo9AkXp47PAZnN0rj0pHW3nqZqAmSYA/GLmECgVAvRqJW6YnB3x40ozrmsOlHe7J4IEEz1QwhWPvOLzQpp8CGkbF484S020hXXsUEix6JCTaARjTd5DUoWWNPEZoBCazPQjzTzRRPdfhSfQ4PJ2fgcR9r6RxCMnU0Oj7brvjCmO/78sLPT7mMvJ212t0cGsU3f52KEiRR4dKa+Hx+fHkbNctJzeTCAekmJChgzmzbOGp0Ah8OqOJyodHW9wLhE2zNb6wjdJBwCXlgsJ3trQhRGvkx87JVG+yBMJhULAhGw+tissrkSZmKKuEIDN/zcT+x6bgz9eOUrWKHMpLe+zXV2YIIxQfx4AGmv4RE69IrzJGaN4rdv89pC3qTq2AwBQos7EXTNycfOUAbL+z/vH6WHWquD2+nG0vAHbRY+7S0en47FLRuCaCRn4v3nDZTu+SqnAVaJ/2xe7uyIeReZ6dzbw+3E4VVQBQG/jbW72hi4elRzaBgAoRRJeWTQNX/1iKvrHRf7eKhFv1OD9O84Lad1Oi0cLFy7E6dOn8fjjj+OFF17As88+G/RHNFEnhqHaBVvYptW1gdDW0ISRJs8beTpwrTFZnJGUjPVyU82yz4JKqJQK3DJlAAAeUhtWBaYIpbEoG/ksBTOEnzpYoeQdjNqyjiP+7JVl0Ih52fHJ0RGPgCZ/K2ksF43IIwCYMSwZBo0SJ6ocLapDdIoIzkg3lW4Or2OptvJOk94V+veKEzsk5oToiEdSOe79Z2oD6UtDU82ypywC3Mxzfn46GAOe+Hx/14SESKexiOKRK8xKe2oLv3cb3KGLR2o/F4+i4XkENF3zB8vqUFLjRGWDGyqFgNun5WDV/dPw2T3ny+K/c+HQpMA1v+1EF010I22gK/mcacP73/vE7QWnPfRjixFnCTZbWMcOlQLxWb/xaCWcHl+gpPLbPyvAkjlD8cbNE5Euk+/Sj0emITvBgMoGN/6+vuPJlBZEOH2pKU01PLHYkih6W6IBbpczpG3cLt7uZpM8FdbOpX+cHkaNEh4fQ3FFQ6DdbzovG9OGJEGnVuDO6QNlOXaCSRuosilNUHWKCKep6iTxSKyG3FW8Bi68dSa6mIkG+cnx4R07VKRKTIVHqwJG+UNSzDBqVdCp5Yksbk4gde1IJSo6m7oWwaqaLjGVuDFM8Sg+hYsiBsGF+trQnl+uCt73r9VFtmpxWygUQmBicO+ZmkDk0ZhMG66fnI3lC0YhySxP5S+JHw3nk6eFxVVwejp53Ubo+e6RxCNFeKKN1B+PZ3awEMeW9WcOAQDKtf1xweAkZMuYstZZOj2y/+GHH/D+++9jyZIluOmmm3DjjTcG/RF
NNNr5w6BOZQt7X7VaHrLrrAgtXdAgpq5oopS6AgDjs7l5rkS0Ik8krpmYAbNWhSPlDfhmf8sw+pCJUBqLFDGgMIUf/VWr4QPJxsoQxKOzJwAA1TCHNRPaWZqbo1t0KtlKBp+LQaPC1RP4w/i5bw71iEiESJRuBgB9PO8oWLyh5Uk31NkD/htxydHpZEipDMcqHdhxkj9oh0bB60zil7NzoVEqsP5wRddKeEc4bc3XyP8HblV4/wODOFNl8oX4nRiDDrztbdboiEdDUvg1fuhsPXaIg4rcVDN0aiUGJBq7ZmwbAgaNCnPENJa/ry/u2jUf4UgEhVtMQRDLMHcZcfvOVN1SiMbJSXFhHjtEJN+jwuIqFFc0gDHAqldjQKIRd04fGEgtkwO1UoH7RY+Xv6w93GbZ+DaJ8PUupTU0hpnWYI1Pho/x/lNNiJW3vG5RLDZFZ1AhCAKGiPf7vSW1OFbJn3ODkk1485aJ2P+7ubh8jHwTVleO4/v+58YTaHR3sv2kCdwIedtFomQ7AAgmfp9XdcJIV+nj4lFaFCKPAKBAvN43HasKpK5FsoJmR2QlGDGynxV+Bny5p5NpixHs13nreT8sXB9To9mGOjGboKo0RAuYWh6M4DFGx8cUaBrDbSquDvicyemzdC4Dk0xINmvh9voD1d1CJkL3eY9D7M+FKR4l9RsAHxOgFTyoPBua+O2z8zZ36qM3jg+VTotHQ4cORWNjGOakfQhPLRcwHOrwLza3kauW/uoTIa0ved4YopS6AvAO/YyhTZFOcnYgW8OsU+O6SdxceskHO/F24XE43F0Jaw9vULHt6Utw8PcTEO/keeFSrms4OPV8H77qjm869RU8Os2uiG4a6YVDm9p76pCkqEWdAcCd0wZCq1Jg6/FqrO6qcBjBmcmIlG4GYBbTDuP9oUWfVJfx34eDaWEMo6xoZ0g2axFnUMPnZ/h0J3/YSdX3okFGvAELxnGh7G/fHe38DiIceeQXI1C8YXqdSVU2bf7QRASPww41+P0uLik6HcysBCPUSgEOty9gZCtV3pSbGyZnQyEAn+8qxVNfdsHzKsLpiirRJF8IM1VVqrqlcocuHqn9XEBJiVIkQpPvUQ22n7ADAAYmGaMSbQjwKpsj+1nh9PjxSWe97iIceeSR0lTDrKraFXsC5hHTVKMkHgFNkwVf7yuDx8egUyuQbpWvul9z5o1KQ/84PSrqXXi7s8VRIni9M78fFsajrkxhVN0CAJUYYRpqdLHf5wukOyWlRs/T0qRVoc7pDTzjpYmDaCGlq36+q5MpTBEUi/11vG/p0oafTVCl5PuoLT8Z0vpqhxiZZomieCRmD7yz6QT8DOhn0yPZEr0JaUEQMEU0yv/+cCezCiIUeeRt4KJVuIKhWqNFucC/S8XJgyFtI9Tz37rX2AvEo+XLl+OBBx7A2rVrUVlZidra2qA/ogmveKNxR+BGw6w8skJT37F4wD1v7AAAS1J0og8kfvGjIUgya5GVYMDUKItHAHDLlGxolApUNbjxfx/txi0rNnc+hS2Mh42zsQFj67/FEO9BZPn5Q0EfF/6F7zPzdlTWdZy26KzmD/d6dXQq7UmMzYzDBYMTMSDRKGv1j9ZItuhw05RsALw6g1QVpFNEMG3NJM5KhlO6Geh8eHOdmNZapYje7JAgCIHKSmfFKIBoVFlsjpSy+tXeMhyraOjcxorIhbUDgODgs5N+XXhtYE3i4pFRcKKxoeMKTHaxmEId08MWpYqoaqUCOYl8ECEZ1stp6Nic0Rk2/OHykQCAV9cd7XzFvQgb6GrEyjnKMNNYVEb+uwm16pbH7YIJXKxOTIqOx2GKRYesBAP8DAGPwYEyRZm1hiAIgSiU/xZ10uMwgh4oAOAVIw1dyvAFnDqFDQDgqA4xusLnBgCYjdH730u+R1+Ig/icRBMUiuiIhmqlAj+fPggA8OaGY53r20VQRKitqYJatAawJoTXv9PFcTHA5A1tgqiq/DQ0gg8+JiBR7B/
IjUqpwDjRAP9oeVO0WTSZM4L/nzcerUKNoxPVtyIoGgoNXMDxRcBLtk7NhQRniFYkBic/ttoWvTHd8LTgZ/lYGYogdIQkHn25p7RzEcYRmhSUCld41OH3ayo1/DdcX3okpPU1omCoiKJgGCqdFo/mzJmDDRs2YMaMGUhOTkZcXBzi4uJgs9kQFxf9H1ZPxef1QlHHO9NeQ/g3Gk1iNgDA0tixSl1bUwWtwG+ucVH0vAF4ysKmX8/A6gemw6hVRfXYABcRVtwyAZfkp0OpELDxaBU2HQvdMwRAWDOT9laq4ZkTwr/wVTbejrrGjjuVUll3qcx7tBAEAf9YVIDVD0zrltzcn08fhDiDGofP1uO3/93deREhQg8bl9MRSB0zWbteZRHg4c0OxvPKq8s6jjpsrOZtX6+KbtRZ80gjlUKIauQRAAxOMWN6bhIYQ6BUeMhEugqPWKwAYfrNmcw2uBg3wQ2lymatFHEo2KI2mAOAwefMQI+MYjrDwomZmJAdJ1bi6WQ6Q6QNdEWfM7UxvH6QxsTvGQZvaJFH0jPHyxSwhTmQ7Qx5Yjl26fk6MMqDyYtHpUGpELDzVE3AeyckIuRpKCGlqXrCTFMFgHqx8lZjVWgDSild0RLFyCNpokDSbaItIlw+ph/MOhVOVjV2rm8XwQjTOjGtsIHpoNOH9783J2UCAOJ9oUVWVJUcAwBUCnFQaeT1nGmOlLomkRvlZ3x2ohG5KWb4/Azf7O9E1bUItrtUuERKNQwHp5b3z701oUUZWr3892FMzAz72KEyOMUEZbO+xJgMW9SOLTF7RAoMGiWOlDdg49EuXO/hioZi4QpfuOnoABwGPo7zVIbm1Wd089+bJi66QSCh0GnxaM2aNVizZg1Wr14d9LdmzRo899xzcpxjzLFr3UfA7xJRUPEhAEARlxX2PhNzRgMAMjzH4PW4213XLuZT1sIY9oOtKwiCEHTDiTbnDUzE8wvH4Mqx/EL9YGsnzRXDmJmUBnDNiYSAp0/JAQCkOY9g419uxd6NK9tcV2jgg1epzHu0iVbqwrlY9eqAF8Y7m05i1p/XYcOR0LyCAETkYbN7/SfY/v5yAICLqWEJM6QdAKrE9MPa8o4HFB6x3K9DE92oszGZtsDrUf2t3SIcL76Qz0i/t+Vk5yqvRTiNRSemH6is4YnGgkIRSGOpr+o4VN8hCod1quhO4kiRCABgM6gDaS3RYrY4I915L4zIRCJI5pd6PxcwtGGmqlpSeH8h0VcekrFmTUA0tEChlN+8VmJEv+DBYzQjjwBuoDx1MBfaPtrWieijCBul++v59e7R2sLeV6OB3zN81aGlsij8vC9ojVKkIQDkpgQfa0SUCmNI6DVKzBrOr/mVuztxzUfoei/eUwj7GT5BUaMIXyhP6sejZs1CI+pqOh4c15fzdL1qVXQnByXTbABIMDZV140ms0bwPu1XezohHkUw0lDn4v1JdQSsKDxGcR91pXC7nO3e65nfjwTRukB6PkQDnVqJzGaVE88VEKOBWafGpaO5ePLPzqSqhplJUFNZhqO7C6F08cgjFgHxyGvhwp+yJjT7GVtAMIxOhGFn6LR4NG3atKC/sWPH4sCBA3jooYdw7733ynGOMYdp7TIohabwOkPygLD32S8nDw6mhU7w4PSR3e2uW1fBOx7VUUxd6YlcOZ6LNp/vOtM5p/4wZiYd5wzyKmCDwRR+ByN1wCgAQBKqMensvzF85dVtprIEZkfM0Ulh6En8tCATL107FiP7WeH2+vHHlftD3zjMSITGhjrkrboek45wEb1ckRB2lUUAsItm6Y6zHfv5+Ot4p8qtDy/iqbP8aHgKLDouGF0zMXozY82ZkB2Pi0elgTHg0U/2hh7iHKF0hm1PzceR341GkpuL1VI6QjjUKW0AAEdlx7OTbrskHEa3gzezWUn2Hw1LiarXGdAkHm0qruqcebIifBHB7/Nh15MzcfLRYUj2846e3hze/z8lg4ugJqERNVUd+7c1iM+
cGmV02318VvDxolUgoTlXiqWc/7HxOOqcIaayROh6P7JrI4pWvQOl6EvhN4V/vfss/Psoa0MQjxiDnvFUTZstetF+CSYtzLqmyYFoRhpKSGb5nUpliUCk4c61H2DA+7MwdNVNAIB6Zfjf3WCyogZ8kreypONnvLuK/zYadNGdHBzZzxZ4PTQtOtVUz0USDb89WB56nz6CkYZS4RKdLfxrXTDzfQw/+z+4H8/G9j9d2ua69sqyQDZJYlr0xCMA+NXcoRAE/pwfkR79ax0AfjqJ9ym/3F0a+jM+zEmC0pfnI+c/szCgaj2AJi/CcFAlZAMA4mv3Y/tX/0RNVXmb6/q83oD9TFxqdNs8FLrcy1u3bh1uvPFGpKWl4emnn8ZFF12EjRs3RvLcYhaPQhP03pqWE/Y+lSoVTqj5fsr2rUfV2bZn2pzdlLrS0xifFYd+Nj0a3D6sO9j2RdqCMDqXbnuweFQrehiES0JqRqA6g8SRorWtrtsU+dDzTNbkRhAEzBuVhjdungC1UkDRSTuKQ01fCzM3/tDmr4Le16gjMzPYYMoGAHjLD3W4rtLBf+csAqmyncGgUeGd2ybhrVsm4ifju2+W5Nc/HgadWoFNx6rw6c4QjTUj4InQUGfH2IZ1GOgrRjL4DKE5ArNFdTrewXSW7sfBbWvhdLSdnuOr423v0UU36mx4ugUPzc7FVeP64+EfD4vqsQFumD46wwY/A97acCz0DQPpDF1v9x3fvINRzq3IYCXQCNys3BQf3qBOZzChAjYAQHkIxpouUTRsUEf3eT+6WQqDVa9GVkJ41Wi6wpy8VOQkGVHT6MGbPxwLbaMIRRpqP7wRo9ffgYnVnwIAlGFGGgKAKp4PEgyNJdhX+CUqStueoXY7aqGHVFkzuvYE14oTBAaNslt8UC4YnAiDRokzNc7QK2xGIOJM2PAiAEAl8H00qiIzmK5U8L5CbemxDtdltbzv7zZE1wdFo1JgyZyhuGBwIv64YFRUjy2R18+CdKsOjR4fvjsUooFyhCIN/T4fkv38GRuXPjCsfQGAWhSgLHDAJDRibP26NqOPqs7wNKcqWKJaQRngkzPrHroQL147JqrHbc6IdCtGpFvg9TP8cCTUdu/6fd7v8yHXy4twJIHfX5TG8J+vplT+uxnkO4IxPyxGxUs/anPdqrOnoBL88DEB8VG+v4dCp8Sj0tJSLF++HIMHD8ZVV10Fi8UCl8uFjz/+GMuXL8eECRPkOs+YwuSzB71PzRoakf3WWHlKzsQdv4XhpXyUFLceVeEVU1ec2uhGH/Q0BEEIhLl+2Zkw13BuOmIIezniUIok1Ex6qNP7aPWUFArYz4kkc1a2no5nFo0X9bboVdrraSSatJiQzW/2IQuHYXrfNBzfFvS+MUIzgyyei8YjT72HDX+9B87GtsUwjZP//hRh+u10hRHp1m4xyW9Ouk2PO6bxB/QzXx2AJxTjdGkGNYzBZGnx3qD3PiYE0hHCwW3inYZJR57DkE8uxe5XbmpzXSldlRmi3waLLxyEp67KR7xR0/HKMnDrBfwaeXntEew8ZQ9towiksThLgqOAG5gO8Unh33cr1HwfNSUdV5HzRbACUGfQqBT4w+V50KoUWHzhwG6JRFAqBNw7YzAA4G/ri0OLPopA5JHX40Z/FpwypY2AL4U0uMhzFWHYFz9B1d8WtLlutWhPUM90sFmjK+DcPWMwFl84EB/+/Dzo1NFLlZTQqZWYN5IPvt/dFFqKXyQKYnhUwdF1Ln1knrO14iRBxrf3oWZZOk4cLGpzXbUY6QZL9Pt3d04fiH8sKkD/uOgLxYDUp+9kmnIErvfCd5/AoScmQyN44WFKJEfg2W5IaCkIVLXhbVhfwUXkKmX39K8y4g3dcp03R6rwuak4RN+jMETDkuJ9LT7TREA8ShmQF/R+oK8Ybpez1XXtosdppRAHpSr6NhAdEbJ4NH/+fOTm5mLnzp3485//jJKSErzwwgtynltMwvz+QPg6ABQO/7+
IKcVCWn7gtU7w4OTmT1o/h3oulHj03TuQ6wlIFRq+2V8W2kASCOthw8RyzUeSZyF12WGMmfXTTu+jLaqmPgY/a+qge+2tP2ikMq6mxJ5nshZNpolCxrehikfhhrW7gqtN+voXdG0/56BP49EcJqERk0veRNGbbQuSBjcPq9ZEICc/Vrn1ghzEGzU4VukIzQslAoMK+6ngQf5xZVZE0lWF+Oyg98NqvmtzXXU3CofdzY9HpmJuXiq8fob73i2CyxvCNRyBNBahMTjqoVSVFpFU1VozFxHYwa+x9ZnLsG3lirbPQXze+6KcqgoA1xVkYc+js3Hb1PBn4rvKxaPSkZNkhN3hwT82huCJEQED3dYM7I0J4T9v04eMC3o/xHsQx/ZtaXXdugp+DtE2yAcAk1aFh2YPjXphhOZI6dGf7iwJrfpWBNrd6AqehPTZIpNO4rRw8TsOdbCiAaVfPNXmuoGqW/E9zwclGkhpyl/tKYXbG8rkUHj3+dKTh1Gwf3kgEqVMkQyVOvxJkrSB+S0+a6uEu1SRrV7Td8d00mTw5lBN8sMYx1We2NviM3Ny+NdbfHI/2BEsQJefbt08u76ci0d2Vc8MAgm5l/PFF19g0aJFePTRRzFv3jwoo2jMGEs0OuoCIa019x5BwU9+GbF9D5p6dSCcHQAUp1vvVKik1JUIVASIdcZnxyPBqIHd4emEYt31h43CzaNCmCbyRuX5F16FkuvXozDpSv5BfcuZF2djAyzg52DrgaGO0WRaLn/Q/nCkIrT8+DDTlxTNxKMt5hkYc+k9XdrPuWSPuSjofWJ52+nBFi8fzBri+27UmVGrws8u4DODH20PQTyKQBrLudUzylKndnlfzTGkDgl6r4YHPq+31XV1bt72Gmvf8zoTBAFPXDESyWYtjlY04L3NIUQjRCCdQekMfqbUaSOTKsySRwAAJtR8iXF1a5BU+Mc215VEQ5i6p92j7XF1LkqFEIg2/DiU6z0SYvHZlr+vtJwRXd6fhDUuMaiPBwAVR7e3um5DFRePavuoPcHYTBtyU8xwef34uKgT9/kwIlCs3uACHOrE8CNQAECZnBv0XtvYdqS8zcsjDaNZdasnMXFAPJLMWtQ6vVh/OISJwTDv86e2rwp6X6WNzKSsLbHls6KurPVKsayG/75d3VQEpycwIZtHVx4sq0d1Q/tFowCE1e7extoWnyX2H9zp/bTG/uwb4WsWBFB9unXB0C0GBzT0UMEw5Kf++vXrUVdXh3HjxqGgoAAvvvgiKipCzD3sQzjqeZUfPxNgsUb2oR6f3A+quwqxIf1GAIDZ0XoHWSt53lj67o1GQqkQAoau/9sRWknMcKozKLwO/kIjj3lo/0F5gTQmjaOleCSFsruYOuK/v1gjN8WMFIsWTo8fW0PxRZDSLrrYuVR6uB/NxsH3Y/wDH0Ys4tAal4g9miaPAZuv9QpyzO9HPOPf09zHo84uHsnFs03HqjruaERgMMmcvLOx1Xwhds/8B8Zcv7zL+2rOoPHBOfEawYfyktZnqqR01UiYecYiNoMGP5/ORYS3N57o2Eg3AoNJjdseeH0W8VBMvLXL+2qOdUBwBEoGK2nTD0MrVgDqy8/72SNSoVYKOFhWj+OVHXjcRUAsbqgMFivqmR5Gs63L+2vO0fgLgt5LNgTn4q7h6UvRrqzZUxAEAVdP4NEAn+4MoW8XgUhDPRqD3lvSIjOgtOWMDXqf4Gq9b+/3+ZDo59e7LS0ywlWsoVQIgZTFz3eFkLoWZtqatyF4gqAheVwba3ae7ee9hN3a0Tik4r8jb23rYpgigqb8sUqCSYuBSXxSPiSfszA8DX3Olt6S5giNpybd9DicDx7DTh23+XGcbb0/56/h9zR3DxUMQxaPJk2ahNdeew1nzpzB7bffjnfffRfp6enw+/34+uuvUVfXeuWnvkajKB45oItI+Pq52BJTYR31YwCAydv6BWQSU1e0fXQQcS5XjOUD6U92lMDhbn3WPogwqjMovbz
jqtBGPvJIQi0aYetcLSOppHLuVYJNlt9fLCEIAi4YzFX7kHyPFF1vdwBQieKRQhf5UP7+t/8Huy5aAQBIhB0up6PFOrX2yoBpb3xK3446y0wwYGiqGT4/w+r9HVSsCtPrCgAEN297tyULeedfAp0+Mte/3mjGxpSFKEVT6HJVSeuzk1K6qjmh7973Lx/THxqlAgfK6nD4bNvm4gCaJgnCGUx67ACAoikvI3lZMUZNb9ujpjMMHDMNXhZ8/7ZXth6NYPLw54DW1vcKJEhY9WqMyeAz098fbl1cDxCBiDOpsuEpIRV7NPk4Mj1yFg75t/0VG4c8hP0qnq7M6lu/f0mVNfuyPYHkabn1eHXHkwQRKIygZ9yf5KSQjsLEBRiUf0EHW4TGoFFTmiLKAVhYy8gHAKgsPw2N4IOPCUhI6Ztpa0BThc8NRypDmCQIrzACa5aa7GFKpE++ukv7aY0xs36KvIe/RZV1eItjNUfn5PcApa1vTwpKqWtbQkldC+M+73cF9x1OCpGN5DeabQGPQl9D688rVQN/xrAeKhh2enRpNBpxyy23YP369di1axceeOABLF++HMnJybjkkkvkOMeYwtUgikeCvoM1u44pnncSreJg4Vysfn4DikQOfm9g4oB4ZMTr4XD7sGZ/J0SELgwmVV4+M6XQyle2WGvhnUWDr6bFMkcfD2U/lwsG80F3SL5HYXYuNT4uHCr1ti5t3x7WhBTknX8pnEwNAKhoJfrELgqHtTBETLyIZSRjza/2djA7GYFIBCnqDNrIC4eT7nwFqcuO4ICKF15w1baM+K2rqYJR4AObhLS+mc4AAFaDGpMH8k7ZNx2KhtL1HmKp71YwivdgnTWyA3i1Rostg+7GcUXTALGtymtSP8DYh1NVAeC8Qbzdv++oGk8EDHT9YqRhqWkERvx6HfIvvKrL+zoXrc6ASdf+BtUZPF1ZsiE4F0WDaE9g7LviUf84PkngZ8Dagx1c74rwREO3ywmNwH8zlnu+Q8Fdr0MRIfsOQaFAweK/o3oxL4JjgQMed8uS5PYzxwBwE121RhuRY8ciY7NsUCkEnLY34lR1Y/srhxlZLDjtAIDC+EtQ+tM1yBoWucgjCb/Wxo/VhnhkdvNrXR/ftycFJfFoUyjiUTj3eTefnC0RUrDdeD4qpyzt/D46wKezAQAER+vikTYgGPbM53pYoQm5ubl48skncerUKbzzzjuROqeYxtXAOxVOhXzikTWJ30DMQmOLyktulxNx4qyFNYnEI0As3y6msYQU3hzGYFLt4zcdpU4+8UgvepqY/C2j/dxiiHtfDWU/lwsGJ0EQgP2ldThb23pVgwBhRqBofVxAUBvkMREVFAqUiyV9a8palnCur+DiUY1gk+X4scas4Xx28tuD5e17XoWRpiohiUdyisZONTfg9tS1HBxXiWJiLYwwmGyynUMsIBnlbzwaagRK19vdxHi76y2Rv99Ouv4xZC3djYMq7nslTQw0x+1yIg78eR+X0ndFQwCYMohPFGw4Ugm/vx1BMAJiMRO9Df1q+apOqUTje62zdTFM08gHlIo+nK4IADOG8f7Qqn0hisVdbPfGhqb+lsFo7tI+OsIanxzwQ6mpaDnpIZnoVqv6rmAIAAaNCvkZNgDABpnv80qXnW8eNwAZg1uaXEcCwcBFEZWrdfEoQSzCZE7u2/f4iQP4/2nXqRo0ujtozzAij5gYSX4iaTrGPPQZRs9c2Ol9dHgMPY+UVYi/r3OxiIKhLr5njuMjkteiVCpx2WWX4ZNPWq/+1ZfwiEZbLoV8nQqLNR5uxh+E9nOqfpSfPgKFwOBkaiQk98wfXXdw8Sge+rd6/9mORYQwFGuNn4tHKp08nQsAMMXxjoOV1bXwwWBSnmwfDmVvTrxRg5H9+KD7u0Mhzkh3sXOpF9teI+PgvUHJv4uzlegTp53nxdepSTgEgBHploDn1ZZj7eTIRyACRe3lnQ2lXr7qQ24Nb3u/o+WsW81ZsayromdW5og
mUgdz6/Hq9kWESHigiGksughU1msLl5ILkl6HvcWyqjLujeJmStgS+raIkN/fBoNGiaoGN/aXtmOjEGYECoDAzLRfJV8/T2Pj7WnwtD7LrpfSFa09M60hWswQU5jWHShvv6JumCJCo5hV4GYq2aJ+FEolagTed6yrbikeuSqOAQAadH37WgeASTn8Pt/xJEF493mNm7e7whDXpe1DQSGWgVe7W2YTNDbUBYrgxPdRnyuJ/nF6pFp08PoZtp/swPconMJHHvkKHwWOYeD9dHUb4lGc6G1mSeqZgmHfNkWRAa9DfMDIKB4JCgXsAu+s1lWeCVpWLfphnFUm93nPm+aMSLdgbKYNLq8fv/3v7vbzpMMo6ar18xBajUzRJwBgjecdB7XgQ11t8A1UWccHE35L382HPxcpEuHLPSGmL3VxUGFg/IGjN9q6tH0ouNT8d+WtbykeecSoM6eGUhaBYM+r7w61k7YYgRLOWjFlUaWXT0TwiaHtrfkiOKv4dV+n6XuV1s5laKoZBo0SdU4vDp5tR0QI0zC7eRqLzihfu3vUXDzytSIe2SXRUIjv8897jUoREA5/aC91LQKRRwox0lDOwYXWxAcXBn/r3l0W0SDfmNAz0xqixej+NiQYNahzeVF00t72imGKxVJWgUOITCGMtqhT8HtJg72VSCr7cQCA20T9u4IB/PooPNpBClOYaapaD293tUm+fpVaFI/03pbiUYWYquhgWpgt8glYsYAgCJgg3uM3F3ckHnV9kkDwiJ6ickaWir8nradlmzsd9bCKgmFcWrZs5xAOfbu3IQM+J++sulXyeo7UKvlNxFHNI03qa6tR+P4zqD/My3jbNX3XPLM1BEHAo5fkQa0U8OWesvZLeIeRI60TZ6LlFI90BhMcjM981VXxDsbONf9B4fvPwOTg1dbUidmyHT/WmCdGna09UN5+1FkY1daY3w8j48KhLkJVd1rDreH79je00mGq4W3vNvbtmejmSJ5X69qLOouAB4okGqsN8okITM87G0pny06Tzy6W8tWTeKRSKjAm0wYA7UechemFEY00FgDwioKx39nKrHQlv+ZrVBRxBgBTBvL/w/eH5b3eFR55q6oCTamQUmpkc7weNxIZfwbEpWbLdg6xgEIhBHzO2m33MCeHXA5+vTshnyUF0BRd7Kpp+V209fx6F+KyZD2HWGBcVhyUAd+jlgVEAoRpkK8XxVu1UT7hRvLMM/haTnbUlklRxQl9foIAACZk83bYcrwj0bDrkYZKsWq2rN61Zv6sMvhamuNXlBwD0LMFQ/olRhjm4he/T2bxyKHmAwlXzVkwvx8nXpyPgj2PYdKxlwAAjca+bazWGiP7W3HXhbwk5r8KW3rGBAhjZtIgCghaGcUjAKgV+P4b7GXYu+EL5K39GQr2PIZhnr0AAFNKjqzHjyWGpvKoM7fPj19/tAvetkLbw6i25nI6oBajEIwW+WaomqJPWj44NQ2iIGqlWUmJCwYnQSEA+87U4nBbUSgRiETQ+/kskcYo33Uvhc2rXC1FBEUdn0Twmfp2FILE+Kym1LU2CTPiLBppLADg13BhSnC27GS6xckjh5bSlIEm0+xNxVVtpzBFINJQ6ZMGF/L184w2PrgwMQf8vuBzrThzHCrBDzdT9umqWxKS39UP7VXaC1M0dDdKfqbyRh65VWKkYaO9xTKrk1/vumTq3xm1qoAlQbvRR2FWW2vKJpBvYshgka71ln0URwWPNquhqGIAwNhM3g8qOmnvwNtOEo86b0egEu/vgozikU5qc3/L53ptOY8k78mCYc88qxiGiSX+fGp5xSOXlneOfXXlOLJrA4a7dwWvkJIn6/FjlWsmZkAhAFuOV+NEZRuzFV3sZPh9PhgEXiFDJ+NMNADUK/kgtbGmHPWFb0EhBN8gU7NHyHr8WOM3Fw+HRqXAqn1n8csPdraethhGGkt9Le+8+JkAg4wCgmSypxQrgJQcO4Ady2dix+p3YXLytDUtRZ0FiDd
qAp4Yv3hvB2qdnpYrRSASQRKNdTL6XSmNbYc56xp52W5FHy/lKzFenJ3c3F5VljBnpKOVxsJ0fNCiaEU09NfytHWPgQYWADAs1YJ4owYNbh92tJXCFIGS7VJVVUHGtDWzKB4pBIa6muDfcZVoT1CuSIxYxa9YRoo4236yGg63t/WVwhQRvI28b++WsRgOAHjFNFV/Y9P1Xl5yDG6XE0k+fp+3pQ+W9RxihQLR96iwWD7RUA9pQli+Pr1R9DG1sAb4vPz36/W4UV1+Bt7KYwAAh4Ge7UBwWvrh8tZTegGEWfiIt7lSRvHIFMef2ZZWvGsbyo4C6NmCIYlHEUYQXdr9MoYzA4BPzx+WQsNZlG/+T4vl1gFjZT1+rJJi0QVmqdpMXeviTafR0SyNQUYDVQBoVHGBwl1bjhz7D0HLjikyYEuktMXmjM2Mw4sLx0ClEPDhttP4Yncr/kdhpLE01vEIh3pBL2tnXmGQjBXtAICy9+9HvnMz8tfdjmwPH1CYU/q2qeK5/GruUMQbNdh1ugZ//GJ/yxXCFBH8Ph9MAu9s6GUUj6QwZ12zMOeK0pOoKD0Bk5unr+riKQoBAMZkxkEhAKeqG1HWVqpqmB4obnEw6YS84pEgikcqD3++1FRXoPD567FzzX+gauCDSb+J7veAmMKUw6OP1reVwhQBsViqqqqSsaqqVmdAI9MAAOrt/LsUvnADTj2ai5o9XwMA7GpqdwDIiNejn00Pj49hU3EbgnGYhtleyZJCKZ8PCgD4NLxvx8Q01b0bvkD8q6Nx4E9zYBafMykZg2Q9h1ihYIAkHsk3SSAVRdDKOCloEYUEhcBQX1MJl9OBQ09Oh/HFEUg+za91n7VnGidHG5VSgVH9+TNxWyiRxV1od0k8Uunlu79b4nmbawQfHOJE1M41/0HhizfDd4YHgziMPbc/R+JRhFGI4pGcufAAABNXqlWNFYgv/R4AsM00DX4m4IhyAAaMPE/e48cwl4/hCv6zqw6i4PFVmPmnb7G3pFnoYBc7l43iDcDPBOj08kaeuTV8Zt13ahsSYYebKVE4/P9QCSvOjrxd1mPHKrNGpOLn0wcCAJZ9sqdlFEoY6QyNdXYAgAPydixVoomqxlMDr8eN4fWFgWUawQc/E5CWQ1FnzRmYZMKL144BAPx7y0nUONpq9651LqUHPwCYZMxPD4Q5i+LR3g1fwPpyPvQvT8BAXzEAwJzcczsb0cSkVWFYGu/st+l7FOagwi1OFrhkjkRQGmwAALUoHu377EUUVH2CUd8uQkod72SqrJSuKCEVSFjZ2gQBEPb1DgBqPx9QyllVFQDqBN6PdNRWoOrsaRRU/hf9WSkmn/wbAKDeRN43APe0nCKmLP5wpI0olDDFYr+T9+09MotHfi2/bylcYn9y7XIoBYaRru0AgArYoDPIPL6IEcZnx0MhAMcrHSitaWOSIIwIlOZFEfQyFkXQaHVoYHwSoq66HHvWvIdhnj3QCD4M9PEoFHUCTQpKjBFT19pNSw9jkkArRpKr9fLd3/UGM9xMBQCorSpDybEDGPXtIhRUfIhJZe8AAHzWnnt/J/EowijFKhyCVt5OhdLEVUtDYylyPIcBAKlXPgX3klPI+b9t0OrkfcDFMnPyUpFs5h4VZbUuHD5bj7v+ta0plamLIkJjPZ8pckAne56qV/S+6V/xHQDgmHogCn7ySyQsO4GJl98t67FjmZ9fOAgDEo04W+fC45/tC05fCyNtzd1gBwA0KuQVDTVmsQKPtxbH922BVggWQk4q+8Eoo2F3rHLewEQMTTXD42P4au85g8owIxEconDoYUpZ77sG0VTTLBro1m3+F9SCD0aBd5r9TEBq9nDZjh9rTMjms9LvbTmJ0/bGliuEWW3N44yOeKQWxSOtj7e77vSGwLJMP4+e1cWTx6HErBEpUCkE7C+tw+GzraQ1RNAgX86ZaQBwKPj+nbVVOLrxk5YrpI6U9fixhGSa3WYUSpjpin4397XzqeTtWwu
SeOTm6SwZrkNByytVFG0mYdGpMTyd/7/aTF0L43qX+vQAoJfZiqJO4PtvsJ+F58h3LZbbMmlSUKKpqmZl25Wzw5gc0kk+VzLe3wWFAjWBNq/AiQ0ftFhHk9RzIwxJPIowKi9/wChlnpHSWrmPx3DPbqgFH6pgQVrmYOgMph5rsNVTMGhUeOPmCZg9IgVXj+cz9UcrGrBHij7q4k1H8sBolNkDAwD8YuWl/owPhKvi8mU/Zm9Ap1biD5dxP7B3N5/EL//TzP8ojLQ1j1hG26WUVzzSiwKC0V+HyoO8sqKPCYHl5aZhsh4/lvnxSF6F7rNdZ4IXhGmYLaUsOgR5RWOT6ItgEFxwNjYgo7owaPkZRbLsHdxY4mKxyuK6g+WYsnw13tl0TpGEMCMRfFIkgszikcbEZ1n1onjUv7Fl6qUlicQjCZtBE6iy+M+Nx1uuEAGDfB2TBhfyXm+NSr5/d30VvKe2t1hO9gRNSGLxntM1aHC14nsUpmgY8DOVWzzSN6Wplp06EijZLWG30QRBcwoGcNFwY1um2WGICFI2gZupoNHK26+XfEyddZVIqC5qsXzAiEmyHj+WmJgdD7WSV9o70pbvURj3eR3EVEUZfa4AoF4heddWQHPqhxbLM0dfKOvxw4FUhgijEXPhlXp5q20Z4oPLcZ/UDyXRqBOMSLfi1evH449XjsLsEVyI+2ov94/o6qDC3SiWchXkHUwAgGBICHqvypoo+zF7C+cNSsQfLs+DSiHg/a2n8N8iXsEEgijCdCGdwePgnQy5xSMp+sTK6uA/yweRm1OuwkHVEFTDguSLfyPr8WOZeaKYsP5QBewOd9OCMEu2O+vtAORPWbRY4+Fl/B5/pngv0llZ0PIKLXkiNGdcVhx+Oqnpf/LnVQeDKy2GGXkUEI+U8t7vtZJ4xByotVciEfag5V6mQOoAEo2bc8PkbADAih+OtaysGqb3DQDoouCDAgAuNd+/z1ENi51XUnUwHjXdwHQYNHqqrMePJfrHGdDPpofXz7D9hL3lCuGKhm7et2cyF8NRiZGGGm8dSg9sBgC4mDqwXJ93sazHjzWafI/aiDwKtHsXquhGqSgCADhVXKhwVZ1GlvcYAGCraToAoDBxAZQqleznECsYtSpMHcz7wv8qPNn6Sl2stsb8fuiZVPgoSt61deVIdRwAAOzQFwAAdmnHIik9W9bjhwOpDRFG4xeNFGUWjyyJwR4HjsRRsh6vNzNrOA8D/mqPmM7SxUGFpzE6aQwAoIkLbv/0vGmyH7M3cV1BFu6dwSuW3PdeEd7acAwsjMGkVBlFqpQiF5LJnlbwBAYTQtJQ5Cz5HrqH9iJzyGhZjx/LDEwyYWiqGV4/w61vbcGYx77CjGfW4kwt7yh0uYSzg7e9U+aURUGhCHiglO34CgBwBkk4JaTCxwQIk+6U9fixhiAI+P1lI3Hw93NhM6hRVusK9kiQxOIulPIFAH+UIhEMoo+WmTWgrHhPi+WlimRKUz+HC4cm4/apvJz5018dgNPT7NoOUyxmfj8M4sy0TuaZaU8z8SjDxe0Jjv/4H9iYshAll70PlVoj6/FjjQlilcVWhQRF1yNQAEAhWlIwGSvsAU1pqjpfPRpP8miznbaLsHf2u9iYuwQjp14u6/FjjYkD4iEIwNHyBpS0lp6s6LpY7JImhCF/n96ltgEAFMfXB7JJxt7/EZy/PI2Cu16X/fixxk8ncz+gf2w81npV1S62u9vthFr0udLJPjnAxSl3RTHSGS96kn3r2zh+zRoMWPyhrMcOFxKPIoxOFI80Bnl/dLbE4MgjfdZ4WY/Xm7lwKB+Q7y+tQ0W9q8uRR4FSrjIbKgJAfFZe4HUlrEjLpNKtneWO6QMxsh+/eS/97x6sPyIOLLvQuWQu3snwquUdTBhNVngY/30OcfGBpDljBFRqDaUshYBklr/5WDWqHR4cKW/AvzaLVRe7OCPtbeSzk84oXPd1Ct7G+uNrAABnjMOgu30VSq7/DqOmL5D
9+LGIRqXAjKHnRJcCYaetSZEI8otHPMpUK3hgL+aDyb3qPNQzPqAp1w+U9fixykOzc9E/To+qBjc+29ksVTXMCBSX0wGlwAVHuQcXPi1/PmnP7oBZaISLqTFozHRMuvMVDB5DUUfncoEYjfBx0Wk43OekroVREAMAFB5+vQtqea93nZSa7quDroI/430pIzF88lxMWvhryjA4B5tBg/FZXDR8f8upliuEkbbmFiPKnQr5I48kH9MBtZsAAKd0uRAUCjJHb4PpQ5Iwb1QaPD6GX32wMziqGAjDu7apAIpB5vu7R8Pv78Yz3IKgFImwxicha+hYWYuvRAK6C0UYvZgLL3c487n5t5mjqCPRVeKNGgxN5YOywqNVXY488okCgtxpDACQNqAp771ClUYdii6gVirwtxvHB9IWvzlQzhd0oXMpiG3PNPIKOM1N9lQCf1im5pBpaqgsLMhEP5segoCAL0rRKd52XZ2RllIWPTKnLAJAnYqfc75TTGdIHoXE1AxkDKLfQHvMEq/xL/eUgjEGr8+P0lrRbL6rHihiZVW/zINJsyUOftHXzH96KwCg3jwAxTNexYaMn6HfdS/JevxYRaVUYOFEnrb4r+Z+V2FGoDQ21AVeyz24YDobACCnnrf7CXU21BqtrMeMZWbnpcKsVeFkVSPO/+MabGheeS3MNFWlVxSPtPIO5k1x/F5lZbVIaeRm2eYs8rZqj+sKeBTKS2sOY/2hiuCFYYjFXif3m3JHIZvAL17rCeCRzA2J9ExvD0EQ8MQVI2HVq3GkvAGbzjXK76JBvlOsoupkatlTBX06LhDlNhYBAMpiaCKIRpwRxhgQj2yyH+uYgps9V8KK+OR+sh+vNzMph8/ubjha0dS57GSOtF+svuONQgSCVmfAHk0+HEwL5fw/yX683kqKRYffX8Yf0iequ56+pHBzAYHJXGURAGoVtsDrapjp2u8EFp0aax6cjqKls/CPRQXI62eBF+ENJv1OUTxSyS8eNeqSg96bBpDXWShMHZwEnVqBU9WN2HmqBj95dQPufm8nX9jlSAQ+sJDbA0WhVKIBfLIowb4bAOCPH4iRUy/F5EXPILkflXBui6vG9YdKIWDr8WrsL63FsYoG/Hub6HHXVYN80QclGoMLQcdnpm3gQmW1Zaisx4t1TFoVXr1+HJLNWlQ1uPHQf3bAI0UkhJmuqBL9TBUyi0eWBC4e6QV3IJWl//ACWY8Z61w6Oh0/Gp4Ct8+Ppf/dDZ+/WSpyGEbpXrFPH41sAsEQH/RelzlO9mPGOhadOjD5u3LPOVV0u+ht11T4KAretWLhI6lqsiMudu7vJB5FEGdjQ+BHYLQmdLB2+Ghv/ADbjefj1LRnZD9Wb0cq87ruYAX86Fong4mDSJ/MgwmJwQ98BffdOzEof0pUjtdbSTJrkWLRwh+GiKAU/RAEnbwz0QBQo2sSi0rVGbIfr7ehUSlg1XMD0ikDE8EgGaV3MQIlcN3LH17uMQanK2fmnSf7MXsDeo0ykNJyy4rN2HbCDr/Y7qyLg0lBTGOBzB4oANAgel0N8h0BAOhScmU/Zm8g2aLDzGF8cHHbW1txyYvr8cYP3GCVdTECxeWI3uBCaQhOXWCpVFW1I84blIjVD05HokmDU9WNWLmbDypZmGlrap/kZyrvfd5ossLNmkTJEiEZ1rhEWY8Z6wiCgGevHg2rXo2jFQ344Uiz6KMw2t0fKIogv3ikOEc8Sh8+WfZj9gbm5vE+0Zd7SuFvVTTsZNXsgM+V/BGeCmNwm6v7xU60GYlHEaS+hofN+ZkAcxTyFdOycjHmoc+Qf+FVsh+rtzNlUCLMWhVOVDnwx694qDDO7sHJQztD3ofg5OGmfjF3WW40Wh1sialROVZvZ0S6NTCYhN8Pl9MBl9MR8vZqUTxSREE8cpmbBKNaa+zMVPRE8jNs8LHwqi9JKYt+mVMWAUCwNIlHp4Q0WOOTZD9mb+FaMYWpsoFX2pPEYp+vlbLeIRBIY4mGeKQ
M/m3FZ1G57lB5aE4uzDr+bK91euGT2t3btXZ3i2kNjVGowKS1pgS9t+VQNEIomLSqQMriJztK8NyqQ7j0Lxv5wi7e5zV+bpKu0smfmm4XmvoRZYYhsh6vt2DSqnBJPi8k85+tzbyPAulLnS+MEK2iCACgMTUFHFTAhuR0iigNhfMGJcCkVaGs1oWiU3YAQHmdC9WN4v29kxkkks9VNAofqUzBonBSTuykp5J4FEEa67h4VC/ooVAqu/lsiM5g0qpwj1h9a83JppuN5e05KDl2IKR9KFxcPGJ6W8TPj5CXvHRLYFDBzu6B+4kcOJYPwd6NK0PaXuPjKSxSmV05EeKyA68V/ckoPxzyM2yBdvf7fTiwZTUObvu2U/sQpPQljfyRR6bMppmpUvMI2Y/Xm5iem4SLR3HxbUJ2HDISeHt5PF0TEQJpLDr5271e3TSwcDMV0geQeBQqA5NM+Ojn5+En4/vj59MHYkwWn+31dFE8kqqquqMgHiVkDQu89jMBWcMpTTVU5uTxibWv95bh2VUHUePi/Tqfr2vikVYshqPWyz9JUK9oEo+cCXSfD5UF4/oDAFbuLkVxRQM+2VGC5V8e5Au7IBoyN3+2R0M8MqdkB16f0Q4gH9MQ0aqUuEgsevRJUQm+3luGSU98g5vf5D5xnY088kRRPNKam57rLqZGvxjyrqRfZwRprOPVmhyITtoSEVlunZqD/9wxGVfOmYmPhv4Jx4T+sKIBpz/6bUjbq912AIBC37Nd8omWDE+3BiIRBOaHWWhEHOqg/fpXIW2v8/EZKrmrLALAwKnXogI2uJkKGWNnyX683ky6VQeLgYcnK6qLkfvp5RjyySUo+vpfIe+jKWVR/kHF0ImzA69VI6m6WmcQBAEvLByDdQ9diPdum4zBqTYAgLeLIoLKx/0NlVHwOXPpm7yuTin7U4n2TjIo2Ywnr8zHL+cMxZTB/H/ZVRFBEo+iMbhIyWiqotoILVVe6gTD0ywYntb0PJYii7saaWhkXDzSmeXv39n1mYHXejLLDpn8/lYUDIiHy+vHhU+vxT3vbMenu3mFTdaVdEWxKILcvnYAkJM3CaXgkSiuvIWyH683IYmGbxcexz3vbIfPz1DpEK/zToqGUvVcl0r+e21KdpMwXK5IiKnnOolHIXLiwHY4GxvaXcclRh45FCQexSrjs+Nx27RBuPyaRfBc8jIAYKz9K+z+7r8dbqv18k6lykjiUawxIt0CP2t5OxzoK0bpycMdbq8TZyW1JvnbPjE9C4YHd6Lm9q1IzRzc8QZEmwiCgMEpLQU/7aYXUV9bDY/b1eE+VFFMWVSpNdh/8YfYPOox5M+4Rvbj9TYEQUBmggEKhYDMRP6c9ouDirOni7Fj+UxsfCu0yQKNKB6pdPI/7/3GpvTkKmPsVGTpiYzK5v9Ljd8Jp7MRRd+8iyO7Noa8vS/ggxKFtAa1BiUCT13blXq57MfrTQiCgCevHIWhqWbMHJaMx68Ywz/3e+HvpHDI/H4YGe//66MgHrnim1LVhhTMlf14vQVBEPD8wjFIsTT51Uj9uubiUenJw9j4z0c67NspRF87FoXUZEGhALv5c2wZ9yTGzbtV9uP1JqYOTsSYTBs8PoZGD2/nQH9ejDzyuF04vON7sA7S2KQCKN4oFEBJTM8KvD6VNE3240USEo9CJPPD+Tj85x+3+8NzN/C0JadK/plIQn4Gj5mKreaLoBQYhqy6Gdu/fLPd9fWieKQxy2+WTkSW/nF66LXqwPvCxCuwX81TQ45v7Fg4lDqWOpNVnhM8B4PJiqT07Kgcq7czYWByi8+GefbA9Kds7Hzh6g63l1IWlXr5xSMAGDp+BiZccS+FtYdJZgJvL7/fB6/HjdrXFyDfuRmTjj6Ps6eLO9xeK1ZWjUYaS3OvK08CeaCEQ9aAwaiCBRrBi/3PXYrR392OgR/MRtWyDBz8/QScOry73e19YgWmaJjoAkDdvFewcfADGLfouagcrzeR18+KlfdNxd9
unICCUcPgYFqo4cXxA9uw6aMXcPrRIdjy6V873I/L6YBG4INSQxTEo8Hz7kWxIgsbMm+DIUp9it5CikWHL+6dij/9JB+f33MB5uXzqBRJPGJ+P+xv/hSTDv8Zrjcua3dMJ1XUjIavHcB9bMfPv52e7Z1EEAQsvXg4DBpuF3NdQWYgk4CJz/eDT8/EoI9+jI1/u7fdfTWJR9GJ8jx4ySfYmLIQo254KirHixT0C+0Eea4i7N3wRZvLfQ6etuaO0o+OkJ8RP/8ntpmmQiP4kLPhYdRUlQeW7V7/CQr//VTAeNPg551KHYlHMYcgCMhOaOogWCf9FNXpUwEABXsew+Znr25zttLjdsEs8IGkOS6l1XWInst5k8/HCYFXsNuQfScKk64MLBtXtwYVpSfb3V4TqMITHfGIiAwZqTxFwOBvwKZ//S5QyQwAir9/v8PtdX5JPJL/eW9tZqSpSxvWzppERwgKJYrN3CtudGNh4PN41GKI9yDq3ru13ciUwOBCHZ3rPXf8RZh03VKoNfJX/+nNaLQ6HNHxNJGybZ9hRNHv0Y+VYfyWh1D4wo04fqCozW3ra5uK4RijIOYkpmZiwNKdmHxLbA0oewrxRg2uGNsfw9MtOH8InxwS4Afz+7Fr3UcY6t0HAMjyn8SxfZvb3E80iyIQ4TEmMw7fPDANax+cjmWXjIBaLVYsZH5sef9JjHDz4kcFp//RfhaRM3oFUABgyNhpmHTnKzEnEpN4FCJ7NdzIqnbvV4HPtn3xBja8dm+gKpOvoRIA4IlSp4KQH53eiFH3foBjikxY0YB9X7wMt8uJg9vWYtjXN6Bg7++x+f0nwfx+WBi/6RisVAEpFrli3lwcE/pjs20eho6fgcxpN6CO8dSECTUrsXvdR61uV1PFc+p9TIDZRiV1Yw2TwYD4X3yPAxd/hEk3PI4Jd/wVO6a+Cgfjg7Wj699HRemJNrfX+XlHRGuIrYd/Xyeh3yA4oIVW8OK84ucBAOXgUQWKkm0dbm8Sow0NlvgO1gyfoeNnYGP/RdirGYnBky+R/Xi9Hd3kprSQXdox2DH1VWzMuQcupsYwz15s/k87g3anHQDg01I/L9ZoSOPlz3MP/w1GwRn4vKDyY+jeuaLNNOWGGiqGE6uMHzoADqaFAgwHH5+M3DW3By0v2/ppm9sGiiJoKSAgFkiz6pGdaIRaqUBefxsAQGA+ZB9cEVhHITAU71zf5j4Ubj45wOj+3i4kHoWIY8ilAIC4Mj5TVVF6EqM23o/Jp1dg21tLAACKWl4e0mvu1z0nSciCSq1B2dDrAQCTDj0D9+PZwOcPQSnw0p85B15D5dnT0Au8BHRCWmZbuyJ6MKNy+iH7kT2YcB83S+6XMwKNt23AcQUPe2448n2r29VXnwUA1AomKFWq6JwsEVFMljjkjr8IgkIBhVKJ/IuuwY5+3FNo4u5HkfjKSBT++8lWtzWIRqoaI4lHsYSgUOKkuqkc8ikhFafO+z0AINu+ETuXz8DWZy5HfW11i23dLicMAh9ommwt0x7lYNLP/oThv14Pk4U89cJlxHk/RuGI32KbaRoSr30V+Rddg0k3/A5Fwx8EAAza9xd4Pe5Wt1W4pMEFXe+xhm34dABAHPhE3371cBQmXAYASEEl9nz7QavbOSU/UyqGE3MYjSbsjpsBAMj17odW8GC3djQ2ZHABedLR57Fxxa9b3TbgaxeF6FIisuRnNmWApKIcdUyPHXperbLmYOt9eSC6BVBiGRKPQqTfKH7zGeg5iENF3+HQFy9CJfBc2YwzXwMAdA2nAQDKOBIPehsjf3w7zoLPMJuERgzxHgwsS0YVDn3CZyqrYYFOTx2M3kJyvwEoHXoTAMBUXtTqzGSDKB7VKWimojdhGjoj6P2wvc+2aH+/zwcL450NU1x0RAQiclSlXhB4XTLidmTmTwcAJKEao5xbMK5uNfateafFdrXVPH3ZzwSYrfJHHhGRp+CqBzH2wU+QlpUb+Gzs5b9AFSxIQA3
2fPdxq9sp3Vx4EPQkHsUaOflT0ciaKhrZB8xDwd1vYmPSVQAA997Wo1DcDXYAQCMVw4lJBl/3JxTpJ+GYIhPbJj2HEUvWIHvmrXAy7nM5ofgvrZpna0U/Q0pJjz3GZAY/lw+bxqExZQIAIL34P9j29HwUvv90i+1UXt7m0SiAEsvEjHhUVVWF6667DhaLBTabDYsWLUJ9fX276999993Izc2FXq9HZmYm7rnnHtTU1HTp+GlZg1EiJEMt+DD444sx+fgrgWX92RmcPV0Mq7sUAKBPyunSMYiei8FkRePCj7BhwF2Bz5xMjY0pvKTm5BJupl2ppJS13kbi0CkAgJGubXD9IQu7n5iGwhdvwban5qOi5DjcdXwg6VDSYKI3MWLKxTiuyAi8t8CBLW8vxbYv3kBNdQUAoM5eEZhEsCaQ31WskXflr7FbOxqFCZdh/OX3IiGlP0qEYBHQf3JLi+0a7PyarxWMlMbSi1BrtDiUNAsAYFv/WKtV2NQeHnmk0NuieWpEBNBodTiqbfIMy5i0AABgHDkfADCi6ptWvY8CxXCUJB7FInFJaRi95EtkL92FsXNugqBQIC0rF2d+8hkAQCkwqP4+AxveWAK3qymd0ezn7a630sRQrDEyp3/AegAAPINmQ585GgCQ6T+NsfXrULDndy2KY2i8XFdQUuRRu8SMeHTddddhz549+Prrr/Hpp59i3bp1uO2229pcv6SkBCUlJXj66aexe/durFixAitXrsSiRYu6fA4nBgSXRrbDhFJwseDYxo+R5uPiUXz/QV0+BtFzycodjck3/gGFwx5GBWwoGrwYgy7/NdysafBQr6WHTG8ja9j4wEPIJDQiz1WEgooPMLZhHdyv/QjOMj5j1ai2deNZEpFGoVRCfcMH2H7eSwET7cnHX8HYwvvAnsvHsX1bUFvF7/n1TA+tLjrVl4jIYbbGI+/hb1Fw95sBEajEnB+0Tpx9V+D1iYNFaGyoQ2MNFw/rBepg9jaSp98GN1Miy38Saf+5BIeKvgtarhUHF2ojpQ/GImzqgzimyEBh/CXol8OFpBFTLsZ+1TAYBSe07ywIKowCAB47zypo1JKnYW9iwIgCFA57GACQCDsmH38FRZ/z6nvM74dV9DE1xdPEUKyh1+uxI+M6AMBZxGPU7FvQf8R5LdY7supvQe/NHu5drLOltViXaCImxKN9+/Zh5cqV+Nvf/oaCggKcf/75eOGFF/Duu++ipKSk1W3y8vLwwQcfYP78+Rg4cCAuuugi/OEPf8D//vc/eMXqWJ1l/MLfYtPIZdj9o39iY+4S1F77GY6lzAQADNz1LLSCB2eQhPRsqobSmym4+ldIXHYck376KBJTM7HD1pTe0pg4qhvPjJADlVqDYu3QVpelszJMPsrNdt16Eg57G+nZuRgz66dI+9HdqBfN0+uYHjbUw/zeFSg7wD3waihlsdeQfc0z2K8ahk22H/P3niNwOR3Y/PFLyPzXNOx7+Vo463gH06Ek8ai3MWBEAU5c8Qn2q4fDILhg+O8ibP3879j3h8nY9NHz0Psl8YgiTWORvPMvQfbS3Si45x+BzxRKJZJu/Q9OCWlIRQUOrbgjeKMa7mfqNqZH81SJKDBw2nWogC3wXjjGxeK62mqoBV510ZaQ2h2nRoTJxJuewuHLP4f27o3QGUxISOmPzVYeWVoF3mdLPPllYH3m9yPRzyeGrClZ0T/hGCImxKMNGzbAZrNh/Pjxgc9mzpwJhUKBwsLCdrYMpqamBhaLBap2TG1dLhdqa2uD/iRUag0mLvgF8qbMx6SFv0bmkNHQDZkOAEgAD288njITgiIm/q1EhEiZ93+ogRFupkLm9Ju7+3QIGbBc9QKOKrKxMece7L/4Qxy98itsK/hz0DosYXD3nBwhO5lDRsN55yYcvvwzeBZvxVFFNhJQg/FbHgIANFDKYq8hMT0LQ3+zERPueRvVsEAj+LDrm7cxoYibqo6tXwfnKR6N5FSRaNgbGZR/PtL
u/AQlQjL6sTKM23Q/hnn2InfHciT5uMedgVJZehUJKf3hmP8qfEzA+NpV2LH634FlmnoeeQRbRhtbE7FKYmoGlIs3YsOAxQCAjJptcLucqK04AwBwMC10BjLMjkWUKhUG5U8JshTIu+117J/7Pvx3/AAfEzDYewglxfsBcC9DqfBRYr8Bre6T4MSEylFaWork5OAHtUqlQnx8PEpLS0PaR0VFBX73u9+1m+oGAE888QSsVmvgLyOj/YdFzrgfBV77mIBBl7fu2k/0XjKHjIbnjo04c+03gTBooneRMTgfOUt3YNINv8PQ8TOQk1eA4dOuCjLfNKS1Hp1E9A4SUzMxKP98xCf3g+qaN+FlTY/PRjWlsPQ2BIUCR83jAADjNz8YtKz/8Q8BAE49pTP0VqzxSaif/7cg3wwrGgJV9hLJnqDXMWTsNGxJ5JWVR357G/Y+fj62Pn0pxjasAwBoEygaoTcSl5SG/AVL4GFKpKICmidSkPSP6QAAu4ImhnoTeqMZQwtmITE1A/u1PFPE8c9rcfroHlSUcP8jKnzUMd0qHv3qV7+CIAjt/u3fvz/s49TW1mLevHkYPnw4li1b1u66Dz/8MGpqagJ/J0+ebHd9iy0BhQmXws2U2DzwbiSmUqW1vkhiaiayckd392kQUURnMGFXfJN43G/4pG48GyKaZA4Zjd3GgsD7RjMNKnojukm3Bl6fFNKxzcirs/VnfFbaFzewW86LiA5Dxk5DzS3foej8V7DFMjPweQ2MsNgS2tmSiFVG3/oyNltnQSEwDHfvwrj6tYFlqbkTuu/ECFkxmKw4oh4SeK8VPACAeqWtm86IkBvtvMdRx/QY5DsC41uzULHmJQBAuYrSFDui7fytKPDAAw/gpptuanednJwcpKam4uzZs0Gfe71eVFVVITW1/Uauq6vDnDlzYDab8dFHH0GtVre7vlarhVarbXedcym4+y0wvx+TKF2NIPoUWQt+h23v1EIx9qcYndK/u0+HiCKeIfOBog0AAHVWQQdrE7HIiCnzcMTyJQRBQNbQcShf9U9gY5OBsjZlSDtbE72BtKxcpGXlYnPtWWDnKgBAuTIVFI/QO9HqDJjwi/dx8tAOnNm9DjixEROrP0WRYTJGZ+V29+kRMlI/6iZg25Kgz+wWavPeyqD881GW8D3OrLgGQ7wHUVD1CQCgKpFE4o7oVvEoKSkJSUkdlzafPHky7HY7tm7dinHjeBj56tWr4ff7UVDQdqe9trYWs2fPhlarxSeffAKdThexcz8X8jkiiL5HSv+BSHno0+4+DaIbGP3jn+Hg7hVI8Z7GgAlzu/t0CJkYOLIpojBj1HSgWfX2pIGjo34+RPeQM2UBsHMpAMBuyO7ekyFkJ2NwPjIG5wO4G8V7N2NwBnka9nbGXXwbtgBQaI0Yu+Eu/mEGTQz1ZlL6D4T1/jXY+vL1GFe3GgBgHnVxN59Vz0dgjLHuPolQmDt3LsrKyvDKK6/A4/Hg5ptvxvjx4/Gvf/0LAHD69GnMmDEDb731FiZOnIja2lrMmjULDocDH330EYzGpvzFpKQkKJXKtg4VRG1tLaxWa8BsmyAIgiAAwOV0wO/zQW+kqlt9hepl/RGHOhxX9EfW0j3dfTpEFNnw6t2ILy9E/C3/RlJ6dnefDkEQMrH187/Dd3AVRt32Ghlm9wGY34+iVf+CIT4dueMv6u7T6TZC1TxiRjyqqqrCXXfdhf/9739QKBRYsGABnn/+eZhM/KI+duwYBgwYgDVr1mD69OlYu3YtLrzwwlb3VVxcjOzs7JCOS+IRQRAEQRAAcHR3Iaq+ehL6glswYsq87j4dgiAIgiCIsOl14lF3QeIRQRAEQRAEQRAEQRC9kVA1DzLqIQiCIAiCIAiCIAiCINqExCOCIAiCIAiCIAiCIAiiTUg8IgiCIAiCIAiCIAiCINqExCOCIAiCIAiCIAiCIAiiTVTdfQI9HclPvLa2tpvPhCAIgiAIgiAIgiAIInJIWkdHtdR
IPOqAyspKAEBGRkY3nwlBEARBEARBEARBEETkqaurg9VqbXM5iUcdEB8fDwA4ceJEu/9IIvapra1FRkYGTp482W6JQiL2obbuO1Bb9x2orfsO1NZ9B2rrvgO1dd+B2rrnwRhDXV0d0tPT212PxKMOUCi4LZTVaqUfdx/BYrFQW/cRqK37DtTWfQdq674DtXXfgdq670Bt3Xegtu5ZhBIoQ4bZBEEQBEEQBEEQBEEQRJuQeEQQBEEQBEEQBEEQBEG0CYlHHaDVavHII49Aq9V296kQMkNt3Xegtu47UFv3Hait+w7U1n0Hauu+A7V134HaOnYRWEf12AiCIAiCIAiCIAiCIIg+C0UeEQRBEARBEARBEARBEG1C4hFBEARBEARBEARBEATRJiQeEQRBEARBEARBEARBEG1C4hFBEARBEARBEARBEATRJiQetcNLL72E7Oxs6HQ6FBQUYNOmTd19SkQneeKJJzBhwgSYzWYkJyfjsssuw4EDB4LWmT59OgRBCPq74447gtY5ceIE5s2bB4PBgOTkZDz00EPwer3R/CpEByxbtqxFOw4dOjSw3Ol0YvHixUhISIDJZMKCBQtQVlYWtA9q59ggOzu7RVsLgoDFixcDoGs6llm3bh3mz5+P9PR0CIKAjz/+OGg5YwxLly5FWloa9Ho9Zs6ciUOHDgWtU1VVheuuuw4WiwU2mw2LFi1CfX190Do7d+7EBRdcAJ1Oh4yMDDz55JNyfzXiHNpra4/HgyVLlmDkyJEwGo1IT0/HDTfcgJKSkqB9tHYvWL58edA61NbdT0fX9U033dSiHefMmRO0Dl3XsUFHbd3as1sQBDz11FOBdei6jg1CGWNFqu+9du1ajB07FlqtFoMGDcKKFSvk/npEG5B41Abvvfce7r//fjzyyCPYtm0b8vPzMXv2bJw9e7a7T43oBN9++y0WL16MjRs34uuvv4bH48GsWbPQ0NAQtN6tt96KM2fOBP6aP4R8Ph/mzZsHt9uNH374AW+++SZWrFiBpUuXRvvrEB0wYsSIoHZcv359YNkvfvEL/O9//8P777+Pb7/9FiUlJbjiiisCy6mdY4fNmzcHtfPXX38NALjqqqsC69A1HZs0NDQgPz8fL730UqvLn3zySTz//PN45ZVXUFhYCKPRiNmzZ8PpdAbWue6667Bnzx58/fXX+PTTT7Fu3TrcdtttgeW1tbWYNWsWsrKysHXrVjz11FNYtmwZ/vrXv8r+/Ygm2mtrh8OBbdu24be//S22bduGDz/8EAcOHMAll1zSYt3HHnss6Fq/++67A8uorXsGHV3XADBnzpygdnznnXeCltN1HRt01NbN2/jMmTN4/fXXIQgCFixYELQeXdc9n1DGWJHoexcXF2PevHm48MILUVRUhPvuuw8/+9nP8OWXX0b1+xIijGiViRMnssWLFwfe+3w+lp6ezp544oluPCsiXM6ePcsAsG+//Tbw2bRp09i9997b5jaff/45UygUrLS0NPDZyy+/zCwWC3O5XHKeLtEJHnnkEZafn9/qMrvdztRqNXv//fcDn+3bt48BYBs2bGCMUTvHMvfeey8bOHAg8/v9jDG6pnsLANhHH30UeO/3+1lqaip76qmnAp/Z7Xam1WrZO++8wxhjbO/evQwA27x5c2CdL774ggmCwE6fPs0YY+wvf/kLi4uLC2rrJUuWsNzcXJm/EdEW57Z1a2zatIkBYMePHw98lpWVxZ599tk2t6G27nm01tY33ngju/TSS9vchq7r2CSU6/rSSy9lF110UdBndF3HJueOsSLV9/7lL3/JRowYEXSsq6++ms2ePVvur0S0AkUetYLb7cbWrVsxc+bMwGcKhQIzZ87Ehg0buvHMiHCpqakBAMTHxwd9/vbbbyMxMRF5eXl4+OGH4XA4Ass2bNiAkSNHIiUlJfDZ7NmzUVtbiz179kTnxImQOHToENLT05GTk4PrrrsOJ06cAABs3boVHo8n6JoeOnQoMjMzA9c
0tXNs4na78c9//hO33HILBEEIfE7XdO+juLgYpaWlQdex1WpFQUFB0HVss9kwfvz4wDozZ86EQqFAYWFhYJ2pU6dCo9EE1pk9ezYOHDiA6urqKH0borPU1NRAEATYbLagz5cvX46EhASMGTMGTz31VFC6A7V17LB27VokJycjNzcXd955JyorKwPL6LrunZSVleGzzz7DokWLWiyj6zr2OHeMFam+94YNG4L2Ia1DY/LuQdXdJ9ATqaiogM/nC/ohA0BKSgr279/fTWdFhIvf78d9992HKVOmIC8vL/D5tddei6ysLKSnp2Pnzp1YsmQJDhw4gA8//BAAUFpa2upvQVpG9AwKCgqwYsUK5Obm4syZM3j00UdxwQUXYPfu3SgtLYVGo2kx6EhJSQm0IbVzbPLxxx/DbrfjpptuCnxG13TvRGqb1tqu+XWcnJwctFylUiE+Pj5onQEDBrTYh7QsLi5OlvMnuo7T6cSSJUuwcOFCWCyWwOf33HMPxo4di/j4ePzwww94+OGHcebMGfzpT38CQG0dK8yZMwdXXHEFBgwYgCNHjuDXv/415s6diw0bNkCpVNJ13Ut58803YTabg9KYALquY5HWxliR6nu3tU5tbS0aGxuh1+vl+EpEG5B4RPQZFi9ejN27dwf54AAIypkfOXIk0tLSMGPGDBw5cgQDBw6M9mkSXWTu3LmB16NGjUJBQQGysrLw73//mx4svZi///3vmDt3LtLT0wOf0TVNEL0Hj8eDn/zkJ2CM4eWXXw5adv/99wdejxo1ChqNBrfffjueeOIJaLXaaJ8q0UWuueaawOuRI0di1KhRGDhwINauXYsZM2Z045kRcvL666/juuuug06nC/qcruvYo60xFtH7oLS1VkhMTIRSqWzhBl9WVobU1NRuOisiHO666y58+umnWLNmDfr379/uugUFBQCAw4cPAwBSU1Nb/S1Iy4ieic1mw5AhQ3D48GGkpqbC7XbDbrcHrdP8mqZ2jj2OHz+OVatW4Wc/+1m769E13TuQ2qa9Z3NqamqLwhZerxdVVVV0rccgknB0/PhxfP3110FRR61RUFAAr9eLY8eOAaC2jlVycnKQmJgYdM+m67p38d133+HAgQMdPr8Buq57Om2NsSLV925rHYvFQpPD3QCJR62g0Wgwbtw4fPPNN4HP/H4/vvnmG0yePLkbz4zoLIwx3HXXXfjoo4+wevXqFmGurVFUVAQASEtLAwBMnjwZu3btCuq4SJ3Y4cOHy3LeRPjU19fjyJEjSEtLw7hx46BWq4Ou6QMHDuDEiROBa5raOfZ44403kJycjHnz5rW7Hl3TvYMBAwYgNTU16Dqura1FYWFh0HVst9uxdevWwDqrV6+G3+8PiIiTJ0/GunXr4PF4Aut8/fXXyM3NpXSHHoQkHB06dAirVq1CQkJCh9sUFRVBoVAEUpyorWOTU6dOobKyMuieTdd17+Lvf/87xo0bh/z8/A7Xpeu6Z9LRGCtSfe/JkycH7UNah8bk3UQ3G3b3WN59912m1WrZihUr2N69e9ltt93GbDZbkBs80fO58847mdVqZWvXrmVnzpwJ/DkcDsYYY4cPH2aPPfYY27JlCysuLmb//e9/WU5ODps6dWpgH16vl+Xl5bFZs2axoqIitnLlSpaUlMQefvjh7vpaRCs88MADbO3atay4uJh9//33bObMmSwxMZGdPXuWMcbYHXfcwTIzM9nq1avZlqY70rEAAAt0SURBVC1b2OTJk9nkyZMD21M7xxY+n49lZmayJUuWBH1O13RsU1dXx7Zv3862b9/OALA//elPbPv27YEKW8uXL2c2m43997//ZTt37mSXXnopGzBgAGtsbAzsY86cOWzMmDGssLCQrV+/ng0ePJgtXLgwsNxut7OUlBR2/fXXs927d7N3332XGQwG9uqrr0b9+/Zl2mtrt9vNLrnkEta/f39WVFQU9PyWKvD88MMP7Nlnn2VFRUXsyJEj7J///CdLSkp
iN9xwQ+AY1NY9g/bauq6ujj344INsw4YNrLi4mK1atYqNHTuWDR48mDmdzsA+6LqODTq6hzPGWE1NDTMYDOzll19usT1d17FDR2MsxiLT9z569CgzGAzsoYceYvv27WMvvfQSUyqVbOXKlVH9vgSHxKN2eOGFF1hmZibTaDRs4sSJbOPGjd19SkQnAdDq3xtvvMEYY+zEiRNs6tSpLD4+nmm1WjZo0CD20EMPsZqamqD9HDt2jM2dO5fp9XqWmJjIHnjgAebxeLrhGxFtcfXVV7O0tDSm0WhYv3792NVXX80OHz4cWN7Y2Mh+/vOfs7i4OGYwGNjll1/Ozpw5E7QPaufY4csvv2QA2IEDB4I+p2s6tlmzZk2r9+wbb7yRMcaY3+9nv/3tb1lKSgrTarVsxowZLX4DlZWVbOHChcxkMjGLxcJuvvlmVldXF7TOjh072Pnnn8+0Wi3r168fW758ebS+IiHSXlsXFxe3+fxes2YNY4yxrVu3soKCAma1WplOp2PDhg1jjz/+eJDgwBi1dU+gvbZ2OBxs1qxZLCkpianVapaVlcVuvfXWFpO1dF3HBh3dwxlj7NVXX2V6vZ7Z7fYW29N1HTt0NMZiLHJ97zVr1rDRo0czjUbDcnJygo5BRBeBMcZkCmoiCIIgCIIgCIIgCIIgYhzyPCIIgiAIgiAIgiAIgiDahMQjgiAIgiAIgiAIgiAIok1IPCIIgiAIgiAIgiAIgiDahMQjgiAIgiAIgiAIgiAIok1IPCIIgiAIgiAIgiAIgiDahMQjgiAIgiAIgiAIgiAIok1IPCIIgiAIgiAIgiAIgiDahMQjgiAIgiAIgiAIgiAIok1IPCIIgiAIoldz00034bLLLov6cVesWAFBECAIAu67776oH7+zTJ8+PWLneezYscB3Hz16dET2SRAEQRBE96Hq7hMgCIIgCILoKoIgtLv8kUcewXPPPQfGWJTOKBiLxYIDBw7AaDR2y/G7i4yMDJw5cwZPP/00Vq1a1d2nQxAEQRBEmJB4RBAEQRBEzHLmzJnA6/feew9Lly7FgQMHAp+ZTCaYTKbuODUAXNxKTU3ttuN3F0qlEqmpqd36vycIgiAIInJQ2hpBEARBEDFLampq4M9qtQbEGunPZDK1SFubPn067r77btx3332Ii4tDSkoKXnvtNTQ0NODmm2+G2WzGoEGD8MUXXwQda/fu3Zg7dy5MJhNSUlJw/fXXo6KiotPn/Je//AWDBw+GTqdDSkoKrrzyysCylStX4vzzz4fNZkNCQgIuvvhiHDlyJLBcSgf797//jQsuuAB6vR4TJkzAwYMHsXnzZowfPx4mkwlz585FeXl5YDvpf/Doo48iKSkJFosFd9xxB9xud5vn6XK58OCDD6Jfv34wGo0oKCjA2rVrA8uPHz+O+fPnIy4uDkajESNGjMDnn3/e6f8HQRAEQRA9HxKPCIIgCILoc7z55ptITEzEpk2bcPfdd+POO+/EVVddhfPOOw/btm3DrFmzcP3118PhcAAA7HY7LrroIowZMwZbtmzBypUrUVZWhp/85CedOu6WLVtwzz334LHHHsOBAwewcuVKTJ06NbC8oaEB999/P7Zs2YJvvvkGCoUCl19+Ofx+f9B+HnnkEfzmN7/Btm3boFKpcO211+KXv/wlnnvuOXz33Xc4fPgwli5dGrTNN998g3379mHt2rV455138OGHH+LRRx9t81zvuusubNiwAe+++y527tyJq666CnPmzMGhQ4cAAIsXL4bL5cK6deuwa9cu/PGPf6RII4IgCILopVDaGkEQBEEQfY78/Hz85je/AQA8/PDDWL58ORITE3HrrbcCAJYuXYqXX34ZO3fuxKRJk/Diiy9izJgxePzxxwP7eP3115GRkYGDBw9iyJAhIR33xIkTMBqNuPjii2E2m5GVlYUxY8YEli9YsCBo/ddffx1JSUnYu3cv8vLyAp8/+OCDmD17NgDg3nvvxcKFC/HNN99gypQpAIB
FixZhxYoVQfvSaDR4/fXXYTAYMGLECDz22GN46KGH8Lvf/Q4KRfB84okTJ/DGG2/gxIkTSE9PDxxz5cqVeOONN/D444/jxIkTWLBgAUaOHAkAyMnJCel/QBAEQRBE7EGRRwRBEARB9DlGjRoVeK1UKpGQkBAQQQAgJSUFAHD27FkAwI4dO7BmzZqAh5LJZMLQoUMBICitrCN+9KMfISsrCzk5Obj++uvx9ttvB6KbAODQoUNYuHAhcnJyYLFYkJ2dDYCLOW2dv3Su556/dO4S+fn5MBgMgfeTJ09GfX09Tp482eI8d+3aBZ/PhyFDhgR952+//Tbwfe+55x78/ve/x5QpU/DII49g586dIf8fCIIgCIKILSjyiCAIgiCIPodarQ56LwhC0GdSFTcpXay+vh7z58/HH//4xxb7SktLC/m4ZrMZ27Ztw9q1a/HVV19h6dKlWLZsGTZv3gybzYb58+cjKysLr732GtLT0+H3+5GXl9fCm6i1cz33s3NT3TpDfX09lEoltm7dCqVSGbRMSk372c9+htmzZ+Ozzz7DV199hSeeeALPPPMM7r777i4flyAIgiCIngmJRwRBEARBEB0wduxYfPDBB8jOzoZKFV73SaVSYebMmZg5cyYeeeQR2Gw2rF69GtOmTcOBAwfw2muv4YILLgAArF+/PhKnD4BHTzU2NkKv1wMANm7cCJPJhIyMjBbrjhkzBj6fD2fPng2cS2tkZGTgjjvuwB133IGHH34Yr732GolHBEEQBNELobQ1giAIgiCIDli8eDGqqqqwcOFCbN68GUeOHMGXX36Jm2++GT6fL+T9fPrpp3j++edRVFSE48eP46233oLf70dubi7i4uKQkJCAv/71rzh8+DBWr16N+++/P2Lfwe12Y9GiRdi7dy8+//xzPPLII7jrrrta+B0BwJAhQ3DdddfhhhtuwIcffoji4mJs2rQJTzzxBD777DMAwH333Ycvv/wSxcXF2LZtG9asWYNhw4ZF7HwJgiAIgug5UOQRQRAEQRBEB6Snp+P777/HkiVLMGvWLLhcLmRlZWHOnDmtii9tYbPZ8OGHH2LZsmVwOp0YPHgw3nnnHYwYMQIA8O677+Kee+5BXl4ecnNz8fzzz2P69OkR+Q4zZszA4MGDMXXqVLhcLixcuBDLli1rc/033ngDv//97/HAAw/g9OnTSExMxKRJk3DxxRcDAHw+HxYvXoxTp07BYrFgzpw5ePbZZyNyrgRBEARB9CwExhjr7pMgCIIgCILobaxYsQL33Xcf7HZ7d58KbrrpJtjtdnz88cdRPe6yZcvw8ccfo6ioKKrHJQiCIAgislDaGkEQBEEQhEzU1NTAZDJhyZIl3X0qUeXEiRMwmUx4/PHHu/tUCIIgCIKIAJS2RhAEQRAEIQMLFizA+eefD4Cnq/Ul0tPTA9FGWq22e0+GIAiCIIiwobQ1giAIgiAIgiAIgiAIok0obY0gCIIgCIIgCIIgCIJoExKPCIIgCIIgCIIgCIIgiDYh8YggCIIgCIIgCIIgCIJoExKPCIIgCIIgCIIgCIIgiDYh8YggCIIgCIIgCIIgCIJoExKPCIIgCIIgCIIgCIIgiDYh8YggCIIgCIIgCIIgCIJoExKPCIIgCIIgCIIgCIIgiDb5f9wSukmf8yRLAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "frame_i = 200\n", + "plt.plot(frames[0, frame_i])\n", + "\n", + "# Prepare for masking\n", + "masked_frames = torch.clone(frames).detach()\n", + "mask_indices = torch.arange(frames.size(-1)).view(1, 1, -1)\n", + "mask_indices = mask_indices.expand(frames.shape)\n", + "periods = best_lags.unsqueeze(-1)\n", + "period_indices = mask_indices.remainder(periods)\n", + "\n", + "# Mask everything not within about 20% (1/5) of a period peak\n", + "jitter_range = periods // 5\n", + "peak, lag = torch.max(masked_frames, dim=-1, keepdim=True)\n", + "mask = (\n", + " (period_indices < lag.remainder(periods) - jitter_range)\n", + " | (period_indices > lag.remainder(periods) + jitter_range)\n", + ")\n", + "masked_frames[mask] = 0\n", + "\n", + "\n", + "plt.plot(masked_frames[0, frame_i])\n", + "plt.xlabel(\"Time [samples]\")\n", + "plt.ylabel(\"Amplitude\")\n", + "plt.title(f\"Masked periods in frame {frame_i}\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "2d410cba-4042-4585-97ec-6752a675685a", + "metadata": {}, + "source": [ + "Next, we'll identify the four period peaks that we use for the computation of jitter and shimmer. We maintain a consistent number of period peaks regardless of the fundamental frequency, as this can affect the number of peaks that appear in the window. PRAAT uses 3 or 5 or 7 neighboring peaks to compute variations of jitter and shimmer. The number can be configured but it is important that even at the lowest frequency there is guaranteed to be 4 periods in the window." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "271d5d10-e72b-463a-b8b6-0d901eea0052", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Peak indices: tensor([ 437, 181, 1204, 693])\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABI8AAADvCAYAAAB7TvRLAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAA5gFJREFUeJzsnXd4HNXZxc9sV+9dsmTLvXdjbGMwphlwIBRDQjE9lEBCwkdIoSQhdEILvRM6wVQDptgY4957U+9d2lXbOt8fM/fuarUr7bRdlft7Hj2Y1Wp3pLszc++55z0vx/M8DwaDwWAwGAwGg8FgMBgMBiMAukgfAIPBYDAYDAaDwWAwGAwGY+DCxCMGg8FgMBgMBoPBYDAYDEZQmHjEYDAYDAaDwWAwGAwGg8EIChOPGAwGg8FgMBgMBoPBYDAYQWHiEYPBYDAYDAaDwWAwGAwGIyhMPGIwGAwGg8FgMBgMBoPBYASFiUcMBoPBYDAYDAaDwWAwGIygMPGIwWAwGAwGg8FgMBgMBoMRFCYeMRgMBoPBYDAYDAaDwWAwgsLEIwaDwWAwGJSTTz4ZJ598smqvV1paCo7j8Prrr6v2mmpz7733guM4NDY2avYeb731FsaPHw+j0YjExETN3kcpao9/KKxcuRIFBQVhfc9IcPLJJ2Py5MmRPgwGg8FgMGTBxCMGg8FgMAY4r7/+OjiOo18WiwVjx47FLbfcgrq6ukgfHqMfDh8+jJUrV6KwsBAvvfQSXnzxxUgfEkPk2WefHdDCJoPBYDAYAwVDpA+AwWAwGAxGaPz973/HyJEj0d3djQ0bNuC5557D6tWrsX//fkRHR6vyHmvWrFHldRhe1q1bB4/HgyeffBKjR4+O9OEMOF566SV4PJ6IvPezzz6L1NRUrFy5MiLvz2AwGAzGYIGJRwwGg8FgDBLOOusszJ49GwBw7bXXIiUlBY8//jg+/fRTXHrppYpeu7OzE9HR0TCZTGocKsOH+vp6ABjQ5WqRxGg0RvoQGAwGg8Fg9AMrW2MwGAwGY5CyZMkSAEBJSQl97L///S9mzZqFqKgoJCcn45JLLkFFRUWPnyPZKzt27MBJJ52E6Oho/PnPf6bf88+8qa+vxzXXXIOMjAxYLBZMmzYNb7zxRq/jaW1txcqVK5GQkIDExERceeWVaG1tDel3IaV569evxw033ICUlBTEx8fjiiuuQEtLS6/nf/XVV1i0aBFiYmIQFxeHs88+GwcOHOjxnL1792LlypUYNWoULBYLMjMzcfXVV6Opqanf4ykrK8Po0aMxefJkWhp47NgxXHDBBcjMzITFYkFubi4uueQStLW1BX2dgoIC3HPPPQCAtLQ0cByHe++9l37/2WefxaRJk2A2m5GdnY2bb76519+soKAgoDPGf6zWrVsHjuPwwQcf4P7770dubi4sFgtOPfVUHD9+vNfPv/jiiygsLERUVBTmzp2Ln376qd+/C9B3jpX/72ez2fC73/0OBQUFMJvNSE9Px2mnnYadO3fS5/hnHpHXf/TRR+kxms1mzJkzB9u2bev1nh9++CEmTpwIi8WCyZMnY9WqVSHlKBUUFODAgQP48ccfaUmo79+zuLgYF110EZKTkxEdHY0TTjgBX375ZY/XIH/z999/H3/+85+RmZmJmJgYLF++vNd5F4g1a9YgOjoal156KVwuV7/PZzAYDAYjUjDnEYPB
YDAYg5SioiIAQEpKCgDg/vvvx9/+9jdcfPHFuPbaa9HQ0ICnn34aJ510Enbt2tXD+dLU1ISzzjoLl1xyCS677DJkZGQEfI+uri6cfPLJOH78OG655RaMHDkSH374IVauXInW1lbcdtttAACe5/GLX/wCGzZswG9+8xtMmDABq1atwpVXXinpd7rllluQmJiIe++9F0eOHMFzzz2HsrIyukgHhPDpK6+8EmeccQYeeughdHZ24rnnnsPChQuxa9cuKhp8++23KC4uxlVXXYXMzEwcOHAAL774Ig4cOIDNmzfT1wv0d12yZAmSk5Px7bffIjU1FQ6HA2eccQbsdjt++9vfIjMzE1VVVfjiiy/Q2tqKhISEgK/1xBNP4M0338SqVavw3HPPITY2FlOnTgUgBHXfd999WLp0KW688Ub6+27btg0///yzbEfOgw8+CJ1Ohz/+8Y9oa2vDww8/jF//+tfYsmULfc4rr7yCG264ASeeeCJ+97vfobi4GMuXL0dycjLy8vJkvW8gfvOb3+Cjjz7CLbfcgokTJ6KpqQkbNmzAoUOHMHPmzD5/9p133oHNZsMNN9wAjuPw8MMP45e//CWKi4vp3+bLL7/EihUrMGXKFDzwwANoaWnBNddcg5ycnH6P7YknnsBvf/tbxMbG4i9/+QsA0POgrq4OJ554Ijo7O3HrrbciJSUFb7zxBpYvX46PPvoI559/fo/Xuv/++8FxHO68807U19fjiSeewNKlS7F7925ERUUFfP8vvvgCF154IVasWIFXX30Ver2+32NmMBgMBiNi8AwGg8FgMAY0r732Gg+A/+677/iGhga+oqKCf++99/iUlBQ+KiqKr6ys5EtLS3m9Xs/ff//9PX523759vMFg6PH44sWLeQD8888/3+u9Fi9ezC9evJj+/xNPPMED4P/73//SxxwOBz9//nw+NjaWt1qtPM/z/CeffMID4B9++GH6PJfLxS9atIgHwL/22msh/Y6zZs3iHQ4Hffzhhx/mAfCffvopz/M8b7PZ+MTERP66667r8fO1tbV8QkJCj8c7Ozt7vc+7777LA+DXr19PH7vnnnt4AHxDQwN/6NAhPjs7m58zZw7f3NxMn7Nr1y4eAP/hhx/2+XsEwvf1CfX19bzJZOJPP/103u1208efeeYZHgD/6quv0sfy8/P5K6+8stfr+o/V2rVreQD8hAkTeLvdTh9/8skneQD8vn37eJ4Xxi89PZ2fPn16j+e9+OKLPIAerxmIkpKSoGMKgL/nnnvo/yckJPA333xzn6935ZVX8vn5+b1ePyUlpccYfPrppzwA/vPPP6ePTZkyhc/NzeVtNht9bN26dTyAHq8ZjEmTJgX8fX/3u9/xAPiffvqJPmaz2fiRI0fyBQUFdMzI3zwnJ4eeCzzP8x988AEPgH/yySfpY4sXL+YnTZrE8zzP/+9//+ONRiN/3XXX9Rh/BoPBYDAGKqxsjcFgMBiMQcLSpUuRlpaGvLw8XHLJJYiNjcWqVauQk5ODjz/+GB6PBxdffDEaGxvpV2ZmJsaMGYO1a9f2eC2z2Yyrrrqq3/dcvXo1MjMze2QqGY1G3HrrrWhvb8ePP/5In2cwGHDjjTfS5+n1evz2t7+V9Dtef/31PRw3N954IwwGA1avXg1AcBO1trbi0ksv7fF76vV6zJs3r8fv6ev46O7uRmNjI0444QQA6FE2Rdi/fz8WL16MgoICfPfdd0hKSqLfI86ib775Bp2dnZJ+p0B89913cDgc+N3vfgedzjsdu+666xAfH9+rPEoKV111VY/sqkWLFgEQyrAAYPv27aivr8dvfvObHs8jJYdqkpiYiC1btqC6ulryz65YsaLHGPj/HtXV1di3bx+uuOIKxMbG0uctXrwYU6ZMUXTcq1evxty5c7Fw4UL6WGxsLK6//nqUlpbi4MGDPZ5/xRVXIC4ujv7/hRdeiKysLPq59eXdd9/FihUrcMMNN+CFF17oMf4MBoPBYAxUWNka
g8FgMBiDhP/85z8YO3YsDAYDMjIyMG7cOLrwPHbsGHiex5gxYwL+rH8JVE5OTkjh2GVlZRgzZkyvBe6ECRPo98l/s7KyeiziAWDcuHGh/XIi/scfGxuLrKwslJaWAhB+T8Cb9+RPfHw8/XdzczPuu+8+vPfeezS0mhAop+jcc89FRkYGvvnmm16/x8iRI3H77bfj8ccfx9tvv41FixZh+fLluOyyy2QJLuTv5v/3MZlMGDVqFP2+HEaMGNHj/4kAQ7KjyGv7/62NRiNGjRol+30D8fDDD+PKK69EXl4eZs2ahWXLluGKK64I6X1C/T0CdbAbPXp0QIEwVMrKyjBv3rxej/t+7idPnkwf9/9bchyH0aNH088toaSkBJdddhkuuugiPP3007KPj8FgMBiMcMPEIwaDwWAwBglz586l3db88Xg84DgOX331VcDsFH8xJFgOy0CHtHR/6623kJmZ2ev7BoN3anPxxRdj48aNuOOOOzB9+nTExsbC4/HgzDPPDNga/oILLsAbb7yBt99+GzfccEOv7z/22GNYuXIlPv30U6xZswa33norHnjgAWzevBm5ubkq/pY9CZbN5Ha7A451sOwcnuc1Px5/Lr74YixatAirVq3CmjVr8Mgjj+Chhx7Cxx9/jLPOOqvP99H694gEWVlZ1JG0ffv2oOczg8FgMBgDDSYeMRgMBoMxBCgsLATP8xg5ciTGjh2r2uvm5+dj79698Hg8PdxHhw8fpt8n//3+++/R3t7eQ6g6cuSIpPc7duwYTjnlFPr/7e3tqKmpwbJlywAIvycApKenY+nSpUFfp6WlBd9//z3uu+8+3H333T1ePxiPPPIIDAYDbrrpJsTFxeFXv/pVr+dMmTIFU6ZMwV//+lds3LgRCxYswPPPP49//vOfkn5P8nc7cuRIDxeOw+FASUlJj98tKSkpYNe6srIyWU4h8t7Hjh3r4eByOp0oKSnBtGnT+vx54gDyP6ZgbqmsrCzcdNNNuOmmm1BfX4+ZM2fi/vvv71c86g/yewTqJBfosUAEE8Ly8/MDfnb9P/cE/88Vz/M4fvw4DUcnWCwWfPHFF1iyZAnOPPNM/Pjjj5g0aVJIx8pgMBgMRiRhRdYMBoPBYAwBfvnLX0Kv1+O+++7r5czgeT6k9vSBWLZsGWpra/H+++/Tx1wuF55++mnExsZi8eLF9HkulwvPPfccfZ7b7ZZcmvPiiy/C6XTS/3/uuefgcrmo0HDGGWcgPj4e//rXv3o8j9DQ0ADA61rx/1s88cQTQd+b4zi8+OKLuPDCC3HllVfis88+o9+zWq29WqlPmTIFOp0Odrtd0u8ICPlVJpMJTz31VI9jfOWVV9DW1oazzz6bPlZYWIjNmzfD4XDQx7744ouQWsEHYvbs2UhLS8Pzzz/f4zVff/31gCKVP/Hx8UhNTcX69et7PP7ss8/2+H+3292rPDA9PR3Z2dmy/mb+ZGdnY/LkyXjzzTfR3t5OH//xxx+xb9++kF4jJiYm4O+8bNkybN26FZs2baKPdXR04MUXX0RBQQEmTpzY4/lvvvkmbDYb/f+PPvoINTU1AQWyhIQEfPPNN0hPT8dpp51GuyYyGAwGgzGQYc4jBoPBYDCGAIWFhfjnP/+Ju+66C6WlpTjvvPMQFxeHkpISrFq1Ctdffz3++Mc/Sn7d66+/Hi+88AJWrlyJHTt2oKCgAB999BF+/vlnPPHEEzQk+Nxzz8WCBQvwpz/9CaWlpZg4cSI+/vjjgNlCfeFwOHDqqafi4osvxpEjR/Dss89i4cKFWL58OQBBuHjuuedw+eWXY+bMmbjkkkuQlpaG8vJyfPnll1iwYAGeeeYZxMfH46STTsLDDz8Mp9OJnJwcrFmzBiUlJX2+v06nw3//+1+cd955uPjii7F69WosWbIEP/zwA2655RZcdNFFGDt2LFwuF9566y3o9XpccMEFkv+uaWlpuOuuu3DffffhzDPPxPLly+nvO2fOHFx22WX0udde
ey0++ugjnHnmmbj44otRVFSE//73v9SFJRWj0Yh//vOfuOGGG7BkyRKsWLECJSUleO2110J2Ml177bV48MEHce2112L27NlYv349jh492uM5NpsNubm5uPDCCzFt2jTExsbiu+++w7Zt2/DYY4/JOnZ//vWvf+EXv/gFFixYgKuuugotLS145plnMHny5B6CUjBmzZqF5557Dv/85z8xevRopKenY8mSJfjTn/6Ed999F2eddRZuvfVWJCcn44033kBJSQn+97//9coAS05OxsKFC3HVVVehrq4OTzzxBEaPHo3rrrsu4Pumpqbi22+/xcKFC7F06VJs2LABOTk5qvxNGAwGg8HQhIj1eWMwGAwGgxESpI39tm3b+n3u//73P37hwoV8TEwMHxMTw48fP56/+eab+SNHjtDn+LYM98e//TvP83xdXR1/1VVX8ampqbzJZOKnTJkSsE17U1MTf/nll/Px8fF8QkICf/nll9MW94GeH+h3/PHHH/nrr7+eT0pK4mNjY/lf//rXfFNTU6/nr127lj/jjDP4hIQE3mKx8IWFhfzKlSv57du30+dUVlby559/Pp+YmMgnJCTwF110EV9dXd2rnfw999zDA+AbGhroY52dnfzixYv52NhYfvPmzXxxcTF/9dVX84WFhbzFYuGTk5P5U045hf/uu+/6/L2CvT7hmWee4cePH88bjUY+IyODv/HGG/mWlpZez3vsscf4nJwc3mw28wsWLOC3b9/ea6xI2/gPP/ywx8+WlJQEHINnn32WHzlyJG82m/nZs2fz69evDzj+gejs7OSvueYaPiEhgY+Li+Mvvvhivr6+vsff1m6383fccQc/bdo0Pi4ujo+JieGnTZvGP/vssz1e68orr+Tz8/N7He8jjzzS6339x47nef69997jx48fz5vNZn7y5Mn8Z599xl9wwQX8+PHj+/09amtr+bPPPpuPi4vjAfT43YuKivgLL7yQT0xM5C0WCz937lz+iy++6PHz5G/+7rvv8nfddRefnp7OR0VF8WeffTZfVlbW47mBzrvjx4/zWVlZ/IQJEwJ+PhgMBoPBGChwPD+IUwcZDAaDwWAMCV5//XVcddVV2LZtGwsRZihm+vTpSEtLw7fffqvp+6xbtw6nnHIKPvzwQ1x44YWavheDwWAwGJGEZR4xGAwGg8FgMAYlTqezVxbVunXrsGfPHpx88smROSgGg8FgMIYgLPOIwWAwGAwGgzEoqaqqwtKlS3HZZZchOzsbhw8fxvPPP4/MzEz85je/ifThMRgMBoMxZGDiEYPBYDAYDAZjUJKUlIRZs2bh5ZdfRkNDA2JiYnD22WfjwQcfREpKSqQPj8FgMBiMIQPLPGIwGAwGg8FgMBgMBoPBYASFZR4xGAwGg8FgMBgMBoPBYDCCwsQjBoPBYDAYDAaDwWAwGAxGUFjmUT94PB5UV1cjLi4OHMdF+nAYDAaDwWAwGAwGg8FgMFSB53nYbDZkZ2dDpwvuL2LiUT9UV1cjLy8v0ofBYDAYDAaDwWAwGAwGg6EJFRUVyM3NDfp9Jh71Q1xcHADhDxkfHx/ho2EwGIMeVwfwcbbw719WA4aYyB4Pg8FgMBgMdWD3+OEHG/PhxxAcc6vViry8PKp9BIOJR/1AStXi4+OZeMRgMJTj0gPR4r/j44fEDYfBYDAYDAbYPX44wsZ8+DGEx7y/mB4WmM1gMBgMBoPBYDAYDAaDwQgKE48YDAaDwWAwGAwGg8FgMBhBYeIRg8FgMBgMBoPBYDAYDAYjKEw8YjAYDAaDwWAwGAwGg8GQQF1bd6QPIaww8YjBYDAYDAaDwWAwGAwGox+s3U76721lzRE8kvDDxCMGg8FgMBgMBoPBYDAkwvM8/XeXwx3BI2GEi1oft1FRfXsEjyT8MPGIwWAwwki73UX/7TvhYDAYDAaDwWAMLnzndWuP1EfwSBjhorq1k/67orkrgkcSfph4xGAwGGHkWJ2N/rukqSOCR8KIBNZu
Jzp8JpoMBmPosqOsBb96aTMOVlsjfSiMMOErJNhdzIUyHGhst9N/76lojdyBMMJGTZt3zK12Zx/PHHow8YjBYDDCSFO7g/57Z1lLBI+EEW467C6c+tiPOP/Zn5nrjMEYBlzw3EZsLGrCXav2RfpQGGFiS3ET/XeD1d7HMxlDhUabd5xbOhx9PJMxVGj2GWdbFxOPGGGivKkT9352AN8erIv0oTAYjDBRZ/XWSddah1eHhuHOoRorGmx2HK1rR1lTZ/8/wBgSuD08fvXSZlz56lZ0O5kTYbjgKxAPt248w5k9la30340dTDwaDjT6CAlW5iweFlh9BCPbMBtzJh5FkIe+PozXN5biuje3R/pQGBFgw7FGrHhhEyqa2SJyOOErGNm6htcNZ7jjO/a7mbV92FDe3ImNRU348WgDPtheEenDYYSJ1k7v4iIjwRLBI2GEE1/nSaONuVCGA00+zqPh5kIZrlh95u/WYTbmTDyKINVt3oAtths5/LjslS3YUtKMf3xxMNKHwggjDT6TDCsTj4YVvqGKzHk0fKhu9Y472ywYPrT67kwPs8XFcMZ33BvamfNoONDc6VPC1M3O9eFAm88427qH11yeiUcRxOX2WppbOtnuxHCi0+G90LQPM7vjcMfqc5MZbiF7w50qn+4cbII5fKjyEY983SiMoU2bj4hQx0qUhw1tnUw8Gm74zuOtw0xIGK74bgh0Oz3DKhyfiUcRxHdi0cwC1oYVvg4EvY6L4JEwwo2vaDDcrK7DHV/hoI2N/bChptUrHLBxHz74jnWHw80c5sME33G3so3hYUF7t/fcHm4ulOGK/718OFUSMPEogvi6jZh4NLzwHXvfXWnG0MdXMGLi0fDCd1JpZc6jYYNv1lUrO+eHDf6LC7aoHB74jruvqMAYuvg6jzodbjjdnggeDSMc+M/hrN3OYdNFl4lHEcLl9vSYSDDxaHjR6iMesS4swwtbjzpptpAcTvR0nbGF5HDBN0C3jZWtDRt6i0ds7IcDPZxHLJZgWNBuZ0LxcMN/DvfZ7mpMvXcNXlpfHKEjCh9MPIoQ/ruPLUw8GlY0d/S0sztcbJdiuGDrYW9mu5LDCd/dSVa+NHzwDVNl4z588HeWsgXl0Kfb6Ua30zufa2eC4bCg3e/cZtf5oY3D5UGXXxnyqxtKYLO7cP/qQ0PeWc7EowjR6lcHzQLWhhf+AensRjM8cLk9PQSEdrsLLmZvHjawsrXhie/mUGsX2ygaLrCyteGHfyA+G/PhgX/jGxZJMLQJtGaz+XwGyod4N10mHkWIFr8bDOu4NbzwFw/b2IJiWBBoIskml8OHHuIRm1wOG3zv991ODwtOHib4lyiysrWhj7843O5gYz4csPmLR+xcH9L0t+Ff2cLEI4YG9N6dYBea4YS/eMjaNw8PAk0o2CRjeOD28D02CWx2Fzye4RGuOJzheZ45TYcp/kIC2ygY+vSe2zOheDjgX7bGMg2HNv3dw307ag9FmHgUIfwnk6xsbXjhn3HFxKPhQaAJBVtIDg/83aU833u3kjH0sHa74BZFwhiTHgA754cLZJw5Tvh/tlEw9PF3lbd3u4ZNB6bhitPt6ZFzBQjnupttDg1Z+nOOD/Uu2kw8ihD+Nxi2IzW88BcPWfvm4UGgRaO1i00uhwNEPDLpdTAbhFsvK10b+pCNghiTHunxFgBss2C40CZuFuQmRQEQhMRDNdYhn4cxnPE/t90eHr9+eQuWPfkTjtTaInRUDC3xdx0BwLtbyzHur1/hVy9tZg7jIUh/G0AN7fYwHUlkYOJRhCBlSzmJwqSCla0NL8gEIyPeLP4/yzwaDgTaeX782yOY8Y9v8dOxhggcESNckGt8nMWAhCgjAOZAGQ6QTmtJMSY67ux6Pzwg4nBBSgwA4Jv9tVj21E+48PmNrFHCECXQRuDGoiYcrLHi3a3lETgihtYEyqzdW9kGl4fHxqImHGai4ZAj2NwtWnQXN9oiLx45NbzHMPEoQpDJ44jk
aADMeTTcIM6jkanCpJItIocHgZwmO8tb0drpxJWvbo3AETHCBbnGx1kMiBdFhKKGdry1qRQNA2CiwdAGcq9PijYhMVoY97KmTqzaVckEhCEOua/npwjzvCN1NvA8UG+zY09lWyQPjaER/pEEvuyviuyYdzvd2FHWwsqpVKa/ctSfjzeG6UgC8/HOSjz53TFWNqsiwdZsp4xPBxBZ5xHP8/jVS5sx9q9f4at9NZq8BxOPIgRxnuQlM+fRcMPt4enuFBGPWBnD8KCvm7eHZ9eBoQwZ21iLAfEWAwDgtvd242+fHsAVTDgcsjR3COPu6zy6f/Uh/P79PXj6h+ORPDSGhrjcHupIIM4jX7aXNof7kHrw+Z5qbC2J7DEMRRrbg4tHB6qtES1Rf2zNEVzw3Ebc9t6uiB3DUCRQ2ZovkXQe1bZ14/YP9uDf3x3FmxtLI3YcQ41g4tHSCYJ4FEnnUU1bNzYWNYHngS/2MvFoSEGcJwWieNDS6cTbW8qw4VhkFWqG9li7nCDzh3xxUlna1IGX1hdjW4QnlAxt6c9hdry+PUxHwgg31HlkNlIRgXCoxorqIR6wOFwhToTkaCPSYs09vvfk98cicUiUsqYOnPrYOvzxwz0RPY6hiG8TlEDiUSQDVQ/VWPHbd3fh4hc2MdejyjT24Tjocrr7FJe05psDdQCEBSXbqFKPQGVrvhQ3Rm5ed6jWSv+9v8raxzMZUvCfy9911nj8dslonDJOEI+s3S7YXZHptFjZ4r237K/Wxu3IxKMIQZwmY9LjAAAOlwd/WbUfl72yBYdq2Ak+lCHCYazZQBcTPx1rxP2rD+G6N7ezUoYhTH/tWyMpHr21qRQLH/oBX+/XZqdiuONbtuYvHgFgwvEQxTfzKEcMTvalr8Wm1jz09WEUNXTgox2VqG3rjthxDEXI4iLGpEdanLnX96tbI/f39t2k/HxPdcSOYyjS1NH3+Rwp0bDT4UJlizeo/VANy+FRi2CxI8SFUtzQETHH2bE67zgfqWNjrhb+4tEVJxbgD6ePQ0KUEUa90F6zKUJCcUWz9zwva+pEhwZdfZl4FCGIgJAZb0Gc2dDje98erIvEITHCBAlLT4ox0gwMQmunE7sqWiNwVIxw4H/DiTMbUJgWg/Nn5AAQ7KaR4sWfilHZ0oXf/HdnxI5hKOMVj4zITYru9f1Id+Jp7nBoGrA4XPE6j0zITuwtHh2N4Lj7llOsP8oC+9WEXOsTooxIjjH1+n5NW+ScR3sqW+m/D1SzzUo1abT1XjDmJEZhxohEAOgh4IST8uZO+EYdHdDIkTAcsfktzk8YlYzJOfG4//wpAIRrQX8bh1pR3NBB/13a1AGHi93j1SBYFQHHcUiJETYLIrUxVOF3jSlt6gjyTPkMOvHoP//5DwoKCmCxWDBv3jxs3Ro8K+Kll17CokWLkJSUhKSkJCxdurTP54cLnuepgJAYbey1K7UvwqF6DG0hiwnfAFVf9jDxaMji34nl69+fhE9vWYg8MTi/1hoZ8ajb6UZFs3cxE0k3xFDFt9sayToDQBeWRQ2Rc509u+44Zv7jW1z9+raIHcNQhWwUJcaYaHdVX45FyG3Y7XSjzKdl/LF6tiutJmRxER9lRFaChT6eGiuc75EsU63zuc8cZE531eB5vpfz6ItbF+Cr3y2izXGqWiIz7nXWnsdV0qj+gnK44l8C+NpVc/H5LQuREW9Binh/r2yNjGjoe67zfGTLZYcSgZrfEFLjhDGPVEmw71weAEob1f/sDSrx6P3338ftt9+Oe+65Bzt37sS0adNwxhlnoL6+PuDz161bh0svvRRr167Fpk2bkJeXh9NPPx1VVVVhPvKedDndVP1NijH12pU6GOGdoHprNxMwNIQuJqJNSIu19Pp+UUPkbuo7ylqw5LF1uPezAxE7hqFMm1+L7uQYE2LNBmTGC5+DSJWO+JfLRdoFMxTxLVsrSPU6j65dNBJAZEsWP9xeCUAon42kI2Io0iIGZidHmzA6
PRaj0mIwOz8Jl5+QDyByk/myps4eXZcied8ZihDxKDHaCINehycvmY7UWDMevnAqAMGB3OWITCaGr5BwrM4WsWyOoYa12wWnu2d5UmFaHOItRiocR+p8r/ObW5Q2RUbMGIoEanjDcULpUq5Yqhwp0bDWTzQsb2bjrgZ95ZemxkbWeUTcjaR87o1NpTj5kbX486p9qr3HoBKPHn/8cVx33XW46qqrMHHiRDz//POIjo7Gq6++GvD5b7/9Nm666SZMnz4d48ePx8svvwyPx4Pvv/8+6HvY7XZYrdYeX2pDLjQGHRewHr66rQvdzsjczKtbu7D4kXX4xX9+xjcHaiNyDEMdMv5J0Ubabc+XkgiG693/5UEUN3Tg9Y2lKI6gE2Ko4u88IpCd6UiJR/4TWpa7pj4kVDPOYsDknATMH5WCi2blYtnkLABCBkokchHsLnePGvm1h1n5kpp4M4+MsBj1+P72xfjghvm0WUakFpNVfjvhkXS+DUV8y9YA4BfTc7D9r0txyrh0xJj0AIS5Xrjheb6HG8Hl4XGsjo29GpDFon8UBQCadxY5EUEY84IUYeOilDmPVKO5I3i2DRn3ygiNe7047kS8PFZnw/qjDai3sYw7JYQmHkUm84h81haOTgUAbC1pRmlTJ97ZUo4mlQStQSMeORwO7NixA0uXLqWP6XQ6LF26FJs2bQrpNTo7O+F0OpGcnBz0OQ888AASEhLoV15enuJj98fXecJxHO5bPgnXLhyJZ341A7FmA3g+cnXRPx9vRJcoXP13c1lEjmGoQ8Y/SRz/B345BdkJFjx0gVAfXRyhHWC3h++RgfHpbhakqTaBdqgAIEN0HtVFqGzNX7RiziP18ZatGWE26PHu9SfgkYumIT1emGh0Od39dm3RgqL6Drh8HCg/HGaZe2pCM49EhzHHcdDpOOQkCud8pBaTVWJg87S8RABCyCZzoKiH1U88InAcR7OvaiIQmm3tcsEuOt9n5ScBYKVrakECclPiemdckZy7SIkIZG5xwqgU8Tg68drPJbjwuY09Ng8Y0mnpQzzKShDO9UhEEthdbjSJx0bG/Z9fHsIVr27FJS9sjliI92DH6fagsw/XaLpoCInEfN7p9lD3+KIxab2+v1ulqqJBIx41NjbC7XYjIyOjx+MZGRmorQ3NIXPnnXciOzu7hwDlz1133YW2tjb6VVFRoei4A+HrPAGA9HgL/nrORJwzNZuWM5RoUKMYCr5BiluKmyNmqx7KeMVDYfwvnTsCG+86FWdOEhwI9TZ7RBaRJY0dPS6IX+9nzjM1cXt4WIO0x80UnUdNHY6ILODIDni2eByHa21wuT1sd0pFrD5la75Emwx0p9o/lyIckOwVvU6wOP98vCliztehhsfD0+t9cnTPBWVOonCvj1T2TY34vtNzExBnNsDDo0cGEkMZreK4B+qsmCWKR5EYe3JNT4gyYoYoHH6xtwbnP/szHv3mSNiPZyhBnEf+5zqAyJetiQvZyTkJiDbp4eGB+z4/iO1lLVi1K7JRHoOd5s7g4hEREiKRf0Pe06TXYd6onqaJ4saOiGVsDnb6ch0Bvtf38P99q1u74OEBs0GHOQW9jTJqNUgYNOKRUh588EG89957WLVqFSyW3jkzBLPZjPj4+B5fauPrPPEnP0WwspdpkI4eCr6OA4fbw9pHawDdnYrtWa6YEG2k4XqRsBSTz1xuUhQMOg5H6mwRzWEZati6nQi20ZMUbYTJIFyO6yMgIJAd8FPGC61lD9ZYccmLmzH/gR9U26kY7vh2W/MnTXQfRUKsI7tUp4xLR2a8BV1ON174sRhbS9i1Xym2bhftcJTod7/PFp1H9TZ7ZARjcRGbnRiFUemxAIBjde29wl8Z8vAvW/MlS3SaRqK7JllQpseZMTFbmN+uP9qAXeWteGbtcSYcK4CUhCTH9p7bk9L0drsrIpuDZGMiK8FC1xmEvZWsSY8S+nIeEWdxJMQjIhimx5sxIbP3WnZPBRt3OZBr
u/9GIIG4iiOxOUCcjblJUShMj+n1fbUyLQeNeJSamgq9Xo+6up6W+rq6OmRmZvb5s48++igefPBBrFmzBlOnTtXyMEPCt9OaP7QeOULiEfngjREnk98cqMVt7+3Cu1vLI3I8QxFiI00N0L53VJpwskcif4LsiE3MisdJYwW749LHf8TNb+9kDjQVII7D2AB5CBzHeUOzI7AbRG4oc0cmI8qoh9vDY3tZC9weHu9sYeWrakAW5YHGPyNOGPtITDDJAjYn0YJzpgrux39/dxQXv7AJPx1j+UdKIDvSsWYDFYcJyTEmWIzCY3VtkXCcCeOenRiFQvG+c/M7OzHl3jWsYYIK9CUeZYhCQl0ExOIGUeBIizNjam5Cr+8z0Vg+5PqdEmBuF2P2OkwjkW1I5hUZ8RZ6vhMOVjMRQQl9ZR6RpjiR2BgigmFmvAXjs+J6ff8467ApC28nzcDiESlLjkSmHSlBzUuORrTJgJeumI3peYm0QYdabqhBIx6ZTCbMmjWrR9g1Cb+eP39+0J97+OGH8Y9//ANff/01Zs+eHY5D7Zc2v7IlXwqo8yj89nG7y01vMJfMHQEAeHtLOT7dXY27Pt6HfWx3QhWa/TIwfCEtvCPRRpVkb+QkReGus8bTRe6X+2rwDhMPFdPazw2HlK5FYjeavGduUhTGZsT2+N620pawH89QhOw2xwfYrSK7k5GokSdjn5kQhWsWjaQ2ewD4ipWuKoJc65Niet/rfbNvIlHKQktVEy0oTOt5zr++sZTlYSjEu8DoPfZko8C/A1Y4IAJHWpwZhWmxmJTd05FwjLmNZUOupSTD0B8qGob5Ou90e2hJXUa8BadN7Bn/UWPtph2gGdLocrhpSXog0iJYtlbr83k06nX49OYF+OWMHPxqnrC+82/pzggNem0P4CIHvDlXrRHoqFkh5iWTLn+nTczAJzcvwFLxnFdLuB404hEA3H777XjppZfwxhtv4NChQ7jxxhvR0dGBq666CgBwxRVX4K677qLPf+ihh/C3v/0Nr776KgoKClBbW4va2lq0t0f25thCM496iwcFERQPalq7wfNAlFGPc6dl9fr+bp88JIZ8yE3cv2wNAEaJk/hIhGZXiguYnMQojMmIw5rfn4QTC4WQvS/2svBspfSVgQFEbkHh8Xi772QlRGH59Jwe369q6erR0pshHZ7n+yxbI4JNREoWfUSErIQo/PDHk/HXsycAYC4EpdCw7AD3eiByOShuD08nkb7OI18ilc0yVGjrEs73QNf7zAThfI+Ey5SKR7FmcByHhy+ciptOLsQvZwjX/UgFuA8FyHhmJvTuogt47/Hhdh412OzgeaHDc0qMCWdNzsKlc/Nw11njYTbowPOR6/Q62CH3T9JB0R8iHrV0OuF0h1eg8y1bA4TmCI+vmI7ZYlB+RYQaMw12gjVDIMRbDDCLTuNGlbqbhQoRBPPEgH4CyTNVyw01qMSjFStW4NFHH8Xdd9+N6dOnY/fu3fj6669piHZ5eTlqamro85977jk4HA5ceOGFyMrKol+PPvpopH4FAN4JpX8GAgDkp3hDNMOdg+CrWKbHWahySWCtPZVjd7npIjI1QF38QHAekXHPTozCP8+bDEBo3c4EBGWQ3YpAjkPA6zwK94Kisd0Op5uHjhNEjCvm5+MPp43Fa1fNgUHHweH2RKwL3FChy+mm50+gOnnabS+CZWtktyzWbMAZk4RS8PKmTnbeK4CUrSUFcJkCQHpcZEoaGmx2uDw89DoO6XEWTM9L6vWcwzWspEEJfS0wItld09d5BACTshPwf2eOx4wRiQAi1+l3KEAyTjLje28MAj4O0zCf7+S4shIt0Ok4mAw6PPDLqbhhcaFPK3k27nKgbrOEwGOeEGUEJ/SiCNptVyuqfDaEfRmRLKwzy1mXPVmQcQzkKgUEV3FqLMmxDO+cjpzHeck9xSMS4m3rVidzLXD9xADmlltuwS233BLwe+vWrevx/6WlpdofkAwaSeZNAPEg
LdaMGJMeHQ43Klu6etnJtaTSTzx48JdTcdPbO6glk4lHymnpEC46Bh0X0PJIdoCLG9rB8zw4ctfRAJ7n8e7WChj1HC6cletzo/FedPJTYhBl1KPL6UZJYwdGp4fv8zjUIDechKjAC8lw70qWN3WiqcMOj1iekhFvgUEv7Cf89tQxAAQBsby5ExXNnbTEhiEdq+hC0Os4RAfYoUyjzqPwLip4nvcRj7ylFtmJUTDqBeGw1trda/LJCI3WIJ3WCKliS+9GW/DMDC0gzRGyEy3Q6zhkJljw3K9nYtWuKrR1ObGlpJktLBTSV+YRudY3tjvgcHl65WFpiW/mkS9eEYE5j+Tgey3N9LmW+pImLiib28N7vpO5XXYAR1ROYhSKGzqo85whDf/NF3/0Og6JUUa0dDrR2unodd5pSTDxiAgLNW3dcLk9dN7HCI0mGj1iBIKcNqlxZlS1doXfeeS3jifEmg2Isxhg63ahprULYzJ6Z2BJgX1iIgDpyJAaoGyJ4zjaCSEcYs3Baivu+/wA6m3dPYK2AGDhmFTsuvt0vHXNXADM4qgG5EKSFGOCTtdbGMpLjoaOAzocbs1rpLeWNOPPq/bhjo/24sPtlfT9cnwuOnodhzFiBg4L11OGd7ei78yjcDiPyps6cdq/f8T5z27EX1btB4CANxOyQ1XBFhSKoNk30aaAgrDXgRLeiUZzh4NmXfjmdOh1HLU9l7FNA9k0dxC3YWDxiCwmmzrCO+6kIUeBT9els6Zk4cUrZtMQZVa2Jh+X20N3dwOJR8kxJpjEBVskXGdAb/EoVzzfmQNFHja7C51ivkmwzCOSc9nUR8CyFlARIam3wEEWmaxcUR5k3ZQVRDAEvBElfQVra4HXENDThZIWa4bJoIPbw0ckY3OwQ9ZxyTHBhcA00RwSTvGo0+Gi13f/sjXAKx6rMeZMPIoA5AKSEsB5BHgv5lq3+eN5Hrd/sBuv/VyK697YHlCx1Ou8XaDqIpDHMdQgk4ZA3TgAwGzQU/GuWONF2xd7vSWer20sBSDkXSX5lVWR42E7kspo6SfzKCOMzqPXNpbALooGh2sFUXB8Zm/xKC9ZuBYwF4Iymvs57zPiw+c86na68Zu3duAfXxyknTdSxcmkL/m08ycbe7mQiRxxGPmTEuYJ5pFaGyqaO1HSKIzpqNTeWUc0h4ld72VDyhV1XGDhkOO4iIXkBxOPyLhbu12wdoe3vGYoUCNeSxOijIg2Bd4gipR4RASOQA5Sr2jIznc5HBc7I49KDe7KJ2XLZA4YDrqd7oAbwgCg03HIS2JzO7lQA0iQ+RzgNYeE01VcVC+sGVNiTAFL5bMSSVMe5ee6rLK11tZWfPTRRygqKsIdd9yB5ORk7Ny5ExkZGcjJyen/BYYxPM+jqZ2IR4FVy3A5EI7U2ejCcU9lG83Z8d2NBIB0cVHb1uVEt9MNizFwMByjf5o7grvOCPkpMShr6kRZUwdOGJWi2bEc8GnPeqjGKr53dC9nRB6bXKgCDUqPMQMBGphl+XRi8Xj4gM40tdhwrLHXY9NyE3s9RoVDNsFQhDf7JrBwSK6xHQ432u0u2ulQC/67uQxfHxC6qEWJ1/KClN67VIIDtgFlzcx5JBdaIhTkeh/OCeZPxxpw5atbYTLo6LgXBihDzhGv98x5JB+yaEuOMUMf5DqeGW9BZUsXatu0Fw4/3V2Fh746jEcumkavRf6fyRizAckxJjR3OFDV0oX4rMDXKkZgyIKsLwcKOd+bwlzKsqdCmOtNzIrv9T1vaD+7x8uhSOxOODItGgjSV4ZsyLaEMfOoSBS1EqKMvTaEAWFuV9TQQYVFRuiQNXxyH+s4em8P47l+vEFYzweLFyGllVWtEXAe7d27F2PHjsVDDz2ERx99FK2trQCAjz/+uEenM0ZgbHYXHGLifvBd6PA4ffZWtvX4f5JtND6z5w0m3mKgk03WkUEZ9KLTh2JNUvG1nFTyPI9jdb27Do4L4D7JZYGKqkA6aaUHCdNM
izOD4wCXh9d0Z7Klw0HbMT95yXQAgvNl6cT0Xs8lwiHbnVIG7boV5LyPNRtotxat3Uebi70d1P6z7jgAb5dPX4jzqKyRjb1cgrk8CKlhLFt7Z0s5PDzQ7fTQRQzppulLZgTDnIcKje3Bcy0JGWHaJOx0uHDbe7tR3daNP3ywBzwvOMoDdfvNZblHsgmUHecPuf6Ho3zJ7eFx50d7MfHur3FQ3BycLoai+8LGXD5uD08rBApTg2fIRKJs7WidICSMy4gLWCpPMiy1rnAZing3gvtyHoW/bG1fpXCek6gRf3KI80iFMZcsHt1+++1YuXIljh07BovFe5FctmwZ1q9fr/iAhjpEPIg1G4I6eMLVieNAlSAe+V5Xooz6XkFbHMdRNxSbUCqjsb3vkkXAqw7XWrW7qNdZ7bDZXdDrOEzO8YqFYwPk3rDJhTqQDivBXAhGvY4uJrU8z4hwlJMYhV9Mz8Gqm07EBzfMh9nQ+3qUnRiZDnBDjaZ+xCPAe93XOvfIVwQWs9Jpl0dfqHjEhEPZNIjnPMm08ofcB5o7HJp3tfPfLJqamxCwIQcRt5vCcExDlf5EQyB8It3Baiv9N7mO5ydHB3S25rAFpWzIdTWrj+YC5Hxv6nCA57U9tz7YXoH3t1fQHKYZIxLpZ84XUtJU29bNzneJVDR3wuHywGzQITtAnhSB3PdbwiAelTZ24MGvDuOz3YINKpiQ4G3dzuZ2UvDNiQo2lweAtDjSFEF78ajd7sJ/N5fhjU2lAIAFhakBn0cFw0iUrW3btg0vvPBCr8dzcnJQW1ur+ICGOsSu2pd4EK6uS0UNgmL+2yVj8NT3xwAAc0YmB5xUZMSbUdLYwRaRCmnsIyydQHautAyyI7sSBSnROGtyFvZXCRPMk8ak9Xqub0281h3ghio8z3udR30sKLISLGiw2VHT1o3JOQmaHEtJo1ijL3b2mzGid5tuAhU0rHY29goIKWAxzozixg5NF5M8zwe0qU/M7l3OQM77KuY4lIXL7aGiYTARITnaBI4DPLwgIGnViae100HL0H74w2JsL2vBGZMyA57PKTHCMbk9vKbHNJRp7KdcEQjfPG9/VVuvxyYFubdk0xImJh5J5ajo5B7bR0faFPH673AJgepxATruqsXX+4X12Elj0zA9NwGXnZAftFmDQcfB5eFRZ+1mXVUlcLyezKVig5anAt7cM63L1tweHpe9sqXHRu9JY3vP6QHvJrUa+TfDiaqWLtjFDpmBAugJXueRtoIhz/NY+epWbC8TsjDiLAYsCjLmXrdZBMrWzGYzrFZrr8ePHj2KtLTAB8zwQp0nfexAZyaEJ0iRlKIsKEzBX5ZNQGa8Bf93xriAzw2XG2qoQyaKgXaACMTlVaPCCR4MIh6NSY/D5fPzccKoZKw8sQBTcntPKonzqN3uoh3DGNKwdrnQ5RR2ANOCuBAA3wWFdjd0YrMOFJbrD3FMONyesHcKGUrQrid9TMzJNVbLLoutnU50iDvRvgL2nILkXs/1DdC1sQBdyZQ3d4LnAYtRF/R+b9DrkBytvb29oln4/KXFmTEqLRYXz84LGtxv0HuPN9ydwIYKZG4VrGU7EL6ytSOiqGH2CcSfGaB8CWDikRLInCqQe5sQZdIjWixP1vJ+6vbw2F4qlCffeeY43H76OJqr549ex7FxlwkJyw6WMUNIjiGZR9rOoQ7VWHsIR+Mz47BkfO84AsB7rmu5zhiKkFyhUakxMOiDSyipcSTPUFvn0YFqKxWOxmfG4alLZwTNzMzxOc89Cl2GksWj5cuX4+9//zucTmEyyXEcysvLceedd+KCCy5QdDDDAZJtECwsG/AuIqzdLnSJE321cbo99EaRnxKD604ahc1/PjWo28G7qGUd15QQSqii13mk3Y2chOmNyYhFvMWI966fj3uXTwr4XItRT3efWemaPMgkIzPegihT8MD5rDBYiSvFheSIlP7FI5PBu5BkrkP5kMDx3OTg4hFxpGkp0FeILqK0
ODP+dNZ4ZMSbcePJhQEnGzFmAxLFoE22qJCO72Kyr/B7Wsqi4Q4lbdUdoquACNxal1AOVUgkQCBHHyFcZWtk7P9y9gScNjEDv5yZg0vnjgj4XJKJwcrWpFFv60aZ2JUyUG6kL6SESUtHQq21Gx0ON4x6rleGaSDIdYHlWkqDOI8K0/qeS4Ur84gIhvNGJuONq+fi45tOhDGIwEEiCarbujQvoRxKfLVPcPQFCp/3hWzO2ewudDu1WccDwI9HGwAAp0/MwNe/OwmnjAssFgLCZgbHCc5HpbmqksWjxx57DO3t7UhPT0dXVxcWL16M0aNHIy4uDvfff7+igxkONIUQpBhrNtDdCa0WbNWtXXB7eJgNuj7LaAjUecR2IhVBnUd9iEfke9ZuFzrsLk2Og+xE54cgIAAsNFspdCHZz8Qyi+4GaTd5J/XOZKHQH76lawzpeDw8FV1JAHkgwpF5RM77vKQoXDgrF1v+vBR3njk+6PNZ23b5HKrp34kAhKcri1TxiMwJtHTBDVWaOxx07CdnBy899i1b03LxRspOC9Ni8dIVs/H4xdOD5m2yEF15rDsiLOCm5ib0uTEMeKsOtBQSyGZFdmJUn+VUBFJ+w67z0igK0XlE2qa3auw8InmWcwqSsXhsGqJNwZNpyDqj2+lhFQUhUtPWhU92VwEALpuf3+dz4y0GmEThTsv7KOn2Ny0vsd/nGvU6ZMSps0EgWTxKSEjAt99+i88//xxPPfUUbrnlFqxevRo//vgjYmJCW4gOZ7wtXIOLRxzHaV4PT3ZJRgQJTvSHikcsXE021m5vyUhf4lGcxUidAFqJh0QE8g9HD4Zv7hFDOj8dEyaX44KEFxLC4TwiNuXMhNDGPjNM5RVDlYqWTjjcHpj0uj7PexJUrKVIR5xHecnBRSxf2GJSPmuP1AMAZvaRKQaERzwi49dXRoMvaUw8ks3rG0vhcHswMSuehs4HgpzvdpcHbV3aLN54npckHJLn1NvscLg8mhzTUIQ4PhaODhxU6wsRl5o17LAoVSxmTVGkw/M8dR71Kx6FyXlEymVH9HHdIZgNempiUCNAeTjw3cE6ON08Zucn9Xtf5zguLB3XiIAZqPlFILJVcpdKDswmLFy4EAsXLlT05sORY/XCjtTI1L4HOiPeguLGDs0yB0gHnb4mN76QHCa2gJRPuSjYpcaa+twRAAQR4Vh9O2rbukO+KISKx8PTwLTQxSPmPJLL7opWrN5XC44DzpuR0+dzaR26Rjdzl9tDrynZfQgZvmSEKdh1qLK7ohWAUMISzEIOePOlNC1bE6/7fTmgfKHlDEw8kkR1axf2VraB44DTJmb0+dyUMARrEkdBqOc8cR7Vs/u9JHiex6pdlQCAGxaP6rPBgMWoR1K0ES2dTtRau2morpo0dzjQ7RREoKwQnKbJMSaYDTrYXR7UtnWHtAhlAHsqhDLFvppPEMJRtkbO91Dndzks80gyDe122Lpd0HFAQUoMgODXSiIiWLuFEqZgzj+lkPv7iBA3h7ISotDY7kB1azcm9eGSZAiQJlOz8vs/zwEh96i6rVuzc53neRSLx9Rf6SQhJykaO8tbFXfRDUk8euqpp0J+wVtvvVX2wQx1eJ7HkVpBPBrfT/lKhrgrpdWCrbxJ+MCNSA7tA8e6LimHKMSB2mL7kymKR1p0XGtot8Ph9kCv4/oM7vYljzmPZPPB9goAwPnTc4QbtKsj6HOJ86i2rRseDx+SK1AKdTY7PDxg1HN9dvzzhVyLWHiuPHaVtwIQWiX3he/ur9vDh1RuIBVaPtdH9lKgY2LlDNL47lAdAGDWiKR+u5WFxXkkitGhdlKiZWthaDM8lKhq7UJFcxcMOg6nT8zs9/kZ8RZBPGrrDimbRs7xAMJ4mg39L1g5jkNOYhSKGztQ1drFxKMQcLk9NNNwck7/Y0jEYk3L1lqI8yjETQJ2nZdMUb0wj8tLjhbEoD4SJhKijLAYdeh2elBvtWtyXrl9yuNDF48s2FfV
xjquhUiJ2GwmlDUcoP293drtgk2MNskNcUNwQlYcPt8TuAunFEISj/7973/3+P+GhgZ0dnYiMTERANDa2oro6Gikp6cz8agP9ldZ0dLphEHH9Wtz1LoTBylbC9V55Nt1qaXT2WfZHSMwUi48NDRbg50g4h7KjLf02S3AF7KIrGDOI8nsqxQu0kv7cSAAwmKC4wCnm0djh52ed2pBPk8Z8ZaQhalwtZQequwSnUfT+6lJz06Mgkmvg8PtQXVrV8ilZVKooOWq0pxHbEdaGltLhDKWxUFa5vqSFo7MI7KYDLlsjeWcyWGveK0fnxXXZ2MEQmaCBYdrbZq5DaWOOyBch4obO1ipaojUtHXD7eFhMnjzRPqCZB41hSHjLFTnEd0cFLswqb1pNRSRUi7EcRyyEqJQ0tiB6jZtRNmati64PDxMeh3d7O8PNVu3DwfKRNNFQYjiEbm3a1X+TUS/pGhjSPcbAJgiNsVSKh6FtHIsKSmhX/fffz+mT5+OQ4cOobm5Gc3NzTh06BBmzpyJf/zjH4oOZqjz/PoiAMA5U7P6tS1q3YlDSm0s4Nd1iS0iZbFTdCCMC2GHkeTR1Ggw/pUyJpS+rgjWmSF0nG4PdRtOCdLJ0Bej3htgr0ULVeJkyw4x7wjwFbLZQlIq3U43DlaLJQ15fVud9TqOOoKIuK8moQZ3+0KuEWwhKQ0iIoRSxpKicS5Ct9NNO6uEHJhN3YbsnJfC4RorgNCu9YD2XWylZt8A3kwMJhiHBikVyk2KCkl0SY4Rzi2l3Y76gmwQhjrHy0ywQCd2YWrUMItpKOEVj0ITErTehCv3+RyG6loOR2fnoQQ5Z/tzExOyNW44QtYIWRLm86SJQ2lTp6KsPcmB2X/729/w9NNPY9y4cfSxcePG4d///jf++te/yj6QoU5rpwNf7asBANywuLDf59OAag0WbDzP0wtNvoTd7Sw2qZBNh92FLcVNAIBFY/oPVfQtX1KbSon18ID3ItjpcKOFdWYImQabUCJo1HMhT+DJjUCLGzp5zb6Cm/0JV0vpocjBGiucbh4pMaaQSsWIK7GkKXhpo1wa2oUQXB0XWv4JwAJ05dDpcNH766Q+WrUT0mi+kLYCQoxJj4QoY0g/QzOPbNp2AhtqkEYHoTr7aJ6cZo0xpG8UkVInJhiHhnc+FdqYa122JifT0ujjVmGla6FRRLNmQsskJXN6rdZPNM9Q0pqOdPdlc7v+cHt42LqFErFQ76MFqcJYlGownwO8n6XsEOdzgND5j8xFDyhwH0kWj2pqauBy9S7udLvdqKurk30gQ539VVZ4eKFMbEJW/xNKMmmvUBhqFYiGdjs6HW7ouNBveAAJhQNKG7U5EYYyq3ZVwe7yYFRqDMb0U7IIeBf3Wkzg6GRHwm6kxain2TdafCaHKmTnPi3WHLIVPEfDzidkUhmqeAB4xaPmDgfsLrfqxzSU2S26DafnJYaUE5cvXmPLNLjGkvM2KyGqz+BuX5JjTLAYdeB5tjsZKkTwjzHpkRjd/yQzX8wdrLfZ0WHvIzhDJtV0ghkVclYhEbS6nR60a3BMQxUy9lkhivPkPq9Z2Vqr9Hs9cx5Jg4xdVoilQt6yNW3Eo0YZmZaAV2gqZ/O7kCgWnUejQhSPSKkTCThWm3KJYdkAkCOe66wRTv9YfVw6oYpHZCy0cJID3jmZFOcRAEzNSQQA7Cxvkf3eksWjU089FTfccAN27txJH9uxYwduvPFGLF26VPaBDHUOiKULk0NMtB+Z5p1Q2rrVdXqQrl9ZCVEwGUL/CIwiFz8mHklmU5HgOrpgVm5oi0jxolPe3Kn6zq+3Hl5a3TVxRRyts6l6PEMZ0q0oTcokTsOcGTnCYWK0kV4nWAaKNEjeUX9h2QQywdRip0qO45DjOOo6rGhmi8lQIKWhWSGKNQnRRrqgLNHg3ion9ybaZECsWYjEZKVroSPV2al1KYucsc9hIoIkWsVFZWJMaAtK4vBpaLfD6VbfzVkhjrmUTEsA
GCc28TlQbVX9mIYaPM9T0TDUc4s4lEi4utqUN0sLy/Y9puq2bkUlTMMB8veJMelD3nwjhotaaze6HOpvvJLN4FAbYRDmjUoGAGwSq2HkIFk8evXVV5GZmYnZs2fDbDbDbDZj7ty5yMjIwMsvvyz7QIY6paJg019QNiHeYqRJ7Wor1VLDsglkYVPSqO7Fz+Phh7yj4aCYhTAtNzGk5+clR0Ov49DpcKs+eZdaD0+YmCUIn2xyETq+zqNQydXQeURbuaaEVqcPCAICcZ2x0jVp7K4Qdnam95N3RChIITZn9RducmztAKhT8nAtO+9DoUai+wTwCvNFGiwsfJ1HUkjXuJxuKEJEoFAdH75RAFqUB3ozj0I/58dmCCJCeXMnOh3MddYfLZ2CgygpOrQmMmmxZpgNOrg9vCbOcjrmEud3U8W56R5xw4MRHGu3C063cL6mhNg8iKz9iuvbNTnXy2Xc3xOjTbTK5SCb1/cJFYlDPM+F5xoRbxE2YbQQ46tllK0BwLyRKQCETsAej7zPomTxKC0tDatXr8bhw4fx4Ycf4sMPP8ShQ4ewevVqpKenyzqI4QAJwww1aAvwBrEVqyzWlDXLE4/IBLe0Ub2TwNrtxOJH12LKPWtoh5qhRpfDTZ0EE7LiQvoZo15HRQQ1d6N5nqe7kVIcCIC3De0+hSn9wwnSZYEE0IYCcYSpnT3gm3WWJ3HscxO9TjhGaNi6ndStMyU3NMcp2akqb+qEW+ZNPRjesZd23Z8kumXZ5DI0atu8O/+hMipNu5IGb9tuaed8psYZHUONbqcbHeLucmqI8zxSstjW5VQ9A6fd7qK75VKEhNRYM1JjTeB54GidNi6JoUSbmAGZGGIpi07HaVrOQjYHpbiLAWBqrrcLk9wF5XCBnKuxZkO/zY8IJGfGZnfB2q2+KFsho2wNAKblCeO+qahR9WMaSpBraagla4Cw8UpMF2UauMmr2+RtDBWmxcBs0KHT4aZ6gFQki0eEsWPHYvny5Vi+fDnGjh0r92WGDUQ8SpXgQCC1tGpPKMvFD/GI5NDdB4BXPKq1dquWzbDmQB0qmrvgcHvw7tZyVV5zoNHYbgfPAxajDskh7lIA3kWemrtTDe122F0ecJz0OtlZ+YJ7Ym9lq2o7ku9tLcfCh37Aql2VqrzeQIPsSiZL2K3wZh6pO7FsbHegy+kGx0nflaTBfyoKmS63B/sqh+5ElbgQEqKMIU84shOjYNRzcLg9qmcMERF6ZIjdYQhkUbH+WAMLzQ6B5g5hkhmqgAD43Ou1yLpqkec4oyWUrEw9JEgmho4DYk2GkH4myqSnop7aY08Wk0nRRlqCGCpEMN5eOjQ39NSE3OOlOBLIxq0WC0oSSyH1fB+dFosoox4dDrfqG9ZDjSZxPUfCz0Mh2mSgGXhq39t9xedQO2gTTpuYAQD4an+tqsc01GgVz3Mp4hHgk2OpslDs8fB0jilVPDLodRgvlqnK3RSULB5dffXVfX4xAkPC8VIlXGwKNdqNlOs8Sow2UfFDLTfM2sP13n8fqR+SnV2IcJgSYw45sBTw7XijXtlAUb0wbrlJ0vKuAGFHIyvBAqebx8bj8mtlCW1dTvz1k/2obOnC79/fMyQt8lYZuxVkMWHtdsGqYt4ZcZ5kxVtgNoS2W0YgjpgSFW+Ad3y0F+c+swF/XrVPtdccSMgpX9LrODrpV9PhCXhzlEh2XagsGJ2KjHgzGtsd+OYAm2D2B5lkhupEALwbM8UalK3JyboCvJ8TLTr/DUXIznR8lDHk5giA13VWovI8z9tCXto8D/B2hF13pEHVYxqKeMtZQj/ftVpQAj6bBBKv8wa9jrrLd4mNHhiBaRTXc1I2gwGfTroqdzcjAn96nFmyULxkfAaMeg7H6ttxvJ6JhsEgjSPiLNL+vpli1UFDu7rl343tdjjdPHQckCFho4pAGncdqgmTeNTS0tLjq76+Hj/88AM+/vhjtLa2yjqI4YA85xHJGFLbeSTP3ggA48R6
+P0qlS7tKPOmvbd2OjXJ+og0coRDAEiLVz9zgmRqjA6xQ4QvHMfhjEmZAICPdih3Cn1zoBYuH9fJF3trcP2b2/GrlzZj/dGhMWmVY3WNMRvopETN0jW5mTeAN1jxsMwbjT+1bd1YtasKAPDetoohmaUkNTyXQMRDNf8m1m4nnfAWSFxUGPU6rJgzAgDw9pYy1Y5pqEIWk6FmoADejRy1S8TsLjdtAy+1XJHMP/ZXtQ3JTR21kXOtB7zistrXQCIaknIZKZwyXoig2FrSrEkHwKFEa6f8811uyUhfkPWC1Os8AMwU3eW+83JGb4jLJ9S8IwLpblatlatYxpgnRBlxYqEgFn+9v0bV4xpKdNqFkuQYieIccSS2qFyWXC1uTmZIDMYnTMwOs3i0atWqHl9ffPEFiouLsWLFCpxwwgmyDmKo0+Vwo1OshZdic0yPEy40jSoqlrZuJ5rED7FU5xHg7Rqkxs2lpq0LtdZu6HUcVUG/2FONp74/hnVH6vv56cFDUwexuEpTh8n419vUm1SSnYVCGeIRAFw6dwQ4Dvj6QC32VrYqOpYf/XY1/++jvVhzsA4bi5pw7ZvbcaR28Hd1I7Xt8VHSbjhEQNBCPJIjGk8Xz/tj9e2qdOXw/+x8ubcGR+tsmjgvIoUc5xEgOBcAqNr9hOxMpsnYmQSAS+bkgeOAzcXNdPNBCV/vr8WlL27G94fqAAD/21GJhQ/9gLs/3a/4tSMNKWNJkOBEIPlIrZ1OdDvVax5R3doNngeijHrJmxdzR6bAZNChrKkTR1iHzX6RKx6R+7zaO9O0XFGG82hUagzykqPgcHuY+6gfSDdkKY4Ecg9W41rqS4fdRZ3qIyU0xSDMzhe6MG1n4lGftNtFl6FF2rmeRtZ0NnWFBFLyOkpiSTrhrMnCpvCag3WqHdNQo0OsjIgxS3PtE1G5pVPdbnZyG2EQwu48CvgiOh1uv/12/Pvf/1bj5YYcpPREr+MkTdyJ+6Cl06Hazh+pb8yMtyBO4oUPAOaMFG4uPx5tUBzoSqyx4zPjsGR8GgDgsW+P4vFvj2Lla9vwzpahkYFEdvyl7lKQcPUGFcvWdoudNCbnhBbg68+4zDicPz0HAPDMD8cVHQvpQHfHGeN6PB5l1MPh8uCfXx5U9PoDAatPKYMUSMkiER7VgCwA5exIpsaa6a7WxuPKgxUP1fRcjP79i4M4/d/rsfTxH/H5nmrFrz8QaKG7k9JEY7L4VLNkUcnOJCBMUOaJ1/71x5QtJuut3bj13V3YVNyEG97agZ+ONeD//rcXlS1deHNTmSqfr0jSJsOJkBBlhFksI1bTgULDc5OiJJVMA0IY7OKxwn159T51yhWP19vw51X76Dne2G7HttLmIZF7Jls80sBhDICG9efK2CzgOA7nTM0GAHy8U7nLuN3uwmUvb8Et7+yEx8Pji73VWPbkT3jt5xLFrx1J7C437boVK0E8Iu5ftZ2GpDQ5OcYkSbwmkFzL4/XtqgS47yxvwWUvb6HzzqFCu0wXSpI4JmSDQS2U3t/nFwrdtw7X2lRv1DFUIA7MmBDz7AhkzFtVHnOysSxXPCIGguq2blkbVqqIRwBQVFQEl4vZWwNBdiZizQZJEzgiHjndvGrp/HsrhXIzkrAvlQWFqYi3GFBvs+NrhQFru8qF3Y0ZIxJx2sTMXt//1+pDdCI+mCE3Yan10WSnuEklu6Pd5abiIXGQyeG6k0YBANYdbZBtae90uOhEZ8WcPJw9NQsAcNupY/DN706CjgN+OtaoSevqcEJzMCQKtUkx6o494HULzshLlPXzp4vBip+pIO4crBGuQzeeXIgYk3cnx8MD93x2YEjkX5FrttTFJPmsWLvU+xuQ3DypeUe+EGv75mJleWffHKiFwy0Eb7s8PC5/ZWuPCevqQW6d9wboSuvKkknLl9QTEaiAIDHviLBsinBf/mqf8jHheR5/+HAv3tlSjt++uwvvbyvH4ofX4qLnN+Hezw8ofv1I06Zwo0BN
hzHQUziUw/Jpgni0sagJdpcyN9zTPxzDhuON+GJvDd7aXIb/+2gvDtZYcd/nBwd1iVSH3ft3kbKoJBuD7XaXqk5DWrImo6oAEOaopKU8mZ/Lhed53PruLmw43oiLX9gEl9uDA9Vt2Hi8cdCXwZJ5b7REFwq5J6jpKga8zuKRqfIqCnKTomE26OBweahDXS4/H2/E/Ae+xwOrDw36cfaFdNKMlige0bI1lcUjsjaSKxgmRRupW1LOmEsWj26//fYeX7///e9xySWXYMWKFVixYoXkAxgOkEWE1KAti1FPF1ZqtXHdK2YVTc1NlPXzJoMOVy0YCUAQd5Tc+HaKzqOZI5IwPS8R9y2fhKUTMrD2jydjfGYc2u0ufLC9QvbrDxTkuk8So4SLjlo3mgPVVjjcHiTHmGSVLhHGZ8YhJzEKDpcHe2TuKB2utYHnhUlUaqwZT18yA5vuWoLfnzYWI1Kicco4IXPh3UHuPiPuEakCAnUdqnTelzZ2oKatGwYdR0vQpLJ8urCY+P5wvWJXDHEeLRqdihcun41zp2XjfzfOR35KNJo7HDQPaTDjXUxKu+4naFC2RkpAycJADmRXep/CvLsNorPoJNHVQrjrrPEAhJDewTrp5HmejpuUwGwAyBBLGmpVdB6VK8g5A4BTJ/iGqSorXTtca+txv7jzf/vohPzNTWWDvrMXEXvllrKoWbbG87w380hG2Rog3OdTY83ocrqxp0L+Oc/zPFb7iI/C5oB33vjGxlLZrx1piIgQZdRDLyEkPc5soA1L1HSWk9B1Oe5iwmQxC+WwwtiA0qZO+hl0uDz4/Qd7cPZTG/Crl7fgoa+PKHrtSEPGPdSuigQyp1fThcLzvI/zSN65rtdxtOOn0tDs/6w9jpq2brywvhjrjjbA4+Hx9pYyvLe1fFA7TDvtMsvWYojzSF3BkObXypzTcRzn0/UxDOLRrl27enzt3bsXAPDYY4/hiSeekHwAwwEbFY+k20iTRfdJs0rlK/vErBHSflkOv1lciMx4C6pau2TXyDpcHroImTFCWJRceWIBXr5yNkamxuCyE/IBAB9srxi0CwkCGf94ieIhsR23dTpV+RvsomJdouQSBl84jsOELCE4/bhMZxCpsyV1tzodRztRAMCvTxACej/aWanqzlw4sbvc6HYKDgvJzqNoct6rc8P5QexqOKcgWfLOCWFiVjxGp8fC4fLgGwWuQ1u3ky5qJ2TFY+GYVDx96QzMyk/Gr+cJ4/4/FQLZI43cMhYiNqlZtkautVNklqsC3nO1rKmTdh6RwwHR/fibxaPw+6VjkRZnxj3nTsTl8/Nh0utQ2dI1aB2HXU43yPxYShkLAKTGiee8iiLC4Vrhbz1G5gQz3mKkOSg7y1oVHcvPomhYkBJNS/TS48w4U2zCMNgFY5KJESs5E0P9xUVVaxfa7S4YdJxs5xHHcZguulTl5mIAwvWCOOB8uWBmLgAhP1Htko5w0U4XlNLOdY7jkBarfhcmcm2dkBkv+zXGii28lWZObvMTg33L0V/6qVj1dvXhRO64J9KyNfXO9YZ2O9rtLug4+ZsEgPcecUyBeNRud/VwJv/2nV244PmN+Muq/fjTx/vw0k/Fsl870sgtVaSCYZc66zgA8Hh4en4Wysy5ArxdH0tldFSVLB6tXbu2x9f333+P9957D9dffz0MBnmLkqFOOxGPZASVJot5GaRjlxLaurzdzCZny19ERJn0uGi2cOOXa2c/VGOFw+VBYrQxoMV2+fRsmA06HKtvH/T10jY7CVSUtogki06H20NFCCWQkrUpOYmKX2t0uigeybzReMWjuIDfXzw2HTmJUWjtdOJvn+wflAJiu0+pqdSFZHKMurXx28uEiZy/20MKHMfhF2Ipw2oFZSxkRzMrwULL8wjnTc+BXsdhZ3mrbFfbQMEqs2RRbedRnbUbVa1d4DhgkgLxKDnGhAwxn+WozADltk4n3Y2elJ2A25aOwba/LMVVC0Yi2mTAvFGCULH2
8OAM6SVlLBwHWAzygjWbVVxYkMXkRAX3e9KV5aDCTos/HRPEo8tOyMcHN8zHTScX4qPfnIgVc/MAAN8dqhuU13lCh8wFJXEk27pdquWNkIX7pJwEWIzSPoe+jMsUFpRyz3fA63aflpdIxSgAuGvZeEzMiofD5cEnu6rgciuf44Qb6kCRKBgC3liCRhWdR/urhb/1pBz54hHpqKxkzAFQJ+FcMSsPEASK2flJcHt4vL158LrKiXNOavMJUsKkpquYuM2E0jP55zpxsChxHu2rbKObJ3odh3a7i25aA8ArG0oGbaYSiVKINkn7G5PNQLeH7+G4VMKhWius3S7EmPQYmxF4DRUKBeF0Hi1ZsgStra29HrdarViyZInkAxgOyOnGQPCGpyrPvzgg3sTzkqN6LdqksmC0kH9BMpSkQvOO8gK7YOItRiybIuTgvLGxdFBPKm0yyxZjTHoYRCu0GjebsiZlHRl8ITcakqUiFVK2NDEr8CRHr+Nw7/JJ0HHAhzsq8ZXCfK1IQHenTNIs7YCPaKxWuSrJOlPgOAS8LZy3lbbIngQQETPQ2KfHW3Cq+B6/+M/PeG/r4J1gys1A8WYeqTPB3FQk7AROzk6Q1WnNlwJxp0puLsKBau89KJAji5SrfrSjEk0qd58KB3SCadRDJ/mcV7dUtaSxAw02Oww6LqhIHwpKu7IAggtza4mwmFwwOhXT8hLxf2eOx4iUaMwflYJokx51Vjv2VLYN2nu9V0iQV6YKeOeKStlWKsyv5hYkKXodsjA5Vid/QblX3ASYlpuAu84aj0VjUvHMr2YgNdaMFXME4fDezw9i4j3fYO0g67Ir14ECCE0oAG9DFaWUNHagsqULeh0nuyEKIDRFAYSyGKcCQY98Bm84aRR+s7gQo9Ji8K9fTsE1C4XYi3e3lit6/Ugid9y1CMxWGpZNIM4judUEALBHrGxZNiUT/zp/MlJjzVg2JRMb7jyFZuUqzdKKFB0yBUPfklabStnFZE43Z2QyjHr50dX5ycJnpiwcmUfr1q2Dw9H7g9/d3Y2ffvpJ8gEMB+SKB74/o8akYq8KpQsEMqGsau2SFWq9jYT3jgg+ufmVWMLyye5q/PPLQzKOcmAgt2yR4zg6sWztUn6zIa4zpTcZALRWtlzGRcfj4XHYr2wtEKdNzMBNJ48GAPx3c5mMo4wsZNyluo4Ar71ZDQHB1u3j9lB47k/IikecxYB2u4uWxEiFCAjBxv7ucyfSicxfP9k/aEuY5OZd0Q0DlcSjreIO8Amjkvt5Zv8QW7x88Uj4zEzKCvw5XDYlC3FmA47U2bD8mZ9Rr2L+TzhQspgk4lGzSguL78SS8vmFKbJLVQGvO/RQjVW2sLOrvBVdTjdSY00Yn9lTyLIY9VgiCsbn/ednrHxt26AsVSZlDVL/1ka9ju5mq+VI2CYKdbMLlJ3zI1ToCkY2LqbkJGDeqBS8dc082snt/Jk5yIwXMp8cLg/+umr/oMpFUXK+kw1cNeZ2gNCIAABOLEyR7Hb1JScxCjEmPZxungYxS6WiuRMljR3gOGB2fjL+dNZ4/PCHkzGnIBmnTcxAaqwZTR0OfLC9YlCNN0FuYHaCT2C2Wr83EWzGZcrfIACAkeKmcrmMEiZ6LFQoTsSKOSOw/a9L8eyvZyE3KZpe41/+qQQbjjUOunGnYy7x+s5xnKrreMDbtGT+qBRFr+PNPNKwbG3v3r003+jgwYP0//fu3Ytdu3bhlVdeQU5OjuQDGA54nUfSL+hqdt4pFhdh4xXUQxMSoozIEVsEHpFob3V7eJp/sGB08A//nIJk2sb9jY2lqncjCRdqOM+Udp3rsLvQKO7kj5DZicMX30ml1N2jsuZOdDjcMBl0/XZ/Om+GcE3ZUdYCh2tw7VK1y9yJBtR1n9S0CedNQpRRspDhj17H0TwFufbm7aXeLouByE2Kxprfn4QTC1Pg8vD4cu/g677lcHlk513Fq+g2BbyCjdwmCb6Q8N1AGSahHYtYVpEd
+B6UmWDBxzediBHJ0ahq7cIbm0plvU+kILZ0JeKRWs4jWqo6Rn6pKiC4TA06DtZuF6rb5N2DNxwj9/vUgE7jm04ejSixvOrHow2DMkS5Q2agKqBuqWpLh4PmlszOV+Y8InO8Wmu3rLIyt4enpVTTAnT5jLcY8dGN8/G3cyYCEOYTOweRM0Gu2wxQb25HIG4E4t6UC8dxGEMcZzLv8Z/vFfKN5o9KoYIJwaDX0diLv6zaj0UPr1XkaowEcsed5N/wvHoulM3FwnV+3khlQjG5t7d0OmWJHDzPU/Eo0FzjdDHb7usDtbjslS2D796u4PpO1n5qzOncHh5bxM2B+YXKxCMSrF/ZIn0dF7J4NH36dMyYMQMcx2HJkiWYPn06/Zo1axb++c9/4u6775Z25MMEm12+AyFeRcWyVmwBTFoCK6WQli5Ju8Hsr2pDa6cTcWYDpvWzoLn5lNGYlpsAl4fHuiODMweDCH+yxCMSpqlwUknCCeMsBkW7UoS0WDPMBh3cHh41rdIWFDtF19mUnAQY+rFcFqbFIDHaCLvLM+gmGO3UeSRDNPYJTVZaxlEnujfIDq9SSNljkYySxZq2LhT77EgGg+M4ujv907HBd96T8iVA+u4kuea3212Kc0DcHh5HRIdYMMFGCiR8t6JFmfOor7KKMRlx+JPYee0zn5DVwYB3d1L6BNMbkq+OeLSvknRWVeY2NBv0tEz5sMxr8IbjXvEoEBOz4/Ht7SfREOX3tw2+LqudDhWEBBXEo+3i/bUwLQYpYmmUXFJjzTDphft8nYxsnuP17eh0uBFt0qMwLXBoe25SNK5ZOBJnizEFpNxpMCA3RBfwdmNUY8x5nscOcdznqeAwHaHQYfr5HmHD51wxI9GfG08uxKIxwrWgqrULf/rfXlnvEynouEt0oZgMOtpBW43StTprN0oaO6DjlLsMY8wGuoEhZ3PoUI0N1W3dMOl1mBLgnnPyuDTkJXvD+1/+qWRQlSh3KNgYijOTXDvl5/qB6jbYul2IsxgwSUGWISA0rLAYhet7tUR3acjiUUlJCYqKisDzPLZu3YqSkhL6VVVVBavViquvvlrywQ8HlJSteXehlX/o6tpUXkSKqmWJRGvr+qPCYvDE0Sn9igcAcIKorhLRYTDR7XTDIS4A5TjP1JpU1raJwqFKY6/TcXSCUdYsbfxJ9sXMEFrGcxxHyyyVdv8INyQoXWqXPeFnhHF3unnFYem14nmfHq9sIUEgZY9SRWMA+Gy3IAbMzk/qtSPpD5kE76tqG3RhqmSiYTLoJNekx/fIQFG2U9Vgs6Pb6YFex9HOGkqgZWsyxKNOh4uWIPYnZC0emwajnkNFc5csS3WkUOI8SlSx61a73esSmqiCaEiyb+S0727rcmKvWFqxMIh4BAgiwt3nToSOA4obO6joPVhol1nWAHiv92oICWT3vy9xPlR0Og5ZicKcoapF+oKSjPvknIR+c/9IyDNxJw4GlARmJ6h4vvt23BqTrqx8CQBd5Mu5zh+vt+FQjRUGHYezJmcGfE68xYi3rpmHjX9aAo4D9lS2DarKAiWOMxKarXRDGPCWL03MjlfsKgeAPAWbQ8/9WAQAWDI+PeDfJdpkwKc3L8R/fjUTeh2HqtbB01WV53mvs1TG9d1btqbceUTWwXMKkiVnqfrDcRzNPSqVGJod8qw2Pz8fBQUF8Hg8mD17NvLz8+lXVlYW9Hr5Ke9DHSVla2p+6GqJA0El55FcBwLpurIoRDv9LDEXaTB2XfMdNyU7kkrLl9Qee8C7OyUl98jl9mDNQaE2f/HY0OzVZMdSSZBfJKDOIxnjHu0Tsq1UOK63qSscjkqTH5b+qSgekXLEvhiZEoM4swHdTo+i9rGRoNMnLF0qamagkAl5aqxJ8UQD8C4qqlull7GsP9oADy+4l9L7+SzGmA20MxMRmwcD7QrGnZQ0qCEgNIjnfKzZIGve4c84Be27NxU1wcML84XsxL7bxidEGanY5d/qe6BDOu3J
KlNW0YVCwk+JW0wppHRN6s40AOzyCcvuD7KLrrSrXzhRsqBU021GHEJZCVEwGeQH6BKUlCcT19FJY9OoUBKM7MQo2jiDlF8NdNweHl1iJptUVzHg3SRQw3lEnLwz+8iOlUKuTMfZz8cb8fmeaug44JYlo4M+LznGhLOnZtH8RVLOPNBxuD1wiRlNcsac3IPVWMcfp/EzykViQH7uUUhXmc8++wxOp5P+u68vRm/IB0aJA0HpArLb6aY3qQyVFpEjqfMo9IWdrdtJa9oXh9g2nIT8FjW0D7rcGyIcxpoNshZvxNqsdHeK7OKqNfaA14UgRTzaVNyElk4nkmNMIQf4kvLIokEmINgU7E75huwpFg7b1B17IhqXNHZIsh0fr7fhoLgjuWxyVr/P1/l0jSE72IMF4jySG1ScoJLjtF4sVU6PU2fsM+IstIylVqIzhHRMPHNS4N1of6bkJALwTpAHA500SFX+YrLL6VZ8nyNB42lx6rgNSYB9sYR7PYHkG/blOvKF5GUcHETjDgAdjoGReUTux+T+rBQi+MkJzd4s5vDMHdl/NgcREUoaO6goM9BREpjtbYai3piPUGnM5TpMeZ6neUfnTuv/Hg94c7n2DpLNYd+SdHnOI/Wyro6KebNK2rX7Qj4/lRJdhh/vrAIAXDJ3REid/kj58s/i9WGg02n3NnCQIxSrGT9DskbV2hzwikfSzvWQ/grnnXceamtrkZ6ejvPOOy/o8ziOg9s9+LpkaA25wSjrtqbsZkoWkFFGvSwRKxDEgVDe3AmX2xNSCdrm4ma4PDwKUqJDntxkJ1gQbzHA2i2UPfTVoWugoUQ4BNQsW1O3ZBHwcR5JuOiQDkBnTMoM6fMCCNkNwCB2Hskc+3iLEa2dTsUCAlnkZ6jkOhuRHA2DjkOX041aazeyEvp2ExC+EIOvF49No51m+mNqXgI2FTdhT2UbVsyRfchhp1NB9g0gjH1NW7fi875BDMlXS0TQ6TjkJEWhpLEDFc1dyE0K7Rpud7nxwyGhDfdZU0IUj3IHYRkLKVuTMe5xFgM4TghTbetyKhoztcedlDzKcSL83E/ekT9ERBhMDpQeZQ2KHMbKRZMKKh6Fdl3ujxyZ4tH+qjYUN3ZAr+MwN4Qw37Q4M9LjzKi32XG41opZKpTdaY0a5UtqNMUgJYUkk04pxHlU2dIFj4eHLsSNz4rmLhQ3dMCo53DaxNCu82RzeP8guc4Th6Fex8Esw+VFxl0N59GxOmFOrJZ45HWchT6n53me5lKS3LL+WFCYCuAINhc1hbx2jCRkDW8x6mSZANSsICIxMaOCZMhJhdzbNXEeeTwepKen038H+2LCUWBoy26znOBcdexudAEZbw7Y7UQOWfEWWIw6ON18yEr1xiJxF3JMaBNJQBAlx4sTysEWmuzNu5JXOpAQrU4pg9oCAuBVrKU4jw6JZQ9zCkK32RKFvaK5c1C1cKaisYyJJeATmq1wQUFcCBkqLSSNeh0VfqXknZEylFMnZIT8M1NF98n+qsExsSRQ55HMsVdLNPY6j9QZewDIFjNQSAh/KPx8vBE2uwsZ8WbMyAvt3CfZHXKC2SOFEgFBp+PotUKtcVdLPCKL0rYup6Qd86pWISBfx4XeGYaUrQ0m51G30wPSeVqJeKR03Ludbhq4npuojguFikcS3Qgvri8GAJwzNSvkPJYJdJ43OPINlQRmU+eRCiJCY7vwGmqd71mJFug4oWsoEaJDYVOxML+flpsYsqBGMi0PVFkHRft239JkOWsptaoJPD7uX/UcZ9Izjxra7ai32cFxoZfPTc5JQLzFAJvdhX2DYG5HswxlOsm9ZWvKx5yc62oZAQpSNM48YshHSat2tUpXtChb0uk4+sEL1c5OxJ9QFxCECZnyAzsjiZKxB9SzNtepLCAAPZ1HoZYvEcullJ2StFgz4iwGeHigdBCF56rhPAKUly5pkXdFFpOVIToRPB4eeyuEScL0AC2bgzEhS/icHK2zwT0IJpYE
Ym2X40ABvMKh4sDsdjEsXU3xKEF6BgrplHn6xMyQd7FJWXRzh0OVBVY4UDrJJCG6yrOu1BUNY8wGpMaKnXgkLCx+FjMtpuUlhtzlc3xmHDhO+B0aZHT4igTtPmVW0Ubp57x3o0DZuDeKC32TXkdfUymkbE2KWNzW5cSX+wSn6XWLRoX8c2MzxHzDQVKi3qGgkzIRETocbsltsv0hgmFyiI7e/jDqddRRLMWFskksQ5LSQnx0eixMBh1sdpekjchIocRtBviUrSk819u6nHROpNa4E0GiXsJ1l7if8pOjERXifEev4+hnhDhTBzLekmS54pE687mWTgcd85RYdcaczuVbQl/HASGWrT311FMhv+Ctt94a8nOHC1Yl3dZ8grZ4npftGqrTYAEJCPknh2ttKG7owJLx/T+fXGjGSQz7mjBInUdWlcQj1crWVBx/4j6x2V1o7XT2W4rU1G5Hc4cDHIegbXsDwXEcRqfHYld5K47Xt2N85uAoW7TZlbnOvOKR/BuO28PTBZgWJYuhLiQrWjphs7tgMujoAiEU8lNiYDbo0O30oKK5EwWpyjuGhYNOhZlHdOwHmAMF8M1ACT3ziFy3Z+YnhvwzMWYDMuLNqLPaUdLYgRkj1JksaUkHzTySJxomRBlRgS7F407OeTXHPS85Go3tDlQ0d4aUawF43YYnSlhMRpsMGJkSg+LGDhyqsSItLrRsxEhCxOJokz5kcdQXte7zZFc6JdakmsOczBlq2kI/37eWNMPt4TEqNSbkzwoAjBE3lY7VD45NQrKolNNtzberZluXE6mx8s/Vpg7hfFfyGv7kJUehqrULlS1dmF3Q//N5nscmsfvX/FGhn+9GvQ4TsuKxp6IV+6raBvw9noy5XFdxkkpla2TME6KMqoSkA6CNLFo7neh2umEJQQgnTRSkls4tHJ2Kbw7U4efjTbhlyRjpBxtGOhTGEMSpMJcHvNf3xGij5C6+wchKtIDjBPdsc4cDoa5WQvr0//vf/w7pxTiOY+KRHw6Xh4ZfxskoWyOig8Ptgd3lCelkDkSNBpk3ADAqlQRp9u8IaWy3o0mGeABg2JatkV0KJYsJl9tDdyTVHH+LUY/MeAtqrd0obuzArH7EI9IxKzcpKuQdCkJhmiAeFdUPHueRb1i6HNTYjW5st8PDCzs9KapOLKXVxhPRuDAtVlJ9u17HYUxGLPZXWXG41jbgJ5YEb/mSXOeRSoHZVERQ77yX2n2J53nqGB2XIU34HZkagzqrHaVNHZihUkcZLSGiodxzXrVyRRtxnKm4WZAUjV3lrZLcAaQkYZoYgh0qE7LjUdzYgYM1VpwUYmONSKIkOBlQb9yb2tUXEYh4ZOt2od3uCumzvb1MEA3nSRARAG8wO7lfDHTaFXRb0+uEphi2bpdy8chHNFSLnMRoAM2oDHGDqLHdgTqrUL4k9Vo9OVsQj/ZXt+HcadkyjjZ8dCgoVQR8yxXVE4rVIt5igMUobNbVW+0YkdJ/ORwReqWKRyQDb0dZC7ocbsF91u3st0NfJFA65nEqBWaTdVyaitd3s0GP9Dhhk66qtQsF8aHNz0N6VklJSUhfxcXFin6JoQiZ/HOcPPdJjEkI0fR9LTmQRV6uSrWxBNpxLYRciqPiAkKKvZEwLkOwsje2OwaNlR1Q5joD1KmLr7V2w8MDRr26AgIAjBFdJKG0cCbiEckykQLJPRpModntCiztgDpla8RxlhZrVqVVO4EGK4aYg0HGTU6HCDIpIZ1FBgPKnUfq5F1p4UCR2n2pwWaHrdsFHQcUpksT/0aKmxOh3F8GAu10hzKyIoIW4y7VbdjtdNNr/pQQWrX7QkOzB0nuEVlcRFo0JIsLNReUsWYDnb/Uhug+KhbPV6ntpMn9od5mV6UbldYoyTgD1BMSmlQuWwO85SyhXueJiDBCxvyeuNP2V7XB4+Hx2s8leOaHY4rL+bTAW7Ymb2OIOI+UlmITwTA1Rr1rPMdxNNqkzhbauU6dRxLP9ZGp
MchKsMDh9mB7WTPu+ngvpv/9Wzyw+pC0gw4Dyp1HKsUQ2NTfHAC8G4JSuuwp8j3xPC+pRm44QlwDsWaDLDuzb4imkoUEacOXr7J4RNp2h5J5dERBW8kokx4jxXylweQ+8mYeyQzMjvLaHeWea2SnODcpWlUBAfBODo/U9j8mx8TxHyOhbIkwOm1wZSEA3swjuYHZ1Oqq4Lyv8wnKVxOadyXReTRGhnjk/YwNHvGoQ3HmkXLhkOe9JYtqZh7lJHmdR6Fck8hnJCshCmaDtL/HqFRyfxkc4pHSrCu1xSM1x52EqZaHmHN2vL4dbg+PpGijZMfrJDE0e09lq6SfixQdDmWLC7XL1tReXGSJ7qNQxaNS8XyV6hSNsxiRLb7X0XobWjoc2Hi8ccA2ylAqGqrhLPd4eJp5pOa45yRJW1Aer5d/jyc5iDvLWvHhjgrc9/lBPLrmKD7eWSn5tbRGidsM8I650hxTUrampmAIABmiW5XMHfuC53mfjm/Sxp3jOOo++u/mMnywXRjrF9YX08/zQIFc3+WaAGhgtl2dzYFUFe/rAJAjbgZLaYogSzx65ZVXMHnyZFgsFlgsFkyePBkvv/yynJeSzH/+8x8UFBTAYrFg3rx52Lp1a5/P//DDDzF+/HhYLBZMmTIFq1evDstxEqy0Vbs88QBQltTu9vDYUtxEdwDVSuUnkLK1OqudqrPBIM4BqXlHhPFZJDR78IhHZDIYarcRf8jPuT18j1BOKXhb96o79gAwTswfCiXI3CsgSB//QnFCUtzQPmiCkxU7j0jZmgIBQYugfMC7kGyw2UOa2B8XdyXlTCzJZ+zIYHIe2ZV1W1Mj88ja5YJD3LlV04FCFpKdDndIi13iVJFz76HO1kEiHnUoHXcVRASn24PmTnW7LwG+7btDE4xpqWJmnOT8nVn5SdDrOJQ1dUoK7I0USh0o3q66TkUdpxo1KFsDgEwSkh9CaLbbw9PNylEyyozJ/HBfZRtWvrYVv3p5C255Z5fk19Ea3zmZ8oYo8hfLvsHJSSqW/FDnUYgLyqN0c1D6/G58ZhzykqPQ5XTjzv/to49/tqda8mtpjfLAbOI8GnhlawCQLm401ln7r/CoaeuGze6CQcfRtaAUFowWylq/OVDX4/G1h+slv5aWeDumK3OSK2+Aon7ZGuDTUVNCExTJ4tHdd9+N2267Deeeey4+/PBDfPjhhzj33HPx+9//HnfffbfUl5PE+++/j9tvvx333HMPdu7ciWnTpuGMM85AfX3gD9rGjRtx6aWX4pprrsGuXbtw3nnn4bzzzsP+/fs1PU5fyCQwXqZ44PuzcsK2/vbpfqx4cTP9f7KboBYJ0UakiMp3fxP8o1ShliceTcgcXG1cAa9rRK54ZDHqYRbD8KTebNweHre+u4vejPNUHnvAxxVSZ+vXhXBMwc5UXlIUTHod7C6P5JbBkUL5DUe5gFCrkXiUEGWkjqr+FpM8z3t3JWW4zsaJ14uSxg7YXQNzB9ofpU4Er3Aof7JBcm+EHAN5xxEIi1FPO2+Fsitd3iQ8hwiOUhiZ5hWPBoPLWUmALqCOA6Wp3QFezDlLVnUx6d2dDGUsiBtVToODOIsRs8TclM/2VOOdLeV46OvDA9iBouxaT8bdwwPtDvnnvNd5pO6CMis+dOdRdWsXHG4PTHodLXGVwqx8Ydz//d1R7KkUMrO+O1Q34OIKOnzGSe4GUWKUME5KSvRIyZqawckAkJsonu8hOkyVuIs5jsMlc0b0enxnWStcA6x0TWlTBOo263Yq2ght7iAlquoKCWSuWB+C84hs6BWkxsj67C0oTO3x/waxMmJ7WYvk19ISuhEsI7cY8DWAyK8gAYBGm3h9j1P3+i7VZQjIEI+ee+45vPTSS3jggQewfPlyLF++HA888ABefPFFPPvss1JfThKPP/44rrvuOlx11VWYOHEinn/+eURHR+PVV18N+Pwnn3wSZ555Ju644w5MmDAB//jHPzBz5kw888wzmh6n
L2ThFy/z5gLID9tq63Ti3a3l9P8vmJmrWkK7L2R3uKiPPBqe52nmkVzn0WQxN2FLcRN2lbfgtvd2YcOxgd3mkbhGlLTNlbugWHOgtsfOjZT2qaEyOj0WOk4Qtvpq79nS4aC7onJybwx6HXUfrT/WgL9/fhCv/VwyYF1IDpcQcA/IC8oHlInGBCLo5ocQfCgFjuNoflpFP2Us1W3d6HC4YdBxGJEsfSc6I96MhCgj3B7BIr3xeGNIlupIQpxHisvWFIgItF27ysIh4HUREYdBX5CyNTnOozyx1LbT4UZxYwf+76M9uOWdnbQ8bKBBnUcKM4+U7EoT0TA11iSrVD4YmQkW6DjA7vLQHdC+OCKzsyphxZw8AMAj3xzBn1ftw3PrivDCjwMzV7Odjru8891i1NPFlyIhQSPnUVZi6B3XSpuEe05ecpSsMvk5BckAeu/SbylpkvxaWkKOz2TQSS7HJcRHKS9hImOeonL5ktTznWwOyt0cvvLEAnqPWHliAeLMBnQ53XTTeaDQ4VAnMJvnld3fmzQSijOo86j/c51EUYyTOebp8ZYe7sR7zp0IANgudukcKCjNuSJreLeHR5eCDZAGja7vuVQ8Ct3lK1lJcDqdmD17dq/HZ82aBZdLuwmdw+HAjh07sHTpUvqYTqfD0qVLsWnTpoA/s2nTph7PB4Azzjgj6PMBwG63w2q19vhSAhEP5DpPAF8HgrS/776qNvC8cHP7/dKx+NNZ42UfQ1+MSuu/tKCiuQs2uwtGPYeCFHkdk+aPSkG0SY/qtm6c/+xGfLq7Gre+t0t2OVc48IqH8sdfbl38Dz7Wz0nZ8Th9YqbsYwiGxainuQZ9la6RiUVOYpTsm+5C0eL610/249WfS3Df5wfx/I9Fsl5La3yFXvmB2aJoLHOC0W53YXd5KwBv2Z+ajKAZKH3fcIhoXJgWK2t3iuM4TBWF44tf2IRfvbwFSx//cUCXs3idR5ELS9ci94ZQQMvJ+p/YlzXJyz8BhHsXcUze/v5ufLC9El/srcGH2wdeFgbgm3mkbGGhZFHhHXd1RUOTQUezi0LZoSTOI7mLyXOmZfVaEH+wvWJAOtA6FJYvAeq4zrQIzAa8paqhdFgk88CRMspYAEE8KhA3O2JMeiwWu+0NtMw7pZmGgHdup8hp2KFN+ZLJoKMulP7O96Z2O5pldlImxJoN+OTmBXjj6rn42zkTMTVPuOfvrmiV9XpaQYUEmdd4o15HPzMtCkKzaYc9FQOzAa/zKJSytSO18h3lhNuWjkFhWgz+ePpYLJuSBUBYLygNFFcTcq7LnctHm/RUSFdSutZo06ZsLTccZWuXX345nnvuuV6Pv/jii/j1r38t9eVCprGxEW63GxkZGT0ez8jIQG1tbcCfqa2tlfR8AHjggQeQkJBAv/Ly8hQdNxF8FJWtyXQekTa5p0/MwG1Lx6iaf+ALmSQU99ERZ1eFYEOclJ0g21prMepx3oycHo81dziw/miDrNcLB1aFmUe+Pyt1d+qA2Knm+ctm4ovfLlTV0uxLKKHZpBOHkpvM2VOz4R+d8d628gG5mCA3iBifm4ZUlDiPdle0Yvp9a1At7hSPljmh6wvaca0fEeewzG4cviwZnw7A28XM1u3Cyz8NTBcC4D3OmAiWLxEHihbXfSlB1sSJIHfTgHTiISUsAPDl3hpZr6UlHg8/QMZd/U5rhFyae9T3JLO100EXH3KdR2aDHg/8cgoSooxYOiEdBh2HqtYuuhExkFDaGAPwbY6hRDzSJjA7Xzx3Q2lY4RWP5LlddToO//n1TFw5Px/PXz4LJ48TxKOBFldAxlzughLwOd9VKFtTW0QAvI7l/rpdknMyL0l6pzVfkmNMWDw2DXodR0O0d1cMzBImuZuggBD3AShznDV2aCMUkw2CmhDyzci8Xq7zCAB+MT0H3//hZNyyZAxSYs10brGzfOCMu01h2RrHcbSkWU52MYFsDqh9bydla7ZuV8hzD0WB2ddeey2uvfZa
TJkyBS+99BJ0Oh1uv/12+jUYueuuu9DW1ka/KioqFL0eLVtSMKmQ23mHlJEpObFDIRTn0ZYSwYZIbghyueP0cTh7ShZOn5iBMyYJwuCAFo+6lYuHchYULreHXtgnZSdIDiyVwriM/kOzya6hnHp4wvS8RDx+8TSsmJ2HT25eAJNeh4rmLhQNwDbeRDxSsphQ0nHrjY2lcIklfTNGJNJAPDUZIU4sS/spXSJBmlLbNvty/owcaqcuFK833x2qH5DCIeAVj5Q6j7qdHsk5T24Pj/VHG7CvShBzpXa6CoXRYuj94X4WdLZuJ13QjpBZOjlvZHKvx/ZXtw24ktVOHzu60pIGJeIRKTfQwnEWqr2d3Atyk6Jk5wABwOmTMrHrb6fh5Svn0LJrMpcYSCjNtwO8m4RyXWcOl4c6GdQWj0jeZFVrV7+fzVKFziNAmLPc94vJWDQmjWZmDbRGKTYV3GaJapzvbdptEpC1w9F+mlXQTroqOpyn5SYCGMDOIwXnOgk2VyIaNtK27eqKRySOoKq1q897rMfj02lNwdzOn5li5hlxzQ8EvA0R5Auj5DohN4bC4+GpUKz29T3aZECSKGjWhOg+kiwe7d+/HzNnzkRaWhqKiopQVFSE1NRUzJw5E/v378euXbuwa9cu7N69W+pL90lqair0ej3q6nqmstfV1SEzM3A5TmZmpqTnA4DZbEZ8fHyPLyV4A7PVyDyS9qFTUi4gBboL3dDeazHX6XChuKEdX+0TdooXi7tIckmKMeE/v56JF6+YjUvnCgF76482DMhFpMvtobsUypxH0rsz1Fq74XTzMOl1mggHvowL0kp9U1ETXlxfhJYOB3aJN4Kp4oRALufPyMVDF07F9LxEGqy5uXhgZSEA3pacSiaWZDHhcHkkB8VuEf8mv543Am9ePVfV7BMCmVgequl7Uk+dRwpE7MRoEz6/ZSHeve4EfP7bhTDqBRdCKJk7kYCWL8mcbPjuZku97r+yoRhXvLoVn4t5Z3K63/THFLGM8Gidrc/PJhmflBiT7A2UMyZn0vPo90vHIsqoFzKQ+sjYiwSd4rVex4E2OZCKGuKRN+dM/ft+bojBmuReoEQwJpBrF7neD7Q8DMB3syByZWvVrV3gecBi1Km+oEyINtJ5xP6qtj6fSz5/BTKdR/5MELvsVrZ0KXJlqY0agqFcV7kvpAMeyaVSEyIK9NdN95AK7mJ/po9IBCC4mpS4NdTG21FTvpBAyhXllq11OdxUhFA70zAz3gKjnoPTzfeZe1TZ0oUupxsmvQ75KnZyJgaDXQNINFTaVVH4WW9othxaOh1UzFPbbQb4hGaHKB5J/kusXbtW6o+ogslkwqxZs/D999/jvPPOAwB4PB58//33uOWWWwL+zPz58/H999/jd7/7HX3s22+/xfz588NwxAJqZN7I7bpEHAFyywVCZURKNHScECRXb7PTmtmX1hfj4W8Ow+kWPvBpcWacqGJo87yRKTAZdKhu60ZRQzvdDR8o+F4kwj2pJJP77ESLJsKBL2SBcKy+HS63Bwa9DtWtXbji1S1wunl8c6AOB0WBgSwA1GDeqGRsKm7ClpJmXHZCvmqvqwZqLCZiTAZwnBis2O0MuWNWa6eDlqv9edkERfbqvpiY7d2Nbmq3B+z64XR7UCRa2pUuJNPjLXSiNCMvCVtLm/FzUaPm4rgclAYn63Uc4swG2OwuWLucknaaVu/rWZatxgLen+wEC5JjTGjucOBwrS2oo5SIR0rGKD3Ogv9eMw97q9pw6Zw8/HSsAdvLWrCvqk0TYUwuNEjVZJDt9CTX+i6nGw6XR1apsZKuR/1BytbK+xFtDytsjhEIEqS8vXTglDMQrCqWrckVjypEN1huUrQmTuN5I5Px8a4q/HSsEQtGpwZ8jtPtQYU49xip0nU5MdqErAQLatq6cbTWhtkFvZ2IkaBdBXdxggqZRySHSotNwlCdRwfFiISJWco2231Jj7MgJzEKVa1d2FXeipPGKtt4
VosOh/KytcRo6RvCvpCS9CijXlHmViD0Og7ZiVEoa+pEWVNn0I6JpNNaYXosDCo2Ypohioa7y1vh8fCar19CgWYeySxbA+Q3viIQB3dStFGTxlfjM+Oxv8qKfZV9bw4QtAlB0Yjbb78dL730Et544w0cOnQIN954Izo6OnDVVVcBAK644grcdddd9Pm33XYbvv76azz22GM4fPgw7r33Xmzfvj2o2KQFRB1W4jyR4zzqdrppcKacLjdSMBu8ocnkJtLpcOGhr73C0fS8RLxy5WzZXSkCEWXS05KGH48OvK5rZEIZbdIrOtnlTCpJO3sy2deSEcnRiDLq4XB5qGD5/rYKOvY7ylrg9vAoTIuR1bo3GPNGCkLk1pKmAec8o7uSCiaWOlFAAKSF5ZMxyIg3ayYcAcKkeayYYfVzUWD3197KVjjcHiT57FyrwYliePrG4wPPdQYoD04G5GVedTvd2FvZSv8/K8GiKGcsGBzHYYqYRbSvDycCyTtS2u1vWl4iLj8hHwa9jrqe9oY4yQkXHSpkYfguROUsKL8/5BXqtRh38pr9ORHIYnNcpnqLyel5idCLuUehBDeHEzWdR1IbowDAA18dwuWvbAXgdYepDVm89xUTUNHcCbeHR5RRjwwVA9uJAH5oAIVmtxN3sRIRQXSVt3TIDweubhWEhKwE9cedOIlq2rqDXo/cHp46DSeoKB4BwEJRpPz2YF0/zwwfapStJdKumvLGneTJZcSbNRGKiUt8X1Vr0OeQa/xYle8z4zLiEGXUw2Z39dnBO5y0q1C2Fi+zgojQqFGnNcIJo4Q59bYQnb2SV7Td3d145JFHsGzZMsyePRszZ87s8aUlK1aswKOPPoq7774b06dPx+7du/H111/TUOzy8nLU1HiDNE888US88847ePHFFzFt2jR89NFH+OSTTzB58mRNj9MX6jxSEpgtI/uECEdmg05RyVyokPrkPeLC5XCtDS4Pj5QYE4r+tQyf3LxAcclSIEgnjg+3V8Dl9qj++kogk0AlwiHg25Ej9BtNlYa7Uf7odBy9gZBJxMYiQczLTvBOIJdPy+n9wwqYMSIRJr0OdVb7gMs98gaoKjv35Jz7pRqWrfhz6gTh2vvDoZ6Tux+PNuD293fjqe+PAwDmF6aouoN0YqEwqdxc3ATPAMu+8Q1OVhIeGicjA6WypRMeXpjYbrjzFHz/h8WqCva+UPHIR6zy57BPpz2137e/8plwQxYVSsoZ9DqOjrtU8WhneQuue3M7AOHaqMWm0bjMOHCcMJElcwx/eJ6nHRbVzFuMMRswSXQ7bi8bWO4jNa73cp1Hu8pb8MKP3uYBWjjOAGDhGOGae7DGGnTsfcViNa/340VR4nA/JdLhRA3BMDVOEI+aOx2S5688z+Pn442026kW53u8xUjncMH+9qVNHehyumEx6lRzmxGWTRW6b32yq0qRO0tN1ChXTJQZmM3zPDYVNdHgerVL1ghkU/6nYz035Xmex77KNrTbXT7ikbruX4NeR53M648NDFMALVtT5DwiZWvK3GZaiUdkzIkBpD8ki0fXXHMNHn74YeTn5+Occ87BL37xix5fWnPLLbegrKwMdrsdW7Zswbx58+j31q1bh9dff73H8y+66CIcOXIEdrsd+/fvx7JlyzQ/Rl+8ZWvKVeoWCRbHBp9Udi3DkglT/XaDSQ7K5JwE2d2mQuGCmbmIsxhwuNaGp344rtn7yEGNsHRA3qSyVqxVzkjQ5ubizzifjmt2l5uGHL5+9VwsGpOKhaNTcf1Jo1R9T4tRjxPEMsjV+wZW9yUywVBy3gs/L33syWSyQKHbIxQWjSEiTjN1f9W0deHq17fh411V+FHcpT5jUvCcOTlMz0tElFGPpg4HdVoMFNod6pSryslFKG0Uxj4/JRq5SdGyy+ZCgTiASDA34dPdVXh8zRG0211UWCId09SA3GsOVFsHVGh2p0/ZmhLkighvbSqDhxcyCJ/51UxN7vvRJgPNOAwWZFvV
2gWb3QWjnqPNNNRidr4wwd02wEKzyeJCjcYoUsfd15XBccCFs5R1CA5GaqyZioG+7sZNRU14e0sZup1u2nFX7XEn5VADqWTRprB9NyB0SNOJpenNEl0oT/9wHL9+eQsAwW2WqdFcb5q4kA8m2JLF5rjMeNXn+otGp2J0eixsdteAmOPxPE/PTyWbwqRsTcqaDgA+3lmFS1/ajD+v2gdAu83hJePTwXGCeORbsvjC+mKc+8wGLHvyJ+wQPw9qi0cAcNpEYVNy1a7KiFcVuH02A5Wc6yQkvblD2pgfr2/Hggd/wB0f7gWgnbM0LzkaOYlRtNFOf0j+S3zxxRdYvXo1FixYIPnghiNUQFBwoUkVuygQ21ooNGjYrjcQxFW0t7IVPM9T8UhtG6s/STEm/PO8ybjtvd14bt1xrJiTFxa3TSioEZYOCL8jADS1hz65qBdtrVp03AkE6YhyqNaGY3XtcLp5JEQZMSY9Fm9dM6+fn5bPuVOzsP5oAz7bU43fLhkdFqE0FNRo3Qx4g/Ekjb24Q6FFly1/ZuQlwajnUGvtRmVLF/KSo/Hxzqoei/oJWfE4a3KWqu9rMuhwyvg0rN5Xi//trFRVnFAKWVSY9LqQc6oCQTKkpIx9WXN4cu4ArwOIhGZbjHoUN7Tjd+/vBs8DO8pbaAnlFBXHZ2RqLGJMenQ43ChqaNdk8iqHDoUh6YTEaKMQDixBRPB4eHwvuv8eunCqpvfAuSNTUNTQgS3FTXSSz/M8rntzBw7XWnHO1GwAgttM7WyGuSOT8erPJfhRbJIxcK73yl0ocsWj/eLi/YKZuThvRraqOVP+jM+Kw5E6Gw7X2nDqhAx0Ody45o1t6HS4sau8FWQ0RqvoNASAk8akwaDjcKTOhuKGdoxS+fXlYFMhB0Wv45AcY6ZOvvQQS/3cHh4vrve6zZZNUff+6su8kcn4an8ttpQ04+ZThMe6nW789ZP9KG5op2Vz03LVvwfrdBzOn5GDR745gi/31tAmOZGi0+Gmi2uyuSMHuWVrb20u6/H/UzX4mwPAqLRYnDo+A98dqsPne6rxh9PHged5vPBjEQDvBiWgTabi8unZeHTNEeyvsuKD7RVYMSdy497hsxmo5N7uncuHvo4HgMfWHKGVJIC2DbBOLEzB+3WhRUFIvrPn5OQgLm5gTNYGOjzP09IlReKRuIho7XTCGaK1lYpHGlnc/JmUHQ+DjkNjuwOVLV10N4J0ytCS5dOyMW9kMpxuHu9sKev/B8KEVYUdCkDILQGEuvNQIc6zsIlHWd7OW8QJMjErXvPJ/RmTM2HS63C8vp0G+A0E6GJCYeYQEX+JIBQK4RSOo0x6jEnv2XVtg2g1vm/5JHx+y0J8cvOJsoJ/++Pi2cIO+ztbygdUBopaJYupRDTuCH2yUUs674TBcZiVYEFKjAluj3ez4JPd1SAbhT+LeVTjM+OQHKNedxC9jsOk7IGXe0QzjyLgPCpr7oS12wWzQYcZQcLL1eKEUYL7Z3OJd5L5w+F6fHeoDpUtXXheXGBoIWKcNDYVZoMO5c2dA8Zx6HJ76M60GoHZUjuKHawWzoHL5+dj0RhtQ4XH+gUorz/WQH/3j3dW4usDQlj/xGx1F7UJ0UacKObffLW/tp9nh4d2FTqqAt77dLBSwEAcq7eh3e6CxajDF79diD+ePk7RMfTFXDFbckdpMy2te3F9MT7aUYmd5a3U5U5EY7U5WxTGNhU3SV54qw25Jhv1HKIUbAwlxUi/xjvdHhyo7nm/mzlCvQY0/pw9VXCLrzsiuMeLGzt6OaXS48yaOGFSY8247dQxAIAHvjoc0S6L7T6bgUoiAEgHzCaJ+WY7/Bx/apeG+rJIQii95Bn9Y489hjvvvBNlZQNnkT5Q6XK64RAvtoosjlFGagcNdRc63M4ji1FPd/83FTfRrAuSUaAlHMdh5YkFAID3tlbA7pLW1lwr1CpbIwvBti4nDeLtjwbxhq5VTbQ/xFZe2dKFLcVCScHEMIx9
vMWIk8cJFzzSmnwgoMZONAC6E0mcZKFAujJoVRvtz3hasmiD0+3BjnLhZrdgdCqm5CZolrmzeGwa5o5Mht3lwaNrjmjyHnIgGwZKx16O84iMfXq89mPPcRwtXSP5Q3sD5B8tHqf+gpaWzPWRtxRuvC2cwy8ekb//hKx4VTvfBIIEax6ottJj/O5Qfa/nzVaxsyYh2mSgOYdfDxgRwXtPVqNte5uEUpZ2u4ue84Uql4oFwvdaD/Q83z28976nxbzvrMnCYnagjLta93iyqGyUcJ3fI5aMzshLwuScBE02ZwjjM+OQEGVEh8ONA+Km8AfbKwAIzWA4DlgxOw9zCrQRMgpSYzA5Jx5uj9C5N5L4lqwp2RhNIEHpEpxHZU2dcLp56HUc/nj6WNxz7kTNnEcAMGuEsElwpE6Y1+0URYzMeAvIr35iYYpmG8TXLByJUWkxaO104os9kStZJNd3JSVrgFCiCkhzHrV1OlHvIyqb9DpNx3zh6NSQDR+SrzizZ89Gd3c3Ro0ahbi4OCQnJ/f4Ynghk36zQYcYBcGpOh2HlBhygwntg+ebeRQu5ov5Mx9sq0Cnww2zQReW8glAqJHNSrCgqcOBT3ZVheU9+4O04VTiOgOE3UwyKSUW4b7geT7szqPEaBMNVvx4VyUAddu29sW504Qdr8/31ES8PpqgRutmwGdXcgCXrBLX2eE6G0oaO+BweRBrNmi+mOE4Dn9ZNgEAsGpXVa9duUihdsmilEVFuMeelKPtrWyjYZoA8PulY6HXCbuzV8wvUP19yY7r2iMNA+ac9zqPlImlcsSjY3Wk25H2Tt+MeAtGpcaA573ZQ6RDC9no0HHeMH21OVsM0X1nSzn9m0cSIiKYDTpFi/iUGOk70xVi+UhStFHx9SYUiPOoqKEdTrcHRfVCxtHJPgLx2IxY5GkQ3nz6xAzoOKG7Y3lTZ/8/oDE0RDcSzqM6ITBZ61gIQFh/zCkQ1nZbSppQJ5ao6zhg21+W4tDfz8RDF07V1GVOyvK+3BfZDUIyp1daTZAsI4rieL13Q/6WJWNw1YKRmv7Nc5OiEGc2wOHyoLihA7tEwfIX07Px9+WTcMmcPPzl7Imavb9Br8NFYn7bV/sjLx4pLUeXM587Ko55TmIU3rpmLtb8/iRNO2gnx5jw4W9ODOm5ku90l156KaqqqvCvf/0LTz/9NP7973/3+GJ48W2tp/QkJy6CUG8w4V5EAMB8cUeSBOuNy4zTfBeUYNDrcPWCkQAES+1A6MDULE4CU1Qo2SCT8toQStdaOp1wuoXfP1zuE8A7kSFruXA4jwDg1AnpiDbpUd7c2as7RKRQI0wT8Ip/DRKcR+TcD9fYk3bch2ustHxpfGZcWPJIpuUl4txp2eB54IHVhweEkEDD0hVmndGdKglla95y5fA4Dsk5f7TOhuq2bjR1OGDQcbhh8Sh8d/tifHnrQk3yd04Zn0bP+Z3lAyNElwjGShcWcrJvSAZFODosAsA88V6/ubgJ3U43ban89rXzcOeZ4/HaVXORrVHu0rIpWShIiUZThwOvbCjR5D2koNZGAXEatnU54XCFFk9Axl0LsSYQuUlRiDHp4XTzKGnsoOO+8sQCLB6bBotRhxtPLtTkvVNizbTLJtmgiiRqZB4B8sQj8ncvTA/T+S52YtpS3EyD8sdmxCHGbFCU6xcqtHStqElS9qvaqBGWDXjndZ0Odw/nYl9Utggl6eE613U6jm4MHqxpo86jGSMScfn8Ajx4wVTN15enTUwHAGwpaUa3MzIVJe0qned0Dd9uD3meWiaK5CNTY7BoTJqmeUdSkbyy37hxIz788EPceeedWLlyJa688soeXwwvRFUmiqMSyESsKsRsj3BnHgHA7AIhPJcQLucJ4ZK5eYgzG1DU0IHvD/e20YebRjr+yscgSxz/6rb+x5/k4yRFGzW1M/vjuwsWbzFo1jLYn2iTASvmCDsUT35/bIAICOrkIaRLzDzqsLvQJd5kw+Y8EksZSps6
sadCcJ6MD4MDgvB/Z4yDSa/DhuONA6KFNx17xZMN6buT4Xacjs0QzvFj9e20lGJcZhwsRj1GpsZoFmwbbTLgTLGM5ZUNJQPinFcj3xCQn3kEaNOuOxAk92hLSTNKGjvA88Jxj0yNwY0nF9LSMi0w6nW4Xcx4eXbdcUmLbi1Qq7NmYpQRpFlVqOUsVeKCUqsOPP5wHIex4vX+YLUVpU2C82h0eizeuHouDv/jLJw/I1ez979wlvDa/91cji5HZOMJyHVeSaki4J2jSxFFisSudoVhCg6fJ57vW0ubaelaOJtU5KfEYEpOAjw88M2ByJUttnUJ5yXpliaXGLOB5mHWWUOb25HnZYUpigLwruG2lrTQnDMtc5b8KUyLRXqcGQ6Xp1f2T7igZWsKnUeZCRboOMDh8oRcSUDGXKtOikqQvLIcP348uroGTjjpQEZN5wmZHBD1uT8i4TyKNhlw6nivVV3LCWQg4ixG/PqEfADAnf/bi7e3lIWcEaQmN7+9E794ZgOdWKkhHpIbRk0IZWveTmvhveCcMt473ieNTQub6wwAblxcCLNBhx1lLfhhAAiHarRuBryZVfUSHYfRJj1iFE5qQyU9zoykaCPcHh5f7BVs5aT7XjjIS47GBbNyAAAv/1Tcz7O1x6paFoa0Gnmn20PvOeG67uenxMCo59DpcNMgW9J5U2uumF8AHQes3leLR76JfOaVN+Mu/JlHFWEXj0juURt2lbcCEDJ3wtX97NypWZiSk4BupwefRTjrTq3sG53YeQsIXUioEzcVMsK4oCSbBd8eqoPTzcNi1CE7ITzi1dlTs5CbFIXGdjvejmBzFJ7naQkTCT+Wi1TnkcfD0wYR4TrfJ2bFI9ZsgK3bRe/xZOMgXJBy1dX7IlfC5M2TVD6nJ7mEoYpHteK8PpxCAqkeeHdrOTy8UD4VrhxVQBCrF4hB+T8fj0xVgVpuM6NehyzxOlnRHNo6nlSahKNzslQkr+4efPBB/OEPf8C6devQ1NQEq9Xa44vhpVEsN1DDeZIjwXnkm3kTTvEIAH5/2likxZmRnxKNk8IsHgHA1QsKYNLr0NzhwF9W7cfVr28Lawlbt9ONL/fVYE9lG47XC9ZiNcqHshJJx7VQnEeieBSG0FxfZo5IwqIxqRiZGqNp949ApMdbsHJBAQChO4MrxK6EWqFeYLY0e3ODT6lsuOA4jnZWIp+9cGSv+EJKVtccrENpY0dY39sfIuAkKdw0IKJzh8Md0i47cSgZdBxtBaw1Rr0Oo1KFRQQJrNcy0NGX6XmJuP/8KQCAF9YXR7zjHhWPwuw8cro9dFETji57gCBW5KdEw8ODZgyGywUBCNcc4kL5dHdkMw69XbeUn3NS3YZEcAjnRhHJPfpKXMSPSo2FThce0dCo1+Gmk0cDAN7YVBqxeAJrt4u2bE9S6EJJ8ylnCYXGDjtcHh46LnyZlga9DrPEAPziBq/bLJycOUlwmm4ubpYUKq8mam7KE8E31GYopJNqOIXiiVk97+UzNWiC0B9EPPrmQG1EHMbenCs1TSChZbYRYTEjzGu5UJAsHp155pnYtGkTTj31VKSnpyMpKQlJSUlITExEUlL4P1gDFbeHp6qhGhd48qEra+p/YWTtdtGa+XAuIgGhZGHrn0/FD384OWzOB1/S4y14/eo5WD4tG3odh83FzdgqhnmGg0Bhl2rsUpCdveoQMo9IiVO4hUOO4/DWNfPwwx8WR6Q296aTRyMp2ojj9e3426f7IyYi2F1u2r5Y6W5FjNmAaDGAtz6EHarGCDgOgZ5OI4OOC6vzCADGZMTh5HFp4HnQVuGRgoq3Cscg1mygZaehOBF8s67CtZgDgDF+O9BTwljOcOncEZhTkCR24olsFyZatqZQREgUJ6mtIZYuEbFSr+MUL2SlMFlsx07ur4VhXkyeMzULeh2HvZVtNAMmEqi1UQB4r9uhuhEaVLrWSIFsFBDdJtwiwvkzchBnMaCiuSuscztfWsRzLsakV5z5
Q5wcdSHM7QCv+zw9zhJWdzcpXSOMC/M9viA1BuMy4uD28Pj+cGS6rqkZB0JEoNCdR2LZWhidR2MyYmmnbwCYkZcYtvcmnDEpA9EmPYoaOrC5OPznO9nESYxWvjlA8qqIU7g/vOLREHAerV27FmvXrsUPP/zQ42vt2rV48skntTjGQcdPxxow5i+r8eYmwVarRjr6WJ8Wqf25KsgFLt4SnjA7fziO63HBCTcnFqbiqUtn4MKZws7k/3aEL1wxkPVYjYV8brIgHh2useK+zw9ga0nwi2gkdiN9CVfpgj8JUUaahfHu1gqc/sR6bCpqCusxbDzeiNd/LgUAmAw6xeIR4Jt7FIKA0K7e5EYKM0Yk0n9PzU2IiHB88ynCjvT72ysi2nmtQSXxluM4pErowNTQHhnRmDgRAGGCRcpawsUZ4o50pMQjshvqdR4p++yTsoTq1u6QdlrJ9T45xhRW0XBSTs/FYzidR4Dg6D5pjLArvWpn5NxHxCWkNAcF8NkkCqE8HfC6FsJ5zo/L6Hl+TwpTYwxClEmP0ycK5/zX+8N/zh+utdIKgGRV8kyF891md9Ecpb6oIaUsYc5BIaHZgBDFkR2BHJbTJwmxGGsORFY8SlXhfPOWrdnhcHn6vNbzPI868VwPp5BgMep7lEb6C4jhIM5ixC+mC7EE/w1jqWprpwOHaqw050qNuXxeEhGPQnNJR2LMQ0WyeLR48eIeXzNnzsSRI0dwxx134LbbbtPiGAcd9395CL5uWjXCDAtSYhBt0sPu8tAsnWBEynky0LhwtiAerd5XE7ak/ka/BX5anBnRJuUL6dHixLzeZsdrP5fi4hc2BS1liUTe1UDhsnkj8J9fzcSUnAQ4XB489PXhsL13l8ONX728BQ98JbxnVoJFFSGNhOWHknfmndyEz4EAAKdNzKBZL5fMHRHW9ybMKUjGOVOzwPPAfZ8dDLvF+cb/7sBZT/6EEtHxpsb5lyKhy2akzvulPi3ZT5uQEdbdcMArHm0taQ57eLLHw+OKV7filEfXUUeAUucRmS+0213UMt8XkShVBYDZ+T0XEuFqkODLhWIr57c2l4W08FaTg9VWfHewzhtqqsIEX2pZAxn7cJaop8Sae7iswuk0JJCw/HCXsvx4tAFnPvETrnh1KwAgWQXBMNpkoAvTmhDcR6R8iYhO4WJKTiL99/is8HRT9YeIhj8ebYhI9y2ytlLjWpshbu5+uKMCM//xLW5+Z2fQ57Z0ejswhltI+NNZ48Fxwn1+Unb4z3UAuOwEYU75zf7asN3jV762DWc9+RO+PyRkqKrjPBKu7wdq2rDmQG2f5Zdujzd+ZkgEZhPWr1+PK6+8EllZWXj00UexZMkSbN68Wc1jG7SY/dw+OSqIR3odR3d0d5a39hmiOpzFA19m5ychJzEKHQ431h9tCMt7+tetqxGWDghjGefn5thVEbj7wHAef47jcPbULLx21RwY9Rx2V7TSxbzW+Fvo1Qq5GymWAJY09l+a0UidR+G92USbDHj3+hPw5tVzcfHsvLC+ty9/XjYBFqMOW0ub8cXe8AVrdthd+Gp/LQ7VWOlukRrOP5J1V9zQjt0VrX1OmCPRYRMQQjXvOGMcLpqVi7uWTQjrewOCFXx6XiI8PPDmptKwvve3h+rw07FGlDR2wCE6gpVmXVmMenrtrghBRIhUqep0nxKGhCgj8lPCE97ry5mTMzEqLQZtXU68sbE0rO9949s7cO2b2/HetgoA6uRSkLliVWsXtpU299ll0+HyBuSH22X8K3GDINqkj0gOyqIxqYg26VHT1h3WDpukIYOb5B2pNL/Lom7D/jeIqPMoPjwh5QSTQYc7zxyPRWNS8dAFU8P63oTJOfHITrCgy+nGT8fCG6AsBJULf3tyX1YCEXxt3S60211YvS+4EEqyTlNiTGHtoAwImzPr7zgFz/xqRljf15dJ2QmYlB0Pl4fHxiLtx93j4bFb7B5LHP+qOI9EF9f+Kiuuf2sHLnkpuGbS
2G6HW8w2C/fGUChI+hTW1tbiwQcfxJgxY3DRRRchPj4edrsdn3zyCR588EHMmTNHq+McVPgLO3kqlK0B3uT7//toL0588IegdZNe8WDgqZXhhOM4anP9Jkw2V+9kzoycxCjcftpYVV6X47hei4NgtdKRKl0aSKTGmjGnQNgZD5dw6F8qpVZtOhGP3thYhoe/PhyagBAB4XBSdkJEQvJ9yU6Mwm8WFwIAHltzBM4wBaf7u0F1nDo7w2Sn6oGvDuO8//yMuz7eF/S5kRz7m08ZjUcumoZklRZTUrlu0SgAwHPrirC3sjVs73tMbF9MiDHpVdkwIKUCpU0hiEcqdgCSgsmgw/3nT4bZoMPNpxRGxImg13G47dQxAICXN5SEzX3kcntQ5jc2ajgCyOJiY1ETLnp+E657Y3vQ55KNgnAG5BN+e+oY3HxKIT6+6cSIRCNYjHqcPUXovvXe1oqwvW+s3waeWhtExHH2+/d3Y9p9a/rM8CLiUTizbwg3nlyIt66Zp0oUhxyEOX34y5Tf2FiKXz63EQ63BwYdp8rfPtD1Ilh5eqRbtuclR0fkPPeFdPjsK7JDLcoDrK3VEI9G+WXBHqqxUkeZP2TM0+LMEY2BCUbI4tG5556LcePGYe/evXjiiSdQXV2Np59+WstjG5TwPN/DevqP8yarphT7WgbtLg/WBVkUM/HAC+nQ8P3hurAsJElHrHOmZuPnPy2hNzo1+Nu5E+E7P68L0qUhUjvRA43FopDxY5jEIxKcSphVoE59OMkSabe78Oy6Ivz726NBn+sNTY7MIn4gcN2iUUiOMaG0qTNsWSiljT0nG2Mz4lQpV83za8W8po8Jc6Q6bA4Elk3JxFmTM+Hy8Pjde7thd4WnpKHFz3Y+IkWddvWkDfa6I/W45Z2dtLNVIBojeL//9bx8HLjvDFx/UmHY35twztRsjEqLQWunE29tDk8mRqBFnhrn3Vi/PKE9lW046idQEnzF4nBmXQGCiHLHGePD3hjBF1Ie/cXe6rB13/IvK/O/PsuF3ONbOp1o63LipfXFfRyD4ELJCnPZ2kCBlCmvOVAbdOGtJtWtXbjnswPUiZKbFKVKafboABlxwQwBtW1i+dIAzL4JF2QzeFsYQvIDVSuoIdylxJp7lb/VBilVraUOw4E55iGfAV999RWuueYa3HfffTj77LOh10dWhRyodDnd1NK6557TcfkJ+aq99ukTM3pMUHaVs7Kl/phdkIyUGBNaO51hUaw7RfEoxqz++XHKuHSs/cPJWHliAYDAzqNupxtWUcQY7uO/eJwgHm0sagxLfbzvrvfyadlYoVL5ln9ZwIbjwW27xIUwnMc+xmzAtYtGAgBW7QqPeOSfUXLK+HRVXrcgpedOlcPtofcXf4bzdZ/jODzwyylIjzOjuLED728LjxuhxU9EyFFpQUcW5R/vrMIXe2vwyDdHgj63MUKZR4RwZ1z5o9dx1G34SZjO90DttUeq0GE0IcrYaxwP1wYWj9Tq6jhYmTkiEeMy4mB3efDJ7nCNu7biEaG2jw5ckXQeDQTmjkxGWpwZ1m4XNhzXfmPQf90wIkWdTsKBSh4rguRakpyrjGE65gAwp0CYBx+ta+9131UbYgLwRY1SRUDY3PTV+oOVptfZBm5YNiBBPNqwYQNsNhtmzZqFefPm4ZlnnkFjY3hrTgcD5EPHcaABsmqREmvGmt+dhJtOFiZK/rZpwnBeRPij13E00PXzPdWav1+HGGKthusgEAWpMTRbIpB4RBYSJoNO9c/fYGNcRhwy4s3odnqwIwy5CO2iaPfXsyfgqUtnqOY4TIgy4gSfLhfBHGc8z/do1z6cOWdKNgAhh0rriQbgdZ2dMzUL71w7j5bSKGWOn3vN6eaDLiyG+9gnRpvovfHtzeVhCdJt6fR+tjLizbh8foEqr+vfwaq4sSPo7xOpkPyBxBmTMmHUczha146yfhqKqIF/FlGs2aBah8nTJvYUnoMFxA73xigcx2HFHGGD5ou9
2s/tAO/8jjBCJfFoQlbP8z1YTqPHw9N5X1ZCeDOPBgp6HUdLFlfv0750rbWz5/xh1gj1Mr5euHwWTixMwdRcoaqkOUiWba2KofyDlZRYMwrTBOFO65yzTkdv8ShOYSMMws2njMa+e8+glRHBmiPUiSLxoBePTjjhBLz00kuoqanBDTfcgPfeew/Z2dnweDz49ttvYbMF3h0ZbnTYhZtLjMmgSQZAUowJJ48TJhfBQrOZeNSTX84U2jx+tqc64EVBTcjra+E8IpBxbbT1XhT7huZGIoNiIMFxHBaNES7Q4cg9IgKCfy6CGjx/2Sy8dc1cAIJAGKgsx9rloqG9w/3cH5ESjfGZcXB7ePxwuF7z9yObBvkp0ThxdKpq+QBRJj2uXTiyR1vkqiC7k+y6D5w/IxcmvQ5H6mw4Xt9/wLxSmsVymZeumI0tf15KJ4RKmZaX2CvnwL9EjhBp59FAICHKiBl5wqLu5+NNmr8f+Zvnp0Rj/qgUPK1imOy9yyfh7nMmYuaIxB7v5Q/LtvS2bt9R1hKWTQIyvxuVGoMr5+djqkqd5ibnxFNHOeDNzvSnscMOp1sI0R3O13myIbypqEnzTYLWLu9116DjsGyKelEUZ0zKxDvXnYDJ4ufI9718qbUO3K5b4YRspm3XuHSNrOMJarhKfYkxG+j9urkj2JhHNueqPyRvjcfExODqq6/Ghg0bsG/fPvzhD3/Agw8+iPT0dCxfvlyLYxxUdGhYtkRIEfNMmtqD3GBY5lEP5o5MRl5yFDodbqw9rK2IQC46WjmPAG972JbOPsSjYTyx8GXRmFQA4ck9sonnvlo7FL4kRpuwcHQqzKKbKVCdNMm8ibMYIh5uOBAgeWNrDmq/O0nEo1iz+mP/13MmYuNdp2KGuJgMdN7bup10V1yNrk+DlYRoI+YXCsGa34dBNCQL1uQYdcfdqNfh/84Yh9Hp3nKWYHkY3sDs4TvuAHDiaGHcfw5DNx6yUTAtNxHvXn8CThmnTpkqAJgNely9cCROFRfIwZ1Hw7tsDQByk4RNAg8PrDuq7fnucHngdAtCxaqbF+C+X0xWLWuK4zjcu3wSdv7tNADC5ytQRie576fFmWGMcLloJJmZnwiDjkNVaxcqg2ymqEWbKOhcOncE1vz+JIzxyyVTAxJ43xpkg6BugOffhAsiHvl3NlabLjHmIi85CmdMysBfNOgiS7KP/J1tBOIwHPTOo0CMGzcODz/8MCorK/Huu++qdUyDmnYqHmknHpBJos3u6pXl4nB5aJgjExAEOI7D2WIZi9b2Zuo8Mmm3eE+k4lHvG81wDs0NxKIxaeA4ITfCP69AbajzSKNyQY7jkC3WXQcUjyLUqn2gcvpEYfH149EGzTOv2qnrTLvzPkk87wNNNsjnISHKqKlwPRgg7p/Nxdo7UMjCQo1OLP7csLgQ392+GNPEkoZAIoJvu/aBOskMFwtGCxsFm4qa4AmSC6YWnaJQq6nDWLyOBxWPRDdC+jAWiwHg1AmCcPfdIW3Foy6fkrVojeZ3iVFGmocSyEnlzTsaniVrhGiTAdPyEgEAmzS+zpMw9oKUaIwKEHKtBn3d2wFvSPpAdaGEi7kjBfFoX2Vbj/NRbYgJ5LQJmXjh8tlYKs4l1SQpuh/BkIpHA/P6rop0rdfrcd555+Gzzz5T4+UGNR127UpXCPEWA4x64Q7j3/Wjpq0LPA+YDbph3XHJn3OmCjXSPxyu11REaCfOIw3HPynGq1j7W3br2oZ3DoI/yTEmTBEtwT8d03ZHut0u3ATiNMyaIjecQMIhLV9hYw9AyI0hmVfbS7WtkafOIw3Hvq/dyRq2M0khE8wdZS1hEBG03yyKF8fdZg++WWDUc/TaMFyZlpuIaJMezR2OoCHTakHEoyijhpuEYoZVU0eQsjXmMAcA6tBaf6RB0466HeK5btLrNHP96HQcFRKaAwgJpGQ5e5h2WvOF5EBqvUnQquEGASGBCAkByta6HN4mOMNdPMpNikJmvAUu
D49dFdrN6cKxOZDYRwUJ4M02HahzuuHre9QI6jzScPeX4zikxAgTBv/cI3JzyUmKGvaZN75Myo7HzBGJsLs8+Nun+zWrkyaLiXA4EFwenpZKEYiFNzdpeO9M+UKcCN/00eZcDYjzKE7DhWRiHztUzHnUE9/Mq5+OaVu2aNOwbI3Q1wRzoNfHh5PxmXGINulh63bhaL12IoJvGYuWbi8iRlu7euf1kd3J9DjLsL/fmww6Khxu1Lh0LRzZhmSx2hYkB6WBjP0AXVyEi+m5iUiJMcFmd9F26lpAxjxawzEHvF24AuUekc5MeUnqBHUPZuaNFMpUtxRrW8JE5lr+LdbVhGwMBdoUJPf2aJNe07nlYIDjOMwRr/HbSrQTj4gJJErTCpLgm4HdTje97g/UDntMPFKZcOxAA95dKeI2aLe78M6WcuwSb55qtRUcKnAch/uWT4ZRz+GbA3WatfAOR+aRxahHlJhp0yqGra07Uo93tpSjXMzFyGWTC8rZouts3ZEGzVxnPM97S5e0dJ/0ISAwa3NvSObVeo1dZ+FwnIZStjZc2zf7YtDraD6Ulo6zcJSxAEC8mKFm6+59ztcPcGt7uFlQKJzvPx/X+nzX/j5PxaMAiwuX20NbOQ/3c16n42jOmZbj7tsMR0topmWAIF22OehlVn4S9DT3KHAenBoQ10+8BlmWBCIYtvVxb8+MZxsEADCnQGiMsL1MO9Gw06n9uU7nc13BxzzKOHAFQyYeqUw4FhEAqPOosV0oXbr69W3486p9eOSbIwCAPJVaiA4lpuQm4JZThBba72wp1+Q9vJlHGk8wYrzW5i3FTbhKHH/SwpJNLryMzxRcZw63B39etQ8uDaztdpcHLrFERovAbEJiVHCra1Wr6DpkwjFl0Zg06DjgUI0VxzV0obRr2GmP0NdOFS1bG+YLScLsfG/pmlaEo4wF8HEedQdyHgkCwnDPOyKQ0OytJc2aljB1ObV3HnnLFV29yi/rbHa4PTyMeo45TeHNu9qoYac9cr5rKRQD3vM9kFhMQvNz2fweMWYDjSTQ0n3UGYYcW1qSzjYF+2XmCEE82l3RqllZOhlzLc/1hD7cZnU+TvKBKhgy8UhlSOaNlpMKoGfHtQPVVmwt6XnxnJAVr+n7D1YumZsHHQdsL2tBeZO6uxUeD09rZbW2NifS7BsHPtpRCf8qvJEp6raWHOz89ZyJMBl0+O5QPf7vf3tVL1u0ihM9jgOiNex0RnJNyG50RXMnVr62FT8crutRssoQSI4x0UyM37+/h46T2oTDcZrQR+YRnWwwEQEAMFvcndymYVeWcJWxkB1va4CFxUDvyBJuJmTGIznGhA6HG3s0LGEiLpQoDa/15HzneW9JNIFc67MSolTr+DWYIY6zXRUt9LxUm84w5FkCvuKR9/eos3bD4fLQcWdlawLzxNyjLSVaiobar+lISXpblxNuURBxuYVmCBXNbMx98S1LP97Qrsl7kDHX0llK3GaBsmsr6fV94N7XmXikMh1hUKkBb65JY7s9YJbLRCYeBSQj3kJ3qdQuXevy6eiktfMoiVqbHVjn14Z+THosvTAxBGaOSMIzl86AQcfh451V+Gq/uvlHvs4TLSfzvqIhAPzzy4NYd6QBV7++HQdrrACY88ifP501HskxJuyrasNDXx1W/fU9Ht4rHoWjbM1HRGiw2VFv62bOIz9mjEiCjhMmYXUalarSjQINBQSg92KyrcuJuz7eh3VH6qnzaLh33CLodBzmjxLcRxs0LGEKR1C62aCHxShM0Un+xZ9X7cPiR9bS341d6wXykqOQkxgFp5vvtZGqFh1h6KQLeB1nZKNjS3ET5j/wPa5+fRvN1mPOcoF5I4l4FIZNAg3n9MRRLgjFTthdblz60mbM+9d3dH2Xl8zGHBDK0qeKHUh3auQs7gqDCYBsBDvdXtPBuiP1uPvT/ThcK8zl81MGrmDIxCOVoWVrGosHXueRnXaROntKFjhOcB1NzmHiUTDOn5ED
APj3d0cx71/fYenjP+JgtVXx65LJBceBTvq0gohDeyvb0GCzw6jn8I/zJiM11oTrTxql6XsPVk6flImbTi4EANz72QFVXSjhCMsGfAOznXC5PVh7xCscOt08OA4YmcpcZ74UpsXimV/NAAB8sL0iYIaIEjp9RGMtO+15y9YE4ZAsKk55ZB0OicIhE48EYs0G6r7VKveoI0xOBP/F5AfbKvDu1nKsfG0b7TiTEcfGnUAaJHyt8gaBL1Q41FhI8A3Nbmq3450t5Shr6sRT3x8DABSwaz0AIdNygViyuLFIGxdKZxjcCEBvsfipH47Bw3vF0LQ4MywaC9aDhdkFydBxQFlTJ82JURPfpghabgibDDoqSrZ2OvH9oXpsK22B083TTUEWReJlhli6plVZekcY4keijHqYxHL3lk6HWEWwDW9uKsNLP5UAGNhjzsQjlbGFyXlEMo+qW7uxv6oNgLDDfujvZ2L1rQthNrCbSzDOnJyJdLGdeZ3Vjv9v787joir3P4B/zgwwLAMM+yKbgIoKIm6IC5may1WzMivzanbNruWSVzOvv9tVs0Xbb5bZclPrVlrdzLpmuKOpuKG4i2IoKigqssv+/P6YmQMjOzKDM3zerxev5Jwzc8708Mw553ue5/tNyczH9G8P3/VUpqoJFY09T1Uftd6RnAkA6OTrjAm9A3Ho5Qcwtoe/Ufdtzp6/PxRt3R2QmVeMN3493WzT1/QjT4yZ7wgwzHuTfC0PJWWGeT2C3R2M/t1jjvqEuCPM2xGl5QKbTxln1JmVQoLKyninVP3TSf20tfVJ6SirEPIQa0kCAl15M6nXM0j7VPq7Q5fkfGDNqdBEIxH03yn6nEdVS1P/cb0AAKetVTWksxesFBLOXM1DSqZxpjWYKpBQNXi0q4aKkZ18+ZBQT58021ijUExRvhuo2t9LIYTAiSuGDzY56qiSk6213AeMMXWt6hRIY1beAgxLt+//o/pnCfFQG3X/5qSyquZNo1TOlqeoGrHNJUkyuJ7fdvpatW3u5es5Bo+amckSZutGHh24kIWyCgE3Bxv4udjB1lp5zybYulfY21hh1dM9MbSzFx7XBVr+uFGAk3c5+qjABEnW9PQnmou6vE1R/hqj79MS2For8fpD4QCAtQcv4aX/Nk/+I31yS2NXWaxaoeHYZW3QuOosuS5+GqPu35z9KUJbde/X4xnN+r75xdq2d1AZN2isz4twu7QcRaXl2J1ieDPp72Jv9AtcczJSV2Vx19nr6Lt0O9YcaN4iCaYKIDjpRyLopi4d1fX7qlhtrZLG3kausvj1votG2YepzvVVg0fHL1e/PmF6gkr6YPHJKzly+zSnyiS6pht5lJ5TJE9Z1NMniSat6LbaoOE+IyTN1j+YsVEqYGPEB0OAYSXdQzWMqGEe20q9glxhrdRW2jtvhLxHpkqO71JlJkFNf7/63I33IgaPmlmBCRKnAoD7HRU2uvg5M2jUCJ19nfHphB5489EuGNpZm1B386nqkd/GqHwyZfyRH672hiNc9KWpqX59Qt3x+sPhsFJI+CHxMn5OSr/r95SnrRm531et0HDumvak+VSfIET6a+DqYIPpA0ONun9zNkIXTNh97kaN5e6bKs8EldYAbRBBqYsUXrxZKCfS1Av2uHefUrWE7oEu+HPvAPn3f20926yVFgtNVn2pcuRRblEpbuQXG6xXKiQE3MO5EVrCxJggAMDqvReMUln1tonO9c5VpiyeSNcGDfVJuh1slIj0YyBBz8/FHm00diirEDiSlt3s7y8nTjZ2zqMqCfL16RSqBi70BSBIqzLvkRFGHukDhkYebQZUBo8yc4uQfFVbGVZ/zfJUTKB87ift925sO+305G/3X2rW9xZCmO77vUoOU/33+8AwTwBA/3bu9/SIYgaPmllltTXTBo8iOOKgyYZ08gYAbK4h8XhjyPNkTXCiufNLRV++khpmfHQgXhjUDgAw67skfJVw4a5GIJkqgKDPdVVSViGfbNp5
OuLHqTHYM28ghzbXIcRDjTBvR5RVCEz56hCiFm/GoHfj8cddPrmqnLJo3LaXJEm+mdx7vjJhbqCbPRQS8HTftkbdv7mRJAmvPRSBs68Nh8beGtdyi5s1R4Lpch5p3z+3qBQXbhRUW+/nYsdp6ne4P8wTf9Xl/ntnczKKquQlu1tCCJM9mXaqUmHxtC6Q8NXkXpjSvy3WPNsbVkpewlfVU/ek3phTmExVbS23qAwndef4kV188N2zvbFoVCfE6kbVkVavtq6QJO0U3vRmnp5cGTA0/gNh/WyCfX9Uzib5aFwUzrw6DK+MDjf6/s3Nn2MCAQD/2XehWauqlpRXoExX8c74I4+03++XbhXKFdbef6wrts6OxYo/dzfqvu8WzzzNrHLamnH/6FzvqKbVhUNZm+x+XaT3zNW8ak91G6NynqzxTzQhnpVBAne1DefBN8HUASHyEPAFP5/E2oNNf4JhqpxHDjZKWOmeQOlvhEM91bBSKjhlqQH0yfIPXriFW4WlOH+9AIs3nLqr9zTVVGWg8unkTl2FxS5+zvjv1D7YNmeAnCiYDNlYKTAorHlGl1alr65pqpxHJWUVOJOhfSLdK8hV/nvr4OVo1P2bq7lDO8DPxQ5ZBSX49VjzTVUtLquA7t7CZNPWTlzJQV5xGWysFOjqr8E/RnTiFOUa9NeNRlifdMUgX01zKDBBHhSgcipLTmGJPPKos68zooPdMKlvW84wuIPG3gY9ArVBwx8OXW7W9y40ZSoKXV//XZfbLLyNdjYJk6PXbEB7D4zo4oPScoG//3is2UYV6+/jAOPfy+nzWO7XTVnzdbaFs701Qj0dTXI9eTcYPGpmBSZKmH3n/FtOW2o6VwcbhHlrL8D338W8aVOVcgUMSzj6u9rzgqIJrJUK/PupHvK0xS/3Xmjye5ly9In+CVW57g4mhNOVGmxcdADaaOwgSZDzouw8e/2ugsb6UWemmK6qr6oVr6uyF+HnDA9HFSvs1WOIro9vOnkVQgiUlVfUOIqnMfTnemMHbR1VVtB/vSddzgYAhHg64JM/d8fMQe2wmE+la2SlVGBcL+20xW+bMd+Vfno6YLqE2Xt0Iw3DvB1hzdFGtRoa7g1HlRUuZd1Gvzd3IKEZK6+ZKkG+/sFwVmGJXGmLua3qNj5aOwpl+Y4U7NZVn24Ocl47Ez4YupGvnVLfhVNS6yRJEpY8EgFnO2ucv16AA82UKF9fPVdlpTD6VEGNg7bNE3QJ0sPMqJ/zLNTM5GprJhh90k43+sRdbQM3NRNm3o3ewdqkewl/NP3EIyfRNMGJRmWlREywG+xtlHiVNw9N5uVki9ceigCgHXmW38REm3LCbBO0vbu6ctShi701+34jONlaY8eLA5C0YAj+Mzka4W2cIERlMKYp8k2U5w4AvJ0Np6t2aaMx+j4tQWw7D9haK3D51m0cu5yDxz5NwIB34vGfhAtNfs9CE01pUCgkqHX7OKYLHrV1d0C/du6Y/UD7an8TVGlsdz9YKSQkXryFM1dzceFGAT6OT0FmXtPLeuvP86a4udDnv9FXWOzM6mp1Uqus8OmE7vB0VCGroARz/3sUpc00IqHARAny9VPTi0or5KksrKpXt9FdffFAJy+UlFdgwc8n5Adrd8uUD4T1I870mBi9fk621vLD37i7TDuiV2iiASBAZZvrqybrBzGYAwaPmlFRabn8R6Cfq25M+ophb4+NNPq+LJ2+zOuuszdQ0cQTj75kt9oEgUMA+PIvvbBn3kCE8yRzVzwcVXK1otMZTau4Z6qE2QAQ4Fo56ow5jhrPxkohP9HvG6IdfXQkrem5cPT93tEUI4/uyHXGC8yGsbNRylNa/rL6IA7rEup+sC2lybnOKnOgGP/GwkmevqT9fmrrzn7fEJ5OthisSzD87FeJePCj3XgrLhkTvzhwF+1uusIYzndcR3b2ZX+vT59Qd2x/cQDc1Ta4fOs24k5obyrvtqpq5U2lcfu7g40SNlVGl/m52FX7OyBD
kiTh/ce7wtnOGn/cKJBzAt4tU1XUBKr39QiOPGqQ4eHapOKbTl5t8r1bVfogsZ0Jpgtq7mhzjjxqpfQ3kJJkmhsJPxd7fDqhB+7v4Gn0fVm6vqHucFRZIS2rEJNWH8SCn0/g/S1nkdqIqQ36kqoae9Oc6G2sFPJTKro7+ovyk1e0CSqLy8pRXNbwJKumSpgNaKcp6rF8692J9NcAAI7qRnQ0Rb4Jcx55VynJHuRmL1froPo9qZvCdLOgstLejfxi+el+Y8kjEUxwkXnnjQUr6zXc3GEd4GirPbfn6r6nz1zNkwNxjWWqKnsA4KY2PL9z5FHDqFVW8pTFX46m44Ot5xD5ymYs23auye9pqkCCJElwcajs72zzhlGrrPBgpC8A4L+JzZP7qMBEAUOgMmE2oC2I5H0PV9q6l/QJdYNaZYVrucXytO7recW42cRUBKYKEgOGbQ4AHTnyqHWqOnVFwbKKZkWtssJMXfWtXWev46uEi/hg2zk8tHwPLmUVNug99MEjU4w6o+YVrrtAO5mei7PX8tDj1a3o/ca2Bs+jNlXCbMBw5FFXXfCDmkYfPDqTkYei0nIcSbuFo5eyG/Ue+SYc5ty+ysUF275xBnTwwEhd6eOeQS7yEPGmBg4LTThN2bNK0NBGqUBgle8AqluIhxo/Pd8Hj/Xww/MDQuRcZ9vONC15emUQwfg3F1VzmUkSHxY0xrBwbRXdLaeu4f2tZ5FbVIb3tpyVK5g1limr6VadwtTJhyNQGmpMdz8AQNyJq0i9UYBfjqZj7g9Hm1xR1ZQjj3w1lcGiMG9H5jFtIJWVUi5v/0tSOracuobeS7Zh6L92yfdkjZEvJ0k3XZ4rQDsYwJxyVzJ41Iz0T7WcTHADSc1vSmww/js1Bv/3pzDMHBiKYHcH5Nwuxftbzzbo9dm6LyoOMTY/nXQjj45fycH8dceRV1yGW4WlWPjLyQa9Xh84NsW0teHh3vBwVMFGqUBv3XRLahpfZ1t4OKpQViHwxe5UPPzxXoxevgdbGlGVy1TJ0gFtlS29EV18jb4/SyJJEj4cF4Vdc+/Hd8/GICpAW6FHX8GssUyV8wioTJQOaEcdsUR744R6OuKtRyPx0rAwOajQ1OIYBSa8uWijqayiam+tZOWlRujk41Rjouk1TUyeXjk13fjXd1VvIsPbMGDYUJF+zohu64risgrc/048Zq45gh8SL+PZ/yQ2KQ+SKXMedfJxgq8uf93YHn5G358l0QcNv9l/ETPXHEF5hcCN/BL8nHSl0e9lyuu54Cr93MfZ1qzO6+ZzpC3sfKb2yXRdcm+b7gaSjKNHkCuejQ3B7CEd8K8nugIA1h+5gj0p9c+hzmHwyGzph4afuZqHxIuV+W9OZ+QiPbv+aS35Jsx55Olki51zB+D3efcb3FxQ40mSJI/geXtTsrz8s13nkV9c1qBkq/kmnLJopVRg3fN98NaYLhjckdOVG0uSJAS42UOhkNDeS5s36Ow1bfDoak4RJq06gE92nm/Qe5lyBIpXlZFH7b3MZ2j7vSi6rTbgfjjtFopKy7Ht9DW5JHpDVOY8Mn67WykV8HfVfsfrp2FRw0iShLce7YIwb0cM7uiJf0/sAQCIO3Gt0blRhBAmfUDUrkofj+EDogaTJAnLxkUZfF8CQEpmPvanVlbeS8++jc92na/32k5ftt0Uo0slScL3U2PwwRNd5el31DCx7dwRFaBBabnA7Sr36bvOVhZCKS2vwIkrOfXmPjNl8MizytTEQWFeRt9fc2LwqIFGL9+LyV8erPMPT/9kgtOWLEMXPw1GRfqiQgCTVh1A3ImMOrfPNXHOI2o+dyalnNA7EN0DtSMTGlKJy5Q5jwDtU+87kydT0zzQqfpJ++CFWwhfuAl/+y6p3tfnmbDaGgB0C3DBYz39Oaz9LumDMOcy81FWXoHJXx5EfPJ1LP3tDK7m1F+Nq8CEuW+qXmTqg17UNCEeDnBXq1BcVoGpXydi8peH8Kdlv6P7q1sw+qPd
uFBPnkNTTmsAgGVPROHlER0xb3iYSfZnScLbOCNuViz+/VRPxLb3gJ21Ejfyi3EuMx8/HLqE2Ld2NGh0QnFZBUrLtdf+pjjH/7l3ADp4OWLW4HYm+zuzFF5OtvjthVi891gkNs7sj8d7+AMAfjtemTh9+reH8cbGM5i4su7E+aYceQRo89iO7tqG5/ZGkiQJC0Z2ks/F46O1gfaDF26hokKgrLwCE77Yj5Ef7sZbVR4S1sTU1/I/T+uLZ/q1xYtD25tkf82FwaNG2JNyE/vqGOqcq3sy4cSRRxbj7Ue74E8R3igtF5j343HkFFbOod2bcgNf77soD4flyCPzJUmSwRD3h7u1wYD22upM//fTccz5/mitTytLyyvkAMKd5Vbp3venCB8EuWlzyLw4pD0m9QmS1204loHreXUnXiwwYc4jaj7tPLVBmIs3C/Dprj9wssroky2n65+2KD+VNsHNXdVS3aGeDB7dDUmS0Ec3mqPqg4GbBSU4ejkHL/5Q+3c9UOUhoYnSE0QFuOCZ/sGwNqMpDfciGysFegRpHwjtOnsdi345ibSsQrywNgn/+Ok4UjJrz4ujv7aXJNNMU/V0tMWmv8Vi1mDzuqG8V7g62OCRbn7o5OuEwbqHQ/FnMyGEwK5zN+RqmymZ+Ui+Vvu05crveE4XvddFBbhg25z7EP/iACx6sDPsrJXIuV2Kc5n5+Crhonzv/snO83XOIqoMHpnm+z3SX4OXR3YyuyAxz0YN1DtYm2tid0rlxcbG4xl4e9MZuSrTrUJtFReOPLIcttZKLHsiCu291Mi5XYofEi+hpKwCSZey8ecv9uPl9Sfwn4QLEELI7a+xYwDBHM0bHoYgN3s83sMf3QJc8GBXX7lq4o+HL2PXuZpHIOnbXSGx75sjtcoKG2b2x0/P98G0+0OxYGQnrJzUQy7VuvX0NWTm1T4SRR7mzOCRWfFwVMHZzhoVonLKoqejdrpDQ5Kmyw+L7Izf7t0CXDBzYCh6tXVFv3YeRt+fpXsyunIKWL9Qd6yc1AN/Hx4GGysFDl28ha/3X6z1taZsd2pevYO1QcOP41PkaokA8M3+NEz4Yn+t05Rzb1eORmAxHPPSJ8QNdtZKXMq6jUdW7MWUrw4ZrK9rZHnl6FL2dXPg42yHIHcHWCsV6BaoAQDsT72JL3anytsIoc1tWpv8YqafaQgGjxroTxHaKi0J57XzZq/nFWPGmiNYvuM8/rVVW/5TP3+WeUgsi5VSgQkxQQCA1349jW6vbsHCn09A/3Byxc7zuJFfgqJS7YWH5x3zrck8dPXXIH7u/Xjz0S4AgEA3B2yZfR9CdGWxq+ZCqiq7sHLEmZIXlmZJrbJCVIALJEmCQiFhYJgXnu4bBACYv+44er2+Df/ZV/MNpT7nEUcemRdJkuTRRwAQ6GaPVx8KB6AdmTBx5QHMWHNEDg5WVVJWIee+MdXDgtlDOuD7v8aYbDi9Jesd7IbXHw7HiAgfLB0TgYFhXph6XwheHtERALBs2zmU1RpI0I8w54MCc6N/CHxLd87uHuiCP/fWBhIzcopqDSTkFbHNzZWDygqjIrX3b0fSslFSVoE+IW54QVddeelvZ7B8R0qNrzVlfjNqXj11xUVW7k7FlezbcFRZ4f4O2gcvh2u5lgdMm7/UnDF41ED6KjfHLufg+OUcrD2QJk9X+u24NhfO5VsMHlmqMd3ayEn48ovLcPRyZeT6Wm4xVu3RRrZdHWxYEcWCeDvb4um+bQEASZeya3wymVWgHXnk4sARZ5akT4i7we9vxZ2p1v4VFUKussgpi+anb2hlG0+9L0TOc5aZV4xdZ6/jf0fTsfnk1Wqv009RliReZJqr8dGBWD6+G/xc7OVl43oFwM3BBjfyS/D7uZqLZDC3pfmKaKOBrXXlbc+ICB+89lCEPFV5ay1VNvN4Q2nW/j68IwaFeaK9lxorxnfDN89EY2wPP6istH8L725OrjF5Nh8MmS/9PfuFm4UAgD6hbuihW/bd
oUt4/ptEfFPDCFP9wyI+pKmb2QSPsrKyMH78eDg5OUGj0WDy5MnIz699jnJWVhZmzJiBDh06wM7ODgEBAZg5cyZycmofrlYXP1d7+LnYoaxCYNRHu/Hulsry7RduFuJqThGu6IJHVS9GyDLY21jhm2d646VhHeRlKisFpvTXBhY+jtdW6PFxZhJjSxMVoAEA/H7uBqIWb8H4f+/Dwp9P4LmvE5GZW4Rs3bQ1Bg8sS58QN4P8MnlFZfgk/jw2Hs+Qgwe5RaXyQwQXB95Mmptn+rdF31A3jI8OwOM9/OGuVsHPxfDhT01T2HJua/u8s501p7FYEGulAqN0lY5e33i6xipszG1pvmysFIjyd5F/H9zRy+C/G46l15j7iMEj8+bqYIMvJvXE5r/dh+ERPpAkCX4u9vh5el8AQIUAHvxoDz7cdg4lZZUPiLJ4bWe2ogJcDHJVDeroJec1/eN6ATYev4p//HSiWnGMXAYMG8Rsgkfjx4/HyZMnsWXLFmzYsAG7du3Cs88+W+v26enpSE9PxzvvvIMTJ05g9erViIuLw+TJk5t8DH/uHWjwu8beWh5lFJ+ciYtZ2ginvrQqWZZQTzWeHxCKxaM7w8NRhTlD2mNKbDCslZU3DwweWZ4OXo5y/pv84jLsSbmJLxMu4rcTVzHmk71IvaHt97zAsCwKhYRVk3ri0wnd5SfT7245i+e/OYz73t6Bs9fy5FFnapUVVFYccWhuHG2t8c0zvfH6wxFyEEg/3F0vqUrw6Pz1fNwuKZenqmo4+sTiPNHLH9ZKCSmZ+RizYi+OXzZ84ChPW2Pbm6UZA0PRzlONcb0CEKArlNAnxA3dAjQoKCnHxC/2GxRGAYBrudobTE9HXt9ZkjBvJywe3RkAcCO/GO9uOYv1uup7Qgjc0p3fXTmq3OzY2SgxuZ/24b63ky0ejPRFhJ9zte3WHbls8PsNXYEUdzXTj9TFLIJHp0+fRlxcHP79738jOjoa/fr1w4cffoi1a9ciPT29xteEh4fjxx9/xKhRoxASEoKBAwfi9ddfx//+9z+UlVXPYdAQz/Rri6WPRODbKdFYNKoTfnyuD/4U4Q0AeGdzMkrKKtBGY4cAV448smQTY4Jw8B+D8WxsCDwdbTGqi6+8LqKNpuUOjIzCSqlAV39NjesuZd3Gm3FnAADezjzZWBp/V3sM7eyNCTGB8jBmR5UVsgtL8eTn++TEi7y4tBzzh4ehW4AGY7v7AQBOZeSiuKwcPyZexqB3d2LOD0mVec4YMLY4Yd5O+On5vuge6ILbpeWY9u1hbDiWjkdX7MX3hy7JT6Y5CsU89Ql1x5bZ92HJIxHyMoVCwmcTeyDIzR7pOUVY8MsJg9fopzT5ahg8sjTDw30MAgX7dHlt84rLUKYbVczzu3maNbg9Nszoh7hZ/WFrrYS7WoVHotoAANx0bRp3onJauhAC6Tnavs6BAHUzi+BRQkICNBoNevToIS8bPHgwFAoF9u/f3+D3ycnJgZOTE6ysaj/pFxcXIzc31+BHz0qpwBO9AtAnxB2T+rZFiIdart5wI18bof5ThDckicPYW5NpA0PhbGcNG6UCD+u+mMiyvP5wOMK8HfH34WFY93wf/PZCf3w8vpvBNsHuLKFtqUI81Nj+4n3YMKMf4ucOQJi3I27kl+CFtUkAmO/Kkng62WLd833x1qNd4Opgg9JygU0nr2HOD0cBABuPX5XLOztz9IlFCm/jjJVP9YSfix3Ssgox/dsjOHTxFl7/9bQcSOBIU8virlbhgyeioJCAn5PSseNMprzuihw84qwCS+PhqMLmv8Vi7lBtSop9f9xESVkFsnT3dPY2SuYxNVNKhYTwNs7QVPmufv3hCPx3agziZsVCIWnzGF/SzRrKuV0qFz7yZvCoTmYRPLp69So8PT0NlllZWcHV1RVXr1ZPZlmTGzdu4NVXX61zqhsALFmyBM7OzvKPv79/ndv3bFs5xF0hAVNigxt0PGQ5QjzU2DI7Fhtf6C8P
gybLEuyhRtysWEy9LwTdAlzQ0ccJA8M8DZJvBuuqspFl8nS0RXgbZ7ipVVg+vptBZT03Bo8sjiRJ6BOifTg0c80Rg3U/HLoEAPBx4gWmpXK2t8byJ7sZ5M3IuV0qV2Bq48JAgqWJ9NfgyWht9bW/fHkQj32agGnfHsZvutEJDB5ZJlcHGzzdNwhWCgnpOUVo//JvGPKvXfI6shx2Nkr0CHKFh6MK0W215/epXyfi4s0CpGdrp6ey8FH9WjR49Pe//x2SJNX5c+bMmbveT25uLkaMGIFOnTph0aJFdW47f/585OTkyD+XLl2qc3snW2s8GR0Aa6WEl4aFcU50K+XpaGuQXJcsn621EqMjK0eahbepPp+aLFOIh1ou+wpoy7yT5RkfXZnnsK27A4Z11k5T11dwCXJnwNiSRfprsPlvsfj3xB54qGvl9HRnO2uWbbdQ/xzZCY9EtYEQwIHULPx6LENe19nXqQWPjIzJ3sYKXarkxNEnzuaDIcv1jxEd4aiywsn0XIxevgf/2aetvubP1DP1atFJ23PmzMGkSZPq3CY4OBje3t7IzMw0WF5WVoasrCx4e3vX+fq8vDwMGzYMjo6O+Omnn2BtXfcJX6VSQaVqXO6SNx6OwOsPhXO6GlErM+uBdsi5XYqxPfyYYK+VGR7ug62nteelbgEu9WxN5igmxA0bZ/aHJAHtvRyx+eRVxJ2sHO3clsEji+fnYg8/F3tkFZRgfVK6bhlHoFgqlZUS7z3eFdMHhiLx4i0kXryFtQcvYXBHT1ZStnATY4JwOC3JYFknBgwtVngbZ2yeHYup/0nE0cs5WHMgDQAQ3da1nldSiwaPPDw84OHhUe92MTExyM7ORmJiIrp37w4A2L59OyoqKhAdHV3r63JzczF06FCoVCr88ssvsLU13qggBo6IWh8fZzt8MqF7Sx8GtYAHu/riq4QLuHCzUJ7eRJan6s1Dt0DDIGEHb0dTHw61kIEdK1MnhHhwlLGlC/ZQI9hDjbE9/PF037YMGLYCo3WjC22tlZj6dSIAPhiydD7OdvjurzGY+99j+N9R7cOBgWGe9byKzKJcRMeOHTFs2DBMmTIFn3zyCUpLSzF9+nQ88cQT8PXVdvYrV65g0KBB+Oqrr9CrVy/k5uZiyJAhKCwsxNdff22Q/NrDwwNKJeczEhFR01grFfh+agwqKrTz6MnyeTnZwsXeGrcKSxHi4cCRR62Iu1qFqfeFYN8fN/GPER1b+nDIhBgkbh0kScJDuqI3Hz0ZhZ3J1zEq0reeV5G5s7VWYtkTXTGqiw88HFWIYsCwXmYRPAKAb775BtOnT8egQYOgUCgwZswYLFu2TF5fWlqK5ORkFBZqcxEcPnxYrsQWGhpq8F6pqakICgoy2bETEZHlUVkxaNTafDulNz7deR6P9wxo6UMhE/v78LCWPgQiMoGRXXwxsgsDR62FJEkY0rnuNDhUyWyCR66urvj2229rXR8UFAQhhPz7gAEDDH4nIiIiuhsdfZzwryeiWvowiIiIiEyuRautERERERERERHRvY3BIyIiIiIiIiIiqhWDR0REREREREREVCsGj4iIiIiIiIiIqFZmkzC7peiTbufm5rbwkRCRRSgrAAp1/87NBazKW/RwiIiIqJnwHN/6sM1bHwtsc32so76CY5JgSbI6/fHHHwgJCWnpwyAiIiIiIiIiMopLly7Bz8+v1vUceVQPV1dXAEBaWhqcnZ1b+GjImHJzc+Hv749Lly7BycmppQ+HjIht3XqwrVsPtnXrwbZuPdjWrQfbuvVgW997hBDIy8uDr69vndsxeFQPhUKbFsrZ2Zl/3K2Ek5MT27qVYFu3Hmzr1oNt3XqwrVsPtnXrwbZuPdjW95aGDJRhwmwiIiIiIiIiIqoVg0dERERERERERFQrBo/qoVKpsHDhQqhUqpY+FDIytnXrwbZuPdjWrQfbuvVgW7cebOvWg23derCtzRerrRERERERERERUa048oiIiIiIiIiI
iGrF4BEREREREREREdWKwSMiIiIiIiIiIqoVg0dERERERERERFQrBo/qsHz5cgQFBcHW1hbR0dE4cOBASx8SNdKSJUvQs2dPODo6wtPTEw899BCSk5MNthkwYAAkSTL4mTp1qsE2aWlpGDFiBOzt7eHp6Ym5c+eirKzMlB+F6rFo0aJq7RgWFiavLyoqwrRp0+Dm5ga1Wo0xY8bg2rVrBu/BdjYPQUFB1dpakiRMmzYNAPu0Odu1axdGjRoFX19fSJKE9evXG6wXQmDBggXw8fGBnZ0dBg8ejHPnzhlsk5WVhfHjx8PJyQkajQaTJ09Gfn6+wTbHjh1D//79YWtrC39/f7z11lvG/mh0h7raurS0FPPmzUNERAQcHBzg6+uLiRMnIj093eA9avouWLp0qcE2bOuWV1+/njRpUrV2HDZsmME27Nfmob62runcLUkS3n77bXkb9mvz0JB7rOa69o6Pj0e3bt2gUqkQGhqK1atXG/vjUS0YPKrFd999h9mzZ2PhwoU4fPgwIiMjMXToUGRmZrb0oVEj7Ny5E9OmTcO+ffuwZcsWlJaWYsiQISgoKDDYbsqUKcjIyJB/qp6EysvLMWLECJSUlGDv3r348ssvsXr1aixYsMDUH4fq0blzZ4N23L17t7zub3/7G/73v//hhx9+wM6dO5Geno5HHnlEXs92Nh8HDx40aOctW7YAAMaOHStvwz5tngoKChAZGYnly5fXuP6tt97CsmXL8Mknn2D//v1wcHDA0KFDUVRUJG8zfvx4nDx5Elu2bMGGDRuwa9cuPPvss/L63NxcDBkyBIGBgUhMTMTbb7+NRYsW4bPPPjP656NKdbV1YWEhDh8+jH/+8584fPgw1q1bh+TkZDz44IPVtl28eLFBX58xY4a8jm19b6ivXwPAsGHDDNpxzZo1BuvZr81DfW1dtY0zMjKwcuVKSJKEMWPGGGzHfn3va8g9VnNce6empmLEiBG4//77kZSUhFmzZuGZZ57Bpk2bTPp5SUdQjXr16iWmTZsm/15eXi58fX3FkiVLWvCo6G5lZmYKAGLnzp3ysvvuu0+88MILtb5m48aNQqFQiKtXr8rLVqxYIZycnERxcbExD5caYeHChSIyMrLGddnZ2cLa2lr88MMP8rLTp08LACIhIUEIwXY2Zy+88IIICQkRFRUVQgj2aUsBQPz000/y7xUVFcLb21u8/fbb8rLs7GyhUqnEmjVrhBBCnDp1SgAQBw8elLf57bffhCRJ4sqVK0IIIT7++GPh4uJi0Nbz5s0THTp0MPInotrc2dY1OXDggAAgLl68KC8LDAwU77//fq2vYVvfe2pq66eeekqMHj261tewX5unhvTr0aNHi4EDBxosY782T3feYzXXtfdLL70kOnfubLCvxx9/XAwdOtTYH4lqwJFHNSgpKUFiYiIGDx4sL1MoFBg8eDASEhJa8MjobuXk5AAAXF1dDZZ/8803cHd3R3h4OObPn4/CwkJ5XUJCAiIiIuDl5SUvGzp0KHJzc3Hy5EnTHDg1yLlz5+Dr64vg4GCMHz8eaWlpAIDExESUlpYa9OmwsDAEBATIfZrtbJ5KSkrw9ddf4y9/+QskSZKXs09bntTUVFy9etWgHzs7OyM6OtqgH2s0GvTo0UPeZvDgwVAoFNi/f7+8TWxsLGxsbORthg4diuTkZNy6dctEn4YaKycnB5IkQaPRGCxfunQp3NzcEBUVhbfffttgugPb2nzEx8fD09MTHTp0wHPPPYebN2/K69ivLdO1a9fw66+/YvLkydXWsV+bnzvvsZrr2jshIcHgPfTb8J68ZVi19AHci27cuIHy8nKDP2QA8PLywpkzZ1roqOhuVVRUYNasWejbty/Cw8Pl5U8++SQCAwPh6+uLY8eOYd68eUhOTsa6desAAFevXq3xb0G/ju4N0dHRWL16NTp06ICMjAy88sor6N+/P06cOIGrV6/Cxsam2k2Hl5eX3IZsZ/O0fv16ZGdnY9Kk
SfIy9mnLpG+bmtquaj/29PQ0WG9lZQVXV1eDbdq2bVvtPfTrXFxcjHL81HRFRUWYN28exo0bBycnJ3n5zJkz0a1bN7i6umLv3r2YP38+MjIy8N577wFgW5uLYcOG4ZFHHkHbtm1x/vx5/N///R+GDx+OhIQEKJVK9msL9eWXX8LR0dFgGhPAfm2OarrHaq5r79q2yc3Nxe3bt2FnZ2eMj0S1YPCIWo1p06bhxIkTBnlwABjMmY+IiICPjw8GDRqE8+fPIyQkxNSHSU00fPhw+d9dunRBdHQ0AgMD8f333/PEYsG++OILDB8+HL6+vvIy9mkiy1FaWorHHnsMQgisWLHCYN3s2bPlf3fp0gU2Njb461//iiVLlkClUpn6UKmJnnjiCfnfERER6NKlC0JCQhAfH49Bgwa14JGRMa1cuRLjx4+Hra2twXL2a/NT2z0WWR5OW6uBu7s7lEpltWzw165dg7e3dwsdFd2N6dOnY8OGDdixYwf8/Pzq3DY6OhoAkJKSAgDw9vau8W9Bv47uTRqNBu3bt0dKSgq8vb1RUlKC7Oxsg22q9mm2s/m5ePEitm7dimeeeabO7dinLYO+beo6N3t7e1crbFFWVoasrCz2dTOkDxxdvHgRW7ZsMRh1VJPo6GiUlZXhwoULANjW5io4OBju7u4G39ns15bl999/R3Jycr3nb4D9+l5X2z1Wc11717aNk5MTHw63AAaPamBjY4Pu3btj27Zt8rKKigps27YNMTExLXhk1FhCCEyfPh0//fQTtm/fXm2Ya02SkpIAAD4+PgCAmJgYHD9+3ODCRX8R26lTJ6McN929/Px8nD9/Hj4+PujevTusra0N+nRycjLS0tLkPs12Nj+rVq2Cp6cnRowYUed27NOWoW3btvD29jbox7m5udi/f79BP87OzkZiYqK8zfbt21FRUSEHEWNiYrBr1y6UlpbK22zZsgUdOnTgdId7iD5wdO7cOWzduhVubm71viYpKQkKhUKe4sS2Nk+XL1/GzZs3Db6z2a8tyxdffIHu3bsjMjKy3m3Zr+9N9d1jNde1d0xMjMF76LfhPXkLaeGE3festWvXCpVKJVavXi1OnTolnn32WaHRaAyywdO977nnnhPOzs4iPj5eZGRkyD+FhYVCCCFSUlLE4sWLxaFDh0Rqaqr4+eefRXBwsIiNjZXfo6ysTISHh4shQ4aIpKQkERcXJzw8PMT8+fNb6mNRDebMmSPi4+NFamqq2LNnjxg8eLBwd3cXmZmZQgghpk6dKgICAsT27dvFoUOHRExMjIiJiZFfz3Y2L+Xl5SIgIEDMmzfPYDn7tHnLy8sTR44cEUeOHBEAxHvvvSeOHDkiV9haunSp0Gg04ueffxbHjh0To0ePFm3bthW3b9+W32PYsGEiKipK7N+/X+zevVu0a9dOjBs3Tl6fnZ0tvLy8xIQJE8SJEyfE2rVrhb29vfj0009N/nlbs7rauqSkRDz44IPCz89PJCUlGZy/9RV49u7dK95//32RlJQkzp8/L77++mvh4eEhJk6cKO+DbX1vqKut8/LyxIsvvigSEhJEamqq2Lp1q+jWrZto166dKCoqkt+D/do81PcdLoQQOTk5wt7eXqxYsaLa69mvzUd991hCNM+19x9//CHs7e3F3LlzxenTp8Xy5cuFUqkUcXFxJv28pMXgUR0+/PBDERAQIGxsbESvXr3Evn37WvqQqJEA1PizatUqIYQQaWlpIjY2Vri6ugqVSiVCQ0PF3LlzRU5OjsH7XLhwQQwfPlzY2dkJd3d3MWfOHFFaWtoCn4hq8/jjjwsfHx9hY2Mj2rRpIx5//HGRkpIir799+7Z4/vnnhYuLi7C3txcPP/ywyMjIMHgPtrP52LRpkwAgkpOTDZazT5u3HTt21Pid/dRTTwkhhKioqBD//Oc/hZeXl1CpVGLQoEHV/gZu3rwpxo0bJ9RqtXBychJPP/20yMvLM9jm6NGjol+/fkKlUok2bdqIpUuX
muojkk5dbZ2amlrr+XvHjh1CCCESExNFdHS0cHZ2Fra2tqJjx47ijTfeMAg4CMG2vhfU1daFhYViyJAhwsPDQ1hbW4vAwEAxZcqUag9r2a/NQ33f4UII8emnnwo7OzuRnZ1d7fXs1+ajvnssIZrv2nvHjh2ia9euwsbGRgQHBxvsg0xLEkIIIw1qIiIiIiIiIiIiM8ecR0REREREREREVCsGj4iIiIiIiIiIqFYMHhERERERERERUa0YPCIiIiIiIiIioloxeERERERERERERLVi8IiIiIiIiIiIiGrF4BEREREREREREdWKwSMiIiIiIiIiIqoVg0dERERk0SZNmoSHHnrI5PtdvXo1JEmCJEmYNWuWyfffWAMGDGi247xw4YL82bt27dos70lEREQtx6qlD4CIiIioqSRJqnP9woUL8cEHH0AIYaIjMuTk5ITk5GQ4ODi0yP5bir+/PzIyMvDOO+9g69atLX04REREdJcYPCIiIiKzlZGRIf/7u+++w4IFC5CcnCwvU6vVUKvVLXFoALTBLW9v7xbbf0tRKpXw9vZu0f/3RERE1Hw4bY2IiIjMlre3t/zj7OwsB2v0P2q1utq0tQEDBmDGjBmYNWsWXFxc4OXlhc8//xwFBQV4+umn4ejoiNDQUPz2228G+zpx4gSGDx8OtVoNLy8vTJgwATdu3Gj0MX/88cdo164dbG1t4eXlhUcffVReFxcXh379+kGj0cDNzQ0jR47E+fPn5fX66WDff/89+vfvDzs7O/Ts2RNnz57FwYMH0aNHD6jVagwfPhzXr1+XX6f/f/DKK6/Aw8MDTk5OmDp1KkpKSmo9zuLiYrz44oto06YNHBwcEB0djfj4eHn9xYsXMWrUKLi4uMDBwQGdO3fGxo0bG/3/g4iIiO59DB4RERFRq/Pll1/C3d0dBw4cwIwZM/Dcc89h7Nix6NOnDw4fPowhQ4ZgwoQJKCwsBABkZ2dj4MCBiIqKwqFDhxAXF4dr167hsccea9R+Dx06hJkzZ2Lx4sVITk5GXFwcYmNj5fUFBQWYPXs2Dh06hG3btkGhUODhhx9GRUWFwfssXLgQL7/8Mg4fPgwrKys8+eSTeOmll/DBBx/g999/R0pKChYsWGDwmm3btuH06dOIj4/HmjVrsG7dOrzyyiu1Huv06dORkJCAtWvX4tixYxg7diyGDRuGc+fOAQCmTZuG4uJi7Nq1C8ePH8ebb77JkUZEREQWitPWiIiIqNWJjIzEyy+/DACYP38+li5dCnd3d0yZMgUAsGDBAqxYsQLHjh1D79698dFHHyEqKgpvvPGG/B4rV66Ev78/zp49i/bt2zdov2lpaXBwcMDIkSPh6OiIwMBAREVFyevHjBljsP3KlSvh4eGBU6dOITw8XF7+4osvYujQoQCAF154AePGjcO2bdvQt29fAMDkyZOxevVqg/eysbHBypUrYW9vj86dO2Px4sWYO3cuXn31VSgUhs8T09LSsGrVKqSlpcHX11feZ1xcHFatWoU33ngDaWlpGDNmDCIiIgAAwcHBDfp/QEREROaHI4+IiIio1enSpYv8b6VSCTc3NzkIAgBeXl4AgMzMTADA0aNHsWPHDjmHklqtRlhYGAAYTCurzwMPPIDAwEAEBwdjwoQJ+Oabb+TRTQBw7tw5jBs3DsHBwXByckJQUBAAbTCntuPXH+udx68/dr3IyEjY29vLv8fExCA/Px+XLl2qdpzHjx9HeXk52rdvb/CZd+7cKX/emTNn4rXXXkPfvn2xcOFCHDt2rMH/H4iIiMi8cOQRERERtTrW1tYGv0uSZLBMX8VNP10sPz8fo0aNwptvvlntvXx8fBq8X0dHRxw+fBjx8fHYvHkzFixYgEWLFuHgwYPQaDQYNWoUAgMD8fnnn8PX1xcVFRUIDw+vlpuopmO9c9mdU90aIz8/H0qlEomJiVAqlQbr9FPTnnnmGQwdOhS//vorNm/ejCVLluDdd9/FjBkzmrxfIiIi
ujcxeERERERUj27duuHHH39EUFAQrKzu7vLJysoKgwcPxuDBg7Fw4UJoNBps374d9913H5KTk/H555+jf//+AIDdu3c3x+ED0I6eun37Nuzs7AAA+/btg1qthr+/f7Vto6KiUF5ejszMTPlYauLv74+pU6di6tSpmD9/Pj7//HMGj4iIiCwQp60RERER1WPatGnIysrCuHHjcPDgQZw/fx6bNm3C008/jfLy8ga/z4YNG7Bs2TIkJSXh4sWL+Oqrr1BRUYEOHTrAxcUFbm5u+Oyzz5CSkoLt27dj9uzZzfYZSkpKMHnyZJw6dQobN27EwoULMX369Gr5jgCgffv2GD9+PCZOnIh169YhNTUVBw4cwJIlS/Drr78CAGbNmoVNmzYhNTUVhw8fxo4dO9CxY8dmO14iIiK6d3DkEREREVE9fH19sWfPHsybNw9DhgxBcXExAgMDMWzYsBqDL7XRaDRYt24dFi1ahKKiIrRr1w5r1qxB586dAQBr167FzJkzER4ejg4dOmDZsmUYMGBAs3yGQYMGoV27doiNjUVxcTHGjRuHRYsW1br9qlWr8Nprr2HOnDm4cuUK3N3d0bt3b4wcORIAUF5ejmnTpuHy5ctwcnLCsGHD8P777zfLsRIREdG9RRJCiJY+CCIiIiJLs3r1asyaNQvZ2dktfSiYNGkSsrOzsX79epPud9GiRVi/fj2SkpJMul8iIiJqXpy2RkRERGQkOTk5UKvVmDdvXksfikmlpaVBrVbjjTfeaOlDISIiombAaWtERERERjBmzBj069cPgHa6Wmvi6+srjzZSqVQtezBERER01zhtjYiIiIiIiIiIasVpa0REREREREREVCsGj4iIiIiIiIiIqFYMHhERERERERERUa0YPCIiIiIiIiIioloxeERERERERERERLVi8IiIiIiIiIiIiGrF4BEREREREREREdWKwSMiIiIiIiIiIqrV/wNhxccIyPGnbAAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.plot(frames[0, frame_i])\n", + "peaks, lags = [], []\n", + "\n", + "# Find neighboring peaks\n", + "peaks, lags = [], []\n", + "for i in range(4):\n", + " peak, lag = torch.max(masked_frames, dim=-1, keepdim=True)\n", + " mask = (\n", + " (mask_indices > lag - periods // 2)\n", + " & (mask_indices < lag + periods // 2)\n", + " )\n", + " masked_frames[mask] = 0\n", + " peaks.append(peak.squeeze(-1))\n", + " lags.append(lag.squeeze(-1))\n", + "\n", + "peaks = torch.stack(peaks, dim=-1)\n", + "lags = torch.stack(lags, dim=-1)\n", + "\n", + "for lag in lags[0, frame_i]:\n", + " plt.axvline(lag, color=\"orange\")\n", + "\n", + "plt.title(\"Period peaks found using topk\")\n", + "plt.xlabel(\"Time [samples]\")\n", + "plt.ylabel(\"Amplitude\")\n", + "\n", + "print(\"Peak indices:\", lags[0, 230])" + ] + }, + { + "cell_type": "markdown", + "id": "c41bb91d-2211-4603-9307-34ff0824fd34", + "metadata": {}, + "source": [ + "Jitter is the average variation in period length. We compare each period's length\n", + "against the period estimate across the whole window. The mean difference is the\n", + "jitter, after normalizing by the period to get a number from 0 to 1." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "efd48060-8036-4fad-90ed-bba0fd2f92b4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Peak lags from 0: tensor([51, 53, 53, 55])\n", + "Normalized period length differences: tensor([0.0078, 0.0000, 0.0000, 0.0078])\n", + "Average jitter score for this frame: tensor(0.0039)\n" + ] + } + ], + "source": [ + "lags = lags.remainder(periods)\n", + "jitter_frames = (lags - lags.float().mean(dim=-1, keepdims=True)).abs()\n", + "jitter = jitter_frames.mean(dim=-1) / best_lags\n", + "\n", + "print(\"Peak lags from 0:\", lags[0, frame_i])\n", + "print(\"Normalized period length differences:\", jitter_frames[0, frame_i] / best_lags[0, frame_i])\n", + "print(\"Average jitter score for this frame:\", jitter[0, frame_i])" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "262234da-e948-4a57-a6fa-a35945a324ae", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Jitter: 0.44%\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABI0AAADvCAYAAAB/uyR2AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAi/dJREFUeJzs3Xd4U2UbBvA7adqke9BdSil7r7JHyxARQQURBAdLRAQHH5/6iYulIqKCIjJlCCKyEZC99yoFChQo3XvvNs043x9pTpvOtJS2wP27rl5Xc/Kec94kJ8nJc573eSWCIAggIiIiIiIiIiIqQlrbHSAiIiIiIiIiorqHQSMiIiIiIiIiIiqBQSMiIiIiIiIiIiqBQSMiIiIiIiIiIiqBQSMiIiIiIiIiIiqBQSMiIiIiIiIiIiqBQSMiIiIiIiIiIiqBQSMiIiIiIiIiIiqBQSMiIiIiIiIiIiqBQSMiIqpVs2fPhkQiqe1u1AkSiQSzZ8+u7W48ERo2bIjx48eLt0+cOAGJRIITJ07UWp/oybNhwwa0aNECpqamsLOzq9S6pR2T48ePR8OGDau1j3VFWFgYJBIJ1q1bV9tdISKiSmDQiIiIatS6desgkUhw5cqVMtt8++232LVrV4nl586dw+zZs5GWlvboOkhPjU2bNmHx4sUllsfExGD27NkICAio8T7R4yMoKAjjx49H48aNsWrVKqxcubK2u0RERFTtGDQiIqJa9cUXXyA3N9dgWXlBozlz5jBoRBW6e/cuVq1aJd729fVFbm4ufH19xWXlBY3mzJnDoBGV68SJE9Bqtfj5558xfvx4jBo1qra7REREVO0YNCIiololk8mgUChqtQ85OTm1uv/HlVarRV5eXm13o1RyuRympqbibalUCoVCAam09k59srOza23fxqjr/QMAtVqN/Pz82u4GACAhIQEAKj0sjYiI6HHCoBEREdWq4jWNJBIJsrOzsX79ekgkEkgkEowfPx6zZ8/Gxx9/DADw9vYW7wsLCxPX3bhxI3x8fGBubg4HBweMHj0akZGRBvvr27cv2rRpg6tXr8LX1xcWFhb47LPPyuzfjRs3MH78eDRq1AgKhQKurq6YOHEikpOTS30cwcHBGD9+POzs7GBra4sJEyaUCEoplUr85z//gZOTE6ytrfHiiy8iKirKqOcrPz8fX331FXx8fGBrawtLS0v06dMHx48fF9uoVCo4ODhgwoQJJdbPyMiAQqHARx99ZNCfWbNmoUmTJpDL5fD09MQnn3wCpVJpsK5EIsF7772HP//8E61bt4ZcLseBAwcAAD/88AN69uyJevXqwdzcHD4+Pti2bVuJ/efm5uKDDz6Ao6Oj+Nijo6NLrecUHR2NiRMnwsXFBXK5HK1bt8aaNWuMep4qqmnUt29f7Nu3D+Hh4eKx1LBhQ5w4cQJdunQBAEyYMEG8r2gdlosXL+K5556Dra0tLCws4Ofnh7NnzxrsX3883L59G6+99hrs7e3Ru3fvMvurUqkwZ84cNG3aFAqFAvXq1UPv3r1x+PBhg3ZBQUEYNWoUnJycYG5ujubNm+Pzzz83aHPt2jUMHjwYNjY2sLKywoABA3DhwgWDNvphoidPnsTUqVPh7OyM+vXri/fv378fffr0gaWlJaytrTFkyBDcunWrwuc9JSUFH330Edq2bQsrKyvY2Nhg8ODBuH79eom2eXl5mD17Npo1awaFQgE3Nze8/PLLePDgAYDCGjg//PADFi9ejMaNG0Mul+P27dsAgGPHjol9tLOzw0svvYQ7d+4Y7CMzMxPTp09Hw4YNIZfL4ezsjIEDB8Lf319sc//+fYwYMQKurq5QKBSoX78+Ro8ejfT09DIfZ8OGDTFr1iwAgJOTk8HxW1ZtsuLH5MO4cuUKBg0aBEdHR5ibm8Pb2xsTJ040aGPse1L/vt66dStatWoFc3Nz9OjRAzdv3gQArFixAk2aNIFCoUDfvn0NPnMBw8/Unj17iv1Zvny5UY8
lKCgIr7zyChwcHKBQKNC5c2f8888/VXtiiIio2slquwNERERFbdiwAZMmTULXrl0xefJkAEDjxo1haWmJe/fu4a+//sKiRYvg6OgIQPeDDQC++eYbfPnllxg1ahQmTZqExMRELFmyBL6+vrh27ZpBNkBycjIGDx6M0aNH44033oCLi0uZ/Tl8+DBCQkIwYcIEuLq64tatW1i5ciVu3bqFCxculCjiPWrUKHh7e2P+/Pnw9/fH6tWr4ezsjAULFohtJk2ahI0bN+K1115Dz549cezYMQwZMsSo5ycjIwOrV6/GmDFj8PbbbyMzMxO///47Bg0ahEuXLqFDhw4wNTXF8OHDsWPHDqxYsQJmZmbi+rt27YJSqcTo0aMB6LKFXnzxRZw5cwaTJ09Gy5YtcfPmTSxatAj37t0rMUzw2LFj2LJlC9577z04OjqKRXt//vlnvPjii3j99deRn5+PzZs3Y+TIkdi7d6/BYxs/fjy2bNmCN998E927d8fJkydLfezx8fHo3r27+IPWyckJ+/fvx1tvvYWMjAxMnz7dqOerLJ9//jnS09MRFRWFRYsWAQCsrKzQsmVLzJ07F1999RUmT56MPn36AAB69uwpPv7BgwfDx8cHs2bNglQqxdq1a9G/f3+cPn0aXbt2NdjPyJEj0bRpU3z77bcQBKHM/syePRvz588Xj/2MjAxcuXIF/v7+GDhwIABdALNPnz4wNTXF5MmT0bBhQzx48AB79uzBN998AwC4desW+vTpAxsbG3zyyScwNTXFihUr0LdvX5w8eRLdunUz2O/UqVPh5OSEr776Ssw02rBhA8aNG4dBgwZhwYIFyMnJwbJly9C7d29cu3at3ELNISEh2LVrF0aOHAlvb2/Ex8djxYoV8PPzw+3bt+Hu7g4A0Gg0GDp0KI4ePYrRo0fjww8/RGZmJg4fPozAwEA0btxY3ObatWuRl5eHyZMnQy6Xw8HBAUeOHMHgwYPRqFEjzJ49G7m5uViyZAl69eoFf39/sY9TpkzBtm3b8N5776FVq1ZITk7GmTNncOfOHXTq1An5+fkYNGgQlEol3n//fbi6uiI6Ohp79+5FWloabG1tS32cixcvxh9//IGdO3di2bJlsLKyQrt27cp8XqpTQkICnn32WTg5OeHTTz+FnZ0dwsLCsGPHDoN2xr4nAeD06dP4559/MG3aNADA/PnzMXToUHzyySf47bffMHXqVKSmpuL777/HxIkTcezYMYP1U1NT8fzzz2PUqFEYM2YMtmzZgnfffRdmZmYlgllF3bp1C7169YKHhwc+/fRTWFpaYsuWLRg2bBi2b9+O4cOHV9OzRkREVSYQERHVoLVr1woAhMuXLwuCIAizZs0Sin8dWVpaCuPGjSux7sKFCwUAQmhoqMHysLAwwcTERPjmm28Mlt+8eVOQyWQGy/38/AQAwvLly43qb05OTollf/31lwBAOHXqlLhM/zgmTpxo0Hb48OFCvXr1xNsBAQECAGHq1KkG7V577TUBgDBr1qxy+6NWqwWlUmmwLDU1VXBxcTHY98GDBwUAwp49ewzaPv/880KjRo3E2xs2bBCkUqlw+vRpg3bLly8XAAhnz54VlwEQpFKpcOvWrRL9Kv485efnC23atBH69+8vLrt69aoAQJg+fbpB2/Hjx5d47G+99Zbg5uYmJCUlGbQdPXq0YGtrW+rrUpSXl5fBMXT8+HEBgHD8+HFx2ZAhQwQvL68S616+fFkAIKxdu9ZguVarFZo2bSoMGjRI0Gq1Bo/d29tbGDhwoLhMfzyMGTOm3H7qtW/fXhgyZEi5bXx9fQVra2shPDy8RL/0hg0bJpiZmQkPHjwQl8XExAjW1taCr6+vuEz/Puzdu7egVqvF5ZmZmYKdnZ3w9ttvG+wjLi5OsLW1LbG8uLy8PEGj0RgsCw0NFeRyuTB37lxx2Zo1awQAwk8//VRiG/rHExoaKgAQbGxshISEBIM2HTp0EJydnYXk5GRx2fXr1wW
pVCqMHTtWXGZraytMmzatzP5eu3ZNACBs3bq13MdVGv1rnJiYaLC8rPexMcfkuHHjSj0mi9q5c6fBZ2hZjHlP6vsrl8sNPldXrFghABBcXV2FjIwMcfnMmTNLfAbrP1N//PFHcZlSqRRfo/z8fEEQCl/Pou+rAQMGCG3bthXy8vLEZVqtVujZs6fQtGnTch8fERHVDA5PIyKix96OHTug1WoxatQoJCUliX+urq5o2rSpwdAtQFfvprShW6UxNzcX/8/Ly0NSUhK6d+8OAAZDXPSmTJlicLtPnz5ITk5GRkYGAODff/8FAHzwwQcG7YzNnDExMREzh7RaLVJSUqBWq9G5c2eD/vTv3x+Ojo74+++/xWWpqak4fPgwXn31VXHZ1q1b0bJlS7Ro0cLguevfvz8AlHju/Pz80KpVqxL9Kvo8paamIj09HX369DHok34o29SpUw3Wff/99w1uC4KA7du344UXXoAgCAb9GjRoENLT00t97h+1gIAA3L9/H6+99hqSk5PFPmVnZ2PAgAE4deoUtFqtwTrFj4ey2NnZ4datW7h//36p9ycmJuLUqVOYOHEiGjRoYHCfPttNo9Hg0KFDGDZsGBo1aiTe7+bmhtdeew1nzpwRj0O9t99+GyYmJuLtw4cPIy0tDWPGjDF43k1MTNCtW7cSx0NxcrlcrBul0WiQnJwMKysrNG/e3OA12759OxwdHUu89kUfj96IESPEjEIAiI2NRUBAAMaPHw8HBwdxebt27TBw4EDxPQbonteLFy8iJiam1P7qM4kOHjz42NQ202dN7t27FyqVqsx2xrwn9QYMGGCQQabPSBsxYgSsra1LLA8JCTFYXyaT4Z133hFvm5mZ4Z133kFCQgKuXr1aav9SUlJw7NgxjBo1CpmZmeKxlpycjEGDBuH+/fuIjo4u8/EREVHN4PA0IiJ67N2/fx+CIKBp06al3l+0IDIAeHh4GAzZKk9KSgrmzJmDzZs3i4Vv9UqreVL8B729vT0A3Y82GxsbhIeHQyqVGgy/AYDmzZsb1R8AWL9+PX788UcEBQUZ/Gj09vYW/5fJZBgxYgQ2bdoEpVIJuVyOHTt2QKVSGQSN7t+/jzt37hj8KC+q+GMuuo+i9u7di6+//hoBAQEGtZCKBgD0j734Npo0aWJwOzExEWlpaVi5cmWZ05gX71dN0Ad0xo0bV2ab9PR08TUHyn6+ips7dy5eeuklNGvWDG3atMFzzz2HN998UxzypP+R3qZNmzK3kZiYiJycnFKPpZYtW0Kr1SIyMhKtW7cus3/6x6gPGhZnY2NT7uPQzyb222+/ITQ0FBqNRryvXr164v8PHjxA8+bNIZNVfCpavI/h4eEASn/PtGzZEgcPHkR2djYsLS3x/fffY9y4cfD09ISPjw+ef/55jB07VgyqeXt7Y8aMGfjpp5/w559/ok+fPnjxxRfxxhtvlDk0rbb5+flhxIgRmDNnDhYtWoS+ffti2LBheO211yCXy8V2xrwn9Yp/bukfu6enZ6nLU1NTDZa7u7vD0tLSYFmzZs0A6GpT6QPtRQUHB0MQBHz55Zf48ssvS32sCQkJ8PDwKPU+IiKqGQwaERHRY0+r1UIikWD//v0GWRN6VlZWBreLXoGvyKhRo3Du3Dl8/PHH6NChA6ysrKDVavHcc8+VyCoBUOr+AZRbz6YyNm7ciPHjx2PYsGH4+OOP4ezsDBMTE8yfP18sIKw3evRorFixAvv378ewYcOwZcsWtGjRAu3btxfbaLVatG3bFj/99FOp+yv+o7G05+706dN48cUX4evri99++w1ubm4wNTXF2rVrsWnTpko/Rv3z+sYbb5QZoKmp+jFF6fu1cOFCdOjQodQ2VT3WfH198eDBA+zevRuHDh3C6tWrsWjRIixfvhyTJk16qH6Xp3j/9I9xw4YNcHV1LdG+oiDPt99+iy+//BITJ07EvHnz4ODgAKlUiunTp5f6fqlKHytj1KhR6NOnD3bu3IlDhw5
h4cKFWLBgAXbs2IHBgwcDAH788UeMHz9efO4/+OADzJ8/HxcuXDAoDv6wigbQHoZEIsG2bdtw4cIF7NmzBwcPHsTEiRPx448/4sKFC7Cysqr0e7Ksz61H+XmmPx4++ugjDBo0qNQ2xQPKRERU8xg0IiKiOqe0K+HlLW/cuDEEQYC3t7d4dbs6pKam4ujRo5gzZw6++uorcXlZQ4iM4eXlBa1WK2Za6N29e9eo9bdt24ZGjRphx44dBs+Hfianonx9feHm5oa///4bvXv3xrFjx0rMtNW4cWNcv34dAwYMKPP5rcj27duhUChw8OBBg0yHtWvXGrTTP/bQ0FCDrLDg4GCDdvpZ5TQaDZ555pkq9ckYVTnOAF22zaPol37GuwkTJiArKwu+vr6YPXs2Jk2aJGbGBAYGlrm+k5MTLCwsSj2WgoKCIJVKSwQBi9M/Rmdn5yo9xm3btqFfv374/fffDZanpaWJxev1+7l48SJUKlWJTMCKeHl5ASj9PRMUFARHR0eDrBc3NzdMnToVU6dORUJCAjp16oRvvvlGDBoBQNu2bdG2bVt88cUXOHfuHHr16oXly5fj66+/rlTfAF12YVpamsGy/Px8xMbGVnpb5enevTu6d++Ob775Bps2bcLrr7+OzZs3Y9KkSUa/J6tLTEyMmN2ld+/ePQAos3C6/pg2NTV9pO9zIiJ6OKxpREREdY6lpWWJH1365QBK3Pfyyy/DxMQEc+bMKXEFXBAEJCcnV6kf+qvsxbe5ePHiKm0PgPhD9ZdffqnSNkvr08WLF3H+/PkSbaVSKV555RXs2bMHGzZsgFqtNhiaBugyMaKjo7Fq1aoS6+fm5oozalXUJ4lEYpBJERYWVmLmNX02wW+//WawfMmSJSW2N2LECGzfvr3UIEliYmKFfTKGpaVlqUMMyzrOfHx80LhxY/zwww/Iysqq1n4VP0atrKzQpEkTcViRk5MTfH19sWbNGkRERBi01R8LJiYmePbZZ7F7926DadHj4+OxadMm9O7du8LhZYMGDYKNjQ2+/fbbUuvlVPQYTUxMSrxftm7dWqI2zYgRI5CUlIRff/21xDYqymJxc3NDhw4dsH79eoPXKDAwEIcOHcLzzz8PQJfZU/z1dXZ2hru7u/i8ZmRkQK1WG7Rp27YtpFKpwZCuymjcuDFOnTplsGzlypXVlmmUmppa4jnSZ77p+2zse7K6qNVqrFixQrydn5+PFStWwMnJCT4+PqWu4+zsjL59+2LFihWlBtSq631OREQPh5lGRERU5/j4+ODIkSP46aef4O7uDm9vb3Tr1k388fH5559j9OjRMDU1xQsvvIDGjRvj66+/xsyZMxEWFoZhw4bB2toaoaGh2LlzJyZPnoyPPvqo0v2wsbGBr68vvv/+e6hUKnh4eODQoUMIDQ2t8mPr0KEDxowZg99++w3p6eno2bMnjh49WiLbpixDhw7Fjh07MHz4cAwZMgShoaFYvnw5WrVqVWog49VXX8WSJUswa9YstG3bFi1btjS4/80338SWLVswZcoUHD9+HL169YJGo0FQUBC2bNmCgwcPonPnzuX2aciQIfjpp5/w3HPP4bXXXkNCQgKWLl2KJk2a4MaNG2I7Hx8fjBgxAosXL0ZycjK6d++OkydPihkJRTN8vvvuOxw/fhzdunXD22+/jVatWiElJQX+/v44cuQIUlJSjHq+yuPj44O///4bM2bMQJcuXWBlZSUeT3Z2dli+fDmsra1haWmJbt26wdvbG6tXr8bgwYPRunVrTJgwAR4eHoiOjsbx48dhY2ODPXv2VKkvrVq1Qt++feHj4wMHBwdcuXJFnCpe75dffkHv3r3RqVMnTJ48Gd7e3ggLC8O+ffsQEBAAAPj6669x+PBh9O7dG1OnToVMJsOKFSugVCrx/fffV9gPGxsbLFu2DG+++SY6deqE0aNHw8nJCREREdi3bx969epVaqBHb+jQoZg7dy4mTJiAnj174ubNm/jzzz8NCnM
DwNixY/HHH39gxowZuHTpEvr06YPs7GwcOXIEU6dOxUsvvVRuPxcuXIjBgwejR48eeOutt5Cbm4slS5bA1tYWs2fPBgBkZmaifv36eOWVV9C+fXtYWVnhyJEjuHz5Mn788UcAwLFjx/Dee+9h5MiRaNasGdRqNTZs2CAGLqti0qRJmDJlCkaMGIGBAwfi+vXrOHjwoEGm1cNYv349fvvtNwwfPhyNGzdGZmYmVq1aBRsbGzFgZux7srq4u7tjwYIFCAsLQ7NmzfD3338jICAAK1euLDeTbOnSpejduzfatm2Lt99+G40aNUJ8fDzOnz+PqKgoXL9+vdr7SkRElVQLM7YREdFTTD/Vtr+/vyAIhdNWFxUUFCT4+voK5ubmAgCDaarnzZsneHh4CFKptMTUz9u3bxd69+4tWFpaCpaWlkKLFi2EadOmCXfv3hXb+Pn5Ca1btza6v1FRUcLw4cMFOzs7wdbWVhg5cqQQExNTYlrtsqbf1k9tXrSfubm5wgcffCDUq1dPsLS0FF544QUhMjKyzKm6i9JqtcK3334reHl5CXK5XOjYsaOwd+/eMqfq1mq1gqenpwBA+Prrr0vdZn5+vrBgwQKhdevWglwuF+zt7QUfHx9hzpw5Qnp6utgOQJnTl//+++9C06ZNBblcLrRo0UJYu3Ztqa9tdna2MG3aNMHBwUGwsrIShg0bJty9e1cAIHz33XcGbePj44Vp06YJnp6egqmpqeDq6ioMGDBAWLlyZbnPkSAYN715VlaW8Nprrwl2dnYCAIPnb/fu3UKrVq0EmUxWYprwa9euCS+//LJQr149QS6XC15eXsKoUaOEo0ePim3KOh7K8vXXXwtdu3YV7OzsBHNzc6FFixbCN998I05XrhcYGCgejwqFQmjevLnw5ZdfGrTx9/cXBg0aJFhZWQkWFhZCv379hHPnzhm00R+XZU3bfvz4cWHQoEGCra2toFAohMaNGwvjx48Xrly5Uu7jyMvLE/773/8Kbm5ugrm5udCrVy/h/Pnzgp+fn+Dn52fQNicnR/j8888Fb29v8fV95ZVXhAcPHgiCUDhF+8KFC0vd15EjR4RevXoJ5ubmgo2NjfDCCy8It2/fFu9XKpXCxx9/LLRv316wtrYWLC0thfbt2wu//fab2CYkJESYOHGi0LhxY0GhUAgODg5Cv379hCNHjpT7OAWh7NdYo9EI//vf/wRHR0fBwsJCGDRokBAcHGzUMVnW+7gof39/YcyYMUKDBg0EuVwuODs7C0OHDi3x2hj7niztfV3Wc6/v89atW8Vl+s/UK1euCD169BAUCoXg5eUl/Prrr6Vus+h7SRAE4cGDB8LYsWMFV1dXwdTUVPDw8BCGDh0qbNu2rdzngYiIaoZEEKqpMicREZERfvnlF3z44YcIDg4uMYMYPZ0CAgLQsWNHbNy4Ea+//nq1bNPT0xODBg3C6tWrq2V7RFS6vn37Iikpqdx6W0RE9PhiTSMiIqpRly9fhqWlpVjMlp4uubm5JZYtXrwYUqkUvr6+1bIPlUqF5OTkahsORERERPS0Yk0jIiKqEdu3b8eJEyfw559/YtKkSRVO3U1Ppu+//x5Xr15Fv379IJPJsH//fuzfvx+TJ0+ucGYvYxw8eBCbN29Gbm4uBgwYUA09JiIiInp6cXgaERHVCG9vb2RmZmL48OFYvHixwdTM9PQ4fPgw5syZg9u3byMrKwsNGjTAm2++ic8//7xaAon9+vVDcHAw3n33XXz22WfV0GMiKg+HpxERPdkYNCIiIiIiIiIiohJY04iIiIiIiIiIiEpg0IiIiIiIiIiIiEpgFdJSaLVaxMTEwNraGhKJpLa7Q0RERERERERULQRBQGZmJtzd3SGVlp9LxKBRKWJiYqplBhciIiIiIiIiorooMjIS9evXL7dNrQaNTp06hYULF+Lq1auIjY3Fzp07MWzYMPF+QRAwa9YsrFq1CmlpaejVqxeWLVuGpk2blrvdpUuXYuHChYi
Li0P79u2xZMkSdO3a1eh+WVtbA9A9gTY2NlV6bERERERUe7RaLYKCggAALVq0qPBKKhER0dMiIyMDnp6eYuyjPLUaNMrOzkb79u0xceJEvPzyyyXu//777/HLL79g/fr18Pb2xpdffolBgwbh9u3bUCgUpW7z77//xowZM7B8+XJ069YNixcvxqBBg3D37l04Ozsb1S/9kDQbGxsGjYiIiIgeQ1qtFlZWVgB053QMGhERERkyphyPRBAEoQb6UiGJRGKQaSQIAtzd3fHf//4XH330EQAgPT0dLi4uWLduHUaPHl3qdrp164YuXbrg119/BaA7YfD09MT777+PTz/91Ki+ZGRkwNbWFunp6QwaERERET2GtFotbt++DQBo1aoVg0ZEREQFKhPzqLPfnqGhoYiLi8MzzzwjLrO1tUW3bt1w/vz5UtfJz8/H1atXDdaRSqV45plnylwHAJRKJTIyMgz+iIiIiIiIiIieZnU2aBQXFwcAcHFxMVju4uIi3ldcUlISNBpNpdYBgPnz58PW1lb8K1oEOyU7H3diGUQiIiIiIiIioqdLnQ0a1aSZM2ciPT1d/IuMjBTvm/zHFTz/y2lEpuTUYg+JiIiIiIiIiGpWnQ0aubq6AgDi4+MNlsfHx4v3Fefo6AgTE5NKrQMAcrlcLHpdvPh1REoOBAGITsut6kMhIiIiIiIiInrs1Nmgkbe3N1xdXXH06FFxWUZGBi5evIgePXqUuo6ZmRl8fHwM1tFqtTh69GiZ61QkT6UBAOTma6q0PhERERERERHR40hWmzvPyspCcHCweDs0NBQBAQFwcHBAgwYNMH36dHz99ddo2rQpvL298eWXX8Ld3V2cYQ0ABgwYgOHDh+O9994DAMyYMQPjxo1D586d0bVrVyxevBjZ2dmYMGFClfqoVGsBALkqBo2IiIiIiIiI6OlRq0GjK1euoF+/fuLtGTNmAADGjRuHdevW4ZNPPkF2djYmT56MtLQ09O7dGwcOHIBCoRDXefDgAZKSksTbr776KhITE/HVV18hLi4OHTp0wIEDB0oUxzaGIAhi0CiHmUZERERERERE9BSRCIIg1HYn6pqMjAzY2toiPikFXReeAwDMG9YGb3b3quWeEREREZExtFotbt++DQBo1aoVpNI6W5WBiIioRuljHunp6QY1nUvDb89yKFVa8f/cfHUt9oSIiIiIiIiIqGYxaFQOpbpwSFpuvraclkRERERERERETxYGjcqhr2cEADkqZhoRERERERER0dODQaNyFM00ymMhbCIiIiIiIiJ6ijBoVI48VWGgiLOnEREREREREdHThEGjchQdnparYtCIiIiIiIiIiJ4eDBqVwyBoxEwjIiIiIiIiInqKMGhUjjwVM42IiIiIiIiI6OnEoFE58osUwmbQiIiIiIiIiIieJgwalcMg04jD04iIiIiIiIjoKcKgUTmUzDQiIiIiIiIioqcUg0blKFoIO4eZRkRERERERET0FGHQqBx5RbKL8hg0IiIiIiIiIqKnCING5cgvUtMoR6WBIAi12BsiIiIiIiIioprDoFE5lJrCoJFGK0Clqb6gkUYrIDNPVW3bIyIiIiIiIiKqTgwalUNZrPh1dc6g9s6Gq+j6zVHEZ+RV2zaJiIiIiIiIiKoLg0blyCsyPA2ovhnU1BotTt1LRK5Kg8Do9GrZJhERERERERFRdWLQqBxKtWGQKCdfXS3bDU3KRn7B0Lc4ZhoRERERERERUR3EoFE5lOpHk2l0Jy5T/D8+nUEjIiIiIiIiIqp7GDQqR5760dQ0uhObIf7PTCMiIiIiIiIiqosYNCpHvspwtrSqZBptvRKJU/cSDZYFFQkaxWcoq9Y5IiIiIiIiIqJHiEGjchTPNMqpZKZRZEoOPt52A+9suIq8IgGnoKLD0yrINMqrpiFxRERERERERESVwaBROYrXNKpsACcmLReALkPJPzwVAJCWk4/YInWMyhuedi44CW1mHcTykw8qtV8iIiI
iIiIioofFoFE59LOnWStkACqfaZSSnS/+fyY4CUBhlpGdhSkAIC1HVWYwasOFcKi1Aq4WBJyIiIiIiIiIiGoKg0blUKp0mUb2FmYAKl8IO6lI0Ojsg2QAhfWMOnvZQ2Gqe/pLG6KWk6/G8bsJADhEjYiIiIiIiIhqXp0PGjVs2BASiaTE37Rp00ptv27duhJtFQpFlfatLAjW2FsWBI0qGbxJySoMGt2MSkN6jkrMNGrpZgNXG12/SiuGffJuIvIKglYMGhERERERERFRTZPVdgcqcvnyZWg0hUGTwMBADBw4ECNHjixzHRsbG9y9e1e8LZFIqrRvpUafaaQbSlbZTKPk7MJgkFYAzock405B0KiFqw0uhqYgLDmn1LpG/wbGif9XZdY2IiIiIiIiIqKHUeeDRk5OTga3v/vuOzRu3Bh+fn5lriORSODq6mr0PpRKJZTKwgBPRoZuCJlSpQVMiwxPq2TwJrkg00hhKkWeSovT9xNxN0637ZZu1oWZRumGQaM8lQbH7sSLtysbrCIiIiIiIiIielh1fnhaUfn5+di4cSMmTpxYbvZQVlYWvLy84OnpiZdeegm3bt0qd7vz58+Hra2t+Ofp6QkAUGsFAIVFqytbCFufafRMSxcAwD8BMchTaaEwlcKrniVcbXVBo+KZRqfvJyE7XwP9Q9QPUyMiIiIiIiIiqimPVdBo165dSEtLw/jx48ts07x5c6xZswa7d+/Gxo0bodVq0bNnT0RFRZW5zsyZM5Geni7+RUZGGtyvzzSqbG0hfabRkLZukEqATKVa10cXa5hIJXARaxoZBo32B8YCAHo1dqzSfomIiIiIiIiIHladH55W1O+//47BgwfD3d29zDY9evRAjx49xNs9e/ZEy5YtsWLFCsybN6/UdeRyOeRyeZnbtBczjdSV6m9ywexp3k6WaFvfDtcj0wDo6hkBKFIIuzBolK/W4vBt3dC04R09cCY4iTWNiIiIiIiIiKjGPTaZRuHh4Thy5AgmTZpUqfVMTU3RsWNHBAcHV2m/ZiZSWJjpYmu5lRgmptEKSM3RBY3qWcrRu0k98b6WbtYAABcbXaCq6PC0cw+SkJmnhpO1HL2aOBbsVwNBEKrUfyIiIiIiIiKiqnhsgkZr166Fs7MzhgwZUqn1NBoNbt68CTc3tyrtV24qhYWZCQAgtxKZRqk5+dDHeewtTMUAEAC0cNNlGhUOT1OKQaGjdxIAAINau8BSrtuvIAD5GtY1IiIiIiIiIqKa81gEjbRaLdauXYtx48ZBJjMcUTd27FjMnDlTvD137lwcOnQIISEh8Pf3xxtvvIHw8PBKZyjpyWUmUOiDRpUYJpZSMDTN3sIUMhMpOjWwRz1LM1jLZWhZLGiUr9YiNUcFALgclgIA6N3EEQpTE3F7efkMGhERERERERFRzXksahodOXIEERERmDhxYon7IiIiIJUWxr5SU1Px9ttvIy4uDvb29vDx8cG5c+fQqlWrKu1bYSqFRUHwpjKzpyVl6WZOc7A0K9iOCXZO7YV8jRa25roaSWYyKepZmiE5Ox/xGXkwkUhwNz4TAODj5QBTEylkUgnUWgG5Kg1sYVqlx0BEREREREREVFmPRdDo2WefLbOmz4kTJwxuL1q0CIsWLaq2fStMTWBekGmUV4mgkX7mtHpWhQW2G9SzKNHO2UaB5Ox8xGXkIS49D4IAeDtawslat565qQkylWoWwyYiIiIiIiKiGlVh0OiXX36p9EYnTJgAa2vrKnWorpHLCmsa5VRheJqjlVm57Vxt5LgTC8Sn5yEiJQcA0NnLXrxfYaYLGuUxaERERERERERENajCoNH06dNRv359mJiYVNQUABAZGYmhQ4c+MUEjhamJWFsot1KZRobD08riaquraxSXkYcrYakAgC4NHYrsXzf0jplGRERERERERFSTjBqeduXKFTg7Oxu1wSclWKSnMJXCwkz3NCnVWmi0AkykkgrXSyrINKpnKS+3nb4YdkRKDgK
i0gAAnRsWZhqZm1Z+aBwRERERERER0cOqcPa0WbNmwcrKyugNfvbZZ3BwcKi44WNCLjMRAzcAjB4mlpJl7PA0XdDoxN1E5Ku1qGdpBm9HS/F+/b6ZaURERERERERENanCTKNZs2ZVaoMzZ86scmfqIoWpVBwiBuhmULOUV5yglZytH55mXKaRvgZS54b2kEgKM5n0Q+PyVNrKdZyIiIiIiIiI6CFUefa0pKQkXLx4ERqNBl26dIGbm1t19qvOkMtMIJFIYG5qglyVxuhMo2T98LQKMo30QSO9ovWMgMKgETONiIiIiIiIiKgmVSlotH37drz11lto1qwZVCoV7t69i6VLl2LChAnV3b9ap88yMjfTBY1yjKwtlGzs8DRbw6BR52JBIw5PIyIiIiIiIqLaUGFNIwDIysoyuD1nzhxcunQJly5dwrVr17B161Z8/vnnj6SDtU0u0wVtKhO8UWm0SM9VAah4eJq9hSnMTHQvg8JUitbuNgb3m5uxEDYRERERERER1TyjgkY+Pj7YvXu3eFsmkyEhIUG8HR8fDzOz8jNqHlfyIplGAJCTr65wndSCoWlSCWBnblpuW4lEAmcbXWCpo6c9TE0MXxJ9ppOxw+KIiIiIiIiIiKqDUcPTDh48iGnTpmHdunVYunQpfv75Z7z66qvQaDRQq9WQSqVYt27dI+5q7VAUZBpZ6DN+jAjeJBUMTXOwNINUKqmgtW4GtajUXHRpaF9y/xyeRkRERERERES1wKigUcOGDbFv3z789ddf8PPzwwcffIDg4GAEBwdDo9GgRYsWUCgUFW/oMaQP2ojBm/yKZzHTz5xWr4KhaXpD27khKjUXL3ZwL3EfaxoRERERERERUW0wania3pgxY3D58mVcv34dffv2hVarRYcOHZ7YgBEAyGW6p8iiEsPTUoycOU1vfC9vXPhsAJo4W5e4Tx80ylNVHKwiIiIiIiIiIqouRs+e9u+//+LOnTto3749Vq9ejZMnT+L111/H4MGDMXfuXJibmz/KftYafYZRYfCmcsPTqmv/rGlERERERERERA9r08Vwo9salWn03//+FxMmTMDly5fxzjvvYN68efDz84O/vz8UCgU6duyI/fv3V7nDdZmiRCHsioM3yVm64WmOVsYNTyt3/2b6YXEMGhERERERERFR1SnVGnx/4K7R7Y0KGq1btw7//vsvNm/ejMuXL2PDhg0AADMzM8ybNw87duzAt99+W7Ue13FymWGmkTG1hcThadWQacSaRkRERERERERUHYITsqDWCka3NypoZGlpidDQUABAZGRkiRpGrVq1wunTpyvRzceHPtPIohIZP+LwNCNrGpWnMsPiiIiIiIiIiIjKEhSbWan2RgWN5s+fj7Fjx8Ld3R1+fn6YN29elTr3OCpe08i4TKPKzZ5W/v51LxGDRkRERERERET0MO7EZlSqvVGFsF9//XU899xzCAkJQdOmTWFnZ1eVvj2W9LOnmZvpniqjahoVDE9zrMZMIw5PIyIiIiIiIqKHERRXuUwjo2dPq1evHurVq1fpDj3uCjONdMEjY4I3ydU5e5oZg0ZERERERERE9HAEQah0plGFw9NefvllZGQYv9HXX38dCQkJlepEXVZY00gXX6uoplGeSoMspRoAUK86Zk+T6WsaaR96W0RERERERET0dErMUiI5Ox8SifHrVBg02r17NxITE5GRkVHhX3p6Ovbs2YOsrKyHeRx1in72NIWRhbD1M6eZmkhgozA6katM5gX7zTNiWBwRERERERERUWn0RbC96lkYvU6FUQ1BENCsWbOq9+oxJ9dnGhUMU8upYJhY0aFpksqE78rAmkZERERERERE9LD0Q9OaO1vjtJHrVBg0On78eKU74uHhUel16iqxppGRGT/BibrInWM1DE0DCoNGaq0AlUYLUxOjJrwjIiIiIiIiIhLpi2A3c7U2ep0Kg0Z+fn5V79EToHD2NH2mkbrMtlqtgBUnQwAAz7R0qZ79mxY
GifJUGgaNiIiIiIiIiKjS9JlGzVyMDxrV6QjE7NmzIZFIDP5atGhR7jpbt25FixYtoFAo0LZtW/z7779V3r9EApgVBGnEYWL5ZRekPnQ7DkFxmbCWyzCxl3eV91uUXCYVi1RxiBoRERERERERVVa+WovgBF396WYuVkavV6eDRgDQunVrxMbGin9nzpwps+25c+cwZswYvPXWW7h27RqGDRuGYcOGITAwsEr7NpNJxbpEFmIh7NIzjbRaAT8fDQYAjO/VELYWplXaZ3ESiUQMWOWVE7AiIiIiIiIiIipNcEIW1FoB1goZ3O3MjV6vzgeNZDIZXF1dxT9HR8cy2/7888947rnn8PHHH6Nly5aYN28eOnXqhF9//bVK+1bICp+eogWpBUEo0fbwnXjcic2AlVyGt3pXT5ZR8X3nqasn0yhfrcXugGgkZOZVy/aIiIiIiIiIqO4KitMNTWvpalOpSbuMDhoJgoCIiAjk5dVsoOH+/ftwd3dHo0aN8PrrryMiIqLMtufPn8czzzxjsGzQoEE4f/58uftQKpXIyMgw+AMAucxEbKMoyDTSCoBSbZjxIwgCfj5yHwAwrqcX7CzMjH+ARlCIQ+OqJ2i0PzAWH24OwIL9d6tle0RERERERERUd+mLYLd0M76eEVDJoFGTJk0QGRlZuZ49hG7dumHdunU4cOAAli1bhtDQUPTp0weZmZmlto+Li4OLi2EBahcXF8TFxZW7n/nz58PW1lb88/T0BGBYhFqf7QPoClIXdeJeIm7HZsDSzASTejeq1GM0hqKgH9VV0ygqNRcAEJ6cXS3bIyIiIiIiIqK6S18Eu4WbTaXWMzpoJJVK0bRpUyQnJ1euZw9h8ODBGDlyJNq1a4dBgwbh33//RVpaGrZs2VKt+5k5cybS09PFP31gTFEk08jURApTE10KV/HgzbWINADA823dYG9ZvVlGQOHMbdUVNMrIVQEAErOU1bI9IiIiIiIiIqq77sTqkm9auD6iTCMA+O677/Dxxx9XubD0w7Kzs0OzZs0QHBxc6v2urq6Ij483WBYfHw9XV9dytyuXy2FjY2PwB+hmLitKn22UU2yYWHRB5k5DR0vjH0wl6PerrKagUVpOQdAok0EjIiIiIiIioidZUpYSSVlKSCRA80cZNBo7diwuXbqE9u3bw9zcHA4ODgZ/j1pWVhYePHgANze3Uu/v0aMHjh49arDs8OHD6NGjR5X2V7SmEVAk46dY0CgqNQcAUN/e+ArklaEwrd5Mo/SCTKOcfA2ylaXPBkdEREREREREj7+wJF1pGndbc1iYySq1bqVaL168uFIbf1gfffQRXnjhBXh5eSEmJgazZs2CiYkJxowZA0AXxPLw8MD8+fMBAB9++CH8/Pzw448/YsiQIdi8eTOuXLmClStXVmn/ZqaGMTXdk6ssEbyJTtNlGnlUYtq6yigshK2toKVx9EEjQJdtZCmv3EFDRERERERERI8HfcyiKokulYoWjBs3rtI7eBhRUVEYM2YMkpOT4eTkhN69e+PChQtwcnICAEREREAqLQzs9OzZE5s2bcIXX3yBzz77DE2bNsWuXbvQpk2bKu1fUSxoVNosZhqtgLh03YxyHo8o08i8mjON0ooGjbKUj2xYHRERERERERHVLv1kWFVJdKl0ismDBw+wdu1aPHjwAD///DOcnZ2xf/9+NGjQAK1bt650B8qzefPmcu8/ceJEiWUjR47EyJEjq2X/cpPimUYlaxrFZ+RBrRUgk0rgbK2olv0Wpw9eFZ+1raoyimUaEREREREREdGTKUY/OqoKiS6Vqml08uRJtG3bFhcvXsSOHTuQlZUFALh+/TpmzZpV6Z3XdXJTw5pG+qBRRl5h0EUfsXO3M4eJVPJI+qHPNKquoFHx4WlERERERERE9GR6mJI6lQoaffrpp/j6669x+PBhmJkVTi3fv39/XLhwodI7r+uKD0/zqmcBAAgtKCIFANFpuiLYj6qeEQAoyijAXRUqjRZ
ZRYpfM2hERERERERE9OTSz/j+yDONbt68ieHDh5dY7uzsjKSkpErvvK4zKzZ7WlNn3dR09+OzxGUP8+QbS8w0Uj980Kjo0DSAQSMiIiIiIiKiJ5UgCDWXaWRnZ4fY2NgSy69duwYPD49K77yuU8gMn56mzlYAgAeJRYJGj3jmNKB6Z09LLxY0SsjMe+htEhEREREREVHdk5ajEusyuz/qoNHo0aPxv//9D3FxcZBIJNBqtTh79iw++ugjjB07ttI7r+vkxTKNmrjogkbhydlifSF9TaOqTF1nrOqsaZRWPNMoi5lGRERERERERE8ifaKLo5WZmJBSGZUKGn377bdo0aIFPD09kZWVhVatWsHX1xc9e/bEF198Uemd13VyU8PC1k5WctgoZNAKhXWNanJ4Wm41BI30mUb6ot0cnkZERERERET0ZHrY0VGVChqZmZlh1apVCAkJwd69e7Fx40YEBQVhw4YNMDGpfMSqrisehZNIJGjqUlDXKCHLYGxgfTuLR9cPM+Mzjc4FJ2Hp8WBotUKp96fn6IJG+qLeSVn5ZbYlIiIiIiIiosfXwya6VCpoNHfuXOTk5MDT0xPPP/88Ro0ahaZNmyI3Nxdz586tUgfqMnkpgTB9XaPg+EwkZeVDqdZCIgFcbRWPrB/62krGZBp9sTsQCw/exeE78aXer880auykexwarYDUnPxq6ikRERERERER1RU1mmk0Z84cZGVllViek5ODOXPmVKkDdZnctOTT06QgaHQ/IQtRqTkAAFcbBcxklXoqK8XcTF8Iu/ygkVYrICpFd0CcvJdYaht90MjRygwOlmYAWNeIiIiIiIiI6EkkZhrVRNBIEARIJJISy69fvw4HB4cqdaAuM5OVkmlUZHhaTcycBhhfCDspW4l8jW6GtZN3EyEIJYedpRUMT7MxN4WTlRwA6xoRERERERERPYnEuIV91UrqyIxpZG9vD4lEAolEgmbNmhkEjjQaDbKysjBlypQqdaAuKy3TSD88LSwpG+HJukyjR1kEGyisrZSn0pbbLjYtT/w/Oi0XIUnZ4jA0PX2mkZ25GZys5bgbn8mgEREREREREdETSB80crerWkkdo4JGixcvhiAImDhxIubMmQNbW1vxPjMzMzRs2BA9evSoUgfqMkUpNY3cbBWwNDNBdr4GZ4OTADz6TCOFkbOnxRQcDHqn7iWWGTSyNTeFszUzjYiIiIiIiIieRLn5GqRk62oYV3XyLqOCRuPGjQMAeHt7o1evXpDJjFrtsSc3LTkUTyKRoImLNa5HpuFyWAoAoH4V07yMJdY0qiBopI8gSiSAIOiCRhN6eRu0Sc/VHTC25qZwYtCIiIiIiIiI6LGVpVTj5N1E9GxcD/YFdYv19DECK7kMNuZVi+NUqqZR//79kZKSUmJ5cnIyTErJynncyUupaQQATQqyd1QaXc2gRz08TV/TKF+thVZbsk6RXmy6bnha7yaOAIDzIckl6iCJw9MsigSNWAibiIiIiIiI6LGz5Nh9TNvkjx7fHcUXu24iNClbvK9oHebS6lMbo9KFsEujVCphZmZW6n2Ps9JqGgFAUxfDIV+PfnhaYT/y1GVnG+mHp/Vv4QxnaznyVFpcCUs1aFN0eBozjYiIiIiIiIgeX1EFs6PlqbTYeCEC/X88gS2XIwEUmTntIRJdjMpP+uWXXwDohmatXr0aVlaFQRONRoNTp06hRYsWVe5EXaUoI9NIXwxb75EHjYr0IzdfAwuz0l+2mIJMI3c7c/Rp6oTt/lE4dT8RvZs6im30s6fZcvY0IiIiIiIiosdaWo6uBM2EXg3xIDEbp+4l4vuDd/FiB3dEpxVM3vUQMQujgkaLFi0CoMs0Wr58ucFQNH0h7OXLl1e5E3WVXFZGppGztfi/o5WZWHPoUZFKJZDLpFCqteXWNYopknrm17wgaHQvEZ893xIAkKfSQKnWzcBma2GKPJUuaJTAoBERERERERHRYyc1W5cY4tfMCZ893xK+3x9
HbHoe/rkeg5i0wsSSqjIqaBQaGgoA6NevH3bs2AF7e/sq7/BxoigjGORhbw6FqRR5Ku0jzzLSMzczgVKtRZ5KW+r9SrVGzBhytzMvGLMIBMVlIi49D662CmQUDE2TSgArMxm0BcPT0nNVUKo1ZdZwIiIiIiIiIqK6R59pZGdhBlMTKcb3bIj5+4Pw++lQsfj1wwxPq1RNo+PHjz81ASMAZQ4DM5FKxKnsH3URbD39ELXiha314tN1ASO5TAp7C1PYW5qhnYctAOB8SBIAIK1IPSOpVAJbc1OYmuiKYSVl5T/S/hMRERERERFR9UotKEFjb2EKABjdtQEszExwNz4TV8N1NY4f6fC0GTNmYN68ebC0tMSMGTPKbfvTTz9VuSOPm6bOVrgVk1GjmUYAyhyeVlpV9Hb17XA9Kh334rMAGBbBBnQ1qpys5IhJz0NiprLGHgsRERERERERPZw8lUaMEdhZ6CYnszU3xajOnlh3Lgz6ydfrP8pC2NeuXYNKpQs2+Pv7V3matifNmz28kJSVjxE+9WtkfwrTgqBRfulBo9h0XdDIzU4hLmvsZAkACEnUBY2KFsHWc7IuDBoRERERERER0eNBnxhiIpXARlEY3pnYyxvrz4dBEAAzE6k4CVZVVBg0On78uPj/iRMnqryjJ42PlwM2TupWY/szN9WNJCxreJq+CLa7bWEEsVHBELoHidkAimQaFUQgAV3QCOAMakRERERERESPk9SCeka25qYGCT4N6llgUCtXHLgVBzc7BaTSqif/GFUI++WXX654QzIZXF1dMXDgQLzwwgtV7hCVTsw0KitolF6yKnpjZ13QKDw5G2qNtsTwNIBBIyIiIiIiIqLHkX7mNDsL0xL3TevXBKfuJ6J3E8eH2odRQSNbW9sK22i1Wty/fx+rV6/GRx99hLlz5z5Ux8iQuWn5hbDFTKMiw9PcbBQwNzVBrkqDyNRcpItRyMKXXZ+mlpiV90j6TURERERERETVLz1X9xvfvshoIr229W1x9YuBUJhWav6zEowKGq1du9boDe7duxdTp06tlqDR/PnzsWPHDgQFBcHc3Bw9e/bEggUL0Lx58zLXWbduHSZMmGCwTC6XIy/v8Q6KKMz0QSNtqfcXBo0KM42kUgm8HS1xOzYDDxKyxEwjO3MOTyMiIiIiIiJ6nOlnTrMzL5lpBBROqPUwHi7kVIrevXujc+fO1bKtkydPYtq0abhw4QIOHz4MlUqFZ599FtnZ2eWuZ2Njg9jYWPEvPDy8WvpTmxSy8oenxabpgmJutoZV0fVD1B4kZpU7PC2BQSMiIiIiIiKix4a+ppFdKZlG1cWoTKPKsLOzw44dO6plWwcOHDC4vW7dOjg7O+Pq1avw9fUtcz2JRAJXV9dq6UNdYW6mi++VNntaRp4KmUo1AMPhaQDQyFE/g1o20koJGrkWBJn0mUpEREREREREVPfpZ0i3L6WmUXWp9kyjRyk9PR0A4ODgUG67rKwseHl5wdPTEy+99BJu3bpVbnulUomMjAyDv7qmvJpG+iwjewtTWJgZxgFLzTQqckB5OVgAAOIzlGXWSyIiIiIiIiKiuiWtINPI3vLRZRo9NkEjrVaL6dOno1evXmjTpk2Z7Zo3b441a9Zg9+7d2LhxI7RaLXr27ImoqKgy15k/fz5sbW3FP09Pz0fxEB5KeUEjfZZQ8aFpANDYSZdpVNbwNDsLU1grdIGmiJSc6u00EREREREREZUpT6XBF7tuYunxYDEIZCx9TSPbMmoaVYfHJmg0bdo0BAYGYvPmzeW269GjB8aOHYsOHTrAz88PO3bsgJOTE1asWFHmOjNnzkR6err4FxkZWd3df2hy07JrGkWXUgRbz7tgeFpqjgpRqbp2RQ8oiUQCr3q6bKOwpPJrRRERERERERFR9dnhH42NFyKw8OBd9PzuGGb/cwux6caVjxEzjR5hTaPHImj03nvvYe/evTh+/Djq169fqXVNTU3RsWNHBAcHl9lGLpf
DxsbG4K+uMReDRiVnT9MfUMXrGQGAhZkMHgXBpHy1bl27YuMdverpAkvMNCIiIiIiIiKqOedDkgEA1nIZcvI1WHcuDG+svmjUuqlPe00jQRDw3nvvYefOnTh27Bi8vb0rvQ2NRoObN2/Czc3tEfSw5uinyiutEHZMQU2j0jKNAKBRwRA1veKpa/q6RuHJDBoRERERERER1QRBEHD+gS5otGpcZ6yf2BUA8CAxG8lZFc9wnlYDs6fV6aDRtGnTsHHjRmzatAnW1taIi4tDXFwccnMLU7XGjh2LmTNnirfnzp2LQ4cOISQkBP7+/njjjTcQHh6OSZMm1cZDqDb6TCOluuyaRmUFjRo7WYn/m5lIxW3p6YenhTPTiIiIiIiIiKhGPEjMRlKWEnKZFB0b2MGvmZM4A/rN6PRy1xUEQZw9rfhoouokq7hJ7Vm2bBkAoG/fvgbL165di/HjxwMAIiIiIJUWxr5SU1Px9ttvIy4uDvb29vDx8cG5c+fQqlWrmur2I6Ew1T3GsORsrD4dYnBfcEIWAMDdtuTwNKCwGDYA2JibQiKRGNzfwEF3f3gyaxoRERERERER1QT90DQfL3vIZbrkjjYetghJykZgdDr6Nncuc90spRpqrQDg0dY0qtNBI0EQKmxz4sQJg9uLFi3CokWLHlGPao+NQhc5jEzJxdf77pTapr69RanLGxXJNLI1L/mSN3TUrRedmgu1RguZSZ1OQCMiIiIiIiJ67F0oGJrWo1E9cVlbD1v8cz2mwkwjfZaRXCYVy9k8CnU6aESFuno7YGrfxuJQtOLae9rBtcxMo8KgUWljHV2sFTCTSZGv1iImLQ8N6pUefCIiIiIiIiKihycIAi4UZBp1b1wYNGrjYQsACIzOKHf91BqYOQ1g0OixITOR4pPnWlRpXRcbOSzNTJCdrylRBBsApFIJGjhYIDghC+Ep2QwaERERERERET1C9xOykJydD3NTE7Svbycub+2hm809Oi0XKdn5cLAsPShUE/WMgDpeCJuqh0QiEYeolRY0AgpnUAvjDGpEREREREREj5R+1rTODe1hJisMzdgoTOFtRDHsVHHmNAaNqBroi2GXFTTSZxdFsBg2ERERERER0SMlDk0rUs9Ir3CIWtlBI32m0aMensag0VNicFs32FmYwreZY6n3N6ynn0GNmUZEREREREREj4pWK5QbNGpbMETtZpQxmUasaUTVYFBrVzzbygUSiaTU+8VMoxQGjYiIiIiIiIgelbvxmUjNUcHCzATt6tuWuF+faVTe8DTWNKJqV1bACCisaRSenANBEGqqS0RERERERERPlUuhKQCAzg0dYGpSMiyjDxpFp+UiNTu/1G2kibOnMWhENaC+vQWkEiBXpUFiprK2u0NERERERET0RAqKywQAtC8lywjQFcNuWDAaqKxso1Qx04g1jagGmMmkcLczBwCEc4gaERERERERPUJRqTl4b5M/jgXF13ZXalxwgi5o1MTZqsw2FQ1RK8w0YtCIaohXvcIhakRERPR4ic/IQ7ZSXdvdICJ6qgmCAI2W5T4qkpylxNjfL2HvjVjM2XP7qSqRIggC7sVnAQCaOluX2U5f66isYthpuaxpRDWsgYNuBrWI5Oxa7gkRERFVxq2YdPh+fxxvrb9c210hInqqvbfpGjrNO4ywJP6mKku2Uo2J6y4jpOA5Ck/OwY1yZgl70iRl5SM9VwWpBGjkZFlmu6KZRqUF1fS1jljTiGqMPtMojJlGRPQEuxuXia92B7J+Gz1RFh2+B6VaiwshKUjO4rFNRFQbbkalY9/NWKTnqrD85IPa7k6dlK/WYsrGq7gelQ57C1N0begAANhzPaaWe1Z95u29jVdXnEd6Qc2h4u4XDE1r4GABhalJmdtp42ELmVSC6LRcjF1zCbHpueJ9ao0WGXm67GLWNKIaoy+0VdWaRvfjM9HvhxP482J4dXaL6rgspRr/3oyFUq2p7a7UGflqLf6+HIGoVAZg65p8tRb
v/nkVf5wPx4IDQbXdHXoCBEan48+L4bU6FOF6ZBqO3EkQb18smJGFiOo+QRBw6FYcguIyarsrVA1WnCoMFO3wj0Z8Rl4t9qZuWno8GKfvJ8Hc1ARrxnfBpD7eAIC9N2KhfQKG9d2JzcDvZ0JxMTQFPx+9X2qb4ATd0LQm5QxNA3TFsL99uS3kMilO30/Cs4tOYXdANAAgPbcwIGVrzkwjqiH64WkhCVnIqkJNhDVnwxCalI0fD917LAII2Uo11p0NRWgFqaPZSjVSypjm8Gmn1mgxcd1lTP3TH2vPhtV2d+qMvy9H4H/bb+KFJWdwNTy1trvzxLsXn4notFyDZRl5Knyz7zZmbAkw+Dxbfy4MIYm69/zugGjEpfNkjqouI0+FcWsu4fOdgdh4ofYumCw6cg8AIJNKAADnHyTXWl/o6ZKv1iJPVffP+eqyA4FxmLzhKsavucw6OI+5yJQc/HszFoDuYny+Ros1Z0JruVd1S1pOPn4veE6+G9EWHRvYw6+5E2wUMsRl5OFS2ON/0WPV6RDx/z/Oh4kBoqLu6+sZuZRdBFtvVGdP/PthH7T3tENmnhofbg7Ajag0ceY0a7kMpiaPNqzDoBGJmjhbwcPOHJlKNb7Zd6dS66o0WuwP1H1IpmTn49Ctul0BPyEzD6+uPI/Ze25jzMoL4njQ4rKVarzw6xn4fX8cCbxSUMIPh+7hUsEV7RN3Eypo/fTQX/FPzVHhtVUXcKDgvUGVExidjk+338D9+Mwy21wOS8Fzi0/B9/vjmL75Gu7GZeLfm7F45seTWHU6FDv8ozFlw1Xkq7VIyMwTr/jYmptCpRGw9ixP5qjqlhy9j+SC749FR+6VmYb+KPlHpOLE3USYSCX4eFBzAMD5kCc/aHQ9Mg39fziBvTeenOEMjxtBEPDK8nPo98MJgyveZLw8lQbf7tedc8dl5D30haZbMenoMPcQ5u+v3Hn8o7DqVAgG/nQSt2Iejzo1Ofnqh+7r72dCoRWAPk0d8eXQVgCAPy9G8P1RxOrTochSqtHC1RovtHMHAMhlJniujSsA4J+HGKK261o02s85BO+Z++A9cx+afPYvlh4PrpZ+Gys2PRf/BOgeQwtXa6i1Ar79t+T7UT88rWk5M6cV1djJCtun9MCzrVwAAJsuRiA9V3f+YWf5aLOMAAaNqAgzmRQLR7YDAPx1KQLHg4wPApwNTkJakZPlTRcjjFpv4cEgDPzpJB4klozAliYxU4n+P5zA8z+fxqaLEcjJr3xGVEhiFkYsO4fAaF0acFxGHj7dcaPU4mILD95FSGI2MpVq7A+MK3Obcel5+HznTdyNK/vH7ZPm6J14g7Ha/hFpvNoI3QnghYIfbJ0a2EGp1uLdP/2x5XJkLffs8aLRCvhg8zVsvhyJEcvOicHJorRaAXP33IZW0LXfFRCDQYtPYeqf/kjIVKJhPQtYmJngTHASPtp6Hd/tD0KWUo329W3x48j2AHSfVRl5PJmjygtJzBIzLOtZmiEtR4Ulx+4b3L/gQNAjL4S66LAuy2hEJw+M6uwJiUSX9p6Q+WRf6FhyLBghSdmY/c8tgxnj1Botfjp876F+eJBx7sVn4UZUOmLT83Dkdu1eLMzN1+CnQ3exOyAaKo1WXJ6Rp8KmixE4eKvsczhjpOeqcOpeIpadeFCtGcRrz4YhMqUwU1afpVJVCw7cRVqOCr+fDjWofVLTYtNzsfDgXdxPyMKUjVfFacEBXXbwf/4OwLWIupWJ/cWuQAz55UyVp35Pzc7H3wXneu/4Nka/5s5o7mKNLKW6VjNR65LU7HzxYt30Z5pCWpAdCwAvtNcFkPbfjDV4Dxvr8O14/HfrdaTnqiAIgCAAaq2AVadDqrS9qlp3NgxqrYBu3g747fVOkEklOBaUgJP3Eg3a6bOPyps5rTiZiRRv+zYCoAuuRaXq3uP2j7ieEcCgERXTs7EjJvbSjSv9ZPsNpGbnQxAERKXmlDuMa8913ZfcMy1dIJXornKGVBA
ISs3Ox6pTobifkIVpf/obFXD453oMQpKycTs2A5/tvIlu3x7FJ9uu469LEbgdk4EHiVnYeiUSM3fcwMdbr5cIKt2Pz8SIZecQmZILr3oWWPpaJ5iaSHDwVjw2F/tRfyUsBevPh4m395eTLbLk2H38eTECE9ZeKjNr6XGUnqMy+KLXi0zJwYwt1wEA43s2hIuNHPlqLfzr2AlAbTgfkgylWgt3WwW2vNMDr3drAEEA5u+/A3UNfmnVFq1WwNXw1Iee9nvvjRhxGFlGnhpv/H4RB4oFbndci8bN6HRYyWX4Y2JXDG7jCokEMDWR4IMBTXFgui+Wv+EDmVSCf67HYIe/bgz47Bdbo38LZzR1tkKmUo2/jAxyExX1zb47UGsF9GvuhJ9e7QAAWH9eN0z7bHAShi09i2UnHmD0ygslhk8aQ6XRlpvhqtEKWHbiAU7fT4JMKsH7/ZvC3tIMLVxtAAAXQh7/FP+yJGTk4XhBdmtSVr7Bd/XqM6H45eh9fLTlepUyhNUaLfZcj2Ew2QhngpPE/8u7sFYTVp0OwS/HgvHh5gD4fn8cv50Ixmc7b6L7t0fx2c6beGfDVXx/IKjS9VIiU3Lw/M+n0X7OIYxdcwkLDgRh/JpLSKqGYvMJmXliFsSQdm4AgIO34qpc0+VKWApOFfwwVWsFrKvFsgG/HgtGfsE5T2RKLj7cHACNVsCFkGS8suwcdl6Lxry9t2utf8Up1RocLDiG916vWuBu44Vw5Ko0aOVmg15N6kEqleAdP90P/LVnw6p8YfVaRCr23oipExdmb0alV+mCvd6q0yHIztegpZsNnm3lanBfj0b14GhlhtQclcFnizEuhiRj2iZ/aLQCRnSqj0ufD8ClzwaIF3Qu1tD3YWZBkBoAJvs2QiMnK4zr2RCArjC2PniVkp2PpKx8SCS6kT6V0dnLHk2crZCTr8G6c2EAHn09I4BBIyrFJ881RxNnKyRmKjFi2Tl0/fYoei84jv4/nii1qr1SrcGhgis4k30boW9zZwAoEYQpbs+NGPELJSguE3ON+PLQXyka0MIZXvUskJmnxpYrUZi54yae/+U0Bvx4Eh9vu4G/LkVi69WoEtkdS44FIzVHhXb1bbH93Z4Y0s5NTOefs+cWggtSBfNUGnyy/QYEAfBt5gQAuBRa+ow0Gq0g9ismPQ8ztgQ8EUXcQpOy4ffDcfT5/rj4vAC61/u9Tf5Iz1WhvacdPnu+JXo0qgeAdTQA4ORd3QmbX3NnyEykmPNia9hbmCI1R/VUFKf9cncgRiw7h27fHsWs3YHlDi3Ty83XGNQN02gFLDmmO5Ge2rcxnmnpgny1FlP/vIolR+9DrdEiW6nGwoO6Qtbv9W8C32ZOWPaGD0593A8nP+6HGQObQWFqAt9mTvihIKsIAF7xqY+ODewhlUrEqzVrzoYiX/3kB/So+py8l4ijQQmQSSX4Ymgr+DVzgl8zJ6g0Aib/cQVj11xCRp4aMqkEcRl5GPv7xUrVxotIzsFzi0+h53fHSgRLAeBBYhZGLj8nFnOf2Nsbng66ySz0n8cXnuAhajuuRUOjFWAllwEAVpwMQUaeCiGJWWLmVb5Giz8uhFV626tOh+L9v65h2p/+pWYgU6Ez9wuvnJ+6n1ilepjVQasVsPWq7nzP3NQEsel5+P7A3YKMdA08HcwBAL+deID/bAmoVN3NFace4HasLjPdq54F3GwVyFSq8cPBuw/d7x8P3tNlv3ra4ceR7WEllyE2PQ8BUWlV2t5PBcd+cxdd5sKmixHIrIXgZ2RKDrZc0b0es15oBYWpFCfvJWLKxqsY+/slcbYn/4g0g/PL2nQlLBXZ+brj4uS9xEqfx+fkq8Xg9Tt+jSCR6DJoXmjvDg87cyRlKfFnFS5QRaXmYPTKC3hv0zX0XnAcS47er7U6q1suR+KFX8/g3Y1V+2xMyc7H+oIgR/EsI0CXRTOkrS54+tfFiArre6k1WtyKSceG82GYtP4K8tVaPNPSGQtGtIW
ztQLONgo821oXmNr3kBl8xtp8KRKZSjUaO1miX8Hv4Q8GNIWDpRmCE7Kw74auH/pz4/r25jA3K3vmtNJIJBKM7uIJALgWkQaAmUZUSxSmJlg0qgNkUglCkrKRmKmERKJL8/vv1uslsklO3k1EplINVxsFOnvZY0zXBgCAbVejyv1i3n41CgAwqLULJBLdl1t5Uy0mZSlxuaA42txhbXD8v32x4a2umNq3MXo2rgcruQxmMim6NLQXAz07rkWL62fmqcTgzjfD2sLRSg4AmNS7EXo3cUSeSovnFp/G8N/O4u0/riAkMRtO1nIsGd0RbTxsoBV0qY/FXQ5LQVJWPqzkMshlUhy/m4jlp6p/is34jDyMW3OpRoY55ak0eHfjVaTlqJCZp8a7G/3FKwvf7ruD61HpsDU3xdLXOsJMJkWPxk9n0GjZiQfo9d0xBEYXjoHXX/3u21x3DMpMpOLVlPKy1Z4Eu65FiydFWUo11p8Px8BFp8qtrXAgMA69FxxDj/lHxdpP/96MRXBCFmwUMkzp2xjL3+iEMV0bQCsAPx6+hxHLzmHOnluIz1CigYMFJvRqKG7P08EC7nbmBvsY1tED37/SDoNau2Dm4Bbi8pc6uMPFRo74DKU4E0Vp1BotvtodiM923qy1H0VUNyRlKfHL0fv4z98BAIBxPRuisZPuKuEXQ1rCRCrB/YQsaLQChnf0wJEZfnCzVeBBYjYmrrts1BXaaxGpGP7bWTxIzIZaK+CjrdcNMne3X43C8z+fhn9EGqzlMnw/op3Bca3/PL7whH4eC4Ig/iCd+XwLNHG2QnquCqtPheDT7TehVGvhUfAZsOVSVKW3/fdl3WfY6ftJOHE3sYI1nl75aq14IcRKLkO+WlupsgbV6UJoMiJTcmEtl+HCzAH4fkQ7dPN2wJB2bvjr7e449XE/LHylHWRSCXYHxGD8mstGXSjIyFOJGarrJ3bFyY/7YcmYjgCAv69E4maU7rs/N1+DGX8HYNjSs0aXTjgelIAtBYGur4a2gsLUBP1b6H5k7q/CD9zzD5Jx7kEyTE0kWD2uMxo7WeoyaS/VfCbtL0fvQ6UR0LuJIyb08sb8l9sC0J1D52u0eK61q3ievvVK5d6jxroQkoz+P5zAp9tvGBUkLHrsJmfn42Z06bWNLoWm4PXVF0oUt/7jfDiSsvLh6WCO5wsCHwBgaiLFe/2bAAB+PnKv0qMRFhy4C6VaC6lE9/3z4+F76PndUXy+82aFIzqqU0aeSrxIcfJeosFsnUVptALe/P0i+v94okS25u9ndFlGrd1txLo8xQ3vVB8AcOh2PF5dcb7M4d3LTjxA29mHMOSXM/hy9y1kKtXo2tABv77WCbIiBaGfb6s7/z50K+6RZ/trtIV1Mif7NhKDYrbmphjXoyEAiN9d96swNK2oEZ3qw6zI47S3YKYR1ZK29W2xdkIXfDm0FbZN6YHA2YPwTEtn5Ku1mPzHFUSmFE4lvqcgajqknRukUgn6NXeCq40CKdn5OFhGQez78Zm4HpUOmVSCb4a3xdS+jQEA/9t+A6+uOI9XV5zHm79fxI0iV1uO3I6HIABtPWzhYWcOqVSCPk2d8MlzLbDp7e64MetZ3Jn7HLZO6YlFo9pDJpXgRlS6GM3dfzMOSrUWTZ2t0MbDRtyuVCrBj6Pao6WbDdRaAdci0nD6vi4tct5LbWBrYYrnWut/9Je84qv/cn+ujSvmvtQaAPDDwbs496ByqZUVmbf3Nk7eS8SnO27gXCXTNivry12BCIrLhKOVGZyt5bifkIXPdtzEnusxWH9eNy570avtUd9ef2XbEQAQEJlm1MnS9qtRtXIiU52OByVgwYEgRKfl4otdgdBqBYQmZSM8OQemJhL0auIoth1c8KV1IDD+iZ0Z5X58JmbuuAkAeL9/E2x8qxsGtdadFKw4GVIiKJOeq8KMvwMwZeNVJGfni7Wf1p8LE+vCTOztDRuFKWQmUnw7vA1+GtUeNgoZrkelY0vBieb
MwS0gl1V8lWZUZ0+seLMz6hUEiwFd4cUJBcNx154NK/PK2Q+H7uGP8+HYdDECo5af54xrlZCZp8LiI/eMrltXnsRMJRYdvlfusBBBELDi5INqD67r62f1nH8MPx2+h5TsfDRyssQHA5qKbZq6WONdv8YwNZFgxsBm+GlUezR0tMSGt7rCzsIUAZFp+GJnYLn7ORAYi9ErLyA5Ox+t3W3QqYEdspSFgfslR+/jv1uvQ6nWwreZEw7+xxejuniKV7UBoKu3A6QSICQp+4mc6tk/IhUhidkwNzXBSx08MGNgMwDAkuPBuBSWAgszE2ye3B1NCoafVsblsFSEJRee33y973aN1sJ4nOi+7zWoZ2mG17vrLhaWlhVXE/Tv9xc6uMPWwhSjunji73d6YOlrndCjcT1IJBKM7OyJtRO6wEouw/mQZPxRZEhjWXZcjUJOvgZNnK3g21T3nd65oQNe6uAOQQBm77mFxEwlRq+6gB3XohEQmSYOiVt4MKjMH6mbL0Vg0h9XIAjAy5084ONlD6DwB+7+wLhKZXIIgiBm2I3u0gCeDhaYrM+kPRNWLZm0Wq2A3QHRiCjy/ihNaFK2eMF2xrO69+bwjvXxdsGU6uN7NsTS1zvh9W66Y2a7f3S1v8d2B0Rj7O+XEJKUjc2XIzF+zWWxEHWeSoOd16JKXAQ+UTCsTz/M53ixyV0y81T4clcgRq04j7PByZi37zauFFzIzsxTYUVBjc8PBzQrMYvVqM6eaOFqjYw8tTjTpTGuhqdiz/UYSCTAzqm98PPoDmjjYYM8lRZ/XozAgJ9OYvIfV0odBaEXmZKDnw7dxZe7AjH7n1uYs+cW/rpU+Zqw+kkfTAoCId/su11qMG771Sicvp+EkMRsMTkA0I1S+OuS7n36fv8mBt9ZRXXwtMMPBVl3V8JTMbighm1R/96MxYIDQchVaWAtl6FPU0fMGNgMv4/vDIWp4flg90b1YG9hiuTs/Ec+K9ul0BTEpOfB1twUL3XwMLhvhI8HJBLg3INkRKbkFKlnVLmhaXr2lmZi4XAAsKuBTCPZI9/D40yVDagqlzL2JOnT0AJ9GloU3FLi5xHN8MbqVATFZWDq2lP4z8BmaO1hi7O3w2EODV5qbQuosiED8JpPPSw7/gCztl3Ejgt26ORlh2dbu4q1FnZfuQdz5KFfM2c4ytX4j58HboTG4EpYKm6EFkaVv8xIw46pvWAileBYYCjMkYehLT11r00x4ke0BqgnB55tZo3jQQnYc/U+Zgxsjr1X78MceRjVvgEkasMvPRdzYP/UTohKycHl8FT4h6egYT1LPNfcGlBlY3BzGyw9lIdrD6KQntFM/FLRagUcDwyFOfIxtKU1/Jo54NoDB+wOiMF7685g4ch2GNCy9Gh6ZVwJS8HRG6EwBwAB+Pivc9jxbi+42CoeetvFbb8ahb1Xg2EpAX4d2QYyqRTj117GwYCQgj4ImOzrjf6NrcTXwdNaQGM7ICYtF1eDo9CnqVOZ278WkYIvtl4CADSw1qJXk8K2+hoeHvYWZa1eJ8Sk5WLmlgswh+4k5G5kHPb5ByMtJx/myEO3Bg6wkioBle6LvGcDc7go1MjI0h1DnRs61Eq/BUFAplING0X1XpHIyVfjPxvPA6ps9G/sgOl+HjCRStC7YUv8fMQEy0+GYO6Oy2jjZILGztY4G5yIL3YFIi5dCUsJ8FYfb6TnqrDlchS+++cqAMBZIcOErs7iMSYB8HJbe/Rq0Blf/hOI0/eS0KOxg/gerarRHRyw/HA+QmMTcO1BFDp5Gb42R27HY/3JWzAHYKOQITQ2AaOXHsGyN3zQ0s2m9I2S6Ps9N7HtajSOXg/Bzmm9DK4AVtbCPdex50YsHsTE49fXOpXa5kBgLBbv19VbszfNx8DWrqW2q6x912Pw19k7MAHQtb4txvbwwrOtXGEmywdUhVeOP+pfH+/7uukCmQXfM03spfj9tZZ4Y/UlHAh4gCm9XNHMxfDqoiAIWH8uDN8
fvAupADzX3Ak/jmyHLKUaI5adQ0R8Il5cdBDRqXkwB/C2rzemD2gGqVRb4vi3lQGd3MxwKyYDl+5FisVF6zp9QN1EWvqPCb0dF3XnD8PbeMBKqsRzzazR0dUUQQUTUXw+sCU8rQW828sV8/65VbiiKhuQln/87b6s2/bAVi64Ep6CmMRkbDsfhDHdvB7uwT2BLtwN153HNbLH0BY2+ONkHi7cjUBeTpMSP9yKEgQBkSk58LC3qPC1NkZ6rgonAsNgDi1Gd3Ao9/ugT0MLzHrOC1/tvoWVR27ipdZ2cLKWl9pWEARsuRAEc+RhYldvg/PGmc944vStcNwOj8ULPx1Eeq4KrhamGNPVE3tvxCAqJQtrjt+Ci0KDsQX1TPTb/OXYfSw/EQIz6LJd577QWOyzn7clHExVSE7Nw63wWLTxsDXqOTh2Jx43w2JgK5NiWm9XQJWNYW3s8OshLZIy0vCvfzCGdfSoeEPl2HAuDN/tD4KLjRzb3u0pZusDukDMzag0XItMw/6bcTDT5sKvuRM6uZmJj+3zZ73wvq+77vxDk4P+jS3hYalBSlY6Tt8KQ/9qOFcWBAErT4Vg8ZH7MAEwoHE9XI9MQ0BINN5clg7fZo7Y5h8tznK55Z3uaFvfDlEpOYhOSIKVVIL/+HlhwYG7OBcUgel+uucsNDELb62/jNh0JcwBNHS0QFhSDr7adhE7pvbChjOhyMvJRCsnCwwr+C1UlAmA2YO9MGHtFey4eA9vdnasMMNEEAQs3HsV5sjDyx090N7VFO1d7fBiq464HJaCdefCcDwoEadvh+OjvEysHtfF4P10NVzX5tidBJR2rXLxvzKM7FIfr3X1KpGdXVxYUjb+PhcEcwj4aWR7fPvvHcQnp+DP07cxsXcjsV22Uo0lhwJgDt134rYLQRjfxQkSiQSHrscgNzsD3rZyPNPEqtz36Svt7NG9fid8sSsQF0NTMG/nZYTHJeB/z7VAeHI2Zm27BHOoMb6nFz4e1KLIMDfD72MAMAUwpIUNtvtH4/D1B+jZoPzH+jCO3ngAc+RhSIt6UCAPKJJoVd8K6NfIAucfpGD35XuIiE+FOfLQvJ60yuewr3eqh8PXQwAAjnJV1bZTiXUkAgdsl5CRkQFbW1ukfw3YVP9vciIiIiJ6xLQm5rj9jO4iRasjXSHV1N5sUkRERHVJRh5g+wWQnp4OG5vyL4RyeBoREREREREREZXA4WnlmRIDVBB1e9olZOQhIiUHHTztyh12oNEK+Pbf29h0sbDOxOvdPPHF0NZlrpOv1uLFX08jPDkXClMp8lRaTPb1xn8GNje6f7P/CcTflwvH1M5/uW2VU3Rvx6ZjxG/noTCV4th/+8LOwhQDfjyB2HQlfh3TEQOKFXXT19b4+ahuFqjnWrti/ghdMcD/bb+BQ6XUe2rqbIUNb3WDbUFBs/QcFZ7/5RRSslWYObiFmOZ8PCgeU/+8BgD4cEATTOnbpEqPKTlLiTfXXERoYg7MZFIMbeeGN7p7VWnYTWxaLvr/eBJSCXD+swElhkDl5mvw/C+nEJeuxPRnmsLSzATf/BsEB0tTfDW0Nf6zJQCCANSzMkVylgpD27lhYZFZr2pbnkqDmdtv4kBBMfWir/mBwDixMK6brRxH/9u3xHjtPJUGvb87hux8Df6a3A0dPO3L3FdaTj72B8ZVug5BIydL9GrsWGJGipvRaXhj1SXka7SY1McbWkHA1itRyMxTw1ouw9qJXdDa3bg0+KJuxaTjtVUXka/WYsbAZuJsZKW5EpaCt/+4AjsLU3wzrC16Fqn5VBe8vuoC/CPS8F6/Jpjk6413N17F+QcpqGdliu1TDIeCJmcp8cryc4hLV2JQaxcserUDNpwPx3cHgiAIgFwmxYIR7TCojSv8w1MwbdM1pBWkw0skgF8zJ8Sm5+FuXOGsMQ/z2VRUQmYepm70x62YDHFZIycLjOvpjRGd6hukrydk5uFCSDKeaekCC7PqPR04fCseH2y+Brl
Mij3v9cbUP68iODEb43p64dPBLctcTxAEBESmISU7H/1bOIvvo1dXnMeNqHSM7Fwf269GQSsUDi0AdPWOnv/5NLKUanw3oi26eTvg5WXnkJqtwvCOHvhiaMsqPcaEzDw8v/g0svM1WDiyHYa2e7ihXh9vvY69N2LRt7kTZr/YGu9uvIo7sZmQy6RY+Eq7ahtOl6VUo9/CE8hSqvFie3fMf7mt+LnwICETDxKy8UwrF4PPitx8DY7ciUc3bwc4l5FmrdZocfJeIqJSK87YkZtKMaCFS5nDf/R+PHQXq0+HwtLMBAKAnHwN3vHzxvRnDL/rN10Mxzf77kArAB8PamYwLKIsmTlKIFI3wUanxI1wszfHG928MKyjByzlhsfDuDUXcSk0FR/0b4J3++m+U8/cT8TkDVdRPCd/XE8v/O+5FqXW5YhOzUFQbCb6tnCu1PCrlOx8TPvzKgIiSy/Aq+fbzBEr3uxs9HYB4PCtOHywOUC8Xd9BgaHt3GFhKoNEoqvjMqSdW5nvEbVGiyN3EtDY2RJNna1x9HY83vvrGrwdLfDvh74AAP/wFLy++hKsFTKc+qSfOEQtX63FrN23sKugrl1rdxu0crfG1ivRsLc0xd73+8DB0gyCIODonQRYyk3Qo3HZ3w/34jNxLjgJggBk56ux9PgDyKQSnPi4r0HNuoro34tNna3g7WSJG1FpiEtXopGTBdq622HPjRhoBWDv+73QuArFanf4R+HznYGwtzTFgem+mPzHFVyP1A2RWv6GT5k1XbKVagz46aQ4hAoAFKZSLHylPZ4pcp4ZnZqDsWsuISYtD63dbbBuYldxNsGiLocmY8K6K9BoBcx+sTVe6uBucC5TdB+6+w2/h27HpuOVZechCMCGt7oiPlOJj7bohgBLJYBWALo3csDi0R2rNO33qOXncDM6Ax/0b4IpfRuX+bzo6b8jLoWmiDOcHbwdj7txmTA3NcEPI9sZNdTty126odOdvewRGJOOPJUWO6f1RAtXGyw6fBcrT4ViYCsX3IlLR1RKHqb0bYQPBzQz2Mb/tt3APwUT+PwyuiMGtq54vz8fuYflJ0NKLP9uRFu81MEDeSoN3lp3Gf4RaXC3U2Dv+30qnF1L/1j0zEyk+OblNkZ9XwXFZWDD+XDsvRFb7jnnxN4N8fGgwgkXrkWk4M3fL4vDiu0tTZGarULnhvb4Y2JX3I3PxPCl5yCTSuDX3AlH7yTgudauWDS6Q4V9Ks3FkGS8/9c15OZrsGqcD7o3qtw55Gc7bmLntWj0auqIPhWcf96KTsf+wDioi4zrszAzwQvt3VDf3gKCAMikEjzf1k08N/z+wB2sPRte4W+Xr3bfxNYrutfKzVaOYx/1q9TjqHYZGcAXxp3XcHhaKcThaUakapHxBEHA0uPB+OGQrgjcnvd6o2398n+oHgiMxZSN/uLtf97rhXYFPxKMcTU8FSOWnQOgm4b1yhfPlDhRNJYgCPBbeAIRKTmwNDNB/5Yu2HM9BhZmJvD/cmCZY/h3XYvGx9uuQ6UR0NnLHmqt7gvPzESKjwY1g7udOdQaAd/+ewcJmUp0amCHjZO6ISAiDZ/uuImIlBw0drLEgem+BsX11pwJxdy9twEA84a1wZvdK1dzIUupxmurLuBGVDrcbRXYMqWHWNi6qvwWHkd4cg5+Ht3BoBA0gIICx8HwsDPH0f/6wUQqwXOLT+FBYuF42te6NcCrnT0x/Lez0ArA7+M6l1oTKl+tRW5+YQE+K4WsUifnWUo1NJrSP/pszGUlTlpSs/Px9h9XcCU8FaYmEix8pb3BD3xBEDBqxXlcDkvFa90a4NvhbUvd9gd/XcM/12MwvmdDfFhQQNdaITMIuObkq/HSr2fFmRUqq5GjJcb3aojn27rB1ESKnHw1Xll2HtFpuXimpQtWjdWdrGYp1Zi47jIuhaagnqUZtk7pgUYFs0DpvxbKO3lLz1XhhSVnEJGSgwEtnLFqbOcSwariUrPzYVkwy2Fd88/
1GHzw1zU4W8vRqYE9DtyKg6WZCf58uzs6eNqVaO8fkYpXV5yHSiPAx8seV8N1s0o2cLBARMFEAaM618eugBjkq7VoV98WdhZmOHWvcDYmhakUbdxtcSU8Fc7Wchz/qG+lP59y8tVQqnQnetFpuZiy8SqiUnPhYGmG59u6Yte1GHHGt44N7LDwlXZo7GSF7f7RmLvnFjLy1Ojfwhmry3n91BotMvMKi2ZW9Boq1RoM/OkUIlJy8H7/Jvjvs81x6l4ixq65pJu96L1eqG9n+FmjFQScvJeINWdDcaNgRqKvhrbCxN7euB+fiYGLTkEmleD8zAH4bn8QtvtHoV9zJ6yd0BUAMHPHTfx1KQLt6tti19RekEolOHUvEePWXoIg6H4Uj+naAK/4eMC8EsGjRYfvYdvVKLT3tMOuqT0r/EFTkZDELDzz00loBcDRSo6kLCXqWZph1bjO6NSg7EByVRwPSsCkP3Q/Fif28sang1vg1+PB+O14MNRaAR8MaCoWkRYEAW//cQVH7iTAWiHDl0NbYaRPffHxpueq8PflCKw/F47oNOOHeJmZSPFiB3dM7OWNVu4lz6cO3orDOxt0dcyWvtYJGkHAB3/pLoiseNMHfs2cIAgFgaWC2Ype7eyJr4e3KVFstjRarRa3b+u+J8fvjEFStu6HuLVChtFdPPFqF0/YKEwRn6HEC7+egUQCnP1ff4MaHyGJWUjOzodWKyAwJgPzCr53//dcC7xbMIkHUDhzzg+H7iJPpcU7fo0ws5wAqUqjRZ5K9z0Wl56Ht/+4grDkHNgoZJg3rA0creSQSACpRAIJgIw8Nd7deBVqrYD1E7vCr1nZtQOLKvp+7ObtgLvxmWIQu6gGDhZYMKKdOPueXlBcBj7ZdgM3CiYvmeLXGMnZSvx1KRJvdvfCvGFtCp5rAT2+O4r4DCVautlg4Svt4FXPAu9u9MeZ4CSYSHUF4t/xbQStALyw5AzuxmdiWAd3fDSoOWbuuClOQvJie3fMfrE1HCzNxG2fuJeANWfCcKaUiUCea+2K5W/6GPV86MWm56L/DyeRqyp7Zq1eTerhz0ndK7VdPbVGi4GLTiE0KRut3GxwOzYDVnIZDv3Ht8IaMgkZeTh9PwnXIlNx/kEyHiRmQyoBZr/YGm9298Lmy5H4dt8dZCrVaORoia1TepQbMFt+8gG+2x8EMxMpmrpY4VZMBkxNJJjYyxsx6XnwD08V39cfDmiK6c80hUQigSAIeHXlBVwKTcHQdm5iLbnZ/9zCuoKp01/xqY9vh7et8vf6xgvh+GKXboKAdvVtMbGXN3o2qQcJJBBQcJ4mAAKAi6Ep+P1MKK5HppXYjqOVHGvGdzb6N0JUag76/XACqoJzQVcbBc7P7A+JRILLYSkYufy82NbFRo5j/y35/ZyanY/XV1+Eu525eG5VEa1WwO3YDKg0WphIJdjhH41158Igk0qw4k0f/HUpEkfuxMNGIcPWKT3R3LXigGWeSoNhS88iKC4TNgoZVo7tjO6N6lW4XlHJWUqcup+IgAhdbaqo1Fy0cLVGxwZ26NzQAX2bOZV4fIHR6Vh1OgT7bsSKAZaiv+1eWXYOV8ILZ9z+6+3uJT5fKiM1Ox+ZeWo0qFf53yrHguIxcd2VSq3T1dsB/Vs4Y9e1aLFmXlFutgrs+6AP7C1M0ef744hKzcXyNzrhuTZupWxNp+jvUt9mTvhjYtfKPZBqVpmYx2MRNFq6dCkWLlyIuLg4tG/fHkuWLEHXrmU/yVu3bsWXX36JsLAwNG3aFAsWLMDzzz9v9P4YNHq0jtyOR6ZSheEd61fYVhAEvLriAi6FpcDdVoGzn/av1Em7IAjo98MJhCXn4OWOHvjp1Q4P0XNdYblPt980+EFf9Iu0LOcfJOOdDVeQUfDDy87CFCve8EG3Ih/qQXEZGLX8PDLy1PB0MEdkiu4L3N1WgeVv+pT6RfjTobv45VgwJBJgxjPN4FbBiUhR269G4XxIMhwKAgb6aaMfxqf
bb2BzBbMWLRnTUSzMWvRDvLW7Dba/2xMKUxN8++8drDwVAlcbBQ7+x9fg6tW/N2Pxv+03DH/EmpmgXX07dGxgh4b1LHVVk4tJz1EhICoNARFp5f7osbMwRQdPO3TwtEOuSoOAiDTciErXzdKgkGHFmz7oWcpV0Ji0XKw/F4a3enuXeZV+/81YvPunv8EyN1sFlr3hgw6edhAEATO2XMfOa9FwtDIrEXgrj1oj4NS9xDJnC2rgYIE97/c2eC4z8lQYs/ICbsVkwMPOHM+2dkFAZBpuxWTARiETnwcPe3PxBC42PQ8BEWnwj0hFUlY+POzMse+D3jUyc8OjlK/WoteCY0jM1BUvNzORYu2ELuW+Bn+cD8NXuwsL7c4c3AJv9fbGvL23xVkGAWBgKxf8MrojzM1MEJyQhZ3XomBnboaRnevD3MykRIClPHkqDf65HoMrYSm4FpGG4MSsElkQDetZYN2ErmjoaInMPBX+vhyJn4/cR6ZSDTMTKVp72OBaRJrBOsV/AOtdCUvB1D/9kZBZODuLmYkUrdxt0LGBHZo6W0P/2z0tR4WASN2xEZ+hLBEIm7juMo4ZMSW3TCqBWitAJpXg73d64OCtOKw8FYKBrVywamxnhCdno/+PJ6HRCnixvTuC4jJwL173mbzlnR7o6l1YzHx3QDR+OnwP4RXM+FORbVN6VFsB+4+2Xse2glllGjlZYt34rlU6CTbGDv8ozCjICnCxkSM+o/B1lEiA9RO6wreZE1acfID5+4MM1vVt5gQvBwtci0zFndhM8Yqyg6UZejauV2GgPiwpG9ejCrNmPOzM0aGBHdp62CIuPa/gsyYdKo2At3p748uhrQAY/iAt7uNBzTHViGwEvaJBI6/GzbArIAZrz4YhpIxpnPs0dcSGt7qVu83Vp0Pw9b47AHSzAHk6WEAQBGy6FFnix+zKN33wbCnZYwcC4/C/7TfEGZ30POzMsX5iFzQpI7Nl7p7bWHM2FE2drbD/wz5GFZbXv7b6H75SiQQ7r0UjIDIVWgEQBODcgyTEFswK+Vq3BmKgPCQxG7+fCYFKI0Auk0JZLBNh+Rs+BjP4nLyXiOmbryE1RwWZVAJXWwWiUnNhYWaCpa91Qr+CKeUB3exrLxdcINJnk5vJpFBrtAVBVTO83Kk+7sRmICAyTfzOl0p0x6a9hS47SS4zwbt9G6Oho2WFz0VxBwJjsT8wDi1cbdDB0w4N6lkgqGB/ESk5mNq3iVE/2suy61o0phdkIQPAN8Pb4PVKFlVXa7T4cnegOPNUYydL8WJbe087/PZ6J3hUcO6n1QqYvOEqjtzRZbjbmptixZs+YmBBqxWw4GAQVhRkwDzX2hU25jJci0jD/YQsKEylOPrfvuJ+8tVa/Hz0HtztzPFa1wYPFUxXabSYt/c2Nl+ONDq72kwmxcCWLrApOJ+xVsgwtodXpS98frHrJjZe0M3MNbqLJ74b0Q6A7jnvNO+weM6+6NX2Rv1mqQqtVsCMLQHYFRAjLjOTSbHxrW4G32UViUnLxeZLERjW0UO8AFhT4jPysN0/Ch525gaZakWP/ybOVjj8H9+HvvBSVVqtgMVH7yM8ueLCzzYKU4zsXF/83SUIAs4/SMbem7FQqrSQSIALIcmISs2FXzMnzBjYDC8tPQtzU10SQXmZYYIgYMBPJxGSmG3wvVdbnqig0d9//42xY8di+fLl6NatGxYvXoytW7fi7t27cHZ2LtH+3Llz8PX1xfz58zF06FBs2rQJCxYsgL+/P9q0aWPUPhk0qluC4jLw3qZrmNCrYaW/bAFdkGHp8WAsfrUDmrpU/ctfTxAEnL6fhN/PhCIwOh0rx3YWp0stz/34TLyz8SpkUgmWv+FT6of61fBUvLH6onjla2wPL3zyXItSU471fZn1zy38UeQHamVYmpngr8ndK5W9VZ4rYSmYsO6yQUCnqGdbuWDFm4VXY/RBkqvhqdjwVld41dOd9OXmazD451MIS85BAwcLfDeiLXo
0qoffz4Tim3/vlPiRXBMa1rPAyrGdS8x6VBlFrwYVpTCVYsmYTkjMVOKznTdhIpXgr7e7V+qEAdBlUG2/GoX15wx/FHnYmWPlWJ9Sh6AlZSkxavn5Mn9ElcfOwhTrJ3RF+1IycR5Hiw7fw89H70MqAX573fDHUGn077+9N2Ix76U2GNKu8OrSmjOhWHzkHkZ19sTM51uW+wP7QGAcpmy8CrlMiqP/9SvzxPdKWAo+2X4DIYllv1a9mzji59EdSlx1jknLxWc7b+LEXV2mk5lMiv880wxWChm+3BUIE6kEmyZ1Mwhk77sRi/9sCajSdM1ymRRLxnQ0+MEckpiFkcvPIzk7v9R1XGzkeKObF8Z0a4BZu29h381YuNkqoNJokZSVj1VjO2NgwfCMT7Zdx5YrUQbrT+jVELNeKDnkWaMVcCwoAWvOhOJaZGqlPj8kEt0U1rNfLHsodWVFpebg9dUX0cjREote7fDIA65Fgxz1LM0w96U2OBOchL8uRcDB0gyzX2yN//wdAI1WwJwXWyMnX4NFR+6VeN2bu1hjYu+GeKmDR7mzYxXlH5GKNWdCsT8wTgw6Fde3uRNWje0sZg7lq7WY+udVHLlTGGC0lsvwzctt8WIlZ4IrGjRq1aoVpFIptFpdVtvvZ0JxISQZ2oIDwtJMhmVv+KB304qD9d/tD8Lygim2i7KWy/D5kJa4F5+FNWdDYa2QYe/7vcXvNgD4/Uwovt53u8Rx2LWhA359vSOcrcuegSU9RwW/H44jLUdlVIZxUpYSfQuGKf4wsj1e8Sn9h29mngrf/huEvy5FlHr/wFYu+GZYG/hHpOKLXbeQlKWEVAJc++rZEkOSEjOVmPVPIP69qRv+5Gglx9rxXUrNLP9m322sOq3LIOvS0B4LRrRDZp4aH2+7LgaC9fTZYWN7NISnQ92eYVVPoxUwaPEpBCdkoXsjB2ya1L3CjNzSFM/UV5hK8dGzzTGhl7fRWdbpOSq8/vsFqDUClr7eqdQLhZsuRuDL3YEG71WJBJj9QmuMKzIL3KOQnKXEposR+PNiBOIz8wzu0z9CRys5XuvWAG909zKYva2q4tLz4LvwOPLV2hIZItM2+WPfjVh0bGCH7VN6Vul1M1a+Wou31l/G6ftJkEqAZW/4YFA1DVWuTUq1Bj3mH0NKdj5mv9AK43t513aXqk1QXAZe+vUslGqtmGE+uI0rlr1RccbjgcA4fPvvHSx9rVOFI24etScqaNStWzd06dIFv/76KwDdCYCnpyfef/99fPrppyXav/rqq8jOzsbevXvFZd27d0eHDh2wfPlyo/bJoBE9KlqtAImk/GE/l0JTsOliOF7r5mVU0ECrFfDbiWBcDkutsG1R5qYmeNu3kVEBr9oQGJ2OyX9cQUzB1c9ODezgX5Ad8WZ3L3w+RPdDXCsICEvKwbWIVFyLSENilrLU7cllUrTxsEXHBnZo42EL81J+9Gi0Au7FZyIgMg0BkWmQy0zQ0VOXwdTYyapaThoEQRB/LGTnq/H+X9dw4m4ipBLdVNMqjYCZg1vgHb+SWR+VodJoIQiAAAGmUmm5fY9Oy8V3+4PgYGGKjg3s0a6+LVILskYCItOQWuRHvq25Kdp72qJjA3u0cbetcKz94yQ9V4W5e25jYCvnctOLixMEodT3dFnLS2s3ZtUFXAhJQb/mTni1i2eJNhdCUrD+fBgEAXC2lmOET310amCPDp52qGdZGHQo73UWBAG7A2Jw7kESJvs2QhNnawiCgP9uuY4d16LhbC3HrBdaw0QK3I7NxC9H7wPQ/WBc/GoHmJvqas7EpOXCv+D9FpVamMEjl5mI77F29W1LrY+i1QrQlHHaIZNKxOcrM0+Fl349KwYzHa3McH7mADGwkJSlxPcHguBgKUenBnbo2MC+wto5dYmxx0Z12XghHGFJ2Xi3b2PUs5IjT6XBy7+dw+3YwtpXL3Vwx+JXO0AikSA4IQu/n9HVGepQ8Py62yqq3OcspRo3otJwLSI
Nt2My4GQtR8cGukzGBg4WpW43W6nWD06BXCY1ajhacaUFjaqDIAhYfToU50OSxWVutgq8378pXAsCnaNXXsDV8FS0crPBBwMKaiQFJ4mZDa93a4DPh7SEtOCxGxuIW38uDLP+uQV7C1N8M7wtyvta2nM9FvtuxqKthy12T+tV4XfY2eAk/HkxHHkFQ15lUgle7OCOIW3dxNcoLScfvx4Lhlc9C7zZo2GZ2zoQGIuT95IwtW/jMoM8eSpdgNLLwRKju3iK/VOqNbqMsMQstK1vh46edmjham1UZlVdcyMqDX9eiMD0gU3hZvtw033/ezMWp+8n4h3fqmVWGfO5cy44CVuuRMLdzlyXbdzArtxA5uNuz/UY3IhKw/+ea2FwfAUnZGHZiQd4r38TeFfhua6sbKUavx4Phk8De4PaVY+7U/cScTY4Cf8Z2Mzoz7jHxZbLkfhk+w3x9i9jOlb6wkZte2KCRvn5+bCwsMC2bdswbNgwcfm4ceOQlpaG3bt3l1inQYMGmDFjBqZPny4umzVrFnbt2oXr16+Xuh+lUgmlsvCHZkZGBjw9PRk0IqplmXkqfLc/CH9eLLz6+dnzLfB2n0a1luJa3dQaLb7YFSgO6ytad4ieHrdjMjB0yWmUkYwhGulTH18MaSUWy68O5dXRGtfDC1+90LpSNcOqS1BcBoYtPYs8lRZv9/HG50NqN437SROenI2hv5xBplKNJs5W2D2tV5Vr/tVVjypoZIzY9FwM+eUMUkrJrHuY7zGVRovBP59GcCXq3hUftklERA9HEAR8tPUGtvtHwUwmhf+XA8scGVJXVSZoVKcfWVJSEjQaDVxcDCOuLi4uCAoKKnWduLi4UtvHxcWV2h4A5s+fjzlz5jx8h4moWlkrdFdSh7Zzx9qzoXi5U/0Khww9bmQmUsx/uS1audsgMDodnz/figGjp1Ardxt8NbQV9t6ILfV+czMTvNXbG32blxyW/bAszGRYNbYzvtsfhKSCTD2pVIKXOrg/dL2Kh9HC1Qa/vd4JW69ElTszH1WNVz1LrB7XGZsvR+KDAU2fuIBRbXOzNcea8V2w6PA9ZBfUmjOTSTGuZ8OHGnpiaiLFDyPbY+HBILEIfnn6tXBmwIiIqJpJJBLMG9YaZjIJWrvbPnYBo8qq05lGMTEx8PDwwLlz59CjRw9x+SeffIKTJ0/i4sWLJdYxMzPD+vXrMWbMGHHZb7/9hjlz5iA+vuQU5wAzjYiIiIieNLWZaURERFSXPTGZRo6OjjAxMSkR7ImPj4era+lXaVxdXSvVHgDkcjnk8senFgIRERERERER0aNWpy+5mJmZwcfHB0ePHhWXabVaHD161CDzqKgePXoYtAeAw4cPl9meiIiIiIiIiIhKqtOZRgAwY8YMjBs3Dp07d0bXrl2xePFiZGdnY8KECQCAsWPHwsPDA/PnzwcAfPjhh/Dz88OPP/6IIUOGYPPmzbhy5QpWrlxZmw+DiIiIiIiIiOixUueDRq+++ioSExPx1VdfIS4uDh06dMCBAwfEYtcREREGY9R79uyJTZs24YsvvsBnn32Gpk2bYteuXWjTpk1tPQQiIiIiIiIiosdOnS6EXVvS09NhZ2eHyMhIFsImIiIiegxptVpxtt0WLVqwEDYREVEB/eRfaWlpsLW1Lbdtnc80qg3JyckAAE9Pz1ruCRERERERERFR9cvMzGTQqCocHBwA6Ia+VfQEElUHfaSX2W1Uk3jcUU3jMUc1jccc1TQec1QbeNxRZQmCgMzMTLi7u1fYlkGjUujTl21tbfmmoxplY2PDY45qHI87qmk85qim8ZijmsZjjmoDjzuqDGMTZDi4m4iIiIiIiIiISmDQiIiIiIiIiIiISmDQqBRyuRyzZs2CXC6v7a7QU4LHHNUGHndU03jMUU3jMUc1jccc1QYed/QoSQRBEGq7E0REREREREREVLcw04iIiIiIiIiIiEpg0IiIiIiIiIiIiEpg0IiIiIiIiIiIiEpg0Ii
IiIiIiIiIiEp4aoNGS5cuRcOGDaFQKNCtWzdcunSp3PZbt25FixYtoFAo0LZtW/z777811FN6UlTmmFu3bh0kEonBn0KhqMHe0uPu1KlTeOGFF+Du7g6JRIJdu3ZVuM6JEyfQqVMnyOVyNGnSBOvWrXvk/aQnR2WPuRMnTpT4nJNIJIiLi6uZDtNjb/78+ejSpQusra3h7OyMYcOG4e7duxWux3M6qqqqHHM8p6OHtWzZMrRr1w42NjawsbFBjx49sH///nLX4eccVaenMmj0999/Y8aMGZg1axb8/f3Rvn17DBo0CAkJCaW2P3fuHMaMGYO33noL165dw7BhwzBs2DAEBgbWcM/pcVXZYw4AbGxsEBsbK/6Fh4fXYI/pcZednY327dtj6dKlRrUPDQ3FkCFD0K9fPwQEBGD69OmYNGkSDh48+Ih7Sk+Kyh5zenfv3jX4rHN2dn5EPaQnzcmTJzFt2jRcuHABhw8fhkqlwrPPPovs7Owy1+E5HT2MqhxzAM/p6OHUr18f3333Ha5evYorV66gf//+eOmll3Dr1q1S2/NzjqqbRBAEobY7UdO6deuGLl264NdffwUAaLVaeHp64v3338enn35aov2rr76K7Oxs7N27V1zWvXt3dOjQAcuXL6+xftPjq7LH3Lp16zB9+nSkpaXVcE/pSSSRSLBz504MGzaszDb/+9//sG/fPoMTitGjRyMtLQ0HDhyogV7Sk8SYY+7EiRPo168fUlNTYWdnV2N9oydXYmIinJ2dcfLkSfj6+pbahud0VJ2MOeZ4TkePgoODAxYuXIi33nqrxH38nKPq9tRlGuXn5+Pq1at45plnxGVSqRTPPPMMzp8/X+o658+fN2gPAIMGDSqzPVFRVTnmACArKwteXl7w9PQs92oCUXXg5xzVlg4dOsDNzQ0DBw7E2bNna7s79BhLT08HoPsxVRZ+1lF1MuaYA3hOR9VHo9Fg8+bNyM7ORo8ePUptw885qm5PXdAoKSkJGo0GLi4uBstdXFzKrKMQFxdXqfZERVXlmGvevDnWrFmD3bt3Y+PGjdBqtejZsyeioqJqosv0FCrrcy4jIwO5ubm11Ct6krm5uWH58uXYvn07tm/fDk9PT/Tt2xf+/v613TV6DGm1WkyfPh29evVCmzZtymzHczqqLsYeczyno+pw8+ZNWFlZQS6XY8qUKdi5cydatWpValt+zlF1k9V2B4iopB49ehhcPejZsydatmyJFStWYN68ebXYMyKi6tG8eXM0b95cvN2zZ088ePAAixYtwoYNG2qxZ/Q4mjZtGgIDA3HmzJna7go9JYw95nhOR9WhefPmCAgIQHp6OrZt24Zx48bh5MmTZQaOiKrTU5dp5OjoCBMTE8THxxssj4+Ph6ura6nruLq6Vqo9UVFVOeaKMzU1RceOHREcHPwoukhU5uecjY0NzM3Na6lX9LTp2rUrP+eo0t577z3s3bsXx48fR/369ctty3M6qg6VOeaK4zkdVYWZmRmaNGkCHx8fzJ8/H+3bt8fPP/9calt+zlF1e+qCRmZmZvDx8cHRo0fFZVqtFkePHi1zXGiPHj0M2gPA4cOHy2xPVFRVjrniNBoNbt68CTc3t0fVTXrK8XOO6oKAgAB+zpHRBEHAe++9h507d+LYsWPw9vaucB1+1tHDqMoxVxzP6ag6aLVaKJXKUu/j5xxVt6dyeNqMGTMwbtw4dO7cGV27dsXixYuRnZ2NCRMmAADGjh0LDw8PzJ8/HwDw4Ycfws/PDz/++COGDBmCzZs348qVK1i5cmVtPgx6jFT2mJs7dy66d++OJk2aIC0tDQsXLkR4eDgmTZpUmw+DHiNZWVkGVzFDQ0MREBAABwcHNGjQADNnzkR0dDT++OMPAMCUKVPw66+/4pNPPsHEiRNx7NgxbNmyBfv27auth0CPmcoec4sXL4a3tzdat26NvLw8rF69GseOHcOhQ4dq6yHQY2batGnYtGkTdu/eDWtra7Feh62
trZghyXM6qk5VOeZ4TkcPa+bMmRg8eDAaNGiAzMxMbNq0CSdOnMDBgwcB8HOOaoDwlFqyZInQoEEDwczMTOjatatw4cIF8T4/Pz9h3LhxBu23bNkiNGvWTDAzMxNat24t7Nu3r4Z7TI+7yhxz06dPF9u6uLgIzz//vODv718LvabH1fHjxwUAJf70x9m4ceMEPz+/Eut06NBBMDMzExo1aiSsXbu2xvtNj6/KHnMLFiwQGjduLCgUCsHBwUHo27evcOzYsdrpPD2WSjveABh8dvGcjqpTVY45ntPRw5o4caLg5eUlmJmZCU5OTsKAAQOEQ4cOiffzc44eNYkgCEJNBqmIiIiIiIiIiKjue+pqGhERERERERERUcUYNCIiIiIiIiIiohIYNCIiIiIiIiIiohIYNCIiIiIiIiIiohIYNCIiIiIiIiIiohIYNCIiIiIiIiIiohIYNCIiIiIiIiIiohIYNCIiIiIiIiIiohIYNCIiIiKqgvHjx2PYsGE1vt9169ZBIpFAIpFg+vTpRq0zfvx4cZ1du3Y90v4RERHRk0NW2x0gIiIiqmskEkm598+aNQs///wzBEGooR4ZsrGxwd27d2FpaWlU+59//hnfffcd3NzcHnHPiIiI6EnCoBERERFRMbGxseL/f//9N7766ivcvXtXXGZlZQUrK6va6BoAXVDL1dXV6Pa2trawtbV9hD0iIiKiJxGHpxEREREV4+rqKv7Z2tqKQRr9n5WVVYnhaX379sX777+P6dOnw97eHi4uLli1ahWys7MxYcIEWFtbo0mTJti/f7/BvgIDAzF48GBYWVnBxcUFb775JpKSkird599++w1NmzaFQqGAi4sLXnnllYd9GoiIiOgpx6ARERERUTVZv349HB0dcenSJfy/vfsHSSaO4zj+sWzyhIOEMAijoT+EyTVFQ4OYU0EEBQ0ODUERhVsEYdAQzRHUFBGEU7U02FJTUyLWVuFQmw5hf2hTn+HhEeKGx0qJ7P2CAz3O+31/2/Hh+z0XFhY0NzeniYkJDQ4OKpVKKRwOKxKJ6O3tTZKUz+cVDAZlWZaSyaQSiYSy2awmJyc/tG4ymdTi4qLW1tZ0c3OjRCKhoaGhWmwRAAD8IoynAQAAVEkgENDKyookaXl5WRsbG/J4PJqZmZEkxWIxbW9v6/r6WgMDA9ra2pJlWVpfXy/fY3d3V21tbbq9vVVnZ2dF6z48PMjlcmlkZERut1s+n0+WZVV/gwAA4Feh0wgAAKBK+vr6yp8bGxvV3Nwsv99fPtfS0iJJyuVykqSrqyudn5+X35FkGIa6u7slSZlMpuJ1h4eH5fP51NHRoUgkooODg3I3EwAAwGcRGgEAAFRJU1PTu+8Oh+PduX//ylYsFiVJr6+vGh0dVTqdfnfc3d19aLzM7XYrlUopHo/L6/UqFospEAgon89/fVMAAODXYjwNAADgm/T39+vw8FDt7e1yOr/2WOZ0OhUKhRQKhbS6uirTNHV2dqbx8fEqVQsAAH4bOo0AAAC+yfz8vB4fHzU1NaXLy0tlMhmdnp5qenpahUKh4vucnJxoc3NT6XRa9/f32t/fV7FYVFdXVw2rBwAA9Y7QCAAA4Ju0trbq4uJChUJB4XBYfr9f0WhUpmmqoaHyxzTTNHV0dKRgMKienh7t7OwoHo+rt7e3htUDAIB65yiVSqXvLgIAAACV2dvbUzQa/dT7ihwOh46PjzU2Nlb1ugAAQP2h0wgAAOCHeXp6kmEYWlpaquj62dlZGYZR46oAAEC9odMIAADgB3l5eVE2m5X0dyzN4/H89ze5XE7Pz8+SJK/XK5fLVdMaAQBAfSA0AgAAAAAAgA3jaQAAAAAAALAhNAIAAAAAAIANoREAAAAAAABsCI0AAAAAAABgQ2gEAAAAAAAAG0IjAAAAAAAA2BAaAQAAAAAAwIbQCAAAAAAAADZ/AFtEIVgFt0lBAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# PRAAT uses cutoff of jitter > 0.02 as un-voiced\n", + "plt.plot(xs, jitter[0] * 100)\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Jitter [%]\")\n", + "plt.title(\"Jitter and average jitter score across full sample\")\n", + "plt.axhline(y = jitter[jitter < 0.02].mean() * 100, color=\"darkorange\")\n", + "plt.axvline(x = frame_i / 100, color=\"lightgrey\")\n", + "print(\"Average Jitter: {0:.2f}%\".format(100 * jitter[jitter < 0.02].mean().numpy()))" + ] + }, + { + "cell_type": "markdown", + "id": "da73114a-40e2-454f-bedf-a1eb86a686d9", + "metadata": {}, + "source": [ + "Shimmer is the average variation in peak amplitude. We first compute the amplitude of each peak, then\n", + "we compute the difference of each peak from the average peak amplitude. The shimmer is just\n", + "the average difference normalized by the overall average amplitude to achieve a score from 0 to 1." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "a3260893-a1d3-4135-890d-7e3b1f9b93ee", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Normalized peak amplitude differences: tensor([0.0294, 0.0015, 0.0098, 0.0210])\n", + "Shimmer score for the frame: tensor(0.0154)\n" + ] + } + ], + "source": [ + "avg_amps = peaks.mean(dim=-1, keepdims=True)\n", + "amp_diff = (peaks - avg_amps).abs()\n", + "shimmer = amp_diff.mean(dim=-1) / avg_amps.squeeze(-1).clamp(min=1e-3)\n", + "\n", + "print(\"Normalized peak amplitude differences:\", amp_diff[0, frame_i] / avg_amps[0, frame_i])\n", + "print(\"Shimmer score for the frame:\", shimmer[0, frame_i])" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "723c2372-c13b-40e4-afed-8b9dfd6dfcbf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Shimmer: 2.21%\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABH8AAADvCAYAAABrAnjSAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAtSZJREFUeJzs3XV8E4f7B/DPxVNLvaVKW1qKu3S4DJkyhSlzYYPvlN98sO0L0+9cmG/M2JgLOtzditTdXeP3++Ny16RJ26SNFZ7369UXNEmTS3K53D33CMOyLAtCCCGEEEIIIYQQcl4SeXoBCCGEEEIIIYQQQojrUPCHEEIIIYQQQggh5DxGwR9CCCGEEEIIIYSQ8xgFfwghhBBCCCGEEELOYxT8IYQQQgghhBBCCDmPUfCHEEIIIYQQQggh5DxGwR9CCCGEEEIIIYSQ8xgFfwghhBBCCCGEEELOYxT8IYQQQgghhBBCCDmPUfCHEEJIjzAMgwcffLDL23355ZdgGAZ5eXmuX6gLCL2u7nXbbbfBz8/PrtsyDINly5a5doGI2+n1eixduhSxsbEQiUSYN2+eQ3+/bNkyMAxjcVnfvn1x2223OW8hvQhtowghxDtQ8IcQQohNJ0+exLXXXov4+HgoFApER0fj4osvxrvvvuvpRSOEEI/5/PPP8dprr+Haa6/FV199hYcfftjTi0QIIYR0SeLpBSCEEOJ99uzZg2nTpiEuLg533303IiMjUVhYiH379uHtt9/G4sWLHb7PW265BQsWLIBcLnfBEhPifVpbWyGR0K7W+WbLli2Ijo7Gm2++6elFIYQQQuxGeySEEEKs/Pe//4VKpcLBgwcRGBhocV1FRUW37lMsFkMsFjth6byDXq+H0WiETCbz9KL0Os3NzfD19fX0YricQqHw9CI4VUtLC3x8fDzy2N60zlRUVFhtFwkhhBBvR2VfhBBCrGRnZ2PQoEE2D3DCw8Nt/s1vv/2GwYMHQy6XY9CgQVi/fr3F9bb6PvTt2xeXXXYZtm3bhtGjR0OpVGLIkCHYtm0bAOCXX37BkCFDoFAoMGrUKBw9etTiPvn+KwUFBbjsssvg5+eH6OhovP/++wC40rXp06fD19cX8fHx+O6776yWu66uDg899BBiY2Mhl8vRr18/vPLKKzAajcJt8vLywDAMXn/9dbz11ltISkqCXC7H6dOnO3wNv/jiC0yfPh3h4eGQy+UYOHAgPvzwQ6vb8a/Brl27MHbsWCgUCiQmJuLrr7+2um16ejqmT58OpVKJmJgYvPTSSxbL2ZkTJ07gtttuQ2JiIhQKBSIjI3HHHXegurpauM3atWvBMAy2b99u9ferVq0CwzA4deqUcNnZs2dx7bXXIjg4GAqFAqNHj8Yff/xh8Xf8+759+3YsWrQI4eHhiImJAQDk5+dj0aJF6N+/P5RKJUJCQnDdddfZ7A1y4sQJTJkyxeK5f/HFFzZ7iaxbtw6TJk2Cr68v/P39cemllyI9Pb3L10in02H58uVITk6GQqFASEgIJk6ciE2bNlndtri4GPPmzYOfnx/CwsLw2GOPwWAwWNymfc8fvtdLRkYGbr75ZqhUKoSFheHZZ58Fy7IoLCzElVdeiYCAAERGRuKNN96wuL9t27aBYRj8+OOPWL58OaKjo+Hv749rr70W9fX10Gg0eOihhxAeHg4/Pz/cfvvt0Gg0Vsv+zTffYNSoUVAqlQgODsaCBQtQWFhocZupU6di8ODBOHz4MCZPngwfHx889dRTHb529qxf5q/dnXfeiaioKMjlciQkJOD++++HVqsF0Pk6AwAffPABBg0aBLlcjqioKDzwwAOoq6uzeIzMzExcc801iIyMhEKhQExMDBYsWID6+nrhNps2bcLEiRMRGBgIPz8/9O/fv9PnyG8Htm7divT0dDAMA4ZhsG3bNuG94bdd7f/myy+/7PB+HfHDDz9g1KhR8Pf3R0BAAIYMGYK3335buL6mpgaPPfYYhgwZAj8/PwQ
EBGDu3Lk4fvy4xf04Y13i+719++236N+/v7Cd3rFjh13PpbufU0IIId1DmT+EEEKsxMfHY+/evTh16hQGDx7c5e137dqFX375BYsWLYK/vz/eeecdXHPNNSgoKEBISEinf5uVlYUbb7wR9957L26++Wa8/vrruPzyy/HRRx/hqaeewqJFiwAAK1euxPXXX49z585BJGo7d2EwGDB37lxMnjwZr776Kr799ls8+OCD8PX1xdNPP42bbroJV199NT766CPceuutSEtLQ0JCAgAuk2HKlCkoLi7Gvffei7i4OOzZswdPPvkkSktL8dZbb1ks6xdffAG1Wo177rkHcrkcwcHBHT6vDz/8EIMGDcIVV1wBiUSCP//8E4sWLYLRaMQDDzxg9Rpce+21uPPOO7Fw4UJ8/vnnuO222zBq1CgMGjQIAFBWVoZp06ZBr9fjiSeegK+vLz7++GMolcou3x+AO9DNycnB7bffjsjISKSnp+Pjjz9Geno69u3bB4ZhcOmll8LPzw8//vgjpkyZYvH3a9aswaBBg4T1IT09HRMmTEB0dLSwPD/++CPmzZuHn3/+GVdddZXF3y9atAhhYWF47rnn0NzcDAA4ePAg9uzZgwULFiAmJgZ5eXn48MMPMXXqVJw+fVrIMikuLsa0adPAMAyefPJJ+Pr64tNPP7VZQrh69WosXLgQs2fPxiuvvIKWlhZ8+OGHmDhxIo4ePYq+fft2+BotW7YMK1euxF133YWxY8eioaEBhw4dwpEjR3DxxRcLtzMYDJg9ezbGjRuH119/HZs3b8Ybb7yBpKQk3H///V2+F/Pnz8eAAQPw8ssv4++//8ZLL72E4OBgrFq1CtOnT8crr7yCb7/9Fo899hjGjBmDyZMnW/z9ypUroVQq8cQTTyArKwvvvvsupFIpRCIRamtrsWzZMuzbtw9ffvklEhIS8Nxzzwl/+9///hfPPvssrr/+etx1112orKzEu+++i8mTJ+Po0aMWAd/q6mrMnTsXCxYswM0334yIiIgOn5M96xcAlJSUYOzYsairq8M999yD1NRUFBcXY+3atWhpabHIpLO1zixbtgzLly/HzJkzcf/99+PcuXP48MMPcfDgQezevRtSqRRarRazZ8+GRqPB4sWLERkZieLiYvz111+oq6uDSqVCeno6LrvsMgwdOhQvvPAC5HI5srKysHv37g6fY1hYGFavXo3//ve/aGpqwsqVKwEAAwYMwJkzZ7p833tq06ZNuOGGGzBjxgy88sorAIAzZ85g9+7d+M9//gMAyMnJwW+//YbrrrsOCQkJKC8vx6pVqzBlyhScPn0aUVFRFvfZk3UJALZv3441a9ZgyZIlkMvl+OCDDzBnzhwcOHCg0++OnnxOCSGEdBNLCCGEtLNx40ZWLBazYrGYTUtLY5cuXcpu2LCB1Wq1VrcFwMpkMjYrK0u47Pjx4ywA9t133xUu++KLL1gAbG5urnBZfHw8C4Dds2ePcNmGDRtYAKxSqWTz8/OFy1etWsUCYLdu3SpctnDhQhYAu2LFCuGy2tpaVqlUsgzDsD/88INw+dmzZ1kA7PPPPy9c9uKLL7K+vr5sRkaGxXN64oknWLFYzBYUFLAsy7K5ubksADYgIICtqKiw4xVk2ZaWFqvLZs+ezSYmJlpcxr8GO3bsEC6rqKhg5XI5++ijjwqXPfTQQywAdv/+/Ra3U6lUVq+rvcvz/fffWz32DTfcwIaHh7N6vV64rLS0lBWJROwLL7wgXDZjxgx2yJAhrFqtFi4zGo3sRRddxCYnJwuX8e/7xIkTLe6zo2Xau3cvC4D9+uuvhcsWL17MMgzDHj16VLisurqaDQ4OtnjujY2NbGBgIHv33Xdb3GdZWRmrUqmsLm9v2LBh7KWXXtrpbfh1zvy1YFmWHTFiBDtq1CiLy9qvb88//zwLgL3nnnuEy/R6PRsTE8MyDMO+/PLLwuX8erxw4ULhsq1bt7IA2MGDB1t8Fm+44QaWYRh27ty
5Fo+flpbGxsfHC7/n5eWxYrGY/e9//2txu5MnT7ISicTi8ilTprAA2I8++qjT14Nn7/p16623siKRiD148KDV7Y1GI8uyHa8zFRUVrEwmY2fNmsUaDAbh8vfee48FwH7++ecsy7Ls0aNHWQDsTz/91OHyvvnmmywAtrKy0q7nZ27KlCnsoEGDLC7j3xvz7RPLtm07vvjiC+Eyfj0wFx8fb/Fe2/Kf//yHDQgIsPocmVOr1RavDb8McrncYp3t6brEstz6DYA9dOiQcFl+fj6rUCjYq666Sris/ba/p59TQggh3UNlX4QQQqxcfPHF2Lt3L6644gocP34cr776KmbPno3o6Girsh4AmDlzJpKSkoTfhw4dioCAAOTk5HT5WAMHDkRaWprw+7hx4wAA06dPR1xcnNXltu7zrrvuEv4fGBiI/v37w9fXF9dff71wef/+/REYGGjx9z/99BMmTZqEoKAgVFVVCT8zZ86EwWCwKl+45pprEBYW1uVzAmCRkVNfX4+qqipMmTIFOTk5FqUn/GswadIk4fewsDD079/fYln/+ecfjB8/HmPHjrW43U033eTw8qjValRVVWH8+PEAgCNHjgjXzZ8/HxUVFRblK2vXroXRaMT8+fMBcKUlW7ZswfXXX4/Gxkbhdauursbs2bORmZmJ4uJii8e/++67rXo+mS+TTqdDdXU1+vXrh8DAQItlWr9+PdLS0jB8+HDhsuDgYKvnvmnTJtTV1eGGG26weD/FYjHGjRuHrVu3dvoaBQYGIj09HZmZmZ3eDgDuu+8+i98nTZpk1/oOWK6vYrEYo0ePBsuyuPPOOy2Wpf06wLv11lshlUqF38eNGweWZXHHHXdY3G7cuHEoLCyEXq8HwJVRGo1GXH/99RavT2RkJJKTk61eH7lcjttvv92u52TP+mU0GvHbb7/h8ssvx+jRo63uo/348/brzObNm6HVavHQQw9ZZP/dfffdCAgIwN9//w0AUKlUAIANGzagpaXF5vLyGU6///673aWTnhYYGIjm5mabZYg8uVwuvDYGgwHV1dVCSZv5Z4rX3XWJl5aWhlGjRgm/x8XF4corr8SGDRusyiB5Pf2cEkII6R4K/hBCCLFpzJgx+OWXX1BbW4sDBw7gySefRGNjI6699lqrXjfmQRpeUFAQamtru3yc9n/LH7jFxsbavLz9fSoUCquAjEqlQkxMjNXBpEqlsvj7zMxMrF+/HmFhYRY/M2fOBGDd3JovF7PH7t27MXPmTPj6+iIwMBBhYWFCP5H2wR97Xr/8/HwkJydb3a5///52LU9NTQ3+85//ICIiAkqlEmFhYcLzMV+eOXPmQKVSYc2aNcJla9aswfDhw5GSkgKAK1NjWRbPPvus1Wv3/PPPA7DvtWttbcVzzz0n9FsKDQ1FWFgY6urqLJYpPz8f/fr1s/r79pfxQZvp06dbLdfGjRu7bFb+wgsvoK6uDikpKRgyZAgef/xxnDhxwup2ttY5e9d3wPY6r1AoEBoaanW5rft05DNjNBqF1zIzMxMsyyI5Odnq9Tlz5ozV6xMdHW13Q3N71q/Kyko0NDTYVUoKWK8z+fn5AKzXeZlMhsTEROH6hIQEPPLII/j0008RGhqK2bNn4/3337dYp+bPn48JEybgrrvuQkREBBYsWIAff/zRqwNBixYtQkpKCubOnYuYmBjccccdVr3VjEYj3nzzTSQnJ1t8pk6cOGG13QG6vy7xbG2TUlJS0NLSgsrKSpvPo6efU0IIId1DPX8IIYR0SiaTYcyYMRgzZgxSUlJw++2346effhIO8gF0OMWLZdku77+jv7X3Pnvy90ajERdffDGWLl1q87Z8sINnb3+d7OxszJgxA6mpqfjf//6H2NhYyGQy/PPPP3jzzTetDjB78vrZ6/rrr8eePXvw+OOPY/jw4fDz84PRaMScOXMslkcul2PevHn49ddf8cEHH6C8vBy7d+/GihUrhNvwt3/ssccwe/Zsm4/XPjBj67V
bvHgxvvjiCzz00ENIS0uDSqUCwzBYsGBBtw7C+b9ZvXo1IiMjra7vauz65MmTkZ2djd9//x0bN27Ep59+ijfffBMfffSRVbZOT9j6e0fWge6u80ajEQzDYN26dTZv6+fnZ/G7ves7YP/65QhHHr+9N954A7fddpvwXi5ZsgQrV67Evn37EBMTA6VSiR07dmDr1q34+++/sX79eqxZswbTp0/Hxo0bHX6P2weaeR1lv3RHeHg4jh07hg0bNmDdunVYt24dvvjiC9x666346quvAAArVqzAs88+izvuuAMvvvgigoODIRKJ8NBDD9l8H3q6/e2Onn5OCSGEdA9tXQkhhNiNL9UoLS318JI4R1JSEpqamoRMH2f5888/odFo8Mcff1icWe9JOUN8fLzNcqRz5851+be1tbX4999/sXz5couGrR2VN82fPx9fffUV/v33X5w5cwYsywolXwCQmJgIAJBKpT167dauXYuFCxdaTLVSq9VWk5vi4+ORlZVl9fftL+NLD8PDw7u9XMHBwbj99ttx++23o6mpCZMnT8ayZcssgj+9VVJSEliWRUJCglVgsyfsXb/CwsIQEBBgMTHOEfHx8QC4dZ5fBwFAq9UiNzfX6j0fMmQIhgwZgmeeeQZ79uzBhAkT8NFHH+Gll14CAIhEIsyYMQMzZszA//73P6xYsQJPP/00tm7d6vD6ExQUBABW6y6fjeQsMpkMl19+OS6//HIYjUYsWrQIq1atwrPPPot+/fph7dq1mDZtGj777DOLv6urq7PKLHMGW9uQjIwM+Pj4dFgi64zPKSGEEMdR2RchhBArW7dutXmG959//gFgf6mRt7v++uuxd+9ebNiwweq6uro6q/4W9uLPmpu/hvX19fjiiy+6t6AALrnkEuzbtw8HDhwQLqusrMS3337breUBYDXNjDdz5kwEBwdjzZo1WLNmDcaOHWtRghMeHo6pU6di1apVNgOBHZV72Fqu9sv07rvvWmVLzJ49G3v37sWxY8eEy2pqaqye++zZsxEQEIAVK1ZAp9M5vFztx5L7+fmhX79+Nsel90ZXX301xGIxli9fbvW6syxrcyy7Pexdv0QiEebNm4c///wThw4dsrqfrrJKZs6cCZlMhnfeecfitp999hnq6+tx6aWXAgAaGhqsPrtDhgyBSCQS3suamhqr++d7SnXn/Y6Pj4dYLLbqE/bBBx84fF8daf/+iEQiDB06FEDbMtv6TP30009WPbicZe/evRa9hAoLC/H7779j1qxZHWYP9fRzSgghpHso84cQQoiVxYsXo6WlBVdddRVSU1Oh1WqxZ88erFmzBn379rW7Cay3e/zxx/HHH3/gsssuE0arNzc34+TJk1i7di3y8vK6dbZ81qxZwhn6e++9F01NTfjkk08QHh7e7ayppUuXYvXq1ZgzZw7+85//CKPe4+PjbfalMRcQEIDJkyfj1VdfhU6nQ3R0NDZu3Ijc3Fybt5dKpbj66qvxww8/oLm5Ga+//rrVbd5//31MnDgRQ4YMwd13343ExESUl5dj7969KCoqwvHjx7t8TpdddhlWr14NlUqFgQMHYu/evdi8eTNCQkKsnvs333yDiy++GIsXLxZGvcfFxaGmpkYouQkICMCHH36IW265BSNHjsSCBQsQFhaGgoIC/P3335gwYQLee++9Dpdn4MCBmDp1KkaNGoXg4GAcOnQIa9euxYMPPtjlc+kNkpKS8NJLL+HJJ59EXl4e5s2bB39/f+Tm5uLXX3/FPffcg8cee8zh+3Vk/VqxYgU2btyIKVOm4J577sGAAQNQWlqKn376Cbt27bIYNd9eWFgYnnzySSxfvhxz5szBFVdcgXPnzuGDDz7AmDFjcPPNNwMAtmzZggcffBDXXXcdUlJSoNfrsXr1aojFYlxzzTUAuP5OO3bswKWXXor4+HhUVFTggw8+QExMDCZOnOjwa6BSqXDdddfh3XffBcMwSEpKwl9//eXU/jV33XUXampqMH3
6dMTExCA/Px/vvvsuhg8fjgEDBgDgPlMvvPACbr/9dlx00UU4efIkvv32W4tMKWcaPHgwZs+ebTHqHQCWL1/e4d/09HNKCCGkeyj4QwghxMrrr7+On376Cf/88w8+/vhjaLVaxMXFYdGiRXjmmWc6PUDrTXx8fLB9+3asWLECP/30E77++msEBAQgJSUFy5cvF5qfOqp///5Yu3YtnnnmGTz22GOIjIzE/fffj7CwMKspOvbq06cPtm7disWLF+Pll19GSEgI7rvvPkRFRVlMierId999h8WLF+P9998Hy7KYNWsW1q1bh6ioKJu3nz9/Pj799FMwDGMxNY03cOBAHDp0CMuXL8eXX36J6upqhIeHY8SIERalP515++23IRaL8e2330KtVmPChAnYvHmzVR+h2NhYbN26FUuWLMGKFSsQFhaGBx54AL6+vliyZAkUCoVw2xtvvBFRUVF4+eWX8dprr0Gj0SA6OhqTJk3qMmi5ZMkS/PHHH9i4cSM0Gg3i4+Px0ksv4fHHH7fr+fQGTzzxBFJSUvDmm28KB+ixsbGYNWsWrrjiim7fr73rV3R0NPbv349nn30W3377LRoaGhAdHY25c+fCx8eny8dZtmwZwsLC8N577+Hhhx9GcHAw7rnnHqxYsUKYWjVs2DDMnj0bf/75J4qLi+Hj44Nhw4Zh3bp1wgSyK664Anl5efj8889RVVWF0NBQTJkypUef+3fffRc6nQ4fffQR5HI5rr/+erz22mt2N7juys0334yPP/4YH3zwAerq6hAZGYn58+dj2bJlwoSvp556Cs3Nzfjuu++wZs0ajBw5En///TeeeOIJpyxDe1OmTEFaWhqWL1+OgoICDBw4EF9++aWQkdSRnnxOCSGEdA/DOrObJCGEEEKImzz00ENYtWoVmpqaetyEmRDiGIZh8MADD1CWDiGE9BLU84cQQgghXq+1tdXi9+rqaqxevRoTJ06kwA8hhBBCSBeo7IsQQgghXi8tLQ1Tp07FgAEDUF5ejs8++wwNDQ149tlnPb1ohBBCCCFej4I/hBBCCPF6l1xyCdauXYuPP/4YDMNg5MiR+OyzzzB58mRPLxohhBBCiNfzaM+fHTt24LXXXsPhw4dRWlqKX3/9FfPmzROuv+222/DVV19Z/M3s2bOxfv16Ny8pIYQQQgghhBBCSO/k0Z4/zc3NGDZsGN5///0ObzNnzhyUlpYKP99//70bl5AQQgghhBBCCCGkd/No2dfcuXMxd+7cTm8jl8sRGRnppiUihBBCCCGEEEIIOb94fc+fbdu2ITw8HEFBQZg+fTpeeuklhISEdHh7jUYDjUYj/G40GlFTU4OQkBAwDOOORSaEEEIIIYQQQghxOZZl0djYiKioKIhEHRd3eXXwZ86cObj66quRkJCA7OxsPPXUU5g7dy727t3b4VjXlStXYvny5W5eUkIIIYQQQgghhBDPKCwsRExMTIfXe7ThszmGYawaPreXk5ODpKQkbN68GTNmzLB5m/aZP/X19YiLi0NhYSECAgKcvdhduvvrQ9ibXY0VVw3GFcOjAQAlda14/KfjqG3R2vybmQMj8cjFKQ4/1qRXtqC2RYffHrgI/cL9e7TchBBCCCHewmg04uzZswCA1NTUTs9sEkIIIZ72y+EiPPdHuvD7f2b2w92TkoTfl3x/BFvOVuLZywZg/pi4Du/ni905eGNjJqb2D8O2c5UAgMPPzoRc0pYM09DQgNjYWNTV1UGlUnV4X16d+dNeYmIiQkNDkZWV1WHwRy6XQy6XW10eEBDgkeCPVOELkbwVvv7+wuMHBATgz0dnOf2x/PwDUG9ohUju65HnSgghhBDiCkajEX5+fgC4/SgK/hBCCPFmIoUPRHIf4XeJ3M/iGN0o5a4PCgzs9Ng9OCgIIrkPdCKFcH+BKhWkYuvvwa7a3PSqb86ioiJUV1ejT58+nl4Uu7HgEqtEbug3pJRx0b9WrcHlj0UIIYQQQgghhBBrap3lMbn
eaLR5vULaeUjGR8od4zdr9cJl3Y0teDTzp6mpCVlZWcLvubm5OHbsGIKDgxEcHIzly5fjmmuuQWRkJLKzs7F06VL069cPs2fP9uBSO4Z/j93RbFppWjHar2iEEEIIIYQQQghxD7XOMtijNVj+ruGDPxLbvYx5PqYEjyaNefCne8vk0eDPoUOHMG3aNOH3Rx55BACwcOFCfPjhhzhx4gS++uor1NXVISoqCrNmzcKLL75os6zLWxlNLZXcMWeMz/xpocwfQgghhBBCCCHEIzT6dpk/BstWy3xwSCHtPPij4I/xNW33193EEo8Gf6ZOnYrO+k1v2LDBjUvjGvyzc0vZl2nFaaXMH0IIIYQQQgghxCPaZ/7o2mX+qPUOln2ZMn+6m/UD9LKeP70RH9zqyZtkLwr+EEIIIYQQQgghnsW3YhGbAgE6q8wfPvjTVdkXl6/TpOWDP90PLFDwx8WMpvfYDYk/Qj1gq1kzKEIIIYQQQgghhLgPn/njr+CCN1aZP0LZV+chGb61C18wRcEfL8Zn/rij4bNCCP4Yu7glIYQQQgghhBBCXIEv6/KTdxT84a6X29nwmdeTsAIFf1xMyPxxw2P5UNkXIYQQQgghhBDiURpTZg8f/DFv+MyyLDR6+xo+K9tdT5k/XsytDZ+p7IsQQgghhBBCCPEoftpXgEIKwHLUOx/4Aewv++JRw2cvJjR8dsMrraDMH0IIIYQQQgghxKP4si4/BZ/5Yzv401XZl1wisgj4UOaPFzPyPX/cUPglNHzWUc8fQgghhBBCCCHEE9Ttyr7Mp31pTcEfhgGk4s7jBAzDCBO/+L/pLgr+uBjrgWlfzRoq+yKEEEIIIYQQQjyhfeaPziLzh7tOJhbZNRjKvPRL1IO6Lwr+uJjRCSPZ7KVSygAAdS1alz8WIYQQQgghhBBCrPHTvmyNeufLvuQS+8Ix5k2fqezLi7WNenf9YwX7csGf2had6x+MEEIIIYQQQgghVvhpX3zDZ/OyL/46eReTvnjm496p4bMXY92Y+RPsy61YNc2U+UMIIYQQQgghhHiCUPYlt8784Sd/2Z35Yxb8sadMrCMU/HExoxszf4J8uMyf+ladRTdxQgghhBBCCCGEuIda377hs1nZlykwJLMz+EOZP70En9zljmlfKqVUCDLVtVLpFyGEEEIIIYQQ4k5GIytM9BJGvRvNyr6Enj/2lX1Rz59egs/86UmEzl4SsQgqJVf6RU2fCSGEEEIIIYQQ9+KDO4BZw2d9Dxo+m416p+CPF2sb9e6G6A+AYFPpV00zZf4QQgghhBBCCCHuxI9yBwB/uanhs0XmD3e9vcEfH6l5z5/uLxcFf1yMdWPmDwAE+fLBH8r8IYQQQgghhBBC3EltmuYlETFQyriQi0XDZ1Pmj709f5QyKvvqFYxuzvzhmz7XUtkXIYQQQgghhBDiVvykL4VUDInIFPyxWfZFo97PK+6c9gXQuHdCCCGEEEIIIcRT1Ho++COC1JTdY1H2ZQoOyaV2Zv5Qw+fege/505M3yRF82VctBX8IIYQQQgghhBC34su+5BIxpKZUHYtR7w43fKaeP72Cu3v+CA2fqeyLEEIIIYQQQghxK/PMHqmYC7mwLGAwZf9oHS77omlfvYLQ8weU+UMIIYQQQgghhJzP1KbgjkIiFsq+gLbsH0czf3yo4XPvwMLNPX+EzB8a9U4IIYQQQgghhLhTW8NnESRmJUBtwR/HRr1T2Vcv0Tbtyz2PF2Rq+EyZP4QQQgghhBBCiHuZT/viy74AQGfgggMO9/yhhs+9g9sbPvtQ2RchhBBCCCGEEOIJGl1bcEcsYoT+v3pT5o/Q80fajVHvPYjgUPDHxdoaPrsn+BNs6vnTqNELKxUhhBBCCCGEEEJcr23UOxe04bN/tO16/sjEjpd9UeaPFzOy7u35E6CQCpHFOpr4RQghhBBCCCGEuA2f+dM++NNW9tU2Dcwe5tO+GAr+eC9T1ZfbRr2LRIxQ+kXj3gkhhBB
CCCGEEPcxb/gMAFIxFwzgy77My8LsYTntq/vLJen6JkBwcLBDd8owDI4cOYL4+PhuLdT5xGjkM3/cFP0BN+69ulmLGur7QwghhBBCCCGEuI1amOZlu+yL/5e/visKJzV8tiv4U1dXh7feegsqlarL27Isi0WLFsFgMHR7oc4nfMNn94V+2sa91zbTuHdCCCGEEEIIIcRd1Hxmj5D5w/2r58u+TNfLvDHzBwAWLFiA8PBwu267ePHibi/Q+aat7MudmT/cuHcq+yKEEEIIIYQQQtxHKPsSMn+4WIBOaPjMZwbZF/yRikWQihnoDGyPKorsCv4YjY5NjWpsbOzWwpyPjG6e9gW0Tfyice+EEEIIIYQQQoj78NO8+HItiVXDZ8fKvgBAKRVDZ9D3KPOHGj67mFD25ca6L6HhMwV/CCGEEEIIIYQQt7Fu+MwHfyxHvds77QtoG/fukVHvjY2NePzxxzFmzBiMHDkSixcvRlVVVbcX5Hzl7lHvgFnmD5V9EUIIIYQQQgghbqO2GvVuWfalNQV/ZGL7wzH8uHePBH/uvvtuVFVVYfny5Xj++eeRk5ODm266qdsLcr5qy/xxY88fyvwhhBBCCCGEEELcrn1PH6lV2Zfpekcyf0yBpJ6EFexu+Pzmm2/ioYceEoIYBw8eREZGBsRibiH69++P8ePHd39JzlMs+J4/7ntMyvwhhBBCCCGEEELcr63sq4OGzzrHe/74OKHsy+7gT3Z2NsaNG4dVq1ZhxIgRuPjii3HppZdi3rx50Ol0WL16NWbPnt3tBTlfGU2ZP+6d9kWj3gkhhBBCCCGEEHdra/jcbtS7sV3PHzunfQHmPX+6v1x2B3/ee+897Nu3D3fccQemTZuGlStX4ptvvsGmTZtgMBhw3XXX4cEHH+z+kpynhJ4/bnzMYB/K/CGEEEIIIYQQQtzNetS7qexLz4JlWWhNGUAyR4I/Ujdm/gDA+PHjcfDgQbzyyitIS0vDa6+9hp9//rnbD34h8EjPH18pAKBFa4BaZxDSzQghhBBCCCGEEOI6fMNnOT/q3ZSuozMahawfwLHMH77sqydxBYcbPkskEjz99NP4888/8dZbb+Haa69FWVlZtxfgfMbykR+4t+ePn1wi1BVS9g8hhBBCCCGEEOIefOaP0PBZwmf+tA/+2J+koRSmfXV/uewO/hw/fhxjxoyBv78/JkyYAKPRiH///ReXXnopLrroInz44YfdX4rzlFnsx62ZPwzDIJAmfhFCCCGEEEIIIW7VvuGzzGzaFz/pi2HaGkHbwxkNn+0O/txxxx2YNGkSDh48iOuuuw733XcfAOD222/H/v37sXv3bqSlpTn04Dt27MDll1+OqKgoMAyD3377zeJ6lmXx3HPPoU+fPlAqlZg5cyYyMzMdegxPMnoo8wcw6/tDTZ8JIYQQQgghhBC3ULdr+Gxe9qU1a/bsSIKIEPxxuHarjd1/mpGRgUWLFiE1NRWLFy9Gbm6ucF1YWBi++eYbLF++3KEHb25uxrBhw/D+++/bvP7VV1/FO++8g48++gj79++Hr68vZs+eDbVa7dDjeIrRQ5k/QFvfnxoq+yKEEEIIIYQQQlyOZVkhwCOMepe0NXzmy774bCB78ffVk7iC3Q2fp06dinvuuQcLFizAli1bMGHCBKvbzJo1y6EHnzt3LubOnWvzOpZl8dZbb+GZZ57BlVdeCQD4+uuvERERgd9++w0LFixw6LE8gUVb9MfNsR8EC+PeKfhDCCGEEEIIIYS4mnlPHyH4Y8r80RuN0LRrBm2v6EAlACDUdJzfHXaHm77++muMHDkSv//+OxITE13e4yc3NxdlZWWYOXOmcJlKpcK4ceOwd+/eDv9Oo9GgoaHB4sdTzHv+9KQ2rzuCqOcPIYQQQgghhBDiNny/H8Cs4bMpy0drMAo9fxyZ9AUAlw7tg89vG41HZvXv9rLZnfkTFBSE119/vdsP5Ch+glhERITF5REREZ1OF1u5cqXD5WeuYt7
zx82JP22ZP1T2RQghhBBCCCGEuBw/5l0sYoSgj8T0r97AWvT8cYRULML01Iiub9gJux7xxIkTMBqNXd/QJD09HXq9vtsL1RNPPvkk6uvrhZ/CwkKPLAdAmT+EEEIIIYQQQsiFQpj0ZRbckZmmeukMbaPeZQ6MeXcWu4I/I0aMQHV1td13mpaWhoKCgm4vFABERkYCAMrLyy0uLy8vF66zRS6XIyAgwOLHUywyfzzV84cyfwghhBBCCCGEEJfTtGv2DLSVfXGj3ruX+eMMdpV9sSyLZ599Fj4+PnbdqVbb84BDQkICIiMj8e+//2L48OEAgIaGBuzfvx/3339/j+/fHSynfbn3sYNMwZ/qJgr+EEIIIYQQQgghriZk/pgFfyRC8Kf7PX+cwa7gz+TJk3Hu3Dm77zQtLQ1KpbLL2zU1NSErK0v4PTc3F8eOHUNwcDDi4uLw0EMP4aWXXkJycjISEhLw7LPPIioqCvPmzbN7WTzKg2VfKiU36r1R7ZnyO0IIIYQQQggh5ELCB3/MgztS87Kvbk77cga7gj/btm1zyYMfOnQI06ZNE35/5JFHAAALFy7El19+iaVLl6K5uRn33HMP6urqMHHiRKxfvx4KhcIly+Ns5mVf7g7++Cu4t7ZRrXPr4xJCCCGEEEIIIRcitd46uCM1b/hsMPX8EXtp5o+rTJ06Fax5V+R2GIbBCy+8gBdeeMGNS+U85s/M3dO+/OXcW9uk0YNlWTDurjsjhBBCCCGEEEIuIG1lX+aZP2aj3vnMIKn7gz/uf8QLiCcbPvsrpKZlAJq1Bvc+OCGEEEIIIYQQcoERGj5LzHv+cMEAvdm0L0/0/KHgjwtZBn/cG/1RSEWQiLjHpNIvQgghhBBCCCHEtWxl/shsTvvy0lHvpJtMsR+RByquGIYR+v40UdNnQgghhBBCCCHEpYSyLomtUe9GaHtL5o9Op8Mdd9yB3NxcVy3PecUoBH8802/HzxT8aaDgDyGEEEIIIYQQ4lKtpuCPUmZd9uXpUe8OPaJUKsXPP//sqmU57/BlX57qtewv58e9U9kXIYQQQgghhBDiSnzVjZ+8bbaW7bIvLw/+AMC8efPw22+/uWBRzj98xx9PTdpqG/dOmT+EEEIIIYQQQogr8VU3/LE40C7zR2c9Ct5dHB71npycjBdeeAG7d+/GqFGj4Ovra3H9kiVLnLZwvZ3RVPfliZ4/QNvEryYNBX8IIYQQQgghhBBXahSCP1LhMouePwYu+MNnA7mTw8Gfzz77DIGBgTh8+DAOHz5scR3DMBT8sYGBpzN/qOyLEEIIIYQQQghxJf7Y288s80fKj3o3sm09f6S9IPhDzZ7tx/f88VzmD5V9EUIIIYQQQggh7sBX3QRYBH9MmT96s7Kv3tDzh6fVanHu3Dno9RRY6Iinp31R8IcQQgghhBBCCHGPRhs9f4Tgj9G84bP7e/44HPxpaWnBnXfeCR8fHwwaNAgFBQUAgMWLF+Pll192+gL2Zqwp88dDVV9CnSEFfwghhBBCCCGEENfiy74se/60NXzWmoI/st6Q+fPkk0/i+PHj2LZtGxQKhXD5zJkzsWbNGqcuXG/n6cwffrwc9fwhhBBCCCGEEEJcq9HGqHeLsi++548Hgj8O9/z57bffsGbNGowfP95ihPmgQYOQnZ3t1IXr7fjMHw/FfqjsixBCCCGEEEIIcZNGja1R77207KuyshLh4eFWlzc3N1sEgwhgSvzxWOZPAI16J4QQQgghhBBCXE6jNwhlXR2VfQnBHw9M+3L4EUePHo2///5b+J0P+Hz66adIS0tz3pKdB7xn2heVfRFCCCGEEEIIIa5iXnFjUfYl4sIuLAu0aLnb9IqyrxUrVmDu3Lk4ffo09Ho93n77bZw+fRp79uzB9u3bXbGMvRbf79lTHZ/9qOyLEEIIIYQQQghxOf6421cmhtgsA0RqFuhp1nA9f3pFw+eJEyfi2LFj0Ov1GDJkCDZu3Ijw8HDs3bsXo0aNcsU
y9lqez/yhaV+EEEIIIYQQQoirNQlj3qUWl/NlXwDQLGT+uL/nj8OZPwCQlJSETz75xNnLct5hPTztiy/70hq4ruKeWMEIIYQQQgghhJDzXduYd8swC1/2BbTFCHpF2RevoqICFRUVMBqNFpcPHTq0xwt1vuDfWE/1wfaVtb29jWo95H4U/CGEEEIIIYQQQpytQW096QsARCIGYhEDg1HoC9M7gj+HDx/GwoULcebMGWGUOY9hGBgMBqctXG/XVvblmeiPWMTATy5Bk0aPRrUeoX5yjywHIYQQQgghhBByPuMzf/zalX0BgKRd8McTPX8cDv7ccccdSElJwWeffYaIiAga794JY7vgmCf4K/jgD038IoQQQgghhBBCXKFJYzvzBwBkYpEw5h3oJT1/cnJy8PPPP6Nfv36uWJ7zCh/6Ebk/qCfwV0hQWt/WfIoQQgghhBBCCCHOxQ9aCrAR/JFKRICG+z/DWDaBdheHwxIzZszA8ePHXbEs5x3Ww2VfAOAn51a8Bgr+EEIIIYQQQgghLtHW8Nl22RdPLhF5pILK4cyfTz/9FAsXLsSpU6cwePBgSKWWT+yKK65w2sL1dnxJnycL49rGvVPZFyGEEEIIIYQQ4gp85g+fgGFOKm7Lu5GJPVMa5HDwZ+/evdi9ezfWrVtndR01fLbk6VHvQFu9YeN5mPnTpNHjaEEt0hJDIPHQB4gQQgghhBBCCGnspOePeZmXXOqZKdwOHzEvXrwYN998M0pLS2E0Gi1+KPBjiW/47Mme2HzmD9986nzy+oZzuOWzA/jlaLGnF4UQQgghhBBCyAWsURj1bl32ZZ7544kx70A3gj/V1dV4+OGHERER4YrlOa/wmT+enIjWlvlz/pV97c+tAQBklDV6eEkIIYQQQgghhFzI2nr+WGf+SHpj8Ofqq6/G1q1bXbEs5522hs+eWwZ/+flZ9qXRG5BZzgV9SupbPbw0hBBCCCHeSa0z4M/jJWjVUoY+IYS4Ej9h299Gzx+ZWdmXzANj3oFu9PxJSUnBk08+iV27dmHIkCFWDZ+XLFnitIXr7YzU88dlMsuboDe9wCV1aqvrWZb1aMYVIYQQQog3+HhHDv63KQNLZiTjkYtTPL04hBBy3vL2sq9uTfvy8/PD9u3bsX37dovrGIah4I8ZFqynF6Ft2td51vPndEmD8P/Sdpk/OZVNuPrDPbhzQgIWz0h296IRQgghhHiNQ/m1AIBjhXWeXRBCCDnPdV72ZTnq3RMcDv7k5ua6YjnOS96Q+ePXS3v+rD9VColIhJkDbfeWSi+pF/5f0aiBzmAUoqk7M6tQ16LD+vQyCv4QQggh5ILGnzDjy+UJIYQ4n8HIotlUXmt72pdZ5k9vmfZF7Ocd0756X9lXfYsOD3x3FHevPoTCmhabt0k3y/xhWaCsvq30q8D0N6X11uVghBBCCCEXiopGNaqaNAC4/aKGXnYykBBCegvz6dp+XQR/ZOJekvnDsizWrl2LrVu3oqKiAkaj0eL6X375xWkL1+t5QeZPAD/qvRcFf0obWmEwpU19f6AAS+ekWlxvNLI4U8oFf6RiBjoDi9J6NWKDfQAA+dVc8KemWQu1zgCFhyKrhBBCLlx6gxEihoHIk1MfyAXPvEwe4HomjooP8tDSEELI+YuvtJFJRJDbaOgsNS/7kvaSaV8PPfQQbrnlFuTm5sLPzw8qlcrih7QxesO0r15Y9lXZqBH+/+OhQmj1lgHGvOpmNGsNkEtEGB4bCMCy709BTbPw/5I6mgRGCCHEvXQGI2a/tQOTX9uKg3k1nl4ccgE7U2pZ6pVVQaVfhBDiCnylTYCNrB/AO0a9O5z5s3r1avzyyy+45JJLXLE85xWW7/fsyZ4/pjFzzVoDDEYW4l5wBrKioS34U9WkxcbTZbhsaJRwGV/ylRrpj9ggHxzMqxUmfrEsK5R9AVyKc2KYn5uWnBBCCOHKj7MruRMR81ftxUMzU/DAtH4oqGnBwdwa6IxG3Dg2jqZ
SEpc7bcqUlklE0OqNyChv8vASEULI+amzSV+AZamXrcwgd3A4+KNSqZCYmOiKZTnveEfmT9vK16TWQ+Vje2X0JpWm2nQRwzXN/nZfgUXwh9+RGRilQrAv93z4DJ/KRg3UurZMIcr8IYQQ4m78SQiJiIHeyOJ/mzKwanu20AgSAKIClZjWP9xTi0guEKdNAzJmpIZj3akyZFDTZ0IIcYkmDVdpwydftCf1gmlfDj/qsmXLsHz5crS20kF1V7xh2hdXc8i9zY2a3lH6xWf+XDo0CiIG2JtTjayKtjNVfObPoKgA9FEpAbSVfeW3axDNZwQRQggh7sIPK5ieGo43rhsGH5kYzVoDZGKRkA5Ok5eIq7Vo9cip4jLQrhweDYDr+UMIIcT52jJ/zqOyr+uvvx7ff/89wsPD0bdvX0illpkkR44ccdrC9X6maV8eXgp/hRSaJk2vmfjFZ/4Mi1GhVWvA5jPl+P5AAZ69bCBYlhXOYg2KCkBtixZAW5CnoNoy+GPeC4gQQghxB/67KC7YB9eMisHU/mEorG1FaqQ/3tuShfe2ZgnDCQhxlXNljWBZINRPjrSkEABAWYMa9a06qJTenwlOCCG9SUMXwR9Zbwz+LFy4EIcPH8bNN9+MiIgIqlfvhDdk/gDcCljVi4I/FQ1cICfMX46bxsdh85lyrD1chHunJIJluT5AIgZIjQxAvqm5c/vMH6VUjFadAcVU9kUIIcTN+O+i+BBuCmWInxwhfnKLyyj4Q1yNb/Y8MCoAKqUUkQEKlDWokVVBE78IIcTZ+AFLHfX8kYjMp331kp4/f//9NzZs2ICJEye6YnnOK3zPH0/Hx3rbxC8+8yfMX45xCSFIDPNFTmUz5q/ahzsmJgAAksL8oJSJhbKv2hYdWrUGFFRzwaDRfYOwM7MKpfVU9kUIIcS9+LKv2GAfq+viQ3wBQDh5QYirnC7lMqUH9gkAACRH+KGsQY3M8kYK/hBiA8uy2JFZhU935iApzA/Lrhjk6UUivUhTF5k/UonnM38cftTY2FgEBAS4YlmsLFu2DAzDWPykpqa65bGdgZ/25S3BnyZN78j8qTT1/An3V0AsYvD5wjGIDlQit6oZz/52CgBX8gVwo/R8ZVzktKS+VWiyOT6RS28urWsFK4xdI4QQQlzLfOpknM3gD3dZcW0rtHqj1fWEOMvpEn5Ahin4E+4PADTxixAb9mZX45oP92Dh5wewM7MKX+7Jo200cYjQ86ejhs9mmT+y3hL8eeONN7B06VLk5eW5YHGsDRo0CKWlpcLPrl273PK4ztA27cvDZV9yLvWsoReUfbVqDWg0BanC/LkU+b6hvlh7fxqSwnyF2/E7MgzDoE+gqelznVrY4R6XEAyAG3Hf0Or9z5sQQsj5obpZixatAQwDRAcpra4P95dDIRXByIJKk4nLGIwszpaZyr76cEGflAg/AEBmBTUbJ8RcZnkjbvlsP44U1EEuEQmTmssbqIKA2K+rsi+pF/T8cfhRb775ZmzduhVJSUnw9/dHcHCwxY+zSSQSREZGCj+hoaFOfwxX83Tmj18vKvuqbOSyfuSStokoANBHpcSP96ZhcHQAGAaY2C/M7DoFACCrohFVTVwD6JRIfwSZxtqXUNNnQgghbsKfhOgToIBcYl3TzzAM4oNNpV/VVPpFXCO/uhktWgMUUhESQrmgT3IEFwSiiV+EWNqZWQW9kcXQGBV2Lp0mlOxS+wjiiK6mfVmWffWSnj9vvfWWCxajY5mZmYiKioJCoUBaWhpWrlyJuLi4Dm+v0Wig0WiE3xsaGtyxmDZ5TeaPEPzx/gyYyiZuIxseILdqJh7iJ8dviyagpkWLcH+FcHm0KfNnf24NACDIR4oAhRRRgUrUtuhQWt+KAX3cU6pICCHkwtZZvx9eXIgPzpU3CoEiQpyNb/bcPzIAYlMaQ7Ip84cmfhFi6URRHQBgRmoEwgMUiAxQIL+6haYGn+dK6lrx2oZzuH1CXwyNCezx/fHVK3Y
1fO5N077cZdy4cfjyyy/Rv39/lJaWYvny5Zg0aRJOnToFf39/m3+zcuVKLF++3G3L2BmjqUzU0xPR+BWwqRcEfypM/X7CTFNR2pOIRRaBHwBC02c++BNnaqbZR6VEekkDiusoak8IIcQ9zMe8d6Svqe9PXhUFf4hrtG/2DAABCvOJX40YFd+Wsc+yLD7Ylo0gHxluHNfxSVZCzkcnirjPy9BYFYC2qoIyL8j80RuMkIg9Eyg43/18uAi/Hi2G3sji3RtG9Pj++EQLv45GvZsFfLy654959kxDQ0OnP840d+5cXHfddRg6dChmz56Nf/75B3V1dfjxxx87/Jsnn3wS9fX1wk9hYaFTl8kRfJthD1d9CU2nXFH21VEz5VatAXlVjqez85O+2gd4OtMnkLttTTNX8sXvcEeZLi+lngqEEELcpP2Yd1v4kxQFTpz4tfVcBZauPQ61zuC0+yS91zlTv58BfSxPlvLZP+2bPqeXNOC1Defw3O+noDdQk1ty4ahv1SHHdMwyzJT9IfQT9WDwR28wYuU/ZzDw+Q349WiRx5bjfMZn3xY4qQS7reeP7eCPROT5si+7gj9BQUGoqKgAAAQGBiIoKMjqh7/clQIDA5GSkoKsrKwObyOXyxEQEGDx4yltZV8eWwQAriv70huMuPL93bjls/1WQaCH1hzF1Ne34UypYwFBIfPH33bmjy1RKsuGmvFC8MfzG25CCCEXlgI7yr74zJ/8audl/vz37zP48VAR1p0qddp9kt6LP5hNCvOzuDwlgp/4Zdn0ectZbj9fb2RR1+r9PSIJcZZTxVzWT0yQEsG+MgBtmT+eKvuqbdbiti8OYtWOHGj1RmxML/fIcpzvimq597ew1jnvM3+sHdBRzx+xWdmX1IvLvrZs2SI0c966datLF6gzTU1NyM7Oxi233OKxZXAE6zU9f7iyr0Ynj3rPr2kR0iSrmrQWAZtDebUAuDGjjvTb4Rs+hzsQ/OEzf3hxpp1qfsNN01QIIYS4S2EnY955QsPnmhYYjSxEPTxLVN+qQ1YFl8mRW0lNpC90eoNRWA/7hvpaXJcayQV/DphK5Xl88AfgMqlDOyi/J+R8I5R8xaiEyyIDPFf2lV3ZhIWfH0BRbSsYBmBZ4Fw5TehzhaI6bjtZ06xFk0YPvw5GtNuDZVk0ddHzRybx/LQvu57hlClTbP7f1R577DFcfvnliI+PR0lJCZ5//nmIxWLccMMNbluGnuCTYTw97ctVmT/FZlHS3KpmIfhT36JDtakEiy/jsldFI7eR7UnmT5xV5g8FfwghhLieWmdAmWk0cGfBn6hABSQiBlq9EeWNaqF3XXcdK6wT/p/rxGwi0jsV17VCZ2Ahl4jQJ8DyBNmMARGQiBiklzQgs7wRyRH+qG7S4Lip4S3QVkZPyIWAb/Zs3vCX3yZ7onrgfxszUFTbivgQHyy/YhBu++Ig8qtboNEbPFYqdD7SG4woNesLW1jT0qMBQa06AwxG7uC/oyCSedmXp3r+dCu8VVdXhwMHDqCiogJGo2Vd8K233uqUBQOAoqIi3HDDDaiurkZYWBgmTpyIffv2ISwsrOs/9gJGIfjj2eiPq0a9m2fU5FQ2YWwClx2WU9VWR85n8thL6PkTYH/wRykTI8hHitoW7vnxfRb44E9ZvdopZ1adiWVZFNW2IjpQ6VXLRQghpPuK61rBsoCvTCyUD9giEYsQE6REXnUL8qpaehz8OVpQK/yfxseTXFPJV98QX6t9jGBfGab2D8PmMxX49Wgxls5JxfaMSphX71Pwh1xIbGX+8FUFlU0a6AxGSN3YcPlEcR0AYOVVQ5CWFAJ/hQSNaj1yKptperETlTWooTe2bfh6GvzhkyzEIgY+MttBOouyr94y6v3PP//ETTfdhKamJgQEBFgENhiGcWrw54cffnDafXkC3/PH04f2AW7I/Mkxa+6ca/b/CgeDP23Tvuxv+AxwEfraFh1kEhEiTM2iI/zlEDGAzsCiqkmD8AD
H7tOV/jlZhge+O4KHZibjoZkpnl4cQgghTmDe76erEz9xIb7Iq25BQU0z0pJCevS4RwvqhP/nVjWDZVmPn3ginsMP3Ogbajv77KoRMdh8pgK/HyvBY7P6W5R8ARCyt8n5o6C6BZ/tysHiGclU0memqkmD4jquvGpIdFvwJ9hHBplYBK3BiPIGNWKCOs7kdKYGtQ6FNdzx1cAo7jg7JcIfh/NrkVHe2CuCP952wr0jRe36/PS07w+fZOEnl3T4/WseRPRU2ZfDj/roo4/ijjvuQFNTE+rq6lBbWyv81NTUdH0HFxA+lujpnj8BSlPPH7UOGr3zpoAU1balludUtmX7mAd/KhvtT5c0GFlhh8ORzB+gbbJXbFBbJo35WPgSL2v6vC+nGgBw3CxVnxBCSO9mz5h3Hj+coKdNn41G1qLsq1Gtp8yNC5yQ+dOu3w9vxoBw+MslKK5rxd6cauzIqAQA9Dc1g66l9cdpapq1aHZyz83ueGXDWXy1Nx+f7Mjx9KJ4Fb7kKzHU16JPi0jEIELFHYu4s+/P2VKut08flQKBPlz2aEdN2r3Rv2fKkfLMOvx4yHPTtu1lFfyp6dl3MZ9k0dGkL6CXBn+Ki4uxZMkS+Pi4JwLamwkNnz3z3grC/OQI9JHCyAKZ7UZ79oRl2Vezzf87kvlT06yFwciCYYCQTtLlbeFT5uNDLHd0+KBQiZc1feYbc5bUeVdQihBCSPcV2DHmnRfvpIlfudXNqG/VQS4RCcMS8qj064LG931KCLEd/FFIxbhkSB8AwAt/nkaDWo8gHymmpYYDoLIvZ6ls1GDyq1tx6+cHPLocRiOLvdncScdjdNLRwvFCruRrmFm/H16fAPf3/eGnJA80y/BJieAm9p0rc94xnKus2p4DvZHFxvQyTy9Kl/gkBpkpIOOs4E9nTaMlZmVfnur54/Cjzp49G4cOHXLFspx3hIbPHi78YhgGg6O4VEZ+nKEzmJd9FdS0QGfg+j/lWGT+2B/84W8b4iuDxMHa2iGmOt3hsYEWl/cx9f3xtuBPdiUf/PGu5SKEENJ9BXZM+uLxJyvya3oWqOFLvobGqNAvnDtIyKvyzqbPzRo9TeB0A77sK6GDzB8AmDciGkDbFKEpKWEI9eNOvFHZl3Okl9SjSaPH4fxaj/biOlfeKAT0ThXXC01piXmzZ5XVdXzfH3cOjuGDP+blXXxGXmaFd2f+FNa04EAeVwWU0wumTvKZP8PjAgEAhbXOCf4EdDDpC2if+ePFPX/++OMP4f+XXnopHn/8cZw+fRpDhgyBVGr5BK+44grnLmEvJvT88YKyx0HRAdiVVYVTJc4J/ugMRmGiiYgB9EYWBTUtSAjxFXY6AO6DoNYZoJB2vYLzk766U4t87cgYbsc3zM/i8igVv+H2ngybBrVOyIhq1OjRoNZ1uqEghBDSOxSa9fzpinnmT0969PDNnkfEBaFRrcee7GqvzPzJr27GDR/vQ0WjBrv+bzoiVd7Th+98otUbhTPanQV/xiUEIzpQKQTjpqWGC/utVPblHOZlJTsyKnFLWsfvhyvtzqoS/t+sNSCnsgnJpoDChYxlWZw0nRQf2u7kMQBhG+WJzB/z4A//XhXUtKBVa4Cyg2bCnvbr0WLh//k1LdDqjR7LbrEHv528KCkEB3JrUFjT2qPvYr7nT2dlX3yWEcNYNn92J7uCP/PmzbO67IUXXrC6jGEYGAzO6ynjSlkVjciqaMacwZEueww+sO7pnj8AzDJ/Gpxyf2X1ahhZbiVOCvfDmdIG5FY2QykVo1VngETEQGQaY1vZqLFrR5jP/OlOY2aRiEFqpHUTtCgvzPzhS754pXVqBERS8IcQQnozlmUdyvzhb9Oo1qO2RdfhdDC9wYila09gRHwQbhkfb3U9n/kzIjZQOHNp3nvPGxRUt+CGj/cJ/feyK5so+OMiBTUtMJomzoX5d3wyTSRicOXwKHywLRsihsv84UuCKPPHOcwzCbZnVOGWtL4eWQ6+5It
3vKiegj/g+oFWNWkhETEWZVa8PqbjEXf1/NEbjDhbxmX3DOjT9v6E+skQ7CtDTbMWWRVNQrWDN2FZFr8cKRJ+N5iSAvhsVG/EB2fHJgSDYbhR7VVN2k63m51p0nTd84cv+5JLRB4bymBXOM5oNNr101sCP3qDEbd8dgD3fXPYYjyqs7FelPnDd7A/U9oAvak8y17NGj2e/vUkDuW1NfTmPzBRgQokhXFnMnKqmoQdzrgQH6H3gL19f/jbhTlxCgHfC8ibGj5ntwv+eFNgipALwc+Hi4Sm64Q4S3WzFi1aAxgGiA7qenS7QipGpOngorOSkKOFdfjlaDFeW39W2K/gtWj1OFvGndQZEReEvnwpWQ/7CDlTYU0Lbvhkn8X3MPWUcZ08s2bPXR1czB8TC3+FBHOH9EGgjwwhvtz+V02zY5NavcGPhwqx4p8zVp8RczsyKjH7zR3YcrbcLctknvmzJ7sKWr1j+9/OoDcYsT+X238fnxgMoK3U6UJ3whTs7B/pb7NCgW8d4a7Mn7zqZmj0RiilYosepgzDINkURPHWps9HC+uQV90CpVQsHBdmV3pvjyK9wSi8r0lhfsJ3cU9Kvxr4nj+dBH/4fkD+Hqz48N5cLBf692yF8IYfzndl8If71xsyf+KCfeAvl0CjNyLbwTrMtYeL8O3+Aqxcd1a4jE8Tjg5SItFUapVT2SxM/UoM9RUip/ZO/GrL/HFe8CfGtANeUN3c6Q6BO2W12xiWuLGWmJAL3dGCWjz603Es+f6opxeFnGf4gEufAIXdtfxxptKvgk4aTfL99RrUequMjBNF9TCy3GSYSJVCKPPJq/Lsdx7LsjhSUIunfz2JS97ZieK6ViSG+WJsX+7gs7aFgj+u0tWkL3PxIb44+PRMvLNgBAAg2NTzp7ZZ5zX7TPYwGlks+yMdH+/IQXqJ7Qz3rIpGLPr2CM6VN+K7/e6ZRGQe/GnRGnAo3/1TkU8Uc32HVEopbhgbB4DL/Dnf5VU1Y9u5ik5vw78OQ200ewa47Srgvp4/p02TvlL7+EPcblR6/0jvnvjFZ/3MGRyJwaaEA28O/pTWq2EwspCJRQjzkwsVKt1t+qzRG3DSFFTtLLATG+yD5y4biJVXDenW4ziD3cGfvXv34q+//rK47Ouvv0ZCQgLCw8Nxzz33QKPpHWcKvj9QIPz/pBMbILfnTT1/RCIGA6O4lEZHmz7zr1F6Sb2QNcTvjMYE+iDRtIORU9ksNHtODPMTMn/sbfpc6YLMn37hfhCLGNS26IQeRZ7GZ/7wI/4o84cQ99lyltsZrGjUoEXr+fG7PZFV0Yh1J0s9vRjEhD/oTgizv68HX/rVfuSsOfMGye3LuY4I/X4CAXA7lgzD9ZPzVOmOWmfA/I/34eoP9uDb/QVoVOuRGumP7+8ejyTT2WvK/HGdXFMWWUeTvtpTSMXCgWawabS01mAUShh6g/JGNVq0XPWBrZLH+hYd7vrqkPCcnDn8pDPFpiwCvmHv9oxKtzyuOb7ka3xisDAU5UxJg0eykNzpkR+P4bYvDuJAbscBNz4DalgHZVR8aWpFo0YYauNKtvr98JK9eNy7Rm/AXye4fZGrR0YjySwpwFvx37nRQUqIRAxig7of/MmqaMRV7+/B1nPc53t8Ykint79jYgJmDoxw+HGcxa6ePwDX42fq1Km47LLLAAAnT57EnXfeidtuuw0DBgzAa6+9hqioKCxbtsxVy+oUhTUtFhvfky6MfvPnTDw97Ys3OFqF/bk1OFlcj2tGxdj9d/xZFLXOiKzKJqRGBqC4jvtwcJk/bWVfPnLubGdCqK9wYGVv2ZcrMn8UpvTDjPImpBc3CGVgnsRnXo1LDMGOjEoa906IG201OxNYUqf26nr0riz5/hhOlzbgr8UThTNtxHP4zNfOmuy2Z89JEvMTBDmVTRhjyp4BzPv9BAHgvvOiVFwT37yq5m4NUOiprWcrcCC3BnKJCJcO6YNrRsUgLTEEIhGDYF/ujCg1FHY
deyZ9dUQpEwu9G2uatR4tTXBErtlBZl674I/eYMSD3x9BXnULogOVKKlvRVmDGpWNmm739rBHi1aPqiZuPb95fBye/T0dOzKq8ORclz2kTXuyuWbPE/qFIi7YByqlFPWtOpwra/TK3jHOwp+M3nymHGMTgq2uNxpZ4Riwo9ch1FcOqZiBzsCislEj9BF1lc6CP/2F4I/3ZdNsPVuJuhYdIgLkuCgpFPWtXONjb8784Zs98xUiscHcv4U1bd+3WRWNOJJfh5yqZuRWNaGh1TogzoLFscI6qHVGBPlI8co1QzElJcwNz6D77M78OXbsGGbMmCH8/sMPP2DcuHH45JNP8Mgjj+Cdd97Bjz/+6JKFdKY1BwvBssAgUxZMTlWz0J3b2fjMH5F3xH4wOJp7zukOTPzS6A3INIsynzBtKIWIaaBS2MGoatIKG9KEUF+E+XERc3szf/hpX87M/AGAQaZm16dLndPsuic0eoPQ22FycigAyvwhxF0qGtUWTe/dOb7V2ViWRU4Vt2PVvok88Qw+4yAx1P6AYpijwZ92B7Z8g15+VC0A9A3lzmDmeajvz7+m7Lqbx8fjf/OHY0K/UIhMO0JBpsySmhbX7HcRx8q+bOEbj9uTnXWmtAHXr9rr0hYK9jD/XLRf7z/dlYudmVVQSsX45NbRwj5rR9Nv9QYjFny8F9ev2otWbfd7mfIZ8v4KCS4Z0gcMw71e5W7MQlfrDDiUx703FyWFgGEYYaT5cQf7/vx9ohQr/jnjcN9QT9DqjagzbWN2dJBtlVvdjEaNHnKJCCkdNL8WiRhEBLhv4hcf/BnYx3p5UiK475XiulY0afTIKG/E9De24YHvjkCt82zP3Y2nywAAVwyLgljEWGT+eGv5KH8cKwR/+Mwfs6EJc9/eiaU/n8BH27OxIb0ce3OqrX725dRArTNiUnIoNjw0GbMGuW6QlLPYnflTW1uLiIi2FKXt27dj7ty28PWYMWNQWOieGtru0hmMWHOIW8ZFU/thxT9nUFzXivSShi5TtLqDX9+9oewLaJv4lV7SAKORFXbGOpNZ3gS9se2De7KoHtePjrXo+eOvkCLcX46KRo2QZp4Y5ivsgDie+ePcCSCDogLw69Fih4JerpJXxU3h8JdLMMyUfks9fwhxj+3nLHcCS3tx1l19qw5qnakMlwLIXqE7ZV/2BX/a1lPzDIeKRi57gWHaTmgBXB+X3VnVVhkQ7mA0skKfjRmp4VbX84GFOur54xKtWoNwkNqdzB+Ae4+K61rtCv58visXB3Jr8Mr6s/jx3rRuPZ4zmJeX5LVrns4f/D8+uz8GRgVgSLQKOZXNOFVUj2n9rdfRbecqsS+HKxVaue4MXrhycLeWqe3g0gchfnIMiVbhRFE9dmRU4rrRsd26T0cdKaiFRm9EuL9cOCAfGqPCzswqhyoftHoj/u/nE2jS6DEqPgizvfwAt9qsYfnZskaUN6iFIA6PL/kaFBUAqbjjXIg+KgWKaltNJ4uCXLG4AIDqJg3KG7jl7m9jenGgj0w41tqRUYkX/zqN0no1ciqbUdOkxacLR8NXbvdhvVOdNfUqGpvAHUsnhPqCYbj9lOpmrUcyULti/vkErPvvfbsvHzoDi/gQH0xJCUNCqC+CfWU2m+gH+8hwUVKIXcfV3sDuzJ+IiAjk5uYCALRaLY4cOYLx48cL1zc2NkIq9e700H/PVKCyUYNQPxkuHhghZMK4qvSLFTJ/vGNlSAzzg0IqQovWINSEd4UPmPD14CeK62E0ssJBEx8xTTTb2fWTSxDmJxcyeOzJ/GnW6NFsOsPi7DRcvtdRR00A3YlPgUwM90O0KX20rF4No9E7I+OEnE+2mYI//Ca5NwdNzM9CFvVgOgVxDqORNcv8cSD4w39PNjme+cN/pyWG+sJH1rbTz/d6sfd73plOFNejqkkLP7kEo/tal1oEOZBVQhyXX8O95yqlFEE+3dsn5wN09vSM4jN+DuTWdDqxztVyq9qyH9svB98
jZXRf7sCdn37bUc9P/iQxAHy9Nx9bz3beNLgj/HY51rSfzJeC7Mis6tb9dceeLK7fD5/1A7Q1N3Yk8+dAbo3QL2nbOff3LXJUVaPlumsr++dEF82eeZGqtn11VzpjCqD0DfERJkK1x2coPfTDMZTWq4Xb7s2pxq2fH0CDiypZOqM3GIVBNnx2kkIqFo5xvLXvj1XZlykIVFqvRrNGj7WmBtbLLh+EF64cjNsnJODK4dG4YliU1c/E5NBeE/gBHAj+XHLJJXjiiSewc+dOPPnkk/Dx8cGkSZOE60+cOIGkpCSXLKSzfGdq9Hzd6FjIJKIuvwB6yihk/njHCiEWMRjYx7Gmz/zO5XTTGbwzpQ0ormuF1mCEWMQIo/H4iV/c/7nxonzvHnuCP3x2kI9M3OFGr7sG9eHe56LaVtR7ONWcL8/oZ2qILWIAnYFFVSc7/oSQntMbjNiRye0ATjXthPfmsq8yi+BP730e7dW36vDImmPYn1Pt6UVxSEl9KzR6I6RiRtjptUdXmT8Nah0azRrv5lc3w2DauTht+n7mS5t5fLmPJw7G+Ybqk1NCIZNY72LyDYWp549r8Jlh9ox570iIr33vUXWTxiIY+fOR4m49njOYN3muatIK7RyqmjSoatKCYSD0d+P7o9k6IVjRqBbW4ZkDuGqHx9ce79Y+WmG7zAI++LMzs9ItzYMB4GAel8GUltRW3TDMFOzIrGiyu6xt85ly4f/bz1V4bSkPr7LJMlCz00bAjQ/+DIvtvO9R28QvVwd/Ou73w0s2BVe0BiP6qBT47u7x+OaucQhQSHA4vxaz39yBZ347iQ3pZW5r2J5f0wKt3giFVCQEUAAImWY96fuzJ6sKN36yD+fKnN/kun3mT7i/HDKJCAYji0935qKuRYfoQCUme3n/nu6wO/jz4osvQiKRYMqUKfjkk0/wySefQCaTCdd//vnnmDVrlksW0hmaNXrsMu34zzelWw4xbQBd1fXf28q+gM6/9Gzhb3fJkEgEKCTQ6o1Cw9TIAAUkplRJ8zOdfKoxv1Nb1aTpMrPl811cVllfO6dTOELlIxUiu+mlni394jeC/cL9IBGLhOBZb85AIJ6XU9mEl9ed7VXTWdztSEEdGtV6BPpIMXdwHwDuqeF3FfNlL+4lwZ+6Fm2X37c/HSrEL0eL8frGc25aKufgDz7jQ3yF70V78GXOTRq9zelz/Hsb6COFTCKCzsAKZyz54M/AKMuDhQS+509Vi9sP0rac5Q4SbZXTAGb9ZKjsyyXaJn35dHHLjtmbncVn/fAnvH8+XOSRLGat3igEWmSmz16+qe8Pn/UTG+QjZMfxnxdbpW2/HCmGwchiRFwg3rtxBFIi/FDVpMUTP59w+LPUPrNgeGwgQv3kqGvR4Y9jJd15qg5hWRbnTM/ffCBARIAcYf5yGIws0kvqodEbkF5Sj+YO9h9YlsW/Z9uCPyX1amR6eZ85PvMn0JT9tiurymLd1BuMQmVDV5k/fPDH9Zk/XQd/+EC/SinF13eMRVSgEsNjA/H9PeMR6idHab0a3+wrwL2rD2POWzvckmHJ94VNifC3yH4RhgH1IPjz6oZz2JNdjf/7+YRTty06g1E4+cdn5olEDGJMJ24+3pENALhxXJxQ+XI+sXsPJTQ0FDt27EBtbS1qa2tx1VVXWVz/008/4fnnn3f6AjrLmdIGGFnuQ8yfFeMzf3Kqml2SKudtDZ+Btr4/9gS8DEZW2BgNjlIJG8h1J7nGXuZnN83LvvjgD1/jqTeyqO1kR2/L2XKs3pcPAHjyklR7n4pD+H4Ipz1c+sVn/iSZXi9+cgBN/CI9sfzP0/hoeza+MX2OiDW+D8nk5DBhZ7w3B13LzLKWiutavf5MLAA88N0RXPburk77r/EHa3xvut4it5sTlnxN05UA61IFoK3kKyZIKZRz8dkW/Os4qF3wJyaIG/fepGmbNuQO5Q1cQ3WGAaZ2EPzhD8bUOmOPmukS29omfXV/iqG9ZV988Gf
e8Gj4KyQormvFvlz3Z+wV1rbAYGThIxML7Rz4z2NmOV+O0tZAN0AhFT6n5pn/LMvix4Ncydf80bFQSMV4e8EIyMQibD5TIWSK2Kt9Q1mJWIQ7JvYFAKzake3y7VtFowZ1LTqImLYsDICrRuBHmz/203EMW74Rl76zC4/+eNzm/WRVNKGwphUysQhjTKVz2851rxTOXfgy2qkpYfCTS1DTrLVo8J1R3gS1zgh/uUTYrnaED/64sj+nwchiv2kk/cBOgj+XD+uDJ+em4qf70oTR7wAXFNr++FR8eutoLEyLR4ivDEW1rVh72PW9eM+VcZ+x5HDLJtVtmT/dy0DNr24WBhocK6zDnyecFzAtq1fDyAIyiciiH1FsMBc0b9YaIBExuG60/ZOxexP7T0+ZqFQqiMViq8uDg4MtMoG8Db+TZB79DvaVCQGM9GLnBwW8recPAAyKbiv76upgIa+6GS1aAxRSERLD/IRRiPtNX+7RQWbBn1Dzsi/u/1KxSNiJ6Kjpc1WTBkvXngAA3DkxAZOSXZNeN7CPYxlPrmA0shaZPwDQx7T+9ebyE+JZap0B+0wlMme8YKKdt9pq6lMwLTVMCLqW1qkttoPVTRrsz6nuFWWY5pk/Gr2x054x3oKftLbf1EzVFj6925HedN6A72vgSL8fgDsQ47Nk+YmX5vjgT5RKaXYmlZtSyk81an+wwI97B6yb37oS3xtlaExgh737/OQSSMXcPhFl/zjXubJG7Db1eOEnvnWHvWVfh0zBn4v6heKyoVEAgLWHuT4ZOZVNuG/1YXxmyup2Jf6zlxDqKwS9+CAYn/nSP9IyGMYfC5ifCD2cX4ucqmb4yMS4bBj3fAb0CRDaHmzvYGpURwpNjWP5A0oAuGlcPPzkEmSUNwnlZa7Cb0v7hvpCIbU8bhsRxwVx8qpbhMEBOzIrhZJSc5vPcMs5PikElwzhsma9ve8PX0YbqVLiIlPJm3nfH77Z85AYVZe9WtzR8+ffM+UormtFoI8UE02TgG2RS8S4d0qSzelkvnIJZg6MwPIrB+Px2f0BAN8fKHT5iaGMDj5j/PdVd8u+/jzOBXvkpvLhV9adddoJA36iV0yg0uL958e9A8DsQZEI93fuACJv4XDwp7fiMz6GmgV/APPGb3VOf0z+4+Y9oR8uMiuTiNCg1uOoKaLaEf5LMTUyAGIRI7x2/HdDjFnwJyZIKaTbmu/8hnfSz6CuRYula0+gqkmL1Eh/YWPlCoOiHB9z72wl9a1Q67ieEHGmnYGoQCr7Ij2zL6caGj238+aKuujzQVm9GmdKuYyEyclhiDSdyWvVGVDf2pb1ecdXhzD/430Y/dJmjPnvZtzy2X6s/OcMfjta3KO6dVcoazcu2NtLvxrUOuG17igIbzSyyChve51dVZLtCjndzPwBOu/7U2zKCo0KVAr3nVvVhLOmz3pkgAIhNiaptN3WfcEf/mDW1pQvHsMwwrj3C7nvz7myRvxxvMQpB2ZqnQGvrj+LS9/ZKRxAXpTU8QFkV4JsZP7oDEaL90utMwjDUkbHB+HaUdwZ8nUny/DDgQJc/u4urE8vw4t/ncbebNdmA/HNnhNCfdHXVO7GB0YzytpKUswNibbuf7nGlPVz6ZA+Fr0nJ6Vwr+XOTPsDHk0aPWpNPSbNT5SqlFLcNC4OAPDRdq60hGVZ/HioEP/54ahTqxD4/YHUSOtAwc3j43HP5EQ8d9lArPvPJPjJJWjRGpBZYb0PwZdyzhwQLmT0Hcyr8eoyc/4ETqifTOjZsiOjre/PcTubPQNtmT8VjRqXjbn/ck8eAGDBmDirQF13XD4sCn5yCXKrml3++csot/0Z62dKBCisaYFG71jQhmVZ/G4qjXzmsoGIDlSipF6Nz3blOGGJ27LyzD+bACx6FvGf0/PRBRP8SefLl2LaBX9i+OCP88+Y82Vf3tLwGeBS3C4bykXu39qc2elt25pJcl+SQ9q9duZlXxKxCEvn9McNY+MszkK2ndHkNsQ
lda34zw9HMfnVrRj+wiZsOVsBmUSEtxYMd8oGryN8xlN2ZTPUOs+kmvMlX33NekJEC2Vf3n3gRryX+Q5NdmWT2xpJOoJPp3/615MemUbBN70cHKVCiJ8cCqlYOLvNl1w2a/TC2UCG4Q7Ed2ZWYdWOHDy05hhmvLEdz/1+yuaZUU/gM3/4enRvb/rMnwUHOg7CF9W2otVs++wNExrtxR+Amg8/sFdnE7/474boQKVw3zmVzUgvtl3yxeN3xI8W1Dq8PN2h0RuwK4vbFk3vJPgDtJUVdVYOfj7TGYy47YsDWPL9UWw6Xd71H3Th9i8O4oNt2dAbWcwaGIF1/5nUo6mpITZ6/jzy43GMX/mvsD6dKq6H1mBEqJ8M8SE+GBkXiMRQX7TqDHjil5No1hqEEr//+/mEzX5WziJM2QvzE9o65FU3W/S86d8uAMK3QODLvmqbtfj7ZCkA4PoxlmPYJ5sy0o8U1Nn9/cUH41VKKQIUllPX7piYAJlYhEP5tdidVYWla09g6doT+P1YCdaZlsEZznVwUM4v11OXDMAdExMwoE+AcCL8WEGdxe1qm7VCed/01HAkhPoiPsQHOgOL3Vnum1rmKD74E+YvFxptHymoFRqB89/1w9od19gS6ieHRMTAYGRd0icwo7wRe7KrIWKAm8c7J+DgK5dg3ggue+3b/QVOuU9bNHqD8Plrv56F+cvhL5fAyLb14LLX2bJGZFY0QSYR4crhUVg6h0sO+GBbNioaev4etG/2zOMbaieG+lo0ST/fXDDBH37FG9JB5o8rzjC2Tfty+l33yH9mJEMiYrAjoxIHcjtOv09vN0kkOlAp7LQB1hHTuyYlYuXVQyxS6NqPe/9wWzZ+P1aCAtOBQHyID/53/TCkRnZc4+oMkQEKBPvKYDCywtmQ3KpmbD1b0WGTO2cTJn2Ftx0c9FHxZV/nd8+fzafLcddXB13eMO9CtD2jLXVcZ2C9bqxmfYsO931zGEt/PoFv9xfgg63Zbl8G/jUxPwPax5R1xx9cnytvBMty2Yqnls3GL4suwoqrhuCW8fEYHR8EhuHG/j7w7RGPBZDN8Z+lwWbNS72ZeXAqs6LJ5mvIH6zwekvmj0ZvEJ6fszN/hLIvi8yfZrPvZ9vfndNSuQOezWcq3NI7acuZCrRoDYgIkHe4TDw+8+d8GfduNLI4nF9jd4Bj0+ly4Tv/mx4emFU0qLE3hztwXHXLKHx862hhv6K7gtuVfRmMLP49Uw6N3oiX150Fy7JCydeo+CAwDAOGYXCNKfuHYYAl0/th22NT0UelQEFNC97YmNGjZeqMecllgtmku7IGNRrVeohFjNXnclB02xTYqiYNHvjuCFq0BqRE+GF0fJDFbWODfZAQ6guDkbU7i0IY8x5s/V5EBChw9choAMDCzw/gJ1OpHAAho88ZhHIcG8Gf9obHBQKA0GOFty2jAkaW++7kD5T5aZneXPrF9zoL85ML75/eyOKNjRlo0uiF44ChsYFd3pdYxAjbtM6OmbqLz/q5eGCEVTCiJ24cGw8A2JBeZtfU5e7IrWqG3sjCXy4RMqR4DMN0u+kzn/UzvX84AhRSXDEsCiPiAtGiNeDdLVk9Xm4+kzsu2PL1npoSjpfmDcbHt472qsQNZ7tggj8sC0SpFBaNnYC24E+uC5o+89m83tTzB+CmkVxnmnj2xsZzNtOOWZa1aibJMIxF8MyecbZhZuPeWZYVxkU+f/lAHHvuYmx/fJpQK+5KDMOYlX414GhBLS5/dxdu//IgRry4CQs/P4AfDhS4dCeZP5Axb7wX1e4A9HzEsiz++88ZbD5Tgbf/dd0O4IWosKYF2ZXNEIsYJJuCimfLvCdb4mhBLS55Zyc2pJcLje+/3pvn9nIPoSzArDF9lMqy35b5pA1fuQQj44Jw47g4vDhvMNbefxHeu2EkZGIR1qeX4dbPD6DOjVkLJe2m0jSqdULK/aj4YADeX/ZlnvljMLLCgYk5/jJ+XbanN503KKhuAcs
C/nIJQv0c731oX/BHIQwKKK1XC2fjB0bZPnM9LiEEfnIJKhs1OOHiIJreYBSms10zMqbLneb2wYXe7p9Tpbjmw71Y8c8Zu27/9d484f87MipR4OBZcXN8+UpyuD9mD4rs9v2Y49+fRo0eGr0BOZVNaDH12tifW4MdmVU4ZMqmHG3a/gDAXZMS8Pjs/vjh7vF4ZFZ/BPrIsOKqIQCAz3fnCuuss5k3W483lX1VNWlxJL9OuFwuscwsVymlwm3v+uoQ9mRXw1cmxjs3jLC5/k5Kdqz0i9/exQTaPpi/Z3IiGIYbiBLkI8V1psCZre1id5hvY9tnPdky3BQEaR/84fv9zBjQls3Hl35588h3flvKb1sXpnGBkC/35OGSt3dCb2QR6idDlMq+ni7jTVkgfH9FR/17phy/HyvG4fxalDeohWON+hYdfj1SDAC47aKEbt13RwZGBWB4bCD0RhY/uajxM1+mnRLpb/Nz052mz0YjK/T7uWI4d3zIMIzQGmTt4SLUt/TseP2oaVvUPvNLJGJw8/h4i5P05yNJ1zc5PyigxsgoGaCzXAGDZEBSIJf6P/+9zULfGlsYhsH8MbG4Yax9aXkSQzOUUEPOtlg9rqctmdQH/xzOwoncEuw7V4C0dvXhZXWtULc0wk/EoH+ISFj+kVFSHMgw9SDwNXb5vPooDVBCjbr6WpwuKEVdfR2CpWLcMCIECqkO0LmvBGRYpASHMtXYcCwLb61rhEGjR6hMjGatAQcyWnAgoxBhcj1mDIxw+mO3aPXYeTofShgwo5+P8LpF+xqhhBrNTWqoWxpcWvpmj7yqZuzJrsK1o2IhkzgnNpxR3oCyqmooAfxzJBuPTouxCsKS7tl9tgBKqDEyJhBJ4QoUVVQhp6QcGBTo6UWDzmDEktW7UNOoRWqIEq9dNwzP/ZaOs2WN+HpnOv4zI8Vty1JSWQUl1EgObNuWxQewUEKNytoaQBeGrOIyKKHG0AiJze3apQMCEHrrICz+7ihO5pbg8jer8NQlAzBnUKRLzxBVNWkw762diFTJ8fuDE8EwDMqrG6GEGiqlFKmhIiihRkVNtdd9z5grr66GEm2Zf2cLyzA0wrIcIqekAkqocdXgGHy0vRo6tRrFFVWICXbe2VBXyCuthBJqDAgNAKN3/EA+0of7nqxvqLN4D/UGI+ob6qAE910RKNUhyteA2mYdSqvUUAIYHCa2+b7LAFyc7If1p8qw/VQuhke67vO29mABSiqr0cdHivsmRHa5HoYrdVBCjYamesfWWaNZSauuGRB5x/nL4znFUEKNjMIyQJfY6W0zyhtxPKcEfiIGqZH+SC9pwE/7zuLRWd3reXg6vwRKqDEqKsRpn/8AMQs/kQYGI4u6ujqk51t+dt9ZfxQldWooocOYmLb9ajmAByZybQX4y6Yl+WLB8GD8fqwEy38+gN8fnODU7WWTRo/GxnooAfRVAf5iLWL8DKhu0mHbqVwoocaQsECbr83ISBkqqmtwrrAMPgzwzrUjkBpi+/M0NcEHP+1V40BGYZfvMdC2vUtQsTbvLzGQwXOz4nGiuB6PXpyC2hYt/jqchfyyCqe8j0VVzWB0LQiUiBAfgC7vc0SkFEqoUViuRnNTPXzlEmj0Bhw8Vwgl9Lg42U+4j/GxCqgkWtTWq/HuxuO4KDEEg6NVHt9/5Wn1RmhbG6EEECrXA7pm3DY2HFE+Rjz7+ylU1tRACWBUlJ/d2+sJcQp8DTWO5hQBun4OLc/m0+VY/P1Ri8uCfKWYmBTKJQfomjE03A/jY+VO/w6/ZXQozhWW4ed958BqmpFf3YxWvQHPXDIQEXYGvjqTW1IOJdQYFCayuewpwYxpvaoEdH3sus9j+TWoqatFmFyC6Um+wv2mxSowLIJrlv7z/rO4Y2LXn0NbyuvVqK2vgy8DDIuUevV+k8PsfC4M661hWydpaGiASqVC/UtAwPnZtJsQQggh5LxmFCtxeuYBAMDAzWMhMnh3thshhBDiLg1qQPU
MUF9fj4CAjsuvveO0CSGEEEIIIYQQQghxiQum7Gt01dfY+9ylNkei2uvL3bl4Zf05pEb645dFF3WZurrynzP4em8+7p6cgEcudt0Y85747Wgxfj5cBBbWCWAKqRiPzErBwD5dd8PvSE5lEy59ZxekYgY6AwuGAXYsneaRsh+jkcWD3x+BVm/EG9cNh8qnreSgqkmDSa9sBQDseWK6MOrUGcrr1Zj2xjawLLDxocmIDbEsYbjrq4PYnVWN/141GFePjHHa4zrq5yNFeObXU8LvcwdH4n/zh9v1tzmVTXh4zTGh/veVa4fgimHRyK5owmXvcu//9sen4fqP96CoRo3nLh9od/mkJ1zz4W6cLmn06s/uwdxq3Pr5QQT5SrFr6XQ0qHVIW7kFAHDg6RnwbzdhxJ1atHpMemUrWrQGfH/POAyPbWug+ffJUjz243GolFJsemSyy5fzUF4NbvnsAGKCFdj08FTh8qMFNbjxkwOIDlLg84VjMPutnZBJRDj8zExhGl9XDuRWY+HnB+Evl2D/0zNcUv71wHdHsMXUd2HJ9H64f1o/vL8lC+9tzcJ1o6Px1CUDMeKFTQCcv+1yFpZlMfqlzWjRGvD+jSPwwHdHoZCKcPDpttc6s6IRV7y7G35yCQ48PQM/HCzEC3+exsTkUHxy62gPP4PO3fTJPhwpqMPr1w/DpUPsS203V1bfimmvb4dUzODYc7OEoQl/Hi/B0rUnMC4hGF/eMRYA8PGObLy5iZvUeWtaPJ68ZECn933rZ/txMK8WT1+SipvT+gLg3o8fDhbi9Q3n0KI1QCYWYe9T0+Ejc2yXsLxBjblv7USrzoA35w/HnMH29Zzhn9f4xGB8cftY+x/QaAQy87n/31/uFWVf5fVqTH19m/B7++2duW/25uG//5xFvzBf/LGYK+F8d0smPtiajTF9g/D1neMceuyC6mZuuyUW4eAzM51Wqg0ACz/fjwO5tXj5miF4/vd0aPRG/LNkIkL95Zj15g7Utehwz+QEPGzn9+Mja45h3akyPDorBXdN6l65hi3vbcnE+1uzcc3IaLxk6i/04dYsvGPWFPbvJRM7nMJnNLIWQ0o6s2p7Nt7anInpA8Lx/o0jO73t+BX/or5Vh98fnGBz2pYtD353BP+eqcCTc1Nx60V97fqbjjz8wzGsTy/DY7NScKedr/dnO3Pw+sYMXDwwAsuvGIQpr22FzsB2+RwqGzWY8cY26Awsfl6U1qNjBnsUVDdj3vt7LCZDMgxw+JmLoZSJse1cOe7/5igGRvnj5/snOO1x39x0Dh/vyMWVw6Pw8jVDu7y9Rm/AJW/vREmd2unrfXeZbzP2Pz2jR6V6rVoDRr20CSzLHdd1NGHwyvd2IaO8ya7viBf+TMf3BwoxPFaF7+4eb3Of6u3NGfhoew5Gxwdh9V2ObTMB4IaP9+JYYT1WXj0E80ZEO/z3Xq2hAXim6z66F0zwJ0gViJCg4K5v2Il5Y1OwYnMhjpbpcLxcLzRI64hWpEQrFNCLfACp4xNA3GHe2BTMG+u6XgChwTK0QoFW0zZ6ZGwgQnv4PnSXCMAHt022eV1okC9iwkORWdGE/UUazBlse+etO35NL0MLq8CYvkGIjQyzuj4kMBitaEZho8ij68mGc41ohQJzB0difXoZfjlVhxsnajC6b+fv1x/HS/DEzyfQojVAKlZCZ2CxclMhLh6ahHXnStAKBcYmhSEoMAg3TRiIZX+exqo9Zbg+LVUYU+1NjEYW6ZUGqKHAmSqj1352t+YUohUKzEqOgkjuh0A5EBCgQnmDBhk1LEbFe265N6UXo1orRVywCsMSoi1GHs4ZnoQ3thYhp7IZL27IxyvXDHVpz5zsumq0QoE+oWEW72VEiAitUCC/gcGpSgNaoUBSRAAkCvt21AFgeKISOtEpVGhYFDeLnDqpg3esVIdWcDXLx8v1gNQXRc0MWqFASGAIFD4B8PNXobJRg6ImEYICvW99rWnSoForBSBFWmo8GFk
GarUG5NQDKRHc8p6tqkcrFBgQEQhG5ofU2Ei0IgdHSrRgJT5ePXnjbLURrVAgPiKsW9uL4ECl8D1Zr5cJAbyCRtP7HBQs3G9cRDhawTXvTImJ7PLxJg/qix15rVif0YSbJ/uirF6NpT+fwI6MSgDce9JqAPYXajAt1bGDtvd25qJGJ8Xw2DDMHp5o92jTgIBAtEKB0haxY6+Xec8fqa9XBH9OVzcLn08AyK0Hhifafk5fHqpCKxS4/qJUMDIuGHHt+P54c1sxduS14ly10a7mvLxj5dxnJqWPCjKl/X9nDz+/QLSiFXsL1KjTy+AvlyA+MhwiEYPnrx6LVTuycfW4/na/f0nREWg9VYdTFXqnfqdm1rJohQIxZp+9mIgwtIKboCWTiBAXEQZ0ENB3ZA1KS43Dys2F2J7TAp1ICWkH99mg1qGsVQxAjKiwUEBq36FWYp9w/HWmAemVhh6/Ricr9dx3WnSE3fc1uG8UWlGAA0Ua/HWmHg0GbnJfSkznB+xhwb6YPKgv/jpRim8OV2PFVa4d4rKnoBo1OilCfH1xyZA++O1YMRrVemTXsRgc7YuKVilaoYC/f6BT17XRybF4e0cpduS22PWd9MPBPGTXAeH+Ktw0cSDgBT2RYiN8hP3Eo6U6pCV1f9JydkU9WlhuknJoUFCH2/8RSTE4Xp6HXfktmDOi4/ejoLoFXx+qgh4KLJkzQthGtrdgwgC8u7MUO/NbcapCj8HR9n9vafQGHC7RQQsFhiVGe+3+fbdJ7ZtE6/lvTjcZGNXzL8ZAH5lwVu+7/fld3t5bp325k79cAoW0bTWb6YJmys4yLpELcuzP7V43f1tYlsXPpjGeHWX1RAVaTh3yBLXOgJ2ZVQCAB6b1w4Ix3DS4F/463ekEtMKaFjy85hhatAakJYZgy6NTER2oRGm9Gp/uzMG6U2UAgEuGcDsP142OhUopRV51CzadLnfxs+qe4rpWqHXcQUa2g+Mp3UVnMOKvE6UAgMnJbQHF/pHcF/k5G+NiKxrUuPh/2/HMbyddvnz8mM4rh0dZ7SCJRQyeu2wgRAzw46EivL+152M7O2M+BthcuL8cIgbQGVhhgktqpGM7QjKJSJhm4awpLeZqm7UWI9zTS7nJPvyYaH60Kj95sbiu+1ODXIkfgx4RIIdSJsbAPvzkxbYpVO0n0wzoEwCxiEF1sxblDa4ZU+sM9S06VJumVnVnzDvArUdBpkzUCrOJX/ykr+jAtuBCotnEukHRXa+vMwZw37n7cqrx7f58zHpzO3ZkVEIuEeHZywZivmny5/YMx8Y2l9S1Ys1BLgi1dHZ/h4JzwaZR73U9nNjiDdpva/OqbH8GKxs1yKpoAsMAV5qdbe6jUuJi037R0p9PQGcw2vx7W46bJjMNs2NctaOCfLn1kR/nPThaJWTIzBkciV8XTUB8iP3re6rpc+3MUeaA5aQvnvn/+4X52Z3J2ZXBUSoE+UjRpNHjn5OlVtdr9UaU1auxJ4vblwrykcJPbv85duH7u4ffJRq9QXhdHAkmDolRQSxiUNagxqe7cgEAV9mZGXHTOG6a1m9Hi9Ho5MnJ7fHr0FUjovHivMEYYPo+yazgLq9sMk36cnKFwej4YEjFDErq1Sio6fy7tkWrF0aSL56RDKXM84EfgBtcNDaBm1zW07H1/LYvJcKv0+1/mmlS2t5sy2OrBrXOYmrXW5szoDeymJQcKvyNLREBClxiOha/d/VhLP8zHdtNUxMLa6x/+MmoADftWWswIthXhr4h3j1IwpUumODPoA7GoTqKL1X583hpl6PhjabojxcmN7gNwzAWqYAXD/De4M/4RG5jsz+nZxtEc+klDcisaIJMIhI2Vu3x4967+jJxpT3ZVWjVGRClUmBQVAAendUffnIJThTV4/fjxR3+3baMShiMLIbHBuKbu8YhNtgH/zc3FQDw3tYspJc0QCxicPFALvjjK5cII023nq1w/RPrBn4HAgDyq1ug1du/M+4uPxwsREF
NC0L9ZBZptPwO9jkb497/tykDmRVN+G5/AaqbXHcwXdOsNWUVAFcOt73jOLV/OJZdMQgA8PrGDPx6tMhly5Nj2gk2P2gGAIlYhEjTFIAtpnWR34l0RH8XHdQAwGnT+PkQUyZIYU0r6lt1KDMFfyJNwZ+YIC74U+Sl494La7ltW6wpM2pQlCn4U9y2np4VdiS511MhFaNfWNvId2+VW82tXxEBcvg6cKDXnq1x721j3pXCZX1DfBHmL0dkgEIIPHYmIdQX/cL9oDeyePrXU2hQ6zEsRoW/l0zCnRMTMLU/FzzeYecIa94H27KgNRgxNiG40x11W/jAQk2LVhgVfaKoDrd9cQDvbclEeYO6sz/3KmdNn9FAU/Aur9r2tJUzptslhPgioF2p67OXDUSAQoLjhXV4feM5ux/7RFEdAGBoTKCDS921YF9ufSwzvRdDY3q2D81vJ7Mrm5z2ncqyLHJtBPfjzQ7qHAl+dEUkYnCdKVi6dO0J4eDZaGTx3pZMDFm2AeNX/ov7vjkCAIh1cEph/8i2EwmdnXTrSnZFMwxGFgEKifAdZw8fmUTY/uZXt0DEtI3a7sr4xGAkhfmiRWvAb6aTP65y1rR/w7+3KRH868adrOO3oaEdlCF1l1ImFio+2gcyzBmNLB5fewJVTRrEBfsIAXZvMTaBO9F9IK9nJ7r5EzZdlTWOTwgBw3Dj3itM25MGtQ4X/287Rv93E5Z8fxS/HS3Gr8e4Yw1+pHtnHpjWDyqlFMV1rfhidx4Wfn4Ak1/bikmvWv+MX/Ev8kz7gUdMI95HxAZ6dTaxq10wwZ+BTgr+jOkbhH7hfmjVGfDb0Y4PioG2zB97U6HPV+H+3JdPfIgP+oV3vbPqKfwG8UxZg0U0ursMRhaf7swBAFw8MAIqpe3eJvxZu/25NSiqdU8A6KdDhfhsV66w473pNHfwO3NgBBiGQaifHPdPTQIAfLA1Gx0NBdxlOmCYOSBcKOG6fGgfjIgLhMa0gzc+MRjBZn1I+DKyEx44oNMbjHjyl5N4+teTFgdZ5jLL27J9DEYW+R3szNtiNLKoadYio7xR+JJztmaNHm9v5np+LJmRbHHA2T/CdiAio7wRPx7iztIbWeDfM64LvP19shR6I4vB0QGdft5vTeuLeydzNfBL157A/hznZdyZs3VmmNfHdFDNZ5YM6OP4gUJ/IeDm/OAPnxkzpm+wkN1zprTBOvPH24M/Ndxy8UGqQaY07VO2Mn/MdiT5zBbz23kCy7LYdq4Cb2w8Z3VW+6TpADwxtGffbULwp6ltu1FSx/3fPPgjk4iw6eHJ2PDw5A7LTtrjM0skIgaPXpyCn++/SPhsXtQvFGIRg5zKZru/f4rNsn4enpni8E40/32g1RvRouXS1D/ekYNt5yrx+sYMXPTyFtz11SGLrLf2Np8ux/cHCrB6Xz7+PF6CnZmVbvv+NMdva/kTW3nVtpeBD+QOiLIOMMcE+eDVa7keIqu259jMwipvUOPXo0XIMWWj6g1GnDIFT4f1MDBjS0i73mFDevgY0YFK+Msl0BlY5FQ5J6P2tQ3n0KjRQyEVIc4s4OOvkCLUj1t+e/vt2Gvp7P6YOSACGr0Rd351ELsyq7DwiwN4fWMGNHojJCLuhGdqpD/unZzk0H33DfGFTCJCi9bQo225eRalo5/N4bFt7/Ok5DBh/70rDMMI2T/f7svvcJ+xp1iWFb5r+ZM1yeHce5xpet5VppNbrugtyp8k3tfB/grLsnjhr9P4+0QppGIGL189xKm9uJxhnOlY53B+bY8CsWfK7Av+qHykQrbvXtPr9t3+ApQ3aKAzsPjjeAkeWnMMLMv1GrUnmN0/0h87/28aPrxpJOaPjkWUSgGlVGz1IxUzaNLo8fK6swCAowV1AICR8c5r7dEbXTA9fwZ244yuLQzD4MaxcXjhr9P4ak8e5o+JhVzCpfMZjSye+vUkDuTWYO39F1Hmj0lEALcBnjkgwqsjreH+CiSG+SKnshkH8mq
EHebuqG3W4j9rjgkZEDd20tw4JcIfE/qFYHdWNb7ak4enLx3Y7ce1R15VMx5fewIAVwr0f3NSsfkMV4I10ywz65a0eHy4LRuZFU3YllGJaf3DLe5HbzBij+nsx0Sz0iOGYfDMpQNxzYd7AABzBltmPPFnEDPKG6HWGXrUcM5RPxwsxPcHCgBwTUf/b24qbhgTZ9HwMbPCcsc0u7IJyV18ual1Bjzw7RFsz6iE3nTGzkcmxp+LJ9p1dt4Rn+3KFc4oLRhjuV4JgYjyRrAsK3zeXll3FkYWUEhFUOuM2JBehuvHuOZs1O+moPiVw7pOF/+/OakoqmvF3ydK8cxvp7D+oclO7QNlHryzGfxRWe7Ydud7ItWFwZ/TJdzB3aCoABhZFsV1rTicX4v6Vi4A0Zb5wx34dHXA0KTR45E1xzApJQy3jI93+vJ2hD8o58+E85k/p0sawLIsWnUGIfMxxexM/eAoFX45Uiwc5DoLy7L4+UgxKhs1CFBKEKCQIsRPhtggH0SqFBZBleOFdXh53Vlhp7VVa8Azl7Vto/kSxxkDLLePjuJLFLrK/AG4EnRH3DMpETKxCLMGRVhlQauUUgyPDcTh/FrsyKjCjeO6bsT/wdYs6Awsxic6nvUDAEqpGDKJCFq9ETXNWvjKJUJwJDHUFzlVzdh8phxqnQHfmDX0LK9vC4wt/v4oNAbLA0ypmMFfiyc5NdujM1q9USgNnjM4Ej8dLhLOMLfHf5Y72sbMGdwHt4yPx+p9+XhkzTHcPqEvGIZBi1aPXZlVOF7EBUBD/eTY8NAkVDZp0KozwFcm7rCZcU+0bxw/NDqwR/fHMAxS+/jjYF4tzpY2Olxi296q7dn4YFs2AOD5ywcJ++G8gVEq7Mio7LI3p6MkYhHeu3EEbjE1Ur/5s/0AuO/WF68cjGtGxtjdQNrWffcL88Pp0gacK2+0CGg5on0WpSOGxwbi+wNcYPfqkY41w71mZAxe3XAWZ8sacTi/tst+kd1R2ahBbYsOIgZCADvZlPnD77vxwZ+OGhD3RFpiCN7dkoW9OdUW+1i8D7dn48s9eQCAN64fjov6hTp9GXoqOdwPwb4y1DRrcbK4HqO6EQipbtJgbzZX3ji6b9d/n5YYgvSSBuzLqcacwZH43FRWuHh6P5TWq/HH8RKIGQaPzrJ/wEqAQoq5Q/pgbidDFs6VNWLu2zuwPr0M+3OqcaTAlPkTF2j345yPLpjgT7ATJ6BcMzIG72zJRHZlM5797ZTQsPS9rVn4wXQ2bEdGpTA/i4H3Bjzc4c6JiWAYBvdM9nyn+66MTwxBTmUz9uVUdyv4U9GgxpGCOrz412kU17VCIRVhxVVDMKGLL4C7JiZid1Y1fjhQiCUzkl06AYkPfgDAqh05KG9Qo7JRAz+5ROh7BHAb1gVjYvHprlx8siPHKvhzorgejWo9VEophrRruDYqPggPTEvCgdwaXDHMMm24j0qBUD8Zqpq0OF3agJFx7onAN2n0eGtzBgBup6CyUYOnfz2F9afK8MVtY8ymDnE7EP4KCRrVemRVdH2W8r0tWfjXrIxNJubO3j31y0l8f/f4bu8MtlfdpMHHO7hsssdm97c6o9Qv3A9iEYO6Fh0qGjWICFBgX041/j1bAbGIwVvzh+O+b45gZ1YVmjR6h/oR2KOwpgWH8mvBMMDlw7pOFxeJGKy4agh2ZlQis6IJvx8rdurEu6LaFugMLOQSEaJUSqvro80OqvuoFA4fVAOW5Qw6g9HubAx7pPPBn+gAGFgWG0+X419ToNZPLhG2EzFCz5/Ogz/rTpZy93G2AqPjg7pV5tYdhaagFF/2lRzuD6mYQYNaj6LaVtS2aMGyXLaB+dlaPlB8OL8GeoPRab07tp2rxGM/Hbd5nYgBApRSMABYtPWlETFc1tzPR4rw2Oz+UEjFKKjm1neRnet7Z9qXfTWodWg09SqIDrRedx0R5CvDwxd3PNhhcnKYKfhT2WXwp6i2RcgifHhm94ZFMAyDYB8
ZyhrUqG3RItRPLgRNfrhnPMoa1Ljy/d3YlVWFguoW4SD4x0MFmGHazx8ao4LKRwYRw6C+VYfcqmZUNGrw4bYsvLVgRLeWy1E5VU3QGVj4yyVCEKy+VYe6Fq3VtoQPbg20kfnDe/rSATiYV4OzZY14fWOGxXUMA/jKJKhq0uCpX09ieir3fTw4WuWSwQnmmT+BPlLEBvdsHQS4nmoH82pxpqwB89D9KTvfHyjAStOZ/P+bk2pzcujr1w7FmbJGjE90fgBCIRXj01vHYP7He3G2rBH9wv3wwU0jnZJl1D/Snwv+lDV0+wQkn/mT2o0gKB+w8VdIMGugfdP7eCofKS4fGoWfDhfh1fXn8M1d45ye9cJnm/QN9RVOHPKZPwU1LWjVGtrKvvycP/lyZHwQZGIRyhs0SH12vVVhB98v8tnLBlrt+3oLhmEwpm8QNqSX40BuTbeCPz8fKYLOwGJYjMquQG5aUgg+3ZWLvdnV+P1oCSoaNYgMUGDx9GTIJCI8e+lAaAwGuzPN7NU/0h8Lxsbhu/0FeOKXkyitV0PEAMNcUCrbm3hXLlovofKR4p0FI4SGpd/sy8fm0+V4c3Pbl/WJonoh7fFCz/wZFR+E928ciQgHao89hU+H7Krpc32LDpe/uwujXtyESa9uwZy3dmD0S5sxdsW/uO+bwyiua0XfEB/8umiCXQezU1LCkBTmi0aNHj8eck7/k8zyRjz643GsP9XWmFCjNwg77vyOBV+fPSUlzOrs2e0TEyAWMdiTXW3Vd2NnBhf1vygpxObO5+OzU/HTfRdZlbsxDCOkdZ4scl85x8fbs1HVpEXfEB/sXDoNz18+EAqpCDszq3DU1DiTZVlkmXac+CyoroI/WRVNWLWDOwP51vzhyHhpLv59dAqUUjH259Zgjen1doZ3t2ShSaPH4OgAXGbjbIdCKhaa2J0ta0R9q07YSb5xbBxmD4pE3xAfaPVGbD/nWI8Pe/xxnFuXxieECFkpXVEppbjPVGL45uYMp/ZYyjEr+bIVgDPP/OnOjjLAHZj7mcoZcjs4698drVqDkFUwKEolZGzw66r569vW86fzspd9pn5mBiOL534/5bLU/PaKTFk9/HLKJCIhaHbDJ/vwfz9zTcjbHzwNjw1EoI8UtS06HDbV6jvDt/u5APjQGBVmDYzAuIRgJIZyJRdGlgv41LboUNeiA8NwJ3y2Pz4NfVQK1LbosCGda2T/m6lHwYR+oT3+fmsf/Ck2BcyCfWUubxY6OYU7ObE7uwr6ThoOG40slv2RDp2BxUVJIRiX6HjWD4/PLKlp1uJceSOMLHewFuYvx9CYQEw0nTD54SD3Xmn1Rvx4uK3Ufs29afh04Rh8fOtorLk3DZ/fNgYA8OeJUhS6qX8en+3XP9IfPjKJkOXcvvSrVWsQyrUGdRJwVUjF+OTW0bg1LR4LxsRiwZhY3DA2Dq9cMwT7n5qBH+4ZD6mYwYb0cqGZrCuaPQOWJ0yHRKuckrWdaiqrPVva/SzJk0X1ePpXbntx75REoTy9vfAABaakhLks21zlI8Wae9Lw/o0j8YcD49y70tMecmqdAcdM3xHdWaakMD98eutofHPnuG5td+6fmgQ/uQQH8mrw5C8ne/QdU96gxuq9eUKmK9DWz3CAWcAh1E+GIB8pWJY7CVPVxDXgd3bDZ4D7jPJ9FjV6I9Q6yx+G4bJZ7pyY4PTHdia+6XN3BtywLCskOizopKrB3JiEYIgYbtvIHyvfMbGvEBxU+UidHvjhPTwzBX5yibB/lhoZ0KP+fOeDC/vZ98DklDD835xUrFx3Fsv/PA2FVAyWbUtZPlVcL5QZOOusP3E9vp73dEkD6lt1Hfbp+Wx3Lk6agiHmLWFEDPflmZYUgkdn9e/w79sTiRjcOTERT/16El/szsXCtPhun+XW6o34cFs23t/KNeT880QJ1v/HH4lhflh/qgy1LTr0USnw4U0j8b9NGULq9MyB1mUL0YFKXDa0D34/VoJPdubgbbM
zqruyuODBJLOSL3sNiVZhy9kKnHBT8Ke8QY1PdnJppv83JxUKqRi3T0jAkYI6U7+IKozpG4zSejWatQZIRAxmDojAr0eLkV3Z8QE9y7J49rdT0BlYTOsfJky3ig32waOzUvDS32ew4p8zmJEajvAeHhxuPVeBr/bmCc+ho+1KamQAsiubcf83h4V+Gr4yMZbMSAbDMJg9KBKrduRgQ3oZLh3acbqso1iWxe+mg+F5Ixw743X7RQn4YnceCmta8cPBAtya1tcpy8Q3A+1oClMfs4yK7mbBMAyDlAg/HCmow9myRqcdBJwtaxAOiMP95UKpFL8vbR644nv+NKr1nW63zHf0DubV4ucjxbh2lPMyrWwxGlkUmTKSzBugzh3cB6eKG0ylatz1g9tNr5KIRZieGo5fjhRj4+nyHgUbeGX1amw5y2VP/e/6YegX3vZ+GY0sqpo1aDA72FApZUJgZv6YWLy1ORPf7S/AFcOi8KupxNHeiTid4Xd8+Uk1fAlfVKDrT5oMjeGCbHUtOhwrrOuwXOONTeew+UwFZBIRnrpkQI8eM9jU9Lm2RSv0sBrQJ0A4WL9xbBx2Zlbhp8NFePjiFGxIL0NVB33aAC4DZlJyKHZmVuGTnTl44crBPVo+e5wxBTH4oEbfEF+UN2iQV9VsUW7UPrjVmdhgnw6XPdxfgYcvTsGr688J60dPGzF3xDz446zH6GmJLMuyWLnuDIwsN0H0iTmpTlmu7lL5SJ36HQq0BX+6Oz3yp8NFqGnWIjpQ2e2+Jj2ZypsY5of3bxqJO748iJ+PFCExzBcPTOvn8P3szqrCku+PorpZi4KaFqEdAh84NC/tZBgGyeH+OJBXI+y7A64p+wKAtxcMxxNzU4XWHuaUUjFCXBB0cjb+RPehvFoYjKxD2YMHcmuQU9kMH5nY7ozXAIUUQ2ICcbywDqX1avjLJTYz9lwhzF+OB6b1wyvruROhI+MD3fK43owyf3rgnsmJuHxYFPRGFk0aPcb2Dca7N3IHx6dK6oXeH6T3iAhQICHUF0a24zGI9a06fLGbCyS8cOUg/LLoIqy+cyx+XXQR0pfPwaZHpuCFKwfbHfjhXT0yGkE+UhTVtmJjN8eg51Y14/J3d3EZFAYjVEoptHojnvj5JIxGVjjjvWBMHCRiER6f3R9LZiRjRmo4Zg+yneJ79ySuXO+vE6VCWUmTRi80TpuU7HhNM78zebK4zuG/7Y7/bcxAq86AUfFBFtOxJvbjDiZ3m0az8iVffUN9hR367Momi8kb5meyfj9Wgr051ZBLRHjhysEWZxlvn5CAoTEqNKr1WPZneo+WP7uyCUu+PwqWBRaMie004Man8PKBnyiVAq9cO1TYEZplep+3nq1wapbN2bJGZJQ3QSYWWfV56opSJsaS6dwO4jv/ZqFFq+/iL+zDNxZtP+mLZ14K1pMSKGFEr40pa93Fl3wNjOLOunNlaW3bFPMpLj4yiXCwVtxB35/CmhYU1bZCImKE13rlP2cszqq6QmWTBlq9EWIRYxGwemBaP+x7cga+u2scXpo3GI/P7o/7plifxedLDzadLndKptKPhwphZIGxfYMtAj8AF4QP91egX7i/8GN+ADF/TCxEDNec/5cjxcitaoZSKu5w2+mI9pk/fIBqUB/XHNybE4sYoTR5Rwcj338/Voz3t3InCl65ZggGR/dsuYJMZVG1zTphEpb5Z3DGgAiE+slQ2ajBlrMVWL0vv8v75LNA1hwsFPp+uBL/eefLHvqaRp+3n/jF9/sxD251172TkzDa7KDeVeULQT7mmT/OeQw+MF7WoEZts9bhv9+RWYU92dWQiUV4cu4Ar+4h2V18w/ucymaHv5/1BiNWbec+o3dPSnBqCbIjpqSECdM8X9twDg+vOYYnfzlh9bP8z3R8tD0bvx4twu6sKpwqrkdBdQve35qFWz7bj2rTOrI+vUzY9vMZUe0zdfm+P3xvNqmYcXgf3F4MwyAqUImYIB+rn94Q+AG4bZG
/XIImjV7Y/tqLz/q5YliUQ60D0sxO3tw4Ps6l7S3au31CX6F8ms96upBR5k8PMAyDV64ZgroWLepadHj/ppFcirZUjBazlH3RefgFdT6blByK3KpmvLr+LCb0C4GPzPJj8tWePDSq9UgO98PN4+KdltmlkIpxy/h4vLMlC4u+PSJcLhOLEKCUQqWUICbIB7MGRWD2oEirSQYF1S244eN9KGtQI8RXhuevGIQRsYGY/dYOHMirwQt/ncaB3BqIRQzmm5r9MgyDRzrpBQFwZ1T5htQvrzuLt+cPx77sauiNLOJDfBweZwpA6BGUVdGEZo2+yxTM+hYdGtQ6xAQpHd7hyyhvxE+HuS+rpy5Jtfh7/oDnWGEdGtU6YVpEcrgf4oJ9IBExaNEaUNagRlSgEqeK63HDx/ugNRgR6CNFQysXpFgyI9nqdRCLGLx89VBc/t4u/HOyDHuyq3BRkuOBsga1Dnd/fQiNaj1Gxwd1eUb75vHxSAj1RYBSiuQIP6uxwiNiAxHuL0dFowZ7sqswtV0vp1PF9fjnZCk0eiP0BiMCfWS47aK+Vg1A2+NLYKalhnVrp2v+mDh8vDMHhTWt+HJPHhZNdfxsYXttk75sN0Q1z6roSfCnu2e0d2VW4ZX1Z/H85QOtsi3SzZo9A9xndVBUAHZncTu37ZtVRwcqUdOsRVFti82+IvtNweyhMSo8OD0Z/5wqQ1ZFE+5dfQgj4oKgUkrRP8IfU1LCnJqtypfg9FEprLIZI1UKRKoUnTbFnJwSCrlEhIKaFpwr71mjWIORFaZU3TDO8YbnfVRKTE+NwOYz5Xjmt1MAgNmDIpySQm4e/FHrDPjrBFeue6WDWXTdNSU5DH+fKMWfJ0qtno9Wb8R7W7kyo3unJOKqET3PFuODlbUtWrPgT9sBnUwiwjWjYrBqew5e33AOmRVN8JF2fjCblhiCYTEqHC+qx1d78hxqHtod7Q9E40O574D2TZ9Pl3IZrp31+7GXWMTgf9cPx7wPdiPcXy6UUjqbTCJCbLAS5fUap50p91dwvYMKa1pxtqzRoWbhRiMrTOy5JS2+W/sdvUEflULoN5hd2eTQ99KfJ0pQVNuKEF8Z5o9xT1ZFR24ZH4/cymZ8vjtXyJB01NUjo/H3iVJhfUkO9xPK8Nt/DySbmj/zI9hDfOXnZXDQWcQiBqP7BmHruUrsy6m2O5hf36LDPye57yZ7S754aUkh+Gh7NqRiBndMcG9ZnEIqxtd3jsWuzCpc2kmD6AsFBX96yEcmweo7x1l0fR8UFYBD+bVCgz+q+updlsxIxvpTZcisaMIzv53CG9cNE97bRrUOn/Fd6mckO72k75a0vvjuQIFQswwAWoMRVU0aVDVpkF3ZjO0ZlXj2t1MYnxiC+WNiMWdwJCobNbjhEy7wkxzuh+/uHi8cTCyd3R/L/jwtTCCYOSDc7n4svP/MSMG+nH3483gJQnxlwlmY7mT9AFw9fmSAAmUNapwubcCYTqZCtGoNuOSdnSiua0VMkBKTksNwyZBIu8vNXt9wDkYWmDMoEqPiLR8nJsgHfUN8kFfdgv05NcKORXK4H6RiEfqG+iKroglZFU2IClTi8925QhNWfjx4crifkB3V3sCoANw4Ng6r9+Xj0525Dgd/WJbFQz8cQ05lM1eqd/OoLhsoyiQiTEvtePKQSMTg4oER+HZ/ATakl1sEf9afKsWSH45ZnXHcnVWF7+4e3+FjG40s/jT1jpo3vHslMDKJCA/NSMGjPx3H13vyce/kpB43Mu2q7CvYV4Zp/cOg0Rs7vI09utOnge+7k1PVjMd+Oo4ND0+26Ll12jTefJDZAeOgKJUQ/Ils18A6JcIfJ4vrsS2jUsjuMsePph2fGAKZRIQXrhyEGz/Zj305NUIvIP7xHp2Vgmn9w4Xtnq2pJvbiy1O6e5DqI5NgUnIoNp+pwMb0cruDPyzLQq0zwsCywtnJnZmVKK5rhUopxVwHs9N4N42Lw+Yz5WjVcZl185xQ8gW
09afgewo1qvXoo1JgvJvOUk5O4banuVXNQp+w9qanhmPpbOeU2vCZJdXNWqGUo/2B7oIxcVi1PUfIyLRVmmyOYRjcPzUJ931zBF/tyUNssA8iAxSIC/ZB3x58vgEuCL96bz6OFdbhgWn9kBDiK5Sr8RPqEoTMH8ueP11N+nJUXIgPdiydBplY5NID3O/vHo9Gtd6pvThSIwNMB/MNSEsKwaG8GhzKr8XCtL6d9pj543gJzpQ2wF8u6VYZUW/BMAxSI01T0coa7A7+GI0sPjSV8N8xMcHlfcLs8fSlAzAsVtVhD64mjQHlDWqU1atR1aRBg1qH+lYd5BIxnrokFfPHxKGhVY/NZ8qxMb0cEhEDrcEIX5nY6vvEPKsMcF3J1/lkQr9QbD1XiR8OFuL2CQl27W/9erQIGr0RqZH+GOZgOejEfqG4d3Ii+kf6e6QHbFKYn9On7/ZWFPxxEvMv4CExKhzKrxUOoCj43LuE+snx7g0jcMMn+/DLkWKMTwgRxmJ/vTcf9a06JIX5uiR6HOYvx54nZqBR3VaKodYbUd+iQ12rFieKuKyME0X12JNdjT3Z1Qj2lUEuEaG0Xo2EUF98e9c4iy++W9P64s8TpULD1JvGOT7ieWxCMF67dige+fE4vtyTB6mYW6kn9nO83w9vSIwKZafVOFFU32nw59v9+UK5WVFtK74/UIDvDxTgtWuH4rrRnZ+9P1pQi42nyyFigMdm285wmtAvFHnVBdiVVSUEf/qZdiSSwtqCP6P7BmH9Ka7R66pbRiE6UImGVh0GRgV0GpC5Y2ICvtmfjy1nK5Bd2eTQl89fJ0qx5WwF5BIRPr5ltNN2aGYPisS3+wvwx7FihPvLcdO4OGxIL8Nzf6SDZbkm3kNjAiERMfhqbx4O5dfi+T9OYcVVQ2webBzMq0GJqY67s8BTVy4b1gcv/n0aZQ1q7MqqwpSU7q9fLVo9SkwHZ4kdHPgxDIMvbh/b7cfg8Wf+i2pb7Z6i9vfJUqEhdV51Cz7blStkO+kNRiGQZD6a2/zgsX3mz7WjYvDzkSL8frQYT10ywGoZ+OAP3zfnoqRQfHrraJwsrkeDWofaZi02nS5HekkD7vjyEGKClNAbWNS36qCUibHsikHdml7C7/jzk766Y9bASGw+U4FNp8uxZEZyp7f9em8e3tyUgbpWndAfaUK/ENw/pR++M5W9Xj0yWpgU46jJKWGIDlSiuK4VoX5yoTFxT6mUUkjFDHQGFp+a+pNdOTzabT0DI1UKrrFwB+XO0YFK3Dul5wFZXpCphPFkUT0aNXrIxCKrbWNCqC/SEkOEUo4bxsQB6s5LomcNjERimC9yKpuxdO0J4fJb0+Kx7PJBNl9Ptc6AT3fmIMesv5uPXIzoQB9EBymRUdaIr/ZyGb8A8O+ZcmEgQHSgUsiujDcFf/LNyr4MRtbss+y86XrOntRoS0wPPrMdGRDpj02ny3G2tBH/ninHfd8chs7AYvu5Snx222irTGuAe39e33gOAHDf1CSnTu/1RkNjAnEwrxZvbsrE1JTwLrNuAeDfsxXIKG+Cn1yCm8c7vo/nCmIRgyu7cTLI/GTDrIFcpuWmM2VC+XZKpL/V5zi5Xa89V0z6Ot9cPyYW7/ybiayKJqw7VYrLhnb+/a7VG4Xy2xvGxjkceBaLGDzZw15xxDko+OMC7cdeU9lX7zMukWvY/NqGc3j291NIL6kHwzBCacvi6ckuGa8KcBkQ7euG+VrVi5JCcd+UJBTWtOCXI8X4/kCBcKYjLtgH3909zqqxsEjE4JVrhuKq93cjNtin2wcrV4+MQbNGj2d/56a9iBg4lLbd3tBoFTadLsfJoroOb9OqNeCj7dxo82WXD0R8iC9+PVqMP46X4Lnf0zEiLtCqb4e51zZwO4zXjIzp8HYT+4Xi2/1c8Ifvt8GnEPcL98OG9HJkVzZhQ3oZWrQGxIf4YNbACLu/+BJCfTHDVCryxe5cvDRviF1/p9YZhDT3RVP7YYgTG3u
OTwzBsFiu+d7b/2bi/a1ZQo+yG8fF4YUrBv1/e3ce19SV9w/8cxNIEAJBtgCyi6IgAi4oaF1Ra9WW6WaXx+LSzs8O2vL4zEzr65lqtxnt+JupVm3tTMdiZ6rV1qodraIjaqu17lhXqlQRUYKIEEA2SZ4/AleWAAGykPh5v155vSDc25zYk5Nzv/ec71fcojM4pCdmpx/DhqN5iPRXYoaBiWVDxbiHB/h2+qIaAOQOUjwW4491h3Px5fG8LgV/rhbpgw49nR2Nmjx3hbuzDCo3OdSaavysLsOgoLYTbWq1Oqyur9YTG+iOrLwSrNx7Gb+K6wU/ZQ8cuVKM6ntaKOQOCG60vaHxxWPz1XvDwzzEi95tWflNgrwN+X6kEqFJvpCkSFWT5J7FFTX4+LscrPvhqrhiBwAqa+vwyoZTKCqrxuwOVjHJq69A1pVtGuP7+0AiAGfyS3GjpBL+rZQ+33nmJhZta5lf69Dl2+KKKQBdSjQplQhISQzGn769iOlDA0xWfl4iEeClkONmaZVYTMAUiaQ7YvrQIIttF2n4TJ6tX+EWXr/asrnnhwfh8C+3EaFyRXyoBy5caDv4I5EI+OCZOPzzcC5uaqpQqKlCtroMnx3ORXnVPfz5yYFN/p/lFd/Fbz4/Kf6btyXcR4He3i7IOKcW8/I13qoWUr/t687dWpTerYXS2RG5tytwt6YOTo6SVrefPkga8qPtvViILafyUVun/945/MttpKw9ik9nxTcJbO3PLsSb35zD9TuVULnJLb5dxBpSx4Yj41wBrhXfRer6k1g3O77N/D1qTRX+ukdfQWlGQrDZct1YSuO5VcPYfzZfg30XCwEYrszppZCJSev1v3PlT3vcnBwxe2Qolv/nElbuvYxHBvi1ebNhZeYl5NyqgIeLzGQrXsk6GPwxg+bBH+47tU0vj+6NY1eLsT/7FtYdvp9sMtTLxegM9+YS6OGMV5P6IHVsb+zLvoXjV4uRkhgCP6Xhi6JwHwUOvjYOMgdJl+4kz0gIgabqHpZlZCOht2eXJhkNwYyf2ph0f34kF0Xl1Qjo2QPPDw+Go1SCUX29UVxRg4OXizBv/SlsTR1hMOBw6PL95JCvJrW+WiChtycE4X5Jd4lwf5tQeH0Q6HJhOa7Vr2D4VVyvDn+m54wMxX8uqLH5RD5+OzEC7s7tByP+cfAK8ksq4ad0wq9HGd5W1lkyBwm+mpuAjHMFSD+kX9kDAAsm9MX8ceFN3t/YCP1Wj/d2XcRb35xD5gU1pBIBgiBAIuiD299f0ifMNsWE4KkhgVh3OBe7z6vFC6jOuFLU9pYvU+urcoVaU43sgvaDP7vPq5GtLoOr3AHrZsVjzrpjOJ57B+/uuIDYAHf8OUMf9Evs7dnk8xrmrYCXQo67NfcQ1CyYIggCnosPwrs7LmD9kWt4rtGducb5ftrKT+PhIsPCyf3x0kNhyC4og6uTA5Q9HPHpoatI/+Eq3t5+HoVl1Xjt4QijPwN5xV3b9gUAngo5Bgf3xLGrd7DnvBopiSEtjjlzvRT/vSkLAJCSEIzUceFwkTmguKIG/zh4BV8cu4aqWi2GhvTsckW2lx4KQ3yoJwaYcCUHoF/52bjyVYSBixx70bB6o2F1VmvbW6ZE+8HhvySI8jc+WfKAXkq89+RA8fdtWflYsOk0vj6Vj4qae5g/Tv99kHv7Lv536xmU3K2Fu7MjXhwZKq7i1FTeQ35JJfLvVMJBKuCFhGBMjPSFRCIg41wB/nfLWRSVVzdJuOwscxDzqV29XYEYZ3dx+3+Er5vZbhjZkoZCCg0JuR+J9sXsEaGYlX4Mx67ewXN//xGj6rd0Z6vLsKc+yObjKsf702O7xXYmc/NwkeGTlCF4/MMf8EPObfxxxwUxgXJj9+q0WHc4F+/v+Rnl1ffgIpPaXXCs8di/7bT+JpOhrb+CIKBvfcUvgNu+jDUrMRT/+P4KstVl2H2+oNViHafzSsTKwO8md7ygDXUvDP6YQZi3As4yqVh
th1/3tkkiEbDquUHYeCxPrEwhEYBpMf7dZhLnIJVgQqQKE4wozdnZi+jmUseGY1QfbwR6dC3RZEOQ9JdbFdBU1bZITFxVW4ePv/tFfM2GO19SiYC/To/BIyu+x8WCMrz173OYN65lcOfP9at+nhsW1ObydXdnGaJ7KcWy88GeLmIwqWEbwrkbGrEC1eOdSHY6PMwDkX5uOH9Tg/VHr7WbzLiwrAof1idY/f3DEWaZ8DpKJZg60B9TB/rj/A0NKmvvtciJ1GDu6DCcv6nBv0/fwL5sw9WA/JROGG6CUtxR/m7o5+uKiwVl+OanGwZXGrVH33f0ExVLXUD383XF95eK2k36rNPpsGrfJQBASmIIlM6OePPRKExbdRA7frqJHfWJfh+O8sXSJ5quEpNKBGxNTUTNPa3BIM4TgwLw54xsnLuhwenrpWK56cb5fozhpZDDK/z+5HnxtEh4u8qxLCMbaw7k4HReCd5Jjmpz1R2gL6veUPigqwlaJ0b64tjVO9h0PA+hXi6I7qWEu7MjyqvvIa+4Ei9+dgxVtVqM7uuNN6ZGiqs7XOQOePPRKMwfF469FwvFC8uuEAShSSlvU/FudLf6cTu/s9qzWQC8tWTIgiCIFRq12s5VJ3wsthd6OEoxb/0pZJxTI+Nc09VDAwOU+PD5QUZvc5oU5Yv4EA8cvFyE8f2bbnMN8XK5H/wJdDd5vh9bF+LpIs6PH4n2xYpn4uAolWD9i8MxY+0R/HS9VPwuBvRj3szEEKQl9bFodSBr6+frhr8+HYu5/zqB9B+uYve5ghbBz8raOhTXz01jA93xx18NsMugR8PYX1e/Orm17/RwlUIM/nDlj3GUzo6YOSIEKzMvY8Xey2KAu7Gq2jr8z5enUafV4dEYfzzChMk2j8EfM5BK9FVZjl3V303vJnEC6gSF3AFzOrjN4UFgii1Ingq5mDvjbH4pEnt7oai8Gjod4O7siPVHruFWWTV6uffAE4OaBlx8XJ2wfHocZqw9gg1H87DhaJ7B13CWSTFvXPvJIUeEe4kTzobVPsD94E95fZLnIcE9EeTZ8YtYQRAwZ2Qo/ufL01j3w1WM76dqMxfYmv05qKipQ0yAEo/FmP8isL0qNIIg4P2nY/BojD/uVNRAq9NBqwO0Oh109T+PCPcySVBUEAQ8OTgA7+64gK+O53U4+KPT6bDw6zP46Xopejo7mqRqmDEatjMculzUZlnqmyWVOJuvQQ9HqbiFakAvJZ6LD8LnR67ByVGCxdOi8MzQQIMrHdq6QO3pIsOUaD9sOZWP9UdyxQDFkSsdC/40JwgCUseGw8dVjj9sPYvDv9zG5BXfY87IMIPbSGvrtPj3TzfwTdYN3NPqt4i2lnfJWBOjVPjjtxdw7oYGL6w9CkBfCbGm7n5AoI+PAiufizO4FctTIcfT7eQIs7aGCzdBAB6Nte7qUnNrnrel8fYpc5gY5YtPZw3FO9vP487dhps5+sDS65P7NUm2boyeLjKDK4BDPJ1x9EqxuO20YeWPKSp92QOpRMCyJ2Pwy61yzB3TW7ypEx2gxOaXE7HpeB6qa/WfabmDBMlxvbpUidGWPTzAF7+bpE8/0JC/rjl3Z0e89nA/TB8SaLH8YJY2IVI/9jcwtO0LAPo2mrt52WEQzFzmjAzF2oNXcOGmBu/sON8in2BWXgkuF5bD21WOtx9ruQKNbA+DP2YS3ctdDP5w2xeRYQMDlMgvqcS72y9AU1XbJM9Iw8cmdWy4wYTKI/t44Y0pkVj+n59Rfa/lHWEHiYDfToow6g7QyHAvsVJGn0YTCBe5A/yVTuLE6/FBnS9xPC3GH0t3XYRaU41Jy78z6pw3pkZ2mwldwyozS0iO64WlOy/i9PVS/Kwu69A2nU++15eWlUoErH5+kMVKAjdcvF4qLMcb9WXA2/Jfw4OaXAC/MTUSkf5uSAjzRFgXKlI8PyxIzIs1Z2QYiitqkFfcMt9PZzw1JBDDwzzx5jfnsPdiIdYcyMGaAzl
tnhMf6oFXxvVpkceso4I9XfDBs3FinrCrt++KgR8XmRT9/Nzw/tOxLVYQ2pKGPE4jentZpRqKJbVY+WOBC/wR4V7YlTbKrK/RUFXs9PUSnLp2B2fzufKnuSkDDa8c6O2twMLJTAjbWOrYcEwb6I+SyhqDfw/zVlgk+bc1hXi5oK9KgZ/V5fB1c2p123zjpM/eXPljNHdnGVISQ/Dh/hx8euhqq8e990S0USkLqPuziRFj9erVWLZsGQoKChATE4OVK1ciPr7rFVrMKTrg/hd9N7l2I+p2Bga4Y+fZAvHuaEPAR6fTP8K8XfDk4NYDLrNHhnY4Aa0hg4N7Qu4gQfU9Lfqoml549/ZR4EZpFWRSSZcqvMkcJHj9YX3unIbkyq0RoK9INKSNKmj2zEshx9h+PthzXo01+3Pwm7G9Eezp0mbSy0vqMvz79A2sqt8ut2hqJBJ7m6YSkzEi/dzw6vg+7W77AvR3a5tvVXRylHaqEl9zg4N7ihPlxkHG9vL9GCvQwxn/mDkUe86r8bfvcsQKSM31Vbli9shQk26PejTGX6w4VlpZi7KqWni6yO0mD8gzQ4Nw/U4l/p+Jc3x1Rz1kUvRwlKKytg5+ytYv6GxNSH3Fr8yLhcisT1ArCK2vViBqT5CnM4JgmZsY3dWESBV+Vpe3uUKw8dzN29U+xhNL+c3YcFTf04qrIpsbEuyBcf0sc/OPzK/bB382btyIBQsWYM2aNRg2bBiWL1+OSZMmITs7Gz4+nS8pbG7RvdzFn7nyh8iw54YF4fqdu/B0kWFoqAfignqih6MUmspa3LlbA1+lU5tl1E3FyVGKZ+ODsOtsAUY028bSx0efy2V8f58u5016YnAAnmgjmEX3PTU4AHvOq/H1qXx8fSofDhIBKjcnKHs4wq2HA3o4SsWxNa/4Li7VJ+wGgOlDAvFCgmXL3QqCgP+e0Neir9laO14d3xe/++o0AKCHoxSuTg749UOmDSgYm2vMXJQ9HO0u6aSv0gn//6kYazfDYno6O6KytM6utvWM7OOF+FAP3Cy9v4p18gA/kwReiR5Uv36oN0orazF9SOvVCL0VckT5u+F2eY3R+btITyF3wBtTI63dDLIQQafTtX0L2sqGDRuGoUOHYtWqVQD0Cf8CAwMxf/58vP766+2er9FooFQqUVpaCjc3y00wtFodot/MQEVNHf74qwEmuaNLRJZ3s7QSH+3Pwa9HhXFCYUF1Wh2W7ryAI1eKcbmwXEyg3xpHqYCH+nhj6kA/PBbbq9skZSciw6Z88D3O3dAgdWxv/G5Sv3aP12q1OH/+PAAgMjISEon5bwwQke24V6fFPa3OYAVYIntnbMyjW9+KqKmpwYkTJ7Bw4ULxOYlEgqSkJBw+fNjgOdXV1aiurhZ/Ly3VJ3HVaDTmbawBfT0ccCK3DLV3K6zy+kTUdS4C8NuxQQDu8XNsYfMfCsD8hwKg1epQoKnCrbIqlFXXoayyFpW194NBCrkDhoV5iitBKsrb33pFRNbVy1mHM9V30d/T0aixVavVorxcv8JPo9Ew+ENEBtVUtn8Mkb1p+B5tb11Ptw7+FBUVoa6uDipV06XlKpUKFy9eNHjOkiVL8NZbb7V4PjDQelU+Zi0HZlnt1YmIiIi6p2nLrd0CIiIi+1BWVgalsvWqzN06+NMZCxcuxIIFC8TfS0pKEBwcjGvXrrX5D0FkKhqNBoGBgcjLy7PoVkN6cLHPkTWw35Glsc+RpbHPkaWxz1Fn6HQ6lJWVwd/fv83junXwx8vLC1KpFGq1usnzarUavr6+Bs+Ry+WQy1uW+FMqlfwAkUW5ubmxz5FFsc+RNbDfkaWxz5Glsc+RpbHPUUcZs9ClW2+YlslkGDx4MPbu3Ss+p9VqsXfvXiQkJFixZUREREREREREtqFbr/wBgAULFiAlJQVDhgxBfHw8li9fjoqKCsyaxSw6RERERERERETt6fbBn+nTp+PWrVtYtGgRCgoKEBsbi127drVIAt0auVy
OxYsXG9wKRmQO7HNkaexzZA3sd2Rp7HNkaexzZGnsc2ROgq69emBERERERERERGSzunXOHyIiIiIiIiIi6hoGf4iIiIiIiIiI7BiDP0REREREREREdozBHyIiIiIiIiIiO2YXwZ/Vq1cjJCQETk5OGDZsGI4ePdrm8V9++SX69esHJycnREdH49tvv7VQS8ledKTPpaenQxCEJg8nJycLtpZs3XfffYdp06bB398fgiBg69at7Z6zf/9+DBo0CHK5HOHh4UhPTzd7O8l+dLTP7d+/v8U4JwgCCgoKLNNgsnlLlizB0KFD4erqCh8fHyQnJyM7O7vd8zino87qTJ/jnI666qOPPsLAgQPh5uYGNzc3JCQkYOfOnW2ew3GOTMXmgz8bN27EggULsHjxYpw8eRIxMTGYNGkSCgsLDR7/ww8/4Nlnn8WcOXNw6tQpJCcnIzk5GWfPnrVwy8lWdbTPAYCbmxtu3rwpPnJzcy3YYrJ1FRUViImJwerVq406/sqVK5gyZQrGjh2LrKwspKWl4cUXX0RGRoaZW0r2oqN9rkF2dnaTsc7Hx8dMLSR7c+DAAaSmpuLHH3/Enj17UFtbi4kTJ6KioqLVczino67oTJ8DOKejrgkICMDSpUtx4sQJHD9+HOPGjcNjjz2Gc+fOGTye4xyZks2Xeh82bBiGDh2KVatWAQC0Wi0CAwMxf/58vP766y2Onz59OioqKrB9+3bxueHDhyM2NhZr1qyxWLvJdnW0z6WnpyMtLQ0lJSUWbinZI0EQsGXLFiQnJ7d6zGuvvYYdO3Y0mRg888wzKCkpwa5duyzQSrInxvS5/fv3Y+zYsbhz5w7c3d0t1jayX7du3YKPjw8OHDiAUaNGGTyGczoyJWP6HOd0ZA4eHh5YtmwZ5syZ0+JvHOfIlGx65U9NTQ1OnDiBpKQk8TmJRIKkpCQcPnzY4DmHDx9ucjwATJo0qdXjiRrrTJ8DgPLycgQHByMwMLDN6D6RKXCcI2uJjY2Fn58fJkyYgEOHDlm7OWTDSktLAegvilrDsY5MyZg+B3BOR6ZTV1eHL774AhUVFUhISDB4DMc5MiWbDv4UFRWhrq4OKpWqyfMqlarVPAMFBQUdOp6osc70uYiICKxduxbbtm3Dv/71L2i1WiQmJuL69euWaDI9gFob5zQaDSorK63UKrJnfn5+WLNmDTZv3ozNmzcjMDAQY8aMwcmTJ63dNLJBWq0WaWlpGDFiBAYMGNDqcZzTkakY2+c4pyNTOHPmDBQKBeRyOebOnYstW7YgMjLS4LEc58iUHKzdACJ7l5CQ0CSan5iYiP79++Pjjz/GO++8Y8WWERGZRkREBCIiIsTfExMTkZOTg/fffx///Oc/rdgyskWpqak4e/YsDh48aO2m0APC2D7HOR2ZQkREBLKyslBaWoqvvvoKKSkpOHDgQKsBICJTsemVP15eXpBKpVCr1U2eV6vV8PX1NXiOr69vh44naqwzfa45R0dHxMXF4fLly+ZoIlGr45ybmxt69OhhpVbRgyY+Pp7jHHXYvHnzsH37duzbtw8BAQFtHss5HZlCR/pcc5zTUWfIZDKEh4dj8ODBWLJkCWJiYrBixQqDx3KcI1Oy6eCPTCbD4MGDsXfvXvE5rVaLvXv3trpvMiEhocnxALBnz55WjydqrDN9rrm6ujqcOXMGfn5+5momPeA4zlF3kJWVxXGOjKbT6TBv3jxs2bIFmZmZCA0NbfccjnXUFZ3pc81xTkemoNVqUV1dbfBvHOfIlGx+29eCBQuQkpKCIUOGID4+HsuXL0dFRQVmzZoFAHjhhRfQq1cvLFmyBADw6quvYvTo0fjLX/6CKVOm4IsvvsDx48fxt7/9zZpvg2xIR/vc22+/jeHDhyM8PBwlJSVYtmwZcnNz8eKLL1rzbZANKS8vb3JX8cqVK8jKyoKHhweCgoKwcOFC5Ofn47PPPgMAzJ07F6tWrcLvf/97zJ49G5mZmdi0aRN27Nhhrbd
ANqajfW758uUIDQ1FVFQUqqqq8MknnyAzMxO7d++21lsgG5Oamor169dj27ZtcHV1FfNZKJVKccUi53RkSp3pc5zTUVctXLgQkydPRlBQEMrKyrB+/Xrs378fGRkZADjOkZnp7MDKlSt1QUFBOplMpouPj9f9+OOP4t9Gjx6tS0lJaXL8pk2bdH379tXJZDJdVFSUbseOHRZuMdm6jvS5tLQ08ViVSqV75JFHdCdPnrRCq8lW7du3TwegxaOhn6WkpOhGjx7d4pzY2FidTCbThYWF6T799FOLt5tsV0f73Hvvvafr3bu3zsnJSefh4aEbM2aMLjMz0zqNJ5tkqL8BaDJ2cU5HptSZPsc5HXXV7NmzdcHBwTqZTKbz9vbWjR8/Xrd7927x7xznyJwEnU6ns2SwiYiIiIiIiIiILMemc/4QEREREREREVHbGPwhIiIiIiIiIrJjDP4QEREREREREdkxBn+IiIiIiIiIiOwYgz9ERERERERERHaMwR8iIiIiIiIiIjvG4A8RERERERERkR1j8IeIiIiIiIiIyI4x+ENEREQPvJkzZyI5Odnir5ueng5BECAIAtLS0ow6Z+bMmeI5W7duNWv7iIiIyD44WLsBREREROYkCEKbf1+8eDFWrFgBnU5noRY15ebmhuzsbLi4uBh1/IoVK7B06VL4+fmZuWVERERkLxj8ISIiIrt28+ZN8eeNGzdi0aJFyM7OFp9TKBRQKBTWaBoAfXDK19fX6OOVSiWUSqUZW0RERET2htu+iIiIyK75+vqKD6VSKQZbGh4KhaLFtq8xY8Zg/vz5SEtLQ8+ePaFSqfD3v/8dFRUVmDVrFlxdXREeHo6dO3c2ea2zZ89i8uTJUCgUUKlUmDFjBoqKijrc5g8//BB9+vSBk5MTVCoVnnzyya7+MxAREdEDjMEfIiIiIgPWrVsHLy8vHD16FPPnz8fLL7+Mp556ComJiTh58iQmTpyIGTNm4O7duwCAkpISjBs3DnFxcTh+/Dh27doFtVqNp59+ukOve/z4cbzyyit4++23kZ2djV27dmHUqFHmeItERET0gOC2LyIiIiIDYmJi8Ic//AEAsHDhQixduhReXl546aWXAACLFi3CRx99hJ9++gnDhw/HqlWrEBcXhz/96U/if2Pt2rUIDAzEzz//jL59+xr1uteuXYOLiwumTp0KV1dXBAcHIy4uzvRvkIiIiB4YXPlDREREZMDAgQPFn6VSKTw9PREdHS0+p1KpAACFhYUAgNOnT2Pfvn1iDiGFQoF+/foBAHJycox+3QkTJiA4OBhhYWGYMWMGPv/8c3F1EREREVFnMPhDREREZICjo2OT3wVBaPJcQxUxrVYLACgvL8e0adOQlZXV5HHp0qUObdtydXXFyZMnsWHDBvj5+WHRokWIiYlBSUlJ198UERERPZC47YuIiIjIBAYNGoTNmzcjJCQEDg5dm2I5ODggKSkJSUlJWLx4Mdzd3ZGZmYnHH3/cRK0lIiKiBwlX/hARERGZQGpqKoqLi/Hss8/i2LFjyMnJQUZGBmbNmoW6ujqj/zvbt2/HBx98gKysLOTm5uKzzz6DVqtFRESEGVtPRERE9ozBHyIiIiIT8Pf3x6FDh1BXV4eJEyciOjoaaWlpcHd3h0Ri/JTL3d0dX3/9NcaNG4f+/ftjzZo12LBhA6KioszYeiIiIrJngk6n01m7EUREREQPovT0dKSlpXUqn48gCNiyZQuSk5NN3i4iIiKyL1z5Q0RERGRFpaWlUCgUeO2114w6fu7cuVAoFGZuFREREdkTrvwhIiIispKysjKo1WoA+u1eXl5e7Z5TWFgIjUYDAPDz84OLi4tZ20hERES2j8EfIiIiIiIiIiI7xm1fRERERERERER2jMEfIiIiIiIiIiI7xuAPEREREREREZEdY/CHiIiIiIiIiMiOMfhDRERERERERGTHGPwhIiIiIiIiIrJjDP4QEREREREREdkxBn+IiIiIiIiIiOz
Y/wGmHxFe9EDvUgAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# PRAAT uses jitter to determine if its voiced\n", + "plt.plot(xs, shimmer[0] * 100)\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Shimmer [%]\")\n", + "plt.title(\"Shimmer and average shimmer across full sample\")\n", + "plt.ylim((0, 15))\n", + "plt.axhline(y = shimmer[jitter < 0.02].mean() * 100, color=\"darkorange\")\n", + "plt.axvline(x = frame_i / 100, color=\"lightgrey\")\n", + "print(\"Average Shimmer: {0:.2f}%\".format(100 * shimmer[jitter < 0.02].mean().numpy()))" + ] + }, + { + "cell_type": "markdown", + "id": "ecb3e3e9-6944-48a2-87c9-58abfce0e05e", + "metadata": {}, + "source": [ + "## Compute GNE step-by-step\n", + "\n", + "An algorithm for GNE computation from the original paper:\n", + "\n", + "\"Glottal-to-Noise Excitation Ratio - a New Measure for Describing\n", + "Pathological Voices\" by D. Michaelis, T. Oramss, and H. W. Strube.\n", + "\n", + "This algorithm divides the signal into frequency bands, and compares\n", + "the correlation between the bands. High correlation indicates a\n", + "relatively low amount of noise in the signal, whereas lower correlation\n", + "could be a sign of pathology in the vocal signal.\n", + "\n", + "Godino-Llorente et al. in \"The Effectiveness of the Glottal to Noise\n", + "Excitation Ratio for the Screening of Voice Disorders\" explore the\n", + "goodness of the bandwidth and frequency shift parameters, and write out\n", + "a clear description of how to compute the measure, used here. The steps are as follows:\n", + "\n", + "1. Downsampling (with a previous low-pass filtering) the signal to a sampling frequency of 10 kHz.\n", + "1. Inverse filtering of the speech signal to obtain the glottal waveform by means of linear prediction techniques.\n", + "1. For each window, calculating the Hilbert envelope of the different frequency bands with fixed bandwidth (BW) and different center frequencies.\n", + "1. 
For each window, calculating the cross-correlation function for each pair of envelopes.\n", + "1. Pick the maximum of each correlation function for each pair of frequency bands as the GNE score." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "5c6200bc-51c5-48e7-a871-6b0dae93547a", + "metadata": {}, + "outputs": [], + "source": [ + "# Step 1. Downsample to 10 kHz since voice energy is low above 5 kHz\n", + "new_sample_rate = 10000\n", + "downsampled_audio = torchaudio.functional.resample(audio, 44100, new_sample_rate)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "051ef7ad-fd5c-4c0c-980a-a1f9f3f6a213", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABJgAAADZCAYAAABsH4pqAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAA0hZJREFUeJzsnXd4G1Xaxc+MumTJcu89vTcSEgIECCR0lhZY+oYWFliWXeoHLLCwsCwBlt5CJ5TQFhIIJCGF9F7de7ea1btmvj9GM5Ys2ZFcsJPc3/P4iSNLo7E85d5zz3teimVZFgQCgUAgEAgEAoFAIBAIBEIfoYd6BwgEAoFAIBAIBAKBQCAQCMc2RGAiEAgEAoFAIBAIBAKBQCD0CyIwEQgEAoFAIBAIBAKBQCAQ+gURmAgEAoFAIBAIBAKBQCAQCP2CCEwEAoFAIBAIBAKBQCAQCIR+QQQmAoFAIBAIBAKBQCAQCARCvyACE4FAIBAIBAKBQCAQCAQCoV8QgYlAIBAIBAKBQCAQCAQCgdAvxEO9A8cDDMOgtbUVarUaFEUN9e4QCAQCgUAgEAgEAoFAIAwILMvCZrMhOzsbNN2zT4kITANAa2sr8vLyhno3CAQCgUAgEAgEAoFAIBAGhaamJuTm5vb4cyIwDQBqtRoA92FrNJoh3hsCgUAgEAgEAoFAIBAIhIHBarUiLy9P0D56gghMAwBfFqfRaIjARCAQCAQCgUAgEAgEAuG442iRQCTkm0AgEAgEAoFAIBAIBAKB0C+IwEQgEAgEAoFAIBAIBAKBQOgXRGAiEAgEAoFAIBCGmHqDAy1m11DvBoFAIBAIfYZkMBEIBAKBQCAQCEOI2xfABa9shkIqws6HzzpqxgWBQCD0B5Zl4ff7EQgEhnpXCMMEkUgEsVjc7/sPEZgIBAKBQCAQCIQhxOjwwu7xw+7xw+NnIJeIhnqXCATCcYrX60VbWxucTudQ7wphmKFUKpGVlQWpVNrnbRCBiUAgEAgEAoFAGEJcXr/wvdsXIAITgUAYFBiGQV1dHUQiEbKzsyGVSoljkgCWZeH1eqHX61FXV4eRI0eCpvuWpkQEJgKBQCAQCAQCYQhxeLrKVNw+Zgj3hEAgHM94vV4wDIO8vDwolcqh3h3CMEKhUEAikaChoQFerxdyubxP2yEh3wQCgUAgEAgEwhDi9IYKTCQThUAgDC59dacQjm8G4rggRxaBQCAQCAQCgTCEuHwhJXJ+IjARCAQC4diECEwEAoFAIBAIBMIQQkrkCAQCgXA8QAQ
mAoFAIBAIBAJhCHGREjkCgUAYMh5//HFMmTJF+P+NN96ISy65pF/bdDqduOyyy6DRaEBRFMxmc7+2d6xABCYCgUAgEAgEAmEIcXbrIkcgEAiE6Gzbtg0ikQjnn3/+oL3Hf//7X3zwwQf92saHH36I3377DVu3bkVbWxsSExMHZueGOURgIhAIBAKBQCAQhhCHl5TIEQgEQiwsW7YMd911FzZt2oTW1tZBeY/ExERotdp+baOmpgZjx47FhAkTkJmZCYqiIp7j9Xr79R7DESIwEQgEAoFAIBAIQwgpkSMQCEMFy7JgGGZIvliWjWtf7XY7vvjiCyxZsgTnn39+hMvogw8+iBCGvvvuuwhx59lnn0VGRgbUajUWL14Mt9sd9vPuJXIejwd333030tPTIZfLMXfuXOzatavH/Zw3bx6WLl2KTZs2gaIozJs3DwBQWFiIf/7zn7j++uuh0Whw6623AgAeeOABjBo1CkqlEsXFxXj00Ufh8/mE7fElfO+99x7y8/ORkJCAO+64A4FAAM899xwyMzORnp6Op59+Omw/zGYzbr75ZqSlpUGj0eDMM8/EgQMHevuI+414ULdOIBAIBAKBQCAQesVJBCYCgTBEsCyL0tLSIXnvcePGRXX29MSXX36JMWPGYPTo0bj22mtxzz334KGHHop7G48//jhee+01zJ07Fx9//DFefvllFBcX9/ia+++/H19//TU+/PBDFBQU4LnnnsOCBQtQXV2N5OTkiOd/8803ePDBB3H48GF88803kEqlws+ef/55PPbYY/jHP/4hPKZWq/HBBx8gOzsbhw4dwi233AK1Wo37779feE5NTQ1++uknrF69GjU1Nbj88stRW1uLUaNGYePGjdi6dSv+9Kc/Yf78+Zg1axYA4IorroBCocBPP/2ExMREvPXWWzjrrLNQWVkZdb8HAuJgIhAIBAKBQCAQhhCSwUQgEAhHZ9myZbj22msBAAsXLoTFYsHGjRvj2sZLL72ExYsXY/HixRg9ejSeeuopjBs3rsfnOxwOvPHGG/jPf/6Dc889F+PGjcM777wDhUKBZcuWRX1NcnIylEolpFIpMjMzw8ScM888E3/7299QUlKCkpISAMAjjzyCOXPmoLCwEBdeeCH+/ve/48svvwzbJsMweO+99zBu3DhceOGFOOOMM1BRUYGXXnoJo0ePxk033YTRo0dj/fr1AIDNmzdj586dWLFiBWbMmIGRI0fi+eefh1arxVdffRXXZxYPxMFEIBAIBAIhLmxuHxQSEcSi/q1Ttbe3g2XZHrMJCIQThTAHk59kMBEIhN8PiqJ6FVgG+71jpaKiAjt37sS3334LABCLxVi0aBGWLVsmlKDFQllZGW6//fawx2bPni0IM92pqamBz+fDKaecIjwmkUgwc+ZMlJWVxfy+PDNmzIh47IsvvsDLL7+Mmpoa2O12+P1+aDSasOcUFhZCrVYL/8/IyIBIJAJN02GP6XQ6AMCBAwdgt9uRkpISth2Xy4Wampq49ztWiMBEIBAIBAIhZixOH+b++1dMzE3E8ltO7vN2/H4/DAYDAC5MU6lUDtQuEgjHHKREjkAgDBUURR0TizzLli2D3+9Hdna28BjLspDJZHj11VeRmJgImqYjcp1Cs4yGAyqVKuz/27ZtwzXXXIMnnngCCxYsQGJiIj7//HMsXbo07HkSiSTs/xRFRX2MYbhFCrvdjqysLGzYsCFiH/obYN4bpESOQCAQCARCzDSYHLB5/Njb2Bl3OGcooZ1TzGbzAOwZgXDsEl4iRxxMBAKBEIrf78dHH32EpUuXYv/+/cLXgQMHkJ2djc8++wwAkJaWBpvNBofDIbx2//79YdsaO3YsduzYEfbY9u3be3zvkpISSKVSbNmyRXjM5/Nh165dA+L82rp1KwoKCvB///d/QilbQ0NDv7c7bdo0tLe3QywWY8SIEWFfqamp/d5+TxAHE4FAIBAIhJjhu125fQyc3gBUsr4NJUIFJovFgqy
srGNiBZVAGAyIg4lAIBB6ZuXKlejs7MTixYuRmJgY9rPLLrsMy5Ytw+23345Zs2ZBqVTi4Ycfxt13340dO3ZEdJr7y1/+ghtvvBEzZszAKaecgk8//RRHjhzpMeRbpVJhyZIluO+++5CcnIz8/Hw899xzcDqdWLx4cb9/t5EjR6KxsRGff/45TjrpJKxatUooA+wP8+fPx+zZs3HJJZfgueeew6hRo9Da2opVq1bhD3/4Q9RSvYGAOJgIBAKBQCDETGg+jNHu7eWZvRNqWQ8EArDb7f3aLwLhWMYVIjB5/ERgIhAIhFCWLVuG+fPnR4hLACcw7d69GwcPHkRycjI++eQT/Pjjj5g4cSI+++wzPP7442HPX7RoER599FHcf//9mD59OhoaGrBkyZJe3//ZZ5/FZZddhuuuuw7Tpk1DdXU1fv75ZyQlJfX7d7vooovw17/+FXfeeSemTJmCrVu34tFHH+33dimKwo8//ojTTjsNN910E0aNGoWrrroKDQ0NyMjI6Pf2e3xftj/+dgIAwGq1IjExERaLJSKMi0AgEAiE44nVh9tx+yd7AADf3DEH0/L7NrhqaWlBZ2cnKIoCy7LQarXIzc0dyF0lEI4Z5v77VzR3ugAAl0zJxktXTR3iPSIQCMcjbrcbdXV1KCoqglwuH+rdIQwzejs+YtU8iIOJQCAQCARCzISW7/THwcSXyPFBk1arVQimJBBONFxhJXLkPCAQCATCsQkRmAgEAoFAIMRMuMDk6fN2QgUmiUQChmFgs9n6vX8EwrFIWAYTKZEjEAgEwjEKEZgIBAKBQCDEjCtUYHL0zcHEsqyQwSSVSoVMBdJNjnAiwjBs2HlFQr4JBAKBcKxCBCYCgUAgEAgxEzoRNvTRwcSLSxRFQSwWC2VydrsdgQCZXBNOLFzdBCVSIkcgEAiEYxUiMBEIBAKBQIiZ0MlvXzOY+PI4iUQCiqIgl8shk8nAsiwsFsuA7CeBcKwQWh4HEAcTgUAgEI5diMBEIBAIBAIhZsIymBx9czDxApNUKhUe411MHR0dcLvdfd9BAuEYw+n1h/3f4++fg8loNKKtrQ2kUTSBQCAQfm+IwEQgEAgEAiFmBqKLXGj+Ek9ycjLkcjkCgQDq6+vh8fQ9QJxAOJbo7mByefvuYPJ4PGhra4PRaITdbu/vrhEIBAKBEBdEYCIQCAQCgRAzoZNfQz9L5EIFJpFIhMLCQsjlcvj9ftTV1QnPIxCOZyJK5PrRRc5gMAjfk3JTAoFAIPzeEIGJQCAQCARCzIQGEpscHjBM/GU4oRlMoYjFYhQWFkImkwkiEwn9Jhzv8CVyCTIxgL5nMPn9/rBOjFarFQxDAsMJBALhaHzwwQdCqf5gUFhYiJdeemnQtj+cIAITgUAgEAiEmAkN+WZYwOzyxb2NaA4mHl5kkkgk8Pl8xIVBOO7hHUxJKk5wdfuYPuUnGY1GsCwLhUIBiUQChmFImRyBQDhuuPHGG0FRVMTXwoUL+73tRYsWobKyUvj/448/jilTpsS9nZ6Eql27duHWW2/txx4eO4iHegcIBAKBQCAcO3R3VxjtHiSrIoWinmAYRnAlRROYAM7ZlJiYCIPBAJfL1fedJRCOAfiy02SVDE0m7nj3+BnIJaKYtxEIBGAymQAAaWlpcDqdMBgMMJvN0Gg0A7/TBAKBMAQsXLgQ77//fthjMpms39tVKBRQKBT93k5PpKWlDdq2hxvEwUQgEAgEAiFmXN0EpnhzmHj3Ek3TEIl6nkArlUoAgNPpjHMPhzcsy8JisaC9vR0NDQ2oqqpCRUUFcZqcwPAOppQQodbji6+0rbOzE4FAAFKpFGq1GomJiQAAm81GykwJBMJxg0wmQ2ZmZthXUlISNmzYAKlUit9++0147nPPPYf09HR0dHQAAMxmM2677TZkZGRALpdjwoQJWLlyJYBw59EHH3yAJ554AgcOHBBcUh988AEA4IUXXsDEiROhUqmQl5eHO+64Q7h/b9iwATf
ddBMsFovwuscffxxAZIlcY2MjLr74YiQkJECj0eDKK68U9hPoclB9/PHHKCwsRGJiIq666irYbLZB+mQHDuJgIhAIBAKBEDPdHUwmR98Epp7cSzy8wOTxeBAIBHoVo44lrFYrmpqaIh5vaGhAQUEBEhIShmCvCEMJn8GkloshoikEGBZufwCJkBzllRwsy8JoNAIAUlNTQVEU5HI5pFIpvF4vbDbboGaLEAiEYxuWZSMWj34vFBIRKIrq93bmzZuHe+65B9dddx0OHDiA2tpaPProo1ixYgUyMjLAMAzOPfdc2Gw2fPLJJygpKUFpaWnUscWiRYtw+PBhrF69GmvXrgUAQbSnaRovv/wyioqKUFtbizvuuAP3338/Xn/9dcyZMwcvvfQSHnvsMVRUVABA1Hs6wzCCuLRx40b4/X78+c9/xqJFi7BhwwbheTU1Nfjuu++wcuVKdHZ24sorr8Szzz6Lp59+ut+f12BCBCYCgUAgEAgxww9CU1RSGB1eGB2euF7v83GZTUcTmMRisZDD5HQ6oVar+7bDw4zOzk4AgEqlgkajgVQqFVrKE5HpxIR3MCmlYsjFNBzeQFxB3xaLBT6fDyKRSBCSKIpCYmIi9Ho9LBYLEZgIBEKPuHwBjHvs5yF579InF0ApjV2SWLlyZcQ98uGHH8bDDz+Mp556CmvWrMGtt96Kw4cP44YbbsBFF10EAFi7di127tyJsrIyjBo1CgBQXFwc9T0UCgUSEhIgFouRmZkZ9rN77rlH+L6wsBBPPfUUbr/9drz++uuQSqVITEwERVERrwtl3bp1OHToEOrq6pCXlwcA+OijjzB+/Hjs2rULJ510EgBOiPrggw+E8c91112HdevWEYGJQCAQCATC8YM7OBnOTVLA6PD2uUTuaAITwLmYLBbLcSMw+Xw+wUqfnZ0t5EaoVCo0NjYSkekEpUtgEkEuEcHhDcTlJuCPqaSkJNB0V/oFLzDZ7Xb4/X6IxWTYTyAQjm3OOOMMvPHGG2GPJScnA+DGFZ9++ikmTZqEgoICvPjii8Jz9u/fj9zcXEFc6itr167FM888g/LyclitVvj9frjdbjidTsF5fTTKysqQl5cniEsAMG7cOGi1WpSVlQkCU2FhYdjYJysrCzqdrl/7/3tA7jQEAoFAIBBixu3nsmFykhQ40GyB0R6fg4kXmCSSo5f/8ALT8RL0zXfEUygUYaGkNE0jPz9fEJmampowevToMLGAcPzCl8jxAhMQ3q3xaHg83DnYPaBWLpdDLpfD7XbDarUKkzACgUAIRSERofTJBUP23vGgUqkwYsSIHn++detWAIDJZILJZIJKpeLeZwACvOvr63HBBRdgyZIlePrpp5GcnIzNmzdj8eLF8Hq9MQtMsdJ9nERRFBgmvny+oYCMXAgEAoFAIMSMS3AwcQMp4yA7mAAu6LsvbduHG2azGQCilivxIpNEIkEgEDgmgjwJA0NoiZxMwg3NYy2RY1lWEJiidVLic0OMRiMJ+yYQCFGhKApKqXhIvgYif4mnpqYGf/3rX/HOO+9g1qxZuOGGGwRBZtKkSWhubkZlZWVM25JKpRHXzD179oBhGCxduhQnn3wyRo0ahdbW1qO+rjtjx45FU1NTWB5jaWkpzGYzxo0bF9P+DWeOOYHptddeQ2FhIeRyOWbNmoWdO3f2+vwVK1ZgzJgxkMvlmDhxIn788cewn994441Cyjv/tXDhwsH8FQgEAoFAOCZhWS58GOBK5ADElcHEsmzMGUwA58DgV+z4SfSxitvthtvtBtA16e8OTdPCz3i3E+H4xxVaIifmHUyxiUF+v1+YQEU7p5KSkiAWi+HxeNDY2HhMrH4TCARCT3g8HrS3t4d9GQwGBAIBXHvttViwYAFuuukmvP/++zh48CCWLl0KADj99NNx2mmn4bLLLsOaNWtQV1eHn376CatXr476PoWFhairq8P+/fthMBjg8XgwYsQI+Hw+vPLKK6itrcXHH3+MN998M+J
1drsd69atg8FgiNoJd/78+Zg4cSKuueYa7N27Fzt37sT111+P008/HTNmzBj4D+135pgSmL744gvce++9+Mc//oG9e/di8uTJWLBgQY+1iFu3bsXVV1+NxYsXY9++fbjkkktwySWX4PDhw2HPW7hwIdra2oSvzz777Pf4dQgEAoFAOKbw+BnwRqIcbVBgisPBFAgEhAluLCVyFEUJtvZog7RjCd69pFare83CIe3lTzwcwRI5hVQEhTS+EjleeJVKpVFLKsViMQoKCkDTNBwOB5qbm48LNyCBQDgxWb16NbKyssK+5s6di6effhoNDQ146623AHB5RW+//TYeeeQRHDhwAADw9ddf46STTsLVV1+NcePG4f777+/xPnvZZZdh4cKFOOOMM5CWlobPPvsMkydPxgsvvIB///vfmDBhAj799FM888wzYa+bM2cObr/9dixatAhpaWl47rnnIrZNURT+97//ISkpCaeddhrmz5+P4uJifPHFFwP8aQ0NFNuHu4zZbMZXX32Fmpoa3HfffUhOTsbevXuRkZGBnJycwdhPAMCsWbNw0kkn4dVXXwXAJavn5eXhrrvuwoMPPhjx/EWLFsHhcGDlypXCYyeffDKmTJkiqI033ngjzGYzvvvuuz7vl9VqRWJiIiwWCzQaTZ+3QyAQCATCcMbs9GLKk2sAAKvvORULX/oNGrkYBx+PLbvB6XSitrYWYrEYY8aMiek1/OpkUlLSoI4xBhOWZVFZWQmfz4e8vLweHUz8c6uqquD1epGbm0u6f50AXPnWNuysM+G1P07D8p0N2FJtxH+vmoKLpxz9eDeZTGhtbYVarUZBQUGPz+MD5FmWRUpKCjIzMwe0NIVAIBwbuN1u1NXVoaioCHK5fKh3hzDM6O34iFXziNvBdPDgQYwaNQr//ve/8fzzzwsrct988w0eeuiheDcXM16vF3v27MH8+fOFx2iaxvz587Ft27aor9m2bVvY8wFgwYIFEc/fsGED0tPTMXr0aCxZsgRGo7HXffF4PLBarWFfBAKBQCAc7/CuComIQqaGG3hY3X54/bG5LeLJX+IJzWE6VnE6nfD5fKBp+qjd8Pj28gApkztR6E+JXG/5S6EkJCQIAq3RaBQ6zxEIBAKBMJDELTDde++9uPHGG1FVVRWmap133nnYtGnTgO5cKHxtZUZGRtjjGRkZaG9vj/qa9vb2oz5/4cKF+Oijj7Bu3Tr8+9//xsaNG3Huuef2akt/5plnkJiYKHyFthgkEAgEAuF4hW+dLheLoJFLIKY5B4TJEVuZXH8EJo/Hc8yUjPn9fhiNRhiNRphMJuj1egCARqOJqTMcLzDZ7fZj5ncm9J3QEjm+ixwvOh2N0BK5o6HVapGUlAQARGAiEAgEwqDQcwhAD+zatUuobQwlJyenR6FnOHPVVVcJ30+cOBGTJk1CSUkJNmzYgLPOOivqax566CHce++9wv+tVisRmQgEAoFw3MNPeuVSEWiaQrJKCp3NA4Pdg8zEo1vt4wn45hGLxZBIJPD5fHA6nUd1AA0HdDodTCZTxOOxlrvJ5XLIZDLBMc2LAoTjE/68UoV2kYvRFRirg4lHqVSis7MTLperD3tKIBAIBELvxO1gkslkUUvCKisrkZaWNiA7FY3U1FSIRCJ0dHSEPd7R0YHMzMyor8nMzIzr+QBQXFyM1NRUVFdX9/gcmUwGjUYT9kUgEAgEwvEO30FOEXRZpCRwk1pjDA4mlmXhcDgAIO7cB97FdKxMivnfU6VSQa1WQ6VSISUlBSqVKuZtkDK5EwdnUGAKdTDFUiLHMIwg2sYqMPGh+W63m4R9EwgEAmHAiVtguuiii/Dkk08KNzSKotDY2IgHHngAl1122YDvII9UKsX06dOxbt064TGGYbBu3TrMnj076mtmz54d9nwAWLNmTY/PB4Dm5mYYjUZkZWUNzI4TCAQCgXCc4OYdTEGXRWoC50Qy2j1Hfa3H44HX6wVFUXEJLcCxlcPk9/s
FV0leXh4KCgpQVFSErKysuEKVQ8vk/H7/oOwrYXjgDJbIhWcwHd3BxB9nIpGo186EochkMlAUBYZhhJJVAoFAIBAGirgFpqVLl8JutyM9PR0ulwunn346RowYAbVajaeffnow9lHg3nvvxTvvvIMPP/wQZWVlWLJkCRwOB2666SYAwPXXXx8WNP6Xv/wFq1evxtKlS1FeXo7HH38cu3fvxp133gmAG7Tdd9992L59O+rr67Fu3TpcfPHFGDFiBBYsiK0jDoFAIBAIJwp8BpPgYFLxAtPRJ6o2mw0A5+oRiURxvW+owDTcXRe8y0oqlcY86Y+GTCYTnF6kmcjxiy/AwBfgjmmVVCyIt7E4mOItjwO4hWH+uDpWHIGxYrPZiGhGIMTIcL+XEoaGgTgu4h75JCYmYs2aNdi8eTMOHjwIu92OadOmRXRrGwwWLVoEvV6Pxx57DO3t7ZgyZQpWr14tBHk3NjaGhWfOmTMHy5cvxyOPPIKHH34YI0eOxHfffYcJEyYA4FZ8Dh48iA8//BBmsxnZ2dk455xz8M9//jOumzWBQCAQCCcCvKtC1q1EzuA4uoOJF0lCy8qPtFrAssCEnMReX8vfk/mSoHgynH5veJcVL4r1h8TERLjdbthsNiQnJ/d7e4ThhzMkzFshFQnircc/OAITwJXJuVwuuFyumHPBhjt2ux0NDQ2gaRpFRUVCKSCBQAhHIpEA4O5V5DwhdIcfw/DHSV/o89La3LlzMXfu3D6/cV+58847BQdSdzZs2BDx2BVXXIErrrgi6vMVCgV+/vnngdw9AoFAIBCOWyIcTAmxOZj8fr/gluBDur1+Ble9tR0My2LPo2cL2TPRoGlaCL32eDwnjMDEb8Ptdvd7W4ThCV8eJ6YpSMV0SAbT0UvkeLdOXwQm4PhyMBmNRgCcCF1XV0dEJgKhB0QiEbRaLXQ6HQDuPhNP+Tbh+IRlWTidTuh0Omi12rid5qHEJDC9/PLLMW/w7rvv7vPOEAgEAoFAGL64uwlMqapgyPdRMpj48ji5XC6sipmdXtg83OTa6vb1KjDxr/V4PHC73cO2kxzLssKkfSAEJr6Uyefzwe/396vkjjA84R1MSil3/A92iRwQGfR9rE8uvV5v2DXG7Xajvr4ehYWFRGQiEKLAN7ziRSYCgUer1fbaEC0WYhqpvPjii2H/1+v1cDqdgq3WbDZDqVQiPT2dCEwEAoFAIByn8JNefhIsOJiO0kUuWnmcxeUTvnd4AsBRNCO5XA6LxTKs3TxutxsMwwiOq/4iEokE55bL5Rq2whqh77gEgYkbkvPlp66jCEwsywoCU7yOvu5B38d6LITJZALA5bvl5+ejvr4eLpeLiEwEQg9QFIWsrCykp6cLjbsIBIlE0i/nEk9MAlNdXZ3w/fLly/H6669j2bJlGD16NACgoqICt9xyC2677bZ+7xCBQCAMFmVtVnyxqwl3nTlCyI4hEAix4wpppw50ZTD1ViLHMAzsdjsAhAkk4QLT0buk8W6e4SwwhZbHDZQrRKFQEIHpOIY/9rscTHyJXO8Ck8/nE9xH8QpMfNA3n8N0LAtMDMOgs7MTAJCSkgKRSITCwkIiMhEIMSASiQZEUCAQQom7i9yjjz6KV155RRCXAGD06NF48cUX8cgjjwzozhEIBMJA8saGGnywtR7f7W8d6l05JvF4PDCbzQgEjl66QTg+cft5B1N4FzmD3dNj5xG73Q6WZSGRSASRCADMzvgEJn4S7PF4wDBHz6cZCgYyf4nneMzLIXThDApJSllQYBLzJXK9H+Oh7qW+iJnHy3FltVoRCAQgFosFAZYXmRQKBQKBgCA2EQgEAmHwiVtgamtrg98fORAMBALo6OgYkJ0iEAiEwaDdwjkfOqzD1wExXGFZFvX19WhubkZ5eTmam5uFyTThxMHl5Sa98m4h3x4/A4c3uvDIZ6Oo1eqwiXCYg8l7dIFJIpEInWKHaytyIjAR4kUokZNwRQWxOpj6mr/Ec7w
cV3y4d3Jyctj1hYhMBAKBMDTELTCdddZZuO2227B3717hsT179mDJkiWYP3/+gO4cgUAgDCSGYBCxwXb0luqEcMxms1Cnz7IszGYzamtrUVdXJ0x0CMc/3bvIKaViobQnWtA3y7JhAlMoERlMR4Ev6wGGZ5mcz+cTzpGBLMfhf2e/30+yMo5DePceX3bK/+vxx+Zg6q/AxAd9H4vwJX4AkJSUFPHzaCITceAe/9jcPqwt7YDHT/7WBMJQELfA9N577yEzMxMzZsyATCaDTCbDzJkzkZGRgXfffXcw9pFAIBAGBH1QWNIfpePV0eA7RR2rg/J4YVkWer0eAJCRkYHi4mIkJiaCoig4HA5UV1fDYDCcMJ/HiYynm8AEdLmYDFFymNxuN/x+P2iahkqlCvtZvBlMwPDOYeInunK5fEAzLUIDw4kD4/iDF21VQonc7+Ng6h70fSzAMAyMRiM6OjrQ2tqKlpYWAEBiYqLQnbI7vMgkkUgQCATgcDh+z10mDAEvr6vCzR/txld7mod6VwiEE5K4+92mpaXhxx9/RGVlJcrLywEAY8aMwahRowZ85wgEAmGgcPsCQkv0aBPheNDr9dDpdEhKSkJOTs5A7N6wxmKxwOv1QiQSITk5GSKRCEqlEl6vFy0tLXA4HGhvb4fFYkF+fn6PA33CsY+rWxc5AEhRydBkckV1MPFCkEKhEMrbeEIFJvtxIDANRnkcT2jQd2gnPsKxj5MPzhdK5PgMpsEVmI7FoO/W1laYzeaIx5OTk3t9nUgkgkajgdFohNVqJefQcU5ZG+earTcQMZFAGAriFph4Ro0aRUQlAoFwzKAPKYvT96NEzufzCW6ezs5OqNXq43qwGupe4jv08EilUhQWFqKzsxPt7e1wuVzQ6XQnhOh2otIlMHUdB6lBB5PRESnc8iVd0bpchQpMzh7ym7rDT4KHs8A0GN2qFAoFzGYzcTAdhzh76CLn6kVg8vv9QqlXvB3kQlEoFILApNVq+7yd3wO73S6IS0lJSRCLxaBpGnK5PMIdGQ21Wg2j0QibzSZ03yMcnzR1ctfi3rqbEgiEwSNugelPf/pTrz9/7733+rwzBAKBMFiElsWZHB4EGBYiOv4Bpk6nEwanLMuipaUFSqUSYnGf9fphjc1mg8fjAU3TSElJifg5RVFITk6GVCpFfX09LBYLsrKyItwqhOMDdxSBKUXFiT7RHEx86U00V1t/SuT4CfZwaa/MMIwg/gyWgwmAUJpLJsfHD7y4yneRk0m6usj19LfmBVaJRNKvc+BYCfpmGAatrVz31+TkZGRnZ8e9DaVSCZqmEQgE4HK5BuU8JQw9AYZFq5k7nqMtehAIhMEn7hlAZ2dn2JdOp8Ovv/6Kb775JqptlUAgEIYDoa4lhgU6nfEPPNxuNzo7OwEAhYWFkMvlCAQCaGlpOW7yhxiGgc/nQyAQAMMw0Ol0ACLdS91RqVQQi8VgGAZ2u/332l3C74wr2Do9NIMpuZcMJt7BdDSBKdYSOZFIJGxrOLmY+KBkkUjUL0dJT/DCWiAQIEHfxxlOX/QuckDPQd98jlB/RZJjJehbr9fD6/VCLBYjIyOjT9ugaRoJCQkAujpbEo4/Oqxu+ALcsWx0kAYkBMJQEPeS+7fffhvxGMMwWLJkCUpKSgZkpwgEAmGgMXRzVxjsHqQmxJc50d7eDgDQaDRQqVTIzc1FTU0NbDYbOjs7j5oDMdzx+XyoqamB3x8+2acoKqp7qftzEhMTYTQaYTabj+uywRMZN58XIw11MHGCSjTRdqBL5ABObPH5fHC73TGVxvwe8BN+lUo1KO4ivhTI7XbD7XYPiohFGBpcvINJGh7yDQAeHxMmOPGEHm/9ITTo2+fzDcvjyu12C2Xa2dnZ/XJsaTQaWK1WWK3WPgtVhOFNk8kpfG8iJXIEwpAwIDUMNE3j3nvvxYsvvjgQmyMQCIQBp3vuksEW38DDZrPBbreDoihhYCqXy4XvW1tbUVtbi46ODiH
j4ViCL/frLi4BQGpqakwlgHyGh81mI62gj1Pc/siQ7yQlNyk1dStHYFl2wB1MwPAM+h4oR0lvHCvlTIT44MtD+RI5iYgCX73tjtJmPbQcs78CE0VRgqjEh4YPJ/j7EoAByTvkHUwej+eY6ZxHiI+mzq7ro8HhPebGYgTC8cCAhYZEW/UmEAiE4UKEwBQlL6YnWJYV3EvJyclh3XZSUlLgdDphtVrhdDqFoF+1Wo2CgoIB2PPfB5PJJAhoJSUlkMlkYBguAyTWfCm5XA6pVAqv1wur1YqkpKRB3mvC7w3vtpBHKZHrLjD5/X5hcN9dYGJZFhZn/BlMwPAL+mZZVjjvB9NRpVAo0NnZSQSm4ww+zJt3MFEUBYVEBIc3ELWTnNPpBMuykEgkA+I4kslk8Hg88Hg8UKvV/d7eQMIHkNM03afcpe6IxWIolUo4nU7YbLajOnMJxx6hDiavn4Hd44daTjrbEgi/J3ELTPfee2/Y/1mWRVtbG1atWoUbbrhhwHaMQCAQBpJoJXKx4nA4hKDrtLS0sJ9RFIW8vDx4vV44HA44nU5YLBbYbDY4nc5jIkjU7XYLAlpmZqbgEIm3FIGiKGi1Wuh0OlgsFiIwHYfwE96wDKagg6mzm8AU6l7qXjbm9jHwBrryZRxxlsgBnAthOAReu91uMAwjlLENFsMl6JthGGFBkd+PaH9jQmzw5aEKSdeQXC4ITJEZTHzG3UCVY/KC7XB0MFmtVgDcgk00F2RfUKvVRGA6jmnuDBfgTQ4vEZgIhN+ZuAWmffv2hf2fn3AtXbr0qB3mCAQCYajgHUw5WgVazK6wrnJHw2KxAAASExOjunkoioJMJoNMJhNymMxmMwwGA/Lz8wdg7wcPhmHQ1NQElmWhVqv7nSOVmJgInU4Hu90Ov99/3HbXO1HhJ7xhDqZgBpMxWI7AT3pjLY8D4ncwDafcmMHOX+Lhf28+6Hsofm+/34+qqqqIEli5XI6kpCRotdph09nvWIE/9lWyrs+NP7+iOZgGKn+J51gQmAYy00+tVqOjowMOh2NYdaIkDAxNnc6w/xvsXhSkDI+sPgLhRCHukf/69esHYz8IBAJhUOEFpbFZak5gssU2mGZZVhjkJiYmxvSalJQUmM1mWK1WeL3eIZ8A94Zer4fH44FIJEJOTk6/J8gymQwKhQIulwsWi4WsEB9HBBhWcB0poghMHj8DpzcAlYwbWvAZJ7EITM44BCZe0B0ugdfR8pc8/gD21HdiVnEKRPTAiE68Q4o/t7q7KX8POjs7BXGJoihB6HO73Whra0NHRweSkpKQkZEBmh6QmM/jnu4lcgAgC2acuboJTIFAYMDyl4T3GqYCE5+TRFGUkJ00EMhkMqGU2263x3xfJxwbNAdL5CQiCr4AG1G6TSAQBp+47/5nnnkmzGZzxONWqxVnnnnmQOwTgUAgDCgsywqh3mOzuJXQaC3Vo2G324VVzlgH9AqFQniuyWTqwx7/PrAsK+xfdnb2gLmN+AE77/wiHB+EuilCu8gppSLIxNxwInQwH0sHOV57iSfkGxg+Qd895S8t/aUSf3x3B77Y1TSg78c7DI1G4+8eXsuyLDo7OwEAOTk5GD9+PMaNG4exY8ciMzMTUqkUDMPAaDSivr6eBP3HSNQSOXF0BxN/rA1U/hLQJTAFAoFhlaXKL+yoVKoBdRlRFCVkTfHvQTg+8PoZtFm5ewI/1jPG4VbvjsPhQHNzs3DeEQiE2IhbYNqwYUPUzgtutxu//fbbgOwUgUAgDCQOb0BYCRYEphgdTKHlcfG4e1JTUwFwAtNwnWiFimcDWYLAC0yhoeeEY59QNwUvKAHchI13MUUTmKI5mMxO7nkZGk4ocngDcQkm/OR6qDtBeTweBAIB0DQtZCQBwM9HuEyz3Q0DKzDzZbp+v/93F3CdTie8Xi9omg5zfYhEIqSmpmLkyJHIz88HTdNwOp2ora0d8r/PsYAzaokcd351z2Di3XLdHT0efwA7ao1gmPh
FR5qmhXN0OLmYQvOXBprQRRASmn/80GZxgWW582dUBnfcGPvgYGIYBm1tbairq4PZbEZ9ff2QL2YQCMcSMQtMBw8exMGDBwEApaWlwv8PHjyIffv2YdmyZcjJyRm0HSUQCIS+wpfDKaUi5CdzZSyxhHwzDBN3eRxPQkKCsKIfzfU5HOD3S6vVDmh2jEQigVarBQA0NzeDYSKDan9vzGYzampqoNPphtUq/bFEVwc5OuJ4EQQmZ9dgPpYSuWwtJ8oEGBYef+zHCb9NXsQaKvgJv0KhED6TJpMTDUZOWK3R2Qf0/WiaHjIXE+92TExMjFr+RlEUNBoNiouLIRaL4fF4UFtbSyZmvcCyLJx8cL40MoPJ4w9fnAgN+A5l2eY6LHp7O97cVNOn/RhuZXJ+v18QfgZy8YNHqVQK9/S2tjbSyv44ocnEHTO5SUqkJnDHtDFGtzqPw+FAVVUVjEYjAK7zIMMwqK+vH/L7DYFwrBBzPcSUKVOEevtopXAKhQKvvPLKgO4cgUAgDAS8mJSmlnUNOhxeMAwLupd8FLvdDoZhhNbG8UBRFFJSUtDW1gaDwYDk5ORh1WUpEAj0WTyLhaysLNjtdni9XrS1tQ3pAoTFYkFzczMArgOXXq+HRqOBSqUSykICgQC0Wu2AZn0cb/CT3dCAbx5BYAoO5lmWjalELjOxq+uaw+OPuu1o8Nsc6gF/tMDlzdUG4ftqnX3AO74lJydDr9cLLdx/j06VodeLo3WHlMvlKC4uRkNDAzweD+rq6lBYWBjm8CJwePwMeG1DKQ3vIgeEl8gFAgFBrOsuMB1p4f42X+1pxpLTS+I+3mQyGex2+7ARmPhjTaFQDFj3uO5kZmbCarUKnV/5RRHCsQsf8J2bpECK0Hwi9mPa4/Ggvr4eLMtCLBYjJycHCoUCdXV18Hg8aGhoQFFREQmGJxCOQswOprq6OtTU1IBlWezcuRN1dXXCV0tLC6xWK+kiRyAQhiW8gyktQYaUBG7QEWBYmF29T077Wh7Hk5SUBJFIBJ/PB5vNFvfrBxOr1QqWZSGVSgdl4icSiZCbmwuACwYeqqwLu90uiEtqtRoKhQIsy8JisaC1tRUdHR0wGo2CDd5gMJDV7B5weSMDvnl4gakz6GBiGEZwrkWbIFqD516SUiJsz+GJvZQ01ME0VH8vlmWPKjA5vAG0WQbWwSMWiwVR2GAwhP1ssD4Ls9kMlmWFEP+jIZVKUVxcDIVCgUAggPr6elKKFIXQ7omh55VCEJi6XH38sSaVSiPOqVYL99nW6h0ob4//XjPcHEz8/XIwyuN4JBKJEJTf3t4+LJy2hP7RHBSY8pKUwlgvnpBvnU4HlmWhVCoxcuRIqNVqiMViFBQUQCwWw+12o7GxcdjGHhAIw4WYBaaCggIUFhaCYRjMmDEDBQUFwldWVhZRcwkEwrCFF5hSE2SQiGholdzgvLcyuf6Ux/HQNC2sig63wGt+fwa6PC6UhIQEoYtcS0vL716a5nQ60dDQAJZlodFokJ+fj5KSEpSUlCApKQkJCQnQarVITU0V/sbt7e1obW0lIlMU+Aym3gQmPu+CL48TiURRy6l4B5NWIRW6zjm8sR8ffCA9y7JDNtj3er0IBAKgKEoQXRiGxdagwCQRcedV1QCXyQFdGW98p0oA+N/+Fox+ZLWQ/zSQ8OHeSUlJMV8vRCKR4FwiIlN0nCFlp6HdBmVCBlPXsR1NzORpDxExVx1si3s/hpPAFAgEhFLAwSiPCyU1NRUSiQR+vx96vX5Q34sw+PAlcnnJCqQE3eqxNnRxu93CuKj7vFYqlaKgoAAURQkldMNt0ZBAGE7EJDB9//33gg39+++/7/WLQCAQhhuhJXIAhDK53oK+eYePRCLpl8OHFy5sNtuwES18Pp8wgB/sFs0ZGRmQyWQIBAJoa4t/4tNXfD6fIC4lJCQgNzdXmBgrFArk5OSgsLAQubm5yMzMFP4FuMk06YIVCT/ZlUUTmJRBB1NQYOqtPA6
A4B5MVEiEcGNHHJ3kaJoWRKahKpMLzV/iRbTSNis6nT4kyMQ4fVQ6AK5MbqCRy+URnSq/2tMMb4DBq79WD+h7uVwuuN1uUBQVdxlRNJFpOIgYwwVetA0tjwNCS+S6XDXRuhUCgD/AoMMaIjAdij9TiBeYfD7fkF/37Ha7cO/l92uwoGkaWVlZADg3YLRQeqfXj3c21UJnI1liw52mUAcTv+gRYxe5jo4OAJyoGW3Mp1AoUFhYKAiSDQ0NaGpqIpmOBEIUYspguuSSS9De3o709HRccsklPT6PoqghvzERCARCd4QSOUFgkqJaB+h7GXgMlMNHoVBAJBIhEAjA4XAMi4wf/ndTKBSQSqVwev0RE5yBgqZp5ObmoqamBhaLBWlpaUKL+cHEYDAgEAhALpcLna16g6IopKamQiqVorm5GQ6HAx0dHcjOzh70fT1W6HIwRX6WyQnhDqbeOsgBXQ6mRIUEquCxZ49DYOK37ff74fP5hiTfJ5qj5Lcqzr10cnEyxmapsbasY1AEJgBISUmBw+GA2WxGWlo69jeaAQCHWiw43GLBhJyBEY9595JGoxFEvXjgRSbewWQymYRJ/YkOL6oqpeGirVzM/T+0cyMvfnS/fupsHjAsIKYpiGgKdQYHytpsGJcdu/tHLBYL9ymv1zukeVm8M0Sj0fwuuYVqtRoqlQoOhwMmk0lYaOB5b3Mdnv+lEmXtVrxw5ZRB3x9C3wkN+Q4tkTtaDp7T6RSOu4yMjB6fp1KpMHLkSKG03mKxwOPxoKQk/twzAuF4JiYHE8MwSE9PF77v6YuISwQCYTgSWiIHAGlqedjj3XG73cJgo7/BnxRFCTkSw8VSHdo97j8/V2DyE79gfYVu0N5PoVAIpQ7xliH85+dyzH5mHRqMjphfEwgEhElxRkbGUcWlUPhSOoBzhvAiAqHLwaSQ9uxgMnUrkTuawKRRSJAQLJHjy4ViZSg7yfWUv7QlWB53yohUjEjnxORq3eCc9wkJCaAoCn6/H6UtnbCFCHRf7GoakPcI7YJ5tHDv3hCJRELeDe8OJXR1ZowQmLqVyIWOsbufU3zGV4ZGjnmjuc941aHWuPdlOJTJsSz7u+QvhUJRlNCZ0WKxRBybu+q5e8n2GuOg7UMgEEB7ezuamppgNBrhcrnIORInbl9AcKvnJSuEsm0/w8Lq6n3xgncvabXao7rmeNdbcXExRCIR3G63MN4gEAgcsY+6CQQC4RglskROGnw8em0+L4JoNJoBsejz4spwmFh5PB6hE5FGo8FXe5rhC7B45NvDwmRnMOAnl/yKXyywLIvlOxrRZnHjw60NMb+XyWQCwzCQyWR9cowlJCQIk+mWlhYS/hqEn+zy7opQhJDvGEvkQh1MymCJXF8cTKHvNdh4/AF8vacZVrcPXq8Xfr8fFEUJXdzcvgB21nPlaqeO7BKYqoKd5AYamqYFp8mOGk4g5vPlvtvf0uv5zLIsLM6jf242mw0Mw0AikUTN/omHhIQE0DQNn88nXINOdHhRVdFDiRzfuZE/xmmajhDM24IB39laOc6fxDkuVx3se5ncUApMLpcLgUAANE33+3iLB7VaLRyboYsKLMviQLMZANBqcQsh0gOJ0+lEdXU1DAYDLBYL2traUFNTg9LSUpSWluLIkSM4fPgwKioqhNJ2QiT830YtEyNRIYFMLII6uHjRWyc5u90Oh8MBiqIEM0UsKJVKYVyj0+mIyYJACCEmr/PLL78c8wbvvvvuPu8MgUAgDAaRJXJ8+GPkoMPr9QolZPzgob/wTgOfzwePx/O7lIj1BL/SplarUWN0QRf8bFrMLry2vhp/XzB6UN5XoVBArVbDZrNBr9cLHeZ6o0bvQGdwEvzd/hY8eO4YSMW9r4swDAOjkVtpTk1N7bNtPTMzEzabDV6vFzqdLqJs4kSEFyzk0RxMPYR89+Rg4rvIaZUSIeTbOcwFpv+urcLrG2pwSVU2Hj2bc7kplUphwr+7vhNeP4MMjQwlaQn
w+BlQFGB2+mB0eIXrzkCiVCrhdDqxp4E7r6+ZlY/vD7SiyeTCj4facNn06OfZEz+U4sNt9fj05lmYU5La4/b568VANAOgaRoJCQmwWq2wWq1DWoY1XOCD7VU9Opg4cTu05LT734EP+M5MVOCsMemQiWnUG5040mqNq0xyKAWmqg4b8pKVgoDC3zN/L2iaRmJiIjo7O2E2m4WFiQajE+YQIXZnnQm5ScoBeU+WZaHX66HTceKwRCKBVquFy+UShLZQkdDn86G+vh5ZWVlITk4mJVnd4MvjcpIUwmeTkiCFzeOH0eFFcZThHMMwQjZkcnJyjwsiPZGcnAyTyQSv1wuj0RiXQEUgHM/EJDC9+OKLMW2MoigiMBEIhGEFy7KCU4kXmNJ6EZj4tt8JCQkDNgHiJ1Y2mw1Wq3XIBCaWZcO6Qf2yn/td09Qy6G0evL2pFpdOy0Fx2uDkRKWlpcFms8FsNiM9Pf2og7ndQTcIwJVe/Vquw8IJvQs9FosFfr8/rJV7XxCJRMjOzkZjYyMMBgM0Go3gVDlRcfu5yW5vXeQsLh/8AabXDCaWZYVJW6JCggQp30Vu+JbI+QIMvtzdDIALUb5xigZihJfHbQ4pj6MoCnKJCHlJSjSanKjW2QdFYFKpVDAYDDjUyk3MZxQmQyER4flfKvH5rsaoAlO1zoaPttWDZYGvdjf3KDCFNgOItVTY5PDC4fEjLzn6uaLRaASBqbeskxOFnkvk+JDvcAdTtPOp1cwJTNmJcqhkYpw5Jh0/HW7HqkNtx4TA9OOhNtzx6V4smpGHW6dy5xMv8Lh9AVz77g5olVIsvXIyEhXRBeuBQKvVorOzE1arFQzDgKZpwb3Es6vehEunHX1xJBZaWlrCytVDO5exLAufzxeWHaTT6WA2m9HW1ga3242srKy4yr+Pd4SA75BrT7JKinqjs8eg7/b2dng8HojF4j4tKNI0jYyMDDQ1NcFgMCApKanHRRUC4UQipitTXV1dTF+1tbWDvb8EAoEQF1aXH94ANzHmu4qkqvkSufBBh8/nEwSYgXIv8cSSw+T2BeLKGooXm82GQCAAsVgMtVqNTVVcKeBtpxVj3ug0eAMM/vH9kUEr41MqlcKEnBfyAOCLXY34MkpmzO6gK4Nf3f9qT++5MizLCttNSUnp9+Bbo9EIIlVra+uQlzcONa6Qlurd0Sql4BfUjXaPUC4QTUR0egPwM9xneayUyK0v1wnXC1+AxTcHuMyO0BJMPn/p1JFdgk1omdxgoFQqYXEH0GzlPoNpeUm4YkYeaIrLjokWMP7imioEP36sr9AhwEQ/rvnJr1KpjLlU+Ib3dmLBS5sEV013+Ougx+Mh3eRw9BK5WAQmvkQuM5FbuDh/Eheg/mOc3eRCBabf81r37m/c3OGHg60w27j7H3+crC/XYXdDJ9aWdeCKN7ei1ewatP1QKpWQSCRgGAZWqxUAsL/JDAAoSOFEix11pp5eHhd2u104v3JycpCbmyuISwC3YC+VSiGTySCVSiGVSpGTkyOIsp2dnWhoaCDl2yE0d3LHRl6IwywlKOrzztpQrFar0IEzJyenTw0MgK6ucwzDCG60Yx2GYeD1ek/4MQ+h7/Rr9M2yLDn4CATCsEZvD+YNycXCoF0okbOFDzqMRiNYloVCoYjqVmFZFrvqTfjrF/tx6nO/YkMcwdh8DpPL5epxQvx/3x7GvOc3DFrgNj+Y0mq18PgZ7AwOlk8flYYnLhoPqZjGb1UG/HS4fVDeH4BgIe/s7ITP58PhFgse+PoQ7v/6IBqN4fkWfNnPX88eBQBYX6HvtVW03W6Hx+MBTdNCaGt/ycrKAkVRcLvdcLkGb3JzLCCEfEdxMIloCtqgu0Bn4SaJNE3D7mVwzbvb8dnORuG5fP6SmKaglIq6Qr77ITCFjkVsbh9uen8n3tpYE9f2euPL3Zy4WZzKCaQ/VVjBghJcjka7B4dbudLaU0IcQSODAlPNIAlMIpEINRZuklmcokCiUoI
MjRxnjuHOsy92NYY9/3CLBasOtYGiuL9jp9OHfY2RAbUsy4a5K2LB6vbhUIsFTm9AOHej7S8vyvGT+BMZZ48lcrzAFFki1x0+5DsrkTsWzxyTDqmYRoPRiUZT7JlBEolEEOX5EtfBprTVir3B7odObwD72tyQyWTC77nyUJvw3MoOOy59fSvK2wfnuKEoSjjW+WOfF5j+dEoRAKBW74jqfI4HhmHQ2sqFsCcnJ8ccnk9RFNLS0lBQUACapuFwOFBfX0+yf4I0mXgHU5fznM/bNHbL2/T5fGhpaQHALUb1J1CeoiihhL6zs/OYzpfz+/1ob29HeXk5KisrUVtbC7PZTIRMQtz0SWBatmwZJkyYALlcDrlcjgkTJuDdd98d6H0jEAiEfqPrlr8EdAlMRocHTHD1PhAICAJMWlpaRL7Bt/uacfaLm3DFm9vw7b4WNJlc+HZfS8z7IRaLhcloNBeTxx/AT4fbwLLAO5sG3g3q9XqFcpekpCTsrDPB42eQlSjHiPQEFKSocPvpJQCAp1aWwh+IHFBYnD4cbDYLn1lfUCqVUCqVYFkWJpMJyzbXCT/7+UiXsKW3eVBncICigCtm5GFqvhYBhsV3vXzmvHspKSkpbDW4P4SW2p3onWKEkO8oAhPQVSant3JCnEQiwYYKHbZUG/FmiNhjCclfoihKyGCye+KbKIWuOPv9XeLU//a3Yn2FHs/8VI6VB+PvptUdndWN9RWc2++VP06FViGG3hnAfn1AuE48ubIULAuMz9YgXdNVAlsidJIbvHDeKhP3uY3P7BLFrzqJy4j6em+LMPECgBfWVAIALpyUjbPHcW6ItWWRgrbL5YLH4wFFUTGXmob+jkeCYls0QpsenOh0OZi6CUzBrDlXHA6mbC133CmlYmQF3Uwd1tjFEN41A/x+ZXKf7uCaN9DB2+2WRqcw2Xd5A/g1eGy+ee00jExPQLvVjSve2NajgNlfeIHJbrfD6fbiSCt3jJ4+Kg2jM7j9Ci3ddvsCePz7I/h6T3PM72EwGOD1eiEWi/tUJqpWq1FYWAiapuF0OtHQ0EBEJnSVyIVmZPH3JFOIg4llWbS0tCAQCEAul0f8DV5bX413NtXGNc5RqVTCcXssuJh8Ph8MBgNaW1vR1taGtrY2NDU1oaKiAgaDQRCUXC4XmpubUVlZidbWVkFAI+YSwtGIW2B67LHH8Je//AUXXnghVqxYgRUrVuDCCy/EX//6Vzz22GODsY8EAoHQZ/j8pdD8k5TgqpYvwAqTXX6VRiaTRaxm7W8y469fHEC1zg6lVITpBdyKI2/JjpXeJla76zuFycbWGmO/W5v7uglE/IqsSqWCTCbDb8HyuFNHdgVh3zGvBMkqKVotbmyvjSwFuOWj3bjo1S047T/r8fqGaiE8PR5CW0LXtBrxw4EuAeCnw12r1fwEYnSGGokKCa6YngcAWLG7OergxuPxCN1/UlJS4t6v3uBXmC0Wywm9kueKWWDiVnClUilq9dzfpMnkFAQq/pzTBB1PvHvDEaeDiaKoqGVyv5R2CN8/8NXBfp9LX+1tRoBhMb0gCeOzE3HuaC0AYGU5J6J8u68Z/9vfChFN4cmLJ4S9tqtErn/70BulOu46NDqlS3yYNzoN+clKmBxenPfyb1h1sA17Gjrxa7kOIprCX88ehbPGci6ndWUdEdvkrxcajSZmsba6I1Rg6lk8EgSEXtycJwrOfmYw+QKMsIjCl8gBIYsocbpt+DK5aC4MXkz5eFt9XNvsCbvHLywY/OWskQCAnS0uyJScS3BDhQ4uXwC5SQosGJ+Jr26fg5lFybB5/Lj7s32wugfu2Kk3OPDpjgaIxBJhIWhPdRu8fgZapQQFKUrMLOLuW6Flcst3NOKDrfV46JtD0FmP7lzxeDxCl9rMzMw+L4QolcowkYk4mbpCvkMdTCmqyLxNm80Gu90OiqKQm5sbVkpfrbPjPz9X4Okfy3DHp3vj6qzLC1VWqxVO58B3G+wvLMvCYrG
gvr4eFRUVaG9vh8lkgtFohNFohMViERz8+fn5GDNmDNLT0yEWi+H3+2EymdDS0oLq6mqUlZVBp9MRoYnQI3ELTG+88QbeeecdPPPMM7joootw0UUX4ZlnnsHbb7+N119/fTD2kUAgEPpM9w5yACATi6CRc+4HfuARGmbb3b30UXBAPX9sBnY8fBYeu2AcAMTdspifWDkcjojBYPdyu0+2h5e2xEqAYfGP/x3G+H/8jFfWVQmlzKHh3gCwqZLPi+nKmpJLREKIdqjwAwCVHTahBXtzpwvPra7AnGfX4bX11XHvo0ajAU3T+OawEX6GxZhM7nPZ22gWslv4VWJezLtgchbkEhpVOjsONEe6I0K748XbCeZoKJVKSKVSMAwjdBg8EXH5eg75BroEJkOwjFEikaDOwAlMDAvh+9CAbwCCg4nvqBUP3QUmq9uHbTXcsT0mUw2HN4DbP9kbt3jFw7IsVgTDvRfNyAPLsji7SA4KwI4GK9ZX6PDod0cAAPecNVI4Xnl4ganD6hnQCTGPL8DgSDt37SpJpAUBVCyi8enNszA1Xwub248/L9+LWz7aDQC4YnouilJVmDcqHSKaQpXOHlaeyjCMIDDFWr4DhIto0QQmjz+Ayg4bJBKJUIJ8oruY+BI5ZQ8ZTB5/7yVyOpsHLAtIRBRSVSGLKPy5GCV7pjd6CvpmWRb3f3UQH2ytx2PfH0FZW///bt/ta4HDG0Bxmgp/OjkHWjkNh5fBgTZOKFgVLI87fyJXppyolOD9G09CfrISLWYX/vlDab/3gee+rw7g/749jA+21gsupp213D15ci43JjgpKDDtCt6b3L4A3gg6M70BBu9tqe/1PViWFbL8EhIS+tWEAuDuS0VFRRCJRHC5XGhvH7zS9u6sK+vAhH/8HDFOGCqsbp+wcJEblsEUWSLHX3OSk5MjGq7saegSD1cfacdVb2/rtSw/FLlcLhw7w83F5PF4UFtbi6amJmGsq1QqkZaWhrS0NKSmpiItLQ1FRUUoLi6GRqOBWCxGeno6Ro8ejby8PKSkpAhdU/m8qfr6+hN+kYAQnbgFJp/PhxkzZkQ8Pn369DCLOoEwFLAsC6fTiba2NtTX16O2thY1NTWoqqpCRUUFysvLUVpairKyMjQ1NQ07R4Lb7RbsqPzNoL29HTqdDh0dHcKXyWSC3W6PyB4hRBJNYAKA1OD/9XYPGIYR3C+hob0AZ61eeZAb6P75jBKo5RLkJnErZB1WDzz+2Fe4+GwJ/jgNZUOwBOeqkzinztd7muOeFLu8Adz+yR58uK0BXj+DpWsq8cxP5bDZbPD5fKBpGhqNBh1WNyo6bKAoYO6I8A5SF07KBsANrrz+rnPj673cJHve6DT85/JJmJynhS/AYukvFeg4ysqtyeHFZW9sxfM/V4BhWNA0DZFMiZ+quIHO384ZjWn5WgDAL6XcIJkP+J5RyE1wNXIJFo7nxK8Vu8PDvhmGiRDQdFY3Fn+wC59sb4jjE4xOaDbHiVwmF2uJnNHBnXNSqRS1hi5XCx90bXWFC0z85Dqe493lDeDbfc3wsdwwhh/kbqjQwxdgUZKmwseLZyFDI0O1zo4Hvj7Yp2vlzjoT6gwOqKQinD8pC06nE+kqEU7K5SYxt3y4G3aPHzMLk3HHGSMiXq+RS5Ch4a41g1EmV9ZmhdvHIEFKI0cjDruu5CUr8eVts/HnM0pAUdx5KBXRuCvoFklUSnBS8PxaG+JistlsYBgGEokkrEve0QgNMjfYPRGOjudWV+CcFzdh+Y7GCDenyxvAu7/Vot4weE0OhiM9O5i449rtCyAQCAjjlO4CU1sw9DpDIwdNdy2MpAg5g/E5mHwQ4+sjFhxsCRfSX1pbhe+DYgLLAs/+VB7XdrvDsiw+3cEtolwzqwAupwMn53Hn1M9H2uHyBrAuWB533sQs4XUqmRjPXzEZFAWs2NOMtaWR7jueL3c14ZwXN+JwS++LAiaHV7jfvLe5DsoEbsGjtJ07FifnaQE
AMws5gam01Qqb24flOxqht3mEv9Wn2xt6FZFtNhscDgcoihKy/XojFkFaoVBAk5qJHyttaGo3/G6ljZ/vaoLd48ezP5VHLaf/vWkOupeSlBIh0w/ocjDxJXIsywoCS/exHsA5yQFunJOklOBAswV/eG2rsDhyNNLT00FRFOx2u/A+vxcMw+LeL/bj0te3wOLsOnYsFgtqamrgcrkgEomQlpaGkSNHori4GBkZGcjIyEBmZiYyMjKgUqkijku+TDorKwvFxcUYO3YscnJyhByw6upqWK3WmO6vbl9AENUJxzdxC0zXXXcd3njjjYjH3377bVxzzTUDslMEwtHgu33p9XpBfGlpaUFFRQVqa2thNBq5GnqnU8iS8Pl88Pv9YBgGgUAAFosFTU1NKCsrQ2NjIywWy4BZjFmWhcEeWycWlmVhs9lQV1eH6upqmM1meL1eOJ1OWCwWGAwG6HQ66PV64au1tVWwuZaVlaGhoQEGg4HURkeBdyhFCEz8ANzuhcvlAsMwEIlEEStaX+5ugtfPYEKOBlOCA81klVSYEPAtomOBoihhwsYLWgDQYnahSmcHTQEPLByD4lQVbB5/XBlPJocXf3x3O9aUdkAqprFoBidUvb2pFo98dwQMy0Kr1YKmaWyq5MSsSTmJSFKFu31mFiUjTS2DxeXD5mrueaHZR1edlIcrZuThf38+BTMKksCwwDd7e9/P1YfbsaehE6+ur8bfVhyAL8BgQ4MbDi+DbLUYZ4xKxbkTuEnET4e4yQWf4TKjoCus+4rg7/TFrqawzIvu3fG8fga3f7IH68p1+Pfq8ohywXhZV9aB6uAcxel0nrDdr4SQb2n0oQMvMHUGB/NisRh1+q7jnBdYLN0EJn5C4Igxg4lhWNy5fC/++sUBvLSZE395gemXYI7XOeMzkaaW4bU/ToOYprDyYJvgNoiHL4Ji5gWTsqGSiYXz9vLJXHmZn2Ghlovx4lVTIKKjTxhHpnMTVv73DzAsVh1sw3Ory/HnT/fi/Jd/w7z/rMdj/zuM7bXGHru6RYMvJZ2QpQJNURHCtURE474FY/Dp4lmYXpCERy8YixxtVwnJ/LFcWce68q6JOu/SS0xMPOokOJSqYIkc/zmEuphYlsVPQUfKC2sqIJJxYoLD4YDT6cS/fizDU6vK8OA3B2N+v77w06E2fBVHXs5gE0uJHH9si0SiiM6YfMB3dqIi7PG0hHCxN5RWswuf7WyM6Nro8gZw54pSvL/PjD9/14B7v9iPDqsb3+1rwX/XVQEA7jpzBCQiChsr9dhcZYjYdij7m8zCtaA7+5rMKGuzQiamcdm0HNhsNpySzx0TvxzpwLryDqE8blJuuNNnZlEybjm1GADw4DeHwvJ1eL4/0IoHvjmIyg47Pt7W+yLDhgod+GFTq8WNX8r0UKvVqDRy252Sx71/ZqIc+clKMCxXxs7nyj16wTiMSE+AzePH8h09O4+NRiMAzjlztK6Mv5Z3YNLjv+DO5XvDFnqi8drmZry+04T/W6tDdePgO4oCDIsdtdzv0mJ2CU6zoWRfE3cdHJkRHm+Q0u088Hg88Pv9YWOxUPYEGx5cd3IBvrnjFBSlqtBiduH5Xypi2g+pVIqkpCSwLIunfjiM65ftwH/XVmFbjVG4fw4W7/xWi2/2tWBvoxnP/VwOhmHQ0tKCpqYmMAwDpVKJESNGICMjI+auoNGgKApJSUkoKSmBXC5HIBBAY2MjamtrhTK7aDSZnDjj+Q2Y/cyvOBAMzyccv/Qr5Pvmm2/GzTffjIkTJ+Kdd94BTdO49957ha/B4LXXXkNhYSHkcjlmzZqFnTt39vr8FStWYMyYMZDL5Zg4cSJ+/PHHsJ+zLIvHHnsMWVlZUCgUmD9/PqqqqgZl3wl9h2VZWO12bCutw/dbDmH5+v34YmslDtW2COJLZ2cn/H4/aJpGYmIisrOzkZeXh/z8fBQWFqK4uBglJSWCcp+amiq4SaxWK5qamlBeXo7GxkYYjUY4HI6
43E0V7TY8/3MFrn9vJ6b9cw1mPLUW5/73N7T00FaX79JTU1ODhoYGYeKi0WhQUFCAvLw8ZGZmIiUlBcnJyWFfCQkJQhkQwzCw2Wxob29HdXU1amtrj+kuFgMN72AKzWACugQng83DucECLDY1ecNylQIMKzhgrj+5UJhsURQluJjiLZPjV81CV7f48rip+UlIUklx7ckFAICPtzXEJBhurjLg0te3YF+jGYkKCT69eRb+ffkkPHvpRFAAvi814bnfDPCIuAH8b1WR5XE8IprC+cEV45UHuIHjlmoDOqweaJUSnBHsTgUAV87gc5Gaet3P0C5V3+5rwe0f78Hy3dy2Lx6jgdPpEErzdtQZsaFCB1+ARYZGJnzOADCnJAWXTsuBn2HxtxUH8MaGGiEsHODcSxRF4fEfjgidiWxuv1DS0BMHm82Y/8JGoRQylF31Jiz+cDeuf3836u3cLZMvHwK4Mo9nfizrt4h1LNBbFzkASFIGA1WDq6cWDwtHSIYFn4UkhHwLJXLBDKYYVzbf/q0W68q5c+bnchOaLT74fD54/AHBCXhOMMB6RmEyHg2WtD63ugLf7I1dXFh9uB2rgu7FK4POQv68PXNcJorTuAnKM5dODBNtujMipJOcL8Dgni/248/L9+L1DTVYdagNR1qtqDc68dG2Blz19nac/My6mDvg8QLTtKD4HSpchzJnRCq+XjIH180uDHv8rKDAtKPWBKvbB4ZhhN+RdxnFgt3jF+51vCsyNOi7wehEa1AMMdi9+GhHi1AitH5/tRD2vKPOJJTJDjQ6mxt/Xr4Xf19xYFBD1+OhNfiZhQbDA4Bc3NVFLpaA7yxt+OuF9uz2SPHludXleOibQ/jDa1tQq+c+B6+fwZJP92BfkwUyEXef+2ZfC854fgPu/4oT/W47vRh/O2c0rpnF3Z+e+amsxyDkL3Y14pLXtuDc//4W9e/5abAE/IJJ2UiQ0nC5XJiYIUeiQgyjw4vnVnMT+vMmRnf63Hv2KIzKSIDB7sGDXx8My8rZVKnH377cL4hGGyv1vd6f+GsJ33Hs3d/qIJInoNnCfe6TcroELj6H6Ynvj0Bn8yBHq8AV0/Nw22mc4LVsc51wnQwwLD7cWo/X1lfD5XLHlRH4xS5O2F55sA23fry7xywgr58RrlE1nV7c930NjJbBPbbL2qywuruu1W9trB3yhc0t1dyYprsjOyUk5JthutxLKpUqQqw1ObxCZuC0/CQUpaqw9MrJALhjKtZ7fFpaGr4qteLzA0ZsqjLgxbWVuPqd7Zj0+C94MdhkYaA50GTGf37uEsGW72zETzvLBMc1X/4W7RrSV2QymTCXoigKLpcLTU1NqKqqgslkCps/WVw+3PTBLrRZ3LC4fLhu2Y6jOgsJxzbioz8lnMOHD2PatGkAgJoabgCUmpqK1NRUHD58WHhePKtesfLFF1/g3nvvxZtvvolZs2bhpZdewoIFC1BRUSG0vg5l69atuPrqq/HMM8/gggsuwPLly3HJJZdg7969mDCBC+J87rnn8PLLL+PDDz9EUVERHn30USxYsAClpaURToZjEYZhwkoX+b8Ly7JgGAYsy5Wr0DQNkUgEiqIG5W8XDaPdg4+2NaCi3YaLp2Rj4YTMsPdmGEZw8fxypB1v7zKgzRY+AVFIaPxzYSHmFCWCpmmoVCrIFUp8vqsJpW3NGJmuxoScRIzNUkMpD7+wKpVKZGRkwO12w2KxwGKxwOfzwWq1Crb9uk4vDun9yElNRGFGEjITFShMVUImDp9g/XCgFX9bcSBipam83YY/vLYF7914EiYEByl8OY/BYBAGjgGWQmpKEtJSU+PKj2FZFm63G3a7HTXtnfh6vw6FSQ6c7nIJtdXdb6InGj2VyKUJDiYP7HYGH+8345syK97aocPnt56MURlqbKzUobnThUSFBBdOzg57fW6SEpUd9qhB3xanD/qgc4qiALVcjHQ1dz3hV83cbjf8fj/EYrEwKZ43ihN8Lpuei//
8XIGKDht21pkwqzj6gLRGb8e/VpUJA+TsRDk+/NNMjMxQg2EYzM5gcd/cVCzdYsDmRifOeXkrLp2aKwR8nzYqUmACgAsnZ+GDrfX4pbQDbl9AKI+7cFJ22LF/3qQsPP7DEdQaHNjT0IkZhclRt7cvuFp1w+wCfL6rSdhfjVyEs0pUMJvNyM/Px/hsDY60WoWB0ozC5LBrAkVReP7yyUhLkOGtTbX49+pytHY6cGkxBYmIW1X7bGcjlu9oBEVxAeHl7TasK9NhTklqxH4BnNtmySd70WJ24elVZThzTLqQ4cCyLJ5bzZWC+BkWT6xtwdJz0iDu7ER6ejo+3dGIR77j7nsjM9S4fHpu1PfoL3saTPj5SAeumZWPgpTYS5YGGj7kW9aDwMSvFltc3HW62Rpe4sFP6s0ubtIbkcEUQ4nczjqTcHxkJ8rRanHjs0NmPJapwbYaI+weP9LVMkzO1QqvuWFOIZo7nXjntzrc/9VBpCTIcHrw2Hf7AjjYbEFqghSFKSrQNAWHx48nfygV3EtzSlIwLV8LhmHgcnHnuzohActvPhntVrfgbOwJXmA60mrFkk/2Ym1ZB8Q0hStPykNJWgIKkrnjbfWRdvxypB16mwfPri7HVTPzhc+oJ/YFhdSZJalAwASXywWWZWO+jxelqlCcpkKt3oFNlXqcVpgglMfxYcexUBP826apZZhTkoKNlfowB9PWGs7xoJaJYfP48famGlw983SInU68srUJvE7Bslz2zuK5RTG/d6z8fLhdeJ81pR3C32WoYFlWuH/kheTGAIA86BJ0+wPwernzJbrAxIk3oQHfQPTsGZ76YN5Wlc6Oi1/dgqVXTsbKg23YUKGHXELj2QU5YPw+fHjIiQMt3N9wwfgMPLBgDADOxfT1nmYcabXi+wOtuGRqTvj2DQ48EcxHare68acPdmHF7bOF83zF7iZ8t59zvV5zcn5XAwqFHPPHZuLrvc1oDHY+PD+kPC7s85GI8MKVU3DJa1vwS2kHZv1rLS6bnouTCpPx9xUH4AuwOHdCJtZX6NBudaOyw47RmZGt6H0BRnD0PnvpJPx5+V4carHgi/16sAAyEsSQU13XpZmFyfhqT7Mglt5xRgmkYhoXT8nBC2sq0WZx49t9LThrbDru+Xy/cNz73Q6clScKywis7LBhTWkHFs8tCis7dnkD2BjcJ4mIwoYKPW54fyeW3TAD6m7j2M3VelhcPiQpJfAFGJTqPbjt49349PZTI8apA8X2oHtpWr4WZW02lLZZsbnaEHXB6vcgwLDC53xKN4GJd2gzLGB2+YQOvtHK4/YGxfqSNJXwusm5WiSrpDA5vNjb0BkxFqvR26GWicME4o1VJny0zwwAuGisFoxEgR11JuhtHvx3XRW0SgluOiX8+mZz+0CHdFSNB7vHj7s/3wc/w+L8iVkQ0yz+d6AdSze24L/n56CwIB9qtRoBhkVVhxUmuxdmlw9mpw9yCY0R6QkoSUvo8b0ZhsWnOxuRqpJGzNNomkZmZiZSU1NhNBq5+A6XB+6WFnR0dCAlJQUJGi2WfLIX1To7MjVyZGnl2NdoxrXLdmD5zSdjXPbRFzIsTh/WV+hw+qi0MNc9wzDwer2CM83v9wslxRRFCfNbUBTEwTkuP98Vi8UQi8VRnaGE/hP3kbx+/frB2I+YeOGFF3DLLbfgpptuAgC8+eabWLVqFd577z08+OCDEc//73//i4ULF+K+++4DAPzzn//EmjVr8Oqrr+LNN98Ey7J46aWX8Mgjj+Diiy8GAHz00UfIyMjAd999h6uuuiqu/atq64TGwYCiuIkQBc4NwJ+M/PcUADrke4oCaIriVuIdXnTYPNDZPLC6/JCKKMglIsglNBiWhdPjh8vLfbl9Abi8Abh9fgQCLORiCjIxIBNRsLq80Nu90Dv88PgZnJSjxBlFKqh6KG0AuIu0nwFUcomQFSOTyaBUKqFQKIQT8FCzBc+uLsOhZgtGZqgxPluDCdmJSFPLIKK
pyC+K+1cioiERUXD7GHy2sxFf7m4SAixXH2nHxGw17j49HxMz5HA6nbA7nKjr9OK9vZ040M7dzOViCikqKTRKKTw+BrUGBx5YVYd//WECFp2UjyaTE3/7dKcQRhxKpkaOnCQFcpMUKElLwPWzC6BVSqFQKKBQKASxyWazwe5w4sOdbfhkvwkBFgD0wnZSVFLceloxrj25AEqpCK/+Wo2lwVWJU0ak4NwJWZiUmwiNXILbPt6Dig4brnxrG15eNBlFahZNHQbY3H40Wnwo13tRbvKh2uACTTUgRSVFaoIMRWkq3L9gdMRksqLdhk+2NyAlQYqZhcmYkq+F0cXi9d/asGJ3M/zB0fPeVheWzGRgtVqhVqtxoN2NVzc1Qu/wYcG4DFw0JQfT8iPDrI9HeKEnLaF7iRx3k9JZ3Shvc+J/5dxA2uTw4o/v7MDnt56Mj4LW+ium50a0ke7JwdTc6cRZSzcKxzbP4rlFePi8scJ5xXc9U6jU2BpcfZs3mhPKExUSXDI1G5/tbMLTP5bhlBGpkASvGWanFwaHF3qbB3sbOuFnWIhpCteeXIB75o+EVimF3+9HQ0MDXC4XTi9KwLiibLy1pQXbao3CpFklFWFqMPeoO1PzkoTJ+6qDbfg5WHZ0WTcBJUEmxvkTs7BiTzO+3N0UVWCyuHyCsHD3WSNx/qRsLP5gF2weP66ekQu5uKvEbeH4TBxptaI2mHcwo1tgMsBdOx86byzS1DI8taoMH+9owpe7KYzPVOKkJhHe31wPAPj7OaNRkqbC7cEJ/SPnj4043hmGxb1f7hecFx4/g2d/Kserf+QWUTZU6rGrvhMyMY2cJAVq9Q48u9mAp8/KwLvrDuHptV1ZUO9trsNl03IG9Jw63GLB0l8qsD4oQNYbHHj7+sgMxN8LfhX9aA4mi4crV2wwcdftURkJqOywo87ggD/ACAKUJiLku/cSAoPdg7s+24sAw+IPU3OweG4RLnhlMzbVO1Gjd2BDG3cunj0uIyyPBgAeOncsdDYP/re/FUs+2YOHzxuLXfUmrCvTCaVCGrkYk/O0aDQ50WB0gqKAW08rxt/OHg2KouBwOMCyLMRiMaRSKTJlVMTEPhq8kLE5eJ5LxTTevHYazhwT3h57/rgMeP8wEWe9sAFNJhcONpt7nbRtqtSjxewCTQEzitLQWMt1wnS73XGJQ/PHZuBtfS3WlekwJZm7bmk0GlAUBY8/gE+3N2JDpR5Wlw92jx8Ojx/nTsjCYxeOE7bB5y+NTE/A+GxuMSVUYNoSDF7/09wirCvvwOEWK97YWItRqQqUGzyQiylcOTUTH+1qww8HWgdFYOKz9AAuc2rJvJIBf494MDt9wrEX6tQEukrkWBZwunsRmMzRS+RSQxZQusNnY+UnK9FocuLWj/cAAMQ0hTeunY4ShRtWqxXvXpWP7a3c9fu204uFcyolQYbb55XgPz9X4D8/V2DhhExhf/1Bh57TG8DUfC0ajU6Utllx12f78PZ10/HGhhphvHT59FyMTZOjtrYWAFc6du4EjbCgEa08LpQJOYl46aop+PfqcjSZXHh/Sz3eDwZtnzoyFf+9aipu/Xg3NlTosbFSF1Vg2l3fCZvbj2SVFGeMScdl03OxfEcj3tjI7dOoFCnMZrMgSPAOJu4zlwvdTaViGovnFuGpVWV4ZV0Vlv5SCYPdAzFNwc+weH1zC8acn4U5wZLvBqMDV729XSjv+3NIftvmagPcPgY5WgVeumoK/vT+LuysM+Gad3fgs1tODhMCvt/PlcRdPCUHC8el4Yb3d2N3swPXvL0Ns0ekIUkhhlYhwrwxmUhOGJhFc15gWjghE5Nytfhgaz3e2lg7ZALTkVYLzE4f1DIxJnc7XiQiGokKCSwuH/RWF/zBEuLunYKBkNzHkLJ8EU3h9FFp+HZfC9ZX6MMEprI2Ky56dTMocOOvO88cAYPdg798vg8sgPNHq3Hr9ETk5eVBo5mKNzbW4LnVFXhyZSmytVxnRJZ
lsXxnI/61qgwimsL/nT8WV87IE8YRh1sseDwYqj9vdDoum56DU0emQSLqmss99t1hNBidyNbK8fA5RWhsasHaMhq1nT5sM0gwcYIa22uNePz7Iyhv77mbaY5WgRvnFGLx3CLhXPcFGPx9xQH8L3icnTshE//6w0RB5PEHGGys1GNbjRFVOjuqdDa0mt1IVogwI0eBWTk2bGt2YWuNHSqpCO/deBLykhW4btlO7G/iRKbHLxqP6QXcmLP7+MkfYPDZria88EsFOp0+5CYp8PLlY5EuC8DpdMLr9aJM78G7e0zIVktw/RQt0lRd58f+Nhfe3t2JVpsPmQliZKslyNFIMDNXgQnpMuH9aJoGAxpbmlyoMXmgdwTQYffB7PJjRoEWl07JwkkFWkGwCv2yuv346VAbdtaZkJIgRX6yEnnJSmQmyqGSiqGSiaGUiiAT02G/X4fVjf1NZhxqtsDtCyBDI0e6RoZ0tRwFKUpkdsvVC8UXYHCk1YrKdhtS1VLkJ6uQm6QIE6pZloXV5UdTpxMtZhdMDi+UUhE0cgkS5GIkyERQyyVIkImgkohAUZwjktMCGPgC3Pc+PwN/8DF/gIXZEluDh/il0iHC6/Viz549eOihh4THaJrG/PnzsW3btqiv2bZtW0Sp3oIFC/Ddd98BAOrq6tDe3o758+cLP09MTMSsWbOwbdu2HgUmj8cTlsHBu13+8OZO0DJl1NcMNXta3Xh/bydOL1JhbJoM7XY/Wqx+tNl8sLgDcHgZuPycOCGiAJWURoKURpZajHFpcoxLlyFbq8TyA534udLctd2GTsGi3xdGpsgwNk2KX6rtONRqwy2fHUGSXAS3v2t/AEAqonDzqcW444wRQl6HL8Dgga8P4pu9LXjg60PYVmPE2uBkQSUVYdFJ+Wg0OVHaakGrxY12K/fF7+8Xu5rwyh+nYlo+N4mlKAoKhQIGF4sHfq7GrmDY3/Q8NRDww+j0Q+/ww+jw4pmfyvHmxhpMyEkUyo1unluEh84bi4DfB6fTCafTghcvyMMjqxuwt8WBmz/e2+tnEWBZ6ILiYmmbFZurDHjtj9MwdyS3IvPjoTb8fcUBIbMBCIqWgCAsTc7T4lCzGetqHagyenHrjCT8VNWMLY1dIsiH2xrw4bYGZGmkuHhSJq49pSSs60Y0vH4GUvGxp/AHGFYYwPWUwdRuceLNFicCLOfoMQQ//6ve3gZj8LV8yVoo/ISAb43Ls7POBI+fgZimhFwNq9uPZZvrUG9w4L9XT4VKpRIEpiMGHxzeAFITpBgfspJz3cmF+GxnEw42W3AwStc0nvljM/DQeWNQkta1ItfR0SEEOubn52O8SoV54/Owp6ETb2yoxtoyHf4wLSdskBIKTVO4YHI23t5UiydXlsLtY1CSpooYvAFc6dCKPc1YebAN/7hwfMQqGF9rX5CiREqCDCkJMnz75zn4+UgHbppTiJbGOng8HlgsFpw7MVOYfADhA73u3HxqMTI0Mjz27SF0ugPY2+LA3hZuUrBwfCbumFcCpzcAqYhGg9GJGr0dI9LDB5VvbKzB+go9pGIa//rDRNz31QGsPNiGG+aYMD0/Cc8HnTI3zCnEVSfl4eJXt6BU58Hj63U41MFN0s4pScDGegdK26zYXmvC7JKjlz8cDZZl8dj/juDjbgHl22qM8AcYiHv4uw027qN0keMDVa0eBnK5HLXVnOgwpyQVTSYXXL4AGkzOyAymYMi3188NaqIdlwzD4q9f7EeH1YOSNBWeumQCVDIxzh6bjjVlOny8z4hyE3dtPCcYBh8KTVP4z+WTYbR7sbnaIDjPAE5strn9sLr9wvU8O1GOpVdOCft78k6LhISEuITEUKeMUirCuzfM6NFRJxXTmJafhCaTC/saowtMDMPijY01WBrMBTlrbAYS5FxnNrvdzgnXcQhMZ41Jx9ubavHjoTZ4nVbML1ahoFCN7w+04j8/l0dc4wDg/a11uPPMEULuFt9BjhOYuOtYo8kJq9u
HBKkY24MOg7kjUzGjMAnXLduJT7c3IiHYzfOqiYk4K1+ET3Zz2T1NJifykgduPKWzucMWnvY2dkJv80TcF35PmoKLE+lqWURwvjzEfWJzcePNSoMH6xobcO2sfOH4a7NGdzDxCyjdBSaG4cYZAPDJ4llYtrkWH25rAEUBS6+cjDNGp6Ojg8vj8nq9uHByuDuJ50+nFOHjbQ1oMbtw84e78eC5YzAhJxGv/FqN/U1mqOVivPrHaeiwunH129vxa7kOC//7m7DYsGReCf42fyTq6+uErmrJycmYq2Ggkorg8AZ6LI8L5YJJ2ThvQhY2VenxyfZG/FregSl5Wrx57XRIxTROH5UWFJj0uPW0SEFxfbA8fd6oNIhoCovnFmH5jkZhTDU6VQar1QqGYUDTNApSlMjQyNBh9WDJGSPCxkVXz8zHK79WC+6m0RlqvPrHqXj02wPYXm/BS9tMOGuWChYnVy7Ej00+39WIJaeXCJPJrhy5DJxUmIzPbj0Z17+3EwebLXh7Uy3+evYoAJzgvyYYcn7h5GxML0jCcxeW4G/fV2N3owW7G7vGDRMyavHtnadGiJSrD7fBF2AjHNo9EWBY7KjjzqPZxanQKiX4eHsDNlcbcLjFIjj1f0944f7kkpSo98aUBCksLh9ajFakg4WXFeGfP1bi/EnZYdd3voPc9MLwha15ozmBaUOFDg+eO0Z4fPmORvgCLAAW722pw5e7m6CSccfuycXJ+L+FRTAZDejo6IBGo8GS00vQ3OnC8h2N+Mvn+/DClVPw6Y4GbKk2Ctt84OtD+G5fK/7v/LH4ak8zPtpWL7guVx1qw6pDbUhNkKIoVQWryw+r24c2ixs0Bdw/Nw2dHS1QS4FbZqbhpc0d+O+vtdjfYsOPh7hjSikVIUergFYpQaJCApvbjxq9HQa7l3Nx/1iGzdUGvHDlZKhkYty5fJ/guAWAn4J5mo9eMA5VOju+3NWE9ihNXkyuAH6ptuOX4BiApoD7TkmBOmCBiBHjw5tOwnXBY/ruz/YB4MbkE3I0yNTIkJ4ghVpK48u9rajSc9dJEcV1ML72g3148NQ0TMqU47ODFqw4YgHDAhUGL7Y1uXDDzExcMD4Nb2xpxo+lXZ9ts9WPZqsfaHHh2zIrRqRIcfEYNSamy/FztR0/VdlgdkeWQX5/sAPfH+xAVoIYc/KVUMtoyMU0xDSwr82Nnc1O+GKonhTR3NhJJaHhZ1gYnb07tuViGnlaGTLUUsjFFKRiChKaRqPZjdIOJzwhc2QAoMDN3RkWYFgWAQbwxZHnGCuMJ7ZYkLgFJrfbjVdeeQXr16+HTqeLyKjZu7f3SXRfMRgMCAQCyMgIX/HLyMhAeXn0bhbt7e1Rn8+38uT/7e050XjmmWfwxBNPRDwul1AQiSmw4FaduH/Zbv/n/u0JhYRCikKEZKUYaikNX4CFN/gFADIxBZmYhlwsgkxCc99LRBDRFDwBcMKMj0WCXIzMRAWytEr4Ga7NcpXOHnbC90SA5SYIVg+DVpsfe1ojLx7zClW4YLQa7XY/akxe1HZ64fSxCLDc7xhgWTAsdzMKMCwCLAt/gIWP4R4fny7DZeMSMTFDBpqmcfXkZHx20IxV5RZ0ursEFJoCzp2QhQfPHRMx2JSIaCy9YjKyExV4dX01vgsq7DMKkvDioilhz+90eNFocqK504XmTic+29mIeqMTV765DQ+eOwbXzCrA5moDVh9ux+rDbXB4A1BJRXji4gm4bFqOkPPSrtNjbZUFXxy2oM3mw29VBogo4J5Ts3HBWAWqqyojWnY+eloK3thFYU21HSwAmZhGglyM7EQFZhQm4aTCZEzLTwJFceVcepsHL62rwoEmM254fycePm8sjHYPXt/AlaTOKkpGhkaOXfUmwR5/6shU3DN/JKYXJGN7rRF3f7YPjRYPHlmnEz7H80drMCVThs0NTmxvcqLN6sWbmxvx1uZGzClKxDUnF+Gs8ZlhturmTide+KUS3+1vwWXTcvG
vSydGTP78AQaVHXbsbezEvkYzqvV2pKtlKEhWoiBVhUk5iUIXlsHA4fFjW40Rp45KjbCEdzq9CDAsKKorgJiHF5h21JnhY1hIRRSeDk5a//jOdmGl57RRaShMjSxL4kW57g6mig7uddfMyscTF3OluCsPtuJvXx7AunIdLn9jK16+fCwALs9lY3VAeJ/Q1Ypx2Rq8cvVUHG6xwBcIrh4wLLQKCVITZEhVy1CSphLcAjwMwwghvXl5eWFBltMLkvDuDSfB7vH3KBLwXDApC29vqhXEgEun5UYd7M8oSEJxqgq1BgdWHWoTcpl49gbzl6aGHAMj0tWC2KPVatHR0QGLxYIRRUUYkZ6Aap0dSqkIY7MiVxlDf89ZWRJ8dFkOWh0sOhgNdtSZIKIp/OvSiVyIp0yMk0tSsKlSj7VlujCBaVuNUZigP3nReFw+PRd7Gkz4bGcTnvyhFLecVowjrVYkyMS4/fQSJKukeGHRFNzy0W7BUXnh+BT8/fRsiNbU4acqO97eWDUgAlNFhw0fb+cmfRdNzsZdZ47Epa9vgdXtx8EWiyCK/94ctYscXyLnDkAqlaLOwDmvStITMCI9AYdaLKjqsEcITEpZ1/YcHj+0ysgy4d+qDfitygC5hMYb104XhMx7zh6FNWU6/NYQXJmWiTG7h5JSqZjGG9dOwy0f7UaL2YVzxmXivIlZmJqnRYBlUd5mw/5mMzy+AK6YkRdWnubxeASBKTm5Z+EzGikqKSbmJKK504l3bzgJ06M480KZkqfF//a3Yn+UIFSLy4e/fbkfa4Mdtq6Ynot/XsJdZ1QqldC9KDU1uoAVjekFSZhVlIwddSasqrBhVYUNSRuN6AxmaaWrZbj1tGLkJyuhlkvw6P8Oo1pnx29Velw8hRMgqoMB3yMy1EhSSQUHZFmrFRqFBEaHFwqJCJNztZCKaZwyIgVbqo0wObwoSVPh+lm5cNptmJghx4F2N5ZvrsBfzhk7YFEFqw+3g2W5zzbAsDjUYsH6cp2QrTUU8MJdNCFNIqJAU1xpj8PthQLAYz/VorHTjXS1DAuCIirfRa4nB5PV7Q9bIDI5vfAH74lZWjmeuHgCzhmfCRFN4eTgecMHAPfWzEAhFeHxi8bhz8v3YXO1ARe8shnzRqcJAu1Tl0xAjlaBHK0CLy6agjs+5UpkKAp44qLxuH52Idra2uB2uyESiZCTw7k/5RIRbjqlCF/ubsLVM/Nj+hxpmsK80emYNzodNrcPColIEBr4UthddZ1wePwRCyDrgt0TzxzLuYdL0hIwf2yG0FVxXIYSDMO5wbVazvW99IopONBsFjq+8qhkYtwzfySeXFmKK6bn4omLJkAuoXHXrGQcbLGiTO/G6xtqsK3GiFq9A9mJctg8fjSZXNhcbcBpo9LgDzDCe58zjvsbT8hJxFOXTMAdn+7FO7/V4tqTC5CmlmF9hQ4ObwA5WoXQhfX8GSWgfE5sb3LA7A7A7Gaws9mJwx0urNxRhotmj4dIxF1vS1utuP0Tbs7WaHKGuah6guug54daLsa4bI2Q2fj9gVa8vakWL189Naa/2UDSU/4ST6pKhlq9A60mG9KTgVVVTnyysx3rynXYeN8ZkIppeP0MDgQX8rpfn08bmQaa4iIv2iwuZCUq4PIGhDLPe88ehV9K23G4xQq7x4+8ZAVev2Y6EuUiWMyd8Hq96OzsRHJyMp68aDzazC6sr9Djjk+5z14uofH3c0YDAJ7/pQLbao244JXNwvtfNDkbV8/Mx5rSDvxvfwsMdi8MIaWvFIDrp2gxQksLAdx3jsnAhobt2N9kxo+H2kFTwB9n5eNvZ4+OaOwCAGYn1y35nytLsbFSj/Ne/g15SUrsbuAc3G9cOw3pajnu+WI/qnV23BUUhQBubH3exEyMy0rEqIwEFKSoUNZmxa/lOqwt60Cr2YW/npqFGTlSIY5EJBLhmXNysPyAAnuarajSO2Gwe4S4iFASpDSunazFKflKPPubHkd0HjyxXof
8ZAXqjNz178LJ2Wi3uLCrvhNvbW3FW1u5+SBFcYHtN84pRJvFjVqDAwebzPjhYCuqjV4s3WIMe69MjQxnjkxCRoIYKXIKIgSwqc6OjXU2tNn9+Lo0unsnP1GCOflKuHwMOux+tNv9MLsDcPlYeILz9wAD2D0B2IPNTGiKe93IFCkSpCKYXH6YXAEYnQF02P1w+xlUGVyoMkTP8VVLaRQnS2H1BNBu88PlZ2H3RipdiTIa6QliaOUiePwMHD4WTi8Dh4+B08fgKD0EIKYRrETiXK6gaDT1/hLudTE8J4zFixfjl19+weWXX46ZM2eeECU23XnooYfCnFFWqxV5eXnY/cg5Rw3FDA3CYxgGDBsuQvXmEulPPtItpxZjV30nPt/ZiA6bG4UpKhSlcl9pahk0cgnUcjFkEhFsbh8swfrcinYbdtWbsLPOBJ3Ngym5atxzWi5Gpyng9/sFN1e8AX8SiQRqtRpqtVoI25s9FXjE5ka7xS3sj1ouOepn8vcFo5GTpMDbm2px+fRc3H56SUQ3nySVFEkqqSB0/HFWPh78+hBWHWrDU6vKgp2mun6HaflavLRoKvJTlML7pKamIjk5GbnZVpw/3ojVpXpsaXLiglFqTMmShIWrKhQKKJVKSKVSiEQiLC0uhIcB1Eplj/klANdqGABml6Tg4W8P4Zu9LfjnylLh57eeVoz7F4wWBk/NnU74A2yYAHJycQpW3X0q/vrFfmyuNuDk4mT848LxGJulQSAQwNUeD8x2J9aUduCb/e040O7GljoLttTth0pKY97IFFwwOQf7m614f2u9kCu1Yk8z2q1uvHHtdCTIxGAYFl/tbcZzqyui2vBDmTsiFfeeM2rAJ8YdVjdueG8nytttuHByNl7pNrjh85eSldIIYSw1uHLNK/y3zs0XBvqf3DwLV7+9HVU6O27uoVSjq0Qu/OJfGRSmRoXY8S+YlI0crQK3fLQH5e02XPbuXswvUuL8UQlYHyzN48vjQrlwcnbMK4s8sbQYT4ihzn9iTqJQQkFRwB+mRl/JpigKV8zIw79Xl+PLXU0RAhOfETO1h799YmIiOjo64HA44Ha7sXB8Jl7VVWNaflLU1UiWZWGxWNDR0QGfzweKojC1OAPp6em4fk5hxPPPHpvOCUylHbj9dG4Fu9PhxV8+3weGBS6dloNFwYnCvWePxg8H2nCoxYIHgsG2t5xaLIiTZ4/LwL1nj8ILaypx6dQc/OeKyRDRFK6b5cdPVWXYUGlEZasJo7LjEyC6w7snTynhyjwAzgW0+kg7tlQZhkxgch0l5Ds5KAz5GSBAS4TWziWpKkFgqtHbYeVDvoPPl4hoYZDv8AagjWJc+TTo5rrqpHyMCukUND47EXMLVNjcwL3XvDHpvd4z1HIJPr91dsTjNChMzE3ExB5KcnQ6TtBRq9VQKuNz1lAUhW/vmANfgI0otY0Gf67sa+yMyFO6+7N92FjJue6evGg8rgqZhKvVauFc4h0XsSAW0fjslpPx/Y4KfHugDduaXOh0+qCSinDb6SW4+dQiKKVd14z5YzNQrbNjQ0WXwFQZdDCNCrq1xmUnotXixpFWq7CoNrMoWfjbPLBwDC56dQsA4ImLJqAwPxl6vR7zip040O7G6jI9zi+WoLCwMGpeSrzw5XEXTMqC0xvAoRYLfintGFqBKbg4kZcU6TbjxRanNwCH2wsH40djJyds/3ykHQvGZ8LrZ4QS8O4h3xq5RCjPMjo8yAoKUHzgdmqCTLgnds+tiUVgAoCFE7Kw7l4NXlpbif8daBUmhxdPyRaOC4AL6n7qkgn4eFsD7j1nFBaMz4Tdbhe6quXk5IQ5a/6+YDT+vmB0r+/dE90ziopSVchLVqDJ5ML2WqMQag9wZWo1egfENBXmFLzl1CKsLeuAVERjenE6bGYTzGYztFotAM6FxzvLu3PTKUW4ema+IMI7HA4kShjcelIyXtpqxAtBh26CTIxlN56EL3Y14YOt9Vi+oxGnjUrDnoZ
OdDp90ColOCnESXPuhExMztPiQJMZr/xahScvniCUx104OVu4RojFYpw9YwzmjndBJpNBLpfj4W8O4Ys9LfhsnwGTsxtQWFgImqbx0tout/B/fq4ARQF3zOtdZNpWy4k5s4qShXH2racV4/sDrVh5sBW3n14SU6ZOPFjdPvxypAMrD7Zib0MnHr9oPC6dxpXru32BrvtlDwITf/9u77QjoFXgx3Lu+W0WN3440IrLpuficKsFXj+DJKUExd0WFJNUUkzJ02JvoxkbKvS4emY+fjrcBpubE5PuPGME7jxjBFYd4nLM7jijRHjPtLQ0tLe3Q6/XQ6vVQiyi8eofp2HR29twuMWKkwqT8Nzlk1EUfM9zxmXi4W8PYXO1AcWpKjx58QThWJtdkoKHzhuD7bVG2N1+wO+Gy2JCskKEzES50ARILOau1c9cOhHXLduJkjQVHr1gXK/uMq2Say4zozAJdy7fh2qdHR1WD1RSEd694SRh4WzlXXPx7E/l+GR7A2YWJePqmfk4Z3xGxOJumjoNp41Kwz8uHAeXLwClVAyXywWDwSBEIiDgwNVjZbh6bBo8fgZ1nT7Um70wOgMwugIwu1kUpchx48xspKjlkEql+GL8aDy2shzf7G1BnZHLR336DxNwwaRssCyLHw624Zkfy9BmcWNMphrPXDpRuJ8WpyVwx8jJBXjovLFYvoOr5tDbPJiWr8Wf5hZh4fjMiHHn1WcATq8fPx5qx/6mTji9ATi9XERNSZoSF03Kwuh0pZBtzGdAsSwLluVMFk6vnxN1vH44PAGwLPe7KSQiYf4c+m+AAdqsHjRZvNDZffAGGHj9nNkkXS3F1FwNClMUENFdZXedLj+sLj9EIhoimoKYppGcIIVC0pWvHKoj8N97/Qzs3gAoAFIxZ1gRiyhIRKKonXGtVisSn+3xUBKIW2BauXIlfvzxR5xyyinxvrRfpKamQiQSCdZdno6ODmRmRtrhASAzM7PX5/P/dnR0ICsrK+w5U6ZM6XFfZDJZn1s8hg4URSIRBieCL/r7zixKDqsf74kEmVgYjJxcnIIb5hSCZbluQNEmpizLwuv1CicVf2KFwoeq8cFqNE1HFcvS1XIhDDkerp6ZH/NqF8ANQl7941TM2p6Mp1aWwRtgkJ0oxznjM7FgfCZmFSVHrX2laRparRZarRa5OdlYFAwMDP3dFAqFsEIU9p5x/D5yiQhLr5iMcVka/OvHMkjFNP592aSwQRuAHkvb0tQyfLx4Jpo7XchNUnTlgIlEUCqVUCqVuCE9FdedNhYH69rx2Y4GrKmywOQKYNURPVYd6VpFmJGvwXkTsvCfNdX4rcqAq97ehvsXjMGLaysF8SBBJsaUPC2m5msxJlMDo8ODBqMTdQYHfqvSY3O1AZurDThrTDoeOm9MRKkSwNm9DXYPMjTymMrxqjpsuOG9nYId/YcDrbhiem5YcDVvf+/eoQfoKiEAgGy1GH8+c3TIz2T4/s65aDA5MCYz+mCJ/+x1Ng/cvoAwoKwMruSP7tYud2p+Er778xzc+tEelLZZ8W2ZFd+VcZMvmgJO62HAGi+8yyLeFuPdoSgKF07OwmvrazCnJAXZvXTJumxaDp7/pQK7GzpRo7cL5XoMwwoujJ7ynqRSKTQaDaxWKwwGA245rRg2tw9XdBOq+G6Per1e6JIoFouRnp6OpKSeBZczx2bg0f8dwd7GTpgcXiSrpHgs2AGIL7XiP6c0tQx3nTkCz/xUDpcvgGSVFItPDRcY7z5rJK6ZlS90aQKA2eOLMCuvCTua7Hh9bSme/+PJwiCvL+wOlvLMCJlgzB3JCUybqw2466yRfd52X2FZtsvB1EOWn0IqgkxMweNnYfNRQlBvUZpKKBOr1tlhdoaHfANcJpjXz0QN+m6zuIRV/WtmRV7nbzwpA5sbuPJIvnvcQMI3ggAQtaFILIhFNGLN3B2bpYZURKPT6UOjySlk8RntHmwKBvR/fuvJEUKjTCaDWCyG3++Hw+G
ImjPSExQFjNayuG9uGpIyclBl8mF8MFuxO/NGp+HNjTXYWKlHgGHh8QcEoZ1vEz4+W4O1ZR040moV/t5zQtx9k3K1eGnRFPgCjDCBysjIwI1nafHa9nWo6/ShyeKDoqMDKpWqX9cyndUtdJI8d2IWLE4fXlhTic3Veri8gZhEv8GgKXh+9FQK2CUwcRMvnl/LdfAHGOhsbm5hUkQL4i4PTVNIVkmhs3lgtHuFMZ3Oxl07MzQ9j2H5EOpAICA0ouiJwlQVXrpqKu44YwRe+bUaNrcPTwadu6Fce3KBUGrOsiyam7mcpeTk5Li6FcYLRXEZOp9sb8TGSn2YwPRrsNnEjMKksGvRzKJkPHvpRGiVUmSmJcFmNnGdZn2+mLpwdeVnsTAYOEHm8mm5ONwpwtoyHWgKeOWPUzE2S4OrZ+bjg631WFvWAZ3VjV+CJW9njkkPm+hSFIUHF47B1e9sx/IdjbhyRh5+DY5vLpwcHoTO54ry3DZvBL7c04KdLS6Ut5pB042wiBLxS2kHKAr448x8fLqjEc+trgBNUcJCTDS213Ln0ckhLtEJOYlYMD4DPx/pwF2f7cUPd80NE6S7883eZuhsHlwzKz9CEAzF7vHj0e8OY9XBNnhDOrj9c2UpzhqbgUSFBLvrO+H1M8jUyFGSFn1BLTTwfn87hTZrl3D61qYaXDotRwj4nl6QFPVac8bodOxtNGN9uQ5Xz8zH58Euf1dOzxPmCtEWBJOTk4WGPiaTCampqVDJxFhx2xwcbrVgen5S2FwjP0WJjxfPRJXOjsIUVcRYWCKicerINBgMBrS3G4FUGbRaLbKzsyMWFMZmabDr/86K69o5JlOD7+88BU+tKsPehk48e9mksCYWcokIj180Ho9eMC6q+NAdiqKEY0GhUCAvLw8sy8LlcsFut8PpdAqZhiOLORFJIpFEnUPxLL1iMibnalHWZsU980cJ5cEUReGiydmYPzYdh1usmJqv7TEGIlklxZ1njsStp5XA7PIedd6plIpx+fTcQWvkEo08ADPjeH5Q/44bsRhQDkJPs7hHwDk5OXENWgYKqVSK6dOnY926dbjkkksAcA6gdevW4c4774z6mtmzZ2PdunW45557hMfWrFmD2bO51cuioiJkZmZi3bp1gqBktVqxY8cOLFmyZDB/nWMOiqJ6dD1QFNVnwW0ooSgK188uxOmj0mD3+DEuSxPXhbj7TXww9u/mU4txxph0KKUiYYAYz+uPlmFB0zSmlGRjSkk2Hne5sbm8BT8d7sCWBhvUUhrXTErEjBwFKMqN/5ybg0fXtuJwixXXv7cTADcp/Mv8kbhxTlGPolCTyYlXfq3C13tbsK5ch+21Rvz3qqmYHzIRXH24Hfd9dQA2tx8UBWSouU4TUhEtrH7LJSIUpShRHOx28eQPR2B1+1GcxpXhfbe/FY98dxi//PU0yCUi7G3sxAu/cCt010XJUEpNkAkrvH85NRsKWfhARyEV9SguAUCSUgKllJsAtJpdKE5LgM3tEwKjR2ZEXidzk5T44a652FChw9vrK7GjkXMvTS9IiloWFC9+v19ow6vt690mhDvmjYBEROPSqb3fVNM1cswblYZ15Tp8tqMRjwTbwtcZHbC4fJCJaYzN6vmzTE1NhdVqhdlsRnp6ulBaCHQ5lvR6vbCiTtO00L30aC6NHK0C47I0KG2zYn25DjIJjR8OtEJEU3jhyikRA+EbTynE8p2NaDA6cce8kqjXvZRugfEUReH2M0djx4d78HOlFYur6zFhdEmfJ8W7gyuyJ4WEpvP2/72N0Us9BhtvgBGyIHoqkfP7/dDIaOj9AZTpXPAzLBQSETI1ckFgquywCS2uwwQmmRidIaHHoXy2k+syNrMoOep5NSZTjeunaNHipHD2IAhMej0n6qjV6kG95vPIxCKMz9FgX6MZ+xrNgsC0oUIPlgXGZWmiutgoioJarUZnZyfsdntcYzWn04lAIACappGdqkVOWs/H7vSCJKhlYpg
cXhxsNkMiosGyXCkgv3LP5zAdbDYLpdzdHQbdu48BQJJKhtNGpeHXch1+a3AgL1ES9+/SnZ+C5XFT87XI0SqQnShHjlaBFjNXmjQYx0wsNPXQQY5HHrynevwMjui6ogrMTh921XdCLOL+RpmJ0cNgUxNk0Nk8YQ7jjuDkOqOXCZVIJIJEIoHP54PH44lJLB+VoY5wEPeE0+mE3++HSCTqcYF4IDl9VLogMIXCC0xndQvbpygqzBmoUCjgcrlgNpuRlhZbkLXNZkN7e7twz0pJScGzl6XhudXlOHNMOs4IOpZHZ6oxvSAJexo6sWJPM34pDeYvjYv8XGaXpGDeaC5T6qYPdsHr57IRx/VybwU458Y5QQHo2zIb8rVSPPtbIwDg4snZePoPE5GpkWPpmko8+1M5KtptuHFOYUS0gT/AYGddpMAEAM9cOgn7GjehRu/Akz+U4tnLJkXdl/J2K+798gAA4N3fanHv2aOx6KS8CLHC5PDixvd3CvmTI9ITcMGkLKw82IZqnR1vbKjBg+eOEfKXThmR2uP9lr9fWzwB/FrPnXOXTs3BL6UdqOzgnJj8PXd6D7mPZ4xJx9I1ldhSbUBFO9fdl6aAy2f0PjaiaRrp6elobW2FXq9HUlISRCIRFFJR2P09FIqiwly6obAsC51OJ9yTUlJSkJmZ2ePv3pcxiFIqxr/+MLHX58QiLvUERVHCQndfX39DFLc6j1IqjslMAXBVQ30xNRCOTtxJoUuXLsUDDzyAhoaGoz95gLn33nvxzjvv4MMPP0RZWRmWLFkCh8MhdJW7/vrrw0LA//KXv2D16tVYunQpysvL8fjjj2P37t2CIEVRFO655x489dRT+P7773Ho0CFcf/31yM7OFkQswvFPQQqXYzNcyz1L0hLiFpf6gkIhx9lTS/DCdXOw6b55WHHLDCyYlCusHhdqKPz77HRkqbnB5oWTMvHr3+fh1tNKenUc5SUr8dzlk7H23tMxuzgFDm8At3y8G29trIEvwOCplaW4/ZM9sLn9ENEUWJZrbbyv0YwddVx55s46EzZV6vHhtgb84/sj+PuKA7C6/ZhekISvb5+Dp4IDpEaTE6/+Wg2L04e7lgfbtk7KwtUzI8sg5BIRHjkrB/fMTsFZ46K3Qu4NiqKEiQG/es+7l7IS5T22FxfRFM4am4Fl10/Fmxdm4+YZKXj20t5v5rFitVrBsizkcvmAZJdwmRKjhFLR3uBXpz/f1QSrmyuB4h1uk3ITe1xJAgClUimU8/ErvgA3mGpsbERzczM8Hg9omkZaWhpGjRqF9PT0mEuA5gfzNb7c3YRHg+HOf55XEjUbTCYW4cObZuK5yydFtBLujXljMjAiTQWXn8WbW1ux9mADqjpswmfRHavbh2/3NQslKzwtZhdazC6IaCps5bAgRYkcrQK+ABu1S+Zg4w6p7e+pRM7tdiMxmKe0N/i3L0rlrh8jgwJTRbsNgaBSFXqO8EKe0xPeSc4XYPD5Tm4yFC1sH+BKrq+ckIgnFxT0KH71lYFwL/UF/m+/r7GriQbvWDhrbM/7wQsxfDvuWOGblfDd43pDIqJx6ihOLNpQoRcCvkPDzMcHyzGqdHbYPX4kKiS9isyh8I6MzU1uYUIVbxl+KKuC5XF8y3uK6hIi15Z29Pi6gcIfYIRS81Cagw6m3OTo93d50FnlDbA4ouOEivSgo2xNaYcg3PXUyTBFCPrucj91BAN5o7l6Q4m1TK4vhLaK/z3ag88uSYFERKHB6ER9sGzX4fFjR9CNc8aY3s9r3iFrNBojcme743a70dDQgIaGBng8HohEIuTm5kKhUCA1QYbnLp+MhRPCxxt/DIpZb26sQZPJBZmYxmmjojua718wRsjrBMLL43rjtqAraX2dA9ubnNjWYANNAXeeyZXE3XXWSNwbDA//dl8LLn5tCy56dTO+3tMMJni9PtLKZQxp5OKIczlZJcVLV00BRXFjgJUHW6Pux5vBLFExTcFg9+Lhbw/h/Jd
/w1d7mmEJZr61ml24/M2tONhsQZJSgi9vm401fz0N98wfhQcXciHb722pQ4vZ1ZW/NLLn7MOUoOjdaPZhSz03Rrv51GL8MeiGfWNjDfYEr7MzCiOFe4AT9dPUMji8ATzwNVc+f/qotJjG5klJSZBKpQgEAmhqaoLBYIDD4eDKxOKAZVlBqAK4+1Fv4hKBMJTEvQQ6Y8YMuN1uFBcXQ6lURthFTabBG/guWrQIer0ejz32GNrb2zFlyhSsXr1aCOlubGwMu1nNmTMHy5cvxyOPPIKHH34YI0eOxHfffYcJE7pWx++//344HA7ceuutMJvNmDt3LlavXj1gwZIEwrEIXwbKD6wCgQDMZjNkMhNeOV+ETlcA2Rop4LIgoOrdzspTlKrCR4tn4h/fH8HyHY145qdyvL+lXuhAccupRbhvwRiu24fZhXaLS+jkQoGC3eNDrcGBWr0DjUYnphcm4bELxgkTyscvGofbP9mLtzbVYGe9CS1mF/KTlXgmGPgcCsMw0Ov1mJEhAjIS+pzxkZukQEWHLURgCuaQ9LD6FIpSqUSeVorcRAlyE/vvXgK6yuMGwr0UL6ePSsPI9ARU6ez4YmcTbjmtWJgc95S/FEpaWhocDgc6OzuRnp4OkUiEtrY22Gw2UBSFtLQ0pKSkxHSsdeessRl4+ddqofvNuCwN7jyz5zKzwlRV1GD33qAoCotPLcZD3xzCqkobVlUeEX526shUXDOrAPPHpsMbYIS2zhaXDzOLkvHlbV2ZQHx53LgsTZhLiaIonDoyFZ/vasKWKoOwAn40fi3vQJPJhWtPLujXqqPbzw2GRTTVo1jo8XigkXE/47t1FgXLFvKTlZCKaKHUQSqmIZd0bYfvuNjdwbSurAM6mwcpKikWRukOB3S1b+/eYGEg4LOXNBrN7+Je4pman4T3t9QLJaa+AINNwYybM3uZEPNCrdfrhcfj6dFdzJcpuFyusADzWMuV5o1Kx4+H2rGhQodTAtxkeGRG13U0O1EOrVICc3DSOLs4Jebjb/7YDMjENBo7Pbj9hzZkJYgxKtuKkVlaFKQoUZCiQlaiHAabF82dTjR1OqFVSoXg61A6rG7sCnaHOm9i18T+7HEZ+GBrPdaVdyDAsP06N3qjw+rGNe/ugN3tx69/P11wTDIMK9w3enYwceeE3ulHg5n7HP969ig89M0h/FLaLpS5ZfcgMPFB38ZoDqZeSuQAbgxgt9sHRWDiXba/VzVEgkyMGQXJ2FZrxMZKPSRiGo99dxjeAIOCFGWPpVU8Wq0Wer0ePp8PRqMxwsXEsizsdrsgHPCkpKQI97LeOH9SFp4IOrIB4NSRaT2WmI3L1uAPU3LwzT4uYDrWjMZp+UmYWZiMnfUm/HszJ8qcUaSCyGFAIKCESCTC3WeNxNyRqfhkWwNWHmzDwWYL/rbiAH442IoXrpyCbbVcZtasHs7lOSWp+PO8EXh1fTUe+uYQJudqw1z0TSYnfgiKvStun439TWa8tLYK5e02/H3FAYhpCrNLUlCjs6PV4kZWovz/27vv6LjKc13gz57em6RRG3XJssEFF1wgYMAGGwhgMASIbwKEYwIxoabgnBACKYCTQzhwyOEkN5SQQAghEOBCDgQwphgCJqYYN8my1Xubpqn7/jHa2zOqI82o+vmt5bVsTdE38jejvd/9Fjxx9YqEwPWaeU4sL3HgnzWduONvn+Pzxljw/+RhpnIGg0EgENtvn/cHaRe6rDguzwK7UY1H362Rs7LUSgELhulTpFAIOG1OFp7ZVS9/Jl96YnJtOQRBQHZ2Nurq6uQhDNLXCwoKkvrMjUajqK+vly8E5OXljXnYBNFkGnOA6fLLL0dDQwN+/vOfIzs7e9Ijp9dff/2wJXHbt28f9LVLLrkEl1xyybDPJwgC7rrrLtx1113pWiLRrKNUKpGRkQGHwwGv14vW1lb4fD60tbWhs7MTWVlZcurvSNRKBX62YT7mOE2466Uv0NzbB7NWhV9csgjr58d
ODrLM2ljfjzFOnlt3fA7WzHXi9X2t+GdNJzRKBR766hJYBtT4+3w+NDQ0yAfONptt3CWeUqNvqVnr/v4G35U5ox84KxQKGAwGeL1eeDyelMtMg8EgfL7YOqzWyR8VrFAI2HxKKb737Kd45N0aXHly8dEG30n8XxqNRuh0OvT19aGjowNKpVK+YJHsQdhwFuRb4TTHykXUSgH3XbooqT5fY7VxiQv7mnqxu6YVHb4QegIi3IEI3j4Ym4CWbdEiEhUTMgr+WdOZMJJdStUf6krqyeWxAJNUFjCabl8Q1/7hYwTDUWzf34oHLl88Ys+LkfiDIzf4BvozmHSx26VpimX9gTqVUoHiTIOc5WfVqxOOH6Rg2sAeTH94P5a99JUTC4b9P5uoAFMoFJIP6Cczewk4+p75oqkXfaEIPq7tgjsQRoZRg0Uu27CPUyqVMBqNo36uNDU1DbogqFQqkw62r66MnWR/2tAjn2zGB9YFQcDxeRZ5BPdJ5clPVzTr1LhkmQt/eL8WDb0hNPSG8FGjH8DQWRGSJzevwEkDTjRf+awJohgb3BHfR255iQNmnQrtniB213WPOtlvPDo8AWz6vx+gqjW253fXdcvra3UHEIxEoVQIyB0mQCQFYHc3xS7CVDhNuOCEPPz4hT2o7/LLPQZzh+mPJ/UZ7PAe/bxp7ZV6ME1NBlMoFJJ76KWjeXuyVldmYeehDvz27UO4p7/Hnkoh4MY1FaOex0hlTg0NDWhvb4fD4ZCPdYLBIGpra+XXBMSCtE6nM+kL1Tq1EhctceGx9w4DGL2P3C1nzcGOg+2Yn2+R+x0m45pTS/HPw50IRUQoBeCri+zwer2oqalBbm4ujEYjlhTasaTQjn8/dx7+9GEdHnj9ILbvb8O5D7wtZ5wON6UTAG5cW4H3qtvxcW03tjz5MZ6+ZpXc4+w3Ow4hEhVxSkUmFhfasbjQjgsX5+P3O4/g5c+asK/ZLU8hLM0y4omrVyB/wN4WBAE/OGceNjz0rjxJc062aVBGXiQSQUtLC7q6uqCOJGYJS0M9cq16nL8oH89+HOsHNj/fOmIG7OlznXhmV+y+mSbNiJmkA1mtVqhUKni9XjmwHw6HUV9fj7KyshEvBEiZT16vF4IgwOVyTckxHtFYjDnA9N5772Hnzp1YtGjRRKyHiKYxQRBgMplgNBrlHgPBYFCekuFwOJCRkTFizwZBEHDlySWYk23GK583499OKZF7jKS6tjsvOB7vVXfAH4rgB+fMHTQRqru7W24uqlQqkZeXl1RJyHBcg0rkks9gAiCfCLa3t0On0w079S0ZUgaC0WhMqhHpRLhgcR62/e9+NPX04ZmP6rGvOXZynkwGk5SlJKWQSyUxOTk5KTeBVSgEbFicj9/sOITvnFU5Ym+tVGhUCtx5wXx4PB4cPnw49jVHHv76SSv+/FGdnD1Q6DDgprUVeOajeuw81IG/7W6QM6qkZsRD9WeQmiTva3aj1d03au+AFz5plEtz3tzfho3//R5+d8WJo/ZmA2Klek9+cAQXLnah3GmSJ8iNdAAen8EkVTSVxGUHVDjNCQGmeHKJXPBogKmm3Yt3qtrlRrTDmagAk3RyrdFoJj2r2WXXI9OkQbsniD2NvXij/2Tq9LnOIfvtxDOZTPB6vXC73cjIGHwyGA6H0dXVJd9XKqmVJromI9uik3ubSeWQ8ZkGQCwLTw4wDZNhMJyfXDAf164uw6FWNz7cW4OG3hB6Imo09oZQ2+GFNxiBVqWAy65HJCricIcPj7xTk/B9IlERv++fPvjlhYmZHmqlAqdXOvHCJ414dU9z2gNMPf4Qvva7f8rBJQD4rL5HXp90USLPphtyWiZw9L32r/4A0/ISBwwaFU6pyMI/9rbIDZeHC1BJvWfa3XEZTEk0+QYmLsAklcfp9fqUBiGM1eo5WbjnlX3y7+rlJQ78dMP8pH9X22w2tLe3IxAIoL29HdnZ2QiHwzh8+DCCwSAUCgXsdjsyMjLkJulj8dU
VsWbfSoWAM0YJXLjsBuzcekZsZPgYnDHXiXKnCVWtHly8tACrFpTj8OHD6OvrQ01NDcxmM7Kzs6HT6ZBh0mLL6eU4Y64TW/74MQ61e+WSzIH9l+KplQr852WLcf5/vYNP63tw89O78etNS9DhDeLPH8UaY1932tEm4jaDBjesqcANaypQ0+7F3z9vRnOPHzesqRjU61ByQoEN5y7MlUtfv1Q+uC9Wc3Oz/BmXYz/6O0ivVuL8uKyva04tlQNMS0c5Tjm5PBNKhYBIVMTGpa4Ry/6HYjQa5WM8URRRU1MDn8+HI0eOoKysTA5aStlwwWAQoVBIPhZSKBQoLCyc1MAs0XiN+dN97ty58Pv9o9+RiGYtQRBgsVjkhrLSL8O2tja0t7fDbDbDZIqVng13sHVSeSZOGmas7Hi57Ab8/urlONLhw8YlgxvISrXrFosFeXl5KR/gShlM9f0nC1KAaeAEueHY7XZ0dnYiFAqhpqYGDocD2dnZYy4DkxphA1NTHifRqpS48qQi/PLVA7j75b2IirGTn+F6hAxksVig0Whiae2AfMCeDt85qxJfWVYw6CR4IphMJnkyntLfhe+vr8TNZ1bgjb2tiIrAWcdnQ61UIBIVsfNQB577VwO2nF6O3r6wnPmzbIgT3gyTVj6p31ndMWiq5EDPfBQ7cL7sxAK8sa8VB1o8uOChd/F/r1g2ZJNoySufNeH7z36K3r4wDrZ48JuvLzs6QU499EG1KIoJGUyS0syjP++yuJ/9wACTVBLiievB9FR/76XVc7JGDIpJASZpRPB4yiiHIp1cT8UQC0GI9eD6x95W/Ku2K64h8ehXzc1mM1paWuD1ehGNRgcFjbq6uiCKIvR6PYqKisYdYD99bha+aOqV/10xYDqoNBbbadaOWoY0kCAIcNkNcNkNqLTFPrs1Gg3KysqgUCjgDoRh1qogCAKq2zxY8x9v4fV9rTjc7pXLW1/d04xDbV5YdCpcMkQz3rPn5+CFTxrx2HuHsXGpa8Rggz8YwbMf1+Ps+TnDnvgCsbHpNe1e/Ptzn+GLpl5kmjQ4vTKW+fBZQ498P3mC3DDlccDRAFOnP/aekBrXnnVctjxVEcCwfWCk3jPt3vgeTFIvp+QymEKh0JB7aLykANNkDwuam2PGylIHjnT48J2zKnHRkvwx7XtBEOB0OlFXV4eOjg7YbDbU1dUhGAxCrVajpKRkXIElyZxsM369aQm0KoVc2jiSsQY3gNiFlvu+sgh/2VWPm9bOgcGgQUVFBVpbW9HV1QW32w232w2r1Qqn0wmtVot5uRa88O0vYetfP8OLnzQiy6zF3FEytAscBvzm68uw6bcf4O97mnHv/+6DUhAQCEdxQoFt2AyokkxjQvBpJN9bV4lX9zQjFBFx8oDsSGniLAC4XC7kqvQAYlNGz1mQm5DFW5ljxrkLcvH/PmtKmDA4FKtejQsW5eHN/a34PyuG7geYLEEQUFhYiKqqKgSDQdTX1yMvLw/Nzc3ysVw8tVqNgoKCcTfGJppsYz67uueee3DrrbfiZz/7GRYsWDDoSvlEjhsloulFEAQ4HA7Y7faEEfK9vb3yL3itVguTyQSz2QyDwTDhTT1PLHYMmf0RCATkE8b8/Py0nITGZzC1ewJo9wQhCIOv5A9HrVajoqJCvtrW2dkJt9s95oNVqY+KFPibSptWFOG/3qyCu7/UaaRAxkDSQXx9fT1MJhPy8pJrYJoMjUoxKcElSW5uLjweD/x+P1pbW+F0OnH2gsTmruvn5+CHz3+O6jYv9jT2os0TgCjGGnoP14T3lIpMfNHUi7cPto8YYNrb1IvPGnqgVgr43vq5uGntHPzb7z/E5w29uPaJXXjt5tWwGhJ/f/eFIrjrpS/w5Ae18tc+OhILRkgZTMOVyEknolIGkyS+l1XFCAEmU39z8PgMph39U5++smxwk/54SqUSCoUC0WgUwWAwbb2SpjLABMQy//6xtxXP727AoXYv1EoBX6oYPSiv1Wr
lKWBerzfhZF4URbk0zuFwpPT+Oq3SiYfejDXttRnUckmWZN3xObhocT7OmOdM6ftkZmaiq6sLwWAQR44cQXFxcULpc1mWCadXZuHN/W147L3D+PH5x0MURTy0vQoAcOVJxUOWhq47PgenzsnCjgNtuOGpf+H5LScPm6H34BsH8evt1Xjmozr85bqTEk7w3X0h/PiFL/BBTQcauv1y9p5Vr8YTV69AuycwRIBp5P5LwOBgrhRgiv08j2YJDpfBlGlO7MEUjkTliXKjBf1VKhWUSiUikQgCgUBa3lPRaFTuUTTZASZBEPDU5pUp7UOpD5vf70d1dTWi0SiUSiWKi4tTCi5Jzhnw+2EiLHTZsDCuxFatViM/Px+ZmZlobW1FT0+P/MfhcCArKwsmrRoPXHYCLlqcP+zEwoFOLHZg28ULcdPTu/E/bx2Cpv/9ct1p45+uGq8ow4hfXrIIXzT24rQB/QilBtpKpRJWqxVREXL/v8uGGPhy36WLcOtZc1CaRLnhfZeegGhUTOpnMBqVSoWioiIcOnQIbrcbBw4ckLOVHA6HXFanVqsnpRk+UTqNeceuX78eO3fuxJo1a+B0OmG322G322Gz2eSGwER0bBEEAVarFWVlZSgtLYXT6ZQPSAOBADo6OnD48GHs27cPTU1No05imQhSwMtoNKYtw0HKYGpzB/BZ/zjdIodB7jmQDKVSifz8fBQXF8snhR0dHWNah5SZZbVa0/baxstu1CQEBBYX2sb0eJvNhoqKipQyK6YDtVotj+Bua2tDU1PToElYZp0aa/uvmj7/rwa5wfeyYUYlA0dHvb9b1T7iZC0pe2ntvGw4jBrkWHX48zdXoSzLiFZ3AHe99EXC/bu8QVz46/fk4NI3Ty2FRqVApzeIQ+1eBEKx9+xwe1sexx139T3TpEkIJMUH+GwDM5i0UgZTLMAUiYo41D/xaX7e6P0mpCu7UgPVdJAy6dJx8jge0iS5zxtin10rSjKS6qEllTIDg6fJud1uhEIh+eQrFYsLbLDoYv9vFU7ToPerTq3EfZeeMKg8baykk3iFQgGfz4e6urpBe/8bX4pNfHzmo9gUyx0H2/F5Qy/0aiWuHGYapEIh4JeXLESGUYN9zW7c+/d9Q94vEhXx149jTZU/qe/B/f84kHDbDU/9C89+XI/6rlhwyaJTYUWJA09cvRzzci1y4+AjHT55UpZUIlcwzAQ54GiTbwBw2XRyplKmSZuQ4ThsgMnYXyLXH1SSAtgqhQCHYfQ9ne4yOZ/Ph2g0CpVKNSWDdFL9fSI1awYgZ3UVFxdPWQA6nbRaLQoKClBWViZ/dnR2duLgwYPweDwQBAGnz3UmPQkSADYszsdNa2Ol38FIFOVOE84cJUtoLC44IR9bz5k3qOG4lAEktT9QKgTcs3EBfnjuvCEzg7UqZVLBJUk6gksSvV6PvLzY56OUVVpWVoa8vDwYjUZotVoGl2hGGnMG05tvvjnsbZ999llKiyGimU0QBBgMBhgMBjidToTDYbkXiMfjQTgcRkdHBzwejzy6d7LEj+FOF5tBDaNGCW8wIjdcTbanw0BSxs6RI0fQ1dWF7OzspA4s+vr65JPIgdNtpsrVXyrBE+8fgSiOPcAETF3GSLo5HA6Ioig3VA6Hw3C5XAn/rxeckIf/91kTXvikEYX9ZWAnDjMqOXabAxqlAk09fdjT2CuXIcULhqN4fnfshDg+2GfQqLDt4kW4+OH38OzH9fjyolycXumENxDGVY99iL39JT2/uvQEnFKRhY9ru/Dh4S7sOtwFU38gIf6kN57U5NZpOZqREV8eB8RKIBQCEBUByzA9mKQm343dfgTDUWiUCuTbR/+csFgs8Hg86OnpSdv7YKozmBa6rAmZKiNNjxtIKl8eGHCTgtd2uz3lExeVUoFT52ThpU+bUDHOz71k6XQ6FBUV4fDhw3C73WhoaEB+/tEypy+VZ8pTLP/8YR1e/SJWQnb58kI4jMMHU5xmHX55ySJc9di
HePTdwzi1ImvQ2Pr3D3WgubdPzoL49fZqnFKRhZWlGfj5y3vx5v426NQKPHDZYiwpsiPDqEkIZtgMGhQ49Kjr9OPzxh6cXJ55tERuhNJPbVwG08DPhDOPy8aHh7ugUSmGfX0ZUpNvTxCiKMaVx2mTOknWarXw+XxpCzBJv6dMpsHByJnCaDTCbDbD6/WiqKhoUo9hJoNer0dxcTG8Xi+am5vh9/tRW1uL4uLicZVn3bimAvVdfvxlVz1uOXNOWoMzQ4kvj4s/1rtoyeAS2enCbrfL7wer1Tpj3xtE8cZ8dLF69eqEP0uWLMH+/fvx3e9+FzfeeONErJGIZiiVSgWr1QqXy4XKykoUFhZCpVIhEAjg0KFDaG1tRV9f34RnNIVCIbl3XDoDTIIgyCcIr/c34U1mgtxwTCYT1Go1otHokHX4Q4nvKzVdAjNFGUbcdf7x+LcvlWBxwbGd2ZqRkYGCggIIgoDe3l65qarktEonrHo1Wt0BfHREmiA3fAaTXqOUp9fc8cIeRKODs5je2NeCTm8QTrMWpwwoqVpaZMc3+jM6tj77Gdo9AXzziV3YXdcNm0GNpzavxCkVWf33ja1j15EueYqcbpQMpqy4fjAlmYl9d3RqpRxEG1giZ+x/Xm//95Gyl4oyDEmNkJfe1319fWk5IY5Go3LT8Kl6X5l16oSywrFMLTIajRAEAcFgEA0NDYhGo+jr65NLlNI14vqmtRU487hsfOPk4rQ830iMRiMKCmIB0+7ubrS0HO1DJAgCrurf1w+8fhD/rOmEWilg86lDZy/FO32uE1eeVAwA+O5fPkGbO3H/SE2AL17mwleWuSCKwM1P78ZvdlTjd+/UAAD+45ITcNbxOcg0aYc8QVyYbwMAfNqf6SoFmFwjlMhp46YmrhzQJP3chXkw92dKDXdCKgWYwlERvf4wWnqlIHBy2UPpzmCSgp2TXR6XTlLvnLlz56Y0lGO6MxqNKCkpgdFoRDQaxeHDh8fVf1cQBPzi4oX4+PYzJ6UE0OfzyeVxM6kZts1mg81mY3CJZo1xX77asWMHrrjiCuTm5uKXv/wlzjjjDLz//vvpXBsRzSJSf6Dy8nKYzWaIoojW1lZUVVXhiy++wIEDB1BbW4uurq60T4OKn1yT7glrUplcQ3fs4Gu8GUzA0Z5WAAaNEB9KIBCQA1HTJXtJ8rVVxfjhl4+b8CuWM4HVakVRUZFc4lNVVSU3iNWoFAkH3naDetSGyD/88nEwapTYdaQLT/RPyYr35/7yuIuWuIacUPWdsypRnGFAc28fzvrVDrxT1Q6DRonHrlqekIkilRN8dKQTfWGpB9PQhw1S0CzHdvSgvnSI1yG9P6T+MBLDgAymQ22eYZ9jKCqVSj6hSDY4OxKpPE6pVE5p2akUoC3LMo5p2qZSqZRLNLu6ulBTUyMHo81mc9rK/sqdZvz268tQ7pycoIHFYkF+fqz3WHt7uzwpCgAuXJwPm0GN3r7YHtq4xDVsA+yBbjt7LubmmNHuCSaUyvmCYfz98+b+58vHHecdj5JMI5p6+vDzl2P3u+XMOTh34cgnz9JE088auhEMR9HUH+wZqUROE/dWW1GS2Mg436bHju+ejt9+fdmwj9eqlDD3Zx62eQJo7U1ugpz8+DQGmILBoPw8M+nEfyiCIBwTZUsKhQJFRUUwGAxykCn+4kiyBEEYMYswnaTPfrPZzGAN0RQa0ydkc3Mz7rnnHlRUVOCSSy6BxWJBIBDA888/j3vuuQcnnnjiRK2TiGYJlUqFwsJC5OfnQ6/XywdqwWAQvb29aGhowP79+1FdXY329nZEIpFRnnF0E1EeJxl4BTqVDCbgaLq03+8f9Yphe3s7gNgB+2xL1Z9tTCYTysrK5D3Y09ODAwcOoLOzExcuPtqse1nx6I2X82163Hb2XADAtr/vk6cYAkBLbx+295drDjU5C4hlQd27cSEAoNMbhFop4DdfWyb3/JFIo9ur27xo6o6dWAzVBFkUxbgMplg
ZHDA4gwkAbj2rEt9cXYrzB/TlGVgid6gtlmkzlt4Y0s9Wer+nQno9Go1mSk9Uzj8h9nP6PyvHPrUoIyMDRUVFUCqV8Pv98slXuiYzThW73S4H1BsbG+WsLL1Gia8uLwQAKATgm6uTm0gFxPb1zy9aAAD4y656/Ks2Frh6dU8LfMEIijIMWFJoh1Grwv2XniCPiD9/UR6+fUb5qM+/MF8KMPWgsb8RuE6tQNYIE8OkWG6GQYWijMGZTnajZtim5BJpIlmHJyCXyGWPMYMpGAyO2O8tGdJFnnT2QKSJJwWZdDodIpEIqqur0dTUlPaLgOkwXHkcEU2+pANM5513HiorK/Hpp5/i/vvvR2NjIx588MGJXBsRzVKCIMBut6OsrAzz5s1DZWUliouLkZWVJTf/9Pv9aG5uxr59+9DQ0DCu9GwAiEQi8gnIxASYjgZ21EoBxWPIMhiKSqWS1zlSFlMwGER3dzcAwOlMvnSGpo5Wq0VhYWFCI9WmpiYszDUi3xbbRyP1X4q3aUURTiy2wxuM4N+f+xyiKGLHgTZ8+8l/ISrGgkNlIwRnVpRm4Ka1FXAYNXjgssVDTiezGzVyNtW71bFg5lBT5KQTUEEQoNNpUeAwQBAwZEPYyhwztp49b9AEO6McYJJK5PozmIYIUg0nnWVyU91/SXJyeSYO/uxsuYRrrMxmM8rKyuTPVa1WOytKe5xOJywWC0RRRG1trZxxdtXJJVjksuJbp5UPGeAcyZJCOzb292r5cX/5qVQed+Hio/2eFhXY8NuvL8ONayqw7eKFSQUgj+8PMNV1+uVpci67YcTHGpSx0vHF+cZxBzmlyX4d3qBcIpdsgEmtVkMQBIiiKP98xyu+/xLNLFKTfYPBAFEU0dHRgQMHDky7QJPf70c4HIZCoeA+I5piSTf5fuWVV3DDDTfguuuuQ0VFxUSuiYiOIYIgQK1WQ61Ww2QyITs7G6FQCL29vejs7EQgEEBXVxe6urpgNBqRmZk5piahbrcboihCq9VOyMlifICpNNMEjSr11HmHwyGPCs7JyYFSqYQoivKJczAYlF+X0WgcV/NNmjp6vV5uWOz1etHa2oIfn388/vxRHS5eOniM8lAUCgH3bFyIs//zbbx1oA1fuvdNuUxTIQBbTh89e+OmtXNwwxkVI5YxLi2yo7rNi0/qugEMncEUH4wRBAG//foytPYGRmxgPNDRHkzjz2CSyuSkZt+pBF6nS4AJANRDlDmOhUajQWlpKXp6euTeTDOdIAhwuVw4dOgQ+vr6cOTIEZSWliLLrMXfrv/SuJ/3+2dX4n/3NOOT+h78ensV3q2KBVbjswyBWN+mgc3AR2LVq1GcYcDhDh9e+bwJAFAwQvN6t9uNE53AN0+0Y+OK0TOkhpMRN0muxX20yXcyYgFjHfx+P+rq6lBQUDCu90M0GpUv8szk/kvHMpVKhZKSEng8HrS2tsLv96OjowOdnZ1wOBzIzMxMe/uBsZKyl8xm8zFRwkg0nSX9DnznnXfgdruxdOlSrFixAv/1X/8ll2cQEaWTWq1GRkYGysvLUVJSIo/T9nq9OHLkiJym3djYiPr6etTX16Onp2fIZuETnTIdXyI3J8XyOInBYIBWq0U0GkVbWxuam5tx4MABVFdXo76+Xj7AA6Zf7yVKjiAIco+c3t5enFRkwm+/vmxMvSrKsky4cU3sgk9Dtx9GjRJXnVyM7d85HWfMTW4c9Gg9spb1N/qWeokPFWCS+nJIWTJzss1DZkSNxBhXIucLhtHUE3vO0fpRDSR9VqRaJidlbKSrV9FUUygUsNvts+b1AEfLd6TBEXV1dSmXcjnNOvk99ctXD8jZgGPpfzWcBS4bAODNfbFeWMMFYEOhEOrr62FQK3DFqmLMcY0/UCo1+m73BOUeTDnW5DKYACA3NxdKpRJ9fX2orq6WM5HGwufzQRRFqFSqaRGwpfERBAFmsxmlpaXyBL34jKa6ujo0NjaiqakJLS0t6O7unrQMJ1E
U5RJglscRTb2kM5hWrlyJlStX4v7778fTTz+NRx55BLfccgui0Shee+01FBQU8MoEEaWVIAgwGo0wGo3Izs5GR0cHurq60NfXN6jZZHd3N5RKJex2O0wmE6LRKCKRyIRPronPYKrMTk9attTsu6mpKSGQr1AooNPpoNFooNVqodfrmQo+g+n1ejgcDnR2dqKpqQllZWVjzi655tRSRKIiTFoVLl7mgkWX3qvISweU7A1VIie9x1LpA2bUHC2Rk7KXHEYNbIaxBUSk97mU7TeeE9r4nlI8IZ7e1Go1ioqKcOjQIXg8HjQ3NyM3N7VpVVecVIynPqyV9+FFS/JHeURyFuRb8OInjfCHYmWgBUNMkBNFEXV1dYhEItDpdHIQerwSezCNrUQOiF3sKC8vR21tLfx+P44cOYKcnBxkZiYfQJY+H8aSeUzTlxRoMplM/Rm4rfD5fMMOV9BqtTCZTLDb7fJFCInUe1PKYB9Pfy5pYEwoFJLXRkRTK+kAk8RoNOIb3/gGvvGNb2D//v343e9+h3vuuQe33XYbzjzzTLzwwgsTsU4iOsZpNBrk5uYiKysL3d3dcq29IAiIRCLy19rb2wdlV6pUqglrgm3Vq2HSquAJhFOaIDeQzWZDW1sbIpEIzGYzbDYbTCYTU79nGafTie7ubvT19aGrq2vM4+PVSgVuWDNxZeulmUY4jBp0emMZPboBU+QikQh8vliT8VQO7I3a2ImFPxRBddvY+y9J0lEmFw6H5WzI2ZTxM1vp9Xq4XC7U1dWho6MDWq12zO+jeBqVAj8+73h8/ZF/QqNS4MsL8kZ/UBIW5NsS/j3UBLmWlhb4fD4oFAoUFham/Hkv9WBq7PajyxfLJsk2Jx9gAmJBvJKSEjQ3N6OzsxMtLS1wOBxJr439l2YnQRBgMplgNBrh9Xrh9/sRjUYhiiKi0Sh8Pp8c6A8EAujo6IDJZEJmZiYEQUBHR8egTFODwQCTyQSDwQC9Xj8o4CT1+pNI2X5SCWZWVhaPkYimgTEHmOJVVlZi27ZtuPvuu/Hiiy/ikUceSde6iIiGpFKphrx6mp2dDbfbja6uLgQCAXm8uJTVNFFXTgVBwDkLcvDOwXYsLxn/Sc1ASqUSc+bMAQAeMM1iKpUKTqcTzc3NaGlpgdVqnVZTlgRBwJJCO/6xtwVAbFJXPCk7QaPRpBSMkUrkAOCz+tiV8NIxlsdJrFYrPB4Pent7xxVgkrKX1Go133szhNVqRSAQQGtrKxobG6HRaFIKaJw6Jwu/3rQENr16UEP68Zqfn1i6M3ACqXSBBABcLldagpsZ/RlMXzTFTuS1KgUs+rEf+isUCuTm5qKnpweRSAR+vz+pZvGhUEh+PzHANDtJgaah/n/D4TC8Xi96enrQ29sLj8cj/86QGAwGhMNhBINB+Hw++YIFECu7VigUCIfDcuBfpVLJv2/cbjcikQgUCgXy8vJgs9km+uUSURJSCjBJlEolNmzYgA0bNqTj6YiIxkwQBFgslimpv9928aJBV9bSgSe3x4aMjAw5MNrU1ASXyzXVS0qwrPhogGlgD6Z0laBqVQooFQIiUVGeslWSOb4T0vgyuXA4DJVqbIc6Uv8llsfNLFlZWQgEAujp6UF9fT3mzJmT0mfoOQtSK7UbyKxTozTTiEPtsWyLgSVy0r6LnySaKqlErqU3FuTJtujG/XtKKlnv7e2Fz+dLKsAUXz471vchzXwqlQpWq1UOAHd0dKC7uxuiKMJmsyEjI0Mum5OGl0jZUKFQaFArBABysEkKROl0unE3oCeiicFPeyKiNGBvCRovQRCQl5eHmpoadHd3w2w2y82qp4NlRUf7MMUHmERRTFv5iyAIMGqU6O0LY09jLNtivBlMUjPhQCAAr9c75p8l+y/NTIIgID8/Hz6fD6FQCB0dHdNuCMIClxWH2r0w61SDMqMmorG81ORbkm1JbU9LASav15vUzza+/xId27RaLfLy8uS+YgODvxqNBhkZGcjIyAA
Qy37z+/1yg3iVSgWlUilnxUmZ6mMp1ySiycF3JBER0RQzGo1y6WdjY+OkTd9Jxvx8KzTK2OFCfJNvKUNIymxIlam/TM4TCAMY+wS5hOfqP6GVenOMBQNMM5dCoUB2dmyCYltbG8Lh8BSvKNGC/Fiwc6gG3xMRYMo0Ju5h5xgafA/FYIitW5oMNxJRFBlgokEUCkVSASG1Wg2LxQKr1Qqj0QitViv307TZbMjOzkZmZiaDS0TTEN+VRERE04DT6YROp0MkEkFDQ0PKI9fTRadW4uTy2FXlooyjJ8bxJ4/pOMg3xPVhUioEFDrGH2CSAl4D+30kYyJO9GnyWK1W6HQ6RKNRtLW1TfVyEqyfn4OyLCO+smxwGexE7DuLXgW18mh27VgbfA8k9cSJRqNDli/F8/v9cn8cKTBFRESzHwNMRERE04BCoYDL5YIgCPB4POjq6prqJcke/OoSvHHr6oRJiemeDhXf6LvArodGNf5DFCnAFAwGx5QNFo1G2YNphhMEQc5i6uzslP8/pwOX3YDXbz0NV55cMui2iQgwCYKAjLgsphxrans6PltxtOzA+AA0S8iJiI4dDDARERFNEzqdTj45bmxsRFtb27TIZDJpVSjNOhpIikQicpPVVBt8S4xxE+riv9d4KJVK6PWxMfBjyWKSTvIVCgWbEs9gZrMZRqMRoiiipaVlqpeTlInKnIvvw5SdYokccLRMbiwBJiIiOnYwwERERDSNZGRkyOOWW1paUFtbO+16yUgnj9K46HSIz2AqzUy9p1OymRbx4rOXmHUxs0nNhHt6ehJGn09H0WhUfo+nO8AkTZIDAGeKJXLA0ffVcH2YIpGIPGkOYICJiOhYwwATERHRNCJNw8rLy4MgCHC73aiurp5WJ8lSeVy6speAo02+gdQzmIDERt/JZoFJDb7Zf2nm0+v18gTB+vp6RCKRKV7R8OIz55RK5Sj3HpvEDKbUyz71ej0EQUAkEpHfLwDQ1dWFqqoq7N27F7W1tQBigVq+l4iIji0MMBEREU0zgiDA4XCgtLQUGo0GoVAIhw4dQnt7+5SXzMVPh0pngMkQVyJXkoYMJoPBAEEQEAqFku7DIzUuZv+l2SE3NxdqtRrBYBD19fVT/t4ZTnx5XLoz5xIymNJQIicIwqAyObfbjYaGBvn9o9FoYLPZ4HINbmZORESzGwNMRERE05Rer0dZWRksFgsAoLm5GXV1dVOajREIBBAOhxNONNMhPoOpLCv1AJNCoZD7MI1WJheJRFBfX4+enh4AkB9HM5tKpUJhYaGcCTjdpspJpEb0E5Htk9mfwWTSqhLeY6mIL5MLhUKor68HANhsNlRWVmLOnDlwuVx8HxERHYMYYCIiIprGlEolCgoKkJubC0EQ0Nvbi6qqKvj9/ilZj1QWI40sTxepB5NJq0KWOT0ZRFKZ3EiNvj0eDw4ePIju7m4AQFZWFvvGzCJ6vR65ubkAgNbW1jE1fZ8sE9XgG4A8Rc6ZhvI4SXx/s4aGBkQiEWi1WuTl5UGtVqft+xAR0czDABMREdE0JwgCMjIyUFpaCrVaLZfMSRk3k0kKMKW7jEwqkSvNMqatTCj+RHio8ii3243Dhw8jHA5Do9GgtLQU2dnZbPA9yzgcDtjtdgBAXV1dQu+g6WAiA0wLXVYoFQKWFtrT9pxSH6ZwOAyPxwNBEFBQUJDWgDMREc1M/E1AREQ0Q0glc9II9rq6OrS0tExqb5mJCjDNy42VAa4szUjbcw7XkBiI9ZJqbm4GAFitVpSXl6e15I+ml9zcXOj1ekQiETmoOF1MZICpItuMXT9ci3s3Lkzbc8aXnwKxn61Ol3p/JyIimvkYYCIiIppBVCoViouLkZERC8S0tbWhoaFh0r7/RAWYTi7PxAc/WIPb1s9N23MqFAo5i6mrqyvhtp6eHgQCASgUCuTm5jL7YpZTKBQoKiqSMwCPHDkyLSbLiaIoB5gmqrzMZtBAoUhvVp5URmq1WuXsMCIiIh5NERE
RzTCCICA3Nxf5+fkAgO7u7lEbWaeDKIoTFmACgGyLLu0nwlIgrqOjQ+6/I4oiWltbAQCZmZlQqdLT/JimNyk4q1Qq4ff7UVdXh3A4jGAwiL6+vqSnDaZTOByWMxAnIoNpomRmZqK4uBgul4slpUREJGOAiYiIaIay2+1wOBwAYhPmJrpULhQKzbiTYbPZLGdY1NfXIxKJoKurC8FgEEqlUg5A0bFBq9WiqKgIgiDA4/Fg3759OHDgAKqqqnDgwAG43e5JXU989tJMCtQoFAqYTKYZtWYiIpp4DDARERHNYFlZWVAoFPD7/ejt7Z3Q7yWdDGu12hl1YpmbmwuNRoNwOIyGhgZ5XH1WVhaUSuUUr44mm8FgQEFBgfx/LwiCXCLZ1NSEaDQ6aWuZyP5LREREk4054URERDOYWq1GZmYmWltb0dLSArPZPGH9hCayPG4iKRQKuFwuHDp0SA7CqVQqOfuLjj0WiwVmsxkA5EbwBw4cQDAYRGdnJzIzMydlHQwwERHRbMIMJiIiohkuIyMDKpUKwWAQXV1dEEURPp8PDQ0NqK+vRygUSsv3kQJMM/Fk2GAwwOl0yv92Op1s7H2MEwRBzsRTKpXIzs4GALS2tk7alDkGmIiIaDZhBhMREdEMp1Qq4XQ60djYiNbWVnR1daGvr0++3e12Izc3F1arNaXStpmawSTJyspCIBBANBqFzWab6uXQNGO329HZ2Ym+vj60trYiLy9vwr8nA0xERDSb8NIdERHRLGC326HVahGJRNDX1wdBEGC1WqHT6RCJRFBfX4/a2tqUMjNmeoBJEAQUFBSgqKiI2Us0iCAIyMnJAQB0dnbK+30iMcBERESzCTOYiIiIZgFBEJCfn4+2tjYYjUbYbDaoVCqIooi2tja0tbXB7XajpqYGZWVlYw6wRCIROTg1UwNMRKMxmUwwm81wu904cuQI9Ho9VCoVVCoVNBoNNBoNtFptWgKUkUgEkUgEAANMREQ0O8yYy3ednZ3YtGkTLBYLbDYbrr76ang8nhEf09fXhy1btiAjIwMmkwkbN25ES0tLwn2k+vv4P3/6058m8qUQERFNCIPBgKKiImRmZkKlil1DEgQBTqcTZWVlUKlUCAQCaGxsHPNzS9kcKpWKk9doVsvJyYEgCAgGg+jp6UFHRwdaWlpQV1eH6upqfPHFFzh48CB8Pl9K30fqjaZUKvmeIiKiWWHGBJg2bdqEPXv24LXXXsNLL72EHTt24JprrhnxMTfffDNefPFFPPPMM3jrrbfQ2NiIiy66aND9Hn30UTQ1Ncl/NmzYMEGvgoiIaGrodDq4XC4AQHd3N7q6usb0+JleHkeULK1Wi/LycrhcLuTk5CAzMxNWqxV6vV4OBAUCARw6dAhtbW0QRXFc34flcURENNvMiBK5vXv34u9//zs+/PBDLFu2DADw4IMP4pxzzsEvf/nLIZsw9vT04He/+x2efPJJnHHGGQBigaR58+bh/fffx8qVK+X72mw2ueaeiIhotjKZTHA6nWhtbUVjYyP0ej10Ol1Sj2WAiY4lWq122L0eDofR1NSEnp4etLS0wOPxwOVyQa1Wj+l7MMBERESzzYzIYNq5cydsNpscXAKAtWvXQqFQ4IMPPhjyMbt27UIoFMLatWvlr82dOxeFhYXYuXNnwn23bNmCzMxMLF++HI888sioV6ICgQB6e3sT/hAREc0EWVlZMJlMEEURtbW18knuaBhgIopRqVRwuVzIz8+HIAjwer04ePAgWlpa5J5KyWCAiYiIZpsZkcHU3NwMp9OZ8DWVSgWHw4Hm5uZhH6PRaAaNIc7Ozk54zF133YUzzjgDBoMBr776Kr71rW/B4/HghhtuGHY9d999N+68887xvyAiIqIpIggCXC4XqqqqEAwGceDAAVitVmRmZkKr1cLn88Hn86Gvrw8ZGRkwGo0AGGAiiicIAux2OwwGA+rr6+H3+9HW1obOzk5kZmZCrVbLFyy1Wi0MBkPC46X+TgA
DTERENHtMaYDptttuw7333jviffbu3Tuha7j99tvlvy9evBherxe/+MUvRgwwbd26Fbfccov8797eXhQUFEzoOomIiNJFpVKhuLgYTU1N8Hq96OnpkU9243m9XlRUVECpVMrZFgwwER2l1WpRWlqK3t5etLS0IBgMDhooAwBOpxNZWVkQBAGRSARHjhxBJBKBTqeD1WqdgpUTERGl35QGmG699VZceeWVI96ntLQUOTk5aG1tTfh6OBxGZ2fnsL2TcnJyEAwG0d3dnZDF1NLSMmK/pRUrVuAnP/kJAoHAsAfRI9XlExERzQQ6nQ4lJSXw+/1ob2+XA0wqlQpGoxF+vx/BYBBNTU1yFrFCoZCn0xFRjCAIsFqtsFgs6Orqkt9LgiAgGo3C5/OhtbUVwWAQeXl5qKurQyAQgEqlQlFRERSKGdGxgoiIaFRTepSYlZWFrKysUe+3atUqdHd3Y9euXVi6dCkA4I033kA0GsWKFSuGfMzSpUuhVqvx+uuvY+PGjQCA/fv3o7a2FqtWrRr2e+3evRt2u50BJCIiOibo9XoUFBQgNzcX0WhULtfx+/2orq5OyGzSaDQQBGGqlko0rQmCAIfDAYfDkfD1zs5ONDY2oru7Gx6PB+FwGIIgoKioaMyNwYmIiKazGXEZct68eVi/fj02b96Mhx9+GKFQCNdffz0uu+wyeYJcQ0MD1qxZg9///vdYvnw5rFYrrr76atxyyy1wOBywWCz49re/jVWrVskT5F588UW0tLRg5cqV0Ol0eO211/Dzn/8c3/nOd6by5RIREU26gZlJer0eWVlZaGtrk4NMvPhCNHYOhwNqtRp1dXUIh8MAAJfLBb1eP8UrIyIiSq8ZEWACgD/+8Y+4/vrrsWbNGigUCmzcuBEPPPCAfHsoFML+/fvh8/nkr/3qV7+S7xsIBLBu3Tr8+te/lm9Xq9V46KGHcPPNN0MURZSXl+O+++7D5s2bJ/W1ERERTUdZWVlwu93o6+sDwAAT0XiZzWaUlJSgubkZVquVfZeIiGhWEkRpxAWNW29vL6xWK3p6emCxWKZ6OURERGkjlcoBQEFBAU+MiYiIiI4xycY8ZkwGExEREU0+vV4Pl8sFj8cDs9k81cshIiIiommKASYiIiIakc1mS5jISkREREQ0EOeiEhERERERERFRShhgIiIiIiIiIiKilDDAREREREREREREKWGAiYiIiIiIiIiIUsIm32kgiiKA2Og+IiIiIiIiIqLZQop1SLGP4TDAlAYdHR0AgIKCgileCRERERERERFR+rndblit1mFvZ4ApDRwOBwCgtrZ2xB820UzR29uLgoIC1NXVwWKxTPVyiFLGPU2zEfc1zTbc0zQbcV/TbCCKItxuN/Ly8ka8HwNMaaBQxFpZWa1WfmjQrGKxWLinaVbhnqbZiPuaZhvuaZqNuK9ppksmmYZNvomIiIiIiIiIKCUMMBERERERERERUUoYYEoDrVaLO+64A1qtdqqXQpQW3NM023BP02zEfU2zDfc0zUbc13QsEcTR5swRERERERERERGNgBlMRERERERERESUEgaYiIiIiIiIiIgoJQwwERERERERERFRShhgIiIiIiIiIiKilDDAlKKHHnoIxcXF0Ol0WLFiBf75z39O9ZKIkvLjH/8YgiAk/Jk7d658e19fH7Zs2YKMjAyYTCZs3LgRLS0tU7hiosF27NiB8847D3l5eRAEAc8//3zC7aIo4kc/+hFyc3Oh1+uxdu1aHDx4MOE+nZ2d2LRpEywWC2w2G66++mp4PJ5JfBVER422p6+88spBn93r169PuA/3NE0nd999N0488USYzWY4nU5s2LAB+/fvT7hPMscctbW1OPfcc2EwGOB0OvHd734X4XB4Ml8KkSyZfX3aaacN+ry+9tprE+7DfU2zDQNMKXj66adxyy234I477sDHH3+MRYsWYd26dWhtbZ3qpREl5fjjj0dTU5P855133pFvu/nmm/Hiiy/imWe
ewVtvvYXGxkZcdNFFU7haosG8Xi8WLVqEhx56aMjbt23bhgceeAAPP/wwPvjgAxiNRqxbtw59fX3yfTZt2oQ9e/bgtddew0svvYQdO3bgmmuumayXQJRgtD0NAOvXr0/47H7qqacSbueepunkrbfewpYtW/D+++/jtddeQygUwllnnQWv1yvfZ7RjjkgkgnPPPRfBYBDvvfceHn/8cTz22GP40Y9+NBUviSipfQ0AmzdvTvi83rZtm3wb9zXNSiKN2/Lly8UtW7bI/45EImJeXp549913T+GqiJJzxx13iIsWLRrytu7ublGtVovPPPOM/LW9e/eKAMSdO3dO0gqJxgaA+Nxzz8n/jkajYk5OjviLX/xC/lp3d7eo1WrFp556ShRFUfziiy9EAOKHH34o3+eVV14RBUEQGxoaJm3tREMZuKdFURSvuOIK8YILLhj2MdzTNN21traKAMS33npLFMXkjjlefvllUaFQiM3NzfJ9/vu//1u0WCxiIBCY3BdANISB+1oURXH16tXijTfeOOxjuK9pNmIG0zgFg0Hs2rULa9eulb+mUCiwdu1a7Ny5cwpXRpS8gwcPIi8vD6Wlpdi0aRNqa2sBALt27UIoFErY33PnzkVhYSH3N80YNTU1aG5uTtjHVqsVK1askPfxzp07YbPZsGzZMvk+a9euhUKhwAcffDDpayZKxvbt2+F0OlFZWYnrrrsOHR0d8m3c0zTd9fT0AAAcDgeA5I45du7ciQULFiA7O1u+z7p169Db24s9e/ZM4uqJhjZwX0v++Mc/IjMzE/Pnz8fWrVvh8/nk27ivaTZSTfUCZqr29nZEIpGEDwQAyM7Oxr59+6ZoVUTJW7FiBR577DFUVlaiqakJd955J0455RR8/vnnaG5uhkajgc1mS3hMdnY2mpubp2bBRGMk7dWhPqel25qbm+F0OhNuV6lUcDgc3Os0La1fvx4XXXQRSkpKUF1djR/84Ac4++yzsXPnTiiVSu5pmtai0ShuuukmnHzyyZg/fz4AJHXM0dzcPORnuXQb0VQaal8DwFe/+lUUFRUhLy8Pn376Kb7//e9j//79+Otf/wqA+5pmJwaYiI5RZ599tvz3hQsXYsWKFSgqKsKf//xn6PX6KVwZEREN57LLLpP/vmDBAixcuBBlZWXYvn071qxZM4UrIxrdli1b8Pnnnyf0fCSa6Ybb1/G97xYsWIDc3FysWbMG1dXVKCsrm+xlEk0KlsiNU2ZmJpRK5aAJFy0tLcjJyZmiVRGNn81mw5w5c1BVVYWcnBwEg0F0d3cn3If7m2YSaa+O9Dmdk5MzaDBDOBxGZ2cn9zrNCKWlpcjMzERVVRUA7mmavq6//nq89NJLePPNN+FyueSvJ3PMkZOTM+RnuXQb0VQZbl8PZcWKFQCQ8HnNfU2zDQNM46TRaLB06VK8/vrr8tei0Shef/11rFq1agpXRjQ+Ho8H1dXVyM3NxdKlS6FWqxP29/79+1FbW8v9TTNGSUkJcnJyEvZxb28vPvjgA3kfr1q1Ct3d3di1a5d8nzfeeAPRaFQ+ECSazurr69HR0YHc3FwA3NM0/YiiiOuvvx7PPfcc3njjDZSUlCTcnswxx6pVq/DZZ58lBE9fe+01WCwWHHfccZPzQojijLavh7J7924ASPi85r6m2YYlcim45ZZbcMUVV2DZsmVYvnw57r//fni9Xlx11VVTvTSiUX3nO9/Beeedh6KiIjQ2NuKOO+6AUqnE5ZdfDqvViquvvhq33HILHA4HLBYLvv3tb2PVqlVYuXLlVC+dSObxeOQrgUCssffu3bvhcDhQWFiIm266CT/96U9RUVGBkpIS3H777cjLy8OGDRsAAPPmzcP69euxefNmPPzwwwiFQrj++utx2WWXIS8vb4peFR3LRtrTDocDd955JzZu3IicnBxUV1fje9/7HsrLy7Fu3ToA3NM0/WzZsgVPPvkk/va3v8FsNsu9ZaxWK/R6fVLHHGeddRaOO+4
4fO1rX8O2bdvQ3NyMH/7wh9iyZQu0Wu1Uvjw6Ro22r6urq/Hkk0/inHPOQUZGBj799FPcfPPNOPXUU7Fw4UIA3Nc0S031GLuZ7sEHHxQLCwtFjUYjLl++XHz//feneklESbn00kvF3NxcUaPRiPn5+eKll14qVlVVybf7/X7xW9/6lmi320WDwSBeeOGFYlNT0xSumGiwN998UwQw6M8VV1whiqIoRqNR8fbbbxezs7NFrVYrrlmzRty/f3/Cc3R0dIiXX365aDKZRIvFIl511VWi2+2egldDNPKe9vl84llnnSVmZWWJarVaLCoqEjdv3pww4loUuadpehlqPwMQH330Ufk+yRxzHD58WDz77LNFvV4vZmZmirfeeqsYCoUm+dUQxYy2r2tra8VTTz1VdDgcolarFcvLy8Xvfve7Yk9PT8LzcF/TbCOIoihOZkCLiIiIiIiIiIhmF/ZgIiIiIiIiIiKilDDAREREREREREREKWGAiYiIiIiIiIiIUsIAExERERERERERpYQBJiIiIiIiIiIiSgkDTERERERERERElBIGmIiIiIiIiIiIKCUMMBERERHNYoIg4Pnnn5/qZRAREdEsxwATERERUYra2tpw3XXXobCwEFqtFjk5OVi3bh3efffdqV4aERER0aRQTfUCiIiIiGa6jRs3IhgM4vHHH0dpaSlaWlrw+uuvo6OjY6qXRkRERDQpmMFERERElILu7m68/fbbuPfee3H66aejqKgIy5cvx9atW3H++ecDAO677z4sWLAARqMRBQUF+Na3vgWPxyM/x2OPPQabzYaXXnoJlZWVMBgMuPjii+Hz+fD444+juLgYdrsdN9xwAyKRiPy44uJi/OQnP8Hll18Oo9GI/Px8PPTQQyOut66uDl/5yldgs9ngcDhwwQUX4PDhw/Lt27dvx/Lly2E0GmGz2XDyySfjyJEj6f2hERER0azDABMRERFRCkwmE0wmE55//nkEAoEh76NQKPDAAw9gz549ePzxx/HGG2/ge9/7XsJ9fD4fHnjgAfzpT3/C3//+d2zfvh0XXnghXn75Zbz88st44okn8D//8z/4y1/+kvC4X/ziF1i0aBH+9a9/4bbbbsONN96I1157bch1hEIhrFu3DmazGW+//TbeffddmEwmrF+/HsFgEOFwGBs2bMDq1avx6aefYufOnbjmmmsgCEJ6flhEREQ0awmiKIpTvQgiIiKimezZZ5/F5s2b4ff7sWTJEqxevRqXXXYZFi5cOOT9//KXv+Daa69Fe3s7gFgG01VXXYWqqiqUlZUBAK699lo88cQTaGlpgclkAgCsX78excXFePjhhwHEMpjmzZuHV155RX7uyy67DL29vXj55ZcBxJp8P/fcc9iwYQP+8Ic/4Kc//Sn27t0rB42CwSBsNhuef/55LFu2DBkZGdi+fTtWr149MT8sIiIimpWYwURERESUoo0bN6KxsREvvPAC1q9fj+3bt2PJkiV47LHHAAD/+Mc/sGbNGuTn58NsNuNrX/saOjo64PP55OcwGAxycAkAsrOzUVxcLAeXpK+1trYmfO9Vq1YN+vfevXuHXOcnn3yCqqoqmM1mOfPK4XCgr68P1dXVcDgcuPLKK7Fu3Tqcd955+M///E80NTWl+uMhIiKiYwADTERERERpoNPpcOaZZ+L222/He++9hyuvvBJ33HEHDh8+jC9/+ctYuHAhnn32WezatUvukxQMBuXHq9XqhOcTBGHIr0Wj0XGv0ePxYOnSpdi9e3fCnwMHDuCrX/0qAODRRx/Fzp07cdJJJ+Hpp5/GnDlz8P7774/7exIREdGxgQEmIiIioglw3HHHwev1YteuXYhGo/iP//gPrFy5EnPmzEFjY2Pavs/A4M/777+PefPmDXnfJUuW4ODBg3A6nSgvL0/4Y7Va5fstXrwYW7duxXvvvYf58+fjySefTNt6iYiIaHZigImIiIgoBR0dHTjjjDPwhz/8AZ9++ilqamrwzDP
PYNu2bbjgggtQXl6OUCiEBx98EIcOHcITTzwh91BKh3fffRfbtm3DgQMH8NBDD+GZZ57BjTfeOOR9N23ahMzMTFxwwQV4++23UVNTg+3bt+OGG25AfX09ampqsHXrVuzcuRNHjhzBq6++ioMHDw4bsCIiIiKSqKZ6AUREREQzmclkwooVK/CrX/0K1dXVCIVCKCgowObNm/GDH/wAer0e9913H+69915s3boVp556Ku6++258/etfT8v3v/XWW/HRRx/hzjvvhMViwX333Yd169YNeV+DwYAdO3bg+9//Pi666CK43W7k5+djzZo1sFgs8Pv92LdvHx5//HF0dHQgNzcXW7ZswTe/+c20rJWIiIhmL06RIyIiIpqhiouLcdNNN+Gmm26a6qUQERHRMY4lckRERERERERElBIGmIiIiIiIiIiIKCUskSMiIiIiIiIiopQwg4mIiIiIiIiIiFLCABMREREREREREaWEASYiIiIiIiIiIkoJA0xERERERERERJQSBpiIiIiIiIiIiCglDDAREREREREREVFKGGAiIiIiIiIiIqKUMMBEREREREREREQpYYCJiIiIiIiIiIhS8v8BLMt0AmZc6uQAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Step 2. Inverse filter with 30-msec window, 10-msec hop and 13th order LPC\n", + "frame_size, hop_size, lpc_order = 300, 100, 13\n", + "\n", + "# Solve for LPC coefficients\n", + "window = torch.hann_window(frame_size).view(1, 1, -1)\n", + "gne_frames = downsampled_audio.view(1, -1).unfold(-1, frame_size, hop_size) * window\n", + "\n", + "autocorrelation = compute_cross_correlation(gne_frames, gne_frames, width=lpc_order)\n", + "\n", + "# Collapse frame and batch into same dimension, for lfiltering\n", + "batch, frame_count, _ = autocorrelation.shape\n", + "autocorrelation = autocorrelation.view(batch * frame_count, -1)\n", + "\n", + "# Construct Toeplitz matrices (one per frame)\n", + "# This is [[p0, p1, p2...], [p1, p0, p1...], [p2, p1, p0...] ...]\n", + "# Our sliding window should go from the end to the front, so flip\n", + "# The autocorrelation has an extra value on each end for our prediction values\n", + "R = autocorrelation[:, 1: -1].unfold(-1, lpc_order, 1).flip(dims=(1,))\n", + "r = autocorrelation[:, lpc_order + 1:]\n", + "\n", + "# Solve for LPC coefficients, generate inverse filter with coeffs 1, -a_1, ...\n", + "lpc = torch.linalg.solve(R, r)\n", + "a_coeffs = torch.cat((torch.ones(lpc.size(0), 1), -lpc), dim=1)\n", + "b_coeffs = torch.zeros_like(a_coeffs)\n", + "b_coeffs[:, 0] = 1\n", + "\n", + "# Perform filtering\n", + "excitation = torchaudio.functional.lfilter(gne_frames, b_coeffs, a_coeffs, clamp=False)\n", + "plt.plot(gne_frames[0, 200, :] * 0.3, label='Audio frame', color=\"lightgrey\")\n", + "plt.plot(excitation[0, 200, :], label='Excitation')\n", + "plt.xlabel(\"Samples\")\n", + "plt.ylabel(\"Amplitude\")\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "d5dc9680-a233-41d7-8798-227a0ca4ea87", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABI8AAADvCAYAAAB7TvRLAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAZ+dJREFUeJzt3XdcE/f/B/BX2BsEQcAqIGARFwpqcW9woFRb6wZXW9Qq7lUHLpx1F7do3buOilvrXghWcSKOKoqKhlVm7veHv+RrJMFEA0F5PR8PHnqf+9zd+y6XC3nzGSJBEAQQEREREREREREpoKPtAIiIiIiIiIiIqPhi8oiIiIiIiIiIiJRi8oiIiIiIiIiIiJRi8oiIiIiIiIiIiJRi8oiIiIiIiIiIiJRi8oiIiIiIiIiIiJRi8oiIiIiIiIiIiJRi8oiIiIiIiIiIiJRi8oiIiIiIiIiIiJRi8oiIiD5IJBJh0qRJ2g7jk/3xxx/w8PCAvr4+rKystB1OkRCJRBg4cKC2w1Doc72vHjx4AJFIhMjISI3s7/3rMGnSJIhEIrx8+VIj+6fi5fnz5/juu+9gY2MDkUiE+fPnAwDu3r2Lli1bwtLSEiKRCLt370ZkZCREIhEePHig1jGCg4Ph7Oys8dhJ+6TPByKiosbkERGRCuLj4/HTTz+hQoUKMDIygoWFBerVq4cFCxbgv//+03Z4pIJbt24hODgYrq6uWLFiBZYvX15g/dOnT6NVq1YoW7YsjIyMUL58eQQEBGDjxo1FFHHRkn5JfffHzs4OTZo0wYEDB7Qd3mfpxIkTEIlE2L59u7ZD0Rppom3OnDkF1nN2ds537zVo0AC7du1SWH/Xrl1o1aoVSpcuDQMDAzg6OqJTp044duyYSnFlZmZi3rx5qFOnDiwtLWFkZISKFSti4MCBuHPnjtrnqY4hQ4bg4MGDGDNmDP744w/4+/sDAIKCgvDPP/9g2rRp+OOPP+Dj41OocXyquLg4TJo0Se3EVkxMDLp3745y5crB0NAQ1tbWaN68OdasWYO8vLzCCbaIbdy4UZYU/BgZGRmYNGkSTpw4obGYiIg+lZ62AyAiKu7279+P77//HoaGhujZsyeqVKmC7OxsnD59GiNGjMCNGzc+mIj43P3333/Q0/u8PzJOnDgBiUSCBQsWwM3NrcC627Ztww8//AAvLy8MHjwYpUqVQkJCAv7++2+sWLECXbt2LaKoi97kyZPh4uICQRDw/PlzREZGonXr1ti7dy/atm2r0WN9CfeVJvA6vOXl5YVhw4YBAJ4+fYply5ahQ4cOiIiIwM8//wwAEAQBvXv3RmRkJGrUqIGhQ4fC3t4eiYmJ2LVrF5o1a4YzZ86gbt26So/z8uVL+Pv748qVK2jbti26du0KMzMz3L59G5s3b8by5cuRnZ1daOd57NgxtG/fHsOHD5eV/ffffzh37hzGjRsn11KwR48e6Ny5MwwNDdU6xooVKyCRSDQWsyJxcXEICwtD48aNVW7ltHLlSvz8888oU6YMevToAXd3d6SmpuLo0aPo06cPEhMTMXbs2EKNuyhs3LgR169fR2ho6Edtn5GRgbCwMABA48aN5db9+uuvGD169CdGSESkPv6mQkRUgISEBHTu3BlOTk44duwYHBwcZOsGDBiAe/fuYf/+/VqMsPBIJBJkZ2fDyMgIRkZG2g7nkyUlJQGASt3VJk2aBE9PT5w/fx4GBgYK9/OlatWqlVyLhz59+qBMmTLYtGlTgcmj3NxcSCSSfNerIF/CfaUJmrwO775vPzdly5ZF9+7dZcs9e/aEm5sb5s2bJ0sezZ07F5GRkQgNDcVvv/0m131n3Lhx+OOPPz6YiAsODsbVq1exfft2dOzYUW7dlClTMG7cOA2eVX5JSUn5nkMvXrwAkP/5pKurC11dXbWPoa+v/7HhFZrz58/j559/hq+vL/766y+Ym5vL1oWGhuLy5cu4fv26FiP8POjp6THZTERawW5rREQFmDV
rFtLS0rBq1Sq5xJGUm5sbBg8eLFvOzc3FlClT4OrqCkNDQzg7O2Ps2LHIysqS287Z2Rlt27bFiRMn4OPjA2NjY1StWlXWRH3nzp2oWrUqjIyM4O3tjatXr8ptHxwcDDMzM9y/fx9+fn4wNTWFo6MjJk+eDEEQ5OrOmTMHdevWhY2NDYyNjeHt7a2wG410bJwNGzagcuXKMDQ0RFRUlGzdu2OypKamIjQ0FM7OzjA0NISdnR1atGiB6OhouX1u27YN3t7eMDY2RunSpdG9e3c8efJE4bk8efIEgYGBMDMzg62tLYYPH65yF4bff/9dFrOjoyMGDBiAN2/eyF3viRMnAgBsbW0/ONZOfHw8atWqpTARYmdnJ/v/u11y5s2bBycnJxgbG6NRo0YKvwTdunUL3333HaytrWFkZAQfHx/s2bMnX703b94gNDRU1q3Dzc0NM2fOzNeSQNqSSnqv2Nrawt/fH5cvX863z927d6NKlSowNDRE5cqVZa/th1hZWcHY2Fjuy8q75z1//nzZ/R4XF4fs7GxMmDAB3t7esLS0hKmpKRo0aIDjx4/n27eysX7u3buH4OBgWFlZwdLSEr169UJGRsYHYz116hS+//57lC9fHoaGhihXrhyGDBmSr2upOvfcmzdvEBwcDEtLS1hZWSEoKEju3tIEZffjy5cv0alTJ1hYWMDGxgaDBw9GZmZmvm2VvW+vXr2KVq1awcLCAmZmZmjWrBnOnz8vt31OTg7CwsLg7u4OIyMj2NjYoH79+jh8+LCszrVr1xAcHCzrtmtvb4/evXvj1atXGr0O77O3t0elSpWQkJAA4G3rnPDwcHh4eGDOnDkKx33p0aMHateurXSfFy5cwP79+9GnT598iSMAMDQ0zNfF7tixY2jQoAFMTU1hZWWF9u3b4+bNm/m2ffLkCXr37o0yZcrI3merV6+WrZd2DRUEAUuWLJF10Zs0aRKcnJwAACNGjIBIJJK15FE25tGBAwfQqFEjmJubw8LCArVq1ZLrUqtozCOJRIL58+ejcuXKMDIyQpkyZfDTTz/h9evXcvWkn0+nT59G7dq1YWRkhAoVKmDdunVy5/L9998DAJo0aSI7l4K6WYWFhUEkEmHDhg1yiSMpHx8fBAcHy5bT09MxbNgw2XPw66+/xpw5c/J9xknfA9u2bYOnpyeMjY3h6+uLf/75BwCwbNkyuLm5wcjICI0bN853LRs3bowqVargypUrqFu3LoyNjeHi4oKlS5fK1VP2Wki7p0rPvXHjxti/fz8ePnwouy7S10KV5+ODBw9ga2srd83efUYoGvNI3d89CnptiYiUYdqaiKgAe/fuRYUKFQrsAvGuvn37Yu3atfjuu+8wbNgwXLhwAeHh4bh582a+sTvu3buHrl274qeffkL37t0xZ84cBAQEYOnSpRg7diz69+8PAAgPD0enTp1w+/Zt6Oj8L+efl5cHf39/fPPNN5g1axaioqIwceJE5ObmYvLkybJ6CxYsQLt27dCtWzdkZ2dj8+bN+P7777Fv3z60adNGLqZjx45h69atGDhwIEqXLq20K8LPP/+M7du3Y+DAgfD09MSrV69w+vRp3Lx5EzVr1gTw9hftXr16oVatWggPD8fz58+xYMECnDlzBlevXpX7C3teXh78/PxQp04dzJkzB0eOHMHcuXPh6uqKkJCQAq/5pEmTEBYWhubNmyMkJAS3b99GREQELl26hDNnzkBfXx/z58/HunXrsGvXLkRERMDMzAzVqlVTuk8nJyccPXoU//77L7766qsCjw8A69atQ2pqKgYMGIDMzEwsWLAATZs2xT///IMyZcoAAG7cuIF69eqhbNmyGD16NExNTbF161YEBgZix44d+PbbbwG87a7QqFEjPHnyBD/99BPKly+Ps2fPYsyYMUhMTJQbR6NPnz6IjIxEq1at0LdvX+Tm5uLUqVM4f/68XOuh06dPY+fOnejfvz/Mzc2xcOFCdOzYEY8ePYKNjY3cuYj
FYrx8+RKCICApKQmLFi1CWlqaXIsQqTVr1iAzMxM//vijbOySlJQUrFy5El26dEG/fv2QmpqKVatWwc/PDxcvXoSXl9cHr2enTp3g4uKC8PBwREdHY+XKlbCzs8PMmTML3G7btm3IyMhASEgIbGxscPHiRSxatAj//vsvtm3bJldXlXtOEAS0b98ep0+fxs8//4xKlSph165dCAoK+uA5aEKnTp3g7OyM8PBwnD9/HgsXLsTr16/zfdFT9L69ceMGGjRoAAsLC4wcORL6+vpYtmwZGjdujJMnT6JOnToA3r5/wsPD0bdvX9SuXRspKSm4fPkyoqOj0aJFCwDA4cOHcf/+ffTq1Qv29vayrro3btzA+fPnC23w3pycHDx+/Fh2j54+fRrJyckIDQ39qNY4AGTJ2h49eqhU/8iRI2jVqhUqVKiASZMm4b///sOiRYtQr149REdHy56Rz58/xzfffCNLZNja2uLAgQPo06cPUlJSEBoaioYNG+KPP/5Ajx490KJFC/Ts2RMAUK1aNVhZWWHIkCHo0qULWrduDTMzM6UxRUZGonfv3qhcuTLGjBkDKysrXL16FVFRUQV2qf3pp59kz+VBgwYhISEBixcvxtWrV2XPSql79+7hu+++Q58+fRAUFITVq1cjODgY3t7eqFy5Mho2bIhBgwZh4cKFGDt2LCpVqgQAsn/fl5GRgaNHj6Jhw4YoX778B6+7IAho164djh8/jj59+sDLywsHDx7EiBEj8OTJE8ybN0+u/qlTp7Bnzx4MGDAAwNvPzbZt22LkyJH4/fff0b9/f7x+/RqzZs1C7969842N9fr1a7Ru3RqdOnVCly5dsHXrVoSEhMDAwAC9e/f+YLzvGjduHMRiMf79919ZnNLXU5Xno62tLSIiIhASEoJvv/0WHTp0AIACP7PU/d2joNeWiEgpgYiIFBKLxQIAoX379irVj4mJEQAIffv2lSsfPny4AEA4duyYrMzJyUkAIJw9e1ZWdvDgQQGAYGxsLDx8+FBWvmzZMgGAcPz4cVlZUFCQAED45ZdfZGUSiURo06aNYGBgILx48UJWnpGRIRdPdna2UKVKFaFp06Zy5QAEHR0d4caNG/nODYAwceJE2bKlpaUwYMAApdciOztbsLOzE6pUqSL8999/svJ9+/YJAIQJEybkO5fJkyfL7aNGjRqCt7e30mMIgiAkJSUJBgYGQsuWLYW8vDxZ+eLFiwUAwurVq2VlEydOFADIXRtlVq1aJQAQDAwMhCZNmgjjx48XTp06JXcMQRCEhIQE2Wv277//ysovXLggABCGDBkiK2vWrJlQtWpVITMzU1YmkUiEunXrCu7u7rKyKVOmCKampsKdO3fkjjV69GhBV1dXePTokSAIgnDs2DEBgDBo0KB88UskEtn/pedx7949WVlsbKwAQFi0aJGsbM2aNQKAfD+GhoZCZGSkwvO2sLAQkpKS5Nbl5uYKWVlZcmWvX78WypQpI/Tu3Vuu/P37SvoavV/v22+/FWxsbPKd5/vev9cFQRDCw8MFkUgk955S9Z7bvXu3AECYNWuW3Pk1aNBAACCsWbOmwHiOHz8uABC2bdtWYD1l16Fdu3Zy9fr37y8AEGJjY+W2VfS+DQwMFAwMDIT4+HhZ2dOnTwVzc3OhYcOGsrLq1asLbdq0KTA+Rdd106ZNAgDh77//LnBb6b0ye/bsAus5OTkJLVu2FF68eCG8ePFCiI2NFTp37iz3nFuwYIEAQNi1a1eB+yrIt99+KwAQXr9+rVJ9Ly8vwc7OTnj16pWsLDY2VtDR0RF69uwpK+vTp4/g4OAgvHz5Um77zp07C5aWlnLXEEC+56ey6yR9XyYkJAiCIAhv3rwRzM3NhTp16sg9WwVB/n0fFBQkODk5yZZPnTolABA2bNggt01UVFS+cunn07uvbVJSkmBoaCgMGzZMVrZt27Z8n03KSJ85gwcP/mBdQfjfe2/q1Kly5d99950gEonknmfS55T0Ggn
C/z437e3thZSUFFn5mDFj5K6nIAhCo0aNBADC3LlzZWVZWVmy1z47O1sQhPyvhZT0ff7udWjTpo3c9ZdS9fn44sWLfM8FKenzQepjfvf40GtLRKQIu60RESmRkpICAAqb1yvy119/AQCGDh0qVy4dAPb9sZE8PT3h6+srW5a2BGjatKncX2al5ffv3893zHcHVpX+xTs7OxtHjhyRlRsbG8v+//r1a4jFYjRo0CBfFzMAaNSoETw9PT9wpm+7Ml24cAFPnz5VuP7y5ctISkpC//795cZeadOmDTw8PBSOEyUd00SqQYMGCs/5XUeOHEF2djZCQ0PlWmX169cPFhYWHz0eVe/evREVFYXGjRvj9OnTmDJlCho0aAB3d3ecPXs2X/3AwECULVtWtly7dm3UqVNHdk8kJyfj2LFj6NSpE1JTU/Hy5Uu8fPkSr169gp+fH+7evSvrzrdt2zY0aNAApUqVktV7+fIlmjdvjry8PPz9998AgB07dkAkEsm6473r/ZYgzZs3h6urq2y5WrVqsLCwUHh9lyxZgsOHD+Pw4cNYv349mjRpgr59+2Lnzp356nbs2FHWvUJKV1dX1t1PIpEgOTkZubm58PHxUXjPKaLoXnj16pXsPanMu/d6eno6Xr58ibp160IQhHxdP5Ud591r8tdff0FPT0+u9Zuuri5++eUXlc7jU0lbUUhJjyu9r6Tef9/m5eXh0KFDCAwMRIUKFWTlDg4O6Nq1K06fPi27llZWVrhx4wbu3r2rNI53r2tmZiZevnyJb775BgBUfk1VcejQIdja2sLW1hbVq1fHtm3b0KNHD1mLM3WfyYqos4/ExETExMQgODgY1tbWsvJq1aqhRYsWstdBEATs2LEDAQEBEARB7n3r5+cHsVisset0+PBhpKamYvTo0fnGtSqoBdi2bdtgaWmJFi1ayMXn7e0NMzOzfN1KPT090aBBA9myra0tvv766w8+k5X5mM9TXV1dDBo0SK582LBhEAQh3wyQzZo1k2spK/3c7Nixo9wxlX2e6unp4aeffpItGxgY4KeffkJSUhKuXLmiUsyq0MTz8X0f87uHJl9bIio52G2NiEgJCwsLAG/H91HFw4cPoaOjk28mL3t7e1hZWeHhw4dy5e833be0tAQAlCtXTmH5++NS6OjoyH0xBICKFSsCgNyYDPv27cPUqVMRExMjN/6Boi8aLi4uSs/vXbNmzUJQUBDKlSsHb29vtG7dGj179pTFIz3Xr7/+Ot+2Hh4eOH36tFyZdLyed5UqVSrfOb9P2XEMDAxQoUKFfNdcHX5+fvDz80NGRgauXLmCLVu2YOnSpWjbti1u3bolN/aRu7t7vu0rVqyIrVu3AnjbTUAQBIwfPx7jx49XeLykpCSULVsWd+/exbVr1/Jdj3frAW/HZXJ0dJT7UquMom4iyq5v7dq15bq8denSBTVq1MDAgQPRtm1buXGglN0va9euxdy5c3Hr1i3k5OR8sP6H4i1VqhSAt+8B6ftSkUePHmHChAnYs2dPvnMTi8Vyy6rccw8fPoSDg0O+LkSK7uvC8P595erqCh0dnXxjrrx/XV+8eIGMjAyFcVaqVAkSiQSPHz9G5cqVMXnyZLRv3x4VK1ZElSpV4O/vjx49esh1kUlOTkZYWBg2b96cb8D496/rp6hTpw6mTp0KkUgEExMTVKpUSa57q7rPZEXe3ceHBs8v6DlWqVIlHDx4EOnp6UhPT8ebN2+wfPlypTNvamqg/fj4eABAlSpV1Nru7t27EIvFcs+td70fnzrPDFV8zOepo6NjvmSTtFucpj9PHR0dYWpqKlf27uepNFmqCZ/6fHzfp/7uAXzaa0tEJQeTR0RESlhYWMDR0VHt2V9UHf9D2ZgdysqF9wYJVcWpU6fQrl07NGzYEL///jscHBygr6+PNWvWyA2uKvVuC4OCdOrUCQ0aNMCuXbtw6NAhzJ49GzNnzsTOnTvRqlUrteP82PFLioKJiQkaNGiABg0
aoHTp0ggLC8OBAwfUGvdGOtD18OHD4efnp7CO9Bd/iUSCFi1aYOTIkQrrSb/QqONT7ikdHR00adIECxYswN27d+XGxFB0v6xfvx7BwcEIDAzEiBEjYGdnB11dXYSHh8u++BZGvHl5eWjRogWSk5MxatQoeHh4wNTUFE+ePEFwcHC+wcaL8z2njLJni6rvW0UaNmyI+Ph4/Pnnnzh06BBWrlyJefPmYenSpejbty+At+/3s2fPYsSIEfDy8oKZmRkkEgn8/f01Oh186dKl0bx5c6XrPTw8AAD//PMPAgMDP+oY7+7j3dYXn0J6Dbp37670uVDQeDVFQSKRwM7ODhs2bFC4XlELQkU+5nMIePt809PTkw1irWlF8Xmq7P2n6sQOgGaej+rG9z5Nv7ZEVHIweUREVIC2bdti+fLlOHfunFwXM0WcnJwgkUhw9+5duUFDnz9/jjdv3shm09EUiUSC+/fvyyUT7ty5AwCy5vs7duyAkZERDh48CENDQ1m9NWvWfPLxHRwc0L9/f/Tv3x9JSUmoWbMmpk2bhlatWsnO9fbt22jatKncdrdv39bYtXj3OO+2wsrOzkZCQkKBX0Q/hrRFTmJioly5oi4/d+7ckb0O0tj09fU/GJOrqyvS0tJUqnfw4EEkJyer1ProU+Tm5gIA0tLSPlh3+/btqFChAnbu3Cn3ZUZR9zpN+ueff3Dnzh2sXbtWNhAxALlZw9QlHTg9LS1NrvXR7du3PylWVd29e1euNcK9e/cgkUiUDmQvZWtrCxMTE4Vx3rp1Czo6OnItMqytrdGrVy/06tULaWlpaNiwISZNmoS+ffvi9evXOHr0KMLCwjBhwgS52Ipa/fr1UapUKWzatAljx479qARgQEAAwsPDsX79+g8mj959vrzv1q1bKF26NExNTWFkZARzc3Pk5eVp/JnzPmn30+vXr+drafKh7Y4cOYJ69ep9UrLxXeoMlG5iYoKmTZvi2LFjePz4cb4WQe9zcnLCkSNHkJqaKtf66NatW7L1mvT06VOkp6fLtT56//NU2gLy/dkWFbVwVXZtVH0+qnNti/p3DyIquTjmERFRAUaOHAlTU1P07dsXz58/z7c+Pj4eCxYsAAC0bt0aAORmwwKA3377DQDyzWymCYsXL5b9XxAELF68GPr6+mjWrBmAt39hFIlEcn8ZffDgAXbv3v3Rx8zLy8vXVcXOzg6Ojo6ybnE+Pj6ws7PD0qVL5brKHThwADdv3tTYtWjevDkMDAywcOFCub+arlq1CmKx+KOPc/ToUYXl0rEl3u/Gsnv3btmYRQBw8eJFXLhwQdYKy87ODo0bN8ayZcvyJZ6At92MpDp16oRz587h4MGD+eq9efNGlsjp2LEjBEFAWFhYvnqa/AtyTk4ODh06BAMDA6UzKb1L+oX+3RguXLiAc+fOaSwmVY8rCILs/fkxWrdujdzcXERERMjK8vLysGjRoo8PVA1LliyRW5Ye90Ot+3R1ddGyZUv8+eefcl3cnj9/jo0bN6J+/fqybkSvXr2S29bMzAxubm6y962i6wrkf84VBRMTE4waNQo3b97EqFGjFN7n69evx8WLF5Xuw9fXF/7+/li5cqXC52B2djaGDx8O4G2C3MvLC2vXrpVLGFy/fh2HDh2SPfN1dXXRsWNH7NixQ2FL1Xff35+qZcuWMDc3R3h4ODIzM+XWFfS+79SpE/Ly8jBlypR863Jzc/MlRFQhTbSouu3EiRMhCAJ69OihMBF95coVrF27FsDb915eXp7cZxwAzJs3DyKR6KNauBYkNzcXy5Ytky1nZ2dj2bJlsLW1hbe3N4D/Je6k484Bb58HiroqmpqaKuzSqerz0cTEBIBq11Ybv3sQUcnElkdERAVwdXXFxo0b8cMPP6BSpUro2bMnqlSpguzsbJw9exbbtm1DcHAwAKB69eoICgrC8uXL8ebNGzRq1AgXL17E2rVrERgYiCZNmmg0NiMjI0RFRSEoKAh16tTBgQMHsH/
/fowdO1bWBaFNmzb47bff4O/vj65duyIpKQlLliyBm5sbrl279lHHTU1NxVdffYXvvvsO1atXh5mZGY4cOYJLly5h7ty5AN62sJk5cyZ69eqFRo0aoUuXLnj+/DkWLFgAZ2dnDBkyRCPXwNbWFmPGjEFYWBj8/f3Rrl073L59G7///jtq1aqlcHp5VbRv3x4uLi4ICAiAq6sr0tPTceTIEezduxe1atVCQECAXH03NzfUr18fISEhyMrKwvz582FjYyPX9WzJkiWoX78+qlatin79+qFChQp4/vw5zp07h3///RexsbEAgBEjRmDPnj1o27atbPrk9PR0/PPPP9i+fTsePHiA0qVLo0mTJujRowcWLlyIu3fvyroQnTp1Ck2aNJEbTF0dBw4ckP11PykpCRs3bsTdu3cxevToAscbkmrbti127tyJb7/9Fm3atEFCQgKWLl0KT09PlVoufSwPDw+4urpi+PDhePLkCSwsLLBjx45PGscjICAA9erVw+jRo/HgwQN4enpi586dao/zs2PHDtk1fZd03DBlEhIS0K5dO/j7++PcuXNYv349unbtiurVq3/wmFOnTsXhw4dRv3599O/fH3p6eli2bBmysrIwa9YsWT1PT080btwY3t7esLa2xuXLl7F9+3bZ/WNhYYGGDRti1qxZyMnJQdmyZXHo0CEkJCSodQ2OHj2aL9kBvB1sXp3xe0aMGIEbN25g7ty5OH78OL777jvY29vj2bNn2L17Ny5evKhwUPt3rVu3Di1btkSHDh0QEBCAZs2awdTUFHfv3sXmzZuRmJiIOXPmAABmz56NVq1awdfXF3369MF///2HRYsWwdLSEpMmTZLtc8aMGTh+/Djq1KmDfv36wdPTE8nJyYiOjsaRI0eQnJys8jkWxMLCAvPmzUPfvn1Rq1YtdO3aFaVKlUJsbCwyMjJkyZf3NWrUCD/99BPCw8MRExODli1bQl9fH3fv3sW2bduwYMECfPfdd2rF4uXlBV1dXcycORNisRiGhoZo2rSp0nGV6tatiyVLlqB///7w8PBAjx494O7ujtTUVJw4cQJ79uzB1KlTAbx97zVp0gTjxo3DgwcPUL16dRw6dAh//vknQkND5SYA0ARHR0fMnDkTDx48QMWKFbFlyxbExMRg+fLl0NfXBwBUrlwZ33zzDcaMGSNr8bl582ZZQv9d3t7e2LJlC4YOHYpatWrBzMwMAQEBKj8fjY2N4enpiS1btqBixYqwtrZGlSpVFL5Xivp3DyIqwYpwZjcios/WnTt3hH79+gnOzs6CgYGBYG5uLtSrV09YtGiR3NTrOTk5QlhYmODi4iLo6+sL5cqVE8aMGSNXRxDeTperaHpsqDiFc1BQkGBqairEx8cLLVu2FExMTIQyZcoIEydOzDed/KpVqwR3d3fB0NBQ8PDwENasWZNvql9lx353nXTK4KysLGHEiBFC9erVBXNzc8HU1FSoXr268Pvvv+fbbsuWLUKNGjUEQ0NDwdraWujWrZvclPbvnsv7FMWozOLFiwUPDw9BX19fKFOmjBASEpJvKm7p/l68ePHB/W3atEno3Lmz4OrqKhgbGwtGRkaCp6enMG7cOLlpn999bebOnSuUK1dOMDQ0FBo0aCA3nbpUfHy80LNnT8He3l7Q19cXypYtK7Rt21bYvn27XL3U1FRhzJgxgpubm2BgYCCULl1aqFu3rjBnzhzZtNGC8Hba59mzZwseHh6CgYGBYGtrK7Rq1Uq4cuWKrI6y19XJyUkICgqSLUunoX73x8jISPDy8hIiIiLkpgEvaPp1iUQiTJ8+XXBychIMDQ2FGjVqCPv27cs3dbg0NkVT1L//GimbIvt9cXFxQvPmzQUzMzOhdOnSQr9+/WRThK9Zs0ZWT5177tWrV0KPHj0ECwsLwdLSUujRo4dw9erVfPtURDqFt7KfU6dOFXgd4uLihO+++04wNzcXSpUqJQwcODDf9OwFvW+jo6MFPz8/wczMTDA
xMRGaNGkinD17Vq7O1KlThdq1awtWVlaCsbGx4OHhIUybNk3uPvv333+Fb7/9VrCyshIsLS2F77//Xnj69KnSqcTfJb1XlP388ccfgiAofyYqs337dqFly5aCtbW1oKenJzg4OAg//PCDcOLECZW2z8jIEObMmSPUqlVLMDMzEwwMDAR3d3fhl19+kZsGXhAE4ciRI0K9evUEY2NjwcLCQggICBDi4uLy7fP58+fCgAEDhHLlygn6+vqCvb290KxZM2H58uVy9VR9zguC8nt/z549Qt26dWUx1a5dW9i0aZNsvaL3myAIwvLlywVvb2/B2NhYMDc3F6pWrSqMHDlSePr0qayOsteiUaNGQqNGjeTKVqxYIVSoUEHQ1dXNN129MleuXBG6du0qODo6Cvr6+kKpUqWEZs2aCWvXrpX7/EpNTRWGDBkiq+fu7i7Mnj1b7lkkCOpdT+l7ctu2bXLnVblyZeHy5cuCr6+vYGRkJDg5OQmLFy/OF3t8fLzQvHlzwdDQUChTpowwduxY4fDhw/nOPS0tTejatatgZWUlAJC9Fuo8H8+ePSt4e3sLBgYGcu81Rc+pT/3dQ9FrS0T0PpEgcHQ0IqLPTXBwMLZv316oLTnowx48eAAXFxfMnj1b1tWFiIg+H40bN8bLly/VnhyDiKik4ZhHRERERERERESkFJNHRERERERERESkFJNHRERERERERESkFMc8IiIiIiIiIiIipdjyiIiIiIiIiIiIlGLyiIiIiIiIiIiIlNLTdgDFnUQiwdOnT2Fubg6RSKTtcIiIiIiIiIiINEIQBKSmpsLR0RE6OsrbFzF59AFPnz5FuXLltB0GEREREREREVGhePz4Mb766iul65k8+gBzc3MAby+khYWFlqMhIiIiIiIiItKMlJQUlCtXTpb7UIbJow+QdlW79TIXTRzNoavDrmtERERERERE9OX40DA9n9WA2X///TcCAgLg6OgIkUiE3bt3f3CbEydOoGbNmjA0NISbmxsiIyM/6ti9115C/ZnHEHU98aO2JyIiIiIiIiL6HH1WyaP09HRUr14dS5YsUal+QkIC2rRpgyZNmiAmJgahoaHo27cvDh48qPaxq4ru45k4EyHro5lAIiIiIiIiIqISQyQIgqDuRqdOncKyZcsQHx+P7du3o2zZsvjjjz/g4uKC+vXrF0ac+YhEIuzatQuBgYFK64waNQr79+/H9evXZWWdO3fGmzdvEBUVpdJxUlJSYGlpCfFoc2zQbYdZuV1gb2mE06OasgsbEREREREREX22ZDkPsbjAcZ7VHvNox44d6NGjB7p164arV68iKysLACAWizF9+nT89ddfHx+1hp07dw7NmzeXK/Pz80NoaKjSbbKysmTnBLy9kFIhentxMK8WYsRuuJiQDF9XG43HTERERERERKQOQRCQm5uLvLw8bYdCxYyuri709PQ+OKbRh6idPJo6dSqWLl2Knj17YvPmzbLyevXqYerUqZ8UjKY9e/YMZcqUkSsrU6YMUlJS8N9//8HY2DjfNuHh4QgLC1O6TxdRImIENySlZmo8XiIiIiIiIiJ1ZGdnIzExERkZGdoOhYopExMTODg4wMDA4KP3oXby6Pbt22jYsGG+cktLS7x58+ajAykuxowZg6FDh8qWpdPWSSUIDgAAO3OjIo+NiIiIiIiISEoikSAhIQG6urpwdHSEgYHBJ7cwoS+HIAjIzs7GixcvkJCQAHd3d+jofNzQ12onj+zt7XHv3j04OzvLlZ8+fRoVKlT4qCAKi729PZ4/fy5X9vz5c1hYWChsdQQAhoaGMDQ0VLju99wAxApucLA0Qm0Xa43HS0RERERERKSq7OxsSCQSlCtXDiYmJtoOh4ohY2Nj6Ovr4+HDh8jOzoaR0cc1hFE7edSvXz8MHjwYq1evhkgkwtOnT3Hu3DkMHz4c48eP/6ggCouvr2++MZgOHz4MX19ftffVJWsc4gyqAAAmBnhysGwiIiIiIiIqFj6
2NQmVDJq4P9ROHo0ePRoSiQTNmjVDRkYGGjZsCENDQwwfPhy//PLLJwdUkLS0NNy7d0+2nJCQgJiYGFhbW6N8+fIYM2YMnjx5gnXr1gEAfv75ZyxevBgjR45E7969cezYMWzduhX79+9X+9j/CBVQ1tIIEwM84V/FQWPnRERERERERERUnIkEQRA+ZsPs7Gzcu3cPaWlp8PT0hJmZmaZjy+fEiRNo0qRJvvKgoCBERkYiODgYDx48wIkTJ+S2GTJkCOLi4vDVV19h/PjxCA4OVvmY0mnrDl9NQJNqTmxxRERERERERMVCZmYmEhIS4OLi8tHdkejLV9B9Is15iMViWFhYKN3HRyePSgpVLyQRERERERFRUfpSk0cikQi7du1CYGAgHjx4ABcXF1y9ehVeXl7aDu2zpInkkdrd1po0aVLg6O3Hjh1Td5dEREREREREVIIEBwfjzZs32L17d751iYmJKFWqVNEHRUqpnTx6P9OXk5ODmJgYXL9+HUFBQZqKi4iIiIiIiIiKSJ5EwMWEZCSlZsLO/O0M49oatsXe3v6TthcEAXl5edDTUzvlQUqoPeT2vHnz5H4WL16M06dPIzQ0FPr6+oURIxEREREREREVkqjriag/8xi6rDiPwZtj0GXFedSfeQxR1xO1Eo9IJMrXIunWrVuoW7cujIyMUKVKFZw8eVK27sSJExCJRDhw4AC8vb1haGiI06dPIysrC4MGDYKdnR2MjIxQv359XLp0Sbbd69ev0a1bN9ja2sLY2Bju7u5Ys2aNbP2oUaNQsWJFmJiYoEKFChg/fjxycnIK/fyLI43N59e9e3esXr1aU7sjIiIiIiIiokIWdT0RIeujkSjOlCt/Js5EyPporSWQ3jdixAgMGzYMV69eha+vLwICAvDq1Su5OqNHj8aMGTNw8+ZNVKtWDSNHjsSOHTuwdu1aREdHw83NDX5+fkhOTgYAjB8/HnFxcThw4ABu3ryJiIgIlC5dWrY/c3NzREZGIi4uDgsWLMCKFSswb968Ij3v4kJjyaNz5859UQN0EREREREREX3J8iQCwvbGQdEsWtKysL1xyJNof56tgQMHomPHjqhUqRIiIiJgaWmJVatWydWZPHkyWrRoAVdXVxgaGiIiIgKzZ89Gq1at4OnpiRUrVsDY2Fi23aNHj1CjRg34+PjA2dkZzZs3R0BAgGx/v/76K+rWrQtnZ2cEBARg+PDh2Lp1a5Ged3GhdgfADh06yC0LgoDExERcvnwZ48eP11hgRERERERERFR4LiYk52tx9C4BQKI4ExcTkuHralN0gSng6+sr+7+enh58fHxw8+ZNuTo+Pj6y/8fHxyMnJwf16tWTlenr66N27dqy7UJCQtCxY0dER0ejZcuWCAwMRN26dWX1t2zZgoULFyI+Ph5paWnIzc0tsbOwq93yyNLSUu7H2toajRs3xl9//YWJEycWRoxEREREREREpGFJqcoTRx9TT9tMTU3Vqt+qVSs8fPgQQ4YMwdOnT9GsWTMMHz4cwNveVd26dUPr1q2xb98+XL16FePGjUN2dnZhhF7sqd3y6N3Bo4iIiIiIiIjo82RnrtrQM6rWK0znz59Hw4YNAQC5ubm4cuUKBg4cqLS+q6srDAwMcObMGTg5OQF4O1v8pUuXEBoaKqtna2uLoKAgBAUFoUGDBhgxYgTmzJmDs2fPwsnJCePGjZPVffjwYeGc3GeA89YRERERERERlUC1XazhYGmEZ+JMheMeiQDYWxqhtot1oRxfLBYjJiZGrszGRnH3uCVLlsDd3R2VKlXCvHnz8Pr1a/Tu3Vvpvk1NTRESEoIRI0bA2toa5cuXx6xZs5CRkYE+ffoAACZMmABvb29UrlwZWVlZ2LdvHypVqgQAcHd3x6NHj7B582bUqlUL+/fvx65duzRz4p8hlZJHpUqVgkgkUmmH0lHLiYiIiIiIiKj40tURYWKAJ0LWR0MEyCWQpBmAiQGe0NVRLR+grhMnTqBGjRp
yZdLEzvtmzJiBGTNmICYmBm5ubtizZ4/czGjKtpFIJOjRowdSU1Ph4+ODgwcPolSpUgAAAwMDjBkzBg8ePICxsTEaNGiAzZs3AwDatWuHIUOGYODAgcjKykKbNm0wfvx4TJo06dNP/DMkEgThg8Omr127VuUdBgUFfVJAxU1KSgosLS0hFotL7MBYREREREREVPxkZmYiISEBLi4unzT7edT1RITtjZMbPNvB0ggTAzzhX8VBE6GSFhV0n6ia81Cp5dGXlhAiIiIiIiIiorf8qzighac9LiYkIyk1E3bmb7uqFVaLI/r8fNKYR5mZmflGGmfrHCIiIiIiIqLPi66OCL6uiscbItJRd4P09HQMHDgQdnZ2MDU1RalSpeR+iIiIiIiIiIjoy6F28mjkyJE4duwYIiIiYGhoiJUrVyIsLAyOjo5Yt25dYcRIRERERERERERaona3tb1792LdunVo3LgxevXqhQYNGsDNzQ1OTk7YsGEDunXrVhhxEhERERERERGRFqjd8ig5ORkVKlQA8HZ8o+TkZABA/fr18ffff2s2OiIiIiIiIiIi0iq1k0cVKlRAQkICAMDDwwNbt24F8LZFkpWVlUaDIyIiIiIiIiIi7VI7edSrVy/ExsYCAEaPHo0lS5bAyMgIQ4YMwYgRIzQeIBERERERERERaY/aYx4NGTJE9v/mzZvj1q1buHLlCtzc3FCtWjWNBkdERERERERERNqldsujx48fyy07OTmhQ4cOTBwRERERERER0Rdl0qRJ8PLy0moMjRs3RmhoqFZjUDt55OzsjEaNGmHFihV4/fp1YcRERERERERERF+4Fy9eICQkBOXLl4ehoSHs7e3h5+eHM2fOaDs0tQQHB0MkEsl+bGxs4O/vj2vXrmlk/zt37sSUKVM0sq+PpXby6PLly6hduzYmT54MBwcHBAYGYvv27cjKyiqM+IiIiIiIiIjoC9SxY0dcvXoVa9euxZ07d7Bnzx40btwYr1690nZoavP390diYiISExNx9OhR6OnpoW3btgVuk5OTo9K+ra2tYW5urokwP5rayaMaNWpg9uzZePToEQ4cOABbW1v8+OOPKFOmDHr37l0YMRIRERERERHRF+TNmzc4deoUZs6ciSZNmsDJyQm1a9fGmDFj0K5dOwCASCRCREQEWrVqBWNjY1SoUAHbt2+X28/jx4/RqVMnWFlZwdraGu3bt8eDBw/k6qxcuRKVKlWCkZERPDw88Pvvv8ut//fff9GlSxdYW1vD1NQUPj4+uHDhglydP/74A87OzrC0tETnzp2Rmpoqt17acsre3h5eXl4YPXo0Hj9+jBcvXgAAHjx4AJFIhC1btqBRo0YwMjLChg0b8OrVK3Tp0gVly5aFiYkJqlatik2bNsnt+/1ua87Ozpg+fTp69+4Nc3NzlC9fHsuXL1f7NVCH2skjKZFIhCZNmmDFihU4cuQIXFxcsHbtWk3GRkRERERERERF5d/LQOzmt/8WMjMzM5iZmWH37t0F9mQaP348OnbsiNjYWHTr1g2dO3fGzZs3AbxtuePn5wdzc3OcOnUKZ86cgZmZGfz9/ZGdnQ0A2LBhAyZMmIBp06bh5s2bmD59OsaPHy/LX6SlpaFRo0Z48uQJ9uzZg9jYWIwcORISiUQWQ3x8PHbv3o19+/Zh3759OHnyJGbMmKE05rS0NKxfvx5ubm6wsbGRWzd69GgMHjwYN2/ehJ+fHzIzM+Ht7Y39+/fj+vXr+PHHH9GjRw9cvHixwOs3d+5c+Pj44OrVq+jfvz9CQkJw+/btgi/6J1B7tjWpf//9Fxs3bsTGjRtx/fp1+Pr6YsmSJZqMjYiIiIiIiIiKwuGJwJn5/1uuFwq0CCu0w+np6SEyMhL9+vXD0qVLUbNmTTRq1AidO3eWm5Dr+++/R9++fQEAU6ZMweHDh7Fo0SL8/vvv2LJlCyQSCVauXAmRSAQAWLNmDaysrHDixAm0bNkSEydOxNy5c9GhQwcAgIu
LC+Li4rBs2TIEBQVh48aNePHiBS5dugRra2sAgJubm1ysEokEkZGRsq5jPXr0wNGjRzFt2jRZnX379sHMzAwAkJ6eDgcHB+zbtw86OvJtdkJDQ2WxSA0fPlz2/19++QUHDx7E1q1bUbt2baXXr3Xr1ujfvz8AYNSoUZg3bx6OHz+Or7/++kOX/qOo3fJo2bJlaNSoEZydnbFu3Tr88MMPiI+Px6lTp/Dzzz8XRoxEREREREREVFj+vSyfOALeLhdyC6SOHTvi6dOn2LNnD/z9/XHixAnUrFkTkZGRsjq+vr5y2/j6+spaHsXGxuLevXswNzeXtWSytrZGZmYm4uPjkZ6ejvj4ePTp00e23szMDFOnTkV8fDwAICYmBjVq1JAljhRxdnaWG3PIwcEBSUlJcnWaNGmCmJgYxMTE4OLFi/Dz80OrVq3w8OFDuXo+Pj5yy3l5eZgyZQqqVq0Ka2trmJmZ4eDBg3j06FGB1+7dBJtIJIK9vX2+mDRJ7ZZHU6dORZcuXbBw4UJUr169MGIiIiIiIiIioqLy6p7y8q98FK/TECMjI7Ro0QItWrTA+PHj0bdvX0ycOBHBwcEf3DYtLQ3e3t7YsGFDvnW2trZIS0sDAKxYsQJ16tSRW6+rqwsAMDY2/uBx9PX15ZZFIpFctzYAMDU1lWuxtHLlSlhaWmLFihWYOnWqXL13zZ49GwsWLMD8+fNRtWpVmJqaIjQ0VNbt7lNi0iS1k0ePHj2SNQcjIiIiIiIios+cjZt65YXI09MTu3fvli2fP38ePXv2lFuuUaMGAKBmzZrYsmUL7OzsYGFhkW9flpaWcHR0xP3799GtWzeFx6tWrRpWrlyJ5OTkAlsfqUskEkFHRwf//fdfgfXOnDmD9u3bo3v37gDedpG7c+cOPD09NRaLJqjdbY2JIyIiIiIiIqIvyFc+b8c4ele9IYXa6ujVq1do2rQp1q9fj2vXriEhIQHbtm3DrFmz0L59e1m9bdu2YfXq1bhz5w4mTpyIixcvYuDAgQCAbt26oXTp0mjfvj1OnTqFhIQEnDhxAoMGDcK///4LAAgLC0N4eDgWLlyIO3fu4J9//sGaNWvw22+/AQC6dOkCe3t7BAYG4syZM7h//z527NiBc+fOqXU+WVlZePbsGZ49e4abN2/il19+QVpaGgICAgrczt3dHYcPH8bZs2dx8+ZN/PTTT3j+/Llaxy4KHz1gNhERERERERF9IVqEAZUC3nZVs3Er9O5qZmZmqFOnDubNm4f4+Hjk5OSgXLly6NevH8aOHSurFxYWhs2bN6N///5wcHDApk2bZK1yTExM8Pfff2PUqFHo0KEDUlNTUbZsWTRr1kzWEqlv374wMTHB7NmzMWLECJiamqJq1aoIDQ0FABgYGODQoUMYNmwYWrdujdzcXHh6eqo9IVhUVBQcHBwAAObm5vDw8MC2bdvQuHHjArf79ddfcf/+ffj5+cHExAQ//vgjAgMDIRaL1Tp+YRMJgiBoO4jiLCUlBZaWlhCLxQqbwRERERERERFpQ2ZmJhISEuDi4gIjIyNth6NxIpEIu3btQmBgoLZD+awVdJ+omvNQu9saERERERERERGVHGonj5o2bYo3b97kK09JSUHTpk01ERMRERERERERERUTao95dOLECYVTxmVmZuLUqVMaCYqIiIiIiIiISjaOslN8qJw8unbtmuz/cXFxePbsmWw5Ly8PUVFRKFu2rGajIyIiIiIiIiIirVI5eeTl5QWRSASRSKSwe5qxsTEWLVqk0eCIiIiIiIiIqGBsoUMF0cT9ofKYRwkJCYiPj4cgCLh48SISEhJkP0+ePEFKSgp69+79yQF9yJIlS+Ds7AwjIyPUqVMHFy9eVFo3MjJSlvCS/nyJI9ATERERERFRyaOvrw8AyMjI0HIkVJxJ7w/p/fIxVG555OTkBACQSCQffbBPtWXLFgwdOhRLly5FnTp1MH/+fPj5+eH27duws7NTuI2FhQVu374tWxa
JREUVLhEREREREVGh0dXVhZWVFZKSkgAAJiYm/M5LMoIgICMjA0lJSbCysoKuru5H70vtAbMB4O7duzh+/DiSkpLyJZMmTJjw0cF8yG+//YZ+/fqhV69eAIClS5di//79WL16NUaPHq1wG5FIBHt7+0KLiYiIiIiIiEhbpN93pQkkovdZWVl9cl5E7eTRihUrEBISgtKlS8Pe3l4uqykSiQoteZSdnY0rV65gzJgxsjIdHR00b94c586dU7pdWloanJycIJFIULNmTUyfPh2VK1dWWj8rKwtZWVmy5ZSUFM2cABEREREREZGGiUQiODg4wM7ODjk5OdoOh4oZfX39T2pxJKV28mjq1KmYNm0aRo0a9ckHV8fLly+Rl5eHMmXKyJWXKVMGt27dUrjN119/jdWrV6NatWoQi8WYM2cO6tatixs3buCrr75SuE14eDjCwsI0Hj8RERERERFRYdHV1dVIkoBIEZUHzJZ6/fo1vv/++8KIReN8fX3Rs2dPeHl5oVGjRti5cydsbW2xbNkypduMGTMGYrFY9vP48eMijJiIiIiIiIiIqHhRO3n0/fff49ChQ4URS4FKly4NXV1dPH/+XK78+fPnKvfd09fXR40aNXDv3j2ldQwNDWFhYSH3Q0RERERERERUUqndbc3NzQ3jx4/H+fPnUbVq1XxTvQ0aNEhjwb3LwMAA3t7eOHr0KAIDAwG8nfnt6NGjGDhwoEr7yMvLwz///IPWrVsXSoxERERERERERF8akSAIgjobuLi4KN+ZSIT79+9/clDKbNmyBUFBQVi2bBlq166N+fPnY+vWrbh16xbKlCmDnj17omzZsggPDwcATJ48Gd988w3c3Nzw5s0bzJ49G7t378aVK1fg6emp0jFTUlJgaWkJsVjMVkhERERERERE9MVQNeehdsujhISETwrsU/zwww948eIFJkyYgGfPnsHLywtRUVGyQbQfPXoEHZ3/9cR7/fo1+vXrh2fPnqFUqVLw9vbG2bNnVU4cERERERERERGVdGq3PJLKzs5GQkICXF1doaendg7qs8GWR0RERERERET0JVI156H2gNkZGRno06cPTExMULlyZTx69AgA8Msvv2DGjBkfHzERERERERERERU7aiePxowZg9jYWJw4cQJGRkay8ubNm2PLli0aDY6IiIiIiIiIiLRL7f5mu3fvxpYtW/DNN99AJBLJyitXroz4+HiNBkdERERERERERNqldsujFy9ewM7OLl95enq6XDKJiIiIiIiIiIg+f2onj3x8fLB//37ZsjRhtHLlSvj6+mouMiIiIiIiIiIi0jq1u61Nnz4drVq1QlxcHHJzc7FgwQLExcXh7NmzOHnyZGHESEREREREREREWqJ2y6P69esjJiYGubm5qFq1Kg4dOgQ7OzucO3cO3t7ehREjERERERERERFpiUgQBEHbQRRnKSkpsLS0hFgshoWFhbbDISIiIiIiIiLSCFVzHip1W0tJSZHtJCUlpcC6TLAQEREREREREX05VEoelSpVComJibCzs4OVlZXCWdUEQYBIJEJeXp7GgyQiIiIiIiIiIu1QKXl07NgxWFtbAwCOHz9eqAEREREREREREVHxwTGPPoBjHhERERERERHRl0jVnIfas62tWbMG27Zty1e+bds2rF27Vt3dERERERERERFRMaZ28ig8PBylS5fOV25nZ4fp06drJCgiIqKSLk8i4Fz8K/wZ8wTn4l8hT8KGwkRERESkHSqNefSuR48ewcXFJV+5k5MTHj16pJGgiIiISrKo64kI2xuHRHGmrMzB0ggTAzzhX8VBi5ERERERUUmkdssjOzs7XLt2LV95bGwsbGxsNBIUERFRSRV1PREh66ORKM6El+gevtU5BS/RPTwTZyJkfTSiridqO0QiIiIiKmHUbnnUpUsXDBo0CObm5mjYsCEA4OTJkxg8eDA6d+6s8QCJiIhKijyJgLC9cRAAjNLbhBC9vbJ1EbkBmJXbBWF
749DC0x66OiLtBUpEREREJYrayaMpU6bgwYMHaNasGfT03m4ukUjQs2dPjnlEVETyJAIuJiQjKTUTduZGqO1izS+SRF+AiwnJshZH7yaOACBEby8O5tVCjNgNFxOS4evK1r5EREREVDTUTh4ZGBhgy5YtmDJlCmJjY2FsbIyqVavCycmpMOIjovdwLBSiL1dS6tv3tYtIcdc0F1EiYgQ3WT0iIiIioqKgdvJIqmLFiqhYsaImYyGiD5COhSIA8BLdg4soEQmCA2LFbghZH42I7jWZQCL6jNmZGwEAEgTF72NpubQeEREREVFRUCl5NHToUEyZMgWmpqYYOnRogXV/++03jQRGRPI4FgrRl6+2izUcLI0QK3ZDRG6A3Pv899wAxApucLB821WViIiIiKioqJQ8unr1KnJycgAA0dHREIkUfzFVVk5En45joRB9+XR1RJgY4ImQ9dGYldsFB/Nq/a+FoeAGAJgY4MkEMREREREVKZWSRwsWLICFhQUA4MSJE4UZDxEpwbFQiEoG/yoOiOheE2F74xAjdkPM/yeNOLYZFRectIGIiKjkUSl5VKNGDSQmJsLOzg4VKlTApUuXYGPDlg1ERYljoRCVHP5VHNDC055f0KnY4aQNREREJZOOKpWsrKyQkJAAAHjw4AEkEkmhBkVE+cnGQhHejoXyLo6FQvTl0dURwdfVBu29ysLX1YaJI9I66aQN0i7U3+qcgpfoHp6JMxGyPhpR1xW3jCUiIqLPn0otjzp27IhGjRrBwcEBIpEIPj4+0NXVVVj3/v37Gg2QiN7iWChERKQtnLSBiIioZFMpebR8+XJ06NAB9+7dw6BBg9CvXz+Ym5sXdmxE9B6OhUJERNrASRuIiIhKNpWSR9euXUPLli3h7++PK1euYPDgwUweEWkJx0IhIqKixkkbiIiISja1B8w+efIksrOzCzsuIiqAdCwUIiKiosBJG4iIiEo2DphNRERERAXipA1EREQlGwfMJiIiIqICcdIGIiKiko0DZhMRERHRB3HSBiIiopJLpeQRAPj7+wMAB8wmIiIiKqE4aQNRyZAnEfg+JyI5IkEQhI/Z8N69e4iPj0fDhg1hbGwMQRAgEn15D5SUlBRYWlpCLBbDwsJC2+EQEREREREVmqjriQjbG4dE8f9mT2QLQ6Ivl6o5D5UGzH5XcnIymjVrhooVK6J169ZITHw7ZWufPn0wbNiwj4+YiIiIiIiItCbqeiJC1kcjUZwJL9E9fKtzCl6ie3gmzkTI+mhEXU/UdohEpCVqJ49CQ0Ohr6+PR48ewcTERFb+ww8/ICoqSqPBERERERERUeHLkwgI2xsHAcAovU3YbTgB8wwisNtwAkbqbQIAhO2NQ57kozquENFnTuUxj6QOHTqEgwcP4quvvpIrd3d3x8OHDzUWGBERERERERWNiwnJshZHIXp75daF6O3FwbxaiBG74WJCMnxdbbQUJZV0HI9Le9RueZSeni7X4kgqOTkZhoaGGgmqIEuWLIGzszOMjIxQp04dXLx4scD627Ztg4eHB4yMjFC1alX89ddfhR4jERERERHR5yQp9e0YRy4ixV3TpOXSekRFLep6IurPPIYuK85j8OYYdFlxHvVnHmN3yiKidvKoQYMGWLdunWxZJBJBIpFg1qxZaNKkiUaDe9+WLVswdOhQTJw4EdHR0ahevTr8/PyQlJSksP7Zs2fRpUsX9OnTB1evXkVgYCACAwNx/fr1Qo2TiIiIiIjoc2JnbgQASBAUD4otLZfWIypKHI9L+9Sebe369eto1qwZatasiWPHjqFdu3a4ceMGkpOTcebMGbi6uhZWrKhTpw5q1aqFxYsXAwAkEgnKlSuHX375BaNHj85X/4cffkB6ejr27dsnK/vmm2/g5eWFpUuXqnRMzrZG9GVj01ciItIWfgZRcZInEVB/5jE8E2dipN4mua5rv+cGYHZuF9hbGuH0qKa8T6lISe/NRHEmRr13b0bkBmAW781Pomr
OQ+0xj6pUqYI7d+5g8eLFMDc3R1paGjp06IABAwbAwaHwpm7Mzs7GlStXMGbMGFmZjo4OmjdvjnPnzinc5ty5cxg6dKhcmZ+fH3bv3q30OFlZWcjKypItp6SkfFrgRFRscSpaIiLSFn4GUXGjqyPCxABPhKyPxqzcLjiYVwsuokQkCA6IFdwAABMDPPnlnIocx+MqHtROHgGApaUlxo0bp+lYCvTy5Uvk5eWhTJkycuVlypTBrVu3FG7z7NkzhfWfPXum9Djh4eEICwv79ICJqFiTNn0VAHiJ7v3vlyOxG0LWRyOie03+8k5ERIWCn0GFgy25Pp1/FQdEdK+JsL1xiBG7Ieb/k0ZMbJI2qTIeV4zgxvG4CtlHJY/evHmDVatW4ebNmwCAypUro3fv3rC0tNRocNowZswYudZKKSkpKFeunBYjIiJNe38qWkVNX8P2xqGFpz1/6SQiIo3iZ1DhYEsuzfGv4oAWnvZMxFGxwfG4ige1B8y+fPkyXF1dMW/ePCQnJyM5ORm//fYbXF1dER0dXRgxAgBKly4NXV1dPH/+XK78+fPnsLe3V7iNvb29WvUBwNDQEBYWFnI/RPRl+VDT1+qie0gUZ+JiQrKWIiQioi8VP4M0jwPpap6ujgi+rjZo71UWvq42TByRVtV2sYaDpRFiBTdE5AbIrfs9NwCxghscLN8mOanwqJ08GjJkCNq1a4cHDx5g586d2LlzJxISEtC2bVuEhoYWQohvGRgYwNvbG0ePHpWVSSQSHD16FL6+vgq38fX1lasPAIcPH1Zan4hKBk5FS0RE2sLPIM16vyXXbsMJmGcQgd2GEzBSbxMAIGxvHPIkas0RRETFiHQ8LgCYldsFgVmTMSQ7BIFZkzE7twsAjsf1sfIkAi7eV+2PFWp3W7t8+TJWrFgBPb3/baqnp4eRI0fCx8dH3d2pZejQoQgKCoKPjw9q166N+fPnIz09Hb169QIA9OzZE2XLlkV4eDgAYPDgwWjUqBHmzp2LNm3aYPPmzbh8+TKWL19eqHESUfHGpq9ERKQt/AzSLA6kS1QycDwuzZN2932SVEjJIwsLCzx69AgeHh5y5Y8fP4a5ubm6u1PLDz/8gBcvXmDChAl49uwZvLy8EBUVJRsU+9GjR9DR+V9jqrp162Ljxo349ddfMXbsWLi7u2P37t2oUqVKocZJRMWbrOmr+G3T1/enomXTVyIiKiz8DNIsDqRLVHJwPC7NeXfihqqi+3iswjYiQRDUasM5aNAg7Nq1C3PmzEHdunUBAGfOnMGIESPQsWNHzJ8/X/3Ii7GUlBRYWlpCLBZz/COiL4j0gQkA1d+d6eb//4rBmW6IiKiw8DNIc87Fv0KXFefhJbqH3YYT8q0PzJqMGMENm/p9w5ZHpDWcCZCKkzyJgPozjyFRnIlRepvQLW8PLGekfjDnoXbLozlz5kAkEqFnz57Izc0FAOjr6yMkJAQzZsz4+DMgIipCbPpKRETaws8gzWFLLiruOBMgFTfvd/dNyVNtO7VbHkllZGQgPj4eAODq6goTE5OP2U2xx5ZHRF82/iWIiIi0hZ9BmsGWXFRcvds1yIv3JhUTf8Y8weDNMfhW5xTmGUQgJUvQbMujvLw83LhxA+7u7jA2NoaJiQmqVq0KAPjvv/9w7do1VKlSRW7MISKi4k46FS0REVFR42eQZrAlFxVH788E+G6ruIjcAMzK7YKwvXFo4WnPpDEVqQ9N3KCMysmjP/74A4sXL8aFCxfyrdPX10fv3r0RGhqK7t27qxUAERERERHRp+BAulTccCZAKq7e7+7bDXtU2k7lZkKrVq3C8OHDoaurm2+dnp4eRo4cieXLl6seMRERERERkYZIW3K19yoLX1cbJo5Iq1SZCfDdekRFRVdHhIkBngCAWbld0CVrnErbqZw8un37Nr755hul62vVqoWbN2+qujsiIvoC5UkEnIt/hT9jnuBc/CvkST5qWD0iIiKiz9qHugZJy6X1iIqStLu
vvaUR/hEqqLSNyt3W0tPTkZKSonR9amoqMjIyVN0dERF9YTibCBVnHJiYiIiKEmcCpOJO2t33+LWHaDH/w/VVTh65u7vj7NmzqFatmsL1p0+fhru7u6q7IyKiL4jS2UTEbghZH83ZREirmNgkIqKiJu0aFLI+GrNyu+BgXq18s61NDPDkHzJIq3R1RKhdQbUEpsrd1rp27Ypff/0V165dy7cuNjYWEyZMQNeuXVWPkoiIvgjvzyay23AC5hlEYLfhBIzU2wQACNsbxy5spBXSxKZ00NJvdU7BS3QPz8SZCFkfjajriseiICIi+lTvdg2KEdywS9IAMYIb7C2N+Ic1+uyIBEFQ6bf5nJwctGzZEqdPn0bz5s3h4eEBALh16xaOHDmCevXq4fDhw9DX1y/UgItaSkoKLC0tIRaLYWFhoe1wiIiKnXPxr9BlxXl4ie5ht+GEfOsDsyYjRnDDpn7fcDYRKlJ5EgH1Zx5DojhT6TTJ9pZGOD2qKf/yS0REhYZdp6k4UzXnoXK3NX19fRw6dAjz5s3Dxo0b8ffff0MQBFSsWBHTpk1DaGjoF5c4IiKiD1NlNpEYwY2ziVCR4zTJRERUHEhnAiT6nKmcPALeJpBGjhyJkSNHFlY8RET0meFsIlRcMbFJREREpBkqj3lERESkiGw2EeHtbCLv4mwipE1MbBIRERFphlotj4iIiN7H2USouOI0yURERESaofKA2SUVB8wmIlINp0On4kg62xoAVBfdy5fY5Gw3REREVJKpmvNg8ugDmDwiIlIdZxOh4oiJTSIiIiLFCiV5lJKSAjMzM+joyA+VlJeXh/T09C8yucLkERER0eePiU0iIiKi/FTNeag8YPauXbvg4+ODzMz8M5JkZmaiVq1a2Lt3r4ItiYiIiLRLOk1ye6+y8HW1YeKIiIiISA0qJ48iIiIwcuRImJiY5FtnamqKUaNGYfHixRoNjoiIiIiIiIiItEvl5NH169fRuHFjpesbNmyIf/75RxMxERERERERERFRMaFy8uj169fIzc1Vuj4nJwevX7/WSFBERERERERERFQ8qJw8cnZ2xuXLl5Wuv3z5MpycnDQSFBERERERERERFQ8qJ486dOiAcePG4fnz5/nWPXv2DL/++is6duyo0eCIiIiIiIiIiEi7RIIgCKpUTE1Nha+vLx49eoTu3bvj66+/BgDcunULGzZsQLly5XD+/HmYm5sXasBFTdVp64iIiIiIiIiIPieq5jz0VN2hubk5zpw5gzFjxmDLli2y8Y2srKzQvXt3TJs27YtLHBERERERERERlXQqtzx6lyAIePnyJQRBgK2tLUQiUWHEViyw5RERERERERERfYk03vIIAM6fP4+9e/ciJycHTZs2hb+//ycHSkRERERERERExZfKyaPt27fjhx9+gLGxMfT19TF37lzMnDkTw4cPL8z4iIiIiIiIiIhIi1SebS08PBz9+vWDWCzG69evMXXqVEyfPr0wYyMiIiIiIiIiIi1TecwjMzMzxMTEwM3NDQCQnZ0NU1NTPHnyBHZ2doUapDZxzCMiIiIiIiIi+hKpmvNQueVRRkaG3I4MDAxgZGSEtLS0T4uUiIiIiIiIiIiKLbUGzF65ciXMzMxky7m5uYiMjETp0qVlZYMGDdJcdEREREREREREpFUqd1tzdnaGSCQqeGciEe7fv6+RwIoLdlvTjDyJgIsJyUhKzYSduRFqu1hDV6fg+4mIiIiIiIiICo+qOQ+VWx49ePBAE3FRCRR1PRFhe+OQKM6UlTlYGmFigCf8qzhoMTIiIiIiIiIi+hCVxzz6kH///Rc//vijpnZHX4io64kIWR+NRHEmvET38K3OKXiJ7uGZOBMh66MRdT1R2yESERERERERUQE0ljx69eoVVq1apand0RcgTyIgbG8cBACj9DZht+EEzDOIwG7DCRiptwkAELY3DnkSlXpOEhEREREREZEWaCx5RPS+iwnJshZHIXp75daF6O1FddE
9JIozcTEhWUsREhEREREREdGHMHlEhSYp9e0YRy4ixV3TpOXSekRERERERERU/Hw2yaPk5GR069YNFhYWsLKyQp8+fZCWllbgNo0bN4ZIJJL7+fnnn4soYrIzNwIAJAiKB8WWlkvrEREREREREVHxo/Jsax06dChw/Zs3bz41lgJ169YNiYmJOHz4MHJyctCrVy/8+OOP2LhxY4Hb9evXD5MnT5Ytm5iYFGqc9D+1XazhYGmEWLEbInID5Lqu/Z4bgFjBDQ6WRqjtYq3FKImIiIiIiIioIConjywtLT+4vmfPnp8ckCI3b95EVFQULl26BB8fHwDAokWL0Lp1a8yZMweOjo5KtzUxMYG9vb3Kx8rKykJWVpZsOSUl5eMDL+F0dUSYGOCJkPXRmJXbBQfzasFFlIgEwQGxghsAYGKAJ3R1RFqOlIiIiIiIiIiUEQmCUOynulq9ejWGDRuG169fy8pyc3NhZGSEbdu24dtvv1W4XePGjXHjxg0IggB7e3sEBARg/PjxBbY+mjRpEsLCwvKVi8ViWFhYfPrJlEBR1xMRtjcOieL/jW3kYGmEiQGe8K+iuEsbERERERERERWulJQUWFpafjDnoXLLI2169uwZ7Ozs5Mr09PRgbW2NZ8+eKd2ua9eucHJygqOjI65du4ZRo0bh9u3b2Llzp9JtxowZg6FDh8qWU1JSUK5cuU8/iRLMv4oDWnja42JCMpJSM2Fn/rarGlscERERERERERV/Wk0ejR49GjNnziywzs2bNz96/z/++KPs/1WrVoWDgwOaNWuG+Ph4uLq6KtzG0NAQhoaGsmVpwyx2X/t0lW31UdlWHwCQnpaq5WiIiIiIiIiISjZpruNDndK0mjwaNmwYgoODC6xToUIF2NvbIykpSa48NzcXycnJao1nVKdOHQDAvXv3lCaP3vfq1SsAYOsjIiIiIiIiIvoipaamFjjWtVaTR7a2trC1tf1gPV9fX7x58wZXrlyBt7c3AODYsWOQSCSyhJAqYmJiAAAODqqPs2Nt/XYmsEePHn1w0HCioiTtUvn48WOOx0XFCu9NKq54b1JxxPuSiivem1Rc8d7ULEEQkJqaWuBEZMBnMuZRpUqV4O/vj379+mHp0qXIycnBwIED0blzZ9kJPnnyBM2aNcO6detQu3ZtxMfHY+PGjWjdujVsbGxw7do1DBkyBA0bNkS1atVUPraOjg6At7PJ8cak4sjCwoL3JhVLvDepuOK9ScUR70sqrnhvUnHFe1NzVGkoo1MEcWjEhg0b4OHhgWbNmqF169aoX78+li9fLlufk5OD27dvIyMjAwBgYGCAI0eOoGXLlvDw8MCwYcPQsWNH7N27V1unQERERERERET02fksWh4Bb7uPbdy4Uel6Z2dnuQGeypUrh5MnTxZFaEREREREREREX6zPpuWRthgaGmLixIlyM7ARFQe8N6m44r1JxRXvTSqOeF9SccV7k4or3pvaIRI+NB8bERERERERERGVWGx5RERERERERERESjF5RERERERERERESjF5RERERERERERESjF5RERERERERERESjF5VIAlS5bA2dkZRkZGqFOnDi5evKjtkKiECw8PR61atWBubg47OzsEBgbi9u3b2g6LKJ8ZM2ZAJBIhNDRU26EQ4cmTJ+jevTtsbGxgbGyMqlWr4vLly9oOi0q4vLw8jB8/Hi4uLjA2NoarqyumTJkCzmVDRe3vv/9GQEAAHB0dIRKJsHv3brn1giBgwoQJcHBwgLGxMZo3b467d+9qJ1gqUQq6N3NycjBq1ChUrVoVpqamcHR0RM+ePfH06VPtBfyFY/JIiS1btmDo0KGYOHEioqOjUb16dfj5+SEpKUnboVEJdvLkSQwYMADnz5/H4cOHkZOTg5YtWyI9PV3boRHJXLp0CcuWLUO1atW0HQoRXr9+jXr16kFfXx8HDhxAXFwc5s6di1KlSmk7NCrhZs6ciYiICCxevBg3b97EzJkzMWvWLCx
atEjboVEJk56ejurVq2PJkiUK18+aNQsLFy7E0qVLceHCBZiamsLPzw+ZmZlFHCmVNAXdmxkZGYiOjsb48eMRHR2NnTt34vbt22jXrp0WIi0ZRAL/vKFQnTp1UKtWLSxevBgAIJFIUK5cOfzyyy8YPXq0lqMjeuvFixews7PDyZMn0bBhQ22HQ4S0tDTUrFkTv//+O6ZOnQovLy/Mnz9f22FRCTZ69GicOXMGp06d0nYoRHLatm2LMmXKYNWqVbKyjh07wtjYGOvXr9diZFSSiUQi7Nq1C4GBgQDetjpydHTEsGHDMHz4cACAWCxGmTJlEBkZic6dO2sxWipJ3r83Fbl06RJq166Nhw8fonz58kUXXAnBlkcKZGdn48qVK2jevLmsTEdHB82bN8e5c+e0GBmRPLFYDACwtrbWciREbw0YMABt2rSRe34SadOePXvg4+OD77//HnZ2dqhRowZWrFih7bCIULduXRw9ehR37twBAMTGxuL06dNo1aqVliMj+p+EhAQ8e/ZM7nPd0tISderU4fciKnbEYjFEIhGsrKy0HcoXSU/bARRHL1++RF5eHsqUKSNXXqZMGdy6dUtLURHJk0gkCA0NRb169VClShVth0OEzZs3Izo6GpcuXdJ2KEQy9+/fR0REBIYOHYqxY8fi0qVLGDRoEAwMDBAUFKTt8KgEGz16NFJSUuDh4QFdXV3k5eVh2rRp6Natm7ZDI5J59uwZACj8XiRdR1QcZGZmYtSoUejSpQssLCy0Hc4Xickjos/UgAEDcP36dZw+fVrboRDh8ePHGDx4MA4fPgwjIyNth0MkI5FI4OPjg+nTpwMAatSogevXr2Pp0qVMHpFWbd26FRs2bMDGjRtRuXJlxMTEIDQ0FI6Ojrw3iYjUkJOTg06dOkEQBERERGg7nC8Wu60pULp0aejq6uL58+dy5c+fP4e9vb2WoiL6n4EDB2Lfvn04fvw4vvrqK22HQ4QrV64gKSkJNWvWhJ6eHvT09HDy5EksXLgQenp6yMvL03aIVEI5ODjA09NTrqxSpUp49OiRliIiemvEiBEYPXo0OnfujKpVq6JHjx4YMmQIwsPDtR0akYz0uw+/F1FxJU0cPXz4EIcPH2aro0LE5JECBgYG8Pb2xtGjR2VlEokER48eha+vrxYjo5JOEAQMHDgQu3btwrFjx+Di4qLtkIgAAM2aNcM///yDmJgY2Y+Pjw+6deuGmJgY6OrqajtEKqHq1auH27dvy5XduXMHTk5OWoqI6K2MjAzo6Mj/Kq6rqwuJRKKliIjyc3Fxgb29vdz3opSUFFy4cIHfi0jrpImju3fv4siRI7CxsdF2SF80dltTYujQoQgKCoKPjw9q166N+fPnIz09Hb169dJ2aFSCDRgwABs3bsSff/4Jc3NzWV9zS0tLGBsbazk6KsnMzc3zjb1lamoKGxsbjslFWjVkyBDUrVsX06dPR6dOnXDx4kUsX74cy5cv13ZoVMIFBARg2rRpKF++PCpXroyrV6/it99+Q+/evbUdGpUwaWlpuHfvnmw5ISEBMTExsLa2Rvny5REaGoqpU6fC3d0dLi4uGD9+PBwdHQuc9YpIEwq6Nx0cHPDdd98hOjoa+/btQ15enuy7kbW1NQwMDLQV9hdLJAiCoO0giqvFixdj9uzZePbsGby8vLBw4ULUqVNH22FRCSYSiRSWr1mzBsHBwUUbDNEHNG7cGF5eXpg/f762Q6ESbt++fRgzZgzu3r0LFxcXDB06FP369dN2WFTCpaamYvz48di1axeSkpLg6OiILl26YMKECfzSQ0XqxIkTaNKkSb7yoKAgREZGQhAETJw4EcuXL8ebN29Qv359/P7776hYsaIWoqWSpKB7c9KkSUp7YRw/fhyNGzcu5OhKHiaPiIiIiIiIiIhIKY55RERERERERERESjF5RERERERERERESjF5RERERERERERESjF5RERERERERERESjF5RERERERERERESjF5RER
ERERERERESjF5RERERERERERESjF5RERERERERERESjF5RERERKRhgiDgxx9/hLW1NUQiEWJiYhSWNW7cGKGhoSrt88SJExCJRHjz5k2hxl6Yxw4ODkZgYKBGYiIiIqKiw+QRERERFakPJRCcnZ0hEokgEolgamqKmjVrYtu2bXJ1UlJSMG7cOHh4eMDIyAj29vZo3rw5du7cCUEQlO47Ozsbs2bNQvXq1WFiYoLSpUujXr16WLNmDXJycjR1ioiKikJkZCT27duHxMREVKlSRWHZzp07MWXKFJX2WbduXSQmJsLS0lJjcT548ECWyCrqYxMREdHnQ0/bARARERG9b/LkyejXrx9SUlIwd+5c/PDDDyhbtizq1q2LN2/eoH79+hCLxZg6dSpq1aoFPT09nDx5EiNHjkTTpk1hZWWVb5/Z2dnw8/NDbGwspkyZgnr16sHCwgLnz5/HnDlzUKNGDXh5eWkk/vj4eDg4OKBu3boFlllbW6u8TwMDA9jb22skPnVp89hERESkfWx5RERERMWOubk57O3tUbFiRSxZsgTGxsbYu3cvAGDs2LF48OABLly4gKCgIHh6eqJixYro168fYmJiYGZmpnCf8+fPx99//42jR49iwIAB8PLyQoUKFdC1a1dcuHAB7u7uAICsrCwMGjQIdnZ2MDIyQv369XHp0iW5fV2/fh2tWrWCmZkZypQpgx49euDly5cA3ras+uWXX/Do0SOIRCI4OzsrLAOQr9taVlYWRo0ahXLlysHQ0BBubm5YtWoVAMVdx06fPo0GDRrA2NgY5cqVw6BBg5Ceni5b7+zsjOnTp6N3794wNzdH+fLlsXz5ctl6FxcXAECNGjUgEonQuHFjhdfu/WNHRkbCysoKBw8eRKVKlWBmZgZ/f38kJibKtsnLy8PQoUNhZWUFGxsbjBw5Ml+rMIlEgvDwcLi4uMDY2BjVq1fH9u3bAbzt+te8eXP4+fnJtktOTsZXX32FCRMmKIyTiIiICgeTR0RERFSs6enpQV9fH9nZ2ZBIJNi8eTO6desGR0fHfHXNzMygp6e4YfWGDRvQvHlz1KhRI986fX19mJqaAgBGjhyJHTt2YO3atYiOjoabmxv8/PyQnJwMAHjz5g2aNm2KGjVq4PLly4iKisLz58/RqVMnAMCCBQswefJkfPXVV0hMTMSlS5cUlinSs2dPbNq0CQsXLsTNmzexbNkypcmw+Ph4+Pv7o2PHjrh27Rq2bNmC06dPY+DAgXL15s6dCx8fH1y9ehX9+/dHSEgIbt++DQC4ePEiAODIkSNITEzEzp07FR5LkYyMDMyZMwd//PEH/v77bzx69AjDhw+XO25kZCRWr16N06dPIzk5Gbt27ZLbR3h4ONatW4elS5fixo0bGDJkCLp3746TJ09CJBJh7dq1uHTpEhYuXAgA+Pnnn1G2bFkmj4iIiIoYu60RERFRsZWdnY25c+dCLBajadOmePnyJV6/fg0PDw+193X37l2lLWuk0tPTERERgcjISLRq1QoAsGLFChw+fBirVq3CiBEjsHjxYtSoUQPTp0+Xbbd69WqUK1cOd+7cQcWKFWFubg5dXV25rl6Kyt51584dbN26FYcPH0bz5s0BABUqVFAaa3h4OLp16yZrueTu7o6FCxeiUaNGiIiIgJGREQCgdevW6N+/PwBg1KhRmDdvHo4fP46vv/4atra2AAAbGxu1u6Xl5ORg6dKlcHV1BQAMHDgQkydPlq2fP38+xowZgw4dOgAAli5dioMHD8rWZ2VlYfr06Thy5Ah8fX1l53v69GksW7YMjRo1QtmyZbFs2TL07NkTz549w19//YWrV68qTRASERFR4eAnLxERERU7o0aNwq+//orMzEyYmZlhxowZaNOmDZ4/f/7R+yxoIG2p+Ph45OTkoF69erIyfX191K5dGzdv3gQAxMbG4vjx4wpbBMXHx6NixYofFV9MTAx0dXXRqFEjlerHxsbi2rVr2LBhg6xMEAR
IJBIkJCSgUqVKAIBq1arJ1otEItjb2yMpKemjYnyXiYmJLHEEAA4ODrL9isViJCYmok6dOrL1enp68PHxkb0O9+7dQ0ZGBlq0aCG33+zsbLnWYd9//z127dqFGTNmICIiQta9kIiIiIoOk0dERERU7IwYMQLBwcGyMYVEIhEAwNbWFlZWVrh165ba+6xYseJHbfe+tLQ0BAQEYObMmfnWOTg4fPR+jY2N1Y7jp59+wqBBg/KtK1++vOz/+vr6cutEIhEkEsnHBfkORftVJUEnlZaWBgDYv38/ypYtK7fO0NBQ9v+MjAxcuXIFurq6uHv37idETERERB+LYx4RERFRsVO6dGm4ubnB3t5eljgCAB0dHXTu3BkbNmzA06dP822XlpaG3Nxchfvs2rUrjhw5gqtXr+Zbl5OTg/T0dLi6usLAwABnzpyRW3fp0iV4enoCAGrWrIkbN27A2dkZbm5ucj/ScZM+RtWqVSGRSHDy5EmV6tesWRNxcXH5YnBzc4OBgYFK+5DWy8vL++i4FbG0tISDgwMuXLggK8vNzcWVK1dky56enjA0NMSjR4/yxV+uXDlZvWHDhkFHRwcHDhzAwoULcezYMY3GSkRERB/G5BEREREVObFYjJiYGLmfx48fq7TttGnTUK5cOdSpUwfr1q1DXFwc7t69i9WrV6NGjRqyFi3vCw0NRb169dCsWTMsWbIEsbGxuH//PrZu3YpvvvkGd+/ehampKUJCQjBixAhERUUhLi4O/fr1Q0ZGBvr06QMAGDBgAJKTk9GlSxdcunQJ8fHxOHjwIHr16vVJSRhnZ2cEBQWhd+/e2L17NxISEnDixAls3bpVYf1Ro0bh7NmzGDhwIGJiYnD37l38+eef+QbMLoidnR2MjY1lg36LxeKPjv99gwcPxowZM7B7927cunUL/fv3l5spztzcHMOHD8eQIUOwdu1axMfHIzo6GosWLcLatWsBvG2VtHr1amzYsAEtWrTAiBEjEBQUhNevX2ssTiIiIvowdlsjIiKiInfixIl8s5716dMHK1eu/OC21tbWOH/+PGbMmIGpU6fi4cOHKFWqFKpWrYrZs2fD0tJS4XaGhoY4fPgw5s2bh2XLlmH48OEwMTFBpUqVMGjQIFSpUgUAMGPGDEgkEvTo0QOpqanw8fHBwYMHUapUKQCAo6Mjzpw5g1GjRqFly5bIysqCk5MT/P39oaPzaX+Xi4iIwNixY9G/f3+8evUK5cuXx9ixYxXWrVatGk6ePIlx48ahQYMGEAQBrq6u+OGHH1Q+np6eHhYuXIjJkydjwoQJaNCgAU6cOPFJ5yA1bNgwJCYmIigoCDo6Oujduze+/fZbuQTVlClTYGtri/DwcNy/fx9WVlaoWbMmxo4dixcvXqBPnz6YNGkSatasCQAICwvDoUOH8PPPP2PLli0aiZOIiIg+TCSo0zmdiIiIiIiIiIhKFHZbIyIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipZg8IiIiIiIiIiIipf4Pa2jF9YRIhcUAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Compare cf. librosa to check accuracy of LPC coefficients\n", + "import librosa\n", + "\n", + "librosa_coeffs = librosa.lpc(gne_frames[0, 0].numpy(), order=lpc_order)\n", + "plt.scatter(torch.arange(lpc_order + 1), librosa_coeffs)\n", + "plt.scatter(torch.arange(lpc_order + 1), a_coeffs[0], s=10)\n", + "plt.legend([\"Librosa\", \"SpeechBrain\"])\n", + "plt.xlabel(\"LPC Coefficient index\")\n", + "plt.ylabel(\"LPC Coefficient value\")\n", + "plt.title(\"Comparison of SpeechBrain and Librosa LPC Coefficient Computation\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "5c26553d-b149-4e11-88ab-c0154c845bcb", + "metadata": {}, + "source": [ + "The Hilbert Envelope is used here to estimate how consistent the glottal pulse train (computed in part 2) is across frequency bands.\n", + "It is not exactly straightforward to compute, you can see an example in:\n", + "\n", + "[https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.hilbert.html](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.hilbert.html)\n", + "\n", + "Although the steps are hidden here in the `compute_hilbert_envelopes()` call, the steps (for those interested) are as follows:\n", + "\n", + "(a) Applying a real Discrete-Time Fourier Transform to the inverse filtered speech signal in the time domain\n", + "(b) Picking each frequency band multiplying by a Hanning window in the frequency domain\n", + "(c) Doubling the length of the sequence obtained by padding zeros (ie, setting the values at negative frequencies to zero)\n", + "(d) Applying an inverse Discrete-Time Fourier Transform\n", + "(e) Taking the absolute value of the complex signal in the time domain obtained as the Hilbert envelope.\n", + "\n", + "The steps b through e are applied to each frequency band." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "dfbaebb3-a58e-4db4-9e50-20326d0f934e", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABJUAAADvCAYAAABLwuVqAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAA6QlJREFUeJzs3XdYFFcXwOHf0puAKEUsgFixiw17i9hrYondxKiJmsSoX0yMJSYxxiSaaNQYWzSa2HtvsfdesYEoShMB6bA73x8jqwRUwAVEz/s8q+zMnZmzu8Oye+beczWKoigIIYQQQgghhBBCCJEFRnkdgBBCCCGEEEIIIYTIfySpJIQQQgghhBBCCCGyTJJKQgghhBBCCCGEECLLJKkkhBBCCCGEEEIIIbJMkkpCCCGEEEIIIYQQIsskqSSEEEIIIYQQQgghskySSkIIIYQQQgghhBAiyySpJIQQQgghhBBCCCGyTJJKQgghhBBCCCGEECLLJKkkhBAiDXd3d/r166e//++//6LRaPj333/1yxo3bkzFihVzP7jXxKJFi9BoNAQEBOR1KPlGQEAAGo2GRYsW6ZdNmDABjUaTpl1KSgqjR4+mePHiGBkZ0bFjRwBiYmJ4//33cXFxQaPR8Mknn+Re8DlMo9EwdOjQvA4jjf++j4hXV0hICG+//TaFChVCo9Ewffr0vA7pldOvXz/c3d3zOgwhhHglSVJJCCFeY6nJi5MnT2a4Pj8kh+Li4pgwYUKapJbIX2bNmpUmGZSTFixYwNSpU3n77bf5888/+fTTTwH47rvvWLRoEUOGDGHJkiX07t07V+LJjmXLlskX+zdQXr3Xffrpp2zfvp0xY8awZMkSWrZsmavHF0IIkb+Z5HUAQgghXi1+fn4YGb061xzi4uKYOHEioCbBRP4za9YsChcubPCeK2PHjuXzzz9Ps2zPnj0ULVqUadOmpVtep04dxo8fb9AYcsKyZcu4ePHia9WbSrxYXr3X7dmzhw4dOjBy5MhcO2Z+88cff6DT6fI6DCGEeCW9Ot8ahBBCvBLMzc0xNTXN6zDQ6XQkJCTkdRjiFWZiYoKFhUWaZaGhodjb26dr+6zl2SXnp8gvYmNjn7s+s78bL9rP68zU1BRzc/O8DkMIIV5JklQSQgiRRlZqoZw6dYq6detiaWmJh4cHc+bMSdcmMTGR8ePHU6pUKczNzSlevDijR48mMTExTbvUujBLly6lQoUKmJubM2fOHBwdHQGYOHEiGo0GjUbDhAkTnhtXZGQkn3zyCcWLF8fc3JxSpUoxZcqUNFeaU2v0/Pjjj8ydOxdPT0/Mzc2pWbMmJ06c0Lf78ccf0Wg03L59O91xxowZg5mZGQ8fPtQvO3bsGC1btsTOzg4rKysaNWrEoUOHMvV8zpo1S//YXV1d+eijj4iMjEzTJnXIoiGf+507d1K/fn3s7e2xsbGhbNmyfPHFFy+Md+HChTRt2hQnJyfMzc3x8vJi9uzZadq4u7tz6dIl9u3bp3/9XtQLIzIykn79+mFnZ4e9vT19+/ZN9zxA2ppKqa/n3r17uXTpkv5YqTXB/P392bx5s355aj2rlzk/t23bBkBQUBADBgzA2dkZc3NzKlSowIIFC9JsnxrHihUr+PbbbylWrBgWFhY0a9aMGzdu6Ns1btyYzZs3c/v2bX2sma3lsnTpUsqWLYuFhQXe3t7s378/zfrbt2/z4YcfUrZsWSwtLSlUqBDvvPNOutpeqcNmDx06xIgRI3B0dMTa2ppOnToRFhaWpq2iKHzzzTcUK1YMKysrmjRpwqVLl9LFlrrP/fv3M2jQIAoVKoStrS
19+vRJ8/sDsH79etq0aYOrqyvm5uZ4enoyadIktFptmnbXr1+nS5cuuLi4YGFhQbFixejevTtRUVH6Ntk9twH++usvatWqhZWVFQULFqRhw4bs2LEjTZutW7fSoEEDrK2tKVCgAG3atEn3+Pv164eNjQ1BQUF07NgRGxsbHB0dGTlypP4xBQQEvPC97urVq7z99ts4ODhgYWFBjRo12LBhQ4bP8759+/jwww9xcnKiWLFiGT6+1LaKovDbb7/pj5mZ/WTmcQOsW7eOihUrYmFhQcWKFVm7dm26+kQZ1e1LfU7+W0ctq89DZs7h1MfTqFEjChQogK2tLTVr1mTZsmX69RnVVNLpdEyfPp0KFSpgYWGBs7MzgwYNSnc+nzx5El9fXwoXLqx/vx4wYEC6GIQQIr+S4W9CCPEGiIqKIjw8PN3y5OTkbO/z4cOHtG7dmq5du9KjRw9WrFjBkCFDMDMz039g1ul0tG/fnoMHD/LBBx9Qvnx5Lly4wLRp07h27Rrr1q1Ls889e/awYsUKhg4dSuHChalSpQqzZ89myJAhdOrUic6dOwNQuXLlZ8YVFxdHo0aNCAoKYtCgQZQoUYLDhw8zZswY7t+/n65WzbJly3j06BGDBg1Co9Hwww8/0LlzZ27duoWpqSldu3Zl9OjRrFixglGjRqXZdsWKFbRo0YKCBQvq42/VqhXe3t6MHz8eIyMjfeLlwIED1KpV65lxT5gwgYkTJ9K8eXOGDBmCn58fs2fP5sSJExw6dChN7zFDPveXLl2ibdu2VK5cma+//hpzc3Nu3LiRqUTY7NmzqVChAu3bt8fExISNGzfy4YcfotPp+OijjwCYPn06w4YNw8bGhi+//BIAZ2fnZ+5TURQ6dOjAwYMHGTx4MOXLl2ft2rX07dv3ubE4OjqyZMkSvv32W2JiYpg8eTIA5cuXZ8mSJXz66acUK1aMzz77TN/+Zc9Pd3d3QkJCqFOnjj7p5OjoyNatW3nvvfeIjo5ON4Tt+++/x8jIiJEjRxIVFcUPP/xAz549OXbsGABffvklUVFR3L17Vz+Ez8bG5oWvxb59+1i+fDnDhw/H3NycWbNm0bJlS44fP66vm3bixAkOHz5M9+7dKVasGAEBAcyePZvGjRtz+fJlrKys0uxz2LBhFCxYkPHjxxMQEMD06dMZOnQoy5cv17cZN24c33zzDa1bt6Z169acPn2aFi1akJSUlGGcQ4cOxd7engkTJujP8du3b+sTC6AmBGxsbBgxYgQ2Njbs2bOHcePGER0dzdSpUwFISkrC19eXxMREhg0bhouLC0FBQWzatInIyEjs7Oxe6tyeOHEiEyZMoG7dunz99deYmZlx7Ngx9uzZQ4sWLQBYsmQJffv2xdfXlylTphAXF8fs2bOpX78+Z86cSZOE0Gq1+Pr6Urt2bX788Ud27drFTz/9hKenJ0OGDMHR0fG573WXLl2iXr16FC1alM8//xxra2tWrFhBx44dWb16NZ06dUoT/4cffoijoyPjxo17Zg+jhg0b6uuLvfXWW/Tp0yddm4z2k9nHvWPHDrp06YKXlxeTJ0/mwYMH9O/f/5lJrszI6vOQmXN40aJFDBgwgAoVKjBmzBjs7e05c+YM27Zt4913331mLIMGDWLRokX079+f4cOH4+/vz8yZMzlz5oz+PTs0NJQWLVrg6OjI559/jr29PQEBAaxZsybbz4EQQrxyFCGEEK+thQsXKsBzbxUqVEizjZubm9K3b1/9/b179yqAsnfvXv2yRo0aKYDy008/6ZclJiYqVatWVZycnJSkpCRFURRlyZIlipGRkXLgwIE0x5gzZ44CKIcOHdIvAxQjIyPl0qVLadqGhYUpgDJ+/PhMPeZJkyYp1tbWyrVr19Is//zzzxVjY2MlMDBQURRF8ff3VwClUKFCSkREhL7d+vXrFUDZuHGjfpmPj4/i7e2dZn/Hjx9XAGXx4sWKoiiKTqdTSpcurfj6+io6nU7fLi4uTvHw8F
Deeust/bLU18Xf319RFEUJDQ1VzMzMlBYtWiharVbfbubMmQqgLFiwQL/M0M/9tGnTFEAJCwt70VObTlxcXLplvr6+SsmSJdMsq1ChgtKoUaNM7XPdunUKoPzwww/6ZSkpKUqDBg0UQFm4cKF++fjx45X/fpRp1KhRunNaUdTzuk2bNmmWGeL8fO+995QiRYoo4eHhaZZ3795dsbOz0z9Hqb9H5cuXVxITE/XtfvnlFwVQLly4oF/Wpk0bxc3NLaOnJ0Opv8snT57UL7t9+7ZiYWGhdOrUSb8so9fryJEjac5jRXlyfjZv3jzNufzpp58qxsbGSmRkpKIoT87bNm3apGn3xRdfKECa95HUfXp7e+vPUUVRlB9++EEBlPXr1z83zkGDBilWVlZKQkKCoiiKcubMGQVQVq5c+cznJbvn9vXr1xUjIyOlU6dOaX4fFUXRP85Hjx4p9vb2ysCBA9OsDw4OVuzs7NIs79u3rwIoX3/9dZq21apVS/O+8rz3umbNmimVKlXSP/7UWOrWrauULl1avyz1ea5fv76SkpKSqccLKB999FGaZc/aT1Yed9WqVZUiRYrozxdFUZQdO3YoQJrzO6O/MYry5D366d/5rD4PLzqHIyMjlQIFCii1a9dW4uPj0xz/6e369u2bJuYDBw4ogLJ06dI022zbti3N8rVr1yqAcuLECUUIIV5XMvxNCCHeAL/99hs7d+5Md3tej58XMTExYdCgQfr7ZmZmDBo0iNDQUE6dOgXAypUrKV++POXKlSM8PFx/a9q0KQB79+5Ns89GjRrh5eWV7ZhSj9mgQQMKFiyY5pjNmzdHq9WmGxLUrVs3fU8jgAYNGgBw69atNG1OnTrFzZs39cuWL1+Oubk5HTp0AODs2bNcv36dd999lwcPHuiPGxsbS7Nmzdi/f/8zC73u2rWLpKQkPvnkkzRF0gcOHIitrS2bN29O096Qz31qLZX169dnuRCtpaWl/ufU3nCNGjXi1q1baYYgZcWWLVswMTFhyJAh+mXGxsYMGzYsW/t7npc9PxVFYfXq1bRr1w5FUdLsw9fXl6ioKE6fPp1mH/3798fMzEx/P6PzLTt8fHzw9vbW3y9RogQdOnRg+/bt+iFWT79eycnJPHjwgFKlSmFvb58uToAPPvhA33soNVatVqsfCpp63g4bNixNu+cVGP/ggw/S9LobMmQIJiYmbNmyRb/s6TgfPXpEeHg4DRo0IC4ujqtXrwJgZ2cHwPbt24mLi8vwWNk9t9etW4dOp2PcuHHpJi1IfZw7d+4kMjKSHj16pHndjY2NqV27drpzB2Dw4MFp7jdo0CBTr3tERAR79uyha9eu+ucjPDycBw8e4Ovry/Xr1wkKCkqzzcCBAzE2Ns70Y36W/+4ns4/7/v37nD17lr59++pfK4C33nor2+/x2XkeXnQO79y5k0ePHvH555+nq8/29Hb/tXLlSuzs7HjrrbfSPA/e3t7Y2Nike3/dtGnTS/UMFkKIV5kMfxNCiDdArVq1qFGjRrrlqYmX7HB1dcXa2jrNsjJlygBqLYw6depw/fp1rly5oq8V8l+hoaFp7nt4eGQrlqddv36d8+fPZ/qYJUqUSHM/NcH0dF2Md955hxEjRrB8+XK++OILFEVh5cqVtGrVCltbW/1xgecO04qKikqTwEqV+gWnbNmyaZabmZlRsmTJdPWcDPncd+vWjXnz5vH+++/z+eef06xZMzp37szbb7/9wlkADx06xPjx4zly5Ei6L/ZRUVFpvkxm1u3btylSpEi6IV//fW4M4WXPz7CwMCIjI5k7dy5z587N1D4yc75lR+nSpdMtK1OmDHFxcYSFheHi4kJ8fDyTJ09m4cKFBAUFoSiKvm1GScAXxZp6Xv732I6Ojhme5xm1tbGxoUiRImnqOl26dImxY8eyZ88eoqOj07RPjdPDw4MRI0bw888/s3TpUho0aED79u3p1auX/rzL7rl98+ZNjIyMnpv8SP19T01A/l
fq+0IqCwuLdOdZwYIFM/W637hxA0VR+Oqrr/jqq68ybBMaGkrRokX19w3xXprRfjL7uJ91boD6u5xREvNFsvM8vOgcTr1QkDpENLOuX79OVFQUTk5Oz4wD1ER0ly5dmDhxItOmTaNx48Z07NiRd999Vwp/CyFeG5JUEkIIkWN0Oh2VKlXi559/znB98eLF09x/uofCyxzzrbfeYvTo0RmuT02+pHrW1fynv3C7urrSoEEDVqxYwRdffMHRo0cJDAxkypQpaY4LMHXqVKpWrZrhPjNTG8dQMvvcW1pasn//fvbu3cvmzZvZtm0by5cvp2nTpuzYseOZz8/Nmzdp1qwZ5cqV4+eff6Z48eKYmZmxZcsWpk2bli+m337Z8zP1Mfbq1euZycT/9gbMzPmWU4YNG8bChQv55JNP8PHxwc7ODo1GQ/fu3TN8vfIi1sjISBo1aoStrS1ff/01np6eWFhYcPr0af73v/+lifOnn36iX79+rF+/nh07djB8+HAmT57M0aNHKVasWLbP7cxIjWPJkiW4uLikW29ikvYjtiGONXLkSHx9fTNsU6pUqTT3DfFemtF+svq4M+NZPYL+W5g9O89DTp3DOp0OJycnli5dmuH61ASiRqNh1apVHD16lI0bN7J9+3YGDBjATz/9xNGjR3P1b4IQQuQUSSoJIYTIlnv37hEbG5umx8y1a9cA9IVaPT09OXfuHM2aNXvuUILnyep2np6exMTE0Lx582wd71m6devGhx9+iJ+fH8uXL8fKyop27dqlOS6oV+qzemw3NzcA/Pz8KFmypH55UlIS/v7+6fZn6OfeyMiIZs2a0axZM37++We+++47vvzyS/bu3fvMx7Jx40YSExPZsGFDmt4AGQ37ycpr6Obmxu7du4mJiUnzhcvPzy/T+8islz0/HR0dKVCgAFqt1qDnW3ZiSe1B8rRr165hZWWl/4K7atUq+vbty08//aRvk5CQkOHMepmRet5ev349zXkbFhb2zB44169fp0mTJvr7MTEx3L9/n9atWwPqTGAPHjxgzZo1NGzYUN/O398/w/1VqlSJSpUqMXbsWA4fPky9evWYM2cO33zzDZC9c9vT0xOdTsfly5efmSBO/X13cnIy2Gv/rNc99bk1NTU1+PtaVmX2cT99bvzXf3+XU3sP/fc8/G8PzZx4HlIfz8WLF9MlpF603a5du6hXr16mEnh16tShTp06fPvttyxbtoyePXvyzz//8P7772c7diGEeFVITSUhhBDZkpKSwu+//66/n5SUxO+//46jo6O+tkvXrl0JCgrijz/+SLd9fHz8M2clelrqjFSZ/eLbtWtXjhw5wvbt29Oti4yMJCUlJVP7+a8uXbpgbGzM33//zcqVK2nbtm2apI63tzeenp78+OOPxMTEpNs+o2msUzVv3hwzMzN+/fXXNFfQ58+fT1RUFG3atEnT3pDPfURERLr1qV+kExMTnxlzag+A/w6hWrhwYbq21tbWmX79WrduTUpKCrNnz9Yv02q1zJgxI1PbZ8XLnp/GxsZ06dKF1atXc/HixXTrn/eaP4+1tXWWa1IdOXIkzZCiO3fusH79elq0aKF/rYyNjdP10JgxY0a6HiGZ1bx5c0xNTZkxY0aa/f53hsWnzZ07N01tmdmzZ5OSkkKrVq30MULa8yopKYlZs2al2U90dHS63+VKlSphZGSkP2+ze2537NgRIyMjvv7663Q9uFLj8vX1xdbWlu+++y7DWjnZee2f9V7n5ORE48aN+f3337l//75BjpVdmX3cRYoUoWrVqvz5559pzuWdO3dy+fLlNNu4ublhbGycrt7df1/znHgeWrRoQYECBZg8eTIJCQlp1j2vN1PXrl3RarVMmjQp3bqUlBT9a/jw4cN0+8nMOSiEEPmJ9FQSQgiRLa6urkyZMoWAgADKlCnD8uXLOXv2LHPnztUX4u3duzcrVqxg8ODB7N27l3r16qHVarl69SorVqxg+/btGdZ6epqlpS
VeXl4sX76cMmXK4ODgQMWKFZ9ZA2PUqFFs2LCBtm3b0q9fP7y9vYmNjeXChQusWrWKgIAAChcunOXH6+TkRJMmTfj555959OgR3bp1S7PeyMiIefPm0apVKypUqED//v0pWrQoQUFB7N27F1tbWzZu3Jjhvh0dHRkzZgwTJ06kZcuWtG/fHj8/P2bNmkXNmjXp1atXmvaGfO6//vpr9u/fT5s2bXBzcyM0NJRZs2ZRrFgx6tev/8zno0WLFpiZmdGuXTsGDRpETEwMf/zxB05OTum+8Hl7ezN79my++eYbSpUqhZOT0zNrsrRr14569erx+eefExAQgJeXF2vWrMl24e/nMcT5+f3337N3715q167NwIED8fLyIiIigtOnT7Nr164MExsv4u3tzfLlyxkxYgQ1a9bExsYmTa+4jFSsWBFfX1+GDx+Oubm5/gv5xIkT9W3atm3LkiVLsLOzw8vLiyNHjrBr1y4KFSqU5RhBPW9HjhzJ5MmTadu2La1bt+bMmTNs3br1mb9jSUlJNGvWjK5du+rP8fr169O+fXsA6tatS8GCBenbty/Dhw9Ho9GwZMmSdF/M9+zZw9ChQ3nnnXcoU6YMKSkpLFmyRJ/oA7J9bpcqVYovv/ySSZMm0aBBAzp37oy5uTknTpzA1dWVyZMnY2try+zZs+nduzfVq1ene/fuODo6EhgYyObNm6lXrx4zZ87M0vP5vPe63377jfr161OpUiUGDhxIyZIlCQkJ4ciRI9y9e5dz585l6VjZlZXHPXnyZNq0aUP9+vUZMGAAERERzJgxgwoVKqRJvNvZ2fHOO+8wY8YMNBoNnp6ebNq0KV09MsDgz4OtrS3Tpk3j/fffp2bNmrz77rsULFiQc+fOERcXx59//pnhdo0aNWLQoEFMnjyZs2fP0qJFC0xNTbl+/TorV67kl19+4e233+bPP/9k1qxZdOrUCU9PTx49esQff/yBra2tvneeEELke7k825wQQohclDqt8rOmM85o+nU3N7c0U4FnNN1z6nYnT55UfHx8FAsLC8XNzU2ZOXNmumMkJSUpU6ZMUSpUqKCYm5srBQsWVLy9vZWJEycqUVFR+nZkMK11qsOHDyve3t6KmZnZM6fcftqjR4+UMWPGKKVKlVLMzMyUwoULK3Xr1lV+/PFH/XTmqdNVT506Nd32zzrGH3/8oQBKgQIF0k0/nerMmTNK586dlUKFCinm5uaKm5ub0rVrV2X37t36Nqmvi7+/f5ptZ86cqZQrV04xNTVVnJ2dlSFDhigPHz5M08bQz/3u3buVDh06KK6uroqZmZni6uqq9OjRQ7l27drznmJFURRlw4YNSuXKlRULCwvF3d1dmTJlirJgwYJ0jy04OFhp06aNUqBAAQVQGjVq9Nz9PnjwQOndu7dia2ur2NnZKb1799ZPIf/09OLjx49X/vtRJqNzWlHU87pNmzbZeo4U5fnnZ0hIiPLRRx8pxYsXV0xNTRUXFxelWbNmyty5c/VtUn+PVq5cmWbbjKZNj4mJUd59913F3t4+3fTrGUmN7a+//lJKly6tmJubK9WqVUs3RfvDhw+V/v37K4ULF1ZsbGwUX19f5erVq+l+55/1vpHRe4FWq1UmTpyoFClSRLG0tFQaN26sXLx48Zn73Ldvn/LBBx8oBQsWVGxsbJSePXsqDx48SHOcQ4cOKXXq1FEsLS0VV1dXZfTo0cr27dvTHPvWrVvKgAEDFE9PT8XCwkJxcHBQmjRpouzatUu/n5c5txVFURYsWKBUq1ZNf140atRI2blzZ7rnxNfXV7Gzs1MsLCwUT09PpV+/fsrJkyf1bfr27atYW1un239G5+/z3utu3ryp9OnTR3FxcVFMTU2VokWLKm3btlVWrVqV7nnOyhT2GZ3bL9pPZh63oijK6tWrlfLlyyvm5uaKl5eXsmbNGqVv377pzumwsDClS5cuipWVlVKwYEFl0KBBysWLF9P9brzs85DROawo6ntZ3bp1FU
tLS8XW1lapVauW8vfff+vXZxSzoijK3LlzFW9vb8XS0lIpUKCAUqlSJWX06NHKvXv3FEVRlNOnTys9evRQSpQooZibmytOTk5K27Zt0z1PQgiRn2kUJRcqQwohhBDCIBo3bkx4eHiGw62EeFUtWrSI/v37c+LEiRf2/hKvt379+vHvv/+mmfFPCCFE/iU1lYQQQgghhBBCCCFElklSSQghhBBCCCGEEEJkmSSVhBBCCCGEEEIIIUSWSU0lIYQQQgghhBBCCJFled5T6bfffsPd3R0LCwtq167N8ePHn9t+5cqVlCtXDgsLCypVqsSWLVvSrFcUhXHjxlGkSBEsLS1p3rw5169fT7efzZs3U7t2bSwtLSlYsCAdO3Y05MMSQgghhBBCCCGEeK3laVJp+fLljBgxgvHjx3P69GmqVKmCr68voaGhGbY/fPgwPXr04L333uPMmTN07NiRjh07ppkB54cffuDXX39lzpw5HDt2DGtra3x9fUlISNC3Wb16Nb1796Z///6cO3eOQ4cO8e677+b44xVCCCGEEEIIIYR4XeTp8LfatWtTs2ZNZs6cCYBOp6N48eIMGzaMzz//PF37bt26ERsby6ZNm/TL6tSpQ9WqVZkzZw6KouDq6spnn33GyJEjAYiKisLZ2ZlFixbRvXt3UlJScHd3Z+LEibz33nvZjl2n03Hv3j0KFCiARqPJ9n6EEEIIIYQQQgghXiWKovDo0SNcXV0xMnp2fySTXIwpjaSkJE6dOsWYMWP0y4yMjGjevDlHjhzJcJsjR44wYsSINMt8fX1Zt24dAP7+/gQHB9O8eXP9ejs7O2rXrs2RI0fo3r07p0+fJigoCCMjI6pVq0ZwcDBVq1Zl6tSpVKxY8ZnxJiYmkpiYqL8fFBSEl5dXdh66EEIIIYQQQgghxCvvzp07FCtW7Jnr8yypFB4ejlarxdnZOc1yZ2dnrl69muE2wcHBGbYPDg7Wr09d9qw2t27dAmDChAn8/PPPuLu789NPP9G4cWOuXbuGg4NDhseePHkyEydOTLf8zp072NravujhCiGEEEIIIYQQQuQL0dHRFC9enAIFCjy3XZ4llfKKTqcD4Msvv6RLly4ALFy4kGLFirFy5UoGDRqU4XZjxoxJ00sq9Qm2tbWVpJIQQgghhBBCCCFeOy8q95NnhboLFy6MsbExISEhaZaHhITg4uKS4TYuLi7PbZ/6//PaFClSBCDN0DVzc3NKlixJYGDgM+M1NzfXJ5AkkSSEEEIIIYQQQog3XZ4llczMzPD29mb37t36ZTqdjt27d+Pj45PhNj4+PmnaA+zcuVPf3sPDAxcXlzRtoqOjOXbsmL6Nt7c35ubm+Pn56dskJycTEBCAm5ubwR6fEEIIIYQQQgghxOssT4e/jRgxgr59+1KjRg1q1arF9OnTiY2NpX///gD06dOHokWLMnnyZAA+/vhjGjVqxE8//USbNm34559/OHnyJHPnzgXUblmffPIJ33zzDaVLl8bDw4OvvvoKV1dXOnbsCICtrS2DBw9m/PjxFC9eHDc3N6ZOnQrAO++8k/tPghBCCJFHwmMSuXQvmhStDq1OQadAWZcCeBS2zuvQhBBCCCFEPpCnSaVu3boRFhbGuHHj9LOwbdu2TV9oOzAwMM3UdXXr1mXZsmWMHTuWL774gtKlS7Nu3bo0s7aNHj2a2NhYPvjgAyIjI6lfvz7btm3DwsJC32bq1KmYmJjQu3dv4uPjqV27Nnv27KFgwYK59+CFEEKIPHLpXhQLDwWw4ew9krS6dOtruTvQrWZxWlcqgqWZcR5EKIQQQojXjaIopKSkoNVq8zoUARgbG2NiYvLCmkkvolEURTFQTG+U6Oho7OzsiIqKkvpKQggh8oVbYTF8sfYCR29F6JeVdLSmgLkJRkYadDqFC0FR6B5/MihgYcL/WpajZ+0SL/2BQwghhBBvrqSkJO7fv09cXFxehyKeYmVlRZEiRTAzM0u3LrM5jzdu9jchhB
DiTXTcP4KBi08SFZ+MsZGG1pWK0L+eO9VLpO2lGxyVwOrTd1l+4g6BEXGMXXeRY/4RTO5cCRtz+dgghBBCiKzR6XT4+/tjbGyMq6srZmZmcrEqjymKQlJSEmFhYfj7+1O6dOk0o8SyQnoqZZP0VBJCCJFfbDh3j5ErzpGk1VG1uD2/9axOUXvL526j0ynMP+jPlG1XSdEplCxszW89q1O+iPzNE0IIIUTmJSQk4O/vj5ubG1ZWVnkdjnhKXFwct2/fxsPDI03JIMh8ziPPZn8TQgghRM77fd9Nhv99hiStjpYVXPjngzovTCgBGBlpGNiwJMsH+VDEzoJb4bF0mnWIEwERL9xWCCGEEOK/stsTRuQcQ7wm8qoKIYQQr6kN5+4xeetVAN6r78FvPatjYZq1wtvebgXZMrwB9UsVJiFZx4CFJ7gYFJUT4QohhBBCiHxGkkpCCCHEa+h6yCM+X30egMGNPPmqrRfGRtmrX1DQ2ox5fWtQy8OBR4kp9FlwnBuhMYYMVwghhBBC5EOSVBJCCCFeM7GJKQxZepq4JC11PQsxyrfsS+/TwtSY+X1rUKmoHRGxSfSef4y7D2UGFyGEEEKIN5kklYQQQojXiKIofL7mAjdCY3C2NefXHtWy3UPpvwpYmPLngFqUcrLhflQCAxadIC4pxSD7FiK/0uoUjt16wO4rIez1C2X/tTDO3olEp5O5cIQQIj+bMGECGo0mza1cuXL69QkJCXz00UcUKlQIGxsbunTpQkhISJp9BAYG0qZNG6ysrHBycmLUqFGkpDz57LRo0SLs7e0zPL5Go2HdunU58dAMSuYGFkIIIV4jS48FsvHcPUyMNPz2bnUK25gbdP8O1mb89V5t2s08yLWQGMauu8hP71SRqYHFG+dhbBLLT95hyZHbBEXGp1tfsrA1feu608W7GDbm8pFbCCHyowoVKrBr1y79fROTJ+/nn376KZs3b2blypXY2dkxdOhQOnfuzKFDhwDQarW0adMGFxcXDh8+zP379+nTpw+mpqZ89913uf5Ycor8hRNCCCFeE6GPEvj+cWHuz1uVo4a7Q44cx8XOgl+7V6PnvKOsOR1EHY9CdK1ZPEeOJcSrJlmrY8rWqyw5epvEFB0A9lamuDlYoVUUdDq4ExHHrfBYxm+4xNTtfvSv587HzUpjYiyDBIQQQlEU4pO1eXJsS1PjLF0IMzExwcXFJd3yqKgo5s+fz7Jly2jatCkACxcupHz58hw9epQ6deqwY8cOLl++zK5du3B2dqZq1apMmjSJ//3vf0yYMAEzM7NMxzFhwgQmTpyYbvnChQvp169fpveTEySpJIQQQrwmpm7zIyYxhcrF7BhQzyNHj+XjWYjPWpRl6nY/vlp/kUrF7ChfxDZHjylEXotOSObDv05z8EY4AF5FbOlX1532VV3TzKwYm5jC6tN3WXQ4gFthsczYc4OzdyKZ2aM6dlameRW+EEK8EuKTtXiN254nx778tS9WZplPg1y/fh1XV1csLCzw8fFh8uTJlChRglOnTpGcnEzz5s31bcuVK0eJEiU4cuQIderU4ciRI1SqVAlnZ2d9G19fX4YMGcKlS5eoVq1apuMYOXIkgwcP1t9funQp48aNo0aNGpneR06RyyVCCCHEa+DsnUhWnroLwIT2FTAyUB2l5xnSyJPGZR1JTNHx4dLTPEpIzvFjCpFX7j6M4+3Zhzl4IxwrM2Pm9KrO5uH16VqzeJqEEoC1uQl9fNzZ9WkjfuleFUtTYw5cD6fjrEMyc6IQQuQTtWvXZtGiRWzbto3Zs2fj7+9PgwYNePToEcHBwZiZmaWrh+Ts7ExwcDAAwcHBaRJKqetT16WKiorCxsYm3e1pNjY2uLi44OLiQkBAAGPHjmXhwoVUrFgxBx551khPJSGEECKf0+kUxm+4BECX6sWoXqJgrhzXyEjDtK5VafPrAfzDY/luyxUmd66cK8cWIjdduhdF3wUnCI9JxNnWnPl9a1KxqN0Ltz
My0tChalFKOxVg4OKT+IfH0um3Q/zex5u6noVzIXIhhHj1WJoac/lr3zw7dma1atVK/3PlypWpXbs2bm5urFixAktLS4PFVKBAAU6fPp1ueenSpdMtCwwMpGPHjowcOZKuXbsaLIaXIT2VhBBCiHxu9em7nLsTiY25Cf9rWTZXj13Q2oxp3aoC8PfxO+y/Fparxxcipz2ISWTgnycJj0mknEsB1n1UL1MJpad5udqyfmg9aroX5FFiCoMWn+J6yKMcilgIIV5tGo0GKzOTPLm9zMQi9vb2lClThhs3buDi4kJSUhKRkZFp2oSEhOhrMLm4uKSbDS71/tN1moyMjChVqlS623/FxsbSvn17fHx8+Prrr7P9OAxNkkpCCCFEPvYoIZkp2/wAGN6sFE62FrkeQ+2ShehX1x2AMWsuyDA48drQ6hQ+/ucs96IS8ChszfJBPhSxy97V6cI25vz1fm1quTvwKDGF/ovUnk9CCCHyh5iYGG7evEmRIkXw9vbG1NSU3bt369f7+fkRGBiIj48PAD4+Ply4cIHQ0FB9m507d2Jra4uXl1eWjq0oCr169UKn07FkyZJXatZdSSoJIYQQ+djCQwGExyRSsrA1/ermbHHu5xndsiwlHKwIioxn8uMZ6ITI737e6cfBG+FYmhozp5c3dpYvV2Tb3MSYOb29cStkxd2H8Xyw+CQJeTQDkhBCiOcbOXIk+/btIyAggMOHD9OpUyeMjY3p0aMHdnZ2vPfee4wYMYK9e/dy6tQp+vfvj4+PD3Xq1AGgRYsWeHl50bt3b86dO8f27dsZO3YsH330Eebm5lmKZcKECezatYvff/+dmJgYgoODCQ4OJj4+PiceepZIUkkIIYTIp2ITU1hwyB+AT94qg5lJ3v1ZtzIzYUoXtZ7SsmOBHLwenmexCGEIOy4F89vemwB836USZV0KGGS/DtZmLOhXE1sLE04HRjJq1XkURTHIvoUQQhjO3bt36dGjB2XLlqVr164UKlSIo0eP4ujoCMC0adNo27YtXbp0oWHDhri4uLBmzRr99sbGxmzatAljY2N8fHzo1asXffr0ydbQtX379hETE0PdunUpUqSI/rZ8+XKDPd7s0ijyVyxboqOjsbOzIyoqCltbmUJZCCFE7vtj/y2+3XIFj8LW7BrRCONcmPHtRcatv8jiI7cpam/JzhENszRtrxCvinuR8fhO38+jhBT61XVnQvsKBj/G4Zvh9Jl/nBSdwqSOFeldx83gxxBCiFdBQkIC/v7+eHh4YGGR+8P0xbM977XJbM5DeioJIYQQ+VBCspa5B24BMKSR5yuRUAL4X8tyFLW3JCgynl9338jrcITIlm83X+FRQgpVi9vzRevyOXKMup6F9fuevOUKtx/E5shxhBBCiJwkSSUhhBAiH1p58g5hjxJxtbOgY7WieR2OnrW5CRMf9+qYd+CWzHAl8p1DN8LZfOE+RhqY3LlSjg4r7VfXnTolHYhL0jJq5Xl0OhlAIIQQIn+RpJIQQgiRzyRrdczZp/ZSGtTIM09rKWWkuZczzcs7k6JTGLvuotSLEflGUoqO8RsuAdDHx53yRXK2xIGRkYapb1fB2syY4wER+hppQgghRH7xan0KFUIIIcQLrTsTRFBkPIVtzOlWs3heh5Oh8e28sDA14ph/BOvOBuV1OEJkyp+HA7gRGkMhazM+fatMrhyzuIMVX7ZRp5b+YbsfN0JjcuW4QgghhCFIUkkIIYTIR3Q6hdn71Bmp3m/ggYWpcR5HlLHiDlYMa1oaUOvTRMUn53FEQjxfSHQC03ddA+B/rcphZ2maa8fuUas4Dcs4kpSiY9SqczIMTgghRL4hSSUhhBAiHzl4I5xbYbEUMDeh1ys+W9TABiXxdLQmPCaJn3b45XU4QjzX91uvEpukpVoJe96uXixXj63RaJjSpRLWZsacCYxkzRnp3SeEECJ/kKSSEEIIkY8sOXobgC7exbAxN8njaJ7PzMSISR0qAmrcF+5G5X
FEQmTsRugj/TDNie0rYJQHsykWsbNkWDO1d9+UbVeJSUzJ9RiEEEKIrJKkkhBCCJFPBEXGs/tKCAC96pTI42gyp26pwnSo6oqiwNh1F9DKsB7xCpq55waKAr4VnKlczD7P4uhfzx33QlaEPUpkxp7reRaHEEIIkVmSVBJCCCHyib+PBaJToK5nIUo5Fci7QBQFIm7B/fNw7yzcOwMhlyAlMcPmX7YpTwFzE87djeLv44G5G6sQL+AfHsuGc/cA9HXA8oq5iTFftVWLdi846I9/eGyexiOEEEK8yKvdb14IIYQQACSmaPnnhJqQ6Z0XtZRCLsO1bXDnONw5BvER6dsYmYJzBXCtBu71oVxbMLXAqYAFI33LMn7DJX7YdpWWFV0obGOe+49BiAz8tvcGOgWalXOiYlG7vA6HpuWcaFzWkX/9wvhm02Xm96uZ1yEJIYQQzyQ9lYQQQoh8YNvFYMJjknC2Nae5l3PuHFRR4MYuWNwRZvvA7olwbauaUDI2BxsXKOAKtsXAwg50yXD/LJxaCKvfg5/LwbYxEHqVXnXcqOBqS3RCCpO3XM2d+IV4gTsRcax9XBQ7tZ5RXtNoNHzV1gsTIw27r4byr19oXockhBBvrP3799OuXTtcXV3RaDSsW7cuzfo1a9bQokULChUqhEaj4ezZs+n2kZCQwEcffUShQoWwsbGhS5cuhISEpGkTGBhImzZtsLKywsnJiVGjRpGS8qS23qJFi7C3t88wxoziyk3SU0kIIYTIB/56XKC7R60SmBrnwjWhG7thx1gIvaze1xhBaV/waADFa4NLZTAxe9JeUSDytjoU7u5JuLQWooPg6Cw4Ogvjsq2Z2mQkbZZFs/r0XbrWKEbtkoVy/nEI8Ryz/r2BVqfQsIwjVYvb53U4ep6ONvSr6868g/5M2eZHw9KOeVI8XAgh3nSxsbFUqVKFAQMG0Llz5wzX169fn65duzJw4MAM9/Hpp5+yefNmVq5ciZ2dHUOHDqVz584cOnQIAK1WS5s2bXBxceHw4cPcv3+fPn36YGpqynfffZejj88QJKkkhBBCvOKu3I/mRMBDTIw09KiVwwW6E6Jhx5dwerF638wGqveB2oOh4HOG3Wk0UNBdvVXoBG99rSamTi1Sh835bcHr+g6WFO3Mh3eb89X6i2we3iB3EmRCZCAoMp5Vp+4CMLxpqTyOJr2hTUux/OQdrtyPZuP5e3SoWjSvQxJCCMNQFEiOy5tjm1qpn1kyqVWrVrRq1eqZ63v37g1AQEBAhuujoqKYP38+y5Yto2nTpgAsXLiQ8uXLc/ToUerUqcOOHTu4fPkyu3btwtnZmapVqzJp0iT+97//MWHCBMzMzDLcd0YmTJjAxIkT0y1fuHAh/fr1y/R+skKSSkIIIcQrbtkxtZaSbwUXnG0tcu5AN/fA+mEQrX7RpvZgaDwGLO2zvi8jYyjTQr2F+am9nq7voH74CvZbbOGLsAEsPFSMDxp6GvQhCJFZ8w/4k6xV8ClZiBruDnkdTjr2VmYMaliSH3dc4+ed12hdqYgkYYUQr4fkOPjONW+O/cU9MLPOtcOdOnWK5ORkmjdvrl9Wrlw5SpQowZEjR6hTpw5HjhyhUqVKODs/KW/g6+vLkCFDuHTpEtWqVcv08UaOHMngwYP195cuXcq4ceOoUaOGYR5QBuQvkxBCCPEKS0jW6mem6l6reM4cRFHg0C+wpJOaUCroDv22QKsp2Uso/ZdjWei5EnqtAcfy2BPDLLNfcdz9KfdDpF6MyH2xiSmsPHUHgA8alczjaJ6tfz0PCtuYc/tBHMtP3MnrcIQQQmRRcHAwZmZm6eohOTs7ExwcrG/zdEIpdX3qulRRUVHY2Nikuz3NxsYGFxcXXFxcCAgIYOzYsSxcuJCKFSvmwKNTSU8lIYQQ4hW2+0ooUfHJFLGzoK5nYcMfQJsCW0fDyfnqfe9+4PtdzlzFK9UMPBqi7J2McvBnOmn2Ef5HQ+i7GIrXMvzxhHiGtW
eCeJSQgkdhaxqVdszrcJ7J2tyEYU1LMX7DJX7dfZ0u1YthaWac12EJIcTLMbVSewzl1bHzqQIFCnD69Ol0y0uXTj/RRGBgIB07dmTkyJF07do1R+OSpJIQQgjxClv1uDdF5+pFMTZ0od7EGFg1AK5vBzRqMsnnQ8Me47+MTdE0H0dgIR+M1w2mWMp9dAtbY9R6KtTon7PHFgJQFIXFRwIA6F3HLW8LYGtT1AL3MaEQEwwxYWBqAbZFwa4Y2BWjR60S/HHgFncfxrPocABDGsuQUSFEPqfR5OoQtLzk4uJCUlISkZGRaXorhYSE4OLiom9z/PjxNNulzg6X2gbAyMiIUqVeXAMwNjaW9u3b4+Pjw9dff22AR/F8klQSQgghXlGh0Qnsvx4OQOfqxQy788RH6nC3uyfAxAI6/wFe7Q17jOdwq/YWPwb+TbmTY2nLMdj0CYRchJbfg7FprsUh3jxHbj7gWkgMVmbGvF3DwL9XmZEQrdYv89uqJnTjHz6nsQaz4rWZ5dGAjyJdmf2vCe/WLoGdpfyOCCFEfuDt7Y2pqSm7d++mS5cuAPj5+REYGIiPjw8APj4+fPvtt4SGhuLk5ATAzp07sbW1xcvLK0vHUxSFXr16odPpWLJkCZosFCXPLkkqCSGEEK+odWeD0OoUqpewx9PR5sUbZFZyPPzdQ00oWdhDz1VQvKbh9p9Jg1tW561Lo7kcu5xRpivRnJgHoVeh62KwLpTr8Yg3w6LDAYDa+8/WIheTM1FBcOAnOPMXaBOfLDexhAIuYOMMNk7q72fUXYgOgsRouHOUyhzlgDmc05Vk34YPad/1/SzNXiSEECJ7YmJiuHHjhv6+v78/Z8+excHBgRIlShAREUFgYCD37qnD+fz8/AD0dY3s7Ox47733GDFiBA4ODtja2jJs2DB8fHyoU6cOAC1atMDLy4vevXvzww8/EBwczNixY/noo48wNzfPUrwTJkxg165d7Nixg5iYGGJiYgCws7PD0tLSEE9JOtlKKkVGRrJq1Spu3rzJqFGjcHBw4PTp0zg7O1O0qEx3KoQQQrwsRVFYfSoIgLe9DVigOyUJVvSBgANgVgB6r4Gi3obbfxbYmJvwTadKvPdnItcpwRyrORjfPggLWqhFvQu65Ulc4vV192Ecu66oQwr6+rjnzkGj78GBn+H0n6BNUpcVKgVlW0GZVlC8Nhg/4yN51F24ugWubkQXcIgqRreocmUkKb8vwaTpl1C6hSSXhBAiB508eZImTZro748YMQKAvn37smjRIjZs2ED//k+G73fv3h2A8ePHM2HCBACmTZuGkZERXbp0ITExEV9fX2bNmqXfxtjYmE2bNjFkyBB8fHywtramb9++2Rq6tm/fPmJiYqhbt26a5QsXLqRfv35Z3l9maBRFUbKywfnz52nevDl2dnYEBATg5+dHyZIlGTt2LIGBgSxevDhHAn3VREdHY2dnR1RUFLa2tnkdjhBCiNfMhbtRtJt5EDMTI0582dwww120KbB6AFxer/aO6L0G3Oq+eLsc9tGy02w+f5+WzpHM5js0UXfBxgV6rQaXnJutRLx5vt96lTn7blKvVCGWvl8nZw+mKGoiadsXkByrLnOrD03GgHv9LO9O9yiM5TM/p33CRqw1j3s6eTSE9jMlASuEeKUlJCTg7++Ph4cHFhYWeR2OeMrzXpvM5jyMsnrQESNG0K9fP65fv57moK1bt2b//v1Z3R0Av/32G+7u7lhYWFC7du10Rar+a+XKlZQrVw4LCwsqVarEli1b0qxXFIVx48ZRpEgRLC0tad68OdevX89wX4mJiVStWhWNRsPZs2ezFb8QQghhaKkFun0ruBiufsr2L9SEkrEZdP/rlUgoAUxoVwE7S1O2hdiztOI8cPJSixYvbA0BB/M6PPGaSEjWsvxEIJALvZQehcCybrDxYzWhVKwW9N0I/TdnK6EEYFTAEds239Ag8RcW0B7FxBL898PsunDqTzWJJYQQQuSyLC
eVTpw4waBBg9ItL1q0KMHBwVkOYPny5YwYMYLx48dz+vRpqlSpgq+vL6GhoRm2P3z4MD169OC9997jzJkzdOzYkY4dO3Lx4kV9mx9++IFff/2VOXPmcOzYMaytrfH19SUhISHd/kaPHo2rq2uW4xZCCCFySmKKlvXn1LH5XaobaFj5yQVw/Hf15y7zoFRzw+zXABwLmPNl6/IAfLM/ksCOq6FEXUiMgiWd4fKGPI5QvA62XwrmYVwyRe0taVbeOecO5LcNZtVRi3Abm0OLb2HAdrVX0UtqVdGFws6ufJ3QnUVVlkLxOpAUAxuHw9J31NnjhBBCiFyU5aSSubk50dHR6ZZfu3YNR0fHLAfw888/M3DgQPr374+Xlxdz5szBysqKBQsWZNj+l19+oWXLlowaNYry5cszadIkqlevzsyZMwG1l9L06dMZO3YsHTp0oHLlyixevJh79+6xbt26NPvaunUrO3bs4Mcff8xy3EIIIURO2ecXRmRcMk4FzGlQOut/W9PxPwBbRqk/N/0KvDq8/D4N7J0axajrWYiEZB0jN95G13M1lGurFjRe0QdOzM/rEEU+t/LkXQDe9i6GsVEO1CFSFDjyG/zdHeIjwKUyDNoHdYeCUZY/cmfIyEjD8GalAZh2KoWo7uvhrUlq8urGTvijCQRfMMixhBBCiMzI8l+49u3b8/XXX5OcnAyARqMhMDCQ//3vf/op8jIrKSmJU6dO0bz5k6ulRkZGNG/enCNHjmS4zZEjR9K0B/D19dW39/f3Jzg4OE0bOzs7ateunWafISEhDBw4kCVLlmBlZfXCWBMTE4mOjk5zE0IIIXLChse9lNpVcX35L78R/rCiN+hSoOLb0OAzA0RoeBqNhu87V8bKzJjj/hEsOB6szgLn3Q9QYPMI2DtZhviIbLkTEcehm+GAmlQyOG0KbP5MHWKKAt794f3d4FTe4IdqXbEIpZ1siE5IYdGRO1BvuJq8cigJUXdgfgvp3SeEECLXZDmp9NNPPxETE4OTkxPx8fE0atSIUqVKUaBAAb799tss7Ss8PBytVouzc9ouyM7Ozs8cShccHPzc9qn/P6+Noij069ePwYMHU6NGjUzFOnnyZOzs7PS34sUNOBOPEEII8VhsYop+dqr2VV5yeHZizONeEw/BtTp0mPlKzxRVopAVY9t4AfDDdj+uhcVB2+nQ6HO1wb7vYdMnoNPmWYwif1p9+i6KAvVKFaK4w4svJmZJYgz83Q1Ozgc00OIbaDsNTMwMe5zHjIw0fNxc7a00/+AtouKT1eTV+7uhZGNIjlMTyfumShJWCCFEjstyUsnOzo6dO3eyceNGfv31V4YOHcqWLVvYt28f1tbWORGjwc2YMYNHjx4xZsyYTG8zZswYoqKi9Lc7d+7kYIRCCCHeVLuuhJCQrMOtkBWVi9llf0fK4949YVfVmdS6LwNTS8MFmkN61CpOk7KOJKXo+HT5WZK0ijpbVpufQWMEpxapw+GS09dJFCIjOp2iH/rWtYaBLwomxqi1jG7sUmdU7LYE6g7L8eRtmt5KhwLUhVYO0HM11Hpc+3TvN7Dtc9DpcjQWIYQQb7ZsD/CuX78+H374IaNHj043HC2zChcujLGxMSEhIWmWh4SE4OLikuE2Li4uz22f+v/z2uzZs4cjR45gbm6OiYkJpUqVAqBGjRr07ds3w+Oam5tja2ub5iaEEEIY2sbHQ9/aV3FF8zJfTM8sgfPLQWMM7ywE2yIGijBnaTQapnSpjL2VKZfuRTNjz+PZW2u+B+/8qc5cd3UT/NUZ4iPzNFaRPxy59YCgyHgKWJjgWyHjz5fZkvgIlr4NgYfB3E6d3a18O8Pt/zky7K0EYGwCrX+AVlPV+8fmwIah6vA8IYQQIgeYZKbRr7/+mukdDh8+PNNtzczM8Pb2Zvfu3XTs2BEAnU7H7t27GTp0aIbb+Pj4sHv3bj755BP9sp07d+Lj4wOAh4cHLi4u7N69m6pVqwIQHR3NsWPHGD
JkiP7xfPPNN/rt7927h6+vL8uXL6d27dqZjl8IIYQwpMi4JPZdU2dvavcyQ9+CLz5VmHssuNU1QHS5x8nWgm87VuKjZaf5be8NGpR2pJaHA3i1B8s18M+7cPsQLGoDvVZDAQMmCsRrZ8VJtXd5h6quWJgaG2aniY/gr7fhzlE1odRnLRT1Nsy+M0ntrXSd66ExLDoUoE8yAVD7A7CwhXUfwtmlarxd5oGJea7GKIQQ4vWXqaTStGnT0twPCwsjLi4Oe3t7ACIjI7GyssLJySlLSSWAESNG0LdvX2rUqEGtWrWYPn06sbGx9O/fH4A+ffpQtGhRJk+eDMDHH39Mo0aN+Omnn2jTpg3//PMPJ0+eZO7cuYB6hfOTTz7hm2++oXTp0nh4ePDVV1/h6uqqT1yVKFEiTQw2NjYAeHp6UqxYDhRvFEIIITJh28VgkrUK5VwKUMa5QPZ2kvgIVvaFlAQo9RbU+8SgMeaWNpWLsPtKUdacCWLY36fZPLwBhW3MwaMB9N8Cf3WBkIsw/y3ovQ4KeeZ1yOIVFBWXzNaLak1Ngw19S4pTh7zlYUIJnvRWGrrsDPMP3qJfPXfsLE2fNKjSHcysYdUAuLJBHTbadUmO1XoSQgjxZsrU8Dd/f3/97dtvv6Vq1apcuXKFiIgIIiIiuHLlCtWrV2fSpElZDqBbt278+OOPjBs3jqpVq3L27Fm2bdumL7QdGBjI/fv39e3r1q3LsmXLmDt3LlWqVGHVqlWsW7eOihUr6tuMHj2aYcOG8cEHH1CzZk1iYmLYtm0bFhYWWY5PCCGEyC0bzz+Z9S3bNo+EBzfAtih0+t1gU5nnhUkdK1LKyYaQ6EQ++ecsWt3josMulWDAdnW2q8hAdbaroNN5G6x4JW04f4+kFB3lXApQqehL1ChLpU1RkzSBRx4nlNblSUIpVYa1lZ5Wvh28uxxMLODaNlg9ALTJuR6nEEKI11eWP2l+9dVXzJgxg7Jly+qXlS1blmnTpjF27NhsBTF06FBu375NYmIix44dSzME7d9//2XRokVp2r/zzjv4+fmRmJjIxYsXad26dZr1Go2Gr7/+muDgYBISEti1axdlypR55vHd3d1RFEU/XE4IIYTIbaGPEjhy8wHwErO+XVwD5/9RC1q/vQCsCxkwwtxnbW7C7J7VsTQ15uCNcGbuufFkpYMHDNgBRapCXDj82Q5u7smzWMWradUptUD3297FXq5GGajF7zd9DNe2qkmad5dD0eoGiDL7nllb6WmeTdVC/cZmcGUjrB0kNZaEECKT9u/fT7t27XB1VWtdrlu37pltBw8ejEajYfr06WmWR0RE0LNnT2xtbbG3t+e9994jJiYmTZvz58/ToEEDLCwsKF68OD/88EOa9RMmTMgwXxEQEIBGo+Hs2bPZfIQvL8tJpfv375OSkv4PkVarTVccWwghhBCZs/n8fXQKVC1un70pz6PvwaZP1Z8bfAYl6hg2wDxS2rkA33RUeyNP332NQzfCn6y0cYR+m8CjESQ9noXr1KK8CVS8cvzDYzl3JxJjIw0dqxV9+R3umQRn/nqStHXzefl9GsALeysBlGoG3f4CI1O4uBrWfySzwgkhRCbExsZSpUoVfvvtt+e2W7t2LUePHsXVNf2FwZ49e3Lp0iV27tzJpk2b2L9/Px988IF+fXR0NC1atMDNzY1Tp04xdepUJkyYoC/x86rLclKpWbNmDBo0iNOnn3QzP3XqFEOGDMn2LHBCCCHEm+7pWd+yTKdTC/ImRKo9dxr9z6Cx5bUu3sXoVqM4igJDl50m8EHck5XmBaDnSqj0DuhSYOPHsPVz6YkhWHcmCID6pQqr9bhexvE/4MBP6s9tp0O5Ni+3PwP6b2+l6IRnDG8r4wvvLAIjE7VH49ZRau8rIYTIZYqiEJcclyc3JYvve61ateKbb76hU6dOz2wTFBTEsGHDWLp0KaampmnWXblyhW3btjFv3jxq165N/fr1mTFjBv/88w/37qmf/Z
YuXUpSUhILFiygQoUKdO/eneHDh/Pzzz9n+bnt168fGo0m3e3ff//N8r4yK1OFup+2YMECfWHt1CcsJSUFX19f5s2bZ/AAhRBCiNfd/ah4TgdGotGoBaqz7MQfcGsvmFhC5z/A2PTF2+QzEztU4EpwNOfvRjHgzxOs+bAuthaPH6eJufq4C5eFvd/AsdkQfg3eWQgWBqijI/IdRVFYf1ZNKnWs9hI1ykAdVrn1caK2yZfg3fclozO8/84EN7xZ6Ywblm8LnefCqvfgxDywLKjOECmEELkoPiWe2svyZtb1Y+8ew8o0Gz3Cn0Gn09G7d29GjRpFhQoV0q0/cuQI9vb21KhRQ7+sefPmGBkZcezYMTp16sSRI0do2LAhZmZPJlLw9fVlypQpPHz4kIIFC2Y6nl9++YXvv/9ef//777/n77//ply5ctl8hC+W5Z5Kjo6ObNmyhatXr7Jy5UpWrlzJlStX2LJlC05OTjkRoxBCCPFa2/Z4dirvEgVxts3ipBJhfrBznPpzi0ng+OwagvmZhakxf/SpgbOtOTdCYxi67Awp2qeG72g00GgUdF2sJtdu7oa5jeH++TyLWeSdc3ejCHgQh6WpMS28XLK/o/DrsKIfKFqo0gMajjJYjIZkZKTRJ5LmHXhObyWAil2g7eOr3/unwpHnD+kQQgjxbFOmTMHExIThw4dnuD44ODhdnsTExAQHBweCg4P1bVInKkuVej+1DcCFCxewsbFJc/tvIsvOzg4XFxdcXFw4fPgwv//+O2vWrMHF5SX+Fr5AlnsqpSpTpsxzi18LIYQQInO2XlA/MLSqlMVeSilJsGYgpCSAZzOo+X4ORPfqcLa1YF6fmrzz+2H2Xwvjm81XmND+P1cFvTqAvRss7wURt2Bec2j1PXj3VxNP4o2QOvStRQVnrM2z+XE3LgKWdYPEKChWSx329gqfQ60rFeHX3ZnorQRQYwDEP4TdX8P2L9QeS1Xfzb1ghRBvNEsTS469eyzPjm0op06d4pdffuH06dMvPxlEJpQtW5YNGzakWRYUFETjxo3TtT1z5gy9e/dm5syZ1KtXL0fjyvJf2QEDBjx3/YIFC7IdjBBCCPGmCY1O4MTtCABaVsziVaR9U+D+OfULYYffXukvvIZSqZgd07tVZfBfp1l0OIASDlYMqO+RtpFrVRi0H9YNUadR3/QpBByCNj+BpX1ehC1yUYpWx6bzap2KjlWzWaBbmwwr+0HETbArDt2XgmkWexHmMuPHvZWG/X2GeQdu0a+e+5MhohmpP0JNnB2ZCeuHgrmtOjxOCCFymEajMegQtLxy4MABQkNDKVGihH6ZVqvls88+Y/r06QQEBODi4kJoaGia7VJSUoiIiND3HnJxcUk36Vnq/ad7GJmZmVGqVKk07UxM0qd0goODad++Pe+//z7vvffeyz3ITMjy8LeHDx+muYWGhrJnzx7WrFlDZGRkDoQohBBCvL62XwpGeTzrW1H7LFw9CzwGBx8PYWk7HWyzUYspn2pZsQijW5YF4OtNl1l75m76RlYO0P1veOtr0BjDxVUwyweubc/laEVuO3gjnPCYJByszahfunD2drLtc/DfB6bW0ONvsMkfJR5aV8rETHCpNBpo8Q1U7aUO71vVH/z350qcQgjxOujduzfnz5/n7Nmz+purqyujRo1i+3b184aPjw+RkZGcOnVKv92ePXvQ6XTUrl1b32b//v0kJz8Zurxz507Kli2bpXpKAAkJCXTo0IFy5cplq9B3dmS5p9LatWvTLdPpdAwZMgRPT0+DBCWEEEK8KbY+rqfUulIWeiklPoK1H4CiU+u8VOiYM8G9woY08iTsUSILDwUwcuV5bC1MaVY+bT0CjIyg3sdQvI7aayniJizrCpW7Q8vJauJJvHbWn1V7KbWtXART4yxfP1VnejvxePKZznPBpZIBo8tZ/+2t1NfHHTur5/RW0mig3S/qzJFXN8HfPaDvRihaPddiFkKIV1lMTAw3btzQ3/
f39+fs2bM4ODhQokQJChUqlKa9qakpLi4ulC2rXvwqX748LVu2ZODAgcyZM4fk5GSGDh1K9+7dcXVVJ5J49913mThxIu+99x7/+9//uHjxIr/88gvTpk3LcryDBg3izp077N69m7CwMP1yBweHNIXADSkbf2kz2ImRESNGjMjWgxZCCCHeVA9iEjl66wEArSpmoafRtjHwMADsSkCrKTkT3CtOo9HwVRsvOlUrilan8OHS0xz3j8i4cYnaMPgg+AwFNOp06jNrqMkD7XMKGot8Jy4phe2X1ERth+wMfbu598lMb83G5cvhYK0rFaGscwGiE1L4ff/NF29gbAJd5oNHQ0iKgb+6qBMACCGE4OTJk1SrVo1q1aoBMGLECKpVq8a4ceMyvY+lS5dSrlw5mjVrRuvWralfvz5z587Vr7ezs2PHjh34+/vj7e3NZ599xrhx4/jggw+yHO++ffu4f/8+Xl5eFClSRH87fPhwlveVWRpFURRD7GjLli307ds3TTbsdRYdHY2dnR1RUVHY2trmdThCCCHyob+PBzJmzQUqFrVl07AGmdvo6mb4511AA/02gXv9HI3xVZes1TF4ySl2Xw2lgLkJf75Xi+olntNV/M4J2DAUwq6q9wuVVmfNK9PyjahJ9bpbfzaIj/85SwkHK/aNapy1wqnhN2BeU0iIgsrdoNPv+fac2HEpmA+WnMLS1Jh9oxrjlJlZJRMfwZ/t4d5psC0KA7aBfYkXbyeEEC+QkJCAv78/Hh4eWFi82vXp3jTPe20ym/PI8vC3ESNGpLmvKAr3799n8+bN9O3bN6u7E0IIId5YWy7cB7LQSykmDDY8nrK27rA3PqEEYGpsxG89q9N3wXGO+UfQZ/5x/hxQC2+3ZySWitdUey2dWgT/ToYH1+Hv7uBaHeoNh/Ltwcg4Vx+DMJzN59XfqXZVimQtoRQfCX93UxNKxWpCu1/zbUIJ4C0vZ6qVsOdMYCQz9txgUseKL97IvAD0Wg0LWkK4HyzuCAO2g41jjscrhBAi/8ry8LczZ86kuZ0/fx6An376ienTpxs6PiGEEOK1FBmXxJGbqUPfMlFPSVFg48cQFw5OFaDp2ByOMP+wMDVmYf+a1CnpQExiCn3mH+NkwDOGwgEYm0KtgTD8DNT7BEws1N4ZK/vBjOpw7Hd1ViyRrzxKSObfa2qP+baVXTO/oTZFLVL94IbaQ6fbqz/T24toNBpG+5YD1B6RgQ/iMrehlQP0XqsOrY24CX91VhNtQgghxDNkuafS3r17cyIOIYQQ4o2y83IIKTqFci4FKOlo8+INzi4Fv81gZAqdfwcT85wPMh+xMjNhQb+avLfoJEduPaDPguMs6l+LWh7PKcZtYQdvTVRrLZ34Q62x9DAAto6G7V9C6RZQpZv6v2kWZuYTeWLXlRCSUnR4OlpTzqVA5jfc+RXc3AOmVupMbwWcX7xNPuDjWYgGpQtz4Ho403ZdY1q3qpnb0K4o9FkHC3wh+Dws6w6918jvgBBCiAxluadS06ZNiYyMTLc8Ojqapk2bGiImIYQQ4rX3ZNa3TAx9exgAWz9Xf276Zb6ajSo3pSaW6pcqTFySln4Lj3PscSH057JxhCZfwKcXofWP6vOrS1aTeCv6wBQPtXjx0dkQdk3tNSZeOalD39pUds380LfTS+DoLPXnjrOhSJUcii5vpPZWWnc2iKvB0ZnfsJAn9FoD5rYQeFjtxSdF7YUQQmQgy0mlf//9l6SkpHTLExISOHDggEGCEkIIIV5n0QnJHLiuDtN54dA3nRbWDoGkR1DCB+oOz/H4knXJRCVGEZGQ/4aAWZoZM69vDRqUTk0sndAPM3whM2t1WNzggzDkiDo0zrYYpMTDjV2w7XP4rSZMcYclnWHPt3Blk1rgWZuSkw9LvEBUfDL79EPfMlmj7PYR2PSp+nPjMVChY84El4cqFbOjTaUiKAp8v/Vq1jYuUhneXa4OD722DdZ9CDpdzgQqhHgjGGiOMGFAhn
hNMj38LbV2EsDly5cJDg7W39dqtWzbto2iRbMxdasQQgjxhtl9JYRkrUIpJxtKO79gmM6RmWpPATMbtSdFDhSRjk2OZe31tay4toKgR0Ek6Z5cPCpTsAxtSrahtUdrXKwzUfvpFWBhaswffWrwwZJT7L8WRv9Fx1nQtyZ1SxXO/E6cvdShcc0nQOgVNal0c7eaiEiIVH++uftJe2MzKFQKHEqCgwcU9Hjyv11xddp2kWN2XlZ/p8o421DmRb9TAJGBsLyX2iPNqwM0HJ3zQeaRkb5l2XE5mH/9wtjrF0qTsk6Z39itLnRdrM44eWEFWBaEVlPydRFzIUTuMzU1BSAuLg5LSxlK+yqJi1Nr7qW+RtmhUTKZmjIyMtJ3Jc5oE0tLS2bMmMGAAQOyHUx+ktnp9YQQQoj/Grj4JDsvhzC8aSlGtCj77IbBF+GPJqBNgvYzoHofg8YRFhfG4suLWXVtFTHJMS9sX8+1HuN8xuFqk4UiyHkoIVnL4L9O8a9fGOYmRizoV5N6WUksZSQlCUIvQdApuHsKQi5C+HW1N9OzGJmoiaX/JpscPKCgu9pDSryU/guPs9cvjE+bl+Hj5qWf3zgxRq0XFHJRHeo4YPtr/xp8s+ky8w76U8rJhq0fN8DUOIuDFc6vhDUDAQUajoImX0piSQiRJffv3ycyMhInJyesrKyyNkOnMDhFUYiLiyM0NBR7e3uKFEnfyzezOY9MJ5Vu376NoiiULFmS48eP4+j4ZHpRMzMznJycMDZ+c6bglaSSEEKI7IhJTKH6pJ0kpejY+nEDyhd5xt+QlET4o6n6xbdMK7WAsAE/gJ0IPsFn/37Gw8SHALjbutPbqzf1itbDxtQGKxMr4lLi2HF7B5tvbeZUyCkAbM1s+bb+tzQu3thgseSkxBQtQ/46zZ6roViYGjG/rwESS/+l00HUHQi/BhG31BpYEf7w0F/9OSXh+dtbO6VPODl7gWN56eGUCZFxSdT4ZhcpOoVdIxpSyuk5PZV0OljRG65uUp/3gXvAvnjuBZtHouKTafLjv0TEJjGhnRf96nlkfSfH/4AtI9WfG3wGTb+SxJIQItMURSE4ODjD+swi79jb2+Pi4pJhks/gSSWRliSVhBBCZMfGc/cY9vcZPApbs+ezRs++UrdzHBz6BawKw4dHwCYLQ1aeQ1EUlvstZ8rxKaQoKZQtWJbh1YdTv2h9jDTP7r1wO/o2n+//nIsPLgLQ16svH3t/jKlR9rtL55ZcSSw9i04HMcFPkkz//T/+4bO3NbEE16pQ1Bvc6qlDkSztcyfufGTFiTuMXn2eci4F2PZJw+c33vMt7P9BHa7YbzMUr5U7Qb4C/jp6m7HrLmJnacq+UY2xtzLL+k6O/Abbv1B/rjsc3vpaEktCiCzRarUkJ0vh/1eBqanpczsGGTSptGHDBlq1aoWpqSkbNmx4btv27du/aHevBUkqCSGEyI4Pl55iy4VghjT25H8ty2XcyP8A/NkOUKDbUijf1iDHTtYm8+2xb1l9fTUArT1aM6HuBCxNMlffIFmbzM+nfuavK38B6nC4GU1nYGosiaVsi49Mn2yK8Fenck/8z2xdGiMoUhVKNoayrdRkUw7U2Mpv+iw4zv5rYYxsUYahTZ8z9O3MUlj/ofpzx9lQ9d3cCfAVkaLV0XbGQa4GP6JfXXcmtK+QvR0dmwtbR6k/1/kQfL+TxJIQQryGDJpUMjIyIjg4GCcnJ4yMnn0VU6PRoNVqsxdxPiNJJSGEEFkVn6Sl+qSdxCdr2Ti0PpWK2aVvFBsOs+upvVuq9YIOvxnk2DpFx5gDY9jivwUNGj71/pR+Ffplq6bB7sDdjDkwhviUeNqWbMu39b99bi+nV8Urm1jKiE4HD26otZvuHIOAA+r9p1k7QhlfKN8BPJtAPkjuGVpEbBI1v92FVqew57NGlHS0ybjhjd2wrCvoUtRZ/d6amKtxvioO3Qin57xjGBtp2Ppxg8wVNc/IyQVPZs6r1gva/i
JDNYUQ4jUjw99ymCSVhBBCZNW2i/cZ/NdpihW05MDoJukTOjqd+sX3xk4oXBY+2GuwAsLTT01n/sX5mGhMmNZk2kvXRDoYdJBhu4eRoqTQv2J/RniPMEicOS1fJZb+K/oe+O+H6zvVW2LUk3WWDlChE1R6B0rUeWN6jvx9PJAxay7gVcSWLR83yLjR/XOwsDUkxUClrtDpd3jORVJDS9GlEBwbzL2Ye9yLvUd4fDgeth5UdapKIctCuRZHqg8Wn2TH5RC83QqycpAPRkbZPFfO/AUbhoGig9K+8M7C177guRBCvEkkqZTDJKkkhBAiq4b/fYYN5+7xQcOSfNG6fPoGh36FnV+BiYVaQNg5m8NT/mOF3womHZ0EwDf1vqFDqQ4G2e/6G+sZe2gsAP+r+T96efUyyH5zWmKKlsFLTrHXLyz/JZZSpSRB4GG4sgkur4PYsCfrCpVWZwqs+i5Y57PHlUW95h3j4I1wRrcsy4eNS6Vv8PA2zH8LYkLAoyH0XA0m2agllA1RiVH8c/Ufll1dRkRCRIZt3GzdqO1Sm4GVB+Ji7ZIrcd2LjOetn/cRm6RlUseK9K7jlv2d+W2Flf3V2Q+L1oB3V4B17ifKhBBCGJ5Bk0q//vprpg88fPjwTLfNzySpJIQQIisSkrV4T9pJbJKWtR/WpVqJgmkb3D2pTnOuS4G206DGAIMc9987//Lx3o/RKTo+qvoRg6sMNsh+U827MI9fTv+CBg0zms6gUfFGBt1/TvlvYmlBv5rU9cynCRhtCvjvgwsr4fIGSI5VlxuZgld7qPMRFPPO2xhzQHhMIrW+3YVOgX2jGuNW6D+9ZB4Fqz2UIm6CUwUYsBUsMhhyaui44sNZcHEBq66tIj4lHgAzIzNcbVxxtXGloEVB/CL8uBl5EwX1Y7iFsQXvV3qffhX7YW5snuMxLjrkz4SNlylgbsLOEY1wsbPI/s7uHFd7WMY/BIeS0P1vcHpGvTghhBD5hkGTSh4emZt2VKPRcOvWrcxHmY9JUkkIIURW7LwcwsDFJyliZ8Gh/zVNO+Qk9gHMbQxRgeDVAd750yDDl/yj/Om2qRvxKfF0Kd2F8T7js1VD6XkUReHbY9+y3G85DhYOrO2wFgcLB4MeI6c8nViyNjPmnw98Mq5zlZ8kPoKLq+HUIrh35snyEj7g8xGUbf3aFPdOnc2sUlE7Ng6rn3ZlTBgsagPhfmBfAvpvA7uiOR7TyeCTjNw3kgcJDwAoW7As/Sv2x9fdFxOjtDWHohKjOBt6lgUXF3A69DQARW2K8lWdr6hXtF6OxqnVKXSZfZizdyLxreDM771rvNwOw67BX13U9zAzG3WIoYEmGBBCCJE3ZPhbDpOkkhBCiKwYsfwsa84E0b+eO+PbPTWsTZsCf3VSa+UU9IAP/jXItPHJ2mR6be3F5QeXqeVSizlvzcHUKGcKOSdqE+m+qTs3Im/QtHhTpjeZbvDkVU5JSNbSf+EJjtx6QCFrM1YO9nl2sef85v45ODobLqwC3ePpmwu6qzN2Ve0J5vn7cfaYe5Qjtx7weatyDG7k+WRFXIQ6e2LIRbAtCv23qI87BymKwuLLi5l2ahpaRUsp+1J8VuMz6rnWe+HvgqIobAvYxo8nfyQ0LhQjjRETfCbQqXSnHI35anA0bX89SIpOYU4vb1pWfMnhd7HhsLKfWlQeoOEoaPxFrtavEkIIYTiZzXm81Lu8oihITkoIIYR4vqQUHTuvhADQulKRtCt3jVcTSqbW0H2ZQRJKADPPzuTyg8vYmdvxXf3vciyhBGBubM73Db7HxMiEPXf2sP7m+hw7lqFZmBozt483FYva8iA2id7zjxMclZDXYRlGkSrQaQ58cgEafAYW9vAwALaOhmlesHOcOkQsHwp9lMAxf7U3UJunf6fiH8KSTmpCycYZ+mzI8YRSfEo8I/eN5MeTP6JVtLQp2YalrZdSv2j9TCVXNRoNrTxasbHjRjqW6ohO0THu8DgWXFyQo3GXc7
FlUKOSAHy1/iIPYhJfbofWhaH3OjVpCbB/KizpCFFBL7dfIYQQr7RsJZXmz59PxYoVsbCwwMLCgooVKzJv3jxDxyaEEEK8Fg7dDOdRQgpOBczxfrqW0vmVcGSm+nPHWeDsZZDjHb9/nIUXFwIw0WciztbOBtnv85R1KMtHVT8C4Pvj3xMUk3++SBawMGVR/1p4FLYmKDKe3vOPERWXnNdhGY5tEWg2DkZchjY/gYMnJETBoV9geiXY+DE8uJnXUWbJtovB6BSoUtye4g5W6sLoe2oNpftnwaoQ9FkPhTMo3m1ASdokPt37KTtu78DEyIQvan/B5PqTsTK1yvK+rEyt+Lru1wyoqNZTm3ZqGj+d/ClHL+AOa1qaUk42hD1K5H+rz7/8sYxNoOVkdfibiaVa62u2jzokUwghxGspy0mlcePG8fHHH9OuXTtWrlzJypUradeuHZ9++injxo3LiRiFEEKIfG3rhfsAtKzo8qSW0r2z6nTcAPVHQIWOBjlWVGIUYw6OQUGhS+kuNHNrZpD9Zkb/Cv2p5lSN2ORYvjz4JTpFl2vHflmFbcxZPKAWzrbmXA+NYcjSUySl5J/4M8XMGmq+D0NPqsWUi9cGbZJaf2lmDXXo0v1zeR1lpmw6r/5OtU3tpRR2Dea3gNDLYOMCfTeCUwYzLBpQsi6ZUftGcejeISxNLJn71lx6lOvxUkM/NRoNn3p/ymfenwGw6NIipp+ebqCI07MwNebX7tUwMzZi15VQlhy9bZgdV+kOgw+Aa3U1gblqAKx+Xx0iJ4QQ4rWS5ZpKjo6O/Prrr/To0SPN8r///pthw4YRHv5m/LGQmkpC5B1FUfAPj+X2gzjuRcVzPzKBB7GJGBtpMDEywszECHsrU9wLWeNWyAq3QtbYmJu8eMdC5IBkrY6a3+4iMi6ZZQNrqzOMPQxQvwDHhIBnM+i50mDFk0fvH81W/62427qzvO3ybPWYeBl3Ht2hy4YuxKfE813972jn2S5Xj/+yLt2L4p05R4hL0tKtRnG+71Ip39SHypbbR+DgNLi+/ckyz2ZQ/1Nwr2+QgvGGFhKdQJ3Ju1EUOPR5U4rGXIalb0N8hNoLq/daKOiWozFodVq+OPgFW/y3YGZkxm/Nf6NOkToGPcaa62sYf3g8AJMbTKZtyZwrfL3wkD8TN17GzMSIjUPrU9algGF2rE1Wh8Ht/xEUrTr7XtOvwLu/2qtJCCHEKyvHCnXb29tz4sQJSpcunWb5tWvXqFWrFpGRkdkKOL+RpJIQuSs8JpF//cI4dCOcQzfCCX2UtdoPJQtbU9PdgRruBantUYgShXL3i7Z4cx24Hkbv+ccpZG3GsS+aYZLwEBa0gAc3DD7N+cGggwzZNQQjjRFLWy+lYuGKBtlvVs2/MJ/pp6dT2LIwmzptwtrU+sUbvUL2XA3h/T9PolPgfy3LMaSx54s3yu+CL8Kh6eowpdQeZkVrqMmlsq1fqWLLqQmQ6iXsWVPnFmwZCSkJ4FoNeq5Sa/vkIEVRmHR0EiuvrcREY8L0JtNpVLxRjhzrl9O/MO/CPMyNzVnUclGO/U4rikL/RSf41y+Mss4FWD+0HhamBpwl8O5J2PQpBJ9X7ztXglbfq4lLIYQQr6QcSyoNGzYMU1NTfv755zTLR44cSXx8PL/99lv2Is5nJKkkRM5TFIXTgZEsPhLAlgv3SdY+ebsyNzGipKMNrnYWuNpbUtjGHJ2ikKLTkZSi40FMEgEP1N5MD2KT0u27lJMNzco78VZ5Z6qVKIix0at3NV68Hr5Ye4FlxwLpUasEk9t6wuIOcPc42BaD93eCratBjhOfEk+n9Z0Iigmit1dvRtccbZD9ZkeSNolO6zsR+CiQ/hX7M8J7RJ7Fkl2LDvkzYeNlAGb1rJ6+wPrrKsJfrfN15i81UQNQuCzU/wQqvg0mZnkaHkCX2Ye5dDuYDR5rKXN/g7qwdAt4ewGYG6iHzXP8ffVvvjv2HUYaI6Y0nEJL95Y5diydom
P4nuHsu7sPJ0sn/mn7D45WjjlyrPCYRFpOP0B4TCLveBfjh7crG7aXnk4LpxbC7kmQEKku82gIjceAW13DHUcIIYRB5GhSafHixRQvXpw6ddRuvseOHSMwMJA+ffpgavpkdpn/Jp5eJ5JUEiLnKIrC7iuhTN99jYtB0frlFVxtaVzWkXqehanuVjDTV1EfxiZxOvAhJwIecjIggrN3IknRPXnrK2xjTptKLrSr4kr1EgWf1LwR4iVpdQq1vt3Fg9gk/upfjfqnPwO/LWrPpAE7wKmcwY417dQ0FlxcgIu1C+s7rM/1YW//te/OPobuGYqJkQlr26/F3c49T+PJjvHrL/LnkdtYmhqzekhdvFzfoL/3MaFwbA4cnweJUeoy22JQdyhU76PWZ8oD9yLj6TtlMTNMZ1DO6A5ojKDJF1D/s1zpTXUq5BTvb3+fFCWFkTVG0rdC3xw/ZkxSDD239ORW1C0qO1Zmke8iTI1zZjbHg9fD6bPgGDoFvmrrxXv1PQx/kNgHsPdbOL0YdI8L4ns0hLrD1aGXr1CvOCGEeJPlWFKpSZMmmWqn0WjYs2dPVnadr0hSSYiccS3kEZM2XebAdbU+m7mJEe2ruNLHx51KxQwzRCg6IZl9fmHsuhLC3quhRCek6Ne52lnQtoorbSsXoVJRu9e7lorIcUduPqDHH0dxtIRjpf/C6NoWMDaHPusMemX+2sNrdNvYjRQlhV+b/EqTEpn7W52TFEXhw90fcjDoIA2LNeS3ZvmvJ3OKVkf/RSc4cD2c4g6WbBxaH3urvO+pk6sSouDkQjg6S60BBmDpADUGgHdfsC+Re7EkPuLc0i/wur0UU40WrB2hy3womTNDz/4rJDaEbpu68SDhAa3cWzGl4ZRc+xsRGB1Ij809iE6KZnCVwfqZFnPC/IP+TNp0GSMNLOxfi0ZlcqZnFJF34ODPcHrJk+RSQQ+o+R5U7QlWDjlzXCGEEJmSY0kloZKkkhCGFZeUwg/b/Fhy9DZanYKZsRHvNfDggwYlKWidc1/ikrU6Dt4IZ+O5e+y4FEJM4pMEk1shK9pVdqVdFVfDFS0Vb5Rx6y+y/Mh11hWeQ/mYo2pCqftSKP2WwY6hU3T03tqb82HnaVaiGdObTDfYvl+Wf5Q/nTd0JkWXwm/NfqNhsYZ5HVKWRcYl0W7mQe5ExNOgdGEW9a/1Zg6XTU6Ac3/DoV/gof/jhRr1XPbuD6Wa59zQOJ0WLq2FHWPhkTrrW6BTE0r0mg22uTMsMUmbRP/t/Tkfdp7SBUvzV6u/cr034LaAbYzaNwpjjTFLWy+lQuEKOXIcRVH43+rzrDh5lwIWJqz7qB6ejjY5cixATS4dnQVnlj7pFWdsBqXegkpdoEwrMJM6iEIIkdskqZTDJKkkhOFcvhfNsL9PczMsFgDfCs582dor14tpJyRr+dcvjE3n77HrSggJyU+mEy/tZEO7xz2YSubkh2vx2tDqFBp9t4XJid/RwPgimFhCj7/B07C9iFZfW82EIxOwNrVmfYf1OFs7G3T/L+vnkz+z8NJCPO08Wd1+NcYGmuUuN12+F03n2YdISNYxuJEnn7cy3LDFfEenhaub4OQCuPXvk+WWBaF8O6jYBdzqG2Zmr4QotbbT8bnqjIlAgM6Zr7V9+P7zkTgVsHj5Y2TS98e/Z+mVpRQwK8DyNsspbls81479tJH7RrI9YDuedp4sb7ccc2PzHDlOYoqWnn8c4+Tth3gUtmbVYB8K2eTMsfSSYuHCKjjxBwRfeLLc1FpNXpbxVRNNNjnUc0oIIUQaOZZUSkhIYMaMGezdu5fQ0FB0Ol2a9adPn85exPmMJJWEeHmKorDocACTt1wlSavD2dacqW9XoWFOdbXPgtjEFHZfDWXjuXvs8wsjSfvkva6Cqy3tqrjSplIRijvI1VORsZMXrmC0shfVjW6gmFqj6bnC4DMdPUp6RNu1bYlIiGBUjVH0qdDHoPs3hOikaFqtbkV0UjTf1f+Odp7t8j
qkbNlw7h7D/z4DwOye1Wn1phTufp4HN+HUIjj3D8SGPlluYQ8eDcCjkXorVCrzdXIehYD/Pri5F65sgKQY/T6PubxLn6u1qF6yCH9/UMfQj+aZDt87zKCdgwDyvMfdw4SHdFrfiQcJD3K8CH54TCIdZh4iKDKeikVtWTawDrYWOVPLKQ1FgdDLaoLp4iqIDHxqpQaKVldrMLnXh+J1wFwu9AghRE7IsaRSz5492bFjB2+//TbOzs7pxpKPHz8+y8H+9ttvTJ06leDgYKpUqcKMGTOoVavWM9uvXLmSr776ioCAAEqXLs2UKVNo3bq1fr2iKIwfP54//viDyMhI6tWrx+zZsyldujQAAQEBTJo0iT179hAcHIyrqyu9evXiyy+/xMwsc123JakkxMuJS0rh0+Vn2X5JrdHRvLwTP7xdBYccHOqWXVHxyey8HMLGc/c4eCMc7VNFvsu5FKBJOScal3GkultBTI2lwKgA7p4i+s+u2CaHE2dcAKu+q6FEbYMfZuqJqSy+vBgPOw9Wt1+NqVEufOHLhvkX5jP99HSK2hRlY8eNOVZkOKd9t+UKc/ffooC5CZuHN8j13pSvLJ0WAg7CxdVqIij+Ydr1plZqYqlwGSjorg5lMrEAE3NIiFaHtEXfU5NUYVfSbutYDmoPhsrdaDPnFJfuRfNtp4r0rO2WKw8tKjGKzhs6ExoXSrey3RhbZ2yuHPd59gbuZfje4WjQsLjVYqo6Vc2xY90Mi6HrnCM8iE2ipntBFg+ojaVZLvY2VBQIOg3XtsH17XD/XNr1GmNwrQbu9cC9ARSvDRbyufxVFp2QzJnASM4EPuRMYCRX7keTkKwlRaeQolWwMDWijHMByroUoJxLAXw8C1PKSRKHQuSFHEsq2dnZsWXLFurVq/fSQQIsX76cPn36MGfOHGrXrs306dNZuXIlfn5+ODk5pWt/+PBhGjZsyOTJk2nbti3Lli1jypQpnD59mooVKwIwZcoUJk+ezJ9//omHhwdfffUVFy5c4PLly1hYWLBt2zaWL19Ojx49KFWqFBcvXmTgwIH07t2bH3/8MVNxS1JJiOwLfZTA+3+e5PzdKMyMjfiyTXn6+Ljli6LYEbFJbL14n43n7nHMP4Kn30GtzYzxdnegtocDdUo6UKmoPWYmkmR645z7B2XDcDTaRK7pivKww5/U9q5p8MP4R/nTeX1nUpQUZjefTf2ihu0FZUhxyXG0WduG8PhwxtYeS7dy3fI6pGxJ1uroPvcop24/pHIxO1YNriu/4/+lTYZ7Z9TeRrf2wZ3joE3Mwg40UKQylGwMpVuAWz3QaPAPj6XJj/9ibKThxJfNc+0CxOcHPmfzrc242bqxou2KPJ9VMdWXB79kw80NakK53eocTdReuhdF97lHeZSQQsMyjvzRxxtzkzwaxhp9H27ugduH1ERm5O206zXG4FJJnQihhI96k+Fyr4TzdyNZfOQ2G87dIylF9+INnlKxqC0dqxalfRVXnGxzb9irEG+6HEsqeXl58c8//1C5cuWXDhKgdu3a1KxZk5kzZwKg0+koXrw4w4YN4/PPP0/Xvlu3bsTGxrJp0yb9sjp16lC1alXmzJmDoii4urry2WefMXLkSACioqJwdnZm0aJFdO/ePcM4pk6dyuzZs7l161am4pakkhDZcy3kEf0XniAoMp6CVqbM7VODmu75c4aXiNgk9l8L41+/UPZdC+NhXHKa9RamRlQvUZBaHg7U9ihEtRL2WJjmv3oyIpNiH8C2z+HCCgB2aL2ZaPIx/45tlyM92D7c9SEHgg7QqFgjZjabafD9G9rfV//mu2Pf4WjpyObOm7E0sczrkLLlXmQ8rX89QGRcMgPqeTCunVdeh/Rq0ybDw9sQfk29Rd2FlHhISYTkeDCzAbuiYOsKdsWhaA2wLpRuNzN2X+ennddoULowS94zfK+/jGwP2M7IfSMx0hixuNViqjhWyZXjZkZ0UjTt1rYjIiGCEd4j6F+xf44e79
TtCHrNO058spa3vJyZ+W61vEssPS0yEAIOwe2DapLpce2tNAqVUpNLbnXVIXO5OWOhYNflEGbsvcG5O5H6ZSUcrPB2K0i1EvZUKmqHraUppkZGGBtriI5Pxi/4EVeDH3HpXhRHbj4g5XEPcSMNdKlejI+bl6ZYwVcjwSvE6yzHkkpbt27l119/Zc6cObi5vVzX46SkJKysrFi1ahUdO3bUL+/bty+RkZGsX78+3TYlSpRgxIgRfPLJJ/pl48ePZ926dZw7d45bt27h6enJmTNnqFq1qr5No0aNqFq1Kr/88kuGsYwdO5Zt27Zx8uTJDNcnJiaSmPjkSlt0dDTFixeXpJIQWXDcP4L3Fp3gUWIKHoWtWdCvJh6FrfM6LIPQ6hSuBkdz3D+CY7ciOB4QQURsUpo2psYaKha1o5a7AzXcHajhVjBHZ7YTuURR1JmptoyCuHDQGLHbsS/vBzajey03Jnc2zEWYp+2/u5+Pdn+EiZEJa9uvxd3O3eDHMLRkbTLt1rUjKCYoV74E56Rdl0N4f7H6eWFub29aVHDJ44hef77T9uMX8ogfulSma82cL5IdFhdGpw2diEqMYmClgQyvPjzHj5lV626s46tDX2FpYsnGjhtzvEj/gethvPfnSZJSdDQoXZi5vWvk7lC4zIgKgsAjcPuw+n/o5fRtCpUCz2bg2VRNMklNphxx92EcEzZcZtcVtcyBmbERbSoXobePG9WK22e6d3pEbBKbz99j7ZkgTgdG6vf1bu0SfNSkFI4FcriAvBBvsMwmlbI8NUeNGjVISEigZMmSWFlZYWqatrttREREpvcVHh6OVqvF2TntH0FnZ2euXr2a4TbBwcEZtg8ODtavT132rDb/dePGDWbMmPHcoW+TJ09m4sSJz39AQohnOnwznPcWnSQ+WUstdwd+7+39WiVUjI00VHC1o4KrHf3reaAoCjdCYzjqH8GxWw847h9B6KPEx3UEIvl9v9orsrSTDTU9HKhfqjD1ShXGzjJ/1pp5Y90/D3u+UWt9ADh5kdJuBiMXRqCQTJtKrgY/ZLI2maknpgLQq3yvfJFQAjA1NmVIlSGMPTSW+Rfn83aZtylgViCvw8qW5l7OvF/fg3kH/Rm58hxbXG3lqnkOuh7yCL+QR5gaa/DNhQSeoiiMPzyeqMQoyjuUZ0iVITl+zOxo79meVddWcS7sHD+d/IkfGv2Qo8drUNqRhf1q8v6fJzlwPZy+C44zv18NCuRG8e7MsisKld5WbwBxEXDn2JMkU9BpeHBDvR3/HYxM1TpMnk2gVHMoUgXywVD8V1mKVse8g/78sus68claTIw0vNfAg4ENSlI4GzMIOlib0dvHnd4+7pwOfMiP2/04fPMBiw4HsPrUXb5q58U73sXyRQkFIV5XWU4q9ejRg6CgIL777rsMC3XnN0FBQbRs2ZJ33nmHgQMHPrPdmDFjGDHiyQwbqT2VhBAvdvB6OO8vPkFCso5GZRz5vbf3az8MTKPRUNq5AKWdC9C7jhuKonAnIp4TARH6282wWK6HxnA9NIZlxwIxNtJQvYQ9jcs60bpSkdemF9dr6f552DdFnVodwMgEGoyEBp9x+FYUD+NCKGRtRp2Shh/auezqMgKiA3CwcOCDyh8YfP85qW3Jtsy/OB//KH+WXVnGoCqD8jqkbBvdshwnbj/k3J1Ihv19hhWDfKRQfw7ZdP4+oCY17KxyPoGx6voqDgQdwMzIjO/qf/fKFpY30hjxRe0v6L6pO1sDtvJ2mbepVeTZE90YQr1Shfnr/Vr0W3iC4wER9Jx3jEX9a72Sk2wAYOUAZVupN4D4SAg4oNZlurFbrcl0+6B62zMJ7N3AqwNU6Aiu1SXBlEUPY5MY+vdpDt14AEAtDwe+7ViR0s6GuYBQvURBlg2sw6Eb4UzZdpXzd6MYveo8Wy/cZ3LnyrjYSb0lIfJClpNKhw8f5siRI1Sp8vLjygsXLoyxsTEhISFploeEhODikvGVKBcXl+e2T/
0/JCSEIkWKpGnz9HA4gHv37tGkSRPq1q3L3Llznxurubk55ubSvVKIrPrXL5QPlpwiKUVH03JOzOpZ/bVPKGVEo9FQopAVJQpZ0cW7GAAPYhI5efshR289YP+1MG6GxXIi4CEnAh4ydbsfFVxtaVvZlQ5VXXG1zwf1Z3Q6SIxWbwlR6i0lEVBAP9BaQV/d3MhIradiZqMOPzAroP5v8oq+18ZHwuV1cH6FWiQWAA1U7AKN/geOZQDY/PgLcMuKLpgYOMkQHh/OnHNzAPik+if5rqePsZExgyoP4vMDn7PkyhJ6efXC2jR/Jk/NTIyY2aMarX89wJnASH7c7seY1uXzOqzXjqIobDp/D4A2lYq8oPXLuxN9R98TcHj14ZQqWCrHj/kyvAp50bVsV5b7Lee7Y9+xsv3KHJ8F0tvNgb8H1qHPguOcvxtF51mHWNi/Vv64EGJpD+XbqTeAiFuPE0x74NZeNcl0+Ff1Zl9CTTB5dYKikmB6kUv3ohi05BR3H8ZjZWbMxPYVeDuHehDVK1WYtR/WY/7BW/y44xp7/cJ4a9o+vutUiXZVDN9DWAjxfFlOKpUrV474+HiDHNzMzAxvb292796tr6mk0+nYvXs3Q4cOzXAbHx8fdu/enaam0s6dO/Hx8QHAw8MDFxcXdu/erU8iRUdHc+zYMYYMedJ9OSgoiCZNmuDt7c3ChQsxMpKri0IY2uGb4fqE0ltezvz2bnWZKekphWzM8a3goh/OcScijn3Xwth+KZjDNx9w6V40l+5FM3X7VZqXd6a3jxv1PAtjZJTLH2yTEyA6SJ3yOzpILbSb+nN0kJpsSYiCxEc8lT3KPiNTMC8A1o5g46TerJ2e/Gzj/HidM1gXhpzqRaDTQshFuH0E/PfDjZ2gTa2TlZpMGg2OZfWbJGt1bLukDrVuU9nwX4BnnplJTHIMXoW86FCqg8H3nxtaurdkzrk5BEQHsNxvOQMqDsjrkLKtuIMVU9+uwuC/TvH7/lvULulA03I5W9fmTXM1+BE3w2IxMzbirQo5+9xqdVq+PPQl8Snx1HCuQW+v3jl6PEMZVm0Y2wO2czPqJiv8VtCzfM8cP2bFonasGORDv4XHCXgQR6dZh5jbuwa1PPLZxBsOJdVbzfchKRau71QvHlzbrhYBPzxDvdmVgCrdoVpPKOie11G/cjacu8foVedISNbhVsiKub1rUNYlGxc94h9C2DX1uY+8DVF31GWJjyAxRi3yb2QKJuYYG5vxgYUtXas5svYmnIq0ZsY//ly4U5fRrSoa/KKOEOLZslyoe8eOHUycOJFvv/2WSpUqpauplNWi1cuXL6dv3778/vvv1KpVi+nTp7NixQquXr2Ks7Mzffr0oWjRokyePBlQe0o1atSI77//njZt2vDPP//w3Xffcfr0aSpWrAjAlClT+P777/nzzz/x8PDgq6++4vz581y+fBkLCwuCgoJo3Lgxbm5u/PnnnxgbP+k18aweUv8ls78J8Xzn70bSY+5RYpPUmWJm9az+6gwNSY5/0pMmIUqdnSiVRgOmVmpSw8xGvaqZBz1nImKT2HYxmPVngzjm/6RWnUdhawY3Kknn6sUM+3xqUyAqEMJvPKk38eAGPLgJ0Xezti9jM7CwBwtbMLEANKCBx/88vtqrUZM2SY8/KCbFQEpC9mK3dFCTTVaF1SSTtePj2+P75raPe0RZg2lqPI9jSEmAhGhIjFKTYw/9ISJAvXodclHtdfU0Jy+o3E2t12FXLF0oe/1C6b/wBIVtzDn2RTOMDZgAvPzgMt03dUdBYXGrxVRzqmawfee29TfWM/bQWBwsHNjaeesrM017dk3YcIlFhwMoaGXKlo8bUMQuH/QszCembr/Kb3tv8paXM3/0qZGjx5p3YR6/nP4Fa1Nr1rRfg6tN/unxsMJvBZOOTqKgeUG2dN6CjVnuFJ8Oe5TI+4tPcu5OJGbGRkx9pzIdqhbNlWPnqKQ49ULCpXVqgik59sk6j0ZQvQ+Ua/
v4b8qbbcnR23y17iIAjco48mv3apkbppqcAHdPQOBRuH9WHVYeFfjS8SQoptwx86R4xbpYlGkGHg3VzyNCiCzLsdnfUnv0/Lcro6IoaDQatFptloOdOXMmU6dOJTg4mKpVq/Lrr79Su7Y6XWzjxo1xd3dn0aJF+vYrV65k7NixBAQEULp0aX744Qdat26dJpbx48czd+5cIiMjqV+/PrNmzaJMGXVowqJFi+jfP+NZZzL7dEhSSYhnuxH6iHfmHOFhXDJ1PQuxoF/N3B/ypihqYuDeGQg+r/788LZ65SshKmv7snRQp7su4AIFijz52bao2j3evoSasMghN0If8dfRQFafusujxBQAihW0ZGiTUnTxzkJySVEgJvSphNF1NWn04AZE+IMu+dnbmlg+nvb78S11CnDbomoyx8IWLOzUBE52P2RrU54kmRKjITYMYsIgNhRiQtSfY0Ie3w9T1ytZ/5uTJWYFoERtdSrqUm+BS6XnDoH45J8zrDt7j74+bkzsUNFgYSiKQt9tfTkTeobWHq2Z0nCKwfadF5J1ybRbq84EN6rGKPpU6JPXIb2UxBQtXWYf5mJQNLXcHVg2sLZcJTcARVFo/OO/3H4Qxy/dq+ZossIvwo/um7uTokthUr1JdCzVMceOlRNSdCl0Wt+JgOiAXJ+tLj5JyyfLz7D9klqeYmSLMnzUpFS+r7uqlxwPflvg9BJ1iFwqC3uo3FVNMLlUyrPw8tK8A7f4ZvMVAPrVdeertl7Pvpii06nJo+s71N6/d0+CNjF9O9tiam8w+xJgX1y9SGRmo17sM7VQPytoE9Xh9fEPH/egDoKoOyTfv4hpSmza/RmZQLFaULal2sM4gwtCQoiM5VhSad++fc9cd+HChWcOW3vdSFJJiIwFRcbz9uzD3I9KoEoxO5YOrIONeZZH2madoqjJkRu71OKbd48/P3mkMVITIBa2YPxUTyRFB8lxT3rPZHY4l1VhKOj2+EPQ4/8LuoG9u/oBxgBXM2MTU/j7eCBz9t0iPEb9IFbCwYov25SnhdfjiRO0yWp38YcB6W8R/ul73jzNxAIcPKGQJxQurU67XKiUuszK4dWrJ6HTQXyEmiiLfZxkinvw5OfYcPX/pFj1tUyKVb8cKAr62k4m5uo5YG6nJsXsSzweDuEBhcuAcwUwylxCNDYxhRrf7CI+WcvaD+tSrURBgz3ULbe28L8D/8PSxJINHTfgYp3/p7BffW01E45MoLBlYbZ23oqFSf6+4h8QHkvbGQeJSUxheLPSjHirTF6HlO9duBtFu5kHsTA14tTYt7DOob8lSdokum/uzvWH12lSvAm/NPklXyZEdt/ezSf/foKFsQWbO2/Gycop146t1Sl8v/UKfxzwB+Ad72J826nS6zfk/eFtOLsUzixN24vXtTrUGKAmLczyd8/LzPpt7w2mbvcDYEhjT0b7lk3/e5OcoCbirm5ShxbGpK2Li40LuNeDot7gUllNzlnaZz8onQ7/6xdYvGYDbrHnaWxyAXfuP9VAA271oPI7UKGz9GAS4gVyLKn0X48ePeLvv/9m3rx5nDp1Kls9lfIjSSoJkV50QjJdZh3memgMpZxsWDHIJ2dnhFEUuH8OLqyEKxvUMfhPMzZTP6AUqaomCAq6qQkfu6JqQulFXxp0OkiIhEf31Vv0fXgUDI/uqT9HB6nHTIh8caw2zmpdoHTDsx7/b2at9gYytVCTXPrYNGodn+R4tZZAUhxJ0aGc8buB341bWKU8xIFo3C3iKG4eg2lssJoYexaNkZo4SU0YPX2zLaoWzxbZsvbMXT5dfg73QlbsHdnYYF9KY5Njab+2PaHxoXxU9SMGVxlskP3mtWRtMm3WtuF+7H3G1BrDu+XfzeuQXtr6s0F8/M9ZjDSw9P06+HgWyuuQ8rXJW67w+/5btK7kwqye3jl2nJ9P/czCiwtxsHBgTfs1FLLMn6+boij02dqHs2Fn6VK6CxPqTsj1GJYcCWD8hkvoFKjrWY
jZvbyxs3w1Z897KTot3NwLZxbD1S1Pevqa20HVHuDdH5zK5W2MOejphNKnzcswvNlTPdOSE9Shg5fXg982tQdyKjMb8GwCno+HpTmUzJELVpFxSbz/50lO3n5ISeNwfq0RTsXI3U9NsvE4lqrvQq0P1AtpQoh0cjyptH//fubPn8/q1atxdXWlc+fOdOnShZo1a2Y76PxEkkpCpJWi1THgz5PsvxaGs6056z6ql3N1RR6FqB/kzq+A8GtPlhubQQkfKNVc/bDi5AUmuTDNcUKUmlxKHV739M8Pb6etxZALFBMLNAXd1e7jaW4eag+cV3V2tXyuz4Lj7L8WxifNS/NJc8P1Uvnp5E8surSIYjbFWNdxHebGr8/r98/Vf/j22Lc4WzmzpfMWzIxf0WnJs2DUynOsPHUXZ1tztn7c8NWdav0VpygK9afsJSgynlk9q9M6h2Z+OxVyiv7b+qOg8EuTX2haommOHCe3nAk9Q5+tfTDSGLG2/VpK2pfM9Rj2XA1h6LIzxCVpKe1kw4J+NSnu8Br33okJg7N/wcmF6t/9VG711N5L5du9Vn93V5y4w+jV5wH4X8tyDGnsqSbZ/PfDhVXqRb6ne0UXcAWv9lCmpTqUPJeei4RkLZ/8c5Ztl4LRaGB8Wy/6VTCBi6vV3mZPf34s1RwafKbGJ4TQy5GkUnBwMIsWLWL+/PlER0fTtWtX5syZw7lz5/Dy8jJI4PmFJJWESGv8+ov8eeQ2lqbGrBzsQ8WidoY9gKKoBR2P/a5e/Uq9KmhioX5QqfSOevUrB2sbZYuiQFyEOiQtLvzJkKynh2fFhqs9kZLj1P+frjGgoM5uZmqpPlZTS7Vnk9WTXk7hSgGWXYxn7x0dd5XCFHIqxo/dqhn+NRDPFPoogTrf7UanwL5RjXErZJjz8GbkTd7e8DYpSgq/NfuNhsUaGmS/r4pEbSKtV7cmND6UcT7jeKfMO3kd0kuLS0qh7YyD3AqLpVk5J+b1rZEvh1LltTOBD+k06zBWZsacGvsWlmaGr8sXmxxLlw1dCIoJomOpjkyqN8ngx8gLH+/5mD139tC4eGNmNJ2RJzFcDIrivT9PEBKdSGEbM/7oU8OgQ4JfSTod3NqjJpf8tj6p+WdVCKr1Au9+as+cfGzP1RAGLj6FVqfwYaOSjK4cB+dXwqU1aYe22RaFCp3Aq6M6tC2PekFrdQoTNlxiyVE12TfKtywfNSmlfja7tReOzYVr29CXOnBvAA1HqRcm5X1bCMMnldq1a8f+/ftp06YNPXv2pGXLlhgbG2NqaipJJUkqiTfcn4fV7u4Ac3p507KiAeu9KIo688r+qRB08sny4rWhel/1CqCMiQdg28X7fLn2Ig9ikzAx0vBRk1IMbVrq1Zl17zU2/6A/kzZdpnoJe9Z8WM8g+1QUhfd3vM/x4OM0Kd6EX//f3n2HR1F2DRz+bc+m90oIodfQCR0UpKPYQUTFrlgQu5+A+qpYXwUL+qqIDVRQUREQBZEqvZfQSUjvZXuZ74+FhUhLIJDCua9rrt2dmZ09u5nszpx5nvNcOa1KtlvTfL3ra15f/zpx/nH8eu2v6NS1v6vMrowSRny4CrvTzfNDW3B3r9p9Ilkd/jN/F5+tPMTwtrG8N+rijHT4/Mrn+fnAz8T6xfLD1T9cshHTLraDxQe57ufrcCkuvhj0BR2iOlRLHJnFFu6auYFdmSUYtGreubndRWtxVuMUp8Pmr2DjF54u88c1utLTeqnpYNBcgnqTVWhzaiGjPvmH+s4jPBm3k/7ulagKDp5YwRjiSSIl3QTxXWtMd3pFUZi6ZB/v/rkPgIevbMyEq5qeSPYXHIRV02Dz1ycuWCb0gP4vQvzl0QNHiDOp8qSSVqvlkUce4YEHHqBJkxP9TiWpJEklcXlblpLDnTPX41ZOagZdFdxuT2HH5W96Rm8DT62hNjd4+r/Htqua16lj8stsPD9vBwt3ZAHQKSGE925pL0OcX2TD31vJ9vRi/nNNK8
Z0a1Al21x4aCFPLX8Kg8bAzyN+Js6/DgzTfRoWp4XBPwwm35rPS91f4tom11Z3SFXiqzWHmfjzTrRqFd/e25VODUKrO6Raw+VW6PHaUrJKrHw8piMDW1V9Yfrjxe/VKjWfDfiMTtGdqvw1qtOLa15k7t65JEUk8fXgr6uttZzJ5uTh2ZtZuicH8Bwn3N+n4eXTes/lhH2/w4YZnkFEjreICYjxjBrX4bZaMRpZ+v6t/PLNB/RzraSpOv3EAp0vNBtyrLX4lZem5MB5+vjvA0xZuAeAe3ol8tyQFuX3w+KjsPJd2PTliRbjLa6GfpMhvPGlD1iIGqCiOY8Kp5BXrlxJaWkpHTt2JDk5mffff5+8vLwqCVYIUTvtzS7l4VmbcSuekV7u71NFV+MP/AWf9IXvx3gSSjo/6PEoPLYDRnwoCaWzCPM38OHoDkwd2Y4Ag5YNRwoZMnUFf6XkVHdoddb+nFK2pxejVasYmhRbJdsss5fx1vq3ALi7zd11NqEEYNQauaPVHQB8sv0TnG5n9QZURW7tmsDwtrE43QoPzdpMftlphs4Wp7XmQD5ZJVYCfbT0aRpR5dtPK03jP/94urrdm3RvnUsoATzY9kGMWiPbcrexJHVJtcXhZ9DyyW2duKN7AwBeX7SHZ37YjsN1lgEl6hKNFpoPhVt/gEc2Q8/HPN3XSzPh79fhndYwc5inRZOlsLqjLS//AKx4G9eHPYj7ujcPKN/RVJ2OotF7EknXfQpP7IMbPoNmg2p0Qgngvj6NeGG4pxHEJysO8eKvuyjXtiKoHgx9Cx7ZBO1u9QxssvsX+KALLHzm7CMKC3GZq3ShbpPJxHfffceMGTNYt24dLpeL//73v9x5550EBARcrDhrHGmpJC53eWU2RnywiqOFFrokhvL1XckXPnRw5lb48wU4sNTzWB8AXe+Hrg96hrQXlXI4z8S4WZvYmeEpmPlg30Y8PqAZGvVlcoX4Ennr9xTe/2s//VtE8untVdNU/oXVL/DDvh+ID4jnp2t+qlPFuU/H7DAz8IeBFNmKeLXnqwxvNLy6Q6oSZTYn17y/kgO5Jno1CWfm2C7y/1cBE77fwo+b0rkluT6vXtumSrftcDu4Y+EdbMvbRvvI9swYOAOtunZ1Q6qo9ze/z8fbPiYhMIGfrvmp2ruWzlx1iJfm78KtQI/GYXw4uo6ODHcuTjvs+RXWz4AjK0/MV+ugYV9oNthTKzLoEl9McNogba2n5MDeRZC/37vIoWhYp06i9YCxBLW7BozBlza2KjR7XSrP/rgdgDu6N2Dy8JanbzmXvQuWvHis5hKeOpZXvQRJI2tM1z4hLraLPvobQEpKCp999hlfffUVRUVFXHXVVfzyyy/nu7laRZJK4nJmdbgY/elaNh4pJCHMl3kP9iDkQkY4KsuBJS95+rOjeA6sOt8NvZ/wFKUW583qcPHqgt18ucZTpLJXk3DeG9WeYN+afUWxtnC7FXq94Rmh6v1b2jOsCloqrU5fzX1/3gfAjIEz6Bx9edR0+HT7p0zdNJXEoER+uvonNOqqL8xcHfZml3LN+6uwOFw82q8Jj11VdSMD1kVmu5NOL/+J2e5i7v3dqrzb4NRNU/l0+6cE6AOYO3wusf5V07qwJjI5TAz5cQgF1gKeT36em5vfXN0hnTIy3Jd3dbm8u2cXHvGMRrZ9LuTsLL8sOslTMDqhu2dk26q+uGYr87QGP7IKDq2AtHXgtJxYrtaRGtiBD3KT+IsuTL/3Kjom1I1i69+tT+XpHyqQWALY/ycsfPpEki0+GYa8CTFtL1G0QlSfS5JUOs7lcvHrr78yY8YMSSoJUccpisJj321h3pYMAn20/PhgDxpHnmdxU6cd1n0Mf79xYvjZ1jdAv4kQ0qDKYhbwy9YMnp67DYvDRf1QXz4e05EWMfLddaH+3pvL7TPWEeCjZf3/9cdHd2GJkFJ7Kdf+fC3Z5mxuaX4LzyY/W0WR1nxl9jIG/jCQEnsJb/Z+k0GJg6
o7pCrz46ajTPh+KwDTR3dg8OVSrPg8zNuczvjvtlA/1Je/n+xbpbV3lqYuZfxf41FQeLvP2wxoMKDKtl1Tzd4zm1fXvkqYTxgLrluAr863ukNiZ0Yxd870jAwXG+TDl3d1oXHk5dPb4YxydkPKAkhZ5Bntln+dooU1gaiWENkSIppDcH3wj/K0oDlT1zOn3dOtrjgNCg9D4SHI3QuZWyBv36mv4RcBja+CpgP5y9mGsbM9NYhev74NN3euX8VvuHp9uy6VZ461WBrbowGThp0lseS0wz8feo5XHSZA5Sm4fuXz0pJe1GmXNKl0OZKkkrhcvbdkH2//sReNWsWXd3ahR+PzbEl08G9Y8ATk7fU8jm0Pg9+A+C5VF6woZ1dGCfd9vYG0AgtGnYY3b0yqkpY1l7P7vtrA7zuzuaN7A164utUFb2/y6sn8uO9H4gPimTt8bo04AbyUpm+ZzodbP6RxcGN+uPoH1Kq608XghV92MnP1YXx0ar6/rxtJ9YKrO6Qaacxna1mxL6/KW3XtLdzLrQtuxeK0MLLZSP6v6/9V2bZrMofbwYh5I0gtTeXBtg/yQLsHqjskAI4WmrltxjoO5poI9tXx2e2d60wrmCpRlusZ8v7IKjiy+sSx0pn4BIFGD2qtZ3LZwVpSvuXR6QTEQr1OnhZRDXpBRDNQqUjNNzN02gpKbU5u65bAS9e0rrr3VoOcnFi6s0ciE4e1OHsiuyQDFk+EHXM9j42h0P8FaD9GusSJOkmSSheZJJXE5Wj+tgwemrUZgFevbcMtyedx1ao0GxY/D9u/9zz2i/D8ILe9RX6QL4FCk51Hvt3Min2egRbu69OQpwY2lzov5yG7xEr315bicissfqw3TaMu7Er7iqMreHDJg6hQ8fmgz+kY1bGKIq09im3FDPphEGWOMt7t+y79EvpVd0hVxulyc/eXG1iWkktEgIGfx/UgNvgy7vZzGjklVrpOWYJbgWVP9KVBuF+VbLfAWsAtv91Celk6ydHJTL9qerXXF7qUfj/8O0/8/QS+Wl8WXLeAMGNYdYcEQIHJzp0z17MlrQgfnZoPR3fgyuZR1R1WzVSWC1lbPa2ZcvZA7m4oyQRTDpxzcAMVBMZ6WoCHNIDQRIhp5+m+5R95yto2p4sbpq9he3oxHRNC+Pberug0dff47OQaS3f1TOT5oedILIGnu+DCpyBnl+dxfFcY9o6nJZkQdYgklS4ySSqJy83m1EJG/u8fbE43d/dM5PlhlfzhdLtg/Wew9D/HurqpPHWTrny+Vhd8rI1cboU3ft/Dx38fBKTO0vk63mqvU0IIcx/ofkHbyjJlMXL+SPKt+dza4lae7vJ0FUVZ+0zbNI1Ptn9Ci9AWfDfsuzo19Hip1cEN09eQkl1Ki5hA5t7fDT/DhRWJLrQWklKYwoGiA+wr3EemKRONSoNOrUOn0RFhjKBlWEtahbeiQWCDGt3665PlB3llwW461A/mxwd7VMk2HS4Hdy++m005m4gPiGf20NkEGYKqZNu1haIojPptFDvzd9a4brVmu5MHv9nEspRcNGoVr13Xhhs7xVd3WLWH2+3p3mbOB7fDk2ByOz2tlXyCwScQDIFQiRp1k3/ewRdrjhDiq+O3R3rViuR3gbWArTlbybPmUWwrpsRegsPlIMI3gmjfaKL9omkU3OiM//uz1qby3E+exNLdPRP5v4okllxOTwmHpa94usSptdBtHPR5GvRVkxAXorpJUukik6SSuJykF1m45v1V5JXZ6Nc8kv/d1qlyLVuOboTfHvOM7gaerm5D/wtxHS5OwKJCft2awVPH6izFhxr535hOUmepglxuhd7HCnT/96a2XNeh3nlvy+q0cvui29mVv4umIU35esjXGLU1/yD+Yim0FjLwh4FYnBbev/J9+sT3qe6QqtTRQjMjPlhFXpmdrg1D+fT2zvhXMrFkcVpYkrqEX/b/wj+Z/6D8uy7KGfjp/OgS1Z1OYf2I1rfF4dQQHeRDvRAjEf
4G1NXcYnHQu8vZk1XKyyNac2vXhAvensvtYuKqifx68Ff8df58M+QbGgY3rIJIa5+1mWu5e/HdaNVafhnxC/EBNSdx43C5efqHbfy4KR2Apwc15/4+DetUQrm2WLA9kwe/2QTA52M7c0WzU1sy1QRuxc3qjNUsP7qc9Vnr2V+0/5zPUavUJIUn0TOuJ73q9aJFaPnE0Tdrj/B/P+0A4J5eiTw3pAKJJYDio7DoGdj9q+dxUH1PIe9mdacuoLh8SVLpIpOkkrhclNmc3DB9NXuySmkeHcDcB7pX/ATIXOBpmbThc0ABQxD0nwQdx1bqqpm4eHZnlnDvVyfqLL1+QxJXt5U6S+fy154cxs5cT5BRx9rn+p13gW5FUXh25bP8dvA3gg3BzB46m3oB55+gqiv+u/G/fL7jc9qEt+GbId/UuZPLLWlF3PrpWspsTtrXD2bm2C4VGlq9wFrAR1s/4pcDv2BymLzzEwITaBTUiMYhjYkPiEdRFBxuBw63g125B9mYuZ1M6wHc2L3PUZy+OEqTsBf0QLFHoNOoaB8fwg0d6zEkKabSia4LtTuzhMFTV6DXqFn3f/0uuOWky+3i+VXPM//gfDQqDdOunEbver2rKNra6b4/7mN1xmqGJA7h9d6vV3c45SiKwmuLTrSgvbOHpxtSdSc6LydH8k0Mm7aSUpuTB/o24ulBzas7pFMU24qZt38e36V8R1ppWrlljYMbUy+gHkH6IAINgejUOnLMOWSZssg0ZZJell5u/WYhzbit1W0MbjAYncbz/fv1P0d4ft55JJbAU2B9wZNQnOp53HyYp1ZoUNyFvWkhqpEklS4ySSqJy4HLrXDvlxtYsieHcH8DPz/Ug7iKNIN2u2HzV/DnC2Ap8MxLGgkD/nPa/vuiehWZ7Tw8+6Q6S70b8uTAZmhraA0Ft1up9hONu7/YwJ+7s7mzRyKThp9/DYWZO2by9sa30ag0/O+q/9ElRgrVA+Rb8hn0wyCsLisf9f+IHnFV0xWqJtmaVsRtM9ZRbHHQKjaQr+5KJtTv9IkUu8vON7u/4X/b/keZowyAev71uLrR1QxvNPyURGSZzcm8zenMWpvKrsxjI2viQu2TgS5wK/rgraAp9cxWVDhLW2HL74Pb6mm9YtRpGNwmmkeubFJldY3O5T/zd/HZykMMbBXFx2M6XdC2nG4nz618joWHFqJVaXmt92sMbDCwiiKtvXbn7+am+TcB8P2w72kR1qKaIzrVpysO8vJvuwG4tn0cb9yQVKfr+dQUDpebG6avZuvRYjodq6NUk44BTA4Tn27/lG92f4PlWPHxAH0AQxKH0DWmKx2jOhLic/ZC71mmLFakr2DF0RWsyViD1WUFIMIYwegWoxndYjQ+Wh+++ucIE48llu7t3ZBnBzeveGLJboK/X4c1H3i6Ier94YrnoMt9oLm0iXohqoIklS4ySSqJy8HL83fx6cpDGLRqvruvG+3ig8/9pPRNnlHd0jd6Hke29DQDbtDzosYqLozLrfDm7yl89PcBAHo29tRZCjnDSe7FpigKh/JMrDtUwLpDBRzIM1FktlNoslNidRLgo6V+qC/1Q31JDPfjqpZRtIsPviQtWjKKLPR8fSluBf6c0IfGkf7ntZ3fD//OU8ufwq24ebbLs9zS4pYqjrR2e33d63y9+2vaR7bni0Ff1LnWSuBpnXPrp2vJN9lpEunPe7e0p3l0+WOK1RmreWnNS96r7C1CW/BYx8foGtP1lM9kT1YJX/9zhHmbMyizeYr3atQqOtQPpm+zSPo0jaBJlD9aNazNWsvs3bNZdnSZ9/nxPu0ozRxAWpZniGyDVs3jA5pyV8+GF7WYv9XhouuUJRSZHcy4o9MFFWt2uBw8u/JZfj/8O1qVlrf6vFWnCr5fqKeWP8XCQwvpEduDj676qLrDOa2fNh/liTnbcLkVrmweyQe3dMCol9bNF9Obv+/hg78OEOijZdH43jWmjpJbcfPbwd94Z+M75FpyAU+LpF
ta3MLQxKHnPUJqsa2YOXvnMGv3LO924/zjeLLTk1xZ/0q+XpvqTSzd17shz1QmsQSQvRPmPwZpaz2Po9rA8Hc9I+0JUYtIUukik6SSqOtOLlr4/i3tzz30vLkAlrwEG2cCCugDjl2duQc0l88oO7Xd/G0ZPDnnRJ2lj2/tRMvYS/MdpygKW9KK+GlzOot2ZJFTaqvU8xuE+TKifRzXd6hHfOj5HWhWxH8XpzBt6X6SE0P57r5u57WN2XtmM2XtFBQUrm9yPZO7Ta6TSZMLkWPOYfAPg7G77Xzc/2O6x11YMfSaan9OGaM//YfsEhtatYoH+jZi3BWN0WjcfLD5A2bsmIGCQqQxkkc6PMLwRsPLFdu2OV0s2pHF1/8cYf3hQu/8huF+jO6awHXt486aHN5XuI+ZO2ey4OACnIonEdUl4kpKMvqxfr/nRL5tvSDeuKEtzaIvbITDM/lx01EmfL+VuGAjy5+64rwTWEdKjvDU8qfYlb8LrVrLf/v8lyvqX1HF0dZuaSVpXD3vapyKk/9d9T+6xZ7fd9jFtnRPNg98vQmb003nBiF8envnCnURFZW39mA+Iz/5B0WBD0d3YEibmOoOCYDDxYd5ftXzbM311OOs51+PJzo/wZXxV1bZ76XD5eC3Q7/x/ub3yTZnA9A9tjvPdHmGlbtUTPx5J+AZKfeZQZVMLB1vtf/HJLAWASpofytcORECZJRDUTtIUukik6SSqMuW781l7Mz1uNwKj1/VlIf7NTnzym4XbPoSlrzoGYEEIOlmuOolCIi+4FhK7aUcKDrAweKDZJuzsTlt2FyeyV/vT6QxkgjfCGL8YmgW2gyDxnDBr3m5251Zwn1fbSS1wIyPTs0bN7S9qHWWckttfL8hjR82HuVg3ok6MXqtmnbxwSQnhtI6LogwPz3BvnqCjDqKzHZSC8ykFpjZklbE4p3ZWBwuALRqFXf2TOSRfk2qvC5MscVBr9eXUmJ18sEtHRiaVLmDb0VReG/ze3yy/RMAbmp6E88lP4dGaoyd1vHWSi3DWvLt0G/rbOItu8TKxHk7WLzLc1KTEGXDP/5bUk2ebkA3Nr2RJzo94b0qb3O6WH0gn993ZLF4VzYFJk+tJI1axYCWUYzpmkC3RmGV+rzSStP4cMuH/HbwNxQUtCotScED2LS1E6UmX/RaNe+Pas+AVhf+vf5vN0xfzYYjhTwxoCkPXXmW35uz+PXAr7z8z8uYnWaCDEG81us1esZJC9nTmbJ2CrP2zKJZSDO+G/Zdjf3+WX+4gDtnrqfU6qR5dABf3tWFyACf6g6rTik2Oxg8dTkZxVZu6lSPN25oW90hoSgKP+3/idfWvYbFacGoNXJv0r3c1vI29JqL03ra7DDz6fZPmblzJg63A4PGwOOdHsde0JXJv+wC4P4+jXh6ULPK/w6Z8mDxRNg6y/NY7w+9HoeuD4JO9mdRs0lS6SKTpJKoq3ZllHDTx2soszm5tn0c/72p7Zl/QPcv8fxQ5niu5Hi6ur0FDc6//kmxrZjVGatZmb6StZlrvVeOKkKn1tE6vDUdIjvQNbYrnaM619iD5ZquyGznkW+3sHyvp1n4dR3ieHZwCyICqiZppygKm1KL+HLNYRZsz8Th8vwU+ejUDGwVzYj2cXRrGFbhAtgmm5M/dmXz/YY0Vh/IByAywMCzQ5ozol1clSUjpv65j3f+3EuTSH8Wje9dqRYVxbZiXl/3Or8e9IwQM67dOO5Luq/OJkqqQoG1gME/DMbsNPN2n7cZ0GBAdYd0US3akcn/LfwZW9gMVBoListAlH0MfeKuwuFyU2C2U1BmZ0d6MaXHurcBRAf6MKpLfUZ2iScq8MJOUlIKUpi6aSor0lcAYNAYCHb0Y/++zmjw5fXrk7ihY9UVk9+TVcKgd1egVatY/cyVRFYy/oNFB/lgywcsPrIYgE5RnZjSawrRflWf/KorCq2FDP1xKKWOUl7q/hLXNrm2ukM6o92ZJYz5bB15ZTbqh/ry9V
3J1A+7eC1Rjzte7N7kMGF2mr3D0/vp6s5Q8Yqi8NDszfy2LZMGYb789kgv/C5xgf5/K7YV8+KaF/njyB8AJEcn83LPly/Z/3NaSRovr32Z1RmrAegR14Mkw3288VsGAA/0bcRTA88jsQSQ+g8sehYyPKPrEVwfrvoPtLwG5DhA1FCSVLrIJKkk6qLMYgvXfrCarBIr3RqG8cWdXdBrT1OoMWu7pznvgaWexz5B0OeZ8+7qZnaYWXxkMfP2z2Nzzmbcirvc8khjJI2CGxHrH4tRa8SgMaDX6Cm1l5JjziHXksuRkiMUWAtOed6QhkMY1nAYzUKbVTquy53LrfDWYk+dJUWBAB8tTw5sxujkhPPunmJ1uPhlawZfrjnMjvQS7/x28cHcklyfIW0ufNSppXuyeenXXRzONwNwRbMI3h3Z/oK7TpzcSum9Ue0ZXsHWWw6Xg29TvuWjrR9RYi9BrVIzqeskrm96/QXFc7n4cMuHTN86nQaBDfjpmp/QqutusdOFhxbyfyv/D4fbgc6ZQOHhm1EcoaddNyrQwICW0QxqHU1yYmiVF9XdkLWBdze96+16osUXU2437IXdmTi4M3f1TKyS15n08w6+XHOEwa2jmX5rxwo/L6Ughf9t+x9/HPkDBQW1Ss0DbR/gnjb3yMWECvhi5xe8teEtIowRzL92/nnXprkUjuSbuPWztaQVWIgIMPDlnV1oEVO1x95mh5lNOZvYmruVrTlb2Z633VsU/2SB+kBi/WNpGNSQbrHd6BbTjSi/2tmVac6GNJ6cuw2tWsUPD3SnbUXqZl5Eh4sP88CfD3C07ChalZaHOzzMHa3uKNfd91JQFIVZe2bxzsZ3sLlsBBuCuTLsUb5Y4qkzdUGJJbcbts/xDGRT6klUUb87DHoVYttX3ZsQoopIUukik6SSqGtKrA5u+mgNe7JKaRLpz9wHup96Ep69C5ZNgd2/eB6rddDlXuj9BPie/sTnbFIKUvgu5TsWHFpQbnjsxsGN6RXXix5xPWgR1oJA/bn/xxRFIbU0lU3Zm9iYvZG/0v6ixH4iadEhsgN3t7mbnnE9pWVIJW1OLWTizzu8SaDm0QGM6ZbA1W1jCfA5d6JGURS2pxczf1sm329Io8jsADzd265uG8tt3RJIqhdcpTHbnC4+XXGIaUv2YXO6aRDmy8djOl1QTZjKtlJKK01j+dHlzNo9i9RSzxDDjYMb82yXZ2WUt0oos5cx+MfBFNmKeKHbC3UyGacoCp/v/Jx3Nr4DwFUJV/Fqz1cpMsGq/XlsTy8mwEdLqJ+eUD89CWF+JMUFXfRREBVF4a+0v5i2aRoHij1F/BW3HkdhV+5odSvPDrywejxmu5PkV5ZQanPy9V3J9GwSftZY9hTsYdnRZfyd9jc783d6l/Wr34/7ku6rkaOZ1VR2l51r5l3D0bKjPND2AR5s92B1h3RWOSVWbpuxjj1ZpQT6aPn09s50Saz8cce/7czfydy9c1lwcAFmp/m06xg0BjQqzRmXNw5uzICEAVzb5Npa00LucJ6JIdNWYLa7eHJgM8Zd0bha49mcs5mHlz5Msa2YOP843u7zNq3CW1VrTPsL9/PMimdIKUxBhYruYaNZtLIloGZM1wReuLrV+Q9gYDfBqmmwaio4LYAKkm6CPk9DWKOqfBtCXBBJKl1kklQSdYnV4WLs5+tZczCfiAADPz3YnXohJ121zNwGK/8LO+cBCqCCVtdCv4kQ2rDSr7cpexOfbv/U270CID4gnuuaXMfQxKHE+F94kUi7y86Koyv49eCv/H30b5xuT1eR5qHNuafNPfRP6H/Jr37VZi63wqy1R3jj9xRKrZ7P0levYVhSDD0ah5MQ5keDMF+CjDryTXYyiiwcLbTwz8F8/tiVTWax1buteiFGbu2awE2d4s84hHpV2ZFezH1fbSS9yIKvXsObN7StdB0kOHsrJafbSZYpi7TSNNJK0zhUfIhVGas4VHzIu06YTxgPtX+IaxtfK60ozs
OXO7/kzQ1vEuUbxfxr5+OjrTt1KBRF4fX1r/PN7m8AuLXFrTzR6YkatZ+43C7+TP2TT7d9yp7CPQAoiopG/u24u/2N9Kvf77xauny3PpWnf9hOgzBflj7e15sks7lspJems79oP3sK9rCnYA+78neRb833PletUjOwwUDuaXMPTULOrw7T5W7x4cU8/vfjGLVGfh3xa41vcVNsdnDXF+vZcKQQvUbNmzcmcU27uEpv53iy9KOtH7G7YLd3foxfDJ2iOtE2oi1tI9sS5x+HUWv0to4ss5eRYcogsyyTbXnbWJOxhh15O1DwnEqpVWp6x/XmxmY30jOuZ409xnC43Nzw0Rq2phWRnBjKrHu6XtTRHc/ljyN/8MzyZ7C77bQJb8N7V75HmDGs2uI5md1l57V1rzFn7xwAGvt3ZcumQSguH4a2ieG/N7fFoL2A7+ridE9N0m3feR6rNNDuFuj9JIQkVME7EOLCSFLpIpOkkqgrHC43D3y9iT93Z+On1/Ddfd1oHRfkaaK7/09Y8x4cWn7iCS2v8XR1i2pZ6dfamL2RaZumsSnH059crVLTv35/RjYfSceojhftACzHnMOXO7/k+73fY3FaAEgKT+KpLk/RNqL6i1LWJoUmOz9sOsrsdakcyDWdslyrVuF0n/qz4qvX0KdpBNd3qMcVzSMv6QFsgcnOw7M3sWq/54T0yYHNeLBvo0q1WJv65z7eWbKDhJgSHhxgZH/RPm8SKaMswztq1sk0Kg3tI9vTN74vNzS9oU7V4rjUbC4bw34aRpYpi8c7Ps4dre+o7pCqhKIovLL2Fb5L+Q4VKp7o9AS3tbqtusM6I0VRWJG+gpeWf0C2Y5d3vlFrpE14G9pGtKVdZDuaBDchxCfktMk/l9tFgbWAbHM2j85dRmpxFj2aaYkNs5NtziatNI0sU5b3RP1kRq2RrjFd6Rvfl971ehNuPHPLJnFuiqJw+6Lb2ZyzmasbXc0rPV+p7pDOyWJ38dh3W1i0MwuAJwY0ZdwVjSv8fb49dztvbXjLexyiU+von9CfG5veSKeoTpVuyVxkLWJF+gp+3PcjG7I3eOc3Dm7MfUn3cVXCVTUqQQzw1u8pvP/XfgJ9tCwa35vYYGO1xfLVrq94c/2bKCj0je/LG73fwKitvnjO5Md9P/LyPy/jcDsIN9Qjfc8o7NYwejQO4+MxnS58UJCMzfDXq7DPUxsOtc4z6E33hyGy+YW/ASHOkySVLjJJKom6wO1WmPD9FuZtycCgVTNzbBe6Rblh62zPiG75+zwrqjTQagT0nADRrSv9OgeLD/Luxnf5K+0vALRqLdc0uoaxrceSEHjprsQUWYv4Zs83fLnzS28z9iGJQ3is42O1psl6TaEoCusPFzJvSzr7sks5km8mp9QGeOpNRgYYiAs20iw6kAEto+jWqOJFty8Gp8vNawv38OlKT+uhO7o3YNKwlmftPuRwO9iSs4XFh5bx7Y7FKLpsVKrT/2Tq1DrqBdQjPiCe+IB42kW2o3ts9wp13RQV89O+n5i0ehIBugB+ufaXWp9Q+HdC6cXuL9bogsknc7sV7p79Oyszf8cQvBl0+addz6g1EmwIRkHB7rJjd9mxOC24FNc5X8NP50dCYAItQlvQLLQZLUJb0CKshYzwWcW25W5j9ILRAHw1+CvaRbar3oAqwO1WmLJwN5+s8Hyf39ixHv8Z0fqsvzFF1iJeW/8avx38DfB0abut5W3c1vI2gn2CqySug8UHmbt3Lj/t+8lbjykxKJH7ku5jcOLgGtFyae3BfEZ+8g+KwnmNYFpV3IqbN9e/yde7vwbg5mY382yXZ2tcAu5kO/J28Niyx8gyZeGrDaDsyGhMJQ1oERPIp7d3Iq4qknNp6+GvV+DgXyfmNR3sSS4ldJeC3uKSk6TSRSZJJVHbKYrC8/N28M3aVPzVdmZfUUqb/N9h7yI41lUMQyB0uA2S74fg+Eq/Rp4ljw+3fMiP+37EpbjQqDRc1+Q67ku6r1
qb2eeac3lv83vM2z8PBQWj1sijHR5lZLORNfqApqaz2F0Umu2E+xvQa9W4FTduxV2jCivPWHmIl+Z7WlgMTYrhvzeVb7quKArb8rYxd+9clhxZQqmjtNzzQ31CaRHagqahTWkQ2MCbRIr0jawRJwx1mcvtYtRvo9hdsJvhDYfzaq9Xqzuk81abE0rHWR0ubvnkHzalFhIXUcy9AzTsK97B1tytHC09etrWe8epVWrU7kBs1gDiAqK5snETIn0jifSN9P5PhfqESv27S2TiqonM2z+PZiHN+HbYtzXqO/tsvlpzmMm/7MStQOu4QD68peNpR4ZbfnQ5k1dPJs+ShwoVwxsN5+H2D1/QxSS3W+FAbhmbUgvZklZMRpGFnFIbuaVWSu0l+IStQQlcjqL2tI4O0zVgdNP7uTVpIEZ99Xy+xWYHg6cuJ6PYyo0d6/HmjdXTUtvqtPLcyue8I7xN6DiBO1rdUSv+3/MseTy69FG25W1Do9Kgyr+ewuwOhPsb+HhMRzomhFTNCx3dAKvehd3z4XirzchW0GmspwWTj5x7iktDkkoXmSSVRG3mdiu8NW81Rzf8xmDNeq7Sb0frspxYIa4TtL8VWl9/Xj9cZoeZmTtnMnPnTG93syvir2B8h/E0DK58DaaLZVf+Ll5b9xqbczYDkBSRxIvdXqRxSPUWrKxNFEUh25zN9rztbM/bzr7CfeRb8smz5FFgLfAmEw0aAz5aH+r51yMxKJGGwQ1pGtKUDpEdLvmoQ79szeDx77fgcCl0bhDC1JHtCfZT+PXAr3y/93v2Fu71rqvDH3NxE3TWlnx6880kJ1TNiFfi/GzP3c7oBaNRUJgxcAadoztXd0iV9u+E0ks9XmJE4xHVHdZ5yS+zMeLDVaQVWOjeKIwv7+yCVqNGURTKHGUUWgspshWhUWnQa/ToNXqMWiO7j7q5fcZG9Fo1fz/Zl5igmtfd5XJSYC1g+E/DKbGX8EyXZxjdYnR1h1Rhy/fm8ui3myk0Owjw0fLWjW0Z2MqTLDI7zLy54U3m7p0LQMOghrzS8xVah1e+xTWAyeZkyZ4cftuWwZoD+ZRYz5w4BUBtRR+yGn3YclQaT11Bt6UBrY2juD+5P72bRlyyruCKovDQ7M38ti2TBmG+/PZIL/wutMvWeSi0FvLI0kfYkrsFnVrHKz1fYXDi4Esex4WwOq1MWjWJhYcXAuBv7U/moSvRa7S8dn0brutQr+peLG8/rJ4G274/VtAb0Pt7eg+0uQka9AS5GCouIkkqXWSSVBK1jikP0jfiPrSCjM2LqGfdV355UH1oeTW0G31e9ZLAU7D4x30/8uGWD70FVZPCk5jQaQIdoyo+VPSl5FbczN07l/9u/C8mhwmtWst9SfdxV5u70KkvbAj6usrhcrA+az1L05ayLG0Z2ebs896WVq2lQ2QHusd254r6V9Aw6NIkHVftz+O+rzZSZjcTGLkO38gVmJzFgKdbxMAGA6G0C9/8rUaj1vD5HZ3p3TTiksQmzu6lNS8xZ+8cGgU1Ys7Vc2rV/2ldSigdtze7lBEfrMJsd3Ff74Y8O+TsI7ApisKID1ezNa2IO3skMmn4+f3eiKr1fcr3/Oef/+Cn8+PXEb8S4Vt7vu8yiiw8NGsTm1KLALgluT43ddUzae2T3gETxrQcwyPtH6l0kX+L3cXSPTn8tj2DpXtysDrc3mVGnYa28UF0qB9Cg3A/IgIMRAYYCPTRYXG4KLU6KLE42Z6ZwYLU2aQrf4DKM/qps6wZgearubVDN8b2TLzwmjznMHfjUZ6YsxWtWsUPD3SnbXzwRX2900krTeOBPx/gSMkRAvQBTL1iaq28MACe77HpW6czfet0AEJpz5E9I0AxMKZrAv83tEXVdvm3FMLWb2HDDMg7ceGLgBjPBeAWw6FeZ0kwiSonSaWLTJJKosZyu6DwMOTugZzdkL0T0jdC0ZFTVi0OaEJQu6s9P0Yx7c67r7aiKPx99G/e2fgOB4sPAp7R3M
Z3GM9VCVfViibN2aZsXl77MsvSlgHQKqwVr/R8hUbBMrQrnL1bmEaloWlIU1qHt6ZFWAuifKMIN4YTbgzHoDFgc9mwOW2YnCZSS1I5UHyAQ0WH2Ja3jfSy9HKv0zqsNdc0vobBiYMJMgRdtPdjdpj5aNPXfLHrc9xqT+0LoyqCXpHX0tDYl8JSjbf+0n9GtGZMVxmFpaYothUz/KfhFNoKmdBxAmNbj63ukCrk3wml//T4D9c0vqa6w6oSv23LZNwsT+Hjc9Vp+XNXNnd/uQGjTsPyp64gIkBqJNUELreLMQvHsD1vO4MTB/NG7zeqO6RKcbjcvH6sbp42YAfG2DmgthHpG8mUnlPoEtOlwtuyOlwsS8ll/rYMluzOweI4UQMsIcyXYUkxDGgZTavYQLSaind7zjJl8ebaD/gj7RcUPMkpR3Fb/KxX8fSVV3JDh3pnrfN3vg7nmRg6bQUmu4snBzZj3BWXvjX29tztPLT0IQqsBcT4xTC9//Q6cXy14OACJq6aiN1tJ1TbgNTdo1CcQTSPDuD9W9rTODKgal9QUSB1jWe0uJ3zwFp0YplvGDQZCE2ugga9wL/2JIZFzSVJpYtMkkqiWrjdYCmAsmwozYKyHCjL8gxJWnjYkzgqPAIu22mfnqGNZ4W1EWtpzbARN3Nlp6QLDmlH3g7e3vC2d9STYEMw97e9n5ua3oROU3taEIDnpG/BoQW8svYVSu2l6NV6HunwCLe2uPWyrbVkcpj45cAvzNk7h32FJ1q3hfqEckX8FfSr349O0Z3Oa7QWRVFILU1lVfoqVqSvYE3GGm8BX51aR//6/bmx2fmNyHMmZoeZ71O+5/Odn1NgLQDAXxNF3tFeOIraA+X/ztKSomY6XrTbqDXy8zU/E+NfPcVmK8qtuHl17at1MqF03JQFu/l4+UF89RrmjetB06hTT6bcboUh01awJ6uUB/o24ulBMqpRTbIzfyej5o9CQeGTAZ/QNaZrdYdUKS63i+eWvcmCtG8AcJoSibPfyy2dWjG4TfRZu1la7C5W7c9j/rYM/tydQ5ntRNe2eiFGhiXFMiwphlaxgRf8e3Sk5AjTNr3P4iOLvPOcZU2IUw3i9aE30DEh9IK2fzKHy80NH61ha1oRyYmhzLqn6yUdfRVgWdoynvz7SawuKy1CW/BBvw9qVUu4c9mSs4VH/3qUAmsBgbowLEfHUFAQjVGnYdLwlozsHH9xLq46bbDvD9g1zzNqnLW4/PKIFpDYC+p1gbgOENpQCn2LSpOk0kUmSSVRKYri+fK3l4GtBGxlx+6Xgb3Uc2srPTav9KRlxx7bSj3d10w5J4pon43WB8KbQkRziGxBul8LHljqZlseGLRqPh7Tkb7NIi/oLR0tPcq0TdO8fcoNGgO3triVu9rcRYC+iq/MXGI55hwmr57MyvSVAHSI7MDLPV4mPrDyxcprq+Mj5c3aPYsSewlwolvYtY2vpX1k+ypPtOVb8llwaAG/HPiFPQV7vPMTgxK5semNXN3o6vNuvVRsK+b7lO/5evfX3mRSPf963Jt0L8MaDWP9oWJmrDyEWwE/gxZ/g4YWMYGMTk645Afg4tzcips7Ft3B5pzNJIUnMWPQjBo7KphbcfPSmpf4Yd8PdTahBJ4RFm//fB2r9udTP9SXL+7sQmK4X7l1ft2awcOzNxNg0LLi6SsI9tVXU7TiTF755xW+TfmWWL9Yfrj6B/z1/tUdUoWYHWaeXv40y44uA6BT8DVs3NKDEsuJ7modE0LoUD8Yo16Lr16DWgUpWWXsSC9mf24ZLveJU6LYIB+GJsUwNCmWtvWCLkpSYE/BHj7Z9hl/HFnsbbnktofRIaw3T/S8gaSINhf8uq8v2sP0ZQcI9NGyaHxvYqtihLJK+G7Pd7y67lXcipsecT14u8/b+On8zv3EWiajLINxS8axv2g/erWBWMdYtu9tAECXxFBevbZ11bdaOpnLAan/QMpCOLgMcnaeuo5PEMS0hciW3vMDwh
p7WjhJskmcgSSVLjJJKl0GFAWcVk/m31pyLPlTWj7Zczz5c66EkL2sYsmgivINA/+oE1NgDIQkQkgDCEmAoHhvv+rFO7OY8P1WymxOogN9mH5rB9rXP//RKbJN2czcOZPvUr7D4XZ4R1J5qN1DNb61QGUoisKP+37kjfVvYHaaMWqNPNHpCW5semOt6M53vrJN2Xy560vm7J3jLbKeEJjAqOajGNZw2EXtknay3fm7mbN3DvMPzvfGYdAYGJAwgJua3UTbiLYV+juklqTy1a6v+PnAz97t1POvx31t72Now6G1qh6PKC+tNI2R80dSYi/h2sbX8mL3F2vc/6bL7WLS6kn8cuAX1Co1L/d4meGNhld3WBdNgcnONR+sJK3AQpBRx/RbO9C9UTgut8JnKw/y1uK92J1uHuvflEf7N6nucMVpmBwmrv/letLL0hnReAT/6fGf6g7pnLJMWTy89GH2FOxBr9bzUo+XGNpwKIUmO/O2pLNgeybrDxeeczsxQT4Mah3NsKRY2scHX5SuaKeTXpbOp1u/4Kf9P+LiREvzcJ9I2kS0olFwIxoGNSTGL8Zb8F6r0mJz2zA7zJTZyzA5TZgdZkwOE2WOMlxuF2kFFn7dlgWKmpEdWtKvSVOi/aKJ9Y+96Bf/3IqbqZumMmPHDACua3Idz3d9vk7/5pbZy3hq+VOsSF8BQLeQW1mxPgmLw41Oo+Le3g156IomGPWXoOW7KR+OrITDqzwlMLK2n7EXA1ojBNXzjPIcdGwKjofAWPAN95xz+IZCLet9IKqGJJUuMkkq1SKK4kkMmfPBlOtp8WPOA3PBsYRR0bHb00wue9XHo/MDg79n9AaDPxgCT9z33gYcWxZwbF6A5wvdPxr8IkB77qu7JVYH7/yxl89XHQagS4NQPhjd4bzrV6SVpDFj5wx+3v8zDren0GTXmK483ulxmofW3S4M6WXpTFw1kfVZ6wHoFtONl3q8dEFDEddEqSWpzNgxg18O/OL9+7YIbcFdbe6if/3+1db9r8xexoJDC5izd0651ktRvlF0ie5C5+jOtI1si4/GU3xVhYoDxQdYk7GG1Rmr2V+03/ucZiHNuL3V7QxKHFSnD2wvJ6szVvPAnw/gVtw8l/wco5qPqu6QvBxuB/+34v9YeHghGpWG13q9xqDEQdUd1kWXU2rl3i83siWtCK1axRMDm7F0dw7rDntaCF7RLIIPR3e8NCdW4rxszN7I2EVjUVCYesVUrqx/ZXWHdEa78nfx8JKHybHkEOoTytQrptIust0p62UVW1m8K4ujhRbMdidmuwu7003DCH/axAXRJi6IqEBDtSamTXYTb6yYx9zdC1D57UGlvgjHoMc0DGpI+8j2tI1oS7fYblV6TGNz2Zi4aiILD3laso9rN477ku6rcUn/i8HldvHWhrf4evfXAPSrN5iSoyNYutuT1Az3N3B/n4aMTk64tN+BTjvk7IKsbZCb4qm5mrsHStLP/dzjDIGecxHfsH9Nx+YZQz33T76twPmKqNkkqXSRSVKpBnDaoDQTSjI8NYVK0j11hky5nqSR6VgSyZwPx06Sz4tK7UnqnJL8CTgp6eOZp+j9sWv9MGGkTPGh1O2DTeOLTeOLXeOHU23EaNDjo9dg1GkI9tUR7m9AV4lCj+fidivM3XiUN37fQ16Z54Dkju4N+L+hLSr9Oi63i1UZq5i7dy7Ljy731rvpGNWRe5PupVtMt8viIMGtuJm1exbvbnoXm8tGgC6AZ5KfYXjD4bX+/acUpPDZjs/4/fDvuBVP8/sOkR24J+keesT2qDHvT1EUduTt4Pu937Po0CKsLmuFn9sjrgd3tLqD5OjkGvN+RNWZuWMmb298G61Ky/8G/K9SownZXXYOFh/kSMkR9Go9/np/fHW+RBojL6jmR7GtmGdWPMPK9JVo1Vre7P0m/RP6n/f2ahurw8VTc7fxy9YM7zw/vYaJw1py88WqLyKq1H83/JfPd35OqE8oP179I2HGsO
oO6RRLU5fyzIpnsDgtNApqxPv93qdeQBUO515N9mWXMm7WWg6U7kRvzKVXKzd2dQZ5ljycbicOlwOH24Feo8dP53fKZNQYWbwrj+wSC2H+Ovq3DKfQlk+2KZtsc7a3C/jJ2ke2Z1CDQQxoMIBwY/h5x55Wksbjfz/O7oLdaFVaXuj+Qp3s7nsu36d8z6trX8WluGgf0Z6rY57j3cUZHC30tJgO9zdwX++G3NipXvV2A3baPOdPRWlQnHbs9igUp0JJpqeOq6UQFPe5t3U6ev9jSaaQE8kmvwhPL4uAaPCP9Fww94/yJKbUVXc+JKqGJJUuMkkqnZnLrWB3unG63eg0agxadeUPIB0WT7KoJP1ftyfdN+VWbpv6APAL83yZHW/OaQz29DE+26T3L9fXuNjs4FC+iUN5ZRzKM5OabyKz2Ep2iZWsEmu54WYrKtRPT2SAgXohvtQP9SUhzJf6Yb4khPoSF2LEoD331YycUiu/78zm+/VpbE/3FOtrGOHH5OGt6FOJodDdiptd+btYmrqUXw78Um64+B5xPbi3zb10iOpQ6fdYFxwqPsTzK59nW942AK6Iv4Lnuz5PpO+F1aeqDltytvDZ9s+89ScAesX14u42d9f4v6/FaWFLzhbWZ61nfdZ6UgpTvAkxgDCfMLrGdqVbTDeSY5IJ8Tn/7p6i5lMUhadXPM3CQwvx1/lzf9v7uaX5LacdKMDitLA6fTVL05ayI28HR0qOeJPl/9YgsAHJMcl0i+lG19iuFa4Dsjt/N48te4z0snQMGgNv9XmLvvF9L+Qt1kqKovDe0v28++dekhPDeOOGJOJDfas7LFFBdpedm+ffzP6i/VwRfwVTr5haY5KBiqLw5a4veXvD2ygodIvpxtt936719RxPZrG7eHj2Jv7cnYNaBS9d05pbKzgK6RuL9vDhsgMEGLTMf6QnCWHlv7sKrYVsydnC5tzNbMrexLbcbSh4TgfVKjW96/VmZLORdIvthlpV8ZP8P4/8ycRVEylzlBFiCOGNPm/UumLvVWlNxhoeX/Y4pY5S4vzjeLfve2w5YOC9pftJL/Ikl/QaNQNaRXFTp3h6NA6vmTUc3W5Prw5zgedC/SlTgSf5dPL980lEqbWe5FJQfPnueMH1T8wz1I4ab3WJJJUusrqeVLLYXaQXWcgospBXZqPQ7KDQZKfQ7JkKTHaKzA5KrU7sLjd257HJ5S5X6PA4vUaNXuuZgjU26mkKaaAror6uiFh1IVHkE+bKI8iRi58tG729qGKBan08fX4D4zy3AdHgFwl+4Z7J96RbnU+F37/Z7uRwnpnD+SYO5Zk4mGvy3i8wnbs5skatIsioI9BHi0GrQaNWodV4fiisDhcWhwuL3UWR2YHzNJ/XyVQqiA0yUj/Uk3CqH+ZLTJAPFoeLEouTYouDTUcKWX+kgOP/zf4GLY/2a8Lt3Rug1579gMDhdnCw6CC7C3azIWsDK9NXkm/N9y4PMgQxvOFwbmh6Q50Y/vVCOd1OZu6cyQdbPsDpduKr9eXBdg9yS4tbanyXKofbwZ9H/uSb3d+wNXcr4OkuNqDBAO5qfRctwlpUc4RCnB+L08L9f9zPphzPsPb1A+ozoeMEYv1jOVp2lKOlR9mWu42V6StPaeUWqA8kMSgRRVEoc5RR5igjz5JXLlFp1BrpV78f1zS+hi7RXU57ouVW3Py8/2deWfsKNpfNcxJxxbt1untwRZhsTvwM2uoOQ5yHlIIURv42EqfbyYSOExjbemx1h4TD7WDK2inM2TsHgBub3sizyc/W+N/f8+F0uXl+3g6+XZ8GwCP9mvBY/yZnTe7N25zO+O+2APDBLR0YmnTuWpfZpmwWH1nMokOLvBfNAOID4rmx6Y0MazjsrC03M8oy+HT7p96/SfvI9rzR+406VybgfBwsPshDSx4irTQNf50/r/V6jW4xvfhh01G+XHOE3Zkl3nVD/fT0bBxO76YR9GoSTlRgxc9bapzjiShL4UlJp4ITpU
jKso+NZH3s1pxXse0aQ04kmrxTgqeebHCCJJ0uAkkqXWS1OamkKAr5JjsZRRbSCy2kF1m8CSTPrbVCiZN/U+EmhDKiVIVEqoqIUhUQQwHRqgJiVMdv8wlSmSu0PbNiIFMJJU8TTqkuEqsxCqd/LOqgOAyh9fCPrE9YRAxh/j4E++oq1bXL6nCRW2ojrcBMWqGZtAILqSfdzys7QzG7YyIDDDQI96NhuB8JYX7EBvsQFehDdKAP4QEG/PSaCl3Rc7sViiwOckqtZJfYSC3wtHxKLTBzJN9MaoEZs/30V9FPp218MENaR3NthzhC/bSU2kspthVTYi/x3uZZ8sgyZZFlyiK9LJ0DRQewu8v/vf10fnSP7U7/+v3pl9Cvxo6qVJ1SClJ46Z+X2JbrOQBrFNSIp7s8TdeYrjXmau5xB4sOsujwIn7Y+wM5lhwAtGotwxsO587Wd9IgqEH1BihEFXC5Xfx84GembZpWLjH+b3H+cfSr34+uMV1pGtKUSN/IU/5nS+wlrM9az9rMtaxKX0Vqaap3WZRvFJ2iO9EmvA1twttgcpj488ifLE1bSp7Fc2DcK64XU3pNuWSF7YW4WGbtnsWUdVNQoeL9fu/Tu17vaoulwFrAk38/ybqsdahQ8USnJxjTckyN+82tSoqi8M6f+5i2ZB8AIzvH8/KI1mhPc8y7+kAet89Yh8OlcG/vhjw3pPIXig4WH2ROyhx+3v8zpY5SwNN6qWtMV4Y1HEbLsJb4aH3w0fiQacrky11fsvjwYm+Lzzta3cEjHR6pk0m+81VoLWT8X+O9Fz1ub3k7j3Z4FJ1Gx470YuZsSGPelgyKLeVLdcQG+dAqLojWsUE0iw4gIcyX+FBf/Otikt7l8CSbSjJO6oZ30m1xmqfW7bn4hpVPMnlvG3iSUVLjqdIkqXSR1eSkkt3pJqvYWj5ZVGgho/hEEsnmPHeTRH+DltggA/H+EOdjIUZnJlJjIkpVQKi7gGBXAb62XPSWHHTmbLTmHFQVrF3k1AVgNUZRpo+kUBtBnjqcTMI46gzmkCOYveZADpSqqUCYXgEGLUG+Onx0Gnx0agxaz3CxDpeC0+1pSVVicVJotlfo/QcZdSQeSxw1CPcj8djUINzvkn2hK4pCXpmd1IITiaZDeQUcLUtHoytFoy8FTQk6fRl+vmbKnAXkWnIpthVT5iir8Ov46/xpFtqM1mGt6VmvJx0jO56264go73jLhHc2vkOhzVOEMSk8iTvb3MkV8VdUqtl4VVEUhXxrPvsK97EpZxN/HP6DA8UHvMvDfMK4udnN3NjsxguqmyBETWVymPhs+2fM3jMbnVpHfEA89QLqkRiUSJ96fWge2rxSJ6GKorAtbxu/7P+FhYcXUmovPeO6AboA7mh9B3e3ubta/v+FqGqKovDSPy8xd+9c/HR+fD34axqHND7n8xwuB5tzNpNtzqbQWkihrRCn20nDoIY0Dm5Mo+BG+Ooq3h1yZ/5Oxv81nixTFkatkdd6vVajC4hXta//OcKkn3fgVqB/i0jeG9WhXKHnvdmlXD99NaVWJ0PbxPDeqPYXNHqd2WFm4aGFzNs/jy25W865fteYrtzd5m6SY5LP+zXrMrvLzjsb3/EW8G4T3oY3er/hrQHmcLnZnFrE8r25rNiXy7b0Ys50hh7mpyc+1Jf4EB+Cg4rRGXPR66yotSbcajMqlQu9Ro9BY0Cv0RNhjCDWP5Zov2iifaOr5fheURRyLbkcLj5MRlkGmaZsMstyyDHnUmovw+wwY3aavAPFeJ4EOrUBvdoHvcaIUaUnRKUlXFER4XYS7bDQwFpCQlkuISVHUduKzhGFytOr5bRJpwQIiPGOnC1OkKTSRVZdSSWrw0VWsad2T/axKavYRnaJlYxiTwIpp9R27ItIwYADX6z4qWyeW6z4qqz4qaxE+7iJ8XURbXASpTMTrjERrJTi5y7Bx1GMxlroaa5Y2RHQ/CKOFV+LhqC4E13TjndTC4gBn3N/Zm63p0
WV932WWMku9rToyTppXrHFcYYvXgVwg+r4rRsUFaAGRY1eo6FeiB/1Qn2JDzEe+4L2JT7USHyILyF+1ZvNNjvM7C3c650OFR/iSMmRcjWOKsJf50+QIYhAfSCBhkBCfUK9PyzRftE0CW5CXECcnABdgGJbMdO3TmdOyhxvq6/EoEQGJw6mQ2QH2oS3OeXg2eF2kG3KJtOUSUZZhmcyeW4LrAVYnBZsLhs2pw2NWoNBY/BeHTRoDfhofPDR+qBWqXG4HThdTqwuK6klqd4E13FatZbusd0ZnDiYAQkD0GvkSo2o+xRFqfIWDDaXjQ1ZG9iet50deTvYnrcdtUpN3/i+9K/fny7RXSQhL+och8vBPX/cw8bsjdTzr8fsobMJ9gk+ZT1FUdiau5X5B+fz++HfKTrLSZ4KFS3DWjK80XCGJA45Y+07RVGYt38eL//zMna3nYTABKZeMfWy7I7/+84sHpm9GZvTTYf6wTx2VVMcLjcWu5tXF+wmvchCp4QQvr47GR9d1Z0cp5akMv/gfBYfXkyeNQ+r04rNZUOr1jKowSBub3X7Zd/Nt6KWpC5h4qqJlNpL8df581jHx7ih6Q2nHIOXWh3szixlR3oxO9KLOZBbxpGCEkpVKWj996DxSUftk1npEQI1Kg0NgxrRKrwlzUObkxSeRIuwFmjV53+x3OpwkW+yU2iyk2+yk1Naxr7C/Rws2UOGZS8FziNYlEzcqooPsFJZisuIyh6Mr9NIhFtHvALNXHZaOU20dOYQ4chGr5y9FwpqnaeOU2DcsfPYqJMKip906xNUrtZuXVerkkoffPABb775JllZWbRt25b33nuPLl26nHH9OXPmMHHiRA4fPkyTJk14/fXXGTJkiHe5oihMnjyZTz75hKKiInr06MH06dNp0qSJd52CggIefvhhfv31V9RqNddffz1Tp07F379ifTGrKqmkuByUlZZQUlxMUWkxptJiysrKsJhKsJrLsJpLsZhKsVvKcFhN4LTgiw0jNowqG0bsnscqzzxP0shz66eyouE8q/X/m0Z/omp/QLQnMXS6W7/IczYtVBQFm8uGyWHC7DBT5ijz3HeaKbOXYXL+a77D7Dm5Pmmyu+xYnVbsLjs2lw2r04bVZcXucuBWXJ6pgu9do9KgUWkwaA0E6ALw0/vhr/PHT3fiNsgQRKRvpHeK8o0izBhWJc17FUUhw5RBSkEKKYUp7C3YS0phCmmlaWd8TqA+kCi/KCKMEZ7Jt/xtiE8IgfpAAvQBF/RDISonz5LHrN2z+HbPt95m4+DZxxICE3Arbu8+XGQrKlezpSqpUFE/sD7NQprRN74vfeP71qkCpkIIIS6tQmsho34bRXpZOk1DmvJar9doEnLiuHpH3g5eWP0CKYUp3nlhPmE0CWlCiE8IIYYQVCoV+4v2s79wf7kuqlq1lj71+nBVwlX0jOtJkCEIRVFYkb6CD7d8yM78nQD0rdeXV3u9eln/nq0/XMBdM9dTYnWesqxhuB8/PND9klwUdStu3IpbjjHPQ0ZZBk8tf8pb27J9ZHsmdZ102haAVqeVv9L+4o8jf7AqfRVmZ/kSIhr0GJQYXA5/bDYf7HYjiqJBpXKC2oVKZUelLUGtK0KlK0KlPnW/0eBDuK4Z8cY2JPq2J8bYEINOiwqwOd3eqcTi8NbVLTTbKTQ5KDDZsTjsqH3S0fodQON3AI3x8GlfR1FUKPYw3I4QFGcgbmcgancgerUfBrURH40RvdqASu05jlWrFNwqB6hsKCobLiw4KMWulOCkBJe6CLemADRn75mhuLW47eHo7EEE2X2IcqhIcDho4TDRVMmjgTqfGFUeWipYbkTrcyLh5BvmqfNkDAafYM+tMeT092vpBadak1T67rvvuO222/joo49ITk7m3XffZc6cOaSkpBAZeeqISqtXr6Z3795MmTKFYcOGMWvWLF5//XU2bdpE69atAXj99deZMmUKX3zxBYmJiUycOJHt27eza9cufHw8Rc
8GDx5MZmYmH3/8MQ6Hg7Fjx9K5c2dmzZpVobiPf8Arts4jwKDGZbHgNptxW0woFhMqqwm1tQyNzYTWVorGXobWUYbWWYrOZcLgMmNUTPhgw60CJyqcKhUO8NyqPLee+Zy4ValwHLvvUKlwHlvfWe6+Z33H8e2gwq3RoVLrUGs8k0qjR63Ro9YYUGl0qHW+nknvh1rvh0rvf+y+v+e+1ge1WnPsn1yNWuUZ0c3ldmFxWrA6rVicFixOC2an2Xvf5DCdMpkdZpzKqV82tY0KFaE+oUT5RXlb/cT4xXhaAR27H24MR61S43Q7sblsFNuLOVx8mMMlhzlUfIh9hfvYV7ivXALiZBHGCJqGNqVZSDMaBzemfmB9EgISTnuFUNQcZfYyFhxawIbsDWzK3nTG1mV6tZ4Y/xhi/GKI848jxi+GWP9YInwjvC2RDBoDbsWN1WnF6rJic9qwuCzYnJ7klFNxolfr0al16DQ64vzjSAxKxKg1XuJ3LYQQoi7bV7iPu36/i0JbIXq1nkc6PMKNTW9k+tbpfLnrS9yK21vUfnjD4XSJ6XLGpEOuOZfFRxbz8/6f2V2w2ztfo9LQIaoDZofZm0wyao3c0+Ye7mpzl7SqxtPV7YVfdlJgsqPXekZZjg4y8tTAZjLCYi3hcruYvWc2721+D7PTjFalpX9Cf5qGNKVhcEMCdAEsOryIRYcWlTtHiDBG0LtebzpHd6ZFaAvqB9Yv9z9WanWQWWwlo8hCZrGVzCILGcVWMo/1Zsk0ZeLUHEXtk4HGJx2N7xFUGku52NxOf1xlTXCamuEyNUZx/bvBhRu1PheN3340fvvR+h5EpSnfCkiLH6HahsT4NKVBYFMaBTWkcWgC0QH+BBp1GPUafHWa09YGqyyzw8zR0qMcLErjYFEaqSVHSS9LI8N0lHxbBq6znHMqLiNueziKLQw/ux8RTg3RdkhwOImllEhVIZEUEakqIkJVVOG6wMc5AItahUWlxqI1YNH7YtH7YtUZcGoNOHQGHBofHBodDq0Oh0aPQ6PFodYeu1XjUmlwq7W41RoUtQa3Sl3+vkqNolLhRkFRFG/C9/iIjhqVxnverlGVP5c/+Zxep9aVnzSeW4fJwS0dbqn5SaXk5GQ6d+7M+++/D4Db7SY+Pp6HH36YZ5555pT1b775ZkwmE/Pnz/fO69q1K+3ateOjjz5CURRiY2N5/PHHeeKJJwAoLi4mKiqKmTNnMnLkSHbv3k3Lli1Zv349nTp1AmDRokUMGTKEo0ePEhsbe864jyeVWkxvgcYo/S/Pl6/WFz+dX7nJV+frbSHkq/PFT+uHj9YHvUaPj8anXD9hg8ZQbtJpdGhVWjRqT+sjrVqLWqX2tEZSa7z/aC7FhcvtwqW4PI/dLiwuCya7ydtCyntrL6PQVkiOOcc75ZpzK5QYU6vUKIri/cc+E61aS6OgRjQLbUbTkKbe21Cf0Kr6qEU1yizL5FDJIfRqvXdfDjGEEGYMkwNkIYQQtUaeJY9JqyaxIn0F4En4WJyek9IhiUN4psszZ+zKdiZ7C/ey8NBClqUtY3/Rfu98o9bIyOYjub3l7YQZw6rsPQhRU2SZsnhl7SssS1t2xnVi/GIY1nAYV9a/kpZhLS/ouFFRFIrMDjKKLWQWWckvs7K/eB/7S7eSZtlOnnMnLk7upqYiUN2AKG0SGq2TUvdhcu0HsbvLJ6IC9AF0ie5CckwyydHJJAYl1ogC+k63k8yyTA6XHOZIyRHv7ZGSI2SaMs/6XJ0SgtoVjtMWiMXqh9sRiNatxV9dSoCqDD+VCa3GjEpjBo0Vl9qOXePArnZh1rixqhWcNeAzuFAui4vdD+yu2Uklu92Or68vc+fOZcSIEd75t99+O0VFRfz888+nPKd+/fpMmDCB8ePHe+dNnjyZefPmsXXrVg4ePEijRo3YvHkz7dq1867Tp08f2rVrx9SpU5kxYwaPP/44hYUn6o44nU58fHyYM2cO11577Smva7PZsNlOZG
FLSkqIj4+n/7QmuHy12FXqk1oQHSvdc560ai06tSc5olWfmHRqXbnHWrUWrUrrSaSotehUpy4//hyNSoPCSdlLRcGN+5SM5vEky/H5p3vOycvVKjVGrbHc5KP18d731/vjp/XDT+/nuf1X8qi2nlC7FTcF1gJyzblkm7PJMmWRacr0jqqWZcoi25ztHQ3jOK1aS/2A+jQIbEBiUCKNght5rkoENZQ6HEIIIYSo8RRFYe6+uby5/k0sTguRvpFM7DqRvvF9L3jbR0uP8vfRv7G77FzT+Bq5uCbqPEVR2JC9ga25WzlUfIgDRQfIMefQNaYr1zS+hs7RnS/Z+ZLD5WBL7hZWpa9iVcYq9hTsOe16Phof2ke2Jzkmma4xXWke2hxNLStybXFaSC1J9SaZTk44na0e3HlRVODWoXZrUStatG4NGgW0igoNCjoFdIqCHgW94kavuDEobnwUNzpc6HGddOtGp7jQ40SDghpQo6AC1AqoAdWxxwqgoMINuFQn7rtVeG5RoRxb5unlxLGeTp4eUA6VCpNVYcbjh86ZVKrWjrB5eXm4XC6ioqLKzY+KimLPntPvxFlZWaddPysry7v8+LyzrfPvrnVarZbQ0FDvOv82ZcoUXnzxxVPmTxv+N7FR4fgbtGhOGmXB5XbhcDtwuB3YTyp0rVKpUOFZT4UKlcrTBO3kJFFNyOyKs1Or1IQbwwk3htMi7PRDtrrcLgqsBahUKm+rKp1aV2sTaUIIIYQQKpWKG5veSLeYbvyT+Q8DGwyssjpH9QLqMbrF6CrZlhC1gUqlonN0ZzpHd67uUNBpdN5YxnccT645l9UZq1mftZ4AfQAtwlrQIrQFiUGJtb6ellFrpFloM5qFNjtlWZG1iCOlR0gtSSXXkkuuOZdcSy4Wp6VcFzE/nR+BhkCC9EEEGgIJ1AcSZAjCXxuARmVEcetxOXXYHWrccKz3yrFb5diQUu5TBxX5dyrg+EMFcLoUHC7PqOZOhw2X3YzLZkFxmFHsZhSHFRxmFIdnHg4r2M2onBZwWlE7LahdVvSKDYNiw6DY0WHHqTHi1Prj0vnj0vqjGALAEIDFpWUGj5zz86zde8Ml9OyzzzJhwgTv4+MtleJC/Qg0ntrCRKP2dLfywedShilqEI1aQ4RvRHWHIYQQQghR5eoF1OOGgBuqOwwhxEUS4RvBNY2v4ZrG11R3KJdUsE8wwT7BtI1oW92hVLuSkhKoQFKpWptMhIeHo9FoyM4uX8Q2Ozub6Ojo0z4nOjr6rOsfvz3XOjk5OeWWO51OCgoKzvi6BoOBwMDAcpMQQgghhBBCCCHE5apak0p6vZ6OHTuyZMkS7zy3282SJUvo1q3baZ/TrVu3cusD/PHHH971ExMTiY6OLrdOSUkJa9eu9a7TrVs3ioqK2Lhxo3edpUuX4na7SU5OrrL3J4QQQgghhBBCCFFXVXv3twkTJnD77bfTqVMnunTpwrvvvovJZGLs2LEA3HbbbcTFxTFlyhQAHn30Ufr06cPbb7/N0KFD+fbbb9mwYQP/+9//AE+/1PHjx/Pyyy/TpEkTEhMTmThxIrGxsd5i4C1atGDQoEHcc889fPTRRzgcDh566CFGjhxZoZHfhBBCCCGEEEIIIS531Z5Uuvnmm8nNzWXSpElkZWXRrl07Fi1a5C20nZqailp9okFV9+7dmTVrFs8//zzPPfccTZo0Yd68ebRu3dq7zlNPPYXJZOLee++lqKiInj17smjRInx8TtQ3+uabb3jooYfo168farWa66+/nmnTpl26Ny6EEEIIIYQQQghRi6kURVGqO4jaqLi4mODgYNLS0qS+khBCCCGEEEIIIeqM44OTFRUVERQUdMb1qr2lUm2Vn58PQHx8fDVHIoQQQgghhBBCCFH1SktLJal0MYSGhgKe7nln+4CFqC2OZ6Kl9Z2oS2S/FnWN7NOiLpL9WtQ1sk+LukBRFEpLS89Zd1qSSufpeJ2noKAg+aIQdUpgYKDs06LOkf
1a1DWyT4u6SPZrUdfIPi1qu4o0oFGfcw0hhBBCCCGEEEIIIf5FkkpCCCGEEEIIIYQQotIkqXSeDAYDkydPxmAwVHcoQlQJ2adFXST7tahrZJ8WdZHs16KukX1aXE5UiqIo1R2EEEIIIYQQQgghhKhdpKWSEEIIIYQQQgghhKg0SSoJIYQQQgghhBBCiEqTpJIQQgghhBBCCCGEqDRJKgkhhBBCCCGEEEKISpOk0nn44IMPaNCgAT4+PiQnJ7Nu3brqDkmICnvhhRdQqVTlpubNm3uXW61Wxo0bR1hYGP7+/lx//fVkZ2dXY8RClLd8+XKGDx9ObGwsKpWKefPmlVuuKAqTJk0iJiYGo9FI//792bdvX7l1CgoKGD16NIGBgQQHB3PXXXdRVlZ2Cd+FECeca5++4447TvneHjRoULl1ZJ8WNcmUKVPo3LkzAQEBREZGMmLECFJSUsqtU5HjjdTUVIYOHYqvry+RkZE8+eSTOJ3OS/lWhPCqyH7dt2/fU76v77///nLryH4t6hpJKlXSd999x4QJE5g8eTKbNm2ibdu2DBw4kJycnOoOTYgKa9WqFZmZmd5p5cqV3mWPPfYYv/76K3PmzOHvv/8mIyOD6667rhqjFaI8k8lE27Zt+eCDD067/I033mDatGl89NFHrF27Fj8/PwYOHIjVavWuM3r0aHbu3Mkff/zB/PnzWb58Offee++legtClHOufRpg0KBB5b63Z8+eXW657NOiJvn7778ZN24c//zzD3/88QcOh4MBAwZgMpm865zreMPlcjF06FDsdjurV6/miy++YObMmUyaNKk63pIQFdqvAe65555y39dvvPGGd5ns16JOUkSldOnSRRk3bpz3scvlUmJjY5UpU6ZUY1RCVNzkyZOVtm3bnnZZUVGRotPplDlz5njn7d69WwGUNWvWXKIIhag4QPnpp5+8j91utxIdHa28+eab3nlFRUWKwWBQZs+erSiKouzatUsBlPXr13vXWbhwoaJSqZT09PRLFrsQp/PvfVpRFOX2229XrrnmmjM+R/ZpUdPl5OQogPL3338rilKx440FCxYoarVaycrK8q4zffp0JTAwULHZbJf2DQhxGv/erxVFUfr06aM8+uijZ3yO7NeiLpKWSpVgt9vZuHEj/fv3985Tq9X079+fNWvWVGNkQlTOvn37iI2NpWHDhowePZrU1FQANm7ciMPhKLePN2/enPr168s+LmqFQ4cOkZWVVW4fDgoKIjk52bsPr1mzhuDgYDp16uRdp3///qjVatauXXvJYxaiIpYtW0ZkZCTNmjXjgQceID8/37tM9mlR0xUXFwMQGhoKVOx4Y82aNbRp04aoqCjvOgMHDqSkpISdO3dewuiFOL1/79fHffPNN4SHh9O6dWueffZZzGazd5ns16Iu0lZ3ALVJXl4eLper3JcAQFRUFHv27KmmqISonOTkZGbOnEmzZs3IzMzkxRdfpFevXuzYsYOsrCz0ej3BwcHlnhMVFUVWVlb1BCxEJRzfT0/3PX18WVZWFpGRkeWWa7VaQkNDZT8XNdKgQYO47rrrSExM5MCBAzz33HMMHjyYNWvWoNFoZJ8WNZrb7Wb8+PH06NGD1q1bA1ToeCMrK+u03+XHlwlRnU63XwPccsstJCQkEBsby7Zt23j66adJSUnhxx9/BGS/FnWTJJWEuMwMHjzYez8pKYnk5GQSEhL4/vvvMRqN1RiZEEKI0xk5cqT3fps2bUhKSqJRo0YsW7aMfv36VWNkQpzbuHHj2LFjR7n6jULUdmfar0+uZdemTRtiYmLo168fBw4coFGjRpc6TCEuCen+Vgnh4eFoNJpTRqbIzs4mOjq6mqIS4sIEBwfTtGlT9u/fT3R0NHa7naKionLryD4uaovj++nZvqejo6NPGVzB6XRSUFAg+7moFRo2bEh4eDj79+8HZJ8WNddDDz3E/Pnz+euvv6hXr553fkWON6Kjo0/7XX58mRDV5Uz79ekkJy
cDlPu+lv1a1DWSVKoEvV5Px44dWbJkiXee2+1myZIldOvWrRojE+L8lZWVceDAAWJiYujYsSM6na7cPp6SkkJqaqrs46JWSExMJDo6utw+XFJSwtq1a737cLdu3SgqKmLjxo3edZYuXYrb7fYe/AlRkx09epT8/HxiYmIA2adFzaMoCg899BA//fQTS5cuJTExsdzyihxvdOvWje3bt5dLmP7xxx8EBgbSsmXLS/NGhDjJufbr09myZQtAue9r2a9FXSPd3yppwoQJ3H777XTq1IkuXbrw7rvvYjKZGDt2bHWHJkSFPPHEEwwfPpyEhAQyMjKYPHkyGo2GUaNGERQUxF133cWECRMIDQ0lMDCQhx9+mG7dutG1a9fqDl0IwJMIPX7FDzzFubds2UJoaCj169dn/PjxvPzyyzRp0oTExEQmTpxIbGwsI0aMAKBFixYMGjSIe+65h48++giHw8FDDz3EyJEjiY2NraZ3JS5nZ9unQ0NDefHFF7n++uuJjo7mwIEDPPXUUzRu3JiBAwcCsk+LmmfcuHHMmjWLn3/+mYCAAG+tmKCgIIxGY4WONwYMGEDLli0ZM2YMb7zxBllZWTz//POMGzcOg8FQnW9PXKbOtV8fOHCAWbNmMWTIEMLCwti2bRuPPfYYvXv3JikpCZD9WtRR1T38XG303nvvKfXr11f0er3SpUsX5Z9//qnukISosJtvvlmJiYlR9Hq9EhcXp9x8883K/v37vcstFovy4IMPKiEhIYqvr69y7bXXKpmZmdUYsRDl/fXXXwpwynT77bcriqIobrdbmThxohIVFaUYDAalX79+SkpKSrlt5OfnK6NGjVL8/f2VwMBAZezYsUppaWk1vBshzr5Pm81mZcCAAUpERISi0+mUhIQE5Z577ik3HLWiyD4tapbT7c+A8vnnn3vXqcjxxuHDh5XBgwcrRqNRCQ8PVx5//HHF4XBc4ncjhMe59uvU1FSld+/eSmhoqGIwGJTGjRsrTz75pFJcXFxuO7Jfi7pGpSiKcimTWEIIIYQQQgghhBCi9pOaSkIIIYQQQgghhBCi0iSpJIQQQgghhBBCCCEqTZJKQgghhBBCCCGEEKLSJKkkhBBCCCGEEEIIISpNkkpCCCGEEEIIIYQQotIkqSSEEEIIIYQQQgghKk2SSkIIIYQQQgghhBCi0iSpJIQQQgghhBBCCCEqTZJKQgghhLgs3XHHHYwYMeKSv+7MmTNRqVSoVCrGjx9/yV+/svr27VtlcR4+fNj73tu1a1cl2xRCCCFE9dFWdwBCCCGEEFVNpVKddfnkyZOZOnUqiqJcoojKCwwMJCUlBT8/v2p5/eoSHx9PZmYmb731Fn/++Wd1hyOEEEKICyRJJSGEEELUOZmZmd773333HZMmTSIlJcU7z9/fH39//+oIDfAkvaKjo6vt9auLRqMhOjq6Wj97IYQQQlQd6f4mhBBCiDonOjraOwUFBXmTOMcnf3//U7q/9e3bl4cffpjx48cTEhJCVFQUn3zyCSaTibFjxxIQEEDjxo1ZuHBhudfasWMHgwcPxt/fn6ioKMaMGUNeXl6lY/7www9p0qQJPj4+REVFccMNN3iXLVq0iJ49exIcHExYWBjDhg3jwIED3uXHu5V9//339OrVC6PRSOfOndm7dy/r16+nU6dO+Pv7M3jwYHJzc73PO/4ZvPjii0RERBAYGMj999+P3W4/Y5w2m40nnniCuLg4/Pz8SE5OZtmyZd7lR44cYfjw4YSEhODn50erVq1YsGBBpT8PIYQQQtR8klQSQgghhDjmiy++IDw8nHXr1vHwww/zwAMPcOONN9K9e3c2bdrEgAEDGDNmDGazGYCioiKuvPJK2rdvz4YNG1i0aBHZ2dncdNNNlXrdDRs28Mgjj/DSSy+RkpLCokWL6N27t3e5yWRiwoQJbNiwgSVLlqBWq7n22mtxu93ltjN58mSef/55Nm3ahFar5ZZbbuGpp55i6tSprFixgv379z
Np0qRyz1myZAm7d+9m2bJlzJ49mx9//JEXX3zxjLE+9NBDrFmzhm+//ZZt27Zx4403MmjQIPbt2wfAuHHjsNlsLF++nO3bt/P6669LyyQhhBCijpLub0IIIYQQx7Rt25bnn38egGeffZbXXnuN8PBw7rnnHgAmTZrE9OnT2bZtG127duX999+nffv2vPrqq95tzJgxg/j4ePbu3UvTpk0r9Lqpqan4+fkxbNgwAgICSEhIoH379t7l119/fbn1Z8yYQUREBLt27aJ169be+U888QQDBw4E4NFHH2XUqFEsWbKEHj16AHDXXXcxc+bMctvS6/XMmDEDX19fWrVqxUsvvcSTTz7Jf/7zH9Tq8tcfU1NT+fzzz0lNTSU2Ntb7mosWLeLzzz/n1VdfJTU1leuvv542bdoA0LBhwwp9BkIIIYSofaSlkhBCCCHEMUlJSd77Go2GsLAwb3IEICoqCoCcnBwAtm7dyl9//eWt0eTv70/z5s0BynVPO5errrqKhIQEGjZsyJgxY/jmm2+8raEA9u3bx6hRo2jYsCGBgYE0aNAA8CR5zhT/8Vj/Hf/x2I9r27Ytvr6+3sfdunWjrKyMtLS0U+Lcvn07LpeLpk2blnvPf//9t/f9PvLII7z88sv06NGDyZMns23btgp/DkIIIYSoXaSlkhBCCCHEMTqdrtxjlUpVbt7xUeWOdzsrKytj+PDhvP7666dsKyYmpsKvGxAQwKZNm1i2bBmLFy9m0qRJvPDCC6xfv57g4GCGDx9OQkICn3zyCbGxsbjdblq3bn1K7aPTxfrvef/uMlcZZWVlaDQaNm7ciEajKbfseBe3u+++m4EDB/Lbb7+xePFipkyZwttvv83DDz983q8rhBBCiJpJkkpCCCGEEOepQ4cO/PDDDzRo0ACt9sIOq7RaLf3796d///5MnjyZ4OBgli5dSp8+fUhJSeGTTz6hV69eAKxcubIqwgc8ra0sFgtGoxGAf/75B39/f+Lj409Zt3379rhcLnJycryxnE58fDz3338/999/P88++yyffPKJJJWEEEKIOki6vwkhhBBCnKdx48ZRUFDAqFGjWL9+PQcOHOD3339n7NixuFyuCm9n/vz5TJs2jS1btnDkyBG+/PJL3G43zZo1IyQkhLCwMP73v/+xf/9+li5dyoQJE6rsPdjtdu666y527drFggULmDx5Mg899NAp9ZQAmjZtyujRo7ntttv48ccfOXToEOvWrWPKlCn89ttvAIwfP57ff/+dQ4cOsWnTJv766y9atGhRZfEKIYQQouaQlkpCCCGEEOcpNjaWVatW8fTTTzNgwABsNhsJCQkMGjTotEmZMwkODubHH3/khRdewGq10qRJE2bPnk2rVq0A+Pbbb3nkkUdo3bo1zZo1Y9q0afTt27dK3kO/fv1o0qQJvXv3xmazMWrUKF544YUzrv/555/z8ssv8/jjj5Oenk54eDhdu3Zl2LBhALhcLsaNG8fRo0cJDAxk0KBBvPPOO1USqxBCCCFqFpWiKEp1ByGEEEIIcbmYOXMm48ePp6ioqLpD4Y477qCoqIh58+Zd0td94YUXmDdvHlu2bLmkryuEEEKIqiXd34QQQgghLrHi4mL8/f15+umnqzuUSyo1NRV/f39effXV6g5FCCGEEFVAur8JIYQQQlxC119/PT179gQ83d4uJ7Gxsd7WSQaDoXqDEUIIIcQFk+5vQgghhBBCCCGEEKLSpPubEEIIIYQQQgghhKg0SSoJIYQQQgghhBBCiEqTpJIQQgghhBBCCCGEqDRJKgkhhBBCCCGEEEKISpOkkhBCCCGEEEIIIYSoNEkqCSGEEEIIIYQQQohKk6SSEEIIIYQQQgghhKg0SSoJIYQQQgghhBBCiEr7f97NMCzMZHsyAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Step 3. Compute Hilbert envelopes for each frequency bin\n", + "bandwidth = 1000\n", + "fshift = 300\n", + "min_freq, max_freq = bandwidth // 2, new_sample_rate // 2 - bandwidth // 2\n", + "center_freqs = range(min_freq, max_freq, fshift)\n", + "envelopes = {\n", + " center_freq: compute_hilbert_envelopes(\n", + " excitation, center_freq, bandwidth, new_sample_rate\n", + " )\n", + " for center_freq in center_freqs\n", + "}\n", + "plt.plot(envelopes[500][0, 200])\n", + "plt.plot(envelopes[1100][0, 200])\n", + "plt.plot(envelopes[1400][0, 200])\n", + "plt.legend([\"500Hz\", \"1100Hz\", \"1400Hz\"])\n", + "plt.title(\"Hilbert envelopes at different bandpass center frequencies\")\n", + "plt.xlabel(\"Time [samples]\")\n", + "plt.ylabel(\"Amplitude\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "630c9d07-42da-48c2-a4f7-3fde18df8afc", + "metadata": {}, + "outputs": [], + "source": [ + "# Step 4. Compute cross correlation between (non-neighboring) frequency bins\n", + "correlations = [\n", + " compute_cross_correlation(envelopes[freq_i], envelopes[freq_j], width=3)\n", + " for freq_i in center_freqs\n", + " for freq_j in center_freqs\n", + " if (freq_j - freq_i) > bandwidth // 2\n", + "]\n", + "\n", + "# Step 5. 
The maximum cross-correlation is the GNE score\n", + "gne = torch.stack(correlations, dim=-1).amax(dim=(2, 3))\n", + "gne = -10 * torch.log10(1 - gne)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "73cd8327-eed8-4ecb-af59-b6b10d966c27", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average GNE score: tensor(19.8546)\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABH8AAADvCAYAAABrAnjSAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAo99JREFUeJzs3Xd8E4X7B/DPZacr3XvS0hbK3rvs5QBREUGZKss9UL8u+KlfxIF+XYADUBQFN8jee49CoYMuuvceadb9/rjcNWnTNiktZTzv18uXNPOSXC53zz2DYVmWBSGEEEIIIYQQQgi5I4naewEIIYQQQgghhBBCSNuh4A8hhBBCCCGEEELIHYyCP4QQQgghhBBCCCF3MAr+EEIIIYQQQgghhNzBKPhDCCGEEEIIIYQQcgej4A8hhBBCCCGEEELIHYyCP4QQQgghhBBCCCF3MAr+EEIIIYQQQgghhNzBKPhDCCGEEEIIIYQQcgej4A8hhBBCyE22fv16MAyDtLS0Zm+7c+dO9OjRAwqFAgzDoLS0tM2XzxKGYfD000+3+fPodDosWbIEAQEBEIlEmDx5sk33X7p0KRiGMbssODgYs2fPbr2FJIQQQm4zFPwhhBByU6SmpuLpp59GeHg47OzsYGdnh86dO2Px4sW4dOmS2W35gzcvLy9UV1c3eKzg4GDce++9ZpcxDNPofwsWLGjT10buTNXV1Vi6dCkOHjzYbstQVFSEqVOnQqlU4quvvsKGDRtgb2/fZs93/PhxLF26tN0CTACwdu1afPTRR3jooYfwww8/4IUXXmi3ZWnM9u3bsXTp0gaX3wrrDCGEEGKJpL0XgBBCyJ3v33//xSOPPAKJRIIZM2age/fuEIlEiI+Px59//olVq1YhNTUVQUFBZvfLz8/HqlWr8NJLL1n1PGPGjMHMmTMbXB4eHt4qr4PcXaqrq7Fs2TIAwPDhw9tlGc6cOYOKigq8++67GD16dJs/3/Hjx7Fs2TLMnj0bzs7Obf58luzfvx9+fn749NNP2+X5rbF9+3Z89dVXDQJAt8I6QwghhFhCwR9CCCFtKjk5GdOmTUNQUBD27dsHHx8fs+tXrFiBr7/+GiJRw2TUHj164KOPPsKiRYugVCqbfa7w8HA89thjrbbsd7Kqqqo2zSAhrSM/Px8AWjUQc6t/9vn5+e0WeLpV3eqfGSGEkFsflX0RQghpUx9++CGqqqqwbt26BoEfAJBIJHj22WcREBDQ4Lq3334beXl5WLVqVZsuI19mlpiYiMceewwqlQoeHh546623wLIsMjIyMGnSJDg5OcHb2xuffPJJg8eora3FO++8g7CwMMjlcgQEBGDJkiWora01u926deswcuRIeHp6Qi6Xo3PnzhZf39mzZzFu3Di4u7tDqVQiJCQEc+fOFa4/ePAgGIZpUF6SlpYGhmGwfv164bLZs2fDwcEBycnJmDhxIhwdHTFjxgwAgMFgwGeffYaoqCgoFAp4eXlh/vz5KCkpseq9i4+Px9SpU+Hh4QGlUomIiAi88cYbZre5cOECJkyYACcnJzg4OGDUqFE4e
fKk2W34HjhHjx7Fs88+Cw8PDzg7O2P+/PnQaDQoLS3FzJkz4eLiAhcXFyxZsgQsyzZ43R9//DE+/fRTBAUFQalUIjo6GrGxsWbPNXz4cItZGbNnz0ZwcLDweB4eHgCAZcuWCSWEppke8fHxeOihh+Dq6gqFQoE+ffpgy5YtDR73ypUrGDlyJJRKJfz9/fHee+/BYDA0+94OHz4cs2bNAgD07dsXDMOY9a357bff0Lt3byiVSri7u+Oxxx5DVlZWg9fU2Gdf39KlS/HKK68AAEJCQoTXXL8v0d9//40uXbpALpcjKioKO3fubPBYWVlZmDt3Lry8vITbrV27tsnXy3+GBw4cwJUrV4TnP3jwoE3re0tZ+xyzZ8/GV199BcC83LS11hn+u3Do0CEsWrQInp6e8Pf3BwBcv34dixYtQkREBJRKJdzc3PDwww83+Iz4xzh27BhefPFFeHh4wN7eHg888AAKCgoavPYdO3YgOjoajo6OcHJyQt++fbFx40az25w6dQrjx4+HSqWCnZ0doqOjcezYMVvfZkIIIe2EMn8IIYS0qX///RdhYWHo37+/zfcdOnQoRo4ciQ8//BALFy5sNvtHrVajsLCwweVOTk6QyWTNPt8jjzyCTp064YMPPsC2bdvw3nvvwdXVFWvWrMHIkSOxYsUK/Pzzz3j55ZfRt29fDBs2DAAXQLn//vtx9OhRPPXUU+jUqRMuX76MTz/9FImJifj777+F51i1ahWioqJw//33QyKRYOvWrVi0aBEMBgMWL14MgMt8GDt2LDw8PPDaa6/B2dkZaWlp+PPPP21498zpdDqMGzcOQ4YMwccffww7OzsAwPz587F+/XrMmTMHzz77LFJTU/Hll1/iwoULOHbsGKRSaaOPeenSJQwdOhRSqRRPPfUUgoODkZycjK1bt+L9998HwAU+hg4dCicnJyxZsgRSqRRr1qzB8OHDcejQoQbrxTPPPANvb28sW7YMJ0+exDfffANnZ2ccP34cgYGB+O9//4vt27fjo48+QpcuXRqU+f3444+oqKjA4sWLoVar8b///Q8jR47E5cuX4eXlZfX75eHhgVWrVmHhwoV44IEHMGXKFABAt27dhNc1ePBg+Pn54bXXXoO9vT02b96MyZMn448//sADDzwAAMjNzcWIESOg0+mE233zzTdWZbK98cYbiIiIwDfffIP/+7//Q0hICEJDQwFA+Mz69u2L5cuXIy8vD//73/9w7NgxXLhwwSxzprHPvr4pU6YgMTERv/zyCz799FO4u7sL7wXv6NGj+PPPP7Fo0SI4Ojri888/x4MPPoj09HS4ubkBAPLy8jBgwAChQbSHhwd27NiBefPmoby8HM8//3yj7/mGDRvw/vvvo7KyEsuXLwcAdOrUCXFxcc2+XzfL/PnzkZ2djT179mDDhg3C5a21zvAWLVoEDw8PvP3226iqqgLAlQEeP34c06ZNg7+/P9LS0rBq1SoMHz4cV69ebfDZPvPMM3BxccE777yDtLQ0fPbZZ3j66aexadMm4Tbr16/H3LlzERUVhddffx3Ozs64cOECdu7cienTpwPgSvEmTJiA3r1745133oFIJBIC2UeOHEG/fv1a/40mhBDSulhCCCGkjZSVlbEA2MmTJze4rqSkhC0oKBD+q66uFq575513WABsQUEBe+jQIRYAu3LlSuH6oKAg9p577jF7PACN/vfLL780uZz88z311FPCZTqdjvX392cZhmE/+OADs+VWKpXsrFmzhMs2bNjAikQi9siRI2aPu3r1ahYAe+zYMeEy09fJGzduHNuhQwfh77/++osFwJ45c6bRZT5w4AALgD1w4IDZ5ampqSwAdt26dcJls2bNYgGwr732mtltjxw5wgJgf/75Z7PLd+7cafHy+oYNG8Y6Ojqy169fN7vcYDAI/548eTIrk8nY5ORk4bLs7GzW0dGRHTZsmHDZunXrWADsuHHjzO4/cOBAlmEYdsGCBcJl/GcTHR3d4
HUrlUo2MzNTuPzUqVMsAPaFF14QLouOjja7L2/WrFlsUFCQ8HdBQQELgH3nnXca3HbUqFFs165dWbVabfa6Bw0axHbs2FG47Pnnn2cBsKdOnRIuy8/PZ1UqFQuATU1NbfDYpvj3xXRd0Gg0rKenJ9ulSxe2pqZGuPzff/9lAbBvv/222Wuy9Nk35qOPPmp0uQCwMpmMTUpKEi6LiYlhAbBffPGFcNm8efNYHx8ftrCw0Oz+06ZNY1UqlcXvgKno6Gg2KirK7DJb1nf++2wqKCjI7DtriS3PsXjx4gbPwbKts87wn/mQIUNYnU5n9hiW3rsTJ06wANgff/yxwWOMHj3a7Pv0wgsvsGKxmC0tLWVZlmVLS0tZR0dHtn///mbrEr9s/P87duzY4LtZXV3NhoSEsGPGjGmwTIQQQm49VPZFCCGkzZSXlwMAHBwcGlw3fPhweHh4CP/xZRT1DRs2DCNGjMCHH36ImpqaJp9v0qRJ2LNnT4P/RowYYdXyPvHEE8K/xWIx+vTpA5ZlMW/ePOFyZ2dnREREICUlRbjst99+Q6dOnRAZGYnCwkLhv5EjRwIADhw4INzWNOOjrKwMhYWFiI6ORkpKCsrKyoTnALisKa1Wa9WyW2PhwoVmf//2229QqVQYM2aM2XL37t0bDg4OZstdX0FBAQ4fPoy5c+ciMDDQ7Dp+zLZer8fu3bsxefJkdOjQQbjex8cH06dPx9GjR4V1hDdv3jyzMd39+/dv8Bnwn43pZ8CbPHky/Pz8hL/79euH/v37Y/v27U29NTYpLi7G/v37MXXqVFRUVAjvW1FREcaNG4dr164J5Vfbt2/HgAEDzDIjPDw8Gi29ssbZs2eRn5+PRYsWQaFQCJffc889iIyMxLZt2xrcp/5n31KjR48Wso8ALqvFyclJ+CxYlsUff/yB++67DyzLmq1X48aNQ1lZGc6fP98qy3I7sWWd4T355JMQi8Vml5luP7RaLYqKihAWFgZnZ2eL7+tTTz1l9n0aOnQo9Ho9rl+/DgDYs2cPKioq8Nprr5mtS0Dd9/jixYu4du0apk+fjqKiImHZq6qqMGrUKBw+fNiqMkZCCCHti8q+CCGEtBlHR0cAQGVlZYPr1qxZg4qKCuTl5TXbpHnp0qWIjo7G6tWrmxz77O/vf0MTkeoHMVQqFRQKhVD+Ynp5UVGR8Pe1a9cQFxdnVh5jim/aCwDHjh3DO++8gxMnTjQYY19WVgaVSoXo6Gg8+OCDWLZsGT799FMMHz4ckydPxvTp0yGXy1v02iQSidA3xHS5y8rK4Onp2exy18cf7Hfp0qXR2xQUFKC6uhoRERENruvUqRMMBgMyMjIQFRUlXG7pMwDQoCeUSqWy2JeoY8eODS4LDw/H5s2bG11OWyUlJYFlWbz11lt46623LN4mPz8ffn5+uH79usWSR0vvibX4A3dLjxEZGYmjR4+aXWbps2+p+p8PALi4uAifRUFBAUpLS/HNN9/gm2++sfgYTa1Xdypb1hleSEhIg9vU1NRg+fLlWLduHbKyssz6XvHBY1P1Py8XFxcAED6v5ORkAE1/j69duwYAQv8pS8rKyoTHJoQQcmui4A8hhJA2o1Kp4OPj06DhLgDhgLh+o1JLhg0bhuHDh+PDDz/EggULWnsxBfXPsjd2GQCzgy6DwYCuXbti5cqVFm/LBy6Sk5MxatQoREZGYuXKlQgICIBMJsP27dvx6aefCmfPGYbB77//jpMnT2Lr1q3YtWsX5s6di08++QQnT56Eg4OD2dl8U3q93uLlcrm8wUQ1g8EAT09P/Pzzzxbv01gwqy019n5butz0M7AFwzAW79vYe1cf/zm9/PLLGDdunMXbhIWFtWjZ2oKlz76lmvs+8O/NY4891miwgO+BYwtb1/eWaMvnaMk6Y6kv1DPPPIN169bh+eefx8CBA6FSqcAwDKZNm2Yx+8aa7Ze1y/7RRx+hR48eFm9jKbuTEELIrYWCP4QQQtrUPffcg++++
w6nT5++oaagS5cuxfDhw7FmzZpWXLrWERoaipiYGIwaNarRA0gA2Lp1K2pra7FlyxazM/KNlVcNGDAAAwYMwPvvv4+NGzdixowZ+PXXX/HEE08IZ9lLS0vN7sNnhVi73Hv37sXgwYOtakBsii/jshTY43l4eMDOzg4JCQkNrouPj4dIJLI45e1G8FkKphITE4UpXgCX/WCpZKz+e9fYZ8m/dqlU2mymWVBQkMVlsvSeWCsoKEh4DL600PRx+etboqn11xoeHh5wdHSEXq+/oSy8+lpjfW/N52jsfWqNdaYpv//+O2bNmmU2cVCtVjdYZmvxJXyxsbGNBiz52zg5ObXqZ0oIIeTmop4/hBBC2tSSJUtgZ2eHuXPnIi8vr8H11p6Bjo6OxvDhw7FixQqo1erWXswbMnXqVGRlZeHbb79tcF1NTY0wqYc/C1+/VGPdunVm9ykpKWnwvvBn3PnR8UFBQRCLxTh8+LDZ7b7++mublluv1+Pdd99tcJ1Op2vygNLDwwPDhg3D2rVrkZ6ebnYdv+xisRhjx47FP//8Y5bhlZeXh40bN2LIkCFwcnKyenmt8ffff5v1Tjl9+jROnTqFCRMmCJeFhoYiPj7ebOR1TExMg7HV/OSk+u+Dp6enEIjMyclpsAymjztx4kScPHkSp0+fNru+sWwra/Tp0weenp5YvXq1sD4A3LjuuLg43HPPPS1+bHt7ewANX7O1xGIxHnzwQfzxxx8WA4OWxoxbozXW99Z8jsbep9ZYZ5oiFosbbBu++OKLFmcnjR07Fo6Ojli+fHmD7Sr/PL1790ZoaCg+/vhjiyW8Lf1MCSGE3FyU+UMIIaRNdezYERs3bsSjjz6KiIgIzJgxA927dwfLskhNTcXGjRshEoms6knyzjvvNNm8OTExET/99FODy728vDBmzJgbeh1Nefzxx7F582YsWLAABw4cwODBg6HX6xEfH4/Nmzdj165d6NOnD8aOHQuZTIb77rsP8+fPR2VlJb799lt4enqaHRD+8MMP+Prrr/HAAw8gNDQUFRUV+Pbbb+Hk5ISJEycC4ErqHn74YXzxxRdgGAahoaH4999/beqnEh0djfnz52P58uW4ePEixo4dC6lUimvXruG3337D//73Pzz00EON3v/zzz/HkCFD0KtXLzz11FMICQlBWloatm3bhosXLwIA3nvvPezZswdDhgzBokWLIJFIsGbNGtTW1uLDDz9s2RvehLCwMAwZMgQLFy5EbW0tPvvsM7i5uWHJkiXCbebOnYuVK1di3LhxmDdvHvLz87F69WpERUWZNaBWKpXo3LkzNm3ahPDwcLi6uqJLly7o0qULvvrqKwwZMgRdu3bFk08+iQ4dOiAvLw8nTpxAZmYmYmJiAHDBzw0bNmD8+PF47rnnhFHvQUFBuHTpUoteo1QqxYoVKzBnzhxER0fj0UcfFUa9BwcHN9kXqzm9e/cGwI2ZnzZtGqRSKe677z4h2GGNDz74AAcOHED//v3x5JNPonPnziguLsb58+exd+9eFBcX27xcrbG+t+Zz8O/Ts88+i3HjxkEsFmPatGmtss405d5778WGDRugUqnQuXNnnDhxAnv37oWbm1uLXrOTkxM+/fRTPPHEE+jbty+mT58OFxcXxMTEoLq6Gj/88ANEIhG+++47TJgwAVFRUZgzZw78/PyQlZWFAwcOwMnJCVu3bm3R8xNCCLmJbvJ0MUIIIXeppKQkduHChWxYWBirUChYpVLJRkZGsgsWLGAvXrxodlvTUe/1RUdHswBsGvVuaay3Nc83a9Ys1t7e3uIy1B9FrdFo2BUrVrBRUVGsXC5nXVxc2N69e7PLli1jy8rKhNtt2bKF7datG6tQKNjg4GB2xYoV7Nq1a83Ga58/f5599NFH2cDAQFYul7Oenp7svffey549e9bsOQsKCtgHH3yQtbOzY11cXNj58+ezsbGxFke9W3odvG+++Ybt3bs3q1QqWUdHR7Zr167skiVL2Ozs7
CbfN5Zl2djYWPaBBx5gnZ2dWYVCwUZERLBvvfWW2W3Onz/Pjhs3jnVwcGDt7OzYESNGsMePHze7jaWR5ixr/WfDj+P+6KOP2E8++YQNCAhg5XI5O3ToUDYmJqbBcv/0009shw4dWJlMxvbo0YPdtWtXg1HvLMuyx48fZ3v37s3KZLIGI7yTk5PZmTNnst7e3qxUKmX9/PzYe++9l/3999/NHuPSpUtsdHQ0q1AoWD8/P/bdd99lv//++xaPeudt2rSJ7dmzJyuXy1lXV1d2xowZZmPuLb1P1nj33XdZPz8/ViQSmS0jAHbx4sUNbm9pjHpeXh67ePFiNiAggJVKpay3tzc7atQo9ptvvmn2+S19v1jW+vW9paPebXkOnU7HPvPMM6yHhwfLMIzZ893oOtPUZ15SUsLOmTOHdXd3Zx0cHNhx48ax8fHxDV5fY4/R2Dj7LVu2sIMGDWKVSiXr5OTE9uvXj/3ll1/MbnPhwgV2ypQprJubGyuXy9mgoCB26tSp7L59+5p9XwkhhLQ/hmVb2C2REEIIIeQWkZaWhpCQEHz00Ud4+eWX23txCCGEEEJuKdTzhxBCCCGEEEIIIeQORsEfQgghhBBCCCGEkDsYBX8IIYQQQgghhBBC7mDU84cQQgghhBBCCCHkDkaZP4QQQgghhBBCCCF3MAr+EEIIIYQQQgghhNzBJO29AG3NYDAgOzsbjo6OYBimvReHEEIIIYQQQgghpFWwLIuKigr4+vpCJGo8v+eOD/5kZ2cjICCgvReDEEIIIYQQQgghpE1kZGTA39+/0evv+OCPo6MjAO6NcHJyauelIYQQQgghhBBCCGkd5eXlCAgIEGIfjbnjgz98qZeTkxMFfwghhBBCCCGEEHLHaa7NDTV8JoQQQgghhBBCCLmDUfCHEEIIIYQQQggh5A5GwR9CCCGEEEIIIYSQWxTLsriQXoIajb7Fj0HBH0IIIYQQQgghhJBb1P74fDzw9XG8t+1qix+Dgj+EEEIIIYQQQgght6ik/EoAQHZpTYsfg4I/hBBCCCGEEEIIIbeokmotAECrZ1v8GBT8IYQQQgghhBBCiFUOJxYguaCyvRfjrlJarQEAaPSGFj8GBX8IIYQQQgghhBDSrLTCKsxcexqLfjrf3otyVykxBn90FPwhhBBCCCGEEEJIW7peXA0AyCipbuclubtQ2RchhBBCCCGEEEJuisKKWgBAtUaPWl3Lx44T2/BlX1rK/CGEEEIIIYQQQkhbKqqqFf5dasxGIW2Pz/yhnj+EEEIIIYQQQghpU0WVGuHffB8a0rZYlkWZMfijo7IvQgghhBBCCCGEtKWCyrrMn5Iqyvy5Gao1eiHjh8q+CCGEEEIIIYQQ0qZMM39KKfPnpjDNsKLgDyGEEEIIIYQQQtqUac+fEur5c1OY9laiaV+EEEIIIYQQQmz2xl+XMfF/R1Ct0bX3opDbAPX8ufko84cQQgghhBBCSIvpDSx+O5uJqznlOJtW0t6LQ25xLMtS2Vc7KDHL/KHgDyGEEEIIIYQQG2SX1giNZBNyK9p5acitrlytMxs1TmVfN0epWeYPC5ZtWekXBX8IIYSQW1h8bjkeWXMCF9LpjCwhhJDWlVxQKfw7Lre8HZeE3A6KTCZ9AZT5c7PUn6qmM1DwhxBCCLnjfHM4BadSi/HVgeT2XhRCCCF3mNTCKuHflPlDmlNYaR7socyfm6N+b6WWln5R8IcQQgi5RbEsi5PJRQCAkylFN1TnTQghhNRnGvy5llcJHf3OkCbwmT8ihvubGj7fHPUzrLQ6yvy5LWWV1uDV3y8hMY8i7YQQQsxdL6pGdpkaAFBZq8PFjNL2XSDSrgwGFiv3JGLbpZz2XhRCyB0ipaAu+KPRG8yCQYTUV1jFBSECXe0AmI8gJ22nfoaV1kCZP7elvy9kYdPZDKw9mtrei0IIIeQWc9yY9cM7kljQTktCbgWHEgvw+b5reOX3GNTq9O29OISQOwAf7FFIucPCeCr9I
k3gM3/CPB0AcBkphhb2nyHWK62pF/yhsq/bU1WtDgCQV65u5yUhhBByqzmRwgV/+DNsh68VtufikHa2MzYXAFCt0bfZSObKWh0mf3UML266SDv0hNzh1Fo9skprAADR4R4AuCEDvGqNDseSCls8WYjceQqNwZ9QY/DHwAIVxuNZ0nao7OsOwUftCup1TieEEHJ3Y1kWJ4yZPy+OCQcAXMosRRmlWN+VdHoDdl/NFf4+dINZYCzL4kJ6SYMMon9jsnExoxR/XsjChpPXb+g5CCG3Nj7rR6WUYmAHNwDmTZ/f/ucKZnx3CpvPZrTL8pFbT5Gx4bOvSgk7mRgATfy6GUqqzN9jDWX+3J40Ou6DK6ygLw0hN8vx5EL0e38v/r2U3d6LQkijkvIrUVhZC7lEhAldvRHqYQ8Dy62/bYllWRxLKqSduVvM6bRis5r/Qwk3FvzZfjkXD3x9HG/8FWt2+V8XsoR/L98RZzYGmhByZ+GDPx087NHJxwkAEJfDBX+qanXCftL2y7mWH4Dcdfjgj7uDHC52MgCNT/zKL1fjbFrxTVu2O5VOb0C5msuuEhs7beuo58/tiY/aFVbWUno1IRZcyS5r9d4WH+9KQH5FLY3OJrc0vt9P32BXyCViDO3IpeQfSWrb4M/O2FzM+O4U/vPX5TZ9HmKbXcaSrzGdvSBigIS8CuSU1bT48Y4Zg4h/XchCtrHsI6u0BqdSuR317gHOUGsNeHFzjMXpP7uu5GLl7gSotdR7iJDbFR/8CXG3R6Q3F/zJKq1BuVqLvXF5UGu57/7JlCLUaGz/rqcUVCIpn3oI3Un4si83Bxmc7aQAGp/49dyvF/HQ6hP4jTLHbkiZSb8fN3su4EZlX7cpjfGD0xnYBo2cyN1t3bHUu74R+IH4fNzz+VHM/P50q40evZxZhvPppQCAuJxy2ilpBsuySMituOtHv/5+LhMzvjuJfy5m3bRAPZ/hMzCUS8Uf2tEdAHA4saBN+y/sj88HABxMKBCyU0n7MhhY7LqSBwB4tF8Augc4A2g8+2fbpRy8/uflJg/W4nK4vh56A4sfTqQBALZc5M7y9w9xxerHesFRIUFMRik+2ZMobAO0egOWbb2C+RvO4fP9SVh96PYOosfllGP1oWTkllHvRXL34TP7OrjbQ2UnhY9KAQBIzK0QtgcAUKsz4GRKkcXHaIxaq8dDq0/gga+Po5J6wtwx+OCPu4NMyPxpLFM4NrsMALBs61VkFFffnAW8A/GZVY4KCRRSrtSOyr5uU6YfXEEF9f0hnJyyGizbehX/9+9VZJbcXhtLvYHF0i1X8M3hGz8g4Jubnkotxuf7k2748QBg/fE0s79Nd25IQ9su52DcZ4cxZ/2ZuzoQsHJ3Ao4lFeG5Xy/ini+OYl9cXpsGYAwGFidTuAwMPvgzoIMbpGIGmSU1uF7UdtuFk6ncDn61Rk+j5W8RMZmlyC1Xw0EuwaBQd6Exq6W+P2mFVXhh80X8cjodv5xOt/h4egOL+Jy6wPcvp9JRVavDPxe5kq8HevrBR6XEu5O6AABWHUzG4BX78cnuBMz47hTWHUsT7vvN4ZTbdv+FZVk888sFfLAjHtEfHcDyHXFU7khuGpZlsT8+r12HvtRl/nDNeyO9HQFwmT789qV/iCsA4EBCvk2PnZBbgeIqDSrUOiTnt275aFm1lrIObwKWZc1OImh0deVH7g7yusyfqoYJDGXVWlQYb1tZq8PLv8VQlUsL8b9LLnYySMVc2RdN+7pNaXUU/CENnU6tq489lXJ71coeTSrE+uNp+O/2+BvKqmFZFkdNylu+2H/thnudFFXWYquxfn3WwCAAwNZLOcJBPMuyOJCQj+2XcxCbVYYKNWXjXTBmSR25VojX/rx0V078yC6tQXaZGiIGcJRLEJdTjnk/nMWoTw7huyMpDZrwtYarOeUoq9HCQS5BNz8VAMBeLkGvQBcAbVf6lV1ag4ziulKio21cYlYfHzz++mDrBHvvFDuvcIHwE
ZGeUEjFGB7hCQA4eq3QbAeQZVm8veWKEKj9+dR1i9/ZtKIq1Gj1UEhFCHazQ7lah/e2xSE+twIysQgTuvoAACb18MVrEyLhai9DXnktvtifhNOpxXCQS7Dm8d7oHuCMao0en++71tZvQZuIzSpHkvGgtFZnwJpDKRj64QHsvkL9TUjbO5lSjLnrz+KBr461ye+INUx7/gBApLHvz7dHUqEzsOjs44QnhnYAwGWD2rIPcCmrTPh3/d5hp1OL8f62qy0aYFBZq8PQD/fjga+P23xf0rz43HI88cNZjF55CJ3e3olOb+8UKhGKqrhjVYmIgZNC2mTmT4bx5LW9TAw7mRinUoux9tjdXdHQUnzmj4udFFIxF77R6ans67ZklvlTSSnHhHPGpDmarWm27e3fmLpMmu+OtHwjn1ZUjazSGsjEIkzq4QuWBV7YdBFFNzAZ79czGdDoDOjqp8KS8ZFQSEVILaxCbBZX/vD7uUzMWXcGi34+j3u/OIquS3ej3/t7MXPtaSzfHofjN/lA+FZgmmHy5/ksfLonsR2Xpn2cu86N1I7yVeHwkhGYP6wD7GVipBRW4b1tcRiwfF+rNw8/IfT7cYFEXPdTzZd+HYi37QystU6lmm9vjt3kdf5AfD7WH0/DhzsTqEzAiGVZod/P+ChvAEBXPxVc7KSoqNUJAVoA2BGbi8OJBZCJRVBKxUguqBJ6+JjiS74ivJ0wZ3AIAAhZQiMjPaFScmdzGYbBguhQnHh9JL6c3hNDwtzRO8gFfy8ejHFR3nh9QqRwX/4gsq2wLNvqB8h8ptM93XywdnYfRHo7okKtw4KfzmGDsRSupXR6A63DpEkxmaUAgOwyNZ7fdPGmZ0UUV2lQajyoDHYzBn+MmT98j5H7e/hiUKgbZGIR0ourkWLD9zw2s/Hgz3vbruLbI6l44sczNmfwJOdXolytQ1xOOZ2kawPvb4vD3rg8JOVXCj2f/jHu2/PNnl3tZRCJGLgIPX8afg585UKYlyPevKczAODDXQlIoSECNuN7KqnsZJBJuH1Cyvy5TZmWUdDEL8I7k1oi/Ptk6u0T/NHoDNhlcsb0zwtZVmW0/Xk+E0NW7MeF9LrXzWcc9ApyxvIpXRHqYY+88lo8vfFCi5oO6vQG/GwcWzxrUDDs5RKM6uQFANgSk4X0omos3XIFABDqYQ9XY0O1/IpaHE4swJrDKXh87enbrgzvRqUXczt693bjMgE+35/U5o374nPLUX4L7dDxwZ/eQS5wsZfh9YmdcOqN0fjvA10R5euEWp0Br/5+CWmtePC7J47r7zI4zN3s8vFduIP/w4kFQt19azqZzAUK7jFmflzMKG3Vnevmzhr/aDJaPCG3vNWe93YWn1uBtKJqyCQiDI/gyr3EIkZoAH4okQsEVtbq8H9brwIAFkR3wOSefgCAn081LP26ms29t519nPBQb384KSTCdZN7+ja4vVwixr3dfPHTE/3xx8JBCPPkSkQGdHDDiAgP6AwsPt6V0KLXpzewePz7U3jsu1NN9hZbsTMBvd7bIwRsbpTewGKL8YBmcg8/jIz0wr/PDMG0vgEwsMBb/1zBBzviW3xAPnPtafR9b2+bBWrJ7S8xry47+lBiAb46cHMzHlMLuYNwX5UCSuPIbr7pM+++7r6wl0vQz1j6ddCGKYOmmT8pBXW/jzq9AfHGcfJn0krwzC8XbOormGtSJteWJdB3o9TCKhy5VgiGAVY/1hubnhoAALiSVYYajd6k2bMcAOAsTPuykPljzCIOcFHi0X4B6B/iCo3OIPQVJNarK/uSQmKc9kU9f25T5pk/VPZFuC94gnGHQMRwG8+s0pZPdLmZjiYVoFytg4ejHN0DnKHRGbDB5GDOknK1Fsu2XkVmSQ0+21tXOnDsGhf8GRLmDjuZBF9O7wU7mRgnUoowa+1pmw9I91zNQ3aZGq72MiGQcV837iDn30s5eGHzRVRp9OgX4ordL0Tj/FtjcHnpWPy5aBD++0BXdPR0g
N7AYtulHJue11oGA4tnf7mAMSsP4a2/Y7H7Sm67n9FiWRbpxgZ9L4+NwOIRoQCAj3cntFkD6H8uZmH8Z0fw4qaLbfL4LWEa/OE5yCWY3j8QW54egv4hrqjS6PH8postPhNj6npRFU6nFkPEcBkJpsI8HdHdXwWdgcU/bdCvis/8mdLLDyHu9tAb2FYrPT2cWIDwN3fg51OWtwmphVU4bNLDhg9Q3O34AEV0uAfs5XVBGr7vz+/nMvHKbzGYu/4McsvVCHS1w6IRYZjRPxAAsDM2p0EQ/moOH/xxhL1cgkf7cbd1UkiEkjJrvTohEgzD9QeLaUGPqP3x+ThyrRBHkwpxoJEDy5yyGqw9mgqWBZZuudIqGUAnU4qQX1ELlVIqvJcSsQjLp3TFi2PCAQCrDyVjzeEUmx87Lqccx5OLUKPV46kNZ9usjOy7IylYsOEcqjW3d4bRPxez8J+/LrdpXzm1Vt8mAfMbcS2PC76Mi+JORK3cm4ij125etiUfkAkxlnwBXPkX31Okb7AL/JyVACAEng9a2fdHrdXjmklwyzTzJ62oChqdATKJCDKJCHuu5uGtf2KtLikz7ZGUTk2EWxV/knR4uAfGd/FGvxBXeDnJoTOwiMksNRnzzgV9XOy5zJ9SC5k/fNlXgKsdGIZBn2BuH4oCdrarK/uSUdnX7U5DPX9IPWfTuAPNDh726OrvDAA4ZVL6pdbqhbG8psrVWmw4eb1F9dOt5d8YLjByT1cfPGWsEd9wIq3JTJ3vDqcI6cWHEguQWlgFvYEV+vvwmQ+dfJywYV4/OCokOJ1WjMe+O2V1Y868cjXeMWb1PNovQOiUPzzCA45yCXLK1Dh3vQSOcglWTu0OsTGq7qiQolegC6b3D8TswcHca7Qh+GNLyv/G0+nYEpONa/mV2HDyOp7acA4D/rsPm89mtFufnfyKWqi1BohFDPxclHh2VEe42EmRV16LI22wg1pYWStkXx2+VohaXfs3c6zW6IQDZdPgD08sYvDpIz3gpJDgYkZpq/Q++f1cJgBgSEcP+KiUDa5/sLc/AC5jrjXllqmRVlQNEQP0CXbF4DCu0XRr9f35+2IWtHouQ8TSwerP9QLFV3NoEp/BwOKfC3VNmE0NC/eAVMwgr7wWv53LFHrFLZsUBYVUjC5+KvQIcIZWz+K3c+bZenzZV2df7iz/k8M6YFi4B16b0EnYPlor0tsJk7pzgfRfz9ieFWh6guDXRhpUrz6YLJwsK6nW4oMd8TY/T31/G9/XiV19hDR6gCt1e3ZUR7x9L1em8M3hZJuzTfnvsFIqhlbPYtHP51u9NLSqVocPdyVg55VcbL98+/YoKq3W4LU/LmPjqXTsuZrXZs+z4KdzGLh8H64XWZ+hqdbqW+X3N7dMjff+vWq2n28wsEK/qSXjI/FInwCwLPDqH5caZJvFZpXhz/OZrX5CiC/h6mBs9gwAUrEI4V5c6df93euyAPmg8KmU4gbbb72BxepDyThr0rIgLqccOgMLmfFANa2wGnrj64ozbtujfJ3w+bQeEDHAL6czsLGR7399ppP5KJDQetRaPX4zbrseG8D1xWQYBn2CuKyvc9dLTCZ9WZP5Ywz+uNgBAIKMpYVpNnwHCYcPrjnbSans63anpWlfd4Wyai3WH0u1KljB9/vpF+yKAR24DS7f94dlWTy14RyGfnigwRSelbsT8dbfsXj6l/PtEixQa/XYbdxxu7ebD8ZFecHfRYmSai3+aOQgtbCyFt8Zm8h5OnI/JBtOXEdsVhnK1To4KiToamx2CwC9g1zxy5MD4GInRUxmGR7//nSzteJqrR5P/XgW+RW1CPdywILoUOE6hVSMccYyGgB4d3IX+Bt/pOqb0MUHYhGDy1llVpX3bDh5Hd2X7RaCGU3JLVMLBzOzBwVj5sAgBLraoUqjx5LfL+GZXy4IAbKbiT+j5uusgFQsglwiFkpJ6h9MtoZlW68KZzc0OgMumfQLaC8XM0qhN7DwVSng69wwE
AMAvs5K/HdKVwDAVweSzBq220pvYPGHcefrYWOQp777uvlCKmZwJbsc8TdQGnU6tRhP/nhWODvLZ/109nWCSinFEGPg1Zrgzyu/xWDIiv0obiIj46KxN01JtRab6gUJajR6bDaWE07pxa1jfIDibnYytQjZZWo4KiQYGWmekePhKMcPc/rh1fGRWDI+AkvGR2D1Y70wwiRzh8/+2XgqXTigLKysRV55LRiG6/kDcDvyP87th+nG29uKD0jujM0x268prKzFkt9jsPlshsXyqTRjthfDxdtxICG/wcj1vHI1fjGuLy8ZM3I2nc0w641nK7VWL0yTnNyjYZkbAMwcGIQAV+43bLMNpa5avUEILP1vWg880NMPOgOLpzdewMT/HcHHuxJaZYre4cQC4QQi/1puR7+eyUCN8Xf8dCuUuW84kYZNZ8yDCAm5FTiYUACtnhUyOZuTXFCJ7st2CyeObsRXB5Lw3dFUfLG/7uRAVmkNarR6yMQiBLnaYen9UXBUSJBVWoOzJsuo1uox47tTeHFzDPr/dx/e+OuyWUbNjUjlM3/c7c0uf+e+KCweEYqpfQOEy0I97OHvooRGb8DxJPPPadvlHHywIx6LN54XAjyxxpKvAaFukEtE0OgNQtk8/7sV6e2E8V188Jqxd9jHuxKs2tcx3Ubwpenkxv17KQdlNVr4OSvNMkD5E19n04pRZPyNdzO2RnBWNpX5Yyz7cuX2nfi+UhSws53ptC8q+7rNUebP3eHj3QlYuvUqFv50vtn+AaeNO7R9g10xoAN35p1v2Hk0qRCHEwugN7BmB086vUE4q3jkWqFNO6qt5XBiASprdfBRKdArkGtSO28I10j0m8OWJyKtOpiMao0eXf1UWPFQNwBcUGH3VW5HdmAHN7NmtwDQxU+FTfMHwtVehstZZQ3OAG84kYbnf72AzWcykFeuxut/XkZMZhmc7aT4dmYfOCqkZrd/bEAQJCIG0/oGCIENS1ztZRhkHLnd3BncK9lleHfrVegNLNYfT8NPzZS+vf1PLCprdegR4Iy37u2M/5vUBQdeHo4l4yMgETH491IOJv7vCPIrbm5TeP4HOsi1bsfw4d7czuCeq3lNHujbas/VPGyNyYaIqWs4eSNBlNZy3rgT3stC1o+pe7v54sFe/jCwMNvBt9Xx5EJkl6nhpJBgTGcvi7dxsZdhVCR3HR8oaomVexKw52oenvjxLMpqtMJo+f4h3Ho+sIM7GAZIyq9scEBuqswY4M0sqWk0UFRSpTFrFPrt4RSzIMGWmCyUq3UIcFVioTFAm5BbIRxI3K34IMK93XwsZuQMCnPHwuGhWDQ8DIuGh2F8F/MywXu7+cJJIUFmSY0wtpkPqgW52sHBpIzsRgzs4AY3exlKqrVCs3IA+GR3IjafzcSS3y9h8tfHcD7d/MD7J5MSg37BrjCwaNBTbNXBZGh0BvQNdsHTI8MwzXhA+sZfl20685lZUo1z10tQrtZif3w+Kmp18FUp0DfY1eLtJWKRkMH67ZEUq0td98fno6hKAw9HOUZGeuLjh7tj5sAgMAxXbvflgSRM/uoYlm65ckMNfk376x25VoCqW7S59LW8ikZPfGn1BvxwPE3421Jzcl52aQ2e+eUCXtx8ER/tisdPJ683+E1MKajEW/9cwat/XBaCD0BdM3PA+gPPkylFqNUZsPvKjWcjXcnmloXP7Abq+v108LCHRCyCUibGOGND960mgzN2XckVAiLVGj1+PpWOez4/ikvGZtE3Qhjz7mEe/OkX4opXxkVCLqnb5jAMIwSWd9UrY9xuzIjOK68VTlbyJ2+6+6vQwYPLLOJLv+KNmT+dfLjf+rmDQ9DR0wEl1Vp8YUX2LPX8aRv89nh6/0AhAx6AUK517nqJcKzqbjxh69JI5g/LskKwz1/I/OH+n1lS3aYlnnci/v11Npn2RZk/tynq+XPn0xtY7IjlfhhPpBTh+6ONT8Cq0ehx2fiD2S/EFX2CXCBiuB+37NIaf
LK7btLS9ss5wsbzVGoxCivrNrzv/RvX5MFaW+DLoe7p6gOR8Udjap8AuNnLkF5cjUlfHTM7W5VTViOk+780NhzRHT0Q7GaHCrUO3x7m3qMhHd1hSbiXIz6Z2h0AsP54GvZczQPLslixMx5v/XMFf1/MxpI/LqH/f/fhrwtZEIsYfD29l5ByaqpHgDNil43DcmPmRlNMewQ1pkajx7O/XIBGbxBq5ZduuWJWumdqZ2wOdl/Ng0TE4IMHuwo/uGIRg0XDw/D7wkHwc1Yiq7QGf5xrnUan1ko3puYGutVlQ3X2dUIXPydo9WyrNV4tV2vx5t+XAXDlJ3z/kaYOBG4W/gxsn2aCPwCwcDgXtDiVUtziA7HfznLBnEk9/Josv+EzLf66kN2i/ktl1VqcMR6IXC+qxkubY4R1lA86q+ykwpj5pqZ+HUsuBH8Ma3rAZeqi8UAlwFUJdwc5ssvUQs8ilmXx4wluW/BY/yB08HCAQipCjVZ/x6eHZxRX4+dT1y1mMKq1euy4zGenNB6YbopSJsbUPlywZNWhZAANS75ag0QsEpqR88Hx4iqNUJqokIpwKbMMU74+jrf+jkWtTo8aTV2JweMDgzCtH7ecm0yyhPLL1cKB+3OjwsEwDF4dz42eT8yrNAscNCUmoxRjPz2MB1cdR7elu/G8safYfT18hd8rSx7qHQBXexkyS2qw3crsGr7ka0pPP0jEIohFDP5vUheceWM0Pnm4u9BMff3xNLz6x6UWBTi1egP2GZumKqQi1OoMQnDvVpJaWIXx/zuC0SsPCfs2pnZdyUVOmVqYLpfQRKDos72J2BqTjT/PZ+GrA8l48+9YvFCvN5zpe8CX4Kq1evx1oe63KsPKHjHpxqBCbrn6hnpMGQwsEozNjeNzy4XfhmvGkq+OxhIrgGuuDHD7d/x2nf9NeHZkGDY+2R+9Ap2h0RtuaJoqwJ18Ti3iy74a7htZwvdL3BGbK5RCVtXqcMCkDxAfsL5s/C3o6qdCqDG4lJzPPR/f7JlvLi0Ri/CWscxy/fG0ZqdBtWXwJ72o+pYNpLal2KwyXMwohVTMCL8ZvE4+TlBKxShX64T9BD7zhw/+VGv0ZqX6BZVc2wCG4bLHAS7DXyEVwcDitulleqsoNe35I6GeP7c1rUnks7hK0yrNQsmthauR1YDfv/xoV0Kj5QwXMkqgM7DwdlLA30UJR4VUKHtaviMeFzNKoZCK4O4gQ1mNVtjR4Xe2p/bxR/cAZ1TU6vDGX5dbvfyrXK3FumOpDX6YS6o02GucTmTaoNZeLsHGJwfA30WJ9OJqTPn6OL49nII3/76MR9achEZnQL9gV0SHe0AkYoQaYz4oWn/SkakREZ54wphZ9MrvMVjy+yWsOsgd3Ezp5Yfu/iqhlOCd+zpjUBOPpZCKwTCNHwDwxkV5QypmEJ9bgaR8y2nX7267iuSCKng6yrH1mSG4t5sPdAau58PxpEKkF1WjXM19dq/9cQkv/3YJALAgOrTBlA2AC07NMfYbOnsDZQ4tcd24kxzoal4Kx+8YbDrTOv2INp3OQF55LYLd7PDC6HBhqsi5tOI2ayxtDYOBFTJ/egdZzg4wZZoWf7KRYF9Tymq0whnVh/tYLvniRYd7wNVehsLKWhxpQU+eg4n50BtYeDrKIZOIsDcuDymFVWAYruSUx38Hmwr+mDZptnSABwAXjO9jv2A3ISNw9aFkXC+qwlMbzuFKdjlkEhEe7hMAsYgRypFaUvpVVqO96SOTW0Jv4Mp43/grFs/8cqFBEGBvXB4qanXwc1Y2mp1ijSeGdoBMLMLp1GKcSSs2m/TVmu41Bsd3xuZCo+OmK9bqDOjqp8LhJSPwkDFgueHkdUxdcxLfHuH6vfm7KBEd7okJXXzgaMxSOppUiLJqLd7+5wpqdQb0DnIRelC52MuwZFwEAGDN4ZRmS39TCioxZ/0ZVGv0sDdONNLouF5mD/Zq+numlIkxa2AwAK7vUHPbu8LKWmG6V/3vsLuDH
A/29sdXM3ph5dTuEDHAb+cyuZMFNp4FP5lShAq1Du4OMjzWn/vdvBVLv06lFEFvYFFYqcG0b07gyDXzANW6Y2kAuHLnUA97sCyEoLSpsmqt0Ph83pAQoZzxRHIRikxOnJpui3ZfzcPV7HLsiM0xKyW6bmXwxzSoEHcD5bWZJTWoMgZKDGzdeHc+8yfcs67fzqBQN7jay1BUpcGJlCJkldbgmLH/4cN9AjAo1B3/N6kLAGBHbM4NZQPvvMJ9Tz0d5Y2Wu9fXN9gVAa5KVNbqhN+qgwkFqNUZoDSerNgRm4vSao0Q3Orm72yW+VNWoxUO/CO86wJfw8I9MDLSEzoDi/9uj2t0GViWNTu5mVNWY9X3R6MzICG3osnvcFJ+BUZ8chCjPjnUoub1tzN+EMP4Lj7wMGb18KRiEXoEOAMAso3vPd/zx1EhEY5vTEu/Mo0lX95OCiGDjGEYk9KvW/vEDstyQdvUVpzieiNKTMq+ZJT5c3urX6/XmmUUdwuNzoAlv8dgbRMZNe2Jz/qZ3NMPoyI9odEb8MKmixZ3WPmU4L4hrkIwgj8Lz6cBzxoUjEnGs8D/XMyCVm/ADuNO36QefvjooW6QiUXYF5/fYBpQrU6P1/+8hG8OJ7fotbzyWwyWbb2KsZ8extv/xCKvXI31x1Ix4pODqNboEehqJ/xA8CK8HfHP4sHoF+yKilod3t8eh59OpiO9uBoOcgn+c08n4bU+3DtA2IHwUSmaPRu1ZHwkuvqpUFqtxW/nMsEwwH8f6IqVU3vgn6eH4Mwbo7H7hWGYadx5v1EqO6kwXnlrTMPsnz1X87DxVDoYBvj0kR5wtZfho4e6I8rXCUVVGkz/7hSGfXQA3Zbuxqy1p/HrmQxU1urQ3V+Fp0eGNfq8/IHf2eslN/Wgtq7sy3zH8P7uvpBJRIjPrcCVVpjIxAcxZw0KhkIqRoSXI5wUElRp9EKz5faQVFCJcrUOSqlYSE9vCsMwJhNRbD8LvzUmG7U6AyK8HM16XVkik4iEZpz1y2SssS+OO0Cd0ssf/3d/lHB5pLcTVHZ1pZF89t0hY7lpfSzLmjX/js0us7hzfcG4I90z0BmPDQiEo0KCpPxKjPzkEPYYM99eM2Z0ANwUKsD24M/uK7novmw37vniKPbH57Vbs3Rr/HUhS3h9e67m4Z0t5tNu/jpf1+i5qeyU5nirFEKm2Jf7k4TvVKdWDv70C3GFh6Mc5Wod9sfn40djZucTQ0Pg6ajAxw93x/o5faFSShGTUYqVe7hM1scGBEEsYqCUiYWm1v/dHofhHx/ATuMB5otjws0C9FN6+cNHpUBBRW2jPeUArl/Q49+fRnGVBl39VDj1xmjEvDMWvy8YiH+fGSI0tm3KzIFBUErFuJpT3mz/q78vZEFnYNEjwBlhno0/9pRe/vh6Ri9IxQy2Xc7B0q229ZXhS5HGdPbCROMJlwPx+bdEk3xTMcZgsFIqRpVGjznrzuC7IynIr1DjYkYpzl0vgUwswowBgehnLDe1lCX7+/lMqLUGRHo74s17OuH9B7qii58TDCyEE09qrR4njPft4set21/sv4ZfTnPbx7HGMlprp0OZBonibqD5fP3A0QVj7zN+0pdp5o9ULMIEPoMuJgd/nMsEywIDOrgiwPg73MVPhV6BXCP3X09b3vbzk0ktDQfhbTiRBgB4tJ95iU9TRCIGU3py2xL+e7fduI/7+MAg+DlzgaEv9idBb2Dh7iCHl5NcyPxJKagSsqD8nJVCxhfvPxM7QSJisDcuv9GpZxW1OlQbg2kyCZdFwpcXNeXL/dcw7rPD+ON84xnLu67kQW9gkVuuxsNrTghZfPWVVWvNppdZwrIsYrPK2vUElrXK1Vr8fYE7Xniskb5vfOkXz8047UskYiw2fa7f7JnHl37dquV6p1KK8OrvlzBw+X6M++wwxn92+KZXUtTHsqzQD9OZRr3f/mrrRaup74/tdl/NxeazmXh329VWaaLYmliWxS5jYGZCFx+se
Kgb3B1kiM+twP/9e7XBgUlds+e6jWz/DnVnfB3kEiwYFopJxgaVe+PysPtKHkqrtXB3kKF/iCvCvRzx7CgukLB06xWzM0Nf7k/CL6czsHxHvFVNi00dSizALuMOp87AlWn0/+8+LN16FaXVWoR7OeDL6T0tZtC4Ocjx0xP9sSA6FP1CXPHEkBCsfqw3Di8ZYRYsUtlJhb47wzp6NJuNI5OI8MWjPeEgl0AsYvDZIz3MGpa6O8it2rm3BZ/2/O+lbLPPz2Bg8dEurv/QE0NChIwJpUyMb2f2wehOnghwVUJuTNd0d5DjsQGB2PhEf/yxcFCTJT5Rvk6wk4lRVqNFYiMZR22B30k2LfsCuOkO/I50SwIPpmo0eqHPFR9YE4kYIfunPfv+8I1BewQ4N+g91Zjh4VxPhIOJ+RYDD2U1Wsxcexqz1p42O2vDsqxQ3vJwH3+rMtEeMfY+2X4516azlDq9QRjXO7qTJx7pG4CpxiyFkZEeZrftG+wKlVKKoiqNxcyz5IIqZJXWCCN7K9S6Bjt1BgMrNHvuGegMR4UUMwdy2Qp6A4tBoW7Y8dxQzDVmBAF1WSm2HnTxZURxOeWYu/4sHlp9otksLIOBvelBIrVWj092JwAARkV6gmGAn06m4+uDySisrMW568VCULSpXmTWWhDdASKG247zZ+Rbs+wL4EpVJxoPXN/6JxYFFbXwdlJgYte6bNDhEZ7495khiDI+t0wsMisxmNaX237H51agpFqLjp4O+Gle/wZZoDKJCE8a+/GsOWS5H0+tTo/Z684gq7QGwW52WDenLxzkEqiUUvQJdrU6+OViLxO+a+9vi2t08pdaqxe+ww810qzd1PguPljzeG8AXENua8u2DAZW6Is3trM3evg7w9NRjopaHY4n255x2Jb4vjQfPNgV93f3hc7A4r1tcej3/j7M/P4UAK7UydNRIQy4OF1vO8OyrDAJ8LEBQcK2cVxnbl3j90vOppVAreUyWT55uAcYhstCOZ1aDBEDvGzMFiuoqLU4bbD+c6abZCbcSPN5vr8NPz79nPEkTpJQ9uVgdnu+9GtHbI4wWIHvtcfjT2htPJVucd1fczgZizeex/jPDmN/fMOeRXE55TiTVgKJiLG5yTufLXc0qRApBZVCpts9XX2EfdMfjYGlbv4qMAyDUJPMnwRjMMw064cX5ukgZIDzj1FfnvFA3EkhEU4QWpPNddD4/WpqSiafmearUkCjM+Dl32Lw5t+XUW4yZe3otUKM+OQgxqw81GR52s+n0nHvF0fxsUm7hlvVn+cyUaPVI9zLQdj3qq/+tFM+8wfgAhIAUFLVMPPH39V8UEbwLTzxq0KtxeNrT2PT2QyhtLBWZ8DFDOuaxLeVGq1eyG5zsb/Ny76WL1+Ovn37wtHREZ6enpg8eTISEhLMbqNWq7F48WK4ubnBwcEBDz74IPLy2m4U5M3G7/zz0W8K/tiOHy/Oslzj3NbKjEjKr8TQD/dj5W7zdZJlWSzfHocVO+ObPWCIySxDdpkadjIxhnZ0h7uDHB8+1A0Mw/1o8z0YAG7HkS8x6WOS4t8n2FVIqZw7OBgu9jJ09VMhxN0eaq1BmEQxoYuPcIA6PzoUnX2cUFqtxTv/cNdfzS4XyqJYlqurtpZGZ8Ay4/PMHRyCjU/2F86suTvI8N8HumL7s0PRzTia3hKZRITXJkRi8/yBePPezhjfxVs4y2/qPxMj8Z+JkXhlfIRVyxbsbo/dLwzD/peihYyotjSmsxdkEhGSC6rMJnLsj89HYl4lHOQSPD2yo9l9fJ2V+G5WXxxZMhLx745H7LJxOP2fUXhvclcMCnNvNrAgEYvQM9AZgOWU+LZQodYKmYiWeiXxB2x/nM+6oX4Ip1KLoNEZ4KtSCGcHAQg7IHwT4vbAZ+LVP+PVlEFhbpCJRcgorjFrcAxw7+mstadxOLEAhxILhN4IAHA8uQhXssuhlIqbLUXhdfJxwhRjYGDZ1iuNbo8Sciuw+0qucP3Z6
yUoV+vgYidFz0AXMAyD5VO6YfP8gXim3rorFYswuhMX6Nt5pWFZCV9m0S/YFZ2MO/OX6/X9SS6oREUtl0EVYQzGLh4RhsUjQrFqRi/8/ER/s7Pf/GsDIJQoWUOjM+C4MTPjgZ5+UEhFOHe9BNO+OYnHvz+Fy5llyCiuxoYTaXjih7MYs/IQer+7B2FvbMfolYeEMoyb4fujqcgpU8PPWYmvZvTCO8Z+Fx/tSkCf9/biwVUnoDOw6OavQpinQzOP1rwgN3shU4xlARc7KbydFDf8uPXda3wOfl9m1qBgoTklL8DVDn8sHIT/TIzEqsd6mf0OdPZ1wj1dfeDlJMf/TYrCjueGNtr7bVq/ALjYSZFeXI1tlxtmYm4+k4G4nHK42cvw49z+Zgcrtlo0PFQ4cfMfCyXVGp0BC386h+SCKjjKJcIBfHNGRnph9qBgAMCrv1+yatLRpawy5JXXwl4mxsBQN4hEjNAoeNctVPql1uqFLI8+wa747JEeePOeTujmz2U1lqu5AAxf1sxv82OzysxGmh9PLkJKYRUc5BKzQOhY42s+mlSIylodDiVyQYhh4R6I8HbERJPm5yMjvRDu5SjsazeX/VNUpRFKtYAbC/7w9+U/owvpJcgsMZ/0ZapvsCu8nLgMuoziGtjLxJjQ1dvsNhO6esPdQYbccjX2XDU/JqrV6YVyunK1DnPXn8XK3QlmmZt8j7VxUd7wsnE7EOhmh37BrmBZ4KXfYlCt0cPPWYlu/irh89EaD0q7GDNYOxh/24uqNMJveqSF4A9QVy55+FqBxUArf1DurVIIJenpzWSR1Or0wudwKrUYZRYmU1VrdMIJn5+e6I9nR3G/hT+dTMfIjw/hz/OZ+OpAEmauPYXiKg0MLIRMM0s2nuICwZvPZtzSLT1YlsVPxmWd0T+o0RNPvYJcYHqV6Xab7/tTakXmT+AtnPlzObMMGp0BHo7c9Es+mHkjmX+tgc/6kYoZ2MvEt3fZ16FDh7B48WKcPHkSe/bsgVarxdixY1FVVbfD/MILL2Dr1q347bffcOjQIWRnZ2PKlCntuNSti4/k8Y1hKfhjm0qTRnMyCddQclMrTLpiWRbvbIlFRnENVh9KMaspP5FShDWHU7DqYHKzB6Z8Df6ISE8hs2NkpBfeuofb0f9wZwL+PJ+Js2nFmPi/I6jS6OFmLxMOkADASSHF7EEh6BfiinnGM50Mwwg78oXGZTPd2ZSKRfjo4W6QiBjsiM3FlphsLPkjBjoDKxxg/34u02wHqynfH01FSmEV3B3keH5MRwwKdceWxUPwx8JBOPjKCEzvH2h1ZkRzHBVSPDUs1KYddV9npcUARVtwVEjxgDHI9M4/V4SzbquNgbwZAwIbpDKbYhgGDnKJzWUcfOnXmZuUCcPvHLvZyyxOBBoS5o5OPk6orNVhdQvLCAEIJUND62V68SUAZ9KK26V/C8uyOHude6+bm/Rlyk4mEQ5iTEu/Kmt1mL3uDC5mlAo7UF8dSGqw/jzSNwAuFoKijXl1QiTsZGKcTy9tUOYJcJlVM747iac2nMPPxh28fcYyiRERnmYNxvuFuFrMQOMb+e6KzW1w0HvYeKZ0aEd3YUe/ftNnvsyhm79K2E7YySR4ZVwkJnT1sbizGWkM/tjSbPXc9RJUafRwd5Dhk4e749ArI/C4cZrfkWuFuO/Loxj64QG89c8V7I3Lw7X8ShQZd+KTC6rwyJoTrTJBpzlFlbVCIP7lceFQSMWYPTgEi4wNwxmGG+Pe3V+FV8dHttrzLhpRV1raycfJquwyW/UOdBGCSkqpGNP7Wc4qUEjFeGpYKEZ1ajjR7qsZvXDqP6Mxc2Bwk78rdjIJ5gzmssVW1evHU6vT42vje/zc6I4Nshdt5emkwBeP9oJYxOCvC1lmExy1egOe+eU8DiQUQCEV4dtZfZr8DahvyfgIBLvZIbdcjf/betXscS0FdPleK8NN9iv47+ieq3m3zIS8uJxy6Aws3B1k8FUpIBIxe
GJoB2x5egjOvjka/5vWA+vn9BW2Gz4qJQJd7WBgYTaOfYMxUPFATz+z36JwLwcEu9lBozPgUEIBDidyvyXR4Vz24jOj6tb3R43NxK0NFvAHpjLjGfZreZUtLt/hx5o/2NsfMokIJdVaIXOLn/RlSixicE/Xuv25e7r5wE5m/hssl4iFLDk+kMPbcjEbBRW18HKS43FjFs3n+5Mwex1X/lhWoxVOPDxuzMC01YO9uX0gfts+oYs3GIZBuJejWUYdPzDATiaBr4rbLuw3ZgpFNpJ519nHCX7OSqi1hgY9ooC6Me9eTgqhhKi5LJKE3AohIKU3sDiYmN/gNqdSiqHVs/B3USLE3R4vjgnHhnn90MHdHoWVtXhxcww+2pUAA8u1JQDQaMbttbwKoby2uErTaAnbreBUajGS8ithJxPjgV6Nn0B1UkiFYxNHucRsX8GFz/wxCaplGEvxAuoFN2/lnj98eXq/EFcMC/cQTmjzQez2wu8DOdvJwDDM7V32tXPnTsyePRtRUVHo3r071q9fj/T0dJw7dw4AUFZWhu+//x4rV67EyJEj0bt3b6xbtw7Hjx/HyZMn23PRW4XewAoTUvxcjMEfmvhlk31xeajVGRDibi80gPxwZ3yj0yLq7xRp9QYk5VfifHqJ2Q/7rit5OJbERfQ1eoNZQGm98YwKgCZ757Asi53GWmi+hps3d0gInhzKNyu+hIfXnEBKIdck+ItHezYIDLx9X2dsnj/QbIfy/h51OwfeTooG04iifFXCwcQLmy4iNqscKqUUvzw5AGGeDqis1QlTJJqSXFApjK5+fUIknIyj0kUiBr2DXFptVPDtZMn4CKiUUlzNKceGk9dxNq0YZ429C+YNDmn+AVqAb8J7Jq34ppSo8DvH9X+4eSIRg1fGhQMAfjiehrzyltVE8zt3Q8PNz+5bKnU7kVxkdlDQlpILKnG9qBoysciqSV+m6vr+cDuYVbU6zFl3Gueul8BJIcGmpwbCxU6KtKJqbL2UjavZ5ThyrRAiBkIzZGt5OSmw2HhQv3xHXIMpJb+dyxAmAf7f1qu4nFkmTAmydOBtydCO7rCTiZFdpjbL6qnV6YWSqmHhHkKfovqZP/x4756B1r+PDnKJcKBm7Vl3fod+WEeugbyXkwLvTu6C/S8Nx5SefmCYuiDXkvER+PmJ/tjx3FDsfyka3QOcUVKtxfRvT7WoWbcle6/mWdzp/2J/Eiprdeji54RJ3et2tpeMj8SFt8Yg8b0JOPPGaPzz9JAmm97bKtzLUSjXbCpL80aIRIxwADGtX4BZ/6i2MGtgMOxlYsTnVggHlQB3ciOnTA0vJ3mDyTUtNTDUDa8Zg3H/9+9VfL7vGj7cGY9Za09j15U8yMQifPN4H6FPn7XsZBJ8/HB3MAzXR+WhVccxcPk+dHxjB2auPW2W/VCt0WG7McuJ/ywB7mDF2a7x8sz2wI/67uqnahBodHeQY1IPPww3jg7n9TcGzvlJj7llauwxBqv5ciAew9RlPP1wIg0JeRUQMdyJCYDrX/bOfZ3x1LAOwvPwQcDmMn/Si7kD054BzrCXiaHRGxpkclqjqlYnlCR181MJwZBNZ7h9yvoZj7x7u9dlLT3cyPo7vX8gRAx3QpIfJc+yrDBRdvagELw7uQs+e6QHFFIRFwD/4ig+2BEvlPj0b6TEpzkTu/pAIa07hJxgUtr5QM+6fdOu/nW960KNGYw1xn6XnRrJ/DH9XPmSPlN88MdHpUCgMZDQXDAvpt4ggt1XGz5u3YmMuhNRQzt6YMfzQ/HKuAgopWLIJCKseLCr0HQ7JsPygIP6J2L+bqXJqG2BD2RP6uEn7N83hi/94vv98Cz3/OHKvgJczMu++IBdRnFNmwSq1Vo9zl0vsZjd1Rw+mNfD+PvIr6PxN9DwvTXUTfriPh++7Eura9n7d0sdtZWVcV8iV1fjpJdz56DVajF69GjhNpGRkQgMDMSJEycwYMCABo9RW1uL2tq6AEp5ufED01YB2
sZ7arQHjUYPJbiNWLATCyXUKC0r4Zb1DpFRVI03/r6MviGuDcoJWsPumBQoocbkKF/M6uuBLWcScS2/Av/beQHv3NfF7LbLtsbi19OZcJRL4GwvhVTMIKO4RjgbMKCDKz6b1hNyiQifbD8HJdSI8HZEQm4F/jiZgPkDvZFTWoNjcdehBHd29mRCBhIzAy32lUnILUdeUTGcJSKM6GDf4HN9fXQgikpKhGbNU3r5Ycn4SC7AY8U6EOrMoI+vDFeyyzEpygsifTVQL0N28VAfHIxNxTXjeM2lEzrCU6nHvP6e+L+thdh0Ig6z+nqYNftTa/XYczUPB+LzEZNZiuxSbh0dHOiMKV2d76j1s6Xc5MDrYwKwbMtVfL37EiK9HaGEGg/39IOnUt8m71EPHykcRbUoLVMju6AQfk1M52BZFnG55fB2UlosrbNGVkEhlFCjowvT6OsZ0cEeAwPkuJhRhjV7L+Ht+6Is3q4xeWVqZOQVwo4BhgQpzZ5HCmBQoALHkoqw+2IyVhdUYdeVPIgY4OOp3THBJKW/Ley7lAol1Bga6g5HsQbQWl/aNqKDHT6BGpdSs1FcWoIXNl1AbFoJPBUSfD+7B7r6ybFgkDc+23sN3+6/jM7eKiihxsQuPghwZG1ef+b198TfZxKQWVyG7/ZfxnOjuaCcTm/Aj4evQAk1PBxlKKhQY9EPh1FYoYGTmMGwEKVVz6UAMK6jI3ZeycW+Syno5sUF2s8nF4LRViPAUYZINxEMGgmUUCM5Kw+splLYgb56PQdKqNHHV2bTa+vhLUVBsRqJmbkYFKSERmdAZa2u0XX6ZHw6lFBjZJj59jbQCVg5pSPeHBcEiZixuIP786wuePrn8ziVWoz5aw/jvcldrC7dsSQhrxzP/HgcMrEIh5YMF3aOtXoDdpxPghI6vDoqqsF220UGwKAF2qhK4IP7Q9HXT46Henu12bb8+Whf9PaRYVi4R5v/XqikwOx+Hlh7NA3v/HEKfrP6ooO7A9YeiIUSajw9JBgKqAHbjwMsemKAJ65ez8HOK7lYteeScLmjiMHn03pgWIhdi15zHz85Fg32xtqjabhynQvuKAGcvZaJxT9U4+sZvaHVG7BgwznkF5XCSyHBiNC69VwKYGQHe+yIzUVMahb6B7R+SZ+t4tK5730vG773AwMV+PecGjEpWSgs8cSbf16GzFCDPkEuiHATNXic8RGO+PGwGpdTs6EEF2BxkWkBLfeBz+lnDC4Zv2dhzoASauQUFgLaxoPfWfl1v39ivRQX0quQmJmLcFfbzpknZZVAwarh7iiDm1yH/gFyXLmuRma+GkoAnS28JgDo6S3FzD5cEKux7aavPXB/ZxV2XcnDsz8exY9z+yGtqArXcwvgJhNjek83QFuFyV2c0cmjB5775QKuF5Xg79MlUAKY068DGF3LSm8cxcD9nVTYeikH3io5enrX7bdOinLG2gN6+Lko4GWyPxThyuCs8ZhHJhYhRIVG14vxEQ745Zgax+KvQ6fuYJYdVVRaDCXU8HcwIMR47JRXVNTkOhZvXBcHhbrheHIRTidkQFPTUcjsAoCziRlQQo3h9b7DcgCLh/hgek83aPUGeDopkF+uhhJqZOSrUVlZZnYSlGVZ7IpJghJqzB4UhPXHr+PIleuoquwA+1vsZGlBRS0OX0mDEiwe7+PW7Pd0YKACf55SI9jJ/D3yVGihhBrVVWWAtgp6A4uS0hIowTbYp/GxY6ESa6DRG5Bb2PS+rK1YlsWLG8/jQDwXyOvgYYeeAS5YEB0K/0ZOZJreNz4jB0po0MuXW58j3ERQQo38YjWqKsva7fMrryiFEmp4Kbh9NiXDrX+MrtL8M7NyO8uwVpxCfvHFF21e0DfffFMI4ljDYDDg/vvvR2lpKY4ePQoA2LhxI+bMmWMWzAGAfv36YcSIEVixYkWDx1m6dCmWLVvW4PKy94A2KG8nhBBCCCGEEEIIaRflakD1JpdM4+TU+EADq0JYn332GQYOH
AiZzLozyEePHsXTTz9tU/Bn8eLFiI2NFQI/LfX666+bBavKy8sRENA6Kb+EEEIIIYQQQgghtxur85f++usveHp6Nn9DAI6Oto1Wfvrpp/Hvv//i8OHD8Pevm3Li7e0NjUaD0tJSODs7C5fn5eXB29vbwiMBcrkccrmFRrELsoEmomDtIbu0BqM+OQSZRIRvZ/bGrLVnEOJuh+3PDWvvRbshmcXVmPTVMVRr9OgeoIK/sx22Xc7B9P4BeOveurKQqzllqKjRob+N9fG8hT+dw8GEAiwcEYpnTUrK/jiXiTf/jkWIux22PTsUDMNg2jcnEJNRhlfHR2B2Iz1Z8srUWPjzOcTlVKBXoDN+eqI/GIZBZnE1xn52GHyOXICrEjueGwaxiMHj35/C2bQSjI3yQpVGj2Mm/R36hbjgiaEdhPHVt5rs0hqM+/QwdMaaW1d7KQZ2cMcDvXwxsIO7zQ2J71ZfH0jC7+cysGZmH3T0bN2x8vXtvZqHZ365gDAPe2x9dqhwOcuymL3uNE6nlsDXWYHH+gfBU6XAy5tjIGKAX+cPQGcfFZ7fdAF7r9b1xZg1KAjPjw632OR3zKcHkVmsxk9P9EPvoKYD+ZczSzF1zUmIGOCfpwcjzIr3wWBgMeTD/Sip0mLDvH5mE+5Mb/PZvkS42csxvX8gpGIRtHoDXth0Efvi8mEnE+PfZ4bAx1lp4Rla7qcTaXh/ezx6Bznjpycalhdb42BCHhb+dAF2MjG+mdnb4ntYVqPFmJWHUKHW4ZuZvW94W6HW6jFr7WlcyiyDi70UJVVa2MnE2PdStFB2tO9qHvbG5eONezvZ3K9rwU/ncCihAINC3XAlpxxl1dzj735hGNyMDdrf2RKLzWcyMW9IMF4eF4nP9yVi1cEUTOzqg0+mdrfp+TKLqzHm08PC32OjvDA2yhsf7YxHXjmXEfyfiZF4fGAwXth0ETtjc7FweAc8OyrcpuexZPvlHLzx12Woteb1V69NiMQs43Smxqw6kITP9yfB1V6K4iotHBUSHHttJKRiEe77/AiSCqrw8dTuuKdr25Yu3m2qanV4euN5nEwphpuDFHteGA6l7NYq92+J06lFWPTTedjJxfh+dt9Gf2dKqzUYuHw/AODk66PavN9SU06nFmHW2jPwUcmx/+URNt33w51xWH/8OmYNDMJzjfw+mTp6rQBP/ngOKjspji4Z0WST8KySaoxeeRhSMYMLb481K3k3NWTFfhRVavDbgoHQ6g2Y/u0peDjKcHjJSJtey+PfncLZ6yVY8VBX3G/s7zV65UFklaghE4tw7q3RrTIsI6ukGo9/fwo5ZbUY09kLnz/a84Yfs7XlltVgxMeHAABLxkVgTjP97fbF5eHpjRfg66zA3hejhTLiISv2oahSiz8WDURnHxXGfnoIGcU1+GFuX2FQhKkL6cWY/u1puDvKcPiVETiYkI9FP1+Aj0qOfS8NB8MweHlzDLZdzsGC4R3wnJW/H98dScEnuxMxLsoLn03j3m+t3oDhHx1AcZUWax7vhWHhnkgrrMKE/x2BWMTgwMvD4eHY8qmDlqQXVcFLpYBcYtu27q8LWfjPn5fh4SjDnhejbb6/qd2xuXhu00X0CnTGz08OwJ/nM/HGX7EYFOqG72f3bXD7/26/ig0n0oX9hNaw5LcYbL2Ug46eDvh94SChpC+/Qo05604jpaAaPio5fpzb32IJ2Me74vH90TRM7euPZffXtQ35cGcc1h27jscGBOIN47Cem2359jj8eOI65g0NxstjI/HD8TR8sCMe93bzwUcPm+xXlZcDbzZfrm7Vnt+6deugUqmav6HRmjVr4OXVfCNJlmXxzDPP4K+//sLBgwcREmK+IejduzekUin27duHBx98EACQkJCA9PR0DBw40OrlAQBI7bn/biEaBqiBAhKxBG4urqiBAhkVoltuOX86eR0nkovwydTuzf4IA8C+5AIUaaSI8nXD908MxPHkIvx+uQR7k6rwlvG1Vai1e
OT7y6jR6nHk1RHwUdl28FZWo8XepEpoocD4HqFm79m4nqF4c1sqrhbqcT5HA3cHOU5m1ELEKDCxVxggtVz/5+Vujw0LRmLP1TwMj/AAY8x08/eyx4CIQKGZ5NSBkRDLucZ1c4ZH4cj6s/jnCtevSsQoMKGrD+YP69BmDTVbi6+HPdbMHYa0omr0C3FBqIdDm0x/udMtGtsdi8badlDbUj1D/VCDOFwu0KNEIxWmQu24nINDqTWQS+yw/qlooUnzroQKbInJxqv/pKB/B1dsvVoOmdgOYzp7YdvlHKw+nof9yVVYP6cffE0CKFq9ASmlDPRQwN/To9HvDK9riD2Gdg7C7qt5+ORAFlY91lu4rrCyFpczy+DlpICfixIqpRQGA4sL2aXIrhLDQS5H9w5+gIUdYBGAFyf2MrtMKgU+mTEIj35zEufTS/H5kRwsn9K1pW+pRdsTKlADBYZ3CWnx9jg6KgTLHpKim78Kkd6WTzyopMC386KRUVKNoZ1b3l+Gp5ACX8wagklfHkN2mRqAGNP7hcBZVddoeVT3DhjVvUOLHn9klxDsTKjAvuQqAGJEejvjvcld4OZSF9iKDPBBzZlCnM/R4mhaDb44kgMtFOgX7m/ze+nnaQcvN1dkl6nx1r2d8Vj/QDAMg2Gdg/D+tjj8eiYDb21PQy2jxP7kKtRAgcGdglrlN3RirzCE+XnhD2Pj4LSiKlzKLMNXR3MwbXBkozvKtTo91p4pQA0UeOme7nh/WxzyqzQ4k1mLAFc7XC7QQyxSYkhkELcyk1ZjLwVWzRmGdcfS0C/EBUr7W+uEX0v1C7fHvtd8IJOImgxmOavs4e7qgoziGlwp1GFQqPPNW8h6YnLzUAMFOvp72fx9fOXe3lgwpnuzzWd5gzvZ4dX7WHT0coRE0fSJB293O+jEStToWeRUi+Bvod9IVa0OmZUiAAoEeHlAImJQAwXSK4CiWokQ6G4Oy7K4mKfl3gc/b+F96Bzoi6SSbAR5NL+81vLztMcP80fi1zPpeHxAcLO/2e3By80OIpk9qjR6hFmxXnDb8kQkl+pxpUCPLn4qaHQGZFaKAYjh5eoGSOXwcnNDYnEh0soY9LPwmBdy8lEDBSL8PcHIHDAwUglIE5FSxj1uZx8n7E/hfj8GRlj/+xEV5IsapONMlka4z9GUfGRVieFmr8SgyCBALEKwjz3CA7wRk1GKDWcL8Nzo8EaDjrbaEpONZ3+5AB+VAotGhGFqH3+rgjgGA4uvj+agBgpMHxIJufLGtpWOTs6ogQK5NWJAao+0cu474+nmavH99PPwQA3yca2EbZXf6wMJ+dh8qQQiRoH/e7g/ZMq675Wnqz3WPTkC0749iZSCKiz+PRF/LRzU4AT3mSwNaqBAVKCv2TKF+nqjBnm4lKdrt+PzArUENVDAwcEZkNpDJLNHDRSoYuXmyyTVN/oYpqwK/syaNcumhZw+fbpVt1u8eDE2btyIf/75B46OjsjN5RrfqlQqKJVKqFQqzJs3Dy+++CJcXV3h5OSEZ555BgMHDrTY7Pl2w49ok0pEQiS4olaHGo3+ljlbpdUb8MGOeFTW6jCxqw/u6db8mcrEPG4yT3S4B+zlEgwMdYNUzOB6UTVSC6sQ4m6Pvy5kocI4lSYup9zm4M/BhHxo9Sw6ejo0aLbsIJdgYlcf/HE+E5vPZAoHtYPD3OHZTOMne7kEk3s2HHX4+MAg7I/nMg1MJy8MD/dEvxBXXMosxSN9AjBvSIcbHil7Mw0Kc8egsOZvR24Nbg5yhHrYI7mgCkeTCnFfd1+otXq8vy0OADB/WAez6Vxv39cZh68V4GpOuTB29KOHu2FSDz88GJ+HJb9fRmJeJR777hR+nT8Ano7c9yOrhJvCoJCK4GnlWaqXxkZgT1wedsTm4nJmGbr6q1BcpcEDXx8Tpj4A3Ohcja4um4LbPth25lMuEeP1iZ3w8OoT+O1sBhZGh7ba966kSiNMmhnb2XKGqTUYhrFqylD3AGd0D
3Bu8fPU5+mowLcz++Dh1SfAgrV5elhTxnT2gsduOdRaPV4aE47HBgQ1OGvNT/y6mFGK+RvOQqtncU83HzzU2/bya4ZhsPWZIdDoDGYHXI4KKZZP6QpXexm+PpiM94zrv5NCgu6tGHQP93LE6xM7AQA0OgOGfrgfeeW1+OdidqOf7bZLOcKY5Xu7+XInP85lYm9cPoLduXW0d6BLu2Zl3MkUUjEWGqdc3kmsXV+6+Kq44E9WOQaFtt6kOFvFZJYCaNlUOYax3JS9qds3ltFdn1jEwN/FDqmFVUgvrrYY/OEngTnbSYUJq0FudrheVI343AoMDrPuNzG7TI0KtQ4SEYNQDwfh8qEd3bElJhu9bJwi2ZwAVzu80kpZFG2BYRgsHB6Ks9dL0NdCpm99CqkY0eEe2HklF7uv5KKLnwr5FXUNo/nG/0FudjhyDbhebLnZLT99squfs/C4Qzu6Y/fVPEz66hhc7WUortLAXiZGz0Bnq19PV38VGAbIKq1BfoUano4K/GycnHVvNx+z38YHevgiJqMUn+9PwqazGZjSyx9dfFUQi7j3pWeAc7PHJ5ZsMU4RyylT462/Y/H1gSQM7OAGlZ0UzkoZhnR0FyZ0mTqQkI9r+ZVwlEswvX+gzc9bn4s99z3hp1JlGr9Dlr5fQN3Er+vNTGmz1ood8QCAOYND0MPC/pSnkwI/P9EfY1YeRkxGKX45k44Z/eumB+oNrLCe9Ki3DkT68BO/KsCybLucJOenqPHTvvh1S9Ne075SUlJQU1ODTp06QSSybQd+1apVAIDhw4ebXb5u3TrMnj0bAPDpp59CJBLhwQcfRG1tLcaNG4evv/76Rhf7lsAfAMnEIjjKJZBLRKjVGVBYWdvoeOWb7WJGKSqNQZrz6SVWBX+u5VcCADp6cT92DnIJ+ga74nhyEQ4l5CPYLVgYLQgASfmVGBlp3chhHn9wNizccpnE1D7++ON8Jv69lC0cNDxgIahjreHhHvjvA10R5GZnNm5dJGLwy5MDwLJsq6TuEtKcwWHuSC6owkubY5BaWAWd3oCs0hr4qBRYUO+gx91BjjcmdsIrv3NTaV4ZF4FJPbjvwchIL/zztBOmrj6BlMIqPP7dafz61AC42MuE0bSBrnZW/9BFeDticg8//HUhCx/vTsC3M/tgwU/nkFFcAyeFBBKxCMVVGrPAj0IqwrS+LevJ1jfYFUM7uuPItUJ8vv8aPn64dbKv9sXnQ29gEenteFsFck118VNh5/NDoTOwZhldN8rFXob9L0VDKhY1mgUa7uUImViEauN46sFhblg5tXuLz3Q6NnIQyDAMXhkXAYYBvjqQDIAby9tW22GZRIQ5g0PwwY54fHs4BQ/18odIxKCoshZfHkhCeY0O7o4y7DGOEJ45MBgyiQijO3ni93OZ2BefhxB37gzdiEjrSugJsVUXPxUXgM+yPIL6ZuHHvLdmMLa1BLoagz9F1RhkIU54vYgLIgSZ7Id38nbC9aJqxOWUY3CYdUG1eOMJlzBPB7OpUg/19oe7oxy9Als3+HM7eNrGqb9jo7y44M/VPLw4NgJ55Vzwx9NJLuybBLly29XGAglCIDKgroLl8YFBOHKtEDVaPQoquBLiEZGeNp2IcpBL0NHTAYl5lbiUUQZXhxrsjcuHiAFm1isNfmxAEHLK1dh8JgN55bVYdTDZ7Hp7mRg/zutvMVDTGI3OgBPJRQCAp4Z1wD8Xs5BTpsafF+rGyn+6NxGTe/jiPxM7mQWXVh/inn/6gECbAq2NcTGWlRdVaTB/w1kk5nHHgY0dywa51X1mNxpQScqvRHxuBSQiBs+MbPxsto9KiZfGhmPZ1qtYsSMe46K84W48PkzMq0C1Rg97mdgsUAtw31+xiEFZjRZ55bXwVt38rLoSY1CNL9/n11OtvmVjQa0O/mi1Wrz33ns4f/48BgwYgNdeew2PPfYYNm/eDACIiIjA9u3bERwcbPWTWzFoDAqFAl999RW++uorqx/3dlGX+
cOAYRi4O8iNEeRbJ/hzJLFA+Pf59BKr7pPEB39M6tKjwz244E9iATr7qoQNg+ntbXEqhdvg9Q+xfPagX4ircKamqrgaSqkY46Ju7Cx+Y9Fx7qCGyqXIzfHC6HBkltRgf3w+Vu5JFC5/fWIn2MkabtIf6u2P4ioNFFIxZg4MMrvOz1mJjU/2x9Q1J5CQV4FHvz2J+7r7Ct/JQFfbUlyfH90RW2OycSixADPXnsLp1GI4yCX4Y+EgdPRyRLVGJyyLnUwMhUR8Q72lXhobgSPXCvHn+UwsGh4KH5US3x5JwenUYiybFNXgR9wau65wGag3sr24FfA7V62tsWAMTyYRoZOPI2Iyy9DFzwlrHu9zQ70EmsIwDF4eGwGpWITvj6ZiWr+2He4wvX8gvtyfhGv5lTiUWIBeQS54/PvTQlYdTy4RYXo/7vdiaEcPyMQiXC+qRmYJlwE3koI/pI1E+XLlG7HZ7Rf8Ka3WCNkzfCbgrSTQuH/Nn+Sojw8iBJpsQzv5OGHnlVzE5VRY/TzxudxtI73Ns9MZhsGICNoGWGNkpCfEIgbxuRW4XlSFnDIu+ONtEsjgT9KkW/g8K9RapBRwwTzTdXFoRw/ELhuHwspa5JSpUVKtsSnwwuvu74zEvErEZJbi3HXuGOmh3v4N9j0kYhFen9AJL44Jx/64fGyJyUZRlQYsyyKvvBbpxdWYvfY0fn6yv9XZchfSS1Cl0cPdQYbXxkfixTHh2HM1DzllNSit1uJ6cTW2X87B3xezsTcuH3MHB2NkJy9o9QacSSuBTCzCPCsz5prj6ShHdLgHDiUWYNeVPOFyfxfLJ5/8nJUQixgh+NaSrCfeztgcANyJUT440pjHBwTh93OZuJJdjv9uj8PKqT0AADEZpQC4TMX6J6rkEjE6uNvjWn4l4nLL2yX4Uypk/vDBH24ZdYY2Dv689tpr2LBhAyZNmoS1a9fi9OnTSEhIwMaNGyESifDuu+/ijTfewM8//9yiBbkbmWb+AICHIxf84aPQt4LDJg2Mr2SVo1anb3JHvrCyFsVVGjAMzDZ+0REeWL4jHidSioQzIB6OchRU1Noc/CmoqEWycWPer5HgD8MweLi3Pz7ezR0cj43ygr2NzU0JuRW52Mvw/aw+2BKTjWVbr6K4SoO+wS64r5GsPIZhMD+68TKIIDd7/PxEfzyy5iTicysQn5tgcp1tQeggN3tM7RuAjafScTKlGAwDfPFoT3Q0lmbaySQWA1Qt1SPAGaM7eWJvXD5e/i0GuWVqY68b4MVNF/HnosE2ZZyU1Whx5BoX8L7dgz/t6bUJnbD9cg6eG93R5qbStmIYBs+PDsdzozq2eTq2k0KKR/sF4NsjqfjqQBIMLIurOeVwd5Bh9qBgFFdpUVxVi1GdvIR+XPZyCQaEuuFwYgH0BhZ+zkqEe9kelCTEGl2MB7iphVWorNW1+ffPEj7rJ9jN7pYsbwxqIlgA1AWFzDJ/jKUf9QO9jTEYWOyL4w6CI33ujN5T7cHZToYBHVxxLKkIu6/kgd/Emx6AN1VCxGfA+TkrhSwPnljEwMtJAa8bCDx0D3DGb+cy8euZDBRU1EImFuHZUY1nN8klYkzo6oMJJs3+qzU6zF57BqfTivH496fx7cw+cLWXokKtg7OdTMgYre+I8fhsSBg3oEUhEuO+7ua9Ay9lluKtf64IJWef708S3sMpvfxuKOhiimEY/DC3H+Jzy/Hn+Sz8fSELKqUUnRrpdyiTiODnrER6cTXSiqpvaDm2XeZO2FkzQEEiFuG9yV0wZdVx/Hk+Cw/19segUHdcNAZ/GivBj/RxwrX8SsTnVLRL4JbP/OHLvvi4gbaty75+//13rF+/HhMnTkRiYiIiIyOxbds2TJgwAQDg6emJGTNmtGgh7lZC8McYTOH7/hRW3hrBn9JqDS4Z0yXtZWJUafSIzSpvMjp+jU/1c7Ez61sU4eUIbycFcsvVQlT4lXERWPL7JSTlV9qU9nfaWPIV6e3YZJR3S
i9/fLInESwLTO7R8pIvQm41DMNgUg8/DAlzx84ruZjQxeeGDnzDPB3x56JB+O1sJvLK1SisrIXOwOLRFmRSPDuyI/44l4lanQH/mdCpzUtcXhgTjr1x+TifXgqA28krr9EiJrMM646l4omh1jc33nQmHWqtARFejsLOPrHdwFA3DAxt2RTHlrpZdfhzBodg3bE0nDWe5VUppdgwrz86NXGAN7qTJw4bs2hHRHpQY33SZtwd5PBRKZBTpkZcTrlVvVVaG58B0Zq9zFoTn/mT3kiZULqQ+VMX/OGzMeJzy1FSpRGCu4358UQazqeXwk4mxr1WtEsgjRvb2RvHkoqw60qu0JPHLPPH1Q4Mw5282Rmbg/Fd6t7vy8ZAZDf/tslA4/vL8Cftp/cPbLTPTWPsZBKsndMXj39/ChfSSzF1zQnhOrGIweb5AyxOC+VPVDU1JbSbvzP+WjgIW2KysSM2B8eTilBRq4NUzODJYS0b/NCUSG8n/GeiE/5j7JXXlCA3O2Pwp6rRE/nNSS2sQlxOOcQiBmM6W9c+pGegCx7tF4iNp9Ix8/vTeLhPgNBKxFK/IIA73twaAyTkWhf8bU16A4tytXnZl9Dzp4VlX1YXN2ZnZ6N7d66nQnh4OORyOcLC6mrrwsPDhYbNxDp8rZ7MmL7FB39ulcyf48lFMLBAR08HDDQ2DrzQTOnXtXwuzbX+mU2GYRBt0p+nu78K93f3BcMA5WodCmwIeJ1Obbrki+frrMRb93TG3MEhjfYGIuR25uYgx4z+QULjwxsR5GaPl8dF4KOHu2PdnH7YMK+/VSPb6/NWKbB+Tj+snNodTwxtvWbDjYnyVWHmwCA420nx0phw7HspGv+5h9vx+Hh3grAjfyK5CC9uuoizacUWH0enN+CH41wvsnlDQugAnVjk66zE/cazq/YyMX6Y26/JwA9gXuZFJV+krUX5cge6/IHvzXbGuI1tj8CTNZoqEwLqGgebZv54qxSI9HYEywKHrxVYvB8vrbAKK3ZyGbSvT4i0ORhAzPEH9efSSxCbxR18m2b+2MkkeMzYvPeZXy7gkDHQnllSjS0x2QC45sxtIcLbUahmUErFWDyiZRNUHOQSrJ/TD0M7ukMqZuBsJ4WznRR6A4v/23oVBoN5hkdxlQaXjFlNQzs23YNKJGIwuacf1jzeBxfeHoM/Fg7ElqeHtKgsvjXxz//5vmu40sIy1e2XuZKvQaFuzQZkTb02IRLR4R7QGVj8cjodqYXcd76xht986SZfynkzldVowXfJcTZm/vBlX23e80ev10NqMpZUIpFALK7L7BCJRFb18CF16jJ/jGVQxpREWwIhbYlPKRza0QPujjLsjctrtu8Pn/lj6aAxOsIDm85mAABmDAiCQipGgAsX+U3KrxQmDTWHj9D279D8meW5rTjphhBinZud9bHs/ij836Quwt/T+gbgn4tZOJlSjJd/i4GTUoq9xhT8w9cKse+laLPG7QCw60oeskpr4GYvw/09bnzsOrlzvTohElKxCI/0C2j0TKEpfxc7zBwYhMySGqubxRLSUl38nLA3Lq9d+v5o9QZcMGZhtvRsflvjM3/KarQoq9aalaZp9QZkl3Klw/X7pg2P8ER8bgUOJhQIgxPqMxhYvPJ7DGq0egwKdTObKERaxtdZiW7+KlzKLMMJY7/P+qVaS++PQnGVBtsu52D+hrOYOTAYG05cR41WD5lEdEOTO5siFYvQ3V+FM2klmDskWDiJ3xJ8FimvoKIWIz4+iJjMMvx9MQtTevkL1x1LKgTLckEJW0qmJGKRxSyi9jBvSAj2x+cjvbgaD646jg+mdLM4bbkpO4z9fiZaUfJlykkhxQ9z++FMWjE+25uIY0lF6OTj1GgJIF+6mZRfCY3OYNbAva3xk74c5RKh0TNf9qXT34RpX7t27YJKxUVPDQYD9u3bh9jYWABAaWlpixbgbsana5n2wAGA/PL2D/6wLCukqQ8Nd4edcbLLueslTZZo8WPeL
fU0GBzmDld7GeQSEe7rxh1chXk6IL24Gsn5lVaNJS2p0giR11t1x4IQcnPV3x4xDIMPpnTDuM8O47TxLLRYxECllKKwshYf7YrHe5O7mt3n+6MpAOoC04Q0xstJgRUPdbPpPqbBSULaEt/Y9krWzS9RuJpdjhqtHiqlFGHtnFnQGDuZBO4OchRW1uKXM+koq9Eiu7QGYzp7oZOPE/QGFnKJCJ71DuRHRHhg9aFkHDL276rfT45lWXx9MAln0kpgLxNjxYPdbmigAakztrOX0EsKAHzqNd0Vixh8+kgPVGl0OJhQgG8Oc7/n/YJd8d8pXRDm2Xbr4tL7o7A/Lr/Vy6g8HOVYPCIMK3bGY8XOeIzv4i30TKwr+bp9TyYEuNph69ND8NymCziYUIDnN11EtUZv9ej59KJqxGZxJV8t7dHYN9gVPz8xAMkFlXBWNt6fzFelgKNCggq1DskFlc1m+7amUn7Sl71JAs7NmvYFALNmzTL7e/78+WZ/U5q8bfjMHz6Sx9ew5pbX3PRl0ekNmL3uDCRiBh891B2VtTpkldZAJhahf4grGDCQiBjkldciu0wNP2clUgoqseHkdSwaHiYErixN+uKplFLsen4YxCJG6AcU5umA/fH5Vjd95g/kQj3sGzRvI4QQXrC7Pd68pxPe3nIFIyM88frETiioqMWj357Ez6fSMaWXvzBq90J6Cc6nl0ImFuGxAdbteBBCyK2Ib/p8Lb8CNRq9Wf/FtsaXfPUJcrmlAx9BbnYorKzFBzvihcv+uZgNJwV3WBToatdg+XsFucBRIeFKbjJL0dNkVHuFWos3/ooVyoz+c0+nW2Zq751gbJS3MMAFaJj5A3An0lc/1hsLfzqHy1nlWDIuAg/19m/z9TDKVyWUWra2OYODsfH0dWQU12D1oRS8OCYcLMuaVWbczlR2Unw/qy/e/fcq1h9Pw6Yz6VYHf7Ybs34GdHC94dYHzZXAMQyDKF8nnEwpxunU4psc/OEyf5yVda+RL/tq854/BoOh2f/0en2LFuJuJWT+GIM/vs7cSLyskpsf/EnMq8TRpEIcTCjA/V8exbdHuKh57yAX2MkkUMrEwsp+/noJanV6PLXhHNYdS8PKPVxtc1FlLYqquJU01NNyd3oPR7nZl5Q/M5RUYDn4sz8+D2M/PYSdsVw/qdM2lHwRQu5ujw8MRsK7E/D97L4I83TAwFA3PNjLHywL/OfPy9DqDTAYWHx3NBUAcF93X6vLTwkh5Fbk6SiHu4McBhZYsTMe89afweAP9mPDyett/txC8OcW7ffDu7+7L+xlYkT5OuHRfgGYP6wDXOykKFfrAFiedCkVizDMeLB9IKGu78/lzDLc8/lRbInJhljEYMn4CEzvRycRWlNHTwezqVeNlecopGKsnd0XZ94Yhal9A27pAKQ1FFIxXp/A9TBccygZ3x5Owf74fOSUqSGTiO6ICgixiMH8aC5r6nJWmdDcuCksy2KrMdBqa8lXS/FTvvgWAjcLP+nL2aQ8VXozy75I69LymT/Gsi8/Fy74U1KtRbVG16ojkZsTb9LBPKdMjY2n0gFwJV+8XoHOuJxVhvPpJbiWVyFk6/x7KQfv3BeFa8a/A1yVVi97qDEV01Lmj05vwNItV5FeXI2nN57Hl9N74pSVzZ4JIQRAg9rsN+7phH3xeYjPrcCIjw+ioKIWtcZt8TzqEUYIuc0xDIOufk44kFCA9cfThMvf+jsWNRodnhoW2ibPy7IszqZxfSH7hTQ+FfZWMGtQMGYNCja77PnR4fjtXAYOxOfjyUamRA6P8MC2yzk4mJCPF8eEI6O4GtO/O4kKtQ5+zkp8/mjPJifikpZhGAZjO3thzeEUuNnLmuy5cqdVoUzo4o2BHdxwIqUI72+PEy7vH+J6x5So+6iUCHazQ1pRNc6mFWNkZN3krhPJRfBykqODSXbOv5dycCW7HHKJqMUlX7Ya1ckLy3fE41RKMSprdXCQ35xjdD7zx8VkujW//rdp2
deWLVusfsD777+/RQtyN+Izf+TGCJ5KKYWjXIKKWh2ySmrQ0av1Rg2XVmugUkob3SgmGPvoTOnph+JqDQ4az2oMDatLKewV5IIfTlzHrthc5BsnkimkIlSoddgXl49i4wpqqeSrMXwdbl55LcrVWjgp6iKbW2KyhWkMOgOLpzdegN7YVLx/CGX+EEJs52ovw38mdsKS3y8h05hlKRExeLiPPzr73rxUXkIIaSsz+gchragaoR72GNDBDfkVtfjmcAr+uz0eGp0BT4/s2OrPmVpYhaIqDWQSkVB6djtRysSYOTAYMwcGN3qb6Ahun/hSZhlyymrwzC8XUKHWoUeAM36Y08+seTRpXZN6+GHtsdRGJzLdqRiGwXez+mDz2Qzsj8/HyZQiaPUs7rlJGS83y4AObkgrqsbJlLrgz5XsMjz67Uk4yiX4a/EghHk6olqjw3+NQbBFw8NuWguQUA97IUB1JLEAE2x4/+NyyvHr6XT0CHTGAz39m7+DiRIh+GPS80d0Y2VfVgV/Jk+ebPY3wzBmk71MAwpU+mU9rTFdyzSC7eeiRHxuBbJKWy/4s+ZQMpbviMcbEzs12pAszhj86RPsikf6BuCbwymo0ejQxa/uYIjvj5Fdxk1CGBflhQ4eDlh1MBl/ns8UMpc6Wmj23BiVUgoPRzkKKmqRnF8p1FDrDSy+PJAEAHhpTDiSCirxz0UuxS/Izc5szCMhhNji4d7+8HSUgwXQwd0efs5KoYEeIYTc7kZ39sLozl5mlznKJfhkTyI+3p2IPy9kwc1eBpVShnAvBwwOc0fvIJcbyiTgS756+DtDLrkzMhLq83RUoKufCpezyjBn3RnE51bASSHBF4/2pMBPG+vs64T9Lw23aaT3ncJeLsGcwSGYMzgEFWotMktqhPHjd4qBoW749UwGThonugHAX+ezAAAVtTrM++Es/l40GN8dTUFOmRr+LkqhXOxmYBgGozt54bujqdgTl2dV8OdCegm+3J+EffH5AIBfz2Tg3m6+QtmWNerKvkx7/txY5o9Vz27a12f37t3o0aMHduzYgdLSUpSWlmL79u3o1asXdu7c2aKFuFvV1mv4DAB+fN+f0tbp+7MzNgfLjQ3t9jRRpxifw5V9Rfo4QixisHB4KF4cG2EW2PN3UQoRVkeFBO9O6oIHe3Fj+Q4lFuBUCvfDb0vmD2DS98ek9Gvb5RykFFRBpZRi9uBgfPJwd9zfnZsQxtddEkJISzAMg+ERnhgR4YkgN3sK/BBC7njPjOqI1ydEAgBSCqpwJq0Ee+Py8PXBZMz47hS6LduN9/692uLHP2Ms+eoTfGeXPY0wZv/wk2c/fKgbNXe+SQJc7W5auc2tylEhRScfpzuuvI2v6Ig19v3RG1ihgbqdTIzrRdWYvf4Mvj3M9Wh8697ON73sbVQnLqB+ID4fekPT/Xbicsoxdc0J7IvPB8NwvY1qdQZcy7NuwBGv1ELmD5800tKePzbv8T7//PP43//+h3HjxsHJyQlOTk4YN24cVq5ciWeffbZFC3G34qd9mWb+tGbT59isMrywKUb4+0pWmcWVtbhKI5RxhTeRbcQwDEZFcoGXd+6LgqeTAmGejujur4LOwCLBOOa9o40jFfnSL77ps8HA4sv91wAAcweHwFEhhUQswmeP9MAfCwfi1fGRNj0+IYQQQsjdbn50KI4sGYGNT/TH1zN64d1JUZjS0w9eTnJodAZ8dzRVyOCx1Vnj/fre4T0Zo01OQD4+IAjju9xZ5TeEtAdvlQIh7vYwsMCZ1GKcSilCfkUtVEopNs8fCHuZGDEZpdDoDRja0R1j62U23gx9gl2gUkpRUq3F+fSSJm+760outHoWPQKcse/FaPQzNsGPzSqz6TlLqrjMH9OMN6Hhs4GFoZkglCU2B3+Sk5Ph7Ozc4HKVSoW0tDSbF+Buxqdr1S/7Am488ye/XI0nfzyLGq0eQzu6QykVo0qjR4qFqVp8s+dAKyLqyyZF4cDLw/FQ77qax
Qd6+pndJqyFwZ9kY+bPriu5SMyrhKNcgtmDg4XbiUQMege53tSxpYQQQgghd4oAVzsMCnPHxK4+eHxgMFY+0gMnXx+FR/oEAABWHUy2+THzK9RIK6oGw9S1CLhT9QhwxshITwyP8MAb93Rq78Uh5I4xoAMXIDmZUoS/L3IlXxO7+qCLnwqfP9oTDMONOX/nvqh2yXySikUYbsz8a27q1/Ekrnxtap8AdPBwQFd/rg/aZVuDP/yod5OyL4m47rVrDbaXftkc/Onbty9efPFF5OXVvei8vDy88sor6Nevn80LcDfTNFH2lX2DwZ9Vh5KRU6ZGBw97fDm9l9C751Jmw5UuPofL2LGmflQhFZuNWwS48ch88yk/ZyXsbUzJDDOZ+LUzNgcv/cZlK80aFAyVkmqoCSGEEELaCsMwWDA8FAwD7I/PR1xOucXbpRZWYfPZDFRrdGaX74rNBQBEeDne8fttYhGDtbP7Yv2cfnfMtCVCbgUDOnClX4cTC7HDuE2Z1INr+TGqkxc2zx+IzfMH2pxk0Jr40q99cfmN3qZao8OFDC4zaHAY95qijANFbA3+lBp7/piVfZnEDVpS+mVz8Gft2rXIyclBYGAgwsLCEBYWhsDAQGRlZeH777+3eQHuZnzwR24p8+cGy76uZnM/3E+PCINKKUVXP2cAwKXM0ga35Sd9tbR5mJuDHMONabDhNjR75vFf4rSiaiz46TyqNXoMCXPHwuFtM46UEEIIIYTUCXG3x0RjCdOaQ+bZPyzLYuOpdEz432Es+f0Sxqw8jN1XclGr0+P/tl7FW/9cAQCMjKSejISQluGDPwl5FahQ6+CjUgjlUgDQN9hVGAzUXqLDPSARMUjKr0RaYZXF25xJK4FWz8LPWYlAYz+wrsYJiHE55dDZ0Ki5tKbhqHfTpJGWNH22uWtWWFgYLl26hD179iA+nmsk3KlTJ4wePfqOaz7V1vgPTGqSvuVvzPzJLVdDqzfY1BHcVKpxhexgbKbcPYBb6S5ZiDjyZV+RPi0fc7x4RCgS8yrwsDFt2BaejnJhxD0AzBkcjDcmdqImrIQQQgghN8nC4aHYdjkHWy/l4KWxEQhwtUNRZS3e+CsWO69wZ+IVUhGySmvw1IZzcHeQobCSOziZPSgYz45q/RHyhJC7g5eTAh3c7ZFiPIa9v7svRKJbK7agUkrRv4MrjiUV4dO9ifjskR4N4h/HkwsBcBPM+OuC3ezhIJegslaHa/mV6GTFMbdaq4day8UKnE0yf8QiBgwDsGzLxr23qGU6wzAYO3Ysxo4d25K7E6NavuePSZDD3UEOmVgEjd6A3DJ1iyYIVKi1QgNnvkSLjzhezS43CyrpDSwSjZ3HI25gbGDPQBccXjKiRfdlGAbDIz2x+0ou3p3UBVP72h5AIoQQQgghLdfFT4WhHd1x5Foh/vPXZYhFDI4lFUKrZyEVM3hlXARm9A/ClweS8O3hFBRWauBiJ8VHD3VvMFqeEEJsNSDUrS74Yyz5utU8PzocJ1NO4p+L2egR4Iw5g0PMruf7/fAlXwDXtzbK1wmnUosRm1VmVfCH7/cjETENevJKxSJodAZo26rs6/PPP4darbb6QVevXo2KigqbF+ZuoxWmfdXVDItEDHycFQBa3vQ5rbAaAODuIBNqr4Pd7OGokKBWZ0BiXt1nk15cjRqtHnKJCMFu9hYf72b4fFoPXHh7DAV+CCGEEELaCV9yf+RaIQ4mFECrZ9HFzwl/LhyMp4aFwl4uwavjI7HjuaF4dXwkdj4/jAI/hJBWMSTMHQA3ObrzDVSktKW+wa54YyLX7P39bXE4lVIkXFdWrUVsNldlM7CDu9n9+EQMayd+8ZO+nO1kDbKL+MQRW0rIeFYFf1544QWbgjlLlixBQUGBzQtzt9FYKPsCbrzpc0ohl8nTwb2u/45IxAgr3WWTps/xxqZ+Ed6OELdjah3DMLCTtSgRjRBCCCGEtIKBHdwwrW8Auvur8NKYcOx9MRr/PjNUm
FbD6+jliIXDQ+HlpGinJSWE3GnGR3nj/Qe6YNVjvW7pdjJzBgdjUg9f6AwsFm+8gNwyLknmREoRWBbo4GEPb5X5ttHWiV+lwqSvhk30+dhBm/X8YVkWo0aNgkRi3cF5Tc2NNSu+W2h0DUe9A3XBn5Y2fU4p4NLl6k/l6uqvwvHkIlzKKsM042XxxmbPEV4tL/kihBBCCCG3P4Zh8MGD3dp7MQghdyGRiMGM/kHtvRjNYhgGy6d0RUJuBeJzKzBr7Wn8+tQAnDD2+xkc6t7gPl34FizGps/N9bYtsTDpi8ffV6OzvezLqmjOO++8Y9ODTpo0Ca6urs3f8C6ntdDzBzCZ+NXizB++2bN58Ke7vzMA84lfrdHsmRBCCCGEEEIIuRvYyST4dmYfPLT6OBLyKjB73WmU1XABm0Ghbg1uH2LS9Dm5oKrZXrslQuaPrMF1QtmXoY0yf2wN/hDrNJv508LgTypf9uVhPnadL/tKyK2AWquHQiq+4THvhBBCCCGEEELI3STA1Q4/zeuPqWtOIMbYVoVh6sbWmxKJGHT2dcLp1GJcziprNvjDl31Zyvy5kbIvmqXdjmrboOyLZVmkNlL25e+ihKu9DFo9i/jcClTV6nC9mGsOTcEfQgghhBBCCCHEOh29HPHj3P5wNE7k6uzjBBf7htk6ANDF1/qmz3VlXw0f60bKvij40460QsPnxsu+WNa2DzWvvBZVGj3EIgaB9cbEM0xd0+fVB5Nx7xdHwbKAh6Mcbg7ylr4MQgghhBBCCCHkrtPVX4V1c/qiq58KTw3r0MTtuDYr1jR9bqrsi48dtFnDZ9I2+Glf9TN/fFRKMAyXGVRUpYG7DYEZftJXgIuyweMCQDd/FQ4lFmDnlVwAXAfxV8dHtvQlEEIIIYQQQgghd60+wa7Y+syQJm/DJ2FczW6+6XNpEw2fZcayr5b0/KHMn3akNaZq1W/4LJOI4OnIBXxsLf3iJ33V7/fDG9XJCyKGy/b5z8RIHHt1JB7q7W/rohNCCCGEEEIIIcQKIe4OcFRIUKPV42pOeZO3LbUi86fNpn2RttFY5g/A9f3JK69FVmkNugc4W/2YqYWW+/3wegQ448wbo+GgkEAuEdu+0IQQQgghhBBCCLGaWMSgf4gr9sbl40RyEboZJ3Fb0lTmj+RmNHzu3LkziouLhb8XLVqEwsJC4e/8/HzY2dlZuitphDDty0LKl28Lmz6nFPCTviwHfwDAzUFOgR9CCCGEEEIIIeQm4SeBnUgpavJ2fM8fS82jb6Tnj9XBn/j4eOh0OuHvn376CeXldelKLMtCrVbbvAB3Mz7zR2op88el8XHvGp0BBoPlNK/mMn8IIYQQQgghhBBycw0M5YI/Z1KLzYI32aU12HM1DyzLwmBgUVbDZf44W+z5w8UOdPqbWPZlaQoVwzAtfbi7DsuyTWb++Bszfy5mlGLdsVSkFlYhtbAKaUVVyCqpQaCrHXY8NwxKWV0Gj0ZnQIYxUyi0kZ4/hBBCCCGEEEIIubk6eTtBpZSirEaL2Kwy9Ax0AcuyeGrDWcRmlePL6T0xJMwdfJ6Hs9LSqHcu5qKhaV+3D51J5o7Fnj8udcGfixmlDa5PK6rGiZRCjIz0Ei5LL66C3sDCXiYWGkYTQgghhBBCCCGkfYmMfX92X83DiZQi9Ax0wcWMUsRmcRVVG05cR5QvNxXMQS6xGCe4KWVfDMM0yOyhTJ+W47N+AMuZP/1D3NAnyAURXo4YH+WNBdGhWPFgV2x6agAm9/AFABxOLDS7Dz/pK8TDnj4bQgghhBBCCCHkFsKXfp1I5vr+bDyVLlx3KrUY566XAABUyoYlX8BNKvtiWRajRo2CRMLdpaamBvfddx9kMi4VybQfEGmeWfDHQkTPXi7B7wsHWbxvSbUWf1/MxqHEArPLU4R+P1TyRQghhBBCCCGE3Er44M/ZtBIUVdZi66VsAECwmx3Siqqx6mASAMDF3nLwRxj13pZlX++8847Z35MmTWpwm
wcffNDmBbhb8WlaYhEDsci2LJ1BYW6QiBikFlbhelEVgty45s6pxsyfDtTsmRBCCCGEEEIIuaWEezrC1V6G4ioNlm29CrXWgHAvB7w+oRPmrD+DZOMxvYtdw34/wI2Nem9x8IfcmFpj5o9UbHt5lpNCil5BLjidWozDiQV4fKA9tHoDjiZxZWDhXo6tuqyEEEIIIYQQQgi5MSIRgwEdXLH9ci62xHBZP9P7BWJYuAf8nJXCtG/nRoI/N6XnD2ldfJqWpX4/1ogO9wAAofRr26UcZJXWwN1BhlGdPFtnIQkhhBBCCCGEENJqBnZwE/6tkIrwQC9/iEUMpvUNEC53sTDmHahrGdOmPX9GjBjRbBNhhmGwb98+mxfibsRH6iz1+7FGdLgHPtqVgOPJRajV6bH6UDIAYPagYCik4mbuTQghhBBCCCGEkJttgEnw595uvkJz56l9A/DZvmvQG9gmMn9uwqj3Hj16NHpdRUUFNm7ciNraWpsX4G7FN3xuaeZPZx8nuDvIUVhZi5W7ExGfWwF7mRiPDwhuxaUkhBBCCCGEEEJIawnzdBBKvGb0DxQu93JSYHwXb2y7lNNoH1+JqOVlX1YHfz799NMGl+l0Onz11Vd4//334efnh3fffdfmBbhb3Wjmj0jEYFi4O/48n4U1h1MAANP7B0LVSHoYIYQQQgghhBBC2hfDMPhxXj/kl9eiZ6CL2XUfPtgNk3v4YUSEh8X73kjZV4t7/vz888+IiIjAihUrsHTpUsTFxWHatGk2Pcbhw4dx3333wdfXFwzD4O+//za7nmVZvP322/Dx8YFSqcTo0aNx7dq1li7yLaWu4XPL2y7xfX+4x2Ewb0iHG14uQgghhBBCCCGEtJ1QDwdh7Lspe7kEYzp7QdJInOBGyr5sjjzs3LkTPXr0wKJFizB79mxcu3YNixYtgkRidRKRoKqqCt27d8dXX31l8foPP/wQn3/+OVavXo1Tp07B3t4e48aNg1qttvm5bjVC2VcLM38AYGhHD/BtmCb38IO3StEai0YIIYQQQgghhJBbTF3ZVxs2fD59+jReffVVnDx5EgsWLMDevXvh7u5u8xOamjBhAiZMmGDxOpZl8dlnn+HNN9/EpEmTAAA//vgjvLy88Pfff9ucZXSr4T+sG8n8cbWXYXQnL5xMKcLC4aGttWiEEEIIIYQQQgi5xUiNySNaXRv2/BkwYACUSiUWLFiAkJAQbNy40eLtnn32WZsXwpLU1FTk5uZi9OjRwmUqlQr9+/fHiRMnGg3+1NbWmjWeLi8vb5XlaW2tkfkDAKtm9IJGb4CdzPbMK0IIIYQQQgghhNweZMayL52hDYM/gYGBFvvymGIYptWCP7m5uQAALy8vs8u9vLyE6yxZvnw5li1b1irL0Jb4hs/yGwz+SMSiRusBCSGEEEIIIYQQcmfgK4c0bVn2lZaWZvODt4fXX38dL774ovB3eXk5AgIC2nGJLNO0QsNnQgghhBBCCCGE3B34xI+WlH3dspEHb29vAEBeXp7Z5Xl5ecJ1lsjlcjg5OZn9dyuq5Ue9U/CHEEIIIYQQQgghzbgpZV81NTXYt28f7r33XgBcho1pbx2xWIx3330XCkXrTJwKCQmBt7c39u3bhx49egDgsnhOnTqFhQsXtspztCdtK/X8IYQQQgghhBBCyJ3vppR9/fDDD9i2bZsQ/Pnyyy8RFRUFpVIJAIiPj4evry9eeOEFq5+8srIS/9/evcc2dZ9/HP8c23Hc4VyaZjhhCQEWSBmjgUIDYVO5NIBahhpV67r+wVJ6magCnZU/KKm2RM2mwYS2wUYou4hlXUsLK6WVYA2tCGFaS8VClrVUarYgRNf9SoC1IcGFXOzz+wNsSEmInYud47xfkqX4+Hvix86Xo9Onz/P9trS0hJ6fOnVKTU1NSktL08SJE+X1evWTn/xEU6dO1eTJk/WjH/1IEyZMUHFxcdjvMVp1+Wn7AgAAAAAA4RlK21fYyZ8XX3xR69ev7
3Vs165dmjJliiTphRdeUHV1dUTJn4aGBi1evDj0PLhWT0lJiWpqarR+/Xr5fD59//vfV1tbm775zW+qtrZ22KqLYmm4dvsCAAAAAADxL+Fq21dwA6lIhJ38aWlp0cyZM0PPXS6XbLZriYuCggKVlpZG9OaLFi2SafZfrmQYhqqqqlRVVRXR77WC7tCaP0aMIwEAAAAAAKNdcM3gnsAItn21tbX1WuPn3LlzvV4PBAK9XsfNUfkDAAAAAADCFVrzZyR3+8rKytKJEyf6ff29995TVlZWxAGMVcE1f0j+AAAAAACAgTiG0PYVdubhvvvuU0VFhS5fvnzDa5cuXdKzzz6rFStWRBzAWBXM1LHgMwAAAAAAGEiw7WtE1/x55plntGfPHuXl5Wnt2rWaNm2aJKm5uVnbtm1TT0+PnnnmmYgDGKto+wIAAAAAAOEKFo/0jORW7x6PR++8846efPJJbdiwIbRQs2EYWrp0qbZv3y6PxxNxAGPVtQWfSf4AAAAAAICbC7Z9dY1k5Y8kTZ48WbW1tfr000/V0tIiScrNzVVaWlrEbzzWseYPAAAAAAAIV1Tavq6XlpamgoKCwZyKq7p6rlROUfkDAAAAAAAGEmz7CpiSP2DKbjPCPpfMQ4wEK39Y8BkAAAAAAAwk4brOoUirf8g8xEhXj18SbV8AAAAAAGBgjusqfUj+WET31dW5Sf4AAAAAAICBXN851B3hjl9kHmIktNU7bV8AAAAAAGAAdpsRWuenh8ofawglf6j8AQAAAAAAYUgY5HbvZB5ipJsFnwEAAAAAQAQSbMHt3mn7soROKn8AAAAAAEAEgjt+0fZlEcHKH9b8AQAAAAAA4aDty2KCfyinwxhgJAAAAAAAgOSg7ctaru32ZY9xJAAAAAAAwAqCS8d0U/ljDaG2L9b8AQAAAAAAYQi2fZH8sYBAwAyVaAX/cAAAAAAAADcT3DGcti8L6A5cy9BR+QMAAAAAAMLhCCZ/eqj8GfW6rvsjJbDbFwAAAAAACIPzavdQT4Dkz6h3ffKHrd4BAAAAAEA4ggUkXbR9jX7Xr/djs7HmDwAAAAAAGBhtXxYSrPyh5QsAAAAAAITLyW5f1tHl90tisWcAAAAAABC+0G5fAdq+Rr2unmDbF18/AAAAAAAITwJtX9bRdbU8i8WeAQAAAABAuBy0fVlH8I+USNsXAAAAAAAIU7CIhOSPBbDgMwAAAAAAiFSo7Yut3ke/YPKHBZ8BAAAAAEC4aPuykNCaPyR/AAAAAABAmGj7spBrbV9GjCMBAAAAAABWQduXhXSHKn/sMY4EAAAAAABYRQKVP9YRWvOHyh8AAAAAABAm1vyxENb8AQAAAAAAkXLS9mUd1yp/+PoBAAAAAEB4Eqj8sY5g5U8CyR8AAAAAABAmB2v+WEeo8oe2LwAAAAAAECbaviykmzV/AAAAAABAhBIccdz2VV1drUmTJsnlcmnevHk6duxYrEMaEtb8AQAAAAAAkYrbrd53796tsrIyVVZWqrGxUfn5+Vq+fLnOnj0b69AGLVieReUPAAAAAAAIl8M2uLYvx0gEM5x+8Ytf6IknntDq1aslSTt27NCBAwe0c+dObdiwIezfs/1wizptierxm0qw25TgMOS02678bLcpwW7I6bjys90wdKnbr0vdfnV2B/Qlp11JLofcLocCptTdE1C3/8qjy29e+bnn2nPTNGWzGXLYDPkDpnydPero7NGlLr8CpqkP/q9dEgs+AwAAAACA8Dmvtn19/Nnn2nzwQ7W3t4d13qhO/nR1den48eMqLy8PHbPZbCoqKtLRo0f7PKezs1OdnZ2h58EvYnv9SdkSvzSyAUcoI9kV6xAAAAAAAIBFuBMTJEmt7Z2qPnxSgc7PwzpvVCd/zp8/L7/fL4/H0+u4x+PRhx9+2Oc5Gzdu1LPPPnvD8QfnZmn8bbcqwW5TT7Bqp+e6yp3QMVP+QEC3OO1yJdiV6LDrUlePOi5fqd6xG4YSHDY57Uaoa
ihYMRQ8ZhiSPyAFzCtlWEkuh9yJDn3JaZfNZshmGEq9JUH3TPfcECcAAAAAAEBf5uTcqrKl03Sm/bKcdpvMrs9VtWXg80Z18mcwysvLVVZWFnre3t6u7OxsVa6coeTk5BhGBgAAAAAAMHh2m6Gn7pkaet7e3q6qMM4b1cmf9PR02e12tba29jre2tqqjIyMPs9JTExUYmJiNMIDAAAAAAAY9Ub1isNOp1Nz5szRoUOHQscCgYAOHTqkwsLCGEYGAAAAAABgDaO68keSysrKVFJSorlz56qgoEBbtmyRz+cL7f4FAAAAAACA/o365M9DDz2kc+fOqaKiQmfOnNGsWbNUW1t7wyLQAAAAAAAAuJFhmle3pIpTFy5cUGpqqv7zn/+w4DMAAAAAAIgbwU2u2tralJKS0u+4UV/5M1T/+9//JEnZ2dkxjgQAAAAAAGD4dXR0jO3kT1pamiTpo48+uukXAQyHYNaVSjNEC3MO0cR8QzQx3xBtzDlEE/MNw8U0TXV0dGjChAk3HRf3yR+b7cqGZikpKfyjQtQkJycz3xBVzDlEE/MN0cR8Q7Qx5xBNzDcMh3AKXUb1Vu8AAAAAAAAYGpI/AAAAAAAAcSzukz+JiYmqrKxUYmJirEPBGMB8Q7Qx5xBNzDdEE/MN0cacQzQx3xBtcb/VOwAAAAAAwFgW95U/AAAAAAAAYxnJHwAAAAAAgDhG8gcAAAAAACCOkfwBAAAAAACIY3GR/KmurtakSZPkcrk0b948HTt27Kbj//znP+v222+Xy+XSzJkz9Ze//CVKkSIeRDLfampqZBhGr4fL5YpitLCyv/71r1q5cqUmTJggwzD02muvDXhOfX297rzzTiUmJio3N1c1NTUjHifiQ6Tzrb6+/obrm2EYOnPmTHQChqVt3LhRd911l5KSkjR+/HgVFxerubl5wPO4h8NgDWbOcR+HwXruued0xx13KDk5WcnJySosLNQbb7xx03O4vmGkWT75s3v3bpWVlamyslKNjY3Kz8/X8uXLdfbs2T7Hv/POO3r44Yf12GOP6R//+IeKi4tVXFysEydORDlyWFGk802SkpOT9cknn4Qep0+fjmLEsDKfz6f8/HxVV1eHNf7UqVNasWKFFi9erKamJnm9Xj3++OM6ePDgCEeKeBDpfAtqbm7udY0bP378CEWIeHLkyBGVlpbq3Xff1VtvvaXu7m4tW7ZMPp+v33O4h8NQDGbOSdzHYXCysrK0adMmHT9+XA0NDVqyZInuv/9+ffDBB32O5/qGaLD8Vu/z5s3TXXfdpW3btkmSAoGAsrOztW7dOm3YsOGG8Q899JB8Pp/2798fOjZ//nzNmjVLO3bsiFrcsKZI51tNTY28Xq/a2tqiHCnijWEY2rdvn4qLi/sd8/TTT+vAgQO9bhS++93vqq2tTbW1tVGIEvEinPlWX1+vxYsX67PPPlNqamrUYkN8OnfunMaPH68jR47o7rvv7nMM93AYTuHMOe7jMJzS0tK0efNmPfbYYze8xvUN0WDpyp+uri4dP35cRUVFoWM2m01FRUU6evRon+ccPXq013hJWr58eb/jgaDBzDdJunjxonJycpSdnX3TjD8wVFzfEAuzZs1SZmamli5dqrfffjvW4cCiLly4IOnKfxz1h2schlM4c07iPg5D5/f79fLLL8vn86mwsLDPMVzfEA2WTv6cP39efr9fHo+n13GPx9PvmgNnzpyJaDwQNJj5lpeXp507d+r111/XCy+8oEAgoAULFujjjz+ORsgYY/q7vrW3t+vSpUsxigrxKjMzUzt27NDevXu1d+9eZWdna9GiRWpsbIx1aLCYQCAgr9erb3zjG/r617/e7zju4TBcwp1z3MdhKN5//3253W4lJiZqzZo12rdvn772ta/1OZbrG6LBEesAgHhWWFjYK8O/YMECTZ8+Xb/5zW/04x//OIaRAcDQ5OXlKS8vL/R8wYIFOnnypH75y1/qT3/6Uwwjg9WUlpbqxIkT+
tvf/hbrUDBGhDvnuI/DUOTl5ampqUkXLlzQK6+8opKSEh05cqTfBBAw0ixd+ZOeni673a7W1tZex1tbW5WRkdHnORkZGRGNB4IGM9++KCEhQbNnz1ZLS8tIhIgxrr/rW3Jysm655ZYYRYWxpKCggOsbIrJ27Vrt379fhw8fVlZW1k3Hcg+H4RDJnPsi7uMQCafTqdzcXM2ZM0cbN25Ufn6+tm7d2udYrm+IBksnf5xOp+bMmaNDhw6FjgUCAR06dKjffsrCwsJe4yXprbfe6nc8EDSY+fZFfr9f77//vjIzM0cqTIxhXN8Qa01NTVzfEBbTNLV27Vrt27dPdXV1mjx58oDncI3DUAxmzn0R93EYikAgoM7Ozj5f4/qGaLB821dZWZlKSko0d+5cFRQUaMuWLfL5fFq9erUk6Xvf+56+8pWvaOPGjZKkH/zgB1q4cKF+/vOfa8WKFXr55ZfV0NCg3/72t7H8GLCISOdbVVWV5s+fr9zcXLW1tWnz5s06ffq0Hn/88Vh+DFjExYsXe/3fxVOnTqmpqUlpaWmaOHGiysvL9d///lfPP/+8JGnNmjXatm2b1q9fr0cffVR1dXXas2ePDhw4EKuPAAuJdL5t2bJFkydP1owZM3T58mX9/ve/V11dnd58881YfQRYSGlpqXbt2qXXX39dSUlJoXUtUlJSQpWK3MNhOA1mznEfh8EqLy/Xvffeq4kTJ6qjo0O7du1SfX29Dh48KInrG2LEjAO//vWvzYkTJ5pOp9MsKCgw33333dBrCxcuNEtKSnqN37Nnjzlt2jTT6XSaM2bMMA8cOBDliGFlkcw3r9cbGuvxeMz77rvPbGxsjEHUsKLDhw+bkm54BOdYSUmJuXDhwhvOmTVrlul0Os0pU6aYf/jDH6IeN6wp0vn2s5/9zPzqV79qulwuMy0tzVy0aJFZV1cXm+BhOX3NNUm9rlncw2E4DWbOcR+HwXr00UfNnJwc0+l0ml/+8pfNe+65x3zzzTdDr3N9QywYpmma0Uw2AQAAAAAAIHosveYPAAAAAAAAbo7kDwAAAAAAQBwj+QMAAAAAABDHSP4AAAAAAADEMZI/AAAAAAAAcYzkDwAAAAAAQBwj+QMAAAAAABDHSP4AAAAAAADEMZI/AABgzHvkkUdUXFwc9fetqamRYRgyDENerzescx555JHQOa+99tqIxgcAAOKDI9YBAAAAjCTDMG76emVlpbZu3SrTNKMUUW/Jyclqbm7WuHHjwhq/detWbdq0SZmZmSMcGQAAiBckfwAAQFz75JNPQj/v3r1bFRUVam5uDh1zu91yu92xCE3SleRURkZG2ONTUlKUkpIyghEBAIB4Q9sXAACIaxkZGaFHSkpKKNkSfLjd7hvavhYtWqR169bJ6/Xq1ltvlcfj0e9+9zv5fD6tXr1aSUlJys3N1RtvvNHrvU6cOKF7771XbrdbHo9Hq1at0vnz5yOOefv27Zo6dapcLpc8Ho++/e1vD/VrAAAAYxjJHwAAgD788Y9/VHp6uo4dO6Z169bpySef1IMPPqgFCxaosbFRy5Yt06pVq/T5559Lktra2rRkyRLNnj1bDQ0Nqq2tVWtrq77zne9E9L4NDQ166qmnVFVVpebmZtXW1uruu+8eiY8IAADGCNq+AAAA+pCfn68f/vCHkqTy8nJt2rRJ6enpeuKJJyRJFRUVeu655/Tee+9p/vz52rZtm2bPnq2f/vSnod+xc+dOZWdn61//+pemTZsW1vt+9NFHGjdunL71rW8pKSlJOTk5mj179vB/QAAAMGZQ+QMAANCHO+64I/Sz3W7XbbfdppkzZ4aOeTweSdLZs2clSf/85z91+PDh0BpCbrdbt99+uyTp5MmTYb/v0qVLlZOToylTpmjVqlV68cUXQ9VFAAAAg0HyBwAAoA8JCQm9nhuG0etYcBexQCAgSbp48aJWrlyppqamXo9///vfEbVtJSUlqbGxUS+99JIyMzNVUVGh/Px8tbW1D
f1DAQCAMYm2LwAAgGFw5513au/evZo0aZIcjqHdYjkcDhUVFamoqEiVlZVKTU1VXV2dHnjggWGKFgAAjCVU/gAAAAyD0tJSffrpp3r44Yf197//XSdPntTBgwe1evVq+f3+sH/P/v379atf/UpNTU06ffq0nn/+eQUCAeXl5Y1g9AAAIJ6R/AEAABgGEyZM0Ntvvy2/369ly5Zp5syZ8nq9Sk1Nlc0W/i1XamqqXn31VS1ZskTTp0/Xjh079NJLL2nGjBkjGD0AAIhnhmmaZqyDAAAAGItqamrk9XoHtZ6PYRjat2+fiouLhz0uAAAQX6j8AQAAiKELFy7I7Xbr6aefDmv8mjVr5Ha7RzgqAAAQT6j8AQAAiJGOjg61trZKutLulZ6ePuA5Z8+eVXt7uyQpMzNT48aNG9EYAQCA9ZH8AQAAAAAAiGO0fQEAAAAAAMQxkj8AAAAAAABxjOQPAAAAAABAHCP5AwAAAAAAEMdI/gAAAAAAAMQxkj8AAAAAAABxjOQPAAAAAABAHCP5AwAAAAAAEMf+H1LgWR+KQbjyAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "new_step_size = hop_size / new_sample_rate\n", + "gne_xs = torch.arange(len(gne[0])) * new_step_size\n", + "plt.plot(gne_xs, gne[0])\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"GNE [dB]\")\n", + "plt.title(\"GNE measure computed for the full utterance\")\n", + "plt.axhline(gne[gne > 10].mean(), color=\"darkorange\")\n", + "print(\"Average GNE score:\", gne[gne > 10].mean())" + ] + }, + { + "cell_type": "markdown", + "id": "87e3280c-24ce-4611-8c49-35d5f52444d6", + "metadata": {}, + "source": [ + "## PRAAT-Parselmouth\n", + "\n", + "The following is a side-by-side analysis with PRAAT to verify that our measures look accurate. To compute the PRAAT measures in Python, we use Parselmouth:\n", + "\n", + "* [https://parselmouth.readthedocs.io/en/stable/](https://parselmouth.readthedocs.io/en/stable/)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "82f8e472-e99d-449f-a5b4-bf6c1479de9c", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install praat-parselmouth" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "72857216-492d-433e-9ace-ecd379127220", + "metadata": {}, + "outputs": [], + "source": [ + "import parselmouth\n", + "from parselmouth.praat import call\n", + "import numpy as np\n", + "\n", + "# Bundle these to compute them again later\n", + "def compute_praat_features(audio_filename):\n", + " f0min = 75\n", + " f0max = 300\n", + " \n", + " sound = parselmouth.Sound(audio_filename)\n", + " pointProcess = call(sound, \"To PointProcess (periodic, cc)\", f0min, f0max)\n", + " pitch = sound.to_pitch()\n", + " pitch_values = pitch.selected_array['frequency']\n", + " pitch_values[pitch_values==0] = np.nan\n", + "\n", + " jitter = call(pointProcess, \"Get jitter (local)\", 0, 0, 0.0001, 0.02, 1.3)\n", + " shimmer = call([sound, pointProcess], \"Get shimmer (local)\", 0, 0, 0.0001, 0.02, 1.3, 1.6)\n", + "\n", + " 
harmonicity = sound.to_harmonicity()\n", + "\n", + " return pitch.xs() - 0.02, pitch_values, jitter, shimmer, harmonicity\n", + "\n", + "\n", + "pitch_xs, pitch_values, praat_jitter, praat_shimmer, praat_harmonicity = compute_praat_features(audio_filename)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "49c97ae9-86aa-41b9-b1c5-c9e19083820c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Estimated average frequency (SpeechBrain): 171.9 Hz\n", + "Estimated average frequency (PRAAT): 171.9 Hz\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABIgAAADeCAYAAABWtDs6AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAdy9JREFUeJzt3Xd4VNXWx/HvpPeEACmUhN57B+kiHUSwAEoTUZAqykW89gbqBSuICgKKWFBBUFGR3hGR3ntJIRDSk6nn/WM0r5FiEtIgv8/zzHPvnLLPGjw5c2advdc2GYZhICIiIiIiIiIixZZLYQcgIiIiIiIiIiKFSwkiEREREREREZFiTgkiEREREREREZFiTgkiEREREREREZFiTgkiEREREREREZFiTgkiEREREREREZFiTgkiEREREREREZFiTgkiEREREREREZFiTgkiEREREREREZFiTgkiEREREREREZFirlATRFOnTqVp06b4+/sTEhJCnz59OHz4cJZtMjIyGD16NCVLlsTPz49+/foRGxubZZszZ87Qo0cPfHx8CAkJYdKkSdhstoL8KCIiIiIiIiIiN61CTRCtW7eO0aNHs3XrVlauXInVaqVz586kpqZmbvPYY4+xfPlyFi9ezLp164iKiqJv376Z6+12Oz169MBisbB582YWLFjA/PnzefbZZwvjI4mIiIiIiIiI3HRMhmEYhR3EX+Li4ggJCWHdunW0bduWxMRESpcuzaJFi7j77rsBOHToEDVr1mTLli20aNGCFStW0LNnT6KioggNDQVg9uzZTJ48mbi4ODw8PArzI4mIiIiIiIiIFHluhR3A3yUmJgIQHBwMwO+//47VaqVTp06Z29SoUYOIiIjMBNGWLVuoW7duZnIIoEuXLowaNYr9+/fTsGHDK45jNpsxm82Z7x0OB/Hx8ZQsWRKTyZRfH09EREREREREpEAZhkFycjJlypTBxeXaA8mKTILI4XAwYcIEbrvtNurUqQNATEwMHh4eBAUFZdk2NDSUmJiYzG3+nhz6a/1f665m6tSpvPDCC3n8CUREREREREREiqazZ89Srly5a64vMgmi0aNHs2/fPjZu3Jjvx5oyZQoTJ07MfJ+YmEhERARnz54lICAg348vIiIiIiIiIlIQkpKSKF++PP7+/tfdrkgkiMaMGcP333/P+vXrs2SzwsLCsFgsJCQkZOlFFBsbS1hYWOY227dvz9LeX7Oc/bXNP3l6euLp6XnF8oCAACWIREREREREROSW828ldQp1FjPDMBgzZgxLlixh9erVVKxYMcv6xo0b4+7uzqpVqzKXHT58mDNnztCyZUsAWrZsyd69e7lw4ULmNitXriQgIIBatWoVzAcREREREREREbmJF
WoPotGjR7No0SK+++47/P39M2sGBQYG4u3tTWBgIMOHD2fixIkEBwcTEBDA2LFjadmyJS1atACgc+fO1KpVi0GDBvH6668TExPD008/zejRo6/aS0hERERERERERLIq1Gnur9W9ad68eQwdOhSAjIwMHn/8cT7//HPMZjNdunRh1qxZWYaPnT59mlGjRrF27Vp8fX0ZMmQI06ZNw80te/mvpKQkAgMDSUxM1BAzEREREREREbllZDfnUagJoqJCCSIREREREREpygzDwGazYbfbCzsUKWJcXV1xc3O7Ziec7OY8ikSRahERERERERG5OovFQnR0NGlpaYUdihRRPj4+hIeH4+Hhkes2lCASERERERERKaIcDgcnT57E1dWVMmXK4OHh8a+zUUnxYRgGFouFuLg4Tp48SdWqVXFxyd18ZEoQiYiIiIiIiBRRFosFh8NB+fLl8fHxKexwpAjy9vbG3d2d06dPY7FY8PLyylU7hTrNvYiIiIiIiIj8u9z2CpHiIS/OD51hIiIiIiIiIiLFnBJEIiIiIiIiIiLFnBJEIiIiIiIiIiLFnBJEIiIiIiIiIpLnhg4dislkwmQy4eHhQZUqVXjxxRex2WysXbs2c53JZKJ06dJ0796dvXv3XrWtLl264Orqym+//XbdY9aoUQNPT09iYmIArjjO1V5r167N649+U1KCSERERERERETyRdeuXYmOjubo0aM8/vjjPP/887zxxhuZ6w8fPkx0dDQ///wzZrOZHj16YLFYsrRx5swZNm/ezJgxY/j444+veayNGzeSnp7O3XffzYIFCwBo1aoV0dHRma977703M6a/Xq1atcqfD3+T0TT3IiIiIiIiIjcRwzBIt9oL5dje7q6YTKZsb+/p6UlYWBgAo0aNYsmSJSxbtoyWLVsCEBISQlBQEGFhYUyYMIHevXtz6NAh6tWrl9nGvHnz6NmzJ6NGjaJFixbMmDEDb2/vK441d+5cBg4cSLt27Rg/fjyTJ0/Gw8Mj8/jgnBLebDZnWSZOShCJiIiIiIiI3ETSrXZqPftzoRz7wItd8PHIfSrB29ubS5cuXbE8MTGRL774AgAPD4/M5YZhMG/ePGbOnEmNGjWoUqUKX3/9NYMGDcqyf3JyMosXL2bbtm3UqFGDxMRENmzYQJs2bXIda3GjIWYiIiIiIiIikq8Mw+DXX3/l559/pmPHjpnLy5Urh5+fH0FBQSxatIjevXtTo0aNzPW//voraWlpdOnSBYAHHniAuXPnXtH+F198QdWqValduzaurq7079//qtvJtakHkYiIiIiIiMhNxNvdlQMvdim0Y+fE999/j5+fH1arFYfDwcCBA3n++eczi01v2LABHx8ftm7dyquvvsrs2bOz7P/xxx9z33334ebmTF8MGDCASZMmcfz4cSpXrpxluwceeCDz/QMPPEC7du1499138ff3z+3HLVaUIBIRERERERG5iZhMphsa5lWQOnTowPvvv4+HhwdlypTJTPT8pWLFigQFBVG9enUuXLjAfffdx/r16wGIj49nyZIlWK1W3n///cx97HY7H3/8Ma+88goABw4cYOvWrWzfvp3Jkydn2e6LL75gxIgRBfBJb34aYiYiIiIiIiIi+cLX15cqVaoQERFxRXLon0aPHs2+fftYsmQJAJ999hnlypVj9+7d7Nq1K/M1ffp05s+fj93uLNQ9d+5c2rZte8V2EydO1DCzHFCCSEREREREREQKnY+PDyNGjOC5557DMAzmzp3L3XffTZ06dbK8hg8fzsWLF/npp5+wWq18+umnDBgw4IrtHnroIbZt28b+/fsL+6PdFJQgEhEREREREZEiYcyYMRw8eJDXX3+d3bt3069fvyu2CQwM5Pbbb2fu3LksW7aMS5cucdddd12xXc2aNalZs6Z6EWWTyTAMo7CDKGxJSUkEBgaSmJhIQEBAYYcjIiIiIiIiAkBGRgYnT56kYsWKeHl5FXY4UkRd7zzJbs5DPYj+x
mJzFHYIIiIiIiIiIiIFTgmiv/n1YGxhhyAiIiIiIiIiUuCyNS9e3759c9zw7NmzCQkJyfF+henzbWfof1v1wg5DRERERERERKRAZasH0dKlS/Hw8CAwMDBbrx9++IGUlJT8jj3P/XE2gX3nEws7DBERERERERGRApWtHkQA77zzTrZ7BH399de5Dqgw+ZDOp1tO89rd9Qo7FBERERERERGRApOtHkRr1qwhODg4242uWLGCsmXL5jqowlLVdJ6lu86TkGYp7FBERERERERERApMthJE7dq1w80t252NaN26NZ6enrkOqrC0CbqE2ebgqx1nCzsUEREREREREZECk+NZzDp27MgLL7xwxfLLly/TsWPHPAmqsNxRKh6AT7eexu4wCjkaEREREREREZGCkeME0dq1a3nvvffo06cPqampmcstFgvr1q3L0+AKWgXjLH6ebpyNT+dQTFJhhyMiIiIiIiIiUiBynCAC+PXXX4mJiaFFixacOnUq1wdfv349vXr1okyZMphMJpYuXZplfWxsLEOHDqVMmTL4+PjQtWtXjh49mmWbjIwMRo8eTcmSJfHz86Nfv37ExsbmKh7Xi4epGeYHwLELN98sbCIiIiIiIiKSM88//zwNGjQo1Bjat2/PhAkTCjWGXCWIwsPDWbduHXXr1qVp06asXbs2VwdPTU2lfv36zJw584p1hmHQp08fTpw4wXfffccff/xBZGQknTp1ytJz6bHHHmP58uUsXryYdevWERUVRd++fXMVDxkJNAw2A0oQiYiIiIiIiNyouLg4Ro0aRUREBJ6enoSFhdGlSxc2bdpU2KHlyNChQzGZTJmvkiVL0rVrV/bs2ZMn7X/77be89NJLedJWbmW/8vSfTCYTAJ6enixatIiXX36Zrl27Mnny5BwfvFu3bnTr1u2q644ePcrWrVvZt28ftWvXBuD9998nLCyMzz//nIceeojExETmzp3LokWLMusfzZs3j5o1a7J161ZatGiR45gaeZ4HgpUgEhEREREREblB/fr1w2KxsGDBAipVqkRsbCyrVq3i0qVLhR1ajnXt2pV58+YBEBMTw9NPP03Pnj05c+bMNfexWq24u7v/a9s5mTk+v+S4B5FhZC3e/PTTT/PZZ58xffr0PAsKwGx29uTx8vLKXObi4oKnpycbN24E4Pfff8dqtdKpU6fMbWrUqEFERARbtmy5bttJSUlZXn+pivM/rBJEIiIiIiIiUiQZBlhSC+dlZH9Cp4SEBDZs2MBrr71Ghw4diIyMpFmzZkyZMoXevXsDzk4o77//Pt26dcPb25tKlSrx9ddfZ2nn7Nmz3HvvvQQFBREcHMydd955RbmbOXPmULNmTby8vKhRowazZs3Ksv7cuXMMGDCA4OBgfH19adKkCdu2bcuyzaeffkqFChUIDAykf//+JCcnZ1n/Vw+osLAwGjRowJNPPsnZs2eJi4sD4NSpU5hMJr788kvatWuHl5cXn332GZcuXWLAgAGULVsWHx8f6taty+eff56l7X8OMatQoQKvvvoqDz74IP7+/kRERPDhhx9m+98+N3Lcg+jkyZOUKlUqy7J+/fpRvXp1fv/99zwL7K9Ez5QpU/jggw/w9fXlzTff5Ny5c0RHRwPOjJ2HhwdBQUFZ9g0NDSUmJuaabU+dOvWqM7EBhGWcABpw6lIqNrsDN9dcjcITERERERERyR/WNHi1TOEc+6ko8PDN1qZ+fn74+fmxdOlSWrRogaen51W3e+aZZ5g2bRpvv/02n376Kf3792fv3r3UrFkTq9VKly5daNmyJRs2bMDNzS1zJNOePXvw8PDgs88+49lnn+W9996jYcOG/PHHH4wYMQJfX1+GDBlCSkoK7dq1o2zZsixbtoywsDB27tyJw+HIjOH48eMsXbqU77//nsuXL3Pvvfcybdo0XnnllavGnJKSwsKFC6lSpQolS5bMsu7JJ59k+vTpNGzYEC8vLzIyMmjcuDGTJ08mICCAH374g
UGDBlG5cmWaNWt2zX+/6dOn89JLL/HUU0/x9ddfM2rUKNq1a0f16tWz9e+fUzlOEEVGRl51eZ06dahTp84NB/QXd3d3vv32W4YPH05wcDCurq506tSJbt26XdGLKaemTJnCxIkTM98nJSVRvnx5AHwSDuPt7kq61c7p+DQql/a7oWOJiIiIiIiIFEdubm7Mnz+fESNGMHv2bBo1akS7du3o378/9erVy9zunnvu4aGHHgLgpZdeYuXKlbz77rvMmjWLL7/8EofDwZw5czJL3sybN4+goCDWrl1L586dee6555g+fXpmPeKKFSty4MABPvjgA4YMGcKiRYuIi4vjt99+yxzKVaVKlSyxOhwO5s+fj7+/PwCDBg1i1apVWRJE33//PX5+zhxBamoq4eHhfP/997i4ZO1YMmHChCtqIz/xxBOZ/3/s2LH8/PPPfPXVV9dNEHXv3p1HH30UgMmTJ/Pmm2+yZs2awk8QZbfw87fffpvrYP6pcePG7Nq1i8TERCwWC6VLl6Z58+Y0adIEgLCwMCwWCwkJCVl6EcXGxhIWFnbNdj09Pa+ZuTTFHaFaaW92R6Vw7EKKEkQiIiIiIiJStLj7OHvyFNaxc6Bfv3706NGDDRs2sHXrVlasWMHrr7/OnDlzGDp0KAAtW7bMsk/Lli3ZtWsXALt37+bYsWOZiZu/ZGRkcPz4cVJTUzl+/DjDhw9nxIgRmettNhuBgYEA7Nq1i4YNG163zk+FChWyHCM8PJwLFy5k2aZDhw68//77AFy+fJlZs2bRrVs3tm/fnqUzzV85i7/Y7XZeffVVvvrqK86fP4/FYsFsNuPjc/1/y78n0UwmE2FhYVfElJeynSD66x/2L4sWLaJXr15X/EfKD38d++jRo+zYsSOzsnfjxo1xd3dn1apV9OvXD4DDhw9z5syZK06wbHHzAXs6LYIuszvKnWMXUuhSO88+hoiIiIiIiMiNM5myPcyrKPDy8uKOO+7gjjvu4JlnnuGhhx7iueeey0wQXU9KSgqNGzfms88+u2Jd6dKlSUlx1g/+6KOPaN68eZb1rq6uAHh7e//rcf5ZSNpkMmUZggbg6+ubpefRnDlzCAwM5KOPPuLll1/Ost3fvfHGG7z99tu89dZb1K1bF19fXyZMmIDFYrnhmPJSthNEf1Xq/svXX3/N66+/TqVKlXJ98JSUFI4dO5b5/uTJk+zatYvg4GAiIiJYvHgxpUuXJiIigr179zJ+/Hj69OlD586dAWfiaPjw4UycOJHg4GACAgIYO3YsLVu2zNUMZpSuDvG7aOQZBUSqULWIiIiIiIhIHqtVqxZLly7NfL9161YGDx6c5X3Dhg0BaNSoEV9++SUhISEEBARc0VZgYCBlypThxIkT3H///Vc9Xr169ZgzZw7x8fF5OluYyWTCxcWF9PT06263adMm7rzzTh544AHAOZztyJEj1KpVK89iyQuFWoF5x44dNGzYMPM//MSJE2nYsCHPPvssANHR0QwaNIgaNWowbtw4Bg0adEWl7zfffJOePXvSr18/2rZtS1hYWO6HuYXUAKCKcRrQTGYiIiIiIiIiuXXp0iU6duzIwoUL2bNnDydPnmTx4sW8/vrr3HnnnZnbLV68mI8//pgjR47w3HPPsX37dsaMGQPA/fffT6lSpbjzzjvZsGEDJ0+eZO3atYwbN45z584B8MILLzB16lTeeecdjhw5wt69e5k3bx4zZswAYMCAAYSFhdGnTx82bdrEiRMn+Oabb647+/nVmM1mYmJiiImJ4eDBg4wdO5aUlBR69ep13f2qVq3KypUr2bx5MwcPHuSRRx4hNjY2R8cuCDkuUp2X2rdvf92C0+PGjWPcuHHXbcPLy4uZM2cyc+bMGw8opCYcgtCME0Bbjsel4HAYuLiYbrxtERERERERkWLEz8+P5s2b8+abb3L8+HGsVivly5dnxIgRPPXUU5nbvfDCC3zxxRc8+uijhIeH8/nnn2f2rvHx8WH9+vVMn
jyZvn37kpycTNmyZbn99tszexQ99NBD+Pj48MYbbzBp0iR8fX2pW7du5rTxHh4e/PLLLzz++ON0794dm81GrVq1cpxH+OmnnwgPDwfA39+fGjVqsHjxYtq3b3/d/Z5++mlOnDhBly5d8PHx4eGHH6ZPnz4kJibm6Pj5zWTkckowf39/du/efUNDzIqKpKQkAgMDSdy9goBv78MIiqTqhWnYHAabnuxI2aB/H68oIiIiIiIiktcyMjI4efIkFStWxMvLq7DDyXMmk4klS5bQp0+fwg7lpna98yQz55GYeNVhen/Jdg+iZcuWZXnvcDhYtWoV+/bty7K8d+/e2W2y6PlziJkp4TQ1S5rYG2dw7EKKEkQiIiIiIiIickvLdoLoatm8Rx55JMt7k8mE3W6/4aAKjU8wBJSDpHN08jvD3rhyHLuQQrtqpQs7MhERERERERGRfJPtBFF+TqVWpFRqD7sW0ordvEk5FaoWERERERERySe5rHoj+aBQZzErkqp0BKBa6m8AHFeCSERERERERERucdlKEC1btgyr1ZrtRn/88UfS09NzHVShqtQBMBGYdIQQLnP0QnJhRyQiIiIiIiLFnHrayPXkxfmRrQTRXXfdRUJCQrYb7d+/P9HR0bmNqXD5BEOZhgC0dd3D5TQrl1LMhRyUiIiIiIiIFEfu7u4ApKWlFXIkUpT9dX78db7kRrZqEBmGwdChQ/H09MxWoxkZGbkOqEiocjtE7aSL1wG+Tm3HsQsplPTL3mcXERERERERySuurq4EBQVx4cIFAHx8fDCZTIUclRQVhmGQlpbGhQsXCAoKwtXVNddtZStBNGTIkBw1ev/99xMQEJCrgIqEyh1h/Rs0N/ZgwsGxuBSaVypZ2FGJiIiIiIhIMRQWFgaQmSQS+aegoKDM8yS3spUgmjdv3g0d5KZTril4+BNgSaSO6RSbjpXh/uaRhR2ViIiIiIiIFEMmk4nw8HBCQkJyVB9Yigd3d/cb6jn0l2xPc1+suLpDxbZw+AfauOxh7sEqpJht+Hnqn0tEREREREQKh6ura54kAkSuRtPcX8uf09138dyP2ebg1wOxhRyQiIiIiIiIiEj+UILoWirfDkAd4xB+pLF8d1QhByQiIiIiIiIikj+UILqW4IpQoiKuhp2WLgdYfzSOxDSN9RQRERERERGRW0+OE0QnTpzIjziKpqp3AHCf7x9Y7QY/748p5IBERERERERERPJejhNEVapUoUOHDixcuJCMjIz8iKnoqHsvAO3sm53DzPZomJmIiIiIiIiI3HpynCDauXMn9erVY+LEiYSFhfHII4+wffv2/Iit8JVrAqWq4e4w08N1G5uOXeRiirmwoxIRERERERERyVM5ThA1aNCAt99+m6ioKD7++GOio6Np3bo1derUYcaMGcTFxeVHnIXDZIIG9wMw1HsjDgNW7I0u5KBERERERERERPJWrotUu7m50bdvXxYvXsxrr73GsWPHeOKJJyhfvjyDBw8mOvoWSaTU7w8mV2raDlLJFMUXv53FZncUdlQiIiIiIiIiInkm1wmiHTt28OijjxIeHs6MGTN44oknOH78OCtXriQqKoo777wzL+MsPP5hmcWqB3psZH9UEh+sL0aFukVERERERETklpfjBNGMGTOoW7curVq1Iioqik8++YTTp0/z8ssvU7FiRdq0acP8+fPZuXNnfsRbOP4cZna/92ZccPDmyiPsO59YyEGJiIiIiIiIiOSNHCeI3n//fQYOHMjp06dZunQpPXv2xMUlazMhISHMnTs3z4IsdNW6gk9JvDMu8FiFs9gcBo99uYsMq72wIxMRERERERERuWEmwzCMwg6isCUlJREYGEhiYiIBAQFX32jFk7DtfawRbWlz/lFiUh0Mb12RZ3rWKthgRURERERERESyKVs5D3LRg2jevHksXrz4iuWLFy9mwYIFOW3u5tF4KLi44X5mPStKvkkAKczdeJIdp+ILOzIRERERERERkRuS4
wTR1KlTKVWq1BXLQ0JCePXVV/MkqCIppAYM/Ao8/ClxYRsrA1+mnOkCTy3Zi8WmWc1ERERERERE5OaV4wTRmTNnqFix4hXLIyMjOXPmTJ4EVWRVuR0e/AkCyhJqPsNSz+e5HHuOjzZoVjMRERERERERuXnlOEEUEhLCnj17rli+e/duSpYsmSdBFWlhdeChVVC6BqVIYLL7F7yz6iinL6UWdmQiIiIiIiIicgv5btd55m48icOR/+Wjc5wgGjBgAOPGjWPNmjXY7XbsdjurV69m/Pjx9O/fPz9iLHoCwuHOmQDc7bqe2vZDPL10H6r3LSIiIiIiIiJ5YfGOs4z/YhcvfX+Az7bn/4itHCeIXnrpJZo3b87tt9+Ot7c33t7edO7cmY4dO+a4BtH69evp1asXZcqUwWQysXTp0izrU1JSGDNmDOXKlcPb25tatWoxe/bsLNtkZGQwevRoSpYsiZ+fH/369SM2NjanHyvnyjWBBg8A8ILHJ2w6eoFlu6Py/7giIiIiIiIickvbduISTy3Zm/l+6o8HOXMpLV+PmeMEkYeHB19++SWHDh3is88+49tvv+X48eN8/PHHeHh45Kit1NRU6tevz8yZM6+6fuLEifz0008sXLiQgwcPMmHCBMaMGcOyZcsyt3nsscdYvnw5ixcvZt26dURFRdG3b9+cfqzc6fQceAZQ13SCe13X8vyy/cQlmwvm2CIiIiIiIiJyyzl9KZWRC3/HajfoUTecFpWCSbPYmfT17nwdamYyisi4KJPJxJIlS+jTp0/msjp16nDffffxzDPPZC5r3Lgx3bp14+WXXyYxMZHSpUuzaNEi7r77bgAOHTpEzZo12bJlCy1atMjWsZOSkggMDCQxMZGAgICcBb5lFvw8hQRTAG3T/0frulWYdX/jnLUhIiIiIiIiIsVeqtlG7/c2cjwulXrlAvny4ZbEJZvp+vZ60ix2nu9Vi6G3XTlx2PVkN+eR4x5EdruduXPnMnDgQDp16kTHjh2zvPJSq1atWLZsGefPn8cwDNasWcORI0fo3LkzAL///jtWq5VOnTpl7lOjRg0iIiLYsmXLNds1m80kJSVleeVasxFQugZBRhJPuH/Nj3tj+HFvdO7bExEREREREZFi6Yc90RyPSyU0wJM5g5vg7eFKREkfpnSrAcC0nw5x6mL+TJKV4wTR+PHjGT9+PHa7nTp16lC/fv0sr7z07rvvUqtWLcqVK4eHhwddu3Zl5syZtG3bFoCYmBg8PDwICgrKsl9oaCgxMTHXbHfq1KkEBgZmvsqXL5/7IF3dodvrADzg+is1TGd4Zuk+4lMtuW9TRERERERERIqdVYecNZUHNoskJMArc/n9zSNpVbkkGVYHr/x4MF+O7ZbTHb744gu++uorunfvnh/xZPHuu++ydetWli1bRmRkJOvXr2f06NGUKVMmS6+hnJoyZQoTJ07MfJ+UlHRjSaJK7aDWnbgc+I7XfT6ld+pTvLB8P2/3b5j7NkVERERERESk2Miw2tlw9CIAt9cMybLOxcXEi3fWpstbG1h5IJatJy7RolLJPD1+ropUV6lSJU+DuJr09HSeeuopZsyYQa9evahXrx5jxozhvvvu43//+x8AYWFhWCwWEhISsuwbGxtLWFjYNdv29PQkICAgy+uGdX4F3LypZ99Pb9ctfLcrivVH4m68XRERERERERG55W07GU+axU5ogCe1y1yZp6gS4s+AZs7OLa/+eDDPC1bnOEH0+OOP8/bbb5Pfta2tVitWqxUXl6whurq64nA4AGfBand3d1atWpW5/vDhw5w5c4aWLVvma3xXCCoPbZy9kl72+RIfMnjmu31kWO0FG4eIiIiIiIiI3HRWH3QOL+tYIxSTyXTVbSZ0qoafpxt7ziWyfE9Unh4/x0PMNm7cyJo1a1ixYgW1a9fG3d09y/pvv/02222lpKRw7NixzPcnT55k165dBAcHExERQbt27Zg0aRLe3t5ERkaybt06PvnkE
2bMmAFAYGAgw4cPZ+LEiQQHBxMQEMDYsWNp2bJltmcwy1OtxsEfCwlIOM1kn+U8d+keZq05xsTO1Qs+lmxISLMwb9Mp9kclUibIm4hgHyqX9qN11VK4u+Y4dygiIiIiIiIiOXXud4yN0+lyJIZ27nbqxQXCZ55X3bSUpz//bXIPUzbB6z8dpkvtMLzcXfMkjBxPcz9s2LDrrp83b16221q7di0dOnS4YvmQIUOYP38+MTExTJkyhV9++YX4+HgiIyN5+OGHeeyxxzKzaRkZGTz++ON8/vnnmM1munTpwqxZs647xOyfbmia+3869CN8MQCHyZ1OGVM561KWFePbUiXE78bazUOXUy3M2XiCBZtPk2K2XbG+WcVg5gxpQoCX+1X2FhEREREREZE882EHiNqZ7c0Nd18mG2P5KqUek7vWYFT7ytfdPrs5jxwniG5FeZogMgxYdC8c/YX9Xo3okfA4zSuW5IuHW1yzi1hBiU+18NGGE3yy+RSpFufQt5rhAfRrVJZLqRbOXEpj3ZE4Usw2aoUHsODBZpT2v3rWUkRERERERERuUNQf8GF77CY3nrIMo2pYIA+1rnTt7fd8ASfXY2Didet9fOnZj81Tbr9uL6Ls5jxyPMQMwGazsXbtWo4fP87AgQPx9/cnKiqKgIAA/PyKTk+ZQmEyQbfX4MQ6amfspI/HdpaebM7CbWcY1CKyUEKybPuYjFVTcTOnMAoYZQIXbxOebi64pZkwbfz/xJXVx4OZrl15K7ord8/ezKcPNieipE+hxC0iIiIiIiJyS/ttLgCbPVvzZXoHXm5aBxpeJ3dQ71746UlMv81hsvsXBFpS+G5XDe5rGnHDoeS40Mzp06epW7cud955J6NHjyYuzjlT12uvvcYTTzxxwwHdEoIrQevHAHjFexG+pPPKDwc4EZdSsHHYrfDjJDxWPEaA5QIBprTMl5+Rirs1GZM5CcyJmS/39DgmOD7lA98PibmUwP1zt5JuUaFtERERERERkTyVngB7vwbgvaS2AHSsEXKdHQBXd+gxHbq9AcAw15/4ZsPuPJlILMcJovHjx9OkSRMuX76Mt7d35vK77rory2xixV7rCVCiAr7mOF4v9SMZVgePfbkLq91RMMdPi4eFfWH7hwC8abuHdZ1XYIzZAWN+v/ar6zQwudLFvo5vvV8hIz6K99ce+5eDiYiIiIiIiEiO7PkSbOkk+Vdhm6M6NcMDKBPk/e/7ATR/GFtofTxNNhpc+oEtxy/dcDg5ThBt2LCBp59+Gg8PjyzLK1SowPnz5284oFuGuzd0ex2A7qlLGea1lt3nEpm5pgCSLXYbLOwHJ9eThhcjLBOxt5lEu1atMJWqCqWqXPvVYhQM+ha8gqhtHOVTj2l8sO4Ypy6m5n/cIlLg7A4Du6PYl6ITERERESlYhpE5vGypW1fAxO3/1nvoH9yaDQdgoOtq5m08ccMh5ThB5HA4sNuvHHJ07tw5/P39bzigW0q1LtDwAUyGnef4kBfd5vH+6kPsPHM5f4/72xyI2kmaix93mZ/nTOkOjL29Svb3r9QeHl6D4RlADZezNDd28/zy/XnSZU1ECldMYgb3fbCFOs/9TJWnfqTyUz/S6KWVfPP7ucIOTURERESk+Di9CS4exu7mw+vRDXAxwT1NyuWsjbp3Y/fwp4JLLBlHVnP60o117Mhxgqhz58689dZbme9NJhMpKSk899xzdO/e/YaCuSX1fg86Pg3AYLeVfOL2Ck9+sproxPT8OV5SNKx+GYCXzfdxzBTJ9Hvr4+l27YrmVxVcCVODgQAMclvF2sNxrDwQm9fRikgBOnUxlbtnb2bbyXhSzDZsf/YcSky38vji3Uz+eg8ZVtUcExERERHJd3/2Htri24EUfOhVvwyRJX1z1oaHL64NBgBwv+uvzN986oZCynGCaPr06WzatIlatWqRkZHBwIEDM4eXvfbaazcUzC3JZIK2k6D/5xgefjR3OcTH1v/w6sdfkWax5f3xf
n4KLMnsoSqf2zswukMV6pQNzF1bTR4E4HaXnYRziReWH1DBapGb1KGYJO75YAvnLqdToaQPy8e0ZuuU29n5zB1MvKMaJhN8ueMsfWZu0pBSEREREZH8lHIBDi4HYGrcbQCMal85d239+bu9k8vvrNuxh1Rz7vMMOU4QlStXjt27d/PUU0/x2GOP0bBhQ6ZNm8Yff/xBSEjOxssVKzW6YxqxGmtQJcqZLvJ6whMsmvsWjrys/XFsFez/FjsuTDEPo1FkScZ0yMHQsn8qXR0qtMEFBw/7ruN8Qjqfbz+Td/GKSIE4EJXEfR9sJS7ZTI0wf74a2ZK65QIJC/Qi2NeDcbdXZeHw5pTy8+BQTDIDP9qaf70cRURERESKu52fgMPKGe9a7HdUoFPNEGqEBeSurZCaGBEtcTM56GVbycZjF3MdlslQYRmSkpIIDAwkMTGRgIBc/kfJrvQEEhYOJuj8OgDWhzxA0wffxNvL4192/BepFzHmdMJ0+SRzbd340Ochlo9pTUiA1421u38JLB5Kumcp6ibOoHSgH+smdcDDLce5RREpSHu+gp2fYLXZ2Hc+EYvdgZ+nG9VD/XFzvfrfr8XmYF9MKv9L70lcqRYsHtmSIJ8bvDaJiIiIiMj/c9jh7fqQeJb/2Ebyla0t3z7aikYRJXLf5t6v4ZvhJBneXPKrTsVSWYeqJaVbCRz967/mPNxyetxPPvnkuusHDx6c0yaLF+8ggoYv4fBnT1D9+Me0vbCQLa8fIKXHbDo1qobJZMp5m1G74MsHMCWeJdoI5j3jXuYNanLjySGAGj3BLxTvlFju8d3N54mNWfrHee5tWv7G2xaR/HHpOHw3Buxm3IGG4OwvagWuU4vaA2gEvOtxhg4XKvDg/N9Y+FBzfDxy/FUhIiIiIiJXc3QlJJ4l3dWf7zJa0KJS8I0lhwBq9sLsHUpAeiwBqbvgnxUjzNnrF5TjHkQlSmQN3Gq1kpaWhoeHBz4+PsTHx+ekuSKhQHsQ/c2eFXOovm0Knlg47gjnw9Cn6dKqCW2rlsbNJZuJoqO/wPLxYMvghCOMEdbHeaRfN+5tkocJnNWvwPrXiQpqSquYx6hUypeVE9vhmt0YRaTgGAZ8ciecXMcp/0a8dqkNnm4u/KdrdcoEev/7/mtehYuH+ZpOPJHxILfXCGHOkCa5S16LiIiIiEhWn90LR39mnqMHL1ju59PhzWhTtfQNN5sRd4pJ78zDZjd4tmdNwv9275+UkkZgi/vzvgfR5ctXTtF+9OhRRo0axaRJk3LaXLFWr9tDZNRuRNLC/lS2RPNa3Gj4LndtrbY3YIJ1NPe1qZO3ySGAxkNgw/8ok/AbzbzOsf1iOX7aF0OPeuF5exwRuXF7F8PJddhdPBl8cRBnjFBm39uIMnWy+ffqFwrzunI3v/Kt222sOgSLd5xTr0ERERGRfGB3GBiGcc0SAHKLuXza2ckDWGDtSN2ygbSuUipPmvYqXYGECt3ZcPQije01eah2pf9fmZSUrTby5CysWrUq06ZNY/z48XnRXLHiFdGIgHGbSI9ol6v97S4evGe/i4esT9CzeU2e6l4zjyMEAstB7bsAeMv/U0w4mLX2GCpfJVLEpMXDT1MAeMvShzNGKGM6VKFrdpNDAJEtodEQAGYGfIIHVl7+4QAXkjPyI2Ippiw2BxuPXuT5ZfvpM3MT47/4g483nmTnmcs3NPOGiIjIzSDDamflgVgmLd5N01d+pdazP/PUkr2cjU8r7NAkv/0+HzDYQl1OGeGM7lA5T3vqt6/unDhs7eG4XO2fZ4Ul3NzciIqKyqvmihe/0ng/uAzsNsx2Oyv3x/LB+hMciHZm+cL8vRjVvhJ9GpXD789aICkWG59tPc2bvx4jww59G5bl5Tvr5N8wkM4vw5FfKJO8l2Eeq/k4qhNrj8TRobpmrhMpMlY+C2kXOWqUY7atB73ql+GxO6rlvJ07XoDDKyiRepJnS6zk6cvdeWHZAWbe3yjvY5Zbn2HA8
VVw+TQXks1sPHaRfecTMdscANQGiILje+H4n7v4eLgS5ONBsI87Qb4elPBxJyggkEqt78XD7wbH6IuIiBSi7SfjGbXwdy6lWrIsX7TtDF/+dpY7G5ThyW41CPHPg3qyUrTYLPDHpwAssNxO5dK+dK4VlqeHaFetNC/hPM9SzTZ8PXOW8slxDaJly5ZleW8YBtHR0bz33nuUL1+eFStW5CiAoqCwahBdj8NhsGx3FG/8fJjzCc7ppv083birYVlK+nkwf/MpEtKsAHSvG8Y7/Rvmf7fE7R/Bj0+Q4epLu9TXcA0sw/KxrSnp55m/xxWR67NZ4Oen4LePALjb/Cz+1drw4eAmuOf2uvDnTAgOV0/uSJ/KcUcYHw5qTOfaefslJrc4SyosnwB7v8qT5qJMYVzu9TG1G92WJ+2JiIgUpO92nWfS4j1Y7A7CArzoWieMzrVCMZlMzFp7jA1HndOT1wjz55tRrXL8416KuG0fwopJxFGClhlvM+2extzduFyeHsIwDNq8voZzl9OZO6QJt9cMBbKf88hxgsjFJeuPDZPJROnSpenYsSPTp08nPPzmq0tTFBNEf8mw2vl8+xk+3XKaExezliKvVMqXUe0r07dRuYIpGO2ww9zOcH4H611bMjh1LLdVKcmCYc00ZlaksCTHwFdD4OxWAF6z9uf38kNZ8GAzvD1cc9+uYcBnd8OxXzkd0Jh2FyYSGuDFyontCPByz6Pg5ZZ26TjWz+/H/eJBbIYLaxwNseFCaIAXFUr6UMLHg6t1erXaDdKtdtIt9v//X4ud8LSDhHORNMOTZZFT6DZgDIHeOhdFRKToMwyDWWuP88bPhwHoWjuMN+9rcMW92q6zCTy0YAcXU8x0qhnKh4Ma43KTTgyUYbWzPyqRP84ksOdcIgHezs4OjSJKFM/JTxLPwczmYEnhaesw1vj3Zu2k9rl/mHsdTy/dy8KtZxjUIpKX+tQB8jFBdCsqygmivxiGwebjl/hs22kup1oZ0DyCHnXDC34msZh98GE7cNh40vEoX1ha80i7Skzplg+1j0Tk+uIOw4LekBJDKj6Ms4wiOrQDXzzSIm+SOJdPwcwWYEtnmsdYZie15IEWEbzcp+6Nty23NPvBH7F9MwJPWwpxRiBjrOOp1OQORrSpRKXSfrlqM/FSDLHzHqBaym8AfOx6L7eNmEH1MP+8DF1ERApJdGI6u84ksOtcAr4ebgy7rQL+N/NDqd/mwN6vMQyDM/GpXEg2AxAW4EW5Ej5c61dckt2NJ8625BdrQ0a2q8yT3WoUXMx54GhsMnM3nmTJH+czh5P/XaXSvvRvWp5ht1XMl+RIkWQY8MVAOPwje1xqcGfa0zzfuy5DWlXIl8OtPBDLiE92UD7Ym/WTOmAymZQgyombIUFUpPz6PGx8E4BZtt78z3Yv7w5solnNRAqS3QZzbofoXUR5RDIweRwpvpEsG9OaMkHZmM4+uza9DSufxeoRRPOkacQTwOKRLWlaITjvjiG3DoedtF9exmfrDAB+c1RjVqlneLxfO+qUDcyT9s8veZqye2cBMMx4lsEDB6ke3i0oMc3KjtPxbD8Vz/ELqQT7uhMa4EVIgBcda4RQNi+vcyJSaMw2O1/9dpaPNpzkzD8KNJcN8ub1u+txWx7N8FSgzu903qcZVyZIsmumrTfTbffy+j0N83wYUn7Yey6R//1ymHVH/r84cik/DxqUL0GD8oGcuJjKir0xpFvtAPSqX4a37mtQ8B0eCsOB7+CrwdhNbnTNeJXLvpXYOLkjXu430Nv/OlLNNhq+uBKL3cHqx9tRqbRf/iWIJk6cmO1tZ8yYkZOmC40SRDlktzmL4W6dCcAGex3+w3g+eKQz9coFFW5sIsXFpndg5TNkuPnTJmUaCa7BfD6iBU3yOnFjt8KHHSB2LzuDOtM3ZiiVS/vyw7g2+falJjeptHiSPhtMwPkNAHxqdMO1y8v0b1E5z7vHZyydgNeueZx0hNLd+hr/6dmAYbdVz
NNjSAFLi4f0yySlW3h1xSG2nbzE1e5Q7bgQ5xrKmI7VGNG2Ep5uRfg6ZLdBwukrFidnWDkam8z5xAzikjK4kGzG6l2KXs1q0KB8UMHHKVJQ7FZIOAM4hx+t2BfNom1niEtx9qxxNZmoWMqXGuEB/H46nuhE5wyqPRpWYkyfdjc2dL4g/f3eybslHyQ2x83FxNBWFbL3gO3k+sy6khvsdXjcPpZXH+hAp1qh+Rx4LljSIDman/fF8L+Vh7HaHZhM0KZyae5uWo66ZQKzDCdLtdhYuT+GV9bEEGf3o3/T8kztW/fWHnKWnuAcWpYSwzu2Psyw3cvb/RtwZ4Oy+XrY++dsZdOxS/y3e01GtK2UfwmiDh068Mcff2C1WqlevToAR44cwdXVlUaN/n+GG5PJxOrVq3P5cQqWEkS5tPdrjGVjMVnTOOooy1D31/lidEfKB/sUdmQit7b4EzCrFdjS+Y91BF/ZOzCtb136N4vIn+Od+935FAyD0a7P8kNqDcZ2rMLjnavnz/HkpuM49wdpCwfglxFNuuHBDO/R3DvscaqG5tPwr4xEjJnNMSVHM8vWm9dt/XmpTx0GtYjMn+NJ/nE4YOMMWPMqGPZs7bLbUYnR1vF4lKzAS33qFM3eBVG7YPEQ51DdbEg3PHja+iBHy/RiUItIejcoU+jJrxSzjdOXUjkbn0ZiuhU/T3f8vdwI9HanUmnfm3vojxS887/DV0Mh8Uyudn/fdxT9HnmekICbYGavDTNg1Qskm/xpn/46ae7BzB7UmHbVSme/jb/9zjpvlGS8/THGD+lPm6o5aCO/HfoBY+koTBmJOd7VwIU3bX1519aHh9pU5qnuNW/NJFHqJfh6GJxcx0lHGF0t0xh9Rx3G3V413w/9yZZTPPvdfmqE+bNifBuSk5PzJ0E0Y8YM1q5dy4IFCyhRwjnV7OXLlxk2bBht2rTh8ccfv7FPUgiUILoBFw7i+OROXFJimWXrzTclhvPNqFYE+XgUdmQ3rbPxaaw7Esf6I3FsOxlPusUOJjABFUr6Mrx1Rfo0LIuH260/Ztdqd7Dj1GXWHrnA8QspdKkdxl0NyxbvouiGAZ/0hpPr+c1Ul3vSn2RQiwqZBejyzYrJsG02aT5laRz/ElYXb74f15oaYbpmFneX1n+E/+opeGDlpCOUzyJfZvz9d+X/j8dDP8AXA3HgSg/zyxwxVWD+sKZF6uY5w2pn8/GL7D+fhNnmwGJ34HAY3Fa1FO2qlr5pC4/mmfQEWDISjjhnwE3BG4cBLiYT3h6uuF7lx4JhTcfksJKIH+Mtj7LOaMCLvWszqGWFgo39WgwDdi6AH/8DdjNWkzsZDjf+ebPtYjLhYvrzf3Hg4XDOWLvQdjsv2gYT5O/HQ20qMrB5JH4FMItRusXOiYsp7Dh1me2n4vn91GVikjKuu0+5Et7UDA+gY40Q7m5crvjUEsmBwzHJLNt9nt9PX84svG+2OXB1MeHl5oqXuwsNI0owqUv1W7ZX7rHYZC6t/5DGB6biZlgxG+6YcZ7TLiYTHm4uuLuaMF2jIo/NbsfNlkqa4ckDnm/x8tCe1CpThO89Lh3HMaslLnYzEy0j+dWjI/OGNaVxZC56eF84iPHFA5jij2ExXHnFGEqPof+lWaWSeR93TthtsPol2PQWAGmGJzZc8HRzwcPN9Zq1lTIZBliSAVhlb8hj1lE8dEejAkmaFKjzOzG+HIQp6RzphgeDLE9SoVEn3ri7XoEkwxLSLDR7dRUWm4PlY1oTGWDKnwRR2bJl+eWXX6hdu3aW5fv27aNz585ERUXl7hMUIiWIbtCfN+k2XLjT/DK+FRrxyYPNbtkvujx3YBmc247dMNh49CKHYpKvuekhRwRLHK0JD/Tm4baVeKBF5C15Q3Yxxcz0X47w/e4oks22LOsqlvJl/O1V6VW/TPEYs/xPOz+FZWOwmDzplDENt1KV+LEghnuZU2BWC0g8y8rAfoyI7UfN8ACWPNpKf+vFlMOSztEFj
1L9/LcArDYaE9PxLQa0LcCu4l8NhgPfccarOnckTMHDy4clj95GlZDcFcLOKz/ti+bbnefZcPRiZq2Ff4os6cOgFpHc06R88ZyNLe4wLLoPLp/E7uLBs9ahfGZtT92ygcwd0uTavQQSzjhnbozaiQMT79r68LatH+M6VWf87VUL9wm0JQ1+eBx2LwLgV3sjJlpHkoQf5YO96VWvDC0qlaRu2UBK+P7tQZrDAetfx1g7DRMGB02VWW91FqX1cHOldIPudO/d/8YTioYBOz6GyycxDNgXlcjxuFSS0q3XPE+93V3x93LHy90F659JznSrnTTL/2+/xVGLM8Gt+U/X6nSpHXZr9gLIJrPNzq7Tl4nftoiUUzuJT7MAkGD4s9DeiWSu3su+frlAPhzchNCboXfMv7l4DHZ/TlxCMjtOx+Ny+SRdXHcA8LO9CU86RtGqdiUGNI2gVeWS/35eOxxkzOmGV9RW1tvrMsr0X94Z0Chz6u4ixW7l8gc9KHFhG+vtdXnW/0U+HtYs15MzAJCRhGPJKFwOfw/Ad0ZbSt43k9a18qnX+L9JuQBfPwinnMPJ59q68abpfqbd05ie9cpkv50/FsL3E8Fu5rQjhIesTzCgR2cebF2Eh4sbBhxcBud+u+5mqWY7lxISKHNiMW6G8+HZSOtjlK7ciHnDmhbob7exn//B8t1RDGoRyaSOEfmTIPL392f58uW0b98+y/I1a9bQu3dvkpOv/eO2qFKCKA98NQQOLOWAUZFe5hdpVTWUjwY30Q/Hf3N6C8zrBlc8W7y2taZmjE1/mGR8aFEpmPfvb5z1RvMmZrU7+HTLad789QjJGc7EUElfD9pVK03ZEt4s3Hqay2lWAGqE+fNsz1q0KopDC/JLcgzMbAYZibxiHcgcR0++HtmKxpElCub4R3+Fz/phmFwYanqZdWkVskyfeSs6HpfCygOxbDgaR0qGLfMvtZSfJ22rlqJd9RAqlvIt1BgLw9kThzEvup8qtqM4DBOLAwbTcsirRJQq4MRMcgy81wzMiRxzq8qQlDG4lYxk6aO3Fcp10eEwmLriIB9tOJm5LDzQi5aVSxLg5Y6HmwspZhvLd0dlXuP8vdwY2a4yw26rgI9H/vcUKRKsGfBBG7h4hGSvcAYkjmafUYlONUN4Z0DDf/93sJnhpymwYy4A6+11GW8dTe+WdXmuV+3C6Zl16bgzYRm7DwcuvGG9l9n2nvSqX46ht1WgYfmgf0+cHF0J3zwEGQlXrPo+oD8tH5pByYAbuN78vgCWj8v9/tcxz9aFV233U7t8KR5sXZEutUMLfYhcfrI7DNYfiWPTsYskpltJyrByKcXCifMxvGR6nx6u26/YJ80vgsPtZmEvXRubwyDDaicu2cyrPx7kcpqV0ABPPhrc5Oau57nvWxzfjcHFmpplsQMXNkaMwtZyHI0jSxLok8Ok+MVjGO+3wmQ387hlJEuMtjzdoxbDbqtQdBKSyTEkL3wA/9jfSDc8mBz6AS8M6Zk330WGgXXDW7iufhEXHBxwRHKgzXv069SmYD//6S0Yi4diSokhxfBisvVh9pfoyKz7G+euV1fULvhqECSc4YQjjG6Wabx0dxPubVI+z0O/YZY0jB8ew7T7ixzt9ou9Ma97T6BTw2qM6VilQHqE/t3Goxd5YO42Arzc+HVsM0JLBed9gmjw4MFs2LCB6dOn06xZMwC2bdvGpEmTaNOmDQsWLLixT1EIlCDKAykX4L2mkJHA/xwDec/Sk9ZVSvHR4CY3T0G5gmbNgNmt4dJR9nvUY2NaedxcTNxeM5QKJa/yhMmaDjs/AbuFJJ8IhqaMZqelPJElfZg7pAlVQm7uqZ7PXU7joQU7MntQ1SkbwFPda9Ki4v8/XUox21iw+RQfrDtO0p8/rjrXCuW/PWoSWbIY/Ej/chAcXMZBU2V6pj/Pg22q8N8etQo2hm8fhj1fkhJYjYaxT2PFjffvb0S3ujfnLIbJGVbMNgcuJmfn9qjEdPaeS
2T3uUS2nbzEibjUf20jItiHRhFB1CsXRP3yQdQuE5BnyfELyRnsOZvI/qgk9kUlcinFjIebC55/Dk0I9vWktJ8Hpf09iSzpS83wAEr7e+bJsa/GZnewcvkXtPjjP5QwJZNg+PF70//RoXse9G7IrRNrYfFQSL9MIn6Ms4zGpWonPh7atEBvnM02O08s3sPy3c6e1MNbV6Rvo7LUCg+4Io40i42lf0Qxf/NJjsSmAM6k46PtK3NHrVDKlfAuOj968sOql2DD/0h2C6ZdyqvEE8DQVhV4pmetnPUM3f0lLB8PtnTOGyUZbRlPyeqtmHFfg4LtlXVwOSx9FMxJXCKQMZYx7HWvzyt31cl5EdKEM84n69Y0HAacPXWMyGjnELwdpjqY7p5L49q5mPI6KcpZJNWcxGqXlhy1BOPmYqJphWDCA73x93LD082FHJ92qRdh9+cA7DKqMtI8jhhKUtLXg7ublKNr7TDqlg28ZYaGX0ox8+WOsyzadoZzl9OzrKtqOsds9zep7BKNDTdORfSlfGhpPN1Mzt7iiWfAzQt6TIeGD2Tud+ZSGsMX/MbRCyl4ubvwv3vq56wnRlFgs2D88jSm7R8AsN1RnV2OKlQN8adhhWCCGt4F5Zvd2DH+rOuT5upP29TXuUggg1pE8lyvWoV/fp3ahGPxUFxSL5BsePNpmf/y0PDReV4OwnJsLebPh+BvTyDJ8OGbyGehelcORCVxODaZyJK+TOhUlco30mPpagwDts7C+OUZTIado46yjLROoFrtxrx2dz0CbmRIeeolZ/IvJcY5O7a9P+8OaFS0Zse+dJyMzwbiFX8Iu2HiK3t7kq7RGxCcZUFK+HjgElaL8u0fpEmFbPSUyycOh0Gb19dwPiGdqT0rM7BNzbxPEKWlpfHEE0/w8ccfY7U6n+S7ubkxfPhw3njjDXx9b74faUoQ5ZE/FsJ3o3G4evKIdSIrLXW5rUpJ5gxuqiTR1ax6ETZMJ95Ugvbpr2F4BjFnSBOaX29c8fmdzt5aiWcwMGHGHcNwPpk5X7YbgX2nE1qqkMclZ5fDDuvfgK3v47BlYLE5nDPWmMDdxQVXl2uNRnf2t7LZHdgczsvXWSOEb8v9h5btu9OmSqlbs67HweXw5QPYcaWn+WXMJWvx4/hCmEks9RLMbAppl9hV+k7uOdsPLy8vfhzXpugXqLeZMa94Bpfdn2HYrTgM46qzJP2Ti4sJV5Mpyw8nhwEOw8DhuHoDpr/XF3EBl38fkf//bWPgcDifUDty9hX957Hhgns5ttZ9gQp1W1OvXGDuz5OUOFg+Hsfx1dgdDuwOAy+cQyZOuFfF+4HPCI8sAsXKE844e29E/YHDMDHJ+ggNej9aMEWrDYP0rXMwr3wFT7szoeju5oJbNn5pG4DdMLDZHVnORZMJHC7uHClzF3HNJ1MxNJgKJX0K/0dQXojeg/FRB0wOG49YJvCL0YxnetTK/bCC2P3O5Hn8cSyGKy/aBrMp6E4+HNwk/4qk/8VuhVUvwOZ3AdhhVOdR8zjCylXg3QEN8+zBxfmNCwn+9XG8ySDWCOJVn8mUrd+RrnWcyZd/TSYaBnwxEA7/yC5HZfpaXiCylD+z7m9EzfA8uO89vAK+fQTMiThwxYLrFddWFxcTMUGNOX3bNCpWqkrZIO+b47t61+ew6gWM9ARsDgc2+9//UMn8bjBhwtVhwYQDw78MpnsXZE2IpMU7H7AcW+l833AQdH8D3L0B58OKcZ//wZrDzunBx99elfG3V705/o0Sz2P7aghu553DbmbaerO5/Ehe7Fs/bxMVdit81AFi9nKuRDO6x4wgyfClbbXSvH9/I3wLuGdGpgPLnA8pDDuHHOV5wXsyHz7WP9/q8BmJ57gwtz+hSXsBZ4H7v2xz1GSK/WE6NWvA+E5VKeWXBw+MzMmkLh6J7zHnELdl9pY8zyOM7tKAB/OqB9ef5UrsuNLb/CJHXSrz9aiWhdObzmaGX5+HPxZm3iua7BZccBBnB
DLRMQ6/6h1IMdu4nGYh3WKnXAkfKpbyJbKkD7XCA6hTNrDwzsereHPlEd5edZRmZb1YPK5T3ieI/pKamsrx48cBqFy58k2ZGPqLEkR5xDDgs3vg2EoMTMwy+jHd3IfwIF8md6tBr3rht/YT0ZyI3oPxYXtMhp1HLI/xm/dtfPJgM+qUDfz3fdPiYckjcPSXK1YdcpTnnZLP0KBhE+5qWC5fexLckNSL8M1w55P/PGI1XHnFdj+rAvrQtGJJqof6Uz3MnxphAYQGeN7c597fpsd8z3YnM+z3sXhky9wVPMwL+5c4b4aAw241GJoymtLlKvHVIy2L7LDSy+ePkb5oMGVS9xd2KAXGbLjxom0wX5vu4P7mFRjTsQrBOenqfmarsyt5cvQVq46X60ulIbMw/fnjpkiwmeHHSbBzASmGF70c/+OjsXflbz0icwqW78bhceCbfDvETkcVRlvGk+oVSqdaoXSrE06bqqWy/K3ZHQb7oxLZcvwSUQnppFnspFntuJpMNIoIonkl5zWx0H9s2m04PuqIS8xufrQ3Y4IxkXcHNKRL7bAbazcjCb571JlIB5bYb+MV08OM6VKfuxqWy/lwluxIjoHFw+DMZgDm2HswzXofbWuUYebARnn+YCzt/H6SPxlAqPk0NsOFabYBzLF3p0lkMJO6VL/+w6V938DXD2IxXOlpeZUqdZryWr96efsDNv6kc9a26N3X3eyiEcAE62j2eTWiX6Ny3N884sbqs+QXa7rzevLHpznbr1IH6DcHfK8y/N3hgA3TYc0rgAFh9eDeTyDYmRy1Owym/W2Iao+64fzvnvpF+yHr8TUYXw/HlH6JRMOHJ2yP0qDTAEa2q5w/dSKjd8OcO8BuJs23HA8kjWanNZLGkSX4eGjTgq/nlhLnHPqfHs939lY8ZXuIeQ93oFnFfL4/s1k4++VjlD+68IpVcUYA46xj2e1Wj3sal2NIqwq5/htLOrMHy6L7KZVx5s/77AdIa/AgEzvXICwwj+tlLR4G+7/ljEdlOiY9R2iQP9+PbV0gw8UzrHaOx6Vw/tRh6m6eQHjKlfeK2xw1+K7KSzzaqzXlShTxB6L/cO5yGm1eX4M9I42zb92bfwmiY8eOcfz4cdq2bYu3tzeGYdy0P8CUIMpD1gz46Un4fR4A20z1+c7SBIDywd60v70HNRu0KswIC5/dCh91hJg9/GBvxgTHYywa0YKmFXL4ZZIcA3YLFpuD1Zs20Xz3fynhuEyK4cVMWx+STb7UDAugSYUSVCzlh4fbv/99XkyxcCg6iaMXUki32p29kwwDTzcXSvt7UtrPk5AAL8oEeeV+DK3dChvfguQoHG7evGp6iBXJVSgf7MM7AxoSktOklt1K8o/P4n/c+WTjR3szNjrqZtnE292V0ABPygR5UyPMn4qlfK9542IYYHMYuLo4e4Dg5g01uoNXNpJ3+WXZWNj5CSeMMnQzv8q4LnUZ3aFK4cUDcOhH5+xD5kTi8WesZQwBte5g5sBGReJHKId/gLRLxKda2HH0PM3OfkyQKYUEw5e3fcZSqnpz6pcNom7ZQAJ93DEM48/Zk7ih7zLDMLiQbOZAVBIHYpLYczaBA9HJ1+xpdDUmE9QvF0THmiG0q1Y6R08A0612TsZcxnf9i0TGrQGcT/u2Omrh5eZCm6qlaFWlFJ7/1u098RzGprcxOWwcc5RhkvURykdU5M6GZWhTMxKPwJBsx1SgHA6Med0wnd3KOns93ij9Ct8+2jp/Zn28cAj7l4NwvXQEm+HCey4D6Xbfo1QPu7FeKxlWB/vPJxJ7aDPtj7yEjyOVeMOfmbY7Scd5LriYwM/TDX8vdzzdXIhKTCfD6rhuu+4eHnjW7Erfto0KbfZB67oZuK95gQTDl56O6UwbfAetq+ZRHTnDgC3vYax8DpNh57CjHJ/a78Dk4kqtcH9q1G1M3VY98qYX1skNziKtqRfIcPFhQsbD/ORoRt+GZXnt7nr5V4DUnIL1u3G4/5mQX
Oloyhp7PQCqhPjSrpqzJtrfL8F2h4OMX17G13aZt2x9SWo+iad71Myf67RhQNJ5MP7/XLQ7DI5dSOHAsZM02/ssZc3HcRgm3rHfxTu2vjhw4bYqJXmmZ62iMyvmxWPOZFfsPhyYeMvaj2/sbYgs5cvItpVoU7XU1b8nXNzBP4x/Had3fLWz1lTaJfAMhLaPg+f/Xzd2nLrMd7vPY3a4cDm8HW+O6FbgdUv+lWHA+v9hrHkFEwb7HBV42mMSzwzqkf91EaP+cPamTziNw9WDl+xDmZfRjjplA/nkweY5exByoxYPhf1LOEwkPTJe4pEO1ZnUJRdDQHMr9RL8Ve8p9aLzfjF2H3Zc+MjWnTOGs5B39VA/WlUuRZUQv2wNI7XYDHYdOU7dE3Pwxky0EcyHoc9y71398qbX4dX8Ldk2x30gLyf3pG210swb2vTGko3nf78icZ1itnEwOonDMcnEJJmJT7XgTQZj3L6jxJ/3ipOtI9jnqEiFUr7ULluCTs0bFv7scTfggTnbWL//TP4kiC5dusS9997LmjVrMJlMHD16lEqVKvHggw9SokQJpk+fnu221q9fzxtvvMHvv/9OdHQ0S5YsoU+fPv8f3DXO4Ndff51JkyYBEB8fz9ixY1m+fDkuLi7069ePt99+Gz+/7GdKlSDKB7u/gOUTwJZ1fLbNcGFDxEhaDnoRL49iOGtLUrTzpvLMZhIMXzqZ/8ekfq25r2kezESQHIvly6F4nNt8420VgDjPCAaljOGQvRwVS/ny+YgWuX8aYRiwbbZz/LvD9u/b59Al9zIk9ZpDxXq35Xnb1/WPJ5j3mJ/Ft1obPh7StPCTMJDlibHZcKebZSqd2rTmqe41Cy+mv/2N/dNRt6pEd55N6yaNC/TfL8VsY/vJS+w9l0RyhpUUs41ks42UDBspZhupZhveHq40rRBMswrBNKlQgiCfG7zBNQzY/A7Gry9gMq4+O1F2LLe3YJrbo0wf1JoWN8uN0cWjGO/fllnMtHSbYTzZLY9v2Hd/ifH9BEzWNGKMEvzX9TGeGDEs72+c4086h87F7MmT5i4Z/kywjsYc0Z4hrSrQtU5YwcwG6bBjWTUVt03/wwWDKY5H6T3kCVpWzodz6tQmjK+HYUqJvWLVty6dOd74afo0qfTnD6UcfnbDgE1vYax6EZPh4JCjPKOsEzhphDO8dUX+2z2fEi//jOG3Oc4i3Q5rtnc75CjP+g5fM6J99cJ7oGtN//Mh4nwADng1ZEjiw8QZgZTwcefLR1pSLb+HBf6bfd9iLBuHyZLMRSOAcdYxHPRqyJPdatCvUbm8G+aZeM6ZXPiX2ZASDF8+KDGJsY+OLVpF7H+b45yxD1hk68DLjqF8+ki7gps0I/2ys+7X4R8B+MHUlifSh1IupBQfD21aMEPe/xz675zF+SVMZerz7ajb8ueBRHZZ0uDHJ2DXZ3nW5B9u9Unv/SGt6hVA4mvPV/DtCAxMvO/ow/8s/Rh7e3Ueu6Naztuy25y99TbOyNFu57xrsKnRdMpUqE69ckG3zCyj3+06z9gFm/MnQTR48GAuXLjAnDlzqFmzJrt376ZSpUr8/PPPTJw4kf37s999f8WKFWzatInGjRvTt2/fKxJEMTExV2w/fPhwjh07RqVKlQDo1q0b0dHRfPDBB1itVoYNG0bTpk1ZtGhRtuNQgiifxB6Aze+AORmz1c65qPNUTnfe5G5za0LAgLnUrFyhcGPE2a3wjzMJ/HYqHpvdQeUQPyqX9qN8CR8choH1zzHnAd7u+Hq45v7G6sQ657Cq1DhSDG/GWMdSoUUfnu9dO+8+jN0GW2fC2e0kZ1g5l5BObGIGZtv1nyz/xWRyFlUr7e+Jj4ers2qKyYTN7sj8IZuSYSPVkr0fnG4upj8L6rrg6uIsfuligp0pJZluvpNUvOlUM5RX7qqTN1O7nv0Ntn/gvAn9k90wMuOOT7UQl2zGYs/evwdAHZeTlDVdwmy4sTDoEar1mECbagXQe
+LScecTsti9ODAx1TqAH/zu5odxbYrWrHXWDFh0L5xcxw5HNe61PMsLd9ZlUMsKBR5KxpHV8PVwvCzxJBvebHHUwsBEST8PSlVuTOSdT2NyvwWmEM6J01vgtzkY1nRikjI4diEly/TUfp5ueLi54O5qwsVkIt1iJ9Viw2KHVY6G7CnZkzkFdbOdlza+Cb8+T6LhQyfzG/zn7nbckxezolgz4KfJmT9wN9pr87z7RGY+3OWGew5d95gbZzhr7QAGBmarA7PNgcVmx2I3/uxN5IbLNb6fHIZBRvQhfJKcvTfetd/F27a+RJbyZ2S7StzVsFz+/ahJjsWyeDgeZ5xTIn9udKbK0Nk0rZiPCcfkWFj3GkZKDMnpNmIvJ1E5aSsuGOx1VOBR63gs/hE0iQymcWQJ6pULpFqY//ULraYnYP3mEdyP/QTAN/Y2/Nf6IK1rlmdku8o0yWkv4Bt1/nfYOhusaaRZbJy8mEpMUkbWGjl/cfPApd1k7vjHDMSFZveX8P0EsKZh9w3labeJfB5bnlJ+nnz1SIvCGXJmM8MvT8P2DwHncJIxlrE0qVOTl/rUyZtaLlcc0wKb33bO5HQV6TFH8E44AsAy37u5Y8xMvL2LwHdYwhmY1RIsKbxh689MW2+e71WLobcV8PTkfz4I4dcXwLBz3FSehzPGEeUWwcQ7qjHstgr5V7ct/TKWt5vikRHHTFtvFgc+yCcPNifiapPMFDTDgL2LndOxGwapFhtnLqURlZCeWbsTwN3VBV8PVzzdXbA5DGx2gzSLDeuf1xBvd1cCqrehbr8puLoVUHLSMJxJ5G2zAdhkr80E2xj6tm3I+NurZj9JmhxDxhdD8Tq/BXB+V6eSdUh8gJc7IQGelPDxwNfTFQ83F0yhdaHNRHAroiU6bkCG1c7Dc9fz6aiOeZ8gCgsL4+eff6Z+/fr4+/tnJohOnDhBvXr1SElJyVXQJpPpigTRP/Xp04fk5GRWrVoFwMGDB6lVqxa//fYbTZo4hzH99NNPdO/enXPnzlGmTPZmAFCCqIAYBvu+f48qv7+IFxaijWBW1XmNu/v0K5i6JXYbrH8Dx97FmC1WMmwOzFY7ZpsjW4VqwZlAcXUxOS+qnq74eLiRva8ew/mFajg4ZEQwyjKespXrMn9Y03wvOmoYBucup7P9ZDw7z1wm3WLPnKrb082FEH9PSvt7Eh7oTdOKwdnKlCemW9l3PpG95xNJNdv+LKbrnN3jxMVUjselkJB2/SebtcsE8N8eNWlVuWCnqbc7DP44c5ltJ+Ox/+2LsoSvB5HBPkQE+xDs54HV5sBid3AuKhqP78dQP3UT4OxRsaXWc0y+s2n+1LUAOLsdPu0LlmTS3EswInUk26jHl4+0LLinczmRcBZmtQBLCi9aBzHf0Y13BjTM31lYUi7Aiv9A1C4sdgcpGVYCLdG4YnDQEcF4+wTq1m/CQ20q5l936JuQw2Gw8dhFFm07w8qDsVn+Bv7OZILudcN5rV+9oje0ITvsNphzO0TvYoO9DmPsj/H2kLa0r34Dyd34E86kbcweHJh413YXX3j3Z/7wVvmXHMpL/xgCfoESpDmcyeZU1wDM7Z6mUfs+eXvMk+uxLx6Oa9oFUg1Ppro8wr3DHy+UwqPWwysxvnkID0sCaYYnF4wrY3BzNeHh6nyo4fHny2E4vzdM6ZfxtCVhNtx43jaE5Fr3M65TtcLv8fI3ZpudjUcv8sOeaOJSzDSvGEybqqWpUzawYHqK5cSFQ87ecRcPY5hcmec1iJcudyIs0IevHmlZsEnpy6ecvXmi/gCcRZY/chvAy30bFO5sYjYLF779DyEHnH+zh9xr4T1wAZEVc9GbIq8YBnx6F5xYwy5TTe5K/y8965fjnf4NCq9n2qlNzp7DKTFY8CDK4bxP8nBzwcvdBavdwGJzEG0KYWed/3J769turIC9JY1zc++nXOxqjjnK8Gz4+7w3uFXBDm3LheQMK4t3nGPhttPXnaG1bJA3YzpWo
V+jfHxw8G/2LHbOUGlNJcnwJt4IwM3VRLCPBz7Xqcll4EyEGKkX8THSSDG8eNI6ghW0omqIH3XKBlK/XCAdaoTcdHWE8kJ2cx45ThD5+/uzc+dOqlatmiVBtGPHDrp06cKlS5dyFfC/JYhiY2MpV64cCxYsYODAgQB8/PHHPP7441y+fDlzO5vNhpeXF4sXL+auu+66altmsxmz2Zz5PikpifLlyytBVEASTu7EvGgQodZzWA1XPvQYRL17/pu/vTKSopyZ5Kht+XeMbPjC1p7nbENpX7s8b93XsGgXHrxBiWlW4lLMXEoxczHFQobVjsXuwGp3EBbgRaeaoUVjmFR2GAZJa97Cd/1LuGLnhCOMZzwmMaxvLzrVCs3bY1lS4f1WcPkU8SUb0e38g8QSzEt96hTMjEy59dtc+GEiFpMnd2RM5ZwpnDfva0Dv+nl/c238+aPTLe3CFeu+d+tE7G0vclezqkX+Zq2wXUjKYH90EknpVhLSrKRZ7JQr4U3l0n5UKu1bZAuOZ1vsfowPO2CymznhCONxHuPFhwdQt1wu6okdXA5LR4M5MXOYVmq5tsx+oDEhedH7sSD9rffG39kNE6tDh9Fy2DT8vG/w6emfxXiNta9iMhwcdpTjea/JvDKib+EWJE485/wheTZ39wJnHaV5xe9JBvfrU+APN25JllT4/jHY8yUAW12bMDJ1BF4BpXmrf4OCGdZ66EdYOhIyEknEn/GWkezzacEnDzajVpmi8Zvg+LrPCFn9OP6mdOINP36u/iI9+g6+sanFc8m+Yz6u348nw3Cnq2Ua7qWrsnT0bYU/Y1PKBeff9qkN190s1fBkivUhTpftQY+6YXSuFUaFUtmfaMlx4TAX5w0gJP24s/Zc5DuMGjwQT7eb6/syxWzj1MVUTl1KJT7Vgr+XG/6e7gT5uFOvXFDhDpP7S9xhZxI57lCudj/kKM/c8Oe5rUVL7qgVWvjnaBGQbwmi7t2707hxY1566SX8/f3Zs2cPkZGR9O/fH4fDwddff52rgP8tQfT6668zbdo0oqKi8PJy3oy9+uqrLFiwgMOHD2fZNiQkhBdeeIFRo0Zdta3nn3+eF1544YrlShAVIHMy0QsfIfzsDwCstDfmpyrPMqZHUyrm4EKdHUn7fsJ16SP42hJINrx50TaIRJ+K1CoTQO0yAdQuE0h4oNcVTz7MNjtuf063Ds6MdGK6lctpFv44k8D6I3HEJmVkbl/C14M2VUsRGeyLz5+9iy6lmDkYncT+6CQOXHblhFGGEW0q8mS3mkXvaZ78u7PbMX8xBM/UKDIMd56zDcVcZyDP9a6Td8O+VkyGbbOx+JaheeLLXLZ5Mbx1RZ7pWStv2s8vDgd80htObeC4T306xU/CZHJhxr0N6NOwbO7bTb8MJ9eDw8a5y+ns37WFTpcW4YqDI46yvGQbRLrJm6YVStCxYQ0aN2p28yQeJf+d/Q1j8RBMSecxG+584HIvTRo0oFnFYNyye56c2eYcugrscFRjjGUsbZvU56U+dW66HwWZUi9BvHMmWrPNxsEfZtHgonMGsN9c6mPq+yFN6uSy3kTqRTK+fBCvM+sA+MrWjoXBY/joobZ5M5T4RjnsELMX7JYsi1MybJyJT+N4XApHYpM5HJvMxWQznm6ulPB1J9DHk8p1mjO4bc2i8ePpVmEYsPMTZ709u5lYU2keyRjLbqowpkMVxt1eNX+KftutsOoF2PwuAHtN1XgkfQymoPIsfKh5nt+L3qgzx/bh+GooFSxHAZjDXZyoM452NctwW5VS+dfTM/4ERP2BYcDJCwmEbnwaXyONl633c7r6g0ztWzd/ht/lhsMBsfvAlkFCuoXlu6OxOxyUK+FN+UBPSu/4H8Fx2wH43NaBTY46AIQHelG5tB8hAV6EBngSHuh11QdMloQoHKtexsvIIM4IZF3dafTrN+CmnaTppmCzQOxezGYLi38/x3e7z2O/2jDav3F3deGOumXp1qkzZUvqd/3f5
VuCaN++fdx+++00atSI1atX07t3b/bv3098fDybNm2icuXKuQr43xJENWrU4I477uDdd9/NXJbbBJF6EBURhkHG1o9w++Up3AwrZx2lGWcfT/3mHRnbsQolb/ALx2a1sH/RFOqenIsLBvsdkcwr8zxDet5OnbIBN3xBNwyDnWcus+SP83y/J/pfh1R5uLrwTM+ahVKbRfJQWjz2bx7G9fhKAL6xt+ZNj1E8fVdj2lcPubFeF6c2wfzuAIx2eZof0mrRqWYIHwxqcnMkFONPOns/WdM44N+KgXFDSDT58+KddXigeUTO/+ZObXLW7brKNOvfOtryddhjNK9Wnvuals/76Vbl1pEWj+2bh3H78282t/6axnxUxxpMvKPaLfej4OgvH1Fu83/xxswFI4jFFZ7nvnsG5ujHn+XEJixfDMXPcoF0w4Pn7A8S2HII4ztVuymHKmZY7Xi6udxy/62LpOg9zkkP4k9gM7nximUA8+xdaRhRgjfurk+VkDzseZZ4Hr4eltmTbLF7b55KvpuI0oF89tANTJiR36wZnPtqIuWOOgsQb3XUZJxlDJddg+lRN5wJnarlqDfMdRkGbP8Q4+f/YvpHMfTdRlWO9vqGfo1z8b1emBx2WDsNY/0bmMjRz98stjpqEd9tFt1bNszD4CQ7EtOtbDtxiU3HLrLx2EWiEzOw2BzYHAZe7i70bxrBo+0r33w9ewtIviWIwNnT5r333mP37t2kpKTQqFEjRo8eTXh4eK4Dvl6CaMOGDbRt25Zdu3ZRv379zOW5HWL2T6pBVMiidmH5YjAeSaexGK68arufZZ69+N+99elYI+fDdxwOg72HD2P6Zjj1bPsA+MGzG0F3Tee2GjfQk+E6LDYHaw9f4Kf9McQlm0nOsJGcYSXA252mFYL/fOXBzERSNDgcsPltjFUvYTLsHHWU5VHreI4azvHaAV7umVMOt69emhph/v9+E2VJdc68dPkkXzk68h/LQ9QKD2DxyJY3V7fYfd/AklFgN5PgHsKDKY+y06jGXQ3L8spddbJXYNDhgI3TMdY4h6ecN0pxxuEcghrk741bw4GUbz/s5h8CJQXH4cCy5X3idiwlOiEVq8156+Pl4ULFkr7XvTZnGK5MOducJWn16VE3nPcGNry5fhTlQOq5faR8ej+h5lPYDRPvm+7Dt9N/6Ns44vr16RwOTix7lchd03HFwTFHGWaHPMuIu3veHPWZpGjISIJlY+DAdwCspBkTMx7G7OrHmI5VGNmu8o333jr2K3z7MKRdwvD052W3scy9VIeyQd58M6pV0U0O/Y1tz9ewbBxutlTiCWSMZTSbHXVwdTFxT+NyjOlY5cbqq6QnkLp4FL4nnDOEHXBEkmj44uIC/gFBBPebTliFIt6r+XqOr4at74M1HZvdQUK6lTSLjXSrgwyLnXSr/aq1SQ3gN7cGtHzgRZpVLl3gYcu1/VVP8aZ4mFqI8iVBZLVa6dq1K7Nnz6Zq1ap5EmhmINdJEA0dOpR9+/axY8eOLMv/KlK9Y8cOGjduDMAvv/xC165dVaT6ZpORCN+NdtZ5AM44SmPGgyAfd0r5efL3P3cD54XA5nBgtRtY7c5Zxqx/1rexOQxCuEyAKY1UvNhZ73la9Rmpi4bkvdObMRY/iCklmgzDnbPG1etouZjAxcX05yxuziLnfxVCdTGZMDBwsabhnR5NlBFMF/PrNKoWyf/uqU9p/yLSdTsnovc4C37GH8ducuOEIxQMZ8HIsEAvPK8zXMAA0lKT8E139hr6xt6GZ6zDqF0hnKe616RhRBEs0i03lXSLnc+3n2HmmmNcSnUOM+pSO5SnutcksmTWp+8XU8wMmrudg9FJ1AoP4OtRLYvWVNP5wZLKpa/GUvLYNwCcM0qRgSe+f86U5uZiwmQyYQIsdgdmqwO7JZUQu7Mm2ApTW+w9ptOjcdVbNpEm+ejPniv8/F9wWIl1K8ODqaPZb1SkWqgfU/vWy/5kDUlRzmHbcX+NNDAwL
h7FhEFyiVo85foEy895UcrPg8UjWxW5YWXXdfGYs8dV7D4MTES7lSPV8ucMrSbw83Aj0Mcdb3dXrvdXaACX0ywkpltxMYGryUSgkYi/Iynzoe2u8PsY0DyCHvXK3JQ9AXMqxWzjt5PxbD5+kf1RSYT4exJZ0pfIkj60qVr65rwvEyEfexCVLl2azZs350mCKCUlhWPHjgHQsGFDZsyYQYcOHQgODiYiIgJwfpDw8HCmT5/OyJEjr2ijW7duxMbGMnv27Mxp7ps0aaJp7m9GhgHbPsD45ekrurPmRpRnZbzv/5QSEXk4jbzIP6XEwZKHnU+kbpDDMPGI8SQdegxkQLPyN/ePq4wk5wwU+7/N1e5/DU9JrnEvA5pF0KZqqZv730OKnMR0K2/9eoRPtpx2zlJlgtsql+KeJuWILOnLwq2nWbY7CovNQSk/D74b05qyQd7/3vAtwr5zIcb3j+PmyPj3jQGz4c4vFZ6gff+J+Hurt6zcoHO/Ox80JJ7B7uLOdGMQs9Jvx2QyMbhFJJO61rh+suLoSowlj2BKu3LynE9tnXjZ9gBmPPD3dOPzh1tQp2wuitcXNmu6MwG2c0GeN33WUZp5ZZ+jW5ceNK0QnOfti0jBy7cE0WOPPYanpyfTpk274SDXrl1Lhw4drlg+ZMgQ5s+fD8CHH37IhAkTiI6OJjDwyot3fHw8Y8aMYfny5bi4uNCvXz/eeecd/PyyP1ZZCaIiJvE8XD7JjlOXeX/dMVIy7FfdrISvOyH+noT4e1Ha39M5XXuAJyF+ngQH+uFWthG46SZVCoDDATG7nTOy/IPZ5iAu2YzF5iDDaifNYicqIZ1zCemcu5yO1W7H3dXF2asoMJRBPTsTUfIWmXrTMCB2P2QkkJBuZdaaY+w+mwhAaKAnfRuWI9Vi42x8Ggejk4hJdNaG8/d2o1XzlvRq1UBP6iTfHY5JZuqKg6w9HHfV9fXKBfJKn7q5m/3sZpdyASPuMCcvprH28AX2RyeRYbVjttox2xyEBnhRJcSPyqX9qFmnIeUiKhV2xHIrSb8M342BQ98DsNe/DXMu1QUgyMednvXKULtsAD7u/58oupRi5sLhrdQ8vRCA/Y5IXrf1J8Nw3g9eJIDTpnJUKOVL9VB/RrarfPP/bV84BGkXM9+ejk/ll/2xbDh6EbPV2asoLNCTPg3LUiMsAJPJORHLB+tOcvxCCq4uMLhlBcoFe5OcbiPFahBRuyXVy+fxLK0iUqjyLUE0duxYPvnkE6pWrUrjxo3x9c3aHXPGjBm5i7gQKUFUdF1MMbP9ZDyuLiY83JxDcsICvCgT5K36IyI3GcMw+HFvDC9+v5/YJPMV6wO93Xm4bSWGtqpwc9VdklvC2fg0Fv9+jq93nOVCspnudcMZelsFGpYPUu81kcLyZ+9yfnkacti7fIHtDj7wHEb9imFUC/Wnaqgf1UL9qVDSt1jMRJeUYWXh1tN8tP4El68xkUqQjzvv39+YlpVLFnB0IlLQ8i1BdLUeP5mNmUysXn3jwywKmhJEIiIFJ8Vs451VR9l49CIRwT5UC/OnRpg/rauWIsDrOoVwRQqAw2HgMAzc8mNqbRHJnfM7YeObYE7C7jA4n5BOXLKZNEvWXuYmE/h4exNT+V4ibruPWuEBuBTzGpSpZhsLt57m062nSUyzYgAOw6B6mD9v3tsg72Y+E5EiLc8TRCdOnKBixYq35FM0JYhERERERG4uJy+m8uuBWM4npNOsYjC3VSl1/Rn3RESKqTxPELm6uhIdHU1IiHOWnvvuu4933nmH0NCbf3yqEkQiIiIiIiIicivKbs4j2/2n/5lH+vHHH0lNvbIgq4iIiIiIiIiI3Fw0wF5EREREREREpJjLdoLIZDJdUX/oVqxHJCIiIiIiIiJS3GR7HmHDMBg6dCienp4AZGRkMHLkyCumuf/222/zNkIREREREREREclX2U4QDRkyJMv7Bx54IM+DERERERERERGRgpftBNG8efPyMw4RE
RERERERESkkKlItIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMKUEkIiIiIiIiIlLMFWqCaP369fTq1YsyZcpgMplYunTpFdscPHiQ3r17ExgYiK+vL02bNuXMmTOZ6zMyMhg9ejQlS5bEz8+Pfv36ERsbW4CfQkRERERERETk5laoCaLU1FTq16/PzJkzr7r++PHjtG7dmho1arB27Vr27NnDM888g5eXV+Y2jz32GMuXL2fx4sWsW7eOqKgo+vbtW1AfQURERERERETkpmcyDMMo7CAATCYTS5YsoU+fPpnL+vfvj7u7O59++ulV90lMTKR06dIsWrSIu+++G4BDhw5Rs2ZNtmzZQosWLbJ17KSkJAIDA0lMTCQgIOCGP4uIiIiIiIiISFGQ3ZyHWwHGlCMOh4MffviB//znP3Tp0oU//viDihUrMmXKlMwk0u+//47VaqVTp06Z+9WoUYOIiIjrJojMZjNmsznzfWJiIuD8RxMRERERERERuVX8lev4t/5BRTZBdOHCBVJSUpg2bRovv/wyr732Gj/99BN9+/ZlzZo1tGvXjpiYGDw8PAgKCsqyb2hoKDExMddse+rUqbzwwgtXLC9fvnxefwwRERERERERkUKXnJxMYGDgNdcX2QSRw+EA4M477+Sxxx4DoEGDBmzevJnZs2fTrl27XLc9ZcoUJk6cmOVY8fHxlCxZEpPJdGOBS75KSkqifPnynD17VsMBpUjRuSlFkc5LKap0bkpRpXNTiiqdm3IjDMMgOTmZMmXKXHe7IpsgKlWqFG5ubtSqVSvL8po1a7Jx40YAwsLCsFgsJCQkZOlFFBsbS1hY2DXb9vT0xNPTM8uyf/ZCkqItICBAF0YpknRuSlGk81KKKp2bUlTp3JSiSuem5Nb1eg79pVBnMbseDw8PmjZtyuHDh7MsP3LkCJGRkQA0btwYd3d3Vq1albn+8OHDnDlzhpYtWxZovCIiIiIiIiIiN6tC7UGUkpLCsWPHMt+fPHmSXbt2ERwcTEREBJMmTeK+++6jbdu2dOjQgZ9++only5ezdu1awJkBGz58OBMnTiQ4OJiAgADGjh1Ly5Ytsz2DmYiIiIiIiIhIcVeoCaIdO3bQoUOHzPd/1QUaMmQI8+fP56677mL27NlMnTqVcePGUb16db755htat26duc+bb76Ji4sL/fr1w2w206VLF2bNmlXgn0UKhqenJ88999wVQwRFCpvOTSmKdF5KUaVzU4oqnZtSVOnclIJgMv5tnjMREREREREREbmlFdkaRCIiIiIiIiIiUjCUIBIRERERERERKeaUIBIRERERERERKeaUIBIRERERERERKeaUIJIiZ+bMmVSoUAEvLy+aN2/O9u3br7nt/PnzMZlMWV5eXl4FGK0UB+vXr6dXr16UKVMGk8nE0qVL/3WftWvX0qhRIzw9PalSpQrz58/P9zil+Mnpubl27dorrpkmk4mYmJiCCViKhalTp9K0aVP8/f0JCQmhT58+HD58+F/3W7x4MTVq1MDLy4u6devy448/FkC0Upzk5tzUvaYUhPfff5969eoREBBAQEAALVu2ZMWKFdfdR9dMyQ9KEEmR8uWXXzJx4kSee+45du7cSf369enSpQsXLly45j4BAQFER0dnvk6fPl2AEUtxkJqaSv369Zk5c2a2tj958iQ9evSgQ4cO7Nq1iwkTJvDQQw/x888/53OkUtzk9Nz8y+HDh7NcN0NCQvIpQimO1q1bx+jRo
9m6dSsrV67EarXSuXNnUlNTr7nP5s2bGTBgAMOHD+ePP/6gT58+9OnTh3379hVg5HKry825CbrXlPxXrlw5pk2bxu+//86OHTvo2LEjd955J/v377/q9rpmSn7RNPdSpDRv3pymTZvy3nvvAeBwOChfvjxjx47lySefvGL7+fPnM2HCBBISEgo4UimuTCYTS5YsoU+fPtfcZvLkyfzwww9ZvqT79+9PQkICP/30UwFEKcVRds7NtWvX0qFDBy5fvkxQUFCBxSbFW1xcHCEhIaxbt462bdtedZv77ruP1NRUvv/++8xlLVq0oEGDBsyePbugQpViJjvnpu41pbAEBwfzxhtvMHz48CvW6Zop+UU9iKTIsFgs/P7773Tq1ClzmYuLC506dWLLli3X3C8lJYXIyEjKly9/3Uy7SEHZsmVLlvMYoEuXLtc9j0UKUoMGDQgPD+eOO+5g06ZNhR2O3OISExMB54+da9F1UwpDds5N0L2mFCy73c4XX3xBamoqLVu2vOo2umZKflGCSIqMixcvYrfbCQ0NzbI8NDT0mvUxqlevzscff8x3333HwoULcTgctGrVinPnzhVEyCJXFRMTc9XzOCkpifT09EKKSgTCw8OZPXs233zzDd988w3ly5enffv27Ny5s7BDk1uUw+FgwoQJ3HbbbdSpU+ea213ruqn6WJJfsntu6l5TCsrevXvx8/PD09OTkSNHsmTJEmrVqnXVbXXNlPziVtgBiNyIli1bZsmst2rVipo1a/LBBx/w0ksvFWJkIiJFT/Xq1alevXrm+1atWnH8+HHefPNNPv3000KMTG5Vo0ePZt++fWzcuLGwQxHJIrvnpu41paBUr16dXbt2kZiYyNdff82QIUNYt27dNZNEIvlBPYikyChVqhSurq7ExsZmWR4bG0tYWFi22nB3d6dhw4YcO3YsP0IUyZawsLCrnscBAQF4e3sXUlQiV9esWTNdMyVfjBkzhu+//541a9ZQrly56257retmdr//RXIiJ+fmP+leU/KLh4cHVapUoXHjxkydOpX69evz9ttvX3VbXTMlvyhBJEWGh4cHjRs3ZtWqVZnLHA4Hq1atuub423+y2+3s3buX8PDw/ApT5F+1bNkyy3kMsHLlymyfxyIFadeuXbpmSp4yDIMxY8awZMkSVq9eTcWKFf91H103pSDk5tz8J91rSkFxOByYzearrtM1U/KLhphJkTJx4kSGDBlCkyZNaNasGW+99RapqakMGzYMgMGDB1O2bFmmTp0KwIsvvkiLFi2oUqUKCQkJvPHGG5w+fZqHHnqoMD+G3GJSUlKyPCk8efIku3btIjg4mIiICKZMmcL58+f55JNPABg5ciTvvfce//nPf3jwwQdZvXo1X331FT/88ENhfQS5ReX03HzrrbeoWLEitWvXJiMjgzlz5rB69Wp++eWXwvoIcgsaPXo0ixYt4rvvvsPf3z+zJkZgYGBmL8p/fp+PHz+edu3aMX36dHr06MEXX3zBjh07+PDDDwvtc8itJzfnpu41pSBMmTKFbt26ERERQXJyMosWLWLt2rX8/PPPgK6ZUoAMkSLm3XffNSIiIgwPDw+jWbNmxtatWzPXtWvXzhgyZEjm+wkTJmRuGxoaanTv3t3YuXNnIUQtt7I1a9YYwBWvv87FIUOGGO3atbtinwYNGhgeHh5GpUqVjHnz5hV43HLry+m5+dprrxmVK1c2vLy8jODgYKN9+/bG6tWrCyd4uWVd7ZwEslwH//l9bhiG8dVXXxnVqlUzPDw8jNq1axs//PBDwQYut7zcnJu615SC8OCDDxqRkZGGh4eHUbp0aeP22283fvnll8z1umZKQTEZhmEUZEJKRERERERERESKFtUgEhEREREREREp5pQgEhEREREREREp5pQgEhEREREREREp5pQgEhEREREREREp5pQgEhEREREREREp5pQgEhEREREREREp5pQgEhEREREREREp5pQgEhEREREREREp5
pQgEhEREcmGoUOH0qdPnwI/7vz58zGZTJhMJiZMmJCtfYYOHZq5z9KlS/M1PhEREbk1uBV2ACIiIiKFzWQyXXf9c889x9tvv41hGAUUUVYBAQEcPnwYX1/fbG3/9ttvM23aNMLDw/M5MhEREblVKEEkIiIixV50dHTm///yyy959tlnOXz4cOYyPz8//Pz8CiM0wJnACgsLy/b2gYGBBAYG5mNEIiIicqvREDMREREp9sLCwjJfgYGBmQmZv15+fn5XDDFr3749Y8eOZcKECZQoUYLQ0FA++ugjUlNTGTZsGP7+/lSpUoUVK1ZkOda+ffvo1q0bfn5+hIaGMmjQIC5evJjjmGfNmkXVqlXx8vIiNDSUu++++0b/GURERKQYU4JIREREJJcWLFhAqVKl2L59O2PHjmXUqFHcc889tGrVip07d9K5c2cGDRpEWloaAAkJCXTs2JGGDRuyY8cOfvrpJ2JjY7n33ntzdNwdO3Ywbtw4XnzxRQ4fPsxPP/1E27Zt8+MjioiISDGhIWYiIiIiuVS/fn2efvppAKZMmcK0adMoVaoUI0aMAODZZ5/l/fffZ8+ePbRo0YL33nuPhg0b8uqrr2a28fHHH1O+fHmOHDlCtWrVsnXcM2fO4OvrS8+ePfH39ycyMpKGDRvm/QcUERGRYkM9iERERERyqV69epn/39XVlZIlS1K3bt3MZaGhoQBcuHABgN27d7NmzZrMmkZ+fn7UqFEDgOPHj2f7uHfccQeRkZFUqlSJQYMG8dlnn2X2UhIRERHJDSWIRERERHLJ3d09y3uTyZRl2V+zozkcDgBSUlLo1asXu3btyvI6evRojoaI+fv7s3PnTj7//HPCw8N59tlnqV+/PgkJCTf+oURERKRY0hAzERERkQLSqFEjvvnmGypUqICb243dhrm5udGpUyc6derEc889R1BQEKtXr6Zv3755FK2IiIgUJ+pBJCIiIlJARo8eTXx8PAMGDOC3337j+PHj/PzzzwwbNgy73Z7tdr7//nveeecddu3axenTp/nkk09wOBxUr149H6MXERGRW5kSRCIiIiIFpEyZMmzatAm73U7nzp2pW7cuEyZMICgoCBeX7N+WBQUF8e2339KxY0dq1qzJ7Nmz+fzzz6ldu3Y+Ri8iIiK3MpNhGEZhByEiIiIiVzd//nwmTJiQq/pCJpOJJUuW0KdPnzyPS0RERG4t6kEkIiIiUsQlJibi5+fH5MmTs7X9yJEj8fPzy+eoRERE5FaiHkQiIiIiRVhycjKxsbGAc2hZqVKl/nWfCxcukJSUBEB4eDi+vr75GqOIiIjc/JQgEhEREREREREp5jTETERERERERESkmFOCSERERERERESkmFOCSERERERERESkmFOCSERERERERESkmFOCSERERERERESkmFOCSERERERERESkmFOCSERERERERESkmFOCSERERERERESkmPs/ndIg0mbzjxsAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Pitch comparison\n", + "plt.plot(pitch_xs, pitch_values)\n", + "voiced = np.isfinite(pitch_values[:-1])\n", + "plt.plot(xs[voiced], estimated_f0[0, voiced])\n", + "plt.ylim((160, 200))\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Frequency [Hz]\")\n", + "plt.legend([\"PRAAT\", \"SpeechBrain\"])\n", + "\n", + "print(\"Estimated average frequency (SpeechBrain): {0:.1f} Hz\".format(estimated_f0[0, voiced].mean().numpy()))\n", + "print(\"Estimated average frequency (PRAAT): {0:.1f} Hz\".format(np.nanmean(pitch_values)))" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "ee8ddbcb-e839-4742-b2de-5c17747a4c1a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Jitter (SpeechBrain): 0.50%\n", + "Average Jitter (PRAAT): 0.35%\n" + ] + } + ], + "source": [ + "print(\"Average Jitter (SpeechBrain): {0:.2f}%\".format(100 * jitter[0, voiced].mean().numpy()))\n", + "print(\"Average Jitter (PRAAT): {0:.2f}%\".format(100 * praat_jitter))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "4e9008cc-5f54-4b70-a86c-4b54717f35e2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Shimmer (SpeechBrain): 2.40%\n", + "Average Shimmer (PRAAT): 2.25%\n" + ] + } + ], + "source": [ + "print(\"Average Shimmer (SpeechBrain): {0:.2f}%\".format(100 * shimmer[0, voiced].mean().numpy()))\n", + "print(\"Average Shimmer (PRAAT): {0:.2f}%\".format(100 * praat_shimmer))" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "d27dbb08-37fb-427c-9d19-ac3b330c9044", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average HNR (SpeechBrain): 24.2%\n", + "Average HNR (PRAAT): 27.3%\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABH8AAADeCAYAAACkJKzaAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnXd4VFX+xt87Pb33HkLvHaQKKIooKvZed+2i/lzLrmsvq2tb26ooujYUREQQkd5rIJCQBum915lMv78/zj333qmZhGAAz+d58iSZemfmzr3nvOf9vl+O53keDAaDwWAwGAwGg8FgMBiMcxJFf28Ag8FgMBgMBoPBYDAYDAbj9MHEHwaDwWAwGAwGg8FgMBiMcxgm/jAYDAaDwWAwGAwGg8FgnMMw8YfBYDAYDAaDwWAwGAwG4xyGiT8MBoPBYDAYDAaDwWAwGOcwTPxhMBgMBoPBYDAYDAaDwTiHYeIPg8FgMBgMBoPBYDAYDMY5DBN/GAwGg8FgMBgMBoPBYDDOYZj4w2AwGAwGg8FgMBgMBoNxDsPEHwaDwWAwGAwGg8FgMBiMc5gzRvx57bXXwHEclixZIl5mNBpx//33IyIiAoGBgVi8eDHq6ur6byMZDAaDwWAwGAwGg8FgMM4yzgjx5+DBg/j4448xatQoh8sfeeQR/PLLL1ixYgW2b9+O6upqXHnllf20lQwGg8FgMBgMBoPBYDAYZx/9Lv50dnbixhtvxKeffoqwsDDx8ra2Nnz22Wd46623MGfOHIwfPx7Lli3Dnj17sG/fvn7cYgaDwWAwGAwGg8FgMBiMswdVf2/A/fffj0suuQTz5s3DSy+9JF6emZkJi8WCefPmiZcNGTIEycnJ2Lt3L6ZMmeL28UwmE0wmk/i/3W5Hc3MzIiIiwHHc6XshDAaDwWAwGAwGg8FgMBh/IDzPo6OjA/Hx8VAoPPt7+lX8Wb58OQ4fPoyDBw+6XFdbWwuNRoPQ0FCHy2NiYlBbW+vxMV999VU8//zzfb2pDAaDwWAwGAwGg8FgMBhnJBUVFUhMTPR4fb+JPxUVFXj44YexceNG6HS6Pnvcp556Co8++qj4f1tbG5KTk1FRUYHg4OA+ex4Gg8FgMBgMBoPBYDAYjP6kvb0dSUlJCAoK8nq7fhN/MjMzUV9fj3HjxomX2Ww27NixA++//z42bNgAs9mM1tZWB/dPXV0dYmNjPT6uVquFVqt1uTw4OJiJPwwGg8FgMBgMBoPBYDDOObqLuek38Wfu3LnIzs52uOz222/HkCFD8MQTTyApKQlqtRqbN2/G4sWLAQAFBQUoLy/H1KlT+2OTGQwGg8FgMBgMBoPBYDDOOvpN/AkKCsKIESMcLgsICEBERIR4+Z133olHH30U4eHhCA4OxoMPPoipU6d6DHtmMBgMBoPBYDAYDAaDwWA40u/dvrzx9ttvQ6FQYPHixTCZTJg/fz4+/PDD/t4sBoPBYDAYDAaDwWAwGIyzBo7neb6/N+J00t7ejpCQELS1tXnM/OF5HlarFTab7Q/eOsaZjlqthlKp7O/NYDAYDAaDwWAwGAwGwwVfNA/gDHf+/BGYzWbU1NTAYDD096YwzkA4jkNiYiICAwP7e1MYDAaDwWAwGAwGg8HoFX9q8cdut6OkpARKpRLx8fHQaDTdJmQz/jzwPI+GhgZUVlZi4MCBzAHEYDAYDAaDwWAwGIyzkj+1+GM2m2G325GUlAR/f//+3hzGGUhUVBRKS0thsViY+MNgMBgMBoPBYDAYjLMSRX9vwJmAQsHeBoZ7mBOMwWAwGAwGg8FgMBhnO0z1YDAYDAaDwWAwGAwGg8E4h2HiD4PBYDAYDAaDwWAwGAzGOQwTfxgMBoPBYDAYDAaDwWAwzmGY+HOWctttt4HjOHAcB41Gg4yMDLzwwguwWq3Ytm2beB3HcYiKisKCBQuQnZ3t9rHmz58PpVKJgwcPen3OIUOGQKvVora2FgBcnsfdz7Zt2/r6pTMYDAaDwWA
wGAwGg8HoAUz8OYu56KKLUFNTgxMnTuCxxx7Dc889hzfeeEO8vqCgADU1NdiwYQNMJhMuueQSmM1mh8coLy/Hnj178MADD+Dzzz/3+Fy7du1CV1cXrrrqKnz55ZcAgPPOOw81NTXizzXXXCNuE/0577zzTs+LZzAYDAaDwWAwGAwGg+ETf+pW787wPI8ui61fnttPrexxZymtVovY2FgAwL333ouffvoJa9aswdSpUwEA0dHRCA0NRWxsLJYsWYLLLrsM+fn5GDVqlPgYy5Ytw8KFC3HvvfdiypQpeOutt+Dn5+fyXJ999hluuOEGzJo1Cw8//DCeeOIJaDQa8fkBwM/PDyaTyeEyBoPBYDAYDAaDwWAwGP0LE39kdFlsGPbPDf3y3LkvzIe/5tQ+Dj8/PzQ1Nblc3tbWhuXLlwMANBqNeDnP81i2bBk++OADDBkyBBkZGVi5ciVuvvlmh/t3dHRgxYoV2L9/P4YMGYK2tjbs3LkTM2bMOKXtZTAYDAaDwWAwGAwGg3H6YWVf5wA8z2PTpk3YsGED5syZI16emJiIwMBAhIaG4ttvv8Vll12GIUOGiNdv2rQJBoMB8+fPBwDcdNNN+Oyzz1wef/ny5Rg4cCCGDx8OpVKJ6667zu3tGAwGg8FgMBgMBoPBYJx5MOePDD+1ErkvzO+35+4pa9euRWBgICwWC+x2O2644QY899xzYnDzzp074e/vj3379uGVV17Bf//7X4f7f/7557j22muhUpHd4Prrr8fjjz+OoqIiDBgwwOF2N910k/j/TTfdhFmzZuG9995DUFBQb14ug8FgMBgMBoPBYDAYjD8IJv7I4DjulEuv/kjOP/98fPTRR9BoNIiPjxdFHEpaWhpCQ0MxePBg1NfX49prr8WOHTsAAM3Nzfjpp59gsVjw0Ucfifex2Wz4/PPP8fLLLwMAcnNzsW/fPhw4cABPPPGEw+2WL1+Ou++++w94pQwGg8FgMBgMBoPBYDB6Cyv7OosJCAhARkYGkpOTXYQfZ+6//37k5OTgp59+AgB88803SExMxNGjR5GVlSX+vPnmm/jiiy9gs5Hg688++wwzZ850ud2jjz7KSr8YDAaDwWAwGAwGg8E4C2Diz58Ef39/3H333Xj22WfB8zw+++wzXHXVVRgxYoTDz5133onGxkb89ttvsFgs+Oqrr3D99de73O6uu+7C/v37cfz48f5+aQwGg8FgMBgMBoPBYDC8wMSfPxEPPPAA8vLy8Prrr+Po0aNYvHixy21CQkIwd+5cfPbZZ1izZg2amppwxRVXuNxu6NChGDp0KHP/MBh/ADtPNOD+bw+j3Wjp701hMBgMBoPBYDAYZyEcz/N8f2/E6aS9vR0hISFoa2tDcHCww3VGoxElJSVIS0uDTqfrpy1knMmwfYRxJnDROzuQX9uBfy0eiWsnJvf35jAYDAaDwWAwGIwzBG+ahxzm/GEwGIwzmPoOI/JrOwAAjZ3mft4aBoPBYDAYDAaDcTbCxB8Gg3HWs+tEI7YV1Pf3ZpwWdp9sFP9uYuIPg8FgMBgMBoPB6AVnT19zBoPBcKLTZMVza45jZWYllAoOe5+cg+jgc6s8b+cJSfxpMTDxh8FgMBgMBoPBYPQc5vxhMBj9TrPejJ+OVEJvsvp8n8PlLVjw7k6szKwEANjsPDLLWk7XJvYLPM9jl0z8adIz8YdxdrE5rw4Xvr0deTXt/b0pDAaDwWAwGH9qmPjDYDD6ndfW5+GR749i4Xu7kF3Z1u3tGztNuGnpfpQ3G5AQ6oep6REAcM6JPyfqO1HfYRL/b9abvNyawTjzeHfzCRTWdWJ9dk1/bwqDwWAwGAzGnxom/jAY5wgmqw2/5dTCZLX196b0CJ7nxdKmkkY9rvxoNz7ZUQS73XMjwm0FDTCYbciIDsT6JTNwzcREAEBm+Zkj/vA8j7ImPWxeXkd30PclPEADAGhmmT+Ms4iq1i4cE8TcFoOln7eGwTg95Ne2o7LF0N+bwWA
wGAxGtzDxh8E4R/hwaxHu+ToTr6zL6+9N6RHlzQbUtBmhVnKYPzwGFhuPV37Nx1f7yjzeh4Y7XzwiFsE6NcYnhwMAcqraYLScGeLXmqPVmPXGNry35USvH2PXiQYAwCUj4wCQsi+e772YxGD8kWzIqRX/bmZ5VYxzkPp2Iy57fzeu/3Rff28Kg8FgMBjdwsQfBuMcYUs+EURWZFai3Xj2rLLvL24GAIxJCsV/bxqPO6enkctLmtze3maXnEKzB0cBAJLC/RAZqIXFxiOnqvuysT+CI+WtAIDthQ29ur/Zasf+EvLeXDYmHgBgstrRdYaIW4y+wWqzn3VuPV/ZcFwSf1pYXhXjHGR/STPMVjsqmru8ulUZDAaDwTgTYOIPg3EO0GowI6eaiB4Gsw2rhBDks4F9xUTkmZwWAY7jMDmNuHgqW7rc3j6rohVtXRYE61QYnRgKAOA4DuNTyN9nSu4P3f7c6nZYbPYe3/9weQsMZhsiAjQYnxwGjYocrlm793OL2784iKmvbkF9h7G/N6VPaew04WBps/h/MxN/GOcg8vON3ux7wwIGg8FgMPoDJv4wThvPPfccxowZ06/bMHv2bCxZsqRft+GPYG9RE+TVQP/bV3ZWlAfxPC+6W6YIoc2JYf4APIs/24WSrxmDoqBSSoew8SlhAM4c8aeqlWy/yWpHQW1Hj+9Pu3xNy4iEQsEhgub+sEn0OcPJ+g7sPNGIZr0Zvx+v+8Ofv91owea8OpitPRcnu2NTbh3sPBCgUQIAWvs588doseH2ZQewdGdxv24H49zisCxnrsPIxB8Gg8FgnNkw8ecspaGhAffeey+Sk5Oh1WoRGxuL+fPnY/fu3f29aT3itttuA8dx4k9ERAQuuugiHDt2rE8ef9WqVXjxxRf75LHOZHadJELB1eMTEahVobhBj90n3ZdNnUlUtnShqrULKgWHcYJzJyHMDwAROdy1fqdlVLMGRTlcTsWfw+UtboWvnKo2PPpDFsqb/phgTnkA6NHK1h7ff3cR+UynD4wEIAt9ZuLPOcMvR6UOWFuFss0/kn9vKMCdXx7Cg98d9jmY3NfSFlryRUsWmw39m1d1qLQFWwsa8PEOJv4w+oYusw251e3i/51uzlcMBoPBYJxJMPHnLGXx4sU4cuQIvvzySxQWFmLNmjWYPXs2mprO/Am/MxdddBFqampQU1ODzZs3Q6VSYeHChV7vY7H4toocHh6OoKCgvtjMM5o9ReRzv3B4LBaPSwAAfLm3tB+3yDdoydfopFD4a1QAgBA/NYJ15G/qnqE0dZpwTMj0me0k/gyPD4FGqUBjpxnlza4Czwu/5GLV4Sr85atDpz0Uuq3L4rAKfKyiZzlENjsvTiomCKIWFX+amPhzTsDzPNYeqxb/313U+IeHldPjxobjdfjH6uxuxZl2owVz3tyGm5bu7/Z2VHy+flIyAJJhZTD3X7ZRhSDGNnaaelWGyWA4c6yyFVaZGMqcPwwGg8E402HijxyeB8z6/vnpwYpoa2srdu7ciX/96184//zzkZKSgkmTJuGpp57CZZddBoBkoHz00Ue4+OKL4efnh/T0dKxcudLhcSoqKnDNNdcgNDQU4eHhWLRoEUpLSx1us3TpUgwdOhQ6nQ5DhgzBhx9+6HB9ZWUlrr/+eoSHhyMgIAATJkzA/v2OE4OvvvoKqampCAkJwXXXXYeODscSGOpcio2NxZgxY/Dkk0+ioqICDQ3E4VFaWgqO4/D9999j1qxZ0Ol0+Oabb9DU1ITrr78eCQkJ8Pf3x8iRI/Hdd985PLZz2VdqaipeeeUV3HHHHQgKCkJycjI++eQTn9/7M5Gq1i6UNOqhVHCYnB6Om6emAAA259Wd8e1n9wlhzzTnhyKVfjlu/84TjeB5YGhcMKKDdQ7X6dRKjEgIBuBa+lXc0IkDQv5Ifm0HXluf33cvwg1VTiVrPXX+lDTqYbLa4adWIiUiAADEsi8WnHtukF/bgaIGPTRKBaKCtDBa7Nh
b3HPxvtNk7VXQbFuXBSfrOwEACg747kAF3tpY6PU+647VoLTJgF0nvQtVW/PrYbbZMSAqACMTQqAV8qr607VWIQjCPA80dJj6bTtOJ0aLDW39XF73ZyKz3PE803EWNVpgMBgMxp8TVX9vwBmFxQC8Et8/z/10NaAJ8OmmgYGBCAwMxOrVqzFlyhRotVq3t3vmmWfw2muv4d1338VXX32F6667DtnZ2Rg6dCgsFgvmz5+PqVOnYufOnVCpVHjppZfEkiuNRoNvvvkG//znP/H+++9j7NixOHLkCO6++24EBATg1ltvRWdnJ2bNmoWEhASsWbMGsbGxOHz4MOx2aVW1qKgIq1evxtq1a9HS0oJrrrkGr732Gl5++WW329zZ2Ymvv/4aGRkZiIiIcLjuySefxJtvvomxY8dCp9PBaDRi/PjxeOKJJxAcHIx169bh5ptvxoABAzBp0iSP79+bb76JF198EU8//TRWrlyJe++9F7NmzcLgwYN9ev/7Cp7ncbC0BVvy63HBsGiMTwnv/k5u2C2UfI1KDEGwTo1gnRrTMiKw+2QTlu4swXOXDe/Lze5TqPOH5v1QEsP8kFvT7pL7Q1u80y5fzoxPCcPh8lYcLm/BleMSxctXCgHYKRH+KGsy4Is9pZieEYl5w2L67LXIoaJVbLAOte1GFNZ1wGC2wl+jQqvBjLu+PITzBkTg0Qvd73P5tcT1Mzg2CEoFBwAIDyDfc+b8OTegrp9Zg6MQFaTFt/vLsTW/HucPjvb5MSqaDZj31nZcODwW710/tkfPf7SiFQD5Tvx15gA8/VM23ttyEkaLDY9dOBg6tdLlPj/KguSrW7uQHhXo9rE35ZHv6UUjYsFxHMIDNKhpM6LFYEZSuH+PtrOvqJAdS+rajYgP9euX7Thd8DyPxR/twYn6Tjx+4WDcMT1NPHYwTg+HnRYZ+qPsy2wl3QKDdOo//LkZDAaDcfbBnD9nISqVCl988QW+/PJLhIaGYtq0aXj66addcnKuvvpq3HXXXRg0aBBefPFFTJgwAe+99x4A4Pvvv4fdbsfSpUsxcuRIDB06FMuWLUN5eTm2bdsGAHj22Wfx5ptv4sorr0RaWhquvPJKPPLII/j4448BAN9++y0aGhqwevVqTJ8+HRkZGbjmmmswdepUcRvsdju++OILjBgxAjNmzMDNN9+MzZs3O2zn2rVrRUErKCgIa9aswffffw+FwnH3XLJkibgtcXFxSEhIwP/93/9hzJgxSE9Px4MPPoiLLroIP/zwg9f3b8GCBbjvvvuQkZGBJ554ApGRkdi6dWuvPoveYLHZ8dmuEsx7azuu+Xgv/ru9CC+uzev141HxZ3pGpHjZXTPSAQBf7CnF5rw/PkjWFyqaDahq7YJSwYl5PRR3oc92O48dQgiyc94PRQp9bhUvs9rs+PEwmbQ+edEQ3CW0kn985VHUtp2eDku0XG1cSiiig7Sw88BxoYzr2wPlOFTWgi/2lHoss8mvIe64oXFSyWJ4ABncN+vPTdfCnwlS8kXyfhaOisMcQfDZnFffo1ycIxWtMFnt2F7Qs/sBwJHyVgDA2KRQ3DA5Gf934SAAwKc7S7DwvV3IEsQhSmmjHodkk90aL98dWnY5Jol8H8P8+z+vSu4irGs/tzqrAUBZkwHHq9thttrx8q95uP6TfThc3oK1x6rx8rpcPLHyGHMF9SE8z4sO08hAsn93/sFlX2arHdd9sheTXt6Mmjb3DRIYDAaDwZDDnD9y1P7EgdNfz90DFi9ejEsuuQQ7d+7Evn37sH79erz++utYunQpbrvtNgBwEGHo/1lZWQCAo0eP4uTJky55OEajEUVFRdDr9SgqKsKdd96Ju+++W7zearUiJCQEAJCVlYWxY8ciPNyzYyU1NdXhOeLi4lBf7xhsev755+Ojjz4CALS0tODDDz/ExRdfjAMHDiAlJUW83YQJExzuZ7PZ8Morr+CHH35AVVUVzGYzTCYT/P29v5ejRo0S/+Y4DrG
xsS7bdDpZdbgSL67NBQCoFBysdt4l28ZXeJ4XszXOGyCJP+cPjsatU1Pw5d4yPPJ9FtY+OAPJEf2z4u4J2uVrVGIIArSOh6JEIfRZPmE7Xt2OZr0ZgVqVi1hEGZdMLi+obUdblwUhfmrsPNGIunYTwgM0mDs0BnOGRmNfSRNyqtrxr9/y8fa1Y/r8tVHRKiHUD5YkHhtz63C0ohXjk8Pw3YFyAEC70YoWg0XM8pFDnT9DYoPFy6jzhwU+n/3kVLWjrMkAnVqBeUNjwHGARqVAVWsXTtR3YlCMbzlltJSp3WhFQ4fJpRTSG0cqyMR1rPCdeWDOQAyKCcLTP+XgZH0nrvxwN567bDhumZoKgBy35FR7OWa1dxGRIdSfCJZ0H+/Pjl8VzdL2ni7Rtz/ZKSwCJIT6odVgxoHSZlz54R6H26RE+uO+2RndPpbZasdzvxxHRlQg7hDE8nOBqtYu2Gx8n5wLSxr1aDFYoFEpMDktAuuya/7wzJ/3t57EYUHE3VvU5OB2ZTAYDAbDHcz5I4fjSOlVf/xwPbdn63Q6XHDBBXjmmWewZ88e3HbbbXj22Wd9um9nZyfGjx+PrKwsh5/CwkLccMMN6OwkWRCffvqpw/U5OTnYt28fAMDPr3vbvFrtaEXmOM6hLAwAAgICkJGRgYyMDEycOBFLly6FXq/Hp59+6nI7OW+88QbeffddPPHEE9i6dSuysrIwf/58mM3eJ8e+bNPp5GglCf+9bHQ8fn9kJgASZGztRQhpYV0nGjtN0KkVYrcsyt8vGYZxyaFoN1pxz9eZf3iYbHfsF0q+JqdFuFwniT/ShI3m5oxNDoVa6f7QFR2sQ3pUAOw8cO/Xmegy2/DDoQoAwOVjEqBRKaBVKfHy5SMBkNKb+o6+nwjSzJ/EMH+MTgwRtr8NO082OkxCSxo73d4/T3D+DImVO39Y4PO5Ai35mjMkGgFaFfw1KkwVSh+39KDrl/z7QfN7fIHnecn5kxwqXn7h8FhsenQmLh8TDzsPPP9LLg6VNsNu57HqSBUAaT/05vxpNZB9NNSPHGvD+rlTXZfZhsZOyTFX2+7onmvrsqD+LHcD7RZckddPSsJvS2ZixsBIaFUKjEoMwcRUIvAdKm3x9hAiG47X4tv95XhpXe4ZnxvnKxabHZe9twuXvLcTXX0QPE5dP6MSQhAmuDI7/sCyr+zKNnyw9aT4f05Vu5dbM85GLDY7zNb+Cac3WmxeBX4Gg3H2wsSfc4hhw4ZBr9eL/1ORRv7/0KFDAQDjxo3DiRMnEB0dLQov9CckJAQxMTGIj49HcXGxy/VpaWQlcNSoUcjKykJzc3Ofvg6O46BQKNDV5f3Es3v3bixatAg33XQTRo8ejfT0dBQWeg8sPRMoqCUT+7lDo5ESEQAFB9j53k3qacnXxNRwaFWOGR0alQIf3DgOEQEa5Na045Vfe19adjrIcepmJcdd2dfxaiKajUwI8fq4b1w1GoFaFfYUNeGWz/djk1D2ds1EaVV0dFIoxiWHwmLj8c2+8lN7IW6obCUTpoRQP4xKDAVAOsN8s6/M4XbFDXrnu6LdaBGdYHLnT0QgC3w+F3As+ZIy5uYMIaVfPRN/pIn5iR6IPyWNerR1WaBVKRz2MQAI9dfg7WvHYNGYeNjsPB767gh+z61DZUsXArUqXDMhCQA8lpnY7TzaBOdPiOD8CRN+txj6Z991FjDkQg/P87jiw92Y8+b2szaw12bnsaeInAumZUQiKdwfX905GfkvXoQ1D0zHPxeS3Dcq5HUHzUiz88DXTsfH9zafwKPfZ51xiwndUdakR5PejA6j1W03yJ5CHTfjU8LEvJ0/quzLZLXhsRVZsNl5seQsp7pnHSUZZzZWmx3z39mBBf/Z2atA/1PlqVXZmPn6VnGMyWAwzh2Y+HMW0tTUhDlz5uDrr7/GsWPHUFJSghUrVuD111/HokWLxNutWLECn3/+OQoLC/Hss8/iwIEDeOCBBwAAN954IyIjI7Fo0SL
s3LkTJSUl2LZtGx566CFUVpKB3/PPP49XX30V//nPf1BYWIjs7GwsW7YMb731FgDg+uuvR2xsLC6//HLs3r0bxcXF+PHHH7F3794evR6TyYTa2lrU1tYiLy8PDz74IDo7O3HppZd6vd/AgQOxceNG7NmzB3l5efjrX/+KurozM9+GwvO8KP4MjQuGUsEhMpCU89S39zzLhXYHmibL+5ETF+KHN64mZW5rjlb36SDCarPjq31lvVoZ5nkeZU1E+EiPcg06TxCcP816M/TCaipd2RzRjfgzPiUMX94xEYFaFQ6WtsBi4zEqMcRlknv7NCJifrO/HCZr305kROdPuB9GCc6fsiaDKERNSSelkiWNruIP3T/iQ3Ti5Blgzp9zhRP1nahq7YJWpXAId6biT2ZZi8/ZLHJx9ER9h5dbOkJdPyMTQqBRuQ4DOI7Dy1eMRGqEP6rbjHjouyMAgEtGxiE9knxfq1vdO2U6TFbQw0wIdf70c+aPc3B8rUz8aTVYUNygR6fJirKms9PlklPVhnajFUE6lYM4zgmO4qFxQQjQKNFutKKwm/2kts2InScaxP+XHywXhZ69RU14c2MhVh2pwvcHK07DKzl9nKiTxNG+cDPRsOdxKWEIFMqW/yjx8J1NJ1BY14mIAA3+IwS951a394tI8Edittrx7f7yc7Zbn5zKli4UN+hxsr6zX0Tzg6XNsNp5/Ou3/B7nyTEYjDMbJv6chQQGBmLy5Ml4++23MXPmTIwYMQLPPPMM7r77brz//vvi7Z5//nksX74co0aNwv/+9z989913GDZsGADA398fO3bsQHJyMq688koMHToUd955J4xGI4KDyST5rrvuwtKlS7Fs2TKMHDkSs2bNwhdffCE6fzQaDX7//XdER0djwYIFGDlyJF577TUola5dYrzx22+/IS4uDnFxcZg8eTIOHjyIFStWYPbs2V7v949//APjxo3D/PnzMXv2bFGIOpOpbOlCp8kKtZJDmjCJig4WxJ8elh/xPC8OQCemes5dmjEwCn5qJVoNFpxs8N0d0B0rMivxzOoc/GN1To/v29BhgsFsg4KTXD5yQvzUCNaRAXVVaxcsNrsoioyI9y7+AMD4lHBRAAKAqwW3gpyLRsQiNliHxk4T1glOjL5AbyJZPgBx/oT6a5AqZEzYeWBSajguHBYLAChtchV/8muEvJ84R7EqXJhAdxit/WYFZ5w6u4TynElp4fDTSMfKpHB/ZEQHwmbnsUM2+faE3c6LIiPgOLkFgCPlLWJppTNS3k+ox8cP1Krw/g3joFEqYBZKUhePT0RcKMkV8uT8ocKVn1opuhGpcNlfzp8KYbJPjyly8Uf+HTxbg6B3CavzU9MjoHJTEqtSKjBOcFgeLPHu1F11pBJ2nojoiWF+aDVYsCarGlabHc//cly83YfbTp5V7h+5M66il86fhg4TDpU248fMSlFEG5ccJu5Xf0S3ry6zDZ/tLAEAvHzFSExKDYdWpSDiZR84ms5kvtxTiqd/ysZbGwv6e1NOO/LjUssfnJVms/NiLtqxyjaxeyODwTg3YIHPZyFarRavvvoqXn31Va+3i4+Px++//+7x+tjYWHz55ZdeH+OGG27ADTfc4PH6lJQUrFy50u11zz33HJ577jmHy5YsWYIlS5aI/3/xxRf44osvvG5Damqq25WH8PBwrF692ut9aecySmlpqcttaAj2H0G+IGBkRAeJuTXRQToA7ajv4WpWWZMBTXozNEoFRiQEe7ydWqnA2ORQ7ClqwsHSZp/DZLvjgDCJ2FfcBJPV5lJ25o1SYYU9IczPrfMAIKIQafdugNXGw2yzI0inQlK4by2ax6eEY+W9U7H7ZBOun+gq/qiVCtw8NQVvbCjAst2luGJsgrhSfirQkq0QP7VYDjAqMVR8zTdOSUawcLm7sq+8Wte8H/p4SgUHm51Hi8GMmB6E+zLOHNx156PMGRKNk/Wd2Jpfj0tHx7tcL6eh0ySKMoBj5o/eZMW
NS/ejy2LDT/dNw5ikUIf7Snk/7oPTKSMSQvD0giF47pdcpEUGYGJqGIqEfbbGg/OnzSnsGej/zB862Z+QGo4t+fWok+UVyd0+db1wX54JiPvUQPcOUACYkBKOnScacbC0BTcLId7O8DyPlYeI8/faCUloMZjx6vp8fLGnFCabHfm1HQjxU8NPrURtuxE/HKoQA8HPdArrJMeTsxPMGxXNBqzLrsG6YzXIrnIsrUqLDEBUkBaBp0n8sdjsUCk4h/PSscpWmG12xAbrMH94DDiOw9C4YGRVtCKnqk1cVDoX2SeI2bk1vrscz1bkx6W2rp4fNxs6THj11zxcPznZ6+Kgp/taZS6ytzYWYu6QaCgUpz4+YjAY/Q9z/jAYfyAFYhcnaWIfHdS7sq/D5WT1fkRCcLfCywTh5N/dqm9POCI8v9Fix2FZa3VfKBXKnVIjPA9U5aHPNM9geHxwjwSaIbHBuHN6mtvVcAC4bmISNCoFsqvaxAyHU4WWFCSESiLVaGHyHR6gwUUjYsUBemmT3sWq78n5o1BwYnZKUycr/foj2HOyES/8kouX1ubi1fV5WLa7BDanz+tEXQfu/OIg8mq6D1y12OziBMZdqSYtA9tW2ODyPM7Q/YxmfjTpzWgSQo0zy1pgMNvA88CzP+c47GMGs1UUob05fyi3npeKz2+bgGW3TQTHcYgXnD8dJqvbMpdWYaJCS74AybXWou+fTB0ask67BOrNNnGi7ij+nPnOn9o2Ix5efgQfbD0JnufRZbaJQc6eyn8BYGKa4PwpbfZYxnG4vBXFjXr4qZVYMCoO105Mgk6tQG5NO14SOlQ+duEg3D+HdAz7cGvRWeP+kYujFT6Wfa09Vo0Zr2/Fa+vzkV3VBo4j56Up6eFYPC4Rr19FSqoDtWRfb+/DzJ/SRj0mvrwJS77Pcrj8kOD2HZ8SJp4L6eLPuZz7w/M8jlS0AgCKGzrP+VIkB+dPL46ba49VY9WRKry35WT3N3aiSsgsjAjQIEirQl5NO9bn1Pb4cRgMxpmJT+JPe3t7j3984aOPPsKoUaMQHByM4OBgTJ06FevXrxevNxqNuP/++xEREYHAwEAsXrz4jM90YTC84c7VQdsz97TsK1M2COyOSVT88bHbS3c0682ikwWAGDbqjld/zcM/Vmc7DNbowMa7+COFPh8XVlx9KfnqCRGBWlw+hjgsvtlf1s2tfUPq9CWJP5ePice0jAj8c+EwaFVKJIb5QaXgYLTYHUpQ7HZZJlSsq0Orv8tn/kxYbXbc83UmPt9dgqW7SvDx9mI8/0su1mU7lgi+tbEQm/Pr8fmukm4fM6uiFXqzDeEBGgyLc3XrTUgNQ5BOhWa9Wexu5wkqaGREB4r7Gp3g7pOVex2tbMP3h6R8luzKNtjsPGKDdYgL6d5Fx3Ec5gyJQaogWPprVKKw467jF23n7uj86efAZ2EyMzgmCEFCKSgtayiTTbJOR+e/vmRvURMWvrcTP2dV440NBXh70wkcLG2G2WZHXIhOzGNyx5ikUKgUHGrajKI70ZmVmWQ/uXhkLAK1KoT6a3D5mAQAgMlqx5DYINwwKRnXTEhEXIhOdP+c6VhtdgeXpa/Onx+F4OtRiSF46fIROPj3edj1xBws/8tUvHnNaNFREUSdP32Y+fPxjiK0GixYd6zGQWR1d96n58Xj53DHr9Img+gc7DBaz/nsO7ko3drV8/2KjkPyfViUcLmv4OocEBWIO6aTmIe3NxV2uyDBYDDODnwSf0JDQxEWFubzT3h4OIqLi7t93MTERLz22mvIzMzEoUOHMGfOHCxatAjHj5O68kceeQS//PILVqxYge3bt6O6uhpXXnnlqb3iPwk8z5/x+Td/RuiJeLA7508Py76oU2VcN6UbAFnhVyo4VLV29Un7zqwKRxHJU0eILrMNH+8oxtf7yh3CjenAJiXCNe+HIjl/DGJnsO7CnnvDvKExAHrWKtsbdGKRIBN/IgK1+OauKbh8LJlIqZQKJIe
T117a6Dgp0Ztt0KgUbu37pzP0uc1gwa4Tjed8aKivHKMhuloV/jozHecNIK3Y1x6tFm/TZbZhWwHJ5ymo674Ugeb9nDcgwq2FXq1UYObAKADA1m66flHnT1KYPwZGBwKQck1oEDwt93r9t3yx/TpdPffF9eOJuBAiWLs7ltCJitz5QwOfWwxmtyv2PM/jUGmzKMj0NVQoSwr3R4yw7dTl45j5c+aWfX2yowg3fbYfjZ1m0VX4n80nxByeaRmRXl2R/hoVhgvHz4Olrg7QLrMNa48SYfOq8VJnxFvPSxX//uelw6BSKqBVKXHf7AEAiPunrwPz+5ryZoNDiaQvmT9Giw37isn79K/Fo3DTlBSxOYMzNFuur8q+GjtN+PFwFQDAauexp4h8n+12XnT8Oog/wueaU912zjpiaL4hxV2zhHMJ+XGptReiORXm6ztMoiPUV+hxPSHMD3fOSEOInxon6zuxMZctvjMY5wI+l32tXLkSW7Zs6fZn8+bN0Gg0Pj3mpZdeigULFmDgwIEYNGgQXn75ZQQGBmLfvn1oa2vDZ599hrfeegtz5szB+PHjsWzZMuzZs8elhTmDcTZgtNjEActQ2ap/b8SfDqNFLCEb54PzJ0CrwvB48pzuBv49hWaGTE0nE+KjlW1uS0DkK/1UwAGkgZu3fAIq/pQ1GcSSGm/ZRr0lVpgM9qbbmjsq6cAp1Lurgr72YtkgNk/4TAfFBLotVaPiT3MPB3O+8OyaHNz02X5szGMDPEASaqYPjMRTC4bin5eSsPxthQ3ivr69sB5dQtlLYV1Ht8LZLi95P5TzfWz5Xik6zPwxUMjxOlnfCb3JimOVxCn3zrVjMDgmCC0GCx774Sge/T4L/91eBODUxJ94Yd925/xpp5k/ftI4gIo/FhvvMkE2WW14alU2rvrvXtzzdWavt8kT7UaLmEOUGOaHGCFgX3L+SELA6RKfTpX9xU145dd82Ow8Fo9LxKZHZ+H/LhwEAGIG0wwveT+USam09MvVAbr7ZCM6TFYkhPphSlqEePnQuGD8a/FIvHblSJw3QHqOayYmITaYuH9+81IS0qw347EfjnoMH/8joKIo7SzZbrSK+4QnDpW2oMtiQ3SQ1iV/zRnq/Onoo7Kvr/eVOYT67ygkAnNxYydaDRbo1AoMi5fOhQNjAqFWcmg1WDy6us52qOhFKe7D5hVnGjY77yBQ9sYxKQ/kp25iX6GuofhQHYJ1alw4jCyQnfBhgYPBYJz5+CT+pKSkYObMmZg1a1a3P7Nnz0Z6ejrUanX3DyzDZrNh+fLl0Ov1mDp1KjIzM2GxWDBv3jzxNkOGDEFycrLXVuImk6nHJWjn6koJ49Tpy33jZH0n3lG9h+W6VxEdIGX0iGVfPcibyCmqwEb1Yzipuwkxb8cDz4cDX10B2D13gZooln71nfizcHQcUiL8YbPzYgC0HHnAKy3dkrd5T/Gh7Cu3ph0Gsw1+aiXSIgNPedudIYHbJEDX2db87w0F+HJPaY8eTz4p9wYVf+QrmPk1tCzQvcgVfpqCc3mex66TZHJW2MOB4rnKLqcQ3cExQUiPCoDZasdmofuJPAfBaLGj3IujoMNoQZbguvGWzTJ7cBQ4Djhe3e41g4bmliSG+SFDdP504FBZC2x2HolhfkiNDMALi4YDADbn12PVkSq0GiwI8VOLjrfeQJ0/Ne6cP8JERV725adRwk9Njnny/Iq6diOu+2Qflgttw49Xt8Fq69tOdnQSFRGgQYBWJQal13UY0W60OLjoztSyL+ryvGBYDP599Sj4aZS4//wM3CWUZIznCrBw8wVAwXovj+I9+43mxUxJd3WlXTsxGddNSna4TKtS4spxxMm44bhn8WdlZgV+PFyBsv/9BZb/TAQMfZc75yt00jomMRQRwjG0u3bv2wvJd3zWoChwq+8DPp0LGN1n6tBgf4PZdsqlMUaLDV/tJSXIVwpO0e2FDeB5Xiz5Gp0YKjaMAMhnQRs55JyjpV/0OxA
rfH+L+9n5U95k6HXXuO6obu2CxSbtR6296PYlF+bze3JOt9uRXPkLEtAgivyRwgJlXzuOjRabQ9kt48yn3WjBvV9n4len8vczDpsF+PIy4Me7+ntLzkh8En9KSkoQERHR/Q0FcnJykJTk2l3HHdnZ2QgMDIRWq8U999yDn376CcOGDUNtbS00Gg1CQ0Mdbh8TE4PaWs8DjVdffRUhISHij7ftoAKVwXBut8dk9B6zmZzsetq+3h3lxfm4TLkXU5ANrkkK4aPOn4YOk88lN03HN2OAogYq2AHeRn6KtgD5v3i8z0Rh1ffQKeb+2Oy8OIkdmxQmrgbvPum6suvo/CED54ZOE/RCm3dvnbto2RTV34bFB0NJJyXWvhuERAZqwHHkdTXpJUdNeZMB7289iRfX5vZoQlolm5R7Iy3KVfyhDidPK83hAadnEFbZ0oVGwU1UfYa6H/5I9CarGGhOXTocx2HhyDgAwLrsGpisNlEEChBatnsbZO8vbobNziM1wh9J4Z6FwchALUYlhgJwU/qVtxY4uBSAJDImhcvKvuo6sVcoEZkiuPImp0fg4bkDMT4lDPfMGoBv75qM/U/PRXpU74VUOilwt6/QiUqIv+MCkChcCseE+nYjLn1vF46UtyJYp4JGqYDFxqPChzwWnufxzf4y3PXlIXG/9USlUwYXnTzWtRlRLrh+aMfBxk4zLH0sPvWENoMFKw5VuLijTghtxUcnhoilXRzH4e+XDMU/LhmKf6cfgbKjCtj/sdfHnyC4RE/Ud6LF6RiSKzgz5Y6S7rhoRCwAYGt+g8fg58K6Tlyi2I9rsAnq5kJYT271+fH7Cur8yYgJdGgk4I3tgttmTro/cPRboOoQsPdDt7cN0Erjg1Mt/frpSBWa9KS077lFw6FWcqhs6UJJo148d09IdXX7irk/52Dos95kFZ3OVwiCo7tOmX8UnSYrLvtgFy7/YDe6zH1f8ljqJIj0VPyx2uwOTvL82h4IggXrcHfja3hB/YXoXo44TVmDf1t5DLPe2IZj3eTbMc4cfs6qxvqcWrz+W35/b4p3GvKBku1A9gqgpbS/t+aMo9+7fQ0ePBhZWVnYv38/7r33Xtx6663Izc3t9eM99dRTaGtrE38qKjyHESqVSoSGhqK+vh5NTU3o6uqC0WhkP+wHRqMRBoMBDQ0N8Pf3h0ql6vU+SbEU7ZT+kR2MaI6AVWjh7QuGqjwAQGn0POCxAmDaw+SK7W9IaokTdNW3oK4Dbb1YSaKcrO9Ep8kKf40Sg2ICMS2DTDLdhT63yJ4np6pdcP2QCVd8qJ/XLmUhfmoE66T3fUR8MHltvzwMvBIHlOzo9WuQo1IqxM9AXvpFQ2Ktdh4NPpZZGS02NAqduLoVfwTXkzzzhw7ShroJAwakQVhfO3/klnq5XfzPyoHSZlhsxD2TLBNqLhlFwsG3FzTgt5xadJqsiAnWYr4wCfZmr6dOIm+uH8qcwW5Kv8wGYOUdwLrHYKvKEnMZ5M6f+g4TNuaSxRFakgkAj1wwCD/eex6evHgIzsuIhE59amK26Pxxs6+4y/wBZKHPwr679lgN6jtMSInwx5oHpmOA8Bq6K+cwWmx4fOUx/P2nHGzKq8NPQjYKRW+y4uPtRaKTkq7OJwqfIy3zrG03ipOs4fHBUCuJqNLQw+y1vuSj7UV4fOUxfLbTMTyc5pFlRDuKwhzH4a4Z6UgzCmOmiv1ehfGIQC0GCKJzplOGSq4gPLsLIvfEyIQQJIT6octiE8USZ+pqq/Gc+gvx/4P7++a43RNO1JH3b2B0kLgfeHNtVLd2obCuEwoOmB4ie117P3DrXNKqlKKA6K782Vfsdh5Ld5K8zNunpSJYp8aEFHLe3lHYgEw3eT8UseNX1bkn/hytbIWdB+JDdGL2Wn9m/hytaEWrgbgGD5X1vZONNtOgEV49FV2cXcw9cv5UHAAADOCqRfGHlu329bjjkOBCz60+N91q5yJHhPNGaZPBq1P2f3tL8eLa3P6
rrJHNsQ5vXeVzV8qDpc246qM9uPOLg+d0VVCPZrQdHR0oLCzE4MGDERgYiMOHD+Odd95BV1cXLr/8ctx444093gCNRoOMDNI2dPz48Th48CDeffddXHvttTCbzWhtbXVw/9TV1SE2Ntbj42m1Wmi17kP53EEfq77ee74C48+JQqFAcnKyb+3Fy/cBMcMBrXvXRmjDAekf2YFJo1IgPECDZr0Z9R0mRAhCRGWLATwPF5eA3c5D23YCAKBLGgsExQLTlgAHPwPqson1f8gCl+ePDNQiPTIAxY16ZJY3Y86Q3pV9UEfE6MRQqJQKcZKZX9uBxk6TQyimfHW5rcuCypYun9q8UxLD/MVJyfCEEGDHG0DmF+TKTc8Dd22SRkinQEywFg0dJuFkRlZQa1qlE1tNm9Gnzkh0NTlAo3SZ/DpDnT/lzQZYbHYU1HagtMkAlYLzOAHrcdlXSymQvRKYch+g8ew2OSJrcy9/3X9Wdp+Qsnnk3/1BMYHIiA7EyfpOvCi0vr5oeKxQ4leFgjrPg1hf8n4oc4ZE4+1Nhdh1shEmq42IpBX7ARsRJvR5G2GxDYNKwSEmWAelghPzV2gGzOT08F69dl+g34VqN/tKm5vMH8B1AkGFzkVjEpAaGYD0qADk1bSjuEGPuUPdP29duxF/+SoTRwXnIUCEurtnpov/f7KjGO9uPoGtBfVY/pepkkNKKMOkZZ517SZRiE6LCEB9uwlVrV2oazeKzqY/GupuOCIL1LfbeUm8iHHj1jI0A9RJajEA1YeB5Cken2NSWjiKGvQ4UNqMeUKWBz02Az0TfziOw/zhsfh8dwk25NRi/nDHsRnP87iy8b+I4trBgwMHHobyo9h9stFBBLXbefzlq0PIq+nAmORQTEwJw6zB0V4z4TxR2WLAovd348LhMXj1ylGw2XkUNVDxxzfnD83YGZMUiqC2QukKcwew623gwhdd7hOsU6Gx03xKzp+VmZUoatAjSKcSy+xmDY7C3uImrM6qFt0u7po8DBdDn8+9iTQ9P41NCRP3ibImPaw2u9tsvNONPHx618lGzBBC+vuKMmGMNCCKnGt66vyhJV9qJQeLjUdhXQdsdl5yTnvBWpUFFYA4rhlWQSgPD+x78afLbBOdo925NxlnDkdk596DJS24ZFScy23sdh4vr8uDyWrH1RMSPcYYnFaapQWUmiPr8avmIvxj4TCPN69tM+LV9Xn4OUtq6FHebPAaTXE247P4s2PHDixcuBCdnZ0ICwvDd999h6uuugoJCQlQKpVYtWoVDAYD7r777lPaILvdDpPJhPHjx0OtVmPz5s1YvHgxAKCgoADl5eWYOnXqKT2HHI7jEBcXh+joaFgsfdemk3FuoNFooFD4MLgo2kIyd4YsBK77xu1N0vRHpX+cbIjRQVpR/BkaR1a3L/9gN+w8sOuJ8+Gvkb6qRQ2dSLFXAgogMm0kudA/HJh4F7D7HWDH68Dgi92KIhNTw1HcqMeBkpZTEH9aAUiBsRGBWgyNC0ZeTTv2FDXhstHx0st0WrE6Xt0mtXmP9J6JAxBXAxV/ztNvBra+TK7glMSGX7wVGDCnV69DDpkQtjt0+5G7Gup8LIWqapXyfroTDGOCdPBTK9FlsaGypUtsFX7JqDiEBbgPze+x+LPlZSD7B8BmBs5/2uPNjsicP9XnovPnh1uBjhrg1rWAqvuGBJ5cOhzHYcHIOPxn8wnR4XXRiDixVMjZ+VPU0Els0tk1OFnfCY6DQ2iuJ4bHByMqiAiS3+wrx23npUJRKjkH+aKtAIYhPtRPHNAPjAlEreB2SQr3c585VZ0F+IUCYandboM34kOlbl88zzvs621uWr0Djh2/AGk1eqhQ4jhAmNQVeXH+/FUQfkL81Lh7Rhr+/XshDpU2w27nxZyaHSfIxH1fcTP2FDVKzh9a9iXr9iXPHitt0oviT39BM6NyqtrE97WqtQtdFhvUSg4p7soFKw85/l+6s1vx57sDFQ4BzLTcNCHUz6V
crzsuGkHEn015dTBb7aIDBgCaszfgCm4b7DwH+9xnodryHIYoynHvb/lY88B08XbFjXpsEkooq1q7sO5YDdS/5mH3E3PETDxf+TGTlE19d6ACt09Lg1algMlqh1alQFK4vygCesv8oS6mWYOigbqfyYUxI8niyoFPgan3k0UXGYFaQfzpZejzvuIm/H11NgDgrzPTxQ5iMwdG4bX1+WKpdUZ0IEL9XY9hQ2ODoeCIc62+3djj9+1Mhoot45LDEB/iJ36mVa1d/TI5kztl97gpdz9VqPNndGKoIP70THShCzgjEkKQV9MOo8WOsiZ996W+PA+uloxTtZwFWmsrgKjT4jiWO7f60215OrHa7NhT1AR/jRIpEQFCxMCpL1b2F816s8PndqCkya3409BpgkkIrC9tNPSp+GO38+A4dP8+yuZY0xTH8WJWBZ5aMNStANpmsGDhe7vQ2GkCxwH+aiX0ZhuOV7efs+KPz5L5P/7xD1x99dWoqKjAkiVLcO211+KBBx5AXl4ecnJy8Pzzz+ODDz7o0ZM/9dRT2LFjB0pLS5GdnY2nnnoK27Ztw4033oiQkBDceeedePTRR7F161ZkZmbi9ttvx9SpUzFliueBTW9RKpXQ6XTsh/04/Pgk/ABAjSDsFPwKtLsGoTXXliEZsqyqZkdbv3Poc0FtBxo7zWjWm5Fd6WjjzixtRgZH1GlVzBDpiqkPAGp/oPoIcHKz282cIHZ76b1Vma5Kj5WtPk4TrNh7nFq+O+dKZFe1iQMbX50/alhxqeoAEnY8Ti487yFgkiAyb3+jV6/BGdoBSD7xk+eZuOtq5A46oUjopuQLABQKTmx1v7+4Cb8cI5/pHdPSPN6nx+JPAykPRJ6UBfXTkUr8d3uRaGk1WshJjtJhtPZZy+IzAosRyF1NnDM1Wd3evKHDJAoTtMRAzkLZYCciQINJaeEYLAgYpU0G0V6cV9OOS/6zE//ZfAIn6juhVnK4a3qaT5NrhYITRdQX1ubiig93oyNfykoJrDsILcwOmVm09AtwLPkSKdsLfDILeHc08MlsYPe7gN61VNMXqIBistodSjsBoLWL7JvOzrdwWW6EVXC6AcAQwWkilX25L+c4Wd+JrIpWqBQcVt8/DX+ZOQA6tQItBosoGHUYLWKnMwB4Z+MJMRibOihp5k99h0l8rtRIfykIuo+6/tnsPG5bdgB3fHHQp1wQu13KO2rsNIuZHbTkKz3SfQdAVAqOUpWwL5Tu8vo8k4VOXjnV7eL3vDd5P5TxKWGIDNSi3WjFXnlHL7sd/hv/BgD4WbMAqkl3AAASuCZUVFVBLzvGUPF5SGwQ/u/CQQgP0MBi40Xhvyesz5HOvx9vL0ah4JoaEBUIpYLr1vljsdnFTn+zBkcB9UJJ3dT7gcRJgLUL2PFvl/sFnkLHr+KGTvz1q0xYbDwuGRmH+2ZniNcNjQtCVJDkpp3gobunn0YpHgOyz6HSL57nRbfBuORQKBSc206ZvmCz8z6Xf/iyPQDJMuxNK3ZvUFF6jLDA1tpNZzpn6MJVQqgfBgtB4D6VfrVVQGlqc/gfkIT7Jr25z0phHMSfc9T5s/xgBW75/ACu+u9eTHx5E0Y99zu+3V/e35vVa7JkjlQAOOAhP1QurPdloHeH0YKL3t2By97f3X2wvkz8CeX0iNYXuG1KAwC7ixrR2GlCXIgOa+6fjsvGkLHXuVhCS/FZ/Dl27Bgef/xxJCQk4IknnkB7ezuuvfZa8frrrrsORUVFPXry+vp63HLLLRg8eDDmzp2LgwcPYsOGDbjgggsAAG+//TYWLlyIxYsXY+bMmYiNjcWqVat69BwMxh9CWyX5zduBY9+7XN2QswUAYKVfOTfOH0Bq9y4f9B6WleQAQFFRAQI5I2ycEgiXyh0QGAVMIANsbP+X2+wfGgKbVdHaK6ttu9EihmfKW0VPFSbJR5y2tVmYGNLBWk5Vu1j21a2iXnMMN1S9iEztPXhP9Q44mxkYeikw73mScaT
UAOV7gNLdPX4dztBSEHlIoryTUa0XN0B5kwFv/l6AR3/IwlIhq6O7vB8KbT385sZCWGw8JqSEYXRSqMfby4MXuw0H53mgieRHoD4XaCpCeZMBj/1wFK+tz8dOYYKTU9UGq51HZKBWbFlcey65fzploquzS8INNLtqWFywWIIpZ1BMkBiwfOHwWCgVHKKDtAj1V8Nm58XJ+opDlTBa7BgaF4y3rx2NzGcuwN8v8Ww7dubx+YPx2AWDEKBR4mRlLfzqs8gVmiAo7WZMUBQgMVRyggyU5cFMcSf+FG2R/q4+Amz8J/DJ+UBXzwPgtSolIoVSgGqnjl9i4LNz5o9Y9mVBaZMBJqsdfmql6GZJFzr5eXL+0O4i0wdGIi0yABqVAmOE78oBQcymodrRQVpoVAocKG0Wj1dJwncyMlADhRDwTifJKREBMvGnb5w/eTXt2FbQgC359Xjk+6xuv691HUaH9t504EnDnt2WfAFA5UHye9wt5He599yf+FA/JIX7wWbnpcyNXuT9UJQKDhcOJy5Sh5bvlQfh11GGdt4PWxPvAXQhQCgpZRrMlYtOFkAqJZg1KAoPzBkoZtp4657njpJGPfJrO0TT689ZVWIJF33/kmSZP3Qim1vdjrc3FmLN0Wr8ml2DDpMVYf5qjIwPBupyyIPFDAPmPkP+zvwCaHPMmgrSkv29o4fCeYvejDu+OIi2LgvGJofizWtGO3Rb4zgOMwZKbsFxHsQfQAp9PpfEn9ImA5r1ZmhUCgwXXp8o/vQg9Jnnefz1q0MY88LvokO3N5Q06tFqsECjUiA1wh88DzFkvy+w23mUCfv9GCH432C2wWT1XbSii1XxoX6i68In8YcuYlLayT4eIRzrzVY7DH0UcC3Pdmvs6Fvx7EyBOtYCtSpwHDk2fLWvrJ+3qvccLmsFQDqSAqR0u82NMCkX1ulib1/w9sYTKKzrRHZVW/ch4S1kLN7Ak/1/uiIba49Vu70pzb+bNzQGIxNDMEwMzz/3SmgpPos/7e3tCA8nGQIajQb+/v4ICpIGm0FBQT3umvXZZ5+htLQUJpMJ9fX12LRpkyj8AIBOp8MHH3yA5uZm6PV6rFq1ymveD4PRb1DxBwCOfucivNhKiEBx3H8yuaC1zKEtu7zjF+DYsUNuMQaA5jIyGDUGpgBKJxfBeQ8CSi1ZDa7KdNnMpHB/jE4Khc3OY02W+wOhN45VtIHngeRwf4dsHzqgrnMKgKMrYnTwmlPVJuVseCv7MnUCXy5ERu2vCOYMMGrCgcn3AFd8AigUQHA8MPYmctsdr/f4dTgT4+S8AhzdPt6cP0/9dAzvbTmJo0cOoLOR7AcjhfyF7qCDWPq53znds+sHgFgOZud9WA3srAMsssFx/jp8tqsYdA76P6GdMN2/xiWHIt5LlstZS0ed+GdT4R4YzN4nZ3TVf/pAz+VZj104GKOTQnHXDPJ5cRwnrrAW1HbAbudFseKxCwbhirGJCNb1rJxGp1biwbkDse3x8/HwwEaoODuqEQ3bkIVk+xQ5DiKjXBxwK/7Q48Hsp4FL3gJCkoG2cmDtIx5D4r1Bc3/k3w2jxSbavZ3LvsJlgc+0zGhwbJA40aUZWE16s9tA+nXHyPt5yUjJeTXJqXU5LdebNywGN04mQgN9adSNJw94p9uaGuGPaNH91zer0HJx47fjtXh9Q4HX25c5DZJpy+5CWVixC3YbUCl8rmNvAvwjiDOl+rDX56Lun33FjoGrvXH+AMDFQuD5xtxaaUU2dzUAYJN9PJJjhRLjGFKmPJQrcwicppMkuqBAQ9bLezhxoK6f6RmRmJIeDquddIUDIAq2NMRWb7ah1WABz/N4ePkRvLv5BB767ggeXp4FAJgxMApKfR0RRzklEDkYSJsJxI0B7BagYp/Dc1PnT0/Lvj7dWYzSJgMSw/zw6S0T3IaxzxokZcp4cv4AwKhEQfypPHfEH7pvjEwIEUsK08VOmd7D4eX8cqwGm/LqYbTYRdGzV9sjLHC
NSgjBbCGYf7ebZhe9pbadiMAqBYchcUGgOmBPcn+o8yc2WCe6UvN9cdE5iz/CuNZPrYRWeO/7qvTrz+D8KRSE+39fPRrb/m82AOBkfYeDyH82QV3/Fw6LFYXPTDeB53Lxp6+cP8er2/DFHqliwlODAQDkvNhKHFYrbLMBkPHS+pxat517D5U5BumPEM6DTPwBGdzKa+yc/2cw/tTIxZ+GfIfyEp7nEVCzHwBQl7oI4BSA1Ugm5wJU/KGrzvLuB0fKW8QVyrImPYI7iZtDE+cmFTUoFhi2SLjjV243dbHQKvXHw5VurweA13/Lx7gXN7p0RKG2zzFO7pQoYTLVarA4rFDRgcLU9AgoFRya9CQQk+PgPpOEcnwVYGwDQlOAOzZA9+RJ4OJ/OYYWT1sCKFRA8Tag4qDnx/IB6f2XBiFyR4MnJ4zBbMWBkmbEoQkbdH/H1ojXsfq+qbhqfKJPz5sWKU3YE8P8cOFw7+K2WqkQO6B1OwhrcnRiWnN/wQ+HyGeextXgjqIH0XhghSzDKQxxoZ67OJ21dEhlIPqifXh+jftuks16M74/WI7NQoctb8HMF42Ixc/3T8MAWYYCHWQX1nUgs7wFte1GBGlVmDGo+4wfb0QFaXFnIvncdlqH4QBHJtDTFdkOYfAjE0IwJDYIFwyLcQ0s5nlJ/Bl0ITDxTuCaL8j35/hPQJb7nDJvuOv4RVcBlQpOzCyhhMlavbvraheoVYklWUVOk7oTdR0oqOuAWsnhwmHSd2RimiD+CPZz6tqanhGJe2eTsjCAlHXKOwvSsjWAOJRC/TWIEd1/fSN8UvGHTsj/u70Iyw94tvw7Cx051dT54yXsuSGfhBBrAkmzgVQhR0eWD+WOycL7tr+kCWarXXQX9cb5AxCxkQYeHyptJgsbuSQr51fbZKkkMXYEAGAoVy4OuDtNVhTWkeenpcS0HLash84f6jy6eEQc7pk1AABEsZt2StOpleLxvqLFgNyadpyo7xSdZHSSe+W4BKD+OLlzxABALewzsULOXuMJh+cO0tKyL/eT9Lc2FmLcixtxst7RgbEpj4wDHp8/2GFBRc7MgVEID9BgcEyQ1xDskYJT5Ghl2znTqYa6mMbKxhz0vOmr80dvsuKVdXni/946vXUHLVEcmxwqlgX3Ze4PzURMDveHWqkQ8508iT9tXRb8llPrUAYjOX90GBLXg7KvmmMAgA5eOH8I41qO40TXcZOP4s+uE41eRcgimfjTeA5m/thlLuBBMYFIDvdHkFYFi41HcQ9Ey/7i56wqjHtxI7YWkPGQzc4jSxgrjksJxSThHHKgxNU57Cj+nLrzx27n8czqHNh5yf3uVfxpqwTsVpihwo+2GQCACcpCGPQd2OPk0usy23BcOMZQ8WeIkJ/W2GlyWBA+l/BZ/OF5HnPnzsW4ceMwbtw4GAwGXHrppeL/cscOg/Gng4o/MWRwi6zvxKt2HMlHip0M+ifPuRwIEYQBWelXtCyHwmbnkVcjnagbO83iwXTniUYx70ctz/uRM+5m8jv7R9Ie2olLR8VDreRwvLpdnITJ4Xkeyw9WoFlvFlfSKSWN5PHoRJcS6q8WWyXTIFxAyvyJC/VDhjhR5nFhUBl0Zi/lJoeWkd8T7yThpQo3ranDUoBRQunpgY89P5YPOJd86E1WtMtWcD05fw6WtsBi4zE/qBgq3oxAfRnG2PN8Fsbl7qfbzkv1qRuHz7k/zYL4E0VEQmXVQQRYmjE8LggfBS3DeYpcdO34j4Pzx1sXp7MWmciarGjAvpx8MaAZIBkfj3yfhTtf/gjha25DkKEcwToVJqb2rFuWuMJa2yG6VC4YHuMgOsBqBoq3O7j+fEFZRnJc9tqH4d8nSD36cK4MqX7SIEunVuK3JTPx6S0TXB+guRgwthJXYPRwclnCeOD8v5O/f/2bi1jYHVRgku8rdIIS6qd2+Q6E08BnvVk8vg2NczyO0BX9onrHwfE6wUU1Y2CUQ2bSuOQwKBUkFDmrohW
FdSRUe2p6BKKDdLh5SgoAkNIy2bEwRhaGS4WGvi77opPEJfMG4uG5AwEAz/yc47HclpY4UYdKbnU7eJ7HSUEYGRgdSDpKbnoOMAnnB6EtMxLGkWNkKhnoosS7+EOdYdmVbciuaoXFxiNIp/K5XNUZtVIhdg7bmFtHhMb2KnTCDzvtIyXxRzg/DlWU4UhZC+x2HscqSBvvhFA/8TNI8qEduzMVzQYcq2yDggMuHB6DWYOiHMRFuXgmz/2h3V3mDY3G6vun4fjz83HsuQuJq6NOEIqjZeWakYPI70ZZFzBALJl1l5fG8zyWHyhHs96Mr/dJAmBFswGFdZ1QKjjMHhQt3aGrFajPF/8NC9Bg86OzsPLeqV7PLcPigqFUcGjsNHktVT6T6K4ckn4vBsgyzSTnj2/izwdbTzq8HxXNvV/coM6fcclhmDIgAgqOZA85l7/2FjpZpselUKF81l27d7PVjpuW7sc9X2c6LObRwOfYEKnsq7zZ4JCz5RbB+bPdPpr8L1vUpB2/5DmOPM/jSHmLS47SrhONuOmz/Vj0wS7ReSeH53mUyMq+OkxWn3LRziYqW7pgtJAA/JSIAHAcJwpxeb3IMvsjaeuy4Lk1x9GsN+PFtbmw2UnHOL3ZhkCtCgOjg8Txkbv8UHnmT3Vb1ynnbK3IrMDh8lYEaJRYeisZ3xytaHXJFBUR5lYV9ihUKhPBBydCAysmK/JdSr+OVbbCaucRE6wVzwt+GqW4sJdTfe64KOX4LP48++yzWLx4MRYtWoRFixbhmWeewdVXXy3+v3jxYvzzn/88ndvKYJyZmDrIxAoAZjxGfmevAKwkHG/3VhK22+CXjpDIOKnTjlz8ETN/jCht0qPLYoNOrRDLh+gEfeeJBmQohKyByMHutydlOnHMmDuAvDUuV4cFaDBnCBlo/nS4yuX64ka9KCw4Zy7QwXiyU9cZjuNE949cKadhsOH+GowQXstjqhX42PwU8N44IOtb15KTmqOkbEGhBsbc6P41UmjGUd5awNj7EyoNfG7sJOKbs9hT1250O0jdLYhjc4NlLqrjP/n8vBnRQQjUqhDmr8a1E5N8uo8k/nSzWkYn86nTYY8bBw485ikz8UL6cQwxk9LB4I4i1LUboVRwGJUY6tbN8UdgsdmRX9veo1wDn5E5fwAgw5yPQ7Kgws15dVh9pAKvqj7FBcpM/DdpC9Y9NAN+GjeCoxeGxEoDO1rytdC5E8am54D/XQbs+Y/vD2xsEwflOZpRyGzWIN+eBAXHI7XdtbTTLdT1EzfKsdvZtIeJYGDRA19fCez+D9BKQj5haCbiQvURtw/pbl+hZZ7OeT+A5PxpMZjFEoShTk4TOqlzDnJ1V/IFAAFaFYYLFu13N5HJ+LC4YPG5Hpo7EE+O57HM8jfg9TTRrUG/74CUPRYbQi6rlX33Sxv1WLqz2K1VXE6bweKw8t7WZUGR4EoYnRiKJfMGIiM6EBabtHrqDHW50BKqqtYuHK9uh95sg0rBISXcD1h1N2k1vkEQ7WiGVeJE8ps6fyoOAFbPx4ekcH8khPrBaufxlVD+OSwu+JTc3BcMJeLPprw68MIxcJNtLMycRnLICc6fQVwVDCYTCus7xLyfMbIMOZoDVS7L5emODceJ62dSWjgiA7XgOA73zCKZeBqlwqFTGhWXypoMYvnzZaOJI1alVEglmjTsOWa49EQexB9vgc+VLV1intzaYzXi/rRZcP1MSAlzDIJfeQfw0VRJ3AP5/gR1Uzrqp1GK4uGxPiz9+nh7EW5fdgDtHlxNveX2ZQcw+Jn1mPPvbbjji4N4f8sJl/Osc7c+AEgX3E81bUa3ZbxHyluw9lg1iho6UdzQKWbx0e9WhZdOb97Qm6woEBbMxiaHIVinxijBbbXbaaGst5TKOhACUvmsO+fPv38vEJ1R+4SwdavNLroX40N0CA/QiOPLgjov7p+OWqCzFnZw2GwbSy5rl8aH8tBnypb8elzx4R7c8tkBcZ+22Ox4/hf
imLPzwN9/ysEbG/IdvsdNejPajcQBrhFC7M+1du/UzUiD5gFI+Us1Priw+pEPt50Ux+7FDXqsy64RHeKjk0KgVHCi8+dYZauLuFMlc/7wvPfOit3RbrTg1fVECH/kgkEYmxyGQTGBpBOyp++ckPdTzkdjYmoEuAHnAyBu6d9yah3K7uQlX/LzHx1XHK86s4W63tIj8ceXHwbjbKakUY9rP96L7w/2IJGfBj/qQlCfeCHMuiigqxk48Tu2FTYgtoVMvIIGzyK3cyv+0MwZk1hnOiQ2WOzOdbishbSNPNmEAYLzB1GD3G+PQgGMFdw/h2WlX3abuK1XjiPuo5+OVLmk5mfKJsbO4k+5B/EHAKKEVVuaX2O02NAlnBRCA9QYkRCMu5Vr8aBqNbmDsQ1YfS/w7TWO4ZmZX5DfQy8FAropmUkYD0QMJDkXQsZEb4gI1ELBkcFKU6dJnNCmRwaA4wCLjXdrd6b5MMN4mWsi92fyXvtAiJ8aax6YhjUPTO92YE8JDyADuW7t19T5EzEAx4KmAQCu0+zBuPy3pOfn9IhGK4bGBcFPo5RN6Hu+apxZ1tJ9CJ8TeTXteOGXXEx5ZTMueod0xupzhMwfO09O7GMURdiSL7mB1hytxhzFEQxRENFjaNtOJAX3TPgBSBA0QNx79R0mBOtUmJ4hZXXAYgSyviZ/H/na95ydsj0kSD58AC6aOg4AsMtOJtEhtT6GnVPxJ8HJFaRQAld8DAREk+PRxmeAd0YA/0olYslnF5DOYGV7XB4yTnD+1MidP0LZl7uOZvLJA+2k5+wgpCKBPAy0sK6DlOXI3CVy6Ark1gJiAxfL9ew2BB3+L+7JvwMBTdmk1FZ4HbEy50+qsMJO3ZftRmkV+qlV2XhpXR5+lQcZO/FbTi0mvLwRT/54TLzsqCBoJIf7I0IQIkYLk8RjHsJ46bF1WHywWNrzcxY5LqZFBkBTuk3MMcDhL0m5K+30lTiJ/I4aAvhHkuNhVXe5P+R9WysIazRMt7fMGBQFjVKB0iY9rDmrAZCSr8QwP0lIDU0FNIHQchakczXILGsRHVLjZN0jE8L8wHEk6FbuJPXGelnJF+WSkXH4y8x0PL9ouEOnNCokrD5SRcozdSoxxNSBOqHsy8H5Q1xcaDzp4OALpIHPbsQfeb5RY6cJ+4WMKlpiOneozPVj6iCfLW/vVTkm3c/6KvdHb7LirY2F2FrQ4HaxqLflZdWtXdha0CCUweixJb8e//69UHxv6GNT13OSrEw81F+DMOEY4+z+qWrtwjUf78UD3x7B3De3Y86b22G22TFjYCTuEDL1ehokTjlaSVxq8SE6sXSUHm/6TPwRXg89LkllX47fg+2FDfhkR7G0bcIxp77DBDsPqBScWEZIOyoWeCv9Ekq+iu1xOMkTIVTu/BGbTcjGHTSX7EBpM97ZRM7d3+wrw4n6ToT5q3HvbFJ6+cHWIjy1Klu8H/3MEkL9xE5251ruD837GShzrNHFjjxfSvD+AGx2Hvm17Q5OpIpmA5btKgUgdTt9b/MJHBKyfcYmkeN0crg/YoK1sNh4h0YvdjuPSsEFR7+jpY29F3825NSi1WBBemQAbjsvFYCUgbbDU+mXMLcq56MxLSMSEMSf2aoctBut2HlCul+mKP44Or3pYvW5mvvjs/jDYPwZ+OlIFfaXNOOJH7Px+m/5vg1shBNkl388Ln5vL5Z1koG4+dcn0frzkzhfQVbOdRmCJd+d+BMsBZDuF1ZwhsUHiwPiIxWtOFrZCqWpBZGccDCK9CD+AMCYGwBwQNku4gDprAeWzgPeHgZsfBbnD4pEmL8a9R0ml0HLIVmAmzyHwmixidZpt+IPdf4I4g+1KasUHIK0KszWr8ff1d8CAA6m3w/Me46UoZz4Hfh4BmlFbeoEjq0gDzj+Ns+vj8JxwJjryd+yUrueopQNlOraTeKENjHcX3xdtU6CSFOnCbk17VDChvB2IU9AqQH09d22W5aTHhXokN3SHbTLkvP
2uCB0+rKHpePtSuISG8PngtPXAxEZaNMRAXCwokLcz6RSnp45fzpNVtzw6T7c8Ol+h5Iqb3y9rwwXv7sTn+8uEYWsoxWnwWIrOH+O8KR98hjupDjp6jBasDmvDvepZA45UxtwcnOPnyZIpxbDZAFg/vBYMaAUAJC/lgieANB0Aqg9Bp8o2UF+p83AbdNSoVUpRPGHK97qm4gkij/jXa8LSQDu309CoFOmA+CkDmBqYb88/D+Xu8ULk59qN5k/oW6cP3QFm25uYpifSwh2ehTt+CVN6Kg4MXNQpFtHEV2BpJxHxZ+f/gr8/g/AZiKZOIC4IuhY9kWEliCtCn5C2G59B3EU0GOhp/Kjk/WdeOyHLFhsPFZnVYkTNJr3I++ISLN/PLWPLRezPgLEVUdakjQwJhDIFEphdYJIs/p+yX1CnT8cJ8v96ablezp536yC+N/bsGdKoFaFyenhGMMVQd1ZBYvSH9vtox2DqhUKUUgZypUhs7RFLKORv1dalVIMny9v7r60p7bNKA7i58ty01RKBZ5eMBTXT0p2uD0VEqgT4uIRsa5ByzYr0CCEdMfIxJ/QFHKct3YB7dLkWCr7cnVo0P2IOgB+OVqNTpNVdGvMHSoTNcv3AbyweJC7hmxHDxgp7GdHeyjEe2JrQb0YjP7TEUfx5/uD5RjyzG8OkylfoeUiQ2KD8M1dkzFa2G55t79mvRldFhs4DmIeHYUeK5zFn+UHymGx8QjWqcTML51agWcvHS6OW2rajN26+dwhz8ejnJdBJsi7i5r6JGdJLPuKdHT+tMicPw0dJjz2A3GDXjqalAEXNejRbrSICzcxwToxTH+oL6HPtYK7lE9Fo0I4hnbUAjbyvGFuMn/k44QPtp3EL0er8dZGckx67MLBeOKiIXj9qlFQKjgsP1ghumGouJ8WGSCJP2dx7k+nyepSynWiTsr7oZwpZV+rDlfiuk/2YtRzG3DROztx8bs7cff/DqGqtQtvbCiA2WbHtIwIfHTTeATpVDhR34lfjpJz0biUUADE7e+u9Kux0wSz1Q4FJ52bS08h9JmWfF8xNkEU8GcJJbLbCxvcfufszaUAgHI+hoizabMBcBiIcoSjHd8fJAt9djsvVlWMdwrSp+fD4zV/4rKvsWPHitk+3f0wGKeFoi1A4YbT/jQFsgycD7cV4bEfjnafzC8MAPc3+aFJb8YPttkw80poOipwheFHpCkEl0EKcV+4E390aqU4eNwmrGAPjw8W27rmVrfj99w6ZHDC4CskGdB4aZUekgBkzCV/b3uNCD+0A8zud6BZcw+uGElO8M7Bz4fK3Dt/6ApckFbl0skHkAQsKv7Q0rFQfw248r1I3fM0AOC/1kvRMv4hYPojwD07gdhRgKGJlMP8fB8pVwtPJ51VfGHUdQA40va9uaTbm3tC7PjVYRQntPEhOtEN45yhQIPjLoxqAWftArTBUgZRD0q/egoN26ZBfG6x20nOC4AKLg7bm8NQwstKZi5+HbqkMQCAQVyFOPGSO3/kJ9W6du+D5bImPUxWOzpNVvHz90abwYLXfyNW3rlDovHgHCLM9FXYrgNC5s8GG3G9jFEUoaShAyWNemzMrcMYWy7GK06AV2qBEYvJfY6v6tVTyZ0slziXfNFVfIXw3cle4duD0vyWtJmIDNTimglJOGAfCitUxAnS0s0+bzWLq7pI8HCO9g8n+Vq3rwMeywf+uhN4qgq4hYT2IncNEWZlUOePvCSSdumiq9VydGolAmSldNQCL4eWc5Q16WG12WG381grDDpd3k8BeTaTRqnAxNQwIshnrwDAAQvfAWY/SW4gHB/kgc90hZ3jOLEcrK7dhAMlzbDYePE1OtNhtOCvXx2CXnAJWWy8OFCl4o88GJ+uJB5zE8bbbrSIk7vkCH/RhUO/S6NDjEDBenLjm1YBIUmS8BA+AAiQdXej4k+Zd1cY7fhF6W3Ys5x5Q2OwQEmaG+QHnwcTNFLeD0Uo/RqmKMfvuXWkjbdSIQp
elKRwKv50v2q8QxAfxiaHOny2nnBuNnD5mATXGzUXE+FQHUAcSxSlirzngEPpl7fMH1pmepPQgW59Ti225NfDYuORGuEv7vcAJLEXIA7iku3dvh45Ysevqr4Jff5N5nrLqmgVxRajxYY3NhTCZLVjlRtHUHccEBw+5w2IxLSMSEwQvselMjGnQhhzxATpHLPT4L7du8Vmx3JhYvefCwKRN+I77Lw1Clsem42M6EBEBWqhUSnclnYDIGOyxpMet1ke9kwZnxIGnVqBhg6Tg3DVG3ieFyfKqYIoTR2TrV2S6PLyulw0dpowOCYIb1w1SnSyZVe2ia7leJlYNlgsSfbm/BHEH3satCHRZGEOPNBOjr8RbsrN5eMkngce/O4I2o1WDI0LFgXXayYkYabQOZOGm9Oy3gFRgWeE+LP7ZCPOe3UzPt/Vu/HjP37KxsXv7nRwolCha2CMNCYYHBMEjiOvtb/K3Jr1Zvxt5THsK26G3mxDgEYJlYLDxtw6zHtzO9YcrQbHAU8vGIoQPzXumEbccvRcOCZJEkmk0GdJ/KHf2bgQP/HY31vxp9VgFt31C2Tn/wmpYfBTK1HfYXIbZN5VR77DTZp4IuIERIiOzdHKYvyeW4esilYUN3ai1WCBTu16/hkeR46jFc1dbruPnu34JP5cfvnlYrbP/PnzUVRUBK1Wi9mzZ2P27NnQ6XQoKirC/PnzT/f2Mv6MtFYAX18FLL8B0PddS013UFvsNRMSoVRwWHWkCtd9stdrsGDRSTKJrbCFY0p6OD5+5Aa8kP4dHjXfg++s56NOl06yaYKEFUk34g8giQ9VwmrKsLhgxIfoEBOsJS1r95VjIM378VTyJYeWfmX/QFrLh6UB854nHX6yV+DR+qcQgC5sOF4r1vI3680Og6m2Lot44KOr30nh/uCai4HqLIenc25X36IX8n4C1MCx78GBxx7tDLzF3yDWySNqMHDHBmDoZYDNLHaIwfjbyCq2L4QkAOmzyd9Hl/t2HzfIJ37U+RMX4idOJpw7ftHOQgvChUyZuNGSeJDX8xVbX7lgWAwUHLFcyx0JpY16zH1zG5buLCZuF2sXwClxtDMYAIdDgbPJDYctAjLmQhtPJmEzQxsxT1h5poHPBrNNDLzemFuHya9sxscyi7kz8u4O3TqSAPx3RxHajVYMjgnCJ7dMEFcvfRGOekwHmbzsso+ERaFFENeFdK4GW/LrseZoNe4XyhC5sTcBk+8l9ylYD1h6nntEB9khfmpiN6a0VQFFW8nfc58hv3NWdR/8bGgG6gS7vBDm+8gFg7Bo0kB0xQhCDn1cT9TlkEmsXxgRVbsjKJZkA2kDiaMkPJ1kAuX94nCzmCAtlAoOFhuPOkG0oxMUdw4dQFo9BlzDngFSBqBVKWCx8aho6cKvOTUobtQjUKsS91GY9cDPD4ilc+EBksAwNjkU/hqVdBxJOQ+YcLv0ur04fwCp9Kuu3ejQFaTeqf07z/P4vxVHUdSgR2ywDn+dSR7/p8NVYggq2R5poCwP43VuJ08dlhEBGgRqVRiR4DgQnaVfT9wgyVOBxAnApe9KV1LXj/P/1Ye9usJSIvzFY55aybmKNL1g7pAoUfz52UwcsC6PKwt9pkLJ8IRgl8l9SjgVArsXf2g74RE+lq5RYQkg563J6RGuN6KdvqKHEMeSHLH0SypTDRS7fTke99uNFtFhdO/sDEQHadHWZcG/hByLOUNiHLOWqGMrUBgv9FCIHhwbBLWSQ6vB4nBc7g1Giw1bBZckdTWuFtw/P2dViRNYd6Gv3UHvMymNfEdSBTFHPlGkWSHyz4siiT+S4LIptw4NHSZEBmgwM/9FcHk/I6ngS9HRqlBwSArzICpazcDSC4BPZgGdZBLfYbTgqVXH8NSqbHy2q0R0l42TuQS0KqVYCuOu81FPqO8wwWixQ6ngxPebuihb9dIElE62n710GHRqpSgyZ1W0OoQ9U0bJ3GAeFzMF8ec4n4q40AAgmJyTae6P2K1
Rth3U+fPi5SMcHC7PXTrMoXkFLdfdlEvEnxJhjJkWGSA6ruViCM/zKGrodIkkOB3kVLXhL/87hOo2o0sYsC/wPC92nlojLFTYHDp9See5AK1KzB6juT9Giw3XfbIXD37nPluvr1l3rBpWO48hsUH4/ZGZOPbcfPz68AxMTA0TYxquHJsoLkDcMS1NPLalRQaImZOAtPByuLxF/KzodzYhzE88t/a249fvx+vEbZV3VtWplZgqlKS56/qlbCM5dhFJg6X9MJ7kWF0bT87r/1ovZT+OTgyFWul4jA/xV4ui6rno/vFJ/JFn+jQ0NOChhx7C3r178dZbb+Gtt97Cnj17sGTJEtTV1XX/YIw/LWarHbd+fgCPfJ/Vszse/JQMfO1WoDa7+9v3EoPZKoZu/u2iIVh66wQEalU4XN6Ki9/dgWW7S1zCCNuNFuTkkkFiQHQqvrh9EjKiA/HSrfOx+I7HUTvrdQQ8chBY+LZ0pzCipKOz1qEDDRVPAEDBkZVxjuPEgUWnySp2+vIY9ixn8MWAvzCoTZgA3LUJmL4EuHEFoAlCYM0+PBayFUaLXRzQ0cFNRnSgeFKmgyT6OzVMCyxbQPJAaAYFIFvBIYMPWvYV5q8R3QvjL7sHO/42x3F1VuMPXP2lFJat1AKjb+j+9ckZI9z+6HfdT6hbSoEW1w4U8okfXdGKC9WJgojzSiENmxunEkSRhHFkgu4fSZxMpTtwOogI1IorLjTkFAA+2laEogY9vthTKuX9hKUgp5a8lryMu4GrlgGX/5dcF026xc0KbRTzhvw0SrFOm64gUrvvXqcWmXLkIlR33ZLq2o1YtptMwh+fT07OdN9vNVhOuTOEA1YTWT0HUMVHQh9B2jSPVZzEqsOVaDl5ADOV2eA5JTDtITKxDkkGzJ2kHLGHzBsaDY4Dbp6S4jiYOPodAJ64/yb9lbjE2quAin3keruduBs7nQYydCIYNQQIJFbn8AANXr1yFIKGXUiuK97mfaPkJV89DfTlOGD09bLXIKGShegW1QtugI5mLFW/gYvqlxKRxolwB/HH1WmiUHDipK6wrgNv/U6cFXfNSJMysbK+BY58Bfx8P7DuUcBmETMARIHo+Grye9jl5Dc95jaXAjyP5HB/RAVpMSAqQCyjBBw7fsnLYZ0daeuya7DheB00SgU+umkc7pieBo4jrsldJxvRYrBAo1Q4CFyOYbytDo8nZqkJLiR5/o4Cdgyo+JH8M/528jtjrvT34Isd38SY4eQYamwT3X/u4DhO7Po1MDrIsUSxlyTaq5HINcLIq/F1ExFIXJ0/5Ds4UlkhXjRWtppMoe+FL84fKnL42q0sLsRP/CpcOjrefZdFd52+KG5Cn+n+2ekk/hwpbwXPEwEjNkSHhaPIpJou8syT5/0Y24CaLPL3BS+Q33m/EGHCR7QqpfjdOtXQ550nGqE32xAXosNjF5LXvDqrCnY775A3U9nS1aMmAS16MwqFshjq+KEOvFLZRFH6XF3LomlZxvqcWhwXOvJ8e4CMR/42qAaKir3khk2OTh6PneRqs0nJtrkTyCdC97ubTuC7AxX47kA5XlybK36vnV0CE4Vsxt6IYHIOC+Ov9MgA8fsYKgvKB4hY4JyZ5iD+0DbvsjHWgKhAhAdoYLLaxYBoB7paxLHccXsKEsL8pK60QqyBs/OH53mx02N6VCA+uGEcksL9cMvUFBcxde4Qclw+UtGKhg6T6PxJj3Jf9rUysxJz39wujhP6ivp2I5buLMb+4ibY7DzKmvS4bdkB0b3pbuGqqrXLqyuprMkguja3FdSTzJsWA0xW0unLOSKBfjdpp91NeXXYV9yMX45W/yHuJ1q6efWEJAyKCYJSwWFQTBC+/8tU/Pvq0bhhcjL+fslQ8fYh/mrcMS0VAETBhTIohjQsMZhtotNJfiym7jW5oGuz8z67ntZ5apwBKfdne4HTmKmrBToreW8zBsqO3YL4MyuoEhqlAnuLm/DJTnIMcy75otDFhNx
zMPenx2f7FStW4JZbbnG5/KabbsKPP/7YJxvFODf55Wg1thc24KcjVZ5b9DljNgCZX0r/0+4bp4GT9Z3geXKSiwzU4vzB0fhtyQycNyACRosdz/+Si/u/Pexgpf7+QAWi7WSSsGjmJIfcgGkZkXjkgkGiai7iFwZohcG9TDyRiz/pUYFiSCatsQWAkVphsu+L80elBa76HJj9NHDrL1J48oA5wBzSLeaCICKCfLOvHDzPi9kEE1LCkOxku6e/J+nKiHBlMwMnNsq23zHwmQ5W0jRtRIzgFNAOmOHelq9QAHP/SbbztnVAoJvwTW8MWQhogojDqdw1nFakNgf4YAoJsTU7Dv7kHdekAZTc+SMNDMqbDKho7oJKwSG2U8j7iR9HSgKGXUb+z/yCOBB2vgkc+NT39t5tlcA31wAnNnm8yUVCrgW15LfozVgthMNWtnShvVpoExw+QMwYGZIYDYy4kohtgDSpaShw2DZR7GolpV80l8Jbx4aeOH/e3XwCRosdE1LCxKDTED+1ONjt0wGQUPJl4tVoQwA0KcSNMIY7idrqCryg/AwAwI1YTBx5HAcMv5zcN6fnpV/jU8Jx/Pn5ePQC2feT56WSrzE3AmodCTMHSGmS3QaseQD46grgp784PmCpUPJFW3jLoW63kh3eA8Zp8K+7vB9foKWMJTscAkABqf3ySSHcMq1hK+Ypj2BK5efAB5PJxFV2vAyTlYMNiXV1/gBS6PPbGwtR3KhHeIAGd82QOZbk5XKHPge+ugKPTY/E0lsmkEDXtkohCJmTvovUbWlqI4NDtRJb/2821j00w8F1ESMcA/JrO5Ary2VwdqTRMN1rJyZhbHIYYoJ1mDaAHF9fXEvOUcPiXd0stHujc+4PPbZSMS08QCOu/J+vPAp1ZzU5bwxbJN1p4dvAw8ccLwMApVoUWLoLfabdj2YM6iZY31cE90AenwIjyHvpIv5EDwPAIZxvQQTI+yA/x1HopKnch1XjKi8igTs0KgWGxAZDwZEsCbe46/RFEcUfN84fp7IvuqAyQQgUvXS0NJEJ0qpE8QOAkPdjJ061kVcBgTFEEOpO4HVipFhi2Nqj+zlDzy/zh8di/vBY+KmVKGsy4K2NhShq0CNIq8IAoUPfwVLfXS+0tDw9SnJ+0IlieZNBdBG46/RFmTUwCucPjoLJasc9X2fiaEUrdp5oBMfxWNQqyyhzFn+EfcSl41fFfunv3J9R0WzA/4ROeNdPSsL84TEYHBOEe2cPcPleT3RT/gKQspXfcmp8Hu/uFARnuWtUdP4IeWr0WBGkU4li+mgH8Yd8F+TjLI7jMCHFi0AllAU3a+LQjkDilHISf2ijCVrO39ZlEZ0icSE6DIwJws6/zcELi0a4PHxsiA6jEkPA80TsoE49T5k/1HXptbS9F7yxoQAvrcvDtZ/sw6SXN+Hq/+5FY6dZFB7rOkwuXRsvfmcHrvxot8eyd1riCwCNnWZkV7WJwqa80xeFljvTErwVh6Rz6qlmAVW2GDDx5U2Y8+Y2vPJrHg6UNDu8nrImPQ6Xt0LBOR6HALLwctX4RLxyxUiHRRoAeHjeIHx883g8cdEQh8uVCg6jkxw7ElNROzHMX3xfq1q6RMfZy+vyMPHlTd3mhLXozdh/shaJXAMWjPQs/hwqa3YstRUqKhr4EKTEy4T1uDEAAL+GbNw8NQWAVDJKG+s4I3b8YuIP4Ofnh927XWvJd+/eDZ2u+1prxp8Tnufx6U5ppci5la9Hsn+Q2qgD0mrcaYDWjspzOxLD/PH1nZPx4uUjoFEqsD6nFpvyyAnJYrNj2e4SxHHkRKUMS3Z9UHdwHBBGDj6Ooc/S90eevyDvgjJI2QPnD0AmiLOfkCb8FGEimNBVAJ2aQ0FdBw6Xt4idvsanhImWTWfxZ7QlS3ocWTBulCieOJZ9jbMLbq24MVJYqSfSZgJJE73fxh0af2C4MAk69Ln725g6gZW3k3KormapU46AtOpvQk2r3Pnj2gGLun4
mJwVA2SDskzRPZfiV5Hfuz8APtwCbXwB+/T9g+2u+vZYdbwAnNgCbn/d4k/nCpC2zvAX17Ub8cKhCDOUEgOYyIkjx4eniRHO4UykJwtNJcKm5E2iTVuHjZEG+pU0G8fOsbnXf7h5wFIa8OX9KGvVi2N4TFw8RJ94cx7kEhnfHnpONWLqz2HtOkFDyVc+HIj7ED/5pkwEAc9THsUrzLEYrimFUBQOznpDuM0L4/Ao3kH2mPg/Y/wnw21PA9zeT/Kzd7zo/k4i/RiUGbQIgE7rmYpIdQifqYrbQauDHOyVxqHg7KfWiyMKeXYgfS0RkY6vkFnBHldAOvLfiT1iKEATNA8e+d7iKTuxPCqUXMXoiOvLgyD71/U3EnUMfSnCV+amVDuVWcuhkkh6P75s9QBLQW0rJJI1TAJf+hwQ5l+6E/zeXYd5AUlaFXCG8O3mqVGqr8ZfKaITSr0CtyiXklx4DNuTUguel8rX6DpOD6E9X3lMipOMqFRHowF+eC0KhYbzOHb+oLV6+SjwsPhih6MAjWqHcbvQNRDik0POIOzcXPRZVexd/LhoRh02PznQUK08FIcT8uJ2c32KDdS6h3tAGAuHEiTVWQSbm8vI4SrKs3Xt3UPE5wUfnDwB8cvN4rLpvmpjF5IK7Tl8UsezLNfOnw6kdeqawoEJXl8ckhYplTDMHRTk6ruj3PXUG6cRHjxc9LP2iZT6n4vyx2OxiRsvFI2IRoFVh/nDi4Hh/K/ncbpicjJl0EtYD14tY8iUTvuJD/aBRKmC22cVyInedvigKBYd3rh2L5HB/VDR34fpPiYvy/qQyaGsOCpk1AAyNUoA95PuVk1NJPh4o2YmPft0Ps82O6RmReOWKkfj45gnY8MhMPOLmuzIuOQxKBYeq1i6HEOSnVmXjnq8PY/Krm/Hw8iPiQoon6IR4pkyMDXPq9iV1AwsQz58j4kn77YYOkxhKHRfi+F2gbmG3n5PgqC9SkiyrxFA/INix41d4APkeU/GHTvIjAzWuYeluoK7ML/eUwmLjoVUpEB/ihyjBeSl3g9DspJyq9j7JraJQgUKjUqBJb0Z9hwnJ4f74/q9ToVRwsNl5BxGqqKET7UYrKpq7xGB6Z+TiDwBsya8XXTDyUjjKUFnoc22b0UEEyT0V8aezHjlZB4izqkGPT3YU45qP9+KmpVITDtpAYFpGpLhY6wtKBYf5w2PdlnNT1ybd70Tnj9DJzU+thF1o924wW7H8YDl4niw4e2NnZhZ+Uv0du7QPI11/1OX61MgAxAbrYLHxDlmttiZyfi/jY0RBGQBZEOEUQEc1HpgY6LAoP87N+QeQxsyemjSczfRY/FmyZAnuvfdePPTQQ/j666/x9ddf48EHH8T999+PRx555HRsI+McYPfJJodgrlJfxB+eB/Z/TP5Onkp+1+Wchq0jFLgRfwAyyLh5SgrumkEGrK/8mgez1Y71ObWoaTMgXiGc0EM8rCC6w227d8n5I++8MiIhBGolB38YEWoWSiujfBR/PBEzAuCUUOjrcdMwcvL9fHepOCmZkBou2qNptxW6CpfWdlB6nJLtoiWdbn9jpwl2Oy86f4Yas8ht3U1g+5JJgmsi50dxBdqBXx93GKyLQboCNP/iZH2naAOOD/ETJ4TywGdaEnJZTBMpRwyIIiGsAMkZSZ1BhK6E8cDgBeTy7f8Cjv3g/TWYOoDsleTv2mNi2KIzcSF+GJMUCp4ntvev9gk1zsKKjbmBDM5b/ZLRbrRCreQcu+4AxCEQIUxiGvKlxxaCImtajQ6DVbPN7rEdq4Pzx4P4Y7fz+OfPObDZecwZEu0Q1AtIgeENPoQ+txstuPPLQ3hpXR6mv7YVj/6QJVr/HaDiD0JJzpSQhxLP1yJFUY9yexTabvgViMyQ7hM3hpQJWbtId7wPpwDrHwf2fUiynCoPAhuflUKUvVGZCaz/G/l7+BVk4gsAabPIPtPVTMLBFWryP28DTgqOr8566XNx5/xRqqTvlKfcH2O
btM/3VvwBHDvqyQbjGVHU+UMG60lG8lz5E18CZvwfudGhZeIEguZGDIoNcl9qA6mLD0CEyJumpEhX0u9G2kxg/K2klDUwBmjIAza/SK6jYevUwUUJp6VfnksJYgThk7o36Iqj2WpHe5e0uiiKw7IJ1vwRsWJ3oauV2/Bk9gIX580ImfNHPqmhx9hk2WB1kS4LG7V/wwh7PpnITrzT43a7EC+IP904fwAgIzrIxcnQa4TvRLk2Q3hsDzlCgmvtefWXmBBldyhRoVBhrb7DhC6zZ2eb2WoXM6cSQzSAxbfQ+KRwf4dAbgcMzVKQulvnj3Dc7KwDuloBSOKP0WIXJ1tWm12cFNHVZY7jcM+sAdCoFLhxitOCkbPTjy4k5K/z+XUBwMgE8rpyqto8CvbdsbeoCW1dFkQGakR30uUyl5RKweH2aWmyjj++O3+o+CM/BygVnCiKUTGULip4KucL8VfjvzeNh06tgMFsA8DjbquQ+zfxLknwbZIWHulzuJR9VQjij9of4G2w560FADwpW6TwRIBWJboE6GtrM1hE8cxstePnrGpc98k+LD/gftJb1qRHRXMX1ErOIYydNtdoFUqLxG5gMuHZT6PEYCFbhp5/45y+UxNkn5PLPiG4owptpCTRwfkjZP5Q50+70QqLzS6WfDmLTJ6gLl86D0iLDIBCwbm0eud5HkXC+aStyyKKTKdKh9EiLjrv+tv5+PrOyXj0gkH47i9TEBOsE12f8u6VVbJxzeY897EmVPyZLri1thbU44Qo/ri6W2nZ18n6Tqw4VAH5R3FKDpNvr8EFOxZjOFeKkQkhuGJsAvzUSuwtbsIbGwrA87wY7+DR7dgLqGuTCmvy7yzHceJ+WtZkwO/H64TvKXmf3IXjAwCqj2DGtmsxVCF8V4585fZm9PxCy84BoL2ajEGqEI1Y2YI6tIHionlYay7+IuT0DYoJdNugApDKr4saOr2eg85Geiz+PPnkk/jyyy+RmZmJhx56CA899BAOHz6MZcuW4cknnzwd28g4B6CuHzre9xagLFKyg1iv1QHA/JfJZQ353kscTgGq1nsqR7jv/AxEBmpR0qjH//aWYunOYkSiHRpYiaIc5L4bjVvciD9RMvFHXlOuUyuxZN4g3D5YWFH0jyTdeU4FjT8QTep6b0gig5V1x2pgttoREaBBaoS/w8orz/MobzZABxNCGoVgOpUfcY0Idmlq37bYeLR2WcQVotQOYQKS6mP3rt4SNxoYcRX5e9NzjtdlfQcc/ZZ8TmNuJJc5tUKmKyF0pTnUXw0/jVLm/OkCz/OwNpVi9wni/prqV0ruHD9OWoFXKIHb1gJPlgN3bwGu/w6YtoRc9/P9xAniieyV5D2leOlwR0s23t5UiMqWLoT6q/HwPDIp8esg23XSSgZcg2M9ZHoI+4C8nJIO5qrbulxWKt2VfvE87zCQ9lT29e7mE9h5ohE6tQJPXTzE5fqYINptrXvnzy9Hq9FlsUGt5GC2kW4zl763y9U2LXP+jEwMIQKtsKJ5xJ6BN1M+REz6SMf7cBww8mryt7ENUOmA9POB8x4ELn4dyLgAAA9seNpzoG5HLfDj3cDSOUTEUwcAU++TrleqiBgEkMe//jtgnFBOXfAr+U0ngjEjPX/faemXp7IQOvkPTZHKPnvD0MvI973phIOgIDp/6jsBmxVpVnKctyVNJcHWKdMA8GIJHS3LGZPo2QGYHiUJIA/PHSitKvO8VPJFP5/oocBl75O/931IxFVa8jX0MscHprk/XrqjxciOwQAwZ0i0zP0j7dfVMmcgJVCrwoXDYgHwuE/5M7TmViIWypBCn80OIqlY9kUndL89jYW5jyGKa4MpbCBw+3ogYoDH7XaBOn9qjjoGz9fnu8076xN4XnT+hKQRodHTuRTzngPCByCBa8S3YZ+A411LKkL9NQgWBBVv7h9yXCbtvCN+uwf490CgodDj7X2COtxiRrj/3miDgCAhEFeYOAfIVpL1wqQmv7YDBrM
NQToVBsnE9xsnp6DwpYtx3gDZY3e1SoIy7diWNJk8j6ldOi74wMCYQGhVCnSYrF477djtPKpau0SxSs5vQp7cBcNiRaF2ekakmJF12Zh4xIboRFErv7ZdbBzhjS6zTSybpG4UCs37KmnSg+d5r5k/lGHxwXj1SnIMvzo4FyHNx8ixavoSIEIQ9WkGnuyxHM5lbZVE5OCU4KeQ4/QCxX5cMTbBszPMCee21xuO18Ji4zE4Jgi/PDBdFAfoJNmZHUJXo3HJYQ77klz8kXcDS4t0dE6OdhIy5ccmgIwp/dRKtHVZcKLeqSuZcEzM7iKvISnctewrxE8tDnFaDGa3XcW8QRuYUOj2i4HPHWbwPI/adqO4+Ab0XcnN8ep28DwJLo8O1mH6wEg8NHegWF5Lu1fKxy9y4WlzvmsJmslqE/Nglghjr2OVbWL530A34ndCqB8CtSqYbXYxc2bBSDKWy3W3gOUr9XlQ8lbcq/oZoxJD8Pa1Y/DOdWMAAJ/sKMbbm06guFEPP7US84XYgL6Adv8qbtCjRW92KcGV5/7QvCEAMFntYgC4A4W/g1+2AGH2ZlTzwvEhb61b8Zu6hIsapf25q568p+1+iY4ObACIH0N+12ThLzPT8ci8QXjlCqfxn4zoIC2igrSw83CflXUW06uEv2uuuQa7d+9Gc3MzmpubsXv3blxzzTV9vW2MM4SGDtMp2d4K6zqwvbABHAdxFbfEl9Z/1PUz5nqyGq/yA6zGU2rn7Q26IuFOrQfI4P5v84ly/PqGAhyrbEOKSrDQBsURJ4WvuHX+kBOjClaM5AtIWK3A/RMC8bhFCOp1txLZG4Qa2DRTgYPYND4lzEGxL282oLHTDIPZhkmKAnB2M3G50DwNwamgUSnEso76DiNaDGYkcvUIMlaTDmPJU/pmu70x5x/ERVG0RXJDFP4ulZ7MfgqY+Tj5uyrTIZSWOk8oVAShzh+jxQ5D1iqo3huND2wvIFZnRaJBcGZ4aqFNmfssySWymYHlN4qihAuZy8jvUGFF2Iv4Q0/gdEXw2olJmJoeAQ52RFuEltMGMuD02AVHCH1GveT8iXfj/PETJuDuuse0GiwOgzV34s3Wgnr8ZwvJx3jlipEO7U/FTRG7rXW/wi2Wjl00BD/ddx4GRAXAzrvJuOgk73MdH4bRtMPcog+A8/+OqPs34NWb57h/ghmPAgv+Ddz0I/BEKXDLauDCl4DJfwUWvkWcGKU7yYq82w28mZSsgiNi44OHXL+3Mx4jgs8tPwMDL5AcYic3Ezed2OLdi2NugLD9FftdMqzQUQusF8rZkiZ5fgxf0AUDg2jA9Bbp6YXBbWOnGe0V2dDCjA7eD36xgjNipCDGCqLNdROT8PpVo/DwPM9lRoNjgzA4JggTU8Nw1fhE6Yq6HCL+K7VSZhJAtmvcLQB4YJXg/kueAgQ7ifGi86fU43PLu4ApOGByergsC4zs1zY7jzrh73inVe8rxyVgMFeBNIUwqJVlwgBEyJdCn8k5Vb6KnhzuTz63fR+QO0x7GNr7dgGJPXRtRQwkGWjWLsk91lIGfDwT+O907+LI4a+Az+Z7F6nd0V5Ngu45Ja5feBGWzBuIe2Z7EKx0IcC1XwNqf2jKdwBbX3Z7M19Cn+kxaXxwO7jcn4lQsvPfPdt2OTxP3GoA6TrpCafSL7VSITq/aMcvWmIzLjnMdSLiTNkeADwRLOi+q1AAY4XFii0v+Rz8rJaFEjuXfhktNvx7QwGu+XgvRj3/O6a9tgX3fJUp3cBsAJ/zE/Q5v2IgV4kFg6VjtUqpwOPzB2N0Yggenktef3SQDqkR/uB5KbDYG0cqWmC184gN1rk4esTuQI16NHSaYLLaoeBchQxnrhibiJX3TMWLkUIZ+qS7SEA+FUxluT90n2rsNIsinej6iR2BQ2EXAQDOUxzH4zN8F8zF0Geh49cvQueoS0fHYWRiCBaNIWKh2xbzAHYJ5T8zBjo+Jy37MtvsMJhtovjjXDY7Vib+qJU
cIgMcxzNqpUJ0aRxwLv0SguGLrNHQqhRk4u4k/igVnLgtzXqzKIzQTmrdwXGc2PULkER+Kv50WWzQm20OLg4AON5Hk24qOI70IObRjCR52Z5cIDxZ3ylmFVHyajpgttkRHqDB+JQw8bFpWbC7cY5CwYmieIfRCp1agf+7kMwrihv1MJh70SXWbCBzIwALFAeQBrLvzR8ei7+PMeBl1Wf4ajP5jl84PMZBXDxVwgM0opC3Ma9O/M7S9zMlknzfDpW1iCVutLsrbSYiYukCfvoLOIsBO2wjsSTsQzLXMHe4bcBBXcLyDsWcIGRaglNcbk9Dn1F9BDq1Eg/PG+iYueYEx3Hi9/pAifeSzbONU2/vwDjnufPLg1j43i6PtkeAOHluXLoPd315EM//chyf7yrBptw6FNZ14L/bi6CFGfelNWKmsPpR0uB4EOV53rHLT1ultNI16S/ETUFLnWgL1j6kWW8Wa309iT8AsHh8IobHB4vhZZcPEFb+QxI93sctbsSfAdEBUCk4vBX0HUK/vQR4ZxSw620yKPx0DlB9BPALB+Y927Pn8oSggnM1R3HDZMl+TlfyqPOnutUotlOd7ydMJNJmARnzyN+y3B956HOLwYypCpqHM14qeTmdhKdJ5REb/wnseR/47lrAYiCOjRmPkfc+JAmwWxwmOBEBWodSFLpKpVMrxQA8bv9HAIBpyuP4TvcaFLRbU3w34o9CAVz5CRA9nGQQZH7hepuqw2SlXqmR3AzF2zy2HE+NDBAHEQqhw9SAqEAM1LVBy1lgV6ixt4kMzIZ7Wr2keRZunD9HK1tR126CRqnAHMGy7U78oZfRt662zehQ0lLRbMCS5VngeeCmKcm4cpz774o4yW53FY/k5Fa341hlG9RKDleMTcDY5DAxILPUKRzW1EoGF/V8qDToG3A+MOtvSIyJJG3B3aH2AybdTfZxtdPgNjQZOO8B8vfGZ1wnZK0VxH3CKYC7NwOXfyi1zZUTFAtc9p4kisaPAwKiyeS1bLf3sGdKeDrZl21mx6Dz1gpg2cVAYwFxOp3/tOfH8BVaNiYrdwvUqkRnXEMhcQDm2NMQ6i9MPIZdToTf2mNAQwECtCpcMyHJJVBSjlalxIZHZuKHv06FSt4xjZZMDprvmh02/xVBMOWl53XGB+ePXAAelRiKYJ1avIw6f+o7jLDZeahkJQuUWYOi8O8RsrKOpiI4Q/NY6IJKdWsXbHYeOrWCfAfKhEzF2FGk45Pa+8TXLQqFtMpJc3+OfAXYTGT/Wn4DcbU501ZJ8skq9gFfXiaV2fkC7cQZOQjhoSFYMm+QOLFzS8wwsv8DJBTfjXtNavfuebGITtCuUsmcnNkrHc6tPaJ8L/neqP2BUV4WNb10/KLiT6ZQ8uWpm4wD1Inq/H0/7yFyXGguIp1PfYSWLOTVOjon1ufU4P2tJ3GgRApK3VvcJB2z934AbuVteNf2CjZq/4bpK8YAez8U73/txGT8/MB0B/FhgpPrxRtUHJmYFu5STiVv907PK3Ehfi5tmN0xwa8Wuup9AKcEJt9LLqTOH5n4E6xTi24+8XxWKZSyJ07C7zUBOG5PgYqzI75WErq7fX7hPSio68DJ+k6xNJx2d6MOE3dlTFabHXtOksnl9IGOzS78NUqoleR9au2yoLSR7O+psrIvwNH5ExOscys2iu4keTC11SwKPKV8jBRSTDN/jK0k+w5St8bmTrPYUt5ZAPeG2I0RQFokGQ8GaFUIEJqbNHSYxLwfSl85f6hzY6QH12m8m8YeVU7jnc15ju6fo0LJ1+jEEHAchzlDpIDhGJUeyTr3Qt8QWRfIBSPikB5FuuvyvBQ/0SO6pM9TwfGYVi9kCLaU4a6yx3GjajPuUP0GALh8TB+VfFm6xEUEKjxSMSc2WCc6zdOE48Sv2TWw8yTz7ME55Hu540QD2gwyt2DOj0BXC6oRhTssj+Oq6SMkh3SOa0MpKiDK9xldJ1kYVEakuW6zTPzx6Np2gpZg7i/
p/th2NuGT+BMeHo7GxsbubyiQnJyMsrLTZC1m/KGUNOrFlaN//nzcoyr9xe4S7D7ZhE159Vi2uxQvrM3FXf87hAvf3oFVh6vwrOp/eLz6IYxq+BkAObnLJ4iPfJ+FCS9tkrolZK8AaY08XRJ9YoQuAnW5yCxrwVUf7fFYP91TaNvF5HB/r6q4UsHhmYVSAOT8BOH9CO7hAVUu/gjvQ3SQDptuT8alVkHh7qwl5UvLLiaW5MhBZDJ5KtkdcqhgUX0Ei0bHiwFok4SDXXSQFlqVAjY7j33F5HM5TyEM8NNnC64DDqjLFp0sUbIJfIveIok/3iawfc3Mx8mqd+0x4Pe/k+4p424FrvuWiIgcJ20PnWCDfLZRssmKfLUxNliHgVwl/GsPwgYFWvhApBlzSXcxoHvnDwBoAoDpQi7akW9cu39R18+wRSTTJDiRrNw7ZRPJoSsoFwyLQWKYPxQKDnOiyOChwy8Rx6rJSdG5Na1IlOD8aSwUyynpYI7WZo9JChWzXdyVfdGuKbSLRZfFhnZZu+Nn1xxHW5cFo5NCHb47zkT7WPb1wyFycr9wWCwihM/LU2cgfSMZ1NoDYhDi3wNnXndMf4RkzTQXAwc+cbyukAyykDSlZ99VhYIIGwBw6DMyYeEUJEPKExwnlX5Rp1vjSWDZArJtocnA7b8SkehUiR1Fftc6Zh3R0i9DKVlZzObTEEyDIf3DJZG4J0IC4DgxtNulwR8t+ZKjDQIu/wgARyZ/1JUox4fMH3+NSsxumZZBj4NSEDwA0aUTE6xzyS3iOA4j23dIFzQXu5QpS52YyHlVHvbMcZzgAIFQMncK0GNS1WFS+nVYyE1Q6Uj53qq/uB6DtrxEVpBVfkQo+vFOEkDvy0CZ7hdxo3zfxpFXSWW4x1e7XO2xLbeMqpYucLBjVpfQeVIXQrKzdv/H9+2QQ10/IxZ7b1DgpuNXkHAOpaLKYbHTVzfij1kvrWzTki+KLpg4WgGSG6f3bQWaToyc8xVPCIHkc4ZEY+2D06FUcDCYbVLQrSCE1PJh6IA/OPDkGOdlH5BanXfv/JHCnl3fEypolDTqxc/c5xDvg0vJ7yELpPxFN+IP4Cb3h3b6SpqM49Xt+NVGGgO42yc9ERmoFd/zl9blws4ToZcKWrSsiHbRlHO0sg0dJitC/NQuzhSO48RMkrp2o5hJk+pU9pURHSiKKJ4EGXlpmrgNreUAb4dFoUMDQjGQhhTrgqWutDT3hzp/DGbRIeOr8wcgTkr6HZGHIUfK8iLpRJ7uUzmnUgolQxR/PDp/hM9HJv5QcXD2YCLIbc53XACneT+09ImKP34wYp36CSg/meF28W6orKELdbfSnM9ehT4LgeY2YUo/qHYdWXj4/iZwwnVzVNlIjwzAdCdnWa9oLScu0g8mAqW7MVY4vtEubfIyTSoS093tirEJGBRD3L0WG48NuYILnudJV1wA/7PMRWRwIBGqaHOMwg0kE1MG7Qxa3mQgpatWM0KEXNTAuIGu2y1knaKzDugg7njseQ94b7zH8cnkdBqU3uK2PPZsxSfxp7W1FevXr8eaNWt8+mlqaoLNdm6FI/1Z2Ui/mCArFu9uPuH2drT70S1TU/DXmelYMDIWIxKCEaRTQQszrlDvBQBEFnwHBUcmlnSSZ7GR8OROkxV//eoQmcDRVV75ylsMmTiW5R3EdZ/sxaGyFny4zXVltTcUdlPyJWdKegTeumY03r1uDKLsQlJ/T50/IUlkYmftIsGuAqnHPwLH24iwcvl/gSghkyVtFnDn730ziaPEDCer8oZGBBpr8ektE/CvxSPFEEyO48RJ9a6TDQhDO1IsQnBi2kyShUBXl4vIClm0LLyvxWCSxJ/THfYsJyASmPYw+ZtTAPNfBS59F1DJ3AZ0e5yEFfnKvzzIMC5Eh+uV5DVutI3H9ZZnYPcXVuhCkn3PUxm6kAyo2sqBkm3S5cZ2IFuY3I6/jUzqqRBAxQQ33D0jHW9cNQqvLx4tXjYhiJz
si2wxaOw0Q8EBQ2M9iD9hqVI5pbBSHhPiuFo/JT1cHIC7d/6QAXRGdKCYT0BLt3ieF0sfXlw03GuwrOSw8Cz+GC02sW78molJ4uXyunI59nZy/AqKTkKfog0C5jxD/t7xukP5oFiqRz+/nkBLv/KEDk+xowC/UO/3EXN/tgNHlwOfzCL7V/gA4PbfJKH5VIkT9rGWUgfXCBV//JuIMHxCOcBxpZ6KNdk/+Lza5kL5HjIB0YYAAy90f5vU6cCNK4Ebf3DvtKLvQ0e1RzcdIJ0D6EDe2ZHmNeuiqYiUp3FKUn5qMzl00gOAkUL5IQ19Lmt26vQlij9eRD9fEEOfM0n3wM5akhd36y9EACr8DdjygvSZ1Bwl+w8A3LYOmCq427a8BOx+p/vnoyH7sT0QfwDp83TTmUwMC+2m7GsiV4Bwcw3p/nalIAIc+Rro8OxWBkD2g93/kVw3hmbSpREAJtzu/b5uOn4Fyjp+1XcYUdXaBY4DRnkKlgaImPPlZUSQ0wSSfDFnxt5Esr+MbcC2V71vl4CYn+Mk/tASiWkZkRiRECI6UkTXZD3pFPmQ+QE8FPsN2Y9bSlwEFDnU9XK0ohUmq+exv9VmFzNvJjrl/QDScbyiuUsU8t11+nLB2C7lNE28W7pcFH+KHI49Dp3kLF2im5FPnIDcmnb8ahfEn5LtgN73hW/avWxbgVDeMko6DlGHZJfFJpZqU2g5zLSMCLdB+LScnhwziOMywsk9qVRwoqsl1k2AOkA6EKoUHGrajJIDSSj5alDHA+Acc2qoiCZ2/JLKvtzlnnWHVqXE+zeOwz8XDnMQYeiiW0OHSWwesHBUPDiOiO6NHhpN+Epbl0X8HngSf+Jl2Y4AGbvQ9+jWqakAgP3FzQ7d/Kj4Q9udj0wIQWSgFnMVRxDJN5NzFhUWZdDuUqkR/piSThYZaIff3N44nYQOoTWqROyzD4WSt5KupLXHAD/yXMNQjF/vGuqTi84rtTnA0guk417WN6Lzh7aVl5dzpkZK31+VgsPCUaSklf5ee0wQYaoygZosmKHG97bZuGtGGnEPxY0mYxlrF1DgOBaODdbBT62E1S7kTrZVQAE7ungNYuPdlH1p/KUFz+os8r3f+Cw5tv14J7D6PtHlRhkUHYRQfzW6LLZT6p54puHzXnDrrbfi8ssv9+mnq6tv0tkZ/c/vx8ngaZ5Q9vHZzhLRJUOpaetCUYMeCg547MLBeGrBUHx443isfXAGsp+bj6zrePjxQmlIzRFMCyUHKjoIKajtENtUtxgseOnzH0gZilIjtToF0B5MVtrstTmw2MhBprzZgHqnjBCbne9xh4uCbsKenblyXCIWjUmQBvYhPZxcqjQkhBUgA2ueJwOUo9+Ry87/B8k6um8v8EAmcPNq8SDeZ6h1UuBv9RFMHRCBayc6dh+hg6Qj5a2YqsiFAjwpFQoS7Lti6RfJ/aHOn4pmA6ItVYjjmsErNSS48o9k2sPAhS8Dt64lQbvO3Tqo86f6iMNqgrz9pXxylxjE4UolEYqW285HQNIoKO7cQF7/rL/5vl1qP2CUMBmmK/EAcPh/gEVPVpPpiv8gkj2Awg0eJ80alQJXT0hycLUMVhMxMbOTDEQzogPhp/EgujiUUxKhTqtSiqGeADA5PUI8mTvboAEyUAfIairtrkCt0w2dJrQbrVBwwCD/TkeRxAnR+eMl82fD8Vq0dVmQEOonBmgC0iSxvMngsKqqNZL3IirOqatOXzDmBiLGGtsksdqsl9o108+vJ6TPJhNzSpoPIelU/KnLBn76KwkMT5lOQoJ70oGwO/zDpeMcLfEB2b+UsCHBRCaHFVqnPJ/BF5PA65ZSMsCzWUlHrpwetK8++Bn5PXyR9zKogfOkY5LL9kcQRyDgNfT4nWvH4Os7J2N8Cvn+iG5GoeyrxluXGyrapc1wmzcCkHOMSsGhSW/G31fnYK1glU8ODyBCAC3BPFXxhzp/6nNl+Xk3kPynSwVXzK63gRW3kZXj358
BwJPQ/MTxpMnChUIWz5aX3XdQlNMb5w8guePqjruEevrS7r2ypQuLhWMzhl9O8rMSJxLhbd+HHu8HngfWPkJKN7+4BFj3GHGP2ExEwOqulJc6f5qLARuZEFLXWKfJimMVZKKQESVrK2y3A/s+Ara9RpxspbuAzy8Eqg6R8/tNq4CACJengkIJXPQK+fvQ56JA40DNUYeW5ulCWU1pk8FhPEQnwdSlQo+dpY16IqK0ETd1AZ+E2OhIyYnkJX8uPTIA4QEamKx25FR5nrwer26HwWxDiJ/aIQCbIm/3TnNpxInksRXEMeuOY9+T417EQMdjZlgqWQAyd5LVfgEqKFW0GMgk0G4BAmNQzcWg1WBBBRcPe9xY0slz++uuz+fsmBNwzg+5ZJSUO6ZTK0XBRt5RCgB2CWHPM5xKviihfuR+WUIZYUqEv9sOZFRIcBc0DBBnIy0BF0v0hDLYUp6M6TLkn4tT7g/t1tjQYRJzzxKcnT/tNWRRzcN7NGtQFO6Ynuaw/WLHL1nZ18jEELFk6FRLv2huUGKYn/ganIkVxR9yDGoxWET389QBEUiPCoDVzmOn8Fm1Gszid4kumCoUHOYPj8Glyr3SA7spZx0aF4zlf5mCr+6cLJbnDT8l5w/5LFsRiA+tl0mXcQrg6i+BmJHgwENXvsPLg/hA6S5SidBZKzW4yVuLIZEaMRcScBR/YoJ00AolYLMGRYlu7YWCa333yUY0dZpE188vtimw+0XguknCmI3jpOzAHEd3jkLByUq/9LDVkHFJCR+HFCdnnAgt/ao6BKxdQlyikYPIe5X1DXE0tUoVJQoFJ4q6+8+h3B+fxB+73d7jn/T0PnQoMPqFhg4TMoVVmhcvH4H5w2NgtfP4x085joOJ7d/icsUujEoIEWup5fgVCitpIAe5q9Uk04Cu0h8R1PORCSGIDtJiXBsREloS56DWrENFswHPrTmOBcvJFy+Fq8dzF6WK1slDspBBo8WGC97ejoXv7RJVaF/I99DmvVvahPT6njp/AOD8v5Pf+z4kmQc73iAHooEXSgGfHEfaUCtOUzwXPRDWZLm9moYjWu08pityyIV0sglIE62iLYDdJp7EC+s6JNdP4kTX3JTTjUpDcllSPZROhCaRgSFvc8j9ifHg/Jlm3oNQTo9KPhI77KOIKyBiAAkEHndzz7ZtrHD7/LVkxabuOLBFaFM95V5JqEqbQVw57ZXkNj4Sa6KDORII7THsmSJ2/JJCn+lrVys5jEsOEwfLla1dLsKq1NrTH9FU/BEEHLqKNzukDroPxgLfXut5M4T3vklvdmuv1Zus+GovmbRfPSHRYYU0KdwfHEdadNMuc7CaEWQjg774RDf136eKQglMEPKlDi4lk8ni7WTyGJoiiWo9QePv+P3yRfwJiARihY4VnAKY/TRw6xpJoO1LqKtDlvuTERWIgVwVdLCgg/dDZ4CT0KYJAIZcQv7+/Rng/fFEcFh5O1CZiW5pq5TcGJP+0vtt5zggPJX87SX3Jync38EaT/dp6kijEze3K960u9fQyxxdBzJ0aqWY+/Pt/nIxSyA10p/kzQBkdfJUurMBRKjzjyQT2JLt5LJxt5Lfo68lOUkKFZC7mtjeS7aTBZe5/5QeY+r9JKjebiHd6zw5prpapAEz3Rd93s5EICCKbKdMVAQk8aeyucvj+byxpRULlMLq+ugbyOc84zHy/8HPxFbsLhz5SlhsEY4jB5dKwdMTbnddMHAmOJ6Imnar6JqkIk+H0SqGzzt0YTr6LfDbk8S9s/IOIjo1nSSf1R0bgGQviyRpM8lnwduIG0tO/joyafnpXvGihDA/0g3Rahf3WbudF5ttpAuTIwfXpBAO3qqKRBsCyXU+OFA5jhNL27zl/tDrJqS4D8CWt3unEQBJ4f6kJGPVXcDP97mG7PO8VPI18S7Hz02lkZonyERYqZywS+gOCCBxInJryDgwIzoQigueFzZ6qaPYtuVl4JU4h6xDyiSZ+DMxNcylJCp
eVvpFaTdacKSiFYEw4PygSpfHBKSOX3SsnBrhZmJbfQT3DLPi45vH484Zns93k8QAW2HcLDh/8ozkeDNQVo4lxhkIZV9UvMqr6YDNzkOtFErlrSYizn11JfD2MODLhcA3VwGdDR63Qw7NBitp1IvltQOiAkWh6lQazgBAbnk9blRuwhURnkV/+tnUd5hgtdnFha6oIC10aiXmCk7QTUL2KXX9pEUGOLQK//vcBFyglgnlxdvdPt+U9AhxPwSksq984b3tEYLzp9keiB32UdBHCi7dec8B6bOAjLnkf2GR1oWOWmDnW9LClTtKdwFfX0Uy45LPA+7dI3QibIOqZKt4TgMcy74UCk7cp64YJy1GpUUGYHh8MGx2Hv/35WbYhLLu/1kvwK1TUyTBHACGXyls/2bxtVKk0OdOdOWT0t/9/DD3izOAVK2w/2OyGKUJAm5ZQxaKgxNIttr2fzncZbIgqu4vPndyf1jgM8Mjm/PqwPMkzCwuxA/PXjocARolDpW1YG22YNUr34fzDj+KdzQf4m+6n1wfxNQhrRgJpTgzjVvAwS6q5kcEgen8wVH49OZxWKQk4tCTJ4ZgyqubMeP1rfhiTykqLUFo4UKh4HjcNtAoqzOXvpD7iptQ3KBHbk07jnupFV59pAoL39uJfcVNsNt5sezLV+ePiLAi0qsV9lFXk5IkgEz+qeV+9pM9f6zeIg9Ac0Oy7OR0nkIQINJmSTdImEBKMbpagOosUfwpqO3ABAWxhXLOGQZnCtT9Izvhybv9yOvmxwpZVd9bZ8MOBc4fLAX79Zj4MWSCZDOT4OcVt5Oyq4wLgHG3SbdT+5ETN+B14O3Ayc1Ql24DAGTaycr0ME95PxS37d7J+zAmKRR+GiViQ3RQcIDZanexYFcIg6SkMH/E0o5dwupZkSD+3KraSF5v6U6y0uqGcH8NVMKEQP4ctW1GvLY+H1Nf3YxDZS1QcHDsAgUyoaauI1oiYmknxygzr0RqUh+XfVHG3kgEurocIiLSz2nQRd1PHj0x+GLym1P63iFv1pOkXOS2dcDsJ4gwdTqgrg5Z7k9GdCBGKsgEIseehmB/rev9aOlX+R7HIF43IY4uHFxKJrwp03suLDhDQ5970DEyWrYqDcBz0GlbJRlMgiOTdCr+OHX8AoAPbxyPFxcNx72zB2DRmHgsGhOPy0bH913JF0D2P3nmVOoMspBAmXq/VEpsEFY0J/8VCEtxfIxL/0PyrRoLSAadO6hoE5rcc4cqx8ny5xxLv+JCdFApOJhtdrddAC02O0Z37kAQ1wVbSDKQPJVcMXA+caiaO9yHJNdmA78KXR/nPkOctcHCMUUT6D5Xyt1209KvhgIAQKCWTNI7TVZkCSUCovhjbAc2CYJC6gzihvULJ3/f+btvYvHcfwLgyMIBFWDtNulzKd0lOi6UCk7M26BjrarWLpitdqiVnNSKOZKGahvEc0AJRwSTFLn4U77XfUi4wHkDyATpuwPlYkMMZ6igMzkl0GP7ZlquRt3gg/hS4OcHpBv8ssRxAli2m4hWan/ilnbGTe6PQ5YU7fSVNEkcMw6LDybn3qGXkmPPb09JmSQ7Xifn6z2umVJJ4X7iAhLN45NDz6ty50/m/7d33+FR1dn/wN93epJJ751ASIAACb33JiiCoCiuvaKoi3yta9vV/YlddG1rxd5WQQUFlCZKkxJ6hwAB0kjvycz9/XFumZnMJJMyaZzX8+RJMpnM3CQ3d+49n1MyCmCxinjL531EfD1V7d1mQw7+XMjLwSv6tzBW65CFV3QW+GAyTB9PwZQEg+tBBlCzk5TpRfKkL0sY9FoB8TbnfI6ZP3LZl/x7UhpLf3crBeeOr6EeixodffzOCKc/jyP5vFEOhIf6GuHvpVezYZqT+XNyI6ZvnoP/p/8Qd2U/7TIjKcRshE4jwGIVkVtaZbOoRcf5CVKz6t8OZOOb7WeUht5pDiWd3idWQWOtUTNjzu2yy8hzpUuwD7z0WlTUWOqUajZ
Ievw8qw8AAbnTP6FzgeH30dfl4M/xtfY/f0EGZT8u7gOs+Rfw8XRgzTOUnWvrzDZauKutoPPU67+nTGCbZsz9bfqaOfbpem5WXyya1QeX9rGfwHn32EQIAtDj3A/QWmuw29oVh3VJuHF4F/vnD+tB/XqsNcB++4xhOYh9IrcM2gwKtB0zD3RaPglAveaplsq7JjxB0xW7jABmSxnG+5baVQQMSZD7/uSjtpP0/eHgD3Np9QGKcE+WRkpHBXjh9tGU0fXBxhMQrRaI8ihhAMMz36fpSrYOr6QDRlA3CmoY/RBQnY3BwmGl7EuOoPeLC0SqZT8ihXyUCT446jdM+QcekRiMT28djIAuUkQ7+4AyQWOHTebPukNq/5w/j7lO0Xtnw3HsO1uMGz/chs+3nkJZtQV6rVCniV69aiqBMun5Glv2JRt2tzp6HCJdNLZUQ2d3SOPeXXW/l4M/MUIOumiyIQpa+2warU5drTy7QyndKa6sRYqQQbc3lD7fVuSsCpumz2E203uU3jfZ+xGavx0WUcA3lrGI9DehZ2Qjg4SO+t1A79c8TRdV5ghqWOuY4SWfeO/61PUKtqyyGPiJAqwbg2bjkEgn8L1dTfqSyX2lbFY35ay6sVKQS6/VKCspmTbTSkRRtDtJinCS+WNGOYaV25wAyo2tHWg0grICKPdXySutwoSX1+OdDcdRXFmLLsHeeOPa/nYrSzKlP4i0sn0+M4MeA4GIdqdvRFN4BaopydveVQPdyU0o+ZL1vJymwg24iXoLufU9l9E4+pYIGtTHSeZPsNmIAXpaUd0rJigXKna6jaNAsX8cBbzlk6z9S12ejAOgEbbyZLyhd7m+n7vkps+NmASl9vyRyr7kzB/HvhpyRkLcUMq6ctFsFqASg+uHdcHDl/TAa9f0w2vX9KPV41NS75nmNnuW2Tail7N+7L4+ALhzIzD4Tuo3NeqBuvfxCQZmSKPnt77j/GJODv40tt+P7XYAUvBMpdNqlIsvx35eAAWGZ2koeC+kXasePzUatbn+lrdpP5JVFgPf3EgX8N0nAyPup/3z7k3AhKeAqz91//8uQhpCIfX1kMu+iitq7CYBAaCgQVkOnQtd9z0FfB4+Cdy03HmPKmdCk9UGqOufo/e7v1L7b1SX2GW1Ofb9kd/HB/so51a2TZbl14C9NbQ9XUK8KTgYkkQZTlJvP2euGhiLELMRpy6U44utdTMsRFFUsrRn5P4X+PpvlP3ncN5hO0XMH6Xo9ftddA7ZbTxlxJXlqIG7/JPAKmmSYd85zht0Owv+SPvUmYIyiErwZ4gSZJAnpWHSM4DWCJxYB/zyEL3JTmygqYo2BEHA45f2wuz+MZjtZKqlnF1yzibz53B2CfxQihEWaeLY8boZRfKI9au16zBL+wcuOf2y/e/t0ApaXKkqprLAegzuEgRBoDKZ80UVSiA8QwxH1xCz/YRFF8EfuTQqKsCLfgcHfwIgAKMfAu7bBcz7g84rSrOBT68AjrrIOJHIwR+5pUQ3qZRHDv7Ut5DrktUK/Hgf8PFlCK+hv5N3TT6dbzmh1QjK4p9tTyS5rG1AfCDCfI0orqzFQ//bg/c20u/NMfijLGgMuFkqDRXrHdph+/zyFLBGl35JwZ/cWvq9mYMjqVxTXoCKHUpZimU5VB4O0N/kPwNpf7FUU8kkAGx8Cfh0JmXlnkuna7jPZlOwJGEMcPVnaiZ/bykj5/AvGBCpnjvHOAR/ekf7Y+7guDqlipf2jcSGhaNwl5mCNp9aJuG2kV2V0jA7/a6j938stpuw2k0qcSw9fwRepWdQLWpRGDbY9e9K7nUK0PXPoNvUr8UNpeNFTZlds/eekdS/tqza0mLT59oaB3+YU6VVtUoT58m91PKB64bGw6DTYHdmEU6teQ/C+XQUi174UJxOd1j9GLDjY/WB5ANh79l0wJCmsFyh3YiMC2UoKq9RgkCpsQHUFBSAT7/ZWPvIJTj8zCXY/dRkfH7bUIzqHgohPIUeL+e
AMrlg/7lilFXVQhRFrD2sBn82HXfeqK+grFop86qqteKJHyij5R++v0D/2xN1prO4JKXCQu/dvH484x6jCL1fDJ18tqbwFGroaJu2b0O+oB4hZ/3EDKx7YmxTOiaX7phQhe6ClBUlN4ptb+SMJJteCfKLf6iPHsbjq4Gv/gb8l7Jv1lr7IxtBGJsc5rTevlH6XkUnlRABCDQG3uyk3j/lCtovCjKoVMZxRcbWr09SD6rALsjs/6Byc4OZP1IjdVw4SunbAO4c0xXvXj8Ad4xWy3flkyDbps95pdWorLHS4n2AF8L95clIUvAntxSXazfDYK0AjNJ27P1fnakNyqY4NH3enpGPsmoLwv2MeO+GgVj7f2MxzWH1SKaOhaYLvZyzdAFSog9u/t+rPvLJw/7vqRbeYG7exbt3EF2IXvZKy2xfS5Izf3IP2a3a95eCP/usCfD3ctJTQaunSYX376WAd8/ptD+UnHPaEFOx9xv63wyIVzOimsONce+O5LKvsmoLyqpqcc72wkcmimrmZk/ptVDOCrEN/tRWAb+/pIzItVNZpAZRWiqIFzOQ3nsFqtvlyGgGpr0AzP3SdXPx7pPUEke5NMqWHAxscvDHZjKZA7kBt7xIZCvr3BnltUmTeo39F1Nm0X5TfoF6qsl+eYhS+/1jgSv+qwaMTP7AqIXSFEs3yc2ZpYCYHPzZf64Y/pWZ8NXW0hTEvGPAlnfovpcssh8+0FhjHgYgAIdXAGf+UoNAcvmaTemc7ao4AGSdOYYewmklKASowZZTF8ogSpk/cvBHPqYqTbnr6fvjY9Th/km0z7++9phdY1yAxjHnl1UjVl+EsMNf0I2Hf1ZLtiTqApyI1w1vQld8mv6Osz8AZr5FGZH7/kcZC++MpNdvg6/aoNyREvw5odwUHehFr1k1pyGU5dA5UGSacmEnN99FUAKVjwPSxDMrBVG7jAIgqj0abUxPjcLLc1KdTo2V+wies1lAOZZTigmaXdBCOu90UgorlxWNkqat+pafse/BdWi5+vHW/zrNqJIF+hjQV2o6/8fhbCUQfsoajkTbki9ADf5I9wly6JcTHeClZmJ0GQmMf4yChWE9gdvXUgYkRGl6r2vyoo8cz5KnOMlBuIwL5Sh22J8adGQlsJOuRT6vnYAdVul4fOpPl9+i9P0prFTOc+SFJr1Wgx/vGYkHJicpQw40AvUDUpTnU6AQoMCIXMItl902oMlNn6VMuEKRtks+Dil0BnWxUy6d+uFuyqSJH0FZQvf8Rf9jeh9aEH1/PA2P+PJqtdRr7pf2PfeiB1C2Z00ZhtRuh0Gnga9R57rkyom4rNXwqzoPeAfj8Ycew/9NTnJ+xwE3UQZq0RlaDJXIx7iofGrfsEvsjojQekqm9V503NZ7A5e9ap8lLQhqkMnmObSdsO8PB3+YU78fyUV1rRUJIT7KgQ6gg/QVadHwRTmCttBJx2u1s7Ax/l51wtJP91EGQuFptcZUXq3qSydo07RbkX2hUJn80C3IgKD8dGC/1NuhD0350mk19n2EwqQL1ex9iArwQpS/CRariN1nCnE8txRn8iuUYPdfGflOJ0/I/7xdQ30wI41OcmKEXNxc+Qmw+Q2XmQl1yMEf/5iml3gA9L2TnwEW7lcvxFuLzkgBIMBp6Zf8wjdC6vcj2PYjkdmUjskrOD2F09AKIoq0QZRS2R75RdH+JFqV5rP94wLRNcQH/wn5HvjyGjqpstbAEpGK/1d7LQB1ClCzeAWqpQWjH1DLu5zd79qv6AX5xDpg1aPO73d8nbrfXv4GhvaIg14rIDXGH36mBkac+0XThY+1VllB9jboMDklwm4yRIwy8UtdRZc/jvAzwaDTOM38mauVVjLHPEQrYdWlaoNkB6G+9sEjebrCuOQwTOoV7rRPhCw+RM78oW0qyqXVvhqvFvh71ScqjfpaybqNo/+rzsgvmkpVRItaJmipRUItXVi5zPxxpDOqfYAc0rjl5rkQRfWCefAdLVPK5sa
4d0dmo04ZoXy2sEIp/7LL/Dm6mkqWdF7q/7V80Vl0Ru2Vs/1DKvH9bHadqSI4s42ORYEJ7meCNKTreFpQmPNJ/Y2y3TH2EeoJlPkXBR1sNbXZs0zODr1wtE6Go9zEdouzfgtHfoFGEHFSn6j+bWVanXpOsuk/tFp84Ee6YBc0dKHjXXfiVKPIJdDZe4HSHOWiKyhjBX433o9t+jtgWHoL8MN8utBKnNS0KYC2QpPUbMMv5lCDZt9Idb+zCf7YZf5YrZiw9Vb8aHgMA81qL5bYIC9oBApuitn0P33IGkeTdORBAXLz+qOr610cu3pgLLqG+iC/rBrv/n7C7mtyj5lH/FZDsFQBpgD6wqrHaIKQRM5E6i2cxBjNbmqAf83n9LeKHqD+Tbd/SK8lccOBu/5Qg62OnDReN+qoTHiGViqzTJyAohqtku1ht2AyciFl5gLU4/DSV9QLxPTP689cdBCpjBO3D/5corX5fzq3Sz0GSgK89TCiGoM1al8+7JfaLFQUqNPqvAIpu0OefObCmCRaaNpz4ABgrUGtoMN5BNdtwh3eG4AAFJ4CSrLqBH8i/U3qiGw5C0Rm8FazDc9ur3d7Qn3tXy/l4E+Qj0GZwnWwsQER6fUpp8sMPFZ7K3YbpUC4XFrrRKTNxC85+GNbwhThb8I947vj1/tH4+f7RmHZ/BH2E4IP/kjnURF9aH+Ujw9Omj470+Rx79LiZQHMMOk1zqeqKn1/1lDgtDQbCEmmLEQ5S6jPlcAd66nE2jeSXu/9YoBeM4Frv6b+fbYEQenH43vsR3x+2xB8cutgmtLlDlEE/nyNPh4yDwH+/q4X6vReai+3jS8rAU654XP/2nQAwB+W3soxxKVrvgDu32+fGStLvZYCzGe2KuW8gDryvbP0/eHgD1NsOpaHjzdl4Ou/TuNzKW13cq/wOv+MN4/sgnt1S+FnKcAZTTQ+sUzBiO6hwMR/qTWmO5YAbwymE56wFKrZBID4ERD9Y+AnVOAT7dOI+ulafKn/N1ZUXg98MAmoKqIDjquVczlQIZ2kyPXLf2UUYK1U8jUyMQQhZiMqa6zYJU1HsLX5+AXlfq/MScP1Q+NxiXabeoff/kUN0Boip/w2pdlze6IEb+quupr0WkT6GtR+P86CP3LpWO4h+ApVMOk1SNFkAADOe7uI4rcX8kmctELk763H2nkpGJonXZAOvhO4axO0835HbGIfJIWb7aZMNcu0F4Fbf1Mbf7sS0YcygyDQ6uP659TeC7XVlAb71d/o80G3AwmjkBDigxX3jcL7Nw5y9agqQZBO8qD8XzkT42Tcu22/H0DNnMoqqkJxZQ1CSw6ijyaDJr6l/Y1WbwAKVDkpM3Qc975XavTYYOkabDN/aJW7qoCmKGlaI/hoO164KVO+OgpBqNv3J/cQ9GI1SkQvZIjhTpv+OyU3cTzwg3pBueox4JkQ4Ll44M0hQO5BCnzK/6fNJWf+FJ5yP8MTavaPXMpj1GnUCyFRVLNhBt8OmKVgo3ewWoYi9dVQSsOKTlOPBVvyirSrJvVNodFQNos7jcMbYg6jSWAAsPVt9faaCvUkuamZPz7B6vRLh+ED8sr69oz8Oo3gg89Qg8+jQWOdP27a32i1uDiTFnaWL6DbRyyov7myu8yh6s98Yr3S8+dqUIaMFyrpIv3MFio1uGRR858ToPIaQaNM+cHoB2mKG2DXj8su+HM+HcHVZ2EQLBhV9qtyH6NOi6gALwSjCJryPIgQcEyMUrJ+AVA5hNGfsqicZGfJdFoNHr6EzvXe23jCrk/TXxn5CEERJlf8TDfM/oCOlZYqan4tTYKUmxkP1kj7VMJo+15fYx+hYKHWAEx6msrmAru4/l3JQdj8E3b/83EBJlyhlYImfedg/3l6rYkJ9LI/hhnNwNwvqJH+VR9TULHn5ZRtVJBRbyaJI8eyL1EUcS4nj4JcAF101lbY9d8DaNT7AM0RmASboNCBZXTsObKKgvFhKerF8ab/1BuUkoM/WRl
U5peliYAVGvtmzwBlAsqljaf+rBP86aHLov1NowN6zqj7RHI554Vj9fa9cQz+2C4495Kyf/Y1NvgjNdo/CQqkF4dL/x+nNrmcnhppM/HLseePLUEQ0CvKT8mgUsiVDvJrW5eR9H964ZjaH7QeTc78kY4DBaLZ9WKfPJzl1B+072h0wKz/1l0UCE0Cbl4B/N8hYOEBWpSe8zFgcpFBLgf9jq7GoDBq3+G2E+to/9F725dfudL/Rro+LD6rZHN6G3SI8dMr1yh/WPs03L5DZ3Ad+PcNt2m38Jly85AEeh3alpHf+Ibc7VCLBX927tyJyy67rKUejrWyPZmF+NsHW/HUj/vx8Hd7lX45k1PqTozpYSrCLTpqbPpE5d9QAx2NqJQzWG6UXoxrpYtE2xUBjQZCGp3I99McQ3LZXximPQCTtYJWlHteTicFrqZbhfYAIADleUBJNgZKTZ+3n8pXgj/je4QpzQc3Hatb+iWvIA7tGgytRsAzM3vj0S7SqpDWSCmOK+tpumy1UmnbaumiXb6g6KjkrAV59cjBSL8chAjFqNaYqG+HI79IWhkTrRCy9yHU14jeAq2s5/v19NRWt4zUuXQSeX63mvm04yM6IY3qD0x9Xgk4fnrrEKxaMNr12PTGMngDsYPcyxrreRkwUSoJXL8IeCmJysDeGQH89hTVKMePoOkOkqRw3zonVS7ZZNS5ImeB2Y57dzxBktOmL5RV4XBWCa6Vsn6EXjPoxTZ1Lv2PZe11ehGhNtethCiKypSPPu4Ef4LtM3+EUgrgege34LhzV1JmUvqz0Y+azXZmjn1/pFXNfdYEiNAgwN3gT9extPpfmk0n5Nveowt0AKgsVHsz9L/edTlSY/nHUImHpRooPuf2t8n/R7ulCU6R/iZ1UeTQcqn0xExBBZkgqH0U8qSMFnmaF0CBXNtV6AzpIrKl+v14wtB59P7AD+qky33f08Wnd3DzMpZclH4lh/si0FuP8mqLMkELAFBVgrhCKhm8EDvZ+WPqTdTYGqBgW/kFILwPMNZFBmVTdFNLv8wmHSJxAUM1dFG9afCbtE9EpgGT/+06O6WxQpPUQFxgF6D/Der/pZPMn8yCclgOqUMDumX9Yhcc6BLsgyQNXZwWmaJRAZNdaRi0eiBRKodrYPjA5F7hGBAfiMoaK15ZrZY3bjuZj9t0K6C3VtF5ROIE6iVljqD/9fUUGJPHvQ+Ss1zkJt4ynZEmoz10krKAGsoI9Iuh1xxrjV1p+xUhpxEj5KFM8IaYNNWm34+Ti9zoAdRI3ygFJQze6nlt+uf1P78Nuewrq7gSFquInJIq9K/ZCZNQAzEgXg3SZtpnyvh7GZSSr436EXSxXJBBgVK55KvHpXRxbPSjDLqjrkv0KCNYh9Aa+h8+XksBa6cj4uOl8viMusGf3oVSZn/XcRTAdeQTrJ4f1xM0DHZ43G4229E7mv4ejZ74lU/Bn/Ry2i5z1yF07C8577LsN0LKzMqy6fkT4zjK3pXSHPUcWt43vALUrEYXU79s9Yjwg0agXodn8ssbvL9CKfvyhZ+r19+gBCrJk41+UF34bY6IvhRgra0EXkgAngkDXkkBjqy2v19ZHvD2SGreLpclylk//W9wLwtTb3LI/qG/0fiA8/AXylEsemOP2NX5NLzGkBebdn+pZOGlRPnBbNShpLIWBxubmdUONSr4s2rVKjzwwAP4xz/+gRMnaCXr0KFDmDlzJgYNGgRrI9IfWfshiiL+vfwgRJGmXU3oEYYRicG4eUQX9It1EsXd8RF0sGCzpRfWW9MQ6mtEku2KQcIoGgM44u90ITTgZvvvH7UQb4X/Cwuq78b91XdhQfXdODzzF+DB49RsMd7hxd6WwVvNUkj/HAPj6YCx41QBtmfIU8PU4M+fx+3rMy+UVuFwNvUbkdPJUZIFrTzuc84ntPqyfylw9FfUcX438OFkKm2rKKBtGbnA9fZ2BPLJ69mddcYoAsAkLzoJKwgd5LpXgZI9lI4wXxN
6S5k/ZUG9W3prW5a3FHAEKKBXW0UXoQBdNDgEZjzaO6YhIxYA016iAGittKKcd4TGOc94i4KuRicnb+5QMupcj5R3VvZ1Jl86QZIagwd5G6DXChBFYPvh02pavZzx4x1EgRIA2FG3MaWcOZRTXIWzhRUoKK+BTiMg2Y0pfHLw50JZNS6UVsGrigK/gRFxDX5vs+mMwO3rgLu3OO/d1JnIPbzO7wYKTikXbT9baWXVrbIvgI4lPaUFo9/+CcjDA8Y9Bty1Gbj2W1oIaMk+aBqtOvq5MX1/5ODPGboAUfr9WK3Aumfp4yHz6l4A2TabPfYblQSEJNPJLkDlQNkHKJtPzrz0dNPu5ohMpYtBay31ajmzjUoIgLpjthvLRdNnjUZQVl03276eH/0VOrEGJ6wR8I5Ocf24A29RM7C0Blrtbk7PHUdy358T6+Br1OJy6Zi31doD4YNmApP+Bdy5oWUaltua/AyQdh0w630KzoSnABDo4lYasR3qa4SPQQurCFQd/EX5VmPZObW5OKixc7JAmcxndJSBFe94ASVnNKZ/Ue/wAUEQ8I9ptOjzzY4z2H2mEOcKK1BRmI0btNI51ZiHaV/xCaF+U4AURBRp3HugCQPlzB/H4A9Afz93X+s0GpvSr+PKzTM0FGxdXjMYm8+UK8GfXpENLzQAUC8QD/zgsoedozBfE7TyRKmSKqnki849hZ7T1YU4h+BPoI8eI6Xgz7Gg0WoPpvQv1ZHzPS6l7IyB0vn2H6/WKR+T6bQajOoeijiBhrqcsIRCZzMdzo58PDr1J0x6LbyVxS8R0ZlSFpdchuiM3HfM4f/alkmvhZ9UMuml1yLSZuqq3Ntzw5Hcxk1aksr8Vpyl84KhyTHqMebUZqffIpeYHc4uQUkl9Vd0nFzl0qEVVLYb1c8+E00u6Xej9MvLoFWOdT/udn9xQs78KYSP8nt0St5vItPUIEpzCYIUhJVe9y1VlGm58WX7+6V/TuWxuz6lqWJHf6PfiaAFht7t/vP1u576tZVm0WuvKGKMlhYtN1lToNHq6g5jaKzukwGfMKAsV+lzptNqlMysRk9ja4fcDv588MEHmDp1KpYsWYLnn38eQ4cOxWeffYZhw4YhIiIC+/btw88//+zJbWUesnJfFrZl5MOk1+DDmwbhg5sG4fPbhuKp6Sl1e2zUVinpdit96KJ5ZGJI3Ytigw+l5P7tm7onxDojiuKnYJl1JJZaR+FnzWgk9B7qOtvH0fB76f2m15EcCPgadSivtqDWKqJriA+6hPhghFSas/tMIUqr1Ca5ctZPjwhfdRXj0AoAIq1GJV+inqT9tADY8y31ZqgsoouTd8dSzwODmabW3LFBvZjoqPyipIlPotPGdBOMlIIc2reejIaoNHp/bheizAKSpBPJmrAmlgG0Jjkwsfdb2rfLcgDfKKCXkzTmtiQIVFZy9xaqyx5+H61g37uDRo67+//jjBxQdUg3txVtU/YlSmnTjpk/Go2gTHyrObgCPkIV8oyx9tkMcjB473d1UsGVyUolVcpKX3KEL0z6hrOtfE16ZQVx3eFchIEe2y+klcoyfUIA/1bIMmprcvAnez8FL6pLIcYNw1ItHR+cNnx2RU6PP7udskf6XkMrkuG9gKTJdFFhaOFJbfKJeZbrLDdH8j4tr/gpDS0PLKP/GaOf2hjWljxa/cJxNWMi+RLKAvGNpFKUt4dRAE3uFSGXP7VX8uvj9g+Br66lk/3kS4Ex9WTLukMZ916395xc+mXX90cqoVttHVT/ND+jr3qhM+kZNdDdUuKGUV+akvMIrTiJmVoKKqwURiOhuSvQ9fGNAGa+SdmjAAVD5CCHVPolCAISQn0QigJ459Ft6yFdANv0hekS7KO8Zh+0xki3OfxOe15OmQMl59QJWy4MiA/ErH7REEXgyR/2YduJC3hA9zW8hSq68Ow+Sb1z98nUK6v4rJJ5Oi6sFKFCMSwag/O+HI3l2PenphJeR34EACyzjsDb648rfVa
cZv44EzOIMvtqytX+Ow3QagSlL97ZwgqcyMrHeI20v/e83CZQYh/8CRJKkCJQK4ay6FHqiO2/3qfn949Vj8tD7qIg55mtwLvjXAZdRieFoIsU/MkQI9AlxMd5rxb5tTv3EFCWp5w39xJOwVh4nPb95Gmuf2g5W9whoOVIzq7sGupjd90xJCEIgd565JdVK6PgG1RRQFl+AI5ZIjCsazCVjiuBLOd9f+TM5WM51I8tyMcAb0M9wRRbcklvD4cKGNumz47lZpnbKROmVO3BdUU/Oof4fmemcp5VL6tV7fkj+tZfdj36QTpnnPslBYxbSv8bgMdzgEdO0/RICFTqWnBKvY/S9FsAMrcBn0t9YHvPAgIb8ZqnMwDjn6CPN70OrHwEKZW0j/9h7Y3YQG/7iXVNodUDqVfTx4fVoHmokple1bzHbwfc/g299tpreP7555GXl4dvvvkGeXl5eOutt7B3716888476NmznZd3MKeqai1Y9Atldtwxqqv9BBNnDv5E0VDfSIy7/EYkhZtx0/AujX5e25rM3lF+7jcIA6h5dHAiUFEA7V/vol+8mp00TmrGGxvkjdggL9RaRWyz6c6++QRlAyhZPwA1aQPUaShjH6WRxMWZwPe3AS91B17vR2NuRSs9/z3baWqN1s0XhvZOnnAiryLJaquhPU0vlJpuY11/v03T5166czAIFuSLZhiDO0BgrMtIGr9bXaqe1A6+vWVfHFuSINDve/Iz1P+gJUpi5J5cJeedZn8BdMErCDQhL6+URm1mOvT8AdQTqMAL6QCA7Iix9hkBcUOpP0FtBbDLPmU+zKbhs9zs2Z2SL+WhpQuWXw9kIVSgEyLBN8Lt72duCOpGfXhqK2gqiN4bwow3MbN/LOKCvBueLmcrYQyV+wJA7BDg8teblz3iDrn3wabX7UeA10PuRVUr1fonepcBvz5Fo4QByhJ0NvFRzvzJPahmkiZNpUyUyxYDEKgnRNdxwBXvUjlLW2YXuiN5KgWoKgvpXCBc6knWnOAzQBevgoaCAA4995S+P6ekIQ611RClspZVloFKSapLw++jMiG5bK0l6U3KRWXM4Y/QU3Ma1aIWZ6Mn1dug3iPk3jh2pV9mjNVST5l0a1es8qdhGjjwo1I2ER/sg2Sp7GtnBfVIq9M3w+ANzHwbgECr+IfqX+x9ZFoPmI067M4sQu6Kp3GtTpqCNOFJ+31c76VmR0ir7Pcn0XmaJnpAyzTPD5KCP+d3S31yVgJVRag1R+Ev9MLGo3lKRnhKtJvHL0Gg8fIAcHyt25ti21RYPLEBfkIFSvXBFEySAyV5R+yyqwKyNkEjiDhojUVoZCwFzPTeFDAHKOtH/p36RVLGpFcQZVq8PxFY/USdwMPopFDEC/R/dkoMd17yBdDibah0fXdqkxL8mW2g6UroPtl1PxjAJqNvu8teO4A68Utu9izTaTWYkkKv4Sv2nnf9PLakyW45YiDKYcK8sdLfXw5kuejT5Hj946zfj1OVxerCqeNUxZjBFNwszbbvqymKNBhn16fAhueUm6f2iYBRp8Hx3DLsO+tGeVFVMV2XACiE2XXZF0ALVGMfabmBArY0Gnpdi+yrTtKVeyDlHqZjkkYH3LravvxM7hPbGKlXA1OljMGt7yCiQA3+NNjvx11yL9N8NVswxEz7fl7pRRT8OX78OK66iqYJzJo1CzqdDi+++CJiYjp4s9uL3MebMnA6vxxhvkbcOaZbw98gj+UccDPG9orC6vvH0Ij2RrKtJ09zVlpWH61OGncKYNN/MDxaPdjZTmIa0Y2yfzYdU4M/tv1+ANCF7smN9LF80Daa6QA1+iGqV64pp1WE4ETg+mXAlR+23wlWTSXX8x9fZ/8CfXY7/fzeIXTB7op8oMw7gt41dOK5z5qAQHMHmHokCMAAaSqFpZpeqOVsoIuF0VfNiHBR+mU7zetsYQWsVlHp/2N7kiTfp69Aq6y6WIc+UYIADLmDPv7rPbveE/JFdl5plRL8caf
Zs0yu9d50JAuhgnTi5NvJ/lfbmkajNgEFqNF/cDf8e2YfbHhwrPsNnwE6lk99gfqXXP1Z60xJG3gLZWuWnAc2v+nWt4TZ9M6ap/0Rt++cCfy5GKguoYsbV2nrcvDn3C4KlngFqY15ky8B5m8F7j8A3LCMTmgdp6m0RxotlbgBlBo/98uml5vaMpqlnn6o0x+ke5gZwT4GVNZYqfTu5O8QqkqQLQZgvybR7u/jlCA0f7JXfaTSr4BDXwEA1lvTkBjXBgsfSvDHvumznF2yztIPVZGDaXGrqphGrQNICPZCkkDBnx2V0ph3ZxNz4oaqmdc//d3lQgFAgfwFE7vjJu1K3G6hLKMDaU+oU4dsyeUoR6lPiE8WZYkI9bUAaAw5M3n3F8DnV9I4dAC61DmYnkrXMKJIjZUjbEqOGiRnJTUii1AOMJwvrERMFvXMyY6aSMdVux45asaOIYMCC39Y+1BplsHbfrCAPDlR1utyGt3d92oKDGx6XR1BLon0MyFBQ30y6w3+AGoTeqnpswE1uFwjZc/UV/IF0D6p0dM5tDQy3hm55NvZdkzrQ6/hq/ZluVf6JWV4nRAj0DPSD6O7S0M6YgdTgLngpNOebyFmI3Q2Adtod/v9HF1N547B3YHQZPuv6U1qefN2m1L3szvVHovpXypDPHxNekzqRb1Wv9/VcJNoueSrWuOFaugbnu7aGuSgqJztI0+ES5xIf4Pb1lB/qglPNn1C5JA7gVnvUdkYgEwxBBlihPPjVlPIASp5WAPUAOVFFfypqKiAtzf9UgVBgNFoRGQkn1R3ZEXlNfjPWjpIPjAlGT7GBrJYsvZRw0qNTu1Z0ERdbYM/cQGNf4Des2lsdGUhplX8BIBG8sr1wQAwXCr9kvv+5JRU4lhOKQQBGCqN7cORlbR6Et5bTQ0GKLgz/jHgvl3A7WuBa76kPkZyf5zOJm44NUUszqTmpDK5TjlhdP0ru77h1IUfIvrm0N9jv9gFQd4t2FvBk1KvVWuW0+Z69kKhvVImfrnX9yezoALVFiu0GsGuxjrczwQjqtFTSlMPTnZyAt/nKlolKsgAjqm9tYJ9DBAEwCpSg1CgkZk/Uu+huFp6bougp0a0rGXJq7ldRtlN6WhST6y+VwFXfqBOyfI0vUntI/TnYmrU2QA5Iy0CF/CI/ivorFW0Un/NlzSxz9XKd5DDgkr3yfYNakOTO+ZCwuA7qP/YLSuBgNiWe1y59MuhVEUQBJuR7xeAQ/Qa86tlACIDfFo/w8aRnDkrWWYZ0aRFsWaLkEp/bDJ/EgP1SrPgtdZ+SAj1VS/OdlNQJkZbAF+hAtWiFhliBMJ8ja7LXcY9RkG6shwKANWTzXGTzxb8U09tAl6uuRLhE+91fkc5+JP5FwWUpGxjp/1+mqLnDCp50Rqo95b8+KnXYJ7NomdKVD2jpp2Rm2xfOKZMK2tIZIC6gNKjXNrPbUuFHHvkiKJyHnbUPEh9PZRLv7wC6fzNkU8IZeSlzqXPHbOTSrNhQhUsooBMMRSJtiPLHckZM1LT5xu0qxEq5tEkPflv54repAYl6+n7c9uoBMzuH4M5g+oeT4Z1C0aAtx4XyqqV84KckkqMfmEdhjz7G/75437sOJUPq5SZWZtL57AnrRGYN6ar+jc1+anb4qT0S6sRlCAU0IjMH6Xk61LnX5dfI21L3Xd8pH69pswuC3pWfyr9+mn3uYaDXeX0eBVaeg3y82oH1Qg9p9P/Ws4Bum6Ug0B9KIEE3kGU5dvcvkN95wBzv4ToF4PPxKkAhOY3e5YFSUHY0mzlf/uiLPsCgPfffx+vv/46Xn/9ddTW1mLJkiXK5/Ib6zg2n8hDSWUtEkJ8MLu/GxlcctZPj8uafcIa6mtEhJ+Jpjp0aWTmD0An0FL2T+yhD/DYhCgsvjrNrnxMbvp88Hwxnv7pAFbsoZTRnhF+CJCDEgccSr4cCQJd6PSY1jqr0m3F4K022j5uU/olB3+cjXh3JGX/+JdSQHGfNQGBHSX
4Yw6lMr6AeGpedzFqxMSvP4/l4YYPadJOz0hfuxrrcD8jegqnYRAsuAA/BEc7mXBj8KHGfQBNPZLotBpldaXaYnW72bOsSwht33VSY9GCuEnNL0dhdY1cSP1Trvq4Y/5+e8+m43p1qdKwuj5yRtpIqbFkRWgqcOuv9LpQ389v8KZpQ7LkS1zftyPR6qg0NtiNbOHGUCZ+1e0PMlTu+3MsVyk5WmUd5P4FmieFp1AWFIBS0Qtrxf5Ia5Pgj3Rhm3dUuWBJqd0Ps1CJHDEA+8QuSAj1AVKvofsd+w04tgamfCr9PyFGoQa6+ksn9CYq/9LoqGR+7b+d32/Pt9D9RJPWPqydio2RNyPYVSZwQCxlFotWaiidfwKAoGbJNZdGA4x/nPrlJUr9huKGA2E9kRzhi4k96W/XJ8b9hQYAFLA2hwMQgZyDbn2LnE1y/HwuokC9XsJ72Pycjj1yLhwHis4AWgOeX3inukjb4zL6mWa/X3/7AbnM1bHhsJTRcFYMQQ109Wf+yMGf7H1I1GbjPp3U42jCk1S21xA3mj73jQnAy3NS7YIvMr1Wgym91NIvq1XEA9/uwen8cmQXV2HJpgzMfnszBv2/33Drkr+Qnk7Pk2+KxaV9HK5VlNKv+vv+AG5m/tRWqSW9rq4jYofQ4lpthZTlU0wNzgGgjxSI3faukgU9qnsogn0MyCutxsajdScW25GCSSVaOk9qF5k/XoFqUHDVPyjTSu9NJcMtLWkKhIX7sS2CgpyNOV+sl1egWsqdT8Mh1Myf6pZ5jjbk9llbXFwc3nvvPbz66qt49dVXERERgU8//VT5/NVXX8XixYs9uKmspe2Wyirkkef1qiwC9nxDH9us9DaVIAj47LYh+HbeMLV5ZmOlXAGEJEGoLMTtATswsZf9WPoQsxFzBtKJ94d/nsS/fqJmtkrJV9kFdTVEnvh0MesmpWPLv5PyfPUExJ3gj8PYyINIgG99kwfam0lPAwv22E9quJjIjVDra/osnQx9ue0MMi6UIzrAC4uvTrO7T4S/CakaqpM+aUiG4OrieNCtAAS6ALGZwmJbwuFus2dZXJAPAlCCK7Q0yUY3zAM9PhgFS0fc53y8b0cgCNR0GaApfzmH6r17uJT5I0/c0XSf4H5vHjlAotGrx1jmXOwQep+5HbBa7L40TMrWrT6zAyjLQRm8scXay/3SDE8SBCUruChhKl6aO8TpRazH+YarwYhsOo7H5P4OAFhnSYMIDbqGmGnkfPwIynr+bBbwPZXhHhFdNHt2FN1f6lkFYONLwPaP7L++7ztg6R0UzOl/A0bf+y4+vLmBQE6SdLEoTwkKT2mZfna2grsBf/sWuHsrlStKnpvdFw9OScado7vW880uyBmzNqV29ZHPd/NP0zGnGD4wB9icuyoTv/6iKWJrn6bPY4dAsC0L1Wgom0kO7rgij4/P2kvnvDLpgjZLF4XoAC90Da0n4OcbTiVNEHFr5uPwE8pREZyiZhU1xM2mzxBFYMvbylAZW9P6SqVf+7Pw4Z8n8fuRXBh1Gjw/uw+u6BcNs1GHC2XVWHMoB4Zi+tl69u5ft/mv3PT5tPOJX7ZZzA32EgOAk79T+a85Qs1cdCQI0vkOaBF97zeU7ROSBExfLGVBn1SyoPVaDaanUvnl97vO1v/8UtlXMeTMn3YQ/AHUckC5F1LyNI+WNb90VSoWX52GIQktmLUvl35Jk0FDfC/Csq+MjAycPHmy3jd5/DvrGHafKQQApLqz2rH1XTpYhfZQm3k1U2KYuXmp0Rqtmj2wf5nTuzw/uy8+uWWw3c84Sq7/Xf04TSqJ6AuEccNyJXU94w+aPvD5VXRyGJLsXjd+m+BPKbyRmNy77dPxmfuU4M9Buz48tmxX2ftE+2Pp/OFIDLNfaQn3U4M/F/z7uH6+oK7q6tC295SbbYM/jSn5Auii5RrtOpiEGhxEAgKSRzfq+9lFJH44rZ6LFmDTf+q9q5+XDkYdMEJDmT/GpEYEcUKkzLc
uI+pvjMroddjgSxlZDpkU3ULNCPU1YoRI/WvWW3pDpzfisr4eaF7aFOMfB4bejejZz7XtNtn2/Sm7AONxmlaz1kqvz3J2JOZ8Qr2bdF7UjwrAYSuV3Dgd+e2o//Vq78UV/0d9dHZ+Aqx5Gvjudgr89LsOuOw1JIb7qdNVXekuTROVLmZbrOTLkSDQgAObwFKI2Yj54xLVjPDGcNJkuz5RUtlXjEiZ6LmGGPtAckRvKpmpyAfeHEqj5AWN2merscxhar9G22muUiPb1L798PN9o2DUNbDIIvX9MRYcAQB4XfacfQlrfeQy4fO7gdp6sib2fA2sfAT48V4g94jdl4Z3C4a/lx55pdVY9PN+RCEPj13aE1cPisOrV6dh++MT8d1dw/HEpT3RXUdTzEYMdhJwjBtOv8+cA7Tw5MAu+BPkRmD5IJWgosel9WeB9plDx7b848CaZ+i2/jfaZ0FvfUe5uzz1a/X+LBRX1rh+3HJ5zDtlbrWLzB+AelIZbM4N5ZIvD+kaasbMftFNKz13Re6/JWXJhdoEf9yaxNaOdcB8bdYSrFYRe6XMnwYDMOX56snx6Afb1zSSlJn0/tSfQEl2nS8LgoDRSaFYNn8EPrppEF64si/GJocCJzZQ8z8IwGWvtq+fqa2Ep9CqYU058O4YSr33CqS0YnfITRUB+MT3x39vGOSZ7WSeEdSVxrbWlCsrHY6GSSdg0/pE4Os7hyq9UGxF+JmQJjV7tkT2q/N1O3Lj5/TPlclLto/ZmGbPABDkpcGNOlo9Wx9wBf9fs/rJY8sP/kTp+y4IgoBhPtREvAImmuDirrS/0cVXc/sbXAw0WiBGulDM3Gb3JUEQMC45FOO06QAAXdJk/PnIeIxOCm3ljXQhIA64ZBFlSbQluQ/Nzk+ANwYChadRJvjgD2tvRPmb1F4+PiHA1OeBBXuBUf+HU8Gj8I1lLAD7gRz1Gvso9csTLcAvD9FF+8aX6fPUa4Hp/3G/LDRmkP3EvLih7n1fW2ts8EfK/OkqTdoq83FYWNMZ1b9hcSaVjd70s9o0uCnkzG259MtqVYIWxphU+Hu7ETCIt1n0Tb5UzShyR3A3wBRAi62uysqLzgI/P6R+blMODlA2zGQpu/953bvYZLoP14eqGcMmvRYD4gNxa5oPvKzlAAQYwxLrPo9PMDD4Tvr4x79TCZYN20qEBrMKrRalaXqDfx+jmfpJAhRs1RrUzKlBtwEQKOte6rnZN8Yf3cPMqKq14tVfjzh9SABKsDTfSv+z7aLnD0DlgHIZnFdgnb5oHYJD0+dgKYBdYxFRVFFPQK4DcDv488knn7j1xjqGE3llKKmqhUmvqb/WF6BJAVVFdAKbMqt1NtBdAXGUUipa1ZHtTgiCgHE9wjBnYCyE2ipg+f30hUG3qfXIFztBUA/QxWfpgH3Dj+534/cJAfxp5VCISmvZCDzzPI1WzYBz0fQ5PtgHu56YhLf+NsBlQ9AIQwW6aujE1qfrkPqfs+t42meq1FGp4X4NZP6UZNNJYu7hOl8SDv+MSOECLoi+yI1vxskyuzjEDQd8o+j1Te7b4MI4Pf1PHDb1BXSNyBCI7g/cvalxF0sXMzmwdmZbnS89MT4MqRo6EZ8y47qGs0kuRnIw4nw6XRiGpeCDhFdQBi/q9+PIHApMeBKHx7+PXAQAcDHpyxlBAKa/RlkpUf2pn07qXOCS54EZbzSuH5hWZ1/C5KnMn5YmB2qyD9QpVXQmwFsPk16DLlLwR3TWN0vOiO1xGTBvo9qPsakcgz/H19I4eaOf+xkZXUZSnyeNHpj8TOOeX+6dCTjv+yOKwI/30HHYV8qa2/1lncDMjLRo9BeO4EotlTIK8ihxW/Jo7oBY1306JzxB5f3FmcCvT9h9Sc788ffSw7e+LJryfGp4XpYLGP3tg2OuDLxF/bjn5WrZdFCC2g/n16cAUYQgCHj8MurDuGRTBnacKnD+mFLPnzyLFPxpL5k/AE3kMvoDw+5p3GtmeyE3fZZ
KJE16rdLKoqOXfrkdIvz73103QRUEAWVlZaitrcUNNzRvChRrHXsyCwEAvaP869bE2irJVsZiYvzj7bO5Z8oVlKWyfyk1oWzIxpfoBcIcQS8CTNV9Mr3oNjbwI0ucAOxY0nmnonV2YSk0ljp7P42LdaKhUj5Tzm4AQIYYgaSEBsYdazSUHvzXe7SCljwVoVKvDJfNnjc8D2z/gCb1zfvDvpRGOlZ9YZmAXnFtvALP2j+NBug9C9j8Bk0kqWf1dqCF9uvMwCFIa6XNuyjFug7++GZuBCAC4X065pS01hA7RL1IH/swMOweBP51DjiwD32iA1x+m22TZ7fKvmQ6A2UQtYSkS+j/MCAe8I9umcf0tOBuVDpXU0YXiSFOsk1sCIKAqAAvJBRR2ZdPZHLdO41+kCYZBXZpmezV+OG0TxSeom3c8hbd3u96wOhmg1y/SOC67yg7uCmN3mMH0zCR31+iFgG2i647PqKAlM4E3PAD8M31QO4hav49VC13G5kYjJSoHwC5ddGxNRQ4sv0dSWPeEVzP38HgA8x4E1hyKZ2v9pqpnLP2jvaHXitgQLyLQTSiSNu75ml1ctfIBe4FN8J60j5+9Ne6ZXxjH6UytMMr6Pxm0G0YkxSK2f1j8N3OTDz83R6suG9k3fI8qewru5YCtu2m5w9A1QCPnm7rrWg6JfNHzYQP9TWipLIWuSXVSGylAaWe4PaVfEFBgdO3AwcOYM6cORBFEZMmTfLktrIWpNv1MVYbHsTi0geAz64EfrwPKHPSVX7jy1QGEj3QM53aW0KvGfT+1Cag+Lzr++UdA5YvBP54lT6f9gI1WmOqXjOBK/4L3L6u8YEfAJjyLHDXpoabELL2Sen743rce4PO7gQA+HYb4l4zd/m4cmQVYLUqfYV6RfnVbfZsqQUOLKOPC08BPz+gfm3XZ8CpPyEKWvScfj9mpLWTXiCsfZMbUx5ZSQ1WnampRFIllXVUx49ppQ27SMkXhfnH656THF1N77vz64tL/tHAvD+B+3YBI+8HtHpcMygWn906BH+f4GTyoqRbqBljk0MxZ2AMzMY2Kh3pNZP6CF1efw+udkWjBcKlSZluNn2O8vdCgpT5ExLfy8ljaijroKWyp41mtZH0tncpCCNo1LJrd3Ud2/RyvIG3Us/Q0izgo6n0en1iA7D0LuCXR+g+E54CQpPURVybCVgAgIM/IvDCLpocpfOix3LMUnYn+ANQJtNg6edfdhdN37JaEBvkjS33D8C73TZR76pCh+DFxpepcqCigBbLbvoZGLXQ/d/DVUuo1DLWoS1CZF9g4r/o45X/UH6uJy7riRCzAcdySvHm2mN1H08q+8qukYI/HWnISnsn9/wpzlTKwtWJXx0786fJaRwlJSV4/PHHkZSUhPT0dKxatQorV65syW1jHjT87IdI0pxFTNl+6jC/82Ng1WP2dyo8TRFugDJk2msZT0CslCouOi/9qi4Dvr6O6t+3fwBYa4G+V/OEL2c0GhoDK6c7NpbBRw0gsI5H/tud202BlqaQ0rqDk9xMVe8yEjCYgdJs4PwujEoMweOX9sSzVzhpFn1yA1B+gRoJChpqELnnG2Dzm8APNFZYGDIPE4ekQV9fRiNjssg0ulCorVRGiNdxZgv0YhUqTaGYNp6zGj3KK5CGDAA08UhmtdBKP6CWxTDnwnrYZUbptRqM7B4CL4PrBr1ajYAlNw/GC1emtsYWutgIHTDuH0DXDhZglUvtXPWzcZDga0GoQD03A2J6eGqr7MmlX3LWT/K01p1sag4FbvuN+gVZqun1+pPLqfempYoyYuRsmL7XULlQ/nF1+mxtNZVEAcDwe4GEUfTx8TX2zyNPDg1yIztpwlN0v5LzwP9uBt4YBCydh+B3UqFb8yRN5fryWqUfIc7uBNYvoo/HPw7c+bvSCNttei/XWW1D76Jjm6UK+N+tQHU5ArwNeHoGTZR7a/1xHMsptf8ex4bP7Snzp6MzhwF6H2orIgUBQ6XgT25
J44I/FdUWzHlnMx7+n3sBYk9r9NlxTU0NXnnlFSQkJODbb7/FRx99hC1btmDcOD4h6iiqL5xCiDUPtaIGuVPfA6ZIB7M9X1HJB0CpjcsX0kG6yyj3Rn23Jbnx8/6ldb/21/tSczsRSJoK3LicslvaazCLsbYSlUaBlaLTwIbnGv/9okglmIBa498QnZHKBQHg8ErotBrcNqqr82bP+7+n932vUifNLLsbWPUP+njYPcCU/9f47WYXL0EAekvZP/v+5/w+x9cBAExJE2By0euKtSBnpV/ndtEqt9G/cQ23GfM0Zdy7e02fexhyAQCFmoDWyz53PIcfenfrPK8toy9w9WfAGCnTx+gPDLgZuGU1MPcrta2E0Qz0+xt9/PsLwOa3KDun4CTgEwYMvw/oJp0zOE7skoM/DWX+yM9z+xoquTIFULBp95dAbQX9Tb1DgOy9wE/3UQBo6Z20eNxrJjDqAQpWtiRBAGa8RYNXcg8Cv78IAJjaOwLDuwWj1ipiw5Fc+++RSs8KRTO8DVpe9GpJglCn6XOImcr7Gpv5s/lEHrZl5OPr7WdwIre04W/wMLf3ElEU8fHHHyMxMREvv/wynn32WRw4cABXXnmlJ7ePeUDWPmqWdlhIQMjgq4Bhd9MYQgBY/QRdwO36lDKCtEbg0pfbcGvdJJd+nd4MFJ+z/9qRVfR+yiLg2q9oxYADP4zVZfIHLn+NPv79ReBo3VGodZzYAHw+h07Qzu6kBogandoI0x1JUunX4V9c36e2Sh2r2ns2nXzFDgWs0tSFCU8Ck//N/9us8eTSr+NrgbILdb9+goI/3MuslTgL/sgNubuNbfmLLsaaQ36tczP4M9iPsjUqfLt4aIOciB5AGbYAbW/88NZ7blsaDTDuUWDhIeCBI8D0xUDckLqv2/IErDNbgVWPqoH5cY9S0EZuLXB6C1AlXUxbrcpFOoK7urc9XoHA2EeA+/dR24LBd1Ap17w/gDmf0LnM3m+B9ydSk2xzhGcnBJtDgUlSQ23pmCcIAgZKPYiO5TiUJkvBnwL4tq9mz51FUBd6L/X9sR333hhbT+YrHy9LP1fPPVuH28Gfvn374u6778bcuXOxY8cOXHPNNSgrK0NxcbHdG2v/yo//CQDINPdRJzJNeIICPRkbgW3vUc0pQKmNoU4a0rU3/jHU6BCwz/6pKKAXBwDocWnrbxdjHU3v2dKJF4DvbweKMl3ftyCDmjMeXUUnaO9L0+LCewP6umPgXeo+mcq4svcChWec3+f4WqCyCPCNpEkwWh1w5Qe0vVe8S6O0OfDDmiKkOxCZSqu6ck8pWcEp4LyUqt3eM2A7C/m1/NxOwCIFd+V+P4ncW5K1M+G9AAhUPuSsd6aD7tocAEBkQm8Pb5gNrZ5KqwBgxN/b/rXSL7L+c4TgbsCkp+mY23s2BWWmvQT0v0n9ekAcVSdk/EG3FWdSyZRGD/g3MGzCkdEXGDYfmPYilXIJAr2f8ix9Xe6DOPNNwDuocY/dWF2kyWE5B5SSs27SVOaj2TZZI5YampQKoEA0t58x751JncyfppV9bbMN/uw6C1EUW2b7msjt4M/+/ftRUVGBF154AdHR0QgMDLR7CwgIQGCgi+7orF0x51BPjpoom4ZjAXGUAQQAvzwIVJfQCdiw+W2whU0kj6xM/4KylwBpGoCFGs0FxrfdtjHWkUx5lnqhVOQD/7vF+Qjb2mrg25spIBOWQk3hZfGNrIP3CVYv+I646B0nj3VNuYKabAIU9L3yQyD16sY9H2OO5NePja8ofRQgisCKhQBEKn/2jWizzbuoBHenLMSacuqjcuw3tSSdhwmw9sboq14kupP9ozQlbsLUrOa47FXg9rVqpmN7N+I+mv515YcUlBl8u1oaJgjqsUDu+yP/XoMSWi47cPAdQJpUgjb07tY5/vhFUemXaFGaiHcPo6lsR3NK1cCBlPUjQkAxfDjzxxPkps8FlPmjNnyudvshyqtrsTeTenwZtBqczi/HztO
FLbqZjeV28GfdunXK29q1a+u8ybc3xqJFizBo0CD4+voiLCwMM2fOxOHDh+3uU1lZifnz5yM4OBhmsxmzZ89GdnZ2o56H2agqRWQlHSD9k0bZf23kQqpxBaiT/sy31YusjqD3bEBroJNFeeqCMh2EG0Qy5jadkaZSGP0o7Xr3l3Xv89tTtDJvCqByytvXAPdsB2a+QyOGG0telXRW+lVdrjbjTZnV+MdmrCEDbqbmn8WZwI/3UuBnzzcUeNAa6cKJtQ6NRp1O9O3NwGezAYhAwhge8c7ap4hG9P3Jl/vStHLwx+Tnfi++jsC2709JFgXuAfeaPbtLEGgs/D071CwgTxME9e8kTU/tGuoDjQAUVdQgVy45koI/1XpfWKHhZs+e4JD505Syr52nClFrFREd4IXL+tLr17JdZ1t2OxvJ7eDPmDFj3HprjA0bNmD+/PnYsmULfv31V9TU1GDy5MkoKytT7nP//ffjp59+wrfffosNGzbg3LlzmDWLT/6bqiJjG7Sw4qwYjORkh3Iukx8w9Xk60Z32Quu/MDWXd5Ba2rXrc8pWkPsEyBeWjDH3BCUAox+kj9c8Q1PzZAd/UqeGzHybMgcBKp9Jm9u0JpbJ0+h9xkbgXLr91/Z+C9SUUSp3zMA638pYsxnNVEao0QOHlgMbXgBWSkHMMQ/Rvs1aj5wJWHASELS06n71p227TYy5Ik/8ytxW//1E0f1x5Kx+CaOpJ0/+CeCtoXTuoPdWqxhaiiAAIYmtWyoX1Z/eS9NTTXot4oJonLsy8UvKUK3U0fkWj3n3ADn4U3AKsFoQYhP8cbd0a9tJ6iM4OCEIM/vRpLfle86hutba8tvrJreDP469fVy9NcbKlStx0003ISUlBampqViyZAlOnz6NHTtoZy8qKsIHH3yAV155BePHj8eAAQPw0UcfYdOmTdiyZYvTx6yqquI+RPXIO0jNng9oeyLM10m9bZ8rgSdygP43tPKWtZC06+j93m+AU5uobMXkr55IMsbcN+ROCuyUZgGb3qDbTm8FvrudPh52D9BjWss8V0h3ILwP1fC/Owb45gYq4fxgMk3bAIA+s9u+VwHrvKL6AZP+RR+vf5ZWVsP7UI8M1rp6XEZjduNHAvM2Apcsar3JSIw1Vjep393Bn+g10pXyfCqVBtSSEtY0Jj/13F4+Vt+xnoJCHV20FPw5t1O5KVEq/VKCPxUU/CnXSsEfzvxpeX5RVFFirQGKMhHsQ9O+aiwiiipq3HqILVK/nyEJQRiRGIJQXyMKymvqTm5rRW4Hf+SePq7eWqLnT1ERHRCDgqiZ1o4dO1BTU4OJE9Uayx49eiAuLg6bN292+hiLFi2Cv7+/8hYbG9usbepsNNKqRG5AahtviYd0G0cNYSsKgF+kVdtuE3g6CGNNoTMCE/9JH//5GjVd/uIqGoWaOBGY8FTLPZcgUPlYnzkABODADzTe9cxWevHtd72aicSYpwy5S20qLGiBGW9Qs1TWusJ7AY+cBm5eAYSntPXWMFa/6AFAP2nxcfkCtVG5Iznrxy8GMHi3yqZ1av2up/ODIfOA237rGANq3BHVj97nn1AyfLqHOzR9lm4v0VBQiHv+eIBGCwR2oY/zT8Ck18JXyrByp/SrssaC9DOFACjzR6sRMCM1CkDbln65fUW8bt065WNRFDFt2jS8//77iI6ObpENsVqtWLBgAUaMGIHeval2NisrCwaDAQEBAXb3DQ8PR1ZWltPHefTRR7Fw4ULl8+LiYg4AyaxWBBfsBgCUhg9q4M4dlEYLpF4D/PGq2p0/aUrbbhNjHVnKLGmM+3bg0yvotpjBNAZVZ2jZ5/KPAWa/B4xaCKxfBGQfAHpdTk0Xudkuaw0aDZUyLl9AiwlRaW29RRcvXrRhHcmkZ6hnXc4BYNPrNIHSUVv1++ms0uZSs/7OdqzwDqKSo/wT1Ow+cQISQ6XgjzzuXer5Uwy6nad9eUhgApB3RGr6PA6hvkaUVNYit6QaiWH1f+u
ezCJU11oRYjYiIcQHADCzXzTe/+Mkfj2YjayiSkT4N2Iybgtxe09x7Oej1WoxdOhQdO3atUU2ZP78+di3bx/++OOPZj2O0WiE0WhskW3qdHIPwWQpRZlohCm6T1tvjeekXUfBHwCAwKNhGWsOQaBGhx9KTdPDegHXfg0YfDz3nGE9KbjEWFswhwLXfN7WW8EY60i8g+i1cumd1DMs5Qq1Z4isrSZ9dWadLfAji+pPwZ+zO4HECUrmj2PZV4HImT8e5WTc+4ncMrcyf7aeoH4/Q7oGQZBaFqRE+WFgfCC2nyrAi6sO4+U5rV+J43bZlyfdc889WL58OdatW4eYmBjl9oiICFRXV6OwsNDu/tnZ2YiI4FXgRjtDfZLSrYmIDfFr443xoJBEtQ44ZhCNkWaMNV3cEGDk/TTu+rrv6SSXMcYYY6q+V9NUutpK4KNLgd/+CWTvV79+Qc784WbPrAHyxC+p7083KfMnr7QaBWXVStnXBZEW4rjnj4cESb25LkgTv6Rx77klDQd/tmWo/X5kgiDgict6AQC+25mJPZmFLbix7mnT4I8oirjnnnuwdOlSrF27FgkJ9s3PBgwYAL1ejzVr1ii3HT58GKdPn8awYcNae3M7PPEMNaHbIXZHbFAnrzUe9QA1ixxyZ1tvCWOdw8R/Ajct51HLjDHGmDOCAFz2KmAOB0rOURb628OBxX2B7+8AMv+i+7XkOHLWOUXbTPwSRfgYdYgO8AIAHMstVTJ/cmul4A9n/nhGaA96n72XPnVz3HuNxYodp6g0b0iCfRJCamwAZkmTv55ZfsDtyWEtpVm5ckIzp67Mnz8fX3zxBX744Qf4+voqfXz8/f3h5eUFf39/3HrrrVi4cCGCgoLg5+eHe++9F8OGDcPQoUOb9dwXI+vpbdAC2GFNxh2BXm29OZ6VNBl47FxbbwVjjDHGGLtYBHcD7ksHjq4C9v4POLoaKDxFb7b3Yaw+EX1p6EBpNlB8DvCPRvdwM84WVuBodikGVRQCAHJqaTHfnzN/PEPu+1d4GijLQ4iZel02FPzZk1mE8moLArz16B5mrvP1By9Jxs/7zuOvjAL8vDcLl/ZtvYVVt4M/s2bNsvu8srIS8+bNg4+Pfd+H77//3u0nf/vttwEAY8eOtbv9o48+wk033QQAePXVV6HRaDB79mxUVVVhypQpeOutt9x+Diax1EBTmAEAyPPpDpNe27bbwxhjjDHGWGdj8KaePylXAJXFQOY24Mw2ml4Z1JXLvljDDN7UYzF7L2X/+Eeje5gZ6w/nUtNnqezrfDUt5nPDZw8x+QPB3YELR4GzOxFipkygvNLqer/tx3Sa5jW6eyg0mrrJMpH+Xpg3phsW/3YUz/58EJN6hcOga52CLLf3FH9/f7vPr7vuumY/uTtpTiaTCW+++SbefPPNZj/fRa0oE4JoRaWoh09Qy0xoY4wxxhhjjLlg8gMSJ9IbY40R3V8N/vS6HIlhNk2fpbKv8zWU+cNlXx4U3Z+CP+d2IjScGjTX1/On5tBKxO/6AgGYhtkDYlze787R3fD+xpM4W1iBE3ml6BHROv143Q7+fPTRR57cDuZpBRkAgEwxFLHBHpzSwxhjjDHGGGOs6aL7Azs/Vpo+J4bRZK+jWSWAlYI/hSIFhHxNnPnjMVH9gT1fU+ZPt3kA6in7EkVYlt2LW5CD6abfEYT3ADifOu1l0MLboEVpVS0s1tbr+9Mupn2xViDVGp8RQxEf3MmbPTPGGGOMMcZYRxUlNX0+txsQRSXzp7IkD7BQ8CEfvvAxaKHT8iW9x8jNt8/ttOv547SCqSADpsocAEAoCqD94kpg+f1A7hGnD93M9slNwnvKxaJADv6EIa6zT/pijDHGGGOMsY4qtAegNQBVRUDBSfh76RHuZ0QfzUkAQJVfAipg4jHvnhbRB9DogLJcBFtyAQA1FhFFFTV17lpydCMAYI81AQV9bqEbt38IvDkI+M9AYM0zQHVZne9rzYFfHPy5WNh
k/nT6Me+MMcYYY4wx1lHpDEB4Cn18fjcAoHuYL/oIJwAAxUG9AXC/H4/TewFhPQEAppx0+Eklds5Kv87tWQcAOObTH4GzXwVu+IH6fWn01Ddo40vA8oXK/QW0fuoPB38uElYp8+c0Z/4wxhhjjDHGWPsWSQ2GcS4dADAgPhCpGgr+rCmiAT486asVyCV4Z3cixNcIAMgtqTvxy+v8XwCAgB6j6YauY4HrvgMeOgFc/gYAAdjzFXBqMwC17Ks1M394b7lIiPkZAIBcbYRSr8gYY4wxxhhjrB2KTKP3UubPXWO7oWLrKaAW+D4rFABn/rQKm+bbIeZLcCK3DI8t24uqGityS6vgZ9Ih3lSJ76xnAAD9h0+x/36TH9D/eiDzL3qcnx8A7tjQBj8IZ/5cHKpKoa3Io48D4yG0RXcpxhhjjDHGGGPuiUqj9+fTAVGEqTIPgbW5ECHgoNAVAODPPX88L3oAvT+Xjq5BXgCAE7llOFtYgepaK/JKqxGYvwsAcN4Qj4DQSOePM+EpwBQAZO8Dtn+oFH2JaL3UH878uRgUngYAFIneCAoOa+ONYYwxxhhjjDFWr7Be1C+mooCu53IOAgCE0GS8e8kYvLfxBOYOiWvjjbwIhPYEdF5AVTEeHKxDSmxvBHjpERXghTBfI0oqa+G3cR1wEAjuOdr14/gEAxOeAFb8H7D23wjEYpyDkcu+WAsr5ElfjDHGGGOMMdZh6IzUbDhrD5V+Ze+n26P6YVi3YAzrFty223ex0OqAyL7Ama0ILtyP64deXfc+JVSaZ0gYUf9jDbgZ2PkJcH43btB+j4cx1wMb7BqXfV0MCtRJX/HBHPxhjDHGGGOMsXZPbvp8Ph04R6VFiOrXZptz0ZKbPp/bWfdrNRXq3yZuaP2Po9ECYx4BAIyxbEHrFn1x8OfiwJk/jDHGGGOMMdaxyH1/zqXbBH/6t9XWXLyipd/57i+BFQ8AR38DaqVx72d3AtYawBwBBHZp+LG6jgV0JkQgFz2EMxBbse6Lgz8XAbEgAwCNeY/l4A9jjDHGGGOMtX/yxK9Tm4CyHEDQAhG923STLkpdxwLewUBlEfDXe8Dns4HXUoHdX9PfBqCsH3cGKxm8gYQxAIDxmp2tmvnDPX86mcoaC0x6rd1tlgsZ0AHIFEMRE+jVNhvGGGOMMcYYY8x94SkU8KmtoM/DegF6vp5rdeYwYME+4OQG4MhK4PAvQMl5YOkdgEYKqTRU8mUr+RLg6CpM1DopI/MgzvzpRF5adRh9/7UaB88XqzeKIgSp7KvCJ6ZOYIgxxhhjjDHGWDuk9wJCe6ify2VgrPUZvIHkqcD014AFe4GJ/wQMZsBaS1+PHeL+YyVdAgBIE45DW57X8tvqAgd/OpFdZwpQXWvF9ox89cbyfGhrywAAuqAubbNhjDHGGGOMMcYazzbgE839ftoFnREYeT9w7w5gyDxg+H2Na8TtF4Ujmq7QCCL8M9d5bjsdcPCnE5F7ReWUVKk3FmYAALLFAIQH+7f+RjHGGGOMMcYYaxq57w/Ak77aG98IYOrzwORn3Ov3Y2OTdhAAwP/MGk9smVMc/OlE5OBPrm3wp0Cd9BXlz/WhjDHGGGOMMdZhyOPetQYgLKVtt4W1mM1S8Mfv7EZ1cpiHccPnTsg+80cO/oQi3M/YRlvEGGOMMcYYY6zRYgYBw+4BgrsBOkNbbw1rIUe13ag6p7YQyPgDSJzg8efkzJ9ORJQGxeWUVKo3SmPeKfhjaoOtYowxxhhjjDHWJBoNMOX/AQNvaestYS1J0GCtRSrjO7KyVZ6Sgz+diNLzp9h52RcHfxhjjDHGGGOMsTYmAOutUknf6c2t8pQc/OlEpNgP8kqrYLHSZ2IhB38YY4wxxhhjjLH25IAYTx/kHgEstR5/Pg7+dEJWEbhQVgVYLUDhGQBAphiKEDPXiDLGGGOMMcYYY21JAF2jW3TegKUKyD/u8efk4E9nIqof5hRXAUWZEKw1qBa1sPhEQqflPzd
jjDHGGGOMMdaWBEGACA3KA5Lohuz9Hn9OjgZ0IqJN9Ce3RI0enhbDEeLv01abxRhjjDHGGGOMMQflAcn0Qc4Bjz8Xj3rvRETbzJ+SSkCk4M9JMZLHvDPGGGOMMcYYY+2AIL0vUzJ/OPjDmiinuAqoloM/EdzsmTHGGGOMMcYYawcEKfqjZv5w2RdrBJvEH+SUVAEXjgHg4A9jjDHGGGOMMdZeCFLujxL8KcgAqko9+pwc/OlERJu6r5ySSpvgD5d9McYYY4wxxhhj7UmNKQgwh9MnuYc8+lwc/OlEbDN/8otLgcLTAICTVs78YYwxxhhjjDHG2gO57EsUAYT1ok+y93n0OTn400npis4AogUVMCIbgRz8YYwxxhhjjDHG2pvwFHrv4abPHPzpRGynffmWnwJAWT+AwMEfxhhjjDHGGGOsHREhqpk/Hh73zsGfTsS27CtWPAcAOCFGwKDVINBb3zYbxRhjjDHGGGOMMYUg1X2JIoBwuexrv31GRwvj4E8nlSBkAQAyxAiE+RmVnYsxxhhjjDHGGGNtR746FwEgtAcgaICKfKA022PPycGfzsQmSigHf05aI7nkizHGGGOMMcYYayfscjP0XkBQV/o4e7/HnpODP52IbYJYguY8AOCkGMFj3hljjDHGGGOMsXZGlBM4WqHvDwd/OhF5v4nwsiJKyAcgB38484cxxhhjjDHGGGsPlFHv8g2tMPGLgz+dUJoPBX4KRR8UwJeDP4wxxhhjjDHGWDshwKEnr5L5w2VfzA2iFDfsacwFQM2eacw7l30xxhhjjDHGGGPtipz6E5JE7/MzPPZUHPzpROSyr0QtdQg/IUYCAGf+MMYYY4wxxhhj7YRa9iVdxPvH0PuqIqCyyCPPycGfTkQO/sSK5wAAJ60RADj4wxhjjDHGGGOMtRfKqHc588doBrwC6eOiTI88Z5sGf37//XdMnz4dUVFREAQBy5Yts/u6KIp48sknERkZCS8vL0ycOBFHjx5tm43tQMJraGehsi8O/jDGGGOMMcYYY+2GINS9Tc7+6YzBn7KyMqSmpuLNN990+vUXXngBr7/+Ot555x1s3boVPj4+mDJlCiorK1t5SzsGOWgYUHEaAJV9mY06mI26ttsoxhhjjDHGGGOM1aFk/gCAfxy9Lzrjkedq06jA1KlTMXXqVKdfE0URixcvxuOPP44ZM2YAAD755BOEh4dj2bJluOaaa1pzUzsEURThhzIYq2jaV4YYwc2eGWOMMcYYY4yxdkQp+7K90cOZP+02JeTkyZPIysrCxIkTldv8/f0xZMgQbN682WXwp6qqClVVVcrnRUXULKm4uNizG9wO1FaWIbUmHcWCiFwEoaRKRE+d5aL42RljjDHGGGOMsY6gtrIM1qpylJYUo7jYi27UhQBVInDuONCIa3j5el+0SyOqq90Gf7KysgAA4eHhdreHh4crX3Nm0aJF+Ne//lXn9tjY2JbdwHbsMwBACYA5OAPguwVtuTWMMcYYY4wxxhhzNGuxs1s/g3xV3xglJSXw9/d3+fV2G/xpqkcffRQLFy5UPrdarTh16hTS0tJw5swZ+Pn5teHWsYtFcXExYmNjeZ9jrYL3N9aaeH9jrY33OdaaeH9jrY33OdZcoiiipKQEUVFR9d6v3QZ/IiJoUlV2djYiIyOV27Ozs5GWluby+4xGI4xG+z43Gg31tfbz8+N/KNaqeJ9jrYn3N9aaeH9jrY33OdaaeH9jrY33OdYc9WX8yNp02ld9EhISEBERgTVr1ii3FRcXY+vWrRg2bFgbbhljjDHGGGOMMcZYx9GmmT+lpaU4duyY8vnJkyeRnp6OoKAgxMXFYcGCBfj3v/+N7t27IyEhAU888QSioqIwc+bMtttoxhhjjDHGGGOMsQ6kTYM/27dvx7hx45TP5V49N954I5YsWYKHHnoIZWVluOOOO1BYWIiRI0di5cqVMJlMjXoeo9GIp556qk45GGOewvsca028v7HWxPsba228z7H
WxPsba228z7HWIogNzQNjjDHGGGOMMcYYYx1Wu+35wxhjjDHGGGOMMcaaj4M/jDHGGGOMMcYYY50YB38YY4wxxhhjjDHGOjEO/jDGGGOMMcYYY4x1Yp0m+PPmm2+iS5cuMJlMGDJkCLZt21bv/b/99lv06NEDJpMJffr0wc8//9xKW8o6i8bsc0uWLIEgCHZvjZ1axy5ev//+O6ZPn46oqCgIgoBly5Y1+D3r169H//79YTQakZiYiCVLlnh8O1nn0Nj9bf369XWOb4IgICsrq3U2mHVoixYtwqBBg+Dr64uwsDDMnDkThw8fbvD7+DyONUVT9jc+h2PN8fbbb6Nv377w8/ODn58fhg0bhl9++aXe7+HjG/OUThH8+frrr7Fw4UI89dRT2LlzJ1JTUzFlyhTk5OQ4vf+mTZswd+5c3Hrrrdi1axdmzpyJmTNnYt++fa285ayjauw+BwB+fn44f/688nbq1KlW3GLWkZWVlSE1NRVvvvmmW/c/efIkLr30UowbNw7p6elYsGABbrvtNqxatcrDW8o6g8bub7LDhw/bHePCwsI8tIWsM9mwYQPmz5+PLVu24Ndff0VNTQ0mT56MsrIyl9/D53GsqZqyvwF8DseaLiYmBs899xx27NiB7du3Y/z48ZgxYwb279/v9P58fGOe1ClGvQ8ZMgSDBg3CG2+8AQCwWq2IjY3Fvffei0ceeaTO/a+++mqUlZVh+fLlym1Dhw5FWloa3nnnnVbbbtZxNXafW7JkCRYsWIDCwsJW3lLW2QiCgKVLl2LmzJku7/Pwww9jxYoVdicK11xzDQoLC7Fy5cpW2ErWWbizv61fvx7jxo1DQUEBAgICWm3bWOeUm5uLsLAwbNiwAaNHj3Z6Hz6PYy3Fnf2Nz+FYSwsKCsKLL76IW2+9tc7X+PjGPKnDZ/5UV1djx44dmDhxonKbRqPBxIkTsXnzZqffs3nzZrv7A8CUKVNc3p8xW03Z5wCgtLQU8fHxiI2NrTfiz1hz8TGOtYW0tDRERkZi0qRJ+PPPP9t6c1gHVVRUBIAujlzhYxxrKe7sbwCfw7GWYbFY8NVXX6GsrAzDhg1zeh8+vjFP6vDBn7y8PFgsFoSHh9vdHh4e7rLfQFZWVqPuz5itpuxzycnJ+PDDD/HDDz/gs88+g9VqxfDhw5GZmdkam8wuMq6OccXFxaioqGijrWKdVWRkJN555x189913+O677xAbG4uxY8di586dbb1prIOxWq1YsGABRowYgd69e7u8H5/HsZbg7v7G53Csufbu3Quz2Qyj0Yh58+Zh6dKl6NWrl9P78vGNeZKurTeAsYvBsGHD7CL8w4cPR8+ePfHf//4XzzzzTBtuGWOMNU9ycjKSk5OVz4cPH47jx4/j1VdfxaefftqGW8Y6mvnz52Pfvn34448/2npT2EXA3f2Nz+FYcyUnJyM9PR1FRUX43//+hxtvvBEbNmxwGQBizFM6fOZPSEgItFotsrOz7W7Pzs5GRESE0++JiIho1P0Zs9WUfc6RXq9Hv379cOzYMU9sIrvIuTrG+fn5wcvLq422il1MBg8ezMc31ij33HMPli9fjnXr1iEmJqbe+/J5HGuuxuxvjvgcjjWWwWBAYmIiBgwYgEWLFiE1NRWvvfaa0/vy8Y15UocP/hgMBgwYMABr1qxRbrNarVizZo3LWsphw4bZ3R8Afv31V5f3Z8xWU/Y5RxaLBXv37kVkZKSnNpNdxPgYx9paeno6H9+YW0RRxD333IOlS5di7dq1SEhIaPB7+BjHmqop+5sjPodjzWW1WlFVVeX0a3x8Y57UKcq+Fi5ciBtvvBEDBw7E4MGDsXjxYpSVleHmm28GANxwww2Ijo7GokWLAAB///vfMWbMGLz88su49NJL8dVXX2H79u1499132/LHYB1IY/e5p59+GkOHDkViYiIKCwvx4osv4tSpU7jtttva8sdgHURpaandCuPJkyeRnp6OoKAgxMXF4dFHH8XZs2f
xySefAADmzZuHN954Aw899BBuueUWrF27Ft988w1WrFjRVj8C60Aau78tXrwYCQkJSElJQWVlJd5//32sXbsWq1evbqsfgXUg8+fPxxdffIEffvgBvr6+Sl8Lf39/JVORz+NYS2nK/sbncKw5Hn30UUydOhVxcXEoKSnBF198gfXr12PVqlUA+PjGWpnYSfznP/8R4+LiRIPBIA4ePFjcsmWL8rUxY8aIN954o939v/nmGzEpKUk0GAxiSkqKuGLFilbeYtbRNWafW7BggXLf8PBwcdq0aeLOnTvbYKtZR7Ru3ToRQJ03eR+78cYbxTFjxtT5nrS0NNFgMIhdu3YVP/roo1bfbtYxNXZ/e/7558Vu3bqJJpNJDAoKEseOHSuuXbu2bTaedTjO9jUAdscsPo9jLaUp+xufw7HmuOWWW8T4+HjRYDCIoaGh4oQJE8TVq1crX+fjG2tNgiiKYmsGmxhjjDHGGGOMMcZY6+nwPX8YY4wxxhhjjDHGmGsc/GGMMcYYY4wxxhjrxDj4wxhjjDHGGGOMMdaJcfCHMcYYY4wxxhhjrBPj4A9jjDHGGGOMMcZYJ8bBH8YYY4wxxhhjjLFOjIM/jDHGGGOMMcYYY50YB38YY4wxxhhjjDHGOjEO/jDGGGPsonfTTTdh5syZrf68S5YsgSAIEAQBCxYscOt7brrpJuV7li1b5tHtY4wxxljnoGvrDWCMMcYY8yRBEOr9+lNPPYXXXnsNoii20hbZ8/Pzw+HDh+Hj4+PW/V977TU899xziIyM9PCWMcYYY6yz4OAPY4wxxjq18+fPKx9//fXXePLJJ3H48GHlNrPZDLPZ3BabBoCCUxEREW7f39/fH/7+/h7cIsYYY4x1Nlz2xRhjjLFOLSIiQnnz9/dXgi3ym9lsrlP2NXbsWNx7771YsGABAgMDER4ejvfeew9lZWW4+eab4evri8TERPzyyy92z7Vv3z5MnToVZrMZ4eHhuP7665GXl9fobX7rrbfQvXt3mEwmhIeH48orr2zur4ExxhhjFzEO/jDGGGOMOfHxxx8jJCQE27Ztw7333ou77roLV111FYYPH46dO3di8uTJuP7661FeXg4AKCwsxPjx49GvXz9s374dK1euRHZ2NubMmdOo592+fTvuu+8+PP300zh8+DBWrlyJ0aNHe+JHZIwxxthFgsu+GGOMMcacSE1NxeOPPw4AePTRR/Hcc88hJCQEt99+OwDgySefxNtvv409e/Zg6NCheOONN9CvXz88++yzymN8+OGHiI2NxZEjR5CUlOTW854+fRo+Pj647LLL4Ovri/j4ePTr16/lf0DGGGOMXTQ484cxxhhjzIm+ffsqH2u1WgQHB6NPnz7KbeHh4QCAnJwcAMDu3buxbt06pYeQ2WxGjx49AADHjx93+3knTZqE+Ph4dO3aFddffz0+//xzJbuIMcYYY6wpOPjDGGOMMeaEXq+3+1wQBLvb5CliVqsVAFBaWorp06cjPT3d7u3o0aONKtvy9fXFzp078eWXXyIyMhJPPvkkUlNTUVhY2PwfijHGGGMXJcgA1ZEAAAHYSURBVC77YowxxhhrAf3798d3332HLl26QKdr3imWTqfDxIkTMXHiRDz11FMICAjA2rVrMWvWrBbaWsYYY4xdTDjzhzHGGGOsBcyfPx/5+fmYO3cu/vrrLxw/fhyrVq3CzTffDIvF4vbjLF++HK+//jrS09Nx6tQpfPLJJ7BarUhOTvbg1jPGGGOsM+PgD2OMMcZYC4iKisKff/4Ji8WCyZMno0+fPliwYAECAgKg0bh/yhUQEIDvv/8e48ePR8+ePfHOO+/gyy+/REpKige3njHGGGOdmSCKotjWG8EYY4wxdjFasmQJFixY0KR+PoIgYOnSpZg5c2aLbxdjjDHGOhfO/GGMMcYYa0NFRUUwm814+OGH3br/vHnzYDabPbxVjDHGGOtMOPOHMcYYY6yNlJSUIDs7GwCVe4WEhDT4PTk5OSg
uLgYAREZGwsfHx6PbyBhjjLGOj4M/jDHGGGOMMcYYY50Yl30xxhhjjDHGGGOMdWIc/GGMMcYYY4wxxhjrxDj4wxhjjDHGGGOMMdaJcfCHMcYYY4wxxhhjrBPj4A9jjDHGGGOMMcZYJ8bBH8YYY4wxxhhjjLFOjIM/jDHGGGOMMcYYY50YB38YY4wxxhhjjDHGOrH/D6vAEXv2bloNAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "praat_hnr = call(praat_harmonicity, \"Get mean\", 0, 0)\n", + "plt.plot(praat_harmonicity.xs() - 0.02, praat_harmonicity.values.T)\n", + "plt.plot(xs[voiced], hnr[0, voiced])\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"HNR [dB]\")\n", + "plt.ylim((10, 40))\n", + "plt.legend([\"PRAAT\", \"SpeechBrain\"])\n", + "print(\"Average HNR (SpeechBrain): {0:.1f}%\".format(hnr[0, voiced].mean().numpy()))\n", + "print(\"Average HNR (PRAAT): {0:.1f}%\".format(praat_hnr))" + ] + }, + { + "cell_type": "markdown", + "id": "5013f5cf-56d8-442e-93ee-2fafb86397f4", + "metadata": {}, + "source": [ + "## Comparison with OpenSMILE\n", + "\n", + "Unlike PRAAT, we can do a frame-by-frame comparison of jitter and shimmer with OpenSMILE, which is helpful for further verification of our approach.\n", + "\n", + "* [https://www.audeering.com/opensmile/](https://www.audeering.com/opensmile/)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "d0c4104c-526d-4af5-afe4-767a01ba3da6", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install opensmile" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "142069ad-eca7-4dee-b932-2e841e5fb972", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(346, 25)" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import opensmile\n", + "\n", + "opensmile_extractor = opensmile.Smile(\n", + " feature_set=opensmile.FeatureSet.eGeMAPSv02,\n", + " feature_level=opensmile.FeatureLevel.LowLevelDescriptors,\n", + ")\n", + "\n", + "opensmile_feats = opensmile_extractor.process_file(audio_filename)\n", + "opensmile_feats.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "91c6ae75-fc26-41af-b976-ba53ca92e95a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Index(['Loudness_sma3', 
'alphaRatio_sma3', 'hammarbergIndex_sma3',\n", + " 'slope0-500_sma3', 'slope500-1500_sma3', 'spectralFlux_sma3',\n", + " 'mfcc1_sma3', 'mfcc2_sma3', 'mfcc3_sma3', 'mfcc4_sma3',\n", + " 'F0semitoneFrom27.5Hz_sma3nz', 'jitterLocal_sma3nz',\n", + " 'shimmerLocaldB_sma3nz', 'HNRdBACF_sma3nz', 'logRelF0-H1-H2_sma3nz',\n", + " 'logRelF0-H1-A3_sma3nz', 'F1frequency_sma3nz', 'F1bandwidth_sma3nz',\n", + " 'F1amplitudeLogRelF0_sma3nz', 'F2frequency_sma3nz',\n", + " 'F2bandwidth_sma3nz', 'F2amplitudeLogRelF0_sma3nz',\n", + " 'F3frequency_sma3nz', 'F3bandwidth_sma3nz',\n", + " 'F3amplitudeLogRelF0_sma3nz'],\n", + " dtype='object')" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "opensmile_feats.columns" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "f621629f-e65b-4fd2-b769-4e4d79e71299", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABI0AAADeCAYAAACwnfB+AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAjqVJREFUeJzs3Xd0VOXWwOHflGQmvZJCSEhC772KFKXZsYEdsXtF5aJ+ChYsV7GLV1AsV7EXVFBRQEQRpUjvPZQUSO912vn+eDOThPQCIbCftbJmMnPaJGdmztln7/3qNE3TEEIIIYQQQgghhBCiHH1zb4AQQgghhBBCCCGEOPNI0EgIIYQQQgghhBBCVCJBIyGEEEIIIYQQQghRiQSNhBBCCCGEEEIIIUQlEjQSQgghhBBCCCGEEJVI0EgIIYQQQgghhBBCVCJBIyGEEEIIIYQQQghRiQSNhBBCCCGEEEIIIUQlEjQSQgghhBBCCCGEEJVI0EgIIYQQQgghhBBCVNLsQaN58+YRHR2N2Wxm0KBBbNiwodppd+/ezdVXX010dDQ6nY45c+Y0eplCCCGEEEIIIYQQorJmDRp9/fXXTJ8+nVmzZrFlyxZ69erFuHHjSE1NrXL6wsJCYmNjefHFFwkLC2uSZQohhBBCCCGEEEKIynSapmnNtfJBgwYxYMAA5s6dC4DD4SAyMpL777+fxx57rMZ5o6OjmTZtGtOmTWuyZQohhBBCCCGEEEIIxdhcK7ZYLGzevJkZM2a4HtPr9YwePZp169ad1mWWlJRQUlLi+t3hcJCZmUlQUBA6na5B2yKEEEIIIYQQQghxptE0jby8PFq3bo1eX3MBWrMFjdLT07Hb7YSGhlZ4PDQ0lH379p3WZc6ePZtnnnmmQesUQgghhBBCCCGEaGkSEhJo06ZNjdM0W9DoTDJjxgymT5/u+j0nJ4eoqCgSEhLw9fVtxi0TQgghhBBCCCGEqL/P1h/lxaX7Gd8tlFcn9nY9npubS2RkJD4+PrUuo9mCRsHBwRgMBlJSUio8npKSUm2T61O1TJPJhMlkqvS4r6+vB
I2EEEIIIYQQQgjR4pg8fdCbPDF5+VQZ26hLO55mGz3N3d2dfv36sXLlStdjDoeDlStXMmTIkDNmmUIIIYQQQgghhBAtjXPcM30jWjU3a3na9OnTmTx5Mv3792fgwIHMmTOHgoICpkyZAsAtt9xCREQEs2fPBlSj6z179rjuJyUlsW3bNry9vWnfvn2dlimEEEIIIYQQQghxtiuNGdGY4b2aNWg0adIk0tLSeOqpp0hOTqZ3794sW7bM1cg6Pj6+Qifv48eP06dPH9fvr776Kq+++iojRoxg1apVdVqmEEIIIYQQQgghxNlOw5lp1PCwkU5z5isJl9zcXPz8/MjJyZGeRkIIIYQQQgghzjiapmGz2bDb7c29KeIMYzAYMBqNvLv6MC8u3cdVfSN4/aRG2HWNecjoaUIIIYQQQgghRAtisVg4ceIEhYWFzb0p4gzl6emJEQfQuEwjCRoJIYQQQgghhBAthMPh4MiRIxgMBlq3bo27u3udRsES5wZN07BYLKSlpdHRsxCjrgX3NBJCCCGEEEIIIUTdWSwWHA4HkZGReHp6NvfmiDOQh4cHbm5uZOQdItjT0KhMI33tk4izQVaBhU/WHSW70NLcmyKEEEIIIYQQopHKDxolxMmc+4dBD41JRJO97Bzx0dqjPPXDbv7395Hm3hQhhBBCCCGEEEKcJo0pX5Sg0TkipzTD6EBKXjNviRBCCCGEEEIIIU4XvWQaidpY7BoAxzKku74QQgghhBBCCHEqPf300/Tu3dv1+6233sqECRNO70aoMICUp4naWe1qqL1jGYVomtbMWyOEEEIIIYQQ4lyVkJDAbbfd5hr9rW3btjz44INkZGSc9m1JS0vj3nvvJSoqCpPJRFhYGOPGjWPNmjWNWu7DDz/MypUrm2grG6cxjbBl9LRzhK00aFRktZOWV0KIr7mZt0gIIYQQQgghxLnm8OHDDBkyhI4dO/Lll18SExPD7t27eeSRR1i6dCnr168nMDDwtG3P1VdfjcVi4eOPPyY2NpaUlBRWrlzZ6ACWt7c33t7eTbSVjdOIRCPJNDpXWO1l2UVHpURNCCGEEEIIIc4amqZRaLE1y099K1nuu+8+3N3d+fXXXxkxYgRRUVFcdNFF/PbbbyQlJfH4448DEB0dzXPPPcf111+Pl5cXERERzJs3r8KysrOzueOOO2jVqhW+vr5ccMEFbN++3fW8s0Ts008/JTo6Gj8/P6677jry8vJc8//111+89NJLjBo1irZt2zJw4EBmzJjB5Zdf7lqOTqfj3Xff5dJLL8XT05MuXbqwbt06Dh06xMiRI/Hy8mLo0KHExcVVWnd1HA4Hs2fPJiYmBg8PD3r16sW3335br79lbZz/mcY0wpZMo3OEpTTTCOBoRgEDY05f5FYIIYQQQgghxKlTZLXT9anlzbLuPc+Ow9O9bqGFzMxMli9fzvPPP4+Hh0eF58LCwrjxxhv5+uuvefvttwF45ZVXmDlzJs888wzLly/nwQcfpGPHjowZMwaAa6+9Fg8PD5YuXYqfnx/vvvsuF154IQcOHHBlK8XFxbF48WKWLFlCVlYWEydO5MUXX+T55593ZQMtXryYwYMHYzKZqt325557jtdff53XX3+dRx99lBtuuIHY2FhmzJhBVFQUt912G1OnTmXp0qV1+lvMnj2bzz77jPnz59OhQwdWr17NTTfdRKtWrRgxYkSdllFX0tNI1MpWLmh0LKOgGbdECCGEEEIIIcS56ODBg2iaRpcuXap8vkuXLmRlZZGWlgbAeeedx2OPPUbHjh25//77ueaaa3jjjTcA+Pvvv9mwYQMLFy6kf//+dOjQgVdffRV/f/8KGTsOh4MFCxbQvXt3zj//fG6++WZXryGj0ciCBQv4+OOP8ff357zzzmPmzJns2LGj0rZNmTKFiRMn0rFjRx599FGOHj3KjTfeyLhx4+jSpQsPPvggq1atqtPfoaSkhBdeeIEPP/yQcePGERsby6233spNN93Eu
+++W58/aZ1ITyNRKylPE0IIIYQQQoizk4ebgT3Pjmu2dddXXUvahgwZUun3OXPmALB9+3by8/MJCgqqME1RUVGFMrHo6Gh8fHxcv4eHh5Oamur6/eqrr+aSSy7hr7/+Yv369SxdupSXX36ZDz74gFtvvdU1Xc+ePV33Q0NDAejRo0eFx4qLi8nNzcXX17fG13Xo0CEKCwtdGVNOFouFPn361DhvQzSmp5EEjc4RFsk0EkIIIYQQQoizkk6nq3OJWHNq3749Op2OvXv3cuWVV1Z6fu/evQQEBNCqVatal5Wfn094eHiV2T3+/v6u+25ubhWe0+l0OByOCo+ZzWbGjBnDmDFjePLJJ7njjjuYNWtWhaBR+eU4ewRV9djJy65u2wF+/vlnIiIiKjxXU4lcQ+n1kmkkalGhPC29EE3TGtUMSwghhBBCCCGEqI+goCDGjBnD22+/zb///e8KfY2Sk5P5/PPPueWWW1znquvXr68w//r1612lbX379iU5ORmj0Uh0dHSTbmfXrl1ZvHhxky7z5OWbTCbi4+ObvH9RVWT0NFGr8uVpeSU2sgqtzbg1QgghhBBCCCHORXPnzqWkpIRx48axevVqEhISWLZsGWPGjCEiIoLnn3/eNe2aNWt4+eWXOXDgAPPmzWPhwoU8+OCDAIwePZohQ4YwYcIEfv31V44ePcratWt5/PHH2bRpU522JSMjgwsuuIDPPvuMHTt2cOTIERYuXMjLL7/MFVdccUpeP4CPjw8PP/ww//73v/n444+Ji4tjy5YtvPXWW3z88cdNth4ZPU3UmdVeMUXuaEYBgV7uzbQ1QgghhBBCCCHORR06dGDTpk3MmjWLiRMnkpmZSVhYGBMmTGDWrFmuUc8AHnroITZt2sQzzzyDr68vr7/+OuPGqd5NOp2OX375hccff5wpU6aQlpZGWFgYw4cPd/Ucqo23tzeDBg3ijTfeIC4uDqvVSmRkJHfeeSczZ848Ja/f6bnnnqNVq1bMnj2bw4cP4+/vT9++fZt2vaVRo8YUGem0unagOofk5ubi5+dHTk5OrQ2sWooLX1tFXFoBbgYdVrvGG5N6cWWfNs29WUIIIYQQQggh6qG4uJgjR44QExOD2Wxu7s05ZaKjo5k2bRrTpk1r7k1pkYqLi9m6+wAPLzvOhP7RPDKus+u5+sQ8pDztHOEsT4sJ9gLgaLqMoCaEEEIIIYQQQpy9VBxA34hUIwkanSOc5WkdQtRQgzKCmhBCCCGEEEIIcfZr0Y2w582bR3R0NGazmUGDBrFhw4Yap1+4cCGdO3fGbDbTo0cPfvnllwrP5+fnM3XqVNq0aYOHhwddu3Zl/vz5p/IltAjOTKP2Id4AHM0oyzQ6kl5AfomtWbZLCCGEEEIIIYQ42dGjR6U0rYk0phF2swaNvv76a6ZPn86sWbPYsmULvXr1Yty4caSmplY5/dq1a7n++uu5/fbb2bp1KxMmTGDChAns2rXLNc306dNZtmwZn332GXv37mXatGlMnTqVH3/88XS9rDOSM9PIGTRyZhr9eSCNC19bxbSvtjbbtgkhhBBCCCGEEKJplY2e1vBlNGvQ6PXXX+fOO+9kypQprowgT09PPvzwwyqnf/PNNxk/fjyPPPIIXbp04bnnnqNv377MnTvXNc3atWuZPHkyI0eOJDo6mrvuuotevXrVmsF0tnMGjdq1UkGjrEIraXklPPXDLhwaxKVJuZoQQgghhBBCCHG2aZE9jSwWC5s3b2b06NFlG6PXM3r0aNatW1flPOvWraswPcC4ceMqTD906FB+/PFHkpKS0DSNP/74gwMHDjB27Nhqt6WkpITc3NwKP2cbW2l5WoCXG8HeJgAeX7STY6VlaoUWKU8TQgghhBBCCCHONi2yp1F6ejp2u53Q0NAKj4eGhpKcnFzlPMnJybVO/9Zbb9G1a1fatGmDu7s748ePZ968eQwfPrzabZk9ezZ+fn6un8jIyEa8shpYCqH49AekNE3DUpppZ
NTriQ7yBODXPSmuaYos9tO+XUIIIYQQQgghhDi19PoWmGl0qrz11lusX7+eH3/8kc2bN/Paa69x33338dtvv1U7z4wZM8jJyXH9JCQkNP2GORzw3kiYN+i0B47sDs11392gp22Ql+v3jqGqXK3IKkEjIYQQQgghhBDirKHVPkltjI1fRMMEBwdjMBhISUmp8HhKSgphYWFVzhMWFlbj9EVFRcycOZNFixZxySWXANCzZ0+2bdvGq6++Wqm0zclkMmEymRr7kmqWcQjS96v7e3+CPjee2vWV4xw5DcDNqHNlGhn1Ol64sgfXzF+H1a5htTtwM5x1cUQhhBBCCCGEEOKc1SJ7Grm7u9OvXz9WrlzpeszhcLBy5UqGDBlS5TxDhgypMD3AihUrXNNbrVasVit6fcWXZTAYcDgcTfwK6ul4udHJdn5zWlftLE0DVZ42plsoQV7u/N/4TvRo4+d6TrKNhBBCCCGEEEKc655++ml69+7drNswcuRIpk2b1qhltPjR06ZPn87777/Pxx9/zN69e7n33nspKChgypQpANxyyy3MmDHDNf2DDz7IsmXLeO2119i3bx9PP/00mzZtYurUqQD4+voyYsQIHnnkEVatWsWRI0dYsGABn3zyCVdeeWWzvEaX8kGjI6shr+q+TaeCrVzQyM2go3OYL5ufHMNdw9vhbtDjLG+UvkZCCCGEEEIIIU6ltLQ07r33XqKiojCZTISFhTFu3DjWrFnT3JtWL7feeis6nc71ExQUxPjx49mxY0eTLP/777/nueeea5JlNaKlUfOVpwFMmjSJtLQ0nnrqKZKTk+nduzfLli1zNbuOj4+vkDU0dOhQvvjiC5544glmzpxJhw4dWLx4Md27d3dN89VXXzFjxgxuvPFGMjMzadu2Lc8//zz33HPPaX99FTiDRjoDaHbY9T0M+ddpWbWzPM3NoHbm8nQ6HZ7uRvJLbBI0EkIIIYQQQghxSl199dVYLBY+/vhjYmNjSUlJYeXKlWRkZDT3ptXb+PHj+eijjwA1cNcTTzzBpZdeSnx8fLXzWK1W3Nzcal12YGBgk21niyxPc5o6dSrHjh2jpKSEf/75h0GDBrmeW7VqFQsWLKgw/bXXXsv+/fspKSlh165dXHzxxRWeDwsL46OPPiIpKYmioiL27dvH9OnTKwVLTiuHHZJLo439b1O3p7FEzVpu5LSqmN0MABRK0EgIIYQQQgghWh5NA0tB8/xode+2nJ2dzV9//cVLL73EqFGjaNu2LQMHDmTGjBlcfvnlgEpseOedd7jooovw8PAgNjaWb7/9tsJyEhISmDhxIv7+/gQGBnLFFVdw9OjRCtN88MEHdOnSBbPZTOfOnXn77bcrPJ+YmMj1119PYGAgXl5e9O/fn3/++afCNJ9++inR0dH4+flx3XXXkZeXV+F5Z6ZUWFgYvXv35rHHHiMhIYG0tDQAjh49ik6n4+uvv2bEiBGYzWY+//xzMjIyuP7664mIiMDT05MePXrw5ZdfVlj2yeVp0dHRvPDCC9x22234+PgQFRXFe++9V+e/fUM1a6bROSP9AFgLwd0bhj8Cmz5UmUfphyC4/SlfvTNo5GaoOnDm6a6CRtLTSAghhBBCCCFaIGshvNC6edY98zi4e9U+HeDt7Y23tzeLFy9m8ODB1Q5I9eSTT/Liiy/y5ptv8umnn3Ldddexc+dOunTpgtVqZdy4cQwZMoS//voLo9HIf/7zH1dpmLu7O59//jlPPfUUc+fOpU+fPmzdupU777wTLy8vJk+eTH5+PiNGjCAiIoIff/yRsLAwtmzZUqEXclxcHIsXL2bJkiVkZWUxceJEXnzxRZ5//vkqtzk/P5/PPvuM9u3bExQUVOG5xx57jNdee40+ffpgNpspLi6mX79+PProo/j6+vLzzz9z8803065dOwYOHFjt3++1117jueeeY+bMmXz77bfce++9jBgxgk6dOtX4d29MppEEjU4HZ2laeC/wCYV2F8ChF
bBzIYyaUfO8TcBZnuZurDrTyBU0kkwjIYQQQgghhBCniNFoZMGCBdx5553Mnz+fvn37MmLECK677jp69uzpmu7aa6/ljjvuAOC5555jxYoVvPXWW7z99tt8/fXXOBwOPvjgA1dF0UcffYS/vz+rVq1i7NixzJo1i9dee42rrroKgJiYGPbs2cO7777L5MmT+eKLL0hLS2Pjxo2uMrD27SsmdDgcDhYsWICPjw8AN998MytXrqwQNFqyZAne3t4AFBQUEB4ezpIlSyoNzjVt2jTXtjg9/PDDrvv3338/y5cv55tvvqkxaHTxxRfzr3+pNjePPvoob7zxBn/88UetQaPGFF5J0Oh0cAaNWvdRtz0nqqDR9i/Awx/yU8FogoF3gWfT1S061bU8TTKNhBBCCCGEEKIFcvNUGT/Nte56uPrqq7nkkkv466+/WL9+PUuXLuXll1/mgw8+4NZbbwWoNKL6kCFD2LZtGwDbt2/n0KFDrmCOU3FxMXFxcRQUFBAXF8ftt9/OnXfe6XreZrPh56dGD9+2bRt9+vSpsW9QdHR0hXWEh4eTmppaYZpRo0bxzjvvAJCVlcXbb7/NRRddxIYNG2jbtq1ruv79+1eYz26388ILL/DNN9+QlJSExWKhpKQET8+a/5blA2s6nY6wsLBK21QVyTQ6050cNOp0sXpjZcfDssfKptv2OVz3BYT1aNLVu8rTjDWXpxVabE26XiGEEEIIIYQQp4FOV+cSsTOB2WxmzJgxjBkzhieffJI77riDWbNmuYJGNcnPz6dfv358/vnnlZ5r1aoV+fn5ALz//vsVeiYDGAzq3NfDw6PW9ZzcrFqn01UoXwPw8vKqkKH0wQcf4Ofnx/vvv89//vOfCtOV98orr/Dmm28yZ84cevTogZeXF9OmTcNisTR6m8pztptqTKZRszfCPuvZrZC8U913Bo1M3jB+NkSfD10nqAyjgGgVRPrfWDWyWhMqGz2t6n+3R2mmUbFkGgkhhBBCCCGEOM26du1KQUGB6/f169dXeH79+vV06dIFgL59+3Lw4EFCQkJo3759hR8/Pz9CQ0Np3bo1hw8frvR8TEwMoDJ2tm3bRmZmZpO+Dp1Oh16vp6ioqMbp1qxZwxVXXMFNN91Er169iI2N5cCBA026LSdvV0NJ0OhUS9sHtmIw+UFATNnj/W6FW5fAxI/h4lfgzj8gdpRqYPbtFNj3S5NtgivTqJryNA93GT1NCCGEEEIIIcSplZGRwQUXXMBnn33Gjh07OHLkCAsXLuTll1/miiuucE23cOFCPvzwQw4cOMCsWbPYsGEDU6dOBeDGG28kODiYK664gr/++osjR46watUqHnjgARITEwF45plnmD17Nv/97385cOAAO3fu5KOPPuL1118H4PrrrycsLIwJEyawZs0aDh8+zHfffce6devq9XpKSkpITk4mOTmZvXv3cv/995Ofn89ll11W43wdOnRgxYoVrF27lr1793L33XeTkpJSr3XXjUogacxY8lKedqq5StN6QTVBG0D1MrrxW/j+Ttj9PRxYBp0vbpJNqK08zZlpJEEjIYQQQgghhBCnire3N4MGDeKNN94gLi4Oq9VKZGQkd955JzNnznRN98wzz/DVV1/xr3/9i/DwcL788ku6du0KgKenJ6tXr+bRRx/lqquuIi8vj4iICC688EJ8fX0BuOOOO/D09OSVV17hkUcewcvLix49eriGsHd3d+fXX3/loYce4uKLL8Zms9G1a1fmzZtXr9ezbNkywsPDAfDx8aFz584sXLiQkSNH1jjfE088weHDhxk3bhyenp7cddddTJgwgZycnHqtv64a09NIp2nOKjfhlJubi5+fHzk5Oa6drsGW/Bs2fQhDH4Cxz9U+/favYdFd0HYYTPm5cesutWJPCnd+sok+Uf4s+td5lZ6f9cMuPl53jPsvaM9DY2vuui6EEEIIIYQQovkUFxdz5MgRYmJiMJvNzb05TU6n07Fo0SImTJjQ3
JvSohUXF7Np5z4e/TWZqWO6cv3AKNdz9Yl5SHnaqXZyE+zaBLVTt5lxTbYJtZenqYQzyTQSQgghhBBCCCHOLnpphH2GslkgZbe6X9egUWCsus07ASX5TbIZdS1PK5JG2EIIIYQQQgghxFnBWVama0RXI+lpdCod3wp2C3gEqtHR6sIzEDwCoCgLMg9DeM9Gb0Zto6d5ljbCLpJMIyGEEEIIIYQQzUg66DS9RrQ0kkyjUyrud3UbO6J+/6Wg9uq2iUrUnJlGxmrK08wSNBJCCCGEEEIIIc5KjWmELUGjU8kZNGp3Qf3mCyzta5TRNEEjW2nQyL2a8jRP5+hpUp4mhBBCCCGEEC2CZOSImmiahqaBQ5NMo1PH0YggSlE2JG1S92NH1W9eVzPsww1ffzmWWsrTPFyZRrYmWZ8QQgghhBBCiFPDzc0NgMLCwmbeEnEmKywsxO7QyCp2NCrTSHoa1eT4VvCvZ5aQ05HVoDkguCP4R9ZvXmfQKONQw9Z9ktrK01xBI8k0EkIIIYQQQogzmsFgwN/fn9TUVAA8PT3RNSaVRJxVNE2jsLCQ1NRUdqRZKbZpjco0kqBRTeJ+h64NDBo1tDQNTnt5mnP0tELpaSSEEEIIIYQQZ7ywsDAAV+BIiJP5+/uz/kQOQKOCihI0qkncHw2bT9MgbqW635CgkTPTqDAdinPA7New7ShVW3mac/S0YgkaCSGEEEIIIcQZT6fTER4eTkhICFartbk3R5xh3NzcMBgMOLSjADQmD02CRjVJ2Qn5qeAdUr/5Mg9Ddjzo3aDtefVfr8kHvEKgIFVlG0X0rf8yyqmtPM0ZNJJG2EIIIYQQQgjRchgMBgwGQ3NvhjhDOVuly+hpp9KhlfWfx1maFjUYTN4NW29Qe3XbBCVqzvI0t2rK08xuzkbYEjQSQgghhBBCCCHOBs4R9mT0tFPp4K/1n8fVz6ieo6aVFxSrbjMbHzSylpanuVdbnqYSzkpsDuwOGbZRCCGEEEIIIYRo6UpjRuhbctBo3rx5REdHYzabGTRoEBs2bKhx+oULF9K5c2fMZjM9evTgl19+qTTN3r17ufzyy/Hz88PLy4sBAwYQHx/fsA2M+x3s9RiK3m5VI6dBw/oZOTVhM2xLbaOnuZWlM8oIakIIIYQQQgghRMvncEaNGtHVqFmDRl9//TXTp09n1qxZbNmyhV69ejFu3LhqO8CvXbuW66+/nttvv52tW7cyYcIEJkyYwK5du1zTxMXFMWzYMDp37syqVavYsWMHTz75JGazuf4baPKD4mxI2lz3eZK2gCUfPAIhrFf91+nkbIbdBJlGtZenle0GUqImhBBCCCGEEEK0fGU9jRq+jGYNGr3++uvceeedTJkyha5duzJ//nw8PT358MMPq5z+zTffZPz48TzyyCN06dKF5557jr59+zJ37lzXNI8//jgXX3wxL7/8Mn369KFdu3ZcfvnlhIRU38y6pKSE3NzcCj8AxI5Qt/UpUUs/oG5b94ZqMnvqxNXT6FBZTlkDOcvT3KrZHp1O58o2kqCREEIIIYQQQgjR8jlc5WmnIdNI0zTi4+MpLi5u8MrKs1gsbN68mdGjR5dtjF7P6NGjWbduXZXzrFu3rsL0AOPGjXNN73A4+Pnnn+nYsSPjxo0jJCSEQYMGsXjx4hq3Zfbs2fj5+bl+IiMj1ROxpT2JDq2o+wvLLi2D829b93mqEhCjbotzoDCzUYtylqe5GarfUZwjqEl5mhBCCCGEEEIIcRY4nY2wNU2jffv2JCQkNHxt5aSnp2O32wkNDa3weGhoKMnJyVXOk5ycXOP0qamp5Ofn8+KLLzJ+/Hh+/fVXrrzySq666ir+/PPPardlxowZ5OTkuH5crzF2pLo9sR2WPgaJm2rP+nEFjaJqnq427p7gG6HuN7JEraw8rfp/t0dp0KjQUo/+TUIIIYQQQgghhDgjndZMI71eT4cOHcjIyGjwyk41h0MFR6644gr+/e9/07t3bx577DEuv
fRS5s+fX+18JpMJX1/fCj8AeLeCdheq+/+8Ax9cCHP7lwWGqtJUQSOAwNIR1BrZDLu28jQoa4YtmUZCCCGEEEIIIUTL52qEfbp6Gr344os88sgjFRpPN1RwcDAGg4GUlJQKj6ekpBAWFlblPGFhYTVOHxwcjNFopGvXrhWm6dKlS8NHT7v+K7j+a+gxEdw8VY+hmnocNVV5GlTsa9QI1loaYUO58jTpaSSEEEIIIYQQQrR42unMNAK45ZZb2LBhA7169cLDw4PAwMAKP/Xh7u5Ov379WLlypesxh8PBypUrGTJkSJXzDBkypML0ACtWrHBN7+7uzoABA9i/f3+FaQ4cOEDbtg0M4hjdodN4uPp96HOzeiy7mhI9mwXyjqv7TZFp5BpB7XCjFuMKGhmq/3ebJdNICCGEEEIIIYQ4azgzjRqRaISxPhPPmTOnEauqbPr06UyePJn+/fszcOBA5syZQ0FBAVOmTAFUkCoiIoLZs2cD8OCDDzJixAhee+01LrnkEr766is2bdrEe++951rmI488wqRJkxg+fDijRo1i2bJl/PTTT6xatarxG+wMBFVXnpabBJoDjGbwrn60tnqvL6dxfaSc5WnGGsrTPF09jSRoJIQQQgghhBBCnC0ak2lUr6DR5MmTG7yiqkyaNIm0tDSeeuopkpOT6d27N8uWLXM1u46Pj0dfLtAxdOhQvvjiC5544glmzpxJhw4dWLx4Md27d3dNc+WVVzJ//nxmz57NAw88QKdOnfjuu+8YNmxY4zfYv3RUteqCOM5gkl9k49qTO/k515fYqMU4M43cayhP85DyNCGEEEIIIYQQ4qzhaILR0+oVNAKIi4vjo48+Ii4ujjfffJOQkBCWLl1KVFQU3bp1q/cGTJ06lalTp1b5XFXZQddeey3XXnttjcu87bbbuO222+q9LbWqLdOoKZtgQ1nQKC9Zlb4Z3Ru0GFcj7BrK0zzc1K4g5WlCCCGEEEIIIUTL5+qDfboaYf/555/06NGDf/75h++//578/HwAtm/fzqxZsxq+FS2FX2kwKD8FrMWVn3cGjQKaoAk2gFewKnVDU6VvDeTMNKqpPM3DXT0n5WlCCCGEEEIIIUTLV9bT6KSo0aYFdV5GvYJGjz32GP/5z39YsWIF7u5lWS8XXHAB69evr8+iWibPQHDzUverKhlr6kwjnQ782lS/vjqqS3map7vKNCqWTCMhhBBCCCGEEKLFK000Ql8+FGAphJXP1HkZ9Qoa7dy5kyuvvLLS4yEhIaSnp9dnUS2TTleuRO1Y5eebOmgE5YJGDW+GbatTeZqzEbatwesRQgghhBBCCCHEmaGsPK1c1ChxIzisdV5GvYJG/v7+nDhxotLjW7duJSIioj6LarlqaobtCho1UXkaNEkzbEudytOcjbAdDV6PEEIIIYQQQgghzgxaadSoQqbRsTX1Wka9gkbXXXcdjz76KMnJyeh0OhwOB2vWrOHhhx/mlltuqdeKW6zqmmHbLJB3vOI0TcEZNKqu+XYd1K08rTRoZJVMIyGEEEIIIYQQoqVzVJVpdPTvei2jXkGjF154gc6dOxMZGUl+fj5du3Zl+PDhDB06lCeeeKJeK26xXEGjkzKNcpNAc6jG1V6tmm59TdDTqC7laWZXeZr0NBJCCCGEEEIIIVo6rbSrkStmZC1S5Wn1YKzPxO7u7rz//vs89dRT7Ny5k/z8fPr06UOHDh3qtdIWrbrMn/L9jBoznt3J/JuwPK2GoJEr00iCRkIIIYQQQgghRIvnKO0+o3fGKBI3gt0C3qFAXp2WUa9Mo2effZbCwkIiIyO5+OKLmThxIh06dKCoqIhnn322PotquZz9imoKGjWl8plGzi5W9eQsT3MzVB/McjbCLpLR04QQQgghhBBCiBbP2dPIFQlwlqZFDanzMuoVNHrmmWfIz8+v9HhhYSHPPFP3IdtaNGfmT94J1cfI6VQFjXxLG4zbiqAwo96z2x2aK9bkXtPoaZJpJIQQQ
gghhBBCnDWcaSeuTKNTHTTSNK1iA6VS27dvJzAwsD6Larm8Wqm+RWiQW65k7FQFjYwm8A5T96sasa0WziwjqLk8zUN6GgkhhBBCCCGEEGcNh1aup1H5fkZt6x40qlNPo4CAAHQ6HTqdjo4dO1YIHNntdvLz87nnnnvqvuUtmU6nAkPpB1SgKDBWPX6qgkagStTyk1Xz7dZ96jWrpVzQqKbyNE93tSsUS3maEEIIIYQQQgjR4mmu0dMo62fkEw4BMXVeRp2CRnPmzEHTNG677TaeeeYZ/Pz8XM+5u7sTHR3NkCF1j1S1eH6RpUGjcpk/rqBR26Zfn38kJG1qUDNs58hpAG762hthS6aREEIIIYQQQgjR8jmcQSN0cHSN+iV6WL0G76pT0Gjy5MkAxMTEcN5552E01mvQtbOPM5vIGSiyWSDveMXnmpKrGXbDy9MMeh16ffU7hrlcI2yHQ6txWiGEEEIIIYQQQpzpVNRIr6esn1H0sHotoV49jS644AIyMzMrPZ6RkYHBYKjXils0ZzNsZxAnNwk0h+p15NWq6dfnd9L66qEuI6dBWaYRQInNUcOUQgghhBBCCCGEONM5M430tpKyfkbR59drGfVuhF2VkpIS3N3d67XiFs1ZgubMNCrfz6geaV515goa1b88zVpanuZWQxNsKMs0Aii02Oq9HiGEEEIIIYQQQpw5nDEcz7StYC9Rg2w5+zLXUZ3qzP773/8CoNPp+OCDD/D29nY9Z7fbWb16NZ07d67Xilu0k8vTUvdWfLypOcvTshuTaVRz0Mig12Ey6imxOSiSZthCCCGEEEIIIUSL5sw08jixQd1pO6TeiS51Chq98cYbgIpSzZ8/v0IpmrMR9vz58+u14hbNmfmTe1z9/PmS+r2etYF15iyHK0xXw+S5edR51rqWpwF4uBtU0EiaYQshhBBCCCGEEC2aM9PIFTSKGlrvZdQpaHTkyBEARo0axffff09AQEC9V3RW8Q4Fg7saru7rm6EoE0J7wOD7Ts36zP7g7g2WfMhJguD2dZ61ruVpAJ5uBrKxSqaREEIIIYQQQgjRwmkaGLBjTt6kHmhb/1Hv69XT6I8//pCAEajW485so6RNoDfCle+A8RT1ddLpyo2gFl+vWetangYq0wigUDKNhBBCCCGEEEKIFk0DuuiOobcWgMkPQrrWexm1RhKmT59OQUGB635NPw01b948oqOjMZvNDBo0iA0bNtQ4/cKFC+ncuTNms5kePXrwyy+/VDvtPffcg06nY86cOQ3evio5S8YARjwKYT2advkna2Az7PqWpwGSaSSEEEIIIYQQQrRwDk1jkH6f+iVqEOjrP+p9reVpW7duxWq1ArBlyxZ0TTw62Ndff8306dOZP38+gwYNYs6cOYwbN479+/cTEhJSafq1a9dy/fXXM3v2bC699FK++OILJkyYwJYtW+jevXuFaRctWsT69etp3bp1k24zAAExwCoI7w3D/t30yz9ZA5th1688Te0O0tNICCGEEEIIIYRo2RyaxgD9fvVLVP1L06AOQaM//vjDdX/VqlUNWklNXn/9de68806mTJkCwPz58/n555/58MMPeeyxxypN/+abbzJ+/HgeeeQRAJ577jlWrFjB3LlzKzTjTkpK4v7772f58uVccsklTb7dDJmqytKG3g8Gt6Zf/sn8G5hpZFOZRsY6BI3MUp4mhBBCCCGEEEKcFTRNo7+hNGjU9rwGLaNOjbCvuuqq2hdkNBIWFsaYMWO47LLL6rRyi8XC5s2bmTFjhusxvV7P6NGjWbduXZXzrFu3rlIp3Lhx41i8eLHrd4fDwc0338wjjzxCt27dat2OkpISSkpKXL/n5ubWvvHB7eGSV2ufrqm4ytPql2lkc6igkXsdytM83aQ8TQghhBBCCCGEOBvEaMcJ1uWiGczoWvdp0DLq1Ajbz8+v1h8PDw8OHjzIpEmTeOqpp+q08vT0dOx2O6GhoRUeDw0NJTk5ucp5kpOTa53+p
Zdewmg08sADD9RpO2bPnl3htURGRtY+0+nmG6Fuc5PqNZulHuVprp5GFlv9tk0IIYQQQgghhBBnlH66vQBYwvs0eOCuOmUaffTRR3Ve4JIlS/jXv/7Fs88+26ANaqzNmzfz5ptv1qv/0owZMypkL+Xm5p55gSPfcHWbe0KNm1fH11af8rSyoJGjYdsohBBCCCGEEEKIM0J/nSpNs0YMxtTAZdQp06g+hg0bRv/+/es0bXBwMAaDgZSUlAqPp6SkEBYWVuU8YWFhNU7/119/kZqaSlRUFEajEaPRyLFjx3jooYeIjo6ucpkmkwlfX98KP2ccn9Kgka0IirPrPFt9ytM8SsvTCq2SaSSEEEIIIYQQQrRk/UtHTrNFDG7wMpo8aOTv78/3339fp2nd3d3p168fK1eudD3mcDhYuXIlQ4ZU3dl7yJAhFaYHWLFihWv6m2++mR07drBt2zbXT+vWrXnkkUdYvnx5A1/VGcDNAzwC1P3cE3WerT7laZ6lmUbF0ghbCCGEEEIIIYRouXKSiNSlYdd0WCPqlthTlTqVp51K06dPZ/LkyfTv35+BAwcyZ84cCgoKXKOp3XLLLURERDB79mwAHnzwQUaMGMFrr73GJZdcwldffcWmTZt47733AAgKCiIoKKjCOtzc3AgLC6NTp06n98U1NZ/WUJQFecchtGudZmlIeZqMniaEEEIIIYQQQrRc2q7v0AG7tBgiTD4NXk6zB40mTZpEWloaTz31FMnJyfTu3Ztly5a5ml3Hx8ej15cFPIYOHcoXX3zBE088wcyZM+nQoQOLFy+me/fuzfUSTh+fMEjdXa9MI2d5mlu9ytMkaCSEEEIIIYQQQrRI1iJY+xYAn9lHM6OOPZGr0uxBI4CpU6cyderUKp9btWpVpceuvfZarr322jov/+jRow3csjOMsxl2XtUjy1XFWlqe5i7laUIIIYQQQgghxNlv88foClJJ1IJZZB/GzEYsqsl7GolTyKe1us07XudZLK7ytNoji2Y3KU8TQgghhBBCCCFaLGsxrJkDwNu2K7BhRN+ITCMJGrUkzkyjBpWn1SXTSCWeFUl5mhBCCCGEEEII0fJs/RTyTqD5tOZb+3D1WMNjRhI0alEakGnkLE8z6xxw6DewFFY7rbOnUVFpptGBlDy2xGc1cGOFEEIIIYQQQghx2tgs8PccdXfoNCy4AaCXoNE5ogGZRs7ytIuPzYbProb/jYWcxCqndY6eVmCx8e6fcVz05l9c9fZaHl64ndxia+O2XQghhBBCCCGEEKfOru8gNxG8w7D1usn1sE7K084RzkyjgjSw1y2IY3M46Ks7QI+0n9UDKTvh/Qvh+NZK0zozjRKzipi9dB92h8pS+nZzIuPfWM2aQ+mNfw1CCCGEEEIIIYRoeod+U7d9b0YzmlwPS6bRucIzCPRugFbnEdRsVjtPu32sful8KbTqAvnJ8OFFcGB5xcWXZhoBuBv1PH9ldxbeM4SoQE+O5xQz+cMNHErNb6pXI4QQQgghhBBCiKagaXD0L3U/ZjilOSAA6BrR1EiCRi2JXg8+Yep+XjUlaof/hFUvukrQ+mQsoaf+CBaDN1z6Bty+HNpdCLYi+GYyJG5yzRrubyYy0IPYVl58f+9QbhzUlgHRgSx98Hz6Rvljc2is2p96ql+lEEIIIYQQQggh6iPjEOSngMEEbQaiaWVRo0ZUp0nQqMXxKe1rVFXQKD8NvroRVs2G//aBJdO5OO09ALa2uxu8Q8DsBzd8Ax3GqsDRF5Mg8zAAJqOB3x8aycrpI+ge4edarJfJyOiuoQBsPiaNsYUQQgghhBBCiDOKM8uozQBwM1fMNJKg0TmkpmbYq18GSx64eYLdApv+h489h4OOCA5F31A2ncEI13wE4b2gMB0+uwYKMgBwM+irbJLVv20gAJuOZVWIWAohhBBCCCGEEKKZHXGWpp2vbsudtuulEfY5xNkMO+94xccz4mDTh+r+9V/B5CXQ9jyKdJ48Yb0No
5t7xelN3irjyC8SMuPg53/XuNqebfxwM+hIyyshIbOoiV6MEEIIIYQQQgghGkXT4Ojf6n70MAAc5cvTGrFoCRq1NNVlGq18Fhw2aD8GYkeo6OKUX7i7zWL+0brgZqjiX+0TBle+q+4fXVPjas1uBlfJ2qZjmY19FUIIIYQQQgghhGgK6QegIBWMZojoD1RINJJMo3OKK9OoXNAocRPsWQzoYPTTFSa32tWuUmXQCCC8p7otTIfinBpX3b9tAKBK1IQQQgghhBBCCHEGOKmfEZyUaSQ9jc4hztHTcsuVp/32tLrtfQOEda8wudXuAMDNUM1eYvIBrxB1v7QhdnX6lfY12nxUgkZCCCGEEEIIIcQZwdXPaLjrIa1CI2zJNDp3+JbLNNI01cvo6F+gM8ComZUmtzpqyTQCCIxVtxlxNa66X2mm0YHUPHKKrPXfdiGEEEIIIYQQQjSdKvoZqYdVLEDfmIZGSNCo5fEp7WlkLVTlZPuWqN+jh4Ffm0qTW23OTKMa/tVB7dRt5pEaV93Kx0R0kCeaBlviJdtICCGEEEIIIYRoVmn7VLsZowdE9HM9XJo/0qgsI5CgUcvj7glm1ZCavBOwtzRo1OWyKid3lqcZqytPAwiMUbeZNWcagZSoCSGEEEIIIYQQZwxnllHkQDCaXA9rSKbRucvZDPv4VkjcoO53vqTKSW2l4UX3GsvTnJlGNfc0Augf7WyGLSOoCSGEEEIIIYQQzerIn+o2+vwKD7syjZBMo3OPb2mJ2sYP1G2bAWW9jk5isTkzjRrf0wjKRlDblpDtymISQgghhBBCCCHEaeZwlDXBjh1R4SlnT6NGVqdJ0KhFcmYaJW1Wt9WUpkEdRk+DsqBRYbrqk1SDdq288fNwo9jqYO+J3DpvshBCCCGEEEIIIZpQ8g4ozgZ3H2jdt8JTmqunUeNWIUGjlsiZaeTU+dJqJ61TeZrZF7xaqfu1lKjp9Tr6RvkDsD2x5gCTEEIIIYQQQgghThFXadp5YDBWeMoZNNKfDY2w582bR3R0NGazmUGDBrFhw4Yap1+4cCGdO3fGbDbTo0cPfvnlF9dzVquVRx99lB49euDl5UXr1q255ZZbOH78+Kl+GaePT1jZ/ZBuZaOfVcFal/I0qFdfo5hgbwASMwtrnVYIIYQQQgghhBCnwOHSoFHM8EpPOZzlaY1cRbMHjb7++mumT5/OrFmz2LJlC7169WLcuHGkpqZWOf3atWu5/vrruf3229m6dSsTJkxgwoQJ7Nq1C4DCwkK2bNnCk08+yZYtW/j+++/Zv38/l19++el8WaeWT7n+RTWUpgFY6lKeBuX6GtUeNGoT4AFAQpYEjYQQQgghhBBCiAazFsPyx2Hj/8Burft8NgvEr1P3Y0ZUero00ajlZxq9/vrr3HnnnUyZMoWuXbsyf/58PD09+fDDD6uc/s0332T8+PE88sgjdOnSheeee46+ffsyd+5cAPz8/FixYgUTJ06kU6dODB48mLlz57J582bi4+NP50s7dcqXp9USNKpTeRpAUGnQqA6ZRpGBngAkZhXVOq0QQgghhBBCCCGqse1zWDcXfp4O8wbBnh/KastqkrQJrIXgGQwhXSs97TgbGmFbLBY2b97M6NGjXY/p9XpGjx7NunXrqpxn3bp1FaYHGDduXLXTA+Tk5KDT6fD396/y+ZKSEnJzcyv8nNFadYaAaDWkXmi3aiezOzTspUGj2svTnEGj2kdQc2UaSXmaEEIIIYQQQgjRcAdXqFudXp2Pf3MLfHdH7fOVL03TVz7fL2uE3YIzjdLT07Hb7YSGhlZ4PDQ0lOTk5CrnSU5Ortf0xcXFPProo1x//fX4+vpWOc3s2bPx8/Nz/URGRjbg1ZxGbh7wwDa45ccaw4bOkdOgLuVpde9p5AwaZRVayS+x1Tq9EEIIIYQQQgghTmIrKWtmfesvMOJR0Bth17dlo6VXxzlfbOXSNACtNGqkb8mZRqea1Wpl4sSJaJrGO++8U+10M
2bMICcnx/WTkJBwGreygXS6KqOJ5TlL0wDc6pppVJAGxTVnWvmY3fD3dAMgUfoaCSGEEEIIIYQQ9XdsjSox8wmHqMEwaiZ0v0Y9t25e9fOV5EPiRnW/iibYUNbTqEVnGgUHB2MwGEhJSanweEpKCmFhYVXOExYWVqfpnQGjY8eOsWLFimqzjABMJhO+vr4Vfs4GzpHToA5BI7MveLVS9+uRbZSYKX2NhBBCCCGEEEKIenOWprUfXVZFNHSqut29GLKrSWiJXwcOG/hFQUBMlZM4zoZMI3d3d/r168fKlStdjzkcDlauXMmQIUOqnGfIkCEVpgdYsWJFhemdAaODBw/y22+/ERQUdGpewBnOWZ6m14GhLntKPfoaRQaoZtgygpoQQgghhBBCCNEAzqBRhzFlj4X1UKOhaXb4Z37V8x1epW5jh1fbssbhyiFpwZlGANOnT+f999/n448/Zu/evdx7770UFBQwZcoUAG655RZmzJjhmv7BBx9k2bJlvPbaa+zbt4+nn36aTZs2MXWqisZZrVauueYaNm3axOeff47dbic5OZnk5GQsFkuzvMbmYi0tT6s1y8ipAX2NZAQ1IYQQQgghhBCinjKPQMZB1cModmTF54aUZhtt/rjq9jFHVqvbmJGVnyul0TSZRsbGzd54kyZNIi0tjaeeeork5GR69+7NsmXLXM2u4+Pj0Zfr3TN06FC++OILnnjiCWbOnEmHDh1YvHgx3bt3ByApKYkff/wRgN69e1dY1x9//MHIkSNPy+s6EzjL0+oeNCrNNMqoPWgUGViaaSQjqAkhhBBCCCGEEPVz6Dd1GzkYzH4Vn2s/GoI7Qfp+2PJJWckaQGEmJO9U96vpZwTlR09r3GY2e9AIYOrUqa5MoZOtWrWq0mPXXnst1157bZXTR0dHu7qEn+tsDmfQqI57SZCzPE0yjYQQQgghhBBCiFPm4K/qtnxpmpNeD0Pug58eUCVqg+8FvUE9d2Q1oEGrzuATWnneUs6wiL4lN8IWp5bFVt/yNGem0aGyPawa0tNICCGEEEIIIYRoAGsRHPlL3a8qaATQcxKY/SEnAY6tLXv8yJ/qNmZEjatwNsJuZKKRBI3OZs5G2HUOGgV3AqMZCtPL0t2qEVGaaZRXbCOnyNqo7RRCCCGEEEIIIc4ZR9eArQh8IyCka9XTuJmhy2Xq/u5FZY8fLg0axdYcNHKmgegk00hUp97lae6eqnYSYO+PNU7q6W4k2NsdkL5GQgghhBBCCCFEnTmDQB3G1tx0qNuV6nbvj2C3QU6iGu1cp4e259W4ClemUSNTjSRodBard3kaQJfL1e2emoNGABGlJWrS10gIIYQQQgghhKgDSwHsWazu95xY87Qxw8EjEArS4NiaslHTWvcBD/8aZ5WeRqJWzvI0Y32CRp3Gg95NdWlP21/jpJGuZtiSaSSEEEIIIYQQQtRq7xKw5ENANEQNqXlag1vFEjVnaVoNo6Y5OQcI00umkaiOszzNva7laaCG+ms3St2vJduojWQaCSGEEEIIcVolZBYy9/eDpOYVN/emCCEaYtvn6rbXDXWrHStfolbHJtjQdD2NjI2aW5zRGlSeBqpE7eCvsPcHGPFItZO1Kc00kp5GQgghhBBCnHolNjtTFmzkUGo+i7Ym8fXdQwj2NjX3ZglRd5lH4PfnIC8ZCjPBWqCCJyMeVcPMn+2yE8pKzHpdV7d5os8HzyAozFC/G0wQNbjW2RwO6WkkalFWnlbPvaTzJaAzqBHUMo9UO1lkoGQaCSHOMMU56udkCRthyydgkSC3EEKIlmv+qsMcSs0HIC6tgJv/t4HsQkszb5UQ9bDsMdj1nerPk7YXsuPhzxdh0d1gOwf25R1fAZoKBAW0rds8BmNZ72GAyIHg5lHrbK5Mo3pvZEUSNDqLlY2eVs9/s2cgRA9T92sYRc2VaZRV6KqXFEKIZqFpsO1LeKM7vBwLn0+EHd+og5L3L4T/jYYf74e5/WHHwrLOgEK0Z
NYiWP8OHF4l+7QQ54BDqXnM++MQAA+P7Uiwt4m9J3KZ/OEG8oqtzbx1AgCHAzZ+AD89CFlHm3trzjxJW+DAMjXy1xVvwy0/wMWvgt4IO7+BL66F4tzm3spTx3m8CtDr+jpMrvHpuqP87+8jaN0mlD1Rh9I0KBs9TRphi2pZS8vT3OsbNALoWvsoahH+KmhUaLGTVdhCv6hyj4NV6sGFaNEK0uGbm2HxPVCSCw4bHFwO398J394GSZvA4A7eYZCbBN/fAf8bKwdzoulYi+DgCvj5Yfj6Zohff+rXqWkqELrsMfjkCnj/AvWdXXrBSIhzWlHWWXd853BozPx+Fxa7g5GdWnHfqPZ8fscgAjzd2J6YwyMLd5y5F3HtNkg/dPYHt/NS4PNr4OeHYPMCmDcY/nr93Mieqas/X1K3PSdBnxshdiQMvBNu+BrcvNRFkC8m1f+7LCcRlj4Gr3eD358HezOem2oarHgKPr0KMuIqPpe4ETLj1GvtekWti/pmUwJP/rCb55bs4bUDwepYFqD9hXXeFGh8eZr0NDqLWRpangbQ+TJ18Ju0Cdb8F4beX2lvM7sZCPExkZpXQkJmIYFe7k2x2XWXe0IdMGfGQWA7CGqn0vy6XFr7vA47rHwW1swBoxnaDoV2F0BQB3Azg5snBHcAj4BT/jJaJEshFKaDX2TjP4WEqAtNUycBhRlQlK32v+PbIHGDKj2z5KmrVCNnQKeL1TCmu75XJ/N9boIBt4PJB9bNUwdwiRtgwWUw5Rfwj2zmFydanLT9sOcHyDysDgiTd4KtXKn2vp9h1AwYNh30hlOzDRveg50LVTm5wR2Ob1HB05jhcMNC9V12rrBbYfdiaNMfAmOae2tEc9u9GL6/S30ndBynTsw6jAV3z+beskb5amMCG45m4ulu4D8TuqPT6egU5sOCKQO5+p21LNudzI/bj3NF74jm3tQymqYC6r8+DukHYODdcNFLZ+ex46HfYNE9alh0oxlCu0HSZlj5jMp8vvEb8I9q7q1sXuWzjIaf1De3/WiY8rM6NotfC5s/Usdutck9roJEO74GR2mgaPXLELcSrnpfnR+ebv/MhzVvqvvvjVTb0Wm8Ojb45f/U410vB5N3jYs5kJLHrB93u36fu+oYnS+cy6VRNojoW6dNcQaNGptppNPO2JB088nNzcXPz4+cnBx8fX2be3MabMGaIzz90x4u7RnO3BvqtmNV8Nsz8Pfr6v6AO2D8S6qespyr31nL5mNZvH1dDy5u76FO6oqz1UldSS5EDjo1J2Tph+CzK1UN7MlGPKpOHKt7cxRlwbe3qw+Tmrj7wNhnoe+t50ZTtrooyoIN76tyiKJMiOgPw/6tTtLlb6Q+mVN2qWE0j/6lgo7BHSC4k+oVZm65nyenTUGGGhWiIF0FhvJOQNoBSN+v9r/qtOoCV86H1r1rX0dOInx8uQo4B8TAlKXgG95kL0Gc5TLiYP4wsJ7UH8s3AjqMUWn1u79Xj0WfD1d/AD5hTbsNx9bBx5eqrLpxL6grtv/MV5/Nlnw1NO+1H5+6gNWZJD8VvpmsTjLMfipgFjWoubdKNJctn6iyIO2kLAWf1jDxE4gc0PBlF+eq5rXunur4pzHf6TmJ8OMDKuAb3AGCO6qMi2qOmbMKLIx8dRU5RVaeuKQLd5wfW+H5N387yBu/HcDPw41f/z2cUN9mDho77BC/Dv56DeJ+r/jcyJkw8tHm2a4Gyimy4uVuwFhd9ca2L+GHf6n9LqQbXPMhtOqkAhnLH1fHM5GD4NZfKp1LnVO+mKSCRr2uV8dsVVk/H5Y9CiY/uH8TeIdUv7z0QyrTNjdR/R59vgoQ//Wq6m/p5gUT5pWNPHY6HFsLH1+mvp/9o8rOVSP6qSAiqMdvXlxjQKvIYueKeX9zICWf8zsE0zcqgDdXHkSvg1ev7UWEvwdp+SXodTrGdwtDr6/6vHf1gTRu+XADncN8WDZteIXn6hPzkKBRFc6WoNH7qw/z/C97uapPB
K9P6t2whax7G5bPBDT1Jhz7PLTqqJ4rzOT3Dx6lb8bP+OsKqp7fpzXc8xd4Bde8noQNsPoVlbJ3xdvQ+eLqp03aAp9fqz6AA2Nh3GzIT1ZvxC2fqGmGTYcLn6oYOCrMVB9Uq19RV4eNHnDFXAjtrr7QjqyG/BSwFatp85PVfNHnw+VvndtXL/OSVYbGpg/VCcnJgjvBle+oD8Rz1ZHV6gAwq5rm8cEd4a5V4O51WjfrtHPY4cQ2FXQNiAZjHTMQNU0dXC19VAWeq2PyBQ9/FZBr1RnaDFA/YT3qd4KckwQfXQTZx1SG4ZRfaj4wEQJURsuH49T3TVhPlcEQ1E7ti606q+8cTYPtX6psXWuBSiWf+EnDAxmapr6bMuLU91J+Kvw9R93vdpU6OXF+1x35Cz67CuwW6H87XPLa2XlF3ylxM3x9E+QdL3vMzRMmfaquWotzy5r/woon1f1+t0Kfm1Vvzp3fqtJkvRtc/DL0m1L394XDDtu/Utmrh1ep9xYAOpVJ0n40DLwL/OqR3aNp8OkEtbzyDO4w6G44/6FKme6PL9rJ5//E0znMhyX3D6sUvLDaHVw1bw3WE7u4KTyJG6+agK5N/7pvU1NJ2a2yIPf9rDJuQP3dB98DnsHw2yz12CWvqQvSZ7ij6QW8uHQfy3Yn4+fhxrAOwYzqFML47mF4m0qDPxs/UOVooEYAu/SNipmeWcfUhYaS3IYHzHIS1d9070/qwvxlb0KbFnbMnbQF3h+lsoymbqo+YOKwq3LrE9ugx7XqwktVknep91FBmjqOm/BOWVA4J1FlfR39S2UcXv+VuqhzquUlw7vD1Xd2j2vVOe3ymbDxffW83k1V7wx/pMbMR5vdwcxFO/lmUyKtfEz88sD5BHu78+h3O/hmU2Kl6e8YFsMTl3atcll/Hkhj8ocb6Bruyy8Pnl/hOQkaNdLZEjSa98chXlm+n4n92/DyNb0avqA9P6g0X1tpbXj7MdC6D2x4t/IoRSY/8PADs7964xSkqrKvG7+teEJXmAnpB1X2wM6FZcMOggrmTP6p4tUgS4EK+Oz6Hg7+qr60w3ur5Xq3Kptu/TuqvwNA75vUSWthOqTtg6N/q6gvgF8UXPc5hPes+jU77PDPu6qEzVakItUXvwK9b6j5QCM7Afb/ol5Pj2uhfMOylsZarFKJNy+ArZ+BvUQ9Htodzp8OUUPVh+CGD6AkB9y91YdyzPk1LvaslLQFFlyqThCNZrXPdxxX9jfc84PaD/vfDpe+3txbe+oUZMC3t5a9n3V6dTUldpQqEYvoV/n9Y7ep9+fKZ9R7GyCoPYR0VcFmr1bqYKBVJ/V4U5YXZB2Djy5WV6jaDIRbl4BRhi0WNVj1IqyarTJa7l0Lfm2qnzb9oApopO1TB4oXvQT9b6v5O6QkT/VDStunSuCcPyVVjAjYqgvc8Vvl9Pbdi2HhrYAGo56AEY9UnrelK8mHtW+pbGi7RQXlr/5AfWcf+q307/0i9L6xTqPLiBbOVgJL/08drwCcNw1GP132XivJg8X/Khvcpc9NcMkbtV/UKMlXffEOLi97LLCdOpbMPlb2mN6ojvmGPgChVZ+4VbB5gcqGMpph1OOQk6AC0c4sBI8AGPMs9L0FgF1JOVw29280Db66azCDY4MqLq84B1a/gmXnD7jnlcvA73YlXDjr9F30TNqiMiycFxfN/qplxPkPqYu8AH+8UNrTRgeX/9f1Gk8ZTVNBhaxjpW0n/Os0W36JjTkrDvDxuqNY7ZVPlduHePPt7b3w3/Ye/PG8enDQPepCtl5PWl4Jfx9K40BKPgdT8hmQ9xt3p89W5cS3LVMjX9VFcQ58fzccWFrxcYMJLpujzktagkO/waJ71XlhTVlGTse3qsCR5oCbF6njaqecJBUMcl5kDOsBNy2qeD4I6lxu8b3qgqSbpzq3PJWBVLtNZQDHr1PHsHf8VnaheOe3EPeHChiFdK5+EQ6NH7cn8dbKQxxOL0Cng89uH8R57VXyh
dXuYNpX21h9II1gHxMBnm5sic8G4IUre3DDoMrlj3/sT2XKRxvp1tqXnx+QoFGTOluCRs5U1RsHRfH8lT0at7Dj2+DPl1VAhLJdxtGqK4/nXsnSnChG9GzPmzeUezOm7FFveFuR+lIc8X9w+E91leH41orL1xvVh0jeCfXB4hkEt69QPUjW/hc2fqhOyJ3aj4ZrF6jnT7bhffjl4apfR0g3lbY/8C7wCqp6mvIyD8MP98Oxv9Xv3a9RVxDKpyTbLOoDaeP7cGJ72eM6A1z3haphbSlK8mH5DHW1OvtYxfTuNgNh+MMq46z8SU9xjmr8euRP9SU28ZOW9ZobKyNONVUuTFcjGVz3eeX9Mu4PdTUE4IZvVEDpbHNiB3x1I+TEqwNhvbFyVlpwJ3Vl1m5RGRu5SSqo5rxya3CHkY+pA2+D2+nZ7ow4deWrOEddfb5sTtXTleSr7fQMPD3bJc48iZvUe12zw9X/gx7X1D5PSb4qWdjzg/q9z01w8WtV9xtK3QufXqm+B0+m04N/W/BtrTLi/CJh8L+qL6v85z1Y+gigg9t/rfsJypnOYYetn6oTz/wU9VinS9QJiNlXfR8vurusPNDkBz2uVt/5IV3qv76SfEhYD2G9Kp+QiDNDdgJ8c4vq6YUOxjwD5z1YeTpNU30sVz6rjm2iz1cZadX1rsw9AV9MhOQd6jtt2HR1IbBVJ/V8XrIaLnzjh2XHiOig13XqmLe61gzZCfD2ENWHb+zzMHRq2fYd+g1+fVINQQ5wy49oMcOZ+O46Nh7NqrrdhMOhGi+Xtlyw6U1stbWln/4gejQVQB3+sGrdcCqzDssfC0UNUZkUMcMrf5drmjpG31iaPTLgDhVoqWtWcg2yCix8vPYIxcd3EZ32Jz2K/qE9CZjspecPZj+47kuIPk/9brfBurkqMH/B466LAAUlNv79/s+En1jJ347utOnQm8cu6kyhxcaqfals3fAnY0tWcLXbWry00mWf/xBc8CR2DT5bf4xXlu8nv8RWYfv+6/42l+v/Vp/l9/xde3ljTqKqrEjdA+jU37XLpXB0Dez/WU0z8G71v63qnMbhUAHPjR+okn9Q+0BANHS+VB2LVnUedTJrsQq8FWaoNgHeISoIWJeAvK1EvefWzVW/t+qsgkC+rWufd+mjquwaXelFxBAVAM4pFxhtMxBuXFh9MNBuhS+vU+8tjwC4bXnZe7ip/fkK/PEflRF/16p691JKzS3mpv/9w4EUdewc4OnGjIu7MLF/zW1e/rvyIK+vOIBBr+PjKQMZ1qFidc8f+1KZsmAjPSL8+On+YRWek6BRI50tQaNXl+9n7h+HuHVoNE9f3q1pFpp5WB2Mpu1VPRR6TmJrYi7XzF+H3aHx5nW9uaJ3BDa7gx1JOYTEfUebPx8CdCoDpXxGkW8bFfUP76UanflHqQO0jy9VQSXvUHUi58xwCohWqfjdr1LZLjV9+e1eDLsXqQ9kz2DVT6LdhRDcvt4vWbPbyPz1ZQI2vIpes5OhDyIvdCARnQfgptepIJUrNV4HUYNVRDtuZdVZUw2laar0ySPg1DTothSqLyfXwQ/qgDtqkDoAa3te9X9zazF8O0UFFfVGFTjqfEnTb+OZxFqksgEWTlH/l7CecOvP1R8ELJsB699WX3r/Wld7yWZLsncJfHeHChAHxqpgaavOqowmZadqALnnx4qNgstz81Lvm3Ev1HgF5pQ5uELt+2hw2X+h3+Sy54pzVTPDdfPUZ1HkQNXDq+vlZVdOxdkv6yh8MkG917tfA9f8r+7zaprah1Y+o05WW/dVJ6vls5SSNsNnV6sDcp9w1fuiVafSn84qu6G+ja0X3Qvbv1BXPO9effoCsaeKpqmrxttLhyoOiFEBgi6XV/xucthVFtKm/5X1kjCY4ObvIXoYxVY7v+w8wVcbEjicXsDzV3ZnXLdyPacKMtRV/b1LVOm6vUQF6aYslab5oP4POxeqdgKaBmhlt6BOiAffe3qyNhM2qB4pR
ZnquOiqD6BDLWWJh36Db25VQZugDuqE8+RMnOSdarm5SeoY8vqvaj6OS9qsSkZLM5ksuPEVY9H1vI6rLr4IL3Ppe0/T1Ps8bqU62b1tWeWyarsNlkxTwVH/KJYM+46p3x7Ew83AyodG0Nr/pBP1v15Xny1GM0x4G1u7sVz5wXasx3fyRsC3dCncpKYbcAdc9ErT9Z/MPQ4ledhN/rywZDt3HLyPcC2VfbpY7jY8S55mxmZ34GN24/1b+tO1dbljI4dDtYpYNRvQ1N9i4ieN6i14IDmXzz98k8nFnxGrT67wnAMdOpMPupJcdXFqwjuqifD3d6n9GFRA6dI5FHe4lG/efoKrsxfgpSvNsI8aCj2vVdmj+5ZU6KeabgwncNyjFPS4ie0JObyyfB/bE1VmaKdQH/pHB9Ax1Iff96Wy5cAxlpoeo40unaKoEWRctgCzhxdBXu7oTj6+TtkNn12jzi+8w1QT7fBeZX+/P1+CP19Uv+sM6hyr86XqortOX9qD9L3SgFM1DO7qInyfm9UF4ZN7LRXnqoDP5o/KKjVcdOpzMaidOpcLaq+Oj8J7l30eJ25WAxalljZyHnAnjH2u7tmfxbnwyeWVEw10epVdFDtKBSdraSZNSb5aTtJmde55+6/1Kyeti6Qt8L8x6u905XvQa1K9F/HIwu0s3JyIn4cbdw2PZfLQ6LISyBpomsb0b7azaGsSPmYjNw1uS+cwHzqH+RIZ6MG6uAxu/3gTvdr48cNUCRo1qbMlaDT7l728u/owd54fw+OX1CFdthGcWU0+JiNjuoXy+75UsgtVB/sXje9xnXEVADZNz+f2C5nnuJrrRvXj/gva43ZyU7n8VPhgdFnqb0R/lX3QfnSNgSKr3YHNrmF201f+8G2AYqud77ck8b+/DxOXVkBf3QH+6z6XNrr0yhP7hKurvr2uV1cj7Vb48no4tEIdyNz6S91SlqtSlAU7FsKWj1WTZVAnAZGDVFpqU1xBthapSPzhVSpCPuEd1SfGO6TuV6bsVpX+vfObupVugPoCXvtfdcVu3Avqi+dM5nCopo7bPlOpzuUPkm9fAT6h1c9rLVYjKKTtVVfGr/v87Og1sn+pKsFx2NR79OoPqg5qFufC/qXYCjJJyLVxKL2YXEMAvlE9iIztRGwrX9yNTd9M3e7QOJyWz47EHPYl53Iso5BjGYWk5BVj1OtwM+jxcDMw3fwTl6Z/gGZwR3f+Q+qEpyRfHSwVZlS98M6XqjKIpggKizNX3B8qKF6UpQ6S7/mrQYH73N2/4v3TXeiLs9SJ6HkPqIsjDru6omrJUyWcN37bNBltBRkwb4Dafy+cpcqKWzJnc1SdQZ14DLiz5uwEhwOOrobVr8LRv3CYfPmwwzz+u8tEbnHZCZBOB7PGRDHZcw26fUtU9kj5LFuDu8oybI6m+Rlxat0NyZJqQpqm8c+RTMJ8zUQfW6hKq2rS6RKY+HGDApUpucV8uzmRbzcnkp5XQpdwX7pH+BHs486hlHz2JeeRmlfCqIB0ns18GA97HvmB3Tkx7j18wtrh7+mG2a2W/nYpu+Hziao02TMILn5VlXLpdHDwN1g4WWXKBndUQaWA6BoXty0hm+d/3kPJsU3MMH7JEEPZifoJgrFEDCbKLQdd2n5VnmMwwb1rqj/mKcmDt4dCTjzf6Mbzf0W38PDYjky94KTp49erEmvNXuGCx66kHK6Ytwa7Q2PpsEN02TQL0FS55uVvNa5BftJm+PsNFVSl4inkEUco11ieJgO/Co/HBnvx0/3D8Dr5BPjAcvjuTlV+GxCjjqMakNG3ZtMWbD89xAjdFgBsenfSWw3haKsR/Ge7DwetregY4s2XQf/D+0hpmZfRQ13IMvlBQJQKFAIZhlYE2VUvphKftpjyE9XftzyjB+kRF/BQXG9W27rQyseD1LwS19M+JiP/N74TNwxqi6G0ObHN7uC5JXvYvn4ln7s/j5euhN/sfbjX+m96RAUz8+Iu9I8OVN8HGz9Qg
xFZC1SG9k3fVj3q2v6lKusyeUf1fxx3H7T+t3HYqxcbjmSy7VgGvXQHuMiwkYCichk7PuEqezasp9rvs4+p4eudF8X1buq94uGvsvCqKpkGCO2h9sPMI/DPO+qz1DMILp9bc7/a6jgcKnstP0X96N1UwK8uGVLlFWSofoQZB9WFmClLmy5z3FKo+hhlHFSfI9d8VO/j+4MpeYybsxqHBt//ayh9o+p3jFFis3Pj+/+w6VjlAWM83AwUWe30ivTnh/vOq/CcBI0a6WwJGj370x4+XHOEf41sx/+NP7VX7212BxPfXeeqqwTw93TDoNORX5DPa27z0YA5tqtJcW/rStnsHuHLGxN70yH0pDd/1lGV0dRhtIokn/TmK7LY+fNAKst3p3AwNY/knBIyCkrQNHA36gnwdCPQy0SEv5nW/h5EBXrSJyqAHhF+tZ6Ynsgp4ssNCXy+/hgZBapsxt2oZ2B0ICNjvYjM2cSR3RsJLzlMILn8ZRrOgMvvZUzPkz7QLQWqtjtpM6BTwZ3Ol5YOkVj6eowm9aHlGaSCNeVfZ+YRdbV02+dl2VZ6Y8Vov86gmgn2n1Lja6pRUZbKEjn0m+pLdNP3DW/YWr5JbPT5cMuPoNejaRrH96zDpzgJX7NRfSnu/VFlnzgPOtw8YfyLqr79TAymWIvVVW5n2QOoev3WfdT/oC5pqMk74b1RakjQy9869bX8Tg6H+p/s/1mVkZl91T7nHaa+4BqQgQeofebL69VJTfer1ZCi1RyMbkvIZv6qOFYfTKPQYq/0vMmop390AEPbBTMwJpD2rbwJ8Co7IbTaHeQUWQn0dK92hIjy9ifn8dbvB/ljXyoFVayvMo35bnMYb9hY+amg9jD6GfW/3v+LutJYvoFp22EwbFrFwLa1WB3IeQapoKLBCHkp6n8Q97vKHDn/obN3RL3iHMgvl87uPOjW6dVV5bqUB9fGUqBS3zWH2gdPbFcn/MfWqYNMZ/ZDUHsY+x9VGlkfDodKqf9tVrkMoc/qdYWy2Gpn2a5kvtwQzz9HMmmjS+Vdtzfopj9WeeLo8+H6L2s9GM4utLBsVzLeZiMxwV60CfBkz/Fc/j6UxsajWbTx9+DmIW3pExWgGvguuhuHwcTWy5bStWtvPNxb4IhqR/9WIx5qdnWBYch9dZ41JyeX3A8uIzJvGymaP1dbnkbza8t1AyI5nlPMho3reM/tddrpy5UFhvWAzpepUhCzf1nT/OBOqmn+SZmimqZRYnNgMjbyopVrBM6f1E9plkB82BjmGO/gn3R3PN0N+Hq4EeTlzvWDohjVqQEN/HMS1YlfRN9aAwgWm4NZP+7iyw0JdNIn8JPpSdw1C9auV5Pj0YbcIhtuRj2t/T0xOKwqK9Neor5brvpAffYVpKsAh3M4bFAXlzyDVADVtzXbE3N4e9Uhftubit1R86lJa9L5zvQ04bpMNjs6cKNlJsWUZTZ5uhsI8FTfH0VWO0UWOz5mIx1DfegY6kPPNn6MbmPH+/ubytoKtB8N0cNg5XNqP6utfA1Iyi7i5WX7+GGbOrE26nWM6BDMHeGHiDn2Hf4nVmPGUnEmvZs6Ziif0VoF+6FVGD67AoCHvZ7nP9PurRgMyz0BH1yosqF6XKu+f8vtey/8spf3Vh+mtZ+Z38emYl5yX9nr6nI5eaEDOaSLJLfEQW6RlaxCC6m5JaTkFmOxO7h+YJTqnWQtVqV/8evg0Er1GevcRpMfhtLgQY45gsRLv0QLiMag12HU67DaNW5bsJHk3GKu6deGV6+tor9qRpwq38+OV1kqty6pc0BAK0hn8zez6XL0U7x0JVhxwzZ0Gh4jHnQtY0diNrd/vIm0vBL0OHjW4ytu0pYAEO/Th986PUuK5k/H/e8wIe9LDDqNXM2T9CGPEzv2X2rAga2fq76qwR1VFn27C8Ddk5+2H+f+L8uyYFr7mTmvfTAPj+tU7eh1n647yrqVi3jd+jxmnZWl9gE8a70FHRqXxBq4s+BdQnJUE
Cg1aCCBU77G6F1LcCMjTh1TH1kNdisWq428Eht7PPrzjW4cG5MdJOcWn/zXo5M+kfv8NzDashJPW3bVyw6IUW05YkeCToemaeQWWclIO07h8X34Fh4jsCger9xD6OL+KOt/6tRzkio/bMB3/uZjmTzy7Q6sdgdX9Irgyr4R+Jrd+GFbEt9uTiQxq4ih7YK4qEcYF3YJxddcS5A6Ox7+Nw7yjpMd1Jelfd8h0N+fVj4m2gZ6EuRuUxez806o73xXiKQ0m9JoUp8RJw9o8/PDqkWJT7i6YN6AYNRdn2zi1z0pjOsWyrs3N6zvUkGJjcXbkth9PJf9yXkcSM4jr1yJ5CU9wpl3Y8XyVgkaNdLZEjR6cvEuPl1/jAcv7MC/x3Q85etLzCrk2Z/2EBnoydiuofRrG4DRoCerwMLh9Hz0Oh0xwV74e7rz4/bjPLl4FzlFVtwMOq7pF8m/RrYjMrDmJrcJmYW8snw/K/akUGSty4lgRR5uBnq28cPT3YBDU+GKYC93IgI8CPEx8eeBNH7fl4rzeCXC34Mp50UzaUAkPuU+jGx2Bz/vPMGrv+4nIVOV3IzpGsrYrqGE+ZkJ8jKRnFtEfEICQ7f9Hx0LNte6bVa9iUKfGNxCO+Fh0NQVT+fVztDu0HeySo+121SPhR3flDV1HPqAOqmtT9qxrUSlrq5+VTWSc/NUV7ijz6t11hplxKHNPx+dtYCNHaez0HIeFxx7g/Ha31VP3+lidUXPWbrY+VJ1QhA56MwZLrowU/XriV+rAncXvax6Y3m1qjLAZXeoL1V/T7dKJxAFv7+K1+rnVEnWvX+7SpyyCixY7Q5Cyh9oWArUiIC7F6n/V1W8Q9WIhsGd1AFNq44UGXzR2YsxJ5TWve9fWtb/oxKdKrUa9m8VFEGdAO0+nstP24+zfHcynu5GJg9tyxW9I8oOWg8sV30kbMXqb3HNR1VeVd59PIc3Vhzgt72prscCvdw5r30w3iYjB1Py2J+SR165K/9OAZ5uhPqaySywkJavgsIebgY6hHrTIcQHvQ5yi63kFtnw83CjXYgX0UFerDqQxi87T7i+7z3cDHSP8KVbaz9igr1oG+RJa38PHJqGxeYgu9DKhiOZbDyYwJDkL2hNWTbhXrcu7Au9lPZhAfRs48fITiG08jGpHghr/qv6mTlPhEK7q34WiZtUQM3Z08ngrur3y2engRpd8uLSfamxNE2VahzfqvpEpR9QGYSeQerHJ1TtH8Gd1P5S36t0deXsi7blU06+Cu1i8oOxz0KfW2r+zLJb1eAGh35TmRZRg9VrOLxKXeWOX1f9Ok6i6Y38EXQDX3pM4uqB7RjbtfrhaQE1jO9PD5aV6/a+SZ3o1bFELK/Yyod/H+WjtUdcWbdOZkq4w/ALHfRJBJBHtGcxvu2H4D/hlVqXvz0hm399voWk7GpKPcvp1caPMF8TUw7/m8HsZI29G3c5/o+ukSEMa9+Kq/tF0Cagno3lbRYVDE3eWTaYRWmvDA3QDCb0we3V/ymki8pYre4AujhXvX8y4lRwsTCj4uecyVsF/AJjVAPygrQqT45rsnJvCv/37Q6sBVl87f4sXfQJFHlGYBp8B/qul6Ol78e68E7c7QWc0AL50nA5Qf2u5PKRQysErck6Ch9eVFYq0m8y9L2Fo9YAvt+axKKtiSRkFqHTgaebgVY+Jm4c1JYbBkVVzq6oyontqlHq3p8qjMBpx4CmaRh1DnI1T160Xc+X9lFolL1vRnZqxROXdKF9SB3e02n7VTnTzoUqgOAXqS5e9LkJfFtjd2jEZxbSyseEt8lIdqGFez7bzPrDmXjoSvjB7Qk66pP4096TW63/V2E7AjzdGNctjOsD9tPz73vROaxq4BRbceXsrZMkukXzRMFEVjl6AToGRAcwaUAUXcN92XMil11JOWQVWmgf7EVv7wwGrLsPc04cqeZoZvi/QnyhiaxCFfioLeDkZDLqGd8pgLvdfqTzgffQlw9o9bpBjU5VTSab1
e7gf38fYc5vByi2OtDp4Oq+bXh4bCfC/MrewyVFefz+y0IOblvDUXsw6R4x3HvNRQzp0rbW7Xv91/2Erp7BjcaV2DyCMXYap8pVdQb1nR6/Vv1NA9vB3X9W+kwvtNgY+8ZqErOKGBgTyI2+27n0wOMYtLLv2hLNSBY+ZGq+HNcC+dPRixX2fqQQwGD9Xh4OXk/fgr/RlQ8ElDb9tg95gImLstl2LJ1xsSbm3X4huiqGkl9/OIMb3l+PQ8PVwqKS9ENoH45FV5hBdvgwVvT+L0eyrBzLKCTE18S9I9qVHRsVZkL6AWy7f8C+4UNMmgqGHPHqRZub38UtrHJW3vHsIv7v2x2sP5yBzaFxiX49XroivrWPwFFuH+6jO8gY0256T5jG0N51u8iwNT6LvGIb3Vr7EuRdj5LMQytVhr/dUumpPM2Dl2zX8bn9QmJb+fB/4zsztmtojQHpXUk5fLkhnnWHMzicVnlEa093A2O6hnJZz9bkFFlZuDmB9YczAXDHyhj9ZkYad3FBqxyCio6q7JnB98KI/0Mzmtl0LIvvNifyy84TFTI1nfQ6aOdtYaL7OsZbV2DWWVnZdjopIeej10FGgYW0vBIcmsbNg9sytH31LRqsdgdv/naQt1cd4uS3s15Hpcec/DzcCPRyJ8jLnVGdQ7i2X5sKx9SZBRZ+/eN3Lt50G74UkOBoRRaqtC1Ql1d1JUmllUSpAW06jFEXApf+nxpZESo37K6jLfFZXPX2WvQ6+PXfw+v2WV5HOUVWjmcXkZ5fQu9I/wrnsiBBo0Y7W4JGj323g682JlSd0noGSMktZsb3O/l9nzqZNOh1TOgdwaMXdSLEp+KBs9Xu4MO/j/BG6Rc0qIDORd3DGBwbRJifmTA/MyajnpwiK9mFVtLyS0jKKuJ4dhGHUvPZeDSTrJMO3qszKCaQmwa35aLuYZWGNS2vyGLnrd8P8t7qw9hqOFAJJZMxhs2M1W+itS4DXemJjllnIYB8PHVVBwRWa734wHE5Pp1Gcc/I9vRoUy7lV9NUTbhz1IbYUaorf+yomk/ECjJUT4gN75bVZYd0hUvnNDjDyOHQ2Jecxz9HMlh/OIOIwwt5SptPiWakEDMBunzsmo7NWkfXF3SCFsqetjcx7LzhjOgQhPGfeap2ujSTymIKYpN5CN8ZxrO+MIK0/BIi/D3oEu5DlzBfLuwSqmrk7TZAq3savKapLAjnSUpRtjo5cZ5cG0oPEjU7xP8D+36Cfb+ongkmX3XlMXYkhRYbW45ls+t4Dsk5xeont5iU3GJS80qwOzTC/cxc2CWEkR1DOJpRwNJdyWw9lsEX7s8zWL+XYx7d+V/HufxzVAVOACb1CuKRAW4EJ61UJ8xFmfX+f6RpfnhQgreu7MqSw90bfYexqjGlrVi99hPby0YsAxJ9+7DCPI7P83pzKKvyAX6wtzt3d7FyRfq7hJxYpR7sOB4mflrp4Hp/ch5vrDjAst2qt4BeB1f2acOU86LpGu5b4YRd0zTi0vJZG5fB2kMZ7EjM5njOyVfF6u+SHuHcNTyWbq19a3wfl5dVYGHlvlSW705m9YE0SmyV/w492/gxtF0wMcGetDfl0uXYp3ju+LRis35QV9At+WWZggAR/SiMHIH7vkUYs0tPDjuMpeS8R0jw7IKmqVFZ6pytUJAO275Q5asZh+o0i6Y3UtTlGg51uINDjnBScktIzSsmu9BKZKAnPSL8XFmZecVW8opttPIxqSundpsKklXVkyBpi8pazIxTv5t8VcDAI0BdXYfSUWxKX3fb86DrBGxpB0mO20GOVccBv/OICxxBG30ml8W/iFfW3rr9HdDhCIxlp7E7nxyP4Ig9hHB/T/q08aZv0hf0LVTBn+NaINsd7cj2iqVzl+54e3qiR31k+nu64+fhhj7rCNrfc9DZS7DqzaxtNx1bn8l0DPMlwt+jxmBTXrGVT9cf473Vh13BotZ+ZiYNiGLigDYEe5vIKbKSmFXE+38d5ucdZdkt5
3cIZsp50YzsGFJpHZqm8dk/8Tz30x4sdgcR/h6E+po4nF5AdqGVYG8Tw9oHMSg2iE1Hs/hp+3EsdrXvttUls9z0KGasxDnCmWm9g3+0Luh0GtfG2Lg6xoZ7m57ovEMwu+mJCTRjOrFJZcRZSvdpzaFKehI3Vd+brDqtOqsGrqHd1H2fsNLvoA+qL3GoQklQV0x3r6zTCIpFFjv/+XkPn/+jvuPah3jz+Pn+jFxzE7py/UicsoL7c2Puv9iTq449PNwM3D4shn+Naoene+mJcPoh+OQKVdIE2NGz2dGB/Y5IDmoR7HdEsV2LrZDxEuDpxu3DYriybxsiTu5FAyoLb8Us9f51MpqxxozivbTuvJvcgQhdBm96fURH2wEAckP6s733s6zKDOCT0pGdDHod/aIC6BbhS48IPyIDPQn2NhHs7Y5Dg7zDG/Ha8Cb+x5a7jj9KdGbXCTfACVMsqy0dWVsSSyFm/Dzc0DQVmDcZ9TwRvY+w+CXkGYMYXzKbJKs3/p5uRPh7kJxT7MrMBhir38jb7m9ipOzz0xLUGTfvIHToQHPgKM6mICsFD0sWRp2abr9Xf/wHTCLUrfQ7ylpuX8tPUdlK+aX9aqroTaJpGrnFNrILLWQWWNDrdHi4GzAbDWQUlHAgJY/9yfmsOpBa4cQ6Vnec54wfMVi/hzm2q/nSPIlQPw+KrHZyi9RnYEywFwNjAukS7svHa4+yL1l9Zw+MDuSpy7rSPaJiSVZ5+5PzmPrFFg6m5qPTQa82/vSNCqBPlD992wbQ2s9c4TN/1X7VuNZLK2R90DN4FyRUveDw3qoJfDXli86htp066BIZp9/IQP0++uoPVjhGKC/f4Ie3vey9makLYLexK3vdupIQegFRMZ1Jyy/hvdWH8TYZWf7v4VXv36VeX3GA/648iLfJyMyLuzC8YzBtAjyJS8tnyfYT/LonGY/UbXxieA5PXQm7HW3Z5mjPIa01DvR0MR7nfP8Mwq0J6AvTKix7lyOajL73M+Ly22q9cFpstbMvOY8didmcyCmm2Gqn2GrH3aCnW2s/ukf40SHUu3LbjFPlwHJ1caIgHYdOj80Bu70GsqzNNIo9wvhpxwkyS99XscFexLbyJjLQg8gATyIDPYkM9CC70Mo7q+L480DZ30Wngy5hvvSK9KNzmC+dwnzo1ca/UpZpQmYhfx9KZ9PRLDYczXBdBH90fGfuGR5NgVXjm40JfLLuKEczCivM62My0srHRJHV7jrmrY+Le4Qx46Iu5JfYWHMonc3HssgpslJic5CcU+y6MHJVnwhGdGrFD9uO8+eBNOwOjd6R/lzdrw3dW/vy+75Ulu5K5lBqfqV1GPQ6RnZshU6nY39Kruv19dPt53PT7MpZgECG5kOS1gpvTzOhvma83I2lFyl06uKB8/Onwzh1Ab84RwVyL3xKZZzXk6ZpXPfeev45ktn40c4boMUFjebNm8crr7xCcnIyvXr14q233mLgwOr7tCxcuJAnn3ySo0eP0qFDB1566SUuvrisTlLTNGbNmsX7779PdnY25513Hu+88w4dOtQtcHK2BI0e+mY7321JZMZFnbl7RP06uJ9OG45k8tbvB/nroIrwBni68fyVPbi4RzhWu4Nlu5KZ98ch1xf04NhAZlzUhZ5t/OqVBu5waBxKy2dnYg52TUNfmmaZll9CYlYRyTnFxAZ7cd3AKNqH1NJU7ST7knP5eO0xErMKSc0tIS2/hFbeJldGhE4HqXnFpOaWUGixo5UetBn1qpQu2KzhUZxCftJe3LMO4UseP9sHs0eLrrCeYe2DmdAngr5R/sQEe6nXv+MbtB/uQ1d6tcLhF4kldixWjFjsDux2zXUt3i0/Cf+E31xX1IrMIeztfD/xURPo3iag1uh2QYmNP/arD+idiTnYHRqappFXbKuQAgkaH5jeYLRONWAsDOyK24S5JHt3YemuE/y0/QQ7k8oOSIK83BnWIZjLQtLpfPQT/BNW4q2VHdCttPdhvu0yU
gggkDxa6bLppY9jtNdhOlr3o3dYyqW6B5WlvBvcVNClMFOdXBdmqN8rNfSrXZZ7OB9FvcBRQwwJWYXsTMypMVBYkwjSWGp6DF9dEd/Zzydb86ad7jjtdMeJ0KWj15Ut1+obBUPuwy2octPlQouFPzdsJfXITmJJor3+OOG6siDTCS2Q3+x9WeHox1ZDd24Y2oF7hrdzXUHPLbaybOXv+Gyexxj7X64D91zNg8NE4OvhToCnGxabg7S8Emx2Gz10RzDoNKyagUXG8ZSMnMXEIe0xGdUV8e2JOfzv7yMs2XEcTVPftZf1bM2DozvQrlXd31dFFjtH0gtIySumlbcKWPh7uhGfWciB5DwOpeaj1+vw83DDx2wkPd/C4bR8DqcVEOZn5u4RsXQOa9znd7HVTlxaPgdS8th3Io91hzPYkVj1SW7/EHjA90962nZSEtqH7KixFAb3REPDLf84uuyjrM3y58cjsCspFxMWHnRbzF2GnzCisib/tndjrv1Kjnj3YWTHEAbFBuJu1Lsyppx7hbkolXaZfxJ+fAUex9ehK92f7UZPCtsMwxTeFffQztjdvUlJOUFqchKWjHh88g/TqiSeYE3tIw5Nx1LHAJbbB7DR0ZkTVE4f1+Ogv24/Q/R7ON90kB7aQUxaETaDB1ZTAA53H3Q6HTqdDnP2QXQOG3bvcCyXv4O5w8jKn9EOO/zzLvz+n8pBNuckmppHr9PI0rxZ5jeRUeE2wrJLs6giB5IXM44llr4UmEIJ8DThAP77+0HXQaFOVy6zHBhv2MCL5k/xt1fTn6oKq+09mGm7g0StrMdGkJc7IzuFcGGXEPpGBagTUjc9B1Py+fyfeH7YluQqv2zXyotpoztycY9wV1+Lk+09kcubvx1k+Z5k1/a28jHRJdyXTqHe+Jjd2JWUw47EHFd5wdiuobw6sZcrFT+v2Iq3yVjhb52RX8KirUmU2BwMbRdEz6IN6H96AF3pAe9uUy+CiuMJ05X1P4hzhLNfi2Sgfj/BuuqDOXZzAOm+3ThoD2d9XjC7871d+6Y3xbTTHae9Polu+gRidUk1/o3TTFH85ujP4UIzWfhQpJULuOjyiNWdoJ3uOG7YmGG/m1GDBzBtdAf8PcuC1JqmUWS1k55nYWtCFusPZ/Ln/lRX4PmOYTE8Mr4TJqNBBWl2L1KZakdWqwDowLth3PNYMfDzjhO8t/owe07kAhDma2bGxZ0J9/Nga3wWW4+k4BG3lGv5jaHl+ta4tkXvRkmrnhzx6sknSeH8khNNTumV7E6hPozqHMIFnUPoG+GJcd9PsHym6nEDqqF396tJDz+fWz/fw66kXLzcDbx/S3+GxgaozOCVz6n3jcEdzn+II13u4vllh/ltb9WZpP10+3nAuIgRhrKeJ8vsA5hnu4IDWhvG6zdwg/F3Bun31fh/KqODWxZTHHk+NofmatJqszvYcCSTJTtPsD4ugyMZBVyo28wkwyrWO7qw3DGARK0VncN8uKZfG/q1DeCpH3azMykHX/J5K2Ilw7MWoXNUPomrRO+msg4veV1lTTaAM5v2h21JbE/IIbfYSn6JjaKCPDIsdcgMQx2rzry4C9f0a1OnY9Eii51nl+zmyw2VA0AhPib6RPlTYnOw70Se671+w6AoXrgoSvVVS9uvMvtK8lQ2Q6eLIaD2jKWt8eo9kZhVSEJWEd4mA+d3aMXw9gFE6LPKLqCl7FYXyBL+ATTsbj4sNwzn7Zwh7NJicLVVOMmLV/XguoFV9Nspx2Z3cMP7/7DhaNnxSbC3ifT8ihdMLzBs512313Cj5mO0RC2YfY5IFhkv4sYbb2No+7NzZMO8YivvrT7MB38dqbW6wqDXcVnPcC7p2ZqB0YH4edavn5jdofGfn/fw0ZqjgDrf2JGY7coq8nQ3cHGPcK7u24bekRUDUHaHRkZ+Ccm5xZzIURdPMwssFFrs5JfYsNs1grzdaeVjIi4tny/+ia82W8jJ39ON5yf04JKeZT3kMgssFFntV
QYoMwssZBaUkFlg5XBaPt9uTqyyv0+31r5MOS+GS2PAnF72GZ5udWN5ih+L9hdXmG9QTCBju4UxODaQLoF69H++qAa1cWZOhvfGftl/sYV0x92gypOd5coFJTZMboZqm1kfzy7ivdWHWbD2KO5GPaseHlm50f0p1qKCRl9//TW33HIL8+fPZ9CgQcyZM4eFCxeyf/9+QkIq12mvXbuW4cOHM3v2bC699FK++OILXnrpJbZs2UL37t0BeOmll5g9ezYff/wxMTExPPnkk+zcuZM9e/ZgNteeWn62BI0e+HIrP24/zlOXduW2YTG1z9DMtsZn8fiiXa6DtZGdWrH3RC4puepLxd/Tjcfr8QXdUhVb7RzLKMRo0OFu0JNVaOGjNUf5cfvxCpH8QC93fMxGcoustCo+xg36X7nS8Dd+usIalq7scMTwtX0U39uHUUTZe6J9iDcXdw+jY5gPWmn5XnpeCXFp+RxKzWdbQnaVmRcAXu4G+kcHMig2kEExQfQMtOH221Pq6vKguytlAh1IyWPhpgS+35JU4QolgBEbI9z3Mz14A10zV6Kj6nU2RrHOA4spEL2HP5olH2NxJh72vArTnNAC+dXej2WOgWx0dMJGxQ/+1n5m+kUH0ibAgzBfdVUizM9MmK8ZH7ORDUcy+W1vCmsOpRPia+aSHuGM6xaG2U1P0p8L6PbPI1VuW7bmxT4tii9sF/CzYzB2DIT6mmgT4EmYn5ncIivJOcUkZhW5DiRGdwnlzvNjCPewEVR8FKtmYL/WloNphXy3JZGtpf3G3Aw6PN2NGPQ6Ckpsrv9nV+98HgjcwNDcpfgW13yit8ZtCLMKruWQQ404FOHvwYQ+rVm5N9UV3AW4qHsY00Z3pFPYKSqFagapucWs2p/GjqRs4jOLSMgs5FhGQa0HQOXpdOrw26GpK9z3Gn5kgmENbjr1v/zRPoRnrbeQXq6ZaFtdMuP0Gxlv2EhffcWMou2OWL60X8BP9iEUoA44grzcKbLaq+wf1Ud3kH+5/cgYfcWy2VxTOPHusey2hLE935+eujjGGjYTqMurtIzq/GIfyAzrHeTgjbtRT7CXO8E+JjqH+dCvbQC9Iv1Jyipi955ddNw7F2NJFoe0CLLMbRnRBmLT/yAsXx3ILdUN5/GiG8hEfQ9f2jOce0a044dtSXy6/pgr67S81n5mnr68G4Nig1gXl8GaQ+kYDTpuGtyWdr5AwnqKju/hwK4tWDKPgaaC6pqmGklqGtgwsMQ+mC3+Y+kfHYTV7uBASh6H0wpc2Ts1aR/izX2j2nF5r4hqg0UnS8gs5JN1R/lqY0KVpZqgeus9PLYjd54f27DvwKJs+O1p1dy9lF1nJEXXijBHshqeu1SO5snvjj4c14LQAf4eRpL1ofyaF8t+R3iFsiR3g54LOocwoU8E7Vp5sWir6jeRmldCILkM0O+nj/4gHXRJtNMdp40ujR1aLPNtl7HC0Q8NPUa9jr5tA+gbFUBrf/UZ6m0ykl9iI6/YxrLdyazYowIjnu4GVxaMzaGRW2zFUsX3UoiPidcm9uL8DtWcUBZlq4E3Tgo8aJrG8t0p/OfnPSRmVZ1V1TnMhymdHVzkn4Bvfpw6oT++rdxIqmUSjFFsK2lNnNaaFC2AQfq9XGDYhi/quzrPO5ZjQ19gh6Ebfx1M4+9D6eQV2wjycmfBlIEVM4yz4+Hnh8oyRIM7wWVvcsijB9sTVObr7uO5FGancnvh/7hS9yegBiH5zTicZf7XoQ/tQlSgJ2Y3A0fSCjicng/5qVzTKpHz3Q8QVnQAh81Gic2O1e7A22TE6NyPe06CgXdW/fcsp6DExr7kXPYcz2V36c++5Fys9ooflP6ebrx6TS9Gdw1VfRz/ek2NyuUVrC7+uJXLKjN5q35oEX3rPvpSPal+LTYSs9UFQI/Sfc3DzcDeE7lsOJrJjsQcOof58NDYTgR6VV2+VpOEzEK2xGex5VgWW+Kz2Xsit8oLUKM6teKdm/rV3tS7q
eWnqpFhI/rhMHqyLTGbwhI7Dk3DanewPyVP7WtJuQyKDeS1a3vV6fMot9jKgjVHWX0gja0J2dgdGka9jmEdgrm0NNDR2t+MMTdeZXmn74f0A2iag4OO1nx12IONBSEkGNoQFBBApzAfHh3fmbZBXrWuu6XLLLCwPTGbxEwV+EvILCQhq5CEzCKKrXau6tuGe0e0IyqonuXGVfhozRGeXbLHdREjNtiL28+PYULviLqV2tbB3hO5PP3jbv45komnu4EB0YEMaRdEuJ8Zs5sBk1FP70j/ChcHGuJgSh7LdiXjYzbSqTTjqi7v2b0ncnl/9WF+3H68wnvTz8ONAdGBXBKczMi0zzjq2YN3i0azOi7LdazlbtBj1zTX+ZpBr6Nf2wAu6BxCn0h/souspOaqwNTPO064lv/ABe2ZPrZTo15vQ7SooNGgQYMYMGAAc+fOBcDhcBAZGcn999/PY489Vmn6SZMmUVBQwJIlS1yPDR48mN69ezN//nw0TaN169Y89NBDPPzwwwDk5OQQGhrKggULuO6662rdJucf8Nt1+/HybrknO++tPsyW+Gyeu6IbNw+Jbu7NqROLzcF/V1asYw32NnHjoCgmD41u0Bf02SIxq5AvN8Sz4Ugm2xNzqjxINmFhnH4jXfXxGPTqw8to0KvrQzqw4M4m81BOeHbEw83gagtRYnWwNSGr0gFdVdoGeXJR93BGdGyFp7sBvU6Hu1FPu1ZedS4BKs9qd7D5WBZ/H0znr4NppOWVcHW/Ntx2XozKiMmIgzVzVA8nnaE0iyiQfL+O/FYYy7tHQki2ehKoyyOQPAJ1eQSU3jfprGRqPuoHH7I0HzI0X7LxpoTK+5IBO4by6fQYCffzoE+UP+1aeWN2M2B2MxDo5Ub/toG19uCqkaapIVOTNqthf1t1hOCOaEEd+OmQhcXbjpOQWVghMFSV2GAvnrqsKyNraIaqaRq/70vl1V8PsLc0KOvUPsSbu86P5Yo+rdWVeIdDDUFb3Yhh/pEQ1oNiq51vNyfy35UHK4waYjLquah7GHcOj6Vb6+rT9c8mWQUWVh9MY+XeVLYmZOEo3YV0OmeASIdeB53DfLmwSwijOofg7+FGSl4Jx7OL0OsgxphJwNa3YfNH6DQHRQYffva4nFD7cTpbdtPKnlphnXsMnVhu788PJX05qoXjazbi5+lGXrGtQg8db5ORnm1UuVnbIC+iAj2JCvQk3N+MW/peVdp2bI1qkH7yCDFOZn+sMSM56tWbtdaO7C30xVGQib44A0rysdgdWGx20myebLW1xWKr+2GFj8nIv0a1Z8p50WUnRzlJYC9BC4jhSHoBH645UuWVyV5t/IgK8iK70EJusY3BsYE8cEGHBh/Y2uwOjmcXczyniOggrwq9SUB9Vm08msnve1P5fV8qR8sFC90MOi7qHs6Ng6IYGBPY4AsbRRY7e07kcCAln/3JeeQWW+ka7kvPNv50a+3bNAftiZtU/6uwHmq0NndPlYET/w9aym6Svbuw1t6FTQl5bDqaxcGT0v5NRj2dwnwYHBvEkNgg+kcHVOqTYLM7OJiaT5HVjsXmIL/Yxt4TuexIymH/iWy8zSZigr2IDlblkEPbB9faxHTNoXSe/WmPq5T3ZO4GtV2DYgIZFBvE0HZBjfp7FVvtvL/6MO//dRizm4G+UQH0bevPsPatKg4f7qRpqln2sXWq31b8OpUZV400zY9PbWOYb78MCxVfe3SQJx/eOoDYqrIzNU0NyLD0UVXuCdBvSlkvjfwU1QOqMB0NHfbeN2Ec8XCtI4CdatmFFn7acYJvNyeyPSGbgdGBzLmu92m/sn6mKbLY2ZmUw47EbMxuBpVlGOZTp6G2W6rcYit7jufSKdSnYu+wGtjsDjILLAR7m+o0GMa5QtO0Jr+Q/sf+VBZvTeLSnq25sHPlcummoGkaybnFBHmZTsnIuU0hKbuIH7cd558jGWw8klnHQVXqZ0hsEHcNj2Vkp1bNkhDRYoJGFosFT
09Pvv32WyZMmOB6fPLkyWRnZ/PDDz9UmicqKorp06czbdo012OzZs1i8eLFbN++ncOHD9OuXTu2bt1K7969XdOMGDGC3r178+abb1ZaZklJCSUlZSc9OTk5REVFEXHvAvSmxkdtm9uzV3Tjqr61DH1+htmWkMXCTYkMiQ1ibLewM/YDpblYbA4OpORisTnw9XDD1+yGyWjAYFAjVhj1unoHcHKLrazen8bv+1JcvZ906PDxMBAb7E1sKy86h/nWr99KU3LWOp0kp8jKhsMZrIlLZ21cBja7g35tAxkQE0iXMJ8qr/QXWuzsSsphW0I2+5LzCPU107m0V1Kwjwl3vR43o47Wfh6E+tWenXgqaZpGVqGV49mFHM9WfZN8TEbC/DwI8TURHeRV52wGTdNIyirC4nDgcKgDjZggr0YdEBRb7Xy54Rhb4rMZEhvEJT1a1zstWpRzYrs6GUzZVfFxnRHaDoYO46HjONew38VWO24GfYV9ILfYSmJmIe5GPTHB3nXbP0ryVaZExkHVGynrqDrJ7DhelYLUY+hsh0OVC2UVWMgstJCSW8Lu0vfb7hM5tPI2MTAmiEExgQxtF1yn/WXviRxe/GU/m+Oz6Nbal/suaM/57YObNetU0zSsdo1imx03vb5ljkpWBzmFVnYdz8Go1xEV6Emor7nZTtrsDo2DKXnYHRo6Heh1OnzMRvw91ahip2J/cB4mN2jZBRlwYqvqh5RxCHIScIR055D/efyWE0FCdomrp1iglztD2wUztH0Q3Vr71f6+LcyEVbNVf6iqBHVUjfbbNGwknlMpM7+EAC/3szprXAhx9rDZHew5kcumo5lsPJrJrqRcIvw9GNkpRPXoCvTEanNgtTvQ63R4mox4uBk4kV3EX4fSWLU/jcNpBQR5mwj1dSfMz4PLeobTPcK/WV9Xbm4ukZGRZGdn4+dX84XeZg1jp6enY7fbCQ0NrfB4aGgo+/ZVXV+dnJxc5fTJycmu552PVTfNyWbPns0zzzxT6fGkd26t0+s4002ZA40YkL1ZzW3uDRAt0mbgvebeiHPQJ829AWe9paU/p1PTfwofANbUOlX1EoBlTbQtQpx+y4BXq3ym8qXSxtgMj1zYpEsUQgih7KBxR2QvNtWGNIG8vLwzO2h0ppgxYwbTp093/Z6dnU3btm2Jj4+v9Q8oRFNwRnoTEhJadB8t0bLIfidON9nnxOkm+5w43WSfE81B9jtRX5qmkZeXR+vWrWudtlmDRsHBwRgMBlJSKo76kJKSQlhYWJXzhIWF1Ti98zYlJYXw8PAK05QvVyvPZDJhMpkqPe7n5ydvOnFa+fr6yj4nTjvZ78TpJvucON1knxOnm+xzojnIfifqo64JMs3aKMbd3Z1+/fqxcuVK12MOh4OVK1cyZMiQKucZMmRIhekBVqxY4Zo+JiaGsLCwCtPk5ubyzz//VLtMIYQQQgghhBBCCFFRs5enTZ8+ncmTJ9O/f38GDhzInDlzKCgoYMoU1YXnlltuISIigtmzZwPw4IMPMmLECF577TUuueQSvvrqKzZt2sR776kOJjqdjmnTpvGf//yHDh06EBMTw5NPPknr1q0rNNsWQgghhBBCCCGEENVr9qDRpEmTSEtL46mnniI5OZnevXuzbNkyVyPr+Ph49PqyhKihQ4fyxRdf8MQTTzBz5kw6dOjA4sWL6d69u2ua//u//6OgoIC77rqL7Oxshg0bxrJlyzCb6zYKkslkYtasWVWWrAlxKsg+J5qD7HfidJN9Tpxuss+J0032OdEcZL8Tp5JOc44lKoQQQgghhBBCCCFEqWbtaSSEEEIIIYQQQgghzkwSNBJCCCGEEEIIIYQQlUjQSAghhBBCCCGEEEJUIkEjIYQQQgghhBBCCFHJORs0mjdvHtHR0ZjNZgYNGsSGDRtqnH7hwoV07twZs9lMjx49+OWXX07TloqzRX32uQULFqDT6Sr81HX0PyEAVq9ezWWXXUbr1q3R6XQsXry41nlWr
VpF3759MZlMtG/fngULFpzy7RRnj/ruc6tWrar0OafT6UhOTj49GyxavNmzZzNgwAB8fHwICQlhwoQJ7N+/v9b55JhONFRD9jk5phON9c4779CzZ098fX3x9fVlyJAhLF26tMZ55HNONKVzMmj09ddfM336dGbNmsWWLVvo1asX48aNIzU1tcrp165dy/XXX8/tt9/O1q1bmTBhAhMmTGDXrl2nectFS1XffQ7A19eXEydOuH6OHTt2GrdYtHQFBQX06tWLefPm1Wn6I0eOcMkllzBq1Ci2bdvGtGnTuOOOO1i+fPkp3lJxtqjvPue0f//+Cp91ISEhp2gLxdnmzz//5L777mP9+vWsWLECq9XK2LFjKSgoqHYeOaYTjdGQfQ7kmE40Tps2bXjxxRfZvHkzmzZt4oILLuCKK65g9+7dVU4vn3Oiqek0TdOaeyNOt0GDBjFgwADmzp0LgMPhIDIykvvvv5/HHnus0vSTJk2ioKCAJUuWuB4bPHgwvXv3Zv78+adtu0XLVd99bsGCBUybNo3s7OzTvKXibKTT6Vi0aBETJkyodppHH32Un3/+ucIBxXXXXUd2djbLli07DVspziZ12edWrVrFqFGjyMrKwt/f/7Rtmzh7paWlERISwp9//snw4cOrnEaO6URTqss+J8d04lQIDAzklVde4fbbb6/0nHzOiaZ2zmUaWSwWNm/ezOjRo12P6fV6Ro8ezbp166qcZ926dRWmBxg3bly10wtRXkP2OYD8/Hzatm1LZGRkjVcThGgK8jknmkvv3r0JDw9nzJgxrFmzprk3R7RgOTk5gDqZqo581ommVJd9DuSYTjQdu93OV199RUFBAUOGDKlyGvmcE03tnAsapaenY7fbCQ0NrfB4aGhotX0UkpOT6zW9EOU1ZJ/r1KkTH374IT/88AOfffYZDoeDoUOHkpiYeDo2WZyDqvucy83NpaioqJm2SpzNwsPDmT9/Pt999x3fffcdkZGRjBw5ki1btjT3pokWyOFwMG3aNM477zy6d+9e7XRyTCeaSl33OTmmE01h586deHt7YzKZuOeee1i0aBFdu3atclr5nBNNzdjcGyCEqGzIkCEVrh4MHTqULl268O677/Lcc88145YJIUTT6NSpE506dXL9PnToUOLi4njjjTf49NNPm3HLREt03333sWvXLv7+++/m3hRxjqjrPifHdKIpdOrUiW3btpGTk8O3337L5MmT+fPPP6sNHAnRlM65TKPg4GAMBgMpKSkVHk9JSSEsLKzKecLCwuo1vRDlNWSfO5mbmxt9+vTh0KFDp2IThaj2c87X1xcPD49m2ipxrhk4cKB8zol6mzp1KkuWLOGPP/6gTZs2NU4rx3SiKdRnnzuZHNOJhnB3d6d9+/b069eP2bNn06tXL958880qp5XPOdHUzrmgkbu7O/369WPlypWuxxwOBytXrqy2LnTIkCEVpgdYsWJFtdMLUV5D9rmT2e12du7cSXh4+KnaTHGOk885cSbYtm2bfM6JOtM0jalTp7Jo0SJ+//13YmL+v717C4lqi+M4/puTDYJ7msmM0SItMyvEzAiRHlLMBLuARAU9mBcKDLGGHgohFHwoIwItqagoE9ISukGS+jD2EkSJWPRiIWEQoURYatSDrvNwaMDmcPIyOmf0+4EN477M/i9ZbBY/19qu+uM1POswHVPpc79jTIdAGBsb08+fP//1GM85BNq8XJ52/PhxFRQUaPPmzUpLS1NNTY1GRkZUVFQkSTp48KCWL1+uM2fOSJKOHTumjIwMnT9/Xjt37tSdO3fU2dmpq1evBrMZCCGT7XNVVVVKT09XQkKCBgcHde7cOfX19enQoUPBbAZCyPDw8Li/Yr5//17d3d2KjIxUbGysysvL9fHjRzU0NEiSSkpKVFdXpxMnTqi4uFher1fNzc1qaWkJVhMQYibb52pqarRq1SolJSXpx48fun79urxer9rb24PVBISY0tJSNTY26tGjR3I4H
L73dTidTt8MScZ0CKSp9DnGdJiu8vJy5ebmKjY2VkNDQ2psbNTTp0/V1tYmieccZoGZpy5evGhiY2ON3W43aWlp5vnz575jGRkZpqCgYNz5zc3NJjEx0djtdpOUlGRaWlpmuWKEusn0OY/H4zvX7XabHTt2mK6uriBUjVDV0dFhJPltv/pZQUGBycjI8Ltm48aNxm63m/j4eHPz5s1Zrxuha7J97uzZs2b16tUmPDzcREZGmszMTOP1eoNTPELSv/U3SeOeXYzpEEhT6XOM6TBdxcXFJi4uztjtdrN06VKzbds2097e7jvOcw4zzWaMMbMZUgEAAAAAAOD/b9690wgAAAAAAAB/RmgEAAAAAAAAP4RGAAAAAAAA8ENoBAAAAAAAAD+ERgAAAAAAAPBDaAQAAAAAAAA/hEYAAAAAAADwQ2gEAAAAAAAAP4RGAAAAU1BYWKi8vLxZv299fb1sNptsNps8Hs+EriksLPRd8/DhwxmtDwAAzB1hwS4AAADg/8Zms/3n8crKStXW1soYM0sVjbdo0SL19PQoIiJiQufX1taqurpaMTExM1wZAACYSwiNAAAAfvPp0yff57t376qiokI9PT2+fZZlybKsYJQm6Z9QKzo6esLnO51OOZ3OGawIAADMRSxPAwAA+E10dLRvczqdvpDm12ZZlt/ytMzMTJWVlcnj8Wjx4sVyu926du2aRkZGVFRUJIfDoYSEBD158mTcvd68eaPc3FxZliW32638/Hx9/vx50jVfunRJa9asUXh4uNxut/bu3TvdXwMAAJjnCI0AAAAC5NatW4qKitKLFy9UVlamI0eOaN++fdqyZYu6urqUk5Oj/Px8ff/+XZI0ODiorKwspaamqrOzU62trerv79f+/fsndd/Ozk4dPXpUVVVV6unpUWtrq7Zu3ToTTQQAAPMIy9MAAAACJCUlRadOnZIklZeXq7q6WlFRUTp8+LAkqaKiQpcvX9br16+Vnp6uuro6paam6vTp077vuHHjhlasWKG3b98qMTFxQvf98OGDIiIitGvXLjkcDsXFxSk1NTXwDQQAAPMKM40AAAACZMOGDb7PCxYs0JIlS5ScnOzb53a7JUkDAwOSpFevXqmjo8P3jiTLsrRu3TpJUm9v74Tvu337dsXFxSk+Pl75+fm6ffu2bzYTAADAVBEaAQAABMjChQvH/Wyz2cbt+/Vf2cbGxiRJw8PD2r17t7q7u8dt7969m9TyMofDoa6uLjU1NSkmJkYVFRVKSUnR4ODg9BsFAADmLZanAQAABMmmTZt07949rVy5UmFh0xuWhYWFKTs7W9nZ2aqsrJTL5ZLX69WePXsCVC0AAJhvmGkEAAAQJKWlpfry5YsOHDigly9fqre3V21tbSoqKtLo6OiEv+fx48e6cOGCuru71dfXp4aGBo2NjWnt2rUzWD0AAJjrCI0AAACCZNmyZXr27JlGR0eVk5Oj5ORkeTweuVwu/fXXxIdpLpdL9+/fV1ZWltavX68rV66oqalJSUlJM1g9AACY62zGGBPsIgAAADAx9fX18ng8U3pfkc1m04MHD5SXlxfwugAAwNzDTCMAAIAQ8/XrV1mWpZMnT07o/JKSElmWNcNVAQCAuYaZRgAAACFkaGhI/f39kv5ZlhYVFfXHawYGBvTt2zdJUkxMjCIiIma0RgAAMDcQGgEAAAAAAMAPy9MAAAAAAADgh9AIAAAAAAAAfgiNAAAAAAAA4IfQCAAAAAAAAH4IjQAAAAAAAOCH0AgAAAAAAAB+CI0AAAAAAADgh9AIAAAAAAAAfv4G6Gt6iKXXivAAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "os_xs = opensmile_feats.index.get_level_values(\"start\").total_seconds()\n", + "jitter_sma3 = torch.nn.functional.avg_pool1d(jitter, kernel_size=3, padding=1, stride=1, count_include_pad=False)\n", + "plt.plot(os_xs, opensmile_feats.jitterLocal_sma3nz)\n", + "plt.plot(xs, jitter_sma3[0])\n", + "plt.ylim((0, 0.1))\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Jitter\")\n", + "plt.legend([\"OpenSmile\", \"SpeechBrain\"])\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "5bf3b12c-be67-43a8-ba0a-f2ab678e70a5", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABIQAAADeCAYAAABMiNu0AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAllpJREFUeJzs3Xd8U/X6wPHPSdI03aW7lJay994gILLcIu4rDlS86hUH14X+FDfujeOCXhEXouhVEEERkL33plAoo3uvzPP749ukLR20pUPleb9efSVNTk5O0zY55znP0HRd1xFCCCGEEEIIIYQQ5wxDU2+AEEIIIYQQQgghhGhcEhASQgghhBBCCCGEOMdIQEgIIYQQQgghhBDiHCMBISGEEEIIIYQQQohzjASEhBBCCCGEEEIIIc4xEhASQgghhBBCCCGEOMdIQEgIIYQQQgghhBDiHCMBISGEEEIIIYQQQohzjASEhBBCCCGEEEIIIc4xEhASQgghhBBCCCGEOMf8KQJCM2bMID4+HovFwoABA9iwYUONHvf111+jaRrjxo1r2A0UQgghhBBCCCGE+Btp8oDQ3LlzmTJlCtOmTWPLli306NGDsWPHkpqaWu3jEhMTeeihhxg6dGgjbakQQgghhBBCCCHE34Om67relBswYMAA+vXrx3vvvQeAy+UiNjaWyZMn89hjj1X6GKfTybBhw7jttttYuXIl2dnZ/PDDD4241UIIIYQQQgghhBB/XaamfHKbzcbmzZuZOnWq5zaDwcCoUaNYu3ZtlY979tlniYiI4Pbbb2flypXVPofVasVqtXq+d7lcZGZmEhoaiqZpZ/9DCCGEEEIIIYQQQvwJ6LpOXl4ezZs3x2CoviisSQNC6enpOJ1OIiMjy90eGRnJvn37Kn3MqlWr+Pjjj9m2bVuNnmP69Ok888wzZ7upQgghhBBCCCGEEH8JSUlJtGjRotplmjQgVFt5eXncdNNNzJw5k7CwsBo9ZurUqUyZMsXzfU5ODnFxcSQlJREYGNhQm9rwZgyA3BNwy0/QvBd8ehmc2gpXfQztxzb11gkhhBBCCCGEEKKh2Ivhtbbq+oO7wRIEQG5uLrGxsQQEBJxxFU0aEAoLC8NoNJKSklLu9pSUFKKioiosn5CQQGJiIpdddpnnNpfLBYDJZGL//v20adOm3GO8vb3x9vausK7AwMC/dkDIlQPeGkTEQmAgNAuBTA1MDvW9EEIIIYQQQggh/p6smooJAASHgtm33N01aZHTpFPGzGYzffr0YenSpZ7bXC4XS5cuZd
CgQRWW79ixIzt37mTbtm2er8svv5wRI0awbds2YmNjG3Pzm47DCvYCdd0nRF16lwSBrLlNs01CCCGEEEIIIYRoHE576XWjV51W0eQlY1OmTOGWW26hb9++9O/fn7feeouCggImTpwIwM0330xMTAzTp0/HYrHQtWvXco8PDg4GqHD731pRtrrUDKWBIEvJZbEEhIQQQgghhBBCiL81l6P0uqFuoZ0mDwhdd911pKWl8dRTT5GcnEzPnj355ZdfPI2mjx07dsbO2Oecokx1aQkG92vjyRDKaZJNEkIIIYQQQgghRCNxZwgZTFDHCepNHhACuPfee7n33nsrvW/58uXVPvbTTz+t/w36syvKUpe+IaW3lTSQkgwhIYQQQgghhPjrczqd2O32My8ozjleXl4YXe6AUN3KxeBPEhAStVRYkiHk06z0NukhJIQQQgghhBB/C/n5+Rw/fhxd15t6U8SfkKZptAgAf6hz/yCQgNBfkztDqGxASHoICSGEEEIIIcRfntPp5Pjx4/j6+hIeHl6jaVHi3KHrOmlpaRzPyqSd0YKxjv2DQAJCf02egFCZkjHJEBJCCCGEEEKIvzy73Y6u64SHh+Pj49PUmyP+hMLDw0nMzcJuCcOo1b2sULo1/xUVVVIyJhlCQgghhBBCCPG3IZlBoiqapoGOaiZ9Fj2EJCD0V1RZU2nJEBJCCCGEEEIIIc4RJf2ljHUv/JKA0F+R9BASQgghhBBCCCHEWfQQkoDQX1GlU8ZKxs7bC8DpaPxtEkIIIYQQQgghzgFPP/00PXv29Hx/6623Mm7cuEbeipIMISkZO8cUZavLyjKEQMrGhBBCCCGEEEI0iaSkJG677TaaN2+O2WymZcuW3H///WRkZDT6tqSlpXH33XcTFxeHt7c3UVFRjB07ltWrV5/Veh966CGWLl1aT1tZRyXxoLMpGZMpY39FlTWVNnqByQccRSogVLa/kBBCCCGEEEII0cAOHz7MoEGDaN++PV999RWtWrVi9+7dPPzwwyxatIh169YREtJ4x6pXXXUVNpuN2bNn07p1a1JSUli6dOlZB6f8/f3x9/evp62sK8kQOjdV1kMIpI+QEEIIIYQQQvzN6LpOoc3RJF+6rp95A8v417/+hdlsZsmSJQwfPpy4uDguuugifvvtN06cOMETTzwBQHx8PM899xw33HADfn5+xMTEMGPGjHLrys7O5o477iA8PJzAwEAuuOACtm/f7rnfXbY1Z84c4uPjCQoK4vrrrycvL8/z+JUrV/Lyyy8zYsQIWrZsSf/+/Zk6dSqXX365Zz2apvHRRx9x6aWX4uvrS6dOnVi7di2HDh3i/PPPx8/Pj8GDB5OQkFDhuavicrmYPn06rVq1wsfHhx49evDtt9/W6rU8M3dT6boHhCRD6K/GXgz2QnX99Cwg70DIT5GSMSGEEEIIIYT4myiyO+n81OImee49z47F11yzsEFmZiaLFy/mhRdewMfHp9x9UVFR3HjjjcydO5f3338fgFdffZXHH3+cZ555hsWLF3P//ffTvn17Ro8eDcA111yDj48PixYtIigoiI8++oiRI0dy4MABT5ZRQkICP/zwAwsWLCArK4trr72Wl156iRdeeMGTxfPDDz8wcOBAvL29q9z25557jjfeeIM33niDRx99lH/84x+0bt2aqVOnEhcXx2233ca9997LokWLavRaTJ8+nc8//5wPP/yQdu3a8ccffzBhwgTCw8MZPnx4jdZxRu5YnWQInUPc2UGasXTUvJtkCAkhhBBCCCGEaAIHDx5E13U6depU6f2dOnUiKyuLtLQ0AIYMGcJjjz1G+/btmTx5MldffTVvvvkmAKtWrWLDhg3MmzePvn370q5dO1577TWCg4PLZdq4XC4+/fRTunbtytChQ7nppps8vX1MJhOffvops2fPJjg4mCFDhvD444+zY8eOCts2ceJErr32Wtq3b8+jjz5KYmIiN954I2PHjqVTp07cf//9LF++vE
avg9Vq5cUXX+STTz5h7NixtG7dmltvvZUJEybw0Ucf1eYlPYOzHzsvGUJ/NZ5ysWDQtPL3uQNEkiEkhBBCCCGEEH8LPl5G9jw7tsmeu7ZqWmY2aNCgCt+/9dZbAGzfvp38/HxCQ0PLLVNUVFSudCs+Pp6AgADP99HR0aSmpnq+v+qqq7jkkktYuXIl69atY9GiRbzyyivMmjWLW2+91bNc9+7dPdcjIyMB6NatW7nbiouLyc3NJTDwtMSM0xw6dIjCwkJPppObzWajV69e1T62ds6+h5AEhP5qPAGhShpxSYaQEEIIIYQQQvytaJpW47KtptS2bVs0TWPv3r1ceeWVFe7fu3cvzZo1Izw8/Izrys/PJzo6utKsnODgYM91L6/ywRBN03C5XOVus1gsjB49mtGjR/Pkk09yxx13MG3atHIBobLr0UoSLyq77fR1V7XtAAsXLiQmJqbcfdWVrdWaZ8qYBITOHZVNGHPzZAjlNN72CCGEEEIIIYQ454WGhjJ69Gjef/99HnzwwXJ9hJKTk/niiy+4+eabPcGVdevWlXv8unXrPOVmvXv3Jjk5GZPJRHx8fL1uZ+fOnfnhhx/qdZ2nr9/b25tjx47VX7+gSrkzhKRk7NzhzhCqbKy8JUhdSoaQEEIIIYQQQohG9t577zF48GDGjh3L888/X27sfExMDC+88IJn2dWrV/PKK68wbtw4fv31V+bNm8fChQsBGDVqFIMGDWLcuHG88sortG/fnpMnT7Jw4UKuvPJK+vbte8ZtycjI4JprruG2226je/fuBAQEsGnTJl555RWuuOKKBnsNAgICeOihh3jwwQdxuVycd9555OTksHr1agIDA7nlllvq9wklQ+gcUliTDCEJCAkhhBBCCCGEaFzt2rVj06ZNTJs2jWuvvZbMzEyioqIYN24c06ZN80wHA/j3v//Npk2beOaZZwgMDOSNN95g7FjVK0nTNH7++WeeeOIJJk6cSFpaGlFRUQwbNszT4+dM/P39GTBgAG+++SYJCQnY7XZiY2OZNGkSjz/+eIP8/G7PPfcc4eHhTJ8+ncOHDxMcHEzv3r3r+XnPvoeQpte049PfRG5uLkFBQeTk5JyxGdSf0q/TYPVbMPAeuHB6+fvWfQC/PAZdr4KrP2mSzRNCCCGEEEIIUXfFxcUcOXKEVq1aYbFYmnpzGkR8fDwPPPAADzzwQFNvyl9ScXExR/Zup9XS27G0HQ7jZnjuq03MQ8bO/9VU11TaW5pKCyGEEEIIIYQQf39nP3ZeAkJ/NZ6m0sEV7/MuGbcnJWNCCCGEEEIIIcTfn4ydP4cUZavLynoIydh5IYQQQgghhBB/comJiU29CX997u4/Z9FUWjKE/mrcTaUrmzImTaWFEEIIIYQQQohzx1mMnZeA0F+Np4dQZRlCMnZeCCGEEEIIIYT4+5MMoXNPTZpK2/LA5Wy8bRJCCCGEEEIIIUQjOvux8xIQ+iuxF4GjSF2vrocQgDWvcbZJCCGEEEIIIYQQjaskHiRTxs4V7uwgg6l0olhZJm8weqvr0kdICCGEEEIIIYT4m5IMoXNL2f5Bmlb5MjJpTAghhBBCCCGEODf81XsIzZgxg/j4eCwWCwMGDGDDhg1VLjt//nz69u1LcHAwfn5+9OzZkzlz5jTi1jYh94SxysrF3GTSmBBCCCGEEEIIUamnn36anj17Nuk2nH/++TzwwANntxL9b5AhNHfuXKZMmcK0adPYsmULPXr0YOzYsaSmpla6fEhICE888QRr165lx44dTJw4kYkTJ7J48eJG3vImUN2EMbdqMoTe/u0gN85aR4HV0QAbJ4QQQgghhBDiXJaWlsbdd99NXFwc3t7eREVFMXbsWFavXt3Um1Yrt956K5qmeb5CQ0O58MIL2bFjR72sf/78+Tz33HP1sq6/dA+hN954g0mTJjFx4kQ6d+7Mhx9+iK+vL5988kmly59//vlceeWVdOrUiTZt2nD//ffTvXt3Vq1a1chb3g
SK3BlClUwYc6siQ8judPHBikOsPpTByoPpDbSBQgghhBBCCCHOVVdddRVbt25l9uzZHDhwgB9//JHzzz+fjIyMpt60Wrvwwgs5deoUp06dYunSpZhMJi699NJqH2O322u07pCQEAICKukLXCvuDKG/aEDIZrOxefNmRo0a5bnNYDAwatQo1q5de8bH67rO0qVL2b9/P8OGDat0GavVSm5ubrmvv6xaZQjllLt5f3IexXYXADuOZzfAxgkhhBBCCCGEqHe6DraCpvlylyXVQHZ2NitXruTll19mxIgRtGzZkv79+zN16lQuv/xyADRN44MPPuCiiy7Cx8eH1q1b8+2335ZbT1JSEtdeey3BwcGEhIRwxRVXkJiYWG6ZWbNm0alTJywWCx07duT9998vd//x48e54YYbCAkJwc/Pj759+7J+/fpyy8yZM4f4+HiCgoK4/vrrycsrP6nbneEUFRVFz549eeyxx0hKSiItLQ2AxMRENE1j7ty5DB8+HIvFwhdffEFGRgY33HADMTEx+Pr60q1bN7766qty6z69ZCw+Pp4XX3yR2267jYCAAOLi4vjPf/5T/QteDyVjdQ8l1YP09HScTieRkZHlbo+MjGTfvn1VPi4nJ4eYmBisVitGo5H333+f0aNHV7rs9OnTeeaZZ+p1u5tMTQJC3kHq8rQMoW1J2Z7rO0+UDxYJIYQQQgghhPiTshfCi82b5rkfPwlmvxot6u/vj7+/Pz/88AMDBw7E29u70uWefPJJXnrpJd5++23mzJnD9ddfz86dO+nUqRN2u52xY8cyaNAgVq5ciclk4vnnn/eUa5nNZr744gueeuop3nvvPXr16sXWrVuZNGkSfn5+3HLLLeTn5zN8+HBiYmL48ccfiYqKYsuWLbhcLs82JCQk8MMPP7BgwQKysrK49tpreemll3jhhRcq3eb8/Hw+//xz2rZtS2hoaLn7HnvsMV5//XV69eqFxWKhuLiYPn368OijjxIYGMjChQu56aabaNOmDf3796/y9Xv99dd57rnnePzxx/n222+5++67GT58OB06dKj+hT+LptJNGhCqq4CAALZt20Z+fj5Lly5lypQptG7dmvPPP7/CslOnTmXKlCme73Nzc4mNjW3Era1HZ9FDqGxAaMfxHHRdR6tqUpkQQgghhBBCCFELJpOJTz/9lEmTJvHhhx/Su3dvhg8fzvXXX0/37t09y11zzTXccccdADz33HP8+uuvvPvuu7z//vvMnTsXl8vFrFmzPMer//3vfwkODmb58uWMGTOGadOm8frrrzN+/HgAWrVqxZ49e/joo4+45ZZb+PLLL0lLS2Pjxo2EhKh2K23bti23rS6Xi08//dRTtnXTTTexdOnScgGhBQsW4O/vD0BBQQHR0dEsWLAAg6F8odUDDzzg2Ra3hx56yHN98uTJLF68mG+++abagNDFF1/MPffcA8Cjjz7Km2++ybJly6oJCJ19yViTBoTCwsIwGo2kpKSUuz0lJYWoqKgqH2cwGDy/0J49e7J3716mT59eaUDI29u7ysjkX447yGMJqnqZKnoIlQ0I5RTZOZpRSHxYzSK9QgghhBBCCCGaiJevytRpqueuhauuuopLLrmElStXsm7dOhYtWsQrr7zCrFmzuPXWWwEYNGhQuccMGjSIbdu2AbB9+3YOHTpUob9OcXExCQkJFBQUkJCQwO23386kSZM89zscDoKC1HHytm3b6NWrlycYVJn4+PhyzxEdHV1hsNWIESP44IMPAMjKyuL999/noosuYsOGDbRs2dKzXN++fcs9zul08uKLL/LNN99w4sQJbDYbVqsVX9/qX8uyQTNN04iKiqpy2BbgiQf9ZTOEzGYzffr0YenSpYwbNw5QkbqlS5dy77331ng9LpcLq9XaQFv5J+LuC+TOAqpMJRlCucV2EtLyAWgZ6svRjEJ2nMiRgJAQQgghhBBC/NlpWo3Ltv4MLBYLo0ePZvTo0Tz55JPccccdTJs2zR
MQqk5+fj59+vThiy++qHBfeHg4+fnquHbmzJkMGDCg3P1GoxEAHx+fMz6Pl1f5IIqmaeVKygD8/PzKZRbNmjWLoKAgZs6cyfPPP19uubJeffVV3n77bd566y26deuGn58fDzzwADab7ay3qby/wdj5KVOmMHPmTGbPns3evXu5++67KSgoYOLEiQDcfPPNTJ061bP89OnT+fXXXzl8+DB79+7l9ddfZ86cOUyYMKGpfoTGY61bhtCOpBx0HWJDfBjePrzktuwG2kghhBBCCCGEEELp3LkzBQUFnu/XrVtX7v5169bRqVMnAHr37s3BgweJiIigbdu25b6CgoKIjIykefPmHD58uML9rVq1AlSmzbZt28jMzKzXn0PTNAwGA0VFRdUut3r1aq644gomTJhAjx49aN26NQcOHKjXbSnnLMbON3kPoeuuu460tDSeeuopkpOT6dmzJ7/88oun0fSxY8fK1egVFBRwzz33cPz4cXx8fOjYsSOff/451113XVP9CI3HnSHkXbsMoW1JqvdQz9hmdItRwaQd0lhaCCGEEEIIIUQ9ycjI4JprruG2226je/fuBAQEsGnTJl555RWuuOIKz3Lz5s2jb9++nHfeeXzxxRds2LCBjz/+GIAbb7yRV199lSuuuIJnn32WFi1acPToUebPn88jjzxCixYteOaZZ7jvvvsICgriwgsvxGq1smnTJrKyspgyZQo33HADL774IuPGjWP69OlER0ezdetWmjdvXqFcrTpWq5Xk5GRAlYy999575Ofnc9lll1X7uHbt2vHtt9+yZs0amjVrxhtvvEFKSgqdO3euw6tanb/4lDG3e++9t8oSseXLl5f7/vnnny+XnnVOqWMPIXf/oJ6xwfSIDQZg14kcnC4do0EaSwshhBBCCCGEODv+/v4MGDCAN998k4SEBOx2O7GxsUyaNInHH3/cs9wzzzzD119/zT333EN0dDRfffWVJ1ji6+vLH3/8waOPPsr48ePJy8sjJiaGkSNHEhiojnXvuOMOfH19efXVV3n44Yfx8/OjW7dunjHuZrOZJUuW8O9//5uLL74Yh8NB586dmTFjRq1+nl9++YXo6GhADbbq2LEj8+bNq7R3cVn/93//x+HDhxk7diy+vr7ceeedjBs3jpycek7KqKKHUIHVUeNVaLruHl5/bsjNzSUoKIicnBzPH9RfxnMR4LTCAzshOK7yZU5shpkXQGALmLIbXdfp98JvpOfb+O7uwfSMDabb04sptDlZ8uAw2kcGVL4eIYQQQgghhBCNrri4mCNHjtCqVSssFktTb0690jSN77//3tNDWNRNcXExR7avptXK+7FcPxta9PHcd+tHy5l914gaxTyavIeQqCF7sQoGwRkyhEruK8kQOp5VRHq+DS+jRpfmgRgNGl2bl5SNHZeyMSGEEEIIIYQQ4i/rtB5CCWkFVSxYkQSE/io8JWAamKvJ6nEHi6x54HJ5ysU6RQdi8VJd17u3cAeEshtmW4UQQgghhBBCCNFw9Io9hIrtTpIyC2u8iloHhOx2O7fddhtHjhyp7UPF2XD3D/IOAEM1vzbPSHodrLnl+ge5dSsJCG2XDCEhhBBCCCGEEI1E13UpF6s37oBQaYbQ4bQCXLVoClTrgJCXlxffffddbR8mzpZ7wlh15WIAJm8w+XgeU1lAqEcLdX3vqVxsDlf9bqcQQgghhBBCCCEaR5mSsYOpebV6aJ1KxsaNG8cPP/xQl4eKurLWYOS8W0nQyFGYza6S8fJlA0ItQ30JtJiwOVwcSKndH4wQQgghhBBCiIZ3js1/ErWg67oqGdP1ciVjB1Pya7WeOo2db9euHc8++yyrV6+mT58++Pn5lbv/vvvuq8tqRXVqmiHkXiY/mfS0VKwOFxYvA/Ghpb8jTdPo0jyItYcz2J+cR9eYGqxTCCGEEEIIIUSDMxpV71ebzYaPj08Tb434M7LZbOCyY7TnlRs7X9uEjzoFhD7++GOCg4PZvHkzmz
dvLnefpmkSEKoLWyHoLvD2r/x+dw8hS80zhFLTU4BmtArzx2DQyi0SHuANQFahra5bLIQQQgghhBCinplMJnx9fUlLS8PLywtDdT1kxTnH5XKRlpqKb+oWTLbcchlCh1IbIUNIGkrXM5cT3h+oLu/fVi7C51HbDCEgMyMdaEbrcL8KiwT7qufIKbLXcaOFEEIIIYQQQtQ3TdOIjo7myJEjHD16tKk3R/wJGTSI2/8pGrqnh1Cx3UliRs1HzkMdA0JuNpuNI0eO0KZNG0yms1rVuS0rEbJL/tEL0iCwecVl3GPna9FDKD87HWhHm7BKAkI+KiCUXSgBISGEEEIIIYT4MzGbzbRr106VBglxGrNuw1Ccrr4pyRA6kq4mjAVYjDVeT52iOIWFhUyePJnZs2cDcODAAVq3bs3kyZOJiYnhscceq8tqz12pe0uvF2ZWHhCqQ4ZQUV4mAG0iKpahBfmaAciWDCEhhBBCCCGE+NMxGAxYLJam3gzxZ5RWklBiDgAv1WfK3T+obXgAe2q4mjoVI06dOpXt27ezfPnycn+go0aNYu7cuXVZ5bktbV/p9aLMypepQw8he0E2AK3DKgaESjOEJOIshBBCCCGEEEL8ZaQfUJdh7UBT/YLdE8baRFSsEKpKnTKEfvjhB+bOncvAgQPRtNJmxV26dCEhIaEuqzy3pe0vvV5YRUCoDiVjZoeKEEoPISGEEEIIIYQQ4m/CExBq77npYKo6/m8TXsWgqkrUKUMoLS2NiIiICrcXFBSUCxCJGqpRhlBtSsZU0CiQAqICLfh5V4z7BZeUjMmUMSGEEEIIIYQQ4k/oxGZ4ozOserP87WllMoRKlGYINXBAqG/fvixcuNDzvTsINGvWLAYNGlSXVZ67XM7S6B5UnSFUh5KxQK2w0uwgKM0QkqbSQgghhBBCCCHEn4w1H769HXJPwJY55e87LUPI6iidMNa2FhlCdSoZe/HFF7nooovYs2cPDoeDt99+mz179rBmzRpWrFhRl1Weu7KPgaO49PuirMqX82QIBZ95nSUBoQAKq0wXc/cQyit24HC6MBnrFBsUQgghhBBCCCFEfVv8OGQdUdczE1SSiCUQdB3SD6rbwzsAcDjNPWHMRHiAd42fok5RgPPOO49t27bhcDjo1q0bS5YsISIigrVr19KnT5+6rPLcVbZ/EFTTQ6gkIFSjHkLBAARSdYZQUElACCC32HHmdQohhBBCCCGEEKLh7V8EW9RUd8wlSR7JO9RlXjLY8kAzQrNWQOmEsfaRAbVq41OnDCGANm3aMHPmzLo+XLillYyc1wyguyrvIeRylSkZq/nY+UCtgNZVZAiZjAYCvE3kWR1kF9oI8TPXZeuFEEIIIYQQQghRX/LT4MfJ6vqgeyErEfYtgFPbIf680nKxkFZgUsfxh1JV/6B2tegfBGcREAJITU0lNTUVl8tV7vbu3bufzWrPLe4MociuKuJXWYaQLR/Q1fUa9BByeAVgAgIook2YT5XLBfl6qYCQTBoTQgghhBBCCCGa3rYvoCANwjvBBU/CmndVQOjkNnV/JRPG3BlC7SIDavVUdQoIbd68mVtuuYW9e/ei63q5+zRNw+l01mW15yb3hLGWQ1RAqLIMIXf/IKMZTJYzrvJEkRctAYOm09xSdTlYM18zx7OKyJHG0kIIIYQQQgghRNPLPaEuO1wEXhZo3lN9f2qbunT3Dyo7YawxM4Ruu+022rdvz8cff0xkZKSMmq8rl6t0XFzLQbD+g8ozhKwl5WLegVCD1zoh20Gk7oVFs2Ow5YJvcKXLuSeNyeh5IYQQQgghhBDiT6AgXV36hanL6B7qMv2gmjyWXlJlVJIhpOs6J7KKAIgP9QNq3iO4TgGhw4cP891339G2bdu6PFy45R4HewEYvCCmpBl3cbYKFBnK9Pv2TBirQf8gICG1gK74YSG79LGVcDeWltHzQgghhBBCCCHEn0BhSUDItyQg5B8BAc0h7yQk7yyTIaQCQrnFDq
wO1cYnPMAbey2GRtVpytjIkSPZvn17XR4qynL3DwptC34R6rruUkGhsjwNpWswYQw4nJ5Pru5b8tiqA0LuDCHpISSEEEIIIYQQQvwJFGSoS7/Q0tvcWUJHV5WWlJWUjKXlFQMQ4G3Cx2ys1VPVKUNo1qxZ3HLLLezatYuuXbvi5eVV7v7LL7+8Lqs997j7B4V3UN3Bzf6qgXRRFviGlC5XXIuR86gMoVxqEBDyUR3Jc6RkTAghhBBCCCGEaHqnZwiB6iN0YBHs/E597xcBPs0ASM2zAhAe6F3rp6pTQGjt2rWsXr2aRYsWVbhPmkrXgicg1FFd+oSogFBhJoS2KV3OWouR87gzhPzUN5IhJIQQQgghhBBC/PnpOhS6M4TKBITcGUJpe9VlmQljaSUBoYiARgoITZ48mQkTJvDkk08SGRlZl1UIgNQyGUIAvs0g51jFSWPuErIqSsZSc4u58v01ZBRY8TWbyCywketVk5IxlSEkPYSEEEIIIYQQQogmVpwNrpIeQGUzhKJ7ll+uzIQxd0AoPODME8lPV6ceQhkZGTz44IMSDDobul7aQyiik7r0KSkTO33SmKeHUHClq9pyLIsT2UUU211kFqjyL6N7sli1JWPuptJSMiaEEEIIIYQQQjQpdyzA7K9GzrsFRJX2HYZyGUKpZ5EhVKeA0Pjx41m2bFldHircck+CLQ80I4SUlIe5+wZVyBCqvodQkV2V6PVt2YwlDw7jf/8awuhe7co/thJSMiaEEEIIIYQQQvxJuEfO+4aWv13TVB8ht0pKxsIbq2Ssffv2TJ06lVWrVtGtW7cKTaXvu+++Wq1vxowZvPrqqyQnJ9OjRw/effdd+vfvX+myM2fO5LPPPmPXrl0A9OnThxdffLHK5f+0Unary9C2qqE0VJ0hdIYeQkU2NWKumZ+Z9pEB6sZE1WCqRgEhKRkTQgghhBBCCCGalruhdNn+QW7RPeDgEnW9TMlYasmUsUbrITRr1iz8/f1ZsWIFK1asKHefpmm1CgjNnTuXKVOm8OGHHzJgwADeeustxo4dy/79+4mIiKiw/PLly7nhhhsYPHgwFouFl19+mTFjxrB7925iYmLq8uM0jZSd6jKqa+ltngyhrPLLnmHsvDtDyOJVZsScO3hUTUAoqGTKWG6xHadLx2jQarbtQgghhBBCCCGEqF8FlUwYc3P3ETJZICjWc3NqrrtkrPY9hOoUEDpy5EhdHlapN954g0mTJjFx4kQAPvzwQxYuXMgnn3zCY489VmH5L774otz3s2bN4rvvvmPp0qXcfPPN9bZdDS5ZZTgRWSYg5HOGkrEqMoSKSwJCPl5lKgBrFBBSGUK6DnnFdk+TaSGEEEIIIYQQQjSy6jKEWg1TWUJxg8BQeuyflt/IJWP1xWazsXnzZqZOneq5zWAwMGrUKNauXVujdRQWFmK32wkJCan0fqvVitVq9Xyfm5t7dhtdX9wlY1HdSm/zPUPJWBU9hEoDQmUzhIJL7syuchPMJgP+3ibyrQ6yCyUgJIQQQgghhBBCNJmCkpHzp/cQAlUx9M8/yt1kdTg9LWAarWRM13W+/fZbli1bRmpqKi6Xq9z98+fPr9F60tPTcTqdFaaVRUZGsm/fvhqt49FHH6V58+aMGjWq0vunT5/OM888U6N1NRp7EWQcVNfLZQiV9P2pZYZQka2kZMxcWclY9QGwIB8vFRCSxtJCCCGEEEIIIUTTqS5DqBLp+WpiuJdR8/QIro06TRl74IEHuOmmmzhy5Aj+/v4EBQWV+2osL730El9//TXff/89Fkvl9XJTp04lJyfH85WUlNRo21el1L2gu1TULyCq9HZPU+k69hAy1a6HEJQ2ls6S0fNCCCGEEEIIIUTTqa6HUCVSc1VD6XB/bzSt9j2B65QhNGfOHObPn8/FF19cl4d7hIWFYTQaSUlJKXd7SkoKUVFRVTxKee2113jppZf47bff6N69e5XLeXt74+1d+9SpBpVSpn9Q2V+aby
UZQg4bOIrU9SpLxlSGlk9lGULWXHC5ytUYluUOCOXIpDEhhBBCCCGEEKLp1DJDyDNyPrD2DaWhjhlCQUFBtG7duk5PWJbZbKZPnz4sXbrUc5vL5WLp0qUMGjSoyse98sorPPfcc/zyyy/07dv3rLej0VXWUBpKM4TshWBXkT5P/yCoZQ+hkoCQ7gJbfpWbElwyaSxbMoSEEEIIIYQQQoim4+khVMMMIXdAyL9uSTB1Cgg9/fTTPPPMMxQVFdXpScuaMmUKM2fOZPbs2ezdu5e7776bgoICz9Sxm2++uVzT6Zdffpknn3ySTz75hPj4eJKTk0lOTiY/v+qgx5+OO0Mo6rSAkCUItJKgjjtLyF3yZfYHY+UJXUWVBYS8LGD0Lr+OSrgzhKSHkBBCCCGEEEII0UR0vUyGUCVNpSvhzhCKCKxbQKhOJWPXXnstX331FREREcTHx+PlVb550ZYtW2q8ruuuu460tDSeeuopkpOT6dmzJ7/88oun0fSxY8cwlCl3+uCDD7DZbFx99dXl1jNt2jSefvrpuvw4jUvXq84Q0jTVWLowXU0aC2x+xobSUJoh5O11WnzPEgQFqSXriK30sZ6AkJSMCSGEEEIIIYQQTcNWAI6SSqFGyhCqU0DolltuYfPmzUyYMIHIyMg6NS8q69577+Xee++t9L7ly5eX+z4xMfGsnqvJ5SSBNQcMJgjvUPF+3xAVEDo9Q6iKcjGoIkMITgsIVc5dMpYjGUJCCCGEEEIIIUTTcGcHmSxg9qvRQ9LyVACpUTOEFi5cyOLFiznvvPPq9KTnNHd2UFgHMFXyS/NMGisJCLl7CFWTIeQeO1+uqXTZx1QTEAqSKWNCCCGEEEIIIUTTKts/qIZJN56SsYBGbCodGxtLYGDVGSuiGim71eXp/YPcfEsCQqdnCFUxch6qaCoNNQoIBftIyZgQQgghhBBCCNGkatk/CMqUjAU0YlPp119/nUceeeSvX77VFFJ2qsvT+we5nZ4hVHzmDCH32HlLXQJCvlIyJoQQQgghhBBCNKmCkoCQb80CQi6XTnq+O0OoEUvGJkyYQGFhIW3atMHX17dCU+nMzMw6bcw5IbmKCWNuvs3UZVGWuqxFD6GKAaHA8uuoRDNPU2kpGRNCCCGEEEIIIZqEO0Oohg2ls4vs2J06AGGN2VT6rbfeqtOTnfNsBZB5WF2P7Fb5MlX2EKpBU+mz6CGUU2TH5dIxGM6uQbgQQgghhBBCCCFqqbCkh5BfzQJC7v5BzXy9MJvqVPxV9yljog5S9gA6+EeCf3jly/i4M4RO7yFUecmY06Vjc5SUjJ3+R1CTgFBJDyGXDnlWh+d7IYQQQgghhBBCNBJPU+malYyluieM1bGhNNQiIJSbm+tpJJ2bm1vtstJwugo5SeoypE3Vy/hW0UOoipIxq8PpuV51hlB2lU/nbTLiazZSaHOSU2iXgJAQQgghhBBCCNHYPE2la5YhlJp7dg2loRYBoWbNmnHq1CkiIiIIDg5Gq2QMmq7raJqG0+msZA0Ca566rKb8y1MyVpSpIoTH1qrvA6IqXdw9ch7AYjo9IBSsLqvJEAI1aazQ5iSr0EZcqG+1ywohhBBCCCGEEKKeFdSuh1DaWTaUhloEhH7//XdCQlSwYtmyZXV+wnOaLV9dmv2rXqZshtCS/1OBoYjO0G5MpYu7+wd5mwwV+//UoGQMIMjXzMmcYrJl0pgQQgghhBBCCNH4/swZQsOHD6/0uqgFd4aQd0DVy5TNENr+JaDBZe+AsfJSruKqJoxBaUDIWn2Jn0waE0IIIYQQQgghmpCnh1DtMoQaJSB0uuzsbDZs2EBqaioul6vcfTfffHOdN+hvzRMQqkGGkFu/OyC2X5WLF9vVa+9TXUDoTCVjZSaNCSGEEEIIIYQQohE5rGAriRf41bCpdG5JU+nARmgqXdZPP/3EjTfeSH5+PoGBgeX6CWmaJgGhqnhKxqrJEDJ5g5cf2AsgIBpGPlXtKqscOQ/lA0
K6DpX0fQII8jEDkFUgASEhhBBCCCGEEKJRufsHGUylvYDPwJMh5F/3DKE6Dav/97//zW233UZ+fj7Z2dlkZWV5vjIzM+u8MX97NSkZAwhtrS4veqX6BtSUNpWutmRMd5U+dyXC/FVAKL3kD0oIIYQQQgghhBCNxN0/yDe0ykSO06WV9BCKCGzkkrETJ05w33334esrE6lqxVqSIVRdyRjAtZ9BzgloNfSMqyztIVRJbM9kUdHF4mw18t7SpdJ1uLuSp+YVn/H5hBBCCCGEEEIIUY9qOWFs3qYk8qwO4Ox6CNUpQ2js2LFs2rSpzk96zqpphlBI6xoFg6BMyVhlGUKaBqFt1PWMQ1Wuw/0HlJYnGUJCCCGEEEIIIUSjKixpKH2G/kEOp4tnftrNw9/uAOCaPi0ItFQ+gKomapwh9OOPP3quX3LJJTz88MPs2bOHbt264eVVfgMuv/zyOm/Q35q7SVR1PYRqqbi6gBBASBs4sRkyEqpcR3iAakKVKgEhIYQQQgghhBCicdUgQ8jmcHH77I2sPKiWfWBUO+67oN1ZPW2NA0Ljxo2rcNuzzz5b4TZN03A6nWe1UX9bNS0ZqwVPD6HKmkpDaYZQZtUBodKSMSu6rpdrEi6EEEIIIYQQQogG5O4h5Fd1QOjnnadYeTAdX7ORN67tyYVdo876aWscEDp9tLyog5qWjNVCsUP9XiymqgJCbdVltRlCKiBkc7jILXYQ5FP3lDMhhBBCCCGEEELUQl6yuqwmQ+iL9UcBuHt4m3oJBkEtewitXbuWBQsWlLvts88+o1WrVkRERHDnnXditUrZUZU8Y+frP0PIx1zFrzKkZGJZNQEhi5eRQIuKDaZJY2khhBBCCCGEEKJx5KfB7h/U9egelS5yICWPjYlZGA0a1/aLrbenrlVA6JlnnmH37t2e73fu3Mntt9/OqFGjeOyxx/jpp5+YPn16vW3c34rTDo6SYEt9ZgidqYeQu2SsIBWKc6tcT0RgSR+hXAnoCSGEEEIIIYQQjWLVm2AvgOie0H5spYt8uf4YAKM7RRJZcuxeH2oVENq+fTsjR470fP/1118zYMAAZs6cyZQpU3jnnXf45ptv6m3j/lbc5WJQrwGhaqeMAViCwC9cXc88XOV6wv1LJo3lS0BICCGEEEIIIYRocDknYOMsdX3kk2pS+GmKbE6+23IcgH8MiKvXp69VQCgrK4vIyEjP9ytWrOCiiy7yfN+vXz+SkpLqb+v+TtzlYkZvMNZfjx53hpB3VQEhUJPGoNrR8xGBJY2lJUNICCGEEEIIIYRoeH+8Ak4rtBwCbUZWushPO06SV+wgLsSX89pW3WOoLmoVEIqMjOTIkSMA2Gw2tmzZwsCBAz335+XlVRhBL0o0QENpgCK7aipdZYYQlJk0VnWGUOmkMekhJIQQQgghhBBCNKiMBNgyR12/oPLsIIAvSsrFbugfh8FQvxPBaxUQuvjii3nsscdYuXIlU6dOxdfXl6FDh3ru37FjB23atKnXDfzbaICR81C2qXQNAkI1mDSWlicZQkIIIYQQQgghRINa/hLoTmg3BloOqnSRPSdz2Z6UjZdR45q+Lep9E2o8dh7gueeeY/z48QwfPhx/f39mz56N2Wz23P/JJ58wZsyYet/IvwVbw2QInbGpNNSsZCygpKm0BISEEEIIIYQQQoiGk3YAdn2rro94osrFftubohbpEEFYSd/f+lSrgFBYWBh//PEHOTk5+Pv7YzSWD0LMmzcPf//6zYD523CXjJkbJiBk8aom2ctTMlZ1hlBpyZgEhIQQQgghhBBCiAbzx6ugu6DDJdC8Z5WLrTqUDsDwDuENshm1KhlzCwoKqhAMAggJCSmXMSTKaKiSMU9AqLoModYlC2dBYWali7hLxlJzpYeQEEIIIYQQQgjRINIPlmYHDX+kysUKrA62HssCYGjbP1FAqD7NmDGD+Ph4LBYLAwYMYMOGDVUuu3v3bq666iri4+PRNI233nqr8Tb0bD
VYU+kalIyZ/SCgubpeRR8hd8lYbrHDk3UkhBBCCCGEEEKIeuTODmp/UbXZQRuOZGJ36sSG+BAX6tsgm9KkAaG5c+cyZcoUpk2bxpYtW+jRowdjx44lNTW10uULCwtp3bo1L730ElFRUY28tWfJPXbeXL8ZQsW2GmQIwRnLxgJ9TJhN6s9BGksLIYQQQgghhBD1LP0Q7Jynrp//aLWLusvF6nvUfFlNGhB64403mDRpEhMnTqRz5858+OGH+Pr68sknn1S6fL9+/Xj11Ve5/vrr8fau/4ZKDaqBMoSKHSVj56ubMgZnnDSmaRrh/tJHSAghhBBCCCGEqDf7fobFT8D/7oV5t5RkB10IzXtV+7DVJQGhIQ0YEKpVU+n6ZLPZ2Lx5M1OnTvXcZjAYGDVqFGvXrq2357FarVitpQGO3Nzcelt37TakgUrGbDUoGYOaTRoL9OZEdpFkCAkhhBBCCCGEEGerIAPmTlDj5T00GF59dlBqXjH7kvPQNBjc5m8YEEpPT8fpdBIZGVnu9sjISPbt21dvzzN9+nSeeeaZeltfnTVAyZiu6zVrKg01mjTmzhBKy5PG0kIIIYQQQgghxFlJ3a2CQb5hMPAu8A6CyC4Q07vah605lAFAl+aBhPg13OCuJm8q3dCmTp1KTk6O5yspKalpNqQBMoSsJeVicIax81AmQ+gw6Hqli0QESsmYEEIIIYQQQghRL1JLkl1a9INhD8OAOyF+yBkftqoRysWgCTOEwsLCMBqNpKSklLs9JSWlXhtGe3t7/zn6DTXA2Pmy08DOmCEU0grQwJYH+akQEFlhEfekMSkZE0IIIYQQQgghzlLqHnUZ0bHGD9F13dM/qCEbSkMTZgiZzWb69OnD0qVLPbe5XC6WLl3KoEGDmmqzGo7NnSEUWG+rdJeLeRk1vIxn+FWavKFZS3U9bW+li4QHSIaQEEIIIYQQQghRL9JKMoQiOtf4IQlpBZzKKcZsMtAvPqSBNkxp0pKxKVOmMHPmTGbPns3evXu5++67KSgoYOLEiQDcfPPN5ZpO22w2tm3bxrZt27DZbJw4cYJt27Zx6FDVjZL/NNwlY/XYQ6iopiPn3WL6qMukjZXeHeEJCEkPISGEEEIIIYQQos50HVJLkjHCa54htOpgGgD94pvV/Fi/jpqsZAzguuuuIy0tjaeeeork5GR69uzJL7/84mk0fezYMQyG0pjVyZMn6dWrdDTba6+9xmuvvcbw4cNZvnx5Y29+7TRIyZjqIVTjP5IW/WHXd5C0vtK7pWRMCCGEEEIIIYSoB3nJUJwNmgHC2tf4YUv2qLY6w9uHN9CGlWrSgBDAvffey7333lvpfacHeeLj49GraIj8p9cATaXdJWNnHDnvFttfXR7fAC4XGMoniLlLxtLzbThdOkaDVm/bKoQQQgghhBBCnDPcrVpCWoOXpUYPySywsf5IJgAXdoluqC3z+NtPGftTcDrAUaSum+svIFRc24BQVDcw+UBxDmQcrHB3mL8ZTQOnSyezwFZv2ymEEEIIIYQQQpxT3OViEZ1q/JDf9qTgdOl0jg4kLtS3gTaslASEGoMtv/R6PZaMeXoImWsYEDJ6lekjVLFszGQ0EOpnBqSPkBBCCCGEEEIIUWee/kE1Dwj9sjsZgAu71t/k9epIQKgxuMvFjGY17aueFDtKAkKmWvwa3WVjVfQRCvNX2yd9hIQQQgghhBBCiDqqZYZQXrGdVQfVuPmLJCD0N+LOEKrHCWNQmiHkU9MMIYDYAeoyaUOld0cEqtpGGT0vhBBCCCGEEELUga5D2n51vYYBoWX707A5XbQO96NtRP3GDqoiAaHG0AANpaEOPYQAWvRTl+kHoDCzwt3u0fOSISSEEEIIIYQQQtRBznGw5YHBC0La1Oghv+w6BajsIE1rnAFPEhBqDA0UEKr1lDEAv1AIbaeuH99Y4e5wCQgJIYQQQgghhBB15y4XC20LJvMZFy+2O1m2Lw1onOlibhIQagwNVDJWbH
cB4F2bgBCUKRur2EeoebAPALtP5pzVtgkhhBBCCCGEEOektNr1D/rjQBpFdicxwT50jQlswA0rTwJCjeHPlCEEZRpLV+wjNLpTJAYNNiZmcTSj4Gw3UQghhBBCCCGEOLfUsqH0D9tOADC2S+OVi4EEhBqHtSRDqB5HzkPZptK1/DW6M4RObAanvdxdUUEWhrQNA+C7LSfOehsbTEYCbP0c/ncvzBoNv79QGngDKMiAVW/Bru+abBOFEEKICnQdVr0Jr7aD9R819dYIIYQQoiHUIiB0NKOAX3apcfPX9G3RkFtVgalRn+1cZWuYDCGro44ZQmHtwRIExTmQsgua9yp399V9WrDyYDrztxzngZHtMBgaL0J5Rk4H/PIYbJxZ/vbjG2DzpzD8EchKhE3/BXtJhpPRGzpd2thbKoQQQpTndMDCKbBltvp+0SOqrHzov5t2u8S5R9ehEc9ACyHEOcXlKp0wFn7mgNDMlYdx6XB+h3A6RTdeuRhIhlDjcGeumOu5ZKwkQ8hS24CQwQCxA9X1Q0sr3D2mcxQB3iaOZxWxIbHiJLImYyuAuRNKgkGa+hmG3A+XvA4hraEgFX5+CNa+p4JBfhHqcd/fBWkHmnTThRBCnOOsefDV9SoYpBmgw8Xq9qXPwrIX1QG6EI3hyB/wRid4pxeseQ+Kspp6i4QQ4u8l6wg4ilRiQkirahdNz7cyb9NxAP45rGbTyOqTBIQaQ0OVjNnrGBAC6FiyI7r3xwp3+ZiNXNJddTb/dvPxOm9fvcpPg08vhQOLwGSB6+bA7Yth9LPQ7w64Zz1c+JIKAsUOhH/Mgyl7oOUQsOWRNutqbv9oKcv2paLLTrcQQojGNv+fcOhXMPnAdV/ADV/BqKfVfSteViczhGhIuq7K6T+7AvJOQeZhWPIEvNEZfn5EZY4LIYQ4e9u/Upct+oGh+mP1z9YkYnW46BEbzMDWIY2wceVJQKgxNFhTaTVlrNYlYwAdL1VnKE9tVyVWp7m6j6pd/HnnKQqsjrPZzLNnzYfPr4STW8AnBG7+ETpdVn4ZkxkG3g0PH1SBovZjwOgF13yK1SeScOtRrjv+ApM+XcvVH65lbUJG0/wsQgghzj0HFsP+hWAwwS0/khk7ig+WJ/AJ49jVWZWL6X+8VnoCSYj6Zi+Gb26C36aB7oIe/4BL34KILmAvhA0fwfuD4NBvTb2lQjQ9XYffnlEno3d+q8p9hagpexFs/Fhd7z+p2kULrA5mrz0KwF3DWjdqM2k3CQg1hgYbO+9uKl2HgJBfGMSfp67vqZgl1KdlM+JDfSm0OT0NrpqEywnf3QHJO8EvHG5fAnEDavxw3S+c//N+FKtuYoxxM194T+fw0aPcMHMdT/1vl+c1FEIIIRqEvRgWPaquD7wbZ0w/7vp8My//so9nF+zh8i29OOKKRCvOxrnl86bdVvH39cersPcnMJrh0jdh3PvQdyLcvRpu+h6axUPuCfj8KvjxPjkAFue2jbNg1RuQuBK+ux3e66P6k0qVgaiJ7V9DUSYEx6kkjGp8symJnCI7rcL8GNMlqpE2sDwJCDWGBsoQKvaUjNXx19j5CnW5538V7tI0jfG9VZbQ7LWJOF11fAM82zfOJU+Wlold/xWEtavVw5ftT2VechT3uabgMvszQNvL0sBn6KAd47O1Rxk3YzWHUvPOvCIhhBCiLta+p3oJ+EfB8Ed5f9khNhzJxM9s5NLu0QxuG8FnqB3G3GVvy4G4qH+p+2D12+r6VbOg722lDaU1DdpcAHevgQF3qdu2zFaT8IQ4Fx1bD79MVdc7XqqqE7ISYcEDsO6Dptmm7CQ4shIOLIHdP0hv1D8zXS/9OxlwFxirnuGVV2zn/eUJANwxtBXGJhrkJAGhxtBQJWN1bSrt1vEyQIMTm9QbzWmu6xdLgLeJHcdz+HBFQu3Xv+Y9eK0dbJ9bt+3b9Amsm6Guj/sAYvvV6uG6rvPGr+oNM37QeAx3/AbN4gmxnWKh33P08U1lX3
Iel7yziinfbGPFgTQcTlfdtlUIIYQ4XXYS/PGauj7meTYnO3hr6UEAnr2iK+/9ozef3zGAQeMnk6EH0Mx2krU/z27CDRZ/Oy6XOpB12VUj806XV76c2Q8uelntbwGseInvf/6ZcTNWs+nPNGBEiIaUlwLf3Kz+X7pcCdd9Dg/uKp0E+fvzlR4zNahtX8Lb3WH2pfDlNTDvFpjRT21nyu7G3RZxZoeWQvp+NUyq103VLvre74dIy7PSKsyPa/rENtIGViQBocbQQCVj7qbSdeohBBAQCXGD1PW9P1W4OzLQwrTLuwDw5q8H2HWiimaDul7xjOba91WjwoI0+HEynNxWu21LOwCLHlPXL/g/6Dq+do8Hft2Twq4TufiZjfxzeBuI6ASTlkHsQEyOAr5qPpehbUOxOlzM33KCWz7ZwMDpv7Nsf2qtn0sIUXdH0gv4Yv1R9iXnNvWmCFG/fn1KTRmJG0xuuyu4/+utOF06l/dozvjeMZ7FxvRsxYG46wHw2zSD7cfqYeqTrRASlkFR9tmvq764XLDtKzX9M/tYU2/NuWHb53BsLXj5wUWvnHnUfI8bVJ9Gl4NO6x5hT1Ia/5i1np93nmqc7RWiqTisMO9WyE+G8I5w+Xvq/8XsByP+Tw2tsRfAzw83XunY3p/gf/9Sfb+axUN0T2jeG9BUhccHg+Hb26Dg79sb9VBqPjZHE5ywL8pWSQ07vwWnveaPcycz9L4JLFWPjz+cls8nq48A8OSlnTCbmi4sIwGhxtBgJWPqn6POGUJQWjZWybQxgKt6x3BhlygcLp0H526r2HPHVqimVUyPgR/+BSe2qLrbxSWplsFx4LSqKHZNx5q6nOrNz2mFNiNh6EO1/rEKrA5PdtCtQ+IJ8TOrO3xD4KqZ4OWL+fhaPutziG/vGsSEgXE08/UiPd/K3Z9vZltSdq2fsyqncoqY9r9dfLv5OPklDbqzC2289dsBhrz0O0/9b1e9PZcQfxU5hXZeWrSPC15bzojXlvPE97uYMGs9ecW1+NAV4s8sbT/sng+A68KXefS7nRzPKqJFMx+ev7JrhcaRA659BJtmpruWwMwvPvdkAdeKy6mCQN/fDa+1hznjVKPgwyvq4Qc6Sye3wSdj4Ye71PSVeROlPK6hFaSroCTAiKkQXIMz0JrG52EPkKYH0tGQxLNBP2JzuPjXl1uYtfJww26vaDy2Qlj/H9VbavNs2L/obx1UOCOXC77/JxxbozI7rvu8/HRogwEuexsMXqqVRRXHTfUqYZkK9ugu6DkB7tsG/1wBdy5TJZ5drgQ02PUdfDjkz/E+XxlbIThsdXro278dZNQbK7jqgzVkF9ZtHbWi67B3AXx1g6py+f5O1UPqPyPUMS6ohtF7F6i2JosehQVT1HHrl9fDzAsg4Xc1uGnAP6t9qucX7sXu1Dm/QzgXdIxs+J+tGpp+js3gzs3NJSgoiJycHAIDT4vaOe1qMlV9e6G5iijftxVCWtfbartOW0y+1cHyh84nPsyvbivJOQFvdgY0mLIXAqMrLJJZYGPMm3+Qnm/l9vNa8eSlndUdDit8db36w6/MkAfgvAfgo+GQfRTaXwTXf6neVKuz5j2VXWQOgH+tg6AWtfqRDqfl8885mzmYmk+AxcTKR0YQ7Gsuv9Dqt9VOkk8ITN4MviHYHC4mfbaJFQfSCPUzM/+ewbQMrePrWsY/52xi8e4UQPV7GtQ6lPVHMiks2dnXNFj16AXEBPuc9XMJ8Vew43g293yxheNZRQCYDBo+XkbyrA7uGt6Gxy7q2MRbKEQ9+OEe2PYFdLyU6UH/x0crDuNl1Pj6zkH0adms0ofYfrgP87bZ/O7syYbBH9X8fyH3JGz9HLZ8BjllyhmMZnDaAA0GT4YLnlRTORuTvUhN61n/IaCrTBXNALY8GPEEDH+kcbfnXPLt7bDrW4jsBncur7aXBYDLpfOflYd5adE+xhg28h/zm+iagVXhN3DfsaFkEcgN/WOZdlmXszsZ+W
dhL1IHed7+ENX9zNlT9cVWCEfXQMJS9b/b9zZoPbxxnlvXVdbJ4sfLv1cAWILhtsUQ0fCfwUmZhWw4ksmw9uGEB3g3+PNVS9fV67HufRXwmfAttD6/8mV/f14F0fyj4N4NYAlqmG06ugY+v1odP3a6HK7+b+X/vye3wneTIOMgoEGvCRAUq5a1BEOXK7GZg9lyLIt+8SGN16Pm1HbYt1AFtU5shmYt4aYf1GUNfb/1OA/O3e75vkvzQD6/fQDN/BroMyxlDyx6RDUSdwvrAAWpKqlBM0D8UDi+Sf1eqtPtWpWAUIVl+1OZ+N+NmAwaix8cRpvw+q0igjPEPE4jAaHUfbBvgfqjPblFpQh2vER9Ne999h8OLic8G6KuP5ygpnvVA13XafvEIpwunfWPjyQy0FL3lc0aDcc3qA+ki1+vNGDz+74Ubvt0EwBf3jGAwa2C4dtb1YeKly9c/Coc+QN2f692PgfcBRe+pF6/U9vVczitENAcfEPBJxi6Xwu9by7/ROmHVJTbUUzh2DdIaXc98aG+NR7Bt2R3Mv/+Zjt5VgcRAd58MKE3fVqGVFzQaVeBqtTd6s3zCpXel291cN1Ha9l9Mpf4UF9eHN+NQIsXft4mWob4YqjlG+mh1HxGv7kCXYf4UF8SMwo993WKDsTszKco7Sh39I/g2u7NILSNyqoS4m9I13U+X3eU5xbsxeZ0ERviw6MXdmRY+3A2Hsnk9tmbMBsN/DZlOHGhvk29uULUXXYSvNMTXA4WD/qCfy5Tnx1vXteDK3tVc5IjIwHXe/0w6E6usT/DM/+6jc7Nq9mRc7lg6TOw5h11FhnUQUDX8dD9Oojsqk6wbP5U3Rc/FCbMrzQo5HTpHM0owGQw1N//38ltMP9O1U8BoOvVMOY5SFwF8yeha0Z+G/w5PQZeQERA7fdjvtmYxNrDGdw3sh2t6npi7O9q/yJ10k4zwB2/QUyfahffeyqXx7/fydZj2QDcfX4bHrF/iLb5vwDYjL78xzqG9xxX0DYmnA9u7ENsSDV/J0VZJWfKjdDhIjDV/qDf4XSxPyWPLcey2XosC6vdRbCvF818zfSJb8aIDhE1W1FRNmydo7ZJd6l9wJPb1L6vsyTrIDhOZc23Ha1aDPiFq3344xvhwC+QcQjiBqo+TKFtav2zAOp9YdkLsGu+2icuq9u1MPYF8K/hz1QXRVkqSJiwVH0fFAuthqv2Dql7VIAoOA7uWNqg22FzuBj71h8cSS/AaNAY3j6cq/u0YGyXqMYLWBRlqZPiRVlweBmsfF3dPn4WdL+m6sfZi1SZVuZh6DcJLnmt/rft4K8w9yZVbtzmArjh6+r/f2wFKqDlfp8vQ/fy41fLGJ5OG0H/nt1587qeDT/WfO8CmDsBOC3EEBgDN/8IYW3L3ZxTaGf+1uPsOZnLoDahjO4cyZ6Tudz08QZsThdX9W7BigOppOfb6BQdyBd3DCit/KgPLif8Nk21O9GdapBR/ztV+WxkZ8hPg18eU8F1z8/SAtqPVcezBi+VUOIbqv5v/CMhukeVSSYpucVc9cEajmcVMWloK564pHP9/SxlSECoGp4XJ3EngauehYOLq164RT81mjOqW92fsDgHXio5wH8iBbzOInBThs3hov3/LQJg+7QxBPmcRWbTru9USiJAl/Fw5YcV33hsBSz5+CmKTu4h2MvBkAgrpuTt6uzjP76BNiPQdZ1X569m89bNeLUcwIhOkfRp2YyE1Hz0bV8wLullTJyWAj/wHhjzggpCpR2A726D5J0c8OvLZTn/xurQaRfhz9V9WnBlrxgiqgl8/bonhTvnbELXoV98M2b8o3e1y3NsPXwyRl0f8YQ6e+rlQ2puMVe+v4YT2UXlFu/RIoj/Tuxf7k1I13XsTr3Kus9Hvt3ON5uOM7pzJP+5qQ/bj+ew6mAaXWKCON/nMI7PrsbLUWbKmcEE/e6A4Y+q8rZqHEkv4LvNxxnfO4bWDRBZFqK+zV6TyLQfVQPEMZ0jef
WaHp73Ll3XufmTDaw8mM5FXaP4YEL1By+NLvekanRvNKtph2HtIbRd42dbiL+GRY/C+g/JihxI36T7cbp0HhzVnvtH1WBS5o+TYctnrHd15MWI15l/z5DKD5IcVvjhbvUZDtByCPS5VZ1NPn1fY+8C1bfHlqdOglz+HlmFdlYnpLPqYDrbj+eQkFbap+GmgS154pJOdc8C0XVY8y4sfVY1Z/WPVCde2o1Wd7tcnJh1Ay1O/kKCK5oJxleZdlVfLuxaMUu5Mnani2d+2s3n61QfIh8vI09d1pnr+8U2/MHOX0FxDswYCHknYfB9KghXhdS8Yj5acZjZaxJxuHT8vU08cmEHbhrYEg3gwGIVxEjeAcBKenFr8RT8LN785+a+DIzxhrUzVN8Vk0Xtx5zcqjIc9JJ9Pr9wddKx720QULOxyluOZfHPOZtJy7NWucy0yzozcUir6ld0eLnK1ss9Uendun8UWnGOOvguyxIEaFCcXfFB4Z1g9DPqgLCMfKuD7UnZbEvKZsfxbOxOnXB/b2J87FyeP5f4g7PBUawWDmwBbS9Qz7HlM0AH7yAYN0P1cGoI825VJ26NZhhyP5w3BcwlQb2CDPSPR6FlHibRuyMvRb7GQ5f2om1EJfuXLiccW6cyKTSD6pFqCVTBtIAzl778548EXvx5H15GDbuz9DC0e4sgXryyG11jGijrBtR704aZKoDiOq1EffRzMOS+M6/j8HLVLgMNbv8VvUVfsgrtHM8q5GR2MS2a+dA5OrDWJ5EB9X4+/05wOaDdWLh2Nnj5cCg1j8NpBVgdLqwOFzlFdrIKbGQU2LA7Xfh7m+hasI4exRtpHeqNUXeqoGfKTgAcuoHVrq749xpPn7E3gV8YxXYn3205Tt+WIXSIqllbk2K7k0Op+XRpHlj5e23uSRUwK8qC1iPUyYnILqqMOX0/+EXAzf+DyM5sPZbFnLVHWbjzFNYyPYLMJgNeBo0Cm5OLukYx4x+9SUjL58b/rCGucBcBPmaGDBzENUN7nN3xr9vvL8Afr6jrHS+FsS9Wnsl0eIV6b2s1tM5JI+n5Vq77aC0JaQXEhfiy4L7zCLQ0QHUSEhCqlufFeSKcQJNVRfXaXAAdL1Znzk5sURlDB5eAvVCd2Rh0D/SZqN70QH2wedfwANxdkmUwwZPp9ZaOmltsp/vTSwDY//yFeJvOMnV3+1xV/+iyQ8vz4LK3ILSt2t6EZfDTfRUbQGpGuPYz6KTG5b60aF+108jCySbeK5O3Lo8nJm+7SrkE6HQZrpB2sOZdDLqdHN2Xi63TOUE4RoPmGXlvMmhMGdOeu4a1qfAmeyS9gMvfXUWe1cE1fVrw4vhueBlr0CLr50dgw0fqemALGDUNul7NofRCnvlpNyeyiyi0OskssGFzumgX4c/ndwwgMtDCigNpPPPjbg6nFxDm701MsIXOzQN5eGxHQvzMnMopYtgry7A7debfM5jecWVKBHKOq3rUglRy8SXb5Ud4kD8+eaq5GD7NYNTTauf+NLqu882mJJ7+cQ9FdifBvl7MntifHrHBZ/55hWgiNoeLYa8sIzm3mHtHtOXfo9uhFaarMyoG9f61PzmPi97+A5euMiky8m2sOJBGYkYBRTYXVruTZn5mnrq0M6M6n2W9dX4q7P9ZTehI2QM5x9QBtsOqdpa7jldniJq1gs2fqJIX62lNr43eEN2dvLAeZLe8iMAOwwn0MckB6bmuIB3e7AqOIu4xPMnPhZ0Y3zuG16/pUbO/jZwT6O/0QnNaucX2KCMuuYFbTz/oLcpWZ2ATV6r9iyvehx7XVb/eg7+if3ktmu5idsAknk4fUaEvqsXL4OlP2DEqgHdv6EW7yFr2P3Q5Vdr9xlnq+06XwaVvg18ooEpFnvhhF9sPHGGJ9yNEatn84ezGvfbJjOnTkScv7VztTn52oY1/fbmF1Ycy0DToEBnAvmR1YmV050heGt+NUP8mLkGpJadLZ19yLh2jAusnQ+KnB2Dzf1Wbgr
tWlx70l3Eyu4iPViTw9cYkz8HYRV2jmHZZF6KCTgso6rpqYPv9XeAoYoHP5dybdT1t/GwsjngH06ktlW9HeCcVnMo7qb43WeCSN6DXjdVu/uajWdzyyQbyrQ4CvE30jAumd1wzgn29yC60cyg1n4UlTa5fuao71/Yr0xvJVgB5yZCforZ5/Yfq9pDW0G4se5LzWJWQxRE9irWuzhwjighvF4PZyijW0dtwmEhXCpo7u8GnWWnW0OHlcHS1OlgHFVAZ8QQYTSzbn8rkL7d6ekV6XgKymGd+lniDah1Ay/PU/l2LvqXHBCc2qz4kp7YBGicGTuPZtKG0jwzgmj6x9ZOxt/Nb1QtFM8LtS9TzlyiwOpi9NpE/1q7j/eJHCdHyWensynxG0H/wSK69YADGjAMqiyhpvaqqKEir+Bw+IXDVLGg7ssrNSMuzMuK15eRbHbxydXd6xzVj/pbjzFl3lLxiBwYNJg5pxUNjOuBjrueyRKddNYQuyXpTFQshKsuj61WquqGmn9/fq15ouUEdGJ33DCmF5ZseB/l40b9VCDf0j62+P4y9CE7tUH9XR9fAod8AHbpdoyb+Gb34YesJHvxmW437WLeP9Gf6+G4kZRTy3bdzuMv4E0OMpdPIdM1AQa9JTDh6CdtO5BPmb2b5wyPw966+pDTf6uAfM9ex43gOV/dpwUvju2Eqe6zlcqpAWeJKlSFz+2+lJ80K0uGzcZCyE5s5mKmB0/nueGngr2NUAEPahrF8fyoJaaocq0dsMHPvHIglOwG2fYFj29eYCpI9j8nSA8iKHEjra16E8PY1e3FOl/A7zBkP6OqkRa8JdVtPDWQX2rhh5nr2nsolOsjCN/8cVH2W5VmSgFA1PC/OYwEEth+qMoAq+yPKPanSw/b8r+J9Bi9oNay0tKy6sx2p++D9AeoD5dHEevs5UnOL6f/iUgwaJLx4cf0cgBxerlIU3Qc9/pHqDPjRVer7oFiOt/sHH65No1A3c8XFlzL8vKEAfLA8gZd/2QfAExd3QtNg+f40dp7IoW2EP73jgtl+PIcNRzLpEBnA/+4dgmXf9+g/3I3mTtcFfnP24jnnrXTr2p2JQ+JpFxnAwh2n+GZTkieNeUSHcF6/tqcnU6fQ5uDKGWvYn5JH35bN+OrOgTULBoFKt989H36dBrnH1W3N4tVZrJ4TPDuwh1LzmTBrPcm5xbQM9aVzdCCLdiVXuspWYX7Mntifz9YmMmvVEfq3CuGbfw4qXcBeBJ9cqD74I7vybMQbfLIxnSt6Nuftftnwy1RI26uWHT8Tvds1pOfbSMuzkpZv5esNxzzPHeBtIs/qwM9sZOYtfRncpn5KEhuEy6V6SVmC1P9Dyd+sruvkFjvw9zY1XqpwE3M4XRgNWv0HDvJTVSDbZAEvH/AOrLBz437Lb+ygxXebj/P6vKXc67uE61tkYkjdrQ4UfMOg/YWqpKDtSJ5YcIgv1p95+tDNg1ry+MV1zGDIS1GlqZXt0J4uuKX6uwVVchHeCVfaPpyp+/Gyl2b3uXSNaY5b+Ea7kPaRAdw0sCVX9GruCdan5BZzLLOQPnHN6nbWsCqHl6vGoN2vgc7jGq8HRiM6nJaPzemiY1T1OzR/GiU9Jk76dmRw5pO0Cffn5/uH1u7EzeInYO177Ha15FpeYsF9w1VJlK6rM/y/PqXKO8wBcN1n6sTWaQqsDn7bm0KRzYmX0YDN6SJv+TvcWTgTl67xgP0eDoWNYlD7KAa0UmeIY5v5svJQOv/+Zhvp+TbMJgMDWoXQLz6EvvHNaBvuT3iAd9XvH/ZimH9HyeRSDS6c7jnIcjhd/Hd1Im/8eoAiuxOzycArvbO5Ys+DaI4iEvVIJtn+TbI5nhsHtmTikPgK5fDbk7KZ/NVWjmUW4ms28vb1vRjZMYKPVx3h1cX7sTldhAd48+rV3Tm/puVETajY7mTe5uMsWr6K8/J/ISY6mivGjIGorjXOpCnH5VR9q36crL6/9WeIH1JhseX7U7
n78y2eabW94oK5f2S7M79me/6nhoQAc8zXM7B4Je0MJ9B9mqH1u0MFShw2VXbUfiyEtFIH4fsWqP6QJ1TrAfpMVCPuKymDKRsMGtg6hE9u7YevufyBqq7rvLBwL7NWHcGgwTs39OLS7s1VRtqqN0vLJ9363gZjnudwjs7F76yk2O6iQ2QAJ3OKyCuu2Nj86u6hvDYyUGUNRfUo37ulKAuWTS89mRg/lCUdn+OeH0/hcOlEB1no3bIZPVsEE2IsYtiaWwkvOMBxPYw5wf/isfsfQKusl6bTgWPhw5i2fALAh45LedlxPToGBrYOYVSnSDpHB9IpOrD2PVTykmHGAJXtNPwx1WAcdaLmqw3HePf3g6Tnq33x4ZZDfKw9h0k/w4AHS7DK+PPyVUG45B2QfgDQYMTjaiBMJT/nY9/t4OuNSXSLCeJ//xri+TxMzS3m2QV7WLBDBfrG947hjWt71u7nrIy9WPXXSd2nSqqOrlLbOPpZVRmgaei6TmJGIesOZ7DxSCbhAd7cM6Jt9dknBelY3+6Dty2b6fYb+Mh5GREB3kQFWTicVlAuMPjClV25cUBJxklxrvo/OrICkneq16zC3+vtcPFrYDBwKDWPy95dTZHdSYfIAJr5eeFtMhJgMRHqZybEzxuTUaPI5iTf6uCn7SfJKLChaepEut2pc8/5bfh3HyPffPYeXXOW082QCMBqZxfutU8mi0AeGNWOB0ZVHVSxOVzcPnsjKw+me24b2TGC9/7RuzRwt+pN+O1p9Tfxz5UVSsMKc9I5+d5FtLUfIF0PZILjKbr06M9Ng1rSo0UQWsnvYl9yHluOZXF5i2ICVr8Ie37wrEO3BFOEBd/i0mMwXTOi9b4Jzp9au/fNvGT4YAgUprOu2WVs7v40k4a2rtG0r593nmLmysN0iAygd1wzOkUHciK7kD2n8jieWcjF3aLLnbjMKrBx6383sP14DmH+3nzzz4ENXt0hAaFqeF6cP/5D4Hl3nHnn+cBiteOV7W68pqsDLjeDSdXED7lf1Rme7vgmmDUSguLgwZ319nMczShg+KvL8TMb2f3shfW2XlJ2q4DEsXXl65v7TVLZM94BvLp4HzOWqUygMH8zLZr5eqZyPXFxJyYNq7xxdlqelYveXkl6vpUbB8RxVZ8WfDH3Sx7Pn04xZt4w3k7zAVdx48D4CmendF1n7sYkpv24G6vDRVSghYu6RdEm3J9VB9P5ZXcy4QHeLJx8XvVlYlWxF6mU5zXvqINUUGf/B96tPthM3iRlFnLjrPUcy1S/f6NBY+LgeG4f2orMAhtHMwp5YeFeTmQXEepnpsjupNDm5L8T+5XWubtcMH+SqkP1DYVJy9hREMTl763GbDKw8fFRBHlrJbWs7+E0+fGvgDf55VT5Nw0vo8ZDYzrwjwFx3PX5ZlYfysBsMnDbkFb0bdmMnnHBhNXHGVJdV6+NyVJlM3CXS+ftpQdZvj+VQB9V2x8Z6E2/+BAGhhURePQ39cGXuBqKMgFwmnxJ0cJYTl/eKb6QZLs/EQHe3DSwJf8YEPeXO7tbUy6XzgcrEnjv90NYvAx0bh5I5+hAurcIpk/LZjSva2Nxl1P1EVn9drmbdf8oCqP7c8DSgz/0XqzN9GXvqTyKbE7CA7yJCPSmY1QgD4/tUL/12KfRdZ1HX5vBo/kvEarlVb1gUBy5I19m9AIzWYV2BrQKYXj7cHrFBePjZcLby8BX648xa5XKpGsf6c+HE/rU7kNV1+GLa+DQryrY0+kyiOzKPns4X21NZ9WRXFpo6dxkXMIFhm0YNJ1izYffY/7J/tjr2HI8j/VHMrE5nMRryfTUEhhj2szFhvUAvOsYx+uOawCNMH9vhrULY9vxbA6XnPEa1SmSt67vecYzcTVyYjN8emnpZ1LzXjDqaayx57HmcCbrDmfQPiKAS7pHN0gD2IS0fN5ZepC4EF/uG9mu5oH4yuSeUmdG85LVQYs1F704h1PJKaSnp5Kl+5MdN5oLxt1KQL
gqwz6YksepnGIGtg5t0nGt5exfpKZnOYq4x/4APzv7M+f2/gxtF1679RRmor/dA82ay5P2W7GFdeH5URF4bfxITcEBtV9x/RcQ3b3cQ9PyrMxek8icdUfJKTr9oE7nFe9PuFYr6SFi8oHYfipDus1IaN4TDEZS84r59zfby+38u3mbDLRo5sOl3Ztz57DW+Ln/ljMSVAlb0nqVZXflRyrTDtiXnMvD83aw84T6fB3YOoQXr+ym/ndP7YCvb4ScYxTiw2TbPSx19cHLqDGmSxQXdolieIdwvlp/jFcX78fh0mnRzIeZN/elU3TpTu6ek7nc//VWDqbmA3Dr4Hgeu6jjn6b5sa7rvPTLPr5cfwyz0YC/xUReYTHjbT/xkOkbLNppv6uYPjD032ogx5mGcbicKlC44uWSg3LUQeWlb1RY9KftJ5nyzTbsTp3eccE8NKYDg9qE1vwkgfugr8RJPYQ9I2czatiwM2yjC1a+BsteBHRVcnHtZxAci93pYuORTJbsSWHepiQKbE4GtQ7l41v7VggGuem6zuPf7+KrDccwGTS+6bOX3jufVXd6+anSpaAWMGgytB+Dw+ni6g/Xsi0pmyFtQ5lz2wA0DVLzrOQVO9A0SEwv4M45m3G6dN66rifjesVU/fPs/BZ+vA/sBRTrXnzlvIDDHSbx1D9GqvdCezF8Ph6OrsbpF8GFuf/HQXsYH07ow4Vdyx+0FtocLNxxiveXHeKi7K94xGsuABt9zuO27FvJ08tnEbQO82NU50hGd46kd1yz6k+m6Tp8ea2qfIjuAXcsxaobmL/lBO8vP0RSpiqViwvxZfIFbbmsR3MsKdvQt39F5sH1+GftxVuzk6UHUNisA9Ed+mJoP1a9Z5TtkWIvVpmBW2ar72MHwrCHoO0oz7HWrhM5XPbeKnQdvrt7UKU9PpfsTubOOZsBWDD5vLqXj1nzVGbv5v+WZnQBmP1xjZ/FAmsPthzN4kBKHvuT88goKD/BKjrIwqtX9+C8dpWfaP1283HWz3+HV70+wqZ5o9+1Gu9IVRLscLrYeSKHrzYc45tNxzHg4oOBOYx1LEPftwDNXTbo5hsGLQepst/48zytSgptDsbNWM2BlHyGtA3ls9sGnPHEaXahjRd/3ss3m9RJ7gs6RjDz5r4YDRqncoq46O2VDChew+teH+CvFZNnieb6nMkc8WrDiodHEB7gjdOl88av+zmWWcSwdmEM7xDO8wv28uP2k/iajdw3sh1v/noAq8NF77hgnrqsC930Axg/vUi91pe/p0aul+FwurhzzmY27TvM194v0llLxOkXifG2RRV7cuWcUO8znt+dpgLMPW9UlyZvXMX5vPv1/+ic8DGjjervBYNJ/V12ukxlqXkHquCUybvi8X5xjvrcSVxJoqkVY/OnYcVM5+hA3riuR7UnoDYmZvKPmevKlTtW5qEx7fnXiLacyinm5k82cCg1n2a+Xnx956Aal+idDQkIVaM2L06V0g6UNKJeoHbI3dqNUcGh9mNUBgSocqs54yCiC9yz5qy3321/ch5j3/qDMH8zm/5vdL2t18NhVT/bqe2ql1KZ1FKbw8Xdn29m6b7Ucg/514g2PDy2+skEKw+mcdPHGwD1v6nrEOmrMeXCzlzRK/aMO257T+Xyry+2cDi9fHd3k0HjqzsH0i+++r47Z2QrVPW7G2eqnx3U7278RxDVjZTcYiZ/uRUvk8YTF3eu0OwzNbeYiZ9uZPdJlWXVMSqARfcPVVlQO+bC6nfUmQqDSdXQxp+Hrutc9PZK9iXn8ewVXbi4WzQHTmYTOv9qOhTvYKcrnqvtzxDg50eYvzexIb7cd0E7urVQH5LFdif3fbWVJXtSym3Llb1UiUKtshGcdtj4sfr5C9LUB6ruUinGfmGqXLLtSBj2CHj7o+s6z/y0h0/XJHpWEU4WlxvXcKlxPb0Mh8qtXjd4oZ1Ws52vW/jUOZaZjkvIwR+zycBVvVtw38i2RAfV3+S15JxiEjMKsD
td2J0ugnzM9IoNPqtsDZvDxe/7UvEyalzQMaLaHeq8Yjv//mZ7hd9TWc2DLFzVpwUPjmpf8+2y5qkJEwdUTzG8fNHtRaXp7iUcuoGFroF85LiUPXp8uftahvry8S39aBvhT7HdySerj7Bkdwo9Y4O5rl9suYOuWtN1En56hZabX8KkuXBGdMM4+B61wxPSWgXN9y9SBzP56oyPs8tV2Ee/iCW48jM9fxxIY8o320nPtxLs68V/bupL/1Y1/N/fOAsW/lsFfO9cjj2sIw/P284P21RJg6bBgFYhHM0oxJybyPmG7fzq7MNJyu8UxgT7MKx9OMPbhzO4TQiB69+E5S8CsDd6HHdm3EBSbmnPNE0Do6bhcOl0jApg1i19adHsLFKFM4/Ax6PV/2lkV8hKBJs6EM7R/djjaskevSVznKPI823JDf3juKxHc9qE+5VP8a6DAquDd38/xMerDnt2iAa0CmHGjb1rF4jOToKd36jyg7KfpWdwJLAvj+n3sT5NHSjGh/ry6IUdubBrFJqmYXe6SM4pJirIcnZBqsroetUnkjbMVAdEuout3v24Kud+xnRpzoc31bEf1opXVO+W05l84LwHYfBkdC8fthzLZkVJmv3h9AISUlVGFajXpk24P3aXjsul0ysumIkDYwj540nY/YMnSO/hE6IO4Lpdg95mBPvTithwJJMNRzLZeiybUzlFuMq8tUQEePPwqFZcVTwfwx+vqhNJ3oEqUNVKBQiW7U/lX19sodDmJNBi4olLOnFt39N6/RRkwLxbIHElLs3Em0GP8m5yF8/d7v0FgEu6RfPi+G4Vz97np2FL3sPrOy18tF4FsvrHh/DxrX0JaKAeDam5xaTlq4BCTpGd/cl5bD2Wxc4TOcQ08+XVq7vTvqTk7pVf9vH+8tKy+jgthbe9Zng+J48F9mZ7lpnOxiRaa6fQ3FkD4Z1UBmBIa1XCGtKqdLKRvQi2fQlr31NNbkHtfw66V/UOOq3H2Zfrj/HEDzvRdbisR3Nev6ZH7YOpuq4ykLbOIcunJZdk/Zs8SxSLHxhWs5MaB39D/+52tOJsCk3BvBcylc9TW5FbJlPnTMEgN6dL5+Fvt3Nq2xI+83oJL82JfsGTaMMeqrDsjGWHeHXxfgIspmq39e3fDvLmbwfw9zax6P6hxIb4kppXTFJmIc2DfYgMsODSdZbsSWHJihXcmPo6/QwqCKcbvdGiu6uTaIUZqsTKOxAm/swbO8y88/shWof5sfjBYXgZDSSmFzBz5WF+3HaSvJKMkqhACx/3OkSXjU+Ay44juDXftZ3Osqxw9pzK9ZyUdGseZOHhCztwRY+Yyvcb3JN7jd447ljGF0f8+HBFAqdyVFAiPMCb+0a247q+sZX+LZzMyOHF+etZkKCmFfaMDea1a3pU3lsIYOsXsHBKaa+kyG7YB03mR/sA3ll+hKMZhVzeoznv3NCr8scDD3y9lR+2nWRQ61C+nDSg9hnNB39VZZPuzH9LkPo/iuyM3v+fPLXGwZx1R8s9xGw00CsumL7xzVi445RnCMxVvVswuE0oHaIC8Pc2sTohnT8OpLFkTwq6rrM09DXaFGxVgYe+t6kEAf8IKMpCTz/I+p8/pdXJhURq2Z7nOuRqzgLXQA4Y29Om2yAuHtyLmBBf/M0mDAYNl0unwObg6R/38N2W44QHePPzfUNrNY1t/eEMNh3N4pbB8eVOQC3bl8oDc7cxoXUh/858BkPWYWx48aT9Vsx9b+Gpy7sw5Zvt/LT9ZIV1mgwaH9/aj+Htw9mUqAaB5BTZiSCLhZb/I5wssltdQvDNX5T7nFTB2518tSEJb5OBuTd1oOfSCWqwj1+EKo9rc4Gqylj/EeycVxrEaztalVhGda2wPck5xZz/2jK6Ofbwn+Y/0Sxja+UvhtGsgqEt+qv3z0NLVWN1p41izcLFxc+T7h2H0aCRVWjHy6gxokMERXYnuUV2z/9I9xbBJGUWcsWM1WQW2BjRIZwOUYFsOZrF/p
Q8T+8op64zf4vqWXZp92g2H83iVE4x0UEWPrutf+3LsOtIAkLVqJeAUFknNsOqt0rSo0teSs2oGk4Nf0ztrH9zE8QOUDW79WRbUjbjZqwmJtiH1Y9VTBVvDHnFdo6kF3A4rQAvo4GLu0XV6E375V/28UHJTtH43jH83yWda5WdUGB1sHDnKQ6m5JGQVsCJrCJuP69V+Rrys6Xr6iDlp/uhMF2VCY54XL3RG6oPWuXn5fDR51+gndjCde01YrQMlUrrLk/xDlJT2cr0e/hk1RGeXbCn3E5vFBn87D2VEC2fwl6T8L381SoPRBxOF//bdpL1RzLYeiybQ2n56Dr8e3R7Jo+soomp06G2yWBS6dAnt5aUq+078+sT2AIufpXXjrbhvWWHAJ23BlnpkzqPmJO/YtDVG7lL19iod+APZ3fWujqzQ2+NERfNDRnc0baAK/Ln4p+p6pqLLJE8avk/fkxWZXreJgO3Do7n7vPbEOxbt+yVYruTxbuTmbfpOKsT0ivUX8c38+Kh+KP0808jLaw/h7w6kGdzMbRtGPFBRshJotg/lk/Xn2DO2qP4mo30b6VKJ/aeyuO7zcc9Z5VGdYrghSu7EekDpOyCE5uxpx0kO6+AnPxC9qYW8UVBP7ZoXXj68q50iwliz6kcdp3IZWtSFntP5Xn6ZV3RszmvXdPjzAez6QdV+n7qHnSjNwmDX+H5Y51Zvj8VH6x0144w2LSPUZY9dLGX1o/bInuSF9CGNHMLPjvkzdLcOAotEfxrRFu+XH+swg5n15hAmvmayS2yk1fsoH+rEKaMbl8hG8/udHE0o4BDqQU4XTrD40z4L3nIk+67PeRCetz9qSpnO501H5ZPV2NfdZfKoLv8PdXfrRJpeVYmfbaJbUnZmI0GXr2mO1f0rOZsLqhg/kfDVBnA2Ok4+t/F/XO3sXDHKUwGjav7tOCfw9t4phWdyC5iR1I2ybnFpOVZySyw0TbCn/M7hNMm3L/i+92mT1SwSXfhih3Iki4vsyPbQs/YYAa0CuVwej6TPttMer6VMH8zL1zZjTGdI2u3s2svUj3IvrpeTb2J6g4Tf+bQyXQ2z3mccc5f8S6TaZBOMBcWTycddQBpNhnoEBnA4Lah3HFe6xrtYBbaHHy7+Tj7k/NIzChgz8lcsgrVcwxpG8r2pBzyrQ6igyzcN7IdqblWEjMK1P/wkPjyZ9pcTtj7I2yZo2r3Sz43dTQKwnuSZIrnlNXMwVwjx4vMFGi+XNi7PR1MpyjY9j2dXerAa4+rJROcT+I0B3myYDpFB+J0uTiSXoDdqeNrNtKnZTMGtg4lLsSXAIuJAIsXEQHeNA/2KX+m1eVU79MJy1QZXupe9V5vNKtLW4H6G3UUqWmkLQdD3CCcmonMlGPYjm4i5pgqMT/QfBwXHx6P0WTmtynD694jwJoPn16CPe0QJ2x+ZBJAVPu+GIY/TKoWxoYjmczdmOTJiCmrZ2wwdw1vw+jOkVWfUXa5VDbJ0dXqZz68vHyfLL9wtaPe7w7PGVy708Wp7GK2HMvioyVb6Zv7K7caF9PGoMo8EgL7c3jAs3Tr2ouoIAtfbzjGEz/swunSGdI2lDev61n1NDGnQ2UY7fwGNCPHzn+bLwv6sn/XRjrmrCbcmEffjq3o1qYlmrc/oKnPxJzjagrU8U2ADpqRnNCezElrw6+2rhhievHf2waW+xxJzinm552nWLInmQCLF5MvaEv3FsGen/HnnafYdSKHlqF+dIgKIC7El8wCGym5xSRlFbE5MZONiVme4RNm7IwzrsKMgzWuLhzWowENX7ORl67qTkpOMS/8rErBn76sM8OaZdDip+sxF6WhewegjXkBR48JXPXROrYnZXNxKyMzWq9H2zSrYu8ywGlpxlE9imDrSUJQWVcFhkCye/6TmDH3qQa/Zew6kcMri/fzxwG1H3LjgDievaJr3cu0XU44uhpHZHeu+u9utidlYzRotAn3o1N0ILquMtkTMwoxGlQQoVdsMD
5mI8v2p3LyyD7eMb5JN0MiTl3jdce1fON9FSM6RTGqcyQjO0bUOHDtSk/A+sH5+Dhz+cE5mEXtniXQx8yB1HySMgvRAJNRIz3fpjIfru3B+N5VT/pzOF1c/591bDqaRWyIDxpauc9Ei5cBHy+j5z3QZICXe2cxPvdztGNry6/M6A03zYf488i3Ohj+yjIyCmw8PLYD6flW5qw9iqPkcz8uxJfr+sVy06CWqsns8U3wzS0qqGHygcvfhe7XkFNkZ9XBdH7dk8zv+1I9gbSescE8eWln+rQs069yyxz48V4Aii54jjsPDvBk/UUGevPPYW24oX/cGXv16LrOvE3HeW7BHvKsDrxNBh4e24GJQ1pV+BtyOF0cOLgf700fEndkLl5O9T+S5ArnP85L+N0ymnmTR1YbPDyeVcgFr6/A5nDxya19K+3Bs+VoJs6UvfTKW4Zp74/qvcxgUl/uBuHBLeGyt9UI+ZLP2deX7Ofd3w+haap5freYIDpEBdA+MsBzUrrQ5mD6z/sqBI1Od+vgeKYN9Uebd6uaVA0qGGiyVGhGnqn786NzMPOdQ8kI7IKXyVBu8jCoTfT1MlJod3r2Vw0afDlpIANbh1a7LbWh67ra7yjKgvn/9AxY+t45lJ9iH+L3wwV4GTWu7xfH1qQsdp3IRdPgzWvLZ80dSs3jncW7uD1hMj04yH5XC653Pcfj4/pxTV91TGZ3unjrtwPMWJaAQYMPJvRhbJcoNbnr00tKJ1CeruUQOP8xz4mFqry2eD/vLTtEqzA/ltwcg9eBBbDnR/V5XjYzrBKnzC15NP96Nhh78cUdA4gN8eXx+bv4bW/lJ27H9WzOnlO5HEjJp2tMIN/8c1CVQes5647y9I+7Pfv1bcL9mHP7gLpXAtSBBISqUe8BIbf0Q7DjaxVESN1TentEFxUBbTNSfSjUk7UJGdwwcx1tI/z5bcrweltvY3A4XczdlETrMH8Gtam/N7gGkZ+mgkL7F6rvYwfClR+oM3VuDpuqiz+8Ao78oUaUnj65ACCgOQz6F/S5BbzLR4czC2yc/+oyckvSlls086F3XDMebZ1I859vLX18u9Hqg83d2Nw7sOR6QLlg0bxNSTz37VraGk7w0jAL7QMdKvvH5VTNHU9tV+WBp6etAlkE8pr9ata5OpGr+1KEN75YaetXyPi4QsYm/4eAIhX5PuqKwFuzE2oq9nzoq9dpAHS7htSYMaxONbLmUAZrEjI4kV3E4Dah/N8lJdlV7sDbr09BZgK62Z8DQ9/lyd3RbEhUZ67NRgM9YoPo3yqEga1DGdQ69Iw7irqu8+P2kzz7055yacCtwvzwNhloqSUzIPMnLmMF4VqO5/5TegjrXJ1opZ2ii+EYXjg4SQTv2C/nO+cwdGCgYS/DDdtJ14OY6zwfr4AwsgtttHId4zHveQzXtqrpDlUoCO+F38hHVN+cMmUABVYHC3ac5Invd+Fw6YzqpGqzK82asxVS/PsreK1/F6PuIFUPZpJtCtt1Va9tNGhc1TuG6/rF0TUmUPUuObVDlZTtnl+xXh2V9r/F1Z7Fzr7s9BvIhGFd2HIsi1/3pFSaFutnNjJ5ZDt6tAhm5cE0Vh5MZ++pXM/O7WDDLt7w+pAoLRO7bmS6cwITH5xObOgZRkOf2KLS8EsmY6jeDy9U2hS12O7kwbnbPD21erQIKul1EkKX5oHEBPuUnjG1FcB/L1J/+63Px/GP73hw3k5+2n4SL6PGBzf2OftG1QD7f1FlodZc8I+C6+ZAbP/SHy+7iDtmb2LvKXWQ1zM2mEfGdmBw22r6f53arsosEld5soAANTL4jt/YmGHm9k83klvsoEuEN6+eb6GjdhTD6rchfT/p4QOZ4j2NzcdyKbCV/m36eBm5eXBL7ugbSljKarRDv6rhAbEDVJZIi34k5zu4fXZp1qNbXIgv0y7rzMhOkRxKzePOOZs9ZXGnu6R7NHcNa0OEn4FmCydhPrSo9M74oeS1u4Jn98cx70D5nb
cwfzPv3tDb8zlRbHfy7aLfuHz7Pwl0ZuGI6U/x9d/yn7XJ/GflYU8zZFA70a5q9m7MJgMDgnO5yHcfg9hBbM4mTNbsqh9QQ6/Zr+E95zhAO2NPhtpw992ojMXLwNguUXSLCaJ1uB9twv2JC/Gt/Vl1p119fu35nyqHKXSXi2lq1Hb/O9RnSPpBOLUNfc+PaCUHXul6IM/ZJ/A/1xC1PCpz4WRJFsL43jG8NL77mbNRXE74372w/Us1yCO4JWQdqfnP4B/lyTR0y9T92Wnuhb3TOH539WZ3ciE7TuRUOEFwWY/mdIsJZPaaoxWmjFbFaNAY6HuCZ13v0sZVevBo9Y1is96R5bnR7NJbsdsVTw7+PHJhB+7pUKgyxwsz1D7ijd+o0iZUvyx3j5sbB8QxqV8I8ce+h1Pb0bOO4Mo4jLEoo9w2HNfDmOW4mG+c51OIhWHtw7lrWGscLp3EjALWHc7g553qNTEZNCZf0I77Rrattz5yR9ILuO3TjRxJr/z/vyptm5l4wfwpA3J+BkBvfQHauPchsGaT5kg/qBpGb/sS7IWkB3VlaOpDFOlVn0C6uJuaWHSmnz0ps5CL317pydrRNJW5k5pn9RzghfqZuXFAHDcObKl6Xem66g2Ze1LtWzms6r20TDnMZ2sTeep/u8s91/D24dw5rDWDWodWzPApyFA9uRJ+V9+f/zgMf8Szv+fO6J3x+yHPe3uvuGBu6B/HFV4b8P7hDtBdZPe4k/GHLuZwRiE+XkamXtyR6/rF1nogzamcIh79bqcnsNi3ZTOGtA3z/F9vS8pmXUKG53ULIp+bjL9yq2kxYZr6DNGN3mghrdXrEhynys4MXqXBHKMJNAOrdyWQmHSc5hYbw84bhrHTpRDRiX2JSWz83/v0y1xAR0Pl74loBhhwN1zwBJhL9zlmrTzM8wtVYPb5cV2ZMLCSSVJlrDucwaKdp9iXnMf+lDzyih30jgtmWLtwzu8Q4cnUR9dVyfPyl0r7ZIHqxRrbH3rcwP8KOnMq38X5HcLpEBmArsOqQ+l8tjaRlQfTy03ZcvMzG3ns4k7cdIbtPCsuF6x5G+dvz2LExQFXDPe7HuSRCVcwoqNqd5GSW0yhzek5Weah62og0bYvcJiDeDLiXb46pAIktwxqSVSQD7PXJJKcqz4HnruiCzcNii99vK1AlTIm/A6HflfHJ50ug8H3Q4uaZdbmFds5/9XlnkBrh8gAdhzPJrfYwaD4AAa39MfPlsmBrctI2b0Sr6wENrrascA5iIN6C7yMGjNv7uvpn6brOsv2p3I0o5AgHy/8vU38siuZ+VtLpxRGBHjzv3uHnLGKYcWBNB6cu422Ef58NKFP7Xt/nSUJCFWjwQJCZWUkwOq3SsZIlug8To0OrCfL9qUy8dONdIsJ4qfJ59XbekUldF3tcCx6VI3s9fJTb1hFWVCQCmn7y/eVAnWg1nKISk0MaqG+jxtU7YjqlNxiMvJttArzK3+2ZtVbqi/A6c9RlpefSlEFtSNiKwRrTtXLu2kGT3DAhonPHaN4yzEen8BQujYPwt9iwqhprDiQ5gmsWLByn+l7JhkX4qWVCXyYLOpscv9JKjWzEkU2Z+VnooqyVEPzxJWgGdHHvsjyoHG8suSg58DZLczfmyt7RHJdO522gS71geIoVq91cDzJeTb+74ed/LZXlTTGBPtwdZ8WXN2nBbFF+9T/5p4fcWcmZBuC2epswwBtN76UD5DZdaPnZyy0ROLlLMLLXro9TqMPWu8J5OVkE3DgWwwl60zXA9nhas0BPRbN7EtooC/tvbPplv4zmrs3V3gnVfbR9apyDSuX7Uvlrs83Y3W4POU3ecV2zEYDQ+PMXO+7iZ6JHxNoVWfkf3f25HH77SQTSoDFxNguUUy+oC0tqwq8ZCfB8Q2QcRgyEyBlF3rK7tLyBEA3WdBanw8GE/bCHPJyMnEZvNDMvjiNPmzKtLA+L4wEvTmHXdGcIgQdA0acXGDey40+azjfuhyABF
c0D9j/RXz383i3mhTxchxW+P05NbYa1MTDi16pdHKJy6Xz8i/7+OiPwxXus3gZaB3mT8tAjSnpT9KuYAuFxkCej5vFliwf9iXn4WXUeP/GPoyuj2CQW/ohmHujyrYzeMHIJ1Ufi5IAYKFNlVx9ujrR09R1WPtwnrq0sycN31Wcx54dmwjf/TGRR38qv36jN/bQDqzp/iLLMkP4asMxrA4XfVs24+Nb+hHkW1Iek7ZfTTO0F8DwR3ENn0pSViHbk7JZvOIPmqf+wQWGbfQ17C//v+x+bU0+nHIEkuXyocAQgDmyPa6ILvjG9qBNl36Y/Up7O+QV23lp0T4OpuYTH+pLy1A/9pzM9UwCMuDiba/3uMy4Dqvuxfc+V5LV4Vr8olQvgqxCOyaDxujOkXRpHkjHqED6tw6pfBxr8i749GLVA6D1CLjyI5JdQaw4kEpEoIV2Ef5EB/lwMDWPdQkZbEzMxJFzCv+ikzSzJtG2aDeDtR3EGco3Fc/VfVjn6swqV1d20oE2kQF0i/Ylws/I4gO5bE5x4tSNdDMcYYBhL70NB3FgJF1rhs0nkt2+/Vnh6kF2oZ02EX58fEu/eutfU2hzMP79NexLzsNsNBDqbya2mS9X9GrOZT2a1//YWqdd7aRv+kRl31QlvCPO3reS0PxSdqRr7DqRw+ajWew+meMJyE2+oC1TRreveQDC5VKTTbfOUd8bzepESFh79TsvzlGfh7quPr+8/VW5QfsLIbA5ZB1V5QCHluI8vAKjrbRvWZoexDzncH539qRZTHsG9ujM7pN5fL/tRLkAUayfkyvambFmnSIv4xQUZRBmthPu7STE7CA0KIDo0Ga0MBfgtekjdSbaN0xNokraUL4HY4lscxRBrfugJa5SP0N0T7jpe/AtX+46Z20iT5YJGvSIDSbA28SukzlkF9rxp5A4LZVxLW0M7xRDRvRQsopVKe23W457AhZlaRpc0aM5U0Z3qJ+JVafRdZ3k3GL2ncpjX3IeBg3iw/yID/WjyO5k67EsthzLpsDqYHCbUC7oGFHa+23zbFVq6ShWjYovfVONfnZn3RvNpSe8spNUifG+Bao3oVvzXnDD16xJNfHT9pNEBfrQLtKf+FA/DAZwlJzU6BAVUOMy0m1J2aw+lE63mCB6xgUTaPHC7nRxIquI9HwrXWOCav3/bXe6GPvWHxxOK6BjVABPXNLpzP3FTu8R2PNGuPStcvuTqbnFvLZkP/O3nCBaT+Zyw1ruN32HWXOywDSaR223U2Bz0TzIwsxb+tKleR378lDa0/P5hXsrTFRzC7SY6BgdSFSghaggCz0izYy1/4Zp3XsVJxbXQqopikB7hqfflg0TK5w9WOgcwHpXJ3TAz0snOiKCjq1a0r9VCPFhfqw6mM6SPcmsO6xOND48tgP/GtG2mmeq/Od2uvTqT0jqumoUbTCqATXmM5wAK6PY7iSv2EGhzYGv2USAxdSo/c+OblmC5X93Eqll4TT6YLziHeh+bdUPSNuvsrp3f6+OJSZ8h6vVCN75/SBv/Xaw3KLukqtqA1u6rv7WjZVn3FTn9PfMskwGjRA/M6l5pe/JXkaN6CAfYoJ9uHNYa0/gqzo7j+cwfdFeEtLy+c9NfWs82dnp0jFojT/IBf6CAaEZM2bw6quvkpycTI8ePXj33Xfp379/lcvPmzePJ598ksTERNq1a8fLL7/MxRdXXlZwukYJCLkdXQsLHlQTowbfB2Oeq7dVL9p5iru/2EL/+BC+uWvQmR8gzl72MfjhHhW0OJ1vmEprbDUMWg9Xdf71+c9vL1aTEQ7+qs7gWvNUOYE1t3zGwGnSDGHss0dhtYTRKSaU5iEBaD7BKmAT3QOateK7LSd48vtt2Jwu/Hx8+NeINtw8KL7cB5Hd6WLVwXQW7DhFXrEdH7ORKD2doWH5nNe1tcpQ8o+s1YdfBQ4bLHhATUgBaNEP/aJXOerdni0HjpG9dynm4+to5zxIV+0IflrFHe5igy+7nbHscs
ZxgJZ063MeV3Uw43V8ncqucKf0gqpL7nOralBn9FKv8ZEVcGILOX7xLMpqwR8nNW72XsaAk3PQ8ktSSP3C1WNObVcf/GUciRjFN/4T8G/RlS4xQXRuHli+PCIvRZVEbfxYBRdBnSHrdZPKyIjuCQYD6w9ncPvsTeRbHUSRQW/DQcYYN3GhYaNnR+i4HsYsvzs575Jb6NWyGYE+XnXvl2LNh5Nb0RN+R9v9fe3OyANWzUJRYDwB9gyMhaUH2WkdbuRt4y3sz3TyytU9Kp5dOpOEZaqEJE8FFeh4KYx9Qe1oneZkdhEbE1Wvk81HszicVoDN6cKMnf94vcH5xu3k6xZusk1lq67KKE0GjRk39lbpy/XNmqcyHdzTMVoNhys/VAesALpO+vH9rPn1exyJawjS8/DXionzdxFoT8PPXr63y1LTcJaFXc+eohAScjRyTpuMM6pTBO/e0LtiwHX7XPj+TkBT056yj8LJbaVjoEskuKL53dWLw3o0gwx7GGrYSTOt6vcWQGVvRHaFyC4lX13V76bMDt3eU7m88+t+xiY8yzjtD2y6kTvtU1juKh8c7BwdyGvX9KjQl61KSRvUeFt7oQq6db4cev5DBROzjqqeSlmJ6ufNOlpaQlCGy+DFqYBubNC6831OW1YXtcRJ1Tvg5pLS6B6xwYT4mQn186ZlqG/5TLQG5CzpKxHgbWrcncu0/Wrowv6f1ftfaFsIa6feQ+MGVvpZl1dsZ8uxbLyMWt2mX7pcKuvay0e9N56WVVtjTjsndq1k9/K5DMhdTJAzq/z9Jgv4hmLTjWQU62hOO2FaDiZnzTKEPDpdBpe8Cf7hqqwzaX1JH8YdFCdtxZJ3WulJ7AC4cV5pL6AydF1n8e5kvt6YxMqD6eUCPF5GraRkt0P50qASRzMKePf3Q/y2N4Vwf2/iw/xoHebHFT1jav6/1RTSDqjMylPbKrlTU/1ZvHzKZK2V3N7+Qhh0j2ok+xeZsJiaW8z+lDwGtwmrXcnepk9g4UOgO9U+ZnQP9X/o0wwKM6EoE/vxLXglb/M85CfnQO6334sLA73jgvnopr616kNTneNZhXy5/hi5xXZsDhcOp07bSH/OaxtGl+ZBlf9s7mmzGQmq5DnvpAoCOO0qqOoqyWR3OcESxNZ0jUX7c+hv2MdQwy5POfRxc2t8B00ieMANbEnVWbjzFBsTMzmYkl9ppk1Z95zfhofHdmiSA/Q/u6NHjxC9dDLmYyXHOV3Gq/e2+PPUe3/2UbXfu/t72DUfT9D2wpdh4F2e9fy6J4VHvt1OdJAPt53Xist6RNc6G6027E4XV32whp0ncmgX4U/3FsH4mY2sPJju6TkbYDExvlcM1/aLpVNUYKN8Zje1v1RAaO7cudx88818+OGHDBgwgLfeeot58+axf/9+IiIqRuzWrFnDsGHDmD59OpdeeilffvklL7/8Mlu2bKFr14oNp07XqAEhUAe5SetUIyuvOky/qsL8LceZ8s12hrYLY87tA+ptveIMXC41ISz7qGqE5heuDoAiOjXdzog1H/JT1JdmVH9nJh8IjCapwMjl763y1Lp3iAzgxoFxtI1QJQVfrD/m6ed0UdcoXrqqe/VjNhuarqsU8N+fLwl0aaqRXMoetRNURrHuRTb+FOgWnBhoqaWW651SKc2ospiG3KcOYGvKXqSCcX5hakfeYFTbemSFaiSraTDkwRqnuFKUrZobr/ug/A6ub5g62NI0HE4nrqxjmAvLlz+c9IpjiXkUAUPvYlz/9nXvAVEVXVe114mr1GQG7yB1MOZyqINvW74KjqYfVF+ZCeXrtH1C1GShHjeUa0ZfZ0XZKkNu/Ufqb8BgUg38u1+rDgYq60fkcuFIO0hmwkaM278kNGU1doOFOW3f4FRQL6KDfIgKstAtJqju/V1qQtfVxJVfpqrXzuyvAqegMttOK205XTb+7DJ25sXCcRUagWsaKosmvhmD2oQyqlNk1Wcuf7pfjdsty2hWO3ntxmJtPY
psSwuyC+3sOpHDj9tPsuZQKrH6KYa2MPLI8Cj8HNkq4yllt/pyB+kqYwlWWQ8miwqMFeeo4LVmxHXNp2TEjmVNQjorDqSxPzmP0Z0juef8trVvbntsHSx5UmW8nYlmUNmawS1VQ/PWI1QfIG+VpaDrOnlWh8oS0SGz0MamxEw2JWZxJKOA4e3Dua5fbP1MbxRNw2lX2SXbvlSl/DnHKy2f9TD5qACPX4R67/cOKA1MOG3qc8Fpg46XqAOn6vYBirLVgVTyDvU/Mehez99eddLyrCzZk4yGRreYINpH+TfogVWTctpVI/VVb1Zedg+ApoKQHS5SB6ply/fPBQd/hXm3VnsiEM2AHj+UjFaXk9zqSoqc6u+yZ2xw/TfZb2Aul853W46TXWjH21VI8+zNxLdsRduelQcAnS6doxkF7DyRw/qSZviJ6QX0jW/GmM5RjO4c2bCf+X8HLqf6P1zxMpQdTuLlp7KNy+p4KQx/tMKkSyjTp6iROF06NoerwkmxoxkFHM0opF98yBl7Zf3d/KUCQgMGDKBfv3689957ALhcLmJjY5k8eTKPPfZYheWvu+46CgoKWLBggee2gQMH0rNnTz788MMzPp/7xfl27X78/Buny3dDWHkwnS/WH2NM50j+c3M9HHSJv63UvGI+WZXI5+uOVpnee+8IldL/p4mY555UfYV2ziu9LaSNyr5q0Q+a9ybbtyU/7Uzh2y0n2J6UTctgM/9oa2VMSBrx9sNoKTtLJ3y0HARxg9Xj3Rkafwa2QhVgPLBY9aAqU9rgoRlVUCxukAqCNO/95zoT6rSrDIyMgyqAdPoo2vqSuleVbZYtEzCaVZDF5K2yRJw2lQliKyx/QGGywD++Ub//ppB+EL67o+LZb4NJ/T3HD4XgOHanO/hpbw4F3uEM7N2bkb3aY/EykpFvZcuxbLIKbEQFWYgOshAd7FPz0fX2Ylg8Vb0uzXuqTLTo7tVm9KXnW9l36v/bu/egqKv/j+Ov9QKUy0UiF1RADUXzAnhBoRlRQx0zJ6ZvZc78FFGbsUFzhz8y+jkw2UzYOE5ammaNkZW3NHBGE3VA7JfpGBKmzkTl+NPyJ6jlIm6mxu7vj80tEnS57bq7z8fMzsBnz8d9f/DN4ex7zzmfeo3uF970mwjrr4431bWnHJuo156SLn7f5EwcSY7/g6fWSEOfcS3mlrhw3FFgPX3A8cY9LNZRqO/eR+r+19eh0R2Tl/BeDbccRaHrVxxF7Yabjt/Jbg87lmAHGO+vvtZf3PrDsXzs9s/+z5uOAshNqxQc5bgTkT+z/urYp+byj46NlG9ec9yE4YFwKbSX44MS472XwPgLdxcmfMbPRx2zgP73fxx/4yXHmOvheMcSzVEvNFkIwv3DawpCN2/e1IMPPqjt27crIyPDeTwzM1MWi0U7d+6845yYmBjl5OTIbDY7j+Xn56u4uFjHjx+/o/2NGzd048bfS0vq6uoUExOjXi8WqlOg91eJnxwWpWX/4RcS91Z3/Za2VZxz3BnlynX9n+W6Art00n9PHaRpCfe4O5OnnP/WcSvd3qOk7jHNNrt24091C+js3X/0/7zpuNPb7//YMPSBcClqaNuW4vmaSz84NsY+VSRdPd98u85BjllgkUMcRYieLu5f1FEa/nRslN3wV1HW0MkxsHJhloDXsDVIv1+R/rjiWMLw5w0pKPjvze+D7uMlKwAA4E7W36Trv/61JJwPVrzF1atXFR0dLYvFotDQu+8b1vKdm9rR5cuX1dDQIJOp8WaeJpNJ33/f9K2va2pqmmxfU9P01PuCggK99tprdxw/v3Z264K+z6yVtHaOp6OAN/uvNz0dAdAR6iWV//VY7dFIAAAAAHerr6+/vwtC7pCbm6ucnBzn9xaLRbGxsTp37tw9fzhAe7ldpf3555/ds3cV/B45B3cj5+Bu5Bw8gbyDu5FzaCm73a76+nr17HnvrTI8WhCKiIhQ586dVVtb2+h4bW2tIiObvutLZGRki9
oHBgYqMPDOTSBDQ0P5hYLbhYSEkHdwK3IO7kbOwd3IOXgCeQd3I+fQEq5OfvHodvMBAQEaMWKESktLncdsNptKS0uVktL0rdRTUlIatZek/fv3N9seAAAAAAAAjXl8yVhOTo4yMzM1cuRIJScna+XKlbJarcrKypIkzZo1S7169VJBQYEkadGiRUpLS9OKFSs0depUbdmyRRUVFVq/fr0nLwMAAAAAAMBreLwgNH36dF26dEl5eXmqqalRYmKiSkpKnBtHnzt3Tp06/T2RKTU1VZs2bdKSJUv06quvqn///iouLtaQIUNcer3AwEDl5+c3uYwM6CjkHdyNnIO7kXNwN3IOnkDewd3IOXQkj952HgAAAAAAAO7n0T2EAAAAAAAA4H4UhAAAAAAAAPwMBSEAAAAAAAA/Q0EIAAAAAADAz/hkQWjNmjXq06ePgoKCNHr0aB09evSu7T/77DMNHDhQQUFBGjp0qL744gs3RQpf0ZKcKywslMFgaPQICgpyY7Twdl9++aWmTZumnj17ymAwqLi4+J7nlJeXa/jw4QoMDFRcXJwKCws7PE74lpbmXXl5+R19ncFgUE1NjXsChlcrKCjQqFGjFBwcrB49eigjI0PV1dX3PI8xHdqiNXnHuA5tsXbtWg0bNkwhISEKCQlRSkqK9uzZc9dz6OfQnnyuILR161bl5OQoPz9flZWVSkhI0OTJk3Xx4sUm23/99deaMWOG5s6dq2+//VYZGRnKyMjQyZMn3Rw5vFVLc06SQkJCdOHCBefj7NmzbowY3s5qtSohIUFr1qxxqf2ZM2c0depUjR8/XlVVVTKbzZo3b5727t3bwZHCl7Q0726rrq5u1N/16NGjgyKELzl48KCys7N15MgR7d+/X7du3dKkSZNktVqbPYcxHdqqNXknMa5D6/Xu3VvLli3TsWPHVFFRoQkTJuipp57SqVOnmmxPP4f25nO3nR89erRGjRql1atXS5JsNpuio6O1cOFCvfLKK3e0nz59uqxWq3bt2uU8NmbMGCUmJmrdunVuixveq6U5V1hYKLPZLIvF4uZI4YsMBoOKioqUkZHRbJvFixdr9+7djQYLzz//vCwWi0pKStwQJXyNK3lXXl6u8ePH68qVKwoLC3NbbPBNly5dUo8ePXTw4EGNHTu2yTaM6dDeXMk7xnVob+Hh4Vq+fLnmzp17x3P0c2hvPjVD6ObNmzp27JjS09Odxzp16qT09HQdPny4yXMOHz7cqL0kTZ48udn2wD+1Juck6dq1a4qNjVV0dPRdPwUA2gP9HDwpMTFRUVFRmjhxog4dOuTpcOCl6urqJDneKDWHvg7tzZW8kxjXoX00NDRoy5YtslqtSklJabIN/Rzam08VhC5fvqyGhgaZTKZGx00mU7N7FtTU1LSoPfBPrcm5+Ph4bdiwQTt37tQnn3wim82m1NRU/fLLL+4IGX6ouX7u6tWrun79uoeigq+LiorSunXrtGPHDu3YsUPR0dEaN26cKisrPR0avIzNZpPZbNZjjz2mIUOGNNuOMR3ak6t5x7gObXXixAkZjUYFBgZq/vz5Kioq0qOPPtpkW/o5tLcung4A8DcpKSmNqv6pqakaNGiQ3nvvPb3++usejAwA2k98fLzi4+Od36empur06dN666239PHHH3swMnib7OxsnTx5Ul999ZWnQ4EfcTXvGNehreLj41VVVaW6ujpt375dmZmZOnjwYLNFIaA9+dQMoYiICHXu3Fm1tbWNjtfW1ioyMrLJcyIjI1vUHvin1uTcv3Xt2lVJSUn66aefOiJEoNl+LiQkRA888ICHooI/Sk5Opq9DiyxYsEC7du3SgQMH1Lt377u2ZUyH9tKSvPs3xnVoqYCAAMXFxWnEiBEqKChQQkKCVq1a1WRb+jm0N58qCAUEBGjEiBEqLS11HrPZbCotLW12HWZKSkqj9pK0f//+ZtsD/9SanPu3hoYGnThxQlFRUR0VJvwc/RzuF1VVVfR1cIndbt
eCBQtUVFSksrIy9e3b957n0NehrVqTd//GuA5tZbPZdOPGjSafo59De/O5JWM5OTnKzMzUyJEjlZycrJUrV8pqtSorK0uSNGvWLPXq1UsFBQWSpEWLFiktLU0rVqzQ1KlTtWXLFlVUVGj9+vWevAx4kZbm3NKlSzVmzBjFxcXJYrFo+fLlOnv2rObNm+fJy4AXuXbtWqNPHs+cOaOqqiqFh4crJiZGubm5On/+vDZu3ChJmj9/vlavXq2XX35Zc+bMUVlZmbZt26bdu3d76hLghVqadytXrlTfvn01ePBg/fHHH/rggw9UVlamffv2eeoS4EWys7O1adMm7dy5U8HBwc79MUJDQ50zGxnTob21Ju8Y16EtcnNzNWXKFMXExKi+vl6bNm1SeXm59u7dK4l+Dm5g90HvvPOOPSYmxh4QEGBPTk62HzlyxPlcWlqaPTMzs1H7bdu22QcMGGAPCAiwDx482L579243Rwxv15KcM5vNzrYmk8n+xBNP2CsrKz0QNbzVgQMH7JLueNzOs8zMTHtaWtod5yQmJtoDAgLs/fr1s3/44YdujxveraV59+abb9ofeeQRe1BQkD08PNw+btw4e1lZmWeCh9dpKtckNeq7GNOhvbUm7xjXoS3mzJljj42NtQcEBNgffvhh++OPP27ft2+f83n6OXQ0g91ut7uzAAUAAAAAAADP8qk9hAAAAAAAAHBvFIQAAAAAAAD8DAUhAAAAAAAAP0NBCAAAAAAAwM9QEAIAAAAAAPAzFIQAAAAAAAD8DAUhAAAAAAAAP0NBCAAAAAAAwM9QEAIAAGjC7NmzlZGR4fbXLSwslMFgkMFgkNlsdumc2bNnO88pLi7u0PgAAIBv6OLpAAAAANzNYDDc9fn8/HytWrVKdrvdTRE1FhISourqanXr1s2l9qtWrdKyZcsUFRXVwZEBAABfQUEIAAD4nQsXLji/3rp1q/Ly8lRdXe08ZjQaZTQaPRGaJEfBKjIy0uX2oaGhCg0N7cCIAACAr2HJGAAA8DuRkZHOR2hoqLMAc/thNBrvWDI2btw4LVy4UGazWd27d5fJZNL7778vq9WqrKwsBQcHKy4uTnv27Gn0WidPntSUKVNkNBplMpk0c+ZMXb58ucUxv/vuu+rfv7+CgoJkMpn0zDPPtPXHAAAA/BgFIQAAABd99NFHioiI0NGjR7Vw4UK9+OKLevbZZ5WamqrKykpNmjRJM2fO1O+//y5JslgsmjBhgpKSklRRUaGSkhLV1tbqueeea9HrVlRU6KWXXtLSpUtVXV2tkpISjR07tiMuEQAA+AmWjAEAALgoISFBS5YskSTl5uZq2bJlioiI0AsvvCBJysvL09q1a/Xdd99pzJgxWr16tZKSkvTGG284/40NGzYoOjpaP/zwgwYMGODS6547d07dunXTk08+qeDgYMXGxiopKan9LxAAAPgNZggBAAC4aNiwYc6vO3furIceekhDhw51HjOZTJKkixcvSpKOHz+uAwcOOPckMhqNGjhwoCTp9OnTLr/uxIkTFRsbq379+mnmzJn69NNPnbOQAAAAWoOCEAAAgIu6du3a6HuDwdDo2O27l9lsNknStWvXNG3aNFVVVTV6/Pjjjy1a8hUcHKzKykpt3rxZUVFRysvLU0JCgiwWS9svCgAA+CWWjAEAAHSQ4cOHa8eOHerTp4+6dGnbsKtLly5KT09Xenq68vPzFRYWprKyMj399NPtFC0AAPAnzBACAADoINnZ2frtt980Y8YMffPNNzp9+rT27t2rrKwsNTQ0uPzv7Nq1S2+//baqqqp09uxZbdy4UTabTfHx8R0YPQAA8GUUhAAAADpIz549dejQITU0NGjSpEkaOnSozGazwsLC1KmT68OwsLAwff7555owYYIGDRqkdevWafPmzRo8eHAHRg8AAHyZwW632z0dBAAAABwKCwtlNptbtT+QwWBQUVGRMjIy2j0uAADgW5ghBAAAcJ+pq6uT0WjU4sWLXW
o/f/58GY3GDo4KAAD4EmYIAQAA3Efq6+tVW1srybFULCIi4p7nXLx4UVevXpUkRUVFqVu3bh0aIwAA8H4UhAAAAAAAAPwMS8YAAAAAAAD8DAUhAAAAAAAAP0NBCAAAAAAAwM9QEAIAAAAAAPAzFIQAAAAAAAD8DAUhAAAAAAAAP0NBCAAAAAAAwM9QEAIAAAAAAPAz/w/FHpIPwWlCNgAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# From the name, it looks like shimmer should be in dB but the curve does not look it. Dividing by 10 is roughly the same.\n", + "plt.plot(os_xs, opensmile_feats.shimmerLocaldB_sma3nz / 10)\n", + "shimmer_sma3 = torch.nn.functional.avg_pool1d(shimmer, kernel_size=3, padding=1, stride=1, count_include_pad=False)\n", + "plt.plot(xs, shimmer_sma3[0])\n", + "plt.ylim((0, 0.4))\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Shimmer\")\n", + "plt.legend([\"OpenSmile\", \"SpeechBrain\"])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "319ad1e5-4d2d-45e4-8b96-e8de655c280f", + "metadata": {}, + "source": [ + "Strangely, the OpenSmile computation of HNR (in dB) does not seem to match SpeechBrain and PRAAT" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "21ce3c63-41da-44fb-a982-00ff00f01412", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Maximum OpenSmile HNR value: 12.219974517822266\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABH8AAADeCAYAAACkJKzaAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAA+GZJREFUeJzsnXdUVNfbhZ9p9I4gCihS7L33XqLRqDFqYorpzfTe2y+998QYY3o0TRM1xt672HtBlCIg0mGAad8fh3uZgRkYFGP5zrMWS2TanZlbztlnv/vV2Gw2GxKJRCKRSCQSiUQikUgkkssS7YXeAIlEIpFIJBKJRCKRSCQSyflDij8SiUQikUgkEolEIpFIJJcxUvyRSCQSiUQikUgkEolEIrmMkeKPRCKRSCQSiUQikUgkEslljBR/JBKJRCKRSCQSiUQikUguY6T4I5FIJBKJRCKRSCQSiURyGSPFH4lEIpFIJBKJRCKRSCSSyxgp/kgkEolEIpFIJBKJRCKRXMZI8UcikUgkEolEIpFIJBKJ5DJGij8SiUQikUgkEolEIpFIJJcxF4348+abb6LRaHjooYfUv5WWljJt2jRCQ0Px8/NjwoQJZGZmXriNlEgkEolEIpFIJBKJRCK5xLgoxJ+tW7cyffp02rdv7/D3hx9+mPnz5/Pbb7+xevVq0tPTufrqqy/QVkokEolEIpFIJBKJRCKRXHpccPGnqKiI66+/nhkzZhAcHKz+PT8/n5kzZ/L+++8zePBgunTpwqxZs9iwYQObNm26gFsskUgkEolEIpFIJBKJRHLpoL/QGzBt2jSuvPJKhg4dyquvvqr+PTExEZPJxNChQ9W/tWzZkiZNmrBx40Z69uzp9PnKysooKytT/2+1WsnJySE0NBSNRnP+3ohEIpFIJBKJRCKRSCQSyX+IzWajsLCQxo0bo9W69vdcUPFn9uzZbN++na1bt1a7LSMjAw8PD4KCghz+3rBhQzIyMlw+5xtvvMHLL79c35sqkUgkEolEIpFIJBKJRHJRkpKSQlRUlMvbL5j4k5KSwoMPPsjSpUvx8vKqt+d9+umneeSRR9T/5+fn06RJE1JSUggICKi315FIJBKJRCKRSCQSiUQiuZAUFBQQHR2Nv79/jfe7YOJPYmIiWVlZdO7cWf2bxWJhzZo1fPrppyxevJjy8nLy8vIc3D+ZmZlERES4fF5PT088PT2r/T0gIECKPxKJRCKRSCQSiUQikUguO2qLublg4s+QIUPYs2ePw99uueUWWrZsyZNPPkl0dDQGg4Hly5czYcIEAA4dOsTJkyfp1avXhdhkiUQikUgkEolEIpFIJJJLjgsm/vj7+9O2bVuHv/n6+hIaGqr+/bbbbuORRx4hJCSEgIAA7r//fnr16uUy7FkikUgkEolEIpFIJBKJROLIBe/2VRMffPABWq2WCRMmUFZWxogRI/j8888v9GZJJBKJRCKRSCQSiUQikVwyaGw2m+1Cb8T5pKCggMDAQPLz82Xmj0QikUgkEolEIpFIJJLLBnc1D9dN4CUSiUQikUgkEolEIpFIJJc8UvyRSCQSiUQikUgkEolEIrmMkeKPRCKRSCQSiUQikUgkEslljBR/JBKJRCKRSCQSiUQikUguY6T4I5FIJBKJRCKRSCQSiURyGSPFH4lEIpFIJBKJRCKRSCSSyxgp/kgkEolEIpFIJBKJRCKRXMZI8UcikUgkEolEIpFIJBKJ5DJGij8SiUQikUgkEolEIpFIJJcxUvyRSCQSiUQikUgkEolEIrmMkeKPRCKRXOTMOzqPSfMnkVmceaE3RSKRSCQSiUQikVyCSPFHIpFILmLMVjMfJn7IgZwDrEhZcaE3RyKRSCQSiUQikVyCSPFHIpFc0thsNrZlbGP36d0XelPOC5tPbeZM6RkAskqyLvDWSCQSiUQikUgkkksR/YXeAIlEIjlbdmTt4MPED9metR2D1sCiqxfR0Lfhhd6semVB0gL1d1n2JbnUsFg
tHM07SkJwAlqNXG+SSCQSiUQiuVDIkZhEIrngrE5ZzcT5E/l6z9eUmktrvb/FauGptU9x06Kb2J61HQCT1cTq1NXne1P/U0pMJSw/uVz9f2aJFH8klxbvJb7HNfOvYXHy4gu9KRKJRCKRSCT/r5Hij0RymVBuKWflyZWYLKYLvSl15tOdn3Iw5yAfbf+I0XNHM/fIXKw2q8v7r09fz8Kkheg0OiYkTOCGVjcAsCpl1X+zwW5gs9k4UXACi9Vy1s+xImUFRrMRnUYHyLIvyaVFflk+vx/+HYC92Xsv8NZIJOeH5PxkeW6WSCQSySWBFH8kksuEL3d9yQMrH+DVza9e6E2pEycLTnIw5yA6jY5Gvo3ILMnkhQ0vMPvgbJeP+evoXwBc1/I6Xur9ElcnXA2IfJwSU8l/st21sTh5MaPnjmbGnhln/RwLkxYCcEWzKwDh/LHZbPWyfRLJ+Wbe0XkYzUZACpeSy5OM4gwmLZjE7Utuv9CbIpFIJBJJrUjxRyK5DLDarPx1TAgic4/M5WDOwQu8Re6z5MQSALpHdGf++PlMbjEZgHVp65zeP78sn5UpKwEYFz8OgPigeCL9Iim3lrPp1Kbzv9FuoGyHIuDUlWxjNhvTNwJwS5tbADCajRSUF9TPBkouChYnL+bznZ+fk0PsYsRitfDLwV/U/0vxR3I5sjJlJUazkeP5xzFZLz3XrUQikUj+fyHFH4nkMiAxM1GdXNmw8e7Wdy8Zh8iSZCH+DI8ZjqfOk5HNRgJwLO+Y0/svTFqIyWqiZUhLWoS0AECj0TAweiBw8ZR+KdufXJBMWlFanR+/OHkxFpuFdg3a0SKkBUGeQYDM/bmcyDZm8/Tap/li1xcXZL8tKi9iTeqa81IqujZtrcN+f6HFH7PVzOubX2fR8UUXdDsklxerUypz5vLL8i/glkgkEolEUjtS/JFILgP+Of4PAL0b98ZD68HmjM2XRPhxSmEKB3IOoNPoGNxkMABxgXEApBenOy3hmnd0HlDp+lEYEDUAgDWpa5zmBa1KWcV1C65jy6kt9fcGXGCz2TiWXylerU9bX+fnUL7TK2OvBKChj+hiJjt+XT78fOBn1S3wx5E//vPXfz/xfaYtn8ZtS24j25jt1mP2ndnH8fzjtd7v5wM/AzCs6TAAThtPX1BBelvmNn45+Avvbn33gm2D5PKi2FTMlozK64kUfyQSiURysSPFH4nkEsdkMbH0xFIAbml7Cze2vhGA97a9d9Hb0BXXT9eIroR4hQAQ5BVEqFcoAEn5SQ73P5RziAM5B9Br9YxqNsrhtq4Nu+Jn8ONM6Zlq4bLllnJe3fQqe8/s5b4V95GYmXi+3hIgHB2F5YXq/zekb6jT44tNxezL3gfA4Gghiikt7C+0g0JSP5SYSphzaI76//Xp68kozvjPXt9qs6qd5HZk7eC6hdfVWi56KOcQ1y+8ntsX316jkJOUn8TGUxvRarTc1+k+AMosZRe0ZPFI7hEAsoxZF00umOTSZkP6BodrbF5Z3oXbGIlEIpFI3ECKPxLJBcJsNbMtYxtf7/maQzmHzvp5NqRvIL8snwbeDejWsBu3t7udEK8QkguS+X7f9/W4xfWPkvczvOlwh7/HB8UDcDTvqMPflVyjgVEDCfYKdrjNoDPQJ7IPUL306/fDv6vlUkazkXuX3cuu07vq5T04Q3H9GLQGQARRK5OEElMJz617jj+P/Ony8buydmGxWYj0i6SRXyPAzvkjy74uC+YdnUdBeQHR/tF0Du+M1WZVXW3uUlBewNRFU5m5Z2adX3//mf3klObga/AlJiCGjOIMblp0E9/t+04Naa7K9N3TsdgsZBmzahQh5xwUotaAqAHEBsaqJYsXUri0LyNNLUq9YNtxPnlpw0tM+HsCc4/MxWw1X+jNueypep25EOJPmaXMbdeeRCKRSCRS/JFI/mNyS3N5eu3T9J/Tn1sW38JH2z/if5v+d9bPt/B4RUeomCvQaXX4efgxreM0AD7
c/qFafnGxkVqYyv4z+9FqtAxpMsThtrggUfqVlFfp/DFZTWp4ctWSLwWl9GtV6ir1b6XmUr7e8zUAj3d9nO4R3Skxl3DP0nvOSXSrCWWi2btxb4I8gygyFbHn9B4Avtv3HX8d+4v3tr3n0j2RmCWcSZ3DO6t/C/cJB6T4czlgsVr4fr8QZm9qfRMTW0wERFi7s5JFV6xPW8/2rO18s/ebOj0OYG3qWkDsoz+O+pFejXphNBt5d9u7XPHHFXy37ztKzaXq/Y/kHlEdhiCyrFyhOO+UksUwnzDgwoo/R/KOqL+nFKRcsO04XxzPP84fR/7gcO5hXtjwAmPnjWXG7hm8tuk1bl9yOzf8cwMphe6/78LyQsosZedxiy9tLFaLegwFeAQA/33ZV25pLpPmT2LE7yNILbw8BU2JRCKR1C9S/JFI/mP+OPIHC5IWUFheiK/BF6he3uQuJaYSdfVRmWgBTGw+kZta3wTAG1veYNbeWee0zecDZSLZrWE3Qr1DHW5TxB9758/WU1vJKc0h1CtUdfhUpX9Uf3QaHUdyjzD/2HwA5hyaw2njaRr7Nua6ltfxyeBP6BzemUJTIc+vf/68dFlSRKuE4AR6NeoFiO5l2cZsvt33LSBcG66CoJWytC4Nu6h/k5k/lw/LTy4nrSiNIM8gxsaPZWiTofh7+JNenM6mdPe71SnHR0F5AScKTtRpG9akrgGgX2Q/Aj0D+WLoF7zS+xUi/SLJKc3h3W3vctuS2ygqLwJgxu4ZDo9Pzk92+dyKEyHCNwKoFC4vlPhjs9kcnD8nC086vc+ljCKMxwTEEOwZzMnCk3y842NmH5rN5lOb2XV6l0P3tZpILUxlxO8juGnRTRd96XBd+Hzn53yY+GG9fNd7sveQW5aLv8FfvR79l86fElMJ9y2/j6T8JMqt5ZdExp9EIpFILjxS/JFI/mOUif3dHe5m+USRuVFYXnhWq4ZKm9km/k1oE9pG/btGo+Gxro9xR7s7ABHs+sP+H+ph6+sPZbA6tOnQarcp4o/9hG1ThpgU94vqh16rd/qcgZ6Bqgj2zLpneHHDi3yz9xsA7upwFwadAR+DDx8M+gB/gz8Hcg7UudTGHZSyr9jAWHpH9gZEed70XdMpMVfmjRzIOVDtseWWctUl1LlhpfNHyfyRzp9Ln+/2fQfA5BaT8dZ746X3YnTsaKBuwc/2x8fu07vdfly2MZu9Z4Q7p29kXwB0Wh3jE8Yzf/x8Xun9CoGegew+vZt7lt3D3uy9/Jv8LwA9G/UEXDt/bDabKv408G4AQLi3EH9OG0+7vY31SUZxBsWmYvX/VR0wD654kInzJ1JuKf+vN61esNlsqvhzd4e7+XfCvzzc5WGGNR3GrW1vVXPgVpxc4ZbwMX33dApNhew/s5/fDv3mcNsfh//gw8QPL7mysqySLL7Y9QUz984kvTj9nJ9PWXTpG9VX3c//K/HHZDXx2OrH2J1decxvPrX5P3ltyX+DzWbj0VWP8uiqRy+IMP3Fri8Y8fuI8+aOlkgkFw4p/kgk/yEWq4WdWTsBGBQ9CF+Drxp0fLbtwAFGNhuJRqNxuE2j0fBA5we4t+O9AHy1+6t6HbDnl+XzwvoX2J65vc6PtdqsHDgjhA97d4uCs45fyuC2e0T3Gp/7ld6vcHeHu9Gg4c8jf5JTmkO0fzRj4sao9wnxCuHuDncD8PGOj1V3Q32hOH/ig+Lp3ViIP/vP7Of3w78DwhEEqJ+BPXuz91JuLSfEK4SYgBj17xE+wkUhxZ9Lm8O5h9mdvRu9Vs91La9T/z4hYQIAK1JWkFOa49Zz2Ys/e7L3uL0NSve5ViGt1JIsBYPWwPiE8Xw17Cv8PfzZeXonNy26CRs2BkUPYniMyOdyJf4UmYootwoRRQluv9DOH/uSL3B0/uSV5rEiZQWHcg85fJ6XEruzd5NalIq33ptB0YPwMfhwa9tbeX/g+zzc5WHu63gfnjpP0orSOJx7uMbnSilIUV2TAJ/v+lx
dmFh2YhkvbXyJmXtnqg7GSwX7jLf6+J4V8Wdg1EA10yqvNO+cn9cd3tj8BmvT1uKl8+Kp7k8BsC1j2yUnyNWVM8YzvLThpf8XgkRqYSpLTixhyYkl5Jbl/uev//fRv0kvTueptU9dsqK4RCJxjhR/JJL/kCN5RygyFeFr8KVFcAsAovyjgLqLP6XmUjambwScu2cU7mh3B0GeQeSV5bEja8dZbnl1fj74M3OPzuXVza/W+bEphSmUmEvw0HrQLLBZtdurdvzKL8tXhZIejXrU+Nw6rY5pHacxfdh0VVib1nGaGr6scF3L64gJiCGnNIevdn9V5/fgipzSHHLLctGgISYwhnCfcBKCE7Bhw2wz0zeyL9e2uBYQglBV7Eu+7AU9ZQJdWF4ouxVdwigOjf6R/R3KHVuEtKBNaBvMVrPD5NsVZZYyBxHD3vljs9l4cs2T3L30bgfHi8LaNJFV0j+qv8vnbx3amhnDZuBv8FdLf+7ucLcqSLoq+1JcP34GP7z0XsCFF3+Uyb5Shmaf+XMot3IiWZdMnIuJf5L+AVCFn6r4GHzo1ViUn644uaLG51JCvXs16kV8UDz5Zfl8uetLjucf57n1z6n3+3zn5xzNPVrDM11c2B8fSuc3d1BKBmcfnM1Ta5/i5n9vZszcMRzLP4ZOo6NPZJ9K8ec/cP5kFmfy22Hhxnp3wLtc2+Ja/D38KTQVOl1MuJz4Zu83/HHkDz7f+fmF3pTzzsHcys6Lp0vq7pi02WxsPrXZ6fm/NkrNpep49GjeUT7b+Vmdn0MikVy8SPFHIvkPUVwyHcM6otPqAIj0iwSoc2DjlowtlFpKifCNUIUkZ+i1ejUIWWntXB8oYZdHco/UaTANqC2lE4ITXJZw2Xf82paxDRs2mgU2UyeStdGrcS/+Hvc3s6+c7ZCHpGDQGXi82+MA/HDghzpnprhCmWhG+kXirfcGoE9jkQmhQcNDnR+iVUgrQJR9VbV0K2HPVR1Rfh5+akaUdP/8N+zM2snnOz/ni11fMH3XdKeBzKeKTvHqplfdOn6tNiv/HBcTdWf75NUJVwPw55E/a7X6H88/jtVmVUXNw7mHVVHwQM4B/jn+D+vT1/P8+ucdnstkNbEhbQMgSihrok2DNkwfNp1wn3AmJEygdWhrVaxNL053uiJcteQLIMz7wgY+K9lIg6IHAZBRkqFuu317+0tB/MkqyeKdre/w2+HfsNlsmK1mtSTP2T6loITq13QNOFlwkgVJCwC4r9N9PN5VnB9nH5zN/Svup9hUTJeGXegX2Q+T1cTz65+/ZNwm9uKPu86fxMxEBv82mHF/jeO1za+xMGkhiZmJquttcJPBBHoGEugZCNRv4HO2MZupi6by5a4vHf6ulEu3D2vPgOgB6LQ6ujXsBsDmjMu79EvJKdt3Zt8F3pLzj/156YzxTJ0fv/TEUm5fcjtvbXmrzo89UXACGzZ0GjFG/Xbft6pjXSKRXPpcUPHniy++oH379gQEBBAQEECvXr1YtGiRentpaSnTpk0jNDQUPz8/JkyYQGamnPRILl22Zwnxp1N4J/VvUX5n5/xZnSIGgQOiBlQr+aqKMvB3N/OhNs4Yz6gdfQAWHV/k9H42m43n1z/PwysfdghWVgY2LUNaunwN+45fm06JvJ/aSr6qEugZSJsGbVze3j+qP30i+2C2muvN/aOUfCnbDzAmbgxeOi+ub3U9LUJakBCcgE6jI6c0x0HIsS8LtO/0paCEPl/Irkn/Xyi3lHPfivv4YtcXfL7zcz7d+SkvbHhBnRwrvLHlDeYcmuPW/rM9czsZxRn4GfwYED2g2u2jmo3CW+9NUn6SQ5mKMxRBo12DdoT7hGOxWVQnmVIOCmISoOReAezK2kWhqZBgz2DahratdZvbhbVj2TXLeKn3S4Ao5fI1+GK1WZ2KJcpExd7VFO5bkflzFivY9YEiTneP6I6P3gerzaqeb+1LSC5m8afcUs7MPTMZM3cM3+//nlc2vsLT655
mdcpqckpzCPYMVt09zhgYNRCtRsuh3EMuhUrF9dM3si/tw9rTO7I3/aP6Y7aZOVFwgjDvMN4d8C4v9noRf4M/e8/svSTKv0xWk4NgYN9IoCZm7JlBtjEbL50XPRr1YFrHabzT/x2+GfENf437i3cHvAtwXpw/7217j+1Z25m+e7pDOZlSbqYImQDdG4nr4uWc+5Ocn6yKbpklmWcliFxK2J+XziYrTRE716atrfOY73j+cQDaNmjLVXFXYbVZeWbdM9JxLJFcJlxQ8ScqKoo333yTxMREtm3bxuDBgxk7diz79omL9MMPP8z8+fP57bffWL16Nenp6Vx99dUXcpMlkrPGZrOpzh/7IF+l7Cu1yH3nj81mU1cAFVdPTfRq3AtvvTenik85DRmuK+vS1mHDprp2/jn+j9MBRpGpiHlH57Hs5DKHwXddxJ+jeUfZkrEFqAybrU+mtJwCOA9fPhvUsOegWPVvzYObs2nKJp7o9gQAXnov9f3Zl34dyj1EsakYP4MfzYObV3tutePXeXD+rDi5gjuW3KE6N/6/szZ1Lfll+QR7BjOx+US13PC7fd+p+3pKYYo6GXOnpHLhcVHyNazpMDx1ntVu9/PwY3hTkalTW/Cz4l6IC4qjfYP2gMh+sdlsqvgzMGogAB9t/4h/k/9lTeoaZu0Tnf/6RPZR3Ye1YS8uazSaGku/zpRWiD9eduJPReBzdmm2U6fItoxt3L7kdjUIuz6xWC3qZCY+KJ5o/2igUuixL/u6WNtl55flM3H+RD7c/iEl5hJVPF6YtJDHVj8GwIiYEdVKW+0J8gpS3YTOSr9SClPUksR7O9yr/v3Rro+i1+rRaXS8O+BdGng3oKFvQ57s/iQgyr9OFlTvnqZgsVpYl7aOwvLCur/xeuJwzmHKLGWqkyEpP6nWLo+ZxZlqWfXvV/3O18O/5u4Od3NFsyvoFtGN2MBYtBoxhK5v8WdbxjZVZDZbzSxKFosrJaYSVeBRjm2ovC7uyNpBmaWsXrbhYkNx/Sg4K5m+nLB3/pyN+KM4mbON2XU+rynny9jAWJ7s/iQRvhHi/FBx/ZJIJJc2F1T8GTNmDKNGjSIhIYHmzZvz2muv4efnx6ZNm8jPz2fmzJm8//77DB48mC5dujBr1iw2bNjApk2uW+GWlZVRUFDg8CORXAykFqZy2ngavVZPuwbt1L+rzp9C950/B3MOklmSibfeW131qwkvvZdaelQfpV9KZsiUllPw1nuTVpTm0HlEwX6lXxFwoG7iz87TO0nKT0KDhm4R3c5526vSxL8JIL6fqgLWLwd/4d/j/9bp+VTnT2Ccw991Wp3DJNq+9EtByfvpGN7R6cRcKXk7H+3eP9r+EZtObVIngP/fUQa6Y+PH8kKvF3hvwHt46705nHtYLa/45eAv2BD7THJBco2r0eWWclWUqak8Z0JzEfy8OHlxjUHkinshLiiO9mFC/Nlzeg/7z+wnrSgNb703b/V/iwkJE7Bh4/HVjzNt+TR1EjUweqA7H4NTYgJjADhecLzabc7KvkK8QtBpdFhtVocw62xjNk+vfZpbFt/C5lObmb57er13tkkrSqPUUoqH1oNo/2iaBIjjPaUwhXJLuXq8Kn+7GFmcvJik/CRCvEJ4re9r/D7md2YMn0GwZzBmmxDTatqnFGoq/VqdshqLzUL3iO60C6u8PsUGxvL9Fd/zw8gfHBYtroq7ip6NemKymvjxwI8uX/Of4/9wz7J7uOGfGy6YsKy46Ho06oGnzpMyS1mtTtsFSQuw2qx0Du9M04CmNd5XKfsqKCs45/3XbDXz+pbXgUrRVMkA25i+kXJrOVF+UQ7O0tjAWMK8wyizlLErq2bH4KWKct5SBM4LLf6kFKacVZMOd8gtzXVY4Dmb48Y+kF8pJXeXpHxxTmwW2IwAjwCGNhGZkvV9fjSajeprSS4NSs2lvL31bbZmbL3QmyI5By6azB+LxcLs2bMpLi6mV69eJCYmYjKZGDq0Msi2Zcu
WNGnShI0bN7p8njfeeIPAwED1Jzo6+r/YfImkVpSSrzahbdQgVIBIf5H5k1aUVi1PxBWrUlcB0KtRL+EgMOZCWc0dqwY3GQzUHvhZG/aZIcNjhqv2c2elX1nGyvIkpXQr25hNtjEbDRqn7hYFRTxRVoxbhrRUB9n1SWO/xmjQYDQbVdcCQHpROq9vfp1n1z2rBt66g+L8sR+cO6NVaIX4YxfSqTjDnHVAg/PX7j2lMEUdhNVX9tGlTGF5oVpWqUyqAz0DGRc/DoDv931PsamYuUfmAqgunp2nd7p8zrVpayksLyTcO5yuDbu6vF/HsI40C2yG0WxUV/ydoTh/4oPiVfFn1+ldqsDUP6o/PgYfnunxjAgPR0NcYBxXxV3Fy71fVgf0Z0NNzh9n4o9Oq1PLwJSSxZSCFK6adxULkhagQYNWo6WwvNCtCUZeaR6vbHyFKQuncKroVI33VTp9xQXFodPqVOfPyYKTHMs7htlmVieUGSUZmCzuH+v1TVpRGm9sfqPaZE8RLya1mMRVcVeh1WjpFtGN2aNn0z2iO0ObDKVDWIdan185V+/I2lFNqFQG830i+1R7XLuwdg6CEAgH2K1tbwXgr6N/uXT2KE6VpPwk7lhyB7ml/33nImVhomN4R2IDhSOzptIvm83GX8f+AoT4WxuK88dsM1NkOrfOkbMPzuZI7hECPQOZMXwGeo2ePdl7SMpPYmXKSkAIt1XdeGrp12WY+1NYXqgujChdES+k+FNQXsDkBZO5dsG1ZxWoXBv2rh+ou/hjtpod3D517chq7/wBO8dxPS86PbfuOcbOG+uQxyW5uJl3dB4/7P+BF9a/UKPQbbFa6jRuvphIK0q77KMVLrj4s2fPHvz8/PD09OTuu+9m7ty5tG7dmoyMDDw8PAgKCnK4f8OGDcnIyHD5fE8//TT5+fnqT0rKxbmSJ/n/hyL+2K+egriw6jV6TFaT2yccNe8negCc2Ajvt4GPO0GO61WU/lH90Wv0HM07WqNNvzZ2Zu10yAwZ1WwUIFanq1rp7d/PzqydlFnK1IFN04CmTjvTKNh3/ILau3ydLR46D7ULkP2ASZlcl1vLSS9Kd+u58svy1YGasy5m9rQJFVlEivhTYipha6aYgLkUf87TIMzeUu+qhff/J5adWEa5tZy4wDiHMPUbWt2ABg1r09byQeIHFJmKiAmIYUzcGAB2ZLou/VIcVSObjayx3Eqj0agTnD8P/+n0PkazUd1X44LiaB3aGp1Gx2njaf48Kh4zImYEIPbvb0Z8w7YbtjFv3Dxe6/saVydc7XbJlzMU8ceZUOgs8weq51UtOL6AwvJCmgU24+crf1bzh+yzxKpitVn5/fDvjJ43mt8O/8ae7D3MOTTH4T6JmYkMnDOQ3w//DjiWxwGV4k/hSfVc1DG8I956b4csoAvBjN0z+Pngz9VCfpXJkVLep9DYrzEzR8zkg0Ef1Jr7pty/VUgrbNjUckUQn+u2zG0AaniwO/Rs1JPYwFhKzCX8dfQvp/fZk70HEI6No3lHuXPpnU6DkVecXMH0XdNZdmIZJwpO1FqW5QqT1cSXu75ky6lKp6nihukQ1sGhnNgVe7L3cDz/OF46L7UMsya89F5quP+5lH6lF6WrnZUe7PwgsUGx9I3sC8C8I/PU87R93o9Cjwhxfbwcc382pG/AbDMTExCjntcuZOjzhvQNFJYXkleWd86Lac5Q8n48tB5A3bPS0ovSVUcgVI493cFitahjAGUMoyw61eeE2GazqQuC/x8CvC8XlLiJ1KJUDucednm/O5feycg/RtboXv4v2HxqM2PnjXW4HjjDarOyLm0ddy29iyv+uIJJ8ydhNBv/o63877ng4k+LFi3YuXMnmzdv5p577mHq1Kns33/2ir6np6caIK38SCQXA6qrI9xxYq/X6lXxQZl4mK1mrltwHZPmT6qmnmeVZKkXy/66QPhpIpiKoTgLfpoEJTk4I9AzkK4RwnFwLgMWpctX38i+6LQ
6ejfuTaBnINnGbFW8sN9WhTJLGTuzdrpV8qWgdPwC6IEXnD4E5vrPNFByl+xdB/YTW3fdMIp7ppFvI7UzlyuaBzdHg4YsYxbZxmxm7JlBflk+kX6RtG3gPIhX2U/q2/mjfKcAJ/Kl80cp+RoVO8phUt0koInqoFNEhymtpqjh3FVzf5Lyk/hu33fcv/x+Vp4Uq/bulOeMjh2NXqtn75m9zD82v1pOzvH849iwEeQpBFJvvbfqossvy8db761OGgG0Gi0eOo86fQY1oZR9ORMKnTl/oHrHL2Ul//qW19O2QVtah7YGap4IPLPuGV7e+DL5ZfmEeIUAooTJfgXy233fcqb0DG9sfoNjecfUduTKucS+zFPJ+2kZ0rKy62Idstdqw2az1akMSDk32k/g80rz1M9ZcXidC0rp17KTy9S/Hck9QkF5AT56H9WR6A4ajYbrW10PiBLIqs7VgvIC9Zw4Y/gMQrxCOJhzkOfXP+9wv2xjNg+vephPd37Kw6seZvTc0Vw598qzmjjMPTKXz3Z+xgMrHyCrJIszxjOkFqWiQUO7Bu0cuki64u9jfwMwtOlQ/Dz83Hrdc+34dTDnIDf+cyNFpiLahrbl6niRb3lV/FUA/HTgJ3LLcvH38KdTw07VHq8sjuzN3nvBJ1z1jSJ6DYgaQKvQVmjQXNDQ5zUplYslSvfG+kRp866M1+rq/FHOF419hav5RMEJt58jvTidMksZBq1BPSeq5eb1OO5ILUqloFzEctTm3rxUMVvNbEjbwNaMrZwoOHHJiwklphIHEcXVPKKgvIAtGVvILMlUxf/6otRcWid37pe7viQpP4m3t77t8lpcai7l5n9v5p5l97AhXVQ1nCk9U2vTjUuZCy7+eHh4EB8fT5cuXXjjjTfo0KEDH330EREREZSXl5OXl+dw/8zMTCIiIi7MxkokZ0m2MVu9IHcM71jtdjX0uWI1/8CZA+w9s5cDOQeqtdhUBkLtAuNo8OutUF4ITXpDQBScOQJzbgRz9RbM4HzgX1eU1+8f1R8QLdOHNR0GwD9JjgOhqitWm09trpP4o6zS6m02Oi98Fj7rDq82hE+7Q/K6s34PVVFyl+wnfvYT29qcUjabjWJTsfpd2Yc9u8LH4KOurC1OXqyG3T7R7QmXwa3nOgiz2Wy8n/g+T619Sm11XWIqcchjyjJmnRcr+6VCVkmWOsBRXG323NT6JvV3P4MfV8Vdpbr59ufsVwd4u07v4pq/r+Hdbe+yKnUVZpuZLg27uLXfh3qHqsfUM+ue4Yo/rmD6rumUmksBRzeLIk7ZCwMDogaoToTzgSKg5JXlOXQiAtfOH2XfzSrJwmQxqW4MxeWmCJ6unD9Hco+wMGkhWo2Wx7s+zt/j/sagNZBckKwKDPll+axLE+eFcms5z657Vp1IJQQniG2vyPxJLUpVS0dahrSsFgR9rpRbyrlm/jVcM/8at57TYrWogkRyQbLq7lNKlmICYuql7HVYjNivNp3apAoVSslX54ad1RB/dxkdOxp/gz8nC0+qn72C8l1G+UXRpWEXPh/yOSDEZvsysbWpa7HarIR6hdI6tDV6jZ60orQ6D75tNhs/HfgJgGJTMW9teUudfMQGxuLv4a+KP67avZdZytQJvTslXwpK6dfZlLWtT1vP1EVTyTJmER8UzweDPlCdeQOiBhDgEUC5VZyv+0X2c3p9aOzXmGj/aCw2iyqsXg4ogeEgnM6+Bl9VfK5r6VdaUZrqmj6X7VEyD0HkMNnnmNUHivNHEfDrGvisLFa1adBGPe+5W/qllHw1DWiq7oP25+76ymSz/+4upNvyfPL74d+5a9ld3Lr4VkbPHU2Pn3rUW1fZC8GWjC3qeQhczyPsc/SqljCeC6eKTjF67mjG/z3eLQEoozhDdbQeyj3ExlPOI2PWpK5hR9YOvPXe3Nj6RvpF9gOo1S10KXPBxZ+qWK1WysrK6NKlCwaDgeXLK4MJDx06xMmTJ+nVy3U7U4nkYkQRBeK
D4p0O4JUVFuUiaO+gqdrlQi35SjsIpXkQ1Q2u/1X8ePjDiXXw931grZ4fNLjJYHQaHbtO7zqrOuu0ojSO5R9Dp9HRO7K3+vdhTcSEQjnRKiiDFiXcuK7iT3NDEADty8rw8QoW7w8bZB+CH8bDvrl1fg/OUCZ+9mVf9nkmNTl/ftj/Az1+7kHPn3vyfuL7QPWwZ1coq+zvbXsPk9VEn8Z9nFr6FZTSmZzSHFW8qQsrU1Yya+8sFiYt5JeDvwBiEmiymoj0i1TdFJdr7o/VZq118Lro+CJs2OgY1lEVZe3pFN5JDWwfnzAeX4MvjX0bE+4djtlqVie8X+/5GpPVRKuQVjza5VF+ufIXZg6f6VZ5DsCLvV7k1ra3EuwZTGZJpmg1v/4FoNK1YO+Ms898UUojXFJ8BhY+CqvegtK6OxV8DD7qvmgvktoHOtuXbILjBGJ/zn5KLaUEeQapQqlaBplzwGnJz/f7vweEgH1Tm5sI9AxUW5svOyEGoUtPLMVsNdM0oCkBHgHsO7NPncwoQnK4TzgeWg/MVrMqLrQIblHv4k9iZiKHcw9zOPcwN/xzQ43lbCDK0Ow7NSmCrFryVQ+uHxAiSHxQPGarWbXwK+LP2QTq+xh8GJ8wHoCfD/xceYPVyu6lTwHQrigXlr9CmxJR5me2mVmftl69qzKZntRiEnNGz2FIU7FIUVNZgTM2pm8kKT8Jb703Oo2OJSeWMGPPDKDy81P2g+P5x1VH3cfbP6b7T90ZO28sty++ncLyQiJ8I+geUXszBQXlul7Xsq9NpzYxbfk0SswldI/ozncjv1MdniDKNkc2G6n+v6brg9L1y9Uk51Jk75m95JTm4G/wVxfOFJdgXcSfjOIMpiycwn0r7qu2oFYX9mTvIa8sD3+DP61CWmGxWViSvOSsn68qpeZS9ZyliD9Gs7FObdaV63dMQIzqSnW39Ktq3g9UnrvLLGWqW+dcsf/uThVfns4fxUUS4hWCt94bGzb+OFxzF8+LGWUuMrLZSHQaHYdzDztdGLUX1utL/DGajTy48kEySzI5UXCi2rzIGVVdea66iSol0JNbTOaJbk8wPEaU+l6O+WkKF1T8efrpp1mzZg3Jycns2bOHp59+mlWrVnH99dcTGBjIbbfdxiOPPMLKlStJTEzklltuoVevXvTsWf/tniWSesdqgZObsa14nbnbRR2/qyyXqs4feyeGMkAHUQKwLl2sgg3Oy4bw1nD97+DpDw3bwKRvQaOD3XNg3t1gcSwXCfcJV/NJpu+e7nLT/zj8B7ctvq2ahV0pD+oY3pEAj8qSSsXBcqr4lMPETSnxUF5z75m96sWiVvEn+whjVnzIvbl5PG8Nhmlb4OkUePQwtBwNlnL47RbY7Pp9uEvVzx+qOH8KnTt/TFYTM3bPUN0eXjovmgU2Y3TsaLdet3VIa/V59Bo9T3R/okZxIMgzSM0BqGv9fcmZY7y55in1/9N3TyevNM/BUl9TkO+lzo6sHXT5oQsz9850evupolPMPzafXw/9Crguz9JoNLzW9zXuaHeH2hJbo9GopRg7snaQnJ+sirRv9X+Lm9veTNsGbeuUs+Nr8OXhLg+zbOIyXun9ClqNlkXJi1iftr5ajg0IUUqr0eJv8Hco+arGyU0wvR9s/RpWvQ4ftoe170F53dxezkq/8svy1ayJquJPmI8o+zptPK06EzqHd1bbZTcLbIa33huj2ahOQBSyjdlqZtLUNlPVv1ftXqUM9q5OuJqnezyt3s9H70Mj30aAKIFTjnerzYpeqyc2MLbexR9F0NBqtOSU5nDr4ltrdB1UFTqU0i9F/KkW6Jz4LbzbAn6cAOs+gLREcHNVXsmxWZK8xCHvp6Yg8pq4tuW1aNCwPn195Xd3fDV7SkRWWoczKWIf+2YEAz2FsKGEF5ssJnWSpLhJlRLGuoo/Px0Urp/x8ePVcrSq4lljv8Z4670xWU2cLDxJRnEGs/bOUjsPKaHtY2LHiH3TZoNFT8GP10C
GawFPcf64Kvuad3Qety2+rVpe29e7v8ZiszCs6TC+HPqlw3VV4ao4Ufql1+qdBnIrKGKo0qL+ckAZc/SO7K06npTrprtZMeWWch5Z9YgqTNcmxNaEcr3sE9lHvc7XZ+nXsbxjWGwWgj2DiQmIwUcvchFduX/yy/JZlbLKYdylXL9jAmNUV2pdnT/2mYWeOk+CPYMBIaK5w5ZTW2r8fuxvuxydPzabTV1c+HDQh6yctBKdRkd6cbrbGZIXko3pG7n535vZc1o4J202m7rvj4kdo5YkOuscaV9Sq7jYzgWbzcaL61906Iw779i8Wh+nVCLc1vY2tBotG9I3VNses9XMmjTHDqiK6L8ve99l64K/oOJPVlYWN910Ey1atGDIkCFs3bqVxYsXM2yYcBF88MEHjB49mgkTJtC/f38iIiL480/nAZgSyUWDyQgLHoF34uCb4fyT+AlrCo6iR8O1LSY7fYja7r0oDZPV5BAcezz/OCkFYkLyb/K/mK1mWpaVk2AywZiPwDuo8onih8LVX1UKQL9NrZaRc0e7O9BqtKxJXeP04myxWvh4x8dsydiirqgrKBZ6JVxSIdwnHL1Gj9lqdhikKGVf7cPa08S/iXBeYCPcO7xaWYhKYSYsfha+7IdHUQb3eEQRf9M/4BcGGg34N4RJ30O32wEbLHoCtn3j/LncRC37qhB/SkwlDqVVrpwwG9M3kluWS4hXCJumbGLrDVv5e9zfbudm2N/vhtY3OKy2OUOj0Zxd+KKplK/mTuaUxUhjm46EoAQKywv5cveX6uC6f1R/dUJ/OTp//jr6F2abmW/2fqOWT4EoD5m6aCrD/xjOM+ue4WThSTy0HjW6Z5oFNuOBzg845IF0Chfiz/as7fyw/wds2BgYNbDW4O/a8NB5MD5hPFNaXAfA/5Y/wMGKVVN750+UfxRfDP2Cr0d87dBNUMVmEyLBrFFQkAYhcdCghXAPLn8F3m8Ni56ErAPVH+sEZ0KhkisR5BmEQedYmmLv/FHEH3sxXKfVqQ7BquelXw7+gslqomNYRwcRZGD0QLQaLQdyDrAjawfbMoSIMTJmJFc2u1LtaBYfHK+KTFBZtgbiMzToDE7df+eCcly91OslejfujdFs5JFVj7jM3lCEDsUFuiVjCxarRT3nOog/6Ttg4WNQlAFHl8Gyl2DGYJj/gFsCkFJSuCF9A9szt59V3o890f7RDIgaAKCKp7YtM9jjKYTqdt3ugxaihHLQXtHBbm3aWkxWE9uztlNsKlZLvuDsxB9lNViDhimtpjCt4zQHB43y+Wk1WtWZeSzvGN/t+04tyZw+dDrP9HiGBzs/yO3tbhcP3P0rbP4Cji6FrwbCqjedllUr4o8z54/VZuWT7Z+wJWOLQ9nHiYITbM7YjAYNj3d9vNoxo9A+rD0v936Z9wa8h7+Hv8vPoHtEd7QaLUn5SW5P0t2hxFRSr8+nkFKQwvKTy9lzeg9ZJVlOHX/KhM/+XFFX58+bW950yB5ROgCeDfZl71c0uwINGnZk7ag3AUNxSrQIaYFGo6kUzZ2EPmcbs7lx0Y3cv+J+tTsdVAryTQOaqs6fQ7mH3MqCUkpoq45FnI07jGYjX+/5Ws1VU/g3+V9uW3Ib1y64llc3vVrNtWSz2Ry6nOaU5jhcky8HUotSySnNQa/V0zq0Nb4GX/X6drGXZaYXpfPo6kdJzEzkiTVPUGIq4XDuYTJLMvHSedEtopt6bXUm/ij7EMDxguPn/N1+s/cbFiUvQq/R83xPkRe3NnVtjTlWR3OPcij3EHqtnlva3qJe86q6f3Zk7SC/LJ8gzyD1GnG5ltDac0HFn5kzZ5KcnExZWRlZWVksW7ZMFX4AvLy8+Oyzz8jJyaG4uJg///xT5v1IzitlljLmHZ131sq8zWajZMkzsG0mGHM54xPEm+Fi0nNXbi7xqz+s5sYBR+fJ/jP7KTGXEOARoK7EKsr0/GPzARhTVAxtJ0C0E1t6u2vg2p9A5wkHF4hA6KL
KgUOTgCZqlsn0XdVdM7tO71JXyKoOko7kiv83D3Fs0a7T6qp1zLLarGqr94Y+DR26dbUIiIHThyvb0xdlwd4/4e/74aP2sPFTMBshuidMnQ++juGxaHUw6l3o/7j4/9IXhWh0liiff5Yxi1JzqSp+KC6bU8WnnJZZKSt+V8RcUWvAszPahLYh1CuUSL9I7mp/l1uPOZvcn2P/PMh3enEBfiorg8caidKBnw/8TJYxC2+9N10jutI0oCkgLth1pdRc6lC24g5mq5ntmdv5ePvHTFs+7bxdaG02m+ouKCwvZOmJpeptvx76le1Z29FpdLRr0I6b29zMrCtmEewVXKfXUMSfnVk71cDYm9rcVNND3Mdq4b6TB2loNpNmKyezQmC1d/4A9G7cW50YVWPjp0IksFmg3SS4azXcuxHGfwXBzYQItPlL+LwnfDu6xs6BYCf+2Dl/zpRW5P14VRd2w70r9tviTFXc7hLh6IRUcn/sxR+j2agKClU/zxCvEHVy8/z657Fho3N4Zxr5NUKj0fBirxeZ3GIyD3V+yOFx9uV8Sjc3e/FHKQ38Yf8PDP1taI2ZX9nGbF7e+LJD3k1KYQrJBcnoNDqGNh3Kp0M+pXlwc8qt5Q7lTvYczhFCx8TmE9Fr9ZwqPsXq1NUUmYrw1ntXftelBcLxaDVB85FwxZvQ4krQaGH797DkuVoFoLigOJoFNsNkNamlqp0adnKZNeYOk1pMAuCvY39RmnOM1KQl5Op0GDR6WvZ+FCb/CLGDaF9cQLBVHIc7Mneok+m+kX1VgU4Rf5Lyk9wO91RKzvpF9VM7ST7dXbi/gjyDHCaz8cFCNN2WsY0/jogyjDvb30nvyN5c1/I6bm93u+hEWZwN/1a4JUMTxGe+6g2YOVR8D3bUJP7sP7NfvRbOOzpPnbQor903si+N/BrV+P6uTrhaDZt3RaBnoHoM1Zf7x2azcfeyuxn156h6ba9uspi4cdGNPLTyIab8M4Uhvw1h2O/Dqk3oqga2AzWGPpusJh5c8SDj5o3j/uX389Tap/jt8G9o0DA+frzDc9aVjOIMDuUeQoOGvpF9CfcJV10Ci44vOqvnrIoidilCgXIurfq55JXmcefSO1WnjhK+a79wFRMQQ0PfhkT6RWK1Wd3K0HLm/AHn446FSQv5aPtH3LToJvWcnZSXpJYng2iMMP6v8WppKVSGPRu0BjWbLr344nfD1AXls24V0gpPnSdQKWDWpfva+cZqszqIriariSfWPKFmsqUWpfLJjk/U83TPRj3x0nup56Jdp3dVW4i0L/uy2qw1huvXxpHcI3y0/SMAnu7xNJNaTKJdg3ZYbJZqGaP2KE07+jXuS6BnILe0uQUQx6m9kK2UfPWP6u+Qd6cc15dr7s9Fl/kjkVxIfj/8O8+vf55xf41z2r2kJo7lHePaP0bR5/RSng4L5dAVr/JG92vIw0oLr3Buyy+CnT/CrzdClQGtstqbZcxSJwddG3ZVbYirU1ZzouAEu7N3o7XZGGU0w9CXXG9Mi5Fw/W9g8IXjq0VQ8u5f1UnBHe3vQIOGlSkrq9kg7VsA26+8mq1m9aTePMhR/AGI9HfMLcory1MzFUK9Qx3En5aHlsJn3eCNSHg9Et5NgN9vEZMXc2lFjtEfcOu/1YUfBY0GBj4NjTtBWQEsfd75/dwgyDMIP4NwcaQXpaviT+vQ1vjofbDarNUcASWmEnXANSq2ejCwO/gYfFgwfgF/XPWH211llKwVd1cabXv+4PVTKzBrNAzEm0ElRnpv+4W+jftgQ+wPPRv1xFPnWWML75oot5Rz1byruObva9w+ZlanrGbAnAFM/XcqM/bMYE3qGr7d922dXtddkguSHXIF/tgvykPKLeX8sP8HQGTs/Hzlzzza9dGzyldpHtwcH70PxaZiSi2ltA5tfdZlNA5YLTDvXnz3zeXpnMoJZ4jVRkiemwPmlK1C+AEY9j/hDvT0FyJqh8lwf6I43lqNAa0ektfCl/1h1xy
XT+nMJeaq0xdAuK+YPBSaCik0FeJr8FWFFwUl92dfdqX48/fRv8kryyPKL4rB0dUnv0ObDnXYDvtyvSCvIJ7r+Vy1LBsl9BnECjtAI79GaDVaSi2lnDaexmQ18dXur8gsyXQ4J9pzqugUUxdN5ffDv/PsumfVFU5FCOoU3gl/D38MWoPqjFHaG1dFOdd2COugrkDO2C3yato2aCsGpjabyGrKPQ6B0TD+C+h5D1z3M4wVpcVs/BTWvOv0NRQ0Go1a+qU4IurS4t0ZvRv3prFvYwrLC1my/nV2ewghqVVoa9FpTquDCTPRBUTRv1iI/itTVlZrIACiW6KfwQ+z1eyWEF1YXsi8o/MA1HIvEBl3Hw76kM+GfOZQcqkICXMOzcFoNtIqpBW9GjnJkvz3aTDmQHgbIZROmAnewXBqF+z51eGuNZV9KSVuIILIfzn4CyaLib+OCrfGNc2vqfU9uovyPupL/NmetZ0dWTvU48GetalruW7BdWeV67E1YytnSs/gqfOkoU9DtBotp42nHba7xFSiigL24k9Noc9/Hv6TFSkrOJZ/jFWpq9Ry0Xs73svNbW4GxKJWXcZ2Csq+2iGsg7o4oFz766v0SxmPKecl+3JZhYLyAu5ceidHco/gbxBOsC0ZWyi3lKtlq0GeQWoOlSI61La4klOao4qXyueroIw77Cf6ymJgoamQO5fcSWJmIg+tegij2Uj3iO5MHzqdxr6NSS9O555l96juJUUoSghOUIX4C93xq2pHzbpQbCqudgwoDQ3sHZvufg/nm1Upq3hq7VNMmj+Jnj/3pNcvvXhzy5ukF6Xz2Y7P2HV6F/4Gf57r8RwgOg0q3U37RYkw5HCfcPW92Xf9KiwvVAXCtqFCiD6X3J/v93+PDRuDowerCwxj40QQ/7xj85xmOFptVv6pWCgftXsBzLmRNqm76RrWEbPNzNd7vgaEsK2cm5W5loIq/mT8PxZ/CgoK6vwjkVyKKKsTRrOR1ze/zm2Lb6s1/8Rqs/L9vu+ZNH8i+4tTMWs0LPDz5ZpDX7H4xBJ0Gh2vDP0Ew+SfQO8Fh/4RDhe7k1aQZ5DqHFFcA90iuqkD4m2Z25hzQAT09jaW0qDHPRDUhBqJHSDEk4btxAD2zzvg58lgzCM2MFYta6ma/WM/UD2ce1g9uZ4sPEm5tRxvvbcq9NhjX7oGlTblEK8QDFqDQ3hmy7JyIUwBlBcBGohoBz3vFU6f25ZCwlAh8NSEVgdXvicev3sOHF9b8/1doNFoHNq9KxOOmMAY1Q1TVRBZlbIKo9lIlF8U7RvYiQXlJbB1Juz8xa0SDD8Pvzq5hpSV3UXHF1W78B3MOejQRYczxzi+6GG2eHthQMuTo2aBVyBk7eNR/1bqSrsyMbUv5VGe22Q1MffI3Brb6iblJ3Gq+BTJBclulaOlF6Xz9NqnKSgvIMgzSHVv1FfJTVUU109CUDxaGyTm7CNpxgAWrH6B08bThHuHu9WCvSb0Wr3DQG9q66luhzu7xGYTpTy7Z4NGx+BRnzMwQmTexZeVwayRcMZ51yKVkhwhrFrN0OZq6H1/9eNKqxPH2+Qf4YEdontgeSHMvRPm3l1NrIbKfeVkwUl15dBVpy8Af4M/XrrKcrSO4R2rdZZS9u2DOQcxWU2Umkv5br+wad/Q+ganmUn2gpBeo1ft3TVhX/alZI8ZtAY1FyilMIWN6RvViZCz9u8nC04y9d+pah5YTmmO6uRQSr6UgTJUhvFuOrWp2nFbWF6oTnITghPUstq9Z0Q2SfsG7cX3uPJ1ITpodDDhayFEKHScIlxAACtfFZlANVD1czqbsGd7dFodE5pPAOC3jE3s9hSr3Q5Cqm8oTPqOQUbhovz78B8kFySj1+jVvBoQ5+O6lH4tP7mcEnMJsYGx1UScIU2GVBNzFReVxSb229va3Vb9WD2ytOKz1sJVn4DOIFy1fR8Wt++b53D3mgKflWuqsq/OPjibBUkLyCnNIcw7zEH4Old6NxaNGDa
e2nhWAkdVlO5pID5nxTWTX5bPc+ufY++ZvWcl2ivlImPixrBs4jImV5TE208UlcWmEK+Qak5MxeFo7xIsMZXwxa4vAHH+fbbHs9zY+kYe6vwQd7a/k+iAaAxaA0az0W13d4mpRHW0OhMqhzQZgkFr4EjuEZcd5NzFarNyKFeIP8p5KcxbiD/2zp9XN77KgZwDhHiF8MOoHwjzDsNoNpKYmehQ8qWgXF+rNuSoiuL6aezbuFq3SGfOH+W1fA2+FJQXcPO/N3M8/zjh3uG81f8tekf2Zu7YubQObU2ZpYy5R0WDDkWwaxPahsa+jYELm/sz++Bsev3ci893fn5Wj39pw0tMnD+RlScrx86K86dDeOWYQMlfOp5/3GE8lVKQcl7KKp2RWZzJI6seYWHSQg7kHMBoNmI0G/npwE+M+nOUmon4Uu+XmNxyMmPjxmLDpn7vVfd9cOz6pZR8hfuEq9eUsxV/7LP+bm13q/r3K5pdoR5zzp571+ldpJdk4mu1MrAgFw78DfPu5q6jYv+fc2gOG9I2kJSfREphCgatgT6NHbPUujfqrm67qxy3Sxm3xJ+goCCCg4Pd/gkJCSEpqWbLuOT/J0tPLGVD2oYLvRlOsdls7MgS5QiTmk/CW+/NtsxtjPtrHM+te07N3an6mEdXPco7296h3Gqib4mR6UYvRjYdrk6sb25zsxiotBwFk34QA/ddv8CK/6nPo9FoqnX86hbRjZiAGJr4N8FkNfHzQWFrH2PSQb9H3HtTjdrDnSth8HOg84Aji+GP28Bq4c72dwLiO1Hqr5Pyk8RgXKtHp9GRX5avTuaVVZ6EoASH/AyFqtuvPE4ZNASnJDKkxEiIxUK3VpPgmTR4OhXu2wZPJMHd6+CKN6BZ/9pFH4cX7gJdhaWTfx5zOlF1B/t272pgYkCM6hKoGvqsrPSNih0lJg4mI2z8HD7qAAsfEYHbK19zO4TVXa6KuwpvvTeHcw87DOYWHV/ExPkTeW6dWK3BVAq/TWWzTkxwOkd0JSqsDfR7FID4DV/xWKcH6R/VXxUCo/2j0Wq0lJhL1MHmd/u+44UNL/Dxjo9dbpO9rbe2wFyL1cIz656h0FRI+7D2rJi0gpd7vwyIfae+Wsnao6wmj9YE0L9E5A/8bkxmVkVo4I2njuPxRhN4rTF82l2IpIufhay6DVqU0OeGPg3VltqA2DfyTopQ3v1/w8o3YPb18EXfapNIB1a8Cjt+FBPQCTPQtBnLi/3fYFzMKO4xNBKOt0VPut7HbDb4axrkp0BIrMgIq+3YCmoCNy+Agc+I1931i9NMrUa+jfDSeVFuLedEoRBGlX3Gmfij0WjUcwFAl/Dq4ffR/tH4e/hTbi3naO5RPtz+ISmFKTTwbqCWbFTbDr9GqmOod2RvMUk8sQFmjoBPukDGnmqPsRd/FJEBcBCA7cs4qk5MThWdYuq/UzlVfIqYgBj1XDpr7ywKywvV1UKlZSwIsctb7022MbuaDV45t0b4RhDoGejgkgTocGw9vN8K1rwt/jDwaWjipPFFz3tgwJPi93+fhlzXDr7mwc3VCeK55P3YMz5+PHq07PTQssRPOBmVzngqUV3p1X4qHlYbBRYRlN+5YedqWTZKi2p3xB/lPn0i+7gluNq7SJoGNFXzK1RMRlhQIfL0uAei7PbV1hXt30+sdyinduX8SS1M5UjuEXQaHS/2fpGmAU0pKC/g9c2vAzAuflw1EfRcaBfWDl+DL3lleQ4BqWfDqaJTqkijHGPK5PCTHZ+o5eFrUta4XZ4HQuRYkSLcAsoEUhE77J3IzroaKijboyzCgHAJnCk9Q7R/NA92fpBrW17LE92e4LZ2IvDVoDWo5X/KMVcTWzO20m92P7r+2JWBcwaqrmz7CXCgZ6Aq7DrLP6kLSXlJGM1GPHWe6rGpuCiVc6vNZmN9utiOd/q/Q1xQnBoCvj5tvbpIZS/+KMKqfUm/09evmLg3s1LtvKk4f+zFH0Userv/2+pxrtfoeW/
ge+p2+xh8uKHVDYBw1lusFlX8aR3amsZ+Qvyxd+amFKbw8MqH6xz4Xhtmq5mk/CRVzDNZTby66VVe2/wapZZSh4VPdyk1Vz5O6aCqZOQAdAzrqN430DNQPa8ppV8phSlc/ffVXP/P9efkPnKXb/d9q3Yh/XDgh/w17i+mD51Oz0Y9VTF8covJaserx7s9rgqQLYJbOOSoKcduYkaiuuioNqMIjFPda2cb+jz74GxMVpODGxbE56iUndlnXSksOihcmUOKS/Aa+Y6IhwiIpGdOOpM9xTzl2fXPMveIECN7NOohynztaODdgLjAOGzY1CzBywm3y75+//13VqxYUevP8uXL8fDwOJ/bLLlE2XRqE4+seoQHVj6gXqwvJk4WniSnNAcPrQdPdn+SP676gwFRA7DYLPx17C/GzBvDx9sdJ8CLkxez7OQyPDQ6ns/O4fPsfHqP+4a3B77HP1f/w8eDPub+TvdXPqD5cBjzofh97Xui404FivgAYiCZEJyARqNRBxoWbPharQzq85Qo2XAXnUGc/G5dDHpvERC6/GUSghPUFrJKTa2yctEjooe6sq9cxJR/lYtXVZSLuOr8qbAph3mHiS4pv97I+5mnWeXfk+ArPxSTUE9/aJAAPiHuvx9nDHkBfBrA6YNiwnwWAoJ95oeyomXv/LHPNsktzVUHgleeyRCdYN5tAYufhuIs8BMDJda8U+8CUKBnoNplRMm5MJqNvLftPfGSaWtEh4LFT0PGHrb4ie4x3ZUJZfc7ISASClK5saCIz4Z8ppacGXQGVcRT3q8ictWU92C/4llTPgrArH2zSMxMxEfvw5t938SgNdDYrzEaNBjNxhoHp2eDyWJSJ+O99y/hmkJRcvJzYCDJHgb8LVauycsRGVOmYsg+BIf/FeUzX/YVbosqoemumNh8IgOiBvBy75cr81NWvAqvRcCH7UQo7683wuo3RR5X5h6Ye5dTgYJts2BtRfnOmI9ExhdiUPK/AW/Rdew3oDWIINrD/zrfoG0zhdNQ5wETvwWv6p2EnKLVwcAnYWSF2LD2PeFos0On1amr70pHEGU101nZF1SWMIDzzocajUad1H2952vVdfBK71eqDc7subXtrYR5N+CWhn2EqDZrJKRsgjNHRcB1smPOTpR/FBMSJnB7u9tVxwZUngOO5h51mMhVdaQpuS1xgXHMumIWd7a/kzDvMDJLMnlxw4uUWcqI8I1wmLR66DzUld+qJTnKar8iRLVr0M5h5b3dwWWiHLZhO+FCqRBwnTLgKWjaB0wlQoR2ce7RaDSq+6dzw87nlPejEOYTxiCbcPxk64QI46yE0qfj9fQorQwBdeZ8qYvzRzn/OBMJnNHQp6EqNt3S5pbqjrLN04VgGhAFg591vC04RpQa26xiRbkCRfzJLct1uLtSMti5YWdCvELUbnWlllI0aLg64erqG2gxQWHGWV03DFqDuuJ+rqVfsw/Nxmqz0iOiBy/0Ejkui44vYnHyYjWHy1vvTaGpkK2ZW2t6Kgd2n95NtjEbP4Of6nJTxJ8DOQfUBYCavteBUQPx1nuz78w+7ll2DykFKczaOwuABzo94DI8W8l7qi2HJKc0hyfXPEm5VbjUzpSewWwz0zSgqYNgDHYOiCoNMvLL8ll+YrnbzRkWJQvBuXtEd1UQVM6lipM6oziDgvIC9Bo9HcM7Aqjiz7q0dQ5t3hUa+zWmVUgrrDaryxJWgONZwmnYLOOgWAAxVR6jqvhT0a3OaDaqgk27Bu34ctiXTG09lQ8Hfahul8LwmOEEegZyqvgU69LWOYo/Tpw/3+/7nmUnl/Hlri9r+cTqxvTd0xk7byy9fu7FTYtu4uZFN6vlTCDc3faLT2WWMq75+xruWHKHy0WprRlbVTFp46mNpBamsu/MPiw2C+E+4Q5iCVQueiilXzN2z6DUUkpWSZZDCPbZkF+Wz+1Lbmfa8mnM2D2DLae2OIiy9u7UBzs/yJCmQ4gNjKV3ZG9mDJ/BnNFz+F+
f//FktyfVxwR6BvJqn1cJ9Qp1KKkFUT4dExCD2WZW3dX2nUhVQTf3kOpC/OXgL1zxxxW1CkJGs1H9buw7fCoopV//JP1TTXjeeaKilMs/VjSGGfyccMoCjx7eQqxvJNnGbNVVPCh6kNNtUM6jl2PLd7fEn6ZNm9K/f38GDBhQ68/AgQOJjY3FYDj3gYTk8sFkMakrXWWWsnpX9OsDxfXTtkFbPHQeRPtH8+mQT/l51M/0jeyLxWZhxp4Zap1+iamEd7a9A8DthUYmFRahGfi0KF9COGEGNRlUfWDZ+Saxqg7wz+MijwMcSqm6NuyqumvsB8bD8MW7U/UToVtEdoZxFbkQ6z+CPb9zf6f70Wv1rE9fz6ZTm9SBwcDogdUG36rzx4X4U6PzZ90HYCpBGzsQzfgvQVvPcWPewTDi9Yr39qFY9bbWzfJuv+pvP4BSxB97UWPpiaWYbWZalZUTu+5TMQEvyxc5HGM+gof2Vm7PmnfOWpByxZSWUwBYkbKC9KJ0vt33rboiZ7aa2bTpA9j2DRY0qvijugkM3sI5ACLgt0oAuX2Q7/H84+r3fjz/uNNuLOA4kK7qkLJnb/ZePtsh9sFnejxDdICYbHvoPFRXiLMSG2f8uP9HHl/9OPOPzaeg3HWp8c7TOzGajYTYtDQvLaFPYxHUaanIO5rcYhJ+92+HB3fD/dvhhj9FKWH8MBHwuvotIQLt/bOaAFKVBt4N+HTIp5XtmNN3CuEEhAATECUmjh2mwIg3IHagmNT/ehOU2jkGDi8R2S4A/Z8Q54xqLxYPvaaJ3xc96TBQB8TkcZlwVDHsFWhUpVW4O3SeKpxARZkOQrWCUqal5MbUlPkDlS5AD62H+tiqKOLPkhNLALi2xbUO5VMAZO4Xjqjl/4Pfb2X4Py+x4uBuuv5+jxDVNFrocrMoXysrgB/Gw4EF6sM1Gg0v9X6JBzs/6PC0ivgz9+hcjGajWo6ZXpTuMPhXVryvir+KBt4N8NR5qgNUJUy8b2Tfai4UNY/llOOkXDnHKudcg86gCkWRJjMNAqLhtmVw91qxL9R0/tRqxTlI5ymE/j2/u7zrbW1v45Y2t/B418ddP19dyNzHNRmVbqMQrxD1uuBAwzYM1FaKbtW+Xyo/iyM5tTs0lAlHbd0SFTQaDc/0eIabWt+ktlFXMebCOhGCzeBnwcNJSW6bChfa/nnqn1w5f9RratRAQDg3Q7zEYkfvxr0dwscpL4aNnwmh+L0W8GZT+GakOI7L3W85rJZ+nYP4YzQb1YnilFZTaB3amj6RfbDYLDy++nFs2BjZbKTaPMI+96M2FGG1f1R/VaSJD4pHr9FTUF6gigpH88V1pWqwPUB0QDRfDv0SP4MfiZmJTJg/gRJzCa1DW6uuBWckBInxS00dv2w2G8+vf57TxtPEBsay9JqlzB49m48GfcTXw7+udlzbdx20Lyd7dt2zPLTqIYb8NoRJ8yfxxc4vHMuy7bDarGqJy5i4MerfFdeFspimuLlig2JFlhbivKLVaDmWf0xd6LB3/gCqU8KlOyn7KEkHRDflWJNJdIS0O+dX7faljJECPQMJ9gomwCOAx7o9xoDoAdWe2lPnybi4cQB8tOMjCssLMWgNJAQlVDp/7DJ/9mYLEWprxtZ6KV1U+Pe4WCQxWU3syNrB7uzd+Oh9eH/g++g0Ooxmo4NQdzjnMIdyD7Hp1CaHwGp77IP+QVw71JKvsOrXXKXJQWJmIimFKWrMA1AnAdUZq1JWsfnUZtakruHjHR9z25LbmLxwsroo89OBnzCajbQOba2eI+xpHdqacfHjqgmnvSN7s2ryKsYnVHffKvMTpSTSXvxpGtAUT50nRrORlMIU8svy+TDxQ9KK0vh056c1vpf5x+aTV5ZHpF+k06y/Xo17EewZTG5ZrkPppyl9J0ctYoGvVZ8nKp3OTXtD67F4Wy28VYzDYoersltlzHw5hj67NQM7fvw4oaEu2jI7Ye/evURHR5/1RkkuP3488KM
6YAbOWeE+HyjiT9VVi3Zh7fhi6BdM6ygmWq9uepUjuUeYvns6WSVZRGm9uDU7A8JaiTwNdxjwBLS9RqweLnwErBYH5499/kLXogL8LeICOKb7I+cmnLSdAH0eEr//dR/RxflMai5C1N7c/KZ60RoYPVAVeZRBUtUJSlWUQWxmcSYmi0m9iIYZ/CpXSIe+BPrz5AzsMBmueEv8vvkL+OteyEsRE/Cjy0XZTQ0on/+u07soNhWj1WiJ9o9WS0TsM38WVgiAo4qKIaafyNq4YyU8sFNMOvUeYmKuCEBr3xVCnwvxpK7EB8fTo1EP0UJ4xyfqiqfSoWPtHrGicajHrRSYS/A1+KqTagDaTwKfUDHAO7rU4bnVjKP8EyxJXqL+vcxS5rIu3757iquyr4M5B7l32b2YbWZGxIyoNumy73hXG4dyDvHW1rf4N/lfnln3DAPmDODhlQ9XaykLlROgXsWFaA0+6Ee/r662e2g9uL7zNFESFdwUQuMgfohYLbr+N7hmFviGQ/ZhkZvzTjz8fpso36oNa8WxbbOKrJ3nsuCRfXDnKhHU2+te8fyB0aKz1l/TxPPOmwZzbhBduTpMgUHPuH6N/o+DfyPIOwEbqpTl/fuUED4adxZur7NB7yGcJCBE1TLHiUu7MCF0q+JPaUXZl1eoEDsz9zuUxigdv9qHtVcnLlWxF4WaBTbjka5VSly3zIAveonPa+27sPcPyNonMo08A6HlaLhngxBAbvxTtBi3lAnHVWrN9m1F/FHExInNJ6oh0EonM6h0xdmvrk9sPlEVAMCx5EtBKQ9JzEx0WK1Uzq32Adj9QsXEoXtpqRDvoru5Xw7bIKGyE+K/T4m8ICf4efjxSNdHiA1yTzSpla1f07O0lCiN+G7bN2jvsgxrcMJYgi0WOuJFs4Bm1W5Xrj9ZxixyS3Or3a5QVF6kCt91eR+jY0fzeDcn7dXXfySE2LBW0H6y8wcrpV/J69T9O9BLiFlGs1F1AuSX5aulucrqsqfOk0e6PEKYdxh3dbDr8LhtlhB9Fj8DhRUT4bJ8OLlBiFGLazgPVEGZ2G3P2u70nOgOC5MWkl+WT6RfpJoJd2c7cR6xYcPX4MvjXR9XXS8rU1a6NVG32WyqQ0Z5LIgFAEXkUQSO2hxdnRt25uvhXxPoGai6yR/p8ojTsnQFdVxTQ9nXjwd+ZE3qGjy0Hrwz4B0ifCNoE9qGwU0GV3NygChzVcp7FBHscO5hVqeuBkCDhgM5B/h81+d8v/97p6+5M2snaUVp+Bp8HcJnG/gIIV2ZwCuOCcVVAUKAUXIHleyYquKP8llvTN8onMH2nDkG3wwnSSsWgpq1rtjv176ndrVThPuC8gKMZqNaGu/s2HWGEmqudowNbo5BZ1DFH0U0K7eUczBXlFvnleW5VZ7nDqeKRCahVqNl9ujZvNL7Fe5qfxc/X/kzw5oOU0Vq+3HesfxKR/PvR5yL6Ir4MzJGuOjnHZ2nziWcij8Vzp9DOYd4f9v7WGwWtausK4HJXRTXdIvgFoyIGYG/hz9Hco9w+5LbOVlwkl8qckPvaHfHuecRVqCcG9alrcNqs6qfWVxQHHqtXhVbD+Yc5OcDP1NiFuejVSmrHFrC22O1WdVGHDe2vtFp1p9eq1ddpfbiT9KK5zFpNPihJTKuigg89GXQedDy+EYejBZRBx3COjg9pkEswmvQcCz/mOq8u1yQ3b4ktZJtzHY4uOpKZnGmGsKnTE7PtRb9fLA9U9TgKuF4Vbmz/Z30adyHUkspD6x4QL2IP5megqcNGP2+KLFyB41GCAZegZCxG7bOdFgBVMUfcxmGxc/wYdZpXvZvR7fWLgajdWHICxA3RJS5rHmbO9vfiY/eh2P5x7Bho3VoayJ8IxycP8WmYnXir5zMqxLqFYqXzgsbNk4Vn1JPluHZx8FSDhHthevhfNLzbtG6WslV+rAtfDUAfrwaPmwvSkJObHDqwlEmfkpgZ2PfxnjoPNRBVGZJpjro2V7RdW2
kPlSIBD3vEc4qXZXshl7TREt6NLB1hhAQ3Cwhqo3rWwoL7oKkBRjNRjqGdeSJbk8AsNZTj61pH7ZEiYl014ZdHXMl9J7QscLCu3Wmw/Mqx2hyQbLqvlBwFmhpNBsdRCFnZV97Tu/h1sW3kluWS5vQNrzQ64Vqg4+qgeE1oawatQppRVxgHGarmWUnl6mrT/ZsqAjf7W0sFfbf4KZc2+Jaukd056EuD7l0qaDRQNur4b4toswmqIkoC9v7uyglSlpV80Zu/06IOR7+QgR0NtjyCYFJ3wlX0IH5oixs549CrGg+svaMHk8/GP6q+H3te3DoX7FvH1kK++aK42DMR6KM62xpPxlC46HkjHCK2d9UMeE4nHOYUnNpZdnXkZXwRR8h0nzaReQcIVaf/T381a4dzmjXoB06jQ69Rs+b/d50DB7d/avI9QLh6ul6qxBGrpst3HZPnYBrf4Lwivwag7fIWWs5Wohwa96p8a0q5wCFMXFjiPARA0NFlLTZbJXOQLuOOD4GH25sfSMgBqaK0GNPQnACIV4hGM1Gdp7eCYiBrv2ESOHak/t4OyubR71iK8WGutDnQQhvDSXZosGAubzuz1EXSgtg1xy0wJ0JEwFqdGA06HA9i1LS+frEUTTF1QfWvgZf9ZxQ0wRQDRj1DifAw82yRlcUnIJNFfv4kBdcHzdOSr/8Df7oNOL+eaV5gJgQWWwW4oPiVZcjwNj4sayYtIJO4RXXw11zYMFD4hgLjhHH7NNpcPf6ytLLxO8gzb0W0U38m9DYtzFmq7layK/JYuK7fd/x1NqnuG7BdfSf3Z/3t71f7TmU/JLrWl6nTr46N+ysNm64r+N9hPmEibwMvQ9ZJVkOXfpccTj3MKlFqXhoPegb2dfhNkXQOJhzkKLyIlXIcOb8UWjToA2zRsyiVUgrJgd3oMevdwmnsYv9XRm/JOcnO80pOpx7mPcTxefxeLfHXS52VUURVxRnzXf7xALM8KbDWTFphRpofTjHuet9fpLoTjS0yVCHc57i/Mkty8VkMakBt/biD1Dts7TvaAhCQFPyI9emVWmMseUrsktzOaXXo0FDi4EvQGiCaBSySYQg+xn81O3KKslSF3OrdgVzRUxgjEOWmVIyrIg/WcYsyi3lHMo55JB9s/lU/ZTcKG7Ltg3a0ia0DeMTxnNfp/vUfctZeX9SXqU4sezEsmoi9MmCk5wsPIleo+fpHk8T5BlEVkmWGvjvTPwJ8wmjaUBTbNjUoOQnu4syq+2Z2zFZzy6zElC3r3t4J95tczc/j/qZMO8wjuYd5Zr511BoKiQ2MFZ1gdUHnRp2ws/gR05pDptPbVaPWcWFqeT+bM/czo8HfgQq9+nv9zkXQv899DvJBcn467wZ5xfv0jXfpkGV7qCpiRzMSKx43VbVBa6QZqKpDHDTnsV8PvBD3h3gujNmkFeQuhh1NnlQFzN1En8KCwtJTEykqEhYqrZv385NN93ExIkT+emnn2p5tORSxGazcefSO7l2wbVqOJYz9pzewxV/XMHouaO5+d+beWL1E7y99W1m7Z3FixtexGg20j6svZp/U9X5k1qYyprUNZXWemMezLoSPukq8lT+eRyOONZTZxZnUm6pnwFtTmmOetKv6vxR0Gq0vNHvDcJ9wkktSsVsNdPfomeg0QgdbxC2wrrgFwaDK9qTr3iVBI9g9Bo9jX0bi5WutMSKbj5H6a4P4upRX9SPWq/VwfCKsOkDCwgtN3Jz25vVm5VVJ2XQczzvuPp9hXmHEeQVJAZWpY6lNhqNxiH3J8tYUfaVXNHauMvN/Cd0mAzX/iwygLR64Yxo0BywiZKQWSPhh3Ei1NOOCL8IhxVDZVAT5Bmk5kOkFKbw5w4hZPY1ltLwyg/EBLMmut8B11Tks+z/C36cAAU1dBqxWkXpQS1lYv2j+juUVDzR7Qm6WfR4W61k6fUcHPQEmzKFXdW+05qK8n0cXQa5yeqflUFQYmYih3MPo9fo1YGl/UqYQlJ+EjZ
s6md3svCkQ4nMzqyd3LH0DgrLC+kY1pEZw2c4naQpZY+1OX92n97NqpRVaDVa3uz/JvPGzVPDgJXsFIXc0lz2V6wi9vKJgu5ipT3UO5SZI2aqk/Ua8Q4WE8EHd8PtKyqE01L4+Vo4Xl1sAqA4u7K1+uBnIaCR6+eP7FLZpUnnAe0mwa1L4Lpf3HPJtZ0g3GfmUvhlshCQlLDanveI0PdzQaevLBPc8Ik4N1fQyLcRoV6hmG1m9mbvVQefoWveE24cEC6KX2+Efx6nc0hrNly3Qc0ac0ZD34Z8NuQzZo6YqU4QACFszb1b/N79LrjlHxj9gRA5WoyEoGjnQplOL1b80IhspEzXE1R792V8UDzNg5tX7pcV5YhZJVmUmEvQaXRE+zmKRVNaTmFA1ADu6XCP04wirUarikKKIy2tMA2j2YiH1qNywpZ1AN3OnxhZXELgcBfCYW3oPWDMx0IAPLgAfp5U7Zxdr+yeI8TRBi0Y3/NJNk/ZrGaTOSU0Dt9GnfC0Wl2GnruT+2NfZnDOrH5TLIpE9xT7VE20Hif+rSj90mg01Tp+uWoj7MCJjfD3feL3XvfBfYni3OzpBxFtocddFQ4kmxA+3Shn1mg09Gxc6TKzZ8mJJby77V0WJi1k75m95JblMufQHAfXTm5prvqZj4sf5/D49we+z4zhM9T8Dw+dh1q2p4Q414TijOkd2bvaMaKKP2cOqteaMO8wh1wuZyQEJ/Brm2k8t2Mh5B4X594veolrWxUifCPwM/hhtpkdJvpYhTj89/zbMVvN9PMIY3Jxmdvl40OaCvFne9Z2DuYc5J8kkZV3S9tbaODdQC1dcXjNCsosZSxOXgw4lnyBcPXoNWLh5kzpGbfEnwjfiGrdujQajSpQrThR5Xs6toI9XiKrKzYwFn/v4Mqsqw2fQvEZNBqNQ7t3pSOqsljkDorLHCrLe4M9g9UukBnFGWqHQ4X6arWtOoCrdANUcFbebz/eMVlNDiVagCqidWrYiWCvYPW7s2HDoDU4Xr/ssM+76xPZh2uaX0OARwAl5pJzqopQ3KkhR1fCp12IObSUb0Z8o3aDA7i93e01OuPqikFrUAPFla5/9sdsqxCxEPProV8pKC8gJiCGt/oLh/78Y/MduthhtVAw53reXv8SADdln8J35nBY9qLT8bCyD6n7TOIsDnqIBfhWDV0sNPd7FHzD0Jw5Rr+1nxPhGez8fhUouXhVF0IvddzeA9asWUNkZCTdunWjadOmLFmyhIEDB7J161YOHDjATTfdxIwZM87ntkouAJszNqurbq9sekW1M1Zl1r5ZpBWlcaLgBImZiSxKXsQP+3/g/cT3WZ++Hg0anu3xrHqwHsk74iDcPLTyIaYtn8YLG17AbDGJlcoT6+DMEVGWsuUr+GkCHFpESmEKD654kKG/D+XFDS/Wy/vcmbUTEAn1NQ00gr2CeW/Ae+g1ejw0Op5MPykmh8NeObsX7norNOoIZfk0WvsRs7o+w1fx16P5+36YMaTSOTD2M+ESqi8athETRpsFts1iauuphHmHodVoGdZEnOwifCPwN/hjtpnVgUnz4OaiROWTLsJVc2q3w9Paiz+K8ycs9wQYfKDdxPrb/tpocQU8dgSez4ZHD8J9W+HezRUlWV7CtaE4CCqwb/UMlSUdGo1G/T0p5wh/VYQyXh3SAeLcXEFpezXc8Dt4+EHyWvi0m5hIW0yQfUSsVH4/VnQKezUc3oqBGYPE4NWFCKTT6tSckbFxY2kX1g7PrV/TwyiyX1bk7VPdbFW7BwGixCluMGATq8pV3neRqUh9rOKGc+b8UUq+2jVoh1ajxWg2OpTIvLb5NYpNxXSL6Mb0YdOrdfVRcNf58+kO4foZHTtaXV1SBllV235uTl6GDUgoLyds4HPVnVl1QaMRXX+u+wUShotJ4k+ThGNHyQKymODgQpg9BUrzREBvtztqf+5ut4lud48cgAkzoEkP9yf8Go1o0d77AXGcpW+vDKtVRJtzpc3
VwkVSmg+r3rR7aY1a+rXm5AohAtpsBIcmCMfbY0eFOAPiHP7tldVEV2f0ieyjZt4AkLIFfpsqzlftJwuxrC6CSIP4SvfMug9c3s3Pw4/gisGgkmWiCKyKKKlM3iL9IquVDPl5+PHpkE/V7l/OUMQfZUVbESwVmzw2Gyx5TrhKWo0R+8LZEt0NpswBgy8krRSOtWMrKvKSXoF/noClL4juc6vehH+fEWWHf94pJtBbv4bja2vPK7PZRDkeiJJJjQYfg0/tixXtRCkIe52XVDQP+Q/Fnx0/wnZRasDQl2rfv9qME//alX7Z5/6YrCa1s6lL8ScnSZwrLOXQ6ioY9j/n56hhr4hxQFoi7PjBrbejrFhXnUwqpd19Gvfhg4Ef4KnzpMRc4lCuq5xHm/g3qTYeUrpb2X+3irDhTu6P4oyxL/lSUMWf3IN1+17zUoSr1mYV3UJ9w0XY+48TYK2jq0mj0ahlZGpWnc0mSnRXvMqWUlFCeGXqATTzH4A/bnXLrRvpF0nLkJZYbVYeXvkwZpuZbhHd1O9BEUlOFp6s1tVpTeoaCssLaejT0KHkH4RgrHRPPJZ3jPRisXCkOCoUWoW2UrOkqpZ8KSiOjzVpayrH33kpkH2Y3Z5C/FED2luNFW7t8kI1A0sRfzKKMxw6orrLoCaDaOjTEA0atTNm1UVDJe9HycjalrntnLtgWW1W9XyrCBVVUd6HQ9lXxT6oXAt+P/y7w8KWUvKllPheHV8Z3t4qtJXL0mZ78efeDvei1Wjp2rArcG6lX8riS0hmxTjon8eJSd3BzBEzifSLpFVIK65odsVZP78rlNIvJfTZvvxW2U/NNvEd3tbuNro27Er7Bu0pt5arDkMA1rzLx1nrOaPXEWO2cKu2Im5m/UfielTlOqTMJ5PzkykqyoC9f3KoouFU1eNDxSsAJn4nxkpHl8GcG2s8voc2FZ0gt2Vsq/dmJBcSt8Wf5557jokTJ5KSksJDDz3E5MmTue+++zhw4AB79+7l5Zdf5rPPPjuf2yq5AMw+OBsQrWDNVjMPrXzIIZgNRA3w6hRR2/xa39d4u//bPNb1MW5uczNXxl5Jz0Y9ear7U7QObU0j30YEegZitprVC29aUZo6AJ53dB4PzruakoPzhVNi/HRhgW45mtM6LR8ve5Bx88aqK0yLkxdXq18uMZVQaq4SfloLiqjVyZVabEfH8I78POonfi420MRsFq11fd3PxHJAq4Mr3wc0sOc3Ov56O03n3V8xwLNBh+vg/m2QMKy2Z6o73SsmpYnf4qPR8eOoH/lx5I9qNwyNRqPWxyviT4J3GHw7GvJPiongL9dBYWXrT2WidLLwpCoAhFssQvxwt9NQfaHVOg7gw1uKfem62SIQdsePDqIH4FB6Zz+oaVIhTPyw6mlyNDZCLVb6j6w5sK4asQPhtqUQ1R3Ki8QE7+04+LSruLAlrRIOHMX2m75DDF5njYJTu5w+5bUtruWXK3/hxd4vQn4a7JtLf6OYXP+0X4T7BXsGuwzppuut4t8dP6g2+XCfcIdVw+Exw9UBuDPxR/lbq5BWqnimrJ4Vm4rVjII3+71ZY8cm+25rrtiasZWNpzai1+q5p8M96t+VSUNVS/3mPcJW3EvjLybS9YHeU5QSxQ8VAtCcG+CNSPisp2jHPXsKpGwWLp7R77svOEW0A18XJWi14R0k3HwP7ha5Y+FtYPyXwj1QH2i1leVlW6aLHK0KlBa/qw4LZ2iwDXS3/CvOL35hYuJ6/e9CJE/bBoueqNtrF5wSn7G5FBJGCCH8bHLP+lVkB+39A3KOu7zbVXFXERMQozoeqoqS6qTHzXKHqiiTj71n9vLJjk+Yd3QeYFfytflLMSjVKo6lcyRhGNy8AHzDRHe5H8ZX5CW9J77L9R8Jx8uqN2DTZ6LscPccIZItfBS+Gy3+rUkASl4nuuQZfIXz0l3aXA1oxPHiJJPNLedPxep8rH9TmHevKO9d+qIQ1d3BVAp/PyA+EyVnq6n
zCaID9qVfO8R5RhF/8sry2HN6D4WmQgI9A2kb6iTc/Pha+H6cKK1p3EmMdVzt1/4RMKhCyF32knAW1kLrECGI23fPgsqujaPjRjO06VCHTA4F5fcW/k3F/pHjPJtDoV9UP/RaPUn5SQ7ZjlVJKUzhUO4hdBqdOrm3RzmPZxRnqI6lWju4mUqFs7DkjBArpvwqxkwVLk9W/E/sn3YoY5wjuUfEfr34WUicRb5Wy0FPMXns3vo6MQbdN1dch+0D+V2gCFqKS/DmNjertzX0bYiXzguz1VxtgWPBMRFGf2XslU5dGUqZjCI2RPpFVnPPajVaNevJlSDTPqw9Yd5hFJuKK8upkoQ7bXdAA/U+4gm1wvEKkPgtlJeooc+ZJZmqCF4X549Ba2DWiFnMumKWQzh7Iz8xbjhVfErNj5vQfAL+Hv4Um4rPOSP0UM4hcsty8dH7OO0+CNA00LHsq8RUouYQ3dfpPrz13iQXJKv7Zam5VBVqFNdVfHC8WurlrORLoX9kf2IDY7mm+TXq9iii37mEPqvij9ksHJ/Y4M87aXbmJAvHL2TO6Dnn1tUxP00sxFRZwOkb2RcNlWNt+2O2eXBz9bbGvo25MvZKNBqNunA559AckUuWvI6dm97nV38xZnnhyu/weGAnjKwo1V7/ISx/2eE6FOodSiPfRtiwcSBxBjZTMQe9hIusqjPOgZg+YlFE7w1HFsNvN7s8p0b7R9MqpBUWm0Xthnw54PYIavfu3Tz++ONERkby5JNPUlBQwOTJlRf5a6+9lmPHqk8MJJcuGcUZqm155oiZtAxpSU5pDvevuN8hRHDZiWWUW8uJD4pnTOwYRjYbydQ2U3m066O82e9NZgyfwZRWojuRRqNRbYDKCX1dqrigNfZtjKfWwJqiZG5u1JDPO41hjqeG2b5e3B7kydDoKGb4eVJuNdEzogeNfRtjspoc0vaLTcWM+2scE+dPdFrPrWC2mkkpTFEHRYr44yrvpyqtivNokXlYqMcdp7j1GJdEdYE+D4iTtV9DMRBsPU60Zh//pRj4nQ9aXClafpdkw755NPZrrK7iKyiDb6V9bcLOP0VIcIPmoia8IFVMdis6DSkTpd2nd2O1WdHZbARbrND55vPzHs6GuEEi+wWE+8cuR8G+7EOd3KVuo+kB4fbZrRf7y9iowRhqKuVxRcPW4nsd+5kIXC7LF5O8uMHCKXHLIpFd8ughUQKg8xSBn99d5XQArtFoaNugrbigb/kKrGb6hYrBRKFJhPN2i+jm2ubb/ApRFld8Gg7OV59TGTzqNDoGRw9WxZ+k/KRqHb8UEVfJFIDKjl/7svdhw0aEb4QaGOkKRTjMKMmoVvdebilndcpq3tjyBgATEiY4CHVOw2ELM9hZsW2d2153dqUzrjB4weSfhJPMr6GYAJ4+ID5H33DhwrlnA0Q7Kbc7n/iFCZHm3g3QrHrg8DkRP0SUmNmsIp+kYj9QxJ/jFR02Qv0aiywjexKGiVbzaGD79yLjxB3M5aITWlGmCOC95hv3c9Wq0qiDEOxs1urh2HY81u0x5o+fr7alr1qO6CzsuS4oLeCtNitf7f5KDYVtEdICTm4SojCI7zG0HkqZQOSR3bZUuD2DmgghuuutwgLf6z7oeht0uUU0AxjyghCdut8pzg9oYNtMIQK4QukK1GFy3RyqAY0gpqJcxUlXMiUA+2jeUZedBpVcjrht38POn0T4+foPhaj+zRUOQmU1UhPhmxEinwuNuC6MrcMiZo8KAXrdh1Cc7VD2pYxLejfq7RhYWnQa/rxLiGp5JyCwiViQ8HAtjAPi+whrJcSijzuLDmB2Cy9VSQhOQK/Rk1eWp3bPMlvNqhivrJorK+QO4k9FhkarA/8KZ9i3YxyC26vi7+GvtmxXFoqcoTiDujbsKsrHq+Dn4acuAiih0C7FH1Mp7P5NuAnTdwhxefKPogzbKxBGvS1y7WxWEdJvN7lz6PiliJ7A9v73Y0Mc22FXvl/h1vUXbt1ZV4r
Jbw3Yu5nig+IdQt+1Gm1lrkyFgAwiH2pNmigfdlUmqYQ+K/uUfTC8Pfd2vJfRsaO5qbWT7pAV21Ct69exFViAvTpR3uYgjsQPhaCmYrHq4EL1Gr7n9B6MZiN6jd6xWx2I8OjdvwkXrBOiA6IdnC8Akb7iHHs497D62bQPa0+3hvXTalvJ++kW0c2l+KGcz1MLRaRDckEyNmyEeIUQ7R9d6f6pCH5WWrw39GnosI8+3eNphjUdxo2tXJeUB3kF8de4v3ixV2XlgiL+7Mjccda5P4ozJdhqgb4PCTehpRxmX49u4aNodvwIWQfq3nk2+2iFsN4OZg6DN5uIRcnVb0NxNqHeoeo4ABy7LvoYfFSB8Ja2t6if/5AmQ4jyiyK/LJ/x867ixcV38WJoMDaNhnHx4yodcD3urBSA1n0gxrl2KM66fcf+IV2vo1AjMvfiAmu5djbrX1Fa7wWH/hELd3PvdpqrpmTXKZ08LwfcFn8KCgoICREDOg8PD3x8fPD3r7Tv+/v7U1Jydl0FJBcnvx3+DavNqlpXPx70MSFeIRzKPcT03dPV+y1Iqly1UK3AJTligL/ydbFqcHS56PySvpNWerFicaAiRFqpm50YM5KvC6wEWCwc8PTgi5xtvLr5VV7b/BqbM7di1UCHcjMfZp7mK1s4I2JEWru91fjf4/9yqlik+ldtp2vPm1veZNSfo3h41cNkFGeogdau8n6qse0b8W+7a+qnHGvYK6JE6bHDohvQpO+gSfXA0HpFp4eut4jfq5xQFao6RpoXZArhZ+oCoZx7BYnV/L/vA6tVnSgp1t1QiwVdeBuI6nre3sZZ0edhIX5ZKiaXBWJwXM35U5gBs6fQpNBxVeDqno4lY3VCq4VON4i24lMXwOPH4Ma5winRtLfILvGPgBGvwYM7RSZMaR78MqVatyWVsiJInAVARM8HHAaHTku+FHSGyjbiid+qf1YGqd0juhPkFUSUXxQeWg/KLGWq7VzB3p6vDNyVEoLd2aIs0H5g4AqlZbbVZiWjSIQGWqwWXtv0GgPnDOS+FfdxJPcI3npv7mjnWErla/BVX1txERasfotjBjHp6tB+aq2vX2cMXsJJ9thheOSgmMDd8Ac8sl+4cBq4cFtdyox4Q3TUSt+hTvjb4onGbizZIMjFSnDsQBhY0TlswUNw+pDz+9nz75OQukW85rU/nbuTqW+F+2fHTzVOnO2p5vxRxJ+qzp9lL8P7beDfp+G0a6cKwLsD3uWeDvcwucVkhjUdxhUxVzAmvAf8OlV0LWtzNfS42+235RYhzYQD6KE9cNNfIi9pyAviPDP6fRjzIQx7WQhCfR+CUe+Ic7ySR7XsRedt4zP3i0whECJSXWlfkQOy8+dqk5Io/yi89d6UWcpUQdmeElOJej6KO75RrOQOf1WIVhotnNwIXw8RQd8Wu9KRzH3ifPr1YDi1UwgHN/wuOqTVxVXWbqJwm5QVwOq3HZw/SglE78jeYjy082fRcOCj9rB7NqARJXJ3r3VvgUdngKu/EosuZfmiFOfDttXcqwoeOg/V4aIstB3PP06ppRRfg696jlcW41TxJ207B5NES+wWxmLhYCxIFWWXNSyoKTleM/fMdOoQhUrBoabAWWXFXukKpJZ9ZR2ATV8Il85vN4vJ2p+3i/GHzlMIw8FVyp1GvQMNWkBRhihlrMjvUTt+Ze2B1RUdQke+zRYfX8Cu4UbsQLhloRD0M/eI5hEnXI8r44PiVRFhapup1coelXOGvTtqQ/oGzFYzzYObu3ToKk0JlHNPy1DnroZo/2je6PdGtbBne5TPfmXKSkzmUkhaxTGDgRKbGR+9j+OkWaOBDteK33fPVsu+FHdKlH9UpZhiLhOlo5/3FN/LwkddbkNVFOfPshPLsGEj0i+SEK8QujcSiyfn2mpbzftxUfIFwvHspfPCbDOTXpSu7sOKkKF0K1uYtJDRc0fzwXZRPtw3sq/D99wmtA3vD3xffU/ukhCcQKBnICXmEtW
dV1dyK8SfEItFCD9XzxCCf3mhGB/+fZ/4fr7oLcp0SwvEdXj9x/DD1WKR8debKp2Qs6+HmSPgs25CWLdZwDtEjJtPrIeVr4mogpWv088uV7KqYPty75d5tMuj6mcIIrbgiW5P4KH1IL0kkz+9tCR5GAj2DOLRLlX2nR53VkZrLH3BYeyglPzvNWZy0NNbff1qXRydETcIbvhTdES1lIsmMTMGiWxOO4Y2EaVfm09tJr+sdgfgpYDbVzmNRuOwg1f9v+TywmQx8cfhPwDULgWN/Brxcm9hQ/9+//ecKDhBRmE62yqsj6OOboa/7hOrbe/Ewdw7xYV1/oOi29IXveCrAbRO/BmA/Qf+oOz4WtV+2m/N53Q8fZzZBRrubXMLE5tPZFD0IHpE9ODBzg+y6OpF/Nj3HYaUGNFs+oxBvuICtzZ1raqU/3n0T/U9/HP8H6fvzWg2qsFty08uZ+y8sZitZsK8wxycHy4pzq48OShlM/XBubRwP1s63ywGd2nbnLavtu90obPZiPVqKAQL/4ZiVXryD8K5suc3+ONWGnuJ8jel1W1Ds0V04LrYzhVarWi3HRov8lG+HwvF2aqA4K33JtwzGH67BYoyaWrXBahbRDeXNfV1wjtIuDO8g1zfJ6CxcJj4RQhnydy7nQdQ7vpF2NJDYqH5FfSP6q/eVKP4A5Vdv5LXQbEo1bsy9krCvMO4tZ3Yv3Vanbp6Yz+wLzYVq5Ov+KB4ddCZUiDEH0UEVLpC1YRGo1GPv5Qi8fjEzERmH5pNoamQMO8wprScwi9X/qLaz+1RJg2Hcg5B8Rn2HPgVm0ZDlFcDdeX0vBHQSATExg89e2fKpYB/QxhaUQqw/BX4pCv+Xw2imalyUuiyexqIyXWzAWAqgZ8niwFowanq97PZYM27FSK7BiZ8XT8umKa9IbqH6Ka23XmnkaoognBGcQYmi8l51sX+v8VkvCBVdMf5rBt8PUw4DhY8IlZI7ZwTcUFx3NvxXp7r+RzvD3yfd3o8R9DfD4pJalhLuOqTi+ec2fNutUMK8+5RO7cBYkX4+7FCsIobLAKK60qb8cJBe+aIKCmwQ6vRqtcgJb/MnqSKc1EDs4VAg58Q0XvfL0Srh/eJCZDVDCteha8GionMW83E5OfQQiEQdZgi8rbih9Z927XayuYJ22YSZBXi1fH84+oEro+xHN5vLT67gwvEvt+oA9y+HK58r+bzf1UatYdpW0RDg6juYsKy8FHhYHKCMjFSFreUbWoZ0lJ1gzo4f4pOY5w9heSKoUjLoW+Kz8bDX0z2amg3PyZuDL0b96bUUsrjax5XxwAK2cZsNVvRHfFHIS4oDg4tgi/7wb9PwcZPRSmWMaci1+wZeGCH8/w9D1/hONR7wbHlotSRyslpWtkZSjQVIlyPu9QyHocGCY06wO1LoWFb4ez8bnS1DpkKGo2G9wa+xyu9X+GquKuq3a6cM+xDn5UyJyXzxRlK2Zf6GQXXUNJSC90iuhHiFUJOaQ7r9/4Ixlx2+4kFzHYN2lVvq92+osLj2ArCNaIkrrBcLEJVuqMT4cu+wkWlZAlt/w62zXJrmxTXb2aJEOQVN4fiJtuRteOsm7uUmkvVc4ersGcQ5xpl/JJckKx2EVTExzahbbi2xbXoNDpOFJxQs1Dt3V0O5KfW6hSr+vrnkvtTYirBaBEO/BCfRmK/NXgJcWPSDyJ7L6afEMiz9gvX+1sx8Fl3WPq8OD6OrxZzm+3fiViEgwsgZZNwzzUfKRpePJEkFi7HfFSRCVUEq9+i/6pKN6298wfEovrNbW927DgLDGrYjTXWSD7PyGJqQQk9Qtrwer83nLoC6f1AZbONP+9UhWjV+ePpwcFGQsh25YxzSkwfuHOleG8JFd0pN3zieJfAGJoHN8dsM7MqZZX7z30R43b6pc1mY8iQIej14iElJSWMGTMGj4pwJbP53AK5JBcXy04u40zpGcK8wxwu1AOiBtAnsg/r09bzzqbX6Jx5FBvQxVhK4+O
/Oj5Jw7aihKkoS9TzF2eB3otWnl6AiUMaC5t+vYbSiHDCzWbhKglvQ/TkH7jH1UC/VZQY0B34m/bHt6gXsW0Z22jg3YDdpysDiFecXIHRbKzW9WBd2jqMZiPh3uH4eviqqzCdwju5J2ju+FFc4Bp3Pv+ty883fmFilXn3bHHCm/itw832K1FNTWY8+j0uJoAKzfqLrIK5d8O+uUTlnQC7jLuwwKbQyY2OShcCr0BxYZw1UuRV/DCO9tfMxKA10C2iG5rlL4uSKw9/moydAUtuAODqhKtreeJ6JqCRcD3MGikuxn/dC22vESVFuckiI2TPb+K+Pe8FrZYhTYYwY88Mmvg3UUuxXBLcVOTNZOwR3ZA6Xc/A6IHVQkpjg2I5lHuIY3nH1NsUIaiBdwOCvIJU8UxZpd9zWgxs2zZoKyZ2/o2Es8kFkf6RHMs/prosFPfe8KbDeWfAOzV2qWge3JylJ5YK8cfkwc4K10/HxufZQff/jS63ws5fhGB85ghoDbT1DCXJJjpJhXrVkH+m1QkhZ3p/0ZXnn8fET3RP0Zms1RhRTrbgYZE9AzDkeWjuumV4ndBohNMtZbPo0jTg8VofEuoVipfOi1JLKScKTqhipyr+5KeKBgUgOrWVF4njKHWL+FHY/5co+azqXkpNFGG1eSfEJHvyj/WX1VRfDH9ViOQH5ot8lVZXQa9pQhwvzhLB5hOcT4hrxdNflDnv+ll851UCrgdGD2TX6V0sObGECc0nONx27KhwqMSZLTD1b8frcUBjmPS9yC/653Hh3FDRQOurYNCzEFaHiYIzYgdC/DA4upTAk+L7XpmyEhs2mvs2JmzefUJsDGslXrPllWLSdLbinlYrnqPFKLFKf+Bvsf/ctaaakKSW2OcI548iAtl3IVIyObKN2WT/MZVTZWewaiII8QwmrPPNYjuv/gpmXyccwo06COdq1c3SaHmt72tM+HsCR3KP8N6293imR6VYtKIiEL5dg3ZEeIcL4TAkttqil73409CnIf7H14lQVqtJnCciu0BglPjeYgeKc0pNNGwtHEB/3w8rX4WQZgS3u4YGOm+yLUaOBYTTbvDz5JXmqa7RrhFVhJjgGLhtiXBD7JsrwqG1OqcdTJsHN3fZHl5ZQLF3/qju2Col9/ZUFdRbhbYS1+vfbhYLQwMqRHU39imD1sCVsVfyw/4f+PvIXAYCu0MiwZrrPA8nNA6iukHqVhqmOWYPNgtoJsq8fhgv3Gi+YTDyLZGptuJ/4rhr2FYEz9eAEvisoGRkxQXFqWP83ad3V/9e3GB71nbKreWEe4fXmk/UNKAph3MPc6LgRDXnj0aj4dmez/JA5wfYlrFNzR4cED3A8UmMubDqLXGs6AyijFQJtq+FbhHdWH5yOdsytnF7u9vr9D6VaAZPqxWfFqMq9wW9hzjvtK4QI4154py4daYY9+o8hCiUMFxEERhzxY9WJxyR3sGiQYz9eTI0Tvx0qjj/rHyNVtmHucWvCT5d73Au3lSlJAd+mohv2jb6GXzpN+pr4cRxhUYDYz+Fz3sJt+aad2DQM7T2Eq7JVIOBzT4+UFhxfNSVqC7iu3qvJaRuFe4iu/c8rOkwDuceZumJpYyNH1v357/IcFv8efFFx65KY8dWf/MTJkyo9jfJpUGJqYQHVz7IsbxjeOg8KCgTA/lrml/jUCOr0Wh4stuTXJ0+ntWnNrLDYgGdjtERPaF1RcCZT0hFrbDzSWe0zYrvz70oNpfwfaAoAetXUoqm4w3iIl1b7XtnccLR7fmNgX2u589jf7EyZSU6jRgEDI4ezOHcw6QWpbI6ZXW1dHulJv3K2Cu5p+M9fJD4AfOOzuPK2Ctr/6CsVrW8pl5dPxeS3vcL8WffXOj7sBjcVeBr8CXKI4jU8jyaWzXO843aXSNs67OvJyBtO35NoyiqGNCFxbg3ILlgBDcVJRCzRkLGHhrNuYkVIQn4pqbC8YrJ57jPCWjUgStiriCrJEu1gP6nRHUVZRp/TRM
un12/iFVru/a8NBugunjaNGjDjOEziPCJcE/QbDlaDCYPLoRO1zu9i2IHt3f+VO3Iomb+FJwkoziDLGMWOo2O1slbRDmMzkNkjPR71OkEV3H+KPkqm9I3ATAgekCt7UmV1Z5DuYfgVCq7vIQK2TGsY+3vX+I+Wi1MnAWbp4vJdsJw2icv4u/NIhBa6UzjEr9wuHO1GIAemC8EkpRN4ickTlw/UreK/fuKt4Tluz5pMUq4FTP3ilDgWsrzNBoNkX5ClNx4aiNWmxVfg6+YkFktYhWyNE98FmM/E4Pt/DQR4G7MFSVB274Rrzf3LrEKq9WKMqRNnwkHldUsrpfXfHtxlgtqdULcWfm6WCQ48Lf4AVFWc9O86jlPdaHT9UL82TtXlJl5+Ko3DW86nI+2f8TmU5vJKc1ROxoBHDsmsthig2KdL8QoZSsxfcW5zSdUfL6h8Q6vcc4MewWOLSf41B4IC1WbUPTJOCaEn5ajxcJKfboCNRrhEDu1UwiHf98vxC67870i8uw/sx+bzaY6f+zFHx+DD00DmpJckMzBjETSfcR5uWVoq8prR8tRwmGz6nXRIS6mrxBEqtDAuwGv9X2Ne5bdwy8Hf6Fno57q4qFSnj8YX/i0i8iwa9pXOHDtxoqKYAUQbwisFH7ajIervz67jo2db6ooG/tcOLBsVhKK88n28mBFy0G08w4i8YQoSYsLjHPuXvTwhWtmQXAz4fJb9pIQQeuw3ytOGcX5Y7KYOHhGlNvV5I61d/4EegbSMC9dCC6leaKr2ffrhDAWP1S4k4oyhbA2+HmnjvKxcWP5Yf8PrCw+SZ5Wy26DBspwGYZM+8lC/Dm8FOzWUpv5NhKZj2X5wlE5ZY4QC2w20aTiwN9CLL5zteOiYRUa+1YRfyrcHBqNhh4RPViUvIgtGVvOSvxRxhA980+j+fWmGnPj7Dt+VXX+KPh7+DOoySAGNXEiVOz4UZQllQgHNWYL/HGbKDN18V3Yozh/tmdtx2QxuVe6VEFORXfdYKsVTSvn2VGAEIh73CUyxM4cE2P3s11s0GpF18O4wWi+6M0jp05C+jHoUsvjirPhuzHCgeQdLBpCuBMNEdBYuCX/uE04g3f/SkDucZpGNeKEwcCOQiGq1sn5Y49fODQfITKAdvxY6epEXIc+2/kZG9I3UFhe6LJr7aWC23UmL774ols/kksPm83G/zb9j02nNnHaeJq0ojQKTYV4670dajQVmpWVckO+EIcKdDoMGj3DRn4K/R8TP11vdSn8gFghalUx+NjiLZLZ+7WfCuM+q134AYgdJFY7jDkM0ooDcPnJ5Wr20ITmE9T686qlXyWmEtakinC9Ec1G4K335pkez7BpyiYxSDlzTNSV/zAeZgwWdv3sinagBemw+QvhtvAMFB2sLgci2gonCcDy/zneZrPRskRYfFs06i7CFJ0R0xduX44mJI5IU6ULsLaQ34uCBglw4zyRX3T6IEGH/sVwfJW4rff96orJOwPe4buR3+Gl97ow29npBpEr0/F6MfC2WcUktu0EuG2ZWPm2O356NurpfkeilhXC57EVlW3Lq6BY5ZXuOlARmGl3m1IiU2gqVMMp430a4rO4IsTWUi4Gzp92FVb+Kij277SiNPLL8tWVasX+XeNbqFgxTso7RunRFWrrWrdzvCTuE9REZMW0uwa8AhxWrWss+1LwbyhC7m9fKtrbD3hSHH85x4Tw4+EPU36rf+EHxGQtdqD4fd88tx6i7Nfr09YDYpKg0WhEG+kT68HDT4gj+grbY2CkEDR63weDnhGlmzoP4dxb/aYQvT7vKSYKVrNwvty1Vqw+XqzoPUUm0F2rhfsCxARz6t9n36VOoWkfcU4rLxSfjR1NApqo3VbUkFqAvJMcq3AYxsXW0r44MEpMeNpdIxY36lP4AeEu6XwTgVVKcvsUFYhypHMJKq8J7yAhGGoNYqK99j2H3KTmwc3RaXTklOZwqviUGvZsL/4AtNKKz+OghweHEoSToVq
3nP6PC7HGVCzK+52VHyMyUKa2Fhlrz617jpMFJykoL2BzRe7KkJ1/VjYvOLEOPu8tMrgqtjvMJ4zQiuykuJOJ5y78KAx/VYhwlnL48w4m5Yvsjm+zt3As7xhbMoRrq0aBQaMRbrHw1kLYXfl6nTZBERdySnPIL8vnUO4hyq3lBHlWumadYX9ObekTieb7cUL4ieomssF0nkI8X/mq6OC3f564ziruySq0CGlBy6AEzBr41d+PY2UiK8al+NN2AmgNhGTsQ6+pdFrF7PwNTh8U4/FJ34uJPIjPadznooS18JTIeKshZDjUOxQPrTh3ajVah/1TKVtfnLzYoWudO+SX5fPXQdFcoE/eaXGMLH3B5f2Vcv7DuYfV3MKq4o9LDi8Wi3MlZ8T7vuEPUWoF4ruYPQXKi2t8ioTgBMK8wzCajSw8vtC9160gN1XEZ4RYNaK8uTY0GmgQXz8uU68A8X2DyI48vMT1fY25osth1n7hBL9lUd0yQdtdU9F4wiLcw0Brm4fDXVy2eXcHxdW4a7ZDxllsUCwxATGYrCa149ulzAUIGZFcbMw9OpcFSQvQaXS8O+Bdfhr1E7NGzGLe2HnVJ+8WE8y9m7tycgi1id2nf/QAtcuFu9jb8vRaPT161SFAV6dX28n2PJ6It96brJIs8sryCPcJp3fj3qr4sy5tHQXlBepD16StwWg2EuUXpbZCBdDmpYiMhk86i7ryYytEBs7K18Qq1VsxImBQqXnvcG39DyAvJIOeEULC0aVwYkPl348s4f6MFG4sKuXagbUMdBrEw11riIyqLLOpWqt+0RLRVtjmR74DV74vVvBvnAfD/lfrQ/9TWowUF9kHd4mOYI8eFhOLWmzVtdKwrZjQm41i33dCbJCwPx/PP461wnGkOH8U8cdL76WGQi5MEoOXtlnHK1ooXwfX/iImeYWnKoK2HcOjlUl2amEqWzK2YMNGbGCs04yfqkT4RuDv4Y/ZZmGJh5VirRYfvU/trYIl50xCcAKeOiG21er8qUpAY3H+eXgfjHhdTPRuWwIJ59Fh12a8+Hf/PLfuroiSShZDTGCMcMqtrghDHvVuzZlETXoI5x6IHLw5N4iSOZ9QGPOxcIXUJfvlQhLRTnQOu+lvuGNl/XSj1Ggqs8d2VJ+wKs0dHDpJbf2aYwYhqMRF9zn3bThXRr1LUM/71P96W610Cu8shD+95/l73aguQpQDUWrz00TRqABxPlbO2/8c/4dSSyk+ep/KksXCDJhzAy2ShFB/MKIFBxETnmrij1YLYz8R+UzJa0UHOBc82PlBOoZ1pNBUyIMrH2TxmlcwYyW23ESzwDhxjb17vXCrlBeKUuY3okWuzw/j6ZQjcsDalZVB56nnLvyAcK9dPQMixURziLGcgWFdMFvNvLzxZVX8ccj7cYZOXxmCvm2mcHW4ia/Bl3BvMaZOLkhW837aNmiL5vQhId4dmC9iEuyEjjB78efkduG0adJLlK2PfEuMB/o+IiaufR+pPJaWvijKa5xwVaD4fr8ODgJEWLS9q84BnxBoPgItEGZXMNLsyEohPE7+ofp5wNNfnNe0BuGk2DfX5eei1WjVgOTYwFh8DJWLWMNjhuOj9yEpP0kNUXcLq5UP/7qeHEsJceXlDAupWKDY9LnLbpOK+LPr9C6sNisBHgE1lzErmEph0RPi9y63iH07fqhwBI7/SohzhxcJ0cPF9wHic1A6tX2952uXHQ6dceaEOIaDvUMvTO5gs/6V2XB/3ye+7wPz4dC/YlHdZhMB0z9OECW4vuEiPzT8LEq0rvpU/Nw4F55Mpk3vyoDoKL+oc3PlJAwX5YvFWXB0meNNFTEYSizBpYxb4k+nTp3o3LmzWz+SS4vDuYd5fbOY1N/X6T5GxIygfVh7ukZ0rVaHC4hWe6d24ucZyKt9XqV1aGvuaH9H9fvVgr21t0t4F/w86qg+V1zcvI4uo5dde/Zx8ePQa/UkBCcQHxSPyWpi+YnK1cIlyUKRHhEzotL
SfGSpyKBI3SIuVM0GwPDXxMklfqgQRYy5ogyhYTsRDjjgiTq/54ua0LjKbJ5lL4sTdXkJrH2PWJOZJxKuxS8gsvbn8fSjcYNKUe2ScP4oBDcVToNut4lBVNygi7tkzT8CfOs40XaFRiNWRUEM1JwQ7R+NQWvAaDaSXiREG/s27wpKaKKyOtK+pEgMVMd8JMoH7t1cEbpbLrpM2KGKP0Wpql27pg4djm9Bo9p95/iL80n7sPbVAywl9Y5Ba2BKyym0CmnlVmc3p3j6iRyZid8KJ8X5pGrpVy0o+2W5VYSOxvg1qXA/mMVxo3TEqYlON0DPaeJ3vTf0ewwe2Aldpl7c5xlnaHUQO6B+BasO1wEaISzkHHe4SWm1uzVjK9nGbCgvoWTH96TrxbHt9ur8+URnIKj9dep/u4d1xOOmv9xzM58rPe8Vnfh0nmIB5/OeIky9KEsda/1+UGQyttJ4oZ3/EPxxuwh7PTCfViYh5u/3MHA4V3Sqqyb+gHB6DX1J/L70ReGCdoJBZ+C9ge/RwLsBR/OO8nqqyGYaEtQSpm0W19iItnDLP+L59N5CBMrYDcdW8PSZHN7WRTLi+n/hqo/PXfhR8PAR7tnWY9GMeptn+r+Bt96bHVk71GuZ2umrJmIHiJIvmxUWPVmn1tlq6Vd+spqJ1z4gTgSnL39FCMMfthPttD9oB5/1JPSbkerjWxiLREbL9b8LxwWIXMChL4pFq6EvimtteGsRir385eobYbMxKv0IepsNY8Wpx6XrR6Ei+DncKBZTgywWgqxWGPW2yB90RngrUeINQhypQfhQSr+qXj/8PfzVnMUf9v9Q8zbasXPju/xuPAHAc1GjMNzyr3CvAcx/QJSlVUERRZXFrbigOPfK5jd8LI4F/0aiVMh+f+0wWXRZ9AoSc4xvr3Te5KCCSS0mEegZyImCE3VqLZ6bKbKjQoJi3H5MvTPkBdENuChT5FHNuQF+mSwW1d9qCl/0EYvq3iEibqHBWS7MefhA5xuFq9I7WC0TBBfnrbqgM1SGnFdZiGjkKwRKZfx7KeOW+DNu3DjGjh3L2LFjGTFiBMeOHcPT05OBAwcycOBAvLy8OHbsGCNGjDjf2yupR0rNpTy66lHKLGX0jezLrW1rybA5tbuyLeaod+mbMIY5o+fQJrRNnV/b3tbZL8pFWn5NhLUQKzg2C4MtlSr3+Pjx6u+jmo0CKku/7Eu+rmh2hbhgr3pLrJSV5okA5we2Cxt77/vEyeWGP+CxIyIJ/qkUuGedqDk9V5v7xciAJ0RXjJRNMGsUvN1MBKPqPCoVfTewb5d+SYk//99RSr8OLXJsi1yBXqtXB65J+UmcLjlNVkkWUOkKgsrcHxtiQNzOI1SE2Cqr3wYvGPi0+D1xlkPLbSXzJ78sn5UpKwFRvub2W6iw++72kiVf/zWPdH2EX8f8iq/hEnBE1rH0S3H+KMRkHhRZK16B4nrgrngz/FXhKHxwpwiyViZvEhEEr3wnO39yuCnaP5q2oW2x2qxiMWfPbySbi7BpNIR4BhPsFfzfb68T7B3QfeJHn1/Hjz0aDfS6V7hXG3UQi1X/PAbvNqf1kdUApJWICWfr7GTRzWfPb6JDZONOtJgshKGUolRKLaV4671dNwrodoco0zMVizye04ed3i3cK5T3Iwajt9kwVxwfQwa95nisaHUiZ/CpEzBtK1w3B0a9S/gtSxl5w79oGndw+tznhF+YKFHqdjuN/Bpxf6f71ZsSghPc35eGvyrGS8lrK/Ov3MA+9Fl1/hxYLDr9BUQJZ53WILLC8k/C6QMYco4TaRYukPbD3hLnkJrKdXQGcV4CSPyueje4nT8RengJfY2VHdlq7cjZfARE96BhheO/mUewcGR1uaXmx/V7ROSCFZ+Gxc+6vJtSOtwnsrqLb0qrKWg1Wtanr+dorhDp8svyuW/5fdy6+Fa+2PkF2zK2qR3BTCYjrxwSQtE4vzi6jvxAONcGPiOcHeZSIUyUFTq8TpBXkMM
xXLVrlVNyT6hd5Bj+qnA8VSW6uyhv8osQ5U7fDK/mKlHwMfhwQytRejR993RViKqR/DRyy/IACGlwFk6a+sLgLRZv4odCk4rOmhHthShdmi/2Z89A4dipxwWeViGtqnUvPCeU0q/D/zp06VQMEaeKXYt3lwpuyen2WT633347DzzwAP/73/+q3SclJaV+t05yXlmbtpbkgmQaeDfg9b6v1xyoai4XQXnKSqeb6fWuiAmIIdAzkIKygrMTf0BkKqRtY2jSVubGdqF1aGsH4eGKZlfw8Y6P2XRqE3cuuZOYwBjKLGU0DWgqHALbvhEBhgBdb4Mr3nA+WPMJObcwy0uFgMbQ/Q4R6Hmywl4bGA2DnxMrS25iP1GS4s8lRHRPsSJjzBECYEzfaneJC4zjSO4Rfj30q9rGPSYghgCPykmsfXaBj9VK7FXTq4ulsQPVDiJs+FjkxyAGPkp3j9PG0+g0uhpb4Falud5xMi3DniUuaT1ODMDd6Pplf10BiNn1h/hlxBt1K3vSamvuaPL/nS5TIWml6ETT92GH0uoRMSPYe2Yvi5MXM/n4IfZUZHrFXgyunwoCPQPx1HmKBbXG1c+f553wliL/besMIe6k76BNxiFoXLmPtm46CDq0ERO1wEhoOYZQnZ5w73CyjELMbx7c3LVjUqsVXXemDxROnS/7CHdH34fF+MlmE8fVspfolLmXp/z9eLVBCNH+0dWyhlT0nhDWXPz8x0xpOYUFSQvYf2Z/7SVf9gQ3FZmAa96BdR9Ca/c6ACnukt3Zu9Xg53Yntolyuhv+EN+huUy438qLRE6MxcSHvoFkWkpoWrW7lCua9hZuul2/wMKHRZmmd5DIW1r0JADjmo1iVaYo8+4QVovQpveE25bQcOvbsP8HYpoNgfYTa98OvafYX2YOF6Hu7SYIcaAK0zpOY2zcWKfZR9H+0QyOHsyyk8v48cCPPN7tce5ddq/aKW1rxlbYBTqNjij/KALMJo7oIMhq45Fhn1Y+kVYrSv+m9xch6cteqhTJlI8toKnaNdgtR+HiZ4SY1LSvyKJxRcPWcNtikSeakyTKn+KHCsGoSunTdS2v49t933I07yirUlY5dFx2ysmN5OjE/C3YjRL580rDNmI/tsdiEuWRGXuEEHauHRar4GPwoUVwCw7kHKhdxHSH8FYi1y4tUTSm6C3KeS8n50+dvZS//fYb27Ztq/b3G264ga5du/LNN9/Uy4ZJzj9KeOUVMVfUvtqx5h1hkfcJhdEfnrNNXafV8fmQzykoL3BPXXdGm6th0VP4Zh3g26s+rRYaFu0fzQ2tbuDHAz+y8dRGh7bRmrTtItsHhFVRsab+f2fAk6KDjU+oyJcJb13n71q5eHvpvBxEAclFjk4vvvOdP4nOOE7EH8XhszpVrCbHBsbyZr83He4TbTdRbqMPROfkedBoxL720zVChO37sCoQRflFkVMq7OHtGrSrU0loi5zUypdAU2P7XMn/c1peKYJI3ej6pTjSFJqUFgvLubPuh5Kzp+UYkQmWmyxCgO0Cv4fHDOe9xPfYlrmNSboyDjQQCzIXU6aXQWvgnf7vYDQbiQ5wHeB7XtF7iPLJXtMgP5XmB+ejPfgF1gonZuuBL4CTMVeLkBZkpQnxp9bSiZBY4YJe+CgcWQKr3hBjRK1BlMdXdDvDK5BJPR4hOqYLkQFN3Cuh+Y/RaXV8MPADfj30Kze0rt7CvkZ63A3rP4L07WKiqASh14Dint2WIeZRTUwmUT511ftC+AEhmIQ7fgctK37qxLBX4OA/osTpow7iOntwoRCVmvah/5C3iP/nOqw2K81D3BPexsePJ6UghWtbulHqqhDdXXSX2jId5t4tHGoBjrESWo1WLRl3xo2tb2TZyWXMPzaf4/nH2Z29m0DPQO5odwd7s/eyJWMLOaU5nCg4oT7mkfA+BAc4nrvxDhJd8r6/CrZ+LeYRMZVuo5iAmErxJ7AW8WffPBHir9GJTsW17d/BMSInbc07olv
m0WVwdLnYb1pcIcqRG7Yh0DOQ61pex9d7vmbG7hkMih5U87GTvI4cnRBrXeY2XUh0BmjcUfycJ17t+yr7sve5HRNQK+2v/b/27js8qip94Pj3Tk/vDUggdJCO0lSaIHaxYVkVUHBlAWUtix3LKiorVoo/UbAXdGEVBVQUFaQIiICFGjpJSO9T7++PMzNJIAkJpBHez/PMM5OZO3PPTO7cufc973mP+k5vX+oP/jSlzJ8aF3wOCAhg9erVx92/evVqbLYGmgVH1Jiu6/7iaQOanaAy/KFNpWmNl76g0mZrQbeYbpzX/BR6xwLCS2fcWjm9wkWm9pnKV1d/xR3d7iAuMI5gczBXNR+sis26HdDpclUgTyjWEJUBNfA+FcE/iYO11mGtua3Lbfyrz78a5cGeqIJv6NefX6gg4DF8RdKNmpE7ut3BwssXliveDpCUVlpDpWubi6lU22FqemZnEawp7Z0rmzlW0x/yNvs2YPLWX2gT3kaCj6JyZYd+bXqn6kW9GWkA8S4XgabAWukEEccwmqC/t2jymlfLDT9tFtyMbtFd0NH502rBhMbgxMGMPmt0AzW2YkOShnBJ60sauhlKWAsC+04g2RvsKVfs+RhlAz7VqpsRngQ3faImHAiKVVnhrmIV+DFaVVbMXZvRzpvCgBbn+4vpNkbNgpsxpfeU6s1UWFZQdGnx+F8qL4Bdlu/z9w+LtjvUMJMeN1bxrJMUHAs3fQQxnVRpg2+nqboz1lC4ai5ms42Fly9k0ZWLMBuqVyS4XUQ7Xr3g1cqzuCoz7HE1sUThUXX87XLU6Ok9Y3vSJaoLDo+DTembCDQFMnfYXEafNZoZg2awctRKvr32W95oN5oHM7J4MqeIkUOfr/jFWg9ShcRBFScuM8Np2e207HD242TsUnXfQM3qVd1hTAHhKtN50npVNwodDm2A7/4Ncwao2bJQwS6b0ca2zG28/+f7Vb0i7PvZn/nTKIM/9aB9RHuuandV7Z1z+CacOLDOPzzQl/mTVZJFsau4dtbTQGoc/JkyZQoTJkzgrrvu4r333uO9995j8uTJTJw4kX/+85910UZRB1JyUzhSeASLwVL11JYuuxrupbtVhNz3Q9dYDPqXd5aqb2Hv8UFJUJkok3tO5utrv2bVqB9IXP4Y5B2EqLZw5Ww5gK9lmqbxz97/5Lr21UgJFo2Lt4AeuQeOm3IZVH2up897moWXL2Ryz8lYjOWn2MRRSOLqWf4/uzavInijaTDQWzh9/Rv+mb/KDrGpdr0fjxt+X4T14C+0cqrZak6Yxi6Er17F2tmqpl0VmltVdmwrp1MFyCMa78nsaa3nzRAYrWY8OmY2tvuDOnJRQSGP5Dv47qoveXXoq8cNyRPH8wXoO0Z2rHR4f42DP6D24V2ugXv+ULP13b0F7voV7tuhhrOcCcPlzxmnrrd9VmVBY5+EoAT/zIgAXd0GuLiSIEVtaDkAJqyGkXPUMH40NfNguMqyMRlMVZd8qC2WQDUrmC1MDfde/pAaIpj2h/r9/2Ue7PxW1ZBy2Y97uqZp3HqWmgnLarTy2gWvlSv0q2kacQEx9Nv8GTflF3BV9/FoAVXMQnzhUxDSTA3B+u7f/qLdvuBPkDnIP3PpcRyF8Mktqkh5y3NhSOW1jCoV2Vp9Hvf8pToSkr3D+b5/BpwlRNoimdRTBZee/+V5vttf8SysFByFjO1kN+bMn9NRZGuVqeVx+s8tQy2h/pqGp3v2T42/8Q888ABvv/02Gzdu5K677uKuu+5i06ZNzJ8/nwceeKAu2ijqwOrDamPuFdeLAFNA5Qt+/wwc/Uv17BwzNrZRiGwNvdQPAt89VeWsCwbNgHH1S6qmgDlQFaGVgptClDIHqIKeoGrxHPN9MmgGrmhzhX/Ky+N8/wxBeYfp4oIwSxi94k4wA2SHi1XhdkcBfHkf6Lr/ZC7QFHjiYVtulypo+do5anYJ3cN5ZtV7e8Jx8kJ0vFT1vnpcarppt7Pi5ZwltMhUwwl
aBTUv/c0Rtc8cAH3/rm6vfql0H1SSS4/1bzPjaCbX972fiIYaVnUa8u0Lq9on+jI5zAZzzYfSGc0Q1kIFRCNb1+4scI1di3NUUVtXyXGzA1XEaDCWG97UNXl4udpWdcJgVENUJ2+Ce/865ZqdJy2ytaq5A6ou1fOtYU5/VZz8y3vh/Wtg1jnwTHOYe77KrFk7R3VEHdrERXH9eLTfo8wfMb/iWdk2vKkKKltDS/chlbGFqSAYwNpZMKsv/PwqvYJaEmIJYWji0IqzSDwe1db0P9R50bVvndpsdKEJcPZYNXtbaAs1U9ZvHwBwa+dbua79dejoTP1xqr/OYjn7f0YHsrxtaCzF75uENt795W4VeNM0zZ/9c6Tg9A7+nNQWO2rUKEaNGlXbbRH1yBf8qXTY1dEdKv1w3Rz19+UvNd5enIH3w+YPYP8aNW17+wsrXm7v6tLhYZe9eFyRNSEEamz+z6+o8c77fi43Hr5Ke1b6h2+90f/fOFqff+JeKE1TU/m+PhC2fwl/LKZPUh8CTAGMbDuy6lT0A+thyT2QpmZMwRYOfe5gcp9xXOexV1k/QAhAbX+XvgB7V6lilKteVNmkx/ruKS7MPMKG6GiGn/+wZIvWtXPGqf9F6lY1JC/5fNgwH4oyIapd6ZANUS3DWw7nu+u+IyogqtJlWoS04MkBTxJqCcVmkhIO1aZpaqKMzyer4EP/SaqwcBVa2WLZyU7Muk7HPnfVU0NR9aBqUqC+LrQfoer9/fCcmlzCFABJ/dTMaTn7VSFmR4EqJp5aPhtTM5gY1ekK6JukgsJl98Mb3lJBJIDzpqgM5hPpcJGaAWzVi5CxHb5+hJhvpvFDq/Mw2dqooV35hyFzFxzdrrJDU7eo9mkGFfiprc/TZFFDJZdNVXWket6KZjTxUN+HOFx4mNWHVjNpxSQ+veLT8sMT9/1MsaZh934UkvlTi9oMVdvV7tKsq2bBzdiVs4vDhTUr+uz0OHl67dO0CGnBuK7jarulNXYK4UpxuipxlfiLzQ2I6gJfTIE//qfqvQTHge5RY1B9et5SWgukMSo7S9V3T6paIsf++BZlwWfj1HvrfhN0r0GxOiHOJMExapaQjfPVQUh1gj/F2bBogrrdeyzB1Zz5BFC1pc6/Vx0MfnU/LSauZ91N6ypfPj8Nvv93aZ0WW7gKAPceA9ZgLICEfUS1BceqYp2f3Q4/PK+y0eLLZJytfwPWvMYwYNi5/4HWlXQuiNoTGKmyq9bNhS+OOTke/oTKNBE1EhN44lqNV7VrZMP6TxddroWvH1GFynd9W3kHpFergkwAOmo2LHFnYCfkoAcgobsK0DTvXX6WXV1Xw84Pb4Yjm1XgJfeguhSkwe//VZe4LiqQlDwI0v9UQROAfv+oWR3PwVOh351q2N6md+Dwr5hTfoCUHyp/jjlQ1e1JPsmZiivT6xZ1HJS9Vw157XotJoOJFwa9wN++/Bu7c3ezNGUpt3S+pfQ5+1b76/3YjDYCzYG126YzWavzVTHvzJ0qMBmedNKZPz8f+pnPdqpZ0Aa2GEj7iPqf2bCsag37ioyMJCMjo9ovmpSUxL59+068oGgQm9I2YXfbiTWH0PbdG9RJXnGWirgfXK8CP5oB2l+sCvpd/kpDN/nEzrsHLCGqp3Dxnf76IYB3mvp/qAh+VFt1oC+EqNyAyYAGO5dD+l9VL6vrKgMn/zBEtvFP214j598L0R1UMcjlD6Np2vEp13lHYOkD8HK30sBPj7/B5I1qNgZr9WcFE6KcLtdAh0vV+P63LlZDCXUdfn6ttDf53Cmqp1jUj/PvVT2v4S3VyRZAuxFqRhwhGhNLIPTwzhRWwXDpclx2Ruz9lViXi+uSG3Gnal0yGFSHcssB5QM/oLJ5wpOg8xVqJt5R78D471Qdqb//pILCpgA1S+NPL6hZu3yBnwGTYcQzNc/MtIXB2bfBHSvV0Lhhj6vJKAwmiEiGdheqjK6Rc2HCGnjggFq+tlmCoJ+3E23VS/7
tKMgcxKWt1bay9ejW0uWLsyF1W+Oe6et0FhBeOov07u+B0hm/apr5882+b/y3522dVyvNOxXVyvzJyclh6dKlhIVVUTyrjMzMTNzu42eKOdb06dP573//y19//UVAQAADBgzgueeeo0OHDv5lSkpKuPfee/noo4+w2+2MGDGC2bNnExdXSSEucUK+IV8Dso6gFWapk66LnlHBk4I0Vdm89SA1hvt0ERipfiiW3g9bPlZjhHuPVYWdd32nCrMZLXDtfDlJFOJEotpAp8vU9+jnV2Dk7IqXK8mDVTNVT5xmhGveOLn6BSYrXPkavHmhGu8eEg9DH1UHiW6XSsv+cQa4vYUgW5yjprFteYKZCoWoDt/ww48y1OweX9wF6/9PnWCACkQMfbRh23imCY6FWxaV/u0sVkNDZMidaIz63anq2Oz9SU3f7Zst6Fh/fkGHvKOs0JvBubJPqZGEbmqa9mFPwPavYI83Q6cgTXUAX/DYqe8fotrAef9Ul2OHltWHc8apjOu0rSqLrN1wQM2ODLAlo8xQuP3rAJ3s8BaAS+r91IU2Q9Uxwe7voPdomgV5p3uvQeaP0+Pk+wPf+/9evnc5k3pMatDSBNUe9jV6dO2Psf7hhx+YOHEi55xzDi6Xi4ceeogLL7yQP/74g6AgdQLxz3/+ky+//JKFCxcSFhbGpEmTuPrqqyucbl5Uz+qDPwFwbnGJmm1n4H3HR99PR33vUNH6rx9WX9a1pbMOERSrZmhJ6NZw7RPidDLgbhX82fw+FGbAkAfV90vXIe8QbF2oDlKKs9XyQx5UKdwnK7EPXPAorHhSBZQyd8KQR+CLu+HAWu8y/VSadushchIoaldQNIxdqmb+WvFUaeBn8EOqDpBsbw3LXMXEFEI0tPAkVWT451fhm8egzRBVaLksXVfDSAF6jz61QsFnssBINStgz5vVZ1qSU70aPzXVEPv8wEg1hH3Na6rYtTf4c1bUWWhoHCo4REZxhqr7s0+dB2dFt4Gi7RL8qQtthqpasXtWgsdNQrAa9lWTzJ9fUn8hz5FHpC2STlGdWH1oNW9ue5MnBjxRR40+sWrteTweT52sfNmyZeX+XrBgAbGxsWzcuJGBAweSm5vLm2++yQcffMDQoarq9vz58+nUqRNr166lX79qTgMs/FILU9mdl4JB1+lviYYhDzWtg9rEc+C25fDn52oMb3QHlaqf0POERfiEEGUkngPn3q2Gvuxcri7R7SH3EDgLS5eLageDH1BDZ07V+fdCaHNVPPPPL0qnm7eEwKX/gW7XN639lWhcDEY1dKDdCPjhWWh1Xt2k9wshmp7z7lFDktN/h98+gp5/K//474tUR4bBLEXLa4um1U3gpyGdfZsK/uz5HgrSITiWYEswbcLbsCtnF1uOblEz9+37GYCssGZQtF2GfdWFZr3AGqYCjIc30yxSZeukF6Xj9DirnpTEyzfka2jSUK5scyWrD63m892fM6H7BOKDGqYAe6M6G87NzQVUjSGAjRs34nQ6GTasNH2yY8eOJCUlsWbNmgpfw263k5eXV+4iSv2S+gsAZ9kdhHW+qmmeSGkadL5SjRUe+rDKRpDAjxA1N/xJmPSLN+higIwdKvBjMEFcVxg5B/6xVk0dW1v7ku43wK2fQ6B3ZpoWfeDOn9T9TXF/JRqfmPZqJhcJ/AghqiswEs731gj77t9qqKJPURYs9c4kOPA+NcW3EBWJaqPOW3QPbPuv/+7uMd0B2JqxVQ25P7IZgKzAcEBq/tQJowlaD1S3d6sZE80GMx7dQ3pR+gmf7va4+W6/mi1seNJwesT24Oy4s3F5XCz4fUEdNrxqjeaM2OPxMGXKFM4991y6dOkCQGpqKhaLhfDw8HLLxsXFkZqaWuHrTJ8+nbCwMP8lMTGxrpt+WtmR8TsAZzkc0HlkwzZGCNH4RbWBq/8PJm2AGz9W1w+nwoRV0OOmukldb9kf7lwNN3yghuJEJtf+OoQQQoja1OcOCEtUEyCsfBZ
8Iye+flRNaBDTUdWTEaIqXUep6y0f++/y1/05ukXVoPG4IKod2boLkOBPnWmjRh6xczkGzeCf8etwwYmHfm1K30RWSRahllDOSTgHgPHdxgPw6Y5P2Zm9s27afAKNJvgzceJEtm3bxkcffXRKr/Pggw+Sm5vrvxw4cKCWWtg07DysplBubwxR9TuEEKI6otqoIZTR7epnquXQBDUjiNRFEEIIcTow20qLw69+CV4fCD/NhM3vAZoqWNwUamyKutXlajWJxuFNkLkbgG7RKvizNWMr7u3esintR5BlzwKQmj91pf3F6vrgL5B7yF/350jhiYs++4Z8DU4c7B8i1j+hP+c2Oxe72869P9xLkbOobtpdhUZxVD1p0iSWLFnCjz/+SIsWpTNMxcfH43A4yMnJKZf9k5aWRnx8xePkrFYrVqvsWCuzI38fAO2SBsoQCiGEELXG4/HgcDgauhmikTGbzRiNxhMvKERT0G2UyvL54Xk1a1Oad3ruPuPVxAZCnEhwrCoavutb2PIJDHmQ5LBkgsxBFDoL2bX3WzoAtL+IrG2vApL5U2dCEyCxr5pI6K8lNA9uDpw488eje1ixbwUAw1sO99+vaRrPnP8M131xHSm5KTyx5gmePf9ZtHo8J2/Q4I+u60yePJlFixaxcuVKkpPLp/b37t0bs9nMihUruOYaVUx0+/bt7N+/n/79+zdEk09r2fmHOao7AWjX9cYGbo0QQoimwuFwkJKSUmcTRIjTW3h4OPHx8fV6gCtEg9A0GDBJDYv+8T9qCviwFmoqciGqq+sob/DnYxj8AEaDkS7RXVh3ZB1b9CI6WMMgqR/ZG58EJPhTpzpfqYI/f3xOQq/LgRNn/mxM20h6cTpB5iD6Nysfs4i0RTJj4AxuW34bX6V8xdnxZ3Nd++vqrPnHqrXgz6ZNm3jsscdYsmRJtZ8zceJEPvjgA/73v/8REhLir+MTFhZGQEAAYWFh3H777dxzzz1ERkYSGhrK5MmT6d+/v8z0dRJ2/q7GjrZwQ1DSuQ3cGiGEEE2BruscOXIEo9FIYmIiBimwL7x0XaeoqIj0dFUcMyFBCt2KM0RgJFz0DAyeqiZJsAQ1dIvE6aTjpWAOhOwUOLQRWpxNt+hurDuyjq1WK9clDUI3mMgqVsO+JPhThzpdDssfgv0/06yPSp44UebPG1veAOCiVhdhNR4/IqlXXC/u7nU3MzfO5Nl1zzI8aTjhtvBab3pFahT8Wb58Od988w0Wi4Vx48bRunVr/vrrLx544AG++OILRowYUaOVz5kzB4DBgweXu3/+/PmMGTMGgBdffBGDwcA111yD3W5nxIgRzJ49u0brEcrOPWrsYbvAeBnyJYQQola4XC6Kiopo1qwZgYGBDd0c0cgEBAQAkJ6eTmxsrAwBE2cWW1hDt0CcjqzBKgC0dSH89iG0ONs/49cWqwXaX0SRqwiHRw21lpo/dSg8SdXJPfwrCRl7gKozfzanb2bNkTWYNBPjuo6rdLnRZ41m/rb5ZNuzSS1KbXzBnzfffJPx48cTGRlJdnY28+bNY+bMmUyePJnrr7+ebdu20alTpxqtXNf1Ey5js9mYNWsWs2bNqtFri2N4POzI2QVBVtonyJhjIYQQtcPtdgNgsVgauCWisfIFBZ1OpwR/hBCiOrrfoII/G+ZDpyvoGhwFwG6LhbykfuR6s34CTAEEmAIasqVNX6cr4PCvNNurJk46XHAYj+7BoB2f6Tx3y1wALm99CS1W/gd2LIOB98PZt5VLvjBoBkwGFYqpTkyktlQ7N/vll1/mueeeIyMjg08++YSMjAxmz57N1q1bmTt3bo0DP6KeZe5khzfU1y7xvIZtixBCiCZH6rmIysi2IYQQNdTmAuh2PehuWDiGyN8W0sKparduKzrsn+lLhnzVg85XAhC7by0GDDg9TjKLM49bbOvRraw+tBqjZmD8X6th43zIPwJf3gPvjoSc/eWW16j/38ZqB392797NddepYkRXX30
1JpOJGTNmlJudSzRe7oO/sNuspplrH9WxgVsjhBBCCCGEEKJCmgaXv6yGHBVnweqX6G5Xw7weWvUQn+34DIAIqwz5qnNRbSCuC2aPi1hzMACHC1X2T74jH5fHBcCc31RJm8uKXSQe/FUN+xwwGUwBsGclzB4AKT+Wvq439qNTf5k/1R72VVxc7E/b1TQNq9UqhftOIwcPrKLYYMCKgaSQpIZujhBCCCHqyeOPP87ixYvZvHkzAGPGjCEnJ4fFixc3aLuEEEJUwRwA178P/zcYCtOZkJ3L77Ft2Vt0hEW7FgEQGSCZP/Wi0xWQto1mTgepwIRvJlDsKsalq8BPoCmQIlcRRl3njqOpEN0BbvxQBY56j4XFE9SsYQvHwp2rIDTBn/lTn8GfGk3JMW/ePF555RVeeeUVXC4XCxYs8P/tu4jGaWf6bwC0CYzHaJDx9kIIIQTAgQMHuO2222jWrBkWi4WWLVty9913k5l5fEp3XTt69CgTJkwgKSkJq9VKfHw8I0aMYPXq1af0uvfddx8rVqyopVYKIYSoN2HN4fp3wWSjZWQHPrvqC+7udbe/zk98YHwDN/AM0eUa0Ayclatmr8x35vsDPwBFriIARuYXktT2Ihj3rQr8gLq+9X8Q1wWKMuCzceB2lQ6Jrr/YT/Uzf5KSknjjjTf8f8fHx/Puu++WW0bTNO66667aa52oHc4SdhQegfAQ2kd1bujWCCGEEI3Cnj176N+/P+3bt+fDDz8kOTmZ33//nfvvv5+lS5eydu1aIiPrr1f1mmuuweFw8Pbbb9O6dWvS0tJYsWLFKQeigoODCQ4OrqVWCiGEqFdJ/eDu38AciMVkZVzXcVzW+jKWpizl4uSLG7p1Z4botnD+vdz90wyGuowEXvMWUdGdCCs4SsknN5Ofe4Bio5nkwY9Cv38cP7O2OQCuW6CyuPatgh+ea9yZP3v37iUlJaXKy549e+qyreJkpW5lp1ll+7SL69nAjRFCCNGU6bpOkcPVIJeazpgxceJELBYLX3/9NYMGDSIpKYmLL76Yb7/9lkOHDvHwww8D0KpVK5566iluvPFGgoKCaN68+XGzkObk5DBu3DhiYmIIDQ1l6NCh/Pbbb/7HH3/8cXr06MG7775Lq1atCAsL44YbbiA/P9///J9++onnnnuOIUOG0LJlS/r06cODDz7IFVdc4X8dTdN4/fXXueyyywgMDKRTp06sWbOGXbt2MXjwYIKCghgwYAC7d+8+bt2V8Xg8TJ8+neTkZAICAujevTuffvppjT5LIYQQdSgkHmyh/j/jg+IZ22Us8UGS+VNvBk3FmtCTs/My6bxyJnFpf2KbfwnhmSkkBjWj/a1fYu4/8fjAj090O1XHCeDHGWguO1C/s31VO/NHnMYObWSHxVvsObJ9AzdGCCFEU1bsdNP5seUNsu4/nhxBoKV6hzZZWVksX76cp59+moCA8tPkxsfH87e//Y2PP/6Y2bNnAzBjxgweeughnnjiCZYvX87dd99N+/btGT58OADXXXcdAQEBLF26lLCwMF5//XUuuOACduzY4c8e2r17N4sXL2bJkiVkZ2czatQonn32WZ5++ml/ds7ixYvp168fVqu10rY/9dRTzJw5k5kzZzJ16lRuuukmWrduzYMPPkhSUhK33XYbkyZNYunSpdX6LKZPn857773H3LlzadeuHT/++CM333wzMTExDBo0qFqvIYQQQjRpRjNcPQ9ePx9SflAXgBZ94IYPIDjmxK/R9VpV9HnT22gluaA10oLP77zzTrWWu/XWW0+6MeLU6LpOvjOfUEtoufuLDq7ngEn9q9uFt2uIpgkhhBCNys6dO9F1nU6dOlX4eKdOncjOzubo0aMAnHvuuTzwwAMAtG/fntWrV/Piiy8yfPhwVq1axfr160lPT/cHbf7zn/+wePFiPv30U+644w5AZdgsWLCAkJAQAG655RZWrFjB008/jclkYsGCBYwfP565c+fSq1cvBg0axA033EC
3bt3KtW3s2LGMGjUKgKlTp9K/f38effRRRowYAcDdd9/N2LFjq/U52O12nnnmGb799lv69+8PQOvWrVm1ahWvv/66BH+EEEIIn+i2MOJpWPJP9XeXa+HKWWC2Vf81Bk2FTe+Ayw5mU+MM/tx9992VPqZpGoWFhbhcLgn+NKAZG2bw/p/v88GlH3BW1Fn++3enbUIP1ogyhxAVENWALRRCCNHUBZiN/PHkiAZbd01VN93aFxgp+/dLL70EwG+//UZBQQFRUeV/Y4uLi8sNv2rVqpU/8AOQkJBAenq6/+9rrrmGSy+9lJ9++om1a9eydOlSnn/+eebNm8eYMWP8y5UNBsXFxQHQtWvXcveVlJSQl5dHaGj5DqFj7dq1i6KiIn8Gk4/D4aBnTxkqLoQQQpTTeyy4HCrg02t05cO8KhPWHNoMQbP/CTTSYV/Z2dkV3n/kyBGeeOIJ3nrrreMOHET92pG1A4/uYWPqxtLgT1EWO4rTITiK9pEdGraBQgghmjxN06o99KohtW3bFk3T+PPPP7nqqquOe/zPP/8kIiKCmJgTp3EXFBSQkJDAypUrj3ssPDzcf9tsNpd7TNM0PB5PuftsNhvDhw9n+PDhPProo4wbN45p06aVC/6UfR3fbCEV3Xfsa1fWdoAvv/yS5s2bl3usqqFnQgghxBlJ06Dfnaf2Gj3+hrbuEXW7Gr/VtaVGU72XlZ+fzyOPPEL79u3ZvHkzy5cvZ9myZbXZNlFDvpSxQwWHSu88/Csp3no/bSI7NkSzhBBCiEYnKiqK4cOHM3v2bIqLi8s9lpqayvvvv8/111/vD6SsXbu23DJr1671Dxnr1asXqampmEwm2rZtW+4SHR19Su3s3LkzhYWFp/QaJ3p9q9XK/v37j2t7YmJina1XCCGEOGN1vBRNU6EYPXVLva22xsEfp9PJzJkzSU5OZuHChcyfP5+1a9cyZMiQumifqIEKgz+HNvrr/bQIadEQzRJCCCEapddeew273c6IESP48ccfOXDgAMuWLWP48OE0b96cp59+2r/s6tWref7559mxYwezZs1i4cKF/iHxw4YNo3///owcOZKvv/6avXv38vPPP/Pwww+zYcOGarUlMzOToUOH8t5777FlyxZSUlJYuHAhzz//PFdeeWWdvH+AkJAQ7rvvPv75z3/y9ttvs3v3bjZt2sSrr77K22+/XWfrFUIIIc5Y5gA0SzAA+s6v62211c7L1nWdd955h8ceewyXy8UzzzzD7bffjtFY8/H1om4dzD9Y+sehjRwwq39zYoj04AkhhBA+7dq1Y8OGDUybNo1Ro0aRlZVFfHw8I0eOZNq0af5ZugDuvfdeNmzYwBNPPEFoaCgzZ870F1jWNI2vvvqKhx9+mLFjx3L06FHi4+MZOHCgvybPiQQHB9O3b19efPFFdu/ejdPpJDExkfHjx/PQQw/Vyfv3eeqpp4iJiWH69Ons2bOH8PBwevXqVefrFUIIIc5UmjUUiovR964CewFYg+t+nXo1Kwx17dqVPXv2MHnyZKZMmUJgYGCFy52osGB9y8vLIywsjNzc3EbXtto2dtlYNqRtIMAUwLqb1qEB+n/a0TfaSrHBwOcjPyc5LLmhmymEEKIJKSkpISUlheTkZGy2Gsx2cRpp1aoVU6ZMYcqUKQ3dlNPSmbCNCCGEEDVx+aLL2Ju3j/lH0jj7opegx00n/VrVjXlUe9jX77//TnFxMc8//zzNmzcnIiKi3CU8PJyIiIiTbrA4db5hX8WuYjJLMiF7L5klmRQbDGhoNA9ufoJXEEIIIYQQQgghRN1SNQV1gK2f1ssaqz3s6/vvv6/LdohaUDaJ62D+QaKP/OWv9xMfFI/FaGmopgkhhBBCCCGEEILSmTkBOLAO3C4w1u1sqdV+9UGDBtVlO0QtO1RwiB7713DAO/Wr1PsRQgghTs7evXsbuglCCCGEaEI0X+aPOQjysyD9D0joVqfrrHbwJy8vr1rLNfW6Oo2Zb9gXeGf
82r/On/kjwR8hhBBCCCGEEKLh+YM/Me0hf63K/mkswZ/w8PDyqUnH0HUdTdNwu9210jBRc2WHfR3K2QNH/+RATBQg07wLIYQQQgghhBCNgS+2osd0gj3e4E+f8XW6zpOq+aPrOpdccgnz5s2jeXMpItxYlM38OZj5FwAHbMGALpk/QgghhBBCCCFEI6LHdlQ39q+r83WddM0fo9FIv379aN26da03SpyccsO+Co8AcNBsAt0pwR8hhBBCCCGEEKIR8Gf+RLcDzQC5+yHvMIQ2q7N11m05aVG/SmM/pLqKyDUYyNKdwOlT88fj0ckotHM4p4TDOcU4XB4sJgMWo4GYECsd4kOwmY0N3UwhhBBCCCGEEOKk+Gr+YAqAuLMgdasa+nXWVXW2Tgn+NCFlM3/cGmywWQGIsEYQYgk55dcvdrg5lFPEwexiDuUUU2R3o6Oj6yru5NHVbQBNUxu0poFBK924HW4Pdqcbu8vjv5Q43aTllXAop5gjOSU43J5K22AyaLSLC6F3y3Cu7NGcs1tGVFmLqiF4PDolLlX7ymQwYDJoGAwn30Zd17G7POSVOMkvcXkvTgpKXGiaRqDFSIDFSIDZSKDFSKDFRIBZ3WcxGWrlPem6ztECO8UON1aTEavJQIBFXZ/o8y9xuskucpBT5MTh8uDyeHC5dVweHafbg9uj4/aorVfXdTw66Lp3e/Lfp3vvK61tFWBR7zfAbCpz2+i/bTMZT/lzL7C7yCp0kO1tu8Plwen24HCX3jYaNKwmgzdIacRqVsFKi8lQer/JgLXMY6fSrtNFbpGT3w/nklXkoNDuosjhpkVEIH1aRRIWaG7o5tUKXddxunVcHg9Ot9qeXd5rh9tDscNNidNNsdNNsUNd252V79/Kcno82J1qH+nRdYwGDaOmYTUbCLWZCQ0wEWIz+28Hmk3+dji8+4u8Yie5xU7ySrzXxS50dIya2if5rk0GDaNBQ9M0jBrYzEaigq1EBlmIDrYQFWwlyGKs832t7/vv8X7nPR5wezy4vPsI37Xv4t8voKN71DWAAQ3NAEZNw2w0YPZ+HwOruc9qKO5jtiO3R8el63jK7B99v7e+/aBBU/87dY3//1p6X+n/2aBRb+9d1737dV0dD3g8+omfVENOt4eD2cXkFDkwGw2YjBqBZhOxodZT6iRyuT0Ueb+zRQ43hXYXBk0jIshMRKDllDug7C432YVOMgvtZBU6/JcihxuDpo6bTAaN0AAzoTaT9ztuLve9N9bBb4jL7VFtKXLgcqv/l65DsM1EeIBqQ12sVzQdvn34mbaduD066fkllJT5fQ+yGokOsp6Wx3sut4dCu5u8EicFdnXeUWB34nTrhFhNBFlNhAeaiQ+zYTU1fId8idPNoZxiDmQVkVvs9O+3QR3PBJiN6tpiwGYyYrOU3hdqMxEZZGmw4wJ/wWd0SOznDf6sb7zBn8Z6AHWmKlvwGWBNSDhQPutH13V+P5zHki1H+Cs1zxswMBFk9V5b1AlqXrGL7CJ1QHI4p5iD2cVkFjrq5X0YNIgLtZEQZiPAYvSfdB/ILiar0MGfR/L480ge763dT1JkICN7NmdQ+xi6twjDZKw62KHrOtlFTtLySsgosHM0305esZPCMgd44YFmwgMthAeYiQhStwMtRv/OJK/YxZHcYlJzSziSV8KRnGKO5JaQlldCod1dYfBK08BsMGD0nWgZNX9gyGjQMBs172MG70kYFHp3uHklaod7MkwGrVxgJMRmpnl4AC0iAkiMDCQxMoDEiECahQego3agRXY3ezML2ZlewK70fHamFbAzvYDcYudxr282agRb1cFosNVEsM2E1WQgp8jpDZqoA9qGYjMbygXDjg0QBZhNGA34g5CFdjeZhQ6yvQfjVQUiT4XJGzAyGQ14PDpu78mu1WT0tzkmxEqL8ACahQcQHqg+30CrSQWPNHXSV+hwkZ5nJy2vhKwiR2lg0O7yBwoL7S5vEFZtY/GhNv//v1NCKN1bhNM
+LviE350TySp08OOOo/y0M4NfD2Sz52hhhctpGnSMD+W8tlEM6RjLOa0iMZ/iuo91NN/OX6l57M0o5EC2OiDIKXL6gyO+AKTT7cHjPTk1en/Pip3qhK/E6caoaVhMKnjg0b0BS5cHZ5kA5pnCYjIQHWTxB4XCA804vQGuAIOb6zsFQEYhmkn9TviC/xrq8y3TvwV4Owy8AZyyAZ+6ZjRoBFlM5fZXJ3Ms4/HoONwef6DG5X0vvu+mpmllbuN/7zqUCxD6gj2+QHhF5sx8lu+Xf8kny386+TdO6f+k/H2lN3xtNXjbruENEho0brjiIrp068bTz/0HTVP/S13Hv+/yfQYut8d7rZfrkNJdDo5mF3PfstU4Ufu3NjHBtIsLoV1sMO3jQogMslTadrvLze+H89i0L5tN+7P5KzWf/ZlFlX4HIwLNxIXaCAswe/efZlwedSyhOp5UINbh9lDkUIGeQocKUjtcVe/3gyxGErz75vhQK6E2M8E2tU3h/VxcHp2cotLATqb3OrvQQb73pORkaRqE2sz+99g8IoDm4QGE2Ezejgd1Mubb3/mCii63jsPtpsSpfu8K7C4yCuxk5DvIKLCTVeSgqq+gb73hgWbCA7zHSd7bARb1W2rUNDy62o8WO92UeANoxU43TreHQIuJYKvR+3mZCbGp485g3zGE1YSmUeZ75XsP3qBomW2sbEDY9/3x6Do2k/qNt3pP+tRJngGXR6fEG4DPKXaQUaDed7HDfdzrlH99D7oOQRYTgVYjYQFmujYPo09yJJ0TQk/6t9Ph8pCWV+L9/qv3qr57pYFaX9ygyKH+X0UOFxqaP6BtNpbeDjAbCQ0wE2I1VRpw8B0D780sZF9mIXszitifVcSh7GIAf6dVVLCF+LAAmoWp75DN26FW5HBxJLdEHf/mlviPhTMLHeqz9X53wgLM/o6DmGArUcEWooKs6v9tNRJgMWHQSjv0Ai0m/3c1PMBMWKD5pIMKBXYXezMK2ZtZiMut+4+xQ21mYkKsxIRYCQswn9R+3+X2sC+riG2Hctl2KJffD+exP6uI1NySCvdFJoNGbIiV+DAb8WE24kJtxIeq277ruFDbKQerF/16iJ93Z6pO9Nxisgud6rimTPDf1ynguxi8j/uODYudbgpKXN7trHrH7ZoGcSGlx5Qt/PsiM4FW9d1zuDwU2ktft8Cujkuzi5xq/1NgVwEbu9oH210e/0gWo0EjItBMRJCFiEALZqOGyXsM7DtPzSx0cDTfftKfH0CozUTb2GA6xIcwskdz+iRH1luMo9x6EvvCL2/A/rV1u0792IhBJa6++upyf3/xxRcMHTqUoKCgcvf/97//rb3W1YK8vDzCwsLIzc1t8tPQ37DkBn7P/B0DGh50WmFhLw4uSb6E5wY+x6cbDzL7+13syaj4pKw6Qqwmmkeo4EGIzYz3mF4dNIL/wLK0dxJ/dhDgz4QovVY9sbGhVpqFeQ+mwmwVngzqus7h3BK2Hszl2z/TWLr1CIVldlDBVhNnt4qgWXgA0UEWwgIt5Jc4yS50kFHo4EBWESkZheSXnNqBV0PRNPUeQ23qgCnYakIH74Gry3+A5TuQqW0GDQLMRuwuT41f32RQQTWryYjJG+gyG1RPrS8zyrcNGbxnSqUnH2VOJL0HQx5dBaqKy7znIodLHWxWM7OiugLMRiICzdjMRn8goOxBl0fX/ScRvhMLh+/kwlV6X2NmMxvolRTBuW2jObdtNF2aVX1A63R72JtRyNZDuWw7lMfG/dlsOZhz3IlDUmQg8aE2gqyqh2V7Wv5xQaEQm4lz20TTt3UkfZIj6Rgfelyvoa7rFDnc5PoyWbzXvsvRfDupeepgdHd6Qb0FqitiMmiYvNtGoKVsj5O6Xd1gg8mgYTUbsRgNGA3g9qgASYnT7Q8K5xU7yStxkVfs9H8nTQa17hCbOpgO8/bYhwWo/YZB01TmjDejpGwWjS8IU+RQQdDMArs/I6EqzUOMPD4klthmLdB
MlZ/A14TvJMgfMPd+rr6/KzpJ8o+d974X38mc0+2hxKUCVccGmPJzspj7wnS+/3Y5R9PTiIiIoHv37jz22GOce+65eLyvUeJyU+zweLO3Kg7y1waj9/9nKvO+Zz73NMu/XMKK1ev8WbW+r4hH13F74K4J41n44Xv+1wmPiKBL91788+EnaNexS7lAzMnIzc7GZDYRFFyzLGINTR0DuBykHz7I49+ncyi/4u0pKshCm9hgIgLNBFlN2MxGUnNLSMkoZH9WUYXBMZvZQFSQ1R/QLbC7am1/a9Ag0GIi0GLEo0NOkaPWflvVSY2FqCALkUEWIoMtBFmM/gxXp9tDfknp91t939VvXF0yaBDuPckyeL9PBSWuUw5YNVVBFiPDOsdxTa8WnNs2usqMF49HZ/PBHFbtzGBdSiab9uXUyf/Td6wY5s0Ws5gM6hjJ6SKn0Hna/C8DzEbCA83+3zEVGFIBxyCryZ89XuxQowcO56pyEdUJBJiNGtHBKhAUGWTxZ9UFWU1qv+vd9gsdKiCSU+xgz1EVUKqsM9bX2Yr3/KfQ4aoymFpWRKCZZuEBXNI1gRvOSSQq2HrC53g8Oku2HuHFb3aQcgrndVWxmgyEeM85QmwmjAZNBXJKXGQVOWr9ePtUBFmMJEYGEhlk8Sc1aPgC0Srg7Ttv8P2elzjdFf6fOieEMubcVozs0bzWRlBUZtQXo/gz60/mDJvDeUFJ8FJXMJjggQNgCazRa1U35lHt4M/YsWOrteL58+dXr4X15EwK/ly/5Hr+yPyD5pg5RGmWxt+7/Z3Wpmv4x/ubAPVlHtoxlvPaRePx6BQ63BTZXera4aLE6SHUZiLCe1AS7+1ZahERSFhA4xmuUeRwsfz3VL7+PY01ezLJKTo+M6UyUUEWooOtRIdYCA9UB11BVhMej05OsZPsIic5RQ7/cKVih5tA7zLBVhPxYSozKT4sgAT/bRshNrO/lwlU75/breP0eHuT3KU9Sa5j/vb1bvl6kFVGjcm/4w22VN6bcyyH90SnyOnyp64XO93kFDk5lF3kz4Y4kF3MwayicgcDNrOBFhGBtIsNpq330i42hNYxQf7eCV9avK+XIN87JK3A7sLu9PjT4yODLEQEWQixmuotiu7x6P5ex7Lvvcjh8geLfD29xU43bo+O1WTAajYSaDYSGVx6QB4VZFU/5qfINzzIN+yx/LAxdaIH+IcKFdpdpOXbOZRdzOGcYvJKnP6eE5e79EQ9wGwkNtRKbIiVqGBrue0lxJuRFWgxomkqEGt3eTiSW8yBrGL2ZargzZaDuRQcczBoMmgkRgbSMkr9kPqGoOQUO9iXWcThnGIqOgfqlBDKoPYx9EmOoEdiRIU9+en5Jazdk8UP24/y/fZ0so4J1BgNmr8H2GzUjgtuVIemQauoINrEBKkMt4hAooItWIwq28pkLA0+Gg2aP/sKHWwWI0EWEzazAbcvu8Olo2mqV9QXXPENM1G3Vdae2ag1SEasb/s61SGmlSl2uMkstJNZ4PBf5xY7sZgM2MxGQsw68VoeSS1bYrMFqDaphpXpBChPBXq9QR5D2SBO3Q1P8gXPfFmVRQ43o6++GKfTyV0PPEaLpJZkZWTwy+ofaNexE4OGX1xpNg54h5WZfIEaAwZD6ZBVXyDN93dZ/m3IZMBiLN2ezEZDhSePjz/+OIsXL2bz5s2VtmXMmDGkpaX5j8FSU1N55JFH2LJlC/v27SvTnjJhIB2cTidms7nc0O2y174gmu83q+zjBg1/4F4FPQ3+4KfJ+/0yaJoK3haXkJKSAiGx5DrgcE4xu44WsDOtgB1p+Rz0Zh5UJTLIQq+kCHq3jKBL81BaxwSTEGort83ruk5usZPUvBLS81SPck6xGiZtNqqAqtVowGou7YQKMJu8Q6ZVkDbIYqpwWLOu6+TbXWTk2zmSq4aqH823+4dkF9pd3k4w9b7DAsxEBXuDO2UuUd6TzZP5rjpcHm/Q20FWoZMjuSoz+3BOsT9ryRf
88vWSm33/E6Madmw1q6EPQVajOg7yngRHe7P6KtoGnW613hzvsVFOkfpcfcdJJc7SzBsNCPBm3AZaSodYmI2a6vn3HisUeE8iCxwu/32+oRpGQ2nb/QFRb2eRyWDAaNS8jxnKZE6r/a8vk9ffOeQ96VMZtyq7PTzATHSw+t0MtplKA8yG8lnYvkxtvNlMhXYX6fl2Nu7LZsPeLPLKdCbGh9oY3CGGs1tFcnbLCCwmA+n5dlJzS/h5dwbLf08lLa98YML3//ANB9f10qHuvqwYXYdAqzr+DLKY/P8PX+ah06V7M9hc1T4ZTwiz0TIqkFZRQbSMCqJFRAAGTVOBbqebo/l2juSpDJ/8Eqf/eMpmNpLgPe6NL3PsGxOshlrazOp4I7vQwdEC729GgZ0M729Hgd13vuHy7kNUx56vcyenSP22nGqMNTrYQsuoIALMRn/GmK+jqKIs9pqwmQ10SgilS7MwujYPo3VMEM0jAogNsZX77rjcHo4WqH1FWm4JqXneizdzKs3797H/M4vJwBXdmzFmQCu6NA87bv26rrPiz3T+8/V2/krNB9S+8eZ+LWkbG0zzcBuRQVb/duT24O/kceulnT1lh1C7vRlzId5hpr5MxqoCH7quk1no4KD3fOJgdjEHs9XxYaE3i6fY6cZiNBDsHSoWZFX7V9+wMd/+x5/d7s3Y832KTreH7CLViZ9T7MDpUudTHo9OaIA6z4gItNAs3HbSQ7dKnG5SMgrZlV7Az7szWPTrIf//JDEygPtHdOSyrgnV3l97PDob92ezPiWLlIxC9mYUUmB30a91FBd0iqVvclS5z9UX/Jl9wWzOb34ezOwE+UdgzJfQ6rwavZdaD/6crs6k4I9vA+prd7POWnrCemfnh3n18zBKnB5u7pfE1Is6EmJrPEGc2uDx6PxxJI/NB3I4mm8ns9BOdpHTP5YzItBCYqTvhy5QikaXoesqAOgbiiTDOc8sHo/OrqMFrNmdyepdGazZk1mt7LhAi5HOCaF0aa4OgM5tG018mK1G63Z7dDYfyGHtnkzWpWSxaV/2cYGossxGzZ/JEmor7RGMCbESF2olLtRGq6gg2seF1ErQTlRPSYk6sU9OTsZms6kIiLOoYRpjDixNQT2BrKxsoqIi+WzJcrqe3b/CXvjuiRE88swL/PDNUtavWUV8fDz/fuZZrh91nf9k88CBA9x77718/fXXGAwGzj//fF5++WVatWrlf5158+bxwgsvkJKSQqtWrbjrrrv4xz/+4X/84MGD3H///Sxfvhy73U6nTp2YNWsWffv29Qd/7r33Xh599FGys7O5+OKLeeONNwgJUZk4Y8aMIScnh8WLF/tfc9WqVZx//vmkp6cTExPD3r17SU5O5qOPPmL27NmsW7eOuXPncvnllzNp0iR+/PFHsrOzadOmDQ899BA33nij/7UGDx5Mjx49eOmllwBo1aoVd9xxB7t27WLhwoVERETwyCOPcMcdd1T4WR+3jRyjyOFid3ohezIKyCtx+TukYkKstIkOIjkmiPhQm/w+iUbD49H57WAO/910iM9/O1ytoEKw1cTA9tH0ax1F3+Qo2sUG12rA3u5yk1dcPjPU7nQTZFUBzVCbiRYRjfsY2OPRKXC4yC3yBhuLS4ONud7AY6HD5Q+YW00GYkOsJIQHeINaQVV2VNtdbjIL1FChjAI7mYVqyHxesepk8wVIAP8wxRCbmZZRgbSNDaZZWECt/c90XVelJPKK2Xowl3fX7mPLwVz/42e3jGD0gFa0iQkmr8RJer6dt1alsPlADqAyp+84vzVjz0v2DzsVpyanyMFHvxzgzVUp/iyyrs3DmDKsHUM6xFb6v9+VXsDiXw+xePOhKjszQqwmJl/QltvPa43RoPkTN2ZdMIuBLQbCJ6Phj8VwwWNw/r01ant1Yx4NuqX8+OOPzJgxg40bN3LkyBEWLVrEyJEj/Y/rus60adN44403yMnJ4dxzz2XOnDm0a9eu4Rp9GmhhL2a
dNdj/99s/5lPiDGFQ+xgev/ysU67t0RgZDBpdmodVGCUXVdM0TX40zmAGg0b7uBDax4UwekArPB6d1LwS9nrrAeSXOP09+sFWE0lRgSRFBhITfOqFDI0Gjd4tVU/+xCGlPWWFdpe/fpYvyBMaoHqS5eTvNOAsgmfqbprSKj10GCxBJ14OCA0NITg4mJ++XcqlwwZhMlvKDYnz9eLOeeEZnn32WebNncW7777LmFv+Ru8e3ejUqRNOp5MRI0bQv39/fvrpJ0wmE//+97+56KKL2LJlCxaLhffff5/HHnuM1157jZ49e/Lrr78yfvx4goKCGD16NAUFBQwaNIjmzZvz+eefEx8fz6ZNm/B4SnuEd+/ezeLFi1myZAnZ2dmMGjWKZ599lqeffrrC91ZQUMB7771H27ZtiYqKKvfYAw88wAsvvEDPnj2x2WyUlJTQu3dvpk6dSmhoKF9++SW33HILbdq0oU+fPpV+fi+88AJPPfUUDz30EJ9++ikTJkxg0KBBdOjQoVqff1mBFhNdW4TRtYX8hovTg8Gg0TMpgp5JETxyWSdW7cxg/d4sNu7NZsvBXHR0YrxZVR3jQ7moSzwD2kbVaYFcq8lITIiRmJATDxtqrAwGVZ8n1GYmMbL2X99qMtLMW7OroWmaRligqnHUMT6Ua3u34NcDOSxYvZevth5hw75sNuzLPu55AWYjY85txd8HtiY8sHaGWgslPNDCnYPacGv/lrz5Uwqv/7iHrYdyuf3tDbSODmLsua04JzkSDTWEfs2eTBb/eoith0qDdsFWE4M7xNA+LoRW0UGYDRortx/lu+3pHM2388xXf/Hl1lT+c223Y6ohour+/LEY9q+rs/fYoGd8hYWFdO/endtuu+24mkIAzz//PK+88gpvv/02ycnJPProo4wYMYI//vijwp6jM50vmbuFq3zP+dHsYNrEBPHqTT2bZOBHCFF7DAbNf2A0oE39rttkNJAQ1vAHZOLMYDKZWLBgAePHj2fu3Ln06tWLQYMGccMNN9CtWzf/ctdddx3jxo0D4KmnnuKbb77h1VdfZfbs2Xz88cd4PB7mzZvnD0zOnz+f8PBwVq5cyYUXXsi0adN44YUX/Mc5ycnJ/PHHH7z++uuMHj2aDz74gKNHj/LLL78QGanOdtq2bVuurR6PhwULFvgzfW655RZWrFhRLvizZMkSgoNVx09hYSEJCQksWbIEg6H87/6UKVOOO+a67777/LcnT57M8uXL+eSTT6oM/lxyySX+7KWpU6fy4osv8v33359U8EeI05nVZOSCTnFc0CkOUB0ZviGtQlSXpmn0SoqgV1IED1/aiffX7eezjQexuzz+mf7ObhnB3we1JjZEzoPrUqDFxOQL2nFj3yT+78c9fLh+P3syCnn0f79XuLzJoDGofQwjezZnWKe447LPL+6agMej8+nGgzz15R/8diCHS19ZRZse6pzdPxArsa+6PvgL/ukya1mDBn8uvvhiLr744gof03Wdl156iUceeYQrr7wSgHfeeYe4uDgWL17MDTfcUJ9NPS3oLlU7I9FZmnqqe8wEGyOYN/ocQpvYUC8hhBCNkDlQZeA01Lpr4JprruHSSy/lp59+Yu3atSxdupTnn3+eefPmMWbMGAD69+9f7jn9+/f319/57bff2LVrlz8o41NSUsLu3bspLCxk9+7d3H777YwfP97/uMvlIixMZbls3ryZnj17+gM/FWnVqlW5dSQkJJCenl5umSFDhjBnzhwAsrOzmT17NhdffDHr16+nZcuW/uXOPvvscs9zu90888wzfPLJJxw6dAiHw4HdbicwsOrPsmyATNM04uPjj2uTEGci6WgVpyou1MY9w9tzz/D2Dd2UM1p0sJWHLunEXRe049MNB/hw/QEyCkprd7WMUrNOX9o14YSFug0GjVHnJDKwfQwP/ncL328/qoaIWUsTOIg7CzQjFGdB3iEIa1Hr76nRjvVISUkhNTWVYcOG+e8LCwujb9++rFm
zptLgj91ux24v/afk5eXVeVsbC70oA4CQ2LMIsRST78jH44hk4uC2JEdXLw1eCCGEOCWaVu2hV42BzWZj+PDhDB8+nEcffZRx48Yxbdo0f/CnKgUFBfTu3Zv333//uMdiYmIoKCgA4I033qBv377lHjcaVc9gQMCJs93M5vKdN5qmlRsWBhAUFFQuY2jevHmEhYXxxhtv8O9//7vccmXNmDGDl19+mZdeeomuXbsSFBTElClTcDiqnjWvOm0SQgghTnfBVhNjzk1mzLnJp/xa8WE23rj1bC5/bTX7PDpGymT+mG0Q2wnStsGRLXUS/Gm0oenU1FQA4uLiyt0fFxfnf6wi06dPJywszH9JTEys03Y2GiW56MVZAGidRxJkiAXA7IlhzIBWDdgwIYQQ4vTRuXNnCgtLp85du3ZtucfXrl1Lp06dAOjVqxc7d+4kNjaWtm3blruEhYURFxdHs2bN2LNnz3GPJyerg8hu3bqxefNmsrKyavV9aJqGwWCguLjqmbRWr17NlVdeyc0330z37t1p3bo1O3bsqNW2CCGEEEIxGQ08c1UX8Nb8+Su1TLJKvDer9shvdbLuRhv8OVkPPvggubm5/suBAwcaukn1Y9M76LrqcXPG9+Rothr33yOhDUFSzFcIIYQoJzMzk6FDh/Lee++xZcsWUlJSWLhwIc8//7x/uDnAwoULeeutt9ixYwfTpk1j/fr1TJo0CYC//e1vREdHc+WVV/LTTz+RkpLCypUrueuuuzh48CAATzzxBNOnT+eVV15hx44dbN26lfnz5zNz5kwAbrzxRuLj4xk5ciSrV69mz549fPbZZ6xZs6ZG78dut5Oamkpqaip//vknkydPpqCggMsvv7zK57Vr145vvvmGn3/+mT///JO///3vpKWl1WjdQgghhKi+nkkRRAergt3vr9uLw+XNnE3orq7rKPjTaKMC8fHxAKSlpZGQkOC/Py0tjR49elT6PKvVitV6+la5PyluJ6ydA95yAN/9lUFB5lkEJuxjwjkjG7RpQgghRGMUHBxM3759efHFF9m9ezdOp5PExETGjx/PQw895F/uiSee4KOPPuIf//gHCQkJfPjhh3Tu3BmAwMBAfvzxR6ZOncrVV19Nfn4+zZs354ILLvBPtTpu3DgCAwOZMWMG999/P0FBQXTt2pUpU6YAYLFY+Prrr7n33nu55JJLcLlcdO7cmVmzZtXo/Sxbtsx/vBQSEkLHjh1ZuHAhgwcPrvJ5jzzyCHv27GHEiBEEBgZyxx13MHLkSHJzc6t8nhBCCCFOXvPwQLIzITWvhLk/7OauC9qVBn9St9TJOjXdP8isYWmaVm6qd13XadasGffddx/33qvmuc/LyyM2NpYFCxZUu+Bzdee8P2153PC/SfDbB4xMbMFukwFT2gSys1oy/equ3NgnqaFbKIQQogkrKSkhJSWF5OTkJjcT57HHJuLkNOVtRAghhDgZt3x1C5uPbqb44M1Q2JX//mMA3WKMMD0R0OG+XRAcU63Xqm7Mo0GHfRUUFLB582b/rBkpKSls3ryZ/fv3o2kaU6ZM4d///jeff/45W7du5dZbb6VZs2ZyEOZTJvCDZkQPVtlSeSUukqODuLZ37ReJEkIIIYQQQgghxMnTvFO590oKw+XRufujzRQSAFFt1AKptT/0q0GDPxs2bKBnz5707NkTgHvuuYeePXvy2GOPAfCvf/2LyZMnc8cdd3DOOedQUFDAsmXLpNcIjgv8cO1blBhKZwx54oqzMMtUk0IIIYQQQgghRKOieQs+39S3JQlhNlIyCnnyiz/qtO5Pg9b8GTx4MFWNOtM0jSeffJInn3yyHlt1GtB1WPLPMoGfN3F3upKj618DA/RvHc3A9tVLERNCCCFExRrJyHghhBBCNFEBFgMvXt+DG99Yy8cbDjDm7GQ6gZruvZZJasjpRtfh60dg09ugGeCaN+Csq/hw/X7sTjcAo2VqdyGEEEIIIYQQolHyDfvS0enXOoo
Jg9Rwr+d/805eVQeZPxL8Od388DyseU3dvvwV6HINP+/O4Lllf/kXiQw6w2Y7E0IIIYQQQgghThO+YV94k4z/Obw9gzvE8KvTO2FTdgqU1O7MmxL8OZ389jGsfEbdvuhZ9rW8mr+/u4Gb3lhHfokLi1ltQP4NSQghhBBCCCGEEI1K2cwfALPRwNybe9OzQ2sO6tEAbNmwqlbXKcGf04XLDiueULfPv4/3tUsYPvNHlv+ehtGgcWv/lsSHqkLYvg1JCCGEEEIIIYQQjYsvYaNsfUGb2cjcW3qTHtQBgG+/W47HU3v1ByX4c7rY9A7kHUIPacZTeZfw8KJtONwezm8XzdK7z+fJK7vgi/lI5o8QQgghhBBCCNE4+YM/lA/uWE1GuvQeCEAr524KHK5aW2eDzvYlqslZgv7TC2jA26ZreHNdKgD3j+jAPwa3KU0Zk1lJhBBCCCGEEEKIxs1f8uf4c3hLix4AnKXtJbfISajNXCurlMyfRm7boVzem/0EWv4RDulRPHPkbALMRube3JuJQ9qWG+Ll23Bk2JcQQgghhBBCCNE4VTTsyy+hOwBttUPk5+XU2jol+NPIzflmKxdmfQDAPO0aBnVuwWcTBnBRl/hKnyPDvoQQQoiqjRkzBk3T0DQNi8VC27ZtefLJJ3G5XKxcudL/mKZpxMTEcMkll7B169YKX2vEiBEYjUZ++eWXKtfZsWNHrFYrqakqg/fY9VR0WblyZW2/dSGEEEI0sCrP2UMTOKpFYdR0PId+rbV1SvCnkeue+imxWg4FAc155OF/88atZ9O5WWiFy/qihhL8EUIIIU7soosu4siRI+zcuZN7772Xxx9/nBkzZvgf3759O0eOHGH58uXY7XYuvfRSHA5HudfYv38/P//8M5MmTeKtt96qdF2rVq2iuLiYa6+9lrfffhuAAQMGcOTIEf9l1KhR/jb5LgMGDKibNy+EEEKIBnOi0Tq7LB0BMB/ZWGvrlJo/jZielcLfij8ADYr73UOw2Vr18jLsSwghRAPTdZ1iV3GDrDvAFFCj30Cr1Up8vMqknTBhAosWLeLzzz+nf//+AMTGxhIeHk58fDxTpkzhiiuu4K+//qJbt27+15g/fz6XXXYZEyZMoF+/fsycOZOAgIDj1vXmm29y0003MWjQIO6++26mTp2KxWLxrx8gICAAu91e7j4hhBBCND2VFXz2ORB4Fv3tqwk8urnW1inBn8ZK13EunkyQZme9pyPdB4w+8VOQzB8hhBANq9hVTN8P+jbIutfdtI5Ac+BJPz8gIIDMzMzj7s/NzeWjjz4CwGKx+O/XdZ358+cza9YsOnbsSNu2bfn000+55ZZbyj0/Pz+fhQsXsm7dOjp27Ehubi4//fQT559//km3VQghhBCnMV/B50ombUoL7QLZEJn9G+g61EKChwz7aqw2LsCy/yeKdQvPWydjNZ+4wrd/w5HYjxBCCFFtuq7z7bffsnz5coYOHeq/v0WLFgQHBxMeHs4HH3zAFVdcQceOHf2Pf/vttxQVFTFixAgAbr75Zt58883jXv+jjz6iXbt2nHXWWRiNRm644YYKlxNCCCHEmeFEmT95EV1w6QaCHBmQe7BW1imZP41RzgH4+lEA/uMahZbQulpPk8wfIYQQDS3AFMC6m9Y12LprYsmSJQQHB+N0OvF4PNx00008/vjj/sLNP/30E4GBgaxdu5ZnnnmGuXPnlnv+W2+9xfXXX4/JpA6nbrzxRu6//352795NmzZtyi138803+/+++eabGTRoEK+++iohISEn+3aFEEIIcZqqcrYvICg4hL/0JLpoe+HQBghPPOV1SvCnsSlIh4WjwZFPamg35qdfxJUR1Uxh9yf+SPBHCCFEw9A07ZSGXtWnIUOGMGfOHCwWC82aNfMHcXySk5MJDw+nQ4cOpKenc/311/Pjjz8CkJWVxaJFi3A6ncyZM8f/HLfbzVtvvcXTTz8NwB9//MHatWtZv349U6dOLbfcRx99xPj
x4+vhnQohhBDidBIWYOZXT1u6GPbCwQ1w1lWn/Joy7KsxOfIb/N8QOLQRbGG8H/8AHgwkRlSvJ7OylDEhhBBCHC8oKIi2bduSlJR0XODnWBMnTmTbtm0sWrQIgPfff58WLVrw22+/sXnzZv/lhRdeYMGCBbjdbkAVeh44cOBxy91zzz0y9EsIIYQ4Q/kmqKjsHN4X/AFU8KcWSPCnsfjzC3jrIsg7CFFtYdx3bC6OBqBFZPV6UGW2LyGEEKJuBAYGMn78eKZNm4au67z55ptce+21dOnSpdzl9ttvJyMjg2XLluF0Onn33Xe58cYbj1tu3LhxrFu3jt9//72h35oQQggh6tmJhn2FBZjZrHuDP0c2g8txyuuU4E9jsH8dLBwLziJocwGMWwHRbTmQVQRAYjWHffk2HBn2JYQQQtS+SZMm8eeff/L888/z22+/cc011xy3TFhYGBdccAFvvvkmn3/+OZmZmVx11fGp2p06daJTp06S/SOEEEKcgU5U8DkswEyKHk8eweAqgbRtp7xOqfnT0PKOwCe3gMcJna6Aa+eD0YTbo3MopxiAxEgZ9iWEEELUpgULFlT62ODBgyvsiUtMTMTpdAKUq99zrK+++sp/2zf8qyJ//PFHtdskhBBCiCbEN9V7Jefw4YFmdAxsoS3nsVmVhmne65RWKZk/Dcllh09uhYI0iO0MI+eAUcXj0vJKcLp1TAaNhLCazV4iw76EEEIIIYQQQojG6UTDvkIDzABscHln/j74yymvU4I/DWnpv+DgerCFwfXvgTXY/5BvyFez8ACMhuoFc2TYlxBCCCGEEEII0bid6Jw9zBv8+dXTTt0hwZ/T2Ib5sHEBoME1b0FUm3IPH8iu2ZAvKFPwWYI/QgghhBBCCCFEo+Sf7auSzB+ryYjNbGCzxxsnyNoDuYdOaZ0S/GkIB9bDV/er2xc8Cu2GHb9IDYs9g8z2JYQQQgghhBBCNHYnKvgMKvsnl2AK485Rd6yZdUrrlOBPfctPhY/LFHg+754KFzuQ7Q3+VHOad5BhX0IIIRpOZT1XQsi2IYQQQpTnz/ypIvgTHmABIKXzBHXHhreg4OhJr1OCP/XJ5fAWeE6FmE6qwHMlWToHs9SwrxYRNR/2JbEfIYQQ9cVoNALgcDgauCWisSoqUh1aZrO5gVsihBBCNC5VdZD46v7sC+8PzXqCqxjWnnz2j0z1Xp+WPQAH1oE1DG54v1yB52MdPInMn9LYj0R/hBBC1A+TyURgYCBHjx7FbDZjMEi/klB0XaeoqIj09HTCw8P9gUIhhBDiTFedYV++Gb9yS1ww8F/w0Y2w/g0YcBcERtZ4nRL8qS+b3oENb6IKPM87rsBzWQ6XhyN5JcBJ1vyR4I8QQoh6omkaCQkJpKSksG/fvoZujmiEwsPDiY+Pb+hmCCGEEI1Gder0+jJ/coud0OdiiOsKaVth3VwY8lCN1ynBn/pwcAN8ea+6PfRhaH9hlYsfzilG1yHAbCQ62FLt1UjBZyGEEA3BYrHQrl07GfoljmM2myXjRwghhDiGP/OnGsO+coudqlzMwPtg4WhYOxf6TwRbWI3WeeYEf+wF4DCC5k1HdxSBo0Bd7AXgyAdHofd2Adi9f/sedxaB2wEuu7q4vdcel3pNTQPNCAHhEBil/hHOEijJUUO93A7odDmcf98Jm+or9twiIqBGgRwp+CyEEKKhGAwGbDZbQzdDCCGEEKLRq86wr/BAX/DH27nW6QqI6QhH/4Ktn8I5t9donadF8GfWrFnMmDGD1NRUunfvzquvvkqfPn1q9iIzO4K1AYMiMR2rLPBc1gFvseca1ftBhn0JIYQQQgghhBCNnveUvdqZPwAGA1w0HdxOaFf1aKKKNPrgz8cff8w999zD3Llz6du3Ly+99BIjRoxg+/btxMbGntqLm2xgCVaFly0hYAny3g4uvfbdNgeCyQpGK5gs3msrGEyADrqusoCKc6AoU2X8mANVJlBABLQdpl6/Gspm/pw
Uif0IIYQQQgghhBCNUnUyf44L/gC0GXrS62z0wZ+ZM2cyfvx4xo4dC8DcuXP58ssveeutt3jggQeq/TorLltFcFAQ6B4APEYbuuEU3r7bezmWEQjxXnw8wI48IK9aL71xXzZQs2LPIMO+hBBCCCGEEEKIxq7GBZ9rQaMO/jgcDjZu3MiDDz7ov89gMDBs2DDWrFlT4XPsdjt2u93/d25uLgCT3/8Vg7VmwZSGFmlxk5dXvYARgLvYjdvtpiC/gDy9+s8TQgghhBBCCCFE/XAUOnAXuynKL6r0nN/oLsZjLyIz21NlXMD3WFVDyAA0/URLNKDDhw/TvHlzfv75Z/r37++//1//+hc//PAD69atO+45jz/+OE888UR9NlMIIYQQQgghhBCiwRw4cIAWLVpU+nijzvw5GQ8++CD33HOP/2+Px8O+ffvo0aMHBw4cIDQ0tAFbJ84UeXl5JCYmyjYn6oVsb6I+yfYm6ptsc6I+yfYm6ptsc+JU6bpOfn4+zZo1q3K5Rh38iY6Oxmg0kpaWVu7+tLQ04uPjK3yO1WrFarWWu89gUNO7h4aGyhdK1CvZ5kR9ku1N1CfZ3kR9k21O1CfZ3kR9k21OnIqwsLATLmOoh3acNIvFQu/evVmxYoX/Po/Hw4oVK8oNAxNCCCGEEEIIIYQQFWvUmT8A99xzD6NHj+bss8+mT58+vPTSSxQWFvpn/xJCCCGEEEIIIYQQlWv0wZ/rr7+eo0eP8thjj5GamkqPHj1YtmwZcXFx1X4Nq9XKtGnTjhsOJkRdkW1O1CfZ3kR9ku1N1DfZ5kR9ku1N1DfZ5kR9adSzfQkhhBBCCCGEEEKIU9Ooa/4IIYQQQgghhBBCiFMjwR8hhBBCCCGEEEKIJkyCP0IIIYQQQgghhBBNmAR/hBBCCCGEEEIIIZqwJhP8mTVrFq1atcJms9G3b1/Wr19f5fILFy6kY8eO2Gw2unbtyldffVVPLRVNRU22uQULFqBpWrmLzWarx9aK09mPP/7I5ZdfTrNmzdA0jcWLF5/wOStXrqRXr15YrVbatm3LggUL6rydommo6fa2cuXK4/ZvmqaRmppaPw0Wp7Xp06dzzjnnEBISQmxsLCNHjmT79u0nfJ4cx4mTcTLbmxzDiVMxZ84cunXrRmhoKKGhofTv35+lS5dW+RzZv4m60iSCPx9//DH33HMP06ZNY9OmTXTv3p0RI0aQnp5e4fI///wzN954I7fffju//vorI0eOZOTIkWzbtq2eWy5OVzXd5gBCQ0M5cuSI/7Jv3756bLE4nRUWFtK9e3dmzZpVreVTUlK49NJLGTJkCJs3b2bKlCmMGzeO5cuX13FLRVNQ0+3NZ/v27eX2cbGxsXXUQtGU/PDDD0ycOJG1a9fyzTff4HQ6ufDCCyksLKz0OXIcJ07WyWxvIMdw4uS1aNGCZ599lo0bN7JhwwaGDh3KlVdeye+//17h8rJ/E3WpSUz13rdvX8455xxee+01ADweD4mJiUyePJkHHnjguOWvv/56CgsLWbJkif++fv360aNHD+bOnVtv7Ranr5pucwsWLGDKlCnk5OTUc0tFU6NpGosWLWLkyJGVLjN16lS+/PLLcgcKN9xwAzk5OSxbtqweWimaiupsbytXrmTIkCFkZ2cTHh5eb20TTdPRo0eJjY3lhx9+YODAgRUuI8dxorZUZ3uTYzhR2yIjI5kxYwa33377cY/J/k3UpdM+88fhcLBx40aGDRvmv89gMDBs2DDWrFlT4XPWrFlTbnmAESNGVLq8EGWdzDYHUFBQQMuWLUlMTKwy4i/EqZJ9nGgIPXr0ICEhgeHDh7N69eqGbo44TeXm5gLq5Kgyso8TtaU62xvIMZyoHW63m48++ojCwkL69+9f4TKyfxN16bQP/mRkZOB2u4mLiyt3f1xcXKX1BlJTU2u0vBBlncw216FDB9566y3+97//8d577+HxeBgwYAAHDx6sjyaLM0xl+7i8vDyKi4sbqFWiqUp
ISGDu3Ll89tlnfPbZZyQmJjJ48GA2bdrU0E0TpxmPx8OUKVM499xz6dKlS6XLyXGcqA3V3d7kGE6cqq1btxIcHIzVauXOO+9k0aJFdO7cucJlZf8m6pKpoRsgxJmgf//+5SL8AwYMoFOnTrz++us89dRTDdgyIYQ4NR06dKBDhw7+vwcMGMDu3bt58cUXeffddxuwZeJ0M3HiRLZt28aqVasauiniDFDd7U2O4cSp6tChA5s3byY3N5dPP/2U0aNH88MPP1QaABKirpz2mT/R0dEYjUbS0tLK3Z+WlkZ8fHyFz4mPj6/R8kKUdTLb3LHMZjM9e/Zk165dddFEcYarbB8XGhpKQEBAA7VKnEn69Okj+zdRI5MmTWLJkiV8//33tGjRospl5ThOnKqabG/HkmM4UVMWi4W2bdvSu3dvpk+fTvfu3Xn55ZcrXFb2b6IunfbBH4vFQu/evVmxYoX/Po/Hw4oVKyodS9m/f/9yywN88803lS4vRFkns80dy+12s3XrVhISEuqqmeIMJvs40dA2b94s+zdRLbquM2nSJBYtWsR3331HcnLyCZ8j+zhxsk5mezuWHMOJU+XxeLDb7RU+Jvs3UZeaxLCve+65h9GjR3P22WfTp08fXnrpJQoLCxk7diwAt956K82bN2f69OkA3H333QwaNIgXXniBSy+9lI8++ogNGzbwf//3fw35NsRppKbb3JNPPkm/fv1o27YtOTk5zJgxg3379jFu3LiGfBviNFFQUFCuhzElJYXNmzcTGRlJUlISDz74IIcOHeKdd94B4M477+S1117jX//6F7fddhvfffcdn3zyCV9++WVDvQVxGqnp9vbSSy+RnJzMWWedRUlJCfPmzeO7777j66+/bqi3IE4jEydO5IMPPuB///sfISEh/roWYWFh/kxFOY4TteVktjc5hhOn4sEHH+Tiiy8mKSmJ/Px8PvjgA1auXMny5csB2b+JeqY3Ea+++qqelJSkWywWvU+fPvratWv9jw0aNEgfPXp0ueU/+eQTvX379rrFYtHPOuss/csvv6znFovTXU22uSlTpviXjYuL0y+55BJ906ZNDdBqcTr6/vvvdeC4i28bGz16tD5o0KDjntOjRw/dYrHorVu31ufPn1/v7Ranp5pub88995zepk0b3Waz6ZGRkfrgwYP17777rmEaL047FW1rQLl9lhzHidpyMtubHMOJU3HbbbfpLVu21C0Wix4TE6NfcMEF+tdff+1/XPZvoj5puq7r9RlsEkIIIYQQQgghhBD157Sv+SOEEEIIIYQQQgghKifBHyGEEEIIIYQQQogmTII/QgghhBBCCCGEEE2YBH+EEEIIIYQQQgghmjAJ/gghhBBCCCGEEEI0YRL8EUIIIYQQQgghhGjCJPgjhBBCCCGEEEII0YRJ8EcIIYQQQgghhBCiCZPgjxBCCCHOeGPGjGHkyJH1vt4FCxagaRqapjFlypRqPWfMmDH+5yxevLhO2yeEEEKIpsHU0A0QQgghhKhLmqZV+fi0adN4+eWX0XW9nlpUXmhoKNu3bycoKKhay7/88ss8++yzJCQk1HHLhBBCCNFUSPBHCCGEEE3akSNH/Lc//vhjHnvsMbZv3+6/Lzg4mODg4IZoGqCCU/Hx8dVePiwsjLCwsDpskRBCCCGaGhn2JYQQQogmLT4+3n8JCwvzB1t8l+Dg4OOGfQ0ePJjJkyczZcoUIiIiiIuL44033qCwsJCxY8cSEhJC27ZtWbp0abl1bdu2jYsvvpjg4GDi4uK45ZZbyMjIqHGbZ8+eTbt27bDZbMTFxXHttdee6scghBBCiDOYBH+EEEIIISrw9ttvEx0dzfr165k8eTITJkzguuuuY8CAAWzatIkLL7yQW265haKiIgBycnIYOnQoPXv2ZMOGDSxbtoy0tDRGjRpVo/Vu2LCBu+66iyeffJLt27ezbNkyBg4cWBdvUQghhBBnCBn2JYQQQghRge7du/PII48A8OCDD/L
ss88SHR3N+PHjAXjssceYM2cOW7ZsoV+/frz22mv07NmTZ555xv8ab731FomJiezYsYP27dtXa7379+8nKCiIyy67jJCQEFq2bEnPnj1r/w0KIYQQ4owhmT9CCCGEEBXo1q2b/7bRaCQqKoquXbv674uLiwMgPT0dgN9++43vv//eX0MoODiYjh07ArB79+5qr3f48OG0bNmS1q1bc8stt/D+++/7s4uEEEIIIU6GBH+EEEIIISpgNpvL/a1pWrn7fLOIeTweAAoKCrj88svZvHlzucvOnTtrNGwrJCSETZs28eGHH5KQkMBjjz1G9+7dycnJOfU3JYQQQogzkgz7EkIIIYSoBb169eKzzz6jVatWmEyndohlMpkYNmwYw4YNY9q0aYSHh/Pdd99x9dVX11JrhRBCCHEmkcwfIYQQQohaMHHiRLKysrjxxhv55Zdf2L17N8uXL2fs2LG43e5qv86SJUt45ZVX2Lx5M/v27eOdd97B4/HQoUOHOmy9EEIIIZoyCf4IIYQQQtSCZs2asXr1atxuNxdeeCFdu3ZlypQphIeHYzBU/5ArPDyc//73vwwdOpROnToxd+5cPvzwQ84666w6bL0QQgghmjJN13W9oRshhBBCCHEmWrBgAVOmTDmpej6aprFo0SJGjhxZ6+0SQgghRNMimT9CCCGEEA0oNzeX4OBgpk6dWq3l77zzToKDg+u4VUIIIYRoSiTzRwghhBCigeTn55OWlgao4V7R0dEnfE56ejp5eXkAJCQkEBQUVKdtFEIIIcTpT4I/QgghhBBCCCGEEE2YDPsSQgghhBBCCCGEaMIk+COEEEIIIYQQQgjRhEnwRwghhBBCCCGEEKIJk+CPEEIIIYQQQgghRBMmwR8hhBBCCCGEEEKIJkyCP0IIIYQQQgghhBBNmAR/hBBCCCGEEEIIIZowCf4IIYQQQgghhBBCNGH/DxLYXR3KpLJAAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.plot(os_xs, opensmile_feats.HNRdBACF_sma3nz)\n", + "hnr_sma3 = torch.nn.functional.avg_pool1d(hnr, kernel_size=3, padding=1, stride=1, count_include_pad=False)\n", + "plt.plot(xs, hnr_sma3[0])\n", + "plt.plot(praat_harmonicity.xs() - 0.02, praat_harmonicity.values.T)\n", + "plt.ylim(0, 40)\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"HNR [dB]\")\n", + "plt.legend([\"OpenSmile\", \"SpeechBrain\", \"PRAAT\"])\n", + "print(\"Maximum OpenSmile HNR value:\", max(opensmile_feats.HNRdBACF_sma3nz))" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "ab0b864a-eb64-4d4d-a25a-8e4928ebff79", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABIQAAADZCAYAAABRjesMAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAA3KJJREFUeJzsnXd4U+X7xj8n6d500dIyyt57CDJkCeIARVFRceLPPfi6cO8NuBVFxK0sAUG27L337KAtHZTu3TTJ7483J23ohkJBns919Up6zknyNk3OuN/7uR/NarVaEQRBEARBEARBEARBEC4bDHU9AEEQBEEQBEEQBEEQBOHCIoKQIAiCIAiCIAiCIAjCZYYIQoIgCIIgCIIgCIIgCJcZIggJgiAIgiAIgiAIgiBcZoggJAiCIAiCIAiCIAiCcJkhgpAgCIIgCIIgCIIgCMJlhghCgiAIgiAIgiAIgiAIlxkiCAmCIAiCIAiCIAiCIFxmONX1AC40FouFhIQEvL290TStrocjCIIgCIIgCIIgCIJQK1itVrKzs2nQoAEGQ+UeoMtOEEpISKBhw4Z1PQxBEARBEARBEARBEITzQlxcHOHh4ZVuc9kJQt7e3oB6c3x8fOp4NIIgCIIgCIIgCIIgCLVDVlYWDRs2tGsflXHZCUJ6mZiPj48IQoIgCIIgCIIgCIIg/OeoTkSOhEoLgiAIgiAIgiAIgiBcZoggJAiCIAiCIAiCIAiCcJkhgtBlSn6Rmf0nM+t6GIIgCIIgCIIgCIIg1AGXXYaQoJgwczeL9yfx54NX0KtpQF0PRxAEQRAEQRAEQSiF2WzGZDLV9TCEixBnZ2eMRuM5P48IQpchSZkFLD2QBMCR5GwRhARBEARBEARBEC4icnJyiI+Px2q11vVQhIsQTdMIDw/Hy8vrnJ5HBKHLkDk747HY9iuZeaI4C4IgCIIgCIIgXCyYzWbi4+Px8PAgKCioWt2ihMsHq9VKSkoK8fHxtGjR4pycQiIIXWZYrVZmbY+z/56ZL4KQIAiCIAiCIAjCxYLJZMJqtRIUFIS7u3tdD0e4CAkKCiImJgaTyXROgpCESl9mbItJJyY1z/67CEKCIAiCIAiCIAgXH+IMEiqitj4bIghdZujuIA8XpSJmiCAkCIIgCIIgCIIgCJcdIghdRuQUFrNoXyIAY7o3BMQhJAiCIAiCIAiCIAiXIyIIXUb8szeRvCIzTQM9GdQ6GIAsEYQ
EQRAEQRAEQRAEodq8/vrrdO7c2f77Pffcw6hRo+psPGeLCEKXEXN2xgNwc/dw/DycAXEICYIgCIIgCIIgCLVHXFwc9913Hw0aNMDFxYXGjRvz5JNPkpqaesHHkpKSwsMPP0yjRo1wdXUlJCSEYcOGsWHDhnN63meeeYaVK1fW0ijrDukydhlx7FQOAFe1DMbLVf3rM6TtvCAIgiAIgiAIglALREVF0bt3b1q2bMnvv/9OREQEBw4c4Nlnn2Xx4sVs3rwZf3//Czae0aNHU1RUxI8//kjTpk1JTk5m5cqV5yxOeXl54eXlVUujrDvEIXSZUGy2kJ5XBECQtyu+7sohlG8yU1RsqcuhCYIgCIIgCIIgCBVgtVrJKyqukx+r1VqjsT766KO4uLiwbNkyBgwYQKNGjbjmmmtYsWIFJ0+e5KWXXgKgSZMmvPXWW9x+++14enoSFhbGl19+6fBcGRkZPPDAAwQFBeHj48OgQYPYs2ePfb1etvXzzz/TpEkTfH19ue2228jOzrY/ft26dXzwwQcMHDiQxo0b07NnTyZOnMgNN9xgfx5N05g6dSrXXXcdHh4etGnThk2bNnH8+HGuuuoqPD096dOnD5GRkWVeuyIsFgvvvfceERERuLu706lTJ2bPnl2j9/JCIA6hy4S0vCKsVtA0qOfhjEHT0DSwWlXZWJC3a10PURAEQRAEQRAEQTiDfJOZtq8urZPXPvjmMDxcqicbpKWlsXTpUt555x3c3d0d1oWEhHDHHXfw559/8tVXXwHw0Ucf8eKLL/LGG2+wdOlSnnzySVq2bMnQoUMBuOWWW3B3d2fx4sX4+voydepUBg8ezNGjR+0uo8jISObNm8fChQtJT09nzJgxvP/++7zzzjt2F8+8efO44oorcHWt+Jr3rbfeYvLkyUyePJnnn3+esWPH0rRpUyZOnEijRo247777eOyxx1i8eHG13ov33nuPX375hW+++YYWLVqwdu1a7rzzToKCghgwYEC1nuNCIA6hy4TUHOUO8vdwwclowGDQ8LaVjUmOkCAIgiAIgiAIgnAuHDt2DKvVSps2bcpd36ZNG9LT00lJSQHgyiuv5IUXXqBly5Y8/vjj3HzzzUyZMgWA9evXs3XrVmbNmkX37t1p0aIFH3/8MX5+fg5OG4vFwowZM2jfvj39+vXjrrvusmf7ODk5MWPGDH788Uf8/Py48sorefHFF9m7d2+Zsd17772MGTOGli1b8vzzzxMTE8Mdd9zBsGHDaNOmDU8++SSrV6+u1vtQWFjIu+++y/Tp0xk2bBhNmzblnnvu4c4772Tq1Kk1eUvPO+IQukzQBaEALxf7Mj8PF7IKisnML6qrYQmCIAiCIAiCIAiV4O5s5OCbw+rstWtKdcvMevfuXeb3Tz75BIA9e/aQk5NDQECAwzb5+fkOpVtNmjTB29vb/ntoaCinTp2y/z569GiuvfZa1q1bx+bNm1m8eDEffvgh06ZN45577rFv17FjR/v9+vXrA9ChQweHZQUFBWRlZeHj41Pp33X8+HHy8vLsTiedoqIiunTpUuljLzQiCF0mpOYWAhDgWWKT03OExCEkCIIgCIIgCIJwcaJpWrXLtuqS5s2bo2kahw4d4sYbbyyz/tChQ9SrV4+goKAqnysnJ4fQ0NByXTl+fn72+87Ozg7rNE3DYnHMyHVzc2Po0KEMHTqUV155hQceeIDXXnvNQRAq/TyaplW47MznrmjsAIsWLSIsLMxhXWVla3XBxf+pEmqFlGybIFTKISSCkCAIgiAIgiAIglAbBAQEMHToUL766iuefvpphxyhpKQkfv31V8aNG2cXVzZv3uzw+M2bN9vLzbp27UpSUhJOTk40adKkVsfZtm1b5s2bV6vPeebzu7q6Ehsbe1HlBZWHCEKXCam5qiws0Ksch5C0nhcEQRAEQRAEQRDOkS+++II+ffowbNgw3n77bYe282FhYbzzzjv2bTds2MCHH37IqFGjWL58ObNmzWL
RokUADBkyhN69ezNq1Cg+/PBDWrZsSUJCAosWLeLGG2+ke/fuVY4lNTWVW265hfvuu4+OHTvi7e3N9u3b+fDDDxk5cuR5ew+8vb155plnePrpp7FYLPTt25fMzEw2bNiAj48Pd99993l77ZoigtBlQmqOcggFlnYIeShBKEMcQoIgCIIgCIIgCMI50qJFC7Zv385rr73GmDFjSEtLIyQkhFGjRvHaa6/Zu4MB/O9//2P79u288cYb+Pj4MHnyZIYNU1lJmqbxzz//8NJLL3HvvfeSkpJCSEgI/fv3t2f8VIWXlxe9evViypQpREZGYjKZaNiwIePHj+fFF188L3+/zltvvUVQUBDvvfceUVFR+Pn50bVr1/P+ujVFs1Y38ek/QlZWFr6+vmRmZlYZBvVf4v4Z21h5+BTv3dSB23s2AuCDJYf5enUk917ZhNeub1fHIxQEQRAEQRAEQRAKCgqIjo4mIiICNze3uh7OeaFJkyY89dRTPPXUU3U9lEuSyj4jNdE8pO38ZcJpW8lYgKdkCAmCIAiCIAiCIAjC5Y4IQpcJp22h0oHeZTOEskQQEgRBEARBEARBEITLCskQugywWq32tvOBpdrO+9kEoQwJlRYEQRAEQRAEQRAuEDExMXU9BIE6dgitXbuW66+/ngYNGqBpWrVav61evZquXbvi6upK8+bNmTFjxnkf56VOXpGZApMFkLbzgiAIgiAIgiAIgiDUsSCUm5tLp06d+PLLL6u1fXR0NNdeey0DBw5k9+7dPPXUUzzwwAMsXbr0PI/00iY1R+UHuTkb8HAx2pf7iCAkCIIgCIIgCIIgCJcldVoyds0113DNNddUe/tvvvmGiIgIJk2aBECbNm1Yv349U6ZMsbenE8py2lYuFuDpiqZp9uW6Q0jazguCIAiCIAiCIAjC5cUlFSq9adMmhgwZ4rBs2LBhbNq0qcLHFBYWkpWV5fBzuVFeoDSAn4cShIqKLRSYzBd8XIIgCIIgCIIgCIIg1A21Kgjl5eXV5tOVISkpifr16zssq1+/PllZWeTn55f7mPfeew9fX1/7T8OGDc/rGC9GUm0t5wNLtZwH8HJ1wmhQjiEpGxMEQRAEQRAEQRCEy4caC0KDBw/m5MmTZZZv3bqVzp0718aYapWJEyeSmZlp/4mLi6vrIV1wUnNsJWNejoKQpmn4uKmqQRGEBEEQBEEQBEEQhP86r7/+ep1rF1dddRVPPfVUnY4BzkIQcnNzo2PHjvz5558AWCwWXn/9dfr27cuIESNqfYClCQkJITk52WFZcnIyPj4+uLu7l/sYV1dXfHx8HH4uN07bQqUDvFzLrPPzUCKRtJ4XBEEQBEEQBEEQzoWUlBQefvhhGjVqhKurKyEhIQwbNowNGzbU9dBqxD333IOmafafgIAAhg8fzt69e2vl+efOnctbb71VK891LtQ4VHrRokV8+eWX3HfffcyfP5+YmBhOnDjBwoULufrqq8/HGO307t2bf/75x2HZ8uXL6d2793l93Usde8lYOYKQdBoTBEEQBEEQBEEQaoPRo0dTVFTEjz/+SNOmTUlOTmblypWkpqbW9dBqzPDhw/nhhx8AFV/z8ssvc9111xEbG1vhY0wmE87OzlU+t7+/f62N81w4qwyhRx99lCeeeII//viD7du3M2vWrLMSg3Jycti9eze7d+8GVFv53bt329/giRMnMm7cOPv2Dz30EFFRUTz33HMcPnyYr776ipkzZ/L000+fzZ9x2WAPlT6jZAxKOo2JICQIgiAIgiAIgiCcLRkZGaxbt44PPviAgQMH0rhxY3r27MnEiRO54YYbABVb8vXXX3PNNdfg7u5O06ZNmT17tsPzxMXFMWbMGPz8/PD392fkyJHExMQ4bDNt2jTatGmDm5sbrVu35quvvnJYHx8fz+23346/vz+enp50796dLVu2OGzz888/06RJE3x9fbntttvIzs52WK87nEJCQujcuTMvvPACcXFxpKSkABATE4O
mafz5558MGDAANzc3fv31V1JTU7n99tsJCwvDw8ODDh068Pvvvzs895klY02aNOHdd9/lvvvuw9vbm0aNGvHtt9/W+H9QU2osCKWnpzN69Gi+/vprpk6dypgxY7j66qvL/AOqw/bt2+nSpQtdunQBYMKECXTp0oVXX30VgMTERAf1LSIigkWLFrF8+XI6derEpEmTmDZtmrScr4LUUm3nz0QEIUEQBEEQBEEQhIsYqxWKcuvmx2qt9jC9vLzw8vJi3rx5FBYWVrjdK6+8wujRo9mzZw933HEHt912G4cOHQKUw2bYsGF4e3uzbt06NmzYgJeXF8OHD6eoSFW+/Prrr7z66qu88847HDp0iHfffZdXXnmFH3/8EVDGkwEDBnDy5EkWLFjAnj17eO6557BYLPYxREZGMm/ePBYuXMjChQtZs2YN77//foVjzsnJ4ZdffqF58+YEBAQ4rHvhhRd48sknOXToEMOGDaOgoIBu3bqxaNEi9u/fz4MPPshdd93F1q1bK33/Jk2aRPfu3dm1axePPPIIDz/8MEeOHKn8TT9Halwy1r59eyIiIti1axcRERGMHz+eP//8k0ceeYRFixaxaNGiaj/XVVddhbWSD9iMGTPKfcyuXbtqOuzLmlR7hlBZh5CfLgjlFV3QMQmCIAiCIAiCIAjVwJQH7zaom9d+MQFcPKu1qZOTEzNmzGD8+PF88803dO3alQEDBnDbbbfRsWNH+3a33HILDzzwAABvvfUWy5cv5/PPP+err77izz//xGKxMG3aNDRNdcT+4Ycf8PPzY/Xq1Vx99dW89tprTJo0iZtuuglQxpGDBw8ydepU7r77bn777TdSUlLYtm2bvTSrefPmDmO1WCzMmDEDb29vAO666y5WrlzJO++8Y99m4cKFeHl5AZCbm0toaCgLFy7EYHD01Tz11FP2seg888wz9vuPP/44S5cuZebMmfTs2bPC92/EiBE88sgjADz//PNMmTKFVatW0apVq6re+rOmxg6hhx56iLVr1xIREWFfduutt7Jnzx67YidcPJgtVtLyKhaExCEkCIIgCIIgCIIg1AajR48mISGBBQsWMHz4cFavXk3Xrl0dzB5nZgD37t3b7hDas2cPx48fx9vb2+448vf3p6CggMjISHJzc4mMjOT++++3r/fy8uLtt98mMjISgN27d9OlS5dKc3qaNGliF4MAQkNDOXXqlMM2AwcOtEfcbN26lWHDhnHNNddw4sQJh+26d+/u8LvZbOatt96iQ4cO+Pv74+XlxdKlSyvNHgIcRDNN0wgJCSkzptqmxg6hV155pdzl4eHhLF++/JwHJNQu6XlFWK2gaeDvIYKQIAiCIAiCIAjCJYWzh3Lq1NVr1xA3NzeGDh3K0KFDeeWVV3jggQd47bXXuOeee6p8bE5ODt26dePXX38tsy4oKIicnBwAvvvuO3r16uWw3mg0AlTYgbw0ZwY/a5rmUFIG4Onp6eAsmjZtGr6+vnz33Xe8/fbbDtuV5qOPPuLTTz/lk08+oUOHDnh6evLUU09VaaCpzphqmxoLQmvXrq10ff/+/c96MELtczpH1W7W83DByVjWECaCkCAIgiAIgiAIwkWMplW7bOtipG3btsybN8/+++bNmx2aR23evNmeK9y1a1f+/PNPgoOD8fHxKfNcvr6+NGjQgKioKO64445yX69jx45MmzaNtLS0Wu3mpWkaBoOB/Pz8SrfbsGEDI0eO5M477wRUedrRo0dp27ZtrY2ltqixIHTVVVeVWabX9oGyRwkXD/b8IM+y7iAAXw8lCGWIICQIgiAIgiAIgiCcJampqdxyyy3cd999dOzYEW9vb7Zv386HH37IyJEj7dvNmjWL7t2707dvX3799Ve2bt3K999/D8Add9zBRx99xMiRI3nzzTcJDw/nxIkTzJ07l+eee47w8HDeeOMNnnjiCXx9fRk+fDiFhYVs376d9PR0JkyYwO233867777LqFGjeO+99wgNDWX
Xrl00aNCgTLlaZRQWFpKUlASo5lpffPEFOTk5XH/99ZU+rkWLFsyePZuNGzdSr149Jk+eTHJy8n9DEEpPT3f43WQysWvXLl555RWHACbh4kB3CJWXHwTiEBIEQRAEQRAEQRDOHS8vL3r16sWUKVOIjIzEZDLRsGFDxo8fz4svvmjf7o033uCPP/7gkUceITQ0lN9//90ulnh4eLB27Vqef/55brrpJrKzswkLC2Pw4MF2x9ADDzyAh4cHH330Ec8++yyenp506NDB3sbdxcWFZcuW8b///Y8RI0ZQXFxM27Zt+fLLL2v09yxZsoTQ0FAAvL29ad26NbNmzSrXJFOal19+maioKIYNG4aHhwcPPvggo0aNIjMzs0avfyHQrJW1+aoBa9asYcKECezYsaM2nu68kZWVha+vL5mZmeVa0P5rTF8fzZsLD3Jdx1C+GNu1zPpDiVlc8+k6Ar1c2P7y0DoYoSAIgiAIgiAIgqBTUFBAdHQ0ERERuLm51fVwahVN0/jrr78YNWpUXQ/lkqayz0hNNI8adxmriPr163PkyJHaejqhlkjNVQ6hQC/XcteXdgjVkjYoCIIgCIIgCIIgCMJFTo1Lxvbu3evwu9VqJTExkffff5/OnTvX1riEWuJ0duUZQn62DCGT2UpekRlP1xp/JARBEARBEARBEARBuMSo8dV/586d0TStjJvkiiuuYPr06bU2MKF20B1CARU4hNydjTgbNUxmK5n5JhGEBEEQBEEQBEEQhPOCVKVcXNT46j86Otrhd4PBQFBQ0H+utvG/wmlbl7HACkKlNU3D192Z0zlFZOabaODnfiGHJwiCIAiCIAiCIAhCHVBjQahx48bnYxzCeaIqhxCAj00QysiTTmOCIAiCIAiCIAiCcDlQLUHos88+q/YTPvHEE2c9GKH2Sa3CIQTgJ63nBUEQBEEQBEEQLiqkvEqoiNr6bFRLEJoyZUq1nkzTNBGELiIKTcVEmCLJ0tzx86hYENI7jWWJICQIgiAIgiAIglCnGI1GAIqKinB3l0gPoSxFRcr4oX9WzpZqCUJn5gYJFzknd8Ke33E6tJBFrglkWT3wZDTgW+7mvuIQEgRBEARBEARBuChwcnLCw8ODlJQUnJ2dMRgMdT0k4SLCYrGQkpKCh4cHTk7n1hSq2o+2WCzyQbwUyDkF0waD1YKuFfpoeXByOzQfXO5DdPfQaVvekCAIgiAIgiAIglA3aJpGaGgo0dHRnDhxoq6HI1yEGAwGGjVqhKZp5/Q81RaEnJ2dSUxMJDg4GIBnn32WiRMn4u/vf04DEGqZjDiwWsDNjxMDJrPvn2+5zrgZ4rdVKAiF2TqLJWQUXMiRCoIgCIIgCIIgCOXg4uJCixYt7KVBglAaFxeXWjHsVFsQOjO0aOrUqTz88MMiCF1sFGSoW9+GJIcMZLNlnRKE4rZU+JDwekoQik/PuwADFAThP4PFDFYrGM/NqioIgiAIgiCUxWAw4ObmVtfDEP7DnPVZvCSeX6QUZKpbN19yCk3stLRQv8dvB4sFylERw2yC0Mn0/As1SkEQLnUKc+Dr3mqf0+Uu6PEA+EfU9agEQRAEQRAEQagmEgr0X0N3CLn7kVNo5oi1IQWaGxRmQcrhch8SXs8DgFPZhRSYzBdooIIgXNLEbYaMWCUIbfoCPusCfz+phGdBEARBEARBEC56auQQevXVV/HwUOJBUVER77zzDr6+jp2rJk+eXHujE2pOKYdQbmExZoxEu7amTcFuiN8K9duWeUg9D2fcnY3km8wkZhYQEeh5YccsCMKlx4lN6rZRb3B2h8h/YccM6DIOwrvV6dAEQRAEQRAEQaiaagtC/fv358iRI/bf+/TpQ1RUlMM255pwLdQC+Rnq1s2XnIJiAOI82ytBKG4rdLunzEM0TSO8njvHTuUQn54ngpAgCFUTu1nddrodut0Nv98OR/6BmLUiCAmCIAiCIAjCJUC1BaHVq1efx2EItYbdIeRHTqEShJJ8OkI
qShCqgDCbICQ5QoIgVElxEZzcru436q1uI/orQSh6HfR9uu7GJgiCIAiCIAhCtZAMof8aeoaQm69dEEqr10ktSz0GeWnlPqyk05gIQoIgVEHibiguAI8ACLQF1zfpp25jN4PZVGdDE4QLQl4abP4aEnbX9UgEQRAEQRDOGhGE/mvoDiF3P3JtgpCTVwAE6N3GtpX7sDA/lQ11MkMEIUEQqiC2VH6QXioc3Bbc/cGUCyd31t3YBOF8UpgDaz+CTzvDkhdgobjhBEEQBEG4dBFB6L+GQ9t5JQh5ujpBw15qedyWch9W4hDKO+9DFAThEkfPD9LLxQAMBmjSV92PWXt+XvfgfPj7Kcg9fX6eXxAqIzVSddP7920otB1r06PrdkyCIAiCIAjngAhC/zVKh0o7CEI91PIKcoTCbIKQZAgJglApFoujQ6g0Ef3VbfS62n/d5IMw+37Y8QP8fGOJ+C0IF4ot30DuKfBrBNd9opblp0ORTKQIgiAIgnBpIoLQf41SodJ6yZi3qxOE91TLT+4Ac3GZh+kOoaSsAkxmywUZqiAIlyCnj6qLYGcPCO3ouE7PEYrbAsWFJctL3z8bLGZY8BhYbNlESXvh1zFQlHtuzysI1cVqhcP/qPvXfKQ6drp4qd+zE+tsWIIgCIIgCOdCtbqM7d27t9pP2LFjx6o3Es4PVqtDqHR2gSqr8HR1gqDW4OqrbO6nDpa5kAvycsXVyUBhsYWkzAIa+ntc4MELgnBJELtR3YZ3B6Oz47qgVuAZrFwU8duhcR9Y+BTs/BnGzStxENWUzV8pMdvVB0Z/D3MfgLjN8MdYGDsTnFzP5S8ShKpJ3ANZ8eDsCU0HqOwsnwZKIM06CQHN6nqEgiAIgiAINaZaglDnzp3RNA2r1Vruen2dpmmYzeZaHaBQA0x5YLG5f9z9yC0qVTJmMEBIezixoVxBSNM0wvzciTqdS1x6nghCgiCUT3n5QTqapnKEDsyFmHWqtGzHDLVu7cdnJwilRqrMFoCr34aWV8Mdc+CnkRC1Wj1/r/87iz9EEGrA4UXqtvkgcFaO2hJBKKHuxiUIgiAIgnAOVEsQio6W0MRLAj0/yOAEzh7kFipxztvN9m8OaqUEoZTD5T48rJ4ShKT1vCAIFVJRfpBORD8lCG2fDjnJJcuj18CpQxDcpmavt+h/qsV9xADoOk4ta9gDBr0ES19UQdMiCAnnG10Qan1dyTKfMHWbdfLCj0cQBEEQBKEWqJYg1Lhx4/M9DqE2KNVhDE0jp6CUQwggyHYhdqp8QShcgqUFQaiMzJOQEQuaUZWMlUcTmwtIF4N6PaxKbQ79DVu/heumVP/1TPnKBQRw7eSSFvcAbW5QglDsJshJAa+gGv85glAt0qLh1AH1uW9xdclynwbqNlMEIUEQBEEQLk2qJQiVx8GDB4mNjaWoqMhh+Q033HDOgxLOklKB0oXFZops4dBeuiAU3Frdphwq9+Hh9VSZmDiEBEEol2hbO/nQjuDqXf42Ac3Id6uPe0Ey5uZXYxz2DpzYqAShPX/A4FfBvV71Xi8tCrAqkfvMjBa/hhDaGRJ3w5F/oNvdZ/lHCUIVHLGFSTfuAx7+Jct1QUhKxgRBEARBuESpsSAUFRXFjTfeyL59+xxyhTTbzK1kCNUhpQKl9XIxAE8Xo7qjO4TST6g2uS6OOUFhfjaHUIa00BUEoRwiV6rbZoMq3OSrNZFsz7qLPoYDWOpP5EGDUeUKBbdTLotdv0Kfx6r3eqnH1W1Ac0d3kE6b65UgdHihCELC+aO8cjGQkjFBEARBEC55atx2/sknnyQiIoJTp07h4eHBgQMHWLt2Ld27d2f16tXnYYhCtdEdQu4lLefdnA04GW3/Zq8g8AgArCoI8wz0kjFxCAmCUAaLBSL/VfebDS6z2mq18uGSw3y45Aj/WrrydvFdfL4hiYy8IiXm9HpQbbj1W9VGvjqcPqZuA1qUv77N9eo2anXJ/k8QapPc1JLcrNYjHNeJQ0g
QBEEQhEucGgtCmzZt4s033yQwMBCDwYDBYKBv37689957PPHEE+djjEJ10UOl3XzJtuUHebme2RZaLxsrmyMUZhOEkjILKLaVmwmCIADKiZOXCi7e0LBnmdXvLznMV6sjAXh+eGtah3iTXVDMN2ui1AYdxoCbH2ScgGPLqveaqer5CGhe/vqgVhDYEsxFcGx5zf4eQagOR5eA1QIhHcCvkeM63SGUdxpMBRd+bIIgCIIgCOdIjQUhs9mMt7fKjggMDCQhQc2MNW7cmCNHjtTu6ISaUSpUWm857+VqdNxGF4ROlc0RCvZ2w9moUWyxkpxdeD5HKgjCpYZeLtZ0ABgdhebMPBPfrVXCz1uj2vPwVc145upWAMzYGM2prAJVotrpNvWA4yur95p6yVhgBYIQlJTxHPq7es8pCDVBdweVDpPWca8HTmoi5dGpi5gwc/eFG5cgCIIgCEItUGNBqH379uzZsweAXr168eGHH7JhwwbefPNNmjZtWusDFGqAPUPIjxxbyZiX2xkxUXrL53IcQkaDRqivdBoTBKEcjuvlYmXzgzZFpWKxQrMgT+66QnWlHNwmmK6N/CgwWfj8X13YsZV+VbfEJlUvGatEENLLxo4tV13JBKE20cur67cru07T7GVjKSejmbvzJLGpksEnCIIgCMKlQ40FoZdffhmLRZUTvfnmm0RHR9OvXz/++ecfPvvss1ofoFADSjmE7C3nXc4QhCpxCEHpHCE5qRXOoDAb/rgDNnxa1yO5MBTlwcbPITuprkdS9xRkQvxWdb952fygDcdPA3Bl80D7Mk3TeHaY2t/8vjVWXSjXJIQ3Lw3y09V9/0omGxp0AZ9wMOVC5Kqqn1cQqovVCik253Ngy3I3MXuHAhCqpQKw4lDyBRmaIAiCIAiXJov3JfLt2kh7c666psaC0LBhw7jpppsAaN68OYcPH+b06dOcOnWKQYMq7jwjXADKCZW2t5zX0QWhjFgoyi3zFPZOY/9lh5DVqn6EmrF9uurmtG5SXY/kwrD7V1j2Mix4vK5HUvdErwVLsXLq1GtSZvWGSCUI9WkW6LC8d7MA+rUIpNhi5e1FB0tCeLMTq35NPVDaJxxcPCveTtOgja1sbM9vVT+vIFSXvFSb81ar0KV2vMAXgFAtDRBBSBAEQRCEiknPLeLJP3bz7j+H2RKdVtfDAWooCJlMJpycnNi/f7/Dcn9/f3vbeaEOKRUqXWHJWJWdxlQr+rj/mkMoOwm2fgczx8FHzdSPdIapPsVFsPlrdb8g8/Lo6JQRq26Pr4ScU3U7lrpGz/wpp7tYYmY+USm5GDTo3TSgzPpXrmuLk0Fj2cFk1p9yUQtzTqnPVGXYW843q3p83e5Rt4cWQlpU1dsLQnXQj5F+jcDZvczqnMJi1ierz/SwRso5vSU6jcw80wUboiAIgiAIlw7zd5+kyNa8admBi2MSqUaCkLOzM40aNcJsrmbLYOHCUrpkzCYIeZ7pEAIIsuUInSqbI9QsWM3EH07KPi9DrBOsVpg2BP55Bg7OV7O+ealwfEVdj+zSYd8sR1dHRlzdjeVCkadKQLCaYf/cuh1LXWK1lgRKl1supt6nDmG++Ho4l1nfsr43917ZBICXlyZgNboAVsipohRPzw8KrKDlfGmC20Dzoep5deFSEM4VXRCqoFzsp00xRJv8AOjkk0vL+l6YLVZWH73MBWRBEARBEMpl5vZ4+/2lB5IuirKxGpeMvfTSS7z44oukpV0cFiehFPZQ6Xr2kjHv8gShYL31fNkcoY5hfgAcSsyisPg/IvxlJUBmHGhGGPgytLtRLU/YVbfjulSwWlWWTmkyLwNBKDel5P7eP+tuHHVNaqRySxldoEnfMqs32vKD+jQPLLNO58khLQn2diUmrYAs5yC1sCqHnt0hVEmgdGn6PKZud/2i8ocE4VzRyxbLEYRyC4v5bm0USVZ/AAzZCQxpUx+AFYdEEBIEQRAEwZH9JzM5mJiFi9GAm7OBkxn5HEz
Mquth1VwQ+uKLL1i7di0NGjSgVatWdO3a1eFHqEOq7RDSBaEjZVY19HfHz8MZk9nK0aSc8zXSC4seoB3QHAY8W9KVKGF3nQ3pkuLYciUeunhBRH+17HJwCOWeLrmfsBNOH6+7sdQlJzao24a9ymT5WK1We35Q30oEIS9XJ166VjkTj+V7AZCWFFP5rIj+fgdUwyEEEDEAQjqAKU/lXQnCuWJ3CJX9DK46cor0PFNJLlZWAkPaKkFo9ZFTmGx2cEEQBEEQBIBZ29X109Xt6jOgpZogXXoRlI2VoxZUzsiRIyUv6GLEYoZCm8Lo7kdOoSr5qlQQKqfTmKZpdAjzZd2x0+yJz6BDuO/5GvGFQ3dCBdtK5Rp0UbfJ+1WOiZNL3YzrUmGjrXtgt3vAalEBw5mxdTqkC4IuCHnVh5xk2DcTBr5Yt2OqDKtV/W8adAY331KLrfyxLY603CJ6NPGnY7gvbs7G6j+v3mWtnE5fkSk5JGcV4uJkoFvjepU+zQ2dGvDbllgS4v3BCF/OX8vcJf68en1bbuwS7rixxVySBVSNDKGiYgv/Hk6mU9sHCE16ErZMhT6Pg5Nrtf5EQSiXSjqMbY9RHfBatWgF+4CcU3QO9SDQy4XTOUVsi06r1DUnCIIgCMLlQ9HeuaTtOgJ0ZEz3hqRkF7L0QDLLDiQxYWj5pekXihoLQq+//vp5GIZwzpQO+XXzJadAXUyVXzJmE0YyTqhOY2fM+ncMV4LQvvj/SHDwqTMEoXoR6oK5IFOJRaGd6m5sFzuJeyBmHRic4IqH4dDfannGZSAI5dkEoR7jYdXbqmzsqomqq9XFyI4fYOHT0P0+uG6KffG/h08xce4+++8uRgND2gbz+e1dMRqq8bfk2spfvILLrNLzg7o3rlelyKRpGp/f3oVDPzWG05sIM6STnmfii3+PlxWEMuPBXKjK1PwaVTnEl+ftY+b2eJyoxwY3f+rnniJlw88EDXig6r9PEMrDlF+ynytHENpq6wzSpnlTOOgC5iIMOUkMah3MzO3xLD+ULIKQIAiCIAhw+jguc+9lstXIMZ/pXNk8kOwCE0aDxuGkbGJT82gU4FFnw6txyVjTpk1JTU0tszwjI4OmTcvOIAsXCF0QcvYEozO5hSr/p1yHkGcgeNhOVMspG+tgyxHae/I/KghpGoR2VvclR6hy4rep26YDwTccfBuq3//rJWNFuar0CKDrOPW9So8peT8uRvQyqZM77IuKzRbeX6zC41uHeBPk7UqR2cI/+5LYX93vt95hzbM8QUiJZldW88I32MeNAd2UAHtnW2ecDBqRKbnEnM513FAPlPZvCobKhab1x04zc3s8mgZOzi58bxoGQOGaSViKpduTcJakRgJWcPNTx8xSZBWYOJykHLk9IgIcy8bsOULJF0VQpCAIgiAIdczePwBw1sw81iQeo0HDz8OFXhEqh3DZwSoarZxnaiwIxcTElNtlrLCwkPj4+HIeUTVffvklTZo0wc3NjV69erF169YKt50xYwaapjn8uLm5ndXr/qewB0qrUpEK287r6OJIXNn3uqOtTOxocjYFpks8WNpigRRbNzW9uxqoshqQHKGq0IUfvVzIzyYI/ddDpfVyMaOrcsbouVN7/qi7MVVG4l5IsrmAUiNV+RgwZ2c8x07l4OfhzJ//15utLw5mcGsl7GyOKivsl4v+XpxxUVxstrDJ9hzVFYQA+8WzS14SPW0HwpWHzwjhTY1Ut1UESucVFfPC3L0AjLuiMXtfG8bVd79IutWbcEsCh5Z9X/1xCUJpSncYO8MVuCs2A4sVGvl7UN/HDXzC1Iqsk/RtEYirk4G4tHz2/VcmVQRBEARBODssFsy7S64frnLab78/rF0IoLqN1SXVFoQWLFjAggULAFi6dKn99wULFvDXX3/x1ltvERERUeMB/Pnnn0yYMIHXXnuNnTt30qlTJ4YNG8apUxV36fDx8SExMdH+c+LEiRq/7n+OUoHSUEoQcq1gdl2/wN3
1i/3iUSfU141AL1fMFisHEuo++fycyIxVTg+ji2MGip4jJA6hysm0iby6EKQ7hHJTVEnFfxW7CBKkLgbb36R+j/y37sZUGbt/K7lflAM5yeQVFTN5ubqofWxgc3zdndE0jd7NAgDYEl3NTlwVlIztT8giu6AYbzcnOoTVIGvMfvGcwCCbOPXv4TMC9fTuTlUIQpOWHSU+PZ8wP3eeHd4aFycD3Vs2YlejcerhOz4Bs7iEhLOgkg5j22zfne5NbLlZpRxCHi5OXG07wZu78+R5H6YgCIIgCBcxsZswZpVMpHvFrbFfew+1NaPYfiKdGRui2RefWWFTivj0vPPWsKLagtCoUaMYNWoUmqZx9913238fNWoUt912G8uXL2fSpEk1HsDkyZMZP3489957L23btuWbb77Bw8OD6dMr7hKjaRohISH2n/r169f4df9z5GeoW3c/AHvb+XJLxgA63KLcD8n7yogimqbZXUL74jPOw2AvIHq5WGBLMJZ6L/SSsVMHVbC0UD66E0gXgtzrgYu3ul+dsjGLuer24hcjen6QpxJPCO+hbtOjIT+9bsZUEcVFKvAaQLPt0lOPM319NMlZhYTXc+eu3o3tm1/RVP1N26LTKK7OgSU3Rd16Bjks1svFejcNqF4WkY5PqLrNTmRwa/WcW6LSyCooJdxUo+X8rth0ftgQDcA7N7bHq9S+ru3ICaRafQgxJxK7WjqOCWdBJR3GtsUoQahnE+VwKy0IAYzuqkTP+btPUlQs3cYEQRAE4bLFVi42z9wHk8EVshPs1SsN/Nzp0sgPqxVe//sg13+xnh7vrGB7jOOk7aojp+j34SrunLaFwuLar96ptiBksViwWCw0atSIU6dO2X+3WCwUFhZy5MgRrrvuuhq9eFFRETt27GDIkCElAzIYGDJkCJs2barwcTk5OTRu3JiGDRsycuRIDhw4UOG2hYWFZGVlOfz8JznDIZRtdwhVIAh5+EPbG9T9nT+VWa3P+F/yOUKnDqrb4DaOy+s1UdkQ5qKSbYSyZJwhCGlaqbKxagRLb/wcJreBA/POy/DOG2eKIB7+KowcLj5X2dElkJcKXiGq9TqQm3iEb9aoYPlnh7XC1anEKdgm1AdvNyeyC4s5mFjF/rC4sGTfUoEgVKNyMVBd2zQDWIqJcMujaZAnxRYr646eLtlGF4TKuRgHiD6dy4M/78BihRu7hHFVK0f3UkhgIOtD7lLD3jRJRF+h5pQuGStFUbGF3XEZAHS3C0IlJWMA/VoEEeztSnqeiVVHKnY7C4IgCILwH8ZUgNV2DfSHeRB5IT3V8uMr7Zt8dlsXnhzcggEtg/BxcyIjz8TL8/ZjtigXkcVi5f1/DmO1Knf/i3P313pGYY0zhKKjowkMrJ3OGadPn8ZsNpdx+NSvX5+kpPJr6Vq1asX06dOZP38+v/zyCxaLhT59+lSYX/Tee+/h6+tr/2nYsGGtjP2iwy4I+WEyW+yzkhUKQqDCcgH2zVYhuqXQHUJ7L/VOY6ds+UFnCkKaVipH6CK7wL9YKC6C7ER136/U96YmwdJxW9Ttvlm1O7bzjV4y5lFqX6eXGZ7ceeHHUxm7f1W3nW6DoFYAJETuJ6ewmBbBXlzfsYHD5kaDZg+xqzJHSBfGDE7KHWajwGRm+wnllKqxIGR0VqIQQNZJe6bRSr1szJRf4kwrxyEUl5bH2O82k5JdSOsQb16/vl25L9Pmuqc4ZfUjoDiZ0+vFJSTUAIulwpKxfSczKSy24O/pQrMgW4fOMxxCRoPGjV2USDRnx9llKwqCIAiCcIlzdDFaYRbx1kAOu3TAu51qfFI6gqKhvwdPD23Jj/f1ZO1zA/Fxc+JwUjZzd6rzh7/3JnAkORsPFyNGg8acnfF8vSayVodZY0HoiSee4LPPPiuz/IsvvuCpp56qjTFVSu/evRk3bhydO3dmwIABzJ07l6CgIKZOnVru9hMnTiQzM9P+Exf3Hw3DLRUqrZeLQSUlYwCN+yrXQ1F
2GQdHB5sgFJmSY88juiTRS8aC2pRdp1/gJ+6+YMO5pMhOAKzg5OboDqlJsHS2TdiNWafKxy4V7A6hUmJHWFd1ezEJiNnJcGy5ut/lTruAYjmtHDa9mwVgKKecSy8b2xJVRY5QaadUqWDdHSfSKSq2UN/HteSiuCZ428rGshIY1FqJQ6uPpKjZED1Q2s0PPAIcHpaQkc/YaZtJzCygWZAnvzzQC18P53JfomXDYJb5jwXAuunLmo9RuHzJiofifDA4KzdpKXQbd/fG9dD074RdECrJDBrdLRxQNu+0XHGoCYIgCMJlx54/AZhv7kPflsEYWtiqok5sKDeL1c/DhccGqXP5ycuPkl1gsueBPjqwOa9f3xaAD5ccYfG+xFobZo0FoTlz5nDllVeWWd6nTx9mz55do+cKDAzEaDSSnOwYKJqcnExISEi1nsPZ2ZkuXbpw/Pjxcte7urri4+Pj8POfpFTJWHaBEnBcnQw4Gyv5FxsMJS6hM8rGgr3dCPV1w2qFA5dq2Zi5uMT2X8ohtPRAErd9u4k3drgAcHDHGt5eeJBTWQV1McqLF3u5WLhjlx2/Rrb11SgZy7F9twsyIXFPmdWpOYXM332Sl/7aV6s7tnMmz+ac8SzHIXQxCUL7Z4PVDOE9VXlVQDMAPHNiAGjfoPywZ10Q2hqdZreklktO+flB6/VysWaBJRfFNUG/gM5OpHuTevi4OZGWW8TuuHTH/KAznvv5OXuJS8uncYAHv42/gkAv10pfpuXVD2K2agQVxrL7gJSGCtVEP24ENHPMnqMkP6iHXi4GqmGBZlCOStt+s2V9bzqE+WIyW1mwW8KlBUEQBOGyIicFjqtJ27nmfireIKg1eDeA4gI4sbHch43r3YQwP3cSMwu4Y9oWTqTmEejlwr1XNuGu3k24p08TAJ6ZtYcTqbnlPkdNqbEglJqaiq9v2YsMHx8fTp8+Xc4jKsbFxYVu3bqxcmVJHZ3FYmHlypX07t27Ws9hNpvZt28foaGhNXrt/xylQqVzi6rIDypN57GgGSFuc4mbxoaeI3TJts5NjwZzITh7gJ8K1T2anM1jv+1kc1QayzPURWlzayw/rT9K3w9X8er8/aTLbK7izEBpneqWjFksJYIQQPQa+93UnELGfreZ7u+s4Mk/dvPrlliem70XS2XixIWkvCDl0E6AplwA2cnlPuyCo7eab2mzoNocQvWLT2LAQvsKun855AhV1kmwgg5jG882P0inVOaKs9HAAFsG0IpDpyoMlC4wme0lbt/e1V21+66Cnm0iSHBXJT/z5810DK4WhIqwl4s5ZlhZLFZ7qWSPiFKCkHs9aNhL3T+6xL5YD5eevTOevKJL2Gl7uRC5CnbMqOtRCIIgCP8FdswASzG7Lc2ItIYxoKXNbd98kFpfQediN2cj/7tanbvq0S2PDWyOh4u6rn/52jb0bOJPbpGZJ//YXSudx2osCDVv3pwlS5aUWb548WKaNm1aziMqZ8KECXz33Xf8+OOPHDp0iIcffpjc3FzuvfdeAMaNG8fEiRPt27/55pssW7aMqKgodu7cyZ133smJEyd44IEHavza/ylKOYRybA4hL7dqCELeIdByuLr/4w1weJF9lZ4jtOdSzRGyl4u1AoMBs8XKc7P3YjJb6dcikA/vvxazqx8umpmbQ1MpKrbw06YTjJm6iaRMcQs5OIRKY3MIFabGsOZoSsWPz08DS6mLoKgSQeiLVcfZGJmK1QqtQ7xxMRrILiwmupaU7nOmdNt5HVfvkjyRi8UllGnLJ9FdWz7hWI2uOGMmwimVFvW9yn2Y0aDZOyRVmiNUjjCWmW+yi8RnLwiVlIwBDGmjBKHft8aSFmtz8gQ6CkJ74jIwma0Ee7vSsoK/qzzqdxwMQMv83by+oOIGBIJgp4JA6ciUHDLyTLg5G2jX4Ay3catr1O2Rf+yLbugcRoQxBf/EdbR9dSmtX1nMoEmrmb4+mgJTSQltbmEx22LSRDSqSwqy4I874O8
nS4R2QRAEQTgbigth67cATC8eTvswH4K8ba72Zuq8tCJBCGBU5zDahKrzjDA/d27v1ci+zsloYPKtnfB2c2J3XAafrzx2zsOtsSA0YcIEnnvuOV577TXWrFnDmjVrePXVV3nhhRd4+umnazyAW2+9lY8//phXX32Vzp07s3v3bpYsWWIPmo6NjSUxsaSUJD09nfHjx9OmTRtGjBhBVlYWGzdupG3btjV+7f8U9gwhP3vmj6dLNQQhgGFvKwtb7in4YyzMfRDyM+jWWF0wLj2QRFRKznkY9HlGF4SC1WdjxsYYdsdl4O3qxIc3d6RP8yCM4d0AeCf7Rba0+4sB3gkcO5XDLVM3EpuaV1cjvzjQu4j5NXJcbnMIOeUm88D0jfy9p4K28np+kN4KPXYzFBeSnlvEH1uV2DT9nu4seao/7cPUTm/fxSI+lhcqDRdfjpBNULGXYBkMZHuq/08//4xKS0b1srFKBaFySsY2R6VisULTIE9CfKt26ZSL3SGkxj+sXQgdw33JyDMRe2yvWneGQ8juzGjiX6MyNZfmVwHQ23CQuTtPVvx5FQQd/dhxhiC0LUZ9Brs0rFf2u9VqhLqNXqfEBcDfTeMvr/f5yeUD2mtRFJgsRKXk8ubCg/T7cBWTlx1h/E/b6frWcm75ZpMIlnXJ/jlgsk1IJO6t27EIgiAIlzb750DuKdKdAvnH0ourWpZy2je9CtBUl+vs8ptoGQwa79/UgU7hvrx7UweHbsEA4fU8eOfGDoCaZN8aXUUmaBXUWBC67777mDRpEt9//z0DBw5k4MCB/PLLL3z99deMHz/+rAbx2GOPceLECQoLC9myZQu9evWyr1u9ejUzZsyw/z5lyhT7tklJSSxatIguXbqc1ev+pyjlEMotVDOP1XIIgco/eHAN9H1aXbzv/RN+vJ4rQg30axFIUbGF5+dcROU81UVvJx/Umri0PD5eegSAiSPaEOrrrtYNfBFCOqAVF1A/chY/mp7hHt+dxKXlc8vUjRw/lV1Hg78I0N0nZ5aMeQVj0lwwalZCtFT+N3NP+TuiHNtOLqi16ipVnA9xW/l58wnyTWbahvow0FYq1DHcD7hIutpZrZCnO4QcBSFLaGcAtm1cwfifttda7e5ZYbWWhNjqAguQaFT3u3tVfnCw5wjFVJIjpDuESpWM6e3m+56tOwjKdGVyczby2/gr6Nc8gCaoZStTHB0YenZL9yb1qBGNrgDNQBNDMiGk8uQfu3j6z92X93f7UiNxD8RtK7vcbILT5ecHnjUWS4lDJKSDw6pdsUoQ6ta4nM9gYAslYlpMEGkrgz/wF36FakJr9g2urH12IO/e2IEwP3dSsgv57N/jLD+YTKGtK+jifUm1Yv3+z7N3Fsy8G/68C/68ExY+XXK8Olt2/VxyP1mEOUEQakBatKqw2PAp/PMsnNhU1yMS6hKrFTZ9BcCPxcMoxomrWpWqOPDwL8m2Tdhd4dN0aujH/Mf6qlKzcrihUwNu6hqGxQpP/rGLhIyyIdXVpcaCEMDDDz9MfHw8ycnJZGVlERUVxbhx4856EEItULpkrFDlZFQrQ0jH2Q2GvA73LVNugKS9aL+O5r0RTfBwMeAbu4LMyT3g97FQdIk4Z1JUy3lLUBsmzt1HvsnMFU39ua1HKYEjvDv83zr1dzdXye8vBm6gVX1vkrMKefKP3ZeeEFZb6CVjfo6CUJ7JzEmrEhOu8M+lyGxh/E/biTzTRZZjy5/xDoGI/gAUH1/FjI0xAPzfgKZ2p4denrg3PqP2/46aUpSjwt7AQRBasj+Rp9aq8UYUHWX5wSSGTlnLpyuOUVhcBx3U8tJKxulT0lr+SLFyV7Z0PlXpw9s28MHb1YnsgmJ2x2WUv5GeIVTKIaQLQn2a1ZIgZFXfLy9XJ74f0ww/TYlsT6/IJjNP7cvMFis7SjmEaoSbry3/CR5qkojFCn/tOsnQKWv5v5+3s+rwKYrlIvzipTAbfhgB3w91KGmmKFct/6Kbyn6
pLdKj1T7A6FrGIaR/Tzo39Cv/sfayscXqc73hU/sqt7QjNArwYGyvRqx65irevbEDQ9oE8+TgFqy83Zs/3d7jCtNmu/ApVEBBJsx/FA7Og0ML4NDfsH06fN1HzcieDUn74eSOkt+T99fKUAVBuAw49Dd81llVWCx/VZUJzXvYfm4jXIZEr4XkfZid3JleMAAfN6ey5w3126nbczzevDmyPU0DPUnMLGDsd5vPukHSWQlCxcXFrFixgrlz52K1feATEhLIybkEy4r+K5QKlc6xOYQqbTlfEQ17wLj5KiTz5A7C/xnHsuAvmeYyiXo5x+DIIjUjV1xYe2M/HxQX2cNpZ8d5sf74aVydDLx/U8eybbg1DRr1ghEfA+CSsJVf72qDl6sTBxKyWHQxdb+6UFgspRxCjhlCf+06SaxZiQFvX+VL54Z+ZOabuPeHbeQXlRJGdBukVwhEDAAg7cAK0nKLCK/nzrUdSoLgdUHoQEJW3V+c6+Vizh7golqqb45K5aFfdrI0LRgTRgK1LEZFWCkqtjBlxVFu/npT2Zl9iwVM5zGLSncHeQaBk6pLtlqt7MhWYl1oceWdjYwGjSFtlXj086aY8jc6o2QsKbOAyJRcDBr0bhpQ/mOqg952vjgf8tPti10yVMv5ZC2IrGJnFu5TbqGjydlkFxTj6WKkdYh3zV+vSV8A7gmNY+HjfRnWrj5WKyw9kMy9M7bR+/1/+WjpYYdcF+ECkxkP314FGz5zXB61Wgk0WGHOA6pc01wMs++D+K1qm4Pza28cejfE+m3B6GxfnFVg4rhN9O7cyK/8x+plY0eXwrHljid6pZo2uDgZGNurEdPu7sHTzZNotuh2erGPCU6zWHmociH3v4DVaiUx8yxnMg//o5pF1GsC105Sx+0GXZVQNPs+mDO+5vtd3R3kr7o0krxfLuYEQage+2ap23oR0P5mNZmQHl2mUY9wGbFZuYMOBl9HFl70bhaA05ll5vXbq9tzdKR6uTrxywO9CPNzJyY1jzumbSE1p+bX6DUWhE6cOEGHDh0YOXIkjz76KCkp6oLhgw8+4JlnnqnxAIRawFSgTpDAMVT6bAQhUKrlXfPA1RfiNhN+eh3FOPFb8SAKNFdlh591r7LrX6wk7QVLMWZXX15epS44X762DU0CPSt+jH+EsvxbzQSe2siD/VVI+qRlRy4/G39uivpMaQaHciSr1cqPG2M4aVWCkGtuAtPu7k6QtyuxaXlsjCzVaVDvMOZdH5oqQcg/Yx9e5DG+X1OHnWNEoBeeLkbyTWYiU+o4WLqc/KBFe5Uo2Ld1OAZbJtWUvmY+v70L3m5O7DuZyeojNvEkNxXWT4HPOsF74ecvb6iccrH49HwOFCrxxsvWer4y7rsyAoCFexPLD1I/I1R62UEl8nUM98PXw7ns9tXF2R3cbU6f7FKCq03ENfmp795fO9XfuN3mmujauF7Zg2p1aKIcasSsp32YL1Pv6s6yp/tz75VN8Pd0ISW7kC9XRfLQLztEFKor1n6svitrPgRTKbFA79pldAVTHvx2G/z1oEM3r/zDK2pvHEm2/JiQjg6L98ZlYrVCQ393Ar1cy39seE/1uS7IgL+fUMua9FO3pw6VFRkOL4JfbrYJXtDGEMfhg3tq6Q+5OLFYrDz2+y56v/fv2eV56S6gTmOhxwPQczzcvwz6P6eOV/tmwtap1X8+UwHs+UPdH/qGeo681BKHqyAIQkWYiyFytbp/03dw8/e2fBjUBPq5UlT++fDmqFSWHUgi+nRuxSX/Qt1w+rjt/ETjJ4tyDV9R3gRqLQlCAA383Pl9/BWE+Lhx7FQO46Zvpai4ZtetNT6zfvLJJ+nevTvp6em4u7vbl994440O7eOFC4geKK0ZwMW7VNt5Y8WPqYoGneHOOeDbCFpcTeLYlbypPch9hf+jWHNRO7r5j4LVSkJGPluj0+xusYuC6LUAbLa0pajYylWtgrjzisZVP675UHV7bDn3940gwNOFmNQ8Zm6
vosX6fw3dHeQd6jBLvikqlaPJOSQbbJkyGbEEernau0RtiiwVUFzaIeTXiFzPRjhhYZD7cW7p7ug6Mho02oVdJGVjZ+QHWa1W/j2sLg5u79kIY7gKltYSd3N9pwb2EsTZO+Jg/ScwuQ2seB0yYlWeyP65Dk9fYDIze0c822PSzs0NVY4gtO9kJjFW5b7RMuOqnCnvEO5Lzyb+FFus/Lw5xnGlxVzyXtgyhObvVhdw13UM5Zw5I1gasAtC/o3aYdBUkPSJ1Fx7mG+Ny8V0bDlCpEfbP9st63vz2vXt2DxxMJ/d3gV3ZyOrj6Tw8C876qYE8HImOxl2/6buF2Urdw0ol93RZer+6GkQ1EZlk+2fg1Uz8LzlMUxWI+65cezdt7t2xqLnB4U6CkJ6flCXhpVkWBmdoOUw29+UCJoRrpuiPnv5aY4iQ+S/KgPHXAitr6M4/AoA2mSuuzSbOJyJ1Qq7flX7RHNJ97SPlh2xC+xTVhytWUl2XhpE2coD299UstzoDINegqvfUb8fKdsJt0IOL1TnUD7hyuGlh9lL2ZggCFURvw0KM9VEgN50pLXNKXr4n4ofVx2OLoN3w2DpSw6Lp62L4rZvN/PgzzsY+PFq2r66hId+3uHo0BfqjsN/A2BpNoiFJ5UJoXxByFYylnrMcRLsLGkU4MGv43vh6+7MgYQsxwn6alBjQWjdunW8/PLLuLi4OCxv0qQJJ09WXqIgnCf0/CBXHzAY7F3GvFzPYQYfVPnY0/vgjlk0bNmZyWM6s9HSnvGFT2LRnGDvnyxavozBk9YwZuombv12M/tPXgShwGAXhJbltcTf04UPb+5Yvc5ELVSOEMdX4uli5PFB6uTw0xXHLq+drd5h7IxA6R9t+T8NmtiyNTLUdr1teTKbSnes0h1CNjHhgKvKcbkr6Dge5XTA62gThPbV9WfI7opRf9OxUzmczMjHxclAn+YB0MAWYn9yJwCjuylx69/Dp7Bs/Fxd4IV2UjPYADHr7U+dmlPI7d9t5plZe7j5m010fWs5j/668+xEsEzb/ta3RBDafzKT0/hQYPAErEoAqYL7+iqX0K9bYh0/43lpYLUJVh6BxKXlseNEOpqmguzOGXvr+VLHjdOqdaZHaCt7S/u/dp20O4RqHCit4+YDtkBwYjY4rHJxMnBDpwZMv6cHbs4GVh1J4aGfRRQ6Z/55DqYOUBlAVbHl6xKXK8D+2eo2cRfknsLq4sXpsIFwx0zwVPuTHe1f4c+iPuy0tgBg3pxfOJyUde7j1jtMhXRyWFxlfpCOniMESrQIbKEaN0BJowOAHT+C1QxtR8EtP+LUYTQAVxu3X/plYzkp8OstMP8RWPEazLkPzCZmbo/j69WqLNTFyUBUSq7ddVgtDv0NlmKo30G9r2fS+lp1G7fFoRS1Unb+pG673AEGo70r6QUPlk6NVAJheeHpgiBcnBy3TV40G6T2HwAtrwE0SNjpOOFVE6xW+PdNwAqbvrBPkny7NpK3F6lStKZBnrg6GSgstrDkQBLPzN5zcU3MX65ErwMgPrAv+SYzfh7OtKpfTtSBdwh4BKjzbFvm7bnSLMiL4e1CgJK8z+pSY0HIYrFgNpc9UY6Pj8fb+yyyHYRzR88PclMX1HrJmOe5OITKYUSHUJ4d1opVli4sN6uL4ui1v5JvMqNpsDU6jeu/WM8Lc/aSV1RcxbOdR4oLscSqhP+NlnZ8MLojwd7VbI/duC84uUN2AiQf4PZejQiv586p7EKmb6j64vo/gx4oXSo/KDEzn+UHlcjTu0tntTBTbXdFU+XcOJiYRUZekVqnO4S81c7pH5OaPemUsaLcDKoO9mDpuhaEdIeQKpPS3UG9mwYoIUsXhBL3gNVK6xAf2jXwwdWci0F31NyzSHWw07cryCLmdC6jv97IrtgMvF2d8HV3JqugmEX7Erlj2hZiU2sY1m53CJWIM0pM08j1aqIWpFbdgWlo2/o08vcgI8/E3F2lOvXowpi7PxidWGAr7+j
TLIBgn7NsN18ae7B06ZIxdbFIQHNu6qqErp82nSAhswAng1b1xXhl2HKEiFlb7urezQIcRKFfN8ee/Wtd7hTlwvbvIXE3xG6pfNuCLNg2Xd0f8Ly6PbpUCUlHlwKwytSewZ9sIlELgke3wEPr+TClNwCmxqoctYd5N+O+30rM6XMoOc1OUkHqmqFk9g7lEtxlE4S6VJQfpNNskMofA+hjKxsLaq1u9ZM+qxVO2ITJXg8pZ5FtVrm7dpRtB46UPN+ldoIfuUoFPB9fDk5uYHCGg/NJ++E23vhLieiPDWzOg/2USPbV6sjqX8QcsLkt299Y/vp6jSGwlRLaqhM0np8B0WvU/c53qNtatPFXG4sF/npIhWSv+eDCva4gCOeG7ma1NaVJzMxnyHeHOell248cOUuXUNSqErcqwPxHmb58B+/+o44hTwxuwcoJAzj45nB+uq8nzkaNRXsT+fzfWu66KdSM4iKwXX9uKFZdxHpF+JfNrgWVX2sPlq69482VLdRk6vrjqVVs6UiNBaGrr76aTz75xP67pmnk5OTw2muvMWLEiJo+nVAb6CVjNkEo1+4QOssMoUp45Kpm3NQ1jEXFvQC41riFV65tw7rnBjKycwOsVvhjWxzPzKpDpTp+O4biAlKsPjRv242htuDcauHsBhG2zIfjy3F1MjJhqHLDfP7vMY4mXyatqjPLdhhbtFd1aOrRpB4Nm9oucDJPgsVMsLcbzYO9sFphc1SauoixO4TqU2Ay83taSxKt/rgUZZR7kOxkaz1/MDGrbjOb7BlCyuKpC0KDbWVx9s5DBRnKRQOM7hpOY61URy5Xb/BriMWvCVjN/L3wL276eiMxqXmE+bnz16N92PnKUOY+0ofODf3ILijm0d921syVos88+SjRzmq12h16hiBb2UM1BCGjQeOePk0AmL4+uqSEQ+8wZnN4LbCVi43sFHbmU5wd9pIxm7BlMUNalLof2Jxh7ULwcDGSlqsExnZhvuU6y6qNrdOdPntTHn2aBfLsMPXZ/udyDJOvLeK3KycHQFpk5dvu+EFZ7gNbwoAXVMlOcYHq1GUThP4p6kRmvon3Fx8GD3+ijBFsjUnDoEHrK28AoK/xIKez87nxqw1siarZiZAd3R0U0AJcPOyL49LyScstwsVooG0Dn8qfw9VbZfDdOaek7Ex3negOodTjSnA1upaUGfiGUxjcEYNmJeDkSjLzTZyIiSL53fbETB5yaQhDuanw++1q3xHcFsavgtt/x2p0xT9+BV8YJnF9h2AmDG3JvVc2wc3ZwN74TDZU58Q1J8Xu/KXdTRVv16Kk7LtKTh9Vtz5hSkwC+wm69dQB+7nUeWf3ryUB6bGbHUrsBEG4SMlOLsmcaz4YgD+2xnH8VA4/p9su9PWyMYsZVr6lfqqzL1//ibrtdo86NuYkU3/tC4CVp4e0ZMLQlmiahtGg0b9lEG+PUgLU5OVHWSznLnVHwk6VdegRwOJkP6CKBiz6BETSGSXK5uKzPub3aaZe71BiFqezqx8uXWNBaNKkSWzYsIG2bdtSUFDA2LFj7eViH3wgMxt1Qo7jhVu2Lgi51b4gpGka793UAee211CEMxFaEve3yCO8ngef3taFX+7vhbNR4599SXy9pooLgfNE6n4VMLrJ0o4nhrSsYutysOcIqecZ1TmMfi0CKTBZePTXnZdH6ZjdIVQiCOkXyNd2CFXZQgZnlZFjCwXWd0KbIk+r2X2TzfHiHcLhpGwKzRqLDAPVsp0/l3nJxgEeeLs5UVRs4UhSHQpveSUOocw8k73d+cBWNkHIxaNEzLBd7I7s3ICmBiWA5Xs1orDYzOsLDjA3TV1kxO9eTlpuEe3DfPjr0T40D/bGaNDo2qgeX93RFT8PZ/adzOS9f2pgG7V3gVNjScgsID3PhJNBw6uBTbBLrd53cEyPhni7OhGZksuaYzZnUKkOY4eTsjiSnI2L0cCw9iHVH2Nl1GuibmM3qwNfZpwqGzK6gm9DPFyc7NZ
XgB6Nz7JcTKfRFeozm3ECUo5UuJne/W5HbPpZt++87LHNkAGVi5LFhbBJdePgyifBYID2qnSKzV9D4m4saKw2dwZUhtX2mDRm7VCf/f4tgwhscQW4+eJNLjfVTyY9z8Sd329h1tnkviXZAp3PzA+KU/uANg18cHWqhvO2US/7jDEAwWqm0N51RncHhfewdwgEcG2vxK0h2naen7WThOl3Ut8UT5OsbRzbW+o9vViJWas6Bwa0gPH/Qv22mJsN4R2/N8izujLQuIdJHRMwGDQCvFy5rUcjAL5eU41Z7YPzlLW+QRfVAKIiWlytbo8vV86bytD3A6XLz2yCkDn5MD3fWszW6LSqx3Yu5KWpsjqdouySi0xBKA+LWTkRhLol0pabG9rZfv2lnycvs3QHwBq9VjkRFzwO6z5WP1WdlyXsUs5FzQj9nuH3sJcxWY1ca9zKt50ieXJI2XLZW3s04t4rmwDw9MzdfLMmssahwkItYJtwtDTuy7YTaoL2imaVCULltJ7f+h28HQRv+sO74TClfY1y8QK9XGkTqiautkRXf3KsxoJQeHg4e/bs4cUXX+Tpp5+mS5cuvP/+++zatYvg4OCaPp1QG5xRmqPPap1V2/lq4OpkZNKdfXFpZTvxOjjPvq5vi0DeuEEpnh8tPcLqIxc+CyH9gNpJZ4VcYf9S1Ag9RyhuMxRkYTBoTB7TmSBvV46dyuH1BRc4W6Au0MUGP3XCnpCRz87YDDQNrukQqmql9XKy9BigRAXfFFWqQ4uLN7h4ss+WkXMkVF3wEPlviehkQ9M0e/v5Os0RKpUhtOZYCmaLlRbBXjT0L3EMEGBrT2y72A3wcuWqYFWqciDfnzFTNzNjYwwbbZbR4V7HeXNkO2b+X+8y5YsN/NyZMqYzADM2xlTPmWK1lnIIqdKrfbZSuxb1vXEOtgmhlQgfpfFydWKMLRx7+vroM96HIObtUq81sHUQvu7nmE2m03I4OHuqQL2Y9aozA6i8FVst/o1dS9xIPSLOMlBax9W7pPvHob8r3CzE140ujfxUW3pbiWRtsDU6jQd/2s4nK46yLSbtv9258MTGkvuVnfweXKBCor0bQIcxapnu/khQ5UV7LM1I1XztwfWvLTjAHJsgdGv3hqrcyub+eq/TaUZ0CMFktvLs7L38uuVEzcZtzw86M1A6A4AuZ1uyaBeEDqvvrp5j1eRKx+1aXwdAX8M+Oh/9nN6GkmNN9Lo/zu61LyRRtvKr5kNUJ0Hgs5XHmHayEfNR/yOXhK32zR/oF4GTQWPD8VR7RlOFHPhL3eqCoY0Plhymw2tLueGL9Tw7aw8/nQzF4uyp9l+Juyt/Tt0hFNiqZJlfIywuXjhZTTQwJ/DKvP3n97u68k3V1SyoTYmIWCp3ThAcyE6GTzvDO/Xhkw7w06gSN8l5IimzgOUHkyv/HlitELVaXbjG71D5kpeCq/Fc0F2INlfi0eRsjp3KwcVowCusDZGWUDSLCcuM65UL0IZFnxCoCNv/09phNFO25TNxixOfF6sy2aszZ1f4sJdGtGFIm2AKTBbeX3yY4Z+sZVUdXINd1tgiCeJ9u5NvMlPPw5mWwZXE6ZQuUbZaldi7brKa/LBa1ARBZhyseqdGw+hnKxvbXAO39Fn07wUnJyfuvPNOPvzwQ7766iseeOABh45jwgVGb9vsrWa2dUHI+zwJQnbajVK3B+Y57PjH9mrE7T0bYrXCE7/vqnk2yjkQlZBCozyltPYaVEHOQFX4NwX/ZqrkIWo1AEHernx6a2c0Df7cHsf83ec5QP3XW+DTTipfoy6wh0or0WfxfiU6dm9cj/p6fozu8EhXF129bILQ0eQcMk7ZxB5vVa63xyZWhDZpY2vDbC3pKlSKDmF+QB3nCJXKEFplKxcb1PoMsdtfF4RKLnZ7+qkxr0v1YU9cBr7uzoy+6VYAIgqPMq5rYIUlTwNbB/PQAPWc/5u5h20xVcxK56WWhPB6K0Fo5SElXnRr7AfhanaK+G0
l4dNVcE+fJhg0WHfstHJo2UrGrJ5B9vbQIzvXUrkYqKDnjjYRYPv3JU4SXWxDlXC1qu9NPQ9nep2rIATQ5np1W4kgBNidSUv2n6P1ets0+GkkmSnxPPLrTpYdTOaTFce45ZtNdHpjGSO/WM9Tf+zis5XHiEu7cPvJ84rZpD53OpU5hE7ZBI/W14KTrVFFcOuSkyTgX3Nn+rcI4oPRHfF2c+JAQhansgvx93RhcBtbOXBT5Tx0jlnDF7d35cH+Kp/my3+P16wlr+7MKOMQygCqkR9UEf7NlDtNP7nTLwga93HcLqg1hT4RuGrFPOSkPqOZDZVIEJGykpQa2L/rBL2kq6nKddp4/DSf/auC4lt0UyUVxG62bx5ez4MbOqv91+O/7yQho4JOK1mJJSJju5LjekJGPt+tjSK7sJi98ZnM2hHPq4uOsaxAlejtWTWr8llyuyBUasZd0zjpoj4/bbRYjiRn8/OmGgqL1SV+B+yYoe5fNxki1PtGVReMwuWJxQJ/PajOz6wWJbpErVIOs1O1E0p7JsdP5XDd5+sZ/9N2hkxew/zdJ8vvDBj5L/w0En6/FaYNUmLVH3eclzGdDwpM5pp1fTUXq78Z7EKu3j2xX4tAvhzbldVaTwAMyXuxWDV2WVQpf/SOFRU/b2qkyhID5rrfzKcr1f4ztO+dtvXHKxTanIwGvr2rOx/f0olAL1eiTudy7w/bzs4tK9Sc4kKIUxMe6+35QQHl5wfpBLUu6UKanaiy77ITwL0ePLUfHtqgXPNJeyFhd7WHojdlcej8XAVnJQgdOXKExx57jMGDBzN48GAee+wxDh8+PzsjoRroWS02h1DOeXYI2Wk5XH1QU485dk8BXr+hHV0b+ZFVUMzL8/dfsDyhpUvm46KZSTUG06J1x6ofUBF6DsGOH+zW3D7NA3l8kDpxfOmv/cSn1/wCzmS28NOmGE6kVhJ8mpcGx5Yp543eYvdCUpBV0rnOVjKm1yRf075Uu3E9cyFDnSz7e7rYHVmR0baLQC91waa7VzqG+0GXu9S63b+UsfSXOIQyauuvqTk2QcjsHmB3uA08UxAKKJvRE2ZR79EJa306hvuy8PG+XNm9q3JZWc3KcVYJ/7u6Jf1bBpFvMnPvD9sqnzHXHVyeweDkQoHJbBftbugUpsS6xlcCVtj7Z7X+7Ib+HgyzCSHT10fbHUKxRV6czMjHy9WprDB2rvS4X90e+rvkIqjUxZnRoDHnkT6sfmYgfh4u5TxBDWl9rTr4Ju62C5nlMdxWFrc5Ko303LO05mclwpIXIWo1O/94i9M5hTQJ8ODajqH4e7qQV2RmT3wm83YnMHn5UZ74Y9fZvc7FRuJeVS7qZBOOM+PKDZFX62xiZamsMsDBBfKvpSu39mhIgJerPc8N4MYuYbg42U5hmg1St/FbMZhymDC0Jb7uziRkFrC+up02CjLtbsfSDqHCYjOHEpQwX2nL+cpwcinZZxxdqnKzDE6qZKw0moZr++tLfu80Ft+x32PCiRZaPEtWlx+IflGQGa9KaDUjNL4Ss0W5tKxWGNM9nO59h6vtEvc4tNh9YXhrGgd4EJeWz23fbiYxsxxR6OA8wAoNezk0Opi+Pppii5Xujevx9R1deWJwC3pG+LPK0hkAy9FlPD+nkvIrXRAKKnEIFZjMbMpR3//hwepkesryo+dHjNv8JWCFjrcpcVB3jJ3YpGaK6xiLxcrcnfG8vuAAmXmmuh7OZUtGXhFP/bGLldNeUJOUzh5w31K4d7FyloFjyUktEX06l7HfbeZ0jvrsn0jN48k/djPis3Vly6n1SQB3f3uuIUf+KWl6cxETn55Hn/f/Zex3W6p/rZKwU+VIuvlCWHesViuL9FiFjqE09Peg5VW32Td/y/B/LA++DwCXk1vK38+B6npotZDW4CqeXauu5V64pjW3X91XHTNMeSUmgHIwGDRu7hbOv88M4Pae6rj6+oIDlV9zCLVD/DaVf+hVn8VJyhWkN9ypEGc3VWI
NyiW0yxan0WGMOi8KaQ9tlHPY3hGzGvRoUg8Xo4GkrPOYITRnzhzat2/Pjh076NSpE506dWLnzp106NCBOXPm1PTphNpA3zl4OQpC5yNU2gE3H3uQGgfnO6xydTIy5dbOOBs11h5NuSC2xaiUHKw2y7qhaT+V4H62dLpNzehG/gt/3mk/gX1ycAu6Na5HTmExz8/ZW2Oha+qaSF6df4BHf9tZ8WNL5wfoFvwLiR4o7V4PXL1Iyixguy1H55oOpfJj7A6hGPsivWws6aTtYturPnlFxRw7pTKBOob7QtsbwNVXzW5FO/59uiB0ODGbU9l1kN9itdozhA5kupCeZ8LbzYluZ+bX6C6WUoG5Btv7MKhPT2Y91LukxKyx3t2q8llfZ6OBqXd244qm/uQUFjPu+y0cSKjAKaWXi9nyg1YeOkVOYTFhfu5018fa2db2fvdv1bZu329rQf/X7pMUZSqh+df9Svi8vlMD3Jxrt3MhIR0gvKdy49lmxewXzja8XJ3w9ailMjXPQJtQBhxeWOFmjQM8aRPqg9liZfmhsywbWz/F7uLqdnoBXlo+k8Z05suxXdn+0hBWTOjPN3d249lhrXA2auyKzeBgQh05AmuTWJuTo+lAcPFSM9ml9hEO2DvlneE863AzxU4eHLWEkejegiE2J9CdVzSmfZgPrk4Gbu/ZqGR7/wi1P7IUQ8wG3JyN3NhFPefMbdWcHdU7uvg2Ao+Sk7gDCVkUmS34e7rQ0P8cnNB62dh2W0e1Bl3AxbPsdh1uVqJlYCsY8RG4+5EWfAUAWbv+Ou+5EAUmMwWmsxAjdHdQgy7g5sO6YymczMjHz8NZlZHXa6IEbIvJYaYz2MeN38dfQSN/D2LT8soXhfbbuouVCpPOzDfx+1blZH10UHOu6RDKhKEtmfl/vXn64UcA6KRFsnrXIdYcTSk7XlNByeeyVMnYgj0J7DGpC9rhgadpH+ZDdmExHy6p5UlPi7nEYdD9XnUb0kmVWRdmnpcL/JpwMCGLMVM38fzMHfy7aQsfLZNJ37ogv8jM/T9uJ27PKgac/A6AH3wfYRetlIjY0CYq11Lbap0Tqbnc/u1mTmUX0qq+N+ufH8izw1rh7ebE4aRsvlsX5fgAvTy971Mw4YDNRW11cARerHyy4hhpuUVsjUljS3Uzw/RysWaDwOjE0eQcjtvKxYbYGtn0GziCI91fZ1/fr3nx5ff43/13YsZAQy2Zj2etLv8awPZ//CqhBRYr3NItnP/r3xSMzuBnm4StRrMQHzdn3h7VgZ4R/uQWmXnqz901c0AJNcd2DLQ07sv2ExlAFflBOiHtSx6vN9zpcmfJ+q7j1O2+2VBUPSOCh4sTXRv7VWtbnRoLQs899xwTJ05k06ZNTJ48mcmTJ7Nx40ZefPFFnnvuuZo+nVAb2DOEQik2WygwqS/9eReEANqOVLcH5pVZ1TjAk/uuVBeYby88dN5PZCctO0pvg3Iq1Ws3pIqtq6BBF7j9D9WC/thS+GU0FGRhNGh8dHNH3JwNbDieyq9bSrWmPjAPFk6o8AubW1jM97Z8lv0nsyo+8CTuKblvK1m7oNjDim3uIFvZTLfG9Qj1LXVBpB+cSjkt9GDp7BTbc3iHcCAhC4sVQnzcVLtyZ3d10QMqOLbUQTHMz53ODf0otlj5cEn18m9qlcIsMCtHyK/71IzKVa2CcTaesau0O4Si1PhNBfaL2xuu6usYPGtvd151LoS7i5Hv7+5Bt8b1yCoo5rapm5mxIbrsgfyMC+m/dqnfR3ZuUGJPbTtSzSSmHlNdn6pBt8b16BTuS1GxhbRk9T+MzPOgaaAnE0e0rtZz1BjdJaRzhiBU61SzbOya9nrZWFLNXyMrwV4KkoUnPloeHzXfZxcWDQaN5sHeDG8fwqMDm3N1W/Vav209T6UpF5ITtvDjxr3LZG2VQRefS7k+APBrxEsNpjGm6FVu7BpudwI5Gw3M/L/erHtuIM2DvRwf08w2ObF1Klit3GrLxFp
2MInUnGrMkun5QWeWi5XKD9LOZZLBniNkc9PqwuSZhHaCR7bA+JXgqv7GgO5qf9mveKN9f1xbWCxW/t6TwISZuxn+yVrav7aUDq8vZfLyozU7ZuuTF7Zysdm2nKdRncNwdzGqCZpGqjspcVscHtrAz50/HlSi0InUPJ6bXWpSJCPW1oFLKznfAH7dcoLcIjOt6ntzVcsgh+cLCW8K9dtj0Kz0N+zl5Xn7yjaDSItSYqWrrz0Q1mq18uPGGA5b1GfHcOqgPRNx1o54tldVylsTEvdAfjq4+kBYNwBS8sxYGynxr6oJhFrh4HyssVvYfzKTtxYe5N4ftnL7t5sZ9eUGrvt8HdtPpDPF5RvWuj7NsW3LiUzJOf9jEuyYzBYe/W0nR0+c5AvXL3DSLPxlvpI34rtw69TNbIw8XeIQ0gPra4G8omLGTd9KUlYBLYK9+HV8L8LrefDowOa8e2MHgLIiqy4IBdnOE+xutwuXh2UyW4hPz2NbTBpbolKrVS58NDmbuTvj7b//vLmax2B9MtPmTtXdQf1bBuLjVjKB1eq6p+kwZCzORgNGd19MgSpEuChqPb9sieXnTTGM/nojHV5bys1fbyQlVglCh4sC6dLIj7dvbF9y3CnHmV4ZRoPG5DGd8HZzYldshrSkP9/YAqXjqpsfpKMHS2/9Tl1/hHR0PA9p0l9dbxVmlkycVoN+LYKq3qgUNRaEEhMTGTduXJnld955J4mJ0urugmOxOJSM5RaWnPSc95IxgFbXgNEFTh8p2zYPeGxQcwK9XIg6nVv9He1ZsDsugzX7Iumg2WYtmvQ79ydtMQTumqtm7E5sgI9bwPRraLr7Iz7orQ407/5zSGV/JO6FOQ+oLBR9BvgMft8aS3op27UuDpWhtCCUFlkmfPm8k2ETuWyB0ov3qQviER1CHbcrxyHUs6k/Bg1cCmwnC1712WMrfepgc/+oDccr++uxpbDrF/tiTdN47XqV/zB7Rzy7YtNr5U+qNnq5mLMnM/eo++P7ldPRxq+xmsU35SpBNuMEYFWfFc9Ax211QShhJxRVbdv1dHXih3uVKJRdWMzrfx/kus/XO+YK6aKdTxjpuUWsOaoceKO6lHJauHqXXECVCjSsDE3TuM/mEiq2dRnLdQ7g23HdHE5yapW2o5TNXCegRYWb1gq24F5iN6uAzgrQy8bWHztNdkH1yiUsFitv/n2QmZ9MAHMhO6xt+NCkcpKGZc+rsAxkbC/1XZu3K+HCtbo+H1gsJR3GGvWxn8Bu37mdl+ftcwwmtVhUWR3Yhc2sAhNzdsRz9/StzDwGGXjbhR0dDxcnJSyfSe9H1bEo8l84uoQ2oT50DPfFZLbaBdNK0Z2ZpcrFzBYrv23RM9LOMcNKF4R0KhKEAIJaqu+vDae212HBQEdDNH/9u7lWLsytViurDp9ixGfrePz3XczdeZLDSdkUW6yYzFY+W3mM6z9fz15bQ4AqnqzEIRTRn8w8E8tsgew3dysl9jUsXxACJQr9dF9PjAatJMcMSsKkG18JPuoYVFhs5ocNMQCM79+0fKHO5l4e7HaEuLR8Pll51HH9af0CtqXdTbwzNp0DCVnEGG2THVkn6RZc8jc8/vsue/nMOaO7gyL6g9GZ37fG0uOdFXwTo/Y7RZHntzzQenInzBxH7vQbGf35v3y/PppVR1LYFKUCvi1WuLd1Mdca1Pe5j7a39l1S58LeWbDjx7oexXnDarXywpx9/Hv4FP/nsphQUqFeBJ0fms7AVsEUmS383087iHWyOSVr0SH0weLDnEjNo4GvG7+O70WgV0knxH4tAjFoKivSnvllLi4RKQJtZb2Nqz8RVhtMnLuXVi8vpu8Hq7jlm03c+u1mrp6yhjk74isNw/546REsVmgfpuIOlu5Pqrq7aGEOnNyh7kf0V+Vie5Vru8x58hm4NVPvSw/DEV6Zt59X5h9gx4l0sguL2XEiFZ98dazK8Qjnmzu7OU4u2gWh6ndwDq/nYW9J//m/x6r
Op7zQZCdDbNnjwSVHUZ69bHJddfODdPTMxGLb96nrGRqLwVAStVGDsjE9R6i61FgQuuqqq1i3bl2Z5evXr6dfv1q4CBdqRl6qssmjgVcwOUXqYsLFyVCSr3A+cfNVWUJQrhDi7ebMM1crO/anK46SdrZ5HJVgtVp5f/EhRhk34KRZVCj0mZkUZ0vjPnDP36qMoLhAlUNs+IQbto/j4ZCj5BWZmThzK9Y545UVHmDrVKzmYrJKXUQWmMxMXavEKj3wdMWhZGJOlyMQ6IKQ0XYQPqOsSv+bk7MK2BmbXvs2UF0Q8g3nVFYB206oA8g1Z7Yb1wWhnCR7SZ2PmzMdwv0IxibkeIfYO4Z1Ki0IBbeBQS+r+4ufg9PH7Ku6NKrH6K7qBPz1vw+WH2B45pDzimonp8omCKVZvbFaVQvyjuF+ZbdzcnG076bZxD3/JmVLFes1Vm4rS3G5F0Ll4ePmzMz/683bo9rj6+7M4aRsxkzdZA93Ll0ytmhfIiazlbahPrSsf8ZsRKfb1e3+uQ65HZUxokMooT6uBKH+b49edwXNqzPLcbY4u0EXW/ikm59Duc55wTcMwroDVjiyqMLNWgR70TTIkyKzhX8PV6/k9es1kfyzYTsjzcpO/rHpJuZb+2Ny8cOQEVNiBz6D3k0DaBLgQU5hccn/+FIk9ZgKR3RyJ9mrFWtT1Un20YO7+GVzLON/2k6vd1fy5t8HyU1PUPtMzQDeoRxLzqb/h6v436w9rDmagtWquoiV+UxXREAzuEKVCrFkIhQXMqa7Og78uS2u6v1DOQ6hv3adJDIlFz8PZ8cStbMhuG2pX0q5ZaqDVxDF4co50iz1XwZPWsPVU9YwbV3UWe33CovN3PPDNu6dsY3DSdl4uzrx0IBmfDeuOxtfGMQXY7sQ4OnCkeRsRn25gcd/38X+yjo/ph5XQZhGV2jYi7/3JlBUbKF1iDftGpTq9FlaECpn3E0CPe2B7j9ssO1T9XKx9iXlYvN3JZCSXUiIjxs3dGpQ/phsJ9n9/NW4p62L5t/DySWCq37MsV3AWq1WvlurXnNQp+b2CRGS9vPa9W1pGuhJYmYBj/+2q3aOubog1GwQ8el5vLVQOceW5aqLvtxj6+j/wQoGfLSKgR+v5v3Fh6t1LKwOVquVjfNV+ZEXuQxwOsi1HUP5YHQHPru9C9/e1Y1/nujHa4Fr0FCv2UaLZemB5Np1SZ0NViusfh/mPgB/P1FS6vkf489tcczZGU+gIZuHXGwtp4e+SURYCF/f2Y2eTfzJLizm4WU2R3palHIqnyObo1L50Rai/sHNHct0RfXzcKGzrdviWt0llHFClUc7uZd8b3SHUOIeh+Yo2QWmisPjz5L45NMM2vUkG1we40XnP+jll4W3mxORKbn8b9Ye+n7wL8M/WcvgSasZOnkNk5YdITPfxM7YdJYdTMagwSe3dqZ743oUW6z8vrWKSdi4zep8zrcR1GvC4aRsIlNyHcrFKqRxbwD6uar9T8dwX16+tg0LH+/Ll9eF4qqZKMbI23dfU9LARacqx20FjOwcxqjODbBY4cGftl88Tr/0EzC1P0y/Go6vrOvRnBtxW9T5jE8YSxNUJUWV+UE6pZpoYHQt00kTUBEQmkGZE05X7//fIcwXL7fqxzzUWDG44YYbeP7553nsscf45Zdf+OWXX3jsscd44YUXuPHGG1mwYIH9R7gA6PlBnkFgdLZb42utNXR16PGAut37Z7ldsW7p3pC2oT5kFRTz3j+1Z2vVWX00hX1RJ3nKyXbi2Ovh2n2BBl3gqb3w2HYY+SU0G4RmMfFc1jtc47ybQfFfo50+jNWrvsrdyYjlm2+/oOPry5g4dx85hcXM2hFPSnYhDXzdeObqVgxsFYTVWuqkV6cgq2Rnr2fA2Kz4VquVhXsTuGPaZrq+tZxe767kpq828k5tv6e2kGj8GvPHtjisVtVdp4HfGfkZ7vWU3R1KRCTg8YHNCdLUSXh0oZe9Y1iHM4WVPk+q2VF
THsy+zyF49vnhrfB0MbInLoO5lczuW61WPl56hM5vLmfYJ2v5Y2vs2eVf6Njyg+KLvDAaNP53dcuKty2dI5Ru+z/WK8dNBCUuoe0/VDvPx2jQuPOKxqx65iqu79QAq1V1INt4/LRDyZje8W5Ul3IujJr0U2JUYWaFYsSZOBsNPN0/BFdNCZr9OrWp4hG1QM//U93S2o8+t+yv6lKNsjFN07jWNts3adnRyl1CO2aQ9PN4Av59hq9cPsVVKyYvtBdvPfkQq168FueeKkySTV+V+3CDQbMLDnouyiWJrRNUcYNu3DR1O3NPqH1GG9cUxvZqRKCXK2m5RUzfEM13f9smlrxDwejEB0uOkJFnoqG/O08PacnK/w3gg5tr2Big/zMqyD49GjZ/xQ2dG+DmbODYqRx7p7ByKS4scYzYTs6Kii18skK5Sh4e0Azvc3XI1WtSErQd0kFNptQAlw6jABjjuRsng8bR5BzeXnSIBWchIE5edpQ1R1NwcTLwYP+mrH1uIC9c05qhbevTwM+d6zo2YPmEAYy0XUT8vSeB6z5fz13fbylx7pRGn7Ro2BOc3e3lYjd3C3d074R2Uie8eanqArYc7r2yCQBzd50kPe6QCoDXjHa3Y2GxmW/WRNq3rXDiy9YJsl5BHCM6hGC2WLlvxnbavbaU/h+u4sBeWwhuYEssFitv/H2QJQeUG/buPk1KAr+PL8fbzZmpd3XDw8XIpqhUPlp2juXMhdn2yQFrs0G8+Nd+8orM9GhSjztuvIF83Kin5eCecYwTqXlEn87lmzWRvLnwYPUEwJgN8MO1sPgFojbN4/rJy3lhzl5iTudisVh5Zd4+Gicvs2/+RZeTfDm2K7f2aMQNnRpwdbsQ2voVO3QC7e6uPmfv/nPogjUJKYPVCiteh9XvlSzbO7NuxlIBqTmFlbdnrwb5RWYmL1f7nmnN1uNUnKuci7bjlpuzke/GdadFsBcHsj3IxhOsFiwpRyt72irJKyq2l2ve3rNRhSUnA1qqEkt72ZheLhbYAgy2C1DfcLXPs1rsn/W1R1Po+8EqBk9aU3uihKkA8+9jGWrcSaiWxoPGBfxZ8DC7mk3jlcENCPB0ITmr0C7aHDuVw+f/HleTDzPV5OvN3cJpHuzNXb3VJN9vW09U/j8s5YbcGZvOvT+ofcmAVkFVO6kbKUGoifkEW57uxoLH+vJAv6a0D/NlRLgS9JzqNaJ9w3KyZ6ojCJnyIadsZtq7N3WgU7gv6Xkm7p6+tWoX1PkmJwV+vlFNKAOs+bDa58YXJTHqfKbG+UEAPg3UZCioAOnyJkV9w+zd7Fj9XrWyhIwGrUbdeWssCD3yyCOcPn2ar776inHjxjFu3Di++uorUlJSeOSRRxg1ahSjRo3ixhvPsuW3UDPs+UFKldZ3sk0DywmrPF9E9FehjEU5sOePMquNBo03RrZD01Qdvt6xqjYwW6x8sPgwDzotJFDLVCeBekBjbaJp6mDX5U4YOwvajkKzmPjSaQr3OanZm9nhEynsfDcAXRN/B9TF3TWfruVLW+3u/w1ohouTgfv7KpfQzO3xjt079CBJn/ASlThqNceTs7nz+y089tsuNhxPJT3PhO5E/GnTCY6fKuck/WyxZQIVeIUz3SZY3XtlOUKHppWbIzSkbX3CnJQw+O6aNKJtLqiOYWdcABkMcONUVS6UtBdWvWNfFezjxuODVenQB0sOk5lf9mLcbLHy4l/7+WKVem+PJufwwtx9XPn+v1VeVG+LSWP6+ugyJyVW24E01erNmO4NaRrkVd7DFaXrue0Ooablb3vFw6pE7tAC2F+z8H1/Txc+ubUzIzqEUGS28ODPOyhKUzNYyQSwLSYdTbN1FzsTg6HEJVSqNK8qxrSxiX8uXuDiUaPxnhV+DWHCQdV6+UKgC0LRaysN3X6gX1PC/NyJTcvjxb8q6JZ46hD8/SQhkTO5zbiKrgb1efS4+mWa1/dRdvueD6qQ+tiNcHJnua81uls4zkaNPfG
ZlbsxgJMZ+aw6cqpmLdUvBLZysQ1FLTiZkU+ul9o/dPZI5d0bO7B54iA+v70LAIeP2kocfMLYFZvOikNqpnbGvT15ckgLmlX23asIV28Y8rq6v/ZjfEypdgv/jxtjKn7c6aNqxtfN155n9Oe2WOLT8wn2dmVc7yY1H8uZGIwl5RS6QFwTbKWOrYoOsHNCV+6wlRnOqOzvKoctUal8awuE/XJsV14c0YZ6nmU7+Pl7uvDpbV1Y9ERfbujUAIMG646d5oYv1vPzphjH70Kp/KDjp7LZHZeB0aAxsvMZ+yQnVzXBAhWGzXZrXI+OthyzwyttJUER/e2luJ+tPEbU6VwCvVy4vVclrq0A2744J5m3rmnCtR1CCfJWrtvYtDz7Rey+wvo8N2ev/X18a2Q72of5Qpsb1OMPzgerlRb1vfno5k4ATF0TxfKDZxk2D6qMxlIM9SKYG+3MWps49/7ojtzcsynuTdVF47QBhcx5uDev28qoZ2yM4ctVVcwOm4uVc+bEetjyNU2X3s3MzLHk7viTQZNWc/0X69m3dRXhWkn3PZfji9XjSrP9e1XCYDvO+RclEuhcyM7YDKati64bUWjZy7DhE3W/1bXqdv+cMt1KzwsWc8n5dgXM3RlPr3dXMuyTtSpK4Cz5YWM0p7IL6eibT6fEWWrhoFccJkt8PZyZcV9PQnzcOWxR37P3fvqL2Tviz/q48OGSI8Sm5RHm586LlWQGDmilhKL1x04r4UQvVyvVrQ+wl41ZY9bz9epI7vlhK5n5JvJN5oojEyrDYoHlr8KiZ+DYCijIwvrnnTTO2EKu1ZW97SfaMn2sOEUu437/Pax/fhC/PtCLX+7vxR8PXsGnt3WmRbAXmfkmok/n4uJk4Mkhar88vH0IgV5KQFpR2ffblhWz3tyGW6duIimrgGZBnrxybduKH6PjFQwBzdGwUj9jt+M6+3lkBROL+jlnekzZ76vVqr4Ln3SATzvCKccSQg8XJ6bf04MmAR7Ep+dzzw/bql0KX+sUZsOvN6vJVN+GqtQ7bnNJp9lLEdtnIta3W83yg0B9r5sNVA4g3WBRHj0fVLf7Z8OXvdSEZhX74Wevrn72Z40FIYvFUq0fs7nuW2ZeFujqqrc66Y08pS6+m50ZuHk+0TSVCQOw7btyP6A9mvjz0AClbr8wd1+tWUb/2nWStKRYHnSyuR+GvKbS+M8nRicYPQ3ajsRgVTvUGcVX8+zuYG7Z0RaT1Ugvw2E+HWAgzM+duLR8krIKCPRytedhXNk8gNYh3uSbzPy+rZR4oZeLhXZSs61O7pB7iic++40Nx1NxdTLwxKDm/P1YXw6+OZyr29bHbLHyzqJadAnZ3D4L45zJyDMREehpd0qUQW89X7qLUHEhXhYlCG1LVRcaDf3dy73owKcBXP+pur/te4eMlXuvbEJEoCcp2YW8seCAw8MKi8088fsuft8ai6bBa9e35aURbQjzcyc1t4iJc/fxzqLyy822xaRxx3dbeHPhQQZPWsOQyWt4df5+Xpizl7nr1fufofny5OAqsmz89dmaqJLZ7ooO5KGdoN8z6v4/z1R5YnkmKhywM70i/MktLLKXjI3+TQlxvZsGEOJbTq4KQOfbAU2VKByquLOWAzm2EinPmoXSnRMXwhmkE9AMWl6jLsrmPaxC4zPKioi+BQnMbbqAbsbj/L0ngVnb48tsc2rdDwDstjTlR/dxmK56GW75UV3E6viElohQh8svUwv0cmWYrVzmu3VR5Z7UZxWYeG/xIQZ+vJp7f9jGTV9tqLATncViZceJdL5fH82EP3dzzafruHPaFqavjyY29ewvVirFFig9LVb9HffdoHJctOxEKMzByWjg+k4NuKlLGGG2C1Krb5h9NvymruFnJwSVpuNtKqS3KAfWT+HOK9Q+av7uBKad2RlHJ9m2f6nfHjSN/CIzn9lE/McHt1ChyLVB25FKGNRD9WuCb5i9c49P5iGeHNLC3p2uWjk/qHKN/83aY28FP7S
q8gagXQNfPru9C2ueHcjAVkEUFlt4Zf4BHvx5h+oEabHYZ0eJGMDsHcqxOLBVkF2AcaBhT3VbQfmspml2l1BgjO27YisX2xefyTdr1P/w7VHtK5+Nd69nzyYLKIznyzu6su2lIex4eQhfje1MM4OamHp8RS6zd8TbA1jv0sW/FkPV8Tc9xp4vdW3HUHujjM9WHjvzFauPrVwsv9EA3rSVij1VWgS1lds0zNpJt8b+3HNlBK9epy42P152lF+3VJLHuG+WmqRw9ycuYgwJVn/ctSLec/sRL2suBxKyuM5oe+/bjgSPAFXmqXcHBOWY26pKyhjwvHJvAs93VcLLO/8cYsLMPeQVXcC8s9RI2PSFun/tZLh5unIoZ510HPv5YumLMKkVrPmo3NW/bjnBhJl7KLZYiUrJ5aavN55V18iMvCK+Xq0ccFNCV6AVF6hSyxZDy2wb5ufOoif64hKiQml9cqJ4ZtYerv1snXIS14Al+xPtouj7oztU6ojsGOaLv6cL2YXFKnTfHih9hiBk+xyf2LGMD5YcxmJVGUSgxLP0mkZIxG+FDZ+q64xfR8MHjdGOLyff6sKjTKTFDc/CXX/BlU+q7U/uxN3FyJXNA+nbIpArmgYwsnMYS57qz+QxnejeuB6vXNeWMJv73dXJyG09SoT2ckXPgkzlWgSe2eaDyWxlRIcQ5j/Wl0YB1Zw8s7mEynxuq3KaezdQ+yRLcYmTH1Sm5G+3Kqd9bopy3W+bVubhAZ4u/HhfTwK9XDiYmMVTf+yutTLUamO1wqx71XvoEQB3zSvJx1lb/nfroqcwW2WEAmtNNcwP0rnhC3h8h4opqYgWQ2HMz8owkBmrOmDPuBaOLqtQGAr3r/6E7gUImRHOK3aHkDoBP35KOR6an+tJdU3peKtyE5w+Wm7mDcDTQ1rSMdyXzHwTE2buPufZ7dzCYj5aepinnWbjTqE6aOqzeucbozOM/l5lVnS4hcKrXgVgb5YXKw1qZz+yYAFLnurHrd0b4mI08NzwVva23Zqm2Vt8f7XqeEmWUGlByMmV/Abq5LkX+xjUOpjlTw9gwtWt6BDui5uzkYkj2uBk0Fh1JKWknvtcKMxWJ4fAZzvUyd7DVzXDWNGOTc8RKn1wsoWcmw3OZKA+h+Xm8Oi0vlaFMRflOHTKcHUy8vEtHTFoqnzgH5uzLLewmAd+3M6ifYk4GzW+HNuVe6+MYHz/pqx59iomDFWzPd+ti+bR33Y6lJBFn85l/E/bKTJbaOjvjrNR4/ipHH7adII/tsWRdVpdzISFNapYYNEpbd+t6kAOqpwlpKPqLPP3k2V34FZbi9YKAgPdnI18d3d3egVbcNGKsVg1kqz18HZ1sudSlYt/U+jzuLq/4PHqiVG5eih4cNXbXqrc+otykxhdIXIlfNXbsbNPQRb8ejP1D/7ALOc3eMz4F68v2Mtfu+LZePw0W6PTePyXbapUFvjROJpB4z/A+apnod2osq+nH+htJ5PlUVq8uO7z9WyOSsVktrDjRBqfrDjKVR+tZuqaKIqKLXY30Q1fbOCNvw+w8lAyBxOyiEzJ4bOVxxjw8SpGf72RtxYeZO6ukxxKzGL98dO8ufAg/T9axfBP1vL5ymN2B5/VaiWrwMTxUzmsOnKKnzfF8OmKYyzZn6Qu/KsiPQYyYynGyE5Lc0Z3Dad3+xbqpA8cSoSeHd6Khka1n9mU4sa6Y6dxNmpVi7DVwWAouSCIWU/XRvV44Ro1S/b2okP2EksH9BwSW7nYtHVRpGQXEl7PnVu711IeHah9wMun7F2laozejST5AMHeblzXUV2oV9cl9NbCg8Sn5xNez51XrqvGbHYpGvp7MP2eHrx6XVtcjAaWH0ymz3sr+fuLCZCfTrGTJ+/vc7eHcDuESZdG76IVt7XC17q2QwNGe+6lBbFYNCdofR1FxRaenb0Hs8XKtR1DGd6+8vBWoNwQ1gAvV0Y0LMaNIoo1Z+IJxsVo4MuxXbmpa6kxu3iqxhIAB0s
iEB4b1BwXo4F9JzPZF1+5k69CbHkZbx4KITPfRLsGPozvV2ofbg/k3WA/TtzXN4LHBqq/59X5B8oXG8wmWPOButvnCe45fQd9Cz/jtHtTvCzZrOy1k3FXNOJOn11q+/Y3Q6sR6n7p8tn9c9Rx3LuBCv23fe5uDs/kpRFtMBo0/tp1klFfbjgnJ0yN0DtlhvdUnSmd3UrO9/bNOr+vXZgNO39W91e9Das/cFg9bV0UL/2l3N239WhIq/repGQXcuvUTWyKTK3RS329OpLsgmL6B+fTNHa2Wjjo5QonTAK8XOnUVX2nrg3JxMfWFn7stC08+NN2Rwd6BRxNzmaCrXzq/r4RVXYnMhg0u7Cz5uipknLbwDMdQkoQCss/gq+xkLdHteen+3rSroEPBSYLv9W0PFo/P/QKUZPgVgsmzYX7Tc9Qv8PgEuFeL/eswI1rNGjc1DWc2Q/34S7bMVfn9l6NMBo0tkSnMWHmHgqLzzA3nNgIVgtxWihJBPD4oOZ8ObZrzbo66+cCekdOnaocQgZDqfNO2z7NXAw/jFANWowu6jsNqpSydFnRnj/h/UY0TtvI9Ht64OJkYOXhU3y9pvoB1bVCzDo4vlydd90xGwKbq+O1ZlRdlcvpiLs7LoNx07ey7MBZdHy9EMTaMqX8GrM8QU2CVDs/SMfVq+IKg9K0vQEe2wr9n1X/7xMb4Ldb1Pnr0WVVP74Sqi0Ibdq0iYULHWeXf/rpJyIiIggODubBBx+ksLCWui8I1UfPENIdQrYSmAvqEAJw84FOt6n7+szSGbg4Gfj0ti54uBjZHJXGQ7/s4MeNMWyLSSs/98WU71CKdCZT10TinR3JGCebADX0rQvrMjA6w/D3YPQ0HhzcnueGt6Jfi0A63PS8Wr9/Nt6mND64uSOH3hpuDzjVGdk5jM4N/cgqKOaBn7Yr+2ZpQQhYnKMOsDd4H+X7u7uXmYGICPS0lzO8s+jQuZeQ6OVizn7E5iqH042lO1edib1kLKZkma1zk8G7vv3E4orK6lgNRgjrqu7HO14kdGvszyNXqZPgF//ax5GkbO6YtoV1x07j4WJk+j09HLo6OBkNPDG4BZ/c2hlno8bi/UkMnbKGT1YcZW98Bvf+sJWMPBOdwn1Z9tQAtr88lE9u7cz/9W/K/4a25LoA5bzp0b0aga/6gTk9uuRzWtkO3eisSuSMLnB0iZpxtIVYk5UAf4yF6cNg2pBys7hAhU1Pu0ldBJo9g9jy8nD2vn41V7WqQrgZ9LLKLclPg/mPVl2rnVsHDqELjdEJ+j4ND29QYnJRjpplO7lDuR7+ekgJ3E7uGDDzjPMspmtv8f6fqxg7bQtjpm4i++BSgrUMcoy+/O/Rx2hY2WxMA9tnPGF3he//FU0DeOfG9vi4OXEoMYvbvt1Mx9eXMfrrTXyy4hhpuUU0C/Lk+7u7s+H5QVzbMRSzxcoPG2K4/8ftjPhsHYMnrWHy8qPEpeXj7erE0Lb1eXpIS769qxsvX9uGK5r6YzRoHE7KZtLyowz8eDXd3lpOy5cX0/H1ZQyZvIZ7f9jGK/MPMGXFUR76ZQc931lJ3w/+ZeLcvSw/mFymhbfVaiV171IAdlqa4+apgjKBEiddWsmJZ6ivO32C1PnCspNqJvrWHg0rf/9qgn5BcOogFOXyf/2b2l0nz9hCqx2wO4TasfRAEpNt2UEThras/eYMhnN4Pj180jbeu/s0AWDhnsQqu1/9tCmGmdvj0TSYPKbzWWUi6V0I5z7Sh14NPfjI+CXXpymH3Kf51/DN2liyCooJ9XVjUOsK3EfhNodQyiEljpeDS/wmPrBOAeD34qu4+cfDPPjzdg4nZePv6cKbN7Sr3oADyn72AHugtFNgc5Y8PZAlT/WzdxV0oO0odXtwnv076+/pYt+2xhe0QHbScUiLpNhq4O+s5oT5uTPl1s44G0t9LsK6qrypvNMl7gvgf1e3ZFg75Qp+ad6+srP7e/5QxyOPQOY5jSAyJRdfD1c8Rrw
JQNCBH3izZRTueQng7KlmnHVR5dBCtd/LPa1CmwF6PagaKNgEIS35AOP7N+W3B3oR5O3K0eQcXp1ftrvsuXA4KYsHftxunwCyY5uBt58rAHS8Rd0emAfFtd+wxM6BeaqjqIvtvHr1u/b3aMaGaN62ObQfGtCM927qwMyHetMzQoU+P/Djtmo74hMy8vnBJu6+1uQQmsWkxMHSbtPyCFaCd1NLLGueHcjdvRtjNGgsO5hsd6BVRGaeiQd/2k5ekZk+zQKYeE31SkwGtFTnBmuPJIOeXRTk+Ng0l1ASCcRZM/Nu9zzuvKKxw2TojxtjKCquQbmf/l3ocDNMOET+/au53jKJjZb2jC4tQOvHWtv+vyaE+bnz3o0d7KLnXdO2OjqZbKVB60xt8HZ14pGrmpff5bAydIdQwi7HZh/VmVjUzzH1HKH4rWpC1s0PHloPN32ngr1LtygvyIQlL0BhFhycT8dwP94eqY4lk5YdYUMN3WTnxDpbLEDXu0q+y/Ua268fj85+zUHs/vdwMrd/u5m1R1N4bs7euitzqwxbppS5cT+2x6hjWrXzg84GF091Tv/Ebuj9mJpQTzmkriHKyY+qLtU+M3nzzTc5cKCkbGPfvn3cf//9DBkyhBdeeIG///6b9957r5JnEM4L+my/V32KzRZiUm0lY0EXMENIR699PPJPmfpVnYhAT163ncwtP5jMawsOcMs3m+jz/r98tzaqRBgqzIapA+DTTrCrbMvskxn5TF0bxQPGfzBiUfkKNenaUstomsYjVzXn5/t7Edahv+piZC5StZ5QrsPGxcnA1Lu6Ud/HleOncnj2t81Y9Vrs0E5sikzl+0QluHQ0H0CzlG/PfnJwC/w8nDmSnM30s6nLLo3N6RNdrHZmDw1o6niieib21vOlHULqM6l5hTD1rm58N647t1XVoUcvIyhnduCJwS1oH+ZDRp6JEZ+tY3dcBr7uzvz6QK8KZ7JGdQnj5/t7Uc/DWbUcXnGMG77YQEyqqo+fdncP3F2M+Lo7M6pLGBNHtOHxK4MJylL7OKdmAyofL5TUPpuLVHcBo4sqgauM+m1Luqtt/gqmtIM541U9sB76nJ9Wbsc+Ha98Jbg5+4UT4OVavZMRJ1e4aZq6yDi+okLR1o5+UPkvC0I6gS1g3HwVwF2UDT/fpDI4jixSs1j3LoJR32Bx9uQKwyFme35I+yAn6vu48mg9FSbp1X0s4YF+lb9O/XYqRyqvVCh4OdzRqzGrnx3IHb0aYdCw16OP6BDCRzd3ZMlT/Rncpj7BPm58ObYrP9zbg2Ht6tOugQ/+ni4YNDU7NeXWTmx9aQjfjevOk0NacHW7EB7o15Q/HuzNzpeH8uHNHenfMgijQSM1twiTWV1cerk60TrEm6Ft6zO6azitQ7zRNIhPz+f3rXGM/2k7nd5cRt8P/mXwpNUM/2QtHd9YxuYVal+33tyB125oV1IiWjprqxTNXDMASLQG4OJk4LGBteAO0vFpoNwNVgsk7kHTNF65ti3XdgzFZLZy7w9beXX+/pLZc1t22yFrI574fRdWK9zes2HlYnhdYHcIqfF2buhHp4Z+FJkt/L7FJk7EbYXvBsG+2faHzd4Rz6vz1b7t6SEt6RnhrwSOs8yBaR9o4E/Xd7jRuAEzRt5mPLO9xjK6azhTbu3EP0/0q1hI8woquaiJ21Z2feIe+P02nCyFbHHpyWumcWw/kc7qI2qf9ObIdgSUaoNdKaXLektTquV882DvirPiWlyt9gGpxx3cq2Nt2UULdp8kp7D843JFzJmpnCY7rS0Y3bsNy57uX7aTnpNriah5oqRtt6ZpvHFDe7xcndgVm+EgSG06kkDGEpXDF9fu//h4tSpvfXRgczzaXweN+qhOqX89pB7Qajg4u0PTAeqCIjsBYtbCr7eo8wDfRtDNlsd4hhDZq2kAv49X51trjqaQXEsBtcdP5XDHd1tYcSiZR3/byV+7SpXoJthcTQ1KCUJN+im3SEEGHF/O0eR
snv5zN0Mnr2F3ZSHyNWW37Ry03/9gyBvq/ur3SJh2G98vXA2o87Dnh7dC0zR83Z356b6edG3kR26RmVfnV5A/Vwqr1cprCw5QVGyhZ4Q/TbNtk2PluU3PJMgmvqdHU8/Fwhsj2/P7eOUamrMznl2x5QuvmfkmHvt9p/286IuxXXGq7HyvFPq5V2pCjBLLDM5lnC3v/nOIjWY1tuFeJaLsdR0bEOztyqnsQhbtq0Eovu382BrYEjSNJaeDOFwYQCN/D3o0qVeynU8D9bmwmku6R9aAMT0aMuPeHni7OrE1Jo3RX28kI88mCtku/jda2nF95wZnV05cr4maxLeYHM95q3IIQdnj6THV0ZTmQ1TJnsEAXWxty3fYMtjWf2J3/uuPG9OjIWO6h2OxwhO/7yIps/LvcHaBylyqkYB3Jgm7IGqVcgP1ecJhVWqXR7Cg0TJjPRM+/5WHf9nBV6uPM/6nHeSbzBg0yMgzMX19zNm//vkiRs8P6lrz/KBzwTcMhr0DT+9X+2iLqUQEPAuqLQjt3r2bwYMH23//448/6NWrF9999x0TJkzgs88+Y+bMiyvt/7LAXjIWSmxaHiazFXdnIw183St/3PkguA1EDFAn4d8NhC3flhv2N6Z7Q34ffwVPDGrO4NbB9s4z7/xziAEfreKzFUdJ+PlB20mbFeuCxzEd+sfhgPrB4sO4FWdxk5OtBlcviblYaGubdbPNJlREfR83vr2rOy5OBpKO7USzWrB4BGH2rM+bCw9y0NqYPKMPRpPKwyjvBN7Xw5mnbKUW7/xziIlz99W401ZRsYX5u0+ycI2ysEYVBxDk7cotVZVL6BlCGSdKxlaqjNHDRTkUKhWVoOTkN77sBYKLk4EpYzrj6mTAbLFS38eVWQ/1pkujemW2Lc0VTQNY//wgJo/pRL8WgRg01X1vxr09ys+2iNmgPrv+TVXIcVUYjI4zOX6NSzpsVEafJ2DMTypctbgA9s1UMzdh3UtyhjZ9WXGbeF1M8K3hxWpwaxiqZopZ/gpkVRLuroeb6//f/zrO7nD7H8q9UJABu2zlAddNVuU9nW/H8NA68KpPQ3MsC5stYMvT3ehRYNv/dL69Gq/hVnLirl/cVIC/pwvv3NiBtc8NZPGT/djx8lC+uqMbt3RvWOa7NLBVMFPv6s6iJ/qx85WhHH9nBH882Jsbu4RXeLLq6+HMmO4N+em+nmx/aQiLnujLxhcGcfit4ex/YxhLnurPd+O6M2lMJ5Y81Z+9r13ND/f24O7ejQnzc6eo2EJ8ej6RKbkcTsomt6CIKw3qYrFpr2u5vmOpcp4zLe42jNnqQiCJAB4e0KzqEs2aos8+ntwBqDKHyWM6cWOXMCxWFcY/aNJqpi3eDLkpWNG4b1EOhcUWBrYK4q2R7Ws+83u+CbFdmKccVuVBwD191Hf0ly22zjjbf1B/85z7Yc2HLN6bwHOzlfP0visjeHxQc8iIg6/7wPThkFuzkhZABdTHbwM3P4zj5vLy6x+zaeJgJo3pxI1dwsvPiyuNXjJxZufDzHglyBZmQeMr6fnMfFY+O4QPR3fklm7hPDe8VcV5duWhB0uXcQjZHA16yHdFuPnYQmpxONHuFeFP00BPcovMLNhdjQtaqxWS9pEy5zmuTVVif0iXEbwxsj2eFZWa6MHjpctYgRBfN3v3yw+WHCY+PY83/z7Igp8+xq8okRSrL0PXNScxs4BQXzdVhqppMNQmZJhsZSTtbI1fnFyh5TB1//exyonj7g93zQV3P7W8VKmifpxvHuxN98b1sFhh7s6KBe7qciI1lzumbSY1twgfNyd7V835u0+qshj9wr60Q8hgxNpO5Utt//tbHvhkFr57v+e2tK954aeVpGTXQtXC6eMqLF+zNWjo+xRc/TZWNBrEL2aFyzP83GghT/ULcdhfuDkbeX90R5yNGisOnWLJ/srLXf7cFsfyg8m4GA28MTwCTQ9dbzqw6jF6BavMLKvF/tnuGeFvL9t8fcEBBzd
ZgcnM1DWR9P9wFeuOncbN2cC347rhX9X3thRB3q60D/OhhcH2vw9o5pDfuTkqldk74tliUcc8Y6m8HBcnA+NsHb2+X1/9gPLiZCXK3j4/k5FfrGfSMvW3ju56RjdDTSspy7Xt/2tKvxZBzHmkDw183Yg6ncsHSw5DXhokq/LizZa23FJRWWxVaFqpHCFb2Vh+ujr3gJKJ1vLQBSF9n3bcJgi1uPr/27vzuKjq7g/gnzszDNuwCMgimyiKG4K7uO/7VqZmpuaWllnk05NaPdmuZf2yxbQ0MyuX1NRyzUTcTVFxX3EFFURllX3u748zd+4MDDDDKnDer5cvdbgz3IE7937v+Z7vOfI2LcbQ8XrrEBX8P7JY/lqiXPvsg6HN0MTLEQ/SszFlZRRS8mXfXE9Mxze7r2DEkkMI/WAXun8eiUb/244un+3BlJVRltfI0mUH3ajTH0vP5OmX4+VpRby6Kw0781oDAIYqD2L72Xv4bMcl5GlFDG/pgy9G0qqJZQeu6Sdy4pIy0Pv/9mL0D0eQW8rOfpZKzsjBkr0xeP3nvRB1Kzv2ZpewflBp2TrLdQnPbyrxy5gdEHr06BE8POQU4L1796J///76/7dp0wa3b98u8Y6wEjK4+Y65T9lB9WrbV+zBaOipJTRrk/MY2P5fYOUQuUCtgbD6rpjZJwg/vtAGR+b0wGfDm8Pb2RbxKVmI37MYdWK3IUdUYl9eMAQxD7lrxmPkO1+h/Se7MfibA/jz1B2MUkVCjWxaCuNbedlBJtXtTH/fPGRUKNmUEF9nLHimOZopaHZgf5o3nl16BBfupsDBRg2h3VTacM/HwO/jKHsqn3FhdTGjRyAEgTqbDVt00Oy2nlqtiGm/Hsdra6Jx/zZdYO/AHf8b1ERf86hQzrrMn6wUOf1f+n1bUn/Gmy4ESLxMF918Gng4UI2HFt5YP61DwRnVQthbq/B0Sx/8Mqkdjr3dC5FvdEODwp4r1b4KMCM7SCJdnIGiZ3UMCQIV85yyBxj/FxA8Auj3KTDpb6DbbMo8Sk+QZyXzM2g5b7G2L1LQIzdTLtCZn1Yrd3uQalnUBNYaYMw6Op8AQJsp1FVQ4lqfiskLCiD6VyqMmJdNMzOeZrZHr0ODGtyJNmtzn1p2aOzlaNH53NJzfy0bAU3drFDH2bbQz7uDjRW6B7nj/aHNcGBWd+x5oxs2vtwBa19sj5UT2+KfZ53gLKQD1k4YNnCI8eDcVEAoN1t/7Vr1xnCE9yrD7CCJiRsCa5USX44Kxaop7RDorsGD9GxE7o8EAFzTeuJuhgLNfZwsmimvUE5+lM2Rl62f6R0Q7KXvjDNv20WIhjdAez5GxvoXoRRzMaq1L/43qDGExw+BX5+m5RS3jwC/PgVkJFm2H7oaOOj8H6BeN8vfR/NR9PfZDcZ1Lg5+TRl0HsHA6NUQ1Hbwd7XHyDa+WDAixPLlGS6mg5H6JS75a56Yomt3j/Ob9Q8JgoDRuqzX4jpaAgD+eBFY0gm1z3yP2kIK0pRO8OsytujnSAGhmwcLTASNC6uLYG8npGbmoucXe7H84HWMUNL16+9ao2FvT1l9bw9sLH+mfdvKhe3VGrmFMSA/npMOWNnRedDN4DPp1oAyQLJTjYrvj2hNN8Trjt8uVdexO0kZeG7pv4hPyUJDDw32vNENz7bxhVYEXl8bjb/3RlLHM2tH/e/0akIq5m27gIkn6LrbOj0S+6xfx3tWKzFJtR1fZr2H2b/tLf2NonQdDuwFOHpBFEXsdXsWI/EpDuQ1hbWQi84JqyCsG1/g99TQw0HfTGXun+dMdksF6Mb7/b9oadcbfRuicfYZ+ow7+cnnz6IIgjzZcF/Ozn+zXxA01iqcik3GhhOxyM3TYs3RW+i2IBLztlP31oYeGvz0Qls09bCn1t///mD2j6Z7kDsCBd14xKCgdHauFm9vpMCJe7AukSDuuNFS+Ofa+cNapcDZuBS8s+lssbWO8h4/giqdrhnnsr1wKjYZsY8yIAj
A0y1NjIWkwKG01LAEGno4YOGz1BVx9dHbuHqMOgpf1nqjljuVfCgxfR0hXaBMyg7SeNCSoMIY1kVLuaurfycAgXLCBhzrAA10Qd41z9FnxyuU/v84UT9Wt7FSYsnzFAg8E5eMCT8dQ3pWLkRRxKp/b6Hvl/vwxa7LOHbjEfK0IqxVCmhFoE5SFAIvLcWcRb9gcWSMeaUq7l+GqKtTNuVaF3y87QKGLTqEy/GpWLTnKg5efYAIge7jJnhcxYBgT6iVCszoEYjPRzTH0BBvNPJ0QGpmLpYduIaE1EyMWXoEVxLScPjaA6yNqpj4Q1xSBj746zw6zNuN+dsvIuXSXgiiFrnO9bA7js61FtcPKgvS8uYbB0q8bMzsEY+HhweuX6cDNjs7GydOnED79u31X09NTYWVVTl3d2LGtHn6Ar5w8JLrB1V0QWlDjnWAcX8CAz6ngcWN/VRAtwgqpQIj2/gi4o2u+K6HEu+paXZ+kXIMJub8FxF5obAVsrFU9RkcUq/iTFwyFNBiqu0eeoG2L1Zs7SBzeIXQ4CUrWa4LVIShod54sQH9/k5r6+KYbh3qa70awrb329RVQ2FFs5RLexQoDKxQCPhPnyCs1HUQuHgvFeOXHy1YEC8mgtJIDQYui/fGIOJiAqxVCnRwo8H58/27YEhIMcufAMqs0OhqL0h1hKTOd9Lj5rB3lZcRFFIIsFdjd/zf00ElrjPiqrEueuZalwqMepYEhAxqBplTEM6QIFBtgOHLgPbTKLtIaSVnux38qmBrUQBILkVASBCosC0AHF9hMviG+LM0YFBrgDqhln+PqszWGZj4NzBhO9D/s4JfD+gCdJtD/76mO/+EPmf++UcalBVRWLrCrRoJzPMB/phqtCymMIIgIMDNHi38aqFdPVd0aVgb9VJ0XYsCOlNtJkOmloyl3gUgAko17J09yycTRwoIxRacIe5Q3w3bX+uM94c0xQhvOtfeta2P/s088eP4NoVnbVQ2hYKWnQL65TvWKiVe1xXSX3fwHERdhsB2jxeRKyrwtGI/DjrMwbz6ZyFkpVABysTLdP6wc6Pr02/PmJxoMCknkwadgPFNiCX8O1FGZVaKXMw4M1m++e79PmDjVLLXNiTdTD9OpNeX6DOEzAhEBvWjpZ4J5+VAEoDhrXzMKy79+KG+6PH2vDaYmh2OuAlRxd/oe7em5Wpp8QWz6xQCPnkqGAoByMrVwsNOQKiKlm2PGTcVUe/0wqUP++uLjuv1/oDqvHQMp2u3JLAXYO1ESzlGrAB8Whs/T2kl14eR6m0BGNi8DmytlLh2Px0nS7hEKyE1E2OW/Yu4pAwEuNnj18nt4KqxxidPBeOZVrSk5Z/ddCOe7R6MhPRszN5wGn2+3Ifv913DntQ6uCRStomoUAF1OyPXtjYaK25hxp3Z+Gq76fHE4+zcAjWYMnPysC7qNhbsvIhl+69hQ9RNZB2nY/J+4AgcuJKIEUsOY/zyoziW6YMvvRYge+QaWoodE0GFfPOZ3j0QAW72SEjNwgd/nS+QhZGTp0X42mhk5OQhrJ4rJneqB8Tori31u5l/bdHVETI8h7s72ODVnnT+nb/9Ivou3IfZf5zBvZRM1HGywYJnmmP7a10Q5mtDgYM9H9NEbiElH/Ib2doXDXUZQvHWcjbxr0duIuZ+Otw0akwZ3IOuAdoc4Ipc9NbFXo1XdZntv/17Cz3/LxJrj91C9O0knI1LxrX7aUa/n0276GdyT3TBD1N6YPGYlpjZuyEWj2lpekxYygwhSdsAF31jgXMH6Vx1SNsUI1r7lO66JWUIxR6jcZ459YMA+byRfBu4qKvr690SsHcz3q7VePo7Wzcx3P8zeUxucD7xc7XDL5PawtFGheM3H2HSz8fwxrrTeGvjGWTnadEuwAUfDWuG/W92x8Xp3rjUaBnWqD/CLKs12Gz1FsIiRmDxwg9wNtZ4WaIoiriakIrISwnYHB2HCxs+hAARf+e1QqzKH7XsrHDhbgoGfXMAC3U
1+7r0exaAAJsHF/DdYC9c/LAf/tOHlmEqFALCe9F1bvmB63h+2b+48eAxrHVLk7/cddni5buWiE/JxLubz6L7gkgsP3gd6dl5CPJwQD97yrjakd6gYuoHFcYlgMaXoha4+Fexm5ti9qhnwIABmD17Nj799FNs2rQJdnZ26Ny5s/7rp0+fRv36ZkSyWdlJT6Q1shAA+9q4mkAX6sCKLiidn0JBbej92lMdoEvbaEaxsMFjVhpwdResL/yFAZd3AmIO0LA/wkd/jdcAZGd0R+6vT8H5ThS21vo//Nt9DaweXIDrkbtUSE2qqv8kUShpBuDyDho8G6Y5F8Ivi06Kg/v2w537flAqQB0QBIG6aniFAGvH0mD28CKgz4cFXqNzg9rY9lpnDPr6AGIfZeD3qFi5i0L2Y2DN8zQL6FIPCOiMQzGJ+OJvqqfw4dBmCDpGAQLb2hZ8lmvVpSDQoxv0PvVZa8W3NDbi05Y6EcUek7u7GNr+JnX6GPWr6a+XRloCDfgBoG4xBRwNGWYIFXchN1eLsTRTl3SLZtBDRslfE0UgTrfm3JybGVMa9KGslviz1Jq065vGX5daSPuFGaWA1xhqu6Jbf3b+D83aX4ukG6jgEea/tr6w9En6XVZ2IDvlrr4FNk6voT9NhlLxcysLlh3HRNLfprJFpEBpxkO6ObZzMc5yK02R5aLUaQFAoPasaQkFMhatlAoqynwvCbgPdOrQDZ26lrD7V0XyaEot2+PP6tPEx7Tzh4ONFTas/w0KiIgV3fDSzW7oqqiFJfbfwz0nDtj8ErDVlmaLbWtRe+a8HGpbG3sMWD2aHivuM3/7CL2GxhNwt6xTmZ5CQdl3ez6m5Zkho6hWYHYaZe1Iy7RKy9oBsHenjMsHMXR9evyQAkSAeedQ21p0XF/9B9g5h5aWKq30xaX/PHWHCq8HuKBpHUf0aORuXJMoJgKAiLs29fBS0uvo2cgdQT5mZM9a2VBg5uZBqiPkFmj05WAfJ3w6vDlOxSZhZqNkKNbkUICvVgAEQYBaZeLc4lIPmP6viZ+TBpj8D5CXJWdI5ufRlJbLxJ8DGlFnMo21Cv2beeKPk3FYFxWLlsUs487vUXo2xi47iuuJ6fB2tsVvk9vB3YGWjioUAj4d3hw+tWzhvpeW2f162xWfL4jEY11R+16N3fFMK1/4e20BHlyE4NsWsHWGKv48sn/sh9DsGGQefQVDLn8AB0dHONuqEZ+SiRsP0pGYlg03jRo9G3mgR2N3XLqXipWHbyAxTS4g3E0RjeHqeDwUNei4UY1s0M/OWqXAmHb+CO/dAGobK+DBm8DuD+j4aNCbznE6NlZKfPJUMEYvPYINJ2KxKToOob7OaOihQeyjDFy7n464pAw42KjwxcgQyvCUzsnmLBeTmMgQAoAXOgRgzdHbeJR4F0EZ5/GSzQV0c7iNWj6NoLQZAiTnAesnyeMKADj6PTDoy2K/pa+LHdpq7gMZwF9xjpgMICUzB99E0E3yzN5BcLJX0zVl/xeUZRcsj9endw9ECz9n/G/TWcTcT8esDWeMXr+xlyNm9m4IT0cbRB07jOEqALWDEKa74e6PItShzB48ukHLYu1LfpM+u38j7LoQj8aZpwAF8K/YFO+Xtr6cexMKemcmA/dOm1c/CKCundLzjuqyuQJ7F9wusDfVKUq9K9dYdWtAY/XEK0ZB36Z1nLByUjtMXrYPs2On48FtR2wVwvFa32BM7VKPjsl/3gMOLIQ1REBhBdG/A7Q3DiJUEYPQlP/DZ4tjMS9gEsaF1cXVhDRsOhmHK7qu165IxhHrrYAA7HYbg+1jOsPOWolZ609jj6423DOtfDC4QzBwriUF8a7+A0XLcUZvqW9TDzTzdsTZuBRcjk+Du4M1Vk1pjykro3A9MR1LImPwRl8zsj4ttGz/NSzYeQlZuvpJ7eu54KVugejSwA05i2YDicDO9AbI0FZg/SBTmg6jycZzm4DWEy1+utkjsQ8//BAqlQpdu3bF0qV
LsXTpUqjV8mz78uXL0adPnyJegZU5fSaGO6BUPRkZQoY8gyl7BwB2zNHXPDDy+CGwOAxY94IufTyNblaHfQcIAgRBgLWdI1TPrwPcgqB+fA+d/30R7e/oany0eJ5u4J5E0rIx6Qa7KLlZQDwFJOo264B5Twfjo2HBxoU5fVoD/T6hf1/4s9CCoO4ONpiua0+7KOKqXE/o6j8UDAKA6N8Qn5KJV1efhFYERrTywcjWPnL7eEtqxxjWEUqOlTNtzEnHNyRdoPJ1GgNAP5/oVXQj8sfkIrvPlYi0z57Blg0cXAwCZ+YuGSuO2g5o/xL9+8CXxnW4HsTQAEdhJS8psJQgUHctgNaW5+/CIf0siutsUlMplFSgO6ALtf60ZGmkvrD0A/qs5JeRBCzpRJ3gCiOKwOZXgE8DgHul7PAjLZN0DdQtGxFo0H5ipfmvkZVGAQrA9I282p4KPANy63kpy82phDUYzGHjKC9jKCTrEIBcL0uqk/KkM6znYmBISB3Mb08zpNHa+nDTqDF14ouwfeMc0Os9qguTm6FbErSefjaezSgIZO1I16kj3xX//aWb1fo9ShfQDBkNQKDv+yCGbkIBoN3Usg2U6juN6Y49qU6ds3/RSzMMdXsLUNnSNXTTS/pz8sROAVAqBMQlZWDjyTh8tPUC+i7chwU7Lxpcd2l53ZZ0+r291M2CyRZd2+78dYQkI1r74qNhwXB5pKuv49Om5D+72g0LDwYBcv2qeONzzjO6ZWNbTt2xqHZhckYOxi7/F5fiU+HhaI1VU9qhjrNxEFqpywp4ypMy4aOy6+Jxdh5CfZ2x4aUwLBvfBv2aecLG1Q9o2Meg5lETqMdvQpbSDu0VF9Dj4WocvPoAW8/cRdTNR/qgT2JaNtZG3cbUX47j/3ZdRmJaNuo42eC5dn4YElIHLzlRHZ9/VF2htLKBjZUCL3Soi31vdse7g5vAUerUFzaDAjKPH1B9vnzC6rviw2HNUM/NHnlaEcdvPsLqo7ex/0oi4pIyoFYp8Nnw5vT+U+5SxyAIli3HNJEhBFGEOvYwNrgswnGbl/Cd+ms8g11wS70I5YVNwPqJ1LglLooCn73eo+edWlNoB0Ajogg/LS3V+SPWHlcT0rAkMgaPHuegfm17GlMC8rLLK7sKjDcoW7ML3uwXhAbuGng728LD0RpqlQIX7qZgysooDF98CPVA10uP+iHm/TxsneUJu1IsGwOAWvZqfNCzNhoq4qAVBajrd9YHLktMoQB8datsbh02P0NIEOT3JWU6NjBx761UURfket2o8DBQaIMHgBoUrO2SiFBFDHoqT2Jvsx14qVt9CgYdW0ZjUYhA06eBV45CGP8nlP+5iNQWVNJinGoXjlxNwNRfjmPBzku4kpAGtUqBxl6OeNHjMqyEPCQ6NMYnMyairps93B1ssPyFNljwTHNM7VIPHwzVXdek4JZULNvordNKCACoZUeNZQLdNZjVj479pfuvmdXRLztXi4v3UrA5Og7f7L6CqwmFl9e4Ep+Kj7ddQFauFq38a2HV5HZY82IYujasDSHjEdSJdB0+o6ayARVeP8iQftnYfrmDsQXMzhByc3PDvn37kJycDI1GA6XSuNbAunXroNE8IYGImsKgfpAoiohJkFrOV0KHscJ0m01FcxMvAcd+pKUxhv5dQpkQ9rVp6UXjITSLnn/W2E5X5PDHPrqT4GUAusyZJ1WAVEfoMKWE5l9KYejGAUqp1XjIdXlMCexN6cmPbtDaYS/TtUuebeuLJXtjcDc5E2uO3sILHQOMaiDknduEiTFDkJgmopGnAz4Y2oyCc1J6qZMZRZUl+k5jN4A986g+jX9HyhCzhL6wtK7tt+ExcOOAvG8Zj6iW0sSdNItqKCcD2PMJXSCln785rkXS35bUDwLy1RCycMlYUdpMpgvw/Qt0s+2vSy+WUq79O9Dsd0k1GQZEfEi/sxMr5QBUXq68pt2Sn19No6lN9Z8sJRWWjj9DMzn5i5df+Is+1/f
O0MDLVFZl5Hy56PWO2bQfJb0JlJYmNB5MNwMHvwJ2vUv7IdUuK87NQ3TucvIr/DPgFkhdjO5GU+A3RRcMK8myR0t4t6IZ87jjtPQnv9xsuZWxdMP7pMvX8cmQVxo95tW4I7YN6gx3R935sdPrVBPr7AZaBuplcFPl3RLo/ykFOvbMo2twUcHtqwYBodJw9gXqd6cA0x8v0rnIxknffrjMuNanGy5pmYRUHNrUTVRhfFoBo34BVj9Ly79sawH9P0OorzMOz+mBM7HJOHcnBUeuPcChmAdYtCcGf526i7HtfDHm/E7YAYjIa47W/rXQuq4F9SXqdgT2Qa4jVNjnXApy5V/qVZYKCUS2D3CFTy1bxD7KwM5z9zA0tPjP9Nm4ZISvPo7kxLvwsKuF3ya3g79rIePW3CzYPqCsl4H9BmCISwD6NvUofrmOd0tYD/oc2PwypriehX+3D/EoPQe1HawR4GYPn1q2OHcnBbvOx2Pf5ftwsrPCCx3qYkCwFxXuz0wGFlCge+TkWRjpFQJRFE1/X5UaGPwVsLwPFVwPGV1gwmZse3+Mbe+P2EePcUAXCPKtZYe6bvYIdNfIBZ2lpch1Qo0yjYql7zR2gyZ1YqOowOzdU6hluE29rlRP6u5p+iw8vEbjvef/oAyS0+uAhHOUsdfhlaK/Z1oClFnJ0EKBGK0XPtp6HodjqEj97P6N5Tpsns0pAJt0k27083VOU6sUeLlbIF7uUk8/7nuUno0f9l/DioM3kJGTh2a2dwARENwbm/8z8W5FwY+445S5VQoDHSiIckH0w/BOZtYMLI5/GHBlJ11DpTpu5kwsugbKS+HsXOVsqPyaPiUXjwfkjMgHV0xuXv/uVv2/Pa6sBo6H0flz+yx6sOdcoPNM+Qma2nAY+CFwaT08Hz/AR03vYuHt+qhfW4Nhod7oF+xJQdPfvgGSAbc2wwGDYIkgCAUb1zToDeydT2PyvJwCGavdg9yxagqdL7x1AeS+TT3Qtq4Ljt54iPnbL+L/RoYUWgNw57l7eH1ttD7LEADWHLuNba91hpNtwezYb/dchSgCvZt44IexrYw//9LS6dqN8OXgPli05yqmWRLwL2suAXRtv3uKxnCtJ1j0dItztZ2cnAoEgwDAxcXFKGOIVYBUXZcgBy/cT8tCSmYuFAJQt7ALa2WwdQZ66GZMIj8xjlpmpQL/6mYFByyg9e0+rQtfQuDkQxctW93lrUHvsr0JL2sezWiAm51afB2hi7oTcdCAom/urDVAfd1N4oXCb0itVUq80kOXJRQZg8yMdFq+BiBLaQ9lbgaaPNoDD0drLH6+FXUjSrpBT9Z4Fgy0FMVZlyF0LRI4tYr+3fsDy29SPZrRLGxWcsEL1qXt9HeDvjTTfTeabobzO/oDcOhrYOPUYot5GylJQWkAcPCkQJZ7k6I7Q1jK1lmeVTtjUJfAVEeJklCqgI662l6HvqEbYwC4d4rqetg4mV8omVmmqMLSus8oAArM5D+GT6+jwRJAy9Vu7Jc/G5YSRTkQKi1NkI65m4dM15cyRbqBKarehRQ8kM5zUnaUpZ3yLJWv0xi0WuDwd5RSDdDkgjaHMmQsCYJXJumGKCWu4O9IlwnVqkMvORgksdZQXQkvEzPsIaMp4y03A9g6k46NnExg11zKWJO64qXG67vsoL4Fy1kKIxVtl5artBxvftaOuaQszocxFPC+qOtsJhVSNleD3rSUEgJdZw4uBEAZuT0be+DVng2wakp7fD+2FbycbHDr4WNs3LETdjkPkC5a47i2ob7AsNl82lI2aEqcXKPPFH1AqI1lr28JKRD5MMaoELhCIWB4S8oE+WzHJRy5VnjXujytiMWRMRj+3X68njwPx2xexr/a0Qj8uQVN9pnK5Is/S59RWxcM7Nwe/ZpZUHMsqD8gKGGffBlP+edgYqcADA6pg2beTnC2U6NjoBveG9IUEW90w8aXO2JoqLfcxfHiVlpC5xakvxYW+X392gGtdDdgf75aMPN
Wx6eWHZ5t64f/9AnCyDa+aBvgYtzdK6aEAVdNbQoOQAR+Hgzsfp/GnSoboNULwMv/AtOPUPC32XCq0zXjBP15+TBliAkC0E6X1X/0h+LHUIkUTM9x9EMW1Ii8dB9ZuVq0reuCXo0NMmelRhpA4W2xD30DfOxB2UmgrJxZ/Rph/6zueHdQE7S21zUrkWpZmUNfR6h0GUIAIOgy/b1b9EGXhrVL/XoAAD/d0vRbR+QMRnNKDxhmpgf2Mn/ZtT6zqGCGENIS5GOvha7g/bY3gLXPA9pcKs0hZZYbUlnrsj2B0co9+PetXlg1pT1GtvGlYFBWqjzOaDSo+H2s04LG+FkpwG0TqwVAWWXeBtmEgiDg7YF0Xfzz1B10mB+B+dsv4lq+xjqJaVmYteE0HmfnwcFGhdb+teDpaIO4pAzM3Vww2zrmfhr+OkVdJF/r2aDg51+6b6jbGS38amHZ+DalKzReFqQsoXMbLX7qE9hGg5lNyhDSeCAmgS4+vi52xXeGqmgtx9EFNTOZTjDSUqeon6jNomsgzUqaw70Rpbg3Gw70+ajcdrlMKJRyl6Yb+wrfTquV2+82Glj860ot7Qu7sOqMaOULn1q2uJ+ahVWrVgDZabgHN3yVSd9jquMR7AzvggA33QBcWoZlaatxwwwhUUsX/pLMVCpV8g2c4YVAFOWb3tYTgeFLAQjA8Z/kGzuAfo5RP9G/U+Iovd8cD69TlppCVXTtGFMEAZi0C5h2oOzr7Ui1ac7+QQGb7HR5RqK0ASEACHmOMtJS4uSlItd1yxv9O9Lxy8qeNJuXv/V8TqY8IFOq6Ubo9Fr567ePykvJOrxK7Y8BWqIgLce9/DewvD+wYhAtB9j5tslMEgCUOZN2j24YpC6NtepShycxzzg4VRQpy6ioeheNdDff1/fRTGhpCqNbwrCwqCgCh76iOh/rxlNwTfrZeDSt/HpO5rJxkrNIpbpnAC01SYmjLnimgj5FEQRg0EIqYhwTQVmW33ehoMe9M8CGKZR9KQ3svUIKFjEtiaCBVAcQoP1uO6X0r5mfYZe7W4eolpWti7wcyxLBz9ANNUA1Y6TzpYG+TT2xa2ZXvNqzAV6sQ8tAbju1xv+GtUBPw5tkc6jt5GuidO7PL+UuFZgVFGbVKiwxjTtlcovaAnVqxrT3g7ezLeKSMvDsD0fw9sYzSM1XPJk6iR3BpzsuYpbwCwYqDa7x6fcpE3bNmIJBTuk86d3S8s+onYt8Tbc0cK4rBI7gZ8z/vr3eo+WxD2MooG8prbZgkN4SUuc4OzcKePb7FHj9PGUvuZsIpAgCfT4Ms42DR9JnMukmcHlnwefkZlNjks2v6BvGWHs2RgeDQrpzBjQqePMs3ahe3knnEkOPbgIRH1FntT9nALfkOlduGmtMbO0Kq7Q7ugcamvGD0Ml//s8vOc50KQtTdEvpnZuUYf3KOi3o+vs4kTJoATMzhAwDQhZkPhm2rDcsRQBQ9qiopWL2g7+m4E1eNmXke4UCQ78t/HPQUlfA+spOIOWO8deu6mqTudQ3L5inUMqZ0dIEqBlCfJ3x3uAmcLFXIyE1C0v2xqD3l/uw/ri8NP+9P88h6XEOGns54sT/emP9Sx3w3fMtoVQI2BR9B5uj44xec1HEVWhFqlXWzDtfk4PcbDnoUhbj8bIiZd/d2E+1syzAAaGqTL9kTO4wFvik1A8ypFACA7+gG+5zG4HIeXTzc3gRfb1juGU3n3VaAM8sN2pz+cSSlt0UNpgDgLsnKdtLrTGvbkvDfjRreP+iUdeT/NQqBV7tQSmiTjco4LQttzUirHtChAKBGafhnGHQqlFqJ+tsaUDIYHuFitJKS0pfR+iY/Ni9M7TERGVL6c6BveS01d3vyxf0a3vkddgADVoKk3afbnxijwPn/qDHvFvTLLqlBKF8gicBXShbKzOJLozX99EF2tm/5AWlDVnZyDUDIudTQE+qdyXVv2Jlz0sXELobbTxIvb4PyHlMQZLub9FjER9RjZ5
D3wA/D6GBVdBA+r11ep1u0B5cpdncf96nDlK3DtHv8ewG4PC3tMwlJ7Pgfkg3Hn5hxhmBUlD6wpbi30viVfPqXbgF0ky7NpeWPUpLxso7K8e9KQU5MpOoA1CEwSTC5ulyVysp+6Gq8NDVejEM9kl1Mmo3Ltl5zLU+0PW/9O99n9Hsv707/Xlwha7bJc1eKIyVjdyCvtGgopdLl5RhhtB53SRKowFFL+EuStsXKZguaoENkyhrKh+NtQozezfEMA3Vc2nU6SmMbe9fsq5EUuDqpuk6QvrsKvcmpVtGbA79sjHj2XR3BxtsD++M59rR7++3f2+h82d7MH/7RcQlZeCvU3fQb+E+/Hv9Iaaqd2CiShdsHv4j8N8YYOo+ullNvUNLFw3Pi3G6gFBhy2KKE0QFsPWTbuZIuw9c083+Nxtu/vNsnan+JUB1V0zUQUHqPQp6/DRAHnNJ4s9ScMzKnpZ1WWrodxQA+u9Var7RfprlxZTVdnKHKqmul6HNLwN/vUrLlqWslqD+mNm7IayUAka08kELU8XFvVsCjj609F86j0j+fodKDSjVNMZZ+7xxjb1EXca4xsOyZXQezXQ1+xIL/qxvHAQWNqPfQyHZXHrJcfReBYW8fL8sqNQ07pSoHXRZXsXQlyoQLOv06OxP9w65mfI1WCJNPjUfRRlHTy2hiSK3hsCzq4puMlG7IY0jRK18TZVIWcGNBpofWNXXETJzUlfnhY4BODKnJ5Y83xKdAt2QpxXx3/WnsOboLfxzPh5bTt+FQgA+G95cnwnY0q8WZuhWU7yz6SxiH1H2443EdGzSBYikbnhGLu+gmmEaz7K7HpYFl3o07hG1cva2mTggVJUZ1BC6qq8f9AQGhAC6uEldC/Z+SjO0affo5kcaEFZH0jrym4cLn4mQTpgNelP6ZXFsneXW6Bc2F7npUy290b+xC/qqaFDVZsAEbH5rBIRA3QksepW8cUkKSgPUyUCpS3luNaH4drpF8dENggwDQtLMXv0e8kWp00y6cD68BpxaTY9FLddtp7tAXt5Bs6f55WQCP/YGfnkKWNaDZnoBy9rNVwSFUu7IcXqtXD+oQe+yy2YIGU3Bn9wMYMvrdJwCXD+oPBVWWPqy7jhv2A9o9xIFS1LigK9DdQPmDPoMPP0DHRvWDkD3t+k5O98CDvwf/bvNZLrR6vMxDVaSbpke2EsBofxLfxrr0rpjIoyWhhQgisBWXQp5YM/iB+rS6174y6CodDlnCKnUcrbMppcoINX0Kd3sZ5ac8l1VCkpLTN2YS8viSpMl0uE1OTjW9GnqSDXka/r/oW/ka1VZDoB7/o+O1UELy+41Dem73D0Czq6nf5ubkWyKINAEl3sTagm/YZLppTWZKdSRDZAzN0qiri4gdHmH6WYKFVE/SCJ1STyzvsCXHG2s8MlTwVg1pR0C3OyR9DgHS/bGoNOnEZix+iRSMnMx1f08Zit09c96f0DXN3s3+ow+8xONIy7voNqSEilDqE4Jj2updtjNQ3Kh5PQHFHQ4sNB05sj5TZQlWael5eOZ+t2BdrpamZunyxlPuVn0/b5pRXX7bh6kWoi5WfR1UaQgEkC/c3PGgvkpVXROLe34oM1kCn5ci6RJM8n9S/LvvsOrwKjfgJkXgVbj0bquC06+2wefDi9kqbkgyNntBvUscS2Sst0FJfDCNjr/pCcAa56Trz/3dYWyLZ0EtrKRz2eGXdQACnCLWmpisn4iLSctjDRR5hVKGZplyTDA5BJg3u/Oszlly/eaa1mATKmSM5CkIBtAE8t3TtLvoNnT9Ji1A9XpfPlf867TUpbQiV/k7KPcbMpaBixbohvYE4BAy5NNjeGLoFYp0K+ZF1ZObItxYf4QRWD2H2fw+u/RAIApnesh2Mf4d/hK90C09HNGamYu+i3cjxeWHcSp5TMwTbEZ3YNqo7mPc8FvJAW+QkaVfHKhvEhjOg4I1SAGNYTkDmNPUP2g/FqOk9egSssROsygQXt15d6U0tN
z0gsuEZHo6weZsVxMIp1ci6gjBFBr5cUd0qAR0wGNJ4Lb94G1SkkFvAEKpkiDWWmwaWmGkEJJaca1GwFdZ1n23PykGggJF+QLiXSjHGTQZNRaQ0EhgFq0P7wuB476fqybrcgDon8t+D2OLKJMIit7mrGysqcUa0vah1cUKVh6aYdc+6Is01MFgQK1SjUFAHLS6Xh1r2I3yFWJVFgaoCwhQLcsUndODBpA20i119LvA9ZOwJBvqYaaYfZHi7Fy62+1hjInB35BN1odXgF66pYt7PvCOH04L0fOWsyf2ePRjM4BuRlAzO7C30f0b5TVpLKhGnDFkeoHXNlFy3aA8l8yBsjLBsQ8el+Dv6KgmmGNrKK6Kz2JTBX41QeEWpX8dVVqYOIOYNpBYMRPdLMR1J/OQ6KW6uFZ2ctLDMuCtQMdq6VoC10ktZ3c5S7jEc3CW1orztRrjviZPnM39tONfWxUvoy/vRSAdA0sXQfKul3ofJzxCPj16YLdY2J1N7rlWT9I0noCBbOv7y2yvsc/M7ti6bjW6BjoClGkOrLh3fwxO+8HCBAp4NDhVeMnejWnwCAA/P0/alMevVoOBpQ0Q8ilHp1vxTw542D7f2ns9M9cYM/HBZ9juFysJHq9R5kVafHAd2HAgkDgIw/6ftlp9Bm1rUVjQqlgb+R84IQuq7kELaPLlLOfPPbYMUu+wd/3OQCRzuV9PqQgv6OX/mkaa1XRHZakOkKXttNy/8wUYLuuFmSbSYBvG8pGsXOl+kfbdBmL0hJF6bppCT9dwOXIYvl9xEbR51ahouvX5R00uVFI595y7bzqly8gZA6FgsZtpmr6FMdVKixtUEdIqlMZ2Mt4KbAgmF+fqMlQGqck3ZQnWm7sp5qgGg/jTKji2LvJn/el3YEvGgELGlDTg/y/I63W5GS7QiHg/SFNMbkT/UxTM3NR19UO4b0KLjlUKRVYOKoFvJ1tkZGVhRE338PQxxvwptVavNXERLeu1Hg5+y/0efPfV0WRlpvGRBZ+TJvAAaGqTJ8h5IFr9ynlMfBJzRCS9HhXXkts50pBoupMoZBn+I4slmeDJA9i6GKnUFnWBaHRIJrBuXuq6GKTAM12AXTxlk7uQQNppiMlTs48KWmGEAAMW0SzyZpSFttz8NAdEyKwfgIFhe6cBCAADfsab9tmEmUnJd8GfhtBAz7/jlR0VT9bsdJ4rXTqPWC/LpNi0JfAzHPA23coxboslmGVNc9gGgTlZVFGndK67JdzuTUAOr8h/79uJ/MHAaxkpMLS0mfv7ilaLmFlL2cVBo+g5SktnqfPVsuxBWcPlSpg5Eog7BXgxciCyxtCRtMxlJUsF6MGaECcnUbnYI98wRBBMAg4F7JsLC2B6hMBQLc55hX3r9OCArC5uvoRak3Zz7aaIgVIFCoKmNk4UeHi0WsoC0vjWQUzhKROY+cpoK/VyktrShMQAihAk7/jWr/5tHQMoOOzJNkLlckwy6NhH8uaJhSmdkMKLkIALm4BlvUEfuhGRcvvnJTrr5QmOwigIN3z66mL34OrdK3L0hVLzcuVC+ZWREDI2U9fQBb7Cg8CKxUCejfxwG+T2yPiP13x9+tdEe57BUJ6At0c9ptvOhOi7RQa22hzKHN30zQKRDp4GQUeLCZNJl3aRue0sxsACPL72Pe5vG3SLapnBIGy5ErCylaXyWlF1+30+wBEOtcMWwxM+gd4ehn0tRDXjJHPz/0XGE9+VZaec+l6FHuMAgYPYuQMuy5vFP3cwvi0pSWcWSm0SuCzAAr42brQdQSg8efIlQAEmtC7uFXuBFmSMhEdX6VrTewxOatDGgM2H0XZtIKCxop/vQokXCz4GlKtsPLInPZtS98fMK+gdGm55Ws9L4oGy8VGlvx11XZAc92k6uZXKGBi2CzH0jGlVAsn9S79SU+gz8jWmXS9E0WqA7igPhWkN5HhJRWb/k/vhgh01+CLkaHUQMcEP1c77P1PZxxvusGovlmD058XDKqcXkP
3Gz5t6TrwpPEPo0nelFh5WacZeNRfVWnz6AMC4LF1bcQl0SC7ntsTHhCS1qZ2ewsYsaLsO4o8iUJ02Tjn/qAT10ODOjfSCbNuZ1oKZi57N7m2gFRI2ZTcbPl7SLMzAA2IW71A/474iE6m+hpC5VDHwRIDvqCZmOw0YLVulsqnDRW1NGRlC3TRzSBJXcmkmTX9bMUt47TJiA91M3StjTOCntSCsoJgfJEO6EwX3rLWKVyeOSqL7kGsaNKSlRMrqdOifllkd/lmVaGgzJuhi4q+GXJrQFlxpgKaCoU86x61XE4Tl5aLBXQ1PVCTsnkuby84+yaKNKudmURZNmHFtCaWCIJx0XzHMljaYI7Ggyio9vRS42U1Tt7A9KPAqyeLro/wJHIJoJpquRlUtPNhDAX9VLZyF7KyZOdCxfzdmwLtXyr71y9vhgFLS7uLFSX4GWBKBAVJlNaU8bdzDgWGTuqWRpU2IAQAjnWAsX/QjfOdE7ScJjOZ2oPnZlCQ07WCJjQ6vU43sFf+Nt0pMZ96tTU0UXl8BT3QYmzhDRgEgcaH3d+m63NAFwpYlzQAIZHqCF39h24oAbrm9dYtF4/4kDJVYiLonAxQ4LM0Qag6LYBp+4ExG6jpxBtXgJkXKDtboQAa9AK66bJjLuoC7z3ekbt8VTZHL/nnvmsu/YxELXV6LWm2lkIBjP+TOpw6+1EGHUDLRg2XPtXtRIEcgDq2SceZJR3G9O+jjvxz/mcuLYu/tBWAQPVLGw8CBugCgidWAt+1o4L6x1dQ4OHRDSBZ13TEt73l37841g7ysubSlFswl77TmG4scH0vjZPVGvlzUlIdw+n3mhIL/PaM/Fkyp7tYfu2n05K1Cdupxlj/BQAEGsf8MYWyMv+YTNnGd07In6F8BEHAjJ4N8M/Mrmjlb6K2lUQUodoaDueYTfS7Hvw1BUTjjsuT6rrtcFIXWGwxxvL3VRHUBlm8JhofFIYDQlVV+n06OQsKXEih2TpXezVq2VeB5VdWtkC3WeWTfvkkajQAeG4dpQjfjQa+7wpEfExpqFKnMHO6i+UntYY8uBA49K3pbc5tpFRzjafc4lLSMZyCJvFnqctUXjatIXb0sXxfypJKDYz8hYrQirrsHqkOQH4txspL3KTOGgAFTUJ0waQ9H9Ns7a0j8om83/yqkwVjGLiypKOEJVTWdMMx4HP5uGLlp2Ffuf7P9lnAsaX079IOyEyp15XqEmlzgV+epo5R0oxgYYWgfdvS5ykzGdj+JqX3x50A9i4AFrWj4LagBIZ8Y9n6+cYGA0OnCjrPWNlSUE2qjWBIbVc+AdbyplDKv7tVIymbEqAbi7Ludiip1w14+VDVDBhLN1oqm7I/h3q3pCDGzPPU+bRBH7quAlT0vSTdzExxawCMWU83Kdf3Akt7yDUAvVtX3PXMtb58Tdr/edHbSh7E6ILQQvFZ4dYOQNc3geHLgPF/AS8doCVmpeHdijLcslJoGZdrA6DrbApMdNMV8P93MdUVlDKfSrpczJB7Ywr8eAbThFb+31GXN+ncDNASus6lDHyVtbDplLWSdk/uqNT1zdK9ppMPBeJeO03B1Od+p9qT+XV/mwLQjxP1k98lCggBVNOpdmOq2/er7jrQeJCc3dFmEu1Hw/4UDLh7ijqo/dRfbk5S0qYj5ug7jyYzLSlgXlKGS8ZEke5FAApql/Za6OwLvHQYaP8yAIEy/awdS3avp1QBfu2pS6BXCAVKhy+j38/Z9XTvpFDJS9EM646VxNkNlJEmKCmTuNV4KmkCULaiNDEWG0UNF1S2Jc8grAjS+KCohkb5VJE7IlaArn5Qrp07XllzGgDQws+5EneIFalhH2DqfkoxzEqmLi4/D5YLQpYkRbj5SLlmz99vFwwKiSLVywGAtpML3rjZucizMNI6eifvJ6NAmq0zMOZ3GlArVEDjoaa3U6kpO0JQ0Iyf4VKGVhMoZTvuON00Le8LQKTBrG8FpNeXFWdfqtFkX9s4y6vMv48
fpeyX1w0lM9blv0CbKQBEGqhCKL/2pb0/pCyC5FuU/i914yssIKRQysda1HJK71/aHdjzEQ2GlNZUQ6JOqGX74deBshyA8i8oXd0N+06X5SlQJ0ag9MvFqquALgAEOveX102dvRvdQIxZB8y6TjdG0w6WbcDRpxXwwhaatHlwVb4JqojlYoY6zQQgUB0eaclaUaRMgcCeJVuSXloKhcGkkkAttKVMzK5v0vKtZsOBWnXpsfK+1hru17OrKUuxz4dPXqayyprGV5J63cuueLkg0PmqYV/T71tlDTz9PY3hAPqdlLTOmNKKausB1MkTkGtQShr2BZ5bA/znMgV21RoqCi81ayjPRhv+YVTCoCJWTEiZxMm3KcgXe5SCG6XNwpNYa4B+84DJ/1Bpij4flV2d2OBnqMaUWkPBwikR1E1PoQJuHTYrY9GkvBz5Hqjrm/Jnv8MrdNw9vEYT7yd/o5paAG1j41jad1R+pDpChXWnNEEQRQsqDlUDKSkpcHJyQnJyMhwdn+BfZnEubQdWP4vzqIcBmR8h0F2DXye1g6dTGayNZ+UnL4dm52P2UOQ27R51bBm7sWSvJ4rULWHvp/T//p8B7abSv28eBn7qR7Oir583fTHNTge+CpVnYOp2pgHnkyItgYpoejQpervcLFozm39gERtFhbMv76QLoNoBmH6k4rITyoooPnmDRVZ62jzqcHJ+EwVLJm4vv++Vdp8Gf/cvUlcRz2Aa8BQmK5WKq96JBu6dBh5coxvS4BGU0VjS+j9/vUbp+H3nAWEvl+w1mCz2OBVEvXsaGLep8CBfTZcaT1m61aGJRdp9ygqTuh+NWW9ZDcKy8Ps4uVuUexMax7SeWHDZS2428GUTymof9WvZLtmzRNxxYMVgCtp1n1P4do8f0liivAKHVY0oUo2jq7uoC1hFT6Yd+BL45z3KpHpubele64+pVP+lXjdgXNEdepF0i5arSSUHxv9VPVY1iCLwaV1a8m3nSpNRnV6nQuhVRU4mBQylMfGGyTRWCRlN2ZoAfY6v/E0Nau5fpHudjq+ZPk9G/QRsCaes6NeiKUtRcnQpsC1fsExhRUvZnuSJZW0e8FkAUpKT4DQ/1ayYBweEqqK7p5G27X/Q3I7ErryWWFj7A6yc2BaumipW6LGmE0Ug5Q7NLJa2SOeeTygoJCjpQhfQmdqqXviLCixLrYNNMTzhtXielldUN6JIhQnV9pRxw9iTIjebsnb8O5auG1FVkZVGxV0bDymb4r6Mal1kJlnWgphVbXm5tLzp4TW6Zld0oCvpFvDHi7QUG7rbCFsX6lJnWPz33EZg3Qu0bP31s5WbgcoTKyWTl0MTBJVxfhFF4OpuKnTv4Fm618pKpSVgzYabVx9KFCnQkHybMoqqy7GztCcQp+tOaO1EQZCqfO2IPQ4s60GBmtfPURbz+om6Yu75tJ5E2XhSNlZOBvB1S2rq0W9+wRp5eTnAD92B+DOAVygFlJoMrRqdSdc+j5ToPzkgVJgqHRB6EEOzq9KsEIDlmhcxfPoncLLlZR41migCG6fR7Id9barBs2IA1eB5+V/AvYi117nZwLetqctYj3fkQs2MMcYYY4V5/JDqAx1cSLVXHH2ASTspCzfjEfDbSMpM7PJfGl8wxirXxmmUOQ9UnzH/sl5UgsOnDWUDilrqaFevG9XySrwMHP2BtnWpT0vDGg2kbOW/36Hz1ozjpiepcrMocGRJ458nwbEfkfLH62YHhJ6AYiHMLKIIbJwKxB6DFkr8ldcO66yGYOG0iRwMYjRzMehLIP4cRbJXDqETYv0eRQeDAJpZHP4jFVUMfb5i9pcxxhhjVZudCxVsD+hKdfoeXKHi9c1HAIe+ocL0CituVsDYk0LqNGZfG2hXBbtGmtJuGgWEpLqsIc9R3SjDGm5BA4BNL1NXzo1TqXaSoCul3G124RnLKuvSr+KoDBYuH+ei0lXFxa0UDFLZolf253gt5xWMHjYMbrxMjEnUdsColVTbIy+
bHmtvZo0O3zZUWb80bVYZY4wxVvPYu1ItREdvWrIR8REFg9ybUKHtyigmzRgrKORZCuAOXVR9amU1GUqd5JTWwOCvqOlC/oL+9btTp8xub1GWUG4GkJNOnddCRlfOfpcnl3oW1UvlJWNVQV4usLgDkHgJq9Qj8FbKUxjU3AvfPteysveMPYku7wRWP0tV+Kfuqzrt1RljjDFWdd2/BKwcSoWZu79FRegVysreK8ZYdZeVBkA0LgpdGFGkDokxEUCTIcZ1z6qRlHWvwGnkIq4hZEpVDAjlRq2EassMpAgO6JjxJaw1zvj79a5wsa8G3TJY+Xh4jTqq2Naq7D1hjDHGWE2Rl0tBoOpShJcxxqqglORkODk7mxXz4NSBJ9yeMzfxYMt7AICvs4cg18oBn48I4WAQK5pLPQ4GMcYYY6xiKVUcDGKMscpmwXmYi0o/aXKzqer5zYPIyHiM2teuwkN4gHtwg1uP6Tgc1gDOdhwMYowxxhhjjDHGWMlxQOhJIYrAxS3ArndpuQ8AWwDNdME91yEfYFrLppW3f4wxxhhjjDHGGKs2OCD0JMhMAdaNp+JWAGDvjpN1nsXq8xnQKq3xxlOd4Rnat3L3kTHGGGOMMcYYY9XGE1FDaNGiRahbty5sbGzQrl07HD16tMjt161bh0aNGsHGxgbBwcHYtm1bBe1pOXj8EPhlGAWDVDZA5zdw+/mDeO5iR/ye1x3N+k6GZ4t+vB6bMcYYY4wxxhhjZabSA0Jr167FzJkzMXfuXJw4cQIhISHo27cvEhISTG5/6NAhjB49GpMmTcLJkycxbNgwDBs2DGfPnq3gPS8D6YnAz0OAuOOArQvEiTsQ6TMV41edR0ZOHtoFuGBcWN3K3kvGGGOMMcYYY4xVM5Xedr5du3Zo06YNvv32WwCAVquFr68vZsyYgdmzZxfYftSoUUhPT8eWLVv0j7Vv3x6hoaFYsmRJsd9Paju/b903cLRVA2IeFGIeBG0uBFELoHx/HIKYB5uMe7BLuwXnh6dgk5mALGs3HOr0I5ZdssHBqw8AAC72amx6uSP8XO3KdX8YY4wxxhhjjDFWPUgxD3PazldqDaHs7GwcP34cc+bM0T+mUCjQq1cvHD582ORzDh8+jJkzZxo91rdvX2zatMnk9llZWcjKytL/PyUlBQAQcvwtOFpX/jKsO6ILnk+ZjWtb0wGkQ61UYFyYP6Z3D0Qtbi3PGGOMMcYYY4yxclCpAaHExETk5eXBw8PD6HEPDw9cvHjR5HPu3btncvt79+6Z3H7evHl4//33Czx+3qop7NRqaKFEnqCEFgpooYAIy4NElj7nkdIVd5VeuKfyxkWrJnBV2MEVQP3aGkzvHghfF84KYowxxhhjjDHGWPmp9l3G5syZY5RRlJKSAl9fXzR5Y2ex6VOMMcYYY4wxxhhj1VGlBoTc3NygVCoRHx9v9Hh8fDw8PT1NPsfT09Oi7a2trWFtbV02O8wYY4wxxhhjjDFWDVRqlzG1Wo1WrVph9+7d+se0Wi12796NsLAwk88JCwsz2h4Adu3aVej2jDHGGGOMMcYYY8xYpS8ZmzlzJsaPH4/WrVujbdu2WLhwIdLT0zFhwgQAwLhx4+Dt7Y158+YBAF577TV07doVX3zxBQYOHIg1a9YgKioKP/zwQ2W+DcYYY4wxxhhjjLEqo9IDQqNGjcL9+/fx7rvv4t69ewgNDcWOHTv0haNv3boFhUJOZOrQoQNWrVqFd955B2+99RYaNGiATZs2oVmzZpX1FhhjjDHGGGOMMcaqFEEURbGyd6IiJScnw9nZGbdv3+ai0owxxhhjjDHGGKs2pEZaSUlJcHJyKnLbSs8QqmgPHjwAAPj6+lbynjDGGGOMMcYYY4yVvdTUVA4I5efi4gKAlqIV98NhrKxIUVrOTGMVhY85VtH4mGMVjY85Vhn4uGMVjY85ZilRFJGamoo6deoUu22NCwhJ9YicnJz4A8UqnKOjIx93rELxMccqGh9zrKL
xMccqAx93rKLxMccsYW7yS6W2nWeMMcYYY4wxxhhjFY8DQowxxhhjjDHGGGM1TI0LCFlbW2Pu3Lmwtrau7F1hNQgfd6yi8THHKhofc6yi8THHKgMfd6yi8THHylONazvPGGOMMcYYY4wxVtPVuAwhxhhjjDHGGGOMsZqOA0KMMcYYY4wxxhhjNQwHhBhjjDHGGGOMMcZqGA4IMcYYY4wxxhhjjNUw1TIgtGjRItStWxc2NjZo164djh49WuT269atQ6NGjWBjY4Pg4GBs27atgvaUVReWHHMrVqyAIAhGf2xsbCpwb1lVt2/fPgwePBh16tSBIAjYtGlTsc+JjIxEy5YtYW1tjcDAQKxYsaLc95NVL5Yed5GRkQXOdYIg4N69exWzw6xKmzdvHtq0aQMHBwe4u7tj2LBhuHTpUrHP4zEdK42SHHc8rmOlsXjxYjRv3hyOjo5wdHREWFgYtm/fXuRz+DzHylK1CwitXbsWM2fOxNy5c3HixAmEhISgb9++SEhIMLn9oUOHMHr0aEyaNAknT57EsGHDMGzYMJw9e7aC95xVVZYecwDg6OiIu3fv6v/cvHmzAveYVXXp6ekICQnBokWLzNr++vXrGDhwILp3747o6GiEh4dj8uTJ2LlzZznvKatOLD3uJJcuXTI637m7u5fTHrLqZO/evZg+fTqOHDmCXbt2IScnB3369EF6enqhz+ExHSutkhx3AI/rWMn5+Phg/vz5OH78OKKiotCjRw8MHToU586dM7k9n+dYWat2befbtWuHNm3a4NtvvwUAaLVa+Pr6YsaMGZg9e3aB7UeNGoX09HRs2bJF/1j79u0RGhqKJUuWVNh+s6rL0mNuxYoVCA8PR1JSUgXvKauOBEHAxo0bMWzYsEK3mTVrFrZu3Wo0WHj22WeRlJSEHTt2VMBesurGnOMuMjIS3bt3x6NHj+Ds7Fxh+8aqp/v378Pd3R179+5Fly5dTG7DYzpW1sw57nhcx8qai4sLFixYgEmTJhX4Gp/nWFmrVhlC2dnZOH78OHr16qV/TKFQoFevXjh8+LDJ5xw+fNhoewDo27dvodszZqgkxxwApKWlwd/fH76+vkXOAjBWFvg8xypTaGgovLy80Lt3bxw8eLCyd4dVUcnJyQDoRqkwfK5jZc2c4w7gcR0rG3l5eVizZg3S09MRFhZmchs+z7GyVq0CQomJicjLy4OHh4fR4x4eHoXWLLh3755F2zNmqCTHXFBQEJYvX47Nmzfj119/hVarRYcOHRAbG1sRu8xqoMLOcykpKcjIyKikvWLVnZeXF5YsWYINGzZgw4YN8PX1Rbdu3XDixInK3jVWxWi1WoSHh6Njx45o1qxZodvxmI6VJXOPOx7XsdI6c+YMNBoNrK2tMW3aNGzcuBFNmjQxuS2f51hZU1X2DjBW04SFhRlF/Tt06IDGjRvj+++/x4cffliJe8YYY2UnKCgIQUFB+v936NABMTEx+PLLL/HLL79U4p6xqmb69Ok4e/YsDhw4UNm7wmoQc487Htex0goKCkJ0dDSSk5Oxfv16jB8/Hnv37i00KMRYWapWGUJubm5QKpWIj483ejw+Ph6enp4mn+Pp6WnR9owZKskxl5+VlRVatGiBq1evlscuMlboec7R0RG2traVtFesJmrbti2f65hFXnnlFWzZsgV79uyBj49PkdvymI6VFUuOu/x4XMcspVarERgYiFatWmHevHkICQnBV199ZXJbPs+xslatAkJqtRqtWrXC7t279Y9ptVrs3r270HWYYWFhRtsDwK5duwrdnjFDJTnm8svLy8OZM2fg5eVVXrvJajg+z7EnRXR0NJ/rmFlEUcQrr7yCjRs3IiIiAgEBAcU+h891rLRKctzlx+M6VlparRZZWVkmv8bnOVbWqt2SsZkzZ2L8+PFo3bo12rZti4ULFyI9PR0TJkwAAIwbNw7e3t6YN28eAOC1115D165d8cUXX2DgwIFYs2YNoqKi8MMPP1Tm22B
ViKXH3AcffID27dsjMDAQSUlJWLBgAW7evInJkydX5ttgVUhaWprRzOP169cRHR0NFxcX+Pn5Yc6cOYiLi8PKlSsBANOmTcO3336LN998ExMnTkRERAR+//13bN26tbLeAquCLD3uFi5ciICAADRt2hSZmZlYtmwZIiIi8Pfff1fWW2BVyPTp07Fq1Sps3rwZDg4O+voYTk5O+sxGHtOxslaS447Hdaw05syZg/79+8PPzw+pqalYtWoVIiMjsXPnTgB8nmMVQKyGvvnmG9HPz09Uq9Vi27ZtxSNHjui/1rVrV3H8+PFG2//+++9iw4YNRbVaLTZt2lTcunVrBe8xq+osOebCw8P123p4eIgDBgwQT5w4UQl7zaqqPXv2iAAK/JGOs/Hjx4tdu3Yt8JzQ0FBRrVaL9erVE3/66acK329WtVl63H366adi/fr1RRsbG9HFxUXs1q2bGBERUTk7z6ocU8caAKNzF4/pWFkryXHH4zpWGhMnThT9/f1FtVot1q5dW+zZs6f4999/67/O5zlW3gRRFMWKDEAxxhhjjDHGGGOMscpVrWoIMcYYY4wxxhhjjLHicUCIMcYYY4wxxhhjrIbhgBBjjDHGGGOMMcZYDcMBIcYYY4wxxhhjjLEahgNCjDHGGGOMMcYYYzUMB4QYY4wxxhhjjDHGahgOCDHGGGOMMcYYY4zVMBwQYowxxhgz4YUXXsCwYcMq/PuuWLECgiBAEASEh4eb9ZwXXnhB/5xNmzaV6/4xxhhjrHpQVfYOMMYYY4xVNEEQivz63Llz8dVXX0EUxQraI2OOjo64dOkS7O3tzdr+q6++wvz58+Hl5VXOe8YYY4yx6oIDQowxxhirce7evav/99q1a/Huu+/i0qVL+sc0Gg00Gk1l7BoAClh5enqavb2TkxOcnJzKcY8YY4wxVt3wkjHGGGOM1Tienp76P05OTvoAjPRHo9EUWDLWrVs3zJgxA+Hh4ahVqxY8PDywdOlSpKenY8KECXBwcEBgYCC2b99u9L3Onj2L/v37Q6PRwMPDA2PHjkViYqLF+/zdd9+hQYMGsLGxgYeHB5555pnS/hgYY4wxVoNxQIgxxhhjzEw///wz3NzccPToUcyYMQMvvfQSRowYgQ4dOuDEiRPo06cPxo4di8ePHwMAkpKS0KNHD7Ro0QJRUVHYsWMH4uPjMXLkSIu+b1RUFF599VV88MEHuHTpEnbs2IEuXbqUx1tkjDHGWA3BS8YYY4wxxswUEhKCd955BwAwZ84czJ8/H25ubpgyZQoA4N1338XixYtx+vRptG/fHt9++y1atGiBTz75RP8ay5cvh6+vLy5fvoyGDRua9X1v3boFe3t7DBo0CA4ODvD390eLFi3K/g0yxhhjrMbgDCHGGGOMMTM1b95c/2+lUglXV1cEBwfrH/Pw8AAAJCQkAABOnTqFPXv26GsSaTQaNGrUCAAQExNj9vft3bs3/P39Ua9ePYwdOxa//fabPguJMcYYY6wkOCDEGGOMMWYmKysro/8LgmD0mNS9TKvVAgDS0tIwePBgREdHG/25cuWKRUu+HBwccOLECaxevRpeXl549913ERISgqSkpNK/KcYYY4zVSLxkjDHGGGOsnLRs2RIbNmxA3bp1oVKVbtilUqnQq1cv9OrVC3PnzoWzszMiIiLw9NNPl9HeMsYYY6wm4QwhxhhjjLFyMn36dDx8+BCjR4/GsWPHEBMTg507d2LChAnIy8sz+3W2bNmCr7/+GtHR0bh58yZWrlwJrVaLoKCgctx7xhhjjFVnHBBijDHGGCsnderUwcGDB5GXl4c+ffogODgY4eHhcHZ2hkJh/jDM2dkZf/zxB3r06IHGjRtjyZIlWL16NZo2bVqOe88YY4yx6kwQRVGs7J1gjDHGGGNkxYoVCA8PL1F9IEEQsHHjRgwbNqzM94sxxhhj1QtnCDHGGGOMPWGSk5Oh0Wgwa9Yss7afNm0aNBpNOe8VY4wxxqoTzhBijDH
GGHuCpKamIj4+HgAtFXNzcyv2OQkJCUhJSQEAeHl5wd7evlz3kTHGGGNVHweEGGOMMcYYY4wxxmoYXjLGGGOMMcYYY4wxVsNwQIgxxhhjjDHGGGOshuGAEGOMMcYYY4wxxlgNwwEhxhhjjDHGGGOMsRqGA0KMMcYYY4wxxhhjNQwHhBhjjDHGGGOMMcZqGA4IMcYYY4wxxhhjjNUwHBBijDHGGGOMMcYYq2E4IMQYY4wxxhhjjDFWw/w/FLxft4Z7TiEAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Compute spectrum for remaining features\n", + "hann = torch.hann_window(window_samples).view(1, 1, -1)\n", + "spectrum = torch.abs(torch.fft.rfft(frames * hann))\n", + "spectral_feats = compute_spectral_features(spectrum)\n", + "# The last feature is flux\n", + "plt.plot(os_xs, opensmile_feats.spectralFlux_sma3)\n", + "plt.plot(xs, spectral_feats[0, :, -1])\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Spectral Flux\")\n", + "\n", + "plt.legend([\"OpenSmile\", \"SpeechBrain\"])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "a82a2e32", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele 
Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "uvpy311", + "language": "python", + "name": "uvpy311" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/tutorials/tasks.rst b/docs/tutorials/tasks.rst new file mode 100644 index 0000000000..5586f1cb18 --- /dev/null +++ b/docs/tutorials/tasks.rst @@ -0,0 +1,140 @@ +Speech Processing Tasks +======================= + +.. + Originally generated with https://gist.github.com/asumagic/19f9809480b62bfd16094fb5c844a564 but OK to edit in repo now. + Please ensure for each tutorial that you are adding it to the hidden toctree at the end of the file! + +.. toctree:: + :hidden: + + tasks/speech-recognition-from-scratch.ipynb + tasks/asr-metrics.ipynb + tasks/source-separation.ipynb + tasks/speech-enhancement-from-scratch.ipynb + tasks/speech-classification-from-scratch.ipynb + tasks/voice-activity-detection.ipynb + tasks/forced-alignment.ipynb + +.. rubric:: `🔗 Speech Recognition From Scratch `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. & Parcollet T. + - Apr. 2021 + - Difficulty: medium + - Time: 45min + - `🔗 Google Colab `__ + + +Do you want to figure out how to implement your speech recognizer with SpeechBrain? 
Look no further, you're in the right place. This tutorial will walk you through all the steps needed to implement an offline end-to-end attention-based speech recognizer. This is a self-contained tutorial that will help you "connecting the dots" across all the steps needed to train a modern speech recognizer. We will address data preparation, tokenizer training, language model, ASR model, and inference. We will explain how to train your model on your data. + +.. rubric:: `🔗 Metrics for Speech Recognition `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - de Langen S. + - Sep. 2024 + - Difficulty: medium + - Time: 30min + - `🔗 Google Colab `__ + + +Estimating the accuracy of a speech recognition model is not a trivial problem. +The Word Error Rate (WER) and Character Error Rate (CER) metrics are standard, +but some research has been trying to develop alternatives that better correlate +with human evaluation (such as SemDist). + +This tutorial introduces some alternative ASR metrics and their flexible +integration into SpeechBrain, which can help you research, use or develop new +metrics. + +.. rubric:: `🔗 Source Separation `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Subakan C. + - Jan. 2021 + - Difficulty: medium + - Time: 30min + - `🔗 Google Colab `__ + + +In source separation, the goal is to be able to separate out the sources from an observed mixture signal +which consists of superposition of several sources. In this tutorial, we cover few examples of performing source separation with SpeechBrain. + +.. rubric:: `🔗 Speech Enhancement From Scratch `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Plantinga P. + - Feb. 2021 + - Difficulty: medium + - Time: 30min + - `🔗 Google Colab `__ + + +So you want to do regression tasks with speech? Look no further, you're in the right place. 
+This tutorial will walk you through a basic speech enhancement template with SpeechBrain to +show all the components needed for making a new recipe. + +.. rubric:: `🔗 Speech Classification From Scratch `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Jan. 2021 + - Difficulty: medium + - Time: 30min + - `🔗 Google Colab `__ + + +In this tutorial, we show how to use SpeechBrain to implement an utterance-level speech classifier. +It might help if you want to develop systems for speaker-id, language-id, emotion recognition, sound classification, keyword spotting, and many other tasks. + +.. rubric:: `🔗 Voice Activity Detection `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Ravanelli M. + - Sept. 2021 + - Difficulty: easy + - Time: 15min + - `🔗 Google Colab `__ + + +In this tutorial, we show how to use SpeechBrain for voice activity detection. The tutorial will describe how to train a neural VAD and use it for inference on long audio recordings. + +.. rubric:: `🔗 Forced Alignment `_ + :heading-level: 2 + +.. list-table:: + :widths: 20 20 20 20 20 + :header-rows: 0 + + * - Plantinga P. + - July 2025 + - Difficulty: easy + - Time: 10min + - `🔗 Google Colab `__ + + +In this tutorial, we show how to use SpeechBrain for forced alignment using k2 and a pretrained CTC-based ASR model. 
diff --git a/docs/tutorials/tasks/asr-metrics.ipynb b/docs/tutorials/tasks/asr-metrics.ipynb new file mode 100644 index 0000000000..e35a1f073b --- /dev/null +++ b/docs/tutorials/tasks/asr-metrics.ipynb @@ -0,0 +1,972 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/tasks/asr-metrics.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/tasks/asr-metrics.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Metrics for Speech Recognition\n", + "\n", + "Estimating the accuracy of a speech recognition model is not a trivial problem. The Word Error Rate (WER) and Character Error Rate (CER) metrics are standard, but some research has been trying to develop alternatives that better correlate with human evaluation (such as SemDist).\n", + "\n", + "This tutorial introduces some alternative ASR metrics and their flexible integration into SpeechBrain, which can help you research, use or develop new metrics, with copy&paste-ready hyperparameters.\n", + "\n", + "SpeechBrain v1.0.1 via [PR #2451](https://github.com/speechbrain/speechbrain/pull/2451) introduced support and tooling for the metrics suggested by [Qualitative Evaluation of Language Model Rescoring in Automatic Speech Recognition](https://www.isca-archive.org/interspeech_2022/roux22_interspeech.pdf). 
**We recommend that you read this, as some of the metrics won't be explained in detail here.**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "%pip install spacy\n", + "%pip install flair" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Some boilerplate and test data downloading follows..." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from hyperpyyaml import load_hyperpyyaml\n", + "from collections import defaultdict" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://raw.githubusercontent.com/thibault-roux/hypereval/main/data/Exemple/refhyp.txt -O refhyp.txt" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "bonsoir à tous bienvenue c' est bfm story en direct jusqu' à dix neuf heures à la une\tà tous bienvenue c' est bfm story en direct jusqu' à dix neuf heures\t_\n", + "de bfm story ce soir la zone euro va t elle encore vivre un été meurtrier l' allemagne première économie européenne pourrait perdre son triple a la situation se détériore en espagne\tbfm story ce soir la zone euro va t elle encore vive été meurtrier allemagne première économie européenne pourrait perdre son triple a la situation se détériore en espagne\t_\n", + "pourquoi ces nouvelles tensions nous serons avec un spécialiste de l' espagne et nous serons avec le député socialiste rapporteur du budget en direct de l' assemblée nationale christian eckert\tces nouvelles tensions sont avec un spécialiste de l' espagne et nous serons avec le député socialiste rapporteur du 
budget de l' assemblée nationale christian eckert\t_\n", + "à la une également la syrie et les armes chimiques la russie demande au régime de bachar al assad de ne pas utiliser ces armes\tla une également la syrie et les armes chimiques la russie demande au régime de bachar el assad ne pas utiliser ses armes\t_\n", + "de quel arsenal dispose l' armée syrienne\tquelle arsenal dispose l' armée syrienne\t_\n", + "quels dégats pourraient provoquer ces armes chimiques\tdégâts pourraient provoquer ses armes chimiques\t_\n", + "un spécialiste jean pierre daguzan nous répondra sur le plateau de bfm story et puis\tspécialistes ont bien accusant nous répondra sur le plateau de bfm story puis\t_\n", + "après la droite populaire la droite humaniste voici la droite forte deux jeunes pousses de l' ump guillaume peltier et geoffroy didier lancent ce nouveau mouvement pourquoi faire ils sont mes invités ce soir\tla droite populaire la droite humaniste voici la droite forte deux jeunes pousses de l' ump guillaume peltier geoffroy didier migaud pour quoi faire ils sont mes invités ce soir\t_\n", + "et puis c(ette) cette fois ci c' est vraiment la fin la fin de france soir liquidé par le tribunal de commerce nous en parlerons avec son tout dernier rédacteur en chef dominique de montvalon\tcette fois ci c' est vraiment la fin à la fin de france soir liquidé par le tribunal de commerce nous en parlerons avec tout dernier rédacteur en chef dominique de montvalon\t_\n", + "damien gourlet bonsoir avec vous ce qu' il faut retenir ce soir dans l' actualité l' actualité ce sont encore les incendies en espagne\tdamien gourlet bonsoir olivier avec vous ce qu' il faut retenir ce soir dans l' actualité actualité se sont encore les incendies en espagne\t_\n" + ] + } + ], + "source": [ + "!head refhyp.txt" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "refs = []\n", + "hyps = []\n", + "\n", + "# some preprocessing for the example 
file + load uposer mapping to a test file\n", + "\n", + "def split_norm_text(s: str):\n", + " # s = s.replace(\"' \", \"'\")\n", + "\n", + " if s != \"\":\n", + " return s.split(\" \")\n", + "\n", + " return s\n", + "\n", + "with open(\"refhyp.txt\") as f:\n", + " for refhyp in f.read().splitlines():\n", + " if len(refhyp) <= 1:\n", + " continue\n", + "\n", + " refhyp = refhyp.split(\"\\t\")\n", + " refs.append(split_norm_text(refhyp[0]))\n", + " hyps.append(split_norm_text(refhyp[1]))\n", + "\n", + "with open(\"uposer.json\", \"w\") as wf:\n", + " wf.write(\"\"\"[\n", + " [\"ADJ\", \"ADJFP\", \"ADJFS\", \"ADJMP\", \"ADJMS\"],\n", + " [\"NUM\", \"CHIF\"],\n", + " [\"CCONJ\", \"COCO\", \"COSUB\"],\n", + " [\"DET\", \"DETFS\", \"DETMS\", \"DINTFS\", \"DINTMS\"],\n", + " [\"X\", \"MOTINC\"],\n", + " [\"NOUN\", \"NFP\", \"NFS\", \"NMP\", \"NMS\"],\n", + " [\"PRON\", \"PDEMFP\", \"PDEMFS\", \"PDEMMP\", \"PDEMMS\", \"PINDFP\", \"PINDFS\",\n", + " \"PINDMP\", \"PINDMS\", \"PPER1S\", \"PPER2S\", \"PPER3FP\", \"PPER3FS\", \"PPER3MP\",\n", + " \"PPER3MS\", \"PPOBJFP\", \"PPOBJFS\", \"PPOBJMP\", \"PPOBJMS\", \"PREF\", \"PREFP\",\n", + " \"PREFS\", \"PREL\", \"PRELFP\", \"PRELFS\", \"PRELMP\", \"PRELMS\"],\n", + " [\"ADP\", \"PREP\"],\n", + " [\"VERB\", \"VPPFP\", \"VPPFS\", \"VPPMP\", \"VPPMS\"],\n", + " [\"PROPN\", \"XFAMIL\"],\n", + " [\"PUNCT\", \"YPFOR\"]\n", + "]\n", + "\"\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Word Error Rate (WER)\n", + "\n", + "The usual WER metric, which is derived from the Levenshtein distance between the **words** of the reference and hypothesis (i.e. ground truth and prediction respectively). The output is often presented as a percentage, but it can actually exceed 100%, e.g. if you have a lot of insertions.\n", + "\n", + "Of course, what WER is achievable depends _very_ heavily on the dataset, and on the language to an extent. 
On some easy datasets, it can get as low as 1%, and good models on harder datasets can struggle to reach 15%, or even worse in challenging conditions.\n", + "\n", + "The WER is defined as the following (where `#` means \"number of\"):\n", + "\n", + "$\\dfrac{\\#insertions + \\#substitutions + \\#deletions}{\\#refwords}$\n", + "\n", + "To understand what exactly is an insertion/substitution/deletion, you should understand the [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance), an edit distance. \n", + "Roughly speaking, an insertion is a word your model has predicted but does not exist in the reference, a substitution is a word your model has gotten wrong or spelled incorrectly, and a deletion is a word your model has incorrectly omitted.\n", + "\n", + "A limitation of the WER is that all errors are weighed equally. For example, a typo from \"processing\" to \"procesing\" does not meaningfully alter meaning, but an error from \"car\" to \"scar\" might drastically alter meaning, yet both are considered a single-word and single-character error. This can result in drastic discrepancies between the WER/CER and human evaluation."
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "wer_hparams = load_hyperpyyaml(\"\"\"\n", + "wer_stats: !new:speechbrain.utils.metric_stats.ErrorRateStats\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'WER': 15.451152223304122,\n", + " 'SER': 90.83899394161924,\n", + " 'num_edits': 19042,\n", + " 'num_scored_tokens': 123240,\n", + " 'num_erroneous_sents': 4948,\n", + " 'num_scored_sents': 5447,\n", + " 'num_absent_sents': 0,\n", + " 'num_ref_sents': 5447,\n", + " 'insertions': 1868,\n", + " 'deletions': 7886,\n", + " 'substitutions': 9288,\n", + " 'error_rate': 15.451152223304122}" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "wer_hparams[\"wer_stats\"].clear()\n", + "wer_hparams[\"wer_stats\"].append(\n", + " ids=list(range(len(refs))),\n", + " predict=hyps,\n", + " target=refs,\n", + ")\n", + "wer_hparams[\"wer_stats\"].summarize()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Character Error Rate (CER)\n", + "\n", + "The typical CER measure, for reference. The CER works the same as the WER, but instead operates at character level (not word or token level). \n", + "Ultimately, the CER penalizes various errors differently. Small typos (e.g. missed accents) would result in a full substitution error with the WER, but only result in one character substitution error with the CER. This isn't necessarily an upside since single-character errors can still alter meaning.\n", + "\n", + "This is slower to run as the edit distance needs to be computed over a comparatively much longer sequence." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "cer_hparams = load_hyperpyyaml(\"\"\"\n", + "cer_stats: !new:speechbrain.utils.metric_stats.ErrorRateStats\n", + " split_tokens: True\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'WER': 8.728781317403753,\n", + " 'SER': 90.83899394161924,\n", + " 'num_edits': 57587,\n", + " 'num_scored_tokens': 659737,\n", + " 'num_erroneous_sents': 4948,\n", + " 'num_scored_sents': 5447,\n", + " 'num_absent_sents': 0,\n", + " 'num_ref_sents': 5447,\n", + " 'insertions': 10426,\n", + " 'deletions': 36910,\n", + " 'substitutions': 10251,\n", + " 'error_rate': 8.728781317403753}" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cer_hparams[\"cer_stats\"].clear()\n", + "cer_hparams[\"cer_stats\"].append(\n", + " ids=list(range(len(refs))),\n", + " predict=hyps,\n", + " target=refs,\n", + ")\n", + "cer_hparams[\"cer_stats\"].summarize()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Part-of-speech Error Rate (POSER)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2024-03-28 16:27:25.399507: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "2024-03-28 16:27:25.399759: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "2024-03-28 16:27:25.671596: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when 
one has already been registered\n", + "2024-03-28 16:27:26.262645: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2024-03-28 16:27:30.960021: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-03-28 16:28:03,311 SequenceTagger predicts: Dictionary with 69 tags: , O, DET, NFP, ADJFP, AUX, VPPMS, ADV, PREP, PDEMMS, NMS, COSUB, PINDMS, PPOBJMS, VERB, DETFS, NFS, YPFOR, VPPFS, PUNCT, DETMS, PROPN, ADJMS, PPER3FS, ADJFS, COCO, NMP, PREL, PPER1S, ADJMP, VPPMP, DINTMS, PPER3MS, PPER3MP, PREF, ADJ, DINTFS, CHIF, XFAMIL, PRELFS, SYM, NOUN, MOTINC, PINDFS, PPOBJMP, NUM, PREFP, PDEMFS, VPPFP, PPER3FP\n" + ] + } + ], + "source": [ + "poser_hparams = load_hyperpyyaml(\"\"\"\n", + "wer_stats_dposer: !new:speechbrain.utils.metric_stats.ErrorRateStats\n", + "\n", + "uposer_dict: !apply:speechbrain.utils.dictionaries.SynonymDictionary.from_json_path\n", + " path: ./uposer.json\n", + "wer_stats_uposer: !new:speechbrain.utils.metric_stats.ErrorRateStats\n", + " equality_comparator: !ref \n", + "\n", + "pos_tagger: !apply:speechbrain.integrations.nlp.FlairSequenceTagger.from_hf\n", + " source: \"qanastek/pos-french\"\n", + " save_path: ./pretrained_models/\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "refs_poser = poser_hparams[\"pos_tagger\"](refs)\n", + "hyps_poser = poser_hparams[\"pos_tagger\"](hyps)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INTJ PREP DET NFS PDEMMS AUX PROPN XFAMIL PREP NMS PREP PREP 
CHIF CHIF NFP PREP DETFS NFS\n", + "PREP DET NFS PDEMMS AUX PROPN XFAMIL PREP NMS PREP PREP CHIF CHIF NFP\n" + ] + } + ], + "source": [ + "print(\" \".join(refs_poser[0]))\n", + "print(\" \".join(hyps_poser[0]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### dPOSER\n", + "\n", + "Instead of computing WER on input words, we extract (preferably all) the parts-of-speech of the input sentences. The WER is then computed over the sequence of labels." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'WER': 14.70402051648298,\n", + " 'SER': 88.87460987699652,\n", + " 'num_edits': 18118,\n", + " 'num_scored_tokens': 123218,\n", + " 'num_erroneous_sents': 4841,\n", + " 'num_scored_sents': 5447,\n", + " 'num_absent_sents': 0,\n", + " 'num_ref_sents': 5447,\n", + " 'insertions': 2064,\n", + " 'deletions': 8076,\n", + " 'substitutions': 7978,\n", + " 'error_rate': 14.70402051648298}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "poser_hparams[\"wer_stats_dposer\"].clear()\n", + "poser_hparams[\"wer_stats_dposer\"].append(\n", + " ids=list(range(len(refs))),\n", + " predict=hyps_poser,\n", + " target=refs_poser,\n", + ")\n", + "poser_hparams[\"wer_stats_dposer\"].summarize()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "### uPOSER\n", + "\n", + "The cited paper proposes a variant (uPOSER) with broad POS categories, in case that the used POS model has very specific categories. This can simply be implemented by using a synonym dictionary that groups up equivalent labels easily." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'WER': 12.26687659270561,\n", + " 'SER': 86.50633376170369,\n", + " 'num_edits': 15115,\n", + " 'num_scored_tokens': 123218,\n", + " 'num_erroneous_sents': 4712,\n", + " 'num_scored_sents': 5447,\n", + " 'num_absent_sents': 0,\n", + " 'num_ref_sents': 5447,\n", + " 'insertions': 2089,\n", + " 'deletions': 8101,\n", + " 'substitutions': 4925,\n", + " 'error_rate': 12.26687659270561}" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "poser_hparams[\"wer_stats_uposer\"].clear()\n", + "poser_hparams[\"wer_stats_uposer\"].append(\n", + " ids=list(range(len(refs))),\n", + " predict=hyps_poser,\n", + " target=refs_poser,\n", + ")\n", + "poser_hparams[\"wer_stats_uposer\"].summarize()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Lemma Error Rate (LER)\n", + "\n", + "Instead of computing the WER over words, we compute the WER over lemmatized words." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "!spacy download fr_core_news_md" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "ler_hparams = load_hyperpyyaml(\"\"\"\n", + "ler_model: !apply:speechbrain.integrations.nlp.SpacyPipeline.from_name\n", + " name: fr_core_news_md\n", + " exclude: [\"tagger\", \"parser\", \"ner\", \"textcat\"]\n", + "\n", + "wer_stats_ler: !new:speechbrain.utils.metric_stats.ErrorRateStats\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "refs_ler = ler_hparams[\"ler_model\"].lemmatize(refs)\n", + "hyps_ler = ler_hparams[\"ler_model\"].lemmatize(hyps)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "bonsoir à tout bienvenue c ' être bfm story en direct jusqu ' à dix neuf heure à le un\n", + "à tout bienvenue c ' être bfm story en direct jusqu ' à dix neuf heure\n" + ] + } + ], + "source": [ + "print(\" \".join(refs_ler[0]))\n", + "print(\" \".join(hyps_ler[0]))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'WER': 14.426271595988885,\n", + " 'SER': 88.61758766293373,\n", + " 'num_edits': 19105,\n", + " 'num_scored_tokens': 132432,\n", + " 'num_erroneous_sents': 4827,\n", + " 'num_scored_sents': 5447,\n", + " 'num_absent_sents': 0,\n", + " 'num_ref_sents': 5447,\n", + " 'insertions': 2160,\n", + " 'deletions': 10219,\n", + " 'substitutions': 6726,\n", + " 'error_rate': 14.426271595988885}" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ler_hparams[\"wer_stats_ler\"].clear()\n", + "ler_hparams[\"wer_stats_ler\"].append(\n", + " ids=list(range(len(refs))),\n", 
+ " predict=hyps_ler,\n", + " target=refs_ler,\n", + ")\n", + "ler_hparams[\"wer_stats_ler\"].summarize()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Embedding Error Rate (EmbER)\n", + "\n", + "Typical WER calculation, except that we weight the penalty of each word substitution if the words are deemed similar enough. This allows you to reduce the impact of e.g. minor spelling errors that do not alter the meaning much.\n", + "\n", + "Setup for this is slightly more involved but the gist of it is that you need:\n", + "- A regular `ErrorRateStats` object which you will `.append()` to,\n", + "- The embeddings that you will be using, e.g. using the `FlairEmbeddings` wrapper,\n", + "- The EmbER configuration, which will point to the embedding (here binding to `ember_embeddings.embed_word`),\n", + "- The `WeightedErrorRateStats` which piggy backs over the base `ErrorRateStats` and plugs into the EmbER similarity function defined just above." + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "ember_hparams = load_hyperpyyaml(\"\"\"\n", + "wer_stats: !new:speechbrain.utils.metric_stats.ErrorRateStats\n", + "\n", + "ember_embeddings: !apply:speechbrain.integrations.nlp.FlairEmbeddings.from_hf\n", + " embeddings_class: !name:flair.embeddings.FastTextEmbeddings\n", + " source: facebook/fasttext-fr-vectors\n", + " save_path: ./pretrained_models/\n", + "\n", + "ember_metric: !new:speechbrain.utils.metric_stats.EmbeddingErrorRateSimilarity\n", + " embedding_function: !name:speechbrain.integrations.nlp.FlairEmbeddings.embed_word\n", + " - !ref \n", + " low_similarity_weight: 1.0\n", + " high_similarity_weight: 0.1\n", + " threshold: 0.4\n", + "\n", + "weighted_wer_stats: !new:speechbrain.utils.metric_stats.WeightedErrorRateStats\n", + " base_stats: !ref \n", + " cost_function: !ref \n", + " weight_name: ember\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": 25, 
+ "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:gensim.models.fasttext:could not extract any ngrams from '()', returning origin vector\n" + ] + }, + { + "data": { + "text/plain": [ + "{'ember_wer': 12.225677015059036,\n", + " 'ember_insertions': 1868.0,\n", + " 'ember_substitutions': 5541.300000000059,\n", + " 'ember_deletions': 7886.0,\n", + " 'ember_num_edits': 15295.30000000006}" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ember_hparams[\"wer_stats\"].clear()\n", + "ember_hparams[\"wer_stats\"].append(\n", + " ids=list(range(len(refs))),\n", + " predict=hyps,\n", + " target=refs,\n", + ")\n", + "ember_hparams[\"weighted_wer_stats\"].clear()\n", + "ember_hparams[\"weighted_wer_stats\"].summarize()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## BERTScore\n", + "\n", + "In a nutshell, BERTScore works by comparing the cosine similarity of *all* targets and predicted embeddings, as obtained from a BERT-like LM encoder. This works rather well because the embeddings are trained to embed information from their context.\n", + "\n", + "This is best explained by the code and documentation of the metric itself." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bertscore_hparams = load_hyperpyyaml(\"\"\"\n", + "bertscore_model_name: camembert/camembert-large\n", + "bertscore_model_device: cuda\n", + "\n", + "bertscore_stats: !new:speechbrain.utils.bertscore.BERTScoreStats\n", + " lm: !new:speechbrain.integrations.huggingface.TextEncoder\n", + " source: !ref \n", + " save_path: pretrained_models/\n", + " device: !ref \n", + " num_layers: 8\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'bertscore-recall': tensor(0.9033),\n", + " 'bertscore-precision': tensor(0.9237),\n", + " 'bertscore-f1': tensor(0.9134)}" + ] + }, + "execution_count": 34, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "bertscore_hparams[\"bertscore_stats\"].clear()\n", + "bertscore_hparams[\"bertscore_stats\"].append(\n", + " ids=list(range(len(refs))),\n", + " predict=hyps,\n", + " target=refs,\n", + ")\n", + "bertscore_hparams[\"bertscore_stats\"].summarize()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Sentence Semantic Distance: SemDist\n", + "\n", + "Estimated using the cosine similarity of a single embedding for every sentence, e.g. obtained by averaging of LM embeddings over all tokens.\n", + "\n", + "Here, lower is better. The score is normalized by x1000 by default for readability." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "semdist_hparams = load_hyperpyyaml(\"\"\"\n", + "semdist_model_name: camembert/camembert-large\n", + "semdist_model_device: cuda\n", + "\n", + "semdist_stats: !new:speechbrain.utils.semdist.SemDistStats\n", + " lm: !new:speechbrain.integrations.huggingface.TextEncoder\n", + " source: !ref \n", + " save_path: pretrained_models/\n", + " device: !ref \n", + " method: meanpool\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'semdist': 41.13104248046875}" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "semdist_hparams[\"semdist_stats\"].clear()\n", + "semdist_hparams[\"semdist_stats\"].append(\n", + " ids=list(range(len(refs))),\n", + " predict=hyps,\n", + " target=refs,\n", + ")\n", + "semdist_hparams[\"semdist_stats\"].summarize()" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'key': 0, 'semdist': 11.317432403564453},\n", + " {'key': 1, 'semdist': 14.37997817993164},\n", + " {'key': 2, 'semdist': 8.182466506958008},\n", + " {'key': 3, 'semdist': 7.842123508453369},\n", + " {'key': 4, 'semdist': 13.874173164367676}]" + ] + }, + "execution_count": 38, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "semdist_hparams[\"semdist_stats\"].scores[:5]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Some comparisons\n", + "\n", + "This was a bit thrown together, if you've run everything without running out of RAM congratulations :)" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== REF: bonsoir à tous bienvenue c' est bfm story en direct jusqu' à 
dix neuf heures à la une\n", + "=== HYP: à tous bienvenue c' est bfm story en direct jusqu' à dix neuf heures\n", + "WER: 22.222%\n", + "CER: 20.000%\n", + "dPOSER: 22.222%\n", + "uPOSER: 22.222%\n", + "EmbER: 22.222%\n", + "BERTScore recall: 0.87673\n", + "BERTScore precision: 0.96040\n", + "SemDist mean (x1000): 11.31743\n", + "\n", + "=== REF: de bfm story ce soir la zone euro va t elle encore vivre un été meurtrier l' allemagne première économie européenne pourrait perdre son triple a la situation se détériore en espagne\n", + "=== HYP: bfm story ce soir la zone euro va t elle encore vive été meurtrier allemagne première économie européenne pourrait perdre son triple a la situation se détériore en espagne\n", + "WER: 12.500%\n", + "CER: 5.525%\n", + "dPOSER: 15.625%\n", + "uPOSER: 15.625%\n", + "EmbER: 12.500%\n", + "BERTScore recall: 0.91836\n", + "BERTScore precision: 0.91983\n", + "SemDist mean (x1000): 14.37998\n", + "\n", + "=== REF: pourquoi ces nouvelles tensions nous serons avec un spécialiste de l' espagne et nous serons avec le député socialiste rapporteur du budget en direct de l' assemblée nationale christian eckert\n", + "=== HYP: ces nouvelles tensions sont avec un spécialiste de l' espagne et nous serons avec le député socialiste rapporteur du budget de l' assemblée nationale christian eckert\n", + "WER: 16.667%\n", + "CER: 14.062%\n", + "dPOSER: 16.667%\n", + "uPOSER: 16.667%\n", + "EmbER: 13.667%\n", + "BERTScore recall: 0.92581\n", + "BERTScore precision: 0.96108\n", + "SemDist mean (x1000): 8.18247\n", + "\n", + "=== REF: à la une également la syrie et les armes chimiques la russie demande au régime de bachar al assad de ne pas utiliser ces armes\n", + "=== HYP: la une également la syrie et les armes chimiques la russie demande au régime de bachar el assad ne pas utiliser ses armes\n", + "WER: 16.000%\n", + "CER: 5.556%\n", + "dPOSER: 12.000%\n", + "uPOSER: 12.000%\n", + "EmbER: 8.800%\n", + "BERTScore recall: 0.95685\n", + "BERTScore 
precision: 0.95836\n", + "SemDist mean (x1000): 7.84212\n", + "\n", + "=== REF: de quel arsenal dispose l' armée syrienne\n", + "=== HYP: quelle arsenal dispose l' armée syrienne\n", + "WER: 28.571%\n", + "CER: 12.195%\n", + "dPOSER: 28.571%\n", + "uPOSER: 14.286%\n", + "EmbER: 28.571%\n", + "BERTScore recall: 0.93197\n", + "BERTScore precision: 0.93909\n", + "SemDist mean (x1000): 13.87417\n", + "\n", + "=== REF: quels dégats pourraient provoquer ces armes chimiques\n", + "=== HYP: dégâts pourraient provoquer ses armes chimiques\n", + "WER: 42.857%\n", + "CER: 15.094%\n", + "dPOSER: 14.286%\n", + "uPOSER: 14.286%\n", + "EmbER: 30.000%\n", + "BERTScore recall: 0.76464\n", + "BERTScore precision: 0.85932\n", + "SemDist mean (x1000): 46.58437\n", + "\n", + "=== REF: un spécialiste jean pierre daguzan nous répondra sur le plateau de bfm story et puis\n", + "=== HYP: spécialistes ont bien accusant nous répondra sur le plateau de bfm story puis\n", + "WER: 40.000%\n", + "CER: 23.810%\n", + "dPOSER: 40.000%\n", + "uPOSER: 33.333%\n", + "EmbER: 40.000%\n", + "BERTScore recall: 0.70336\n", + "BERTScore precision: 0.73710\n", + "SemDist mean (x1000): 48.69765\n", + "\n", + "=== REF: après la droite populaire la droite humaniste voici la droite forte deux jeunes pousses de l' ump guillaume peltier et geoffroy didier lancent ce nouveau mouvement pourquoi faire ils sont mes invités ce soir\n", + "=== HYP: la droite populaire la droite humaniste voici la droite forte deux jeunes pousses de l' ump guillaume peltier geoffroy didier migaud pour quoi faire ils sont mes invités ce soir\n", + "WER: 20.588%\n", + "CER: 17.391%\n", + "dPOSER: 23.529%\n", + "uPOSER: 17.647%\n", + "EmbER: 20.588%\n", + "BERTScore recall: 0.88929\n", + "BERTScore precision: 0.92400\n", + "SemDist mean (x1000): 11.49768\n", + "\n", + "=== REF: et puis c(ette) cette fois ci c' est vraiment la fin la fin de france soir liquidé par le tribunal de commerce nous en parlerons avec son tout dernier rédacteur en 
chef dominique de montvalon\n", + "=== HYP: cette fois ci c' est vraiment la fin à la fin de france soir liquidé par le tribunal de commerce nous en parlerons avec tout dernier rédacteur en chef dominique de montvalon\n", + "WER: 14.286%\n", + "CER: 11.518%\n", + "dPOSER: 14.286%\n", + "uPOSER: 14.286%\n", + "EmbER: 13.889%\n", + "BERTScore recall: 0.87325\n", + "BERTScore precision: 0.95048\n", + "SemDist mean (x1000): 8.85153\n", + "\n", + "=== REF: damien gourlet bonsoir avec vous ce qu' il faut retenir ce soir dans l' actualité l' actualité ce sont encore les incendies en espagne\n", + "=== HYP: damien gourlet bonsoir olivier avec vous ce qu' il faut retenir ce soir dans l' actualité actualité se sont encore les incendies en espagne\n", + "WER: 12.500%\n", + "CER: 8.955%\n", + "dPOSER: 12.500%\n", + "uPOSER: 8.333%\n", + "EmbER: 8.400%\n", + "BERTScore recall: 0.97822\n", + "BERTScore precision: 0.94830\n", + "SemDist mean (x1000): 9.74524\n", + "\n" + ] + } + ], + "source": [ + "for i in range(10):\n", + " ref = \" \".join(refs[i])\n", + " hyp = \" \".join(hyps[i])\n", + "\n", + " print(f\"\"\"\\\n", + "=== REF: {ref}\n", + "=== HYP: {hyp}\n", + "WER: {wer_hparams['wer_stats'].scores[i]['WER']:.3f}%\n", + "CER: {cer_hparams['cer_stats'].scores[i]['WER']:.3f}%\n", + "dPOSER: {poser_hparams['wer_stats_dposer'].scores[i]['WER']:.3f}%\n", + "uPOSER: {poser_hparams['wer_stats_uposer'].scores[i]['WER']:.3f}%\n", + "EmbER: {ember_hparams['weighted_wer_stats'].scores[i]['WER']:.3f}%\n", + "BERTScore recall: {bertscore_hparams['bertscore_stats'].scores[i]['recall']:.5f}\n", + "BERTScore precision: {bertscore_hparams['bertscore_stats'].scores[i]['precision']:.5f}\n", + "SemDist mean (x1000): {semdist_hparams['semdist_stats'].scores[i]['semdist']:.5f}\n", + "\"\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your 
research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": 
"ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/tasks/forced-alignment.ipynb b/docs/tutorials/tasks/forced-alignment.ipynb new file mode 100644 index 0000000000..6ae5a4449d --- /dev/null +++ b/docs/tutorials/tasks/forced-alignment.ipynb @@ -0,0 +1,292 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/tasks/forced-alignment.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/tasks/forced-alignment.ipynb)" + ] + }, + { + "cell_type": "markdown", + "id": "ada3eff8-5ca0-445e-9343-82228d465d45", + "metadata": {}, + "source": [ + "# Force alignment using k2 for CTC models.\n", + "\n", + "This module provides an abstract class, `Aligner`, for force alignment using k2 for CTC models.\n", + "Besides, it also provides a concrete class, `CTCAligner`, for force alignment using k2\n", + "specifically for a pre-trained CTC model and a tokeniser (`CTCTextEncoder`).\n", + "Note that we must make sure that the blank symbol is index 0 in the tokeniser's vocabulary.\n", + "\n", + "We support three different ways of conducting force alignment:\n", + "1. One audio file and one transcript at a time.\n", + "2. A batch of audio files and transcripts.\n", + "3. 
A csv file containing the audio file paths and transcripts.\n", + " In this case, the csv file should follow the standard speechbrain csv format with a header line as follows:\n", + "\n", + " ```\n", + " ID, duration, wav, spk_id, wrd\n", + " ```\n", + "\n", + "When token-level alignment is conducted, for one single audio file or a batch of audio files,\n", + "the aligning method will return a list of lists of integers,\n", + "where each integer represents the index of the token in the tokeniser's vocabulary.\n", + "For example, if the tokeniser's vocabulary is `['', '', 'a', 'b', 'c']`,\n", + "then the returned list of lists of integers may look like `[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]`.\n", + "For an input of csv file, the aligning method will return a dictionary (`Dict[str, List[int]]`),\n", + "where the keys are the IDs of the audio files and the values are the list of token indexes.\n", + "\n", + "When word-level alignment is conducted, for one single audio file or a batch of audio files,\n", + "the aligning method will return a list of lists of tuples,\n", + "where each tuple represents (`start_frame` (int, including), `end_frame` (int, including), `word` (str)).\n", + "For example, if the transcript is 'hello word', and there are 20 frames in the audio file,\n", + "then the returned list of lists of tuples may look like `[[(3, 10, 'hello'), (11, 16, 'word')]]`.\n", + "\n", + "For an input of csv file, the aligning method will return a pandas.`DataFrame`,\n", + "where the columns are `['ID', 'word', 'start', 'end']`, and note that the start and end are in seconds.\n", + "However, if the `frame_shift` for the method, `align_csv_word`, is `None`, then the start and end will be in frames.\n", + "\n", + "Author:\n", + "* Zeyu Zhao 2024\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "6efd9870-09e2-41cb-b275-262c2195fbd4", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"DEBUG:speechbrain.utils.checkpoints:Registered checkpoint save hook for save\n", + "DEBUG:speechbrain.utils.checkpoints:Registered checkpoint load hook for load_if_possible\n" + ] + } + ], + "source": [ + "# For this tutorial, speechbrain and k2 are assumed to be pre-installed, see instructions at:\n", + "# * https://speechbrain.readthedocs.io/en/latest/installation.html\n", + "# * https://k2-fsa.github.io/k2/installation/index.html\n", + "from speechbrain.inference import EncoderASR\n", + "from speechbrain.integrations.k2_fsa.align import CTCAligner" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "372dad04-dae3-4fb1-aa7d-2c1547043196", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:speechbrain.utils.fetching:Fetch example.wav: Fetching from HuggingFace Hub 'speechbrain/asr-wav2vec2-commonvoice-en' if not cached\n", + "INFO:speechbrain.utils.fetching:Fetch hyperparams.yaml: Using symlink found at '/home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/hyperparams.yaml'\n", + "Some weights of Wav2Vec2Model were not initialized from the model checkpoint at facebook/wav2vec2-large-960h-lv60-self and are newly initialized: ['wav2vec2.masked_spec_embed']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n", + "WARNING:speechbrain.integrations.huggingface.huggingface:speechbrain.integrations.huggingface.huggingface - Wav2Vec2Model is frozen.\n", + "DEBUG:speechbrain.utils.parameter_transfer:Collecting files (or symlinks) for pretraining in pretrained_models/asr-wav2vec2-librispeech.\n", + "INFO:speechbrain.utils.fetching:Fetch wav2vec2.ckpt: Using symlink found at '/home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/wav2vec2.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path 
in self.paths[\"wav2vec2\"] = /home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/wav2vec2.ckpt\n", + "INFO:speechbrain.utils.fetching:Fetch asr.ckpt: Using symlink found at '/home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/asr.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"asr\"] = /home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/asr.ckpt\n", + "INFO:speechbrain.utils.fetching:Fetch tokenizer.ckpt: Using symlink found at '/home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/tokenizer.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"tokenizer\"] = /home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/tokenizer.ckpt\n", + "INFO:speechbrain.utils.parameter_transfer:Loading pretrained files for: wav2vec2, asr, tokenizer\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): wav2vec2 -> /home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/wav2vec2.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): asr -> /home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/asr.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): tokenizer -> /home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/tokenizer.ckpt\n", + "DEBUG:speechbrain.dataio.encoder:Loaded categorical encoding from 
/home/competerscience/Documents/Repositories/speechbrain/docs/tutorials/tasks/pretrained_models/asr-wav2vec2-librispeech/tokenizer.ckpt\n" + ] + }, + { + "data": { + "text/plain": [ + "'THE BIRCH CANOE SLID ON THE SMOOTH PLANKS'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# This aligner depends on having a pretrained CTC-based ASR model\n", + "# Let's load a pretrained one and demonstrate that it works\n", + "from speechbrain.utils.fetching import fetch\n", + "wav = fetch(\"example.wav\", source=\"speechbrain/asr-wav2vec2-commonvoice-en\")\n", + "asr_model = EncoderASR.from_hparams(source=\"speechbrain/asr-wav2vec2-librispeech\", savedir=\"pretrained_models/asr-wav2vec2-librispeech\")\n", + "asr_model.transcribe_file(str(wav))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "3de248e6-57ee-478e-8cb8-82a57bcd504a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0, 0.5, 'Amplitude')" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAksAAAGwCAYAAAC5ACFFAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAbSBJREFUeJzt3Xl8TOf+B/DPZEc2kc0SYimJNQQRtQtRuihtcV1bXVo/a7lt6UJbt43eLkqrlGp1U65auiBtxE4IscYSaostCY3sssjM7w81MsnMZJZz5pwz83m/XvNqnTnnzHcmM+d8z/M85/uoNBqNBkRERESkl5PUARARERHJGZMlIiIiIiOYLBEREREZwWSJiIiIyAgmS0RERERGMFkiIiIiMoLJEhEREZERLlIHYA/UajVu3LgBLy8vqFQqqcMhIiIiE2g0GuTn56NevXpwcjLcfsRkSQA3btxASEiI1GEQERGRBa5evYoGDRoYfJ7JkgC8vLwA3P+wvb29JY6GiIiITJGXl4eQkBDtedwQJksCeND15u3tzWSJiIhIYaobQsMB3kRERERGMFkiIiIiMoLJEhEREZERTJaIiIiIjGCyRERERGQEkyUiIiIiI5gsERERERnBZImIiIjICCZLREREREYwWSIiIiIygskSERERkRFMloiIiIiMYLJEJLJytQYl98qlDoOIiCzEZIlIRJdvF6Lpa1vQ4o14FJXekzocIiKyAJMlIhENXLxH+//Jl7IljISIiCzFZIlIJEWl91BU+rD7bezXh7Dp6HUJIyIiIkswWSISyaLE81WWzVh7zPaBEBGRVZgsEYlArdbgi10X9T6Xej3XxtEQEZE1mCwRieDPWwUGn3v80702jISIiKzFZIlIBP0X7pY6BCIiEgiTJSKB7T1/W+oQiIhIQEyWiAS2IP5MtevMXn8ChSWsu0REpARMlogElno9r9p11hy6iv9srj6pIiIi6TFZIhJQblGZyev+mJwuYiRERCQUJktEAsorNj1ZAu5Ph0JERPLGZIlIQNfu3DVr/XOZ+SJFQkREQnGROgAie3AuMx9Dl+5HfrF5g7Y1IsVDRETCYbJEJADWVSIisl/shiMiIiIygskSkZXMuQOuMg374YiIZE9xydKSJUsQGhoKDw8PREVFITk52ej669atQ1hYGDw8PNCmTRts2bKlyjpnzpzBk08+CR8fH9SqVQudOnVCejpv6ybTzN982uJtFyWeFzASIiISg6KSpbVr12LmzJmYN28ejhw5gnbt2iE2NhZZWVl619+/fz9GjBiB8ePH4+jRoxg8eDAGDx6M1NRU7ToXLlxAt27dEBYWhp07d+LEiRN488034eHhYau3RQqXlmH5HW1nblZfwJKIiKSl0miU0xEQFRWFTp064bPPPgMAqNVqhISEYOrUqZg9e3aV9YcNG4bCwkL89ttv2mVdunRBREQEli1bBgAYPnw4XF1d8d1331kcV15eHnx8fJCbmwtvb2+L90PK9ORne3HiWq7F219eMEjAaIiIyFSmnr8V07JUWlqKlJQUxMTEaJc5OTkhJiYGSUlJerdJSkrSWR8AYmNjteur1Wps3rwZzZs3R2xsLAIDAxEVFYVNmzYZjaWkpAR5eXk6D3JcKpVK6hCIZKv0nhql99RSh0FkFcUkS7dv30Z5eTmCgoJ0lgcFBSEjI0PvNhkZGUbXz8rKQkFBARYsWIABAwbgjz/+wNNPP40hQ4Zg165dBmOJi4uDj4+P9hESEmLluyMlY6pEpF+5WoMucYmIem8bytWK6cQgqkIxyZIY1Or7VztPPfUUXnrpJURERGD27Nl4/PHHtd10+syZMwe5ubnax9WrV20VMskQG5aI9DtzMw/ZhaW4U1SGvwpLpA6HyGKKKUrp7+8PZ2dnZGZm6izPzMxEcHCw3m2Cg4ONru/v7w8XFxe0bNlSZ53w8HDs3bvXYCzu7u5wd3e35G2QHWKuRKTf459WOI6yYYkUTDEtS25
uboiMjERiYqJ2mVqtRmJiIqKjo/VuEx0drbM+ACQkJGjXd3NzQ6dOnZCWlqazzrlz59CoUSOB3wHZK45ZIqpqz/lbOv/+/sAV/FXA1iVSJsW0LAHAzJkzMWbMGHTs2BGdO3fGJ598gsLCQowbNw4AMHr0aNSvXx9xcXEAgOnTp6Nnz5746KOPMGjQIKxZswaHDx/G8uXLtft8+eWXMWzYMPTo0QO9e/dGfHw8fv31V+zcuVOKt0gKpKAbSolsZtRK3Rp4i7f/id3nb2PT5EcliojIcopKloYNG4Zbt25h7ty5yMjIQEREBOLj47WDuNPT0+Hk9LCxrGvXrli9ejXeeOMNvPbaa3jkkUewadMmtG7dWrvO008/jWXLliEuLg7Tpk1DixYtsH79enTr1s3m74+IyJ4du5ojdQhEFlFUnSW5Yp0lxzZ06X6kXLlj8fass0T2KHT2Zr3L+X0nObG7OktERI7ur4ISxKfeRFm5cusWnb7BunSkPEyWiIgU4snP9uHF749g+e6LUodisYGL9+BWPgd6k7IwWSIiUojrOXcBAB/8noZ/rDhgVfevlNKzC6UOgcgsTJaIiBRo/4W/MHTpfqnDIHIITJaIrMQqS0Tm4W1FpDRMloisxOM+EZF9Y7JEZKWcolKpQyBSlOzCUhZzJUVhskRkhbJyNS7c4mBVEt/FWwV6l1+6Lb/vX3WJ0MTvUvDOb6dtFA2R9ZgsEVkh726Z1CGQg3hp7TG9y3t/uBPlanm10mw/m1XtOl/vuyx+IEQCYbJEZAUnTqJLNpKRV2zwubWHrtowkurtOX9b6hCIBMVkicgKzJVIDvZfkFdywosIsjdMloisoGLhAJIBeXXCAU78WZCdYbJEZA2eFEgGNp+4Kau7y0xtWNp97hbO3ORccSR/TJaIrMDeBrKV6loxT17PtVEkwhn9VTIeW7RH6jCIqsVkicgKzJVILsrK5dOyRGRvmCwREdkFJktEYmGyRGSBP7PysWDrWdwpZJ0lso3qunwvZMmvOCWRvXCROgAipfmroAQxH+8GABy/miNtMER/e2X9CTzXKUTqMIjsEluWiMz01b5L2v9PuviX1fvr+cEOq/dBRETiYbJEZKYlOy4Iur8rfxUJuj8iIhIWkyUiM+w+d0vqEMhBmXLnZXZhqehxEDkiJktEZhj9VbLUIRAZNHjJPqlDILJLTJaIZEAts1njSZnSs9mlSyQGJktEJjpxLUe0fTd5bQsTJhJEfjHLWRAJjckSkYnEnsPqz1sFou6fHMNrG1OlDoHI7jBZIjJRdXNzWav/wt0Y8Mlu3OEgXaogM6/YrHpeiWcyxQvGBnLvsmWM5IfJEpGpbDAR3NmMfCzbLWxpAlK2qPcS8dSSfbiRW2zS+kWl5Yrs0r2aXYR5P6ei3dt/4H+HrkKjUd57IPvFZInIRF/vu2yT1ym9p7bJ65D8HbOwQvzGo9eFDURk7205g+7/3YFvkq4AuF+N/F/fHJY4KqKHmCyRYmw5eRPRcYk4kn5HktcXe8zSA1tPZtjkdUj+LC0FcC4zX+BIxLV898UqyxLPZmGTwpI+sl9Mlkgx/u+HI7iZW4zxqw5JHYqoMvKKUVbO1iWynL10YM1YewyXb3OCYJIekyVSnLJyezkVGLYmOV3qEEhiF624O/LPrAKUK3Dckj6z1h2XOgQiJktEcvTmz6cUOUiXhPPCdykWb7v9bBYGLd4jYDTS4d2hJAeKS5aWLFmC0NBQeHh4ICoqCsnJxqefWLduHcLCwuDh4YE2bdpgy5YtBtd98cUXoVKp8MknnwgcNZH5Ptvxp9QhkISu59y1avuzGcoat0QkZ4pKltauXYuZM2di3rx5OHLkCNq1a4fY2FhkZWXpXX///v0YMWIExo8fj6NHj2Lw4MEYPHgwUlOrFm3buHEjDhw4gHr16on9NohMsmyX9SUESu6VI6eIV+akXDdyrUsaxRSfehPrU65JHQbZgKKSpY8//hgTJkzAuHHj0LJ
lSyxbtgw1a9bEV199pXf9RYsWYcCAAXj55ZcRHh6O+fPno0OHDvjss8901rt+/TqmTp2KH374Aa6urrZ4K0Q20euDnYh4JwFZeabV6CF5KL2nRlFpudRhyEJxmdrqVjaxvPj9EcxadxyZ/H3ZPcUkS6WlpUhJSUFMTIx2mZOTE2JiYpCUlKR3m6SkJJ31ASA2NlZnfbVajVGjRuHll19Gq1atTIqlpKQEeXl5Og+yHUcpVldUWo5ytcbsisaHLmej8ZzN+Hznn7j5dyHDfRduY9PR61h3+KoYoZLA/se/k47Dl7OlDsGoPFYdt3suUgdgqtu3b6O8vBxBQUE6y4OCgnD27Fm922RkZOhdPyPjYR2b999/Hy4uLpg2bZrJscTFxeHtt982I3oiywxctAdpf9fMOTt/ADxcnavd5tll9y8G/hufpl320tqHdxTFhAehdi03gSMlIbElUNdX+y7jqYj6UodBDkwxLUtiSElJwaJFi7Bq1SqoVKbPZTFnzhzk5uZqH1ev8iqQxJFWobjgqv2XAQDPrzqE0NmbkXThryrrx6ferHafd8vYvUPiErrx15y58YjEoJhkyd/fH87OzsjM1J0kMjMzE8HBwXq3CQ4ONrr+nj17kJWVhYYNG8LFxQUuLi64cuUKZs2ahdDQUIOxuLu7w9vbW+dBJLb1Kddw6kYutp+9f0PDiBUHsO/P2/j52HVoNBocTb+DF78/Uu1+zLguIJKVHWez8OJ3KciWWTkBxxgY4NgU0w3n5uaGyMhIJCYmYvDgwQDujzdKTEzElClT9G4THR2NxMREzJgxQ7ssISEB0dHRAIBRo0bpHdM0atQojBs3TpT3Qcokh4Pz+awCDFq8V2fZyC8PAgBW7LmI1OscO2cvsvJLpA7BKmIl5OP+rt6flV+MDf/3qDgvQqSHYpIlAJg5cybGjBmDjh07onPnzvjkk09QWFioTWxGjx6N+vXrIy4uDgAwffp09OzZEx999BEGDRqENWvW4PDhw1i+fDkAoE6dOqhTp47Oa7i6uiI4OBgtWrSw7Zsjk0lxFffGppMSvKrpzEmUNp+4iX91byJiNGSNknvlWHNI2V37YtyD8VOFW/SPpOcgt6gMPjUf3r18r1yNtMx8hAd7w8mJzackLMV0wwHAsGHD8OGHH2Lu3LmIiIjAsWPHEB8frx3EnZ6ejps3H47Z6Nq1K1avXo3ly5ejXbt2+Omnn7Bp0ya0bt1aqrfgMOztjrXTN+yn1eY/m8+wOriM5Rbxzip9Pvhd90aem3m65QRmrTuOQYv3YvH287YMixyEolqWAGDKlCkGu9127txZZdmzzz6LZ5991uT9X7582cLI6IGs/GIMWrwXzQI84VvTFXMeC0fDOjWlDssq5twAoASLt5/HjJjmUodBdkqMn8udQt0kMu/uPZ1//3zsBgDgk23nMapLI9TxdBc+CHJYimpZInk6n5mPRxdsx9pD9yd/XbbzIm7llyDp4l/YmpqBF7+3fI4rEscn23j1TeIRo2G5tFyt8+/nvtBfXw8AIv+zTfgAjLCzhnTSg8kSWe2V9SdwPecuXl2vf1zP6Zt5+DOL81TJzQUrZrUnkrsHF29EQmCyRFY7n1n9SffJz/bZIBIyR8qVO1KHILg952/hkMyrPVfnxLVcqUNQnL3nb1dZZujijcgSihuzRPJSek+NgpJ71a4n5DxXUjR529eIJfuUXViKUSuTAQCX4gYqdpzZv749LHUIiqHRaFByT42F285JHQrZOSZLZJW7lZKga3eKDA7u1Gg0ij2BkfxlFyq7NpEYzmXmo3mQl9RhiKbxnC3oFFpb8lZSDctS2j12w5FVtlaaXqPb+zuwcu8lveteuFUoyGveLSvHDVvPQm6HOV6hCS2CSqfRaKok9HJmynQ15ui/cLeg+5OjQ5ftrzuZ5IfJElll9gZzxgUId/XVdcF22ydMdubtX09LHYJoHnQN/3PlQYTPjUemnolpj6Tfwb4/q451kZI
p09WYS994HiIyD5MlO6fRaFBW6ZZbqQg91mivzE50JK1fjj9slRm0eC9GLD+AfX/en2z41+M3qqw/5PP9GPnlQdxS+NQi1XnhO46BIrIWkyU7VlxWjnZv/4HwN+NlMbfZ1B+PolzAytEl9+SRBJL0MvOKsTjxYe2o9OwiJF38S/vvpAt/6dsMAOw+WarozM08fLnnomwuoOwF6yzZPyZLdmzWuuPIK76He2oNfj523ap9/e/QVTy1ZB+y8h92Z6ReN+8W57MZ+dh2JtOqOIj0mbr6qNHnE89mQaPRaKfh0TfdS3FZOX44eMXuuncLS8u17/exRXvwn81n8F3SFYmjIlIWJkt2bPOJh90S1l75vLL+BI5fzcGCrQ/nZ3r8071m7+dcBotTkvBO3ag+cX/is70YseIALt0uRPv5CVWe/zjhHF7fmIquC7bjvS1nxAjTqEQRLyR+O6k7cNzcCx0iR8dkyUEI1UpcVGLdnUUfJbAeCkkj9XoeDlzMRu8PdyL37sN5xq7n3MXGo9ewfPdF7bLluy9i3s+pBvclxkTRq/ZfFnyfD/yZmY+LFSq2i91rtDU1Q+RXMI29TehN0mGy5CB40CB92MIATPj2MF5ae7zK8m+SrlS5Wy4+9SZCZ29G4zlbUHJP2JIExWXilTj43+FrGLp0v2j7r+y6TLoyfzshbCkGQ3h4tX9MlhyEnH7Mcrtd25FZ0pXqSBLPZOn8u+Kt/bvPCfc9LrlXLmq9oIy8Ytwpetia5igXT7YqVsmilPaPyZKDeFeCMRiGXLwtTHHKNzel4mp2kSD7ItLnq336C6wC91ukJn57GKUC3JV55S/bfo/LHeTc7ihJIYmPyRJZRF+RP5MJeACzWWFFHnNlzRbT6By7mlNl2R+nM7HpqHV3mkrh1+M3BEny9GGCQvaIyRJZ5NllSVKHAIDzgZHtfG2glSlfgGljpMgvDl/OFmW/zJXIHjFZciBCFoRMl0n315H0HKlDULw7MihYqgRitl19vvNPEfeun1g5jZxyJVvFwgTR/jFZslP6CuuFz41n5V4L2fOx8JNtLOcgtZ+PVZ2ORanYDUf2iMmSnXpzU9UaMaX31Pjw9zQJotHFQ6m8CNGNRMrjCDmNI7xHsg0mS3bq+DX99XO+lcE0B3N/PiV1CFSBStQOJvvHlhRdcvo0eEs/CYXJkp0ydHMQDx5UGb8Tpvn5uP10lQHi/d2ZO5rnbmk5Es9kilqUlKzHZMlOidVWwBMrGVNyrxyTfziCNcnpNn3dApG7EvOKy5gEmIjHCMOOpN/Br8dv6CRG/153HOO/OYw5G05KGBlVh8mSnbJB2RmbipNRUU27I+C57X+HrmLzyZuYbWcHfjGv+neczap+JRE4QvInp/d4u6AEQz7fj6k/HsWIFQe0yzf/PcnxRgXW63IkTJbslKFxKMVlasHntLKFLypMckrC2iDgQTqnwpQa9kTMcV3jVh0Sbd/GiFY6QEYJipzczHlYyPeogZInWXqK/V67U4Q5G07gXGa+WKGRCZgs2aHswlKUGzli/ZRyzeJ9q6ASrfIvSSe/2D6THKGI1VIr5eBwRxiYLqc6S6Z8h975reqMBC9+n4Ifk6+i/8Ld2JEmTSskMVmyO+cz89FhfgJu5RuubJ1fbN34ju8OSH9Hna3Z+4lFyIKlpsgrLkOhQOOMThq481NIxs5z1nw1tp2R78kv8UwmztzMM3s7Of1U5BSLKcnSXwUPC8Q+OOakXn/4Nxj39SH8fOw6BnyyG5cFmmOTTMNkyc78dMTyViNT8UdK1iguK0fbt/5Aq3m/C5KEnskw/4RuLrHmnjt8RZwpR0xh7JNPvZ6L8d8cxmOL9tgsHnHIKFsyw18FJej+3x346I+qdfGmrzmGsxn5eHX9CQkic1xMlshs92zcCiEHl208K7yt2fIK/HqF6vKCfJVsELud3S8B4H4rhSFpGZaPj+HdcPpVHvem0WjwvYFW+hV7LuHanbv4dLvhaXAKS1l
M1pZcpA6AlMfRxizdLVXegHgSlrGGJSYHuuTU9SUnlb9D4XPjUVymeyx98F1Sm/Ahqh3rMCw5tiyR2YTokTiSfsf6nfwt9664g5Ozi+x/olkln9/uKriYnxKrp1/PuYt0Iy2tcvou2SpxMyVhrnzcrJwoAQ/jNeVbYUpCRcJhsmRnTDn4WvsbE+LwfvxqjgB7ue+YgPvSR+0A3Y7bzmRKHYLF5v0i7fQ5m09mSPr61sguLMXX+y4hu9C0C4JytQaPLtiOHh/sQFHpPWTlF2PQ4j344eDD7iQ53Qwho1AUmRjTQ4pLlpYsWYLQ0FB4eHggKioKycnJRtdft24dwsLC4OHhgTZt2mDLli3a58rKyvDqq6+iTZs2qFWrFurVq4fRo0fjxg37mtZASPGnhDkxlJUL14a8fPcFwfblqF75SbzBoutTruHnY/ZbcO/41RxBv8+2NOn7FLz962m8+H2KSetXfJ9/FZTio9/P4dSNPLy+serE3Y5EqNIBf2YVIMXEQf9sWbItRSVLa9euxcyZMzFv3jwcOXIE7dq1Q2xsLLKy9N9+u3//fowYMQLjx4/H0aNHMXjwYAwePBipqfd/2EVFRThy5AjefPNNHDlyBBs2bEBaWhqefPJJW74tstK+P/8Sdf/2Vg1dLHdLy7H/gu7fIqeoFLPWHcf0NccUPfaruvOSUs9bBy/dPzEnX7Lsrjx9XaBy+iiUNp7sr8JSDF2ahAw9xSkrO5dZoG3FKyi5h/jUm4r+jcmdopKljz/+GBMmTMC4cePQsmVLLFu2DDVr1sRXX32ld/1FixZhwIABePnllxEeHo758+ejQ4cO+OyzzwAAPj4+SEhIwHPPPYcWLVqgS5cu+Oyzz5CSkoL0dNvObWVLvPWfxDDxu8NIuqibLBVVOHiXKrT1Bag+AbA0oVZ6Iq7vrjk5JY6bjtmml0Dot3zFxLtvNxy532I78suDePH7I5i25qjAkZgu924Zfjtxw24TNsUkS6WlpUhJSUFMTIx2mZOTE2JiYpCUlKR3m6SkJJ31ASA2Ntbg+gCQm5sLlUoFX19fg+uUlJQgLy9P56Ekaw9ftWp7IQ7wcjqgVkdJsUppz/nbVZbp+65ULLynBGdtUMdJbkz9yqfpm4JDRr8XOd25a85xpMTEuB+MFXswBjThtHRjDyd8cxhTVh/F279KO4ZQLIpJlm7fvo3y8nIEBQXpLA8KCkJGhv5xNBkZGWatX1xcjFdffRUjRoyAt7e3wVji4uLg4+OjfYSEhJj5biyn0Wiw9lA6Tt0Qv2qx4Ris30fc1rPW74RkT2dsvAb4+dh1PPfFw4uVH5Pl34K7JvlqtYOW7TmhDp29GY+8vkXUyYTFZIsB50K3pphaOb1IRq04yZfvd+UKOdeknCgmWRJbWVkZnnvuOWg0GixdutTounPmzEFubq72cfWqdS015vj9VAZeXX8SgxbvtdlrVrbOirnlSL6Enh/uXrka/T7epf23Bhq8XylJfmOT/AcGazTVj3yxZGzMxqPXsHSn/G5O2H42Eyev5egsKyvX4JfjpnVpyW2ckC1uZl26q/q/oxify9mMfEESNbVag0OXs6tMQXSvXI24rWew+9wtq19D6RSTLPn7+8PZ2RmZmbrNjJmZmQgODta7TXBwsEnrP0iUrly5goSEBKOtSgDg7u4Ob29vnYetnL5pvLKu0sdAyJGjfKZzfxa2+fzS7UJZXflaKvVGHjr+Z5vg+31p7XHB92mti7cK8Pyqw/gmqWpl6cpdWrcL9M8/KbdWNlu0LF3IKhD9NQyZv7nq5LvmWrX/Mp5dloSRXx7UWb7m0FV8sesiRn9l/K5zR6CYZMnNzQ2RkZFITEzULlOr1UhMTER0dLTebaKjo3XWB4CEhASd9R8kSufPn8e2bdtQp04dcd6AQKo7b5t6XJBTLRQhJF2w/I64b/ZfNlp+wM4+KoN2iXz1qNTPMeVK9QVUD10SrsiqlEwdWAwA6w3MQym3P7Pc4hH
a6oPWd2X/7+9xrJVr1pna8lsg0KTYcqaYZAkAZs6ciRUrVuCbb77BmTNnMGnSJBQWFmLcuHEAgNGjR2POnDna9adPn474+Hh89NFHOHv2LN566y0cPnwYU6ZMAXA/UXrmmWdw+PBh/PDDDygvL0dGRgYyMjJQWirPQahCtXLI4cQl5Ez3r286adF2xWXlmPfLKby35azBK2UShqG6MLP+J78WFnP9c+XB6ldSuMp/ve8PyH+8GSCPY509eDCOSq3WVCliOv1H6e7CsxVFJUvDhg3Dhx9+iLlz5yIiIgLHjh1DfHy8dhB3eno6bt68qV2/a9euWL16NZYvX4527drhp59+wqZNm9C6dWsAwPXr1/HLL7/g2rVriIiIQN26dbWP/fv3S/Ieq1OxCqy+KQdMTabkcPwwdGVqSxVP4EodwCoUsVsby9UaqPR8QeXwPaD7jI6r0WhwPrP67ia5tVrLpXijTMKwWHp2EQpK7qHPRzvRYX4CUq8/vMko8az+Wof2RHET6U6ZMkXbMlTZzp07qyx79tln8eyzz+pdPzQ0VHY/7Oo4VTjXjFhxAPtm97FoP/fft7SDcYzNL2UuBxlWpCiV8yJl/dKosjdNHNPmiH9npZ1HKtN3EVNlHQCd392mHYe4Ojkd7z3dRu969khRLUukewK6nnNXukDskMKPd7LHz1feCkru4UKW9QVr5fZ3tkU8MnvLonBSqezihg1LKa5lydFVdwVgalavhB93Yck9/HzsBvq1DKp2XVOujPRuZ7fXQdJTWvFJR9f7w524lW/9uD35lQ4wHI9Go8GZm/loVKcmarlbfjqUW4IohMrDEpwqNa3cyLmL0ntquLk4RpuLY7xLqkIJP+43N6XitY0nMfLLA9Wuy5RHfiqP35fbSZR0CZEoyZGxb93OtFsYuHgPBi7eY7N4xPaXQDeqbDujW3an8gXpzrRbePYLw7NhVKRvULjSMFlSmOoaUEw9HRWV3pO8n/1wNbNr/37qfqX1cyYMKhXiLkG5DASVyp2iMkEHuVeeeqHsngY5Rco+YJIJZPYzMnace1Bo05ySCXL3ybbzVm2v0Wiw+cTNKp+JvkPs8UqlBgx58fsUdJifgEOXLZuwWQ6YLCmMUN1GEe8kYPZ6y263F8qBi/L64fT8YCfm/ly1rkhWfvUzgNuLLSdvVr+Sib7ad0nn3zEf70KhA495cBQyy5VsEk9GnnyOEZaMK6pYVLPxnC2YvPoIPvg9TWcdJxOvSPXlpn/8feG0cs+lqk8qBJMlO2NO6XtrJ9SVE1Nan/Sp/Pv/Vk/l4qFLTWtqtgdC1r6qrLRcPpOaknjk1kCrkcnXzlafiyXd3ab8Nk1tvbfX3zkHeCtMdV9YKSfYJeWT2XmuisWJ55GWmY9AL3e0beADDxdnqUOivxWV3kNNN/mdUhy9e10o1+449t3X8vtmk1XM7Xvf/+dtdG3mL1I0RMJIvZ6LpbsuYPMJ4boJhaZWa+DkZNrld5aMum2E8sHvaZj3RCvZDeS/cKsAHWv5SR2G7T4XkV5mzgb9wzbuKHzgtqnYDacw1R2Ks8y8o+UfX9r/NA1kBonOc5duG67vc7ugBI9/ulfWiRIAtHnrd5PnKMy5WyZyNLZ3+PL9+fHk1pAT4ldT6hBsytYf/8FL8hp7KhYmSwrD+ctIVBLVYIj9ZLfB527mKKMVprC0HC98d1jqMCTzoOVEZrmSbJK3B8mk2Gx9l/OL36dUWfavbw7j1+M38O7m03YzjRS74RRmhYLvJtDHkYqaKYFUiUnpPcODQl2cWUVLCeSSlFRm6pil6zl3Ud+3hmhxzPvFtOli7MG2M5naOk0Vz1l5xcptUeVZiiS1+mDVu89IOgu3nZM6hCpcmSwpwoOc5Fq2vGoWdV2w3aT1JnxjH62CGgDbz2Zqa0jJSbKCu+yYLBHOZeZL9tpS32FR3d2FnH/PNOJ+TspJliyddscePGi/Gba8+or7trZ894Vq1zl9M88GkdjG86s
OY9qPR5GRq4wubCVgsqRw5wVIdN7YVLUQo63ItOVe61ETr0odnZifk4k3mMmC1FXxpXRGxsnGe1vOSh2CzVT8Cu7987Z0geih5F8HkyWFO3nd+rpKJ69JV5vJ2LnFka/S6aEfk9OlDoHsgFBzppnqXrlakvn27lSYUqh5kKfNX9+YcrXG5ClS5IbJksIJUXD5blk5nl223/odWUBuNVnIfGK3pijppgYm+PI1Y+0xm77ekKX70endbTh9w7YtbnvOP2xNMnWKElt6ask+bYmN7WczsVXAKZbExGRJ4YQ6UR26fAer9innpCQUoebasycFJffMWn/BVsfp4iDlqphEiKHysfjE3y32AxfvsZvb54WyMy0L98rVeH7VYUz64YgiClsyWVI4IUv5v/XracH2Zaqv9122+WtWxJatqlrP+x3HTGwq12g0+GL3RXEDIlK4L3bxN1KRWqPBXxUSJHMv0KTAZEnhRJz3lBzY4CX7TFpPyB64/RfkNRiVyBzGfgvns6S541iu9xtoNEBahnR3YVuCyZLCCf1jkFOGL6dYSL/ie8J1L/x0+Jpg+yKSE2cl3dJpA1/uvYTRXyVr/+2ugMLE8o+QjBK6G+mZpbYf6O3It1srWVm5Gi3n/i51GLIiw/G0VEHlu9M2Hr0u2L41AHLvluHjP9LwZ6WWpDtF0lSu5jAD4TBZIh1nJWga3XBEmANWrkCTkz5o0bpwq0CQ/dmrrakZgu7vwWE9p0j+gz0NyZHopEimmbz6iKj7f+fX01i8/U/EfKw71+Huc7dEfV1DlHIdqoThJBYlSzk5Ofjyyy8xZ84cZGffL19+5MgRXL8uXJZOpnl9YyoWbTsv6D7VNv7m/npCmLL8f5wS5uR9r/z+PGV9P9olyP7s1W8CT6ew8eh1LNt1ARHvJOC7A/enwVHiXUS2rudDphNzug2NRoMDF/8Sbf/2TAktYGYnSydOnEDz5s3x/vvv48MPP0ROTg4AYMOGDZgzZ47Q8ZEJhJ7P653fbHtXnFC5mfx/bvblj9OZgu/zQRmCN/+uKr84UdgLAVu4XaDcljFrLd1Z/bQiUhNrYPGEbw9zeiQL2WXL0syZMzF27FicP38eHh4e2uUDBw7E7t27jWxJYkoVoJL3A6v2XxZsX6YQaszSn1nsNrM3u89L031hDUcet/R+vPxrbh2+cr91ac6Gk1Weyy+2vBt1R5ryvqtyYeveDEuYnSwdOnQIL7zwQpXl9evXR0aGsGMYSFfpPbXB5x7/dK+gr3W31HbdHxm5xdhxNsvqpGmVQDWblNLPLyVbHdzUhr/yRBZ5feP9Vkt90+i8vO6ErcMRlVIOZUo45pqdLLm7uyMvr2r59nPnziEgIECQoEi/uK1njD4v5F1lHf+TINi+qnM+qwDjVh3ClpPWJdul5cKcWRXwu5WcUOPMqiNk0VWiB/IMtCDFCzTuUY7kfNexXY5ZevLJJ/HOO++grOz+l02lUiE9PR2vvvoqhg4dKniA9FB11a4Tz2QJ9lqFNmxZemCfTIoSdpifgNDZm6UOQ9Yu3S4U/TX2X7gtyd2Z1lJasT1H1PatP6QOwSYeJEj7L9xGu7f/wM/H5HkTlgJ64cxPlj766CMUFBQgMDAQd+/eRc+ePdGsWTN4eXnh3XffFSNGMtHvAl8V7bLx7a5CDPUQqnwAGbfOBgUk/7HioOivIYZpa45Wu46ML/LJjiT8fRPGP1YcRF7xPUxfc0zagAxQQguyi7kb+Pj4ICEhAXv37sWJEydQUFCADh06ICYmRoz4yAzrUoQ9ge04m4WezZXVtXrqei66NvOXOgy7UFxWDg9XZ73P8a4fwxRw3CcH8fnOC3hlQJjUYVRLzl2ED5idLD3QrVs3dOvWTchYSGZsXaJfiLuI7paV42j6HTSqUwt+tdys36EDe3ZZEn6dev83nno9F0fS7+CfUY3gxKkbrKaEMRpEtpKVV4JmgV5Sh2GUScnS4sWLTd7htGnTLA6GDJMi8z5
zs+pAfjFtOnoDcx9vBTcr5gn6YtdFJF/OhruLE9L+85iA0TmekxXKUTy429LbwxWD29eXKiQislJ8agYGtA6WOgwd//jyIHa/3BsN69SUOhSDTEqWFi5cqPPvW7duoaioCL6+vgDuV/SuWbMmAgMDmSyJ5MBF8SrPGrL/wl+4dqcIDWrb5gtcUHIPb2w6if8+087ifSRfvv85lRgps0CWm7H2mOwOtHI063/H8Y+oEEQ28pM6FDJTTlEpfGvaT6v09rO6xWNf/D4FlxcMkigaw3p8sAP/16spXo5tAZUMi5WZdAl/6dIl7ePdd99FREQEzpw5g+zsbGRnZ+PMmTPo0KED5s+fL3a8WLJkCUJDQ+Hh4YGoqCgkJycbXX/dunUICwuDh4cH2rRpgy1btug8r9FoMHfuXNStWxc1atRATEwMzp+XV9Xg/OIyvPh9iiSv/WNyOrr/d7vNXu9/h68hdPZmQaaMCJ29GUfT70Cj0SC/uIzjbCxw8OJfVVo1By7eI1E0yrH+yDUMXZpk8HkFDNFwWBHvJGjnhzSkXK3Bc18Y/vvKyfOrDht9Xk7jhT7feQGN52zBxVsF2H42s9q/gy2pNGZ+Uk2bNsVPP/2E9u3b6yxPSUnBM888g0uXLgkaYEVr167F6NGjsWzZMkRFReGTTz7BunXrkJaWhsDAwCrr79+/Hz169EBcXBwef/xxrF69Gu+//z6OHDmC1q1bAwDef/99xMXF4ZtvvkHjxo3x5ptv4uTJkzh9+rROhXJj8vLy4OPjg9zcXHh7ewv2fl/feBI/HKxaOI2IiGyroV9NpGcXwdPdRVYncWs8EuiJ8wqa+SD17Vh4uls81FovU8/fZidLNWvWxK5du9CpUyed5cnJyejVqxeKioosi9gEUVFR6NSpEz777DMAgFqtRkhICKZOnYrZs2dXWX/YsGEoLCzEb7/9pl3WpUsXREREYNmyZdBoNKhXrx5mzZqFf//73wCA3NxcBAUFYdWqVRg+fLjeOEpKSlBS8rDlIy8vDyEhIYInS6z1Q0REdN+7T7fGyKhGgu7T1GTJ7JG0ffv2xQsvvIAjR45ol6WkpGDSpEmilg8oLS1FSkqKzms4OTkhJiYGSUn6m0OTkpKqxBQbG6td/9KlS8jIyNBZx8fHB1FRUQb3CQBxcXHw8fHRPkJCQqx5a0RERFQNKUvZmJ0sffXVVwgODkbHjh3h7u4Od3d3dO7cGUFBQfjyyy/FiBEAcPv2bZSXlyMoKEhneVBQkME56TIyMoyu/+C/5uwTAObMmYPc3Fzt4+rVq2a/H1OsGN1RlP0SEREpja1uNtLH7M6/gIAAbNmyBefOncPZs/dnmA4LC0Pz5s0FD06uHiSJYuvXMgiXFwxCRm4xusQliv569mzvq73hW9MNGbl30cTfE05OKmTlF6Pvh7uQbyfjD8QwrU8zPN2hAXp/uFPqUBRp76u9kXu3DCqo4ObihMy8YjzazB8pV7KNDgAnaS0aHoGnIoyXyFDyMInfpnZD6/o+AICP/kjDp9v/lDiiqqKb1MGq5zth2+kseHq4SF4g2eKRUs2bN7dpguTv7w9nZ2dkZureBpmZmYngYP23MgcHBxtd/8F/MzMzUbduXZ11IiIiBIzeOsE+Hkh9Oxat5/1u89dOfr0vfjl2A//ZbHwSXyENaV8fcwaGo9O726zaz4X3BlYprFmx8FmglwdS3uyH5m9step17NmMmOY6RSi9PVxwdG5/NH1ti5GtCABS3ohBHU93NKj9cFmzQE8AQFiwcGMbSXjVJUpK9yBRAoCBberKLlmqWNpgUNu6Rta0HbOTpeeff97o81999ZXFwRjj5uaGyMhIJCYmYvDgwQDuD/BOTEzElClT9G4THR2NxMREzJgxQ7ssISEB0dHRAIDGjRsjODgYiYmJ2uQoLy8PBw8exKRJk0R5H5YS+g4AUwV6edj0wP5/vZpaVZ7//aFt8Or6kwBMq0BuTQFMR1C5WreTk8rmld2
VaMu07qjjabj12dA0MiS9rdO7Sx2CoFaM7ogJ3z4sHzAmWneAdHhdeSTuR9/sh9oynnXB7DPwnTt3dP5dVlaG1NRU5OTkoE+fPoIFps/MmTMxZswYdOzYEZ07d8Ynn3yCwsJCjBs3DgAwevRo1K9fH3FxcQCA6dOno2fPnvjoo48waNAgrFmzBocPH8by5csBACqVCjNmzMB//vMfPPLII9rSAfXq1dMmZAQ0Dqgl+mv8X6+mmBHT3OrkZWiHBrhwqxBdmrAYoBhkVJJF1lrWM34CYsIpX6YmDwuGtMHsDSdFjsZ6bSq0IgHA3CdaSRSJcXJOlAALkqWNGzdWWaZWqzFp0iQ0bdpUkKAMGTZsGG7duoW5c+ciIyMDERERiI+P1w7QTk9Ph5PTw5Nt165dsXr1arzxxht47bXX8Mgjj2DTpk3aGksA8Morr6CwsBATJ05ETk4OunXrhvj4eJNrLNmzhJd6AADq+9ZA04BauHCrULTXmtW/hSAnEBdnJ7w2MFyAiOjzkR2qLLN1Yd2IEF8cu5pj2xclh9UiyPT5yaKb1hExEuFUPqwyUbeMIH0QTk5OmDlzZpVpUcQwZcoUXLlyBSUlJTh48CCioqK0z+3cuROrVq3SWf/ZZ59FWloaSkpKkJqaioEDB+o8r1Kp8M477yAjIwPFxcXYtm2bbAerz3nMePfU7GqeN9cjFQ4cX47pZGRN6ywc1o4/YBka2ObhWIFuzfwBACM6N7TZ6/8jqiH+3b+FzV6PKLyu6cmSbw15t4Q8oFKp8ES7egCAf3VrLHE0yiXYQJgLFy7g3j3eVSSm6popJ3RvggVbzwryWp0b63ZjuYiYzNR00/81rOvjgZu5xaK9LhkW4ldD599fjIrE4St30NWGV9Oz+jXH2Yx8m70ekauz6e0HXh7SjCM1l29NVyweHoEPnmnLsXJWMPuvPXPmTJ1/azQa3Lx5E5s3b8aYMWMEC4zMJ2Q+s2KU7Wo8GRoH42SjPp9PR7TH1B+P2uS1lEIF3c++lrttb9197+k2fw+QVl6yZMuEkiyT8FIP9Fu4u8pyc8ZMynCu1ypaBHlpE0A5J0qLhkdIHUK1zE6Wjh7VPak4OTkhICAAH330UbV3ypF1qvttCjlTs3cN3a+GmAcGQzPu2GqCxyfa1cNX+y7haHqOdlmzQE/8qaA5k4RW3d97eKcQrDkkfDHW+YNbY1jHEEXfpaiEk6ijeyTIC+0b+ur85gHzWtCFPN6K5etx4g2fEJISSjWYnSzt2LFDjDjIBLZqaRkZ1bDKgUDM15bDDVZulZrfp/ZphulrjkkTjAxU99ee1b+FKMlSfV8PnURJ/qejqoa0byB1CGQCfV1ulUtlKF093xrVrySxjf/XVeoQTGL25VufPn2Qk5NTZXleXp7opQMcnbF8ZfGI9oK9zvBOVQfxituyJN6+TfVsx4fz+w3pUB+Pt62HR5uxO8WQAC9xKtj3bhEoyn5taUgH+V8lE/BKbNWbB2x1QUoPtW9Yu/qVZMDsZGnnzp0oLS2tsry4uBh79uwRJCjSz9Dv+Njcfnjy77sdrFXXxwNtGvhUWS5uy5KBbjjRXrGqGhX68z9+LgLOTio819FxJ0g2pYthmAifT5XXVeC5SwndM47sw2fbAQAiG1U9SVvTsLRyTEd0Dr1/Y4xYFxP2ZkrvZlKHYDKTu+FOnDih/f/Tp0/rTDRbXl6O+Ph41K/PKyoxVR50C9z/cfvWFO4W1j5h+q/sC0WcP81Qy5LULU5PtqvnsF1xppwzhM4J9rzSW9gdElXy32fa4pnI+92k+pJaaxJdF2cn/O/F+7NDLNnxJz74Pc3ifTkKpdxRCJiRLEVEREClUkGlUuntbqtRowY+/fRTQYMjXWo92cNLMcLWhHqpn/79+Yt4paTvfdmavmOkSqXC+knRnPDUAHNuszZFiF/VGcX1XSAQWaq61mJrvm0Vb0gZ1imEyZKdMTlZunTpEjQaDZo0aYLk5GQEBDy8jdj
NzQ2BgYFwdpbvrYn2QF/f7pQ+wjVjNqpTE/4G5rPy9nAV7HVMZah7TgyGDpKRjRx02hQTzhrDOoXguwNXxI+FyFbMzJb03VEHAP6e7hgZ1RA/HEwXJi471aiO+FNpCcXkS8NGjRohNDQUarUaHTt2RKNGjbSPunXrMlGygcb+Vb9YQo6PWDHaeG2lT4ZFCPZaFRlqWbJlg1OYTCaTVJLW9auObRMah/8oy6gujapfScbMHZvZ/e/K9vq8+3QbLBzWztqQ7FpsqyCpQzCZSS1Lv/zyCx577DG4urril19+Mbruk08+KUhgZJl2DXxw/FquRds2r2ZepMHt62PG2mMW7duY2FbBepebmyuNjLJ8Ko7G/rWw4f+6IsDITPGOhDkKWaJ3WIBsWxuruxgErBvgre949XT7BhjQqi7itp7Bt0ny/Fyk4lPDVVE3Q5iULA0ePBgZGRkIDAzE4MGDDa6nUqlQXl4uVGxkgf/r3QwvfJdi9nbrJ0lX68LQdCfmemWAdXPjdVDILay2oKSDGMmHnMeY/ZlVgH4tjbdkmBt/xQTJ0MVmDTdnhNSuOh7PUc0f3Bof/5GG5SYkr3JiUjecWq1GYGCg9v8NPZgoSa9/yyDMf6qVWdsM6xii9zZaqZnbDedTw/bjqohIGfS1GvVuoTuFjzXXCPWNFICs4cZhKg+M6tIIR97sh06hyhoPqtw5BUgvlUqFaDPnpnpvSBuRorGW9HfJOSpTzxnH5vbD+knRosXRvqGvaPsmx6JvPFLdSgmOuS2qpl7QPShXQPcpseXapP6PxYsXm7zDadOmWRwMCcPFybwc2FmmJf7bNfBF4tksqcMgI3xruqGBiF0M7i7OqOHqjLtlbLVWAjl3Zes7P1dOdsw9FJpa9sTD1RkhfjVwNfuueS9AsmFSsrRw4UKTdqZSqZgsyYCLs+m/+DcGhZu1734tg5BwOtPckCzy32fa4ovdF7F890WbvB49ZM6Fn9iF5eSazFNVPjVd0bKuN07fzJM6FJNUnqzb3DFLajtp/I6f0R07zt5CdmEJVuy5JHU4smTSUe7SJX54SmJOscC6PuZNtPhMZAObJUt1PN3x2sBwJksSMOekUdPNBRv+ryuGfL5flFgqn9BI3uQ6dlDfjSSVW4bM7R2yl+9mWLA3woK9UVxWzmTJAKvGLGk0Grv5stgTFzOuxHnRTkIQs/uFRxhlmBHzCAD51sbqqmcsZ+WiiB1DzfsemzP7gJzvFHzAw9UZAwyUcvlxQhcbRyMvFiVLK1euROvWreHh4QEPDw+0bt0aX375pdCxkYW8zbiyU+JAOxKfnL4WvB5ThtoCzlEpBn0tXuO7NcbgiIeTkIcFm1ec1l664SoyNHNCgJd5f98ars5oUNu8ngs5MztZmjt3LqZPn44nnngC69atw7p16/DEE0/gpZdewty5c8WIkcxkTjecvbQsLRoeIdq+vRU02SORVOQ+tqx2raonew9XZ7w+qKX23+a+A3PG68npAsQYIS5O+rcMwo5/90LirJ4Y0qG+9TuUAbPPAkuXLsWKFSswYsQI7bInn3wSbdu2xdSpU/HOO+8IGiCJy0uCOd8qetB0b62+4eKVzXeS+UnAHjTRM5XPA7acI5As9+DWfKUkBQ/41XJDXR8PqGD+eKt/dW+CI+k5eLxtXXGCk4AQv7aKBSc/fi4CG45cF2Cv0jK7ZamsrAwdO1atvBkZGYl79+4JEhTZTpcm0hYG6xtWfZIzorPxmcLFZu58UWS+n4xUkGc3nDKY0aAtK85OKux5pTd2v9Lb7AsjT3cXfPt8ZzzXsfpjlFKOIuaOQ149IcrkdTvKsPixqcz+eo8aNQpLly6tsnz58uUYOXKkIEGRaawtdNY51M/sMUtC/+BNGSD59pOtBX5V87BhSRhe7oYbsv30dJE8UHJPLUY4JDBnM+u7yYmLsxNclJrtCczci5OmAZ4mr2vOeFq5sWgwxsqVK/HHH3+gS5f7o+MPHjyI9PR0jB4
9GjNnztSu9/HHHwsTJekVXte8wYiVyaF7w5QI3FykPYg54iB4Md7zyC6NsGzXBcH3S/LwINdQwl1fUlDKccTcs0KQt4fJ64YoeMC32clSamoqOnToAAC4cOH+gc/f3x/+/v5ITU3VrqeUL4Yjk0P3kjm33krFEVuWxHjLz3cLZbJkx+TcssTpRkwnRjmgH/4VhZ+PXces2BaC79tWzE6WduzYIUYcJAE5JEtKqNMlh8/J1sR4y94erqjvWwPXczjlgz3ykLgF2JhaMpjI1lZHkYgQXxy7mmPx9ua05B+Y09ek7R9t5o9Hm/lbHJMcyPfbTdWyNtGQw4WgEuqU9Gsp3p12RHLx8+RHseyfHYyus97IQPzAv7tj5Hht4Ug9HdYeUk2d1eGfXRoi2Mf0LjilM/t0WVxcjA8++AADBw5Ex44d0aFDB50H2Y61dU26PxIgUCSWC61j+JZxuZjzmHnz59kDIc4tayc6dsVfpWkX4gt3V+MtMI3qGJ402VnGCcn/9W4qdQg2a1qy9iJ6YJv7ZRD0FZR0r9Bq5Ggt7mZ3w40fPx5//PEHnnnmGXTu3NmhMna5sTZZGt+tsUCRWC7Ay13qEKpVQwZN+EpUx1PeFZ3poZn9mpu0nrEjjpxPBYFejtMCYs50V/p0buyHbTN7op6vB1rO/V3nuUNvxKDtW38Y3V7GXwOrmJ0s/fbbb9iyZQseffRRMeIhM1ibqJpT6VvuHHEQtvyZ9kcZ2Eb/XFRkG5GNamNaX+uLwz64eOMFtLTeeao1XvguBS/0bGLxPpoF6i8H4F2hiHGNCq2QgV7uyMovsfj1lMDss2X9+vXh5eUlRixkpoGtLT/JDLKw4qxcD4T6ZhQnywlx+7e/npalcY+G6vz76fb1seQf7L6XUoCn6a27lX//Z+cP0P6/o3XLmMtWn06TgFrYN7sPRkeHWr0vfUn0W0+0RLsQX0zq9bBrc40DdLmbnSx99NFHePXVV3HlyhUx4qFqLB8VCW8PF3wxKhJ1zDjIVfbRs+0EjIqoKt+ablg4TPd7Nr5bY0zt00z777ghbapNwMd2DRUjPFE80a5e9SvJjE75DiPDXWpW6o6OCQ/SaZ2Wa+suczjLvVRhOqoHBSXHPtoYP09+FL4VJk5uYkZhSqUyO1nq2LEjiouL0aRJE3h5ecHPz0/nIZbs7GyMHDkS3t7e8PX1xfjx41FQUGB0m+LiYkyePBl16tSBp6cnhg4diszMTO3zx48fx4gRIxASEoIaNWogPDwcixYtEu09CKF/q2Acn9cfsa2s67rwqGYgJzm2lwWqh9Kxke4xQaVSoXV9H7P28dpA5QywXzQsQuoQzGbqRdcbg1rqDOLuGx6o01oS9PedUcxNpCVkUVCVSoWvxnbEZ/9ob/K4L3tNTs3uuxgxYgSuX7+O9957D0FBQTbrlhk5ciRu3ryJhIQElJWVYdy4cZg4cSJWr15tcJuXXnoJmzdvxrp16+Dj44MpU6ZgyJAh2LdvHwAgJSUFgYGB+P777xESEoL9+/dj4sSJcHZ2xpQpU2zyviwh164wsg8n3uqvMzZBalJXcDeHEiddDq9r2rAKZyfdciO9WgTAyUmFXS/3Qlm5WlbfGUcm9OmhjwnzdzoCs5Ol/fv3IykpCe3a2a4b58yZM4iPj8ehQ4e0k/h++umnGDhwID788EPUq1e16Ts3NxcrV67E6tWr0adPHwDA119/jfDwcBw4cABdunTB888/r7NNkyZNkJSUhA0bNhhNlkpKSlBS8nAwW15enhBvk0gWbHnSY94vveAK01UYmwKpcmv0g967RpXKfyigdJok7P1zaRJQCxdvFSIm3D6TK7Mv2cLCwnD3rm0r8CYlJcHX11ebKAFATEwMnJyccPDgQb3bpKSkoKysDDExMdplYWFhaNiwIZKSkgy+Vm5ubrXdiXFxcfDx8dE+QkKqn3GaiO5raeWchiSs6oquvjYwDP1aBmFgm7omtWoroSq/JOz8Y1kzsQv
eeaoV4oa0kToUUZidLC1YsACzZs3Czp078ddffyEvL0/nIYaMjAwEBgbqLHNxcYGfnx8yMjIMbuPm5gZfX1+d5UFBQQa32b9/P9auXYuJEycajWfOnDnIzc3VPq5evWr6myGLTJZDUTkSRIhfTYyMaogJ3RvD3YVj56TUPMiz2gRoYo+mWDG6o8mlRh638E5bexfobZuaclK11gZ6eWB0dCi87LQ71uxkacCAAUhKSkLfvn0RGBiI2rVro3bt2vD19UXt2rXN2tfs2bOhUqmMPs6ePWtuiBZJTU3FU089hXnz5qF///5G13V3d4e3t7fOg8T1Qk8mS/bk3afb4PVBLaUOw+GZOxi4Ym0dQ0VHn41kS7s+Hzxjm6ErQg7wpocEnUj35MmTZu1r1qxZGDt2rNF1mjRpguDgYGRlZeksv3fvHrKzsxEcrP+usODgYJSWliInJ0endSkzM7PKNqdPn0bfvn0xceJEvPHGG2a9ByIyzMvj4SHG2orzUmoaUAsXbhVKHYbgKrdCVNeD5uykwsm3+kOtgcFWQSUOcreFED/DU8UIieMAxWF2stSzZ0+df+fn5+PHH3/El19+iZSUFLPuIgsICEBAQPXzk0VHRyMnJwcpKSmIjIwEAGzfvh1qtRpRUVF6t4mMjISrqysSExMxdOhQAEBaWhrS09MRHR2tXe/UqVPo06cPxowZg3fffdfk2Mm2+PtXJt+abn934aisqhj/xqBw/GfzmSrLXxsYhve2iN/6/Eigl10mS5aw124WImMsPnrt3r0bY8aMQd26dfHhhx+iT58+OHDggJCxaYWHh2PAgAGYMGECkpOTsW/fPkyZMgXDhw/X3gl3/fp1hIWFITk5GQDg4+OD8ePHY+bMmdixYwdSUlIwbtw4REdHo0uX+9VGU1NT0bt3b/Tv3x8zZ85ERkYGMjIycOvWLVHehz1g0kKm+Pi5h10O/VoGoVeLQCNrV69Lkzp6l0/swe5Zoop4jBaHWS1LGRkZWLVqFVauXIm8vDw899xzKCkpwaZNm9CypbjjD3744QdMmTIFffv2hZOTE4YOHYrFixdrny8rK0NaWhqKioq0yxYuXKhdt6SkBLGxsfj888+1z//000+4desWvv/+e3z//ffa5Y0aNcLly5dFfT9E9qZbM3/s/fM2AGBIhwaivla/lkGCFc50ZI5Qs83+36EuR/ibSsHkZOmJJ57A7t27MWjQIHzyyScYMGAAnJ2dsWzZMjHj0/Lz8zNagDI0NLTKLaseHh5YsmQJlixZonebt956C2+99ZaQYZIZPN2Fmc8tLJhzFcqBmGOS6vo8rAV0dv4AVqAXSOW/mJIKgJrKzu/Yr4KpkjhMPltt3boV06ZNw6RJk/DII9bPUE20ckzH6lcyQd9w67p4SBhinpTqeLpj/aSuqOnmLEmiZK8X65XfV9em/ujXMgihdWoi8WyWwe5PIkdjcrK0d+9erFy5EpGRkQgPD8eoUaMwfPhwMWMjkbw2MEzqEAAAnh7CtCyRcHo0r/6GC6lENjKvNImQTEmWOjT0FT0OofnV0r3939lJhRWj71/EvDYw3C66dJT/DsxjB38yWTK5zbVLly5YsWIFbt68iRdeeAFr1qxBvXr1oFarkZCQgPz8fDHjJAGNjGokdQhmM3bQfrSpvw0jsW+Lh0dYvG2vvxMtL4G6V5WkRZAXvhrbSeowzLZgaFuDz9lDouSI+HcTh9kd1LVq1cLzzz+PvXv34uTJk5g1axYWLFiAwMBAPPnkk2LESGTQ2old0LUZkyWh+NbUX2jQFKOjG+HzkR2wbVbP6ldWmOoK/T3Vvp5Vn51U6vvWkDoE0TnamCUSh1Wj+Vq0aIH//ve/uHbtGn788UehYiKRyeXCQ4gppKI4pkI2XJydMLBNXQRVmJiViMgeCHLrg7OzMwYPHoxffvlFiN0REcnGCz2aSB0CWUEm14akcPZ3nyhVi3MHEZmuZ/MAaETozAmtY5vpLxxd+4bS3Rh
A9oPJEilCTdbVIRmz5ALkt2ndRYiEKvt8ZAepQyA7wGTJASlxzBIn5yR7I1RRVjKOY+hICEyWiIiq4eHClk0iR8ZkiSTTsp63VdsPaltXoEiIDNMAeGWAOIVcZ8RwNgQiJWCyRJKxdi6xz0a0FygSIuOCfTwQI8K0OjNimgu+TyISHpMlBySXMUvWYqVaYS0Y0kbqEGRt2T8jpQ6BiCTCZInMwmq49qtnC/nOCycHLs48XBI5Kv76HRDrLJE+/F4QEenHZImIiIjICCZLDohDfYiIiEzHZImIyAhTiqfyAoTIvrGErAPicZ1IWOZUoycSQ4eGvmgS4Cl1GHaLyRJJ4rvxnaUOgYjIbmz4v0elDsGusRvOAcmhPlGLIC+pQyA70ahOTalDICI7x2TJTomVD2nY30Ays+6FaJu91sx++ituy+D6g4hExGTJAcniuC6LIMge+Hu62+y11AYuFngNQWTfmCyRYoyObiTZa38+soNkr03Gid2qo6lQt55JEZFjYrLkgJTaZdC+oa9kr13L3f7vhahdy1XqEGSpYmXzIR3q619Hob8pIjINkyVSjKfa6T9RkfW6Nq0DdxdnqcOQpYotS43q1MKJt/rj4nsDJYyIiGzN/i+XSUen0NpW3Q0X4ifdnUdOTg/jXjyivU1f294HtvvUUG6rkq3v7vT2UO5nRUSWYbLkYDo0qm3V9uF1vQWJw9JJWy8vGITCknsO0S1G8ubv6YbbBaVSh0FGBHjZbvA/2Td2w9kpMa+1/Wq5Wb0P7xqWJztMlEgOvn0+SuoQqBprJ3aROgSyE0yWSBIcH0NK17KeMK2sJB5XZ57iSBj8Jtkp+x5hQ0RUPd6lSEJhskRERHZJDlM7kX1QTLKUnZ2NkSNHwtvbG76+vhg/fjwKCgqMblNcXIzJkyejTp068PT0xNChQ5GZmal33b/++gsNGjSASqVCTk6OCO/AtniIICJHx+MgCUUxydLIkSNx6tQpJCQk4LfffsPu3bsxceJEo9u89NJL+PXXX7Fu3Trs2rULN27cwJAhQ/SuO378eLRt21aM0IlkjxfgholdNYKfPZH8KSJZOnPmDOLj4/Hll18iKioK3bp1w6effoo1a9bgxo0berfJzc3FypUr8fHHH6NPnz6IjIzE119/jf379+PAgQM66y5duhQ5OTn497//bVI8JSUlyMvL03kQkeOyJt9ZPNy2NcOIyHyKSJaSkpLg6+uLjh07apfFxMTAyckJBw8e1LtNSkoKysrKEBMTo10WFhaGhg0bIikpSbvs9OnTeOedd/Dtt9/Cycm0jyMuLg4+Pj7aR0hIiIXvjIjkzpSWH2san+r6eFixteV+m9pNkte1JbbakVAUkSxlZGQgMDBQZ5mLiwv8/PyQkZFhcBs3Nzf4+vrqLA8KCtJuU1JSghEjRuCDDz5Aw4YNTY5nzpw5yM3N1T6uXr1q3htSOB5/7E/7EOuKldozsbvhOob6ifsCBrSu7yPJ69qSpcVviSqTNFmaPXs2VCqV0cfZs2dFe/05c+YgPDwc//znP83azt3dHd7e3joPpfBgfSPSY+yjoVKHoGjWnpKFKPRKVbFliYQiaSnkWbNmYezYsUbXadKkCYKDg5GVlaWz/N69e8jOzkZwcLDe7YKDg1FaWoqcnByd1qXMzEztNtu3b8fJkyfx008/AXg4/5e/vz9ef/11vP322xa+M/ka372x1CGQDLF4n7R4ThcHP1cSiqTJUkBAAAICAqpdLzo6Gjk5OUhJSUFkZCSA+4mOWq1GVJT+KQciIyPh6uqKxMREDB06FACQlpaG9PR0REdHAwDWr1+Pu3fvarc5dOgQnn/+eezZswdNmza19u1JSqVS6e0/4CSgRETSGNAqGPGn9A8dsZSrswo9Hqn+PErWUcQkW+Hh4RgwYAAmTJiAZcuWoaysDFOmTMHw4cNRr149AMD169fRt29ffPvtt+jcuTN8fHwwfvx4zJw5E35+fvD29sbUqVMRHR2
NLl3uzxdUOSG6ffu29vUqj3Uix8aK6GSMtRNUk2NY+s8OaDxni6D7PPX2ALg6sw1NbIpIlgDghx9+wJQpU9C3b184OTlh6NChWLx4sfb5srIypKWloaioSLts4cKF2nVLSkoQGxuLzz//XIrwicgO7XmlNy7/VYhOEg3SpmrILIcQuqL4sn9Gws2FXei2oJhkyc/PD6tXrzb4fGhoqHbM0QMeHh5YsmQJlixZYtJr9OrVq8o+iADZHXNJJkL8aiLEr6bUYZAB9n43XI/m/lKH4DCYkpLNvT4wXOoQiEzGyyflkuPdcP8dypkilIjJEpnN2gPQsx0bCBMIkZ2Q40mdiB5iskRmc8SeSgd8y/Q35jHKJcu/nSyDouowWbJT/D0SCcMWibKnu2KGj5KM2PuYLDlhsmSnxDzAs8vAvrg48Q8qtaX/jJQ6BLsk9N1nQggL9pI6BLIAkyUiBxc/o7vUITi88LrKmTKJrNO2ga9g+5JhLmi3mCzZKTF/QxO6N7FqezYdy0uzQF7pkrDk0lgpkzCq2DazJ0LrWFdyIrZVEDxcOdenrTBZIrNN7GFdskRE9m3r9B5ShwBAvi0vzQI9sfPl3hhnxQTWrw9sKVxAVC0mS2Q2a8cBuLrI9AhGpEdNN169m6uFTMblyHHMUkXznmiFl2KaSx0GmYDJEtlcTTfe+UPK0ZHzvimWTw35Txw+pEN9qUMgEzBZIiIywNvDRfatE6Rf16Z1pA5BVPxa2haTJSITOPPIRCL76cVo/COqIfxquUkdCtmQb035t34RkyUik9j7VSrpZ8tWpY6hfnjv6Tbw9mA3tSPx8nDF20+2kjoMqgaTJTvFhhBhuTjb50/F35OtGMZoHHFuH7K5MV1D4Wanxxh7wb+OA2nsX0vqEEhmpvZ5ROoQiEShtAvG2NbBUodARjBZslP6Loi/G9/Z9oEQKRgHdytDVGO/KsuUVvzW3Gj51bQtJksOpEFt6yrGkv3hAZfswZKRHfBybAud8V5ju4ZKF5AFzP0turnw9G1L/LTtFE+CROQo/D3dMbl3MwR5e2iXxbQMkjAicT3Rrh4CvTyqX5EEw2SJyIF1Cq3afeEomgTIcwyfvd5MQMaZen37bGQDfDqivaixUFX8VRI5MEee7d5FLrO9VvLJsAipQ1CUaX2aSR2CIEwdH9c3PFDkSEgfJktkU+tejJY6BIuN6NxQ6hAEJddkQSyD2tSVOgSTtK7vI3UIijKt78M7Op/+e+qQVvWUdxHgWL9G5WH1M7KpNgo+Ebz1ZEv8mJwudRhkIedKyWHvsECcyyyQKBoSSsUWmYndm6B1PR9ENPSVLiCRsfSXNNiyRGQidxfOPm8vxnYNxUsxzXH4jRipQyErVUyBXZyd0KN5ALw9FDiFCJuWZI3JEhE5hIrjs956shU8XJ3h7+nO+mMKFhMeBCc76U42tS4UG5akwWTJToldkO3xtsoY/0EEAGsndsHwTiEAqk5c2v2RALgbqFnDEhzyFlrHfmrHVfyudWniuHepyhWTJQfx/tA2gu7v1QFhFm3Hkw+J4Y1B4Uafj2pSB7VrueH43P44MKevyfu11/Ehe17pLXUIgrCn40nFt7JqXGcMjqindz17/U7KHZMlO9UxtLbOv9s3rG1gTcuE+Fl2RccfOknJp6YrPFyrjj0bGtlA7/orRncUOyRJWPr7JfEE+zwsMunh6oyFLCEhK0yW7NSi4eIXLfOrxRnryT7MfbwlPvtH1d9MZz1zjpF82NOUHy/2bIrBEfWwfFQkAM5LKDf2800jHQFe7lKHQCSp6rrmKvJwdcbjbfV3e5B8TezeVOoQBFPL3QWfDG+P/q2Ctcv0fYc7NRa2l4BMwzpLDqK+bw2pQwCg/DEGL8e2wAe/p0kdhiDmPdFS6hBENbZrKEruqdG1aR2Tt2lUpyau/FUkYlQkJJ+aCiwRYIZ/dW+CQW3r4nxmATo39kNRaTlb9CXCliUH0CLIC7Xchc+LH9xdRMo0Kjp
U6hAE41np+/1iz6ZwcXbC5N7NzBqv9/34KKFDs0hYsJfUIZBM1PWpgR7NA+Dh6sxESUJMlhyAWF1yL/VrLsp+5UzDEeqyNKSD7gDtJv6WTZIrl4HPP07oInUIRFSBYpKl7OxsjBw5Et7e3vD19cX48eNRUGB8qoLi4mJMnjwZderUgaenJ4YOHYrMzMwq661atQpt27aFh4cHAgMDMXnyZLHehl1xtWB2dFcnxXzl9Gql4OlaKlr2zw5ShyCoygN9Dd3dZooH40S2Tu9uVUzWqM0WBCJZUcyYpZEjR+LmzZtISEhAWVkZxo0bh4kTJ2L16tUGt3nppZewefNmrFu3Dj4+PpgyZQqGDBmCffv2adf5+OOP8dFHH+GDDz5AVFQUCgsLcfnyZRu8I9uR0zghpVfb7dU8QOoQBNEkwFPqEERVeR44c/yrexP8q3sTAaMhIqVTRLJ05swZxMfH49ChQ+jY8X7dk08//RQDBw7Ehx9+iHr1qt7Fkpubi5UrV2L16tXo06cPAODrr79GeHg4Dhw4gC5duuDOnTt444038Ouvv6Jv34eF6tq2bWubN+ZgFJ4nAeDtvEREjkgRfSJJSUnw9fXVJkoAEBMTAycnJxw8eFDvNikpKSgrK0NMzMOJMsPCwtCwYUMkJSUBABISEqBWq3H9+nWEh4ejQYMGeO6553D16lWj8ZSUlCAvL0/nIWdyGWbjU8O+71xRkooF8IjEsGpcJ6lDIBKMIpKljIwMBAYG6ixzcXGBn58fMjIyDG7j5uYGX19fneVBQUHabS5evAi1Wo333nsPn3zyCX766SdkZ2ejX79+KC0tNRhPXFwcfHx8tI+QEN4VZopvn5fHnUbW+nykssf71PXxUOas7NWYEfMIAGBSL/upvSOG1wZaNlWRudwsGNNIJFeSfptnz54NlUpl9HH27FnRXl+tVqOsrAyLFy9GbGwsunTpgh9//BHnz5/Hjh07DG43Z84c5Obmah/VtUTRfW0a2Mfg6IFtlD2JsKXz+snd9L6PYNfLvfBKbAupQ5G1lnVt8zsUu0F7dHQjkV+B6CFJxyzNmjULY8eONbpOkyZNEBwcjKysLJ3l9+7dQ3Z2NoKDg/VuFxwcjNLSUuTk5Oi0LmVmZmq3qVv3/kmvZcuHxfkCAgLg7++P9PR0gzG5u7vD3Z0VskmZnjIwQafSqVQqNKpjWckAUp55T7SSOgRyIJImSwEBAQgIqP7uoujoaOTk5CAlJQWRkffnzdm+fTvUajWiovR37URGRsLV1RWJiYkYOnQoACAtLQ3p6emIjo4GADz66KPa5Q0a3L/VODs7G7dv30ajRvZz1dIsULw7n2b2a46PE86Jtn8SHgepOzZ7+fNbc8cjkbkU0akcHh6OAQMGYMKECUhOTsa+ffswZcoUDB8+XHsn3PXr1xEWFobk5GQAgI+PD8aPH4+ZM2dix44dSElJwbhx4xAdHY0uXe4XfGvevDmeeuopTJ8+Hfv370dqairGjBmDsLAw9O7dW7L3K5SfJz+KiT2a4N8idks0trD4HxHZN7ncWEIkBEUkSwDwww8/ICwsDH379sXAgQPRrVs3LF++XPt8WVkZ0tLSUFT0cF6nhQsX4vHHH8fQoUPRo0cPBAcHY8OGDTr7/fbbbxEVFYVBgwahZ8+ecHV1RXx8PFxdlT8Atl2IL14bGF5lKgghtarnbfT55x9tLNprE5Fj+m58Z6lDIAejiDpLAODn52e0AGVoaGiVqSg8PDywZMkSLFmyxOB23t7eWLlyJVauXClYrI6kuuKGA9sE46t9l2wUDRE5ApYhIVtTTMsSKZOnh2LycSKHIOYYxoo0ot8PR2Q7TJZIND2aByAs2BuTejXF20/a150r/VsGSR2CRQa00n/3KMnPf4cKP5PA/MGtEeTNgqRE5mKyRFZ7f2ibKsu2zeyBlWPuV1x/dUAYxnQNtXFUVNn4bo3x2T/aSx0Gmei5TiGY3vcRi7evXHzSw9UJo7rYz12
+RLbEZImsNqxTwyrLmgV6wdWOK/h2buwndQjVmv9UKzzRrh46N/ZDTHggXhnQAi52/DchXX61HtaCi20VhM3TuksYDZGycUAJCWJwRD1sOnZD6jBsZmzXUPxn8xmpwzDo7PwB8HB1xqjoUKlDISsIVRPpi1Edq19JQVRgjSWyLV5mkiAcrcVCzu/33/2bw8PVWeowiIjshnyP+KRYXZrIv4tKCPOfaoUgb9tPe+PqrMKrA8Lg71n1tVeM7ogXe3IiWXsxMkq5Y4zELEpZ050XA2RbTJZIcD9O6CJ1CDYxKjoUB+b0telrTuvTDGfeGYBJvZqifu0aVZ7v1zJI1q1eZJ4AL3d4WVBUdmqfZiJEIx9Nq6nvRiQ0jlkiQbzYswl+SrmG5zo2cKi5x2z1XldPiMLtglIMaBXMZMjBODub/x2b1b8FUq/nihCN9A6+ZtsLFCKAyRIJpFmgl3ZQMQkrtE5NdG3qX2X50xH1cPxqju0DIptaOCwC474+ZPZ2rev74JvnO6O+rzR1lcTqhWOdKJICkyUSDBMlcQzvXLU0AwCMjg5Fi2BvtK7vjRW7L6KpjSozk231bhGIc/95DM3f2Gr2tj2bB4gQEZHjYbJEJHMTujfRu9zJSYXopnUAADP7t7BlSGRjbi7segWAr8d1kjoEclD8BRLJnLOT44wBIzKmd4tAqUMgB8VkiYjIjnR/pOr4Nil0Cq0tdQhEgmGyRCRjjf1rSR0CKcw/DIxxs7Wabi5I+88A7Hq5l9ShEFmNyRKRjK2f1FXqEIgs5u7ijEZ1mPCT8jFZIpKp/wxuDb9ablKHQSQL7hzkThLit49IpmrXZKJED3VrZtpYpFoWVPyWuyHt6+Ps/AFSh0EOjMkSkUzFtOSdP/TQN893Nmk9U5MqW/p9Rg+rtv94WIRDzQxA8sNkiUiGmgV6wt2FRT7pIVNKSIzoHAInGZaa8KnhKnUIRFZhskQkQ33C2KpE5pv9WLjUIehlTVHNiBBf4QIhshCTJSIZmtmvudQhkALJtQXHr5YbJvVqatG2a1/oInA0ROZjskQkQ5xnj+zNqwPCqiwb361xtduxO5rkgMkSERHZXFiwF2Y/VjWBIpIjJktEVvp0RHupQyBSnC3TusOlmsHo0/o0s1E0RMYxWSKyUpv6PlKHQKQI7f4erN3EvxacnFRQqVSYEfOIwfVHdmlko8iIjGOyRESkEPOfamXwufeHtrFhJJZZMSoSU3o3w3f/itIuMzTwu12IL4K8PWwVGpFRTJaIZODrsZ2kDoEUblgneUyga0ygtwf+HdsC9X1raJe5uzhjWMeQKut+ObqjLUMjMorJEpEMRDXxQ9emdQBw8lxyPG/raTEL8HKXIBIi/exvEiEiG9MIsA8nlQqrJ7CeDFlm6cgOUodgFZbKILljyxIRkVIYmB+ttx1UfOfUbyRnbFkikgGNEM1TZPfq1HLTu9weEo2k2X1x8NJf+PX4TTwVUU/qcIh0KKZlKTs7GyNHjoS3tzd8fX0xfvx4FBQUGN2muLgYkydPRp06deDp6YmhQ4ciMzNTZ51Dhw6hb9++8PX1Re3atREbG4vjx4+L+VbIzgRybAXZyIBWwVWWxQ1pYxdVroN9PPBURH18OaYjnmjHZInkRTHJ0siRI3Hq1CkkJCTgt99+w+7duzFx4kSj27z00kv49ddfsW7dOuzatQs3btzAkCFDtM8XFBRgwIABaNiwIQ4ePIi9e/fCy8sLsbGxKCsrE/stkZ2o5e6C2FZBUodBDsDJSYVHAj21/w7wcseIzvK/C45I6VQajfw7AM6cOYOWLVvi0KFD6Njx/u2k8fHxGDhwIK5du4Z69apeheTm5iIgIACrV6/GM888AwA4e/YswsPDkZSUhC5duuDw4cPo1KkT0tPTERJy/9bVkydPom3btjh//jyaNTOtemxeXh58fHyQm5sLb29vgd41KUlxWTnC3oy3ePvT78Sipht7xal6724+jRV7LgG4nywdej1
G4oiIlMvU87ciWpaSkpLg6+urTZQAICYmBk5OTjh48KDebVJSUlBWVoaYmIcHkrCwMDRs2BBJSUkAgBYtWqBOnTpYuXIlSktLcffuXaxcuRLh4eEIDQ01GE9JSQny8vJ0HuTYeDcP2cqs/i20/+9sD4OViBRAEclSRkYGAgN17/ZwcXGBn58fMjIyDG7j5uYGX19fneVBQUHabby8vLBz5058//33qFGjBjw9PREfH4+tW7fCxcXwVX5cXBx8fHy0jwetUuTY2jbgtCckPg9XZywc1g7+nu5Y+k9llwwgUgpJk6XZs2dDpVIZfZw9e1a017979y7Gjx+PRx99FAcOHMC+ffvQunVrDBo0CHfv3jW43Zw5c5Cbm6t9XL16VbQYSTk2/t+jUodADuLp9g1w6PW+aN+wttShEDkESQdJzJo1C2PHjjW6TpMmTRAcHIysrCyd5ffu3UN2djaCg6veHQIAwcHBKC0tRU5Ojk7rUmZmpnab1atX4/Lly0hKSoKTk5N2We3atfHzzz9j+PDhevft7u4Od3feAUW6nJ1UaBfii+NXc8zaLtDLneOVyGwqdsER2YykR+iAgAAEBARUu150dDRycnKQkpKCyMhIAMD27duhVqsRFRWld5vIyEi4uroiMTERQ4cOBQCkpaUhPT0d0dHRAICioiI4OTnpHHQe/FutVlv79ohMkswBukREsqaIMUvh4eEYMGAAJkyYgOTkZOzbtw9TpkzB8OHDtXfCXb9+HWFhYUhOTgYA+Pj4YPz48Zg5cyZ27NiBlJQUjBs3DtHR0ejS5f60Ev369cOdO3cwefJknDlzBqdOncK4cePg4uKC3r17S/Z+Sbna1ue4JSIie6OIZAkAfvjhB4SFhaFv374YOHAgunXrhuXLl2ufLysrQ1paGoqKirTLFi5ciMcffxxDhw5Fjx49EBwcjA0bNmifDwsLw6+//ooTJ04gOjoa3bt3x40bNxAfH4+6deva9P2RfXj1sTCpQyAiIoEpos6S3LHOElUUOnuzWetfXjBIpEiIiMgYu6qzRKQk8TO6Sx0CEREJiMkSkcCCvDyMPv/V2I5GnyciInlhskRkY33COI8cEZGSMFkiEphvTVepQyAiIgExWSISmEqlQkx4YPUrEhGRIjBZIhJBsE/VcUs+NVxxfF5/CaIhIiJrMFkiEsHL/avWW9o6vTt8atzvopvWpxkAYErvZjaNi4iIzMc6SwJgnSUy5N/rjuOnlGuIn9EdYcEPvxsajQbp2UVo6FeTc3wREUnE1PM3kyUBMFkiIiJSHhalJCIiIhIAkyUiIiIiI5gsERERERnBZImIiIjICCZLREREREYwWSIiIiIygskSERERkRFMloiIiIiMYLJEREREZASTJSIiIiIjmCwRERERGcFkiYiIiMgIJktERERERjBZIiIiIjLCReoA7IFGowEA5OXlSRwJERERmerBefvBedwQJksCyM/PBwCEhIRIHAkRERGZKz8/Hz4+PgafV2mqS6eoWmq1Gjdu3ICXlxdUKpVg+83Ly0NISAiuXr0Kb29vwfarZPxMquJnoh8/l6r4mVTFz6QqR/pMNBoN8vPzUa9ePTg5GR6ZxJYlATg5OaFBgwai7d/b29vuv7Dm4mdSFT8T/fi5VMXPpCp+JlU5ymdirEXpAQ7wJiIiIjKCyRIRERGREUyWZMzd3R3z5s2Du7u71KHIBj+TqviZ6MfPpSp+JlXxM6mKn0lVHOBNREREZARbloiIiIiMYLJEREREZASTJSIiIiIjmCwRERERGcFkSWJLlixBaGgoPDw8EBUVheTkZKPrr1u3DmFhYfDw8ECbNm2wZcsWG0VqO+Z8JqtWrYJKpdJ5eHh42DBa8e3evRtPPPEE6tWrB5VKhU2bNlW7zc6dO9GhQwe4u7ujWbNmWLVqlehx2pK5n8nOnTurfE9UKhUyMjJsE7ANxMXFoVOnTvDy8kJgYCAGDx6MtLS0arez52OKJZ+JvR9Tli5
dirZt22oLTkZHR2Pr1q1Gt7Hn74ipmCxJaO3atZg5cybmzZuHI0eOoF27doiNjUVWVpbe9ffv348RI0Zg/PjxOHr0KAYPHozBgwcjNTXVxpGLx9zPBLhfZfbmzZvax5UrV2wYsfgKCwvRrl07LFmyxKT1L126hEGDBqF37944duwYZsyYgX/961/4/fffRY7Udsz9TB5IS0vT+a4EBgaKFKHt7dq1C5MnT8aBAweQkJCAsrIy9O/fH4WFhQa3sfdjiiWfCWDfx5QGDRpgwYIFSElJweHDh9GnTx889dRTOHXqlN717f07YjINSaZz586ayZMna/9dXl6uqVevniYuLk7v+s8995xm0KBBOsuioqI0L7zwgqhx2pK5n8nXX3+t8fHxsVF00gOg2bhxo9F1XnnlFU2rVq10lg0bNkwTGxsrYmTSMeUz2bFjhwaA5s6dOzaJSQ6ysrI0ADS7du0yuI4jHFMqMuUzcbRjikaj0dSuXVvz5Zdf6n3O0b4jhrBlSSKlpaVISUlBTEyMdpmTkxNiYmKQlJSkd5ukpCSd9QEgNjbW4PpKY8lnAgAFBQVo1KgRQkJCjF4hOQp7/55YIyIiAnXr1kW/fv2wb98+qcMRVW5uLgDAz8/P4DqO9l0x5TMBHOeYUl5ejjVr1qCwsBDR0dF613G074ghTJYkcvv2bZSXlyMoKEhneVBQkMFxFBkZGWatrzSWfCYtWrTAV199hZ9//hnff/891Go1unbtimvXrtkiZFky9D3Jy8vD3bt3JYpKWnXr1sWyZcuwfv16rF+/HiEhIejVqxeOHDkidWiiUKvVmDFjBh599FG0bt3a4Hr2fkypyNTPxBGOKSdPnoSnpyfc3d3x4osvYuPGjWjZsqXedR3pO2KMi9QBEFkjOjpa54qoa9euCA8PxxdffIH58+dLGBnJSYsWLdCiRQvtv7t27YoLFy5g4cKF+O677ySMTByTJ09Gamoq9u7dK3UosmHqZ+IIx5QWLVrg2LFjyM3NxU8//YQxY8Zg165dBhMmYsuSZPz9/eHs7IzMzEyd5ZmZmQgODta7TXBwsFnrK40ln0llrq6uaN++Pf78808xQlQEQ98Tb29v1KhRQ6Ko5Kdz5852+T2ZMmUKfvvtN+zYsQMNGjQwuq69H1MeMOczqcwejylubm5o1qwZIiMjERcXh3bt2mHRokV613WU70h1mCxJxM3NDZGRkUhMTNQuU6vVSExMNNh3HB0drbM+ACQkJBhcX2ks+UwqKy8vx8mTJ1G3bl2xwpQ9e/+eCOXYsWN29T3RaDSYMmUKNm7ciO3bt6Nx48bVbmPv3xVLPpPKHOGYolarUVJSovc5e/+OmEzqEeaObM2aNRp3d3fNqlWrNKdPn9ZMnDhR4+vrq8nIyNBoNBrNqFGjNLNnz9auv2/fPo2Li4vmww8/1Jw5c0Yzb948jaurq+bkyZNSvQXBmfuZvP3225rff/9dc+HCBU1KSopm+PDhGg8PD82pU6ekeguCy8/P1xw9elRz9OhRDQDNxx9/rDl69KjmypUrGo1Go5k9e7Zm1KhR2vUvXryoqVmzpubll1/WnDlzRrNkyRKNs7OzJj4+Xqq3IDhzP5OFCxdqNm3apDl//rzm5MmTmunTp2ucnJw027Ztk+otCG7SpEkaHx8fzc6dOzU3b97UPoqKirTrONoxxZLPxN6PKbNnz9bs2rVLc+nSJc2JEyc0s2fP1qhUKs0ff/yh0Wgc7ztiKiZLEvv00081DRs21Li5uWk6d+6sOXDggPa5nj17asaMGaOz/v/+9z9N8+bNNW5ubppWrVppNm/ebOOIxWfOZzJjxgztukFBQZqBAwdqjhw5IkHU4nlw23vlx4PPYcyYMZqePXtW2SYiIkLj5uamadKkiebrr7+2edxiMvczef/99zVNmzbVeHh4aPz8/DS9evXSbN++XZrgRaLv8wCg87d3tGOKJZ+JvR9Tnn/+eU2jRo00bm5umoCAAE3fvn2
1iZJG43jfEVOpNBqNxnbtWERERETKwjFLREREREYwWSIiIiIygskSERERkRFMloiIiIiMYLJEREREZASTJSIiIiIjmCwRERERGcFkiYiIiMgIJktEZHfGjh2LwYMH2/x1V61aBZVKBZVKhRkzZpi0zdixY7XbbNq0SdT4iMgyLlIHQERkDpVKZfT5efPmYdGiRZBqcgJvb2+kpaWhVq1aJq2/aNEiLFiwwK4naiVSOiZLRKQoN2/e1P7/2rVrMXfuXKSlpWmXeXp6wtPTU4rQANxP5oKDg01e38fHBz4+PiJGRETWYjccESlKcHCw9uHj46NNTh48PD09q3TD9erVC1OnTsWMGTNQu3ZtBAUFYcWKFSgsLMS4cePg5eWFZs2aYevWrTqvlZqaisceewyenp4ICgrCqFGjcPv2bbNj/vzzz/HII4/Aw8MDQUFBeOaZZ6z9GIjIhpgsEZFD+Oabb+Dv74/k5GRMnToVkyZNwrPPPouuXbviyJEj6N+/P0aNGoWioiIAQE5ODvr06YP27dvj8OHDiI+PR2ZmJp577jmzXvfw4cOYNm0a3nnnHaSlpSE+Ph49evQQ4y0SkUjYDUdEDqFdu3Z44403AABz5szBggUL4O/vjwkTJgAA5s6di6VLl+LEiRPo0qULPvvsM7Rv3x7vvfeedh9fffUVQkJCcO7cOTRv3tyk101PT0etWrXw+OOPw8vLC40aNUL79u2Ff4NEJBq2LBGRQ2jbtq32/52dnVGnTh20adNGuywoKAgAkJWVBQA4fvw4duzYoR0D5enpibCwMADAhQsXTH7dfv36oVGjRmjSpAlGjRqFH374Qdt6RUTKwGSJiByCq6urzr9VKpXOsgd32anVagBAQUEBnnjiCRw7dkzncf78ebO60by8vHDkyBH8+OOPqFu3LubOnYt27dohJyfH+jdFRDbBbjgiIj06dOiA9evXIzQ0FC4u1h0qXVxcEBMTg5iYGMybNw++vr7Yvn07hgwZIlC0RCQmtiwREekxefJkZGdnY8SIETh06BAuXLiA33//HePGjUN5ebnJ+/ntt9+wePFiHDt2DFeuXMG3334LtVqNFi1aiBg9EQmJyRIRkR716tXDvn37UF5ejv79+6NNmzaYMWMGfH194eRk+qHT19cXGzZsQJ8+fRAeHo5ly5bhxx9/RKtWrUSMnoiEpNJIVeaWiMjOrFq1CjNmzLBoPJJKpcLGjRslmaaFiIxjyxIRkYByc3Ph6emJV1991aT1X3zxRUkrjhNR9diyREQkkPz8fGRmZgK43/3m7+9f7TZZWVnIy8sDANStW9fkOeWIyHaYLBEREREZwW44IiIiIiOYLBEREREZwWSJiIiIyAgmS0RERERGMFkiIiIiMoLJEhEREZERTJaIiIiIjGCyRERERGTE/wMBcqwwenQInQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Let's load the audio file and display it here\n", + "import matplotlib.pyplot as plt\n", + "import torchaudio\n", + "import numpy as np\n", + "\n", + "audio, fs = torchaudio.load(wav)\n", + "xs = np.arange(len(audio[0])) / fs\n", + "plt.plot(xs, audio[0])\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Amplitude\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "55262bd3-269b-4fde-a349-e8dcd6bf05e3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[(0.1, 0.14, 'THE'),\n", + " (0.26, 0.5, 'BIRCH'),\n", + " (0.68, 0.98, 'CANOE'),\n", + " (1.3800000000000001, 1.62, 'SLID'),\n", + " (1.82, 1.8800000000000001, 'ON'),\n", + " (1.96, 2.02, 'THE'),\n", + " (2.16, 2.48, 'SMOOTH'),\n", + " (2.64, 3.02, 'PLANKS')]" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Okay, run the alignment\n", + "aligner = CTCAligner(model=asr_model, tokenizer=asr_model.tokenizer, device=asr_model.device)\n", + "alignment = aligner.align_audio_to_words(wav, \"THE BIRCH CANOE SLID ON THE SMOOTH PLANKS\", frame_shift=0.02)\n", + "alignment" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "031f3967-a330-46ac-9503-e0c7cd56495b", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAksAAAGwCAYAAAC5ACFFAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAaoxJREFUeJzt3Xl8DPf/B/DX5kYuuYUQRyVxE0RUqwhReihVfH3dX1pfivLT0ipttaUXpVVKtXopX9Vqi6YlbuKKM45QVwhJaCSRRA7Z/f2hVlb23pmdmd3X8/HYR2t2Zva9m92Z93w+n3l/VBqNRgMiIiIi0stF6gCIiIiI5IzJEhEREZERTJaIiIiIjGCyRERERGQEkyUiIiIiI5gsERERERnBZImIiIjICDepA3AEarUaV69ehY+PD1QqldThEBERkRk0Gg1u3bqF8PBwuLgYbj9isiSAq1evIiIiQuowiIiIyAqXL19GnTp1DD7PZEkAPj4+AO5+2L6+vhJHQ0REROYoKChARESE9jxuCJMlAdzrevP19WWyREREpDCmhtBwgDcRERGREUyWiIiIiIxgskRERERkBJMlIiIiIiOYLBEREREZwWSJiIiIyAgmS0RERERGMFkiIiIiMoLJEhEREZERTJaIiIiIjGCyRERERGQEkyUiIiIiI5gsEYmsQq1B6Z0KqcMgIiIrMVkiEtHFG0Vo+OpGRM1IQnHZHanDISIiKzBZIhJRr4U7tf+//0KuhJEQEZG1mCwRiaS47A6Ky+53vw3/6gDWHc6UMCIiIrIGkyUikSxIPltl2aTVR+wfCBER2YTJEpEI1GoNPt9+Xu9zaZn5do6GiIhswWSJSAR/XS80+NwTn+yyYyRERGQrJktEIugxf4fUIRARkUCYLBEJbNfZG1KHQEREAmKyRCSwuUmnTK4zbe0xFJWy7hIRkRIwWSISWFpmgcl1Vh24jLc3mE6qiIhIekyWiASUX1xu9ro/7M8QMRIiIhIKkyUiARWUmJ8sAXenQyEiInljskQkoCs3b1u0/pnsWyJFQkREQnGTOgAiR3Am+xb6Ld6DWyWWDdrWiBQPEREJh8kSkQBYV4mIyHGxG46IiIjICCZLRDay5A64B2nYD0dEJHuKS5YWLVqEyMhIeHl5IS4uDvv37ze6/po1axAdHQ0vLy80b94cGzdurLLOqVOn8NRTT8HPzw81atRAu3btkJHB27rJPLM3nLR62wXJZwWMhIiIxKCoZGn16tWYPHkyZs2ahUOHDqFly5ZITExETk6O3vX37NmDQYMGYdSoUTh8+DD69OmDPn36IC0tTbvOuXPn0KlTJ0RHR2Pbtm04duwYXn/9dXh5ednrbZHCpWdZf0fbqWumC1gSEZG0VBqNcjoC4uLi0K5dO3z66acAALVajYiICLz44ouYNm1alfUHDBiAoqIirF+/XrusQ4cOaNWqFZYsWQIAGDhwINzd3fHtt99aHVdBQQH8/PyQn58PX19fq/dDyvTUp7tw7Eq+1dtfnNtbwGiIiMhc5p6/FdOyVFZWhtTUVCQkJGiXubi4ICEhASkpKXq3SUlJ0VkfABITE7Xrq9VqbNiwAY0bN0ZiYiJCQkIQFxeHdevWGY2ltLQUBQUFOg9yXiqVSuoQiGSr7I4aZXfUUodBZBPFJEs3btxARUUFQkNDdZaHhoYiKytL7zZZWVlG18/JyUFhYSHmzp2Lnj174s8//8QzzzyDvn37Yvv27QZjmTNnDvz8/LSPiIgIG9+dcMo15VhwcwEW3FyAck2lgcdFRYBKdfdRZHnVaIP7tScb34M5rHmfkqRKdvgspOIs37XKTL5nC+Ix6/MT8P0Ze70KtQYd5iQj7t3NqFDLpBNDpL+tLL63QnPg44ylFJMsiUGtvnu18/TTT+Oll15Cq1atMG3aNDzxxBPabjp9pk+fjvz8fO3j8uXL9gqZZIgNS0T6nbpWgNyiMtwsLsffRaVSh0NkNcUUpQwKCoKrqyuys7N
1lmdnZyMsLEzvNmFhYUbXDwoKgpubG5o0aaKzTkxMDHbt2mUwFk9PT3h6elrzNsgBMVci0u+JTyodR2XSsERkDcW0LHl4eCA2NhbJycnaZWq1GsnJyYiPj9e7TXx8vM76ALBp0ybt+h4eHmjXrh3S09N11jlz5gzq1asn8DsgR8UxS0RV7Tx7Xeff3+29hL8L2bpEyqSYliUAmDx5MoYNG4a2bduiffv2+Pjjj1FUVIQRI0YAAIYOHYratWtjzpw5AICJEyeic+fO+Oijj9C7d2+sWrUKBw8exNKlS7X7nDp1KgYMGIBHH30UXbp0QVJSEn777Tds27ZNirdICqSgG0qJ7GbIct0aeAu3/IUdZ29g3biHJYqIyHqKSpYGDBiA69evY+bMmcjKykKrVq2QlJSkHcSdkZEBF5f7jWUdO3bEypUrMWPGDLz66qt46KGHsG7dOjRr1ky7zjPPPIMlS5Zgzpw5mDBhAqKiorB27Vp06tTJ7u+PiMiRHbmcJ3UIRFZRVLIEAOPHj8f48eP1PqevNah///7o37+/0X2OHDkSI0eOFCI8ckLshiMicmyKGbNERETAphNZKK9Qbt2ik1dZl46Uh8kSEZGCTFh1BEt3nJc6DKv1WrgT129xoDcpC5MlIiKF+eCPdPxr2V6kXropdShWych17gKHpDxMloiIFGjPub/Rb/EeqcMgcgpMlohsxOHdRJZhtQ1SGiZLRDbicZ+IyLExWSKyUV5xmdQhEClKblEZi7mSojBZIrJBeYUa565zsCpJ58IN+X3/TCVCY75NxVvrT9opGiLbMVkiskHB7XKpQyAn1+XDbahQy6uVZsvpHJPrfLX7oviBEAmEyRKRDVxYvZtkYPWBy1KHoGPn2RtSh0AkKCZLRDZgrkRysOecvJITXkSQo2GyRGQDFQsHkAzIqxMOcOHPghwMkyUiW/CkQDKw4dg1Wd1dZm7D0o4z13HqGueKI/ljskRkA/Y2kFwcz8yXOgSLDf1yPx5fsFPqMIhMYrJEZAPmSiQX5RXyaVkicjRMloiIHAKTJSKxMFkissJfObcw9/fTuFnEOkskD+dy5FeckshRuEkdAJHS/F1YioR5OwAARy/nSRsM0T9eXnsMz7WLkDoMIofEliUiC325+4L2/1PO/23z/jp/sNXmfRARkXiYLBFZaNHWc4Lu79LfxYLuj4iIhMVkicgCO85clzoEIoNyi8qkDoHIITFZIrLA0C/3Sx0CkUF9Fu2WOgQih8RkiUgG1DKbNZ6UKSOXXbpEYmCyRGSmY1fyRNt3g1c3MmEiQdwqYTkLIqExWSIyk9hzWP11vVDU/ZNzeOO3k1KHQORwmCwRmUkl8uQmPebvQM+Pd+AmB+lSJdkFJThuQavmttM54gVjB/m32TJG8sNkichcdpgI7nTWLSzZIWxpAlK2uHeT8dzne81ev7i8QsRoxHM5txizfklDyzf/xP8OXIZGw25pkg9W8CYy01e7L9rldcruqO3yOiR/R5ykQvy7G09h6Y7z2n+/vPYY/jiRheXD20kYFdF9bFkixfgjLUvS1xd7zNI9vx+X9n2SfDhLKYDKidI9yadzsO5wpgTREFXFZIkUY9LqI1KHYBdZBSUor2DrEtGk1Udw8QYnCCbpMVkikqFV+zOkDoEkdp53RwIApqw5KnUIREyWiOTo9V9OsO6Sk3v+21SpQ5AF3h1KcqC4ZGnRokWIjIyEl5cX4uLisH+/8ekn1qxZg+joaHh5eaF58+bYuHGjwXVfeOEFqFQqfPzxxwJHTWS5T7f+JXUIJKHMvNtSh0BE/1BUsrR69WpMnjwZs2bNwqFDh9CyZUskJiYiJ0d/XZE9e/Zg0KBBGDVqFA4fPow+ffqgT58+SEtLq7Luzz//jL179yI8PFzst0FkliXbbS8hUHqnAnnFvDIn5bqaL9+kMSntGtamXpE6DLIDRSVL8+bNw+jRozFixAg0adIES5YsQfXq1fHll1/qXX/BggXo2bM
npk6dipiYGMyePRtt2rTBp59+qrNeZmYmXnzxRXz//fdwd3e3x1shsovHPtiGVm9tQk5BidShkAXK7qhRXKbMeklCKylXy7aV7YXvDmHKmqPI5u/L4SkmWSorK0NqaioSEhK0y1xcXJCQkICUlBS926SkpOisDwCJiYk666vVagwZMgRTp05F06ZNzYqltLQUBQUFOg8ioRWXVaBCrbG4ovGBi7moP30DPtv2F67l3z2I7z53A+sOZ2LNwctihEoC+x//TjoOXsyVOgSjClh13OEppijljRs3UFFRgdDQUJ3loaGhOH36tN5tsrKy9K6flXW/js17770HNzc3TJgwwexY5syZgzfffNOC6Ims02vBTqRn3wIAnJ7dE17uria36b/k7sXA+0np2mUvrb5/R1FCTChq1vAQOFISElsCdX25+yKeblVb6jDIiSmmZUkMqampWLBgAVasWAGVyvy5LKZPn478/Hzt4/JlXgWSOO4lSgCwYs9FAMDIFQcQ83qS3vWT0q6Z3OdthU6HQcoh9EwlR52kkjnJl2KSpaCgILi6uiI7O1tneXZ2NsLCwvRuExYWZnT9nTt3IicnB3Xr1oWbmxvc3Nxw6dIlTJkyBZGRkQZj8fT0hK+vr86DSGxrU6/gxNV8bHlgotT1x65Co9HgcMZNvPDdIZP7seC6gEhWtp7OwQvfpiJXZuUEWOTD8SkmWfLw8EBsbCySk5O1y9RqNZKTkxEfH693m/j4eJ31AWDTpk3a9YcMGYJjx47hyJEj2kd4eDimTp2KP/74Q7w3Q4ojh4Pz2ZxC9F64q8ryqWuO4clPd+GZz/ZIEBWJIedWqdQh2ESshHzEigNIOpGF/3x9QJwXIDJAMWOWAGDy5MkYNmwY2rZti/bt2+Pjjz9GUVERRowYAQAYOnQoateujTlz5gAAJk6ciM6dO+Ojjz5C7969sWrVKhw8eBBLly4FAAQGBiIwMFDnNdzd3REWFoaoqCj7vjmStRnrjksdglFpmebfZLDh2DX855EGIkZDtii9U4FVB5TdtS90NxwA/FjpFv1DGXnILy6HX/X7dy/fqVAjPfsWYsJ84eLC5lMSlmJalgBgwIAB+PDDDzFz5ky0atUKR44cQVJSknYQd0ZGBq5duz9mo2PHjli5ciWWLl2Kli1b4scff8S6devQrFkzqd6C09CIcbSU0MmrjnPH49sbTrE6uIwVFPPOKn0++EP3Rp5rBbrlBKasOYreC3dh4Zaz9gyLnISiWpYAYPz48Rg/frze57Zt21ZlWf/+/dG/f3+z93/x4kUrI6N7cm6VoPfCXWgU7A3/6u6Y/ngM6gZWlzosm1hyA4ASLNxyFpMSGksdBjkoMX4uN4t0k8iC23d0/v3LkasAgI83n8WQDvUQ6O0pfBDktBTVskTydDb7Fh6euwWrD9yd/HXJtvO4fqsUKef/xu9pWXjhO85xJTcfb+bVN4lHjIblsgq1zr+f+1x/fT0AiH17s/ABGOFgDemkB5MlstnLa48hM+82Xlmrf1zPyWsF+Cvnlt7nSDrnOKs9ObB7F29EQmCyRDY7m236pPvUp7vtEAlZIvXSTalDENzOs9dxQObVnk1Jc6Dxcfay6+yNKssMXbwRWUNxY5ZIXsruqFFYesfkekqf58qxRiw5ptyiMgxZvh8AcGFOL8WOM/vvykPgdax5NBoNSu+oMX/zGalDIQfHZIlscvuBJOjKzWKDgzs1Go1iT2Akf7lFyq5NJIazObfQJDRA6jBEU3/6RrSLrCl5K6mGZSkdHi9fyCa/PzC9Rqf3tmL5rgt61z13vUiw171m71nIHTDHKzKjRVDpNBpNlYTemThD9/eBi47XnUzyw2SJbDLtJ0vGBQh39dX1o+24au+EycG8+dtJqUMQzb2u4X8v34eYmUnI1jMx7aGMm9j9V9WxLo5G33geIrIMkyUHp9FoUP7ALbdSEfr22l1OcKIj8/169H4rZ++FuzBo6V7s/utvAMBvR69
WWb/vZ3sw+It9uK7wqUVMef7bg1KHQKR4TJYcWEl5BVq++SdiXk+SxdxmL/5wGBUCVo4uvSOPJJCkl11QgoXJ92tHZeQWI+X839p/p5z7W99mAODwyVJlp64V4Iud52VzAeUoWGfJ8TFZcmDTfz6OgpI7uKPWYMOxqlfWllhbaV6me9Iy8y3ax+msW9h8KtumOIj0eXHlYaPPJ5/OgUaj0U7Do2+6l5LyCny/75LDde8WlVVo3+/jC3bi7Q2n8MM+1iAisgSTJQeWlJal/X9br3xm/JJWZdkTn+yyeD9nslickoR34qrpxP3JT3dh0LK9uHCjCK1nb6ry/LxNZ/Daz2noOHdLlXnIlG79cd0bMRxprkMie2Cy5CTk0kr80SbWQyFppGUWYO/5XHT5cBvyb9+fZywz7zZ+PnwFS3ec1y77ctdFo/tS2kTRf2XfwvlKFdvFjv73ShdqUlLa34nki3WWnAQPGqRPWmY+mtX2kzoMSY3+xvwB0Elp1/DCd4cAAOlv94Snm6tYYQnqfwev4Nu9l+z2epky6cpcf+wanmwZLvrr8PDq+Niy5CTk9GN2htu1lcKarlRndi9RAoAdZ4T7HpfeEbcWVFZBCW4W329Nc5aLJ3sVq2RRSsfHZMlJvP9HutQhaJ2/IUxxytfXpeFybrEg+yKy1OhvDmLMNwdRJsBdmRl2/h5XOMm53VmSQhIfkyWyir4if2YT8ABmt8KKPObKmj2m0TlyOa/Ksj9PZmPd4UzRX1toGx8Y8C0kJijkiJgskVX6L0mROgQAnA+M7Oer3fqn8bklwLQxjpRfONJ7IbqHyRJZxd7dBoYcysiTOgTFuymDgqVKIGbb1bKd502vpBByypXsFQsTRMfHZInIDI58LPx4M8s5SG39MfG6xeyN3XDkiJgskd3xUCovQnQjEckR8zYSCpMlsruZv5yQOgSqRCVqB5PjY0uKLjl9Gryln4TCZInIyfGEYp5fjto2v6KzYO5omdtlFUg+lY2ScnFrbZFtmCwRkWBK71Rg3PeHsGq/fSdqLRS5K7GgpJxJgJmYfBt2KOMmfjt6VScx+r81RzHq64OY/tNxCSMjU5gskSLM2XhK6hAcl4Dntv8duIwNx69hmoMd+MW86t96Oke0fTs7OSW4NwpL0fezPXjxh8MYtGyvdvmGf2pe/azAel3OhMmSExJ7agUxfL7DcW6tlpufBDxI51WaUsORiDmua8SKA6LtWwpySlDk5Fre/UK+hw2UPMnRU+z3ys1iTP/pGM5k3xIrNDIDkyUnZOsVjBDTO5C83CpxzCRHKGIVCOfgcHHJqc6SOd+ht9ZXnZHghe9S8cP+y+gxfwe2prMVUipMlhzMXzmmrz4KS2wb3/GDncejyIGjn9Qq1PZ9fwUl5SgSaJzR8Sv5guzHGGPnOVu+GptPyffkl3wqG6euFVi8nZx+KnKKxZxk6e/C+wVi7x1z0jLv/w1GfHUAvxzJRM+Pd+CiQHNsknmYLDmYdUfE7/e+9Lc8qneTMpWUV6DFG3+i6aw/BElCT2VZfkK3lFhzzx28lCvKfm2VlpmPUV8fxOMLdkodio1klC1Z4O/CUjzy/lZ89GfVCdAnrjqC01m38MraYxJE5ryYLJHF7N0KIQcXHTxBtOcVeGbebe3/C/JVskPszlaJKj3L+vExvBtOvwfHvWk0Gny395LedZftvIArN2/jky1/GdxfURmLydqTm9QBkPI425il22XKGxBPwjLWsMTkQJecur7k5MHvUMzMJJSU6x5L732X1GZ8iGrnOgxLji1LZDEheiQOZdy0fSf/yL8t7uDk3GLHn2hWyee32wou5qfE6umZebeRYaSlVU7fJXslbuYkzA8eNx9MlID78ZrzrTAnoSLhMFlyMOYcfG39jQlxeD96OU+Avdx1RMB96aN2gm7HzaeypQ7BarN+lXb6nA3HsyR9fVt9m3IRuUXmXRBUqDV4eO4WPPrBVhSX3UHOrRL0W7xbZx053Qw
ho1AUmRjTfYpLlhYtWoTIyEh4eXkhLi4O+/fvN7r+mjVrEB0dDS8vLzRv3hwbN27UPldeXo5XXnkFzZs3R40aNRAeHo6hQ4fi6lVOayC28grh2pCX7jgn2L6c1cs/ijdYdG3qFfxihxsPpHL0cp6g32d7e3fjabzwXapZ61Z+n38XluGjP87g5DXW/xGqdMBfOYVINXPQP1uW7EtRydLq1asxefJkzJo1C4cOHULLli2RmJiInBz9t9/u2bMHgwYNwqhRo3D48GH06dMHffr0QVpaGgCguLgYhw4dwuuvv45Dhw7hp59+Qnp6Op566il7vi2y0e6//hZ1/2LV2HE0t8sqsOec7t8ir7gMU9YcxcRVRxQ99svUeUnp5639F6y7K09fF6icPgqljSf7u6gM/RanIEtPccoHncku1LbiFZbeQVLaNUX/xuROUcnSvHnzMHr0aIwYMQJNmjTBkiVLUL16dXz55Zd611+wYAF69uyJqVOnIiYmBrNnz0abNm3w6aefAgD8/PywadMmPPfcc4iKikKHDh3w6aefIjU1FRkZjltL6NLfrM9Bwhvz7UGknNdNloorHbzLFNz6YuqUa21CrfREXN9dc3JKHNcdsU8vgdBv2dzyLD8duttiO/iLfXjhu0OYsOqwwJGYL/92OdYfu+qwCZtikqWysjKkpqYiISFBu8zFxQUJCQlISUnRu01KSorO+gCQmJhocH0AyM/Ph0qlgr+/v8F1SktLUVBQoPNQkrWHbOsSEeIAL6cDqilKilVKO8/eqLJM33elcuE9JUjPVtbvWwjmfuXT9U3BIaPfi5zu3LXkOFJqZtzf77tbeuDeGNBNJ6Ubezj664MYv/Iw3vxN2jGEYlFMsnTjxg1UVFQgNDRUZ3loaCiysvQPsMzKyrJo/ZKSErzyyisYNGgQfH19DcYyZ84c+Pn5aR8REREWvhvraTQarD6QgRNXxa9abDgG2/cx5/fTtu+EZE9nbLwG+OVIJp77/P7FihKqwf948IrJQcuOnFBHTtuAh17bKOpkwmKyx4BzoVtTzK2cXiyjVpz9F+925Qo516ScKCZZElt5eTmee+45aDQaLF682Oi606dPR35+vvZx+fJlO0UJ/HEiC6+sPY7eC3fZ7TUf9JMDD9Z1ZkLPD3enQo3u87Zr/62BBu89kCTPWJcm6GuKQaMxPfLFmrExPx++gsXb5HdzwpbT2Th+JU9nWXmFBr8eNa9LS27jhOxxM+vi7ab/jmJ8LqezbgmSqKnVGhy4mFtlCqI7Cu46F5pikqWgoCC4uroiO1u3mTE7OxthYWF6twkLCzNr/XuJ0qVLl7Bp0yajrUoA4OnpCV9fX52HvZi680TpYyDkyFk+05m/CNt8fuFGkayufK118tottH17s+D7fWn1UcH3aavz1wsxcsVBfJ1StbL0g11aNwpL9e5Dbq1s9mhZOpdTKPprGDJ7Q9XJdy21Ys9F9F+SgsFf7NNZvib1is37dhSKSZY8PDwQGxuL5ORk7TK1Wo3k5GTEx8fr3SY+Pl5nfQDYtGmTzvr3EqWzZ89i8+bNCAwMFOcNCMTUedvc44KcaqEIIeWc9XfEfb3notHyAw72URm0/cx1Ufev1M/xcEaeyXUOXBCuyKqULJn3ce0h/SdSuf2Z5RaP0Fbus70r+38H7/aOPFiz7q3fzEvECgWaFFvOFJMsAcDkyZOxbNkyfP311zh16hTGjh2LoqIijBgxAgAwdOhQTJ8+Xbv+xIkTkZSUhI8++ginT5/GG2+8gYMHD2L8+PEA7iZKzz77LA4ePIjvv/8eFRUVyMrKQlZWFsrK5DkIVahWDjmcuIScY+61dcet2q6kvAKzfj2BdzeeNnilTMIwVBdmyv/k18JiqX8v32d6JYV78K/33V75jzcD5HGscwSn/5mwWq3WVCliOvEH6e7CsxdFJUsDBgzAhx9+iJkzZ6JVq1Y4cuQIkpKStIO4MzIycO3aNe36HTt2xMqVK7F06VK
0bNkSP/74I9atW4dmzZoBADIzM/Hrr7/iypUraNWqFWrVqqV97NmzR5L3aErlKrD6phwwN5mSw/HD0JWpPVU+gSt1AKtQxG5trFBroNLzBZXD94DuMjquRqPB2WzT3U1ya7WWS/FGmYRhtSu5t1FYegddP9qGNrM3IS3z/k1Gyaf11zp0JIqbSHf8+PHalqEHbdu2rcqy/v37o3///nrXj4yMlN0P2xSXSueaQcv2Yve0rlbt5+77lnYwjrH5pSzlJMOKFOXBvEhZvzR60Otmjmlzxr+z0s4jD9J3EVNlHQDt39msHYe4cn8G3n2mud71HJGiWpZI9wSUmXdbukAckMKPd7LHz1feCkvv4FyO7QVr5fZ3tkc8MnvLolCp5FWqwN4U17Lk7ExdAZib1Svhx11Uege/HLmK7k1CTa5rzpWR3u0c9jpIekorPunsuny4Dddv2T5uT36lAwzHo9FocOpqAeoFVkcNT+tPh3JLEIXw4LAElweaVq7m3UbZHTU83JyjzcU53iVVoYQf9+vr0vDqz8cx+Iu9JtdlyiM/D47fl9tJlHQJkSjJkbFv3Y4z19Fr4U70WrjTbvGI7W+BblTZfEq37M6DF6Tb0q+j/+eGZ8OoTN+gcKVhsqQwphpQzD0dFZfdkbyf/aCJ2bX/OHG30voZMwaVCnGXoFwGgkrlZnG5oIPcH5x6ofyOBnnFyj5gkhlk9jMydpzbePzuMcaSkgly9/HmszZtr9FosOHYtSqfib5D7NEHSg0Y8sJ3qWgzexMOXLRuwmY5YDecwgjVbdTqrU0Y0DYC7z3bQpD9WWPveXn9cDp/sA1DH66DwE66y3NumZ4B3FFsPH4NfdvUEWRfX+6+oPPvhHnbFT2ZLplHZrmSXeLJKpDPMcKacUWVi2rWn75R+//VKq3jYuYVqb7c9M9/LpyW77yAdpEBFscnB2xZcjC3y8w/Ga0+aL9pWsRmTuuTPg/+/lfqmaus32LzmpodgZC1rx7ERMk5yK2BViOTr529PhdrurvN+W2a23rvqL9ztiwpjKkv7Klr+ehon1DIAcnsPFfFwuSzSM++hRAfT7So4wcvN1epQyKZc/budaFk5smn9UwKTJYcTEbubYuSpT1/3UDHRkGixUMkhLTMfCzefg4bjl0zvbJE1GoNXFzMu/zOkVG3jdDkNpD/3PVCtK0hfdeP3T4XkV5m1i8nAA+vKstvKnzgtrnYDacwpg7F1y28E+JfXzj+NA1kAYnOcxduGK7vc6OwFE98skvWiRIANH/jD7PnKMy7XS5yNNKRW0NOREB1qUOwK3t//PsuyGvsqViYLCkM5y8jUUlUgyHx4x0Gn7umkOb/orIKPP/tQanDkJzMciXZJG8HL9pnsmV73+X8wnepVZb95+uD+O3oVbyz4aTDTCPFbjiFWbbzgumVFMSZipopgVSJSdkdw4NC3VxZRYusZ+6Ypcy826jtX830ilaa9at508U4gs2nsrV1miqfswpKlNuiyrMUSWrlvktSh0CVzN98RuoQqnBnsqQoV3LlVbOo49wtZq03+mvHaBXUANhyOhu/Hr0qdShV7Fdwlx2TJcKZ7FuSvfaVm9LOb2fq7kLOv2cecT8n5SRL1k6740gGLDVdcd/elu44Z3Kdk9cK7BCJfYxccRATfjiMrHxldGErAZMlhTsrQKIzY12aAJFYRybDCQx62MyrUmcn5udk5g1msiB1VXzS792Np6UOwW4qfwV3/XVDukD0UPKvg8mSwh3PzLd9H1ds34e1jJ1beJVOAPCDnkKhRJYSas40c92pUEsy397NSlMKNQ71tvvrG1Oh1pg9RYrcMFlSOCEKLt8ur0D/JXts35EV5FaThSwndmuKkm5qYIIvX5NWH7Hr6/VdvAft3tmMk1ft27238+z91iRzpyixp6cX7daW2NhyOhu/H5d3SZB7mCwpnFAnqgMXb2LFbuWclIQi1Fx7jqSw9I5F68/93Xm6OEi5KicRYnjwWHzsnxb
7Xgt3Oszt80LZlp6DOxVqjFxxEGO/P6SIwpZMlhROyFL+b/x2UrB9meur3Rft/pqVsWWrqmaz/sARM5vKNRoNPt9xXtyAiBTu8+38jVSm1mjwd6UEydILNCkwWVI4Eec9JSfWZ9Fus9YTsgduzzl5DUYlsoSx38LZHGnuOJbr/QYaDZCeJd1d2NZgsqRwQv8Y5JThyykW0q/kjnDdCz8evCLYvojkxFVJt3TawRe7LmDol/u1//ZUQGFi+UdIRgndjfTsYvsP9Obt1spUXqFGk5l/SB2GrMhwPC1V8uDdaUIWbqx8FDt3XbfV5GaxNJWrOcxAOEyWSMdpCZpGfzqUKch+8gWenPT8jUJB9+dofk/LEnR/9w7recXyH+xpSJ5EJ0Uyz7iVh+zyOk98otuNvePMdbu87oOUch2qhOEkViVLeXl5+OKLLzB9+nTk5t4tX37o0CFkZgpz0iPzvfZzGhZsPivoPtV2/ub+dkyYq7s/Twh78u69cJeg+3M06wWeTuHnw5lYsv0c4isVuFTiXUT2rudD5hNzug22kFtPCS1gFidLx44dQ+PGjfHee+/hww8/RF5eHgDgp59+wvTp04WOj8wg9Hxeb623711xQuVm8v+5OZY/T2YLvs8HyxAs3vaX4K8hthuFym0Zs9WynfK/60us6Z3G26nVyhE5ZMvS5MmTMXz4cJw9exZeXl7a5b169cKOHTsEDY7MlyZAJe97Vuy5KNi+zCHUFdlfOew2czRym67BHM48bmneJvlNxPygQxk3DT53q8T6btTtItdxcmT27s2whsXJ0oEDB/D8889XWV67dm1kZQnbDUK6yu6oDT73xCfCdhndLrNf90dWfgm2ns6xOWlaIXHNJmdir4MbezZIaG/+arjlfOqaY3aMRHxK+fko4XducbLk6emJgoKq5dvPnDmD4OBgQYIi/eb8fsro80L2mbd9e5Ng+zLlbE4hRqw4gI3HbUu2yyoMJ5MkLKHGmZkiZNFVIlOSBB73KCdyHlPlkGOWnnrqKbz11lsoL7/bXKlSqZCRkYFXXnkF/fr1EzxAus9UtevkUzmCvVaRHVuW7tnNooSKceFGkV1eJz1beV2rSiu2R47rXoK059wNtHzzT/xyRJ43YSmgF87yZOmjjz5CYWEhQkJCcPv2bXTu3BmNGjWCj48P3nnnHTFiJDP9IfBV0XY73+4qxFAPocsHkH5rWEDSoAmrDptcR8YX+eRANv1zE8a/lu1DQckdTFx1RNqADFBCC7KbpRv4+flh06ZN2LVrF44dO4bCwkK0adMGCQkJYsRHFliTegVu7sLtb+vpHHRurKyu1ROZ+ejYKEjqMBxCSXkFvNxd9T6XmXfbztEohwKO++QkPtt2Di/3jJY6DJPk3EV4j8XJ0j2dOnVCp06dhIyFZMbeJfqFuIvodnkFDmfcRL3AGgio4WH7Dp1Y/yUp+O3Fu7/xtMx8HMq4iX/H1YMLp26wmRLGaBDZS05BKRqF+EgdhlFmJUsLFy40e4cTJkywOhgyTIrM+9S1qgP5xbTu8FXMfKIpPGyYJ+jz7eex/2IuPN1ckP724wJG53yOVypHce9uS18vd/RpXVuqkIjIRklpWejZLEzqMHT864t92DG1C+oGVpc6FIPMSpbmz5+v8+/r16+juLgY/v7+AO5W9K5evTpCQkKYLIlk73nxKs8asufc37hysxh1atrnC1xYegcz1h3H+8+2tHof+y/e/ZxKjZRZIOtNWn1EdgdaOZryv6P4V1wEYusFSB0KWSivuAz+1R2nVXrLad3isS98l4qLc3tLFI1hj36wFf99rCGmJkZBJcNiZWZdwl+4cEH7eOedd9CqVSucOnUKubm5yM3NxalTp9CmTRvMnj1b7HixaNEiREZGwsvLC3Fxcdi/f7/R9desWYPo6Gh4eXmhefPm2Lhxo87zGo0GM2fORK1atVCtWjUkJCTg7Flhpw+x1a2ScrzwXaokr/3D/gw88v4W0ysK5H8
HryBy2gZBpoyInLYBhzNuQqPR4FZJOcfZWGHf+b+rtGr2WrhTomiUY+2hK+i3OMXg8woYouG0Wr21CYWld4yuU6HW4LnPDf995WTkioNGn5fTeKHPtp1D/ekbcf56Ibaczjb5d7Ani8csvf766/jxxx8RFRWlXRYVFYX58+fj2WefxeDBgwUNsLLVq1dj8uTJWLJkCeLi4vDxxx8jMTER6enpCAkJqbL+nj17MGjQIMyZMwdPPPEEVq5ciT59+uDQoUNo1qwZAOD999/HwoUL8fXXX6N+/fp4/fXXkZiYiJMnT+pUKJfCaz8fx/f7MiSNYdHWcwAg6MBxc8S+vVmQ/Tzz2R5B9uOsBizdW2XZ+ev2KRvgCCKnbZA6BLJCs1l/VFlWN6A6MnKL4e3pJquTuDXufS8fCvHGWRnOfND1o+16l6e9mQhvT6uHWtvE4sEh165dw507Vb8oFRUVyM4Wfq6oyubNm4fRo0djxIgRaNKkCZYsWYLq1avjyy+/1Lv+ggUL0LNnT0ydOhUxMTGYPXs22rRpg08//RTA3Yz6448/xowZM/D000+jRYsW+Oabb3D16lWsW7fOYBylpaUoKCjQeYhB6kSJiIjuysgtBgDFJ0qVyTFRMkbKOlEWJ0vdunXD888/j0OH7k8amJqairFjx4paPqCsrAypqak6r+Hi4oKEhASkpOhvDk1JSakSU2Jionb9CxcuICsrS2cdPz8/xMXFGdwnAMyZMwd+fn7aR0REhC1vjYiIiEyQspSNxcnSl19+ibCwMLRt2xaenp7w9PRE+/btERoaii+++EKMGAEAN27cQEVFBUJDQ3WWh4aGGpyTLisry+j69/5ryT4BYPr06cjPz9c+Ll++bPH7MceyoW1F2S8REZHS2OtmI30s7vwLDg7Gxo0bcebMGZw+fRoAEB0djcaNGwsenFzdSxLF1r1JKC7O7Y2s/BJ0mJMs+us5sl2vdIF/dQ9k5d9GgyBvuLiokHOrBN0+3I5bDtSsLrQJXRvhmTZ10OXDbVKHoki7XumC/NvlUEEFDzcXZBeU4OFGQUi9lGt0ADhJa8HAVni6lfESGUoej7b+xU5oVtsPAPDRn+n4ZMtfEkdUVXyDQKwY2Q6bT+bA28tN8gLJVo+Uaty4sV0TpKCgILi6ulYZF5WdnY2wMP23MoeFhRld/95/s7OzUatWLZ11WrVqJWD0tgnz80Lam4l6Bx2Kbf9r3fDrkauY++cJu71m39a1Mb1XDNq9Y9sg73Pv9qpSWLNy4bMQHy+kvt4djWf8btPrOLJJCY11ilD6ernh8MweaPjqRiNbEQCkzkhAoLcn6tS8v6xRiDcAIDrMV6KoyBymEiWlu5coAUCv5rVklyxVLm3Qu0UtI2vaj8XJ0siRI40+b2iwta08PDwQGxuL5ORk9OnTBwCgVquRnJyM8ePH690mPj4eycnJmDRpknbZpk2bEB8fDwCoX78+wsLCkJycrE2OCgoKsG/fPowdO1aU92Etqe4ACPHxsuuB/b+PNbSpPP97/ZrjlbXHAZhXgdyWApjO4MFq3S4uKrtXdleijRMeQaC34dZnQ9PIkPR+n/iI1CEIatnQthj9zf3yAcPi6+k8H1NLHon74de7o6aMZ12w+Ax88+ZNnX+Xl5cjLS0NeXl56Nq1q2CB6TN58mQMGzYMbdu2Rfv27fHxxx+jqKgII0aMAAAMHToUtWvXxpw5cwAAEydOROfOnfHRRx+hd+/eWLVqFQ4ePIilS5cCAFQqFSZNmoS3334bDz30kLZ0QHh4uDYhI6B+cA3RX+O/jzXEpITGNicv/drUwbnrRejQgMUAxSCjkiyy1iTc+AmICad8mZs8zO3bHNN+Oi5yNLZrXqkVCQBmPtlUokiMk3OiBFiRLP38889VlqnVaowdOxYNGzYUJChDBgwYgOvXr2PmzJnIyspCq1atkJSUpB2gnZGRAReX+yfbjh07YuXKlZgxYwZeffVVPPTQQ1i3bp22xhI
AvPzyyygqKsKYMWOQl5eHTp06ISkpSfIaS3Kw6aVHAQC1/auhQZC4A+um9IgS5ATi5uqCV3vFCBARfTa4TZVl9i6s26K2Hw5dvGXfFyWnFRVq/vxk8Q0DRYxEOA8eVpmoW0eQPggXFxdMnjy5yrQoYhg/fjwuXbqE0tJS7Nu3D3Fxcdrntm3bhhUrVuis379/f6Snp6O0tBRpaWno1auXzvMqlQpvvfUWsrKyUFJSgs2bN8t2sPr0x413T00z8bylHqp04Fg0OFbQfVc2f0BL/oBlqFfz+2MFOjUKAgAMal/XrjFM7CbP3yI5ppha5idL/tXk3RJyj0qlwpMtwwEA/+lUX+JolEuwgTDnzp3TW6yShGOqmXL0Iw3w4eaTgrxW+/q63VhuIiYz1T30fw1r+XnhWn6JaK9LhkUEVNP59+dDYnHw0k10tPPVtAyniCIH5u5qfvuBj5c040gt5V/dHQsHtsIHz7bgWDkbWPzXnjx5ss6/NRoNrl27hg0bNmDYsGGCBUaWEzKfWTbEfjWeDI2DcbHTmfKTQa3x4g+H7fJaSqGC7mdfw1P6W3eVwt4JJQnHkjGTSkjko0J9tAmgnBOlBQNbSR2CSRYnS4cP655UXFxcEBwcjI8++sjknXJkG1O/TSFnavatpvvVEPPAYGgiR3tN8Phky3B8ufsCDmfk2eX1lMDU33tguwisOiB8MdbZfZqhb2wYvihcIvi+7UUJJ1HSz5IWdCGPt2L5akQ7qUMwixJKNVicLG3dulWMOMgM9mppGRxXt8qBwMVkqmY9Odxg5WFB87szMPXXntIjSpRkqba/l87VvfxPR1X1bV1H6hDISg+WylC6cP9qpleS2M//7Sh1CGax+AzRtWtX5OXlVVleUFAgeukAZ2csV1o4qLVgrzOwXdVBvOK2LIm3b3P1b1t1fr94lh8wKNhHnAr2XaJCRNmvPfVtI/+rZNLPXhekdF/rujVNryQDFidL27ZtQ1lZWZXlJSUl2LlzpyBBkX6GfsdHZnbHU//c7WCrWn5eaF7Hr8pyMQ8iGgNtS/bMoarp6c/v28Z5WwjM6WIYoCfBFPx1FXjuUkL3jDN7t29zg8/Z0rC0uFKpjSAjBUnpvvFdGkkdgtnM7oY7duyY9v9PnjypM9FsRUUFkpKSULs2r6jE9OCgW+Duj9u/unC3sHaN1n9lX1R2R8B7J3UZalmSusWpd/NaeOmHNGmDkIg55wyhc4KdL3cRdodED3j/2RZ4JsZwi7Etia5rpRp//+5QFx/+fs7qfTkLpdxRCFhw+mvVqhVUKhVUKpXe7rZq1arhk08+ETQ40qXWkz28lCBsHZqXuuvfX6C3JyDSXfz63pe96TtGqlQqrB0bzwlPDbDkNmtzRARULXyq7wKByFrPtY0AiooMPm/Lt63yDSnPtqnDZMnBmJ0sXbhwARqNBg0aNMD+/fsRHHz/NmIPDw+EhITA1VW+tyY6An19u+O7CteMWS+wusHmY18vd9GSJUMMdc+JwdBBMraek45bMuOsMaBdBL7de0n8WIjsxcJsqXVdf7130QZ6e2JwXF18vy9DmLgcVL1A8afSEorZyVK9encn31Or1aIFQ8bVD6r6xRJyfMSyofarrVSZoZYlezY4RctkMkklaVa76tg2oXH4j7IMalcX3+65InUYVrN0bOYjjYIMlhx555nmaBtZEy+tPipAZI4psWmo1CGYzaxk6ddff8Xjjz8Od3d3/Prrr0bXfeqppwQJjKzT3MQEnsY0tmBeJCElNg3Tu9zSXGlwnPVTcdQPqoGf/tsRNb1d8CvOW70fR8EchazRuXGQbJMlcy4Gha4c8EzrOujZtBbm/H4K36SwFbYyv2ruiroZwqxkqU+fPsjKykJISAj69OljcD2VSoWKigqhYiMrjOncEGdwxOLt1o6VrtaFoelOLPVyT9vmxmtTtybKNeVAniDhKJqSDmIkH3IeY/ZXTiG6NzHekmFp/JUv6BqFeEPf7SDVPFw
RUVPciciVZHafZpj3ZzqWStSTYS2zzlKVu97YDSdv3aJDcCbfsm0GtI1AbD351bqwtBvOr5q7OIEQkeLpazV6tHEQ/rhYqP23LdcI4f7VDF5oVfPgeN57hnSoh3/rKXwsdyxb7GCs+QIaqzsiLenvknNW5n6LjszsjrVj40WLo2WE+OOiyDnoG48U5uel829Lj5/mXtA9G+u8Ndv0UVqiBJjZsrRw4UKzdzhhwgSrgyFpuMq0xH/LOv5IPp0jdRhkhH91D9QRsYvB080V1dxdcbuc3ftK0DLCX+oQDNJ3fn4w2bH0UGhu2RMvd1dEBFTD5dzblr0AyYZZydL8+fPN2plKpWKypDAzesdIHYJB7z/bAp/vOI+lOzjg2t4sufATu7CcXJN5qsqvujua1PLFyWsFUodilgcn67Z0zJLaQRq/kyY9gq2nryO3qBTLdl6QOhxZMusod+ECPzxHVctPvhMtBnp74tVeMUyWJGDJSaO6hxt++m9H9P1sjyixPHhCI3mT69hBfTeSPJjsWNo75CjfzegwX0SH+aKkvILJkgE2jVnSaDQO82VxVrxoJyG0EXEyTB5hlEWuw1E6NgyssqzuA1Xj20Za9j22ZPYBOd8peI+Xuyt6GijlsmJEOztHIy9WJUvLly9Hs2bN4OXlBS8vLzRr1gxffPGF0LGRHShxoB2JT05fC16PkRD0tXgN6xiJPq3uT0IeHWZZnTpH6YarzNDMCUHels1BWs3dFXVqyrfnwlIWJ0szZ87ExIkT8eSTT2LNmjVYs2YNnnzySbz00kuYOXOmGDGSiBylZWnBwFai7dtXQZM9EpF+NWtUPdl7ubvitd5NtP+29HBoyXg9OV2AGCPExUmPJqHY+n+PIXlKZ/RtU9v2HcqAxWeBxYsXY9myZRg0aJB22VNPPYUWLVrgxRdfxFtvvSVogCQuHy9pxxdMSnhIkP10ixGvbL6Lo2SUMtZAz1Q+99hzjkCynVKSgnsCaniglp8XVLB8vNV/HmmAQxl5eKJFLXGCk4AQv7bKBSfnPdcKPx3KFGCv0rK4Zam8vBxt21atvBkbG4s7d+4IEhTZT4cG0k4U2y3adJIzqH2EHSIxzNL5oshyPxqpIM9uOBKTq4sKO1/ugh0vd7H4wsjb0w3fjGyP59qaPkYp5Shi6TjklaPjzF63rQyLH5vL4mRpyJAhWLx4cZXlS5cuxeDBgwUJisxja6Gz9pEBko9ZMmeA5JtPNbNDJIaxYUkYPp6GG7ID9HSR3FN6h7MGkLjcXF3g5soazYDlFycNg73NXtdXpndKmsOqwRjLly/Hn3/+iQ4dOgAA9u3bh4yMDAwdOhSTJ0/Wrjdv3jxhoiS9YmpZP2kuII/uDXMi8HCT9iAmdUIpBTHe8+AO9bBk+znB90vyooS7vqSglOOIpWeFUF8v0yv9I0LBA74tTpbS0tLQpk0bAMC5c3cPfEFBQQgKCkJa2v1pBJXyxXBmcuhesuTWW6k4Y8uSGG95ZKdIJkskCU43Yj4xygF9/584/HIkE1MSowTft71YnCxt3bpVjDhIAnJIlpRQp0sOn5O9ifGWfb3cUdu/GjLzOOUD2VcNGUxka6+jSKsIfxy5nGf19pa05O+d3s2s7R9uFISHGwVZHZMcsJNWwWxNNFxk8NdXQp2S7k3Eu9OOSC5+Gfcwlvy7jdF11hoZiH+PHK8tnKmnw9ZDqrmzOvy7Q90qExE7MotPlyUlJfjggw/Qq1cvtG3bFm3atNF5kP3YOmfWIw8FCxSJ9SIDDd8yLhfTH5fv/HliEeLcsnpMB9t3QnbTMsIfnu7GW2DqBYo3abKY/tulodQh2K1pydaL6F7N75ZB0FdQ0rNSq5Gztbhb3A03atQo/Pnnn3j22WfRvn17p8rY5cbWZGlUp/oCRWK9YB9PqUMwqZoMmvCVKNDCir8kncndG5u1nlKP9iE+ztMC4mbjeaF9/QBsntwZ4f5eiJ32i85zB2YkoMUbfxrdXqnfEVM
sTpbWr1+PjRs34uGHHxYjHrKArYmquwPdKuuMg7Dlz7w/Sq/m+ueiIvtoXdcfE7oJUxwWcK4uLzl66+lmeP7bVDzfuYHV+2gUor8cgG+lIsbVKrVChvh4IudWqdWvpwQWny1r164NHx8fMWIhC/VqZv1JprcDVZwF9M8oTtYT4vZvfXNJjXg4Uuffz7SujUX/Yve9lIK8zW/dfTAROj27p9DhOCx7pZANgmtg97SuGBofafO+xj5WtfvyjSeboGWEv85zq5ygy93iZOmjjz7CK6+8gkuXLokRD5mwdEgsfL3c8PmQWARacJB70Ef9WwoYFVFV/tU9MH+A7vdsVKf6eLFrI+2/5/RtbrIlYnjHSDHCE8WTLcNNryQzOuU7jAx3qf5Ad3RCTKgiWqfZ0GW9yr9Vn38KSg5/uD5+Gfcw/KvfvxhqYEFhSqWy+Jvetm1blJSUoEGDBvDx8UFAQIDOQyy5ubkYPHgwfH194e/vj1GjRqGwsNDoNiUlJRg3bhwCAwPh7e2Nfv36ITs7W/v80aNHMWjQIERERKBatWqIiYnBggULRHsPQujRNAxHZ/VAYlPbui68TAzkJOc2VaB6KG3r6R4TVCoVmtX2s2gfr/ZSzgD7BQNaSR2CxYxVT69sRu8mcK2UeXSLCdHbWsLcRFpCFgWtfCFj7rgvR01OLe67GDRoEDIzM/Huu+8iNDTUbv3TgwcPxrVr17Bp0yaUl5djxIgRGDNmDFauXGlwm5deegkbNmzAmjVr4Ofnh/Hjx6Nv377YvXs3ACA1NRUhISH47rvvEBERgT179mDMmDFwdXXF+PHj7fK+rMExASSmY2/00BmbIDWpK7hbQomTLkeHmTeswtVFt9zIY1HBcHFR4Y9Jj2A9zosUHVmKpwdxWJws7dmzBykpKWjZ0n7dOKdOnUJSUhIOHDigncT3k08+Qa9evfDhhx8iPLxq03d+fj6WL1+OlStXomvXrgCAr776CjExMdi7dy86dOiAkSNH6mzToEEDpKSk4KeffjKaLJWWlqK09P5gtoKCAiHeJpEs2DNR4oFdeqGVWgyMTYH0YGv0vd67ugE1gLxKy4UMzoE4+ufSILgGzl8vQkKMY9als/iSLTo6Grdv27cCb0pKCvz9/bWJEgAkJCTAxcUF+/bt07tNamoqysvLkZCQoF0WHR2NunXrIiUlxeBr5efnm+xOnDNnDvz8/LSPiAjTM04T0V1NbJzTkITVNSbE6POv9opG9yah6NW8llmt2kqoyi8JB/9YVo3pgLeeboo5fZtLHYooLE6W5s6diylTpmDbtm34+++/UVBQoPMQQ1ZWFkJCdH/Qbm5uCAgIQFZWlsFtPDw84O/vr7M8NDTU4DZ79uzB6tWrMWbMGKPxTJ8+Hfn5+drH5cuXzX8zZJVxcigqR4KICKiOwXF1MfqR+vB049g5qZlKgMY82hDLhrY1ezD3Ew52p61QQnztU1NOqtbaEB8vDI2PhI+MuvCFZHGy1LNnT6SkpKBbt24ICQlBzZo1UbNmTfj7+6NmzZoW7WvatGlQqVRGH6dPn7Y0RKukpaXh6aefxqxZs9CjRw+j63p6esLX11fnQeJ6vjOTJUfyzjPN8VrvJlKHQRaqXFvHUNHR/rFsadfng2ftM3RFyAHedJ+gE+keP37con1NmTIFw4cPN7pOgwYNEBYWhpycHJ3ld+7cQW5uLsLC9N8VFhYWhrKyMuTl5em0LmVnZ1fZ5uTJk+jWrRvGjBmDGTNmWPQeiMgwH6/7hxhbK85LqWFwDZy7XiR1GKIz1YPm6qLC8Td6QK2BwVZBJQ5yt4eIAPtMFcNxgOKwOFnq3Lmzzr9v3bqFH374AV988QVSU1MtuossODgYwcGm5yeLj49HXl4eUlNTERsbCwDYsmUL1Go14uLi9G4TGxsLd3d3JCcno1+/fgCA9PR0ZGRkID4+XrveiRMn0LVrVwwbNgzvvPOO2bGTffH3r0z+1T3+6cJR2VSTZ0bvGLy
94VSV5a/2isa7G8VvfX4oxMcpkiVzOGo3C5ExVh+9duzYgWHDhqFWrVr48MMP0bVrV+zdu1fI2LRiYmLQs2dPjB49Gvv378fu3bsxfvx4DBw4UHsnXGZmJqKjo7F//34AgJ+fH0aNGoXJkydj69atSE1NxYgRIxAfH48OHe5WG01LS0OXLl3Qo0cPTJ48GVlZWcjKysL169dFeR9EzmLec/e7HLo3CcVjUcYHEZvSoUGg3uVjHmX3LFFlvLAUh0UtS1lZWVixYgWWL1+OgoICPPfccygtLcW6devQpIm44w++//57jB8/Ht26dYOLiwv69euHhQsXap8vLy9Heno6iouLtcvmz5+vXbe0tBSJiYn47LPPtM//+OOPuH79Or777jt899132uX16tXDxYsXRX0/RI6mU6Mg7PrrBgCgb5s6or5W9yahghXOJMfmbMkD6/CJw+xk6cknn8SOHTvQu3dvfPzxx+jZsydcXV2xZMkSMePTCggIMFqAMjIyssotq15eXli0aBEWLVqkd5s33ngDb7zxhpBhkgW8PYWZz83conokLjHHJNXyu18L6PTsnqxALxIlFQA1l4PfsV8FUyVxmH22+v333zFhwgSMHTsWDz0k3AzV5LyWD2treiUzdDNRJ4bsQ8yTUqC3J9aO7YjqHq6SJErOcrHesWEQujcJRWRgdSSfzjHY/UnkbMxOlnbt2oXly5cjNjYWMTExGDJkCAYOHChmbCSSqYmNpQ4BAODtJUzLEgnn0camb7iQSmw9y0qTCMmcZKlNXX/R4xCbq4sKy4bevYh5tVeMQ3TpKP8dWMYB/mSyZHaba4cOHbBs2TJcu3YNzz//PFatWoXw8HCo1Wps2rQJt27dEjNOEtCAdnWlDsFixg7aDzcMsmMkjm3hwFZWb/vYP4mWj0Ddq0oSFeqDL4e3kzoMQTlCouSM+HcTh8Ud1DVq1MDIkSOxa9cuHD9+HFOmTMHcuXMREhKCp556SowYiQxaPaYDOjZisiQU/+rmzUCvz9D4evhscBtsntLZ9MoKY6rQ39Otw2367Eg8zjZmicRh02i+qKgovP/++7hy5Qp++OEHoWIikcnlwkOIKaTiOKZCNtxcXdCreS2E+nqZXpmISEEEufXB1dUVffr0wa+//irE7oiIZGNUp0ipQyAbyOTakBTO8e4TJZM4dxCR+To1CoJGhM6cyED7TH/h7FrXle7GAHIcTJZIEaqzrg7JmDUXIOsnPCJCJPSgzwa3kToEcgBMlpyQEscscXJOcjRCFWUl4ziGjoTAZImIyAQvN7ZsEjkzJkskmSbhvjZt37tFLYEiITJMA+DlntGi7HtSAmdDIFICJkskGVvnEvt0UGuBIiEyLszPCwkiTKszKUEe1fSJyDgmS05ILmOWbMVKtcKa27e51CHI2pJ/x0odAhFJhMkSEQEAOkfJd144OXBz5eGSyFnx1++EWGeJ9OH3gohIPyZLREREREYwWXJCHOpDRERkPiZLRERGmFM8lRcgRI6NJWSdEI/rRMKypBo9kRja1PVHg2BvqcNwWEyWSBLfjmovdQhERA7jp/8+LHUIDo3dcE5IDvWJokJ9pA6BHES9wOpSh0BEDo7JEhEp2prn4+32WpO766+4LYPrDyISEZMlJySL47osgiBHEOTtabfXUhsYnMQxS0SOjckSKca/4iIke+3PBreR7LXJOLFbdTS4nwkxKSJyTkyWnJBSuwxaRvhL9to1PB3/XoiaNdylDkGWKlc279umtv51FPqbIiLzMFkixXiiebjUITisjg0D4enmKnUYslS5ZaleYA0ce6MHzr/bS8KIiMjeHP9ymaqQw91w1nBxuR/3h8+1tOtraxy8/8WvmnJblez9ffb1Uu5nRUTWYcsSScLWSVt7N68lUCRE1gny9pA6BDIh2Md+g//JsTFZIosFVLf9JOFbjY2apGzfjIyTOgQyYfWYDlKHQA6CyRJJguNjSOmahPtKHQKZ4O7KUxwJg98
kIiJySAodnkkyxGSJiIgcklJvZiH5UUyylJubi8GDB8PX1xf+/v4YNWoUCgsLjW5TUlKCcePGITAwEN7e3ujXrx+ys7P1rvv333+jTp06UKlUyMvLE+EdEBGRPTFVIqEoJlkaPHgwTpw4gU2bNmH9+vXYsWMHxowZY3Sbl156Cb/99hvWrFmD7du34+rVq+jbt6/edUeNGoUWLVqIETqR7PEC3DCxq0bwsyeSP0UkS6dOnUJSUhK++OILxMXFoVOnTvjkk0+watUqXL16Ve82+fn5WL58OebNm4euXbsiNjYWX331Ffbs2YO9e/fqrLt48WLk5eXh//7v/8yKp7S0FAUFBToPInJetuQ7Cwe2FiwOIhKHIpKllJQU+Pv7o23bttplCQkJcHFxwb59+/Ruk5qaivLyciQkJGiXRUdHo27dukhJSdEuO3nyJN566y188803cHEx7+OYM2cO/Pz8tI+ICOnmLCMicZnT8mNL41MtPy8btiZj2GpHQlFEspSVlYWQkBCdZW5ubggICEBWVpbBbTw8PODv76+zPDQ0VLtNaWkpBg0ahA8++AB169Y1O57p06cjPz9f+7h8+bJlb0jhePxxPK0jakodgmyJ3Q3XNjJA3BdwYrYWvyW6R9Jkadq0aVCpVEYfp0+fFu31p0+fjpiYGPz73/+2aDtPT0/4+vrqPIiUbPjDkVKHoGi2npKFKPRKVbFliYQiaRnlKVOmYPjw4UbXadCgAcLCwpCTk6Oz/M6dO8jNzUVYWJje7cLCwlBWVoa8vDyd1qXs7GztNlu2bMHx48fx448/Arg//1dQUBBee+01vPnmm1a+MyJlYfE+afGcLg5+riQUSZOl4OBgBAcHm1wvPj4eeXl5SE1NRWxsLIC7iY5arUZcnP4pB2JjY+Hu7o7k5GT069cPAJCeno6MjAzEx8cDANauXYvbt29rtzlw4ABGjhyJnTt3omHDhra+PSIiIq2eTcOQdEL/0BFrubuq8OhDps+jZBtFTNAVExODnj17YvTo0ViyZAnKy8sxfvx4DBw4EOHh4QCAzMxMdOvWDd988w3at28PPz8/jBo1CpMnT0ZAQAB8fX3x4osvIj4+Hh063J0v6MGE6MaNG9rXe3CsEzk3kYetkMK1qccxX2Ta4n+3Qf3pGwXd54k3e8LdlW1oYlNEsgQA33//PcaPH49u3brBxcUF/fr1w8KFC7XPl5eXIz09HcXFxdpl8+fP165bWlqKxMREfPbZZ1KET0QOaOfLXXDx7yK04yBteZJZDiF0RfEl/46Fhxu70O1BMclSQEAAVq5cafD5yMhI7Zije7y8vLBo0SIsWrTIrNd47LHHquyDCJDdMZdkIiKgOiICqksdBhng6HfDPdo4SOoQnAZTUrK713rFSB0Ckdl4+aRccrwb7v1+nClCiZgskcVsPQD1b1tHmECIHIQcT+pEdB+TJbKYM/ZUOuFbpn8wj1EuWf7tZBkUmcJkiYjICHskyjU8FTN8lGTE0cdkyQmTJbIYuwwci5sL/6BS+3hgK6lDcEhC330mhOgwH6lDICswWSJyckmTHpE6BKcXHcYpk5xFizr+gu1Lhrmgw2KyRBYb3jHSpu3ZdCwvjUJ4pUvCkktjpUzCqGLz5M6IDLSt5ERi01B4ubsKFBGZwmSJLDayU32pQyAiGft94qNShwBAvi0vjUK8sW1qF4ywYQLr13o1ES4gMonJElnM1nEA7m4yPYIR6VHdg1fvloqSybgcOY5ZqmzWk03xUkJjqcMgMzBZIrur7sE7f0g52tTlvG9K5VfNXeoQTOrbprbUIZAZmCwRERkh99YJ0q9jw0CpQxAVv5b2xWSJyAyuPDKRnQRU95A6BLIj/+ryb/0iJktEZnH0q1SSDx8vdlM7Ex8vd7z5VFOpwyATmCwRmcHN1TF/KkHebMUgktqwjpHwcNBjjKPgX4fIib3Y9SGpQyAShdJ6zhObhUkdAhnBZImIiBQtrn5AlWVKK35rabR
KSwaVjskSkRPjAZccwaLBbTA1MQq+lcZ72TrTgL1Z+lv0cOPp2574aRMRkaIFeXtiXJdGCPX10i5LaBIqYUTierJlOEJ8vEyvSIJhskTkxNpFVu2+cBYNgmtIHYJebq5s7nNG5v7V+8fWwSeDWosaC1XFZInIicXUct7Z7t3kMtvrA97v10LqEBRlQtdGUocgCHOLn3aLCRE5EtKHyRLZ1ZoX4qUOwWqD2teVOgRByTVZEEvv5rWkDsEsTcL9pA5BUSZ0u39H5zP/TB3SNFx5FwHO9WtUHlY/I7tqXlu5J4I3nmqCH/ZnSB0GWcn1geSwS3QIzmQXShQNCaVyi8yYRxqgWbgfWtX1ly4gkWk0UkfgnNiyRGQmTzfOPu8ohneMxEsJjXFwRoLUoZCNKqfAbq4ueLRxMHy9FDiFCJuWZI3JEhE5hcrjs954qim83F0R5O2Jb0e1lzAqskVCTChcHKQ72dy6UGxYkgaTJbLKEy2UMf6DCABWj+mAge0iAFSduPSRh4LhyZo1ihQZWF3qEARTeXx3hwbOe5eqXPEI4SRm9xF2osZXekZbtR2LIJIYZvSOMfp8XINA1KzhgaMze2Dv9G52ikq+Nr30qNQhCMKRjieV38qKEe3Rp1W43vU4ZkkaTJacRMs6/oLuLyLAuis6/tBJSn7V3eHlXnXsWb/YOhJEI506Vv5+STxhfveLTHq5u2L+gFbSBUNV8G44slpADQ/kFpVJHQaRzWY+0QQdGwZi/MrDUodCFnCkKT9e6NwQl3OL0eufEhfm1l0i+3CcbxoRUSWmuuYq83J3xRMt9Hd7kHyNeaSh1CEIpoanGz4e2Bo9moZpl+n7DrerX9OeYdE/2LLkJML9q0kdAgDljzGYmhiFD/5IlzoMQcx6sonUIYhqeMdIlN5Ro2PDQLO3qRdYHZf+LhYxKhKSX3UFlgiwwH8eaYDeLWrhbHYh2tcPQHFZBQJqeEgdllNiy5ITiAr1QQ1P4fPie3cXkTINiY+UOgTBeD/w/X6hc0O4ubpgXJdGaF3X/Cvx70bFCR2aVaLDfKQOgWSill81PNo4GF7urkyUJMRkyQkE+3iKst+XujcWZb9ypuEIdVnq20Z3gHaDIOsmybX2xgWh/TC6g9QhEFElikmWcnNzMXjwYPj6+sLf3x+jRo1CYaHxqQpKSkowbtw4BAYGwtvbG/369UN2dnaV9VasWIEWLVrAy8sLISEhGDdunFhvw6G4u1r+9XF3UcxXTq+mCp6upbIl/24jdQiCenCgry13t1ky1kksNdmCQCQrijlzDR48GCdOnMCmTZuwfv167NixA2PGjDG6zUsvvYTffvsNa9aswfbt23H16lX07dtXZ5158+bhtddew7Rp03DixAls3rwZiYmJYr4Vu5PTOCGlV9t9rHGw1CEIokGwt9QhiOrBeeAs8Z9HGuDU7J4CRkNESqeIAd6nTp1CUlISDhw4gLZt2wIAPvnkE/Tq1QsffvghwsOr3sWSn5+P5cuXY+XKlejatSsA4KuvvkJMTAz27t2LDh064ObNm5gxYwZ+++03dOt2v1BdixYt7PPGnIzC8yQAvJ2XiMgZKaJlKSUlBf7+/tpECQASEhLg4uKCffv26d0mNTUV5eXlSEi4P1FmdHQ06tati5SUFADApk2boFarkZmZiZiYGNSpUwfPPfccLl++bDSe0tJSFBQU6DzkTC7DbPyqOfadK0pSuQAekRhWjGgndQhEglFEy1JWVhZCQkJ0lrm5uSEgIABZWVkGt/Hw8IC/v7/O8tDQUO0258+fh1qtxrvvvosFCxbAz88PM2bMQPfu3XHs2DF4eOgfNzBnzhy8+eabtr8xEbir3DGx5kQAwPzyDfefqFHDpqyp8n6t9c1IG+80svE9mMOc9/nZ4Db47/eHRI3DlNseXoh8Zb1V29by85L1rOzWftcmJTyEjzefxdjHBKi9Y4fvWmWm3rMlf+875S6Y/14jg8+/2ita0PdnKHYPK8Y
02oVIf1shjpGyY+ffgZxJ+m2eNm0aVCqV0cfp06dFe321Wo3y8nIsXLgQiYmJ6NChA3744QecPXsWW7duNbjd9OnTkZ+fr32Yaomiu5rXcYzB0fcq7CqVtfP6yd3Ebg9h+9TH8HJilNShyFqTWvb5HYp9ih0aX0/kVyC6T9KWpSlTpmD48OFG12nQoAHCwsKQk5Ojs/zOnTvIzc1FWFiY3u3CwsJQVlaGvLw8ndal7Oxs7Ta1at096TVpcr84X3BwMIKCgpCRkWEwJk9PT3h6inM7PpHYnjYwQafSqVQq1Au0rmQAKc+sJ4WdHJzIGEmTpeDgYAQHm767KD4+Hnl5eUhNTUVsbCwAYMuWLVCr1YiL09+1ExsbC3d3dyQnJ6Nfv34AgPT0dGRkZCA+Ph4A8PDDD2uX16lz91bj3Nxc3LhxA/XqOc5VS6MQ8e58mty9MeZtOiPa/kl4HKTu3Bzlz2/LHY9ElpJpp7KumJgY9OzZE6NHj8b+/fuxe/dujB8/HgMHDtTeCZeZmYno6Gjs378fAODn54dRo0Zh8uTJ2Lp1K1JTUzFixAjEx8ejQ4e7Bd8aN26Mp59+GhMnTsSePXuQlpaGYcOGITo6Gl26dJHs/Qrll3EPY8yjDfB/InZL1Ley+B8ROTYOdSFHoohkCQC+//57REdHo1u3bujVqxc6deqEpUuXap8vLy9Heno6iovvz+s0f/58PPHEE+jXrx8effRRhIWF4aefftLZ7zfffIO4uDj07t0bnTt3hru7O5KSkuDuLt8BsOZqGeGPV3vFVJkKQkhNw32NPj/y4fqivTYROadvR7WXOgRyMoq4Gw4AAgICsHLlSoPPR0ZGVpmKwsvLC4sWLcKiRYsMbufr64vly5dj+fLlgsXqTEwVN+zVPAxf7r5gp2iIyBmwDAnZm2JalkiZvL0Uk48TOQUxxzBWphH9fjgi+2GyRKJ5tHEwosN8MfaxhnjzKce6c6VHk1CpQ7BKz6b67x4l+Xm/n/AzCczu0wyhvixISmQpJktks/f6Na+ybPPkR7F82N2K66/0jMawjpF2jooeNKpTfXz6r9ZSh0Fmeq5dBCZ2e8jq7V/tpVtPy8vdBUM6OM5dvkT2xGSJbDagXd0qyxqF+MBdrhV8BdC+foDUIZg0++mmeLJlONrXD0BCTAhe7hkFNwf+m5CugBr3a8ElNg3FhgmPSBgNkbJxQAkJok+rcKw7clXqMOxmeMdIvL3hlNRhGHR6dk94ubtiSHyk1KGQDYSqifT5kLamV1IQFVhjieyLl5kkCGdrsZDz+/2/Ho3h5e4qdRhERA5Dvkd8UqwODeTfRSWE2U83Raiv/ae9cXdV4ZWe0Qjyrvray4a2xQudBZhIlmRhcJxyxxiJWZSyuicvBsi+mCyR4H4Y3UHqEOxiSHwk9k7vZtfXnNC1EU691RNjH2uI2jWrVXm+e5NQWbd6kWWCfTzhY0VR2Re7NhIhGvloaKK+G5HQOGaJBPFC5wb4MfUKnmtbx6nmHrPXe105Og43CsvQs2kYkyEn4+pq+XdsSo8opGXmixCN9Pa9at8LFCKAyRIJpFGIj3ZQMQkrMrA6OjYMqrL8mVbhOHo5z/4BkV3NH9AKI746YPF2zWr74euR7VHbX5q6SmL1wrFOFEmByRIJhomSOAa2r1qaAQCGxkciKswXzWr7YtmO82hop8rMZF9dokJw5u3H0XjG7xZv27lxsAgRETkfJktEMjf6kQZ6l7u4qBDfMBAAMLlHlD1DIjvzcGPXKwB8NaKd1CGQk+IvkEjmXF2cZwwYkTFdokKkDoGcFJMlIiIH8shDVce3SaFdZE2pQyASDJMlIhmrH1RD6hBIYf5lYIybvVX3cEP62z2xfepjUodCZDMmS0QytnZsR6lDILKap5sr6gUy4SflY7JEJFNv92mGgBoeUodBJAueHOROEuK3j0imalZnokT3dWpk3likGlZU/Ja7vq1r4/TsnlKHQU6MyRKRTCU04Z0
/dN/XI9ubtZ65SZU9/THpUZu2nzeglVPNDEDyw2SJSIYahXjD041FPuk+c0pIDGofARcZlprwq+YudQhENmGyRCRDXaPZqkSWm/Z4jNQh6GVLUc1WEf7CBUJkJSZLRDI0uXtjqUMgBZJrC05ADQ+MfayhVduufr6DwNEQWY7JEpEMcZ49cjSv9IyusmxUp/omt2N3NMkBkyUiIrK76DAfTHu8agJFJEdMlohs9Mmg1lKHQKQ4Gyc8AjcTg9EndG1kp2iIjGOyRGSj5rX9pA6BSBFa/jNYu0FQDbi4qKBSqTAp4SGD6w/uUM9OkREZx2SJiEghZj/d1OBz7/VrbsdIrLNsSCzGd2mEb/8Tp11maOB3ywh/hPp62Ss0IqOYLBHJwFfD20kdAincgHbymEDXmBBfL/xfYhRq+1fTLvN0c8WAthFV1v1iaFt7hkZkFJMlIhmIaxCAjg0DAXDyXHI+b+ppMQv28ZQgEiL9HG8SISI70wiwDxeVCitHs54MWWfx4DZSh2ATlsoguWPLEhGRUhiYH62LA1R859RvJGdsWSKSAY0QzVPk8AJreOhd7giJRsq0bth34W/8dvQanm4VLnU4RDoU07KUm5uLwYMHw9fXF/7+/hg1ahQKCwuNblNSUoJx48YhMDAQ3t7e6NevH7Kzs3XWOXDgALp16wZ/f3/UrFkTiYmJOHr0qJhvhRxMCMdWkJ30bBpWZdmcvs0dosp1mJ8Xnm5VG18Ma4snWzJZInlRTLI0ePBgnDhxAps2bcL69euxY8cOjBkzxug2L730En777TesWbMG27dvx9WrV9G3b1/t84WFhejZsyfq1q2Lffv2YdeuXfDx8UFiYiLKy8vFfkvkIGp4uiGxaajUYZATcHFR4aEQb+2/g308Mai9/O+CI1I6lUYj/w6AU6dOoUmTJjhw4ADatr17O2lSUhJ69eqFK1euIDy86lVIfn4+goODsXLlSjz77LMAgNOnTyMmJgYpKSno0KEDDh48iHbt2iEjIwMREXdvXT1+/DhatGiBs2fPolEj86rHFhQUwM/PD/n5+fD19RXoXZOSlJRXIPr1JKu3P/lWIqp7sFecTHtnw0ks23kBwN1k6cBrCRJHRKRc5p6/FdGylJKSAn9/f22iBAAJCQlwcXHBvn379G6TmpqK8vJyJCTcP5BER0ejbt26SElJAQBERUUhMDAQy5cvR1lZGW7fvo3ly5cjJiYGkZGRBuMpLS1FQUGBzoOcG+/mIXuZ0iNK+/+ujjBYiUgBFJEsZWVlISRE924PNzc3BAQEICsry+A2Hh4e8Pf311keGhqq3cbHxwfbtm3Dd999h2rVqsHb2xtJSUn4/fff4eZm+Cp/zpw58PPz0z7utUqRc2tRh9OekPi83F0xf0BLBHl7YvG/lV0ygEgpJE2Wpk2bBpVKZfRx+vRp0V7/9u3bGDVqFB5++GHs3bsXu3fvRrNmzdC7d2/cvn3b4HbTp09Hfn6+9nH58mXRYiTl+Pm/D0sdAjmJZ1rXwYHXuqF13ZpSh0LkFCQdJDFlyhQMHz7c6DoNGjRAWFgYcnJydJbfuXMHubm5CAurencIAISFhaGsrAx5eXk6rUvZ2dnabVauXImLFy8iJSUFLi4u2mU1a9bEL7/8goEDB+rdt6enJzw9eQcU6XJ1UaFlhD+OXs6zaLsQH0+OVyKLqdgFR2Q3kh6hg4ODERwcbHK9+Ph45OXlITU1FbGxsQCALVu2QK1WIy4uTu82sbGxcHd3R3JyMvr16wcASE9PR0ZGBuLj4wEAxcXFcHFx0Tno3Pu3Wq229e0RmWU/B+gSEcmaIsYsxcTEoGfPnhg9ejT279+P3bt3Y/z48Rg4cKD2TrjMzExER0dj//79AAA/Pz+MGjUKkydPxtatW5GamooRI0YgPj4eHTrcnVaie/fuuHnzJsaNG4dTp07hxIkTGDFiBNzc3NClSxfJ3i8pV4vaHLdERORoFJEsAcD333+P6OhodOvWDb1
69UKnTp2wdOlS7fPl5eVIT09HcXGxdtn8+fPxxBNPoF+/fnj00UcRFhaGn376Sft8dHQ0fvvtNxw7dgzx8fF45JFHcPXqVSQlJaFWrVp2fX/kGF55PFrqEIiISGCKqLMkd6yzRJVFTttg0foX5/YWKRIiIjLGoeosESlJ0qRHpA6BiIgExGSJSGChPl5Gn/9yeFujzxMRkbwwWSKys67RnEeOiEhJmCwRCcy/urvUIRARkYCYLBEJTKVSISEmxPSKRESkCEyWiEQQ5ld13JJfNXccndVDgmiIiMgWTJaIRDC1R9V6S79PfAR+1e520U3o2ggAML5LI7vGRURElmOdJQGwzhIZ8n9rjuLH1CtImvQIosPufzc0Gg0ycotRN6A65/giIpKIuedvJksCYLJERESkPCxKSURERCQAJktERERERjBZIiIiIjKCyRIRERGREUyWiIiIiIxgskRERERkBJMlIiIiIiOYLBEREREZwWSJiIiIyAgmS0RERERGMFkiIiIiMoLJEhEREZERTJaIiIiIjGCyRERERGSEm9QBOAKNRgMAKCgokDgSIiIiMte98/a987ghTJYEcOvWLQBARESExJEQERGRpW7dugU/Pz+Dz6s0ptIpMkmtVuPq1avw8fGBSqUSbL8FBQWIiIjA5cuX4evrK9h+lYyfSVX8TPTj51IVP5Oq+JlU5UyfiUajwa1btxAeHg4XF8Mjk9iyJAAXFxfUqVNHtP37+vo6/BfWUvxMquJnoh8/l6r4mVTFz6QqZ/lMjLUo3cMB3kRERERGMFkiIiIiMoLJkox5enpi1qxZ8PT0lDoU2eBnUhU/E/34uVTFz6QqfiZV8TOpigO8iYiIiIxgyxIRERGREUyWiIiIiIxgskRERERkBJMlIiIiIiOYLEls0aJFiIyMhJeXF+Li4rB//36j669ZswbR0dHw8vJC8+bNsXHjRjtFaj+WfCYrVqyASqXSeXh5edkxWvHt2LEDTz75JMLDw6FSqbBu3TqT22zbtg1t2rSBp6cnGjVqhBUrVogepz1Z+pls27atyvdEpVIhKyvLPgHbwZw5c9CuXTv4+PggJCQEffr0QXp6usntHPmYYs1n4ujHlMWLF6NFixbagpPx8fH4/fffjW7jyN8RczFZktDq1asxefJkzJo1C4cOHULLli2RmJiInJwcvevv2bMHgwYNwqhRo3D48GH06dMHffr0QVpamp0jF4+lnwlwt8rstWvXtI9Lly7ZMWLxFRUVoWXLlli0aJFZ61+4cAG9e/dGly5dcOTIEUyaNAn/+c9/8Mcff4gcqf1Y+pnck56ervNdCQkJESlC+9u+fTvGjRuHvXv3YtOmTSgvL0ePHj1QVFRkcBtHP6ZY85kAjn1MqVOnDubOnYvU1FQcPHgQXbt2xdNPP40TJ07oXd/RvyNm05Bk2rdvrxk3bpz23xUVFZrw8HDNnDlz9K7/3HPPaXr37q2zLC4uTvP888+LGqc9WfqZfPXVVxo/Pz87RSc9AJqff/7Z6Dovv/yypmnTpjrLBgwYoElMTBQxMumY85ls3bpVA0Bz8+ZNu8QkBzk5ORoAmu3btxtcxxmOKZWZ85k42zFFo9Foatasqfniiy/0Puds3xFD2LIkkbKyMqSmpiIhIUG7zMXFBQkJCUhJSdG7TUpKis76AJCYmGhwfaWx5jMBgMLCQtSrVw8RERFGr5CchaN/T2zRqlUr1KpVC927d8fu3bulDkdU+fn5AICAgACD6zjbd8WczwRwnmNKRUUFVq1ahaKiIsTHx+tdx9m+I4YwWZLIjRs3UFFRgdDQUJ3loaGhBsdRZGVlWbS+0ljzmURFReHLL7/EL7/8gu+++w5qtRodO3bElStX7BGyLBn6nhQUFOD27dsSRSWtWrVqYcmSJVi7di3Wrl2LiIgIPPbYYzh06JDUoYlCrVZj0qRJePjhh9GsWTOD6zn6MaUycz8TZzimHD9+HN7e3vD09MQLL7yAn3/+GU2
aNNG7rjN9R4xxkzoAIlvEx8frXBF17NgRMTEx+PzzzzF79mwJIyM5iYqKQlRUlPbfHTt2xLlz5zB//nx8++23EkYmjnHjxiEtLQ27du2SOhTZMPczcYZjSlRUFI4cOYL8/Hz8+OOPGDZsGLZv324wYSK2LEkmKCgIrq6uyM7O1lmenZ2NsLAwvduEhYVZtL7SWPOZPMjd3R2tW7fGX3/9JUaIimDoe+Lr64tq1apJFJX8tG/f3iG/J+PHj8f69euxdetW1KlTx+i6jn5MuceSz+RBjnhM8fDwQKNGjRAbG4s5c+agZcuWWLBggd51neU7YgqTJYl4eHggNjYWycnJ2mVqtRrJyckG+47j4+N11geATZs2GVxfaaz5TB5UUVGB48ePo1atWmKFKXuO/j0RypEjRxzqe6LRaDB+/Hj8/PPP2LJlC+rXr29yG0f/rljzmTzIGY4parUapaWlep9z9O+I2aQeYe7MVq1apfH09NSsWLFCc/LkSc2YMWM0/v7+mqysLI1Go9EMGTJEM23aNO36u3fv1ri5uWk+/PBDzalTpzSzZs3SuLu7a44fPy7VWxCcpZ/Jm2++qfnjjz80586d06SmpmoGDhyo8fLy0pw4cUKqtyC4W7duaQ4fPqw5fPiwBoBm3rx5msOHD2suXbqk0Wg0mmnTpmmGDBmiXf/8+fOa6tWra6ZOnao5deqUZtGiRRpXV1dNUlKSVG9BcJZ+JvPnz9esW7dOc/bsWc3x48c1EydO1Li4uGg2b94s1VsQ3NixYzV+fn6abdu2aa5du6Z9FBcXa9dxtmOKNZ+Jox9Tpk2bptm+fbvmwoULmmPHjmmmTZumUalUmj///FOj0Tjfd8RcTJYk9sknn2jq1q2r8fDw0LRv316zd+9e7XOdO3fWDBs2TGf9//3vf5rGjRtrPDw8NE2bNtVs2LDBzhGLz5LPZNKkSdp1Q0NDNb169dIcOnRIgqjFc++29wcf9z6HYcOGaTp37lxlm1atWmk8PDw0DRo00Hz11Vd2j1tMln4m7733nqZhw4YaLy8vTUBAgOaxxx7TbNmyRZrgRaLv8wCg87d3tmOKNZ+Jox9TRo4cqalXr57Gw8NDExwcrOnWrZs2UdJonO87Yi6VRqPR2K8di4iIiEhZOGaJiIiIyAgmS0RERERGMFkiIiIiMoLJEhEREZERTJaIiIiIjGCyRERERGQEkyUiIiIiI5gsERERERnBZImIHM7w4cPRp08fu7/uihUroFKpoFKpMGnSJLO2GT58uHabdevWiRofEVnHTeoAiIgsoVKpjD4/a9YsLFiwAFJNTuDr64v09HTUqFHDrPUXLFiAuXPnOvRErURKx2SJiBTl2rVr2v9fvXo1Zs6cifT0dO0yb29veHt7SxEagLvJXFhYmNnr+/n5wc/PT8SIiMhW7IYjIkUJCwvTPvz8/LTJyb2Ht7d3lW64xx57DC+++CImTZqEmjVrIjQ0FMuWLUNRURFGjBgBHx8fNGrUCL///rvOa6WlpeHxxx+Ht7c3QkNDMWTIENy4ccPimD/77DM89NBD8PLyQmhoKJ599llbPwYisiMmS0TkFL7++msEBQVh//79ePHFFzF27Fj0798fHTt2xKFDh9CjRw8MGTIExcXFAIC8vDx07doVrVu3xsGDB5GUlITs7Gw899xzFr3uwYMHMWHCBLz11ltIT09HUlISHn30UTHeIhGJhN1wROQUWrZsiRkzZgAApk+fjrlz5yIoKAijR48GAMycOROLFy/GsWPH0KFDB3z66ado3bo13n33Xe0+vvzyS0RERODMmTNo3LixWa+bkZGBGjVq4IknnoCPjw/q1auH1q1bC/8GiUg0bFkiIqfQokUL7f+7uroiMDAQzZs31y4LDQ0FAOTk5AAAjh49iq1bt2rHQHl7eyM6OhoAcO7cObNft3v37qhXrx4aNGiAIUOG4Pvvv9e2XhGRMjBZIiKn4O7urvNvlUqls+zeXXZqtRoAUFh
YiCeffBJHjhzReZw9e9aibjQfHx8cOnQIP/zwA2rVqoWZM2eiZcuWyMvLs/1NEZFdsBuOiEiPNm3aYO3atYiMjISbm22HSjc3NyQkJCAhIQGzZs2Cv78/tmzZgr59+woULRGJiS1LRER6jBs3Drm5uRg0aBAOHDiAc+fO4Y8//sCIESNQUVFh9n7Wr1+PhQsX4siRI7h06RK++eYbqNVqREVFiRg9EQmJyRIRkR7h4eHYvXs3Kioq0KNHDzRv3hyTJk2Cv78/XFzMP3T6+/vjp59+QteuXRETE4MlS5bghx9+QNOmTUWMnoiEpNJIVeaWiMjBrFixApMmTbJqPJJKpcLPP/8syTQtRGQcW5aIiASUn58Pb29vvPLKK2at/8ILL0hacZyITGPLEhGRQG7duoXs7GwAd7vfgoKCTG6Tk5ODgoICAECtWrXMnlOOiOyHyRIRERGREeyGIyIiIjKCyRIRERGREUyWiIiIiIxgskRERERkBJMlIiIiIiOYLBEREREZwWSJiIiIyAgmS0RERERG/D/gzeC1us1g4QAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.plot(xs, audio[0])\n", + "plt.xlabel(\"Time [s]\")\n", + "plt.ylabel(\"Amplitude\")\n", + "\n", + "# Add to the chart all the word boundaries\n", + "plt.vlines([start for start, _, _ in alignment], ymin=-0.06, ymax=0.06, color=\"lightgreen\")\n", + "plt.vlines([stop for _, stop, _ in alignment], ymin=-0.06, ymax=0.06, color=\"red\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng 
Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "uvpy311", + "language": "python", + "name": "uvpy311" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/tutorials/tasks/source-separation.ipynb b/docs/tutorials/tasks/source-separation.ipynb new file mode 100644 index 0000000000..e6ea84afea --- /dev/null +++ b/docs/tutorials/tasks/source-separation.ipynb @@ -0,0 +1,818 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/tasks/source-separation.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/tasks/source-separation.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fabIezbT125I" + }, + "source": [ + "# Source Separation\n", + "\n", + "## Introduction\n", + "\n", + "In source separation, the goal is to be able to separate out the sources from an observed mixture signal which consists of superposition of several sources. Let us demonstrate this with an example." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2ZyBlnjRvetT" + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "T = 1000\n", + "t = np.arange(0, T)\n", + "fs = 3000\n", + "f0 = 10\n", + "\n", + "source1 = np.sin(2*np.pi*(f0/fs)*t) + 0.1*np.random.randn(T)\n", + "source2 = np.sin(2*np.pi*(3*f0/fs)*t)+ 0.1*np.random.randn(T)\n", + "mixture = source1 + source2\n", + "\n", + "plt.subplot(311)\n", + "plt.plot(source1)\n", + "plt.title('Source 1')\n", + "plt.xticks(np.arange(0, 100, T), '')\n", + "\n", + "plt.subplot(312)\n", + "plt.plot(source2)\n", + "plt.title('Source 2')\n", + "plt.xticks(np.arange(0, 100, T), '')\n", + "\n", + "plt.subplot(313)\n", + "plt.plot(mixture)\n", + "plt.title('Mixture')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tiofqq4zHuXW" + }, + "source": [ + "The goal is to get Source 1 and Source 2 from the Mixture Signal. In our case Source 1 is a noisy sinosoid with frequency f0, and Source is a noisy sinusoid with frequency 3*f0. \n", + "\n", + "## A toy example\n", + "\n", + "Now, let's consider a slightly more interesting case where, source 1 is a sinusoid with a random frequency smaller than f_threshold, and source 2 is a sinusoid with frequency larger than f_threshold. Let's first build the dataset and and the dataloaders using speechbrain. We will then build a model which will able to separate out the sources successfully." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "D6TzkU5NR7s9" + }, + "outputs": [], + "source": [ + "import torch\n", + "import torch.utils.data as data_utils\n", + "import librosa.display as lrd\n", + "\n", + "N = 100\n", + "f_th = 200\n", + "fs = 8000\n", + "\n", + "T = 10000\n", + "t = torch.arange(0, T).unsqueeze(0)\n", + "f1 = torch.randint(5, f_th, (N, 1))\n", + "f2 = torch.randint(f_th, 400, (N, 1))\n", + "batch_size = 10\n", + "\n", + "source1 = torch.sin(2*np.pi*(f1/fs)*t)\n", + "source2 = torch.sin(2*np.pi*(f2/fs)*t)\n", + "mixture = source1 + source2\n", + "N_train = 90\n", + "train_dataset = data_utils.TensorDataset(source1[:N_train], source2[:N_train], mixture[:N_train])\n", + "test_dataset = data_utils.TensorDataset(source1[N_train:], source2[N_train:], mixture[N_train:])\n", + "\n", + "train_loader = data_utils.DataLoader(train_dataset, batch_size=batch_size)\n", + "test_loader = data_utils.DataLoader(test_dataset, batch_size=batch_size)\n", + "\n", + "# now let's visualize the frequency spectra for the dataset\n", + "fft_size = 200\n", + "\n", + "plt.figure(figsize=[20, 10], dpi=50)\n", + "\n", + "plt.subplot(131)\n", + "mix_gt = mixture[N_train]\n", + "mix_spec = torch.sqrt((torch.view_as_real(torch.stft(mix_gt, n_fft=fft_size, return_complex=True))**2).sum(-1))\n", + "lrd.specshow(mix_spec.numpy(), y_axis='log')\n", + "plt.title('Mixture Spectrogram')\n", + "\n", + "plt.subplot(132)\n", + "source1_gt = source1[N_train]\n", + "source1_spec = torch.sqrt((torch.view_as_real(torch.stft(source1_gt, n_fft=fft_size, return_complex=True))**2).sum(-1))\n", + "lrd.specshow(source1_spec.numpy(), y_axis='log')\n", + "plt.title('Source 1 Spectrogram')\n", + "\n", + "plt.subplot(133)\n", + "source2_gt = source2[N_train]\n", + "source2_spec = torch.sqrt((torch.view_as_real(torch.stft(source2_gt, n_fft=fft_size, return_complex=True))**2).sum(-1))\n", + "lrd.specshow(source2_spec.numpy(), y_axis='log')\n", + 
"plt.title('Source 2 Spectrogram')\n", + "\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CeG7ghFQr589" + }, + "source": [ + "Now that we created the dataset, we can now focus on building a model would be able to recover the original sources from the mixture signal. For this, we will use speechbrain. Let us first install speechbrain." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1h4PPdSR7YJd" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hUqDGcXt7nlg" + }, + "source": [ + "Now, let us construct a simple model with pytorch and speechbrain for source separation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WPRLjPWO_9k2" + }, + "outputs": [], + "source": [ + "import speechbrain as sb\n", + "import torch.nn as nn\n", + "\n", + "# define the model\n", + "class simpleseparator(nn.Module):\n", + " def __init__(self, fft_size, hidden_size, num_sources=2):\n", + " super(simpleseparator, self).__init__()\n", + " self.masking = nn.LSTM(input_size=fft_size//2 + 1, hidden_size=hidden_size, batch_first=True, bidirectional=True)\n", + " self.output_layer = nn.Linear(in_features=hidden_size*2, out_features=num_sources*(fft_size//2 + 1))\n", + " self.fft_size=fft_size\n", + " self.num_sources = num_sources\n", + "\n", + " def forward(self, inp):\n", + " # batch x freq x time x realim\n", + " y = torch.view_as_real(torch.stft(inp, n_fft=self.fft_size, return_complex=True))\n", + "\n", + " # batch X freq x time\n", + " mag = torch.sqrt((y ** 2).sum(-1))\n", + " phase = torch.atan2(y[:, :, :, 1], y[:, :, :, 0])\n", + "\n", + " # batch x time x freq\n", + " mag = mag.permute(0, 2, 1)\n", + "\n", + " # batch x time x feature\n", + " rnn_out = 
self.masking(mag)[0]\n", + "\n", + " # batch x time x (nfft*num_sources)\n", + " lin_out = self.output_layer(rnn_out)\n", + "\n", + " # batch x time x nfft x num_sources\n", + " lin_out = nn.functional.relu(lin_out.reshape(lin_out.size(0), lin_out.size(1), -1, self.num_sources))\n", + "\n", + " # reconstruct in time domain\n", + " sources = []\n", + " all_masks = []\n", + " for n in range(self.num_sources):\n", + " sourcehat_mask = (lin_out[:, :, :, n])\n", + " all_masks.append(sourcehat_mask)\n", + "\n", + " # multiply with mask and magnitude\n", + " sourcehat_dft = (sourcehat_mask * mag).permute(0, 2, 1) * torch.exp(1j * phase)\n", + "\n", + " # reconstruct in time domain with istft\n", + " sourcehat = torch.istft(sourcehat_dft, n_fft=self.fft_size)\n", + " sources.append(sourcehat)\n", + " return sources, all_masks, mag\n", + "\n", + "# test_forwardpass\n", + "model = simpleseparator(fft_size=fft_size, hidden_size=300)\n", + "est_sources, _, _ = model.forward(mixture[:5])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7KQ1ZKoyfM82" + }, + "source": [ + "Now that our model, we can now write the Brain class for training.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "U78x9qR3j3kN" + }, + "outputs": [], + "source": [ + "class SeparationBrain(sb.Brain):\n", + " def __init__(self, train_loss, modules, opt_class):\n", + " super(SeparationBrain, self).__init__(modules=modules, opt_class=opt_class)\n", + " self.train_loss = train_loss\n", + "\n", + " def compute_forward(self, mix):\n", + " \"\"\"Forward computations from the mixture to the separated signals.\"\"\"\n", + "\n", + " # Get the estimates for the sources\n", + " est_sources, _, _ = self.modules.mdl(mix)\n", + "\n", + " est_sources = torch.stack(est_sources, dim=-1)\n", + "\n", + " # T changed after conv1d in encoder, fix it here\n", + " T_origin = mix.size(1)\n", + " T_est = est_sources.size(1)\n", + " if T_origin > T_est:\n", + " est_sources = 
nn.functional.pad(est_sources, (0, 0, 0, T_origin - T_est))\n", + " else:\n", + " est_sources = est_sources[:, :T_origin, :]\n", + "\n", + " return est_sources\n", + "\n", + " def compute_objectives(self, targets, est_sources):\n", + " \"\"\"Computes the loss functions between estimated and ground truth sources\"\"\"\n", + " if self.train_loss == 'l1':\n", + " return (est_sources - targets).abs().mean()\n", + " elif self.train_loss == 'si-snr':\n", + " return sb.nnet.losses.get_si_snr_with_pitwrapper(targets, est_sources).mean()\n", + "\n", + "\n", + " def fit_batch(self, batch):\n", + " \"\"\"Trains one batch\"\"\"\n", + " # Unpacking batch list\n", + " source1, source2, mix = batch\n", + " targets = torch.stack([source1, source2], dim=-1)\n", + "\n", + " est_sources = self.compute_forward(mix)\n", + " loss = self.compute_objectives(targets, est_sources)\n", + "\n", + " loss.backward()\n", + " self.optimizer.step()\n", + " self.optimizer.zero_grad()\n", + " return loss.detach().cpu()\n", + "\n", + " def evaluate_batch(self, batch, stage):\n", + " \"\"\"Computations needed for test batches\"\"\"\n", + "\n", + " source1, source2, mix = batch\n", + " targets = torch.stack([source1, source2], dim=-1)\n", + "\n", + " est_sources = self.compute_forward(mix)\n", + "\n", + " si_snr = sb.nnet.losses.get_si_snr_with_pitwrapper(targets, est_sources)\n", + " si_snr_mean = si_snr.mean().item()\n", + " print('VALID SI-SNR = {}'.format(-si_snr_mean))\n", + " return si_snr.mean().detach()\n", + "\n", + "\n", + "from functools import partial\n", + "\n", + "optimizer = lambda x: torch.optim.Adam(x, lr=0.0001)\n", + "N_epochs = 10\n", + "epoch_counter = sb.utils.epoch_loop.EpochCounter(limit=N_epochs)\n", + "\n", + "separator = SeparationBrain(\n", + " train_loss='l1',\n", + " modules={'mdl': model},\n", + " opt_class=optimizer\n", + "\n", + " )\n", + "\n", + "\n", + "separator.fit(\n", + " epoch_counter,\n", + " train_loader,\n", + " test_loader)\n" + ] + }, + { + "cell_type": 
"markdown", + "metadata": { + "id": "xEI7cJw00UYU" + }, + "source": [ + "Now, let's visualize the results. For this purpose let's first install librosa. It has a nice tool for visualizing spectrograms. \n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "I0ul7PjF9HwC" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install librosa\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "69mRj3DKAa4F" + }, + "source": [ + "We will first plot the the spectra for the ground truth sources. And then we will run a forward pass with the model and plot the estimated sources." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "j6NbKy4S1yl_" + }, + "outputs": [], + "source": [ + "estimated_sources, all_masks, mag = separator.modules.mdl.forward(mixture[N_train:])\n", + "\n", + "\n", + "plt.figure(figsize=[20, 10], dpi=80)\n", + "\n", + "plt.subplot(331)\n", + "mag = mag[0].t().numpy()\n", + "lrd.specshow(mag, y_axis='log')\n", + "plt.title('Mixture')\n", + "\n", + "plt.subplot(334)\n", + "mask1 = all_masks[0][0].detach().t().numpy()\n", + "lrd.specshow(mask1, y_axis='log')\n", + "plt.title('Mask for source 1')\n", + "\n", + "plt.subplot(335)\n", + "masked1 = mask1 * mag\n", + "lrd.specshow(masked1, y_axis='log')\n", + "plt.title('Estimated Source 1')\n", + "\n", + "plt.subplot(336)\n", + "source1_gt = source1[N_train]\n", + "source1_spec = torch.sqrt((torch.view_as_real(torch.stft(source1_gt, n_fft=fft_size, return_complex=True))**2).sum(-1))\n", + "lrd.specshow(source1_spec.numpy(), y_axis='log')\n", + "plt.title('Ground Truth Source 1')\n", + "\n", + "plt.subplot(337)\n", + "mask2 = all_masks[1][0].detach().t().numpy()\n", + "lrd.specshow(mask2, y_axis='log')\n", + "plt.title('Mask for Source 2')\n", + "\n", + "plt.subplot(338)\n", + "masked2 = mask2 * mag\n", + "lrd.specshow(masked2, y_axis='log')\n", + "plt.title('Estimated Source 2')\n", + "\n", + 
"plt.subplot(339)\n", + "source2_gt = source2[N_train]\n", + "source2_spec = torch.sqrt((torch.view_as_real(torch.stft(source2_gt, n_fft=fft_size, return_complex=True)**2)).sum(-1))\n", + "lrd.specshow(source2_spec.numpy(), y_axis='log')\n", + "plt.title('Ground Truth Source 2')\n", + "\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sBmdv0-jq8yR" + }, + "source": [ + "Notice that these masks are basically band stop filters which aims to remove the interferences from the other source.\n", + "\n", + "## Exercises\n", + "\n", + "* Train the same model with SI-SNR loss and observe if this helps to improve the performance.\n", + "* Replace the STFT front end, and the ISTFT reconstruction with convolutional layer, and transposed convolution layers. Do the same visualization above, also visualize the filters learnt by the convolutional front end, and the reconstruction layer, and compare it with the DFT bases. \n", + "\n", + "\n", + "## A sound source separation example with a pre-existing model from speechbrain\n", + "\n", + "First, let's download the dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NUQX-saDsjPF" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AADx5I8oV0IdekCf80MSkxMia/mixture_0.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AAAZI7ZezKyHFGPdus6hn2v_a/mixture_1.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AACh4Yy4H-Ii2I0mr_b1lQdXa/mixture_2.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AAAenTlEsoj1-AGbCxeJfMHoa/mixture_3.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AAC-awQo-9NFVVULuVwaHKKWa/source1_0.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AABVKWtdVhXZE6Voq1I_c6g5a/source1_1.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AAC9EfjTTwL0dscH16waP9s-a/source1_2.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AAC5Ozb4rS9qby268JSIy5Uwa/source1_3.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AABlonG910Ms2l-rTN5ct3Oka/source2_0.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AACDOqEgyXIeA2r1Rkf7VgQTa/source2_1.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AACTYGAG0LOh6HvxpVYoqO_Da/source2_2.wav\n", + "!wget https://www.dropbox.com/sh/07vwpwru6qo6yhf/AACPmq-ZJNzfh4bnO34_8mfAa/source2_3.wav" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84g08IFDmzbo" + }, + "source": [ + "Now let's first listen to these sounds." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vgriZkbNmssS" + }, + "outputs": [], + "source": [ + "import speechbrain\n", + "from speechbrain.dataio.dataio import read_audio\n", + "from IPython.display import Audio\n", + "\n", + "mixture_0 = read_audio('mixture_0.wav').squeeze()\n", + "source1_0 = read_audio('source1_0.wav').squeeze()\n", + "source2_0 = read_audio('source2_0.wav').squeeze()\n", + "\n", + "mixture_1 = read_audio('mixture_1.wav').squeeze()\n", + "source1_1 = read_audio('source1_1.wav').squeeze()\n", + "source2_1 = read_audio('source2_1.wav').squeeze()\n", + "\n", + "mixture_2 = read_audio('mixture_2.wav').squeeze()\n", + "source1_2 = read_audio('source1_2.wav').squeeze()\n", + "source2_2 = read_audio('source2_2.wav').squeeze()\n", + "\n", + "mixture_3 = read_audio('mixture_3.wav').squeeze()\n", + "source1_3 = read_audio('source1_3.wav').squeeze()\n", + "source2_3 = read_audio('source2_3.wav').squeeze()\n", + "\n", + "train_mixs = [mixture_0, mixture_1, mixture_2]\n", + "train_source1s = [source1_0, source1_1, source1_2]\n", + "train_source2s = [source2_0, source2_1, source2_2]\n", + "\n", + "Audio(mixture_0, rate=16000)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AlRgO_rT33GB" + }, + "outputs": [], + "source": [ + "Audio(source1_0, rate=16000)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "C9HzYrVl3uNc" + }, + "outputs": [], + "source": [ + "Audio(source2_0, rate=16000)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8NeccEWR51Px" + }, + "source": [ + "Now, let's construct the datasets and dataloaders." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "UqDjKaUG5z9S" + }, + "outputs": [], + "source": [ + "from torch.utils.data import Dataset, DataLoader\n", + "\n", + "\n", + "class source_separation_dataset(Dataset):\n", + " def __init__(self, train_mixs, train_source1s, train_source2s):\n", + " self.mixs = train_mixs\n", + " self.train_source1s = train_source1s\n", + " self.train_source2s = train_source2s\n", + "\n", + " def __len__(self):\n", + " return len(self.mixs)\n", + "\n", + " def __getitem__(self, idx):\n", + " mix = self.mixs[idx]\n", + " source1 = self.train_source1s[idx]\n", + " source2 = self.train_source2s[idx]\n", + " return mix, source1, source2\n", + "\n", + "train_dataset_audio = source_separation_dataset(train_mixs, train_source1s, train_source2s)\n", + "valid_dataset_audio = source_separation_dataset([mixture_2], [source1_2], [source2_2])\n", + "\n", + "train_loader_audio = DataLoader(train_dataset_audio, batch_size=1)\n", + "valid_loader_audio = DataLoader(valid_dataset_audio, batch_size=1)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "K1FeHiQrh7p3" + }, + "source": [ + "And now, let's tinker the model we constructed and use it on this small dataset. 
For this purpose we will use the mask-based end-to-end architecture:\n", + "\n", + "![end2end.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgwAAAC3CAYAAAB+FAaQAAAABHNCSVQICAgIfAhkiAAAABl0RVh0U29mdHdhcmUAZ25vbWUtc2NyZWVuc2hvdO8Dvz4AACAASURBVHic7d17cFvlnf/xdwhRTSJA0ZZELKlQGtHI6rKcDlvjXTSN4raLaKlRWbHruksjZsC4rTtVPFOqzqZe/1Lv1GS7Rp110tTJ8lModbysAGMaqnCxxaKAxhQ42YAiGqUoAqaHDREKo7BaBX7+/eFLbMe2cnEsX76vGYb4nKNzvrJ1+Zznec5zFg0MDAwghBBCCDGFi0pdgBBCCCFmPwkMQgghhChKAoMQQgghipLAIIQQQoiiJDAIIYQQoigJDEIIIYQoSgKDEEIIIYqSwCCEEEKIoiQwCCGEEKIoCQxCCCGEKEoCgxBCCCGKksAghBBCiKIkMAghhBCiKAkMQgghhChKAoMQQgghipLAIIQQQoiiJDAIIYQQoigJDEIIIYQoSgKDEEIIIYqSwCCEEEKIoiQwCCGEEKIoCQxCCCGEKEoCgxBCCCGKksAghBBCiKIkMAixgH344Ye88847/O///m+pSxFCzHIXl7oAIcTMOnHiBA899BAvvPACR44cQVEUXnnlFVatWsVNN93EHXfcwUUXybmEEGKsRQMDAwOlLkIIMTP27NnDz372M3w+H3/1V3/FFVdcMbLuyJEjxGIxfv3rX9PU1MRf/MVflLBSIcRsI4FBiAXi+eef56GHHuKXv/zllNt9/PHHVFdXEwgEuOaaa2aoOiHEbCftjkIsAE8//TTPPPMMx44d46233ppy21gsxtq1a/n5z39OPB6foQqFELOdtDAIsQBUVlbywgsvcNFFF+HxeLj//vv51Kc+ddp2+/bt45FHHqGtrY1UKsW9997Lww8/XIKKhRCzjbQwCDHHqapKNpuddH1nZyf33HPPyEDGUCjExo0bT2tpGB0WACwWC9deey3PPffchSteCDFnSGAQYo6zWCy43W527do1YXDYv38/N9xww5hl40PD+LAwrLKykt/97ncXrnghxJwhl1WKeeHYsWNomlbqMkrms5/9LF6vl8svv5yvf/3rOJ1Obr31VgwGA6qq4vf7T3tMKBTC4/HwzW9+k+eff/60sACwYsUKXnvtNV5//fWZeBpiGi1dupTVq1eXugwxj0hgEPPCt7/9bZYuXcrixYtLXUpJHDp0CIDjx48TDAZ57LHH6OvrY8OGDfzJn/wJl1122YSP27hxI3feeSfPPvvshOsvu+wyPv744wnDhJjdjh07xpYtW/jMZz5T6lLEPCGBQcwLR48eZc+ePSxdurTUpZSE1+vlv/7rv/B6vXi9XhRFGVn3wAMP8N5777Fy5coxjxnuhvj9738/6UDIY8eOceWVV3LffffNyPMQ0+d73/velGNbhDhbEhiEmONUVcVisUz65fDpT3+aV155hZtvvnlk2fgxC8PdE+NDw8svvyxnqEIIQAY9CjHnWSwWmpubJ13/13/916iqOvLzZAMcJ7p6Ih6P8+Uvf3naaxZCzD0SGISY4wwGw5Trb7zxRj744AMOHDgwaVgYNjo07N27l9WrV2M2my9E2UKIOUa6JIRYAH7605/idDopLy/nF7/4xZTbhkIhnE4n11xzDTt27JihCoUQs520MAixQDz77LMcOnSInp6eKbd7+OGHWbVqlYQFIcQY0sIgxAKxePFinnnmGVpaWnj66af57Gc/y5/+6Z9yxRVXoGkav//970mlUvz5n/85Dz30UKnLFULMMhIYhFhgNm3axNtvv82+ffs4cOAA+XyepUuXUl5ezj333FN0TIQQYmGSwCDEArRq1Sr+7u/+DoBgMIjX6y1tQUK
IWU/GMAixwE11SaYQQgyTwCDEAtbd3c2RI0dIpVKlLkUIMctJYBBiAYtEIsBgcBBCiKlIYBBiAQsGgwDs2rWrtIUIIWY9CQxCLFDBYJDjx48Dg/ejGG5tEEKIiUhgEGIBymazRCIRnn/+eZxOJ4899ph0SwghpiSXVQqxAGWz2ZEWhiVLluB2u3E6nWSzWZmHQQgxIQkMQswaGuHtXSRSKXA141MSBIMxsuRJJTQUXyteS4ruYJhUPk9CTaD4tlOvlJ31kSwWy2nLJCgIIaYigUGIWSIfDpKo9FNvcGPwulHrfTT7fFjKQAs6sdR4SdS4qPH5cJdBvtuNyRvAqfqxlbp4IcS8J2MYhJgV8kTUMpwKQ3Mi2PD63FiGGg/y2TxoUFnvRRm1LJ/VyI/aSzaVIBbejtflIzKzT0AIMc9JYBBiVijD5fehoBEJJ7DU1OMc6WnIo0YSGFz1uAyjl6mUKc5RrQt5tJQGFtBiKbIz+wSEEPOcBAYhZpN8jLBqoNI1upMhRjiWR3EplI3ariucx1njPLWMMmxOJ5UWA2c/qkEIIaYmgUGI2SQWJpZXcI0eyJiIEckquCpPDUrMx4J05514XQZIddOdKEGtQogFRQKDELNIIhYjq7gYlQ3QImFSFidOy6llaneEvNOLy5An1pWgzDJ+T0IIMb0kMAgxa2RRIwlsbheWUUvVmIrJ6UYZtczirMRCinAwQKyyHpf0QQghLjC5rFKIWcOAu0vDPW4+BFcghTpumcndherU0MpMuCUsCCFmgLQwCDGLlBkmGLBoMDDhlEoGE6YxG+dJdAdo9QdJkCDoayXQnRhz2aUQQpwraWEQYt4ow+b24Xf78AdKXYsolVQqddpMnqqqoijKxA8Q4gxJC4MQQswjwWCQO++8kz/84Q90dXWhKMqEU4ELcbakhUEIIeaR5uZmLBYLR44c4cknn+T++++X+4SIaSEtDEIIMc+43e6Rf3u93tIVIuYVCQxCCDHP+Hw+AG699VZpXRDTRgKDEELMMxaLhauvvnpMS4MQ50sCgxBCzENOpxOn01nqMsQ8IoFBCCHmIbfbLVdHiGm1aGBgYKDURZTK/v37+eY3v8nSpUtLXcqs89FHH3H77bfzox/9qNSlnJH169ezZ8+eC/63fPvtt/F6vXzwwQcX9DgzZWBggI8++oglS5aUupRps2rVKoLBIJdddtmMHvcf/uEfCIfDLF68eEaPO1dcfPHFPPjgg1it1lKXIs7Rgr6s8qWXXuLLX/4yX/nKV0pdyqzz1ltv8dRTT5W6jFnnyJEjrFixgh/+8IelLkVM4l/+5V/IZDIzHhj2799PY2MjK1asmNHjzhX/8R//weuvvy6BYQ5b0IEBYPHixfPq7Gq6XHzxxSxatKjUZcxKixYtktfMLFaq1+3w60JeGxO76CLpAZ/r5C8ohBBCTCSvkcqWuojZQwKDEEIIMV5eJVDjxu2uJ6yVupjZQQKDEEIIMVo2RqB+O4bWCGqXm4jPR7eEBhnDIIQQQoyRN+AKbMdmAHDRGrSR0PJw+s3nFxQJDEIIIcRoJhu20T+XWbBZSlTLLCKBQQghhBgnG9vO9ggYyvJktTIqffU4TaWuqrQkMAghhBCj5GN+3AGFrq4aTECqq4aaVoVYoLLUpZWUDHoUQgghRlGDXaQMpqERC3myWRM1NcOdFHm0hEo46MNV38VCuupSAoMQQggxiqXSRvaX6zGZFJw1zSScrfgqh24TntdIpfJYyrKoiSz50pY6oyQwCCGEmDdSqRQ+n4/HH3+cbPbczv9N3m7Uvv9Ls9tCNhLgG5U1dA1fVllmodJViW0BjmeQwCCEEGLesFgsuN1u3G43y5cvZ/ny5axfv57169fT3NxMKpWa4tEakVYvvm4Ni9OLf3s3aqqbDWUJVJmHQQY9ivnh+uuvl7sECrGAHTp0iIMHD5JIJDh48CBLly7lww8/JJvNMjAwQHNzM06nc+qd5CMEAjEM20fNt5DVyNq
8+JULWv6cIIFBzAs/+9nPSl2CEGKGjA4HwwFhzZo12Gw2bDYbX/3qV3nzzTdRVZVgMIjb7T6zHZe58PlVYtkY4UgZ+VSCSCxPfZd/7LwMC5QEBiGEELPWoUOHxgSDRCLB6tWrKS8vx2az8ZWvfIXy8vIxLYzDYxdSqRQGg+EsjmbA6WvFmdVIaXkMlS7c3ul9PnOZBAYhhBCzQjKZHBMMEokEFotlpOXg5ptvPi0cTCSbzRKJRM69EIMJy6Q5I0U40EUkppJPpWhuzeN0ealRziaYzE0SGISYRwqFAjqdbqotyBV06KfaRIgZMBwORgeEq6++GpvNRnl5OTfffDM2m42LLz77rymLxTL9BZ/aOy6fHxd+Wi/gUWYjCQxCTJNcMkxnqBc1maGAieqmFqrNk2ycidK6KUiyAHqTHYfHi0cxntuBC2n6Q510hqPETY08uqUK/YQbavR851vcn/Ow48E6rOd2tDMpiHQ0RGeol2ROh8nVSJPHyuiMkon3EAr10p/OoTcpVHlqqT7X5y/mjBdeeIEnnniCgwcPjgkHLpfrnMOBmDny15kPCgUKOh0L7qQxr5FQE2jZPGWKi8oSXxett7qo85tpr/kuj7x7mGBnPy5/xYR/l3Q4SO/Bw5xkDXc3NeKZLFicCZ2ZiloPangv8Sk3NKJ4PPxNrooL+6vSYXbU0mjMcNd3H2Hf1k20mXfirzgVY4z2auoaIF4TxtHSQPVZFJTLFdBLE8kFVbyl6twYDAb+/u//HpvNxpIlS6Z9/+LCksAwoQLJ3iCdPSrpXAHQYTQa0emAQoFcIUcuV0CnNBBoUEr2RZ1RQwSDPfTGTTQ8ugXXxKeV81c2QSwSoPlHEZy/1ah0zYZbz+oxGleysvAu7/Z20ltXgWv8iXNBJRQ1oqw5zEuH9ein5e+mx1j0hajDXFVHw3Qc7gzozFZMK1dSyLzL3s2bse/cMjYYGM2YzWasZxMW1HbaktU0nVfCmmG5OOFgiHA8zeDHiR6jHgoFHXqTGbtSgcNRgbnk798cyXCQjlAvqq6WB7d5pj1Y2u12rr322mneq5gpMnHThHRYq+rwe4ykDx9Gs9axZUsLLS0ttGzZQiAQYEudnYKWKWmVRsVDtR1OnCxpGaVjcuKtNJH/RCXOytkQFobo7Hhqy1lycj+doeRpq3PREGlHLY5JvyAK5DIaWq4wyeocmUyOSdYWVyic9tjc8LEKOTKZyfY8WNdkZU3I6qWp4fMsO/ES7ZvaiY95rA6dTj9h4C5kNDK5ccvSPWze/Ajpc37iJaK342poxKU/zOHDBSoaA2zZEiCwpYkGl5lMuI27bqthUyhOrvjeLmShWF21OPTvc3Ku/Y7FjChBC0OWWDCIms+TSIDLX48psp2wVkZWM1DT7EWZJZ/9Or1xsC/4tE80HcaKWmozpZ/6S28s+WlJSSXCEbJKPc5ZNUBZh7mqFkfwx/T1dBKtbRoVDtKEe8DVZKbQf/ojM2oHbaEcFa4KdGqIzmQF/pZa7EOPT4dbCSbtVFUY0cJBek2NbKuzn76jXJLQZh9BrYJarwtHhQJqD6GeHnqTdloe9aOQIR7qoCMUJa000mKN0t4TJ/1WBv1NTWzzOxhuHMnFQ7R1plEcVpLhTqI5E3azCb3JgbfOMeWZqLm6iabkXfzwiUfY1Kqws+nUfk8rO95DsDeNzgjp3l6SJi8tTdVYdRnUaJT0+5CLBmnLmFCq66iaMw0NevT6JcCoAac6PSalmoZAFY6273DvVh++TIBtdfYSdi/q0Oukq0BMbIZbGPLEAq2knD7q6/00K2FqKl0EDfX4KzW6A362x6b3iOd1ac0UMhk9Va6x3RFndpYG5DJoU50hFgbXTyWX0ZjqEFAgp2UmOWMpUCgM11y6c5rz/9toRGIpTJVOLNNQz7Bpec0YHdRWfwpOROkMnwqWhXgPUbOHqgm/MVU6Nu8mba6m2uHA1eBFSQfp6B1qySpECbZrVHircVQ48DR6R4L
EeLlkP0mzn50PNlFbVYFZr8PscKHoMpwYed0YsXuqsXKC99UoacXPtge72LnRTmZvkHB6eGf9tG/qRFfdQLWrmsYmL9b0fpL6KupqK86g2VpPRcMWvlu+hPf7NrMplJ74ta/1sKkjR3VDA3W1DbS01GLcdz+bOuKAkQqPC+sSMDm8NDbMpbBQjB6lwU/1p05yeHcbofT49VO9lwfXZ7SpPg/OpGWoQEbLTN1qlcugTXKQQuHUZ99ZtUCJOWVmWxi0LrrL6mm1DP2oZcmXufC6DJD3sj3sRXFO7yFVVWXXrl18//vfR1GmaW7PQpzOzgx1DQ50Z3GWVtCiBINRsCqYtDCdUR2epiY8Q5/6hXQvHR1RdBUVmHMqPeEk5lo/DS7ryKj3nNpJW0jD7rCjS6uoqgZjxrsXSIaDhJNg1GlEwyo6TxMttQr6jEpPZ4ieXhVT3RaU8Ca27s/x+R8+ypYSDIAwGAw4nU7uvPNONmzYcPY7yMcIq2U46/N0BQJo5EnEUlQ2B/Dazq+Zyul0snHjRm699dZz3ofVU8t1ofvYH+okXt2IXZcjGkpS4W1Ax0TdWVaq676Lwz70TViAAifRtAwMvYoKhTid7b0ojVWY9BXUVo3/Gimg9bbTk6uiocE+7moJPSbT+L/zYH/6ElMVVdbB+GuyWjESJq0BZiAZpf99cAwPgjNW4LDD/VoO3ZkOPtSZ8bQ0Eb/rx/Rt3US7dRuN496O8VAnyYJCuLNzsO5cmsISeLc/Sho78yYfTERnp9q1hkd2HKY3mqa21syU72UAcsR7OuhJmrDbdaidnYOfQY3DLT6Dn02daSsOu45kbw/9VNDgr6NiJLAWSPa0EYybqVCM5OIq/emTjHnhZPoJdfaTMerIqVGiGTsNTY1UmQskewdbraK5Kpq8Gdo37+Ut49+wtauBCdq9xBw3s4HB5KW1fviHLGosgcEZQAEos1Fsmu/33nuPd95556wOuX79ejZu3EgwGERRFNxuN7feeutZhYdcPER7e3Twh0IOLdlPv7GBOmDkLC20l/1qlLTHzzaPDq3Hx7fuDxKudVBrZvAyuntDKFsCQwO/rGj9d7O1vZeqbdUYM71s9nVgbHmQBrsOcOGwt3HXRh+bdQ+ypcpIIdmBb3Ma74MtQ03cVZhaa+g7eKrWTG8rbUkPgYbBZs1q873cdt9m2q0P4q9QqKoIE3zkBLloktqmbdzf309BmTgsnDx5kv3795/V7/tsLFq0iIsuugiv14vP58Pr9eJ0Olm3bt2Zzc4WCxM7XoYllsLf6sNWBlrQiaU+iCtSP+WZ7xtvvEE+P/GNaZcvX46mabjd7pEb2TidzrMPD8Yqaqs62L+3l85oHS32MOFCNU2TfvPpsbs8FDJxekNRkoXCYKwYPnvTVVDXUMG99/+Eb/X34Gn043WMfZYn051sas1h9ddOcmnlGRjKAKcaIkwYeZ94UgPFxHDz+nKj8eyOYXTgb/kGad9unti0Gfu22lErMySTGYx2F7XVpwJwdW0DoDv35zKHmK1mlnGYdDJNATO5Kd/LepKd99Kq1bGzcbCls0qnUvOTNoJVDvxKgWTHvdwb9/BgwDUYN10V9Pi+xQ99sHVnHXYdaD33cm9vFdsC1YPvlyozmbv2cupdn6RjUwhzyxY8RqDWga7mu7S2WrFu82B1VGEP7WCvptJPE4GdDqJx4/wOdwtYCa+SUInEoDJw5l/c+/fv59///d/P+kh6vZ5cLoeqqqiqSl9fH06nE6PxzK771psduKoH37SFQo5MvEB6TP9z8bO0ZKiDfnMd/pHPdyvelvupwIoRSIc72VdQuN9+6oxNr9TiKX+CrR1B4lV1aMEeNKWJU1en6TBbTbB3+GeN3s4oObOJUKcKQE4roOd9+qNJqFDQm8wYWYKxyoXdqANX9aTP+8SJE2zduvWMfkfn6ujRo8DgzGyBQIDHHnsMt9uN1+stGuoSsQjvXu2mq7WG4QaFfDYP+XzRe9T/+te
/RtMmH4PywQcfAINTywYCAfr6+ujr68PhcJzxcwMdFbUe1uzdwb7OTsJKHJNnyxRffgXS4TZae800+OuoMsZp73mE9Kj9matb2GkN0bq5g90/vgv1Gy1sqVNG9rnE7KGhIsSPWzcRsgbwmKehN9zswb8xzqZgG53WBhxE6c1V4z+Hu/Ho7HW0NCa5676XaNtUwKozUzX03AuFk2QyOXT6iQdCzntDl0YP5sMi72V7jmBnDse2U92i+qpGAkYNox0o9NPZcxhznTJqvIgJV20VwR+G6Oj1EKhKEwzGMde1nArXOjN20xIeGe4F6++kRytQFeokM9jsg6aDkwejqBkPZqMJo2kJZBRcDhNGTJPPPSLmvNIFBjVMJKtQXzl8Jpkl0q1S6XYyWWPyF7/4Rb74xS+e1WFSqRQ7duxg3bp1eL1e3G73yNnrzp07z2wnehNW86h3gbWOWk7raBxrzFlahnhcQ2ce+0GoMyuDrSsUSKpp0I+/RNOE3b4SHkmTzMSJqycwVhsn/zAtJFHTOqxeD9XK8FbV1NYBurFfU2dyibXBYKCjo6P4hudBURSuvvpqvF4vXq/3LGZo04iEU1ztrsc58oLJEouoGJTWomMaNm/ePOV6g8HAddddN/KaGa5r3759xUsbfRWC2UXt5zv5yUu7adPdzc6GKX7x6RCb26JYt/wG+0RZNhdH1ewodg8tXQ6irfeyeXcH4epteEY+8fUoDU3cnfwOWze1Y93WyCQNSGdBh1lxUe3JYc6l0YwO/AFz8bP+AkzUKW5yNdES/w4bn9jPweXD7ysjJtMSTvSHiWYcY8Z45NRe4uaqUUF5fsplBscpmMwmdMXey/E24gXjuN+JEevw5FfJOMkTYBr/vrcrWNlLMp4Gaz/x93XYT+umOkVTkxRMLjy11SPBo7p6sGVozK4XZMJbeGZ00GPMb8Ng86MCajhMqsyCzTK4Lq8GCWObNCycq2AwyKuvvkokEsHr9Z7ljUgmY8JVPfGEPBMbPHuacuCSDshkThu4pB/6ZtdRIFcYNbBywsMUKHCCTG6wVWXMf7PwDR0MBvH5fKRSKZqbm89uOtfh8QuuUfeQy0boihhwuSvPq65AIEB3dzeqquLz+c5ymtkMWiZDbmSIghFHbRUrWUaFp3pUU22BQm7o/8N/Ui2NdrIw1CRdIKNGiWegkBsKIIU04R516DvYhKPOg33ZBNM866zUNjXy+cwTbN7cM+YyxOHBaacWFU4fpFYYuwW5fto2hcjZrVitduxm05m99nMZMpnMBKM19CiNLXz3umWji6bCVcXyE/to29ROWE2TyWnEw+209RtRjIBu8HU8ODivQKbIwOC5JYcajXOST+GoshZ/LxcKFE5mKHZldyYzbgOdHt0SBidlyuXIUaAwxWdKoVDgZEYjN76GhdoKtMDNaGAoMxiwKBZSwVbClgBBl0Yw0E13V4CAWonfPb3ThGSzWZqbm6dvsON46RCtpw9pnoARs2kZJ9UQ4TGt4AXS/f2k0WFVzHAyjjrusv1MJgdrKrAbjZiMDI5mn+wwehMmPcR7ehnb2F4g3jvF40pkuOvhnKhhYvlKXKPmX8iGg0TK3HidZaB20506t137fD6cxQbUTCCXjBJqbaf33f0EWzsIxwc/rHVKLZ71LjzD11ZqKuGOdkKHAQ4SamunR82A4sK1Bg5u3cBt37qXzpwDl7KM9/duxtfeTw7IRNtp7exFVaP0BPsxNTTi0qfpDw1e1XAyGaanN0nGqFClLOP9l+7nO742evoTxMMddPa/Dyf6CXX0kszliIdDRDU4Ge8hFE2Ty6iEOqO8y0nioU5UDQYvtUuye+PdfOMbf8PXvnYTN63/a277Tiu9E/bqFEj399DWFuLw4RBt7T1D+xnNjKfJz02mU187uooGtmy8EX3yEe7buIG/ue07dCQV6uqGW97sVDlW8v4Tm/nOve2oufnzlVWIB+noO8Hy9XXUWin+XjZbMfEW4VD/2EacXJx
oPAdmBesySKvj5nbIZcicXIZdsQ6NTTmJ2h+f9OoIo9UE70bp6R8XzjJRetX5FNjEmZjRLgnFHyOWTaFhwm0og5oIbi1F1uDGfQHmXjjv1oShpuUJP5Yy/bRt7kHf6AGSRc7SdFTUVvOpvt10+DZDYy0VpgLpaA9Roxc/QHUDXwttpDcUxdvkGGzuLaj0qjpuaqzGjJ5qTzk9W3toC7nY4jGjI0MyrgFG0loBrHZcVZ/iiUc6uLdVR0OtA7s+gxrqRLU3jMzwV+DUOLpSOp+/TyISI6t4qRy1i1h3hDJ3N86yLN1dKUzN51/j2dBbHXj8Djz+8WtMeJpGza9oUnDVKbjqxm9npGHnb6jVcuhNQ11PjkdxZMA4NI3jlkddQI6MlsPc0EL10IuzwuOnYtyBXS2/wTVmiY0t4w/q8vOga/TjzHhauvCM3ianw1TRyI4tCvpchlwuRyaTQUtH6ensx9E4vrVNh7mimsaKahon+kWNPF0HjVtGh3k91uoWuqoHL/HTGU3jWk/0VPgf5JG6HHrjFF1zs9KpFqWxizPEe4O0tYcp3LiRLf6h9z5F3ssmO7U3BvnJ3s349I00VFvRZeKEe9I4/HWDA2TrriPaHqInWUXt0NgqLdpLutyL3zE4sNrz+U7uC7cTdAWos+shl0RNnwTSaDlQKlx8ftl97G3djLHBS7VignSUYE8BT5OeoUt5xAKxuLm5uXkmD3hxmQFD2amccrHegL5EIyleeeUV3nvvPT7zmc+MW1Mg3ftv/Dz4NL9//yT5N/uJvvgi0RejPBcO0/PYv9G+tZvXCn+J99sKx58O0vn0Yd4/nuOSq9ayWv8mPR2d9L31Af9zXIf12msxma/lL81Hefm5Z9j75BN073mZnFLHD76+evCDb7GJ6x1WMk930Nn/Pyz+nzfpfew5dLX/h+9XXA7A5dbrWX28n56HHuDfw1H6+99Bb8zxxnEdl6PjqrVW1lZcj/moSu/eJ3my+2F274mj++r3aVhn4mOtn8f+rZPnUu+TOXoc/eVXsdp8ORPdKPb48eMkk0k8Hs8Ea0styzMtftTKFjZ96VSr1MW5CM8kP8kntSiaswG3ZfpfWG+99RYvvfQS119//bTve9BiLtFfMupvsphLLhn/F9JxiV7PaYsviDQh373EXY183apHBrxQ5wAADexJREFUf7kR4xUmrjKvZu3aK8hpOq61X3HOe1+s003w+lvMJXo9ugmf32IuueSSCV+zw/bt28ctt9wyTd2PZ2737t1cd911LFu2bOyKXJxwsJ1f9ab44P99wBsvq6gv9vL0c8/x3NPPceC4CVedn8ba67li1BO7Qpn8vbyYS1hdoaBLvkjvM0/yRHc3vUkj1T+o4y8vH9yJfq2D6y9R6ex4mvTij8kc6KHnzev5gf/rrNYBXIL1eiuF+NPsfuBXhKMv0p+8HCNxNC5nsc6IVXFQZdcRfy7MM3uf4JGHHyOaseNt/Dvsl+RIhv+NYPg13n33KBmdHtNVVq64ZOLfz2uvvcY111yDzWabeAMx6y0aGBgYKHURpbJz504SiQS33HLLDB61QEbLoTMaJx9XUMig5fSYJrs5QCGDltNhGpyQfuIbTxUG+zeNpnM7E0un0/z2t79l9+7d5/DoCy+f1cgbTJz2lZDVyJaZMFyg2UL37dvHtm3buPvuuy/MAWadOG0136VXfxPe2irsZhNGXQEtHadfBYe3etLJo0rlvvvu4xe/+MUFvsXx6b72ta9xxx13sGLFiundcbH3ci6DVpji84ICOS0HU3zmFHIZcugx6nWT3ngql9Eo6Eyc6+SyXV1d3Hzzzec1t4koLbn51IzTYTQVuZxTNzhe4YzWT3aXSp0RU4nv3nghlRlMEw+QnShEiPNgpyFwH6ZgiN5gGyGMmK1WlCoP3gbzHOsWmKOKvZf1xiKzberQF/nM0emNnPpImfivqjee/wfK7t27ef7550dua11eXn7Gl7eL0pPAIISYks5UQa2
/gtrimwoxperqalatWkUikSAUCpFIJFi8eDHl5eXYbLaRICEhYnaSwCCEEGJGLFu2jC984Qt84QtfGFn23//93xw8eHAkRBw8eJAlS5aMCRA2m600ISKvkcqbsEizJSCBQQghRAmtWLGCFStWsG7dupFlo0PEww8/TCKRYMmSJae1RCxfvnzCfarq4OyY53VJfV4lUFNPMKvQ2rUd1zzu4j1TEhiEEELMKlOFiIMHD/Lwww9z8OBBPvGJT4xphbDZbCxfvhxFUbBYLGzcuJHvf//7Z19ANkbAF8TQGkE1RPD7fOQDAaZ5qqA5RwKDmBfeeOMNrrnmGi66aIbv2C6EmBEThYh3332XRCLBwYMH6erqIpFI8IlPfILy8nLKysrw+Xx0d3fzj//4j2c3GVvegCuwHZsBwEVr0EZCy8O0z0U8t0hgEPNCfX09e/bsYenSpaUuRQgxQ1auXMnKlStPCxEvvfQS7e3tAEQiESKRCE6n88yDg8nGmNkiRt3GYCGTwCCEEGLeWLlyJaFQiGuuuWaka0JRlJGJvLLZ7BlN6pWNbWd7BAxlebJaGZW+epzSJSGEEELMD+d0M7tx8jE/7oBCV1cNJiDVVUNNq0IscH43tpvrJDAIIYSYN6Zjhk812EXK4BoasZAnmzVRUzPcSaERCQaJaZBNJdAMbpqb3VgWwPAGCQxCCCHEKJZKG9k712Pqvo5Kp4v65lZ8tsFEkNrup9sSIOA1AFnCXgWnz0Biu3PeD4mUIeVCCCHEKCZvN2rf/6XZbSEbCfCNyhq6hu4zns2mSCSGbzpuwOlS0CJhEiWrduZIYBBCCCEA0Ii0evF1a1icXvzbu1FT3WwoS6AOZQTFHyHsG+6eyBMLq5icLhbCPTglMAghhBAA+QiBQIzs6M6FrEbW5sU7waSRWlc9zVk/4cD8744AGcMghBBCDCpz4fOrxLIxwpEy8qkEkVie+i7/aS0IqW4/gWw93d2VlGWz5MsM8z40SGAQQgghADDg9LXizGqktDyGShdu7/ht8iS6muku89LsNUE2QXB7DLffW+Q243OfBAYhhBBiNMPkd6hMbHdR+e3nOM59/Gho2coNfdTPWHGlI4FBCCGEOEO2+gjZhZAOJiCDHoUQQghR1IJvYThw4ACLFi0qdRmzzrFjx/jwww9LXcas9Pbbb7Nnz55SlyEmcezYsZIc98SJEzz77LNcdtllJTn+bJdMJktdgjhPiwYGBgZKXUQp7dixo9QlzFo33XQTZrO51GWckfXr18/Y3Sq7u7s5evToBT/OTCgUCvznf/4nX/rSl0pdyrRZs2YNVVVVM37cZDJJX1/fjB93rli2bBm1tbWlLkOchwUfGMT8MJOBYT45fvw4t99+O0899VSpSxHTrLm5mebm5lKXIeYRGcMghBDzkKqqRCKRUpch5hEJDEIIMQ89/vjjdHd3l7oMMY9IYBBCiHlmOCg8/vjjJa5EzCcSGIQQYp4JBoMApFIp6ZYQ00YCgxBCzCOpVIrHH3+cSy+9FICf//znJa5IzBcSGIQQYh4JBoO8+eabbNiwgWeeeYZ169ZJK4OYFgt+4iYhhJhPRl9Keemll+Lz+UpXjJhXJDAIIaagEd7eRSKVAlczPiVBMBgjS55UQkPxteK1pOgOhknl8yTUBIpvO/XKfL/RrxALjwQGIcSk8uEgiUo/9QY3Bq8btd5Hs8+HpQy0oBNLjZdEjYsanw93GeS73Zi8AZyqH9vwPhLdbO9OkEcjkchTWd9KfeUktwIUQsxaEhiEEJPIE1HLcPohFU4BlXh9bixDjQf5bB40qKz3ooxals9q5Ed2EcHfrFHfNRQgEgEqnW6IRai3zPDTEUKcFxn0KISYRBkuvw8FjUg4gaWmHudIT0MeNZLA4KrHZRi9TKVMcY60LpDX0BIqqeEEYXPiNMQIq3mEEHOLBAYhxNTyMcKqgUqXbdTCGOFYHsWlUDZqu65wHmeN89QyQw1d6nZcwwtSESLZSlw
yxkGIOUcCgxBiarEwsbwy9ks+ESOSVXCNGouQjwXpzjvxugyQ6qY7MW4/eZXW+m4qg13SHSHEHCSBQQgxpUQsRlZxMXqcohYJk7I4cVpOLVO7I+SdXlyGPLGuBGWj1pGNsd3fhW17mICrjGx2hooXQkwbCQxCiClkUSMJbG4XllFL1ZiKyelGGbXM4qzEQopwMECssv5UN4QWIdCqYvP7cRnyaOFWgurMPQMhxPSQqySEEFMw4O7ScBvGXgbpCqRQxy0zubtQnRpamQn3cFjIhql3uvnlG/8L9317aOEN/OKPF75yIcT0ksAghJhSmWGCORMMBiacScFgwjTmZxfbE3m2X5jShBAzSLokhFiAUqkUqjq2XyASiZCVwQVCiElIC4MQC5DFYsHtdvPRRx9x+PBh1q9fz7p163A6naUuTQgxS0lgEGKBam5u5nOf+xwAf/jDH3jsscdKXJEQYjaTLgkhFihFUbjuuusA2LBhA4aJxioIIcQQCQxCLGBerxcAt9td2kKEELOeBAYhFrDhoCCBQQhRjAQGIRYwi8XChg0bSl2GEGIOkMAgxAKWzWb527/9Wz744INSlyKEmOXkKgkhFphjx47R2dnJiy++yHvvvce1115LW1sbBoOBm2++mTvuuAOdTlfqMoUQs4wEBiEWkFAoxAMPPEBDQwMbNmzgsssuG1n3xz/+kRdffJHbb7+dH/zgBzgcjhJWKoSYbSQwCLFAPPnkk/T29vLkk09OuP7KK6/ktttu47bbbuOWW27hn//5nykvL5/hKoUQs5WMYRBiAfjNb36Dqqpks1kOHz485bZ9fX1ce+21PPDAA7zyyiszVKEQYraTFgYh5rmPP/6Yf/qnf+LFF18EoLa2lp/85CesWbPmtG37+vp46qmn+OlPf8o777zD9773PR599NGZLlkIMQtJC4MQc9xwy8Fkdu/eTV1d3cjPnZ2d/PjHPz6tpWF0WAC46qqr+NznPkdfX9+FKVwIMadIYBBijhu+kdSuXbsmDA4HDhygsrJyzLLxoWF8WBh244038vLLL1+44oUQc4Z0SYh54eTJk6iqSllZWalLKYm1a9eOTPPs9XpxOp2sW7cOi8XCq6++yo9+9KPTHtPZ2UltbS1f/epXee21104LCwCf/OQnOXDggIxlmIPkVuViuklgEPPCHXfcwa5du7joooXZaHbo0KGRfweDQfr6+nA6nWzYsAGj0cill1464ePuvvtuGhoa6OnpmXC9Xq9nYGCAHTt2XJC6xYVz1VVXYbPZSl2GmEckMIh54Z577il1CSXl9Xq5/PLL8Xq9eL1eFEUZWbdr1y6OHj2KyWQa85jhbojXX3990oGQx44d46qrrpqw9UEIsbAszNMxIeYRVVWxWCxks1kCgcCYsADw6U9/+rRxCOPHLEw2EPJ3v/sda9euvbBPQAgxJ0hgEGKOUxSF5ubmSdffdNNNvPrqqyM/TzbAcaLQ8Prrr/OlL31p2msWQsw9EhiEmOduuOEG8vk8qqpOGhaGjQ4Nv/3tb1m7di2rVq2a4YqFELPRooGBgYFSFyGEuPBcLhdr1qxh69atRbddv349f/Znf8a//uu/zkBlQoi5QFoYhFgg9uzZw5tvvkkoFJpyu1/96lesXr1awoIQYgxpYRBigdmyZQtvvvkm5eXlXHnllaxcuZI//vGPHDp0iCNHjnDDDTdw1113lbpMIcQsI4FBiAXo6NGjRKNRkskkAwMDLF68mPLycpxOJ0uXLi11eUKIWUgCgxBCCCGKkjEMQgghhChKAoMQQgghipLAIIQQQoiiJDAIIYQQoigJDEIIIYQoSgKDEEIIIYqSwCCEEEKIoiQwCCGEEKIoCQxCCCGEKEoCgxBCCCGKksAghBBCiKIkMAghhBCiKAkMQgghhChKAoMQQgghipLAIIQQQoii/j+UjQged7sobgAAAABJRU5ErkJggg==)\n", + "\n", + "\n" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": { + "id": "EWW80iPjh4DJ" + }, + "outputs": [], + "source": [ + "fft_size=1024\n", + "model_audio = simpleseparator(fft_size=fft_size, hidden_size=300)\n", + "\n", + "\n", + "optimizer = lambda x: torch.optim.Adam(x, lr=0.0005)\n", + "N_epochs = 100\n", + "epoch_counter = sb.utils.epoch_loop.EpochCounter(limit=N_epochs)\n", + "\n", + "separator = SeparationBrain(\n", + " train_loss='si-snr',\n", + " modules={'mdl': model_audio},\n", + " opt_class=optimizer\n", + "\n", + " )\n", + "\n", + "\n", + "separator.fit(\n", + " epoch_counter,\n", + " train_loader_audio,\n", + " valid_loader_audio)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9C2EQ5M20QsJ" + }, + "outputs": [], + "source": [ + "class audioseparator(nn.Module):\n", + " def __init__(self, fft_size, hidden_size, num_sources=2, kernel_size=16):\n", + " super(audioseparator, self).__init__()\n", + " self.encoder = nn.Conv1d(in_channels=1, out_channels=fft_size, kernel_size=16, stride=kernel_size//2)\n", + "\n", + " # MaskNet\n", + " self.rnn = nn.LSTM(input_size=fft_size, hidden_size=hidden_size, batch_first=True, bidirectional=True)\n", + " self.output_layer = nn.Linear(in_features=hidden_size*2, out_features=num_sources*(fft_size))\n", + "\n", + " self.decoder = nn.ConvTranspose1d(in_channels=fft_size, out_channels=1, kernel_size=kernel_size, stride=kernel_size//2)\n", + "\n", + " self.fft_size = fft_size\n", + " self.hidden_size = hidden_size\n", + " self.num_sources = num_sources\n", + "\n", + " def forward(self, inp):\n", + " # batch x channels x time\n", + " y = nn.functional.relu(self.encoder(inp.unsqueeze(0)))\n", + "\n", + " # batch x time x nfft\n", + " y = y.permute(0, 2, 1)\n", + "\n", + " # batch x time x feature\n", + " rnn_out = self.rnn(y)[0]\n", + "\n", + " # batch x time x (nfft*num_sources)\n", + " lin_out = self.output_layer(rnn_out)\n", + "\n", + " # batch x time x nfft x 
num_sources\n", + " lin_out = lin_out.reshape(lin_out.size(0), lin_out.size(1), -1, self.num_sources)\n", + "\n", + " # reconstruct in time domain\n", + " sources = []\n", + " all_masks = []\n", + " for n in range(self.num_sources):\n", + " sourcehat_mask = nn.functional.relu(lin_out[:, :, :, n])\n", + " all_masks.append(sourcehat_mask)\n", + "\n", + " # multiply with mask and magnitude\n", + " T = sourcehat_mask.size(1)\n", + " sourcehat_latent = (sourcehat_mask * y[:, :T, :]).permute(0, 2, 1)\n", + "\n", + " # reconstruct in time domain with istft\n", + " sourcehat = self.decoder(sourcehat_latent).squeeze(0)\n", + " sources.append(sourcehat)\n", + "\n", + " return sources, all_masks, y\n", + "\n", + "model_audio = audioseparator(fft_size=fft_size, hidden_size=300, kernel_size=256)\n", + "out, _, _ = model_audio.forward(mixture_0.unsqueeze(0))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "s5dkjj0P5wLu" + }, + "outputs": [], + "source": [ + "optimizer = lambda x: torch.optim.Adam(x, lr=0.0002)\n", + "N_epochs = 200\n", + "epoch_counter = sb.utils.epoch_loop.EpochCounter(limit=N_epochs)\n", + "\n", + "separator = SeparationBrain(\n", + " train_loss='si-snr',\n", + " modules={'mdl': model_audio},\n", + " opt_class=optimizer\n", + "\n", + " )\n", + "\n", + "separator.fit(\n", + " epoch_counter,\n", + " train_loader_audio,\n", + " valid_loader_audio)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5JUqqmL_KFGs" + }, + "outputs": [], + "source": [ + "estimated_sources_test, all_masks, mag = model_audio.forward(mixture_3.unsqueeze(0))\n", + "estimated_sources_train, all_masks, mag = model_audio.forward(mixture_0.unsqueeze(0))\n", + "\n", + "\n", + "Audio(estimated_sources_test[0].squeeze().detach(), rate=16000)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2trZ6WX_kAsT" + }, + "outputs": [], + "source": [ + 
"Audio(estimated_sources_test[1].squeeze().detach(), rate=16000)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nzvsrhsEm4f1" + }, + "outputs": [], + "source": [ + "Audio(estimated_sources_train[0].squeeze().detach(), rate=16000)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "aAZOTrYDncYm" + }, + "outputs": [], + "source": [ + "Audio(estimated_sources_train[1].squeeze().detach(), rate=16000)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yUCKuExTnz4Z" + }, + "source": [ + "It does not work that great because of the introduced artifacts, but we can hear that it supresses the interferences." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " 
author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [ + { + "file_id": "1lcPtycPQOTVanhxj6GiOBnWuWtzXTGJw", + "timestamp": 1613147076800 + }, + { + "file_id": "18rvXsEWzSeGVXcrXB_AVIMnMzpUo0irv", + "timestamp": 1613140070220 + } + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/tasks/speech-classification-from-scratch.ipynb b/docs/tutorials/tasks/speech-classification-from-scratch.ipynb new file mode 100644 index 0000000000..2c4d7995cf --- /dev/null +++ b/docs/tutorials/tasks/speech-classification-from-scratch.ipynb @@ -0,0 +1,5394 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/tasks/speech-classification-from-scratch.ipynb)\n", + "to execute or view/download this notebook on\n", + 
"[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/tasks/speech-classification-from-scratch.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uo0JP7a5uFp7" + }, + "source": [ + "# Speech Classification From Scratch\n", + "\n", + "Do you want to figure out how to implement a **classification** system with SpeechBrain? Look no further, you're in the right place. This tutorial will walk you through all the steps needed to implement an **utterance-level classifier** in SpeechBrain. \n", + "The tutorial will initially focus on **speaker identification** and will describe, along the way, how to extend it to many other classification tasks such as **language-id**, **emotion recognition**, **sound classification**, **keyword spotting**, and many others.\n", + "\n", + "\n", + "## Models\n", + "Many neural models can be used to approach this kind of task. In this tutorial, we will focus on a **TDNN** classifier (*xvector*) and a very recent model called [**ECAPA-TDNN**](https://arxiv.org/abs/2005.07143) that showed impressive performance in speaker verification and diarization.\n", + "\n", + "## Data\n", + "Training will be done with a small open-source dataset called [mini-librispeech](https://www.openslr.org/31/), which only contains few hours of training data. 
In a real case, you need a much larger dataset.\n", + "For some examples on a real task, please [take a look into our Voxceleb recipes](https://github.com/speechbrain/speechbrain/tree/develop/recipes/VoxCeleb/SpeakerRec).\n", + "\n", + "## Code\n", + "In this tutorial, we will refer to the code in [```speechbrain/templates/speaker_id```](https://github.com/speechbrain/speechbrain/tree/develop/templates/speaker_id).\n", + "\n", + "## Installation\n", + "Before starting, let's install speechbrain:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "beagAGw5t5bK" + }, + "outputs": [], + "source": [ + "%%capture\n", + "\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "\n", + "# Clone SpeechBrain repository (development branch)\n", + "!git clone https://github.com/speechbrain/speechbrain/\n", + "%cd /content/speechbrain/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eWpl9xgAIXKE" + }, + "source": [ + "## Which steps are needed?\n", + "Training an utterance-level classifier is relatively easy in SpeechBrain. The steps to follows are:\n", + "\n", + "1. **Prepare your data**.\n", + "The goal of this step is to create the data manifest files (in CSV or JSON format). The data manifest files tell SpeechBrain where to find the speech data and their corresponding utterance-level classification (e.g., speaker id). In this tutorial, the data manifest files are created by [mini_librispeech_prepare.py](https://github.com/speechbrain/speechbrain/blob/develop/templates/speaker_id/mini_librispeech_prepare.py). \n", + "\n", + "2. 
**Train the classifier**.\n", + "At this point, we are ready to train our classifier.\n", + "To train a speaker-id classifier based on TDNN + statistical pooling (xvectors), run the following command:\n", + "```\n", + "cd speechbrain/templates/speaker_id/\n", + "python train.py train.yaml\n", + "```\n", + "Later, we will describe how to plug another model called Emphasized Channel Attention, Propagation, and Aggregation model (ECAPA) that turned out to provide impressive performance in speaker recognition tasks.\n", + "\n", + "3. **Use the classifier (inference)**:\n", + "After training, we can use the classifier for inference. A class called `EncoderClassifier` is designed to make inference easier. We also designed a class called `SpeakerRecognition` to make inference on a speaker verification task easier.\n", + "\n", + "\n", + "\n", + "We will now provide a detailed description of all these steps." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rDgNu_b8k6qD" + }, + "source": [ + "## Step 1: Prepare your data\n", + "The goal of data preparation is to create the data manifest files.\n", + "These files tell SpeechBrain where to find the audio data and their corresponding utterance-level classification. 
They are text files written in the popular CSV and JSON formats.\n", + "\n", + "### Data manifest files\n", + "Let's take a look into how a data manifest file in JSON format looks like:\n", + "\n", + "\n", + "```json\n", + "{\n", + " \"163-122947-0045\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/163/122947/163-122947-0045.flac\",\n", + " \"length\": 14.335,\n", + " \"spk_id\": \"163\"\n", + " },\n", + " \"7312-92432-0025\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/7312/92432/7312-92432-0025.flac\",\n", + " \"length\": 12.01,\n", + " \"spk_id\": \"7312\"\n", + " },\n", + " \"7859-102519-0036\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/7859/102519/7859-102519-0036.flac\",\n", + " \"length\": 11.965,\n", + " \"spk_id\": \"7859\"\n", + " },\n", + "}\n", + "```\n", + "As you can see, we have a hierarchical structure in which the first key is a **unique identifier** of the spoken sentence.\n", + "Then, we specify all the fields that are needed for the task addressed. For instance, we report the **path of the speech recording**, its **length** in seconds (needed if we wanna sort the sentences before creating the mini-batches), and the **speaker identity** of the speaker in the given recording.\n", + "\n", + "\n", + "Actually, you can specify here all entries that you need (language-id, emotion annotation, etc). However, there must be a matching between the name of these entries and what the experiment script (e.g, train.py) expects. We will elaborate more on this later.\n", + "\n", + "You might have noticed that we define a special variable called `data_root`. This allows users to dynamically change the data folder from the command line (or from the yaml hyperparameter file).\n", + "\n", + "\n", + "### Preparation Script\n", + "**Every dataset is formatted in a different way**. The script that parses your own dataset and creates the JSON or the CSV files is something that you are supposed to write. 
Most of the time, this is very straightforward.\n", + "\n", + "For the mini-librispeech dataset, for instance, we wrote this simple data preparation script called [mini_librispeech_prepare.py](https://github.com/speechbrain/speechbrain/blob/develop/templates/speaker_id/mini_librispeech_prepare.py).\n", + "\n", + "This function automatically downloads the data (that in this case are publicly available). We search for all the audio files and while reading them we create the JSON file with the speaker-id annotation.\n", + "\n", + "You can use this script as a good base for your **custom preparation** on your target dataset. As you can see, we create three separate data manifest files to manage training, validation, and test phases.\n", + "\n", + "\n", + "### Copy your data locally\n", + "When using speechbrain (or any other toolkit) within an HPC cluster, a good practice is to compress your dataset in a single file and **copy (and uncompress) the data in the local folder of the computing node**. This would make the code much **much faster** because the data aren't fetched from the shared filesystem but from the local one. Moreover, you don't harm the performance of the shared filesystem with tons of reading operations. We **strongly suggest users follow this approach** (not possible here in Google Colab)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nkYENC7BJ4K9" + }, + "source": [ + "## Step 2: Train the classifier\n", + "We show now how we can train an **utterance-level classifier** with SpeechBrain.\n", + "The proposed recipe performs a feature computation/normalization, processes the features with an encoder, and applies a classifier on top of that. Data augmentation is also employed to improve system performance.\n", + "\n", + "### Train a speaker-id model\n", + "\n", + "We are going to train the TDNN-based model used for x-vectors. 
Statistical pooling is used on the top of the convolutional layers to convert a variable-length sentence into a **fixed-length embeddings**.\n", + "\n", + "On the top of the embeddings, a simple fully-connected classifier is employed to predict which of the N speakers is active in the given sentence.\n", + "\n", + "\n", + "To train this model, run the following code:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "AtMw7x0ybFlI", + "outputId": "7e016a65-0dd8-4607-b6cd-42c7f426ed07" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content/speechbrain/templates/speaker_id\n", + "/usr/local/lib/python3.11/dist-packages/speechbrain/utils/autocast.py:188: FutureWarning: `torch.cuda.amp.custom_fwd(args...)` is deprecated. Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.\n", + " wrapped_fwd = torch.cuda.amp.custom_fwd(fwd, cast_inputs=cast_inputs)\n", + "speechbrain.utils.quirks - Applied quirks (see `speechbrain.utils.quirks`): [allow_tf32, disable_jit_profiling]\n", + "speechbrain.utils.quirks - Excluded quirks specified by the `SB_DISABLE_QUIRKS` environment (comma-separated list): []\n", + "speechbrain.core - Beginning experiment!\n", + "speechbrain.core - Experiment folder: ./results/speaker_id/1986\n", + "Downloading http://www.openslr.org/resources/31/train-clean-5.tar.gz to ./data/train-clean-5.tar.gz\n", + "train-clean-5.tar.gz: 333MB [00:18, 18.2MB/s] \n", + "mini_librispeech_prepare - Creating train.json, valid.json, and test.json\n", + "mini_librispeech_prepare - train.json successfully created!\n", + "mini_librispeech_prepare - valid.json successfully created!\n", + "mini_librispeech_prepare - test.json successfully created!\n", + "Downloading https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 to ./data/noise/data.zip\n", + 
"noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1: 569MB [00:42, 13.5MB/s] \n", + "Extracting ./data/noise/data.zip to ./data/noise\n", + "speechbrain.dataio.encoder - Load called, but CategoricalEncoder is not empty. Loaded data will overwrite everything. This is normal if there is e.g. an unk label defined at init.\n", + "speechbrain.core - Info: ckpt_interval_minutes arg from hparam file is used\n", + "speechbrain.core - Gradscaler enabled: `False`\n", + "speechbrain.core - Using training precision: `--precision=fp32`\n", + "speechbrain.core - Using evaluation precision: `--eval_precision=fp32`\n", + "speechbrain.core - SpkIdBrain Model Statistics:\n", + "* Total Number of Trainable Parameters: 4.5M\n", + "* Total Number of Parameters: 4.5M\n", + "* Trainable Parameters represent 100.0000% of the total size.\n", + "/usr/local/lib/python3.11/dist-packages/torch/utils/data/dataloader.py:624: UserWarning: This DataLoader will create 4 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n", + " warnings.warn(\n", + "speechbrain.utils.checkpoints - Would load a checkpoint here, but none found yet.\n", + "speechbrain.utils.epoch_loop - Going into epoch 1\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:13<00:00, 1.04it/s, train_loss=1.73]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 5.62it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.001 to 0.00094\n", + "speechbrain.utils.train_logger - Epoch: 1, lr: 1.00e-03 - train loss: 1.73 - valid loss: 1.01, valid error: 3.11e-01\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-52-54+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 2\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:11<00:00, 1.07it/s, train_loss=0.819]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.39it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00094 to 0.00087\n", + "speechbrain.utils.train_logger - Epoch: 2, lr: 9.36e-04 - train loss: 8.19e-01 - valid loss: 2.71e-01, valid error: 8.61e-02\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-54-06+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-52-54+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 3\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. 
Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:14<00:00, 1.02it/s, train_loss=0.64]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.59it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00087 to 0.00081\n", + "speechbrain.utils.train_logger - Epoch: 3, lr: 8.71e-04 - train loss: 6.40e-01 - valid loss: 2.64e-01, valid error: 7.28e-02\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-55-22+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-54-06+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 4\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:13<00:00, 1.04it/s, train_loss=0.468]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! 
Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.27it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00081 to 0.00074\n", + "speechbrain.utils.train_logger - Epoch: 4, lr: 8.07e-04 - train loss: 4.68e-01 - valid loss: 1.03e-01, valid error: 3.31e-02\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-56-37+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-55-22+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 5\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:15<00:00, 1.00it/s, train_loss=0.414]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.15it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00074 to 0.00068\n", + "speechbrain.utils.train_logger - Epoch: 5, lr: 7.43e-04 - train loss: 4.14e-01 - valid loss: 4.59e-02, valid error: 6.62e-03\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-57-54+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-56-37+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 6\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. 
Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:12<00:00, 1.05it/s, train_loss=0.34]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 6.66it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00068 to 0.00061\n", + "speechbrain.utils.train_logger - Epoch: 6, lr: 6.79e-04 - train loss: 3.40e-01 - valid loss: 1.59e-01, valid error: 5.96e-02\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-59-08+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 7\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. 
Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:14<00:00, 1.02it/s, train_loss=0.304]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.06it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00061 to 0.00055\n", + "speechbrain.utils.train_logger - Epoch: 7, lr: 6.14e-04 - train loss: 3.04e-01 - valid loss: 5.94e-02, valid error: 0.00e+00\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-00-24+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-59-08+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+14-57-54+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 8\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:12<00:00, 1.05it/s, train_loss=0.262]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 5.63it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00055 to 0.00049\n", + "speechbrain.utils.train_logger - Epoch: 8, lr: 5.50e-04 - train loss: 2.62e-01 - valid loss: 3.93e-02, valid error: 6.62e-03\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-01-38+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 9\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:13<00:00, 1.03it/s, train_loss=0.24]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 5.04it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00049 to 0.00042\n", + "speechbrain.utils.train_logger - Epoch: 9, lr: 4.86e-04 - train loss: 2.40e-01 - valid loss: 6.98e-02, valid error: 1.32e-02\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-02-54+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-01-38+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 10\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. 
Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:12<00:00, 1.05it/s, train_loss=0.182]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.09it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00042 to 0.00036\n", + "speechbrain.utils.train_logger - Epoch: 10, lr: 4.21e-04 - train loss: 1.82e-01 - valid loss: 2.60e-02, valid error: 6.62e-03\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-04-08+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-02-54+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 11\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:15<00:00, 1.01it/s, train_loss=0.17]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! 
Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:02<00:00, 4.70it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00036 to 0.00029\n", + "speechbrain.utils.train_logger - Epoch: 11, lr: 3.57e-04 - train loss: 1.70e-01 - valid loss: 1.19e-02, valid error: 0.00e+00\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-05-26+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-04-08+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-00-24+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 12\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:11<00:00, 1.06it/s, train_loss=0.138]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.18it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00029 to 0.00023\n", + "speechbrain.utils.train_logger - Epoch: 12, lr: 2.93e-04 - train loss: 1.38e-01 - valid loss: 1.29e-02, valid error: 0.00e+00\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-06-39+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-05-26+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 13\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. 
Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:12<00:00, 1.05it/s, train_loss=0.131]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.18it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00023 to 0.00016\n", + "speechbrain.utils.train_logger - Epoch: 13, lr: 2.29e-04 - train loss: 1.31e-01 - valid loss: 7.04e-03, valid error: 0.00e+00\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-07-53+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-06-39+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 14\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:16<00:00, 1.00s/it, train_loss=0.106]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! 
Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.57it/s]\n", + "speechbrain.nnet.schedulers - Changing lr from 0.00016 to 0.0001\n", + "speechbrain.utils.train_logger - Epoch: 14, lr: 1.64e-04 - train loss: 1.06e-01 - valid loss: 4.53e-03, valid error: 0.00e+00\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-09-11+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-07-53+00\n", + "speechbrain.utils.epoch_loop - Going into epoch 15\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 76/76 [01:12<00:00, 1.05it/s, train_loss=0.0986]\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. 
If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 6.86it/s]\n", + "speechbrain.utils.train_logger - Epoch: 15, lr: 1.00e-04 - train loss: 9.86e-02 - valid loss: 3.67e-03, valid error: 0.00e+00\n", + "speechbrain.utils.checkpoints - Saved an end-of-epoch checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-10-24+00\n", + "speechbrain.utils.checkpoints - Deleted checkpoint in results/speaker_id/1986/save/CKPT+2025-05-22+15-09-11+00\n", + "speechbrain.utils.checkpoints - Loading a checkpoint from results/speaker_id/1986/save/CKPT+2025-05-22+15-10-24+00\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "speechbrain.dataio.encoder - CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. 
Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n", + "100% 10/10 [00:01<00:00, 7.00it/s]\n", + "speechbrain.utils.train_logger - Epoch loaded: 15 - test loss: 7.36e-03, test error: 0.00e+00\n" + ] + } + ], + "source": [ + "%cd /content/speechbrain/templates/speaker_id\n", + "!python train.py train.yaml --number_of_epochs=15 #--device='cpu'" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b3tnXnrWc2My" + }, + "source": [ + "As you can see from the prints, both the validation and training **losses are decreasing** very fast in the first epochs. Then, we basically see some minor improvements and performance oscillations.\n", + "\n", + "At the end of the training, the **validation error should go to zero** (or very close to zero).\n", + "\n", + "The task proposed in this tutorial is very easy because we only have to classify the 28 speakers of the mini-librispeech dataset. Take this tutorial just as an example that explains how to set up all the needed components to develop a speech classifier. 
[Please, refer to our voxceleb recipes if you would like to see an example on a popular speaker recognition dataset](https://github.com/speechbrain/speechbrain/tree/develop/recipes/VoxCeleb)\n", + "\n", + "\n", + "\n", + "Before diving into the code, let's see which files/folders are generated in the specified `output_folder`:\n", + "\n", + "* `train_log.txt`: contains the statistics (e.g, train_loss, valid_loss) computed at each epoch.\n", + "* `log.txt`: is a more detailed logger containing the timestamps for each basic operation.\n", + "* `env.log`: shows all the dependencies used with their corresponding version (useful for replicability).\n", + "\n", + "* `train.py`, `hyperparams.yaml`: are a copy of the experiment file along with the corresponding hyperparameters (for replicability).\n", + "\n", + "* `save`: is the place where we store the learned model.\n", + "\n", + "In the `save` folder, you find subfolders containing the checkpoints saved during training (in the format `CKPT+data+time`). Typically, you find here two checkpoints: the **best** (i.e, the oldest one) and the **latest** (i.e, the most recent one). If you find only a single checkpoint it means that the last epoch is also the best.\n", + "\n", + "Inside each checkpoint, we store all the information needed to **resume training** (e.g, models, optimizers, schedulers, epoch counter, etc.). The parameters of the embedding models are reported in `embedding_model.ckpt` file,\n", + "while the ones of the classifier are in `classifier.ckpt`. 
This is just a binary format readable with `torch.load`.\n", + "\n", + "The save folder contains the **label encoder** (`label_encoder.txt`) as well, which maps each speaker-id entry to their corresponding indexes.\n", + "\n", + "```\n", + "'163' => 0\n", + "'7312' => 1\n", + "'7859' => 2\n", + "'19' => 3\n", + "'1737' => 4\n", + "'6272' => 5\n", + "'1970' => 6\n", + "'2416' => 7\n", + "'118' => 8\n", + "'6848' => 9\n", + "'4680' => 10\n", + "'460' => 11\n", + "'3664' => 12\n", + "'3242' => 13\n", + "'1898' => 14\n", + "'7367' => 15\n", + "'1088' => 16\n", + "'3947' => 17\n", + "'3526' => 18\n", + "'1867' => 19\n", + "'8629' => 20\n", + "'332' => 21\n", + "'4640' => 22\n", + "'2136' => 23\n", + "'669' => 24\n", + "'5789' => 25\n", + "'32' => 26\n", + "'226' => 27\n", + "================\n", + "'starting_index' => 0\n", + "```\n", + "\n", + "\n", + "As usual, we implement the system with an experiment file `train.py` and a hyperparameter file called `train.yaml`.\n", + "\n", + "### Hyperparameters\n", + "The yaml file contains all the modules and hyperparameters need to implement the desired classifier.\n", + "[You can take a look into the full train.yaml file here](https://github.com/speechbrain/speechbrain/blob/develop/templates/speaker_id/train.yaml).\n", + "\n", + "In the first part, we specify some basic settings, such as the seed and the path of the output folder:\n", + "\n", + "```yaml\n", + "# Seed needs to be set at top of yaml, before objects with parameters are made\n", + "seed: 1986\n", + "__set_seed: !!python/object/apply:torch.manual_seed [!ref ]\n", + "\n", + "# If you plan to train a system on an HPC cluster with a big dataset,\n", + "# we strongly suggest doing the following:\n", + "# 1- Compress the dataset in a single tar or zip file.\n", + "# 2- Copy your dataset locally (i.e., the local disk of the computing node).\n", + "# 3- Uncompress the dataset in the local folder.\n", + "# 4- Set data_folder with the local path.\n", + "# Reading data from 
the local disk of the compute node (e.g. $SLURM_TMPDIR with SLURM-based clusters) is very important.\n", + "# It allows you to read the data much faster without slowing down the shared filesystem.\n", + "data_folder: ./data\n", + "output_folder: !ref ./results/speaker_id/\n", + "save_folder: !ref /save\n", + "train_log: !ref /train_log.txt\n", + "```\n", + "\n", + "We then specify the path of the data manifest files for training, validation, and test:\n", + "\n", + "```yaml\n", + "# Path where data manifest files will be stored\n", + "# The data manifest files are created by the data preparation script.\n", + "train_annotation: train.json\n", + "valid_annotation: valid.json\n", + "test_annotation: test.json\n", + "```\n", + "\n", + "These files will be automatically created when calling the data preparation script ([mini_librispeech_prepare.py](https://github.com/speechbrain/speechbrain/blob/develop/templates/speaker_id/mini_librispeech_prepare.py)) from the experiment file (`train.py`).\n", + "\n", + "\n", + "Next, we set up the `train_logger` and declare the `error_stats` objects that will gather statistics on the classification error rate:\n", + "\n", + "\n", + "```yaml\n", + "# The train logger writes training statistics to a file, as well as stdout.\n", + "train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger\n", + " save_file: !ref \n", + "\n", + "error_stats: !name:speechbrain.utils.metric_stats.MetricStats\n", + " metric: !name:speechbrain.nnet.losses.classification_error\n", + " reduction: batch\n", + "```\n", + "\n", + "\n", + "We can now specify some training hyperparameters such as the number of epochs, the batch size, the learning rate, and the embedding dimensionality.\n", + "\n", + "\n", + "```yaml\n", + "ckpt_interval_minutes: 15 # save checkpoint every N min\n", + "\n", + "# Feature parameters\n", + "n_mels: 23\n", + "\n", + "# Training Parameters\n", + "sample_rate: 16000\n", + "number_of_epochs: 35\n", + 
"batch_size: 16\n", + "lr_start: 0.001\n", + "lr_final: 0.0001\n", + "n_classes: 28 # In this case, we have 28 speakers\n", + "emb_dim: 512 # dimensionality of the embeddings\n", + "dataloader_options:\n", + " batch_size: !ref \n", + "```\n", + "\n", + "The variable `ckpt_interval_minutes` can be used to save checkpoints every N minutes within a training epoch. In some cases, one epoch might take several hours, and saving the checkpoint periodically is a good and safe practice. This feature is not really needed for this simple tutorial based on a tiny dataset.\n", + "\n", + "We can now define the most important modules that are needed to train our model:\n", + "\n", + "```yaml\n", + "# Added noise and reverb come from OpenRIR dataset, automatically\n", + "# downloaded and prepared with this Environmental Corruption class.\n", + "env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt\n", + " openrir_folder: !ref \n", + " babble_prob: 0.0\n", + " reverb_prob: 0.0\n", + " noise_prob: 1.0\n", + " noise_snr_low: 0\n", + " noise_snr_high: 15\n", + "\n", + "# Adds speech change + time and frequency dropouts (time-domain implementation)\n", + "# # A small speed change help to improve the performance of speaker-id as well.\n", + "augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment\n", + " sample_rate: !ref \n", + " speeds: [95, 100, 105]\n", + "\n", + "# Feature extraction\n", + "compute_features: !new:speechbrain.lobes.features.Fbank\n", + " n_mels: !ref \n", + "\n", + "# Mean and std normalization of the input features\n", + "mean_var_norm: !new:speechbrain.processing.features.InputNormalization\n", + " norm_type: sentence\n", + " std_norm: False\n", + "\n", + "# To design a custom model, either just edit the simple CustomModel\n", + "# class that's listed here, or replace this `!new` call with a line\n", + "# pointing to a different file you've defined.\n", + "embedding_model: !new:custom_model.Xvector\n", + " in_channels: !ref \n", + " activation: 
!name:torch.nn.LeakyReLU\n", + " tdnn_blocks: 5\n", + " tdnn_channels: [512, 512, 512, 512, 1500]\n", + " tdnn_kernel_sizes: [5, 3, 3, 1, 1]\n", + " tdnn_dilations: [1, 2, 3, 1, 1]\n", + " lin_neurons: !ref \n", + "\n", + "classifier: !new:custom_model.Classifier\n", + " input_shape: [null, null, !ref ]\n", + " activation: !name:torch.nn.LeakyReLU\n", + " lin_blocks: 1\n", + " lin_neurons: !ref \n", + " out_neurons: !ref \n", + "\n", + "# The first object passed to the Brain class is this \"Epoch Counter\"\n", + "# which is saved by the Checkpointer so that training can be resumed\n", + "# if it gets interrupted at any point.\n", + "epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter\n", + " limit: !ref \n", + "\n", + "# Objects in \"modules\" dict will have their parameters moved to the correct\n", + "# device, as well as having train()/eval() called on them by the Brain class.\n", + "modules:\n", + " compute_features: !ref \n", + " env_corrupt: !ref \n", + " augmentation: !ref \n", + " embedding_model: !ref \n", + " classifier: !ref \n", + " mean_var_norm: !ref \n", + "```\n", + "The augmentation part is based on both `env_corrupt` (that adds noise and reverberation) and `augmentation`(that adds time/frequency dropouts and speed change).\n", + "For more information on these modules, please take a look at the tutorials on [enviromental corruption](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/environmental-corruption.html) and the one on [speech augmentation](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-augmentation.html).\n", + "\n", + "\n", + "\n", + "We conclude the hyperparameter specification with the declaration of the optimizer, learning rate scheduler, and the checkpointer:\n", + "\n", + "\n", + "```yaml\n", + "# This optimizer will be constructed by the Brain class after all parameters\n", + "# are moved to the correct device. 
Then it will be added to the checkpointer.\n", + "opt_class: !name:torch.optim.Adam\n", + " lr: !ref \n", + "\n", + "# This function manages learning rate annealing over the epochs.\n", + "# We here use the simple lr annealing method that linearly decreases\n", + "# the lr from the initial value to the final one.\n", + "lr_annealing: !new:speechbrain.nnet.schedulers.LinearScheduler\n", + " initial_value: !ref \n", + " final_value: !ref \n", + " epoch_count: !ref \n", + "\n", + "# This object is used for saving the state of training both so that it\n", + "# can be resumed if it gets interrupted, and also so that the best checkpoint\n", + "# can be later loaded for evaluation or inference.\n", + "checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer\n", + " checkpoints_dir: !ref \n", + " recoverables:\n", + " embedding_model: !ref \n", + " classifier: !ref \n", + " normalizer: !ref \n", + " counter: !ref \n", + "```\n", + "\n", + "In this case, we use Adam as an optimizer and a linear learning rate decay over the 15 epochs.\n", + "\n", + "Let's now save the best model into a separate folder (useful for the inference part explained later):" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "jwoN5Vq0dFYe" + }, + "outputs": [], + "source": [ + "# Create folder for best model\n", + "!mkdir /content/best_model/\n", + "\n", + "# Copy label encoder\n", + "!cp results/speaker_id/1986/save/label_encoder.txt /content/best_model/\n", + "\n", + "# Copy best model\n", + "!cp \"`ls -td results/speaker_id/1986/save/CKPT* | tail -1`\"/* /content/best_model/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mnCM5xuy85P4" + }, + "source": [ + "### Experiment file\n", + "Let's now take a look into how the objects, functions, and hyperparameters declared in the yaml file are used in `train.py` to implement the classifier.\n", + "\n", + "Let's start from the main of the `train.py`:\n", + "\n", + "\n", + "```python\n", + "# Recipe 
begins!\n", + "if __name__ == \"__main__\":\n", + "\n", + " # Reading command line arguments.\n", + " hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])\n", + "\n", + " # Initialize ddp (useful only for multi-GPU DDP training).\n", + " sb.utils.distributed.ddp_init_group(run_opts)\n", + "\n", + " # Load hyperparameters file with command-line overrides.\n", + " with open(hparams_file) as fin:\n", + " hparams = load_hyperpyyaml(fin, overrides)\n", + "\n", + " # Create experiment directory\n", + " sb.create_experiment_directory(\n", + " experiment_directory=hparams[\"output_folder\"],\n", + " hyperparams_to_save=hparams_file,\n", + " overrides=overrides,\n", + " )\n", + "\n", + " # Data preparation, to be run on only one process.\n", + " sb.utils.distributed.run_on_main(\n", + " prepare_mini_librispeech,\n", + " kwargs={\n", + " \"data_folder\": hparams[\"data_folder\"],\n", + " \"save_json_train\": hparams[\"train_annotation\"],\n", + " \"save_json_valid\": hparams[\"valid_annotation\"],\n", + " \"save_json_test\": hparams[\"test_annotation\"],\n", + " \"split_ratio\": [80, 10, 10],\n", + " },\n", + " )\n", + "```\n", + "\n", + "We here do some preliminary operations such as parsing the command line, initializing the distributed data-parallel (needed if multiple GPUs are used), creating the output folder, and reading the yaml file.\n", + "\n", + "After reading the yaml file with `load_hyperpyyaml`, all the objects declared in the hyperparameter files are initialized and available in a dictionary form (along with the other functions and parameters reported in the yaml file).\n", + "For instance, we will have `hparams['embedding_model']`, `hparams['classifier']`, `hparams['batch_size']`, etc.\n", + "\n", + "We also run the data preparation script `prepare_mini_librispeech` that creates the data manifest files. 
It is wrapped with `sb.utils.distributed.run_on_main` because this operation writes the manifest files on disk and this must be done on a single process even in a multi-GPU DDP scenario. For more information on how to use multiple GPUs, [please take a look into this tutorial](https://speechbrain.readthedocs.io/en/latest/multigpu.html).\n", + "\n", + "\n", + "#### Data-IO Pipeline\n", + "We then call a special function that creates the dataset objects for training, validation, and test.\n", + "\n", + "```python\n", + " # Create dataset objects \"train\", \"valid\", and \"test\".\n", + " datasets = dataio_prep(hparams)\n", + "```\n", + "\n", + "Let's take a closer look into that.\n", + "\n", + "\n", + "```python\n", + "def dataio_prep(hparams):\n", + " \"\"\"This function prepares the datasets to be used in the brain class.\n", + " It also defines the data processing pipeline through user-defined functions.\n", + " We expect `prepare_mini_librispeech` to have been called before this,\n", + " so that the `train.json`, `valid.json`, and `valid.json` manifest files\n", + " are available.\n", + " Arguments\n", + " ---------\n", + " hparams : dict\n", + " This dictionary is loaded from the `train.yaml` file, and it includes\n", + " all the hyperparameters needed for dataset construction and loading.\n", + " Returns\n", + " -------\n", + " datasets : dict\n", + " Contains two keys, \"train\" and \"valid\" that correspond\n", + " to the appropriate DynamicItemDataset object.\n", + " \"\"\"\n", + "\n", + " # Initialization of the label encoder. 
The label encoder assignes to each\n", + " # of the observed label a unique index (e.g, 'spk01': 0, 'spk02': 1, ..)\n", + " label_encoder = sb.dataio.encoder.CategoricalEncoder()\n", + "\n", + " # Define audio pipeline\n", + " @sb.utils.data_pipeline.takes(\"wav\")\n", + " @sb.utils.data_pipeline.provides(\"sig\")\n", + " def audio_pipeline(wav):\n", + " \"\"\"Load the signal, and pass it and its length to the corruption class.\n", + " This is done on the CPU in the `collate_fn`.\"\"\"\n", + " sig = sb.dataio.dataio.read_audio(wav)\n", + " return sig\n", + "\n", + " # Define label pipeline:\n", + " @sb.utils.data_pipeline.takes(\"spk_id\")\n", + " @sb.utils.data_pipeline.provides(\"spk_id\", \"spk_id_encoded\")\n", + " def label_pipeline(spk_id):\n", + " yield spk_id\n", + " spk_id_encoded = label_encoder.encode_label_torch(spk_id)\n", + " yield spk_id_encoded\n", + "\n", + " # Define datasets. We also connect the dataset with the data processing\n", + " # functions defined above.\n", + " datasets = {}\n", + " hparams[\"dataloader_options\"][\"shuffle\"] = False\n", + " for dataset in [\"train\", \"valid\", \"test\"]:\n", + " datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(\n", + " json_path=hparams[f\"{dataset}_annotation\"],\n", + " replacements={\"data_root\": hparams[\"data_folder\"]},\n", + " dynamic_items=[audio_pipeline, label_pipeline],\n", + " output_keys=[\"id\", \"sig\", \"spk_id_encoded\"],\n", + " )\n", + "\n", + " # Load or compute the label encoder (with multi-GPU DDP support)\n", + " # Please, take a look into the lab_enc_file to see the label to index\n", + " # mappinng.\n", + " lab_enc_file = os.path.join(hparams[\"save_folder\"], \"label_encoder.txt\")\n", + " label_encoder.load_or_create(\n", + " path=lab_enc_file,\n", + " from_didatasets=[datasets[\"train\"]],\n", + " output_key=\"spk_id\",\n", + " )\n", + "\n", + " return datasets\n", + "```\n", + "\n", + "The first part is just a declaration of the `CategoricalEncoder` that 
will be used to convert categorical labels into their corresponding indexes.\n", + "\n", + "\n", + "You can then notice that we expose the audio and label processing functions.\n", + "\n", + "The `audio_pipeline` takes the path of the audio signal (`wav`) and reads it. It returns a tensor containing the read speech sentence. The entry in input to this function (i.e, `wav`) must have the same name as the corresponding key in the data manifest file:\n", + "\n", + "```json\n", + "{\n", + " \"163-122947-0045\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/163/122947/163-122947-0045.flac\",\n", + " \"length\": 14.335,\n", + " \"spk_id\": \"163\"\n", + " },\n", + "}\n", + "```\n", + "\n", + "Similarly, we define another function called `label_pipeline` for processing the utterance-level labels and put them in a format usable by the defined model. The function reads the string `spk_id` defined in the JSON file and encodes it with the categorical encoder.\n", + "\n", + "We then create the `DynamicItemDataset` and connect it with the processing functions defined above. We define the **desired output keys** to expose. These keys will be available in the brain class within the batch variable as:\n", + "- batch.id\n", + "- batch.sig\n", + "- batch.spk_id_encoded\n", + "\n", + "The last part of the function is dedicated to the initialization of the label encoder. The label encoder takes in input the training dataset and assigns a different index to all the `spk_id` entries found. 
These indexes will correspond to the output indexes of the classifier.\n", + "\n", + "[For more information on the data loader, please take a look into this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)\n", + "\n", + "\n", + "\n", + "\n", + "After the definition of the datasets, the main function can go ahead with the initialization and use of the brain class:\n", + "\n", + "```python\n", + " # Initialize the Brain object to prepare for mask training.\n", + " spk_id_brain = SpkIdBrain(\n", + " modules=hparams[\"modules\"],\n", + " opt_class=hparams[\"opt_class\"],\n", + " hparams=hparams,\n", + " run_opts=run_opts,\n", + " checkpointer=hparams[\"checkpointer\"],\n", + " )\n", + "\n", + " # The `fit()` method iterates the training loop, calling the methods\n", + " # necessary to update the parameters of the model. Since all objects\n", + " # with changing state are managed by the Checkpointer, training can be\n", + " # stopped at any point, and will be resumed on next call.\n", + " spk_id_brain.fit(\n", + " epoch_counter=spk_id_brain.hparams.epoch_counter,\n", + " train_set=datasets[\"train\"],\n", + " valid_set=datasets[\"valid\"],\n", + " train_loader_kwargs=hparams[\"dataloader_options\"],\n", + " valid_loader_kwargs=hparams[\"dataloader_options\"],\n", + " )\n", + "\n", + " # Load the best checkpoint for evaluation\n", + " test_stats = spk_id_brain.evaluate(\n", + " test_set=datasets[\"test\"],\n", + " min_key=\"error\",\n", + " test_loader_kwargs=hparams[\"dataloader_options\"],\n", + " )\n", + "```\n", + "The `fit` method performs training, while the test is performed with the `evaluate` one. 
The training and validation data loaders are given in input to the fit method, while the test dataset is fed into the evaluate method.\n", + "\n", + "Let's now take a look into the most important methods defined in the brain class.\n", + "\n", + "\n", + "\n", + "#### Forward Computations\n", + "\n", + "Let's start with the `forward` function, which defines all the computations needed to transform the input audio into the output predictions.\n", + "\n", + "\n", + "```python\n", + " def compute_forward(self, batch, stage):\n", + " \"\"\"Runs all the computation of that transforms the input into the\n", + " output probabilities over the N classes.\n", + " Arguments\n", + " ---------\n", + " batch : PaddedBatch\n", + " This batch object contains all the relevant tensors for computation.\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.\n", + " Returns\n", + " -------\n", + " predictions : Tensor\n", + " Tensor that contains the posterior probabilities over the N classes.\n", + " \"\"\"\n", + "\n", + " # We first move the batch to the appropriate device.\n", + " batch = batch.to(self.device)\n", + "\n", + " # Compute features, embeddings, and predictions\n", + " feats, lens = self.prepare_features(batch.sig, stage)\n", + " embeddings = self.modules.embedding_model(feats, lens)\n", + " predictions = self.modules.classifier(embeddings)\n", + "\n", + " return predictions\n", + "```\n", + "\n", + "In this case, the chain of computation is very simple. We just put the batch on the right device and compute the acoustic features. We then process the features with the TDNN encoder that outputs a fixed-size tensor. The latter feeds a classifier that outputs the posterior probabilities over the N classes (in this case the 28 speakers). 
Data augmentation is added in the prepare_features method:\n", + "\n", + "```python\n", + " def prepare_features(self, wavs, stage):\n", + " \"\"\"Prepare the features for computation, including augmentation.\n", + " Arguments\n", + " ---------\n", + " wavs : tuple\n", + " Input signals (tensor) and their relative lengths (tensor).\n", + " stage : sb.Stage\n", + " The current stage of training.\n", + " \"\"\"\n", + " wavs, lens = wavs\n", + "\n", + " # Add augmentation if specified. In this version of augmentation, we\n", + " # concatenate the original and the augment batches in a single bigger\n", + " # batch. This is more memory-demanding, but helps to improve the\n", + " # performance. Change it if you run OOM.\n", + " if stage == sb.Stage.TRAIN:\n", + " if hasattr(self.modules, \"env_corrupt\"):\n", + " wavs_noise = self.modules.env_corrupt(wavs, lens)\n", + " wavs = torch.cat([wavs, wavs_noise], dim=0)\n", + " lens = torch.cat([lens, lens])\n", + "\n", + " if hasattr(self.hparams, \"augmentation\"):\n", + " wavs = self.hparams.augmentation(wavs, lens)\n", + "\n", + " # Feature extraction and normalization\n", + " feats = self.modules.compute_features(wavs)\n", + " feats = self.modules.mean_var_norm(feats, lens)\n", + "\n", + " return feats, lens\n", + "```\n", + "In particular, when the environmental corruption is declared in the yaml file, we concatenate in the same batch both the clean and the augmented version of the signals.\n", + "\n", + "This approach doubles the batch size (and thus the needed GPU memory), but it implements a very **powerful regularizer**. 
# Append this batch of losses to the loss metric for easy summarization at the end of stage
The `on_stage_start` method gets called at the beginning of each stage and it is used to set up statistics trackers.
Let's first see how we can use it to load our best xvector model (trained on Voxceleb and stored on HuggingFace) to compute some embeddings and perform a speaker classification:\n
"956a72f93c814392905fb07ed76a40b3", + "c53bb16394a144bd8021ca8f52d9a8ce", + "6ff2e8cbb30e4d1bb5691d985e0a9764", + "f5cd21c666064b89949771ef49616910", + "632f56cbf7e8434a8530b1d8cbb31efb", + "507f59cb3c004e199013cc0f9bc77d08", + "9b0d060f2ddb4455a3ef77818d56b33b", + "25040fe4255a4a759d4207889bcb2eaa", + "6c03d580bf50439e8a7ca3580ca5aea3", + "f7a4481dcbeb41a7bb7c5f7c879f31c9", + "e529d5f8145f496db1bb29872a22b124", + "74d37a4cc3da4596b246fdf457dc4b05", + "1e885488457649c2a7807dd90d7e2d0b", + "a66dc32b31504316885e8ac74d1085a9", + "22bbfdfca73b448593226042f616c30c" + ] + }, + "id": "uvvY0dCbx5Sv", + "outputId": "3235185a-0654-44b9-d6c4-c7c2cf479e47" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "DEBUG:speechbrain.utils.checkpoints:Registered checkpoint save hook for _speechbrain_save\n", + "DEBUG:speechbrain.utils.checkpoints:Registered checkpoint load hook for _speechbrain_load\n", + "DEBUG:speechbrain.utils.checkpoints:Registered checkpoint save hook for save\n", + "DEBUG:speechbrain.utils.checkpoints:Registered checkpoint load hook for load\n", + "DEBUG:speechbrain.utils.checkpoints:Registered checkpoint save hook for _save\n", + "DEBUG:speechbrain.utils.checkpoints:Registered checkpoint load hook for _recover\n", + "INFO:speechbrain.utils.fetching:Fetch hyperparams.yaml: Fetching from HuggingFace Hub 'speechbrain/spkrec-xvect-voxceleb' if not cached\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "hyperparams.yaml: 0%| | 0.00/2.04k [00:00 /root/.cache/huggingface/hub/models--speechbrain--spkrec-xvect-voxceleb/snapshots/56895a2df401be4150a159f3a1c653f00051d477/embedding_model.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): mean_var_norm_emb -> /root/.cache/huggingface/hub/models--speechbrain--spkrec-xvect-voxceleb/snapshots/56895a2df401be4150a159f3a1c653f00051d477/mean_var_norm_emb.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting 
(loading from local path): classifier -> /root/.cache/huggingface/hub/models--speechbrain--spkrec-xvect-voxceleb/snapshots/56895a2df401be4150a159f3a1c653f00051d477/classifier.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): label_encoder -> /root/.cache/huggingface/hub/models--speechbrain--spkrec-xvect-voxceleb/snapshots/56895a2df401be4150a159f3a1c653f00051d477/label_encoder.txt\n", + "DEBUG:speechbrain.dataio.encoder:Loaded categorical encoding from /root/.cache/huggingface/hub/models--speechbrain--spkrec-xvect-voxceleb/snapshots/56895a2df401be4150a159f3a1c653f00051d477/label_encoder.txt\n", + "WARNING:speechbrain.dataio.encoder:CategoricalEncoder.expect_len was never called: assuming category count of 7205 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "tensor([[-31.8672, -35.2025, -25.7931, ..., -21.0044, -12.4278, -21.5265]])\n", + "tensor([-1.1279])\n", + "tensor([2710])\n", + "['id10892']\n" + ] + } + ], + "source": [ + "import torchaudio\n", + "from speechbrain.inference.classifiers import EncoderClassifier\n", + "classifier = EncoderClassifier.from_hparams(source=\"speechbrain/spkrec-xvect-voxceleb\")\n", + "signal, fs =torchaudio.load('/content/speechbrain/tests/samples/single-mic/example1.wav')\n", + "\n", + "# Compute speaker embeddings\n", + "embeddings = classifier.encode_batch(signal)\n", + "\n", + "# Perform classification\n", + "output_probs, score, index, text_lab = classifier.classify_batch(signal)\n", + "\n", + "# Posterior log probabilities\n", + "print(output_probs)\n", + "\n", + "# Score (i.e, max log posteriors)\n", + "print(score)\n", + "\n", + "# Index of the predicted speaker\n", + "print(index)\n", + "\n", + "# Text label of the predicted speaker\n", + "print(text_lab)\n" + ] + }, + { + 
"cell_type": "markdown", + "metadata": { + "id": "LAZci6oSzdh_" + }, + "source": [ + "For those of you interested in speaker verification, we also created an inference interface called `SpeakerRecognition`:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 613, + "referenced_widgets": [ + "887bb807621b4a52bafa603b423818d7", + "862eb525376c4e9ebb2132e16b3a8dfc", + "dbad817dc3af4e8c82451e5096058c95", + "e47ca1f17a2e4fe9a7df688c20979c01", + "9a3ff2ca0bd447f5abb5041b11196fdd", + "24a7b20b24974d87a4a2e526ad88e3e9", + "ed0b2233f75d4fc4a4ed87e6f93c038a", + "c3f0c1d5be76417087c7e4e43783e426", + "7851faaab81e413e9cebfeabd49735e3", + "113d1a2b848f4c8e852ec8857ef2f3a1", + "f57c32394adc43c0ac6a978bdfb7d61e", + "faa9d965b36f4283bf35e640c2649577", + "23d9fac6961243b58fd15b167d514021", + "e8306633dec4482888a2c93dab1d69e6", + "9a76581a68c14cbe9ef90f19f61cbf79", + "22055e255223496fab6c96b23803406b", + "0a86749f2a8140cca4f167ccde8d5dc9", + "6145a9f19faa40168cf1054e3d5d5861", + "8500f64b6b534eb689d6f8a9d536cd0c", + "21a6c936feec4a878d26d171ad3897f9", + "a0b9d575dab04492bfb401997257fcc2", + "5252430fbff94d51aaf17e50367e42af", + "c2289505161d4d47916376a84f63d153", + "5210f390c42347499c89da7c2f8f333f", + "cc457b0647f544e2a76d5345ef28f3a7", + "6cc81cbfc6ab418e885c84ac2028d55d", + "51a28ff42db34514bbdb9ceda0bc57ef", + "b1ebfe23106a4cecbfa2496a1883fc16", + "08f227fe1888468682630133dba8a389", + "5d03389878fe40179850c0279d3afd9c", + "71b55971c0e9446a978c2014f2f24d2d", + "d20ce2bd6b1040a8b700e0b3249089ba", + "b0564fc611df4282ba0a3730e6c5d2c7", + "5ff5a61c0a954485866a7a92b8186cff", + "5fd6afffa3c64d4fbf81623e9f02de0e", + "86756fff0f424fdbbaae947b78c770e2", + "9c10c16ecffc4f53969230b4994804fe", + "984afd65bbb04202a09060500bd23b0f", + "f0dd418cea0f486080356da09a1310bc", + "109cb7412ed6490698aca150e1ecf86c", + "1f7f9b00af8c46aea9cc7b943246d0fc", + "fb8c1d51942a4fd78357cec0c0530808", + 
"6808a5f59fb34e37be4d2aee4bbd7eeb", + "9165a52e0949480aa936ae42040bd3cc", + "9b437811fa164e5ca2b8eb9131338f89", + "61f6950cb76643bc860e7a43e7e7bb58", + "842094e522cc42a58dc9d1785bd9314e", + "59bb0e1afd4247d78f254ff09ff354ca", + "bd327f7a96ea484bb4913d9f64cc31eb", + "260f1edf6acb478599fdc410072517b9", + "033c839455d8408fb85e4272f744fb69", + "9178f1efb3f640d5a08280e17c384b9a", + "dcb14ddca33545139272eea73ca14248", + "481e5a2f44db4757972410ae6f4baddd", + "b978f40aa89d4cdfba13d7731dbee766" + ] + }, + "id": "l-enSWy_z8CF", + "outputId": "e45ec7c0-6129-467f-936c-4b0ea00efde2" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "INFO:speechbrain.utils.fetching:Fetch hyperparams.yaml: Fetching from HuggingFace Hub 'speechbrain/spkrec-ecapa-voxceleb' if not cached\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "hyperparams.yaml: 0%| | 0.00/1.92k [00:00 '/content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/hyperparams.yaml'\n", + "INFO:speechbrain.utils.fetching:Fetch custom.py: Fetching from HuggingFace Hub 'speechbrain/spkrec-ecapa-voxceleb' if not cached\n", + "DEBUG:speechbrain.utils.parameter_transfer:Collecting files (or symlinks) for pretraining in pretrained_models/spkrec-ecapa-voxceleb.\n", + "INFO:speechbrain.utils.fetching:Fetch embedding_model.ckpt: Fetching from HuggingFace Hub 'speechbrain/spkrec-ecapa-voxceleb' if not cached\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "embedding_model.ckpt: 0%| | 0.00/83.3M [00:00 '/content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/embedding_model.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"embedding_model\"] = /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/embedding_model.ckpt\n", + "INFO:speechbrain.utils.fetching:Fetch mean_var_norm_emb.ckpt: Fetching from HuggingFace Hub 
'speechbrain/spkrec-ecapa-voxceleb' if not cached\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "mean_var_norm_emb.ckpt: 0%| | 0.00/1.92k [00:00 '/content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/mean_var_norm_emb.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"mean_var_norm_emb\"] = /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/mean_var_norm_emb.ckpt\n", + "INFO:speechbrain.utils.fetching:Fetch classifier.ckpt: Fetching from HuggingFace Hub 'speechbrain/spkrec-ecapa-voxceleb' if not cached\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "classifier.ckpt: 0%| | 0.00/5.53M [00:00 '/content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/classifier.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"classifier\"] = /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/classifier.ckpt\n", + "INFO:speechbrain.utils.fetching:Fetch label_encoder.txt: Fetching from HuggingFace Hub 'speechbrain/spkrec-ecapa-voxceleb' if not cached\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "label_encoder.txt: 0%| | 0.00/129k [00:00 '/content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/label_encoder.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"label_encoder\"] = /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/label_encoder.ckpt\n", + "INFO:speechbrain.utils.parameter_transfer:Loading pretrained files for: embedding_model, mean_var_norm_emb, classifier, label_encoder\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): embedding_model -> /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/embedding_model.ckpt\n", + 
"DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): mean_var_norm_emb -> /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/mean_var_norm_emb.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): classifier -> /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/classifier.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): label_encoder -> /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/label_encoder.ckpt\n", + "DEBUG:speechbrain.dataio.encoder:Loaded categorical encoding from /content/speechbrain/templates/speaker_id/pretrained_models/spkrec-ecapa-voxceleb/label_encoder.ckpt\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "tensor([0.1799])\n", + "tensor([False])\n" + ] + } + ], + "source": [ + "from speechbrain.inference.speaker import SpeakerRecognition\n", + "verification = SpeakerRecognition.from_hparams(source=\"speechbrain/spkrec-ecapa-voxceleb\", savedir=\"pretrained_models/spkrec-ecapa-voxceleb\")\n", + "\n", + "file1 = '/content/speechbrain/tests/samples/single-mic/example1.wav'\n", + "file2 = '/content/speechbrain/tests/samples/single-mic/example2.flac'\n", + "\n", + "score, prediction = verification.verify_files(file1, file2)\n", + "\n", + "print(score)\n", + "print(prediction) # True = same speaker, False=Different speakers" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbS7ZncE3IfM" + }, + "source": [ + "But, *how does this work with our custom classifier that we trained before*?\n", + "\n", + "At this point, some options are available to you. 
To use this interface with the model trained before, we have to create an **inference yaml** file which is a bit different from the one used for training.
You have to use the pre-trainer to link your model with their corresponding files.\n", + "\n", + "The inference yaml file looks like that:\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ys41HanSaCys", + "outputId": "2638b156-67d2-433e-d010-86247acab980" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Writing /content/best_model/hparams_inference.yaml\n" + ] + } + ], + "source": [ + "%%writefile /content/best_model/hparams_inference.yaml\n", + "\n", + "# #################################\n", + "# Basic inference parameters for speaker-id. We have first a network that\n", + "# computes some embeddings. On the top of that, we employ a classifier.\n", + "#\n", + "# Author:\n", + "# * Mirco Ravanelli 2021\n", + "# #################################\n", + "\n", + "# pretrain folders:\n", + "pretrained_path: /content/best_model/\n", + "\n", + "\n", + "# Model parameters\n", + "n_mels: 23\n", + "sample_rate: 16000\n", + "n_classes: 28 # In this case, we have 28 speakers\n", + "emb_dim: 512 # dimensionality of the embeddings\n", + "\n", + "# Feature extraction\n", + "compute_features: !new:speechbrain.lobes.features.Fbank\n", + " n_mels: !ref \n", + "\n", + "# Mean and std normalization of the input features\n", + "mean_var_norm: !new:speechbrain.processing.features.InputNormalization\n", + " norm_type: sentence\n", + " std_norm: False\n", + "\n", + "# To design a custom model, either just edit the simple CustomModel\n", + "# class that's listed here, or replace this `!new` call with a line\n", + "# pointing to a different file you've defined.\n", + "embedding_model: !new:custom_model.Xvector\n", + " in_channels: !ref \n", + " activation: !name:torch.nn.LeakyReLU\n", + " tdnn_blocks: 5\n", + " tdnn_channels: [512, 512, 512, 512, 1500]\n", + " tdnn_kernel_sizes: [5, 3, 3, 1, 1]\n", + " tdnn_dilations: [1, 2, 3, 1, 
As you can see, we only have the model definition here (not optimizers, checkpointer, etc.).
The last part of the yaml file manages pretraining, where we bind model objects with their pre-training files created at training time.\n", + "\n", + "Let's now perform inference with the `EncoderClassifier` class:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "2fT8ON1iiuQY", + "outputId": "b7e6988f-dc35-426c-dd95-5665b78967f2" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "INFO:speechbrain.utils.fetching:Fetch hparams_inference.yaml: Using file found at '/content/best_model/hparams_inference.yaml'\n", + "DEBUG:speechbrain.utils.fetching:Fetch: Source and destination '/content/best_model/custom.py' are identical, returning assuming this is intended\n", + "DEBUG:speechbrain.utils.parameter_transfer:Collecting files (or symlinks) for pretraining in /content/best_model.\n", + "INFO:speechbrain.utils.fetching:Fetch embedding_model.ckpt: Using file found at '/content/best_model/embedding_model.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"embedding_model\"] = /content/best_model/embedding_model.ckpt\n", + "INFO:speechbrain.utils.fetching:Fetch classifier.ckpt: Using file found at '/content/best_model/classifier.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"classifier\"] = /content/best_model/classifier.ckpt\n", + "DEBUG:speechbrain.utils.fetching:Fetch: Local file found, creating symlink '/content/best_model/label_encoder.txt' -> '/content/best_model/label_encoder.ckpt'\n", + "DEBUG:speechbrain.utils.parameter_transfer:Set local path in self.paths[\"label_encoder\"] = /content/best_model/label_encoder.ckpt\n", + "INFO:speechbrain.utils.parameter_transfer:Loading pretrained files for: embedding_model, classifier, label_encoder\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): embedding_model -> 
/content/best_model/embedding_model.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): classifier -> /content/best_model/classifier.ckpt\n", + "DEBUG:speechbrain.utils.parameter_transfer:Redirecting (loading from local path): label_encoder -> /content/best_model/label_encoder.ckpt\n", + "DEBUG:speechbrain.dataio.encoder:Loaded categorical encoding from /content/best_model/label_encoder.ckpt\n", + "WARNING:speechbrain.dataio.encoder:CategoricalEncoder.expect_len was never called: assuming category count of 28 to be correct! Sanity check your encoder using `.expect_len`. Ensure that downstream code also uses the correct size. If you are sure this does not apply to you, use `.ignore_len`.\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Target: 5789, Predicted: 5789\n", + "Target: 460, Predicted: 460\n" + ] + } + ], + "source": [ + "from speechbrain.inference.classifiers import EncoderClassifier\n", + "\n", + "classifier = EncoderClassifier.from_hparams(source=\"/content/best_model/\", hparams_file='hparams_inference.yaml', savedir=\"/content/best_model/\")\n", + "\n", + "# Perform classification\n", + "audio_file = 'data/LibriSpeech/train-clean-5/5789/70653/5789-70653-0036.flac'\n", + "signal, fs = torchaudio.load(audio_file) # test_speaker: 5789\n", + "output_probs, score, index, text_lab = classifier.classify_batch(signal)\n", + "print('Target: 5789, Predicted: ' + text_lab[0])\n", + "\n", + "# Another speaker\n", + "audio_file = 'data/LibriSpeech/train-clean-5/460/172359/460-172359-0012.flac'\n", + "signal, fs =torchaudio.load(audio_file) # test_speaker: 460\n", + "output_probs, score, index, text_lab = classifier.classify_batch(signal)\n", + "print('Target: 460, Predicted: ' + text_lab[0])\n", + "\n", + "# And if you want to extract embeddings...\n", + "embeddings = classifier.encode_batch(signal)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SSxRDjEnB0FA" + }, + 
"- *embedding_model*: that converts features into fixed-size embeddings.\n",
    "- *classifier*: that performs a final classification over N classes on top of the embeddings.\n",
Change the data pipeline in `train.py` to be compliant with the new annotations.\n", + "\n", + "#### Change the JSON\n", + "This tutorial expects JSON files like this:\n", + "\n", + "```json\n", + "{\n", + " \"163-122947-0045\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/163/122947/163-122947-0045.flac\",\n", + " \"length\": 14.335,\n", + " \"spk_id\": \"163\"\n", + " },\n", + " \"7312-92432-0025\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/7312/92432/7312-92432-0025.flac\",\n", + " \"length\": 12.01,\n", + " \"spk_id\": \"7312\"\n", + " },\n", + " \"7859-102519-0036\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/7859/102519/7859-102519-0036.flac\",\n", + " \"length\": 11.965,\n", + " \"spk_id\": \"7859\"\n", + " },\n", + "}\n", + "```\n", + "\n", + "However, you can add here all the entries that you want. For instance, if you would like to solve a language-id task, the JSON file should look like this:\n", + "\n", + "```json\n", + "{\n", + " \"sentence001\": {\n", + " \"wav\": \"{data_root}/your_path/your_file1.wav\",\n", + " \"length\": 10.335,\n", + " \"lang_id\": \"Italian\"\n", + " },\n", + "{\n", + " \"sentence002\": {\n", + " \"wav\": \"{data_root}/your_path/your_file2.wav\",\n", + " \"length\": 12.335,\n", + " \"lang_id\": \"French\"\n", + " },\n", + "}\n", + "```\n", + "\n", + "If you would like to solve an emotion recognition task, it will look like that:\n", + "\n", + "\n", + "```json\n", + "{\n", + " \"sentence001\": {\n", + " \"wav\": \"{data_root}/your_path/your_file1.wav\",\n", + " \"length\": 10.335,\n", + " \"emotion\": \"Happy\"\n", + " },\n", + "{\n", + " \"sentence002\": {\n", + " \"wav\": \"{data_root}/your_path/your_file2.wav\",\n", + " \"length\": 12.335,\n", + " \"emotion\": \"Sad\"\n", + " },\n", + "}\n", + "```\n", + "To create the data manifest files, you have to **parse your dataset and create JSON files** with a unique ID for each sentence, the path of the audio signal (wav), the length of 
The only thing to remember is that the name entries in the JSON file must match what the dataloader expects in `train.py`.
When plugging a new model, you have to tune again the most important hyperparameters of the system (e.g., learning rate, batch size, and the architectural parameters) to make it work well.
The AAM-softmax is a powerful enhancement compared to the regular softmax loss in the context of fine-grained classification and verification problems. It directly optimizes the\n", + "cosine distance between the speaker embeddings.\n", + "\n", + "\n", + "The model turned out to work amazingly well for [speaker verification](https://arxiv.org/abs/2005.07143) and [speaker diarization](https://arxiv.org/abs/2104.01466). We found it very effective in other utterance-level classification tasks such as language-id, emotion recognition, and keyword spotting.\n", + "\n", + "[Please take a look into the original paper for more info](https://arxiv.org/abs/2005.07143)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "W4pPJ0k3lJZj" + }, + "source": [ + "\n", + "\n", + "## Conclusion\n", + "\n", + "In this tutorial, we showed how to create an utterance-level classifier from scratch using SpeechBrain. The proposed system contains all the basic ingredients to develop a state-of-the-art system (i.e., data augmentation, feature extraction, encoding, statistical pooling, classifier, etc)\n", + "\n", + "We described all the steps using a small dataset only. In a real case you have to train with much more data (see for instance our [Voxceleb recipe](https://github.com/speechbrain/speechbrain/tree/develop/recipes/VoxCeleb))." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P-Trg_abjUTd" + }, + "source": [ + "## Related Tutorials\n", + "1. [YAML hyperparameter specification](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html)\n", + "2. [Brain Class](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html)\n", + "3. [Checkpointing](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/checkpointing.html)\n", + "4. [Data-io](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)\n", + "5. 
[ASR from Scratch](https://speechbrain.readthedocs.io/en/latest/tutorials/tasks/speech-recognition-from-scratch.html)\n", + "6. [Speech Features](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-features.html)\n", + "7. [Speech Augmentation](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-augmentation.html)\n", + "8. [Environmental Corruption](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/environmental-corruption.html)\n", + "9. [MultiGPU Training](https://speechbrain.readthedocs.io/en/latest/multigpu.html)\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele 
Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "a436cb5baef1422191c0d92caf127d14": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_b2bcdd7e4676441dae05e6212ad51ee8", + "IPY_MODEL_6f562c8c50f24f3ebfe4c272ba508548", + "IPY_MODEL_c7ced7c215c146e5a2ba24145e9f5cce" + ], + "layout": "IPY_MODEL_cbdd872bcc8a44219f7d3f735c7fc876" + } + }, + "b2bcdd7e4676441dae05e6212ad51ee8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + 
"_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a1b6aa1ae6824d1f934bfa2ddf617458", + "placeholder": "​", + "style": "IPY_MODEL_cc735b2cdc4c45e4aa6a19d0be73be09", + "value": "hyperparams.yaml: 100%" + } + }, + "6f562c8c50f24f3ebfe4c272ba508548": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_675ec179af9f4fc28ea8b0b210cf5ebc", + "max": 2041, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_b3eefd8092b74602aa973fdf33a80d6f", + "value": 2041 + } + }, + "c7ced7c215c146e5a2ba24145e9f5cce": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_65ae5e70856b4b0f9bea99714d07dda1", + "placeholder": "​", + "style": "IPY_MODEL_bc4ffd0ca5f84af7acbdcb7de6702c47", + "value": " 2.04k/2.04k [00:00<00:00, 209kB/s]" + } + }, + "cbdd872bcc8a44219f7d3f735c7fc876": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + 
"_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "a1b6aa1ae6824d1f934bfa2ddf617458": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + 
"min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "cc735b2cdc4c45e4aa6a19d0be73be09": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "675ec179af9f4fc28ea8b0b210cf5ebc": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b3eefd8092b74602aa973fdf33a80d6f": { + 
"model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "65ae5e70856b4b0f9bea99714d07dda1": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "bc4ffd0ca5f84af7acbdcb7de6702c47": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + 
"_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "3a616257760e48b7bfe3c38980a36271": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_ab693b816cfd4a67b13f301e9d186591", + "IPY_MODEL_6b9c3fff80094b83bac66d8c16411e97", + "IPY_MODEL_86bd02d6360f4b7abd3b07c7a7fb175d" + ], + "layout": "IPY_MODEL_d8553b03699c447faa81e9b1068bf0c1" + } + }, + "ab693b816cfd4a67b13f301e9d186591": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_99d171a1b5c34e5ca4698b02f7cfead8", + "placeholder": "​", + "style": "IPY_MODEL_10a308d891da4d698e4d289c9bb29c06", + "value": "embedding_model.ckpt: 100%" + } + }, + "6b9c3fff80094b83bac66d8c16411e97": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + 
"description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_9886897b6131489d923fea2c39244e37", + "max": 16887676, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_5d59d9d95d3040c188811a9f755bccbf", + "value": 16887676 + } + }, + "86bd02d6360f4b7abd3b07c7a7fb175d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6e4671f3a74f4a1aa0dae77e1ac06eab", + "placeholder": "​", + "style": "IPY_MODEL_529072e308c64811a3dc95bffe31ffe7", + "value": " 16.9M/16.9M [00:00<00:00, 67.2MB/s]" + } + }, + "d8553b03699c447faa81e9b1068bf0c1": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": 
null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "99d171a1b5c34e5ca4698b02f7cfead8": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "10a308d891da4d698e4d289c9bb29c06": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9886897b6131489d923fea2c39244e37": { + "model_module": "@jupyter-widgets/base", + "model_name": 
"LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5d59d9d95d3040c188811a9f755bccbf": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "6e4671f3a74f4a1aa0dae77e1ac06eab": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": 
"1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "529072e308c64811a3dc95bffe31ffe7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "78e0ca220ca04a8abd2e36fbb33e9479": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_c056f40907b944e39f2131ef47755b15", + "IPY_MODEL_2f36faad96f9425d9e8f67be958b3f28", + "IPY_MODEL_02a1d8037c634363b0311bf0196db47c" + ], + "layout": 
"IPY_MODEL_5c658cb3bff347bfa1fb55f213f82fd8" + } + }, + "c056f40907b944e39f2131ef47755b15": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_7e495ee42bdc4520b2d69153bebe5838", + "placeholder": "​", + "style": "IPY_MODEL_3371fb507d664eb087cc342c6a3ccb22", + "value": "mean_var_norm_emb.ckpt: 100%" + } + }, + "2f36faad96f9425d9e8f67be958b3f28": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d4d34a424da446dd8e3448d9cd982f11", + "max": 3201, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_f16e3cb770dd43abb0c3fc7ba10e7858", + "value": 3201 + } + }, + "02a1d8037c634363b0311bf0196db47c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": 
"IPY_MODEL_7086d0fcc3fe44039f05cc3c2b4a610b", + "placeholder": "​", + "style": "IPY_MODEL_e75ca534b9e14ab9b17b043360266723", + "value": " 3.20k/3.20k [00:00<00:00, 369kB/s]" + } + }, + "5c658cb3bff347bfa1fb55f213f82fd8": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7e495ee42bdc4520b2d69153bebe5838": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + 
"grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "3371fb507d664eb087cc342c6a3ccb22": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "d4d34a424da446dd8e3448d9cd982f11": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + 
"justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f16e3cb770dd43abb0c3fc7ba10e7858": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "7086d0fcc3fe44039f05cc3c2b4a610b": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": 
null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e75ca534b9e14ab9b17b043360266723": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "cca22d03bd7c4a88a8cd27036da23c43": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_8bf0008bf92c4e63b51d82075a133677", + "IPY_MODEL_bc7ffe052ca94b4988f35bd687406e0f", + "IPY_MODEL_26a70f330bad4d448f059b401f4b2816" + ], + "layout": "IPY_MODEL_8e7b32b29dc844cda2637a028dbb0ef1" + } + }, + "8bf0008bf92c4e63b51d82075a133677": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_af87a4ef6c3b4eaf8ee7cf506e7a295f", + "placeholder": "​", + "style": "IPY_MODEL_eb99c6f60bd94c48b32ac6af6d31464a", + "value": "classifier.ckpt: 100%" + } + }, + "bc7ffe052ca94b4988f35bd687406e0f": { + "model_module": 
"@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_956a72f93c814392905fb07ed76a40b3", + "max": 15856877, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_c53bb16394a144bd8021ca8f52d9a8ce", + "value": 15856877 + } + }, + "26a70f330bad4d448f059b401f4b2816": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6ff2e8cbb30e4d1bb5691d985e0a9764", + "placeholder": "​", + "style": "IPY_MODEL_f5cd21c666064b89949771ef49616910", + "value": " 15.9M/15.9M [00:00<00:00, 165MB/s]" + } + }, + "8e7b32b29dc844cda2637a028dbb0ef1": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": 
null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "af87a4ef6c3b4eaf8ee7cf506e7a295f": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "eb99c6f60bd94c48b32ac6af6d31464a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + 
"model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "956a72f93c814392905fb07ed76a40b3": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c53bb16394a144bd8021ca8f52d9a8ce": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": 
"StyleView", + "bar_color": null, + "description_width": "" + } + }, + "6ff2e8cbb30e4d1bb5691d985e0a9764": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f5cd21c666064b89949771ef49616910": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "632f56cbf7e8434a8530b1d8cbb31efb": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_507f59cb3c004e199013cc0f9bc77d08", + "IPY_MODEL_9b0d060f2ddb4455a3ef77818d56b33b", + "IPY_MODEL_25040fe4255a4a759d4207889bcb2eaa" + ], + "layout": "IPY_MODEL_6c03d580bf50439e8a7ca3580ca5aea3" + } + }, + "507f59cb3c004e199013cc0f9bc77d08": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f7a4481dcbeb41a7bb7c5f7c879f31c9", + "placeholder": "​", + "style": "IPY_MODEL_e529d5f8145f496db1bb29872a22b124", + "value": "label_encoder.txt: 100%" + } + }, + "9b0d060f2ddb4455a3ef77818d56b33b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_74d37a4cc3da4596b246fdf457dc4b05", + "max": 128619, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_1e885488457649c2a7807dd90d7e2d0b", + "value": 128619 + } + }, + "25040fe4255a4a759d4207889bcb2eaa": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + 
"model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a66dc32b31504316885e8ac74d1085a9", + "placeholder": "​", + "style": "IPY_MODEL_22bbfdfca73b448593226042f616c30c", + "value": " 129k/129k [00:00<00:00, 1.80MB/s]" + } + }, + "6c03d580bf50439e8a7ca3580ca5aea3": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f7a4481dcbeb41a7bb7c5f7c879f31c9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": 
"@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e529d5f8145f496db1bb29872a22b124": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "74d37a4cc3da4596b246fdf457dc4b05": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": 
null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "1e885488457649c2a7807dd90d7e2d0b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "a66dc32b31504316885e8ac74d1085a9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + 
"grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "22bbfdfca73b448593226042f616c30c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "887bb807621b4a52bafa603b423818d7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_862eb525376c4e9ebb2132e16b3a8dfc", + "IPY_MODEL_dbad817dc3af4e8c82451e5096058c95", + "IPY_MODEL_e47ca1f17a2e4fe9a7df688c20979c01" + ], + "layout": "IPY_MODEL_9a3ff2ca0bd447f5abb5041b11196fdd" + } + }, + "862eb525376c4e9ebb2132e16b3a8dfc": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": 
"@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_24a7b20b24974d87a4a2e526ad88e3e9", + "placeholder": "​", + "style": "IPY_MODEL_ed0b2233f75d4fc4a4ed87e6f93c038a", + "value": "hyperparams.yaml: 100%" + } + }, + "dbad817dc3af4e8c82451e5096058c95": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_c3f0c1d5be76417087c7e4e43783e426", + "max": 1919, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_7851faaab81e413e9cebfeabd49735e3", + "value": 1919 + } + }, + "e47ca1f17a2e4fe9a7df688c20979c01": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_113d1a2b848f4c8e852ec8857ef2f3a1", + "placeholder": "​", + "style": "IPY_MODEL_f57c32394adc43c0ac6a978bdfb7d61e", + "value": " 1.92k/1.92k [00:00<00:00, 140kB/s]" + } + }, + "9a3ff2ca0bd447f5abb5041b11196fdd": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": 
"LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "24a7b20b24974d87a4a2e526ad88e3e9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": 
null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ed0b2233f75d4fc4a4ed87e6f93c038a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "c3f0c1d5be76417087c7e4e43783e426": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7851faaab81e413e9cebfeabd49735e3": { + 
"model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "113d1a2b848f4c8e852ec8857ef2f3a1": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f57c32394adc43c0ac6a978bdfb7d61e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + 
"_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "faa9d965b36f4283bf35e640c2649577": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_23d9fac6961243b58fd15b167d514021", + "IPY_MODEL_e8306633dec4482888a2c93dab1d69e6", + "IPY_MODEL_9a76581a68c14cbe9ef90f19f61cbf79" + ], + "layout": "IPY_MODEL_22055e255223496fab6c96b23803406b" + } + }, + "23d9fac6961243b58fd15b167d514021": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_0a86749f2a8140cca4f167ccde8d5dc9", + "placeholder": "​", + "style": "IPY_MODEL_6145a9f19faa40168cf1054e3d5d5861", + "value": "embedding_model.ckpt: 100%" + } + }, + "e8306633dec4482888a2c93dab1d69e6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + 
"description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_8500f64b6b534eb689d6f8a9d536cd0c", + "max": 83316686, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_21a6c936feec4a878d26d171ad3897f9", + "value": 83316686 + } + }, + "9a76581a68c14cbe9ef90f19f61cbf79": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a0b9d575dab04492bfb401997257fcc2", + "placeholder": "​", + "style": "IPY_MODEL_5252430fbff94d51aaf17e50367e42af", + "value": " 83.3M/83.3M [00:00<00:00, 211MB/s]" + } + }, + "22055e255223496fab6c96b23803406b": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": 
null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "0a86749f2a8140cca4f167ccde8d5dc9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6145a9f19faa40168cf1054e3d5d5861": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "8500f64b6b534eb689d6f8a9d536cd0c": { + "model_module": "@jupyter-widgets/base", + "model_name": 
"LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "21a6c936feec4a878d26d171ad3897f9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "a0b9d575dab04492bfb401997257fcc2": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": 
"1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5252430fbff94d51aaf17e50367e42af": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "c2289505161d4d47916376a84f63d153": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_5210f390c42347499c89da7c2f8f333f", + "IPY_MODEL_cc457b0647f544e2a76d5345ef28f3a7", + "IPY_MODEL_6cc81cbfc6ab418e885c84ac2028d55d" + ], + "layout": 
"IPY_MODEL_51a28ff42db34514bbdb9ceda0bc57ef" + } + }, + "5210f390c42347499c89da7c2f8f333f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_b1ebfe23106a4cecbfa2496a1883fc16", + "placeholder": "​", + "style": "IPY_MODEL_08f227fe1888468682630133dba8a389", + "value": "mean_var_norm_emb.ckpt: 100%" + } + }, + "cc457b0647f544e2a76d5345ef28f3a7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_5d03389878fe40179850c0279d3afd9c", + "max": 1921, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_71b55971c0e9446a978c2014f2f24d2d", + "value": 1921 + } + }, + "6cc81cbfc6ab418e885c84ac2028d55d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": 
"IPY_MODEL_d20ce2bd6b1040a8b700e0b3249089ba", + "placeholder": "​", + "style": "IPY_MODEL_b0564fc611df4282ba0a3730e6c5d2c7", + "value": " 1.92k/1.92k [00:00<00:00, 204kB/s]" + } + }, + "51a28ff42db34514bbdb9ceda0bc57ef": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b1ebfe23106a4cecbfa2496a1883fc16": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + 
"grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "08f227fe1888468682630133dba8a389": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "5d03389878fe40179850c0279d3afd9c": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + 
"justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "71b55971c0e9446a978c2014f2f24d2d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "d20ce2bd6b1040a8b700e0b3249089ba": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": 
null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b0564fc611df4282ba0a3730e6c5d2c7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "5ff5a61c0a954485866a7a92b8186cff": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_5fd6afffa3c64d4fbf81623e9f02de0e", + "IPY_MODEL_86756fff0f424fdbbaae947b78c770e2", + "IPY_MODEL_9c10c16ecffc4f53969230b4994804fe" + ], + "layout": "IPY_MODEL_984afd65bbb04202a09060500bd23b0f" + } + }, + "5fd6afffa3c64d4fbf81623e9f02de0e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f0dd418cea0f486080356da09a1310bc", + "placeholder": "​", + "style": "IPY_MODEL_109cb7412ed6490698aca150e1ecf86c", + "value": "classifier.ckpt: 100%" + } + }, + "86756fff0f424fdbbaae947b78c770e2": { + "model_module": 
"@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_1f7f9b00af8c46aea9cc7b943246d0fc", + "max": 5534328, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_fb8c1d51942a4fd78357cec0c0530808", + "value": 5534328 + } + }, + "9c10c16ecffc4f53969230b4994804fe": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6808a5f59fb34e37be4d2aee4bbd7eeb", + "placeholder": "​", + "style": "IPY_MODEL_9165a52e0949480aa936ae42040bd3cc", + "value": " 5.53M/5.53M [00:00<00:00, 143MB/s]" + } + }, + "984afd65bbb04202a09060500bd23b0f": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": 
null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f0dd418cea0f486080356da09a1310bc": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "109cb7412ed6490698aca150e1ecf86c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + 
"model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "1f7f9b00af8c46aea9cc7b943246d0fc": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "fb8c1d51942a4fd78357cec0c0530808": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": 
"StyleView", + "bar_color": null, + "description_width": "" + } + }, + "6808a5f59fb34e37be4d2aee4bbd7eeb": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9165a52e0949480aa936ae42040bd3cc": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9b437811fa164e5ca2b8eb9131338f89": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_61f6950cb76643bc860e7a43e7e7bb58", + "IPY_MODEL_842094e522cc42a58dc9d1785bd9314e", + "IPY_MODEL_59bb0e1afd4247d78f254ff09ff354ca" + ], + "layout": "IPY_MODEL_bd327f7a96ea484bb4913d9f64cc31eb" + } + }, + "61f6950cb76643bc860e7a43e7e7bb58": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_260f1edf6acb478599fdc410072517b9", + "placeholder": "​", + "style": "IPY_MODEL_033c839455d8408fb85e4272f744fb69", + "value": "label_encoder.txt: 100%" + } + }, + "842094e522cc42a58dc9d1785bd9314e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_9178f1efb3f640d5a08280e17c384b9a", + "max": 128619, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_dcb14ddca33545139272eea73ca14248", + "value": 128619 + } + }, + "59bb0e1afd4247d78f254ff09ff354ca": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + 
"model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_481e5a2f44db4757972410ae6f4baddd", + "placeholder": "​", + "style": "IPY_MODEL_b978f40aa89d4cdfba13d7731dbee766", + "value": " 129k/129k [00:00<00:00, 13.4MB/s]" + } + }, + "bd327f7a96ea484bb4913d9f64cc31eb": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "260f1edf6acb478599fdc410072517b9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": 
"@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "033c839455d8408fb85e4272f744fb69": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9178f1efb3f640d5a08280e17c384b9a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": 
null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "dcb14ddca33545139272eea73ca14248": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "481e5a2f44db4757972410ae6f4baddd": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + 
"grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b978f40aa89d4cdfba13d7731dbee766": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/docs/tutorials/tasks/speech-enhancement-from-scratch.ipynb b/docs/tutorials/tasks/speech-enhancement-from-scratch.ipynb new file mode 100644 index 0000000000..3c2c1aaf52 --- /dev/null +++ b/docs/tutorials/tasks/speech-enhancement-from-scratch.ipynb @@ -0,0 +1,789 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/tasks/speech-enhancement-from-scratch.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/tasks/speech-enhancement-from-scratch.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uo0JP7a5uFp7" + }, + "source": [ + "# Speech Enhancement From Scratch\n", + "\n", + "So you 
want to do regression tasks with speech? Look no further, you're in the right place. This tutorial will walk you through a basic speech enhancement template with SpeechBrain to show all the components needed for making a new recipe.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "trcWOmpnxQ0v" + }, + "source": [ + "Before jumping into the code, let's introduce a bit the problem of speech enhancement. The goal of speech enhancement is to remove noise from an input recording:\n", + "\n", + "![SpeechBrain-Page-5 (1).png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAiMAAAA+CAYAAAD5/RvdAAAG1nRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDYtMTNUMTQlM0EyMyUzQTEyLjY0N1olMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY4MS4wLjQwNDQuMTIyJTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE0LjcuNyUyMiUyMGV0YWclM0QlMjJVQXJlajI4c0pxcVlCRmdFbVlBaiUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjJsWHh0VUF2TmhxeTJhOHFLODY1QiUyMiUzRTVWZmJkdG82RVAwYUh0M2x1ODBqRUloSm9FQkNEcUV2V2JJc1g0SnNFVm0lMkJrSzglMkZNcFl4RkdpNzJ1YXNkcDBYc0xaR281a3Q3Zkc0b3czaThwYUNiVGdsSHNJZFZmYktqbmJUVWRXdXB2UGZDdGpWZ0dHWU5SRFF5S3NocFFVZW8zY2tRRm1nV2VTaDlNU1FFWUpadEQwRklVa1NCTmtKQmlnbHhhbVpUJTJGRHBybHNRb0RQZ0VRSjhqcTRpajRVMWFodHlpenNvQ3NKbVowVVdNekZvakFXUWhzQWp4UkdrRFR2YWdCTEM2cWU0SENCY2NkZndVcThiWFprOUJFWlJ3bjVrZ1ZvdnlBSE9SRzRpTHJacmtnMG95YmJDREZHR3lrc1VBN2N4bDg5RFVBNko4UXVCU0l3WTNYRVQ0Y2dTSzhSZFVEUXhMbHBtRGQyb3NmQ0lWVlBZQVhHWXdjRnpteTklMkZFQ2xmVGwlMkI3a0w2SiUyQlFaOTdncFU2WVdBcHFqZTBYekxxbVBwWjh5WDdIYklud0x4djElMkZwbnREWFdQa2tZVklod3U5eGs0VFFHT0JqTjAxVURlSVJtRXBSd2hCTkFKYXFPeTkxWGQwenU2b25XYjd2UzY0Q1BVbDNYVlhTZ092YVFOVTlwR3ZYSTR2aTRJVFh4azR4MVcxWjI3WEJIRkhkb0xwJTJCYnBaU2VHSVRNbGFKc0ZjZGdUckNvZmtwSUNUQUtFc1I1WGVGOFV2eENaS1l6OUY3N0tqd244SWxtOUg5azRaNjBmcHU4YlFlQVdJOXNlemw4JTJGQ0wwMzhiclJmNk81c0VTemZVYndiSmJPa1VjUFl5Y1dhQmZ3T3
lQTkpOaWVXYUdqMFprMWR0MVp2ZkRhUTNxJTJGdG92Y0N5QUw1ZjNrNmR6VUtDaTBuNHFzMGV0RjIlMkJHbThNJTJCakNZNjBQN05ocU1Db0pYeFhYR2VKaHVpMzBsREM2RkNpJTJGQ2lLSEhMWUFWWFBCNnQyY2g1bGZ1Umprc3U2Q2NZNFdvMzFTSVpoaG5rbEM2OHNkSVF2OSUyQlJVQWVyNFppU0NnTFNVRDREUjIyYUolMkJYak1SRGxVZVpqMXFiQ1NGYndjc3JZbXduU2p2SUdMbkVXclhSdHpuamNaR01RbUVsM2lJTTBFQm90bGI0T2JNVVljQ2klMkZOVDdyOUJtJTJGQkcwJTJCUkhHQTRJSjNlJTJCbyUyQlRaRWtPdXpuekpLTnVob3hyVjVUWlglMkZScUxOcXlVNzNZTGtldTFOOTV4VmxWZXhlUms3ayUyRndEQ2loS1UwNVFxJTJGM2E0eFg1dDRlbGZMOEtmSDB3UGpJdkg0eG5kVjI1T2hpQW95RGhHRVRWRyUyQkQzRkJKVlBuJTJCM0t1b0hGUkxyZjZVSSUyQjF3UjVuJTJCa0NQc3ZhMklzUTRmUXRPMjZpYkVSY0NYTDh3MUpNUlZWQXhwRXJ1WDlUQk5qJTJGVmdUYzZIWCUyQmZrbVptT05Qd2ZSZUw0b0J6MGc5V3hsbktzelo5V2ZMNWR2aWVkZ0MlMkJvVTY2dnVXUFdaTkdUVVc5MnY3NFpCR2l4UlhCVGhYVTlLclRKTyUyQnYzUlpndmZnZnZ3ck5ERjg2MExWdDZ6WCUyQktJVGlXcjdNTm5tJTJCYVRiUHclMkJ4MnRuazM5SjNYejZJcVg1ZWdmVWFUcEs3Ykd6JTJCR09ibUElMkJvTlZYM2VQaEUyczhkZldkcXczOEIlM0MlMkZkaWFncmFtJTNFJTNDJTJGbXhmaWxlJTNFEpL5TQAAIABJREFUeF7t3QeYnUXVB/CTAkggEAIJ0lE6UpQqGulVeui9S2/SQUCliyggIFWQqtIioffeBBSQmgASpBiKoQWBQL7n9+ab5c3lbvbuZsvdvTPPs88me993ypmZc/7nf87M7TV+/PjxkUuWQJZAzRL4dMwL8cFLl8bY126JT8c8H1+O+7Dmd/ODnSuB3n37x1QDFox+c6wR082zdUw1YIHO7UBuLUsgS6AmCfTKYKQmOeWHsgQKCYx+aJ8Y88KFMWDOoTHt4CEx1fTzRZ++/bN06lQCX4z7MD59f0R8NPr+GDPqmhiwwA4xeLnT67S3uVtZAo0rgQxGGnfu88hbIYFxY9+I12/dIKbsN2cMXmivDEBaIbt6eRQwGf3cGfHZ2FEx2+rDom+/Weula7kfWQINL4EMRhp+CWQB1CKBV4ctE9MMXCpmmm+nWh7Pz9SxBN4ZcUF8/N5jMdcGj9ZxL3PXsgQaSwIZjDTWfOfRtkECQjNffPhGzLLoYW14O79SjxJ48+kTok//WXPIph4nJ/epISWQwUhDTnsedK0SkKz66rClYp5VhufQTK1C6wbPCdm8dMe6MdcGj+Wk1m4wX7mLPV8CGYz0/DnOI5wMCbz9+JEx/v1RMXihfSajlvxqPUpg9HOnR6/p54xBSx5Tj93LfcoSaCgJdCsw8sQTT8TAgQNj7rnnbqhJyoPtOgnIFRk07y7Rb8Yluq4TueUOkcDYd5+It0eel3NHJlO6Dz74YEw11VSx5JJLTmZN+fVGlkC3ACMfffRRTDPNNHH99dfHnHPOGYsvvnik61F69erVyPOXx97BEnjxoulinlWuyyGaDpZzV1Q/IVSzXsy//Qdd0XyPafOcc86Jb37zm7H22mtH3759e8y48kA6VwJ1AUZeeeWVmHbaaWPQoEFVR//UU0/FQgstFFdccUUBRBZbbLH4xz/+UQCU+eefv3MllltrKAk8f36vWHDtxxpqzI002OdvWCoW3Dnf+9jWOecU7rfffoVOpo8333zztlaV32twCd
QFGHnkkUdipplminnmmafqdDz22GPFYr/44otjwIABseGGG8avf/3rWHnllWOppZaK7saOfPnll0WfW9Pvzz//PLyHDs2l8ySQwUjnyborWspgZPKk/sUXXxRgZMYZZ4xvfetbsd12201ehfnthpVAXYOR//3vf/GNb3wjgBXMyAknnBD9+vWLHXfcsdgA6667bmy11VbRp0+fbjWBL7/8cjEO1Gat5e23345PPvmkCFPl0nkSyGCk82TdFS1lMNJ2qWNFRo8eHYccckiMGTMm5p133sJJzCVLoC0SqGsw8vzzz8eCCy4Y9913XzDgjz/+eEEFrrnmmrH77rsXQGT99dePRRZZJD777LMiXtm7d++2yKFd3/nvf/8bM8wwQ9U6X3jhhRg7dmzB8PAkai3YIWP8wQ9+UOsr+bl2kEAGI+0gxDquIoORtk8OMPLSSy/F4YcfHhzHOeaYI84888y2V5jfbGgJ1AUYueSSSwpUvdxyy000GcCHDO0///nP8dxzz8V7770XU045Zcw111xx1llnxdZbbx3f//73Y5VVVom33norpp9++gKQTDHFFBPVY9NUC4kADelZOSvNPdfaFTJ8+PCCtalWsDxCLfpqHLWCp5NPPrk4SbTTTl+/AVT4ptZ6WjuWRn8+g5GevQIyGGn9/Ja/W/XZZ5+Nn/3sZzFu3LiYffbZ4/e//33rK8xvZAlERF2AkUsvvbTIF2GcZ511wvdFvP/++4EZ+c53vlOEZPz93nvvLdA3FA6cbLzxxgU4gcz/85//xP333x/zzTdfkV9SLlgF9Uw99dQT/R0Y+eCDD+Kiiy6Ko48+ungf+PHcO++8E/379y8AyqhRo2pKlLUhgZ4bb7yxACMYEGGmMlDQF88AFgcddFBcddVVNS1EYGS66aaLn/zkJ03ASh/JCaAS8iGLXNpXAhmMtK886622DEaanxFODp1WqVfoTAw1vQaM0L/vvvtufPvb344//vGPrcqFq7f10J79aS/ntj37VM91dSkYsagZ2MSMCGFsv/32hbzkSAwbNqwAFhiQzTbbLG699dZYYIEFCtBgIyy88MIFePj5z38e7iC55pprikSq/fff/2tgBGqvzNEQ57z99tvjmWeeiSOOOCJOOumkIvyDufj73/9ebEKndbAZa6yxRovziLIEZDA6wMjf/va3IoRUBkGPPvpokYiq7b322itGjBhR0+bVt3//+9/xq1/9qqk+DMwbb7xRMEOAlb5iXHJpPwlkMNJ+sqzHmjIYaX5WMNEMKp1aLg899FB873vfK/Tk008/HYcddljcfffdMWTIkMIRo5tbk5xfj+ticvtEbpxFBzMaXRa1yrLLwAiDbLIYfMzHrrvuWhhxBloRdhF/lCOxzz77hLtGAJQtttgiMCkWPOZit912i2OOOaZgTRhnCZ4ARQIAkj4BAKgdq1IuWIXLL7+8SMLSvgRZlCNQBCwoNlitYMRxY+EerI1cl7/+9a9FX3gRqdisvAjA4oILLig8i7LnITu92mb+xS9+UQCX4447rmCQsEPqJ0PJvZ9++mnxs8EGG9Q69/m5GiSQwUgNQurGj2Qw0vzk3XzzzYU+5fSVi/ue5LzRzfQS/XrbbbcVjiJniXMEqCQj3GgMgfGWDylkMFKbguh0MHLGGWcUi9uEARUW77bbblsAkuuuu64AA7PMMksBRtB/q6++evHbXSSKi3VuuOGGptHtvPPOBavBsAtlbLrppsXzwMDVV19dMBOYlFVXXbXYWHfeeWcRspl55pnj4YcfjiOPPLIw7iuttFKcdtppxV0mDzzwQLz44osFg4Gp+de//lUTM4JNQVPajIASZgd4KIORxOK4U+Wee+4p2BxgQqgFOCIXQMhpm8GDBzeNUz8wSZtsskkByMjDu4ALACM8BXhVnvMXKlK35NdyP2pbHvmpDEZ69hrIYKT5+eU40WM/+tGPJnqIA8eh2nPPPQtW+g9/+EPB0MrXO+qoowrninPnXYb4td
dei9lmm61hGBN2gy1gx/xkMFKbDul0MLLNNtsUF5dhBiDv3/3ud7HRRhsVi/f888+Pyy67LH784x8XrAi0DTj426uvvlqMiEH9+OOPm0YHnKywwgoFCBBuEYq54447CsAjH0POBjCifv9mwN1T4rQLMATAAACQvrwTuSNAgtwOLIfTOu4yaS5Mk1C/2Cqq0jE3SaaAgzpOP/30pjwYnZYnApDpg2uU9V+bgIdTQ7yLK6+8ssiX0WdgSgHY1ClcVQYjwkyAC7CF4QGuABTABDuEPXF9PoBDTrm0TgIZjLROXt3t6QxGmp+x3/zmN7HooovGaqut1vQQQytkTO8deuihhe4699xzC93jioV11lmn+A3AYIU5Qimsw9linMulpZu06VXPVB5K6Kx1ZrzueAK0ar1CwjtYcmNlj8pgxGdKa++Z6qzxdmU7VcHI2WefXYQ/WioWCWDAaNdanJphKE3UyJEjiwxsQAMAARSgboZaWEUIQmKrBFKGuFpZb731CvCRAApQAQAAERgUBh7IwFBoV8jH9cWQ+m9/+9uiPeEPBhzQEeZw4+vrr79etCnm5+w8xqVyMQIVTgCpS7vHH398nHrqqQWb42/AFUBxyimnNC1I/7d51XfttdcWLMqFF15YgBF9dJEbdkYsdpdddin6Q74Am02B+dhyyy2bmJGDDz646Je7V2xcfdUXIAVA0R+eiRAXDyZd19ySEmhpPrExSR6ApXnEJgmFVSYKt1RXPX+ewUg9z87k9y2DkWj2FKEQDP1Gn6Vi38sRoZOw2Jwfeq4MRughP048coQwzZxK4e/KE5OcLjahuWvkMbt0VVewutqVHsChW3755QvdBkS0FHYiI6H9dCCjHK6iK7Hy9DEbkctXEmgCI2UBM2g//elPv4beIFsJp4y0mCADbqKcaiFw3jdDCP0xgD43gR9++GFhSG+55ZbCaMp3SMV7FqIQijgbYwswuEtEey2VFVdcsWAkUtEvYAb7IqHURvEMxkJ//AYiIH6bzbFhwENhRC0URtYYtW8s+iL0YyORkx918RCWXnrpYjPJ17A5yYMnIQwDZGCCJNQaI1kAIRgf7wBjAIPNbfFiRHgjAIs+AGg8Df23GZ3AIR8bJCFunoliswBv+gDA/fOf/yxkDXABJjY14EKxYITU6282hL5LgKVgtGMeU9JammtyVT92yXP6rV7l2GOPLdqViKx+TA0ASQEp6eixTWq+yRWTJTSXcl1QutpKCbjpllpKzpx0VclgpKsk3zntNjoYocuExOkT+9u+T6f/hGDovDIrbA/vu+++hX4DRA488MBCnzCu3sOIvPnmm4U+55w5YMBZ/O53v1swJBL7y0yBPEF6gl6pLPrGwcGKADWV1xfQHZ6p9m4tq6clUAFQcQzpcUeWhcL9jXPL+Wou/OIZaQNYbno59ZtOO/HEEwtHHyipPPVZS5978jNNYAStZMEocjDkUKSTK8ICFqFnTAgPG/NgcTFiQ4cOLQyGiXMR2V133VV4/f6+7LLLFl9wx+ADB1gB+QvVislda621ClCAJSmfZ2/NJCyxxBJFtre4prwNRdjGRsM4CFeIdwIl8lTKBZoFpDzLaFpYNhYG47zzzisABObEprLZjAWwsaHloxgrQ29jy0kBCBhy7I86hKbKYMQmAzZsYHVYrDvssEMxdqwIsMDQpwVtc5sHi9y7NkoqwIqkMnklZA2oAJYYF2DIeMkGAEtHj/XNJgF+GP5lllmmqF9fgUahLrKkMLAswkZkZF6N0TMAFLDmt7WiLfFmISXx43RfDNCoj0CQhOPEUllD5t11/0CwdQfoAGxCbE5TdVXJYKSrJN857TY6GLH3hZPpFUyykDTHwA+9Q8fIy0uFHXDIgNMi3w9LKzePnqSj6Ar6Mzl7wAx9x3hL7LfPE8uhbfqSPqj2nTZ0IAeHY8OJrAzVcF7UwQFubVE3m0S/Ngcq6CXsDp1N37EbxuNvbGRzYZsERqQCYLTTc2QntMUppO
8zGJl41prACEND4AAJxsDCEr5gFFBpWAthjieffDL+9Kc/FYZHspLwAtBhERKyhclQuheEYcOKyOWQcMnAQ8kmpbnCAJlEG6StYIRnnow3FqGyMJBAAvaAsSsXnwEYFn6KVwJa0DeDC6BA64wuxkMfjdHn+p7AiAXsfXIArvQJYyGcVAYjFqrPbEjMCWDC09B+Chlpi2IoF2EsMifvcrHR5aHIcD/ggAOKI88AC9BhzMCITag9m8FcGA8WxtxgPRzpc0IJcLRpfKYkRgVo2nvvvYv35NyYL8oC8HEJneRhbQJVxqf/PAtMCBZN3wET/QE0gBeemdNQgBPPQT3yZMxHLSHD1iqjWp/PYKRWSXXP5xodjNj/dDy9I0/PPvTDGaVzOB7ASNKnnt9jjz0KZ4Jet7fpoMR60mV0Dd3iPUww55S98L01dBN2XaGb2RoOTrXvtKFb2Rh1C1lX3lgNLAjJpzy51qxAuvkvf/lLwaLTtcZXZl60zfF0wjLdd0U/cTj1iePtvTJTntoXBcDIc9ToyRSCMg56Ut4ge4pVz99y/NWsNYERyh/FBgnzZN2RgUpC2QMZPFpGyGcmB03P22ecTSLDYwJMMDTos5TMxBNvTWFQU6ywNe+15llomKG0KFsqQJQFZhMBWv4NZPDkGcuE0IENR3WNGyhJJQEcRt27wAijXwmU1ImB8DkAox4AQZhIbktlMWfYl+YKOhXwwG7oA28FujcOzAkww6tAKdoYNjdFBMDYcNgpCkc/eCA+o2yEkzBOAKMQE+/IegA4gFkKDFBzOgkTBdhglSgxl7bZlNqQ74NdorzIg2cmzKNf1gxvSc4OUNVVpT3ByLhxX8QUcy5bdSiDZxoYC8wzV+y942ax8Tqr5Az8TprwRgcj9BH9nlhUOkwIhj6wf7GfQEpiJehLianufGJQgRF6ouxg0q3qoRc8y4FlH7C+9nxiMugbhhlrjRGtLAw9BkJb+ofNTQ5qCi85LMBJYp9qPbXiXay5vui//3N4ylcs0HeAB92ImUnOUnKc5L+QCTvHIS/nyXFm6Sx6DZhJsiMjsvYZ24EBBoZymSCBJjDCMEOeaDp5D4SPbiNAHioAAiliATyHxpcIynAAKRaf51O+AFQI3IjrSWDqzgXYSghYyMbidNSWwUxxS+OzmP3f5gJKKgvGAC0IYFQr2rHYLWaLG3jBINg4bSk2iY2PwVDfD3/4w2JeMVZAhtgnr0TOh/AOxsIRPRvNvGNmgAb9AF6AEmO0kTAmWA4nj9QBhAAegI6xk5GN6LQUsKZN+SsSdAFYa4Y8gFfryXuUliJ8Q8EAger1TFeVjgAjS3934fjJ1kObhmRtvfmfd+LP190Wz774chyx745x7CF7dNWQG6rdRgcjdI2QO+BBp2FO7UsXKGI07GX7L31buOeBAiEXeW4cnkowYgGxB0IanBV2AXsurEOv2N+KPBMOFYZU+KIy7GFf0BdYdcw98MCBo2PpRLYI4PEZh63W0y7albPGqaJf0p1P5UsxgS4gTCoCwKFdsgBSsODp5mtRAixS+V39A3LoNQCsDOSE0B1o8HfySzqvoTZdM4NtAiNAhVwBYMLC4U0DFMAJdoRXzctmnCRoovKxJBac3xarRCeTkwpvF/I2OT2liH3yGiy+1nwPg4VpA9kwPIKWCmCCFZCIi0Yth6wYbu0DGZMq5gRQEG4zD8CIuZLXYx61wfPAQOgfgJAYKe3xajwPgEgslS+U8ogwHECJNvzYVD6jpJyxp9T8uGofBUw5AToUgRNOFAt2JV18l4Cc8SQgzNPhOVSGolqSXXt+3hFgZLP1Vo8/nX3817r56WefxdJrbRsvvjQq3nvuzug39deT+tpzbOr64osvi7XVt2/3+Obr9u5vo4MROp4+45DQA3QCMCJcytHgYKQrEqwXz3NK6B7GFJDAXKQjq2l9MsCAhDAMRwYzglWm04Q4FDlqQA9WBBBKp1VSHeqUy4KBAVakESgcY6wFxxBjLB
yU7jep3B/VTg3SfXQKvYqJ5mQDVRj9VNgsrIuxY6wBqsSic+zYRLISuqcLhehTEYonSwyTk6mJcfE+1li7wjV0JhlKA6hkdehrutFnjfK9Y01gBFJLWdEoK3dg8ODF9BkoCxQtT5AWmYvA/BCqv8lRcAqEwYE2LRKLyIJiiKDbdBx0UjkjJpRhLE9+WxUw9qY5FqItdWItjB26lf+QjhMDGBZ95YZMx8C0le5WaY7lKD+b+oZF4ZXYxGWZYTzcDHvTTTcVeTypJAbH/12khuGw2W16cwiMmCubjnwBSRuZh+Dv5i4pI+gfAMWApGx185jawJYBpcI+PCv0I2UB0FJUlIccEqEZINWmNB5rRj6JvumjPgC51od1Y8NSDurG3lh/FFpHlVeHXxezrbpa9K343qLUXmeCEW3ud9Qpcdr5V8TIB4fFPHPPXnTjo4/HxtG/PieuHH57vPX2uzF4xoGx3hrLx3GH7hEzTD8h/q48/tRzxfuPPflsDJi+f+y85Qaxzqo/iu+vs31cc8HJseFaK8WNdzwQa2+zb9zxl9/H8adfGPc+8kQ8eN0fYqnFF665nYuvvCFOv+BPMeKVUfHll+OL8NK+O28R22w8IbdIqeWZd//7fvzspLNi+K33xeh334sZZ5g+1lzpB3H8oXvGLDPPVNQzqf62x5roTDDyxwMPiPUPOTQGDBrUHl2f7DroK7rBvmRQhSQYQUACo0l/C50ymCnplM5jeOkBoMI+9U5lfh9daW9jR+kDtkF7nAv6RsHMJjAi2Z2j5710e6vn6SDvC8MACPQMvUJ3sDnaxcDrJ0e6smBogYJyGAXI4gwBGXSW+jng8jsSA0SvpmsjvKvtpIPTaUt9pd8ANIm2CVBgbOgtup79SXWSE7ZXaF279KqQPNtQBiPpFBFHTj5MtXFN9uTXYQVNYISgIdx04oEHyysW+5f/IMHIyRNImbcNnJgAxigZMgDEMxYKiksSE6/cyRWhGnSaRSm8M6ligfOey0eA2yI7YETuS+UdJSmHo7k603XslaAJ5Yiu4zXYABI3yQM4sYAqx5WOy2lPTgV5WuRYpcpikVq0iZlAAzYHRtRL7hKqPJ8KYGgOsBhkDdXbeBQOJYIRcbwauOFxYC7UQRE4IQUooBBtNhvdWLFb5iFd/AOQAh/oVX2QQEbOlBkGBNoHZK0XffnlL39ZeF7q5QEBN9aDjHR/M27ARHtAh/rI1Hve15Yjhh1Vbtty02LuvrXOevGtoRt/DZR0NhhZZdPd496Hn4iPRt4XU005ZcFcrLTxrvGPZ16IXxy4Wyyx6ILxz+dHxlEnnx3fnmv2eGj4hQWr8c57Y2L+Hw4t/n3iEXvH7LMMLkDNx2M/iXseeiKG//G3sc5qP4rb73s0Vttsj1h5yNIx52zfjNWWXzZWX+H7BaippZ3rb7sv1t1u/yLMtOFaK8b48RFX33BHXHDFX5sATy3PfPb557HMWtvFyH+9FsccvHt8b5EFihDV4SeeGTPPNDD+cfsVMfU3pmq2vzMNbP0JimprqDPByPCN1ovevXrHGwNnik2OO6HLQQngbw9yHCSI0gHJYbRXgYGUM+F0jUKXAiN0BAcWe1DNuaRD3SXFwcViACWeS0ynPUcvYVs5dhJdE0iROAsQaT85pp5XJ51IhzDedJLf2AZOEMa4kmGgVxT2ih72w0YBC4y88A4wwrnjNNHT6hCS8RmnDRgRniqDkfSN75gh4Sx61liUlC9JZj5L4Ep/2Qx9ohONx6lTDm5l8iw9qE42x7ONUCa69Ixhs0AYS0czITb5AhAjQ2YhWBQ8VoZKbgQDBpV618IVY0uJnLxdaFcGtfCNOFpLYMTEMaryCyDM1habxCJAoznRYrOke0Sg+3RVO9QvnMDQViaTQuEMvgWZ7kCxKSxYdCIDarFYvE6ToNKMC32p/xausaMR05l7bABqEChj0KuBEcyFhZyubvdumRlJ4M7mc7wOaqZQkpzMAbrRgudBQOW8CD
FRm0dYCfgQTrGJyIMCoozMI1lQILwAG1IYxYb0N+AUvYk9AWYoJKdt5IL4kWDm78AW5gYd632KRtEPseJ07A2NKcyjbpsUy6MvFJx1JV9F0qu6Ky9Kau2amNTzr954fbx46cUxPsZHr+gVc6+9bnx76MbR5/+Zko4AIxiKc08+oqlbRc7I6Hfi3EuvjTMv/Evss9PmcdoxBxafX33DnbHxLgfH5WcdF1ts8NWXNf75ultj890Oj6vO+1VstPbKBfDAitx8+e9ijRWXK96VMPvd1baIZ154uQmM3P3g4wXoWPEHS8ZdV33lFNTazj4/OznOueSa+OSVByZSoKeed3ksttB8Bcip5ZlLrroxtt3nqLj0jGNiq6FrNcnimhvvjI12Pjgu+M1RsePm60Vz/W2vNdCZYOTiQw6KGUa8MMFg9op4c+Cg2PS4E2L6LmJK7DsJ5MA+Xc8JSPf7CJkw1nQn/ZFOwMgnTADEfhbeqWSE09wAFHQOm8DLZ8zlpjnBiBVR2Ip04aNQjv3OnmiPHvS7pTA/Q45h4AxVhjSwEE5NcpjpRI50OppLXxojfcPg01kcIvNDF4kQkFElGNEGx9179LC26Trtk4UrFMiV7XN7OLupHfucbSGPFHKi3x13Tv32DNtFj7MX6ctba82Haa990RX1VL2B1SKw6NBUjCsP2+IkQIIyuRYo7xgoYfSxIUAH798C451byAy9JEpoEKABBgAdiLPy1AkBWHwMvXpQhbUUgMHCZwhtKkbWgoIsASAxS4vEBjDJFqHLyOTJ6D/DbPGlYvEYg+Nr+gPVW5QYEONSl0Wk/5Bviokav0VDZpA6AAN1AxEACOPNwJbBiH6r28bGVhi7/0PoZKxf2kQpYjDUIdHUQgUUxRVT6Me82QjAmMQu/7cZGHiUII/DZ94XEjEOMiOvcmw1/ZsSSCGalMCL9QCy/AAwNpF/U2iAS0L85A94pKNrgE6qS3/JlRyAmtR//SI3jI33zBNZdvRGvGuHbeOzjz4svPzeU/aNXuN7xdzrTAAlIy7rFwuu/XXwWMu6rHxmUqdpPDtwwHQF44ApSDkcux58fJx/+bB455nbY+rSxVBjP/lfDF501dh1m43izOMPia33OjIAlLEvPRBTTNG3qemTzvxjHHrc774GRn591H5xwG5f3d9Sazu//v0lcdAxp8Xh++wYP911qyK0UllqeWbH/X8ZF/75uvjgxXuj/7Rf0euf/O/TmGaeIbHdpuvEhb89ugmMVPa3LfKv9k5nghHtX7PBujHNFH2LtTZu/ISrwd+csWtACf2ApXQKsrLYv/QX55QeT2AEo4Apx7QKodivzV3BQDfQW/QXx85zHAtghA1Rt2RYIICedvyXveCYsC3a8FlLYETfMamcqsq7SLAyHB8Om1MscuSEjOnCxCqzXcYHKBibfvuc3k93MPld1pHsGDuBCTYW+XX67xnOND1Ob2GY2Ex6lkPGkeQkSnpVJyc13e6dQvUcPn0WHmMzsdyN8G3sVcEIIfHK03W14m6SG6HWcrFIGHjoM53XJnRUWKUB4fFDnRCihcmIM+4mJoUtvAf0MJ42QPKqmzudkvqiLT82lz4KoRgDCtJGwwYAHihDi9Hn+oG6A0ZQlJAt7x8IYTgZSeED43FqxuLRv7QgASrHUAEei5lBV7e+SuIEUBh+jASAZCGi5FCEfqdsbOEMfXf8lfGWi5GuEbaxsB9kbHNaoDYX4MLDABbNS7ollVEHHD1LkaTYo7HZ1MAC5WA+tdGW4l0bww9520D6aRND+LUAB/3yHjDjKHm6oVV/0mflvt2ySecef2Mo+vTtE4OWWjq+MeDQdgcjWIkj9t2paYhyPA47/oz43XEHx147bDrRtMjvkDfRXFl/jRVi2IWnxOqb7xlPPTfX6W9oAAAUPElEQVQi3nry1okeTUxDCtMkpuGyM4+NLTdcs+nZWtsBqPY8/KQCIHHwl/7ud2LtVYbETlus35TnUcsza2yxVzzy93
/GmOe/uj05dWbGhVcu6sXyNNfftqzd5sDIq7e07Zuuq63V1vbLWvty/Jcxqnfv2O3Ka1v7+mQ9T89hezl+1QodaW/SWen7aegVDlD6GpCW2Gv7nJGn48mLzqX7setSAnj+9Cp9RG/SH5gL7AsbxEFp7pLMcp+xKnQsVjiFatINqpzNxMqwAVIIMO8p7JJCQhiadHO4I8cJXFSTTbqXJLFCQBZbpu0ywEvgiC3ADmF8fS6kr30nEjE2mHR9Z6c4ofL62BChH/U2wtXxzX5RXtlAMNgWX3PXckNywhotZf3KH0mJPpCskAdjanECNpgFCU3iZIy1idCmBYnaqiwWtgmTu8AQp9tV0WY2GnrNgtcm5I3NkLCpDYsExSaHAjpn8LFB0LD+GI9NIo7pPRRc+Rx6iq2mC8/0TbtkAcAk2lDoAhjRnjYkfQlZQb3CPQCMTQ9opO97EK6RdOXzlKXttzHwJlJsk5dijPog/ENJpKuKy7ICyPQXkzO5i7p8PXy5jUR5TpZ27KKXy8xIn4JV6FhmpPI0DSUtyXTkK6/Fiw9cOxHbsM62+8Xt9z4ad19dPc9KouqC885d5IE8/fzIr4GRYTffHRvueODXmJErzz2puM8klVrbSc+Pev2tGH7rvXHrPQ/HLXc/XOR33HDpafGDpRZrqnNSz6y55d5xz0OPxyevPPi1WR+40Mqx7BKLxE2Xnd4ERir7215LpSuZkS/G87Yj3pxpYmYkGbjERjJwlQmOtd6pUZZT5cmS5HQx+s0VDKUwPb3o38AIY9kcG1KtnnJyPl1H53H85ITRWQnUpRARA81wAyVsQS13QSVjj63hUCp0PxaEg6YN+pvjmY4sV/bV5ww/hxajAbTUUpJDRqfT08CGEHS5AFl0L7DDAWWTjJeTyOnmmGFZhIbYMVEB49YnLAwHVh2Va6Gl/rUVMHuvFseypfZb83m7fGtvrQP2HCQMRfKkedfAhMUClUo2FQZgNBWCF+ND/ZevPU8DFMpghCF3TIWz4xgM9aXi6nYgw+JiqG1A7EVarFgYYQFACutgkQjPCC357Uf8LzEjkxKutixIQEb+jAxtNFuaVBsQRQmdO5sPSKDzjE+fABYgDYPhSHV54alH6IVHYUNB0fJUhEj8Wz4HtF1tAQEjQj+YlMkFI61ZXN3h2VdvuD5evOySiPFfRPTqXeSMlBNZOyJnpNrR3gf+9mQMWX+nIkxzzq8ObxLd7oeeEGdffHW888wdVUMi6UH5I9fedFeMffmB6NOnd9P7KWRSyYxUGvda26k2p6+98Z9Y5sfbxuILz1+wGbU8s/MBxxRJr5XjknDbf77li3yR8085skeBkYsPPihmGPli9O41IUNJeKZaIis9xaHh2GAOGSXOVgp70GGcC/qUcadXsAfyr/w/XSGQQtdYCU4dfavOdLKETuAscQibK9rEVgMQvHYJ5epoa6GfsLMcTLo7jaNcH8ADCAhJY7VbOn3pXeFcoXfPChlzboERR37L49M2EFBtzOQlPIMtEhrRdq2Frk6J/ELrUhcqSwJl5Xur/Nu8pBM/nFJhKnZCPzHu7ImDJEASBon81UWXm0Nzby14Frizdsw359b/U+TAOuLMqs9aSSDUu8lZZve8y+ms/IblWmXR1ufaBYy0tnEMCQFBoRaeBZNihYy5EISCIbBxIHP0VmUBRhhnSNcEQu/ehSLLYETMTmiGgG3MdK2wPBbUHiBgYgEh4MQEYoPUbfK8ayG09DXWwjqUAZRrfJ6vzIRWt7PpckfQo2KuKW5qgdkEWJj0PUFpHDYYqhRoS1nbwIg8HmflATJAphoYwZxA5BihfP3wxKuonk7TSFS99qa74283XVycmlFSmOWUo/cvcjRS+febo+Pok8+Og/fcrjhae+IZFxWhnvuGnR9DlpnwHVPCJUussVU8/dzIFpmRWts54sSzYrZvDoo9tt9kIkGuMPQnxcmdx26+JGp5JiXg/v7Ew2K3bb+63+FPf701ttj98K
aE3RSm6QnMyPCh6xWAol5O0zBadElK8K/Ur/rKM+foSHr3/3RfEAPos0mdePR8ZXIro4vxoNcZe05bYpnT99twHDmA9C+9y+C2VDhkUgHUm3Sc+px8TKfx6EYAR4IsZ7Bc0sEHwM6/5Z+4pG1SJY2P3iYXuty7mJFyDqI6PIvpVr/cuXRIgL1ia4wXw6/vmCc2yTOccY47QNhS5KElGdX7510CRrAhDLWFbHHy+CFWC7PMsqDrAAm5H4wz5GeBmSTvYRAkHslFYeT9X3Y3FiEVwCdNauVkiNNhXCwCiwQ48r48D+hQaAgQsenSWfFJTWg5hwKI0a5FVFkAEX2Wb6KtxMx4DqhANVZ+D0P6csL0bbqelWdi40qSxf7IL6kGRsoht3pfkJ3dv3q6Z+TlV1+PhZbfOJZcbKF44LoLCu/H0d7lN9w5/vbks3HoXtsXQOP1t0bHCadfFB+NHRvP3nNlDJiuf2AnFhgyNAbNOEMcf9iexW8nbAASoZSWmJFa2znwl6cWd4wctPu28cOlFy/2610PPha/Oeeyol19rOUZ/Vpu3R2K47wSdhdbeL546tkRxZFl4Orh6y8qEnF7Ehipt3tG6Cc5as3dcCwP0CkUzpW8D0VIAQNNd/OwAYBqIRuG02kS4ECIH+DxHN2GsUj3Iwlr0K+MuTCFfwMKdDmQwknVT7ofKMEipNBOWVfQhXIv2JUUwtIemyL0QQfKaRP+EPqpHDOmBvvg3hB956S2FCKSVuBUjX5izoEcYMKhgXSJG/ZBnxxicPIysSfCR/oJKKVvdU9hGH2Vj+h6C+w6tr4lR7iz9WZHtNclYASNCA2inBhjd1RIWLIAy8Vm4NWLLQIHaCOLGECRNGrC/d8JGXWaQOCjjCC1Y7FUy0b2dwAEVQk8aIun4Nhv+jI3dbalpHhvNTSLnnT9MiYHDVsGEDa5BV3JYAghOW1THocNJRPbOxglwKmno+e2zMXkvNNZYZrUR4b8lLMvLU6SbL/ZusWfP/zIpWdnF8d8HQF2gmWVIcsUl57NNfssTcO74/5H44CfnxrPjXilSCbdc/tNYpEF540fb71PXH/xqbH2qkMmadxraYei/NVZF8elV98Y/3rtzSIkNO/cc8QuW20Yu24ztFCwtTyj0/99/4OCRZHX8va7/42ZZ5ox1l9zhTj2kN2bLnPrSWBkctZhR7zLuDO6mNp0JUEZWAAech+AkXTPiHwGOpHO8hswqXa0lx6i39UNZDjiq24GXEI9fe09+pauPfDAA4tbWukwxhdT7XOMB0YbWNEmhgYLXj7dgo3m4DkQUak3hdmdOgRqMOacWqdqgIGUF6FeDAVgwDFVgCw2pnypZJoDa5xTKPcPq02O8vewLkCDEI1wFlZDIiwnVf85nX7L/ROGArz0CZCp1NucTyEj6QeVLHlHrIV6qLNLwAijDySkmJQcivRdBmWhQMgSmeQ8COUAGkIhULaFIqTiWJUFD30L21RLdJ2UoOV5pG+vTaDA4se4COkkj6A9J8v9Jk4gAWG1JglhUfTT5kwFSMMq5dJxEmhPMNJxvWy+Zjehbrfv0XHPNefG8t9foiu6UNdtdnYCaz0JI116hgEQBsfKctBSQqZwg1AJg5luAZWozrgLI8hXYzCbu/QMyGBUhS8AAPUCAUCHdziVDC1GVzgbo81gYzOAI4CDQafzgRaOF2eMEyd/Rf/paKCHYwoAVOpT9oCjSW+mUDvmIt2VQr87iqstwCGBAkw51oYtKCfgmj/PAGnC5hxZTI2rFuSaaB9gMxbsNsACdCSQLlSOfeGMAmZyKKtd1qZdzAibVgsrX0/rqq196RIwYuL9JBTbHBgxqagt6B3wgKYZcn+XJ+E9/5a4pEDfznu3ppTvv2jNe5PzrAQ1wMrirbVA+MBI+sZL7/E4nNTJpeMk0F3AiBDPYSecURzXddw3FReLXTHslvjPU7cV95jkMrEEGhmM0MF0CIaD7mRA05d0YgrSl8hhP5JBFDLBWA
MPTvVhEqqddsEcOCkCWHA+hUoYfnoMS6FwSIUt1M/AC9+r3wEGtkH/5MQBRI66yv/wvTSepTsdeJAnpx9YY8CgEoy4ywigSbegahdzoT5girNLh3Jw0ykcz+gHoCVFwOfklFgjbTjt6TdghenG9pCXvrFT6XABsFH+ojwgCnDBapMJGTVXKk8/9fS92yVgpFKozYEREy7JSawPGIEg5XmkxCa3fjqbjtpTUHVOmnSHot/YnVqLTSzeWmZGan03P9d2CXQXMPL55+NikZU2jffGfFDkYMwx68xx272PFPkdvqOmfONr26XR895sZDCSjC6dyfAy5hgB+tRdRhL8hWiqgREsA8dPTly6lj2dwsCeODGJxRC6oOvkVgAlcjbSFQkMvpyUdOkYR0t4BKOQTpxow+kRIRgsMJZEqAbTwBZgSVz06PlyvkhaqZhzz5dzLtiPdLeVO5mqHZlNJ24AtXSJZQpHeR6rIx8GmMGSGEdqgzyAO2Mpf2svNsjzTjViPtI4e96uatuI6gKMyIcQsqlmnIUzUFsWHUNs8fsbChD1112Li9/SEeNaxuCoF4RfZkZqeS8/M3kS6C5gxCglsTpRc/t9jxSgxPfTbL3Rj+PI/Xae6FbWyZNIz3q70cFI+noGuQ8ML+ZB0qSLwoQJAAa6NxnaxIxgLBz35SBil4ED1xEo2AjMCkMvqVSCvR8sihy5FAph3AENRhpLUQ6HpH9jHfwIbWBUsAXpfRcwpm+Ub+7elWq5e9qVC5Jud27uXWN1F5XTLcLkKX8EmOIoe9/fABEAI5V02Zov/Ezfs+Mz7QpZCTkZS1vuiulZu2/i0dQFGJF8ia4q02Spm1C1rGynXGRve87zWAJUWHctNr1L22otKEnx1FpzTGqtNz83aQl0JzCS57L1Emh0MMJAAgocPWDE/RsYZuDAfU1CFEIaCQDw+gEDOpmh9RUVcko4SRgIRfhFfRgX92akE5Lp1tLyLAmPYFSqXTngPZ9rU15IZZInIOKZjmKLgSRhK0yKUBA2Jd195VoGAK0a2PEeG8XhlMuSxuZZwMZJ0PTVGK1fsT33jboHI+nbYiVZOf5r4THM4paykLtrsemBq1qLJCx3jOTSuRLIYKRz5d3ZrTU6GGEg5Tj4DVjQqUAEfeP2aXkQwEgq6VIxjAhm2tFgBw3o5nRbNNaAsZWg7+TIpIq26fTmTgGmq+DLt1+n+tJ31nRkgie5CKlgPjA0+kkeTvw0d2eTd+h3hyMkoJYdSH0GYvKpx6+viroHI6nL6DxI3cKFwGV3C1vkkiXQkRLIYKQjpdv1dTc6GDEDjCenT7ImxtlX2gsxVAMjnsWE0McYEidugA8hGMBFfgkjDKxIOJWw2VKp9QbvlurpyM8lsMr/MG6/hakmBSgwTg5XkGcGHrXNTF2AEcdyocxJIVxIUwayOKTYJAosg5HaJjk/1XYJZDDSdtl1hzczGJkwS/IcfDEcIyuBVXhEuCaFacpzycjKYRMml1Phu1aEL+T2OcrriLC8CPWl+0m6w1qYVB/JJ90BhelgryaV8wFgkRMblcFIbbNfF2DERJvYSeVDOMvtAhi0INSZwUhtE5yfmjwJZDAyefKr97czGJkwQ4ynO0DkODglQsc6JYLhkNBaCUbkUbj3w0Vh6c4OhxBcHe9bzuWTON7bqDluGYy0fufXBRippdvidu4YQQdmMFKLxPIz7SGBDEbaQ4r1W0cGI1/NjVMw7jJK+Rm+E4WuLZ8U8XQ6VOBECOAhrJO+pkJOiQRWx3obFYgkcJeZkdbt+24DRixwm8BlO1CnM+aO9+aSJdCREshgpCOl2/V1ZzDy1RzIE3H3RQIRt9xyS3HlAoBSLu4RkaAq/FBLiL3rZ7nze5CZkdbLvNuAEQhdzC5dS9z6oeY3sgRaL4EMRlovs+70RgYjX81W5Y2f6VvVJ+X0dYfk065aj1gigC3fJ1LbDHQbMFLbcPJTWQLtK4EMRtpXnvVWWwYjzc+IY6huZH
WnSC6tl0AGaq2TWQYjrZNXfrrBJJDBSM+e8AxGmp9fbDSvPnv2PXsP1MvoMhipl5nI/ahLCWQwUpfT0m6dymCk3USZK8oSmCwJZDAyWeLLL/d0CWQw0rNnOIORnj2/eXTdRwIZjHSfuco97QIJvHjRdDHPKtdFn779u6D13GRHSuCLcR/GS3esF/Nv/0FHNpPrzhLIEqhBAhmM1CCk/EjjSuDVYcvEoHl3iX4zLtG4QuihIx/77hPx9sjzYq4NHu2hI8zDyhLoPhLIYKT7zFXuaRdI4O3Hj4zx74+KwQvt0wWt5yY7UgKjnzs9ek0/Zwxa8piObCbXnSWQJVCDBDIYqUFI+ZHGlcCnY16IV4ctFfOsMjyHanrQMpgQolk35trgsZhqwAI9aGR5KFkC3VMCGYx0z3nLve5ECYx+aJ/44sM3YpZFD+vEVnNTHSmBN58+Ifr0nzUGL3d6RzaT684SyBKoUQIZjNQoqPxYY0tA7sg0A5eKmebbqbEF0QNG/86IC+Lj9x7LuSI9YC7zEHqOBDIY6TlzmUfSgRIYN/aNeP3WDWLKfnPG4IX2yiGbDpR1R1UtNDP6uTPis7GjYrbVh0XffrN2VFO53iyBLIFWSiCDkVYKLD/e2BIQshnzwoUxYM6hMe3gITHV9PNlYFLHSwIA+fT9EfHR6PtjzKhrYsACO+TQTB3PV+5a40ogg5HGnfs88jZKQFLrBy9dGmNfuyU+HfN8fDnuwzbWlF/raAn07ts/phqwYPSbY42Ybp6tc7JqRws8158l0EYJZDDSRsHl17IEsgSyBLIEsgSyBNpHAv8Hg/MDNt8gfV4AAAAASUVORK5CYII=)\n", + "\n", + "The problem is very hard because of the huge variety of disturbances that might corrupt speech signals.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "E-WDlb5pytsF" + }, + "source": [ + "There are different ways to approach the problem. 
Nowadays, one of the most popular technique is masked-based speech enhancement:\n", + "\n", + "![SpeechBrain-Page-5 (2).png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAowAAABjCAYAAAARvlGoAAAIGHRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDYtMTNUMTQlM0EyNCUzQTAyLjM2OVolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY4MS4wLjQwNDQuMTIyJTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE0LjcuNyUyMiUyMGV0YWclM0QlMjJhSU9kc1NtN3JVZDJzWkFud2hfSyUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjJzNDhodlVxZDdlcjlZQ2Q2VUlObyUyMiUzRTFWaGJWJTJCbzRGUDQxUHRaRjclMkJXUnE2QndCQyUyQkRuQmRYbXFadEpEU1lCbHI4OVpQU2xMWmMxSEU1TXg0ZmxIelpTZmIxMjFzdTlNNHl2V0pnRlk2cGg4aUYxdkRTQzcxN29XbE4zUkMlMkZNMkNiQTZacDVVREFzSmREYWduYzR6Y2t3WVpFMTloRGNVMlFVMG80WHRWQlNLTUlRVjdEQUdNMHFZdjVsTlJmWFlFQUhRSDNFSkJqZElZOUh1YW9ZelpLZklCd0VCWXZxdzI1c3dTRnNBVGlFSGcwcVVCNjcwTHZNRXA1JTJGbW1aZGhESmZGZjRKVCUyRlhQN083VjR5aGlIJTJGbWdKWWYyQUN5bHJaSnZmaTJNRFpnZEwyU1lvaHhsSjV5TVhBTDhjYXhDdXJlTUpFUWlDNFJaMXNoSWklMkZTNVFtWkM1b3QxMG5wV2F2d2JGanhhbE5pUUFZejJOOWMyaXMlMkJTSk5QbTYlMkJmTU44aTRvRzJ1QXBrNW9XQXhTaCUyRjBYcGRaMkZwcjdtdk9PVlNmQXJrMzkxSnQlMkJhJTJCUXNxbkVWY1NxWDVMaUVTVUxRR3BYbE5vVlNBZWhiR0NJNDVZQklpUzVielNkQTNQYW1xZVl2dSUyQnI3Z3E5QlREZFRWRkI2N3JBTTN3a0tHZjF3d3ZnNXBmQ3puVjBsWnBMbGNxVTNGMWdSckdzVmpNWUUwbTVEd3J3bFlXQXExUFF1c3lvRFFnYUIwakpuS0ZpNlM0aEhRcDl0Z05HV2p3cjhTbGklMkY3Tm80NWFlSDQ5Zlp6M0FiVWYlMkJmcjVWJTJCJTJGM29QM2FuMCUyQk5OejRLSHR6UTZIYWkyNGRCQW0lMkJmUjRQYndPJTJCQzlRWWJsc0kzdW9ZZnpkR0xQbXROcmp2S3E5Mjh0NTlobWdEZlQ2JTJGR2c4VlVnZE5SJTJCS0xmM3VuYnpXeTRNTmxkWjJMMG5DdmM2U2VVekpMekhoTnF1aVYyVUJpaUZESThDVEZIOXlzQU16Z1JmTGZ6d2xLa1hGZmRIenRST2RVSzBkNnRFTU9xbDRoJTJCWENGcTgwU0ZXTjlRSWNiSEJJRThRWTV5U1JrUGFVQkZ3dlpLdEMwWUpQSlFkbU5EckVxWkVhVXI2YVlYeFBsV01qMVljMXAzb284SjZWQkMyZTVGM1hjZ2dpTHYyc0tCYlBzazc5MHQ1dG5pMGl5VzNiUzYyZDFXVnhQRXNQQUhZaExNRGN1c2VUOU93bmk2WmxCS1dhZER4e
EFCSEclMkZxVjUwS2hEdzZvYUxTSzZUWU5Hc2hQd3dsQnl4QVhCNDZpT1plaTA4RjJQd1JBUzREV2NadVhndmRIeHBJbzFFUHBQbTVRTFlZQTl1SzJDb1RpTTglMkZzeCUyQk42dm5TJTJGMEQ4M0g2aGRabFd1VUpmVFRMcmJKJTJCTlZ5QTYzekRqWGI1azdWSjFSTzg1NHVreGlCZGlzeGR6TEVZclRLTUtjZWMzbiUyQkh1TW1IVmp5bjhnSDA4Z0J3JTJGWTUlMkJZTTdwQWxSMExPc2oxeFE0Z09JZ0VCbEcwUzh2djZBSWlLQjkyQWUxZjZnTDJqeUNKYzEzZ2ElMkZYdTVGQmVnTjlEQVo5MXAlMkZPSGpaMjJhVUJvT1U0JTJCZGpvSXVJcnQlMkJhYWlXcXFtQXgwaTElMkZhJTJCTW5iYW54czdUMHluWHg4N0YlMkZid1Y0Q0hrMm5hYVFHbDVhakRqWFk3bUxVbkR3JTJCdmtUY2dOalFZTVdiTm9lWnpwY2VaTjd1WlglMkZlQ09IaEF5eVFKcjF0S2JLZkxxTjN1TDFid0RiaDNUeXFiUGwyNVlPWTklMkJTbkJiS3pZYVJzJTJCT1d3eldnJTJGZkptUSUyQldHeCUyQnglMkI1bSUyRkt6RW0lMkZrV2FPTzRIenZEd2ZUSGpwMyUyRkE4RTBmd1RCSEJGNlklMkZmelpZb3BtbktWWSUyRlQlMkZpR09LdDk5emFaWWVHQW9IQVJlUkNZM3hyb1hxWFpkeUxvcEZieGNDTGRuUE9EMUlzemdFcSUyQnl5WlJwazMlMkZSYyUyQm9RbUdYbnhTJTJGcE5iZTl3RWphUHM3S1lZYXRKcWYzenBNeiUyQko5MSUyRjhaSVBPZVczVjNydmJ3JTNEJTNEJTNDJTJGZGlhZ3JhbSUzRSUzQyUyRm14ZmlsZSUzRUdRr7oAACAASURBVHhe7d0HmJ1F9Qbwg/TeiRSlIwgivQuEJlIEERBpIiCg9I703hGkSVepAqKAgEKA0KsIUgQBIfQiLVRpf/7P74sTb252s3eTze69d888T54ku1+ZeefMnHfec2a+cb744osvIksikAgkAolAIpAIJAKJQCLQCQLjJGFM20gEEoFEIBFIBBKBRCARGBUCSRjTPhKBRCARSAQSgUQgEUgERolAEsY0kEQgEUgEEoFEIBFIBBKBJIxpA4lAIpAIJAKJQCKQCCQCo49AKoyjj13emQgkAolAIpAIJAKJQL9AIAljv+jmbGQikAgkAolAIpAIJAKjj0ASxtHHLu9MBIYjMHDgwLjlllsSkUSg3yOw3HLLxaBBg2KiiSbq91gkAIlAOyGQhLGdejPb0mcIjDPOOJFHmvYZ/PniJkJg4oknjrfffjsJYxP1SVYlEegJBJIw9gSK+Yx+j0ASxn5vAgnAfxFAGN95552YcMIJE5NEIBFoIwSSMLZRZ2ZT+g6BJIx9h32+ubkQSMLYXP2RtUkEegqBJIw9hWQ+p18jkISxX3d/Nr4GAbmLQ4cOTYUxrSIRaDMEkjC2WYdmc/oGgSSMfYN7vrX5EEjC2Hx9kjVKBHoCgSSMPYFiPqPfI5CEsd+bQALwXwSSMKYpJALtiUASxvbs12xVLyOQhLGXAc/XNS0CSRibtmuyYonAGCGQhHGM4MubE4FhCCRhTEtIBIYhgDC+++67McEEEyQkiUAi0EYIJGFso87MpvQdAkkY+w77fHNzIeA4nffeey8JY3N1S9YmERhjBJIwjjGE+YBEIBXGtIFEoCCQhDFtIRFoTwSSMLZnv2arehmBVBh7GfB8XdMikISxabsmK5YIjBECSRjHCL68OREYhkASxrSERGAYAkkY0xISgfZEIAlje/ZrtqqXEUjC2MuA5+uaFoEkjE3bNVmxRGCMEEjCOEbw5c2JQCqMaQOJQC0Cdkd/8MEHMf744ycwiUAi0EYIJGFso87MpvQdAqkw9h32+ebmQ
iAJY3P1R9YmEegpBJIw9hSS+Zx+jUASxn7d/dn4GgSSMKY5JALtiUASxvbs12xVLyOQhLGXAc/XNS0CSRibtmuyYonAGCGQhHGM4MubE4FhCCRhTEtIBIYhkIQxLSERaE8EkjC2Z79mq3oZgSSMvQx4vq5pEUAYP/zwwxhvvPGato5ZsUQgEeg+AkkYu49Z3pEIjIRAEsY0ikRgGAJ2R3/00UdJGNMgEoE2QyAJY5t1aDanbxBIwtg3uOdbmw+BJIzN1ydZo0SgJxBIwtgTKOYz+j0CSRj7vQkkAP9FIAljmkIi0J4IJGFsz37NVvUyAkkYexnwfF3TIpCEsWm7JiuWCIwRAkkYxwi+vDkRGIZAEsa0hERgGAJJGNMSEoH2RCAJY3v2a7aqlxFIwtjLgOfrmhYBu6M//vjjGHfccZu2jlmxRCAR6D4CSRi7j1nekQiMhEASxjSKRGAYAkkY0xISgfZEIAlje/ZrtqqXEUjC2MuA5+uaFoEkjE3bNVmxRGCMEEjCOEbw5c2JwDAEkjCmJSQCqTCmDSQC7YxAEsZ27t1sW68hkISx16DOFzU5AqkwNnkHZfUSgdFEIAnjaAKXtyUCtQg0E2F87a0P4/4nXovHn3s7Xnvzg/j408+zs5oUgQnHHzcGTDtpzDfr1LH4vANiwDSTNGlNG6+WzS6ffvppfOlLX2r8prwyEUgEmh6BJIxN30VZwVZAoFkI4+8HPx33PPZKLDDXjDHbzNPEdFNNGhNOkN/0bVYb+viTz+KNdz6IIS+9FY8+/UosNf+Msf7AuZq1ug3VKwljQzDlRYlAyyGQhLHluiwr3IwI9DVhHPr+x3HWnx6LKSebJJZZaPYkic1oJF3UCXm866FnY+j7H8Y2a88fU042YQu2IqrjdFJhbMmuy0onAqNEIAljGkgi0AMI9DVhPO6Sv8VMM0wdSyzw1R5oTT6iLxG479Hn4+XX3449f7hIX1ZjtN+dhHG0ocsbE4GmRiAJY1N3T1auVRDoS8IoDP3W+5/FwCXmbhW4sp5dIDD4vqdimsnGa8nwdBLGNO9EoD0RSMLYnv2areplBPqKMNrgctzFD8QW6yyRYehe7vOx+Trh6d9cdV/sufGiLbcRJgnj2LSMfHYi0HcIJGHsO+zzzW2EQF8RxmvuejbeePfzWHbh2dsIzWwKBO588NmYbopxY61lWqtv7Y7+/PPPq7NJsyQCiUD7INBShPFvf/tbTDPNNDHbbLO1Tw9kS9oCgb4ijHIXF19gtph5hinbAsdsxP8QeOn1oXH/o0NaLpcxCWPzWfFdd90VE044YSy66KLNV7msUcsg0BKE8f33349JJ500rrnmmvjqV78a3/zmN+OLL76oQM5VbMvYWltXtK8I4x6n3p7h6Da1rBKWPn6Hb7VUC5MwNl93nXnmmfHlL3851lxzzepb31kSgdFBoCkI47PPPhuTTTZZTD/99B224eGHH4755psvLrnkkoosLrjggvHQQw9VJHKeeeYZnXbnPYlAjyLQV4RxxxNviZ02bi1C0aPAt/nDTr749jhl1xVbqpVJGJuru4gru+yyS+U3+cyNNtqouSqYtWkZBJqCMN57770x3XTTxZxzztkhcH/9618rYz///PNjqqmmiu9973tx/PHHx0orrRSLLbZYy6mM//d//1fVuTvqqHPN3CeskKX5EEjC2Hx90g41SsLYDr3Yt22QT4owTjvttDH77LPHj370o76tUL69ZRFoasL4n//8JyaaaKJAKCmMRx11VEwyySSx5ZZbVgNg7bXXjk022aQ6KLaVyjPPPFO1Q4ig0fLvf/87Pvrooyokn6X5EEjC2Hx90g41akXC2FdjoR36u6fbQF18/fXXY++994533nkn5pprrkpsyZIIjA4CTU0Yn3jiiZh33nnj9ttvDyTrgQceqCT11VdfPX76059WZHGdddaJBRZYID755JMqN6MZvl/69ttvx9RTT91hf/zzn/+MDz/8sFJKrfYaL
VRWbVxmmWUavSWv60UE+spJZki6Fzu5D17VCoTxpJNOip///OfVgt5CvoyFX/7yl7HPPvvEoYceGnvuuWcfoJevRBj/9a9/xb777hsEmK985Stx2mmnJTCJwGgh0BSE8YILLqhWPksvvfQIjUAQ7eq69NJL4/HHH4+33norJphggph11lnj9NNPj0033TSWWmqpWHnllePVV1+NKaecsiKN448//gjPMWg6Cv8iduVaOZSdXdddZP/0pz9V6mdHhVoqrKyu2tEowT3uuOOqHeJbbbXVSI8Vqm70Od1tS17fGAJJGEfG6fPPPov5Zpmo+sWxJ/861t1wsw7BvOHaP8YOW21Q/e7xF/8T4/ZwUv5tN18fW2+8Zpx5wVUxcNU1G+vQJrmqFQjjBx98UKUUGQMTTzxxNU8Lf1oYf/bZZ5XCZYGcpfcQKJtCvfEf//hH7L///lVfzDLLLPGrX/2q9yqSb2orBJqCMF544YVV/iICNdNMM1UADx06NCiM888/f7Vq9fPbbrutWiFZKSGQ66+/fkUgrZ5ee+21uOOOO2Luueeu8h1rC3XOc0xmtQVhfPfdd+M3v/lNHHTQQdX9CKrr3njjjZh88skrEvn88883tLnGgDRpXnfddRVhNGEKqdeSOXVxDfJn1f373/++IYNCGKeYYorYZptthpNfdYQT0iu8DYssfYNAEsbOCeP4408Qiy21XPz28hs67Jztt1w/Bg+6Nj779NMkjHUItQJhVGUqojnKiRalmI/M3cccc0zfDMp+8FZiAb9TP/fza6JxfA/CyEe++eabMcccc8Rvf/vbbuXPtzOMPSUStTNGtW3rU8LIqJGgojAK126xxRZV/eTsXXnllRX5oyT+4Ac/iBtuuCG+9rWvVcTOQPj6179eEbyDDz44nNH4hz/8oVrZ7rrrriMRRiur+pxBOR033nhjPPbYY7HffvtVE5tQNwXwwQcfrAahXdhUwW9/+9td2gTpH9mkjCKM999/fxUuryWq9913X7V5xbt32GGHeOqppxoavOr24osvxrHHHjv8eZTMl19+uVJYkV91pVxm6X0EkjB2ThiXWHr5+Ou9d8StDzwbA2aceYQL3x36TizzjZljgW8uEn+7/+4kjC1KGBHFGWaYocqzriWM1MWck8befETNRXr4vdpy9913x8ILL1z5skceeaRKGbjllltiueWWqwQN/rM7my7HXgv67slwI7oUdbzvatI6b+4zwog06ayyCt12220rooVEKULMci3k7O20007VyhWJ/OEPfxgUSQZPAdxuu+3isMMOq9RHBMqmEKSvkDQTGJJmZUWdrC3UuYsvvrgKmXi/HBzSPeKK0CkGWKOE0VE/QtvUT7mXV111VVUXK71SDFYrPeTv3HPPrVZ/tatDO9o6GsyHHHJIRS6POOKISomlsno+DG0I+vjjj6s/6667butYXxvVNAlj54Rxxz0OjDNPPiZ22uug2GaHvUa48NILzo7D9tsltthm5zjr1GNHIIyvvvJinHT0QXHXbTfF22+9ETN8eaZYdY11Y8fdD4hJJ5t8+HOuvOyCOP+cU2LIs09Xi7E55pwnNv/JTrHuBptW13QUkn7zjddj/e8sE5NNPnlcfOUtMfkUzbnQahWFEc5UrBNOOKHKtTanWbgfffTRbTTKm68pf/nLXyqfRzypLc4slgbAf/IdfOCgQYMqwYXoQGRAJgtp7G9Km/bWbj7t7+S5UcvudcJ46qmnVsatwxA/xrv55ptXoYurr766ImwzzjhjRRhNQKuttlr1t7MaFQePXnvttcPbt/XWW1fqIPIlJLLhhhtW1yNsV1xxRaXwUSRXWWWVamDdfPPNVXh6wIABcc8998QBBxxQEbCBAweGJG1nPd55553x5JNPVs6H4jlkyJCGFEaqJLnfYERmKaQIXi1hLGqoMydvvfXWShVF+ISVEVi4IKt2UVuxl6IeFNkNNtigIs3wcG8580woHjmuP2NLWNyzTeK19WjUQ
PK6xhBIwtg5Ydxj/yPjoQfujeeeeSquvfXhES7ceJ0VY6pppo355v9mnHL8ocMJo4XTmit8M4a+81bsvNfBMdMss8YjD90fp55wWKz1vY3iuFN/Wz1HKHvbzdaJH2z2k1j1OxZLX8T11/whLr/4vDjtvN9XBLOeMP7nPx/FZuutHG+8/lpces3tFRFt1tJKhPG9996rztK1cEUYLfBFkLKMPQQIEHzNt7414lmshBDCxPbbb19F4M4777wqGiXH/8ADD6z6h0jiXnPXCy+8EDPPPHO/UR75dv4a1/AnCWNjNtrrhHGzzTarDt+msFkdnXLKKfH973+/Mt5zzjknLrroolhjjTUqddGKCLnzs+eee65qEdIjyboUBHKFFVaoiJrQsrDzTTfdVJFS+YFyCBFGz/dvJMs5jnYxI6xIJpJmNSYPUi4jIifXkFpoF7azHjsLSZeVmTwSkr/jC2xMQe484+STTx6el6nO8haRZnXwuSb1907k0G5wK8DLL7+8yt9UZ4RXQao9U2i+ljAKqSOXCDGlFAFGIpFHKisV0qcUkVA4ZRk7CCRh7Jww7r7fETH7HPNUG1v+OOj+mP8bC1cXv/Tic7HS4nPFyWdfGk8+8egIhPGF556Jg/fZIdZZf9P47vc3Hv7wnX+yUdz4l6vj4SHvVcdpUSd/d/5Z8chz74+QK/ybs34Z8359wVhquYEjEMYVV1kjPOOeO2+J3/3p1phjrnnHjkH00FNbiTBqstAnlXH33XevIjZZxi4Cv/jFL+Ib3/hGrLrqqsNfhAxJYeKb7FLnX84666zKPxgza621VvU3kikCRlAoIWyiBQJVW7r6qhrf55r6zaZjt+X/e7r2OqcYGW70iD33iAhqK85QSxj9TunuWcm91d6+fE+HhPGMM86oQr1dFUaCvCFWjRa7oZEZHfX0009Xu7aQQSQRmbMyQqaEkIVbbYax6QRZ6qh897vfrQhiIZGIH5KG6FEikTBEkNLnvcLbPpNkNXXiiSdW7xPqRbKQUSFdX5Z56aWXqnfKb3BuFeWy3hgRPzu7Pct7jzzyyHDEBFXUzxBgpM8EWgzS/w1ez/vjH/9YqZG//vWvK8Kojg4jp3LKO/nJT35S1Qe+SLVBQUHceOONhyuMe+21V1UvZ1MauOqqLogkEqk+Vo9W+1aZ5bNQXU0CXfUnBajggfzrR6qssH/95qKuntUOv0/COGrCuOV2u8Yy35ilChPvd9gvqovPPPnoOPu04+OuR16qQta1CmNnNnHi0QfGr046Mu546PlKGTz39BPimEP3ju12/nlsud0uMdXUI+ZyeU6twnj/3bfHheedFr/9/aBYeLGlmt70Wo0wUhl//OMfV3Ma9SpLzyDQWchYuJkP4nNKMTcj7vyGiB0RgS+qJYx8hT9OGyEoiKoRZ6Rj1Z9WQrzgtzv7pKAolvr1RQTLe6WrEUaWX375yv+UuXhUqiGMpJqVjba1oXn+TASSz+THs/wPgeGEsdYgkY7ddtttJIZt9SHEgEjJf0CydJTdygCnYiErGDqS4vc60CSC7Fx//fUVsZF/V4r7GKJwsZwChAipc9ai93VVVlxxxUrZK0W9EE4qpk0oBoprKH/q429Ez6rMYHNkD3KoIDoMBRHSRu/XFnUR5jaQ4OSPZ1nFLb744tVgEoYxOOFhtSc0Y9KkqMrl0UZYIIqUU/cgzEidwc14KYtWjEilOiDRVoPqbzDaWQ0fA6SsiqweFYMFwVYHJPvRRx+tsEaKkUeDGrk0sVBWPdfPDAh1t2nGBOM9+rEkUZe+hqvnU2ldp96eqxx++OHVe21e8nyKJ5JvAlLKsT8Gqf6GK0VYGkLJveRcvKskyJev4Zjk9EmzlySMoyaM2+64d6UYChcje47OWXOFBWORJZaNw477VUUW6wnjoOuujAvOPTWefOKx+OD996px9/nnnwU7uu3BIfHlGWcJR/cc/PMd4/KLzq1s6xsLLRZUxA023jKmHzBMKSmEUXjaM
1f5zjpx+q+vaHaTqurXKoTR2JVDPnjw4CrUZy4v87cxTQCwCUPqjw2BGapu3PzYvRQtc7452NxcTt4QbuaXaiNgxsfOO+9c+SBkcY899qjmfATIfZTFV155pfK5RA4bR4kuCy20UKU06p9asmVvgbnc3F9f1I1QQF1EPOuPdzO/u6ajextBoKvcSqSXwMLXOi5IapafEYmIGJ2RRtdIYxPR4ztLvfkdebcEM8Sx/sSVRurcztcMJ4zkWQajyAk0sMuOZCFQRugaHUKpouAxLkRjvfXWq5y6jnOYtkmDeubnSy65ZEjARcoQOOqafLqOis79zne+UxE3amPtWVLd6YRFFlmkmpzkcMgjVISoDTTKndCs3A7EUd5kbbHiQHZda8JjWAYWJfDss8+uSB4F0qAy2LQF+TSg5UdqKzJmYMuRRNqQLSqqZwjD1xJGgwwhNIA9g7FaoWs7dRGhQ8aKQRvc+oGRu9dAKQWhlOQszxHWyCTyT7lEWLUXNkhyOfZH3QwSBBU5W2KJJarnqytiL6wPSxMGtVKIHEb6VRtdg+Qi1P5mK94lt0b4XK5MOU8TsVdHRNUmpaL2siH97tOPFirsDhlFqqUT2CXf7CUJY9eEUR7jhmsuG2ddeHXMMGCmWHfVxeKSq26NRZdcdiTCePftN8cWG347lv7WSrHldrvFl2eaJcYbb9y44NzT46Jfnz6cMJa3vvzS8zH4hmvijlsGxe2Db4iJJp44zrrwT7HI4ksPJ4xI6tLLDayuOeWcy+Lba63X7GbV9ITRPGPci0rxCRaSZRFdPl5gfJs3nRphXFtUc8jGemefg236junFCpqfpTeZ+0XNpEhZYPvDN/ADcvlL4attHrX4t0dAREo+P1/Gj5jP+bgimiCcfBKCZcOmubiohd7Np5mzO/oGNT9FKCAQEGPqw9IWEp4xOudwejbewAd2RvzYFpWUX+WT+Hbt8TM8prMQdSGMbFT0rlwHO2F84gqfnIRxREMfThiRAYAjjZQ3hiVUa4CTpK0YhXT//ve/x+9+97tqkpA8K5SKGDJCIDNMZMa5icgHdVFuoU0aSJiVjE7prCAJOtEAGV3CSOEqBKv2XLDyTiQGkaPCISS1xe+QQIZfcjOQYSskpAiJtKJCjCiH6qiNfq/uhTAyYPfDAQFWJ5Oj0HktYWSofmdAUiCRR6tB7y/hce8yMdSWsmKHd20x0OVF2hUnj8hxQ0glYqjNCKNB6H0Gg77QHmqmvqEeOqrBznPk3qDxO6Uok4jtjjvuWN0nB1R/mSyQUwep23DknYiv9qm/1R9FkRqt7sij+iCDCKbVM4UCueVMPEfepv5oJD2iF+fvThc7o2uvY1L3Zv7SSzm4Ww4jhVFZbdmvx6KLLxPTTT8grrny0rj5vmHHStUrjPvsvFVc88ffxf1PvB4TT/K/UwYO3OtnVc5iURg7wu6Vl1+I9VdfOuadf8E495LrhhPGQ445LTbcZKvY6LvLx5Bnno5rBj840jE/Y9IXY+PeZlYYzRHOX+RgLf4oWo0USpe0IYtjPoQKlqVzBMzR/DDfILffXOkPUYdfsIBHGIvPc/3PfvazalHO95p/+YkS4eFv+APzv/tEvYg8fLrvTPMfRQHmP/EBQkFH36A25+lDz5ZCVf/1MoROX5fc+u70M/952WWXVRFD/lD7ahVM77YQkStbzmzmQwg36kTAcl9tVLC8X8RT9JHgwZeVcLt28GX2GuA8Fj+dheK705Z2uXY4YeSgSdVWKxQhq0GSrPAkIkgZQhT8TudYSVLNECidiBzoAB2MsftdSa6laHWnID0lL6I793XnWk4KmWGUXRVEl4EZRMiwfyOCFDGEpqyiEELH5Gg34lhKIaGIl3sRRsSsnsx6JiXP75FMz0HihMTlWtYXfUbF7KwISyCHVEJ1sKK0AtMOygCSb+VHmjcwDG4TEZJpwFF5TTjqYZXodyYboXPKLVIvnG4Fyx6QQgsOExgybdc5RZdjoc6axBw8b
lB6h/xTKq3JCx5Wz0La6sVmrGjlkCK+zV5SYRy5hzoijKefeERccv5ZMeVUU8cqq68Tu+x9SHVjPWG0QYbK+MCTbw5/8L9feyXW+O/O6cF//VfMPMus8YujDogBM84Um2zx0xEqsMm6A+OjDz+IP9xw30i7pJ979ulYZ5XFYsGFF4/fXHZ9U38lqVkJozleipGPHlgEjk4xLzn9QTi1fuE7Os9r13v4DD64RIz4GeTcnG2OFelBJIu6x6fZzOLcYqQHYTSX1wo15ivPMXe7lhDEhyPx5uWiCPIJyJMIHUW4viBjlDzvUj+Rq7JwLqF0m0CJDThEo7uR3StCqC7q7/+Eg9oj6Pgk5JD/onAW0aEIEPIxYYKLELZqc+uJQvwK34NwFuxgBGu/499FuxDWLMMQGE4YkSerA3K3kAHwydYApPQgidg8Nc11QpY2j3DuiCTjc33JX8PcEVA5DBJqW7kgxGWVIjzNOE10SE3J0dA+xuz/BhfiWF8ob+R1JLCj4j2MnTEzbgSTEmfgjE4xSAx8SqDnLbvsslW/Un4RQXkeVo5yEIWyKX+OXjDQ9DuFE7FTDwQTcdRGA4nyyFHYUe4ZiCJyiIxqO4wMRLvgEWrvlE9pU49FBpuBhwUGe3KfSUuhVphgEHXPdU2zlySMjRHGsjPaeLr+zsdi9jm/1iFhlLtoB/RPtt+zOhrn2X/9M84+9fhYabW1qvMaHdVj9/Rvzjq5OoNx65/tEYsusUw1Tu2A/vUZJ8Zu+x5eKZsdncPo2J39dtsm9jzgqOodzVqakTCa+zhrY1mUQDShI/VpVJjKYUZQpDdx+uY+5DPLyAjwB8L9yCHsRYnMnT7UQBk035ojkR/F9Yib8LLceMJBPWF0HZ8tfGvRz3eLFAphm/vNwQo1mDChnynJ9SFe440diCCKUiJ4hBB+kN/CF5BSvyN8NLqL2XvZCHGCDyjnFtd+fAMxRpSlxiGF3gsLtiniV76CJiJKja29V/0QUb4HSa4l21K6bFT1c/gVv5S2WUMYET+5awgfw7H6Q/oQSCojdYpahUDY1CFsSW1kcP5mrFaKOqcUqpHVkc5plyLPw8qO8XXnm5wM0wAyYKzauiomUOqazTvCEbXhTuTK+xHBURV9gsxJLdAPCKO+kmeqH73D6pCSp35IXFF2vc/K0/VIosR1+aslr5VSiDh6hz8Gld+ZpCS9m9T84QSEUkxOyKiJwM51EwuVshzeXsi29pTFChJmddcK6kMSxsYIo6ucgfjhhx/EFX/5nzperzD6TOCxh+0T1155abz/3rsx3wLfjL0OOCbm+trX40frr1od0r3HfkfED3+0XZx92nFx9e8vihdfeK4aX7PONmdsuOnWsdHmwz6j2dm3pH2S8JZB18Vl1905/KifrsZlb/++2QijRZ/8ZFEQRYQDiTFOGz1GB0GR425uQEYUPsXCtP4rXb2NdzO+jx/mcyzszdXmbYRR+o4Fu4V6OUJO/V1vcc8/IDzIHgWwHBdT2ogkIXtCzgQBBF4Ejd8RzlXktSOm1EX9XHYhl2d4ptxKSiZCKa1NITBZCBBYRMeEvsv5j/UYF99Wqz7yT+Z9vk/UjViF+FK2S8ErqJfaLjqH9JaIIYEEb4GVVDL+SspYKVLDYEmplX9blEv3i5B5r9A0vwZDaWn16iifyn/5Xf1mn2a0o56o03CFEZsuO6lIv84IpITJMUMiGKgQJCAZmcOs/QGqn8mZs7sXKbAiYCSMiEEhC1Yg5SiWUeUwahTyUtv5o9tQKmhnat7oPJP6p+1WIPLxylE+nBSjrx+QtSSinD3ZmVrYEeGgRlo5GsS1mFEOfYHmz3/+8wif4ipKqLY5DJxSaLAb9PoQYdRXBh18kX0D2SrOz/VdmYys0CwSKIllh5t+LO+gl3t30wAAH8xJREFUOpvkhbitfsn4JguLDhOVyUNOozC0hYRBqT1shiKhbuqoDhYi7IPdGLAmB8+mgrI/E1qzl7FFGK10TXadH
VHSzDmMzd5nrVC/3iSMXdka54gwWETWflnEAhjZMAaM7c42OHDgZUOf68qmGP1AGUI8zEsIS1+VrjDo7XrxKeZvc6d5QPgVUUH2RG/4WKk8SE3ZqMIvIUfmasTPXOqe+hxr/sz8i+ibs/lv70P++QRFFKoQRpsYCSbuK1+JcT0/4X4hZySOLzD3m9/xAu8VbVRPglR9EY1C3GpDxogwUQER5Fc8n5Al37AoqfZUlGP13OvdxU+Wk07UlQ9Com3OKaSP8sm38Mc4QnkmnES2LIS8l++zOOK/awlj2R1u0SM/s6N29bat9Mb7hhNGQFuFlJ2slCATgxWffDwhBzuKrWaoVgikDkAYCtlAEl3DUEjFkmqpW3YkC0uTpRmlUPaoCgOnQtUevzM6YCCMcjHrz3AsOYWdPbN8mq+e2FoZk72t7AwAEx48EEgGVN+ucgyC98nxgycjp87WF0bKaIvCR07vjDB6Ltwl+Lq+FORdH1ADYW3lZeCZcEwilEV5RwioyZkC6BkmAqEhZM6EabAZ6NpKJdYP5WBUiwYEkTKgDhKa4WwyoyRakVlssBd1kRRvdey5VqkIKHuwi83PtBt59D7E0PNg6j73e5ejI5q9jC3CaCJkh3JqrJTriWMSxma3jDGrX28Sxq5szQY387nNcB0Vi0TpTMhgUQ7LdRwwsmg8d/a5QHOIKIacub4qXWHQ2/WygDZPwtamEvN0EV7MpwhbyeErcwN/hzCax2FNhetIpOHnnIdMKKIGIo6uK1EdPo3v0CcEEptjCpG02QZp9f4i8LjeM/kt87w5kd/wN9WOmCA6Vq/UmfsVnIKv9AePQOgQMaFshJFIQnzgSz3DIsPvLDL0m1B8LWG0kFFHCqvQPV+oLUrZYwEzvysEWH35dXXit7SHGk4oqt9ww1d5Jl7g2v5QRji4G/lgIAiNsANWLUyA1SMbDIFRUH6QCatMJMPKwb0MVz5B2fxBNbIisetKqFrOQFeEUcchPkIfVgHdLQYJI7CatVPZYCnnLFqBlc/2WZkJnSJD9RtQrJSQMgZZzog0KBgsWR7JYSyM1yRKktYuYQD1Z7jaTo4v512ZREnsiDPS1RFhpAAy5PIZP/fWKoyFgBt8jk2wsjGhFJz0AdmewVvlWTlZ6cn/MHiE0BFEoWODCB4mIJORfoSFCcRKzYAUMjYg/cwCQpiAColwmpA4DrmJ/kh49nOEmAJKJXC/iUZRD3kx5TgD4QAhbc82SKml6mKCY1fyJ22U8ez6g2S7axO9cf3YIozylvRPOZfSAq6WOCZh7I3e7bt39CZh7MrWLBClsNQeEl2PDMWlHNxtrlQ6+llHiBIsSpi1rxDvCoPerpe50cZAi2b+2GK6zAXUXoSKfzPHl53NSHshieZcoez66FdpB0LFL/Db+gnhks/u9BDqosKflw9L6FtzMp/vfXyVv7tKO0O2KHVEhfrwrcWEE0sIT/wWQaoci8OnaSOfgJTxK4QF8y1/IRoKo3rC6B3syX18pXfzR94PC0fMwRU/8SU53MZ78Bv+Hx4lvM4HO2qo1Ns1+AVfy6c7j1n6VqP5mb1tQz35vg6/9MIIGB25FwGiVDFOAAJK5zJQq0jEETGjKiKGVDQGRuViyMiYEAXGjnQibMioVUH9bmINY3zImOeQ3BspSB3DR1YMKkSIQWH/SKr8DEZiAOhkRuhAbXmb6o88Mb5SGI82OJZAfay8GCUlUbs8ixGpv9VJyf/QfkYDM6spJNPKCNFDEhEsJKiWMKq3ZxvYVD9t93+rKBirl3eS5imBnmFzCkNF5oWJSphbvxkICLNEY/83GJAw0rpVod+5X/hXO2AGr9o8kvJvk0AJR5dNP9RDRNgfJNMg8m8TGnJZVmXwRw7LkQTIaHmW+sIVDohnqb96wY3y6T79BMtWGIhjizCySZNkwcjq27sKcdz3nAdip41H/I5sI2Mmr2kNBHqTMHZla+WrXF2dqVfURIs+80ZnqmN9D/AtFsONfLBhb
PbeqMZbb3+9xhwuIuMEkvpiHuBjiDx8bSGMlDlRQVEl4WJzamdHfpm/+RY+hkDiOgt0hJGf92wbaGDClzp6h0+3wOf/vcPvuiKM6l6OYKo/q5G6SUAgfIikWJRIYeKvSgQNv9A+ZE7b1Nvv+eZyjrC/a/0YrsGXi3ppC1Kn/q4hSvG1fAullu3xhYQNNkhssVHGM4k95UtvZZ4nnKizVAC8RkSvfHRibNpmXz+7Q8IIJOpW+SyOHAMbIqwsagsjQcJMEOWsJKBzavVOnnJmZYDFM0xECwHTMSVE6z7EFMExAIo61dmu41IX7/LH4FJH4WJtIOUbaFQ15JD0zhj9Xj1I4Agjqd/qg4qGKCI3iIxQqfbYDc141K8YJNJrpY2UMmaky7PV1cYPJNLkQtlDYhkiaZvU7u+yg0votvom7mGHVQRLbmD5XJGBRUWEscHJQA0u5NIqEKHXL+VrLEgFcu9aE0nJs9A2gxqhMznoT+8YneJeA8MfeBtA6mkQW4U1Qu7Uy30Ip2Ocypdg1Kf8rrZujR7FMDrtafV7dvjF4CSMrd6Jo6g/wnjqbgP7tIXGN9KA+DVyDJnKmu9LZIBa0+h8Y/7oTA3rSxBgYL41v/Zm4YtEtggoHRV+DF78SvmetLmfkFA+29tVpM78iojxw+ZffpF/FkmUokZB4/v0Id+mjyiAVEw8wUK/s49x1NaZOskPioCVOb18qYVoU9RNflpKmyhjCTGX8Dels3xFTupCIYAdYVPObSz2xB7xDe+uJeGFwPLXVFYLHb+XYub9TgOhfIoaqjsuQcyxF4CfF+b23P7wGcEOCSPwa504UsX4OvtEG7YthNvVTiH5LyXx1GpDyALhYZzIJ4VOgq2cAIRKR3gngzTp1BeGrcOESpCl8hUX8rOBRqZm8N5pdUQVtMnDOxgJqVpOn8kQKTMhWrGoj/YYJHI23EfKrj0DquSRlEO71c17YYFkFvldmBZh9D7vkIQsPG9lIrSNZBr0yGD59qfQtCRgvy87u/ytDVZ8JY/DSlIb1UGo2yRRPolUixXSrL4U0TE16tpPBda+o4QOenMy7S/vSoWxv/T0yO1MhbH3+74RhbGQkBJ5QULqN0WMzkK3fsdwES8Qs86KaIy0Mb7LvxFGhKYzVbGj59RGSPgjfomAIo+cXykL+RIOR6KQK8SRv25kIVEIGdWzfOGHf6YmIuLewccScMpxQfV19XvkjDBEGUQsGylF2OB3+VKEUEpUbUGE+UeElJCDN2gveyBeETiolcLguIYIqHarEzWTEOQZ9bbQVf06Ekm6usfv3deIQNPIsxq9plPC2OgDSsUbGRwaaLWC6VOkqFQIH2OxcrBBRcgTsVE8Uz6DMGftJ/BK3YRtESWrK4qfc5sogbWrQJ/xQwQZFzJlAJYdfoyVmikEiuxS7xiJULSwi7/9ketQFMZR4eJdDBLZlM9pVxe5unSqAUjqt4JyLhayRxbXPnVCKhFpSqDjjGoNz3OEma36DCgrHXmTwsH+Lb/QiqgjA0IYhbmtkMeUMHbHLvLaMUdATpUJuXylIXMYxxzTVnpCbxLGrmytv+Qwjmq8FdvhSwgDBAJREsSBaFFCvPyMMcvnIWDmfiqcnG3/L0eslVQq6h5xhE/0zLJj2LxNdCCsdFa8U2QOyaN+2VjkGaNb+BCRKEIN/1raUfs8pBRZkyIlgtfVySfulV4kFcy1UpiIRHyw43Zq2+fdiFpHbYaXUDTVVRjYuxst/GnZoCnVSypdfSnEufbsZf/WL2UnN3FHSJ4vV0/RRT7f3IzIUmLh71n8rT7U92zBtQg429HfRCL/L1FSdkQU8jy2UhYK7i2iE27iXuKNa3qz9Ahh7G6FKY0AslJgeAym5EUgXMKtCqXNwLF6IhPXF4QRgbIa0YFWWO7F9EtB4uQnCEMD2MAsny+SV0kiR9Z0LLKKQOpAqqpn6zz3MoT63Iv6+ghhmwysRLTP9fW7pzzbpgW5jMIM8ktKjggDMwiom
eW73uUdBpiQA2JddnohjPJKnVOFNCObHRFGCqRVE2U1P3PUXWvt2+u72rWZm176tn/G9tt7kzB2ZWu5S3ps9/bIz0cszPdl42b9FcgMhYtgYDOj/5czb5EUvxvVaSOur08BQIwoh+wBISN+lIha+R41AYaQwkfyjUhRV4WwIVXBc4sf8jwbncpJGPwXEmpTDVGltpQNrci3f9sM6KDxUZXSPr4VLvyteymMtfsWPMO1onqeL9++bP7EKfAB7bVoUncKLt7gGqIWAQxp7yrK2hVGzf77PiGMVEVkiiEzTsqZVQXDrJVnyd7InlxEBAo7Z2A6yX2UOImwciMRMf+3I4waVwpyWjq1vjPkJFAuGQEjQWDdL+8QgxcGRxYNunJO06g6tDanD9H0XkZUX5BFdZb/6F1F4XQd4keyr/8mJ+O2cmTQpch7NHBtrKGiynfsiDDWphc0u0Fm/UZEoKtz4XqSMJ549IHxq5OOHGUX+Byfz/KNSVlvtSVi3PHGi8uvu2tMHjPG9zZLPUbVkN4kjF3ZWp7DOMYm1+0H8CHy2jv72pW9A3YXEynkISrCp6Jt/CulCknrKDyN3NjIhMBJOUNKXcf/UP6QVUUIlw9EuIRk/RuZ428RSWKPevLPiCM1roSxaxvMX8kF5PtLRNL7+H1hXn5KHrxQL5W3vs0UTyqecxXVndjTVThcmpvd0uopSoiIInw2g5aDyKl46mRzqkVRUSGFytUTmfVe95WQs7raw+D4P5FEkcmuBKVud34T3tAnhJEcj7GTbhEm529JoGWAtcVgoI7Jo0DgyK+MGIm00USH+7+dz56pAxHEWpbvPYylox1Mfo4kkvwRPO+ymnPkDuM0aEZ1hMSo+rPktnS04iDzC/9QRIUzakmeQc4w65VA4XK7qGvbYUDZveUeyixy2+4rnCYcQ31apbFBGHfc48CYbvoBHbZr0skmrz7J12j5+9/ui83XXzX+/szQ4bf4bN84X/pSbLHNzo0+Zoyva5Z6dLchvUkYG6lbfumlEZR67hoEDDESlSpHttWSP+RQLh7CWHZwy6/jt/gVfyOPHW0k4iv4YM9GBB2v49lIlo2SfKr7+ET+cI899qiOTOJnECRROb+nHIreIZTeSekU8avdtSzyRiix0bXet0n7cuIH4ik6SByyWxphK3l6nkvpQ94IPAoijAcUYluLOqJHXLFfQAQPjnL+qZeInXA0AYY6aPMMsUf9iTf+tl9AyB05Vidks963EnGEx6XD1UcEe84CmutJfUIYETNErsTf5fSV71rWwmMVI7FWDp6wNTIo7GslxFCEj22XZ/BWSELUHW2OGRXkQtZWFkVydy3jp1wKX5dVW092m/Mf7SxHlBtNWqVGqmft1xEQaepslv6LwNggjH+549GYY655ewRU5PCkYw8egTD2yIO7+ZBmqUc3qx3NRhjVP78l3d1eHP3ry8HdlDRpWSJQJZ8Z8RFaFRZGasrXRmxARMCETOW4IzWdHdyNCCI+QrVIGqKFqCGG7iHOIEOiV9KrRO+QKqogAosUIl38MmJJwCBqEEPkU6o/P4qYshskrd7n8dkEG76tpH5RAMtZknywY3C8C7krxE1UkPrJX9cfa+YaRFoaF0GI4ukoOrmP3o9Ua4tIHlJZjivzDqlbVEyiDvJs30VHB457L4UR72gkAjn6VtA8d/YJYdQp/pSVRmeEUaeSiK2wkEMrHmTLz+Xtuc+/JdIqVkjOWupOqT0fsDv3jcm1EqaRX8bbaLEKQxhrz0CzKrQDO0v/RaAvCeOrr7wYJx51YNx9+83x1pv/jimnniaWXWGV2HO/I2P6ATPGjzZYrfpdKd8auFqce8l1UR8K3mjtbwXlcuuf7RFHHLhbDPnXUzFgxpljl70PiYGrrhmH779r3PiXq2Kccb4UA1dbMw4+6pSYZNLJhj/3yssuiPPPPTWGPPNUjPulcWPueeePnfY6OJZadsXqmkbr4dob/3xVnHXqsfHPfzxSqRtzz
TNfbLHtLiOoqqW+O+91SBx54G7xj0ceiimmmjpWW+N7sdeBzjyduMcMshkJo8Y5UsQJExb0UodGp9iciEQ49aEVvhc/Om0c03v4SfM8pZB/Q3KoYcLIFDdkTj4dFbGQFuFh0TkED7YUuY52MVPg7ABG/og4wsLIGV9D7VMIO0K0no+ESSfzfBtT+W/1k0ePtLIJ+Yi+I+1a/s1GVrn16iFChrzVE0bn8SKd5Wsr3ksB9DyEl43xc4SisrvaNeqBDEul8Hs4FfXVO5y04m/kV1SPagovdcMlyqZRhLCEk+GE6LJHETyYjOpTlfW72se0v5v9/j4hjPWgdEYYdbikW3kNCCOWL++wJNr6uohzoUjkCsnbDuJWKOpNJW20GMRyS2oVxkbvzevaF4G+JIyI39tvvRk77XVQzDjTV+KF55+N0044PKaYaqq4+qa/xfND/hVHHrh73HXbTXHpNXfEZJNPHl+ZdY6RCOMm6w6MV15+Mb462xyx5wFHV45jn523iiceeziWWm7FWGm1tWOFlVeP226+Pg7ae/vYYfcDYqc9D6o69eorLo49tt881vreRrH2ej+Mjz/+T5x96nEV4bvyxvtjrnm+3nA9rrvqsthl240Dsf3hj7arnMilF55TkcgjfnFWbLDxltU71ffVV16KqaeZNnbf94iYfa55YtB1V8Zh++0Swvn+9FRpVsKofXLSymHMzoaVV95IoQw5HcKcLnJEvcrSOQKIEb+GHCFc8OPzYG7jpnB0R4SRWkdAkUdfPtFXdtdSIZ1WQg0UpuWP5PohjnIIyxFy3i1HshycTbAQCqbMlZ3E3mFXsHCziBe1UVia6sdfUxt9UML1tfmLpcWihK6vzQHk48v5zM4V7ui4mrKTGpkuH8sooXfXU0cRa4ST2qgd5R3wQMC1hR2Wn1NVXe9EEQpiaWfa5zAEmoIwys8Tnu6IQAndkogZHbLE+P2MlE5Cb9Xi8PJyvE8jbbCF3yqsq68sNPKsvKZ9EBgbhPGSq26Nr84+Z4cgTTHlVDHhhBPF0HfeisXnnSF23efQ+Oku+w6/ltp21+03xiZb/DQmnmTS2HunLeMv11wxQki6XmHc9Hsrxf333B433PV4zDr7XNWzrrz8wthrxy1ivR/8KI7+5bnDn7/SEnPHLF+ZNc6/4sbqZzbp/PXeO+Lsi4Z9/UF5+sl/xBrLLxg773VwbL/b/tXPGqnHKkt9LcYdd7y47raHh6sgFITvrrxIvDd0aNzywDPVs9T3vrtvi6tufCDmW+B/Cr/7p5thQPzu6tt6zMCamTBqJMXJ4cnCmMKNwnOULaHCsriVp/70009XahOnbYMe5caGvVrFqMdAa7MHlc+pysVDjih4Nlo47FpIFKnjHwvpKQoj5c9RO4QWkTQEznFtClWPQomM2Yhi46Q/1Eh59WUsIWDIICJF7asN/ZZ/U+/8EcalTBoz5X4fekDciB2dHb3XUb6/98pNLF/66uxebXWeMpVV2lbJZ0R4CU7u9zN2hwSWUg4MZ5flu9h+573C88Lr2tLIcYFtZm6jbE5TEEYbNsi+HU0eVj52ctm9bMeX61zPAEnKrVoMegePN1pMtnJHGs15bPS5eV1rIzA2COOoEDnxjItizXV/EJ9++kkss8DMMfW008VRJ54TiyyxTIeTayNEDQF79l9Pxp0Pvzj81VTJLTb89gjKnl9u9N3l4z8ffRhXDhr5e+zl5s8/+yzmm2Wi2HDTrePw489oiDC+/NLzseKic8SWP90t9jloxKM6TjhivzjzlGOq3eF2iavv008+Hvc8NuJhyhuvs2KluP759kd6zKianTCWhlJjRIMccSZtyMbB8ok/KpYFvlCfzQOIT/mMXY8B1cYPQmKQOQQcYXQ+oWgaAiesLxwrfFtIGvUMeeM3kSGflJXjSGyg5ClCzZ5HuXSuYDmdpHwdpRZOoWDKZEdHsrnP771TnmL9xhBk0TVjKzKGyArRUySFvamS5fxmx9Yh0R0RU
vfhEYQbaRGlba5FPqnl5VO2bWxa3W5a0xNGhqgzJf06eofhIU9yNEw+rVoMegS40SIp2BmMWRKBWgTGBmE84oQzY8CMs3QINEVt+hmG2e1DD9wbO239g5DLOO10M8RyK64aa6yzYZV3WEqjhPHtt96Ia299ePh99951a2y23spx+q+viFW+s87wnyNr7777ThXyVtx38nGHxp23Dop/v/5qfPrJJ9XPP/nk4yqELJSsdFWPB/96T/xgreVi30NPGGn39gXnnlqFmy+79s5YaNElK8JYX1/v8PN/v/5aXH/nYz1mpK1CGDtqcH/L7+qxTq97EBzl3Pkb+eP3ED0+wZfI5OUhjKWUg7GRclE4x/LYQMp/li+HIfMIkY2XdgSPqng3v9vZCRzls4C1X0IrzyvfmB6bm0LgYsFCQaR0qic87OTu7Nxh9/DBNr1SxWuFGHVGNPPEkZGtoukJY6kyWdxqiuFaJdkRJkSbJRHozwiMDcLYnV3S1Ly77xgctw++PgYPurbaeLLGdzeIk866pCGiVojW6BJG+YSP/v2B2GWfQ2OxJZeLSf+brL/6cgt0izAivxuuuWzssf+Rsc0Oe41gUuefc0q18ca5kd9cZIkkjP15wPVR2xEc4okNHqJrK620UhXm74gwupaiyGdSGu2kRhCFm5FL+Y6IEkJpkwrlt6syup+v6+q5Pfl7m17kI2q3v4XkR0X6KLc2zcIzyWFjPdEUhNGROFYCo1qFWA3YtSTnQh4GKTkJY2OdnFe1LwJ9TRjrkS2Hfxc1ritlb0wI40svPhcDF5uzyqGUS1kKpXHZBWfpFmGkki6/8Gyx6Zbbx4FH/nKEZh132M/j7NOOi9seHBJfnnGWJIztO5yaumXy7uSLIkI2vQgFC02XkHRt5REhee/StuT4+TayUK39AI7RcTyPPD3PK+c3NnXjG6gcfMo5xhRDnGJUOYhIMJzwiCSMDQDcLJtedLSOHVV+nnOUHJBJXrcySMLYWAfnVe2NQF8RxocfvD/OO+PEOPTY08NGmFLuufOW2Pz7q8SZF1xVhabtdr72ykvjkefeH35NR5teRkdhfPLxR2OtgQvFzw85Pn687S7Dn19yDms3zDRSD6rkhx+8Hzff91SMN/741fPMNWutuFB89vlnccOd/6h+liHp9h5Tzdo6BMfmIjl3dv+yTbt/KYU2wdQTRnl9zkV02HU509DmUp8RvPTSS6v8Rkfr9Ne8+CSM3bf0plAYG6m2HAXJ1GT1JIyNIJbX9AcExgZh/Nmu+8V003eeL/vtNderxuAaKywYs80xd2y25fbVuYuvv/ZynHPa8fHWm29UO54nn2LKOOHI/ePMk4+uNpLMOsfcsfK31x7pWJ2OCFgjOYzyFAcuPldMPPEkceCRJ8eEE01UkdOPPvwg5CTa8XzUSefEIosv3VA9HI2zw1YbxDLLrxyb/Phn8cX//V9ceuHZcfvgG+KXZ/8uVl9rWJ5YEsb+MLKas412NzuPt+QL+oaxsVi7A1jNy2ZRO32RQyHs8llZOY42vdiM1F/JIoySMHbfxluGMDJwg8CRDTra+U523mVJBPozAmODMHaFp2N3Fl1y2Xj80b/HKSccGg/99Z4Y+s7b1caXJZddIbbfdb+Ybc55qse89MKQ2HbzdeOFIc/EQostFb+9/IYeI4yejxgevv8u8dQTj1WHZzuPcfd9D4/LLzovjjv85zH1NNPFoLufiFdffqHLenjezTdcE2f88qjqDEhRD5t8tt1pnxE28iRh7MpC8vdjCwF5i84GLETP4elUQySytjhn0aYWodZGUr7GVn2b+blJGLvfOy1DGK2i5CeUzx91v6l5RyLQfgj0JGFsP3Rav0WtvEu69dFvvhbU7zy/6aabKgI5KvGkFTas9BXS1FakOs9bbKwHWoYwNtacvCoR6F8IJGFs7/5Owtje/TumrXMEjC+/OHMxS/cRSDLdPcySMHYPr7w6EWgqBJIwNlV39HhlkjD2OKRt9
UCRN+pYKmRt1a1N25gkjE3bNVmxRKBrBJIwdo1RK1+RhLGVey/rngi0FwJJGNurP7M1/QyBJIzt3eFJGNu7f7N1iUArIZCEsZV6K+uaCNQhsMept8cW6ywRE04wXmLTZgh8/Mln8Zur7ovjd/hWm7Usm5MIJAKtiEASxlbstaxzIvBfBI675G+x+AKzxcwzTJmYtBkCL70+NO5/dEjs+cNF2qxl2ZxEIBFoRQSSMLZir2WdE4H/InDNXc/GG+9+HssuPHti0mYI3PngszHdFOPGWstk37ZZ12ZzEoGWRCAJY0t2W1Y6ERiGwGtvfRjHXfxAhqXbzCBKOHrPjReNAdNM0maty+YkAolAKyKQhLEVey3rnAjUIPD7wU/HW+9/FgOXmDtxaRMEBt/3VEwz2Xix/sC52qRF2YxEIBFodQSSMLZ6D2b9E4GIkMs40wxTxxILfDXxaHEE7nv0+Xj59bczd7HF+zGrnwi0GwJJGNutR7M9/RKBoe9/HGf96bGYcrJJYpmFZs9d0y1oBcLQdz30bAx9/8PYZu35Y8rJJmzBVmSVE4FEoF0RSMLYrj2b7eqXCAhP3/PYK7HAXDPGbDNPE9NNNWmSxya2BCTxjXc+iCEvvRWPPv1KLDX/jBmGbuL+yqolAv0ZgSSM/bn3s+1tiYCNMPc/8Vo8/tzb8dqbH8THn37elu1sh0ZNOP64MWDaSWO+WaeOxecdkBtc2qFTsw2JQJsikISxTTs2m5UIJAKJQCKQCCQCiUBPIfD/g0PtElUQxqcAAAAASUVORK5CYII=)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BOCyudX6zHxO" + }, + "source": [ + "In masking approaches, rather than estimating the enhanced signal directly, we estimate a soft mask. We then estimate the enhanced signal by multiplying the noisy one by the soft mask.\n", + "\n", + "Depending on the type of input/output we can have:\n", + "- Waveform masking (depicted in the figure above)\n", + "- Spectral masking (depicted in the figure below)\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IZ3RXdfZ1RqS" + }, + "source": [ + "![SpeechBrain-Page-5 
(3).png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAmAAAADaCAYAAAABpf7qAAIhKHRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDYtMTNUMTQlM0EyNCUzQTIyLjMyMlolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY4MS4wLjQwNDQuMTIyJTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE0LjcuNyUyMiUyMGV0YWclM0QlMjJKTFZfMnVXMDBpS0NMUkJlcng1cCUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjJOc3NZUkJtUXA0endRWHc5WFpfSiUyMiUzRTNMelhzcXhha2lYNk5mbDQwOURpRVEyQkRqUnY2RUJyQXZqNlpxNjlUJTJCYkp5cXpLdHJMcWEyMjliSyUyQjlpSW1hdUJnJTJCM0tjVGYwRzUlMkZwU1daUHJvWTE1MGYwR2clMkZQd0x5djhGUVdnVWUlMkY0SEE5ZXZBUnduZmcxVVM1MyUyRkdvTCUyRlB1RFVkJTJGRjdFUG85dXRkNXNmN0RnZHM0ZGxzOSUyRmVOZ05nNURrVzMlMkZNSllzeSUyRmo5eDhQS3Nmdkh1MDVKVmZ6VGdKTWwzVCUyQlBCblclMkJmWDZOVWpqMDkzRzVxS3ZQSDNlR29kOTclMkJ1U1BnMzhQcko4a0g3OSUyRkdrS0Z2NkRjTW83YnI2MyUyQjVJb095TzRQdWZ3NlQlMkZ4UDl2NXRZa3N4YlA4N0p5QyUyRlRqaVNidiUyRjliTCUyRm50VjElMkZQR3kxalB2MCUyQjdCaTJZcnpYNGs0U2Y4NEhQcm5LY0IlMkZlN0RISUlxeEw3YmxlZzc1ZlNIczl4bSUyRmJRSEQ4RiUyQmZ2MyUyQlhMQUVSZnlWZyUyQnU4JTJGdjQ3NCUyRkVuR2Y3T041TGR1cTclMkZkNk8lMkJQJTJGMno4bHNDJTJGbGdiNjc2WHhLR3dDbTNYJTJGWXlNc0VFbjlXSWFXcEVWbmpXdTkxZVB3N0UlMkZIYlJ2NzU0QU83R0NUckFWeUhISnU3TWJsNTFKbyUyQmZQenAyc3dYVjJCYzdkeGVrYVRkZnBsdTJWOUZzOEUyWjliTW4lMkJNUW4lMkJNUE50NXNpViUyRlFabGZIeEZ4R3FxJTJGSUZ6dHMlMkJiN0M2bFNOVExQaiUyQkY0SDhHcm5pMFhlJTJGNWpJNDVSbnI5OFV6R1g5MndFYk5zSnR2JTJGR21BTE9rJTJCRE1UelQ0QzhMYThuTkJ5SCUyQjJoSkwzR1U4VkdRR0lVZlk0VnBIZm85bGU4Zk5abUptVm5KbkRjMTVJbFpoTWtEbHpsVTRjb2diS3MlMkZ1S0UlMkY5OHZUbzFWYnJBM3JyWkgyc0VWMkhMRjdSMzF0ZU8wcFBxUzY3SEVXbDM4emtoZlJCQ2ZDbkg0UVhOczJWZzZDNnUlMkJUTUxFa0ZuajJLZldiR1ppVnI5SUQyN3pRRThkcDRXaVI3WmFhT2RreWJ0aHluMzVNaFhVZFo0TDFreHE3Zk9LQnhsQmszdVJXT3F1QnlUU1UxVVZBckxmems3a2ZhMWxPcXBKNWtUbXlXUmVNWVZsV2dqRDhPU3h3aFlvb3JuVEtxJTJCZG9Reml0cVEzMW1jUjVOa24xMHptZnR2bU9XblFacHEzZjR3bFZBdEN4T2NIQ2V3czJGN1dYV3hncUxLJTJGbEpRQ
1MlMkZ6Z2VGNlJZV3h3bWpKM2pQMVpWZ0ZFVjd0WXdHMmlvanZWZVRIcnp4eWpJMHpZekNRbTBsZUkzY3FVak96dyUyRkJNOW1ScmhldlREMjlXYTFJcENFTXlsZFF0QjY3RzFxWSUyRmN5M1hxdktHWmZxMjBQY1Zrb283cHZ2eUhIY3cwMHpRdTlwdG9TY3dINVhGUkJWZFpCaEs1ZlFzUk91ZzlJdEw0a1ZDRHA2NEVJaXQ0MWthSGNPbW1NcnJ0Zmt4UEVTMHZETFU5a3czOEpham5zOHlKMTdmd3V3WUlFM081dGdrMmx2UmVITkM0eG5NTjYlMkJpbHh5OSUyRkxmeGFFWnN6bFlhT2RObW1YclY1JTJGR1JIM1h2Q1NRcFBYZ1MlMkJGR0dVVGZwUUxYSjZITEdjJTJGVzFQekoxSzE3UGtUcnNMdkpqY0VEcXBpdXZZcVY1YnVNRThyU1hMbEU2NWp3U2I4VHpBenN1Z2ZRJTJCekpHTUJWbCUyRmtHOHRodmZrWUtjUDNVS1hybDNHQlB4Um9xSnVPdGFiY3Zub3VlUmUyNVdRc25MNEtJaDdEOEV3TTliMnhSaW4lMkJLcER4VmZTS2lyRGh2dHNzMGg0dXg0eUFXc3YwZUNkOHpsNUdjV3ZabGFLV0Vlc1FkUkVCeXozMVhiN3F5UXBPRWQ1TmhWTm56dzI2STYlMkJyRVZla3pibGlreVFlNEhJa2RpZCUyQjRjUzJQYW9tY0hsRDE5d21EZWxCQVFOSlQxS0RrN1hFc1BMbCUyRnJlNUN0UDBqTDNIWWk0RUhqZnlKeXozUkEzMm50bmJNdEt0Sk0zZWx5UndreWRrejZOVFJRMzN1QjlYJTJCOW1kQ21IakltSU9RdURnWTBXJTJGUVNleVZvZVZWU0huMkgxSW02NDhFZ0FsdEN4eDk0ZnJaUnA2M20wSXEzelJSaGVITUZoYmR2THlzVHIxSHElMkJ6RW0lMkZVRnB5ZCUyQkNFRHNWT1hkMDFiZW5Zbmw4R0xTeCUyRlpVRklSU2FrWmxRZEFKNkl4N0VtMGlQMTFOYnJRQWpnUWY5R3hIUzlFMldrNWluWEd0SHduenVLeEJwdXlBcHY5cHZpSXk2ZmElMkZJWlZIRFYzWkNEekNZbmgyaDQlMkJqWUwlMkJmaGdYY1dGWEpJZmZ3VEh6SUdLYTQ2akdrdVFOQ00lMkZDRDdYcGMyY243MlJWSlVKN0EyV1lKMHJscEoyZkhDclU0a0UzU2ZucE9OVkJBS21pWDglMkZtQXVqaXBINnp3VklmRVJtNEtYTG5PeUh3c0s4NHRXTzcyUWZPTiUyQnZzUjZGSmZlbFJmVDk4c0xzWXR4SjVBcmZEbmRpQ1RISDdzcmdOYmdyNXMwa2pxcTNaTkU5aW9ZOEhhT2lQZlAwdnZtczZFdzlmTDdRdCUyRkhFSUxhMWxVcGJnRzN1MENIRG42VkhZa2hyZlNMTXFEcUsxZXZiJTJCcHVoVHJ0aEhEWmJ2Tjd2NUVWVmIwJTJGMWQ0eGl2RUgyR3RFaU1IS1NSdiUyQjVuZ2g3RG9iUDBHZHdLSjlQZFRoOTdoSFY1T0RpV3hYM0ZvdlBSRjk0OVVGQ2RyckRoelJkaEhYUFBOQnFXbyUyRkI4a1p5dVJ1cmozN3J2bWtjbVlnckhkJTJCbGk1U2lqMHFlaTdtZTRjWmxWUXMxVnduZ1dUa0pRMVUzRkd2N2xiJTJCdEYyeEpQYnElMkJpbTZnRVdwSzJnZmkyVklJbiUyRjlwQWJnMXJvVVZRRkxSMEsxbHJ0WVFXOWRUUDVMM3BUJTJCRyUyRm96WFUzdm10QlFxRDY0ZThWS21uZlk5cXclMkJQdlhUSHZUY1FnUkJGYWNlQ1FPdHJWbFNYTm52aVpWd2tOaUtiT0UlMkY5VUx3ZkM4MGtQdXdXOFBoRSUyQjdxOUFta2VIMlhYZ2t5QWVnRUE1STFCS
GhLT3FDJTJGNE1Za0s3bUglMkJUZXFpOUxJcSUyQnprR2dIVU9pY1FEQUEzdU1GVUxucldudk5kV2M2JTJGWldHbmR6cEczSUN2MjNWVUMwOE5XWVRvR2h1TzI5a2xDOGR4dUpKWUZLSFRHNDZLRmxTYUp5b0NoMG85Wm82Z0hxdVpzQ3RvbVJ3dTdQVUFzU1p1MzZnUUJZUmVzRGNUWjdGNiUyRnI2OFhyVlk1RDVVa0tiaWdvekt4bzdlWEgzVHduZmN2OVNIZlYlMkZrSmlpRG1Ka09jdnVsWU1pZzlPVXQ1b3QlMkZZd0hmNzRVOWkyemVtTVQzUHhFck1hOU8lMkJ3QlBtZWlEZjc4eFNWb0JjNk4zMFpuMUJ1bTBBaXhIVDlvVXUzTnpIemR4akk4SHVacSUyRmslMkJEZDhETkc0Z0ZmdGx1RE1XcEpqcEJVUFBiYUZmcGx3QyUyRm9MclpNaFRXSFVsRDBGSDNnYzE4aTRBTk5RdDNtN3dtS0dlZUxIU1U3MFZiNzFaNGNvZkJqeSUyQk5xaGV4YUlva3U0UVdPSDZJWVVjU1FMc1plOERhRjdKUG5zMFEzT3VzZDZoQ0xIZk5HenZMNlFWRiUyRllHRHNPY3RsMklpTXMwNjREdCUyRk9BOXRSd0wzR0UlMkZHSksxTzd2SWg4QWNoJTJGU0Y4d1JSNndYRklZbGNVcXhISDhkYU9reENoendSSWprSkQ3QyUyQlVTUkJGMzd6NXplaCUyRnQlMkJMbmZIUTVwMVRrdlBSV2tndGdDVWxVMHBjOSUyRiUyQjlNQWp2RXZLQUFnUWlITHJiQlhoJTJGRldvQXh4bDZlUERQZGxRS25zbjRMQm8zdGlHR0VrTHoxTkltdVd2aHMycENuaE9ueFJKdk5kdkRWeURtQnZnbHR1T0VNSjNxRjhhdGh0RmUzZERmMmFCaEl2QmZGSllnVmZhU3lhSUlqTzZUJTJGVmRYeHBaM1lpN0IlMkJGckY4WTlvc3VBT3FaNmQlMkYxaGk0ZnAwYjhhZGglMkZWJTJGJTJGTFdvRjdUTlpPbjhrVnpMamtPWmtWNG9Salo1NVM3eWVNY1lEZ0c0OGdTT0FHRmlzJTJCRTlsZmFmVEpnbzFwY1BuJTJCU2hYMlA3ZXpOJTJGRnJpMTNWTTl2WCUyQlB1TVVoV0QyJTJCNkJlcVRqbEE3SEZVUE5FUE4xMDNiVzhvcm9VU1cyZ0d6VW9jNDQ2WE9XSiUyQkF4NDBEbkRqZlRzbzNDMnkyYU1XN0VhaFRRTiUyQjQ5VlBpU2E5WXc4eXMlMkI0Y2d6U3JuU3NvJTJGdzljNE1RYlh3ZXE4N1Zjbk16Qzd3VGNxbUx6bFZia3BveDRGbyUyRlY4NDNRQiUyRlBlUzRpTXJlYTRNRW9rWXRRWk82QWRQRDlPVEElMkZOc0xMTmRCa3FJYnJQVWtaR3FEaFk1cHNYbEI3amdscWhaWVdYb20yeVBUcmQzJTJGdGNjMmxUUlVoWCUyQnRjWHU0b2M1OXh2c3dYWWVGYlRZZWhkJTJCYkVnU3FYZWRkRUREREllTEtrMlJJcVlrSm9zcTJQemFpOXhGaldHYWZRUSUyRjV4aDduUDk2Um45NWNmcm5OOXE3diUyRmtHWVVWelBjRmVpTTJOMlFrd1BMbjBWVDdGekVVOHBORHdGdGYlMkJodlhYNml0N1klMkJmSkRhZmxnbEcxN2hXWnR3cU9WUTdwbDZHdGQwc2ZmUFBuTDhLa2hhbzhNblhTeHloWTBFcFh2VDhmaVBqWlRjZGJwSmglMkJYMTNDeGRpT0JEMHVINGFXN2R3NEZKSXZkbjc2Q3ZUdUhiJTJCSG9CdjJYUk1HaVh6TTlLeXdTUWl1bWN3U0w2UFQ0aGxCVUw3UzRrdkpjeVFQQTFQM2lQTSUyQmRRZUViN2dOMGpxZ3hCamFqRWFWVDFzeEhrJTJCM29pSHFIdDA3e
ERjWXlpckJ3ejZYVk5HNXBkcUU4b08wMHMydEZzSHlkVEIlMkZMUTdoNTY0cVpaWldFMlI5OE1MekVjbTFYWmJhRFlSQ0NrVVJONiUyQng1UGFzSzF1bUN4RHNhNG9wSFJuNW5YMkpMVjZoNWx5JTJGcVp5SHZITUREM1RZVkhPeXlqVTFMSmRkQkdHQzFkQyUyRmJwaCUyRlN5aktkMUtMeHhLbDBFVW1nNTBPMmV4dkN0WlhoWFZON092aWZDRjRwJTJCWCUyQlZPQyUyQkp0cTlWb0M0Wm81OUw2SklGemwwUkJYU242YzFvYUZhUnc5VmtNV1VPRHdzRG5FV2J6aCUyQnJJNUdIWTM0RnFjeFIzUWElMkJaR09oN3g4S1pwb0phWDFlaTA1WnRjMjlpQXNic211dU8lMkJBJTJCQyUyRjBKY2I4T2RCb0RQNSUyRm10T2dvdFFxdXYzcnpvUUhpVU55dTJJVzIyR3NpTWlzbE5UUzlORklqdVN1a3NDdkUzMG1DR0czNnUlMkIlMkZ1a0VlcEtQTmVUN3ozbEE5RkNnMHg0c3E4aUxRWXhkSE9MQXVDR1pQT0ZrYUlqbGJEcFM0ZHNVRjJybWp6WlBSdGRoRTV0dW02ZkEycHhpZThuMmtpanBoMEVHOVhYV29PaWtXZk4xOHVkSk5XNlA0MSUyRkJWMlJzdldEcmE2RDRJY0l3Z0VoWWJyWk54cmJYSmZ3QXVuZXFhbTl0Z0V0MWRSTEFDVFZjR2g0RThWVUZJeTVKd1I3ZW9DODJrdzVQQm9LdHk5TCUyQk1CWlNja2tEcHZsempYaEZPczFhU1lZZWdwa0J5TldITVVZZ3FsUGdIWmR0WjdGaUJrZnhwRVAyUWdDaDhhVHYlMkJUNmJOdHY1OW53ZlllMWFQY1ZvRjNFM1ZEM2JkbDIlMkJOVERkVWdmd0lxV2VybzExM2h3VyUyQlJTJTJGZWJKZkYwR09yNHZRSFNBbDc2UW12dDhPV2ZGVWw1cG0wcDV3eUZJR2xMT0RJSnZoMExKWjB6YnJBNk1IcG9jOXBwODBqdlFrOElCNiUyRkFkbW5yTiUyQnpZb3pSTDAzcUF1SlJWS0cyQldSN3lWaE5OcmU3M002WFhTRlVSRnoxTyUyRmJiYVY1S0lLcnZGVFBCbGJpWDhZR0ZQViUyRkNFbnVWSlZ4THdVJTJGbWNYZzRqcXlMM3B0b1h3Y0hGTXZ2U3ZCNzlza0huTmMyUVpXWHpOQ1loSGtYYWM2Qm1yMHpMd2xBRSUyRnVXVTJObFAlMkJBRmVYalRBRGU0czB4OTYlMkJkMXhrUjVmSHNITnhSUlhqVlhQbkpJZEJQeW1RWlZTNDlMNVJWSmZWNXBCalJHNGs5V0VGdFl6bFFNempybGxlVWFDZUZpbHRod28xQTRCJTJGN1JZbVZ5bG1SWE9qVlI2Q0piRTRoJTJGandoVDRoZnRSQXhPbTNya2NidWE0YXAwMCUyRmd5Sll3JTJGdUZlaUxqNlRoYnhEWWVZcVhjNzIxNFE1eXc3SndQYzY4UVdQcks1cFE1T28ySjJuMDAlMkJ4YnpwSkFLYVdwQUNEVmVVZ1RFOVNPanVJQkhZdEtIYVpsSVBiRkgwc1p4M1MxWkFDSjVvRE85TGdnWmg1dEpqRVRleFhBcHV2bWE3blAwNUM0NFRTVDNyRDYlMkZXZ0o0eEI1QXQ1TFBKa1VFQmcwVFVocGk4WkV6eGFJOFJQcXp6ZEZ1WWprN2ZLT2NlU1BzdDAwclBERmdlSHZuanVRRURVQlQ2JTJGUUlGS1JyNHR2ZmZQRiUyQk1jeWFNdlJtYm5RWWRJdGI1aGdvaGZHUWRIbmJ1TFpEM3NNRVNKa2VUcDRVSU5IVzJ5JTJCNlZXRVlIUTlNZTBqM2l4NXkweUlqVU41czM3UlBjYWpFUGxTRVglMkJwWkY4MGE5UTluJTJCbVNwZENKWjhFYXk4Tk5Id
2M4dlVRNHNVZGJNaFlMYyUyQkVXM3NGZ1FSVzNGSnI3SUlDNk1ueENPcGlBODBBT1dQeXNLekdseHBDNSUyQkY1S1dpcmtxNU8lMkIwYmZkRFR6VmV4JTJCc3JkVWFWa1R5Q1ZUJTJCMWhlNndGaTVmJTJGaVU4eVZNNiUyQkFTbjdVTkRSM2djc29weDROZ3VLRVdlZHVWYTlNcEs0JTJGRHN3Wk11aVNKT0xNVHQlMkJlWlhTSXNQSW5qWHlsR0VZZm5iOXdFSHd4d2tjbnBZRFB0WVcwRmEyM0l1TGNNd2g3YSUyQkl3WnBVTFdudHJucWQ0RCUyRjRDR1FiNVMxdWJkRUpwNjhiSEhIc2dFRTFLTnlNSGpxbTVZaHNQMmxobEhSdWo1Y290bzZmWGo1R3QlMkZTYXV5MkIyZ2dvZUs4OVhCRnR2ZW9TT2tvcG9GVkptc1BBTnJSS1B2aW9YMWg0dERoJTJGTkpPSk5BMWh1dnpUOXZQQ1I5ZXIlMkZVTmczQ1JyYmxQamVhSXJyYnJGck5nZDFnQzNXJTJCeHo0TjFMdGZaY1E4cG9pR0xwJTJGT3FJMTVlZyUyRmZpWE5CZDNsaVBmUUtHMndWd09EU2JUWGpkSWI4UHZDSFpOWmhlelhwaXlPY1JlRSUyQmZpeVo5bkclMkJUQWhqMHNvbkRzU3c2dm1hc2N3eE9LOFRLR1lmJTJCNlkyWGIwMXIzb25acXh3Q3FObmR6UlowTEJhdVBQWHVJTkp4VVVLVGtGT05OWGRIMUY2cHExUVpJRW93aTJrYTNWVzhVNEE0SFJ2ZlBQY3czMHdmcmV0ZXQ3WjdQJTJCbUlFMGpvTzdqUSUyQmZRak5VZjZhWFRPam1UeVBtRGpVc1ZZV3p1NWFCJTJCRGR3aXIlMkZscUh4S0pxWVRLMFY3R1JOMjNCcDBsJTJCN1VKMWIwOUpLdDlabVY4bHQ3UGlQN2wxQ1ZydXR3NWttQjFyVTg3aEZPSDBNUFJVOGQzOXRzeGRDWWd0cWpVRW5LUmJ2THc3cSUyRmNWcW4zaTdHM2taREc1cHA1YXh4dEF2aEZoNUR0Nkd4MHVtNWNTZGslMkJXRU5pZkNOWlVNNlFBc1ZmOGUyZnk0MHNLQ2VkZG0lMkJoNUx6WEtRQ0ltNWdkTUo1QTdTQXVrTlI5NG5NNWlPa0tEcUhCZFQlMkJuWGElMkZXZnkzMTNOTDhsclVZSmwzd0pvejUzT2RzMVN2Qzk5aElHOFFXRDFpU3dzUmJPUGRTVlRXa0QlMkZrOFNaTDlWckcwSHRKRWphdEtmZkk2RURQTkVPWXdMVkpqJTJGU2VoQlJoMGluNHFzSDdheDNMWTJxTGdSRkhESTNMeiUyQjNXME5zTk1sVWx6dVNiSnNWMjZGSnhNU1Q0WXlZeU1kTWR1bENVQUwwQ3A2NVZXb0NCJTJCZ1VBaTRQZUtJeXhGOHJISFp5QmEyZHpuMzMyaDFZVkwxTnRBWG9Td0N3ZGlad0Jpa2tUTklDYjIzOWxYQ0lWM1J0R290WVZVd1hwSlVBN2ZaMTRCbEpYQzNNcyUyRjZ4SSUyQiUyQnF0QWZIN3NOWTIxNGlTdVplbXRGeXdSMmdLZFc5Q1ZXanY3JTJCRmNDaFBva21nTiUyRmFpUzVoVm5hQUlqWDFXOWxSRm9YNHQ5RiUyQlFMcWRTQkdiZk5EeSUyQnZpYWNRNEl6cHFoWUlNaUxXNThqSWU1b2ZCZ0p3bGx5VDA2YlhqT3lWYW5jc0lRRE9NZ0lyTElTWmNwYW05a25Od0VTNmJDa3FPM3J3NWxxSlA5Z1FsbkZnb2ZsMkNpQk1pcTJEWEVXMVVOaEhDczQ5U3NXQWpHMVd6R2hWSG0zTVByeWZWU240OEpmeWdOMkxzNjZLaVJrSFRUUzVFSnJRZE1xRlY4UWRnekslMkZJaGlJZmlJalNtM3M3eGVKYWdaREFtTHdlb1VtSWtBNVpwS
G4zRWRSaFFyWk1mcUU3b2xFcyUyRkNjaHExT0djTGZrUlNVZzhOUzNzNG1ZMmY5TVBCZEVWd0NUbDJLbzVyRjRQNVJTJTJGQjdXalg3Z1dpWGdpWjNJZXhhaXhwNWZSaU96YiUyRjl4eWElMkZrQmxKeGhzbmZRUHNDVU1aRjY3RE1mS0dTblh6a0pxZEJLUzVhRkhGUXU2ZyUyQkphaUZQcHZxa2slMkJ3M01aZXVyRnJFJTJCNEdIOHYxVHRxSyUyQnZ4VFlzaHZxejV1QUZxM1lUN3gzMmpJNXpYeWpUbHpwNmZ0Z3g1MWpvZmQwS2FOMXIlMkZhUlJGajVJTGtsYmc1dEdtbm1UWEplMldBR2R4dWVIMnRqZnViYzNRekNmeGdRQVVSM0EyVXFQMml0RWplM0p6QVlFeUhqNk5mWjcxUEhqNFVtTmhKSG9zQzdzTVBFalB1NjdEZkFSc0RGUVJJaHlqcHRlTU44dnpSTEpMSm1WUlppdTc2WHZqMWtZanR1SWxyTURhU2Yyb3o2UmEydDklMkZaVkRCOW5yS1VXMGhZZnREUkRvZkduRnNNSHFtcllzZEtEb2pLTFp1dTJQakdIMWh5ZUpjTXZaYVk3S0YlMkJydWFVJTJGZ1UlMkYzN1RVbE9teFhNOEk4S2J1REQlMkZYSldGJTJCJTJGY2w1QWpCaSUyRjdLNEg1T3FGV1FlRlFkcXp0RUs0WUQlMkZoV0pOb1JTQSUyRnRNQ3Q4SGs2JTJGZlBsOGptVTRjRGppUUJhUHQlMkI1TW50MmRhM3YzT2dna1dQVEo0JTJCaWglMkZUOGtnRkNoSm53cTg2eWZiMFh5a3djbXVjbTZyNjRLRHRPcmpaU0olMkY4a3lUVDNBeWV0Y2twYktxaklpVFNoVG14QkY2YzBKZUhndmFsWFFaaWlIdkFqV3NJR3FpSVVkandvZmpxSmNLcTFlcnN2RTlCeWMzJTJCbGx0STZYTlI2VHV6ak9MQ2Q5R2RacG5SbWxwWENCJTJGQW14QiUyRjhGZ3NnRE55cGRmR1BYcVI1c25EeXh3cFRWZFRZdGxZbU53Y1R4JTJGWXpTNWNKc3FQM3c4SlJYUWQ0eUtBQ1gzTVNXSFZZSWVPU0FrN3BRdHd1WjUxNHhMdTd3JTJCbnNGdEppaGJJemRCNElickwyeTQ5OTh6cVJDQTdkR1pqeHpyOTNnbU9nU29Pa1NLUEp1TkJYZ2ROOVVtUmI3TzNTdXBTUTdFU2pkdVVoT2FnTmgzNzdoVGl5Uk9yemJob1BGOHhCbTl2UDFjWENzWWlCVzRVWmlKb0YzM0JFMjFCQmw1eHdhM2VoYUVsZHA5WUEzeUpkZnNCSTY3Y3lXSjBiUzIlMkJLQU1FcDdHdlNFOTZzYUw4QzRuUU9ORWZHOU82ZndGOWliRjZXWVI0S3ZtTktsblZhZTJWejI5WHNxMDg4YnI0T2UyVHJnY3hVOWRocDNTOE9mRFdlZWVFN3ZxV1QlMkZhYTRoVk0lMkZ6cE8lMkJzWXBqUko5MEhQcmJ2UDJmWXZ2N1hERFZwVThCRzclMkJ3S2JvbWFTMlBQUW1xVVhOaDNGMktPM3BvbUhMbiUyQkFMQktwaG8yUlpUVU1QWGhrQkQ3akVUMHA3ZG5XVG9ud1BobkF1Yzd6Vk84Y1ZSSzVDRjQ2b0dBN0lHdGhJWFhMOGo1SjZDVTNaMEQlMkIyczNJa0QyNmFGRk5uUEFWMXlHbkdjZ29PcnNHemFKbEpNd1AwNTVTRVVSUHVvMU1XcURkWnVLTUltYkxiNGdKc0pObkpZMlRkMHVHbFNPTWZJTHNSQVZhTk1ZMUFjM3RMSWNMJTJGNTBFMnZlaUhEWkI1M2hpJTJCbVBwdWclMkZiaHF4Mk96dlh0OHlUMVZiMXFGNTE4ckhQd0R6ZkVJbyUyQkZiNWFNVzFybnd2U1pmVG9Gc3dESEN0WkU4RmN5ciUyQkFyVDRsU
UtNRWJ3MnlRamgzJTJCaEozNHFkbyUyQkglMkJpY1NqT0lUcyUyRjdKZVZHUW5MbndwUFdlbDRtYkN0SUxVS3JNc1JNUWVyWXIlMkJJcFZQVlRGdmhzc1dZV3ZDUFdVJTJCMW1sSUtxMWc3cWpMOVhUV0psNTJBcCUyQiUyRkFITVhDc1dxU1dHcGFGaWJ6blNhbmg3aWp5OWNreHIlMkJTNjVPRzBUM3B5Z0NLUW9oam9vNWFqUEdkekQySFlWVnl6Rkt3OFg2Sjk5ZzI0QU1ZeUwwJTJCQk4lMkZKQU5yeHlzajNHJTJCdGhES0dhRVNTVmp4eVhqc3hQY2JYcFVzMSUyRkY1MU9Cbk1xbkFxVDJ5eFFERFdFMWlaQkolMkJvaUExMGpLWVglMkZER3VwODFZdzloOWgwRUNkdTBSZTM2NkhlMkY2dmpQZmdYbUY5SkR4N0c1ZmtGb0ExaEFhQjZJbXJPZU1uckVNamhWRTNta3VqTktvJTJCTndEUnpPZkROJTJCQmJVREM1bk83RGgzQkc4WUdQZUYzOENSWHdFY2IlMkZmMmtSYm5uRUpBVnhLZmJnT1NLbUNjQTRrVW1qZmhnSUxhRWYxVXhuMXFZaHV2MkE1bm4yOUgzOWUybGRWVmFBZEFQeGolMkY5TXVpejkzVXlEJTJGZFRjRlRmMFZnZjZwVmVKM2N3Vk1RZiUyRlVYQUZEMUY4eCUyQko4N0t0RCUyRmdZWUs3TjgzVkJSNVZUaSUyRlB6NlU4RE5XNDVCMHd0OUgyWiUyQiUyQkNkQUI4ZFB6OFBkanRCSDBTZkR3TTlnVTIzYjk3aE5LOW0xOGhqNWIzJTJGM2VXOVpkOSUyQmUlMkJDeW9yc3V4djRnWVQlMkJLJTJCRiUyRmN4MzNKZnM5MUg0djViJTJGVW5USlZoJTJGJTJGZUtsJTJGSmJ2ZnAxcGolMkZkems3M3BENmI4UzVEOG9pJTJGZ1BDdGlTcFNxMjMlMkJmOUJ4MzhiU0wlMkZXMnJCJTJGNFZhaU81NVpIYWRrdUVmOUVQTU8lMkJoRllzdHgyUDYlMkY5VWZDRCUyRnhBTURXZGY5JTJGNWJGWGdyNTZzN2JOVFdMZTZUMzRhWVg1ZkZpemklMkYxejUxM0glMkZaQVIlMkZWekhRMSUyRmRUYjRVekpUOFMlMkZ5N0o5RiUyQnFNMDhLcWdUcVhMZGxiSXMlMkY3U0V5cWtoQmcwM3l1N0VtZSUyRlJhTFA4empvYlElMkY2UXc5Rjg0MXglMkY5VUglMkYyclAlMkJvMlAlMkJPWnhIJTJGajNqV0w2UCUyQlAlMkJsWkNQNXZQZXVYZSUyRjlQZUJiNWY0VmElMkZza1RvSiUyQmYlMkZ6YmtVZjluSUk5NkZFUDhLVlFSJTJGeiUyRkJIJTJGWHZsZlJ2MiUyRnIlMkJkYyUyRmVuM1h3dTFHd1B5dlFBJTJGdlhzaHUlMkYyU2RadHIlMkJPJTJGME1JOUM5Q0J2N1BDUFFIMlA5RHMlMkJUJTJGQUFEUiUyRjFkWSUyQmlPcTVRckIlMkJYJTJGRiUyRiUyRmdZJTJGYjdjendmJTJCJTJGSWRQMTU4JTJGV2NWU1A4OE5JZ0wlMkYzM2NPJTJGSiUyQkI3UCUyQlF2NURZbzIlMkZvYnolMkZ3djFUOXYzRVhabGxBc2VsdmgwM2dnUFclMkZRRSUyRnlQME5QOGQlMkJmOFo4ZGd1SElmemdFTE1MJTJGek95JTJGNjlOJTJGdFAlMkYlMkJQOTY3NjRIa2dTMSUyRjllNHl1aG4wciUyQmR2eFFpJTJGZTNmOWkwWUMyc3ZwaGQ3cE1CZHhpZERJc2NrVGQ1OVZYMWpjak1BYTBNR0pPeHJCUlcycVRaemRLdVQ0R3NSYWFQU1lNZyUyQko3ZlgwaG4xa0Q2NHdJVzIyd2lENUt6eTUwem5lRFpiWUxNOUxFbSUyQ
kIxTDhZS00zTGx6Sk10M2ViYzgxdWZnN3BPTWpiM09JYlhiaEklMkI0UXlVOG4wT2Y0cXMzJTJGWUNDaVRmZUVmaFVWJTJCMXZlRzRsVEpPdUgwJTJCTExUeURaTWglMkI5VzdjaDNoeTg0Y2RSc2QlMkZ0RVlFRWc1YzRTRWVkemxVSiUyQnhVWTRvN2ZnaG9PdE42QUNsU0NqNlIzJTJCOGduNkVrRjJkd2syWWI1YXMlMkJrTExzSzd5bEpCRzhTYjFOaUl4N0RPMEFlSTZxcTVCZlcxd2hseldiJTJCbkZ5aUw5blM2UHZ0RTNVRzV6VDd6NyUyQkxwaDJrRTNRSlJBeDJiQTdSS0wxZG1Pc0clMkZaMWdzemNseXR1anM2UnIlMkJmZ2l1RXR3SUZLYjgzZnUxYkJyVFdRWjZkNlpTRXBlTTIlMkZ1UWFzTnFsTGh5anhtcnFSUG92WU1tT0o5T2JiJTJGd3FiMmM3STByJTJCVnY1YVFVJTJCVk9JZVpkQmxPNmxJUjhXcSUyQlNua3JKVkFTbSUyQnhpWUNEMnBsd3FnZTZJbUo4bW9RYjROUWtaY2V3cnNKNW0wUWJtSEhwQmdXWXd3QWQ4Nm5BUFgzMzFiNGJvTEFROTNoS0J5OGMyd0xpTyUyQjBnUFE5bzdYemlzVWRLMzZsNFQzRVdjdDBhaFJKbWtXNG00VVBQQ3p4Ukg0Snk5OWFBMnF2bHZ1JTJCZDdVT1E2ZTZnZkwzaFRSRkN5N1JScVF1Sm9YOGtKUG91RmFmeWFBN2VMd3hVUFolMkZrWGFFZkhhZWduQjZIZ2xUUzFsVHVIeFlTbzVkQXhkaHpJcDYyWUNFMzRWcXhDUThxb1pZcEJNJTJGOVl0T1NPQUxVTEgwejRjSnlLTWtNTEp4UGpvWWklMkJFS2xhTnFYSkFXeVglMkZIWXpEZWl2dzU2dVpuY1lFT2tVMENiMHV0WCUyRmZtVEhuUWY4MkFSN2J1VVhEcVVmYVhOZExqVHhQNUNxMW4yNzVFSFZmT2FpTjg3cUpCbG5IWlRrU1gxcTI4c0RpZ2o4T1JOTCUyRjFaeE1SWlI5N1JVUlJ5Z2xySGxvUUVWaGJnMms0cGdoVVpFWGsxU0hpS2hma282S2Vsd29yVDU1SW9mb1FsRnFGZ1daSGd2Z0pBQWxoJTJGb3klMkJpcHlGeFNDWEJoWFpLTUw4THEwT2lITFZCQ1gwRDJySmllMk5EemtxSFBNb3NzR0xoQ1IlMkZVSktZWWROMzFIaDVYTGc2MDhvMzZSNGFXN2RMSTRwbFd6eHUzb2dLYnlyTEcycmN0aEhZa1JVMjA2Ujc3ZFh5YTJVQ0FaeUVnVkp1Z2M5RDJWRTJnQjVjcm5HTUNwblJ3Ukk4OU15RGZaZ3BxSENMeTB4UGpIclZiNWhzNnZJRzdmWTVSTlY5cCUyRnJQQzFxTkhEbTl3TEJFUWg5NTR2YUxJRU9aa0lyMVowc2Q3bjIyc3RubGRmRWJOVUUwRlhVRUhHdDZuMng3eXlCNkNsUnZDanQlMkJrUlJoY1NkS3l6TkVRZ1Y3WjlVUUMwQ0lTdkpNRzVTNGZwWCUyRmFBMEJMMm1kR0NoS1g4R1piVEpjb2dYZUQ5d1drNnQ2QW1lNUUzR2s0dk5RNWhZYzZPRzFENzJNbDNNUiUyRkVHVVhiUmMwSlNONklzOTZFQWJ1NDhIZUZ5SiUyQiUyQmplS1JQN29VbmFaelo1ZFZKSkdKWlhpaFRIcGh4VngxOERpOTQxakJ1alVpQnE2eUpFQUdITHM0dWdFOFVWTzBFQ0Vjc2xVSnZLeGdZYVdFdHQxYWlDOHQyUlJMQzFwcG1WSzUyRGd0clhVNyUyQkNXSG5zM2FxY29MZnJUWjM3MVJ5OEtPJTJGeTBqTkRKak0lMkJiaGwlMkJyaUtKRURBem9Ba0d6U0RzWDFMelBydzlhZ2VoQ1N2MVFEQjhiaFVzcnVVQ
kpGbFRydmkxb1F0QUlCYkh3M2Rhd2hVdzJVQXklMkZNMHUyUWJ1RkQzb1Zvd0xVRXUlMkJqb3UlMkZQYUZGZXFsazFGc3VuYnFKblJoeG0yV1U3VTBFNFNmZHBhcjhnblFabFVYODllR2FnamdtMEdJekE3SUZ4OG0lMkJ3N0l6ZUxjYjIyRERCdjhJRHBaR0lWeTR5aXhJTDZKc2hoZmxGMEtkS0FOcyUyQnNWSEcyMU9Oa1dmaVVObVZRT3N5ZjA2aE5iZEZZUjA1WXUzT055ekwxMDlucFFaT3l0dXdoc2wxbDdmU080dyUyQkxVcHdIN3lVTFRnYWhoY0VqYm5vM3BjbjRtaVNPVmFrNWVZYzRNcFZVZnZ2OXd5UTN4MnNCQnBFT0pKc1F3MmF3Y3NCS0hRdTA3alpac0pMb1pBMWtLbTBIUXFMVzUwbWVDdEk0REthTlc0UWdhTVI4aG1UbG1yd3hXVXZuMXZyUHpkZVhTY0FjV0xOTDJPdHZkSXJ6YzhHdTdSbXhsV045UVJBRnlrMWMycW1BVldDOXFPOHklMkY2aTcwb2h5NEsxVE43eUlBQXIwRFhKaTdWbFBWN3lhR3lFQ1glMkZXREExZTNlRk52M1BvTXMyajljajElMkZZbnpXeGNaWllxblVBS210ZHh1cXFvSFF2b01icnJIaGhmSHdzQjB2JTJCVDkwbTF4cWZ0VGFmcWd3cG9nUkJpWHBLSGloMEhzSW5sSFUlMkY1QVhqWDMlMkJ4RmNFMnhlOWR6dXBYdmwzRzU5Tnh2TDRlVlhNQUNxV3g0JTJGbTMzakYlMkJpMkV4ZmU1NERodEVtTVY1R3pGWVVTT2wlMkZtSiUyRjYxMjRGVGlwdWJwY2JNMGRocWhnYUFDNjBCOEFGYldndkpOTDZEeHhhVm9ianJnTXNCTzJ3NUMzJTJCTGdHYjBzWFpEdzRxRHZzMlBTMjhyQ0pHRkQlMkZQUVdVOVM2ZDhBcVd5eFRiMlB2QzZ6WHQwOURabzUlMkJoWlV5Z24yc0hBVXR1aUhVcHVEanI5RG4lMkI4TENLeXZHVWdWWks1NVhPZTRUWGN2RGZvaldaJTJCQyUyRlRUOEJHc2h4V25QWDdrJTJGOUhkWlREQllaJTJGRU91c0hrQzd6U2hNNEVlQkpXMUhHWlExSU5BMzJHNmJKZVdBSHZUUmNLeThLRDJqWXNORnREOG1iS0o3eUZmZjNnM1p6blpUVGlZeHJOOTNMM0FuSzglMkZDNHFqQnpNQ21CUkslMkJWcjNjWVpDM043S3FUdmpxcW9HOVpPNzhvTyUyQkFqREtoSVpNOG1OM1RlM0VDOTM0cUt5SE1ZUDNCbGQ0TnM5anVJV2d0d3dVaTJzRlQ3SUZYZDA4WHJRJTJCVjI5dGhlQmRWYSUyQmQlMkZuZ0xkUGtibVlBa0JnYmU0SWFEN0VGYWxHeGx2cFpkY1BDckdPUVg0dU05ZDJQdkVhUzJ5R0w1NkdLQyUyRkMlMkZEb2dSSlc3N0M1SU9jVEQ4NW55R3hHWjRXMGl4Z2pBalVUbyUyQmxCWWZaQ0h2JTJGJTJGUlRxeWh3dUpGc0s2Y1Jrbm9lc21FNGY5cEpqVGpNdGROOWZYNldLJTJGY3lUWm5QSzdnJTJGYUJnRmNoUldnWm5lQUdZbVdBJTJGZUZIQzJjMnclMkJQNkJIQXFQUmk0eEswTWVNZ1kwdTEzVmcyMVoyblhmQ2lFNlFnOE8yOVFlMEZvYTNlMmxnNWJWdk5QTSUyQnlwJTJCZUFyYlJzQ0dTYnp2TUVTWUZuSUlrdU1Za3ZqeVc3aGRlQVdYMyUyRm5BZyUyQlRZWXhnc3A1cmd6TVFXSk5wZXB3bGR2b2RsZGZwN1l0b1dtZDhnWEM5MEI2a0tPazFvMDliSEEyeEdMUiUyQkQzQXJscmVWTkxOQ2pmeVluM3l5UWJiJTJGNEZUaUthcEhLcElmbThTNDE4c
nIlMkY4YXhIZm9OJTJCQ2xVQVlxR29JNm5QYXQwd2N2SXFINVJtR2VkY1RhMm1xY1M1RE9tMndGSSUyRmklMkZDZVMwUjh6TiUyQmZkV1VCSGhPNm1PdWdFNmFPZ1RIQXpPMk9rZmhqa2tuZjlxemNLanJ5Sng1SlNoU2JxRDYycGVtb3Y5cWVleUlWTExDZ1RjRFBXM1k4bzB2dzU0dTVYZ1FwZ09VM3Z5dUhMOVRrRVZhN1FVdUcxMUlTVENwYjhaOUhrYUlHeU5YV2lqd1dvN3RUTGtQaFF5OURENWxIQ0RUNkpZOWdBczlldVlGJTJGNyUyQmFoM1l0NldwWHlzRTM2NE1iMmpERDdDakNDa3R4YlAxRXIyR3dqNlgzcjRZblBoRnF2c3FqNzZVdU1NUDIlMkJMWURYNmxRTEo0SVVjS29wZ3Vrc1g3RnB2N3JRdEo3NTNLM0lKY0tkQWtPVGc0ZEZqeDlMZ0pnMEclMkJIJTJCMFE3c0lDSUhnQk9xbUtPejA1VmFpaE9kblBIc04wYyUyQkxVUVR3NnVuUlkxaU1ldHlOaUtYdVk2a1JPRjJFaEI4YWY0cmxWZ2x3aUtDdWxIekNzJTJGJTJGVGJZOEl6Und1Y0JRdTBoZlBkWmxQNSUyRk5EV2hmUVdzJTJCNTJ6UzZjMDQwWDJnaUM4T1kzZkFKdDY5QkFlaFRqc0RraSUyRmhMdzZhNnRUdU5Ic3VtQjEwb1V5JTJGQWRqNVZVMmV3RHhIdEV3UFBtWDZqeVhzTlF5Q3R1JTJGJTJCWWR1Rm5EVTRlM0p1R3k5NnJsNVI3YTBURmpmMFQ4UUhCY01oM1NaMDVXVnRPZmdCQ3JiaFlEcG9tSmg0eHJmTFlUb3h1OFVTVTZxMWJ2a3VITEJZbkU5czU0S1VLM0xjMWFQYlJCQWU0UjdVSzMyNGc3MENJbCUyRkVUQVBkazJ6M2Y2d2pvZW5RV3lvM0xVUDVsNE9OSnBSMElYN3Z5TUIlMkZMTDJEOGxSRjlidEd2eldzM1JMbnVjY3ZySmhGeTRmSU1DNiUyQjdrM3o1WHZFaEVyQUVlb0NuYjYxN2NUVTUlMkZ4cWZVTiUyRjBLbjglMkJqdkF2UlVCRUtaVzUwU0c3YjRYWmJlNG9FZGdtTHJ3QWZZbnVwWUV2dDFsJTJGc0slMkJaUyUyQmN6UE9JNjRMWG5lV1BYNVBJSkVoTWZtdUF1T1l4YTNSSkwlMkJQWjJvZHhlSnRRanFldGx5bkkwNGFtZVhldUlhR0U0V3h1OXRlb0VSQkZtQWFRViUyQlEzSWswSmh3Qk1GcUFZWXB3M3A1R0pNNWFyT2RCVmJtWElYRllRSFlDZ2lyVCUyQjQ5QUZ4UlYwYlRTUUJRRkRQSE5MMTh6NXhyaDEzUWtVS3hMJTJGdklyejljaFE3ZEliVlBYZXkzbm5ENXoxNDVBU3o1SVVUbVM2OVR5UWIzS3pFYlNGMTV5ZWRjQ1lQemVHNVc4dm0lMkZBU21hUkNGY2Nkb2lBMVlndE9kMVlpNUFQcXM5c1lMMzhlbkh6QzNyMmFGJTJCQkdZVEFNJTJCZGxoaWluc1BoQnd1TWVYSU5XMlpISnFRT0s3SGpvbTU1Vm9JWVdYUU9PMlZ3cEI3UEN0TEU0YjhWQVpsUjJXJTJGJTJCR0hxclhNTUhZTzZsbG0zQTRZWXpXaXc0TVNpaW1RMGIlMkZLTktkYjA4JTJCYkltbjElMkIlMkJHWms3Rm0xUTN1d1ZxJTJGNHdtbldSWVhEeGI2RklpSVMyc3YydGI4em5DcGZiJTJGSmI5ZkRuJTJCOGx4VmMyZjlQT2prM2NuYW01Nk1XajdVQUpqOG0zRW01SGN4ZDVRa2NkWUJHTElIYVVKSjRlTWxVMG1iNk9CVGpiM0xydDBjM2treVRFMkNBeEhTaUElMkZTTG9BaW9HNXVGU2RRVnJOb3hUaGtFWWZsM1YxM3clMkY3d
XBzUWI1TnpYeGdGR0dLJTJCdnhlRHhMY0klMkI0cTJIVjVsWjVwbDRCUllSYTVxcHYwUWVzODkzM09wU1djOENDNlhaQ3YxaEFUZTJnS051VDMlMkZOSWhzWnBmM2JSRGZzeUR0ZGMlMkZjZGY1T3VNV2lmaXFTJTJGbjVDUUlwWkdnY1daWlJ3NnhuNDhlUkRTTnoxWmFncFVQbWhHdXpuWlloUyUyRjVoJTJGUXlwQnJqUmtER2oyeVdWJTJCSVRiT09xYloxUWF3dSUyRkROVHUlMkJBY0tEQmF2aHJBT3JZcjlUTnZtbkZycVJRMndMemF2d1ZCRERPU01mY1BZeTdEdGZDYkhjRHlUdkNZOGtUaFdiSWc2JTJCekhjQlNlJTJGRVFsbE5uOFl6ODVmZUl4ZXBTMmZ3bUclMkJ3REQxdjkzaGxub1JQamNPaXBUTHNDa0UlMkYlMkZtaEN5JTJCc2JYZDV4bENuRGFYem43Z3A3T0V0MTB1OUN3VWZzdUNOWTRXZENrd21hTXJNVHlDbEQ1JTJCMEtvaEdaQUx5QmJzRHhyalJRZ0R3RmY5SUZFM21rb09nNzdLbFRvWE5HcEk1VXhXdExwcURkaWMlMkZkdk9oUU5YbDlMSGlZaFlCekJackslMkJ3Nk1janBlZjVydGNhUnZiWklkejYxQ2NQcEx4YnluJTJCY1JmazA1R2JRNTdKaHY0OEh5V2hLSndXbW9uRWclMkJ6JTJCaXBMQ2dkZWtFUkRBRjBBTW1kY09GMHdqamdLV3Z4ZkJ3aWY3OWZwQzNjbmVydzl5VTk5dkxIeSUyQkYlMkJ4eU9TeU1yQWQ4UTFIdmtWY2oyY01aZkNBSE5yZDFtRndncmMyTnIlMkJ1U1U2VTNyWmtwVW5icUQlMkJucW5XTzhnUSUyRk92djZ0RW1ENlMyS2M3cTNUSkIlMkI4RWZOT0dDWU1VemQweVNjNzFPTVlQaWNUWHVqZDhYdm1JdFlQcVY3WWoyTlNFSXp4VnBaSVpUTDJOOFRUd2NNJTJGVVcxMiUyQkRsUFFVJTJCR011dk5aRTN0bzNTMHQxbFNQOGtzd1pQTnJYTzR5d3dKcG9hc2YlMkJMOFpXRkd5T1ZsUjN0ckNtQ0tzMGVFJTJGeWxPQ0hxU0w3bVByOWtiNTVOY1AyZjUlMkI0MDJ3TnNRWWJEY2p3OUtTeXpXRWZkYTAxY1ZyRG5WTktGNXVDcHNRdUVPTzVCbklqenN3d2JCdzFrbSUyQktpaEdvTnNGVTBhNnRSQlk4WDcwRjdoMXVKMWZsWGNiUlllJTJGZjNxdFU4a2tJeHElMkZTTlJucFBLdTE3UVMlMkZNcTdqdUNQQkl6MDlYekl1aUppeHFEV2E2UW11OHVSUGVDUDc0YVlrMU42T0s0Y2VWd1Jud3J0OXZNQ1JNaThpcktha2VvJTJCMkhZSmNDVDNDQU96d0x6cDd6bDRaN2xleU0zRCUyRjh3YnhhQ1ZCa0d6WkFqRHIzWUc3d3NMJTJCSzlSOHozYmFYVGFnR0M0VUJHbjVBTXFpa0hPWTd4VCUyRk9waXN2MHlRb2c2VjhINTAxcVJLWjhkVUladjRhM2YwaENjSzJmcWxTZnQ4UExyWG1QZUQ0eEdUMDNmaG5ub0ozNUJ1azQlMkZYRGpsYTZ2ZUdndW1MT2Y1RGNKVTJyTTUzZ0N0alBVYUNocXl0bWF5dEtLOGpzVWZxajdreTNRZ1FqQ2FkU2I5QWcxTTZnY3NPOG5xYUVXNTBxWEwlMkZmUlhzZ0JxeEJHJTJCc2VickU1RVZ5bjV5R2hKWGtwWHBoWnQlMkJsSkpCRXBtc1FIWlVUNiUyRjFBeHhKbUNtTlQ1QzhiUiUyQnJEZDJPZFVUOEo1bjVSR0ZtQjJEUmpRb1VuaUJ5ZXVMQ3ZkY1pURkR0dVV2Qjd0a2F6Ynk1RkNqWDZISnNCQm82SGpGTVZPZVdUcGs2QVFyZ
3dFa2lpMUxOMG5PaVNyc1ZYVlJsT2twV2NtbVklMkZsdTN3VTE5SXVtYThIanc2REkyOEJHdmxaWVhDUlN0YUxydlF6R2Q0ZVFuVXFtOTRhc3VYNm9yU1pJR013UnFmY3lLU2hhWWpLQkR2Z3NpMFVISzB1c1MlMkJKN2YlMkZQcmQ4bUJEamJqVnh3QVpkUWNSJTJGMG15VVhsVTZ1S1puMmg5WE1YZUw4eDdKa21SMTQwNFpOT0lod29UQk5TbmhsVGxNa20wRjBUeGhrRU82RmNQbHduVkt3ZVNYeXpMT0VyeGl1Y0o2YzZ0ZngweWVoZjlDdGxFTHFmZ1dpck43MlkxcDZtYm93aUpXVkt4MTNTeDRNV2dWVjlUOXhFcEhJJTJCQWtyUDFNYWE1SW9qOCUyRnFidzBFUDJCMGRkNmdWQlpuTU9ya255bXJEOWdaNFY4UXI4cHBXYW1hZU1mQ3RBZThBRUVDSVRCTWpUSnNhUTNrU21zMERPREpSTEJKRlphRUVTb2ElMkZLZzFzNElPOE9wamhEVm5hd29wJTJCcWtoaHBZdUNtZnJaVDF1OFQ2TkVPam9PTkViVlRraGFrU1Vpb0dyU0tqZEdjT3A5bXZGWUp4aHZFUWxhJTJGZ0VQcWdSSklNQ3UyNjI0bCUyRkZPeXVRWEg4RmxJaFdOSUFnTFYwRVRtcndaZWxYaldzSDBDbmg3dzZBMTI0Wjk1eDNsVDBWS2cwRW9LZnZ5bHlBOWFGMEh1UENNd3YwMUVCRlk0MzVHbVJGSnlZYUlvY3EzZmQ2VHBTVklEZndHZ0VodGFVaGdReDdRMUpyS01GN095ZWxCUGdkdmdUVzMlMkI1RFFucyUyRlh1YmNWODRuSDFGZUlDOVhVMmswSWlUQ2tQNm5KbUJEVTRabWg3QUxiRWRHM1RhT1lwcTNpU2NDdlByJTJGZXh4WWxQcTJ6V1IxTHdMYnFNRTFqd3FMUlRNcDRrQVclMkZHVEhUWHRvTkdySWRYWHg0RVg0RUVvTDJXakpjWkg5QmJhaFBJdTdodlF2MWhhSHY1OVY3N0x0d25DWFczeSUyQkNLM3gzZXN3dlBBT3lpWWNCZzZudnA1WTlVT05oRCUyRk51ODJrRjR0cEpZS21VTEdsRE1yM21RbnlZVE5YejVLamFqQWFmdWZVQVByQ0RiSmUzWU5rZ2F5UlF3bnJjdU80NHhxemxWZXduR3F4MkRlU3J4RG5hUVdGdGdUNFkzNENTYURhSE9xUElCVkxyY2FsTmFRZWdFbFZqJTJCalFabk9jbGZxSWd4THdvSkxBUzNObFJCUFlQRnY3NXBnODBISmJEdWdSVjlsb0lhRFFRQUhoMTAyRGI1VVZ0VVdhMWQzMFlra1lrN3M3ak9mRktwQlpEWjNMZTkwYnlQR2xuRWtaWmJSV3IyaGM2MlRGN3ZXWGp1WkJPNkVHUmpRYWglMkJ6ZFpXaGFvdkUxU2ZJS240TWgya2RGWGN2VHpYdHMwU1cxUHlnbWlwZnUlMkJ6RmFQaTlMdDF2dG4xUUhRR1NpaUZFZ3h0SHZ6TmNYYnBjNXVvc3JpJTJCSHlnSVJncG4zeG8lMkJPaFB5TUF0aUpqcmQlMkJyejBzWVl5bmp3OFRSJTJGbWZ6blMzVWNRNTRlZDI2bkNTMGljRlZGZDNkSkZaWWo2OUpTNDJoR1BNQ3FTV2VwcTZFc3cxWjBkUEdFaWd6QkplZHdxJTJCTXR6QzVkQ3RRWCUyQm1wcUx3OTNXV2olMkJ2dFZMN2daYWY2b0hQSGxIcW9kOWtsOTduNjhCdTlMMEdWVW8wV0VUSTRDNjU4OEtIWUNlTFNweThwSnJ2TWxPaHhSdFV2MkpRZm5QVjdjVTBJaVdaTyUyQnJWeGElMkJwMzVYaEFMYk4lMkIxZU5yNmVPV0VlWGwxcDQlMkY0aGVPOEtWYjNhQU5EOHpzZzBheXZxMjVZdzdqVEZqQ2JCR
TVuVWVOdVJIWk4lMkJ2cWVpc2RaaUtEOCUyRjZ3UFZTSlN6RHVxSklhdGg2Uk9vWTFzNVFaMWlYUnRyMjg0MTN5N0xsbHdrOHM3eEpjaEtoOFhOQSUyQjJ6JTJGekVXR2szdzhObEtaSnNudVhZdFNjZnhwYUVoMGN3bEYyQkIyTzdqQmxwdVJ2UyUyQkpWSG9Eb2RMMmNycGFmMUZ1dW5JbGhrVHN6Y2pGMkJoUWZkQmVqJTJCNCUyQmF3TnZONWxidGJBYndPYWd0MGpCSko0dEp2elYzTzBYWEhHVG9CM2c4QWRudE1YNnp5Z0t1OU9uS1MxbVZ2UXFoNVZ1bkFqMTBkak9FVHVVYWtKTThjbGQ2UHVVcmJiWFp4MmFRMHg4ZSUyQkQ3ejFyVjFMdk10T3BYMSUyRkwzSlMzY2tydkl4S3ZMeVhGR3pWNGNadiUyQnJJT3RjTG9RNHJtSDFyWUExeUpqYndNcExTRFdBVEZrVEg1Zkp4bjZQRmxxaGtzWHk1NUJnWE5vUFhmNW1PYzNHTlVPY3EzdzF5dTBjTTF2NG1uaUF0a0VUZmlRU3VLUHFSeHY0REt4ZHk4SEw2TnhWT29QY2JpJTJCRXU0dVBKNmkwUXpXR1MlMkJyZFE4Z3FCNEc1ZWZoRjNBcmhOZWthN09SNTNxcG01M3l1b2RlQyUyQmV6OFVjQk5XU3dSdTJRWENhc08zRllHR0VERzNNem85OSUyQmxvaGJ5OW9nYXRObFB0dUdIT04yR1FVNWVBNVdTbyUyRnhwNEp5bE4xTXhNaUR3S0dmUE93R1EwSlNYclBmeTk4UWdqZ2pocnlKQVJ2c2tUNGJXMTRKRVg2OWFXMGw4ZFcyZGlVejlXcVBtTHhDT2FqUEtLYlQzSm1semtFaFhHMVhUSkhmUGs1Y0NKMlNNZlhtdHRNcmZwZklvdUN1ZzBzOGE4Zm1lJTJGSnBlTnZtSHF2dXJsQ1FqbW9Ha01oNUtWcmFvTlNxb05BUjFPNzBqbzB6QjRUTG11MFlLanlsOWcxdFNjeCUyQnM3ZUdLajlpUlYyT20yZmJkJTJGYURPbDhKRGxsVmZsQ1MzdG56UEZ0VFRqVDY5SFBuUDJ2eTZ0QWxLbyUyQmFUUmkxNklTM0FpbTlFMUFkQ2p3TGZpa1lBWjEwdE9YNnFrN3BaN0RlNGxqZ2tNV2ZBJTJCTFVobmFNcnNGJTJCWHRUbENwVk05bkgyUWF6SVYzTlRKbFhrdTJhVzlBa2gycHB5YmlXYndQRjZ4ODlaaVAwOFRLTiUyQjBNb1lrNU9DTUN6RUdtRWxPajh4R0ZtdXhrM3dpcTM0NWdSRFE1d2tHN1lFdm5ybHZiQVNXMTVTNW42NlM3MUFJWm96WCUyRkNFSWRraGc5YUU5V0I3N1gxWjV5ZExEJTJGU011QiUyQiUyQmxZUHZ2QkdISVpaalVrVnhxZTlNJTJCWHZmaWZnQ1JpSm5lc0pla2Y2Njc5SEIyM0h0T1V6UkdaUzFlUiUyQkM0NUxqdmxlcnF5SmhUT3VPVVZGTk44MWdHT3NXcmNHOEdIaENPZ2pqVDB3YmEwRlA3VkhNbG05QjFtc2VtaERJeFBnWWUyakNMT2VyV3V4cTJLaEY1b3paazZhNDc5RGNJcW85Y3JBJTJCMkx0bHVaRjQ1dEl2VThKZUxYQkFQJTJGT0wyOThyOU1GcVRmMWtEb2tTMkRla2VSN1phZGNGSldOdnNKNjRCRUtaMHRydyUyQlRDb25iRVZNY2dZZHFVbUJXTUt3TndxRVpKRFRnVjF6Y1BiNHMlMkYzbEZNWm5hckc2YzZ4ZDdRdmxsd1p2NEVsb21MRkhnJTJGVTk1SFBtd0tQdGxXM1Ayc3cxRjNOUWgycXUyTjI1V3QycFFmVk9xRDI5ZVVSN2dzUExBalFTRyUyRnMwTUhPR1Y4RTRrcjJIOHJyMjVxaFFpJTJCdERmYm42SVdaQmhTbmpJe
CUyQjFkb0RVcXY2N1hLc2UxSXZFU1owNEpJcHFiMDN3SnBJY1YwekdLeG1nYnlKVmw4YmdQRWhYR2liZXJ5MjgyJTJGZm16TnllZjk4TFEzN2ZqbGJKcVF2TmhHJTJCWGowNXo0bkJMTUE1WjNCZjVXZ3gxcjNNMEJRTWUzQSUyQmpxTDNDU0dveXlxJTJGODR5M0c1STJKMTBsUjhGWkJmMUNDR0g2S2tINERsZFBzRHVpWGJiRkppZ3JJVGUzNmVObVF3N0FYRHM1WkhKNElBT2ZnYU9OdFNaZCUyQlphcks5aThUVVoyZkg1UDVNZ0VibE9zb1lUajV0WmJxUWR0Qmlmbldhd25Ud093MkhZMk0xMnFRTHJxNTR6M2xWRmZ6alVsYkVUakpXQjRmNHVJQk9vcEJGalRFUFJlJTJGZmJUTkg2WEs0Y2UySmxyUUhTWmcyU25TR0pjbyUyRm5VbWZVWkU0OVE1R3d5MWZpTkZSdHFvbWR6aHpYUGNFWW1sNHlUYSUyRkRqbnJlUzlRanljck53Uk9kYm5hMFNYdW8zMzYwOCUyRkRGRyUyQmN3c2JhZyUyRndWMnFaYWw1eEVrJTJGT0JIMTRZeHFoOVdEcEVJak9LMDNHOGh0dnZsbndPSEk1aGdTMVQlMkJLOWIxU2UxWDgzSzZicFU5bDBOM2olMkJFOXklMkJmQXlhJTJGUTVFNW1VVkw4Q1FFJTJGNGtnMXNQQkowSW5tOTk2OUs4Y3BROVZOZUNJNVJWcTJCZjdDdlkxa3YxUWZKRzJwbk9HeGN6dm54U3clMkZiNnpmMTVxdzZXYWQ5bkh1RnpDMmxOblpLZmRPT080ZjM1N05KcU9zcm9DYkRUVlpGUmgzNmhQT2ZZdklPYmFkM0htMFdWSzYzZkpVODMxSlB4JTJCOE45ayUyRmxtdDN4aFc0MHlKZ1lZd0VKeXE1d2hKaGFxWEZmaXdTcTJ1Z05PTUMzTE1vT0pBZlJicWt2MmlNNjNqTEF0OHVKbSUyRiUyRkM4VE85MGVpaEtDOEtxbXVoMWxwendSNml0RnVKN1g0V2JOS0kxbzl5d2FVOElnbUU2QzBmVWt3ZlF5Q2dwN2szajh0VnRydlNVVVZwYUI5SDlCSXo5T0N0eXgwRkIyOHJYMXhCbGx0ekNVJTJGNDhpNkJPJTJCbUxnZmxYQUdxV2hLcmtWdFhTMmZlYjZ2VUE3TzdGa2lUSmFSeWp5U0R6bjM3ZUt5T1pTYlU4TEhWTGt5bFE1SnJMbEloZXJyNkcyNURTTlFiYk8lMkZZJTJCM1NjRHFKaWI2R05VMlRROEElMkZVeHVoNnhtd29YdlQ0Yk1ZUUN3elNFT3FUM3dqZE5OdDNMNFMzbzhtZVJNSkZ1NzVsbGJENWRYJTJGZEpVdVdWalVndHE4WTdOVmVmd0hhUVJYUjliNzAyRHBpS1RGRjh5YnRMdmdRZkc1MCUyRk43d25pUGVDM0xJdSUyQkFGMndURWR0NVhvbkJPQml5VmNUTEk1YmNQQ2Q2QlRVaCUyRm1UJTJGUUFBaDBJdVBLQmYwcUI0WDElMkYlMkJxSVdHdUJ4aVBLNVJmNW93c1hNdm91a1IxMllFT3c3dFBvSFY4MHM0WmFtcmJodFl5UTR1ZU9xakYwTDBjMVRvc0hyaElDS1JBYkNlQVpDa3h5ZU1iSVpEYmFNYWVQJTJGNHVrNnRod0hjdVRYekozZUhPbTlkeUp2OU41N2Z2MlMxVE43NjFldEtrbk16RUFFRUVBT2hUUmlnJTJGZDdJcTFuRlQ2UGZncGpPT3o0dkI4Y05yckNOQ0wxMnpleWVGRXBPb0FOSWNzT2Z1andqNHJPb1glMkJrZWh1OHFWMVFjT0pzMGs2a0E5Mk8xT2NaUVJERnFpVzg5bE52M3YyWWpEeTBVWVdhaW5sczI2THI1aUFxekhqUGE3ZndUVWFyTm9yJTJGd0s4SmNoViUyRlhRc
3RFYm5CZ1FwaiUyQjBVdVlGdzVPcGZiJTJCTm5hSGROQlZyYldqOFlqWnp2cmZYVUlPTW56MCUyQlhCa0hKb2FHU2tTbzFzMGxNM1VpU1FJNnVBRlRhcjZyRVJMVzVNY1VVc3Z5TjUlMkJjdDNUcUprejJJank3ejl2R1JORFdtbDFUTk5VTlhTeW9UR25WYjdyMHMzMFNXeGxpMDViUThQd2Z5djBrNWJsRFZyZnJPQkZYQkp1JTJCR2xKSjcyd2Q5V21LJTJCZktxbmhTWDI3enNVMmd0QWhPRXcxeHZrYWRyOHFzR3RNbjdOTCUyQnVzTlc4QXVzTnklMkY0aUNGajVhZm02REJsU2hlMlhMNUUxWkV5aGM0R0YlMkZXbVRHZDNxME1Oa3RKcVprSzJRZ1pQSjlnaTg2JTJGTWh3JTJCWUlLQ05KNWFBNTk0N1hkT0V6VndVQ3clMkZJU0pxa2Q0SzI4T25kdm5EU2hINXlmc1FWcms2YlY5WEZsMzRFSnFlMUxkWTR5dEklMkZtZ21qR09qOG8xSWZOZ1ZXZTRxaHNOdFlxU2NBTUFhUFJTMDNrdnlVYlh4Zk5CMjY2T2ZoZms1QU03bHRHb0tORmdJSmRNc0xQJTJCZVh1T3I2eG5JYTBQVXB5NnF5RVloRmpIbVE2NVMwSmxRamtRWWFxNiUyRloxcVNvSm1QNXclMkZuRnJZNVYwbEREeHNjdjVTRjFpT0JXT0VlcGp1dWk5VjloMTVyNmlBcUxDNFdJRXlGdWhjR2VGSDRyNlFvMU01dTYlMkJYTHNYMUNkcVdXVE83TnU3cXcyRSUyRjFLWmJYdUp2T0EwQW5KNUF0OGJoQ2dYUExOOFVhaWVpZ3g1NmxOUGtJdmU0Z3JZdWwzRHRJaERQS2hrYVlmNHQlMkZhZSUyRmYlMkI5bG8lMkZEeUtPRW1RQW9zc2FoMSUyRjBEU3FLMzJmVWZYS1VrVDdJUWolMkJua0N6U0xFTk1mU2Vhbk9qUElYdmRYRW9saEpxclJrcnlMZEF3RTRjSWh5V3dPeWpBOE9YOFlFQ3BFa2ZHUHBoN3RJSyUyRk5rdHJzdGt1U2FrRkN2QWlUR0lHVEgxbVpBTWoxQUVjZiUyQnA5akFabEZ6OXN6dm82WVVNQmdNM3VpNEFqd1JEN1p6N1glMkJJcW4lMkI4OVhQNlNPJTJGcyUyRlVKQXJOVWglMkZ5THdZcEE0R1ZSQW9OWSUyQkJSTTdaMkNjekRyUHVlRTR6bHFsQiUyRnpmVDRFdk5ZendyZXpxZ28zQVJUaGZRS1pOMENLclI3eiUyRlhjUnRjWUJBSWE5dlVpYiUyQlFnbiUyQmtIS2xxeVYlMkZCOFloeDBWb09wUVclMkJjR1paa1QwSjIlMkJMVnlmeHFnVHFORVd2NVElMkZqdmpLeVpwam5oeTVnc3IlMkJqeVpjMGZWOEF6JTJGJTJGdnYlMkJFdGtYY1FEYU9OcTU0dTNmMnhxRklsQ0g1cCUyQlVkRjZXbmczSFg3UjclMkI2STFCZGM3MG5ydlNQRjN5Zk9kQU5ybnR1U3N3WXYwZlBINjduUlpqR1JJTyUyRnZrOWZ6OHRZM3FKZTZvRWtIazk1NWRJRXNhZ2cwc1JOOGZwNWFKaDNzdmxaY0wycjlDeXE3ZUhhNUdYeEs1NFJ5UThneHNjREZZakgzWTg5d2RFU3NJTkV2b3ZxZW4wdWltSG9KSE1yQVcxRkVrYXgxT0VoSiUyQjdXJTJGMGU2c29Ca0dJSVpvM0hIRVdoamlMY3V4UkxpYVduUGJKOFZpRU5ZOXlVcnZRV0tmWWV2UHJMeXM1JTJGeHZWU0tqUHI3S3pjYyUyRm1aMkNrWFA4a2JSJTJGWEpIMUUwMk9Zc0xybGZyWnFMOFNQZ3lrUHAlMkJlU09WRlhsREZ2VkluJTJCTDVnU2o2OXRRd1ZneUxSZ1NZZThkWHNkNVVaNWZ5UzI2M
Hg5YlJEMmpnOHVlbUElMkJwQUdZY09BelYzMURYckZYd0ZFcVpVVndYWFZtdWxSQXdYMGI1UFJ0bDhtekQ0WXRVWlJERVU1bmszN2hoNzluUHlKcWJ2RHhSOUl1N2x3ZEM0TldZNVpjS2RlWEdXR1dsdnlubzR2JTJCdXpxa0hkTmRFTVpqSmxOSHZzNmRuYm00WWZVJTJGQ1ByV1RReG80ZTd0c1JNZ2V5cjZqUkVPc3RrZ20lMkZWYUFwTWtmMEVWVXNOTXBoVDQlMkZrTjQlMkJSWG0wYWVQRVNRcnR4cG5ld2hDcThzVnFuWExCQUxnM0Q5TUZYQTNMdDV3SGNGbjRMJTJGOHJ0aGZ4a0w4Qmt5WExrJTJGVjB4JTJCaWpoTDg1Q1JLRHJmdnBJWkdxZyUyRm9FM3glMkI4cWg1R1dFSkZsZ2FRTnc4Vmp0bWxzNCUyRmtyNm9IUHQzdkxoJTJGb25vJTJGa2tuNUxNZTVSR0NiemoyYlBaNHlxUkx5VjJVWW5XaiUyQjF6Vk5BWkIwYll2WGxuRG80YlJTTmQ1MWhudG52cGVodUoyS280bGlPayUyQkZaeGVHNzBqWGY2N2pHVWJ5b2hPZmprajRFUWFJYzB5dSUyRllwaDJxTDhvbSUyRjVNQjU2UlE3bE9QYVJnMTFNb0EzZjdNRDRWU1NwR002aDFmaWxSQlN6NHRaZkV4SHpMSFluazJveW0wVSUyRkxsJTJGSmZIanlaYWYxJTJGNGE4d3FNQ0htJTJGM05Zc1pLZ2Y5eHI5cXglMkJoS3d3M0RnRlZzOUhybkV5aCUyRmNzSUhoNExhU012WjRZYWpObmhzUnczeVBla3I1NU5sdGlQSHQ1Z1EydVp0c2Vrb0k3VU9HM245a25lUlFDamFVbEtPbzR6UGdIVjd4SHU3OTVMRFElMkJIWXBIOUtxNkxWSTVRc1QyOVU5dURJSzIwd0gyNXpUNFVDazNqWDU3ZmhyTno5WTlXc005b05VJTJGOXNIWFN6R3FWY3pucFZJRDB1eXFqY1ZsbUtpWnY4NGpGZVduODA5RElYJTJCWmJRZTlOSGZhNXdMOUdhMWFEV2h4WjlhaGZqVmNCNkFaNGlySWpZWDM3eUgzTmQlMkI4cTlaQVdtTmFpSFQyN2lUJTJGUG1IYXhRclZMMWZ0bm9pVFhUR1padXJIUTExb3Y3M2R2ek4zOHlrMWZwVldIMzhQYjFTdHdaNHBMOTdGcDlFdHhZdjhzNURtUGFLR3FyVmlTUnI1Tmt3OHJvNWVUdGNEUVBYaTg1OWYxRkdvayUyQm4lMkZqbzB5WSUyQkR3OE0yYzlzM1ZwYklMZkt0NmhZdEV3UjRXZUYyVlRPVUlGSnhPdCUyQmsxdWg5VEN5UGglMkI0U2J4YUZ2dlEwTmJZJTJGTkxrTk9nUVhWbEx0NzglMkZldmo2RWJNMk1HJTJGc0pLaHIlMkZ3d3FXJTJGQ1BoMFcwblNsZzBYVFhnUFF4NlZwMzVneUlqbFZPeWNvc1hQNkNRNlJ1dUZ0N2tDQzFFcFcxcDZKSnZmT3AxT2tCelFQUXJnOUh4SHp5N2tIMG1KNFMxJTJGdzM1NFpZRk5Sd3FST3pVT3ROUmRjclE2S0FJRnZiTkglMkZxbmNqdm1TYU1JN0x0UE85YjRaZ1FTblJ1NVBoSnlkeVF1WTVOMmIwVk9VcjMxVFpoY3UyQUJrTlNJenpWNmpNcG5YSDJ4ZzNsaGtPanptRmIzOUMwViUyRjFCUzRHeXYySiUyQm5KMVZnV28lMkJaZ1NPOFhaT2NlUmt5NElDZmNBRE12JTJCbFVzS0IzcU9XaGNRUnBxUmxtODBqcmtsd0NPaTlkU01JUG8xeTdobEJMdnhkY3dKOXFFWkZyaVFRS2JJbXpJZnhuN0J2OFNwWUpXRyUyRlJBSm5rRDkxT3BJR2tDUEhiME1HUyUyQlR1ZHpKZUZqa0g2cEtCdnBWN
UVOTFpHNFg3NE1DdEQzd2M1Q0YlMkJCVFF6SHZxd2FnZnBuJTJGR0JhTXNGN3BNNkRaRSUyQmhkc0VlMThpTWFzaUUzdFIlMkIwN1clMkY1NFhmZjc5ZlFrUjI0QU8lMkZ6Z056ZDB1M1g0TCUyRndxZDdNYnhPeE5IejR5SGdHS3RKaE9yZ2hNTkhXcjVWenJ5TTZ6dWNhc0R0QXYlMkZMWkVQUHROVjNjM3VackZjUlJYa3IlMkJIbDl4dGZHMHNkTzhONG53ajRhSGp5dyUyQjkybGIzRHc0JTJCWW9RUUlpMUsyWjZLQlBxU0JOWlc0NzlDQ0pzV0U2Rllham1XVmtHSlpKVE9qUlJmTDJYUE5aYW40STRuRG8xTXUxMTglMkZMOEhrS0FaTkxYRTVIZ1A3bFBQcU1wOHhJd2NaOWN5ZmFGMVJQdmYxUm4yanVNJTJGMGd1S0VEWiUyQkpSaXJ3VlVDY3ElMkZWdkRKVHlsVGpaaiUyRkZleSUyQkQzZFdkazdFMmJwZFJpSWFEdFdCWW1tREZlZ0c0UkVzWDV2d1NTSCUyQktqdjk1N0hJQzVQN3p2eW1PeSUyRkJWdm9NaVFoVXNQcGxBTUNxVHJMdnJGTSUyRlF6a0hRZSUyRjUxQzVDcCUyRnZ0cnMlMkZseWFBcFo1bGl4QVdlcUU1VUlmNnJDdmZ6MmpMQ1hxVThGa0w0eU9YJTJCME95VUZkN1FjUFNudlVKT0dqVWtDVFpoaUp0dUg2MXpBM3NjVHo1QXpaWlJiaHIyJTJGWmdsNllmUFolMkY0eDFJaE1IdGxBJTJCTHV6cldseExZZFZLdmJCenlPaDBiOWdiJTJGMll2alcwQ2RPeGdEWVhYelNXZTRnckNDJTJGU2NzWTh2dkRwS1h4NVU1WCUyRllONWQlMkIxZWtVYTBDMEdFOFMwa2JZZGlUOVp5Ykg4TDQzdE4zN3BMYXVXMW4wWDI0YjV5JTJGbjZ3dVNhc3NGOTU2cm5vOHJXdlJkcjZOQ25UJTJGdXZ3UVZLJTJCSkZzSEJoJTJCbzVnaVMwdVY5OVdOUDFxQ3pKV2JldkVZSGZXdkhoUEVJbTJ3ZDRXcnJWS0FuRlczS3RpbTQwTjM4QmtUUGhNNU9BV2Z1NE40cWg2NldPQmYlMkJrZ2d2JTJCVE9oJTJGclZqUDRYQ2haeVFvWkZCSXdFZTJ5VDZVa2Jlcm9qYUI2MlkwclJJT2VPWklqeWtHR3RIcjBRaE1uaERnSEFLbHQlMkJ6ZFpYSkJwNXJoZHB2Q3B1V1ZlMmdMTFVUQWhMdHkzdWlZQWtvZzdlTTg1UnElMkJoamo0d3gzbDRlTVk0dkthY1JCQ0J2SE5WS1ZJTXU2N2traUY5dk9YUG1nUFZ3cVg1b0FRSmRscUh4SVR6QXV1Ukx2MmUxaTlMOVpJZE1CNUElMkZaYVZIZElRZzY0TkRjZzdiREMlMkZoNXAlMkJXYzVsRGdQNGM0eCUyRm9SdEVIbkxCT0hQcE5RSzhUV0RPMUdnSnp4TXVrZiUyQlNTZjZwOHhVdkhVRGZmdURYaDZSJTJCZnlsUWh5MGQwZjQ3NGdCMFklMkZQV04lMkZxTCUyRmklMkZwczVtUEl2R0clMkZLQ0RwNzFVbXhlODd1TnVjV01qUEM3ZVNYbWxxS3hWWFVuUGRBdkpVOGdJVGI3MDFJQ2pkRHNRak5rRCUyQndQOVhqM1RRRFcwWTJXUmFrNWt1RzIlMkJWM2NXTnpOWExMQng0VEFkdGlhSnI4UEZzck8lMkJLQzdwR1VPbjhYeUxoUEI4JTJGY3Y2eFQlMkZzWmJDelRvVEJVVDMlMkJHSiUyQkhralRLZVVraTBPNWYxZVZuSExqY1IycURpTXNYelFFSVBaOW4lMkZVbjZZMElvUllzbjdjZyUyRkIlMkZNdXBUbUhVUlpteDBJVjJ1YnFRcVNFNW1maHNibk9ZZDdYcEx4WU5XR
zZRM0YzWDA2Qk04REEyT1lHOTdGJTJCbkFkRUZrQk5VdyUyRnRpTDV6THFXWEhiWHNkcDMwd0FsZDBmMUZhZUgzOHJwU24xbWNXR2JYYnI5a0dCZ2RwUXVtR3NGbHBkWTUlMkZuZ1RNUFM1SCUyQmxFS0Y3ZTJwaExDa2pNemRPdnBtM2ZEM0ZtN3l0Rk1aeHB6NmRTd2JmSSUyQlNWdkpxSEFWdGxtMFpJSlhRVElGYnVNOW5xMUM4R1VuSmMyRjl0U0dWZnFsb1klMkI5QlJJYWxuRmdHYWhxTjRoRUZ1MzRnSUlTTkJrSWtGSUlhUnlsZDI0eXljZ2FkUTh0RDlYaUY5UVRvbTk3TnZUM094TzZvWVNnc0xoRU4lMkZZJTJCbVg4bEZGamRRUWQ3Tjl2blYwV0lQWWJXR0IxQW1GU083NjhCUllOZ2h1dlZhTEZuRUd5dXNUbFEwUk5QemYlMkYwaUpDeUY5MzlTcUtabWZyMUZpYVl0STFNUnV0MkcxNTA3em05aVVQb3NyV1dGeWJ4cVlMN00zMmJOYzlVdFJjcm43NGxZQmd0aXZGUmxXaWcza0tQRXBHWDYxQ0dtVWJ2Z3lHQTVsTGprUmFDQnY4WTFoS21XbTBZUXZBZGU4ZXBvMmRCdEUlMkZ4RG85bVhSbkclMkYlMkJWNlYyM1U2Q2pvNVQ4JTJCV1lRQTJqdHFJT0hxemdXQmt4ZUNZb2M4SyUyRmU3ZEdrWVRnVUxrV01yRU1rUXdYMnJLQnElMkJPOVFVZ2draE4xRGlYR3VUZmE1S21WaU9RNmREdzN2UFFDQVcwT3pVN2RTU0pGdm1vUUZidXRGT2thb3pOcDNzcVpjR2JobENhYWllSDdLZXBERHhGWWRwczNzNWFib0ZVeVdmTmNrd3ZGJTJGOWVzUjRoSUpPOEZ1YlhPU0JiU3FGYkFZZjglMkJMSXBQZlYlMkY2WnRGOHdIQlRtbnlsaUJEaiUyRjU5b0lhdHhPZ2NpTWxUTktRVGNlaWVXelBBYXNCUWdDREJnVXprNlRRQVZaZjhBQUJybUJUQzhla0dqcUczOFFkSCUyRml4SjVNYVJUbEtzbnhBeDVIR2JrZWFLUXNRdVJacXRaRCUyQmg0MWp0TGpKR3M1Z3htcGs0c2xpalJidWpkVFBEdzlUeFFsa2NiQ3A0ckZaS0RFeDZxUjJxZWpDVnN2dDUwNlJtSU1jNGVpc2glMkJscHRqT3BodXZqY3Z4YjRvY25LSmlEcnVtT054Wk5ud3NCNVglMkJDajRGT1BuJTJGWUE0SWFteXo4TUwyQ0pDcW9Wd2JlJTJGNzIyYkVGTHIzUEdmVHplNTkwMEdGWTNDczVXVVBsMVRHQ2xjU1J5bktyeVNTRTc4MlIzRFclMkZSV21pVU40cHo1TTJHeDNiRHNRV2U2eDZBUzRoT0cwU08xdm14cWFRcjVSY0s1UnpDUWwxRjdDQlduUlpVeUhwcmxKbCUyRjF3OCUyRktXWUVyUFFtSGNLRXpEM3FLb1lkU0hFT0RtTUolMkZZNDYxTVpvb0RBTVVtOUVWYjlwN0RHWWFEYTYxVkpUUkF6Mko3a0JJUTlEeFVEaXNJY1hqbVZ1VjFPN0FWZnpOaDk1YmVuTHo5T1Y3S2slMkJqZm1pbjdGWjYyZlA5bnd2WEFJbUx6M1U3YkM0VWphdzY3JTJCNiUyRng2SUliUzNSMVpFYTlxR1lEMGhyQlp0RmQlMkJNaiUyQkl5VnNuNlpJa2JoUzhhS2oxTkNCbExoalhTRE9tUGR4dHMwMzEzV2VTJTJCbUVYNHNZOU81TVdjYmxvNDlyJTJCcWx2RGk2WmVXeGo5c2lYZnJUVnRyZlIlMkZibHpZNEI4SGlWOHhYSkRQbHA1TEFocU5XU1RwMzJEdWxVZ29lbjhzRnB5dVNEdHpLaHMxRDAlMkJ2NDhUV0xINmFsQSUyRjdVanBGTnppOU9LZmQ4c3U5U
GNZYlZqNUIlMkJDdHAzand5WXFTQzkyc292eFpQbEZVNjdBMkdMNFRKZGJ6cUMlMkJRWEM0MVdvU3dPZjhoZE5rbHEwUWZZQ05aM2VReGRNV053QWZEMSUyQjcwN2ZEWXBFSEtCRnVuQ1o3RFBjanZBQ3YxblpPT0RqOHlRUE1NaFRvOHZxV2hOZzA1eEx5JTJGaSUyRkQlMkJUODNzTVcyQTVKU1pUT21ENEt4RFAyV3hQMHpKOTFvSXFMcmtNN29VTCUyRnp2SDB3Q0tqd0olMkZpck1Eek0lMkJaczZYWENwJTJCbmh3dExESXN1TGVtc05EMzlQamZUQyUyQlc5Y0Zza2tGdSUyRm8zenk4NUh4STQ4UTB6Nzl0WEVrSjMlMkZGNmtleFQ4UnAwUUZSRVNEQWVPbU9QUVF5OTVrbmplcVhKdmVjTEZnVlFwUUU1NFQyQUc0b3dnUCUyQnRRNE5Fb2xUWnVZU0NTOEpZWGIlMkZjSEFWRSUyRkFuZnc2NFlsdm43ckhXVm9QT1FUbiUyQnVsUjZ3R3RkSUF6Sk1KdlUlMkZMY1FKcjliN3EzMkhveWxaQ2VxakFRTzRtT3ZySThWZG14T01XZnF6d1pnSlQ0RGVzQUZTa1R5d1B3Y1hORCUyQkdEJTJCTlZzWHhBS1J3SDM1MCUyRiUyQnFLdkklMkZQNjRUZ3V3T0lnV0ZZRzN1VjVEVDNhM1ltY00lMkJtSkpCZTFTQU9Xd0g4MlhRM2s5eml6MUV4bThjQmhXbVNCeTVXd3FZcXk4JTJGbzc2MVFXMEdkVzNiajJCMTVUVElBSiUyRkV2RFludVRPNUdodGxxSFgzRFEzRlhNanJJU2pvaXBxSkxLN1FnanQ1endrb2l0cSUyQiUyRk5EMmhvREslMkJIT0NHcEtIaTZ5OTMyRkpUOGdSYVBoR1klMkZoN211WkVqZFJralE5VW1TQzhsNzVWWkxyU2lxUnc4NkZpYjFhdmZzVmd5NWM4MGRWV1dsYiUyRjg3T2FzTEkwNEJ1M2M3ZVp3OXNqYUJWbGF6VjZwQnolMkYwWlB1Ym9qdkpCWFlzUmRkSXVGN2d1NEE0U0tST1d2RU5FUWpYTjlvcnYlMkJ2OWJOYVI1T0x3Zk52cUh4VjhWZk1TcVklMkJDOU9hTHZ0JTJGUDhHdW83YWJuSUZ4QUElMkJUOTUlMkJTallhJTJGbEw1VnJoTVNsSVNYRlFPenBQMUpYdzkwbXc1RTZGbm9meFA1bTUzaG0xR0toMldqOFp4OUx0cjhBWDZSVlNNV1NKWDNwZ25nVkl4NjB4YiUyRmViZ21qUzBvV3NJU3BaV3Q4ZDBnRmg1SnNhY3puUkxYNnZidGQ3ZWFrNUN4NkdBTkNweVpEUFlQdjhmJTJGMVY3ViUyRnFwQWVGUExtUE52OU53WmFtVjdWZG0lMkJGaWtiYmlXJTJCaTE0QWNOSFJScVFudFZZcEc1M2o3TFI4MExodTk1OURONXNrRUpGWWg4Z21uVWRia1J6U3B6JTJCVnloSTMyMTNjT3FoVTdnbmlJTEhJVDQwNSUyRjN2TUo0SWRIVnlHeFFFQ0ZPdFolMkJ4NkoyYWFGUEZwVjJoekZTRlNBM0g3NzNvR3RSOGpuTzJYYVNZckdSZTY4V0ZaYXF2cm1xZEp5YzN2T3ZPUnRhWjJodnQlMkZibTZDVU50S0x3Y0FQemprTTdjY1dqQzcwWmtrSWQ1T2FSQ08lMkJ6anRKNm9WVjBGeE1OWDRXNlNqOXdKQW9xbTNJayUyRld2M2VDdDdtTVNmN2dBYWxwdzJWODY1QVFtSlFoQU13VXRWRTNnRnQxJTJCWFIwb1dNdG8weHlIRzElMkZQR3BqQjNIU2lPVTA2MUZmYVNxS3oyYk1FM2MyUnNtdzFsdkFLZjNnZHE5M0V4aU45JTJCalRxZ2VPcWdSM3I2enolMkZibEklMkZxMkVZSDQlMkJTM1B1T
lpyNGt0dkhSTjNFTmRnd2NGMmlZUGhJdTQzSFlHZUhnUWN2aGI1ejQlMkZCYnhRNmQ4SSUyQmMyJTJCTDRRVFBUelVYUXF5NVc4cXZqZSUyQlRONkp0UndUQ0ZEZWhNWnpCejlMc2xwS0pnTk9FYzc4dHlGczZ2Sm04SG1CMFpjandGaHBiZVE4eTg5Wm83cGtUcmRXNE5OSWFxOGhrZTVlTnJYYVpudDZuMGJnSWdDTk9QYndVWGxScGQzYUU1M0VYJTJGVUxKdHA3N2trZiUyQlRtNXNZQXNxUUM3M2Y1T01hNENEQW1HTGN3Y1p4ajlqZHhBT3pOek5MWnF3RnI2a1dHNUVoUHI1S01sdFpHUDVhJTJCYmNGaDdHdmdWMjB4Rm03YVVEYkdyY2taY0o4WlVDWGdJUFJyREJoMmZubnVaJTJCYzlmaSUyQmhVTE9rRmdXQ3Z6T3dETmJkZyUyRml6UGF0amM0amwlMkZuWW5MN0JIdmJzdnIlMkJPdTFKaDBDMHFzJTJGVlJnMTJkZjF4MDRySmZ4aUJNT09ZZTdjenYxNXNzbUZoYlFmOXhlOGlsajFLODg2WFdtUGhidmNYRUtzaW1yU3huYlZoWlVLcnNlV2hKR2ZoM0o4UHdBVEtOYUVtYXNPWk5JdjFYb0Q3QVYyRTJDNmM3N2N1eElMMHFRMExFMzY1ZUFlWldIYjclMkZiSTdidWpKaE0yNUJ4JTJCWE5icDk3V3dLUG5raGI2VndpZU4lMkZaNkNYdUQ5QXFheEJFa0t3YmVQWjJ3N3ZicnRadTVhSkxkclVLVHRBcUh0UURmZVd4UDZYWlFiU0pHSHFhVlp2MDU3MFhRRk0lMkZhTHBVQ0Z4QUlaTVByMGgxbUgxbGVsWnJ3cVBSNHdaT0o2NzJYY0NvTTNBbENzTnlwN3BRTno5MW15SE15eUU3RkZLQU1nbEdJTTJnY3podmd4Wm1SeWRYd2x3dVgzMTBpJTJCVlhmJTJCZWQxZ1FGQUNZcXdnV2NUaDBOQWhHTFlFWXNYTjJ0MjBac3RFUnh3Y0V6WSUyRmtWU0JWbGc1RVlIeEZFZnRYejFoeXpVQ2VMclNXZnRzRng2VFBZMjcxeG1pYW9mU0RVWmwlMkJDdk9RMk14dWxqOVY1ZXdmcEJkaGZGa04zUWJVaGhiM1NRSFdRUFZOT1ZIdjc2WHAxUkV1RkRBcmhLalVEdjIlMkJJNGpLN1BRY3BBUVR3OVB1V2Nmcjk5ekpsN3MlMkIlMkZJTFJvaTR3UUVRT0Z1c3BTOUl2NkklMkZ1WkdUbXIlMkZCRUJKTUVSeDVLMTV5djU5eSUyQndMJTJGNzNvUVpOYyUyRkY5VCUyRiUyQnJRVkZQeUJjS0xEYkNoOTZkSVg0V0FQWHBwbG1JaVZyeEJyV21iYnppakxOdlNmaDBRbEZrMGs2RWk1VVhVVU43Snp6bVVoQllDYU96UjE3d2RJNk0zWTdCWWswMFBralpYNUNRYkJYNiUyQkt5Mmh4T3FUS3I3SWZjMzkyUzB0SEp1MTk3aGM0VHRJd1lBTzNEOU5jRldYbWtVZTFuUU8zRmF1NTVPRzVLbmpkbzN3d2ZhRXM1ZkhNRHZuanYyNVZiQVFuaHFhUjZPWmNYZWl4NUQzUHJRUEFoMmUxUnRId3psZVVYSDFCY05zc1RsNVpva3lJSExKT1l0WiUyQnpENDV1Ym9KbkNWbE5oMnBSa3ZMV01oZWpWT3dQa0lXOFpqcXk4VGdsNnhJWjhDcUVYYk9aJTJGUVo0Qm0zY1BMJTJCRm5YcVBialN5bjMzbDZsQXBaemw4QlhMb1J4aldkZmpVUzhNa0VYSnVRdmcySzg2aGV0a3ZZSGxqYmpMSHBERmdYNnBHWmExZDA2UGVhZVVmdnNXZ01PUFNpJTJCaFlWTHVSQjFPcktOY0RZdyUyQlhzSVo5bndHWEoxaDRnV2psbElyMUpvNzlVd24zbEtEO
FZ2THFRUlNMTWQ0S2pjNzY0Vm90ZHNGbDBLb1hUVUlWOVJ5QyUyQjF2MTJUMjJOcW8lMkIxazZMZ2hQSmFVVm5zR0o2WE9xM085bUElMkZyREF0TUlTJTJCa3BRRDdqMDhFcGU1NXJZbllYV2Frd0cwTlg3dEFHQnNkaWdGT3RqZW5VM1RuJTJGbThnZ2k4V0N6c3U4Mk5EbEN2Q1Vya1hpUEYlMkJsViUyQkM0NnliOHNTVG1URjlBR3FKQXRxaGljbXVFR3hNVEhKa2NOczhzVmtLbXolMkJJTCUyRmdUVkFya2Rkcm4xV3EySzRjRHhaWnVnS3M4ZnhZS1ZTRFNoSXBCbFdieGZHbmpWemFzWTdGdGVUSXJndFR4eElKVmpLU1FqVmVWR2tJeHZWWmRUeUN0ZW02cEQ4bG5WTE85bjNaWW9ZZzNFbUxlYnhBYjdKT2oyN1JjYyUyQkd6blNicGhzemZsZjNlUUZrdzFaQ3BjSHMyUW5rcFZoc3o0UlZCbjh3d2VjakZZNkhWbEpvbTJCWlB3cGplUzlGVTBmRUZjTlN0ZXNMJTJGNFptQzVUQXhtV2lqdnI3UUxpdWdFaVRtSjdSa3ltbXNxZFgyZFVKaE5SaWlSeVJTd0Jkd3I1WjhlMmNSbjdEcE5LQUdUd3RXOU1qMUx5Vjk1alh3Z1ZzWVFBWTFqUmFuQnQ3TGpEUGgxRkgxUmtqTDBjVDh2UkVuVk0yNSUyQmVFVXVOdnJTV2hiNElyMGtCb3d5b2dmOGRSQnB4WCUyQkg2dDlTTFJVSDM5UlRBV2x6UDJsM2dnUEhNSG1FQlB4JTJCSjV6a20wcW02dUl3dUJVWE5rSXNtRFJiRFMzZDFqOVNweXB6akpvNVJBbDNNJTJCb2FsS2lTNFZCWFl3UVolMkJXQ1hXc3NsZ1ElMkJTblZjdWJjUEVsMkRMTkRESSUyRmMxMVFrdVZFaiUyQkVsbGMlMkIycUtMNDdnJTJGZGU1NVFpYXdqRW53eEVpN0w4cXZCTVdHTlI2MjRWN3FTWVR4Q1JNNFNKdDU0amZZQk81R0V2dklzMEh3ZXJoTHF6T3NsWiUyRjYxS0VjQyUyQlh2QWkxdDZaSjdTMFZEZGQ0TVNXbWhBSTl4b0lCR1FXamtHSG4lMkI5b3JsNXZheGlwNHNsQ0p3MVd1JTJCZG9ndnE3Tk41U2k1MndESzFpNlhKTmJBUVN3b2JlMVkzQmc1a3RWS3RXRTB3elNLUk80akI2R1d4bCUyRlM4VCUyRmZYS1NnVnQ4REJSUXB5d1FFR0dvUGQxV2Nla2ZMWTZ1OGZ2c3UwOU8lMkZpVjdQZ2tKN081YjVXRmhxNFdxJTJGWlFXWkR2ek1UckVWJTJGOGdEU3clMkZvNiUyQlN1QTFLcVRwQ3FuTnZzbXR0cVBDQ0Rnek40bnNRZEllOEN0Yjg4aXI2U3BZZHBoNVAlMkJKTFRSSUw0VFJhMDc5WjhsaExRNHl0MjNkcTElMkJLV0hGUmZvVTYlMkZjQkRDU2Yycm5VMzVyMFNuZ0drSUVJNVV1YXk2bk5Ya3RGb0tkRXdhcXg1eDgzeWxUemtLR3FKY29zWkZOTkVnZ1liWmtFRTAyV0hielJUTDlpblpJVHF0akZ3U2NpNkxsNkdUTmFFTjVTM0k0YmtEcjFlMWhFMUw1YVpuc1VNJTJGTGJKSno2S3VHNVNVUjFZNjllaExhNmdXTEtTMEpveWdyUGhvSSUyQjc1MnlUYURsJTJGRXpFTWdSTCUyRml0VEdLSjVyJTJCQktSQ09IODBQVjc0eTJxOVBpSldGNVJwVE1PeVlpNU5OWmxBRXFpRFd0cTdPRjFCN0Q0bFVkSGlJVk1CZ0Ntb1BkUEZNSmpBNENManlSbFpuQmlVSklqTUgyejR0WlVXM1RFY1dpU2JiSWJYaHZuNjB3TEhRWHJtZVRiQ25kM2RUVVAlMkJ6c3NpbE5VWDQ0dk9ybHNDU
3RFYVpYeWw4UjdwbUNnJTJGWk9xR1llckl1alBSd2hGN2pYTTVkaVYzRk44MFRQcW5BdlI1bUhyZ21Id2hnbExxVGFzN1hLMDNvMlBLeW01aG85anRSNlhwbGJyU05aY1VrSyUyRk9mODNiRmloNUVNVFdsRk5Va0RJOWxDMkpVUEJCMndJZkxjeVg3ZURDWVolMkZKNnVjSWhSNENheFAzWU11Z09zbVhUdExLUFdaMEhlek5odDBaM043JTJGdlJ3QmQzdmpOdzZ3aVozMEVoZUdPT2dUT2NUYk9uT1ZKdEt3Z2JwTkdjJTJGMHp0WFFWWmJrUSUyQlVKZ29zeTZnSkFDVUVPaVlORDZ0d2FUclRXamhYWHYlMkJIMTlNWWhSQ1M2SXZZVzNOQkExVDA5b214MUVoOVpXUk5Gc3R6QjFnMlZkWGlSV25hOWtUZFJtSUhmS0JCV1kyJTJGR1UwdWhZYU9lSzAzR0RPbzdYUjI0TVNrbyUyRjJ5Y1BVdFRzd1h1VVI5eTQlMkJoZFdHaklTRWxTMXZHeUFyUXQ1NjkyaGFoaFdsbHhhU1UwWkwlMkJpNkh5OGtEU0ZLOXZEdHJEWm91Q0pXemNYMUtyTFMwQUxmbTE2M3NybUh2RWdwVDlPUmZiNkpFbSUyQk9NSGolMkJFYXF1WUU4V0p3WVZlMjZYb3gzMkVLaUw4JTJCRm9wVDZNRzRDSW40dVZCTFBocFpPcUx0ZFpYUG9NN2ZlMTVzWno0eSUyRlFpNzZPY0clMkJPZSUyQnduSkhKNjNQMlhtWFQzdGNHNWJlVWlqSmt3dyUyRk5FRFhLU1oxZDM2czF5aEVBY1FBeDljZGVaeXYybU5DdXM3SWp5b0I0cVNoJTJCTzQwJTJCWDlobTdJdXhYTjh3UzdZeEFzMyUyQnhRdzk2SUV0dDklMkJYZmklMkJDVTM5MDgxTTRScmEzJTJCM3hBd3JFVmhYcUZESGtXMmdCTU1QQXl6UFNyTGRwM0RpVEVCdkMlMkYyQ2w5UGlnRTlqeFRMdnpoU2o5c3NreVZkJTJGJTJCWHFCSWpqNCUyQjZwNkFIWVp2VE9iajMxJTJCUmxwd1YzUkFUemRlOGFXaUNhcmhTNlFOaTlXdFhHaGZ0Q1ZRYURuYWZGRjdwNjhDSDJGcGQwM0pTTUFBQTMlMkJJblNPN2xjJTJCMUEweGsyWXlabmdtdVlTYWo3VFJsMG5COE82alowREJ0UFFLJTJGaEpYRHBDYTEwaVJwQlE2TW95SXZFVHlFRkxVUEtYV2w1TktRelZZZnZNa09leUplYUtNanIlMkZxN1RRUTZCdjY1MVZORFB6d25kZDZ3QWh6dTE2TmZmaklKYWJ2NWFsVmMwZU1XWmxSS1FFY0x6a0poMTglMkZwQiUyRmVEYWozM3k5ZFNQbktoWjFqNmIwdVVkSG9lTSUyQlY3ZmdDMk1wbGMwU2ROM0FJY25XOVAlMkZGMkZXY2xXT09VSGYxcVh0QWpzU2hnaWhhU1NDME5mV25EOTZZaWw3dVZlZktYSVk5Q0RoNlRVRkNQS2ZnR2hHcnBEb1JQUTQzRHRwNFJObVVKdFlyR2NQV3lZSHZmWUJDZGRhaUFXZXBOem8zc3hkcmZPcXpSS082MzR5YmI1aSUyRnJjSVR3VTRtTEslMkJuOEQ1dlpkSW9IcUlqR2QxdjlHdkVCWFJyQjk3eWNyaVJ6QURzQzgwYWJBcTRDaXY0MWtSTGFEUFVOSFZTNVFMS3JYWlJoYmZGZVM4QjJYMFhhSUUlMkY3S0MwRDJiZkVVMldBcUN5MmdaQkY1d2hQcFFseFJ2d2sxbktWcHlnUlZHcUxIJTJCczBhbktEaFdCU1c1TDBzQ2t2R1NMWnB2TjVpNXM2dUY3a0pCZmh6ZkFmTkFoeVVnenA3ZzJXJTJCVEZWU1FaN3QwUU4wa083NUwlMkJUaDFJc1NBR3ElMkJibmklMkJwY01CT
0JIV2N0RTJaSXlKVWl2USUyRjVKdjUzRjQlMkZCa2NUZlhReDdZNXFVQkJ4RWJMS1JhU0dEdGJCd3ZjV2xaRzlKYUJYejZ1WVlkOE5sb2RLU2V1d21RQldrc1hnMDdSamZTeTB3RDF0M3AzeUJNaThLWDZwUWVtbFd6cXNwc2RqRjNVYkRhV3NXRzZNc1Z3T3Q2d0FYN1VFdnpya1lRZEpDRmlQcU1ZSVk3MTU1VThabHA3bGg2NG01ZlpmWCUyQnFUVWxZdE1YTkp5dmJ1U0xDOWhvWWlZcWsycXVqV1BGck4lMkJBVGdjQ1UlMkJRT1c5c1dyRmc2bzQ4VzEyR1QzaVFmcnJSYVdadUthWUtUS2J3NHlQVjglMkZ4RzkyOHlTT1hCZVNkNCUyQnlaa1RSJTJCdFl3Z1hiZThRbVYyVWFvNm92d3pna0pRdnBxbFAxOVM2WU00YSUyRkxlc1ZvbFM4QjM3N1k5TVZ1JTJCcml6aVp4a21BJTJCWnpPcmNJWlJGaDAxYTIyNEZsVHFWOGdOQlRLMnZCbk1IZGRYJTJCaG56aSUyQndYRXQ4eUwwbTFJSDJtTmZHSUlOdXNxMktReVlpVVRJTU9aYUZlMiUyQmVISHZFJTJCcUZSTWtaWmllNGR1b2JLWktLZWVXaGN5dFZ4bHByanZoeTVjSVNYSnhoWWNFQ25iJTJCRGQ5cldENmNmeDRablZlcnk5OXdFRHBnSmtLZXZTQ29iUllNZmhBaiUyRkxUQmpZWHlmUUslMkZ4NEJkTDlselZlNlRhVFBrUjIxbUpiR1kzUEFGNTViWGRsU1Uyb2tFUiUyQk9ET2QwcWNTamhNaWFUU2JMUENURWRkbHVYZGd5UVROZUE5eEZzNm93UVFyd05MUHo0MmRsYjFKWUtTS3ZucnIlMkZFMEs0MmZKWjM2NjBudjBhS24lMkZkWkVaUzdzaWJreXFrUGRnemNYUFlta0RKbzE4bFlIa3hxMXolMkJCcGpQeXloOW9Da2xvbVpwc3FkVzB2JTJGYkFqUEdJNEM3ViUyQnJ2NWJQQlF1VnlSN2tmV1drZ1hTc29oVzhaJTJCZWdDY3VlU3ZHOHhtcXB1alo0U0hybFJrS1F4OGcwJTJGJTJGWFNIQ2x3OHRlQlRmaHRkWTB2cmFzbmtIVGU2WGllc1QlMkZ4SXU0b2diVFpXZmdNVWZtSU8xMEZ5NTdFdHFEYTB3eXp4NUF2MkpVMnpGOElvNkJJV0FkcmwzbVduaWVVcFZmMjBWUEI1N2RsQjI3djFmciUyQnFmNjRvNHVuN2FVYjZhT2RGS1FIbHZpcHJ6OW9BalRIV3FkNjFBMGc3d0ZZJTJGUG1nT25nWnNSU2hSeWoxVzk3SXpRZWR3VUN5NnNJdFh2WlMlMkZ0OXMxNnYlMkZGbE9GSVh5WiUyQkJ3ZEpXUnJUNzR4dzdKUDRwOU9oVWF1QXZycTZXYWhJeTBtTTJ6NHpDdUNlTnlpanIxUENONiUyQmFoUiUyQmsxY2klMkI2cEZWQWZ3ODdKMDZIRjM1TU9OWGdnN1ZnYWJSWVZkZG1IRFhKVGdNWDh4bWVDRmRuQSUyRnE3R1NNT1g4NkxGblJPT0RQOU9aS2tBbDhiUTBOOU1UWWsyTU1qOTZrNXdZNWlVYVNJM28lMkZFbDQxSlNxM2hKdlUlMkJmaGMwWGprWTRPRWpNNDRkRTlWdFVWWTF2MHhQbSUyQkFmcm1Xcmp5bGUlMkI0V2RvWHJCaCUyQnN0dVpLdk1kaGxvQTR6c3RsZklnSjg1VWFXM25ndWVybXhqbiUyRm1nWEwyQldaSDdPVW1NUTVKcEdmOHM0NDJ5SlVTTlRYZ3dIalgxTWFrUHQlMkZXS0pOS2plSDRTczFPVDRBaFBTb3BSNzZuV2liUm8yNzdmM21tMmE1S0xOUW1TUFpDMEJnUmRoUnk4TmtlTXNZclVNZWlibmg2eGdXWTZJZEk5NnFvT
Vd6VUlueHF3NUUxOVVXVDlJZnRvJTJCdExOWnBHVnE4RDQxVGV2RjFOeEpDU2daa3ZsTDJJaWNOaiUyQjhYbzhETjhIVTZDdEpWdFdmZ3k2RjczSG9ORU4xQktZenllZ2RGYjJGbzIxc0xYU1ZIU2R5TVB4M3R0TEJsJTJGbyUyRmM0WXh2V3g2Rm00eGZEZFJ3QVI5djlwTE1EMEc0OWN3Sm5XNUVYNFpyYUpWbjI0MGM1TDclMkJrYll2RDNHSk5yVlEwUjBSS2VYdFJYSUVtUnA1QnZPcnlVdFVmMDdUbWVsQmZieURjbVdnaVNaVkJYdGxNQmUyM21UNGZodXpUeFN2dzJidSUyRmJnQW9EbU90THZyb0laeEc2MXhROTlySzhKNzE5dHRONENyd0VqQVVqOFJzNU92em5jOXM5VTJVYktReSUyQlRoRlNoalJQSU9NeXhEM3MyV2JsQndNSzd0Umg0dVNoelIxTEVuOWpPTVRySVNaQ2RaRnhDQ1RHdkZjOEp2SmpmeFRPJTJCSSUyRmg1RThLM3JZbERZckViSXphV0lueVlLY2ZuUkZ2JTJCYjc2bldhbGlzMXZFOGJiUjVMMGNFTTNhcklzWXFXWG5mcWwzcWk2TjBDVEdkVjczU3RRVGl0MjdzZXZtQ3YyTnl2bUIyalpqdXg5bDklMkZOdDNlaWclMkZUaGtJVXJzd09iNHdTdlNQbkdLZk5mZ1lyZ2ZkT2ZtSHRXa2ZSMHV6bTE5d1VRVEYlMkZZJTJCTExNanhsY3lPZ0pLSXNZRTJMeUhoQlNiYUV5MlVpQjFGVGVnenpKd3FoYXFQTmhJY2NFdVpyTnhCOWZIelBwYjhWMUg2OTdOUW9maEFTJTJCS1RZYWdhbWpraHk3aG1xTm9VNWwxUThvSmZ5VW1IWldnYlFjayUyQjkwSkx3aTYzVEMxTSUyQkwlMkJ3OG9aYzg5WXBTZDljOEl0SjNrNkFtdnglMkY0elA2eXNPamZBRkklMkY3dEt3UXRlNzVaY201WkZIa0JtbUdzWmNMcHN3ZmVqWWcxN0pSSHBjbkl6WTV3M0NYMzRUR1lQR2dLMmYxNWFucU9MTyUyRlVCUmxxQmVNS1NLVlZDNkRwbkhQR1Y5dEJxTHc0WHFjZXdYdHJNWVpPc0VXQTlWUSUyQkoyaVFwMTAlMkJGQjZPMjhMWUN5UWF6OUpnQVhxRDVUNnVvNTZNJTJGVTc3VTdlcFZJdUFSdko5dGZ4NWJYUDVBSThndUJTMVNGR3pFc2hqR2FOS0M2ZGFVeTM2TXJnbUJyMW05VXQwMjZyc2RqcFZORTAxN3B0ZUR3JTJGZCUyQkY5ZjliJTJCNk5EQXE3bDNyZGN4bVQ0R2tGJTJCMEslMkJ0aGdRcnFUMVklMkJrJTJGTXp3UjV4b0RMMTJtb0VRa0tFQ3VOWUtiUUR4WldkcWRqTWQ4UUJEdTEzZHdna0pxYnNaaFFadmtEQ2U1RlZQeGZpViUyRkpNRXB3TSUyQnMyZnN6T0NucnVMUk5xMUFzaThlSjY4NDJhUmg0UzdhOGtjUGlUenFPUjd6ZSUyQlpGZmJXVVVuNVVOS3lIcXlSNkpWVjNjbTl4SyUyRlNlQjlQa2pFUVRwbW1tdThJTzJuenZnZkdTYW9pRjJicFREdWtIQ1JsJTJGNE5nTjhkSGglMkZjNWlpajVDNlI2WHFYTXhUWmdBTWdGb2pJdkdlciUyRmczTVY3V0ZOTk5sUEZaMzM5UnJueUFYRTc3RCUyRk12MHZKdTRTbXJtOUxvWXFyUFpmNEZGcEglMkJjNUZhcmhiMk1tRCUyRnpHUmFmWnRrQXFFJTJGJTJCR25SYWtsNmlNQTFWbFE1UkhtRzBzaWZWSmVxM25oMVpJNFNERjREWjJWdDU0OWVJWmxNOENOTnJmSnVsbXZCaThIJTJGJTJCM1RGbmpVUzFlaW5TR2loR3lqelNWQmwxVFdSRUVhZ
3dYUnBnbDIyOVJLZzlmSEhoZk1vS3ZQSE9aRGQ2M24wdm9xdURrVjljZUY1YzJsaXZYbnglMkI4OVp4ekVpRnJtJTJCcndKdDhhbnZyaFltc29lMkRRWUw0WEJDek5xbmwyS2hGRXR1aEhQJTJCbUc2dm1WS0hud1p2MDhZTzRNNGlqJTJGVVNxbkxxbyUyQmZscm5SMVpaNEtoenNHNThxNmtUb0MlMkY4OFZOazg2TzFhZFJ2U3pXYUtia05YdXBSSnhOZkZTNmhqaDRJNGllekNvbUw2M0FJZDdPaHNzckJSajZ0RnR2dldnVWF6UXM3QTdTSm9Qbzhaa0lhazV2NXo0am9lVk55JTJCVDdEWmVrZHcySFE0VHZhaDc2UjY5OWRValIyNTR4eldrdGt4bExBZ21BQnJnJTJGenM5NVlMTjh2bW5xUTJxV21NZmxVOWlueTJSdVZtckdkck1VNzhJMjJLNzJpZ1E2RlhuZkJKRmhZVmFJMVB1MWg1c3ZQUzE1cmozZXFHMVV0bkxOMW5wTHRBUmZLbXY4RmJqbXl5dWNXWERna0U0WUNScGhpYjVCY2hQTnJ2MFE2cXlmVFUzYmRKMkFMZEElMkJqJTJCVFpiRGwlMkZZTjJwUTUlMkZRdDh6cERHbjJjOEhhUFVLTUk4UW1obnVqM0RlQ2tVRDBwS3FPanF4aVQlMkJ5TjQycGt5azdaMzRpUHR3M2NFN3ZLdyUyRnRwUGp0UkwzVllJMDg4enM0dEZkWThRdzlPJTJCTTFtczl1WUZGMWtuSFE5QWxld240TGdWYnAyWTdHNWhsMk5ldDhneXYzZHNSdHR4UHBDbWYyeTdFOGZyRCUyRnhtMnJHQUR1R2lnUkZQUUhpMG9OdVhJTjBFJTJGdkNwMjBWZjFWVXAybXFRYUgyZElOZlF0a1pMViUyRkxtOUUzajR1dUQlMkJkOThsbGxmMnNyUUNIaWpZZGJJQjkyNXlUeFolMkJtcFR1bGJZckxTcnEwd2N3eFklMkZaU21QVGpDWVg4YWVZajA2RFBnTFh4VzZTUjE5dTRHU3FMZ1VFa0hpSzh2Z1o1TkJnWUNMU2NTQ21PRUhMQW8wN1FkN3ZNTFJZMlJoNlJieENIdTBMRTBFRVp2RWQ5d1JPRjdhd1VFNkduWG1mZGRQNUJzQ2Z3aWk2UzBsNlAlMkJPbTVQUThSTSUyRk9nUVUyZjZYd2p0NGdWVkY2SXJjQlBtbnN4V1ZGaE5LQ3RSY3FPUmxwR2NwTnpFZE92em9yVUtzaks0RnBWSnZBTFY2U0VwOFVJbmFBQnpSREdybEY4aGR1R2pCQ1E4UHQ5Rmd5V0d2N0NCeEpIM2pHb0VQZ3QyNlBFQXRpWHNZbjB4b0t5JTJGY2UzcnVPTXVoRCUyRnVkc2d5bWFpaTdvZ0JRa0JvVE0wWDdNTHRLODJtRXJXWjZTSTdxRzduM2xEJTJCWm8weDJ0UmF1Rk5FWVhOeVAwNzNJNDd4UmlpbFNSYnVhdjROZzc2c2VvRkZzVnA3UHdGb1BaY1B3U2olMkJhSWRHdWdydUt2WU85SWdTTU5zcDJHSmpIcTEyMndsN096TG1qeDhpNUlTbTNJT2pGNXhLRnhrWVZZc0ludHZCNkRHOTZrSXFzd0FvNjgyWnN2eXlzckplcXI4dUpmNXFJZGZsQnFWWjFvdmRKRTFTN3lQNWZoNVA5MENBU0Nhekc0S3JTdGQ2M2xSRlVneURhWCUyQjlrJTJGV21kU2RzaiUyRlMxbWU2WTI5V2VRRzZDNVlqMTBwcG9xYnQwenZac0xsaEhhdmVvdGdzUjMlMkZyM3dLaVpOcDVFc3BQQjE5MUwlMkZyVVpmd0VZNXhxYkZ3WHMxbE1tZjJVTVgyUmNiOE9UV2hvRTZRN3VmUTFRYUhKYmx5cFJwN3QzcndmVmFGT2JPMTFUVyUyQnhGbkFrZFBDUk9HQXQ0SzJaVUxaaHg3alZPU
jFXOWtsWUxnQnUlMkZOJTJGQTlGV1pkbW4zbGt2N2xSS215SUpkVEZxd3c0NjB5OHcyOWZDVG1NbER3eXZGOFRPUWR3Z3BLJTJGMERrMDJkdnBPbiUyQnpqbE1VR3BiMlN4am5KUUpnUHZGc2VacGx1VENLQ1U4dkdJN2xEd1NMMFUySFhET2JORVIlMkJPMnR3QU4lMkZOOGRDZGEyMzNrbUNyeUpwMWxWcllHSEM0RUl6QXJHR2V1bjBZQ0dydENkJTJGUkpPbXBCUDFkUzVONSUyRmg4N0VPcDJqcFBVcngyRXlWRmVSWGhZVnNQREhLaHNRQkpiJTJCSkxXdjJXak5ORjglMkJOY3NmS0dYVVB5RGRNcnduUlZBSmFxTUV5c1N1TGQ3cHZnVUpPZ01pRiUyRm9DREpxclkzeGpqbiUyQmtKRDJRJTJCJTJGJTJCTkElMkJSNjFoeUpUcHl5eHNyRGtPVVBBdjNBTTNibHdnb3ZIeVYlMkY0bGJhMXFYUDBDVE9wMXhSQUJoa214WW9SV0p0SFpyMVlJbUhtbUJRREV2a1ZwbWlid202aElyNmZ0dkJGbkV1VmlTVk8yTDFoellBQXRMSmpTbHNtN1U5cmZaOWYlMkZzakllTWNmaUw4V1JscEMlMkZ6V29lTEk1MzJmcmRKVkc5WkFJT1d3MFJYUnp5c044RzdkNzhrJTJGOXlkOVB4RFV0WjFTQ2kwS1RpM3o5Q3VrN0cxcFRnS0prTHpBR0tXcnI3dzZSTiUyQlBUblFmczhteUVHNCUyQkNKaHZGN2pFNVI3bEdGeUwlMkJLdFVLbkRuOGltaUJqS0VsVDJKZjBYNnolMkJ2cWdHJTJCbzlmM082MXk4MkMlMkYwWSUyQjA4Y3lXQ2lCNWdzZUQzNGJLenVhOWlzWTZvZEdqNlR1WXF5QmJYNU95bUN4dGllcjdYME93VFJQMUFWTXVyaWJaWXNSVVJwNkduNnhIYzZhSUtGMXpuNlBMOWxUdlRyRGl2bUNTaTAzZEEyVThWZVgzc2RuM2lmaUE1NUNodnpOOWRlenM3VHhTOCUyRnhiTU5pSTdzMVpYMmolMkZtN0QxS05XQ0xmJTJCNGRoWDNEUDdFcVNXTW05JTJCdUR0R2NUTTlQNWNsT0gzcjhGZUx0bSUyQjBGcUVqYkNGTzZ4ckdlQVBzTm4zbWJkbWl6dkVYcmRkQmhSZTJ1cXo5bTE5SnJjblVpM2ZIOUdVUm5XVVFpNWNHaW8zTFhqaEVvS21abGk0SHFwWGF5NVk5MHoweHJoMmpvVm45S0REMm02OGZVM0t2WmdBaGNlQk9FWHclMkJLMHV4MGVIJTJGYm1iQWxuYjJ3bVBYNzVZQ0JqeE5rRGFzcDU2c25JOVclMkZFMXVSeWQ2a3FQV2VyTFBiUVlDZUJiOXVGVVkzOWlMM1QwRE9uVFl5R001ZTZSUnpLaUlVZ0lFZ2N3T2dETXVmUFhBSjdDcE9GdTBFb1huSSUyRmkxT09nMmFZJTJCVVhMWiUyQklYeno5JTJCa3Jmek42MGt3TFhLT0FkWFBXbnolMkYySyUyRkJobHVHb1NsWnRmVk1tdyUyRlIlMkJsa013dXdoRE8xYzRXdWtlJTJGeXhMV3lSQ2xOU0FsTCUyQkRQRElZd003VjUyMzVJJTJGYmRFbU52clR3VFNQanRjemhLWTVVcEFhUTlqcXlIb0o4RTR0QXVKRHolMkJScW9aQ2wzTHBybXlsTHJRSEMlMkZQODM1ZTFsSTVWdG1jN1hIOUxvMDJMdkF0SGRpZjhZZiUyRk9qaWdqWHhKSHNnMDFMNWV2JTJGQUklMkJaQnVIMkJyS2c4d29tc1dSemJSJTJGSyUyRjJ1bWQlMkY1Y3RZZkRoRiUyQlc0Y29idjZPMVBzdHZWdTdIYldWQjBsZEw3QldvT0FUeDJHYVh0azloZlFZJTJGTUJyTlFVV2RCbXJQb1hFdHpWb
VB4ZlY0bnNGUkhWMUJtM2dPa3JrMWUwTVZOQ2xqaEZVM1JoTDBkU0M2ZzhiMmFwNjNHZ0hnSiUyRk1lZ2FjN1pJbjhTOGxldDNsYUklMkJ6aVRZeGZrM2Z0SnJ3WTA1MCUyRm92OFdRY2VXUnZpRWwlMkZRc3JjWnQyd3RCaUE0VlhVU2NWSkFwZCUyRlBhUjNFbnFmelN6UnQyYmF5S1F5YVpjUWtma0NzWmMzJTJGeVQyUzRHd3hkJTJGOVhHTTNjVThNQ3glMkJEUmZZQ3U3ZzV6R0g4bXpJMVJpSkxoRklKQkhkOEFRd2QlMkY1bWUlMkZxYVJ0M3YlMkJ6SHM4JTJCeTcyNyUyQnF2NzFhQzI2UzlEcEx0VFVvejh4ekZYbWhQcGlRWWVTaFoxdEtDbnNkM1g2Z1lrUHVOcTZzNzJDbE9wUFRYNWp5T1NuUiUyQkNjWnZWb3czV2IlMkZkYXd1Rm1YZmN1aFVZWG1TVDYwcWZTMSUyQm9pT3R0YjJMSlBKTldKUDZhJTJCOXNSemxibUFYeG5ZMkdZMVdnS1FXSlBvazhVRWZGWWxCYWdsRWk0aWZDZjdVUlhobEM3RkF2UnVLUTBlTVVWaThINzdlNkNiayUyRndNWHZLViUyQnVxSnM2RTF1YXIxSGlPR2RpRGtyajYlMkIweUh3UzBWN0hlSWtNUGJWJTJCdkx0V0R3Vm10NUp5WWNEWDFWWkJOR3ZqeXN2TkFHejk5MFRUN1lDc2RzJTJGZ291VHVkTXlKMCUyQiUyRmlIMUdHZmZSb2tROHU3YUxaNUU2SDZIZ1h5Y2JUd0tmSGJNMVpORXclMkZDQkE0dWZiTXRFTSUyRmlTMEZhNHdaTG1MTG0zb2xDcXFKckVmNHFCMiUyRlE2UWh1c24zVHJRbXNGanRJaFNMVHhrQkxWY0x3ZENtVWJVRkp2MGVWY3RGZGl2M1NGZGw4MFJxR3BTa3RmdWxvRUYlMkZYTm5FQTdnZ1hRcDlnNmFIMGlmTTRKY0NRJTJGR0VHaWZURThGV3pQVFdUc0ZhZ3Y3dzJlTUw4b09uNDFQdkppaW9JYWpMeXRIMjZxWDdmRlZFVjJIZ1RaUFA5bGZDRFJtS29QZkNvSEhQUlB1eHZSOTdKNEQ2QUZ4Y2FaWHhMcHh3OHJ6ZFlhd0trTW9jc25PMlFuYWNJJTJGNUQlMkYlMkZtMHp1Sk1qT25FUkhDdTJYOExkSXRMQUlIQWJsSG1nVVZQRGdzJTJGRXJvSnNSaWxiWWE1SFVZVXB4ODVPd3FtS0xDOWc2RnU1QjIlMkJCZGswNkNEJTJGYk5GVDZPS3ZMZGdtTmNlSVZRbE1kJTJCNmZ2VE1oV0VFNWtSSiUyQmF6TWhSdXAzcGNobUhtN2dUZkxlVmZLcWFjVldtTFhFamMwWEMlMkJleFNwS3l4M0wlMkJ2SlJRanc5ZlpseG5wQ0hlV3NOd0V6V1g4ODVVcTBYd3Y0VXlZTzZaNFFPWTE5T2NFdm16R2xxSlZaUkRjZDRtbyUyRnhMd3dQYVA1cW9ISEFYOGRWcjZqcG9VNG5QdGRndk5xRkN6VndRYWclMkJDQ1B2MzVPSiUyRkEyeWQxOGVpeVRxTXolMkZCdEclMkJsRGJJdzlISkVwbmp2c3UlMkI2SDM3Y1RzVmJjOXhrR1JEWXdVWTdkZXdQMnhNeTk2TktGaUVFbUM1MzNhJTJCcGU2ciUyRmppclZza2F4Zkk2JTJCM01rcHUxYWZrY3VuZk1WR2lMRFhNZzZXNDZkcmdFQVpuTktta01mTHlEdU9Ga2dKYVlrZzZ2JTJGWSUyQmtxc2lVM2d1Q1Z4TEFVYzR0YnNHc3hNNSUyRmVxajllako4OWIlMkZ5bnV5b3JNeUtTRUZ1OG1ORjl2czFvMTVoRVhEaWF6OER4NUt3cVlZU1dmS3JqSyUyRnFub1BFV25RUGZ5eFpTem5vOHZ6UjRXMiUyQm01Rm5vV
1Y4OHJPQlZoWGFDc3l2ZXd4SzV2MkZ3bHY1d20xS09DOHlyQnNKVGtQZ3JUT3hmNXc2OGUlMkJ4Z0swQjFMNzdOVnY2TkgzVlR3RUg3cmp0ajl3Q0s4NVFDJTJCSnU0ZSUyRkxmcDZLRHR1eHJzelh3ZFEyM2thQjFSTlpBZFZoSUFKJTJCZ2lZczMlMkJ5dXY3RGw3WEtKNyUyRlA2NkFSelF5WjZua2NUcWdvZllmQzFVckFOTU04dlp0SFVWSDhPUjFtNjB2englMkI0Zks0aG5vWEJWR2dzMFhWMFZDWHQzd3RSSTB1NUo4c043SCUyQnN2UEMlMkZaV2k4TGsyM2x0RlhBNllCJTJCZUVROSUyQnNsZlFNU1ZNeGclMkZhelk5d1N3JTJGNjZkTXRLSEZ3UExZWGR3WFM3UVlGSW53WjM4OGNXSmJ0VzE0NjllU2RTS1oyd3dZeHdMbGcwSXRjTHFPbVJrWFljdkR0b3ZocXZMd3JMNjBGUFl3WlBQbGt6OXFuQVRBOUNJa3lZUWhWcTZnejVXcnNNZnBFVFg3djlsRWNkJTJGUGd4c2g1TUFVS0hVcTVUWktkWW5zUUpjdHdoJTJGZ094bU1RcTVmckFPJTJGc2pmaTlMUTFXMGJwUGduQkdibFZ4d0J6SXhQVmd5NnZSUllYQXk3RXZVdk9CYjlaUDE5NlZEUG82NzZtODlvam1DSCUyQmZ4V2VMdDRGYmtnd0lJY0Q3SkZRSkpLOTFwaUNYcXNpd2wyUGhtY3YlMkJGdmJsR2NkaGQ1NjcxRTNPJTJCS3RVRnRLTmxDbndZamhkSXVVTDc4V0hlcjJIY2FWbko0MnYlMkJoQ25QZyUyRjRoYmxWWVQ4UlpjJTJGdGclMkJwQ2hNZU1rUnZoa2JJbER1ayUyRjB6V0VsJTJCTEJxQlV5amtUV3hhN0YlMkJWZWlsaE9KeVpxNkU5NFZKMTJHSGYlMkZuN3VXdCUyQlolMkJXZDVnNmRTNDdpUmglMkZCb2VwaDNsRXglMkJPVyUyRnJIMURlclZHUGRnNExYTFJCUlNxOXBNMHRJbzlaUlNGUUl3MHlRVFJOampLM1lMWWxLdjVPYm8zckZjT3hGU2FNOEFpT3lyeWdxaXQyeGZFeG1nUXY2N3FDNGYzSnFvRCUyQnBkMHRRNWFIWlUxVjg4M2hDaDNMbCUyQmZGY3B5SjglMkIzUzk1SjR0TSUyRjB2TXA3MThFaGd4ZUI3aDlraVFQSjBjeXB3V3FEVlpmZiUyQnNpTElIbkc2a284MkFQaiUyRkM5NUZYY3ZtVlFrTFJRbFdLdk1tRFhrSGlTZTdHS2E2d25JZDBVTWVoa1dRY2FxbVpMM1dkQ3NkZXlDUDcyTmM1UmdHQmt6UGFxJTJCbTJmTml4ek5STHJkQlJsRTRVZEd0dzg0QzVtU213Um5Bb0pTM0h0cDlJVjR1WGE3aExiQ1V3ZjhNU1Y0NjJ1cGFKeW9UJTJCZTV3ODVKUTM2VEFOc3p6NjNsakN4WGpObW1kNyUyQjFFb2xkTVRoZzFvSERpV2tzQlhtMXJic1NnZGxkY0NaM0ExN2RvaTV2R2lWaGY1RzJCbWU3ejNaUHQ3bXZsZG8xQmhiUTFrQllhJTJCNjAwSXI1ck9WU0NSTUlRUiUyRmRTU0FkaWhrbFg5bnZOVXVVM3d5WkVWSnRGeFNFcHpNRyUyQlYxcXlLUmlXdkszbTZRMnpuRGNseHQ2RWZhbDJNTFRmMUlxdkNHWjVLTWZncXVRb2c1MWlqN3Q5QUwlMkZUVFN5JTJCNVJueEFwa1pqcURuUU42UjNRb3lPZWd2eXQlMkZMMDBYeG9XJTJCWDBNbWtES1FoejRzWFJCR1hYZXRFYSUyRjBSdlBlR1hVOXBJNjBRT29ieElhSDhwRlVSdTlxRUJUcGF2NiUyQjAwWHUzM1VsS0NMc3k3OEx4Tm1XRFdYTERUWHVtcm5EQ3RiU
XdiJTJGOVNuQ3FuOGtHaHlqdWxObnpVb2IwazhWTTBYOGhmYmlMMTNKWUIyN1VCcmNlNFh2cUxjTnQlMkJyNUJoc0I5UjFuRCUyQm5UNFVwdkZyJTJCUGVVaCUyQjZHSHg5RGRSQThrNkNQWE1wemFZUm4lMkJHNzVRQUJvVG1FSjgxRVpOWE1tUkx1ak84Um1GbmtxYnZkeU5mJTJGelpKNHVDWiUyRnh5aVMlMkZQQlBwUURlZWtJTGslMkJxUUpnejhUUTJCUFVWZ2ZJWjZGdDdtVnE0cFMxYXc5NlNBM0hjb2VjRE8lMkZLQVdnYkluN3kxaGNwSE52YVU0OUFHNmFjU3gwMGhyVnZYa1luUDc4ZVVkZ3NpTWlkaXV5c1pwdUJGbndHSXFhSHVGMmN5eEgzZ0pwUkJ2SEMxJTJCUEcyR2dpUk9PdzA4eEt6eTJKYTFnQ3FZdkFFaXNYMjVHSXZOc0JJWjZPUGpjbk81WHVYbnZqTUtyQlRqJTJGanpkJTJCS1FKRVN3bGFSejMwanlNcWFybk1reUVqOTlFWlhubHlRdDFsYXB5cVJFYlJocTZqZiUyQmJqTm1iSjRPbXBUczNGRHI3Z0IlMkY1UFZPbkNsVVJkZHRndmIxcVBFUkJhTXM4VkRqbENaJTJGMGt2enFNaFBqdXl1WnNHUUxaR3lZQloxdG11b01jMjlobENoWnNSTFNYQmklMkJCZXFNTjBqRFI5RzZ6aDZYaUs0ZlhaQURJNVlzcVlSUjR6TzZOWWZSJTJGOXR1czN0WXRuRnJ2dGJGa24lMkZqUjA3UEJBUDRZRW0lMkZybzRpcHNFUk9yJTJGaFdjYjRaOGY2ZXV2cFh5Z2drUFQ0VzFkZXlYckptNnBHRXMlMkJ3WWxMdnpNMnRaMmFrdzA2QmgwbkdWTGhRYWFMemhLYXlTalJTZng2R3phSkdSNE8xSzVSWTdqTkU1d20xS1RxdVJYb0JveFBYMkpnJTJGU0YlMkZKb00yUjd0RkNuZiUyQiUyQkJadlRVVHhZQmhLV2hEa1k1eDdOM3paRXJ3Sjc2ZXV3dURDR0poVEhycSUyRmNzcERHMWlKbzQzUUI5RGVUUVEyQkE0Q0ZIS3hiMVJ1SzFiR1B1NnR4NDl3JTJGaXglMkZCWHJoN0g4NTVTNTVZTHAxRmFXZGQ2OWZ2YSUyQiUyQlVJZjMyOVVQNGg1WkZhN0JQbkpwNDk4eUE1JTJCaDklMkZnWHh4dXkzOThuc1RSMlZWOVF6Z2FtSmVwVlVKaDUzbTIlMkZCUlhWS0VhVHp3MERtSW5XZCUyRkhRSnhJZkNBa2IydERCMjFmJTJGcDB1S29JMUY3cHl6RTlqakJrU1B4bUNOVG8xc1pKNW84Qjg5TzAxczVLa1BIa2xnJTJGZnhydEdhUzA3ZmFkZmx0M3hKWDBNRVFHYjU5MTJ6N0NjcyUyQiUyQnNLeXBGQlJpZG9mcTVRQzJhcTRJRFdOOWdmUUglMkJjSWE3S2hmUlhTcld2eXZ1JTJCT0pDODVEWmVKNHZucE1ZdEdaWGUyYmRDa2JuUzhleW52UyUyQlM1S0o4Q1lxTExXb1hpWVJRVzhFSEZRNkg3RHZDT2kxWlNhZ2dHVkN0WFdaUGdCJTJGWHhQbnVxUllGbzdPaHBEcU9XdFQ3UmJsQVpRZlpVZXFDVlRJJTJCOUxQZGlqRThlbXR3MDBLMUROaWNCelBLTnZEQVFNc1BudWV2aVMlMkZPbnIybnFybnpWTnBGOXJvRFYlMkZRVTEyZFAlMkZMNDBza21PZFJWNlZRZ05Sb0tMRnBCbDRHamQ5Nkg1N1A2OGE5VWJqWDZNcE93RnhZS0NIYUkzYTBQNlVIWHNSeGhvbTRoeGdNSEUlMkJ2Q045SkElMkZKZWRka1h5UnhmMmJSZHVOaUx6Wno0SzklMkZQWllpMGVVRkxSVTM4UDRYRWZWQWpsbzBucEc5MEtEeHVMN
CUyRmZHRk5BZmt4WkQ0NCUyQm0yVFlkJTJCTFFZcFd2SWo5d2VTY3NSaVpvdWJzQ1hkRktOVFRRJTJGZU50TUpSMk1OQWVaRWU3SWJQTjElMkJkS1lnZnh3aU1UJTJGVDVXejBnZnFQRzlSR211Vm5wU20xV0dHbFVpbVJjMEY4dnZqSG5rZTF0JTJGZmllclNJM1Q3NGMyMHZoRFZzTUwzbmN5Q0lHNEU1Q3BQa09qSW94MzJ4QjA3QzltbjFOODVQNHd3MUlibiUyQlE0b0d6USUyRmpOc24xeUhvdU1ncERMbkFsTlFpaW9uazZsUklRYnJ5TWRNZEhlMDJ6WTBseE5VNGxLaThYbHBlN3YlMkZtcjRCeEl3WUZmWTYwVm1DV2NDUnBUSkslMkJoeWc5RnQ0UDZsUXdJcnNQRHNhOW13aHV2NnpZdnIzaDRqJTJCU0wzeENPbjVmczFlbE1rbEdvY3NUNkFrSUNWUnU1UzFGVzFEWE9EaU5nJTJGZHdnbEIlMkZkcnY1ek1TRXJJYVdrWU4yMUYyaWpUZlZIMlowQVd5RyUyRmZGJTJGU3lSd3Q1TkJVeHY3RSUyQjhIdjhQc0t2MTFSZXRsVGdYWDlYWHYxMUE3Y3BrRTMlMkJ2aDA3JTJGTDQ2RTlrNTdHdG8xSG85bUNuNU84N0FEOTJlMyUyRmdMNnZvQ0NoSVhHb1lRZjR3dCUyQnNacXNJYlN3UjdiQThycWlFejNoNE9QVmlBVllqUDRKMzVjUTJEb1ZXVUNweUI2bDNQV0k4Q3hobnZ6UE54JTJGRXFyeElqRFBNUHlvc2tYT2VrcHVIM24wUm1MMWozdHhKeTl0ZHJXJTJCQiUyRnUlMkJST1dZNzE4VmpiTUFwaHpOdG1rSndjbXVOJTJGcmpVZ2xrbXNWb3lKSzc5a0NtaVhDJTJGRGUxOTdNZVIlMkZHYUJzVlZIdTklMkZzVUtYNFM0UFJkR05xWFFZVGdXJTJCeHNjM2I0eHA3UWdaUFdzdkxkRW5hbThQaGw5UUhVM1glMkJsdm9KZk1xWXd1Z0hnemtSd2cxYURZTjFIV0w5cEtNOEh3UjZpWTBiREpyMElaemhDJTJGYWVqa2ozMkI2OVB1TXBCVDBCRVZSb1RaTDNOeXY4TmlEbUs3d0FsJTJCdjZ2U24wSmhqdCUyRktOeHRaN1BZTzRsMjhGQnRPcHpIUHNmbndmbktPR0pYdUZYZkplQ0pqeVZuN3pmcWZXRnclMkZvc1VVWVpUM0ZlSGlvZFFZcGYwM2ZLSGNQJTJGeXdMTjFlZDZQYUgwJTJCZUVaTSUyQiUyQnE3S3pqbWdrUjg1bWEwb3NnVDFPdFVYZEx4M1BmU092VWFkUzA5Ynd5eXZQYmlNbzJuckJjNnhLaUdqVCUyRm5mcW9jQjhacUpqd0hhT3NKWjFhYUwyQnJaSzUza25TWEI4S04lMkZQV1gxVUdGMHJpUk1CdTVKYVRmMTNjNzFlZ2FCTUUyWDkxdGZSQ2JWVWtDdXVEckIlMkZ3TzZJb2lWRyUyRnJnVUdQYnFwN3RXSDVtdXVqMzlkZyUyQnphMzh5dllDSE9qMyUyRko1Rk93SGt6aWt6Z2ZHYURFS3NTTUlqTTRmd05RYnhOVUsxWnIlMkI2eDlKaHV3RmszREVZY1RhN0doJTJCZnAzS1MwY3FDemdIdFZIVVJyRGdkWEl4dkc2WExybWE3MkxWSWpLdiUyRjNuWGdHV2tTaEtURTlkbVZXUGE1UmljMGo2UE1RalhxM3J3SjRCWmlNNDclMkZnc0lzbmZ5eUN3JTJGdXFxNFp1UUhEOCUyQlgxd2x4QzNUbDlUbWNWb1JxQzU4SkNLMFRsJTJCd3QlMkZIanI0aThJMThsWmN2Rk1zbVZQeUxUY3VWYU1ydVI1YTZDJTJGQ2pGRkVibDclMkYzN0JWZGpMbVdSMjclMkZNWWhpS0NkNVBGNTNSNzhOek5jZ
VolMkZ0b3RZeHBuS2p2VHZtbURPZGQ0U3dHN0Z4U3U5QlFWck5sZWZRMzVNYmVaVzNod1VKM1ZrOHhKTkdLU2wwSjQ4TGUlMkJNamNzZWQ5ZHN1cXhiNlRvOFl2JTJCUFo1ek1OaVIzMVYzUXJDR09IbVRVMGlRQlhwMHlhVVpLZDAlMkZDT2RrWTV0QnQ0NTdzTThiaXpnMTNiSFZJalY3dHlJVDJZdFV3em8lMkJ1M1lkSlRJd280N0RWTlFERSUyQkZZVFNaMW9yWmx6R3JpaWtvMmVWMTF2MWMlMkZZbSUyRnZNOVFUemEwMFQ5WGJVZEJtS0ZKWHNwbTk1RmlHZDVvMHIlMkYwanJmekhyTURNemVrTEJKZlVERVA0QzhUVTBBUEJpcHlPS3J1JTJGWSUyRm9UVzlVd1NiVEdtMUdBJTJGb3B6QjBZdWFZZkZ5SGF5VXB0VUt1eXZCaVNxTFFRc3huR3RuUFVXTHpkUlVNTENEQlltTkpnN0QyUGtsZGY1RWtwNzAwZEtFNVBCMngwQ0hvSU9WVzNZdDNlbmRKVVZvVG83ZE04UUFMT21hR2p0ZTdsZkdVM0E1RUszRGdpcDJIZXZIcUtYVElQMnJJb3dBWkJYTWIlMkZ5emNyb2xZWnppN3JiYlJoV0U5bG9wZzFIRjZoZjdNSHJ2WGJsdEUlMkIlMkZBazJJcmRLRWNUbmglMkJkQUgxUFkxR3dYT2dKcEpSZE5mSWNkNkdjSmtOMmY0Y05UbERvamV0cWtzeVNwazJ3c3NlV2IzRUNDTW1ocGpuMENDRnNjV0ZtYkdkRDFlVVhMdUs1djZSTnhaZmRCMEJQMkV6JTJGWnB6dldIJTJGVFVBbDFrdGsxbWNBWXF1U1JaRFhjT1hhUlVLTDFHaWhRWTZZZnBGcGpHazFFbXNiMyUyQmVwTCUyQmdTdmx0amRnclRBSjFxdnd0SndGQUYyYXoyVm81alZVcGxlMzN2TFJnMERlSDdya3cxaGdCVjdvNGQ5MGF0a09pdWJ4VVZ0dDdLTWVaJTJCY0FZSzBKbkw1VHdwNjB2VyUyQmFGNHRrajR3YjdCNiUyRnFpd3RvUlFROENhMUpzJTJCUzlrZjJHNTdMNXVwU2h2ZmdtN1h4Tm1jJTJCelZPN2FTYTBmbnZhMEVCcmFBc0klMkZvMElVWmlBRVhua092T3MzZVZPT3JMdGclMkIwczRjQ3hGZiUyQmUwdzZQMDdhZ3JXVkk1JTJCMzBhM2x2WXRGSiUyRjVtVGpYOWlBY0UyV3hod0ZSJTJCVmVLWERHVzBvaUFOdWxqYjFMTnRmWjE5Z3clMkZJNkR0VVFzSXlxNHdVU1AzZXREemYwRVJxc3E0YXZZWGJYQTQ1Um12S2glMkZ3Vm41Q281dyUyQmNwdnAlMkZQYiUyQkJ0eElNTng3d0w5Q2tSNGR1NnBLN25UNE4yMXNJdVdrbFAxZ09uc1FBSiUyRm4zSDQlMkJ2RWxyRThvR0NDeTNnanNoJTJCWHhEOE9CZlpQSm5oR2RyQlFieW1vYjBZYW9veU91ZnYwNmRBRXI5RVJFYjF3WnFUbzdOJTJCOFJXeTRYWlVCMzdSbXFqQjB0QUNRTzlEcTY1dVpBaXRLWEhDelhib0J6TnZMbnVpbllMaE0lMkZGc040eVlacG5uY0g3Z2YlMkY2QWtoVk16SVBWSTc0b3VNJTJGdEVKbnhXZ2FBWEpqTlQlMkJXNlpPN3B1U3FIckIlMkZkcnpmdXIyNWNLQjZvdHdsSTA4cWtpZ1Fzdmw5aHY1MjZZZ2ZDZzRycU1nUlRDUVNEWjQzbHclMkJkTkJORlBPYXF4TE1Dd1VnYlZwOG5hSCUyRmNGR2x3YXJnSHJROUdlWDVJaW15anNjdjhhSHVRMVhkWFlJM1IyZ0xiSFNjZXBqMDFrTFZlUjElMkJvWXVlWEhhSFA4NXdFcndXT1ZqcFhBbnphSHJhcWZwaiUyQ
jdJQWlKcmRXdnZmMkFCaWszeFlydlFBTzJSblVvSmxvJTJCeUlia1hVaEVmRHRpMEN4M3dHS0FQJTJCTXhWbWNlJTJCeFZXVnVMbiUyQlEzVnRXME9NSXBKanIlMkJ6ZGtja1hUYW1mYnNBUmIxRiUyQjBrOGhaWXdBdkUlMkJmR0pUdXVpeEtjZFFaeDNEaVI4d2JQSTNnazJMTzBYS255aXpaSXN1MmVFV0x3aUY3S1FWYyUyRjVmc0VDTGJXUGR5THlCWTNvVHBrZ0dPNUZPYWE1UEdReTBDZUxPJTJGanVsR2MybnA1Q2xYZHVtZ05SMGNIYXFSQ3ZIdmM0UWxDbkRidnVYQng3ayUyRnFVTnN2eUFzd3FZdyUyRms1MGVEblpuV1VVNTk2Y0thbFEwSGVHb0lvWmhTa2JvQlB2clNtSlFNWDZUNWpRYWtlWHdCbDZ2S2FCJTJGeUFIa1NNTmdxRiUyQmJBMlR4NEFDb1QlMkZRJTJCU1JOaGNyOTRsSjhMZEc2NHk1bXpFOXdTMmp5VFJGcE8wNVJNTHh1bm5XR0RyODM3TjU2czJYb2RPbWFPR3VFSlh3VFRnNE9icklZN1VEZ3UlMkY1JTJGTXVVdWVXZ09FVEJYUW5uMzdpakY0ZmRkJTJGdG01TVlVdjBYOWMxJTJGMWExTlFvYWFmYUpscmRuNjFoTVY2JTJGTkFKU3IlMkJ0RERNbFJGZGlmTVhON0RZd1J2VmV6N1pkM2VwaE5odnN1ZE9QZmhSejdCbUZGNjE1U1pxRSUyRjVtckFHTVFsUiUyQng3WlpHcWVBVllOJTJGRTMxR1Z4M1FmMngzUFFKazAzdzlSVWRic05HJTJGN0FlNHhqRDhLQWpxaVRNJTJCRG1sZ1MzM2FCd05wWUd3SEZucXYzbjBRZzU5TXp1VmdtMUpkRFpyJTJGVE9NV1QyT0dqWWY5aFhrYXhJNkZxQjJuQ0ZVUFFJTzdFRmtBVlpsZ2Y3TkklMkJibG5pWGVCdmo0QW1Qa2Jsc3VNcXBSTXNkdDJUUld2U1hnWldUZEh2eTlPYWhHbEQ4NkFVdWdhOHlyZ3NKcDZqJTJCdERCdnozNWdwME1zbE5JRTUwcyUyQlpyM0w3aExlekEya0thV01qb1pES21uQUFncUg0WnRxeUxWcFJQc1Y5MTBJQnRTOFl1JTJGWUxtbyUyQk9UTUh5U01HMlZrNzExJTJCR0ZmTWVGWCUyRmRkQjcyVkVzTW1oMHhLRDMxWGdMa3RvSSUyRjdHNHFCTWdzJTJCTGhGbGExU2FQS2dGeHloR3hXQ1lFaHAzM2gwRyUyRlluMjVOJTJGMCUyRklkODhMQXVkeThzaVlKeFA5bzV3Q0xTOUJEJTJCTkxIZ3VuUjZsYVdtZU1wWVRmdFF5NkpCZW9HYkZSJTJGclFXJTJGbCUyRmhiVEVUTUxjR0RJTlh2UGJUN09mWDhPdjA0TndSWkh5cDhXWlJzY2VFcmxWcUxjTTVrYUpJTjRodk5lQnRpTlp3TG1lc1BhVUloeE8wRGc5SlQ0QThyU3RWUjNOVmtRJTJGJTJCcEh3OWFQaWZvelVyTVZLTHBvT3hTaFBQSktUQjZWYmM0ZWVRR0hQaGZNTHg3d1NHUmZ4MiUyRjhnclVQOUZJeFVGSXlJSWE0SDBob0slMkZPMGJCeHZJMyUyQlBUZ2xOJTJGcms0dXhrTExmdlZyMyUyRmJLUHo1YXFyU1lLVU8yY0tlRmlyWjFwQ2JYZHdiejJwUFYlMkJtd2lSUjVEZnNOQmpOeVhaWCUyRjFCV24lMkZaS2tHUUZlRkglMkJhUDdUY3hsN3N2TlhYRGtEb0NYb2d2enNTNk1pVjBVc2wlMkJidUlCb1RnJTJGUzQlMkI2JTJGZUpxTHh0SzBXVm1ESDdNQ2NhUlVyZWFxMVZTY0NubnlBSm5VNWVVOGV3MnNrZ0pXSnFHektxRENITHhDTHlGaUQwa
FNTZkhaY3JGUHg1NTRlNmVrMHpxUkJvMzFJSXNIUzExaFhqNnRhdzExVUV3bWRVeFVWWkZuZmx0cUF0d0k1cVptTnJ6QVpMUEVUU2M2V3pJakZMNGpwYVlkTyUyRjBBNENlOGglMkZ3eXRhUjBYbEhqUjZ2RzR6RWpzaHZNZzNOU0FPR0RRV3czSlNoZkVlWCUyRnE4bU5QWHdRVGs0RlpsdEpFMktkRlF5Nkl6ZzBxRllINnhjRzBIM1phS3FNalVNTlkzJTJGJTJGT1UxTG9tSmFPdTVkREdDMXViY0dhbzRsZlRZMGxHSUdQd20xb1NHOGlmR013NEc3NCUyRkptNmFhVzlVZkNaM1NTSlBwNjZsd3BFUVRuZmVGM2Q1aWNZRWpKa0xFQlZaenBldTB5dlJQbjk5OHlkQzVOakk4Z2xCdXY2Vk15JTJCYU1JRDVVWjdwV1dUUVJRNm40VERYSFJaYk5jN2tycEd4MFNOV1U5ajJaRFpwamxNVkdEZ0JNY2lueUYxQ2VtNzlNNkYlMkJpZzBSeUZiazZmV1JMbXpqN3BCcEtFMVNONnozOTV6QW5XU3RUdVdrOXJ6aU1OTmoxN0xwZDJKeWhnaE10MlRoQUU4cURTekNCVUFaeXc0VVB6OHclMkZUNWFDZTlBdTBFJTJCVldRVUQ2SmJFbmRBNGk2V2ZsQkNTaGdTaEJCVEFiZmxGd01IYnlyJTJCeTZoR3E1NDBvVlEyVWx0aUVhNGlaZWFQd1ZtUHhPQzZQNDRKc3pQaFNlY1g2S1Y1c05KMkdVTm5iTkxONiUyQkRvN2FOYjhMTkxpS3NXUlZxOHJaZmNJVkw0QnREQnF0MGs3VUc2MXQlMkZZcXJaUk95TGM1UmpSSFBxa1BYNVZDUnUwUVU0QXl5ZEtIZjk5czFFd0UlMkZoNHZCZFpWQkpUZm5KYXNYVEFsYk8xZGFuVEcwVmVWMkVVUm5ucGRjVzluWHp4Wnh1bUpTNU5ya1l1SWNsbWFpRW9yMGE2UmY3YndRd3FoMzZTcnJobUdmNHRrYUI3TWVMWGxaS3ZzZGV1UHBBYmZYUkhiOEJmWmdlcFkzMzRCU3BYdVcwTXV2TmRMMVA2enNoU2tIaElxJTJGNFg1d1lqd3UlMkYlMkJ5YXdJVEk4UVJDMGNFMEV5T2k2M3N6NXF5eVFxTHBzY3BFZEl0ZDVGUWxJNU9aRmtZbzAlMkJkU1NlcU85djROTUQ3VW5yMDdiR09EWklMZ2Z4eGZVQ2xTbTZHYmc2eEhMbG5DaTBhbkcwb2RXZlNDUWRKVSUyQjJqaU1JdEQ3JTJGa2l6OEtNbE9GJTJCUmJyc0x6elBQZXhMdWFaVnlHY3VUSjdRN015bFNSc09zbHE0Tk1IOHZOaXBsek13TzNybGxiQWp1b0d4QXVPOUhhZkFWZzJ2TWEyRENPT2R1SiUyQmxBajdPOE5NdWVFVVVpd0clMkZ0TFBMelhVdFVqJTJCck5RYm5mSE4yQnpab3NvTzVLNW9EYU9DZzJLZUZZdGVUMHR5SlFDJTJGOUVMJTJCamwlMkY3S2h6RkJSYnBsWFVqVmJkUGlDOGhRUFBVM2VrZXk4Q202UGhVQlRCYUF3ZTNqWjYyJTJCYjhieEkwYTFjRTZFUEhRQXlDMmduN3dwV3ZCMEFhQnZWSzdHWW43QTNwZ3J1UG5IZU95d0R0NXJod1JVaTElMkZqV2tqNDlaZ25JUmpKaVJnaFpGTDlrUUF3a0Uwd0IyazMwVXV5TlVTT3hwY1NhcmhOUzg3TjhiR3BqWjY4YXdhTlNYbWZQblJ6M1ZFdWI2RThWbTlVeVZrbjE1TCUyQlB1UlpRVW9YazQyVk1mOVJqanJjTjIwcyUyQlJIeFVXQm5xOHZZZmFzNzAwQjB1elB5eSUyQjFnTTMlMkZCUzR5eExIM2pHT1B2bmNKb1hjRHJhUUhVVk9QbUpJVWl6WTFsMkE1MWdPYzNsR0tLU
m5tZldlMVc4MW81SUlLdGI2RjV2em1UaGZJeWJqZG1PcExqYWdyTXg5d0Q1ayUyQjhjdWNqSXZrd0ViY3h1eXpjJTJGbWpTd2U0NE9nckdJJTJGSGZjVFY0SyUyQjN4NCUyQlVYSmJtMkZxR0tpVjhqUFdIMGVOaHhzblFVVjRRc21mSmtxRlZ0VHlqcm41Vml0bjVWNm1OUGFFNDZESzdPSVRQSFRsQkpod1UzNVZ1elJBc1N4VGtrZlRHakJOYlhCNDlEdUx3ZzNNcVFSME0xOGxtdXUwdG1oRmdtd2xjS3MxWlY3Mm0xVmJWcTd5SkxrU2JGZ1pvVENCWXozZnFScGZHcSUyRkNHazUlMkJLYUNZc2YwUzhseVBTckRlUzlISTVNSmpmbTFyYWNKRHZoQlFQUDk4OTBORTBUOXNrRk5JOTdmN3pER0I1VXhBeW1DSEp1dnVyJTJCSEJvdkVYa20yYVJYcCUyRm8xaHNZajclMkJvYkhvRTdONyUyRnYyQlZLJTJGaW04UThRamlvWDRmdmEwY08xb2c1JTJCdmNEa0pnaXBiWk5nRzRMJTJGc3VDZXRUbHh6T3FvVUVZSmlTN2pmc3B2QTBCJTJGcDNwMU5NTlB6M0VTWTJRd0piS0QzT01LOHJDVCUyQjdRSmdIZjRkdiUyQnQwdXAyZHM2azE2eVZscFh1czEzSVVkeU1menJMbGxPOVNPSFZ5d1dSR2UlMkZueCUyRmFaRm9qMkNCdyUyRlVpazRYNzlCaCUyRkQ0JTJGRnZCYSUyRlh4ajJtSXJuYmRnSkwlMkY4RG1NdzJtM0tRZzI5OEUyVURWMFoxVEU0ZnVpTVhJbXFHc1QlMkIzNWxTcWNlTkxSb1pjSm4yck1uaHRkJTJCR1hjUjhMSjBIUzBTRTliRzZMdUpNVjJRUWc1TmhJbmV2Z2pPcDhwMW5ONyUyQnVnZHpGMlZ0ODR5SzNVQXpnRGlHazRzb2VZaXJyQW1vN3E4R1VlJTJCM3BMRFY4VWFPRkwlMkJDdDM1S3BRT0dGVUxUWWlQQlB5UENjSXpxeFFFZXR0OGJiamdmNHBJcyUyRm15YmxWNDBjUExveHNsYUpZUnlUS2k3TCUyRkkxYUNWYUNYQUlWU2l3dXpCWlBzcE8wOGUxQUtZOXdLSDZIVWglMkZmYmhPcCUyRmtuSUN6N0dOUk54V2JZSEhPQmNBdzUySVJkWTYlMkZ3ZFYlMkZjRVVuUGQ4OVFjM0huenFyRXlRa0JnTWgzZ053UzlIOVNYT1FMSEgwb3UlMkJTbDRKdXE3dGtyJTJCOEExUkowdmlJcThqZWM2R2xZU1JtWUVkdk1JTjFRTmpuaHB2WEhMR05xJTJGb2tMNjFjNHdad05uUnVuNXZOJTJGOWNPeFl2dEViV1JncFMwdVFpanlaTjJIWGpCTXU0djR5TnJ3eUFkeDd4dTNoekhvOTFKJTJGYlp6ckVxUlp4a1I1SWNxNnVvSWo5UUxlVWU2djc1eWdjeWg3MTZGV2clMkJGdW5vTFBsaTJ1VXQ5cktqaWZmVjhrdTlQZ1FYMldFVVBTUWJYcXdxTFFZOUNrUFJ0aUl6TFhZekRCVkYlMkYwSHh0OCUyRng3cmRUMlhaVkRqJTJCWSUyQlFpbDhxU21xalVZb04zdmZOYkxLeFl3VjBjY0FSd2JaRHhPdGY4NkRFcHB3cWx6eTdKTFJJQXVPUU1ORWh0Wkp2VXhaR1BsR0kzdFJhSktQWjlhaUhEZUpUdmtZdGdnVyUyRlglMkJnT2lPb1I0TlMyYXd3RHRCTDAlMkZVVFAwYVl3UzMyUXFnUERNY1F4V0pndllVbXBwenZpS1lNTmM2eXczTTh3cVN6VUNVZm5OblNORjhlaG9saEtjMkJZcDJnWVN4cDJKVEVUYzY1SiUyQnElMkZoeU53VjFxdVJMTzJqamV5SFo3RGVjcGplNWVBeVpxbkxkMSUyQllKU09wY
3BVdnYlMkZPJTJGS1hvTHloTmJhcGZrJTJGSkYwQkhoMlRGRnhCMnlQaWQwaXh4TVFIbjRlcVRrQzVmNExlR1JLTFk1U1ZwVnBsOWUlMkJLd254d2xLS3VWazVLT2ZoanYycERFaWp3dlU1eFdYSjJyS2ZuR2lJYkU5WVZpeGZscnYybjF1ZHI3JTJGSU9IS01jcno5YmNweG96azZqRnhJUXJRdHN0WWZTaXpaQ3o0RmNHRGZuJTJCVTVlaFN3TEJFUUN2eWpYd295bzBRVzY3clA3S2VwbDclMkZkNnYyUWxYTHNtJTJGQ0xoY0J1VUtaclBjSmF3QkxGclhZR1NZTFlwS0ludVUlMkJGRzJ6VVg3VWpLJTJCbWg5SDA2UHI1ZVZMJTJGQmpXc1U0M0RwWTVHWmJ2WWJEYnVjMGdhRFh1cUZ4bCUyQnZ4N2FlcmszOHUzRkhjc1ZBZ0NvRmZyejdzdjVyN0N3cHJPNjRHUHFNNlJrREZkdHJTRzF5REZLeW5OaGNSTDRNZG9pZE9kTTFjRXNJdFl2YWJCRDlpQ2t2aEk5R2RxNnNza1pvY1NXbmlwYjFYQ0JHUGF0cWFwZDFKWWdYT0ZUczQlMkZnS056clZxM2ZLUlZqcjBFU0dIZG8xSHVLcE5LNFFkUHhBNEZGbEJYNUxtZG1jWkhKNlRMRkZsaUwycGRpTFpMVHJ6aFViU0FWNHNCYTl1ZEYlMkJzZDNMUjdHTE5IMmFJNjRtV3I0Q1p4STZiU3djdnJKRlNnWlluVmpkcjglMkIwOFdtaWVCZThicTBoaTBuWmZjeW9ZTXB1SFdwJTJGM2RUZXhnJTJCYjloZXNOdWdoVW5kemZiYiUyQiUyRk16UTFPMEEzWk5vT0F4Y2RJUkNjQm9vazN5WXpGVEU1OU5XJTJCaUlJVW4lMkJZZnpwMnFWcXNPNTZlS1JZJTJCdDVYeHFyQ1BKMnpGUjBKeiUyRm9GeVJZT3FOY0NFczdIMkdSUk5heExORU4ybmtoRnNpekRXJTJGJTJCUEpkbTRwQXpVb1F0VmJNWEx4aWMwWFZTQzRGSXlYeCUyQk1Ud2RXTlYyOGtUMTMlMkJZcENTeVl0aWJpQ0JHNkdtVldaTzQlMkJNeVU1dXFBUUd4VFhueEhuOHJxTlJoalcwalIzRVF0WmVoWkx4YlhqeVRsaEZ6WUNhaVJQYXplRUlmQ3h5NllGMnRuRHlWN2JaNU82RVp1ZDlmSEpWU3RFblhDaFBiRyUyRlUxREdybXA0eVFpalEzalN1aHVQb01JYm5nS2pzZWZRTmtHMGJlU0dyVXJ2VUZtMkxNZFYzV3F5bVd3NSUyQnp2VlB2bVI2VVZ6aVRTd2x4OXlQeVVJUHF1ZVElMkJXSjdYZ3Myenc0eTJSeEY2eGlxMlV6JTJGNEhrTmN2STg5OTN3cEZYYUUlMkJCdlhSb3M0TThQQjQlMkZnakhKTkVmSUVFeDFFc2d5U2Rkak9ubCUyRmpaa1BkYnBTJTJGSDROZ3ZRUmE1WVQ4V0RnQUFYcjBtdlglMkZxaHEwV3pYemZsSVVsTHpIMSUyQkEzN2F6JTJGMUx2WWtQZ0NXWWJUNTRxJTJGamtaelNmV05vUExYRldsRFhQZEdXZEpiaFVUb2FuejklMkZMUVo3bGx6YTlqRWglMkIlMkJ4dXclMkJpdDlNZlh6bmdRaHRTNEdsNmglMkJDSWhaZ1U4VXIyMUxEblBjdmIlMkJxVDFiOCUyQnB2JTJGJTJCNzBtcjBqa2IlMkI4TzRjSVlDbUFUZk5VcGtZMUplUDlxREVFTVNIakJyOWYlMkJuTzZHcWNUb2toMFA5VmNQblJ6S21PbTIyTnR0WlplQkgyaXRVdmhhT3FSMWFpR3Nrd1d0YXh4allRbzRZTjVSUmZTZkgyZURkNjNubDZLZW4lMkZ4REhXJTJCR21qakM4MFN2eGJHdmgzTVpoNU9EOGYlMkYxN3ptU
DlPdHVaSVcxQUhleHYzbFE0elBqaiUyRngxRzA2NzVwc2VNJTJCY21RNVlCbTVBWjclMkZwZW9CUTRkV0tjOUpkNzVsTWF3d0JaNWloam9Ea0RaTDZ2YnZWZ3NIT0xQaFdjSXQ4ZGZaSTZGeXh5THRoa0RIMUNVYWI4UFcxdXprU2xKb0x2ZUElMkIlMkJudWVpc1h0RWtrbE8lMkJDUXhGNWhQVHFvZE9BRmtOUjdrUWoyQWlNJTJCdHRVOTNGZ1RZV0RyRUZpVngzOCUyQlhrWXRkN1NOZkJpenpOODllVUp0dmpETjN5cHQ4ZE9OQWpNJTJCYm5Jd1JDQU1YJTJCZlVEWDlUWExIJTJGZkRTciUyRmZHVmVzZExxdiUyRnNOVGVkVzFrJTJGZ3JKMHNrSVJzJTJCUVhkV1RwbjNDJTJCNkNlTHRhc2VzdGNUeW5rblBDdVU0YWdyeHlQMHNpVFRoTXRnRSUyRkZMeDhyckYwQjU1NXNRUzkzeXpoS1FnQyUyRk04SVRURzQ4TlN1Y1NmJTJGaWFtclJzaEZXU3dwOU40SWlOS1Zjd0IlMkJhT0p6UmpWdGpIMXBQOXI4VDBEUE5VSHlXU0dWRWNiUTRISFgyemxpemw4QUZzdWYxZnIyRkdJZjdzam5USnUxTUt3NkVGJTJGM00zSTIlMkYxa3dMemZHRCUyRnZoR2w1V1d0RkxjV040ejV1RldVU3E4ZmphQ3FrTmtKTWJOV0FQVzRvTlpoNzJwbmElMkJzYVMlMkZGQzkzYUtGWkpIcWpQejJjcFR0QUJ1TCUyRlNJOVpkR0E5SyUyQlowakVqMzlESGNOdmtjNzRVeFhDdmxnOTlFc0l4YkxEYmxCeGNEVVR1azc1bVR1V0olMkJ2azc0TXNQOVVLRnMlMkZjSXBSRUtvMWlvVGsyMzNlS2k1ZW1rU3NaOTJWV2tLZUVrMTdPeTBQQktWTjVsR3B2Mld6ZkdwOVpSdWg2ZzloTElWckpoVkwlMkZCZDdmY3RTMmpDRHNCRW4lMkJTblYlMkJxWTZHeHNJQkFMWUxpS0ltazFMcmdpQVRJenhXdzJjUzJXSWo1d1dVQzR0cFZuamZYR2R1OHNJeDd2ZkdFS2FUTEFIcEN3YkxvT05uQmdDdk9SSTBOYlhZdFc0SktrUlVVN0J6a3Z2eDglMkZKMGQ3JTJGQktrT3NiVHpCS2p6Qm1hZ0NyYW1pYm9MNktPbWRFRlpoYTVTR3llUkIlMkJhZ21hVTZUQko0WXozVlMwTjc4NVJuR2k1bkZoejJKNkhVdCUyRlkzQjJJeFdadWJiVjJuOFQlMkZvQWtLNGZNcldyUFNjJTJCU3IwMEg1MFJ1YyUyRjdIVXlUZmZvTDNiN0xlTmh5UiUyQm84VzFYYkhLdlM4T2QzUko5VUg3V0l5WFNLZ0hON3ZEcHNaVlZTdkp4alh1cFFGaEJqeHlhd0tZcSUyQkdLb25RMjZOJTJGayUyRjMlMkZXTExUJTJGZ1NsUWVqNHRxY1lXc0pGc2JXJTJCRk5pVjk3RGR3RE54UHZSdkQ0QmxNdTZuaUQ5ckk1TUZuTUdHZUdwUlolMkY1R1o5MmZNanlwemswSnAlMkYyZzhlVjU1SzJlbEIzRlJuTTJjUHRWMkxzaiUyQnFueUFmeEpGSm9yJTJCRk1aQnZzVG9rY0pUemJQazlCazRJeEklMkJNbXJoR1pGNVIzNHVRYm5qRkdPSTNpZ1FacGhaYUU4NmFLcjVQZUVzQWRkRG9QaiUyQldUMlE0N1BreGl3VXI3UGEzZjE0NU00aGN2TkV6JTJGVlpQODNSWjRCaXNBVGpNVSUyRjdnMVJqSEx1OUFaWHdEY05aZHNvOFolMkJBeldnSWpKcHViNG16VllzanRoSEQlMkZNWnJ6akJ5SjJNVWElMkZ6NG5vZDNnalRPWUJmMXhHdXU3d3FOQ1V6eVBRbWRMSXVZYllrdGpraDh6S
TBHNzRDQ3BJZDI5U3hpeVJ1ZUVJcyUyQmptbiUyQll0UWZvdzRtSHBCYVZBekRtMXkzQlROcFRDeGRSMmlHNURvT2pHZWRyZERodWtNSTZlRlJDQWs0NXl6JTJCNTdEZktoc2tVTjFYdmlLRXlwWmo5V2h5ZkhSdk9pOWJXQUxPRzBVd2dtQkx0TzlGY0FaMEZJa2NvMnRaWkVoVnpVVXE4NERuRlJ3TDBqbnJEbUVDYkFVdHhvdG5WaGFsMlR2T3pMMXQ0Nk5FMzBsN1VyVXpIYnl1OExRRWMzUmxhdW94NU1GMFVONkZkU1RiV0tmWlNoY1ZZOHFXdGs3elRWUTVaNllLNk12UXFuT3dkWiUyQmpFJTJCelZTcjE1WmRTdHF5WktwRDVvRXZ1NDBuZmdUd1F1U0tEenI2TkYwYVNtMHFjcmxkS1N1elZZdm1RN2slMkZpRWJrcjdxTmhKaFhFdmpMcGJmbVhtamJ3d1h0aUNzNlJkc2RvUklTemsyd3lEOG1jNSUyQnB2QXdFN0pST0lwWUZmU3dRWDdKTDYyMnBSJTJCeVNpZkp1NnhLR29XcDFoWXBsRkl0WGNyUVREcExjdnp2VDBsOTh5U2pJc2JOJTJGN243UjAwZWkyYWxJS1AydHhzTE03Nm9Mb2RMSnlac1F0dG5XYkt4Y1BZbXNFZFVaaU9NZFpUMmRoenlMJTJCVVlpVUdLck84WFdHamR6Vnl3OHdqY1RUNTdLbFMydTFzQlBSSWlUSjU3TmZ2MXhLNXZMN1luVTN6MjRHT3JIJTJCa1FKMXJaVEV0dmxoRXZqNXQlMkJ3V3VEeG9WdnlPamNCQmluZmpRckF4VlQzbkZMRnl0SWdzNDI2UzR6TTI1N0VpRkl5SFpOdll2MVNCUWRQTExDNXBDWUhtT0JpbUtXJTJCUmFPaXVnckg4WTNaVk5aRGUlMkJGVTA5THpyYjBOQjFYMldFTnlEbmlzRW16TEhrS05uMFZUeTUzTDdZUlZrRmZZT1BzbGVTdndiN3N5S2ZGQlphJTJGQlNpNGNFeks5ZmxJMyUyQkxxNlVTNlcwTHFrcnB6SCUyQmVYb3Z4eHp1bWFCRUlFcnhUQVhUZW1NaDJuQjlkSUNpQURMUmh4WGUyd09sdmFiZk9aeGI3cGZpRHlmR2tQeW92U2hVdVU4ZXJZbjdiM3Vsa2Q1ViUyQnN4MU5xVyUyQktaMndxUnd2cFhMYUM3cSUyQm8lMkZXNVNSd2R1cDZmR2FMa3o5ZmRqNUlkM1VDbEJYU3JzanlPT3hzaGVCbzZYdzdRJTJGc25pVnl4dEhuSkJKM21DZCUyRnkwVjd4eEd3YVM4dWh2Nm1QYlRuUHpaZ255cVIyamdvUUF0NW1TYXhxZ3MxWDVTRHlnVEF3YVdrQlB2djNHaCUyRmhqJTJCY1NRemN3VElIaVI0aXNFRTNwNmk5QVBOeElzOHhvVmZlTG1kWkRVWXd6MjBZaFpVMjVFd20yTjlxdjNXWHZ2UWJsbjRLWW1tY1FYYmhsQ29qSUZVaHI1SG45Y2FlcCUyQkFGVTlsaWM5VzElMkYlMkYlMkZYciUyRiUyRlR6MnUxQTB2JTJCbGI0UUtlWEIxdVQwSUdvQm1YaVBLNWNONTJrTURjd2pyNTBqajdQa1kzaDU3NWpwOUNnSDViUVRZZkE2TiUyQmVpeFlPZzZqeHJmTnFKZnplSDFwaEhUTGNCJTJCN3paVURNb1FoZEhNTDF4eVZ0T2Qxa0Y4YmM1SzcyNXNqbTJkUWJqR0t2Vk1XN1F2bHZoakxRSHRmd3k5YzNqMkVLcDFidGQ2emRIaFFCJTJCcnJmV3FvRjZYN1ZkNEluVjR0UThXazlvb3RXaHJpbUI5bDNFVFc0OSUyRnVBVzJQaFJvMVA5QXB3Y3prUnNlUHRPZzdwQTFManJDVVg5Y3N1MTJSTiUyRnF2OUcxV3M3cWdjZ0VnTWhkOEtxU0NoY3FvQ
2o1dDEwNTNocks5V1NxSEh2NCUyQmpFdEZVZ245S1RybXZwSDRMc0tINU4xZjAlMkZkb1p3TUd1b0tvWlpxSlZTTzU1Q1NCS056NHB6NVlkNUQlMkJmdjRpc3BieHVvQSUyRlI4dXg3UzVVZEtFTWlDbVBDREZuMUI1QTZkZnZKUGM5bmR5bkxsMFN2clIwS1JIanJuYSUyRjh0TFRKJTJGQW5BVlhnVklib1pubzZMNkY0QWY5WnlwMlkwbG1UeWQ1STRiUklXUEcxeElDdThpekxhTVY2cEhaaDY2a2h5UzNQMjNwTzBSRzF5YnRhTzM2TFJPRVhGSFdObGQxTVR0YVQzUjFseWRra2clMkZqRlFhSElkbWFTamtKVnZXdzFRUUQwVGh2Um9yYmJJaU5VblNOcG5lNHluUjd3anhTVGRHa3ZjOEZyWGR6ajlzJTJGeXNYSUJpQU5SMXhTdWw3a2cyV0QlMkJFNUQlMkZiQSUyQnU0NWMyY2RaSkZ0WXZLUVFLZ29Ta0s1aGhGc2ZwSm9XZ1VhUGV3d0QlMkY0dSUyQlZieXN6JTJCNFpqZGV0blpCaG5DU0w1V0E5UDJwT3MyMnFaJTJGR01pYnlCWWVJZkZudSUyRkpJTk1MZkpxUjY3bHpYZm5oeWdNekFaYzFPMWpsbUQ1YXBzUiUyQlp6eERNd3VvbEl1SEdOTFpzaUdpZkJHcThGMCUyRjFPZ214QXhmZUZOJTJCSGxiV3pXJTJCVGd6TlpOQVV5NlM0WTdjV0pSWjBNN1lLbEVidiUyQnJTY1RoeVRJWExLcXAzMDN3WURTek9GVHpPRURldHZ4d1dmVHpaWnFzUmVZTXkxdHdsQjF0N1o2UjdLUkh4dFZzQ3hUWEl1ZkF0cFlzYjZPd2E5RG9KJTJGUVNSaWtHSlY5MzFKSXBLSjVHVFBGcks2a1pYQlBXVEt0SlZ3SGdRUyUyRkVxUjk0cUo3MzV1UmVScWNyRWhEVmRwWTBLcDJ5cXJGb2VJUm15c2ZvVVRHRlRhSDFJJTJCcUpoNUFCb0hQNmFXa3BFUThhQ0w3NVBiWXhNJTJCNUlpT3F4WnhWTlk5c0x6eSUyQmZTbDhVWU9RMVRLTVNQT3BTZEc4NlRyWDJ6Z25jaXlXbDlTUHRsUks4Sjc1OGVXSWFZNzEzNFd2Wjd3OWxDMU9KbXlhMlBJUGlZbjFLRVowclF6V0RrWXlsTjllUEV5VkJlcWtTbmRnRUtpSWdzRzlyRUhFbFB3RkpTTTZOclNhWiUyQldRTWNpbllWWDJNemJ6MTJQdEJkSFBmY2hzTlolMkZpZ1RuZHFLJTJGTW1ZbWM5NXRFNlVzb1NaQ0pGTDBQOGVsWnBieGFWbW1meEVTTXdnVjd0c1RrMk5ucW91RXU5YzVUbkhWY3ZnekN2TW13eW9KdEJleEN2VDRSbmtObkN2SGslMkZvVlNHJTJGWGVHUGxjbWFkNDJ0VTVkdk95Yk14YnVjVFR0ZDclMkJrYXpFZyUyQjhaOUJmUHJHVWhNdEtmQXF2UW55NHB5cCUyQk1aeFB3TzRrZHV0RDBoYTFNYUR5V2xJODAlMkJsdjdNSHE3NmlocHd6ayUyQktFeGxFOUhBcmNkYjZvQmNQZzdNQUhXb2xrVUJJMFQ1WkZMRFJLZEklMkJoZHdmWWNxWElFWmdnJTJGSTlaNW1lVkdsWmp1WG11MlZYRElWMTV1Z3pCRDZkVlVybWtmdEZiSVZwVFlCQjIwVFklMkYwc1FHWXVkR3lGVSUyRjdqNDNwSTF3VFNWMnBycFpFT0pXTmx0VDVmJTJGUVREd1Q2NEtLVTRGQzhibTl1UERhRyUyRjZndm5VVWIySmdDTFJYcUlVTlFoaFUySnRRbldVWFdSNE1Kc0Q5YmFBOGdNdTVUTnNRS0NIcCUyQjAwVExScDlJY3lneGJPdk1yVDA3bW5rRWxTSnNBOTNvVWV5aTclMkIxWFVheXhtb
WxBTTB6aVA0UUNLM1JaRVEya3liMUp1JTJCWW0zQUl6MndUbU1lTCUyQlpxNXBNeDFXUVRTUjhKQlk5bE5nWTRWZkpTMmY1d1h4JTJGb0NUV0hkVmZGOFJNMGladDhLYlhCUlclMkZhR2t4WFZ0aWdCZjklMkZMNXFwTXlZdkg5TXNGcUJkZEZXaGhSd0x6MjRCdzl6V3hMMXA4VjJOZUVqSlF5cXFqNGxraEkzaERBUkJUJTJCV3g2em1Ydk9Sa2gzcnZHcXZzZG9HTDRVTVdURCUyRkF5UXdNVTVuOGNPUUJicUYxZmY0RHREYSUyQjdSOUV0ZWgybXJQRkE5Qzl3U1c4WE5HJTJGb2poOXpkU21Qb0pHNDMwelBpd1V5WlJnUlIwTkxiWDglMkZDNUIxdlhDcHFxRSUyRkVOb1g5dGszJTJCVGp2TXR5UmJRJTJGOWxIbXpBM2s0M2xrS2YxOFVPRzg0TjlNNWJ6aWE0aGhoY05lTTI4clg1eVlBdGpUSWpYck0lMkJScnNhQWh0QnpXSHY1TWcyZlQ2ZUQ3WFBDQW1vMWlxZ3Q0NGFhTjE0ZjFZZ2ExanBNbDJHQ3lDeXh1Tm9FayUyQkZGU1BYYUg2TFBRakt4ajlTM2xXY1BpU2NuOXhZdW04ZXBBcXgwNjZNJTJCU2IydmElMkZVU1E4NHNxJTJGRDN1MlRlSGlvRTY3cSUyRmtUU0lEVTIyQTZ0ZUNHUEVLUzZjMmolMkZ1Vko0ViUyQmFoU1d2dDBIeW1PNm1MZFkybFZUbXFoYnJjVlNiJTJGb2NvRDI5M0NYdjNFMUkwTVh1VTZsZXA3ckZubDV1UWdvSWh2YjVLOW5qeUg1aUZ0RWdheE9rU0JrQ2RDUlJNTnlFS3V0bEh0dXA3N0FySFpDbmdjV012aWR1VnFPOGdZSkR0T093NnhzZ0g2ZGNQTW02JTJCQTdBcnFJeDA0SG9xdXhyQnpZdGVXTlVqWGN1SHJLd2tLWjdreGxIQUlmUzR2eDlQZU5lcUlMc1hXeWZ1ZGRiV0t6WSUyQjFYdE5Tb2tmd3E5T3draEJyOGJrV2U4Y2VtdXBiTSUyQlN0cnpUMWxmdHFzeGFDQW5raDYlMkZJa1pTaUlMNzVJWjJrSGtpbGgzMmNoVXBZRFZHJTJCSVZmJTJCYWFLdlJLM1UyU1NWUFAwJTJCY2FUMm9NWjFXN3JUOCUyQlltckJlYzdYdG55NHBITnBORTM5JTJGUUdWRXN3JTJGMFFDaTBmdFhEczZXMG91SiUyRjgxWWd6cWRPVXFlam9SJTJCJTJGZlRNaFhDejhTS3JhY3JGNFVYTUswbUxtZnMxS25sOFRGRmdEVXNDVHRGT2NqdUFZU1ZQbTNMQzBwTU5pSiUyQnBhNWdPeW5ERWpoYmlvUlp3ZUlPb0pVNG9YNzVya2F0SHlFd0U3R1plJTJGN0Q2SGRtYSUyQm9jUVRLOWRUSm5KdDVxbnZvYWZVUnk5Zll3U2MwQ3RmbEQwWUlvWDhlM3ozdVRuM3VKbTJURzc5SWVUaEZubGJUOXF6WDM1akZBRHhESU02VE40T0Z2Q3d6R2tTYVFFZ3djeDFrRUZ6eU9TUkdpV3lOOUhkJTJCSFI5eWZwczZzZTQzJTJGZEkyajBTS0tqc0hJdGpmZ0MxUVFld2VDeWdVJTJCJTJCSHV6a1ZyZEtHOU1PZ1NOcUdOaTJESEp3eldQMFBBMTl0NkhONkgzSDdKa2djVmJ5eUZCTkNPOERjdllSbiUyQkQ3MlFMdWdWakE2SXJ3MVBxcGU2bHklMkZ6VkM0WkJVOVNQN0xvT2prQ3hOMWlZMXFYWHZ3Mm12VkJZVzgyVmNmTmV5aG9PNHk2ckRvMTNpYnlnVGttR1FVMmVIdVNtVjBmeHZRdXlkVyUyRlJYQ1BFVllUdnRVd1klMkZvVVFIdnNrV1NaekxPWm5OU2RKUVZhWTNNY2QlMkZHdzhtWTZEZ1hlR
iUyRjY4MHdSNUUlMkZsN3dhRDV3TktyYVQwN245SmpUMnpSQXNDQlUzREJUODFtcFRlNjdxM1ZiUGk3MEdmS21ET2FVVkM1cnplb2J4UVhaeERkcSUyRnhReGYycXNQMVNIM29JT3NqRUpHT3JDbWwwMUVXTVRaaTl5MkV1amdHRzdiJTJGNjRMVDV6RiUyQlVjYnRwb1dZeFljWGFtZkwxZGVNVFpyNHN2SUo3bUZaNlpWVUo5eDd3ajlBZG4lMkJSamtOaXB5RWE5JTJGYmJIbXVrUzBPTXp4SGtsb3FXd3Vsa1luZ1JVV2d5cTlWbkRlRTVhVHZJNSUyQmgzR25xYjFhVUFtbXFVTENvMlFmeU0yVm5EZXVhekVOcXkzcFNCJTJGazdWOGZVeldQZloydCUyRlNmaVJpWDdNTzRoUXlrZElmb002emxBSWtzY2puRnlEMkh1bWZ2R3ExZGhnZk1VMGlrNWpWOVFrUGRlUFpyeFlaUVhEcFhQOVd1ZEpEcEN4U0NpZG1UJTJGVGI2TWU2SDJ2Y3YlMkJuY1kzSFUyM3ZEYnFSd2pBY3c3aSUyQnczNFZJcVlqNjVuUFpBQzJHWnJpNzllNzlUaHN5TEtkUThESDYxYnh4aWJ5RkMlMkZZNkh1WjVGelkzMm9uTm9aN21WdkpqY2pBZFBLa29GYkdEdzNOdU5IRjcwalUwNlUwVGtCNGJIdlZZVnF4aXk0RSUyQnpqcUklMkJvQkttVnR5TGV5YWFJYW4yVlF4MjNKOWtWJTJGRmhqTjVhb2xLMktQU1ZyeXo3RnhsY3lLM2U3QlZBaWQ5amt4UnBMdXclMkZua2dwMlVvcmZoU3lrOUd5MTVzbEwlMkZlWEpGRnptVEdwU2daZXljbWVIbXlhOHBzN3dyUSUyQk10c1olMkZjdjJBZkpNaiUyRkFKZUppMUNKdE0lMkZQdFliSEd6YnV1QkJhYzhoMFY1N2ZvRWRmbkJRbnV0clBzODNZTXlYYUVtZ2Ryd3RnWHQ1SUc2ODlSTWhOb1MlMkZ3R2t6VFdFNjJHVkhrUTd2QiUyRlBOSk5hTElpUHM1cXZHY2NtZWJQN3RDR3NIWXA2RUxSVjk0QWY3SWt5eURXZXRkQ1dqR0hzMlNMaDlUeTU3VnplTmoyOHJwY3NSR0NtQjhTMzlZJTJCeExYUDZVdlFYQnB6OWx5OWdUR2QwZEFvR0l4YVVZRiUyQldudkpZcDVFdmFXNlU5azZ6UEZkUUJYSEVwN25UOW9Qc3dwS0t0YzZyJTJCcVRCc25BeE5Pc1UzblpoWGVJNU8xeWw5RmZIN2FnMWdJR2FJUVl4T1g1czlmR3pETmd6TSUyRnV4ZXlGNmZoODJYU1YyRFZ1R09tUmlTVTFkJTJCJTJCRUxUMEdOcVhjRGpMY2h3SXFQdlMzUEN1YVFHMkxrdURxc0MlMkY5WEhSckFQSFEwaGUwcXdZTU1mdlYxU3VYQWolMkJnWm1ralZhMlpaJTJCUVVQR0t0S0olMkY3MG1YRFVaaXFWYmF3UHF2RTJZYXZObWFNYTFCa2JSNFJ5JTJCbjU1bmZRRTB5R0R4Y3pEUzNmcTFJM0t4azVuRTNNSjQlMkJ3dXZKZFBHQURxbEk1UXBDTGNaYUNUNFY5JTJGVU8lMkJzQk9MSHVScTFYcWY2Q2lEcUhQdFY1REFNdGM5RVBabk1qeHM0ZlpvdjJiaDdNZHlRNUxwNEhZUHclMkZEd2dOb2V2T2RMemhmbzZQRUlxa0hrJTJCWGVySCUyRkxOeGNJbFJiSkQ0dlBNbjhncU5SQThWTGt1NHZ3NCUyRkd5ckdJdkU3bE5Fdk1HcmFLaXFxTzRrd1NaWGs3YTJhTTFFd1hTVUw4UWR3QmRlNTV6YiUyQkFZY1hpOWs5MkloeVpjcTZDYU1oMU9JSWolMkZSRXBSdlhYMTlSY053NW4yZ3hwJTJCMlFub3ExWVZEUnAxWUMyNWswO
SUyRjZya3FWRlhQR3JYZGN6ciUyRmNnbEpOJTJGTGZwMFQlMkJZJTJCcTdsbVJGZ2lXJTJGNXI2dW9Rc2UwVnFMQXQ3UW90QWF2bjdKUG5QWHRtM0dUamNsU1JIaDdoRVo4WGlWSjF2aERWSFhxUU1Lb2FGZ2RDVWtJYmtpVVdyeCUyQjdGbHoyMGN4T3lXSURrbUtKdjRrSU9ZcHFWVUJOTlIwSUNQTVdxQlBzelRnMzAyUHhUUyUyRlNzeThpejU1NGZEVTdrdlNLcnJXJTJCd2Q0bXFPaUU4a25xUVpCdEFkQXBidGVxZWs3WVRXTmhvNnVwNGpSMGZ0enpGYldwQUUweExRSTFKdFpKJTJGN2tqYklYbGdNTUlxNSUyQiUyQkFBSkFCRnhscmNSTlJaSGJZQ05OTkIlMkZYbUdNcDRaUXMxbFI5S1F6dm1nalFzT1hRNHo1MTZBJTJCRUxKdm9TMmJPMnI1Z05hNERBcXZQT3FURDZnTWJLZ3M5T2ppWm5EWkd1eTRCZjZmTDM3RVduQlU5U21mMElGbTAzRDlvOFlWTGRrRUM2RXRkQ203TmhoM1pUUE5EVmlYbXEzS2RCWndTeU5idXduVGNiend4ZUtZMWVmOXZiVVRYM0NLWlJPOURaMkJ4cFBxQTRLdzhqJTJGanIlMkJ2UGtBcUZkeklyWHk1a3F6VG9TVzg0d2QlMkZFcDV2OFhDJTJGNnhoNW5PODF0ZHRmeGhyNzdGWUFCdVRkMGFvUnN6RzZWZkFPNGJpeTZLVzFBRjlMd2VpTGFVV2tjdEdybkJDWEFlMFBmTDMxQVpiQU1pQ3UxTElaZjN6OU9RejRDU0ZaaUYwRlcxaEt0UmtaNUxaMG5kcjIySm5Sem5NcW9xcWRwViUyRkZCT1JPWE54YXRtS05vNEglMkJrdEJCcFBpb0xrZlpQWWFwZnB4QURrY0RwSFllbGhJZ1RQZUVub0d6MkJ5dVolMkJic0pnUDNHMlJya0JBMEpMZGs3TDFIJTJCWm1ZUHpwQ2Y5WWlQOFVQbEVyak9YM2I5S3FHZHluaWM4ODNVJTJGanBna2huQTRVM3VSMjJVNEZ3aUJaRE9vMVRwMXlsSVVoNVlzTTVQd1B4alF3RDFKRTR5eUM4Q3FhYmY0YXFTTVV3WWVzRE9aV0lSSFJmYm1ZekVsWjZIamIyU1ZLbFdMJTJCU1FTViUyRjdWWVRIazJnY2NoN3R6ZUlaR2hBJTJGdFhQNHFGV2ZLUkR3RU0wTmc3U0VKSjUlMkZPRk5pTVRLTjRidTh1aiUyRmlqMXVHalBqJTJCcURPMzFJVGNISTdQbDlNYnVIODBiVHZHSzFqZmFtQ01UUyUyRnlIYXpDTUUlMkJVa0NubHRvTHBuNWNoYm5ESUJhb0tua2JLdEgzZTRjVlAzaWY3RiUyQm9EMFpZYnBUWFRMVmVJa1ZYOGlaeCUyQjZQR2Q0M1JqZnNUSTVFQldHeUFvbSUyQmNuTENpJTJGNnF2cEVjeWY3S2tSM2QwUUZmeENydVRhdjFKNjVQbG1SMUV4Y0tYJTJCR2pTaTlhaUlnSGxVTllsWU5wS2k4SVB2V1hCMzBKNU1VY0h6UDZpdng0VGRTNjglMkJBb0d3QkpvYUN6N0FUWmNFdVlaJTJGaVc5WmVpZiUyQnBSTzVucjRHS3hJTVBIa2tDcWR5SVFTTGFLTm91TU9wRVlGTkh6Q2xFJTJCMkUlMkJUQlo4c1VsT08xRmJDeGxWcGExTEl6aU1iSHpDTHNCOWVnZDVqOUk3VGZjd2xpUm4lMkJvcjZubnU5WFRWTTZFcGc5ak55RzJzaGRqSVBIS29JU3N6YjZ5RVVLalJrUnQlMkY1cEtrdHlMVHBLVUg0NHkxVDZkQWJiYjFqNWcwJTJCTHBWNHpKNGpjMkZiR2lpYUpUb1hlamRSbkswTGpGNGdOM0hqR3dOdlRRWkJWRXFPcW9xakNYM0xUamdvN
lNyN25MJTJGcXY5djN1TzNqSDR1Q2lmVWdYVGhidm1qJTJCd284cXZUc25GUVBZQXRQcDJxWVJuOGNRTUI2ek1adm03UGpzczZ5ZU9mYnQ5blExYnZuUW8yejRyb1UzMDBLSmNrQnRHSkhRbSUyRkhIbVB6RHdpelViaUs2Rk1lWTVPTzVVMVh1ZmVHSlI5QXhtaXZ1R3d1JTJCV2Y5anExUVJZT2JZdE1CbWx0JTJCc3ZEY0NpcWN2NFoxV3RJcTNzc2RLMVRhbnZPZCUyQmx6a2ZnWHpremtSWjJsQnB0b3dwZ3VkamdvSnRIVDUxblJOa1B4M2tuUnpSZERkaXVPUTBmQUNla1h4Uk9xemJtR0ZFbUpUdkExT1V1RmpacHAyc1BmNFRlaEFIOXcwU3FkaTVoUENreFFuJTJCYWdOd2xQcnhQdnFGQU9DWUtYeDdSMFhYeGU1NGNqZ0huTjdxWXBISHAxZ2ZxUURmWnZIUzl0cTltck1EckF6NGNZVDlHJTJGYU41RFFYMzZUd0VydnhTU2glMkZsS3JJU3d6Nk9ORzNXTHNibFluWCUyRnp3OVR0TjZqRVFKZ20lMkJQWWlZdnRPU1NKdXk1SyUyRmtNR2twUHhvdG1nMlJOQmRGUkN0Q0YlMkZqNWZGUzZ0NWRFQkJmSFlRbXhjQU1mM0JIWGN4dEx5bkl0aFFqaU1sQUxpampDdnhjNG1qOXFialhqJTJCSTYyMzExQ3VVZHlhTFAwRXlLb2UlMkIzMm9ibHhuYjBwWWhCUVNIZ3pHbTZOTUhYczFzWCUyQjIzeWRUY0RXU1FXJTJCOEpNcWJJdGExTkhDdzNoRjZHQ3d0MENoaDRzOTFjMVRzbXExWkVDT2NGWXhGRllXZUxiYU9ZZnZ2UEJ5UHclMkJjeWhPQmJHS1AwcXJRYkNXN0pWTlZScXJXYVlRVFFKcmVlaEVtWlpRR1I5aFlienc5a2c2WFZ3YlVvYzIwM3Z4UVYwSWFpcnBaNjFpSGxrb2Z2VWk5QlY5Umsya3o3UHBBWVZqTzNCR2Y1NDVNNFlieWJJcCUyRkZ2NHIzRW10dUZPd002NXNiVG9YOU1Mb2dmeFNJYXRPWWpFaXV5UzBNWDVOMnVQd1QxMFVHJTJCSldhbUo4UkRBTE9XYjZ1Q1hNMENBcFltcU9XMnEzcU44ejdrMklIa2dBYmtXZkZpbW9BZmRjR25RMlRMTnY5N1JpYUF5WFNRMTBuaTkyTVdDU2NtVkVmMExGbFhCJTJGd1M4Z2Z3UHVwajNzS0dTZyUyQkZacmtKSGVOZlVVVkc3U01td2F3bUNpT0dOc0kyRklQck90JTJGbjF6ckJPamZrVlllSTIwTnZEZDc4UERQZGNZR0dzNmlTNyUyQjAwaiUyQkMzSGtpQmk1WjVvT1VjcEtERXQwTmJ6YXlLNXFFUHV5QmFISnFOTDNQN2ZvYkhNTk9tejhhcEslMkZlVzBZdnhzeVFmUndVOUVqcFZrVEFVdXR0RXE3UGxVTVFVR2dWY1llSzcwJTJCMnNJUVQ0MDEwWGo3M2ZQbkE4SHRXUkZEUFJ1YlVFNlJHMHJzSkFYUmdJWlFxeFBlRzE4UFd1TzFPT0Y3N0RpcDU5cVJvMFdab3dHbGVTYTRDbWhreXp4VExWcVFYOGlyVEYyNFBJSUY0R3Nselhsb09qRW5JJTJCYUxKemoyRUpzZHFJU0J4WTFzJTJGSDFpNFglMkJoOFMwSzNQR1BLWURjMjh5eiUyRndLUiUyRnBpemk3SUlnJTJGN21LMDU0eEUlMkJDZUlTMjY2cjFuSkdqZE5uTSUyQngzZXpkaE8lMkZ5WFJ2U1Q5TUZTZVpocTBnJTJGVTVuUDRNZnBaemZ2V1NDZFBwS09TU3VwaE12Z1NvSWl0aDdKYzJwRTRGZzJvRDc4ZjgxQTc3REFmdUxqOEhHVGlyb2YyZVJ3VHpySFlsMGFrb3Q2WncxWmNzNEF2J
TJCaWpMdDglMkZlRllhRUUxRm9ZSGJtNmNtUVNkOCUyQkN1eWhWYzNuSmd5STlNYXg3MDVZa1lZdnZKTktScklVWHdNTjF2ZHZVajE2VUpzak0wblU1ZkJaTkpwNTJPcm9uUkNzSVNDclRUaTJ4MU5XVXRUNGlKN1o5b2N1N1hBam1SMTRPMWVDVTV6N3V4S0lPUHV2ZndNeGo3WE10cW43OUhvSHRWNWw4TzJoYlYlMkJvWDZSUEJDTElYaTZMVWVYcGVodjNQcEZtVzlIZmdLa0p6VlNmNDFwekZVUFRnOWZmbExkUGRlVjUwcU5MdDVEeWVXQUltUGx1JTJGRHowajRHdnMycHZwQnJIQlBxdkxPQSUyQktMMVFmbEUyajdpdmZ4TVBkYTNUeVlraVhzRmVVTmNmY1l6T25lSHh4Q2VqQmY2cXhQQ0RwWExwbEIwbDhXV2xSQnIwUEdlT2IlMkJSVXowUVRNUjNLcHB5dmNtcmVqWVdibVV4NzFBYk4zNFk3b1ZUNEpSJTJGZkZvUWYzdk1JY0ZJZlFaJTJCYXZFZFBtZFVpWWt5NW5tWjdNWlUwVXdaeVk3NiUyQkJPQ2g5Z0Q1d0ZNUkIzekZqTW90eGFNdThwOFhjRGFzOXh4c0tnQThjOUZVWDVEdFN0RXZZenVxcHhmeFUza0FEQlE4bUIwYk0wU3Q4ZjQ5WmV0eDRzTFBsWVUlMkJWM3ZtN3BYZHp3dG5RVHFWUmE0QlREQ045YUV5SGVLMFJVJTJCUnI3Nk1VRVpGZHFSRExhWVBpJTJGSnlxTDk2JTJGMnY3WWJ3Q0ptYUNncjI2R0poQ2owRXlnWFhWZ2dTJTJGempTMXEwU0N6N0lEa0JzaVd3ODRURVhFVzZkaUlZdWM0MnBWQ3U3VVBaUGklMkZiJTJGV2N5YVBWQkpDS2RIRnR6VXNIbE9PSHNtSHhOTFU0TWpsWjJFN1FSQUhnZk1JUlR4dTlNS3R3d1hPeW95VjlyVmlHMFRHR0cxamtMM2hIOUV1YTBXdVY5bGpmVEkyYTgzSFNZQnU0OEw0dU9DbzZpNkRxR1lBN0pSZFQxVFlWTjhUVDJsJTJGRk52ZmIzOUpnU21pUEdMbUNCYVp1bFBSZ3dudkFtZnVjZlgxYVhWOHdmeGZzMGZYM0liVHNPZ2NucXNwTThiT2czQkxJSXJlWGRXeVF1WDJnYTByc0pLV2U3cWF0UjNUS01NZGJtbXNDcmtjVjlnJTJGJTJCZEMzd200ZXdjbTdlSnkwbjV4Wlg1UDZnZU1mc1RXeDQ2M2dZVTlBT2xSSjR4MjlGcWpKYkNhMGEzbDRVYUxRY0JYMWwzVGlwdUxlOGRsYTQxJTJGVHYzQzVoY3hycU9RUFRXaDNTY1l5QnFRS1l0cyUyQkxIbjkxU2R6NEVUVyUyQlIwVVRHTmlNNElHbnE3VEh5VlE4ZkxSVVNBUkhGYVpPRGFJQ1BSQkRrSUZHNEdmODJkRkRnOGl3MUVmSyUyQiUyQml5Vjl2VlNiQUdTcmZ3MHpIZXBIV1U3UnJ3Y2VuRUV3d3pCUlQlMkIxMUtKZkxCcXBVcHo5bFRlZ0ZIbDh5T21nS00lMkZ2QzBWaTZQR0VQVTZTbzJ1YkNFY1N4RXZ1R0Y3UjVlUVYzemx0bXNPRnR1TGFNUzI4U0dQS1JpejZTJTJGYk56SUhQN09kQllhc0U4OXFHRXVrQk5UYUElMkJlbHNVOWpXMWdaMDBYQ2pIYkN6azE4SG5MSWJyc1VsdGNBZExDR2xhd2FRZVI3ZnZmJTJGekcwcEFvYSUyRjdRbkpZVmxTWjM4VE9USkpOTmFIeW5MdTd3WjU4M0tNdFRkTkh1NXB4OXIlMkJVTGY0dXNseUh6bmFTRk5leDVReUJQSTJOenVuMjdxN0VmODlVeVQ5bHhlczM4dCUyQnZpMjMlMkZnSkkySXFhOXZlJTJGc1dWa0VaZ3FFQkJnNmljT0ZCUDk0V
3hWTmp1UVpLdmZKNGxUNGl0QVhLU2Rnc3hMJTJGV0p3cno2RGxFM3BiNkoydXFlWld4RE12ODR3QXZHYTlrZXg2WXlaQk1DeEVnM1dvem1YJTJCejk0ZGJ3SmFyWkUycVpXSjhIdGdSMVl3elZBS1NKMkslMkJZT3FjTnNRN0lINmI0b2xiT1ZldDBOMFZLYUpDZ1pWa3FZbTEzNTB4RTU2ZXZlaFJueldWRU9zNFdtJTJCNFJucG1RMlM4Ulh0b3lGR2x5cDBQVkpyb3R2OFJmJTJCRVVTd0hCNDlaZmpHNm5mREslMkY0bVE4Z1paVkRGTXlnU0NjQ1JZODBGMXQ5M3hJUVp2d0lWenByRnBBa1RjaGhzRktSWE1lJTJCdjVSM3I2N1pXd3ZkaDhBU3hmM1lnUDVSbFQxYkR1UlBvdU1SMmtuRUoxN250YTN5UiUyRiUyRjFmNVdMNzZQSyUyRmtTJTJCZlR4UXA2bUp6NUNNWmg3bFdLaWZ3aExUTE0zb0MwQVpJM05UWWY2Z2dZaVkyMU5CSVRLJTJGSkg0NTI1ejFIVlRkR1AxVVhseGlJT01VdE52UVFFa3g4OFBYJTJCbVFKcmx4JTJGaHkxWWNtOTklMkZUdXFDcDNGZjdVZEU3cEVoN3FOSkp6ZU9SU2VmaUR5clZ2OUVBaXE3RFVTbGZBdFUwc3MlMkI0WHZYJTJGT1p4ZktiVlhRblVjMUwlMkJ1RkwyQ3Y4THg4SExZaGZuZ0t0TVlaMmppbFBRNlVidGxlRzRtWWp4aHlXaWRyeEx2cnM1JTJCYTYxSUt2ODVpJTJGYlBabWdEOGQ2TFFQZk1naWFqWUk3NFppbzd6T3VKOHhZWE9EV3dYT29vWFJ3Q3lNaCUyQm81Tk1JT3B1dmx5RlJRdE0lMkJzd3dNdk5IRzlPNlZWd25sdUNMaXA2eUpCSTVqZTIlMkJGUGZIMlc3cVcyR2c3ZnM4WjYxOU9LanJGenpaNEQ0OWhmaXVDRlpsZFdwZUx2WXMwWHBzTUNjOXglMkZuRyUyRjFidXYlMkI0OU9WZGNHWGY4cDZhaWVxYTQlMkZmaUklMkJnakRpN1NnZzVmb1JTSHJMRVRqb1MwcXJFanZrU2pPYzVtWnJnWnNVV1F1T29xcFIlMkZTOXBLcFZUMUJ1VmsxOTVGdnFSZzMwNkR4b0VjbmVXMjklMkZJTnBUSWEwdXJYWDNtT1ZTU0N2RVdXRlNqRSUyQjBheUZWcExFcXR4a2FQdkZuV1RzdjZCM1hrU3Z5N0ZHc0gxRndxblZQeGxLeVFTSndtSjB4VWdBZzFuSmhxckFFa3RpYVVFV2JzeWVTaGJUcFdBTm9pVkgzc0FHUElrc3hnNEMlMkJoV3hoUzVGRHJ2aWYwdnRzVk5ZOGtvQ0F5aFlKNVVWdmZLTmx1S3hVUVNKVzN1VFJUZ3NvSG96eFFYUzB5Mm0lMkJXRVVGd2txc25ObGxrNGZOdzIwZDc3a2ZyWXFaTHM3NzI0WXVoOUwybUh4amg4NWt1R2YxQkklMkJVdUpXcUdoOGlXaU93MlNWJTJGdjBsWDVsWGE0eDllbFBTYk9uSkZidXZ4VUxNcCUyQkZGJTJGd09JJTJGYWJ6bVA0U0JtNndMbjFJZFolMkJNeDhDN2ZXU0psNE1yRk44R2hXb2JlakpwNk5WYlFUb2Q4S295SGpKTDgxcVBrZ3AxRVhCTEhRUWxKQU5nMTA1aDJocFE2blJ3U0hRWFlZcyUyQm1kSEYycXEyU2xTYkh1ZEx4NnRXVmtuUVNFb2txS0M5VU8wZVVSQ29wWm5ZalVMMUJQbVRKSVl2bXg0d1doQnZkczZ5WW9oa3JGejdlNWhSeExjemN6bHdiUmVoJTJGT2s2MmtVUUUlMkJxcmI4T1ZHTFBVYWhNUzhWdnNmSDlsMVZFbDd2MmNjaiUyQlExc2M5RWZ0RnZobGlQWTVZeSUyRnMwajA0SGFITEwlM
kJGN0NiRGxRbHozbjJWaWxkOERYVWNkYlZ3R2ZNZzNuY05HeGYlMkY1ekNSJTJCVXA1MTNpM2ozalJRYUM5Ykk4OSUyQkxZQ3F4SGxNRTF1RmNiNFd5VnBBRnhEaGhUM29qS2FGZWoyaHdYWVJlNWJQZmRZYjFCbVZDYW9XTXJ1bTF0JTJCNFJKODBkYjlSVlA4VmRzUkVodjUwRDFPQ3RjTXpENTElMkJaOUdSd1BIS0huVmFaaXdjMzRjWUlZREp0STlPRm55VWJlMTZTR21Sa3BJU1BwT3RpWHlKWFhNRFkxdGV0dmhURFdCdVh2Sm8wckMwOTh5cUVHMTZLMmJBNFdRbmx1c0JoT1JCNEtoUERiVHFjWUJkWSUyQkh6Qnpmc2VSQXJmMXc4UU1jUFJaZzAlMkJ2cjhUaDFvUmZZUXdPbjdQRXAxYnZGbDFVdTk3RnExb3I2Z1FSOUhmRTVFT2ZSQzNPNWZQM0NFVSUyRnpGRnlickJaRVVTQUlCc2FtWUFoZ1U4UENWdDZ1UDkwQUtVcEt3MHowRlRaJTJGOWQxeDlscE5ZV2U3UTl2dFptbiUyRmNwJTJCNXlPcmNlQW5ySFE3dGRqOHpnYSUyQjdaNiUyRlVmWHZWQTZ2WGx2aEdkRnZLcFpPd3FFZzZ4UHZrQ2l0bExkTjRHSVJmU2pOWEluJTJGaEY1ZmJTVTYyNTJvM05CcE1tMlIlMkJKRDJoMHI3cEU3b3VNOHVrdTRWN3ZvOTFTTG1kSnZIaDJ4cUJFJTJCQ1Y4aHh4SmU0MW1YJTJCRzglMkJYZWxJNTY0WDNSdm1KVHB1eU0lMkJ6c3JuSjRSRFpDeTdoUUhHdWt6NW5Famo0RTFESlhqN1MyTTglMkZRaUQ2V2xKaFo5T09WcFBiVXluc0glMkJNWlR3VCUyRmVNWHM4V0JFc0t2ZjFRJTJGdTVKTjFkVXZDbHVJaTJGclJBRHpzOWljdGtEYzNOVGRhSEJaeVp5aCUyRlBLTVIzcE40NHRKdnI2SHdQJTJCRUpmSkpBNlVVczJ4QzVoVnR5QVZBMXMlMkZVYjRqa2xMRVpiRUJ1SjR4OUhsMVIwbW5QZk5uTWxHclhjcEtqUXZwUFEyNWpGa3BTRklGa3ZkbGxSJTJGS0dhQXo5dFdRZkEybUo5WSUyQiUyRmhLWnI0QXZ0JTJCT2JqRVBQVUVzY2NWSmlDdXBvMGElMkJieXJvVlgzSE12Nmx2UUplRUYwdlhmJTJGYmJpSnYwNiUyRkg5YXJMNm9GTEhKb2RWeXA0UFRuOWs2Rk40cUxZMUZLTHQlMkJ1S255TW1YNTFZeEkwUUpyWDl3VkJwaERIdEJDRHdRdnBwbVVBNkhFRkFId01qbUhKclVZQVRkNzN1TUNkR25BJTJCS3g4WUUySUNuM0IxTFY1TmtDT1RGRzBzdVVQdG9GRkdsYnBIOGc1NjklMkZvVXJEUFpwa3RPa3Nnb0w3JTJGNHlYSTJ6dDhuanpnOEppdko5UUkzTk1RZlFLWjUlMkZBaGp4VXdpU2xXNlA1dVBoN1ZlblFGN1dBYXk4eno4aTVSM2pJVjBVbEo0U1F6STFRVUE1Sk84bnUlMkZCZFlhN2FBNzBTd1dzbkpYSEVwVjVwSWpiUWpkbm5RUDg4dERMQkZMRnliUWtoSFJCeDM5aUFZajNLaE5mZkRka3Z4RyUyRjFZZ0tlWFR5REx6UFklMkZFMHN6a1VUcGZyWjUwUG1Nc0ZsREJ6YUhhRUI1RGxUTjklMkJReHdlOTZaY1lWbzBRanY4aG5rQmJkJTJGMSUyRkJ1bEN5U216VTBrZm5mU2xrSno5N2ElMkJzTXpXSlhDRDl1TTVQV1pKMzZYQXBHZG0lMkZiVWxxcTFEbXNZaWxsNmJGRnNMVlpZek55Z1l4NGN4bUdxMXZhMjIlMkZBYSUyRlhaS0pTbVp5T1p5VzIlMkZROHFKTDl3ajh4MmswdEgwOVB5R
nlOQlliJTJGSzNsUDRNZExsTVJQSW4lMkZDSCUyRnF2ZUV3M2s4dzNhdHZHOVZFdHZhNnZ5Q296RTVRcG5NbUZ4ZVdmTkdXJTJGMCUyRlBKZzY3MHg2dEdKM1JHNEhKZFRPRGpIODk0RUl5QUpJYUVQODY1UW10c3hESDR1UiUyRjRlZjRLQzB0ZHJ0JTJCc1RLVW1YUlVLRCUyQmVSbGJUZnB6TjJ2cmZVRENYNUZkZGFjMU5sV3U0QUpPa2J4ZlBGNFVPVWdEdmlJY3Z3MTM3WThROENsTVglMkJkOUp6JTJCcGdHb3l5S0NEJTJGZWV6THVKM3BOamx5UGh3b0s3JTJCdGxrbFFEUmkwUVdmUFpGJTJCJTJCZE5qT2lGbkJSN0NEMXNRTzhST3VITG9tNXpQOUhleVUlMkJsWWRDU2FlVU52S01PNVg4cGFocjk3VndWY0taRmxPSXBkdG9jcXBKMGlFRlpKZmVVWmg5MUx1bzdTendoYXNZYW1LdEtGJTJCQlh1JTJCbmsxUVNQRGRqWElWdmM3MDdaU1ZPSXlWdEZBaG9DWWhNb2UlMkJISm5yMWEzdUNEbEpTWGxKcGk1WiUyQmRRaXlaMmdPJTJGRWgzUHh3NUp1WFQzQiUyQldvaW5DckVta0FsNThiREwlMkZ2S2pObk80UXdocmtSYXA1eHIxOVRMSEFmTDdwd1NPcGFhVmI5d1glMkJKVGZ0YyUyRko4SzBMalFKbVFWSEZjbDhjeGd2bkc2UXFaaFJ1SlVHJTJCZVlneU9lQjM4S0NXNVlrazMlMkZSemptVE1FU0c2ZDFxViUyQjJjSVZJMGxySlBCcTdSNDZWZXh6JTJGazM0cnB3VUZjUEp2MHJqRVJaa2lGWkVBdDAxUTF4QiUyQnc2enFiRkRhNTAySDcwajNlOFN4VjRIRlFsRkh4ZWE3dzFPeUt5Z0ljYyUyQktQeHU5RWpocnVuVndBeHE5czVwSllnUVA4dzVPQWE3cCUyRjV0UUg4Y243c1RSNEhMekRlckppdTluUm9wakRzeDh3UlZPaGtFUGZacU14cTJFTWJSTGQwS2Z3emExcUxCcXhQSFMlMkI4TzJibTJlYTZXVHVYNFhhSyUyRjF4bURsRFF0TTAlMkZ4U05LRGFaVEtHZmFvOVV4U09YYzVncUNGN0tqMGJaYVpGbDJJSTdwJTJGTnNYSDRqOEs3NXlnR043JTJGMTNBJTJGblVkRTlUWHVTalRYMUIzdyUyRmdQOU02Nk1VQXVHJTJGWWdJY0tYN2FCV1l0NWsxT0NJTktMS3Z3blhWZHRqRHpSRzd4YTkycFVTYU1WMHlhVmhEWlBWOUZ6aWJZdnk3QVVzZktWalBCc0k4Qk0xJTJGJTJGM0o1WllSRjI0OTBxdXF1U3h0VDVCeXBUWEUlMkZ4MDIyQ054QmdVbWhpMmMlMkJRaU5jYkxsaUZET1JrTmIxUWxEenI2bldCN0JpNGcxSmVxMU5UQVAxRlBoWjdPRFZLWWVNVmxMd013YjZ6YXAlMkJFTlFKdWZscSUyRlByUXkzRzA4WEVwY0FGQkM2TXZZOHZlNTU1UXF1dkFxQXclMkJvb2lPWEdOVDVQREFzTGM3NDNXMUNIJTJGY3NlR2hjanhaYkxnU0hWS0d4QWxiRUp2d3lCVTlmNkdiNHdXNGE1VThDNnAzczh5SEFrJTJCd0ZjZm5FcXRkTWI0ZW9vMFllYSUyRnJMdUI3V1k2S1NmZU4xZWowU0hhT0swSUlPUHh4ZEF4ckVSJTJCZnNON0xLU2FDRUpXaXlCNThWSlF5d082ZmdsZHU5OXNrM1hMS09wc2hWT0lpUnVmT291c1dzNWhkcm1SN2YyR1dCc3NvbUo5bmdPT09uSHNGMUJtV3lqS0JDU2ZwRWZZMVY0JTJCWG1NaURzd21JU2ZiM2RPZEprQ1B3NUt5ZTFmTmdIJTJCQUVnTVdNRmtlWUZVZzVWO
TVacUlGRVNPUU5uTGtKUWtXJTJGNGJqNFBHY0dheHRMJTJCMHBhaHRHN1ROVUdLRnd4cFl6Z2V1ZEU4RXJIcWFBWElvTVlSOWF4cjVCR2g0ZmhFSFJCZHB3czFFeDQlMkY2Nm82R1lVTlJBamI1clQlMkZHbHd6THF2T0F4emJoSXBCWGdzZ2NteFpITEVTejlwSGRnY0RRc3pMcWlveE5FN08lMkJsJTJCMDVPVmZhQndRekVvNzBNOXE5Q2QyYmllUDhvWEFXU1ZLNmxiejFhZm9rUklvdUQ4a3MzWnp0bG9yc01PZDV5NktsVGgyVTVIMUpub2FjYjh0R0x1eWxnekhGSE9BNndaajI5aFB2cmQlMkZXME44a2hjTmM2WDcwdVFPT0V0UTlwYUY3JTJGQlB0MzNFdXRyd21QZWZTYm96ZXNLamhoWWdRUHBzSTFjSjFFRVJBMWxMJTJCSGFNbTBpZEVYakZRT2t3eHk3NzJnTVdYdUh6NDBUNiUyQjFGemtocUU3cGFWTEllJTJCRE43cSUyQlVMb0NmUWZaYUgzRVhyRTBqTUZTMHdXV0ZPMkVGU0ZObFJuRkRodTl5VCUyQkl3azRVRmpFYTB4bGE2YVVST2tSdVc1VmJMbExaZWhFZW5nWmphMSUyQndxJTJCdWE2d2ExY2pwSzhDN1BPbVdxWlZGbGQ5dVdrWUhkR21wY1JRRUVNZm1UOSUyQmo0JTJCcW5mZDZCdVJvUkxIck05akxmQW1TSWlMZjZ3VVJBb3I5dmRYUThLYkhYaDN2V09HaSUyQjJyZmhYb3ByUTg5ejFJYWMzSWhRSVViQmNrJTJGaGZ4MU1xNkkxbXFNRVBwZnYya0hwMGhJZUpNdWdlVmZEOWMlMkZ2MldQeElzVG9OVDBLTEhZY0QlMkZHMHVIaWxOWkYzMXgzcHQzWE43MEp3QWYxcnZjSWRhN0pzQ1dCekVmaXliUVQlMkZHQWtiMFlQSm5pWEduek9XMG9kbDhENnBGbldaZk9kZTFrYVZubTN2bTNLUDRRQXdlVzM0anVTM09ibnhVQmFQRmcyQjc3VXNmbmpQZXhFTU0lMkJ4JTJCVDExM3BkZ3E5JTJGc2NYTnJHbmNhZm5UVGxiZEhnJTJCMnV3QVBBc0clMkJRQyUyRlBERWdLdGZEWGVVNGNteiUyRmhTU0hQcnJHZjJQYWJuaXo0QkVxOWVEeWtHJTJCaGp2UEk0RHNTdTUlMkZoVGl1NnZneHh6a05Ib3M4VDJZb0tXNzR2MXNhJTJGMThXNkltU1I3THVPRm9uVzl3TjZwUTdXcklWcmVRSHJ4ajVmaE9xdXIxNTAxdSUyRk9qR0Q2RFdrbjNGUDNTVG13R2FUM0p4RGJHV0RLWGZWVkozTFB4OVJTSTh2MmFDeTdJQlh1djU4c1d3TnlhcXUzdW1yZWpreEpBeXlvQWZtSHJpQkxWTiUyRkpOc2VESHk3QnYwVHVzR3BLN3clMkJBMWQ0UUFUWGVBMVhMUFZzdEdhYWVicms3OURUbExHY3JKVXdPT1l6YnFTNVlBaHAxbFZqNXU1V1l2VmlVOHY5NXZOMW9rZG1TWlVKZmZNWFhJQyUyQkNtYzdOJTJGUiUyRll5VkkwNjZ5U2t0VTIyaEJkRzdyY1hyeVJuUVFiUjBxeGZTdVhoTDV0Qkc2OXJqcEVxRld1cEt2enFnTjBZSW1XallrOUp1TFJNRVREcTE3UkxDenZ2RE5NZks1c3dhdGNlMkZOT1l2JTJCVGNUSUpDNFlkdGt1eU13aXRoT084ekw3ZDQ0QmJaS3p0TkNVdXVMeXhkR0V6V0Y4cjNrZGZtM3lCVm5mRnZxaCUyRjl4bHl0YjdGaGhaTEpXQiUyQlVYRmhaZTBTc09GM0psT01JRDI1cWQxcDZ5RWVoYnd3ayUyRmNoc2ZnUHVwUjJ5YjlMcTVQSEZEMUFGVzNSdWVRdTlIVVh1SVRvUDZSTW5Yc
nBrbFE5d3p5amJqYXp5RzhkNEp2QzBncmVLY2p6MGxFcXBMdFFVUCUyQmFjaFVGYVBpRmJmJTJCZ3k0Y1pFN3lTNWY0THNYbnBjTUNlV0hTMldidWRKcGolMkJvejRSYktJYkd2MVNjUWJpa0RSeHg4QzA2SlZuRHBJNDd6N0F5TjdwUHZCVkglMkJqdWpVYXYzRjVPa3I5dGttMDgwaEF4NCUyRmlFcHJFN2phWGxJenc2c1VVdjhrJTJGSUlNRjlwdjhwclBTeFBVVjZBUndVMHBHQVBBUlM4ejhNZXVtb1QycEREQTUxZlR5OFJKODZZQjJvJTJGRHUlMkZaTXFkUHAlMkZ1UzVrYzRkMkdmNkp6b1k3Q0hYWU12eFdlVVBwYnJoRWdwMFIlMkJhY2xXNWolMkI0djBnNXpMS2xUbmEyMHElMkJlZHRDMUYzNGxTcEVnOHdUMUpxMSUyQllmaXRXQWlPek56SDVoZnM4a1ZYdnFHZmt4b0g5Y2QwaG5EJTJGVHZwJTJGM1NDQVB3UyUyQjBlcUczc3RvUXJFSTVXbSUyQmFTbTYxdmVrOW9LWTNMOTBObk9pOGhsJTJCMGRUZlFjbDFmTUZua0htJTJGRnh6RGgyNmVMbXoxdDhSSng0Y3hUOTY1Y2RjeCUyRmZvJTJCbm5ybDJ5ZEdwS2YzbnYlMkY4c1ZhQTB1SUNkZ0NGenZ6cmRkMW45MG5UYkRBcERpJTJGNDZKNEh6MCUyRjR5ejZSYnBBQkglMkJnSEhuMDIlMkJ4elo2dHphZ0hMMERZQlllS3JUZDlOWUg3ZHhvQXFVY09QJTJGVnNYcDhEYmJ5UE5SM3olMkZBcSUyRlNqR2FsYjlrTnBkOTlOZ21oYmZvQzNiMzNab1BtOGxyYnlpd3FSMEw4RVRYdHBSSUREU29qemFYJTJGJTJGUHNPSVRkMnZtSTJ2aWdOcUpLVWpsTFpiMWVNaUxueUlvJTJGcEpKN1NqT1pnaFZZJTJGall6bkJQZjM2S1JTM0toSDVvSGRINXJqenRGY1ljZ1NkdFFVc0hlVHBocUhyNUZDYW9taVh6a2RGSEVtNExrSnMxUEgwWGEydGxIc3Q2ZVFvbFRORlVGMWZmd25OdmM1RWVocWQ3RnZSeDJpTyUyQlpmYmh2djJnaldaSDRzdlFjZHpORVdmN0lHY2hobVJ1bHNCMUpzR1hHYWRtQXc1TG5kenNBMFpCZUxXNGRNQTRxZnFuenFFOUs0eVJqMkV1YkZGZXpWMkxKRDBFJTJGU050cHhXY3V6dkRsQjc5blhZd0NOa01zWnAlMkY1YlBSU3BWTHIxamNZVU1QT2RpZmZYSDcxUkF4TjNDS24xUkZsJTJCWkZ5cnJBRkslMkJYJTJGTzNlZjc5Ylp0N292NEJldSUyRjU2NzhNdnRLN1klMkZESVI1TVh6RWJMT0lyeXo0UTFjeXN5RTRTTk9rcFhHS2w1TjZXSUxJanJ0MDh0TWglMkZkMXNRbXV1NHpTbTBic2FqWEVUb3l6R0JaZ1RWZHdvT0Q1Y3JLbVRJeHFOTCUyQk1hYTVoZjFETkE3OEJySEhJTk1KN1NKQnhIRXNyUTRZbGRnRmljbDNueDdtTUIzd1hsZGpFM0tkV2hjS29ZSzZXNVRtWFlralRtYnQlMkZIRERPTDlTQldpWm1EWGpBVEVHdnF6c08xQ3JORFc0SDBndktGZ0Z4VlBTMlJqenF0TkpCWDZwNmVCN2lreXJXdU9uZk0lMkJUUDZaaEZxTE5SQjFkbTFON2IlMkJHckolMkJRWGFUWlBLaTRCbnVaNWRDTFB6bXZJcFdVbFY1c1d0dWVPWFRYbyUyQkpsYWNpWmhjTVVHJTJGSFZGQ3p1TENycTA2N1VVcXQ0dzNHMWolMkZBanZDeGhyeFNqazglMkZVZm9EcEpaWnlRJTJGajZtNTV2Rno4b252NmMxcXd2eDVSJTJCSTBaZWVIQSUyQ
mIxcGJ5c2ZPTlJQVjhicCUyRjhoUGp6OHZDRklabUkyJTJGUTJOWEtlUElsRXQlMkJRMTRsajBSQmZXNGJSN2dzdUsxeFI0N09va2w2cnVQaVl6JTJGM1MlMkZ6OG9MN1poeUlJYWZDdVV4MEZ3UHpzNGYlMkI1MVFrbjNObUVMOEJVdkY3Z0pnVERwZnJ3VXNZNGJiblBxWHdjU0R4ODN5YUdoMlRYSmlIUVBZdndOWm5WUkEyTzdWR3F6cVZrSzUlMkJIYlRocnhXJTJGUDYxVUxoNUZ0Q096YjdRNXByYTFQUThMRHppRTlLblh0ZTZtQllmSG5wbG93SyUyQnViUTloMFRpUHlEWXJLaTBicGp1WHg2d2NOTXplNkZXVFhMUVVoTjVGYUM4QUt0b1lDdm1jN3NxUVd2UWNCUnBtQ05hNmpLZ2hzbTk5dHNoWU1qV2I5VlJNVWdWWXp6T1hRbWdxb0w2QkszdUV4TUR2aFV4eDJxMU1uYWM1b3E0MDM5Wk5qdk9xVTFsYVVuNCUyQlZ4R1loZlZPWFZHT2lwUzFMS2VtZjRxYWxBUUNqeDEwOEF3WVZ3SmxSTyUyRlVwUEZmb2t2NVZ4RVcySVg3UVFvZk9rbWYxWVowM3FEZlJrUFNVdE44OUJsNVE3YjhOcU5kWGNWaElpM3NnTkxIRzBHUGR4WU1ncHdWaldSbHB2ZFBETDBxNDJTdEFuTGw5R250UG9YY3BlNER3WHRJS09NZ1hoemxxMWJsbExKTjFHazU3WXpIVWc1SGJHWTgxUVY2SkdiSEpoQlFDRmNpUUNMNDNXMWY4SzU4SWxLN291akgyd0xsODc0Sm45aEtZazROR0dmWiUyRnZZc1lnJTJCT2JOQk02U1BKb3FiOW1UaFBhT1JJUERNQnVCWGZ2QWpucHFaa1B5WWEwMUMxcjNRJTJCbCUyRmJaT05oNzY4alZpQVh4bHd6NHlOVmhIc0lTZ29rZm9hZEQ0d2NXJTJCSkxINVJvYXhCbENWMXNwRldjd0olMkJIQSUyRklJcHdhY3FLZElUQTZCeGVnRXVhSG8lMkZ0R1dXVEI3Y3N4SnE4ekFTS1JqdGMxQlo2bk83Z2wyM1hrZXJ2eDRVcU83OXlPREdxUkNNQWd0NjY4cnZKU2w2d2I1NHklMkJqd0VFaUU3RyUyRndJYjQ5R24xa1Z5SHFlR2RFNlkxeXo1eUolMkY1NTJCbmliS0wlMkJ4SmY2JTJCQTY2WmJ6SDFzMTJoT1B0ajVaSVljSG82TElnT2xQc0VjU3VLWCUyRk0lMkJadlphZlhQd0M0ZSUyQmZ5djc5WFhZNjdSZjVZNyUyRmtlTDd6dHJlUWxxOE9UM3NWNzREdTNCUzZmZzNIaG1uRyUyRmtuOHlDWUhBVWZINDdrV0JPcHc2SlA5Rm5iV3U3T0lDb3FpTFUxUXdKUWY4enRsQ29YZXoxRkJwa3ZpYSUyQjNhd2dDaTZPOWhPR1BxOU5aVTAlMkZaYWVvQUw2N09MSnRLTWlrUmN6SHdLN1l0bW9GZHlwYTdBd1BvUkR3emx2NzBCSVAlMkJMM08wcFlTZzFJcTlBb1R3MTZGaTA2YjF0clVwelpWUEZSeGVpaU1MRUhEMVklMkJmSUpMQ3JDRTklMkZFZHdoQkFHclBRZkpSenFYOVNpV1BCaDQweVBsTlJlYjlseTBsVDFNSzklMkZ5TmtJNDNCenVpeXd3aCUyQmllWFp2RTY3MVNiU2M5QnpTbWluMnFEZVFvN2Q0ZXFvMkJSb3RPOCUyRjZVMTgwRnVqZUlMekclMkZ2dnJOJTJGa0lQM0JRZGJSTjhjNXElMkJkc1lsdSUyQldvZzd4ejlyOWRZa0VhbSUyRlJYeHBmSmt0Y3JaclRwbjFHeDh0a00yY0xjTElxbWNBRSUyRmdhTnBhZ3dlSjFtM3BlaDAlMkJ2MnNLTjB5VXZ4WmNXdlpTQUNtOE9nSkd0RjE4Y
Vp6UG50VjF4RmQyUCUyRmhjMk92YXR0SjdObXpJRldhMVVES1ZEdWtCRnowN25IUlc3TlpNZGFNdUdTeTJ6b2E2cER4dlZGV0xLZ1doOSUyQnRrdiUyRjVhWTdKV2F4WmRxWjJIMUxFMGhTbUd1QiUyRmtiQ21sbiUyQkNpcUFiUGIyYjdFVkozT1pqUEl1cEhkaU9IUVZBeFpYMk40dGZDZTVLRDY3R3BhSnUxYlhBZkFnS24wMExuSHhaMzRIcEVKazJIWmlOUnpNOCUyQlM5ZmxKQ2h6MFlBalJ2RUVuZ2ZEenhoaUVBNWZqQWtoQ084QyUyQnQ5MnQlMkJvTTBTNmpMdlhJSmVMbm45Vm9RM1BwSXBsNloyUlMlMkZNMjBYZk5OanR0b1Vmbms3SFglMkZrdHZLemJBcVElMkZuVVVpWjR4dlVTUDcwMFhscEVDazZMQm9VV3ljciUyQlRvbjA0YWJ0aXN0SFB0VVNjJTJCOSUyQmY2WUhWWmFoNlFEVGF1d3U0bnBhV2dCVmxVYnZtNnhXTGJ1biUyQlFwaFVpY0FaVGVLb2V5dzRYJTJCN0pGdGU1QSUyRmclMkZHRzhFJTJCQTdPVFBjem5ycElGNnVYMGpsWFBaZk4xJTJCcTY0SUlCUXRXekJBSGRsSk8wV0d2c0x6TmpZQk5RcUlQN0dicm1FMUwzTXJHektUQTdtMGdOVW5kemUxSTJZem5SMmh5M0dWREhnZUZseiUyRnFWQ0s3d3A1UVlGc2kyYzBXQ0J6MnhxSGdxYzlvTHNUTHZaU1Y3VFhLdHRTR2JzWlUzUEQxUmhjV3NvUSUyRlA2RkJmWjBYYjdUSnNkYW42YyUyRnZYT3pUOTZmJTJGQlRBSCUyRkVEdlBYSzUwVyUyRkFMSndwWlVmbFlRMDJLJTJGdHR0ZURzdXVGVjc4OUhWUXVjWXZuNmVJbW9iMUMlMkZyV2RIJTJCM0t5c1NaS1ZRelJGUzZZNGFPcG5WOVBLWmV4MWFvU3poOGFpWXNuZGREWTRaJTJCcE9XVlliOUlyRUpySnVtRGpVR0haTk0wRnBDcHdkJTJCSmFGeG1ya01zMGZacUlHMSUyQmJiQ1N4bnFqRWJLQU5ocnZtRyUyQnglMkJDSlY0YnUlMkIxeFl3dlREUDQ2YXIlMkZQcndua2VyJTJCYUhDUVh1MnNNckFrdmJjaUlWUjduRG44ZlhEZmkyVmJXcTNmWVd6bTQlMkJpRGx5T204S0pMUnF0clJCS2o3am1kdVFLWUo5RVU1YXY3MGlhSzlKc2NkR3poMVNBZEhNQ3F3UTZhOGFtcElpdjVrY21wdyUyRnNtdnNSbnVyOHNSWTJPa01RN05wT0ZiSXI5MHZITnBlNU9SZXZ0RDR5NzlLdFpXUmxMVVJwVm5BUWtKRzZPcFRKT09HakYyWDc5U1J3WEVDZlFKOTVDJTJGRlc5Z1FDM2lFUTk0cSUyQnYxaEZNZkglMkJlV25WRlgxUHlqMzk5OXJKZCUyRkZEeDNGc2hVWHVJS0FDQzI0TkNWTE1XeiUyRkxpSGdFc3IlMkZEOHIybDFpTWZiRXQ5JTJGdVUlMkYxNkFnNHdHOEpMNzM5OFk4bjhJQlByJTJGZnZCJTJGRDU4TmFDcjc3ek1nN1AlMkJRMkwlMkZMZGRGVTlYOGZoUDczVHNuNjclMkIlMkZxJTJGMzBZU0VmOTl4WGVYJTJGcUxMYnJ1ZjclMkZSMyUyQjhJMU9UJTJGdmZkJTJGTjNBazNWNzh1JTJGVHZ3cnJkM1g4WDFqcVp3SzlObjFUdnZ3eTQlMkZ5WkxPaTFKaTg0YTEyWnJ4dUY5UEIyM2JlemZKM1RnQVNiSmZ0VXk3a1BPdnBPMiUyRkwwVnlGMTdmJTJGNiUyRjk2QzdwZ0t2M2NicHZacXNVNUdCdXl1YnEzaSUyRklmUDNrZlQlMkZYb1glMkI5OHI3ZTU1c3lmJTJCZzlMO
CUyRkFZVjdKNDl0QXNaMFRrZ1ZxeEhNb2VINk5lJTJCRDZmUjVNS1U1Uzh2dnZ5ekVRZnI5JTJGcEp5ZjIyZEhJd3U0RHpCenZ5S2cxMEZNaFVtRklqbENKUHdmVm5TR0h4OWxoRyUyQmlWRHdJJTJCJTJGd2NyT3pYZE9zWDBiWmJOclpqSEc0ZmIxeXRTOSUyQnJoSlBpJTJCY2pNc2NwcHJLZXA1WXhXa2hKU244bEpEQ3FqTWlQSmxGRFcwamI4WlRBSyUyQjRqbk9ZQSUyRmxIb1E2RHpaenZRWjRHZno0V0VjUVNid3pQQnhBYnlaWmtlaCUyRiUyRjZWYzM2Q1BKb3B3ZXBySUJUcFpaUmtmRGxKWDhIallQUzBpZWtvQ0JZeFp0N1REWXFSNmREZ3dBeGlPa203Q3FoMXlLZGZjZGNWeUViYlJYZG1LSFdpbW5idXl2ZE9kV1BIVXdpakl6VWo0NDRwTktUODVZSU4wJTJGZU1YNFpqYmpxd1UwZWdaRUthd1QlMkJTazVFSXBvNG1rS3dmJTJGUnZkVW1BUUlrdnJjaVhiOSUyRmxFJTJGMVYwZE44aE5Yd29LQk9Qc2lxVCUyQmpYRWY0S0huRFVWWWdHN3FQaDFDYk12WTB0cndkSTAlMkJRY2ZJcUloT0VIeTBmQmhHb0FwUEh3SmtkMXMwSiUyQk9oYTdESjBvOXp2ajBVQWlwMHRjUkklMkJXamVHSmNLcmdYc2JHOVpiTU14YU5LTlkySXAlMkJCdEE2UjRBNk9HOFVwR3pXMEU3SmlPYTBwaUlRJTJGSHh3N3NucHhBQk9YaVdWJTJCdVVLMmsyUHE0WDA0Vk5sNVNwSU1USHFXMWpleG1ZWVlESkRsZ0clMkYxRmFMajliSG9KMCUyRnNTeURrbHhsdzFRQmJXcyUyQjVYVG1jQ0VvOXd4MmwzOSUyQjhLREElMkJMJTJGaTMlMkJHejRLRmZub1NYem5zNVBlTFlPd3IyRzVNTXZCQjVWc1lXUGFDZ29KNkduM3ElMkJwT2dmcmloSVpyWUIycGJXaTZpT0Q4djRyY1lYTmp1RnkxaGRaSUxDWHo0ZU0xeDg1blZyJTJGdFBaUVlYRDlCOGUlMkZoMDA5cUsxdkhLN2cydnRXbHFCbVVnWmloa0NKb09DWVB6bVdac2xLT09zQUZJS2N3dUtYSTQ3TDh2eE5XZmt2QVlWRiUyRklxbEx1azRNSVZyTXBadHRBYnBDWk9EVHFQMk4yODE3blFMMXpPbGpUM09VYWMzcnNqd3p5VXN6REFLdFJQb0JwUGs4cjJwY0RXakJuYzd4OE5WNmYlMkJRVWI1UDV2Y0xRJTJGZXdmUG5kaW9KUldyWVhlMCUyQktlOGZiNTQ5c29BbTh0Qm1BUm1QSFVCalJZMzg5T1FLd2pBVFloJTJCd2FPT3llajdBNHNTbngzTngzSHlHUkNBY1pVdEJIViUyQlVWWmVHOHNTVkY3MyUyQkgzODBmWmdxOFF4czBuNFZoMGI4RDFyVFQ4ODglMkZEaUJxT0JwbndiR1lNNEFIZyUyQnhRNVZqSTlmeXBDVmUlMkZxZ094ZzRGYzFHdlVaSXlxSnclMkI2UVY2Q3VROWo1a1lQQngyMTNqZjhKTGh1UjlZeXBhUkdaUnlGam1ybG5GbFVkSTkzM3RVYUVpN1k1b0Rkb3lTRjZSQzB1YWdrQlNQeW52ak5rYllFMmNtSTI3VUtZVTNSVVZNSDdqelhLT2lHVWUzYSUyRktCb0RSc2s5QVJGVGhBSDZqM1ZOdEt2b2YlMkZWcWxkYkVucW1BWEJ6dEMxZVJOR1BSM3lRZVBiOXZNOUglMkJianBMRDc5WERmUHJDYUV0dkFPVnJMek5UUFd2MW5WQ1hraFZ2VUo0MVRMZVB3MHJyOCUyQmdIVm4zempYc09IRTFiSWI5ODBUMDZLR0FUOFJUenlZeWs3TjR3VWMwJTJGU
3BZYmM0ZnNrUmJBU2k5eU9YZFU3SyUyRmI3dCUyQnZTZ25nb1dCQ1R0ZWphUERMJTJGOVNab05iandiR0k5ZGxLN3o4YzVPUG92U3ZQV2Zjazg1bFlkMkQlMkJ3OFE3UHFnSFFneU5TR0lYNjJVZmtBdWF5ZVNzNVk3d1YlMkZOenp5QWFSN0VoMGU4JTJCcWlQR2JUQTNtJTJCb2hYekNwamw0eWxFWll4Nm1UZTFTZk1Tc2JXJTJGbjRJZFklMkZPclI5YlFkc0tBNnRnVCUyQmJVU0olMkZXRjEwMXB5MlhGT3E4Vnl5c1NFM3Y2bGlpTWRqTmNoWWlmNjBVQmVGdXVSR2QzJTJGUGFueCUyQklmbUFxbE1Pa0dwJTJGU2JPVTVwQjN6am1BcnFlJTJGamVjSUNIb0RyWE9qekxFWHZNRDFXb0ZFSzZUZzR4QWlodFV5TGVmMGhJZHVYelRCdHk3dDVkUkpSMWVtdUszSlVqWlpsWkMyWkRmZERQM1I0RzJLTm4lMkJFeWpqUTBjdDM2WTV6S1lxd2lVd285blB4Uk1tOWUlMkZlYTJLWEc2OG5QN255ZHFYMUFMR1BxTU9iVHNJOXRpRHBCUmQ1OGF3VHY4U3NEUFZ6ejBXbzRvOSUyQkNVRVZkWUZwVVZNdTJGdUJwaDBXNVllbUs4YW5ZQVlkRms3VXlPUG1lQ0ZGTnlOS1picEp5N3JnOThGJTJCUkNLdElvNEZTVEY2WGFWZUN2ajE3RjhwMzZ6Szdkak1KSHZQJTJGTWFIVHF2ZEVvakRCbDdBbmlzZnhVc2RzcmNwamdGdEVENXE3VSUyQjVNY24wcWN6Vmw0cjlISHVtQ0p6UTRsVyUyRmxDanFmM1N4T1pkdXlEQnklMkJZRTglMkZTanhRSW5FVmdSSElhOVRFdFpZTFlZZGhXM1puaWljTmQ1N1hRTllqMU1IUVUlMkZPVXZtclVPTzFqQktVTXNnNEZnbVZEUWZGVElja3prdXFBVGNlakpvdEZvZk41MU9NZDFKbnJUNjExdFJkQ29Od244TFpoUFRGTzh3dWREWmRTeUFWclpSM1klMkJxR2F5aUs0cWJaUHNCQTFHNVl2UVJNTENuaE5QS1ZrdDJmMjEwSUxIUUJRWDd3bTdQTzdDaGpwck9yaTNSNDFxeWJIc1NRWkhQSmFiV3lINERTNUFTaFJPeFNLbEVPbjVtVkRGRklvWSUyRjBJSSUyQkVlRm5XRUU3U3hkJTJGR2I4NDd6ejVkYThkR0NzNHlMTWZnbkFpeHpBejZ1NUhudWVaZEZhN09heTY5bHpRSGVqd2g2YnJ4b2ZGNkNnMkFFTzZBNTFZVHRKNU9yNyUyQjhiZEpyYlklMkJJN3Z4ekNCZmxuTVpXV0gzRzlESGxBVWxsN3ZZVVROTjBURGNnSVdoVk1RSzMwSllmMkw5YnpKM0xvTGxhUSUyQlJIN2JVWnBjWkhoanNHck9zRDRoSjFQclNqaSUyRjVWNU92M0gwenFlV05NY3NsR0FnZm8waTNIOVFOTmd3bUJzTkZQUlZreCUyRmo4MUI5SUhkc0VmZzUlMkZkTTcybEx0endEcW14MHV0ZGVteXNPcWNPMTdpN1l2TXFCZVplaHptQ0ZESVdrejdYRmpQdUpTeTlnWlkwZ3RSbnoySXFNR0ZZWHh4ejJwWVZzTDBxVTVBZURjcGVMJTJCdkZjbDRDaGtzYVNhNnd2eTN1d1pvMmNWUUMlMkY5UjVuM2k1d0pPU1h0ZHJ0Y2pjcUlGcTFCb0x5anRZb2xRRWk2N2E0ODZ1dFNURkpiWW83Tm9sY2FOSjB6Z0ZvcGNZdnolMkJqSVVieVVxN0clMkZKQ01GQTByWlFXUmthN2NVS3h1UWcyR25jJTJGem9uTTI5RXdxb3d0Q2hIWG40b3ZZeiUyRnZaVlA3R0VyY1p0WHIxNjZjZWJZR2hnUm43bEEwS29UZnNwdzBoUDZRe
mZ5WmdqbXhPQnQlMkJ5T3FrZiUyRldsbENXcXIzaFNEQ3FRRUJNMExnQjYxQjV2TUpSUUlDQXNteE1hanpQOEE1a3l4VTlnJTJCWVk3VnBEcm84VSUyQnUlMkZ4V0YlMkJRcUhaOVFJYklKZ0thY2N6Mk8wWXNUVno4OEoxM1h3V1Bwa1RHakJ1RmRoWmNDVnZKNCUyRkNtSEpXOXJkUUxLR3ppWUNXVXZydk42M0FzcHVDaUJpQXM4UHFGV2w2blNEOGQlMkJUbjJjZWJsbUp3NGM1eFBxVHdtN3Y1cHJ5QWdPJTJCTUhvcGRHMzh4djdWSnEydSUyQmhKanZCTUQ1QktydFBtd2ppOE14dEppYVN0RlpTOXlVciUyRm5jV0dtTEtGbmluOVYwOSUyRkJnajlEajZtaUdnaHUxUXhwbmtBUklZcGxVOEI1MEFlTk1OSFRsdlprMDJZSGdwJTJGSVVqa3pDUVNBJTJGWmpMVjJhdlNENzAxMVFKY1lmTlNNNlNHSG1UeUVaZHQ5VThUU0hlNG0yNkc1QndmV3cyMDVobjVoMVdCcVQ2bTdESERWeXpRNEtUY05NTjB3JTJGVDRNRzRpUlFaWGFwT3N3RSUyQm1WOXBqcUJMZUFaMTlLa0VNRTFJV2VoejBXT1B6ZHR4eTFQQyUyQjFtYUViUk5aTCUyQm5xWFdzRFAlMkJzQjdJaWNob29uZ1AySFVOSkdyJTJGVWphRWRWdDY5TWc0WnllRjNISlZwbkElMkY4NiUyRjNXckpYNm45ZG1wbGY3c2slMkZqbnY5QUJQa0tzdGJzR2hRWjFhYXNXZWU2WGNJSWF3T0VsanhLT3RzYjBYV2RReDZkRjkzQ0N1QVA4dTBpdzYydTc4c3JBSGswejVuV3piWUpDc3FSazVsdHF0amNkaTglMkJ4S3MlMkZJWEFMMmpWMUZ4RmRSMkdMNVElMkI4SktLc1BUeU1hWm5YV3JLWkwyeEd5MnlSNkpHSTl1dW1nU2wwandUU3Vzbk9BbGpGWCUyRmF2NzhkcnE1eE1IcTdPTlNqbHdXT3puOE5iODNjVG9kSVFzTEp5eHhFamthYXFpSHZCNFF5ZTFKNjZWcTF3bmEwU05qaHJrdTVlbDkwYWRrU05Sb3FjVXZUSldocnBKdWZOS1BrRXdJc0hWaDdmNjVHMm5nSWxZU2ZLUmJpM09VbXJId2tBNDRaTTJvTm5GcDE2TSUyQlFJUWRFWjRuYTRjQ0xLNjJYa1JxeDJGQ3pIdm5FUkM4WU1SS3F2MmFLSW9IVDR5empzeDhVTDhzZnM3andRTCUyRiUyQk9pVkpVQ0NmaSUyRlVSTVNONmZXSHJRazVrMHh4Zk5QN2NpM01pUE5mdEN6TDFDUiUyRlNGOWJYNTNXUCUyQjZYQ3Izc2FDWjZHJTJCeSUyRnJaRTBLbHdHWnZsQ29WVjd5cTFua2N6WXZIbzRXSGp0YWdJVSUyRkJVckY1dlJUdEhSRkJIOUZCJTJCMWpscERQaFA2N1lLSkJ4Z2Fiekd5UlgzWjJJUmN0NjA1T2w2OHJGYzg1MlFMNEhHOGNTSFlBMnJ6RGlNT1JOMnc1NVElMkJoaGQ4UjhxdlMzYSUyQjVYb0p3WWdJWXolMkZEeUVZQzZFc1QlMkJJWUlQT2pHSjRJbTR2c1hhczE2bnlpeXZuVTNJU1hma0hqOTI1aEdKN2ZHY0NZcFVmZVowaHJlTW9yUkVDNUFBakY0JTJCeEZMcGx0dTM2OTVsOUpvbGkzcXhnemdSeW9CWTdjTzYwRzZGUXplbEg0aGcxVFdpTmxDNlg3aFN6a01nWEglMkI0JTJGVlBOZkJ3JTJGc2xZbFhISlg2eCUyRk5sY0pud3hJJTJGcDZiUDJJUkJ5OEhHWHl3SEpDek5HNENjcjJGbURWTkNXa0JPdmxOOFZUa1pqSU4lMkZmZWVCcGl6amE5MGNmWmVmb21tcnBvMkFtbUhoU
mlQdzVHRXc0MSUyRlpxZUh5dyUyQkRsaWpQZHFFSlJDcERuV3RsVEcxZkltMUJRWkhGSmY5RHRXYkMyYzR5JTJGUXlWSDV4U2xLdjRZTzB1TUNWJTJCbzBOS2diNFNhZFUwMEt0ZDk1VjY1WGxyaE1TODMyMGpsQlFuJTJGOXJaclVGOFJQYVFQRW4wNWxEckxUZHBwemlHN0lvWE5tc2hHak1KbDdsYTB0bUNvbkJCMFhUQTQ2SkZtRnRSajJvZlNxJTJGdWVLdkxjJTJCRmFZT2lFU3c3OXVvNDJIMSUyQlpMYWh4MmN3ZjdUMU1HcXJxVHE3MHRMbE9sYWd4U0hvNlhybmZWc1EwJTJGM1o0Q0ZkSWZseHNOOGlpb0lxazZJbnloS1lMZW5uZHVDVUVydjlkQXB5cFUxbk40aE4xeldHbU5YMDRUVVB3WHRWaUJOS1JsQ0ZEenl2dzdYS2hEVFVFN0g4Wm54TDIxdGNYeEslMkI5WEdOUCUyRkplODdsaVZIZGlYJTJGYUl4YUxNbWsxbHJ0cUdVeXFkWFhQOGFwZTNlekdYdkxNZXR1NjJOMWlwbU1BT0R1QUFMeEJXNW9KNG5keU5ieTdPYnNYb01iRDA0VTFDV2xFMjQ5SmxpeTQ4aWQlMkZtZDl1TTVvemtHeFptOVk0VkMzcnhMU3NHU2NyJTJCSnJaMUNKUnA4UEFLJTJGQU9kbG5IbnhkdCUyQjljbG0xd3d6emJ6QWY1aWd2MmROMUxPNHZrT1hJQ3pKcHlpZHdlS2FxRURJWmV2SHVsaUROa1FnaiUyRmNmcldDR2tVZzJEVjFGc2w1QVJPaWt0SDclMkJEc0ZGdFdIRWZQOWQlMkY1JTJGVDElMkZ5aFNvN3Q4TFZPSEJJazBjNGhFbE5sUmRUR3VmbCUyQjJIWHRNVEJkZWdDY1loJTJCSGNRZWhjR01vUXNuNk1OZ3lLT2trS05CWHFmU0hNVGxaZFdVQ1VqWWpqMm9nclkyMCUyQm9QRHNFYnZraVkybTE5RSUyRnpvWCUyRlppZE0lMkJBdmlnUWYlMkZOV1dUS1Z6WWNNcFBycTVLZDEyRVpSUnFveVJLVHRyUXh1WjFRTWZ1WEdoZiUyQnBwcEFwVXB5YUJFMlVwclRQdzYyTFpZOEhYVkZtN1kzcDdIUllUejVjVWh0SlZ4c3hxQ1Q1QyUyRnRsVEJVVk5jMUtzUXgyYng4RXM1JTJCSGNSd2lNdGdWY1lYeFNFdnpQajNYZXB4clpuempCQThvJTJCdGozMjhHUFZFSlRuRXVXU2hTTzIwbWp2RkZJTVBFN1VHdDFNc1lKSXo2dFFEWmthOUVsdW1YNWdZQ1Q5eW4lMkZEdDBBRFp5JTJGMmF5ZzFZUlNrd3RxRlpYOUlmNkczcVZoODBQJTJCNWJXSVElMkZ4dmZiekhQb0ZtU2pFZVZuUzNnWkhrNEJYMTBGek8lMkJwa3RGQ1ZqOGglMkJQYjloJTJGMXYyVXNwcSUyRno3alFEY1IlMkJ3MFgwJTJCRDNMeldOcVE4djl5RUo3QnVHTTJwMDh5QXA3eVg3UW45TkY5bnlpV3pvTjlnb2hPU1RUS20lMkZiMCUyQkxsOURLOXFCODNMN3FJQTlWR0N5akRPNEp0N0hTY0FrVVBOdlAxYzRINGdXd08lMkZ4QTM0djVtY0Q3cVBxYzRWM0k0NXVKJTJCYWV1RFRkaEdBaWkwT01SelklMkJFamdLR1lScHlvZHhzaFVZWSUyQlVYS21mUzh5NWhYMzg3aTdwMURrUzVVYjZEJTJCQU1NSW1KY3M3Q1VpT3NlcjFBQzU2NTcwbGtudlJFV2ZWWmFRUEZCVjlqYWI3eSUyRiUyQkw2eW1TODBUb25KcCUyQlI3d0ZPeTJqZk9CRkZKcThKWXQ4R01KTHN1bngySDglMkZWNFN3cEh1NUVZT0p0N3FaM2lHcUV1SDlnSzFBeHh0RnJUZnVGV
GJTOHd5TXJYNWp2QmtZQmMlMkJEdmEyb1dvNnIzYUMydmRmaVQ2MHBFdjZ5TUx6TjZnMiUyRiUyQkdraTMyS1pKUHVaV2t0Q2clMkJ2S2x4T0ZEMkZ6VlI0djBTRmElMkJINHdyRnIlMkZYUHpCTnNuMTkzYzZSRFFDayUyQlQyeTNpS0I4bGhMVG4zbEpPZnBLeGhaJTJCakxzenAlMkJjcGJXRHVhM2dyJTJGJTJCTWdMVEF2ZSUyRjhPJTJGUVBsbEVvaUFLblZxSSUyRmJ2UE1Pb05heXJxQnozUzdiTmp3RWFUNDB4ZUVwVmhRRVhYQm1ydUNBdGRuNGs4TU1pYndXY2pxa3JlSERCa2FNZjBKVk9xbDdoQiUyQlFUU0tNTGVnS2lCNFRBWHh0OFZpSWc1aGpNM0JmS2J5UnJRMWFPamxaJTJCUjZsTnNwNzNQWW9maTVldGpiYUtZV2hWbWtWTms0Rm53YkFqQmVaR0h5Tm9YJTJCWXZISE1nMnBVbUJ6MGN3VXNnMWJsanRlVUwwbkxKclBBUlphQWZqcXpiTWFDZGFFUndxVnVnNXElMkZiNnhHc0lEd00lMkZ6Y1dWU0ZVTmFFSW93JTJCYUglMkJiSUF6WUpoMTl6SzUwVmVVcGFnd0RsNlM0a2Q4MGFISTZWSFd4OUpYJTJCQVpoZHlOTmxzUEElMkZjMDRSZldDaElxSlglMkJpbFQxc1M3WlM1ZnZtN2JWQVNiWk9yNVM2cG5TMmVrQnUxaXRXNEsxTCUyQlpPS0NyRjZWQVR5YXNKS0tBZEtXcXZSa2hNWCUyRjN5cnBQWlNIb3o3UnlOQUhHeXl1TnF1bTZKanZMM2NnRzYlMkY3aEFUJTJCJTJCU0RPNHYlMkJnWlEwJTJCRVMlMkZlJTJCcXVZeGtRS2VPSUczSWRiNjRHdlcwd1FyNE1kOGh2cWEyYjROS0Z3dDVPJTJGSTNSa0Z6VXMybEtWT0NRUm5NN3VhczNmY0dkTERheXpFaWFVM0x1Rm5Ca1IxbnQyMUNLSTRyRksyOWolMkJSeEhSOVlsTE1GaEhyMUk1TkQzRCUyRllJbFJuVkd0eTZlc3BKWUtINmNxN1pFVWNadVg0ciUyRnhKYU1wWHltc0NMbk42akszS08lMkZkWE5tUkxIOVY4STJDNEJFa2l1b3ZKVjZkbDJLemJQMEE2VENXWXpSbmZrUVVuJTJCNCUyQjJrOG9iVElmNnVlanBVYWFnTXRGZ0NWUERNdk1JT2VuNzNhQUVZcnNOUlMwUlZzbG16ZjVHZDdaaW1zR0liaHJkTEFRQiUyRjB2eHBoU204SyUyRlZodjdOS3JpdGo3MjZ4JTJCM1B1eDNXQVlndVZLJTJCUGVuWVhmdUQ0NVNGQWNlbEZKU1JpQ2ttSm1McHFxbW8wajI4N1hidXF2OFRNWURTQ29FaGxuSnUlMkY5UkRhMjYlMkJsaG9xdjRFaks1eFVvVVQzVndBZmxuVUZuUHdFZkl1TWdEMXhsRGo2NXBQQVpZTFZaT0NZM2JaWVFTNDlQdDVyVkkxbnRYU1EwWTd6OWxlSUJTYnJYJTJCZm9ISmFKaThETWdtRnpFcEVwU2E3cjk5JTJGMER2akpIWHdjdnpNNExJZE00WWUlMkIxTmxMdzdZYWhTYzg0VXBrWVA5dmYlMkJKbWZKSmd4Zno1T0ptSTBmMTg3eVJuV3gzQWs2bXA2QW1YUEowR0dWY0puV2VPR08lMkZRZ1p2UnkyaHVFb25RMUdvN0xYNnd4bHU3RDhyOWhQZU8zVVBGVjFQR2NWT0ZlcnlUTHA1b3dZMnM3a1Q4dFQlMkJueCUyQmNVVTJlM3lGRGkxNWNRVllwMGdkd1BlNCUyQjdoeERpT3NqQ1JyQmFyVGtzQ1BGdHRXdmFlJTJGVHZrckllTmV2MHFmUHdJRHZZRDdiUnBVWW4yc1Rwc1RxME1VT0VPYiUyRllGcWg5c2xFR
WlBdnBFYTVFVW1FbFlZMkc4YWdEY2ppb3d3OWtvTll0ZjRVbEZYRWRvWDJoMVBhJTJCUTNlUEtZNmY1dXFkbklBVlluNjFOWHRoMkx4MllFYVd0TzBRZFBzbzJSUlJ0ZUFrNkN3WmhpYURnYjI1QUZVYlIlMkZaMUFQa2o0dTNwaUxieWw3cjklMkZEOUlVRXFkSXN3MnNWekVGcjlRbW5VdTYzc0RwTVU3M1ZSRUFKaTU4aGdybVlCakpQaHIwMUZGWkpUSGRXWVVSMEMlMkJ1WENMOTJPbTdNb3UycGxNZk9jaVJRUjBHdndLYyUyQjV0N1ElMkJXNCUyQmsyZ0VlUlU0ckw0UU1VNHJIcEpFMzlYQjNsak9kazdxNWZYdjlyRCUyQjElMkI1VUVHdUlXS2NwRzRhcnN0aERqNmlCOEw0bjVCeDg1R0N3ZEJDc1lMRU0lMkI0djRNb3V3VWFpRGxVZTkycVZHaGNVTGNUd1cxWDk3bEkwUnhqN3dOM01ZMVZuRGo3SEtGYVg0ZWRuQ000SWpmU0s4eCUyRktoUXJmNmtIYnBuVTZ4Z2puVTdDZk1EVVBJQ2RURGlxYjNhR2xNMHNmOWdZYmVEUmFjV1p6eGJQamRoWVRKRXNTNUN0aUVranNyNDRiNlFxa0s5SlJXZHlDQ284akF6dXlITG5weHNkUk1hR3NIUUViaHJ5OGswSHBUZVNHNzk5YTROREhTcEs1bDllZXoxd2RwUVhNTW5IVVdZcXhnUHBKcno2akFBVldRN0dXa0xnMkU5VTZXRjJPOUsxNmpRUjEza1ZiOURlakZiblphZUk2ZTlhZjRCJTJGOCUyRmtmTFc2MHc0cEFucTVsTkFrUEJWeThndjlkaTlWOFk5NkdxenBJM25PZElIUG5TNzhkSjlPRzU0WVNwNU9WVDAyREx0TzVNY2JGZnk3bnRTV3RhNFcxbTg2dEhDY1QlMkJVTTFZa2tWTzRUWVJWdzE4VXVoWmY1U2I3YmhvM1RkSzUlMkJSRWRDQ29IckJDekdEblk4RHBSM090WnhJTW5HUTd6WTZ2WDc0VGVvJTJGWE1XdWtuJTJCeWhsTnFoS2klMkY4dTBkd3JOZjR2dXNtT3Jtdks1YnNXSXZDaVA5dWZlNGdpS3N0YnU1WjFwU2NFd1Fkb2YwcVoydllGSVducERZRnVPODVmeWZ3MVZOOGJlT0xQSlA4ZHlSUXpJc2NYSlRJOHpMb0t3SU05MW5kS0YwRCUyRkNFdlJyNDZZYmEzSHpkSUNNR0VuZlBDSUo3UE9sVEp4VkIlMkYyV3o3NlUzVkprUmE1Z0ZyWDJwT3NRMnlFSk4xaUJNcnhFMTdkV3hsUWNCJTJGR3RPZiUyQmZDWFQlMkZWeCUyRkVvQ2Yza1Bla1NGRFB0JTJCWGlxbkolMkJPNk04N0x0eWlxSVFKd2ltY0dacDFnbkE5bDFLOHhZbnA1ZEhRTnJ0VVd2alRLZWUxc3BYOE5qcGhzYzZ2dSUyRnhYSlVhUXQlMkJaJTJGTzZOJTJCWER4UFo5YXN4cUdRYzhodW1pWUk4Y1U4Mm4ycWFseW5oQ3k1blpQVk1uREwlMkZmR3IlMkJjSXNQa0VmZXU1SlhQTFY3cldBS1olMkJxbVMlMkZzd1lTMm5lVW5KdTdXeFMxSGNmQiUyQlROVXU5YWQwZkpiYnJ2d29sUnZjTkh1TlZaYjZFY0RWJTJGaEtWVHo3dTBWQVRIbW56cU13WnUxclZKN2wlMkJHbzVVakNOYjdicGdiU0J0aklzJTJGS0lRNldad1F0VUpURiUyQlJXdGolMkJFNGslMkJ3YVhiJTJCbnIwZHhWZU9EQnIxcnJUbnBic3F4WCUyQjNZSkR3dlQxR1A2NFh3b0tBaWRyd2dPNjRTT2xNWUVVVzJHVGF3Q2VvUnNmUjlVdVpxJTJCVG9pc2JtRGVjanJNaU5qOSUyRktZVjRNVnolMkZlYmNic
GxEZlNZb2FkOGV1emtnNDA0RzkyNVJSYUhzcEFVWndYJTJGJTJCa1NvVTlKSTc4RUpiYzZId3ZNSiUyRko3cWk0cmtFZkFzJTJGYlZVWm5IMk4lMkZhSUJackp0Y2NuNUJHSTBlNTclMkZNV1NmZCUyQiUyRktnOCUyRlowbTlpcGFJSGN6U3lNRWUxMTZjdDVtQWZVNSUyRlYxM3pxVUVIMmJFMTNWZllNNDgzSkNGVUtza21EaldReW9ES3lzN0ZpZkJaaHVDaWc1TjlEVCUyQkNSbDh5WWVndmF6MkhXOW8zN1haNkQ3STZ5TWQ1UklzQVQ0USUyQkg4a3NEcTg0NkFWSUVrclFIRFF6d2RGMWVqJTJCYXJVaU83dSUyQkdDMUJYUjdyaDcxVjRGVTRHbW4wVjBKY2FDS0hNTVUxMmZnVHdwS0JhT0J2UUxaVHFvVnclMkYwSU5TbThoQ1NaVG1KSzBxR2YzeWhPM2hHY2REaTlGS29SMEFBY25SMWl2QXZ5VVRUMGNVJTJGQjNzZEdEa2s1WjBvMWklMkZ3Tlc2UHprVVlhWVZzUyUyRm4xNXBrQk9TazZZJTJCd0dxUXNUcnBRd3FIT1Y5MCUyQkhqZjlKVnBjWEhWWEYwU05lbVlNcFBRVzFPM05DRWw3VnRqYVRlbVJ3biUyRmRaTVJKa1hGZUFCUlNRejQxT1pHZGVjVGlhdTFOWEpaZEx6JTJCJTJGSFAzNVd0Qnp0Y3N3VjlPSGZjTCUyQjlJQ1FGUzVocTdrUG16OGZIV3g4c2tmSHNtaVRhYVRERSUyQnVQZVZ2Wk1ITWFPMHA3JTJCSzlvczYlMkIlMkJkNElPbGFob1Q1cG95RTVnU1M2cmtLNHdjVDdYZWlLSTFucTVpRmo3SlJVaW1LTzNxSnlXeGdEajZMOWhQRTY5M29GVlpPVG9uUnc3VVpSOUtvblFxRCUyRndoJTJGM1dkZGVPT3B1Vk1IM1UxVjFYb2VodXdodERQdER2NFdMWWNod25OUHF3MXNXWFRHeWZDSU5zZFA4NzRBZFJyVzEzdVYlMkJGQW96ZjNIQ29vUjE2SkwlMkZxMXJOMjlWN09uZU9WQk5OYkNpT2U4T2pYTVZtY2RiWnJuS01yMElaeG9va09WSFlXN0wxVFY1WURVakV5cTRhMkRWeUZZa28wMThIb0pueEdBY3BZJTJCUHZtNDdNdmROTnZTVm5aSDZqWFZWd0lObWw3Zk5SZmhMajR1NSUyQkRJdWxzZzdoaFkyWjhta0NBc2N2ejhyNDBDSFNaZnRUNWtCdHFUTzE3Z3A2VTNTTTglMkZ6ZDBoTm9aa0ZlaVdXQTdQazMlMkJlSnZ1cFZXSExza2ppVXBzS1NRdVh0QkdWTnVKUkRJTnFxTW11U2w0M3pDMmxQZkxNV3V6JTJGJTJGS3haTHpBTTFKUEJ6YzMlMkJGdnoyZnlZMiUyRlZCQzZ6M2NiWFcxRnVjWUdqRGRGTWx3SkFOdW9VSXBGUHIwYWFpc0RNdGxYa1ZzcGl2Rk5FTFRzVCUyRiUyRmJUcUFoRzFCS1JjdUMwYWNmdzA0JTJCajNpd3JtanNveCUyRk1vS2w0cmN4cXpNZjM5Wng0aVhlUG84ODYlMkY1U0NDOGp2Q1ZCJTJGOUpTYlAlMkY5WGFJNmQlMkJjMXd6MFJqMURsVmJkSWNqRUxXJTJCJTJCZFpaZkVZcTd0ZXIybXk2NXF4JTJCbVRnQmd4TUolMkJpSGNoUSUyQnJHaWZUTHclMkZoUXMlMkZTb0lTQ3R0REV4ZkZnTnZ5TUU4bjBpaHN6aFpxVVJtb3RnenR6JTJGZEI1c1F3MkpWWGM5NkdmdUFaWEdKNkRsak9FSHdldmQzZjUlMkJQdiUyRkxoc0glMkZod1pCQlBxJTJGZEFkaSUyRndlYyUyRmY1Zk53Z2klMkYzODBDUGFnUVREOVQ0TWdnWUptRWZiOTM1cjViNHRnO
EJRM1FJTUklMkZxdFB2JTJGOUlsZno5OUoxUzM3RXVmcGhMdGxtJTJCMFZLVHhxJTJCNFklMkJ1c2p0ZlBlWW92N2wlMkZzNyUyRmJJeVBxOWNvbjUyQU04eWFTTGV2MlklMkZNbkJpT21hcmhZZUk0OWpoc1NZMndNYXhmVVBPaUhkMlZwWGdKJTJGdmRqdmhmWUxyM3dVMFNyY1FEZ3pMQ3NJbHJNUWY2RjFBSG5RSXJLNXowZVpsTnllRjBIV1lyRDl0d09kbG9OZGpsRkxyTWNqRXVnREh5Y1dnbzNCb0F3Q0tibWZSV1JSWmZEV093b253dHEwRkhJMHRveVZXN3JNZ01hcmF1UnkwbDRpSkJESkZYam1zUGJja0ZqbGdVQlglMkJoWGVBb1NoJTJGWnBGVlRqbEZ5SFFsWGVBb0ZadXBTMHpNeSUyQjc5OVVHSVVnVVp4NllsdWdSMzVCSUJjbWc5eEpKSzMyc2Z2JTJGVGFEd0JFNThneW00cEttd012emU2cElvJTJCS1Y5UjdBRFBxMWxTS1dIZ25weXMzUm15VmpzaTh3ZWQyJTJCMGdUTUEwdngwMFZFZ2JERyUyRmRVYUVmY3VWbjBMOWYwbHZKQUhncVJKNm5LRDdHandOTmV5UGd0RXd3d1pQa2NTYVV3MVYlMkZqVFdZOTlBTlRxSVdpMjRaUEpBanBFdURJOW5jRHhHS1BBQTI3bHZkYnJHUkRnJTJGTHp3aHpCRjJTUlFTWXZtblBMeXBBeTIyTUwxQk9GOHU5b1BIWVhxazdqR2NGdFRsUXR1RVVpYzBrRUhmWlhQbm5xJTJCRUJCMUM3TkNqcHExQ3BSa0dvaXpUbEh0NzlrV0lZU2R5SU1HRldFbGplMlJyU054VkdacEVna2xzRmNjQmdWUnZlODNHQmZEVHloTTUxVEYlMkZTaU5oTVdOaXJJRFZUODdTQlJuJTJCTjRkUnhSQ3RxJTJGSTc4WDltcGM4SVc4NkgzY3UlMkYyN2Q4JTJGVDlnVU4xdTRhc3RKYWhoM2JQaUwlMkJ3QnN3UjZWYSUyRkdHTnpFZHJuUkM4MlBBYiUyRjhwMWVxRW5KV0oyMWJPbWFIZWklMkZMeGhhUXRJQWxtMEdYREY2cVhlMzdPdVJtR1ZsZml5MFBHMU9nejJUcHJFTE45andUZW9xNG8wRyUyRkpJRHhKTzhEYW1LOVBRQ210YjBGJTJCMlkzOVRHeXlJQXZ0WWlTUjlKcWxTVTZzRW5vNWFvTm9INnpFeWtuNkFrdFN6MDQ0dWxVaUJiQkFOWiUyRmhKcmFid2w5bjVqUTQ5aiUyRnNJUjVXdUZXZVpJWGVWVHVUWWZiOUVGVnJ2QXBwa2olMkZMZ1dodk1wRHZLV2g0cUJwV3FyRVhJYWlzWGhjJTJGQXhtYTdZaU5GdndCdnRwcjNFd0JHUEEwaUg5aGZXdXFYYUNDQkVzT1BGTUFWUElQUFhpZzBSanoyQW5EZHhXMGw2WHVYdTNRSUhLYlNsa2xLdXUzMTVySHpTNW5KQU42REJFb1pxJTJCbE02WXQwQkxCWDBnUGZyQ05IZXYyRXZyaXdyZVAzejRnUW9taGI2bWhSdWQlMkJBOVdER29xeVlvR3h3U25RNG5tN1lxcnl1dHV5Y2lKT29MdXQlMkZIdmREdllZODczQXZwUVV3MDY1RWRJNmM4THNucFh5akNnN2tiTmlpalV1UWp5UHFqaWJQQTR3RlpmRnRUdkIxU2ZDamFFWVMlMkJCNGZBQnYlMkJJVjVEbk9GbnJTUUVTU3p6ZzIzOVdhYWdjV3pPcHA2S0Zib2xRTmt3ejBiTVNtaUpUa0hPdG91NnBheFFmQWVGekhLeG5MMmtLWGRlRXhXUzZ1SiUyQkNvdWpxaHoxMWtyWnZjaEh1UzlPSVZFRUhYJTJCblpHa2pMUHdMTG56MDZGeFlDVUc0Z25HOUEwa1M0SmFXcDFZQnFuU
VdaSnhaYU1rMXB6YjB2R3hMJTJCYVdwODNVJTJCQ3VqbktoOHI3eVdsTWJldVRjUVYxNjFZOGFwR2Y2NzVydm1hdGZvQjV5cVU0dmclMkJOb2IlMkZHc2ZyWllVUmVqS012bHZ6MnZEdllGRVFhbmVtU0F6SkJDVDRTRGMwR1N2TEV5c3BVUElLblVCbGJZbVElMkJSJTJCa2FGVFM0VmVpcFglMkI1VDc4ME50Y0ZLY01WSGpBUzFVRCUyQkpFNmdJVk5xJTJCdnpNZXlYOWpYYkpNd1liM3ElMkJCclYzeHFiYVlTanZyRGU4UGJYMWNsaFJ5S3I4NnpIJTJCeTZRMjNudk1Bazhqa2hUTVA4JTJGYmF5b0FvJTJCSFhSVjl2VEFFYXElMkJyZGRJRnRJVzBZVVo4QSUyQnh3QkM4N2pPd25kZmdETVVDT1h1NlJyJTJGMiUyQlU1dExBbjV2VSUyRlkwZTVrZGJJcjNlMFpsSmFFdHhUT1NrUk1FNCUyRk9ieVEwWDQ5Qjd5T05CVWdxSWNtQm9uQmZ4bXVrTCUyRmdIcW5tNSUyRlYyTXJ3cm1BcXJCMlE5U3Izd1lWaEhQeVIwdktFZDU4WDhsVTVUcnN3Rm9IdzdGVnpnS0ZhMjExUyUyRmlhQWFJdTU4Mk1IZ1FLRko2eGUwTFpBTWZyaGo0dEIyTUFsWiUyQkp2MFgzUXJzWlAxbjdqbFZyVXclMkJVTzdRSk0lMkJqSXRETXRnbG5LTlRCU3dkMU9xT01jSlBrdHdEZEJYJTJGV25EJTJCY0UxWjZFVCUyRmsyY1JGZGV2VzJWVmR2aTF3bmFGWllSRzh5NEF2bUZKYUpuelF6VlhDR0pScWJka1RaeW9zV3ZwTjRHTyUyRlVsTTZjdHhMcFYzaHdyeWxQblhLeTVWZWxjQmJYNkdxUmdnS1FxNmxPa2xseXJIVDNHc2pEcjZXa0ZFUjAzb0ZTRkFtdjYlMkIzQm9ieTlqZ3I3dElSc2ptQlk3MTJJbm9Jelp0TjRubmxHcm16ZERKeFRYSklCNzVzbGZEUUVybTNnZkJUYkplRDZFYkM0dDhWbmpEWCUyQjd3a0xOaTM0R0pOclA2ZWRYJTJCeGpGWmhnYmdTeHJhWjlyeHFLUVRTNUx2Nk1GZk1hSmVWQ1Y1QTBib0dRUUo1MVR4QVV5UjhkWjl6TlVXbkJmWSUyQkxOaXklMkZndTB4MzdtNnFrZzVJSHdNM3ZSaTZlRXN1V2xMYTdWOW5tOHEwYTBzcWZFdkdvcWs5czJXR2ZKWGo5OU5nYzN3TnRXVWFWS1RCQVJHdjdFWHZ3VjhXNDg4cWtKUW9IQ0FCc21hbzZ0OFNGU0VEZkNOQ21FMVlXcDBwT1J3TDhra1c3Q3Rob3pMd1VCQVNKMmZ1WGVkWEpKWTc0TUVjc2tLUnV2bnp5TlclMkZTekM1dzJtOUgwZmNOcmRaWXFOb3hxJTJCanklMkZ4clZxdVozbU4lMkJpUHlPejR5TW1ZZE9ySmluTElHbiUyQllwTnVkOFV2YjBrSVBvOHZ5JTJGbiUyQksyRmNKd2NROHdqSjFyZnU1amoyb2tmZTZQc2xDdlR6JTJCbXhZbEZLVk9OSVlUWDl1cDl4WktXSmpPMG9YNzRJbkNIOUJIUHhSZnNFUGNHNkFRSm1vbDVQeTRzZEtUbFdCSFBSJTJCVVdMSTlPZHFHaENJVGZoJTJCZGVvYjZNNVZBYnQ5eVl6eG9Oc2ZtJTJGaG8wZ0VnOGZkYThnNThaUk1qZ2h3RHVnczBiRGlBUjI0NXFIT2Y3RlRNMHhOJTJGRGMzQ3FvJTJGJTJGQTRrTFkzQmtMZGlEWG1nS3FJZ3M4UWNxZEU5SCUyRjUzWExyWVIwMzBCSTB3VSUyQmFzWmdsRUpBM21JRTFwc1I1S1E5RFFlcnBJRCUyRjlvS3l6T0x3NXhmdm9GdVBpYnczNEdXQUdQVkR3ZlpuJTJGc
TRhdGQxMnp4dThiVGVJbWkyZFZkMVRsNDZaNVZaaDVmM2FjZUxkTWZyUlZBSm5EWHFYaXVwSlBGYm9DYmw1OHlpU1hmNkJ6QXY3ZWxvRVVsemlhRzhOQnB2WFpXUiUyQkxDU0pLTjRaZ09tSWZKJTJGYlJPdDFWQkYxTUg5YmI1JTJCOWt5alFVWnZmQXJZMFhzQ3lYcGdUYnZpbTZ2JTJCZWFNbU5Hdm92WDRxR1FSNHAxQXdKdnlQNXJrcyUyRmpmaGI4dEttbURJaCUyQmtZdDd0Q3V6aU8lMkZoWXY2ZDhLOEZMSVE0eExkMFpudGFxbVNWUTJhJTJCaG9rOWllSW5jUjRiRyUyQlZWZW5YWWJ3NGFJejFtaUxUbjgwV3V3dkZlWWd2VDlDR3QwaTVEZHRYaVlTT2QwSzY0WkNzJTJGWEtCOW9nZGlKN21SeTVjUGN2JTJCcUduWVJ0ZVlRcmtSUlF3azM4b1FHR0tPVnQ5YnFtM25DWWNKQ1pEODExakZNckdvOEJPNFAzMzFZR1lKdllzeG1RSm1ZYVNoeWxyRWZIMFU1RHNVclkwc0F0V3dMc3pBUGZOQ002cGljYjNKZGcwZldhZiUyQkNEcENBZHFuSlRYQjdUcDFWREcyWllFR0xUc20lMkZMbVdDR3ZuSm8yOTZkNnR0M2hHTCUyQmhIUlVGbFR3S0FtaElIMUh3YnZIN0ZkSUlxSHlRTnNsZXY2T0xQendPZEZ2JTJCa1JkJTJCZjRVJTJGWXZYcVVFY1JLJTJCWEhMN203STNkT0FsUXVXQU9XWFN3bmt4ZVBzWW50czVDanI5NTRuZVZmZENwNzN4NjNyOWtWcHRRMDFJcnBlSm0wNTEwZEVnSVR2RXlUJTJCTnJ1b1RrZzRua25XRDQ1QzZqMkJvakhCM0xEdmtnQmVBM1AwVlNSbVpnNXpKRnMzNFFWa3c2OW9DWHNvUkg2U0c0NTB2dU5SVjlMQ0x1ZHE0RUdSSUdmczB1eVlqalhDUTROVVZ1bFhYNU1aSUlmeHF3M0hDUG9ZR0xOY3p1SyUyRmhGc2FncGNSRzUzOHU4ZUNKN01GWWolMkZ4TFVpJTJGdk8xZnRQaDRxdzRacWNNZzZOcENVUlNMRWUxMTZZQjB2N3RMY01yMHQzJTJCWFJvd2ZFd2xlMkVhNWJERUxzZlhWYmJNUms2NVoxYnRtZHhwOTBLSUh2cURPUFBqakRSQUJsZUZNcEhLTTVlaDB2JTJCT0xWJTJCalBHcWElMkZjcVcyRnlpZ1NnJTJGSGIwRyUyRm1KN3l0ODVZRlNBTmlyTEZaS3pXYjNOeHE3a24lMkZWbHNIS1pOS0swNEN2R2NzVDJLcU1mJTJCTExjSmtrQlcwUmhpdiUyQnkwZkpmS1FucGl4TFpvOHZ5RjhLSUhqRXRwWWhzTFZGU0p2WGptcjB5YWczOTJyckhzSmt0QVEzdk1aa0N0Q2p6bTk5NEs0emtvbFFkejElMkJ2aXFCZlJLS3lTak03Rml3c09CejE3bEh4c20ycUtKZU5wblRJSk1yb3I1OExXZlI1eTZrWmdBYWF1MDMzYWxxUkVXcGhMVWhTdG9OV3I4Z05wVGV5UnZrV1BxZXhFeEhWNXcyUWNkVCUyRnprMzRibjFNZlRDQmFDNEd0ZGpZWXYwNnlXaGtHejUlMkZ6Um1FeVhxM0szS1BQMiUyQkFUeVdNT0p5aHBjbU5CWm1iSE9STUJQT0hCcnFUZTB1MWhHTkNwTjQzVHVESVFjSWclMkZlTVRyV2ZhaEx6TnNIQ2VDZUk5NzclMkZ4YXM2T2RoWSUyQkpabUxoSVlnSDBRVEFsS2lnTTBXamluWk1oMTN2dWQ3d3ZteXloQ3J5QnN2ZmhtSHZDR3FsSXZBd3dqb2lCc3FFYzJTbUdRT1h5ayUyRlduWDg1YnpRdlduWExiTG9zWjh1dURwT01ESndWNkNRUjl4clo1WUpjUFNUU
1JrajBzWVBQMXlxbEU5YWVLQnJyTkFmTDNKWUhxRkVXbjYyTEUzUW5MeWlOS3J0N3FScTduMU9zVXlrWU16ODFtejBiajBsT29UN3dxUjlZdDdpbHFJWWlUeVc5d3BHQm4lMkJqR1AlMkZ1NUloOGNFQ3ZSZmZHa3hGZVlCQmRsQzRPWWs3NkFmcnQ3djkwcTVYcGhaU1lKTEtSZk9XSSUyQmhNempoVG9yMVV3VDFjJTJGM0FVZE9wZlpKbW1RT0xXZlg2QlZvV0xjVHVUUFV3bUt3WTN3RFBvZ0xYTEt5cTYlMkYlMkZ0RXVoaFJJaHUlMkJBJTJCbVhSTHVGRlpVZXYzRGxLQm1DJTJGRkJnUThVWVp2S0xjeGdhZzhGUU83eGxMS29hMEJibEUlMkJUZCUyQkVscVFhamNnNiUyQiUyQktTYVlpRHFVcUF4b05LWkI5JTJCYyUyRnRZb0lPRWQlMkJuRUkwT1FvN0hOUGR5VlB1azBoakNQQkVvanVuU3ZuT205b1pCJTJGeFhJZ1BINFl4TTdBdmRiYTVaWWtZQVAlMkZ4czNyNmkyYjN6eHRUWjNINUt6TFZsYyUyRmRaWTJjb1RSOHkzZWp4R2YlMkY4TjNCWFpydnFBM0U4bCUyQm9OSnBFVDVqUW1MWE9SQ2hFTkVqc1AxRmN3QmhSbyUyQm9oeDMlMkJvdmRVRWFkUVQxZkVHMUNOZWhYWFFWWHlyUHpxd1hDNnVXWDdzY2gzbmpkZTMlMkZGbyUyRmdqR1VEODRES3BZRHk2MEVPbmVDUUJvZ0Q5c2RXTGhnJTJCZ1U4d1hBN3dUWEVGMFc0eUFOYjR3dTgwck51JTJCWDlyd1F0SnozTDl6akVTT21meHFycXdOJTJCJTJCbU5nUiUyRlVxJTJCSFE0QUlhQkh3RWR3R3pzRTRNeUJaQUd1NkJLVkJlQW1GVERxZ01KSU85d2szZnpiQmxRc1RtR0lJUHNBb3hUSEgwSlVQYnBQOGwlMkJndVpmd0o0TzgyOWN6T1VKYkpDJTJCN0ZqeG5IWlljNGpLWHBPQ25yRklFZWFqa1h5YW8xWllVMXdjVDFYMHNINCUyQmZLN2k3ejZneUR4cEk0TXowS1ZQb3puUnhXS1VSYU50SFg5ekdQSjNmJTJGdTZkSDdlTjc5QVFQbWU4NSUyRlBBeTRyMENtcDJwWkxvMlBRSEF0TVhITGRiSmpPMGRpaFNXZnFySkowMXhvOVZkMU9xaU04am1yYkw1QiUyRk5wQWROc0NvYSUyRk9STFk1T2lDSk1MaXBjVU96bm51SkR0TFgxNkxpU1U0UlpCb29nWEZDajhLMkc1aFptQTZxblJ5T09wTkxQTFNHZVZNU2tPeWhLZTdVRVNQeGhtM3lXYVBIQmNPYkxxcWt2WXQxNklaSFFKVVc0dGJ4NXdjU09veVNEJTJGZDNXTWFrVW9SUFJhY2ZCZ2ZjMUpBMm1EdjBRUlJhQ1ViUFZQeVZncmIxR1BabiUyRnJJNklOdHVyU0NRWHVhJTJGZlZ3TiUyQkVXd0NrSVVFak5pTGElMkYwbGpwakQ4Rm1GMm5WOVVncGVucno0aTk4TEhaOWc2U0lvenRuMlZhR2ZWN1VwNnVlU2FSMkhHT0ZqT0xNdEJ0aU43cSUyRjI4S3d0S3NhN2dYRHFxR2R0JTJGOUszaGllR0d6Z3VCVXphcUpYeGZrMG5xVmpHZldXcjdMOExWMDJsek5UQzRIY09laFg0MUYyWlJsTkNYcmYzVXhtTEs1WjFSMjhsTFJTdkVBSkdaWDFEUjlCZ1Fvbm5CNjVyNGNXUSUyQkc5d3E2emtuNHBsZWVvTjdFTWlKZHljdjhIcExqdzVmeVJJS1BNVmlyOGxZR2h1UFBaTGtlN0lFNjI0aVg3ZHB1NERqWmtiSUJXSUcwMkl4RXZsWCUyQlElMkIySXE5VU9sVDZGRTc3Q21BckFzRUhCRmtHQWFRJ
TJGQlQwdlVIREtKVGswdFZiamNKZmdiRk15UEY3YWxuVDUwVk15JTJGNzZ5d1cybDFmcTRFckh2OWdmQnlmTVI2RDd3SUpLTzJldnRSUE5sTzZHSnpEYW1ISFpUdmdoZ2p2WVh4VzU2akpUa1NLd3VpOXJxNnVJaVdyYXhyVXdtcTNBakg4UHlYYlpsbnFlWGREZ3I2dUoyY2Vhb1laRVA2NiUyRlpqNXA5WlhqS2hKNW1ZWkNpak1UUGg3YTR1cnZoWHpLcWx5c3BOVXd4VG9rdU5wcWFYcXRFM0RkcU9ydWM3eklCenBIWkI3UTVDaUoxamZhYVpXdGlHVklrTjAyZkZHVTI1OGlnOWJnUXdsRzY2Vm1pTEpVRFhLOW1qM1IyT1ZEbXVrRTlNUXZyVlR0bkZEVURNRnpDZ2pkSlIzZUhXTXRSclJyRFNKMjVNOGJreU1Zd1dqS01hZzBSRDN5SlJCelN5MGFiTU1oeVBkUkIlMkZlRkIlMkI1NFJQSjdTZUVyWUM3Zml6ZXlCUzlldHZmOGQ0b0UxUjhZb0VEdWNDR2hQR1VBOFcyQk1TSiUyRkRndjNxWlVvaiUyRnRqTmI2bVo0Q1JNRUlsd2ElMkJ1YWZ3M0xId3hRM1l2c01XOW1jUkN5ZkUyMHclMkZod21DbHRiTmhTJTJCdCUyRjV5M2pqS0MxV3hDJTJGNzI3YWY3TDRUOEZkQ2kyQ1FCT2dEeTF4NlB4QXJ5JTJCamwyaU9GMFFNUXN6b1lmemJIMXc3aWZ2R1lCQU1YNFQ4Um5uWDN4bnVIYWY3a1lOWmFzdHU3bmN6d0RGdXplbkhKVUElMkJvNkp4YlNFUHIlMkZ0Y1BuamI3NzBOWUglMkJCdGl0Z2xwWlRQdDByVGV2TzBIOHdkTU5EbEJ4MzJqMFhBOCUyRmRSeG1ubkVwclN5STU2dSUyRmFneCUyRkpaWXFuR1A0Q01vNzZjdjRsYnB3UWhHamhPck1tSUhZM3VNNzRaWlhCeVpnRXFYQm1yQ1Jsd0p1QXcyQVAlMkZTJTJGbU43cDZLY2NnNzNJdmQ4Y0JJbENlWjQ1SkFxc0FYdXRCeXdVT0pKcXFRaVl4U0piRFptZk1vandjd2lEektORGw3bG82R2NKWFVlMHFkSklWUEhrUTFENGpCcGVSU1I0YXV4UmdIQUpSNXN1VHFrWTJLJTJGTzFJcXRKTXBXUnJ1RWU1d1lJQTJrSEJMcTV4biUyRklWaDQ5TGxQcnF4Q2dZQjRkU1ljc0RMJTJGamZLMmNCc3ElMkJSR05EdSUyQjBJTDZZNGxpRGNtUVElMkYlMkJ4VWVqUnBpVk9TcG02Y0w4N1l3b2IlMkY4dnVNdXdTV2JuMmxmMTdQOUZBM2p2Y0VMaHFPcGlaQkpZTDVMUFZWenRmalN1OUZhJTJGakFxbnNRaXlMUU5FQzNuTDhtWmhCZ1hqZzlRUG5LNDJ4NWQ1NlFzVGNJJTJCNXlldTBUTlREYUxLTGZ6T3pDM0dBJTJCdzlHUmI4OXZ5WFpiV1h2JTJCbEtKWUx4ampiVlk0OWU4THJVeGRMZVNtRTJSREN0M0ljbTVNOUw0aGY3WGlYV3VrQjFaalBHY1RveXBPT0RpVGg3T3g5MTA2JTJCdXp6RXFFaW5NZm9NNUd4VlpsWWI4OE5qdTdnRDhKWVR6Zm4lMkIzZnBuUVpHJTJGU3V5d2dwJTJGOVBoODlqNzU0dDl5T25aNjFJZEVHTkslMkJ5VE1XTTd1bFlFV3hxRkYlMkZuTGF0eG1EbktIQnJRaXJ2Y3JBSG5TSHNzVU5xYng4R1YwYkpKRXFkUmFkQURSa2o0JTJGcktmdzVrV1B2MU5Cd041UnJzRmowVVZxNm1XSmFTJTJCeGRzaGRuVHVqdnhMa3NnQmhjZkFkQjd3RXRzOVhRZGw4eDR3cHVCZXE5NW9CcjloMGZOa3JTR0RGZkxLSTN0RDF4MWFTRUslMkYlM
kI1VXJYMDF4Q0ZYbG1ISlNxdW8zZXBDU0xQeGElMkZFJTJGTmo1WlJRUWtaS01zNWElMkZjMmhmZVVXbjF6SEtHT2U5c1hIY29sYTMyOGd4YXhhOE9lUG5sRVNIeDUwVFBvM2FEeVVuM1k3N1BsUXAxeW16SFZ6WjFZZ3Z6YnUyVW1tZ1N6eUlpJTJGS1hWb25paEdxT0JEY2RkdHFLOVNRc1RId2taU0pNQWpjOFBPSDl1QzU5RVZJbGk0TVNWWXB3YkpzSndsMFB4THNhUUJaNGd2T0Y5cjl6JTJCQ0lscUNtdjhwJTJGN041VTFKTDR4YyUyQkoxNEpNdnFpJTJCZ1dGdUJCWjJsU0NZS0RzNXclMkI4d3dUOVUlMkZ0VXBmMWV3WjJDdGZ6eGc1TWYlMkJxdndjNkQ3QXRFS2daQVhWeDRWVzlwNUdtcUpPNTIxVDR3RlJ0NHpzMGtsa2R1dFp5QVExbjhaMmZNTlI2bHU3MTNpTmNJJTJCS3Jlb3klMkJXSmgzYXNxaHQzaEtaQzhtNmNsZkp0RkVoWE5VeFZzJTJCbnJxdTIwRHlJZjFSVzlLbjlScXBYbFN0aU8zWUp6YmQ0ZGxFc3VLNzUzTDBEVCUyRjBzZjgwMlBBWUtvckVJWHpycXlJTjBCc2hPbFRYdlJGVno4dlcyN2QlMkZxZWFZTjRqJTJCTDBjMldUWXJrb2U4V3F6UDBMRlFJc0J1eGJOZ2x0MHdCRno5TFUxMnpvN0p5V09QaWtoSE9NSHBnUFM5OTU0RHMlMkJpSlVJeHpHOGVicjdnTVR4cG1XYjd4NlNvJTJGZGNqSDVaeTZpSHhCVXYlMkJwRUZRR2hJTVVyekFsM3NrdSUyQkZmd2Q5Q05HdSUyQm9CbkV2NEc2Yk14d2NvblJpYjNvNFBGNnhPUW1YYnNOcmhpNm85MXJpNGRtQkdwRWJQQzVmazBob1NmVHNQOHlzJTJCUHpJaUUlMkJMcXl6NnBBUWtOajM4TmFnTkhVM20waE9YUU9qVTZoNmM3NHhxVmglMkZoOVQlMkZSdW45emZIeXY2d1JMS3Ztb3V4VzYwSlg2YXJMeTFta3R5aTZmcG41NnJ3TEI3WG1JSXZmMyUyQmlFcSUyRmc4WWNOTFNTZXhrJTJGdzllWHY2aXJDdUNPQXdwa0RCZHYzalhYbU52T2hadlglMkIyV0E1TG1hTkVHeHU2VkhGcnRRUmpFdjRxQzFhdXVmcE85ZktBckQ2ams4UjNWUWhOZlAxWERKNFBWaHhEcUdSOWlzJTJGWlJNeVV4dUFqMGFia2RGVElVUkRDWSUyQjRKRVRvSEZhVm9NOTlpSVk3YkNwTWZxYnZraTFjVGpsYVhVemF2UmE1NUZtVmFxNWZCN3FOJTJCTDQlMkJabUZQbjY0U0s3RWFuaXM1ZDJ3N0Nwa0h5dUpzTyUyQjNlTWJMMFpoMmVERnN1WEZEY2U3dFcwaHhLem8wRlNCcjd4R2tGZjRvSnlkUUY0OUVMcWd0Q0klMkJnTHFwZU5DVGJ1U3JmR01HeEFqYVpuUkZ3d0lCaTdMWUwlMkZSa3lCdDhDTUtwVDJyM1haZkljVTYwZDUzMk9VR3lsJTJGaXVWdU04OG56YWFwWGlxam9sN0hVVVlONVM0Mmh2YThVRzh1S3pUMTVvaWhzUXdrR21ScDFCOE54WUNheXBxQjR5YVhLWTBXZVVnbm1WQmVIMzM4ajNVajcxbFNHNnRVcnZnc2RGeXYlMkJuUnRYYzlDeDBMZlhLUnZadUpHR3VZaSUyQlF1TkNRSXZaNmxnQXNZV2xaTWhrSDB2TzhTMkpkZGl3cEd6cFdjZkVRcGpHcnlxY2NsRmYlMkZxTGVGY3pLOSUyQkt4VXcxRWNlRDNEOGltVXFweGpsS3BGNFBpJTJGWUpTY1RJY1UzMW9iTDR6MDQ2ajdxVkFWOEpYZTFBQVdZbzJLOTZ3ZWhzWExGTEdGYjJvZ
G9GU2owdiUyRllwNEdCWnB5S2o4Z2MlMkJIJTJCSFElMkJXQlBYTEFDczc2NjNyU0wzRFpISW9uQnQ1V3NnTmJFNEtBRDBhSDZNV1R2MkhyR1YlMkZWR3dZZm1CejRwNkdETmpNaFg5JTJGRmJ1V1BEdTBzTGF2JTJGQ25WMTBaTWFtV243NzBrbWhXanB0SDRwbWplaFhZYjRKcE1scThKVlVmSmwxQTVBTnZZeUZIZDVSVThtNE4xRHlSYVJxaVBFMHY5UnJpVllWYXByWVh4ViUyRiUyQk5DbFlBWHFlVFM3MEdFJTJGV0VqNGolMkZqb1pMJTJCY1lNR0dIdHlYUEF3RjBlV3VZYnpVTlNId1AxdXZTUFc2TG52MkNCMTZaY3IlMkZvVk4lMkJDeWRPeGJ1SFI5SmJLTkRlNDlJWHp2eXhFczhOVWlnRndMMjRiU3J4OHM2MiUyQm1OV2ZKYzNQRldVcUclMkJLdFl1V3VFNVhscFNVcTNwV0x4UmFMTXZDZG9QdE83WEZ3TlZqaFdtak1yeFM4b1paanV6bkhub3FnUndOMTJudTE0UGtIZEhpQnBrOFFzWEFFVTNmcExxJTJGamZITHhmJTJGaHJzRHpHczI4WkNGM01zWSUyQnZ2MCUyQiUyQnZKMEMlMkZvS0ZkWW14Rzkwc2F3QUV2SnlUVkR6Rk5adGcyanRKYkpuTkRYTHRJSUdQTm5hTVN6ZFN6Q0dqMVpNZWpXbTM5JTJCeU9Vc0dlJTJGd1ZRUCUyQlllR2xGUzgxMm1oaG50amRNdHZST2tGaVVMN25SMSUyQkxtZ2Z5eFIlMkJYYUhZWTFOSWpNSHZvWmt2N2hiUTJVbjZuVmNTbUhsWGZHMHRraGtMY0lTbEU4ejNleGJzMkIydiUyRjY5c1cxUW1xOFQ5RGpWaGRFZUREdyUyRk5sNXZZVkRGVlNkbUtid2ZTS1JJRGgwb25uSll1MGg5V09qMElXa3NrSnFpNGFuVUNYdjliUDQyRVNSdW1wWTdETUdpQmxreHlXUFlyZEZndlRqcSUyRm1QMzF4cm9FWFRlJTJCVmFPcUJpTHNSWDlaZlF1U20lMkI4amJOaG1WOFUzQXlyTjVrekRMM21MTU13bllrSjkxbzlxdTZWUmJOZzJlZDRMNG5vODhYdGp5MmoxJTJCZlhYN0tsN0c4ejFCZ2JlQ2d1VTh2MUtaZExoTEdEV2FpOHlqbzNialElMkIlMkJwVFE5enl6eFglMkJxSnVwbTJqQmxPMVZudnFKSXZmeSUyQjRLSG1rdUI2TG16cHZobjFQOVNjOVRueTR3UXE4d2lwUSUyRlVEdmZqV08wNiUyQnE4N0JTVmcwRHR1VW0yWFh3emx3RGpIcyUyRmxmNVdOckxHd3d6Qm05ekh5JTJCYXFRZlRwU05hOWI5Z2ZqTHNVNnBpOWV4UE82UXFEWDF5eUpsa1VHWVhaMHRwVkozZmVvOXJKTHpsdiUyQlBhakhhQlp6WXglMkZ2ejZRcTNSazIlMkZQMzg4dEZZa1NTOWUlMkJ6ZHIlMkYwSzVCMCUyRkVocE5GbktsM3FkenVrbDJ3JTJCYk5lTjVEakZCQzJWbmdvU1l6RzhHdmZpeGZPaHdZTHNhMFpZWHViQ0ZCMEg4SlQ1ejhPdEV2UEprJTJCV2NlRG5wckJ5JTJGaVYwOTNmS2xsTCUyRllYeSUyRmVsZkhLUGZ3JTJGWWZvUnU2Q1VWcVE0NGFUeE1KM21nNjRwbG1lQjhwS3VoTjdSb0VzdlpEUlpDa3A4cEVCTkY5ZnJGbEptdkNpaXZRak0wJTJCaEtnTnYlMkZZcW8wMzElMkJuUEtmbHJUJTJCMUhFa3owSVFjVERvNTJDeUhkS1FKajcxN2hUOWFrV3hkTWZ5MTdUN0NxRVAzd1N4enpuSTdQaHJJWjBBV2lmSW5ybXAxN2puRVo4SU9HRURRS2g2bElLUVB6eGFYb2w1c
nE0Sjk1blFjTVUycWxTVlNvR3JLdlA0ZnBFcFpmY0JxRzB0SEU0OVRnTVBFeWNtWkphS3ZBcSUyRnVwTkJXU0tzMHBKWEklMkYySzhBSkxGSVVKM3VrRUsyVE1BMHIlMkI4NjhxJTJGYjltdDJpVWpaVkE3JTJCSSUyQlhVdFlwOCUyQnduREpsNUtMSHpOUkZSNXVudlglMkJndmpzVDRDZ2Y5YWt3ckNSNk5QSldxZ3VwVUFpNXFDNjZXZCUyRmxjSkUlMkIxQ0Q0VXVoTG50VTlWdW5lU0ZwbGM2blA3QTd0ZVBqaVVPbm9tcWxsVmR5N1FsUHVtVlhPbHVhYmd2VVhWeW5vaG5aMjFCMTI2YnZjMHB6RnpJeDh6QW5lS3NrTlRPWDdNUHU0WTR1eEpTenBPdTUwclViUGlCNGt1WGhHb2dJdXpISU9tcSUyRmtsVW9FUnFoUlU3bmh2MlhjQzljJTJGZ0VoQnh3OE9LelFoWlEzNSUyRjNya0NnQ1g3TG1MeXpMUjhWMmRRVlRCUHZ4ZVNISGM5MDkwVUU3dU1odGQlMkZjaUNBd2Z5UFY5MmVYc09nYTNGTHRLcWNUZ01yQndxbDY3N2xyb1RqVHNOcWtSR1I0RWJzRkdUMkprS2FKSiUyRmwwbWtVVCUyRjRyMmdoWnlKYzVpNVBBOWx1ZyUyRkxxNDhHT200JTJCWDBONnJDdGU2cEFlc0t2WUFEZnhPYnNqQkI1MXlKUVhBOWIyaHNBZzdVWHA5dkhtRkJIZCUyQndpVzdLZmh0ZkxsRWdNY1g2NlVrbERmU3N6a2Zua2lDckZEWEo3djltUU42NzMlMkJCdiUyRnFZYnRZenhUeE56MDdma3NqeTJjTmxjdGVkUndRcnlTRGc3c0M5OHJhUGhlUzhJMVJKZHlyOHhWY3RCcVlLQWtodjdZSFBXczZEWCUyQklGVUhTVFBUUTc1QXFvRjZGWkJyJTJGdVNSYSUyQjloVGNPejhRYTRDJTJCa0NlWlhsaWZsRUt4MnRxMFBGVDZsJTJGcFVNWEVObGl5YnhBNVVkWlclMkZTJTJCNjB0TGY3RGZyT2h4b3IyNVl1MVA5S3klMkY1SWJ6T2l3Vlo4R3U3MVhtJTJCb2FBSFBUa3hRcmNraUU5ZnVHTnczVVF3NlBBWnB4NktiNFhZVVJVQXFuNjhycDVuUDI2OVNlb0ZtVXdteXRybkluQ3pUenc1Y3klMkJSQmNRQk5CMktYejZJbXVXcmZXNkRONGtaR1c3NVVwZ2lmcjBpbjBuYXd2eUxldEE2bHNoYng5WDVGSTF2Uml1JTJGRGdSSXlIeFhhNGZsSGVOS0l6YmQxRUV1MTAlMkY0YSUyRjNOcUdkSHZ1N3lHN2hLTGdtJTJGUnlVcG1mSWw1JTJGZ0RwNiUyRjZ5YTd2NW9lQ2lxeWsxWUdUZmZ6OG1hSzBzekVmNUNZS1lLczZMYzhwazYlMkZkYzZEdEMzVlNGdG9QeDN2T0ZmMmtnMyUyQkY4VjJPcGdnVno5bDZodCUyQnpORHUzUDZZcUlLSmZpQzFFUXJmbHR1dzgyaXhnQnZxTVB5RzVYN09aN3lCa29HVWlFcHFxQ0dpRk9LenU1TEdGaTg3ZXM0JTJCYkxZcVh1WFA0N29nWGZjQ0puUGM5MFZvMGk0aDlEJTJCcXBGUmZjdVphb0lxVjhOTkRHc1lGMm5lQ0JwVyUyRmVvR0h2SDZ1eXpiQ2xHSldNZCUyRlFROVltMVFqVUR4TnpBV0VKMzBXc2pTSGMlMkZiRSUyRlNjJTJGREZDZkRBN1dwOEhlck9pZlM0UGZTcmJlVG1EOFJXJTJGZGt4MGI3TXJncWxrMUVWdmgzT1IyN0FrT3BrckclMkJyQ2VwUHg5ZGxVbjZ0MUglMkZCTnd5ZFhjTjYyRUc4bGJRSHN4b0Y1OTZWMzdCOXZlUUdaMmdwJTJGZ2JWNiUyRmNlWXFNUllhWUZHT
VRwaEU2TE5EdkhNcFpvSXFYSiUyRlh5R2FQakFpTFR3RWs5Vms0OUozakozNFZJYlJUd2Jud2tERTJzTk0yYmN3WmtGQzRpJTJGYzdnSEtoSHZiOWs1JTJGelhjbHREVyUyQmtmN1JBOGlTNnFPaWh4TTdvUW5yRTdmMGs2RnZ6MUV4UyUyRnpXYVlEOE80dnNNRyUyRkdSUjg3UzIlMkJpYnpTNXNhbzlGdnU1V2pRa3FDSTRac1VGVVg5Sk9EYTNkTWtIZDB4RmZvaG00eHFIcXowQUFPMm0lMkJvejJJS2wzbmhDcmNQcWNJZVJPaktSJTJGY1FVSDFpS3lTY1ZGQmwweng4QkZVU05GVHMyTENSJTJGVUlZekwxT1pmN3ZjcGRvcXZJNXBGNXhxWHV0Zlg2dDJJNkpRN1VTMUNSbXNoUVRjSnlaNVJCUFU0aWslMkIyc29iWDlRbnduajNyc3JObGhFa1Z6bU5Eenk3VkVLWUZ3QnJyRkNKU0xxcHhXWWU3WHhKd1l3ekxkTk9xOUg1YmNUJTJCb21ubjlIdXlGd2FuaUxuYzZEUHdrdkh0MDlvMXRpWlMza1F5aGRvN0pFeFc2Rml1JTJGd3Jzelg0T2lXVE5DbmZFU3ZVYkJiREw4enVhQXVQYXR1c241ZWgySWlJZmdIVnRzaFNvQjhoYVJpV2xmUGZwSExqVTJHM2F6UkFpMkFsVVUlMkZNT1luazdnV2VjNFRkWWFrRUZ1SkdURm5wU1luVmdCUzFLT2pVWk5ydXI4RDZIY0hCNE5IUDJORUJwWEd2QnNqd0JVQ1Qxc3RpY283Sk4lMkZvVjhNUlQlMkJWaWc0WWlyT2pKNnBabkJCMkhDU0llUjg4eWgxJTJGbnF1VElYMkw2QXlpOE8yU29xZzVlY2RMdzZ4UDRPVWg3UlB1dlBhb3JtTEdjMVdVN05za1FteHhRbTNDczZTJTJCbldKS3ZUeTBsVnQ5cjQ0MTJtMVh3ZHVGRSUyQjNaZGFjQlRacjJkMHJpRzg1N3o3aURZZSUyQlNISVdqVElJRTZEOVY4ZVBXYWN1aVNZeWw3NCUyQjZiUTdCJTJGSXF6d1JIeWNtdnExSkRxbVd6Q1ROYkZOVHdyY1pkZCUyRjExZjVBZVByeSUyQjh2V3VSRiUyRmlhUDZDOWg2Y3c2bW94S08lMkY5bk1oT0Q0Mld1SFlxbVIxM282VmJVd2JjZWJURmtvSFV5Z2dxWGZxQnBsRTVaT3YwMmRqUGZEUkF0UzZFdXMyTDllWVQlMkY4dklHRiUyQmlvWFJ3JTJCRUElMkJSWXhJUSUyQjZDOU9tU1lpanpRTkloM0FsM2xFZmFRQTB5ekIyVUpCMUY3JTJGS0IxcSUyQnBCWE1samVKVGd2b0NpeUpHWVJwR2s4aXpXUUt6ZiUyRjZSa0lUZ1lCTXZjWEhNSTB1Z040Z2h4JTJGY3BPZnFPYUVZUXIlMkZ5WU10Um9kY3cweWxtMnlHd0RnUCUyQnR0JTJCV3Q4TW44bUpCYjdjSG9pUCUyRnFseHdJQ3VxV0VnMmswUTBBMHdGRmtRcVJzaGs4TFljQnRsdXpqRCUyQlUzaURjclpEandzdjRSaDl2bSUyRiUyQjAlMkJNTzN6RFNTYUoxRENDdmtJJTJGYUw1am9DSDJ5MUJJWFlQY1VQMzlmbDRNRDE0JTJCZlJCUkVOSG5leXRvOE5NZENtcmJPMm1kJTJCMUxYdGlvVmRSQXFPVDVlVTBydU5RNDZubHlSVXZXYmlGM0ZlQW9tcW81WEJ3cnFOYk1DVm1oJTJGMzUxaVBWOVlaTiUyRnY3RUl1ZjklMkZLMzUwMXE1SGtGb3gwMXd5bGE0bmFiVWNISDlWa25BYVRUaUxXZUE1ZW1FdDBFUFliTml1QjlDQ2hQeWppaE1LbU4xcUxiaE16RUpJRXEybWNXMzNVJTJCc2tVQiUyQklOU0Ztc3ZTUHFFODRRb
jlSVE9mc21WZVJlVjJ5VCUyQnI0ZlNLUDAwOWlGUmJSdnB0byUyQk9zTjZvUml6bHh2cnE2Q25pbXdTbWtKWnRKZEMyR1duNDd0TW1BQXQ3QU9hOENiRmNoNyUyRmdlUTRxY3Iwc3pXRmRLOE9xZGo5QjNLY3czRFN6UkdqJTJCeE0xYXlRVXFxT1VKb2haZnkzJTJGUCUyRiUyRkJsNkN4c2I0VWJ5S0pzdlFBVTklMkJFdUFHZHpHeW9qblJIYzhDRUJzTDZ6Smg2NiUyRjRydms4cHcyaEMxVlYxMFhGRzVSTzE1RVdvYmlmWFQ3UGN3a2h1bTN6RW9EQkFETVM4ZGNqcnRJJTJGdiUyQmElMkI2RDBBMzdaU3NObzZBb1FSVDJmNktjazB4aFZzRm9jdEx2STJEUTV4WVBFM1dBZGNGc2xDUGZ6NlAyczQxb1hESyUyRkFUNjlEVllERGNPc3dwOGdBMGlPanU1eDdnTDM3OWJiTzViR2lXeG0yVnd0JTJCSTdoSGhrbmlYOGI2YnZiTExFME1KT3BlMTZIQ3BVJTJGNWozUiUyQmZibUtwVGZyc2JDREx0T1BNRnczVW1qNlB0UTlnJTJGbGVKaVB2TDdOcGFsVjJyZ0Rnb1JOQ3NMWG1RQXBxJTJGSXVJME1wJTJCRWpyR2J4MldkOVdpTE1LOFJKbEZPUk85QVJVSFNod0xkc0lETXRqOXFud2RlTHFRZmoxTFhIN2gyeFNWb1o5MUxJUnZaN0JIYklWVGtUeVQxVFRJbXBqejdaY3B1VU4lMkJoVGFBdlklMkJHS24wVWhBTXpQM0preVVDczhGNk1QUXMwMzdnbGhaODFXaVQ2OTdPZHlpc1QlMkZLTTlacTlhYU83WW5MN3N3V0NsUVY1Q1NIUGk3aDVFbCUyRmlzeVhDT2dSejJLJTJGbkRET0FhdW1ueklqTXpPNkJZMWllUGIzOXlQdlM0YzlQenNVeUYzdjlJZnJRVUpYczlJTFVXUWFudmh0bVVpdkl5cnJESDI0RW1zUG4lMkI0VW9FY0QzU2dtWlJoOHQ0YXF1UnFNT3J6JTJCOVhjVDVEZWlhalJZQ1UyVnVzT2hXJTJCM3JselV4Tk1KMHVLbyUyRlVKSUxxZVlIU0lkQWxLTTNVc2xTQjFwRUlwQXMlMkZ4anB3RXlyaTJLQzRPWSUyRnpNYzQ5VTdTbVlpJTJCcTVtdWV1QlJxaGYlMkJvQ1VQT2tLT0hQY1ZKT2hVSFFLdkFvcnhCZXhLbTElMkJkalNGVmZaT2lzU2RsbXNvSzNBa1BhaUMzJTJGdVZwNlRkMkF0bGxFOHpoY3pOTGFiNVBTZDZ0RjN4NWs4b3FKRE0lMkJWUHhBOWhnS1pXd3NkdSUyRk80cVFSMzYlMkJmdFBydDd2c2lyMXBjYSUyQnI0bTRSVmZXSW95R2NZdDlkTUlMQVRkNFBoSHRUdkR3NVBYazVOVzROZ1FYcnV6dWtZWVUlMkZ2ciUyRmxjVGE5VW93a0xVYnhjSDM4dGFjR2N5N0VySjhzQjNXeFJISmFOUWpraXBmOEFnUFRENTA4THhsOFJweGZpZjNqNmppVkpsYWJacDdsN29KQkx0TmFhSFZwcnFBS2UlMkZwSTk1JTJGdk5ac1ptVWRaTkpSa1I3aUU4NG05SVNwVXhaelY4cmRIZnRFUUlpTFVpajAlMkJ0bDhyT20zd1AxZUgxcFRSViUyQlZEbE9USWg1Ymx1aSUyQm9sSVVBZkFkU0JHV0dFNG5NZFc5aXdCV2xwUmRDdCUyQjgydjJBTU9UdFJkYTJQdFRYTnJHJTJGJTJCdndIUlcxVk9nZ0lYaSUyQlp6anFVeGpodTclMkJMVnRlMHJ6SHV0JTJGU25DVUZzN0xvN3ZaSXlzWnF0UkZEemlmZ1FzbjZGZGRqdzV0N3FFYWxUSDYlMkIzVVd6ZlpwaVp3JTJGTTBvOUpaU2tYZkJhdGpKV
jk5QnRUdUp1VkR1ckV1QjV2WE5pME5FdVNVSTFmQjNjQmpDMms5M1ZoZ2FXSWMlMkY3eE4lMkJuN3AzcHdITVRMayUyRlNySGpUT281Y05TV3U1VHM1bG9HdW5FUnozOUp2dE42ejlBejRNaGs0RTVBOXpDMjhNSFF5QVpYaDFpZDFSNmZpZEV2a1olMkIlMkZCM2pDJTJGYSUyQlBtdEg2ME96Z3F5T3NJblk0dzdzczVEODVPNTJnZldZZklBckp6WHcxU1RXTVpXYXI2bllKSkF0ZkszcWN1eDAlMkJTNXJsRTIzVGRLVFUwTThpJTJGTDhmZ1JmQyUyQnJmbm1lcUJ0MmRmOVZicjBhYllPdnBJVUpyVUxId2Z1VVRKcCUyQjE3OFFObkF3U21FU0JaWFFnVllEM3doZDg4dyUyRld3RloxcllxNVN5aVdIZWltZ1EzcWp5OSUyQkNkZVFrZGhCeVJGamdhZEVnJTJCUEdZN0ZjYiUyQlZqZXZscW42b0tDMDJ4dzcwR3N5RkFhTDk0JTJGVUV5MGhXZEJ1ZCUyRndxdHRKbXM2bEVvcnNsYkc1c1hyM1A3Z2dVRXNRbjVOZGNQZG9OVCUyQjg1dGZIc1JmM0VTZGpVTG1ZNjBhclJESGxmV1V0Rzk5RlVBYklQb1FRejdhZmUxNHR0U3FvSjJvJTJCMlhuTHBrOThYYzNkU3FzQ1Z1ckxMS3pPajY5NSUyRk82c1pZRXFHS255Mnl5a015RVUwWlVMTENJJTJCQWgzV0klMkZPc2d6Z2phUyUyRlh6RmM2RkllRmVOYXBaR3NXMjk2cGcyMmgxd2x3ZW5LJTJCOUZsbTNlSWR0MDNaaHVVS2NnZTQ4emUzUVN5YTIlMkJOcXB6RDJPd0JqNXFkRlZ6RzRNN1Fubkk0bzh1cTFZM2EwSUFqWmpGSk03SGdsY1BCTjB0Q21hemw3JTJCSGp3UiUyQlNKM2FxSFdxVkhpWERURTFrJTJGSDBpQ0t6RFlYYUhXYUJibVVEdm5HOEVCaEQybndHdTd0amFEVFdYMTBicFRlenM2Y2NCb3FVbEw1QSUyQktrY09jSHNtbENaVWp0bnZSTjJVRk1VYUlFZmNnQVpiY3NDUnd0NGpXQ3ZZMUhFSHZ0RGt2WXlZd01jV2xLeWc2ZjU2UHNMdzUlMkZCUkVXUVdsRzklMkIwd3JjczAyeWg2Nkc4Z3ZxRjRtSDhJc2JkckJLRUdmMzNDd3AlMkJZdmJpQjRYOEI0QVBJMlBCUGM3RyUyQjhUM0NaQ3JwdXFGY25tV1N1OSUyRlN0VEdONXBqdU9aaW43JTJCajFTJTJGZXI0ZTVoZEwwWWQlMkZmWDVVVW5RWG42RXY0RU1UeHZuUVZqMzBuRkglMkZmZUdySHFuN2Y0bkx6WXhGb0FWbjdSWCUyQk1rMGJOT0FpJTJGcDdSNmtvVW9LdmpjNTZ0a0toNldyM0F5YmNtZG1ocU12TVJBY0pabWl5b09ZRVQxcFVmRTVGNWtobDFpVFRXWVhXQkZPTWg5RWJNWVIzUmFDM2JDM2xseXlIaTklMkJtbVFFbVBlNG5PeEhVOFZwNFZhNjBvdHZvVHgwVEZNcE1XaUhBZjhpZGN6bWUlMkZhMVI2ZjQ0WXlyMExOV3k1ZGQlMkIlMkJUR3gwMXN0c1J3ayUyRmRqc3VJTmo1OURRTGhvMmhSR3Z3WDVLN2NTM3VLaWVmWkF6a0NMRHY1MXFCcHBwdDdIbHR6JTJGJTJGTUslMkJDZG1XNTNXbFp0MUd2Z2glMkJDUHF1Qmp6Rk4zNldDMlJha1l3Vlg3JTJGV0pyMzNLU2E1JTJCJTJGOFlIUDVhUVAxYzhncUFCTHFSWWlDQWclMkI1MHQxN1hETHolMkYlMkZURXg3M2hRT0U3VnhUZ2JwTU5DTEslMkYlMkJTaXVFRlAlMkJmR002TFFoM0xZQ3clMkZyREJkZnBPcjNCcThwV
jZtYjV6UU1hZHpxMndEJTJGZnBURXRaQTdia3d2V1BUZjhEUno2MzZjUiUyQm5UcElveDZucTFRV1U2VXo4VVNlYjJXT0liREllMUFkSFBxMWN2WFJRQ0dYSm1TRzk2cVhIeGclMkZ6Z1FpblYxelJ3N0JQd2lyMDlnbnVnS2RzaG1KS1lmJTJCT0pBTSUyQnVvaEJxaWUyTll6ZGwlMkZMRTloZlZFMSUyRmlOazRKdVBOaSUyQjFkZlBPWGZZQWQ5Q0hCcXRWRGtrTWJENE9rNHpSJTJCVGZ6eWdUaVFSSm5md1JOZXU5NUlJJTJCanlzTlI1eFMlMkJwREkyeFE3VnNKTW9OVm55MWVuVkZKZkRJYnpiMSUyQllGWGN3UFd5ZVglMkJBS2M1SjR5a3BPNnFPYXA4NTZDWXRRViUyRjNMOUd4dzFEanZpMXA0SVpIZFRyRmtmdUMlMkJVZjRTb3BncVBTeVJ1ZnlYTjN0emdaNW83TWxCR1hibU5VRjNiZ1VQc1h1Tkl0NDRLYWRNbXZMbmY5WlNyckYlMkJUaVd6YWl6dk4lMkIzanI2dTJDZHZCUm94OU1BZGFBMEdZOFZUUFRGMmhzYkUlMkJiT3hndWo3aFVWRHhIVlJYaXg1aGklMkI0enRYNW5HWHJVN1Ryd2o2aDZEdURFWG9wWGI5UkZlRDF2cHVYZ2ZOM1BTbnBlUEJ3NWVkUmRuNVBMdnN4YzBHazVLbnlTTkpKNXZNRGFjMUdtbURzT1RlcEt2WnNyYzZlZHROZW5tYnR2YlU0QmRUbUROZ0d2NGNkbHBXMlQ4WVJFejcxQXF5bmx3JTJGQVMlMkZ5QmJiZmIzNXlLTjd3Rk9Mc3RhWjdzJTJCZDlPUERySzhEcXU1MEJFQU9TRUJMcWU5UUklMkJuSUM4MGlRNHRXdWQ2QjFJWSUyQjRhTFBOc0ZLJTJGMEl6eHRpJTJGMllYaGY1cG9qTWllbWslMkJ1QUJNcVdGSktXNkxyOVk4aGoyY1d6dmV4ZHA3NVh5bSUyQmdFOGhyJTJGYlBPMlk5NEtjSTE0bDZndFRhUXpEMXFuNVVPZ0hYOVBNOSUyQlVxdHAxdWZKYVNBWVdycDB1c2pLajZsOEVTRjMwbDlhcTFhaDh1JTJGJTJCNEI2Rk5FUTJ3NE5uWDhMRk94bnhHOXUlMkYwb1lkTWhLMmNYc21yJTJCMm9vUnlvcmoyYkszNGM4NnJxWDAlMkZvQjcyRGNvVjBTQjY5dkJJZnFQM1AwbEJoOUtQdSUyRlQ0Q2RLWnV3OXJOSGV0d2dValhZdEdvdDUwOHJjdVJ2R0FxY3VqT3VkaFplUDhGNU1HbzVhOU9WWXhZN08xdnhzZEZyMGpXMGpCbTBnd2xjcXlwdG5vQiUyRkdsVmtPY0pCNEtaNktrNiUyRnJQVEIlMkJta21wOGJ2TlcwY2Z1UzRxOHV0V1dFbXJ6bFBmYWgyZXQlMkJVd1U1WkFZM2QwdiUyRlRYZ1hTSzF5JTJCWlFtTjVoTXpNYnlpM2NpeGdOQiUyQndFaSUyRnNwZ0NWR2hHNkpCdHE3MjUzR3RYR0JEZ05lUzljcW9UJTJCZkZJbGdROWYyJTJGaFAlMkZ3STZWb1olMkZIJTJCT0hSd1hiODRaU3AlMkJnbzRsYlVnaU9lckpYc0NQemt6OHY5RU9STDk2WEluSDl0SjhPcXZvN0lNUFJVZ1J5MWpTUkZVcEclMkJyV05CJTJCdG9nS0lSbiUyRmVGc3lFb3VVU1U0R2NUaGJwT2IxVXpKTFVDJTJCemZQbHZ0bWdPVWJqR1pPZmVuRW1FRVFEUnVhRVJmJTJCck9ldUolMkJtJTJGZGd4Q2VFdlUzNHo5aFo0dGNSbHUzMUtBaVJ1b3dZRk1HJTJCRWpldmx5QzRYQ3pyTEdSWmRIJTJCSWtnOHR2Z2Y0bk10RzVwTmpqOSUyRmtXemtmczhrN3RZY3ZhUjBUUEtva
mw2a2lxQW55TzZZT1dBMFlOd2N6R3h1bUVEalFLa2hqT3ZwaWl1WXdjMWNSMk1rUnZuSTJ2ZTl5cEFVUFJKUG9VM1ZYMzhzNG16enFNSmdhN2hnUnNxWm5WJTJGc2RDRno4TzdoakNhMU1JUzRUcTg2eDgyWFpRZmtrJTJGMHRzY1BuYjg4JTJGcW5MUjVxMEduZVdBVnVhWjZmUkoxR3hkZXk5d0I3dmtLSGFXZUJRQ291dHA0JTJGOG1lY1hOUTJlVlFJelhQVmlQaGxhZkZDTXJ4aE9la2olMkYlMkZSTUlidXElMkJOanI2cnhSNyUyQjZWbWN5NGNnaU4lMkZMbFk0TVE5Tm43WHRNc2gwTUpQcXFZYXhqcDNWTzRSRXFxdTNYanVYUlJ5anI1R3VkNGl3d1pEenpQMnA5Nk9VajYzNzhsNTZKV0c5MWhIamQ4blpQJTJGJTJGcElEZzZ0bmhrVUl5d044OE1CWHhDM3JCa2xQOVJJcUM2THpqbHpnS1FqN2NuR3E4UVY2czhNQmVGSEdjcmYlMkIzektWWFc2ekxhcHJ3WWpmZHlFSUxGNTdVTWFlJTJCTzZqMzNrbjB2JTJGOUpmWUY4JTJCMFQ3S0J3VHhhNlV4ZldaR3d4WlNCRUJvSkxzNFZmbnp1SU9LRnVacXN1S3JWNmwyTjdDaUs5Y1RqZ0REUmh1THdOWXRLbmglMkZ2WmFkNlN1bklVUmlTb1JLZkxxU0FmM0U0Tm93Q3RmNm1nTzlmJTJCUkVrVjZsYmp6UUpaWFRrMkJlTW9TZzExbUpLZk1rNVRkdUhSTmN6b0hTNjhkZUxLN0RvZ1JUd3p3ZVZka2x3cEdzaiUyQnhHb2doU083cjAzMmJqZ25WbFZnb0RZdUlrViUyQlFDZHVMcW5aYjhDc2tIS1J6ciUyQlRjVEtNZTFFRzQyaWZKekQ5M0VSeWs3azZVU0R5VWU4UmxseHIwTzk5RWJWbiUyRmM3cnFrdENaREZGJTJGeGhjMWpERVZYJTJGamR6anJ3NEtQelVvUlp0RkFBSDNpSmMwcmUlMkZvY0RKOW5ubERYZlZzQ0UlMkZMd3FaR1lSQzJGSTFQNkJ5eGhXbjFmNEkyVlJWSUNicSUyQnFPSW12d2N2aUs1Z21TMG5RaGdMVjBtb05SNGw3bXNuWWdhYmxkYU5aVzJZVGdJbkU1aWNISHpleTIzZlZycEclMkJmeXY4VDhpcHJDUmZtOWc2SkE3NkwlMkZwdGx3RUVaczg1OXJ1JTJGVTBBVWljMHZJYVZ1eiUyQnBmMXhQazlIc1ZDNjk1anRFWkp2bXNoMFdJcDg0MnRmazFqRzg5VSUyRjFva0NHcG9mVU9Uc2FtYkF2b2RlUEFib1V5aUlvdW1sQTY2dlNDbk1vR1NhbUs3SmFIRGJ5NmRkekpSampPY1g4T250aTJheEFvR3hPRkNNOFNJNks5bWhCM2QlMkZ2R1JOVCUyRjJHcnV4cExQU1ZxYUVnY2tzbmdYa21uSENvNVhZTlA4NzBLblU4d2NPUW80SmJnbjlXRG1uQWtxbVU3b1dONFg1VGRYdSUyQmpnZThzaE1JMlZZamw3eG1WWVZzbGZsVjdPNmRpb09JTUt6TDdUM3lZM1ZtcGlMTGZscmlkMldkRnh2MnRmcHJ1dmRUTnBmMmIySnFyS2h0eVpoJTJGR3BORnolMkZ1VERLM0pWJTJCYkw3Um1rczI0bGZObERoQ1lvUUxEamtWRkNrQzE5aktyZHd5Sms3aiUyRmExWkp6d3pMTkJZbzZGMXpGaDRhSjBuV0dOSUZ0eEx5NFpXQ0JYOE5kZ2ROeUhwalRWZ0h3Z0FtOGFYOFUlMkIzUk9vQ0VZM1V2MERtZXB2ZUo5T01XS25pblltRkRWN3ozRFN3czlwNlRCT1RCRW1ob1hLWkhjN0tYYzI2TyUyRlhBc1NMR3g3VmE2YjlGUjBNJTJGY2plWXlWaTBMV
nhJZ0xoeHlXVHU1OG8xRWVtJTJCSHM0VGp5TFFoTkpsYzFoY2poSWMlMkJtMGdrNGN4ZUM2aWZWNDJUR1Y0JTJGVVFaUTdPQ2lmUjh1UCUyQjJwZ2IlMkZIcVhmJTJGY0lmaWV3c2hnWmhObHRJVk1jUGd4MlZ0OFZmJTJGMkFGTHJWYzdjZkVRakdDeHFQc20wbU1WQmJTVWNSV3doZXRkOGFaMWo1TUVGNU9KeFY3UTNtMkgxWTE2JTJCeDVpdEhTcllrTnhXWFlRWE5OS3FyS3ZtTCUyRm41VDZDczg1QkZCRlE0Tmo4MlNDN253Zk41TEVHQ1hlS21uVHpHZ0U5Y1ElMkZLenhjeW5xb0wlMkZ0dEVsNDlVZlA4akZydkJCZXp5JTJGQjRHbE4weWhHVUlkelZOcnhFbHJzdGNWTHpRT0VxUVdOcFlBNmd1Wk1TNk5YdnNjVVNlMyUyQmljSkNmWTFYYThiZlFpcTZmR0VnUXElMkJKSEpDRUZmcDRDTzk1NWhYRndVS0FZRWJ6MjYydjV6TEtCZlZmbDJKa3JJWGpsN1pVUkNtZE1USGV2OHdxOGRBSVRqejdxTHRZVHRUQkxzZkwwMktzY2QwUkRKNnFiZjdBUzJxYWthJTJCMEdBWkpGRGFYSCUyRktqWkwxUHAySiUyQjE4ZHJUNDVRZEt6NlZjVEJTcGRUUXYxcW51ckp0R2lVTXU1bUUlMkJ0MFVOMmkxYVc2JTJGRUxKRXhiUUlRWndNb0l4OGVackNlSDN5bjJDTG1qciUyQldySHM3cDZyc2UxJTJGWWlCYnFxbEVwVEE2YVdqOUhoVFglMkIwZlN0Q1ZoZENRRlF6NFFOa2t1JTJCU3dSSk9jQ05pNmdRSzNoVDI3OUtCRWxDcFdCUnZUJTJCJTJCJTJCOHFVZTZncFRlREtORnNxeGIlMkZkeGFqSkZzUnVFOExoU3NrVHlHJTJGSiUyRldBNGIlMkJpYzhldiUyQlZGTDF2cVNsY1hlSm9PNjhmaDZMQmFuaiUyQkYlMkZaTmolMkJKUk52cTdhaktHTGhYN0pDYiUyQiUyQkZvbFI1UExqQSUyRlIweHBkMFE1cW1MRTh1bmpGUVJPR2VycWtUbW8wQTJyclNnMFoxT2RoeSUyQjJYeDJ6UnBoY3AlMkZEZjE3JTJGU1plUFVOQmp2alZYNnhNZDRKJTJCNDFHeG9jZzNKQ3dPMjV5MUJGaUVxOUExaHVuTiUyRnRlJTJCakdubDBnUU85M0tZUW52eUZTRnZjNjZHbWVHcldBanBNYVdHb0JCR21UTnBvMUtsaXZsV3Zyc0U3ZnM2WWdzMVdob2dnYURrSFpqM0p3bW5oJTJGeUxKajJMT2RQYXY1NDY2cjhzS01xZElKY2xkTm9wRFN1WWNJYlY1OGIwUXRIazQxR0xtTFRLMyUyQmdzQWdMbXFVZzN1QU0lMkJGMiUyQlQ2VTUzdXpSV0NmdmR2WGgzb09yZnFoQ2w3VU10NGNZMFk4UjdlOVNJM3UwWE5KNXowNk1jSTlJWSUyRnNIamJVcUM5aUEzdVZxUGhZYU85eXY4SFQ5VGlqTzI4RktnS3dWeUdvWWklMkZraDRHN0V0ZWwxSFhtMHpxcldlakJxVjluMWZKWHRlYzcxMiUyRk1jWXRIUDIlMkZ6VVpDSWhSJTJGczEzSW1rUGJYanZCVDF0STVFTHczWWF2cGpxZHpuZjdiR3A0MFVvTjNwa2I4ejVhbXdGJTJCSXFvVE1kUHZEJTJGZnNpN0g4bFBEeXJyOVhsVHhmaDViTk05UElEZ200MSUyRiUyRmpHRmQxNlBrWk5yMzZmUEZEWHo0RkdVd1NjeVlxY2dsYWlGSHBrMnIzSVlwdlNkZnVQZVNxSVJuMU1oZGxldGNjT2c4QmVhRnhFR09hY1RTcmYxTSUyRmpUa0JTR0ptUUFqNk5uQkpmVWQydldPZHE0c2EyU0UlMkJtV
1FBTWVqS2dnVVBIY2tXWnA5QWticyUyRjZsb1RFY3UxVHhQZmZaeUpQTk81bFF6dEYxemhncnZXdCUyQkxJTzh3c3F1c2pyYWl2MlF3VUg1ajhCaFZxJTJCSmxlM21US3ozNSUyQlBpV0w5OXA4cHhROWNqNnZUcFhBVW5JdHlBSFhoN1JLWUpwMGZwTktkdXdTaHU4SUI2JTJCOHclMkYlMkJoOGslMkJUV1hjNTNJTWNoVlVEYjIyYmk5NG4lMkIlMkZyVGFjelNzWWdRR1BORE04cm1tSXpKMzRJUFNUVHFYc2l0Z3czOHpjMTJBT2RGZlI3eUI4ejI2MGpkbmVBM0ZDNlhZdmJtS2RvaHZiR1hoRVdtSHdPd3Q3bndaSW10a090WTRLeWQ2anhRTVBZaVJzRGpKZVN4RjglMkZENnlCVnM4UTBrY1lpamZXck5pSzIwUGJBWHB0RENQRlVPdkhaMTdZN2Rwa2duY0pWeUw5bkViTHB0Zkxkb21zZmpBUjJiJTJGRzhPSUc0c3pHZzIlMkZVT2laSExtZlNicWNPU0xpa3Z0TXZaT1VwRzJZSDZjYTJscjVLUXRObENYdVFIY3lsT3E5WFIzTXFnZmVackpxaVolMkZ5Rm9ZVTM5TyUyQjFPS0kwV2RSaCUyRlRzTVVkSnhsZ2RWdGpuJTJCNmQ5RjNoV1diTW9yQmVNelRBdFBISnRBZ3JCYzhZUGJDVjBMajZzVG9JZ05MTmo3cGg3MHQ5a1NJS3B5V0VXMTdRT29WJTJGVHVvWnRVS3JmQ25xTmNZb0NSN1NmSFZjVHFYdFd5Q0JCdm1kMU4xdENhdnJ4aXEzMWQ5UUhwcE9ENHU2b2hQU2ZWZ2tvQmclMkJ5N0EyMWhkSHA0ODVNTDhZS2pyJTJCUHpvQ1ltcDdDcSUyQnFUbm11TGhnWHUxb2t3dVZKekU0Tm9ONyUyRklDaVIwemxwN0JEV3pQOElTWXVKeklrMm5tdXZ1TWVNeGYydUQ3QkNtaktKQ3p3MUxBbjhpakFXM0FuWGRvJTJCVXJEdVBpOEdBTGpDZUd2VzB0dEd6N3pJVVRsVEpjc0JuUGdVVG5WbUE0M2trUmVOWmwxdzFVUUglMkZOeTJCU0R1VHh1ZHB1MW8zd2NVbXVNYnpQJTJCdkthbGpxRHNaQjAwYzh4ZWtvZmYwYmdPenZkeWw3NlVVTWhjcDJvWTJ4RVRvcnNUVmZadXl6Tk9JamFlejNjdHNrRVFzWno3VVU4YXU3VGZwTyUyRnRWY1hZdjF6VmZVcTJkeE5zZ2E0am92aEpvcmZFNHdWNGJjY1pldEVKRTJRbzl5TFFQTUMxa0tUMEJFRXlFZnNVUVg4TU4zMTBXVGhuSGZjYmhyTWIxZEMlMkJvJTJCNkZMMzVTcFBTJTJCZ2RObFdoN1c1ZGRkdE56bHhhbURjdHpxYUVyJTJCQ3glMkJJajM5Umh2MXdWWHAlMkY2dlZYUmNOV2p1T01OR3J6cyUyQnhiTzF4cnRtN2hlTWJodlAyT2QlMkJHQkRvNG1tT1hNM3RwNXUyQ0ZBZ1FqQk9rRFljWlJ2TzJ2VlhTaHRXMkJrbDdpOUxGeTJxQmFqWiUyQlQzeVExa2tKWUV5YkFEUkNOV25nRzhwdmxyN3g0dk51JTJCUDM4ekpaUFNqdFglMkZqclRuU25KanRxSll0alJvaUdOa3UwMlJEMyUyQmtsUSUyQldvY0YwNW1RRVJzRWZkSWNZMW1XZXhYRzAlMkZmNjR1SXZ0eWxLNFU0MnBEbFZxMDhIWmU0TkVpc09GTDdFSDhldmdIakFkUlUwaGtWOWZZMTBKTFJwdU5GcDhYbGdpaE9TJTJGMGlqdkxET3lpMWklMkJpQTk2cWk5U1p4VkIxJTJCaWdPVG0yQ1ZjTWY0JTJCWGFuR0l1dHdueSUyRlU0WG1FQkJvWmQ3MFNEayUyQjAyMklTY1M1WXdzMDg2SkVzRVg2U
XhVZjlxQmVVWERMcUp5YUhvb0xpbUV0eE5CcEkwZlIwamJrYWlIMzZmUE5iYVlPcndpWTJMbGpYZ05WQUd2SDlobloxSWl5QmdrVTFidTE5TWJxJTJGdDBxVENQS2FMUUNkemNGYTVWRzNwbVJJbXBjQk9YTjNnUG5hSyUyRjM0ZXZGR1F4Y1k4UlJJN1ZlZFllemU5UE0yT3dsYW1BRjBvSEhXYndoRmJsd0dObHclMkY4TnpXNkR0eDY5dUF0YnVNYk1IU0RNa2ZaNWxZS2NaWk40WkhsTnQzS3lLWGtGaW5YRThseGEzWk5HZjF5RUhkWlczOHd5VHElMkY1Q3VHbUVNY1pObVN1NTUlMkJYJTJCYms4T24xczNJTElSTGY3JTJGNUVDdzM0VVduYnJ1SHclMkJFWjQ2dmN3T3FjajBFV1N5RU9DZVdGRnUlMkZURjdnTG0zWDZkaGNGUjlycVZlOUxxWXolMkZUanAxVTlYeHpJQ1VtVGcwd3IxMnZvTyUyRm5pJTJGa0o0VWFjcldSQmpCanNRR2Nidk5jbldlRmNuaW5XVDE5cDBKTFJtMk80OFZLSVR2RTBSaFRvMktFWjFmeklMNktTdVBxSCUyRk90dUFlbXJmM3Vwdm5WZVduNFNRZ2czRGF0MThUbzk0WDklMkJFeVNuV3E4M3A1ZmFPMGo1UVZORmJuWEN5RHpvYzF4aHNsQnJReXQxclNBejhPUFJhM3ZNRjhOWnJJa2ZwTWs3WTMxZjBJTSUyQkhRUzY4ZGZMclpMM2h3UXd0Sm11dzlzcDVzc2lhZFJCNXliNzRBTGYlMkZpZVVSV0hVbiUyRmlOMmIlMkJRbVNhRVdjRkdWaHNxNXZobU1WQnFZSEMlMkJxS1NoU1dkTTBnb0l6bUFaNkRqT3FkTFFvYzl1V1Y2bU5abSUyQjE1RU02U1NrYk9ENDBsTTlwTDYlMkJ1aXppMjlobzl2Y3E5ZCUyQlE5M3duRU1lZTNWSiUyQmw1UUZaMERuM2V6TFJ6cXdFZyUyRlk1RFRwZFRoNjBlNVQzeTdRRXRvSlFkMU1DR2VybmFXMkg2aG54aXpiQiUyQkhvems1VTRHciUyRmNJc2glMkZ3eXhSeDdQTlRGdDV0OUtmQU1MUEc3NDdsRjhHV1ZVTEFqcExFS0w1VDAwdUZRdXJrZmxxbnVERTE4V2VTNEd2MmZmVUNMTGRVZk5HZU84bVpabDBncjV4TG9kZm4lMkYlMkJYaVdNMW9IMm92SXk2VXBMbHJxc0IwazFZMWVvQldwV2lmUWdmZE5NaVkwRDZBYnFzTE4lMkIyRUlmT1ZDb09IS3dQb2pJakNJZEN0YkFMWWV3T0pQREM4Y3BvRDgwdlglMkYxZmRtV3BsNjV2c3lPMU9JOUxKckNsNE5HZW1xUndCMGdsaWxORjl1TUZTaGxuU0lkVjNmZE5WJTJCZDdGazUyclA0cHAyUmZWaUI2MTZHVnFOQXc2aEUzZVoyUVRZOW9HRlpNVktCdkZpRWVxaDBKb1UlMkY2YlFTbXZKQUVLUkN4T01nRjl3N0s2JTJGOTJFSWZwRWl4NzZTaXA4M25FajJveTk4WWlqQXRIYlNzclJuYldEd3R1c0xvNmVvcmRuNG1oN2syb2YzenVtbFN2bDlkc2MzZllmdm5VcnNYRzFROFVWeUJlRGxtZ2p3b2g4QSUyRkc1d2RZa3VhYTZHRGc5dHBOVWlGZDhJMlBMTVdLYVc0ajBlMlBLVkcxZkFJdEN1Q1BkNVllM0xtOE1JdFlYaGp5JTJGd29sOFRXbmNwb2w0ZnkxVFEyWGI2Q0tWc1h3Y000MnRUMTV2RjBsZDJuOGtHRnBDNmlaNnBqNmNtZ2lHeCUyRmpwNWxzSGV3JTJGQkNZYzB5U3ZwUkh6cHo5em0zJTJGc2ZmMUxaQUI0eE5uJTJGNmElMkZEOXpEYk9OczBodlFUYjZmbU9oOWh4anZJSzFYJ
TJGeTFkeVZpNzRhRFhoZFpLZE54RmtjMSUyQlhpYjNMUzlkamdzVzBJdU1scWVxUkNDWDlZMmJtTVJZUDFsanZmOHVxQ0J2M0ZVNk1zM2lPYiUyRlhpTSUyQnBnS1Y3R3ByTWVRJTJCcHR2bEM5TWd5Tnc2TzltVVZpQmJzOEdFazhhVTNpMTZmUWhUU3BiZlljbzklMkJyZlM2azFpSkRWNnlyWHRRb3dyJTJGZWJjZTlmTkFoYUJ1dndUS0RWQWZXVzBQdzlXZHQ4aDViVnY0dzd6T2d0OWEwM0ZCcXE2MWNiejh0NVZWdUFpSXMxUmtQdHBvRUxSUU1PbkJFVWhrY0RYams3eW9qZSUyRklURTVDaGl2dXp1SkZ0WkZTd0xnWWpUZWhBeiUyRmI4VXViTUluVjY5YUFxbVZOMHpMTktPJTJGbDl6ODU4SEJCQUI4SzVpTWxzZnlYN0JkRiUyQkp5a2JOd2RaWnNhWXF6WFFManRTeXJXcExhVHZha2N2eWRGdXhJTDBoJTJGNjdidTVkaHNCM0syYnJmS3M4JTJCSFpEOUtyS2U5VmRhQXdDdDRkN2lLMTlqMFVxdmVuRXJYR2VLODY1JTJGRGU3JTJCbHppd2Q5NnppM3NETmd0Wmk3SDFYS2hpV0lLM2IzemVvaWtDc3lDa2FJJTJCZlJGd1lWSUdFWUlCSm03TVBPT3plSk9KUzJmZ21sZUY1JTJCZnZPY3hPa3NIRlpJR0dNViUyQmtqcHM1cFFLNTFNdiUyQjlmdnFNJTJCeExldzNSUyUyRjBaY1Q2NEh6TE8lMkYyMCUyQjZhaUlmcmlCJTJGcW5jZEZrUVRWRm1TOTZqeFcyWURtWnllWWs5UkJmeUpBJTJCckRnemZxNlA4WVRZRjd0R20lMkZUOEtuSnduMUIlMkZLS25pdHVBU05WNU8xSmZORm5ka1lZRyUyRlQlMkJ5T0RKaGNnODlsTnRLTDc3TnRJeFZaUU55cyUyRjBDTFJMTyUyRmJoM1AlMkJ3ZmFiaGlhWkg0VjhzMUFrTUFmRWo2Y291emVXT3laZkpFWFRLdW0yZmZFSW44WnNNVTRwcHMlMkZXUlhlYWxWNzhQNWdlSG9uWEM5aGd5SnJzWldlJTJGdFIzbWRIaWtBQ1NHVEs3aHRyZmkzYml5YXY5VHE3QmRFRTd5U1VTdDRNaUlLVzJMZmpZak44JTJGSEVnT0REM2ppTVQ5a3NqWGU1T2JVdDhoWWYyOXo5RFBYM3NYdzZDJTJGbGluSW05NCUyRkVhM3p3OW00VDhTY3ZFQ3pnWkhNR3lMbldQWmhXY2hTbWdMRHc4eW4yRWdzZmglMkJDbXMlMkZuVDlxNHBHM0N5OGJhVEpid2dTMzRPWCUyRmxWRUFoNXpjTGFDRmFZV3YlMkJmRkVxMGNCZ3czakkzaWgwTlNDWGt3OTJqRExyTUIzamdXJTJGRXdBM0pEbko1eUxIcTNFb1FwVVltdFNiOXRBSE5JZjlwVVNEMjBVT1klMkY5Y040ZyUyQk4wdWtkVDlXdG5YeWpGZiUyQkEyR2JpRVJSaHglMkY3VlNPZUdoYUJlOXElMkZZWVdORU0zWFVLYkNnd1k2azVVTk4lMkJiTlFFWHpWUXdjbiUyQkV0NmFCcUVPY3lOQ1FDS1BiMWNwJTJCcm9VWUlEYUMxTEJVbkFhMGZQNzF0MGtaVDE1RVB2dzBubG5ZUFE1MGljTmtPOCUyQlJyJTJGSXh1NFhNMjQwTzc1QUhvZ3hndEpmNWIzUmxJcyUyQno3OWxsR0FLMnZLSiUyQmtLMW5aTTFxM2RhYjNiTnpMNGdVV1RWd1VGY2UyQUJObE8zbTFqMzF0SVI2ZmlxaUg2a3pBMzZNU1V0bnZoNEFmR095eUdEVUhkVndRNmR3Q1RweFJvJTJCWG5rOFd5NFpiYnc3S3VJZm9ZTVRDdHRSWjBZOHVzWm1meDlTYzh5M1VSdEN1V
VFHQ1dpRDdIUW5PN3dES0hlMyUyRjcxQXV6YkUlMkZPTVA4cE5abElqYmR1dlRwM2p2VkRnNkQlMkZjaW4zSVMyenRxOWU2SnBCbjgwQyUyRm5HTmYxWWdnMFNpJTJGZ0t3elR6VlhGakNnaTQ4VFQ4OHhGblRQSmlwMW40UzNXNWclMkJKWW5TMXpNSjAwMngyWlVIR1AwSmphNnV6c3FzNUVpMyUyQnNjZ2ttNyUyQmFvU05aUlBJZVBpN0k5YUkxTXFDUWVOSGFhZHVMcG9RJTJCMGJrNUF4cVJWN1hDODd5JTJCVG51bGYzaHdsQ3pTdGExJTJGcDhNUGtFUUtEWWcyVjRLdmlNNFZaR3Nlang5QVVrWWVISGolMkZmb1lUZ2xLWkx4ZkJCZVROUnhkaFlDJTJGSTclMkZFUUZBaFZFTlFYSW5HelZrdExFU1doUUdISHh6Ymh0WnlKTmt1aSUyQkE5bkZDZ1U1RHpFbjZCclhvZiUyRmpSWDZpZTJzbE4lMkJhaXpRWDV1aHdqMHdQQloweU9RQnF3bXRYaURpTmt6STN3NkF4N2ZyaWw5Q2JHUyUyQmZTZlhuVTFZMjN4JTJCN0VZbkNjT2l2UUhsJTJCdGlpJTJGJTJCUTQlMkJTODQ3NCUyQkVOa1ZsTUlVVUJ5cEczaUl3QTBscnpCJTJCVDYydkhQcmpnUDclMkYydkUzdWFkUVNyYWdQZUVLNXlEd3M5aEtleG1GJTJGQTcyMSUyRld3WXU0dVBJbXVmcHp1bW53VThaSlQlMkJ6UDE2dlQ0eDZESjBvRmxvY2JBZXp5elVJcVlkMElReWszTUxEVkx5b3NnY0Jvdm5tY29pS05CcUtkUyUyRkxJNCUyQjJmM3o3eEtpdjJZdXlEOU9WODk1OW9ZWGV3ZFlIWEFSc20lMkJTUSUyRm9BV2l0Q0g5Uml0ZVA3b2Q2S0o2UiUyQmh4OENMQVZHOFU5NUJyYVVVbVdORGNlVXZVR0hqZHpTYUJ6dkh0SWlyaSUyRlBsdmlqVCUyQlJxdGZ5d3lmUExiTVVETGk1RUFWVnlCdGNrQ3YlMkJJVTlRckhlY3pFY0ZLcXlIbTRhWERmMHB6dFFJbiUyRlJKeG93SGFPa3RYRkZlME1XJTJCeE9Hb05lbTJtaiUyQnJWbTBOOXc0M05heTNCNDhnQm1XWExaZVFMRk5hZWJIbUQlMkZ3Qm5ZVlIlMkJ6aVNLTjFha3pwaFFQcjZ2MHBzY2tRNlkzbjRCaHJ1bDAzSm02MG4lMkZlYnBLWVVNRVdoazU5RlhpTlpGTlAzZWIxclRETXVGR0xQY3FxZU04RUJNaXh4NDJwdTZqRmVwdTJTQyUyRkZkUDVWQkhlOWZrJTJGU1cxZVplS1ZoYUQyYjNQWUU1aGt2eFNpQ0JCckZWbFI3JTJGTEJqd2hNSjk3YlQ1eU1ib1pVSXRlc1BwNjhzUHYlMkJadm1tWE1pZzc0cmNjM2tSNXI0d3pESVQlMkZQV2pzOGJ1a0JyS1RTdXR1TDhhanY0a1RNOFFRSk9mUmlYbnAlMkZmMTRhajRiT1FMSlYyMVAyMGU0QzRLMEo5a0FZQ1preDhCS0J4ckxid0RsZUZtam1tdzNHb09HdUdiY0JJU1lyWnJURUliY2dhVnRWUWd4N05acnZsbkwzOWNYUEVIWm5Rd2VuaWMwUWglMkI0MElqWEtiJTJCNDhsMFMlMkJoJTJGYTh5ZkR1TVdodHZ3aUIwcFFZQmVWQjRwQ2c4JTJGZ3JDUEhQcm94ZXYzU0RNYW1pejdmajVJZzdNQkIzJTJGYjVXWGphTEpYUSUyQk9seUhvdm5QT2FROXZld3QwMW5UWiUyRiUyQktqNGNkNDRmenJyNFhVMjNzOVRPNjVxTXJ6VldwV1ViQUs4MFByZklZQVc1VFRZNzIwU2pITFVpUDM1eEJLSkRWQm0wVm9yYkFnVTN1UHRCZFZZZCUyQmNsT
nNKcm5XU3ZyMThQMzVSN1RyenRPNzdBQ1owTG5oaEpTRXFXMlBtVEVMS0lRb0R4Nll5bU5lS0p5VmNHaVZFWDVHWXYxYm1yNEpDRW1mckZmbVRiRlJkeEU3WmxvVHNRYzJpaWhxMHI5UWdsdzNJaHhqckx3OUtOSCUyQmJJJTJGM2JxbXQ2dlhFeTZmYkc4UnZqMFN1SDU2b1h0MzJZTCUyRklxT2xjRGh5Q3hMbFRkOHpkQyUyRiUyQndtY0QzTTMyJTJGZ3hNY3g3dzJqUjJUeklLdlI3YWJVTHFxRjVKJTJGZkJFQld2YyUyRmJIajNaNDJsYWM3RXBTdzByMEslMkZ4SnBwd25YcndkT25wVW85ckZnZlh5QXRnRG9Wc1g4cTlrVUgyJTJGVzRmUGRrVGp5VmFXbjc1eFI2MmU0NDVGTXNGS0pMd2dRalB0TjRXbEN4JTJGN0xPOHo4eGNYSjVjWmRlN3hRJTJGMzk1TTl2SUIlMkIlMkZ6QkRRRWJhMU9OQWk2dVVBOVRBalhZZiUyQnlxYlVkUmRwMkNGQmlmVTZWUDJ4bmNwUWlQcERvZEZuODY3JTJGUmlTQlA2V3dUNEFJbkNMeXhLQyUyQk1HbWdlJTJCemJLdGxQODE3MkV2bmolMkJhTSUyQlJ0JTJCbWExJTJGWEhGdG5ZZmFTZjM1MXpZUGpRRFRYNk5nQkNLamRzYXU1SkZLakdWWWVGJTJCSGZpT3RLeUNrUmNudzlMRnRhVnVxR293ZVNpMTlzRyUyQiUyQktFT3kwb0dKVnpLUVhPSzZpaUJmT21sMnpDUXVHJTJGS0ZJRFF1OFFQWWt2QXMlMkYzQzJZZ3AlMkJHZjIydWo5SyUyRlZxN3lQWHp4dUIlMkZLdVlTb0VzZ0clMkY0bWlSZSUyQlhvcXJ3YUhqQ3ZwTU5JOUhXakZ4TyUyRnAxWFRXN0lnT1NmejNWVm5TbjVzJTJCVjJIcFZtNWwlMkZ2VTVSOWJHaDBabEVwbmRPcFNQZGxrdklXMDNMZGNrazQ0RFklMkIwc3U2djMlMkJQWjJ5JTJGNDhvTVNxcHM5ZWo2cHJrd0VlSjlIeEdWd0ttd0tvMzkycVJFWTFPc05zYkFybHJRVVZlczdkaDdCTFh4OGJZYWd1dTA0cFlOWCUyQlolMkIlMkJBZnpCVmRUdFlhcklRJTJGWmFGOVclMkY3RlZjJTJGR1h1TjhFaGM0RDNmRktWZW1OejJCVkRKbzVZJTJCM1JhOFB4R2JMNGhzUHd3UXlBcEtJbmlRam1zNEVFNW1kRDNzaTMyckk0OTdMMVQlMkJKbjBBT1VPM1BlVDZWMDRjNXElMkJjdiUyQmlGYjVvM0ppJTJGM1olMkZpY1hnUUxJUjNhRDQ2bklFTzRFMlRHMGM4bGNmV1E4UzZjNklMbzBySTROcU12JTJGVFd6TERLc0x5NDlKNiUyQmFLJTJGc2M1ZldrMUtWdSUyRlRsNWZLTmlZaWZsJTJCVkw3bUN1YWhKSSUyRkNhQ3NHYmNib2tmeWY5bU1sNEN0RHJ3bTBDQWlMemIlMkZEYUoweGh0TDJKNEh6UHpaJTJCQXlQaWlOUjFUZXZYbndDSkpjTzc1bDlNSDRXM3JkTThEOUxKUDdFOXJaSEZuVzV6dUI4RFVGMm1QTWdEbGFjVU9yaXBEMVdoJTJCM2ZXUzVkNjlJRWk4c09QSmtIJTJCMlFCWVRRRk0ycHRaN2ZOenExNmNGT29sUmpQUU9xS3k0NGxNWXhBMGxsOUNWUXpWMDBYY01NU3VkJTJCRyUyQlJXdkVtZUFwY2pYNllISkdqUnYyJTJGZlFjcG5aZmh4ZDVqdGxTazR3cFBIajhpcDRCSnBMb1pScCUyQnMxQmlzM1hsWkY2Tm9INHM0TXFtdE1ua05zV0xkJTJGNm1oJTJGMjJYclVDJTJCTUlqelNwZ05ONlJDeEg4TmRWJTJCWE5oODd3ZTlvS
FEzeXVLZSUyQnJuJTJCWmxUMmptJTJGU05rZ2h1YVpETW8xTkpaRWw5U2YlMkZtOXN0VFhTODc5dldQMVQwc1lvYkM4WWR1SEZadVZzOWlHRmolMkJVOHVHUnRwcGFsa1hxWXkzZVZwTHR2c1NMMUJGd1A3Vmk0dkY0JTJCeVJudElhdEF5ODlrViUyQmFGTE5WbGw1NDNkVEUwd2Y0c1huYno4ZEtXakk1JTJGUURaQ3BXUVZSRWw3MWZNMk5EUHY2WTJYWkN1ejd0cFlrbm92N1NPZkF3UEliQmo0JTJGeXVrVjMwM0I5SEViQXh1NXhPb1ZjN2VZOEo4SVgzNUFLRTBCbGFIRzN3YzV0THliRm9xVVZSVTl6SXpLUEZvU1hlOUd0N3pFeWN5NE04TnkzWFBkemYlMkZuSGV1bU40VWJ5N005VCUyRjVHdUlEV2NCZjBiUWhhVnl3ZVZyU1BQOENabXdhN0dYZSUyRmlEMEJheFlTZDRFMXdOZjR3bktna3RldVVXcTY0ckw4aUlTSDdNNU14UkFuS1pHQnU3emY4ODdUdGF2b0M3cVFoeWtPJTJCWUR3WnZhckM5JTJGWW96MWxJbWRIYjIxUzZUcEFzTHY2WDJ5ejVSa202c0FkME8zdG9Hb0NnNmVmUWFicTJXclluWDB6eERBZ3BvWktpc3BIWSUyRnB2TUJ3a1BTMkFyZWt1YWVQd1RKJTJCVUl2aVo4N3ZBVkYyMUNoSUFlWSUyRnFrSnpreVR1NGdUZXFQeG1HTnkxZkFLWXduSjhWWjMyJTJGVlBxcjJuUVdUS1VnUmRRS3Q3akE4dDZaJTJGWiUyQkQwTHc2WjNZYkpoMWx3V2J4dlBvcGxHZmhPYjZTMlk5OXQ4YTJEekd6NkIwb2xTQ1c5bUprQyUyRkpmZDlkOXZXeTlCQ2V1UXo4MUFpcGlSQnJmZnZ4TnQwcDdCRExqTGZZMlAlMkY1VVE0eFQ5VGF5M1JSZGZVSXJhOXZ3MElZNExXU2s1cXUlMkJvZmt1TUZKT2lPeURpbERDQXVVUnJWRGhoNnRicGFYelMyNVFLa0ZLN05WWGJEMmhCalBmckI1WnJDdVF1eFZXQSUyQjEwQTBMd0owbHJFR05sUDRxTUt5OHZObUM0dmZ3JTJGUU1MNHZlTGdKQVI3VFBQYXRwbzIwVkZGaVhrZ1hIOGU0ekwlMkJHMERSUDhtRiUyQmlzUDFuUVl6RlJ1WFdTNVYlMkIwT1NFRFYyUUhLUTV0VXRPUlcwR3RrNjFJTERVRE5CMnlIRTF5MzlmcEp6R3Y5ZEZHWSUyQm5XWnNsMUlLOUlhUHpTUWwlMkJyaVBNJTJCUyUyRnR0aGdUNHhZVGdnUXJUNkpTUkE3JTJCcjBtWW1WSVVidzBxMGZRanlSaUNFODVXY1E2ZWQlMkJTbUN3Rk9CMnFTY3VPZlVkaWYlMkZ1aUNLSUpJZkElMkZWbSUyRkFRRkxwMnJPQlZ3JTJGTGkxYnF3UzZwbWg0WWw0WHFJWUxvQUJ1bXdBTm0lMkZPdXZuTGxqUnclMkJ6RHh6RnFtcEI2dG5kVDQzb1NaVnlDYWhUbjN4SUlOUVRBJTJGV3FjUHJyaFdGbTNPc01LSEtXTUxGZVBQNkFtbCUyRlhjNExOdjNDQWNVd1NjYnJxUXlEJTJGMnNjSWlxTFFFQWxRSzZqQlRRRFBpVzFSM0VBZHRBJTJGeFhJUEVzeW8lMkJINkVtRmR1RFpuNHlqJTJGY0IxVUwlMkJzUnJqbndSekpoV2xHVHhlQ1BXaSUyQnM4NXNEdSUyRjgxUUJZOEtDME9NbEJMUnhKdlpjWnpVNFNqUFFOSWtPaHZyTDVXdk1meWMlMkZ3ZXc3aXNBWndOejBXcWRvankweTNyeHZWRkQwaFglMkZwaXFqT0V1Q3hrZEdzT05iVXo5OTJNd0N2U1ZKcVZjbk1GNkEwWUQ4eVk1NnFQRSUyRmREV3ZEY
SUyRjJvMldIRHZTcGV2MFp2T0lFY3clMkIyJTJGZEgxQXdMTEF3QlU3UU9NUjJFVkElMkJxZVFsa2FWdm5FNmJKcWZBRDhoZFVCYzJKbWZwc283UWxncjVCcEZ3cnhmT3EyNlVabGZtQjk2VXNBMWVWMSUyRmt6VGlCTVIlMkZnWVNvb2UxMThhJTJCemhKOXNSeTU5JTJCNnRlUUtabE9nUFhTQyUyQkJWUnlKdE94ZXB0TDlZNU94RVVvTlBreEZaZ3prem41WTRNdHdlTkRJbnFxbDIxV2dCYSUyQlZnNWFNZWNWRHE4ZW92eDBCbk5GdVZiQjRaQnNmU2x6VEVuSzB2NjJUVCUyRk1nTCUyQmFpZzVtQU1iZEZGckxXREtyUTAweUJqNzlKJTJGYjhFTEJoS1lmcXFEbU9SOENjVVFPZUJrcjRjMmQlMkJGQ1hrbklmMHRWODglMkZ5T1BLZEY5bVZUWVRYR0lZNXhZejNCSUg3Y3NhZUVtclUwbSUyQlYlMkJTM0ZHSHpnbU5Sa1ZxWjdSUkw3QzFwbVY0MnV1bE84eGdoTHh2bEo0THJobmJGc3pwemRJOGtiMjI5Mko3aFg3N0FZQVNrWjRDREdNUW9tVnhDbWlsWEQlMkI1SnBJc3B0cFRpMDh2RnlwaWs0THEwSXVudEdWUUpEY3RRRWxvZFBPUGgzSlZqOFlIaHdjMllGOGpHZ2RkaHBXUjhmT21rSlh5SmY3JTJCaVNIT1dnWVNaS0JoWGhvdkVoSTdZUEZBTFp4WGlvU2RvN0d0VmxUT1J6VkRQZGVjeHczcFNabWt0MkNHUW1QNVQwVjluTzBkNjVZSDglMkZBcTlSWjM4RURVNDVPVnNmWUQySWYyTkVPQ0kwMyUyQmhDTHJnY2dSREhoTzJ5NEtpYWdvcjNlMndTQ0NRM3E4TllvcHBQT1BpbmNzbSUyRmNhd294JTJGJTJGT1g0RXlNeDgxJTJCWkslMkJQa1NIR0tWbTZ3TjZiWDRFRHYlMkZBRDFtQjQ0TVNDOWtSJTJCTDFiSEROTktVMUVlNGFHY0pWbU9tY0xlOWN1bzZ1ZWZsMnNVQkhhUUVWSTlpd2RwM0RQR01zZXhWJTJGdzNCRUVRUjJxb2JQWEE1ZWZvTlNmOHVLZUFXYkVYMzBpdDhWQ09EblVWNEdrWVpnMWNzQjVLNHVwZ0pHS044Q080eiUyRnhORWN3bVFHcWJuM2pubHkyWmQzMmc0YWt1U1UlMkJpdU0wM0o3MlFTdk5mMUY4YTNPTHVCa0ZzRzdGUGtKY3RYWEFBUkJYTyUyRnhqMXI3OVFyWHJ4MlBiSk96ZnBwJTJCN2IwWHpLbktkeG50SWJBRndwa0gzdGszanJrRmVWY2x1JTJCN21RWHZkWVZGOTUxYWkyWndudmVhWFBBOW9QdmtRZnklMkJ0ZHU0OUZUJTJGSXglMkIlMkZFZ0hxJTJGeU9qYnZsUDJSblUxZ0dxcllRT3IlMkZ0bHJhZXJacmxvYU5VJTJCbm85dlNkWnNDbnNDNHBHbnMlMkZ0JTJGdVhvSmZSenhMWTRnZVNBTDRVRDZwJTJGVnFJbGMwQUd4bksxcFh1cnQ4SHlCejJvVDY4SXZCTXRtZm5qUGR3QmpkUGdSamNpQjdsSDlmSldFamxEV2g5QjlIb3d3NWdpOU0xaUVDSDklMkJ6dG12MHREV2JXOVN5dnVnN0hkeUh6eTNyeDk0JTJCczlwVHE5c2xkczRla1RPRUpVMiUyRmlzMElwVUNCSHhCeHpBcWNQZmVmakkxc0Q0OXVMdlB6aUNqdEp6RmZSTjZxZ2V4NWdLOCUyQm5jVzZMMTRqb0Z3ZURQSjF6U3lETHdxSDdITkRjaXNOZW5RTzRDV0pJTGt5M1Y5N0FiUTZXTE0xJTJGR3F4bndjQWtxNWFQQ1hadE15alltY0p3TVB5MTEwWlVqYmJVbkxuaDNxaTJQdHN5akxpJTJGZ
WxNUU5kcDFYd1dkJTJCTjIzSEVJaGR3bktMQmY3JTJGZlJ0TDd2VDR5ekJOV25Cb2M3bkslMkZjUGFHSlVUVVRYZkQlMkJrMVphbm04QzFXc2lvN0UwU04zYzhielclMkJJJTJCJTJGMXhIaXA4ZlF5UEZOVmR6dk1ObTNTZWhSR3NxaiUyRjJLJTJGakx1NGRuNlh5eHRIdDJsdnQlMkZxODd3RCUyRk5HRlZCdzkwQ2JRa0VBaEhLd3RBSVZvZk9tS2VPMHpIOXJXMklGTGtqdSUyRnljYkN5SnkwcEk3MW5USW5WdzhCZVZ3NlZKNmNUZW9TU3lvVTUlMkZCbEJMejJKbnlVWEslMkZQd3NISGdDelFTdXVYVkdsbkozWmdmJTJGamYlMkJKajRVa0NOOWdTd205NnZKa1JaU0JmVDliQUE2MG9qJTJGZ0NUZmF4QnpFSDY3M2ZSZE9NOHdKZFJudENMMVBwb2hDQyUyRjIlMkIxNnJLUWNWZzZrVUlSWlRMNUxNbDdaRGVKQ0NZc1FubzBjRjM1ODlKQTRVU0ZBWEl6Z2hyNFllT1klMkZ4U3VyZlFsSEJiOFlNRFIzeFdveG9GTmduUW9qSiUyQjJMNWtVVVRVOFhIM3VzeFEyelZZM25pZDB1eGsxdlo5c2ZDakZQazBvZGYySzVwVFZsZDZVYk9MaUV4UmhzTyUyRjdZWUJJNVpweCUyRklCMU94bVozMG5QUEJUR1E5SmFXZVk5M0hxMjFUbjRUa242Z2RQdHFYJTJGSEdPRnBYJTJGS0cwZjV4a1FRc2NGZlZJUERTJTJCZllJbXF0a3FpZm10MlVrRzFrTkpKVTh1SDByd3pZMU4yWDIlMkZjQlhuUlQlMkJOS3UxQlIlMkZUMXNHY2cxS096REs0RTdMRUZvZ1dTNkNjbXdBJTJCb3ZDTlozRHdPMGg0TUMxSnZBOTlVV2lnYXZmQyUyRmhNJTJCbEZNSk9TR0xtQ1JTVUpnRE1JJTJGVGJCYiUyRkc5WkU3OHF6NXNNTFNnVVZSOWpCZzB6T0JIeTU3JTJCOVZXQjcyR3RIUnFMbGVVSEdoJTJGUkxEcWhCd1pjcjdOem9NVjg4WGZkOWVzN1NGcHVWcG9ucFBselJkc3I2NjZURWZGMEZXWHhQdzRKWCUyRjA5WFFqOHVYWWklMkZPRVRqU2tmRjg4OTNzTWswaSUyQmUlMkJoaUVMJTJGTmY1N2tqQkJyeXd3Smoyb2FaamRVZHhFbUcwMXBxUGI5emE5aXRoOWVPNXpzWXFMUWowbU5GV2owajZlJTJGbmxYT2xhWGY5dTYlMkZKME5ueTRBZ0ZXWWt0VktYR3JXdlp3QW1uSm4zUXNFJTJCUDJrWnBSam9hUlJsaVNPcE56cSUyQm41TXlybVpkdFdrN2NxWTZ3ZWI0Sk1jbjdneHpRa1Y4eXBkaiUyRnlKMjlncDN4Rnk3Qk1DdlZCWlFyNzA0WG93RXdYenVYOTc2OCUyRkVvSTk1WVk4YWhJR1IzeEFMM2J4T3E1QVN5djBwZG9aTHFkY0QzeFdaT2FWWXk4Mmw2TDlITWpUcHZHdUZTNzk4eG1XM3d0RmpwM042Q3NESGNlYlE4cjdIcHQlMkZ6Y1I0S0hpQjAlMkZwMUJrdk5xSFk0WXNOUjN3MkZwS3UwWHpPQ0UzYWlBaHZ6MHR5ZkclMkJVcnlmd3lVRkpwRmllNVQ2MUhXUTF5U0ZTWlRXOEk3ZEdnRGpabmMwViUyRmhxWHRaJTJGYVglMkY2RFBOVzFFQzNRZ28yJTJGWU1HVkFxanpwRU5FMExlbUJGeHF4NjZucjZZRTRkaVBxMUFOS3ZHY3BWdGJzVW1DVnRKRFY3ZDlTTkVFMDJEclYxVklxUU5GVFdJYnNkRFF4dG16SUdwSzE5VSUyRkU2SU9YZHk3ZjNDMW9lb1NZbUZGeVVibTd1JTJCMmoxQkNvbE5vdzIyQ1RCTHdSY
VVwMG1FcjhHNHByNzJNJTJGJTJCa29QVW1DYiUyQmJiJTJGeTVORXlsTmw2Z2JFZHJMJTJCR0s0WWlmS2Z1TDFlSVBnVHpRVVRZWTNBQTFvVHZSWjc5bDNlZSUyRkFMZVYlMkY2aGNRJTJGQ2J1cXpkUVkxcld6TVlIeFV2eER0JTJGOHBobHZZUXc4RmJrVHlqTFpMcSUyQjgzRHpNMnppOUE0a2RCQ0JvNjJNdWRYcjczJTJGOENXdldQZnRjejdIJTJGVUdYdkZocGc4SnAlMkI5Ulk3SFFpJTJGJTJGbkl2NTJnRlhISiUyRnI4WmVuQnI4dkJ2aTIwMmtnRGwxY0VOMkFhWkw0YVZxb3MlMkZkem1IMm9SdVNDRVJYbUlObE5DRXBteEZKeTdiV3ZMbEpyMXdUUWR0Zkt0aENWNWpva2R6V3FpenRZWHFSdk02clRBeXZmJTJCQlVIc2hNS1QlMkZPWDcydTJMdE4zeWxpSDJJbmgySlNWVTJseGo3b1pwcG4xUWlYclJuR1J0SGxLa09QUHN2d2RzRFFiVEROQ2NvQ09haWxzVGxMbGszJTJCSXRMZXglMkZoRlZQRnJYdVhRTkYlMkZ0dWhBQnM3SzRzd0x5N2RTSGp1WiUyQjdWUTZjZmxnTGxsQ2tSdUJMVDBCTkpyNFFKT0FodkMlMkJuJTJGN0xQNCUyRmp2RSUyRmVzQjgwRURHNVhZbW5PeEtMTnprREpzNjNqd2YycmhmVHZqbW41SGslMkZMOXg0OEpHTmN4cUhMUm90c1c3c1h2ZjBzTXFoSEVHUWZOYUJGQiUyQjNQN0NKNCUyRjNHQiUyQmlvbXpEOWl2dXVhM1JCMkxVcmFEYnZzUWhNZWxrSThJQjJhM0xBQmE3SDh0QiUyRkRueEhrNUhSRW5USE56Vld4SFpXM3lyMDhBUExaV1I1RDElMkJRREwxVmZwQ3lOeDlnT0pCeUVrS2h3R2dmUXJVa0VVbXQ5QkNjNlN3WXRQbWhEYUN3MFQzZ3VHelRkMDMxbDllWTVFNVlqcEhXY3k5cyUyQmpQb2NtY2tjQ1NWV3lFZGFURlAlMkJMT3VSTnZGNHVVVG5FZjlnVSUyQmR0SXFpODRjJTJCOUUlMkZCa2x6RUpOamlFWERsVU5UbGJ2d1dTd0JIc3c4anMlMkZhV1lzSWd3eW5yJTJGOTR2QyUyRklkQjB5UzZidUNseVdYdUZua3kxYkJqU2xQcU9IVVNkT2YzZU1TNGZTaFBHV1NiM1hCMzRrM1hNSExCYU5lS2hWQzJGYlhVNjQ5bmRObnJza0MxdGdZRTdxTG50V2l5aDV3ZnNaUTdJeHNmMzMlMkZZRGdkUmhzOThRYlElMkI0SyUyQnVSVmN0QXVvMUM4WVRabVdNaTJwbXJHUW9INDVPcmU3T0gzUWhEc1FRZSUyQnJmQXNlQnVZdnN2WSUyRnU5cXdxUndrSmU3M0lmSTBISWxmQWNIZkRyaFR2NE9VaGcwSlRJTmJ2N01iNCUyRnZUR3F6V2luWjUzRVlEM0xEZ1d2TDltaVdQdjlNaTl5JTJCN2xvMWo5dGMydWRBd0JIZERjQ0FhQmhkS3A3R3MlMkZxdWE4cFdHeHJycSUyRkZFSWZ3T2dHd0c3SDV1bVg5UCUyQlJXeTFEdFp3aSUyRjF6UEpncjRzc0tBeVVtdzNFY1NxUDJVSDREYnpnNndYbGI0TXdYN1p4UWVQbndYOU1uQzZ6dXpxN3hyWUhQeGkyamVFTkRiN1FZY25SY2FTaW9GWGEzNWdYalRJJTJGJTJGa2MlMkJXTWYxVGVZJTJGaUR4VmJyYzMyWmclMkJJTkNPNDNvdWJpRUN0d1pvdk5SdVNYeUpEYVpPVHdKRyUyQlFia0xtRmt3dFBDUGxpM2lpOW4waUJsTEMyWjlFb0QzRmowSnR0djVIUFdKemVxJTJCbmd1SlJOZnhLYjg1dXRjcUo0bjZJa0NrS
kYlMkZueGhPV0N6Tk1zR0w5Yk9zOXYySkNtTnZ6UiUyQklsZyUyRjBHMm9MbVVON25UM2lUbk03QkFQVmttazNXbmlkUWxiJTJGUG1wMUEyYWF0NjdRYW4zYmVTclJmclIzTE9QRHYxbXpSN0FmTGd3YWlqTmhDeFpjcVhycjNqViUyRkxSbCUyRk93Y0tORmhjMWs5VTNRTDluRGRRaDZRJTJGMTVHJTJCQUpBQnpuSHAlMkJIVjk5YnFsNVBNU2dmenYxeUxYcWJUYkhjMlIxTHNzSGRFQ25YR0tLemZ0OXkyUVhySiUyQml2eEd3YjYlMkYzZjNYc3V5S3p0MjZOZm9VUjMwNXBIZXMlMkJqZEc3MTNSVk5rZmIyWXRmYnAwMzM2U09xSTIxZUswSXE5WTA1eWtpeFdKaElZQTBBQ2xqMTczeFJxb2dkVkpMcENGRnJTaktYN1BJVlZ2ZXI5WVNWUDBCSE8lMkY3UGJvVFo2ZUh0V2hwbHdYNkJCSk9DNTlQQVgwNzElMkJRUmFodWw4d05sOHZDc1JBeEhma0czZ2RnJTJCVklVaWZXWVR0cVglMkZQN0pOM1BZNE01RlRnOFJQZjglMkJjV0FSQVMzZTBlTWNUWlNIbVQlMkJrRUFjSlFFSkV5WW93NVdGcFAwJTJGUEYyU1lXbkFDY1JKUWRkSkN3Z2M2ancyZnZham1rY2h3SiUyQjFxUURieU1SNG5xWmlYZFdRWWFNN1M1aEFQbjlhZ1k4QVczV2xVU0l3OUdPcWdXUjNaTWUwYjhkbUh2Z054N2g3Z1BjR1JydXlVMEZRQXJ0UUtOWUIxVlpQNUxySmZmeFNvM21YeTc3TUxLc29ZaFg5dXIlMkJmdHIwJTJGdzljYjR1MktiYnl2VXowbVhpZjdldCUyRjBxajJ3TWZKU0JkdTFiUWZoUWNEQ2dINzdJTHE5dW16MEVVZDNoRHhXcVF6cWclMkJuTXJGd1FFUDUzSG9OTTdaOFZRbDRONmR1N0NPaEt4S000WUdzVSUyQkZLWUtUeDJtNVBnSkpZZWdBaVFES3ZMNlpuU0x2UlZjY3o2NXVKc1hHSEliYU81NmZrNGc4b0tLOEpsbFlHb2pydEI2Umo2SnZzZSUyQlhrN2NNR245STlna0pHUDFZblZ5RU93N3ZtYlFONzclMkZreXFHUG5CaXc4TGJmWGt6TzZuNmkzaklGTkg2eHdqT3FxT0NzaUdHaW16allCMzM4OWZLZTZsem9jTUphTzV2d29UclRKTnk2JTJGdm9kMHljNzhjZFZnU0Y4UUhkcW5GT05jb3JreUt3bm1vbWRPUk5mdVFaU0YlMkJBYSUyRlFHTVYzbG9QSlhYd1Q1NzV2QTZONDVUMlByY1Q2T0o5VU5XZkYzSzFqdVhhZkR3MWRyS3dKNXlaeTU3TGxpMVAlMkZxQVJjSExEZjdhVFJVMGtkSmdUMElXeGk5ZUJpcThtcko2eko0WTRtV2hwQmZiNmRlMTBmWGVNb3JZY1klMkJWcyUyRjhIV2h1VUkxJTJCeWJISHo0QWRPd2k5VVdvalNPZTJ6bDdsQ2JlaWNUTllLNGdtTWVzaDBjTkZuZ2lWQTgzSHB1bFQ3NlRFUFhCZlF5MllXd01vOWRhY0JkNDhLRmZsZW1WOFZIU2EyMllScDNpdGFuSTdJbFpEN2FpQVNyRHZvYXBKM21FSEI2enlqcktFaldDZmV2Q1ZXNzFJWmVnQXlBc2tXckppUzhKQ3lPZWw2TVA4SklzejBwdkc2bm0xakVVU0dmUTNncllXTUUlMkZpaDYxbzFrelh5bFhtYkFjZEVQOEFkWGcySFFXWVZSRGEyVHU1Y3l4YyUyRjZ6SFV5RnZvc3kzSnQlMkJXZndaTVp1WDNxVGkwcjJCa2tyTTREMWRWTUNHc25KVlliTzhZQkNudEM4NFM0WTllSWpoWlZucHBJUG9xNiUyRnIlMkZ0Z3czMkYwd
2Jac05qcTNrMWxSZkl4cll1a0U3Mld6QVl1d0glMkJPT3I5YmZDaCUyQm1mY3QzNUV2bFhUNFd1cXdsMjNTa3gzMzZ0Z3BGdUJUOGNUSU5qZU5ZU1pVdlh2WXZLWU1XU0VnV3d3dDhqanl4TnhwcnZOY2JhSCUyQjliSjM2UXF1UDBtR3JUSlg1U1VxQnNhTDFDODhIUGhqemhFTHdRQWhNTUo4ZjRnRjFZRWYxcFJZdGR1JTJGUXAlMkZ6d0JLYzFXJTJGUUhIVkVvU1dqYlI4VEZ0VnRJaGRTbWs5bk1HNmlnaW5pVE53WnNQJTJCUjl0R2hPN0UyTU9OdEE5azAzOVBzeUprR2JQNjZBVFpuelBacU1tcHdxVjlDZjcyVzFRbHUlMkJJVmNSalYlMkZmak5MQTBKQ0t6Z2ZobVpwcnFtMjdUb0VheUhDaXh3MW1iSkxlSEVzUTNJaUxFNmJ5OCUyQiUyRkJkMUNJcmgzTEZJZHFGd3duUnE3RUxlN3BkOFdlUFclMkZtckptR24lMkYxYjYwTjZCcVBDNE1GRHV6UHh3JTJGTVE3cm1jQUNSYnl1UyUyRmVSeURpN2luUm1nVXMwM2ZpaHJVQ0kxbzJtTk8wNnV0S29zJTJGTVg1d3ptJTJGM3ZKQmw1aUxkYnNBa1ZqOCUyRmx2cTZFUGZsNGhoU1FkYnRTcWR0eW9JN0UlMkJKZ3d6dSUyRjhsdkQlMkZqcGMlMkZOcVYwcVJvTTkwVTlGSnIxdU0yZnV2JTJCZ2pjMEw3dUhmVHdYREhjMjBSZFNFU0UzSmxvYlc5SUZaUjhGUzYlMkJYV0dLZ1Z5UGhmQjFmcTlnREZuNWJXYWslMkJXWVN6VUxmdk5Gc013OVNNTUFoMjRBUVg0QkhHekRZJTJGNjBQRE1hc0lwbmQxV0xaWHBDc3d3NEhvcWpvRGpMUThzTjdTbXd4ZjlvZlNyTFRaQ2RpeVJUcXlpR0ZvektuMyUyQnR6Tm9palM3S0RETkZnRlBLeFZJeWRwcTlnOWxVWnRUTjcyUUhMQ09WZjRrZVhWR1VGJTJGdkdFc1hmWiUyRjdEa3glMkIzRnIlMkZlbkw2bTZDM2JvZ0tURDIlMkZ0UUVqNUp3d3psZXFJeDFBM1ZKV1A4Szl3MkRGRmJkVFNiJTJCTmJiWDdiU1JidnNqNEtSJTJCVWFPZ2ZSYzNITDdNMiUyRlJ5ZUZWVkUlMkI1dUVNV1JtbUNMTThrcFcyUHdrTDZmMUklMkZyanFEckdYRjhOMTlpZ0tQbTROTEhXbjlydWFkWjJSakprb05OYjlvaU9NNVRsOWtKbnZFOVU5WW5CYkk0RHRhUmQzaktVSXhVaFJUb3lacTBGQkNkbHMwQ25WR3pUeEt1UjQzRVglMkZtT1FiZkIwOThiNTdQUVA2WnBqaWI2dDB5QW43elJBRDVVWWp5cG9Vckw1V080NEozUzNORzlRbCUyRmo5MmhvbkhaUWw2JTJCV2dpNmxndjFROG1JczFhQlJYckNIeGlRQmFqandWZDR6YnZOJTJCSHpJaUEzeEMweSUyRjV1NjRHWlpJVjNQdzhYZHk2JTJGRHFNaDJDSGZ3c3ZqdWo3MDNpbGZpTXlZQ1UlMkIwR2hBc1Awb2JIa1c4VDdCZU9aTTElMkZ6VnklMkZHS2RFZlN6JTJCQXFtRHo4T092eGsxJTJCdHFObVFJY0FrSUtvWFJoY0FPQm5Wd3JWejNURmVBeU9IQUdjWlolMkZ2UGNNaElOYU5kcjhiQ2Z0SDV4OEZOeGI4WHhTNHZSJTJCNTVlcHdXTlclMkZJYklsd0hyJTJCbHI0ejlvWkxpNjh5bWhFcSUyRjhLJTJGdDklMkYxT2d0N0o4ZyUyRjhpVE5DaTk4dkNZcnFXcDJsdDlMZnB1SXhYeHlLbG42RFhCSDBzWHZyOSUyRnJTNk5XY04xUjBGR2JpWjJyNzlUayUyRkJZeURyM2d3a
E03MFdNTUx4M0MlMkZOQmxMWUE3JTJGQzNLQ2pQbEwlMkIlMkJCWXBzSFUyU1RkMThGR3olMkZUeFlYcG82ckE2UVhZTFg2aCUyRnhtQmhZWGtKJTJCTGwlMkY1b3FFWHdUZmtrWGoyYmpscDNURWVDemp4WWU1eFNTRGk1a3l1VXZnOGhsR3B1Ylc4M3hBQ3Q2b0x2WjNmblVsR2hSSmd4OW53TjhRVlhhd2JtdEpuYmpGTDJzMDVaT21TYzN6U0VFbEtzWlBiOTJoMFhmME43M0YwdnBIeHZPbzduU3NPRVQ5WWk1TlNuS2U4a0QlMkJCQTVjY1VkbE5kbmtObmNheWw0SFJMOUZZbmJYdHpGZ0xuYVRFOW80Q0dKSDZRbnAyVzNpdjh5MmRvR3IyTGJCa0JITnpqMVJLbzhtUHkycVpJUTQxSHUzcGFmOHBjQ2VIUDB5ZGNTNG1xZ0I4RFlXYU1TR09zV2RmWVdIMTZrRW93MHhIZ1QwNlBhZUdVSTUyWUlpZSUyQkt4Mm8yeFYlMkJCTHRRd3psdXdRRWdIayUyQnZRZDFWSkVnSmV2bnFlaUNFbHB2aXMxYnk0dWszSnk0c0xGMWRqQmhTUmclMkZjOVZQUFROUzFCdm1MVUV1NXFhRFlJalpueGNQUVc5VmUyTnFYV05sOW52V0ZkcjM4SnFtUGRwWlVFQVk4VHpHb3BJR2ZhVDhRbmxaVWpaTzRvOUpWbXpxJTJGYXZIa2JxQnk5aUVJQ2t3V0ZFVmRTM3F1TzlVaGIweGttWVJjcHhNaERvU0ptUnIzMGpCRkN5SHpYY3F2VTRQS1B3VzNrQXNXNENPTTh3QkZjT1RXaU5iUGN3JTJGbEU1b2dHSGNXc20lMkJTRUtXYk9udngyelRmUlczRU11NzJkTWdMNkl5NzRHSDA5NzIweWNNQlF5JTJCVGZqbUZKY0R2ZDVWbHBwNDhkbHl6WDZOdFdFQU10U3BRRXpTTGkzc0k3VzlJVmZnT1NJeDVyODNUbkQlMkY5Z0tzRWElMkZJTUZnZHVlWEI1TUcyUGp6WTJxcTlOeHVsb2tqQUpPT0drYSUyRk5NOXc3UyUyQk9VOGtLJTJCb0NwMzU0aWtLR1ZhYTdFOSUyQktPdGcyZE1FNE5kU01OcGs2cUtnSk5BVGp3dFowcHQ5ZzFrZ21Kb2dNTWt0T0lJRmE0Sk92WmlnNVk1R3lhTTRSQzVJcDlpMDR4cUVZWiUyRkhGUTdRQVJJeDNjU1RDZlBuQUxTd1NlTzBueVlUblZ5MmgzMzJqZXlheHdpOUJLdUtuQ3ElMkZpcldkdmZrM0VHSnNHQW52SHpoM2xmeWNuV0lIZmsxd1hEMlhoRjEyaXRzbXFmUmMlMkJsJTJGRzBUT2x3MDVlcXdhZSUyQlI5anN4ZmUzRWlRclF0RURJYm02RE0lMkJJQXkzMUtrSjNZQ2JjeVdYQWswb2ZxbEZsTUVkOXFrTkVBTGxQclpmN3hwZVVLaVNrUXhLNkNVNGZmWEU4SkZ3S2M1aDVrUUliNnIydTh1eFp1ZU15WGU2Rm5YMiUyRmM1YkJYNFNSZ2REU2MzU3VZeVFrRVZOd1VaVkNodWtYY0NXMGxZZDJFNUlGY1dxYzJicnBDdDh4WmJ4bmpyeVVPekElMkZ6aXB6bFV1QmY1ZU5zM2dVd3ZxdFhVRTZXQ25OeU54dVhpak50ZUtPYzZTbWlLUXklMkZiZVZyZFFuSTBwV0lqJTJGQWxMa3hMJTJGbUt5TGRYVFY1VjQ0QSUyRkslMkZrekklMkZVZU95RDglMkZzQnUlMkJ1UkQxaUw5bk1BRXY4QiUyQjdmOEZVJTJCZWV5SWVJSlFIUVR1Z3l5SE1icFE0SzdlVGJpYnhFY29JN09lNE5WamxBcDFPR0FYUWtWVjVxJTJCOTYlMkZRY281Szd4U20ybU9SRTM3SjBVQU1xJTJGNkZhUkxaTXdPOERHS0pkeDg2d
UxES0ozQWRNMTZZZVJaOUs2R1Bxbm41Mm91M09YZExnTHp2VWdrS1lJdWJyd3VpS2dBNUFkQjMyZEhIckt3b05NTkkzODNhNjJTNHdFMCUyQmpnWTgxcmRIaVo3TlFnY0Q0cFRpUTdsMGdiMWRSMzZqVG90d1p5bzVKemNUTWlyN0pQME9YQlhraFhDSkZTUGtxdHAzcG80R3o3YVkwR005R0ltbyUyRmh3QnBFQWFwMjNUdDhHcWtkVE5QNzZnZGpwQjdqZlUlMkZBcXRqS0c4Q2JTUkpEWWZJSnBYbHRPZHQ5MHJ1QTRaRiUyRjFYenFFWVdOck1CMkVTeXQ4WGVKN010SDA0cGgyMmZPV0V0aXZFdUYxN2ZnQkNSdUtSRlY5NUgyNGpyblNqdzlmQUtmQ0hxbFd0U2klMkJlVU42c0k3VmxJeG9Pa2pod090RGV3d1U3cElvZW1hTTFEc0VzMXJVcXdPWlJvM0RoV3ZkMkxHY2hMTW0yWTNreHNHZyUyRnJJVGJFbTB1VVEzc24zblVuOWdQWDV6U1N6emd2alNsazBHYUZ0WnBvWEx5QmY3MG4wRFpqZ1A2UTR2aGFsbGFHUGF4SFpIRHBaaFdtSUkyb0tLUXZIZlhtQUJhTXZMS3lWWGdONFBrVTgxdzJCaUNTSzd4UjUlMkZyaG9nTEM4SmhoVUI3a2k5QzhKS1hKbjMyRDJKSmU3aVJXb0NTd1M0eFdKTGJDYXpCbFBIeTZOQlB5azJKWUJjWGtQMGZLaGZGZU5ZMHlVQnNrZGFTRFRLUEpTVjVwVTZhciUyRng2dURmUGhmdEpsJTJGWDJLSVFjUGtDdU15WXlRYlVmWDJ2OWNsS3FnNjUyY0JnOEN1VG5pWnpmdE9xSnZ1a3paQjJScmRhVHN1MUglMkZ1RW5FcGFrWDUlMkZxUGdmTVdhaDZsQTQxOGVIY1AwOTNhM2tTTWdqR1ZmWXlSMXdRJTJGVHRkS1VxRnclMkJvMkx6SHpRdXM0NE8zRjlVdHRVUHdZOWFEaUNFciUyQm12JTJCaFdYTiUyRnppJTJCWmI5VzFCSEclMkJYTVQzUXozalVWJTJGaW5BbjBVYzlTOUdHMmpFeUxiTVQwRngyRCUyQmhtR25xbWZ5T2hkVm9Lajhhc0EzVTNOSnZ0QjNvZ2RyTXZYSHBSdktvaVdWemlkaFNCYTVsYlo4eDZudUxsSDlWTHZsMUU0dlZCQnljaiUyRmNybXhnUE5tWXUlMkY3b2ZpUUxRbFFQJTJCeEVYMUVLY1dVSld5MU0lMkJPTjJ0MGFNdVElMkZmeVZwcDFXUnlXdHVzJTJGRGFidnFCaFBteFFNVCUyRlpTbXBoTG1ic0U3OXhTUWRoJTJCekZNcGN2SDVmYjF0WU9qaDdnYXhQR0FQd2d4Sms2RWw1dUFnbGVNSEo1WlNyWXpicDljZXB2QUcyVFpZSjhBZ0N3S3I0QWFlVXhKbEZ3VEdGMmhFdEZnTkhTdmxVZTNHMW9HdHFjNHZiU0trSzRndzB3MUhZNzhHdFViMDVnUmZmY29HR3YzemR2anJBN3VCNXRQcXpnOFFzWE80Vmo3dkpXNWE3TXF0SnglMkJReFlWZjZQNGZjbVIwMnptMTElMkJ6eEdjZUxvdWZmZGU4SUFmSmRQJTJCaWElMkZqbGZqWG0xWnJXM0k0ZHZjM0FVNDdDZWJJaDdVc3NqRklPaWJGb0RVSW5JRTRvU014MXN5OXhPekxoR3RteTlzWktJMDBTdzlzSHNWYTFodjNPeWVHcEpPN3RqMkxXMiUyQiUyQmN2NDM4T0lMMzNmQUhJME5oYVk3TE1hSGF6bW5pQW1NY1UlMkJIWXNzeHl6QUIlMkZESXR2N0pkVnVPTmdydDFQNGdIVFZJWUFUdUVTYng1T2N0TWI5MEZiOGtjSEhxMlc0cnF1SVZ4Zll4JTJCJTJGNWNMcTNjUGJSRFBXcEdSS1RnQW1RdGlnd
zBqYWxoWnBkd3QyVTVGWUpwdGNObEtIUyUyQnp5VlFqY1g1VkFHJTJGaFN6QlhSaHYzeEE1WFUxYlhzTkM0eEFCN0tRNiUyRkNGbFFmOWFraTZMZiUyQlRld0o0MmNzVXIzaXNQN3RoQjl3UDNGUTFhMWJKWHJ4Vzg5bzYxdkFVJTJCZEZiUVBLRTRSc0UlMkZ2UUF0VkxsVERibUJ2U3RkSzdHY05iS3JydTNVYmkybmVrJTJGRWtHSk9uNnR4VGdPOUZxUyUyQmhReiUyQnlyZVhzY3ptSCUyRmFwTnRBUzZIVyUyQm5WdVklMkIlMkJrOWVsR0c5WUVpWkNGY1pUQ05TZTM1QXZOY3BXSSUyRlZRS3J3em1uQ0pPVEJORmdMdmFYN2xySTZZZXA3QzFSQ09ieGMxamplQyUyQklENGNqU1E1Vm1SMTFHM1I1d0hFR3pJcnFZdExwTUJ6ekpxVGo1RDBYT2NXOHIlMkJRWlEza2s1bmpTU0VFYnglMkZKWlFSNG1GRmUydm00VU51ajNuZ1d4NCUyRmxtTnVVeVEzTHZjenU0OXVySWNrcU9iczlDTVpMOVk2TnJ3T1M3WUNmUVhQQW8lMkJ4bGtzUXZGZklYWDE1MjFTTUp0dmtxeGFjNUFZUEl2JTJCNVBsbGY0VkVwRmQ3eThoNjE5cENEWkh3aDclMkZjempWYUZ2VWlPTiUyQjNMeUZDMjhzd3A3Z3l6cGIlMkJCZlFaNTclMkJycmhrcElrbWl3SzdCRTY3dnVOWk5oTWxIdTlncG45NFM5dWwxdEY5UmxFM0d6cExUY211RHlCZkNNSzYzemFFU0clMkIyYWRqbVY1YyUyQkl1SnBFa3pnbk9QVVd3OCUyRmoxb1pHR3Q3bXlLdFRlaWZ2NVVpVXVpdktxUHVJWnJDODd3RWNnNHRJemZaWldSJTJGZmVrTlQzJTJCS1Z3VEluZ0d0cHlEZllXcGIyTFRmMmUyeFB0Z3VyeDd6UHVwbGhXN05hN2wlMkZUNFpsRnR6eENMTG9GbG5XYjl5T1glMkJWdkV0V2JUN1ZiMkpVWUNacEtzUSUyRkFDaUMlMkJxMEpRalRXdm8xMHc0c2NSeGNXWjl2RkxVdHIyVm9YOUI3WG5PMGE2MXV4RW5DT08wYlRUUTdmWWdFeEJjVzlxejQ5Y0pndmZvbVNkNTljRlZPeUJ5JTJCbVI3N3VqSTBkZSUyRlhIcXVMa1FoNFRxVTc4THZzdEdCYXZVaUdVem9nN1NIRktGMmdlY1lzNDB0RUpRUDJGakY3UFRTNEJoaEo5SHZsNFNvUjFrMkJab0cwdWd3VUslMkJ6S3VVcmZjRE5hMjRUaWYyMjRpR0FkNGVFSkdWTWJaQmZqaFpkbDc4QnN4OEU5bUxiUE04RU9OZmRIVTBNRTUxdE5nYjVTU2hVZFBBazVDQmRKbUtIWEg2MjJ1ZG8lMkJHcnRsN05Ya2xPNVJJSlBnZmpmSGYyJTJGdU53T2x5eXNFJTJCSU9xbVprZndNTnlJMmNhJTJCOCUyQlo4ajA1UTFHOEZpbnl2eEFwaXA1NE5KWTAyc01xOVlaN3lXN1FCYUpHM2lqSVh1MUtUYmg0Ym9DelU3R3BIbzZrV3VyVSUyRnA1aHIxSk44OHgyVzFNSEFna1Z4MVclMkJNcHdIdEo3V3VzTFV1VHBvT3VUckJPdXI4ZDFvOEtKUHJ3aURvRzAwVW5PMTdDbjk3WDRGcWJrclNMR29DVW5wdTZLTW1NNTl3RWJITFVaJTJGV25MeHNJYzJJODFpUHk4QlFqWklPciUyRkhrUHRTJTJGZVplTmtXcUR3anlQWCUyQjVwUk1Fb2VldjVWcTV6bU02NmtoY1FrUEpuNiUyQkpTNlllYklBOEoxa21FN3I0dmlKMG85aERNQllPNnkzSDFzUW1vdlZqbDc0V1RHVmhLeFBoNmhCVzQ5NFVFU0xZNWI3NHBGRzZBRmNLd
k51dFFZTTdOYTdQOXBmMU1PeVJFV3VDek00NnZrSlp3UndYRzdLTSUyQk1JMlpBcXplMXhreCUyQlJVeTR5c1padGU1VFRCa05HbHZHYWFVUSUyRk5rVzhFUnNYVEFsV3lyME1TRDVpV1QlMkJrZE9zcnF0UlJVb0Y5b0FENWNrWTdtdUNTajd6WkxzNTA0ZUxWaUElMkZmdVBOendSYlhETjNVY3Q2Yk1iSWhvYW9EWGRSNnVRJTJGMW1mNTNWTTJ1MHBSYkNpSWVCQVlDb3FHOVdXZ2R5ZDdxWXFKSnQzODVWMWdvQjBCRVpvZUhLWExXSFFwTVpMa25iODRvZDU3OUx2T3FyTE9tTXBFZGZydmQ2YzV1cUlCMFp1ZExTN2lDcFZ1ejhSSFgwejVRWkpUbEo0UUxCbnM0QTMyJTJCZXg0cmNVaVAxbFE5U3pjZmQ3c0oxWTROcEdLUFh3YThHeHJRZm9jRUw4YlRMYldrSFc0Y2FsT1d0Y2lDM1FuTGpEMmY2SSUyRnh4ZHpQJTJGV21LRUZOWGJ5dlo5Y0YlMkZ2SVFOT2J0RXklMkZUaVN0bGRyYUplQ1dEN0tmY3BCNTBZemp4YlhOeWdocnI4VE1DUFdiclJ0b2FvSDlrcG1HQnRZRkVUVldPU2c5M2Rpb0lWWWt4eG0zNjQ5aFpQUWlhMmlPN2dkelNpcTZ6Wm1NRmdCdUxxSWp2cjJEaG5WVDh3Y1d6YnA3STFGZkFCaTJyQ3hnaG04cEJTRXIzdHVRcU1nZlZGa0gxcWFlbXN1QTFHTlI2aTZUbEo0MzNxRHRVb01LNDQwMmNWanU0SG5vQUFoQSUyRjVzYk1wVXNGQ3UxSkQ3TGhKN3VVUzhqenhiOWt2MmhiNkVBUlNEOGFYUzhVWUQ5a0hPZ2lJejBoQUxzdGlRTjNkRmxsTiUyQjhyamJnV2RsaFhjbUdId1Nod1A1akU2U2h1ZFJ5d245OW1Rb0x6OUZkSDFITlY1WmRrYkg2ZGZKT2l2UGxJVUZXSlZiemlTaThXUElwUU83bXBvbE53JTJGcE9ldkU5JTJGTDVacUxwTFRZSzJ4N1ptWUl6YVZma1BOaW1PMmpGRVlDdnVvRlpRS2FkTlYlMkZTalM5bmVSMTFIclUyWFdsaSUyRlNxbTVIcDN5dHJYQWp2SWVUNSUyRnNQSGpyUFdOcSUyRkdpbk16aUxNJTJCYzAyS21iYTYlMkZlcyUyRjZEUG43NjRqRTkyaHlzRTNybmo1RmNaRXVWM0RQUVBZd1VLTHZIUVNoRENyRE9HeGI2eVpuekUlMkY0UzdLd0Fic1pmSlNNWlZFbU5mUE5oejklMkJuaCUyRm5VUHFGeUFpJTJCZTZOTWFHR2h0eU5HSFJwY2pOb2hKNzZKam40WGRzSzFDZXBBQzFvWTQ4MEp6b0paUW1UdkV5dUtRdEN0RHcxYzRpV3hRRU5oTVpWdlZ4azFidCUyRlpjMkVwMVJwZWpNc0hXWWVjeTJzSCUyQm1oaTRxNzE4NTBYUjIyMEpuU2hNazg2bmR0UU1TWDdwYkZITG5ydWJzUFVKdGNsaEJqYUJXR2ZNaHNnQ2VJb1NWT25aZk1odzRJc00lMkZ4VTNwUHJldlBwdWRYQTRTU1NEbTFEZzNoRk5mbkJXcUVEWVpWNEw3Z3JMck9qcGFnblhTM0pWUzVhVVdyVlhwUzlvWXBDJTJCV1pTbDZzJTJCYWpoTFd1U05Ca2d0T29DdnVXRko2WkZpNHlwdlMlMkYzMFklMkZ1cExnM3dmdlpaVXF4aFZSdUR6aFlqWEdLdnFyTjlGeFFBWm4xUFNSN1N5QXFNZVNXRWdUUnVFZDF2R1dmRlQ1cFd5b1pyRVolMkZRV09acnRtYVFSZ0xwaFJlbjBKMFN4Vjhoa2NPMHBTMVFib2JKWlpualNDM0FUalFOUzFza2gzbmp4S1o4eGRidDg0bTdIb295Y2tHcyUyQiUyQjFKd
nBzM04zUjIyVzJhMyUyRmN1JTJGdEhVV2laT0VVWGdTcyUyQnJJbHB4NWxIYUlORmtuaXkxeVdHWXRRaGFDa1BNZkRzWmxlaEtKVGdKZyUyRjlhMUZ1Q3dRMUYlMkZsVmMlMkJxOGNPR0JtRGIlMkZxQUJSNHQyaU04Z041RWtJRVh6UCUyRmVoRWdKMktOcGw1MnRpTjVoS2hkWHNsRHgyYlpnM1ZvYXIlMkZ5RHZ3czBRUzJpMVVQNmhVVlNDc29kRFJpb0M4JTJGaXA4emc2N24waDhiWnFrVjBISCUyQkJaZTZxVzR1OFNGblZtTTRiU2NHZkdmWFByb2plNnJuaHQ2MXQ3MjNuNXM1TUdWZUFuY3RGbGFQUGdnOXhKV3BhVGRVSm56NWJ0VjE2U0NLbWxKeVRKRXYxTkVWVkhTNU9qOUsxbnlHcG96UVlzMnJ5SHhVYkRZWVg3cVNBR1BzZ0paJTJGcUt3eERWbHJnQVF6JTJGbnZ6ZUNQQ3pvWmxoZm9OeVhpcGVTNTZXMnpDalRLUFNRcXh6RXlNSDd4WUsxc1hyODZMSFF4UUc1TVl6YUxUcGJqSHRlSENHQnclMkJNNWtlJTJCa2J3diUyQiUyRjVFN0ZLSngydzBieXZnajVGcGpMN2NzNGp2b2dGakQ3aFk2VUZTRWxobkFLNmNqTU4xMnFXeGxPRDZWVFhIUE0yb2JVVnhvMTF1VWpTR3ZEcHl5a0VIQU9hZUNDZFFLYWtFQ0pBaXV1R21vbUI2NENHYThET1VqZGVqczl6YzlKNkRDOXRsc3R5UkJ3a0ZXJTJCamx6S3k2d0lPR0pFMHNZSEw2d3dHaG0lMkJORTAxcWV2NDF2REZONWtPcGNOdGN6WjZOT1hRRGxCUFNrRnRlbUlhaTVPYXJxdXdXaiUyRkt0RjJoOW9WT0tXRnAlMkJGaFdiaDBBV0slMkZUTEZ3OEswaXVKNngydGU5alZlRFJPcmNFczBtWWlGVXlLRm5WYm5zVXo4bWJUSnV6ZjUyN1ZoOTYya1pFOUJsdGJ5RyUyQmlqelJhRXB2c1Z0Z0MwTjlzd1czVTlrbzd6MnJBNE0zYVliUGFGQWIxTEkzblE0VVlqZ2hnZXRCUGM5Zmh4VHhhbkNZUXZvJTJGZTVEQlcxYVNJN1dRQlYyM2hEVGo4MGt0Zk1oNEthYVE2OE53MEQ3dnMyVWx3WUpYelEyMzlTUDNWZGg3MndRb3FMY25Ed3psT3U4Y0FzdWFVc3ZWSFd4QW02OTZqODRvek4zeTliVEVLa2JjJTJGeXZnQ2MxNU9lcmE4N3pBUlNjdU51VXpRVDloRjFYM2J4ZSUyQmxlbDBnUCUyRkphbUFZUnllMWklMkJJd2JFWU1rcHFvSG9MSUpjRHBwZjdYMnpiN3V1bHU4RFJBT0hicWM1ckhTc0NYU0M2JTJCd2tVYnBWenNwM2VVemVCVzd2aHJma0xBSGl0bmhCWDBFaTZ0WEp2Z01XN3hITjRYOHZLNDQlMkZ2clNtZHl4Mnh4QzlwVW05TndKSTI4S0pMNVEyZ0F0eGJZblpoeTFOQkE0d25nRmQyYXZqOGFzUHl3dGI0eXdvT3RWRTBoS1JiWFRBMCUyRjFGZEpicFpYSkk5OWM2UXdQeDdIRFRHNEwzSUM1R0VwTUFYSjlZbWhuVkxiYjE0VFhhczlNRXRoNW1hJTJCcVh2NXhWSnppMk1NNk8zN2tva0xwamRwQlhhSWlmYkZwQzRKTVJ3SVJNTkJSdHJCOXc0diUyQmZvNDhudE5tRnFXV0VLdm1lUGpxNzFISXgwSkRPRTNoZHVER2xYYVpQUlFEQVkyR2V5Nk11ViUyRjV4VXJyWElxJTJGMHJwRXZBMDJvRWp5clI2c1c5WnZDcTdydU1IdnJXMWlTZiUyRjJ2VHBzUHAwU29wMklNY3ZVaFJiamo3bzVqcWF1VFdyVUlpT1hOJTJGSG1YdDRscXFqbkxjT
Vc2WmlGcCUyQlVBblJ3OWdBZ2s3bkg3c2cyb3I5V3JaWlRoSDAwb003UlVZWFM5YUpOM2JhJTJCTHRpaWpYMHlNeXJmVFdKRFNiOVdQVUs1TDRrV1RoRmg0UDJVMnNNWk94c3lWRVh0MkNtcTREJTJCNUhZSkdoYXE3OVVGMFlpN2kzdElzaUE1VTJ5VEJ6d2hTMkR4d2V2S2NGb1VrUTlhTG9IMmZ4TU1aVFExOSUyQlpWRlBBOVVzMzlWdXBXM0IyUyUyQnJlUzJUUVdTeGRvUnFYZzRNY213YklVODRuTGZiRm9MJTJCaGcxaWY0czh5dTQ3aG00N0hqZk91MjJURTNBQXU0UElqUm1MemglMkZVdHVGVUw0UWpRbnFWeUQlMkZVMVZnNkttWndDRGZ5QjZ6MVYxMU9zSlBLcURweWVZWmQ5ZHYlMkZ0VVN6TExvbCUyQnp0Q1BxODA2UWFjaXNaZWlzdSUyQkJaN2dvempYTFdVQTkyTnhDMWlXQUg0JTJGR0NCV1cyWWdsUmtCd2lMMXdIM01kNFBHZmt0S1FXT2RQVEJ6STdYTE5CZktVNVMwaDR0dEprbVVQbXpkaGRHMUIlMkJqcENOdHlVNmJRZnR2ak1IQkx1dlNDYnV3aG45OVprRCUyRm9NWTQlMkZ5cmFNcXRtVEw3d3F2Rzh1bEpJcFolMkZmVkxRWDFHWlQwM3NiZTBIaiUyQjBSbTVmVjJBTkxEWm1INDRSZ3hmbGJ6cGJ5bUlra0FudnlXdExsWk80MGVSV0FVMFBGWURJQ2xxQ2wlMkJ2JTJCaTFxWkk3SzhPVUlHak1ZWm9LRkN2TzFCQVJtd3p5Mlp5YnhWMHBZSiUyRkttOVcwc2lOWlZCWiUyQkl0QWU5OUxmcVJPMTRCbFpTNHMydWZyYngyM2lJT2cyeEFsRW9JUElWSXpmaGV5RWcyZ2xUZXl5MzZZTlZRZXdDQnhLOTJCJTJGaiUyQklyQmJ0ajF6T3p4WjdIZTdkemJMJTJCWDNkekw4NWhqNnJIcFVxRXRRNmJhVjNOdHhwM1NSTTFoM3c1YjNhZ1UlMkJRT0RreUJ0ZVR5eHhLcFBtQWc3NFklMkY2cXRsMk01dDJlS0l5YWw0OERWZmNzNWxTM0JySW84b3h3MXRrS2xCcUdOd0xOUUFYTExrbTF2SEMlMkZaekh0S0VpJTJGRUhtVnNLV3VlaXNLVFpVQmJTVDZOeENQaCUyRmxwZnhSSjlFQSUyQkYwUFp0NCUyRkpZJTJCME9QTnlhYjFYOU9qdTJDWmR1TiUyRlhRQ1Znb0h1WTlQWVN3WTc0YkklMkY0QzdZRFJCTTZ0bEdyejZIdmNWeUdVa3phNmJsN2NFSyUyRmxyRXVhaHAlMkJyemwwZ3NxVjhab0tOZlNLMEQ3cyUyQmZxSFZWeiUyQk14Skh6SUxZb0plJTJCaFV3RG1HbEdFMTdRNVF5U2RuYmxmRGhISHROOHpJZFJ4NTFIVmhqVHA1bllIWGhwZ3VWRTVMZUZEMnpydjFZQk5FJTJCMk8xZjRueDkzOTY4Z0JwNGFXSFVTTGVDOVQ1bHdpUmxRRG9PJTJCeE03MkJkRWZYNkJNMDMlMkIzb3A0OExiNHp4TnNtV3RQU0h2dHl1OFJobXI5OGZ2aWlNcmQ4TGJDSXglMkJhOFM1YU05JTJCVnhjdkZNeTlYaXpDY3V1ZW5sMjN4JTJCbkJkbW40cmNWMjRvYnJUVnRXeHM3QjFYZllFaXElMkJlQSUyQjlycU1Rb1dsbkN2bEVtYVJVeVdSWkwlMkYxeFlabVlZWWFlZ3NyeTglMkZ5JTJGYVhXeXA2cXNYdnVsaVcxdmFndGp4NUMlMkZ6SU1aN1N1MDJzbTh4SEVsaG1qSUVsdlcwdXFsJTJGb3FDaDBOdzlxd01jcW5CSmJUWmNHWVZNdmZSczh2SVNzVkdXSnV5bUVDV2hVeWYwa1NSTGFpYzBWN1MxT
ktCVzJWayUyQkpKNVBXOGdwYms3dUFUZDVZQnZBVlRpU3YyWkFSYzVUTWZYVHdoWEFhdU1PazdLMmxmJTJGWmg2JTJGZG5iVTJCbmtqVXEyTUJxYzBNMEEwWnY1UTJEYVdSZ1NUV1NMaEkyOVByZzY1a0pLMXFjRVBKNjdxWWpsVjByUldqOTkyM2J5c0l5bnpHOTBtSWFNRU43JTJCVEgyakRuTWF5dTl1c0k1NlNEUVZzJTJCT0ElMkZ4M0FXd3dkc2tjdHRvOHREM2xCZW5JWFoxUDBGNndKdnpsN1Q4dmtFR1NFN3lkUU1MYWkyNTdvZVBZT0ZIempnQm41Y2VrQUpMQ3Rkd21XT2NjUzFBNHYzaEJoU3FyNXNxT0tIclBlUjJVMEtPUXpFVlU4NENwSGtHMkNaZFJJOFFBUnNMMG9USzhENVVKeDZLcWFJcWJaVzJ4TThYemhzb0xJWmJBa2hDdFNXNUFqUVFGeW1xeUVQc2F2N2pvaTFVcWtiNWhtN0JWa2wlMkZYUGRZWFhxUzFxRHNsJTJGVUlISTV5b3ZXSDE0eklXViUyQlkwaldHbUdybSUyRiUyQnJzRlNRRGlGOHVDQmNJZ05NMGdtaFVkJTJCJTJCeEl3Z3p2SSUyQjU4ZmU2QWxUJTJCT3o2c1hJcmJJY3BYb3ElMkJ1QUs0QTZYZlZhM28xNjg0MVBOTDBSY1h0Mmx5dm4lMkYlMkZFZHRWNndSZHRFbXd6MldnbUI4MTFRZEZ6NTB2aVhiS2NjcE9vQkp6dkZLYVJTczViWkZIM1hVeGRYUXA4VVFoS1dWTVhQTWU1YTA1MkJXNzNjdFdrciUyRnpZNjd4REl5cjdYTkszWjAzYk5udE5zJTJCVGVFSk8xMUtXVFgxRHNweHdQUWFmYlFUSklnTXFBaGRUSWpvYjhTMzNlNXVwMWZ3JTJCdGhJZzdEUUJ1SWV1RkVPaVplU3B4ZXVmcUVhMzNWd2dpYTR4Z2ZsWElidENJc3ZubDE5Zllpc0NoQkJJazN1M3pSRW9xJTJCejhsR2hRdjE2S1RiblVlTHBiN0FzQXJJMCUyRmpGblpkcnpuSURENlJXb20zTHBzNVpjc1pZc0pXdnRETlQzJTJGRktyalF6ekVNYVYyRE9LYzU1JTJGZ2huSmFhUVFVZ3N5SWxBV1Z4UWVHQ1JMYkZ4VkJqczd4TE5yeEptOVZBcjFjZ2NLZFpITyUyRlhPWUg5d0RlYjd5V1I0bTJrRUJWUWVkU0JCSzJNY3NhWHBZWlNaNTNqSFclMkJ5aXJBSU0zM2VMSkl1R2J3b0JzTFJHeGNNbDZuNHl2JTJCakgxeTRaVlE3WTN4MHpSU0MyeTlJN1dvWjNKMmhiWUFGMTZScmpoNVUlMkI2YndPJTJCTUslMkZJSXlaTWpNenczeldSZUN1Um1TS2R2VWRPSjNLQk1iQVNzS2hlTWZNNVN3U2JYWHQ2N0dDOXNQJTJCa01HUnJsSEVqMmQ1QVJiTXhmTlhiQXpGbjNyb0pZWHhleXRHb2Ztd2lLUXRyOWglMkI4UVdzRU1WaGdmaWwlMkZQM21jRSUyRmp3VjVndGZKbTh2YjJGRzlkUnJUbm1oVlVVQkJCTEZ5Y3NkY0NFamJ6TmtxcHBrVVlNaDR5JTJGajRISDBnQ2xkTiUyRmZBZm5YMGtWTmhXY1h2JTJCeGlRUTBHQmJSTkhTc0loSXBOdU9jWEZuUVNIZE1WJTJCbjZFbXJ5QjdkJTJGM3VMMTV2Zmk2Mm1aZXB4NlcydE04Mkw4bE5ldTM0Nzhha09CN3RPVlVYa1hrc25uUyUyRm9WYzEwTzFwWlEzNSUyRnhBSE9sNGhIYUxFd2dmUUJodkxlZ1UlMkJtS3E4NWpZeGZDbCUyQllDMFYyRTJLRzlGMzFvMFlTNXJQbFE4V1pPeU4lMkZIUHB4dUx1M3dnOHZ4QlFGZ080UnRYN2dyc0RlUUxkNlJwMFhDe
W56eGVyOWltc1pEZXElMkIlMkIzc01aeFU5JTJCMFk4eGZiaWc5MU5HcjVtT2l5WTRjVHp5cU5rbE5sN0VHJTJGWVJseGNBdUpPMjBLJTJGM0tnVUpTWFczaDV4JTJCUzZhSnE4VyUyRlBYNDg5YWFPUEZQUUpXVVRWZjVDNzRRdU0wN0VwekROalp6Um9VUWxQcUVXS0lvenpKTHVJWERIa2dKZHhYdksySFhGQ0dTSmxJRzM5SVNyRmsyVXlaJTJCM3pOJTJCSCUyQmlrYjdjMDU2bnRJSjUyNEhmJTJGNTZqdEtOdDYzYzRURGo0VEhEQnduWFNLQkttSHk5dkZaZkFRTjQ4U2tlWGNjY09WOVJIdThtcyUyQmZkYW16bnhsVFFUaHE3VWRGSVpZOUo0T1hVaDk3Y1ZYVnRUcHFGU0dFVjFEMiUyQm1vTFVaM3FIaXZVeXlkNXZoJTJGbDI3akhBWldMbTgwUGk2NnFpaWd2amIxTjZ4d2k1NkFTVk9oZW5oeEFmVDVIZURBR2g4cklMdzlaeXRnMjVsdHNadEpWdGElMkJ2Tlc5TjZaZ0NlbXlERzNzUGozc1VRJTJGeTFvVHFudUhJYkxudzVGeVBzd0dTaTl3eVJaOURqTU4lMkZkenh3TWZOWVZJa2ZIMTN4Sm5MZENaR0JmbTk4ZmxibXdKUHJobXdJdEolMkZtYnlIU05FNjk5TXV6SEJQQkUyMkVBWkJpRnlIb3p3QWxpbXpyTDRnV1BxSVBxNTJ6MEdkS0hWa0VnQWViUkZRZTJrNCUyQmQxczBDN2M1NXJjanZCRlVKUUU4ZFpsandITlJybjdTNkVqNjBPSG9NZmhLdTdGREVUSEc1SGZFQWV2bWRzazUzaVBxR2N2UDNLNGtpY2xrM21XZ3RzQzdpU2F4Wjg5MTJPbHJlWldDeHBlQVJSYW83UnB0cGRLZGxrQm84RENjVzdhS29kcHJlJTJGVzZsYkk1aDZKbGhtUGZiOVIwMlVDS0pFdE1KdnRKc0tUTjd4ciUyQmwlMkJheFBWelk0YW5FcFZRelVrb01TMzQyMVpuNmI4RmFGN1VrVzJhbVpZZGdQJTJCJTJGVU12Z0xQc1JDa2VyUjBQbGIlMkIlMkZOa2lzRkk1QlR2MU02bVB4dXhZbFlkTFZCYXExOHloZGNDZ1llOVlEJTJCS1klMkJhbFRFcTcxU1ZOJTJGTjNsVjRlVVNXTWdqbnVxalo5MXJiUnpXODhrdGJZVHZDNjFQd1BYUklqYkhYNGNoNEp5UTUxOEp4RGlYaVRTTkpQTER5R0Z6RGV2MnNYWFNRSFh2b0tVY3FMSXlEREdMa2RieW5kUHN0JTJCZExEWEtIUllXQzlldXkyQlhDRUglMkJpTkMwdHh3QVJRciUyQjVHclBvMDU2cUZVSjJyalR6bjVDRXh1bEJpdENDdlhWQUFuRkVWZDVibjRpWlZMNVFncjZGWURwZ2lpU2NzZ3ZPcnp1TlV5UW1ST2pkSUNyQUNlS1Z2SFNxZzZSMjUzSXNSNFY1RW9zelZuZWtzSWZjRU9DSEZ0UURGNEpjTmpmRFg3Sko3NE5jN1hlS2NvODVpSWhnSjZsdFolMkJMb2didjdlMWRwVUVzNU96SWUlMkJEZVF2QzBsM2Q3czV3OXFIOVBHTndlV2o4czMlMkZScDMwVUtMTW1hR0hIcEclMkZiVUFRVVl0TkVVZUZIJTJGc1owa0R0U2xiSGxYUW5mZE52dndhR3RBMHl5OTdreUZ0a242R0lLS0ZCeTglMkZFbzl1UExKTWU3ZlNEekZBRUd4dzFPOW5uQjFyVkhqazdTUzY5M0drd1l5TW02Q0FkUXpXbCUyRnpHbW5NciUyQmdQNlBDRDlZbnVKNHpoWExZJTJGTTN0NkJSell4Vm1hUXRvajF6VFFjdDEya05IenNuZ0Vid1M0U1pkVnVpMCUyQnJ0YmJKVTJjUElVaVBRdlBVY
3RIemhGRzlMNUxsVjFOV3lWcGhOOThUeDVEcHZtUSUyQmFpdUc0OWJSU2VMZkszQ0xzMUo3SGtxeThBRkFLT00lMkYlMkJsYkloYzhyTVpqMVFWMWJDWjhoZHhLRTR4WWVsV2R3YloyOW9IMlNRcHBCVnVrODY3aHNLV1k0ZjRidk5nUXhjVmQxYlpjaUVvMHRITFZPeHBRbkZBUGo2bU12QnF6U0szJTJCVTB6WHNIVGFlNWxLdkhnT3RNM2ZUd2JMWVY1NSUyQm9ERkthdWNoMFBUQkY5bjlTWHpLZGVrOTV1TnpyczQwMHdybE5IY1RxZlJmM2FoZiUyRjBaUWhwbDk5WmNxaVNWVTROemRIdDJNJTJCSVBkVGxkV3I3NDQwTndaZkJjVlglMkZzUzNiYnhzVGUlMkZmSGdZUWExSzk3UDM1N3klMkI4Tm05d1Q2SGFnZkd2UVJoMFplSUNMOHkzUHFldUQ1dWRYM2RIYjJXNlY0b24lMkZ4a1NxYjVVQ2tSTFZxc015Ujd2OGJ4Q3FjV1lPNnV0RVZPODg2JTJCMTRZNyUyRmNiVFIyQmVid1VPYzI4SzVucmR4MnJGdWdHT1I1REExVSUyQnA5QnBua3JaTDJLQlRmdnA5byUyQmYlMkZBQ3BwNE93UHptTjU4UzNMdk9TS1NTZ3VKT1FEUTNHT0dPdTNPOGFleWRTWFI4VUgyJTJGUmpFYjhGY0U4cEw3ZXVCRmRUd3lFamQwWjNTUVBWeGVjRjExVFBGSTRQY2ZpZjZFckc4dE03ODl5Skg3c010U0ltaTUxMlY2SlZuRThMcDlZc3dnaXd5VkNRNnBMbXZpUEElMkI5UGI5elEwMEMwaDdyUFJNVmJVYlVzOHNZZEE3Y2NQcjY3alIzajMwUEZ3UER4d1ZpaGNRVndzZiUyRmx0YyUyQktidWklMkY0TVhaWVd3ZWglMkI3ckVaeGc4WnZPNzJlWWJZdlJBeDBwV2Y5UU1teHFLckZ0SjBwOXRGR0ltTnVWOWVGbjF4dTQ2UnFMUXFBWmM4ZGxYbENtdlZDTWdTQml0UnE3ZmpNMFhOeGhNcXBsQ05VJTJCU0RsSGQ5QVcwMU5rdEtweVg1dkFDMzcycXVrSUxyUnpKQU16Mk9ETTk2NW5DSUVkeDU1VVBoVGpTNUw0MXRob2tKSXZGZEhGR0hTSWttamxia1paWlBRZnY4ekg4N29BbkNIVTl3N1ZUa3dCd1lLV1JydXU3cU4wZlBLNmUxWGQ0Wm1yNDFNTmRkS1BwNWhvSXZpalNUUjFnRTFMMjB6JTJGRnU0akl6UFZVV1BQWmpzREpEYmpFWE1vUFIlMkJNRE80MFByR3BqTjElMkZyM2k1T1FjcFAzbHllOURyM2N5JTJCajZFJTJCN1BIYiUyRnBXTFVuNm5rWWxVdW9KMm5HUkZXUXZqRFpJJTJCbFZUWDBLalF4eTVrM0Q4WmpxZU5GY084RUtVZXJ4YyUyQlA1aGU2aWNCUVlDZlJFQ3hPeldSMnZheXFpaUV0OHNrJTJCbUtLZEExSTAwcGhwa3o5cmxlVlVNN1lKaGgzcnIlMkY0MTFiZTFtVFpRdFMwNjhSRDljRmo3RlVEaWF3VWpuQUhOYWJyUllXdmlJbDJRWkI2SFZSN0FuVkZsREtSJTJGUzhsaTNFUVRGcUdMakd6c3JXOVVDNkpjU3JCZ3NjMnppQWI1MHBLT3J1WmxIUHVkNlNRVjJUdDh0T0tPJTJCeVF3U3E4MnZZZUw4ZTM3a2xjYnVyMWI5TmRTdFZ0eW9iSVcxeTRkRlh0VHFwcXFEZkE3REFzNVU5T0dHMHJBSDNJVlRtTzNEcFpsblR5V004T3E1REMlMkJxJTJCVVYlMkJLJTJCS3hBMm1OQm1Ka3J6TmhCMVBJTSUyRkNDOHo4d25JYU9qdldUdGU4JTJCdUxHU1dnU2pOJTJGa2RCSHl5T3BWdHZhNDh3cTYyaFNwQ
2VkVll2OVlUcW9lJTJCMjc2clpleEpheDhVeHdiQkZLcXpzJTJGYkdCWSUyQmxNbTcyUEYzaXpia1lFJTJGUXFWd0xVYmlVJTJCd0h1dEVoaXd4NEdhWWZzMnlkUEVQOWhrNXBpJTJCRTlYeEJldXVVUkZ5Y3lxdXFNUGQ2ajZNRXdBVkFNZ0lGc2lBdSUyRkRQb2hmM1pTUzFTckU3Mm1Nc1YwUGpVZUJRcCUyRnNBMGdDWmMxWDJaZjZOWkxWQTlJc2QzTjZaZkZNWjAlMkJBRUlLMVREUGxETDNwcGd0JTJCUGolMkZnMEdLM1QzZ2xoY1pRalhITzU0eGNDdnNLRzE2SmlibW1aSjBmTlc1TUQ2MVM5c2IlMkJVQzcza0dGcCUyRmQ2WkpBYllvTVc4Uml0WHU0djlZR0RESlQ0TERRSHZMSDJpMDNuZ3RIRmpIZE90cjlkRVhHekhzNVBjTEdsRWZReWNuVXdoZ293UXE0cDVqZXNDWVFSM2MlMkIxTjl4TVg4S21EaHYyZSUyRlZUTGR1TURzUiUyQllJWUhxcTJ3T3N6MWVKaHcwNyUyQktuZnklMkZIRiUyQmcyTDFNNGsyZ0tUMExpQlNMR2F6bE5tQm1BbjBHQzhQUU5JSjJMUHVxR0ZhdmpIV25CdTQyamI0VGh4Qms5STJCNTY3TXkxUUtYOFRQMVloREY1aEVhTHB4c3Q3ZWJRTmsyclR6dm5VRHQwSVhaT2Q1Ujl4R0MzTHRhJTJGZW9VSllKY3BVU2hac1hvU3FmczBjUTJHRVV5ZFV1MmoyUnE3eWxvYmcycVBpZmZBWHNsOGVUeXdsb1M3MVVoS2VoeW1TYjhicEl4aVJMZUoyWHkxenVjUEY5em1FZmJZTEpyZ2NOU0UlMkZ4c2NlMkRwbXlhdlFYTWhONkRWNzlLaWU1QVIwY2dCdGRyTjN5dXZSbEZ0WnU1R3FiNXBwMW9qaUt0dWVRVTRlYyUyRkVUanpKQ0clMkZ0Mm1pN3RHaEppb0pHJTJGNHdZS2c3M0xRN2dsOG1vREtRbXRYRmZiZXpqSG8wcURQR2J2bSUyQjhsZ3BGNENKWms5MFZiOGVzT0RESkp0SUc2SXY5eVROcnV1Vm5hU0F4WTVldlJwRjFQTFBuNmZqcjZ1cDZBeW01UHJHbFVHMDVKRE4yaU9yVU9HRGNaJTJGVUdpd2FLU0psUWhGRTloQ0pnbnlNM296dldLNlA4YW5TdWw0WTAxN1FPZzdZMHJ1RDgwSUhsJTJGSDE5UEtQcEdoSGExQlpKSFVlNmoxN1FsUnpNM1c3aUpKMHk3Qkl0bEFIOSUyQmRzSUlTVFFCWVo2JTJCeDJJYzRxODBYc0VqRjRDVUFJWndXOUpRWE8zam51VmREYmdPWTN3VEV2OXUyOExCQVZFJTJCeVQlMkZQVlFHUlFRUndDUm9oZXo5bWZQc0ZkTWpGSEVPdFZoTEwyY0dEbHJCVnNsNGklMkI5JTJGTnp6Qk11T3R3WENaS2l2cTI2ajhwSEVMbXppNkJhJTJGU0NQa3ZIbSUyRkhMZSUyRjhpZzQ1Sk5zMHk3bDZtMTA0S0VWUkJUU0VieFF1REFQZ2JYQTdnWDNmZnJJWDVKNFpaMDA3MmVBJTJGMkcyeXpxNTMyeFp2JTJCJTJCcCUyQmtpaUQ2WE54WWlRVXNLM09WJTJCOXF1dU94Z282dzd0JTJCMW1LeiUyQk5zeWZNdTlRT2xkS09BRzhsVWtzeUx0S2xsN1ZjcnlwbXhVc1FzSFFpczdDdG80NTF6JTJGdUVqZU1RUnNoYVJxdVYyOTN6MlBhSmR6dGwxRFhBZ0l4dmh2eW56UFZ4RnBUT2ZnSDUxWjFLSmwxVjkyT1BUUzVJYXBuRkIlMkZxVGNTS2pkSWVBJTJGODlpc3VJNlpzdkx5MCUyQmpNeUJHQkZocEl1VEslMkZOaU96RDFxOTR5cGw0c2t1eko4RHZORXZVN
VlQZDExM2VXV28wY2lLcmFmU2lmJTJGMHV3cTZOTkZWcW5ybXBMJTJGJTJCeGt3ekZCcWE4cEc5JTJCTjdGQiUyRlQ2aWM0eTI5WFA0VVB4R2V3OHd6RmxXMFdwbDZldkpHS2xNV0pBZ1YwYllzNGZRRFNwUnFhTXY4TURkMUQySkN5T1N2WXJGJTJCSldLc3RvSjdxcEpJMk56S29saUhFSlBWM0xVR0J3THlVcGFDN1NoUm5PUFRadzFuYkhHWUZOa0paRTJqNXBOR0JLdGhicERlTTVzZFBCJTJCSFlzTEdNTlF4M0NEMXBaJTJCQ0ZzR2g0aFhac0ZHcEJjZ0l1dUJNTDFsN2xxdFNVellPSXRCcGZwciUyRjVLaW9pUU5FOXRJcWFxTGZQbkFGeXFtT0g2JTJGVkg1WnZyUWRZdXhkZ0JBdkclMkJCN1VuTlJKN2RoUGlFSUE2ekVKOFM4OVRzUlpWUjlhZFg0dVU0b04lMkYxSUFwYjVDbDglMkZ4QnQwcFBMeEJ5bXFkVjMlMkZONVQlMkYlMkZmY0kzek5kMEZtJTJCOSUyRklDWnhBRSUyRm5QcUdlJTJGeUFjZSUyRlV3ZzRoUXIlMkZEZVhHU3lybnNkemZEMU9FJTJGbllERHYyNTVmNXpERnk3NFBEVEZudnoxeVVRJTJGUzlnQXlNNDNaUnQzZnoxWVBTdk85UHR6M0g5cnclMkYlMkZwZFQlMkJQdkw1WmJ5NGNoaiUyQjlnYSUyRjN4R29MZjU2TnZyWGQwaUhvJTJGeHo2cyUyQkpiYiUyQkh2MDZVUlYyNmZ4M083NzJaNjNsS0IlMkJIdlo5bjNmRXhGQ1I0SlBVZCUyRnYwYWY1JTJCVTVDVDhudTNMZmI3Zjlnb2VreHo0JTJGcDVwOUhQNzY2N2ElMkY1NzdrNXVIQkN1QXpVZWozNzElMkJIR0x6QyUyRjNxQW56ZWVqM2YlMkJ0NnYlMkJ1bXhQMzNYNTEyWHclMkYyUWkzdVdRN3UzNTc1JTJGJTJGJTJGMmxNc2Y4M3h4VCUyRmoyT0slMkZwOGFVdnlmRENreFBHJTJGQlZqTmdvZEQyWnhpZXMlMkJzeGclMkZQdzMzOTlmcXYlMkYlMkJ2bTdaVnZTNmQ5Tng5OHVCTSUyRjY3MyUyQmU5Q0I5Q0VhQkElMkYwZm4lMkJLVTJ6M3RUUW11ZSUyRiUyRnRrYzhYJTJCUFBVZiUyRjlKUU9IOTN1OXZwJTJGOUJEUDQlMkJ5V0RHUGsyN2wlMkI2UyUyRmtiODgwNlhmeiUyQmhWVHNNJTJGMlk2cTZwQzh2eWZUWFJCWkFSTyUyRlBWOSUyRmlabDBIJTJCQnNzSWg5RjhJaFA3N3YzJTJCbnVtQVklMkJnJTJCNml5YiUyRkJhZiUyQm8lMkJyQyUyRml0VUYlMkZIJTJGNWpLRCUyRm9ucXd2OVByVFB5JTJGODB4cGY1dmppbjF2OVZkJTJGMXRGaFA4elJmU2ZVMmVmdjVZZGVNNDB2OGQwJTJCSTlQc3BwMEslMkY4dnFySUNMNmtDJTJCMmNUVHlFWlN2eWpLb1AlMkZDMVRabzhYJTJCRlZUOUwlMkZRWGpFRCUyRlAya3YlMkJqJTJCeDBxYUNlYiUyRm56M09VRCUyQm0ydGZsJTJGZHBrOGclMkZDJTJCbzc4VzRPOGdCZ2YlMkZndiUyRnRrTCUyRiUyQjdSJTJGNSUyQjI5SFY3diUyRmJ2c1g3RkVGZjQ3Qm5mQyUyRlFDVCUyQjElMkZIZmJ3VUglMkYlMkZaT3EzeTN6MENVNzclMkZPJTJGUmRpTiUyQnFmVCUyQmElMkZtU244bjB6VTM4NzlwOWYwWDU5Z3plMXZZZjRsS3lqNUR4ajlIJTJCYiUyRno5diUyRmRkUGZSZUElMkZQQWREJTJGdjF6JTJGanY4RHclMkY2ODVYJTJGdzRNZUlVanZmM1BaQ
Wk3WSUyRnVmdiUyQnclMkJVNG04eSUyRkQ5N3JYJTJCOEhQMTNsNE5VcE4lMkZuJTJGMTJzJTJGM1VDJTJGcG1rQTU0OEElMkIzeTk4dWY1ZDhZYzFHQ0slMkY0SCUzQyUyRmRpYWdyYW0lM0UlM0MlMkZteGZpbGUlM0W4cd7LAAAgAElEQVR4XuxdB5gdZdk9U2/fms1ms9n03ishJCR0FJAmIAgoqKAoxQr8IogNBRQrIL0qVVGaCErHEEglvW/L7ibbd2+/c2f+57xzFxJaQkg22ezM8yxsdqd83/vNnT1z3vOeV3Ecx4G3eRHwIuBFwIuAFwEvAl4EvAh0WwQUD4B1W6y9C3kR8CLgRcCLgBcBLwJeBCQCHgDzbgQvAl4EvAh4EfAi4EXAi0A3R8ADYN0ccO9yXgS8CHgR8CLgRcCLgBcBD4B594AXAS8CXgS8CHgR8CLgRaCbI+ABsG4OuHc5LwJeBLwIeBHwIuBFwIuAB8C8e8CLgBcBLwJeBLwIeBHwItDNEfAAWDcH3LucF4HeEoFHHnkEl156KTo6OnrLlL159vAIDBo0CPfffz8OOuigHj4Tb/g9IQIeAOsJq+SN0YtAD4wAAdjjjz+OBx54oAeO3htyb4zAoYceiltuuQUzZszojdP35tzNEfAAWDcH3LucF4HeEgECsCeeeAIPP/xwb5myN88eHgEyXzfffLMHwHr4OvaU4XsArKeslDdOLwI9LAIeAOthC+YNV1KPHgDzboTuioAHwLor0t51vAj0sgh4AKyXLfgBMF0PgB0Ai9iDpuABsB60WN5QvQj0pAh4AKwnrZY3VkaAAIwasOnTp3sB8SKw1yPgAbC9HmLvAl4EemcEPADWO9e9J8+a4vtbb73VA2A9eRF70Ng9ANaDFssbqheBnhQBD4D1pNXyxsoIeADMuw+6MwIeAOvOaHvX8iLQiyLgAbBetNgHyFQ9AHaALGQPmYYHwHrIQnnD9CLQ0yLgAbCetmLeeAnA/vznP2PatGleMLwI7PUIeABsr4fYu4AXgd4ZAQ+A9c5178mzpvj+tttu8wBYT17EHjR2D4D1oMXyhupFoCdFwANgPWm1vLEyAh4A8+6D7oyAB8C6M9retbwI9KIIeACsFy32ATJVD4AdIAvZQ6bhAbAeslDeML0I9LQIeACsp62YN14CsNtvvx1Tp071guFFYK9HwANgez3E3gW8CPTOCHgArHeue0+eNcX3d9xxhwfAevIi9qCxewCsBy2WN1QvAj0pAh4A60mr5Y2VEfAAmHcfdGcEPADWndH2ruVFoBdFwANgvWixD5CpegDsAFnIHjIND4D1kIXyhulFoKdFwANgPW3FvPESgN15552YMmWKFwwvAns9Ah4A2+sh9i7gRaB3RsADYL1z3XvyrD0A1pNXr+eN3QNgPW/NvBF7EegREfAAWI9YJm+Q20WA1Y933XWXx4B5d0W3RMADYN0SZu8iXgR6XwQ8ANb71rynz9gDYD19BXvW+D0A1rPWyxutF4EeEwEPgPWYpfIGmouAB8C8W6E7I+ABsO6MtnctLwK9KAIeAOtFi32ATJUA7O6778bkyZMPkBl509ifI+ABsP15dbyxeRHowRHwAFgPXrxeOnRWP95zzz0eAOul69/d0/YAWHdH3LueF4FeEgEPgPWShT6ApukBsANoMXvAVDwA1gMWyRuiF4GeGAEPgPXEVevdY/YAWO9e/+6evQfAujvi3vW8CPSSCHgArJcs9AE0TQKwe++9F5MmTTqAZuVNZX+NgAfA9teV8cblRaCHR8ADYD18AXvh8D0A1gsXfR9O2QNg+zD43qW9CBzIEfAA2IG8ugfm3Fj9eN9993kM2IG5vPvdrDwAtt8tiTcgLwIHRgQ8AHZgrGNvmoUHwHrTau/7uXoAbN+vgTcCLwIHZAS6C4CtrmrBorWN2LilHa0dCWRt54CMZ0+flKIoCAcMVJRGMHFoMWZN6A9V2b9mRQB2//33Y+LEi
fvXwLzRHJAR8ADYAbms3qS8COz7COxtAFa7LYq/vbIR0UQGIwf3RUW/QhRE/NBUdd9P3hvBByLgOA5iyTTqGzuwubYZW7a243NzhmDW+LL9JloeANtvlqJXDMQDYL1imb1JehHo/gjsTQC2dH0j7n5mJeZNH4aJI/p3/+S8K37qCBCIvb5kE0ZW5OO0w4Z/6vPtiROw+vGBBx7wGLA9EUzvHDuNgAfAdhoibwcvAl4EdicCewuAratpwy1PvIOTDhuPAaX5uzM075j9JAK27eCZV1dieHkEJ84Zus9H5QGwfb4EvWoAHgDrVcvtTdaLQPdFYG8BsJ/e+xamjRuIkQNLum8y3pX2WgSS6Qwe/fdSnHXUSIwdXLTXrrMrJyYAe/DBBzFhwoRd2d3bx4vAp4qAB8A+Vfi8g70IeBH4qAjsDQD24qIarK7pxDGzRnmBP4AisK6yEWsr6/HdL0zZp7PyANg+DX+vu7gHwHrdknsT9iLQPRHYGwDsF/e/jTlTh6F/iZd67J5V7L6rPPjMInzthDGo6Bvpvou+70qsfvzLX/7iMWD7bAV614U9ANa71tubrReBbovAngZgLR0p/PrhxTj/pIO6bQ7ehbovAq8t3oRBJX4cOb2i+y7qAbB9FmvvwoAHwLy7wIuAF4G9EoE9DcDWVLXimTercOJhnj5nryzYPj7pyo0N6OzsxLnHjt5nIyED9te//hXjx4/fZ2PwLtx7IuABsN6z1t5MvQh0awT2NABbsr4RbyzfimNnj+nWeXgX654IrK9pQl19E772uXHdc8EPuYoHwPZZ6HvlhT0A1iuX3Zu0F4G9HwEPgO39GB9IV9gfABirHx966CGPATuQbqz9eC4eANuPF8cbmheBnhwBD4D15NXr/rHvKwDW0tKCoiLX/sIDYN2/7r35it0GwLZ2JlEa8e8Q6/ZkBvl+ozfH35u7F4EDNgIeADtgl3avTGxfALCnnnoKZ5xxBi6++GL88Ic/xNy5c/Hwww9j6dKl+M53voOTTjoJd9xxx16Z79446T0blwEaz+wAMQ1IKDD8WVT0iyJqa2iMBfgbwFFkF9l0G8gqgOoAmgNkVfd37NPJnydUqEkHwdIkovHc3/CYAvgcwFIA/gnn/l3H8/r8N4/t6vWpOlAcYFBBFIV6BklHxdaUD20dfoTCacQcFXan4R7TdTwPyHIcivt/C1AzgK0BSlYE7HJ+R1FkOryUmnWQ5e8DNpywI/PUVRsjIlGZatdwKqMhxGOGO85M1xwc93v+jF+cl5ELUoLxgRufgA2kVPdkqdyFu+LVdayeiy3nkFGgZACH8fID5496T8PabQDsM/fOx3Pnzdrhnrvs6eX4/QmeoHZvfBC9c3oR2NcR6G0ALGtZGDPA/QN1wx/uwclnnPuhS/D8M0/g4q+eLr9bXZuEpnc9rffMir364r/xtS8ej9se+CcOP/r4PXPSbjjLvgBgnNawYcNQXV0NNgv3+/0oKCiQYoBEIoFXX30VBx3Uc6pulbt/BeQTqdjA+gD0ZgX+4hQOn7cFCah4ubEYlqW5ICujuoAimAYsFbovA8tWgZgJZIhicuCjzkSkPovQ3FY0bC0S4KHUqnBKbChJFU4gt59pA6GsnAtxFbBzII2/9mWhZxWcMKYK44JRZB3gtdZiLFjXD7NHNODNZBip+giQzgEdHqMTQBFkKUBaBdoUqC0QMKM6DmxHgY+AS1VgqQ5sQ4Ev6SCrA0pZGunhtowl4k/jnIoaDDIT2JY1Ebc1/GtLP1RVFrqAK6kAEdtFXQnOH0ASAH2e84n8ANRokM7xPhuhoXHEon4gA2CbngO8ANIEaDmUx7GbnAOAZgVmm410HwdjxhRj1alffffT1G0ALP+nz6D9mh0fBnPveB2vXjCnGz7a3iW8CHgR6O4I9FYAZhgmph88B/c99vyHhvxbXzkNL73wDKxMxgNg20VoXwEw3qdf//rX0d7evsN6nXDCCSBD1pM25ZYbgIExl51ZH
YHSDIT7p3D67E2oyvixIJqHaMIHJA0XYJH9MjOAZsOnOkilyEIRnHHWmssEbfFBbXYwZtY2rNpaAiehQNmqwulL5gwAmSYBchbMSBLprSGg3gQIzMJZAS0IZaCnVZwxtBpTgh1oypp4elsp1q4swewJ9ViSDKGjNeSOiXQWz2dkXVaNYJDMWFwHtqoIbHWQJutFEsrmrg6ypgI7y6k4IIZ0+lmwBjvCWvn9aZw3sBoRzUJItbEqGcLrDcWoW5frukA2r9h2Y0EAFlVcYFbkAKVpFwRu8gEJQDEcBCbEEE+YQLsOtHJsOfrL5ngcOAEFMHMx8QFapQJfwgHybAyaVYBVn/36vgFgbVcfJ28ZWdsRUHvYnW94AKwnfbo/wVjHvfFdbE6ZSGZVaI6KfoEE2lImBvuTiOhZtNkapgc6UZ/xI+MoWBwLYVwwDp9ioy2rY208iL6+tLC+rSkDpp5FSMuixMigJaOjRBcOGhvSPhwciOGFaB5US8WYcBwVRgpLE0HkaVk0Z0y0OoBPAYKqjXmhTryTCqBEy2C4mcS6VBBFWkaOmR/Pk+dNbcbEWH8SbyQC6K/YqIqGUB5IotiXRl3GwDh/Ak1ZDRuTfowLJORZV5024ECFY6todyAf9JnBGDpsDXNC7VifDuC59gKUmS7dPtDIYH3Kh2PC7cjTLGyzTLRmdSxP+jEtkESzpaEjq6OvkUZIzSLjqFjQGYai2QipDgaYKVQYaSRtVcbM9ELMVjHISKE+48PqpB8hxcHIQAJhNYv58RC0rIaKQELeAIs0C+vjAQz2paHpFvpqFqYXTMdPR3zlE6zyx+/aWwHYQbPmYuGC1/HKos0oLSvfIUgd7W04ZEI5xk+aisVvz/cA2H4AwDiEMWPGYM2aNe+OJhgM4uWXX8aMGTP22OehO06k3PVLoCQNkNBZFYLSART1T+D8uRuwKhPAC21FyMTI3hBcEShYLtDhvyWflxvl9unDbQb0bcDYWduwvKoUTosKvQOwynMMVZ6dA1kpAXLYFgKiZIzIfNlAQRrwZWAkdZwzuAqj/HHUZPxY2FSERatKMXNcPd6K5SGTNtzjORamNsmgFSRdsMixxjUBPL4aIGMpMGyyX+54DRtI+hRJUZqOg1R5FvYwS0CV37BwVlkd+upppBwVm9IBvFVXjIZNBe4Yu9g2slZMO7aTvQMQdIBI1gWDVQYQBVTTgTGt0wWqW/xAm+qCUJ7DdgEf/DnmjyEN2dCqVRitDqw+DkaQATtjHzFgrT86Dqqq4IX12xCzbdz06gYPgHXHp3IfXKPite+hJWMgbumIaFn08yfRkPRhRCCBob4Utlk6RvriWJYMI2OrqBFgk0RYyaIy40MTwYduIW4r6MgYKDQyaLI1DDIyiKhZYXoJTJYnAzgi3I7nOgsRtzQBeOP9cfyrMx/DzTQSjgIbCtYnAnLtWcFOrEoGhE0eZCTRljUwwhdHk2WgwTKx1TLQaaso1Gy8Ew8iz1bQrjqIqDbGBBJYFg9ikJkSMPhOMoBpgTg2pnwo0yyEdRsr4gFYigP6tBf6UnAcBcfntWBpIoL/dUQwKJhEU0bHUF8aa1M+HBKMoURLiyaizvIJaC01LAGiaxMBjA0moMMWALYyHgRhXqeiYEYghsFmChtSfhTpGSRsDa22jqFGAu22jiWJIIIEajkAtyQRQjqrYkwwgcFGEgWahf90FggoLdKyKDXS6BeehT+M+vIeu1t6KwC75PvX4LY/XI9LL/8xLrz48h3i+cgDd+BnV30b5114GW7/0w07ALCG+lr87lc/xv9e/S9aW5rQt19/HH3cybjke1cjFH7PHf4fjz6A++/8Iyo3b4Bt2xg6bCS+dMGlOPn0c+RaH5aCbG7ahtM+ewjCkQj++o+XEcnb/zoJ7CsGjDF7Pwv2uc99Dk8++eQe+yx014kEgDFtRkCw0Q+1EygoS+I781Zjo+XHMx1FaGsNI0MGh+wSwRfRS9R0vyfQYWqSqUB+H7CAZ
gNaHVA4qw1Ndflu2i3jACWuBkuYLp6LXwQuXalNHm+RIbOEHSO2OX5AHfqbaQSULJ5vLMHqdX0xa1wd5nfmwWoNugwYAQ+P5bnycmCSSCutATEd2noFWjKHezQFetaBrXN3V+EVStmIV2SRHW4JoPObFr7Yrx5Z+b2DgGrjn7VlqF9TCPhzKU4GjECK/+bc+QeGgIwg0LShVGtwqPeK2FCHJ2GTZmsygSbVBW0cK+fK8fNt308gyf87UFoU6O2AVeRg9MgirDp9JwAsk7Vl/UwtBy8F3DlQSVvt5sYUZBcAu39JNepSFp5dXucBsN2M5/5+2OT5l2Ft0g/LUWEoDgo1SxiaEWYKM0KdWJsKYqQZx8pUEO1ZHVuzGsb7kjAUWwBYZcqPPrqFEt3ClqQPBhkvpuS1rLBqw/wpecjUZnRM9MexPhVETcqHcn8SJVoWK5N+zAjGUJkxkLR12W+sP4Wxvph8VjalguhvpNBmGZgW6hAmiW9Hm9N+vBIPY5o/gfnt+fCZFhJpA3l6BvlmBltjAeT50sJkxR0V430JrEn5UawRNDlYngzKfKMZDWNDCVRndJyY14qF8YiwUoeEojK/gGILcDsur03AVVDNCvNXmTHl80uQuSbtQ55CxosjBirTPjhZDZ2OgmG+JCYE4gIWeTwBLWMzzh9Di2XgzfY8FPozKNCyKDNSeCWah6yjoMzMoEi1BESuToaQsFXk6xbK9BRKwofgttFf2mO3Vm8FYN//0XVYumgBqjatxzOvvLNDPL940mEoKCrGmHGT8Mdf//RdAJbNZnH8vElob2vBZZdfi/4DBmH50rfxp9/8DCecciZu/NN9ch6mLr9+7kn4wrkX4OjPnix/UP799N/x2F/vxs13Py6A7f0ALJlM4NxTj0TTtq145OnXBNjtj9u+BGCMRxcL1lPZL85B+fOvAB+BhA1sNqBEFfTpn8Al89ZiTSaAx7b1Q4YMUycBGBkqC6qZhZPS4RCAEeiQceLjNsH0GoCYAqXNQeH0drRsLgBi3McB+uSE92SKuJG1og4qaOcACUFTjh3yOdANG4ePq0W5LymXfq6+L7atL8SAsU2oSQSQbfW7jBmBj+ixdCCfz3mK8FVXW9amQ9ukQkk5Arh0x4GlKjkiy5GUJHXzqcEW7DICSg0FwSROHFgnL7V+xRac9ERNORrWFbix4g/4iOWBBGD8nliniw1karFRgdYOZAsdOKOSrk6O2jHGghuBmsAjAjDqvxxXB2cBSidgtDtwCh0MH9UHq858L8vwoRqwp9c0IJ21ceq49z6oT66ux9HD+yJgSInFDlssZcFnaMIqfBRIy/vpM6i9/Bjk+Q38cf5GxGwHz65q8ADY/vgk3ANjOnTBJcIQxTK6CEKDioOwlsVYfwJlegbLkyGM9cewzTIEjFRlfAgpNgwti7qUD1WJAEoCKeiOgiTFlnBgqA7aLA3jAwkBHsW6hRZLxxh/HK/E8mAqNrZmTPTRLGxN+HF0UYukOlck/chTHYz2JeRcuuLICw5Tj0lHk/RdgZYRINOY1bEu6cf0YBwvthaiLJBETdKH/mYGMcVBR9pEfyMNg6BIdTDJF8fbiRD66xlEc5U9pmYj7ig4NBDFZsuHQ0PteDmaj/qMienBGN5J+TFAs1CZNjEhkJCx+FUbmuJgc9on56q3DGxO+TDUTMHH38HBqs6wgD6KTwvNDCaHo9AVW9KX0ayGfnoGg8ykMHlvR8Mo0C15fpIFW5vyozz3b1Lxw31JvB7NRwqKpCMJ3ibmT8cfRnsM2O7e/l0i/O9d9QsMGTpShPZPvPA2xk1wG0xvqa3CETOG4w93PIJ1a1bsAMBqqjbh2isvxkmnnYMTP//Fd4dw2QVn4j/PPYl3KjuhaZqwZw/ffzuWV0Whqu+9IN97++8xeuxEHDzn8B0A2GFHHQee4803XsbDT72CocP3ncv8zuK6rwEYqx/PP/98HHPMMfjnP/+5s+Hul79Xfn89ECQrZQM1B
pQ2IH9QEhfNXodKy4dHt5YhmzDcNJ+ZherPYFA4jgbLQMpWYDO1xt8ThLAqkWxWiwpfexaBQzrQRgasXQEol+ubY4kIOAg++CAj6xNXRHvm8N8EOAQGugO90MLnJlZiqC8hsfvntlJsXt8Hg4a3oDIRgJ2iep6VlRRx5aoyCQp9FiCFAwrQ4gOqVCjR3C4kq6jBkv8DKVOBbjmwBmTg9CN4NFBQEsPxg7fICzefndweqy1H9YYiV2xP8CR1MK5OS06W097LnFQH/s0K1CgQJ+gckwBaZWIuW8cKSanezLFoBF+MG4EkwWunAj1qw4kAI4YWY9WXdgLA7l5YhY60hW8fMkwGm7ZsXPiPpbjp+PEoClDav+M2/eaX8d05wzGmbwRTyvKxuK4NhqpiQr880a+RPfP/+Cn8+rPjcMmsoRh84wv45szBeHbdNvz1jGkYkB+QE768qRGHDWXpwYG5OQ61b7vPIvakqMyYfxk2Z3zIWCo0FpqAAMzGKDOJcjONmrQPEwJR1KbdqrHVaT8iCplXR0AZdUoEOk0ZUx4MId1Cp62hNCek5OdoIPVOlokhZhJPtRdKGm1bxsBgkwBGxyAjLddalAxAcxRhlUg/56tZSddR90XQxbciv5rFumQQcSioSpmYEkygIWXC1hzUJ33CePHYTfEAjixoxfJUAJajYKI/gVZLR1tWQ0CzURcLoI8/hVZbxaEhpjuDGOePY1PKjzXxECr8STTamujTmhI+jAzHBJDmqVmJAR8QBaqNOktHfTyAskAKimpjnJnAq535iGU1ZFUbPoJZMy0aMaYv1yT9KNJsYcAWxsPCgjFty7nx960ZHcP9SWEJh/gTGOtL4OGWElgOMC6UEPA2Nm8GfjPqvD12m/VWBowA7Cvf+A4OmTBA0oJX/ewmieltf/gV7rj51/jf8i2SotyeAfuooP/2V9fg1t9dh9eXVgtzddctv8H1P70C37js//CVb3wbBYXFHzh0ewbs7fmv4cG7b8Z9j7+AKdMP3mNruzdOtK8BGOd05pln4rLLLsOsWTtW7O+N+e6Ncyq/vQEI5YBRI5kuoG9ZAlfMW4XV6SDurRoEi+yNPwvFn4HPn0a5P4XahB8pvizHzZylBF8wc+xVFbVODoypncjU5rnWCwRh+V3ie7JluXQj2aJOwKFEl6fi450gp8CBUZDBSWOq0N/npiCfbi7BmtWlKBvWgrpkAHaL/z3WiddmKpBVlWbWBUlJXfRZWq0iDJgQdkw9qgr8GUcwkxBlmgKnfwZOX6YTdRhFCZxQUY/x/qir18348Z+6UtQuz1VBEjB1MVishuT3XbqwLjH9Zg1azIFTANjD00CL6QJOgk+OkzHpqoAkiOT4mZJlXJpUKDy20MaYYX2w6qyPAGCujszBvYuq0bkdAOtMWfjK35YIACsMGAibO5ZNm9c8iSvmjsCxw/tizuBi3LWoCsUBE58b0w/1HUlc+Z/V+MviGhw9rA8eOH0a+l//PH44bwReqWyGX1Px/FcOkXux9JfP4aWvzsbYvu/pHfhzjon6Np1IOrdxrNuiKZSGfaCfGNOlH8bO7Y2bfPtzcmwdKQs/f3ENrpg3UuI2tJCfgB23RVta8VZNKy46eKj8gsdxDtoeAGRkKxmbT5Mi3tNxmvXmpVib9sOyXeDjJtEUDDBSGONPYH3Kj/5GBmlHFY3T4kQIg8w0CMHWJIJSDk1Rfi3BWJZsF4X7KgbpGVRlDJQbGUzyx/BOMoShZkL0TAPNDAtVaLWCSoIn1cG8YCeiti5i/YFkrhRH0nzUdM0OdqA640MfLSPMUmdWR9pRMD8RwhR/EisTAfQxM6juDGF0XlQA17a0gcmhKNotHRYUlBtJbE6E5LpkvgjaCHw6Ej7MKmyXDzJl8nFHw8JoGBMkLWpioJnG6mQAM4JR+KXc25FYNGeNd0HT6s4w/GZGXmIOCkUlrZjVHNG6RXwZKSggDT/YSGBFMiwpzYnBGJZGIzJWn2GhUM2ik
6DNUVDBAgBbhak4GOFL4MVoHorULEqZltQsDA4fjBs9ALbbH4XtGbCvX3KFMFpMDxI80Wri+HkTMfWg2fjZjbcK+Ho/AHvh2X/ggbv+hHVrViIW7QRf2LJZC0xPvrqkEv3KBoDXuPb/LsFjf7lLXuYmTJ4Oslynf/ErKCktk7F3ATCmI3nOoz57Em6552+7Pa/uOrC7ARhF988++yxef/11rFixArW1tUinmTMD+vbti1GjRmHmzJk4+uijceSRR3ZXGD7VdZQ/3OCmz8jGEBg0KoiUJ3HZ7LVYngzjqa39YJPhIjjwWfCHkgjqWbRQmE+WieCMlg8EPdSG8WubT6oe+01tQX19AdChuQwYuZh8VlHmdF8pAB1dfmI5VolMHNmhoAMjlMEZoyrh123015P4a0N/bNpUjNKBbdgWC8BmCpJpQFYjkkFiCo8pQdpb8N85MIO4IwCMb/ZG2kHWUN4tnuzKHqJfBnYfpgUVlPXpxCnldaIBY8EXfcGeqipDPW0oaA/Gt3kCKfkT5Wq+XJFxLh3aoUGpVeBkACUMOBWWWwGZxypNAkyiPl4rpydj7PlFuEI2sEaBZjnQ8hwMHV+MVad9hAasvjOJhVtaUdeRQlsyI6CKG78/7/HF+MzIvsJsfXX6oHdvksrWGEbc9F/4fToePWMaPjuyFN97dgVqOxIYXBjCkMIA/rK0Fq9XtQgAO2dKBb78+BJcfPAQLGvo2AGAKVf9E/83bwSuO2bsDjfhxuYoNrTEcewIcp5AczwNv67i7EcX4R/nzMTZjyzEaePLcMq4HSuOdnYnV7XGMaiQfO3ub8+t24o/zN+E5ngGj501A4fd8wY2feeoD5zw6dX1+F9NC35+9FgBSpWtcaxvieGooSWy5p9m++vSGkwvL8TIkvCnOc0ePfbwty7GBhrtpU0BJmSnCrUsyo0UhpgJrEmGRDy+IR1AoWphc8aPUj2DBot6MB22rWCSPyEViawkpFCczBiZohI9A1O10UfPYDNF6FoGC2MRjA7GkbI1AVEbkz5JeU4JxgV8NFo6RphJqTTkcU1ZQxiwrZYpY+JGoLIiFcxVOnJfTXRdK+MBDA8k5fPUZukiIrmygz8AACAASURBVKUGtZ9OCObglVgYY40UEgorbEypRKSGbbQ/idF+cmqKpBapFRvvT2CLZYhmbH3Sj9nhTtk/ngNGK5IhEcgTNK5OBGCqLhc+3JfC0nhQUpVpAirNxml5LaixfBhuJqTEfFkiiGn+KKrTAWyIBzAgmBCmjeMmu8jR1mR0hJk6DcSwPhWQ9GuebonWbGz+DPzeE+Hv9ufg/QCMOrAzjp+N2x98En1L++Pko6fjoX++gmkzZ38AgM1/7UWcd8axmHXoEfjKN76Lfv0HQNc1PHDXLfjLPbe8C8C6Ble3pRovPf80Xn/5Bbz20vPwBwK4/cGnMHXGrHcBGEHfrDmHyz5/vPNRHHvCqbs9t+44sLsA2L/+9S/8/ve/F9B14oknYt68eZg8eTIGDhyIQCAggLehoQGrVq3C//73Pzz33HNobGzEt771LTFn3Z835Xc3umk/Ap8tLmgJlaRwxqGbUZ/24fltfWEz/UiWJ5QS4T2xhp1V4ZD9ihvvCfD56CEY6dSgNDoYcEgzamqK3SrBZrgaMOq9yMfQvoFvocLiCJIBQjnTUoKTDKsWbRw+qxr9fMw8KHinNR+ra4oRLI0hljDhtJvu9agDI2ijkWyXJQUfYp0UvqswOxxkCCChiPEq5cGWrrj+X4KdHDj9LWQ5vmYdwYo4jhrUIDpkVbElI/r32v5o3ZIHtOZ0a2TzCKYaaT+RmwfnxusmVWj1ObcJ7jcgA7QaQDFF/hxXLp5MOXJ/8S/L+YFFVeg1CoyYA6vYwfARxVj1xY8AYEvq2vGzl9di7qBiAWK/O34i+oRMATxfenyxMDiXzx2OHxzqAjPaSVzxwirc9NoGAWaPnjUdJ40pw/mPL8a9S2pkHzrdJ60sUpaNo4f3Q
XvSwlu1bfjWwUPwTn2HAKkuBowA7HtzhuGGz4zbgc15vaoZC2pacdbEctGQXfncKkztn4+7F1fj9QsPRel1z8k47z51MmZW5Lw9duFTQlB572lTZWxcFGO7ooOdHd6VTrz2v2vwkxfXYnhxCP/9ymwcdufr2PT9oz8UgP3ilfUC0phyfbu2FS9VNuP7s4d9aubqiLvewGnj++ObM4fsbNjd9nsCMIInCuj5B55aJ6bpKIQc6aM43Y+waiHjuIBpVdqHoUYKSVvDxrSJtKVjWiiGalYlEjTolgAx6rtYRcmtj5bGmlRQzktGimCDG39blwiITos6KQKcVzrzMTUYk8rM8cEoliVCUkXogjsLeRrTln6sSAaEkVIVB+0ERaSs4wHMzm+XcXVaJgZJsYAj9hHUYNWlfW7xQDyIlG5jmJFGbcbABF8Cw/xJbMn4hJFaEg9ghD+FjSm/aLDqszqm+BOSBmR6sz2rSdp2rJnEmrRf9G2stqQGbKiexlvRiFh40LYmCAdzQx0izJ8SiInu66VoRIAqx7wmFkSJP4mDw1FsTgSQZ1jyUtbhaGixVczwxyR9S2AYULPor1uYWzQFP/dsKHb7M/J+AMYTHTN7LKbNOAR9Skrx9D8ewYtvrRfm6v0M2JWXfRVPP/Ew3l6zDYHgewz6NZd/UzRfXQzYhw2uvq4Gp31mFkaPm4i7Hnr2XQD2k+tvxhlnfxVnnjgXlZs24OmXlnzAFmO3J7sXDtzbAKy1tRWXXnqpuNxffvnlOPfcDzfK/bCpvfnmm/jtb3+LlStX4le/+hXoEbY/bspNvwaKspJiFOuEhIpAcQoXzlmHpakIFndGkEyYbhWkwTQkVeKO2Oegw+eCCYIfarnoBZarClSjDiYeVI9l2/rCISPUoMDJ5z5k0nKgiw/ePHribCdKJ7OUc5DXTBujxm1DX5MvvgbspIH164qhVsRgpfUcU2a5GjSmGzmOqOqCPG4UvDcoCLY7SPgUIS4CSUeKI5kJIAAzLUeKEmMVWfoOCTMV6RfHoYO3Il/LwIQjut9Xt/ZBfW2+O08CSqYe+eejmVWNuZRkV6LP50BtIuAB7CIH2qg4sjUht9qUxQ50xRfg6WrdXEv+nKt+m5uC9MUcpEscjB5dhFWf/9q7t84OIvzFde24+r+rhZUhsLjpuPE4f9pAbIulhWV6cVMTfnnMGHz/0BF4p74d65tjuOTpd9AYS8PQFGGvxvbNwwX/WIrOJEsLdtyChiagLZW18bXpg7Biawcipi4ALOs40H/0JPqGfHj1wjkYUcyyeRvrGqP46YtrMKmsAPcuqcaZE8px3Svr5cST+uXhkTNnYPbtrwlLd8fJk2W8u7qd9MAC/PPcmbhncTX6R/zCsL0/ncd/v1ndirlDdtRaHHXP//Cf8w+ROBGADSkM4t/nz8Kx98zH41+cgan9C3YYBhmwK59fLYwdwRoB2INLanDdsWOxoLZVUqxHDXtP//ZJqk4Pue01nDi6FIcOLpZU7IwBhbsagr2239FvfUvYpqq0Kb5VBwc75d8pxxV911o+mLCFzWLabUPalJQawcr6tA+taQOHhjuE2aK4fWyA9sSsqLQxSgCcT3zEVqeCIkDfkPZjgj+OKrFlsLA4GkKeL4O+WhbTAlFsSvskxVmV8mGkP44NySD8mi3gjzYZeaoladFS3UJ1xpRKxYSlochMoy2rYm4ojkbLEMaL7PdgI41Gy0Q/PSXjb2Z1Yi6FGczpu6ZFOjHQTEmVJ8HoxkQAc/LaBfTkabZUIBIcskqRFZBMDRKYkqGrS5uoTvtQaqZlH4K9BbEIyoyMgDvq4U7OaxHWjm92WywflieCwirSB/CtaBgzI50Sr2hWF8sOWk5wDWotHUeF21Fv+dCUMeRBxjTsqLyDcJOXgtztz8SHAbBbfvsLPHT/7cgvKMRRnzkJ377iJ3L+9wMwCvbJgi1aR2rB3Rq31uO4XGXkSws3onzAINz0y6tRWtYfZ5930Q7jP
Pvkw5GIx/D359/6QBVk1eYNOOmo6Zg4ZQbuffTfO4j3d3uye+HAvQnAli9fLqnEPn36YNmyZVLQsDvbV7/6VTz44IPCoH3jG9/YnVPs1WOUP94ApW8GCKXhbAgB7RoCRUl8bla1FDqR8W+M+xFriADBnNEpX1zJfJHFouaJwIeAooMeV64dhBHPYtoh9VjQUgKn0QTqFYB/ZshuhW13f4I1bjQy7eoNRDBDmwqm7/w2Cga2IEy5GDMYloKVG/sAfRMi2HcIWtr8QFIQ1XtaM4qIxUGClYUK1E5HNF+8GvX6tqKIDYXItugNRnBWnIZdQJd6B6HCBAaXtAvr32Hrgo2WNBagprowV+2Zmy+vSfBIQMn0LQEYAWSBDa2SxU+AGrJhj03AaQi5HmHSaui9YhipqOTPuXFwBLF00Sc7WGxhzJhCrDrhI4xYCcB+8NwKHM804r9WYt6QYgEMSct+F4CdNKYfDh1SjD++sQnD+4SwtL5DGLIu/RGBtOsM+/HbFycNwNqmqOgcFn3rMNzw6npc8e9VIEhbcNFcLG9ox9ZoGr+fv0nSdZfPHYFH3tmCqrb4uyfmH46hhUFUtydgZR3cdvJkXDBjkJyTK/ZhqT3qxd6uacGRI0pBAPbwmdNx36JqDC0O4fChfXDjaxvw+XFlGF3i6tDqOxKY+efXUPmDoxFLZ0W7xXvh0Dtex9JLDsdPcgAs4tNx+ymT8cN/r0JJyMSCi+bJ8RxLSyKD1yub8KXHl+CuU6fI+efXtOKqF1ZhcFEIo4vCCPt0YQW50QbkgSU1mDe0D4YV7agn48wWb2lDv4gP5Xlu8cIhf35VQC31cCUhH+47berOwr/Xf3/EWxcjbatYlzHRX8tgciAmpqcEEkw1EszQIFSHgxbbEBuFGYEoFsQjWJMIiAZscigGVbWxNBrGlLCrlconA2YmBLz4VQcrkyGpfvxfLIKDglHUZExJOW5KBDAqGEd12sRhYdpe+HFCfjMWxyPCmNFjbJiZEIPU5rSJgf6kWFEQ/LwajyCWMtDpaBgRjGNdyoezC5rxaiwPZbolgn2Cxw7bQCnTh1kNGis50z7EbE30XUuiYRwc6ZAxb80ayNpMi/oxM9yJdckAynwp2X+cj8aoKvoZYj2NuowpIKvZ0rE8HsCYQFJSpLSp+E80X6oVWaDAe3BqwNWljfHFsCYdEqE/dV4U4K+IBTEqGHtXa0bTWcaOrBcfxEeE28SGgms0zJcQFmxIZBZ+OOy9t7NPe5P0ZhE+NWDcuiof+Rz49xsrMWTYKPn5+wEYtV+scLzgWz8QK4nNG9fijj/9Gkccc4L4hdHagtWR997+B/EA+9o3v49pBx0izxdWON7z59/iuz/8OXjdD/MBo03FVd+9ED+4+pdyjf1x21sArKqqStKMV155JQjE3n77bdx7770YO3ZHqcvOYsJ+kQsWLMBVV12F733ve/jRj34kVZP706bceANCYztRGomjclMJ7AYdwUgapx68WT73r0fz4HT6gdqAy/oEmUZzgCjpI+q5mFJkTo9VkpYLpFpMqFs09J3agobmfGCLAXRQI8UvF6C4xqU5sXyb5uqi+MeKVYVyTgdKMINwflwIJuryA0kNrevzMXhMM+ocA8mYD2gz3xPBEwSRoSJ46WKjaNrf5sDOKuJ6T8G9ojrI5pzxqbWihMwekEGmzAVWgWAaQ0vbBMSxCp4G1Z1RHzavLXbHSbzCuXAORGeuXZjL3BGQ8mcEUXFAKbQRHBtDjACMY+JkutKuksvNHcfHOc1o+e9Gt4el0zeL0eMKsOq4jwFglzz1Dk4ZW4YfPLcS08sLMCg/gGuPHI3LnlkuDFiX2JvMENkWy7YFGX7SjalHy3bQL+xDzRXHYuatr+Kt2lY5zZ0nT8Ilz6wQsENgRXaMwIypQn6//cYxdPmWXXvEaFx1+Aic9ehCfGnqIBw+qFiATdfWlkjj/L8twX82NqKkMIj6tjje+NocfPXvS3DN4aNx7Mi+O
PKuN3DdMWNw2BAmkAHqqy7/92qsvOwI/GHBJrRGUygO+fDoijr87vjxeHVzszBg3H557Fjc/lalFCk8+aWZKI/48bflW3DrW5U4fEgxfvLyehCo3fv5KVJZes7ji4URvPzQ4bhlQSWOG12Kaw4bifsW1+CWBZvx5JcPxsEDCtGZyiDfZ+CtmhaM7ZeHuxdWY9bAIswZVAymZ895bJEUOxQGTHSkMui45vhPlE79pGu3K/ufvPAidGQ1MVRNZVUcEWnDgkSeGK3yDz6BD5ksDbYwSWSF5oXasTIVcrVJaR1F/rSkJZelAuijMU2WRiH1SmpG9GSS2oOCUj2NJfGIgDKfSs2TIj5gAyiGz6qYGIjjrXgYXylqwIJEPjpsVWwxBhqszVTEAJUGp/wcNts61qR8CGYVZKCgnz+FLRkDZ+S34MVYHqb54+KoXJvxidv+IaFONFlmzgxVxevxEA4PRbE0EcJR4VZ02rp8nluyuqQQh9GJPquJJo7pzpG+lHh1kVljRSbHwPgsToZRnzLFab9ETyOoZPFyRyFKfByPieasjkn+OPrpaTGe3ZwOCCtINox2Fm905iMLRzRmXAMawpIBI9CiFu3wULtQ8QTCZXparDnG5R+ES4e893DYlXX+uH08AOZGhx5c8XgMf3vuzXfD9X4AxrZEN/zsSjzzj0cQ7ezAmPGTcPnV12P4qLH48mlHi+nq96/6Bc768jdwx8034snH/4LamiphcgYNHoYzzvkazvzShZLe/KhekGyB9PILz+LRZ9941xrj067xnjx+bwGwz372s9Jk+//+7/9c8PvHPwp4uueee3DqqTvXxVGcT6DVr18/OUbXdSxcuBAHH3ywpDPHjx+/J8Pwqc6l3XI9ZkysQ9iw8NLaCgFgBUUpfPOgNViRCuGZrX2RTZpAjd8FT0yh+bLi+aUWJYFACqAjPdsT0WdLs2G1BqCs96N0UjMakkHYlSFXA0bJcYkFFLkvj651hO1aNFCvxVxgXgaIZKDYChxpZJ1ye4WrNiIdJjrWRDB8WhM2WiaynQEXcBH4ELiw9RD1GTwPj81pycxq1wnfveZ7nqkEItSA0YrC6JtCaphbRKCmVQwobZdTltO02lGwZms+0qsiuUrGHHsnDF4u/Sii/Bz4YoUjQWWjAjUvi9KprahvYDGCnvMsyy0Zx81hdbF3TD+ajlSF+jodZEtsDB9fhFWnXPDuGu+QgiSYOPHBNyUV9+jyund3IkhqiLq6mz29saryJ0eMwvWvbcCWDuLiPbPdfeoUXPzUO2i9+jg8sbIOp08oxz9XN+D0v779ARDHKx43oi++P3eEpFSvP2Ys/rqsRjzknl+3Dbqi4MlzZ+LhFXX408IqfO+gIaD4PprOCkt4f07vdvXho3DfkmpUtyUklfrl6QPx2DtbhMGLBA10xt0bdfbAQgwtCuGBpbXy78OGFOPlzW7q4YihfQTochvVJ4xhRUGx6yCrRtD2+fH9cfeialx7xCgEDRXXvrgOmRwIDpuaaOx+f+JEXLqX9WB/+tOfcMEFF8DnI1/7we30RV/HulRALBVWdoZwRGELlqcDGOdz13grNVKBmLBABEFM0RGwLElEpHoywDY6voTYMqxMBTDSTIqvFQWWhVpGzElFyJkMY5I/KqJ60utkmqKOJkL6qFRPWig3LCyLB3BKQTO2Wj4U62zho2FioFMMWSljiDq6MFSDfUms5EOGutKsJtqpsGJjjC+B1xMhHOSPi58MneXp60WwxEpWP1zD1BfjEcwORLE6FZAWRPxEEmByPOuTARwS7kRNxicgjsByoJ4RGwpabbAXGy0jBhhJbCSgSpuIaLbMvZAasEQYFVpGGLXl8SBOKmiVFCiBG7VsBK599YwwZssSYbTQ3Nbvau84H86ZhQn8+cFsA2X5JDVJFo3AdkRkJr439MI98wHMuYs/8cQToL/Srm78w/jzn//8Q3dfsr4RbyzfimNnj9nV03n79aAI7A4Au/baa8Gvj
9oeeOABAU0vvvjiDru88MILAqq++c1v4oc//OFHHv+f//xH9mO6kczX9ttvfvMbUBv22GOPfeoo72weu3oB/eZfYtaUBjiKg/nLy+Fs0dB/SAzfnrFGXoCfaCxFlnqrqmDOeNS1m1DzUgjkx1FqZMRjMZox4Fga/P4Uoh0BZNeGUDSuFRlDQcfGAmCrJj0gmepDv4QAJMk7JQ041G8x/UZ2qTAFxbTEz7FfXkysd/jcHhZMYENDAZxNfgye1oQqZi5YidnlhM+WRDH6kamSTnU9J1QgpQFrfNCaFCGnxGYyl2nii7fbltGBvzSN1NiMmMuy6KAgkpDCTb+jSBYm3R5Ay4p8wKD4O5cmpdaMVZ9uLjMXcrdaVNoONRhQwxaKxnegKRYAGn1umlQc8LdrxM3vWbFJsqjAEaG/tg1QCmyMmFiEVSd/iAaMVLb0Zqx8T4Owq4u+P+43Z1AR3qxpxRnj++O1qhb87YvTsb45LpWTH7URaDbF05Im/PYzy9Fp2bD55TjvatKq2hKSol3W0C6AkanXLlbuuJGlcs2WhFvKvK+2358wAZfOcu0u9sbGe4XVQnz7vvrqq/Hd734XprmjP9zBb14q93ZfLY3F8TAOi7RjZTIggIiMC7VIZKAsqOKV1WprOC7SgpXJMJbEQ1Ltx7QlWwS5HSEdASp99ZSk0sTbKmuInxirAAniWNFHJ3v+riZlShqPrA+F7NSJnVXYiCXJCEabcbTYOo4MtWBJMk9MWAlGqtNkzZLSS3FFIoACwxIWjNYX/Yy0CEdZBEDQwmpHAp65oU5JD/KhwvTjm4kQDg1GXYuJUKcwUmS8GrMmahM+DA4k5TzDfQksirMSNCvx4LEcPz/2E/0xqQ6lXQXBHz26qO2qpWBey6IxQ82cHyfktUoM30lExFB1c9rEKDOFUb44Xoi6GkSygBwvz2yqjqQnaU47wR/D4kRYTFq5Ma1bGpqNK4e993b2ae+dT8qA3XTTTZImGjJkCH784x/ji198z5CUY/EA2Kddkf37+E8KwLrul2HDhsn9Qg+v92+zZ8+We4qthd6/1dTU4LzzzkN5ebmAtPfrwviSSdaM6crPf/7zHxq8/v37C7gbPXr3DW53ZR67unKsgqyY2CRFPes3FQP1Bgb0i+Ins1dgWSqEh1pK0RgLwKHlQ0qFErZQ0CeKQjONUYEELEpCWPjEl0/FFpPsJVuLECXrNSgGNWTBqY3AqdGBQgcFpTGEysmya65zg2KjqT0Mp9knlZhmMI0++TEBdjSXbo4GYGVVlIQT2FaXD7XKwNSDG7DC8iMZ97mAy5eByhZyBHEEYu9WQua+3xCEWanCMhT4k47skvArYshqKQrMrIPMgAysYTmNG4NHlo/sXNyAQpCVVuFsZuujXCsl6thYuBDY7m+3+HnRwd+CtT4faNWhBjOomNCCqnjQ7RgQI9jMpUoJ3MjgdVlQENDxnDUmsI3O3zbGTCjEqs99CANW1RrD4F//Z1fXucft19VWianTnW3c9+P2I+giCHl/5pU/J1jb19veBmCcH9/+CL740GLag2+R2wOx85d8TSoU2fKGQChftbAmFZDqPzI0dWzHo9oYZsaxIJ4nonI6xi9MRFCZ9oumiq2Jaslq8a1FdS0sqBsjOCNzRhuJVakQRvtiqMv4BdRFbVU+6LGsCsVR0J7RMS4cF6PSLxQ0odHySUqPovzjI82ozfjfPR+ZJAImjpu2FVs6wxiUFxWRPkES044EW2SZqNVi5SxNYPnWxZegtK2JFcSkQBxvxCKYG+6Q41jt+Vy0EJWxIA4vaBNgRS3Z4kRQ9F5sqs23NgIrzpVVnmTvVqUCwviloWBWIIqFyRBKNBrSqsK2zQu2S4UknfC3ZPxyXoI1eq0tTYRFZzHcSAm4G2SSqYM8XKkhI4jlWyfPxwToZH8nhkZm4ZtD9pyw+JMCMN5XbH7M9E5BQYF4MW0PxDwAtq+fLHv3+p8UgHE006dPx6JFi5Cfn4+ys
jJcc801OOuss2SgW7ZswZQpU7BtG//6ffRGewnqwgjCxo0bJzt26b0Ivrp+9mFnuOiiiwR80bz102wfN49Pcl7llusRGdnqvpAuKZJWRGVD4/jxjBWozPrwZrxA9KltLWFhdvRQEuW+tBhcsyqbpkwtliamzexFy2IopyEI1JrAsKSriWo2gQZd9FChkjhK+nei2VYla0GrH+pvKZ9AzI/yknZ5RtJSpzkahEPKiv2t85LI1oegNemYd1At1sOQl2ZJf2oOlIQh7ZGEjgpS+EVfMyr1ubAm1K0a1Dhgi1TNEZkWX0Kpy6I1hTUwDaci58clx+X8xHgCXoM6+RrDZag4p5AF+DNAgNUCOTd86uCYLTUs2GvzgG0G1MI0KiY3oyqRs68iSGTVJv/PCkiCMQI8VkTyi9drZONuNha3MWbMRzBgte1xVNzwwidZa2/f/TQC3QHACECLiorQ1tYmUQiHwzsAsW+v/gaq0gEEVUsaRrOlzhbLxMxgJyr0pDBRxVpGNEkETkwfTvRH8XKsQJggGoQyXcY0H8EJU5FDzBTK9CTito6Yo8mxb3bmCdPEPo7UhEW0jGgjF9IclbYXWhbDfWkRpX82vwWVGY7FtYaYG2oTcELLCWG+VVePRnBIQLg0HkJAszA9EJdjCMwI7mhuais2srYmzvMENAHFEnuK5zppg6JiS9pAgZ4Fxe8EP/NjeVjWGcG4SFRc7tlr8o1oBBOCcQwTfzJNNFwEfkeH26Uq8u1oBH7dtaGQ9k3xkDjy02WfYOrESIsUDpDhWp0KicCeQJXGr7SnYKUlKzjJgvEcNGHlc4gpTMaSthfU6U0NMK2aRGHwUPx4+HseNZ/29t4dAEbPpdNOOw2xGKtesQMQGzPjaC8F+WkXZT8+fncAGI1UzzjjjB3ul9LSUgHurHi84YYbwHTjzjbqwphivPHGGyWlSDBH8LWzask77rhDhPl33nnnzi7xsb//uHl0AcpduYByy3UIjG5HieKgYWkfpC0VRaUJnDdpo3gG8gWPFjyboiGEfGkUGpY8G6iVJYAi20SWnR1FKPFqShuI14WABhMYkHY9ump8UFsUOMVAWUU7Svt3SjU7X1QbM6Z4ENY35cHRbRTmx1BkZsQmiD18owR+NEONJGHVB6G1aThuViVWpoNuGpKgiywY05hRw2WuWCjASkNxq3egbTRgt6piwirFlporck+birBhgpvKk3CGZN2WSwRgtNvg8Tw/TWYJjKqpg8v5mLFhOBuPM/VIKx/NRkZsJdgn0wA2hYAmHUpeBvkT2iUTmuDPmRblV5eTvhiyshCR/8mZs1abQB2rRh0XgH3+Q3zAOpMZ5P3s2V1ZY2+f/TwC2wOwn/zkJ+Jbs7ONDtC2vXN2kP3nutKNmUxGTAu336gJO+WUUzDtauDNRL74fv03moehZhrVGQPzQh2Y7I9ifTooDBgrGKm5ohCdDNcr0QJhZMhE8dhtBGcpH0YEEmJeyhQkQRJZtSnBKN6KRyTVSWBBywWKycmOtduasFKkxGmYytTkoeF2Ec5T+M6PR3/drS5kaTI1VBT5s1KTdhBMKbbZugj7CVIqzKTYO3C2FQb7LfpEnH9MuFU8uELIIqWoAtIIgpYlA2KfMcFMiuv84+3FiMV9GJEXlXMwLdqR1cWRn+lIplrZnoP+X4eHOmRcSxMRsZzg3FhIQO0YY0ABPnVdc0IdGGokpaDhrUQIsawhvTYpMq1NmyK8J6gk28bzcc4U3beQETPSch6CT3YqmBHoRN/QHDx+yu/EoHJ/3AzTxA+ufwTnnfXBdNL+OF5vTJ8sAgRg993/EB79454zO6XR6q72dbz++usFhB1++OG7BNo4u7/97W/iLdbS0rLDZJPJPaNn9vv9mD9/vhjF7sqm33Ydjpi8BQZsPLdpAOwaH/JLkzh30kY0OzoqUwFUJv1oSPnE5NlnZKUC27FUpFQHFUYGWywdE/1xST/yRe/Vhj6oXVsIVCQkBedURuA0q
iIM6TO+A+MGtCADFSElK1KJ56rK0BH1iTVEUWkHSoyMQ0aI3AAAIABJREFUVJevSvrR0pwnuEQNpWBvDUCrNzBu5laszfiQyuR6VBK8EMh0GACBETeyS6S5CKaqdSitCgw2BOeudMKny0O7jcaI6oK0wUmow9JQLFUqvR1fBojmLC7EqYFmiYZb/ZjnAH3TQCQFk1Xumi06sShU6UHckdGRqMoD6k0oQQt9JrWgySLVlsuCETAS6BH9EZSJNUXOnJXf1/rEv4xdA8aMLP5wAGbbDrSrn9yVNe4R+1AYTW1W1/8NlelBVpzuPEWocV/unPvD/v4Ji71Frvp0+999VGqyuwO2PQDb1QdBezsliru2ke7nVlFRgaYmt2CAoIxgjA8jpiP/3nQR3k7mia/VO7EQxgbj0trn9IImoaSXJUNiR8HqPuqvyPgoiiNsUrNlSHqPqTuyY3kqgVVWTEbZbPr1WL5UPhKksfqPgIPtiibmfF7YL5HNZflgiaiW9EbM17MoUi1p6F2gWtJ0m/5aTGeSMaLonZWVFPjzFqEgfq0I9BUZG9v/8AHFlN0IMyE6Lva2ZD/KhMOKXtf2ZHkiJKzYa7E8hDRb2gpNDkTxTGchtnaE0TcSEw3EAH8SGZu+Zo6Me2UiJG+Rb3Tk4+w+W8UvbUkiLEVAfLywfUaHpWOAtBNig3Efjou0Y7gZx1uJfBlPs6VhiJlGWu5zVboFmApQnTZQYWbk/mf6kgB1mC8lRq/0KWPXgcMirRgYPgQXDt7RX2rX7oiP3mtX77+uMzz//POi/epiwHivkdHgPTV62pH438qtOOGwXftj9GnH7h3fvREgANtcuQWnHep2NNm+b27X9+///0svvYQvf/nLiMddeyLeLyUlJbjiiitQXFyMm2++GRTS72z7NAwY3fJvvfXWHS6x/fN0d+bxYSn4nc2Bvzce/DmmD90mhYNvrOsPu1FHft8kjhtXDVV1hO1it42VsRD6mWnRtvJZR7aHfoR83vJ5Q+/CyrSJaNpAsjGILPtBDklAD1nIxk04m3zQkkD55BYMLuvAtqwpzx066jfH/VL5rvssWDR8tfm3WIGdUeGwAIApunAK2BSE1qJi7mFbsEnRURULusCFX2IGm0s5hjIu+GKaj5WRtTpAwwQSTCTJ4q4Xalf1pLjjD00iPCaOrG6jI+p3G313ifilxRKNagNuarOLBdNpTJt17avIfKkONM0WHXFHdZ7oxpSMjfIpTcg36GVpoMPSXBNb6R/ZVZlJd/3cPPijSj/AP5NMQQ77mFZEytVPImJo6BsysbHlPb+t8jw/4pksWhO5ctNduRM+wT4fBWg+wSlkV1VTURY0saUzidtPnoRvP7MCT507U1z813/3KDyxqg5nP7r4Q087s6IQ+T4db29pwx9OmIBlzTG0tsfx4LItckPSPDWgq1iytQNfmzoQL21qcqsbfboI97ldMH0Qnl67FWzpxKbkbMdEPdmerO7cfvC05uD52WmAwLLQb2JbLCVGsJ8f1/+Thu8T7U+BKt2kCbq2B16k/bndufY0rEu7lhLUfpHBYbpwerhDAAwBz2AziYF6EhvSQTRmXeEnHY35xZQidWMU2fMhQQYqqLCHWEqodLJUNE99O5EnZqpkqdj0ui1rSGqSTBa1Xjwn148pxZE+skh+aYVEZmuMLy7pOKYsmZ7rZ6RgOSoaMq6fFw1LHbhu+BHNkp/x3FP8UUnx8TMvejDLhwI1I7YOS5IhzAh2Sj/LCjOFlqwhTWDpP7YgGsGMSIcwbuV6SmJDN/7x/hgWJCJSIr0oEcLFRXUC6h5tL5F5kq+b4IujKk2fr5T4fVFIP9JMCEtIgav7wHQLEPjAoC6N4I6MIJlGvhGzUpQaOqZf+X+mIHl9gll6iQ2OzMLpFZd+ovtgT++8vQaMwIuani4xvqcB29PR3r/OtzspyIMOOkj0WwQsBF5MPZ599tkysZ6kAfu4eXySVVIeug6B0g4UwkH9xiI4r
Rq0fmmMHNokTBRZKr6YbUr7hX3nM5h+iLS2oa8iXzhf78zHho6wvCg6BBbb/ECLBgxIufk+skd1rgVDeHAMVn4G6TSflW5PSIcAh4L3rtRcu8+tiCQq5EOTwnR6jNX4oTcqOPiIOkRNB6viQaQJ2AjSulgk6rHCGRd88Yu5v1oDaHY9vgjAzKgjWjB6gxm2gzQf2ANTUIZmBPghmvMW69JliUieLZZ0oE/aBU+i5aLonwaqOX8vfi8tjyACfBdgZWEO78TQcFwOi9t8QXdBGAEuAShtgFh4ZTEXyhhUBt2q0YIsxoz8CBE+r/GnBZtx/+JqfGZEX/zspXXonx9AYzSJ08f1R01HEq/thQpJNtD+/pxheGR5HdY10YbW3cgmsb/ja5UtAmh2tjE0354zDGdPHIAL/7FUDGRH//a/aLvmeCyta5NeiWsbO3HaQ29j5bZOqIYKO2MjGDYRj6bx1zOmidv9Cfe/iSvnjcQXJpSLQ/5F/1wm/lpPf2km+gRM/PntShnb02saxJj12BEl4uHF7UeHjxRLCtpQ/OqYsZhUlidj/8rfl35g+D89chRufGMjxpVEpHLyy1Mq8IWJ5WLA+tA7W3De1AokLBufG1WKexfXoC6aElNY6U7wtyUSs+NGlWJFQ4d0BmDRAJ3wabUR+/EJCJq75/S8szh3/T4YDCKVSon4lOxEF/Dq+v29a0/D8lRIwBT1SWSW+KFn+pG2EWuYglRsVOjUhvkEOBBEUAdVkKsqJPDqMh3l8X30tKTgyAqxqpAA7I14noAkiu+ZVmNrIKbbWNFI+pwpTjI/bHQ9KdCJRfE8DDSpI9MwjADM8sGnuOlAOvRzHNSjsXKT4OyocJs43JdItaCCdxJBnJzXJH5ljDDfGlk1yaIBPlvozE9t2cZ0UFKq1LBNC3TgxWiRiO7nhFy9BDc66HM+bEDebLFBdwJrUiFcU1yD+akQnmjvA56VLNq0YKcAQzb/ZsqRKVWmLJlyZCUkx0wxPsfA9CsBJkElrTxoP1Gkp4UVI5tHcEeQ1pAxJD3KYoZyPYmi0KE4Z+DFu3oL7PH9tq+CZFn++7UvHgDb4yHfr074SQEY3ej5Ejho0CAB6uecc84H5tMTqiB3ZR67ulDKI7+Akpd0ccS6PCBJ8XcGeSPbxIB0uD8hL6PU1NIGiH1n+bLG5zQNphd25GNpLOy2dWRvHzbmbggAraqrAZOWQRSHaa5H1/AUQP8wohGCFXFBzbFXTMnZKhTVlhQnaADL1CJ1XdRk1fsQ3qxi8BGNyIYtKUJqTvhyAIy+OToQTIvRqgA6AiACvXq/2Guw55xuOTJ2OunLmzQNT6npLU/BGZwDgaTHyKgRDLFKUSofrZzZLH3Qcvow7kN9WBfoEmBGJovAL9ccnMCwIgZFc2AGknCt0N3aAMNRkOGcaeJPVoxZE6Y/68JANdOdFsaML8CqYz/GiPWyp9+Rfo5X/HslmMoiEKEB6pcfXyz+VAQfZKxoIPqTI0fiwaVbpCXR9hWAu1INSOaGFYP0ulpx2RE7GLGSvbry36vEGf7hd9zzH1RegDVNUXSkdmxxRBaIDBADcPvJk6XFUUs8jaKgia2dSZRG/O/eu7wee1fe93Y1bj1pkjBjl88ZJuc/a3IFjhzWB3+evwlnTq5Av4hf/nCf9fQyvL6yASu/fQQK/AayrLBwHMy94zWwoPKE0aVixFoUMHD/6VNxyVPLkefT8eS5B2NgQUDGQqNXenkRJA0s8OO2kyYLc/bs2q1Y3RhFgV/HYUNL8KUpFTIXNvfuF/bLv7n9fv5GlIf9+Pz4cjG+ZbPz0yaUY+7gYryyuQlnPbIQeT4DZ0wsR217AvRA29vbddddhwsvvPADwKvruretOR1r0yGxmCALVGakRWc10RcT0MIUGz8HTAc22wbaqIFSs5JSpJgzbSuSbqS+ilomgi+CB4Ia3uIERdQuVWYC0uOL7BRTenz7IPNDNogAhOJ5PnAWJ4P4XF6rXIf78
GuAnkS7rctbEvVeFLOLB6AAPB+WxMKo8NFqhHYOKfkwrUgGcXxeE1qzJtYkgzgi3CqVmGwIzopK2lNMzT3YCASpCSPjx0rPVYkAPpPXhrBmYZvlE5BErcEgIynglJWNrHS8qrgai5IRvBGnlQTnA5TpKTF1HeePCrtHBo7gbaw/JnHg+Duyhgj8WVXJ+bORONkvpmFZtUmvLz6nCEz51sZ5c4wzgx3yfXFoDi7bg0asu3MPdqcP2G9/dQ1u/d11HztMtv9hG6BPs516zEFgY+zHnv3fpznNpz52fxnHR03kkwIwnocvf3wWfdTWU3zAdjaPXV185ZGfI1AUF+PT+KZ8t5diXhYYGJPeuKPDbMejijSBzDcZdT5TW0X24cgL4+p4CJvb8lwbCIKPJtotKMCgJNBpuiCKPRP5sByagFKUQFCz5bnL5zALtDRblbSmodmiw21PmcjSZkJyhXzwJ4HNIaBNgTo+Ds1voTCYFMaekpIwdbi2ikIjjWJ5seZrNhCzNGxoKkTLujw4NLe3ANWC9IHUMqz9VOCzHFjDkkgNJpiiJxerFGmDz6cfhfUU3ttAG6sgWViQ0xOxA0CXQD+bY68EtOU8zeR4G+iXlOO1AHXTbtqRnmnvWuh3ATiCNf6eAGyzDjWcxeiJBVj5mY8BYN/51wqcOKoUv359Ax76wnRxhKcJa1cvyKFFQXFlZ0py4TfnYWs0idG/exGnji3Dwvp2bI2lML5PGMsaOqSF0UeBMTaP3tAcg09T8OZF8/DTF9fix/9dA7+h4a2L5sKnqQKMCDYIZKb1L0A6m8VNb2ySCZuagqKAiYtmDsaNr22U8dx+yiR8ddqgj71XCdTIJPUN+/D/7Z0HfBRV18afLdlsNr0DAUIJoSb0XqSKAqIodlSwU6yIDUVBQETFzqeA5bV3UaSKIL333gOhptdNNtnNfr/nLgkEAikssBvOvPoK2Zk75/5ndvLMOeeeM+C7tfjfbS3w0aqDaFczSBWgPbcH49JDiRj0yyYcHnV9sdZGSw8lqUbiFFYUYAzT/jmoLW7/YR3e6NkAtzeJgEGvVeKVjc0/WHkQv2w/rhqKswE4e0GuOJyCNSfS0a1mEBqG+aLH6V6Q/8Ulw6jToN3pxuIUuwwxUrQyP+mf/QmqdVJUkDc2HU/DA79uwpu9G6rPOCYr8l/t7d2dd+FwvlF5mljVnSG0PRYvVe2ebygUO6w+Ty8YW/4wLEcnNhPs+SbGEBn3oYeKIqSJZzb25plU5XeuZGR4soMxA/+aA5RQY8I6e0AyyZyeMe7L8Bz3Z6jvsNWAHt7pSpRQ/DF/rJZHjso/o5188PD4Xt6piLcasD3XV+WnUSztPe3BY+0ttjBq7ZWhipcyTErPEj1TDHXuy/dS4dBupjTlTeN59uV5q2Kp+ywmbDebcJ1funoAskURhSm9T0E6R6UzJvYfV6tBs5XAylELCRz5ExyDzcbZgJx5dSw7QfHGn7O2Gr1ftIcikeKKopM5biw3wVAjw6TMoWMRWkeOnR0W1dKoAO1MGcrzx1ZED0UOu9q3zgXP72wPWKEAe+K5MapZdkmbt4+vagFU1m3LxrW4f2AvbDl4JqeSbYI0Wi0GP3pppQrKagP3cxU7ymNzRQRYWca/lirhB898DX1rHlfPjJ/iI2BjCQl6eKrmIMwrVz1Xg/X5asESV6bzOcVV2qw3SG/YSrMfFmUGIiffQ7Xt4bMj86gvNIl6aGqbUUAPFRt2J2oAI+ATlQ3/sEx4MPcddlUn8UC2SdUnDTXkqZdMFmGlc+qkxQDLKR+H+AlgDpg3NOlaRLZJhM7TppxTfL7yeV/NI08tTLrRJ1m9fDN6ybhBks2A6cdqID4uEPZTemhz7ODaMbaz5EPUsXDRDv+6ZnjWz1bPxDSzJ6xqNeTp0GZhxf4kgyMEaSiA1pinkuiV/0qJrtMddArDlslGR4X7ggL41s5EqFeucsgl2nRKFIKCDXbotHa1cp0v+Pxdw6LXV
jb93u8B7yALGsf6Yk23M2ke5zXjfu3fXehRJxSTl1GAtcR15wiwjpFBKly28nAKvhzYAharDb4T5uLoc73w9PyduD82An/vPIlfdxzHqSwLPE+LEAox5pYVVtQf1Kw6didmIdDooZpxs52Q4bVZCPX3wrIHOyA6mItgoQRGQpYFe5Oy8XjbWqr1zpTlB1TB2JhwPyx/tDOqvDkPZqsNX97WHAPLkft014/rlDeKY7KHYpsa5zexpvj5fecJ3N6k5JyqwmbczBH798EOqpjtZzc3Ra96YcWeDQ/N2oov1sZhar8YDG1bGysOJ2PT8XT0ig5HuMmgODEcq9T06f8rqZeluj/YcoEeX41GCbxRc7crT5y3wUOJM7aOutrbuB33Ku8SPVWrc3zQxycV/2QFop9vssrDYkiQxT+Zp0UX+B6Lj6M0S76nEk3secjQIEUK8xIae2YrL0+4Lk/lffHt7Q5Wzs/zREOPPMw1+6uQHstENDWygCsFiUZ5oJiQzy9wK68s1PY0q3ZI9GpxDQtLWvA45pCxDlg9z2wkWz1V6yC+0UV5mrEgMwjZKkSqQ1uvLFQ35KpcCrq+ucXRa6exYm++Sa0sbO+VoVY50lPFrblXJtaa/bHPYlCFW/lA4Tn5MKSd9EJREFI4slo1RRlrjtHjFenh6NPILzMtZp2v7bk+KgTJEC1z4pgv4chj0ykRS1HJufFBSxFK4ZtVoFfhSIYaKfRoO7169JQxJ44CLNa/De6vOfxq3zpXXIDNW74ddaKc852h2Hp/8uvFBNjVAOoqdpRn7pdLgF1LvSC9/nwdvaqdUi+CCw9Xhe2U0RHyizBD62GDnyEfPX3SlVe+kcGsBBjF126rp+pcUk2fr8oAsR4jn90sc7M7yR8ZR72hr5mlSu8UJHvBflwPjcEOn6gMNK2aphY18cU6gikigCpjkWXVq9ZpHJ85UftzvJCZ5aXCkRrfXNgP+0KTokNQ8xSlX1h4hk8uw+nSO0wp6eydpp6H9G3x+b8hxwcrjoUj5bAP7ClUXIy12mHItasq/Vr2x9baEVjHjFqxKQjzyMfGLB+cSPM5nSjP0KBGhTaR5ak8Xp7+ZtVyiakZDM1qGIpU/5xeiGfTwH7SRwkzjdGKWjWT0cE7Q5UrYp1KLmxiNJZlPBhdCNFbVVSCL9XJNh12HQtC/l4TQvxz0L61ETPbPF/0tThPgI1btBs3NaiiGmwPa1cHbG9ztgeMYbox3eurfoWsGJ9ntcFn4lzkjemn8o96R4Vg3OK9qOnvhdH/7EK3uiH4fccJJRaGtKihPDQMuw1pURM7EzJVuI4CjFu1SfPxRMc6SuxEBZ5pQp1stqgFAFGnRdmW42kYPmsrwnyM+P3eNvhuy1F8suogPukfi+bVHBXAy7Itj0sGk+89dGd1My/LgWftQwE2feMRdKgegA/6xuLRWVvw48CWxXpQKiF5LBXvrTyAW5tE4JaGVYsE2PB2dUpsGl4eMyYs3qPyv3RarcsIsJHbHlDiKcumV7WtWnll4te0ENWMmgVSVSmG021wmI9ETxnfVvjgYJsehtcoihiSpMBgojrfiJgwmlrgoYRFA49s5dmhl4ieM3p1GLJksj3fACliWKaCbyl8XeGqRZ4rxjNbeY34dqaWRNNLXaBV52eoj94ztkqiW72ndyp2WHxU30aukmS9rpZeWcoDRwGVWqBX+V6tvdLVA2tjrq/KOwvV5auFBarqvSFXJfczPNjAM0etvGSBVb0GapEBhaJJa1eikoKItcyCdTZs5IpKzxwVuqWNMV6ZSqStz/HDqmx6/Gxob8pQHQF4ToYaKW75AKW4oiBjGJZij+cjH4ZkKdIogOkdY/ItPWthuny0D2qBu2o8UZ5b74rue7k8YGUVYCdPHMV7b47BqmWLkJKcCP/AIHS8ridGjZ6I0PCqeOD269VnhVvnbtfj8x/m4NzQ3103dQY9a2yoPWHMs4g7sA/hVSPw9Atj0a1XX4x/5RksnPcnN
Botul3fF6+/+RFM3o4XUm4zf/4GX3/+MeIO7oNOq0O9Bo3x5POvo13HrurzstrBfRfO/VM1+96zc5sKHUVFN8Tgx54u5vUrtPep58di4phnsXPbZvgFBOL6PgPw/JhJMBoLM2Eu7Xa4XAKMVrEJd69evVTKxJYtW0qt73WhmTz00EP49ttvwbwttidytc3/t7GoWTUNersdW/ZWgZ39CllHq7pZlVgI9cxXz5D+/sno7JUBLxQgw67FYatRCSeG/Viwhs9RPpfoSV+XFoC4UwEICslEQ59srE4MQt5BH5WW1Sb2BJqFpKnFQnwm86V7Y6430lk+R2NX/X7pLGCojs0M1yUHISPPAK3BioI4H2gytPCKSYOZTzi+ZNKbpC+AVoU0C3BbcKJ6ZnF1Jp9tO3NMOH40EGbW8MriwLwCdmhsgF0NQReYHYER2QhqnKFWea9K90cOS1AU1uxiqEj1ltTCEJADgyEfBg8rvNSqdbt6lvK/fL3mcze3QIP9aX6qUr8mV4takcno45+snvEq9cUh1dT8ufHFXXUjsmtV4et1KYHYsScU/p55aNLIG8vanymzcp4AG794D766rYVK4mbjbW6FAox5UVzd165mIL7ccOSMAJswF3mv9Su6F7edTFdejYX7E5VXaeifm7EnKVslps/bl6Dyll7tHo25exLg6+OJBYPaqmNrvfMP1jzeBewPWVi5vqQbnPys9gLl/WHja4YN2SeR4csrvS3Yl4AGYb6o4Wcstmy6JDuYv8WyGFyWzFZGB5KzMaBRtUsWYEnZefAz6rHuaJrLCLDhWx5UuVT88jEEyDcZFilltXt6aHjHUiTV9HQIgoN5XkqA0NvD0BvfRBguUwKMbYg8HGG6poZsbMrzgQEFKjy43+KNvt5pWHXa20TBF2tkv0WjCmv+nRGMDBXi16CBgXloWnT3TlUhRAorepF4P/G8/OJxP4qVjTm+SLDqVT9HowY4lO+lWhYpmz1y1cOByfF0NdNeerEofJhMf4D7aq3YmWdURVu52pL5YvSKMWzYwSsdyTZH6yaGP321+cgooIiiMGKvMkfrpAN5RuW14nnIp6t3iirIujQ7ANstJvVzFrZl+JHCkfXJKM5YwJa9JTkm8+NoMx8KFF18o2UjcOZnMK+CixW4fwdTOqr5tMct1Z++0l+hMp/vagswCqnUlGQ8+fxrqFqtBuKPHMIn746HX0AA/vp3I47EHcDEMSOxcum/+Onv5fDx9UWNyDrnCbB7b+mGE8ePomatOhj16iTVT/XFpx7C7h1b0a5TV3S//iZc1+MG1VT7tReGY8TIV/HkqNcUp79++x7PDb8f/QbchZtuvRsWSy6mf/y2ElAzF65DVHSjMtsx58+f8fRj94BCkU2+PTw88NO3M5QomzBlGm6/50F1Ttp78sQxBAYFY+TLE1A7Khr/zJmJN0Y/DYZv+a8ztsspwGhfamqqKpPDBtpM3r/vvvvKbDZ7Pr733nvYsWOHqqvYr9+Z33dlHuQK7Bj0y1iYwrKQa9Uh5WCgWgWJQCsQngsP7zy1ACjamIs2Xuno5Z2OYI0NCQVabM7zUc8vvuTyv4xO+KgXxQL8lhaGtXFhMAbnoJrRggMJAaosBb1GD7bZj4HhCWAqPH/7sh7YVosJWyw+2GMxqpI8MQaz+j3AFI5fjlZHcroJniFZsJzwgSbRA56N01DgUYA8VqhnyI8vzBR03hbcGpSonlk5dp0K57H24qmjQbAeYyiUSfKq+aOjECT1iirMakdIsBm62EyYdDbEp/kiX1Xg1zoWFTDPi8nyiZ7wq5OG+t5mtRipkWc2QnXsOwJH+SG7Iz+YdcS+SqiGY6cCYM8HAqqno09gilpcxYVjtT1ylGileKXoNCtb2SaPq9U98M/hcFiOmuDhlY/oKH9s634mylBMgO1JzMKPW+PxwnXRMOrPiBkKsIf/2ITptzRDsMmALItVlTtoEOqrQofe4+cUE2CF9xmFEUXSqsMpuP5/q
5QAowet47Tl+Oq2Zvhw1SE0iQjAp31j1CFbT2YgtorfFbhNnXcKXvcLhQovdhZysxbYi8KOzrBo5dFUrDycjOc6RjljuEsa48Vt9yvPC/O86Drm2wHDgV29WbjUUxXmo4Cg6KLooIeIIobCjG9RLBLItxC+galyD7o8Va6ivTED+61Gteqvi1cGtuSZ0NloxtY8g1oZmWbVo4UxE3FWtuZxrBbkl4hfKLqH6S7u5p2mzr0uxxfNvbKUgKH44rkbGrOVAFtqDlT5VsySooeNqzNZBoN1dOiha2LIwv58E0J1FiWm6F2imKGoYZkHRyNym6q1E6JzuPU5T66WbGnMUis/mf92MN+IRoZsldvALzG9WCesRlU2Ii6fbZOsqr4XvXMPB5xQnsFVuX5KkPHBwFZODB/S68cQKu1kMj9DqLwvGW5lKDOnQKPqolE4MmeNq0rZicBs16r5M5TZM7gZulcbeUnX/XIefLkE2A9/LkHN2nVLNN3PPwCenkakp6WgdYMwPPPiOAx9+kzzZnqDVi5biHsHD4WXyRsvPPkg5v39W7EQ5LkesEEDumPd6mVYsHIXIms7vqszf/kWzz8xGLfe+QAmffB5kS3d29RD9RqR+Po3Ry0rLhpYv2Y5pn/3N1gUmdv+vTvRp0ssnnr+dQx/9hX1s7LY0bNdfeh0esxZurXII0QvWP8eLZCZno7/NjjybWnv2lVL8efCDWjYpGmRbTw+JCwcP/611CmX/XILsEIj586dqzxYLDjMQq3XXXedKnRas2ZN1eOWZXVOnjyJnTt3gjW+2J0hMTERbFn0zDPOKxLrFGjnDOLzwxuIjUxEep4Hdh0MhZ29CukBC8+B3mRBA0+LWtTUwzdFldPhM4rpDQwTMi0hvcCg6gNaNTblwUmweeD31FDsSQyAj08u/PRWHEv2Q8ExT3jo7Xi61W70C01SwofPej5nF+b447B6DtnQ0GBWz/GjqnC1AT+6/Tm7AAAgAElEQVQfi0BmuhfA2l4JRmiyNTBGZ8DGHLB8HfT6AuQzRMkevD7Zqp0b/Ut8tvF3wT6LEfvjg2GNNwKZp1cU0dvDhx3FGFcsaQHfcDOqNEpV/Xz3ZXkjLdXbIcC4sECtwswHkrzgEZyDrlUS0dc3SbWN4zeKAsygJKVG5Z3xGT3lRCQSkn2gYb3JmqcwMCBJlVBiNEMlh9ChwHqNGrt6HlM08rnLY39MqIrUw37wsVtRu14AtnQ9s9K8mABjbhG9NJ5niS9eX3OeFc/N3YGJvRuplYBnb/zCsnxE/VDfC95PTNSPeGsBJvVuhOc6RSFi0nx80j8G7yw/gEFNI/B428vXOPpy3OQyZukEHt38kFq1SLGyKcdX/cLfmuutqrdTTNCLxHwuhvuY1M7q8fyXSfRMBmXbIooDCiV6bniT84vM4qxMwmdyflNjphJdkfpcVfLiWL6XSmhnaI1ix2J3vNHxgcICrGz4zQcORR/Dh/yC0Gu0Jsdf1fHiz7makV8gJrnTxr8yg1DX4Ajn0cu03eKNGKMZ9Q1ZSjTRA7bd4oUa+nxVg4x2UvxsNptUe48uvhmo62FRopNhV6444sawId9C/0oPRbTRIaKY+0axx4R9ClaOzRAivXp8EHT1TlMrJlmqgmKVApcubvJkaJYhTxZZZaNtetNoC4UtRR/Z0ivIlkPMT+AxfFjyDY7ikTXM2gW2wCPXYBL+xe7m9z79Dn1vuRP5+Xno0CQCgcEhePO9GWjRpkOJHu+yCB8KmkMH9mLF1qNFp6bXbPAdvYt5nvjhXf27IDfHjJn/rL+gmTarFQ2rG3HHoIcx/p1PyyTAjh87gq4t6+DBoc/ixdcmFxv73Qmj8dlHb6nVn1wFSnv3792F1TtOFNvvnpu7Ko/g3GXbSn8glGGPKyXACk3ZvXs32AJo+fLlSowdPXoU7AjCjX1I69evj7Zt26rQZY8ePcowg6u/S7WZr+KpqIPqOfJNYlUcTPCHjV6f0ByEGS0IKrAj3
q7HAyEn0dyYqUpTML+V3i4TCsDZ8wWYL5vs/MFnz6pcb/yVGYiGHhbc4JeE71KrYOOxEGhTPfBQ7H4MDE1Atl2jhByPo9dnodlfeYE6mtJUvTGmmGTatJiWFIG9Kf4OD1SKJ3TZWtRokIQjFk/lBWOUINviAS+DFY29zBgVEq/6VGaq9BS+LOox41R1bDoegoIUerU0QPbpXDAPuyq7oTHaUTUsC4NbHlT5urMygvFPYhis5tOV9pkk75cLmD1RJzRVtZDr7ZuiogR87lKAmVjvi86y0x2fv0oLV8Wyj2d549GIeHT2SlOs+Nzkc5qbetaq7FtHHjcDkhxzUXYgfj5RFfo8HVpX8cW/7c685BYTYBe6fRjee3XhLrzcNRr+5wiwstxyFGDRU/7F/CEd0K5GIObtPYUm4X6qfMLk3o3QPjK4LMPIPm5E4MZ1w1VieSPPLOWt4S96CoQupjT4662I1FlwyGpUoqCOway8RPQIsYYVkxsZcuRdzPg6hQIFCj1Z9NQwtLgl11cVD2UJiD7eydidb4JZrS4sUAVK+SZDEccSFxSBPD/fUOjp4jgUKAxvUhhyYx4X/8yVkfFWoxI63G92ZrA6jkLo3oBTKhTJ1ZEspMr6ZrSNQo6FWnkevgmtzfHDxmxvhBvyVa2xLJuHyoWgKKxnMGOXxUd5127xScE+qwEH80zqgdnVKx3b8kwq34GLE+g1IxOeh/Nm/TJfjQ1Zdp0SX+RETyJXPvLcFH4JVg808jQr8UqRWk2fqwStKiF4et5JBXq1ApJMKVZ353or+zsGtriqdcBKu70vlwdswrufIbxq9RJPT49PaFgV9dnmDWvw5MN3grlgwSFh6NS1F/rcfIfK2yrcyirAUlOSMHvJ1qLj1qxcgvtu7YGpX/6GnjfeXPRzip+MjDQV4uTG4z58exxWLPkHiQknkX9aMOTlWVTIkKFDbqXZsWn9atzZrxNeHvfueaszv/n8YxVe/Hn2CjRr2VYJsHPt5Tn488SEU5i/Ykdpl65Mn19pAVaSUfR+ldb/sUyTuUo71Zo9Gu/X2w1/bQH+zfHB32kh2JPlowqlajxsCDXlIjtfj+erHoHn6XqL9FTxBY0vuI6FPFCeeEYM6A/i4qMD+UYlsijUZiRXwal0byDbA7fXPoLuASkq+kexwY0eNqZecP06Xzf549N57fhfWhgO55pUo+/sVBO0eVoEhGU5mn6rFkGOMhHs/BHlnY2Xw46ol2WawucnxeH3qeFYlh4AG5PoWSKCBVVZ74xesFzH+SIiMvFQ7EEVGdhpMeG7lHAczfKGNcdDlejwDcpSvwPGVYuDx+n0EY7PIRzRTEdok5EI5g2zzy69cCyrxGc5X9zpFmA+Mvd0RFhYrUCjfveol1qW0dBZVS7ywqxAxOcY0S4wEHNavFB0d5RJgPEEaTl5Snwxmb68GwVYj89XYttT3U+vG3OM0GX6cvz3cMcKjVleG2T/K0vgwU0Pq3yDbl5p2JtvVEmdzHdiUjvfZLj6htXw+fZFQUNXNb8QvOn5ReDDgF9oNp5mzRr2hORNHqnPwxGrh8oJYOL43jwv1Q5oU66Pesvq5Z2GenqLelgkFejUgyitQAv/0/VlEgt0amwKNSaZMome4ow28Y2FOWAUhd1MqSop9c/MMOWG57pBuqh5TEdTuiqVQSHF5H26oekmD9flg9KPNb+4Kshgh5ofw5ajg+PVWxzFGT2DFFftTcyHM6jwKhP8m7Fpt8VbebhaeqWrchP8ttH1zjIcFEwN9LlIKNDjiNVR344eO4qykzaDYsywAR+ArY103XN+OlTXcxWRw0XO7VC+UXnA+GBjQi7nSwHWxK8N+ksO2EW/KPQ2rVq+GMsWz8fif2arRPg+/W/H+9N+KJPwKRQuFRVgzMfavmUDnn5xHFq17QRvH0dy/g2dmpRLgFFM3tG3I557ZSIeHXFmVRbH+nrGR2ohAOuWNW3R5poSYFf2Ken8sz2xaSZaBKeqFzfW9
jqYY1IRgMJ2ORRhfMzWNZkR4OHwuDtkll1VvbdY9Sr0RjHB1nDsJUsRQeHFsCLFxR6zN3JzPaAt0CAqIAP1vBhmdHjsKUD4nOH5+cxh/UG2UGNxU6Z3ncr1REqeQXmj7AymWTXw881BpsVR71EZxzpaNi2Mxjz08EtVvwfoWeKzjflVO8zeSMrheklHNXzV9ofCTSXWO+bj4WlFw+B0lcDPZzTTX3juXKteCTAvQ76qkdgnMEkJKb7Q8lnJfGQKT/LgS6yKbqrUEEdpJIowPof5ss4zkV9h+LHwahY2OywMZ/L3Fsffl+sFg9aOifXuLJ8Au9TbhAKs1xersPXJbsWGogBb+kinSx1ejndBAiO2DEFdj1y08czCcZsH9qs3KB1upXjRaLDXyrIPGiQX6JTHh8VZ+falbnS1QtCOaqqqsCMWzzAavxz8UpxQAkajcp3YlihQm49NFl8V0qTo4wpFvsWxUjzfmHgMBR+9QxzRUXDV8abCcxdo7AjV2tBEb8VumxaHmJ+FAmy1+J7O7fJSgosLA6ywY4BvkvrzzjyTOhe9UPRi+ekcvS0ZFmWOGj1jfAui23xY4EmcsulUuJPtP7hAgSKSSa5NPbPRyWjB3nwdNuV5q5WTLU7niXEFJOcZpM1XYUsmx4ZorarRNwUdH0N8IFBQsQsAXeJMwI/ycBSgpbeMNqq6ZdCCj1d6EPlWx3wwPjD5d+7j790Z98gqyHJ9mwpriRV6i0rzPF2KADt29DC6taqrctCYi1a40RPWMbZ6uQQYvXhdmtfCoAeHY8zED4rN+e03XsL0T97G0k1xqFK1ugiwct0RsrMzCQwbNgwxMTEYOtS5PWqdaeOljFUmD9ilnIDHigC7VILudzw9YPTatPA046RNj3S71vFWpMKJjjcC1qZiGI3eFxYDdLyJOWrBUDTxTaLwLYJNsOkmpxijd4k1wiiq2IKI4TSKPOYaMM+KbYUYy2cpBnqamC9VXZfvyCGw6VQYkrkPzFlguyEKEYqQfL5AQas8dVzZSO9VQp4ngvT5SLLpVekKip3bfBKUyKL3KVhrVcu4+TbGECDf/hgK5XlPWo1qDvxzoNaqzsXE92iD2ZG75pGjhBPPVV2fB7Odyf7G00202XHBMX++gXIhA/PZKPZY9JU/p2eNuWvMRVOhA41DrIbpLQjUODoM0F7uyzdIzpOikBzYD0697Z3OV6BYq+3bDk/Wdr2l9YV3/+UKQZalDMXWTevwxafvYdzkqWBifuG2esV/uP+2nvjsmz9VKJKrGWfP/AnbDp9pq1ZSEn5FPGB7d21Hv27N8NLYdzDksTOrVQtzts5O4C+LHfSambOzsGjtPug9HLm9BQUFqtm51WbFghU71c+upRCk+z1pK7fFIsCccH0Tsy24/otV2PSEo05N4SYeMCfAlSGEwDVC4HIJsGHPjEZIaPHCyWcj7d33ViVM+lwXi1p16uG+B4erul8Jp45jxifvICU5Sa1o9PXzx7sTX8FnH05Sie2RdeqhR++bzitDUZKgKUsOGPO8urWOgpeXCWMmfghPo1GJvRxzNpjTxRWNb74/Ay1aty+THSwlMeKh29GhSw/cO2QY7AUF+Onb6Vi2eAE+mP4jbuh3mwiwa+S75arTFAHmhCvDlZLsiRjszbS8Mxvb9vxyd2snnEGGEAJCoLITuFwCrDRuLFPRsm1H7Nq+BR+9Ow6b169GelqqSsRv2/E6DH9mNGrVjVbDHIuPw2P334L4uINo1qod/vfLAqcJMI5PoTX+laexb/cOVQyV9cBGvjwev3z3Bd4e/xICg0Lwz6rdOHk8vlQ7ON6iBX/j0w/eVDXIWJ+Qiw4ee/LFYgsLxANW2h0in18uAiLALhdZVic+mY6YKv6X8QwytBAQApWFgLMFWGXhUlnm4QqrICsLy8oyDxFgleVKyjyEgBBwawIiwNz68pVqvAiwUhFdczuIALvmLrlMWAgIAVckIALMFa+K82wSAeY8lpVlJBFgb
nIlx44di1GjRsFkMrmJxWKmEBAC5SEgAqw8tNxvXxFg7nfNLrfFIsAuN2Enje/t7a36dYkAcxJQGUYIuBgBEWAudkGcbI4IMCcDrQTDiQBzk4tIAZaUlKSaqcomBIRA5SOw+3AqZq8+jP5dYyrf5GRG2HHgJDIzM3Ff7wZCQwgoAiLA3ORGoOcrOTlZBJibXC8xUwiUl0BKhgXv/LgRQ25uU95DZX83ILBs40FEhhrRo1UNN7BWTLwSBESAXQnKTjiHCDAnQJQhhICLE5jw9Tp0alEX1UKlfI2LX6pym/ft7A14uF9D1AjzLfexckDlJCACzE2uqwgwN7lQYqYQuAQCizbEY1d8Jq5vX/8SRpFDXY3A3rhE7Ik7gWfvbO5qpok9V5GACLCrCL88p6YAS0lJgdFoLM9hsq8QEAJuRmDcV2vRsnFNRNcMdTPLxdySCOTm5ePn+Ztxd89oNKoVJJCEQBEBEWBucjMw+T41NVUEmJtcLzFTCFSUwN74NEz9Yytu7toE1cMlFFlRjq5wXEGBHbOX7kBUhC/6d6rjCiaJDS5EQASYC12Mi5kiAsxNLpSYKQScQGDzvkR8MXsHrmtVF7H1qjlhRBniShM4kZiB5ZsOIrqGPwZ2jbrSp5fzuQEBEWBucJFoIgVYWloaPD2LN/x2E/PFTCEgBMpJ4GhCFn5bcgBZOfmIrhWGGlUCEeBrhE6rLedIsvuVIGC325GdmwcKr0NHk3HsVDpu6lQb7ZtUvRKnl3O4IQERYG5y0USAucmFEjOFgJMJ7Dqcgg17EnHgWDpSM3JgK7A7+QwynDMIaDQa+Hh5oEa4L2LrBKN9TDVoNc4YWcaorAREgLnJlWXyfXp6unjA3OR6iZlCQAgIASEgBC5GQASYm9wfIsDc5EKJmUJACAgBISAEykBABFgZILnCLhRgGRkZMBgMrmCO2CAEhIAQEAJCQAhcAoEhQ4agQYMGeOGFFy5hFNc9VGNnZmQl2ESAVYKLKFMQAkJACAgBIXCagHjA3ORW4OpHNnIVD5ibXDAxUwgIASEgBITARQiIAHOT20MEmJtcKDFTCAgBISAEhEAZCIgAKwMkV9iFAiwrKwseHh6uYI7YIASEgBAQAkJACFwCARFglwDvSh4qAuxK0pZzCQEhIASEgBC4vAREgF1evk4bXQSY01DKQEJACAgBISAErjoBEWBX/RKUzQAm32dnZ0sIsmy4ZC8hIASEgBAQAi5NQASYS1+eM8ZRgJnNZuj1ejexWMwUAkJACAgBISAEziawfft2NGnSRP3oXAFmsVgqVbebSlMHTASYfImFgBAQAkJACLgvgUOHDqFz585o2bIlJkyYgKlTpyImJgYNGzbEyJEjwX6i69evd98JnmO5CLBKcyllIkJACAgBISAE3JsAq99/++230Gq1CAkJUbU9U1JSkJubiylTpmD48OHuPcGzrK80AozlJ3JyciQEWWluTZmIEBACQkAIXGsE6AWrV68ebDZbsamHhoYiISGhUuGoVAKMClmn01WqCySTEQJCQAgIASFwLRF45JFH8M0334A5X9xMJhMmT55cqbxfnJcIsGvprpa5CgEhIASEgBBwcQIHDx5EdHR0kResMnq/RIC5+E0o5gkBISAEhIAQuBYJFHrBGNWqjN6vSiXAWH6C7koJQV6LX1WZsxAQAkJACFQmAvSCMRcsKCgIiYmJlWlqRXOpNCFICrC8vDy1ckI2ISAEhIAQEAJCwL0JDBgwAM2bN8eYMWPceyIXsL7SCLBu3bphwYIFUgm/Ut6mMikhcD6BtYcWYeuxNUjMPAarLV8QuREBDTTwNwWjXngs2tbujkBTqBtZ756mJh/6GxnHl8OSFQ97gdU9J+HiVmv1Rnj51YFfRBcEVO9WqrWVRoAx9EgPmIQgS73msoMQcGsCSVkn8fum6TDoTYgMaYwQn6rQaaUDhjtd1AK7HVm5KTiash+7T6xF35hBaF6zkztNwW1szc2Iw/EtH8JoC
oJ/WCN4+VWDVuvhNva7k6E2ay7M6UeRenILoDUiovmz0Hn4XHAKlUqA5efnSwjSne5WsVUIlJOAxZqDaUsnIDKkEaLCm5bzaNndFQmkZidg9YHZuLHxXWhUrZUrmui2NlktaYhb+RJCqrdCQJVmbjsPdzQ88chymDMSEdl+fOUXYMz9slqtIsDc8U4Vm4VAGQnM2fo9MvMyEVNDvCVlROYWuyVkHMGmuMV4ptdkt7DXXYw8sfUT6LT5CIvs7C4mVyo7j+2ZA2NgEwTXHVDivCqNB0wEWKW6b2UyQuA8Avm2fEye9xT6NHsQnnovIVTJCKw5MActa3ZGbPV2lWxmV2c6tvxM7Fv0OKLbPSEhx6tzCWDJTkD8rlmI6ja18gswti5gs07ZhIAQqHwE4pL2YMHOn9Gp/q2Vb3IyIxxM2ArYbegXO0hoOIFAVuJGpBz4HTUbl+x9ccIpZIgyENi/fjpqth0Lgyn8vL0rlQdMBFgZ7gbZRQi4KYFdJzZgTdx/aFPnBjedgZh9MQLHUvchNes47mg1TEA5gUDa0SXISViBqvV6O2E0GaKiBOK2fo/wxkPhFRAlAqyiEOU4ISAEri4BEWBXl//lPrsIMOcSFgHmXJ4VHe2aEGAMPRYUFEgIsqJ3iRwnBFycgAgwF79Al2ieCLBLBHjO4SLAnMuzoqNdMwLMbrdXlJEcJwSEgIsTEAHm4hfoEs0TAXaJAEWAORegk0a7JgQYK+EvXrzYSchkGCEgBFyNgAgwV7sizrVHBJhzeYoHzLk8KzraNSHAGIIUD1hFbxE5Tgi4PgERYK5/jS7FQhFgl0Lv/GNFgDmXZ0VHEwFWUXJynBAQAi5DQASYy1yKy2KICDDnYhUB5lyeFR1NBFhFyclxQkAIuAwBEWAucykuiyEiwJyLVQSYc3lWdDQRYBUlJ8cJASHgMgREgLnMpbgshogAcy5WEWDO5VnR0a4JASZJ+BW9PeQ4IeAeBCqLANuxZQfu6H0nFm36F+FVz6+O7R5Xw/lWigBzLlMRYM7lWdHRrgkBJkn4Fb095Dgh4DoEXnnlFYwfP75Eg1xdgNmsNsRWb1rMdoPBgIiaEbjlzpsxZOgQ6PQ6iAAr+X4TAVb+7+HFvi9lEWCbt+5F8073wNPTUHRyo6cBzZvWx1vjnkSblo3Lb5QTjvi/Gb9i6MMD1UgNWt6GdyY8jX43OK+heK3GN2HGx6+iZ7c2TrD24kOIALvsiOUEQkAIXCqBKVOm4MUXX0Tt2rXx2muv4Z577ik2pLsIsBfGPo/e/R3tX3LMOdi0bhMmvDwRgx4ehKdffkoE2AVuFBFg5fsGvf7663jzzTcRFRUF/vn2228vNkB5BFhOwkoYjQ4RZs7Jxdvvf42PPvsJx/fOg8HgUT7DLnHv4ycS0bb7A4jfNeeyCbDEpFT4+/k4ZW42WwF0Ou0FZ31NCLCQkBD8+OOP6Nmz5yVefjlcCAiBq0WgdevWWL9+PQICAhAWFlZMiLmLAJvwwXjccuctxRC+PfYdLJz9D+avnV8kwKb/NB0fvPkB9u3ahyoRVfDiGy+gS48u6rjDBw/jrTFvYdO6zbBarYhuFA0Ku9gWserz1JRUTHhpAlYvWwNzthnVI6vjkScfwU0D+6nP01LTMOnVSVjyz1LY7QWIalBPib9W7VpdrUtb6nlFgJWK6LwdYmJisH37dvj7+yMiIqKYEKuoAONJKFDC6vTC/i0zUbd2dVAUjXhuMnbsOgCdTodb+nXFuNGPQ6/XYdXarXhy1DtIz8hS92r/Ptcpj1Wn6x/CXQOvx9PDHC9S7FRTo2FffP7Jq/jlj4UICvTH4fgTOHY8Ackp6fjsg9Fo3bKR8nidOJmkzjv3949w461P4LEHb8NPvy3AvgNH0CC6Fn75+i1UqxoKiyUPL4/9BH/PW6a64MQ2qYepU15ESHAAEhJTMPjx17F3/xF17kYN6uDzT8YgPCwIhR4wb28jbr//xSKuKanp6NqpJeb89uFFx
/7y27/UHIxGT8QdOY6Ny767tgXY999/r26+8PBw3HDDDWjTpg2io6NRvXp1dcPIJgSEgOsTmDdvHgYOHIjs7Gxl7NlCrHm3+i7djLswBFmSAPtw0of465dZWLjhnyIB1rFbRzz/+iiEV62CiaMnYPniFVi2fama9209B6Ja9WoYN2UsGMZ8b8L7WPD3AizevFi9bY95dgziDx/Fu9Pehb+/H5YsXIKRjz6Hv1fMQkSNCAy9dxg0GmDclHHwD/THHz/8gTdfmYTZK/9W47riJgKs/Ffl999/x5AhQ5CRkaEOPluI9WofVmoz7sIQ5NkesGxzDsZP/hw//DIf+zb/AQ8PPXrcNBRNGtbF+2+NVB6ynjcNw/1391Vhwg49H8TDD9yCB+/rj9zcPDz+9EQlutZu2I6Pp/2Mrat+VLb9t2wD7nlotPJsDR85CXMXrMT6pd8gNCQQb733P8yevxxL503HwsVrMXjo6zi6+4wHLKJaGH7/7m2YvIzo3u9xdO3cEm+8MhTjJk3HvIWrsODPj+HjbcKTo97GqcQU/PTVm0qYHT12Cl9PG6dqhE6a8hU4Du0uKQR56PBxtOw8CD//b5IKTV5s7B9+nY9Hn5iAH7+aiL69O130wl0THjASOHHiBJYuXYpNmzZh3759iI+PR2JiItLT02E2m5Gfn48xY8YooSabEBACrklAr9erN+mzN6PRiO9nfYF07+NoU+cGlzS8JAHGN+9tm7bhicFPos8tN+LFN14sEmAzfp6O9l3aq7lsWLMB99/8gBJgQSFByoNF4WXyNqnP9+zYg1t73IZ5q+eiRq0aGHbfcPU8m/rNJ/DwcISIeC6tVqu8Z3069C3atxDWwF63o+eNPfD4s4+7JD8KsF/++AbTX/7dJe1zVaN4n+Tl5RUzz9PTE+uWfI0Qj/2oWs8RDi9pKxRgDMdxo1DJyMzGgJu6YdLYJxAdVVN5p0Jq9VCCiAKG2xff/IWvf/gb/82ZhjsHv4Ts7By88vxDaN2icVE4jh4xnpuiqlXzRkqYUSTRO8Y/FxTYMe3D0Wq8BYtW45ER43F4598lCrDRox7CfXf1UftSWNFr9r/PxiKm3Z148dnBuPeOG9VnB+OOIbr5AOQmrsL0r/7A+1O/x5Q3n1WCzdvkVYTgXAGWn29F594Po1uXVnjz9RFqv4uN/evMfzHq1feLwqQXuzeuGQF2IQh8m+YNyod6aGioq36PxC4hcM0TONcDxjf6KlWqqBcnd/GA8Rcik+258bmj0+ow4O5b8PzYF1TIojAJn96wqhFV1X6FP1uwbr7yYK1ethrTPpiuxBTHsBfYVdhx5n9/oF6Deti7ax9G3D8CWZlZaN+lHTp264Qb+vdWgm3JP0uUQCtpY2iUHjpX3MQDVv6r8scff2Dw4MFFHjA/Pz8V9aGToSIesLy8fDRucwdGPjEIjz90m+Pe3HUQTdregcgajnuVm9VmU2G+zSu+R2aWGW+99xVmzV2G+KOnMGTQTUrEMHds0MOvwt/fBx+89ZwSY//O+j8VJjxbjHG8s71eJXnAzk7Cf+WNqYg7fALfzngDobV7KsFn9PQssi0tPRPb1/yM6hFh+OyL3/Hjb/OxfuMu9OreFh9OHqV+fq4Ae2HMh1i6YhOWzZ+hwqrcLjb28lWb8fYHX2PDsm9LvWjXvAArlZDsIASEgEsQODsHjOkEFF6FyfjukgP27KvPolcfRy6qwdOA0PCwYkm6Ja2CPFuA0QPYp30f3P3g3RgxagSMXkYc2HsA/bvcXCTAODaTfzeu3YhVS1Zi7p/zQA/cD3O+x46tO1QIctWelfDz93OJ61oWI0SAlYVS8X1iY2Oxbds2nC28CpPxK5oD9tecpSp3as/G31R4MDUtA0E1uyMp7l8EB/lf1EjmdPW/81k8NuRWDHvkdvz73+dYefAAABmdSURBVFrc/eBofDF1DF4d/3/YtPx7dbyzBFjTDndjzAuP4Labu1/ULnr1Rox8C7mWPBViP
FuA0ft21+CXsXH5t6hV80x4/mJj//jrArzz4TcqhFraJgKsNELyuRAQAledwNmrIPkGf/fddxezyV0EWEk5YGdPpDQBtnv7bjw55Cms3rMKvv6+6tCZP83E6KdeKRJg9Ib5+vpC76FXn1ssFvRs2QtPvfgkWndorUKQX/z6Odp2alt06qNHjiGiRjWVrOyKmwiw8l2Vy7UKklb0vmWECjdSOHHr2X8YWjVvqMKSDHVPfv9r9fmdt/ZSOWDcj54tq9WG3gNG4NabumH4o3eokGbtJv3VC8iIx+7EM8MdCfkXE2BLV2zEzXeNxKkDC5QX7dwyFGd7wN54awb+W74Bf/00RYUY5yxYgYWL16iw48Mj3kC71jEqP43baxM/w87dB1UCf6EAi2kchaYd7sJHbz+P2wcUX8B3sbFFgJXvXpW9hYAQcAMClaEO2KUKsIy0DDBfi/ldXXp2waolqzDj48+xZvkafPb9p2Dy/g3tbkTXXtdh2HPD4OPriy3rN+PhOx7BZz98qgTY4/cMRVJiEqZMexcRNatj8fzFeH7o8/jity/QrFXxWmWucluIACv/lXBWHbCzk/BpBYVKs473YPHsz9CxXdOiVZBbtu1VntdWLRqq1YZhoUH4/a9FGDPhU2Rn56qFH726t8OHk58rqi1G4TPxnS9wdPdctQKxNAHGkGb7HoORkJiKmT+8iweHjS1WB+xsAcZVkKPHTQW9djabTdnz4duj0LpFIxU6HfbsJMQdPq4EYHRUJD794CXl5SoUYBu37MZLr3+MGhFnCiLXiqyqctsuNrYIsPLfq3KEEBACbkzgWvGAMQfsk3em4ocvflC/VDp27YBX33oVLw5/SYUc3/3sHYRVDcfbr0/Gts3bkW/JQ7UaEbj/0ftw+32OWlApyamqjMXShUuRZ7Egsm4tPP7MY7i+3/UueweIAHPupSlLCNK5Zyx5tGlf/q5WOP7545QrcTqXO4eEIF3ukohBQkAIlJeAqwuw8s5H9i9OQASYc+8IVxBgrOfFEOXX08aic4fmzp2gm4wmAsxNLpSYKQSEwIUJiACr3HeHCDDnXt+rLcAYevxk2s8Y9dT9eOGZB5w7OTcaTQSYG10sMVUICIGSCYgAq9x3hggw517fqy3AnDsb9x1NBJj7XjuxXAgIgdMERIBV7ltBBJhzr68IMOfyrOhoIsAqSk6OEwJCwGUIiABzmUtxWQwRAeZcrCLAnMuzoqOJAKsoOTlOCAgBlyEgAsxlLsVlMUQEmHOxigBzLs+KjiYCrKLk5DghIARchoAIMJe5FJfFEBFgzsUqAsy5PCs6mgiwipKT44SAEHAZAiLAXOZSXBZDRIA5F6sIMOfyrOhoIsAqSk6OEwJCwGUIiABzmUtxWQwRAeZcrCLAnMuzoqOJAKsoOTlOCAgBlyEQl7QHC3b+jE71b3UZm8QQ5xE4mLAVsNvQL3aQ8wa9hkfKStyIlAO/o2bjAdcwhas/9f3rp6Nm27EwmM60Oyq0SmNnt0zZhIAQEAIuTiDflo/J855Cn2YPwlPv5eLWinnlJbDmwBy0rNkZsdXblfdQ2b8EArb8TOxb9Dii2z0BrdZDGF0FApbsBMTvmoWoblNLPLsIsKtwUeSUQkAIVIzAnK3fIzMvEzE1OlVsADnKJQkkZBzBprjFeKbXZJe0z12NOrH1E+i0+QiL7OyuU3Bru4/tmQNjYBME1y3ZCykCzK0vrxgvBK4tAhZrDqYtnYDIkEaICm96bU2+ks42NTsBqw/Mxo2N70Kjaq0q6SyvzrSsljTErXwJIdVbIaBKs6tjxDV61sQjy2HOSERk+/EXJCAC7Bq9OWTaQsBdCSRlncTvm6bDoDchMqQxQnyqQqfVu+t0rkm7C+x2ZOWm4GjKfuw+sRZ9YwaheU3xal6OmyE3Iw7Ht3wIoykI/mGN4OVXTUKSlwM0AJs1F+b0o0g9uQXQGhHR/FnoPHxEgF0m3jKsEBACV4nA2kOLsPXYGiRmHoPVl
n+VrJDTVoSABhr4m4JRLzwWbWt3R6AptCLDyDHlIJB86G9kHF8OS1Y87AXWchwpu5aVgFZvhJdfHfhFdEFA9W6lHiYesFIRyQ5CQAgIASEgBITAlSYwbNgwxMTEYOjQoVf61FfkfCLArghmOYkQEAJCQAgIASFQHgIiwMpDS/YVAkJACAgBISAEhIATCIgAcwJEGUIICAEhIASEgBAQAuUhIAKsPLRkXyEgBISAEBACQkAIOIGACDAnQJQhhIAQEAJCQAgIASFQHgIiwMpDS/YVAkJACAgBISAEhIATCIgAcwJEGUIICAEhIASEgBAQAuUhIAKsPLRkXyEgBISAEBACQkAIOIGACDAnQJQhhIAQEAJCQAgIASFQHgIiwMpDS/YVAkJACAgBISAEhIATCIgAcwJEGUIICAEhIASEgBAQAuUhIAKsPLRkXyEgBISAEBACQkAIOIGACDAnQJQhhIAQEAJCQAgIASFQHgIiwMpDS/YVAkJACAgBISAEhIATCIgAcwJEGUIICAEhIASEgBAQAuUhIAKsPLRkXyEgBISAEBACQkAIOIGACDAnQJQhhIAQEAJCQAgIASFQHgIiwMpDS/YVAkJACAgBISAEhIATCIgAcwJEGUIICAEhIASEgBAQAuUh0K1bN1SpUgU//PBDeQ5zm301drvd7jbWiqFCQAgIASEgBIRApSWwfft2NGnSRM3vXA+YxWKBp6dnpZm7CLBKcyllIkJACAgBISAE3JfAoUOH0LlzZ7Rs2RITJkzA1KlTERMTg4YNG2LkyJHQaDRYv369+07wHMtFgFWaSykTEQJCQAgIASHg3gSGDBmCb7/9FlqtFiEhITAYDEhJSUFubi6mTJmC4cOHu/cEz7JeBFiluZQyESEgBISAEBAC7k2AXrB69erBZrMVm0hoaCgSEhLce3LiAatU108mIwSEgBAQAkKgUhF45JFH8M0334A5X9xMJhMmT55cqbxfnJd4wCrVbSuTEQJCQAgIASHg3gQOHjyI6OjoIi9YZfR+iQBz73tUrBcCQkAICAEhUCkJFHrBdDpdpfR+iQCrlLetTEoICAEhIASEgHsToBeMuWBBQUFITEx078lcwHoJQVbKyyqTEgJCQAgIASHg3gQGDBiA5s2bY8yYMe49ERFglfL6yaSEgBAQAkJACFSIAMuwL41Lwvx9CZh4faMKjSEHVZyAeMAqzk6OFAJCQAgIASHgtgQK7Hb0/moVMnLzMeeB9gg2Gdx2Lu5ouAgwd7xqYrMQEAJCQAgIgUsgwB6ESdkWNHp/EVJz8/HFrc1wX/Oa0FzCmHJo+QiIACsfL9lbCAgBISAEhIDbE6AAW3s0Fb2+WAlzng2jOkepMKRGFNgVu7YiwK4YajmREBACQkAICAHXIMD8r/8OJeGmb1bDnG/DwMbVMLprNLaeTMegZjVFiF2ByyQC7ApAllMIASEgBISAELiSBOjh4nYhhxYF2CdrDuLp2dthK7CrRtdGvVbtv39kT1T1NZbJXI6jziOeszLxOnsnEWDlRiYHCAEhIASEgBBwXQJMrp+/7xTCvI1oGRFQoqHc580l+/Dqwl0oFFHcUavR4I6YqviwXyxCvT0vOkmO8cu241gdn4K3bmishJxeq4GHTuu6cFzIMhFgLnQxxBQhIASEgBAQApdKwFpgxyMzN+Pu2Aj0igpTw2XnWXEyMxdRwT5KcNnsdkRPWYhDqebzThfk5YG/7muL9jWDUFAA6HUlu7d4ntu+X4uFBxJxa+Nq2HgsDa0iAjClTxNZUVmGiygCrAyQZBchIASEgBAQAu5CIN9WgBb/twTv3tBYCTC73Y7P1sXBbLHh2U5RsMOO+PQc1H77HxSGKs+eG8OJRp0Oj7SOhNVux8f9YksMMRYKsL92nSz6PNzbE3MfaI+m1fxlRWUpN4wIMHf5RomdQkAICAEhIATKQMBitcFv3BzMuq8tetULQ0GBHW8u24fsXKta6UgBtjY+BR0+W16iACs8B
YVYkJcBCS/foEKT525KgH23Fn/tPln0Efeb80A7XB8V5pS8MHrrDqRkK49aoJdHGWbvPruIAHOfayWWCgEhIASEgBAolUB6bj5CJ87D//WPwYOtaikBNnHpXuw8lYnv7mipjl91JAWdp11cgHE/5nTtf7YHIgO9yyzAKPxujA4vWgFQkfx8eubSc/OUp+6xP7aga50QjO/VsEQhWCoQJ+9ADyNXjvp46sFlCxS03DT0+Tn+KdMmAqxMmGQnISAEhIAQEAKuT4Aeo8/Wx+GJv7biw75N8Hi7Oio5ftTCXThwKhMzB7VROWBvLN6DsYv2lDohnUaDyX0a45kOdc8TFhfygCnPW1SoEikz1h1GdKgP+tWvUi6PGBP8x/67G+P/2wf+OcDogRWPdUbDUN9yjVPqBM/agcLV11OvVoSmmPOQZM5DdLBPsfNl5VnxxKxt+H3nCbSrEYgWVf2U8Ar39cSyuBS81C0aDUN8YPLQlXpqEWClIpIdhIAQEAJCQAi4B4ECOzBtXRyemLVV1fV6rUcDhwCbux1bTmRg4UMdlAAbu2g33li8t9RJMaTYsIovtg3v5tj3LA9PyQIM+HNQWyTm5mPykn2ITzPjifZ1irxXZfUOUXTd/eN6/LLjuLKXEdA+9cIwbUBzVPUzluplUis7y+CN4m7MkVt6KBlP/L0NRg8dfD20SMnNR3quFZ/0j1XePNrNMX/YehSPztysitcW8ijyfgEI9Tao+b7QpZ7yHl5sEwFW6u0nOwgBISAEhIAQcA8CFC4frjyI5+btwKCm1fHlbS3Uisf7f9mAFYdTcHBULzBi9sK8HXh3xYFSJ0XhUyfQG1uf6Irx/+1Fi2oBuK1JNSVIShJg3P/WRlWxOj4VxzJy1fhMzB/cogZ6RoWiUZivEjlJ2XmoF+yDLSfTVJ5ZjQBTMVFFm8MnzEVKTn5RnlqAlwcebVMLk3pdvGI/hdKYxbvRqqo/+jWogtx8G3YlZqmVoB0jg5UwOpGZi71JWfDS6zD+vz2YuzdBcTq7JAfn8mKXeko80ivGVaSDft6AxYeSiu13LkSK1o9uisHjbWpdNGQqAqzU2092EAJCQAgIASHgHgQowB7/Ywu+2HQE9zatjq9uawFrQQFqvbMASdn5ML/eT02kPAKspr8XljzcCQ0/WKRCi3/c20YJi5IE2IUoUczc3LAq2lYPwMIDSYhPy8HOp7tj4n97lMB6tVuDYqE+eu08X5ul8tfOXqk5qks9TLr+4rlgZGAYMws1/L2w6rHOytP3956T8DXo8dNdrdAozE95CUf/s0uJPoYaS9po88iOdVWNM25dZ6zAxuNpyC70fl3kluCigYPP9XKENC+wnwgw9/hOiZVCQAgIASEgBC5KgEKF3p57flqPv3afQt/oMMy8ry3WHktDp0+XqZCc+bV+0Gk1eGH+Dry7vHQPGE/oodVgZKcovL1sP7rWCcb8wR3UGOURYBxHCRGNI5THYq0po2/EyLnbVajv+ztbFvMW5dkKYHrtb5X/VUyAdY7CpN6NLuhZ4r57EjPR+IPFihUF1IL9Cdh6MkP9vWlEAFY80hGfrYnDqPk7VXj2gqIRQP+GVfDL3a3VfGu8tQDHMx1evdI27v/W9Y3wdMe60DIUeTqMevZxIsBKoyifCwEhIASEgBBwAwKUEodSs3H9l6twMCUbYd6eODyqF+YfTMKAr1crIfPidfVwLD0XFDg/bj1W5ll5eeiQa7WhSZgfNo7oqsJ45RVgZ5+Mx68d2gWDftmIukEm/HRXa/Ac3CjQuAIyeMI8lZ9VLgFmBwb/thHfbIpXY0UFe+NIeg4s1gL1dy4qmDu4HVYeTlGeMYYdL7aFmAzoXicETav6K8GaklOyt+zcMSg2e9YNxd8PtMP2kxnYlZiJu5pWV8Kx0CMmAqzMt5/sKASEgBAQAkLAdQlQVDFENvD7dTiekavylj7qF6O8TiNmbS3KWyoUABeXHiXPkwJmySMdVUV9eo/u+2UDF
h1MKjcUeoimD2iGCYv3Ki8Xk92r+HgqoUSxsishE++sOHBerhVDoDyOYs2g0yrvnMOz5pgVBdvDf2y+oLjkbq90jcapTAtmbDiizl2WrSLMeK5JvRqiiq8Rn286gjsbR+Dh1pHKbmWyndbKJgSEgBAQAkJACLgtAf4mXx2fjE9Wx+GX7ceVh4ubh06jPGGFCfGXOkEKEeY19a0frrxJP591rvKMTXHCkhKH08xqReHpRYvFhihJnLBheL1wP/SsFYRAo+F0y6Mze1IUfrPlKNYdTbugOUEmA8x5VuSe9oqVx+7y7st51g/2UcVkm1fzx+z72yPY26BEowiw8tKU/YWAEBACQkAIuCABirAdCRno87/VqoApU48Gt6iJdjWD8NjMzcqbRO8L2wvxLxdJf7rg7Bg6/Pb2Frg9JkKFIFtMXYIdp/OryoOEHrCf72qFlxbswqksCx5rHYkwH0/ltbPaCrDlRDp+2HbsPA/YDfXC8Os9rWHy0DuSygq11+k/M2A5eel+vPzPzhJXKpLJK13rY8vJdMzafapUDxjtZE2v6n5G1TezPKKthr8Rn9wUq/LdHvxjM7rXDsGntzSFj0EvHrDy3CyyrxAQAkJACAgBVyZALcK6W72+XIV9yVkq32jTiK4I9/FE1Tfnq4rtD7eKRIuIAPy4+SiWxCWXaTrUNvQaMf+JOWAbRnRVob9LzQHbMPw63PHDekSH+KjViZ76wuKldlXyocbkf87LAeNigMk3XDgJnxM6N4H/bJ1Gr90fg9pg28kMjPl3txJgwV4GJOfklSjYWlTzV83FIwNN6DFjhRJhZQkbUrgx1Plqt/qqJMe/BxLxctdo5TUs7OokHrAy3X6ykxAQAkJACAgB1ydA8TH0zy34amM8fI06pI7uq8pQmF7nikLg4/6xeKRVJMaXsRI+xQvzrb4e2AJ3/bQe3eqEqGbbFVkFqRLQ1SpIO/RaLTLG9MWdP67F3bE1cEdMRKllKGjL813q4c3rL14HjKLKb9xslaPWoU4I0rIs2HQ8XY1fJ9CE7U/1wOfrD+O9FQdQJ8ik8s/6f7MGe5KyHW2FTiss7s9E+tmcr0aDPUmZ6DpjORKz888ThrwzaJ/RoGdoETHhfkroMf+LVfUzLfmIDPAuNkcRYK7/fRILhYAQEAJCQAiUiYBKaF91CM/M3Y67YyPw9cCWaqVfzy9XYnlcsqoDRjExYclevLZwd6ljUoTUC/bGqse7YNraOKw8koI/BrVVY5THA+aoA1YF/l4GrDqcgh51QvBR/1hMXLwHDcJ8MbDxOQLMbkeVifOQbM4ryg9rGOqDb+9ohWbV/EuthM9E/kRzHmLD/PDrjuOYsny/Kgcx+752yptFUZSWm69CshH+Xlh0IBGzdp/E4VSzCoka9Fp0jgzGwCbVEFvF/7RwBP636QiWH07Ggn2JSM3JQ90gb2xPyFRir1agCUsf7qjCqByztKr/IsBKvf1kByEgBISAEBAC7kGAAuyztXGqrU5hIVYKsOfmbscnqw8hd9xNShisOZKCjmVoxs2cKdYT+/O+9kjNzVPi7pXu9S9cCR9Ai5qB2BifWiykR+/X2B71VT2xDcfS0DIiAEa9DsfSzaoyfoi3ZzHAnMeXG47g0ZlblFeqfogPVj3WBf5GjzL3giwMFdLjRs8gw46s5E/v3bkb0+J4zrOr4XMBA8XU2Xs79gHYkJt/pieP4ov/4xy5SID/LcsmAqwslGQfISAEhIAQEAJuQIBCglXeWXaCAuzLgS1UNflX/92l+h0ufaSTmsWRVDPqvLuwyLtEoVFSSQZ6uka0q4X3+sWq5HgWIo0MMKkxSvKAMUn/7T5NEOHricdmbkG6xapCeiaDDrMGtUXXOiFlokjxlJaTjw9XHsDe5Gx0qhWMx1vXKrP4KtNJrvJOIsCu8gWQ0wsBISAEhIAQcBYBCpeNx1LR/tNlmHpzLB5qVUsJsLeX78O+pGxVQ4s+HeaFBY2fo9rq1A/1wfRbm
qkK+vGqfhighUatTJy2/jDevrExnupQ97yQ2tkCrLDKPftGstApRdqwWVtRxdsTLar6Q++hU540CrqybpwLbbfYCtRKQib+V6ZNBFhlupoyFyEgBISAELjmCeTk29D4g0WqZQ/LRTAE9/uOE6pFz8td6yuBRW8XWwu98s8utKsRiD8HtVVtgb7ZdFS1G7oxOhz3NK2OW79biztjIy4swL5fq3Kn2PibDa9ZJuL1Hg1UGG7xwUTk2ezoXS+sUnmunHWDiQBzFkkZRwgIASEgBISACxCgZ+q5OdvRq14o+tSvogRYXKoZ8/cl4PE2tYsSyhcdTMSAb9fi1sZVMePW5pixPg5PzNqGabc0xeAWkSqv6aetx1AryIT2NYLOmxlF3KN/bMHsfadw4NmeyLJYEeDlUVTpvTCkWdacKBdAd0VNEAF2RXHLyYSAEBACQkAIXF4CFD5L45LhqdOifc0g5e06nJaDBfsS8OjpPCqG9yxWG6Kn/ItBzapjwvWNMG/vKdz78wYsebQTYsL8lJFn1zk912rmmx3NzIGXTotgb89SV/1d3lm73+giwNzvmonFQkAICAEhIAQuSoB5U1RPnnptiQKMB1OYvbtsPwY0rqp6O5rzbXhx3g681zemxJWCgty5BESAOZenjCYEhIAQEAJCQAgIgVIJ/D87BnvY54AKRwAAAABJRU5ErkJggg==)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3sdmUcLn1WJH" + }, + "source": [ + "In spectral masking, the system maps noisy spectrograms into clean ones. This mapping is generally considered easier than waveform-to-waveform mapping. However, retrieving the signal in the time domain requires adding the phase information. The common solution (reasonable, but not ideal) consists to use the phase of the noisy signal. 
Waveform-masking approaches do not suffer from this limitation and are progressively gaining popularity within the community.\n", + "\n", + "It is worth mentioning that SpeechBrain currently supports even more advanced solutions for speech enhancement such as [MetricGAN+](https://arxiv.org/abs/2104.03538) (that learns the PESQ metric within an adversarial training framework) and [MimicLoss](https://github.com/speechbrain/speechbrain/tree/develop/recipes/Voicebank/MTL/ASR_enhance) (that achieves better enhancement using the information derived from a speech recognizer).\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pYKnhdQK8oGy" + }, + "source": [ + "In this tutorial, we will guide you through the creation of a simple speech enhancement system based on spectral masking.\n", + "\n", + "In particular, we will refer to the example reported here:\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CLBBM4rBxPsI" + }, + "source": [ + "The README provides a nice introduction, so it is reproduced here:\n", + "\n", + "==========================\n", + "\n", + "This folder provides a working, well-documented example for training a speech enhancement model from scratch, based on a few hours of data. 
The data we use is from Mini Librispeech + OpenRIR.\n", + "\n", + "There are four files here:\n", + "\n", + " * `train.py`: the main code file, outlines entire training process.\n", + " * `train.yaml`: the hyperparameters file, sets all parameters of execution.\n", + " * `custom_model.py`: A file containing the definition of a PyTorch module.\n", + " * `mini_librispeech_prepare.py`: If necessary, downloads and prepares data manifests.\n", + "\n", + "To train an enhancement model, just execute the following on the command-line:\n", + "\n", + " python train.py train.yaml --data_folder /path/to/save/mini_librispeech\n", + "\n", + "This will automatically download and prepare the data manifest for mini librispeech, and then train a model with dynamically generated noisy samples, using noise, reverberation, and babble.\n", + "\n", + "=========================\n", + "\n", + "So to start, let's make sure we can just run the template without modifications." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LiREw1_tQUR5" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "\n", + "# Clone SpeechBrain repository\n", + "!git clone https://github.com/speechbrain/speechbrain/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "g8rw3XzK2FmK" + }, + "outputs": [], + "source": [ + "import speechbrain as sb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sgtDshhF5M6G" + }, + "outputs": [], + "source": [ + "%cd speechbrain/templates/enhancement\n", + "!python train.py train.yaml --device='cpu' --debug" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DUIwtVMG0ozq" + }, + "source": [ + "## Recipe overview in Train.py\n", + "\n", + "Let's start with the highest-level view of the recipe and work our 
way down. To do this, we should look at the bottom of the recipes where the `if __name__ == \"__main__\":` block defines the recipe structure. The basic process is:\n", + "\n", + "1. Load hyperparameters and command line overrides.\n", + "2. Prepare data manifests and loading objects.\n", + "3. Instantiate `SEBrain` sub-class as `se_brain`.\n", + "4. Call `se_brain.fit()` to perform training.\n", + "5. Call `se_brain.evaluate()` to check final performance.\n", + "\n", + "And that's it! Before we go and actually run this code, let's manually define the `SEBrain` sub-class of the `Brain` class. If you want a more in-depth tutorial about how the `Brain` class works, checkout the [Brain tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html).\n", + "\n", + "For simplicity, we'll just define the sub-class with just the first method override and then add the other overrides one-by-one. The first method is the `compute_forward` method which simply defines how the data is used by the model to make predictions. The return values should include any predictions made by the model. For this case specifically, the method computes the relevant features, computes a predicted mask, then applies the mask and re-computes time-domain signals." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xaokPtFO0lMo" + }, + "outputs": [], + "source": [ + "class SEBrain(sb.Brain):\n", + " \"\"\"Class that manages the training loop. 
See speechbrain.core.Brain.\"\"\"\n", + "\n", + " def compute_forward(self, batch, stage):\n", + " \"\"\"Apply masking to convert from noisy waveforms to enhanced signals.\n", + "\n", + " Arguments\n", + " ---------\n", + " batch : PaddedBatch\n", + " This batch object contains all the relevant tensors for computation.\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.\n", + "\n", + " Returns\n", + " -------\n", + " predictions : dict\n", + " A dictionary with keys {\"spec\", \"wav\"} with predicted features.\n", + " \"\"\"\n", + "\n", + " # We first move the batch to the appropriate device, and\n", + " # compute the features necessary for masking.\n", + " batch = batch.to(self.device)\n", + " self.clean_wavs, self.lens = batch.clean_sig\n", + "\n", + " noisy_wavs, self.lens = self.hparams.wav_augment(\n", + " self.clean_wavs, self.lens\n", + " )\n", + "\n", + " noisy_feats = self.compute_feats(noisy_wavs)\n", + "\n", + " # Masking is done here with the \"signal approximation (SA)\" algorithm.\n", + " # The masked input is compared directly with clean speech targets.\n", + " mask = self.modules.model(noisy_feats)\n", + " predict_spec = torch.mul(mask, noisy_feats)\n", + "\n", + " # Also return predicted wav, for evaluation. Note that this could\n", + " # also be used for a time-domain loss term.\n", + " predict_wav = self.hparams.resynth(\n", + " torch.expm1(predict_spec), noisy_wavs\n", + " )\n", + "\n", + " # Return a dictionary so we don't have to remember the order\n", + " return {\"spec\": predict_spec, \"wav\": predict_wav}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sA89RfLe4fiy" + }, + "source": [ + "If you're wondring here what the `self.modules` and `self.hparams` objects are, you're asking the right questions. These objects are constructed when the `SEBrain` class is instantiated, and come directly from the `dict` arguments to the initializer: `modules` and `hparams`. 
The keys to the dict provide the name that you use to reference the object, e.g. passing `{\"model\": model}` for `modules` would allow you to access the model with `self.modules.model`.\n", + "\n", + "The other method that is required to be defined in a `Brain` sub-class is the `compute_objectives` function. We sub-class `SEBrain` itself just to provide a convenient way to split up the class definition, don't use this technique in production code!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4QlVFmDm3spK" + }, + "outputs": [], + "source": [ + "class SEBrain(SEBrain):\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " \"\"\"Computes the loss given the predicted and targeted outputs.\n", + "\n", + " Arguments\n", + " ---------\n", + " predictions : dict\n", + " The output dict from `compute_forward`.\n", + " batch : PaddedBatch\n", + " This batch object contains all the relevant tensors for computation.\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.\n", + "\n", + " Returns\n", + " -------\n", + " loss : torch.Tensor\n", + " A one-element tensor used for backpropagating the gradient.\n", + " \"\"\"\n", + "\n", + " # Prepare clean targets for comparison\n", + " clean_spec = self.compute_feats(self.clean_wavs)\n", + "\n", + " # Directly compare the masked spectrograms with the clean targets\n", + " loss = sb.nnet.losses.mse_loss(\n", + " predictions[\"spec\"], clean_spec, self.lens\n", + " )\n", + "\n", + " # Append this batch of losses to the loss metric for easy\n", + " self.loss_metric.append(\n", + " batch.id,\n", + " predictions[\"spec\"],\n", + " clean_spec,\n", + " self.lens,\n", + " reduction=\"batch\",\n", + " )\n", + "\n", + " # Some evaluations are slower, and we only want to perform them\n", + " # on the validation set.\n", + " if stage != sb.Stage.TRAIN:\n", + "\n", + " # Evaluate speech intelligibility as an additional metric\n", + " 
self.stoi_metric.append(\n", + " batch.id,\n", + " predictions[\"wav\"],\n", + " self.clean_wavs,\n", + " self.lens,\n", + " reduction=\"batch\",\n", + " )\n", + "\n", + " return loss" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TU3wE6P6-nmo" + }, + "source": [ + "Both of these methods use a third method that is not an override called `compute_feats`, we'll quickly define it here:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3rpiY7PU-z-x" + }, + "outputs": [], + "source": [ + "class SEBrain(SEBrain):\n", + " def compute_feats(self, wavs):\n", + " \"\"\"Returns corresponding log-spectral features of the input waveforms.\n", + "\n", + " Arguments\n", + " ---------\n", + " wavs : torch.Tensor\n", + " The batch of waveforms to convert to log-spectral features.\n", + " \"\"\"\n", + "\n", + " # Log-spectral features\n", + " feats = self.hparams.compute_STFT(wavs)\n", + " feats = sb.processing.features.spectral_magnitude(feats, power=0.5)\n", + "\n", + " # Log1p reduces the emphasis on small differences\n", + " feats = torch.log1p(feats)\n", + "\n", + " return feats" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "N0Z6XX3h_UfO" + }, + "source": [ + "There's only two more methods defined, which are used to keep track of statistics and save checkpoints. These are the `on_stage_start` and `on_stage_end` methods, and they're called by `fit()` before and after iterating each dataset respectively. 
Before each stage, we set up the metric trackers:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8gUTO6rq__I2" + }, + "outputs": [], + "source": [ + "class SEBrain(SEBrain):\n", + " def on_stage_start(self, stage, epoch=None):\n", + " \"\"\"Gets called at the beginning of each epoch.\n", + "\n", + " Arguments\n", + " ---------\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.\n", + " epoch : int\n", + " The currently-starting epoch. This is passed\n", + " `None` during the test stage.\n", + " \"\"\"\n", + "\n", + " # Set up statistics trackers for this stage\n", + " self.loss_metric = sb.utils.metric_stats.MetricStats(\n", + " metric=sb.nnet.losses.mse_loss\n", + " )\n", + "\n", + " # Set up evaluation-only statistics trackers\n", + " if stage != sb.Stage.TRAIN:\n", + " self.stoi_metric = sb.utils.metric_stats.MetricStats(\n", + " metric=sb.nnet.loss.stoi_loss.stoi_loss\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MH0-TsmFBKsi" + }, + "source": [ + "After the validation stage, we use the trackers to summarize the stats, and save a checkpoint." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NzW4K0wjAnjz" + }, + "outputs": [], + "source": [ + "class SEBrain(SEBrain):\n", + " def on_stage_end(self, stage, stage_loss, epoch=None):\n", + " \"\"\"Gets called at the end of an epoch.\n", + "\n", + " Arguments\n", + " ---------\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST\n", + " stage_loss : float\n", + " The average loss for all of the data processed in this stage.\n", + " epoch : int\n", + " The currently-starting epoch. 
This is passed\n", + " `None` during the test stage.\n", + " \"\"\"\n", + "\n", + " # Store the train loss until the validation stage.\n", + " if stage == sb.Stage.TRAIN:\n", + " self.train_loss = stage_loss\n", + "\n", + " # Summarize the statistics from the stage for record-keeping.\n", + " else:\n", + " stats = {\n", + " \"loss\": stage_loss,\n", + " \"stoi\": -self.stoi_metric.summarize(\"average\"),\n", + " }\n", + "\n", + " # At the end of validation, we can write stats and checkpoints\n", + " if stage == sb.Stage.VALID:\n", + " # The train_logger writes a summary to stdout and to the logfile.\n", + " self.hparams.train_logger.log_stats(\n", + " {\"Epoch\": epoch},\n", + " train_stats={\"loss\": self.train_loss},\n", + " valid_stats=stats,\n", + " )\n", + "\n", + " # Save the current checkpoint and delete previous checkpoints,\n", + " # unless they have the current best STOI score.\n", + " self.checkpointer.save_and_keep_only(meta=stats, max_keys=[\"stoi\"])\n", + "\n", + " # We also write statistics about test data to stdout and to the logfile.\n", + " if stage == sb.Stage.TEST:\n", + " self.hparams.train_logger.log_stats(\n", + " {\"Epoch loaded\": self.hparams.epoch_counter.current},\n", + " test_stats=stats,\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VDGs6Va8Bg-M" + }, + "source": [ + "Okay, that's everything you need to define the `SEBrain` class! The only thing left before we can actually run this thing is the data loading functions. We'll use `DynamicItemDatasets` which you can learn more about in the [Tutorial on Data Loading](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html). We need only to define the function that loads audio data, and we can use that to create all our datasets!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5Tfh42jrBfGt" + }, + "outputs": [], + "source": [ + "def dataio_prep(hparams):\n", + " \"\"\"This function prepares the datasets to be used in the brain class.\n", + " It also defines the data processing pipeline through user-defined functions.\n", + "\n", + " We expect `prepare_mini_librispeech` to have been called before this,\n", + " so that the `train.json` and `valid.json` manifest files are available.\n", + "\n", + " Arguments\n", + " ---------\n", + " hparams : dict\n", + " This dictionary is loaded from the `train.yaml` file, and it includes\n", + " all the hyperparameters needed for dataset construction and loading.\n", + "\n", + " Returns\n", + " -------\n", + " datasets : dict\n", + " Contains two keys, \"train\" and \"valid\" that correspond\n", + " to the appropriate DynamicItemDataset object.\n", + " \"\"\"\n", + "\n", + " # Define audio pipeline. Adds noise, reverb, and babble on-the-fly.\n", + " # Of course for a real enhancement dataset, you'd want a fixed valid set.\n", + " @sb.utils.data_pipeline.takes(\"wav\")\n", + " @sb.utils.data_pipeline.provides(\"clean_sig\")\n", + " def audio_pipeline(wav):\n", + " \"\"\"Load the signal, and pass it and its length to the corruption class.\n", + " This is done on the CPU in the `collate_fn`.\"\"\"\n", + " clean_sig = sb.dataio.dataio.read_audio(wav)\n", + " return clean_sig\n", + "\n", + " # Define datasets sorted by ascending lengths for efficiency\n", + " datasets = {}\n", + " data_info = {\n", + " \"train\": hparams[\"train_annotation\"],\n", + " \"valid\": hparams[\"valid_annotation\"],\n", + " \"test\": hparams[\"test_annotation\"],\n", + " }\n", + " hparams[\"dataloader_options\"][\"shuffle\"] = False\n", + " for dataset in data_info:\n", + " datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(\n", + " json_path=data_info[dataset],\n", + " replacements={\"data_root\": hparams[\"data_folder\"]},\n", 
+ " dynamic_items=[audio_pipeline],\n", + " output_keys=[\"id\", \"clean_sig\"],\n", + " ).filtered_sorted(sort_key=\"length\")\n", + " return datasets\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GYcWWQnHDJis" + }, + "source": [ + "Now that we've defined all the code in `train.py` other than the `__main__` block, we can start running our recipe! This code is edited slightly to simplify the parts that don't necessarily apply to running the code in Colab. The first step is to load the hyperparameters. This creates a bunch of the needed objects automatically. You can find more info about how `HyperPyYAML` works in our [HyperPyYAML tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html). In addition, we'll create the folder for storing experimental data, checkpoints and statistics." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AgFzrDmQDel1" + }, + "outputs": [], + "source": [ + "from hyperpyyaml import load_hyperpyyaml\n", + "with open(\"train.yaml\") as fin:\n", + " hparams = load_hyperpyyaml(fin)\n", + "sb.create_experiment_directory(hparams[\"output_folder\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JnlXGHTFE3fc" + }, + "source": [ + "As easily as that, we have access to our pytorch model, among many other hyperparameters. 
You can explore the `hparams` object at your leisure, but here's a few examples:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-fJr6YByFGgW" + }, + "outputs": [], + "source": [ + "# Already-applied random seed\n", + "hparams[\"seed\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ASg3COH_FWK3" + }, + "outputs": [], + "source": [ + "# STFT function\n", + "hparams[\"compute_STFT\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "pTexBbI_Fnyk" + }, + "outputs": [], + "source": [ + "# Masking model\n", + "hparams[\"model\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "n9P7qvRLH-ts" + }, + "source": [ + "Prepare the data manifests and create the dataset objects using them with the function we defined earlier:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xcGLOe9KIQoz" + }, + "outputs": [], + "source": [ + "from mini_librispeech_prepare import prepare_mini_librispeech\n", + "prepare_mini_librispeech(\n", + " data_folder=hparams[\"data_folder\"],\n", + " save_json_train=hparams[\"train_annotation\"],\n", + " save_json_valid=hparams[\"valid_annotation\"],\n", + " save_json_test=hparams[\"test_annotation\"],\n", + ")\n", + "datasets = dataio_prep(hparams)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zv6pfpsARkr4" + }, + "source": [ + "We can check that the data is being loaded correctly by seeing the first items:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "yurn8bQWRrT6" + }, + "outputs": [], + "source": [ + "import torch\n", + "datasets[\"train\"][0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nJO6glavRun3" + }, + "outputs": [], + "source": [ + "datasets[\"valid\"][0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "L6SOW1oiRNyV" + }, + "source": [ + 
"Instantiate the SEBrain object to prepare for training:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c_F3CkDwRM9e" + }, + "outputs": [], + "source": [ + "se_brain = SEBrain(\n", + " modules=hparams[\"modules\"],\n", + " opt_class=hparams[\"opt_class\"],\n", + " hparams=hparams,\n", + " checkpointer=hparams[\"checkpointer\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Hotx8CmwSPPY" + }, + "source": [ + "And then call `fit()` to do the training! The `fit()` method iterates the training loop, calling the methods necessary to update the parameters of the model. Since all objects with changing state are managed by the Checkpointer, training can be stopped at any point, and will be resumed on next call." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5AfAGLn-SVbV" + }, + "outputs": [], + "source": [ + "se_brain.fit(\n", + " epoch_counter=se_brain.hparams.epoch_counter,\n", + " train_set=datasets[\"train\"],\n", + " valid_set=datasets[\"valid\"],\n", + " train_loader_kwargs=hparams[\"dataloader_options\"],\n", + " valid_loader_kwargs=hparams[\"dataloader_options\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BIb_ie_fSo89" + }, + "source": [ + "Once training is complete, we can load the checkpoint that had the best performance on validation data (as measured by STOI) to evaluate." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "diWyNY-hS3h8" + }, + "outputs": [], + "source": [ + "se_brain.evaluate(\n", + " test_set=datasets[\"test\"],\n", + " max_key=\"stoi\",\n", + " test_loader_kwargs=hparams[\"dataloader_options\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", 
+ " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/tasks/speech-recognition-from-scratch.ipynb b/docs/tutorials/tasks/speech-recognition-from-scratch.ipynb new file mode 100644 index 0000000000..5203b88ea7 --- /dev/null +++ b/docs/tutorials/tasks/speech-recognition-from-scratch.ipynb @@ -0,0 +1,2398 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/tasks/speech-recognition-from-scratch.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/tasks/speech-recognition-from-scratch.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uo0JP7a5uFp7" + }, + "source": [ + "# Speech Recognition From Scratch\n", + "\n", + "Ready to dive into the world of building your own speech recognizer using SpeechBrain?\n", + "\n", + "You're in luck because this tutorial is what you are looking for! 
We'll guide you through the whole process of setting up an offline **end-to-end attention-based speech recognizer**.\n", + "\n", + "But before we jump in, let's take a quick look at speech recognition and check out the cool techniques that SpeechBrain brings to the table.\n", + "\n", + "Let's get started! 🚀\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nUYxDoJKEk2J" + }, + "source": [ + "## Overview of Speech Recognition\n", + "In the figure, we show an example of a typical speech recognition pipeline used in SpeechBrain:\n", + "\n", + "\n", + "![SpeechBrain-Page-2.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYcAAAGLCAYAAAAs3F4FAAAJzHRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDYtMTJUMTYlM0EzOSUzQTA2LjQ3MlolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY4MS4wLjQwNDQuMTIyJTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE0LjcuMiUyMiUyMGV0YWclM0QlMjJsa184cjJaRlRJNldKLUFxV3VpcyUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjIzcThPazVMejBSeU1KbVBFOGFNaCUyMiUzRTdWcmJWdHM0RlAwYUhzM3klMkZmSVlRZ0xUaG5KSkdNcThkTW15ZkFISENySk1rbjc5U0xGOGpRTnVtNlJkdyUyRkNDZFhUT3NYeTI5cFl0NVVRYnpsY1hCQ3pDSyUyQnloJTJCRVNWdmRXSmRuNmlxb3BtR093ZnQ2eHppMlZZdVNFZ2tTZWNLc00wJTJCbzZFVVJiV0xQSlEybkNrR01jMFdqU05FQ2NKZ3JSaEE0VGdaZFBOeDNIenJnc1FvQzNERklKNDIlMkZvUWVUVE1yYlloViUyRlpMRkFWaGNXZEZGajF6VURnTFF4b0NEeTlySm0xMG9nMEp4alMlMkZtcSUyQkdLT2JGSyUyQnFTeDQxMzlKWURJeWloZlFMVVBPQVZ4Smw0dGhQVmpGbm8yUnhSd0VzWUFwS2lQSmY1a3ZGeG5XWFVsJTJCeXF5YTRDOFg4VDZmTG5vbXRSck1MTHh3bVZscUlxQSUyQmFTWURJSGNUMU5NYXJDNG1HWVNsRkNFVWxBTEhIUUpjZlZQZE5SUGNueWZWOXlGZWhKdXV1cWtnWmMxd2FxN2lGZDJ6MnlhQjZ3dXl4THlBbyUyRnhWUVhxOXl2R2t4WUlWaFlkWDNiTFNXdzRSTlN5bWZoZ05kY0hjZWhlUnBnSE1Rb1N4Rmg4NUV5WUU0aG5yTSUyQjhqbSUyQlZPSGZTeGMlMkZqeiUyRmZhMmdRUFg2NnZYOGNBMnpkMCUyQnpibDlFJTJGbDJjdjQ4ZGIlMkZUdWRCRE0zMU0lMkJIeWZYc2NnbXZ2MDB1cndQJTJGSEdTdmtXNUs5RlZUbzN
0ajhxUTlERzQlMkJEYVVYeTVsYTMlMkJCcUNYeCUyRmRYRjElMkJYd3J3ZHRKJTJCS1JkMzJucjE0ZSUyRm5nMXlON3pSUiUyRlpGTkJ3dmNmeXczRjB4Tmt5M3Nxa05aRldLVnR5JTJCRENPS3Bnc0F1WG5KR0wlMkJwd2p4bUxhVU1lMFdFdWUlMkJjcFVvNTk1bG9JTXdtSDFrekZ4RmdPM21Fa0F2ZEVSa3FKRlhGRVl5cXdhYWF3Z1lFMzRNeWMwVUpkaUZZMGMwUXJZTWhyVG9najZtRGFHSkNReHhnTm1GSGxmV000Q3p4RU04b3M5WlRObDhJVVdNUGZWYUZUREJlaUtvOUlVclh3Z2xrRkRkcm1sSkFxT2psR1ZGU3FHVGVTJTJGQXpHdUlZazgwQXRmRllabjlsVDZGWmFva09mNElHTmluT0NCUW1YU2dzSUlFUUF0WHVqU0JCTWFEUmF6UDdyJTJCQ2g3MVFzcmpDN3BTZmQxSWNMajhKWnZEWGpyeE9KaGtqeVl6N3FVc3BJMFQxZElBUkQxalZreWNBOFN0aFQ0YVJHayUyRnp1TzVoUzRhJTJCOFR4ZyUyRml1TWFlRXpvVkFpN1lQVk0xelRNJTJGVkJNS2RiV2dtTnFCOGZVQTNITTZNR3h4QnZ3bFp1MVlBelNOSUklMkZ6b2o2dkc4VjJRUEk5anVMWEhKbkIxTjZsTGhXUXFPamdvV3ROMVhFSFc1d3RKbndCWUttYzJvME1EVGtWcEtjMVNLdSUyRmxid2JpcmRicVhLMVdBcjFRYnA4dEY3Z1clMkZ1SkhRV3Q1ZWpPQ29zczJpT0pJJTJGZ0JjNW9qWVdWdzNiSW1LQ1hEQ1Z3JTJGWU54blBvZVozNElFZ1olMkJ2NkF2T0VyNyUyQnQ0aFJsMFhrYmFrYk1jd1k3MG8zZXR4Z3hYMTJaemdCTFdtdmpDQk9Bb1Mxb3lSenpOd0pZblk2JTJCNUFtT2VSNTIzV3NTN2xhcTV0ZTVBaVZlc2hSZmFCcE1qNjhNdTl2YjNjSyUyRjB4M1B0NmJ4OW92UjhqUURPQ09oZjcwWW9TQUklMkIzd0NOenh3SnZPYTY4SjFhMUYzaE5sNCUyQjN3RHMlMkZwZkhUMlhqV1g2TWhxMTVBd0x6dmFuQTIlMkJQSTU3ZWw4TlI0TzMlMkZiOWtNcXNhYzd4bExtWXZ4OVltZ3NkYm1pejl2dTB1UmpQM3NXNSUyQk5wcUMlMkZNZGdqaElXREE1aWpEYkVIVUxzMnNidW5FZ1lTN0pjZ3hoVm5adkFMNmx6TVBac0tkeXpnaElVaSUyQkRpUFRWMmxFQ3NjY0JIcDhqY2ZXJTJGN3ZhWUlnZlQzYTRkc0gyUSUyRkF5QmVZb0FxWWglMkJTRElqeFRPUTFVVm14N1EwY0tCdEZGWHVTV1o5SDBqdDNodDdpOHdYaEgzZjhtZVl0c0Y0aTZaMTlOalklMkJWYjQ1T3JuWXplS3dxJTJGZlNmSWglMkJhNDQ5aEg1M21jMzdqJTJGJTJCbm1YJTJCWWU5WjIzdGtuQ1l0VU5JUUxQZ2xYTWNScXozUjN0ZElOMGRwNHBZR0FKJTJCRERYYlhHV1ZweWdLTFdodmJ3dW9xbnVmTFhTQW9zcVU1YUUlMkJVYUROQzMyS0UyVUVJUTkxRDlidjJoTXFqVG5GNmNEJTJCN3VXY2ZxJTJGSnM5TFglMkJ6ZnJ1NGRrdlNCVkUlMkZHajB0NHVWVXN6T0FwcmlNTDh1VmtvSE51byUyQnhNciUyQkE3UUpyU0w2bFdjN05VVHJzZFp6dmlva2l6ZldvbkZjUGJNNjlNejZqWHJXdFIlMkZVaHZIJTJGQTUlMkYzRG53c3VYVktvemhPTTBudkE1JTJGdFZGWXIxVThmJTJCUEJmT3BTJTJGWjhuZHExOEZhYU4lMkZBUSUzRCUzRCUzQyUyRmRpYWdyYW0lM0U
lM0MlMkZteGZpbGUlM0X8bENDAAAgAElEQVR4XuxdBXRUyRK9ECQQAsGCu7u7w2KLL+7u7u7uDsvi7u7u7i4JDskSCBA0QCD/3M6f7CREJsnMm5mk+pw9LPO6u7pvP/q+quqqjuTt7e0NKYKAICAICAKCgB4CkYQc5H0QBAQBQUAQ8I+AkIO8E4KAICAICAK/ISDkIC+FICAICAKCgJCDvAOCgCAgCAgCwSMgmkPwGEkNQUAQEAQiHAJCDla85Js3b8bEiRNx8+ZNRI0aFUWKFMHo0aNRsGBBNasBAwao5xcvXkT+/PnVb58+fYK9vT1q1KiBBAkSYPHixQEiMHnyZPV73759fZ/b2dkhd+7cmD59OgoUKBDm/vv06ePbd/r06fHw4cMAx8Lxly5dGp8/f/7tee/evdGpUydky5YNZcuWxe7du1Wdy5cvqzG2atVK/T2oeeqPw4pfBxm6IGBUBIQcjAqndp0tXbpUbXzJkiVDnTp18OXLF6xevRo/f/7EyZMnDdq8uaneuHFDDXrw4MFInDgxunbtqv7OzfjYsWOKHNq0aYNMmTLh9evXmDdvHkgSTk5OGDt2bJDkE1z/OsLSbd7v3r3DixcvMHPmTJQpUwZ//vmnGkvTpk2RLl06xI8f33d8OqRJAKVKlcKYMWMwdOhQRQ5sV6JECUWaDx48wLVr14Kcp/44tFtBkSQIWDYCQg6WvT4Bju7Xr19ImjQpvn37hvv378PR0VHVO378uNrU//jjDxw8eDDYL/tt27b59h8rVixkz54d586d8/1typQpihz27t2LSpUqqd+HDx+OUaNG4ejRo9i3b1+Q5BBc/wFN7tKlS4rY+vfvjwkTJgQ5Pv32379/R86cOdVPQ4YMUYRCDadHjx5+xAQ0Tyt8BWTIgoDJERByMDnExhdw+/ZttZG3bt0aixYt8iOAGyQJg5oEtYGgzErBbd4BkcO0adNAU87OnTtx6tSpMPUfUnLIkCEDtm7d6qcZSTJatGjqNxIWTUs0sbHu9evXESVKFCEH47+C0mMEQEDIwQoX+fTp0yhevLj6QqaPQb/wC3///v3w8PDAuHHjwrR568hh2bJlynTj5uaG5s2bK9/A8+fP1Zd5WMgnpOQQkM/h6tWryg/CQkJMmTIl3N3dlR9i7ty5v4kQzcEKX3gZslkQEHIwC+xhE3rv3j1kyZJFbdTcuPULNQdnZ2e1UZI86Bc4c+aMclazcOOkI7pevXpYv369b9OgzEr6/dvY2CifQOfOncPcf0jJgf4VkpF+oW8iTpw46qeBAweq57ly5cKdO3dw69YtpUHoFyGHsL170jriICDkYIVrTZ8Dv5A/fPigTEhJkiRRsyAJFCtWDDVr1lTmF57QoTN59uzZ6NKli6qzfft29ZwmJzpxdSUochg2bBjy5MmD6NGjq42XphyWsPYfUnLw7xPRb08cSIwkPfpFeHqpQoUKyvwl5GCFL7kM2ewICDmYfQlCN4BVq1Yppys36oYNGypNYeXKlSBxXLhwQW2Ob968QebMmfH161fln6D9nRu6l5eXOsmTNm1ag8hB3yGtP9qw9h9ScgjotBLn36hRI5QvXx5nz55Vp5P4W79+/cDjuP7HLppD6N43aRXxEBBysOI1p3Ywfvx4tdHHiBFD+SGoDehO7XBqPMbJjZLn/lny5s2rTE26WAhDNIfAyCGs/YeUHALyORQqVAg9e/ZEgwYN1LwGDRqkuv348aMyKcWNG1cdY6WTmkXIwYpfeBm6pggIOWgKtwgTBAQBQcA6EBBysI51klEKAoKAIKApAkIOmsItwgQBQUAQsA4EhBysY51klIKAICAIaIqAkIOmcIswQUAQEASsAwEhB+tYJxmlICAICAKaIiDkoCncIkwQEAQEAetAQMjBOtbJ+kb5/r3PmB0crG/sMmJBQBCAkIO8BKZBYMQIn351f5pGivQqCAgCJkJAyMFEwEbobqk1pEnjA8Hjx6I9ROiXQSZvrQgIOVj
rylnyuKktjBzpM8Lhw0V7sOS1krEJAoEgIOQgr4ZxEdBpDfo+B9EejIux9CYIaICAkIMGIEcoEfpag27ioj1EqFdAJhs+EBByCB/raBmz0GkNvHzn6VOfMaVKBXh4iO/BMlZIRiEIGIyAkIPBUEnFYBHQ3UrXogUQKZJPdW9vQP/3YDuRCoKAIGAJCAg5WMIqhMcx6JNDeJyfzEkQCOcICDmE8wU22/SEHMwGvQgWBIyBgJCDMVCUPn5HQMhB3gpBwKoREHKw6uWz4MELOVjw4sjQBIHgERByCB4jqREaBIQcQoOatBEELAYBIQeLWYpwNhAhh3C2oDKdiIaAkENEW3Gt5ivkoBXSIkcQMAkCRiWHb14/ccf1HZxee+DRmw944v4RLh5f8frTV7z/8g1fvnvB08sLXj+98cvbGzaRIyOqTWTEjGYD++jR4GAXHY6xYiC5gx1Sx4+FtPFjI2MiB2R0jGOSyUunJkRAyMGE4ErXgoDpEQgTObz9/A3HnV1w8uG/OP/YDXdd3yKtowOSxLVHAvtYiG8fE3HtbBEnpi3sbKMhRtQoiBbVBjaRIiFSpEj49csbXj9/wfOnF754/sAnz+/w+OqJd5++4u2nr3Dz+AiXd5/g/ukrciaPj8KpHVE8bWKUzJAE0aPYmB4dkRB6BIQcQo+dtBQELACBEJPDm8+e2Hr1MbbdfIpLT9yQK5Uj0idOiPSJ4yKtYzxEjvz/yFgjTu7r9x945PYOzv+6w9nlDW6/fIPSmZKhZs7UqJEzFextoxlRmnRlFASEHIwCo3QiCJgLAYPJ4aSzK5acvY9dt56iaIbkyJM2GfKkSqw0AK2L5w8vXHviiuuPXXH+0Uv8lSctWhbOhCJpEmk9FJEXGAJCDvJuCAJWjUCw5LDvznPMOnYLLh++okTmVCiRJTVso0axmEl/+vYdp+4+w6n7T5DCwQ5dS2ZFpWwpLWZ8EXYgQg4Rdull4uEDgUDJ4cZLd4zZdxXObz6gUu6MKJwhhcXP+JzTc+y5eh/ZEjlg2J/5kCmR3F9stkXTv+zHbIMQwYKAIBBaBAIkhxlHb2LsviuoVzg7KuZKH9q+zdZu19UH2HLuNsbXLIS2xbKYbRyBCX52zwW3zzrj38ev4fnlu8WNTwYUNAK2MaMhcZqEyFYkPVJmTipwCQLhEgE/5MCjqO3XnMAzj69oVCI3HGPbWe2kX7z9gJUnriF/8riYXa+4xczj+MYLeOn8CqmzJEfC5PEQPWZ0ixmbDMQwBL59+YbXL97iyd0XSJY+EUrVLWhYQ6klCFgRAr7kwBiEuosOInYsOzQsnsuKphD0UBcfvQQ7m0hY06Ks2edEYvB4+xm5S1ieNmN2cKx0ANdO3kWceHZCEFa6fjLswBHwJYf6Sw4hmm0M1C2cPdzhtfToZSSOGRXzG5Yw29xoSjq19TKK1yhgtjGIYNMgcGr7RRSvlU9MTKaBV3o1EwKKHKYcuo4jzq/QoUIhMw3D9GInbjuONkUyoWWRTKYXFoCEvUtPIE5ceyTPmMQs8kWo6RB48cAVHu8+onLLkqYTIj0LAhojEMnV47N3ttEbMLFxBav2MQSH26NXbzFr7zncGVoPtlG1j65eOmwzilfPLz6G4BbKCp/TB3FqxyW0HFXbCkcvQxYEAkYg0pSD17wvuXiEKz9DYIu98PBF1MiaDG3McIJpfp+1qNKqjLyH4RSB3UuOouOUhuF0djKtiIhApD9m7vQumzszsiVPGO7nf/mxK645P8X29hU1n6uQg+aQaypQyEFTuEWYBghESj5olfekxhVUYrzwXt5/8cSIDUfwaJT2X3hCDuH77RJyCN/rGxFnF8mh9xLv5Z3+ijBzP3nTCZNr5Nd8vkIOmkOuqUAhB03hFmEaIKDIYV6rqhFCc/js+R2dluzCuyktNYDWrwghB80h11SgNZGDt7c3Zs6ciQULFuDRo0ewt7dHiRIlMHnyZKRPb7qMCDNmzMCtW7ewaNEig9dm27Z
tGDVqFF68eKHaFChQAHPmzEGaNGkM7iM0FTnOSpUq+coNTR/W3kaRQ7/qJSKEz+H2i9eYtOOkkIO1v7UWOH5rIoe+ffti+/bt+Pvvv1GwYEF8+PABs2bNwooVK3Dv3j3Ejh3bF+Ffv34hcuTIRkE8pOTw9OlT5MmTB/v371ek8O3bNwwaNAinT5/GuXPnwjymnz9/wsYm4JOLQg6AIody2dOiWcncYQbb0jtYceIaDt96JORg6QtlheOzFnJwc3NDihQpcOHCBeTK5TcTArWItGnTKvRJEAMHDsSkSZPU1/PLly/Rrl07/Pvvv4gVK5Yik6JFi6q6u3fvVnW/f/+u2i9evBhJkiSBp6cnWrVqpTZzysybNy++fPkCBwcH9YwaAMu7d++QLFkyPHv2DAkSJPBd/cOHD6NTp064f/++729sxzGkTp1a/TZ27FisXLlSXR1Qrlw5TJs2DdGiRcPFixfRsWNH1betra2SVaZMGVy5ckWNKXv27GpOR48exebNmzF48GB8+vRJ1aFm4+TkhGrVqqF169ZqPl5eXur3ihW1P8xirn8OihwILJ3S1pxLKTgA3T58Rr/VB0CVWsxKwaElz0OKgLWQAzWGPn36qM0vqBI/fny1iZIcuD9wY+dm27ZtW7Xx1qxZE48fP8bbt2+RJUsWnDx5Um24U6dOVWSwZcsWzJ8/H2vWrFEb8OfPn1GkSBFFKNzwK1eurDbnKFGiKI2F9fbt2+dnSB8/fkS2bNlUm+bNm6N48eLKBKYrnAs1iTNnzijCqlOnjtrcu3Xrhnz58qF79+5o1qwZ1q5di5EjRyqt6ObNm2oc3PDr168PFxcX5M6dW5ElCYzzKlasGKpWrYr8+fNj4cKFaNq0Kf755x8sX75czS2iFEUOtQtlxeNX79D9zyLhdt4z95xFmkRxsfn8HSGHcLvK5puY1uSwbNky9OvXT02YG3iLFi0Mmjw3OG54p06dUvVpUsqYMaNv29GjRysC4Bf8rl27ULhwYfVFnzVrVlVXZ2KimWfKlCmg6Yeb7969e1Uf/PqOGzeu0gwaN26MQoUKoWfPnuoZtYvXr1+rL3ASCs1M/BLnhlyjRg20bPm7L5D1p0+frsbCzb1s2bKYMGGC2tBJXpkyZUL//v1V/9RgOCaS0devX5UGQbORq6srUqVKpTQbmotoSuM4ORfiQSIj0bBQs2EbkidJiXNmuXHjhtIkON+IUnxPK83YexaJ4sRCw6I5wt3c1565iVcen9CjchE0n7dFyCHcrfDvE5owbSzuO9/D0nkrNZmt1uTg6OioNlqWhAkTguYiQ8qePXvQuXNn9dXPQk361atX6v+5ydLG36NHD0UOZ8+eRYYMGXDp0iVFEsmTJ/cVQU2AmsGTJ08wYsQIP+YgDw8P3L17V321N2rUyJe46PCmiYjkQCfzw4cPVR9JkyZV4yGpBFU4X5qz2IaEVa9ePeV7oNbAQh9CokSJ1HjXrVuHuXPn4sePH8okdO3aNfUnyYGERK2FhWPibyQJ/eLf5xARfRC+5PDtx0/M2HMGSePFRtMS4Scr68qT1+Hy9gN6/FkU0aPamJYc3r8HHAK+YMgUp5Vev3FD/lL/rVUUmyhImCAhypQsh8F9hiFWrP9UcEM2jvBUR8gh4NXkxs0NlPZ8mk/0S/v27dUXvY4cuPHy9BJ9DjQZvef77a+sWrUKmzZtAk8V+S8029CEw/5YaM5iHyQHZ2dnZfqhU5wmpZ07d/7Wnps8v+5p0tIVkpmdnR2uXr2qNnaOS9e/rg43fo6b/gXOh+Onj0JHDvqnkEgK69evB0mThT4KahXESb9ehCYHAvPj508sPHwZn799R/NSeazaB0Efw/LjV2EXPRralsuHqP8/lWBSzYH/CPj10737byRhSnJYNn8VMmfKip9eXnB+5IQBw/vgjzIVMGbohPC03/vZILhJBHWKJryTQ2jNSgRxzJgx6hgr/ytdurQywfBLm07ZjRs3onz58koT0JE
D23Aj5+besGFDpbHQnk87PDWIHDlyKFs8tQz6I+gg5hc+zUY02ZCIuNmSKEqVKuV7lJWmKWo8dCo3adLkt3d1yZIlGDdunHIY03lOLYAyWZ/mHZqyqIHQjERfBJ/RlERfAX0PJAX6NGjOormJY6W2or/pk0jo16DfgiaqBg0aqLnS5yDkEEAQnLpJ7fwd1C9inTfB7b/ujPVnb+GvQllRNc9/9lS+fSYlhydPAJ6/pvZAotAjCVOSw84N+5Az238axIjxQ3H3/h2sX7ZZ/YN76fICg0cPwPmLZ2FvH1tpFkP7DvfVLLbs2IQ5/8zEC5fnSBA/Ido2b4+WTdqotmOnjMK7d2/ViY9jp44qu+3owePg+soFq9avwNt3b9G2RXt0bN1F1T974TTGTB6Jh4+cETNmTPxZoSqGDxiNqFGjqqOIoycNx659O8Ajkrmy58bIweOQNrXPCZnrt65h1IRhuHPvNmLEiIEKZSth1OBx6h/8waP7MWbSSDSs0xjT5k7GtjW7kTVzNsxbNBvLVi/Bx48fUDBfIYwbPgnJkiYHyeHRE2dkyZQNy1YvVv03qtsU/XsOMglham1WCuskuJHS7ELbOrGms5fkQHs8i39yePDgAahZPH/+XJFyr1690KFDB1VXd1qJ9npu0rNnz1b9cTOmaYkbb8qUKZW/gPZ/EhsLfQl0KNOspX98Vn9uJBlqF2zH94CEMn78eEVILCQPfv2TOOg7IaHQTEUH9vHjxxEvXjxFDDR90exEQvQfv7BhwwYMGTIEdID/8ccfvqeVhBwCiZB++uY9tl24C7ePn1EtbyaruUN655X7cLS3Q82CWZAqwe8mHkUO9s/C+m8r8PZUr69d83muRxLzx+w1euI9nVlJnxyev3iGZu0bokn9FmjdrK0aRo0GfyJv7vzo232AchT2HNBFkcDUcTPx6MkjlK1aHAtmLkGZEmVx5fplNG5TH1tW71CbNzfZleuXY8ncFSiUvzAmz5yAFWuXonWzdujRqbcigyZtGuDSiRuI6xAXeUvkQO+u/VC/dkO8fvMa7bq2RJ2a9dC8USuMmzoa125cwezJ8+EQJy5mL5iOHXu248juk7CJbIOif+RHtT9romfnPnjz5jWatG2AxvWaol3Ljjh28gi69u2IqpWqo2uHnkgYPyEOHz+IQSP7Y/Hc5UiTKi1GjB+CJ08fY9va3WrcG7auRYdWndGkQXNFjC07NcXezQcVYRi7WBs5GHv+oemPjuytW7eCm7MUy0Mg2PQZ1578i/3XneDx9RvKZE2NEllSwzZqFIuZiecPL5y8+wRH7zxBnBjRUTFXBuROnTjQ8SlymNpK2/GnSoVVFXqhZI96RpWrIwe7mHaIbGOjzEpfvn5BnRr1MHH0VNAHwa/x2k2q4+7Fh+rrneXqjSuo06QG7l99jEiIBPe3b+CYMJHv2CrULINmDVuiSf1mapM9fe4kSEAs1B6at2+E62fuwiGOA7y8fiBdrpTYtWE/smTOihyFMmHymOlqE2fRBRrRDJStYAYsmbcChQv4nI/ns+yFMmLp/JXqN/e37rCPZa++EFmGjR2Mt+/cMWfK375yzxy8qDQDluYdGiNj+kzKv8JCPE6fO4VqlWsoEtt/ZC+O7vI5lcNSqGweDOk7Qj03dhFyCBmiPAVE7YLaC6OzpVgeAsGSg27Id1++xrE7T3Dx4UsUzpAc+dMlQ55UidUZaK0LN5qrT//FpYcvcc7pBQqkS4bSWVMjS7LgM8sqcohlwuNo1ByuX/eBJE4cH/NSjx4wpebAL+fMGbKokyeur1wxd+Es5Xxb+c9a7Ny3Hd36dgpwiU4fuKA22r+XzMX23dvg8eG9Wk+3168wsNdQpXmQHOjHWDTHxxRw7uIZ9UXvfO0/7SttzuRYv2wLCuQtiOVrlmD0pBHInDELShUrg7+q10G6NOlVnwVKBxxoOWXsDNStWV+ZjhYsmQeXf12ULI8PHihasBgWzl6qyKF15+Z4eP0/uWW
qFEOrpu3QtEHz3+bHcd99cAfL/17t+6xkpSLo2qGHkmXsIuRgOKJ0Xrdp00bFTfDorBTLRMBgctAN/4PnN1x48AJXnrjigas7sqVIiCxJHZE+cVykdYyHyJGNTxa/fnnjkdtbOP/7Dndd3HD7+WtkTBIfeVMnQcGMyRHbNrrB6Gric9AjBd3pJS19DtxUcxbJjA3Lt/ps9CP74da5/6JM9cHasGUtJkwfi6XzVykzEsufdcqjdvV6vuTw8LGz2qB15NC0XUM4Xf2PYPXJgXWoARw6uh8HjuzHiTPHMG/aP8iTMy/ylcyJvZsPKV+B//Lo8UOUr1kGk0ZNRa1qtZVde/TE4Xj24pkvOXTs0QZ3Lz30bUpyoG+EWo7/EpBDWsjB4H8mUlEQ8EmfEdqsrJ88v+P2Czfcd3XHQ1d30E+RIkEcJI0bW8VMJLCPibh2togT01Yl9osRNQqiRbWBTaRI6guVX7o/vb3x/cdPfP3hBSbG8/jiiXefPfHm4xcVm+Dy7gOev/FQ/oN0SeIjU5L4yJbcEbFCmWLcpORALUHnZ/B3pNUc5LBm8QbEto+DqvUq4uzhy0iaOKl65Wl64gmV+PHio+/QXvj+/RtmTpyrnn369BEFy+RB7679Q0wO+fMUUCYq+jN0ZdTEYXj2/JnSPLIWzIAxQ8YrbUJXXrx8juTJUoBO8YkzxuL8kau+z+o1r4U4sR0CJQeat1KmSI3RQ8apNpS9ZuNqdGjVCVNnT/otzkHIQXY8QcBwBMJEDv7F8CjsC/cPcH3/Ea88PsP942e8//wNHl891cZP/8B3r5/w+uWtiIEEESVyJESLYqP8GCSQODFs4WAXHfHt7ZAojh2SONgjefzYvkdRDZ9awDVNSg5minPQHWXljKkpzP1nJm7evoFDO47Dzi4WqtevjMSJk2DSqGmwiRwZPM1E8xPJY/rcKer00Pa1u/HDywv9h/eG08MHKF+mIgb1HqrMSoZqDnFix1GyqGUUKVRMHV/s1q8TMmXIhGH9RymH9MEj+7B4zgqkTJFSbeRTZk3A2UOXcPPODTRu0wD7thxCqhSpMXP+NBw7dUT5Tbav26PMSv41hz0HdqHfsN5YMGMRMmfMignTxuCB831VXzSHgN9/mhvpe4oePbr698dTaDwBRNs/j6KGx8K8SjyWy5gMYxQdhjy5pR8YyL4PHTqkjgIvXbr0t6j1zJkzq1Nguuh0Y4zFlH0YlRxCMlBvAMY3QAU/ApOSQxDiTak56IuNHy8B8ufJj/49BytbPwtPMA0dO0id2LGJEgXFChXH2GET1Bf+u/fv0Ll3e3WKKEnipBjab4QiDh4p7dW1H9zd3xhMDvQ5UAOgz+P5y2eIZRcL5UqVx4iBoxVJ8aQUj7Lu3r9T/X+WzNkwrP9IZXJiGTJ6ALbt3gq7mDHRonFr5bNo1LouCuYvrI6h+icHtpm9YKY6PaV/lJWaiJBD0OSg29h4/JTpLRg34D+3UfD/miynRlAZVkNKDgzUYwAdj94GVIIjB11aj4MHD/o2Z9AeYyeYalzIwXLeGz8jCU/kYKEQR8hhWYtDOqCN7cCBA+jatatv9tPAsqxS22d8A/MQMU6FAXSMdmagGVNfMLiOR1MZ/cwAOOZDYmQxNRUGrDHNBWMOGGvA9okTJ1YBc8zkyjiKwLK+MjEfA98Yy8B4CbZhkjxmYGUiPW68VapUUfKHDRumNASSBQP1evfurTKyMkiPv3FjplyOk/mWAiocM+MeAor6Zv3gyIGR25wP049QFgvzYDHJH9ONCDlY6BYh5GChC2Plw7JWcmCqCAazMcU2NzWmww4syypTXAwYMACXL19WJikGzDH6mFHFNJdwA+R/DLDjxsyNm2ks+AXOjZ8BZgxUo5bCYDkGw1FzYZbWwLK+cnxM53379m1FIgzEo2wGxjHYjqedaDLiM0ZScw66xHs5c+ZUqT2YhmP48OFqU+bcmO+JX/AkHFOQAxMDMkiP5MPUHiRV4sDgPQb
1CTlY6D92IQcLXRgrH5bW5BDa9Bm6r15GJHOT5ebLDZtfyfyTX+mBZVnlCTJGPesS3XGj5gbIKGeSAyOSmYqC6TKoidy5c0etKrO8MqsrSYjRy9wk69atC6YFZwkq6yvTbTByWZeqm2OjPZ/aDjUCaic6cxizupIQdFlgGUvBPExM1Mf6O3bsUPK4QZPg/Cfb490NJBhqNTywwbYs1ABIKroSnOZAcuAcmeCQqcCptTDNB4mUGkmEJ4frp49h3pCeWHD0v9MnOnAH1KsE91cu+OfYdT9xEid2bsLUHm0wasU25CpW2iTbhZCDSWCN8J1qTQ6hzcrqf2OjqYVOVKa5oHmGSfACy7LKVNa8RY4ZV0ksNJF06dJFbXgkB36hM8HdsWPHVB4m/p2FpMAvZ/5GGdw4aY9nDiRqGe7u7oFmff3rr7/UeHgbHAsT49G0xDGTHLhxr17tE8tC0xLvdPCf+tu/zyE4H0RYzUokB46PZEsTHcmIcyWRCjkwT04w5PDa5Tn6zFyMLPkK+24sY9s3hPPNq+gx+W8hhwi/3VoXANZKDjqUeT8CNy4eFAgsyyo1BebH4kU5JAoGspEMQkIOOnn0H/ACHt6TMG/evECzvlJTYP4kXibEG+ToT6DWpCMH/VNIJAV+4evuuWDiPeb3IuHp19OKHGjK4qkw3p9x/fp15eS2KnLos+2id4kcxj/CFhw5JE+XAVGjRUf7kVPU+/L5gwd6VC2OxCnToE7HXiYjh5M3nTC5Rn7Ndx5TnFbSfBIiMFAEtCaHsJqVdKeVaELhl3716tWVqYUbcGBZVvlVzsyqdPJys6tVq5a6U4FfyoZoDkyXwYuJuFnzCC1NWMyrRNNQYFlfOU9qDfR3UGugOYqmMG72/jd5ZpSlU5pEQo2IX+skE87LHOTAuyt4dSkvPKK5ijhbFTkkH7TKm1eEMsbAmCU4cqjVrhvmD9Ff3nsAACAASURBVO2FxaduwcYmCg5tXIXHd27iyf1bqNe5r0nIgbEWvCr0+djGxpyqQX0JORgEk9VW0pocQguUfpwD+6AGQGcuT/lw42UJLMsqs6vS/MTcV3RG8wY3fq1zk+efwZmVeNKJpiVulJRLJzi/qumPCCzrK9OD8wY2XkfKmAKapCiXx0XZXn/TJ9Fx8+VpJjqB6RSnGSykZqXgsNVhyDnoF94JQb+MzqzEZyQ9+hpIrFZHDn/M3OldNndmZEsefF6i4EDTfx4cOTTrNwKb5k9DlaZtkK90BQxrVhONeg7CyskjTUYOt1+8xpFr93CwW9WQTMUodYUcjAKjxXZiLeRgsQDKwCwOgUhTDl7zvuTigYbFjXv7myHk8PrlM1w6dhBthoxD/7oV8feRKxjcqIrJyGHtqevInzQOev9h3LkasqpCDoagZL11hBysd+1k5AEjEMnV47N3ttEbMLFxBaPe/GYIOaTNmhPty+RG3U698O61G5r2GWYycuDNcP1XH8DtofWQOHZMzd+HpcM2o3j1/Ige0/AkgZoPUgSGCoFvX77h1I5LaDmqdqjaSyNBwBIRiOTt7e095dB1HHF+hQ4VChltjIaQQ9b8hTGle2vcuXgGw5duRqpMWU1GDn8fOI+y6ROhjxm0BoK6d+kJxIlrj+QZkxgNY+nIMhB48cAVHu8+onLLkpYxIBmFIGAEBBQ5sJ/6Sw4hmm0M1C2c3Qjd+hxlpR+Bl9Dolw23XDC0SQ3Q50ByuHh4H1ZMHoHZ+86paqYwK208dwvfPb9ifas/jDK30HTy7J4LTm29jOI1CoSmubSxYARObb+I4rXyIWVmn6y3UgSB8ICALzl8+e6FuosOInYsO6P7H8wJFP0MHz59xsY25REzmnlvsDu+8QI83n5G7hL/RVuaExuRHXYErp28izjx7FCqrs/dy5ZeJCurcVaI39TM7cQgPgYDJkyYELVr11YxGYys5lFdYs2inxSQ8SQ81RVce0NHGVy0dlgyxPqSAwfzzesn2q85gWceX9GoRG6
j+iAMnayx6tHHsObkNaSMEwMLGpVE9Ch+NRhjyQlpPySIl86vkDpLciRMHk98ECEF0ALq08fw+sVbPLn7AsnSJ7IaYiB0/jcTycr6+wsVXFZWtmCgHY/kkhwYx8CAOx7VZXCffjZWJiHMnj27Ci7UL4a29z+6LVu2gFHjuhIcOYQlQ6wfctAJnHH0Jsbuu4J6hbOjYi6ftM/WVPZfd8aGc7cwuFJe9CiTw+KGThPT7bPO+Pfxa3h++W5x45MBBY2AbcxoSJwmIbIVSW91piTJyhr2rKxubm4qKyxzJjHQTleYA4qpPJijSXdfe0DkEJL2/t9EpiFhn4aSQ1gyxAZIDhR846U7xuy7Cuc3H1Apd0YUzpDC4veMc07Pse/aA6RPEBtDKuVBzmQ+ib2kaIzAgAHAxIlA//7AhAkaCxdxQSHgnxwkK+vvaAWXW4kJ/Bhg9/jx42BftoDIISTtw0oOYckQGyg56Aa1785zzDp2Cy4fvqJE5lQokSW1urXNUgpvlzt59wlO3nuKpLFjoFvp7KiU1fKJzFLwM8k4SpcGjh8HSpUCjh0ziYiI3mlY02dIVtbQZ2VlRDjzQTE6O7gSEDmEpD37ZxZaXaZZ/Qy11AoYmU4tJbBb6cKSITZYctBN/qSzK5acvY9dt56iaIbkyJM2GfKkSuwnq2pwQBnrOZ05V5/+i6uPXuKM0wtUzZ4KrYpkQon0ckzUWBiHuh9elZokCUAbq60t4Orqc6+2FKMiIFlZzZeVlWkyeD8F/QzBlYDIISTtjaE5hDZDrMHkoBvkm8+e2Hr1MbbdfIpLT9yQK5Uj0idOiPSJ4yKtYzxEjmz8yz9//fLGI7e3cP73HZz/fY3rT92QP7UjauZIhVp50iCBnW1wayTPtUJgxAhg5Mj/pA0fDvA3KUZFwFjkoBuUZGX9b3mCMyvRYU38eUcEb8PTFTqdebnP1KlTfe+CCIgcQtLeWOQQmgyxISYH/cG+/fwNx51dcPLhvzj/2A13Xd8iraMDksS1RwL7WIhvHxNx7WwRJ6atSuwXI2oURItqA5tIkZTGQQ3gp7c3vv/4ia8/vMDEeB5fPPHusyfcP37Bm4+f4PruIx65vUeWJPFQKI0jSqRLjFLpkyKenUQaG3W3MUZn1BrSpAH4p65Qa6BtVrQHYyDs20dYzUqSlfWcytgaUAmOHNiGiQp5RSpPK/FCIldXV0UMMWLEUKeYdCWw00qGtjcWOYQmQ2yYyMH/wHkU9o7rOzi99sCjNx/wxP0jXDy+4vWnr3j/5RsYS+Hp5QWvn9745e2NyJEiIYpNJNhGiaJiEBxiRkfCWDGQNE4MpI5vj7QJYiNDwjjImiSuxRxFNeq/8PDWmU5ryJULuH4d0P0p2oPFrLRkZcVvWVpDuzhz585VvodHjx6pOIemTZuq60iZtTY4cuBzQ9oHNzZTZog1KjkENxH95wzLNr4BKiQjkLpGRYDaAsmhRw9g2TIf0xJJoUULYMYMn2eiPRgVculMEDAlAmYjB1NOSvo2MwI6DUI0BjMvhIgXBEKPgJBD6LGTloEhIOQg74YgYPUICDlY/RJa4ASEHCxwUWRIgkDIEBByCBleUtsQBIQcDEFJ6ggCFo2AkINFL4+VDk7IwUoXToYtCPyHgJCDvA3GR0DIwfiYSo+CgMYICDloDHiEECfkECGWWSYZvhEQcgjf62ue2Qk5mAd3kSoIGBEBIQcjgild/R8BIQd5FQQBq0dAyMHql9ACJyDkYIGLIkMSBEKGgJBDyPCS2oYgIORgCEpSRxCwaASEHCx6eax0cEIOVrpwMmxB4D8EhBzM+DZ8dnkJlxPH8fbaFXx66QIvz69mHI2IDgqBKLYxECtZUsTLnRdJS5aCXdJkApggEK4REHIw0/LeW7oQLw8fQZK8eRE/XXrYOToiKm9Ok2KRCPzw9MRnNze4P3SG65UrSFauLDK3bGuRY5VBCQLGQEDIwRgohqAPz7dvcX3SOMR0cECaUmWEEEK
AnaVUJVE8Pn4UX96/R65+g2AbL56lDE3GIQgYDQEhB6NBaVhH5wf0gUPyFEhdrLhhDaSWxSLw5PQpvH/xHIUmTLHYMcrABIHQIiDkEFrkQtGOpqQfr14hY8XKoWgtTSwRgQf79yJqokRiYrLExZExhQkBIYcwwWd4Yzqfz/Xrg0Kdu4gpyXDYLL4mTUzn585B4UlTxElt8aslAwwJAkIOIUErDHWd1q3BD5eXSFembBh6kaaWiMDDo0cQNWkyZGjQyBKHJ2MSBEKFgJBDqGALeSP6GlIXKQaHlClD3lhaWDQC7589w5Ozp8X3YNGrJIMLKQJCDiFFLJT1DzdthEKdOotJKZT4WXIzZVqaNxflVq6x5GHK2ASBECEg5BAiuEJfeX/dWig9aEjoO5CWFo3AsXFjUHHjVoseowxOEAgJAkIOIUErDHWFHMIAnhU0FXKwgkWSIYYIASGHEMEV+spCDqHHzhpaCjlYwyrJGEOCgJBDSNAKQ10hhzCAZwVNrYkcvL29MW3aNPzzzz948uQJEiZMiNq1a2PcuHGws7ODra0tvLy8FOo/f/6EjY2N+v/cuXPj0qVLCK69FSyXDNEABIQcDADJGFWEHIyBouX2YU3k0K9fP2zevFmRQ6FChfDixQv06tULP378wMGDB31BdnZ2Rvbs2eHp6ekHeEPbW+5qycgMQUDIwRCUjFBHyMEIIFpwF9ZCDm5ubkiRIgVOnz6N/Pnz+yL68eNHrF69Gq1bt0bUqFHV7wGRQ0jaW/ByydAMQEDIwQCQjFFFyMEYKFpuH9ZCDjt27ED37t3x+PHjYMEMiBxC0j5YAVLBohEQctBoeYQcNALaTGK0Jodly5aB5h2WSZMmoUWLFgbNfMWKFZg3bx7OnTsXbP2AyCEk7YMVIBUsGgEhB42WR8ghZEB7fv8Oh8JFcXrVCuTLmjVkjc1QW2tycHR0xOvXr9VM6VCmuceQsmfPHrRr1075GYIrAZFDSNoH1788t2wEhBw0Wh9LJYcVO3Zg/roNePj8OX7++oU0yZKhXd066j9zFiGHoNEPLTm8f/8ebHvgwAGULl3aVwidzj169MDUqVPViSWWgMghJO3N+f6I7LAjIOQQdgwN6sESyWHjgQPoNHoM5g0dgpL58ql57Dt1Gt3HT8D8YUPR8E/zpRYXcgj6tQqtWYm9Dhs2DIsWLVKnlUqVKgVXV1dFDDFixFCnmHQlsNNKhrY36B+GVLJYBIQcNFoaSySHTmPG4p2HB9ZOnuQHhZ3HjiNR/HgomCMHekyYhA+fPyFGdFscOX8e371+oFODBujdvJlqw02839Rp2HTgoDr/ni9bVkzv1w8ZUvkkGAzuudPTZ+gybhzOXb8Bx3jx0Kt5M3SsX0+1o1lp5YRxmLlyFW45OSNL2rRYPHoUsqZLq9GqGS5Ga7OS4SMLuObcuXOV7+HRo0fKLNW0aVMMHz4c0aJFC5YcWMGQ9mEdo7Q3LwJCDhrhb4nkMHvNGoxfuAjrp0xBiXx5A0Siz5SpWLRpM1aMH4fqZUrjppMTijZuii0zp6N8kSIYNHMWLty8hZXjxyJunDiYsHARNuw/gBtbNyOKjU2Qz20iR0bBBo2U7CHt2+He48eo1rkr1kyaiFIF8ity4LM5gwchcYIEaNCnH+ztYmLDVMu7ec3ayEGj117EWDECQg4aLZ4lkoPXz58YMH0G/tm4SX21l8iXD6UL5EeNsmXgYG+vkCE5HDxzFte3bPJFqnKHTsicJjWm9euLhCVKYcuM6SiZ38csRb+FY4lS2DpzhtrYg3oew9YWpVu0hOvxo4j9fzv3gTNnkCRhQmRIlUqRA0mpXsUKqu+lW7dhxspVfsai0fIFK0bIIViIpIKVISDkoNGCWSI56Kbu8ekTTly6jNNXr2LX8RN4/fYt1k+djNIFCihyePT8hdIUdKXN8BH4+OkzZg7sj9QVKgWI4MIRw1G+aJEgn9OEwf6fHzrwWx86s9LJFctRIHs29Xztnj0
YPnc+HuzeqdGqGS5GyMFwrKSmdSAg5KDROlkyOehDQL9Bq6HDcNv5IS6sW6M2b6enT7F99izfai2HDMOvXz8xuXdvpCxfAefXrkGuTBl/Q9LN/W2Qz9fv249ekybj5ZFDgZKD/lFWIQeNXlYRIwgAEHLQ6DWwNHKg+afL2HFoXOVPFM/r198wf/0GjFu4SH3Rkxx4gunWti2+SJVr3RaFcubAuO7dkLBEScwcMACNqvzp+/ypiwtSJU2q/h7U84u3bqFk85ZKToK4cVX9LYcOK5NW0Ty5f4tzEHLQ6GUVMYKAkIN274ClkQNn3nrYcBy7eBGTevVC/mxZESlSJFy8dRu9J09BtdKlMHvQQEUOy7dvx7ju3dGsejWcvHwFNbp2w9Gli9VpJjqkdx47hs3TpyNN8mRYsmUrRsyj6WeXch4H9TxWzBgoUL8hcmbKiPE9uqtYi1rdeig/g84hLZqDcd/RihUr4vDhw6pT/Yyr/Lu7uzvixIljXIFB9Obg4IBbt24hefLkmskUQYYjIJqD4ViFqaYlksMPLy9MWbYcG/btxzNXV0SOHAmpkyZTWkCXRg0RNUoURQ4vX71CUkdHrN61G3Qid2/SGD2aNlF4fP32TR1l3XzwEDy/fUOOjBkxpU8vFMie3aDn9588URoMTzwljBtXHWXt1KC+71FWIYcwvXZBNuamvGnTJhQuXPi3ev6JwxSjMDU5/Pr1C5EjRzbF0CNEn0IOGi2zJZKDIVPXkYP/WAhD2kakOtbokPZPDleuXEGrVq1Umu6XL1/i6NGjYC6lsWPHqnTeKVOmxMqVK1VW11q1aiFfvnw4e/Ysnj9/jnTp0imi4d0PU6ZMwYIFC8DNOXHixKpN2rRpsX//fnTp0kXVady4sbpT4ubNmyqVB3ND3bt3T70yzPuk+zsjspnug7/FihULM2fORPny5cEssp07d1a/M4ts165d0aFDB9U+duzYGDhwoMo5xb7nz58f4Hgi0vsZmrkKOYQGtVC0EXIIBWhW1CQ8kAM36iJFimDx4sWoX78+3r59i2TJkuH27dtqc2/fvr0yPf7999+oU6cO3rx5o+5/4Nd5zpw5MWPGDOTJkwcZM2bE06dPYW9vD0Zyf/nyRbUluSxZsgQ0bXHDJlGwXlDkwE2fZMK+L1y4oNoyopubP+WTvDhOph/ftm0bcuXKhfjx4yuSIznQVBbQeDp16mRFb5d5hirkoBHuQg4aAW0mMVqTQ1jSZ+gg8q850P5fsGBBfPr0ydccwy90bvIsa9euxdKlS1VeJpJDsWLF0LNnT/Xsr7/+QrVq1dCgQQMkTZoU48ePR926ddVGzUKtgOYragIszOXEdB3UOoIihzRp0mDLli2KdFjevXuHuHHjKrLieHhZEUvfvn2VZsEo7wQJEmDXrl1K3tevXwMcj5leE6sSK+Sg0XJZKzloBI/Vi9GaHEKbeE8f6IDIgV/mNCmx0Cw0YsQIZQ7Sbcz8+j906JAih0qVKqFNmzbqmf7fr169qsiBWgW/6JnDiV/7jRo1UteS6goT/N2/fz9IcmAdkhZJQr+QCOiziBIlivr527dvStuhhkFyoLkrQ4YM6llA4/Hfn9W/gCaYgJCDCUANqEshB42ANpOY8EIO3PB16bz5Zc57pU+ePKk24lWrVikzUXDkoFsC+ilGjhyJGzduYOLEicpkpdMcqJ1QI6HmQOJo0qSJIgoWkgrNSdQ2uImvX79eaTQsrJM6dWpky5ZN+Th4r7X/QnKgLyJ9+vR+HumPh5cWSQkaASEHjd4QIYfggd5x9Bg6jBoNl6M+Ry2tqWhNDqYyK+mTw5w5c5TWsHPnTmXOoZmImzo33sA0Bzqzaetfs2YNbG1tlU9gw4YN2Lp1q/Jf8O+UQaf1gAEDlCZBn0WmTJkUUZCE6J84fvy4Igc6nWmCWrhwIa5du4Y//vgDLi4uGDRokPJl0Hfh5eWF/v37Kyc3neT65MCxBjQemp2kCDlYxDugBTkw4V2
zgYP8zNcuRgyVxXRI+/aoWKyoRWAR2CDc37/HgydPUSR3LoseZ0CD05ocjAFQQGYlfXLgZUL0I9Dhy7o0FdWsWVM5e/kFH5BZic/69OmjUn/TkZwkSRK1sWfNmhXbt29XqcEZhU9zFDd23mVNTYBtSEL8/ypVqmD27NlwcnJSpNS2bVtVjzEYs2bNQoUKFdRpJTq0+TvJoWrVqpg+fbo6uaRPDjSNBTYeY2AYnvsQzUGj1dWKHDqMHIWbW/+LZma67VU7d2HW6jXqVrWcGX9Pc6ERBGYRw42I/5n6vLs1koNZFkSEWg0CQg4aLZVW5NBx1Gi4nz7526zy1KmHuhUrYFBbHwfi8YuX0H/6dNx//EQFuLX+q5YKbNNtopOXLsO8devx4dMnFMuTB3MGD0TKJElU2wUbNqpnL93ckC5Fcgzr2AFVSpZUz/598wZMzMf7GdImT65SbFTt3AXOe3cjbuzYiF+sBDZNn4ZJS5bC9fVrxHeIg8WjRiFb+nTQNytdvnMHxZr43BmhX5gdNlPq1Cpor9v4CSpiO3asWKhUvBgm9uqpsrsyeWD/adPR6q9aGD3/bxxfvizA3E/GXHohB2OiKX1ZAgJCDhqtgrnJgWkqeB/D0A7tVdbVrDVqYs7gwahd/g+VWK96l24Y1qE9mlavhm1HjqDruAnYMmMa0qVIqdJpOD97hpMrlqncR53HjFVpunmxDzfipgMG4uTK5cibJYu6j4GR16smjMP7jx/BJH3MofT04H7EsbdX+ZLKFS6kLhjiRt6oX39Vf+O0qX7IgcvClOK60nb4CDXOY8uWqnsiSjRrjkI5c2Jk504qMptyHOPHw6KRI7D/9Bk0H8S5lcfAtq1VOvJoUaOadKWFHEwKr3RuBgSEHDQC3VzkwI132bbt6iv71MrlyJc1K6YtX4G9p07h4MJ/fGfPNBr7T59WvzF3UtZ06VS+I5ZX7u44cv4C6lWqqHIf8Za3qX37+LYt1aIViubOheGdOiJ+0eLYNnumugiIhSk3mMNJnxxIDLXKlVXPl2/focZDjSAwhzTJquXgoSr7a8bUqXDp9h2UbdUab06d8N30SUBlWrbG+3Nn1I11JDum9tZpO6ZeZiEHUyMs/WuNgJCDRohrRQ50SNMJrSv8qublOfzCbly1ivq585hxWLzlP7+Eri43Um6oOWr9ha6NGqFd3Tq/ocNnnRs2QId69Xyf8av9q6cnJvTsgczVqqsMrulT+lwTevfRI9CkpU8Op1auUIn+WPQzrQZEDiSmvHXrq5vieH0oC1N9UzMIqNzftQPM11S7Ry98vHBOo9UFhBw0g1oEaYSAkINGQGtFDnRIX1y/1ndWTQcMQoEc2TFzQH/f37qOG698AzTlBFQCIgBdPT7r0rAh2terq0cOQ5VpZ2z3bshavSZub9+KdClSqOfcqHP9VccPOQSWTC8gcqjVvQe+//iBXXPnqNQNLBsPHECXMePw6sSxAMfP2+Qa9u0foO/FVMttLeTAkz080cOTRP4Ls7WWKlXKVBCpfkObbO/79+8qf9PDhw8RPXp09R9LzJgxUbRoUQwbNsw3FsKUE+BxWh7jdXZ2NqUYi+hbyEGjZdCKHPw7pG88eKAcu9tmzVS2fhZetcmrQe/s2OY7e17ME9s+FmyjRVMmGTqTZwzop57TR7F46zb0atYU9Xr3URu/vlmpeNNm6ta4AW1aq2tB9/49T/2dZe2evWg5ZGioyIHazZBZc3B5wzrlNNeVK3fvqnus6eROniiR+vnz169Ke+G9EEIOgb/UOnJgTIE5UmWHlBx02WEZ98Cke4yZILnpxs8AOgbnjR49Gnv37lUpPUxZQkMO1podVsjBlG+SXt/mIgcOYfjcecr2f3njesSJFUtt9pmr1VCbedfGjfDqzRvU79MXNcqWxcA2rZXTmSSzbsok5MiQAYNmzsbdRw/BKzt3HjuOdiNGYvvsmcidOTM27j+A9iNHKX8ATxzx8h6eQFo+biwYt9B
66HCcuXYtxOTw6MULFGzQCLMGDlC+Dl2xiRxZaRAkpGSOiTB/2FDY2ERWTvOXr9wUMQk5hJ4crl+/jubNm6u4AUZGc/NlzAHTavBIML/QuRlz0+7evTt69+6t0mzw940bNyrBBQoUwLx581R21MAysZKYdu/erRLoUStgriQm/GNchC5bK9NeMOaBWWGHDh2qnjFDqz456GbKSGxGbnPMLGzDbLB8V8qVK6f65LW0TCLIGAuSC2UuX75cRWEz6SCzujK2g8F7bM8YDxbGdzAmI168eCoQkPmldJpDYHKqV6+ustuy/0WLFqFy5coa7TTGEyPkYDwsg+zJnOTw7ft3tdHSvMTTPCxHL1zAwBkzcffRY3XzGv0Ro7p0VieBWCYsXoK/12/wc5RVd7sbHchz1q6Dx8ePyJwmDcZ064Iy/09vwFNNrYeNwC0nJ+XUHtqhndJEeNubPfPhFC6q4i3oGPfRLP67F1rfrDR7zRr0nTLtN0x5l/WfJUrgyUsX9Jg4UR1l5Zgpn0TCE0tCDqEnB+YxYkoKRhAzyG3dunWKHBhsxsC2yZMnq1TeTGjHTKxMYcEoZ0Yhc2OmmYepMLj5M/VGYJlYmRMpS5Ysqg030alTpyoZTLJHedQEGN3MDZyFSfS40dK0FBA53L17Fzly5FBR09QgGEF95swZlYyPZqAyZcqolBzM2koiYcpxjvnIkSPYs2ePGgMJqGHDhopAaKqiCYtkwf9n/4kSJULTpk3VuEgODOoLSE63bt1Qu3ZtuLm5qSSFTDBojUXIQaNV04IcNJpKkGL4Ffndy0uZp1jOXruOiu074P3Z0yYPRDPn/LX2OYQ2fYbOrKSz2esw42bODY/kQNOMh4eHesS8SPyCZmrtli1bKkLQZWL98OEDmBiPdy9w02UkMgu1BabGYG6mwDKxclPmc27kLEzLwWyrTJXBr3SS0759+9QzjoX9k4QCM4uxDk1WjOamNsN0HEypwUINhek6iBn70eV3Yq4lJux79eqVIkTOR+fXYh4ozoGaE8dIItDNjSk9iBWjwQOSQ/IkITHpIPuw1iLkoNHKRRRyaNJ/IN59/IDlY8f4pEkYPgLRo0XDhqlTNELaPGK0JofQZmUNzudActBPoaH/d5p4uOmRJPQLTU5M1a37/eLFi+rLnP6BwDKxUiNhxlemutAVbvD8Qqc2wqyqq1evVo+Yl4mbMzf3wMZ/584d5M2bV2k0JDPd5UBsTxMYv/pJOhw/iU6/UBbHr/8758o58Ov/wYMHSjYL50btguRA01tAci5duqTkEBem/rDWIuSg0cpFFHKgY7vTmDE4cemySqdcpmABTOvXF4n+n9dfI7g1FxMRyIGbP01B/fr5HFRg9laakeh7oElH9zu/tIcMGaJ8E4FlYj127JgiAV7Q478w4R83XbZn4Zc6+6G5KjByGDx4sNJymJ+JPgWaiZjHSb9w8+fvJCFmAqCv49GjR8pMRc1B9zvb8J4IzoFzpCakGyc1GvZLcghMDtv7T0yo+QtpBIFCDkYA0ZAuIgo5GIJFeKyjNTmE1awU2GmloDQHOpzHjBmj/AT8GqfZhBs4+6LTlj4DOnN5rwJJhDb8wDKxckMmobAN713gFzkdyEys558caLohmdAh7Z8caI6ig5gb+YkTJ5TZiOm4R40apXwjTAvO+yTojKajnTJp/qIpjKefSGT8j6RBgqGmwxNJdGKTAJ49e6b8FfRDUFvj3PiczwKTw76FHMLjv3ITzUnIwUTAWki3WpNDaKcdVJwDN1SesgnMrER/EjdhbuI0GVJj4A1s+qeV+HvJkiXVxkt/RFCZWHWnlehE5iZOR3Tx4sX9kAOJh+Ph5szi32fCTZ9f+SQnkpWu0BlOBzb9CrwmlNeT8oY6nkriFz+1u+SNpQAAIABJREFUCP7u/7QSrx6lA5mOd95VzcKTWMwsy6ywPNHEC4V0lxYFJkfIIbRvaARsJ+QQvhfdWsghfK+CzM6YCIhZyZhoBtG
XkINGQJtJjJCDmYAXsSZDQMjBZND67VjIQSOgzSRGyMFMwItYkyEg5GAyaP12fLhpIxTq1BlRbW01kihitELgh6cnzs+bi3Ir12glUuQIAiZHQMjB5BD7CDg/oA9SFykGh/9nK9VIrIjRAIH3z57hydnTKDQhfMdyaACliLAgBIQcNFoMp3Vr8MPlJdKV8bnHQEr4QeDh0SOImjQZMjRoFH4mJTOJ8AgIOWj0Cnx2eYlz/fqgUOcuYlrSCHMtxCiT0tw5KDxpCuySJtNCpMgQBDRBQMhBE5h9hNxbuhA/Xr1CxorWl6FRQ5isStSD/XsRNVEiZG5pvWkSrApwGaxmCAg5aAa1jyD6HhySp0DqYsU1lizijI3Ak9On8P7Fc/E1GBtY6c8iEBBy0HgZPN++xfVJ4xDTwQFpSpURE5PG+BtDHE1Jj48fxZf375Gr3yDYxotnjG6lD0HAohAQcjDTctDE9PLwESTJmxfx06WHnaOjEIWZ1sIQsSSEz25ucH/oDNcrV5CsXFkxJRkCnNSxWgSEHMy4dHRSu5w4jrfXruDTSxd4eX4142hEdFAIRLGNgVjJkiJe7rxIWrKUOJ/ldQn3CAg5hPslNsMEecHJxIkAL1uZMMEMAxCRgoAgEFYEhBzCiqC0/x2B0qWB48eBUqWAY8cEIUFAELBCBIQcrHDRLHrI798DSZIAnp4AU4W4ugIODhY9ZBmcICAI/I6AkIO8FcZFYMQIYOTI//ocPhzgb1IEAUHAqhAQcrCq5bLwwVJrSJMG4J+6Qq3h8WPRHix86WR4goB/BIQc5J0wHgI6rSFXLuD6dUD3p2gPxsNYehIENEJAyEEjoMO9GGoLJAde6r5smY9piaTQogUwY4bPM/E9hPvXQCYYfhAQcgg/a2k5M9FpEKIxWM6ayEgEgRAiIOQQQsCkugEICDkYAJJUEQQsGwEhB8teH+scnZCDda6bjFoQ0ENAyEFeB+MjIORgfEylR0FAYwSEHDQGPEKIE3KIEMsskwzfCAg5hO/1Nc/shBzMg7tIFQSMiICQgxHBlK7+j4CQg7wKgoDVIyDkYPVLaIETEHKwwEWRIQkCIUNAyCFkeEltQxAQcjAEJakjCFg0AkIOFr08Vjo4IQcrXTgZtiDwHwJCDvI2GB8BIQfjYyo9CgIaIyDkoDHgEUKckEOEWGaZZPhGQMjBjOv77f19fHi4Cl+e78e39/fwy+ujGUcjooNCIHIUe0R3yIyYKSoidromiO6QSQATBMI1AkIOZlpet7Pd8P7+Ujik/AuxHIsjepwMsIlib6bRiNjgEPjp9RHfPJzwye0U3j/bAodMLeFYZFZwzeS5IGC1CAg5aLx0Xl9c8PJATUSLmRKOWboIIWiMvzHEkSjc7s7B9y/PkKzCNkSJmdQY3UofgoBFISDkoPFyPN1WEHbx8iNBhtYaSxZxxkbgjdNifH57CalqXjB219KfIGB2BIQcNFwCmpJ+fnRBkhwDNZQqokyJgOvN8bCxT2pVJqZbt26hd+/euH79On7+/InUqVNj3LhxKF++PCpWrIjDhw8ryPjMxsbGFz53d3dUqVIFZ8+eRaRIkfzA+vLlS6RKlQpeXl6/tc2dOzcuXbpkymWQvk2AgJCDCUANqEs6n59uy4905XaKKUkjzLUQQxPTw8PVkKrmJatxUmfMmFGRQ9u2bdUmv2nTJrRs2RLPnj1DvHjxfGFLnjy5ela4cGHf34oXL44OHTqgSZMmgcLr7OyM7Nmzw9PTU4slEBkmQkDIwUTA+u/29eWh8PZ4Bscs3TSSKGK0QsDt7ixEipMSCfON1kpkqOX8+PEDtra2ePHiBZIkSeLbj5OTE9KmTetHUxByCDXM4aKhkINGy0hfQ8L0bREzfl6NJIoYrRD44n4Fr50XWo3voWbNmqAZqEePHihbtqwfktDHTMhBqzfIMuUIOWi0Lg+WxUa6cjvEpKQR3lqK8TEtVUfGFh80E7ts2TL069dPyZs0aRJatGh
hsGxqDwsWLMDGjRtx/vx5ZM6cGUOGDEGdOnX89BEYOdBnQe1DV+zs7PDw4UPfv4tZyeClsOiKQg4aLc+9RZGQuYo45TSCW3Mx93bnR+Y23prJdXR0xOvXr5W8hAkTws3NLVSyv379ii1btqB9+/bKEV2oUCHffgIjh4YNG6J27dq+9SJHjgyOR1eEHEK1FBbXSMhBoyURctAIaDOJsRZyeP78OfjlX7lyZT9IVapUCbVq1VIkoStiVjLTy2QhYoUcNFoIIQeNgDaTGK3JIbRmpfv376NAgQJYvnw5atSooU4rnTx5UhHDkSNHkCtXLiEHM71DliZWyEGjFRFy0AhoM4nRmhzCMs2DBw9ixIgRuHfvniIHnlIaNGgQ6KjWL6I5hAVl628r5KDRGgo5aAS0mcRYEzmYCSIRa2UICDlotGBCDhoBbSYxQg5mAl7EmgwBIQeTQeu3YyEHjYA2kxghBzMBL2JNhoCQg8mgFXLQCFqLECPkYBHLIIMwIgJCDkYEM6iuRHPQCGgziRFyMBPwItZkCAg5mAxay9UcMpeojfsPn/42cxubyPB6Hvb003sOn0b6NCmQMW1KjdA1vxhrIoegsrIaG8lDhw6pCGyefJJiXQgIOWi0XpakOZAcalUug9aNaviZfSREQrrUYf9HXLxGawzo0gJVy5cIEbre3t7gf4y4tbZiTeRgaFZWY6wBYykGDhzoJ7Mr+/WfDjw0sozRR2jkRpQ2Qg4arbSlkUObRjXRp2PTQGc/Z+kGTJm/Em/evlcawJj+nfBnuWKqPn/r0H8cjpy6BK+fXiiaPxf+njgQqVMkRdm6HXD09CXYRo+GOlX/wOSh3ZEkd0U4ndmK9KlTqPbse9Hqbbh2aA12HDiB3iOmo22TWhg++W+c3bUMubNlVHUCk6/RkoVIjLWQQ3BZWa9du4ZmzZqpux14B8P79+8xZ84clC5dWuExduxYrFy5UsVHlCtXDtOmTUO0aNFw+/ZttGnTBozAZtwEg+w2bNiA4cOHI1myZJg8eTKePHmCmzdv4urVq+peiNGjR2PYsGEqxxMLg/PmzZuH2LFj48CBA+jSpQuiRImioraZP+r06dN4+/YtWrVqpVKCM3ng0aNHsWLFCjUuzi1lypRqfClSpMDMmTPB+Xz79k1FhfO3jh07Yu7cuXjw4IGK9WjaNPB/AyF6AcJhZSEHjRbVmsiBZqHWvUdh14oZyJU1I/YePY167Qbg5tH1aoNv0mUoXF69xrr54xA9WjS06jUK375/V/VZHDKXxqrZo5Xm8K+be5DksPfIGTTqNBj1qpfHkB6tkShhPBw6cSFI+RotWYjEWAs5cFJBZWXlZpo3b17s379fEcT27dvRt29ftZny/xksd+bMGcSKFUsl6itTpgy6du2qIqtHjhypIq25kTPaet++fWoTX7RokdIcZs+erQjh3LlzikDWrVun6jJCO2bMmOqOCJqfePEQLw5avHixunyImzg3/8ePH+Pdu3coUqSIela/fn1FFiQfkhP7JJGQuP7++29Fauzrzp07sLe3V5casT+O58SJE2jXrp0KBJQSMAJCDhq9GdZEDlWadkeB3Nkwonc7X3QqN+6GQnmzq98+fPysfo9tb6f+3Lz7CDoPmoB/rx8IMTnsO3oG7PvJhZ1IldznfoHg5Gu0ZCESozU5hDZ9BicVVFZWkgO1BGoMLLzZjZoBE/sxC2ymTJnQv39/9Wz37t2YMmUKOBaSg64N++fXOglEnxy4We/atUuRBgu/2tmuT58+6u8kpAEDBijSIAFw42f5999/VVpxaiWUUbBgQXz69MnX/Pjx40e1+bOsXbsWS5cuVZoH5R0/ftxXMylRooQisnr16imtI2vWrPDw8AjROkekykIOGq22pZGD0+Pn8HfTI7JnSq9MPZmK/4UHj579hkzzelWxbMYI3Lr3EEMmzsPNe87w8voJz2/flebw/t6xUJFD9Ra98P3pOV95wcnXaMlCJEZrcjBVVtb
o0aMrzYImIF1hSm6SRs+ePdVXPzd9Ftr8EyVKhPnz5yst4unT3w85+CcHXjG6evVq1Z5f8Q0aNFC30LFcvHhRaR40R9G0xeyuusJxMS04yYHtuLmz/Pr1S2kWJBYWahY0LdERTnKgaYzkxULSIxFVrVpVEU769OkVyUgJGAEhB43eDEsjB5p8WtSr5mf2trbRlNkoS8k6aN/0L/Ro2+g3dOgwTl2wmvI/TBvRCzFsoyu/QbNuwwwmh9lL1mPxmu2KiKg51GnbH5+cT/rKCkq+RssVYjHWQg7BZWVlyu6SJUuqL2qaZ6gB8O4G3h9NzYGbPS8J0i8kBf7ONjxM8P37dzx69EidUvJPDiSXVatWqebUHHLkyOF7L8XevXvVvRL0IXAj16Ukf/XqFRInTuyrOTCDLG+yY6GmQNMRTVMODg6qb5KBkEOIX+HfGgg5hB1Dg3qwNHIIyiFdrXlPOMaPi8XThvnO7dnLf5E8iSNe/vsaKfNX8eNgHjb5b8xavC5AcvD48En5IG4f24CsGdOq/vqOnomDx88HSg5BybfUk0xak0NozUrBZWUl+efPn1+ZdqgNcLOdMGGCcuju2LEDo0aNUk5gmnH++ecfZXJq3ry52uT5Vc5Lh+gI5kZP81GePHkwfvx4cEPnl7w+OVBD4DM6mklA9CFkyZIFgwcPVmakbdu2KaLi3+mboM+BmoM+ObBPag07d+5UWkPdunWVNkA5ojkYtDUFWknIIWz4GdzamsiBDum67fpj86JJKF+yMM5cug5u2HQ458uZBfGylMHssf3QumENbNt3DJPmrcCl63fgfvuI8kPwdFK/Ts3RskE1OMS2R+JcFdC/cwv0bNcIj5+5oHyDTogVM2ag5BCU/OIFcxuMuZYVtSaHsMwtqKysNB/RJl+tWjXlH6D2QOdvsWI+J9X4lc6TSPQr8EjskiVLkDRpUnUKiaeVqEXwd9ZJkyaNIpOpU6dizJgxqi99cqBJSHdaiaREIiCx0IxFcqJvg9pAp06d1P/T6UwC0CcHahccK/0TdGaTbGgW44kmEoyYlUL/pgg5hB67ELW0JnLgxKgJTFuwWp02SpU8MQZ1awX6HFiWrt+BQePnwtPzO2pUKoUpw3qgTJ326ojr04u7MHbmEkyetwIVShXGtqVTsXXvUfQaMR0MsuOx2Mpli2HBys24dXRDgGal4OSHCHiNKlsTOQQFCcmBX99OTk4aIRewGJIFCwmFvg36HD58+KBONUnRBgEhB21whiWRg0ZTjlBiwhM50Jyk7ww2x0LStMWjpvyPWgjjKa5fv26OoURYmUIOGi29kINGQJtJjJCDcYFnLEWHDh2Uk5sns+jfoP9CinYICDlohLWQg0ZAm0lMeCEHM8EnYi0QASEHjRZFyEEjoM0kRsjBTMCLWJMhIORgMmj9dizkoBHQZhJjLeTAiOeoUaPCxsbGD1KMSeBx1eLFi+PChQsqXoF5jTJkyKBOKFWuXNlPfR5b5Ukl+gGYs0hX2P7Lly/qlJDu2DGPnzJ1Bf/UyWe8hS5TK3MhMfUGj7QyqE6KZSAg5KDROgg5BA90j2FT8cL1FTYtnBR8ZQurYW3koL8560PJzZ22fuY54ikhbtxMOcGjoiQVFv7O1BOMbOapImZd1ScH9s3YBDqTWYIiB+ZgYjT0sWPHVMSyFMtBQMhBo7UwFzkEdncDp82MqUFlZmWdOw8e4YWrmzqWauzy5asnVm3eg3ZN/lJdOz95jm/ffiBbJp9gOWsq4ZEciL8uQtrFxUXFDbAwpxID2KhRMJUFNQ59cmCMASOd7969izhx4gRKDoxZoEbCYDeeTmJhdHXr1q1Vcj+SEOMrmCgvRowY1vQ6hIuxCjlotIzmJIeA7m7gtBPEc1BBakEV5lBi7iTGMvgvP3/+UrELoS2HTl7AgLGzcWnfytB2YTHtwiM50ATE7KZMZ0FTk64wDoLZT//44w+VtpsR1Ey3zULNY+LEiSqtBaOneQQ1IM2BwXC
MiOYppAoVKvj2vWnTJixYsEAlzqNWwpQdzLekC8KzmAWPAAMRctBokc1JDkGlyrh9/xHyV2qCMzuXIk/2TAqN8vU7qQypyZI4YuzMxcp2nDRRQtw4vA5xMpXC8pkj0XvkdAzu3krlX1q5aQ/GzVqCJ89dkChhfPRq3xjdWjfwRXbCnGWYvXg9PD5+QolCedTdD0z8V6VJd3j9/KnyM13cuwILVm7xY1aat2wjmIeJmkv61Mkxql8HVCtfUvVboHIzNKldGcfOXFYJAH/88FKaEFN/m6NoTQ6hTZ+hs/knTJjQz6VKTM9NExI3d963QBOSp6en+mInOTAKmYVf+/ny5VNxEHwvmAH1ypUrKh23jhxIFjQ78T+ai5gXyb/PIV26dPj8+bPK6tq4cWPfJTt16pRKxkeCIPEwrYYU8yAg5KAR7pZKDpz+iKn/4MCxczi9Y7GKZu48aCLuntiktIqaLXurKz+pOXz1/IaYaYuhYukimDWmL5ImSgCXV29A09WWxZNRuWxRnLt8S6XHOL19CQrkzoote46g44AJ2LFsGjKkTYnuQ6fA6fEznNu1DDMWrsGqzXt9NQd9n8OmXYfRru9Y7FwxXfWzY/8JNOg4EOd3L1cpPApXbaGit/evnYNM6VJh7tINGDrpb7jfOayiarUuWpNDaLOy6siBG7rOTESsuAkzVYW+z4Ff7rzHoXr16moTJ0HwMp5evXr5btqsQw2BZicSCtuTHPgnCWPPnj1Ki/BPDjRN0flMUmJabeZm0hVqD7yQhyTFVB4zZsyQyGitX2hGp3vr4tTNIDwiiTQnOTCfUdSoUX6D+9WNA7CLGQPff/xA3gqN0bVVA/Arf+rwHvjrz7Kqvj450LwUI01RlbZbl0qDpiW3N2+RJFEC3/5zlK2Pzi3qoUOz2vizSTdky5ROfdWzcEM/fOoCGtSoiNlL1gVKDrzjgak2Zo72yfXPUrRaKxQrmEv1RXIokCuryvHE8ujpS6QrUkPdKcELg7Qu1kYOhjikdRgyCytJhYnsmLWV+Y94eY+u8CpQOqeZ00ifHNiG9zUwfxLNUAGdViLZTJ8+XZ1uon9Cv7A+TU/0a5CQpGiLgJCDRnibkxwC8zmkS5Xc9yv7/JVbKFq9FaqVL6HyIelKQORwavtiFCuQS1XhtwUT763duh/vPD6o/lxfvcHEIV2VyYl3M/DPjs3r/IZ0UJoD29E01bllPd92TbsOA53YTAhIcqj9Z1n07dRMPafpKUW+P/H4/A51XanWRWtyCKtZyVByoPmIV3ryNjja/Wnq4V0K+toZTUvMirplyxY/5MA1oO+AifPevHkT6FHWhg0bquOvdExT2+DJKF4vysJkfszUqrsQSOt1jcjyhBw0Wn1zkkNw90UTgnXbD6BtnzFImzKZMvPoNI2AyIH+gfy5sirklqzboZzKu1fOVOYfFmohzepW8SUH/5u8DvLgyKF7m4bo1KKu7wrxelLPb9/UUVeSQ50q5XxPW0U0cgjtaxtQnIF+X/pxDvydph+m4eaFOjyyyq95Oqn1Czd+XrBDwqEWoTMr6erQLMVrOQOLc2CKbZ5W4v0O1DCohdy4cUP5NOjoZjyF7oKh0M5b2oUcASGHkGMWqhaWTA5v339QF/ysnjsGgyfMRfUKpZSzmSU4cmit7o/+gVVzRqv6vEI0Wd7KGN2vgyIHmofSpU6OOf83/9AEtXD1NvTt1BR0OAfmc+BVobx4SN+sVPDPZihbrAAmDO4q5BCqt1AaCQKGIyDkYDhWYappTnKoXaUc2jau+dv4Y9jaKvt88+7D1WmfNfPG4vKNuyhZqy2uHFitHL0NOw7C9x9eWDhliDpVRIe0vuZAZ/aGHQeVg/mHl5fSPhgbUb1iKUwa0g10LLfpM1qZgnJmyYD+Y2bj9oOHyrHM00mjpy9S9zrEsoupNBBdENz2/cfRssdI7Fk1E3lzZsa6bQfQuvcoXD2wBtkzpxNyCNPbKI0FgeAREHIIHiOj1DA
nOdx/+PvdvpxUueIF0a9zM9RrPwD3TmxGYsf4aq48rXTjjhNObF2IvUfOoFHnwYoY7p/agjgZS/khB/d3HqjffiDOX72FFEkTYerwnmqD7zl8Gkb17aCOtfJ+h7nLNoC3wumOstIvwNvleA8E+9i5fDo27z7i5ygr74SYuWgd3n/4iCwZ0mDC4C5qzCxiVjLKaymdCAKBIiDkoNHLYS5y0Gh6EV6M1g7pCA+4AGByBIQcTA6xjwAhB42ANpMYIQczAS9iTYaAkIPJoPXbsZCDRkCbSYy1kIPutFLz5s3B47D6hSeFmM6C90OzBJW9lc8Z4MY7opmZldd3lipVCpMmTfJNoMdjzkyfwT6fPHkCRmXXrl1b5WTiPdFSLBsBIQeN1kfIQSOgzSTGmsiBwWaMsGZiPF16CpIG4wkYw8C7mnXkEFg8xK5du9CoUSMVwMbgN7YnEZBw7t27h7hx46q8SJs3b1bkwOC5Fy9eqGA2ks/BgwfNtFIi1lAEhBwMRSqM9YQcwgighTe3JnJgzACT5zH+gH/qtADmUGLqCuZUCo4ccufOreISevfu7WdlFi9ejKpVq6ogOd7zwDsadBlXWfHjx49YvXq1yrxKImK6bldXV5WBtW3btn7Sf1v4kof74Qk5aLTEQg4aAW0mMdZEDtQW+EXPyGZGJbNwk6YGQLOPvlkpIM2B9zozDxNNRalSpQoQ8R07dqB79+54/PhxoCvSpUsXlZSP6b3ZJwmD5OI/jYaZljTCixVy0OgVEHLQCGgzidGaHMKSPoPkwHQV3Njv3LmjMq/yxjcnJyf1//rkEFD21rFjx6q2X79+DTRrKrUQ5k1iau7ACvuheYl+CmoXupvjzLSEItYfAkIOGr0SQg4aAW0mMVqTQ1iyspIc6COgGadgwYKIFy8e6EOgJkFzkD45BJS9lY5qmqZIJoHd3kZnNW+Co58hsMIxMNvrmjVr4Obmhv79+6Nnz55mWkER6x8BIQeN3okHy2IjXbkdsIkS9OU6Gg1HxBgRgZ9eH/HwcHVkbOHjyNWiGIMcjh49qi7moYmIt7fx0h3/5BCYQ5o5j5iQjzmX9AsT5jHNdrJkyZTTm4n3Spcu7VuF/gxmeZ06daqfE0skmjJlyoDmqLx582oBocgIBgEhB41ekafbCiJh+raIGV9efI0g10zMF/creO28EKlq/ndbmqmFh9WsxK/2X79+qRNKJITbt2/DxsbGYHKgOUiXZI+X87DwtNL69etV0jx7e3uVqptXfPK0Eo+50vFMYqDpij4PZmPlkdpKlSopJzhJgeYofQe2qXGU/gNHQMhBo7fj9eWh8PZ4Bscs3TSSKGK0QsDt7ixEipMSCfP5JB+05EJS0JmVOE6eNuI90byrgcVQzYF1Dx06hJEjR6pLedgnN3n6D5Im/S9lOi/toe/h0aNHKs6BJ5yoXfCCoIsXL6Jjx45wd3dX/gY+86+JWDKW4X1sQg4arfC39/fxdFt+pCu3U0xLGmGuhRgfk1I1pKp5CdEdfK5ZlSIIhAcEhBw0XEW3s93w86MLkuQYqKFUEWVKBFxvjoeNfVI4FpllSjHStyCgOQJCDhpDTt+DXbz8SJChtcaSRZyxEXjjtBif317S1Ndg7DlIf4JAYAgIOWj8bnh9ccHLAzURLWZKOGbpIiYmjfE3hjiaktzuzsH3L8+QrMI2RImp/bWkxpiH9CEIBIWAkIOZ3g+amN7fXwqHlH8hlmNxRI+TQYjCTGthiFgSwjcPJ3xyO4X/tXcWYFJW3x8/It0gIR3SiiCChCAsJSgK0iot0kqohIqkCEgpSkmHhCAhggJSgoCipIhId3eK8vs/n7v/uw7DzLIx+87M7rnPw7Ms8743zh3O95763ouHv5bU+ZurKykigtNnglYCCg5+3DqC1Jf3TZfrR76XWxd3y51/rvhxNjp0eBKIFz+FJEpdQJJme1ZSPtJIg8/6dYn1ElBwiPVb7IcFLl4s0rixyLRpIjVq+GECOqR
KQCUQXQkoOERXgvr+vRKgInbNGpHy5UVWr1YJqQRUAkEoAQWHINy0gJ7yxYsimTKJQPucOLHIiRMiqVMH9JR1cioBlcC9ElBw0G+FbyUA106fPv/12auXiBv/jm8H1N5UAiqBmJCAgkNMSDWu9onVkCuXCD9tw2qA01+th7j6rdB1B6kEFByCdOMCctrWaihSRGTbNhH7U62HgNwunZRKIDwJKDjo98M3EsBaABw6dRLh4npcS4BCs2YiI0aEfqbWg29krb2oBByQgIKDA0KOc0NYC0Ithji39brg2CMBBYfYs5eBsxIFh8DZC52JSiCKElBwiKLg9LVwJKDgoF8PlUDQS0DBIei3MAAXoOAQgJuiU1IJRE4CCg6Rk5c+HREJKDhEREr6jEogoCWg4BDQ2xOkk1NwCNKN02mrBP6TgIKDfht8LwEFB9/LVHtUCTgsAQUHhwUeJ4ZTcIgT26yLjN0SUHCI3fvrn9UpOPhH7jqqSsCHElBw8KEwtav/l4CCg34VVAJBLwEFh6DfwgBcgIJDAG6KTkklEDkJKDhETl76dEQkoOAQESn55Zlnn31WfvjhBzP2v//+Kw8++GDYPFasWCEtW7aUvXv3OjK31KlTy86dOyVr1qyOjOdpENZ68uRJKVu2rN/mEKgDKzgE6s4E87wUHIJi91DKc+fOlVKlSpn5/vPPP3Lx4kVJly6dI/P3BA7ugBXTExk+fLjcunVLunfvHtNDBV3/Cg5+3LJbV4/L5eM/yrWzW+XWteNy55+bfpyNDh2eBOLFTyyJkmWWZOmKSsrM5SRR8sxBLzB3cNi8ebM0bNjQWA4vvfSSFC31LobtAAAgAElEQVRaVH755Rf5/fffpXnz5pIkSRJZsmSJnDhxQmbPni1FihSRK1euSPv27WXjxo2SIEECeeONN6RNmzYeZfP9999Lhw4djLXy6quvyrBhw2THjh1y+vRpadGihTz22GNy7NgxWbVqlXz11VfSp08fuX37tjz88MMyduxYKVCggLzwwgtSqFAh+fXXX+XUqVPy5JNPyhdffGHGpi/GPnPmjCROnFg+/PBD8zxza9asmezevdvMy/4+atQoqVevnnmX9X300UdBv6e+XICCgy+lGYm+Tv0xSS4eXSWpMxaVFGlzS6Jk6eXB+Ikj0YM+6qQE/v3npty6dkaunN8vF09tldRZQyRjweZOTsHnY4UHDnXr1pUbN27I4sWL5a+//pJHH33UKGGUbN++feX48eMyZswY6dy5s5w9e1amTp0q58+fl+LFi8uCBQsMcLg2LILs2bPLxIkTBdfW6NGjDVAcOnRILly4IKVLl5YJEyZIgwYN5PDhw+Z9gClPnjwybtw489mmTZukVq1a5nkA5M6dO+a9Tp06ycsvv2zApWfPnubvAFqZMmVk3759Buw8gQNgwRyQg1oO9369FBx8/l8u/A5v37wgx34bLImSpJIMOcspIDgsf18MB1CcPvij3LpxSbIU6yoJEqfxRbeR6mPy5MnStWtX887gwYON8otsux84hISEGKsAxR4/fnxzUs+QIYPMmDFD5syZIwsXLpTcuXPLzJkzpWTJkmb4d955R5InTy69oGt3aShi3Fe4rWg3b940lsiRI0fMvz311FNy9epViRcvngGC+fPnG2ByfZbnmjZtKuXLlzegRAMMAAt+x9K5fPmyPPDAA+YzgAOlnzFjRgWHyH45RETBIQpCi84rB3/qIclTZZF02UtHpxt9NwAkcPbwBrl66ZjkLOO8OwIljfuElj59euOaiWy7HzjUqFEjDHRQuLiQUPyzZs2S6dOnG+XN78QOAA8a/ntO/40bNzYWAq1SpUrSsWNHeeWVV+TgwYNh00yWLJn8+eefBhx4FpcSDfcOYDJlypS7nt22bZu8/fbbxnqwYPjxxx/L1q1bjQWASwxLxLbnn3/euMewKNRyiOy3Q8Eh8hKLxhu4ku7cPCWZ8lSJRi/6aiBJ4MTe5RIvcUbHXUyBAg6
4fQhqc2p3bcQKzp07Z/4pUaJEJiOIk7y1HLASUqRIEWY5VKtWTY4ePWqex3KYN2+eiW/Qrl+/LgAJVgGgU65cOXnrrbfMZ++++66xOAAf5nDp0iVjfdCwZt5//30Ts2jUqJEBItry5ctNbETdSuH/T1LLwSFNQ/D54Ibukqd4a3UlOSRzJ4bBxbR381jJWXqgo0FqJ9xKEbEcunTpYpQ3MQSynbp162aCzQSK3cEiS5YsJjYBEAwZMsS4fLAkAAxXcAAkOO0TIAd8Ro4caQLUa9euNVYD7q2VK1ca91GJEiWMC6tOnTrmnffee89YKFgTWCzEG4id5M+f3wARVk7r1q1lzZo1BhxwRwFeAwcOdOLrElRjKDg4tF1n9syWOzePScZc5R0aUYdxSgKnDqyReImzSPp8DZwa0ifj+MKthKsJl8769esNOAAopIeSAeTeiFEQPP7f//5n6ikAFN7j5O8KDryHNdK7d2/5+++/TSCboDTxDcABRb969WqTNWWD27i1bLYSAXLiGbicqlQJtdJxR33zzTeSM2dOwd0E4BBox4oAWMhqIpai7T8JKDg49G0g1pAhe0lJmiqbQyPqME5J4PqlI3L68Ca/xB6cWmOgjAM4kH6KdaItZiWg4BCz8g3r/c/lTSVP8VbqUnJI3k4OE+paGif5q/wXQHVy/Lg0FuBAmi0xBG0xKwEFh5iVb1jvfyxtIAXLhqYexrbWvddI2blrnyz+aoTPljZ4xFTpP3i81K1VWV587hlp2aGfnD0YSvsQiO2PdYOlYPXZgTi1WDUnBQfntlPBwSFZ+wMczp67KP0HT5BFS9bIsRNnJHWqFFL6qcLSvUszKVWicLRWPuqLr6Rl01qSMGECiQlwSJWlvHzUu4M0b/SifP/DBgWHaO2WvqwSiLwEFBwiL7MoveE0OJw5e0FKhjQ1+ecfdG8phQvlkStXr8u0WUtk0vRF8tXUQVLz+agFx6/fuCmps1aQ84dXSvJkSX0ODv/+e0fip3lKNvwwyYDYgsWrFRyi9K3Tl1QCUZeAgkPUZRepN50Gh9YdB8ji736U3b/OkxTJk9411w5vD5Y5Xy+Xo7uXmJN/ifJNpFHD6rL6x19lx+975fbtf+Tj/h2lfu176zFu3fpb0mQPkRs3bkmypElk2EedZf/BY/LnX4ekSOF8MnLMLDNWq+a1zcmfdvPm39Ll3WFmzDt3/iclihWSkUO6Sr482e+R4bXrNyRj7qrCzyRJEkn9l6pIrRoVDDjs3bZAMuV9VhbNHi5VQkIrcmkVa7QxfQ7q+2ak9sSXDweLW4mMIjKJXNlYkQO8RTCkxoU2YsQIs9bx48f7dbmkI0elst2pSSs4OCRpp8EhXc5K0rVTU+naqck9Kzx56pxRsku//lSqVS4jpSo2E/7t+wWfSf68OeTzcXOkZ/8xcu7QD2FUBK6dbN2+R54o+4pcObE2zHKYOG2RGatty7qyZt1vUqNeJ9myboYBjK49P5VNm3fKzIkfSto0qaT/x+Nl1txlBrjix/+PMtqO8c8//0qCtCU9Wg4NmvWQRAkTyNRxfc3juM4ezlNVtqz7Ugo/mseh3bx3mGADB3L+w6PKhrfIFpP5TagxNLAncHB6vaTzZsqUyRQHBmpTcHBoZ5wEh0uXrxq3zzdzhkuNauU8rvChHBWl73ttpH2r+gYc7Gmeh7EEHnm8ppzcu0wyZkh7z/uewAHXD8retqwFnpOhH3Yy1kfKzOXNXCqUCy2Mwm1ETIEAtv0310HCA4ely3+S+k26y6n9yyRpksQyfsoCGTl2tmz7aaZDO+l5mNgCDilTppQePXoYviaK0aC0aNWqlVFiUGV8+umnhtCOBs0FtQpp0qQxnEd8RlEbBWX8hJiP5vr7nj17PPYHNQZ9UCfx448/mhoGahGoY0CRfvDBB4ayA54nqqFhcaWo7sCBA4Y7iUbVNEqeOgvXBo8Tz1NTkS1bNilWrJgp3MNycF/v/v37PTK7wgLbpEk
TUzdBcR6Fe5999plUqFDBjMn8KNSjUZgH4yt9U7+BdQC1B83+DqstdR8wzC5dutTMK9CagoNDO+IkOOCSSf5wOVkwc6jXuEKabCHy4QftpN3r9Qw41KlZUd7pGGplHD12WrIVfE4O7Fwka9dvkRbt+oRJ6dKxNfLX3iP3WA7bd/4lS+Z9GvZcniK15P2ur8mzlUpL5nzVPEp50uheEvJMcQNEtuEyqlqxlFfLAWBhbkM+7CSv1Ksm1Wu/KZUqlJC332zs0E7GbnB46KGHjCIFHKhARpG2bdtWXn/9dcOSSrYQChm206efflp27dpllDN1Bz///LOpSA4PHLz1B2hAfwFfEwoU/ibAAYUOlQYFbTCxUu38+OOPmyK5AQMGSOXKleXNN0PdiRTJffnll2H3U9idAsD4d96/du2aofEA4AAH1/UCQt6YXQFK5g7tOACBYodkkHkzV+QFqCVNmtSk2WKVDRo0yCs4MG+eAbgCtSk4OLQzToIDS8Jt1P71+kZBuzfrVlq+cJRUDnnKgEPdWpXCFKwrOJDhxO+2FSqQS7bv3HsPOLinslpwqF7l6TC3T9HH890zF+IbxCtsy5E9kyRJnMgrOPAcbqpdu/fL9PH9zDr3bVsomTOld2gnAwMcokqfYWMOkPW5uo1QeNOmTTMX/aCgYVCFOpuTLZxG9llOxVBf/PHHH+bEi5Kkfffdd6ZSOjxwgAfJW38oacAGbiTa9u3bTdUyRHrctQAgWCZW5gPXEif1Tz75RDZs2GDoMmrWrGksFsvKaneKUzs8S/Z9LCNICwEH1/UCeN6YXXPkyGGsBMsNhRwTJkxoCA/pF4pxqrBpAAjUIFu2bFFw8Ov/yiAZ3Glw6NRtqMyc+738tXW+pEyR7C4p8dlXC1bIod8XG59/eOCQM/u9l9p4cit5A4dmr74gKTM/I58P6y6NGz4XNo+Dh4+Lp755IDy3Ep8DDMXKNTJB84XfrpEVi0b5/VvgtFspqsR7Fhx+++034/O2jctx4B1CWaJs8+bNa9wngIRrbIKTNydxTsz8AaRoWBXcoxAeOECZ4a0/AuKuFBoEjO3v0F1Q+AZIuDZcQ5DqAQwwuGJVcIKHFdZehYqiBgzgW3JlcoWEz4KDXS8/vTG7ck8FVpM7q6wrI6ydH7KADRZrw5tbSS0Hv/+XDZwJOA0OxB2eqtBEEidOJL17tJLHCj0i5y9clhmzl8q4yfNl4ayhxuVDiyw4cNIv8GQd+WXNVMmfN6d8+PGEe4rgrOUAOHDSX/jtapNllDtnFvli8nx5v99oObRr8T2ZVBEBB55hbXv2HpYRg94SxvB3CzZw8BaQBhy4KQ3CO0uAZ0/LrjIGILAWrOUAbxEnaMABFxA/ub2NBhkeDK0wpOK28dSfKxjwjuvvKN2CBQuG3V/BvHDfpE2b1rhw6BO30aRJkwzhH/c7QB1OwyLhGXspEP/GCZ85WHCw67WWgydmV2ICzzzzjLFssEzoH0BlXcRAChcuHDY/LCrWSpwCYGN8gJOGdQQjrIKDv//HBtD4ToMDSwcgKIKb/80qOXr8tKRKmVzKli4q777dXJ4sWjBMOpEFBwJw1V56Q9Zv3Ca9320l585fChccSHsllfWr+StMCiwZTMMHdpGSxR/zuEP3sxx46bNxc6Rrz0/k1L7lHgHG6a13Ghyi61aKCDggQ5QtyhTlhisGRQgJHoqUS3e4cY2YQ/369Y0rCFBAUROQXbdunTnNo5ixGAhQe+uPE7k3ywH3Uf/+/Y1Pn4A0p3iC0/T77bffSrt27Ux6LmN7amQnff3118aaQLnzHnN3Bwcbc/DE7Ip7i3GJL2DFMD6xFUCMi48IzhMfATCwXAAzbswjmI+Vx/yZH+4nKMmrV69ugIv54CILxKYxB4d2xR/g4NDS/DJMlx7DjCU0eUxvv4zvPqjT4BDVRVu3UkTBAdcRFNc8T9wBim57RzQZOlwdmipVKuPy4e8oQNw9+P9hR4VRlTgDf+dzb/2
FZzlwGOEkTkwEBQ5AEQwOtTL/Me4xlDB3RntquMLINPrpp5/MfCpWrGiyoQBYV0uJd70xu+I+AgCJgxCTwXpAyWMJuGYrMT8sDGIhKH3AE2Al0ws5YNXUrl3bBPCrVq0quPe4t4Kb8AKtKTg4tCMKDr4RNJlYq9ZulobN3pWfV0+RQgVy+6bjaPYSLOAQzWV6fR3lyYna2+k9psalX07pZC9xz3VMNdYHGyw033GlKTg4tNMKDr4RdLlnW8offx6QoQM6S9NXavimUx/0ouDgH3Dg9I9FYQPQPthKj134E/xiak3361fB4X4S8tHnCg4+EmSAdqPg4Dw4lC1bVs6fPy+LFi0yAfSYbAoOMSndON63gkPs/gLEdXCI3bsbN1enloND+67gEDlBQ9aXJEMZky5b/IlCkXvZD08rOPhB6DpkjEpAwSFGxftf54EKDtB3fzZ2juzdf8RwHuXOlUXavlbXEOj5syk4xJz0yQyChwg+I1JDuVcZKgp733LMjSymYM1b4JpMn2HDhplUWVJbqeLmfmfmRuYPXEbUJJBGqi3mJaDgEPMyNiMEIjjMnrdMXn+jv3wx8n0pXzaUFG/JsvXS/q2BMv6znvJq/eoOSefeYRQcYk70+fLlM+AAXxIpmWT6kIoKXQaFZTHZwgOHrl27Gh4lwAG6C4rdSJ29ffu2LF++XMEhJjfGQ98KDg4JPBDBodWbH8r5C5dk7rTBd0kBSgrYWLloh7sfLl++KkmSJJYVqzbJ37dvyxutG95FBU5B2pBPphn67Hx5s0v/nu3kuapPmz7vd5cDVc5tOg2QnzZtl4zp08o7nZpIh1b1zXu4lWZNGiBDR06XHbv2SqH8uQ1V96MFAyN91VVoweJWQtFSqIXidaXPIEUT4jqUtzf2UdZLLQHZQYBKpUqVzEkfjiFvbKu8Y9lbAR7SQalkdk95haOIKmQKySg2s+3KlSsyY8YMee2110zFNZ9j7VBgB3XG/PnzBd4jKCsgCKSOgPVhZYSEhBjryBvbK6R39EufjzzyiJQrV85YLGRAMW779u2NpUKBHVXNtr7DIZXh92EUHBzagkAEhxGjvpR+g8bL1zOGSPmyxTxKAh6msZPmycyJA8ylO7CvFi/f2FBwQ7+BpfFa+76GfrvIY/lk6fL1Ur9pd9mxcbbkyZ0t3LscHnwwnhR9+hUp/3Qx6dWjlUlRpfL6q2mDJKRccQMOzGvMiHcl08PppE6jrqYaev6XQxzatYgPEyzgwIrgCIKKu1OnTqYgzBUkAAdv7KNQZUCeRzEZRV24h1DAsKKGx94KAypEfVRSN27c2Chcd3Ag44jiNhhfvTUUfq9evQwwUNsAX1KuXLkMYFF5zfsA28yZM6VPnz6ye/duU8Hsje2Vim2quVeuXGkI9KicxrVG5TRUIBTuTZ061WREAVgLFiwwFc5xpSk4OLTTgQgO0FS8/d4IGT1hrrEUyj/9pKHQrv1iiLlvmgY4cIfzH5vnhkmq8ovtpGC+nOY2t+frdpQSTz5q+Jtsg0a7ZInHpFf318O9yyFp0sRSpnJzOX94VRg54HcrfpLMD6eXfHlyGHAAlBrWrWq65u4GrAjXuTi0ffcdxmlwiCp9BgvBeuAUDi3Fpk2bzC1wVCCj7AEHb+yjuH3y588v3bp1M/KAugKGVkjvIsreChEeJ3J3cEAJQ7kBcIQHDsuWLTOpqzTuj4C/yJLuYcFwwx3Vz1gTf//9twEHb2yvVC4DCJbyG8uAimvAASsKkMG9RaMiG0AEnOJKU3BwaKcDERzs0uFg4orQH3/aIouWrJXTZ87L1zM+lorlSxhw2HfgqLEUbGvWprdcvnLNPJO/WG1DgOfeKFDjmtDw7nJIlCih6f/UvmX3vG/dSptWTZGnngytfJ0+e4m833e0HPz9G4d2LeLDOA0OUWVldV8R3EfwDkGRQSFZokSJvLKPcppGeaMkabh3sAY4gUeFvdV1LlBIQIGBuys8cHANSLsGqOE8+vz
zzw3woeABOX6GR8sBfQWWBsR4NALfXPYDOLBGWGq5g50G0R6cSfA0xZWm4ODQTgcyOLiKgIyRxq9/IDv/2Cdb139plPeevYfuusiHz1EMX078UAoWryutW9SWTu1euUeSp06fD/cuByjF33znYzlzYIVXcHBNZVVw+E9MUQUHOJJQmBC/uTZI76CZ5qTsjX0UywEGVNxRru1+7K1YC7hkaPAS8b675QBLKmvCMsBysY24AM8PHTrUxCo8gQOX6lAEB08R7ibmQwbW/cCB+AdjYcnQiFkALoAD/RGoxyUVV5uCg0M7H2jgQNoqgWDuWHjm6bvjDQSYiUVwogcciCvs2fJ1mKSeqfa6CVYP7vemvFC/s2RIn0YmfP5B2OeHj56UrJkzGKK28O5y4F7p0pWam3HSp0tj3p+74Afj0oI91r3OQcHhvy9rVN1K3GPAhT24YiDHI7AM2ynAgO+dw4E39lHcOTCNcqNaihQpTFYRrhzuSQiPvZW4BAR0KH9O35zqPXEwQeSHYqZfWFNxDwEMSZIkMVlM7qms9ncu1mEMQIGTPvc34O6CcA/2WG9sr1CLA0aAF2NBfgdoMgeypCAQhJockMGVBlke64wrTcHBoZ0ONHBg2U1b95KVa3+RYQO6SIknCxlF8fPm36VjtyHmetHRw3sYcJg4baEM7tdRmjd6Qdas/02eq/OmrFs2wQAEwFGvcTeZN2OwVAkpJT9t2mYAgwA1Cj68uxySJ0siRcq8LEUL5zP9U2tRo14nmTlpQFhAWi0H339BSQvt3bu3Cdiy5/jXCTQTqA6PfZSZ4HoBWDhhkxI7ceJEyZw5s1e2Vd5xZW/Fr49rxvXSHNcV4hoi9oB7hzoHAtj4+QEhb+BA3QMZSWvWrDGpuAAD68O6JbbiDRygyyaoDRMrFhHBZsseS7YSN9uRyQQ4cLc1d1OTuRRXmoKDQzsdiODAFZ2Dhk8xN8YdOnxC4sV7QHLlzGKsiY5tX5YECeIbcDh6/JRkyZRBps78Vggid+nwqrz1RqiflvbpmFkybOQMOXn6nOTI9rC8+3aLMFK8+93lsHvPQWPBbPxlp7FAuMf6jdYNwlJZFRwc+oL+/zBxjX0Uum17BSqAgrXAbXLaRBQcHPoWBCI4RGTpFhzcayEi8m5cesbpgHRMyTYuEczhJuMiI9YMSJBySwosLjZtCg6OfQcUHBwTtV8GUnDwi9ijNShuJ4rbuOKUFFgC1FgNuNq0KTg49h1QcHBM1H4ZKLaAg1+Ep4MGpATUreTQtgQrODgknqAfRsEh6LdQF+AmAQUHh74SCg4OCdpPwwQLONg7pKkK5p5j26hDID3W1iN4EyPFYWQ5Zc2a1U+S1mGdkoCCg0OSjuvgcPHSFUmTLcRwLj1W6BGfSj0QgubBBA4pU6Y01b8rVqwwtBe0iIIDHETp0qULy/Dx6UZqZwElAQUHh7bDF+CwdPlP8vEnU+XXLX8IvEiP5M4qzRu9KB3bNvTJf9brN27K9FlLpFXz2j6XCvNdt2GrqadIljRJtPp3nyf1Ebdu3fYrW2swgQMFbNQaQJtBAZg7OJC5Q20C3Es0iuaoPQBUrOVAQRuMppDwEdiFv4jiMQrW4FyiEA1uI2ooJkyYcBe5X7Q2X192TAIKDg6JOrrgMGbCPOnw9iBDU1HvpcqSJHEiQ3P9fr9Rhh4bKuvothWrfpbuvUbK5rXTottVjL4fiPMMJnBAgcMVBJMqFc8vvvjiXZYDPEVk7VA5nTRpUsM9hBsJmgoLDtBYUGBGhTFV1VBrkAIK9TUUFrxLYRm0FxSSAUTagksCCg4O7Vd0wIF7ErIXel66dWpqqK1d28ZfdshHQyfJtC/6GWZTqCs6vDXYVCpDbAet9vCBXSRVyuTy7ffrDJdR3/fbyOARUw3BHldwzpjQ31RGP1+vo7FIAB6Kz/LnzSE9en8m02cvNfc+5M+bU4YN6GwI+cyJsnwTw5i6fOUm2f77X/JQ2lQya9JHAhX4yjW
/mJvluDSocshT4upWypUzsyR/uJwsnDXMzP34iTPy0EOpZOrYvmEup2mzlsiAIRPl4OHjkjH9Q9LljVflzTYNBWBwn+fYiV+bQj1bizHqi69k5NjZcvTYacnzSFbp+14beaH6M2FzbtSwuiEa3PH7XqEQ8OP+HaV+7SrR+iY4DQ5Rpc8g5sB9B/xcvXq1ufAHaguI72zMgapkqoWpAaBhXUBRsWXLljBwoMK5YcOGBiC414E+abCrwma6dOlS8/vVq1clTZo0AkcS6aLagkcCCg4O7VV0wIHKZG5su3BklSRNEvqf0FPjBAcdRYlihWT4wLeE6uQGzbobrqIFM4cKdNgvvfK2tGlRV4Z91FmuXb8hhUs2lI7tGhqLBKU+fdbSMMsBiuz3+o6S1UvGSs7smeWzcbNl4LDJcuKv7yVhwgRSqmIzgdF15eIxkiF9Wgl5vrXs2r1fvpo6yFB/9xowVhYtWSNb1n15FzhwzwO8SVVCSsrc6YMNqNVr0s0oauYJy2uBJ+sY1tfqVcqY6ukqNdvJ+uUTzdrc5+kac4CbiUuMYJHlWVhmGzbvIbC7Plm0oJnzyVPn5PsFnxnw+3zcHOnZf4ycO/RDtPLbnQaHqBLvuYID3yGu4YRTCCpuCw7PPvusUfzcDkfjIh2sAriLXAPSENNBdwFo1K9f37iqcD9RaUxcwjZoKrjPgct5tAWPBBQcHNqr6IBD74/Gyay538vuX+eFO1tLZAfLKad42rKVG4X7FS4eXS3rN241fz978Iewz5u0+kCSJUtieJTclS602VevXZd0D6U2fZ07f0nS5axk7lMokC+nUbTlyjxhTt40XFJwLW3fMMv8zikfMLpyYq1HcOCkX6dmRfPsxGmLTDyFvrE4sGq44Me2wqUaSPvX60ub1+qECw6sL1+e7PLJ4NBTL61M5RbydKkiZp7MGdDgLgra/oPH5JHHa8rJvcvMnRZRbcEKDlyuAxMrF+YQKyAwjeVQuHBh4yqiYQVw3wN3J3jKVoJRFUI9QAXQAjTul/UUVTnre85JQMHBIVlHBxz6D54gU75cLH9tnR/ubD1RYHMXQ54itYzCPnbitNR5tatcO7UurJ+WHfoZV9LkMb3vUbrnL1yW9/p+blww16/fNO/gtsISKPp4PqNoG9SpKp3bh9J1A2IbNm03p3IaAegKz7WSfy787BEcfl491ShqmivjKhYQbi/Wc+HCZXOiP3HqrAzq+4ZHC8fVcuB+CdxP7VvVD1sjFOMEsedNH2zmDCDB4UTD9ZSt4HNyYOciYx1FtTkNDr5wK9m1Qro3Z84cEyNAqfN3rvYkVoC7CMVPHIH4hAUHYgjckGYvv2nZsqV5xgIL7+bNm9dYHVwrysU82oJLAgoODu1XdMBh1txl0rhVT3PiJ3bg3nDHQJKHMu3YdYic3r887BEyefIWfcmkkOKXr9uom1w9+WOEwAGr4q99R4x7h1M8F/ykylL+LnAg5mDvcgAcNv68Q76bPzJC4OCNVA8rAivk27mfhIFHsXKvSpOXn48QOEAa2O71emFrbNSyp9y8dcvEJACHurUqydtvNg5qcIjq19bdrUQ/xAVwK5GVBDi4ZisB1NzvQE1EsmTJwsAB0MDttH37dpMpx7swtAIeNlsJEjsyo0aOHClly5aN6pT1PT9JQMHBIcFHBxzw62fJV924VIZ8ePdFK9t27JGqtTrIr2unm9N1yZCm5vIc61bCzfNig87GrcRJPjLggLulx1vNpWXTWkZKBJkrvdA2xsGBO6lJTYbHtZkAAB4jSURBVJ0+vp8ZF1Bi/f16trkvOHBtKTENV7fSUxWamCD6wD5vxHlwcOjrrsPEAgkoODi0idEBB6Y4ecY38lr7fubWNSi1qRVA2ffsP1perV9dPv34HbMSTthkIBFwvnjxqgn05sieSWZNGmAC0uGBA1k//QaPNzfAJU+WVKrV7iDZsz5sXE5Qa7/z/ieyfNUmEzQmfZZTeExYDlggc75eLht
XTjZBaoLxBLpffK68uWDIfZ5YGTZbaeG3a6R52z6yZN4nUqxIAZk1b5kANrjCKL6L65aDQ193HSYWSEDBwaFNjC44MM0fVv9sfPGbt+wyJ2uCwrhPWjR+8S43UtvOH8n6jduM9UAKJwoVZX8/cCCeEPJcaxN4JtsnceJEwn3Rh4+clGJFC8jEUR8I8Y/536wyLp+33h0eI+DA+A2a9ZBNv+yUbFkzytABneXosVPSufswk4aLW8h1nvMWrrwrlZXA9iejZ8nFi1ekYP5cMrBPB6lU4SkjIwUHh77wOkzQS0DBwaEt/HN5U8lTvJU8GN97KqpDU9FhfCyBf/+5KXs3j5P8Vab4uGftTiXgPwkoODgk+4M/9ZAM2UtK0lTZHBpRh3FKAtcvHZHThzdJzjIfOTWkjqMSiHEJKDjEuIhDBzizZ7bcuXlMMuYq79CIOoxTEjh1YI3ES5xF0udr4NSQUR7HsrImSpQorA84k6pXr27uaCa7KCabEzfNkebbrFkzswxlkY36bio4RF12kXrz1tXjcnBDd8lTvLW6liIlucB+ONSlNFZylh4oiZJHvU7CqVVacDhy5EgY7fbJkycNfxKFb8OHD4+xqUDQt2PHDqlbt67s3bs3SuO43vnsqQNSbzNlyiSsiaYsslESs3lJwSHqsov0m6f+mCR3bp6STHmix+MT6YH1hRiTwIm9yyVe4oySsWAo1USgN0/gwJxhVIVwDxpvGhXTFK9RgAh30rBhw+S9994zhH22oO3cuXOSPXt2OX78uJw6dUpatWpllDKndZ7hTubffvtNWrRoYQrsjh07ZsCH6zgh+4P1FQtm3LhxEhIS4nXchAkTGkZYmF4hBITGY9euXdK2bVu5cOGCKdTD6qEPaD4WLlxoqMip7KYwz94/wXjcEX379m1D5QEvVIECBUwNB/Ua8D/t27fP8E7NmzdPcuTIEejbGaPzU3CIUfHe2zmxh+Spski67KUdHlmH87UEzh7eIFcvHQuqWIM3y4EqaIrd+vXrZ5QrVdPQcaPoOemjeKHZ4DlI92iTJk2S+fPny6JFiwzDK8oaIj+qomvVqiVQc/z5559SunRpQ9vNu7iVKJgDECiimzJligGiPXv2eB2Xi4keeughAzL2jucnn3xSOnbsKE2aNDFEfyh9QODs2bPGIkLR06xbCYsDMkHmlidPHjM+c9q0aZMBFvigdu7caUCjTZs2ZjzmFZebgoPDu3/75gU59ttgSZQklWTIWU5dTA7L3xfD4Uo6ffBHuXXjkmQp1lUSJE7ji24j1Ud06DMSJEhgTuJYBbh6UJwodpQhJ3mUMBXT3bp1M3Oi4nnIkCGyatUqc5oGDFC0nP4h3ANUOKlfvnw57F4RAIB3ULIQ+1GFTSU14EC1NGR8sLRiiUAhjvsHLidv40Lkt3jxYilVqpSZ040bNwSLgj5OnDhh5sX9Ed7AAWZZgIw+aIAH48ILhYW0fPlyA040rB54pACuuNwUHPy0+7iYLh5dJakzFpUUaXNLomTpFSj8tBcRGRZAuHXtjFw5v18untoqqbOG+NWVFB1WVsDBxhxwB+Fa2bBhg/lJq1GjhnBfA6duGgCSMWNG2bx5s3Tu3FnSpk0rXbp0kWzZssmhQ4eMdYDSdr069Nq1azJ69GjTJ4R8uJRogEPNmjXNe7ZBy8G/07e3cQEH5ghfEw0XGIywuIiwhnifn97AAQDAsnBV+Iy7bds2+e6778y406dPN31jSbj+HpHvR2x8RsHBj7tKkPry8R/l2tmtcuvacbnzT6gpHOztwWu3Q5VKsgTBvpSw+ceLn1gSJcssydIVlZSZy/k9+OwrcGCB3PpGbMCeqiHRI0bQqdPdVC08C6Ee/879DihTTuPEAHieU7h7w1VTrVo184wFh/Lly5tnsVw47RMzQKljOXgbF3BAYeMSAmj4yZyJKdB3zpw5wwUHLAfiCNxbQYP3CXDA2gEwFBzu/a+q4BBr1FcALaR379DJ2J8BNLX
YMpXoupVcs5VQkNzghu++cuXKxm0EAytuJFJb8c/jwiE9lGwgXDhPPPGEufPh5ZdfNiIlBsDlQPx+5swZEw/gPeIT7uDAswSHa9euLTNmzDAMsIBIeOO6ggPPEgMBFOLHj28C1biwsFYAG1xZuK1ciQKZI8CD9QOwQAbIHNauXXuPpaCWQ+j/EgWH2KItAmUdnB5z5QqdzYEDIqlD74LQFhgS8JatxEU9BJi5uIfYwIABA8yJGrdNvnz5DONq5syhqboEiMluIk5gXU8ElFu3bm3cVbyP24nArrvlgC+/adOmxtWEpYL1QGCYO6hp3sZ1BQeeo481a9YYFxfAQEAZ9xeWTdWqVY1VgZVQsWLFsGwl7pngOQCELCvAizuu3cFAwUHBITD+t8a2WWAt9OkTuqpevdR6iG37q+uJMxJQyyHObLUDC7VWg/U9YzWo9eCA4HUIlYDvJaDg4HuZxt0eXa0GKwW1HuLu90FXHtQSUHAI6u0LoMlbqyFVKhGbpkiF6aVLaj0E0DbpVFQCEZWAgkNEJaXPhS+ByZNDP4fw7IEHQv/+v/+JuP67ylAloBIIGgkoOATNVgXRRF3BIYimrVNVCagE/pOAgoN+G3wvAQUH38tUe1QJOCwBBQeHBR4nhlNwiBPbrIuM3RJQcIjd++uf1Sk4+EfuOqpKwIcSUHDwoTC1q/+XgIKDfhVUAkEvAQWHoN/CAFyAgkMAbopOSSUQOQkoOEROXvp0RCSg4BARKekzKoGAloCCQ0BvT5BOTsEhSDdOp60S+E8CCg76bfC9BBQcfC9T7VEl4LAEFBwcFnhMDsedv1zzCF++X5uCg1/Fr4OrBHwhAQUHX0gxQPoYO3asuSD9+eefN5eg+K0pOPhN9DqwSsBXElBw8JUk/dwPN3RxfePjjz9ubsDili6/NQUHv4leB1YJ+EoCCg6+kqSf++EWLMCBKxJz5cplbsryW3O97Mdvk9CBVQIqgehIQMEhOtILkHexGriysVu3bubidu7I5epEbSoBlYBKIKoSUHCIquQC6D3AYd++ffLuu+/KzZs3JVu2bPL5558H0Ax1KioBlUCwSUDBIdh2zGW+gIJtu3btkvfff1+4QD5r1qwyevToIF6ZTl0loBLwtwQUHPy9AxEY/86dO0bpJ0yY8K6nL1++bILP8eLFE8ABy+HcuXOSO3dumTJlijxgA8MRGCM2PwKIqixi8w7r2mJCAgoOMSFVH/d5/vx5QcERbHZtGzZskCeeeMLUNuzYsUN69Oghq1evlrJly8qSJUsMaMR1pYjczp49K+nSpYvzsvDx11K7i+USUHAIgg3+7rvvJHv27FKoUKG7Zrt48WJJnTq1lClTRvr06SM///yzLF++XPLnzy+DBw+WSpUqGeCwABHXTtCsd//+/ZI0aVJT/xHXgTIIvuo6xQCSgIJDAG2Gt6lgBaRIkULKlSt31yNffvmlcSO1b99eevfuLRMnTpTjx4+bArgPPvjAuKFat25t3kUxHjlyRLJkyRJnLArccVu2bJFMmTKZPwoOQfBl1ykGjAQUHAJmK7xPZNiwYVK4cGGpUqVK2EMovkGDBhl3U/fu3aVXr14ybtw4k9L64IMPSo0aNcxPAKVt27aSIEECsW6oS5cuGWXp2mxw25sCJebBM/Tjj8Z6b9++bYCPdUWk8c7WrVvNWt0tBz6jsV4FjYhIU5+JaxJQcAigHffm9sFlVLp0aalatWrYbCl6I8aQPHlyk6XUs2dPGT9+/F3ggELnz6hRoyRnzpyyfv16efTRR+XPP/80/bm23bt3m/oIb7Qb169fN+BAANzpxrhXr16VhQsXyjPPPGNSdVHo93OTIaNNmzZJjhw5JHPmzHe5144ePSoZM2aUM2fOGGtKm0pAJXC3BBQcAuQbgaI7efKkOeESQCX4TECZhssIZf7ss8/eBQ4dO3aU9OnTG2B4++23Zfr06UbZ8R4Ww4kTJ0ysYsCAAYaM74cffpCiRYsaC+KFF16
468T866+/GuBInDjxPRJhbnv27DFWAyBj52UfvHXrllHUnt6NiHjvp+QBuJkzZ8qkSZNMim7evHkN6J06dcqk7YZn7UyYMMHEYAAVO2+shoEDB0qbNm0EkIByRJtKQCWg4BCQ3wEU1ubNm40S//bbb6V48eImVsAfLAOUcsuWLe8Chw4dOpiTPMHnV155RebNm2eUJkqQLCZiDLijrOVBsBpl+scff0j16tXDrADGJriNdeCJkwnlPW3aNDl8+LCpwnZ3LZFSSx8ExyPb6PvYsWPm9O5NyV+4cMFYPwTmS5YsKeXLlzdgyb+99957Xt1MyAJwSJMmjdSpUyfsOSwKXHFdu3Y1AKrgENld0+fjggTUcgiQXUZhrVixQipUqGCqm7Eg+BMSEmLAAdcI4GBPvzzfrl07Wbt2rfz+++/y8ssvG3Dg31GyZOiQqYTi47369evLqlWrhPgFvEswt6ZMmdKsHpcNAPLYY4955GRCgRPgpu/XX3/dcDe5NpT3p59+aiwYd6vifuJFgc+ZM0deeuklAzq879oHY+/du1c++ugjwfWFdfPNN9/IypUrzZywKHiP5/jj+i4xClxtVI2/8cYbYS4z1gHIvfPOOyabqUSJEv5lsb2fkPRzlYAfJKDg4AehexoSRcZJlsAyvEiPPPKIcRmhuD755BPjagI07KkdpUqgedmyZUbBAQ5ff/21UeC2ARL0g1XBs9u2bZPmzZsbl8qLL74YdtJHuaMoSZXt0qWLR7cSJ3TGYn64pGwA27rDXnvtNePWSps2bYQDvLxLthVzYf78jqvHtdgPiwQgwALAcgEwAQdiI/yd+AkyOXTokGTIkEGSJEkSNv+///5b3nrrLQOSgIuVHTJC1nw2depU43YCnLSpBFQC/0lAwSFAvg0ospo1axogaNasmRQrVkwKFCgg+fLlMyd+KqA5YaPoaDyPksZFRNCV+IM7OPAc4IALhuA1bqbatWsbNxQK8s033zR9EacoVaqUtGjRwrhb3LOBUNoVK1aUAwcOyIgRI4wyx9og1oByv3LligEgPqtWrVqEs4kYl0rur776ysRC8P8DAih92wBBQJE4CQDAuMgC0CDGQeYVSn/WrFnGynJ9l/kBOlg6AKIrsNarV0+GDh1q/h35AZzaVAIqAQWHgPsO4PrASujfv7/x++MeARw+/PBDWbNmjamAnjt3bljQl+dJVyVQjHJDsXOytymadoEoRBQ7biPI+bAcCM5SMYxLhkb6KyCE1QAw2Wwg2wd9EgvBQgE88PvTcPNwqifoTc0F7itbX+EuYE+psr/99ptxheFOw12E+wiQIz5gGwoeq4S1k3EEwAEYNKwEWGiRFdQhrVq1MhlXtl27ds3IEgtszJgxYRYJ7+OmY1zcSwADMiR7yT3ugUWH1cZnkXWZBdyXTCekEoiEBNRyiISwYupRlC8KmqAsCo7TM0oJxT5y5EijlAlSo8BsKimKD0WIwkLJUwPBO65kfMwX9wuWA24VgISiOMbDjcKJnUbWjwWHBg0amKA179nqap4nsM37uI1Q2LhlcH8VLFjQVGYzLhlSzJN4h3vDJYSSdnX7AHpYISh9AIf+sWyID1gL6caNG+YZMq14l7Gt6wwLh2A4cyX+AmASOLcKHoumUaNGxrrhfdsnciIbbOPGjWZcLBCC9UWKFLkLHGyW1uTJk008xdO6Yuo7of2qBPwtAQUHf++AiDkJ4+LgVE7Al0pnFCDKj9M8ytn63MleoqEUAQeewTXC6do13mCXBXhQBEY8gFM+IMFzgAOndhQgwER6KJcFEbi2oEEgHIBifFwznNpt0BcgI9MHRYyy5SencYrOCHS7n8ABEBrWEDEF/ixatMgob5Qu7ijAAV4oUnYrV65s+sCFxGeMDTjgTnMFB+IlzBHLCfcbgMNaaLihsK6QGZ9ZsGO+ZDAxJ6w1ZDR//nxjYbkHwwm00yduOZ7VphKIKxJQcAiAnUZZUYtAPQOnV1xIKGSUIy4elCf
pnvjkbYYRBWEWEIgd4I5ydynZpaE8qQ/Ab88pGOUKH9O6deuM1UAjNkFAFzcWrqennnrKWAGMh0LmJ6f28BqKlRM4GVfuLhhO6Tt37jSuHKwOMqNsKioptKyRgDgKGJcXbjDWD1ASpEZG7uDAGLjSeI+0XsZesGCBGR9ZfPbZZ0auWEIzZswwlhnjAHBYQMjDusiI55Dea+fNM7iscHMBolSjE9SPaHV2AHytdAoqgWhJQMEhWuLzzcsoX/iRSLt0byhITu2c7gEICw6cuKlNwL2Ey4cTu7tLydV6wHWTKlUqcxLnOVxNgAMKm74JbqOUCeCS7krl9dKlS02gmjH47H7gwHh9+/Y1FpB7LQRWC/5/XD1kCZH9RBwA6wRwoOGiYn0obtaGouZzrB2UPYqZn67xC8gFUfrEDVgLCpz58wzxBE7+1gJD2eOmwh2GlYOriSA2fVJgh9XEs7b6Gp4q5ow779VXXzUBd2SoTSUQFySg4BAAu4yvHHcKVcqeGqddFBhuGMuvBGAQKEVxo8hRuuE1FB5K19J/497B1UKNBDTfnIxx9aA0yYBCSXKyxzrBfcQdEYDY/RpWB8oXt411LdkKZ1w/1mrBzUUW0vDhw8PcRNaFhQWDGwlwIMXWKntPY9u6CGs1AXoofcZ2BVwLVriYsJ5q1aplPh87dqwZv27dusaiISOLuVNEiKVGcJxAPq4q+lWqjft9A/Tz2CIBBYdwdtIqHFtg5U7Sdj/aB29du2fuAA7ED1DC3hpBVwK1nKL5O+CA8vJmLXjqx56I+QzA4fROOimZRgCNXY91aaEwUaSABD53myUU3pffKl+sGVxVNMAHK4GMI8YAhIit2BRd9/74HEWMa4uYS0SvPGV9gAAnfeSJ8qeOwbUBeih4wIdiQALYrBfLiJjLwYMHjRWCKwv3HgkB9qIlrBRiE9a6iAxhX3S+K+rKii3qNrjWoeAQzn7hj8cNgXsB+gqUBJkvnFZx06DUOXXyHx9li6LldE0AmN95lv/YKCz64dROSiSuCfq0mTv400kvhVvJW2NM4g8odE611ETQR1Qb82INWAQQ8tl1uPYHAKGYYXul8MxTwNt9fIK+jRs3Ns8SQ8EVBTiQ4uq6PsZGKXtaM/LCnYQ1hSuHsSPaUNgocbK+iFdAueHeLEgiU9fKavbFZlThTsKthuuNeZIlRg1F586dDWhhYSF/+gJs2EP2nv54FrBlz9lvLDt+R+b84XuEK4/++K7YQwHvsna+d1CR8C6JB+4MuhGVhT6nEoiOBBQcoiM9H72LEsFtQUzBU0OJcZqmLoBqXn7nVI5yQiHxGUAVHrC4B6tRglgEABTKl2womyFl+Zk4tRPQxfWEQkQB3q/hkiFLiH4twyv9EegGLGgoSACHgDeFe+4ne6wPgJbniF9QtBdeQx7W2kEuxF94F8sBgHBtPEsaK/2Twgp4oeAJ0FNgx3rhbmLuWGYEu3mGADpxEABa6x3u9y3Qz2ODBBQcAmAXUT5NmjQxFdCeGmmnZPng8iBuQMMFQnUxoMAJFIXsycWEIiNbB2VNfAIA4jncPpzoASYabhhABuWKW4W/o7hJIwU0OP0yT07FgASnbOuKcp1zv379jO+eE7B1uzAe4IOrBiVOVhDuGlxV7mvGkuF0Tt0Ccyd+cT+XFjxRZC0xTwLMgA7KnQwlW9TH6Zw5wTX18ccfh1kXuLuYJ8DFuPa+CP6NuUKT/uOPP5r6C1xQ/rrPIgC+pjqFOCYBBYcA2HCULUqQKl/cMtY9YacGEOA7BxxsnQP+cBQXJ2R+AhSeUllRsLhA6BulT0oryhqFStonri/ew3JBMUP9TVCZgjaUIS4TPsciwMUBeDAmFgxuF9fsIVxpX3zxhUBN4X4vBASBzz33nAEZ/PbQZZC1hHJmPtblwgkeRc3pnQbokcVkQcx1u1DgWAFQZ5BthRzJ4sIqQYnjUsL9xqmfwDZyZf5Qk/CTzCzcZgAhcwJY3K0CLA9cXNzGx5y
1qQTiigQUHAJgp20RHCdksmjISrLsqig53CO4dlBgtkqX9E+ULW4P6CtQYN6K4FD6KDncLShklCpKGRDgHVI8UXxUAVNnQNomCpTTPmAFAKBgIbkDREj5xHIgOE78g/kTTwGEOKmjkN2DqNQU4GcHZHBRsS5O9rZWA4uI1FPGQpFbJY37B6sGYHINqLNtPANoksFFzAZLhpoEYhWMD4CyFmInAAggYC0CKsKxTuCyAijJ1vJUvMe4WA4U5dkK6wD4yugUVAIxLgEFhxgX8f0HQCFyqscC4GSPQuOUiyuIk7QlpcM6sAoKFw/uJpQ5FcWctD1lE3GyJhMHRU+6K64dFDEneU7xNHztBHHpH4VL6ib9U12MBcD8KBYDoEjtJH4ArxLPooh/+eUXU9/APOBgQlG7gwN3VQAwtkqZcTnZ0x/gRlYUBYC4vWyWE88wD4CPCnI+R07WfcYYEO/xE6AjsI41hLyYG1YRdCH0j/J3Jd4D1AAS3FrIBBl5a554oe6/q/qESiC4JaDgECD7hxIkvRNFiHLlxIxrh+pnaB5wKXkCB07hFH4RKLU0FjbLBeuCvH9O+bhaqF/ANw9I4PO31NiMTUzDFqGRKYM7hxO3zehhDALguIxgYMWKwLXESZzaAKwIKpJ53jXeYMVLgRrPu/rsARssDhrFdp5SRG1GE8AJsGD9WPcZz2P1AKKAC1YE67BjIA/AlrW4srJiLfE8nE1YBnadAfJV0GmoBAJCAgoOAbENofxKnO7xnaMIOZkTBKVwDLcGCtz1TgJrOXCiJ70VlwjBaZQ1t8jROK1jeaB4CRLDXcQfrAzX6z5Rtih+lCanbFf3jf07p3L+4IrB4rAcS4wDqR5KmiC3t9x/15RRK3LGJZaA0rfA4Gk7WCv0GGQPwXNk4w+AG5YD7/NvAAMK3zZbfAfbq+WJ4jPGBWRwkbGWyNQrBMjXRaehEohxCSg4xLiIIzYACgvFjU8ecCD/n+AuypriMVwquGCsL55TMYqaTCQUH2yqxCQ49XNCp+Euoj8sEvL2bSGWrSp2nRnuHCwO90Ayz/AenzMmcQX3oC3AwDOMFRMN0MLNhqWB6wprA4WOm4qqcm83wfEeBXFQhhALsWtjrgAN6bxRvfc6JtapfaoEAkkCCg4BshsoLHzk/ETRkzmDUieIay/Dcb3nwBaZYTEQZCYVltRTisvsnQacqlF+FISRmRNeY2yUu7ccfkud4XpLm+3Pci7FZMAWueACwjLAgmGeyIOMKk+AZkENkN2+fbsJKLvGQZizvZY0QL4COg2VQEBJQMEhgLYDBcjpnOArLhoopHGJeAIHnsVSwB2EBUFGE2CAy4iALPEJlCLgQQCZAOz9WlQpHu7Xry8/JyBN/IB18xO3WnhFaVhkZC0hTy1e8+VOaF+xXQIKDgG2w/jJIZpD6RGQxp2De8m6lVyni9KD3oGMHHzycAXhbiFNk9RVUmLxq9OfrY8IsOVGejrIh9iKLVbjZ3gxAwAPOeGCUnCItLj1hTgsAQWHANt8lBk1CPjIycLh5EsWDhYAAWp3cMAPT90BhWO2ZgAuHqg2Zs+ebeIRpLPGVfI2BYcA+4LrdIJGAgoOAbhVZBlB82D9+3D6ABKumThMm9RUAtJk3AAEuKEAAhoxCQLSpLHGVWCwcQe1HALwS65TCngJKDgE4BYRZyD33ir177//3jBzAhiujToGAs64S+BNsvc+B+CS/DYltRz8JnodOMgloOAQgBvoXpHL3QaABTQY3lowBJP9JWqsKABU6xn8tQM6bjBKQMEhCHaNtEsqpnEhaYu8BBQ4Iy8zfUMloOAQBN8Be1eBnnyDYLN0iiqBWCIBBYdYspG6DJWASkAl4EsJKDj4Upral0pAJaASiCUSUHCIJRupy1AJqARUAr6UgIKDL6WpfakEVAIqgVgiAQWHWLKRugyVgEpAJeBLCSg4+FKa2pdKQCWgEoglElBwiCUbqctQCagEVAK+lICCgy+lqX2pBFQ
CKoFYIgEFh1iykboMlYBKQCXgSwkoOPhSmtqXSkAloBKIJRJQcIglG6nLUAmoBFQCvpTA/wFq1TrxJUYABgAAAABJRU5ErkJggg==)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OyJ0gXjyG9sk" + }, + "source": [ + "The speech recognition process begins with the **raw waveform directly 🎤**.\n", + "\n", + "The original waveform undergoes contamination through various **speech augmentation techniques**, such as *time/frequency dropout*, *speed change*, *adding noise*, *reverberation*, etc. These disturbances are activated randomly based on user-specified probabilities and are applied **on-the-fly** without the need to store augmented signals on disk.\n", + "\n", + "For a deeper understanding of the contamination techniques, check out our tutorials on [speech augmentation](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-augmentation.html) and [environmental corruption](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/environmental-corruption.html).\n", + "\n", + "Next, we extract **speech features**, such as *Short-Term Fourier Transform (STFT)*, *spectrograms*, *FBANKs*, and *MFCCs*. Thanks to a highly efficient GPU-friendly implementation, these features can be computed on the fly.\n", + "\n", + "For more detailed information, refer to our tutorials on [speech representation](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/fourier-transform-and-spectrograms.html) and [speech features](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-features.html).\n", + "\n", + "Subsequently, the features are fed into the **speech recognizer**, a neural network mapping input feature sequences to output token sequences (e.g., phonemes, characters, subwords, words). 
SpeechBrain supports popular techniques like Connectionist Temporal Classification (CTC), Transducers, or Encoder/Decoder with attention (using both RNN- and Transformer-based systems).\n", + "\n", + "Posterior probabilities over output tokens are processed by a beamsearcher that explores alternatives and outputs the best one. Optionally, alternatives can be rescored with an external language model, which may be based on RNN or transformers 🤖.\n", + "\n", + "Not all modules mentioned are mandatory; for example, data contamination can be skipped if not helpful for a specific task. Even beam search can be replaced with a greedy search for fast decoding.\n", + "\n", + "Now, let's delve into a more detailed discussion of the different technologies supported for speech recognition: 🚀\n", + "\n", + "![SpeechBrain-Page-3.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAvkAAAGvCAYAAADBg8oPAAGx4nRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDYtMTJUMTglM0EzOCUzQTAyLjE3NlolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY4MS4wLjQwNDQuMTIyJTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE0LjcuMiUyMiUyMGV0YWclM0QlMjJEWDVOaHh0NlBkV184VTRicHBEMiUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjJ3Rzg3clRWSWxnWEoxQlRGRjdXVyUyMiUzRTdMeFpsNnU0dGliNmElMkZaam5rSGZQSUpOWThDQWJmcVhHdlI5M3hqNDlTVTVZbVhtMnBsNXp6NjM4dXlxY1clMkZGV0xIQ2xrRklzJTJGbm1ONmNrJTJGd08lMkZ0THMwaFVOeDc1TzAlMkJRZUdKUHMlMkY4T3MlMkZNQXpGU1JMOGdTM0hWd3VEVUY4TiUyQlZRbTN4ZjkxdkFxeiUyRlM3RWZsdVhjc2tuWCUyQjZjT243WmltSG54dmp2dXZTZVBtcExaeW0lMkZ2M3paVm5mJTJGUHpVSWN6VFB6Uzg0ckQ1WTZ0YkprdnhQUXNTJTJCYTFkVHN1OCUyQlBGa0ZQbiUyQnBBMSUyRlhQemRNQmRoMHI5JTJGMTRRTCUyRjhBdlU5OHZYNiUyRmElMkZaSTJVSGclMkY1UEoxbiUyRmdYbiUyRjQ2c0NudGxuJTJGbEJ1eDdHTXZ4WTI1cEFxYjYlMkZiYWZscUxQJTJCeTVzaE45YSUyQmFsZnV5U0ZIU0RnWGJXMnc3ZUd3T2o1MzI3UiUyQm40QWpTaThKbDJXNCUyRnVpY0YxNjBGUXNiZlA5YWJxWGklMkZ
lNzF6N3MlMkJUJTJGSTczZlglMkZmdEJuemZIanpmZE1oM2U3OTk4M1lXejlJJTJCRzMyNzh2UHZwVGpPZHlqWmQwdW03Y1Y3Q2Fma2U0TmRGUDZ3TyUyRlh3NjlYVjY2WnQlMkIlMkJzZ0lSeEVhWjlOZlAlMkZsaEEzRCUyQldkazB2N3N5UXBNa2d6MSUyQnlSa0s5eWROemYwNnhkOU4lMkJMY2xoMU9lZml1UCUyQktNJTJCMFYlMkJ0QkxoWDJvTlpUQWU0WkVxYmNDbTNuM3NQdiUyQjA4JTJGJTJGVzYzMHdCdlBpMmhqJTJCM2pPJTJGUmJHR3pmbmY2QjFQNTJSRGVSYm1rcnlIOFRPY05IUDluTFg5M2wwNUx1diUyRmpyNHoxTHliM2ZRUDV3OEclMkJZUU1qc2YlMkY0UnBMM2IyNklFdDlYRmI5elFlWnZrQWp4WDVFSSUyQnA5TDVQZW0wdlZkJTJCdmNJaWNaJTJGRmhMNlk5eSUyRkV4R0olMkZJbUlDSXo2WDVjUiUyQlNjeW9ocndCQjUwRlVKSUxzSnAlMkZqWnZhbHdoenZIcmt2M0MlMkZQWVd2TXElMkYlMkYzN3VqSDZTOFklMkJyc3I1YmZubCUyRmo1OERsM1Q5MUliTjc3djVNYW9mTFVrZno3JTJCVUhYQjdBRSUyQiUyRndDRHlDeHNSQ2NWaXlTOTBsbVclMkZSR2ljJTJGRUpFRWZZTEhrWVJFMkpFa2hMNFg0JTJCc2JQT2ZCUHZqT3BUQ2h2M3J1dDhHOHp0WiUyRjJnbGlEOWVOayUyRnhUOWNVeXdLakdnZFZnSWxOUWYxSDN2ZDVrNjV6T29INHRnQ3olMkJJJTJCNGI4Rm5rOXJJV095OG83NFdWUnRQdWRKWEhyWXZoajF0TCUyQnYlMkYwSVZBNWtmUmZ4RG5vdVZXVkJEWFMyZFk4anMyJTJGb2NtRzNrR29Ia3JDZXFYWmNPeDBpYTFDbmM1VTduOE10THNpJTJGNGY4ZjRPczJ5WDduTDklMkJDViUyQmFFV0ZHMCUyRjgyTnhiVFU3UGkwa0lqRlJleEhmZnVPJTJCJTJGbGhnWVp2UmIyejk1RHpENjVkOERKRGo3azQ4UTdCOTlCR1glMkZ4RWNvNUglMkZkUmFqJTJGSEViJTJCJTJGVEg0dnhnQVJSRUJQMzhhQVAlMkJGUUVmJTJGTWRDaHlMJTJCc3c3ODkwdEYlMkZpVmtRWSUyRjRhZk9hUGdDRDBvTkNQJTJGMkR6UnZmTFVxUyUyRlpBMGM5YTlnTnYzNCUyQkRXa2FWeUFqeTZnczdBdE96Q3J2dnVkbzN3OSUyRlM5ODVmOTFwQUhhQTFDSHhmR2Y2VFdoSW9xayUyRmlZbm8zNE9STVNmQkNJVSUyQjI5eU11WmZjTEl1NFdBdUFON0ZUVGpQWmZ4ZmQ0biUyRkIlMkJhWGhDbVQlMkZhbVFmM1dldjNDVmYwSEVQOFh5UDByd1I5dSUyRjdDcmZUekQ3OG1QdzN4cWtVUFlIdnpwJTJCZEV6ODNNbVhXMyUyRmY5JTJGczg0eiUyRnRpbUQlMkJxYXN2T1BoRFZ4OU4lMkZ6cjFmMG41N1AlMkZ2RWZZSG5QNEVzZGolMkZQb2o5TVo2JTJGSFdQRk5GeldLZjFUZ0JYMlpRcmpmeCUyQm9wdFJmZ0NyTlJyJTJGejk3OFZWSEVDJTJGJTJGZUJLb3IlMkJYOGZDJTJGc1N4OFAlMkJOam9YOU56bldEM2J5ejA3MVRPTSUyQjc4RE4wNyUyRkZxWmc0JTJGWE9uaWhpU0lQJTJCYm5BcWolMkZwMU85V2VGbHI5RGczd2F0bk1hVHI5cDhiOVRVeW1ha0NuOVo1cGlLUm9QJTJGNXM0SlliOGk1b2klMkZnNU4lMkZRc0ZvUCUyQnZ3eCUyRjVmeGo4JTJGYkhlcE4zJTJGb0pTNUNBZjR
NajZhRXNoJTJCd3Y5ejY0NiUyQnRLUkZ2emFFY1oxJTJGZEdlc0MlMkJqbVZ3RiUyRnk1cjhvMHY4V3Z2OTYlMkZyeDMlMkJBU0JQMVA5VDZXJTJGb05MVUglMkJXSTJCJTJGZyUyRmolMkZySmJ4YTlIdU93dTJMZE8yd0VXVzRGbiUyRmxUTFFUemIlMkJld0YlMkIxMHIlMkZwSHdhTm1YZVFVV24zYWUyejBQSmxuSFljTjhmdEdXU2ZMend6OVQlMkZzMmYlMkJMV2hGJTJGYXlhSDNyNFBWcWhmNkliN085QUslMkZyJTJGQUhENnNiVHoyM0tPJTJGN3RQJTJGbnhwNTk4TGFOU2ZBQnIxdnhIUSUyRm0lMkZoNHU4b1hHRElQMVViTU9TZjRPNWZMbHo4b1N1VSUyQmFldSUyRnI3Q0Jmb3ZWQzUlMkJCTE95JTJGU3hTJTJGd3B4V2hpbGpkblA1U2ZoeGE5UnZ5eDlDeTVvNEFmOHIlMkJIcjV6b2clMkJQa1RtRnlnZSUyRlBoUEh3dG5tZmxEakdCJTJGenlTJTJCOUdLJTJGR2dCcjVOd0NRRVglMkZYcUxpVU9YJTJGd083bEE1dlBOJTJCSUt1VTlCMzcwbDEwSWRnNWVqVDc0VHlrdjNBMzh2WVMlMkJrb3pnUmM0SmpmQnduZ1NYSG15S2NyWURlZ0lnSnBxT2JUcGVzTGh1aU9sbHg1aEpPOXZTSnFMSm8xS2FWZmtIeHMlMkZBUlBobk1tdDlmUlV1aVBLNHpVSVVGdGRTcUd5JTJGZWJtM2ZWa3Zmdk1RUkNmVGhldXpmSEwzb3R3UzUlMkZHeXd6SklCSTRWYTklMkZYZyUyQk1TZmg2cDAlMkJFWWRuQXVQV1pseVoxRjlneGZYamE3UFpIdXBETDR2T3llWmg1YXFVaVVIV0Q4NWZkdjRlZSUyQnBuTVZsejk1Z2VPJTJCZmklMkI1eEF6eTdjMzkydlRhTHh4VFlzOGdmeWklMkYzdHNBT3M5ak9YY0RZdnI4dXJjYjEwaWdVZEV2JTJCdTNYN203WEEza0U0SWJuNSUyRmZOMzRSZTNyV0JiJTJGajgxN3N2ZG5YNnY0MXI4SFBPY0tMYmpOeUUlMkZQbDk3eXZQdE1kdnc5UTRnWHRWbWRBMlVuMzViVTVrZU5kJTJGakNnSEE3b0lWN0twOHpQUGY3dFg1ZG5scTglMkZINVFrR2ZMMFQzUVVFZVo1Z2I2WmVEbWZ2WjdTZnQ1Unc2NW9vSFd5cVBKczd5VEwyeWZkMGlJeGklMkZIN2x2UUdHQktSdTZNZnV3TTdjJTJCWldiNnRKNmFmTGs4SVk2SHMlMkJSODM4M2l2TDBla05nTmplVWtvZkdDMWNnZVRDcnE5NmlGcmMlMkZzbFo2UGUzeGt0OXluN3RkYkkxUTZyYUtCWldIOVVmZU4lMkZSSERHOUt1ZGhUbzdPeHRySUhYaWl1TGs5NCUyQk1JTHNnJTJGbjRKc1NRQUUlMkJYTUYlMkZ6RUh1QkV1dktMMGM4TXBENlJIRjNKVjhMUmJnVmlKYkpkTEFveTclMkJ4TjR2YUV5VFhvV3AycGN5VDBkcyUyQmpvSlRQWFEzWSUyQko5Qnp2MXBJZUdGbzRuUVY4UlluWlVKMzE3WHBESWxGRFNTJTJGeDEzUGQ1QmNZaE9pSEQ3Z0MySjdudGVUZXdnUGg3JTJGd3pIcmJSempOJTJGVERiR3lCaUxlSnBxdTVmc2ZYaHRLNFlUcWNRYSUyRmhaRzE0ZyUyQjlQZlp5WmdRdFprQ21KaDQ4MkthQW43Y2J1c2Q0UmhLWTVweEVzRXpBRHZ5eHcyWGg0cll4aVMxcnEyUGNEV3Zpa3hYMmh6NjREaEJFMFF1ejI2Z0YlMkI5TVZjYkRibVoybUQybHlWZCUyQjZ4bnJzU3RHZWtoN0F5M0F2WlhMMHN5RTJ6aVYlMkJhRFB
nOGplQzdGY05EbkFUSG8xWHlkZVRrdHVweTRPRU9EY3l1eFN6SGFuSEpGME5Sck5aUGJ1UnZqemhwV0t3JTJGZjc2MUpscGRoY08xdmclMkJOdXVQalQ2Z091ZnFvUThSJTJGUkpzNmFNMSUyQkQ5V0UlMkJMaGM2c3BtNXRzTWZ5c2lsQVBYd2RkWTVsczlRS0hKMWZRM3p3emY0ZGpHbkJVdFhGMkYlMkZJME5rYjhyYUI5Umg0SjNWRGNpR1oxR0dvZVdjQk9tTDhBTzhUV1pCbjhDJTJCZ0ZmQ25jakp5RzNzenJ0ek5iOWpSd3lYbUJsU2NvNlliYmpyNTNOajhYWmtGc0djTGVkWXFHQnhEdzI3bzlWeVNqTFE4M0JFUlk0eXN2UVNSJTJCZHlQMDdXSGJpJTJCc2d5ZDJFdGNNUWU3bDNNJTJGWE9PdElZbFA1U3RrNllOOGVUNCUyQjBnVzNBekFCMzg3SktleUtwTUx2MlNTNG5TbDlOM0JyUlExdWZ3VEtXVXpESEpxcU80Tm9tUWE5eEVnZGdJR2twY0dRNk53S1Z5NWZiaFJNZmVRJTJCOFJIdXlzd1A5bUFaUml4ZUY1emx0ZUNoMzBIQjJvN3BNTEtiRlQ5ZUVna2MyUnclMkJJeU1yNWd5QVJGayUyRndTR2tBV1hqY3ZldklyRGJaTlJrbGlSTzVLOWM0WktqWEhsaFpaalhkUmNRdVZRcjlYZTI4WWQzVUtvQ0xSS0slMkZJb3g3a1o1NyUyRjM0ZVZkTGlEVDlCalBGMzVkRnZCOVkyYnRWcVpyTVlDWndDc3p5aEgyYVRxY1N5TjI0anZRSUt4MiUyRnlJeCUyRmYlMkJWWWZ3blZtQ05qazklMkJOR2J4VlREUzFKY01QdDlyako1RWhzeHZNT1FUYU5pNnRrQlRyNThiU3VQViUyRkl0NmN4Z1BQdzBOZEFPeCUyRkxVU0lyS2tHN3Nra3k3SXBIcCUyQjNzeTFYR1pOWE9Ec3d6WGk5OEplR2dncjdMamhXT2p0NG9rclIzeGRuMng0bWdkR1ZPOUQ1eHFaSndXVVpzM3VVJTJCNGltbnd3QUFrVGNYbXB1NGtWSHFXcVkwWkNSTm4yZlg1V2MwdWhPNDVCU3lxWnAybVFRMllQb1dZMjBEaENhSVRvd0M5d3BjeW16eCUyQk82RjZ2cnUwM0dheVBQejNVaFJIS0laNzFlbWpVSzllcFBLciUyRjJGSyUyRms4eVpXanljQ2RKNW04YnpDbTVxU21vMnhBRTViVlpxUUQ3WDNON3lVS0lVMmdNUzNiOU5UZVlTN1hJYlMzb2xSOE9wbXIxV3NacFdsV3VuY3NCcmV3Nk9HSXFBeGVhWGEyMXVNeDcwNFRsR1RxSVlEcmlvZVJMdWlJY3pZMFJTNjdoMTFzRTFZZ1lPTU1iZ0VvRGYzTDFwYUhEVkVhNDZMb0lBQmJ4MnBnaUx4UnJjbDdPdGlvZkMzc2NPR3BPQjNlb0NQWkdCWnM0TEQyeE9hblFldkw1VDNldGJBbDZJaXl0azJHbFJ6JTJCVmJXR1I3ZTNDTXMwZ09PODJRc3JKanlNYmhyWG9walBLZElhNW43TllDeDdXMCUyQlRGWG44YWZkcGlZTW5oTkJNUSUyRkFSUW13UkptT1NRUkxrc25ZR3hUT1pGVmlxQ0FsQ2Frc0xSTXRUYUxuQk82bWJTTDlOM0h0Mm5pM2hyYSUyRk5mZ0x3c2lOY0pIS216cjNaS1FIOWEzelQ3TkJkTHVZZ1plNWdnSjZrTmUyOUtnd253NGRXZFUyTzNZanJGYk9Pc2JINGx3TEZtNkNMNDNiRktVYnpTYUNxSlN5bTVkTllkYmN0cEVyM3ZzOG1QZzN1MDFNcTgwM1Y2NHRuQ3clMkZBSFM1dmdieDhUQlQ4NmdmaFZkUm9kZEpzUlpDdkFzbENGeHUlMkZqS3VsWEdZMllYeSUyQnZjbUl
nVllTd0hacXV3VVRFWVRWZ2NUS2dZZUxmMk52V2dURDFWNWlFQ01rc3R2RFVFSklKOWRYTmJ3WU5SMFd6bVpqa2RKZHVqSTNGYjIyZllGN2dEak5jNWZMWUcxckJYMHk2SFNLaGM4WVBKcUd3aEN2elc3WVRZT3hOTDAlMkJ1bUpFY2Z4VmRUWUlmVjZpdmV4bTljMEJZcnNiWURjN2JNanJKbVdvMjB4eDhhcDNnb2twODgzTHZmb3Uza2dzRGticWdtdHZZUVZ0WGxKRGtpR3BORm1naGF3SDJWTmhlJTJGRzdSN2tiM2tjRUlrazNCWlBwaCUyQnlrUkxvSGlKRVZtUUtZUzZZZ05iZllQeWdaWUM2QWhwcGR5TlRlQUhqdTVmT3JjaGJnTGFBanExRDVzUCUyRlJmT0dXSFBRZzRXVGZQWURCeU03RkFSeDRzdmtQczh4dmw2JTJGZFA1TzNVWkJSUko4JTJCZEo1bElLJTJGcEw4eWxQWVBsdGdreVlHdjVzJTJCUm9BOTBvNmowMUglMkY0Qkk1YllsNGplQmRBcXF4MGFiNFN4WDRnaGZ2Mmpvd2d5WDVZY2xtM2JwbkU4Rnh3UUhDcEpONmw2YnQ2RXNsc29sVVM2ZVdQMW5QRjJXQ09hcGRndnc3anlPZlpyYjFsRTAlMkZUWFM4S1BzM1E1d1N2VVNmdnhobmpjaDBJeGIxS3BKNzA5NDRtVDM0QmlDbWdPbmtZcVF0c09vUU81VmtJUkFXS1NnRzYlMkZ3eGNIcVlaNXZWNWhpdlI1aG9kU2JHSjJKOG15QmhnZmFNS1hpRjJ4NkRYM3FsakNpWnJ0aFg4czdhbmtUM0dQdTZYT0pvSSUyQiUyRkZZcEJvUUttMEcxN3QwSlFlaCUyQm1pY2RaZGs1YnhwRzg4VURGM2NtSU5qMFRTVEdaZzBEeWFacjl3SXpCdEUzTm1Ba3pqQ1V2JTJGaDNWeFZmVHdWZElqZFpCUlB3ZkU3cHJ0V2VZeTlGJTJGQjZwNXcwYU1GaU5RZVNPR3E2OWFsVFF1T2hrSmVQVkM4ZDFLb2xOZzNKazBzd0ElMkZjMnVKcGFQV3Jua2g3NVZQUk9ZM1pVcnRETEFtSEsxaGhwTDY2cEEzVTFEZHVYcDFaOVVwTGEwYmtrc2NtSmdSejVETTZ6MklHT1UxS3dBZTk0cmMwRERKNWgxSmxra1liUElkTjl2SGVqYm43QWlQR1A1aVNTU1VoSkFBekVaTHk0YSUyQks1UnZJMDdsWnJlQnV3QXk3SnRVR1NyNzU4UHdBUVZDQmVrS2NCQUklMkJlM2g5bW9hU0hNNDA3eE1vUkhoa2tuWjl4WmxoWEZ3bmZGTmxqRHUyUVpaVSUyRmRnOHdmalAzdUNZckFtcFlNcm1tV0xURDh4YVdGQmhvaWRwek9QQ1VRVmdiTVlVa216anY5WkZpV3dVVnFSJTJGV0VkYWpGT1poczJtQnNhRXk5dFJjNVJGZ3Y2JTJGYlNZeHg1eUdaRFZGSTdTb2JRMHk3RlRjb2NabjQzMENxaSUyQnd1dlVTT2pIQWQ1VE9mOWlPJTJCdHAwclFlcG42NHg4YkRtWThqZFE2QVElMkJnZWxrb1hVVWdyb2VaSU13QTBpSWx2OEtleE5ocjFabkswNmdsaTN0bnRXZTJiZGl5c0ZSb2RnalZJeVlRJTJGQ21VcjBZNSUyRkszVG1BSk9hWXRjUGQzd2lFWEpERnFIT0lRR0NqJTJGb3QlMkZIbEhIUThES3RYc1NVOW5TSzFqTldLc0s0MkRLJTJCUGg2cDdacm9uRDRqcUU4dU5jMWkwRjRLR2kwZXo3UWxkeGxKcUZNZ3o4TWIzQlUxdnl0TUVQWXNhSWI4ZkRJeWxKWjdlM2Q3N0tBamF2WDdhcUd3QlRGUUFoZzcxRVR5R0tFbWV5SGw3S1pUYm5KY21uUEVwYVdBWWI2NDdFQWJFWDVaOTd4bU1
mVlpKS0I3N2hNOGc3V2NDTXFaQjBWZDNQa2dZVDJQejBxU1gyNk0lMkJZbURRWkdvM0Nvek5rR0xROHNkS2JMV1NYUFdacVN4dW5TMTM0OXY4WWtka3Zkcm13c0hycXFJNDllNXN6Mmdwa2VwR0dOMmlGanVCVWxrZGFkU1VPazFMWnk2WXdWeXFPc2pUNDVjTGdTQk5US29TZHFqUkRkdlljWHJqNUV4bDRqMGw5U3NQOEtBS1ppelo4T2tnbHluQ3Zva1RET1luaVNyTCUyQjlJSUxNNXNlUVlDUDhpcjMxYlNoZnpWNkFEUlcyU0Fub3NLVEFvRW1pTEU2UmpUNU5OZTdoZ1pFcVMzVWtSR2tVOGJ4OEllJTJCZ1lkVUxRdFY0WHRhVnloNG1qaENlOVp3b0lqM2xUcCUyQmx6R0VBUDBZUU4wWGxZaW5iUTJLdlYzJTJGbkZEdUZlUlJUbmtnbVNTQzFPRDBVNVQlMkJJYUVoQ1Npb0o2d3olMkJ5b2lldnB1NTIxUUZYQzRlc0xzeGRFeTR0TTdBQ2dBcTBMJTJCSFd2cDhlRzljQ2dCWFV5aXhaaTc5bTgzbHZyRG1Fb1p0ZjZyZ3glMkY5T2p1c3VYS3VmRktIN1pUT0xlMkJxNG1KWkkwV2lVRkVPRVlwNDg3ejFOcENrdzNVV3o3YkR6bU01RVZzMmFia01PJTJGRHg0a0syd0pDS2h4emdTVG10bDB2WjRzNEhlbGFQNlRSN2wwWVRVOFRwM0JqQzhZSGpWRDZSOW0wZUxqZ1pqUVpKTmt5NUFnTVNlblpjQlUzUmpmVEM4S3VsTkVRbDFDMGslMkZ0aFBDaXVIaVlDNjRRT0FPTSUyRnNJeHRVdGZtQjBScG9LRzJFaGpBSG10bXJ5elE4M3FzRnhuZXNTeVNLbjBOQ0pxY0RlTE1xZmx6U3hiZWhYZEw3dlFYWWtKenVOcWZpRTZlYktlZyUyRnBNWmlLJTJCNW93Unc3MWI0WFREVXdqa0tjSUxhajk0bzdkSXZ2alFsV0Z6WFNnOEJneXM2d0duRVd2QnFHWWFXQks3dGxlZTZ6OGxzemhEZHF0c0hSU1B4c0x5MGFWaUtGJTJCOFFsbHZ2RGJKTFVNWTFRRGswNk1PeGFaa21Ma3ZhTHBGQnAlMkJTYkp0aVZ1b2RJU05wbVAwQVdyOXdKWjIxRjFOaVFyJTJGOEhRZXg3VTA4Rk9Hb2dvM3MzNXQ3bnM1UXBLSWx5TU53dFdTY2pHdE5IN2tLTTN4UE9aaFp1dklVU3M1UWo2cUQ2RTNDZDVMOUhCS0NnYWwwdEw4aEFzdWtLVTBXUnhKQUloUTFUOEt1dlNqOUxwJTJGb0Z5V0FYTmdsbXpjUmw3ZDNNeXV1YzhuZWVHQnZhcGV0TFhSMEtycU1oWjBhcFRPeWFaYUNVSnQ5Uk0yek5PT0dkM29WVkpIUlE2ZVRWWGVRSW8lMkJaJTJCJTJGUEtNeU51RlV3dnRBME5LM1VnOWllckR6Rm1DdFRvYmtUTGhqTDZUVWRmcnNEMSUyQnFWWFFVWjNqeHhvOWZ1SFo3ZkRDQlNaUFAyblNUNDJEODFaRGF2akliUGpjZzgwNlJhYnpZVXBIJTJCVjI5JTJCY0ZqZ1l3RGN5ZjNRNGk2SzJBemdVVHd0ZlR0RGxyaTBJejA5azNaWXZINndYNUNoYnQ4djZwMll6ampCQ3Y1bjZHVzlOSVB1aFhZc1FOMTh2Z0R2dWw5WElmV1RiTVZ0T2FXWExPczdhOVpWY3pwTlBwMk1MdTdEaXpiZ3VLc1VHJTJCMlhzT1EycVM3VXptN0paN0ljdkV3NnhqT1pqbzAlMkZ1U0dnT0RnVnIybzklMkZ5NVV0UlRibjZWb1VLTVFVbmgwNml6QnRVWVhvQXVoSVpyWjBrNU1EUW42anJlZUNLbyUyRlZOTWx3SGZiMzNWb3oxWkpBMDhoV0lxV3hwZzVUeWFNR29
aNExsT2o0aGxPNXNXcHdTa1BIY2tqRHVHd3ZpT3hGbkdxSW9OOEM2MzRHTkZqQ1ZVNzlaa2IzV3lMTWd4ZUV2NVA1eEUwQnVVdndKTFRlYmRzQmUlMkZOMW53b2RpZHRTcFBMVW50dWlVeFFsRDhJU3V6OW9iN1B0QVUxV2IyaGhlSHdIbGVrcUlYaWlYR3B2QmRueUZ3QndjcDdlWlZaMzV1TW1LNG1KOGd4UzBsV0VhZ1VUWXpCcUNZUFV6S1F5ZkdvSTlrYmR5aUd3YzVNN1MwenFYVDE2YUgxb1RuV09UeVFjR1o0VWFMVmZSVUpWVSUyRk55NWVZTERQNE1PaW54NmJBQkxaOFdPSWZMVXRsNWF6RUFYbnN4T3QyeGEyS2VhJTJCM09FRVJlV2dhZFV4TllaenJWNjlrajc4Z0M2T0U3aVFWb0VSaU55RUlCWUdzb1dSQyUyQlVwdlJMNFlTTHU4Y1ppRXZNbDJTckg1U2VoNlU4d0VoNlNQJTJCUmdId3VaMjRiU1olMkI5Qk94V1JUMHNtdkZMeDM5a0xVVTlibnAwaFRyZWh6eXJJTm5lbnhqcHB1JTJCZDUlMkIyUlhuQTJIQzhJY205MWpZems4VmpVeGc2Rm92ZUVEOW12QyUyRlZsekdQUG1jMGRodmhoVlpjMGY0M2dCeGdpTUlMNThCdVdQSmlvanhNTG9leElPRlVNSFVuemZ1MDZlaXhodVIlMkJkUVhiQnhxJTJCdDIlMkJ0M0toZnpvd21leUN2REpWaU1GVmtpd3ZPTlJNdkdoSEdldllWTE91Z2xMZnVZTGhLYnB5bjFTMDlWRXZvMm5CUmR1MkNxUmNZJTJCT2hZUE9lOUd4Y3VwRHphc1M2aHRKMSUyQko0M3k2R3RkMWptWmxwd3hSJTJCbDdqRiUyQm1PUUVpR1BWeDcwaXk5VWxUaUpaemQ4Q2FmNllpJTJCRmxaYk8wRExSNTJrR2tLZ1lKd0ZZeWcyT0N6TWRTaXklMkZVaDM2NjRNQWtpRGRHeDNFS05xS05uN0JMbGplNjhmOW5NQlFFMU5lQjQlMkZIcWZwc0IzSkRBNDJuYVY2VTFVc1RjZnE5dkpZbWp0dkpIc1hTUWowJTJGT2lWRTklMkJMdiUyQlp0VzVaNkRjeWJETmxhTUpUeFMlMkZlUjRlelQyeXl2QnRjN29wWTl6ZXFtcERPa1J2SHIyUmVSdXVITndoQ2F6MXhCUHBPWktTdFA0MjZEUUwlMkYyemxGVVVJVDh5M2g4aGI5cGZ1TTJyR3g3MG41JTJGeFNhMHYlMkZURUloJTJGWDk5aUROcnNGaDhUMGVJdVJZM0p2STJWRXclMkJGYXZPbGs2OUFvVVIlMkY4R1ZobWFvUXA2TnFheUt1RTlFZUFaY3Qybjd6UnFId2FsZUZpQzYlMkJKWFpVUEZyTnVGcG9BdXFsNUNSeTE0NUlIZlNzazFlcll4WHZuY2NkdSUyQkN4T0hOVXh2QW1kQWtkZzNscVhEekp3TkFZSHpKbmtUOUhzMUxuczBCRFp3bnRzbTlSVm9FTnNmWk5hazhDNnRJOTRzdEJldTd3b2xqSkdIOWFidmNPSXl0V3p6cW82QkdhbVdrTjZvMDlzUlpmd1RFRU9nN1l3Q2lnSUZSUFhlTVpnNE5HT2VRRDJmU3BISUxGRm0lMkZDWEdPbVVGMUFMRERXWjZVM0l4MlBwcm5BWlMyeWdiMXBIWnA0VUhsZmNVOTlZMyUyRmpLbnZrT1l4ejFocWZJYW1Mc0V0Q0pNNyUyRjQ3V0c4emlhUFZ4MVgwOHUxeDk4YmpsWjN0c2U1N2NzSTJJaVo4RmRpdEJtT3JiWlBid1pCemRLMjFvMVlpekJhMmFHcG1jeld6Q2xWamg4NGtHMm9SamNLZWdVNDBiYVQ5d3F2dW9zQkVPZFNiTGVtc1NHM01PaXBlJTJGT1ZoMU5wbG8waklsaEE2YkR1RTl
5Sm1KSmh6VVFyU1lOY2U2dXRTY05DS1RKeFVaalFpTm0xZUpCdFklMkZ0TEFJdDd0VTRkNXJJUlhad3FPRFkxMEF0VTlCS0dwR0dxcUg3eDREMiUyQklHODExZDFIaUhIQXlvT1hTcjZxSE1LN0RjdWVCVm41ZG4yQnN6NlBBMTIyelhaZjVPMFVZRkQzSUVSMm1sT3ZNU1QlMkZZZ0xwWGclMkJMTzIwazFEN2FwTThsZkxXSktabGJiSHZNdlRpWjl6eHNmaGMxeE93MkJtSnZSYWhQY0xRN3F3a3hhNllDMnN4V0lsTnFOMjJFQyUyQnhnS1dCQTMwUjJZcTlkMzBiR01VJTJCS08lMkZpbmhxRVlhMElkNXB2SjdzS21hZDFrR0hVMXlzem85ZGxoRE5QY3NZMUglMkJESSUyQkVnb0pSemRoNU9xMDVUdFN0cmRydWxLRlVxeTZCJTJGTVpMRmp6akZ6NEtKRFFzZHAwb2RuNjRUbzRPaVpDSXRDNk1LSFE5RkpxSFI3eDd0ektLRUwzYmFTTWRjQWVGMkVtMyUyQklaT3FoMHBtV3ZIcEJPOGZxcVdOcWpZcGg1M3I1cVBYcmR2cWJETTFQNVdyU0x4V3ZqbG0xME1Cc3dEWUJtODFLOVMwZzljZnJBc2puS1BWeHd6S3JRN01rQzNzNlc3Q1dRNkZSQlZ6VCUyRk1sODhqYTJrb3E3S0NFTnhhZUZvTDlGeVpqdHBJSnB4NEd1cE80MUIxM1YzaW1lY2RSSm5oTnRpU2NuMkJHUUpEUEN2bDluVjFVa1RDdlhVUVFTODl1a1ZaVjlmOFd3ZEolMkZBUnBWNWpQR0ZHVzQyUEYlMkJBRElKTUJlZnJnTDVDc1R4UEZlaFFPaThPVEN1dVBMREtIdlgxUlNzUUM2ZTRvVERZJTJGbWJ1S05Vem01JTJCWmdTaU9CdXRIdXZkJTJCT0FHOUNOczVGR2FWUlFoZWdTV0NjY2p2U2R4MEJtaUpaVXlwN0x6YllVakVweSUyRkZnZ0JWdGpjWmcyWUEySGpZTXZjb2pFeHI4NVV4b1JaNVJYWFhZSlRMVnB4a1poR2loemlCYVlQcVhyYUNmYndaSkFYYnh2T1pTNlF5OU1nTUR3T2ZUbjMxekdONGRaZXJldHFiUyUyRm5KcW1rWFBuS2FNZCUyRmdoR2R2U01oR3FHJTJGWjRUaThmNGpTcTNDSG56R0F5WVo1JTJCcTduc3dxZHplM2xLV0hVU3EwYjJ1SmFGelNQRFVsb3drWGxwS1BiUnNDc3NvbWgxWXJpYXZvVFFLNSUyRkdzNk9XNXpYd3ZPbVZLUGUySHklMkZWQUswdFQ2NTFNa2VYczRvJTJCenBwWVRlYVlPa2hrU04yQ0dKY3RMUjVoa2Q1OUZoTDJKOHBHVzVLaGslMkJveEtXa1A1NjRVVEdqakppREtvOXRESHdmTXdrRlRLb0lHU3ZJa3RYVG9IRUlrWnNrcUZEJTJGcFBET0hlc3JjSm43RjhRbHlMSUlsMFk3NjFJZll4WSUyQiUyQnFDMlBqZzBpZzZoVU00d0FhU3pjeGlQT1c4RmRmSm4lMkJMbE9hc3JXNllFWlVUbVNpJTJGdXJDSFVhZEVQSFo4emFoSmkxT1c4RnNuVDQ3aHpWT2VGckprJTJGQkZuQ043SENEQjJsVE1QTjRnd2xhbWFGZXNhWkJORGlPZ2JKU1U5cm90bWVtNEVFRFFoanlyVXBIZUpNVm1tOFhOc2FLUjdxQkI2cUZrdHdCM2U1ZnRFSlBSeXFGYjJkSTRWOWRLc1ZodjhESHRQeHh6ak9jeHJVcnp0dlNTSWlYdDJiQzN1bjIyd1JqMFByWXZVWXpmVCUyQmxCZDFYTVJzdWJZQmVKWWZEdVdkMk9WejJvbW5pd1dOS0Zhak4ycmdWM1NkZ1JzZm9lT2dSSkNyZ1lHbDhQN0psNEQ0bndVWkIxUlFUZ2VDcUJ
XMlE1JTJCSkYzNnFoYW8xSzJiMGRSWE5GVndZMzE3WDNtUm1ycTlZMm5JUDlyTms2NUZhcE50QndBZmM5RjZZRUp5TzJORmJIdnpEdFpVTks0SFhzRmwyUHZkRkRBaEFPYTJJVzdFdmhKWmlyUExCRmRpQ1hidmYyYkNGak1lamlkMkpCN1hhQlV0aGRPMTFGbFF0SUhjVFNkb1p6dDFYQXl0aU9xZ05xd1d3T2Q1aTNYOUF4VkUzZlJ6ZXdjdXhPWU1lJTJCYlRsZDdQYzA3NGY3U3d4amFFYVRSNUpPTklMcjF4RFYlMkZvZk9KVyUyRjF4UFdSQ2xDNVlqVnBrZUViamQ0YkNPdWVrQSUyQmlUVnpZRVVWMmpMeDBXQlE1eUtEZjljYnU5MlVTNlh0aTQzV1AzcVlpTlpUTW1YZFoxakd1bWFHTHlwVERPbWRaUTRMa2dYZCUyRmNyc1BwVWd5MXhJWEdhSiUyQlVQN1NhRHlZRnNYTXI1JTJGZFJaSFp1WWRkUGhnSEFyeWwyaVFIdGNDeG80bEJlam85ZWhYVk5IS1diZEFrJTJGMTdGRVUzbjI5WXpVZFJrcjBsNnJSMTg5VmhCU1ZVeG4yRGV4WlBBOExoJTJGSGpPbzFqS2JlTXoxWWJzYWRvemlQSDRMSEN6VjRNWmdrVW13bFpDSU1BMDVmdXclMkI3biUyRmtwR2FBNlc5SmFmZXhTdmVmc0ZZZiUyQk41eGM0NTBEVXJvdlJURTZCYWV6V04xa2F6alFoNlElMkZsWHVTN3NabExsbFZDalVBMkthaWs0dGRKSWtSUDFadmUlMkIwYlp5Yk1PNGwwcm8wT1N2ZUdrYW5MVVNrJTJGUmN5d1k2Q1RmT3BOdUhHN2VEMkw5a2NIZDBCQXQ5NlhBVUVJdDN6ZEFla2Qxc1hCS29JNURPa0swV1NIWUhaZER4Z3RURTJkMEVtZWR3cnYwdklobEFzRVclMkIxTnBJQ0pVb0dDVjg3RmE5cGhQaU9weTk4YlpJVExsJTJCcTUlMkZoVW95c3R1N2xBMzlBNmkwWklaY3BPcGZteTkwcm5mVGhjcGk1VEFMOWdqaTNKVEdvZ0hJYiUyRldDYTZUaFVlUmlMeTM2NFlRYzNMeGR1NlJBU1FOSVNrczdnNmFUYjElMkYzUEQ5S2lDa01KbVk0ZUVPaGQ4cUZkRjNtYkpySDlaYjhmTk9FS0V1TU1EUUtqSVB4azY5N1k4ZEFXeUlZSnVNOFI3dTdYRSUyRnpOWlVrYzYlMkZIRXVLWU8lMkZtUVlTTE54MkJ3aXhWbWg5SGtoSldrYmhCaFFZd09LVGltTXdrNHk5TVI5dlJldWVxTHJUVmlJSHIlMkZDc3BPd204NkxDTndWbnRPZHF6WTJJRG1FUDRsTWtVcFZnTmhEdlQ2dVRUaSUyQkl0NSUyRnhKZlU4YUd4QXBtblRLRTJJcVlnJTJGdDVtSGV0S0t3Rm9FRjBFYVRNc2JGTUFFVUhwOUJET2EyZHJ0dTBKQ3EwMjk2TW1ZZzFVRGlGSERSOER4byUyQndraWhuVUtyWEM5dkZrdFlvbHR1OGZJWUU4T1BkSXJacWY0MWZ3NEVUNWs1aGM4RyUyRkZvVHhpQ2w2ekNQeTZNME12bkYyY2clMkZSeTYlMkZkWE1Qb1U3V0drUm1FQmVVSnhGeGkxNVpodXk5Nk9JWDdlNkl3dTdIOThDNG1ySkhVU3JEZSUyQmExNTNhWDlBak91WWszYmZqTWJHNXcyclkxQzQyM1ZZaThUSzh6dEF6THl5WTFESW9LWUZDZWQ4JTJGNTd4NWFVTkElMkJJT3JzNGdSJTJCTW50S2pLaWs5MzFIYlVNZWI1MzY1eDNGQkljZUVCMWdFN2g1anpCbGN2a1pFbG9MMmFNODlQZzJOWDZnQlJvTEJBWmNLSXY3clpzTHlkRm9wUG1PdEw2eGxVZWNTJTJGTVpwZFM4cnhhY0U
xUzl1ajJkYkhaV1pCbGtxdmQ4UHBXSjlvcWZMeVpxYTY0dDRWZXZNTGVNQzRNOE15eks3eVpESVRHWTZFZkdmZTNTU1pzZHRJb25hdk9hYjFoaU1kMml3N0pzWVdJeTZZM01pUW5CbklPYk40VDB6cVhpNTdGRlVMYWFQb2VUUVJ1ZFFseiUyQmdsMW9KT2ZuUkFvVE5qNHRmRHVIZUF5WWtBNDFYR1F1Mm1PRDFnQ2NEVVowbDRRRlp6bFJjVHU4VlRKMGMycUFUSWJucmszS3RNaEliZCUyQjlxMjRtMDA4WGl1bU9scEQzcUNwWmtKYXVCbmFlNGZYcnRBRUowQ1dyVHZ4M0F6dTNWbER1VTAxM25ZbWozZUdUQkNOYzZQenEzU2VlOEhtelpyYUhoemN0amM1WE81bG54bWdFeDlDcFJjSWVhdnc0Rmo4Wll2bWFzdHhHRk1FaHFRN0ZxRVNVNmNybUoyUDYzUXdFNnk5cWZjR0VnT2ttN2xIMU1hdzJPWk5UMHZ0QzVLNFBjeVdqMmxkY3NrZURmZEFxWFBjNlk1dXhmbE5FNEdWbXVVTEpXSzRyU2NoSXNiZGNWcngyMHlJV3Y4ZGRVN1Z3VWlLV1M0ZTduTnlDWTZFTHdaMk9pQSUyQmt2aTdONDhVSjFYVmd6YTBSZ3lLQVFRV3BydkpXTllKTWhHSUw3NzRxRmlSMnQlMkZBWlY2Mkl4U0l2MGN1T2tkU01ZMmZ5cGZteVREeTREVVBnRkI1YUd2eW1VTCUyRnVxWmpLYjZWeDFZJTJGektBJTJGekJHdUhYeXVUYXUlMkIyMURQVCUyQnQxR3Rqb3dQRzhzMjQzQlUlMkIyTGp1MklQTWQ5cm5rZSUyRlphMkdTdlNKclVEdzhKQ0dTTnpkYnFPJTJGSDFlVVNVVmdocVhDS05pZVQzQ3VQQXJyUkc3ViUyQks4cm5oaHZxdVdCQXkzSVdBeWJvNFg1WjZ5MWhKNjd4UHRyNkNrS2N3OVJHcTVhM1ZHUlZnQnJBRzlzVkZLTnd4cCUyQnBNYndZRllTWmJpTlhscTRJamIwUW9Lb3FOdzlMRUJlU1o3eWs2NjljMEglMkJKJTJCM2NOc1RsTmRwJTJGS0hhR2lGRVZqOXBkaHZHcGtkSjh5RWR4UUN6YlNud2FjJTJCUTFyS2NLN1MzWjRIYiUyRk0wZGM5VzdJS1pWQXhKRURzbjFnbzNrYjJQMEhzOVN1aktHdzE0Q0hUZTh5NlFxQ2pSWTE5WUJPcVBKdG1idktWZ3VTR0RRQnZQcmlxbWdtbmo2THYzNEZwTCUyQlhCdzVKRmZWUlJsc3lGOFNsOTU4UVZnQk15bElPRExsT3pKU3V4N2drc0ZyZGpGZlFkZDhhU0JHV2tRZG84dGtWbWVlTkplcjNjbDNPb1JKeVBXMkdhSGhreHdhTGhERzhUUmp0dXpUOWtoZDJwRVltJTJGTTJFUFI2eVFiQml1WjVkQ3ROUkthZ1MlMkJmYXYlMkZBRTE0TUFjNWpzeWY0UmlTWTA5enE1SVpISHJyaFBRRDlkeWpHbzQ1TmJBSmxJUyUyQnAzQlFFQ3pEcWhFN1VuYyUyRnpVMHJtT2lOYUpvQUUxMjY5bWNSaEJwend2QW9LU2c3R3ZpbEMxNUtwUFlRQnpYdDhwS0xFVXRzY3d3ciUyQjljalplaG9qYlRtQ2UxY2ZyOEVIR1gyVDMxTVI4dGVxdXhCRzZvN3Rya2xvZk9NWURmaUxGNU9QMFR6YUd1JTJGTWVIJTJCM1dUenlFQXN1SzZFTHFPMnlNTTFRYVdaRjBwY2habVVaNTZoZlpZQTIwbGxKJTJCbjZ2WDFDQnZmcXBMTk50NlZWOGhmT21kJTJCTEFPUGx5WGRxWk11c3RxQ3NlNTNHcEtmQXJ6ZVAzN1Vqd0NZcGdPcDRLM1A5Z2s0NGtFZnlsbDk4c1NEOVI1REh6NnBYaElQM2hzWmFFZSU
yQld5azFUcUlwM0NKaiUyRnF6d3FXZHh6QjRhRkdoVmNwQmZuaWslMkJuWUV6ck9LOENpdWRiSEo2em1lTEZvc3R0NTN0JTJCbGRxWU5mQzZzblpFRDliVzNoWTlGTk5PaEMyYVo5JTJCd2t1UDNHd0slMkZTNjJwZWd5OW1STXZVSm5pd0FFUUJHbFBUQ01neWRxV0hRSGZxaTNkbXU4d1pNV0ZtN2dTTE5oYVRJREZPWlhLJTJGUUdvM0JDbGNsWHpuOUpEcE4lMkZTMDBxYzNleHRaWkxySGhOSkFYdERvRVdoMSUyRmRMbFJjSjFhckFOTWwzRSUyQjFqS1hWNXduekNIM20xaERGTlBiVjVQcFducGtLaWhxZU9MaUN3NFVVVElpJTJGVHlleHRGMHkxelh6QTI4VjI3eERnancyR2pSWmc1THc0biUyQjZZaTVjeGpacGVjclRkdENnbjFJcmR6OSUyQjRFdnpDNEcxQUF2dGk5NmhMbHpXd2lsTW9UUnpjcFc2TU1LM1VjcUtsdHh1VHlmR2NZM1RtN3I1UWkzTTdPY1MlMkY3eVR0M00lMkJIZVBlcVVhT0VoeWs2WCUyQkU2alQySlJDTnJKV1hZODJhM2txU2UxWSUyQmlJaE84VHlvVjViYU1zZEtjTmsyRVY0WTAwSldOWGU3aFRXMWF4QmlTaDZHcXBlViUyQlM4RWRhbUxBbE5xdTNuOFlQU2pkZ3habUttS3JmbXBsdXYwbzQlMkZNenMzRHNSbGZWOXV5U1Q2NUF1T3FnQjh0bFhwJTJCeTVZSzZ2QVpPUHA4dnNmdUo4Tm9LcElZM2lxaFFUVkIxWlFmUlNHUDZOM2IyeldUJTJCc0syblN4JTJCeWROMENrYSUyRjVON2R0ZXZHMzAzck1USDhPYU9WR0RwSGE3NFhUd3BKcmtSV2ZZMUFjemVsJTJCbU5HR1pWdDZxUjZWOWxnd09oVkpoQ1hIeHRPd05WJTJCUXdta0dVY21IdnpaZTVUUVBNMWRpOGhOWTV4ODhPSiUyQnMxSm5QZk1kUlFrRXdhNXZoR0VKeE02SmtNYlhxQUVZMjBXektFVUpYMFpPM1JYaUZJSnMyTyUyRmxtY1lzVWRRbjVQR1olMkJxZFUyTEVSdmJRTElncGFPb1c4MDFLbHNxdXhIM1ZVOGJSM0p2aGN1TFZOMXFTZmZWM1dpa3g5RkF3eFhrZmRLOGRxVVREZk9uaE43OTNra1kyeU5sNUExZHFkVzhFc015bmM1MDVzd2glMkZSTWkwOE00VHcxMnJ3NENqVjJOQzhrc1hjeWxHRHNaUmlnJTJGUnhyR0I4OHllQXh0TWZXemZPJTJCRmoyNFUyWTc5aEVCV3FycnkyMmVaZGh1Z1NjS05ZZG56WWNINlo3M0ZMJTJGTUU5T3FLOXJ0NWVmb0VaQ2dRdzY0ZXpla01pVkkzOHNLRUYlMkZaZWh3amttUmhEbkMlMkJRbWs2UDVWUHlTNGJ4c3kyMHExJTJGWGRRWDJyRmliamFmbFRIb3BMSlF3YlBDeFUyc2tQVFNUamVpOXBaJTJCTUVCWmxCTTk0YWNKTDZ3eDRLU3dRdVF3cW9SRUQ2VE9HeTJSTktUMjlObnNaOVFSazU4ZFpEYllZWnhLOFhlMllZVnJONjRncHBFOW1pa2pSQ3BGMTl6dlZoampraFRENmJOTW1vZ0E4b0NOcmw4dE1ieExyZ0FDRVl0VlNqR2ZYRm5VNHI2OEdYZXFYUThpVHRVa1lxVXM4cTllcnBiZG5ncUxicFJuciUyQiUyQkRoZyUyRiUyQlEzeXEzJTJCRFZqVHRPRUxHd0tUQ2RLYmhuQm1yblFnYXhTRWhGQWFZbFJ6YU9PcEJzMnlRNzhKQ2c3dUowckxQVnRXeWVsYmw5bFZ5aCUyQm1LQyUyQk8lMkZrNVdTYlBiNGVUZFFaWGJKT0RrTFNkUFFwZERTNDNENUgyNDA
yJTJGSyUyQnJPTnIzVnJCbmpUTVI1NnVOTENXU1k5NHJGZlhOMjZMaXNpY21qWkNKUmk2YnIwNyUyRmo5JTJGZG5XOXpwWG1aS0lacUhSbGdUNU5jcU5Bb3ZRMk0yWWxGTHIwalN1Mkc4VXlIcDFiVUNubWRKVGVTZnk2MkdYMFRFWjNMemx1anVOVG1KN1lqbHFTT1pmZE81S28lMkIxeTJPdE54RVF1JTJGT1ZFZDFPTUtuQzFZZFRuOEhrdzZxTmhiRFI5TDElMkJpMTRWV0JFMDJqMGlrQkd1bFZpWlg2UjZzOXhnM05HODg3MFRVVmZNNzE3RHkzRmExYk84c0VKR2Frc1RtMDFyYmRoZW5QY1B6VDQlMkZ4VUtWcFRCYm1YYzRKYW93eVJXNXJjODVKb3BQQ25veldEZGEyUE4wSEJpV2FyYlN5UTZMNGtFdG45V0pXMUtlalMxcHN0bDJaV2pSRWd0cjhSdTBKMm0xb2ZEb3hEWWZmVkdCVUdYNWRYT3BET0xVQ0dnOWc2VjFZR1N6RGZHZ3h5eFp1Tk16ZWNlaTlJU2p5aloyTFJLZTgxcHEyODFiUjJjdWV4YnVoaGVOWVhpV25sbjNUWDRoeVh6MnlQUGVXZHNwUSUyQjhrWUlZb2hvWUVkM2h5TDRHRFJlRTczSklSR1MlMkZQdzZrQWhtMCUyQmpaSEtUdGhZTVRENnpqMkU3SEk5cUxqU2k2TjV2UlNPU1l6MUtXUE1hNFNiUlBueXpwSFBPZmU5REdRSlFDZndtU0dNZk9xQ2Q0JTJCdmJOZHc3QXRucFZlNmFMYlBna204dU5vMSUyRjdGS05MMmg3RGcxTCUyQkdlMHhmY2M2bzRYTmljOXhLTSUyRnFYY1loaFlOanJoMVVpekRzMmlZMVJ2akE4bCUyQjZ4WGJGbVdVZXclMkJJOTRPeHdRNTZBMmVIcGprWFVYSDhKZ0hGWXFOQ2FpcnVNVXVnQWY5SVM0VlE5N0VDT1NmaHVlTnFCSzZZdDlKUmMlMkZDamJMMzElMkJPcTZxRHh1ciUyQkpod2lqVmlBWVJRUlBBdkxvZEp0em1DUWElMkJueEVqUjIlMkJ5TXU2YlhDZEZzNm5RNnJGMjJ6eDhlUWVGJTJGc0NjRnk5Y2JwakNZMXl5ZHg0SDFSUkN0SHZ1UjhIVjV0NW1SZXhhV3JLb0JYZVMxSHZsWSUyQnpjRDJFWTUxd2ZpSVBqaE0lMkYlMkZKRU5iTk9JM0dFY1I3WHFRcU5YYnU4Z0hWR1lhUU9zejFBZnU5M2dOcHBGQnNCeUZkclhHTGJERmZoTXJadlN6TDNoQ2FYaFlsJTJGSm9qNG5QRnB3RzFWZThJbFNrVGhPRkdWNU1Wd1AlMkJYWXp0UjN4bWU1eU5VWUV1WWU2QkpmbEtPcVZYUFV1c2FYOHA3bWxwJTJGQzl5dTg4SCUyRmI5RXBOUUZDUmwxZVIxNWc2eGxOJTJCZmNpSGRXQnR1M2JETVVyUjZ2OVhXSjZPYk9KYTh6TFlBZHhnbmNKWkpFTCUyQmVUNWVLRGZGaU4lMkJxTEllRjFkOWQwcDZaaVQ5JTJCWnhFWUkzOE1qajgzZ3dZWm4zWUwweFpmSVZTdG9DVkxwViUyRktlYjdrdndaTlZkQUVqMkFaeVRQQm53YVBWV3lkWUdZaGpWVUJxTk5vZ1B5R1VWTGhtMHJQa3FrOTBDbEkydkFxZkhZYkpKOGJnNGVLdVVzUXlON2lOJTJCbkZGNE53cDdhYUh5R3pJVnpSZDNOcERXZWZlN2tHeVlOdTBLTWxwMG52end1ZFhPQ0FxemRZZGF5QmIycmUwVlJLWVBvVWxKSUdHNjNtRUZpcWF1TU96SlBoVDRwak85WVNPR1JQVGJNJTJGVTB4clVMQ3V5ZnMlMkZaTlZrZ3BpZVJkV0hwMXpzbDFGWWQ1NU84VUcyOU01NUlKeU02VWtiMUpsbTNrQjJlNDZ
6N0MyUmlPaWEyMGZkaUEwaiUyQm1tT1JxaWQ1cncyWGpsZlJDJTJCQ0JNVEoxWEgzWHlCNjZwY0ZFVzhmUEdySW1oTGUlMkY4JTJCTlRxVGNyWXZPMnp6NlBxcXFPWUVhNFhPRUZIZ0FKekx2N25IV3l1d1AzZXFZVVRhTktsOWJ0cTFPUWd3SzRqSGNNUWxRUVJqOEVBTU5QR21VJTJGVzB2YzU1MiUyQiUyQnlHc0pRV1B3WWFsVUwxeTBCQVdYUFElMkJEJTJGTUhkNk51TTJCQkgxOVhpN1JGVWxOZW9VNjJjSkZhVzI0b3BYcHRxb2VuTUhzY05MTHFCbDFtQlJPbVJ5ekR6cUtjb1pSOWo0TVBvdnIxRVolMkYzRSUyQmdBRzc0alI5T2hRWCUyQiUyRlBCNmNZQWsxZDFSSjQ5YnV4ZWk4MFpFRFRDOCUyRlhLWjhlWnBjWGVjSWh4c0Fwa1pDeU8zRmN5STBFZXI1SldCNjBLZkRyJTJCMjExWUFmSjJ2blRWQW1TcUMwSjNmN25FcWtZUlhNWU9mVHJKUTNFY1A5c0R3ZEExTlFSYUZQZlNRMHZZSERFb2JOU294bGsxa1F3dW9KQWpVdVc3dnZSVW9KSDNVWlQyakhrZlRaSlJ3YnN2dzVBJTJGSUZ1Y3ZlUlVvTDRQUjVmWFRjeFo3T3BwUmRPbSUyRmdUblUlMkJkSXRldmZUanE5NFIlMkJTeGZ5cldlMGpURWx1ZTZ5dXA4dGxhdWl3emwlMkJMZ0RPMHVYMUFvcW81WHBGZGM2bWwzUktYSnh4M2VXUzJPRDZYREFkTzg5Ym1zM1pIU0hNOGIxSVhTYnNOMzB6Z2toZGhhWHh2b3NmS2tHaXBNTWhDc0FCR0dzdmRHUXlBQmZnWjRBSGJkRWNTclpiRUZyN051aWZTM2ViZCUyQnpXaGFRb05SZGJuOU9UWDQ4a0ViamxXaUlKWExSblBtU3ZnVFFidmJ1VmtaWUNySVRNU05FQmh0N1hZWGFsYk94RGhRMXJLb1l0Q3JpdG55RmlhcnF4ZmQ3VmE4djBxaWUlMkZSNnZubG54JTJGTTR3bVZrOVh0VHRMYjAlMkZ4eWNZaGJ0Q1RIaFh4cTZ1NkVMSG1ZRkhNOXlqUCUyQlRSUHZnRXNSTkk5Tmt1VHRHcWdxTTRTaEhpZGNKUGFlME85bFBFdzA5c3hhWWRZUjFhTDFtbHpSenExRFBwWExESlhKbExYYmZLQVRrTXk1STBuQmo0dHlndEdWNDRSSGhjTGdSTXZBbFQ4OWprSyUyRm5sMTdiYjUwaUNuaG0wU1dlUmI2TE9aSXZTcFFxazM2YW5MV1hpd0tLYnVOMHpPYzV1VzNIck9KVktETm10YWVtYkZkTTNxS2pNQ3dqS0tLVmJaQlBYbGx5alJxNktreCUyQjlKY25lNk1ONHduTm5RbzhqbG9KZVZkdkRhWkwwNTIzYnFqeCUyRnMlMkI0azVrOFpibkl3blNKUjZ1WGVXbWg3M0ZXenh6MDdhMUp5TW1YcW1DSXhlUGNLZkY1YVp2QzdvajdicjAwckxqM0VNVmo0aGcyUUptSlF0YXRhSTNudVpoYzlUY3RHa3cwbjJjVHpVUFJ6NG1KeVhjcSUyQiUyQnA4VHdPWFhydHJjRjBNVGFEZFo4UXhrY2lJT2Z5b3JTOHRiczElMkZmcG96RHhSTyUyRnpxcVJtUjF0Z1hRRXJ0RmtsNVhwRExuQ1daSmFtUll5R015RGZBeXkyM1hUcW5KRkc1ZE41NVBwcUJpZFZFMk9pTk8wNmlNd1BFMXNqNUJOMXd1d0hWT21tVlZJM3k3SHdaQW1MSmM3ckFqTGFIMXJURURVR2ZNa1AwdTJPUDFWd3lXWk0zZ1RsYW1Ud250NERWJTJCbkFRN2dDT3dXQU9rTk9GMDEwR29qMmRqZkpyR3Rrdlk1OVp1YWprN1NRalhnVEk5RktJNWJLeFZkV0R
JdlpxdDQybWt5ZmgwaXVieG43UXEzNGZFeVg4SDglMkJ2N1ptN1RudlIxeXRpQnd2UHcwdWZ2a1pmNTRkMlBwYzg0R1ZSc3JjSjBtWk1QNWlDY1ZMdDBLJTJCZmhxclh0aGRBTk1ub3lYN1pBZyUyRng0b1BMZ0xFWDNzNUhqd25aTktkSWduaTNZZEhwNnMxaXkwTUJSazVJbnBXZk43RUt6bXBOWW9oQ1UxV3U4NzFrckdrd1glMkJjdGVXVDdxSW44MUJmdFpGeHM4NW5raHVNRkxOZE9FeGJ5RCUyRnlpYnQ5SnhPYTVERkRCQXlqanUyY1JoSG03YUhMQ3BZQ0lJQzFCS09UM1RzblNLejM5UEthZk0xQTdBWGpxZndCRjU5d0JNNEI4QXA3VlgzSDl3UXQ2SzY2SlJQUDE4b240c1Q1Q0ZUNEcyRUVFWFlNb1dJemo4OEIyTUQ1b09IN3VZRnJkTW9yN2k3NzVxRElVJTJGZ2dmc1Jkd3FWYnROcnNEZHZFa0ZtQjdKWTdoMGIzWFZBeDZtRlduTiUyQmhlZ21lYjljYmZvY3RUSzRLenpoem1PRiUyRjNUdHk5ZXV5V3lyM2tTQTBBbEZBbXVPSVBZQ2lCNU5jNXY2Q0lJaWpSRkVZc0JkT1p1MWZnNHdyUjVOTUNDd25VU3NpbkNvbWRtZGVtZTF3eGdDRFI0ajRLblY4T0hNSU5KU3diMnRIb05TaG5CYm9sMFVpZnhFd0R6bVRONlpSNjFkUGlWM1dEa1lvQUdFcnY3eDJVMXdOWEhDeWMlMkJaTEpoMFlWNURwMFdPUkhDZmhkTUU3bWRsQjMzQ1RhSXVPaEFzeExOb1JNTUZwMW1hM2I4WHdYcFBGVlNPJTJCSGhKQXNMaXNTUzgwSENpJTJGaWlRUkg0UU1rZWdyJTJGb1dmQ1FrOGNiWEdUVWowOGQzV0k0anRWcjFjSnVsYTNIaGM5aUpGTURUSkcxNGViYzZvWkNHVmRjWEs4RDFZMmFKc2N5JTJCdmpYZ29jQzBaN2h5UG0yTVFsc3NVZWpCNmxJSnoyZGRVSjklMkZTSiUyRnZGMWo5Z2pQY1NDRVR0d0wlMkJGSXRYdFNKV2JUNXY4czRSbndOeUFzeFV1QnFlVjdqbmIxNkt6RzVHR0FKTk1nOUdaRDhkamZJemRoNFFzUWJLa1pQRXltYjdSd2k0YXREdFp0MCUyQkRhc25NQXlSaHU5WnpsJTJCem5ETkR4cm9FVCUyRmJZQVNIZXNVaWxmaUt4ZlB3NHRHZHl0NGRaTXplNW1FeE91eURldWVjYXZjJTJCUUw3MjVXeWd3UUo1cGVvJTJGQ1hQRmxxTWR1U20lMkJ3bXc2d1NsMDgyaU9QUG1TRTJ4VVl2UU45ZUlSJTJCd0Qxc2liZk51bjVGd05ncVRmTzUyMGRERCUyRlZDciUyRnVtUHMlMkY3JTJCMzZUaktxbmlQVmFQTWtuR09Hekp1N2lJJTJGbjBkMzNBYjNTWU9ONFIlMkJYN2dvQW1NQnF2VXM1V3ZoS2h5azMlMkJGSjNzbG9JZXRGViUyQkZENmlJYzduUzhuaG10WDdOY2M0NU9lSFQxYkdyOE5zb29uUyUyQnkyZCUyQkhCU0NrNUo4ZXN5MWtXQk5reHY3VjI0cXp1NTNGNjR1T2NtMVBHaWlONWpVU2dRM0U2SU8yYU1FSlBiNXZncE9VU1cxenkwZkJvR2RtbkVTVFNLdlg0ZGMlMkJmNldqQnAlMkJHOFhkYU5xTFV1T0hTdUNrYTc0OTYxNUxwd1FOcUg5eVY5VjUxM1daMXpPWEN2bktVblBGaURoSHprRUVZN1FBcTElMkZRRXFHdiUyQkpJZTNITkVCNlRvZm1XQjRMdHIlMkZiViUyRnpLQm4lMkZoZ3lycnpCalBNeFAwSEd1YUF2Nm56dWM5RDY3U3RRaURzTVEzZkREdWF2dkZZc1FIYXc0VlZCTyU
yQmpqZlZWUkJCbHQlMkZtUDdpZEREektuM0hXRWU0RGtkMVJMbTZ4c3lKWmluMFNBem9jTVVnQUpDJTJCU0JJQ3R4RkxRQnJHN3pMNjZWZUxCJTJGUUN0QyUyRkw4VlhESEpnVmlDZ1A0S01IUjdiYmg4d0oyJTJGem1RZlU2OFkzTUlOc2FvekZIcXJNekpocnh3JTJCZzAwdXU1MEx2WVZpcFh0Z2I1Q20zbXl3SHRRQnQ3QXB0ekZLSGglMkIzVzhQc3VFQmwlMkZxNyUyRjdkcEdlViUyQkEzWnloMVhiOXpqaGQwRURhVjF5WTV2UzJMJTJCY1dXeVR4MTlROWE3bDgxaHkyZ1NtQWY0SW43UXd2NjI4Y3ZvRlFDZ09EdkpZZmZQVktSNzYlMkZvSXp3UWhRQVpCTVA2Mkx1NGNhOWpwbldsbmtCNmJ5dmMwb2RmM3lSeXUlMkI1d0ZQczhCeUhvJTJGSGFKdSUyQklyOTlrR0NyR1pYWGglMkZINlM5RDdKMGNncEViZVhXQ0JLMHhxRiUyRjV1WU5mUkFySUdzZzhjMlpPY0xlJTJCdGNYZVVER2J6YlhxJTJCMHFqMnRUWWhwdjhiQXJQb2RQdkhMRFhWTG1qNVdLTUxkUVpRek9GU3NNanBtY3Jyb2dRZ0Y0dkx4Zjd4Z0h0WERob1JiNG1rakhtV2dCVnAyaHk0VWZ0N2s5NFBtd1FtRGV3TFVCV3lQNTRVSEJiUEJ4ZVFIcnZSNzhIQW1EOW9VMFRmZDg2bm5KNVh3TjEzJTJGSElKSVhSNklSV0IlMkZlQlpvbXI5NnlQUUVmQ09kWGNWT2doZyUyQiUyRjF6Z2ZvbFhBRGNFelc3T0dtWUNGaSUyQjZLbnNSUzZmdkx1SVh2RlFieHVYdzY3MnlFeG11akVBUDdXMU9sOFB4dzdWYyUyQjN6VW0yeiUyQnQ1eHEwb3BVT0haVmljOTloQ1M3Qk9uSWJaY1dwT3RDYjklMkJUV1BMYnFrV1ZjJTJGd0ZBbUg5SDZjWHdQeVVYTzliVHFpYVdVUmZ6QmxIWmRCZ1I1VkpsdzYwUjNTJTJCa2VFS2tlQkR0QlRvdTBBMWdCTzhFWnElMkJ3UXZxNDlPYlI0cXE4d2lXcFFUaHZkTlNkeSUyQkh0TThwb0NaWlBWNXkybURucjVLSjklMkJ2dXJSZlZxT2hGS0ZlQkEyVjRoU2FWNElJRiUyQkh1Tzh2VkJxblk1bXFYaklMOGU4OEdOU3ZFUXphMG9LekhNTUJYV2lyQSUyRmc1dEJGUXNJTUdkTnRpZUQlMkJCZ1h4SGJ3VmwzWFRNVFFONFU0VkpRbGNaRSUyQjM5VzRQdU5LSmZVckROZmc4OFZqeXdHZ0J3YWZtS2UxaVFsVEhPSkdkQnhqV2JoNFdMbzFiVGElMkZrbXFiaURTWXV6TXlrS1hhblYyOGdpRXhZNmZKUnZRTnolMkIyUTE1b1BaTVRkMjBtb3ROcFdNSHZUMndmMGJiOTVVZ0UxUWc0bGtFdldUM2d3ZGszZmpwRG8yUGNadU0xMzI1Sjh2ZHBVM01USzJ6RjBIZDlvRmViMzF3aEdkU1VEN2hrWlBnREdwc0RKMGdnY3BKOUVzOTlEYVJ0WDluQUY0cElvZjMlMkJXciUyQjBJNzFqeUdPS3RBTG9PN2FCZlgyU3ZkdmJpOGRkbHpaT2dEazI4bmZSZngyaVRGT2o4ZThQdCUyQnpJdjJrQzdPdWRzT1E5QUU1TG4wWnkyeGZrY3JkaU83Vm9jbE8lMkJ1OVhLYTIxY0pHVWFiZGZBcVlkaVhJdXlXMkwwakxKRHglMkJQY2huU3ElMkZvSEx1T1dSRk1iRkx4YWpQc1hDQ0JXUkxnellSUE5iYVcxYmpBUk5ZUzBWaGdqcGMlMkJ6UCUyQlR1dmRZZ2hYSm9nUyUyRlp2WVE2Q1ZhQjFydUNIU2dJWkJmUHpndks3T3lwN3V0eDJ
ZMVp2bXE3R1ZHQk9CY3YlMkZlY2M0WCUyRkRnYW5NRGYyZ1ZFeWpoRXRMdmxaS2JDVHZKbjFjaUN6YU1rRVh6c2hoZ0RBaWNvcjZoa2cycWZmMHAxd0lFN2tyU2RkR1F2VUVNaTh2ZXhPbEdYRThBSkR5enFuS21oQyUyRmxaVGh6RWtNNUg4QmF1NHh1SG1XOHpVc0ppbTMlMkJYZWtjQkZEMFZFcERqSmZlTkZGdUZSJTJGSUt3S3VHNm4ycmxBNGZ2WSUyRk1PQlBvMm5MWTFya2pjakNLaDluMTc0d2VDdE4zcHNiaTBPRmFlaDU3UWJvWlFneUswc29qSFFKcmVDYXFUU3JZV0loYWlRVCUyRml3VHJnUG9MdzdyclA3SlI5cG0wMzdGT2pMSm1tNGpJRDdmbSUyRnVMNDNOekJsUWJTJTJCUU1ZUnZua0VnN2dsYUh4WkwlMkJSSTZiT29aa1dIRXJna1hNamxoZ3BLdld3ekIlMkJIY1hnOUV1amtraFdUWnQ0Ujdqa0N6cDlXNUNMZnNKQnolMkZsZVpvNzhHJTJCNHY1ZXEwQzB2eHpsS0JIYlFHVVBzemdnY2ZsclQyVDk5cG4ycmkxeW9qNngwVmFxTWtRTE5WNzdqYzZOMmxUOXlubkhSZnZEVE9ZMUY4NDh5NmxPdkhNZldvUXd4Yzkza0pDVXRmMjg4OXRYRE1pb1B2ZSUyQnJaMyUyRjVYdFNXNG4xMmxUMDFUUDhhWVpHN3BPbUFtSFdrNGFpT0xudmRIenl6UmtGRWxYMSUyRll6bXZKQUxacjFLOHpVQU5qVEFhampYVHolMkZNJTJCbm5KVkx3eXZzR0RKSVpmQkM3T0k2TUJaTlJMT1BJVWU0bzhWamdxYko4JTJCTmJ6SXMlMkJQbkZaaHNHVm03bjJoeGFyZmZySTNZN0JBV3l2RG9LUWMyWEhHTzVPTHJDNFlXcUZlYXQzMHJ4ZmJXZXZ2WEJYT0FmRkpoaEJBelFhJTJCU0VqWEVVM2VVa2lONEM1JTJGTHZ0UzFSdVk2MyUyRnJ4QjI0RkdvcnZCcjIySVl3eW84WTNFVEkzOWswWHIwYVZTVlZVVG1nemZwcUFhTVFPcVBTcm5KWjdiUVdXaldRNSUyQlQxZGZaYkRqakVtWTlRSWZCVDFGJTJCR09DSkRSR3k0WWR3ZW5pREw5cktlbEtzYmpJSWglMkJSZVh4bWUlMkZGMEMwdlhDMk0yeHJvaXhGZ1RGbDc2RXNWMTNBdmFwcUNVbTZoVDRObGpadnBRb2NZJTJGMUJyTFh3ayUyQk10JTJCWDdjNWdRMHNyTUxUVXlKakM0YnRaVlVGcUZhY2NXUjk1M0R2NiUyRm1qWVNEeTdiMXdSJTJCU2k3UTF4RzN0bUp3JTJGSHU3NFBZdU9kUjFjM0dSbE1NUks0VDFBWEw1QkJjb3BYaGU5TyUyRjY1NmFlTyUyQjRacjAlMkI0b3ROdzBkWHdwUGFNaFFMbGo5RnJVQUcxOGJGaE82Q04xZkJjc1NnJTJGNTFwc1VPcCUyRjdGeGpOV0J0SFl4c0dMTUhtOUxwTHkydVRTdW1YRVRWNWFndkpOQ05uYkJQbVdybk12bm1WdXgwMHFienBTdkMwcjF0d2gwbCUyRiUyRjI1Q2VLWllnS0RvRHNKWXVmeEZGVHNMd1REM0RpaGpjbWRWTENQeE9hN0dzaUVJUjlkMWFYVDIlMkY5T2FlaUw0RkFWQWlJMmJGNzlPZGh2M3l3a0g4azhwZzNzbHlXMlhIdjE2RkFmV0ZCRkl1eklIZzBDRHAlMkZSbWclMkJKWkNRZEolMkJnb2FrcnpseXJ3RGRPMkRzOW0lMkZsbVJnRDI4VCUyRktHOWVCaGhyRjFPMjlFRjQlMkZJYjlPcyUyRmJMJTJCcjRPa1ZsZkFRSWFZcTglMkJmM0p3d1YwdTdwZDY2YllCY2ZJYnp4cTdUanpjQVduT2p
IaWFyNjJuNkc5cUlvQ0pTcHJibnclMkI0N1dsWjFka2F2ZDlhbHdtRktMNjdKWDFZNmplR0tvaXJrM2IzdElvNGlqbXp1djJOZnRQWWJaZCUyRm1hZERPZklCaWUlMkJXS3Zka0VQclZlOSUyQnZrYmklMkIlMkZaMyUyQjFOVDFaRmtOdnU4RE9mSnltcVklMkJaakkwSnRjcXFJcXBOMm9Fc3FTOHVhT0RCM1JwUkJzMjNJYXFVaXhLSXgyb1U4WCUyQkxKMzMzN0phdVJHY3lYNThzVFBuRURUTndobkRJcmFjUXA2cGNrSXQ0OElVQjZXOXhvWW1DZ1VVbyUyQmwxTzF5V2FuQ0Uwd2lpUEVqbVdlVjM1QjhlSVFzZzBvVkIlMkZmRjlmNEh1QUZzWmU3ZGhsUkVsQWYzeXZQR20yWEl1TCUyQkd6UXhEWjVFNHZKQSUyRnZ6TXZNOElVT1pwNmc3a2R2aXBRNXN0ak1vQUtLTDRzRngyY1ljQU1rVVpNb0ZpV3FmTUFTR2xxZVlkMVRleHJhWGkxZFdUS3huV0lLM2x3Vkhydk5iaXNJRUwyZ1IwTldIS0U3ZE9GOWlINlNjSjdlam4lMkZta3MxWDYwRG5NJTJCM3BUYjBCdUZCbGprVThKS2lSczRyZ1VYcE53d0paQzQwandXcFFOJTJGa3I0MlZoQWd0b2IlMkZuRks1MHclMkZ0YlMzbkhDMGwlMkJTZnUxWnhBM2hDWURUZWJHZHpNODFjekpyd1pFdHdwS252S0cxRFI2Zkk3UHZVenhBblliRUVObUhlOCUyRiUyQmUydzE4VG94ckpUSUhteFJZcklKWk9MYnZnTGZERnFBV01zY21oY1YlMkZoMEhrMVRTbzEwUXd2blZVRUZMeERLSCUyRkhIJTJCUFR0c29JczVyZWFIMXcyNUR1RmhGVFVRTXpicm90MXIwWWxXck9YbWlPYVE5NzNvV1p4R0x4ZzdTeGF0JTJCb2NzM1lyRnVIdDM1ZmdxajNwNCUyQmtDZSUyRiUyRnM2N3glMkJneGtEZjRtUEZtNFFLemU3cCUyQkdaaDB6cmY0OUxwR21Wa1V1dVpkQUIlMkJwVTYlMkJ4OVp3WEk4R3JOUzM3d2k0JTJCOFpqUU5mbzZwNXpqQk92ZW9iQjVQNmsyc25Od3FCWVpmQW5KenYxNlF0amIlMkYwaFBlZ0JmRGJweW5qYzNQOVdRYkZOS3BHdXZIeWFxeW5ZUTVvJTJCa3o5ck5CY2dHNFhBYWpBdkVyN2M5VCUyQnVYT2RjNWdESlF0UUhUQXBpMDNMak96UUd2bG9zaGlNT0lxYXZvTmZ3R3pZWDZPTlhKNlY5OExFME9aMjM0SEpRUjllOFN0UVBQNmZPeENjMzgwJTJCN1RpQXQwJTJGbCUyRnIxS0I5MjBpdVBZU3JHWmJ4QklWRHlxZCUyQkYlMkI3dkxtVlZyaE53ZjM4RDZYbFBkN3pVaWxmTWt4JTJCNGMlMkZGVEExenpUMHJGNEVGQ0haTyUyRmYxWGwzNlpua0lIT3NXJTJCVnprJTJGbG1nQkZzRkc3MXIwU29WYSUyQnVwSXNIMzk3UEdIWmlWZVFyNm90V2xROU5ZOUtNQ0Y4enV1VmZVM0wlMkIwNFVsN3gwVlZBM0thSUR5SHIzWGhMRVZsT1dlcEpkYndQSUdmdGIxSkNaOFFvVDc4bm9BWmRUQ0NCRVlFNGhLdyUyRkNENFhzWXZBJTJGNDZSVW5oTTRFdDVzTyUyRlFybTVKTnNGJTJGR0c5R2FBRiUyQjFmT3ZNYjZReXYyUm16bk9hRG1TYiUyRjQlMkZjUHJGZTA4Yjg0SmhUZGREUTdRSWJCazY1dEpJcXRRWmVWRktnZWFhWDJTZXA4WG53Y29sQ3dMMVFIbGMwJTJCaGRWendsaU5CTmtnTGVVTTQ3REhna0NBWDk1ZGQ3R05ZUlJCZTczVFJlc3prV2F
jMzc3NWZ4Skh5cjVWclB4a1RXNXZyeDYyNU9ZYTd1QlB6azM2JTJGRmRkQmw4WnRZVUZ3YyUyRmxSQ1M4VkFmUWRTSTd1Yk5GWkVEJTJCRHJyRHp6VCUyRlZ5dGt6bzhRYTIwcjNPTXZpMUJpaEZEZyUyQmdpRG96d21La2U4dndsbEtTY1F1UXhDN1JJWlA1VmRDQ0NyRmFqYzhydmh6ME5OUlJOcnJITnhpRmFMJTJCVFE1VWRqcG5HMk85VlRTVWZpalpYbk5ZbktYWkNZU25leG5wZHdVMEZUTUpmUjBlbjdsc3lmZ2J5VSUyQjNwJTJCaG5Wa3pPYlFQdFZ3THJSeVRzNmVJWHZyNTBVd3BSUkRWemhiQ2VRNWhjJTJCOGp2UUhxbXNhbWltbEpmRmRUaE14Q0F2S0UlMkJnV2tRekdEWnpFc2hjNTVyZm8lMkZVQ2FWU0xwZ2JWT1lMJTJCTndXR3FCbGlTbEV6aVl0ckQ2NXhVdyUyRlBBUHk1dTglMkZIc3ZpWEp3R2laM0Z6ZWUxRGhCMW5mQ1ZRbE5PWFcyOTNodnR4NGk4V1lUeGdLaGF2Wlk1eVd0SXZvQlM5ODNTQjFwNHhUWDVmcXBvMm9tMGZqYVZvQmU1ZEliNHQ4MlVyQWZlRXU1TTMlMkI3MyUyQnlrTlRMNFNVTExTWm1HZUI1ekVURzdoZG5WS25xcXFFOExEZ0gzbjJPRVA4bTRTNHBtZHdZJTJGQUNkZDRxOE1aUENnckZtWDh5TjRlNlgyTk02TmROdW9LbjZBdjRZV2R5WTVNTzI3enNpQmhnM2pORjc3UWtZWEVmQWowdndEUkljbWJaVWxCelVjSGEwcmdDY0M0RXNRWGJYRU5yZXl6b2E3VEVWcnRoenhvd0dXY1VBTEUlMkZONnFOUmZaaXdvVW1tZFM1ZGo5M3RiMnRkOXcwVUVTTkNoVXFCUXhhVTZzV3RqRlFSNjcxTDRRZ0QlMkZGemRlMjZYbCUyRmdEZDRpNVNYa3VVNVh3ciUyQjAlMkY0T2J1RnJWYXIzY1k2WDMlMkJhaG9MN1NOeGQ2bVptdE1tMzNQbUZNaXZwMFl5eXZFN2t2a3h0ekJ2QVBJYm1jOVFyeGdmaUZTck8lMkZwSkNTRjlkWWs0TjdwaE1NcmsyJTJGQU9ZcU5UYnJaMEJCVTBKdEZpUW0wM2NLMHBvYnEyeW0lMkJXM3FZYnZBYVdaQ1o4TU1KaWJpMCUyQnNSUnUlMkZXMWZZMGJEbE1TVE1SY3phUGp5UHpCcU9jU2p6REtuTzFQVzJlSzZlTE9oTVpRVzJoWVp2NVJCckpTWWx0ZUZzQTlsNkslMkJRMFRMSWV4MmpieDl6d1JBWW1Ic1g3cTFQaHkzS1ZaTmhLb0tFcmVoZ21WcTJneG9TdHlZUkFXcEVtbUlNdUFIQm9adHBaa2VEaFR2cklnN1FYOGklMkZaUGoxM1NHV1FDMXpIMGJuOWI2TDlNJTJGRkx6MndaNDBjRDBlUUtKMHElMkJwS2pGc3ZncXl2eDJNa2E3NVZ0TnptZ3pMdUdrUkRFJTJGWmtLJTJCeGhQRDZCQUNZMWEzTzhlV2RjNkdvODdMT1BqZG9GNndlV1dGRXBqeFNHbXdNOEZaQ09XVmxaYUhHOW04aDVNbElFTXJDb3VDTEhPVVhpNzNVJTJGTWZMeWFMZlA4V1h4ajVNYzE2N3J4cjR6RG9WciUyRmRsNHBsRHFnQjVNNU12QUdYYm9WcUNJbE1CaXJKR0U1Rm5FcVE4cjRGc3QwJTJCUmxKRDBwdkZMT3J2Y3FZUHJRM1lpQVBobTVxNEZVSmtxU21pV0x1VUlZNHY2ZGdhY1ZCbnQzUHZuUWhEVUpiWWJCNyUyQiUyRnp4NWo1bVMzdzZjYUlzNFB2WWRyRTdvV3R0SG1oM3pjZ09abERzN3k2VDRhMyUyRnBVJTJCT3VsTHpaQW41TWtGZXI5eTUzc05ieTR
YMlZTZXA3a25xdmhWOU5Jd2c5N2YwbjluWW55bGVIWHo3JTJGYWxKcCUyQmJOJTJCJTJCdm5haXRPZklXbWJUd2pEMSUyQk1ycXF6R29USmNSQ0NxZGg1QjZIZVBrb2dFM3RwZ0NpNmRCeTFaM2FGQyUyQkF2VE8wMDU3aTk5Q0RIcTd1MzhSdVExZHJ0ODVONXh4NkI5NEpBRyUyRjVONTIzaHc3c2RPV2hoOUxwTFNpYlhhJTJGNDFMeWxHdEdoVDNXN3ZXUm1oZlJCcGhrTGw0QWJYNHBRUWJTc2luNmpXcmxSTEVzQ3pOYnVLUEJEMllKaEs1bFFQYmZUUVpBMkdlOUkzUElJa2dzbENPT3ZUJTJGaTI5T21rWnpTSWUwQUN2NjlJY2ZYTzI4JTJGd1h0UE5QaHlJWmd6bnhLOXMzMVRTUXNoUDBjT2VMdUQ3cUFlaWpOajBHeUI5TzBPTWYlMkJLTFByOUxpc2olMkZjeEhLSG5mRE5EMzJ2bTFVdlNTVmJCazFWOVc0eWFmSXF4ZVVtYkVYNmNlaGh1b3lEU2oyTHpnR2VCOTFoUTY5QUR5eTNDTUd3dTB1aENsZmhPZ0J5R2ZsbXphV1pmR0Z2bCUyQkd4cWdqZFRHbSUyRjA2NXpTWEFMOGtKZXNxcVdXTTNUTEFMeXdoUGhUVXk5JTJCTWVEVlpOajRaUyUyRk5VenhmM05udmZraDFNQ0lwZkFSZnZBblUlMkZFWE8xMnp5NWxJSzVKUHBES0NwcnhnV01TcU5KRGElMkJpUiUyQkVMcjNoeXQ0VzlxZGlHU2JlbDZkT0xldU8zWFFGbzZXWkkxeTNiMlVVZjNTU2prUXBkeXRxYVB5RnROalZzRVZ1VnN1eTJKSG5hdXRmc0NKNyUyQlAyeVkyMjNMRSUyQk55OHVteG1xcHZxVmNDVUE5aWJ3Q3g0MHF0bGVmeHRkT256UTA0ZmZzd3N4am1EMVo5MzlFNWVlaHNjJTJGc3RRakJmdEZKWVlMTHlrc0hpaDBMNmpTMGw4c1lUZU50JTJGWDRGUmdsS254Z00yZ0hCMmtsSVNrZVlyQnBUcDM2N3d2alQ3N3RKQ053YVREbHRhUFpTclVyREE5cHlOSDNMY1BsUWQ4S3VVVklpVFhZZEVEdWg4Qm9ublhDVGhyNGJrOE41R3pqQW9lNUdlY3M4WFNZbWNQZzBrY2hRck1lR3ZMVFBsNjR0WSUyRk5lejlWanlKTFRrNVBpbnNuWVpHbyUyRlZodHRTU05jOFNRNTBZVjVMeFkxdmZMOG1ZRFQ5S1ozbEs0SEZ6SXAyNDE0VGtnSmhlMnVKOW1ObTNHVklTaUZ0T3NaT0ZkQTJDaWdyVHFQSUZoMEs0cWRoNzV0U0hkY3I4MmslMkJ4U05GNSUyRlAxJTJGUiUyRjFtdnZxVjhwUzEwMlFuSDRGTkFIZ29XY29JazY4cUclMkJReFJnMnhhZWRhdGE0ZHglMkY4Y2dtSTFqSzFvUElnMG1iU2NqNkZ6NlRVJTJGTG14VWJIOU12UEZkeDNWMU1WcmpJZ3pkQTd1cjBoNyUyRjVNWiUyRnJKNUI4Y3VFeDlzTGF4bUZBYkdDJTJCc3ExdTBxc0JUSiUyRjZZZGJldUFnMXhTbHZVZWt1VXJpSmpyV0RSUGZacDd4eTV2bUlIeWhFVHZRMUtNNm14YWx5eWpFUFhnRzZocU9PS1JBMEFMaUpWcHolMkJDcHBuaWZQUmtnMEtvMUNOc2UlMkZad0JJNldiSlZPamhMOFMlMkJNVW5id1RVc21scVlPUSUyRmljcmFCaGFlS2FGSnNjSmRBaW1MM0p2eUI0dVJhQ1RoWHNSc1d6bktQQkNROXViYjBjdGtzaDI2Z1ZMU1dseE9NVU8yYnBoSlRiZkR3bHFSJTJGTExDdHAzU1pmYXhqSjdmdkR2dXlLQllmNTcwWElIazBuTmhub2tNOGxzQlZVdnFncjMzUlE
4SDVycDVrNG9na0hHNWxDWkdzVjUwOEUxNXoyOUs5RlQ4bTF2eksyYzIySzViUGlBSTBwdnpwOTgybnRpJTJGb2RYM2ExRXlkVUlRYmRGSFlaR0hlMm9ZM3VXamRmMG9QUVh3RWoxb0cyUG9mTlNtZVU0JTJCSjFpOXRwQzBLcGFNSEZjejd0V1I5VnRxOHRPaSUyRmNrYkVYSWRpSFBINU5QcnNXMUJ5WGtFd2E0Q0UwRmhXV2klMkZYazhYJTJCcmlkYXJicUJtZ1JZOUxSJTJCQlpUWDZqS0RZTkRpJTJGM3lBNHA5eTRXJTJGV21TV0pTeGVNNkRUJTJGJTJCaFg3bzhVeVMxVTZiNTRkNmZjeHhpcW1aRG1KRHptZGFzdWRNd0F5JTJCMG4lMkZLY0E1dW1JbFdvUmdXUDVDSXZUODQ1MHBXVWNJQjFSaHlGc1RaOXZOczdvSnluRVk1UFdNTWI4VHhkYzEwQ2tLMFhYNnphMUVSUkpJJTJCdjYyVG1ZQnVXZEFHVUNQTWFIRWpGNmZTVW4lMkZMRyUyQm1ybk5RREkxVFR4Um5INlZ6STB0UUR6RnZIajVCYmtOaUozQ09JR1pJa0xMazJSSEFQZ1NkMXZtJTJGVkk2NWVMUFN6M1VLQ2cyUEpQSUhsT0NiZTB4aUpZUzRMemdMZnQ4UmZRd2pJVmZLcnlDUVNSUlElMkJKbUo2OGV5SWNwSjlaTllXUldTWGN0ZkhWOVllQSUyQkJLSHQ2ME95c3ZvaDRaJTJCTTRuTk1JalVNQXkySDh1TVNnODM4QTZLa0pJd1g4V1ZOOEZ4YSUyRk16T24lMkZBTmZra1hvam1ON1JCZlpBdEZLeWtPc1MwdTUlMkJ3QkV2NVViekI4USUyRkMxbDBHbW1TMnBQaTRibXZqVDZTa0dqT0NMSUdhMkxXNTdnSkVaM21qZWJ4bllna0l6dDclMkJ5eklqOGtFMW1QZGxHUVJudE1iZHo3MmRxNzR2TUtQY2lEbWdaYTVoNkk1WEpTZVFIbjEyNmcxJTJCZXRxalo4bHBybE9NRjZiT0pOZzhvV1BqNWFKJTJGQzF1MzJVJTJCJTJCJTJGY1MlMkZTekhwUjhmbEdvJTJCdkc1Q0JzenI5UUJSWEhWSU9odjI3NGFsY05qWWdnd29YJTJCRGFjZ3MwQnhyTzQ2aFJoWHBwVDdTUFlKdzlqWE9lZWdPYXlObjRIcTVuVCUyRk12TXREcndGQk0zUFF2Zzd2cUhndmYwaUZiTTNWODRyZUJoa1VTaXdYeW5HOHBBbWJ5cDJhYnc5ako2M01XZWgzdmxFM0pzbThKYUhZaUUlMkJqRGluTWRTNFhEV1JyMU92Z3lLSTl3RmlVbnBIOVBWUkhMbHUlMkI4aU9rYlpTcHd4OVZ6U0JQUUhUT0c1NnlHbE8lMkZ0R1NrNlRSYSUyRkJnNzR2ZXl3QzRreFZZdW4zS1MlMkJsU1lvTiUyRno1VVlja2xmOHF1U3lTSmRnVUFUQlpqNnlvbXJ6b1ZGUW11SFJaNDdDdFBKSExsN1VVOGglMkJxdWhaMjRBJTJGbnVSOHklMkI5S04yaEwyVUN3Vk8wJTJCbHJCSXBIJTJGOGw5Mko3czdEZ3U3QkFxZDJLVnhQVEFuak5rUXozTzhwMlhFbTlnWlVDUm13bjBNbzBPYmhiOWsxWE9VTDAxUURkYjAxZDhvNyUyQm5qRDY2JTJCUkhvdmhFbyUyQlFhMllnd2tjYlBsM3lpS0NFU0pya0hyWU9KcFhNVDZuVk1ROVB1NHBRZVlpTWVhNVdDMUgwUm9reFNGWEZlaGNNVEZaYXJxbVpYRGhNbjVXQWdlNHFVaFdZalN3cyUyQkNQSlJTT0Jja2N3SVBDY2RMdTFSclhqd1IyYkduTHlVbGQ2REVFSGVVJTJCTTJqSW4xJTJCMEJiSG1lOGdhcXZBMWpzaFVzczJXMSUyQm9xd2Z2dnNlc21
MeGFhZGlEWExLUnJUQktvaWxKTiUyRnlzVlhDSEhKQk45WDNsVW9IUW0wU2ZWTHFqMmNsZ1RwU1E2V1NQYWNTRjgzMno5c09wREpua0hTdTFZQWJkYjE2VlElMkZnWEU4UmVIcGdaQzkxWTRYRm5jUlBaUmNhZmtxUk1VYjNERmlCTG5jYnRLeUlMQWVWOTYwUWVUOWVCb3l4U3ElMkZ4MlV4UmVVRGJrNnRoR3ZINUVYc0M5QWQ2Z0NaNDNBaFolMkJIWmx6akl6TFJ5cGRTN2Jvdm9QNUluTmM3cTBURW9NTUlwY2p5S0R0cFJRajdzTnQzYWZhWmVYdDdVdTVIbEVEQVl0NGVIaUdHelV4JTJGYUU3bjNnY1MxV0VwWmZab1FmeE9yaE5ta0ZEb3JKc2hRVEpsWWhoaEdVNW9rbzVCdGhBRm5OTGhEdWN3YmZnS1RLaTQxQ0d6QkFqOGRPOXI4bXFwWDdjakp2JTJGekFvMUJsRnlkMUdCeDZQSzV2ekNDekM4VWE2Zk1MUkJGUEJPYSUyQnZ4bHVRY1BnTUtsNGhvNTEwUkk3Qmd3Vkd0ZVNzOTdPenBMbnlWVnUlMkJqN0RXcTZTcGttNmFyNklpakR2Z0duOUlmd3dFS1lYQmdMS0xxS0NpcEpzQzN2YWNmZ3lkRW42dyUyQiUyRlBNS2tYZ0lGOGVQSjMyNiUyQjFHZUlDWURBTmU0JTJGMnFrRnpsRUJWdiUyQmdNYnVzQkNCYU5EVjV0S1gzU0pBNVVLdG9YWVdLdzY4MnJTM0MxeUU4d3VWdFpoYlhkbUNyRmN5d1cyZGJuRXFhJTJGQ2xVWTFrVEtJZ1dLeVliT1ZyYmZHNGFGckYlMkZEdm54d0R3TWtDV2lESnIzTk15WkNUNWdva2N0eFJpUkprdG9zTTEzS0taVENHQlNLSGlQSXMyV29CTG1nd0RMRTc4TSUyQjBRMXBUMiUyQjJuUEtFRmlsUjVtbEl4OFV5TmRQUzJMeE5BJTJCMHBmbzhqbUpaNWUzakNZams3UEpQUnJweElyaDVGT0dHVCUyQmtiQTVXOERoY3hlTDglMkJnUGp5SHkyV0IzUDQ2QnZHSE45aWdFcHJCZnB1aUFtMjV6Y0RsRXJvbnpva1Z1R1VNJTJCM0YxTHAzUWRtd1d3M09MNkwlMkY2MUNsT3FyMyUyQnlsd2pKU3I0VyUyQjludVJsJTJCdHhTbHBuZnpOJTJGbktBbGczdk54WWJacCUyRjlkNVIlMkJHZnB4UXVOdUtzJTJGNmo2V2dkVSUyRmU1Y2k1ciUyQnM1TGd6d1pxbyUyQmNYdlFmNzMxZlNtZlIlMkI1OVF6MU1hQyUyRmo0THdHQ2YlMkZrckdPcnJINXA1cnNkaGpXY3ZRRHlYJTJGOTlsS0d1aGlZS2xTWjVhJTJGcjNUeEliMHU4NFliNWU2ZkkxSFRuNE82dVFUS2tOOXRqMDFaSFV5Z3EwZUNkSHQ0R3hJWXkyTm9kVGZuaE1uTndHWndlQ3FMWTdMdFQxc085M1dGQ21SbkxWeVNUM1M5Mm5OczVVRWt2ZG5XQzZ3JTJGd1lzN1VyU1VMZmIyTWV3SiUyRk0yN3UySk1id0lmemlUckt4cDNBRVIlMkZQc04lMkZuWVhlMUZuMmQlMkI5ZXIwN3VHQUg3NUs5ZkN3SVFGciUyRmM1bVhVdmpTOG1VTEE3Y0JRT1l3cFBOeXlyWWk1V1hFVmVVVjd2JTJCYkFJNXZPJTJCZW5LMjBWTDVFblBONmdJUGZzV3l2UFhIalAwSWU3eXJMclFxRWFqdTZWY3FiZGZxWVc5Z0R6SEprNHNwSUdoYjJ5MU1DT3JKYSUyQnBCVmtOMFEzJTJCMHNaSjhWZyUyRmMyQzNYRmdZaHVaTGttQzlVWTVrd0VJSTd4SmxtZnBkR3JrWENveXpQJTJGTVFuVmo4elJNcE91UGs0RFZUWUg2bTVKU0dMM0EyWGN
obWdMaFpkZmVYQWs4NmxzOEpnNXYyZk5HR1BtbzNhTGo4Tk1ENGtIQkFHJTJGNzFRU01DajhxcU9KejBMN2FMMEVmZTVBVXJEY2lTV0xIV3JkY2t0MFRqSSUyRkZYVDFuVzd6YVRhVXBsNmtybyUyRm9KVE5KanhCUklnMzd6ZW5CVnBvWGppd3clMkJkUXV1N0VXb0NrbTFvdUxRakFmY3c2JTJGSFNsJTJGJTJCd0Z2bGRjS094S1JkVEc5VXFDdnR6Nk9zb0ZFREk4dld5R2olMkJ6QzJtMFNJdXNXdEg3dldyWlZhWmYlMkJvM3llSlFXVEhIb2JTc1diVGlvZzVKVjd6QkpDUVlEMFhYckZKR3VNTiUyRmdtcSUyQmNvUHdyQWcwUFRIYm9QME12bEptaUNUMVprUmczMjZkeDJWZjJHeDNFZTlUR3dVcWJ3ZXZjdzlUV3JoVThVUTNpN3UyZlJUYzR1JTJGa1hPRkVBYXVHY3NvM3NvbDhzcjhYMGpzdTdyciUyQiUyRjVHcWpFc1NXaFB0VVNlQkJoazJFRDVnZEp2QnVjeHRKdldrM0FKWlBibVhlbmZqbkc4NkpLYzFTYjZtN2JQZkhlJTJGUVZxZThzeFdqdjNuNVhPa3clMkJMY0xNNVhJYXJ1Vzg3MzdOYTlvNDglMkZTJTJGciUyRnQyc2szaTZJSXg5ejZpJTJCNWJkYjFBaUJHeGkzYWowYzhrYUZOS1NyTDhESEFMJTJCR3Z6NTNTNzc5am9mbWwlMkJHOERsRTZtYTFMd2ZUQTAxQXZhSUxaUVA5JTJCbTB3ZG1VamtPSnRhNkJhdFFIJTJCZ084NW5UeEVMMUt3dEc0Q3NIYnlhTyUyRnUlMkZpZmR4WUE4a1VzNkhvSHp2SWNuUG12TldPd3pKUTI2QjhFSmNpekpITmRlV0ZiZm5Za2ZLWHE5aVV5b2d4NFRsREZvY210NTEyeHk1OHY4JTJGJTJGUDZCUVQ4RWFrSzdPdHZrU0ZJdWpmSWxXJTJGYjdRdWRXQm1Ub0U4RUVKd0lyeEkzb2xYclpBaXNFUGFQZVAxMDJLWVF6cnhMdEs5Yno5aWkwOGZoQkF0ZVRhYmYlMkZ4WUVDQTlTM3B3SGQ0a3EyUWRkN3F6NHRDOEhEejVFQ1lVSGw0dlRQRWNLN3ozTlZIZnhUZSUyQjQ0TWNvbVp1VWZMMHMzWmt2NGh4OFoydjg4JTJGVU1jRjglMkJUYXU1anFLUyUyQlUzd1I3emd4WVBlR1V1aVZNRFdoTHJ5R3VlREZVVSUyQk5qbFJUUFBWeGhjQXh2N0w5eEd2Q0J6S2I4cmI4JTJGYmh6NW1FUXAyRnBYT0o3TFZwWWNuZDhWVkFiSlpuUWYwdTBFb28xdllKaVFqVXN0SThlZXJOZEx6OWNLR3pXTFRLOUd6cVlLSU15TzBvYjRlbVZWOFFHYU0lMkJWRUJQRHZaWjF3TUltckwxb0JHWVFmY3Zsa0FuNXF4YVYxNlFTRnJBVXpKakZWcHZqYksyb1J4MGZWbEFuZll6VTVkbk9Oc3ZwJTJCZ2tjWGclMkZqT1h6cGtSNGtNN3JSY1BIbjk0aWFEQkZRV0pxVCUyQjdpYXVUdVdCSjlrNnBnQkR0UzRoaFlCRGpIaiUyRlZZUnExSHE0TDhzV3ZKQVRtUGwzWHNOYmdTdjQlMkZtaTglMkZtViUyQk9wNzZPQWpLaEVjTGEzZnJTYVlGV2hDZ2N2eEdOM1IyREx4T0xWS3RWWWhRUzcyNnJBeUNYa3E4eEQwSXBrc0o2OHdZT2V5JTJGTG1yQ0JoSWclMkIwVmZHWCUyQmJaQ3pMJTJGWXhMdWhwbnB4c2diS2pzOXZXdEZPZFFleWNGclk2azN1Vzl1UiUyRmxyZlAlMkZLZ3M1VUhQJTJGS3EzaXYwcmJBQlNCWUFIdktpYVFXSjMzQ0FSJTJGTWFKazhhcldtNnBXdlhtV0hVU2h
vU2dNVlY1eXdsMEdkbWFVTnJPSDZ2bHpudSUyRldzZ0RjTGxvaXE4S1hmakU0TXdiU3V3RCUyQjNjVzhxQlRPOSUyQnhKbiUyRm9OVWRXc1dyblNrc0dOZkdUS1JoTm9VcXNMUXIxeWR0TFF3N0RCT0tVS0ZYUWRqOTMydFVNaDBJaUJmcGpnUzVaNzVRaHJWUlIwM3RJQUpTb0NvVVFzZ3pnZFR6Nm5DRno1c3RUNiUyQkJ3YXJiM3Y5MWJmclRWaVJGSVFTZzV4WEprdUZZNDZyeFZXM2lTakI3eGpDJTJGalNFaTk4Z3REUHBKSVBuN1dtMExxcVY3bGFxQ2t1R2hiR2QlMkZhUSUyRk5wZTlyd2clMkJ0d2hXRHNLbXpNQVQlMkJ0d1U4MlF1MVZQYlZhOEtlakNhVzNvTFNFakRIbDg3NSUyQm84MU8lMkJvTkRzOExuS2c1UmxaUFNHeGxNVDdJNlRHNEJrMnBrMCUyRm42OXFYTWx4NXhQMUJXRHFENU5BTm9tTkNqbENiZm1heCUyQkE4JTJCdk1GMG1MaG10ZjRPJTJCN2FDTU9jVkJ3SW1YMFdRZFZhcGZlJTJCZHVMTFkyMlk4TWxNRXEyeHA1YlVnakRZUXU0Z1hxOFQxUHpFZGtKcUNyR292NzhjQXVrOTd5dVdJcmp6OXUxRzVFY3dxVWxWSnFDVTZabTF5T2V0NmVXOHVnVkp6RFlQRllGQXNvVFJhbHROcmJid3d0bUk2QnVUM2dOUEV1OWdObENuOVkyWCUyRmVTJTJCZ2gyUXlqMVpFNTRDWVlDdDFiaGJ1TEF4WnpHT3gycUVxdGpBR2NaaGMzVmJKTGslMkZsdlM4UjA5QWNrJTJGeXVud09JOGdIdDhVemRwYU5yTjB4WW11MjdxYiUyQlk1N1REbXRzRSUyRjVLVWxwMjVhRHdwQWVhUHFNYmNWeWxJQiUyRmpIdW85eDZ0M2xRM2hwU2NhbzlHMDNZajBWJTJCRTlBNFM0dHFPYzBGNmc3V1cyVUZicmd6elg1cEVBbjNNJTJCUDk0Ym9aVUthYnJHbUU4eFc2eG5QSmI3VjByc05Gdjllck9aeXhieklZNSUyQkg5M1BYU2w0cG5QaXM3VWhnUGhhSkU5SGlrRXhNdkxzRDcyTDVJS2tnVjNsaTBaME1VNWY2aHA1S2p1Q1pvc2dSN0NOc0d1Rk5jODhGa1RXZndGSUZ4SUpXODNGclBUczdiJTJGQzAxOTBYeUFiTHZRSmhaa0QwZ1Z6WFFuUTJPZW1ydUEyOGNmJTJGdWhOWUJ0UU5QMG1ESnQzc0YzcjlJZnpQeVRhNkExS1dSWTclMkI2WnFCYzc1MnRDM1FJOURFMHZiT3pkRmRENW93cmhxQkhiMEJTSGlheGglMkZyN04lMkIxJTJGZFJ6Y1hOZTgySnY3SnpCd0clMkIlMkJaakFyUWc4V1VuNjZOMjglMkJ1VmUlMkI4MXZJc3NQJTJCa2FuaGVDbmFPeFFONnBBdkJXVDF3VTJPOEhDVGVHeSUyRkM1TU10VktnT1RiSDh4N3BKemI5QkhRNVlHayUyQmo0RlpYcDQ3VEVhakVaUmNhY3N3VkpNJTJGV1dPJTJCTFZGZ2QzTzczY240WGZ6WCUyQllXWGlpME1ScyUyRkROMkFsViUyQnBtUm1JMG9vM3BKQlpwUWpxdFY4TkJBdHFONkppcEsxT2Rhb1JuNGlwUDBaWkIlMkJ0RU8wJTJGYWlJYWo4emE2TE00ZXR1NUVCTmpXZjJtNFJUJTJGWGRMaHRwcVFKc1B4cmh5Y2RydnhNb3o5Zlo0OGt4d3RyTmNkb3hzWWgzQ3YxQiUyQk81bkhnMTRjNE13JTJCMnl3ZTMyY1NaeEtCWE1peVVsa3I2S1BQYiUyRndkV3k1dW1PaSUyQjhUR2Z5Nk9XVFJWbkEzZnhLdm1GJTJGejQ3Q2dGMXUwRDFYZlJ4ZCUyRmtUV3V
OaExuQ2ZRSDF5ZU9iSEdOek51cmJnNlJySHBmTUxuTkpBbXNiJTJCUFZQOXp3N1V6ckd3Zm1UOU8wcVZVTzNycThMd1ByN0FnWEJQVjRwWWowWHlDRnRQR2hyM0lkV2slMkZCeW5EY1UlMkZRdDJwMHNvZTN1cHklMkJ0VFRzTjhMTnhFR095OEtnWWw3SG4wT3hYbkhVSm90WkFFRGJSR1A0djBjd29zSW1BcUpCWGs3dTZKdUh1aTBma0F6bWVKZUZXcmZVRTlGQ0xnRCUyRjk0Y2c5Sk9iSyUyQjdveERZT3JDYm9YaEtnSVh6NWhmVkQ4OHNRdERyT1lUN1NQY3E3dHlPVmdPUmRaNVlBWXU2OFlLZjRYJTJGb3lEamIzTzElMkZ2Q1JDRFByUHNrcU56bFJlakc4WDNKNlBiNFBIcXhFb2VEbWJ4ZlZBdWNZUUhwU3hyekNKTGslMkJqMDcydER0R0NvR0JTM0tNeWZMYjBoSG02SGxHelNpRGozREpOOWFzWWViaDFYN0dmWDVncEVxdTFLbUU4JTJGbnloZzhuWWkzdG5WZmolMkY5TmR2UkdYZkM0UWFXUzk1U01MYmJaMzBJT0w5czFwJTJGeFU1Uzg5NmsxbiUyQkFIS1hqQ05rcG85eW9oWEZqQ0J1aGNZOCUyRjFGODIzeHdNdHRlZkxVdjRSNyUyQlIwNHhuVlRaamRrcnJaMWVJQyUyRmgyVk9sJTJGSktYNk5NR21UWHVzTm9hYVBxSnVPU1ZlUGRPblh1ciUyQkREQTk3dWJPTjRIR2kwakhpclVWMUhPNU43M29id1Nqa2plTHdVS01GRVo3NWFBeCUyQjBTbWloTzVQOGV2bVFSOXk5ZnZoZkFWdTEyS1ZkMyUyRnd3dzNLOXpROXIlMkZ1aTR2TzUxQ3pNQWlONExQZDlzYSUyRjNQWERLbTJwayUyRkZRMXJaY3lScnBwQXBJV2pDM3Z4enZQVGVMSkh1VFlHQWE2N2cwdFlVVWNQMzdjY3VPbTZvTTQ2elN3TzBQRTlKTzVUck5LdTdBVXNVVGRxU01lZ0pCa0dUeUg2eUVMSWdvbUdReGRrdUtwRDFRSmFYQk5vS0RQZXQ4S1VHZkVlZHNveUFqJTJCVm45a3JHOXFwMnljckFKSDhITkVKMFhHOXklMkJoRUh0YzVtZEp6cU9xblFqajUlMkYyZGcxdFY0STd4cEUzV0FaZzF4TkpBUURnckdpbSUyQnExYk01Mm12SFNDdEJsZVIlMkZZJTJGdG1RdFQ1SXFvNnlYVGtDZDQwb2o5WjFucXNvdzFTSU8lMkZFUnJuUEtBRGNLNWp5MHo5RGwlMkZPYkxBTkxIdFZlUnUzWTFGJTJGUVdMOVNZdmFzMyUyRiUyQlFSWFFGNHQxVSUyQjBlRUgyUnMzOHdMMjQlMkYxckx0Z1hpZWFWVVViJTJCcDRjZjVudHRaT0IlMkJyZTlTRzN2JTJGa0pqUUVNcFA4eTZhWTVBVHBIREp5UTVNNmJjVWQ4UnpqelRCQ0slMkY5cHJEd1R0QmJWMlhESzREYnEyd1U0QzZqZVlPcTQxUEtnM3pEdjYwdW9kWkJuJTJCTXBNYkdPJTJCOVpqSjJmeW1lSkpyRDczTiUyQkdJT1pRMXp0czhycjdJZXRSZUYlMkI4dkw4YVpIT2JGNlA2MTNHbkREYVZHTWRrQzEyT1BwUiUyQkdUT1AlMkY3R1lVV0tLbXZHVEtPOHY2cnJ4dmI4RU03WiUyRjl1RkVvbUU0ZlJ3WGJmNVglMkY3dHBDMmxxbG5hcjk5NiUyQnhIQjZwcnVhSjEyQWRhQTBQTSUyRmVyZ2FYOFAxeE5sUTI5bXolMkIyUHRxQVBxcjF3SDdaMzNRJTJCcXJVcDcwODdHeWUwUVZ0OWlaJTJGY3lWVjR0VHYzaVNZR0wyeWpyd2t6S2tkdGpjSWl3OEg2YjkyVzYwRG1idmR5Z3lONDB
OaSUyQkQ2MzlZUXlFUEx1YjBXJTJCbTVOJTJGTzVzeFklMkJIVVVSaXNHYjNucWRXd0NGdzBSJTJCZVJ3USUyQlZtaEFtN3BjekJPNUoyRGtVVHh4bVFIdXliUjZvYXFyUFd3YTlMZ094UzBqSUhRRnd4N1ZvWEJsMzJpcUxxTzNOZDRDR3R5OHlyUjJGR0xSZUw3QXdpSnNVU3gxMERISUhjMGNlY1l1Y2hlVlpyMkRodXdnSHA1MTh0djFJM2JRS20lMkZUUXRKZ0I3dU45JTJGSE1tZlIxTDlpRVZJUUNGOE1HJTJCM05Wb0VTNU9SSUo3NlRGREtQJTJGOWlkOWQlMkZjRWhON1BSWkolMkJIcHElMkJmNzlhMHJsNUF0SWhZVSUyRmVmRSUyQk5UR2tCN05CZ2xFbWxnZjklMkJjYiUyRkk3SzQlMkZJJTJGeDJIaFdMNjFnJTJCWVRhRU40TnkwMmp2SElNNEclMkZLJTJGR0dnZSUyRlYlMkJ4UUxMTmplJTJGJTJGSU9NMVM1eSUyRjJzbiUyRm8zcFdhdWxTMmVZUERrVGtuZ3MxRGU1alFIVEZyYnhGMCUyQjlqVHhvUWpVN1VzaElTMlM2VWM1YlZ4UGwlMkZ1eHpUdW5WZ0drTEFpdlQyQjBabE5vcEoxdUdidlMxVmM0N1I0aGFtY3Z3S1hUeHZGazl0aiUyRklqWTI4bnREcmttRjVWeWRocG1oRFhSbiUyRjJVbWxxZDU4V0hkS1d2RjlsVFpxU29hMlAwYzZRek56YmdQb1UlMkYxNEVzanl6dTRkMDlBMTRVenJ0aDBYRlRzaG8lMkYlMkY2OURiZVhtVnJCVnFmSkxLQlpVdlEyNFFkTG1RZnBVM0REaDZHdEg5RlNrWlhYYXEyTGF0bGxTNUpBRXc0cDQ2SGhpZHhZRmdyUWdKZEhJU3FwbE5vV3BpbWlwYnNwRTFDdkNyQUpyNWZSUWk3WU9vWEx1MlY5SUxHOXRXazBwdnAwVGRhWWlqck9tdjVmRnFDQ0pZSUw2RGVBMmdRZkJxWjdaejdEY2N6Q21ZZ1UlMkJpWjRNTG9nZ3FTZlpuUm04czNOMDNIZHhtOWtROSUyRjZXJTJCNndIeFglMkZQemJMJTJGekJQSzVYMG14cjI2VTdMTTdQSksybjJraTFiaWlHZ2U1Z2lrVnZPcGp3TDhJeHBLNzhvaEpwUlJRVGo0SDFCcCUyRiUyQnI2ajBNSW4zeVROdUhKUlRlcERxY1FHJTJCc3NUdmpJVnVWZ29RVlNUNkZEaEYxSFZLTXZKU1ViSHF1Qk1iZzV4JTJCOWZ2bXVvVzdhTCUyRkMwUDZqU05DTU13eWxBZ2VLQTdMWVVRZ3lXNzZzJTJCcjhJQWIzUWxTNjcxRTlNc2xGaUw4dW51RlYlMkI1JTJCVUF6Ukg3MSUyQmVwSEZwZ0hkMyUyQjlIMkRKNkxIWWJKOEEyd1lQMExSN1JremNhT2hmM3VtM0FnMzg0ZVUlMkIyc1RlOGx4Rk5QVXVLM2VpWlhmZ3NZbjZvWFRLVlZpVjRoRVpJSEFFSVVyZHMxdWtub0RwWWNncVE5ODklMkZ5Q3U2SDNTZjZySWlTY2Jlb3BjaVNUTG1yTnBmdnJZUmIwNjAwVm43aiUyRloxZnAlMkIzTWElMkZOTXRCZzRFV21BMEVzWXFKT1RhVnZoJTJCblR0U0JsM2lndnphWjk5MmxKd0luM3lhcVNhOUdtOVVCamprZjdEYVNvc3hxQTVWa0F1QWNTS2tlcElsbCUyRmIlMkJ6ZDV0WlpqNDNXU2JUTllSRFZmeGI5Vmh3eHkyTGZ3MzZObiUyRlRTU3RGQXolMkI4WkFDYjNYbGRiT3ZYQXMlMkZYZkpXMjgya2JwT0dDZWZ5cnBxOGpSVDNOTWluM1NwVm1HVDhzNmMyWkxwbSUyRm1STm03JTJGJTJGSHZJanZ0T296TUtaJTJCdWtsdGJ
SRVh2T2wxSGpOeGk3bDhrMG5zeG1WZEE1WXlvT1VXNWIxNkQ2NVVuNzl6Z1B4MWpVSDAyNFhENTZadGhyVHowJTJGZ1dSYmxJdEtWaGthcjNtbFBGVThHWDNCN2dGaDRxWnZGQmdrOHJDb1VZaWtCbVhwZjlEJTJGYXdtMXdsVEJtVUZvNHM1VnNUMExTaWdKeG1uTiUyQjU0Y1FHdEc5MWlkVm9BbmFxclNxVkFYTjV6SWolMkJKcDdHMGxZa1RWY2VLd1JJQWJEUncySnBFb01KR2VyUUVwJTJGQmtaWDRnWTlFejclMkYwaUJXd21MNTFWM1FoY1BzWDVTb0ZmN01xcWRmZms4TVEzTU01WHZ0WVNPWlVCQ3NBRnM3VllhbFV0RnNmS2VkTThNSFAlMkJkQ0h4bzdvJTJGNlBlbkZCNE1qUGlOUUlQUHlqUThqUE9TUDNIN3RXakdKY3Jwczd2WW4lMkI5bzVRJTJGdFNQVmVUVTVOOGdvNmhlZXNMWjl1YXIxJTJGMGUzMVEyQ3ZyOXdyYm10UUZ4SXIzRCUyQkJVRDZVNXdZMkFoeWJYWjJQdVpDWndLdHJrYTRwa1pidHNoa2Z3RE9JTlo5N3JMc05UYU9ybkYlMkJVYVBKSExxbHlTRXk4UmR6MDhvbHZKOHBqZk54UzFRRkN6SWxSTmI5Q3JEdnljMk5nS1olMkZtdGRRVWVKZXBIREwzam1ySFVGNjIlMkI5TGVDVG9tTmZIOHNyemFYSnFlaTRuVXB5T0slMkZ3TWlvbm56ejRyUlczemtmUU0xYkNNT3FudjRiaU1QTzlGcHpsbVVFTzlJNnRyZ3M4eGJ5ZSUyRk9wYXpBMWZWRzJMZDhqRjlNaHZ4cjJiSHJhZHMyT091QTI2QU1qdjhnV0xuZGJqV2o3b1lGNEdFcG5CUzElMkJGbVVRTCUyRjQlMkJ1ZlFlYTdsTkszJTJGZmFLZms3QjN5bDRDVXAyQmxXemJuUmN2a25qQU92SFB2cnNadzltR0VzbUh5JTJGJTJCUXExRW1sc2IlMkJuYXhzYTFEOGhSdEE2U3hsOTFHSzNMcEFhajVpbVJYNzNEZ25ubnVuY3psc3UlMkY3OGp6NEhCckV4TzhVR0xtc2k4MEZnZUlRM2JHbkI4UUNpZ0hJRHE1bVNkaGUlMkZ1Y2kyUDAyMVZSSGlYOVB0ZmFiTjhIb2h4a0s0TXNUZlhCMmc4THE5VnpDbjNNNXNlZnA3YUh2dzZLUWI1VWV0blMxenRrSjdwaGYwWmo5UnV6Nm0wSTZUOFl2UEhmN2ZTWEZkYkhCJTJCSEg3dnNqVllIaXNQeWhpUyUyRldJNmhmOVZSVkU3Q1locWR2T2FRelVKJTJGVUtJY3pzVUxZTXpESFUlMkZkbiUyQnVrYXZrUmNHcHIlMkZRNTJCd2VIU0N2U25md2ptc09PZ1FMR1ExRWZIWVlqWEdOQzUxR1BlTm9EeWpOaERMYUZvNWVPNkEwYm1UMW96QVRXVmk0UHRweGRiR25hWmlrdEFyYWwlMkZHUTJPeiUyRnAyaXJWYWd5ZjJBZlpBaW1lY2E2MEtwUDlMYUt1dzFkczBCVks5dVg3Wm4yQU5nZnpibUZVNEZQQ1YzJTJGRzglMkYlMkJOMHpCYWNwOXF5ZiUyRndrc1pOMHFWTHZtRW91b2JqM3JTVzM2dVRxb0sxTjBKUFJnQXFET2dyVmg3eWZ5anZGdnpWZnZodzlnQnRtbkNpckpGU1M5T1klMkJYeXlyJTJGR2tFJTJCWTMyVVBNUCUyRkJLRFVyWnVzdHF5bkZFVDFZNDVGZGZaQ1lTQmZEZUR2MnpuUnJ6NW8lMkZlaVU5SThqVXkxOVNBNXIzU2JUQkVtMXFNaXJyS0dWa251TGNRVVViSnlkeVFaMVolMkJ0Q2tEZkdOMGJia0x6MmxTamJ6TVRUZFA1RjJwNXk3Y3JBZTh6aWEyS0Y2MzVmV1MlMkJ
OUEdrJTJGbWRYY2tybWZzVGZ6VjVEJTJCNEN6VjgyUGFUdGU4SCUyQnYyazVmNm9XTTFldE43SHNrWU50ekV0ajlEN1E2bzM1dm50Yk11Tmh5ZUpWNiUyRmRIMW5xWmZpQyUyRkRhZm1qdlNUVGNPT0dvVUhwdUgzS2NTT0FZblkyQnRCT2dIS2dLbURteFdlJTJCd05TSEhqZUc3RU1ZS0ZJVlNHQ1dRWERINE1pRlZpaSUyQnNYUSUyRndmVHNXMlgwbEZ4TDglMkJWc1NQdnA5dGRQUUl1a2dkJTJCZ2VtZldLanlnJTJCbVN6Z0d4aElFWGhOQ3lwdHdaZ1V2ckFWMFVFcXViWXA1ZnM3JTJGdGpqJTJGWnAlMkJsaVhLVjJRWklzZ1plbjFVM0ElMkZkSVIlMkJmWWF2ZmRhUzBCWmx2eFNyeiUyQkdJJTJGeXdWOXVSQWNERW42UzRuQnVkd0NhSUlReXRCRCUyRnJ5RFFlOWM3c0Vib0FzRVElMkJHdFRCck1BOVY1QTNOVFRkYlVKZUNPbFF5T2F1QWd3ZHFUNk5odnhxcTM5ZkRtU21jd1VZN1ZCS1djRFYyTnZkMnd2NDBXSXVxbmUwTEdxSU9OYnl0a0Q2eCUyRjlJR0laWTJYcXg2WSUyRjhOMmgyR2x2eHNNMEc3TVd1JTJGTzhzZHpWRGM2M1J5ZUtNYWhvTjBRMnp6U1ZTRjlJQXFaZ05jRnV4QmRhdFNwbyUyRmZMNzlpOTk1VUVQaEs5NjNPRkIxRXozeSUyQkhkTXdjNFUwYVk0NXdlV05oT09pUDR0NCUyRmElMkJkZVg5UEJ0JTJCS3p2dDc1RWFIY3pJajRqYU12TWslMkYlMkJmZHglMkZCWkVxTkRjVDB6JTJGYXgxNUF1TmY5ODVJeCUyQkkzU1U5WkJ5YiUyRlRxOE1WenY4b1loYm9ZTXdjUCUyQjJLaFp1bjJtMSUyQjJ2S2dUYlMzOUhxalBMMU9OajV4cEVOZk5CdlA0aGFtakllRExlV0UlMkJ3TndWUG9BRlk4TVR5elpTVHVYWUNKMEVKUHZsUzVoVVpra1ZCM3p3diUyQjlIY2dNMlZlJTJGWWZicU4zSXlNWFFSS3pVUHVmTlczSSUyQkVrMXQzVSUyRjhhN0padm5mJTJGZGVockt4WWw0bjFtMkg4SCUyRiUyQk56ZmtGN2daRGlJbDNzU3ZNTTRHYUpPdjZvb052N3R1bEpsam9HMzZsbkhzQTAwWUdBS3daZHhiYkozV3d1a1drayUyQmhBM1NiJTJGREtCbktON1JidEhiMnkzdTdQNzZxbUJ2MTU3TkNLZVRlVldMcSUyQlU3WiUyRlAzciUyRiUyQmdVJTJGaDgxd0l2SUpBNmk4blVaM295T04zdCUyRkRvYjNxMmRpNk1FNFlYTkdienhKQ2xCcHNUWTN0NlNMQWFwemthZG9xdnZYV3ZxeGEwcWo0NlVXcyUyQjNOeVJHbmtvZTlvalFOV0xyS0dlQnpQT2hmbDk1ZkN1Q0JZU2glMkJabm15NzdpbUhGdExoNDRydnhkQVNLRWpyVGZQY3MzcGxkUFBuVUZVT1BqSnhHdjM1NGVBeSUyQlJoMmwzbnVRS21wSFVrRTB4Vm1pbkFUMzdtc05abUt6dWpxZGdCUWIxNGJmMW8lMkJHNnNOZk04YWZ0QTZZWlZITjFYJTJGYmtDSjJ1b0NaRGMzZ3poTXlUU3VoOEdaSjhCUTlnS2NPY0FJREZzQlREdDFaOGolMkJwR2ZGQiUyQlVHNyUyRiUyRjloeiUyRk1FbWE5czYzRVowWk5vSGM4YjklMkY5OGxxYjFqMnY4bHExJTJCTmtPUklPNXBUJTJCeFhWVmR1UyUyRmp0OEJyeXJjM0tKWTVXNWw4bkNHNEdCb25RRk1pbkpaWUxsUjQlMkZFUCUyRmVzR2s1ekc2djAlMkZ4a3IlMkJVR202OWIyQnhqWG8
2JTJGeUpQNERiUiUyRmNDZXpLb21hdEkyYVolMkJhbnpUUCUyQmF5Z0R2bmN0bkk2UWFhQjdIOW80ZjdzS3l3RkppeCUyQiUyRkRkNm41a2YxQyUyRlpzRTJPYm4xdkJ0YW13cmolMkJleXh0eEh5ZjJrdXJNSHhnNjFmblBWVVBucEg5NiUyRmRkR01Pc0lST2RDOWhNRTA4blFtVFB4YUF3WDZHMjVodm52RCUyRkl0TTlUYVZRNWJYN3Y4M0QlMkYlMkJQbiUyRnRlMUFVTG9sWTBHT2RiJTJGdG5haEdLRW5XSlkzRWtSZEZ0VDZmdXl1VCUyQng5Z1lUQ0ltb3VITlMzJTJCcUxIclJQSTFyZlpmYjdOOUg1MTVJRElsdkJ1RTNhOHBIMldUQWslMkZHaGk2ZlhiUjR4aDdCN3JxUDRyTHR6eFVucE9QZTY4MFB1a3ZOd3Y0eXpQb2YlMkJVT09veW1CYld1NmQ2UFd2V0RBUmFKTzdQblZHQ0NISHElMkZobUlacnZBbkQ4RUhHUDVmMHBkWUpiU0hYRThSeHQlMkJ4Q2JET0R5U3pYVDB5SHRaaiUyRnZkYlU5SHJnNE9qMWY4TDhUeHFDd1BGTGNzMDBSVDlRVGpxSzNKUDM3JTJGMzdJJTJGdiUyRjFjVkREajE5aWZGbSUyRjVQNmlyJTJCaGNiJTJCTjlVZSUyRjNnazlDbVh2aiUyQkpPQnlIazNGSzNkNW8yVGZBT1RodUhRRDZTcGp3cDFyeUJhR3BTbHhFZGxPS05uMHk2M2VzY0tZRWthJTJGOVJ6Z2U4dEV4WjZ2Ym13VnpxQ0lLR3pZUnVkJTJCa0VzcGlrYyUyQkxWTG5jZlA4bUprQU82b0FpJTJCOCUyQnUwbWhPSDRHUVB2NkJUVmM0Vm9ISTVJdnpGcHZhd1ZZQ3JYUUpxaiUyRm56eFBBS09SMGdjbmxONmRnUkNzREJ4bFJiWXJ2M1JFZmNsSkdQNTk1S2ZlZkx4QTBYaHRob2dJV2Y2QmQlMkI2OTZCemh6ajAyWFJ5eGZmeUJ2QkVRcUM5eU1DdVlLdmo0bzIlMkJyYyUyRkhOOWFvTElObUhTRSUyRjM1YTR5aVU1QVNpRUNPb2UxQWc2MkE4JTJCNzJJVVFudW1kUmgyMmRQeXBTdnJrOFh2dUo5VWZQdkIwekV0T2V5aVQ1JTJCSFlCWnYzSUVzaUlFY0ljYzlKdHZYWjNleFclMkIlMkZDY242TWx0d3FYU2VuMG40a2ZMRG1hNndIOFBUcm9mMXpQN2JrU0xQb2c5V2trcWthWUg3JTJCZTlJU3k5QTRuJTJGVFFRc0w2OCUyRnNUZ3BITkg2TXkzdnZzJTJCQnZsRyUyQjE0YnhSYWt0UFlFRW93WGFmTG9PZUY5bmI4MmN0d1FtR0hwYWxCUlBaWGNtR1lqbmtOZjRaVXhsOFRZSEx1TGx2ektnWUlHanBQNkZ5JTJCd3psJTJCcm1xV3ByQjF1YzlWJTJGNjVBcnBYNDFIREszRVFVVzJZUEhZV0l0ZzZ1RlFLSzd3OGgyYXRMck54U1NoS0diQTJueE1LaXRyJTJCVzZRb2t6WEl0MGU5NzFXclB6NWoxSUJnRXpBZFpTZUElMkJxVnpZamVBZ3pqRTJVaFg0SUVLc0dnR0JEZTdna3M5aXhHTXYzJTJGVlhldzhGSnJDJTJCY05CcXRLaG5kOXhwSTBnejZ1YXVoRUVzN1VSbTI4bk1EN0ltWHNWTEw3aDhXQTlSR0ZNWmY0WndKJTJGdmd2VHM4NE0zR04xZXd4MGZlTzJYcCUyQlIzY1dPaDElMkYlMkI4N1p1M3ZDMExBUUVCM0NvcTFVcWdvejF6eGE2OUpqSnpXckdNdmZNNjM3VlAlMkIxRzhaaXRXelpRZWFNNlBMJTJGaHFSSmtyRVBidEhEd1g0YzJsUEJUJTJCd2t4eG8xb3k1djNvdHZaNVpoT1JiMk8
wQ3A0MkxxS3pqYWxYRFdtRDMzTGYzOXFhSktpdnQ4JTJCQjN6MjJubDJ5cElSOUo4S2llJTJGN1l1RjRVdyUyRncxM2RQUmR2bktYTUkyeGFyb3Ywekdua3RwZG5MQmt3RzB2bWdsR2c4NEVkNkU1d1ZZSDdoS1hLWXRwdURhVUpZZkZSS2hBcHNteHMlMkJPVG9XMUdrJTJGRWt5NzdvY0kzMnZiaFppVjhScXVLJTJCbFlqNFBjYnJLdG5hUk5TSTQ4amtGY0Y2YzZkRHFITUlCSUJXJTJCWXk2TWU1RUUzbG4xVnpRJTJGTVNGUTV3TkNlRG1UUUlFT1JZSnJ1N2Q1QXFNU2dUdFpONFB1WWlydGF2dUVOQVkxZUEzZjRXNjd5Q2N0JTJGbVd3eXVranQ2ekd2MzI0bXpjSkx0cUZmTk93JTJCZk43Z1BHRGZQOHMlMkZ5blJvV0ZYaTJFR1czMmN1bWw0SjNmeERjSWJMaXB5NjNMRVBMMHBtVWU0aklSTmtYOEk2TWtDZ1dZQWc4JTJGeURGWkd0eEYyZTl6RlNBQmx4JTJGV0M0R1Jmd2hDNm1KbUN6R0ZLSk5DN3pwWTRVWnJYM05qdjlyTUpoNnpVVTF4THFhdTVBNEtUR3A5SmEyQkJwdDBQd1hBUXJ2Z29qbkZDRTc5RkdldzNDanZuMGxsdWZqejhMNHRja09xZDRPVGklMkZjcjI4MHpYWXlhQXdBN3dzY3FKakZhQ1QlMkZhUXVLeFA3VG1DYVZESER5ZXdnZHpEMThPUnkzMEY2d1oyJTJGcWtUdUxxSXZ5WiUyRmZBUERNTSUyRk9zRFkzYXR6NW5lYzd4MXFrd3hXN3QxUWE0anJCRDQxaG1FNnQlMkZqazE1emx2S3BjM1lOVFo5NzRyUGJDcXdZTkRyelRqenlqTm1iTCUyQkJWT256MHNKSEhuM3psZHV2Z0lJZTRmZ2V4N3BXb21HR1NFcFR5YWMyVXQ5bzZzcDZFMkdEa3JmYndIJTJCa1owMmVYMThxOEJmTmROOW50YWFzS0wyNmpoN3lEY2RMcWhPYTdSeXp6dklsZU9oUG4xVFYwRDYlMkYlMkIyUTBCTEJNdjFOclVYTTJtUEtBZ2daOSUyQlI5eW8lMkJrTDNIJTJCZ2VSTUxSYUJCa2JuM2xlMXdpakFzTE1TcUhKblJYb0g0cTFNMEh3eXE5MkZxY1c4JTJCdER3UzNqYzdVbVdpVEZ1YmVTc1pCRG1LWjklMkZPWlFHYk1VayUyQmNXOXZUcUU4ODFSa1QxWGg2Q252QjhLVjR5ckNDT1lxTXJoMFdGSHJKM2hDbHNpcjkyNmNQUWgyRm1oa0c3STFoRyUyQnVJdG1xcXhMdnAlMkJEa05VampQSW5GUUwwMHN2RkdUNWlPUjZkdXAlMkYxZUZMVjBsbmJqNXVzTFhGb2p1JTJGJTJCJTJCYyUyRlR6OEVzSVJkdW5OSUw4OW1ENDBSJTJCM28lMkYlMkY1djdTRkVCUnclMkZNQ2hvUGRXcWNPYldKZlRMZUN4RFJmb0dRQmdiMTB0c1BVdllGWlBRazZ6Q0ZBTndEWXFrJTJGUVFhbCUyQk9hMEpoNTNlZjNYbzdIMFZCcWdPNzRabWt4dHIwSmNUYzF4YWNIZkpsQldLdVJqM1RpVjJrSUlackIlMkZlSEFBR0x3ajZyeE5KU0FmNHJNSlMxTUVGJTJGSzdPeFFHcTROOSUyRnd4dmttRmphcjVTQWdWdGk2YmY2NXI1Y2t6eVl5NFVwN1hWQlN6QjlzcDFya2pwR3BNY0RvalZLZHdKcDMxU0RxeE5QSUZ6dXNnejl0SjFaWjY0cmpHTGtrRFNIRk5tU2FRZkRXYXI2Q1FORTA2JTJGR3RHSDFPMjJtdFlPYWQ5cGtvSXdIb1RQbDMwQzRJSTQzcEFReHhQNzZURnE0c3Z1cDllTjMyazBCJTJGdlZkdjlvcnUlMkYlMkZ
wUGhaJTJGeFJuV1l3UXZzQ0NoRVVaN3RBQ3VMaWVGeVhvV1hxNEM2cFglMkJ1NTFKWFVXTldNVWpiNGYyUXZ0V0FMT09uN0JYWkdqRld3JTJGY2MlMkZkYnlOYVFHQlElMkJDVXI0JTJCWlU1bE1QQktSQiUyRk44MVVGTFRIQWhNczNQWkxySlJMR3hKJTJGWGZwSW1GdjJjUWJQcnd3R3hPNTBSZTBhdU1Ic09nRUJmJTJGYlNnRVdiZ2RrNSUyQmcxNThEazVwcWd2JTJGNTIxNno5RjJkenlHektlZ0d5byUyQnoxazclMkZORTl5R1JvdzVmeFNWR2FPM0d4OEQ3b3dIakQ3SSUyQmYweWVJaURQN1BNdGVSWSUyQlJuOGdZeHNOJTJGRFMxeWFGQlNLdHlsRnY5WU1SYUQ4MWtGOGdxVnB4JTJGbkVhUVFndTBDb293eWR6NWd4RU1nS3pMdU41ZmRocXVaJTJGNzBIcmZRdlVmbzVuamNBZTVXWSUyQnhWRG1oSTk0M2k0M2IlMkJZeHUlMkJ0THViWHhKdnZPYWJzN1d0cDlUU0dqU24lMkZlWGU5UmFVOE0lMkJ6cHNDRjRtTXRpZUpycDlGWDglMkY1dTk5MWlXVkZuYVJKJTJGbW1IVlAydEJpQ0NSa0FwbG9QVU5yclhuNkpuSlY3YlAlMkYwN3NHMSUyQndmM0xaN2ExQzJGaXNUZ2dnWDMlMkJmdTRURm1MbkFNVlNid2NpT0VzNmg2cEFmMVljamQ5Z1U4eXlOaGFjc29xM1p2ejM4YyUyQkJodHlJY2FuY0FXVEJvdmdlVG5PJTJGVk5tNkFnSWlOVHJyQ1JHN1U5ZmthOWJVRHE2SG5MMWVwc0FhdWwxWnVUdE8lMkZQaThqaEczNlFyYUxHaklGTEhSQ0JPZU82Q2lJa3Z1ZGhidWFaZCUyRnUxMlZWMVVlcVZURGVWWVhoUllyUThUTFJ1SHc3bTZjcGl5NjFVaFg1OXE5WGJsdHAwWkphbTYlMkJhM3J0UUVwdHNZb0RDR1ZXZzlxb0tGS245TXlGdjNxUnhIeWUlMkJFYnB2eGJCbCUyRnRmV1BNT0ZoN09rUlN4TDFjdnRuYlllRmswNHRkVFpLR3RMb09IamVveGFXMVdPcWsxaTZQWDRlQmxURXQ3cnk4YkFwSHltYmkzVmlsUVFtdlJ5cDBFbUN6NFRSRG9wa0xwRmhSbEVzdjQyekZPV0ZvdDJtM1ZMMGVoejBVNEZidk9qR3BYcllmZG50RDRFN0dJWVJaYVlzTWlaT1U4dXMySzY4S3JsOFpTbzN6dm5POEh0dTZObFFpazJVR1BuZ0htenRHamZKNE5nYjYlMkJ2ODdhWE5MVWZlSDBsS1NpdHc5Z1pubDFmaDJYck9jejd6NlRXZWZKMUtQZTZmSmN4bFRWUnUwc1F3ZXQ1ek45dGpqWU5VUnA1UFNma3pZZ2FGRWVYJTJCQU52RldBV3dENWN2blFWJTJGY2s3MGZCYUdzT1c4TElETmVick5mbEV4NDFOVTJ6eFpyZW1lVHYya1lpRUpXSWFSaWglMkJ2Ylgwc0dUbXIyMyUyQlZqMzdPVWYzRGN5ekg1YUw0T0FYdk1TTyUyQmJwS2YlMkJweUs5MW1GR3FmbUJTUHlPNFA3cWVTJTJCNmtoeThpQ0VtYTlHJTJCYjE3VHl6TDZ2ek4lMkY3aXFRaUpiRkdrM0RoeDl1Z0d3Y0dORHZlUzU4RUY5V0g2S2lBOHZJeCUyQmxMTjhadzlqdm0zVXclMkJjMm5IWW83Z2tPUlp5NTdyJTJGd2tLaGdtRXlmRDVCYkROaXpWJTJCcXV6M08lMkZUVEx4ZzRPeWElMkJ3elA4UUtqMzhoQ1ZyVmcxdk1PQ2RqYkRXYmQwM2I1aE9VWVhlb2Y1dzZaOHlaQzVpVHVzQ3dPRkhQb3M4NHc0RjE3YjMlMkI5dWhycTl3Q2JzYk12MEN
zZkolMkZIRHNCJTJCUnQxJTJCRVgwb3VhdnV5TSUyQnVCVUFPZ2M0JTJGVXZuRTh5eklnZEtpVUYyZDZmbzA0c1hUakxBTnpZRjVrbVJaMEtqJTJGVWc2b0NlWDY4M1o2cnpvZkQ3VUI1Ym5HNiUyQlk0ZHAxZFlzSyUyRjFyT1NWejFTMW54bU00JTJCOTMxYldtdU1VcE5mTEdQWXJldnNXSjVRbyUyRnY3OHIzRGhveTUlMkJhSmcxR2J1SEIwUVRzOW1JOVY5Y1p6dGZGWG9NdSUyRmx6bTI4NE1mZzVPJTJCSUllOTRDQk9MRjZ1dG9vJTJCNFhsbm5CQVhwWmQxMFZsbUlKcjNYWER4QTlNUUR2elFwODdNbG94RHBMaGIyWHRNNUxHS1FxWVQ4TEFVeFFZJTJCbVVJaWhvRXJOalBDUHp4WTBISnFmam1RSTJHVFNkRlF0RHpjdU8yWHFKdmk3NUhZNXYzWEQlMkI1QnpXbTdUQ1REWjFwQklqQzM0cSUyRkxMTlNPSiUyQlozdXFiTE5vdGpJYTR0Q1EwRktGdkdjbjYzZkFaN0JsenlWZG90UTR3ZWklMkY3MnN3R0padXJPTWNzSlJXeFJhJTJCOSUyQjZZRzhYanJjWXYwS1NFM1NpNWtLYzFEOGtMSmtiTXRHNFc3UlpJbUE5RDdpaVVRN1VibXRNcWp0cGlyJTJCdE9lcm1TNTM2ZnBHWk5lTk9OVmcwek11Ym9hVmlkdkl2V0trcnhsVGo4a29IJTJGZUVPcE04MUNHeTBWRGU4aTlDYjdxaUlaaU45M2dTYmtRUmd5QUJVZ1g1VWs1NnRNczJQZUowbEJxUm9kNVhQQ0R1T2RCTVY1Q3J1WEpMb0x6aWQ4M0MwR0Z5YUt1WXpCSE50MGl4Y2hRODhDJTJGWnh2aU82R3h2dUFEZXhUUmZaJTJGTUE4TjV3MEQ0Vkl2U3hTQ09xZ1BkN3k1aTF5emtiJTJCbjc3dWI5N3FwSks5a2hodlo0RHhLNWJJJTJCRzVqVGJxR2ZNcDlpRHFSbEQzeFlDNmkyblFkJTJGRkFTSTRaTEolMkJEelZ2Z3Ewd3gwY3F4dlNUV214dzVKTGd2TUp2aWtvbklnazFoVnhrZm1ZdTVLR2dPZW1NbnFpY1h0OUNTYlFZaWpLTkRTaWtIbHdWVGd3SThHbWpDbDhMVllDVXFiWHlEMDRIRmVidEZVSkRmelY0NnVrSkpXcUVSOTZDdzlHNFBNUEgwYVVYUE1JZk51Y0drMk51ZG1wdm9BbEhWWVJYbHRIQThlR2YycHBNN01qRXNVWkw5cWVMVFQwaSUyQkFZd3hSSmtHVHNYTjVNQVZzZ2Y5R3g5M3NSckpkc3JtQ1hVZldBM0ZkVzF6VzViQXhVQlFxRG5iOHYlMkZoVGFQcEo1dlk0TUpKMjglMkY3dVZVbiUyRkJnR29Zb2hPJTJGSllMakVuRnJ1OFYlMkIlMkJrMlRZV0xlaXhmS0R6S2plWkJtZ0RMeFJoRjVSeVclMkJJOVBybG5WZkFjbEN5eFZlZjIwOG5hNlUxY21FNkF5JTJCSGRQMWdNSnRIM2hweE8xaGZOZGd0dUZjUFlCRjFDJTJCdWd0SG1xTkJrcVQ0eXM0cEloSXZYd29kc1VMQmpuZHlQJTJGOWVSZXNMSjhsVmJkMEtYdE5Yd0VrT0ViR1BQaE9nY1dSVUdRSGx3VVVlVktvd3lnWmNVb2xiMUZKM1k3M2lGSzA5NXlnM2l6cXNhYmtoTHdUVzlkcWthWDZma20ya3lJYVBjNHFFYm1oT0p4VzBXdDMlMkI2SkFsM3NoWDczcm13OVFiaXFFbEg0bVQ2QmpqOHh4TSUyRjRENDJDWUkxRnZjaFZnMDFYZEprWEU5d3Zvb21iOXk3M05XcHc4dW5RNDZmJTJCZEtmb0FEMjNKZk5RYnFKRDNrTDhvaUF3VXZLcEhGQ1BzOTk4TnpxWmhlTGF
WS294ZWFJMEtMMWVnVEdxQnNVcE9HUmlrTWQwdGttU3h0QzQ4eWM2TThIT1ZDa3Y3ZWEyMTdkQzRwZnJYN2FHNXczWkZsTWxVZVZvcEdpWUxPZU5KZEFiYWFZUllzVmlaYmlnUmM1WjBPOUN5dlVaZUV3NjliWEpiQ3ZFM2RVMHFUMmllbHpmJTJGYzEwSDVkUU5NZyUyRnN3OVBZS0lWVDY1bVhwY1o0S2twTkhJTDVabXRkSGRZNVA3NnJTOXVvUDc5T3l4MFdpbW1NdnBINmw4bkhZYWFMd1B1SEZvcFNwc1RqS1BFOTF6dURwWlc5M1hEaUlDNXZSdXdHMkVnVHA4Wlg0eXQlMkJmV2hGcGh4YlE2T200WGRlTzMyT0FSMEdsWWdZUXRndm05NCUyRmJabVVUUWtEcjZsQnk3TDlCMERlcVhPYld1JTJCcjBobzdZbE9RZFhLcUEzNGRaRnRuSEs2eXQlMkYlMkJuMHlWVDIyM2J2YSUyQldUVk5MbnBtZ2FCdGg4VFViWkYxV2NMdTJRb0RhWmh6eWoyaWhrQmRNSnkySXRRWWJPckZKejYzSCUyRmwzWnF5Yjl1SkxlMzR6ZXlCJTJCZmRzZWlLSnh1bEp2UyUyRmppZ0lYYjNOYVVyYmZXMU42dG04aWc0WUQwWFpPYXBlWXVDanpKc0t1YnFXaXlhdDlHb2VBd3FSdndnekdWTGcwckdpellKWTJFanJMZGhtTnk1UmUwRFVIdVN3ciUyRktWODVnOG9Qa254cFV5cmtHYlZlQTFkOFZibU9XdnY1Z0V4YTMwT1BVc242U1JyeGtDOEM3V21jY2tOWjlpMVc5cE5zTkxRN2NmNldjZkFLRTBGM1RaOWdGNTJHdXJLMVkzczloT24yTGFydFFDVFRjJTJGbkI5MEJmeGRlSU55VHc2enhBVm93TzRpZWNUaVQxRFJJWXB1QjBrM2swSm5RbWJYbGZBR2lUdTFFU1IlMkIwSlB6NEJoR1NZcCUyRlNqRllkaURpRDRDQ0FFeTM1OWx5NDlCdVMlMkJCRTZDa2xpUlVaMjNKU0hoZllIbGVOQXg0R20lMkZMeFl5ZSUyRiUyQiUyQmNEJTJGUHI4SEs4ckJjQiUyRmV6QWNqayUyQmZvQnpwJTJGdDdnc0FKZXMxbnpPNHZYeTZvUCUyQlNCUDBHTTB5Z2I1JTJCWElSYzU4MzJOVXI4OXpXelpvMUglMkZ2JTJCYTlCZzdJJTJGTTBOdnNJaG1hTmp4aENjSUdxZld1aENnMHFxUmtDQktudThvU0hSamk5eWlpMkVCeG1kVDJMYk9LaUhkQ09WSURSdUQyTHJ0U3hSQUo0c29Nb0NqV0xDSDJsMWExOE9LR2NZdWhCNGdyVE5xSEFydlR6RGk4QlpXJTJGUW1BdThQZ2tZYWhKR2FJQU5vbEJTcEIxd0U5OUhOV2xhb2k5NHlyNXFjR3NXWFlFVkolMkZOTFRiNDUxcGtmeVN4U1I4eFZOUkhZUjlRU0VyZlBBQ0s3M2gwQ25GNFFUTCUyQkZiczV1d3F2ZHQ2SHhETXZPN3B3Wkh2ZldORVVvQ3dQeFgwNExuQ2kzTnQxT2k0b0pvS3VaRFMlMkJhYUVSVjFuUjJpb01TcUxMVG9yRkVRazFvNm1lZmEwV1hrczFGcG5QUVIxTmkxRiUyQkI4MUZKcDM3clVveVFYMW5heU5ZdW14R3NRdjdQS1k5UEtHZTViUHRIY0o5M1J1Tm9RVGpJNWNYS0VyV1kxY0lEU1dJSkR6SHRyeVVkS3o4UnlwSTJsM0g4ZnY4ZTJXMSUyRnl1QlNyc2xGTW1hcDV4Wm1XblB6d3kzWjRLR1lYRVhTQzVDaEtXNjh1bmViTjFxYjFGcGNRVENwSWRsOVpVMlRwdXdZMktQbGNJRGhEa0hTREJ6U0lTT0tnTG1BblBZSmFyUjUyakhOMGFudHNsb1p0Y1RNZEY5UUJIWTVzYWF
FVE5DdE9JQ3hhdTNjbGJGZGdmMEgyUFYzJTJCb2VUJTJCaGpNNVNrZktHZmFYT2lFVVBLSW92TDU1MDgwZ21PRGR6eHYlMkZ6SHJxaTk5YVI2cGNuYk5lcndMdmZkemZscWMlMkJiZUlKd3Fac1FLQTBuZ1h1dHhueTFnSnBxcE9ua0pJQW03UGFtcE1La25ZV2FHZ1VaQUJja0Z2a3hHRTJuMTlJZ2JZeDZjQTRIRzdheFBoZThxRFNoTDhVSURDQ2JzaUpWOFpadDhJMWpSSTd0Skt2MUwzQnB5V0JhaEZoYW5UMjRiVTdpT2g1RXgxWW04cVQ0SXlxQnQwWUJ3U1BhJTJCWEUxOFBtSlE2NHJNUGVzTWI5ckFHYUxyYUdIYlJGUm10R0l2b0dLaTc5MTdsUmRQdkJvQ01GNlkybHlaWjdna1JTQlBaUGVhdzBpcDZ6dHhhSHB0N1dRTUdFbUpFaGZCNnhCV3pNSzhJWXhGYmVDempjaHcyODE5RHQzZXZaVERqVmxadThrSDBuYzFqQkJDODVJNyUyRmQxS0J1bTdMVk1UZTN1MG1HbG8xZHlxOTJOMDJaZDFPV3hTUTBsWWg2enFUZXFkd2UlMkZ0N2RnQ09BVlREUlhyT2tyZGRGdHo1bksyYTByejFHcXh6N2ViNUgzeUlHU3F0dnYlMkZyaUxFSUNSRVc2U2pGOGZTU0NNVlJ2VTdwJTJCb3owM2cxZjZsa01RZllVJTJCR1lDJTJGcCUyRnJ0ZmQ5SlFET0JzejdCVEJlTlgxdEwwbWxtZHlvZGNhWDBNa1lzQ0x3aGNJTEg3JTJCeTFBZiUyQk0wbktyNGNocjE3NVIyaTZKOFBEWnFYR3AwJTJCaDYyOEt0djVDZTcySEc2JTJCUmZjUE5iTWZDY2dFWk84SWRFdEolMkYlMkJnZHVHWWQwZ0t1ZVNEakM5UkI1YUNjJTJGZ0c2aDhoMTM2c0o1VERjZWc4OGgyWEZ1VUxjdE5Jc3RBVlZkdGk5NHc1c2VhRDFYNURzREN4OFRaJTJCWHQ2aVJhNmVQYVVGMDhpODFhbDFqcmpYbVdxcUM0NkFac0NLOERXSkpzVyUyQmRTQlp4VFE1ZGVBRHEyUyUyRlNRS0xoa21HalFqZ1l5N2swZTRIZEtSdDYyYUI2aFNOMkNSSndLQlAyc2J0d0syVU5rbnZNNzElMkJOZnZNeW9sYWdXSGl3c3ozem5UOTdRSEphSE16JTJCOWdxakElMkJ1SHVSUmJzako3aXowQjZOblFDOVFFd1ZJUk5yeXhKa1FWRXlTVnJRJTJGVlFveVlDZmdDeUlNTkdocnJ1aHd2aFN4ampVUG5pOXVCR3lUVFh0S2hFNDcxc1lxZTNhdFBXdHVCbDFMdG1ySzYlMkYya1phUUMwUUdSUlllN1VraldrSHE3S2s5VVZCdUU1RU9zczRUJTJCajQ1bUUlMkZ0TjFqZ0ZDQ2VNQUc1TE9GQmVoQmlIJTJCdHdaUWRTZzJBcTZIc3VQRTlIRjFreiUyRnJiMTZ0eVdoaDJ5TWFnbHltMXYzcFlJJTJCRGFpbzVENUNvOVgxRGdiJTJGdHhjWktHeElJR0owTFZ1cHVUalo4ZlVFb0VpeUphWjN6S3NDU25SckglMkZCVndNNElQNG9BJTJCVHhCZ2ZGZHhIMkRxdk1rN1plRzdTbSUyQnJaaHNaRDNSTjh6T0JPTmVzazNIWW9uanp1MzdEWnlrMDI3TW5ibFZjWTZtaHElMkJXJTJGbjlxVVElMkJ3MU1FekNYQmdNaEM4TElRY3VUZnZSWWxYNlVhNEZQZSUyRkFBY0VzekczVVNtNTJxOSUyRkEzb0ZiSFBHZXJRWDR4VTdmUnRrREhCcG9lTDZrWWF0U04wUkl0Mkd2RWxpMXc2OXBaaGJpTFFESmg5RDF0VkplbWVqaDhhbVBvUk5JTlAyd2Q5dXVtMFpTa3dJdjZESE05TWczTTZ
iZXRKU2dPY3JQMEtoUlNOMURaZzNsNUFPU1hydERza3FUVXBsRmhaNzRIbHJJaHElMkZUYmZYanVqUW93TjlxaXY3VFhSJTJGS2FqSHBHbDhPUjVqOWJLTXl3Q3NrZFExNGJnNVg3Z2glMkZXWTRIUWdQWFFpOE0lMkJySGZGJTJCeCUyQkFnN29MQlI5M2wxUjA5alltaTRtcjBTbWR0Wk1maFVHMXdUOUk4WVVkMDBscXdtJTJGREU1d0lDczM1dU43QnNKNkYwcUJrM1p6Umw5em9BWWZtZUtSUDdYNzFVa0tuR01DRXdibWRJbjdJSU1NdWIwdGZIMlg5QkVRQlV3Z1Vpa0RINXpzM3ZwUyUyRkdDd0RtZThaU0F4JTJGJTJGbDM5dXkxSGtVakZ5SVV0WGlSbm0yOUxOVVEwWkI4YzclMkZRdDklMkZBdkV5bTZEREczcHRLUUh1SUlnOE0lMkJsSVp6U2J2bmJKWlQlMkZGOHExeHpQdDIzU1p6dnNqdjc1QUV0VCUyRnduNTk2JTJGejFCUkQzQXIlMkZ2WmJJVVA5Y1FHUDVmTVBWenVVakx2UGg5YzFCVkRpNkc4OCUyQkYlMkZLOG5nUGpMejNNQjBqbTQ5TmJVWDhQNCUyRm94QVpmTHI1ciUyRnVzWVhObXY1YyUyQmhkQ05QY2pRTlU0a1lNZnVMN3IwbmdwJTJCNjZjYjAyQXJMUWQlMkJpa0V0JTJCR2FjSjdMckl4RDhPZjd3diUyRmdMTzUlMkYlMkZyNERDR1g4dnNuUFklMkJibGJINDk1cDQzY0wxWTJudHNEJTJGaiUyQmNWNm12azY1dnVtbiUyQjByWGQlMkZjbjJheHNtdiUyQjRGRFpsM3QyJTJGeHZjOHAlMkZkMUZxekNQWVNHJTJCZldIdGt3UzhCaDJMOG9sTlljd0JzJTJGY3AzQzRyMDM5MmlVcGVIM292MmtaUWJ1JTJCdjY4aEJ2MmZhJTJGaDdyZjYlMkJnTWglMkZ4JTJGckIlMkY3QiUyQiUyRnpIVGN4RU80TWV5RGZQMGI5UDFEcU8wMGZxNSUyRks0ZCUyQm9qNlplbmIlMkJ3TU4lMkJBTWJ4blglMkJuYXpmMCUyRjh2Qk0yJTJCJTJGJTJGNWh5cGNlekcwNEQ3ZWczTDltNVFHbW1QMCUyQmt2bDlGZnA5NWY0NUNXJTJCamd6SSUyRnZ3THFjOXR2cm5SWTFkZ2glMkJabjNRT2tVMHk1NCUyQjlaVDFnTyUyRnZqRHU1bWozYjdQdVNCbG8wTWp3MzdwJTJCakVsUE9rUDJDTWZSVGdOR0RKdzZKelNQJTJGTjMyVEFiT2xnTmJCWDFMZHpHMkJ4dkQzSnJ0OXprWjNvVTg5QXREbklFUUJaWHJkZkQ2SE1aUEQ5dFNOcEVoc2VETnVjQkQ1c3cxN2czb3gxb3VFWWxhQSUyRlBpVXNuQ1Z3RTNNMjdvOHJDY1hSMWlseHdsT3d3U0FjbUlKbDdQNnpQYmduSlNVZlN6cTZSMVVKeWlYJTJGaERxOCUyRkhhOFl5Q2U4MHdNTW1pcnF1Tk1EUURtMmVVSlklMkZmdG1pTDNzRlZQZ1glMkZ3VmRFeW0lMkJZYW1oakhIMjN6YnJQejczMEhuVzZObSUyRjdEWCUyRnczbSUyRkh3Q1hLa3pxdVJ4ajUlMkJCcCUyRnVrZUxHUExqUEpmdiUyRmUzejZsY3hJZ3lGeUZHJTJGWCUyQk84ZmN6V1RhMyUyRkQlMkZjbjJQQXhuS09BV0dYT3JqcGQlMkZHbjBlb0NrdiUyRnBDUXkzNVhVdWE4WFR5SDN4VDZNRnM4RWMlMkY1JTJCYkRaN2htSEsyVnRodHFEcGJPdmgwZG03NCUyRkpjbno3eTFTcVhvOEFGYlRoS2VQSGtBMnNUbFpQVVM0MjFXajJXTlUwR2s1ZVdEOFozcFhwSCUyRmZqb2psam5INlE
xNzglMkJVWUJhTkRxVXBrUktIbG14d0wyV2QlMkJ1UklFdXglMkI0bWJyRVlwY29MaUclMkJDSUljSjM3UEFjY0dCZGVyR3puWWl5TXV6d1J3SUlTaFZPdDV6WnBVZkxEVG9JeGU1Q3FKR0d4bDBadEhiMyUyQlNteTh5JTJCc2dPJTJCa2oxVXRuTE9jOUdtMSUyRkclMkYzNjNGdU0zOFZ0RDFTMmxUeXJ2bDVsUElsZGlpckdwaTY4emVTMjk1NWRpY0slMkI4d1I1dTNtNjJ3REx1bHZEMTY2OTFFcmtQJTJCenFWakRaZmZxRjRNR0xPUnNjZ29wbnpCeDZhbVNFeHRzWklsYXJySVlleDN2NkVCNjdQNVlmbEdmQ3UlMkZIdUZuc2Q5SHkwakIzRDRDWnRjZm92b2EzV3JhdGdsRU53OVJiT0FHZ0JIaHlqTlJZVFJ4dHNhU1RhcFhwJTJGcjI3dW93ZjNVNEloeTYlMkJIMkFZRndEQ3RXbXY1cjlUa0dZdVZSeXk0NU9EMWpsSlNRUktJZG05RThMbTFvd1NIRjJuZGZNUnkyNTdsSGNCZ0NZN09QVnoySU9laDU2a0tzODhJWG4wdVpVVFIzVXplZnZEVXI5ViUyRnklMkJZaTVKd1pJeVd1OFBsWG12TmJ5UFBOdHdnNSUyQmZHMFB5TkJqSWRKUHllSDMlMkZINTduZjBNS2FOckVLJTJGa1JLJTJGRVg4bkcybUdvT2ZJZHVkalVkbE1JTFoyRVN4b05VQ2dyQ0U5U2VOZ21tVUMwcW4wUDZlSEQ5M01KM0t0TlRCSFRhQVNkU3pTY2JvOCUyQkdkMUUwRHZWVkRHMm9NbzlJOWhIR0dsUWdDdW94OWFpaHQ3Z1R1VmVDMmxRb1dWVDIlMkZTOHNQMEpBaWlLdFNibWVpMEUzYzdqMklkbzBudnBoZEUwUllSRFY3dUNFbHdsRW5kMVRaRlRSVmllNXlRSHl0TzNGSWRkV0pBZllnMmJqdDdlRldSQ1dEMjR1Qlk4aW5hR24wV1kzWFA2V3pQcW5rTXBBSzlEYkFXMVlCUlV2QWklMkYlMkYzUUVvSnZZUndWSEtnbWZ6c2Jxcm9RJTJCdWVNOFNaRUdnVEhCJTJGMVJqZkh3M3ZLcUdpcTNYZ2I4ZndFOHVSRDk3UlEwaWUxVDR3MVElMkZGZ1oxYkplaFJwVUM2Vm00Q1NkZlBxVlVQS3A1c1ZGJTJGJTJCaWhGa3RWdkg5OG1RMUYlMkZDSTVqSmElMkYwb0RKcndPTmJPYWUyVFZqUm95JTJGUlUlMkJwRVk0JTJCVHRpMGZWaTZTTERsM1dHaW9OaUVadnpYdFhyVTFQYUI0NDQ2dUxkRyUyQnJFWk1QS2FqWkNNRzhkNld6cks2Q3lpcEw2RGYzdDZNYTh0TFB1ZTEzRUpaRjFGSTl3SlpobEFSSTdtUGV2cmpaZTh0dUNocXZYcWY5akp2NlJMMU9uR1h0NE52UFcxc1pqY0hESjVEUEw5S0tBVnZEVG5LMjhra0lGS3A4JTJGR241YVoyZUU4cXhSeE9HUzZUb0dZeFhwd2hTMG41VUJra3Z0NXZQSEZmJTJGdGhkVlhPNHVpM2dFU2VSMnRKMDA0U3M1NVZ2UjFIJTJGMjlJeXVrRTl2bWtjRU1Gc2s2WjhmQ2Y3bHAxcTRkWXdtVUtzTTB2NHVKUjMwTnFuU3Rib2EzR0NMWnRXMzYzeUMzYU1RNUdUVjNLU2JlaHM3MjQlMkJGSldHSTNYVVFHaWlGWWlSSnJxWlhsQU8wNUNKcU9QUDdpYlFvWGJ6NlllZGdpJTJCOGdCTlU4c0NEOWtHMm5zM3RJMVN0WnF4ZjNIUjdtcjZNMVBjemZGNjg4a295RmxyOFp5UXF0R1VwdVhoYk9QNXlOUjlCZUg1akF0czVkMFFQJTJCUW1aVlBqR2RaYWdlWWVjSnR3ZXVSUGg0ZSUyRno4ZWJhckd
GSTQ3Yk9vdWt0NlhkZkZ6endNVTZlTjBwQ3c0OGdFQ1FHcTBpcEF1YnEyVTFHTFdMaUQ3SnFlRUVmVElWR202b0QwVHVSTGd6QVcxSCUyRjV2Q0J5cEJHalJDc0ZqV1FBZVYlMkJLb1NZbjNnN3k3dzJESmpuYiUyQmRZeE4xOU5OUVFOd0w3Zk5udHF1ak1oNFBzMVRROEdnVzUlMkI0MEhaZDVVUWI1dXZ1SWRvJTJGMHltaW5OeVdpNzklMkI0SmZ4VURxS3dUaHVKdExhdm5IY2JwTU5UY241endzUWY1ZGZwV2lxWXB3M2lwZVRxTGZ1WFJQWlA4QW8lMkZqUlhCNVh5R29nemYzN0VFTkNORVBWakliRkl2clhqaXMlMkZHSE1mVnpoc3prVDZWeVAzejNad1JNbHhUWW1GMHZJN0dVdTlzU05kTWNKMFBPNGhnenBwSFI5T2RmbjhiUVQxbTRFV0hmZnEwayUyRldFUm1JeDIxNkFLOTNadFVsdjdVOEFRcUNXbThZU0xHeTVuOU42d0M4OTMzZkxxaVV4TzRheDFrMnBGTUFVSmtVVkQxbkxDViUyQjVDcWZSRmJVTU9iaUFVdjhqQSUyRjk4UlpUWVhaTFdQUTZoc2ZYeCUyQjNDdUg5bEtCV0VZbkhGTGY0Q0NKNFUzZ2s1dEJuJTJCOHVvWSUyQm5jT1gyYVJyenVOcCUyQm8lMkJ1NTFIU25DNWFzaUU3Y2dmV2pzMjRweFJaSlNabnFGVmRGUlh5TzdZb1NHU2RCb1dncXB4eUxzblV0bHdTZG5ab0pjbSUyQiUyQiUyQlMlMkZUVFVXMWR0T2M0SHlMZFBMekdneGNNblFheDV6WlR1Y1FqUmJFaVJiNVo4cVlzUWFRd0F0RTVINGQ4SCUyQngxRld4NmpaWUtpYnhzdzE1RlM3WWlrcUw4R3FNbEJnd200M2Ryb3lVJTJGNFQzNzhxVFVpcGZzOVhiZmZMQ1IlMkZKeTAlMkJFbXRiJTJCbk1PaXYzNCUyQnNRZk1PMnVkc3VUYzZUV29uYjVkamJOVVBaMHRUcE15aVhJJTJCTVZGbk40dnJpTmhpcUt2OUJYenVzeEE0UnNiR0U1YzRGVjhBM1lWZ1Ewa1pzU21wRWhXMWVVV0FJJTJGN3FRVnMzdkVQUW9Ea1REUjF1Yk9DcFlNUk8yVTlRa2RJTUFwaUd2M1FXcTJQUiUyQnlwaUVQRTVlTmw0VVNId3NFMmo2eVU5emo1UWxWeFNHYWZBeURsNDY0cXJsWWNwVEZlRUhzU2N6WGJSbGNZVTA3bTJLSWwlMkZ0a0hPRWxkd1dDUm80UnJsa0hqRzE2dW1UODlFYzZ0QnhxSkZQNDVUd0F0ZVJ6V2RwaHZvbjNtTFlrOWdhNTZ0S1I4QVBpMmVmT3dwWXVKQmNhNjhLakw0R0RYcDZnQ3BEVlYzUmhwbURNdHZkdXRaSE85NHFEWE9PM1N6QUNTWGFCMG1HVXlhYVFxRzdnME8lMkJOdEp0NFNUdSUyRm1DNDdwSXRMYm5MTlJyb0wzTE9CNnVEVzhQN1o0dkhlM25KSkdZSVNxZTZxOXNpUUV2NkVFbFJ0ZlRMR2hUTnE4alZLdkcxT2ZwNnpKd3pCT28xWDRqV0QlMkYwN1AxOEh1JTJGMGFVR0ZQNUI4aHB3TGRiZzZPWFBBVU9PUGFSN2V5NnUxQWIxQjgwcTR1QTNFSTFleU1jTk1XSlVIN2pnWmFUOTRUNUlsamxoc1l5enVtZEs3Wm5qZzViSEdxTEtySndzTm5JQnBYZjdHTE9nYzRoV3p1OVI5UjZmZWJRZXM2ekJWZTJaMzR6YkhodDglMkIwVzVHRXFIZFJWZ2UwZWN3WGtwbmtUdHIwMlBhUUdRcno1a2dYaWg5dkhPNG5QVTZDMmVMM1JLWnRYTmQ2aGNCZWtubnZPMHduTjQwdm9XU3BuYmpEZVFnelRpTmJNT0xhSkhzRHF
PcXhscTlWb2d0NDFrWXpQWk9EVlJ2RjhoeUxIb3JkbExpdUVWcDdWOSUyQnp6OGxzdFpRaVh4b0dCT2hQSTJ5blBweEp3NGRGTGh3UGhvVW9MemVoTFpuWmhlTGlqajIxOTJvNGZQY1hHdjQyREVnYTNlc05ybUNsTzJSazRCdTJoczlQYnhaWmJIUVZ4alZxa0tISFNJeGpTNDNySW92NzRHMmZoUkpjQlNRUDY5c1pzWUNIbiUyRmlqbjZ1Sk9ONkx5dFdyS0dCVXh0YnNDaWZra0h1RzNJeWxIUyUyQmhWTWg2VmkxdlpjS1RBcUttb0NNM0YyJTJGYmROc1BDU284VEdDRUYwNHZwbjA1MkV0MXpxRmRrc3ZIb1FLSWtKNVViV0ZqV29oMDRjWG9mJTJCYkJTTTB2RGJ1ZVBzRFZpZDFHaEZsWmRIR0xHSjRTUnJPZ3NQZXdmTzd6THg4SDBONVhVZXhabTlHZlBMVDA3bGwyNUZzY3NnaVA1RUhPTk5KemFMMnBUU01qNnZDcEpXd05Iek43RTdEV3hmRHBJYVBaUVUzODN0STltUXo5dGFZVkxTVWcwdTU3ZzhIUkVNcDMzYVFleE5VSGFoTTBWMVRUZHNtRVM2ZlF5dzYwbyUyRlVYZFNkZHB6dE80bVJJMXU2WnZPWSUyRiUyQlZRTE1oUXJNMFhHZnYlMkJHWWU2RllrQzJCdWJwSDVNaEZmZlVDNXhteUswSkVlaDFiT0RZOWltWHpFZ1JHRzlzZmNXUmQzak9XV0xnajZKJTJGT3FhMk9UdSUyRkZqcW50ZThEMmNHTkROaGVzbUVOZTFlZTBpNkYlMkZXd2RJd2pnb3Iza2JERU5SU0RNRlRHUFdJUHZFU1djTlhwNWM4V3lOV1pHc1ZDU2l6cURHciUyQnhSWEFEZ1N1dUhMMTVoMUZ4R2NubVlYemhDMXpPY1R0bmt6ZmVDTFgzS2hiZkd5MlVPOFlLd0R2SzNHNW5NRHVVdVdyNHo3JTJGWXBWd0syWmNnWVFXTzdJVVF2YzFNU3JudSUyRlZjTnpFcVVvbkJKUTZLVzVXZEI5bGs0VDlYOXo1dDNnOXNxalJ5NGZuQmZhaWQzeFBRanNPWldsaHE4U2JiRnQlMkJvYkFMSkxIcmt4enJicW41bXJ0SSUyQnpIN2ZsVnRVRFBWQXBWdExaVHRKVnd0MmpNZlcxaFR4bFJxMDU3VnRjUTNuejIlMkZxUTVRNkJRRnpEcEJIZnA4WG4xbkpiTDQ0d1E5JTJCQUJwZFBqSnlJR0Q3QU5IcGZmdTRwUXdmcjZDT25xbGdza05hU3R0R080QkxlTUZnZnAlMkJCNFNkZENTemszc0NjOXJlaHd3QyUyRnk2Q3dqSFlzU2gwWHpQOGZ5SXQ4dUlDJTJGMDMlMkJub3c3NTNQTUp4NlVaVHF2a0V1eFVZM3VhM2VkWEJpZ3dUdlR3WWo5RkNGRzJyNThNaXpyMnVoUE8lMkJudXozMEhOb0pzZzNpc2JWYUw1OHRuSVhiekNPTW9NVXpIRHVDOXI3aWVITmVoZ09wOEN0Zzg0WHQlMkZRQ1FubmhNUUp4RVBUSlZyeG1UcVRCdVhMU1N6VzNCdklwWkZ6aFJVb1BFSjFsYUlOeXpmejRza1Q0djU0aHQ4dHYyTk03WHVPcFVwd3AwWFpCMnBiUSUyQmY5bDh2SGp2bnd3UGxoWUxlc0ZLUTZUSzVFOFB0cVlrOEEyOElZWGJCd3V0QXozWk5EbnZKYWNpOHBPUjUwSiUyQm5FcXZQT0pqbVc2enAlMkJpRWxyZTNZT0JjbG92amg1QVZUdThSTnB1ZjNyZnQlMkZUVWhId3AlMkZ2Wm85ZGMyZDBwNEZuczAwaG4lMkJlS2VyWkhxZ0t1SVdwZzk1RkRSd0ZxbFJObnc1c09JWVVkY3BjdmlCSkp2cSUyRkVRWFVZbzhmbmlnUFQ4ZVNQdTYzZmRNempMeHR
JQmV5T09kQWczMmZ4M1drSUFwS1UlMkZsQUw2aU5WSVI2bUthalJoTEpRQzNZOGRpcE9GOGslMkYlMkZZV1I3ZWZWeFElMkY4NGlkb2FSciUyQjdRQ0ZTeXo4d2hwbkhlNDIzeFRjZjBBOFpvSHpvcE0lMkYzTDVqRWx0QVo0anZ0Q2g1NlZoYkVPeVRyMHVOMDNTcmdjTk8zR0pQeSUyRnlxYm4xMU14blk1RyUyQjB0cFFONWQ3aENsMmFxek5HRmhqUG91UEZzSUJnd2c5cm5tamhKOUJqMUlkR3VwVzM3OWlNeDBtcHV3M0QlMkZ3cFFuUjUza2pFcHptWmF0JTJCQTRQSEttMnJTV3FraVVlM1BFbnZBdzRGRFo5bHZUQXFSSG1JUkN6ZmpxZWNNdWhpM2wlMkJ3dXFIbyUyQlltWXN6MHB4Q0d5VnVUMlpTVndBNnlXVzFndDR2SENPUlc3eHhQaU5qWXh1M2hDdXdwaGNQSG1OQ2U4M1g1Ylk0ZnhiMjZ6bWdHMG9BaW16WlJLam1QU01kbndYNktLY3UlMkJjZ2NsZHFkUVRDRTJ5ZERJc3FZMXVVR2dqYXRMenJPd2NXdmlNMGdUS3NMeFRKeTRyS0RGNmxBa1BoNmc1eEtLOVRaWiUyRnJlc0ZIanl6VDRuanJ4NENaM1U1OSUyRlhla0NHSkZEcDBCUWducDVpRGdnaXNERTNFVUU0eW91eTIzOXVaWTJidkpneUxBcjhuTVZBS2hIZnR6TGNtVDZEJTJGeEI4TzBZTThVNUlxcDVxYVZ4eTNXQVlGNFZWUlNOQ3hFTFVUN1hqMDQ2TFN3M3pJRFRXJTJGM3RPdlVtWmF2QTVsTEtzclMxelNRQnNnUFI2dzExaVpCZG82SzJNN3cwbVU5ZnMyJTJCaU5FZ09raUpwbVN3YkMlMkZvd3NaRXJwejRrTVF0WTJKOTlsZ2FIREZTUm1LRTJWVnJYNm1UdktCc1Rwc2VDUXZkZVlLaUJ2cUR2bGtlcEtkaHZJM2Z5ZzBaYzl4VlJFc0JCMHNJbjZjc3pmSFU3dkdobUFkODRWQkNYaXJ0T3dzbzdqempLZXBkYUxNRTBWNWNtaExUVEwlMkJ4cjJkdlNBU3FQOURYOVB5OGIxRDdEaTM4cjVpclc3UGZOUEpMdTUlMkJFenhraEJoYVhqd0QxTlNkRTdVUVZicGNGTDQ1aDJiZGNYaVB1RzZiNzlBamxLazFNTThpSnJJVk00MiUyRllFWVV5TzFzbjdrdGJoRXZwUEkzY3RnRU9jdXpSa2lPbjJHbVRNejFGZDc2bHAxJTJGY0E0WU9JbGdicTh1Nk4wTWZDVTgybFJNTkZleTNTekZOU2RaQzhaaldYa2VHUDUyY1l4WDBBbUpQQjE1Q04xZyUyRk5ERkZQS3FpWjZJRGZ5U2VwUjk2ajduVU9TNHZrNDhrU3N5NFZ6NHpqbWZrVlNXdFhIR2JnQUlGQVk2Z1dWSHoyekV1VnpLM2RJTiUyQk56OHFWb3luWjNtakM0cEl6SnllNTk1NUJHRXpjZkV6UVNCMkdocSUyQjJpanBCVHlFdWI1TE9MNzVQaWo1WjhlVGRkWnU3ZEJ5MjZ0dkR3aTNjN2hEYjVDS0l0UW8yYVZncGRYSEFzdHNDSHROZTNENUZDTHp1MWY2SmV6TlFrNWVjWm01Z0NOaTZWUml3NWZoQjYyRVN5b3Q2U3JaOTU5TEdubGVEMThsaTJGJTJGJTJCZXZKRlBRa082NjBDNUMwZklPem9tV1dGbVclMkJON3MwSTVQNGJTWGFTUkxHRWg2bFBkVlZCaEUzeSUyRks5bVk2OWVjaTY0RXlzSGZBaXV3RFRIUG5IWTIzTExDR1h2cVg3RmZNdEFJZyUyRk56SldMZXZLQXR5JTJCWUNVZk1ESUZHbmdyUEIzRWp1R2pxMlVUaWV5UWtNZEZjQ0RTaThoJTJCa3RaS0pIWmNBeWJaMEh
DbGRibFRWdEZiWUQlMkZEN2JRclhvaDdFNDdueHhVZXhvSzcxZ2VrODRTQkxDQkF5Y243ajl3N09MUDJHbU1DVkJYMERXJTJGUWl1UXVIR2VMM0FnUGxBd29oTnhMM0tnZ0x5cUZ0WjNGOVNvdURJeGlzVXVtRGQxJTJCV2RGOHUzVTZscDAzUER1SXNSOTlXWDM3Z00zV0o1M3pLQmpoWG96anowV1VtSXkwWFdmeTVqRUpUOFlDNHlLZHR6bHVsM2VPcHclMkJmWWltJTJGQVJuM2JGdjVYU3VaNU1BZEh4YzlBazJVNThBNHpLa1Z2bUhKQ2daUllpSVU5JTJGZGJ5STh3MjU2NzV4TjkwMnhKN1F5ZyUyQjhEdG9jSHB0RGN2SXpOSHlvRE5GMDc3S2E4TGw1U2piMUd3WTI1SkJzVDJJZ1E0WUhWbnlVNzl3cDU4R3J4cVdMQWZ6RTJzbEExdzF1M0ZsNEp1aE1hSFJYWEIlMkZtNFdQdDVBUDN6MFhSYkF6am9PS01SNGluYTNhRkdSWjVtNVAlMkZsdjFjWVZxZU5ZMGhkRlIlMkZET3V5Nkk2NlF2aiUyQmlOemJCaFA4ZXRaazdUaEtVWHIyOGhkOEVEV1pGRGQ5Q3dTcDhFWWJLM3FaNSUyRjU0TjJFV0hBY3dWTmtVSUNaOHVuSTA3SzI4dkRtcXRrSFZOdTVtakxSUmdYVGh0bTVGaWYlMkJZTkxxamxzbXhwJTJCUDdIUnV5WUtWUkZRUSUyRmRaVEJ0OWlmN05jJTJGelh2am54U2FWajJEUUpuNENETFZrcDV3OWQ3c21IVUVaSW1oV21yRWw0ZTklMkIlMkJtYjFIM2QwUU0lMkIlMkZvRmhkWmh0ODdqQ01LSW1mRlVXWk5senRKcmFwdzF6TzZuaXlRZEhtUUlZaThQQzBQNzRQRWFlMlFzVXdUUkYxS1hKYUd5cUVhb1A1UGdxU2cyNXdVVG83S0NOQXI1SFc1MU1xVkpjSHFuQnh0QWU5bjQlMkJFb0lrYTF3NGhIeGNuTjlwdkJjcDM2QzZVSnhXTUh1UjZ5cjVPelBFS1JiS00xYTUwUDBmaHBBR0lpcVhVQWN3TDBZbmtGZ3olMkJyZTJ0emVLdmROaVZSQUhVdVJYRUFJbU1kdUhJb0t6N29aM2Qwb2dYYklqR0hhTENHRXg1NEolMkJKcVIzbHNvMWxramdJUjZzdWhvOFdpYUxtd2JDNyUyRkRJTCUyRmJwWU1pMjF2S1gzMHR0WEZIR0FPaDRCQU8wWGlOWE5qeE51a21iek96V0xLSUQxdUZFRGNnWmhEdmNEMTlEZUNkbTdOSjJ6TlBVTm5DQnFoMFF2N2tFenI2TWtERjZlOGswTVE2TWkxU3VkVm9DTkpRNkh0T0FMaGlDQU1JSGpJd3pSTXdGNVRUd1oyWjNiOFolMkJSUlNHcVdIa0xmYyUyQnJ3amlQQ0MlMkJtZjZvQ0VxNURiQU0yUWIxYUtYb3duaUtPQU4lMkJsMjhWajg3REVhS1ZSbDVFZXY3NEpYMmtYUTR5S3lxemxpTjlMbVdqQSUyQjFjanFoJTJCSEd2NnV1dVllcDZDSjZWTWNvR2U4TVBRSmNIV3BxZVQlMkJoV1FxS3o3WEc3Z1h0TlFhMnByRCUyQnhWMXFNcExweFI3cDgzc1dzQTdKMzg3bTZsWGc3MiUyRmo3SVhBMnJFSWJSWU5YajI5U3Y1TktLb1N5aXpXWDclMkZuVHB6SzZyY0hsaFRWVWVVMXNpbSUyRld1YzlXRnE5bkVlbTNFaDV1Zmc5QnA4bUU4RnhIRVBjbmFrR1pFdVpya3RmTkxlaFpiWGR2OFdLdmN4R2RiT0dHdDdJMnJQT2lzSjl4bHJnYWV5M1Z4WGUwRkJsVXZad3MwOW9RN2lORDVnR1pETUg5OVdkM0QlMkZuV1c5dFpqN0F2R2cxbjNOJTJGek5wSzlpMGxzc1BpMUQ
lMkZlQTN5SEFxWHFNZiUyQlFKT2tQT1gwOTl6bVh1ZGxPczV2eEg3UFkybmMwREZPYSUyQmolMkZlQTF4eW1UcVhoUUtubVBxZk05WDNwY2NPWVAlMkZCaUhuOXh6eXlBM2o1clpEWlg1SFAlMkYzeiUyRkI4UHNJQm9DQW9JbUklMkY3alhjRGV6UVpnazhHJTJCSiUyRnFQMVFvN3lJM3YlMkJxZXU2MyUyQjZCJTJGZ2dtMHMlMkJsJTJGdXF4MHI3bnlvajdvbSUyQngwTXhuQ1Q5c2ZLaDhEbVQ2ZTlsM3puOW4xZmlyNG5tbVpMVCUyRjFreUdFWnltVHlYblh1aTh6OWw0eiUyRk12JTJGbFYlMkZzJTJCU2tZdWNBJTJGaHdvOThUJTJGWWVWdUVkeiUyRiUyRmhpbWRNMCUyRjFoJTJGOEdUQSUyQlo4NUh2UGRQNiUyRkRYOHUlMkJNMkpkJTJGMGt5T0NHWFJLNjRsNTJSJTJGbHlCOERQTiUyQmszViUyRjFpQjh2OXIxJTJGJTJCbDJqWFFqT2slMkJPbmM0UzduZmdodGdaYUc2NWNmTlpJamVka0pFYnlDM1c5MHZlJTJGRFolMkZXOTNlZThHa0slMkJqYjJ5bzBQcUZxU1VhOUJxN0VZZWNQM1k1Z1FlYzdqbCUyQiUyQlBzYjExSiUyQnIlMkZDYnYyWWUwbmFkaUd5Snl6SUVlaXh2MlJvZkZqSjk2Nk5tRUwzNCUyQnh4Z0lnVE81MFd0MFRTN211c3N1JTJGenVValFVUGxkb1hpMGhldDBmRDdzdSUyRmo1YkIxQkl6a0FlWTRTTENUdnd4M3NraUFTdWhRQmhCdFR0V3U5YnRRaDJ3djk3SGlPd291JTJGakVBY1BZaCUyRkRjZyUyQmFIcjR2OWxUTGh5Nm5jSERRJTJGVzFXJTJGajdDR3RRVWlXJTJGaCUyQm53Z2pZbFhYbnJIV0hwVnhuWkJJZUZFenMweGFZYjRXQUtjWFZrOVd0cUp3dEZQYXpiaFhDanVjQ1klMkZ2RyUyRjlmc0F5NkNXZ0RnN3pydFJjYVBCanNSQjZmZUpWTFMzMHZMRU9ycmE0a3N3b1FYZTFqam1mT294amoyNU1jMHUlMkZiU2I3TmJTM2IwalRtMnBSUThLWXV0VWNOUU1GdU90MkRubnBZViUyQjhQN3ZpNkRVV0hOZEpKdyUyQmRwUXZFc2dFMDFJSklqVW9INFE3aDAzVVRYMm9IR3NBeW5XNjRvSVhVJTJGSjc3VjZpaDN0dWQ1VmRNYWNsaGxiajYzYzZ3MzhncW5zWnR4Smt1S1FkUVlHMktmRWNVMjJrM0M3RlF1c0klMkZDMmtLbEJRS2RGN3BWMWslMkJZSmJ2TGI1MmpQVXdteFZKOXVnNUllVG5EYloySmUyZ2swSnZSN2kzdG1xQUVNZ0YlMkJJbnBqQzROOWduQUNaVGlUM3JqNzdHU2I3QjNTNWh6Um56dVBGZE9ES1oyYjMwMjRCbHJ4V2xhMGNKT05yZkNOZE5CNVRWY1FaT21jekZSUWc0UjJxWDY1eWd3SXIwMm12bnB1ZWR1QzRvak0lMkZlTUdXS3QzdUIwbVQxRmdIOHFNUjV5SEhnJTJCTk5BUFMxb0lBS21ycElCUDZOa3RxNjNGWm1VWGpQaGl1SERmaHNsM0dsSHJxWDF0ZSUyRnQ3eFBSb1lzNms3bno5R3RUQWhEUFczc3djMFlZaG15Mm81cVB4WkRqNGtUSHp2ZmdnUnNaSWhUeUt5JTJGZ2RXU0ZQZ2U4ZU1EUGFrdW5FVUp6NEpWUWtKJTJGcXBiZGpMbTdRc0p0bGw2U2p0S09jbDl4SEtFckFIZ2g0MzVxSUFoVTdVcE8xcmVGb0NKM0ZFa0FCWHBkS2NDeURvVmIlMkJTQkNxYzN3MERGaEk1a1UwaTA0SW02akNlYWg4VmxkOURkZkFqdkEycVNWTTh
FajFwOTIzNzhkZ2lQeHdIcGlsVE5tOHA0VUNXVG56VXJDYSUyQldXd0w5eWl3QTJQOTlneDY5em8ybVhEd3dWU2g4S3Mzc3pTV0MlMkJVZ1N5ZTREaE1jUmJ3SXp2TEYwczhGUzB5U3JsUVR6ZGo5Qlh2UmFwZzZDUm5UJTJGR1lJd0U0SCUyQm4wdDUza1liM0c4JTJCYzlHcnBOTktIaWFPZDk5SUdQY0NjdHdxd1plWCUyQlAwNk1Ub2UzSXN6bU42MnhhUTcwVThoWVRCaUNnZ1JDRUs0UCUyRlgyclF2UHlOT2thc3dESExHVUc5dHFUUkdsRExJUjdGOTklMkJTWlRvWU0xRmJzVUp6ZjNIU0R0Tkdlams4TWNvMDF3VHZDNDNKcFNpaUpSbkF5U3o4U0s0dzJTMFElMkZpVmtsV3FLOEh3dGplaGZYb1ZRV1NGb0V4OVVLeFJmdWQwaEdOdlpORjBHeExBdXIzOTA1emRwV3Z5WSUyQmolMkZnUWJGVTRKRWNUZU5iNGZCN2ZiWkpFTVQlMkZDTlJWOVlBeVE3UE41eDdCcmFBMGh6WlliYk5lT0l6STBlQ0E4TUR5ZmZ2d2p6V2twVncwMlByREtGY1kwcmI0cHo3cHdUTE9rJTJCbEJFYjExRFFHaXByMEtLdG5oVWRtWjZPV3glMkZXc2VvcHRzdVVJJTJGUHhBR0xSdE0zNDlZMlN6TnUlMkJvTWVZMVpQU0hyaGwxcFdmT1laaWJBczhoS2NqOHRwZFJGd0c5Z2UwSVF6am12RFNXSVpBeXlXU3VrTmJzWHB5azFqdmFlYlAlMkZIQkZQUVVFczkxYW5laXB1bDB4R3lOamoxb1Z4OW4zT0ZEc2x5bFN3RGh2ZzI0aTZMZWxlT3ZpMWVWdkEwSHVpQkFpTEhxaThwY2trZjJhcjRIMXVNRmVBNGJZeDVSd0F3UzE1N1JsQW1Rakh1dFdQMW1JdEdrcU1XVDA1djVabTY5Y0h1eTJlQll6MkVnTyUyRkc0ZG1HRGpmTFlEVE9mWXI0diUyQkJTWnlqZnBiOVRiRVNJMVU5UkRqZ2FBNFc2V0Rta1ZUOFN1YzlLdGVoajZ5ZlRoNTZDRjFMODlGdGgxWUxDT2lEb0tPdmRqUzJ6djElMkJQdFFTaWM4TTd6alVEODZudVpBbGNaRmVCY0ZpOXZSNU84c3NFUVZ2WmR1RiUyRmV4SzZIRkVLenRzQlVVSmlIcVR1NWF4R2lGSXZwaWpFcXR4VHJVUG5JT3glMkZFVERpJTJGTWNodG9LUTlsMWQlMkZHNkRvRlRTZWlFRko2RUhWWm5mVmEySk81c21mTllpbWFEVzZrUkZ5R3kzOHdEUE8zU28lMkJtRURsVlQxNXV6Mk5TQ2pwUDRKNSUyQkVJUkVBYXM1b2lVdmV2bzFkQzhMM09iUmdxZmI5JTJCWW5CdGhoZVFnQ2pINFVENnZoM2VUTzAyb3NPWXFNZzVUSndtcjFXJTJGWGcxWURvdTkxNVhIUENLOGlxNTBaNnpnWHZHRkM5dyUyRlJkZFA4cWlhaTExQ1NuZDhPNFNyRlFaenJnNXlOTXJrQ0JXaW5NQ2R0R01KWWNjWEo4U2IlMkZKckRmNjFVSUI4cyUyRnE4JTJCMk8yenZKVkQwWUVzRUJhbE51UGtta1M3OTZOYUN0Wm1LQ3dNcm1IS0xzdXo4d3cxUUliT1Y4emJRJTJCSnBOd2pPQ0Z4Sm9FdjF1ZmgyJTJGQ1JvNnNNbGxva0tSU1k4ZU95SDE4YWs2TyUyQmpuZiUyRjNla0RaeWU1Y2p5WWtacE4lMkJySFMwSTZTMjFUJTJGZ0xBWE9HNjJ3NWgwVm5mYWVFRTclMkZVa3VJaTZkUzRlTXVqQW0lMkJzb3IlMkJJUVg1UW05QnBpVnIyMEkyTzJtRWlCYmdSTU0lMkJXMFBkTGVSR2pYOTdYOEMlMkJMVm9yTVNrUFZPa2Juc1I
1bkM1ZEhmMDRsOE5oQ2JncmRZZ243SWxYVVNYY3lsUkd3akVwRnBsYTJiZ3JGY0R6d0hLdnNkeCUyQkJZcTE5OEY4ejMzJTJGa2x0OWRBUlYyYXF3d2hBJTJGeDNmN0dTRkNvUTRVcXZVVmxJdTBtJTJGUndCNU9DM2EzcnNQeTh2bE5DYkcwYTNSYUd4YnclMkZsZXRFJTJCcXJtaFpYUU5aWll1d0clMkJYYWV3aTdJb0paUmZRTFJGOXolMkJ5S01LdnBPVnZWWVg4M0hkOW1XWFJ4NDJ5REV2VnVtZEFoTWJNWFVoZzg1UFVjQXVxaWhIejNQdCUyRiUyQnhMREgwclFUR0NHVFJVQXNzc2FwNmRBaU4xeW9yQktsSk9WbHc2a2J6eWxSZG5NMThwdlI0Sm4xaEwwVW8yYThwTnhYTHJRUHpHdFhYSiUyQkt3V1cydlpYc2VGWDV3enElMkZGUmlDT0h3UGJ2Q1MybHZnYnBXVDB4YnBiM0NwZFpMRDk5NFUwaFRvWkJkNzBFV3dJMk8zcjJNMmtuSVAzbFpVMkpVS2Y5aFlZNm1pbURyVndDWkZubUdsNFNsS3RLSTFodjJ3c1NYSWZibXRYN09qT2pneHVsWG1leHRiU2U4QjY3Zk1ZbWpicFk0UGI5NHM0VUZNSUIybWFvWGpKSVI1Qm1rbjZMVjZXM2pVVThqMFRKNmZJMFg5Y3JaVlJYdVE3TkFmNXVvazAlMkZHbUJOM2t6N1RocCUyRkY1ZkZ1RVlMdUpQcnZxSTY1MDFseWM0ZHcwQmU5OERJMVo3VkJNOFNRU3Fjem5WNDlBdDB1cnIlMkJJMjJiRXZjY0FpakFwUWkzUkVUeVExWERnajVQZW5udERuQzlUVFpPcm1PZzk5a1Z4UWFBamFaQXRvdFFSOEFyZGolMkY4NXZMc2MlMkJ4QXVqYktVek5aSkZaJTJGSWtlODdvTGh2RjBITGQ0QnNSUGhqTWhqUSUyRkJXJTJGTE1abFlDMno0UFJvejNKZHFjd2ZvODFTcDU2emtjcXF3SUJka0x6RFM0JTJCaHRIJTJCa2wlMkZtNEdCWWlqS1pRZWRCQmtoZVU2JTJCekpUdzJHRVA5cHRXNzhieTZsOVU0ekVwZSUyQmwxTTdiUlJtQ01LMmV2b3Y4VjZyNVE2NkNBYXB2TEp0WVdTTDAwOU9TVHlIcUJCWnF5aCUyQnhuMHBVdTNBOFVlUzZMVnhrbFg4OEwlMkZaNTc4RHN4SyUyQlFtSWVsb1dtRjZoTFhRUlVWeUZOd0xYQndmYk5tMkZnRk02Z05KeDNndmQ3NkJ5WlR4d0oxeGMwTCUyRnpnRyUyRlI0QzJvUVRsRGhhRlo2b0VQMVNHbVZVZGdYNDFmZHQ3MERWQWJGaXczbVNQSUd0UTVSNjJ6b09talIlMkZhdmxaS3JBRktJQWlOUDRVZ0NVaUM2bkczVDJnRXglMkZkVUdsSXRrQk9NNjNjRjVVRnlEOXNyMURoVFhQJTJCeUltaWtRYVZaSk5Ib0JoU0Y0bXRzTzZBM0k5JTJGZ1BjUEVSeUxFV3RQUHE5N29YcHphZkZQUFl3JTJGbWhSNEttOFlhNUNIVlIyUkQ5R3BndUNxWHBuNmVHOWtaJTJCVkVmejhJUUtEdWJOZmJKbnZoSmNON3Y1NGNzb0R5eUoySWxGRnpGOFd6Y3ZyRjVhUkk5MGVtN0FUM1BVQTVJYmxSRTRnWEs5STRDZUVBclpPOURMcFJzJTJCRTZlTTN0NlhXSDNiM0xpNW1kNVY2Nm5sdHJnSzF6ekkzUG5NUUx3YUk1S0h1Rjg1UDd5T0ZSeWUxbEU1QnZ5TGJDaENPSW5xJTJCZUxIY3Jta0diY2xyaHNENkk4ZGl5bHElMkY3RmVzMWVOeENaeWdLVzRKbyUyQlgwRmV4ZlJ6b1RPc2ZiVVRrOEhreWFXdVkyT2VWaG0xa1k2TWRyUTJMc2tIdnJhUlo
4bVZxVDkyJTJGSDdrSXVxdUgzMjY4VHE1REMlMkI5aGk1d1c2QmJkbUZIc1A3VGRMNzh4blhRTzIwRHQweHo0amNvVTR1NmZwRU40ejJzQkVkbDdYcW5kcDFWU2xGQ0xETXNqbjFaZGtrS0RyZyUyQjd4OWxpOHpubVNLOWo1MFM0OWpCMGN3alJrU1BpSEtsYjA2NzZlMGpmSk0lMkJQQlZvWSUyRmU5NXBHOCUyQmtMTFBmUXUlMkZ4bWlwM3ZNWkFCa0tiUUolMkJtT1hnNHVDcDlveEdybzdZU2VpYkdNdTF6YjFFYUVSWXJJdEZHa0V2aFhBV1hlNiUyQmdjVndyb2t2ZzJRVmhBdlpNRGVXUVdxcGNJWjZoOXBCY1AlMkZHeFQ4Smw1TEQlMkYxQjJoVGdFVEJ1N09PaW03UWwwZkE1TUhwaWViU1dzWVpPYlVTNmswNnclMkJmbjJ3U3l3RUFHQjBpZkc3NDRadlNNSEREa1pBSHF1TDR4WXolMkJaQ3ZHRjdNZG4zOWc1VERNU3RENld4dWVBTHE4NGI3MXZtJTJCa1h1Qmk4bkV0Sk04UjdTaTlEUmRlbmhZUE1hU2dNcENPUEVhbU9XbUZHNjVNSENxYnFYa1c1SlhyYnB3UVpDJTJCcE5QQ2dOUFhDWThrQkdUeHNPVFFPYlRoNms1MTg4TDNLY3ZDdXlnZFp5ak9KQkNMJTJCU205eGtwUCUyRmtJWGRoUUo2aVRKUVJ1QnVGc3d5Rnc5dThWeHdQcGJ5SFlZOElIJTJGQ1R3cTRaYUszTnpoTkozQUxxcmhhbnAlMkIlMkY5Rk9FUEJMNXBLaFlTanFZOUx0UDRhMlA4VG9IS0JIYlRRUzJ3c3dVNkllajJScHFoWUMybUEyMFBnemNFNXFSZWY0cERja3olMkIwZ1ZCN2VlZCUyQjBPOGptRmVBNGpUbWlwVlByNHB4MyUyQklibjJLRjhNTHJEYlFPZiUyQkhPSzMlMkI2am1PbWN4MXozOXU4dzlqb2dxQnNhV0hKc0U1JTJGNGM0NUMyN0hKZHYlMkJyclh6QiUyRml1Q3dGd29lNDlwR0VtdiUyRm45NWJ2MjhoTXZ2bnp6diUyRmh2WDl1NCUyQkdaS0FsJTJGaWk4U2VwM0xmTFdKJTJCY24lMkZZVCUyQmJhT3dzcTF0RTVyUDhQOGNnZVpZQThXdG9ta1htNVA4NWNyaUpobjd6cmV0TWZQWVBzZGRhd2tEMERuclBFdk9uQ0dVR1ltNnFkV09FJTJGeHBWJTJCJTJGZm56ZnMySUZhR3ZEJTJGc0glMkJMYnYyNERvUCUyRkpHY1lmSXROcERhTFhJbm1LbiUyQklQRWR5VGp4bmRCcG1zRG1HTWY0Njlza3dLNHJPTFRITE1wJTJGam51UFA3Qk5VdjlZZnFYRWIlMkY1JTJCaXN4S1VndGs5ekpLY3pmNGh2djBDUTE2cXB2WGIlMkY2dWIwSDJNYTd0dmNVa3lYRjZQJTJGVVolMkZBYmJLR3ltdjd2MFdmN1BqRFQ4MlB2bWZyNWs1JTJCJTJCT1pzaUlVZk1UU04wOXZXamtpNCUyRlBNN1pqWEhtTmw4MDUwUGVNVU1XSHBjU0VDbnJEZGNPYnpJV3hLRTE1JTJCaGZaWjU0UVJySDhsZGhpdEY5R05FNiUyQkRKNXQlMkJ4SktiUDNtd1RYdzlnM0RFSHdrQVdBUUMzM0oySU10dXh6JTJGUGhjaGVDZkh1QkxSdFc2b01MTEIwNEJ6RWlsQ1A2anZtdkhaUTZINXVpUUVBTHZSMEEwUkZPQXpRWkdJU0lJYmpveGw3RGVtQWdpakhCd0o1NXJtMzJjJTJGUEs3SGYzWHNnWEJsJTJGeCUyQmZuR3pSJTJCOXlieDVBTndDQkRoU1V0aG8wUVFhQWxlMjc1UXBTMlhDNFRrTUZxY1MzOWt1THhjYiUyRkFoY01yczlSNFpDbGJ
oJTJGdmhud3FqU1k3VmxzMzY2M3dKTnc4d0trTzNTUWg1JTJGWiUyQnFrNjV6cU0lMkJROUczUExkVndnemI3VFhQWE12NEtZYyUyRndSR2UlMkZMak1QM1pzUXE5Y25CazlVYUNRamVoOUhDZ0NaVklzdkJNUVdoZVdWbjZZV1k1SXNuN2hTa3dhJTJCeiUyRmsxbHJRWlpodnRMZGlqSldmUiUyQnIwT2pQJTJCMzFwR09iSGg2U2pNSWVmWkFyS0ZwUVklMkZWUSUyRm1Samk1dldIdUNGSkJiem5FVkVuRDkwNm85UWtvb2JhTHNQUlFoMWozaEV4V0dLUGFndzVoUlVkRmZxdEFHaVdnUkFHbXNuSVFqYUdWN2R2VjY1SDN0cGNSNE5Rb3cybTRxbmglMkJGeXhUQmYxRDZpU2IlMkYlMkZZdWMlMkZTMk9YQSUyQnZqRERhdU9vNWpUR1l2WFolMkJZcWglMkZ2R0ZOUmFsbUZpdXlTOEtjZ3IxMzcyWmdLZjl2aDJMQ1ZBWlY4RmZEaUZscDl6QUkxM0huUiUyRjV1THI4OWklMkZMV0JZTVh3VyUyRjV1eTk5aHhISWl5Ukw5bTl2Um1TVzlGVDRyU2p0NTdLMzc5WXpDcmU3b3gweGc4b0ZBQ0tyTkVSc1ExNTl5NFp2eHhkcVU1bEpocW1haDZLRndzSllodDglMkZkcm80dnUzYTk5OHd5cmlQNnFleE91VCUyRiUyRmJhJTJCOTF5VFN4WWlHMWRyOUNIMGxWZ0NtaERpUWJqWnFPa05JTmplZDIlMkZEakhHSUdjbjNMZXo1aXNvbm5hWVBEMm9TdEFXT2JSZjNxYTdDRkpVdk9icGdrOENNT3hpenJRdGgzOHlDcEF2TE8zcHlLejVCcWlEUzlHZjc5cWZ3b09JblNHZnpqViUyRjBrYlVEeDZ0WXU5RVJyQkprRGVEeWliRUFmQVo0dSUyQnNyb3g2RExVNlV2QmY2OEFORzhhWGpCVmF2WThTN3lOOWFhbkx5Zm5CYldhUUtQQ2NzelAlMkJmRFdKUkE4YzdvZlRjMDZlQ3k1NTN0R1Bzemw2U1JxVVlYUHdzd3o1Z01FNGxoODBmbGlSeVZub0h6Qm5JQThqRHJ4eXVHUiUyQmgwZG5jc2I5RDFzY2ZnZzNOR09BckFNTEJTalJIMWhwQVNTYVpHZlJoYnpzTkJlUUVvNUpjRUpFUVZ2b2wlMkJ6dEV0dVJ2UkZ4VE41bjh1bFB0MXA5JTJCQVAwNG5oZDBIdXZaeFBqTTdtQUFoRTd5RzJ1NFczaGFBeVhkemlEaCUyQmd6SklxdEFGTTRxYkM4Nzd2Njk0RXk5d1NWV3hIT2RUVDc1QUN2Q28zbEU2VzFxNjNIbjVUNFMlMkJxUVl2THkzUER5bzVycyUyRjdzRHFtM1hzVWJYb2gzT3FXaDRveW1GZVIxdXlXZEw2YzFWUkNGRXZlMGNsNDlyYlRWYTJ3cERJUFFmQ2V2SHVDMG5qJTJCOGhOTiUyQjI5eVR2VW5UT0pYNElXMSUyRkwycnoxSnpIOW9rTFhFOWglMkZ1dE9IaHJWZjhjc05jQk91dUk0SjZsZUZreVJ5MmNTTEJhQXpTdUdMZTd0MnNsNHFiVHhHU2EzdnJPWGpyM0FiUjdHMjJicGZqa0NBSDlLQ2ZlU3N4N0xmWlJBaFNnNiUyRmNaeGF5aFhJek5sd3JjJTJGUFBtUGFKZllXOEJZV0tCd1FLeHVQbGVpSkk3clFOVTAzMk5zWWdicFljMCUyQlBKM0tRTUdDS00zdk5rNkhxMmkwMnZ2SWxPNW1OckFiQVJoSU9KOUhZc216bHdTY0lQbFBQM1lKMjhYb2RYdmY2WXZvdDViUVlKJTJGeUhRVWYxbCUyQjdyQlVFamVocEJTZWZqQzI4RkNNbjJuM3oybHUxJTJCblRZTG1PRVdROFhTaUwzUTJndm5ZcHdPR1NKb1E
2NXFQYzZQVTBhdk1JTkVPdFklMkJkNTA0N0UzMnlZa0FVSXRFSW9paCUyRlVGcDlyZXh3WkV4WWh5YTF1dE9HSDUlMkZGYnZmQyUyQmROS3RlZWYybGlHVHk4QjVuRnlVVzJDTHRQNGc4VUxuWmU0TUJYaGIzSEN2U3Y5bFFRUCUyRlYlMkZvbVd6bjdyWFV6bDhCUWhFTGNibU5iTE4wYkFkZ21MRG5vMkwxRDJaSlVFcHZrSXpsaHZPQXVBNXdXQlBqYUc5bGtJaWR0V3pRWldyQjhYdExIdkthQ1hseGhTM2sxSWFBbzdsJTJGaGljMXdxN2xmTHE3OHBIV0N1aDNodTFxbmYlMkZORDl6eWptWXolMkIyJTJCRjFTemFBeXdMRyUyQmMxak5UJTJCOTN2RCUyRmswVHNaWFJ1MnloN0p4SHMlMkZvVm1raTZadVlRUmNvM2hYTjEwOVdlc05ZWUdsUkwlMkJrb2pva2E4V2N3UXA5WnJuc05VM0FNTE5zemtDRjBxY04xNEFFNXR3cUVldGZhNjhuT2tWTyUyRjVUNUtkcEFuVmlWZHlWbTJNS20lMkZ2NmQ1MUp2eWNIcSUyRmw2Ylh0Ym9CU2RKOWVLY1BWVjJuU3NScFV4TW5VQ0R6M3dXak5ESUZobiUyRmhLazI5NUdkTEVRZmRNNEIxZ0hzYnN4NGp3czdGckZjRXBuNXhWZ1lSaTZLeml5YnViRlZvS3BDdU1keUp2cWVMOHZmUzdERTk5JTJCbkMwdzQ5Vm1NN3pCU3ZDM3VyUGdOQTV3bXlhY1ZHaUNTb0J6dzh6RWs3VTMlMkJ1dVpqNjhoY2NUZXhCTDRwaCUyRlRwJTJGQllCVDRyUTdaYkFRQWo4TmRpQzNjTTh2RWpBdTY4ZlZ2ZWQ3ek0xVk9tTjRPZiUyRmtsNTlOQkQ0N083NmtyQWglMkJzMU5oU2doN0ZFWURndnFNVG4lMkZxYkhKQ3I1SSUyQlBqcXdsQ1JndlJnQ29OWUFYUENrNHNjMFVNZTdVZWhyVjlVQjhoR3FBWHpadjhGbWNCNllrYWNyQlU5emVQYTllNzZlYiUyQmREbVpaWlAlMkY0Z3Y0NUpuaSUyQllFT0FidnIwcG9SazdwUENmc2xBQiUyRiUyRjA5azN2eWhhT0s4Q2FTQSUyQkdzajBQWiUyRmtZUEh5NUgwNVZtdUVnejhPbEd1S0dtQ3FJUkxSeWMlMkJKRURlUmxzdnhYdWglMkIlMkZUeWtRVXpKTXlRV1RFTTdqNXB5MWJOUUNGbXA2dyUyRlgzZDd2RUw3TnJ0UUgyNk1wQmlTbDdaT1NUbEI3SmlJNnBYSnpsdnpndmxOS2FwSmw1b1hRRThlJTJCaCUyQm1FR1padlJCbzg5MldvZzBxUSUyQmx1NTJFZHRHVWswbHVkd2hmT1RiJTJCUVQzRHVCZ0EwQlhKVEY2UXpVb2lwcGc3b2FIMGY0Q2p5Wnp6SGdlUFV2QTFUeHlKJTJGTHhGaDYzVU4zNHNwTG5RNEJwQW1QdVIxJTJCWUFYbG1ORWtqMkU5MGpVOVRQUzN5QVJSQW9UdWFYZ3clMkJiN0tqeHRTZG41JTJGUkQ4Ulg4aHJrbllIM1NORTlYM011WFlnMk9sOUZ1SG9kcU9EJTJGN2hSUTlvOFYxTUJ3dEg5eXR2WVdidlRIbjVRTiUyRjFjcDQzejZsbUh5UG1rVk4wY1V0VVFBcGUlMkZUNVVzd2slMkJLWll5byUyRmN5biUyQiUyRmpnUmt2end0RHY0YzZpamZBR3FydWVVdFIzcGpKVDBYemgzZ3Z0a1F6SDNaUlBsOXVQWHkxOFBmRkNTaWNzaHE1blpDTnlOVCUyRjhzU2xhRGs0YWdacyUyRndJV3FLZ2QwZmpFSERLSlhXMUQlMkZwTmwlMkY1U25JcHVQZnNJMWE1RjU0JTJGQ3c5MlNrSSUyRkZqeURpQWNNUmx4cjlScmV
FRkxHUkJ2RldSalR3TU5TbjdtWEhWUFNUJTJCM1hsUGNqT3RzQWlCUVI2cFpvbFhrSk95dGp5R01hRWJTZzhzMlM2djhmYTdVZEVmdEQ2QWdhSG1nSGdselhFbk1IdUtpayUyRmczYWVSbEZOV1piWUE1ejcyJTJCQzlKUTlsWm15aU5iblA2MHoyQlJlRyUyRk1JUnZDWVJPR2dQckslMkZIODRoMEE5OGpadGk0cXlpb3l4ZDF5bGdJVzhvTkwlMkIzTHM2N0h4QXIwRlpicEh6c29CNHVrMWJpUWFSS2kzMmhzUmpVWTJTOW9Vb1l6N3N1UjYlMkJUanY3YzFQS2xDb29xS3BrNU5mYm9LM2J5VElkVmk1ZFZ0TkNzc1FmJTJCdXBHTkhCeklrMFBERmVvZzdhQXIzbEVrR01mTVhCT0FJZE1xNFlQQ2NLUU9zbjhDZzJyd3Njd2lLQ05sS1pBaUpMYSUyRnN5OVNKJTJGUkFhcXF5Rk1BYlZKV0trJTJCeTJ5enRLeHBSVExabGNma0NFT0wyJTJGMWxYY0d1dFp4RVA1ak9YTHhQOCUyQlhYM0ZseHc1T0VBOFYwNCUyQlJWOUJ0YmlKeFA3ZXFJODdPQ3dtdnV2MSUyQmdFMEF0aWlYRWNwUEswV3AlMkJMc3FaYlNzNHo3aEpWbEZHbTRZJTJCWmkzTTluR1JiWTB4YUNzTmNlWXZWZ0IwVnhEeDBHd2dGU3o4R08zMTRRaGtVck1JbnN5SWViWGolMkI5Y2owbXlrR01xZGpxODFRJTJGMzhpaVJyaTF5ekVRYzhXcFd0MEdKOFAxaVZFMDZvZEYlMkJ3JTJGcEQ0enRpNFVNU2JKMmNsMHBoR1dJaWFZZDhJZjU4UnZUdGlmd2phejFGUmtuRUhlbVVHYk9FN0dXVm1WV3NpVWZQWFJNNHdqUzlSdEVhRUgwYXVHTnlTNlNCakptcVhxeUxHclA5TGVQQTVKc1BsdmhxMDE2RzI2MkJ6bFRrcmVaWVJnekc3aHZVeU5Rb0V4JTJGUTF1elZlT1lLZTZUamFMaVZrS2FSUzgwd2tDNm0wQ1hJclF4YXVtMkJZRFBaUWd1Mk1VZU5wU21NUXU3QU4xRjJGd1VXZWRVTnVZUVJsZTNpT0JrQmJaTEJhQ3pQMlhZQkZ3M25mYkowR1RxclZaN2g3bFZhZDI1ZWtLdWt1JTJGOFJZRjFTdUhHOEt2NGlVMEp6YjFQY3RlM0pxcW5JTCUyRmNwJTJGVEpoeW0xZDNWYnRZNVowTDZacUtqY1c4cE54SVExRUNmNXNEUmIlMkZoWjdIRzcxQkVTSTlSclJwdmhPVlBsRzBJcDclMkZZeUhPUGR4TyUyRkdoY05yUm5KbWF1ODdoTkd4clVDQ1BpU3UyRFdwVm93VFpkNHJPWEZHanlLckJSJTJCZXdWZnNBWGEzZUNmWmo4N212SENvQzh3VzVsMDVEMWkyTUhQcHElMkZkNkx0YjhEaUdzNEx3RnRYSmxPbmh4WEdFbzU3R29EMmY3TzklMkJPa2VzRjklMkZQRWpXbGhBM1B2TWZtblBWVlYlMkJ1TFVZeUVoNE9yYWZ1SHJ4dWdGNDlZVFJFOXRodnIxelU3TkswMHZUdlpydms3ZFlLT1R3JTJCMEkybzYxeGZBekV1bUEyM3VxYzRmN1pyQUJ4UlZtd1dCWHNFajdpM3ZwRHdiVmZhM0ZFYSUyQlRjRjZvZ3pxT0FSSWFseTlsdDJWOXJ2YnJQMlRLNSUyRlVudSUyRlk0RUp4UHp3c2RaMzJMdzByVnhsaHYlMkJKS1R4V2RGJTJGVTZDMWxmb0txeEJxRXVITGZMMmMyWXRCbjFmVjlUN3VHOFRNamhjeWN1elBzNUVsMG9lWk1GNjRpNWNDVDRONmhjNkJmeXdqUFRyMU5URThUUmVPOUtoMjVrbTRUWm1SYVZCdVA5U1NhUHhTVXh3WUg2NTAlMkZuZXF
RdWhpVENkcWIlMkZZcEI4aHU3c3E4Y3glMkJxWEp0Z3VVYzZiaHZwMzl0UVlUemp0Z3E0NWg5RFFiQ2xlJTJCT0FuaXBWaFhuTmdrc0J5SlFkT0pONDNRTE1jUE1ib1RSemprM0IlMkZMcEg3UU5FMWFaTjZaejg0WUxlT2xtVSUyQldTeVFoYnVIZHZvaFVlWEtpc0RZQ0ozeTJLTCUyQlVEN3Z4UjVhJTJGJTJGTURBUiUyRllHTm1uaU13aCUyRmdOcHJ3Q1JMQUtEM3E3TWhKakVrb1JwQUFiSSUyRjQ2OXVnMW1PekZOZTN2WmtIZ29DRzBXZ2tka29GOVcwMlNDdCUyRk53eHd4MiUyRnZvVjclMkJ6TDBjVWYlMkYzNFBWN2FJVm4zN2glMkZYRXVoRnZzdm5DS01mMlRJelhyUXlkcDZRODNyNiUyQlBNcnlEa0RBSUdDd1ZodHNRNGZOUEdxOTRaandNOUFuTExqTiUyQjhaWVV5bklOVEdRZWQ5dG4wMCUyQnpGJTJCb0dnekNoWm9maVFxb0owZVpTb205dTRBb0dCVFZvbWVDQTE3SW1wMmc1R25neEpkNXdCRTNUN2VRVEc3b3Q0VXFqSXM3VHNFVG12SnIlMkZvYlFNdWUlMkJ5VGhURDZkYnowJTJGakJXQnllaDFXNyUyRnIxelElMkY5OTZRVzBkOURRYVRzMWlGb2lLOXBWeEZXYzk3ejUlMkJySXczbm1MJTJGUURycCUyRjdQJTJCOUw3ZlF1dWJRTVpUeVlqUXlRRklIcmNFczI1Y2diMFklMkIzcCUyRjlMbGZoemNCJTJGMjg5V0FEWURPc1l6U1N3V1B1V3JSU296Yjl3TnN0aXNhcWZVaTVCSVBWWWwyWjMySXhYN3IxWkpadW5nMHg5TThWSXBvdUo0YnB0ZEFtdE9EUUd4a3NlS2RyU0FxWlJsR2I5V3RIQ0lwMkJxRWVXcUFmakRHbGdXOUhGbW0lMkJYOVpLdEJTTmNjdk9YRFZ6c0lZU1U5VnolMkZNdWsydEp2WHg2NyUyRjVCcjY0WVl1S0hCTCUyQlhuZk16WVk1MGV0Q1Z3YTIxRTFWcVFxbE0lMkZRTWRIcHlaRUVMODFKR2N5djQybUpiem9uQ0t1JTJCRGJ3d0trZEhtcDRvZm5ZOUplTkl1OSUyQmczM3BJZEtiV0Z2Z0p4ZTNaTm9kRGZPc3RlbHowaHFsMVAlMkIxdUY4OUNqVmR6Tm1FSGc1QWc1WGNJOHZzdzExM3Rpd0xSUDF1clJEVXpZdm5VNjB2TlQ4Q1VRSE1QUkN5S1hCaUJoTDhCRDBTT0N2WTh0biUyQmoxTDhCY2ZVMjRWazlBbFcwQVoyM0pFTlI3R0pIbkhUTFNXdFJ0bFF4a3NLbUozUFBMNzI1T3QlMkYlMkY1Z2JLZUlqSktEbVNxSFFCJTJGUzZyRDVPOFRBNElvT281eTlNZjhBN1lEZEdNek9kWXVCQ0FvRG9DZ2lFJTJCRVFrT0VOJTJCbTVnU1l3cW1TY0FEdHdsZDE1WFMlMkYwZXhIWEw2aHluNiUyQkhyQzV4SWlKa2UlMkZ6MEZnRk4xYXlQVzVxbkFrWGxtbG40eEVUV0pBa2tIS1cwQzZZJTJCdVpaNlVIT3Z6OXY2aHR5UTJJSmJnQUYzJTJCVWMxOUpiZFgzVzBBNVIzVFphOUM0cnJpNzBJYTRmcTZ4SU5Xbmg4JTJGWEY5Vmt3SzVva1dUR2ExaXJmRkc5Zm1XR3ZuQmFUUXlBJTJGN3d6aklmRXZoTG1mWXZJS3hHJTJCSXphakgxUmhGakc0QWxtOENPWkFlZzJNeFNyZlRHUEpFOE4xJTJCWVZ0OUIlMkJTUE5QYjUlMkI0R2cyRHM0ZGF2NXFSNmNaQjdiTlptRlF4YUFaUHhCUXV2Y3Z5b2diZFJuTWdkdjUlMkZaV25abkkwYTJyaVlkNFMlMkJCcjEzZUszbVI
5ZG9LbHM3eDl1OXVDMkFLbHUlMkY3TVkzcHVaN0tlR2R0dFdiMkFjU1NBU3g3bTlvcmltQ1plMDhldEdDd1ZxaGdTVm9tM25DNmNEZzB5MyUyQmFkQnIlMkZiMlZMVjltYWhSelppN1RRNDJ2TnpBZGtYa01uUE5KeVAxZHEza0RNUnBtWUpQbENZQlYlMkI3QjlveHRjbUJHbWVjVWFxdW41ZzNJRXVSdnpKZllsbVJsQ2dvMHJTamxqN2pEJTJGdWhDcE5PeWRoUiUyRlglMkZvSDhqU0d2dVZMalZHMHg0RDkzN090JTJGVnBhNUt5RjVaSVJpRzNKTyUyRnpSSFhCWElIb0ZsWlJ4JTJCMDBZbjJ1RXNxSDZKcW03NTclMkJkY1V4Q1FVMzlvd0w3ZTJldml0JTJGRHZiU2xoblQ2TUlTY0hZMHA2aVU2U1NycTM4NFN4ZmFMdVQlMkZEeTNxNnRNWGlOOHlpaWFlbXhaJTJGWTVLUjJJNWRnRUVvSElPMklPYmVIMiUyRmZ6WjUyZ0YlMkY3OVR4R2ElMkJGT3dEeVJaWFpEZkM1SkJmVGl2aXFpclpyTEVEcTVDQW1KV016RXclMkYlMkJhVTBNcXZWTVFQZFl5dHdKRTN2aUVzb1hpRjQwR3RONndUaGJmS1VBZHNKRW0lMkJEZGg1WTh2eTJHOG1kTHdVUVNrMXMxNXh4USUyRkRzY3JCaEZMeHdKUWJtTThmdFZqMEZmQ25WVU11NlQzdU43OUNWJTJCbnZIT0RNejNmWG9PYW95SUFRWm1jQ1h1YVprakV1OGhuVDJSdUFYZ3R0UVF6S0w1bFlockdKU09uYkUyVDZLcE1ZWXNiZiUyRldNazlmc2l1cWxGTkdTR3g5OHZSWG52NTFreTJTRWQyck5GS3ZvSVNWMHVudjdvQ3NUWk1SVTBJR0hSJTJGU0xjWEtKciUyQk9jaThWWUdZdUNCVEZSUkZkUCUyQmFvRm92QTZEUUNNN1JZRGhBQVl0dmJvcFhpejV0d1lDWkhXWG51VjV6dEdtNUF3QVQlMkIlMkI1WE90cTY2JTJGaExxTXkzZUZQWmlzN1hHYVl3M3I3JTJCWXI2TDlWakRUaTVRTnRnUE11dEgyMktOSVBlRHh2dExjalJ6JTJCM2t3VmU1OUEwb3dsWXJncyUyRk84WU9OYnN2TmowR2FjS1BheWV0RG5vVmhxak5wdmxqOFlIdUolMkZaSyUyRkYlMkZ0TVB5SEdDNmx2ajVINHA3ajBqQjF4Nlkxc2dKMzh6T0lIbzBaT1k1UVg3Zmt5R0p3bEF0OTBnOEhrSlg1Yk5XRWRwUkRZekpCTWRJWUlkZDMzdVMydms2QUdtUVZmUHZTdWNwRFNVJTJGSDd0T1NYM1YzUDV5YXpneFhwWmx1UXklMkJMY1BmQ3JhUEtWU2JhVTM4JTJCa2pOWlRhQ0FOcWRwbTZPJTJCY3hYSExzdXpmU050MEMlMkJzRCUyRjZDdDZtaSUyQlElMkJOcTk4VSUyRnc2VDRxcjNkdkIxWnEwd1BBcGp6d3pIdjYySXlkbDBoaVZ5cm5oWlRndmVqWnBRMEtLcE5jZjlkd040TCUyRmxCSW5zME1BWkhwbWR6Njk2Vm5GWkpaRU9kNWpOb3JaYjRQcSUyQkxQJTJCalFjMG5zUkJwSVVnRGhVVDdaS09SZ2wyRzVqTCUyRlVoJTJGdk1tJTJGQ2ZoYjhOVk0wNGtyT3lSQWFOQ1lNUUM5VjhtRzR0VDZ3cHkwTW51SlY2MVpGR1NJWG91UUlUJTJCNzhOTTlhQXhhRWM5YWZBenVKbThQOEoyek1JSkRhSENYalR5eElEZFRyazlaa2Rpek1ITm9RaW1FZ0EyJTJGUjdubSUyRlAyVnRydGNvZURpSkY1NmNldlQ4JTJCMGE5OXpEJTJCSG1maWtYRU1PYVJMWjMlMkZIZHg5ZmRIek9lWGRsbWVnOTJTOHd
iWHBVZTQ0OGkzaTJtbnNZRjBiJTJCb1RZckpFaEFzREhhVTVsc3E5VTZDeTJ5bDNaaTU5JTJCMjhDb2pGZjRCTk9FazUlMkJSR0FNR05Mb2RQaTZZQmpicHFLZjJoSHM3d0kwbWFtSTNuJTJGTGVwT2dTd2JQMlZjM25Cb1kwdjViWjBEczlMYjRlNzMzT1NKSDNkSkhDcXdTd2NiV2JoRTB0UWpSZE42bm53QTNHSGN5Znh0QzZKY2pwTHljYWY2Ykc5ekl2U2hIZ0RLaHhNWUoxOFN2JTJCUzR5MjF4ZnM0RVRGeUxyeGVydVklMkZoNVR4UFE1MTBxUjF6NU5wRjM0aGc4bmltU2EzaVhmUld1ZzBaTnJ2RDlaUHhlbGcyak9BNnJWckpFVjM5VzlQSzR5JTJCNjZZa2NGY09CR1dnN202cUJGZUptdHRPVGdOcVpNMlNFZGltbzN5NXRoWmpkV2QlMkY1RmZ0WXNDbSUyQlg0WmdRVVlhcmJVM3Zud251cEtRSHE0dm5IQjBCY3RrTW96Q3M2Z2lpTDY4VXlOSVhQMEJtOWtIaWsxUnBCV3g3N2xISThXcXdkOEYlMkJncm5FdHQxcmJmJTJCYkFKc1BYOWh3b2Z1T0FpQkY5NzdvdE9vWXYwZ1M4U1Rnb1VnZEd6R3dCOTRaSmxRZzd6cUlGMEM2anJzZ1duemg3TnVEUk9hV0lvSjhIVUFpQXVjREN1RmJQWnhWbmJ1JTJCaVV5RGMlMkZYUkZpR05xb3ZURXNiSUFKNHJzQ2NXeWUwckh3bkRuZ1V5JTJGeG9EUTlmJTJCWm9lVTI4UWVHTEdTYnFrcU1QNmhRVk1JTVptMWs0WVVPT3Y3Zm11JTJCdWhPQ1JHeXIxdzJOS3lLYkhCSFNtdndKaUhuVTRCTlNIU2tVUDdjUmZsJTJCbkRzeWpjZzRmaUVFQkRVWGcwcDVrSUhtS1c5eHNlV1ZuWTJDeFElMkJnTjYzVUw3bjZZWXJYYVQlMkJNQzZOMW1ScGElMkZGemNDMGYzMmU5VmYlMkZ0NDRSQjlZWG9zOFF3cCUyQmxpMFBPa2Z4MWluTkNBZ091WnZWQk9HUEgzT2k2JTJCSEpXcDcyVVlPa0pPNVpaZ2pHMjRrNFpQclhFQWNsRUUzaWJVQ3czeXBwdmtuY3R5Nlh2V2s5diUyRmhtYXdPVEhxNSUyRlNqdTdUM25UbExtQ0FacUYyWmElMkJvWTk2cEJxRTVoNHpUQldJR05taTVIdllqeUwzTDFjOVVXZWZWZEpldFM2M2owUFA2V2JPSHZLZHgxMjBQcXZmJTJCUVNBV2wlMkYlMkZ0ZGNXJTJCanZhaG04YkRDTmkwZUIxRXhSbTFBWDAwJTJCbHdad1A3cHNSdjFvV1hMbXBDJTJCUU1pQ0dqRzNReVZ5SCUyRkNJT2tJa1NUM0ZrckZkQXV2eEVDZiUyQmdVa2VtZ1Y1OVhQa0klMkYxQ0RjNyUyQjljRVJXV1JIRjJxZHcydVhXd1JlOFJiVCUyRjhiOWZJanJZTUVKcU1ubHBDbTRMd0FvYmhYbXJHMlg2T3JheG9hT2lWYXQ1UGRUa1dYNDlkSCUyQmx1NyUyRkZKZVgwVFdxb0xTVFNEMzJRJTJCR1laeGNNeENFa2pjaCUyRnVXT0lSYjNia3RQV2d4NXBCODZxYWZvWE9kJTJCZkNhVW11MFUlMkY0aUptdCUyQjFkV1A5bXptRTdMTHJJRmcyM0dBQTBYVDM2MXNsUmxPS1B0NmFEaiUyRmRKbzFpaVVVJTJGdUJsUCUyQnVwaTZtemZDZ1VaTWt3OTQwME5rSzRnNWlCQjJVVnZ0c1lXaWJVOTRkSDUzdHVvMnp0cWhLeXJUSnRRUWVTUUN0TVhxalpTV1phRW85WDB2dXdtNE4wNDhSbHZ3Q1hyMzdjdlBjMWR3dkRBWHYlMkZKeTFicjZLRUJKZmdNYjBqT2IydnZ4WHo0aTh
sTUs3TG1DOFFGRFZkZDFvOWwxd0E4Z1RVVkk2dDhQZTg3JTJGRGFuWVRzN0VpUDBKemU1QkVLaFI5WmZLTG4lMkZmc0FwRUh6alFlNVJmR1c1R0IyV1pqaXREVmRmRzJhNkIlMkJtdFJxb3Z4RUdDRzlIa1p0VDZBZ1pXUHBWWDRDUmFvNWd4aERKelNBWmRxRVEzUFdIMGhrdHpqTkpINDZ3YVJSY1NxJTJGZ3hrZXNkJTJGNkUlMkJKUHc5dXZObW8xN3Ezb2pzJTJGT0NSJTJGS29tcmRESG5xclN6YlRYM3ludHBGc3ZZMnlNNkRscVhiTWkxbzAzeXVlQmVDbFE3N1I5QTJ1MEpUMjRFWHlqb1ZpR1VtQUxwWCUyQnZhJTJCc1FTeE50YkVIVjNRVTJ3N0t1Q28lMkJtVUxCeU1RQVNSd1JPRjJVVG1BdkhXanZPaG9BcHlydDdoZ3NDSUNjRVRKdjAzZTFiOHJLWG5vQzdRM05EcTN6VVVMeGZuJTJGdWJ6NlozeHVaZ3RTYTZjZmRVOTdXTTNZaW5EVHdBc0syYmZLNEFBZmZvNmFMMEV4ViUyRmpDJTJCdDQ4SVQ5YjRlemFPMkJDSEhQTTFqNUxZJTJCVThIeWVSZzhRUlE5MTAlMkJNTElCbE10cnB5RmR0QUFPdTFsOU1XOEFud05CeExINjRCMjNtSGRtRjJMOHRPTGFEOWJWeENtRUdqRGNSS1hYSks2UGNjNWVyTEhWYlE1akhNRDM4QVRDQnZyVXd3JTJGSFhNNGIwaXJMYXFNQktPN2xOQlIlMkJjdlZXeEhCMTI5SjVtOHElMkZabDVNcE5lNjA4bmxZanZEc3poM0dXUnBtSUhnNTVMTFpkd3pNMHhCM2xDd24zbDQ5STAwJTJGTWs5bVRjOExVa2tMOGN1ZGZkNlVKbnd3dU9uUDNXODZFTm16S29uVDF2amptY1phOWFaMXpsOGc0aThLa2FRNmtOc25COElSQlFWWGszaHdpbHJSdWR2bnVUSzg5MENtRWF1UyUyRmxWQTBVQVZMUThISjkyVE42c0VCcUFDYkRSUVN3bnFibWhWJTJCekR0aHhaY2s1eEZFUjJhZnhEVUxPcnJnS0RxZlRPajZUUnZCc2JiU0dyS01oZ2N0NGV6czhTZ0drNFRqanhZWVFnYTA5dFpkZUtER25XdXh0Q2NVcUJRN1U0STBzUDJrTSUyRldhaUZIdkEzZ2pmVmRORnpwMFhJeUVDb2ZaUDdXZmVFJTJCdk5jT3ElMkJTRVhoemlwelZFbWt1UUhlZU5IU0twb3FDM0tNN2tUN0VjbUYyb3hNQ05xellCVmNFV1dsUlBwOW5ySWVhJTJGWlkzUkhZQzlWcDd1JTJGcTZkZGxSemJOTmlLNE1jeTFPYXFRbFBrUGtwWkNVSzQ3cEFGSUNnSllOUjg4ejJOZTZHTDhEaFpvZzdDYUZTVnZCalIySmI1TzBVT2pISzVOalpweXkzNWJGQlYxSU0lMkYlMkZaaFFMM2UzMFBjRVlocXladDNzblJDVjkxd3k5MHhycCUyRjRaMWNkJTJCVU51b3I4SXBUMUFQM0xpcTQ0dFpBN0tEZnJ6OUJZUjUlMkZhZHhMVTR6ZGt2bXZkRllHJTJGUGlRaG9UUkRxMFhjZzJJT2hMYVZ3QzhZRWlNVWhYbndlN1RrVDNaNm9leDJpUERmSiUyRmtMMlRXcFRyTjI3SUtiZFFZdG1WZTRrUVhRVDIya2pFWDBFY2UxZWYzcDRyNVFiN0twaGozblpLU3RIdGR4SGNhOVBnVzVlRDdDJTJCbnVvRGRieGNVUUxDczZJM0UyeE9Md1diSXhNZE9YUEhwZ0UwbEJQczU0MyUyQndHbjRnZUdtRiUyQkc0SllsU0h2Z2hadTI2dmQ5YW1CZnU3VWJmc25ManlOcjRKTGdpblBxbDNPUVBhS2JBdWtueVFVJTJCYk85NGcxSGZWTVZ
zSDE5T3VsdksyaHZxRzJlJTJCSlNLUUh3dGtqWDVheEZuc1REeUlSaENxdThYVnVGVUFoU0glMkYxdUF0cU9XbkhCM0ZlTjBhRWx1WFZYejI4ZjE2eTljRjhRQmdoM1d3TTd4emNoRDZXMnlpNCUyQjgzMTdFQ24yMlpOeVBuZGtEN3JxQkpra2xFT3NDOWxrYTF2VFR6ME5JaUg3ZGJ3VCUyQnIlMkJLdFRhUUZEOWR6Q1R5RHVBOU0ySDA4bUpETURYNTExZ3pIbDhHNGtGVVlZTCUyQiUyRllTMEJDTXowRWNHdXFYTDQlMkJYTk16M3ElMkJUTGJFQWV4WklkQXRJWWRtYUplazUzaFNPaDI3VTV1ZERRZEp3JTJGcFVTdyUyRkdOOEc5aE9QbGx1TkwwS2IyeVh3NkVvR3IyMTdiakZBb2kzY2VvNzdTRmFlWWpWV2g4NFlNRU4lMkZ4RmpNTmhkaER0V05LVEhBRFdJbW9DV0xCb2FSV2VaRUZDNU0lMkZkcjc5dktISWpSOEowR251UkFiOHZTaXpHVnc5Yjl2QUZ2dXcwaFlBUXY2Wmt3b0lsYkd2c3c5S3ZyenBJc0VoTUU0UHZWM1Y0R2pobmgwNFolMkJQMzhxTE5zVlglMkZSZldJTndmWDBKMiUyRm5peDVpQTBUZ3ZuJTJCbHBZWXhkMEUxV2RadTU0WWpGZHgxQ0VTdFVmeGFPc3V5V3d5enJxeEJwZm5tMjVlbmxVaU5tZktaQU9KYWNyS21JM052emc2SlA5c0Vld2h4MkEzJTJGQWswU1dtWDBvaWZYb1k4MWZ6dGNCdU1BUyUyRjFjQjZ1WG5FdG9BdHVDaiUyRnE5JTJCdzlDcjBxVHJjM01MdzMlMkZsZEJueFJTMFJMeFklMkZYN2tLbDViSnpQdSUyQkdxVGtVeCUyRnJHJTJCS2swN05LUU5oSGpvJTJCY2Q2TmtqZHU4bDV0VUoxNndvdXVncnhJMjByMEd4ZjRkelJqeEt2MTBDVUMyNGw2bHlPYjlEODU4cXk3b3I1d0RDN2J3RndtTTdiWUlVRVFuMWtLMHNsc3VXOHhPdjNoQlA0WDIlMkJGVjZMdnUzVDZJaVZmcXhYb041YmhjTWJ2elpqNXhPNDBmaCUyQnR5cXV4cyUyQmhqYmRMSFBrZWNBN28lMkZnViUyQmZSNFc5ZGxRbHhtZ09Va1p0V1hidnVpNG1obFdpbmVPY3d4eTRlNG5vZUlBVnpoMFgzMzA1dERxTklZMjFQZENuWnMyeGJGYzA5dDdZWGlkYWxkRUh2T1ljQ0dxZnZjNmtqczYlMkJzbDFHYWhlZG1NMWt1MjZkemtjdXVOWiUyRjRObHdZWko1SzFoZW8weXpUbjNYVnpraG1zOHJuZ0wwVDZ0MmR2WDBzT29nRUk1UUZOM0hxTFNzckFpMXNhRyUyQklxRyUyRnBkbnhmemJUQ1F4czBQRUlsdW52Ym4lMkJiREMzbEZlMlhWQThta0trOFRVTkl1c3E4VlpDcE5kdGtyaGJjYzRxbHdrTldDV1hnR1AlMkJmYW42JTJGZjlTRGRHRkl2VENuODNObWJacEt0VnBhM1pUSXA5SWZGRk9zY3VFTE9zbkg0UDNodkVNZFBXZTBkJTJGUUFOakp6SWNWRG5JQ1ZDZzc3NHEza2M4NHpCa2V2VjlXUnd6dDFuT2dVamZqN2JJQ2pVR1BMREdDTU5WV2cyNHVjYjV3MmlsdHdxMGJ0OENoMDJ6RjR1eHBzeXpsaVclMkZHOWhCd3RaRFBiVVRPUXVkdDklMkY3QkZ6b1prdGszZjZ3S0xaNmNsZm5EN2xxTVdFTldlV01yVWRGSXV0dGVmOUVWanolMkYxWlpUUUxSZjQwOEh3TDcxYlJHckt0RDRUN0tGZlB6ZHJUWEN6Vnk3ckJIdGdpNXdpNiUyRnRTeDFrMzlhM1o4dWpLcjlIWDVZSUd3dE1Ic0pHU0Y
0ZFhwJTJGaEhFZ1Z3UlElMkJjckh0OEZIQnpCdDBHd1JKcDg0RFRnbG1PT2ZwMkxkUjhXNmJKNVVHZzJZVVQ5ZjJ0QTBaaEdGQXp1cUFBaW1UYkJ6UWVFdEpja2o4RmxHcmJ5QkNEZDFGNEU5anlndEVDQUNMamlSJTJCdU1sTXlNMGppdVRWR3JZU1NscnpheCUyRkZuajV1NFAyUncxcDhRQkZaQVFEY08lMkZnUFNrVWtkOVV3MzVHUlZ1WDZCQWFpYTBCblJwTmtMeGZ1U2YlMkZlaExiS2slMkYxZkdNNzI1VEx2M2pFRGhkJTJCbHQ0cnVLemxmN0NQMUlPMk5IYjVyRmdZYzlpJTJGN3I0c29pZ3AxYVVuJTJGa3BpdHVvRXRyNmxDYjlvJTJCVXZyMVJ3M0dJM2E2TVpnWUhWS08lMkJsYU10a1AyOHB3SUZKMHNrc2hGUnNObHB2Z0pGcHVsdlJCdyUyQk5xSUVWM0thRCUyRlNQeW9IV3BjOSUyRmtRRTlUTHNzYWUlMkIlMkJwSnZOUVJaSXhETDFtRlE1b2xyZyUyRnN5bGZzbVl3VGdmTDNoVG1hUVRpVlAweFhPZVB5RWtQenoyQjc4TDJHTE01YzBsakI5bmJRZ2l5em9HUEdIZlViM3RFaTNwM3NBMHdtJTJGbFgzcnlRMTh4Y1ElMkZHUUllU2JyQk9CbTclMkZjdTVDZnlzajAwSTlmTUtJWGpLV05McCUyQiUyQlJLVXlTQXBDJTJCUVFTamkyVWhyTGZ4ZDRac1drZXUlMkZnQ2ZZTUpqeW9NQ1BwQVN3eWpKSFl3VmNKQ2R3MjJJSkJqSVJwRmtwYmpzVXFzUFBWVDlIV0hZd0s5OTJGQkNFRjdwanpaZUVnM2xsQ3UyMSUyQk85WTZqNmdKb1lnWUxPd2J2Q04lMkZscVY0ckIxbmd1dm1UOGdxMXcyakhVOXJlN2VtenhQNFhiYnR2Y1U2bUJmaUM0OTElMkJnMTR3VTRiSjUlMkJFQ1lzUCUyQlVpYktkTkV6RjlWaDdneWg4SUp2TU1nSnJmSE8lMkZvSE5pSm5qbTBESWxGSlZqRDNXdUJSQ3lvWmNpRXZYV29QT01QTXdMWE5KdjMyUWN4clU5UEl3TE5KeHNrdG5ieTNzb2NBVUZJa1VEbUlCWWQ2JTJCVWNrTTd0JTJCYzB1b1BOMVJSQzRoRFpqbUFQVnBHSmF2VXZ3THZBWHFTazYxYXNxblRoN2xjRE1ZSmJPT2g2alhyTkx4cm81aVg4NVhVSTk0ZmF1bHh0YnJXTXB5JTJCV1FmYkJpWlFTaDNIaTJiTG16M0hsMkFhaFFBNVp0SDhDODhOdUNYM0sxdnlkRDlGa21sdDFxUGYlMkJmSG9HTG56bnlEck5pNm01ZHY2ZGNIV1JhR2hNYyUyRmFETTVqMmFLZ3J0JTJCUGpxZ05ncWhnWGwlMkIyUmsyUGc3V25qJTJGTFBmUk9zTjY0eDJQT1lxUVZ4VlFDemJQUHNGUXpuTGxtdDhmZW9Wc0cydTZhNVVKamt0ZEo2NXl0aENEN3N1WVBPcGZxbkhwa1RhVjQ1ME5RSUxaczM1S1dpJTJCQUhJVzBmTyUyQk8zY2JoYmFTWThwYzIlMkI5ZHpid1I3ODhYOUdkVWJ2Y2o4c2VHNFdrWTY2VkF4VGF0ZlZyMnhNc2JkbnJWJTJGR204b1BGRnVnb0JzZmhWU2ZKeXVHVktiWE5FaDJrOUlsQXpVVjRsY25aM0FMSXU1Vk9HNXlBRTRKUnBzOXN5MjVqYTdVUE96cGtucVJzaVJ0WVZ0MFFndXhma0RZY0JQZGhPJTJCYmhHb1dwOG44Y21Ld3NzaUoyVEpzejVQQlVHS0M3NEZqaXo3MlMwclBVa0JTZWlQeHhjclFzJTJGbG4lMkZaV0djWjJ4N3paZlphS0dJZ0YwaE5GS1NaYzN3aXZHSyUyRlU3Mk9QQzRzYkclMkJ
BMFJvY1BwelNBUWJMUTJxS0JKS2pIVTlLaEVFOWlSQzdWS0ZvSFdaSWk2bkhCUmxhRFUlMkJaMkIlMkJhZ0w3MmxGY0VXM1UlMkYlMkZDYmFlZ2F3THhKbzhtNmQlMkI4czU4N3ZlM01zWVRNNDYzSmZyNzNuUVBCVm12RUpGUkljOHFUSW1KYkxyVFJJN1dqODZBbXo1THo3JTJGeHIlMkJmUlZTUEtGZUhwQ0pIJTJGTFBFYjN5ejZmZGdpU1JxeTMlMkJpTVU3RHNFYTRoUkduJTJCcVN6dkF6Zzlic3pGWGZoeEtNV3FnSWlmS2FzazlWZCUyRiUyQjJqdmMwV3hhQUV3VjlnTjFSYUQ3eEdrOUdEdTlhT04lMkZMdXFBUFgzSWNncjY5elhnR2lSa2lzdWt4NGdrVVVuaDRIVGxQWTJXV0xrZm5acFZ4Vk0lMkJTbmdxU0JyWk1oYUw2eXcyMUdLRFBzSlIzQUZJbm85NFVaYmVOQ0R2R1pUcDQwejdHMkNoQUhCSzklMkJLS01qJTJGYmk5JTJCVHN0eUI2Z0hGSXdpWkFQYnAxeWxXRlRGcjFDcVRYeUglMkJkWWVDMjd3N1FUZnRIZ2ZXVVlKUkN1eTZtempqakt6ZXJzZSUyQmpHYjN1WXd0ODlEUVBTSGRWWFIxMWxEeUJ1RnlhaiUyRmQlMkZiVHJZa2plU1l2b2VBT3U5RkVsQ1pnOG5JSSUyRktsdHFhTUNnUW4zTTNWJTJGcTJjdVNqcyUyRmdMdWdpUHNhbFRkVGd3eSUyRnRWN2NzSnhQdXk3ZHlaNkZIZFp3bXl6T0t4ZnVKMmoyQXU5YVhFOHN4cmppNzAzVDUlMkI4WVBQWHZtUHVkSmNOcDRLYUk5c21TM1hnTEM2clRZUSUyRnZLJTJCSWN5SU0xd015NVdMYWJteXNYcGF5MVB0Q3hLVDFLVkszJTJGajlPUVFYU2pCUGs4YW1kSGtxZ1RRb0N1R1NZMkpSM0dFblQ1UGFYUlVYRGp1ZXpKMTJoS2hUT0FoMEZNWE1ta1hnQzJVbFJHZHdDd0VwcGJmcE1sNHdXOHdjMkgyS3ZEUm1DTkc0RXBGZiUyQnFFRHFHUHg1dG1ycEZRaUFFcFVmaEtQQVRxQ1doTktxWlklMkZGODRiTU4lMkZGUXNHb0oxUzdKNUszT1pmTWt0WTdWSTBKOG8zWFdtWCUyQmVWZzV0M01NaDhHViUyQm91MXEzM1RWNFNITnZCVkpWeGxaMDdpJTJGV2hwMjJyejQ5ME9WM1N6Yk14eGhJNG53eklPNWtySldHdiUyRnFzdEo3c0xKcHVTbzE3Wm56VkNnUXZoUDV1dVQlMkJuMzFFUnMxdFA3WXRrJTJGZUFsWDVRZGpiMm51Z3czZiUyQjlsWEUyRlk5bFB6bHcyaE4lMkJmbFNWUSUyQm42VFdtM1UlMkYlMkJONTdxcEUwcFhWSElpYUp3TGlaSGJONm9kM0k1dGhWeE45SHV5T0JxZk5TZ0V0JTJGMUkzcEh3cXZmdzQwZ1Y0dmc3UUZscWNNbW9sWWlKSWJveU0zOWgzVXA2TG1VRjJZTlhEOGZ3TnQ3QjJqVlNxZDJIYWpiVzF1ZTdRaTJQdWJ0UVFOdDdhazJOZ1J2RDQzcjkyb05QRXZRUGZJRm5RR2x6SW9hOUxwck4xOXlxRVJFeGVBNWE2c2UxaTBWOTR0cG9HcU80Vm4lMkZqbHV5R2JlQUM3S1FIcklPNW9CQ3Y5Y0JsUCUyRjdzVkI5azlRdkhYU1pDU285aG90aGpxeVVoZmQ2WUdGVndRcFhkZ3NaZDclMkZiRUdFTHFpJTJCWUJsM3ZOWnNuZ3FPNGo1cHViZFZRWk5tQzFYZ05Yall4YjNTc1VIY1FZdGUzdGlBMWpxYUwlMkYlMkJnSldwblFMMHNhSnJ4NmlWYnFXS3p0ZFNBOTBMMmFmbk1iZkdHSEh4JTJGUlk5Rlc3STZUcjF
5UiUyRmViVEdIRDVITlh2dnRWdU54MVRkSm1IRHVUODR5cWVrJTJGZnU5ZDFwOSUyRnAlMkZvZGkyN2NCUVgxOU9VJTJCVjVqalpOTE5WU1NOME1UUjB3d2s1U08lMkY5bVJsblVFc1hYJTJGdTZpcTBnb0FEZHRJUEhLT2d1OWxjUXQxSSUyQlpza2ZIeTklMkZ2MW9La0x2cFh4a2ZsZ1FlUXpxSzNKRDFBaWZHeWQ1M0l6MTFVVlBYRlMlMkZMTGxwVk1aWm50TDRqZVk2JTJCdWtpSDRBdEpWNDc0WXhjbzcwaVN3VjVMWWVuc0klMkZaWlclMkZmaUl6QkwxZW9PdThhNnJtalV3SUY3S2ZaMVQ1ZkVkd05KMGUlMkI4dm9MUU9OWXoyNE5Fam4lMkYxNGpkU3RjRmYxdEMlMkZwWUhjQnNIZ09BM2clMkJ1ZVNxNldnUFFOeVBzUVk0JTJGT0dMa2JqRjMlMkIlMkZrQ3pCOVdhVVhXViUyRkJNS09RRmRBMkQ2VGJNNDhqU2FQbHZJOFg1dkQ0VUFOVUxMUlp0RUJkNkFkckZGdnppa1NlT0t6MVYlMkIwcjhtZUVMM2d1WGlyWVZHdU1LQVpFQ0VPN2xGbDdKMklGUkwxZ1clMkZPcnpSSXhpVCUyQmNSRGIxOTlxJTJCcENacSUyQnd4aUVGNUlHSkZlVFgwJTJCMUJhR01NYkZWYUk2Q1NGSSUyQlRLdWNEQ0FXcTZoZHYzTjg1TkxwNU5qOTVCcGs3VnJWUWN4RUtaQWZ3cVN0R1NBRjBrTkMlMkZkZ3c1OXVTY1ZxOTV4U1FORDRpMDZ0OE52ZENQNklEdHdzaXY5bmtickozQlpEUnolMkZmUXJZMGRTUGJoTFpyY08yRnNPWHJjY1lCQzVJZVFhd3RpZmIybGo1UUtFNFdkUEJRQWlBeXAlMkJacTgzTjFJd2JzSDVqMXB5TVBsUmxEMGNvNkx2SWxDbHdBMUZTWkh2TmElMkJvdkk3JTJGaWslMkZubEgxYlcyUDJldU55eDd0RDRYbjVBT01PQ21LS3BXYVM1NmFKczRhTkkwZ0slMkY4ZFglMkZyUlR1VTNGTFF1eGRPdTVlN3JTUlJ4Zm1DaHVBbXhkRTVKSkF6a2tBb3RrRyUyRkpxTkllJTJCQmQyV3Z2RG56ZSUyRmh6WHhGaVc2STlUYXpGTEIzbnNpQk1KM3hSVVBPMU1qTWUlMkJTSkV1RmZBeTJJTndIQk1KY25IQXAlMkJiWDQ1M0h5JTJCWCUyRlI2U1UlMkJVJTJCeWxIQTd0eFR2OVJuZFlCJTJCb0JMc1Bsemtvb3RGSncxUWRiT1h6RWswSXJ3ejVqUEZzTlByVVczdDE1R1NFaHBwQlhQSUlYNTFocjFmR3N2d0gzQWRVVnhZME5BJTJGNUxWQSUyQnp5RTNLRThFWkZlNmdwSFBUVDJnaWEwUlZDTk4lMkJ6ODlHTXZuRkZXWSUyRmVkWVR2cmoyZTNDZmxMd3lJWWFlQnNZMmlnT2JyNXczMU5QYUFTeFNTM2ZFTjdsM1dnWHBLUndFZWdIMWdUaVZEdjlkS1NnV2xQRGYlMkZuSkxWUk9wcTZkSTBDbVVMUWxoUSUyRjRKQU9YOWt3UHFGU2REdGhFeVlvcWJrcjZKNTJrd0MlMkJMenY0WGhvVWJOak9QM1JYMSUyRnN5VmVPJTJCUnRqQVZSZW4wcFZKNzA1aHpUTUQzTHQ2R3oyQ2JqQjEwRTg5ZVB5NTQzSU5YS2dwSDdtZmd6TkZWTzA5cDhXaWJZOU5ldXRSMmZyWFVLM24lMkY0SXBXZUY3ZFB4MGdOJTJGMGI3NzFORmdOMlVHZ1VCMkFuZUdZVzZuV3B1S2I2QlU0OHlvc3VhRmZ2eUJIZGtDcWZzaU8zSkU0d3hmY0xNUHFPYmZPdjh0VndCRVNuS2E3ejlDTHM3aFVFVjJ5N3pZZ3ZwejF5TElneE5WUjF
NeiUyRnBkZDhGUDdFUXZtTjUyUmh2cHh4U3JRUWtVdm1uTGV5Tm1XMTJZNDVacGpTbU5heTEyVTB0emwxYjBJN1FSODJXQ01pdjhDOFdBRDN6OSUyQk9PdFBvU05iSmdQUDQyTW5uamFraDhubWglMkJUeENmbjNLaU0xZlBvYW0xdmgwcGwlMkZNVnIlMkZKQ0RMcXBqQjRENkRKR2RkUHZJMERMWmZQOSUyQnF4VE5rZE0zNURJUmhqdWVSYUMzN0JoanNBanEzZGZVSE1jN01Pc24lMkZyT1ppcVUlMkZUTUMlMkJsNHZRRGlEWUpsQVNyWmV5dlpyck1xWnI2Y2oxa2RIWWl2MTFQeVJaR0hCZXdkZGhLUVVrMHJlWlQwS0VQSmdpdktHekgyYlg3N2hhODhWcEFRcjBTMW12N0pxSCUyQld2MGI0TEpNRVc3YktPaVhrJTJCeXZHZzhodSUyQnclMkI4M1RhMUF3elEyQ3lyTkclMkIzWmNCUXV3dCUyRnBrQ1g3V0JyaGNDNDVrVkc3ZDBIdFcwaTY3NWo5JTJGN3B3ZERCJTJCOTJKc2RnWVRoaVBmM0MwbXp5Sjc1cGNKOEhZQk84OGlZWkJrME12QWdSZUZHeVNsWnVJQzVHJTJGaGdNQ3ROZ0NVSWhucVR6akNWJTJGM0lxZWNrVHJrUFhHc0NFcjJSSUxRVFZhUVp1Mmc2YWlpN2JidzBWQURrMlZqQ2YlMkJ3Z1cyeTdWbVVIYXdNcXBOdlRId05UU0huckxUQUM2QUolMkZGJTJGZlIlMkZBaklEZ0NnNEZjYWJFeEZvQ2ZpTDhDQ00lMkZBaTd1YUJaTG8lMkZYOWNQM2MlMkJqdW0lMkYlMkZVWE5rS2VVWHpkeGJ6d1F1QnIxaVNUTFRlZSUyQnN4dlQ0VTFkWlZvano1emQwb2tMWCUyQllCQ3BXUExGZGZDb1FGbzl1dFFQWWI4NzZhQVgycHc5c3pCVkRhcjFmZlo4T1hCTE90MlhGSWU1ZmZLa25JR0Nicm8lMkY1ZlZQQlpkM0Uxa3VXWGE2d3YlMkYlMkJzcyUyQlNuNlVMeEJnJTJCM2xZNFVnWk0xOHVMYmFHUWVHbXZMR2JScVVoWFJOMDdNT1QlMkJyY2VyUm92azBYYnUxWDB4QnZZWjVmcXFZTGJuZW03eWFBNTR2YkxINElnZWlhVXFOcVBpdiUyRlFCRFhvYWI4SFNZRVM5WmZSQ0lFNjB4TWlaWjFPWTVmWlRTU0tPQjQwNmNmZHVXT1Q1eXEyZkxYM0lCSzd4bGZ3VnRCckQyN0dlYkhhakZnQWRJTXEwcE5NNHVMQyUyRktjc01wOHVacnFPYlRRMkkzMGQ1N2YxT3dtblJIYnJ3eUVkMUF5UURNYlZzR0xBSTltemN3TUZpJTJCNG0xT0VlTVZ3ZjRsemYydXluZXJGNTcwa1hwN2NwTmZKczdIQmFTaUdzMDF2JTJGVk5KUEVXWEclMkJkRUhyRU45cmRQdDh0em1zd0dZJTJCdXVCalZnR3ZiRFlCU2dBMGxGOWFQJTJCb0MxVXhBNkNOazZvSDZUZzFqdTFFUFFWenR6QllWcWxQaXFYQXIwbCUyQmpqbnpTQUVoR0dZMEY5bGIlMkJIRVN3bU41RXJVUFVaT2JmT1NERklmVVJiRHZSWGJ3JTJCTkFJeWQ1cEpKWXlzMmNOSW5WcjZoaVNuem9OSVF1RDdxUko5TEYwa2hTS1prYlRCTDdXWjlNMk9idDglMkZnWG1MYkdEU2JhUDZpRmYlMkZZNHZHRUJLWktHQ0p4M21qcGhwUmNOMlJFcmlHJTJGbXo2M291NGJITFQ2dXdvTkw2c0o2bTVQUW9pJTJGM3c1Z3JKNVdQdVh2bGJhOG55UDY0ZlM5aSUyQjlYQmFXM3R1bkVXNTdQYnhWMXhMSEhFQm1Mb2hseXlBYk91cjVJd2dWNXFVRmM4bWh5R3R1UDQxJTJ
GenZMNklka0czbG0xT25lTFExRmFMUmYxcnRCRmY2RCUyRkRDQ3cyUiUyRjlITGJyS3FTJTJGYm05U2VCM2x1a0FscTJXRnpoQiUyQnpUSk1ySW1FbE5nUG13eFVMYTc2bDB6bUY3TFFrZiUyQmJ6bSUyRjRlRlZkc21ZbENOOUNDZVJzZWswNnVWaE9idCUyQnFHOHdoJTJCNGhOM1RDSjdVaWJ0eENKVGhiOU1VckhzUkc0M0diY3RyUkhNWjQ0TVhCa21ld0Y1N3BRUGJrYVNOSFFZZHBnUTZ3VXNYSnkxN3dIV2dQRE5pY2N5Z1FHaEdvNHFBczglMkJMdUolMkZQWE5SUlBwSmpLY3BjTmJpTElOMEZqZjVtdmZxJTJCcXVFVXklMkJzeTlJQnNrM2hleTVKNXN5RDhCYlBFMSUyQjZNWmtCQ3BjMlJGaGt5ZjNnciUyRjV1NUdKTWhHNGNjRlN6N2o3VGZFU3FJSFFUMW5SNFN6TGpadTZHQkswZDdjQ3NDSWMlMkI2NGgwZklPRGdBT0lsTiUyRkh2Y05xY0FJQ3dNdzN6d0J5ODlRRXpydiUyQjN5dnQwRmo1YU56WTlmdiUyQm4yN3NmejNaSTloak0xOWpSOTFxM0trQUNiNm1jRWxabWZ3MU9KV1l2VDA4YzgydGVpQzJ1WjdESkIlMkZ5WnZCeEZ0TzRsUk5xU0M3S1Q0U01NJTJCYVl3cnQwJTJGNnR2RXp1bzBhUTZpclpZaGZxWHRYek5kNlozSiUyQmhIR01HWnV3ZFFaeWJnRkliYnZJd2h0Nnc4M0V3dUl6d2x2YmR0eVZKVXVlRjkzenZpU0pSN2hRZ281dWpBMTdKc3c2JTJCeWIlMkZDaGx0V1d0VjlQQTNaeVhFa1NmdHIxMGtTYndoNTNxVkRYQ09SS1BNMEk0RDNmVFJ6SFNhOUhYU3o5TXpwJTJGQlFLc0dJS1lTUUFpT1IlMkZ1czZ6VlhnenRsQzVqSnVhWTgxVHM1ZjMxZVl2S09MT1daM0NFQnFENzdZSUJ2V1VxVFNvanExNnpyQUM4blJpYzJ6Tzd6T0J0cUslMkIlMkZOZmxvb2pvdUMxd1JoUWIlMkZQY09mazMwVVclMkZIcFdUUm45U043SjBabFBITyUyQkhBRWNWZDhEcVhxU29GRDQlMkJUdDhMaXdoeWROaDJpJTJCTExPenh2M1UlMkY3ZWZsYiUyRlpJck5JOG9Tc1ZCN0huMmZoVjFZMUFNRnNVSEppa2RKM3puSXpUTTRTeXRSRmNwTDllWDVjb3VSd1h3UnAzNzFCdTdJTVlvREZ4RmVQSVFMUEVabGw1NFlFT3BreiUyQmhSWGlKNHRjJTJGcSUyQnZaUFpXODgxaVdmZ3pMSUxPV3BqUXBiZXJvWUhyOVlQZmlISjgwdmtXalhRVFY3QVVNUWZnVW1mNjBTbjhmWXRRYXJsUHpUNzZYN3Rib0hENXljUW55R3dmaGJPZ2NYJTJCaEd6akR0UDhpYWFmV3Rvenp6cHY2S3k4UiUyRjNmRDRmeXY5UXd1aDI4a1ppQ1J6WGhxbnhhV3dUQiUyRlVJNzMyMFloOFJqZFklMkZvUERTVDRuOHJkR2pqQnIyZmJySlRLWHZEbFlLSUQ4Q1ZGQjhIVSUyQm9EUFBwYiUyRjElMkJ6dGJYVnMwVHF0RVlkb3V6UTIxN3hCTlZvQnp6UiUyRjdMeDhiWFZJaWZhR3Z3Q3lXZiUyQkZpJTJCbCUyRktKTHMlMkY0a2NyZW82M3ZkMVBpOU5TSlVEJTJCJTJGcklEa1FubVM3UUpJbGFUM3h6OW1MUWJlazJrcSUyRjMlMkIlMkZSTzN6MHQ1RGFTTzZYUXhjVGIlMkJDWk5FREhMMGU4bCUyRnA5MXhxMkNCZUxyS1BIdGYzZXA2ZjQlMkJYZ0NyWHlEdSUyQnp3SW5jUHJYOU9sJTJGOTVaQ2FlSU5Kc2ZFUEt2S3Q
4VW52OGg2U0ZleEZ5U2RuODF5V3U1JTJCTDRqVSUyQkd2MVNEb1FUT0pjbU00ZkNaU2pHWThXMlJzUk1odHpKNEU3UzBDeUEyS1RHOURhWmNVTlRHTTBEWFlVN2VyJTJGbjZJWm1yMVd5VnVJUE45bHJLQWlYa2o4N0YxdG1uJTJCaHdwd1JsbWVXcWlZcVNBVlhFZDhwSFF4cXFwUzZDcEx4cmUwWXdGWkNrS1pSeSUyQiUyRmhHUHR1RkZUQ0FseWhETWRxN3pMJTJCOVRNSW1DSHQ4bno1VnUyRGVpSlQ2ZUxXbFUlMkZnTVdyelIlMkZmU0ElMkIlMkJSaFpZYiUyQklMcDJsVkFkd1JNZGFnbU1HUGZYR2ZuZHNQTFMwbXB2aGNGNk1XdGVMWlR4MXdyJTJCdWZBWnVNVHo5clFmbVVNR1dIdzlvWlBoUW9LRGNBTnhhY2VkU0V6RHhTcWxuSERIR053dFVFR1kwcXp6QktCSjdsdGZkbUo0NjhlT2NOVWRGJTJGbTEwVVNqbEgwRjg1TTJjTkNRTm1sVExESDdMVTdFS0xuVUNabU9QZWFlWCUyRnN0T3gwbyUyRnN3akJNd1FpdFlBZk9mJTJCNjhNTm9mVXplVTVYOVlLVyUyRlBQSkVSRUZmazlpTU1TJTJGbnkzMDU0JTJCRCUyRllSS0FkdkIxVmFMcGpmVFdsNjZaUENYNEg3R2g1UE1pSHEyJTJCbno0d2NBMnEzUyUyQmJnUU0lMkZJOTMyVSUyRkdjWUt1dlQ5JTJGZUdQZEhFREhQdW4lMkZ4SklzaVBHa2MzZXVxaWE0TG5SYnYwamtrSG05YURyb215VXh4OG9icEM4OXhqcU5XOVhMRE4xWHRFZmkyb2NCZEx1Ukh0aHRlVmolMkJqUyUyRjFObGVSbk9aR3lwN0NDJTJGbmtKV1V2RkVzUWtvOGJWNjYzbyUyRkQlMkJXcyUyQjNsUVhEbnpEeCUyRnElMkYybDE0eXJkVHZ4V0JPNWZkZnolMkZ3RFVFcVRvR2kzMGFkakNEcGwlMkZZR0pubms0eU9ZN1BnSFIxZDFFZmdvcjh2SmozdUxsWVFROFhOQTh6Tk1QUnR4OUhCNWtDS0c5dmVCeUxzVVY3Ym1TaFZnUjAlMkJCYTlIMFJ4b2IyWEpMUlhZaDFiZ0Z5c2Ntbm0lMkYwM2xSZ1E1dzVDeWUlMkJJbCUyQmV0N0lMdGJNeW4xQVRoSUVMTiUyRldxbXhQJTJCcHFvQUF0OEFkJTJGQnMlMkZCeG5oekRhTGQzY1FUZjR0M2RYaTJiUEk0VFc1a2FDOG1yMllDMzQxTzd4NGVuSjl4JTJGclhJJTJCQXJQM0NPMFFMS2E5Y1lPV0k3VUpnSE9ZSTJla24lMkZHVk5PWTNBNEVKN2tUbkJocEpMUUlPZ0IxbWlNOHp0d0hjNUlodkhvTTh2YiUyQldsMzJmazh0WWpCTnBYQ0VnWFJZOVRTM2NIQjNVQkgzM2lrZEVxQ3RURW1DTmI5VFQ1OSUyQnZYRUZFc3VHeUg2ZCUyRkd5enZsS0VxdFFEWXM5JTJCMGNlN1h4VXc3a2VBUzF4QUcxZUIxbHp6NGhnakRTMHdVNXFWOFQwUDBWUWJYT3labVRkRFdsQzlJWVJON1JLQSUyQkRKa2ZBJTJGWGxqM0pmNGh2RzZ4ODZ4YWFpNFNGZlQzNmJsWjlPajVQNjcyenhYSnh1WE1UWCUyRmVnaDVYU1d5YyUyQjQwU09RVnp0MTdpZEpjSk44dnc2TXFjaGwzaGlhWXB4UUFUMVFlViUyQnIlMkZzM095R0luVVRFc1RyUjlWVDNnWmEzRHU2MDdSc0xURERKbUklMkJ4VXVFb3dsJTJCcXlsNyUyRmNJbzZoSnJ4UTFKZEdyUHBjZWNMOEJkbG5ObnNRcnhKaWRWRFklMkYzOW1MUUVob0lRTGUyVW1IUTBJZ3QzN3Y0JTJGU2VHbnR
DJTJGVFhZYThXJTJGc1pDbXdraGJnZmM2a2Q3TEtsOXpwbGpPN0dwd01BNUt3dkI0ZmxSQXpzJTJGdVk4VzBuSkFmVFdQQ0Q2blNBam9wb3FacDE1JTJCNmRMaW1JWnNrN20wSklaaXYlMkZFR3NTT0NtajFuenhObWR2QmEwTTd0V1J4Um1wT0cxck9HRDVaNHMlMkJlYnhheGlNZUFleEs2akxMJTJCYklPZEFEMFNJazlqMlBYSTJ4N0NzNGdQWTFQJTJCNXdyZnJ2a1NVbURGdW9XbkglMkY4M1QlMkJ1Yk1lNDBUNUFEN0dhbmNyREd1ZUs4NUUlMkIwNU1KdlJBY3l6UFNMU3BSVWRyeGhPbW9oNGVtT0VUeXUlMkZXZ0Z4Q2ZaZ0M3VUVabG9RSEdaVkF6V1hYU01pVkRNYk9ndk1EV1JCQ3RJSDZKWVQ5JTJCN3FYUDhxaXljV3Y2enhTQU5tSVhXeklyWE4yYiUyQlhWVmRwWmZaTmhVNU5WTzN4SmplJTJGN0pxdUVJbmhNJTJCNiUyQjlPdHZnaUdaZHN0MCUyQncyQXpNeWE4RXg5eThNcEFMJTJGVFU2T1puMWhNdXJicTZDa0Nla09MZ2F4WGw2Rkd0ZnVMMnBuQ0xWUWNGb21PRFVOYndmMVhidCUyRmY4dGJldk8yS2RIcFZYZTROelkydktITFF4QXdVUnklMkZkclVpa3Y2bkR0RmN3VDlRVlBQSnczTlZWVEt4UVVrJTJCVHZyaEFLUU5ET3duTHRxaXlodnhRWGllenZjSzYxWDAzbzlKNzExRUo0RkUxaDk0NE5QdkowN1ZQOUpucWVCUHB3TjJFQ21SZm5JdVZsNVZ2eXpCYWQxUGxoWnRlbHpVJTJCcWN4VGJKa0ZHb3hxd05xY0tUdUUwJTJCZFZ1S0o2bWZvWHlrbGE3R0NRZnl6S2hFR29xcGNhU3BpMGNyZ3IzZXhSV3F5UTVwT3JqVWZhak1NV0hhUzRJSVROWW9hNXJuNjJEZzlTVTBVbjhnZTluMGJnbERrYUJoOEVWTnpGRk55aTEydSUyQmpadlFROVQ2NWE1d1doYk5zck91R2VoZ2RENDBzNVFhQjlUMnlFTWt3VWtvYjZ4cHolMkJabUpmR0xpbElVZ011MmpNT21KVkZ1bkNrNEI2TkR4QXJFREZQUTBVdmtudnJKSmJRVElUTDJwJTJCJTJGejRvRXBhVTYlMkYzSHFDZFhsQkhHakpmQ3dIRmJlZ3RTUWVBUzFlc3NkYUVqV2dWZSUyQkF0WTNtJTJGMlBzdlpZa3g1VnR3YSUyQlpkektvSDZtMWlxQ01OMm9SRkVFdHZuNEladSUyQnE3SHZPdmpOdDFsYmRWcGtrQ0xqN1dzdmRBV1RYdXFnOWxPT2R1JTJCdWRqblZLYW1tWDRyeVVON0IlMkZnRkwlMkZYT3JGVElHTHdNM3JtaGJBd1RNb2Q1UkglMkZEa25yQTAlMkJ2VTY4T2dOTmclMkZsQVhVUHI1Z043OHRSM0dJZ0hKeCUyRnRGNTNmZGVWS001WUIlMkJlSXQwbnQlMkZSUFpDa0JHQVBZVmpnd01YdWVQUUhaY3lIQjUxbjFQNzZDS0U4cDFWSExDbWV4UGFLTUp2N1lUOXdVcyUyQlNUYXJuJTJCYnRDcHpxaGRnQyUyQnFlQWhGaTFuMmc0Skp1cTdpOHlzb0ZzTVElMkJha3FxbnlJR2VUb0JLRzFRZXZmYWh3eGh2SjhPU2toeUY5dzVtM3NTZWdGUFloSnJqRkJMY3VHVUFsbWFhZWdyMnV2RXcxVjJ6SzljV1lmYmp2JTJGY0NNJTJGYWFKTWlsN0FRM0RzMDNoZSUyQkVHYUZaRzZGZnRwbGV4eVBDS05lYWhtN2Vzd2Iza09mMkdMJTJCa0hKTzRBV3N2TGhpbENmTTlMa2piOHN4SFB4aEFWV2ZBenN0WkhzNFhmdVU0UHFQZjR4YiU
yRnRoTmhUcG9JOHZxNGZpdnMxRndkU1RUSFg2dEtCeDRUbnM3eEhlRjlhOGZzSmFaYThGS3IzY1hmUWZDczJHSlElMkJJdEJ0REhjNjZPTXJ3YVF4dEIlMkI2RlpXcEFEd2pRV2RTbGFsZVlNNlhVbUd6JTJGdkNVJTJGZE5GWDVHY2l4dEM4TG9vSWpoc2ZMckRaTEhPaVclMkY1VTJpQ2t1MGVGaWFkWFNUR0I0dyUyQlRjc1pIdGh0QXppbVNCemhrSzJLJTJCYUZsMlB3R1NOR1lRdnppNksyODFJWERhVVNSJTJCWDNaSGg4UDBtWURJbEV1YTRBdlFXczJ0UnlKdzY0OUs3QTR6SlFxZ3J1VzhMMjlWTFZnNVNTVU1DJTJGUWQ3VnliYWxLb3dVU2tXdGVha0dDN21pUHNUdiUyRkd5aGNhQjBLeUs2YWRqdFE2QVV2SiUyQmk3SXNoJTJCayUyRm9OMGZ4emhyTU5mOGFQNSUyRnpTYnR6NVFicW5DRHJOeDU3OWhLWEVWQkVRNzBjWmJYU3JrQ2RocVVjOWMlMkZrQUt3dXhOZlA5Q3d2bVlFJTJGZEhGZm5xbHN6RmtXOUlORzclMkIwSTVYdDVZd0RUd05iUWV0SElXczUwYXhYZyUyRnJoRk9CS0F6Z3pIVkZqQmxSaDlYSlpTclVuRXNQQmFnd0QxRDdFOWJlSEhJTmxIbXVobzVCRHlleU85R1FrU1NWbGYwdlVUNSUyRkdHcTRnbm1yR3BuZWFEQmZVU1FFTEtQVTN0ZkNkZUdXZllONDhFUnBFa00zJTJGSDYxdzFkYjRJUldMRHJIZWZENGhjJTJGSXRRakZ1M0JvenY2MTBacXdYRW1mNllGaDhjRmdoMiUyQjhJVVpKZWN3N253ZnZoUmVkYkJJM2NGcEJhUGpXWXZMZXkzOGdYMkNodmNOb0tqYjNBd2l4QVZ3Tjdlb2lVOFElMkI0SkNZdmJXTiUyQkJDdlRIdkhjZEdaRnhZeW5Sd3dxeXBEZG9SUCUyQjZXQk4xUiUyRm9nbmkzdmpXM3p6bnFQRVZ5RGE1JTJGUVY2TUZMUUFWQkdHdzNxdExQYlNrNCUyQjROQjB1d0RxQmZRVUI2NERHSnJPSVolMkJXMzZVTXdpSDE1ODBLVG5ia0ZGaHQxanAwUld5TEwzdW9hcTFvdnNOQ3oxNG1DUGR3Y2VjSiUyQkl2Rnd3eGNEVmpLdXllR0g1b3N0c2d5Rmg3TUJMZ0JLSUtYVG9KODc5ZGFWRGtoeGpPJTJGdFFXT0RPMkJCY24zUzIwNGc5bW9SbiUyRkpWRW56cCUyRmFZSDdJc2MzS0ZoYW1jenRCWSUyRlROMVUlMkZpUzg0QTBCUXJmamh0MnhSZjlISGlpQzVrcG5yaGpHb0syQVl3SGVLczZ2ZyUyQlZsR0RvclZFNkpSeDVFJTJCSU1WS0FKWnM1OEpJSmNGcm9ZNmNyZDRHU0VJVW9UNk9jbFYlMkZzTjFZUnRUZXRGVHVKZVF0UE84U3pBRXBOcW5GWiUyRiUyQkJlVjBpb1lzSG5xJTJCZmFBbFl6RHR0TU9BaDc0UEtBSVBPekJIY1VDR1U5ejdxUVdVcTVvdkUxbkFrWjdzN3ZaNWZrJTJGd0VvQ1VXaUllaVlrS2tIM2hacG81JTJGT3VvRlM5ZmZnR1BjeDgzR1h3MERGSWlxV3h4VFdCbXppYUxlZlNTYUF2ZDBMbnFPTWE1UjlUdE9jYTVSOUJVNU5lZVJxZmglMkJ1SkVNU2V3Y0JVcEplRFA4Q1R2RHk3bnJ3VFMxekQ2a1J0cENWNnA2R2x3MkJVQUtUdUpkRGJTQXF1c2tiZVFmUjUxc1p5VnJkMmtQYkclMkJERVF6enFaUkFPcW9xbE5IVHBoNnYxbVRQNndEUm1QZ1NJa2lOUHVJNHBsUDVxTUlDVExrRWp2ZFhMbTBzSDRWY28xMUxYOHdPNjJyM3M1Wjk
ydWZidmJCeEFtaVdMOTNkQmRnb3NCY3I2VmFNeDFGTGslMkIxREh3VUZ5d2R4QkFOdEZUWEhCZEFmTExCSkhBYXNWNU9qUUolMkZuUlNWc3JZNDIlMkJ5UFpiTyUyQkZtaEtIT1FLcUZRJTJCdFBna1UxTEdaYWI3YkpnSHd6ZFFCY3NqRkc0MSUyQklwMlV2Uk5vWFZ0aWlqRkVIT25paE9FQVNlJTJCemRPOWwzUUFlank2TXZ5TUlvM29HeENFbGVHJTJGWGxOa0QzeGI5Wmd0N1lScjFsb1FFOVJGcEhNWVpsWFAyMVlUM0d3NUJsbWZvOGtkNnFjMjl3TFVXZlFTbVlLOCUyRm5RR0MlMkJPQ05jcVJXVFZ4dFNDSlI4d0c2ZkRXcTdydDVHNmYlMkJsRmgwRk9SdGxzWkJHbTdUMUo4cFk3JTJCQkp2QkZHRWpnJTJCenBGcXhhaEFUQThNaGQlMkZHaDVQcWtXV0wlMkY1TWFLV2R2eFp1emc4ako2RjVsOElwQkQyNEVxdEliM0NqNzNQZ2x5dmdoZEplSHc5VmZKSUFyazRnb1NPd1dMdlVLeXlWQUhIQWZRUm1JTTVteTUyQnVXelBxeTUlMkI4anppVmlxJTJGNlNoQkpLOWdFcG5KZU9LcVRYbEdqN0hodiUyQnE2cGJMMjFRREhCSlFEWER2SlpBOEZva1FOTyUyQjhOSWdHTHM4VTNIRmYwVmVIalZJNmk2MW9IM3glMkJzM2VQODlOSldBaHBMMFZpWlhUWnAlMkZteGhxTzRGSmRmeFN5RUNNb2hRdFk1eUFUWVNkUE9xYzlLQ2E0ejkxeGQlMkJHRHVSb3FHbU01MTE0bHA2NUdlaVlQZjVDbGJCcXVrakJRWDNXcUtnTDB0ZmVnYklCcHk1b004NHpUNkdyYnUlMkJjNTlUQ3lZWHhKRHdoWDVTQ3BNdVh3UUdNMTVLTWNCNzJYdFBjbUVKeThJeERoYnFTdkxWQWJEcVBwMU11dHdKMmR6dWlhYVlkM3M1OXl3dkp2anVKYkVWZGVoWlpESzVrYzhhcEFqTkwyJTJCZFV5SFo1S1ZpVElRekx4ZGs2azZ6bFV0VTJNZ1lQcDNOZGxnUmp2aVNlckhlVjNtN1N0WjIwUEJEVVBFVzBXdmhCWXltQ2FoZ3BuSUF6QjJvJTJCbDJFY2FYUmdOaFRLNGRuOTF6UlBZQ3BHUk5QcTZKTCUyRllyakglMkZlUmFHViUyQmI1b2lEcmUlMkZCMXYxUiUyRm8ybCUyQiUyRlVxNHBvalNJMlIxeUttJTJGdElhZFBacHRNWXZtbzRIWmgxN2NxQ3k0eXNqJTJCZk1TZ2RyTkM4Z3NUYjFZZ09hUGdYR0FzNTJaRkJLeXFaS01ETVh4TTZ2UjlwQ3NQdFl6OGg5TGNCS0p2YzBYZXNDeXpnNmZBYWFmZSUyQkk1N0NxVkNxa1JEWmRDMWF4aXclMkZpSUZDcGhLclR5dGkzM1Vvbk8lMkI5dWo0TUZmTUE5WXBsd3BYemolMkZPS1lLZ3VJUnB6TmtiMTNLSlVpem96amdkdk0lMkZwdG5SMGslMkZKJTJCbVpORWpBWVFzOGtISHdxc0FOYXd3WlVPaUxnakl0USUyRkpKUTEzZGs4bjFJcE8wWTFzREFCakp2RThqTjVXJTJCVjFrRVpPRmJaNiUyRm1nZUJWVEglMkJIMzlXQXhyeDhlZ0xuUGZwQTNpN3JaNHJoJTJCbmQ4eU5sYVF0anNFYWQ1cHVnSG1PUCUyQjZEdXl2ayUyRmdSR2s1UWlCRllOJTJGNXElMkZzSWw5S2JHTHR3VHlaRFVic0pmWlVNTnVJQlRydTVnRE9wbDJrbnFQVkxKbmxGa0ZlNG9DZiUyRmNpdUZ4YjJqeSUyQiUyQkRDRUFiVUdUWkxlaHNDVm5HZzFjcmdib016TDg1Nk1EWjcyMkkyZURadVZjdENJbDFqdnZTNmZ
1S2xZWFlSM0NDTkZQbTRiZTNyaGlkMFFhWFlpSU9jczhERXkxdndlcE83V0FnWDdtTUJEUk1QTUJPbjl0ajhHU0VjdkdvbnlEYjI4R25wMzJkb1VHMVNFOEpJdkFlS1J6MktqY1NXU3FjU2JFS3ZFdDlXMzl0anQ2S1JqOTlWVzNnckxhcEt5b0pPYlZxWFBNUTBLU2JyJTJCMFlQSlpvJTJGaFFmQ2Q4Y2JZWFNnY3BJQjRFeWxkMjUzUDZBRjg1SmMydFRtaDVrbHFISHRxS2dJd0M5QzJJR2dSSjRlWHFUWmNhNWhTNGQweGlSTEZ5NGpLMXJrQ0ZHRTBKQTBvTXplWVN1NmglMkYzemlmenNVRnVLc0JyM25UMktHWDljeUw5RkhTdjZSS01kUjhveDc2VzI3NldwenV2bWdIWnp4dFJUSzd6RDFLQWNaMWltME1CTTBlMkk2MEwwak1GSFhOTSUyRjVaR1JBMzNDUTdhYW05VUpOTlh6b0pPREklMkIxJTJGYk1XMWxmaWo0c1pQOEpKa3RQbCUyQkJDVyUyRnZlcjhYYXQzNVJLdWpDZmQ2Z2l5T0hxY0x0STAlMkZ5VHZoeUlKSDB6QjA0UU90azcya1pTMmtGRVc0ejVQa0UzeiUyQmxFZSUyRmp2YzF1Uko2UiUyQml4YUtRbEczVmVHVSUyRlE4YzFia1AlMkZZT2MlMkZTNTRiaHIwWkZ3JTJCZE85dU9RTEFleUQ5YTJmdjNZSWZ1SFE5TDNkZjNDdXpsR1MlMkJZa0ZjSUVsZXEwaWVwMENKJTJCcTg1ZFY2JTJCQ1VBNWNhb0UyTjloT2l0UnV4WVY2b1MlMkZwQ0tUNmlrN1FnJTJGdXU4S0VxejBDRGhRZk5WVWRxckdQNmpmeTB1ZGNYWEJLUDJ3UFVjSGVPNllrdGUxd2F6WEVxRHhVSFNpSmklMkZ1bTBMWFdLOUJtY01SYkFKdlM2eVYxRXlDZCUyRlVzVnlyVHpxbXVlTkllaEMlMkZjcmJNOXZuVmRncWxhQm9vMk1GQUt3RTBUQUxmRTVqeVFvbWVhdnpEajlhM0lpUDRXQ1lubUMlMkI5diUyQktUVjFlZjNtdnQlMkJkV1RVemt5SlRmcWd4S1N2Z1M4Y05DRXQwUU9HR1RJeHY2RDVza2o4cUh1Y1UxdHVaN1JVeXRKMlBIbVJzQlh5TjNUdVIlMkZYTENCU2lpcUhvSk81c252dmIyRFlQTXI1RnJKZlR3YVlRb2ltc2tWZmF0dSUyRkpSVjBiVmtsVSUyQnFmQ29FeFY3V1VXbUNkUElOcTlnazRXVUpEYUM5aG1kJTJGdGlYaiUyRnp3MnZmNnB1ZFVFdUdFQXRZejl1UVpFWTdNenM5Z0ZZb0hYTDVXRFJYWVZzVU1pbmM3VjZOSkRiSFlPZUFqSWFVQVQ1S2ZvJTJGOUdoQ3QwNXlRWW9HQmU1bXFyZ1lUYXRzaGs5RFclMkJIWnlOTCUyQmp3Q3BoaVc5ZERkViUyRjBtZFJ3NGtlRzBMVDNQVFo5Y0pCUmhLNXU0eW8xaUI5QWJPSnJHUmIlMkYlMkY5JTJCNmp1aVdTUGNScElKSzI2SzBIJTJGZXJoYnYxNGpGZXIlMkZiSkt1a2U5NHU5QVlRQ2RQS1BLWHFqSUdia1V2dEVJMjVpUzdyM0ZUejRzNG9Ic0w5dDlLZHprYlVob3U1ZlVqMGxwd3Vmd0I3SHczNHdHSWpTQXRnRFFKNyUyQndlQzRhUXdZZ1NxVkZ1ellCJTJGWmJXOENyT2RPWWVieiUyQkNHeFJ3TWJIcXlTNGxpa1dEMnVmNWxLbkpzaUdzbkdsd0tlR2c0UGttSTgxcUFLNWltMmx3WXBIeTVqdEMwNGxGUVBOTXR1aUJpdjJtUlBwQVNXcGU0VSUyRkJ4eGNVbGZuczE4bVcwS0U2dUhWVGk3aGZGV2cyU2dXU1p4YzJHcFBKN2traWZoSW5wT0w
4Q0RFbFg3ejdzZ3djRDFpRDBGa0dJRDBZWXA1N2JJMWtteUxCciUyRm5BOVE1alZpV3hoNTlqTjFhdFBKOHpacmpSVzg2R0ZRWW5jVEhvc0pZU3N0NUdTaFN2eWtjRSUyRlFXR3dCOWdZUWR5STdmMWYydGMxTDlQdGJHOWszJTJGczB3RHZHJTJCZDMwMTNreDRFd3NLWE9JQUI4ek1WZUZBc0wlMkZyN1loTjlxbSUyRmNmc0VpWDlNYnhERlFZTFdKcVZjWVJQNTRUUXpXbXZ2ZWgwQzMxJTJCU2RxUHVOenZ0TUhVSHZXUWFDMnF6cEFjbDRGVmUwaiUyQjdMQWZQVDFrVzRLM3VpSDZtWFZxSlZIWmVKbjMxSHJSZ1l1MkVYJTJGcmwxOUlvJTJCU2U3aWxGWTdiREVwblVKaWxGaUVCa0FpTTVkd3M2VlhBdVBVZXMxM2pXTGt5SDdjd3VicHpaM3JlMjhuOHFsQiUyQjd4MTYzSmlKaSUyQkpDa28lMkJTU3pDUkhhSU1KWGZyUTFzb24yeCUyRkJsTW91TjZPNnVTenZuUW9hV1FUUWx5bnJxc0tTMnRvQ0pMQktSVkNXUjZpcE5JcG9DcllnMHVhN2g0cDBsU0djUFFvNXQ4Nlh2QjNUTUp6eUpSWDBGTXNFaURNTFl1UFliczRZenZKYyUyQmI2TEVHWFdHQVJSZUsxJTJGb082RlRQczd4cXgwdlJ1Vm80SGdqRWJSSkdmMUdlYlN3ekpNQ1ZjN0o4QVYlMkZoZ28xZVFoa3hBcmU1Q2ZkOFpibVZjZCUyQjIlMkZTWkFEZTBVcWZ1TGl2UmdyUm5WbUJQR0JkQ0JMV3EwT2lwWG9INmJCeSUyRmRkbW1lTGJPdTh6eE1vR1d4dUNDN1p5ejcyRVVoWnM4aVVvMGRvNVRMbVFhUFdCSFNiMEFhZUh1R09MRGZDcThBJTJGbER6M2RMTml5ViUyRmVmRDFscWNmWG96eUN4aWwlMkZlbjY1eGRVSVN6Z3FrcEpmcGZNbyUyQmtlRmFzeVFhYmlYU1J5NTUlMkZnWWpKQkZlQTR6YUdMc2d3Tzh1YnpsMyUyRndSOEdUa3NiSUw2dVliSllwcHBlajgxQlp5S3hFWDlGSGFpanpvY0VKblNWc21lUSUyRkdXdnlFWDNCeFJITWhsR3hnTHlOSElacHFxTURXRWdXNjJMSUNLZGo0MHZjT0ZybTJVNnpOdUoxSVpLU3NVJTJCNjEzUWRqdEFPeDRXalNNd0pXZmZKRkV3VTBQSnBHQ1g0aTFBZlFPT1FRSHBFOXhHQUhvNlMxMWU4QWolMkZ1MVVKOUFWekRGRnlLYkloaHRPMGxDNnlkQ20waEJxUnNNa24lMkZsU2c4Y1VxYnRCaXRTU0hNVDVrOHhRN3FVYXh3ODVxdDNtUXRkV09rd21YdCUyRiUyRmtzV29aR1h3b3g2SHVCUkVGcTNBNnk2ankwQlRVWW8zRjlkejVnYVFERUNTWGtqbUdPSThVeDV4aEF3VjZiekRZZGpuNU1SYUd6NGJYaWJBUW5UTnJiWUVsVW5xMzBMc1V3c2QyWTh2Sjc1eVRIZGpNRiUyRjc1SnA3dHprUjJEUHB5T2hKMElsR2VsSHFPWGVINkV6dWFvdEVNY0Y3TyUyRlQ2TUcxc1FpZ2s1ZDgwd1hRbklaMWpjTGVDdjhQblNXSzF4MUNNQnczeHZxUW1ETnRNenlUdUN2Z0VEZ0lHM1FScWtUNEw1MGNkTnBxY1hyQ1VscTR2Vmx0R0p4V2RZeldiWmhremZiVG9RWGJiTWlKT0JqUXlpRmMza2NoWnprJTJCZDA2OGRObnhucU9PSGhYbzRCdXRDZmlpc1NBa3glMkI1MFdZbkpZR0hLa2ZRU2IxaHF4cXclMkY0TkRZbWp2V1ZZcW1ndTJ0MFB6NlRpQU9wUiUyRkZhUU9qSlhYUVFnNW1TOVEyZDZhajglMkZVQjJ
PbnBOUiUyQlolMkJPSWdQbjJQUVRNZDdHUXl6V1pOajJpZ1N3dzI4V3Q2THBrNkUzZ3I4VU44Z0NoMFUzNjhGODZVUjJDSUVLSHBReU9rcnY4VXAlMkZBbDNQaWdxajhRaEpIUWxJSkZlNmp2dXJvMW1CbVFwNXhMOHNHJTJGUVJXbHBGcVo5ajJSWE9HaTRTS296OFU5M1BUeTRrTjUlMkJRTTdvZUNBR2VidTJCRklITzhxNFgxN0lmTE9PNWczQzV5MjRrOUZJc2kyU0x0SmJ4MSUyRmFRTEljRG43eTBMV25zUnZuOHVTV05abHJZbFdyajFBWExQeTNyNjJoVlRuSSUyQkhWJTJCZEwzdmlOd1VPSnhVSWtlZEQ3U1ppZmF4TFR0TjBmMk1SYSUyQkF1QlUxR01GaUpSWWRJOUN0MnlQaWQ5ZlJWSHQ1UG13MFlRQyUyQmpyYzc4b2tIa0NLQ05jN3pMWHo2R2htTFNpMUpKdGxyblV6cXh6cGtBWGpRQzclMkY4OSUyRkRza2tSWmg5cTdWNXM4cG04ZHYxMHF6bWtrUXBvbDk4c05ieiUyQkFydDI4c1BsbiUyRjJHQ3czUEhYdkI3SHFFbjNOSjZzUjlOM0lUdnpjUmtLQTB0UUxMTTdFczdPaVJBSDdLSXlkUnRQM21yY0p0dTE3bkpuNTB5TXhvNXIzcWMxeDAwdmY2S3U3VmxDMjczdUxndlpIUmNPUGR3SHhQTGgxOG5yVE5kODNmOHZpWiUyQmhoS28zd05KQ3FxYWsxdGFWUG9SOFlpUHg2UTN4bWZFQ1dZVCUyQiUyQm82WDZ0eWNTeWpPSVo2d3IyZ3NkSUhaSTNnekRYSSUyQjlIZzd1Qm9IWVlydlF3dWFacGRlNGs5QUQwTlhEUExxY0NpRWlvTzVOJTJCTkptTDlZdXolMkZkV3o4ZEd3Q0RDNGVhRiUyRmc0SyUyQmVaYllkTExXbUVwb2FlJTJCR3FiWGd5bUtyTXVRU0FISUpNdlg2d2JlQjFFcURUcnFVYmRZa1dpaElWcnJQN2RXemxGNkNlYlVDY2tSN0tMNnhsUTVSdWNUcEQlMkY2Y1lvdnBPdHdTdW1ZQkN6MyUyRlV5R2NjR005RUs0UUhwcVhHeGlyNmNYN3g1cXkyRkx0WmFxWmwlMkZtanRPVHhLQ1g3aXl1ZjJpYXRjb0xieXltZEJNQVdWZUVVUGtsbFIydWFNSjEyTlRzZXR1cjRObmx2bkI2WnJnJTJGY3lVeFFjRmZCTXElMkY4eHFsZTZFV2pUNGZHcXMlMkJKYWU4UUQxQUcwSDJ1SUlIVWZPZnQ3RjY0UUVQeEtGbXRJc0xWVXF0ZSUyRkVicXZnYVBySGRvQko4ZjBWQVZubVV2bDNRekglMkZlVnRLdUxNaXZyeSUyQklHWWlZTyUyRmQ1OFJDdVhKODJWZXRYMWVpQktjYmpOTmtMODhCN2ZtWWxCUnB6NyUyRmtiQSUyRiUyRlg3eXJ4aEpnJTJGelolMkZ4eWMlMkJvdUd3Sk5nTTR1aXNUQXV1WW4zZTV2U1lIQ2ozTDlSN0trMlVjTXZzZ3hmTEhidWlMODE1Zjh4SG5RR1d0OWdJaHREd1VOa3A1aHlFZnp3QnFoTjRENHdrdVRwSGRGa3hmWHlkdHZYMnhqQTIlMkZkNWxpOUg5N0c1c2pmbGx1JTJGNHp4d29YeFlpREZ1R2FFQ28yMlNoZEpTUENsb0ZkZlFERGpma1JmRmFRV0clMkJ2S2Y5N0ZzTFo2MmJac1pBN2MyenpnSE9kWERCTiUyQjVDbFJjR0FocmR6TEZFcWVzRWY1MTRyUmhYYTVoZXdTWW1WZExJZ0ptJTJGUDZzSVN2QmVyQk9IRGpGUzVkZU5nbGVhTGZLMmFDMWUwMzd2S2slMkYlMkZKVk5ta2FqJTJGTzElMkZXZDh0RHhmek1VZWFnMm5jcE4lMkZsWExINmp2NmZzbDR
ZcllEQ0V0OFg5SjNGN1FmYlJMJTJGWjZWdEFTaUFGJTJCdVBEUzJkMmNWJTJCYkl3dkRiTjYzOU1oaiUyRmlyMnRRTFRBc05YT1QzSHc4c2F0U1ZMNFFKVTRITnlmSyUyRnYlMkZFdW93eDJRVDUlMkZSZ3BRQ2JwUXlVUFd0SWs5OU5MSG1aeFVkdHM5JTJCUzIlMkZJZzZsUENQaCUyQm02TiUyRiUyRk91b3BDRmElMkJGNWNWRndTQUVNNnFWc3RaNkpibVYyM0hlbzFsMlZyNldzMGw3NEtIOVg3ZnE0aXJGNTlWMDlndyUyQkEzUmNHbGJySmVpQnBQZEpmdkZyMlMwJTJCcTVFUlU4JTJCOTFrJTJCNDFEcGtkQlc3Mlg3NHNrMWo4alNWJTJGeHNpTGwlMkZ2MW50MVEyZHE5RFB2RGxWSTR5VjglMkJOU1hIRSUyRkxLZFV2WDRrWWpsT28lMkZxdzM0OFBVc1J2T093c0dXUzNjcXlYRk5CaVB2ZVZxNDFOc0xCVm9WYklGRVRhbjRFOE5KOVhLYlVMWXFjRmtSWXglMkZLJTJGJTJGNDJHdEFlT0hUdjlPdjlkVEpMQUg4bXV1R2NjZlI2aHNhYlQ5bHglMkJqYTAyZ05VYUQ3OXhQYVhHRGpvdjRoeHhUQVFPUUtlaXZoZHRqbm0lMkJZTjcwV3ljT1dqUzdSNW4ybkRWUmFnbWdObkx6TTVSdHdhaWNYemVIbVVxSUh2MU5DSDFSMTM4TTJNeXU5TTlLNWg3WDIlMkZNNW5KTW1iVk45c3d1JTJGRkUxUjVYOVIlMkZvbWRSYWRib21oTmVLeHhONmolMkYzYWZBSll5OUZuODMxZnVaTndlMTBIYjZHSlp5QTQyZXBzUk5vS2trZTNvWFVJTUI1Z2VHeUNVJTJCUDBXaTFXRE5nSG1VcVd5TFlJOFRvbGVncTFyJTJGc1FPMm81QTJQUFlBTFJuTTZrTXMxdW5vUWNhUFB1aCUyRlVEcXFaV3plbWlqSExqMzRSJTJCM1hxVGVJNTBBOG45JTJGT1VPN2R6eFdvc1dsYyUyQk5TZTlqejhOZ283M2lJMEphcGpGNFNSSUdUTVpuTG4lMkJsVllqYnl6OWNCd0xsd280UmNBRGdIJTJGMVNGMm5UZnpaYlZ4UDk0TTdpU0E4RGhsd1c2JTJCaWZXY25RQUppaklxSWMzM3hpVTlJUmFHdFo1eFZ0dzVPJTJGODFmQU02UHpDbmV3JTJGNzcwQzJlMnBsMmdaN1k5eXpVRjROdUk3dWJkVm9QRnJxdkJEZ0kwYzBKVlM3aThJS256Nzh5ZWVYU3dVNExYOWJSVWhzeSUyQlglMkJyN1A2NU9UN2tPaVdqWlYwU0hCVkh4JTJGTW4wQjBhSkM1VjhjJTJCZ0VpcHBDa095djIzNyUyRjMlMkYydTFXQ2o3VVZ2SVNoSjVkcnp2UmxRb0g0anp5QkpLeWhtJTJGTVVEbUI5anpWOGEwNGo5eDVJcFphbmo1aW1LWTNEdCUyQnNuUnklMkJkNXVrZnRucEU2Y2VLYXZUJTJCSG56bGwlMkZZNkxnU3QlMkZ2TlNUcE5VbkwzczMwMTR0MGVySlZwakJhVVVFM3BXVExseDY5cXpZS3A0NUZlVmt3cTVYd0t1QlhuU3BkNXRoNVhVVCUyRjlaY0tCUmlFcWlnQzhteVRXbktLN1V0SEswV2JBc3poa1Jwd1c5eFpPcHIlMkZNJTJGTExiMjBRTVV2MUtMYXNOQmRhRng2bk5SYzhpdUlSVlV3ZmhtS2ZMellKdFZabCUyRm1NbjF4OCUyQjBBNWVxVGFScSUyQnlzYWJNU0JhRUx0JTJCUU94S0RhV3E0Y1ZpcmdMQ0JuNkE4cTluVlAydEJ1JTJGZWhNMEhTSjkyWCUyRlBNbSUyQmlOTTFxcHJXclF1MU01WGJRWmVWRkhRY09QUUdYQWZJJTJGRk5YbUR
zdyUyQlFUMFk3Z0tLRHEwZmxYb1NpVTdIJTJGUWJ1OUMxc2pUOWNwJTJCTXAxQllWcnRNVDFEaWVNV2FVS1ZWYSUyQlc2VEh2ckJub0lWeWpPT3BONEslMkZhZiUyQmVQOUswRExxZE5RJTJGZ3J5TWN6V3N6emVFT3JEWDRLek9RZHo3Ym9UQWJqbkolMkZ1amRxQjBaY3U3R1lDTXJKcnElMkJuSXRBMHA2b1JEMWhMSjhsdTlUS2kyTSUyRkx5WGgzUk9lbmpGU0p4VXBKdGElMkZsZ3ZJJTJGaEFPZmFOdlVKSlBwR3VYaVpycDJEeUklMkY5SE85eHlKeGpCNWdSaDRkYjd3Mm0zcGtpaXNhd0RURlZzN1hBczlTYzF2MGIybFZPR1AxQlhsQUJySG4lMkJWUEllMnJ2aTRkQWQlMkZVWlAlMkJnN095UkYlMkJZTnpBcSUyRkFtNXViWXVsZ3l4U2JjMVpuYzhXNks5Q3hTQU16UE9GU0MlMkZRNlpPMyUyRnFFRk1HczE2JTJGOHRYODZiYm4xaTJJRkx1Z0N5SzdqdFA3bnkwQktvS1Z0WGJCZ3I0ckNyYUhBZWZPZmxVUEJRYWRrNTNqaDNHd0o0dDY3Y0p2Q1RYMm9wTUJtSGNFSTQ1SkJWblV6bU1RMiUyQiUyRjRYbzVZQm8zN3E3N2h6ZVRsUjlSTldxYzEydVBwTHdNWHplMjVHZURIeGFmMGVKYWtkZ1JqMDZ0OFZSbVhnSVRKNlp2dm55UUliS2tLVENpalJkTTZOTVV5elczMFQ1cnRHeTlIbnBINUJsYVBsZXYlMkY1T1hicmo1OWU4JTJCY0I1aVhzNWJ2S2FucTVmRk1pc0FHbXhuYThHQkhDU2U4QkZORTZoSHNoaElDZiUyQmVMZXlHbnhZUmdyb05xVXJBZjMxJTJGN3RoRGV2bWVLTmR4N3VsJTJCZWJ0aWgxajNER3JNTzdpeHlPaSUyQjBHU0hiVUM1VnpSbjRGRW82OHI0T3V0NCUyQjdmJTJGRFQ0dDAlMkZvMk1FTUR4bW85OU5lZzhQWmtNekZKTjB1Nk5RUnlTRVRtWHRTdkVkbk1qNmZUZlQ4VGE4N3djQXQlMkJBM0pHV0xIQm1RJTJCJTJGWTNSZ3VsWHZCTXFZT21vc2RjZVhRdjBoaVE0MWJKNk9kaGl1NThYelA5YTF3bmJYcTJMdm5zZXpIJTJCemg3TFZDREdsOFkzMDNuc3JsVEJUbWZCb096MlJ1VTU5MlFLWVdrbjF1Y25SdTJzaEdJZFhRVGR1UXhRJTJGU2Z1YTVycXNzV2s3REJGMU1GbHglMkY1MEZya3owVk1OVzc2OTlrUW5wWElPSFhSWTR2M3Q3YUFlODdaJTJCJTJCYnNEJTJGRDJ3Rzg1VkpWQjVkbmRrdVpRV3NQc2RHejkwJTJCOENpOG93SDNxSVAlMkZRckhUQ0tkcGZoblJ1anFCVlRGUiUyQjI5dHRVYlYyT1RQVzgydW4xam5TYjdFS2dQN2dkdkxlNG11eVJFTnhmZk1wN2h5JTJGd1BJeXhreGdMWnRWeFFaY3pwOWVzclQwOU9DTUY4bWwlMkZ5TWV1SiUyQkpDa3dOVHlaeG55cjJJUzlrRHNCdTFQeko5UW9CJTJGUW1Od3lCMmlrYjNTYURHVnU3R0ZkMUJDVU9kclp0RjcySXJ2MHpzR1hCRWd0cGIzNHhaOFI2QXdMWWklMkZ3UUhHZm5JdWFYUmg2WiUyRnZCJTJGZDBNUVk0NVVsdVBOa3E3VCUyRjNHY0duUkxMNFZXUUtpJTJGdGxoWmRZU1pXVHBWQkFQdTROU0taMHFGR3ZvTFVYZkxRNzl3aGxCQVclMkY2UEo0aE1Wdzg0JTJCS0x1ZzJmQkFnd0diTU5DM1VwY2o2SFVzZXEwbnk1RERKcTF5VzZaRkZ4TVk3MkUlMkYyeGJ1WDlBcGFvSnV1U1JvbEFvJTJGNTh
IMGlkSTlOUG1uMDVjMklsdGJ2bENVa2lzVG5Ud3dkcDR0d2dLUTRSR2piZlBPM1pRVUwlMkZySzBRJTJGRElHY29QQVF0YTN0ZjJ5a1BDeUVIJTJGek9IQ0ElMkJZVVpwakFsTFRBUXc3RG05cCUyQmUycVo1NTlhakdDRlFkaTVnZ3NHVDFNcWpuS2cwU3UwWlZtWTNXNnJzUDR5RjRTJTJGVndRcnN2blZtWHhmaG5YT3pVeE9DdFRhdmVQS2tSd2hNcmcyREFxd2ZacGJscDY4WEtNeUE0JTJCNHlGYTBvMnY3am1TQUJKdEFnWTJ0VzU5dDElMkZtYk1pVlVBR2ZQV21uV1olMkJJNlFRVlF3ZnpiSHBXc3FSU3ZNdXk3Z2o3MVclMkZPSGNxUGE1RElvcmR2T2tOUk9VNXd4Q0ZrT0pWZUFKWWFMa1hYZzRxSHdXRVo1enV0V3RvNmt3U2w1YzdEem4zaTFyJTJGOVZrazF4YzRsbXVzbllmWFpMRmJGUGFaNHEwY3V5JTJCcCUyQjRuQjhqd0NqVXhrZ0M2RXN1VkglMkZyUHZmJTJGTzllWmRlWDhnanNHcU5qNmZmM0lCbHduZFNzMEpoWHI2eWd5b2JkeUhQJTJCNkxLYkIzaXJ3WVAzNGpKa1pXcnFRNGQwbjFkR0h5cWVRQ2ZPbll4QUw2ekRIJTJCZnZIVWJ5eXoxWXFQZ0RjejV2NDNPNzVveDVQSCUyRnUlMkZqSXpIdyUyRnd4SyUyRjhFbyUyQmFsZnNyMTgyNkgxbE1MazRzS1pzVzU0eHlaeHZkJTJGbnNtcW41ckNtalAlMkZreHZ0NFY0VXhTN1k3dzF6NlRDSTdwZm9yeXBnZ3lpUmk3JTJGV1ZBYXNDaDFLYWVaZ2VleW1ITmZjVExMTkhGblJXdFVQMDdHQk9WMVJERzRzYjglMkIzT2Z2ZXY2YyUyQmFvUFFFV0Z2ZWlOS2J1eWppeGRyZ084RjdEVm03MUdwbm4xb3RtaCUyRkFGdzMzVWElMkJ0V2N0amFRV1huMXpjVzB1SFI3biUyQnlwWE1WelF2Wm91ZFJiSUV1ZW43bmkza0pQZHZNVXFmRUxSdXZ1SEdjYjZBdk5kVEQwekhUdzIlMkJZR2ZqU0VHbU5aUjQlMkZGM3lmMWlEckYxJTJGeWhKTGQlMkJVVU5UbGR1Vno1d1I5eHNYZnRRellBNDVFaU1Xd3l0WVpTbUdud2FrTGY2MEcyOVZxM2x4VVNkUnNUdiUyQnlGOXU4dkhnVXN4TldLcGx5cHZ1ODBaVGc4Q2pBN0d4T0FZUnZaeE1vZVcwOWswY2ZMWEVLYm5OMTI3T0VMZVR3SlFONEcxY1YlMkZQRmpSMUNzcVRmSlRvQ0ElMkZxTzJ1aEpjYyUyQmVZbmhVWGtvM3hrSVRZQjBwWWdobmIwR0FrVjljVUtwNTNLQSUyQlhRbm9pbzRnWjJ3eVNKZEYxeTVxOEk5Q3dtRFBlUm1WOTVMJTJCRHdWMVNmeERWMHJsQnNpcEtFRWdZYnRDcVlxWVZsN2NOJTJGc1ZrYWY0YmZ6RHdsTHdWV28yUDExSklQODAlMkZPMDh5Sno0cTdCeGJud3ZMSnZPOUJVQ2oxY2xab1lUbzhPdkRsMjNaQUt6SEdIZ1U1UWdCaDNQMkUlMkJQTyUyQkh1SjRRY2VaUHVtJTJGZWFpT0NRdFhZcUxXQ2xQd2MzVlZIZVQweHNyNjNqM3hFeDNiVmVKSTM1TlNMcCUyQlNQZ0pkSzlmbmd5ZCUyQmNEOTJUazNBbjhMZnIlMkJiQXFYenN2Vjk5ZXNUNnB3aW5RSU5IVTljVEglMkJ0azdEOGpOdUgzR3owNm1Bcks1REVqMUhTdDFRSmhVN25Sdnhtb0lGNjhTclM4VEF3dk9ZTHElMkJkajk1eXlIQ1g5JTJGSllSQTBheXV6dEhFMmpXWWo5YWklMkJwSGtlT0R2ZnF5aW5JZzR
Vc1l6RkhNd3B4N0x4MSUyRk5ld0VuUFlGWXRNaWFybmxiRUJGeDFFWlI0eDZ6VDEyR0JSeHJXZEwxT1YlMkZCRjF0VTdkSTN6QyUyRkw5TUJ2RDQlMkJIVWN2N3B1aWJ0Q2NTM1VvWExqV3lOYUNWNzQlMkZqNWV0YnhQJTJGS3U2b25LR0slMkJ5MlYlMkZqalRmdEREN2M1Szk0UkNkNVkyRGlaTWhmcGolMkYwb1VLaU9sTUQ1dUlvaFIwRzc0b09pUENHTSUyQkthekpiUEJoamRKbGlPZjY5Q2lMSUElMkZocEVxVXVVZmglMkIlMkZER1luQ0NqU0VTV2RtM1JmdHBJJTJGamNqOXJmTFVzYzRmT2kxUkg5T09oZTJoQTFUTVglMkJCT3dTWXNWTGUwYkxXME1yMGYzSWQxeDhqcU5BJTJCVzVFUXhJbCUyQiUyQmJHcnNEbXF3OWFTVVA3U0UlMkJOZWolMkZLdjJNcHFSUXV5dW1MdEVzbW5wZGwzcVVFTU1aZnJGMmRBN0huRjMyeGVLM3lmeW44cDZ6ZmdtVzk0JTJGOExPd3J6Zml3M2R1JTJGRDNieDluZDZrdnJxcVQyJTJGNHFlcG9XWXNCWDVya09EejFoSFowaEElMkJhUzVkQ2JVbDRjdnNoTk00QzhnZlVpeFZJTyUyRjQ1eDJseFpFTU40Y3FQdDRpcXpmVWZUQXR5ZXhtQW5IT0gxc29ybVZQJTJGS3BWJTJCJTJCJTJGTDRRbkk2b2xEV1I1djBhQnJtdnVnRmZRTFVNYXVYMHhTalZOVDhTRkw4c0Z5YXk1aE1veFFHJTJGYzlVdm9LTDQlMkI2U01EWW9nNHJhNUVZa3VvY2hHM3ZJS2V2dUM2eHRQbWV4bXl4cjM3RUVWeERzUVcySnVMZ3g3WlUzVFlwd2RIS2tFJTJCcFhTJTJCVTZhckJGSlAlMkY4d1JUQmFmck5KOW1sSFhMQzhTMlhzUVBrSFhBSXFZTThhZDlWNkkxdEp1amhLcWZlbTBJaEt5NWg0TiUyQmJHR3NCcjhUYWxTVHpnTkZWZTFGZjhwZjRUQVFFTmtLNVlCJTJCT1IxSGVwcEV4cTFUdWV2anBEUSUyQk9UQVF1SCUyQjJNRlVVTjlWUWNscFpnT09NY1JpbFRNZ0l0Tm1XWDlSQURjbVRnYng5eUdVUHBYZnVZS293cmRvaEs3d01CYjhsSTFJTXklMkJoTVdjcDJZc3dpTU1OR2tiVG9rNmVGMlBvVkYxQ2R1UzNwS1BxaGZDNmM0NU1RJTJCWFlqcTlqU3dxZjR3eDA1JTJGbHg5OW93QldMek1XWGhzN2dxTnhUU0NhOU0ydFk5NzJDRGhSdlRId3pmUmclMkJ4V1lUU2l6YXhCV0YyMW5QZEM1dTh3aTh3NUM2ZjJZcVRPaGZQS3JRYVo1JTJCSjF6cHgxaHM1U1AzZnNyVU5DbFAzQUZieGV0Y0tDWFIyZnFHUlFrVzhXc293SEZiSHhwQWhjZkxHVVFxTUVZcHhJV204ZURVa3duMTVTOWU5RFdPQkJuTUdZOEk2aEdlTUt5VVdPbzlQWThJTmhwVFBSTHptOU52YWRCeWZjVEdHd3ZZSGUlMkY0eXElMkZYamNlJTJCdUxkJTJGS2N6N0tyakoyT3NLQjNVYlFDNzJXSFpUQWZrYjVnTSUyRnlIVjdwWTVUSVlGY2tRZnR6Zno1QXlpeDIlMkZ6OEIlMkZ5b25qSjgzM3hTWWRVZWgzJTJCJTJGV3VZNnVycFdiRURVeGNPTGowVFk3VnhqNldBYzF2c25KZFJNMUFyYmNUN01oU1lJSWJDQ2VCWUlCNm01UGdhRUhVY2JGbkRvJTJCZkVBYXNaUEdkeUFJajBlRko1djBoRThBOUhEdFVTeEdmJTJCc3dKb3gwd0RReGNqMWM0MnFuVElSJTJGRkQ0bExFSEFiSHpkMmw0TFFUWWc4cDV1cTBIbDd
wVDBNOGtiZzUlMkZLYzdvOWVMRDg2SmdicExpdHZ1WDB6Q01DbGFRQ1FEMGVMTWlDNFo5bHZsQW1HNUpYQUtnVXBWSExGQU95RUZqSGZrZUdtaUJLJTJGVFQ5OVphTTVYem1pbWR3bHFjWE9FN0dZMlVBeVYlMkJzRVQ5QnF1SGR4SFc4aXV5UHQ1c25lV1A1MFdBbHBwQWxENlJtdHlpZGxHZVB3bnFIZFQ5czklMkJFJTJGYnlOWUNENFdWZzRRbjRHR1NteDAzZTRXRTJsaWoyVXZZUjhGYUFGT1lZcGdFcSUyQmpxZFQ0UHdkZnRZcnA1TUNpRVRzaXhZeUpoaEFmb3VyMk9lSlQ3TVhGUExUJTJCNDBKZ0ZnTnFQU3ElMkJ6RkZKakpGVjVJMU8zMXdWMjREeThNRnBLdXFwemh0bEdpUmozMTVnMndXa2slMkYzWm81bjhzNWVNJTJGZWdSakV6SG1YdHo2UzRRUVpTcHVlemxQb3FValUwSEF5TGJCa2tkZmFGdUxldmQlMkJrblJjYlJ6NFN2SWlCUFFmVUE1M3hRM0VXaW41NW9mcFBWZW03RUZMSjVldXd2RTJpNDJVJTJGQnIwSjNEUnRZWmFGcFJpV1dLeFhYTXJ0cmcwJTJCRjZnZEV4dDlaREo2JTJCdnEycSUyQjJ4ZDR0YzZvR1M0RXM0bzIyWWtOQ09XanVlR1JlNXhuOVhtTW95VGduMXdERGo5VHVoWmRUMmE3JTJCcDhTYkJMdWNYbHZZMHFjSDRlNDczRUw1R3FXbzdFSEt5S2hvNzQ5eEFIdGxtY0JCdk4xQktsbEZiazVXNXVOYUQ3eGhrMFRnJTJCMzk4RXBqOHpkaW1OZUhBSG1uYkxHM3p3V00wOVQlMkZFQVBDNkVyWFh5U0xxTURGdEJlUmdqZko1N2IwU041U1B6cUk0amdDWVgydDBMR2JVTklTNFdyU1Azczk4Rm5NZkxzUlM5bEhGZ1Y1bmJObUUlMkZuTmJHY2FtbHNGeTl3cGh2N28zJTJGTVlCRHBSUSUyQjF2UVQwT3MlMkJPQlh1dkkxSUdCJTJGSmNKWml4QkdUNmxpRU1GbWV1Q2Ztenc0UVp5WnNQTEVwdDUlMkZIbTZOYXFqZkUxJTJCWllnRXc4VHJqQVNaOUkxcFkxQWlLc3lTMnFRY0dMNHA4aTlsY3FCdk1mVFphVEw5OG8xZVhUUzU0JTJGJTJGeVF5TjA1Y0dMVzBkQmtmMlhJd0JKckhlUURIelRxOGwySW9OUmpjdDNyMUhseTBDVkhnWG9aWmxYJTJGclI5czFNV080amE1Qlh6ZnZYVDduU25INDVpQ0tsUFglMkZ0eEFoZHdOMmtvTE1IQjV6VFNiOUk3VDMwUmZLcm5OZ0JvSUVnJTJCaHpZQWo3UWQlMkJIS3dNd2YlMkI4ZkVjSm5sYUlhSGVaV29SJTJGZEhuM2NIWUVvVjNZMiUyRnNYY3RnODBRNkhEUWJCTXZLRkRXQzFkbHNLUnZsY0pXNEo2NWxMZEZlVUZ1VWVNbzRCb01PJTJCZkpVZ3c1NXBza3g0M0xkM3VEeFJqVmFOd3JTQ2NCTUwwV2hsaU40V2hUMEhySnNLMWttJTJCRWtYYXpXVnczWjE5S0ZMeTFNRmUlMkJ6OG1LNXZnOWJrcThmc05uZnMlMkZzazZmeFM0b1hRa25IenZtOEVnMHIyQW8zckZkbnFZQmZQNDhwbkpqdzUwSjNqNnBTWG5SYWhFWVlQamdScVhrcyUyRnBPaURmSkJBSjVLMHVENG5FblRsdSUyRnhPVFZxTERUR0E3WHREa2RIQyUyQmlVQyUyQlFDUFNBMk9BSGZqZXFLQW9paUluY09pdmxqM2QyUWM3UlkxcWdsYzMyckp2WnY3c1RibGlYdXk5Z2I1OUhlMzV0TVQxdm1VdlklMkY4cVAydElqc1FBN1p0YnVFT09KY1R4b3ZMVmFyMWlLY1N
Za3FtNGRiM00ydEJPJTJGODd4U1FabXlMVFdiRVlDNFU5Qkd4Y2FBUkVZYXNEY3lGaTB5NiUyQlY0clYlMkZwdngySiUyQkxMMFFEWjJmTnFJSTUwckJBSXlBc0x5VkYzcUd2MXlqaW02SjBxJTJCeHAlMkJKR0xyd3BLdXlIdm4zU2F5ekdMS3ZpbFBGZUtnSlB3QVRwbUJURDREMkxtdlY1NnN4JTJCY1ZEblpWS3JBN1BibSUyQk5rUTMxSVclMkYxTkhNZVZncFdqb3pIV2xGZGIwVEkwY1Fzc3pMYVQ2a1lvN2tUZG8yc1VReFBWJTJGVjloa05EcTRHRWdHWnk3RFlyNnlQb0R4N0FnY085Y25EeU9lQ1BnRXVuUWZGUFl5a1BHN0IydmFRMnZpTVg0SmJnc1dvQ1FFT2EyRVdpcmxYRmZLcTBaRFc4WFpqZmRNOGFlODZydnpvdHZ5OW5TQ1dtWFlNJTJCRlBibmdoVUFQcTN3NVhXVk5aJTJGTTFHWHdhNEdSNXRCMVFLOThkSFZsVXYlMkJCQ1RsODM3dktabFZlVkhtbG1hZ0lLMXEzRnZxa3RteGx4M2h0WkNjTnkzMlhtb0hEeUlLUmRUNkROTXI0V0FRU2dBekVMOSUyQm1MY3BPR0tHZHl5WWlpRldHJTJCdk5Qd3VHaWlIcVZkTHJ5YlhMM1MyTWlCYWJ6dEdCV1VwSUlOQjV5bnRldFRwdnV5bGp6SDJ2VGYlMkY2clhoaFl0VEdqQnRBMU9jNDJlZFZ4SGJYelNqQWRsdk9HYkRFSWVrYjBaMEQ2JTJGSE5YRThFeEcwbGpzck9DVGJpQkEwNXV3a01uenRqYXlNblQzSk5IS2VNZlJWWkZJSks3OUg4TUdNOFRkTVNWN21OWDQ5cEl2VjEzMkhHV2VMRG5zTDBoSmxnWDBvaWpEUCUyQm5QMjFzUEF2Q3lGSTVpc3VDOUs0d0lmdlJRSEs3ZlhkOCUyRmE3MXh0ZGJOalhIZlNpJTJGVzI2S0xSVm9HMHVPbU0yc0w3alJSenlHbUZxM1h5bm1zWU1qZzU1RDE3UEhVZTNsR20ycHlzalQ3RUE5YWRVM2h6ZkFyM2VESW9DM0IwSUZ1U3VQJTJCNnFFZGp0eTNoZDdPdCUyQmxReiUyQjVhb0hHaCUyRk5NTVFhJTJCTWU1dFF5ckJuTWglMkYzTGVpTUtnbjlkaVhvTkZEUUlMJTJGSmk3eWs4eUN2MVVjUWh6RENMSzhpaGNDbUJ4NXJoT3pYbUpjTmZta2Q0b1VWaU1wSjFlVGR3cDJnUGxiVmY2cHQ5bWpPNUc1bHpLV1dDYVB0UG1kZ2RMRGpuNjE4NUJWbGtnczI2UGp6TjRaOGRablh3SkI5MWdqOFhPUUI5MU1FVFlIeUEwSnZuUGNoeUxLQW42Q2RocDZLSHh4SjNpd1hpMHE5MDdxR3psMnZGbEJtQ0ZBbHM5M1dNZkRiaiUyQjcxOU9UT093VU10cWdnUkYlMkZwMDR3ZEo2WVF2d0JGdGdzNEJVbkdpVHdkZG5SZkMyTTc1JTJGZFN1aWFxMW1KJTJCeThrSjN5OW8xS1lNZzVwY1hNN1JNdjBpV3NjMTJCaUNJdVBCeHN4SFAzSWVMdW5uTkU0NDlFRSUyRjRiVWh3Mk0wZSUyQkpQZE5yWk1qbnBlOFllYkl2cjNneUZHOHd4a1NuVDg1UjE5SGVMd2lqdUpOczhpbGJ3QlFsekklMkJ0N25PdGglMkZkQURBVktxQ0gzQktQWWdQRWpFUU9aYnVIS0ZkNlY2S0E5YWVkN0lwR2JQQmlITkplNlQ5bkVBeDB2WGlUNmxYc0haOXExSjlUbUF6YzBpMkg0bFB6MyUyRlR1emVqN1FRWUUzNUhWMSUyQjRqUFdLMUdlcjUyenB0SWlpZWZTeDBxVmdWWlNoSlFKam1BMUNIaUpJM2JiMnE5eXJMSzhmV1ZEdUp2b3JBM0NuMXB
tN0swcmxWVWsyNFVJbnUxQ2dTTkwxVHpOSFJORUVSUjdIJTJCNnI1NFprYUNSdW5zNVpmRFhlWVlTeVMlMkI0RHdOeWxrMCUyQjkxdUZCSkFEM3ZydyUyQjZtcThkUEZQUjQlMkZzaXNiT3pRQk92RnpYcWk4bSUyRm8xUHdIJTJCMzclMkJhbVJNcUJLUTRBVU43S1IycVhnalU5QyUyRiUyRkpBTzMzRGRPRkxSYkNKdVFOViUyQnJhdlpIckRRNUZqYWcxRXR1NmtIMDFRcTVWNG12eEpaaWNSNEx2OEw3WEowS0M2WXVzOGgyd2RtdlB5MFlxRGFjVnNxaGxNbm14OWtTUXRDOUQlMkJtSjhCeVo4bFFGSiUyQmV0Q3piMiUyQiUyRllwMVNWTFpLYjB5WUJSWVQzenZSbmg5ZE54Tks1TEwybFZQTnM4aGJaWGtKcjBJJTJGUHZBRjNXU1JjM2oyM1VseVBOVGZPU3lacGUzV1piZ3ZaeTY4bzNNZGo2bWFjSkFtOW8wZGFhdE55MnhYMCUyRmJ0UlhReVc1WndweHc5OUZsYnNqUEhVQW45emdVNUlCZmtVUFY3ZUxPMHQxdVRoMU1nMzVnRXclMkZsejJEdER5aUJRQW5KcWR4JTJCeXVZSzhZUGcxUmlrZnNGMWFTVUlKRzBLeHNEWFljTiUyQklyMmNucExkQzh6dnZiRCUyRnZRc1Voc0IlMkZPVHk2ZDJCUU8yYk5mOXI2aWZBbFVHS1olMkJqRHNqVWhNQ205TkF0WTYlMkJrRU8lMkZ4cWNHZmdMVE5TdzlpYkRyenk1Z1VTU25OJTJGTDVHMDBreGdVOU9xNkhSZ0ZVMDUlMkJ0VmI1dEU0ek5xZmc1OGthTWd3U1I0Z2tJalJLajkxMmpabk5GWXZERlRwWVY2NjdWTkdJNEJjUXhWVDVtcTlRQ3l0T2pSZ3c4VDlRdkFlNHpIbHI5eWpDWjdCZmtxbm9OaFZiRjVFN1clMkZTek5hWnFKRGlKWFFuVEp6NWRwVVVubkpQUXRkTHNwaUlSa2xRaGtzNzY5bEg1ckVhUUk2QzNrWFA2M3BsSkJaYmd4dGt4djZpSmp6RW1HWSUyRjQ3R2xoenNKU1FWcm5JTEs5V0xXd0pGcGd4dG5iQ2tDJTJCNUp2MzFBb1lPdFo0dUVnV1NZQUoycDh0U2RrMURIcHhGdmRaRTkwNDhnbEJHWWZFblg1VTJRZ2dnYmNqNDM1UE16NGExT3NYJTJCVnc2UUFiWnlSUWlmaERnYTI4MFg2bklNQXNqd3RFUG5keVFkZEROT1YlMkI1NWxucEM4WEFvVnU3ZzJUTWVhZk5YcHZqZWEzaHBzdSUyQjZic09sd0MwUUdleFl4SmJvT3p6dXZycHBSN2p2ZUg2RWoxWiUyQmtNdDJ3RHJneG9rQXpHYTZqVHlwQzklMkYlMkI1ZkgweGhLRnIycVhma01EJTJGZUUlMkJKOGZ6UENOWHNnc0Ztd2V5UVJvdkZQN3RMWEtnd1FRb2NCMGZaU09nMDhMdkRUT0x6aVd3ZFRMRjZNTG56bVB3V04zZVRIb3dGWW8yJTJGOHR2bU5lbWVIUWZ3N2xWVWs1ejM1YiUyQlNiYWFMZ0t0R3JudDlZOEtnN0dOZlFuYkVlUE1aakIwWTBYZzglMkZoaCUyRnNiVGVONTlFSkZTdjdoZm5YNzhLMkVOVzVSdWxhUUd5bkNLczB4cWU3bzBVQSUyQiUyRkNNJTJCM1c2NzBuZTBWRmtyVE9VODF5RGlBMCUyQmhyZURSTHRYN3haSDVYMyUyQjN2akclMkJDckpnTTJVdEdmTzdFaWx6UDVLZWVpb09GZjR4VFJqQzQyRW9JMDVWQ0tyUVM2TFozdU5xWW9KZnl1cmpQV1VPVlFDMDAlMkJkcDNTaGxpOHBwJTJGN1F6eGEwbVlYNVc1VDJHWnpQWmRqdmQ4NUlrNjJxYmw2Tlhsa1F
CWWVpR2R0b3ZFN3VObnJIbHklMkI0U2dXOU1KREFlV01pYTV5Y09mdnN1ZmJ1Qjd4Q3ZiTndzcXN0a1lKNWR3JTJCVHpoc0hJbGxvY3pRV3dETm5xam4xZHpjbVVxMVdSZDUxYW1oR2cyd1JkendUTUF1S3VSJTJGTXBqaCUyRlNINzJqU0RVcTUxM1FlVFlaSXUlMkJLblBoMlI4anFQdkJDRXZFcGR5SUE4cW5NJTJCTSUyRlQ2TjFKOVFsQmpCY1NXVE01ZU9LY2tUMXB1dDVQM0RHTFgzSFRhRjlPYiUyRmlrRUx1eldVNlBFWmhPOGN5Zkh5WUkwZkJUbXZ6a3NhTkpGR0x6R2hmbiUyQmtVSzFpa0VMb1gxYzVERiUyQklpdXRWMXpWcERmT1Nmb2laNVhuSURtUmxHcSUyRmNoWExBMzNVeVEwa1F2dGs1RThBJTJCZFExaVpZQyUyRm9aaHZjJTJCZEtDQ1Vpb2hJR0U5ZlMyeVdwa1dOV0tWaktOa2M4QVJqVTIwOUolMkZ5emd0TDlXMWUlMkJYZVdwZ2E3eUlEbm81bElTbmJzaGpNQSUyQkwwZzhFbEZCJTJGczRCUzB1YlElMkZKc0JlRmolMkJnb3Bjbmk4VjJJa1EzQ1NyQkFiQUpEckJ3Z0pJeTc2T203dCUyRlV1ViUyQjJQcFA0QVBreEc0TEZ6SXFnSVg3WSUyQkxMVVMwUlJhTUtjUFhwU0RoNDFsdnZDMFR0RThZekN4Mm5lYlBFN3QlMkI5V0xaOU9kTktWN3NJSXdEcDdrelBVMWQlMkZwMFRXQWFVRUk0S2FnOHlyclBNMkFjTDN2bHZadzlIcG1iQXdJZ0R3NUM2QUptbE1aTFNEam9meVNUQ3BmWU5OSkYwY1NSVGQzZFJvb3RuUzFEVGRCJTJGd2slMkZOSE83MFJKR0tNWG42R2olMkJ4NE9vc25MbFkwQnJPN1l1UnE1T2FyTzZrSVpGeUZDQ1JoM3NSJTJCY2lRVXd5aGNJdDZBRGFDQU5vcWZGV0ZBUTljV0xKREI0UG5iZEZPbnIxeHclMkJZSGdPMGN1QVZGSVVhMlRJUGZKQks4VFJEdFVNeHEzcnV3UmcyWjRUSWUzalV6VXNBY3RXM2JOWmZLUkRONjUwUSUyRmxlNk9wdVZxZ1o0VVkxOGNRTmRQYXJJcmFyQ2p1dCUyQnVBV3hvTE1nUEVxYSUyRkw3JTJCeWt0UldvY05SUWR1YXd2anNSb0xER0xqOVFkSTRKNGp0U3lqJTJGNGJJQ1htUXJzbzNJaTdSUkJJY1UzanpQWVE2R1BVWWVTQzBWbWFkenB2V2pPRyUyRnVkdTIxT0RhTE9icUZhbFJvbEMlMkJTSk9OT1dVMW5VQkZhNmdmU1k4bDNRbE9QR0pFdlpRYUJRQkNwbE9QUEJKR0NxWjFzbjN5cDNHUldVWWd5NnlGRmMlMkZWeVc3OHlkS1l6U0s0UUpINHd1Q0kzUDgzY2RRdHFFbFVaOWpweTdSWkI2Mk05UE1UYlVCWGlMbWo1RnZ2bDBuWnVEaiUyQlBDJTJCeUlWZTBFdVRzd28zcDBTR2hBcU9QWjZGZUJhSTZ3STBZMHZyQXMwR25kZFVNaVB5MzZoSE4lMkZDJTJGdVB1TTJVeWNPQ1Q5ekRJOTVGTVdJWGdNWkdvTzJiZDkwZGZ4QVVzTUZpbG5DSkhLeDlIZSUyRlMlMkY0MlBsTXFvVHA4ZXlFZlF2WFclMkJ4VnFGbWpIMmZHRlRvOVNXaHJjOGJhV3FoUkRRNERHU3VYQVdYREFrUlZyMzVNR0t3bTNIbjRzZHpmWDhzVFhyT2lTR2kwWE1uOWU3MWdrMm54SkVXUjAzTzJ4S3BRTktoRmQ3bHY2b3JmeXVyQ3hzVTd3dE5zNjhNSTZwdUw4M1FXOUVaT0NEa0RHbHNrZ2FmSlFNMlQ4OW52NWxwaktUeG1NUjdhaTdNMW1QVnhhR0N
PWWxhNG1GZGFoMUF2S0RGZHlFYm9GNjlzUnVqJTJGNnRtJTJCcCUyQnZ2dng3MHhzdVhDa3h0cERrTEVTUVJISXZ1bGdIZUY4WVlPWEFlWjFDNW9OV3ZkeEpYaFVmQjhnVWxwd1dGUXN5Q0lYd3FrTkMlMkJGNUlKJTJCOXhiMDlkZ2s5UFVlbmN1Um0lMkZ5UUNNMjR2WFlpZFNpVmtuZHVQJTJGNTBoa1VhR2JDM2pXWUVSQ1NoYjFubEZxMkVWMjBoU0VFZmJ2Q2hrWnRBWiUyRjVwc3BzTnVEQ016dkVVelc4VzFQTXF6Nm8xWEVEJTJGeVp1OUFDcDR3enM2NkoyJTJGbyUyRlp2b25xbjQyQlJ5TllqZ3Z3M1lGSzA2b00xa1JTc0toeDZNbnU2bk94b3hFalhiQUVTVHZ3UjZwNEx4dnFycVRKekNTOUE5empSSlZpa0dFeVJIcEcybEx1RCUyQkdOUkhhZnFpZU91cFNTMVNJN1JXU3QlMkYlMkZ6JTJGZmNlRlZtdzZNTVY2d3dPeiUyRmluWGNFS1JGeWhzaWJYbnRxQ0tLN2pOR25lMXZuMlVvMzYyMTQ2SEVTcHdMRGc2QUFubE5XZVV5M09nS2o1RSUyQlNQT1BMJTJGZU1mZDVkZmQ4OXFCVE01clAxeVg5RVZVVEhpS3k1U0FHWW5nVTNLUUVCV2slMkJwNFlaaDNhRkRDbTUwdUlMcTZWSXFMZXVFR3hVNVpXMTV3azFIQkRsYjFxaktPWEVjSWJHV1klMkZ4V2QwSEVpbmVMJTJCYTc1Y2xPeFpYTGkyV2ElMkZiMmIwdjdXWFBudzNaRnBES25UeDRaUHUlMkZMOURSMHdra1cwM2hUTHRoUnpFaktYRnJUTEMzbHR2UUNEdEt5VWE0WWlvOGs2ODh0SnRCOU05QXolMkJES1FPWXZ4UE1ISSUyQk42RW9hVW1ESktjOXExczE0UVRudkxrQlZlNFQlMkJpNWZxMUlNQWVuNzBoSXdlb2RJZHhwUGN3elNLWlEydWZkRXNpQUV5Zzl1UHZMJTJGNWRWRzJYZW9pRTMxUjVZbDMwcG1aYyUyRlY3UTlOTUhvUWNjeTllSHhSWjIlMkI0V1NaSUxZbEV6ZTZDelh6d3dMcXJDSlRDcmdGcjRwTVJlNnBDRzdnSFlOcFNPa0pLR0FJOW0xTyUyRnM5NW9mNlpHM0hvOGtnV3llZUh5OGlvQ1pjZHFVREo1dXd1bWYxOUxJM2oxbnBmZkZnUUFtOFBSYndsdmlNMjVXbnVSSUFHJTJCT0p0JTJCSzdKcGVYdk5maXpjeXpVVEdZSHAlMkJHYzZkRDRMQ242Y3NlZk9Ca1FjRFJ5JTJCT0taYURBOWN1SkFzUzVxSWQ1cVoxJTJGYlA1QmNEY3A5NjFVZWtQQXEyQzI0WDBZSUFGdDYlMkI5Wmp4ZVElMkJyNjN3YTYxRjVHQXRhRVVYMktqZXFkR3I4T0FGMklFbGdSZ0ZDaTB6czMlMkJyUlQwV2NNTWNqQzRnTWNRRTBaNXdmbXBhUGZuayUyRmJjT3MlMkJINldPZTloejhFcHI5Z3NUN2NRWUJHJTJCRXY0S3BKb0VaNkRyWnBwSlp4JTJCbHAlMkZESzVQM3g2ZE4lMkZwZFlDem9lbkNLTDJBSkhjJTJCT0MlMkZpZyUyQmlYelp1d0lIeDJzOWdWQlo0Q2toM2NpRVBEQ0paY2QlMkJrdkZhV0ZXUjJ0bXJRYUdGZ0ZqbEM0d1IwTzI3JTJCVGpSS0hLdTA5ZzRJelZlTmtOMFVtUzhsTWg4SnJxQmYzbW5EUUx3dE85aVNsbWhBOEVSVUVhYzFlbXpOemdGMHNWRklXU1Q2MzNyJTJGMjN0eXBwVjFaWHdyN21QZHhmejhBaUlxS0Fnczc2QmpNbzg0NiUyQiUyRk5PNjFqbnN2WDI3VnNWYlZnazRnSkoya3YwNjZPJTJGbiUyQnM1UUl
oU2lPTGZXT0RSRTZkNnhKcHExJTJCQ1VUVXRqdHpEN005bUZEd3pqUlY3THA2cGN6Y2lGM1pudVRJbkVpNzJxdHJ2U1F5MEclMkJ5TUF4NGVVY2VENiUyQjU1eUdTa1NGRlE5NHRNbGFxcllieVV4cTM4QkJ3VnRqZk1leGNnR0M4bUR2YThxUFQxcktEZElhOW5qV1NUb0lockklMkJkNkVXOThibnpNa0hVT1N6R29hOHpQeG9wUkJHRmJ5cExUcG8ydXROVTVIUmVnU09tUG9jYlBjUHVQJTJCYkgxUiUyQnNWeWVtTFBmamh6bWNBMDliRXp4dFhhdW8zdjN4M3ZSZUZieTRWT2g5eDkzSEVjRHRDJTJGQk43QlBaS0c4Zlppc3dFS0hBajRDRXBVbnZ6JTJCJTJGNzhsSlVZMUhnZGNGRzkxVHlzUjZiSjFqelBKUCUyQnlYJTJCY0UzVUJ2Sk5GcFRhUTlJTnNYSzNkYnJHMTI0VDVCaUVVbkI5MW9pM1hEYUtweHZ1dURUQldGcTZ1Q2lPbldvJTJGWVFJTkZRUEdSQllQTXhHcVh2emNoYTlDa215SyUyQmVpSlZyNUtWYjg4clBwYlA1JTJGMzI0S1ZQZFRlUTZoclY3WExxSzVuTGhkWEpZejBHbDRYeldMYWRBcHRQaDBWYnVicDNDakVTTGtJNk5rU3RnNGZQZ2pwYmpob0doY1RNQjhmZnQlMkYlMkIwbEpZY1lMM0laTjNZNFMlMkZncENldjYxbk1mbGRGJTJCTVo5YUtmVnZhZzBoa2hOako0bThGdXZLUHRCd3lZbHdpMzJxYm5vT0NWUFprb2R3TlRlRGZyV2UydE5xJTJCdkJmSzhjQ2VlRCUyQnRSY2pGbyUyQkxxTld3UU5hblpudTJySjRnZHduWmR2U0hnNW1HcnkydXJZa1E2OE9JQ0s1JTJCNHhPcTIwZ2JRRDB1Uzh2JTJGdVlDRFhHTVRzbnpNbmVSMEpoTllxdUJJWXBrMXFFc1JwRlJpa2ZQeExubkZ6T0lwRTN4Q0FaVlE4TzlyYmhVSmZVY3ZSMU5HbnRTcGU5NTVqVExkZjI0YXQ3cFFiTWVqem0lMkI4VnIlMkJZJTJCMkd1dWswSGxFaGV2S0gwMFh4Y0ZMVSUyQjFIWEVyWHhtd3kxc3NCJTJCVU5kV2Y2alh3TFJ0TTlNM2VaSnFBY2s5M3ZiTk9Celdqd2JVRVFrMDI1Y2EwdUlJb21mYW5ReGtoWFl1VDExZ25VbFZkJTJGSEZKRm01YTA1U1lxTFJnZHZ3TnVKU0V0dUFpRlF2ZlhXOURham1oM1RsejA2aFo1Y0VZekV1dVliTTJuSXRmcG5xaHdHQ1oybm53UXRwOUlaYWdlQjRlUEx3dVk1RTR3MnFiWFdoOUolMkZCMlRxWWV5eFN4djBhdjZHUjB6ZDBGRyUyQlBFQ2REblFPNTEwZWxHeTRvd2VLSWlxJTJGeE5TdWpIcVp4VWFUOVhqZlZTTzRCMkdNekZ0a2xkNVA3TzlXczVrN3lhaGtXNE1sd3JBJTJCOWZVZFhOU05ReHFzMEh1Y1RDd1k4V3dkM3JMenkyb3A0SW4xeFlOQ3pmU3FpMDNWTEJvcFcxJTJGaHRhWVBsUjhsczhMeWlMRUxGZSUyQlc0NyUyRmJXQiUyRiUyRnBiV3BCM0NjU2kwMk50YU50bXVlZEJuQ0FsbUdaaTA3T0VsTFpiRTFLYUtCdHh6QnclMkJOcXBkZVpHRGo1TGdlaDNNRFFFMlNuWFlkODlTMzV1SGpLTmx3UTNtR1NKRm1oam94WnFhMkJ4NVo1MDBzZVBpNUtxTDNNTzQyYUxjUCUyRkcwNllnUDVaNTVPVExTckZpMWl3d1BPYkNDbm1UaGdVJTJCQjRnakhXenVURHNzYnZBamx0YVJoUjM5OE1yNzBYMTZyaUYxS0F5alVGQ01iU0lma2tiUjBvUEc3TkFyUDk
2SWtxWUNkVEl0aEhHbEdUJTJGVTVpVUxhQmVsQldJbmpIdG05NDZ3YzdESnpNbTZuUm1YUDlRMncwcjM3am0zaDdqUkRnd1dqSk5ZcGxxSXg2Q3FiWFdpYjczUWRwWEVGRW9iMjdOUk1KNnclMkZoaFh6ajZvajVOM1BoUEJQa1o2WXNNOXRRYmpTcGM5NDZ0RlUzYXd3Zm9oSzRwb2I4akNGRCUyRnJkMHN1bnVYV2dJNDhmbTBwUyUyQlhKQjBIMVlPRlkxWXpibGpkcVBDdFVWYVFKbDJVU0oyJTJGRHFHNDZrczVGYXRCdENiWXRIcjc0OUNkajNiWWFZdDFIJTJCNGhMTTZtRlZXU3BtTVVFTXUyVWZZQWNGbEdDNDFSY3FoM1ZveUxSZUhVaEVBZ1czMlI5ZjJIZVozUWNWbVR4azU2NzQ4SkxxYiUyQnlkSm1NZGRIdnBPTzZPamRQUHUzZWlhVm53d1JsUUZVdGhOemFsdG43Q0FlM0lPRE5OU3JIMWxrQUgzS21pcW5RSSUyQlpsJTJGcncxMnRoZVBRZk84ZzVYaktCbTFocHdOdU5UYXdpUlczNSUyQjh3JTJCJTJGOEZ4cGpUWWVyYnFUVjd1WENpVkt3OGEwMGlSbUgxOVAxTHNhVW5EUWhaNDlTVWRSczRWTEJZVXRHR284bkZ3dkNZc040bDNzWER3d2d4aDJRNHZTUVhOM3c5M2daTzAxTyUyQlBvRUFtWXNuUEdpRzVtZkFaTjAlMkZOalFnVjdJZ2U1MGFHM1JrM3o2SklnZHZSMWU0bjhJV0x1Z2lVcU50WXducFZ1JTJGWDNnMDhkcWM3QllpbDhOS2slMkZ2JTJCMjhDZUdhSngzUyUyQkJYd2VCJTJGN2RsNSUyRlBhM3pMd2pyRFRseEdXOWJjQjZtNmJUSFhaS2RTVkJka0NlQjlPMGl0aSUyQjlKVVRzME9EcGl6MnFhMXhpVkxnb21zVHA3SkdOaFp3N1dKalpIMkFnRldTaDdGaTNyaWRjcjNzeUZ5aTZmQzZqTHRyWWM3VG9SZ0NTcHBnbnJBciUyQlpnWnA2Z1laWFdqM2wlMkZmVXUxdmdReWdzR3hkZ21kOVJGR0pSeWZKR3ZtV0c5STRjTklJZE5ySm5EcGhVYUgwY0pFbyUyRlAyMHk1VzFTSkZUMFpTaEVKOFJldzg1YmZKR1pPcVN6UE9vZWpHeHhsaU9UWG9GY0VGRmVXME5ENmZIaXlTa1RYTHFrcjBCbFladzJCWXJQb2NRcGdJNVIlMkZTT1BDT0RKM2l0Q2dpa0Qwc09NQW8lMkJkJTJCWTBDTmFzMFVhOTFvRVNVT0FUQUFKTVNsVEJkMUlOdVJWZXZlTXglMkJodkNOSjNZYzVpOE83U3FPYW0lMkZKblJzcFExUmklMkZzWTJnSExuWWtpODEzU0lRd050M2RaT2ozS2ZmTjVQYkJGakxYR0ZkdEJFa0tqNktNRU00TEp6RE1KMk1TektCb0xadERlZnU1cTZvQWZmMllBNjl4ZGFRcUZaWjRQYVNYbElCWU5jcnpuSkhhZ3dpRkQyNXg3bXNhUWtwZ2wxU21Fc1huRkxUWHFwREFFemFxYVRUMDFTbTI1Rk5IUXJHSCUyRmolMkJ0JTJCRlBNeVFlSDlkRUJFZFU4dkp0SDhHV3FKeHI1THYwMGtCNk5hcEVIM2QlMkZvOGgxNUlzTDJKUzVjMThweHY3OENRY2F0JTJCM01uMjhuNnFJWndnZXMlMkJZS2FTU0RZR1Vja1A5Y1V3SVZxaExnSno2a2YlMkJiOHg1T3JCRkhBSFVVakZwdFlSNFFjYWhueTdBbWFtWWhQa200JTJGckZ2OTRMbGJXOGFOMnBRb3NlSVF4UjdmN2dMakI5JTJGY012Z2VKUEQlMkJDUCUyQnhUJTJGdG50V1QwelNRSyUyRmN1TkhQU2dSWiUyRkJpUXclMkJVJTJGZ0Z2TCU
yRm1lY2JzTW0yUjQ4c1JIVkw4QTdxWCUyRkthWHhxSGVmOExhNDQwTkFxJTJCR2l6WDVxNXlXdEJPJTJGSjh5SmgxZmM0T0c4U2tBRUoyQkolMkJPOTQlMkJZdnB6Q2hGU3Q2T0lYRDNoNlJuSHNWcGVzV1E0NkJZcE5vJTJGRDhzciUyRjRKdjFieEhOQzdaQmhyRHB3Z2tvR0lhJTJCU0pYWGhFWDNSc0xGJTJGJTJCQkNQa2xobVljZHhHcEFmajlBVTh3djR2ZFQ4NHVFczhqcmZreURMbm5SQ0FUJTJGaFJFdmNoS21jZkwxY29KNkViMzJSWWklMkZTd0RZOVNvWHh0OGtoRm4yOVJuck5ZYWt3ZXNaeUxwV3hNdjY4RVY2RWRwdXpuNFQyc1NyNERMTnZYajV6ME9sMDV1WEtaNGZabHJacGwxYUZrdTZYM1pkbVM4Wk1ramd2ZHNqYnNxJTJCQ0lReUs1djFWWGkwJTJGdDdld1dWcERNOTJaYlZRRiUyRndYM3FCNlVUcUZ5eGZ5YTVIY0Z4WDVvaXpYU2RkVlM3MjVsMnhkQ2dpTHJJJTJGTDlwYjhpdE11NmYxZktkaVJyTmxCWSUyQm9hcjJpRCUyRmhZMmY5ejhOeSUyQkRNUHRWRmZHJTJGdzFNVWdlM3VONDZpMUUlMkJPb2d6NkM3RFMzeHpGVU9SZjRDaiUyQmdhTlV0aFFCbXk5VURCZm1kJTJGM2JyelJRS3IlMkJTZiUyRlNBcFQyQW5uVDVVdW9HWFM3YnJpa2Y0UmRuaTdLQWpoR2xXZllYeWZ2TjM5dlNma3ZUJTJGMlI4bmdZQkZNT1BTZHFGUnVYZG9NeXg4YUElMkZyTjBIT3NMSyUyQm4lMkJGUFFqeUozdHc5Z2Q3dmdmRkg3ejUlMkYxbXozRFpsMmIybFNVdTFraVAwdUlYNFB3JTNEJTNEJTNDJTJGZGlhZ3JhbSUzRSUzQyUyRm14ZmlsZSUzRScUnGsAACAASURBVHhe7F0HeFRFFz3plTQgoRN6lS5VQH5AqoCCgBSpIkWK9Ca99yJFpCqIglSlSpHemzTpBEJCQnrv+b8zmxc2IZtskt1kE2Y+Mcnum3Zm3sy5d+7caxQfHx8PmSQCEgGJQC5F4M2bN7h//z4eP36MZ8+e4eXLl/Dw8AA/9/PzQ3BwMMLCwhAdHY3+/ftj3bp1uRQJ2S2JgERAIvD+IDBgwACsX78eZmZmsLa2Rp48eeDk5IT8+fOjUKFCKFq0KEqUKIHSpUujQoUK4vPclowkyc9tQyr7IxF4fxGIjIzE2bNncfHiRVy5cgU3btxAUFAQKlasiLJly4oFvXjx4mKBd3Z2Fgu+nZ2d2AC4EcgkEZAISAQkArkLASpwqMjhXkDFjre3t1D0uLm5CcXPw4cPce/ePbEXVK9eHR9++CHq1q2Ljz76CBYWFjkaDEnyc/TwycZLBCQCt27dwuHDh3Hs2DGcPHlSLM7169dHnTp1UKNGDUHsZZIISAQkAhIBiUBqCJDwX79+HZcuXcL58+eFsqhJkyZo1qwZWrZsiapVq+Y4ACXJz3FDJhssEZAI3Lx5E7t27cKePXsQHh6O1q1bo0WLFmjatCmsrKwkQBIBiYBEQCIgEcgUAtxbjh8/jiNHjuDgwYNib/nss8/QsWNHVKtWLVNlZ1VmSfKzCmlZj0RAIpApBHjcumXLFmzduhWvXr1C586dxWJLjb1MEgGJgERAIiAR0CcC1PBTubRjxw4ULlwYPXr0QK9evYS5p6EmSfINdWRkuyQCEgGBwIMHD/Djjz/ip59+Ehp7Lqr8KZNEQCIgEZAISASyAwFq9ql04s+vv/4a33zzDcqVK5cdTUm1TknyDW5IZIMkAhIBIsBLs8uWLcPevXsxZMgQsYjy0qxMEgGJgERAIiARMAQEeHmXSqhVq1ahQ4cOGDFihLi8ayhJknxDGQnZDomAREAg8N9//2H+/Pk4cOAARo4
cKRZNS0tLiY5EQCIgEZAISAQMEoGIiAihlFqyZAnatGmDcePGoXz58tneVknys30IZAMkAhIBIhAaGorp06djxYoVmDRpEsaPHy/dWsqpIRGQCEgEJAI5BgG665w3bx5mz56NYcOGYerUqbCxscm29kuSn23Qy4olAhIBBYFNmzZh8uTJaNeuHaZNmwYXFxcJjkRAIiARkAhIBHIkAl5eXmIv279/P2bNmoU+ffpkSz8kyc8W2GWlEgGJABF49OgRxowZI6LPzp07F40aNZLASAQkAhIBiYBEIFcgcPr0aUyYMEFE0124cCHKlCmTpf2SJD9L4ZaVSQQkAgoCvKxEe3seZ9I0RyaJgERAIiARkAjkRgRowkNzVNrt04lEViVJ8rMKaVmPREAiIBAICQnB4MGDhRb/hx9+QM2aNSUyEgGJgERAIiARyNUIXLt2Dd9++63Q5q9evRq2trZ6768k+XqHWFYgEZAIKAicO3dO+BRmiHB6IZBJIiARkAhIBCQC7xMC9Bp3+PBhEfulQYMGeu26JPl6hVcWLhGQCCgIbNy4EQMGDAAv2fbs2VMCIxGQCEgEJAISgfcSgV9++UVcxl23bh369u2rNwwkydcbtLJgiYBEQEGAdve///47uLB9+OGHEhiJgERAIiARkAi81whcuXJFKLy6dOki7PX1kSTJ1weqskyJgEQgEQFeMnry5Al+++035MuXTyIjEZAISAQkAhIBiQAAHx8fdO3aFaVKlRKRc3WdJMnXNaKyPImARCARAWoojIyMBMGXSSIgEZAISAQkAhKBdxEg0Y+Pjxcn3rpMkuTrEk1ZlkRAIpCIQPv27YVv4PXr10tUJAISAYmAREAiIBFIBYH+/fuLmDH79u3TGU6S5OsMSlmQREAioCBAgl+wYEGsXbtWgiIRkAhIBCQCEgGJgBYIDBw4EJ6enjoj+pLkawG6fEQiIBHQHgGa6OTJk0dq8LWHTD4pEZAISAQkAhIBgQA1+sHBwTox3ZEkX04qiYBEQGcI8JJtYGCgtMHXGaKyIImARCA3IUACt2HDhhS7tHDhQowePTrLu1ugQAGUL18e//zzT5bXLStMGQHa6Nvb22f6Mq4k+XKGSQQkAjpBgG4yGezq2LFjGsuLiIjAtGnThBDAI0lnZ2d8/vnnmD17dpZE/0tvR//++29ER0ejdevW+O+//1ChQgXMnDkTkydPTm9Ric+npxz1+tUr3Lp1q8ZYA23atMFff/2V4fZldUYSjMqVK6c4b9KaL9wI//jjD8TExOi82cnH6fjx4+jdu7fwhnH06FF88cUXGtutbWP0Mb9SqrtDhw64ePEiHj9+LN4zxqxYvnw5Hjx4IE7dGjVqhLlz56Js2bIoXbq08IaVUqLLv1q1aqWan/Pv1q1bIqK1lZWVtlC8N89x7vz777+iv5MmTQLn/9ChQ8XfH3/8scBXSXFxcTA2NtY7NllJ8mNjY2FiYqL3PuWGCpo1ayaCZWXGvaYk+blhJsg+SASyGQGShgULFuDs2bOpuskk2eClojp16ogNjURg9+7daN68uSBOhpa44datWxc//PCDOD49cuSIIHbUemU0pacc9frV67t//z4OHDggPiL2/JskzdTUVLhi++yzzzLaPJ3nS4uopEby05ov+iT5yceJkZp5ifzPP/9Ew4YNcebMGaFp4+8ZTfqYX8nbcunSJTGHV6xYIcjksmXL8N1336F48eJCwA4NDQWFRpJ9ziO+j/7+/nB3dxeCQJMmTYSQy0Sf3tu3b081P/NVqVIF2aWVzuhYZEc+ClxcTyiAMZH8V61aFd9//73AuVevXhg/fjxGjBghFCMUZj/99FMxDy0sLODk5IRWrVqJnwwyyHd/586dQlgLDw/HoEGDhMAfGRkp5ilPEHhX6sKFC+jXr58YY84FRl5lHmryqYQhqeTaTIGP96pYzsmTJ8WazWdGjRollB7lypUT8+qjjz4S7V+0aBFWrVoFb29v8N1lOynosX0
tWrQQ84rrAdd67hdcV3nRtGLFimKuKeVkx1gYYp1UKBCTsWPHZjhgliT5hjiysk0SgRyEALX3jRs3FhtHaoGubty4gRo1aqBp06ZikVc0VNTiX7t2TZBVBwcHETCLhPXFixcoU6aM0PKTZCgb4Lx580ANKOtjWdz8rK2tBWG5fPkyZs2aJRZFEt41a9aAl4BT24DCwsLEETnJDbX2X375JZYuXSq09opGk0LJ5s2bk2jyo6KiMGXKFLEZ00Spdu3aWLlyZeLGN3z4cNE2nnC8fv0a3377rWhbcg0xN1NetiIZs7S0RLdu3bBkyRIhSKjXrxCB5FOjbdu2gvBzU2f+1PqqYDRx4kRMmDBBnKTs2LFDbOw8gSFW27ZtE25PuTHXr19fkD8KZtzQ2R/+1Kbv3PxJVkgySpYsKexMSYxJbEgSFLMETSRfm/lC3NQ1+YweOWfOHEEcSKBJfFi3Jow5Rw4dOiTmC7XcbAtxYWRm9XEKCAjA4sWLE6Hn/OvRo0eiJl/THDIzMxOC77Bhw0R5xYoVE6Tok08+SaIxT2l+cc4w34kTJ8ByqCFnGyhYpDXX1ecIydyvv/4qTs6IvYuLixjfhw8fijFm4thzbpKYK+/w1atXxe/jxo0D3zkmkkxt8terV08QOvZZJs0IJCf5ypwjxiNHjsT//vc/saYNGTJEjD3fb/7OdYbrCecr5x5PFhmDhGNNwZdrEgk3BYS9e/eKcafA0KlTJzHGXFeDgoLE+8GxJ9kmmUyL5HNNpCBRrVo1sa5xHeF78/LlS7Gmd+zYUbzzbHe7du3E3FFOK3gqx7WV85jt5tziqSgVPHwmJCQkUdiRc+YtAjw94/t06tQpodVPb5IkP72IyeclAhKBRAS4MJPckhhRy5da4kZCTSJJJIlsSokmETyi5OVdhvom2acQcffuXfCYl5tM4cKFBVEi+aHWiJom3gXg5rZnzx50795dmFLQtIKCBMkNCbymDYhkk6R69erVguSTWHHzIREj+ercuXPicam6uQ43MxIj/vvggw+EtsvGxka0lRoz/s1FmQR6xowZguBSU8qkXg5JOkko8zx9+lRs3tx8ixYtmqR+TacHyUl+an1VMCI55mZNQYTkgATi8OHDghwoGjtuxCTL1PKSdHBzpjaQm7k2fadGkuNCwYGaSGrAKSxQUGD/KLQRN00kX5v5oq7JJ+4sj0SCAgVxoZaTQoAmjHni4ejoKOYcCQnbRiHz5s2b4uRGGSfOJUUrSmGLmkcSJcXMSNMcIgnifKUrWc7TwYMH49WrV0LTyc07tflFExqavTAfCRnJHQULCpupzfXk7xXnEQUdkoQ7d+4IjHgqQYEotZQSydc2P4VfvkNubm5CsJEpZQQ0kfyvvvoKW7ZsEZl40kIBngIZCT2Fbr5XnBd8dzh/lXWFc5IKD84baso5p/mPWnWSaprJcK2h1p7vPk91qOWncoVzMS2Sz7wUgCk48L1+9uyZWO+4zvH9oMKA7aX2nms4v+OcYTspoDx//lz0iYIL62O7WB7nOtdOmVJGQFF8ETfOmfQkSfLTg5Z8ViIgEUiCADcjkkSS5LQSyTDJITVH1MCnlCgokFSSBJEYkeBTw0Syzw2BpIsElRp6kqUiRYoIUs6jXhIfBhKhBpSkVDGv8PPzS3UDIgmjRpuaYyaa5HDTq1SpkthQSa5IOJNr4Eme7OzsxEbGxBMHatR4wkCSyE2PpJGbHQUbEjQScPZBneTzlIJ5KAhQsKCmnIkClHr9mvBNTvKJg6bNVsGIx8AkDSQEJKoknCT31MAx6iI3Xm7MHFtu0kwtW7YEhTCSApp6pNV3nsz06dNH5KUmnIFeSEio2afJAQUKtkcTyddmvqiTfJ4ukAwTM5oyUJNPDDlumjCm4Mg2ubq6Cq0n+8/2MCUfbxJ9Ei8KgjwBUG+3pjlEAdjX11cQHG7OFAYoUCladU3zi4IKTSUoFFCQZaLGk6cCJHr8XtNcZ3+URDyo+adwTcG
Y+SmocZ6ShKeWUiL52ubftWuX0BpTsOP8lCllBDSRfCoGqCln4jhwHiiKDr5/1NjTFCb5u6M+5zn2fI+VE8pChQoJwU45oeOayjnPxO+0MdehoMi2cb2iCZh6olkXhQSFrPN95HtIYZnt5JrHNYaJawFNgihk8BSU6xAFYmr0ZUoZAZ7scN3++eef0wWRJPnpgks+LBGQCCgIkAxSq8gFX5tEYs7NiouUutafJgAkTUzcKM6fPy+IJBM129Q4k6Qwrzo5VkiwotVKbp+tTqhIilLagLgRsm4SKGqy1VNykq1O+rgZMR81wBQKmEhqufmS4FBIIcmn9p5Cijqx5VG3ej+ohSMZprkOU/Xq1YX2mYJHRkh+apttcoxI4rlZs30KqVM3BaCgQ2LPxBMSmn1QaGK+tPpOvGnGxURNFDd12gDTJpdEmZ9R8NFE8rWZL+r9IZkmjsScmk+SDGqwaU6gCWN+z9Mf2iVT68yTH574cI5S66g+TppIPk13NM0hkhnOQwp5NF+hUMF/NG+gIKmJ5JOI0/6Zpkc8JWOiQE3M2E++C+pmSupzXRFSmId9KFGihCCMJGfEgacQPCVL7uFF/T1k3pRIvrb5Ofach1wfaCYiU8oIaCL56pf7aapBDfq9e/cEOeb4akPyWSPnGs1k+E5Qc09NPrXwVCQomny+K5yLrIfjRvJPwk0TSppXcq3jPCRBp4kX11sKDjwF43zg+kCtPk8MKHhSGFHWc7aBwmpK7zjbxsR6KNBTIFQUNHK+pIwAx4jrEMdA2yRJvrZIyeckAhKBRAS46fByHTV7NWvW1AoZblIkjSQv1OYrHhaoVSQxpm00Nf3qmvzTp08Lojh//nxh45lRkk8tiKYNiFosnhrwiJuJ2lpuvrwolpYmn1pSRdNNTTzJFMn69evXtSb5ymZM8kYMKBxwIedxe0ZIPk2hNPU1vSSf/aMHFibl3gUFMJpgaNt3CkvULjP/wYMHBalmO9Ii+drMF2oiFbJLMkzbcZIOauQ5VyhMkORrwpgmWiRO/MfnSEpJcogf57c2JJ9zWdMc4jziCQjNV0iySc5pZpEWyVc0+copEtvPPnFuEU9+rw3JpzkFBRlFM0wBi6cOHEMSNgprTOwD5w2JP09ZNJF8bfMrp0KS5Ke+NGpD8mn2xnHk2kBFAi/WUhHANZOncOqeqdQ1+TzxpOKCY0rzGZrMMdHcjIIfT4R4GkAlBX/y/g1JvmLuRwGc7ymFAd4NonkkhQOSdraJ6x3/0eyO7eN7R9NGCghUAFCoZzt5vyk5yafQS5M61ssTBL4TXA+oHKHAIVPKCFAgotKImNM0S5skSb42KMlnJAISgSQI0HSGGmDluFdbeBRzHJoxUHtOYkXTEhJ/biL8R1MeEg5qALmJkDBTi85NKaMkn5osTRsQNzGaMtAkhxccuTlyoyJh53EzNyoeK1MQSMkmn8SSGxUJHJ+h2Q+9VWijyaeWjJs0zY64KfLomu2kpobmP+r1a/LiktxchyRAU1/TS/KptaeWjUf8xIU27tS4KTb52vSdZeTNm1doCmmqwrwkBLx8y/ElCdXkQjOt+aLeH2UcSSx5QsN6mEiMeVE1JYzZJ5J5jh0xIwHiqREvMrNd2pJ8TXOIAjDrJlnnOHFOULtOIYKa0NTmF+2Ub9++LcynSH5YB9tL853UTq3UNfnJzXWIB0kfCRYFNfaZml5iRjMLCro0ddNE8rXNr5jr7N+/P1Fo0HaNeJ+e04bk8x4Lx50nU5zTNH2jIEolAH9qIvk8NaPJIk0eKZzRJp+ndHyeQhhPA/huUgPP8ec7zlNZnuiQ4PM957N8B/lOUMigwMqTqzFjxgjhn+se79dQ409Bme3hfKVZCYUGroM0hUtO8tkXCh0UVDlHKTjw9MKQvIIZ6jzkmst1iicy2iRJ8rVBST4jEZAIJCLARZ0aGG4e6U3cQLgRUFtObSZNUmi7SyKp2HLSVILP8HuSLHqDoBCQ3EY6PeY6JC6
aNiCWQ00/F01uPiRAPDngSQNtWql1IuFSPO4oR+nKpksTFpZBDQs3UW5qits5bcx1FJd01FzTNpVEmhpm2ryr189NNqWUnOSnttmml+Tz5IVaamr3SIbZV2oB09N3tpmabOLH8ebc4XiTUNDGl5pjTSQ/rfmi3h+eLtHUhoIjL15zXlHjzRMZkhQKFSlhTNJM4YN3PNg+EiO2Nz02+ZrmENvPNlGoIRGisEjSTxMJEqHU5hdNF9huCr4UBlgO5yXniLYkn9jz7ghJlGIPzc8oWPO94ikUy6NpDcumaZySUjLXUb5LK79y8ZYCDe9vyCQRkAjoDgFedOY+pdx5Sq1kSfJ1h7ssSSKQ6xGg5wRqb3hpksRXptyLALVv9MRCzZ1MORcB2t/zAjiFBgqOWZF4ysc7CIqpV1bUKeuQCLwvCNCMlQoMmtyl5ZVIkvz3ZVbIfkoEdIAA/YnTWwIvRcqUuxFILUhV7u557uodj/ZpKqVcqNZ375R4FjQnoVmHTBIBiYDuEaDpH+9s8T1LLUmSr3vsZYkSgVyJAM0X6BmGXkgUu91c2VHZKYGAJPm5ZyLwDg3vJtCkKb1+ttOLAuMp0LafWkaaAskkEZAI6B4BLy8vYQpHd82pRWCXJF/32MsSJQK5EgHa//EyonKhMVd2UnZKIiARkAhIBCQCOQAB3g+jm2nek9OUJMnPAQMpmygRyG4E6DGGFxh5OZEeaGSSCEgEJAISAYmARCD7EKCLYLrEpRtUeoFLKUmSn33jI2uWCOQYBOjOkp5u0usyM8d0UDZUIiARkAioIUDPSIwfkNrFRiXGAt2R0quWkZGRxFAikKUI0KUmTWnpBlWS/CyFXlYmEcgdCNBDBv3a0wbQ0tIyd3RK9kIiIBGQCKSCAEk+I0Frik/BrCT59EFPV6H0Qy9JvpxSWY0A41zwjtzly5dFsLLkSWrys3pEZH0SgRyGAH3Ik9wztLlMEgGJgETgfUCAJJ8RYxlpWFNSSP79+/clyX8fJoWB9pHBG0n2lyxZIkm+gY6RbJZEwCARYJRZagkYNEcGtTHIIZKNkghIBPSAgCT5egBVFqkXBOjxjgEFGRnbysoqSR1Sk68XyGWhEoHcgQD94TMi6++//547OiR7IRGQCEgEtEAgIyRfKVaa7WgBsHxEpwh06dJFRIan/3z1JEm+TmGWhUkEchcCDJ89adIktG7dOnd1TPZGIiARkAikgkB6SX6tWrVElF9GI23btq30QiZnV5YicPDgQcyePRvnzp2TJD9LkddjZbt27cL8+fNx+/ZtsaAwqiH9pvKSJBM9ofD7K1eugAsQU0hIiIiS1r59e+TLlw8bNmxIsYULFy4Un6tHLKSXAQZDWrp0qbA/zGz5o0ePTqy7dOnSePLkSYptYfspoYaGhr7z/ahRozB48GBUqlRJ2E4eOHBAPHPt2jXRRoZ0Z0qtn+rt0ONw5biiGWSDQXR4qSy7U2xsrLjgJpNEQCKQOQSMjY1hYmKSuULeg9zpJfk1a9YU+w731rt374p9lljzn9TsvwcTxgC6SA9Pe/fuFTxNSVKTbwADk5EmMPgBCSx9pHbq1Am0nd62bRtIhs6cOaMVCSc5ZghyJmprGeFy6NCh4m+SapppkOT3799f3Np+8+YNVq9eLVyKMXIipcbUhIi0ylcED4WEUwvi7u6O5cuXo0mTJona4549e6JUqVLImzdvYvsUzEjkGzdujFmzZokgTST51DrTIwKFH0ZdJFlNrZ/q7cjIWOTWPMQzMjIyzbDZWdF/SfKzAmVZx/uAgCT52o1yRkj+1atXhRZ/5cqV4h5Tx44dUbVqVUH0ZZII6BuBsWPHwsLCQih7JcnXN9p6LJ8azUKFCgkCRveGzs7OorZTp04Jct6sWTP8/fffaWraKfEpiaHOeXHj4sWLiZ8tWrRIkPxDhw6hZcuW4vOpU6dixowZOHnypPA8kBrJT6v8lCDiIkniPm7cOND/a2rtU88fFRWFKlWqiI8
mT54MCgY8cRgxYkSSalLqpx6HKkcXzfnAE5A6depkez8kyc/2IZANyCUISJKv3UBmhOTz1Jn7L+2j6Tv/zz//xIkTJ8B9h9p8qdHXDnv5VMYQuHTpEvr16ycETEnyM4ahQeTiUSAJGAdz/fr1SdpEokviT80+tfOZIeEpkXy6aKKJDBevs2fPZqr89JL8MmXKYM+ePUmyUdgxNzcXn1HwoMkOTZf47K1bt2BqaipJfgZmLbH7/PPPNZpQZaDITGWRJD9T8MnM8fEgaYsHYGJqCuMUgxbFIzoqGkY0sTAxSfEZ4TIxNhax8fHC5MUkB2poJcnX7nXICMmnkurTTz8Ff5LYf/nll0LZ9PLlS6G8cnR0FESf8yh5kgKAduMin0odAVo97N69W5wgMUlznRw4Y3ix4qOPPhIaa/VjGXaFGneGOA4MDBR+zXVB8hlJjSYxdM/EyKe0neeixcUrM+Wnl+SnZJN/48aNRPszCjbUnvj6+go7/VWrVr1ThdTkazfhOa40neKxsyEkSfINYRRybhvi42Kxc8MyXAksiNnDO8PcTCX8KxFLVZthBKZ/NxLFG3dEt08/hpmZShhQfyY+Pg5XD2/D5iuRWDzuK1hZWiQQNiMYG+eMaKeS5Gs3jzNK8nmPiYo2zhuS/GnTpom9mnsnTWuplKJp6tOnT8XexSCDVEbxRF4Sfe3GRj6lGQGaXBcpUkRYQ0iSn0NnCkMYV6hQQSwayUMZU5P/+PFjocnnwkK7+fPnz4tLuUwkwLxw27lz5yRuEVMz11GHidor2swPGTIk0+Wnl+Tz/gHJp3qi7b69vb34aMKECeJ7SrD37t0TR1bU6KsnSfK1m/TNmzfH8OHDhX2pISRJ8g1hFHJuG+JjY7BowgAc8imFQ2vHwcLcFCT+AX4+ePnqNezzOaNQPit0bvwxSrTphx5tPkLh4q7I72iPmMgwPH32HEbmtihc0Bn/bJ2PyYdDcWrLVEQE+eC1XzBcChVFPkd7mJgYw9CpviT52s3jjJB8XrxNTvJ5os6AgryQyzWVgQUZnZQKOmpc+c/Ozg6tWrWStvvaDY18KhUEGKWZHI0m25Lk59CpQpt8aqyDgoKExqBgwYKiJyTzdHnIRYZmLbSn5qVZamO//fZb8cy+ffvE91x4eFlVSamR/ClTpqB69eriQgcJNE1kmDJbfnpJfvI7A+r5iQMFHAovvDdAbzuffPKJMCuSJD99E513PXi5Ojg4+J3AGukrSXdPS5KvOyzfx5IEyR8/AAd9S+Hw2rGwMDdDTIgnunb4HG7BcQiLNMLmHb9hZre2uO0fDqN4oHTdNvh91RTMHdEbf5y5C2MzK0xdtAZ5Xv6NqUfC8eOQ2hgwYiosbaxhX7QKdv28BrY2lhpMgQwHdUnytRuL9JJ8OnDgPsTLj0pcEe6d9FvOPYmaVe5h1PDTdnr69OnCE8off/whFFVt2rSRJF+7oZFPpYJAeHi48OxEywdyNmmuk0Ony9atW8XlUhJuHglSc//LL78IN4PUEpDk+vj4oHz58uCg036fR4Ik5ly86HmmZMmSWpF89Yu36nBltvz0kvyUvOuw/926dQM1zxcuXBDedPgZF1q6AU3edqnJT3vCHz9+XGxKvHNhKEmSfEMZiZzZDnWSf2jtWFiamSLU7xWWLV+FoPBo/H1wL1oPnYebP01D6fYDMbxLQ3T9vBP6TF6CeaP7o3W3bwD3y3hiUQ0DPzTBtKPhmN6hCCYs+wODv+mGqFhLfN2nG2ytrQzebEeSfO3mcHpJPm3umUjiibFi5sW1q0+fPsKJRcWKFcXn169fFyfhVMZt3LhR7MWS5Gs3LvKptBGgOTeFyKZNm0qSnzZchvsEF4i5c+cKws5QxhxYaucVLzNsOd1HkvDyGJGpRo0awoRH8aWv9E5b7zrJ0chM+ekl+SnZ5NPzy3fffYeuXbuKpYsXLgAAIABJREFUfk2cOFEUSy00TXV40YnuM3kZl0mS/LTnM3HknY4
FCxak/XAWPSFJfhYBnUurUcx1/vIqjn0rR8Lc1BSvr+1Cu1Eb8OOqRVg4YTDKdxiBu5vnoGLnERjauTG6f94GbQfNwqppQ9Gh1xAUdbJCnkJl4RJ4BVOPhGH/skG4fOYE/vprPy4+CsTpYweQ18HW4C/jSpKv3STPCMlPyaaeijc6q6BWlXfbqITjnTGa79CMlpd0qeWnuY60yddubORTqSNAzsfTIVpsSE2+nC0SAYlAEgRozsVTIvp4NpQkSb6hjETWtiMlLyQZaQHt75dNGojluy+jQsnCMLV2wIQBbTB49DyUKV8at67fwOcj5uHhr/PxLNoCLo7WCDByxq6tyzHtm66442sERxtzfNLtW5SJuI5pR8Mwu3NJTF66DZVLF8CNR/44dng3HO1sDNJcR508SpKv3QzSFcnnHI6OjkZAQICwx6ctPu+ORURECGcWvCSpBM3SrmXyKYlA6ggwUCotO2gOJkm+nC0SAYlAEgSKFy8uAqGVKFHCYJCRJN9ghiLLGkINKP/phujH4/G9m3jk5gl6LzQyMUf9+vXg6/EUbl4hsDGPg6ldAcT4vICFQ374vvFCxWo14eJkj/iYSFy/dgWRxraoXqUSgr2e4b5XDD6qUQ6vntzF41d+KFe5GgrndzBITazin12JvCpJvnZTWFckX6mN8/jFixdijvBOnUwSAX0h8OzZMxEzyc3NTZJ8fYEsy5UI5EQEGNW4bNmywsWbIaX3leQrdr2afpKxqlxuq/xuq3vfFl5eEvzBK5pchfBp+mkoY05yT/e4NGVIHi00NT/jGv2P0799gm9y1TMMTER4tPNZrpSrwlHBXPW7Cub0+9RR8mhqsybhRlObUxs7mjXy0qeiMaaXNJlSR0DXJF/iLRHISgRoqsw7ilKTn5Woy7okAgaOwOnTp4UrUsZiMKSU20m+CLKUoLWOpwab5D0uHmHhYfD28hJH/UGBQQgKCkRoaBiM4yHMQugFRgR2SmD3CtVMpPxGRqqyEugof4tFvDAbsHewR16nvHDKmxfOLs7Cf7cok8GgjI0FcU1OsLNqThCL/fv3g6dK9BShKSUl3xlvHcvJCFE39HaxXyEhIUKjxyBNJPdSk6/dPCHJp+MGBlhMbZyJ8f3790WwK13OIe1aKZ+SCKSMAD0t8s6mJPlyhkgEJAKJCNDTw5kzZ7Bp0yaDQiW3kXx1zTwJLW12Q0NCERoagpduL+D23A0v3NwQHR4B18JF4OyYD/kcHZHP0QkOeewSNNFGKp/sCf9LrktOJPrxb/X8QokdH4/g8FB4+frAw9sLHt6v8cLTA475nFCkaDGUq1AeLgVcxCV1K2trQQwVwp9VJIaYHDhwIDFKqDIZM0PqsytvZl6kzLRZqZcC4sWLF0XMC0nytR8Nrjl0wUyHFkzKWCjec9Q/o3mEJPnaYyuf1D8C9OjUsGFDSfL1D7WsQSKQcxD4/vvvhSci+nc2pJRbSH4iuY+LA/vEaJe8/+Dx0h1F8rqgmEtB5HNwhK21DawtrWBhbp4lw8B2hUdGIDQ8DCFhYfAJ9Mfz1x7wCQ5AhUoVUa9efdjZ5YGJqWmitlKfhD8lks9os7GxcaL+9JqbZFfezAxeZtqsXq8k+RkbBc7BK1euwNraWhTAvz09PYXntnLlyiXR2tO9sxI/JmO1yVwSAd0iMGPGDKE8kpp83eIqS5MI5GgEevfuDUYRZjRlQ0q5geQrGvtX7u54+OAB3rx6DaPoGFQtWxHFChaCsZFxIuT6JNBpjau6LTiJ/yO3Z/jv+RNY2dvBpWghlC9fQZj3KFrhtMrLyPcpkfzY6ChsWfcD2nfrh7yOqijX2ibm3fDjanTvMwA2NirSpm1i3h9/XIdeffqmO6+2daT0XFREGFau+wUD+/XMVL2S5Gd8FNTfhaioKKxevRo7duwAY4nQ5E39rkvGa8lZOYkJ12OaMzH2jmLap82axfeaeZXTkPTkzQ6U2F72U/0kM7tMGNPb/y1btuDkyZO
S5KcXOPm8lggEBKgedHDQMoN8zBAQYJRgBm1hcDFDSjmV5Cu29tFRUfDy8saWzZvhYuuAdh83h5WlKjqqcgnWkPBWb0tiUJ+4OAQEB+GPowdg6WiHrl9+CTt7O3Hyo+uNLyWSHxMViQnDv8bI6UtR0DlvuuBi3tHDBmHGwhWwy2Ob7rxDhw7D/EWL0503XRUlezgiNBhfj5iMVUtmZ6peSfIzMwqqvHwHGDukfv36ePLkCbZt24bPPvss3SdKmW9J9pZAHBgRffHixbh16xaqV68uTjjGjx8vsEjtbguf+++//7Bo0SLxXLVq1dCoUSNUrVpV5+uHLlBSzLW2b98ugovyFIf9LFq0aJI1Wxcmdbpob/Iy/v77bxEQVGry9YGuLBOYNk2FgvJTYpIjEOCivXnzZrHwGlLKiSSfbY6NiYXb8+e4cPYcnMytUbFkaTjmSdBC0+zEmPbuQFx8HOJi496FPME0RfhxUS7nJnhzMUm4HKueSWjJoLqMq2virZAdbtYeb7zwn9tThBnFoWmL5siXN28SU570zB1FiFAuHivtPnjwYBKbfEHyh/XHd9OXopBLPlFFXBw1irHisrCZqanGapl31NCBmLloZSJhjo2JQWxcLGBkDNOEC6kpFcC83347FAsWL3mbNzZGmA5x8ExNqM1Mv3edtDCKCA1C/+GTsXrpnCRtZp2KBjStMvi9JPnaoJT6M3yvSE6XLFkigloxiBU1pdTmv0+JONAM5NKlSxgyZIggkQxSyWjzTPT9z8CcyU/5+I5zHrZr1w41a9ZE6dKlRfBKnozwQjjnM4UHcfk/hXUtqzFme+nulG1jsE16+uJ4kzi7urrCzs5OzAMmmlw6OTkJZYc2pxlZ1RcKYTyZlyQ/qxB/n+qhFl/xsf7smdTm56CxpzeTs2fPCm2FIaWcRvLZ3vDwcJw5dQqP/72HTs1bw9rSElFRkfDwfAWfoGAYm5jBxbkAnB3tEeDrjeeerxEPI6EZU7l3NIaJiRnKlC4DGzMTBAf64aW3NyKjY2FjY4dihQvDxtJCLcJqPLxeu+OFty+KFysJZwde0NVfYjt9Avyw/dB+/K9VC9SoVfMdrb7i6554KJui+qVj/s7jcNo5BwUF4eXLl8ifPz/Kly8PbUh+uN8rrFi3FX0GDYazvWYvPCmRfM+7Z7Bl3xlE25XCuIGdYG6aslvJlEi+1/3T2LzvHExdPsCwnq1gpiFvZtBPieTfObgaRhU7oXyx/DDRUrCQJD8zo6DKS5fC9LRD8kqzHZI8uiUtUKBA5gvPQSWEhYWJSPLr169Hy5YtQS03BR/eK5o8ebJw2cj4KnPmzBERV5XE9/zevXsYNmyYuMxMMs8TY0YAbtOmjVBgzJs3D9988414//WhoEgPzFy3KMBQe8+ffn5+QpvPNWnlypVCwKldu7Yg9fS+9NNPP6FkyZLZ3m71PnIt5aVxSfLTM/LyWe0QoPZ++nTVs1OnSm2+dqgZxFPUSNBThPoCbQgNy0kknxtWUGAg1q/7CQ0qVEWlUmXFZhARFoif1szGT4fPIiA0DEZGJnApVAILpi+E++U/MGndVqEdjoyKgomJKUxNzWBumQc//rABReLeYMj0GXDzfoPomDiYW1qjWYsuWPrdIFhamKsc7CAeO7ctxZi127Fo9ip0alQ7ydCJk4D4OJWrHWqDE0yFVKSbQadUnwv70wT/8QohV/88+XyIiY3FxX+v43VkMLp2/RIWlhaiDG6U1M7RLpQXwKgVU04jOJ5Pnz7FtWvXhMaMwYEYn4GXF3mJkQKBJpI/YtoSYa7DOmKjI3H67Dl8UPND5LfXLNQoJJ/mOnlsbYRLohB/b8QiFnOX7cCsqd/CXMNJgELyaa6j5I0I8cepY3/hXkBeDO3ZAuZmmk8R1O26k2OXmuZPIfk011HVC6hI/heoUDw/TE3e3uFI7R2VJD/zK5gyb2/evCk0tyS4hm5ml/lev1sCBXFq4X/77TcRbIlmS0uXLsX
06dOFu0Z6xBo9erQg7p9//nkSkn/79m2MHTsWe/bsEcLSyJEj0bRpU2G2Q0cPjIlx5MiRxAjA+mi/tmVy7WKbqIDgyYWPj4+IM3H48GGMGzcOdFBBAYXCCIUarlVUTGS3cKLeP5qXsW2S5Gs76vI57RBQtPjqNvlSm68ddgbwFI+fqVXl0aMhpZxC8kkGeGS9e8dOVCroilJFiqmOcOPjcOnsX/jq+5mo3bA9xvbqihDvp5g0bwYCbMpj19K5MI2NRFRUML76ugveONXHwUUTYWlqCjs7G3w/sT/+vO2NWRNnooZrfuzdsRprj17DomXb8VnVYglaXc0kn2YtQf7eOH35Il75BqJQkVL4uHZt2FqYIjIiBBeuXMBTT29Y2zqibs06KO6SF3ExkTh/6SweuXvCOo8TPqxeG6ULO6v88idL9MV/5c4tRFqbonmLT8Rm9/z5c/zxxx8i5gI3SNowc7Pk8T41gs7OzkIryFMjXuBTv8SoySZ/7OBe6DtyOvLnZXRZY9g7OODKpXMoV6VGmiR/xKB+GPX9LFhbWcHI2AQ2VmY4duggqjRug2L5bNVORJJ2jiR/0MBvMHHKtMS84b4eiLe0wfp1v2HitLGwNk/5fYmLjcGTZ24oWqQwfD1fIM46Pwo4mMPtlQ+KFS2cqnBAkt9n8BjMmTYB1tZssyncz/2SQPKdRV5tzIQkydfNSsY5mZzk66bknFMK1+FOnToJrTW17iT0FNI7d+4s3l/+TZMWau1HjRqV+E5zXfTw8ECHDh0wceJEcRLSs2dPrFq1Ci1atICvr68gz/Pnz0fhwoXTTZbVTwfVg8xxHVKdjKbPfJHlcd2aNGkS1q1bh127dom2sW8UVEjyKeSwfJrv8JTH0Eg+FSuMMSJJfs55v3JGS9W1+EqLpTY/Z4wdIDQsXMgNLeUUks92nvrnFALdPNC0TgPVZiVs6WPw44rxWLD7NH5YsR2tqpYU2vOL547g+lNPdOzQHQXszBEVGYiW7ZvAK///cG3zAlgaGyPC7y5qte8B53pf4ci8ETAzMUZ04Ev8uOcIilX6CO1rV0jYTDWR/HgEB/lj1MTBOHrzMawszBAeEYGqjbpj/fgB+OfPtRi1ajsKurggMMAftoWqYP/aZTjx82R8//sZOOXLj8jgQOQrWhkH1v4gCG1KGuiY2Bj8vH8XmrZrjbDwcHE5j5o/bpjc1GnbWqVKFXFUr2hBNWlDUyL5JMzj+rTF4RvuMDY2gZmNAzb/sh1hPs9R9oPqcLC20jhtmXd495b45+5rcUpi6VAA43s0xr4rr1GiiAu+HTkOTjYpm+sw74AvmuPSozcir03+EtgwZwi27DyMUvU7ok+7OhoFBI77iNGTsGjedKyfORK29fugWwNHTFj6J+ZOGgbTVMx86F2ne4dP8OBVoLhz4OBaA8v6VsOmf16hQqUa6Ne7C8xN074LIEm+blYzSfJVbkRJ1nkBlSZMfJf5d6tWrYSZzYABA8CAinQ9StKuvk5wbeQJwM6dO8U+w7WBawJJPteIgQMHYurUqRki+WzX+fPnhbKgVq1aQrPOtSZfvnxC+06HElyD0pNolkViz/sGNMuiwuLUqVOC5NPUiF7o2G4qKngCkdytanrq0tezIsZJfGpnifqqWZabOxFQtPi0xXNzU/WxeHEgMBCQ2vwcMebUzvz4448G19acQPK5lNKLzvq169Dl45ZvfdwLl3PRmDP9a2w6dR9bNx1A/VL5BfmPjVNdtiWJ499RUYFo1e5jvHZuimubFsDSxAiBD0+hSq8R+LjrJGwc2knYfzMqLvMmRqcVpaRM8uPjYvH4xiF8+t0UfNp9LMZ0bIIf187BlhPXsXrBGhz/dRp+ueqPHWvXwtT3Ea489cQnzVphyrCuuG9UBBvnzESQ+13cfhmAnl2/hKP5W627+kRh/30DA7Dhrz8QGx8ntPa0ZeXn3Hxpy8pNV5sj7ZT95FNYikVcXEJ43wShlKY3qjsMmgmvytw
iaV71k4PULvullFfpg0pIYXTglF+ZRHOnhJhlbPrbZ6ldTF+b32KnyqvNRT9J8hPCPYvb60rwuITxSv53KiufJPkqkk97dDpooPaemm4bGxtUqFAB/5z6B2vXrMWKFSuEyU337t2ToPn69WsMHjxYXLhlnm7duglN/gcffCDKzQzJ5/6wfPlyUW7fvn2FKRDLp2kRhQ3WWbBgQa33Nb63FFQYHJJmSNTg8z7B3bt3xd0D9p8nEY8fPxanE9T6u7i4aPU+at0IHTzI/VySfB0AKYtIQGDzZtUvvXu/3clo6Kv+uQTLoBEgaTBEuT+nkPzwsDDs/HkbOjRq/la7m6DJXzRnMNYcvYmN6/aicbmCQpMfEOCHiJg45HXKD3MToxRJfqjbRXzw5SDU+HQkto/tITT5MVFheO0XCGtbe+TNY/OuJn/WKnRs9KGK+sfF4u8d8/DND7uxcs0BtP2gAI7s24CBi3/CjGmLUcLIExNW/Aif4HAUL1Eevbv2QdsGtfDvqe2Y+NNv8PIPRPGSH+CrLj3xRZMGon5N5DIqJhp7zh1Hz759xDO0aX3z5o0g/LS313Yj1BTx1qBfHgNt3PtI8jl/osKjER4SIbxWvRULVYOkiFbK5yn9/Y48EB+PBw8ewNfXD/Ub1KdoKaSGt2W8Fdj4qeqv5M+8+xkFNwsrM1jZWgqhPRVZNdtnGPcGem2ZOXOmeKepLafXIQrz/fr1Q8WKFcX3NHGhPbh64lpAbzp//fWXMAdt1qyZEBh4GsD1fcKECUKzT625NsJrcjCS71v8m9gqn6e3TLaXJkf0rMN1ixp8egYisWc7SfR5j4CnBMOHDzdId6pCCSE1+dn+3uTOBigrlbjNJ1NOQUCS/IyPFDcTetT5ffMv+LwxSb6a+Ud8HPb/sRLDl23B6HGL0bdFPcTFRGPmjCE48zQEm9f8ilJO5u+QfCtTY0RH+qNtx1Z4YVoOx7esgZOlCZ5c2Y/eCzegY98ZGCvMRVRkghdvR6/5FQtmrsTnH9VK7My/Jzah04y1GD99A3o3qoid25Zjypa9WDp7CYpZmyA43gx5TKKw9ZeV2H3lMVYt/QX5jAIQZ2QGc6MobNiwBAdve+PvvftRMo+1Rg00Sf7e8yfR++t+WmnsNaGtkHwevTs6OmZ8UN7znJyTvIB38eJFcfFZcW2Y3ojBOQVG9jc2Jg6Pb7rh9TMf5CvoCBMzvofJT0wUCp8SzVdU/m8JOfOTuJP4cW4Kk7MkokLyPOqIaRIxVM9QCAnyDUYc4lCzWSVYWKnM2Qw1KfeOuNbRBEaZSxQmSfZpB67pxI7YhYSEiLWBkYSVkymWybx0+MBTP0NIbJPi+YttVdx78nNvb29hbsTPeIdAmMUY4JhJkm8IMym3tkGS/Bw5spLkZ27YeNlp9cof8EWjT5DHWtGwq8r0cn+I/qOG4EmQEf5XpzaCfdxw5tYDfNp1BBYO6gpubcnNdazNTIVpzs7tyzFu9c+o9MGHKOXiiHMXz8DYoQS2r12Pko5WSbzrjFi+CfXqfowyhV1EvfkKl0X3prXQf/g3cI+yQZsGtXDy1FEY5S2HLfNm4scFI7D/cQQGdmyLl3fP4M+br/DTklXYuHQ0HoTaokeb5rh18QjOPg/F37/vRGFbVRCv5Ikb+EsvT9z0eIruPXpkCkiWtW/fPnE5lxusppRRLV3y8liOLjdpQ2oXLzpT65rbSb4wjYqLx51zjxAdGYt8hRx1OqaZmtBpZBYKgpBI+Hr6oVqT8rCxT19UZqV4xWUt7clJPBWhThsTOX32T5adPQhIkp89uL8ftUqSnyPHWZL8zA0bN1m6irt+6hw6NGmRJEATbcIjQv2wdPUS/HXhGmzsndGtc290/+TjBPeNtN0PRY9en8PdoT5OrJwCc9rqA4iOjsTVM/uwaNsuvPIPRuWqDTBmwCCUdnFI4if/8J+bMenHrQlH1CoiXqhMbexaMA1+no8wc+UyXH30EtVqfoyZ332HvNa
mCAl4g8WrF+Ofm3fhmL84Rg4ehQYVSyAuMggLVizA4cvXkbdgaQzuOwTNa1RU2YIng0lo96IisXHvTvQa2D/T/sNZ3qNHj4SXDl2S78yNbs7NTbMKegFRvIzkRk0+CX6gTzAeXnVDAdf8OXKwoiKi4OcVgA9bfpCheU+XtTSJobkM3Xy6ubkl2sjnSEASGp1eE1K5ZqiAkyQ/J896Q2+7JPmGPkIptk+S/MwNmzAXiI3F4YMHYR0JVC9fSURTVXfrJrRt9FePhIi3aqRZyW9kTF/16rbvvDjKfyr7YpanKeJtoi/8hK68vZyruuhLMkTNnnLZNPFiqbgUqqqXRF71Oduq0nIrn6ekBaeZzvHL51GsUlnUrlMnU6Y6Svnql1YzNyoyt7oXI457biT5NHs5u/caCpUoAHNLw3IBrO0M5Jz38fBDpfqlYWmjiqiqbeK7eubMGeGzfsSIEeJ3un6ke0d6fsmpiV586GKT/VNPyfcq5W+6gVZMaHJqn3XVbknydYWkLOddBCTJz5GzQpJ83Qwb7VV/3rIFBa0dUL9qDZibqtxO5iYNkyIEhEVG4Nilc7AtmA9t27Y1uBgLuhnR3FNKbib5B9efQpWGFXLsYPGdCngThFLVi8LWzvrdqwSp9IzKBV4O5YXXQYMGiQvvDRo0EAGq6OZR/RJqRs13aKPOOx7aaNZtbW1FUDtlzaMpo+JSN7UB4vPqtvmMprt///53XDtr2qsYd4NCjmLbz/ay7rQS7euFX/kE7qLcZ2Gf00o0KbRi/A0Ds8uXJD+tkZPfZxwBSfIzjl025pQkX3fg0y722tVrOPLnX+jWqgNcnPImargNbTNIT6+Fhh08sYjDU/cX2HXikPCmw4i1uVFDnB5scsKzuZnkH1h/ClVzOMmnyVHJqkVgS7v8dNy/paabXl9I6Ok6kRGl6b+ed1voQ54uH3k5lp+T/GdkDWK0XwoNxYsXT0KGlXmv7B+8RMt2VK1aVTzHNYP+9C9fviwu66rvM8n3HGrumzdvnhh1/dixY9i9e7cg+aldcFVOQdk2+rGnBx9i8vvvv4u7PerkXalTXQBhm7t06ZK4RjMvfeMzAnfy59XfcwY/pPcdetvJqPCkr3VDknx9ISvLTepCU+KRYxCQJF+HQxUPYZbj7+ePv48eRUxwGMoXLYEShYvC3CzlgFI6rF3nRSnmM+GREXjw/Cmev/GEvUs+tGjZMonGTucVywJ1ioAk+TqFU6eFCe1xBkk+8544cUIElKJd/pIlS0AtOE12evXqJQI5PXv2TJjv0L99Rggpfd3TV3zdunU1CglsB6NdU3uuTvKZl9HUaUqTmoBBN6WMqEttPpNC8nkywBMKdaKvXJgnIWe/GJG4WLFiSUg+A2M1btw4VQUE7zKcPHkS7du3TzRjVMyf6DYzNazYJ56a0H1oRjDV6QRKVpjOSX5kTCzuefrj0ZtAPPUJwnPfYHgEhuNNSDgCwiIRFhWDiJgYxMTGCztP2pTS57K1uQnyWJjDwcYCzrZWKOJgA9e8tiiZ1w5lXRxQ1lk12DLlIASkJj8HDdbbpkqSr/thU8xaQkNCsHfvXrg/dUPrBh/DJW8+WJiZqzxgGKgpj9L2mNhYRERH4am7G05cuYAadWqjWfNmIuCXsUnKkWJ1j6QsURcISJKvCxT1U0ZmST7NYRhVlmSeJHnDhg1CC87AUCT8CslnQKeMENLsJPl29naYMH5CIslX36tIyCnM0FWsJPlJ9/NM+cn3C43EqcceOPPkNS4988Z9Tz+UdHZAQcc8yJfHFnnzWMPRxhL21pawsTSHlZkpzM1MYJKwofEiWUxsHCJiYxAWEY2QiCgEhkfAPyQcfiHh8A4Mhod/CHxDwlGlSF7UdXXGRyULoFGZgrBIJRy4fl4/WWq6EJAkP11wGcrDkuTrbyQUwhzg7y+OzF8+c4OPpxeKuxRCpVJlYG2psulUCH9W2vCrTHAYdDc+yb/AkGD89/wxXnp7oUCxwih
esqSw+bXNY5vr7hjob+QNq2RJ8g1rPNRbkxmSz3JI6GliwujSJPj8nRruhg0b4siRIyKK6/Xr1zFjxowcS/Jpa5/8JEAxrZEkP+nczpAm3yc0AntuPMPe2264+twbVYs7o3SB/ChdwBElnZ1SDdOd0VcrPCoaT7398fi1Lx57+ODuKx98XK4wOlRxRfsqxZHH0jyjRct8+kJAknx9IavXciXJ1yu8iYUrhJ/2nLf//Rc3rt+AuZExCjjkRX57J+SxsYGtlTWsrawSg2qpXqlk7iupMNGiyapwPPHiP9Vvqt8FsUc8omNiEBoWhpDwMASFBMM3OBBeAX6AmQmqVq8mQs/zEl1GtH9aNE8+koUISJL/LthpxTVI63tdDV9mST4DTZHg02yldOnS6Nu3r7CfnzJligjgFBYehrp16mLo0KEZssk3BE2+JPnaz7Z0kfwzjz2x8cID/HXHDfXLFEH1koVRvXjGwg9r38SUn4yIjsHN55649cwTl56+wufVS6JP3XKoV0IV/EUmA0BAknwDGIT0N0GS/PRjposc1ETRLjQsLAxvvL3x8MFDPHn8GC/c3FAwrzMKOxdAofzOKOTsgrwOTuDrpdB7hfwnaYc68xfEnhp6FaGPio6Ct68vPN54wd3LEy9feyI0KgKuJUqgVKlSqFCxIhwcHUQQKhHZ08A8RugC7/e5DEny3x19xftKSgSSxJvfK8Gl9Dl3MkvyxZuecBqnnAQqf3ONUT7LqLAuSf67o5/jbfIP33uJFf/cgUdQOBqWL46GFVxhaWYYYYcJd0hkFM7ef4GzD56jqIMNhjaqiJaViunzPZRla4OAJPnaoGRwz0iSn71Dou6aTtH200e0v58f/AMCEOAfgODgIOHrnpH1UEBDAAAgAElEQVRw42PjVO7sVAwekZERMDczh6mZKUxNTGFCs8YEv/cwNoLwzs97UNbWcLC3h6OTE/I6OcHO3v4d8xtJ7rN3LuirdknykyLL9+fQ3wdx49Y1TBiV1FZdIcjLVi9Ci6atUKnCB/oaFlGuLki+PhsoSX4uIvn/vvLFrMM38NgnCC2rlUXdMkX1OXd0UvbFRy9x8MYDVHJxwJTWNVHOxUEn5cpCMoDA9OmqTFOnZiCzzJJdCEiSn13Ia65XIf7JfyawAuHJSiEjp0+dQuXKleGU4PZNaPvVtPoqi5+3HyhEXhJ6wxt3fbVIkvx3Sf6+A3tx+dpFzJg0G5FRkfD39xPvlJNTXlhaWCI4JBgW5hbiXaIgzVM3nojldcoHK0sr0F0uhe+IqAjY2znAxtoa4RER4gQgLCwUTo5OsLCwTPNUTJ3kW9tZCreRPN2jD3aeqmV3kiQ/l5D8ZSdvY/bh6+hctzJaVC2d3fMq3fX/deMhdl+8i7kd6uDrBoYXGOPFfx64e+ExXj97g4iwqHT3T2bIXgQsrc1RoER+VKpXGsXKF8rexui4dknydQxoFhVHcsCgLYsXL4aNjY1wNacEg8miJshqcggCkuSnTPIvXT2PUUPHYt6S2fDz9xP3T2xtbDF1wgws+WEhPm3ZHh6vPbBu0xoUKVwMPr7e+KBiFUwaOxXbd27DH3t/R+FCRRAYFIhpE2biyo0r2L5zq/BANW/aQpQpXU4rku/vHQjXDwoiNDIEGzduxIsXL7Bw4UIRqCm7E+366UKzXr16abrQpCBUpUqVRB/z9LHPOwNFihRJFYf//vtPuNBU+nv8+HHs2btHeAuidx1pk6/9LHjHJp8uML/59TReBIajW8NqcLaz0b40A3vS3S8Iv5y+iVpFHLGy80cG07pTOy/j1WMvuFYogvxFnGBhnb7Q1QbTkfe4IZFhkXjj7ofn991RuLQLGn9RO9egIUl+zhxK2tsymMvgwYMFOVmxYoUIACOTRCA5ApLkqxBRN42jJv/i5XNo0/JTrFi7DNs37kBMTCxGThyGnl17Y+9fu/BZ247w8PLA8tVL8Pe+f4Qmv0f/Lti2YQc69WiHTWu3Cbfg85f
OQZlSZWFpaSVOB5bN/0GYzmlzWsY2vfHwxQOP2xgxejgCAwNEEKshQ4ZolV/fs12JHlu/fv1UST5ddT569CgxaJY4oQgMRIECBYSLy9SwuHPnjjjBsLS0FN35999/hWtMewdJ8tM7vklIPn3Yf7H+b9jZ2uDLj6qmtyyDfX7DyauwMTHCr73/l+1tJMEP9AtFtRwckS/bQTSwBtw8cx/2Tja5huhLkm9gE0zL5lCLv2PHDmzbtk1ouhjePrWNWMti5WO5EAFJ8iEioT5/8Uz8LFmiFPb9tQcXL59H3dr1cef+bUwaPUWcjFGDX7Z0OVy6ekGQfE9vT5w5dwqLZi8TZj39hvTCD4vWolm7xqhTsw6MjI3hH+CPRvUbw8bWFkGBgRgyYJjWBJ1k2Mv9Ddz8HmHLtk0iQBPdXw4fPjwxSFNyTz/qf/P3jKzhKeVRj/Kq1EFNPDXsqUXM5bMMhkV3nQULFkx8g0JDQ1GhQoU0ST5JPU8MSPJZ1uPHj0HiL0l++hejJCS/y8ZjMLe0whd1K6e/JAPPsenkNRSwNsOaLxtmW0tponN2zzV81P7DbGuDrFg/CJzddwUffVYzV5juZGSD0A+qSUulZocbskzvIsCNkJsviT03V44hw8LTTZ402ZEzRmry350DcXGxWLRiAXz9fDFl/HT8uuMXvPbyRKMGH+Pn7ZuxfP4PiI2Lw8gJwzG4/7fYsWd7Isk/f/Es5s9YjKgEkr9u5SZ06/sFNqz6WdjNUwhwcS6A+w/vCTv9Qf2/TRfJD3gThBJVCsMqj4WIYLt161bhFtPOzi7VyZx87dblWq6URZObe/fuaRXxlvcUGPFWSZ6enuky11H6K8x19uyBEgxLmutov6YlkvxFx27hxGMvDPykjva5c9iT8/eeQv965dCnXrlsafmhTadh75gHRcq+lWyzpSGyUp0j4P7QE4H+wWjVp5HOy87qAnW5Meiy7ZLka0aTJJ8b6IIFC+Dj4yP82Ts6OmLu3LkwMzPT5TDIsnIBAlKTr9LkX7l2CWO/H4VKFSvD/dVLTBw9BSVdS2H4uCGws80jLqibGJtg/oxFmL9sLtq37gBPL09cuHwOc6YuQHRMNAYM7YMfl2/EirVLcefev7C1zYOnz59g2bwfcP3WdQQFBWBAn0HpIvmBPsEoWbUIbOythCabGnC+0/LirSrirST52i9CguR7BobGV5q5A/O7f5KjbfDT6vZTLz+sOHQR977vDEuzrA/DvmnKLnzUrpa0wU9roHLg97TRP7v/KvrM6JgDW5+0yZLk58whpBDEi27U/FWsWFEck8vgVTlzLPXdaknyVQhTm0/Tmjv37qBG1ZqwtbUVn8fExuDu/TuwsrRG6ZKlhZlMXHxckjB04rOEk0Wumfz9lccreL5+hZo1PhTCgWLikh6f9NKFJvDgwQNx8ZYXbZmOHTsmIvlKTX76VwZB8hf9fTP+qkdgrrLD1wTFT8evoH3FwuifDR531ozejjZ9m6R/lGSOHIHAgY0nMWjRlzmirak1UpL8nD2ER48eFdFpecFNm4t+Obu3svUZQUCS/IygljV5JMmXJF+XM02Q/GbL/4z/X7XyqFQkvy7LNsiyrj3zxM3Hbtj3TYssb58k+VkOeZZWKEm+fuGW5jra4StJvnY4vc9PSZJvuKMvSb4k+bqcnYLkF5m4NX5B909gY5n9gRZ02bmUygoIi8C0HSfwdEbWa1wlydf36GZv+ZLk6xd/SfK1w1eSfO1wep+fkiTfcEdfknxJ8nU5OwXJdxi1MX7L4M91Wa5Bl3Xm9iMsbF8ry9soSX6WQ56lFUqSr1+4JcnXDl/ar5YrVy7NgDPalSafyo0ISJJvuKMqSX4qJN/ODuPHj4eJick7poi8E7Fr1y5cunRJuOgcM2aMcDrAzw8ePIjGjRuLfJoS7zPRXWn79u0TXZUy75kzZ1CzZk3xmaYUHByMN2/eiLtQ6bl/kRWzMJHkr+7b9r3Q5IdGRGHwxr/gv6h
PVuCbpA5J8rMc8iytUJJ8/cItSb52+JLkly1bFkWLFpU2+dpB9t49JUm+4Q55TiH5derUSXV9oSvf6Oho4UJTuef1+vVrBAUFoXDhwqnm5cXbUqVKvXPxlqSdsT9SItLEjZFyX716JQJwpUTyUyPgdPeZGslP7X4T3RfTr3+lSpUMl+SPbdfwvbDJv+v+Bgv2n5Ek33DXuBzbMkny9Tt0+ib5LF9oPRL+aepNTHS0cK1nbGwCY2Mj/XY6A6VLkp8B0N6zLJLkG+6AGzrJp8b61KlTcHV1TRVEX19fQeZJfNV97N+9ezeRvCsFJHf2wLz16tVLjAugeNdR1mj1ipPnJX7JSf6ff/4p2qIpsQwGPvPw8EjU5PNZlnXo0CEROTy1GAQ8BWCAsMqVKxsuyW9auSS+alTNcGe+jlr28+mbOH7nqST5OsJTFvMWAUny9TsbtCX5XJgTw9WTjBsZib/Vo0IaGXEBfxvWnn/7vfHAIzd/1KxeEaYmxkmeVxZ8/nx85yK+HbcY67dvR1F7M9VzFAwSup88+iQ/Tktw0CVykuTrEs3cWVZuJvkHN5zCBw3K59hTLK4fAW8CUbp6MdjYWSNxYTGQqUgy7OfnJ9Y9Tdpt5TsGs2LUWiVRs+/v759qXmW9dHJySjSv+eeff7Bv374kwRBpShMRESGeoYZfXUvPU8zvvvsuMX9AQACoqU8r0eUw26z0i/1ge9nn1BKfI8m3srIyuHmXaK7DX3j51tnOJi0ccuz33kGhGLvtqJhg0lwnxw6jwTZcknz9Do02JD8+Lg5+r91w6+EL2JoZwcgmH6pXLovY2Bi4P32IJ+4+qFKjJvLZ2yAixB83b94GrBxRrVJx/Lp2PmZvvohft6xCrcql8d/tG/ALi0eNalUQEeSDa7efIZ+TJYxNLfD773vw3aSJcDAOx5Ur12GTtxAqlSuFAG933Lr/Eg72ZnAtVw2hno/w1NMfZStVQzEXhyzZACTJ1+88zA2l51qSHxeHSwf/RV4XR1jaWOTIoSI/8Xb3RZVGZWFhlfudoWgzSIqSRlGg8OezZ8/QsGFDtGzZEqtXrxaBwhRyrs2JrDb15oZnEkl+xzoV8czLH8Nb18sN/UqxD8sPXkAJF0fsunRPkvxcO8rZ1zFJ8vWLvTYkPy42BtePbMbng2fBtbgrPD08MWbZLygbfh1Dvl+O/C75EGVRENs3LsGckV/jX68YmIS9QZeBI3B2x0ace+SDISPGoKq1G8Yt3wNLcxMUrdkWfZoUR79h01CyjCv6DZ2AZVMnYfP+P7B0cEc8jcyDiOAg9Bq3AKViHuLrsQtQopQrBo4Yg/lTJqJMaVe8DojG73/shKsztUT6xUmSfP3imxtKz60kn+QvPCQCt/55iEIlVSYWOSmJ9gdHIDwsHNWaVNT7WpGTsFFvKzXrnTt3xv79+2FtbS0CANaoUcPgTGUMAd8k3nWWHboAF3tbfFn/A0Nom07bsP38bXgFhmBEq3rotXq3JPk6RdcwC5u3ZDYePP4Pm1b/kiUNlCRfvzBrRfJjonHl0Eb0n/0b9u/ajq2LxuHAAxO4mrghzwefYv7IL9Hni9Zo0G8e/v19Ll7F50OLhh+i9WdfIOT5CfSe8idO75qPVvUawqFSPbjms8Tpc9cwcNhwLJy7DEfPn0HEy9vo0380Js0ai7GjJuDg6at4cfE3jFlyDN8P+R8mzNiIQyePIuT5JXw1cBJatmuPKjXro/OnzZDH2kLvxEOSfP3Ow9xQem4l+Ryb+Lh4PLj6DMF+oXAq4AhTUxOVyUs87eY0/FQfVG2fVX8utUmRWr1KGYy+Gx+PEP9QBAeE4MOWH8DM3DQ3TDW99OHy5cuYNWsWuCcUKlRI2MIPGTIEpqYSs+SAJyH5kdGxWHbwPAo52aFnw6p6GZzsKPSXM7fg4ReEEa3rw8LMRL8kPyAAcHBIsZv68K7zxscbtRq/HStTE1Pkz5cfTRo1xaTRU2Brmyc7IDeIOiXJz9g
w5OSIt3EJJL/vzG3Yv2sHfl4wGifd7VHC6DFMSjXFwjE90PuLtmg6bCVKxz/DM09/HN+1EaVbDsIXH1riq/E7cXz3YnzWuDFKNumMT5vUxBu/cBS0N8LISStw/eFteN44ju69R2HK3IkY/d1Y7D56Fq8u/IpJP13G9wMaYfK8X3Hx5gWEvbiFQ/9chr/7A/y0/RD+OHgM1csU0jvJp1arTJky0oVmxqb/e5ErN5N8knmSP5L8K0fvIDoiWkX0U0gK/xbCQTIZIKXnScRpEkiNu4mp6dt7OMkeVuPuWpnUx8XFIyoyGpXql0bxCoVgKgl+qu8hvdlwDH777Tf06NFD2Obb29tLTX4KqL3jJz86NhY/Hb+G0Mgo9GpcPUfb6NMGf8upG7CxMMfXTWvCLMFHql41+SNGAI6OwPDh75B9fZL8zWu2ony5ioiNicHjp48wfupoNGvyCWZ9Py9XblqKjV5qLrEkyc/Y0Oc0kp94yZbasNhoXDm4AZ/2+17YaMYZW2PNH0fgGnIZHXuPRGS8EVxrtMDeTfMwcXAvnL3/GjHBfvh6ygq0reaELzp/hcpNumB0uxLoNHA6YoxMUaBiU0z5ugm+Hb0Ul+/dgPftU+jZZzS2Ht6PVYPb488br2Bkao1Za7bCOeAGvpu9FWevnIHXv0fxRc8hsHV0REC4CQ4dOYySBR202vQzNnKqXLRV5aU1bnoyqRDIaWYb+h63XE3yE8AT64LqP61TauSc5dBM5Mjhw7h85TKmTpkKYxP6TtdsEpQW2X9HsBCevbRu7nv9IC/ebt68GT179hQXbA3NP72hDI7GYFh/3XiI3ZfuoUu9ymhRtbShtFfrdhy59Ri/X7iDz+tURNvqZZPk0yvJf/4cKFFCRfBJ+NXIvj5J/p87DqNKpbca/Wlzv8f9B/fw++Zdou+vPNwxaeZ4XLpyAXny2AlN//djpiZq+nfv/wM/rFsOd4+XyJc3P77u9Q369Ogv8s5eNAP+/n7ilvw/Z0+KW+ozJ82Bp5cHtv7+M/z8/fB1728wqN+34vkLl89h1sLpePL0sbCXa/1JW0wdP1PcgKerqZkLpuKvw/vFTfmqlath+qQ5KOlaUuS9decmZsybgnv/3RU31T/5X0vMmDRHELa/Tx7BrAXT8WWn7liyaiH2/noAFctXwur1K7F520YEBwehds06mDN1AQoXKgKS/KfPH6NCuUrYvG2DKL/bFz0x7ruJWs+j9DwozXXSg1b6n03JXIdziP8Uoq/Y5A+YuwObfliEPI754Fq0oNC++Xl7wtMnAMVLlIKdtQWiI8Pw7LkbjMxtUMK1OIziouH+4jlgngdFCznDy+MF/IIjUNy1BOIjQ/DspRcqVq6E6LAgPHvujpLlysEsPgpPnjyFmY0DihUugNAAX7zw9EWFihVgjFj4vH4Fb79gFCjqKi77ypT1CHCTIwGQJOAt9u8DydfHTAsPD0f37t1x5MgRXLlyBRUqVJACpD6A1qJMSfK1AEnx7KYp4q2bTwD2Xr4P7+BQfFqjHOqWKapdqdn41MVHL/Hn9QdwzmODDrUroHi+d01nBMnP80J/rdy7F7h5U1W+GtlfM+sQ2vRtotN6FXMddZL/0v0FvvrmS/To0hv9vvpa1Ne+a2vUqFYLY4aPF0db343/VpD5xXOW4+nzp/hf24/w4/KNaNLwf7h+6xq69++C3dv2CxJOsvzL71uwcdXPqFOrLhYun4eft29Cv68GYMTgUYLU9+jfFVdP/wtHB0fUaPgBRg0diy4dv8QbnzcYMLQPOnXojF7d+mLO4pm4+e91rFy4Bg72jlj541LsP7gPJw6cgYmxCeo3q4VPW3fAd0NGw8fnDXp83RXdO/fEgD6D8M+ZExg6ZhDatmyHoQO/Q/68+XH81N+YOH0cNqzaghLFS2La3Ml47vYMe7cfEO3esWc7BvYdgh5dewkBp8/gnji0629B/HWdJMnXNaJJy0tO8kns6dd49+7dsLFREej4+Di
8efEfTlx9hA6ftoaFmVmiZkzdtaXq2bc6PnWXafxO02mG+ueqPHTNKXKIejTXITV0+p0dmkun2z5GrJQX8yTJz8wcJKk8ffo0WrVqJRQLw4YNw5w5c4TySqasR0CSfO0w16jJV89+8/lrHLn1CIHhkWhS0RUNK7jC0sxwLjhERMfgzP3nOHnvOeytLNCiahlUcy2gEQFB8hf31Q4hXT1VvDi2fjISjUZ01lWJohyF5NtY28DYxESY64SFh6FT+86YP3MxaKNP7XjHHu1w/8qTxAXpxr/X0alHezy48Yz0BL5+PnDO75LYtk86NMFXX/ZBjy5fCbJ87uIZUJBgoja/1zfdcOv8fTjYOyAmJhqlqhbDXzuOoEL5ivigTjksnLVUkHEmkjMep5EAVapdBhtX/4y6H9ZP/K5ynbLYtOYX8Zmvny/y2OYRmnumKbMnwc/fFz8sWptY7/m/rwhNPVOvgd1RtnQ5cf9AwePcxbP4tFV7IYwcOXEIJ/86m9ivOv+rjsljponvdZ0kydc1oknLS07yucgzsMrNmzfRvHnzxIeTE21NrUqJyCufJSfzit9n9Z/66q2htCslP9jJP0sVr4TLhPo2lUkNL47Rw4cP4ePjI4LcpBbWXl/jaYjlSk1++keFp9DNmjUT0aSrVKmCgwcPYufOnYkBm9JfYs7KobJ+Sqf9kx67GBcfhy2btwhzHXIfxkMxiGQEw2lLWpr85IDdf/UG/9x7jitPXqFumSKoVaowqhcvkC3HVdxsbri9xtUnr3DxkTs+LFUYH1d0RYXC+dMcZ0Hybd3SfC7DD1CTf+uWKjvtYmm2M2IE9KnJpya7fJkKgkh7enli1U8rhP3gL+u248/D+zBszOAUu3Pu6GVBmNduXIV9B/YiMChAjKf3Gy9MGPm9OAkgyaed//ofNosyLl45LzTsj2++PQ0pWaUIft+8Gx/WqI0tv27EzAXTUL5sBTRu0ASft+uEUiVKizI//DjlgGuLZi/DFx26CJOcHzeuhsdrD1FXYFAg6tdugJ9WbhIkv9+QXnhy6229Tdo0QN+eA9Cza693+sd23394D1vWbkv8rlHLehg6cISoS9dJknxdI5o2yb937x7u37+Pdu1UAqU26a0G/612XdzxSLCu1YaUpnQKkFbdWVVvUiFHddKQ5NRBLXBXim2Oj0dUWCCevw5E6ZLFoQT1jY2Nhtuz5yhesrSI9JvWlkoTqSdPn6GIqyssNVx8TA0z0Y/EtmagHxQy4uLE/Hj+/Lkk+WpgS5Kf1tv67vdhYWFCYfXgwQN4e3vj448/FqarDhocbaS/BsPNwQvHb/zDcd/NP8H0La23X/994fpAJU/FinQ1qopUnt0pOiYWLo5WKFPEAaYmhtEmrTT5yYELiojE5YfuuP7cEw89fVGpaH5UKOSM0gUcUdLZSS+h3nn7/Km3Hx6/9sd9D2/cffkGZQvmRQ3XgqhdtgjsLLUPfJElNvlq5F7xtpOVNvkkx1XqlceOLXtUhH36WNy5+CDFd2DH7u2Yt3Q2Nq3ZKsxzmFp3ao6O7Tonkvwnzx4Loq2Q/J4DvsSjG28FJXWSz2eokT928giOnjiC0+f/weol61C9Sg3UbFQFh3YdE7b0ydPTZ0/QvEMTLJixGJ992lEsJjPnT8UL9xeJJH/QiP64f/VJYlaSfN4d4KlD8pTSxVtJ8tNeBnPKxVsSuPSSfG4MsTHRiI0DTM1MYWJsLELfsqyomBjhgs004YJ+akgxuFZsbByMjE1gpgWBVeqNiQPMzUzF3BaCBeuNjhYnbNpomdOuNx4x0TEwSrBBZ7/YTjMzU2HKxL+NjVm/5g2RbfK5cxzfrjiKn9fOhxkJvREQGRmAEYOGYfGajbAyp2eRBMEoYYNVLsMruMXHxWBAv28xdc0KFLGyEEKB9htxPKKjYxJt6ZP0g/cw4uNhbMzLdqlv7JLkpzyLJclPex1M6QnOJ54cenl5iSBM2s/njNVnCLn4rj164Y/LD/1
Rq1Kxt1rq5K5G9dlYbd2a6qMN2rhdVavXyy8Y/z19ha9alIeFmWm2X6TOEMlXxzEkIgp33b3xwNMXTzx9QTv+ovnsUcjRTvjcz5fHGo42lrC3toSNpTmszExhbmYCE7WNIZaao+hYhEfHIDQiCoFhEfAPjYBPcJjwbe/hH4SXPoHCvr5UwbwoVzAvKhVxhq1lxqLB6ZXkU2uv2OEnk/Czg+T/umEH7PLYo23nFrhw/BoKFSgkho8mPbxElNcpL8Z8PxJRUZFYPn+V+C4kJBi1m1THqKHj0k3ya1X/UJj+0N5fSTPmT8GLly/ESUDF2mUwa/Jcod1XkvurlyhSuCh4+Xf+stm4dOJG4nede30GezsHjSSfZkPFirpi5uQ5Ig/r/nXnNgzsOxiLVy54x0++JPlpr4K5meTHxcUi8vUddOg5EVt27UYBBwtBfqOjIjHhmy7oP3UdKpTQbOqnoBcZHob1C2egWv9paFDobdh2TeiKet2voN/EzVi7biXsrM0EwY+OjsSYod9g6sLVcLL/P3tnARfV8sXxH92poGALKIrdnc94f7u7sBsTu8XC9tnP1mcndncHYhcipSDduf/PmWXXpe8ud4HVez+f99TdmTkzZ+be/c65Z84xzHZySO7ciRPRbd5aVC+UkWFDhItH/sUPs7ro2cgOX989wYJtV7F91TQkREfAxXUjps+eBuMsQvSJkpMQ8PIKRrqextSRPZGkbYyalcsjMTECTiPHYvWWXdAUxeOdxwsExYhQpWo1mBro4OkLdxQpaIQ3n7xRrlI1FDbVwxDH0Zi7aS0KJUXi2Vtv1K1dndurbFEi/lk8C7ath6NppSK4c/Y/HH4WjXVzhiLw62tMX38am1ynwyCbUIMC5AuQn+1NJUeBPxHyPf3C4eEVAftShf6ITY0cyyHDovRGODwiBlGR4WhQ0QoaGnn7hiHHkJ92lBSC0ycoHP6hEfgRFoWgiCiERsUhLCaWATz5z8cnJiExmV6/itii0VRXg7amBvPzp42AiZ4u+9EoYGSAQiYGsDI1QtECxtIQmDmdBKVCfh7FyZeE0CTdkOX+n61r4fH6Ja6cvgkDA0O07/E3Che2wvIFq5j1kqLvkFsPbQJW/+PKot2c+u8sEhIT4Tx3Ej5+/oAWTVthxqTZzF2HqyXfxNiEySKrf93a9REWFoZxU0ehrF1ZzHFewA7eXr52Af9u2IPixYozIHddtxT3rzyBx5uX6DOkJy4cv4ISxUpi7aZVuHHnGjtXcOrgOeauk9aSf+6SG6bOmYQta7bDvkx5LF21CB8+vWflBUu+YnfK7w75sf4e+LubE+YtXwZ9HU2Usi0DE10NTBveA4PnbEIpS0N8/PgRMfFJKFrKDpamBvD1/goNTXX4+v9EQatisC5oil0rF6HS4DmoYhABL79glClbhlluMrYAJiHW+xF6O63H1KnjoKmlDRu7sjDUVsPkscMxZ/l66Kon4uOnL4hLFKGkbVmYG+riq6cn6Fzf94AQFC5WCoXMDLFg8iR0mbMaZbVD4fMzGmXtbKDJQvmJrwcXD2H5OT/sdhmG6wdXY86qI9h3+TYsRAGYv/Y/rHWZkeXbBzHkX0YLx/lo36Ujvr99gqIN+2N8zzpwHjseq7bsxIntrnjgHY9CmmHwjTXBWpfpGObYGyZm1jAxUINPpB62rJqPYYNHY5LLTOxfMg/2rYdgYIfGnC1bxzfOxvOkSnB2/B/WLZ6Ki7fe4sAZN3x7ch67H0VhvXOfbN+iCJAvQL5iT8GMa/1pkE9W/Jsv/FHIsmC29xqfev4d2nr6xhv/q2UNI31tzs88ZV5qwPkAACAASURBVIybd8iXp5OyiSjkqZfTskqF/Cw6p0xLvqzYAuYFUaNqDThPmMl84emiiDuzF89gEWYoiUf92g2weM5SZnEPCQ3B6EnDWdQbq8LWmD11HtsAUCjLiWOnIijoJ2fIJ598ssjTmQBv328wNDBE88YtMG/6QrbZoMg+FEL
z7MUz7O/l7B0wx3k+c+Wha9bCaTh59gQM9PUxsM9g5tPfe3A31KpRh4W/TAv5VGf9lrUs2o9sCE16MyBAvmJ3yp8A+Q1bDUCfMRNgEOOH009DcGDzPMwb1ROOszfiw63D8IixQCXzWBxyu48VK1dg/2pnfEwohr9rl8Tho+exbNVqnNm+CuX7OuPRv3OgX6EjRvf6ixkrsoL8Jl0mYaiTEyI8n+FZoBE2LXHCjPEjMHvpOhzZthJRBcujlPZPXH76HcvmT8WcSUOhZl0DDcuZ48y1F1jnugiLpk5G20lzcXL5ZFTpOA792tSDhgzk//z8FK16z8SR0/9h54IxKGCoCbOGI1BL/xNOexpg4oD27IC+lrY2c4dJa2eSWPJHrHTDvu2rEOP7Ar2GzcOm3VuxfPoUrNq8Az/9v+Gr52d4+3/HqdNn2QG4ISPHYt3a1dDTBJzHj8OKDZswbNBQmFmqo0KjfujbrikMdLlHIvF7fh6TN17HmmXTscJlMazVvqNe//n4cvMANGv0QvtqxdkpCjKUSdyT0rpPCJAvQL5iT0HFIV/WbU3Vw7aSm/TZhz4oU9IqW9c4PvX8O7T1/Wc4DDQTULWMRZ6+AclTyM+rifydID+vdCjITa8B4eCtcldFRtF15PXJJ7cZsuS36z0Vu44dh5lGGLo0/wsL/7uKA4uGwXHWPzBWi8Rzj9cICQmB24nDmL9qO87sXIP2UxfDTk8NS6cOR8cRc3Ft/3p4ibSRqFUMrrNGpQrXmVYTTK73IwyYuh1bt62HZsIP9GjfE2sOumHd/ImYtWQNwn944eXbDwgNDcWlKzewbt06LJ41C2OWr0ZJXWDm5PGYPn8pFk11QpSBMQraNMDcUd2gLRMmlOTGxwRjRMdW6DFhNvbsv4ilzp2x+eRrlIh/gcLNR6GevSV2rFqE/41bjfLWBtKDtZI+SyB/9NqL2Lt1BZJCPqJXjzFYsXMHVs2ZhpWb/sWahdNhUKISbIoUxMH9e7Dt310YPn4ytm1YDXqn4DxuFFzWbMDwPp0Ra2SFEva1MXfiYBjrcXexjI/6gfEjJ2D0iO44dNMP3euZ40mwBTzvHMHASfNw7/ReJFjXR0Pzb3itWR1ta9lCI42PvgD5AuTz+VTiYsmngBdfvnxhh3TLli2r0vkZkpJFcHvgi3Kl8ybACp9zl9tthYRHIyE2AnUrWHFzUVRSBwXIV5JiM2pWGZb8XOy+ICobDQiQr9wlwifkd+g7TQz5OjEY3LIhRm06i2MrxmLA9LW4feQf6FVqi44NKmLeuD4YPnczzu35B91mLkZxTWC58zC0HToLV/etxLmvIhhqamPv1pXsjFBmB/EkkD9o+g5s2boOWgjH4LbtMGvHKWx2mYoZC12xzmUWKrUZiFY1ymDW1PGYu2QNXObOx6SVK2GtCcycNBbO81ywaMpIvI7Ug55JMezfMJdl9JaVSweLj64chzufImBeowdmDfwLc2ZOgtfTh3D+9zwqlDDHtV0LUbjVVDgUyRzye87agQM7N+HL/TOYv+8J9m6Zg9lOTnD9ZyvGjhqFectWQBT0HuNmrMHBA3swwmkytq1fDTU1EaaNHw2XVesxavg4zFi9CFunjIBNGycM79qE8w8ejcN1thNEMQEo1moS/letCBZu3InwLx/gsnYDrl10Q+L3t7AqXgxhhZugfZ0yqdyWaDUKkC9APp9Ppewgn6z4Xl5eLCfDxo0b0b17dwHy+ZwAFWpLgPw8nCzBkp+Hyv+NRQuQr9zJ5RPyG/3tCOelq2CpFoBJi/bh1Ol9cJ3QB/2cyQ1nCSq1HQLDyC9wWb0VazbtxPm96SH/1uFtcOg/Ez/OrcHDYAssdh6WqTuKBPJb9JqJecuWIen7CyzfdQeH9m/AgsmjMG3hCiyZPRV/9xsF0XcPrN93Ebu3bYTLvLSQvwTLZk5He+cl8Ng7Hz8L1IXz8O4sYo/UEi9Kxrdnp9HZ0QUbT11ErWJGWDF7NE48CMNZtz0
w1dHAVYL81lPgkIElH6JkhHl5YOXuCyhooonIeHV06t4Xpa30sGv7DgwYOgoe9y/B7fpjWBYpBW31BPTv2xdbd+7BiMEDmfvPvp3b0XvgYGzfsgPdhg6Cdrg/9hw6i6GjRkKHXIQ4nEWjA9Fn96zGnLVHceDcRdiaAoP794KmVW2scZmC824n0LR+baxdsQhVuk5Bx3r2AuRzvAWF6DocFZWmWHaQT88oyobbt29fuLm5oU6dOgLkK6Zqla/1R0C++90b2DhrArZc/xUtRTJz07q3RtAPP2y94Z7KCnXrzFGsdBqCBXtOonL9JkqZaAHylaLWP75RAfKVuwT4gHyytCWGesF5znoULmyEbwFRGDp+ChyKm2HH6gVo2WccRCFf4bpuCwqWqoJS5klo2qYnHlw6hSZ9HGGhARzZvhp12vbHyxtnYdO6H2z1Y7B17Uq0GeSEUoXMMlQCkxv0EQtW74e2WjR+xmlj5PjJsC1syFxcejuOgvebh9i48xCKOdSFhV4cevTsi327dqPb8OEw1wB2bdmAngOH4b/t29Co30iU0I3B+jUbMGTiNJjoyJ4FECEqIgwrNuzAjCnjoa2hjruXj+PmNx04D/ofKGqo++UDMK3eDSXMdTIEboKZ5KQkce4ANTUpqNA4CBDpe9lcAZKQoBIfZPpeNlEWKUVSl3vYQRE+v36KzW7uWDxpILQ11HB4qysSizVCt5Y1cP/ubdSt1wD3zuyGRtnWqFe+SLq3BIIlX7Dk8/lUyg7yyVXn4MGDGDt2LPbt28fCbLJzL1x2tXx2lKe2BHcdxRUpQH731gj088bktf+iXPU6Uk0uHt4Lnzyew2nFZgHyFV9fQs080IAA+cpVOh+Qz2AzJc66BFIp3jr9BlMkCYmBmQCXJWKSTbKSkphJXE6aOktcl6CWYtNn8WOeWq4YnFPLFSGZAvjzIJf6Q5ek/7/GI5ZJfZHIUe6s5az1zMfx61WANAtvBkm+BMgXID9nKzB17ewgX5KgiTLjenh4sPwXdA+aUN4cFbwEyFd80vIV5E8++VjUsKKd4qPJpGZ2lvyiNnbQ0tbB8PmurIWo8DA4tW2AwsVLoevIiUqD/NseH7GiQw3ex5tdg4JPfnYaUu3vBchX7vzxBfnK7aXQen7SgAD5AuTzuR7lgXxKmnXu3Dn2BsvR0VElrfnKgnxFMobzOY+50Va+gvyiM/aJlvdpyWLU83llB/mdho3DptkT8e+dV9DQ0MSVI/vg+cYDX9+/QvfRU5QC+RSrf+r+S/Be3IfPoXJqS4B8TmpS2UIC5Ct36rKC/Hbt2ilXuNC6SmpAAvl0GLJDhw6csgqr5EDl7LTgky+nwlKKywP5165dkx68dXdP7ZasmPTcr8U/5Ivw8f0b+Pv6sHzZ9JZDV1cP5Rwqw9DIWK6NELlGvX3tjrL2FfDuzUuUr1iVZSyXvWgzERkRDh0dXRYyODfdpvIV5P+19oyoWRV7OBT9laWUj+WUHeT3nzoPRzetQpt+Q1C9SUvM6d8RvSfMwN4V85UG+a99AnHtxTtcHteWjyHK1YYA+XKpS+UKC5Cv3CnLCPJfv37NDrpVrVpVucKF1lVWA35+fjAwMBAgX2YGBchXbDlzgXxKpEdRda5cuYKzZ88yV76BAwfmKmAqNrr0tZQB+QtnOOGr5yd06TUQEeGh2Ll5DZq2aAOnaQuhpakpPQfEeiMSMTdKcoMkd0iCdMl5oNiYaEweMwCu/+xhUcAGDh8PTU0tUJADkUjsqkjXKpdZaPG/jqhYpTrU1NRZhnMSoqZO7pjiJIKSz8iFka/cBvkK8l0vvxA98QtDrwaV+VobrB0ukB/o+w1PblzGkFkucO7WCpuvPcPM3m2UBvn/3XFHDWsTTPqL37FyUZwA+Vy0pLplBMhX7tylhXxmpYmMxPPnz6WHPGV7IHnIp301LPtvKp9R8i/Zz2StPxm9Zpb4hEt9w1MOmUr6krYfGfWLSz8kZcQ/SmIrmOyV0bg
kZWX7IimXdlyy/cpOJ2nby8xCJqubtHOT2Tyk7Yeknmy/uc6hpK6dnR0KFSqkkqCljLtKgHzFtJod5EvuN8nB8/3797N4+QT5qnjxDfl03y6YMY7pZPbitQyoL7gdx8HdW+C6cQ/evXZHTHQ0DAwNUbFKTTy6fwv+vt/gUKkaKlWtyZ57j+7dQlBgAIxMTHBg12Zs+PcwXj5/gqo16yLoZwAe3bsJn29fUa1WPRQoYIH5M8ahfuO/0HfQKHz388GzJ/cRFxuLqjXqwL58JXz5/AFRURH49uUzWrTpCENDI16mKl9Bvn9YlMhh4WEs69MSlsYGvAyQK+SXLl8Jw5tWQbdRExESGIB+k+coDfIDwqPgvP8SXs/ujsLG+ryNk2tDO+ccQ4P2NaCjr8O1ilBORTQQFx2HO6efYNCCLirS48y7qSoZbyU/qGmBT+UnQBgA7xpIdYCa99ZVr0EB8hWbMy6QL7spPXz4MD5//oxp06bxZiFWrOeK1VIW5EdGRGDC9IWIj4vF0vnOLPTt9PmuaNesGvoMHIF2nXtj3YoFsChUGK3bdsG2ja7s8+ioSCxbMA2LXDfhwpnjePv6BXYeuoAxjt2w/t9DWDBjPAoVtkaDJi3ZZoJkHNqzDU1btkG9hs3hPN4Rg4ZPgImJKZYtdMbKf/bi1LH9uOh2HHOXrEPdBs2grcMPn+UryBeJRCLXK+649ukHRrSsrdhqyKAWF0t++Rp14Dp+MN48voe5O4+hRNnySoP8zZceopltIUzOAys+qef8zlswMTNC0TJWvOlYaCh/aMDngz/CQiLw96BG+aNDOeiFKkF+DoYpVBU08MdqQIB8xaZeHsgnCYGBgZgxYwa2bNkiQH7KG0iC7zs3LqO0HeW10GRuM5NnusDA0AhDe7fFtgNuCP4ZiCG922Ds5DkoYFEIN66cQ2hwMIyMjdGoeWs0b9kOcXGxGDmwC9ZvP4TRjt2wbO2/GDmwM46cu8vOeD65fxuFixTFuhXz0a23I5JFyTh6YCeWr9/F3H9WLpmFMvYV8f7tS5Qp64C2nXtCS4s/v/18B/m0IHvsuAJtXT10q1NBsTsgTS2CfPKzV9eQjeEMHH7lh9l9O4B88gnyH1+9gD0r5mH9hQesBWW46xx58ArxsTE45PgXL2NTpJFv7/xw58RTNOhQU5HqQp18rIE7px6jQafqKG5vnY97ya1rfzLk01sB+iHXSPPM4qY5bqVyQ4Yk9CRf/qUZjex3kcFt1n6vUgLkKzaf8kJ+2hCwiknNu1rKsuQnJpK7zuqU56w4VHFwUCBGO3bFlj2n4O31BSMGdMLCFZvYgVw6ZKuvb4A929ejZ//hqFqjNuLj4jBuaE+s3LiXQf6S1dswbbwjs+zT8zs+Po6FCZ41cTi69h6EqKhI3L5+iclVgxp7U1C8lA0+fXjDLPj1GzdnPv18XfkS8qPjE9Ft+2UYGxrw7p/Pl+IUaYf88MMjo3BkSAvoa6c+fa1Iezmpc/PII4QFR6FKw3I5aUaom4808OL2W5iYG6Bxt1r5qFeKd+VPhvy4uDjcunULFOdaWZEYSMaTJ09Qr149pcmIiorCp0+fUKlSJaXJ+PHjB8LDw2Fra6s0GeTqoKenBysrK6XJUPxOUd2aAuQrNnfyQr5iUvJPLWVA/sKZ45GUlIw5LmuYxV1ykT+9BPJjY2MwpHdb9B88Bg2atsDG1S4obVuWueu8f/sKTs7z8fTRXRzYtQW7Dp/HaMfuWLVpL8YN6YFRTjNgbmGJ2ZNGYObCVTi8bztq1W8CGzt7LJ4zEfOWrGdyF8+eiKmzl+K82zHUqd80BfL548N8Cfmk7LjEJAw/cAvfwmLQu2EVXn30c3vpkg/+gdsvUNxED1t6N4KOZuo3CrndH4k8An3fTz9QslxRWBQ1F3z082oiciCXfPADfYLx9a0PitgW+m0An1Typ0I+HQajBDZ79uzB0qVLoa3Nb0hh0i3JePbsGU6ePIn58+enC/mWgyU
prUoy7t69yzYr5AucNqwcXzLOnz+P9+/fY/x4imrB34+jpH80DsoeGhYWhmHDhilFBh+6UMU2BMhXbNYEyFdMb7K1ViyajsSEREydszTVG9PQ4CBMGtMfazYfYK47Xp6fMGXsQMTFxqBS1VrMZz8pMRFOI3rjh78vSpayha6uPuYv/weTRg/Aum3/4cPb1xg/jNxutNDPcTSz4N+8egHrVszD5j2ncPfWFezYtIpF1ek3eAy69h6IbRtWoFbdxqhZtyGvb3DzLeRLJmPNdQ8svvAM3etUQKvKtjmf2Vxu4aL7Jxx+8AozW1eDU9OKuSw9e3HkuvP6/id89wxEbHR89hWEEvlKA7r62ihcygIOdW1/CxcdWeX+qZBPFvaVK1cyK/uKFStgY2PD+5ojGbNnz2aH8VatWoUSJUooRQaB98+fP7F27VoUKVJEKTLGjh2L4OBgbNiwAYULF1aKjJEjRyIiIgIbN26EhQW/IZ5577AKNShAvmKTJUC+YnqTrUUhLkEOM7LZxFP89Um/4kzgaiySDm30JYYnafbulBCZsp+zmJgpbVIdijvGyqtTyExRSjQyVkOcnVxcOdXbQb4P5+d7yCcdvPQNwqILz/HpZzhaVymDOnbFcj7DSm7hwUdvXHjxAbYFjTGrdVVUKlJAyRKF5jPUwLRpwLJlgLMzsHSpoCQV0sCfCvnfv3/H9OnTGRzXqlULs2bN4t1FxNPTk1nXo6Oj0bx5c2YF59stiHIH0GE/+jHr3LmzUmJ000Zo4cKF0hjgJIfvcdy+fRuurq5Mxrhx45TqQqVCtycvXRUgXzE1CpCvmN7+xFoqAfmSibnwxhvrbryCX3gMGtqXQMNyJaGrxf/rWUUXQmxCIm6//Yrb77xgbayHcU0qoHX5/L8hUXS8KlGvSRPg5k2gcWPgxg2V6LLQSbEG/kTIJ2sPgeuCBQsYVOrq6mLTpk3MeswXvJKMy5cvY/369UwGtb1mzRqYmJjwKuPQoUPYt28fs17R2whyPSK/dj7HQbo5d+4ck1GlShWmN4kFjo/7iPTj4uKCBw8eMBmNGjXC5MmTeZXBRz9VtQ0B8hWbOQHyFdPbn1hLpSBfMkG3P/ljx/33cHvlhXp2RVG1dBFULVGYtx8PeRYCPfife33H8y++uPfRB20rlIBj3bJoaCuEp5RHj0opGxoKWFkBsbGAri7g7w+YmipFlNAo/xr40yBfEu1m2bJlePjwIYNK8scnK3uDBg14eb5JZJAfPqW4p38TeJPrjoODA68yyIr/7t07JoMyvZI13NramlcZEydOxNevX5kMY2NjbN26lbeNBLVJB4cnTZoEf3p2ADAzM2NhCHV0dHgZB/93jWq1KEC+YvMlQL5ievsTa6kk5Esm6mdULE4898RJDy88+RqAyiUsYVvYAraFzVDa0hzq6qkzMfIxwcnJInwJCMan7yH49D0Q7l4BqFHSEh0rlkCnqqVQ0ECXDzFCG3xoYN48YP78Xy3NnQvQZ8KlEhr40yCfJoXAMiEhAbGxsThw4AAGDRrEDm/xGYJSVoabmxu6dOmiNBnkx05WcIoSpKxxeHl5gSLs1KxZU2kyaENEGy57e3veZajEzaikTgqQr5hiBchXTG9/Yi2VhnzZCQuOisPNT364/fk7HnoG4K1/MEpbmsLKzAgFjQxRwEgfZga6MNHXhYGuNvS0NKGtpQGNlEMP7HCFSIT4hCTEJCQiKjYeYdGxCImKRVBENH5GRMI/JAJfAkJRzsoctUtZoqFNYTS2tYa5AT+Zyf7EBai0MZMVv1QpgP6UXGTF9/QUrPlKUzq/Df+JkC/RYExMDIN8R0dHpVmMSQZBfteuXZUmg0JbEuS3aNFCaTII8snSXrt2baXJeP78OXOdIsjny92I37tFNVtTVciXHKKkP/l0D+M6i7kN+ZLxUv/4NDhwHS/fITS5yv0dyv02kJ92MigE5xv/EHwMDMOXn+H4GhQBv7AYBEbGIDQ6DhSLPzYxEYl
JIiTTjaqmBk0NNehqarIY9qb6OrAw1IO1iR5KFjBC6YLGsLMwQXkrs3wTAvN3WIBKG4PEil+5MuDuDkj+FKz5SlM53w3/jpBPP870H/vRTKMwSowi+TSOWfL/w6BBA1n0BdlLthx9Tv9mbwFkWuTyGck4e/YsOxQrKyNt+xIZaXvMpRyljX/w8CH+at5caTK8v30DHVYmS75kHJmNX5Ex0PhfurszF52yZcumkpG2PfodIQjKCxDi+/7LjfZUFfIpcsr9+/dx584djB49GkZGRrmhLqmM3IZ8erN46tQpmJubsw17bl9iyPdBudJCngp5dR8SFo2EuEjUrVCYcW5eXSxqkIh++fLgEgc9Eq7fRgNkvSfId3ICdu0Su+wQ3A8cCKxZI/5O8M3P99P9O0I+ueK4nT2HO48ey4Cg5Onz6/GXmJCA4MBAWGSafCl9nV9PsYweo1Re8qQTf099CQsOQgFLOs+U0XKQV4Z4OyC+xDLi4+MRFREOU/OCWcjI7LEv6XPavqWWERMdjYT4eBiZmGYiI32/xC1mND6JrNSyI8PDADV1FjdbbMlP32f69TI3NoTT2DHQ19fP9/dXfuigqkI+reu+ffuiYsWKLBKWMvIzZDU/uQ35V65cQf/+/TFv3jyWKyK3L3KTPvfQB3YlrZTihp3b48lNef6B4TDSSUQVW3oG5x3p5ink56bCBVm5rAGJRV+w4Oey4nMu7neE/Lj4eJw8dwEmVRpAR98gUyUlxMbg7qlDaNy9P0uYooyLZDy7eh61/9cp3dsCvuTFRIbj07NHqNgwtSWfr/apnZ8+XggN/AHbKr8s+Xy2T215vXaHlo4OrG1+WfLTyhAlJ+Hd9fPo3641DA0zn1u++6bK7aki5BNgv3r1Cn369EHp0qWxd+9edug7N6/cgnzJQf0JEybg6tWrWL58Odq0aZObQ2WyaAN9x8MfZmbm0NHOPxEVc10Rcgqk+XN/74tWNa1grM9/UkV5uiNAvjzayqQsRcwQrtQaaHLjBhrfuIGbTZrgBoXTFK5UGphLm598ev2WkB8Xj1MXLsK8ZlPoGhhmqvn4mBjcPn4AzXoPUhrkk4ynl91Qp53yfPKjI8Lx8ckDVGqiPJ/8QO+vCA34DttqyvPJ9/R4ziC/iF25TK1hyUlJeH3lDPr97y8YGmY+t/n0dsuTbqki5BM4UTK5Xr16oXXr1hgyZAhbE5IESrnhqpVbkE+LgmRRMjg6W7No0SK0a9cuT9aKT2Ak7r4ORJWyRdMlsMqTDuVjoew9o0iEsMhYJMdHooZ9IWgoIRCNPCoQIF8ebWVSlpRIr9OE65cGBMjPfDXQWskjDzlOS/S3hvxazaAra8lP46kYH5sC+b3SHLzNyevWDGQ8vXwWddp1kfr1s4nhUUZ0ZAQ+PrkvhnxZp0geZQSSJf/Hd9hWr6U0GZ6vXqRAvn2mMiiD5uvLBPktBEs+pztcfIhTQ0ODY+n8U4zcdXr37o2///6bJXkjH33KHF2+fHkGwcp2i8hNyKffCDq7Q28sdu3axSJ/kXy6KGIWnUdQ9njFzCrCR58wvPsWDj09XVhZmEBLUzlvOeVZaeLHqsR9j7LXylOb/7LUn7DIGPj+CIOlqSaql7GArrZmnvdLgHwe5jq/QhEPQ1O8CcFdJ1Pd5ff1kl/7Rz/okh85eRdmnMSSnwL51E5sVBQSExPFcJ3yW5EQH4cHbkfRoHNvGagUsR9TXX19aGlzf/VKMmIiI5GU8sMsK8P9xiVUb9k2tQx1Nejp6UNTThlktU+m/su4q8dGR+KL+xOUq9s4vQx9A2hqaXFWIY1DKkPmdzXI3wfhPwNQsmLVXzLUxOrUU0hGRMr8/vq19n7/CpraOihcyjZTGQLkc55KacHfAfL79euHDh06MEv36tWrMWDAAKVDb25CPk0WReGixHaUs2Ps2LHw8fFhOSOqVavGwJ9gPzcuAlg67B4Xn4hXX4IRF5/ExP5C7NS9SPt56lNJ6U/lpOX
zjOozO4iMGJqLly+ewKFSVWhqEExnTPny9DGtjLRjzKotMtgXsTBAMUsjdn4hNzZgXOZegHwuWsqmTH6FIh6GpngTAuQLkK/46smwJp+QHxMZgX2rVuBDKP0w/LJKiUTJ7JdLjb1i/XVAVAfxaFS1FFr2G8p5VBEhwfjXZSF8k8hvOI2MlMzCsjJ0RXFo2bAyGnbuyVlGaGAANs2diWBda04y9JJj0O7vhqjZqi1nGUF+Pti8eDFCdSy5yUiKQrdubVCxAXc3vR9entjquhLhWgU4ydBPikDfgb1gV7UGBMjnPJW/FeRTLgs6xD5nzhxmyacDqvKClSREJW16CBol9elzBkdpwDEvIH///v3Mmk99Wbx4MTuAa2lpmSeRpPImREtG61uE6Oholgl7ypQp6NKla64fws7qrsvrtwqyfRMgX/7nY7oaAuRnoEQB8gXI5+Hekm2CT8inyDObN+3FhYK9kKyetduCmkgEk1g/OOrcRBvHkZxHFR4chNVrd+BOicGcZJhHeWKE5Ws0696Xs4yQHz+wxHUrnpcbzUmGRfg7jLX9gXptOnCWEejjDZdNx/HKph8nGYVD3DGpWiKqNmnOWcb3r1+wcNdNfCjegZOMIj8fYVpTU5SrUUuAfM5a/lVQVS359OZtzJgxaNy4MXr27CmF/HLlyskN+QTyT58+ZeE4yff97t27zCWGslxv3rwZnTp1gq2tbSrQz23Iv379Oo4ePYr169ezfpBvPkF+oUKFFJj136cKWqSy/AAAIABJREFU/RZs374d48aNY25L79+/Z2FG5d3k/T4ayXwkAuTzMMsC5AuQL88yyu/rJb/2j2/I37RpH84X7AWRetZRI9REyQzyB2vfQJvBKZBP8fZTYu5nduBPDPk7cavEYE4yGORbvELzHimQz0FGyI/vcHHdhmflxnCSQZA/zvb7L8jnIIMgf/HG4/Cw7c9JhhjyE1BNAvkcZBDkL9h1C+9LdIBILfv5IMif3tREgHx5HjwyZVUV8iWWd4mVXWLJp2Rp8lryqa0PHz7gzZs3aNu2LYvcQ5sIeitw/vx51K1bF9bW1nkK+RL3RAm8ylryFZz636IauS3Vr1+fncmgxH8bNmxgUZdU8ZyJsidEgHweNJxfoYiHoSnehGDJFyz5iq+eDGvmJ8inH98v7k8R4P0VFRs2YxF76AdGTV1DCgU5hXyS8frOdcRER8GhXiNo6+qlk5FTyCcZTy6chpauHuxr1WPnAdKOI6eQTzJuHdmHgkWKwa5aLahraqaTIUA+zzdLNs2pKuSnHRZBOfnjU0hNSi6nbEtublvyZcdLm5KNGzey6EJktf6Tr3PnzqFKlSrszALpg/7s1q1bvnLZyS/zI0A+DzMhQL5gyZdnGeX39ZJf+5enkB/ji0Fa1/G/QeKENBS28e3DOzi7dS109PTZwdAitmVhW7UWrG3KQE1dHXJDfuQXDC/ogabdekllUBSe6wd3QVffkMkoamcPu+q1UahEaSZDbsgPe4sxNn6o+3d7dmyOxnH72AE8PHcCeoZGsCpdBkXLlGMyLIoUZzLkhfxCwS8wqWocqjRqJpVxYcdGvLp7HfpGJrC2LYPi9hWYrswLWzMZAuTL8wTJednfBfLTWvZzrpmsW8hLyKeeSaKyKXszo2w95rR9SR4BOnxMB7DJMEFr+k/XS0Z6FSA/p6st5QBdfg6JyMMQ5W9CsOQLlnz5V02WNfIS8o2jfdDC8x+UsCsLOpwbGx2FsJ8B+P7lI+szJc7SNTSEZbGSqNexB0pXqiY35JtFfEZLv92wLlVKLCMqCqE//BDw7atUhp6RESxLlEbDLn1Q3N5BbsgvGPoaLYOPoXDR4syXncYR5OuNID9vqQx9ExMUKmGDJt37w8rGTm7Itwx6jlaRF2BhVZi9To+LjkLAN08WU1+iKwNTM7ZpadZrECyKlRAgn+d7JbvmfhfIz26cfH+f15DP93hUuT2ai7SQr8rjUVbfBcjnQbP51fL
Jw9AUb0KAfAHyFV89GdbMS8g3ifHDAI3LaNV3cErcYzV8fPYQbptXQdfACFWatUK5Og1hZmkFLV1dZlFSxJI/1Ow5GnXuIY7ro6YG9xuXcW3/dugZGbOQm/Y168PEwpKFlSQZiljyR5X4htqtKHsmxbsEHrgdw/1Th6FvbIrabTrBpkpNsQwtbSZDEUu+U8UoVG7QWCrj2oGdcL9+AYZmBVC3XVeUqlgVxgUtoaEpDn0nWPJ5vlmyaU6A/Oz1LWu4k/yd/nzx4gV+/PjBEnJJLlkLsmBNzl63fJQQIJ+bFgXI56anLEsJkJ+BegTIFyCfh3tLtok8hXyKrkMHbx1HMACnH/ufvt7w+fAGdtVqMwiXvi5OiZ8mN+SnHLxt1r2PVIb/l4/46ecN2yo1oaNvkE6G3JDPDt76o97/xNF1aBzf3r5CZFgIbCrXYImn0o5DXsgXH7yNR7XG4ug6JOPziydITExgbzg0NLXSyRAgn+ebRYD8HCuUEm8RzNPhXvGeW42tZYrkEhISwg7mSta35CAwRb3R09PLsWyhgew1IEB+9jqSrFs1keBrwk1bmZTKCeRH+fnC79ZNBL94hkhfPyTGxuSoL0Jl5WlAU1cPhkWsYV6lGqwbNYaBdRGFhOVkvSgkUM5K+bV/eQ35qaLrSPxjKZ62esbZHxWFfGl0HQ4yFIN8meg6HGQoBvky0XU4yBAgX86bNIfFVc2ST4gSFRUlBe6shk9jMzExkRahupGRkeLEd9lcurq6Ukj//v071q1bh+DgYCnkS6BeAk+ykE9yKWmVnZ0d2xDQJoH6nN1FZSkMpCQyDPWX6lIc+OwuTU1NGBoa5qovOumR9MkFGw0MDKAtk9yPa920OiE9UHjTmBgxH4WFhSEuLo4duKUkaLL++FTX2FhsdJFc1F/JZi0rnVJyMeqz5G0MjTEiIoK5HWZ36evrs7Hmtzc5giU/u5nj8L2iUPRu5zb4Xr0Gq2rVUMDGFgaWluxVv3DlTw0kxMYiKiAAQZ8/wf/ZMxRp3gz2g7gnR5KMStH1kltaya/94xvyxXHye0KklnWcfLA4+f5w1FUgTv6af3Gn5GBOMsRx8t+gmSSEJocJJ8hfsmIrnpcfzUkGi5NvR3HyO3JoXVyExcnfeAyvbPtxksEs+dWTFIiTf4PFyecyHyxOfjOKk19biJPPeSZ/FVQ1yCer7X///YeKFStmCVEEZdeuXYOTkxMbLP2b4I7izVM4zKwuAnIqX6dOHSbD399fCvkSS31G9SWwSzoluQT5dFFIzp8/f6JgwYKZiqW67969Q7NmzaTl6DMPDw+2QSCAz+qiUJKtWrXK1cRY9Hbj1q1boLClWV20SaKkXZUqVZK+BSGdUtjSAgUo6V3mF5WjeSBYl8wjzSttwmQ/yyhpGemuffv2Ut3R2jlx4oR085WZVFonnz59QteuXaX6pPbPnj2L4sWLZ7nuaPNBgE9jzSyksgK3KS9VBMjnQY3yQlFscDDcl7tA39QUpRo3FcCehznI7SYI+D1vXkd0aCgqT50BXTlCmsm7XnJ7bPm1f3xCfkxkJPZu3oanKAuRmmw2Wop/nwz1VAmyRDBIjECbolH4q88gztMRERqCf9f8g7eG1dLJgCiZhduUXGoQwTAhBJ3KaaFBh26cZVDG240r1sDLskEGMlK/ZSAZRnGB6FnTAjVa/M1ZRpC/Lzau+xc+BWqkkZHM2qBDx7LjMInxR79mtqhYrxFnGQHfvPDPtkP4bloBonTZgdNmHhXBPNobQ9rXgG3lagLkc9byr4KqBvl07x86dIjBW1aWUgK63bt3s4RZEjgkeHv06BELuZhV3dDQUPj5+aFGjRqpIJ9ccxo2bMigVvJslLX0EpASCNJnspBPsKmjo4OiRYtmCfkPHjxgfZNsBmgMb9++hampKfsvs4sA1N3dncFwbsaHJ3h//fo1ateunak+qW9eXl5sg1W5cmWp3qguxbUvUqRIlnNBLlE2NjbSNzLU3tW
rVxmoZ7VpIl3RZqBp06apIJ9CblICtawAnN4M0GawY8eO0jcDLPzvrVvSNZHZXNCbgoCAADg4OAiQr8DzKN9XkReKHk6bDNOixVCyfoN8Pzahg1lr4OvdOwj18Ubtpa6cVSXveuHcME8F82v/+IR8Ch0ZHhKMxIR48eFQMRIgIS4eL29eRrUWbVL9CJGbvZ6BIfSNxJYlLldSYiI7fJuURG4CqWV8eHIf5es1TidD39CIhbLkepGMsJ+BSBYRcP+SERcdDZ8Pb1G6cvV0MgyMTKBrYMBVBBITEhAe9DO1DJGIRcuJCg+DtW3ZdDIMTUxZaFGuF81DeFBQOhl0JkFbTx/mVqmhgL3ONzVluQMoStDry2fQ738tYGjIfVxc+/Y7llNJyD98CO3btc8SoiR+2hlBftWqVbOFfF9f33SQT64hXbp0kVr4ZdcDO2/y+TPLkEtXWsgny3NWQEv9ffjwIQNhWcgn6z4BPrkdZbYxkRwCzivIl7zxyOj+oL59/fo1Q8gn9xcukE+5DyRuV7KQb2FhkektKXmTkxnkZ7UZkkB+hw4dUkH+7du3Ub169SzXHY0pMDCQJVITLPm/4RNTHigiF52EHz9QphV3S9pvqLLfakgfLp6HVqFCnF135FkveaGo/No/PiGfXHBEjOvZ/9lFP7if3Z/i9tF96D/PFZpaWr/ULw53I5+/ZSYy3j++h6eX3NBr+iIWXUZ6pRzYlcunMxMZHrev4d3D2+g6cbaSZCTh0flT8Pv0Hh1GT1GajJuH9yEqLAR/Dx6TRsav+RAgX/6nhAD56XVGlnwB8rNfSxJLvgD5v3QlQH7260alS3CFIjpk+2DqZNQePUZw0VHpGU/deXLdefjPBtRZ7srpMC7X9ZJXKsqv/eMV8jNQbkJ8HE6sW4qvr9zRZ6YLithl7XOqyPwkxMVh30Jn/PT5hn7zVqBwSRtFmsmyTkJcLLZNHc3geODC1SwOPd9XfFwstk4ZiZjwMDguWY8CVoodQs+qX/GxsdgyaRhioyIxdPkmmFoWyrC4APnyz64A+QLky79qxDUEyE+vOQHyFV1NKlKPKxR9PHgACX6+sGlKmSCF63fSwOfr16BlXQR2PXtnOyyu6yXbhpRUIL/2T9mQH/LdD/sXz0BEcBBKOlRGz+kL5bPcc5iP756fmIz42BgWV5+s4HJZ7jnI8HrzEgeXzGauLzVatkOLfkNZzH0+rw9PH+DEmiVMRqMufVC/Yw/eZdDbCMooTGckWg8ajarNWmUoQ4B8+WdWgHwB8uVfNQLkZ6YzAfIVXU0qUo8rFJEvfsm69WFavLiKjEzoJlcNhH77hq/373Lyzee6XrjK5rtcfu2fMiGffDk/PL6P4+uWQJSUzN60DVmyAWaFCvMGryTj2ZVzuLRrMwNXI/OCGLRwNQxNzXiVcfvoftw9eZBFCrEsXgr95iyHDsXu5gn0qd3z29fD/cYlJqN4uYroPWOx2BeVJxl0ZuLoqkX4/OIxk1G2Zj10GjctQxkC5Mv/BBAgX4B8+VeNAPkC5Cu6alS8HlcoutqvN2qPGi246qj4fGfUfeays/EfNN97INvRcV0v2TakpAL5tX9Kg3yRiPnjH1m5EJ4ez5mfvqa2NloPHgOHNIdjFVZ5ioz/lsyC9/s3TAZtJLpMmIkS5cUh5nJ8pcjYO38q/D0/MRk6+vroP3cFzK2L8ipjx8zxLBkYydA1MMTI1dvYAVm+xhETFYldcyYiLPAHnYdmycZGrtomzSYsqysB8uVfOQLkC5Av/6oRIF+AfEVXjYrX4wpFF7t1QpMZs1R8tEL3M9PADZdFaHXkRLYK4rpesm1ISQXya/+UBvkp8bSTEhNAfuB3jh1A096DoKmplWmiK0VUTxZpiYxnl91Qu20XpcmIiYjAx6cPULHxX0qTEejthbCAH7CpVlNpMuh8hJa2Nqzt7DOVIUC+/KtRgHwB8uVfNQLkC5Cv6KpR8XpcoUiAfBWf6Gy6L0C+cudXmZA
v6Tn5yt8+dgDNejvyY5XOQCUkg6Lr1GnXVWkyoiPC8fHJA1Rq0kJpMgjyQwP8YVst83jZOV0R9GZFS0eXHYLO7C2BAPnya1mAfAHy5V81AuQLkK/oqlHxegLkq/gE8tR9AfJ5UmQmzQiQz12/AuRz19WfWFKAfAHyFV33QnSd9JoTDt4quppUpJ4A+SoyUUrupgD5ylWwAPnc9StAPndd/YklBcgXIF/RdS9AvgD5iq4dla0nQL7KTh2vHRcgn1d1pmtMgHzu+hUgn7uu/sSSAuQLkK/ouhcgX4B8RdeOytYTIF9lp47XjguQz6s6BcjPgToFyM+B8v6AqgLkC5Cv6DIXIF+AfEXXjsrWEyBfZaeO144LkM+rOnMH8kWiVHLYwdvjB9CsV5qDtzkJcZmBjKeXz6JOuy5Qg0zoTB5lREdG4OOT++KDt0qSEejjhdAf32FbvZbSZHi+egEtHR3xwdtMxiEcvJX/vhMgX4B8+VeNuIYA+QLkK7p2VLaeAPkqO3W8dlyAfF7VqVTIJzCMjQhHcnxCihwx7CfGx+Oh23HU79xTRr4aoK4GXUNjaOrocB4kyYgJC4MoMTGdjJc3L6PaX20gy61Q14CuoZHcMqJDQ4CkZAoEKu1bXFQUvrx8inJ1GqWXYWTM8gBwvWgcTEZycooIsZxgf1+E/QxEqQpVUifBUldnce01FJGRZhw+79+wvhYuaZsig2SL50PP2BQaWloQIJ/rTP4qJ0C+APnyrxoB8jPTmXDwVtHVpCL1BMhXzkTFxsfDtE493N23B9XLl1eOEB5bFSCfR2Vm0BSfPvmxkRG4uGAGSjx7CTUZYz5loqVLHK7xl5U9Wk8HUd264K8BwzkPMjIkGGenO6HsJ69sZYjUgAh9Xaj374/6XftwlkFx6k9PGI6KP0LSyxCJUuL8i8dBMkIN9GA0chSqt27PWUawrzcuTR4D+8BQqItEEKXoRVZXEk0lq6kh2FAPhSZOgUOj5pxlBHh9wZ2p41E6KDxTXZEMkk3jCDDSh83s+bCpVkuAfM5aFiA/q2RtoaGh8PX1RY0aNdj97+/vj3Xr1iEsLAxdunRBnTp10oVxpdwXnz9/xubNm5lynZycYGdnx/7u4eEBXV1dFClSJIvwr8l4+PAhKleujIIFC7J6lJjv3bt3MDU1hYmJSaZ1SfaLFy9YvzQ0NBRYBYpVESz56fUmQL5ia0llaqk65O85fRqbDh7GZ29vJCUno1SRIhjWrSv7Ly8vAfLzRvtc13Nu945PyI8JD4OH80RMOHENmrLuNBLgl/GiSYYavpkZYtukYWg5dCznYUcG/YT72BGYeu0pJxkfChrj6KwJaNhrIGcZYd/98WJwf8x4+omTjJeFTHF10UzUaM/93g7y/gafwQMx6qUnJxmPrczwZKULKjRvzXkcAZ6fEDPIEb3feXOScbtoAXzeuBY2tesLkM9ZywLkKwr5nTt3Ru3a6fNBEGh/+fIFW7ZsESBfZh2SXr5+/YqEhAS2gZH8ptAGgYA4q40PNfP+/XuULl2abXKYgUIkwtWrV9kGysLCItMVT+WuXbuGpk2bwtDQULppOnfuHBo3bpzlZiguLg7Xr19Hhw4dQG+6qM+04bp9+zaqV6/OPsvsEiBfgYeQKlXhCkX5MRnWkUuXMGrhImycPQuNqldnar9w5y7GL1mKTXNmo9f//s6zqRAgP29Uz3U953bveIX8sFC8cp6EicevpobKDAaVrAZ4mRph2+RhaDFsnLgEWbSZVRuZPvwjgwLxcswIOF9NA/mZyHhf0ARHCPJ7D+IsgyDf3bEfZj5JA/mZyHC3NMMVl1m/IJ/DOIK8veDrOBCj3dNAfiYyHlmZ4/GqJb8gn4OMgC+fEDtoEPq8TQP5mci4VbQgPm1aJ0C+gjehKrvrZAXqBGW7d+/GmDFjpHBIoPno0SNUqVIly8RwZLHPyJJPFv6yZcvC2to6Q0s+ff/8+XP2XVpLvo6ODooWLZollD548ID
1TdaS//btW2bJp/+yusiSX7du3SwBVMElkmm1Hz9+4NWrVxluemQrZQb54eHhnCDfxsYmQ8iX6CmzDmYF+VmBOkH+jRs30LFjRzaXaSE/q3UXGRmJgIAAODg45OpccJlbNhYR/VoJl8Ia4ApF+RHyRy1ajJCwMPy3Ynmq8Z+5cROFCpijVsWKcFq6HOFRkdDT0cW1hw8Rn5iAUT17YtKA/qwOwfjUlatw9NJlBj7VHcpj9dSpsCtRnNP3H72+YYyLCx64v4SluTkmDuiPkT26s3bJXWfvUhes3bsPrz5+QrnSpfHvwgUob1Na4flSVkXBXUdZmhW3m58gn2Di84sn8PnwFuXrNoSxeUFo6+lDXUNDCgI5hXyS4XH7KsKDfsK+Vj0YmppDR08Pauq/ZOQU8knGo3MnQduVstXrQN/YBNq6uqlk5BTyScaNQ7thZF4QtlVqQM/QKJ0MAfKVe++kbV3VIJ/W0LFjx1CyZMkMFSUBMPr9IaAfPXp0KsgnC625ubnUopxRIzExMSAor1WrVip3neDgYE6TQy4zspD/5s0b+Pn5pQJ12X5KGvX09ETz5s1RoEAB9hGNleqSXD09vVSy07JGUFAQWrZsmatgSTB78+bNTOdC0mHqG21wCHxlLfmvX7+Wwntm7ER169WrB2NjY+k83rp1i/GFgYEB+0xWl7LtkPtU27ZtU1nyz5w5Aysrqww3eZJ2EhITmItWxw4dpfokeWfPnkWhQoWyXAO0QaC+VqhQIVfngsvCFCCfi5ayKaPKkL/+wAEs2bYdh1xd0bB6tQxHOtl1JbYfPYY9S1zQvmkTeHz8iHp9+uH42tVoUbcuZqxdh0cer7B3yWKYmZhg6bbtOHzxEl6eOAZNDY0sv9dQV0etnr2Z7FnDh+GdpyfajR6LA8uXoXHNGgzy6bsNM2egcMGC6Dl5KowM9HF4pSsPM8dvEwLk86vPtK3lLeQbYsvEofhr8Gjmqp+clIR3D+/CbesaaOvowqywNQqVsIFD/cYoUa4i84WXG/ILGOPQTCc06NFfKoOi8Fw/uAvaunowL2wNq1J2qNCgqTjajLo65Id8U1xaOAPV23VhHu40jtvH/sPDc8eho6ePAlZFYWVThsmwKmXLZMgL+Q8Lm+PRykVwaNpKKuPizk3wuHMNuvoGTEYxewc41G8Ci6IlmAwB8pV776g65BNsEYQnSg+xZ64v+j02MjKSFqC60dHRzEiQ3aWtrc386OkiizX55IeEhKRqiyCc2iSol7Xu0sZp/PjxsLW1ZZ/TGwTqM5eLwFXWrz4+Ph6xsbHZVqU6+vr6Wb6hyLYROQuQHqOiojjVok2KlpaWtCzXuqQ/0oms5Z1Amv7L7sqoLs0/l7WjqanJNlayGwgaK815dhetGxprVhb/7NpQxvcC5POgVVWG/MSkJExbvQZbjxxlVvSG1aujSc0a6NCsKUxTHpQE+Zfv3Yf78aNSbf09YhTsS5XEqqlTYNGwMY6vWY1GNcTuPuTXb9mwMU6sXcMAPavv9XR10WTgIPjfvA7jlB36pXv3YGVhAbsSJRjk0+aie6uWrO2dJ05izd59qfrCwxTy0oQA+byoMdNG8hLyv5oYYF6HxihaqSr7gU+Ii0PIdz94vXFn/VVTU4eBqRmKlS2Pmq07MIiVF/LfmRthRafmsCrnkCIjFoHkKvPxrVSGkXkBFLOvgNr/68hgXF7If1HQGJu6toKFbRnQwVkax/cvH/H962epDOOCFiheriLqtO0Cy+Il5Yb8B4VMsbf7/2BWohREycmIj4uD74c3+On7TSrDtFBhthmq274bzK2KCJCv3FsnXeuqZsnPZfWIf8eSkhgYShwd2H2fkIAJEyZg//79uHPnDsqXLy8FUeIAgsTcPASbF3oRZKqWBgTI52G+VBnyJcMPi4zErSdPcff5c7jdvIXA4GAcWrkCTWrWBEH+F28fZrmXXEPmzkNEZBTWTndGyZYZH7DbNm8uWtSrm+X3ZDmh9r2
vXEo3ExJ3ndt7dqNmBQf2/X/nzmHuP5vw4ewZHmaO3yYEyOdXn2lby0vI9zI1xJYJQ9A8xZIvShbhw9MHOLPRFXqGxqjdtjOzTJMrioam2JojL+S/J0v+jPGon2LJJxnuNy7hyt6t0Dc2Rf1OPWBfqwGzhqtrajIZ8kK+uyVZ8qejWpvO7G0BybjvdhT3ThyEgak5GnfrB9uqNaGjT65HYhnyWvLJJ//hioUo36SlVMa1A//i+dXzMCpgiWa9BqKEQyXo6BlI3ZsES75y7520rQuQL7++JdFs/vrrL5APNvn9L126NJWlWv5WhRqCBpSrAQHyedDv7wD5smqgh5nj7Dl4/ekzHh08wCD8o5cXTq1fJy02aNYcFtVixaRJKN6iJR7+dwCVy5ZJp82AoOAsvz904SImLl8B32tXMoV82RCaAuTzsGCzaYLrelZ+T1JLyFvIN8JWOng7dCwLIEn3SEjAdwR880Sxsg7QNTCURmRASlIruSE/5eBtg14DpTICfb4hLPAHipYtz1x2JFEfJDLkh3wzXFn8K7oOjeO75ydQhtyiZcqzmPRpZSgC+Y9XuaBCM/Hmn2R4v3+D5KREWNuWZZugtDIEyM/du0mAfPn1Ta4i5Ovv5eWFYsWKsUO6ZNHP7iCo/JKEGoIG+NOAAPk86JIrFOW3g7fkVjNmsQv6tPkfGlRL7Y+/6dBhuGzbzizsBPkUcefVyeNSbTUfPBS1K1WEy/hxsGjYCGunTUPvNv+Tfu/l54cS1tbs31l9//jVKzQaMIjJKWhmxsofv3KVuQrVq1olXZx8AfJ5WLAC5CMmp9F1UuCVouyQT3lGl6KQL42uw0GGQpAvG12HgwzFIF8mug4HGQLkK/++lpUgQL78+iYfefLPpzCQFOKxRYsWzDUnuwg48ksSagga4E8DAuTzoEtVhXwa+uA5c3Hj8WMsnzgRNRzKs9fzj1+9xqQVrmjXpDHWz5jOIH/3qVNwGT8e/du3w+2nz9Bh7Dhc3/kvi75DB2/P3LiBY6tXo1TRIthx/ATmbSSXGjd2SDar7w319VCzRy9UKlsGS5zGs1j9ncY5MT98ycFbwZLPwyKVowmu61mOJnkpyqslPzwMLzOIk59BmHxI4uRvVyBO/osM4uSn5GxNpROSQXHyjykYJ396mjj5mcnwKGSKK4tmoqaccfK9M4iTn5kMipP/VIE4+dEZxclPyeMrk7aAzcftYgXw+Z+1sBXi5Ct0bwmQL7/aWMhckQgUHebDhw/o1KlTvoukIv+ocreGRIcMPNXUxGGIyVCS8u/c7U320mT7JylNn2UVijP7VnO3hAD5POibKxTlN0s+DT0hMRGuu3bj8IWL+ObvD3V1NZS0LsKs8mN694KWpiaDfN8fP2BtaYn9bmdBh2XH9+0Dp359mfZi4uJYCM1jl68gNi4OFcuUgevkiahZoQKn799//creKFCEHgszMxZCc1TPHtIQmgLk87BI0zSx9NJzlDA3Qq8atuka57qe+e9V1i3yCfmxERG4t3whKj16AbLDq9GPjeRHJykR6hpa0oS3BLPh+rrw7tEFjXoO4DzsqJBg3Jo/AzXffmEy6CI5FKdBlJTEfN4lSXVJBmWKDRnQD7XkAPDwwABcnz4B9b0DU8tI+fFUV9dgMphcNTWWKTZ+xHBU/ot7/osQf188mjEZ1f2C0slgOWjV1OnksVTSsemXAAAgAElEQVSGn4kBtJwmwL5eI866+unthdczp8LhR0hqGRTVggEAyRA3R7r6ZmoEsxkzULJydSEZFmct/yooQL4CSksJb0kx4ilMoySeumItqX4t2QPJktHIRpbJKMpMYGAgVqxYgblz57IoNk+fPsWTJ08wfPjwfAnOjx8/ZiFZqX80niVLlqBBgwYssVZ+i6KT2YoSIJ+He40rFOVHyOcyfAnkp42lz6Xun1RGlQ7efguORGWXIyhubohpLaumgn2u6zm355ZPyKfQkYlxcRAlJf4iRzUgMT4Ojy+
cRp12XcUPcYm5Wl0dGlrazGed6yWWEcuAXkqnKTJe3bmOKhRikg8ZsbEQJcuEBxSJEBcdja9v3FG2Rl2xDOmvsDo0dLShqSXnODKQEfzdD5GhIShu75BOhqaODjRkQudlpzOmq7QyAPi8fwMdAwNYFCmeqQw6G/T68hn0+18LGBqKY2gLV9YaECBfsRVCoRRfvnzJ/PLbt2+vMqCn2GgzrkVwT89iclmi7LPfvn1jIEznFOrUqcPi+1OUIcppID17k+KyR4mwKIb9uHHjWGZZyitAmYT79OnD6kis+nz2NydtUbz+Vq1agWL00waFMt+eOHGCxfBXlShKAuTnZAWk1OUKRQLk86DsfNyEPJC/5OKzPB/J2Vff8MpPnOhFFva5rufcHgCfkM8y1tIAZPIA0g/4hyf3cefEQQxcsAqaspDKOFn8ipnzlYmM13dv4NmVc+g7eyk0NDVlAFzcNh8yKJLN+8f30GPqfCXJSMK9U0fg9/kDujjNUJqMawd2IiI4CO1HTUoj49d8CJDPeUVKC6oc5EvuJckI6L5Ney9m9Jn8qhE/EzJpm0LCuru7s0O3/2vTRu57NVW7kmePPM+UDMYjfjTJ8VxSRCcpbzHoGXzo0CF24JjyCGzYsAHOzs6YPHkyS5BFm5/69etj69atqFixIiZNmsTAmICY1hxB/Pz580EZfTdu3MiSgB0+fJjlFqCLwpISQBPw54eLxtujRw+W/4Ay+K5evRqUWG3q1KnsbQQl2CL4pyhLCxYskCbgyg99l/RBgHweZoMrFAmQz4Oy83ET8kC+6aQd+XIkBPsvZ3aXxobOT53kFfIzGFhCXCyOrVmCb2890H3KXJR0qMz78ONjY7B3gTOC/XzQa8ZiFC1TjncZcTHR2OY8GjHhYeg7ZzmsSqd3ycqp0LjoKGydOgqxkREYsHAVLItlnIk0J3JioyKxdcpIkKzBS9azePoZXQLky69lVYB8AurE6GhEen9DcmQEEiMjxQOVhXCZTbpUC+ztmOR0TQoA57RcilxqNTAgEFFRkShZqlTK2z4ZWb/IKuPNgqT/EjCXBX0F+6ymoQEtM3PoWlpCz7IQ78BPYE6ZYPfu3QvKHEvZX4cMGYK6deuy5FR0AJmglzL2Fi9enAE7Wez37NmDq1evghJ7jRw5EhR61MTEhL0BIMt9ly5dcPv2bQb5dKCZ2j537hzbROR2gq/M7iAy/GzZsgVXrlxhGZMbNWqEbt26oUSJErhx4wbKli3L4P/vv//G/fv3YWZmJt/GT/5bV+4aAuTLrbL0FX53yOdBRX9EE/JAfn6z5NMEkX8+ue6UKGD0R0J+sL8v9i+azlxQKKNsv7nLeX9g+3/+gAMuMxEfFwu7arXRZcJM3mV4ejzH4eXzkCxKZi5BfztSll5+LX1vH97B6Q0rmIz6HXqgUdc+vMt4cf0iLuzYyJJ2tew/HNVbtMlQhgD58j9e8zvkE1gmRkXi077dKFiyNHSNjcRnWNLCcUb/lucz+VUn3mew93pKutJuUjISI1OGJemKjkKory+MK1WBeaXKvD5TqP1Lly6he/fuzDq/efNm2NnZMes8QTD51ROs0yaA4JegnfLfUCIx+nzs2LHs725ubrC3t2d/7927N+7evYvZs2dj6NCh7M3IvXv3sHv3bhw5coRlu5XrjWaKjiTnBGRVJstn8rZJ7dFbB3o7YWNjwzYtdJagaNGiDPJpPAT55NLz4MEDthGQV4aSVpHMnpedl8pom6ts0b9P+wLk/z5zmZORyAP5eX3LSXzyZeGerPji30dx1IP8dinTkk/jfXPvJk5vWglRUjK0dHXhuHgtCpD1mCdAJhmPL5zC1X3bmX6NzApgwIJVMDYvwKuMGwd344HbUSbDomgJ9Ju7Arr6+rzKOLtlDTxuX2UyKL5+39lLID7kyw/6JCUl4qjrQnx5+ZTJsKtWC10mzBIfzksjQ4B8+e/U/A75CVFR+HJwP6zt7aGlL5yzyHaGUw7a+79
+hSLtOkDHzDzbKvIUoGevj48P1q9fDw8PD2adHzZsGIyMjJgF3sXFhUE6fbZv3z58+fIFBw4cgLGxMTu7QJGIKNQorTvaGNBGYMqUKXjx4oXU+k1uL4MGDWLfKWLJp+cE9Y1cfcjCTpZ1eqtAbw8ePnyIWrVqsXblvWjs9NaicuXKbPy0uSlSpEg6yCcZgiVfXu2qSHmuUKSq7jq5OQ2nr9/AiAUL4Xf9am6K5UWWKkH+9FMPERYTzyz3EriXKIHreuZFaXI0ojTIp8gzycn4b+lseL97zV6z0wHblgNHomLDZvxYZlJk7FvoDL/PH5kMLR0ddHaagZIVqvAqY9fsiQjw/iqWoauLAfNcUaBIMV5lbJ82BnTwlmRo6+lh1JodLEsuL1YskQjRkRHYNXsCwoMCmdmU2h69dge0dPXSyRAgX46bKKVofof8sA/vEf/5E4wKF1KmzVx+xeXzGnEREfju+QVl+g3MNH+HIkOQGH3oOUnZfgnid+3ahYYNGzIL/sePH5nrDvnbU0SaChUqMNcWct8hq77sAVyy5G/bto3lG5gzZ470O4L8gQMHKgz59PtAvvO08aB+tGzZkrkRVapUiW006MAs9UeeZxSNm8ZVvnx5uLq6snYkkH/s2DG2cXj+/Dl7M0FvNEh2frsEdx0eZoQrFOUG5B++eAn9p89INSoDPT2UtymNWcOHo1X9ejyMWHlNBIWG4sNXL9Stwr8/tPJ6LW5ZlSCfAN9EL+MIK1zXs7L1mbZ9pUF+SuSHpMQExMfG4s6xA2jaexA0NbV4/6GUyHh22Q2123ZRmoyYiAh8fPoAFRv/pTQZgd5eCAv4AZtqNZUm4+srd2hpa8Pazj5TGQLky38n5nfID3xwH1ox0dA1NZV/cH9wDTrH8OHyJVSa4gw1iXsTz/og8CXYl/xJfycffF1dXWYpp3/T+pKAfVqoJgB/9+4dO6hKlnbJ9wT5jo6OOHjwoMKWfEm/JG1SHwnK6beDPpM3Ig61N2bMGISFhTE3JUNDQ9YOvYVISEhgoTTpHAHB/bJly6AlR0Qxnqcl0+YEyOdB01yhKLcgf8T8BfA48Ss7bXhUJPadccO6/QdAMecrlSnDw6hVpwlJQgtlJ7BQJcjPava4rufcXgHKhHzJWOhg7O1jB9Cst6NcFh95dEEynl5y+xWmU57KHMtGR4Tj45MHqNSkhdLGQZAfGuAP22q1lSaDzhdo6eiyMxKZWeAEyOe4KGSK5XfI/3HrBnRFIugYG8s/uD+4BkH+p2tXUX78RGjIEe43N1UWERHB7mXybZeFbsooTOFJq1Wrlm+i6xA70AZGR0dH+kaCdEVvM86fP8/cgMiVhw4f0zkCeTcRuaF3AfJ50DJXKMotyB+5YCGC7t5ON7KqXbujW6uWmDF0CPvu5uMncF69Gu89v7JEV4M7d2IJriQwvGLnLmw8eAjhkZGoX7UqNsycjuJWVqzulsNH2He+AQGwKVYUc0aOQJtG4uQ333/+xJC58/DA/SVKFy0Kl/Hj0Hb0GHw6fxZmxsYoUL8hjq5eheU7dsI/MBAFTE3w74IFcLC1gay7ztM3b1C/b/9043A/fhRlS5ZkybvGLVnKMvAaGxqidYP6WDZxAowNDOB28xacV62GY+dOWLhpM27u3oXKZZW7uREgn4ebKYsmBMjnrl8B8rnr6k8sKUD+7znrqgD5WWk+v8XJz66vEjcmBtI8nUfie2UKkM+DRlUF8mv26IX2TZtg9ojhCAwORvkOHbFh5kx0afEXPlJyjzHjMGfEcPRr3w4nr13DWJelOL5mFWyKFcekFa749O0bbu/ZheNXrmL0osU4vmY1qjuUZ0Ddb9p03N67G9XKlUO70WNZJt19S10QGhGBQbPm4PGrV/C6fBEmRkYwrVMPzevUBiXXIiDvPdWZlT+yamUqyKepSZQkEgIwdO481s8bu3ZCU0MDDfsPQO1KlTB/9CiWaZfkWBYwx/b583Dx7j0MmEFja4HpQwf
D0twc2kp+lSZAPg83kwD5vChRgHxe1PjbNiJA/u85taoO+b/nrOTtqATI50H/+R3yCaB3nTzFrN539u5G9fLlsWr3Hpy/cweXt22VasB1125cvHuXfdZh7DiUt7HBEqfx7PsfQUG49vARurduhU7jnGBXojhWTpksrdt4oCPqVamMuaNGokC9Bji5fi1a1K3Lvt/vdhaD58xNBfkE+J2aN2Pf7z51mvWHLPSZHbylTcegmbPx8L8DKFOyBJ68foNmjoPx884tKbzTRqLpoMEIfXAP1x4+ZJuWD2fPSN8+8DDVWTYhQL5yNSxY8rnrV4B87rr6E0sKkP97zroA+b/nvOZkVALk50R7KXXzG+TTwVs6bCu5yMptZWHBLN592rZhH49e5IJ/j//y25eUJXccAuOKnTpjbO/eGNatazoN0Xeje/XEiO7dpd+RFT0mNhZLJzjBvl17vDp5HLbFi7Pv3375AnIVkrXk39m7BzUcyrPv/zt3DnP/2cTkZgT5tMGo1q0HZg0fhpE9xDIPXbjILPUZXe/dTuP916/o4jQREY8e8DDD3JoQIJ+bnhQtJUA+d80JkM9dV39iSQHyf89ZFyD/95zXnIxKgPycaC+fQj4dvH186D/pyPpNm4GaFStg7TRn6WdjXZYw33lykcnoygjkJeXouzG9emF4924ykD+bucwsHj8O5dt3xOtTJ2BTrBj7noC7cueuqSCfDgDTGwUukN9pvBPiExLg9s8Gqd/bkUuXMGaRC+iAVkbXpXv30GuKc4ZnE3iY8gybECBfWZoVt6sUyE+TD4AdvD1+AM16pTl4mxN/ywxkPL18FnXadYGabEodHmVQCMqPT+6LD94qSUagjxdCf3yHbfVaSpPh+eoFCzXKDt5mMg7h4K38992fCPksAAOpKk1mWcrswLc/dV75lguQL/+98LvXECCfhxnOb5b8tAdvX374wA6wnly3lvnC07Vm7z5sPXIUb06flGogICgYxkaG0NXWZq4udGh2zbSp7Hvy4f/3xElM7N8P3SdNZgAv667ToF9/NKlZE9OGDIZFw8Y4v3kj+7cY4s9j0KzZCkE+vW2YtW4Dnh4+yA4HS65nb9+iXp9+7DBv0UIUSxmIiolhbxMKmplBgHzFFzbX9ay4BMVq8gn5BIYxYWFIio9j8Jjy84/EuDg8OncS9Tr2SJ3SUk0desYmLO4814tkRIeEIDkxkeGprAxKJFW1+d+pEzupq0Pf2BSaOjpcRYBkRAYFAcnJKXk4xVXjoqLg6fEM9rUbKE1GiL8vwoICUdKhsowMNUBdDfomZizXANfr1ziSUlXxef+GhdAsVNImnQwDUzNoaGkzHby+fAb9/tcChoZC4iQuOv8TIZ+MUNuOHMWHr55SFZEemteti/ZNm3JRW7ZlaBPxMzgY8//ZiKWTJsJQgeRL2QrJooAA+TnR3u9ZV4B8HuaVKxTlZXSduf9sZL7xT48cgomhIYN2+3YdGJSP7dMbP37+RI/JU9ChWTNMHzKYHa6lzcJB1+WoaGeHGWvX4+2Xz7i9ZzfO3LiJYfPm49T6tahib48jFy9h+PwFzF+eIuQ0GjCIRczZ7bIYFPd+8Oy5uPfihdyQ/8XHB7V69sa66dPYWQDJpaGuziwvtLEoYlkIm+bMhoaGOjsc7PsjgG0wBMhXfGFzXc+KS1CsJp+QHxsZgavzp6Pig+ep0tPTjyRdaurMvsf+Tj/cEfq68O/VDX8NHMG585Ehwbg4bTxqvv0CNZkEwpnJCDHUQ8TgQWjQtQ9nGRSn/pzTMNT3+ZlOBut9mnH8NNKHaOwYVP+7A2cZwb7euDlxFKr7BUOdbVVS9JKJrr4b68PAeRocGjXnLCPA6wueThoLh4BQTrryMTVAofmLYFPt/+ydBVhV2RbH/1y6BFTEwm7HZ3d3O/Yoto7dnWM7dufYXTM6dit2DQYWNiYqoNIN931r40VAkHsv5xas833z8HHPWWvv/9rn8jv7rL12BYZ8pVX
+fmJ6hHyqEtd1/ERky5wZubLHVomTGRmhwv9KoCZNSClm+r/9XvE9GEMz/3L5992Wv9WJp52X6XrFWwBFjXYqUNG4X3/8t3cPMmbIIOrJ0+1P5yoOTVVlYchX42ZI45cw5EsQYGWhSJeQHx4RIYCZ0nao+gwdrjdvYvySpfB46Ql7W1uRrz990EBRuYaOORs2Ys2evQlKaObOnl18RgtlV+zaDf/AQBTJmxczhwxC7QoVxGf0Jddr8lQ8ePZMLN79o18f8Wbg7ZlTsLWxEdV1lEnXWb5zJ0YvWPRDhPYvXYwm1avj1XsvDJs7V5TQpDaTf3ogoAo7DPnqD2xlx7P6HtS7UkrIDw3wx/2xIzD833Mwif/6XgHj3/8eIwZGeONgg3Uj+6BB78FKNz7osy/cB/fDmHO3lPLxNHMG/DNpOKp37K60D/+PH3C3V1dMuPVcKR/3nOxxduZElGvx41qb5Jx+fvsG73p1x4B7nkr5+C+bA9wW/olf6jZSuh/ens8R2qMnXB6/VcrHpZyZ8GLVUuSvWJUhX2mV0zfk098qWsc1pHMnURJaQD5NGAE4cuEC7j99hg++vvDy9sbs4VRcIreYle8/YyYCgoIxvFsXNKhSBc9fv8HEpcvEm+OZQwejdNGiwsbOI0ex6/hxVCtTBn/9/Tdu/b1XgP3Ulavw6v17jOjWDdXKlsHWg4cQGByM6+7uWDZhPLJkyqRGBJO+hCFfMinTjCGGfAlCqSwUaQPyJehOqk3QjEZEVJRI+6Hj2l13NOzbD37XrsTV4E+1Ez00wDn5mg2KpJDv74cHY0dixP6zCaEyiS7EGAGv7W2xblQf1O8zJPYMmp1TzNDJZEl2POizD+4N6oexZxNBfjI+nmS2w98E+S49lPZBkO/eswsmuiWC/GR8uGdxwJk/J32HfCX68fnta7zv2R0D3RNBfjI+bmbLiP8Wzf4O+Ur48H75HGE9eqCTRyLIT8bHxZyZ8Xz1MoZ8NW+59DiTT5DfdULsTH6eHDniIL9bixbYefQo1u/bj+Fdu4i1apdu3cJfU6egw8jRYm8ZSgklcF8zZTKGzp6Ddg0bwtbaGtNXr8b8kSMF8K/fvx9927bF0YsXcfzyFbhu2ghaH0dpq0Xz5cWyHTswfdAgHLt0SRSY6NuuLXq3aytpSg9Dvpo3RBq+jCFfguAy5CcUsfPY8fgaGIAts2YKEKKNsczNzLB34QIJ1NZfEwz5mo2NPkE+Pcg+v/MfXt67hUJlKyFT9pywsc8ImbFx3Ov71EI++bh/8Qx83r1BgdLlkTFrDtjY28NI9t1HaiGffNw4uh9hwUHIX7IcHLJmg3UGuwQ+Ugv55MN11yaxniFfiTKwz5IVVra2CXww5Gv23klsPb1CfpfxE1C2SBEUzptPSEI7lNapVEHMrt+8fx+b/5yFp69eo9v4CZg3aiQmLVmKi9u3glJEaYb/0fMX2HzoEDbNnCGunbJiFUxNjMVDQc/WrfFb40biDXaLQYOxaPRokUJKm09Sbv72I0dQNF8+mJqaIio6CvNHjUqQ7iPFCGDIl0LFtGWDIV+CeDLkJxSRFvAOmDkTF91uie2pa1coj0VjRsNJwteSEoRNchMM+ZJLmsCgbiHfBn+N6I16vQaKVP2Y6Gg8vnkFR/5aAhNTM1jb2SNz9lwoXa8xCpQqByOZDCpDfqYM2DNxGKr91jXOx+0zx3Bu1ybhw8beAY45c6Nsg2ZisSv5UB3y7XFqxgSUbd5GrDagflzevwvXj+6HqZk5rO0ckC1vAeEjZ6GiwoeqkH8ja0bcXDgTxWvTOppYHyc3rcH9y2eFDxuHjMhZsBjK1G8ifJEPhnzN3jsM+RCppd0nTsLQLp1RvUwZIQn97Y6OicGKHTvFmrOVf/yBp69eocvYcZg9fDiWbtuOgyuXCxg/ffUa/AIDcPTiJayfPh0ymRFGL1gAhwx2OH7xkrBLs/4Pnz8XO7xTOekJS5ZhVPfusLK0xBd/P7FTO61
PszA3w5QBA6Sv6kOTD+fOotjQETBWYeG7dkcfe9OmAgz5EqjNkC+BiGnABEO+ZoOoS8h/ZWeN6W3qIm/Z2OpU0dFR8H33Bk/drn2DBRnsHLMgf6nyKFW7IbLmza8y5D/OaIvF7RsiZ4nSYnkrwfGHF89ElZxYIJHB3ikrCpapKHw4OudWGfLvZs6AdR2bIWuR4uItG1WmeeNxH1TFRuGD3koUKFMBpes0QsZsOVSG/OtO9tjVuSUy5ysY5+PlXTd8fPUizge1vWDZSihdpyHsHJ0Y8jV76/xgPT3O5NPC227jJ8KlWVOU/6W40IR0sDA3F/n0tJ/LykkTxUx+13HjsXn2n+g8dhwWjxsj8ub7TpmKddOnofuESdg4YzqiYqLRfvhILBo7Bu5PnuDF27diM8hthw7jr71/48iqFWg/YhTmjBguZvBHzV+A7q1+hduDRwLyJ/fvx5Cv5XGfHt0x5EsQdYZ8CURMAyYY8jUbRF1C/mv7bzP5vw8Sf5gJwJ/euo4jaxbD1iGTqHlfrHINUdNdkU6jzkz+3onDULVDtzgf7udP4eyODciQ2RHVWnVAoXKVYWJqGudDnZn80zMnomzz1qJ6EPXj2pF/cO3QP7B3dELN9l2Qt0TpBD5UncmnnPybC2aheJ0GcT7O7doId9dTcMiaHXU69kDOwsVhYmoS1w+eydfsvcMz+RDllWkTRc9370SqjaiYYywT1eMy2tnB9+tXUUrT96sf9pw4jr7t24viFKevXBWz/Y2qVUON8uXgeuMmDrueF/do1TKl0aZ+PVAFnuXbd+DhixcoWbgwaJf5QS4dcdfjMf45fRpBISGo9L//oXOL5rhw8z+YmBijbqVKDPnaHfbp0htDvgRhZ8iXQMQ0YIIhX7NB1C3k22ItLbztPTi25r1cjsCvn+Hn/RGOznlgZmEZW6mDyuR9K5WnMuR/W3hbrWP3OB9+3p8Q5P8FjjnziAeIxD5Uh3wHnJn1vboO9eOz1zuEh4Ygc85cIi0osQ91IP+/RX/ilzqx1XXIh/cbT1BufqbszjA2MfnBB0O+Zu8dhvzYcagYjwo9ftgES9y7VDJTvDqLXVz/7TpFCUz6SJTBjVdCU7wV+3aeKMz17TP6nbheUYIzXiCk3oBL9I3TdbR7IxmAN4Z8CYLEkC+BiGnABEO+ZoOoa8hPUF3nG7zSH2/KKU/qUBfy46rrKOFDLciPX11HCR/qQX686jpK+GDI1+y9w5D/XQHB79/2v4hXKTfZACh2yY0P5fHr3CsuVPyOAD++3aTO1VS0GfI1pazh2mXIlyB2DPkSiJgGTDDkazaIkkJ+gD/uJVEnP4ky+XF18terUSf/bhJ18hWQEV8tqsVPdfL3qVknf3yiOvnJ+bjvZI8zMyeivIp18t8mUSc/OR9UJ/+WGnXyQ5Kqk58EjJFWl5wz4cXKpSjAdfLVuunSY06+WkIZ2EUM+QYWMC00lyFfApGVhfyzXVxQccBAUUqOj7SlQGRYGG6sWom623am2DFlx0uKhjR0gr62T0rIpx1vry6ag3x3HohdXGNfz8e+0o+JioKxqWmcuvRRkKUF/Fr/iqoq7EYb7PcVF2dNRtHnryGL9/QQ6yNapK2Ig/wC8Le2RIRLR5Rr0lLpyAb4+sB18hiU/PAZce8TRHaAXLy6l5nEbmwnumgEfLGxgknPnihRm/LllTu+fvDC9WkTUPQT7Xj77ZADMfIYYZeAUTF1Sf3wsbVGhoEDUahiNeUcAGIRs/u0Scjv6xer1bepUFozIP5IGX1/W0L9+JjBBllHjkbu/5XmzbCUVvn7iQz5aohmAJcw5BtAkLTcRIZ8CQRXFopujBuFPJWrwj5XLgm8sgl9UsDvzRu8unYFFeekvBeAsuNFV/3T1/ZJCfkEj/KoqNjcWoHYsUdEWBiuH9mH6m1cEi6Kk8kgMzaBTAHmSgQnWR+hobh34TTKNWwRB7PCnIQ+QgMDRQ3/4lVqacyH77u3CPjiI2rfJ8h
PkLAfrx/dF+sdqNRmcj6oQtDD04fRpUl92NhYKxEZPoUhP22OAYb8tBnX1PSKIT816n27VlkoerZ7JyK93iN/7ToSeGUT+qTAC9dzMM2eAwU7uKTYLGXHS4qGNHSCvrZPSsgXO9aSft8Wy9E/aWHoo2sXcfXgXvSctVRUmIk7YlfTqVYNIxkf7q4ncdf1JLpOXfB9Nl+Yj52+VmlBXjI+bh4/gGdu1+Ey8U8N+YjGxX924MOLp2g/eqrGfJzeshYBn33QetiERD6+x4MhX/UvAoZ81TUzhCsY8g0hStptI0O+BHorC0XBXu9xfcwoVBw4iFN2JNBdX0yIVJ2VK1Bp3gJYZ4/dLv1nh7LjJSU7mvpcX9snKeQnIV5EWCj2LZqFt08fodWQcShYpoLkEoeHBGPbtDH4+ukD2o2agjy/lJTcB+1eu27sQFBKUscJs8SmVlIfIYEBWD92IKg/XabMQ1aaaZf4CA7wEz4iQkPRfeYSOOZM+g0oQ77qwjPkq66ZIVwhIP/sWRQfNgIy3gzLEEKm8TYy5EsgsSpQ9HjTOkR++oRCDRtL4JlN6IMCT08eh6mTE4r06K1Uc1QZL0oZlPgkfW1fqiA/IgIHj52AQ7laMLeySqRYbK2NLx+9sK2bQZ4AACAASURBVHPWBAT7+4uymD1mLlZMrn87X1GTQ3G5oobG93Sf7zklSf/O68Uz7J77ByLDw5GneCm0Hz0lno/E9smP6r97ef8O/lk4E3J5DH6pWhtN+wyJ11/V7X2/+Pu1j65dErv9ko+KTVqj1m9d4mlE/0zc//j/X7l+3T5zHGe2rxPrC+q69EK5hs2SjAO9gfE4exRdmlK6jo3Ed0PaNKfvkO/7300YBwbA0sEhbQZAQ70iyH96+hRKjB4HGe0FwEe6V4AhX4IhoCoUUW6+fU5n5Kmq/MI0CZrJJjSgwKsrl+H37q1SufhxaPit/rIGmiOJSVXHsyROlTCSGsiPiIjAmXOueP7RN2HahwJH5XK88XyJhzevijx9qhdfpWFT2GTIkCAVXIlmJnmKIjXoxeNHeHr3toBjCytrVG7QBJZWVhL6ADzu3oLn44ciFcnG3gGV6zWCqZmZpD7cb1zBe88XwgdtolW5bkMYyRKWDkyNVhQDt0vn4eP1TvhwzOGMctVrJ+mDHgJk4SHo5tIB1j88wKnbirR9nb5DfsCL5wh5+AD2zs5pOxAS9y7YxwdBYWHI1axFsqV9JXbJ5vRcAYZ8CQKkKhSFffkC93l/wsreHnlr1ubUHQlioG0TlKLjecEVIX5+KDlmAiwyZlS6CaqOF6UNS3SivrYvNZBPs730H+1c+cMhB6JjorFo4SI8f/4MQUFBsLO3RyeXTqhStYpqOfLJxeCbj7lz5+KVp6fYfdMuQwb0HzAAxYoVk8iHXPRv1syZePfuHcLDw2FrmwETJk5AtmzZJPUxdcoUfPjwARQTmj2fO28erCwt49YVpGooyuUICgrGjJkz4OPtLUzZ2toKHxbm5kn6oI2KaBdTUemHjxQV0HfIjw4Lw8u/9yBT1qywypw5xf6k+xOoYldMDLwePkDe9h1hTBX8vq3xSffapHMBGPIlGADqQhGl7rw/ew7ZypRBpvwFYJ0lCwO/BPHQlAkC+2Bvb3x+8Rwfbt9Gjrp1lE7Rid8mdceLpvqV2K6+ti81kE99TGrHS0Xf6TOyHxYWhp07d6JHjx4wMTFJAMaJdVEskI3bBCfeotnEv1PsnKnwceTIEbRp0yaBj6R0V/V3in4EBgbi+vXrqFevnsZ8vH79Gh8/fkSFChXifCSnSXw9SPOU+qXoh7u7O8zMzFCkSJEEPpKyp9KCZW3dTHrqR98hn97kxISH482xIzAzgngTJTNJtBA+/kYNSW3akNTvxBfB9xKtIjxJbY4R/7zEfhC7aF+Mt/g7a4mBHS/gyWXxJbcT18/6kFSbv/mSx8gRFR6OkMBAZKleAza5cvMsvp7ed7poFkO+BKqnBopoMa7
XxQv4cvc2gt57ISosVIIWsQlNKGBiYQmbHNmRsVQZZK9RU6lFtkm1IzXjRRP9Si+Qr4x2oaGhAvJ79uwpzcx3Ek7JB0F+27ZtNeYjICBAQH79+vU15oMgn2bzK1asqDEfd+7cgYWFhYB8hnhlRrBy5+g75AvGJtCPikKE31eE+fggJiLiW+cSES9VyFJ61jq5/W4TrxlJ+mEgRi7Hy1eeCPD3R+lSpeKNyfhrXeJTfHJ0ntwTiHLxS3CWzAjmGTPBIrOjmMHn+0QNDdPwJQz5EgRX36FNgi6qbmLcOGDuXGDsWGDOHNWvT8NX6Pt40df2pXYmX5khxZCvjEqx5zDkK6+Vvp1pCJCfQLN4pW51qWVkZCQWLFiAffv24Zyrq0gjiz95r7O2Kf2Qo7MWsmMdKcCQL4Hw+gpFEnRNfRO1agEXLgA1awLnz6tvJw1eqe/jRV/bx5Cv/M3AM/nKa5UezzQ4yNeDIFGKGKXB0RoaHx8frF+/Hp06deJ1IHoQG25C8gow5EswOvQViiTomnom/PyAbNmAsDCAFgB9+ADY26tnKw1epe/jRV/bx5Cv/M3AkK+8VunxTIZ81aNO3z/jxo0Ts/gZM2ZEzpw5sWvXLljSgnM+WAE9VYAhX4LA6CsUSdA19UxMnQpMm/b92ilTAPodH0IBfR8v+to+hnzlbyCGfOW1So9nMuSrHvXPnz/j1q1booqTn58fcuXKBScnJ/GTD1ZAXxVgyJcgMvoKRRJ0TXUTNIufNy9APxUHzeJ7evJs/jc99H286Gv7NAH5iau0JJeTn5rFbEn5OHr0qKiuE9+ulD4oreDatWs/LLyV0kf86jqa6sfdu3dhbm7+w8Lb1PRD9S+1tHcFQ77qMaX7mMpUUsWnT58+oWHDhuL+5bGoupZ8hfYUYMiXQGt9hSIJuqa6CcUsfsmSgLs7oPjJs/lxWur7eNHX9kkJ+WTr69evoE2y4h9UW/7gwYNo3759gt+TJg4ODqLSi7IH+aDZv6ioqB98nDt3Do0aNUoACARe9vb2Kvug/GCCj/hHcHAwqDJN1apVf/BB/SBwVvagfiTlw8vLS/z+f//73w8+KJ2BSl8qeyTnw8PDQ9jJly/fDw9EmTJlUsmHsm1JD+cx5KsXZbrP6MGTID/x/aueRb6KFdCsAgz5Euirr1AkQddUM0Gz9wT5w4YBmzfHpuwQ3HfvDixZEvsZ5+Zzuo5qoyrubCkhn2a6Z86cKRbSxZ+JU8By/E2VFDPxJUuWRN++fZVu/ZcvXzBp0iRxvjI+yE+1atXEYj5lD4KNUaNGxVb5iFdhI7l+kIZNmzZFixYtlHUhNtaiflh9201W4Sc5H/RQ89tvv6Fu3bpK+/D09MSMGTPEA44y/aAqJ7179xY1+vlQXQGGfNU1oysY8tXTja/SnQIM+RJoz5CfhIiKGX2ewf9BHH0fL/raPikhn3LWZ8+eLTbIUuZ1O4Frnjx5MHDgQKW/MXx9fTFlypQfADw5A/RWgR4kunbtqrQP2oxq7NixSu9oS5t9Va9eHa1bt1bax5s3bzBt2jQ4OjoqpVVISIh4kGjQoIHSPl68eIF58+aJtyXKxIN2JXZxcUHlypWV9sEnflfAECE/cdpbcvFMavxIda0C8r29vePSdRTt0KTfn41dZe4XHvvpVwGGfAlir69QJEHX1DfBkJ+sdvo+XvS1fVJCvr+/v4B8OlL6I6nYfTV37twYNGiQuEaxgy39O/6sf/ygqwL5ZI9mpyn1pVu3bkr7oM2oqOJHtmzZlOoHpSPFh3xl+qEK5JM9WtcQH/KV8aEK5JM9SkdiyFf/69nQIJ9iTm+UaMFrSvcr7VRNm6cpDgJzWj9CYyalI0uWLKD/FAf5ffToUdxu2WSL3jpRO8qUKZOgLVRth9LtFAdNJNC987NDMclQqFChBKln9BaQ0uFSOqiyT+JUtpSu4c/TlwIM+RLEm0ScytVjEihZ6/x51Dx/Hhdq1cJ5qpnPR5wCNFaUnVn
ShWwM+QlVTwry6Y/9zZs3cfnyZZQuXRoFCxYUoE2AoYCQ1EI++Th9+jQoL5185M+fH1mzZhXVPRQ+Ugv55INKAlLaD71FKFCggJixj+8jtZBPPjZu3CjWJpAPghLKp4/vgyFfu3e6oUE+jSHaibps2bI/pDsq7gXFd+rJkycxjFJGvz2M08Ozq6urqIKT+Nz4/58eAujcSpUqxZ1HY3bv3r3i/lN8Lyr8CHgyMop74Ke3S+XLl4/73f3798XDgOINWHLf+Q8fPkStWrWQOXNm0Wbq64MHD0AP5DY2Nkm2WWGL7k3a1Tq5iQbtjir2po8KMORLEBV6lc1HQgUY8n8+IiiNQ18PhvwfIZ/+2BMk9O/fX3xIbxWuXr2KxYsXC1ilhawE4FQxh/Lq6Y+uqpBP6TolSpRA586d43wcP35cALLCR44cOUS+O8EE+VAV8ildhxbjtmrVSsAJ9WP37t0C9BU+8ubNK3wQkJMPVSGf0nUaN24s4EPhY82aNTh79mycj6JFi6Jdu3agn+SDIV+73waGBvlinO7ZjRbNWyQA2sTpdgTIW7ZsSfDGjcCdHshLlSoVB+Dx3wYogJne7tHsebly5RJAPt2DderUiXtoiB8pxXcl+X38+PEPkE/fC3TP0hH/gULxb7ruxo0bom3xIZ8e7OmtgJ2d3U/bTJV+6KGE7l0+WIGkFGDI53GhGQU4XUczumrBKkP+j5BPoEAHAbLiePnypQBXOgia6I95zZo1xX/06l5VyCcAp8WtBBkKKKAZPXqYUPigBw2a9atRo4Z4c6Aq5BOA0yw6PUwofLi5ueH27dtxPmimvXbt2uJhhVIXVIV8mtEkLQoXLiwgn7S5dOmSeCOh6Ad9Rj5IT6rEw5CvhRs7ngtDhPw9e/f8APmJVSNo3rx5c5KQr5iNT05pmnV///79D5B/4sSJOMhP6lpFaU0a34ln8mkhOd0LyaUYKSCfHqjjQz49MMSH/OT8UqUfhnzt3juG5o0h39AiZijtZcg3lEj90E6G/B8hXzGTP2DAAPEHm2YWaQZuxYoVcHZ2FjnoVOmFZu4UaSiqQr5iJr9Lly5xPihdZ9u2baD1AFQRh/KATU1N43yoCvn0IEHw3rJlyzgf+/fvx6FDh4QPehNRvHjxBD5UhXx6kGjSpAnq1asX52PTpk24ePGiSNWhGXzKQabUJoVWDPna/bpgyP9Rb4Z87Y5B9qYdBRjytaNz+vPCkG+wMWfI/xHyCeoJghXVdWj2jhbW0St+moEjuCdwUuTpkgVVIV+x8FZRXYd8UC1/yhUmH1QvPrEPVSGf8nwJ8hXVdcgHtZN+Tz7oASKxD1UhX7HwltJ16CAfVAWIftJbBIL7xD4Y8rX7dcGQz5Cv3RHH3nSlAEO+rpRP634Z8g02wgz5yUO+orqOAl4V6ShJBVtdyFdU11HGhzqQn7iEpqL6TXKL99SF/PglNFPywZCv3a8LhnyGfO2OOPamKwUY8nWlfFr3y5BvsBFOT5CvgPTElS8Sa0Az+amtkx/fR1IaJy6hqcwAUtTJp0W/yeX9xreTuISmMj4UkE8z/cr4SFxCUxkf8SFfmfO5hKYyKiV/DkM+Q37qRhBfbSgKMOQbSqQMrZ0M+YYWsbj2pgfIp8WhlE9Ps+0pgaviQYAWxLZv317puFKqzcKFC0F58Mr4oHNo0W6zZs2U9uHj44M///wzQTnK5C5WVCKhyjeKaiHKOKK3BfPnz49LsfnZNYoHmbZt24oFgcoeb9++xZIlSxKkO/2sH/QZpTXRvgJ8qK4AQz5Dvuqjhq8wRAUY8g0xaobQZoZ8Q4hSkm1MD5BPM/OK/1ICcBKJzqFFopRPruzBPlTTihY3U7URTcVD2bilh/MY8hny08M45z7G/u0ykuvzzjwcJcNUgCHfMOP2DWj18StByh1vFf1TpZ/xF9UqE1xVfSjgVhnIVfhPrz4UD16qaKVMzNLLOQz5DPnpZayn934y5Ot4BIT7PUHAi+0IeXs
S4X6PERMVqOMWsfvkFJCZ2MLcvgisnBsiQ/7OMLcvnCbFSg8z+WkycNwpVkBJBRjyGfKVHCp8moErwJCvwwB6XxsCvyebYJ+rNWyyVIO5XUEYm9jqsEXs+mcKREcFItz/GYK8L8PvzX7YF+6BLJWXpTnRGPLTXEi5Q6xAAgUY8hny+ZZIHwow5OsgzlEhXnh/qiXMrHIhS9FBDPY6iEFqXRLwe3usQETIG+RocAAmVtlTa1JvrmfI15tQcENYAY0owJDPkK+RgcVG9U4BhnwdhOT1gQqwzlgOmQv20oF3dimlAr7PNiD4ixtyt7wppVmd2mLI16n87JwV0LgCDPkM+RofZOxALxRgyNdyGChFJzrQC9lKjNeyZ3anKQU+3J8NY9vsaSZ1J31CvhyeD65hzVEPTBvaCYdXjkF08Y74rWElpaq9KDe25Lhz8Qh23wrB1H6/Ys+SschRvz/qlS0sqY+z/27B+U+OmNCtLjbMG4sqXSeidB5HCX0o19tUnSWPxtq5k2BSrAU6NiiNmZPGY+C0BchmFburMB+pU4AhnyE/dSOIrzYUBRjytRgpWmT7+kA55K97mFN0tKi7pl1R6s6Ls82Ru6VbmliMmz4hHwjy88WMMUPgUKwKjp24it271iHqsyfeBJqgSqnCkEkAlwG+7zGqf28UqVIXrjdeYNPaBXjv4YYPX0NQqXod2FubpXq4+n30xICevVClXl3ceB6CVQsm4/Hd6/APjUalqtVhY2Gaah/aMODr6Y6xUxeh+v+c4GlUBKN6tcaDl14oV9QZF908UbvKL6J2Px+qK8CQz5Cv+qjhKwxRAYZ8LUbN59YfkPu/QZaiQ7TolV1pQwFvj2UwsssFx7IztOFOoz7SK+RHRYbj89v7aNSsO1YeOY+KuR1w99IJnH4Vg1FdmsLEOPVAST58X99G/Rb9sfn4OZTMDuzbfxU18obiPkqiQYVCqY4t+Xj/5DIatR+HPadOoXhWM9y59Rhmfg8Qk78BShXIlmof2jBA/Tj191oMnPUPTp05gIxGIRgyciwm/zkdy9efxeLJPWFqaqyNpqQ5Hwz5DPlpblBzh5JUgCFfiwODcvEdC/SGVaYyWvTKrrShQMjn2/B5vi5N5OanV8iPjorAkY1zsXzfDRSt2xmLR/2GsK9eWHP0LoZ2agxTCSCffOxdOh5bTj1EuVYDMLV3U4T5e2P9ln/RsUd3ONlbpXq4RkeFY9OsIThw7QVqdx2PYR1qw+e9Jw7t2ogaHUegiHOmVPv4cX8BSqGRC7tSpdNER4Vi1phBuP/oCTqMXYaaRZxwwe0G3j95iHs+mbByRh+Ymym/4VaqO52GDDDkM+SnoeHMXfmJAgz5WhweTzdnQP66hzhVR4uaa8tVbMpOCxTqHqAtlxrzk14h3+vFPfQbMxfTZv6B2YO6ot/8XaiQ2wJ/HbuLIS7SQP4L94voNX41li+ciJkjBmP04i3wuLAPziVroHihgnDMmCGVcZXj/qXD6D/7b6xbMALTJ03D9OVL4XHjLhyMv8C2aAOUKpQjlT6Adx43sP3sCwzu3QaHdmzCr11+h8fFI7gX7IiuzSrDOLVpNPIYnNuzDNtuBGBK73qYvHAHpk4ai6den2Ad8BSLj37ErkVDGfLVjKRBQv6ePWjevPlPU7Rox+QtW7Zg0KBBQhl6GI2MjMTNmzdRqlSpnz6A+vn5wcvLC+XKlYs7j3ZhPn78OOrUqZOs0uSD/D5+/Bjly5cX19Lv7t27BwsLC+TIkSNZv3TdjRs3RNsyZ84sfNDvPDw8YG9vDzs7u2SvJR/u7u6oVKmS2I2bD1YgKQUY8rU4Lh6vN0KRpm5a9MiutKnA46PlUOT32NlMQz7SJ+TLERzoj4CQKDjY2SA08AtgbgcbCxMEhUXCzsYaMllqF3zKEej3BcFRMtjbWCLI7zOMrTIg1P8rjM0sYJvBDlaW5qkcOnJ8/eyDcLkZ7G0sEOD3GRa2GREZEogouREcHBxgZpr
62e/QIH/4h0Qio4MdAr5+gV0mR4QEfEGE3AyZ7G1Tr5VcDp9PXpBZZICVhSm++vrA1oEgSA4TxOBrUDicMjvAWIK3K6kU3CAvNzTIJ/DdsWMHzM3NfwrqBL4vX77EuHHjEkD+v//+m+L6jfDwcDg7O6NGjRpxPmiX7RUrViB79p+XSCa/efLkSQD59+/fx61bt2BjY/PTB4RPnz6hY8eOCSD/zp07AvSpvz876AGmQ4cOKfbNIAcpN1oSBRjyJZFROSMM+crpZKhnMeRrNnL0B5f+2GvqIPviC5Fm4siPEVVyoYc2I8pBof9N9aHw8Y1ARIILAYLwK5NJsrg3sQ9qe5yPb/1LbUcSxIHaL5OJ2MTqR7ql1gPwo1akkREJJowrYpV6T+nPgqFBPo3f4OBgMSufeBJC8f8VaWLUtwwZvr8Ro3FJ19J4okNxLyhSzuKnlxFUW1paxg0IujYgIEBc87ODbFhZWcHM7PvC+YiICISEhCh1ra2tbdxsPPlSXBvfZ/x+K/5tYmIiHiKkSpFLf3dC2u8xQ74WY8yQr0WxdeCKIV+zomsa8jXberbOCuiPAoYG+Qo4V1bBxNCbEqQr7Kp7neKhM377lPWZ1LWq9JcBX9lRkT7PY8jXYtwZ8rUotg5cMeRrVnSGfM3qy9bTjwKGCPnpJzrcU1ZAOgUY8qXTMkVLDPkpSmTQJzDkG3T4uPGsACvACrACrECaUoAhX4vhZMjXotg6cMWQrwPR2SUrwAqwAqwAK8AKJKkAQ74WBwZDvhbF1oErhnwdiM4uWQFWgBVgBVgBVoAhX9djgCFf1xHQrH+GfM3qy9ZZAVYgfSpAi1hfv34NHx8fUVOequVQjXj6N9WST2uHovY+rZ2gI351rLTWV+6PZhXgmXzN6pvAOkO+FsXWgSuGfB2Izi5ZAVYgzStAkPv+/Xs0aNAAO3fuxNWrV0Xd/DNnzoiSlwoITitCvHv3Ds2aNcOwYcPQrl07tGnTBsOHDxf952o6aSXK2ukHQ752dBZe9Anyi1RvgycvXv/Qe9pcJurtzVSrcuzsFRTI64xC+XKl2pahGGDIN5RIcTtZAVbA0BQICwvD5s2bsX79elB9+A0bNqBo0aICem/fvg0nJyfkzJnT0LqVZHtpp91Dhw5h1qxZKFasmHiIWbt2bVwN/8R7A6SJTnMnNKIAQ75GZE3aqL5BfqvGtdHL5dcEjaUtf/LnSf0XZbVfe2HcoO5oVr+6SgrTlxn9p3hNqdLFOj6ZIV/HAWD3rAArkGYVoBK6L168QJ06dVCzZk0B+fR34vTp0wKG58yZg+rVq6eJmW76G0gbfw0ePBjbt28XO+dmzpwZixcvRmhoKExNTcXGW9OmTTPIv5VpdpDqYccY8rUYFH2D/N9dWmJU/y7JKrBi014sWL0Nvl/8xIz8zLED0KRuVXE+/a7f2D9x7rIboqKjUKVcSayZOx55nLOjTrt+cL3iBgtzM7RtVg/z/xiKbKUa4tnVf1Egj7O4nmyv33EAd8/sxKFTFzFy6mL07twKU+avwbUjm1GqeCFxTnL+tRg2pV0x5CstFZ/ICrACrIBKCtAusJMnTxZpOy9fvhSAW61aNQH5lLrTv39/1KhRI01APu20S/1s27YtPnz4gJEjR6JPnz54+vQp5s+fL/pZpUoVMctviBNiKgWeT06VAgz5qZJPtYsNCfIp3abXyOk4snUJShYrhOOuV9C+zzjcd90jQL3zoD/g9ckHu1f/CXMzM/QcMR3hERHifDrsi9TC9uUzxEz+R+/PP4X84+euwmXARLRvUR+ThvWCk2NGnLl486f+VVNeO2cz5GtHZ/bCCrAC6UsBmtmm2Wya2d69ezeuXLkiZrVPnDgBW1tbzJgxA/Xq1dMI5JPvgIAAUAqNvb09vnz5ggwZMgi49vb2FjPs5ubmkgYkJCQEHTt2hLOzs8jJHzJkCA4fPoysWbNi9OjRaNSoERo2bMiAL6nqadMYQ74W42pIkN+0y1C
UL1UcU0f2iVOocachqFjmF/G7gMBg8fsMttbi576j5zBwwhx8dD+lMuSfcL0Ksv3q5mHkzplNXJ+Sfy2GTWlXDPlKS8UnsgKsACugtAKKhbc0m58rVy4QBNPiVMrBt7a2FrP6BPmaSNehNKHVq1fDw8NDzKL369dPLIKlqj4tW7bE1q1bRZUfqQ7qq5+fH86dO4fKlSsjY8aMuHz5MnLnzo08efIIyG/cuLGAfD5YgZQUYMhPSSEJP9c3yH/m+RZGRgk7+EvhAiKFpnC11nj68s0Pve/Wvhk2L5mKB49fYNLcVbj/+DmioqIRFh4hZvL9Hp9XC/JbdB+BiNfX4/yl5F/CsEhmiiFfMinZECvACrACCRQg+KVDsehU8SGltmgS8skvgX58v4oKN4r1Y1KnzCjWpsX3Q/+mdowaNYohn+8NpRVgyFdaqtSfqG+QT6k03ds3T9AxCwszkY5TtEZb9O3SGsN6u/zQcfoCylOhucjPXzR1BCwtzEVefdchk5WG/OUb92DDzoPigYJm8tv2Houg55fifP3Mf+ojoRkLDPma0ZWtsgKsACuQnAIE+Y8fP0aWLFlE6ozUh+Lh4md2tVXWkvr68OFD0VeqJsQHK5CSAgz5KSkk4ef6Bvk/W3jbvNtwZMnkgA2LJscp8Ob9R+TMlgXvP/ogV7mmCRbSTp6/Bss27E4S8v0DgkSO/sPze1GsUD5hb/SMpTh94UaykP8z/1LPmkgVYoZ8qZRkO6wAK8AKKKdA/FlvbcG2ci2T/qz4Dxxpva/Sq5c+LTLkazHuhgT5tPC2XZ+x2Ld+HurXqISrbu4g8KaFtWX/VxQZi9bG8llj0Kvjrzhw4jzmrdoKN/dH+PzwnMjTp2o6YwZ0Q48OzWGfwRZZSzbA2IHdMbyPCzzfeKF+hwGwsbJKFvJ/5r9aBenyH6UMP0O+lGqyLVaAFWAFWAFWgBVIjQIM+alRT8VrDQnyqWs0M7/orx2iOk7unFkxYUhPUE4+HZv2HMKE2SsRFhaBXxvVxILJw1C7bV9RWvP1f0cwa+lGzF+1FQ1qVsKBTQvx73FXjJi6GLTZFpXjbFynKv7atg8PXPcmma6Tkn8VpdfK6Qz5WpGZnbACrAArwAqwAqyAEgow5CshklSn6BPkS9UntvNdAYZ8Hg2sACvACrACrAAroC8KMORrMRIM+VoUWweuGPJ1IDq7ZAVYAVaAFWAFWIEkFWDI1+LAYMjXotg6cMWQrwPR2SUrwAqwAqwAK8AKMOTregww5Os6Apr1z5CvWX3ZOivACrACrAArwAoorwDP5CuvVarPZMhPtYR6bYAhX6/Dw41jBVgBVoAVYAXSlQIM+VoMN0N+ymIPm7wQ7z58wj/r5qV8sp6dwZCvZwHh5rACrAArwAqwAulYAYZ8LQZfV5BfpHobPHnxOsmezv9jKEb17/JTFR49fYl3H7xFOUypj5DQMGzfdwx9OrcWpp+/eovw8EgULxy7aZYhHQz5hhQtbisrwAqwAqwAK5C2FWDI12J8dQn5rRrXRi+XX3/obeaM9mKzqp8dk+auQlh4hKiFn/iIjo4Rte/VPc5cGdBGUwAAIABJREFUuolxs5bD7cQ2dU3ozXUM+XoTCm4IK8AKsAKsACuQ7hVgyNfiENAl5P/u0jLZGfuHT16iXKPOuHp4E0r/UlgoUv+3AcidMxtyZMuCWUs3QCaTIbuTI+6d3Q27wjWxZek0jJy2GBOH9sSw3i7Y9s8x/LlsI1699YKTYyaM6NsJQ3p1iFN3zorNWL5hD/wDg1C9YmmsmTsezzzfomnnoYiKjoalhTn+O74Vf23bnyBdZ9Xmv7F84x7xJqFAnpyYPqYfmtevIeyWb9wVnds0xvmrt3D/8XNERkaB3ky0b1Ffi1H97oohXyeys1NWgBVgBVgBVoAVSEIBhnwtDgt9hXySYOrCtTh1/jquHNogdqcdOGEuPC7+I2b5W/YYiQJ5ncVMfmhYOKzyVUXDWpWxbOZoZHf
KDK9PvqCUoP0b5qNxnSq4fusB6ncYgCsHN6J8qWLYf+wc+o+bg0ObF6FgvlwY+scCPPN8g+tHNmPJup3Yvu943Ex+/Jz8f46cRZ/Rs3B462Jh59DJi+jQfzxuHN2Csv8rikrNuovdeE/uWoHC+XNj5aa9+GPeGnx+dBY0sLV9MORrW3H2xwqwAqwAK8AKsALJKcCQr8WxoUvI93zjBVNTkx96++neKVhbWSIiMhJlGnTC4J4dQLPuC6cMQ+smdcT58SGf0nYs81bB5iVT0a19M/E5pex4+35BNqfMcfZL1PkNA7u3R7+ubdCk8xAUL5xfzLLTQWB+9vJNdPi1IZZv3J0s5DfuNASF8uXC0hmj4uxWad4TVSuUFLYI8suXLIbls8aIz1++fo/8lX/FR/dTcHLMqMXIxrpiyNe65OyQFWAFWAFWgBVgBZJRgCFfi0NDl5CfXE5+/tw542a9b9x+gCoteqJ5/eo4sGlhnDJJQf7lgxtQtXxJcY5cLse8VVux69+T+OofIOx9+OSLuZMGi1SewtVai5/9u7X9Qe2fzeTTdZTyM7BH+7jrugyeDFqsu2/9PAH5bZrUwegBXcXnlNLjXLYJPG8cQh7n7FqMLEO+1sVmh6wAK8AKsAKsACvwUwUY8rU4QHQJ+T/LyVdIsPvgKfQeNRP5cuUQ6TOKmf+kIJ/y58uVLCYu3bj7kFg8e3TbUpFWQwe9Fejarmkc5CeGdYXPlCB/6O8dMaB7u7godR70B8LCw0WJTYL8tk3rxq01YMiXZjDTlwI9uPHBCrACrAArwAqwAoarAEO+FmOnz5D/xS8ARWu0xY6VMzFxzkq0aFBTLKqlIyXI7zViOsIjIrF9xQxxfkBgMHKUaYwZY/oJyKe0m/x5cmLFt7QaSu1Zt+MARg/oAlpYm1xOftMuQ1Egj3OCdJ0KTbqiTtXymDNxMEO+hsYuQ76GhGWzrAArwAqwAqyAFhVgyNei2LqE/DZN66J3p5Y/9NbSwkLkr3cbOkVUp9m5ahZu3fNAjVa9cfvUDrGgtWP/CYiIjMK6BZNEFRxaeBt/Jp8W7e49dFospI2MihJvA6i2fouGNTFv0hDQAtrfR80QKTb/K1oQY2cux8OnL8QCWqqmM2Pxetw9sxM21lbijYBiM6yDJy+gx7BpOLZ9Kcr8rwh2HziFXiOn486pnfilSH6GfA2NXYZ8DQnLZlkBVoAVYAVYAS0qwJCvRbF1CfnJbYZVt1oFjBnYFe37jsPji/uQNUsmoQhV17n36Bku/rsOx89dhcvAiQLwn1zeD7tCNRNA/uev/vit73jcuPMAztmdsHDKcAHqw6cswvTR/UQ5zVlLN2Ll5r3wD/heQpPy5t+8/4jabfuCbBzeshj7jp5LUEJz/qqtWLp+N/wCAlG0YF7MmTgI1GY6OF1HM4OXIV8zurJVVoAVYAVYAVZAmwow5GtRbV1Bvha7mK5dcXWddB1+7jwrwAqwAqwAK6BXCjDkazEcDPlaFFsHrhjydSA6u2QFWAFWgBVgBViBJBVgyNfiwGDI16LYOnDFkK8D0dklK8AKsAKsACvACjDk63oMMOTrOgKa9c+Qr1l92TorwAqwAqwAK8AKKK8Az+Qrr1Wqz2TIT7WEem2AIV+vw8ONYwVYAVaAFWAF0pUCDPlaDDdDvhbF1oErhnwdiM4uWQFWgBVgBVgBViBJBRjytTgwGPK1KLYOXDHk60B0dskKsAKsACvACrACDPm6HgMM+bqOgGb9M+RrVl+2rnkF5HI5YqKjERUdncCZiYkJjI2NJW6AHD4fvfA1MAy58+SGuamJxPbZHCtgOArExEQjKioadA8ayWQwlhnD2FimkQ6Qj5CAr3j17hNyF8gPa3MzGGnEExvVtQI8k6/FCDzdnAH56x6CsYmtFr2yK20oEB0ViBdnW6BQ9wBtuNOoD94MS6Py6rXxqMgInNy7Du6v/QXUEwwAMlRr0BxVSheFTFISiMHyqcOw8uBtXHE9gUz
2NnqtDTeOFdCkAp8/vsXWHbsRFhkNa1sH1KjTEAVzO8HKylJyAKcH+RtHNqFFn1k4dvcmymZzhGYeJzSpGNtWRgGGfGVUkuic1wcqwLFAb1hlKiORRTajLwqEfL4Nn+frkLvlTX1pktrtYMhXWzqDvzA0KADDWpfB+gufUL7s/yDm7o1k6DH8D3T5tQ5kkIPGR0xMjHgIkMli0YBmIWNi5KBHApNvv5fLYxAdHRM7M2lkBJmMzjeCPCZGvCmgzxdPGoCVB+/g5lVXOGXMgOhoshMDIyPZtzcHcmFbPGtADmNjU2GDD1YgrSngcesSatZvCpvMOZDBXA7Pdz6o1aIHtq+dCxtzY8hjosX9RAfdd3Q/GRkh7p6J/b0xZDT7L5fHu5eMIDM2hrFMJu5FuseiIiNx/fAGNOo+Da5P7qFCzqx0E4vPxL367fyoqKhvD/p0/xpp4G1eWoui/vWHIV+LMfG59Qfk/m+QpegQLXplV9pQwNtjGYzscsGx7AxtuNOoD4Z8jcqr18YF5Lcph+33beH15iZsvwF1ZHgI1i+chjMPAlAiryXuPvRE7RYuGNC1NeThgdixaR2Oul6DtUNW/D5gCKqXKYovXs+xbPlKuD95hTyFS2Lg4CEomMMBb57cxrwFy/A5TIbgT89w53Uo3K66wt48EmuWL8X5G+7ImCM/Bg4cjEJZjDF82FjY5vsfAl+5o+WgmWhULj9MNJTGoNfB4calaQUE5DdsiUlLNqJv+wZw/WctXPpNwLS1/6J/2zq4f+Uk1m3bg49+YahStwn69+gEKzMjHNm9CTsPHEe0sQ1au3RDm8a1EBPmj7+WL8G5a3dglzU3fh8wFJVL5MfXj6+xcslC3Hn+AdntTbB+52lcfP4QZRxtcWj3Fvxz5AxklhnRqVd/NKlRCtMHuOCNSS7YGwXDqWRDjP29ZZqOQVrsHEO+FqMa7vcErw+UQ/66hzllR4u6a9pVbKpOc+Ru6QZz+8Kadqdx+wz5GpdYbx3EzuSXxcbLfujSpTUoS942QyYMHDIES0Z1wJZTD9C+cxe8uXsR/3kG49HdK9i3fCJmbTiOek2bw8v9Itx9LHD3+iH0alAZT8Mc0LhOZVw9dxyfTfLgxsUDGNG2Oi55RqB9q8Y4e+Rf+MY44Mq5w9jy51As+/c/uLj8Brcz+xGcsST2LB+LVo2b411gFEr8UhTjFmxCs4oFYMyz+Xo7hrhh6ikQB/lLN2GQS3N8+fgOdatXRKEGvTG3f0NUq98KBUpWRnFnWxw54YrhC7agmvUr1O8yBvVatAV8HuO6xyes3b0P9zaPw7z9t9HRpSMeXjmGp97Av4cP4NquuZix9jBad+yE13dccd7tDS4+u4/Xh/7CyNmbUK1+M8DXAyduvMEV91uY2Lgwzr+KQJ6CRdDm9zGY3K+1ep3jq3SmAEO+lqX3vjYE0YFeyFZivJY9sztNKfDh/mwY22ZHlsrLNOVCq3YZ8rUqt145U0D+pqt+cOnYEqZGRrCwssHI0eOwcFg77HcLxA2383A/tAatBy/E1Uf/4Y+ubRDmUBx7NiyF3O8dTl65i0K5HNGgeXtMXrMXvZtXwuUj69Cp7zTs2bcdLi3aoPnov7BoZGusnj4Mqw/fw5kje9C1VT34mTmjXbPa+PzsP+w9545la9ZhxtDeyFa7B/5dPg7W1tYiHYjSFPhgBdKSAgkgv1ML+Hl7oX7NSnCq+Bv6VbVB5xEL0KpDV2TPZIV/tqyHZZmOaOnsi02X3uHWhSMwRTBcXa8jY5Ys6Nu9E4o17oPtC0bhxb1raNTSBcOnzoXrhul4YlYBt06sw82D60VO/tn7N7FkcHdc8fBGlw6tIIv6gmVLNmL61lM4N7MNPGyq49y+v5DV0QEW5uZpSfJ00ReGfB2EmXLzrTOWQ+aCvXTgnV1KqYDvsw0I/uKWJnLxFbow5Es5QgzLliJdZ5u7NTyfX4INVfkwNkZ0eAjG/d4KB2+F4M6DK3A/sBpNes/
C5Yc3ML5TKxjnrIi9G5bCXBYF389++Pr6Hio36oBFu4+ia/3yuHZyC1q6jMSOnRvQqXVHdJ+7F38OaIqVUwdj2f5bOHVoNzq1qIUQm3xo2bA6EBMFmaklGtavjX5dOiNvs8E4tmqcYYnJrWUFVFAgPuQP6NAEH18/Ro2qNdC473TUcniLXn+sQYu2HZAra0bEyAFzp6IIu/8v9rh9hvvlYzA3MUJISDg+f3iJRk1boMpvY7B5zhC8vH8DtRq1RP/x03Fh45/wytoYdw4vx/UDa9Gg21ScuX8d8/p2wm1Pf7Rr3RyWZiYICw1Do996YXmvGnieuSGuHloHhwzWKvSGT9UXBRjydRCJqBAvvD/VEmZWuZCl6CBO3dFBDFLrklJ0vD1WICLkDXI0OAATq+ypNak31zPk600otN6QWMgviy1uRti9YzmsRe67EbLlyIHVU/rj8O2EkH/16R3s+mMwDvz3DjOnToTXraNYts8N+/dvh0vj2shW/lcM69YUR7cuw8Eb7+B66RQGNqsCX7vSmDmuL3asnIcrL4Jw6dQBTBnSGWcfB2HerEmwlIfh9ddotKnzCxrWbcaQr/WRwA61rYCA/AYt0K73cDSsVAyHtq3C0f/eYtfBE7APeoQGrXugRad+aFGnAt57eqB49ZbwvbEbPadswMJlS2Hu74E5y3Zj7PzVOL5oGK58NMeyuZNw33U3Vuy6gLVbd+Hq9unYcOIJVqxeigdn92DuumM4//gOXJdPwcp/LmP0xCkokNUGbu6PMWjoYPxeLTeeZmzAkK/twSChP4Z8CcVU1RSl7vg92QT7XK1hk6UazO0KMvCrKqIWzyewD/d/hiDvy/B7sx/2hXukmRSd+DIy5GtxUOmZq/DQYIzvUhM7r7yPfTVPVW2MjNB77Ez4/7cXp+6H4vylY3hwfBM6jliME25X4RT6Cf169sANj7cwNTNHjxEzML5fOzy+dhxd+4+Bj18QrO0yY87qbWhWpSguHtiAPmNmI1ouQzZHBwTCDueO7YMsxAsdfnPBMy8/UdGnRfdhmNitDlr92gF5GvfDzjmD9Ewtbg4rIJ0Czx/8h+at2iMwNFJUt7GyzYRFazahUdWSkEeFYfeyKZi++m+ERkbDwTEH1mzZhbL57DG2XxccvHQfVHinXrteWDhpKOQhH/Fri9Z46xsEuZEx+k9eikHta+Pd41to/Vs3BEZEwzlndrx87YujbteQzzQCk4b0wZHL9yEzMUH+0rWxd9sKDG1UEs/sa+HUziWwtbaUrrNsSWsKMORrTeqkHdFi3IAX2xHy9iTC/R4jJipQxy1i98kpIDOxhbl9EVg5N0SG/J3TxCLbpPrKkJ9+7wGqn/3F9yMCgsMTiGDnkBExEaEIi5Qje/ZsiAgJwCdff2TNkR0WpiaghwPfz19gbGaFLI6ZvtXTlyPA7yv8A4NhbWsHB/sM3+p9y/HZ5xMiYmSwMJEhOCwCTk5OMDWRITI8DJ+8fWBsZonMmTJCJo/Cp08+kFlYI2tmh/QbGO55mlcgIiwUHz99QlR0DExMzeCQyRG2Vt9z4Knk7NfPvggKjQDdj3Y2VkKT6KgIeH/yhtzYDJkzZYKZaez+FhFhwfjk8xkW1hmQycHuWwnNGIQE+uNLQDAy2tvBx/crnHJkg6WpKSIjwvHlsy8iYozglMUJZqYyfHr/FlEyCzhlycwVrQx0BDLkG2jg9L7ZR44AXboA27YBzZrpfXO5gd8VYMjn0cAKsAKsACvAChi+Agz5hh9D/exBrVrAhQtAzZrA+fP62UZuVZIKMOTzwGAFWAFWgBVgBQxfAYZ8w4+h/vXAzw/Ilg0ICwMsLIAPHwB7e/1rJ7eIIZ/HACvACrACrAArkEYVYMhPo4HVabemTgWmTfvehClTAPodHwahAM/kG0SYuJGsACvACrACrMBPFWDI5wEirQI0i583L0A/FQfN4nt68my+tEprzBpDvsakZcOsACvACrACrIDWFGDI15rU6cS
RYha/ZEnA3R1Q/OTZfIMZAAz5BhMqbigrwAqwAqwAK5CsAgz5PDikU4Bm7wnyhw0DNm+OTdkhuO/eHViyJPYzzs2XTm8NWWLI15CwbJYVYAVYAVaAFdCiAgz5WhQ7XblSzOjzDL7BhZ0h3+BCxg1mBVgBVoAVYAV+UIAhnweFZhRgyNeMrlqwypCvBZHZBSvACrACrAAroGEFGPI1LHC6Nc+Qb7ChZ8g3jNBFR0XC7dwBGDlXRNnCzjCWGUnWcHlMDJ7fu45nIXaoX6EITE2MJbNNhj5/fItz7u/QonZ5mJuZSGpbk8bCw0Jw9tgB1GjUBjbxdiPVpE+2zQqwAqyAugow5KurHF/3cwUY8g12hDDkG0boIiPCsGXOUJiU6Yk2tX6BpYUFjE2MIQXqx0RH4/z+dTj1KRvGd60LS0tzmJqYwkgK4wBeedzGrF3XMW9kF1hZmMPMzEwy25qMXkigH2aMHYhhM1bB1tIMFpYWkEkliiYbzrYlVSAyIgIyY2MxZmNiYmAkM4aRPAZRMYCJiXGqxkRUVBQgB2TGMtB9aGRsAiN5NKLlgLFMBplMpnZfoqOjhU0TU1PQJEGsbTli5HIYGclgbKy+bbUbpcSFcrkcEeHhMDU3hzw6GnIjI9DfKchjECMHTE0MZ6JAie5KegpDvqRysrE4BRjyDXYwMOQbRugiwkOxadYgXPviiNwOMpg5FsXg39vDxsJUdCA1MBATHYVzf/+FVadfoqSzNeQWjhg4oCcyWpsjOloOM7NYH+oeng/dMGzOBpTNnwVh0cYYMHw4sttZISIqGuZm9DAh0dOEug1M5rpg/y+YMLATMheshOCgAJRv0A7NapQRehCIpFZ3iZvL5jSkwHv3U7jpkwF1yuTB3KnT0HvsDAS9vILrPvbo1rwKTExMxBhWZxT7vryD7SfuoVfXNlg7dzIqtB2CvFYBWHP8Mcb3bgFzeogwMVHrQSLQ5y0mzViFqbOnwXXvKoTnrIWm5XNhw66D+L1bR1iYEuibQibhW0EpQkAPJn9vWIEybfrBIeQZ1v97E/26tsGVf9egQJ1uyJcjM0yMjUEPMfS9l5rvPinaq082GPL1KRppqS0M+QYbTYZ8wwidAvLfZqyDwe0qoWPL37BwxwEY+79GdOZiKJ3XQe2OKCB/zcWvWDqpB5ZMH48qncbBMewVPgRFo3GTprA2l6kFMdQogvw+UzdizYLxcDu1Hd5ZGqNm1kB4vPFGw2a/ws6CIEnt5mvsQoL8Mf06oceU1chu/BbDZvyDPyd2RaAsC7LJvuBDjCNKF8yuMf9sWD8U+Or9EvPXHkLbWsUw+s/l6D/qD3y8uAO5mw5DPtMviLRxQon8OWGsxhgO/OqFmVNmYMCo8Rg9ajCKV22Larki8NyiHErbfsVbnyDUqV8PDjYWKosREvgFU3q3hcv0rdg2fxjeRefH3DG/YufpZ2hTuyTuP36JKjXrIIejfu1QHx0dhRPbl+KpdRU4v/8Xiw49x4oVM7FkeD9MXf0X3O69R5umNXHh4BbkrdgCeXNmUVmbtHoBQ35ajayu+8WQr+sIqO2fIV9t6bR6IUH+ltlDYFy+N7o2LIaOtWpj5KpdsAl9A4+Y/GhXObfa7VFA/mlfZ0zr3QBLJ49AtqouKJbHEf7uh5Ct3iAUzWqpNogT5M/afRNLxnfH9TO7ce1rXvRvUhy3zx2EfeWOKJvTBvqYOUCQP33sIIya8xfsrfzQucs0jBzaArs2HUbb9o3w0bwQ2tQoobbufKFhKBAREogho6egTImcsClYDRddrwK+T7Fg6WL4fXiP++8DUK9yKZiqkf0SGR6CjcvnoWi1inj41hhPr52Bs00YOg2biq9fAvHV8wFQoCoq51UdxKMjwnDr8Gq8MM2J+7dfAZ/uokzhbDD/X2vULFscn9564nWEJeqVKaxXgZDLY+D79Dq6zfwHVh8foXrdCvgsd8D1h174d6ELuo7cgo2LR2LU+KnoM3g8ypcsoFf
t12VjGPJ1qX5a9s2Qb7DRZcg3jNBFhodh8+whMCnfG10aFoVL7ToYu24fHCLfwz00B1pVcFa7IwLy/1mL07454yA/T/2+qF8sA07eeIH2v9ZTa5ZS0SDPR7e+QX433DizBzf88qFvo0K4eOwAMlVoi6qFM6u9kJiAICqKXtsbA9GRkBubAjFRkMtMYWKsXgqFot3BAV8xY+wgjJy9BvZW/ujadRpGjGgPGzNbHD20D7lqdEH7Wv9TW3e+0EAUkEdj+eQhOP0iHMvnTsHYgUNgW6Q0Vv05AWEBX3HzxSfUKPuLWpBPeeauh7Zj7podmDZ/Dc5sX4I7rz5i0/qNiAnxxrEL7vi1eRNYWZipIVYMfN8/Q2eXLug0fjXsfS5j+fqtWLDhMHLZGeHBw4fwt8iFppUKqW5bLoc8JhKRMcYwkckRGWMEExnEGgBjU1O10osSNCI6FB3rlAcq9sf0Tv9Dn/7j8OuohRhQ3QLzluyHWe6iqJorGpa5q6NMUfUnOFTvuH5fwZCv3/Ex3NYx5Bts7BjyDSN0UZHhOLJ5IUyKtUTjSnnwR/+B6DphDmyjffAszBG1ijup3ZGYmGi4nf0XN/2yoE/LStizdgkcitfCY9d/UaB8DVSsVAVZM2VQO3fey9MDm049wojuzfDg5im4+2WBbeALREaGo3KT35A3sxXUTQsOCw7A4uVr0f337ji5bjpK/DocgU8v4JmsCLo3KZ+qSkGhwQHYsGI+Og8YDxuLIEydug4uneohW9aCuHRsL8wL1kLDikXU1p0vNBwFHlw6iH3X3mH04F44v3spvB0qoEvzmggPDsDDd19Qukg+AbnqHO+eumP8n2uwbNVSvLx6EPvuBOGPQb9h54a1sM1RAKXLVURBZ0d1TCMowA+jR4zCxEVrIP/4EJNWHMaSWcPw9M4VeH0JRd4SFVAqv+opZ7SQ9/qRzXiGgqhVxA5rjj7AqG51sPGvzeg/fBRsJahG9feqachUvQcq5rXHX4vnonX/0XA29cXZO764f/kUOjStiGCHkiiUJ6ta2qTFixjy02JU9aFPDPn6EAW12sCQr5ZsWr+IFnrSLJlcVMUwQlRklFhwpli0mtrFZ9FRUbG2ZUZiQRtVERGVZIyMYCwqi6iRcPxNJXpTIIcRjGQyULlOsk+VQ6jKh7GaiwoVAZDHRIuKG6SPKMAhp9n7GMDIWCwoTFW7Y2JiF9iSBkZGiIiMFJqTHtQH+j0tAOQj7SsgquDQAndjY8SOOSPxAEkPyDCKvQ/VvUNi77domJiYCttUWcfEWIboaBp/EFVw1L2/6T6OioqEqan5N9tyce+R4Wi5HDJ1q+zQ9d90kEEu7m8qEyR0MKL2qqvG97EkqhrRombEVjUS96FMhmi6L6nKkQTfTWlt5DLkp7WI6kt/GPL1JRIqt4MhX2XJ+AJWgBVgBVgBVkDvFGDI17uQpJEGMeQbbCAZ8g02dNxwVoAVYAVYAVYgTgGGfB4MmlGAIV8zumrBKkO+FkRmF6wAK8AKsAKsgIYVYMjXsMApmQ8P8kKA1yUE+95FeLAXYqLCUrqEP9eRAjITC5hbZ4d15lLIkL06zG1UX5yko6ar5JYhXyW5+GRWgBVgBVgBVkAvFWDI12FYPnlsgt87V9g7lYJtxnwwt3aEsYnqG1zosAvpynV0VBjCg30Q+OUl/D7dhX3O2nAq2iPNacCQn+ZCyh1iBVgBVoAVSIcKMOTrIOiRYV/x/vY8mFvaIUue6gz2OohBal0S8Hu/uoTwUH/kKDMGphbq7y6a2rZIfT1DvtSKsj1WgBVgBVgBVkD7CjDka19zvLo6HjZ2OZA5V2UdeGeXUirg++YagvzfI0+V2VKa1akthnydyq+ScyrnGBwcDGtr61SVhkzOaXh4OExMTESJSKkPKkFI/1lYSP/2kkoQhoaGwsbGRupmi9J9/v7+cHBIOw/2kouUDgzSGAsLCxP3ntQHjbGAgADY26u+q21KbaHvDLJtZ2eX0qk
qf27I30cqd9ZALmDI13KgKEUnJuwTshWor2XP7E5TCnx4fhoyC6c0k7rDkK+pkSK9XYLk7du3o2XLluKPdmpqwCfVumvXriFr1qzInTu32nW5k+v169ev8ejRIzRo0EDyh4hPnz5h165dGDRokHhIkfIgwB8/fjwWL14Mc3NzKU2zLQNS4MOHDzhy5Ah69uwp+fj9+vUrZs6cidmzZ8PMTJ2dbZMXkh5M5s6di9GjR8PS0lLS7wxD/j4yoKGnUlMZ8lWSK3Un0yLbV9fGoUC5vpyikzop9epqSt157vYX8lSekyYW4zLk69Xw+mljaKa9X79+qF27Njp27AhTU1PJGk+zcqNGjULmzJkxfPhwSWfcyfbChQvh7e2NKVOmSDobSrYPHDiAo0ePYtGiRciQIYOkmjx48ABTp04VEFa0aFHJbLMhw1GAxhg9RF66dAnz5s2Dra2tZI0n2//995+4P+bMmYO8efNKavvLly8YOHAg+vfvj2rVqkn6gGKo30eSCazuG3J6AAAgAElEQVSHhhjytRgUn6d7EBP2Hk55a2rRK7vShgKfPC9AZpEDjoV+04Y7jfpgyNeovJIZp3QBd3d3jB07Fvny5cOCBQskgw0CjTdv3mDAgAFiJnHp0qXIlSuXZG338/PD4MGDRdrLjBkzULJkSclsE2gQgN++fVs8pNADkFRHREQEduzYgb1796J9+/bo3r27pDOhyraT4kP9JP2oTXTE7vAbu6uo4t+Jf0efKe7vxOcqrkvuHGWujW9b7Az87fjZ7xV+aZxRegr9VHc3V2X1S+15NBv+xx9/wMPDA+PGjROwLNVB8Vy/fj0OHz6Mrl27iod3qQ6aaT99+jSWLFmCmjVritl8qSYGDPn7SCp99dEOQ74Wo0K5+FlyVYSVnbMWvbIrbSgQ4v8W3m9upIncfIZ8bYyY1PsgyKNZxPPnz4tZ9jVr1iBHjhySABL9wd63bx82bNggGjpixAjUr19fEttk7969e5g4caIA1caNG2PIkCGSzSh+/PhRpOkQAOfJkwerV6+WLGWHZkGpreSD3nBs2bJFQKnUaVI/Gx0EauSfIJD8aiK3OvWjU3ULgYGB4uGkSZMmIkVMKvhUvSUpX+Hl5SXGGLX5l19+wfz58yUbY58/fxYPwPSWy8nJCZs2bZIsZYdy8YcOHYq3b9+K9Srbtm0TP6UYv4b8fZRyxA33DIZ8LcbuyeluKFCuD6fqaFFzbbmKTdlZi8L1t2jLpcb8MORrTFpJDdOCW4KBd+/eCUBu3bq1mFmWAo4IJGmG8uHDh6LNVatWxZgxYySDDXo4uXDhAuhhomDBggKSpFqAS2sU9uzZI2a4KVWHcudz5swpifbUZrJHi3opH3/SpEkoV66cZA8/KTWSFmRSKsf79+9Rvnx5WFlZac13Sm1L7ecE+EFBQXj8+LFY1Fy2bFm97RvNtB88eBCRkZHIlCmTGBNZsmRJrQTi+lOnTmHlypViUS/dE/Smq0SJEpKAOL15mDBhAkJCQsR3Bj2oNGzYUJIHbEP+PpIkcHpqhCFfi4HxOP4bilYbo0WP2nM1bspyPHj0Akf+XiKZ03lLtmLmvPVo27IeWjSpgd8HzYDvq7OS2ZfakMfleSjaeI/UZrVujyFf65Kr5ZAA4+7du3B1dRWv3ikvuHDhwpL8wSaYJNvPnj0TEEMzipR/LtUiVkoz8vX1haenp4DVYsWKSfJwQkI+efJEzHRTWgI9+OTPn1+y2W6Ca2rzX3/9hWHDhiF79uxi1lmKmdCUBgFBMC3IPH78uGRglpJPXXxOD2c0pitXrixSxLShrar9pIdfHx8fXLlyRbx5oAdVqSo5UZocLUrfunWrWG/j7OwMR0dHSXSgN1F0f9BDcOfOncW9LdWiekP+PlI1/oZ0PkO+FqOlC8j3/eyHmfM24NCxC3j/wQf2draoXKEExo3ojkrlS6Sq96vW/Y3fu7WEmZkpNAH5djlqYvbUQejRuQVOnr3GkJ+
qaCl/MUO+8lrp+kz6w0p/sCk/nGbwpQQiAn2CcQJZmqWU0jYBK0ESLWJV5MxLZV8BwzSjT2866JDSNs02Uy7zqlWrhF2pbKc0lijWtKC4VKlSAs605Teldkn9OcWP+urm5iZSufQxP5/aSMBMs/k9esRuiChVPMg2pQHR4m5aZyPlGCPb9IaA3qTRmgIpbZMGhvp9JPUY1id7DPlajIa2Id/H9ysq1u4mZt8mj/sdJYoVQGBQCLbtPoZN2w/h761z8WtT9RYBh4SGwT5nLXx5cw421laSQ350dAxMHCrg2tlN4mHkwJHzDPlaGqsM+VoSWgk3BNqUOqNYxJh40SR99vfff6Ndu3YJZtnjL7xUAEhSCzPps+TOpc/u37+PbNmyiZnExEdKPuKfn5RvmsknyK9Vq1aySiTX7/j2FBfHP5cW9tICWaoiktS5P+t3cgtSFdpTqgOlLq1YsSIO7pLyQXAq1ZsPckRwtnHjRnTo0EEyoFRiCOrkFBrXlLZCi041BfmUckV+1D1ozQeti6AZcakPGmOUpkMlNKU+6E0JVZ2idDypD0q/279/v3iDpom9NegNCr1VpPUwPztowoPS6aR68JJaJ23aY8jXotrahvy+Q//EkROX8PjWPtjaWCXo6aBR87B3/2m8e3xMzMSXr9kVnTs0xvlLt3D/4XNERkZh/syhaN/6x3r+4eERcMhVG6Gh4bC2ssSi2cPx8tV7PHn2GiVLFMLyNbuFrz49WouZ+Ng/UBEYMWGR8BkTI0f5MsWwfMEYFCrwY8WO4JBQOOVrAPppaWmO9q3qo2WzWgLyn7sfQLaCDXFoz2LUr10xrk91mvUTNudOH6LFiCZ0xek6OpM+TTqmP5hXrl7FZbc7MLOw/AEoCSzlMTF48cQD+QoXTQBDKT2oJf78W1EWmo9M8EDh/eE9bDLYw9Lq+/cHnasonBJ73fdrxP+LLfAS9/v458cPVFBgAIIDA+CUPTZfPqk2kZ/4v4/9oy2P85/0dUYICQ7CW8+XKFSccpm/e/1ZuxMPouQ0Cg8Lxz23GyhXtcY3iEjYHoWdTFbm6Nq5k2SwQ5BPKRz0QJfWD4LvkydPwuX/7J0FWFRZG8f/A0M3KC0gIKCioNiKsXZ9unaB3d26Jrp29+oqdmN3rt25dicG3Q3zPe8ZBoecGeYOoHvP8z3PfjL3vuec95577++8940uXVQG+XPmzYeetT2EGvnLQx8XE42Pr17ArUIlzi9HYnwc7l4+jxqNWnAuOzkpCQ+uXUTlutzX6klLS8WLh/fgWr4i1FRQQO/b548wNDaBrn7uKUtTk5NgpC5Clw7tON1kc34hCkggD/kFpGjqpqAhv5hDfYwb0R3jRvhmm+W376EMlk/sX44mDWqg2m89QH87dXAlXEvZY9W6PZjy518I/XAux93wg39fokKtLoj+einDku+/9TDra2Cfdrh45R5atB+B+1e2M/AfN2U5bt55jJ3+s2BqYoQ/F6zHroDTbAMiFGavppmSkgoN06o5WvI79pgILU0NbFk3g82LXJIsnRvh/pUdKFfWuQCvKA/5habs/0DH9On76LHjiLd0QnE7h8z3YTqtkqX/+pF9qN6iTfaXanYaF2tNKrWhLDV+fP4EJhaWMDBNt5zJOleBPmMiwvHh2SOUrV478zCkqVye8ebQZ3x0FC7u3YYmPQfmPUUFxisRlJgQj60zxqPP7OXiP+UwXtp8PT+5DwN7+HAGGjzky1qtiv0+avxENOg3CtoSYBTvH9OvqZSsH5lAM3UQGxWB2ycPo24HX9rnKnRujiOV2owmxsZiz0I/+Eydn/uk8tlnSlIiDq1eiLYjJonHLd1ymWvGITL6JMi/fngvqrdsBzX1LEXo8jle6eHR88LUwgoGZpLnUXb10AYp8Opp+LZvw1miAMVWVtE6mof8ArweBQn5kVExzJ3myJ4laNHEO8dZmtn/hhmTBmBwvw4M8iXWdTqYLPNO5Vvh2+vTsDA3zXZ+TpB
PLjUE7ZJm69YMi2aNYF8DDK3rsLHU9fZiP5M7DvncU6Cu5G/SneQF+SfOXEMH3wn4/vY0dHW0sX7zQaxYuxsPr+0swKuZvSvekl+o6v/lOifIP3biJJLtSsPC3jHHzXZaaiquHNiFmq07Qp3jyq6k0HePHsDM2gaGZtnddZRVeGRIMD69eAz3mtzlsZeMKTYyAhf3bkWzPmKffC5bQmwstviNRb/5q3MVm5aSgkeHd2JA964QcmTR5CGfy6sIjJk4CQ0HjYWugVG+BMdEhOH2icOo17lHvs7P66T4mGjsWeCH7n4LOZednJiAgyvno/3oqZzLLgrPI7o/P146AZ+2rXjIT/9CKhBJV63g/LLzAiUaKEjIJ1cXfUtvHNy5KFe/e5MS9TBr6iAM6tueQX7bVr9h7HCx1f9zYBBKlG6Gd48P49LV++g1yC/jQkYGXsSr15+yWfL/ffwKx/elW7cAOHu0xuRxvdG4fnVYuzTJcSFsXDMN9WpXYhsKSSNXnEa/VcvVkk8bBBrbwlkj0KV9EzRtMwz161bGmGE+hbrYeMgvVPX/cp3nBPlkuZe2xNNL9eqhPajRqj3UpS1nAjLSCSBQU5NbL2R9zvoqeP/kIUwtrbNBPrMOKSI73bVIejBRoSH4+JwgP7NPPheyY6MicWnvVjTtLXYXzGjpgbLy+upKXKKkRSTGEeSPQ995q3KVzUO+3MsuxwMLwl1HEcjPaR3ERIaLIb9Td87XWEJsDHYT5E9foJRselakieiZ8UMMg/xVC9F+1OQssumjlJpCfuxF8XnEQ37WJSMAD/nKPY/kPrsgIZ8GRe44g/t2YKCdtUncdc4cWo0G9aowyG/Xun4GKEtDPmXkoX9LWhm3kvj38etskJ81haYE8ps2rJnhTuNZ3iXbWMj/n/z5Jc3ezgo62lq5Qj4dR+4/T5+/xbb1M9k83zw8BGsr7q2Ncl9ccsfiU2gqoi7+WBkayAr5aakpePf4XyQmJIj93en8tFQ8u3UVblVqQk1Nnf2NvoqrqQlQ3KYEitnIlx+eXtaBr18gKiwMIrY9EAv6/uEdDExMM1k7yTvFyMwM1k4ucgEBbR5CvnxG0OdP6aMTTzwuOhKhXz7B1tU9k9eAjq4O7MuWy7xpyUVXLBNJWAg+vXqZSXZCXAye3riMCr81zSRbXV0NJd3LQ0snc4xSTuJJNllU3z99/GNfJQCSEhLYV4KG3fpmctMg2XaupaFraIT/DuSL4xHk3TTJe9MXJcindRAZEoTAN68zrbH42Bi8eXA725cooVANjuU8oaGlLXO6JDsuKhIfnj/N5EWXlBCPa4f3ZnIFontbqC6AvVtZ6BgYypRNB5DryttHD8FgXECxMwD5rN86eQg1WmaO7aBnho1TKRhKXGFk9JCaklwkn0c85POQL9fNoYqDChryR4xfhJ0Bp/DqwQEYGuhlmhL9tvfgWXx4cpT5xOcF+Q521tnUkZO7Tm6Q36NrSxha18aqxRPg06lZhqz3H78gJ9l0QF7uOvQ7AX5F724sOPjQsYs4ezj3T+equJY5yeQhv6A0/d/oJyvkk3vAvOXb8ULDFWlq5O8qdqgVidLEWC7lX6uTFo/urglo2LadXMoiqFi3dhsuxpRAmrpGuux0gGPA/8OhlrYSLQ1fo0u/PnK5CCUnJeLwrgDs/miMVKEEfKSjd8U7ivSQWlRNuouhY4ZkCvbNbRKpycm4fuo41tyKQ4KO+Q8dZPhWS5Qi/oNT6HWMHuYLixLZA/6z9sGCCO/cwsKDjxBp4AiRxPc+k2zJtkoEq+gXGNahGlw8Kv4nIJ82b+HfPyFJwwgWZsbZQhPkWni5HFSUIJ/W2KWjh/D3QxEStaVcV3Py3wfgEnIZY8YMgJmlpUwV0Je4x9evYPGJ14jRt09fY1nvjR9ibCMfY7hPfTiWcZcpmzYQga+eY/bGswgycINIjWLfRICI7gkx9Es304RADK5vB0/v3LNdSR8fHR6q4ufRdlyMsVX
4ecRDPg/5Mm8OVR1Q0JBPfvlV6vpCW1sL0yf2g3sZJ4SFR2H77hNYt+kADu1axFxpqCkK+WR5d/Nqi9sXt8C1lANmLdiQrRiWxJJPkE+W90PHLrCsOI4ONvh70wFMnrkGH54ezZb5Rx7Ip2Nobi9ff8TSeaNBfRR24yG/sK/Ar9V/NsgPD8P0dadwz7he+otPDMc5Nb2USAy3eIjGbX6XSykE+Sv8D+OYWi2kqmuln5OzbCFS0DX1BHz7dJMrqDQ5MRH7A45gXWRlpGjop0co5iybOm4Ysgejh3eXG/KvnTqBuS9sEWNI4C4d3SeZ+o++yn/Yg0n9m8NSHshPTcWLu7cw9Vw0gs08IRJIEgRIRyf+kF0y+AomNrODq2eFwoV8USpWjuuG7Y81cGDHKliaGLAsTLGhgXgVnILyrvaIj/iCzxGpcHG0h1rulyLHtRP1/SM+RqTAzckO57fOR6hNXXRqWJ1Ta35RgvyU5GRcPHoECz+UQpyeVa73nFhZIlR8tx2ThnVEMbkgPwWPrl/F9CtpCDd1h0iQl3udCM7fzuOPtmXgpADkT9r9DB/Ma0LEDAO5N8uoZxhfJQUV5YX8sFCVPo9W+h/G0Xw8j3jI5yFfrpeeKg4qaMinORDoUzGsA0f+wecvQTAy1Eet6p74Y0xPeHmWzpimopBPn/+a/D4UV288xPQ/+iE0LDJPyKd0m5RCc++Bsyz1JmXcWTJ3FKpWytkiIcuSTwNfuW4Pxk1Zhu9vzuS4UVDFNcxLJg/5Ba3xX7u/nCB/2t9n0iE/77R/BPkjzO9ngvyI4O/Q1tVjbgRZg3TFkH8ER9W9kaqet5sBQX631GPw7U2ZY8TgS4GulM9cU0cH6kL6EvCjiSH/KNZGVUmH/LyvW6PgXZkgn3yIE+PioK2vDyHJlrJAkpX12qmTmPOSIN9eBoAB5d/vxmQG+SXYIMiSSgHA+iYmLJWitNsJ/UaQP+VcLIKLSUN+zuMvGXQZfzQvAVcP5SCfLLBUcItSqOrq6mYUOZMr8Jaq4ga+QNPfu0FHSwcDJs9Hh8bVkJyUgBMbF+ByhCUm9GmLR2c24cJ3U4zo1R6GejpITIhDTGw80wFVbtVQFyAxKQmJicmAKBWJySkwMDSCIC0F/+xZhUvfDDGmXwdoC9UQnyKAmbEB6DrFxsUhNU0EbR1d6OrosHMjo+MgVBMhKSmZrT0dHR1o5JBRTVqrXEI+yaJKwaRL6luSd19en3wJ5C/46Io4vexftbOuhorvtmLy0A4ZkJ+amoKYsFDoGZlAPUvBOnLBY5B/FQiTCflAqW9n8Ueb0hmQzwq/ffsCfRNTaGhqZYqTkVjy/9jzHB/Ma8kB+U8xoTJB/o/aOSGBn2BgYgYNLa1s2buiw0Kh0ufRxiM4qqb484iHfB7yC40MCgPyC22yBdDxqImL2ZeJTX9NL4DeZHfBQ75sHfFHyK8BZSF/OEF+a3FAO73w9y2ZhbDvX1HCpTQc3D1hV9od2noGDPgJ0BSC/JRj8OktzhxDsm+fOsLSCdqWcoNDOU+UdPdkvukE5QRJikB+Q4L8ob4g33ySHRL4ETvmTIGNkwvsy5aHs2dlGJiaQV1DE6LUVAUhfxcm9WsGS9sSlG0ftAFZOawnrEo6M32QbFNLGwg1xZsoVUM+gXzWYGf698uXL1khJGdnZ3h5eaFSpUoMUnft2sWqG+fWyGJ/evcqbLgei3alk3DpkybmTxsF9bRIDO/ug89qNujWzRcXt89DsNAaXX16oparESbPWgHHUk6IDPoC09J1MaBDAxzeuAj3wvRgpQd8+/gSCfpOmDW+P8YP6IavomLo2r031J7txxebxujbvAqWzZuNCJEejHXV8T4wFKMmTYWD1ne08p2M2jW9AEEa/n3yGgPGTkNtD8c8b4T8QD4ZnpjveZZGxZ9WrFiBwMBAlCtXDtWrV4ejoyMm+81E48Hj2TrNqykM+W+3YBJBvoUFW2MUw7F6RG9YOZaCfelycK5
QGcbFLdPXmEhxyP/dDY6ly7L4G9okLB/ki2K2drBzc4eTZyWYl7BnUC5QU2fuOgpBfqVkVKhVWxzvI0rDskG+MCpmDlvX0nAq7wVrZ1dWs4OeGYpCvsLPIx7y5X9Z5HEkn0KTEzXKJ4SHfPn0JOsoyhz0z6U76NTjD9y6sBll3PJ+YciSx9XvPORzpUleDmlAKchPjoBP2jF4VanI3HDJL/7W8YP4+OIpC8qlF3XxEvao9XsnuHiRpTdRfsgXpaB16FY0qFOZFXpKSU7Cq/u38fDCaXbhCDCMza1QtVkrlK/TkPWvCOTX+bQR7RpUYG6GqSkpCPv2Bed2+DOXE7JW6hkZo0L9JqjStDU0tbQVgvxyb3fAx9sOJmZmLOsIZTE5unYp64es2DoGBihdtRZq/d4Z+sYmCkG+w/eL6FUmGXZOTgy+3l+/gOYN6jGXpqwVcVm2FpEIYWFh2VyemAU2MBA7d+5ka4Aqd5qZmbHKwGTh9/HJLYuYCKlJ8RjYpTW6/rkV7sbRGDrsD0xevAaliuth08JxeKdXBVMGtMe+ZWPxwaAKhvq0xNJR3WBWtx86NK6O6KAP6NZrCFb478Cjkxtw8KUmtiwcDUHsBzRpMxLHTwRg74qp+KjtjgkDO+Hw8nH4aNUIjR3VMchvE3ZtXwczfSGOb1+BY4EW+GtodXjV7Y0Dp4/BxkgTK2ZORKxVNcwYnneVWIL8gIAAVKlSha0p6UrP0pWWpf9OVWITExOzuQ0R5B89ehTPnj1j61VPTw8eHh74HhaB1mOmcw75nq83odtvrjAyNmabjviYKBxbt4x9maE1pmtgiLI166Jmqw7QNTRUCPKdv5yGr7sAViVs2fqhL3BH1y5j/6UvaDr6+nAsVxF1O3WHiYWVQpBvEfEEPc1fwMmtNIN8+vpzyn8NKIsQ5bzX1tWFlaML6nXyha1LGcUgPz/PIx7yOXkR8pDPiRrlE8JDvnx6knWUd+M+ePbiHRbNHonuXbivCCir/9x+5yE/v5rjz8tJA8pAvm5SOAab3EK9po3EkKSmhpMbVuP57WvQ0NSEQ1kP1Ongk2G1VsSSry5KQeeEA+jYhUrXU8o9NTy6fJ6BOFn4LB0cUbdDd1g5lWLuGeRSowjk//ZlK4b26wAtbSpLD4R//8ZSVtIcjIuZo04HXziWr8A2KgT+irjruL/ZhrG+9VHcinyrxZbQNaP6s/9q6erCu00XBmCUfYdejopY8u2/XsCY34qhJEFSWiqenzoAn/ZtmY5yawSdEvcRyTEEb69evcKMGeJifwYGBmjSpAkaNmyIAwcOoHPnzjmKo0Dhj3dPo/PEv7Bv21poqQFrF89AhEUdzBzYCtuWTMA7vaqYOrA9ApaMxkejqhjQrg46NWmGwGQhjA31WLam6LgkLFqxFoF3DuONfg349W6ItNRQNPLugP0nD2Pvyqn4pFuOQf7BJWPwwbIh3DU/YP21eGydPwL6upq4fuEoBsw7ilvbhqNhl5k4dXATq16+ZvZ4fNQohTlj++Z507NCcEePomXLlnL7+tP1In1mzfRDkL98+XLcvXuXbZjKli2LTp06Yd3GzWg+fBLnkO/xyh/j+v4PJsXM2FjoS8CaUf3YWtXRN0Cd9t1YNixNXV0WQaKIu45T4EmMbeYIu1I/stStGzsQCXGx0NLRQeUmreDVsDm09fQYmCtiyTcPf4wxHjFwr0KV5AVsXfpPHoGo0GB2H5etXge12nQSf6HT0FQI8vP1POIhn5MXIw/5nKhRPiE85Munp5/1KB7yf9YrVzTHrQzk5+STT5Z8DW0dOJQtD2Nzy0wwpKxP/puHdxH08T2cPSvBzKZEJnBV1iefIOPe2RNwrVwd5iUcmF+zpCnrk0/nn9+5Ea5VasDKwRka2j/iEQrTJ//Dhw+4cOECKleuDAcHB2Z9luWTn5SYgEWTB+P2NyHKO1mzLzbRwZ9x9204dm3bgCN/TcVb3SpiyF86Bh8Nq2BgpwYY1KYl2k9
YgloVXQFyeYEAhnra2Lt+Ed4a1MT0Xg0Y5Df27oB96ZD/UccdEwZ0wiH6ImDZEF5GoVi4/zV2r5kCAx0NXDmzF5M3P8DZld3RqOufOHFgo0KQnx93ndzuYoL8Y8eOwdTUFG5ubjA3N2drv6B88lOSknBhzxa4VakBC3sn9qVL0rjwyT+9eS37GmdV0gnaevoZMStc+OSf3LgaDmU8YOviBj1j00zPDEXddbLGCNHziGJ47Mvk8jziIZ+TFxMP+ZyoUT4hPOTLp6ef9Sge8n/WK1c0x8015NMss7qNSGauLOTnpUFlIT8v2cpCfl6yCwvycxtT3pBP+daD0KJZG6wJOIlS5vrsK0hsxDdMHDIATYfNRchlf1z8boKJQ3rg3oFFuBpSHEN6dsKrSzuw424Mpo7si/CPT7Hh4A34TRqBMztX5Aj5h9bOwPVgfQzv2w339i3CJ6tG6O5dEu27DcSkuUvgYCqE/+qlcG0xDN2q6hQ65Oe27gsK8vNeY8oF3uYlmwvIz0u+spAv83nEQz4nLyYe8jlRo3xCeMiXT0+SoxISkqBjXoOl6axUoYxiJxfC0TzkF4LSf+EuVQH5uVo7lcyuw0M+wFV2nfxAPrkHvb97EhsufMH0Ub1YQDS1tJRkPLxyGIeeqWFwY0dMnrUEpWq3Rfd6Tpgzfxls3GtjcI+2OLLLHycv3oSumTUGDh0JF2tj/HNkB77qecC3cQWI0iIxbpgfZi6cg+hvL/Dn/FUwd/NGVbNghFlUQ9t6lfH5xQOsWuePkNg0NGjeBm2b1YMw4QvGzt6KeTMnQENDiCM71uK70A59OjTN887l0pKfW0c85GfWjGVU9uw6qob8PJ9HPORz8nbjIZ8TNconpKhC/sZth7Fy7R68fvsJqalpcCxpg4G922FgH/kK6cg3e8WP4iFfcZ1xcYYkmI0LWbyM/GsgK+RTZcxVi1bhU5qJOJ92err2tLQUqFEObKm6T5qCZLSqZI06rdrKNYCUpERsWbMO94PUxPng09PNEzySzz0zC6f/TU0ggrct0KZPP+YDLauRtf3E7l04/TQCaWqUAlN8BgUlUgks5kMtSW8vAEprBqPPmDHMx1hWo/SEt0+fxO4Lz5BMOfhJFtX7odKeVLtXOu+4ADCPe4+Bo4aguByVgMmH+tX9O1i/6zTitcxYJWBqTDYVIKO5S6XMN0kOQk+fVnB09yj4PPkiEdNnUkoqtDQ1frhViEQs4DMlNQ0aGupITEyCkNI4AuzvpB+hhpBtBpJTUiAQqLOUneTqQ7+niQBNDcqvLmLn0m9UfI1SHFOjFKqU0IYAnq5FSnIKU4m6upDJJT1RGk4tLXF6UlrT1EhOXq0oQT4FZV87fhj7rr1DilAvY/3S+qCWscbS17VV3FsMGj8aJuYWspYvKI7i2c3r2Lj/IhK1TGWuMdOkr+jduyPsXWUbvWidfn37CsNXZ6UAACAASURBVKv+2oFILRrLjwcErRU1yfpNv/f0UyPR7X814F7dW+a46QBKm6vK59HWNX/jXpBA4ecRn0Iz8+XjIV+u5czNQUUR8nfvO42+Q//E3ysmo04tLzbR46evYvDouVi/cgq6yrC4cKOZnKXwkK9K7eYum4f8wtF71l6zQj65j4QHB4EASNwELGD0/vmTKFOjDrR19DLyZBPr6hkYQN/IWK7J0Es/KiwUCfHxUpV0Rfj0/DH0TcxgbG6Ryc9eR0cbhmbF5QqKZIWYoqMQExWVaSwRQd/x/eM7lKpYJV22mDY0NTRgXNw8W17unCZCIEM59CNCQxjgE3/T3GMiwvHgn1Oo8b8OTCfiYEwR1NUEMC5uwYKPZTWSTa5GpHMJy5P8xPhYFsTcYsAIltFEEuhJRaWMzMxY0G5aSgoeHd6JAd3FaUa5aLJ88rnoo6jIKEqQT+uAwDEyLJQi2Nk6onUQEx6KZzevoFKjFizIVbzjE7A1ZmJuzoJTZTXxGktAWFBQui89rWHqLxrntm9Ek14D2aZ
Mcr+TbFpjFHQuT6N4gLCg70hjm17xPZCclISLe7agTnsfCCm3fjr7qwkEMDA2YRl65Gm0+SmKzyMe8nnIl2f9quSYogj5/YbNQlh4JAK2zs8050PHLsLC3BTVKpfDkDHzERUVA3qxn/3nJpKSkzG0fyeMG+GbcQ4Vplq4bCtCQiPgUsoOf04ZhGaNarLfCdapENae/WeQliZC5YplsGLhOLg4i0vLU9XaASNm49rNf2FR3BRjR/hiSL8O7Dxy19m1cTYWrdiGR09fo4yrI7asm4GypYtG2kxppfHuOiq5bf6zQrNCftasIaQYeolvnDIKHnUbsqwaWYtcKaM8go29i2ayPNm/denJ0ldy1Uj2uW3rEf79K1oNGSs3tMjTP8l+eOEMy9vvM3UeyzTCWaP0lm9eYte8qeg2aQ4sHHJ+DvGQr5zGixLk5zQTWmN3Th3Bk6sX0HniTGjpcrvG3j95iEOrFqLblLkws7ZVTplZzo6NCIf/5JFo1mcIHMtXzFRAS9mOisLziId8HvKVXcf5Pr8oQv7S1Tswc9567N++EHVqVcxxbiPGL8Lajfuw0382Wreoi38fv0KlOj44smcJGtevziz/vQfPwNG9S+Hh7oITZ66iQ/cJeHRjN5wdS2DclOW4eecxdvrPgqmJEf5csB67Ak7j+d19LL2cZ80uqFOzIqZN7MdSY1Il3b1b56GedyUG+TSuv5b+ASvLYmjbbRyrbntgx8J8XwdVnchDvqo0qzq5MVERiI1PYJY5loNdTw/aWpoIDw2GjoExc1UQkGUtIQ5pAiF0hAKERsbCxMSIWWgpR3xcXDx09Q0gzCNVYn5mIAvyyUL+7f0brBs/GDbOrvCZMpcz2CCIoaw2q4b3ZnrpO38Vs4Bz1RLjYrFu/BDERUWgu99CWDo4cSWapSw8tHIBXty9gS4TZrDCX1w1sl5SVhDKyFO/Sy9UbdEmx68ZPOQrp/GiDvl03+9ZOAMfnj5C1z9msUJqXLXUlGRc3rcTVw/tRtNeQ1CxQd7xC4r0S18DX9+/hd0L/FCxflM06TWYM8NAUXke8ZDPQ74i9wSnxxZFyCffyjGTlmLNhgBmua9T0wv1aldCm//Vg7GRAZs/Qf6pc9fx7E5Ahj4a/G8QSrs4MIt883bDUdmrLKZP7Jfxe9M2w1C1sjumTegLQ+s6bENQ11vsDkR+/0Y2ddimQFdXGzUa9ETYx39gaCC2hpw8ew3WlsXh4mzPIJ82F53aifN9r998kFn1pcfC6UVSQhgP+Uoor5BO3b1oFLZcfAMzY0OEfP+Kht1GYXi3phjd+TcYNRiDCT6NIBSk4qj/PKR5dkBto2A06OmHA0cOwM7MAHGhrzFyxJ9YsHYdTPS5s3STOmRBPlnNTvivwt0zxxjcD1q8DobFxOkBlW3kvnP/3Akc37CSeSG0HTERpat5cyKbxvblzUuW+56qgVZv0QYNfPpyJptcdf6eMBjRoaEo4VYWPfwWcmatjI+JxqapoxH06T3MrGwxaMnfOboV8ZCv3Aos6pAfHR6Kv8cNZoWiSlWogk4T/DLHfygxfYq92ThlJEK+fGbVawcsXMvZ+iXXtq0zx7P8+fomphi6YjM0pdLGKjFs9lWxKDyPeMjnIV+ZdazUuUUR8iUTioyKwYXLd3H52n0cPn4JQcFh2L99AX6rU5lB/pt3nxmoS1qPAdMRFR3LjnGt2Ia53GRtVKhqzvQhsHZpkqPeNq6ZxgKySP73N+JqmdJN4q5z85/NqOJVlv20bfdxTJ6xBu+fHFHqWqjiZB7yVaFV1crcMLUXksr8jn4dWyD863s0a9ERAReuY1K7inj2OR4Ltx5CjbJ2CFgxBclePmhiFgyPxr4oVe1/2LdxMTTiP6BXz3FYtycAxY3k85OVd0ayIJ8K4GyYOBTBgZ+Yt239rr1RvWVb5ieubCOLNbmkUP57Av4K9Rqhed9hzIeXi0YwcOf0UVZptoRLafhMnc8ZbNw8dgBnt69nvsd
UfKjfvFWs+icX7eWdGwhYOgvJCQlMz+QOZOdWNhuEqQryN23ahI4dO3IxlSIto6hD/pUDu3Bhz1b2Jc/IrBj6zFkBA9NinOj0ybWLzFWH6h5Q7EjPGYth5eTCySaYAnE3TRsD+pJGxoB2oyazCs8sCFfJVlSeRzzkZ76QfOCtkgtbkdOLMuRLz4M+1fv0nYrHz97gwdUdDMJfvv6A4/uWZxxGv1P2hR3+s1C6Ujv079UGIwZ1yaaO70FhsHRuhPtXdsCz/I8qfZIDdwacwrCxCxD87myukC+dQpOHfEVWXP6O/S8F3hLkR9g3gm+r+gh8+xz9R/jhyLnTGN2xHkaOGIB5229hrt84XN+5gEF+02Ih6DRlMyobh8O5xQj8r4oFBvSbWCiQT5YzsuK/eXgHJd0rMMtc2Rp1OHlh06d3gvDgTx+ga2TEXHXKef/GyQaCViUVtyJ3oIjgb7Bzc0f5Og3kClSUZ0U/vX4JoV8+4/WDO3CvWReulWvA0IwbAPv84ilz0bh96hAL6rV1LQMrx1LZAEwVkJ+YmIj169ejXbt20JQjcFgeXRXVYwjyz507hw4dOmSrCMzVmOVNoZlTf48unUN40Dd8fPaIVa+lr1x6cga5yxr/+8cP8fnVMxZQX7Vpa9iX9YC5nQMnkB/27Qte3LqKfy+dg2e9xihmUwIly1fkRMdF5XnEQz4P+bLuMZX9XtQgn9xmKODVp1Mz1K6Z2R+fAmnJV58s7AT55Hf/8v7+DN3UbtKXBeXOnzkMLTuMhHlxE2xYNTXj94+fv8HW2pw9PAyta2PV4gmsH0l7//ELHOysma9+9fo9WT/Fi5mwnwMOnmOuQrWqe2bLk89DvsqWZ4bg/xbk98DsbRchpLSDarrw33sA1co4oEerulj4927sWDYNX/Xc4Sp4BfVqPRnkd5m+C1uWTUYPn54YNmkc1i9chb8LwZJPm3F6sV47tAc1/tce6ix1oiSTjPLrhKzsFABoamnNgm9/ZKlRXjb5BkeGBrPsPe4163Eum9L7XQzYhqa9BrNnEI2di0ZfNRJiopmrUd95q3KVrQrIJ6PKzZs3WeVbDw8PTsCMC51wLYPmGRQUhODgYHh7e6tsnspAPq3fmIgwFtxdt1N38TqQTteqhFJojcVHR2HPQj/4TlvA6fqlzXtifDwOrVrArPjie4O+Ayrv4ldUnkc85POQr8Ttp9ypRQ3yaTbd+0/D+Uu3sXj2KFT2KsOsBbfuPMHw8QvRqnkdrFkykUG+/9ZDmD9zOHp2a4mLV++hWdthuHJ6AwN92gC09xmPfdvno2G9arh28yEDf/K5J1CnwNtDxy7g8O4lcHSwwd+bDmDyzDX48PQo9PV04FGjMzzLuTD5lKu/RfsR2LlxdkbgLW/JV27dKXr2fwvyeyLKsRm6Nq2B1dOGwK3dJHSs74Xurepi2eaDEKTEYtywIUhVF6B2r6liyPfbhYObl+H59SOYsXo7ooPjsOfYsQJ316HrSrBBrgM1W3fkLIBOer28e/QAZtY2LF0m1y0yJBifXoghn+vGIH/vVjTrM5Rr0Syd4ha/seg3f3WuslUB+SzdYnIy9uzZAy8vLxQvXlyuOgWcK0CFAgnwIyIicP78eXTu3BkGBuK4MFU0ZSCfxsMg/8Rh1Ovcg/PhUezHngV+LCid60YpOw+unI/2o38Y5bjqoyg8j3jI5yGfq/WssJyiCPnJySmYt2QzyG3mw8evrAhKSQcbZnUfPrAzK3JCkP/5y3fYWJljy85jLFh21JCuGD20W4YOlv+1C4tXbMe3oFDYl7DEH2N6gXzyqcXHJ7IUmnsPnGX/36OcC5bMHYWqlcQZCZ6/fM++KNy4/Zh9ERg73BdD+3fMSKHJQ77CS02pE/5LkL95Zn8kurVCrzaNEPLxCXqOXYbtm9dibPdmWLwhAAb6enhw+Ri69R2NSRsOoJFpELr/uQcBm5ZDPS0Zi6cNR8C1Dzh94ijMDLWV0nvWk2X55NPxReG
lmt9J85CvuOYI9ENDQ3HmzBmYmprCysoK+ul5zbMGXIsLguXeJPd5ToHaeZ0rOZ6OyavPnJ4juQWFk6zY2Fh8/foVISEhaNSoEYoV48bFKjcN8JDPQ77id+DPdwbvk1+A16woQr4805dAftZc+vKc+186hg+8/fmu9uMbZ5Fm6oRyLiWRnBiPCyeOwrlqA7y6cx51GraAtrYWEhPicP/qWRi6VIODTgJO3XyJ5o3qsvSa4d/e4/Tlu2j5v1bQ1aKCONw1HvLzr8tf0ZIvrQ1y2Xnz5g1evnzJoJ9cPHJq8mzYpY+RBn9p0M8K9rI2EPJcuaxjMzMzg5ubG5ycnKDNUcaXvMbBQz4P+fKs05/9GB7yC/AK8pBfgMouhK54yC8Epf/CXfKQn/+L+6tDfv41w58p0QAP+Tzk/xfuBh7yC/Aq85BfgMouhK54yC8Epf/CXeYE+VktqOSuc/XgbtRs1QFqwsxfEhTOly8SUUr8TO394wcwtcruk8/C9BTMx5917FHMJ/8Jytasm+0qKjr2rLIJ8i9R4G3vIZzLpvSDm6fn7JMvGbcqfPJ/4aVeKFNTFPKzrjGqx3D75CHU65TdJ1/Z9UvB3bsX+qH79Ow++crKFvvkL0D70VM4vzeKwvOI98nPfFl5yC/Ax8vPCvkFqKKfuise8n/qy1fkBp8V8ikzBqWGpNzckpaWmoYH/5yCZ71GmVJnUqYPfRMT6BubyjUvkSgNEUHfkRAbk+l4KlpFQbf6xuLMV+ImgI6+PoyKW8iV1o/5W0eEgwoISbeYyAh8e/cazp6VMv1dQ0sbplbWUFOTnbubZCfGxrB0htKNAhcpvWit3zuJ/0y7FwGYjqiIlVDOFJRJCfEI/RKYLkAsiv525K8laDvij0x90niNLSyhpaMLHvLlWnaFepC8kE9rjKA7Ivh7xjqigcdFR+HJ1Quo3OR/mdeBuhDFrG2hriFHvQqRiK2n0K+0xn60xPg4nNr0F/43cFS2NWZiaQVNbflqctCzIuTzJ9D9LbkHKCPX+d2b0MjnR/FKdlerqcG4mAW002M8ZF0cAvqi+DziIT/zleMhX9ZK5vB3HvI5VGYRFMVDftG7KKK0VERERiIpKZkNjkBM39AQOlqanAw2PCwUSakCWBSXD6YV6TQr5JN1+sHQAXB9+Q7CNLHNnWzv9LIleBVIpcGL09XG10H9UKmjj1xdknXv9vRJcD5/GRokm6z0IhFSU1PEsll6QBH7X6pQHU9+bw7vUROzfT3IqTMCjbvr18B+yw5opaRlfAGgcVM3WVMP3q7mibpzl0JTT1wBO69GaT4fHj8Ew/kLYZqUAkH6pwixj7oo20bhlr0lKi9ZCdMS9rJEMz/3N3dvInbCeNhFx2fIJmAi8FNTFzIdsSYAXlqYwnLmbNh7evGQL1O7hX+AvJBPa+zewT0otmQ5jJNSMwaelkb/XwA1dTUxQKevg2uONqi1dDWMLK1lTpLugRfXLyNl8mRYxyVCnOhVANrQ0zrLutF9amUG+znzYVu2vEzZtEZpk/5p2BA4h0ZCXfLMoC92afTMyPzl74uJAdQnT4Zb7d9kyqYD6EuGSp9HfpPhfO6Sws8jHvIzXz4e8uVaztwc9F+H/IjIaJiUqIdHN3bDvYwTN0pNl1IUgoN5yOf0knIiLDk2HF19fKFtbAVjA20IhUL0GjAC7qXsOJEfsGExbkfZYN5I7quQZoX8mPAwhPt2Q+ubj6GZ/sLObRJh+rrYPXMcKvj2kWueZE18P2YUOu4+Bq1cgjglglKEQqwZ2BVek6ZDTQ5rZXJiIp6tXoYuc1dCnyBfRlvZojbKr1oLTX3Z6RNTk5Px8Mh+NBnxBxxjE2WJxgZPZ1hu8IeZg6PMYwnAXt++Dpee/VElKDIdwHI/7YyTNcLWrIC9V1Ue8mVqt/APkBfyU5KT8SBgB34f6web+B9f0XKbwepKrnDy3wwjG1uZk0xLTcGLKxfh0WcwPMNiZK6xIy4lkLh2NWz
LV5ApmyA/8NVz6HfphgbvgyCUkW3pgZUZ7i+fj9K/NZYpmw6IDgtV7fNo7Ch03KX484iHfB7y5VrAqjiIC8g/ceYaFizbgrv3nyElJRVOjrbo2e1/GD6wEydFQ+LiE7Bt13H069mGcxXQeK9cf8Dy8evpyve5MbdBZB0n5ddPTExG2dKyX96cTyxdIA/5qtJs/uUmRQWjSYvfMX3lDlQoacIsY+rq6ji4dwfsbYvj4s1HqFirIWpXLo+05AQc3rcLn4NjUK5qbdSrWh7x0WE4fuQwPofGoXq9Jqjm4YKUhCgcOxCAt99jEB74CrHm1bBodEf8e+siLl5/AEMLB3Ro0wIfHl/HtSefEB8Vhq69BqCYoWJrPifIj/TphrY3HsmE/FADXez4czw8pSCfvgRoaGoxV5WsZewJ8j+OGYnOu45BOzVvEE/WEGLFoG6ZIJ/81KlpaGllsxAS5D9ftQw+c1bCIOWHJTS3q7r0f3VQTgry6UtAckICNHV0s9UCIMj/98h+NB8+EY4xiTJL+qyr4AwL/40wTYd8stbHRUZAW9+AyZb2d5ZAfpke/VAlKArqMiDppLMNQgjyK/GQn/87tuDOVATyH+7dgbZjp6NEnGzIX17FDY5SkE/riFx7tPX0c1hjYsj36j0IFQjy8856ioOuJZCwbg1s0iGfQJ42/zqS9StV9E0C+Uadu6HRu+8yIf+edTHcXTEfblKQT1WpmWwNzWx8QZCv2ufRKHTedVTh5xEP+TzkF9xTJEtPykL+Xxv2YciYeRgxqAva/94AOtpauHbzX0yeuRrNGtXElnUzlJ7b2X9uYcK0FbhzaavSslQpoCiOk4d8VV7x/MkmyG/crBU69B8LN1tjCDW0UbGSF/r+Xht23l3RsGIJLFi5Ayv/XoUnh5bg4rfi+L2uO9atWYt5a9bh7xljkGjphcaVSmLO7HmYvmobwu8fwroz7zDUtzk2rVoEo4rtMbNXdfQYMgN9+nTHzRPb4dBoIAw+nMHiXZcwYdxI1KtXH0Z6WgpNQlnI3z6TIL8365Ne+AeWzQH5+patUReWDk7Mp14M5eogdx3FIL8rKv4htuST7LtnjuLRpfMo5/0brJ1dYWJhBS0dHQb85AOsMOSv/AsaZMkXiRAS+An7ls6Ge826sCtTDqaWNtDW04O6upC5KikK+eYb/MWQT8WlkpKwdswAuFWtCecKlVl1X10DI7E/tUjELPk85Cu0bH+ag1UG+ZVdGeQbkiWffO4TE/D3+CEoU60WHMtXhKmVLXQNDBnwk0uOopAfv3a1GPJFIpDL0KrhveFcoRJcvKrDzNqWxc/Q+iU3OLLkKwL5d5bPy4B8GtvKoT1h61IablVqwdzOAYZmxSAk4FdXZ5Z8RSBf8ecRD/lc3Ey8uw4XWpRThjKQHxIaAbsyzTF+RHdMm5g5YObG7UeYs2gjtv49E4YGevj4+RuGjJ7PKs9qaWmicf3qrPiUkaE+jp26gmFjF2DG5AGYv3QLgoLDUKlCGWzf8CerdNu8/XD2hYA2EFSEyrWUPSZOX4ltu08gLDwSrqUcsHj2SPxWpzKbdeU6vujUrhHOnL+Jf5+8gpmpEXZtnIOlq3fg/MXbSE1Nw/qVU9CgXhVIu+uUdLCGvqU3Du1azMb+5WswzMyMsGXtjAxXnq27jmP2Qn+8//gFFsXNMGpoVwwb0AkE+FnHudZ/PyvYJcnlv/rvvVixdjc+BwbB2ckWMyYNQMumtTPG3K1TU1y4fBePnrwGFQRb8OdwdGjTUM4rmfNhPOQrpT6VnEyQX79RU3QcMB5OlvrQ1jZApapVMLBjKyzcvR+mSMGQfv3QbvhErBvRC1O3HEN5RwtEhIUCacnoNmAS1q5dBHN9TfyzdTZuiTwRdnkreizZhjIm+ti7bj6uh5ihRfFX8Nv7BFXcSyE85AtiildDG6cY3AwzxtKpQ2VamHOavDKQH6Kvg82Th6NU63YMwmMjw/HPzk14++g+e0FT4J6FvSN
q/K8dXLyqITkpUX7IFwqxpG8HuA4eCoG6EBTk+ujKeVw/up/FBWhqazPIr9L0fyjn3YBNTRHIX9ysJhxmzoZQTxdJCQn49u4NDq5awPyUaVOiZ2QCrwZNUalRSzYPRSB/racj9BYtgqFNCdBXgKiwEGyf9QfI75q+cBB8laleGzVatoe+iSkP+Sq5K4uGUFVB/rJKLjBfvBh65pZsgxsZGoydsyez+Ba2xgyNUK7Wb6jWvA30jIwUgvwDLrYIWzAHxV3dQEH3VHV3x+zJbPMu1NCAjr4hnDy8ULtdV5hYWisG+VZmuDJ3Khyq1mT3Gn3527PQD9HhYWxDoqWrB1tnNybb1rWMQpCfv+cRD/lc3Ck85HOhRTllKAP5VGm279A/Ef7pH+jq5F5Zk17oHjU6o3LFMlgydzSrMNuxxwQYGxng4M5FOHn2Gn7vMgYDerXD4jkjERsXj3JVO2H4oE7sCwHB+bZdJzIs+es3H8SkGatx4fhaONhZY+W63Zi7eBO+vjoFTU0NVPutByKjYnD+6F8wL26Kes374+nzt9i7ZR7q1a6EabPX4vDxi7h/ZUcmyHd2LAEd8xpoWK8qArbNZ5uT9r7jGXDTOF++/gg3r7bYv30BmjaswarhNmw1CFfP+LO5ZR2ntE9+wMFz6DdsFo7sWcKOPXz8Ejr1nIib/2yGl2dpNuZv30Nx6uBKtolZtW4Ppvz5F0I/nJMrW0hul5uHfDlvhAI8jCC/2f/aY9a6PajkZMaCPsnHtneHNli0KwCmghQM7kuQPwmbxvXE+LWH4O5ogbCgrxAJNdF34FgsW7MUlnqaOP73ZLwyqIOIa9vxu996eJrrYe/fC3Ej1Aytrd/jUKA5ercSp4M0NLPEvWP+uPzdAEunZk/jKI8KlIH8YD1trBrqC7uGTVhX5KZz5eBuvPv3PgTqarBycEalxi3h4O7BsucoYslPEqpjbpdmsO3WHVBXZ3Dx6t4t3Dx2AAI1AQN8AnAnTy9mGU9NSVUI8ufVrwTTUaOgrq3D4ILcEQ6vWcQ2K3qGRqjYoDlcK1dH8RL2bFOhCOSvLmeP1IkToGdhkRHUuHuBHwN+oaYGPOs2Rtla9KXDmc2Lt+TLs1J/zmNUBflLPB2h+ccE6JgVg4AyRInSsGfhTKSmJENTSxuevzVB2Rq12Sab1rcilvx9zlZ4N2EUTEs6s3cVWez3LpwJcpdT1xCiTLXa8KjbENZOLsy9TRFL/h0LY5wYNxg2npVYQLxQUwsHV8xnMK8mVEdJ9wrwaticWfb1jIwVgvz8PY94yOfizuIhnwstyilDGcifPmcddgWcwvO7+/Ls7eadx6hevyeC351lVnVqp8/fQNM2wxDx+QKu3njA/n/I+3MZv/v2mwo9PR2sWTIxGzwnJCQhJjYOxcyMmazQsEgUc6iPZ3cC4ObiwIDZu0YFZgmnRq4+x09fxb/Xd7F/k9WdNhXRXy/lCPlkeW/bShzN77/1MIs3INn0BYC+MlhZ/ihtXq5aRwzu2wEDerfNE/Jpfi7Odlg2f0yGrmo06IWa1TzYOGnMBP8rFo5jv799Hwin8q3w7fVpWJjnP0sKD/ly3ggFeBhBfsMmLdCsywA4WxsyF4+ynpXhN6xvJsjvOnEhEu7uRMCdUDSsXgYBew9j6d9rsWPxdLxPMkWV0jbYsi0Ai9dvQdTjE5i55So6NK2G04f3oniVzpjVqxra+45ARx9fRH95Bdc67RD77yFc+VY4kM988qXcdUjlF/ZuRXx0NNxr1oFNqdIsZZ7EB11xn3wKvPWDgPzYATy7dRUvbl1D2Rp1ULJ8xUy+x/nyyV9Jgbf6bKWEB3/H2S3r4Fa1FlwrVYeGtnZ6in4Bg3NFIJ988s39/WFmL47doQ1fwJI/4eRRibkDkd80bVQowwnvk1+AN2ohdKUqyF9O7jobN8PIWhx4S1/J9i+by9zBylavDS09vfT7jtaY4j755K4jCbxNE6UxyLd1KYNyterCwJQ2FuL
1my+ffCl3HRp7wOJZMLW2YfdGcVt7ZiSRPDMUddfJ+XkUxWTn/DziIZ+L24KHfC60KKcMZSD/z/kbsHnHUbx6cCDP3nYGnGLuOAT5kvbm3Wc4e7Rm4B34NQhtu45D7PcrGb/3GTKTuehs+mt6NngOC4/CpBmrmGtLXFwCO4fcgcgy71nehQFzx7aNMHJwF/YbbUau3/yXWcmpUaBt3Wb9kBJ+K0fIv3VhCwNuatt2H8fkGWvw/skR9oAidyKaT3h4FHuwfP0egnkzhub4xUHaku9asQ1z6xncr0PGHH36TgUF6+7bNp+NmTYWY4f7st/JpadE6WZ49/gw+1qR38ZDfn41p7rzUimY9vARhEfGQF1djVlvq9ZtgE9P7qFGw8bQQhouX7wI+6KOLQAAIABJREFUV6+aKKarjhcPb+PZpxDUrF0P5sZ6zI3j+b+38OJjGOrVrwdTQwOI0lLw/ukdPHgXCgfr4kjUMEW1co6ICv2KC5evwcjKGTUql8eX14/xJU6Iml7u+ZqgMpb8nAJvaS7U1AjupQL06G+KQ37mwFsCYvrET18Jsqb9yxfkSwXektxUShOqppYtYDg/kC8deEvPGbKw0pilNz2kEx7y87Vsf5qTVAb5WQJv815jikO+dOAtKZs2qvR+FKe6ZWXqWMsX5GcJvM3rmaEw5GdJBCD7ecRDPhc3Ew/5XGhRThnKQP6ugNPw6TeFWeDJtz5rIzcXDQ0hg+Lh4xYi6O2ZjEMo80wpz99Z6kryW2/XbTxivl3O+D0vyCcr/6s3n5jbDFnVo6JjYWRTJxPkk08+ufpQI8i/cesRTh5Ywf4tC/LJ759iAqhJQz5Z9emrwLGAZRmbgIreXeHbublckD98YGcM6ts+Y47d+kxBQmIi89knyG/Xuj7GDBPnEOchP/NqoodC1uqOci7xoneYSISU1FSW85waexmqqbOAN8qyQ6/EtPSsKWpqAnYcfUWiVJviFyblipfkRVeHWvpLlEFteokk+hsBKOksJSWFySdZEh3Sb/lpXEN+XmNQFvLzkq0s5OclW1nIz0s2D/n5WbU/zzkFBfl5rzHlIT83+VxAfl5jVxbyZT+PeMjn4m7iIZ8LLcopQxnIJ793G5emzFVl4awRmXp8+OglGrUegruXtjFrd9V63TO565D7zP86jmTuOgTdikA+ubFMHN0Tfbq3Zn1SMG39lgNVDvm9B89gKTG3rZ/J+qXNBc1/5pQBMiG/ebvhIJ9/aXedKnV9WbDwXL+hPOTLWK+/FOTLeW8WxcOyQj6lenw4dCDcX7wVF8MS70HEFnT6RE+f0unfAiBOWwvvh/RXoBhWIu76TUKZs5cypdqjTQ/b2EhZCNPU1HCvXUvUGjlB7mJY99b/hVKbtv/IwU/jpCqcNGaayA8DJK5Vq4Da85ZAU1fOYlgnDqPY3AUolpjC5JAO0phiRJm/WIiAqw6W8FKkGNa9W0gcPx4lo+J+DDGNSpBll/3c0hRms2bD3oMvhlUU76esY5IX8sXFsPbCevFymCSnZFSOldxrGXLT019ecrJB9WVrYGRpJVMNrBjWjSsQTJqMErEJ4oJrUvdC1j4eWRWDzdz5sC1TTqZsgvyvb14icNgQlA6OEOfgzyKfCUn/W6CxAVKnToabt3zFsKiKdbbnkWRU6TK5fh5Jj1fSVdbnEZ9CM/PS4CFf5q3C3QHKQD6NYtP2I+g9eCb692oDn07NWK55gvYpf65B1w5NsXzBWDZYsniTdZwCayMiYlhAq72dFXZtnM0Cb/OCfMpSM3P+ejy4ugP6erpo0mYI7GwtmSvP85fvMXbyMpz55yYLjqW0nWQVV4Uln74I7Nl/BjfOb2LBuBR0TAG9/2tWB/NnDkPWcZLVX5Jd59Cxi+g50A/H9y1DRQ837Np3GrRpIBcjKsLFW/LzXtM85HN3zysjKSvkE2x8e/kUKfHxGRxAgP/q/m2UdPeAhpZUQL66OgwsrVi6PnkawUbIh7eICwuVYgw
Bgj6+Yxln9IxNfrgCCNSgbWoKc3vHbG4/OfVFaf4iv31F5JfPmbIMUfaO8G9fYVPKNZMcdT19WJVyy5YTPyfZLAtIRDhC3r2GgH2REe98EmJi8PrBbZSpUSfdfSidwDQ1mWwKSpTVCJISYmPw/cVT8YaBnSBgrk23ThxE9ZbtoC7USKckim7WQHEHJ+gaGfPFsGQptwj8Li/k0xqLCQtF6Ie36WtMvA4ylbnNKHkLQFML1i5u0NCWXReD1lh8dBSCXj3PtMZylS3UgLlTKegYGMqlwaT4OHx9/hSi1BTZGb6EQpjaOTC/fnkauQllfR7lqheOnkc56iXL84iHfB7y5Vm/KjlGWcinQZ27cIv5qt+5/5RZuin4ldxSevn8L2PM5J4zcOQcXL3xkAXXUupIAmOCdlmQT/729Zr1ZwG2lJ1GW1sLPQZMx8dP31DR0w3+q6eC4gMOHPmHudKM/mOJSiCf+u/YYyJu3n6MErYWWDR7JD4HfsfICYtZ+k9yt5Ee575D5zOl0KQA3mVrdiEiIhqlXUtirt8Q1K9bhemIh3we8lVyg3MsNCvkS/vbSrqiQlGUQq9i/aYoU907WyEqZYZEAHJs3TIYm1uiWou2LNsMZ00kwrVDexHy5ROa9h7CUmNy2Z7duIybxw+iy8Q/oakjG7YU6fv7+7fYMWcyfKfNZ3nJc9zYpKTg0eGdGNC9K4Tq6oqI548tIA3IC/kFNBy+G440wEM+D/kcLSXFxbw40x3OlfpBXZh7CkzFpfJnFAUNpKYk4PWddXBtuLkoDEepMfCWfKXUx9nJsiCfIDz82xesGtEH9mXKodM4P+6AViRCXEw0VgzpDi0dXfSeswIGJvnPPJVVKeSnv+GPYYiJDEfPmYthJucXB3mUS188jv29HAT6BOJWjqXkOU2uY+iLx4N/TuGE/2o06z0Enr81zjHtbhoP+XLpszAP4iG/MLWvur55yOchX3WrS4bk99cmwtyuKnSNShTaGPiOVaOBuMhPCPp4Ew415qimgwKUykN+ASo7j65kQT59Lr+wezOuHNjFqrQOWLwWhnJ+apc1Q/LFf3LtIg6tXMAO7TB2Gly8qso6Te7fyQ1o07QxLL93nfY+rMAOVy0uKhL+k0ci7Fsg3CrXYGPnqhFA7Jw7GR+fP4GNsyt6z17OQz5Xyi1gOQzyB45lxan49utogFzsPl46CZ+2raCpqfnrTCyfM+F98vOpuPycFvxyN9ISAmFRsk5+TufPKcIa+P7uItS0bVDcpWMRHqV8Q+MhXz49qfooWZBPgLxx6mh8I390gQAtB46CR52Gcvmyyxo7WcMPLJ+Lpzcus4xD1Zq1RgOfvqykPRft4p6tuLx/B0v/V9LdE13+mMWZy86/l87iyF9LWYEviicYuHid3H7Gsub2/slD7Jw7BYlxVGFUE33mLIeFgyMEgswZlHhLvixNFv7vS5YuRYIoezrZwh8ZPwJlNEBf2+ytLNCpQ3uWQe2/3njIL8AVkBjzBe+vT4Bzpf68y04B6l3VXYldddbCofpcaOnnP8++qscpr3we8uXVlGqPkwX5BLFntv7NqlpaOjgx3/karTuwgl/KNgL7M5vXIezbZ2jp6MHE0hq123ZhFTa5aOe2bUBkaDALmrVydEbdjt0h5MjqduPIPnx7/wZf371mAclVmv3OKu9y0V7euYHnt67i1b2bKE3FuarUhGP5itms+Tzkc6Ft1cpISkpiG1jphDM/Qrd/JLnJKSGNZGT5OTevPvLqi/qUPpf+LQn/lf6v9Ngkx0ifm1WOsudmlZ31qhW4jkQilgL5Rxpk1a6joi6dh/wCvkLfn21EWsJ3WDk3LOCe+e5UpYGvr89ATdsCFqV7qqqLApXLQ36BqjvXzmRBPmX9SEpMwPXDASzTC0Fy1oI4+Z0JK+CTnIT3Tx/B1MIKRsXNMwpG5Vem9HlkwY8KDcan549RtmY9Nu781hPIOh4qbhUTEYFLAdvQtNdgsWyOLHpUoTQ
uOhpbZ4xD37mr0mWTFV8q7yHVXuB98rlYJrwMXgO8BpTUAA/5SiowP6eTb76+kQ2K2VXPz+n8OUVIAyEfryMmMvCX8MWXqJWH/KKxwGRBPo2SPk2TT37N1h05cdPJOvN3jx7AzNoGhmbFOVdKZEgwPr14DPea9TiXTek5L+7dimZ9hnIum/zyt/iNRb/5q3OVzUM+52rnBfIa4DWQDw3wkJ8PpSl7SnJCOALvzYeWjhHMHbx51x1lFVoI55OLTtD7y0iMj4RNxXHQ0DYphFGopkse8lWjV0Wl8pCvqMZ+HM9Dfv51x5/Ja4DXwK+jAR7yC/FakutOxOd/YGzhCQNTR2jpFeeBvxCvh6yuCewTY4MRHfYWEd8fwNi23i/joiM9dx7yZa2Egvmdh/z865mH/Pzrjj+T1wCvgV9HAzzkF/K1pGDcqC+XERvyAImxX5CWklDII+Kme/XYZCYoVY+bQD1uRqWcFDWhNrT0rKFXzBOG1t6/RJBtThrhIV+5dcLV2Tzk51+TPOTnX3f8mbwGeA38OhrgIf/XuZZFaybTp4vHI/lv0RodP5o8NMBDftFYHjzk5/868JCff93xZ/Ia4DXw62iAh/xf51oWnZlERAAlS4rH8+4dYGxcdMbGj0SmBvr374+1a9fKPI4/QLUayAr5EImQGB/Hgm0ljf7/zeMHULXZ75kyyNCDnbLtaGjJV12bsukkJyQgJTkp06Q+vXjCUnMamJj9+DvJFmpAQ1s7x0JQWbVCslOSkljeeukWExHOsuuUruad6e9qQiG0tHUgUMucez4nbbMsQCkpSIqPy/RzXHQUC0hu5Nsvs2w1dVYVWN5sO5RNJyEuDqR7SUtMiMeOWZNYpV7pRuPV1NJmaUb5wFvV3hu8dF4DvAbk0wC9zwUielLyjdcAVxog672fn1jatGm8NZ8rvRaQHCogkioFkgXULd9NFg1khfz46Cjcmz4Jtu8/QV0EiAQCIC0N8THR0NE3AAQCcd5sgQCJmhqI69YZHi3ayKXXlKRE3F22EObXb4GVj0lPbp0UHw+hhkam3Ptp6uoIbFgX1foMgpocOflTk5PxcM92GO0/CI00UcYYKUUnrTOt9I0IjVsgEuFV+dLwnjAVGjq6MsdOm5ynF88Cf62FYXL65ofUkpzCNkQ6hkZMpmRKLyzNUHnqTBhb2ciUTSlKPzx5iOB5c2Aelwi19Lck9RkTFSHe+EiSjAMINDWE3ehxsClTjod8mdrlD+A1wGugIDRA73Me8gtC0/+VPiRWfPovNbLi89b8n+rqa2trIzo6GhocFT76qSZfhAabFfJjwsMQ5tsNrW8+gUZaWp4jDdfXxZ6Z41DBt49cM0pKiMe7saPQYfcxaKXmLTtFQ4i/BnRFpUnToSbHGklOTMTT1cvQZe4q6KX8+AqR28BWt6iN8qvWQpM2LjIa20Ac2Y/GI/9AyRj6CpG3vcrf0xlWG/xh5uAoSzT7YvL69nWU6tUflYOioCbDFnbWyRpha1bAwasqD/kytcsfwGuA14CqNUDvEAMDAx7yVa3o/5R8aSu+ZOK8Nf+nWgKmpqZ49+4djIyMfqpx/2qDzQnyI326oe2NR9BMyxtmQw10sePP8fCUgnxyaSE3FXWhRjZXGIL8j2NGovOuY9CWAfnJGkKsGNQNXlKQn5yUyFxayFVFTS1zKXmC/OerlsFnzkoYyAH5S/9XB+WkIJ/cccjqL9TUyuZmQ5D/75H9aD58IhxjErOUpMq+ItZVcIaF/0aYpkM+fcROjItlbk1ZC4lJIL9Mj36oEhQFdRmQf9LZBiFrVsC+Eg/5v9q9yM+H18DPqIHIyEiULFmSh/yf8eIVyTFLrPgEhx8+iIdobw9ERvLW/CJ5wXIelL29Pa5cuYISJUr8RKP+9YaqNOTPHA8P394Zijm8eiEDfPLfNyxG6XqFzN2GKs0qDvldGeQLhOLsWffPn8Tre7dQo1UHFC9hz9x7GDSrqTF/fIUhf+Vf0Ei35Id
9DcTRdctQvUVb2JV2Z3OgsZNs8n1XFPLN/f1hai+25FMMwqapo1G5cUu4Va4BtXTZNHZy1yFLPg/5v969xc+I18B/QQOfPn1CrVq1eMj/L1zsApnjpk3ibnr0YP7BrJH1S/rvBTIQvhNlNFChQgVs2rQJHh4eyojhz1VSA0pBvr4utkwbBbd2nZgLC/ntn/RfjTcP7zJANrO2hUMZD5TzrgdblzIgS7zclnyhEMv6d0KZEWMgEAqRGB+LB+dP4fL+nQy8TcwtUcK1LMrWrAPH8hUhShMpBPlLWnjDee4CCPX0QbEC396/we7500FWd0OzYrB2ckWZ6t5wrVQdQg1NhSB/racTjFasgJGtHSioNiYiDOsnDmOuObqGRrByLMXklq1Rh8U58JCv5CLmT+c1wGug0DTw8OFD9OjRg4f8QrsCv3LH0pD/K8/zF5xbo0aNMHbsWDRs2PAXnN3PMyVlID9YVxtzujaDWZWqoOhQbT19/HvxLAJfP2f/Nja3YJbrcrXrw6qks0KQn6SujmmtvKHXqDEE6urQ1NZB4KtneHz1AlOunpExSlWsinLev8GeglBT0xSC/Fm1yyOtfTuoaWuzDEEJMTG4cnAXA3EtHV04eXjBvWZdOFeswr4YKGLJX+1uj6/dOkLbrFi665IQ57b7M4s+bX4cynqgbI26KF2tFuuLh/yf537hR8prgNdAZg2cOXMGCxYs4CGfXxgq0AAP+SpQasGIpJ1/vXr10L1794LpkO8lRw0oA/mh+rrYPnMsynfrJZYtAE5vWotv714z15RSXlWZFVziUqOQu45QiOWDuqDCxKnMvYXa46v/4MbR/fCs05BtHMRpKoVQEwiQrKi7TsvaKLt8dYa7Tvj3r9i7cAZcK1dH5SatmIVdMm5F3XXIkm++fgNM7MUpfsmnf8Ok4bB1KQ3v3ztDz9gkwxWId9fhb0xeA7wGfmYNbN68Gf/88w8P+T/zRSyyY+chv8heGlkDmzJlCsusM3XqVFmH8r+rUANKQX4OgbeUglOgpg4NLS0GstJNIcjPIfCWUm2mpCRDS0eH+cxLN+UDb5OREBfLLOssaFjybEmHdEUs+dkCb9PSQIWztPT0WO5/6dz8fOCtChc3L5rXAK8BlWtgxowZoPcIn0JT5ar+D3bAQ/5Pe9H9/f1x+fJlbNy48aedw68wcK4hPy+dKAv5eclWFvLzkq1sdp28ZPOQ/yvcRfwceA38dzXQs2dPeHt785D/310CKpw5D/kqVK5qRV+6dAkTJ07E1atXVdsRLz1PDWSFfLI4v+vfB94PX0BDJEJ6WSmA0mmqUaB7erA7gBhdbVwbNxQVO8vnckXVaJ9MnoA6R8+lF6xKl0/5+OlezhAtQKpQHce7tUHVsZNA1WllNcqu8++6Vai92j9zek6WkpJki4UL0nPcH6hXFVUWLYemnr4s0aza7cPjh1Bu6p+wik/+cbwoPde/QFw1l2STvo66loDLyjUwtXOQKTstLQ1v7tyE4fARKBMWy2Sw2lcse6mIfVGgv4hlA7dtzaG2aAHsPSvxefJlapc/gNcArwFVa6BmzZqYM2cOD/mqVvR/Uj4P+T/tZQ8ODoaLiwvCw8N/2jn8CgPPCvkEy29vX0dSTHQGc4tEafj+/i3M7RzSc8in07i6Ooo5OsPSyUUuVRAsf378EFHfvmTIpls4KjSE+ddraev+yJilJoC+pTXs3D1Z+k1ZjSziQe9eI/jNSwik0vtT3v6YyHCYWlj/kA1Aw9gYThWrsqBbWY1APOr7V3z6935GZVs6h74efH//GjYupSFIB30mS1sLTl7VoGNgKEs0y+YTGxGOd7evQ5BefIx0Qtfh9YM7cPGqBoG62o/6W5oasCvvxdKTUqzAo8M7MaB7VwjVM9cNkNkxfwCvAV4DvAY40ICJiQlevqTnLj3N+MZrgEsN8JDPpTYLXBblyr9w4QIrpMG3wtFAVsiX9kWXjCg1JRkBi2exYFoHgm6OgfL8Dn+
YWFihfJ2G2fz4ldXKg/MnEfTpA37r0pMFAXPZ3j68h+tHAtBh7DQWg8BlCwn8hO2z/kDPGYsZ0OfUeMjnUuO8LF4DvAYU1QAVtKxbty4+fPjAQ76iyuOPl0MDPOTLoaSie0jr1q3h4+ODtm3bFt1B/uIjkwn5IhFiIsKxfEh3OHtWQuuh46Gprc2ZVpISErBiaA/o6hugu99Clkeeq0ZfDqgIFeWp7zFjEYyKmXMlmhWxOrV5LR5dPo+eMxejmA13Rd3IHvbk2kUcXr0IrQaPQZnqtTMFAksmwUM+Z5eTF8RrgNdAPjSwb98+bN26FQcPHuQhPx/640+RpQEe8mVpqEj/PmvWLFBJ7Pnz5xfpcf7Kg5MF+WTFp7SVZ7ath6GpGfrNWw19E1NOVEKg/OreLexZNIO5u3SZOBMly1XgRDYJCf3yGRunjEJCXAwa+fZHlaatOJMdHxODLX5jEfTxHSrWb4bm/YZxJjsxPg77l87Bq/u3WL7+Ln/M4iGfM+3ygngN8BrgSgPjxo2DkZERJk2axEM+V0otCnKuXbsGLS0teHl5Fe5weMgvXP0r2fu5c+cwbdo0XLlyRUlJ/On51UBqaipOnDyJ8zfusPSRGVWk0wVS/vm3Tx4iMjSEpX4sVb4ig321jCDc9MDW9IBW6cBcsQiJl+aPgF3J36iA1ac3rxD8+QPzTbdxLAULOweoZ3IHkvbyZCGp6cG/kv9KZp69n6DAz/j06hkrcEUVch1Kl2UpLHOWkVWetNysYxcgIjQE7548REqyOKVn6UrVoEHuQFKpN7P3k5MususoNjoKrx7eA2UjIhejslVrQFNLK93vX3qcIhTT0cSIoUOy6Cy/q4E/j9cArwFeA/JroFatWvDz80P9+vV5yJdfbUX/yLVr18LS0hLNmzeHUI7MFyqbEQ/5KlNtQQhOTEyEnp4eoqOjoaOjUxBd8n1k0QDBNV0H+qKSCdDTuZZ+27B+PfO5tLKygo2NLX5v8zuE6sKMw0lGJl/+3HhZum8RJexJg/+GDfj69Su0tbVhZWWNTp06sWcKy1ND93cOXMz6YxlzpPYQJFuaxQFs3bIF3759Q0xMDOzs7ODj6wtNOQJtc1okGXNMH8+Ro0fw8sULNnaKKWnTti3TT8Z4JULSx6SIju7euQPKPvXixQuULesO79re8PDwyFHHWpqazJKWUywFv9h5DfAa4DWgKg3Ex8fDwMAAsbGxzOjLB96qStMFLJdeViNGjED58uUZoNFLudAaD/mFpnquOm7YsCGGDx+OFi1acCWSl8OhBiizTFxcHAICAljsBD3MCcLlyXgjaxj0LElISMCjR48YIJubm7MCaVzIpr5JdkhICB4/fsyqK9O4M38lkDXC3H9PSkpimaHIH3XIkCFMNlcGD3KhioqKAn0KX7lyJdMJjZsH+fxfL/5MXgO8BrjVwNGjR7Fs2TKcOXOGCeYhn1v9Fpo0+rxPkG9mZsYsWN27y5cjWyUD9vMTi502TSXieaGq18C8efPw+fNnrFjx//bOBM7qcf/jn2amaU9FadcNUUi3f1zpEi5FoRWlkkTWLIkKqdtiizYtpGSnKFkKSZYQcktdFe37RotKNfv/9X54uqdjmjlzzpnpzPR9Xq95nZk5v9/zPL/Pb/t8v8/n+/0+nfuD2QhhIcA9/8Ybb+iaa66JGpENnMgPP/ygKlWqqHz5zLPIhDXpP3ciVasn+ZH0k9m+u3btciS/e/fu0e7arT7cd999Gjt2bNT7tg4NAUPAEIgUAZ57VatWVa9evYzkRwpmrOyP523btm3upPKCO+mkk/Tkk0/GyvRsHvkQgYULF6p169ZauXJlvps99wMZXNIzMlxayfi4uKh7W/Hq4tmOlgc6HJCN5GeOmpH8cK4m28cQMAQKAgInnniipk6d6qSENPPkF4CzCqmBjD3wwANuKbxatWoaPXp0ATgyO4QjicDpp5+uCRMm6B//+MeRnEaOxnZa9r07NPn117R09Sa
dWq+hWl3eRKVL+PSSf1Q/DZKJZzLGXwXsgfrtTyeP1fqEmurYuqmyLwmVo0MIeWMj+UbyQ75YbENDwBAo8Ah8++236tq1q1sl9c1Ifj4+7YF1zJYsWaKHHnpIqampbqnGlpPz8YmNkan37dvXBX/mp1SaSQf2a8Bd1yv9xEvUrulZevelkUqrep7u7tpGv+3dr4TU35UaV0zVKh/vMtGQinLzpo1KyUhQ9WpVFO/+l6y1a9YqsWQZVapwrPtfatIBrV67VqXKHa8Kx5bRtFEP6ee4Wup4RWOlxxdT9SrHh2A4RPfEGsk3kh/dK8p6MwQMgfyMAPFCxGcNHDjQSH5+OpEE2UHegzNQEARGkC2yAUg+nvzt27erZs2aevHFF6MuUchPmAXO9S8ZNPLrgeTxvNFkUxhrzZo1eTxy+MNB8u/o2ELn3/yEWjSspbj0fVq1YbtSdm7QLff/WyeeUE3rli9Rl/7Pqf35tfTKqMGaNneFiqXu1MWd++jWthdo+MPdNXddqnasWaIO9z6ilo1O1S2d2qpwxVrasPJnPTZ+itZOf1JPv/WVKlX/m9YsWaghz0/VBf9XK0/vOSP5RvLDv1NsT0PAEChoCNSoUUPvvPPOQakOx2ee/Hxwlnfs2OHyVRNUG9jmzp2rv//9785yIxNGnz599Nlnn4kcqTNmzHDk/2jP/ABuZPI47rjjjnoswrnUGzVq5ApqNGvWLJzd83yftLRUbVq1WC9MfFE/LFuvsxtfpi4d2mjt4u901/D39cHEgfp12Tfq1GusXhp1v3oNeE7dbuioQsl71WvQOL00/Fbd/NBEDe5zh/btXKmXpn6tuzs01ivz0/TY3Vdr08r/avbSPSq7+WMtTKqh+29up0Xvj9aclHp6oMvlijskH3v0Dp/rmDgAiL1v/E52nbZt2x4SG8A9T6wA2V9CafSNE4GfwOaz63Dv+EbfPFfoO5Rni4uPSEtzcw9sPNPov3Hjxof8n3mHmiWIvnGAkFEnsOH8eOWVV3Tbbbcd8n8/71CzBPm+A1dMSU9H4C3ZdQIbfUYzS1Ao5822MQQMAUPAIzB9+nQ98sgj+uqrrw4BxUh+PrhGPvzwQ5dPuk6dOofMllRJZcqU0bnnnusKH3z33XcubdIpp5ziJBYUQnB5Uv8kHkebR5vjXbVqlYoXL+7qB4RCSvLB5ZCnU3zmmWf06aefatKkSXk6briDQfLXrF6rihUraOcvm/XOG+O1LuNvanHuier74tf65PmHlbpzhc5rfrOeHdRVPYe+rfManaXE+EJKSUlUwzoldP+I99S66bmKL5ShshWrqWb8Fi0ofK7ub9+D+kBrAAAgAElEQVRIqckHlJxWSO89009ritfVvV3b6KeZ4/XGmooacEvrP4tRhTv7w+9HVhfiI0ib6a9jSCj54ElzCcn09zffn3322S49ZSgNAs75Xb9+/SH3CAGs1EnwK4j0yw/Zu0jbGUrQMYYDzyRWhWh+7sQOkce5XLlyhzyfyC1/ww03uPz82TWMh++//16zZs1yc/FkHNK/detW98wMbvRNStDsGtiuWLFCb7755iHOErDi/7Vr13Zd+DE5Lowtkh5YMwQMAUMgrxG4+uqrddFFF+mWW24xkp/X4Ec6Hl55ihucd955h3T12muvOXnO7bffrv79++v555/Xpk2bnEfp4Ycfdi/nm2++2e3LS4iXOCnxjhYPPy/qBQsWOBLEj5H8nF+JeC4hRQTynHDCCTnvII/3SE5OUtdOnXT7gwN1QvnSWrvoY42bvlxdWjTSPUOn6J3nBmjN/FnqN+FzTRjWQ/c/+KQe6PuAKpQqojVb9qp6kV90fa+nNWrkEBVPyNCuPfuUmLxR/V+YpyG9u2jdkv9o1sp0nbJvbp6SfO7zAQMGuGs4O080xJOq1x07dgwJfc7xkCFDhHc9u74Zn+vh3nvvDSltJzEdPJd++umnkIwCJsxxlixZMtu5Q7hZmqZCM0ZBdvc3RgvPRZa0s2s
YEN98841eeOEFN5fs+sb4IuDtrLPOyq5r+94QMAQMgagiQFFEEmXg3MCpGdjMkx9VqHOns6FDh+qMM84QBYp8g8CSy5wXeu/evdWvXz+NGzfOpdLEq0URIz4xDG699Va3vO7lPVTRdFUgA1qgRyqzo8AjxzahSgCijQTHy0s9J0vi7IMHkWMN9uTzHc17J6M934LUX48ePRyJYikw1hupMzet+EGDHn1C/1m8UlVOPFMDBvZX0uafdOvgcfpb4m/6JaWERo8br5MrltSCL6ar/2MjtC+upHo+OEhXnH+mPnvvdQ0aOkZpRY7Tg/8erIanV9fbzw/TM5M/VPVTz9aIJwdr7qShWpV4mu7s3ELLP5moKRsq64EuzbMlg+HiB8knmAoSnh0R5z5FxhdI8r0UJ7PiTZB8Uu5SRCq7vrlfyJvPNeGLTEGIuZ/oO3h/T/KXLVsWEsln7qxKepJPv/SfWd+e5LPSFEplZo6PYHJP8r0EKjN5kCf5L730Ukgkn1UJVgmM5Id7hdt+hoAhEC4CxGPyHB82bNhfujCSHy6qubDf4eQ0vPQaNmyoJk2aHByVlxAafF6GZNXh5TV+/PhDSL7X2Y4ZM8a92NBqnXbaaa4sO/0FNjxtLDUfrjoknirmR6BvXjfGRa6A1+788893KUIhG9nJj8CIlFJ4oCtXrnyILIBCT8cff7woysPqhrXDI8D1gvwDL0EoMopYxHL+17PVa8KX+njCw7E4vWznFAnJ5z6haBZEmHidsmXLHjRuuY/CIfn33HPPQYMfjzcSFuRB3FN+pZC+IyX5OC2mTZumCy64wMmEvJFC35GSfPYn1TBxATwXvSFB30bys70kbQNDwBCIAQSQPuLEhOsg1Q5uRvJj4CQxBV7EW7ZscSeLQFGCbL1XDCkOpLxp06aHkPy77rrLedUg+D179nTBZpBW9sODj14XXSoeWJbvWdauV6+e8+hfccUVh3gd//Of/7gXXWYkjrnhicOLj7GQmbeObcIlgNmRdYyV119/XRMnTnSpQU8++WQXJAjpJF3o4ZbS2QYdMxc+xoGfN97Bxx57zGnXIPt169aNkasgdqdx/fXX69RTT3WrRvmxrV/1s6Z8uUJ3X9c8P07fyfJy4snnmr7qqqvcsXoSj36dGB2+I6CaYikYvxDxnHjyeTYR1IpDgH2JDcLjzf2FVh1DgucMxjX3GnKdUD35bI/zAmcCJJz6H/6a49nDc41VChwSkHIM/5x48gma5ZkIiSdAt0uXLg4jJEj0zXOS6xyDCOPFPPn58naxSRsCRw0CcJmlS5e6jIqZNSP5MXIp8HLjJcxLhijpBg0aOC09P3jqecHdeOONh5D8O+64w70MCbK99tprNWXKFEd+ednyIkSDj8zHrwQQAAcp5oK47LLLDnrlGZsXNd76du3a/QURSDhl4tetW+eq6gZLdnhZ0gdBwDlt9L1x40bnTT8cWWeZndUIApApzITnDaOH/5H55XABgJ7k47kMDBTkBQ9xIKcshpCR/OzPGrENGJmcqyMl2cp+loffguuTn8OtVEXSd17smxOSz/WNJA/PN/cU9+X8+fPd/cvfrP5hKGME8LzJCcnnWPGuU1WR+46+eM5QIZkGOYbcX3nllc77zv2dE5KP4c6zDmOEvvBSkTGMc8f/MEqQLSJHJOYoJyQfBwhkvnTp0u46oP8PPvjAPTP5GwcLSQyuueYa99w1kp8XV7aNYQgYAuEigCME7vTRRx85zmckP1wk82A/XsxkieDFyBIyLxx+WAKH5PPihOR7bzTb40374osvtHjxYrVv396RfP7Pi5zgC++1Yz8ir/F4oe/v3Lmzmjdv7l52NKQwGAIEbvBdcONFTcAafd90002OPAQ2SPjIkSPdikJ2mt7gvnnBTp48Wa1atXLkMVhzzNhIAR599FEXvMdqw3vvvafZs2e7OeHhZz+24ydwfG4AJEwQhe7dux+iIcZYwatH9h10tPmV/OXBpXlwCLye1GD
gPMdm+yNdY1JSsooWLZZrmW6OxLHnhORDiPHSQ1ZpPA9I+YihdumllzpPO2kxIbncL9wfoXry6Q9P/p133nlQOsNzCy8Sq2VICnlu8fzBCCDTTU5IPueP64v9mTeJBLhPeYFhOPBCg4Bzv/LsyAnJJ7AY495n3WF/gmX5G6x4xpLdh755lhjJPxJXuo1pCBgCoSLA6i4cBpXD4Zp58kNFM5e3g5DiWSaAlhcunjKkOBDQESNGOAkP5N97UXlBEVA7c+ZMd5Ih+VOnTj0kjzYvSfrBy8+2eNsgaizv8ML0nndIOi9SUnQSUJcZycdjzljMD6mPD9T1MiNelsiFAlPiZQcZ+0JemAvz528kNIFFvyAsEHpkN3giIRCQfO95Qy8OJkSX46ULDMCDYJAFBGMHI8FjB5EAa75jOR45D0aGtawRwMhChgHWaK9jrR34fbcmjHxCK7enqUffQapyTHy2VWiTkw4oPUNKLFJUcYVi7Yj+N5+ckPzMAm9Xr17tVu6454Nz3IejyQ8MvMVDzmoAzyvu3cAVuUg1+cyN1TY8+IHpgEEmUk0+zwEcCMT40HfgiqBp8mP3XrCZGQKGgJxcGccEyUWQGBrJj/GrAkLaokULR+jRP9evX9+duFq1ajkPPBVt8XjzMqKxPWQb6Q0BF+jzg0k+20Hykbagc2VZvXXr1k7ew0sMbxyNl/Q555zjskPg6QqWv0AayL8KURg+fLgj5Xj/eYFDPvbs2eMMCb7DUxhK/mw/Lh5AclFDHtHHQ+Yh8r5hzGDcEEcAuWRcsID8EwOALAHSQmAhqx6B+zI/jAdWHjBsAg0kpApPPfWU+z/4eblBjF8mR3x6GEecb2IjYqmRH3/98sVqenV3TX3nddWqXknpKUlKSU1TfEJhFUlMJPJFKclJSkpOcaQ+vpC04YdZ6vPsTD07/BGVLFZYKelSkcIJbrtC8X94dPk9rlCcEooUldJSlZScrPiEBCUWhtBCNpPd6kHhxCIqWuRQkhstjCIl+VnNI1KSn1XfkZL8rPqOlORn1beR/GhdudaPIWAI5AYCOG5Z1YTPZdXMk58b6IfRJ0vmeO0HDRrkdPEso0PyBw8erM8//9xVh6S6pQ9uZXt0qQS0QVIh6HjafWpIPwWILQQdOQ5BbHjyWVZnuR6pCw2NLcYE3jkMDJ+9xvdBn2h3WTHACEAXT8Ozi5ed5W5y9iML8vn5gyHILEUnOmEkRiz3I8PBq4axgn7eN0gCqwQcO948DBWfDhCvPbmvwYoUUt26dTukGA1p7cASDyNFnfwKAftjATMush0IPhjinQ6OC4BIsIris4aEcWoL1C5gyrXCtcN1FCstJXm3OjVtrNmLN6lekw5677mBev/lERr2wrv6v0uu1r97dlOxwnEaOqCX3v98gc7+Vxv1ved6XdWkoZZv3a+/X3Gbup+dph/jzlD3Ds21YOpTWpB4js4otk39J3yghO3L1W/Ua9rz0yfq/8QoVardSEMe6adyRTM0enAfTZuzSI2aXq1BfW5XEVcNNrrIIDXh2RBqjQvkLRjeoTSeJRi8OcmTf/fdd4ckccMZwVIyK26hGv8kGgglixf3Jqt6OEFCCfrHIcCKZCj1HnxmLmKRmEsoefJ5BvOctGYIGAKGQG4igEwbByb8L7tnpZH83DwTIfYNiYZoozeFqOLN5gUGQX/66acduSYYFyLqTyhkiwwTvPQhXASjsY8n035oZC148pGrcEFQHIvxkKfgQaex7ONJPtpUgnPZzy+Psz0BvOyPHAfijUcXWRHZNKi0y7hkp2CewcUYGAOpDWQ7UE6D8cKqAOQdw4H+WWlAP+9XLPAysg2ZgdiXsXkB0yANBP0yV+ITICu8lP0LGY8zucLxgrK/7xOc0BWjuWVcCAJByeiYA1/mPqsQBXG8TjjEU1qgN4O0EesQXD77SB4052rV0oVq2W2Avp71muZNf0Gv/SD1ueFSzZz0nNYm1tZN55fXpyuTdOUFZ+mJ+7q
p5UOjdOzW+eo3YZbGDn9c3056TN+n11Wfm9roq5f7678lGqt24Y3qO3a6JowarIxti9VnzEwNf6S3ln81RR+vilfbxn/T4Je/1zP9u2n6pIk6vVk3/aNWecVHWftDUPy7777rVrKyIpzgwH3B/UrwfiiN+4H7g6BqWlbZrhgbJwCOg1BIOwY1CQUophac9jazcdDEs0Lp79Ws5s9zgGcG12HwfZsZRjxPcYzwDMuu8cwjHoCAtsxa8NzBggrjYGPNEDAEDIHcRIDsaDhVfXawrMYykp+bZyLEvnkR4knDS05gK4FqvgAM3nVeOF6TzvIMDXLr08ghOcHb7clv4LAYARSDQi+P1x2yz3aQfLzovKwwMMi2gXeOAF1P/gn45cXI+Ehe8KL74FYMErSyPl81n14fRkBv8EsWQ4DG6gQedX4gLZBwjAJkPrywCQoki8vFF1/s+kCaw3eMDcnHMxhI8oknYI540fDoYThwLDTkPbzUwYzvvNECqSHjDnNi9QSM3n77bUdcAgN3OVYCiukTuZMVuvnfldWyZUsn8YqllJrrli/W5Tf017zPXtfYB2/SrLXxqlamiPbt3a0qp5+nXjdcqqEjxiqjUGF9/9kH6j52sk7a86MeHj9L40Y+rjmvDPoLya9TZKtGTl+qKaP7asrYf2vIW/PVoM7flJ66X8fWrK/bOlyh4U88ov2Fiurk08/STV3aq2SRwiHe+TnfLLt0s9l9f7gRs9ov3D6zO7qC1G9uHUt2GNr3hoAhcHQhAN/DQUn9kFCakfxQUMrlbSCd5LJnmRpvMtIciLX3mkGC8bKhWfcZccgq4Yk92nqW8oOlOn7akGA01Oja8UpDkkkV9+WXXzovPg3tPoGryIOwDil+hFee8SDWfOJFz6pBkPGIkyEoOMsOFyXePCQyrAKQycenwMRLyTES+AuRRkqEvIjjx+AhGBeMgkk+YyBRYj/SiTK2L5wDFmQUAVdWJl599VW3UsI4vJDx5oGHlx4R70BaUT9vtkEKhHwIY4jqwmjgQvFe5vLlEhPdL1++3KUe5RqKFYnCuuVLdHnXfpr36Rt6c3gfLSt9sXpc00jpSXv1e1Ka5rzYV7/X7qDrmp2jIT2u11m3P66akPxxH2vcqCf0zaQn9MnOEzSw+7WaMfo+ranYTKcX+8WR/Klj+unLKWM0+vNdGvdoD8WlJ2t/Spr279yiTfuLq16tyhr2YDeVb9JbXS6po8IJcTFxnmwShoAhYAgYAgUDAeoZ4QhdtGjRQWdmdkdmJD87hPLge0j07bff7iQQwQ2iixcdbztE35N8PODktke2w9I8EdbBUh3fF8QVSQxL4XjG2Q4JDwQN4k3fLJFDrtF5kWaT/NfkkMZbyxh8lx3JZ7wBAwY4725wLnVWEdDHI6Ehqw3aWHTyrBZA8mlIfzg+CDjHxrz5ntUHSDsEm89AfT9L5JB3dPUcC0Sc+bMNens88ezHigik3QcZsuqARIFgXfpEgsIqhq+oyf4s1zNnZFIdOnRwgcVgaO0PBJ599llnNGIsxULbuOpntbx5oL6e+bL2bF6pLp2uV9map2nnL1t16wOP6+SMH9X+7hGqVulYbf9lm7o/+YourLpf13boprqXd9VD7f5Pl7W5SbVqn6Kfly5W+55P6MwSv+rpGT9r8tN9lXJgt3rf2kUbMo5T6s71anVjb11Qu6zaX3eLTqtXV0uXLNe4N6ap1vElC1T6zlg4tzYHQ8AQMASOdgTgbcRNIrsOtRnJDxWpXNwOLTnWGVZaZg3vM0QUeQvaexrEn4BQCDiEHPKcVcNYgDwTXAeBRTaDhIUc+xSbwVONhAbyS8YeyC6edlYLkOWQHx1jJLvGKgBafeQwXrLjK9YiqfGrCMiHyJozbNiwg/IbLw1iRQF5DiSf1J6etGc2ts+r71cxuAkg74wdaDh5owPpDsQUuQnfQ1SZU9u2bd0KAxmEmDuBi6ycEARMwDISIPrFoLD2PwSuu+46ZwByjo5
0O7B/n1au3aQ6p54kZaRr987tWrt+o0qVq6BKFY5TfKEMrV29UgfSE1ThmOJKKFlOJYvGa9vmjdqXUUQnVj1eG9at0q69yapQtoQKFS+rooVS9evuAzqpRlV33+z5bYfWrd+gwsXLqHrViiocF6dd2zdrw5YdKnNcRVWtWF4JCfFHGgob3xAwBAwBQ6AAIYCjEs5H2u+cNCP5WaDliSMvd6+5DCXAK7sTEJxpBpKPvh4yfbhGcCkBqXi1+R2SDwk9nPc+s3584BvfYTjgTSd9JplxuHj8MXqpEMQXQgzZR5Pus9pkdXyeRLO6gASIhhGB154MOYyBMUHsgU8NGtwf30OokQxxYWMMhNI4Psg8nnfwhMQH3xAYLxB1jAgKfxGoy/FCVIlJWLNmjVsVQCKEbIrAZ46bObFqgHbfe/uzy7gROOdwNbs+kDKU4z8S2xAIynni+ujUqdORmMJROSbXBWlD4+ITnEFDrn/SfMZFIeDX9Z2aorh4Ygv+ePaRLig+LjoSpNTUFBWK+6OGwR/Pr0KKj49O37l5MaSnpf3xjIyLd5inZUgJ8fFRz6SUm8dgfRsChkD+QwAegxafOEKcujlpRvKzQAu9OvIOZBuUWofskanFy18g53iBefBDmiHMeLsJdOVvtoUQQjzpBy86qRiRfNCnzzSD3px0hFu2bDnsbBiTzA0Qc7zM5NSnj3Ab80LCg4ee7BT+OAL7w5CAYI8bN86lqssssDd4fIJbIXtsS4wBEh9IPqk1A4+PsSHXmR0zeCHTYXUDiQxjh9og3pBxshSh5//www//sqs3dsDUG3D8znnxGYCQ6SBXQtLEPMlqRA7+e+65x5FaVjzAn74wGjiHnHv6Y1uMJs4555ubkr/BnB+uIyRS9Me14o079uXYue4oWsS+BFizTSw3rp/GjRs72Y4FJ+fNmYKEv/fmqzrxnGY6Lu43vf/FEnVs11xF3SpCoYiIZ0ZGur7/bIaSyp+p0ysX1QcffabmrVqoRGEIbeSGxMafvtOiHSXUsE4lfTXrIzVs3kbHJEan79xEf//OzXrr/dm6/MorteaHz7WjyAlq3KCO4im48GfZtZwY/rk5V+vbEDAECgYC8+bNc/JqEoCQVSenzUh+ThHLhe0hg8hBfAq74CEgoHiSyStPdVb+xksOyYRY8h0Gx+Ea2wcH5UJm8dBjaECiyd7jM/pAQtkHLzqBq0h6ILYQ2ewaUhey2tAvKw40+iOgF9JPg+hiOBDYSwGvwMZ3rAZgMPE7+v7sij344/MVfolPYF88+RD9wMa2pM+kf1JnYoSwH4HIeKU5Xggrc2elhKBetiFQmDgBDK3goOLsMDkavgdnzhNGEasi1nIXgfS0FK3772z1H/+5TshYqYtveVRnnVxJmzeuV9UaJ6lwBJ7xjPR0bV+3SJ27D1TTumVU+LSWan9ZQ835fI7OPv9fOr5c6YgObu+OTep83Y067/xztHxnET3e9059N+cTVT6lvmqdUElx0S4yENFs/7dzStI+vfvKKK0q2kBfT3lWI8c/r/UL56p+w0Zau2GLataorsIm1YoS2taNIWAI4BzE2YmUGLlzOM1IfjioRXkfSCTaZjK8ZNZId0lWGqQkPscz0hKqxULu8QhDrDOT7kBIyS4D6Ua/jyHBdshp8LBjYNCQt2AsYDwgV+F3CDjpKyH/eKOZJ15qyD5eby/xCZzzwIED3QWJR9p7tRgPIwIJDMYGWWyQwSDxCD5mVhbwlpP3nrmj789OKtS8eXOXZYd5EkiL8QBJJ6OOL+6Ft5w5kRFmyJAhB739yIiYJwYI47Kfl+Mw1yZNmmjOnDkufz/SnuCA4ihfCvm6u379+rlVIa5Va7mPAJV2H+nZWd/vrKpXxw7QluULNXvu9+pw/c0qVSzyNJ7TxvfXgFd+1DuTxylp+wYlZSTq80XrdFu7JhEf3MLZL6vrgy/qhddf1UkVErR81VZ9Pfdbde3SKaQiWxFPIMwOftmwTFc0a64
ufceqW9sL1bVVE93Qb6SWLV2uti0uVekSRcPs2XYzBAwBQ+BQBEgljveebIPhNiP54SIXxf0gzZBZqrYid/GyDz8EhB5NFiTf58lHLw4BhZDyCeHPLIUmRBlpCX1D3kmlCemGGJNuEkkR+7GSAMHu2bOnS6FJYStILVIUvsdDj3QEI4AxWVFAzhKY7QaJ0nPPPaerrrrqLy9qKrQ1a9bMGQvo2uvVq+ey7ECyve6cfvGoQ7jxptMwXsi6442RQNgh53jl33jjDZcdCBzJOsQqAWQcqQ6yJrzwBPCCK/OvX7+++ySTEHIkDBrmhIEQ7KXHQ410aMaMGW7O1rJGgKh/KotyTqzlIgIZGdr32zbd1K2b9uxN0ZgXX1X5kgmaPXOG/tmkdYQkP0NpSb/rvls66cflm/TAyJd07mnV9eN3XyiuYm3VO7l6RAeWnpqk4Q/ephnfr1G77g+rc7OzNeej6Vq8JUm3drkmQpKfofT0DCcrQjevgM9IFwgy0tO0cO4s3fXEi6pV63SNeqyXHu9zn5IKFVap6qfptuuvNpIf0ZVhOxsChoBHAMkwzlSSg0TSjORHgl6U9vXFsPBYk/WFLDp4pX0gKbITJDMQUV9NlrSTkGbkJNWrV3dE9HDFsCDvkFVkLBBr+oVcQ+bZh9SSEFiquqKpJl0kRBjvO0YHRB6iTGl6jAFSTXLxEQRMfADzJ94AYwLPOcQ6OJ88em106BgLSH8g4Xjafa5/VihIeclYEHJPtpHVsMqAgREYOAz0bIPxQ8YhYhpYWSCnPctbjI8hxLEQWwDphMzTB2NQ4ZfVghEjRjiDh+xCmRXxYlw8+VjUoVThjNIlka+7oWoyxmhmKWHz9YHF0ORTU5I0ftA9yjittRocu0svzjugx26/Ul9/8bEaXdxCJYr8IZULp6Wnp2n2W+M0bUUxPdDuTA0Y/Y5ubPVPfbtFatqgtk6qEVmGqbVLv1WvsbP01L1t1O/hwfr3gPs077+b9cuvv+qGTu0iqkWRfOB33XPvg3p4cH8936eDLrxlhH5d9L5+SK2j3tddrIQIZEz79/yqPnd2V49Hhumlx3vppMvv0uo57+jGm9vr0THTNODBHipZLDEcyG0fQ8AQMAQOIkA1WzImTpo0KWJUjORHDGHkHUA68bLjkcfTDjHF64zEBs82BBx9ON56TzSRziDjgZRTIRbPd2bZb/B0o5GGsHPRIJmBUONZ91HaaNEJVqV/iDMpI+mfarHIV5gfRaMwNEgpib7+p59+cttCqAkMIT8+86AwEoQ7mORT2h5DwVedBTU87fSHkUIWHwqBISfyWXnYhnlgwFARmO/BycuSGAOvMZ8YLEhFWJ0AL+bGKgWeZfqHxHupDThhnGAQIBcCk6x05MHZkCI/4wW/Bww9AoYxJq1FHwEyvWzetEHHHFdZCRlJ2vrrb6pYsbxSkg6oaPFSSnDBoOE1rvdft2xQ4dIVVCwhw91HhRMT9duu31S0xDGqWql8eB3/udeeHduUHF/CpS/dvWuHipQqp92/blWJY45VmdIl/lItOyeDEZC8fcculS5dSvt371BCiXLKSN6rlLjiKluqWETZh5L379W2Hb+pfPkKStm3S3tS4hWfkaoyx5TSnn1JOqZ06YiMiJwcp21rCBgCBRMB+AjyYgqeRqMZyY8GilHoAzJLWkkILSQZDzaSGarZkjYJqU5mJB+vOAWgCAhFroKn2mdlwdtPVDZedyQs5L9Huw7ZRxOPZ9sTaXTUvhgVmV2QyeAB9xloGINAX6Q41157rfPqI9lhPHLL49WnwizbB+rxPTQUqmL7QE07RgMrADSKbmWWmtJn4MEAwkBgNcLLktieVQiMIYwEvPochx8DPDCaOBbST/n/s3rB9ldeeaVbIfDHGYXTaF0EIADRJ8DbPPp2WRgChoAhYAgYAlkjgAcfRQLOymg1I/nRQjLCfvAu421HWw6hxVNOsCcR1chFMAIg0p6oek8+HnbSaiI1IQgX0t2
gQQM3G7znrARAoAmGXbZsmfvB609KUC+JgTRD4CG/eL0DZTH+d7zk/CBxYQUAb5/ff926dY5sE8x7uBRygakqPVSMi9Ye8u4JfmYwcqx33HGHW80gjZTX52Ok4Mlnf/4HwYe4++aLcK1YscIZTx47xsVYQHrEsVjauwgv3ix2R7oDvqbRzz2MrWdDwBAwBAyB/I0AGnx4UjQkOoFIGMmPkesC4gkBR7MOySd/PEGskG6KSCFVQdriiTVeagg3mXMgsJApNPt44fGY0w2oPQQAAAb1SURBVJDh0B8rBP4CgnD5KrGBh45MhhUAn/Yy8DsuPL5nTHT3wcGpEHy2YazcaBgfyJfw/CMJwvvPcSD/oUow5D0zI4L9KIy1ZMkSFyvgj41tMRjwMpNG01ruIoBkCgkYRN/Sa+Yu1ta7IWAIGAKGQP5BAM89/AyZcqRBtpkdtZH8GLkWIJ5oX/mEsJPpBXJOsOqbb77pdOaQfN98sSk8+ATTkoKTlJcUmSJQlYaXGxJLYSgyyWTVGBuSfrgc8JBsmpf4BPaFp52Wm4Gp4IK0Bk89KwrMEzzIAJSZYcJ82AdjadGiRS5wNjBOgDljHFjO+7y5AUiviYeC82UFs/IGcxvFEDAEDAFDIHYRIJ6R4qFwvkjSZGZ1hEbyY+j8Q0rxlhNkivTloosuElKTzEg+2+K5R2aDR58MPJB6pDgYBOj3IbcYAQTKEmiaXXMl2yPNM5fdIBF+T+At+nqOm0/kSlkRdVZIyLIDnkboIwQ/wt3J8ITBOXHiRPdgs2YIGAKGgCFgCByNCBBDiBybeMlwC12FgpuR/FBQysNt0JEPHTrUkVcCb5HJINvxcp3AqUBeqTCKHALNeufOnR1JJz0khIpUnOjO6c/n18/DQ8mVocCH2ANftIrPrAwTDBdwQtpjJD9XTkmOOmUVhhUqiqxxXVozBAwBQ8AQMASOJgR69OjhsgvC7Sh2lZvNSH5uohtG35BS0g6iISdrDJ5oLD488gTiBpN8dOrkraeAlM85T+rC+fPnO3kEen3SaAantAxjavlyFyP5sXfaiBVh9Wn58uUuIxNpV60ZAoaAIWAIGAIFGQFiCHHI4nwdM2bMwTTmuXnMRvJzE90w+yYrTt26dQ/q399//31H9gMzx9A1KTEJvCVDDIQeeQ+EnoZmHzJF+syjleCDg5H8MC/CPNiNICOqOaPX7927dx6MaEMYAoaAIWAIGAJ5jwDyYnT3w4cPd+qLvGpG8vMK6RyMgw6f3O2enH/00UeusBDEP7CRB5/AWmQov//+u5Ow5Gbwaw4OIWY2NZIfM6ci04ngzafCMXUhSBFLjQhrhoAhYAgYAoZAQUCAwqN9+vRx2fyGDBnivPh52Yzk5yXaIY4VXGH1k08+caS/Zs2ah+0hPwTNhnj4Ud+MVQ0MoVgPKo76geejDgnGRVrGahXSM9K5WjMEDAFDwBAwBPIjAlu3bnXvMmIjBw0apC5duhyRwzCSf0Rgz9mgpHvE04k0x1rOETADKOeYHYk9WI1iOXPkyJGuwBsSnsAKyUdiTjamIWAIGAKGgCEQKgJkSESaQ1bDO++808lRS5QoEeruUd/OSH7UIY1+h+jx8UKbJzr62FqPsYcA1Z4ff/xxTZ8+XWQhQLdvRcti7zzZjAwBQ8AQMAT+QICsf+jtyRrXvHlz9erVS6eeeuoRh8dI/hE/BTYBQ8AQyAyBBQsWuIfmtGnTXB0IgpWouGzNEDAEDAFDwBCIBQTWrl3rKtWOHj1aLVu2dE6pUOoS5dXcjeTnFdI2jiFgCISFwM8//+weouQUbtasmasHwac1Q8AQMAQMAUPgSCAwY8YMV3SUT2q/4IQ65ZRTjsRUshzTSH7MnRKbkCFgCGSGANmkqBnx8ssvuyJx1Ido06aNKxxnzRAwBAwBQ8AQyE0Evv32W02ZMkWTJ09WlSpV1LFjx4M1jXJz3Ej6NpIfCXq2ryFgCBwRBBYuXKi33npLb7/9tvbv3+8
8+02bNtW//vUvV0TOmiFgCBgChoAhEAkCvFvIbjhz5kwXI8a7pVWrVs65VK9evUi6zrN9jeTnGdQ2kCFgCOQGAhB+SoTPmjVLn376qc455xyde+65zsNPNd0aNWrkxrDWpyFgCBgChkABQmDNmjWiKu13332nr776SnPnztWFF16oSy65RJdeeqnOPPPMfHe0RvLz3SmzCRsChsDhECDd7JdffqlvvvlG8+bNE8G7u3fvVp06dVSrVi1Xa6J69eqqXLmyKlSooHLlyql06dIqXry4peu0y8oQMAQMgQKIAGktkXvyLtixY4e2bdumTZs2ad26dVq1apWWLVumJUuWqFSpUqpfv74aNGighg0b6p///Ge+LzBqJL8AXtB2SIaAIfA/BKgxsXTpUlFJevXq1e7BvnnzZld7ggf+nj173AsgOTnZBVCNGzfO4DMEDAFDwBDI5wh069bNJWxITEx0UhscOmXLlnUOnkqVKqlatWqu0ChVaGvXru2q0ha0ZiS/oJ1ROx5DwBAICwErmhYWbLaTIWAIGAIxi8DR/lw3kh+zl6ZNzBAwBAwBQ8AQMAQMAUPAEAgPASP54eFmexkChoAhYAgYAoaAIWAIGAIxi4CR/Jg9NTYxQ8AQMAQMAUPAEDAEDAFDIDwE/h8Ouv6iZcQPYQAAAABJRU5ErkJggg==)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yXy6DhQmhrYA" + }, + "source": [ + "### Connectionist Temporal Classification (CTC)\n", + "\n", + "CTC stands out as the simplest speech recognition system within SpeechBrain.\n", + "\n", + "At each time step, it produces a prediction. CTC introduces a unique token, *blank*, enabling the network to output nothing when uncertain. The CTC cost function employs **dynamic programming** to align across all possible alignments.\n", + "\n", + "For each alignment, a corresponding probability can be computed. The ultimate CTC cost is the sum of the probabilities of all possible alignments, efficiently calculated using the forward algorithm (distinct from the one used in neural networks, as described in Hidden Markov Model literature).\n", + "\n", + "In encoder-decoder architectures, attention is used to learn the alignment between input-output sequences. In CTC, alignment isn't learned; instead, integration occurs over all possible alignments.\n", + "\n", + "Essentially, CTC implementation involves incorporating a specialized cost function atop the speech recognizer, often based on recurrent neural networks (RNNs), although not exclusively. 
🧠\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VETVavnMvnar" + }, + "source": [ + "### Transducers\n", + "\n", + "In the depicted figure, Transducers enhance CTC by introducing an autoregressive predictor and a join network.\n", + "\n", + "An encoder converts input features into a sequence of encoded representations. The predictor, on the other hand, generates a latent representation based on previously emitted outputs. A join network amalgamates these two, and a softmax classifier predicts the current output token. During training, CTC loss is applied after the classifier.\n", + "\n", + "For more in-depth insights into Transducers, check out this informative tutorial by Loren Lugosch: [Transducer Tutorial](https://lorenlugosch.github.io/posts/2020/11/transducer/) 📚." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gzNb1cZzvxUh" + }, + "source": [ + "### Encoder-Decoder with Attention 👂\n", + "\n", + "Another widely-used approach in speech recognition involves employing an encoder-decoder architecture.\n", + "\n", + "- The **encoder** processes a sequence of speech features (or raw samples directly) to generate a sequence of states, denoted as h.\n", + "- The **decoder** utilizes the last hidden state and produces N output tokens. Typically, the decoder is autoregressive, with the previous output fed back into the input. Decoding halts upon predicting the end-of-sentence (eos) token.\n", + "- Encoders and decoders can be constructed using various neural architectures, such as RNNs, CNNs, Transformers, or combinations of them.\n", + "\n", + "The inclusion of **attention** facilitates dynamic connections between encoder and decoder states. SpeechBrain supports different attention types, including *content* or *location-aware* for RNN-based systems and *key-value*-based for Transformers. As a convergence enhancement, a CTC loss is often applied atop the encoder. 
🚀\n", + "\n", + "This architecture provides flexibility and adaptability, allowing for effective speech recognition across diverse applications." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bq7zSEHXexqC" + }, + "source": [ + "### Beamsearch\n", + "The beamsearcher employed in encoder-decoder models follows an autoregressive process. Here's how it operates:\n", + "\n", + "1. Initialization: The process begins with the (beginning-of-sequence) token.\n", + "2. Prediction: The model predicts the N most promising next tokens based on the current input.\n", + "3. Feeding Alternatives: These N alternatives are fed into the decoder to generate future hypotheses.\n", + "4. Selection: The best N hypotheses are chosen based on certain criteria or scoring mechanisms.\n", + "5. Iteration: The loop continues until the (end-of-sequence) token is predicted.\n", + "\n", + "\n", + "![SpeechBrain-Page-2 (1).png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAvkAAAGvCAYAAADBg8oPAAEnCHRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDYtMTJUMjMlM0ExMyUzQTU1LjExMVolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY4MS4wLjQwNDQuMTIyJTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE0LjcuNyUyMiUyMGV0YWclM0QlMjJvT3BQcmhqb2hpc1BwcHYwbjQyNCUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjJYNHJWSEh6c0NCN2djWmtWOHlvSCUyMiUzRTdMMVhsNnRJMWkzNmElMkZxeGV1RE5vd0FoSkJCT09QSHlEYndSM3NPdlB4SEszRlhiMWRmVjNWVjk3emluOXlobFNoQUtzOHhjYzYwSXN2Nkc4JTJGVjJHWUl1djdkeFV2ME5RJTJCTHRiN2p3Tnd4RGNaSUV2JTJCQ1YlMkZlTUtnMUFmRjdLaGlEOGIlMkZYYmhVUnpKNTBYazglMkJwY3hNbjRUY09wYmF1cDZMNjlHTFZOazBUVE45ZUNZV2pYYjV1bGJmWHRxRjJRSlQ5Y2VFUkI5ZU5WdDRpbiUyRkhNVkpQTGJkU2twc3Z6THlDanllYWNPdmpUJTJCdkREbVFkeXVYMTNDejMlMkZEJTJCYUZ0cDQ5MzljWW5GUlRlRjdsOGZFJTJGOG5idSUyRlRteEltdW1QZkFINyUyQk1JU1ZQUG4yajduTmUxZkZwc043ZHg5Tmt1R0tkbCUyQkp1SWclMkZOSWMlMkJYRUs2SzhMQX
hhUnRIVXlEVHRvOHFVampQMzh6djdsd3VmbjlUZlowdFRudGZ3cnVSTDQ1OFhnVTUlMkZacjUzJTJGdG1UdzVuUFZQNWNBJTJGbzhsQUFUUXhBbHNqJTJGd041OWE4bUpKSEYwVHc3Z29NSEZ6THB4cjBMNkRnN2U5SzZtdUpZUCUyQnJSQmowRzNuUVA0b0RKWDRpRHVwUGtBYnh6MGdEJTJGY2ZTU0l1cTR0dXFIY0RucG0yU1AwZEF2eTcxSzVHUXlNOHNCS1AlMkJmWm1RJTJGMWdtU1F6dzRmTmpPMHg1bTdWTlVKMSUyRnU4cDlhMFBsWEhlZnNBYVd5ZjMyRmFWdHUwJTJGUmxjazA3WiUyQk5nbmxxdnhYc09BWEQ5SGtYOXBnMFgzRHk0JTJCN1F2cEl2Z3Y4Ymhvc2lBdjc5ZXVjTGFtRyUyRjZnT3U0SCUyRlhCbGh3T3c5UjhvM2JnRmxreWZTVjdmeW9zeUdwZ3FsWXZ1MzlaJTJGcDRmJTJGVTBETUglMkJWWU91TFpwcCUyRktwbkhWNzR5aFNJNzhBRCUyQlE3dyUyRmtGN0VJMiUyQnM0V1BHZnhtR2I4dTVROFpDJTJGVVRZNkVxSUNFT2RCSkF3TXlEWWZ3VUdkWFBFT2k1ZVVwJTJGWVg3N0NONWxuNyUyRmYzd3klMkZNYll2cmRLMm1YNVpQNDM5QkpvMDdWQUgxZGZkZkpuVmx5dHhHNDIlMkZBSEVrQXpDMVgyQVUlMkZZVU5pWmhpc2ZnWE9rM1RYMEkwaW44aHdoRDdCUSUyRkNrQWt3SWs0SSUyRlBkblZ0VFpOMTc0cFIxS1lkMzIwZTYzeVh6bG1GJTJCdUVzU1B6Y1loJTJCcVpOUGswd3JKJTJCZ2pqQ3h5cW0lMkZaMjJiVmNrOEpnT0lQaE93MHI5SGJRM3VEWElsWVpHemh1MUxsRzA4T1JYUG0yRSUyRnhhQ2w3V24lMkJIJTJGWHNTMXd2UGczaW1KVE1Dbk5DNEJ2Tmt0WkklMkJ4OUYwcklVdU5sU0VOUXYwNEpqaFUwcUplNmU5QnYlMkZTMCUyQnpEJTJGcCUyRm9tME4wblM3M0tXWDhVdGtLSG1KYXlhJTJCTCUyQjcxUlE0bXJ4Tm41bEx3NHRwVzd2cjdFZ1BUREglMkI3OWgyTUFFU2MlMkZpTVJobUslMkZjUUw4WnlHRyUyRll0Q0RQMjdIZ0l0JTJCdmROZlh6REd6UjBGRnJORHhMV21sJTJCbVBQa2xyZUE2ZjNXZDRjdnRSNWNrVVE1dThhQ3pvQzRhQUVwdDg1VmFQa2IlMkZIYzM4eTBFUFlDOXdMQ3lLZm9iS01SVlNKUFhucVBRN0VvVWpQMUVwOWhlcGxQa0RFYktKVDVCNWcwOVJGWXhqRWYzejhlenJxUFdkak9NZ1lkS2Z5dmpYeVBkSDQ5eVBFdjZHWmZ3b3dTJTJGWCUyRnJsQTkwTmtRZ24yNyUyQlMzT21TJTJCMDgxSEFQNzgzdjhTNUg3UzFmZHElMkZnamNQM1QxTDBRODlpJTJGeVp6RUpwbmxJZnVyTTUyMGFndWclMkY1OEFKOVRzT1RMUGhWOGIxWnpvd0FPRCUyRm5BTiUyRlNheiUyRnkzRyUyRjVyajBqeHlYJTJGVGM1N2g5V0NQb1hlZFdYTVBpOVI1bEoxR1lOJTJCUEx3SCUyRkVvSmtwJTJCN2xFaFF4TGtYJTJCTlJLUDZmOUtnJTJGVUZuNWY4JTJCajJCODlDdjJkNnRDZjcxTDRmeFVBbU1HJTJGS2UzZklSems5NjcybDlFTjlHY2xxajhER2Jra3FNY2tHSDVEeDc4U0FSTTBKaFA2WnpwbktSb1AlMkZwcWtnUDZEQUVqOEdRRDQzN0xaeiUyRnp2eTNiRjF3ajQ3JTJGcmtIMWJKejRwVCUyRjFYSlQyamVmMDRsUDFaRGxQc1BTaG56b0lOdm83MH
FnT3dIJTJGQiUyRkRUZmloSlNYODlVSVF2YkszN3JSNUF0MzhLdUJQV1pNJTJGWWxTSXhuR0slMkZFd0pLRUxqYlBKdll0UVhVTUolMkZBS1dmYmZhUTJKOGc3WjhWS242dHNYNldrV3hMdHkzUXlEcDcxajlUdGZ2R3BMJTJCVzElMkJlJTJCeDAlMkIyUW9LcXlCcW8xd1RXWk1FRktNZ2lDcXJUNTQyNmlPTzMwJTJGMU0yOTg2NHA4UkxhaHZvZ1ZPJTJGUmd0MEo5b0J2c3pvZ1g3JTJGd01rU3JaaThtQnZmeWMlMkZQejIlMkZ1aU5zWHdBS2Z0ZyUyRlAlMkZ4JTJGamw3TVQ5Q0wlMkJRJTJCaEYlMkZaSHlnYiUyRkxmejlZJTJGTDhYYldPWnI3dDR3JTJGWCUyRlg3b2lmeXVweiUyQlBoMk0lMkZxMUQ4VHR3cTZ2ZUJpbCUyRmhUUW5DcE5MYnNYaFg4SEFoYktlcHJVR0RDdDdnZm8xVTN4YlJ3YiUyQmZRT1FFblpzTHh1N2pvRWRhYkJBUnVQZVFweTlYa1M5WHdQczRtQUtRQjN4OHhNU3V5ZjZHOFlYRGFlYUt5SmVzUFlGJTJGNnNQT3ozWUczcEVNJTJCSEdmJTJCTk1WJTJGT2E5eUxrNTRFMTJPbGRud3pHSlU3S3prYnRHV3d5aW1LaHZzalJQVHJtbWVOQUwxSTNsNXJRcGlsc2IzZVlyMW9JV1pQcXFtOU5MWmZFOTFnVlJMbTJ0dFklMkJvMjg1SWZuZk9kaDN3cCUyRjE1SDRrOVVmRnJVWnpJeThsJTJCOHNxcjRJVHpmRzZyNkpLWEZ0Y3hSNUV4UXM0N3RhdGNiNWdqM2pBRjdwQXRua1ZQOUV3bjlJUXZDY2JPWk9PanJJdGVtUGxJNlRwdXZFTmFHanBzZlBlaEZOeHFnc1Y4OHlLdU5rZk1KaGR4YXdFJTJCZiUyRk82UmtyY2N2eDNsNWZWeUpTbDZuWnl1ejIlMkJ1OWZ3ZUhDNkF6bGV2M29welBuSzAyUVF6ZEhEJTJGdTdlYloyd1RBQ3k1cjklMkJnWVdCWWVlJTJGWVp6YktNMjUlMkJ2NCUyQm5OZHAlMkYlMkIlMkI4JTJGcko1NVJTRlVUWG5HTmM4NTQ5QjZwQWo4MUVaZjdBc2dGa0lGWUxJM3NzMG1wMFRKWWY5VFBDcXRYYzMzSDFNJTJCbTVHeHBpcmhzZllReVBZWUZiRHpXMDBBV1dLWmRpbkRGdTNUa09aSmxkcW1tMTdPUHMlMkJUdnA4M3V3bXljZEsyRWVFclJDc1dwYiUyQmVaNGQlMkZ5TGRzaUZFNGtTVkpEejFUTW84emxuU2xCeExqSGNRRUt5ZHVNbWswQ3FVN096c2tJclpZT0VpWURNY1o2R2tpNFRQSzlJRTRLTzNWYVVQaGtNY0xwMjB4WDdnMGRCWFQ0QnNYSGpMQ3lFOGttcXkxU0Y2bmVlUmVNVk9pa3dXbHl0MmpoYnh2a1Jva0RoYiUyQmpqbm1pa2h6b0NWTHZpbUFvUSUyQkt3akROJTJGM09qdGpLZFRvNkRxWGlBb1RnMnY0Q2ZzcHEwblhnTjRtRmd6bU1ybGslMkJ5ZmtTY3dSV3M4VHpSWkR5a3paZVlHSnpzUkxCWGZKak5rUVB0RzdHSTlzODkzQnlZaG5hcHJuSXhVSU1ubkxKUVZ0M3I3UklLa2t4OVpxUXE4cHlkenB5a1QzVk95eGJCS1BaRkJ0YmNUSFYlMkJSSEtwaGZ2VG9QM29aUjB6OGM1ZSUyQkhEazVRTjNGSm1Ja0RiWThRbHJpbmRqWndJSmxESyUyQjZFM2o3V243Qnp4OENSU1NrUXl3Smp5NjNqaGlGTDNhdldzZUZVa0lGRWVadmJpeHRvRERNa2FRbEhLMThzT2hUOFZGbzZlQTJFSzFOdjlsS0laSmtFZEJHM0FuQ1pDM0lzS2Nud1
Fyamt4OGloM1YlMkZ5eFljV3NqM3I2VWJGUUtlMDlLZWhJRE9OOXF2SE9hSzVHb2VwJTJCYW9kM0Z0a0s3WXhLaHVZaHI2cGZpeHNxQmJGOWdDODlxTmElMkZJcHVYMkElMkIwOUFkenNmZnJmUFBWUnpXajg3N1QlMkJoYlNsNTdqVXZUUWlXTVIxaHBIcDMwMWN6ZEoxaURwMTZUMENNJTJCaXVHZ2FkUnBybDJwOW00eEhoSUlubjk5WHdZVzJuMXhhSVkwWFc3MzZhQUJYckdxQ2VtbEhONmNadFl5TFN0dnpoOE16b3hmWm1GeHEyRnpOUmglMkZtOUsyZ0M0bkRTclI4ZE9MJTJCd3FneWRLdXBubWx4Nlh4SHNqREozR010dmo5RmdkMm1Pckg5dERScVJSc0RKMFJ4OXJWNU9xZE1pUnkxdHJjOWhWYmNNVGpIOUVuSFJaMmdmcTdRY3VDdlVYT2EyTVNaYjJSNk82ZEFOUEdVR3Nia0RzMTdvdkRLTUJ3VXF3NXhrYXNzeXhSS2lxSDNCMG11WWIlMkZUVmpVc1BXaUR0Wk9RbXNyQkN0T3dOUjdmSVdxQjZuUHVCdllXS2pmJTJGMkhZVmN4MmZIcU1MSzIyckJDTFpVRlRQeSUyRm1xUE5nJTJCSEdKbUVzcVFIM2t5dEZNWXRDQlFLVSUyRndVemZ6ZzBJcSUyRmh4UUdzT09Hcno1NmlZaDhUWlVrZ1dwdEcwaWJsc29YZlJVSWs1WWdDJTJCbHM1TnYwRmtxS25pZ1kzbEFZMk5SJTJGajZkY3glMkZRZmJST3FFckhjaHU2JTJGRTdkbmpYOUdQd1RjYiUyQklwakdDTDlwMEpaSWtmNU9MZzdWdnhyTldvbDR4VU1LVTVZWE56OExrcDVJcFo4MTdacUdzald4JTJCNTFQZkM2VDdVUFN1ZmRIZHRqVUczM1NXbVpQYUdVd0Y5Qm9QWGpIM2g4OFhvcEJHNkNXaWJta2NTcXJjZU8yS0QycmtUVGkyWGsxaURPSWtHcnJIdlIxVG1yVm15NnZPZHJUZTQ1MndpR2xCZ29OOGl0d3U3WVZqV010NHk5aEJJUUxkRWRKSDFScGxhSmJOZEZzUFNoNXN0cm9EM0YwbkNoWEJUQU10WXJNN0s5YklKS3JiNURRTlc5OU9oTWIzVUs3UnNYWnRzT214Y2gwR3JuOXFlN1pjTFNBMXZPZVhuZ2NBakZsTWU2aWtHSWliUGxOYzU2WWgxUCUyQm9sdGpyZFNKWjA3NEEwUWdtTlRiVWVSTEFnR28wZGZwRk41MmdHZHVjU2x4NFpPZlJmcGtsaDdjalZyWHRMSTRiUUxwMmNLJTJGaDhraDlsM2ZadkQ2eW9pZXVKTjFYcnhqclRVbDJxbnNBRVVWRzV2b1o5eXgxWXVwQW5DYjdPbXJCYWVMazh5QmJBWHVyT0FpcWF5MWk0NUsxcU8yd3NwdnpLbnE1SkVnajZzQXo3WXU5T3JyQ040TnRYODI1SlE3c2RDQnUlMkJFQVFYTWFlV3J3V3V2T0NaNGpXVEUzVDYlMkJGZjJnN2wzSHRuTU9VY3BDb0tFR2FoVVRzaG0ycCUyRjB1aXRWOHh0Smt4QlhYcTVMZXR1V2JEelNxRXc1aVlsYWc0Z3ZUaGZjU0trZ2ExbzBGYXUwM0VBRyUyQlklMkZiSGhhY1p6S05rcWU4bWRQaTQ5SUl1JTJCMmVUdkZJWmdEYlhINHRTdmlKZzg5NGZJQmlXQU9mTHUwZlR3b2NnUnR0c2J3clo5NnJkM0NRY3NmN3VXVk1OZUVCN1l2UHMwekhRRENvTnBDNnI2QVpaYjVES1pud2lCUXFWbmo2WXA3YjglMkJSYWhvWjdpbFJTaWxpRzFCU0dZN2UxbVElMkJ5YXBoWHhxSGo1QWdzd0JqWTh6YXQ1ekxRRFFiNGlHdWlwZ2VobkdUSjNOJTJCWkpoZVhrbG9VZ3lwUD
glMkYxMnJZWSUyRkFUcEtFaWZXSjklMkJ2bGlEMFJxSU1UcDhEZjdrSjdhSHloRlFmOVZ1VHhnMlZjN01jVWtJMDFFdUJsUUpjM0ZPM0pJMld1SlpsTncwdnhFTmNaVUhha3N5VGt6NVREMXN6ZFJaMmV1UkNDVWlxdE5UbEc4eDFxbG5UbzVlakRNc0NsTVVDOGd6VlBVQ1hWTGhXWGslMkJ1akU1U3ljJTJGblUxVXBoMGJJNFVkY2c2ZnNMMGJkaTN6Vjh1NmlaWXJ4Z1lneDAlMkJuMjJFQjMzTE1XWHQwdm5Gbmd5TG41JTJCU3lReTN3d05aSFJmSGlVQlZWaHYlMkJHbTczNW1jM2ZoZGdlTDNMN1BXOGVDTWkzVU00YWQ4NDRHNmZUYjYlMkYyZkQzUjNWTjJNa1A4bHA0TGEyNEkwZkNBV240OVh0OFA1MEFHNlNpblF2Nld1Wjh5bmprVEY1SmRuJTJCVGRNRTdmRE5kRHJraTV2R2lMM0hmWkFITTFPSndDSE5NJTJGdGQ4d3lPdVpGMCUyRnFPb25CZXVXemI2ZEJuRVp1RnFkMEJCendmRHB4WDclMkZrek1yQ3ZzcE01JTJGVE5pazhuTVlXU2drSCUyQkd0ZlA2N2VFVlR6bEp5bFd1TWZNWjltM1FnYmZnTkthbmg1OXolMkJ4dkJqdnpKejY3RU83ZVBjJTJGbWR5SyUyQm56d29xNlhtcE83N1JLY3lycXVLUHRabm4zM1BuSmY4emF1QnFMVGolMkJwMXUlMkJNekloTG5LT3BjN2ZUY1JIVXBybnhTMk1HWGp1JTJCRTRHJTJGSnhlJTJCU3YzNG1ZUHlrcmtGYURjJTJCVnIlMkIwNHpqeGJtQk4zejZoajIlMkJWc1JTeXVRVlRyd3VQJTJGNlhqUEcxVDdoUEdDcXAlMkJ3N0F4RXlmZ1dTZ2lScnZhTDM3M1FEVE9TV3J4TUpGbjdUMGNkc3JhU3NBYUx3V3U1bWJVRUdqUzBYd2N0bUJKSUtYcE53QyUyRks5JTJCNlRYZ0lHSkUyVEZGamxDRUlPc05tQ2M1czFPQTExckpKOUJtNTVXVnVJQmFkUlRaeURrdHdGWlltYThiQ1MlMkZDVHY4TmtJUUthdjE1ZkdNWWQxQ0ZvNE9icVp3MkxKMUc1V3FPZTBSazlROHlwRUpMRGFVRFJLR2Rqb3RSc1l6WTZrSUk5cHYwWUFOWmk5WnFNdjhaRjlleUNUR2FERFo0ejF2VFpNZ2ElMkJ0YkJwUGlaYzhZS283VGFXQ0NLR21PWndJSFUwd1NhV245OG5yejdaY1dBanpqY0dMUVJjUXo0ZlR4QktkcCUyQkNhOXdWdk14NkZWbWg0Z1VTbjNhNDBRVzNxRGZKMjd0YSUyRmk4JTJGdEd3SjZ4Wm11M2xhRlNkcWc2Qmt2ajVnaFNzWUJ5YTk2cmdLblVXdzVnJTJGSkhkOXhteUl4aHY4SWwydkNoMDJTVThLdSUyRlpSRE9kNXBCSkp6Smh0Q2NNOUxobEtLVlBlbU9VT2R3U0ZBY0w5dnNoSDdqMjhPUTJoNjlqQzBCQnBnb0E4QjdkaHFXcVQxZlZQVzVpZkdGQnE4WVRzUUp5THc5dUpIS0x2eXV5dFh2eGN2Q1NKSjR0NG1Nc05vNGUyd1VQMGtzcENidHlQTHJBVjV1MzNoJTJGTHJXaGRCdWo5QmZRdTVzWHVaWiUyRjJJJTJGciUyQlBCZWhRTkVZcWpYVzNNUjBVRmtkdWtVR0tWckJQVnBlSmN3ZHp5JTJGWCUyRk96dHFTWkZDQ1J5SmRKWXA1RDl2WklQaVFMTFcwVWdSZ3ZNeDAxMGtHOVdMVU5GUUtodTR2dWg1T01KJTJGV1FpYlFtWGVIUlZxWTdqVXV0UUxCVlZEQk5zczcyJTJGTldDUmhqbFh4elBGbHJQQTBRZlBCYUhhNENIRHolMkZxdVcwdWdYeHFKM1
pDTVdOM3M4RmtzdllkYkJZYlNXWHhpOVFlQ1QyMlBzYnFGVUU2U29TaUZMZExFak02V0c5ejAxa1BKMFVnU3BGb3VXZERzd2V0U2UwQVhIeklCS3ppRHBXREpwR25ITzB1ZWhObzZSaGlVajBOUkNFaCUyRjZUS0lyVHloTU55Z3ZKUjBaRk5zSVI5TyUyQmtZNUxtMEVWZ0tzR1YyblNZZmp3YW9sMERzd2dCZEQ2elVVMm5DSDZsZWh3b214ZWladm44SVNGYzZpcVpoWWo4T3BXTnRZJTJCUGlkN29OZEtFWTVzNDdtMWVvWFQ4dTVZVXMlMkZ0SDUwY1A0ejdCWElSTzBZOXdxV3NhY1FDZVJqODJXRUFkWm9RViUyQkZBQUVXUXM3UWE1TDlPTlolMkJBaW50YTJRWTJQakRnWGlvJTJGNFV3R1pNUzN2SnYxVWdMTFFvaDFpUiUyQmElMkZ5dGZGJTJGWERuTFMlMkIlMkJPdCUyQnJleWxac3J2WDA0YnNvZ2FzZ1VaQ01wblFzd0dWOGpPbjczejkzdllFME1CclNqS0x1VW50JTJGMkQlMkJWRUtZNVFyNThZZHBZYWkxNCUyQlp2NkN0aVlBUVFIZHZUNHdiSGZ5TDNyem9XS3FHMGhiamdsYjNFRVJqNG1rbU1xUHNmUU05Unh0JTJCJTJGazNLMWIlMkZCU3R1T2t1UzhHUlliRnc3Wmg1U0tNY0htanV6UnNSdVp4V3VPZEtUeVRQSTE1MU9KNEZFOW5kNm5nS3BzYUJqRkhSY2Z1ajVoZ0VhRnIwdDNrMmt6cEhnektJaGZXS2FOZUd3YXo5TVhHRHBSQXV0QlJmZnM0QnpMRkdqQSUyRmt2MEQwMEt3N3E4STFSSkJpQUhSaGdBQjBIellvQ1poVzkwUUdzbzNPOGlvbVVwSjZZeHI4NGh4ZkNCTENTWW1aZyUyRjAyTDdZZ1VkQzNGOVVUVVBnVDElMkJkMjVVJTJCRSUyQkkwQ2RISmRuJTJCNG1JUlBldklDS1JWQ01ZQUp1b210empHZmRZcDZJRHhzSmVCMmEzNzNtc0xOWEZRbTBjMEdnUVlDMUt5WDU2JTJCN3RyQVhUOVJnNmYlMkJLaER1Zm1IbjhET3MwUmlqNmw5WHRNdE9ibVNRTCUyRm9zbFVJUEF0eEZPdlc1dTYlMkJOYmdtYkZlWFIydSUyQnJWVGZ1b3BrM3VzQ01UWUhTNW1hNGUxZTFwUXF4b2ZjUmtaJTJGN0RxVUFvV04lMkJrT0cyZkRLMEpOdGd1ak55a1d5UkwlMkZadmU3dyUyRjZyZDE1UXU4Zmd3U2JrcFIlMkJkQW5zMjQyUnVVcVhwdDZwZ0F4SlBxTVE1a29BQlA5aG1uek4wJTJCMzVFa2xqVnZyelFqd3FRdm5xSExTeUJFa1JLSkQwazRCYXVNb0haR0wxUEV5RG1tcSUyRmxuVEZGbHJDU1JCSGclMkZUaHZKTFE2OXFZWFdENE9XMkpINnAyVG00SlVDWGpyM25STHoxR1V5cDJIREh0NHdYQjZmdklXR29OQ3Y1Q3glMkZ6aUJTM1lpR01aOUlKNUI4c1Y3WFFjdGZXR3Z3clNlTDg5amhSS3ElMkJ1c1VBMlFSQXBmTSUyRmowcnB3SXhDUEw4QzZVd2lqMVpHTDBBZnNWY2tBJTJCVkNEV09wRXdEJTJGdVk2ZiUyRmlNJTJCZXclMkZxOUVXbnZVMXdscDElMkJXQjNUczhSdFgxbkV2ZDdQM0xBMmhsUG9VSDhpZmZTU3did1pCc1VnQXlWVTVxJTJCWVRhQW9TTFRlZmdyNWk3YkFsWHRIQUNOUkk1RVU5Q0xpUXBHOTNXRUs4Y0F1TmJCYk1obEhFcEFoZ0N6TG9BM0NoZWslMkZMVDg4WXFsJTJCV0hCWVRPc0hWQVBoRTlnMjZFMlVsNHB0a0VoSjcwNEdiWXdMdExjT2dDZGElMkY0b2
5Vdk8lMkJxamkxdGJIVm1IS21QQU1nVHp6cHNMSHJnWEZBMnhycTNTN296N1g0aEZOMlVjZDl4cUdiVjRpcXlidzVLS1NyTFg1YVpBRmtjdG5Ka0JiZVA3a0VoaEQ0b3Jhc3NvRld1UVJ6ODhkeWxWMEpQdjdHVGRwZEtyMkVKQU5QZllHWUZNSGlTQXB0VG4xYlNVTXVWQVlTM0s0WSUyRk1VVWdxQTM0Z2JGOXVQVlhYNzFyVHMxMzRlOSUyQjVnRk82cDJFdDdlMlA4TTVzV0dQWHFmdG1qUHJGZUhGdER2N2g0ZUkxNzN0eTRFdkZNUnpSZWc3VU1ydnMlMkY2NSUyQngwSUVZVndFSDdZV2VQR1BpYzlQWTV0Q2tvSlh0anZEc2htbnJ1VG5IWGR4eWVVcDY3MXkyQlNXOUw4JTJCVEg4Qmx4TmxlckhxMFUlMkJzQzYwVVMyVWVINGd0TlJBUXlmM2FhVGVZMllXM3NLTGU5UnpneGFJb1Awd2FVM21WazdZT2NmTEY1VkFhdnNvbmRVMjJsSXEzRklhRDY0d29YdVlibURjbXVxZTdISXJlMUpFVXVsOEtZVm8lMkYlMkYycldPbTk0V3YyJTJGRTklMkJvcWEyeHZyY2dwUzNzZUFmc1FDbUoyalNWZXBBcDRxb2VxdDl2R2hBZWlYR1dyNCUyRjN6VW1zdHRuU2phSk94SHcxMmd4JTJGdE14NmEzb0J6ZXJNY243UUdDcWx1OFZ5SSUyQkNhZHZvM2RPVWpHZDlaTFVMemg1RWZZQnkzQ0FrcHlkciUyQnRsNUNmVnVXbzA3ZWw4RzVjZmZITTRIaE1NdDhDZnJJOUlJaFdlaTlMdnlLSEFGSU5Lb04ybW4lMkZZUjFwMUU1MkhZOXclMkZjbVBQQkRnTW1mbFVVdGIzZTNnWFQxbEVldGJBb1AyWWhDVTZqdlhGWG9rTktYaHdLWUhyenRuS2J3bXhNajlkcFJEM0JZc1MzRnQ1Wmw4ZGRrRSUyRmJieUJqblVCWSUyQkdTa1FEc2pERCUyQnZEd3NEWWVFM3o4TWFXSmhTVG5qWVhmSk5pODg5JTJGJTJGYjl4aUVhYU9UUXlzN0FPOXg1dUQyVXp4VTVUZTdLMVE2cyUyQjZiaFZLV2RiQlRZNVp2WHlFdkNSNk1laTRYQW5RTG1ob2pQRXVnellhZ2MwVlVZWlJJUUZxQ3FTZUJSbjFsV29zWmhBNUlyeWdBSWZnd3duVVdadDdYQVpLbE9aMjZlZzBxT1lhNTJudklPTWdwdlVMUUhpQzJDUmxOSHYyNkxQSDNNd0dWdzg0M0JVOW4zSUI1MGIyJTJGWUhnT1olMkJpelE3Q0UlMkJZV0FvMzNvQUhPOU0lMkYlMkZNY2J4aVhzcVFYbHJqZTlYbm9xYmlIVHZ1b3NTNWhvZEYyT1E2TDlwdHlpUDc1RGQ5S0lSM0FvaFNIcUFYRUJQZm1tMkxvYW1SWnpaU1N0TUdNYVFYRSUyRiUyRkFvVzVid0RrSEduWGc0bXVaMTIwbm1ZZm1sMTYwV2pCa3Yzb3piTDUyUVZwTzU0Y0tFRGM1ajNwY0xncWpwVWthOWVENlY2JTJCMmpPUG5vUUdZbXNuS0djdlZCV01XSlNSWUxrRk02bUlLNTY1UG1nOFhySVVROU5tS0xoc2tJTHVIRVlQRlV3VVo5UyUyRmJ0WVJ5MlNSVThuRUpsN1RIY0dINVlYcjJqdnl2STRlWTkyWFk2WVdCMkhmME83RE5rczYlMkJVQkFrcGQ0cjROSmVJQmFZUUM4R1dzVkhCREhWNmdCNDJJWE1jMkI3bmNZUVhjZUt4YzJQU1BvQ3ZJSnF5aWdrYVVIY3hPT05zR0MxR1FqRlV0c3hpamRERnllSzdWMEpBd1hJZzJBZzl3bzQ3UTNVWVlCRUx0dGJGJTJGTkJQUFUxbjRZU2hISGxVQ25GWktWT2F3ZHl3NVdRU2pyVk
VBJTJCU2ZMQyUyRmRQRjduZXdXTU91bW43SndWSVd5M3dqeDZ5dnBjRDVubDNyTkRIZzdvZENQS3VzeDR1QyUyRlN5blFjUjBsMGc2N0UzZVhhQkQzTTZyS2VaODZoUFI0ZU8xaWV4UzRpJTJCU3JQNzFoTm0ydXJYcSUyQlhqY3FWVVIzb1NHbExpbFVzYlFuYWhHYkRJTDdlV3ZvMEhIQ3JsYU5kejF0T2JrbzglMkJOdVlFQzdvZmxTVkRJaGxaWEVLSThZc0p1d1R6UHA3Zmtac1JkaGxidFJlSnNnNkdieWdZUDF0ZVdDSHN2ZFNRcnNZSHZDQlJsd1p1UVZDbkVCTUVodTQ5VGhtb2RTZlo5Qzk4R0E0YjdJMWFHdlRvaENBM0JueWg4d0xoUG12elA4cjglMkY4TFpMNWk3SXhpVzZIQkZDV0VQMWFIQUpudURmV21TSSUyQmQ0ZW9jJTJGdDFqYjk2dHVyU1NabyUyQjZIbEhsS3lWa2RsU01KMVlpdkJTSGhvTTY0SmJHRUVvd08zYzZxWFNrSzFhRDBOMWxReFZWaGZYS3ExRXY5OHFRV01sTDJjWkhrc1JMOERRQnN6SGglMkZub3UzUm92Vk5mbVZiMjhWNzVQMTNWVFg4aGFka0t3cUhuY2t6YSUyQlZQQTRWc0JDUkwxUzU0VmV6QW9sbDBCNlJrQlNwSCUyRmxlR3c1ZklHJTJCa0toWnBVR3BraUk2a0dCNGJsU2FLSmJvYUVycCUyQjNRSmpRb1diWmozanM2c2FsN3puRkZ0SmRoeEZxYTc5RlJpYkc1c3FBNTlwSEtzczlSa3VpS01hTWxDd1ZMZXpCSWV1UGU2QVZZSnBLJTJCN2Rkd2dMJTJGYWU4d0h4b0FWJTJGVE82ZVAxenFhMEZLaSUyQnBMeVZQZ2dkcmpZMXBnRldmQmRydTlsMXB1aUFWNVlubFBzbyUyQjViSk5KWGJGazNyMFd4c3lUUFRuWUlhVEh4aWhOZUlzWHFObE02dkJWMTElMkJzRiUyQjRhaXIlMkI2VVlFTGtNS3BnelZodnNVNVlJMGdoSGhUdnc2eGtwcFZsbFZzRkNTTFBVeXN6c1J6YTZldCUyRmJBTlFBJTJGZ1ZoVVh6dzY5d2F5JTJGQzE5N21GNWZkViUyQnVtM2N0bHZTJTJCeDBhOVR0UEdLblRFaGlSMG5QNURkTlhsNUtGYlJ6SFo3Q3p0MmV0c1BoTGlxdThpS096R3B4cWNIa2dwT1lmS3kybDFlREE1b3VUbmNyZmlBUlhybktpWmglMkZmeVZuTSUyRjcxSTYlMkJLSHJMU1hnWXcwR0NiRUt6MXB6U0RmV1Z4Z2VrJTJGT0RxZkI0ZnN3Z2FSNGI4bWh3R0VISmk3c2toa1FoR1U4TnR4ajBOU0pRWlJOaHdaM0hrdlBaelNmamlRTDhwOTRwcDlsYmVycXlZdGh0NHVnJTJCSHBDU01BTlFCYXNuN0RydjhmT0VxRGM5Rk1kWFZSSDNNU3lDZ1dOeXkwTUxlOUVZbEttUUdFa1k1TUhvJTJCVUZ2RWwyJTJGYXAwcTNHMmNscVhUeDBaSm56R25yMzBUdHUlMkZEUWZ3eUhZZ2xXVnBtVWpra09ZVUpLTGhzUnQ1elNQbzBMdEdoNlJYREZ4eFlRa1Naa3J6RSUyQk9OSnY0MHlkc2RsSTExSVI4bERtbDloMjZESjgySFVHRkh0YXp6Nkw1TXg0aDU3aWVNWnd5SDdJRzE2WDYlMkJGdURtUWJnZlNndWtoUEtUUDNmdUx4JTJGTWNLcUI2V29mTXJ0OG1ZWmhTZVNzVHRQTXFqWml1WTgyZGNjMmNRNVRrMG00c2VQemhSYm9hJTJCN2RJOWlCaEs1SGwlMkJhRDR4Mm5lRVMlMkZScEh0JTJCbDdkcjc4MHR6NHIzbVBQR2xMb2VjQjNQaUU1WmRBWElJT0I3SDRhTUUwbHhrUG5EYX
ptdW1yU0dDU016aHBvZElhbUtXMmxRSlpuQzB4WGFHZU1KaldZUCUyQjRIZ05aMWVodEpaOXR1cHZkTVB2NGNjJTJCZEpNOFZWaWFCNVJSdlpLRyUyRk5OM0xGdHAxcVhwVEdWM25rMFVKcG5lS29yNHZCNWVnZEp0Z1BwSnVmVEIxTkRiZmRxMldrYlF3U1A5MGtTQVNBa0oxc3ZMMG5uOXdYcU5kaENtamgwUTNQam1PVEFXNUpLSnU0UXBSdzd4Mlc4Zmg4dE1PUEhlTWRudkJZVDg0WUhNWnZ1dWxKTUhheVpSbkRUUmExcUZnb2o4QVZZS2poM05XWTlIQlRXcVNHYk44YWt1YnRJODFDdkNzMDVGZHJ0amFrcll6MGxMRnQ2MjlLVTZpNnhMQ0xqVFJiaUZlMUxNcDViNGozaHB3Sm1SYnI0b2pWaFBteE85bXFja3lJbGpWNUJwdEFFYmVQVDBHN0FIMGJXYUZsTThBT0xBSlJTZzlHTGlwOExaN0trYlhsVUNZUE5BOFhTVTNkMUxYT2xMRlJONVhCQ212QmR0NGN1VnJPQjBWQkRUZUJCQWt2bFB2N0FFMlBxa2l2TmtzVnhySnhlUTk4T2hCcE1qNFRncG5VNFd2Rk91ZW5hUk5UU28lMkJxRWVjS3RHd003NU9oVXclMkZnbmhLcEJUYW1zRnJ6bDJnM3hkY09tRiUyQnAxUjNTV3IyMmF4TGNUZmttTUVjZnQ3azNkeFpORmtzNU12Z0MweGlYVVZDVFJ4bVdtM1FVZHp5Mk5kT3pKZ1ZqeGlFQyUyQm9GMmtyS0UwNHFnZGNlY2lKWUplUkhkSHlWN2cwUFdqcENxQk5zZDhNbk9XJTJGQmhBdndCY0UlMkJQazFrUiUyQjZmS0NCaXVJR1gwY1pKczAwU25Tb2h4TGsxc3BzcWYyTWpwR3pEbmJKSGtEZTl6WXFrJTJGMWNod2Y2UjFHZjZlWmkzb1Z0bHhxb2ZIMXQzbHhkcVh4WDlBWFEzJTJGRFd6ekJyR05iWW9aQ1g1TW5RbkdyRGI1ZlVDckFpU0Z3TkFWV3M2cVpnQUJ0VFVONlBSYjF4ZUkxQk1kd05WdVpLT1RTMVFwYUNsOVpQRmlPWXlGVE5yRVhaNTlZenR1c2tmR08yd3RWVWI1ayUyQlZMQiUyQjlhUiUyRlhVNk9zdjN5VlloT1JjTnBpbGxibG5LWTJraHlQRlNjJTJCRXhLak5XcTRRSGZtcE1oWGxCSUh0T3ZranF6ajd3OEpMN2ZrQ2pLOXF3NGl1JTJCQ3lsR3lTQnJyeDRkblNRNWZ3UlBYSEFlclFudG4lMkJ6Mkpaemk0TEFrblMxcWN3b2MlMkZEREd3WCUyRkF2Y2F0N1dJRmd2Uzk2YWdoaGNmZlFoQSUyRm5NbUs2OHprMyUyRkRaOXVnMWhtVm5rYnhySGZSRWEwWmdna1JTMDEyJTJGTXhGZCUyQlExNjdWdmNNcXFZUFNjOHVoZ0J1Z3J3V0ZWSTc0SzZNMWNjaHFOVzlKa0JtY1RjUDFoZ1o1c1o3ckNDQ2c5JTJGc09uVm9HS0NhM2MwTVpXd2YlMkZTemIweGFTcWlkN0JKZUdmUkxkWXQ0NUV6YUtyWjVIRCUyRnVHMmtPcDRWRmEwWkhnb3V2MFV0UUI2SHk3QmY1dFZZeFNPQm9wOTBLeHpIY2N6aWk0N0VtMXkzME1VRGtpT3Zva0NXMHdXRmZWRzFJN3NKbiUyRk96SCUyQmJ3eXh5Yk1UcDJRbGhDWTFPVUp6JTJGbmNEMlRPRXNHanZRUnh0cHJKQXlSem90T0pQNTBldHNrNVBDbGtvNmRydEcyWEcxQWRnJTJCT3ZFS09RdXAwMzdkSHZBZll5RU5KTnBOM1NKcEhYNlBTYURJbDRaWkxMRHBnS0VtNTBHeTZPaFNtdXlFQ3hoMkVlZGdodURpMHBSME5xVkRnMThqcVlyeENYNm9NdjZkeW
5jbzcwcSUyQmZha0UlMkJaUnROS3hLZzRXUXA0a0VadGFESEU5RW00eG92YnlXRVdjeTlQVTEwb2F2aWpjU3VqZkNQTE05SldTa3BkRjFudkwlMkIxNVdpNW9oTDVPS0JuQjByeWdxSFZJWDZFYkRIWTRINjZrT250RVI4UUU5d1hpU3dkSWptNWUybjduNzI5T1FwMXhOV3BjcDVJdkFuVmF4bDNlNjJpMDdLMFJrT1N3dDd0cE93M1RhaGgwNjJ1REJiVlpnR2s4eTgxS0NSTEhnSmRBMnNVVlNNdzQ3S3BXS1RNTXdaV0tiVEJTRE9JRW9LMzN3MHM2bWJpeFQ1OCUyQkJYTyUyQmh0S1RpZWswOVZ4bTY0TExkWDdYcTA1TkNwcmpZbEJkTzZLbVNyV0xYVkxnS08lMkJZT3ljOUVXJTJCMCUyQnZUVU9WQ3VBZ3hOUzJVOEU3RXdMc3JzRkZhSSUyQjJMTEdPZTJScVViJTJCUkJlR05OY041SG5wRUo2eElpMyUyQklWSDZXY0E0cW5tdHVzNjIlMkI2ZTNCaEt2YWdmUGdMQjJqT2xHaiUyRmhUbiUyQkY1V3BWc09TQnZROU5ySjBGR3VvS0VCZWRIRWllTHBFcVV1aHgwViUyQnhYZTlraXpuem1sdE1OU09qNXZOUmQlMkZZcU5WVEluV3Z2MjUyU0ZtWFdCRyUyQkluNGNIJTJGN0tJdDd3NDVCbkN0WmZ3dkFEY0tLSmszNlZmcmpTaWplNXJKJTJCS2FXazF2R1JNTWZpOUFNSnBKU2NMRlRsaU5CRGtPUlpwRU5YR0hwc1ZwYnVKM0JUa2J1RERXOGg0ODAwUEZud1E3WDFRRFZSZSUyRnZFc09udHdwSXU3Q082ZmJzbDVtaFgyalkzS2slMkJOazhoajB5RWVkSnAzMmslMkJSckJ0SmZ1MVZSZDU1Sjh4WlpPYjhaMXglMkZRSVdVSGZjbXJXSTJGOHRxRkZvbnZ2d2JuVEI5bGJpckk2cWNYS3dBZ0xneVpqWFlXaGZVSjRBdThZMm9GMVRoSHpkbE11OFRoMU5WVkx3eUZTMzNsclVGM2NkTCUyRmNGcjVCT3JXQXBhUTdDS0x5TU9jRlc4TU43JTJGckJQTnJTalFQJTJGT05GT3diNDIlMkZJbXN4ck5GNCUyQjROV0RCZlVqV29SNFEzbk9KSkNYVnpvVnoyUHVXWWlUN2FZb2lpN29MT1psS2FhMVBrQzYwUFd2JTJCS2MzcUVFeU9JSTEydEdsVUoyc2lmb1luRnkxSXN6ZGwzNjJsNkVWd0tqRDh3QmU5eDNGR0FsRXFuTmVXSlhXcmZMeEIxaHpvYURlMkpINVhqeHV5VjhkSG9naVY5JTJCU1NoZzVRQ3RWaVJ3aFI2YWNZd0ZiRDVjQWhuMW55dFJ3NURmS1ZaVVFCUGZuSGt1T0IzbkZtWDh6V2dHNGVSSnZGMmdvbFRtTURUamloJTJGc0ZkTUdxd0VSd1FCUkNyc3liT3E0OVVYJTJGY2xlaXczRGxvJTJCTXBMMVdaM3RmWURCRXB1Umt2UklHZlhXc0pMSVZVeDRFWlVnN1UlMkZCc01Kenh3TG5IRDM0NkdydUg0a3RsQkdOb1dEQjR4ZjFkUlIwcUdPOERZYW9kVGhleWZyRjBxTEp3ZzRYVjdBeXROeGRkQkpPQkdKRGlzalBoWmNIbnpMMHRFSlN5ZlZodUNpJTJGV3pBMTB5R0QzRjViT3FQS2FxdmxHcGJZTEVqd3NOWEdXd0E2MG0zZ3ZiVzk2T2glMkZZcFlWbkk2bkx6T1htbnBtSUJwJTJGa0VVbU5PdHBoWGtYY0pXbzY4OWtlOHV6bk9iQW4yRDNhU3J2TzB0bHl0TmFvd2FvQ0FnOWRzTmY3TkFkT3p6NXhmVktONDVvWTlJT0JTS2J2TXl2cUNRNVl0U3A3aXA3a04yZGhZSXdMJTJGVEhQNXFvU2JDUG
I2Y0QwYUZKTmx6SEIwbXgwM1FOQnl6UjloQU04dEdMZWRESGNaSmFlbElkNkJ5REJSU2p0YTFQb1N4N1g0JTJCJTJGamZOdTJwMzJZbVNHQkswenZKS2s2SUhPTlU4S1VKTTZTZnJBcFB3WFolMkZHWE1SWDRaRXY1MnMlMkZubGpHRDYlMkIxa0J0NyUyRk1CODhiTTBvck9PTmxZVEhuTjIxa1BxSyUyQlRMSFExV3hzSEtjcHB4QnNRZ0ZkMDhvNzk4RSUyRlRRcWVKNkpveGxWTDdBS0xPVHUyM2lsZlFLeGwzQXE3OHAlMkIxZG9iYkc1Y3pnQ2czQ21BUjFsdjBPYU5Ea2lhZjUyM0FWSlhlQVBiNUtpS3ZkSmZsJTJGYU9XSW9ucEpzaEVBdXlCUnFFVEkxc3R6cXVQSndUQ1ZOWjFWbEdmeUdiNmFzWE82M1YwJTJCZWxkclJsZ1BsQ203S2JIMURRSVV4QWl0elI1dm00cHEzbFpzdElHMUpzN0dVSU5ENG04WURYZm9FRFk5dUt3Y0VadXhtQXRoekNESFpvcFNIbG9DcVJwYzVMNGgyTU4zblpidHhJNk5lN3RvcmE4JTJGSXdDV1FJdUZ1SUxNZThpdEJsVWYlMkJYZUJSb3lHalE3b1RkcDBHVTBHWXplMGtESzZhV3FIOHJraFFZWiUyQldVbVNjVDNRQllsWERTODhnVjlrYXFUeHo0ZldRaGtqZHhFZFZUY21tMXZaN3clMkZTNHNZRngxQ1VSd3VZVExXeiUyQmd5WHlGV0xrb3lBaDNickFFZlRVdDNlYUJScUpZQzlYUjB3ZGpFRklNWHFpVlVLZEpIbmNiWHhqc25RMHFmWWZZYUgyajZ2R0FPRDlNRmc0VzdnU0hKcGowOFlZUFA1MEhCZEdkaGRYMU1aZzh6OVFhQ0RCem1iSFYzblRBSGs0JTJGZnh4VEtaR01oakclMkZFSXp6bnBBam1OZzVoZGpndUNLSlZ4dzdrZGRDZk5WQkhNZmNabEhJVktMQ3drQnF3Z0VSS3J6UTBZRFdzWVgzWHdiSXk5TWd4ajRjSTV4TXo3Y2VYN29EZzJCVFFMTiUyRk1QaWpaRnAxcHBQUTFCQnZuZFdZR05PemNtbGJYMTJ0dWpkR0ZQdlNRVTQzbm43WTU5ZW0xV0x4WFRpelhlUFZ1Q2s1NnluaG5EMUtPcVNhbDhpQnFUWkpnclkwa3NtbExYbG9Ub014UmJ0elRCR3dxUHNocml0dk9QRHQyVHo4d1pWZGtFUHpucTM4dDBudFAwUDdVMEhJbTRzdkxEYVFpM0c2SjRjNkpnZU5JNiUyQkxucHA4NndaMFlSTG05TWt0emlCSm52ZG0lMkZXcEZ6eHRmYlJNcHNQTjFaT1RUZ2xvY0pVMU82cjd2ZXY1dExwT1YyZDh2a2ltblBLUkxUZm1sdFFRd3lGZU5ReG5Pa0pNN01vN1kxNDhsa3JLNWJwRExCSSUyQiUyRktMWHlteDhvRlZUTEVoSFdjWGxwZjdGekRYWEx0UG9ITVhMSmdHWXN0SjVDUlhpJTJCY0R1dWE3WkdpVXhXZlZDazZKNnJ0NFRKR3N5MFB6WUczRGJVRVRXT0ZFa2NoamRzamFrd2hjeEowblJDeWw3Q3FoYTdPT1I1b05JbWpSa3ozN0N3ODJSVU84QXk4azh6Mk9UMVhpQlZsZnYlMkY2NlAlMkJDZFlvaGRlN2o5Tkg3TSUyRlF3JTJCcEtwRUdXVlZWNEN3cVhaMEVtdjNrUDZyQ2RMZklaa1A5YXB0JTJCVzh0TkEyNzR2MnJpVkRrdEQxRjVsb1AlMkJ1cnk3VE9RdE5PNlpXQ014a25EeElsUUdPdm4zTkJLeXhqZDhiM1Q1aHVBRHVad0NXQ1glMkJRMnBxY3c5RExKQk9sOTVnQk9DdmRXNmVERTBIdUZnQ1dlY3RDU1hFaVkyejdjZXhFN3RDZmdvV0xNMW
VuaUxiWVlFek1tMlFiJTJGak85d2xSSG83TWJJSFRhclk4YVdPRlA1bFpkR1VzeE1zYnZpblBodUJ3WmZIT1RKaWdkemFTcEhZN2VKOFFhcEklMkIxZjY5bkVnT0NVVWREUmVlMzlqJTJCSTZUTkFmaE9oTmtaSW1iTlpmeVpiNFVxUEdQVTlxZUhkWnY2N2V3NGdHRXVpWE1yOVgyZ1VnNE5uN1I1VjZSdWpINlNmVmIlMkJReVFsSG5BN08lMkY5d1FxdUtNR291dHo0THQzMjNmbWglMkZPdGxoWW9Vd3dZNktwZ0kxcnZrZnZKb1ZFT0lHNmRCb0tuck5XUE5yTDhJeHVFVmdMSjI2QnklMkZrN0NBcmtTalJOUDJaa1RzMnVNRmFHUm1LQXB3a3VBQ1d5QXFBeXdCazlnaFlUb0tlYlZuem9VZHhTMXVNaU8zeTFEcjJZOHJnU1VIdkVUMUNCeGx6NFN4cTJxajkySk10d0hJeVcxNjVTRlFuZUdxeXdEaHBOeW9KbDltbWlFYjRkWCUyQmV0T1F6d1pTVVJDN3ZYYm5zQklwbDE0TWJTMyUyRlUxVjY3bTVvSHpzTlJCdWFNM1huJTJCMWlHTWtHeVI0JTJGSHA5YWhBRjFHWjVoWG95d2RSa3lVayUyRmZ0UUpXVHdSdjc1c0FhcFpUMFBMRkRBYkJ2UzNMNlVQMnlzVUwlMkZ6VGZJZ1RXa1lOcyUyQnEyWkJlU3o1enNTZjJnbUhZWVN6eFNKdWVWblRJZnQyUGVCT0xrRk1VR0NkcmdDVVNPS24zejRlcUJPM0kwN3dhYkFaYkJMdGZMdjNSVEJWZElWSHRYd2t4aG9rWWV3RmxyMjNkbU1kOXQ4U3NQY2x2UU5NbjN6N1RGbXNNVDRGbHclMkJkNXFtVXBrNHV2cHE5eFVZSU54OUJicWhBQ1hOTHVYOFplY0l4bHlFZyUyRlc0U29IN082d0slMkJhUzR3RWNPTlp5QzFab21KYmozOXZQWkxSN3BJZnVXM01SaW9uZWlCMWFaTnM3UThZdlJBZ240ZGclMkZKVTJZV05jOUJPM3J2NWRCMGdrJTJCT0xaVHBKcXdWSTdEZ1h3ciUyRmVySW9wczNRN2l4N3llTVZQVlZIMzdNR2ZkSWY3V1RFc0tKMEhqTkhHcENOaGE3R1FOcng4Wjgyd1dvcjNuTDZMbVFxY1Y1QWNsQ2dDZ1haU1dxOU42UkZsViUyQmFocGY0YmtDVjVhU2VTdlolMkJ1bWFPMENFbSUyQno1bk9jU25wUHIlMkJLWXpUZW5weHN6ZzIxZTEwJTJCZmJCZ2xOMk9WbFoyS3FtdFdmVjk4OFBuTmNid0hPaXZSckl0dzhDbkU4cmZOcUM2N1hFUHJmZm4lMkZadk9JMmJHNGZqTCUyRm0zajRkbjdzbklsRkUxU3l5dnZuJTJCZTVVVmNWeFdoMmljWTdMdW5KdGJINlJSdjh6MTVYYnZ2bno1cDROTW51TVB6YnZYZHMlMkJnUDNqQ0VXYzFMTkJkbCUyRnR2Qk11TDlaQXBoUEM1WW84V3hjJTJCRjVrU2ZtazMxWkhWNjIlMkJkTjFWUXdSU1p6Q0htJTJGNlNXcnZLZWRRJTJCbDFrdE50YzBzJTJGYnBtZlclMkJXbUlZOXhmZkhYQmxzV1NMajZ6SlNDQWQzRzdkRTd2bWslMkZuQ1l1cWthJTJGMHRIbEdUMWVpZkhIVWFhR1lZVE92QVRlY0pxQkRyYUNZcDFXYkVoMFBNVDJZdll6QzdEZFF2UXdsQnFjbk1RTFNJWFAwWFBPSVZDNDlkbHZhTDd4cFlkMTA4bGJXRmR0RzFFNVNkMDZabUlRMFBjMllNajhtUkdGYnFrR0s0SktmRVZVd1diMWJLVmpXYUZ6c3RvV1Y2Ym1pRDVPc2t5T1FBaW1PaW93JTJGMjVUZGJmY3MzaHdNUDl2STVqRDlCc2UlMkZNelFLeD
RQVTFsWGxkVDJka2d3V0ZSM2tWUGVOYmlhV2lqdkQlMkZlRmo0YVVocUxPdjFnTW41NjBiamRTaGJpeGJoWE0xbWFLVzdGakNKWlUwUnBxaThaeHFnYnl6NzYlMkJTJTJCZHBNbmpzT21xYnZNbEJIcEVuSk1ORXhrciUyRnJBSEJkRTRZenI0dSUyQjlLSzB0ZWY4ZkxyaE11dUk4YngxcERreGJTaml1c0E3ejcxdm0zQkViOUY5NXFWY0EwMFhlYmJGMUtzMDZRVFNOVng0UDRKZ0slMkJKT1JMbzNJSWdzQ2hyJTJCTHJIZjBTV2QzayUyQmFxUFg2ZkdvcFdnSGFDSEw2aVEyNDhxUUtWOTZhbmZ4eHRDaUs3JTJCSmRESTVlR3V3bmRkOVNTJTJGZG5pMzNPWlZhY3J2d2JlZnVqcHdUSWJIMjlMNXdMb0hFb2tIUUJySmxVcGVjNGhpJTJCeW94RXpYRjdZczRLZ1VBNmVDcnB3UkVXaVB0SzJoWkpJTXI0TTJ0UTVMNFZ5YUxYd2VnaVZGcUttN3EyTU1VSEM0dk1EandHbnVpZFY3Z2ZDUElGRTllNlgyJTJCbFJlRzRramk3WkoyZWhlU0RxNU56TUhsTTJNbXBnZGxCZndrRnpkbldLRlFlMUs4JTJCeGdObElwdXhJZHdEQ2sxNno0Vk02VHRwVEpPRSUyQjcwYzYyR2ZYMlZQS2RUV2lOTGhxdTVtend4akc5ZUVQdXI0c2Q1S2FSV21TM284SnIlMkJjVHliR0J1ayUyRnltVVoya0RlJTJGbkxJSmZJMDluZ2NHVVZpdlZUTjVGY2F4eFlZbTNyZlhmZ1UzOHJScmdzbWNyakc1dDd4QXN1R0ZZMFBiSFkzNnBzenMzVUpiNUJXOGNBM3JCJTJGUldCdUV1SlV4ZEtLUllxVEp4ZGFJd2VtRkxXQURPSlRqWE9ROGp5JTJCdjI4SzY1eVdWaTQlMkZCMlJhcTVTZkRIUFNwQkVLcDlYZ29hT3NMemtFQmk4OHluaHNOZnBtVHhZT1E2ZkJEenNnZEs3Ukk3YUIlMkJZJTJGUnllQmtSdEg2c2ZXcmFkQWE0dnQxTWVEU0MxY1VseUFVYnkwZkR4YkdhZHlGelRiam83WEFLdGVpdVdjM1BDMDhvVmlhUjVwblVzeG9rVzBTakRFYzFGTURJOWNrUmVydVViSGVsd0Q0dlR5NWE0ZDRCeTNlcnV4VzN6YlFxeE9hRzdJU3hrVk9uOVZwRjgzSVNSSW0lMkJkYzVmSUhlMEZlSEVsbktxeDlVdGw3anRKTDRQb2ptRmhPVFBOVllSdW9VZ3NjWm1GZ2dqNzJtNGZENFhKTiUyRk95ZUhIcXNnMUhVQyUyQmJZUEg0MG9vQzhRand4WXJFQnU3MVBPdG9ldUx1TkslMkZSUlZpcFNpSllOTHlUTHl0bFFReWx6RnpEUWlPZHhhWG5sJTJCemFGUEM1bk54RnglMkZtcFd5UkNiWkdCd2lRY01GekFXV0d5ZzFpaHI4VkdhOTF3aDFSdnJxMWNKdXg1d0xKTTR1V1ZFZDV3QXhHd25zV253eWJ1JTJCdDZTajE2REZhMWU5VHhIN255MUZSM1d3V28yQlFubWhpayUyQjNONmw1OHRrbHhhTlJpT0JiMGVrV1hsWlgxMUxZZ3ZiZm14NHFzdEdEVU0zblVoVlVjMmElMkJlWk15d1dKdlZzYkVYVEZpeG5CamxIUjR2RTVsMiUyRlRoaVhJM2ZiR1haZmxYYnl5NTNPY3k3T2dQSWZCOHdXNEpyUkl2ZnNlVVM1OW4xM3NqcnVEZXBYVzBHbzZpenJQSUR1NU5xV0JoR3FYc2NNZEh1ZVQyZERGNG1MaVljV051Vmd0R1ZWQ3A5RkhFSWElMkJmSzlWT1VLa1k2WkdOTnk3bXhUUXpmM2k3TkcyVEx2MzB0RVg3Wkxvb01lRlJTUE1uaSUyQnBYNG80NU
VGZGhWbWJGQXlMdGxzMm9ySiUyQkRNdFlTNXliYUtlRUxJTUgxV1VxZWglMkJabjQybjdSMHN6eDBRaFVhSnY5UEtFdk81Rlo1d2ZUQmpkQUlobyUyQkdXd2JZN0J6MnclMkJqSlRuYmg1SVV5djhDc2V3ek9pWEw0V2g3S283MDNWNjlUdEJPUWx1ZUtPSjJqQldmOVUxVSUyRlNnNFAxWTBBbDZoTHVMQzhhVDlaOTBtbExiM25mWnF0OXM0ODVMdWo0bGtHaXhDJTJGSlFEZiUyQlBFbXpINTU1ajBVTGtFZ0x5WWl1ZTgzSSUyRmNsMXNUVXlPQ3doTDBTRmxFNWw0MXI4T0laUVBWdzBscVBuSlFDcEZ5eHEzSnpnVU5iNXdDTG9oVWJLTzdmSUQ2MHBlQ3FPT0NhUFptOExJeFhQdVBXNGVjWDU2Tk9wNGxEYzBFaVFkYWtEYWhMJTJCOVlsSU96YU1EMUpFJTJGTXh2WmUyUlpGaVRQbUJadzFaa2ZpR3VWYmtSUk13VmNUZ0N4Sms4bTRnenVQY0xFeTJFa3IyYlFWbnpqa1F1Q3pmUlolMkZTRjN5VzRxeHJlRHlha2M2bCUyQkpDdTBYTFZWaEpwMFNYNnM3U2lVUiUyRiUyRnB2R0RLdzVsZVlCbk0wRHVPNllqTXM2WFVZVHVjNjJ1ZFMyUk9YQ3dmNFZiZVBwaGFOWFJWUm1KVVNIclhDVldQeVlTbDdKakpKZlQlMkJ3cndUSGclMkJuWExsNGo3ZTNQdHpYcyUyQlk4Ukh3VVp6eWNOb0tKYkk5ZjM2ZFJSdDVWSEpCNndvcnk4T0pUSkJCRUxEaTBBRDRQUDNYc0twMXFhMlpkWmhEQ2hhMXVPbDFpR0pQeE0lMkIweGl0WSUyQlh6RTVxZnBlME9zbEQ5MEYxUllrSzJ2bktpWGxvbVB5ZzlLbnJEaEl5JTJGRDZLejJkYXg5Vnh6MVFxcnZHTzdoMnBLNEN3NTV6SzgyblVQTUt5UFV3MWt6cUZTRXAlMkIyMW5RWDRXUVFwQVBhdXNoZERjS0I0N2hOaTdjaiUyQkJiNWdnWFlxMDFVMWFJN240SUc5NXR1NnRTY0tJU0Y1cHk1SlNqMDBEQ3Z1U0pSNGZwenJ1OENjU1l5TmIybExQa2RWYkRMZTdwMG16UG10Q3FMeVVYSm5qamdvRlJqbGJJTzFoSHclMkJjMnllVWh6VjF3RXRVVmRkeGZCeWp5RmJaSmNFWEdnTXk4VFIxcmxWM2NhMHpxNDlvaWlVd1I2bXZYT1REdWJudVRsZVB5U3JVOSUyQkZ0Z1NaZkFYb2R0SHNjQjhPcE1xUWp3bFJ4TiUyQkMyYmQzTiUyRmxPWDdnNXp1VkhqZXE2UkdtRXFkZGpIZkV1Vmtib2pJcm1kc2JXQk5jSWVDNDh3cTltZW15bHpveDRlJTJCZkN6bWlkRWVucWViOWVkRmlhd0JON0dTOGl0RVBMa0lkdk5ienBxN3BPR1pFbWF4dEElMkJhSVlzTXhKTk9pQ05JcGluYXBzbVNJa1dsQnNTY3ZiclclMkJjaUUyWmZkRCUyRmE3aGxIVE5obXZnMWZMOE9lMGVQOXBiJTJCQWVDJTJGdlE5WXN3d0JYaFdWJTJCSjB4a296dFR6Wnk5OXl3czM3dUVCYktVeEphZXUxdEtTN05odThIa2RGTVFYa1c0YWVpUWpoOFhraHZaN3djWHhIeVQ3NXQlMkJwbThucktITDlXb1pKdmdNNHJuZzBsNUZRODFtQTNvWU12N0FrZTNaOWZldXVhJTJGUHUlMkJSUWJJTEhsbDJkMTlyRGg2NExQbUkwJTJCTyUyRlZEbzNXVk1MOFFjZGdxS0N4czNjUEVWbWw0WW1PJTJCNVJFc21tM040MTBrJTJGemk2UUxSWGQ3JTJGdjVuWEdSTEolMkZyeWhWbXJHVXhGRWIlMkJQYVVPTVk0blowMXZEYWtKeE
xkMUtTNzRFRTczbkJxeHQxd3NpeTZrcUJGZzhhbHdPTkhJMW9Rbk5CMlNDbnlEM1gxbnkyVnVwSUkxRnpmRzhCT01lWlZrZ3oxdFpybDZPWHRlRVFyJTJGcmlGNnYyOWd0a0ZvaXBQTlB2djQzUzl4U0RxVWVFcTVZN3VZUGJUM1NLNyUyRnZIS2hYJTJCVUZ0Ymt5cE5EUE11d0pUSEFZT2tyc0k5ZiUyRld5dlA4dVIyeG1aUlI2ejJJSlVUUHhYTjVXSTNRNDRkSGV1SEZLVHh6TXVaTTFlYmhud0Q3WklzN2kwTkNiS1hLYTIlMkJxN3Nrc1JTek9kdFQyVDluYkRkbWFXWHBFYnUlMkZuVDRraGR0cXNacmRtYmxYVFI2SThwQldXcEZOTURsbEpjbHdWOEl4Wk5EZENnYlpEc0lJNldPODcxNGVqY2tYdThEN1A0Zko4dktnZyUyRko1amRWd0NsQ2J3aWhLWHRjaGFXbDZMTXQ3a05HT2JXVCUyQlRFWGpmcXZwejYyNVhHOGVNNFFwQ0NUMHNBeU9VVExaSTAxUlZaak1ZcUNFeGVqalI0ZEY0c0JXbnZmQ0FHMG9DSjNyeGhWQTV5dEVscHk0YlIlMkZKbWlreVpubUR0VUQ2c0pjS3VNYldhQVIzSFFMcnptblM4aVZDZlhQWjRtV3VWTjFLOTBkJTJCN1BGY0ptaE5hWXIxaTNJb2FWYnZyRzR2dHdkYTZLWGhmaHl6cWVjQ3VadWsxMFFKNXAwZ3FQSnAlMkZ1MGt3cUNpbUwlMkJXdWY0M3JMMjZxOUc2JTJCR3NXaWlzbGxDUjdVd0FYeERSMjRWdlJFa2VjSE1pJTJGV1F0ODJHVzlIOUVValJCVGU4ZmJsVUhidEhDN1hjUnlETjdmbnlJb3AxcnF4RVZSTktuREUyMTlZR1A0YzI0TXJVSzI3NCUyQnI1VExHcDRBSGNSVzhKeEJIbnY3OEF0bGtWRU1UcWVGNiUyRlkwbTJBYkIlMkJUajVtTVhCd0Q3Q3JlY1I2bk5Cd05DWURDZHhDSklMNFduSzNNczglMkJXdldNNCUyQnhGMTVhVGZsUDElMkJOREQ3dU1BakM2VWtQT1ZuNjc5ODZqeDIxMjZ2VU1mRlEyaThXcjFwQ2pvJTJCSlRTS01KJTJCQ09kMFY1Z3N1Z2ttMU1KaTFPYzVva0dqYjdRNzlnRE5KSHJXaTRaSFhlWk0yNWxDaW1IOTd0dldkeFN2dlFxJTJGNGlmQzlLcWxVQVdHRnlSQ1p3Q1NteFpnOWtBd3hxdjJ3TVgwQkt5WVJPdDdodGhQYlh5U3A3QTV5M0hFY0o4UGl1UTZ2TlVYY0cwV2JIdXZWWHRpSE84M00wM3k5OHZzVnRaNHZiQWp4TG1MQzl4OHp3bzJPSmpVVlA0THpVbzVWZ1R3S2psZVZZQnhkSjRRSmVQOEFRYmUyUnV3T3pTQkdiJTJGblVQOWZ4M0Q3YVlObDFjMnBVeUpxdW5mb0V2czdZWkFycFdBY1VodHJuazVrYkljejZIV2ZCQzBxJTJGekVaJTJGdWRmMjBua2pka3VHaWxVdkFUWmFGM3F5eUZMcCUyQlF1SXZmS090eHAlMkJvUVphSSUyRldjc0xpcmR3SlpZcjQlMkZEdGNTQk51eWNweHZKb3NDYWZib3pDUnFjMmxsOG5yNnFzJTJCVHUzakxJbDB1NktkTmVIWjE3OG1YTkFFM1RDQkJGQjJrd3FDaXhDU1ZpbkRCY3BIb29nZ1NZNGQlMkZvcGpBVW1JR01zNXpac0clMkZYQUhadVFhRUdiY29RbDZ2S0VYR3pONFNSUHM0bjNUcmF0bWVnSHMzcTNLZDVISHhDNlJSeGlUY0c4TW5uMk1vV004T3U5NFglMkZVdzlUb2FZS1VDaGJ0QjBXSndtbXFyQ3YlMkZBQUdNN0RlTWczVzJnejduS1NzdiUyRkQzbmVzUFc0c1diNF
N2Rm5DRUk0Z3ZOJTJGQmUyOEk0T2tIeVZLcnJ6UXp1MW5Nb2xWZnlaVDRFOGpJakloeklzTm9mbWdUYnc0dEhwd0VKRDRoRGtpT1B2WFRmNnZESUwwcWpuJTJCUlJjZUM1aEtzWVBXJTJGcGxSRUJRY2UyM0lRbTI3M0hIYXZJTm5SeHlYWXBoVlhMcCUyRiUyQiUyRmQzNW4lMkI5dXFxMklIb09jWllYQ2tJJTJGd1ZCRiUyRnFGQ2RHdzZGR0p6aWdkc3VFZFZHUFJJbGRkU2hPdyUyQnU2cXY1VEttbnJ4OHhOOXAwWkkxYjN1dUN0eXlRMVdCZFpnWllrQzB6UnFGNEF4cjk4SjUxUmUxSlVKJTJGTWJwZFI3JTJGTktTSkVxZGJmZjVmS21YTWlMWjElMkI3OExYU0Z3YmtIYnoxV3olMkJrZDJldmxJcDFaWmEyR0Y2N0xLRUphaiUyRmVZcmxzM1NhcTFGdUlmdmxSRmdOVEVXQjElMkJUdmMxSVl2dTJaRGJTUTdYS001and6RUJZVFhGMmRhVG1JNTVtZ2U2NjglMkZJQ043UUVUbkwxN2p3dHFqZW9vc1N3UEwxc0d6U3hoNXllVUVNV1NUaEhSWTJPWDNGNGtNT2IwT0xLR21INTdPbHQlMkY4VkY1eVNGUm93REhxRHFLVSUyQjY2TEslMkZSWHhKT2JXRXpmaiUyQk1PdDlwaGFNTkRsRVo1U1p6TU1pRXpNS2daSTBUSGZuJTJGQlVaMkx2M3hSRkpMUjMwb3ZTY01rNSUyRmthZHExVDdrdDBwalpmS2lib0QwTlBjaFglMkJpWXpLek1BYWxpb2ZOd0Y2Y3d0aVc5YTUlMkIzTE5GMU1xVE5Ja3JXQkNhdlg1eFM3TFZtQ095JTJCSmk1RmoydjhFR1lEc1Fpbjd0aHVEJTJCeEdSRjBNZGw2MGNXTiUyQnVPTWYlMkYlMkIySFh6TjRsaHFtRVk1ZEhCdVY2YktmdkJQdXV6UGhPc2o5U1JUYThrNkJjczdXV0tRWFdZcWd3RzU2U0tEYVVKaEpmV3Q4QkJ1U0UxTUVicG5DNDlSaVhSNnRMaGd2WFpDMWh1enJwOGNWJTJGT3V4NGpqblFkakwlMkJWU2RNY0Z3NWZ0UlJtNFFCMmdnY3hYekV1WDRMOElwa3EzUU8lMkJnb25Ocmt3c3olMkZOOW5pUm5ROTZ2R0tFTWF2UEo2QXU0YVk4OVloJTJCYjhGY0RGbHJrQTZyUWlVZ2wlMkIzcTdWc1g5WXIxYzJUQ29GWlpoWVRCd0pybkRrcnlvb1Y3dG1yRmtiM0o2dUNtMGdDV1luVEpEd243d1RROTk2b2MyN21oc1Z0bVMlMkI0N2ExV2dBTHFVMFR3eFBkeFRtRDdEU0dzJTJCU1d5enVxc0tjeGl3WFYyJTJGbGZKS1FGS3lHdzVlSTh3SmF4MDhhYWt3b1ZhUmg1SVR6Qmhrak9QRjJYbjB1U1JjS0MxeFd0WGJ5WlQlMkZxbE5RRlFWWmh5MlVCM0R5UWUyVG1HekRkM2VaUTNRYUU2M3BiN0k3eUVDUmd6SUtCRW9IM0k1OTBWZU03JTJCenJlQWglMkZza3BoNnJ4SWRmUGVCaUo3RG1DJTJGTTZjM3U2THdlT1hpS1Q3cHBVU1FXVE1DUklBdWxYZHJKS3V2dFNTS1lNbWZBWU9vZ2clMkZoWjliU1YyYmpvVGZDaW9Femd5bDk4JTJGUXF3UGhwRFVsaUhzVm5UQ0ZpeGIxS0lGcFNKTGxhUXIzSHZTcGhRbElsNmhScXdOMTN4VXkyWU9DUFFZaSUyRnJuRFJQZW56SGkwbGViUldlNkVQNXhrT1doQml5S29zZktJeXpkJTJGTzdYem1UczVMU0gwTnc1WVdRaXJtUzFaNlFhWCUyRk1wMiUyRmwlMkI4Q2h3dUlXYUt0OU4lMkJJR2JxMEFwWk5XMjlPdDM5UmZ0YTRXd1
pnR2ZjdFF2djl3NWlocUJyayUyQmgyOXFzZjBCRmZBWEMyVmc0dUxUWEZwcGR0d2NUaUM1TVk4cjMzWjh1N3o2QlRveHhmVkFLSllZOSUyRmoycUxQbnFYTEhjSW03SndvUHhRJTJGTFV4NnFsTGNSS29kJTJCUXN4QmR3ekNxU3Q5OVdHSXg2NldERGtscDEyaGgyOWphZ1lQUWZ4NXRmWFk4MXcyWUdjekdWOGF1MXlBJTJGMks3ckI1ZGprSE5vanQ3MnFkVHZGck96RjM1bHZmRGxWRVlpaCUyQiUyRk5PZE9DUmRkNjBKUTVVczdFWmphUWhKRWZUaVB0WWljQnp1WTg4dVVUU2hTZUxSSFJXUTlPaUp2dUh3VjZ4S2hjNXpLQ0dTRmo4ZFRlMHBRcTFMU3F0R1hvSTclMkJZWjI5MFpKbzhzSVFkZHdFMWhYbGMxZmdPb216T1pkQTZFZHhqU1FrJTJCd005M01RYzBJbjZvc2pTVDVpVms4aE9USTNyQ25Kck5kYzVKVnNKZllKYlRDNURIQWpDJTJGYTZQWGRVZDRUWTJuJTJGNTRRaUlqV2U3aVI0andLTFk2M1h6VXBiNFJrWDBkNlU3NUtDJTJGMFdVcE9ocFM1QXcxUlVSV0VlNnNMcHNMTW1UNGs2djNzUnA0anRPUFNPdENEMlRVeElnJTJCNFVZV2xUN0JDb2NmQUtJRUc0RHdYeTdKZ1ZBSHFLVWlHJTJCVktRNW5pVmJab0dFYnluRTclMkZJSUd1c0RUNTJId3ZFb2QyUVcxVjU0SXVkNFlwbnV5aU90bmhvT2x6aTNsdW1id3doYnJCaHFKSkxHTmVzQktIMnpXb29FdmxyR3pQdEpHS1QzYm5qb2Q3UUhqeSUyRjN6Nk1hOWZXNm5jOG9WYzFweHNnOTkzblFvJTJGM2xYckttdlVxRlBNVDRQQkExU2NxJTJCVTdHQmdhWEN5SVM2MklEZzdTYU8zWGxsQm9kZ3ZqNDZEaTZSdFJKSURaQnlHdWdJcE54ZVJXQlB1NHdTZmZwS01lRXo4NU50blRVZ2s1VmJtNEVFYlh3dGI0Mm05WndWQjlQSE1HQ2pEa3pZdm9GOG1zYjFoRWtWOU85UlJEMXdqUlZwTlVTUU9JejJlSE4lMkJud1hWSDNnbFhab242ckFhYmtTM1Eya2o3RjlCZGF0TlJPdTg4aHA0TWE1YTVvSWNnNE5HRFVpSGUlMkZYTHBYeWJXcGw5Q3Q1anhIcnNYNEV0OHhFTWZnajlTVnhDQ3BYeW04S3dGdWdTS3klMkJBYnpSNmN6MzNqanVHZHFpYjNTTncwQ29VNXllVTdFQmZtemdueTFlNHNIWSUyRlhlemlkc0xkMUIzaGZlYWE0SU56ZlFzU0p2R1V0YlVRamJTWlRNeEpyVVRSTkRjdXJXQncyVll4WG9KTG55clV5QUszclV1Vkx6M1h6TXFHRngzSGcyajZNbHA0aXclMkZmTjliRWlNNEVJaURYbmZlcE1TOG11SUc1S3NGSURZQmZwT3FqMjRnU1BocDFQSmQ0cCUyQlZrN2dWbkslMkZGbjlTSXlHemdNcDE4b1JJQ1NkNVRoeHYyaVFzUGxScFdtSkVpTGpVeXY3RjlpaVhyTHczMzdvcGNFb3lZamo3MGNsalh1aVJpVm9DTDFjZEdDWUFQSXZXV0RidEFMdmxVRHQzJTJGaHRtSzVBUkN1TjUzZnh0UjQ1OWRDR2ZuVHUlMkZvQWMwTEZrVnBRYnhnWWJJcnh2Skt0R1h3WUV3VyUyRmRpb01oQjJHTE5PT2tVakxDRUl3bG5KbWxUQkhvR2pWNE8lMkZlUmpQZEFDRnMzdnc5ODN6Y2V3N1dOWTB1eDFwejhtWE5pRTJvR2ZmUiUyQkZvWXpuTSUyRjY3alpDSHhvSndVdGJaREJ3SyUyQnA5OXRDUXIlMkJzbFkwbVV5Zm1YdjFsYk5SUTh4a1l2V1
Uya2RHOW1xeTdITGk2QkJXeEhVS0l4M056akZEVFhzdE55M3klMkI0WDZ5VXRGUmF0NENDdXhXcmVBOHdFS1NIdWMzU01Ob3pDZDVLNEhrbk5pajd1bnR6S1NDREc0aDFxVWwwMHNBd05wJTJGQTc1RWRtOUVDanlaSGk5TjFpTWw4TVFuRGlyUThGYmolMkJ0MUxxUmFSUlNoTiUyQjlqcCUyQkJNZjNSWnhzOWMyWUQlMkZVQUlEZElmZlVKSXVjbWpxODJHS0RmUWl3MVFYUTQ3ZXJqJTJGWGNLektqaSUyRjBEUW5DTzVkVVlJckNGQjB2cUduYnIlMkJybUlCY1Y5JTJCMTN2TmU5Z3V5dkxlYzlIVHNJQnV6elVwUEJRT1NMS2hUaVZrWW9sZzlYZFNGRHhYUVlwV2IxdmhScjV5c0R1RHZZanZXRmVrUzk3UlNVM2lQaSUyQlN2MnVsakN3NyUyRmQ2JTJCRXVRM3BLWHA0VVBvY0xiZERGTldjalVwWWxqMnVwZ0FOOUNQV1pOeGl0OVpwNFp4JTJCSDZqNU9CVEpPUHhVdm9CdWVuem0zJTJCOGtIU1BuMW1KcDVQZW96ZkdYSXlrWUZ6bHNGJTJCU3R4b051SnF6WG1ReE9lQm04UHJnRkhCTU9tRzRIVTVrRUFsZE9aaUhkZHNmQSUyQkQxOGo0OTA5UGpGRVlwczZ5bzE3c0ViMTNSQVZjWVI4TTE3NHV4S2Fwb1JodXNJS1NSbVB4eEpzYUE4ZjFLZFRpeXl3VmxLNjJZTXkzcVNBZ1ZCZ29PcUxGYUclMkY1biUyQkNkQXdIaUtGa2k3ankyYWYyb1FSSWxjNTZxNTFVaEE0Q2NiY2l3dTRqSWs4bWtQbFI5dlEyV1V2cGVaUyUyRm1NZGolMkJaWDM2bXJBVEc3bGtqVkF5ZDg0eEp6bzVLSXhTbFI1N3BXaFNja0ppOUJ3bllPQWlGJTJGeGg0ZCUyRlpuZVFOa2pKOEVNcUwwUmdPTkF4MkJIbUlDZCUyQkZVeEhleEF5Q0VYR3BEV2xIVExFQXpjVjU3dVJiem1CVHZwbVEzVk1ZQldTY0sxMUNMTjIwSTRJdEFjN09BdzFPUU1xYlUwb2Q4dlVteG1aRG9zd3I5cXZaRUZhTDNMTDFZWklCSHZBWlR6VFM5T0J3STYxUiUyRjBOT2JCalp6R2JGTWlIUWhhQTFpVElrV1hxUFVsd0JzJTJCQlAlMkZVZkkxdjlndWlzYU9EVmJMa2padUZRVFNsN0tVdXM4YWhKR0pCRDlBMjdHcW81SWJpSmJOSk9QRE9waXg1S21BZSUyRlpvV2IyRGdmbnZ4RU93aW10Qkp3Rmxta3d5c2U5dHBCMXVLZVolMkZjV3lTS3BGdUwwT1lSWGhWd28lMkZpampHTER4YTZBMU9LR1c3JTJCTXY5U0toaUhqJTJGdXFtVUNjMzcxRHQxbHRETiUyRndUY2FqQXlSVFRTa05SNnZOUHVyeklsN3FFUk5uaXVBZFcxblhDbU1WckhCcDZxV2JmbHBjN3ZpWXJteTR6Zkt5NDBoQ2xtWEI3QUZNQ3ZMa2hVR2tzRklZR2VYdGQ3YWlzeXpBejVic2hEdTRQbmwlMkZzZDRRekpMeEpSbFVCaTRyVSUyRlNJRk5rZ1ZOUmx1OVA4cVpjcHJ5ZUZ6VyUyQlV4Uk9LbnR1OURyaFRMVU1pbjNTQVRHNDVmJTJCWFpHWSUyRkpJYTRFU0ZwbmF0UnJZb1JOZkVCZ3QycnpPRFklMkZ3aUY2NzlMbVFNJTJCckglMkJiS0ZRZiUyQnJwJTJCUjRCMmNEMlJhYWs5MFY2cW11WFByOUJhaTA3QzNibTJCbHd3NXo1Y3hnc3ZxRTUlMkZEckk2MnUlMkZiZVJDWkkyVmo3ZXE0enZzRjI1bGRIblhRNVllRkNQUmMlMkJwYTNsRlUlMkYlMkZwYXpPMndmY20zY1dqeHF3Z3
RLbXdiWnFnZWUlMkZwOHFSS2lhS2ZDYXZhd2VNJTJCMlFKM3EzWnZYQk1JMnh6TFBsTkxxcjNmQWFCbXhTYUNXbnJWZFN6eldQWUp5OUlLMWRid1FmaTlmeFM3c2hHTVRJUlIwMzQxdTBSOEN3eCUyQm12N3p1M3ZQeGFFbXpiRVNQSTk2NzQ4VG84dzROU1RPcEI0bUxDNHlQbW15aDZxNDlGdUY1NVVTTVpZTW0zd000QjU4WktxRWc3UXJTZWFtSlA0TGNldjF1RmxPZXE5ekljTkpxZXJFSkdBV3oxJTJCalZpaHZkeDdnbjZNTE80Wnc4SG8wSGJpVFpuJTJCdWJsMSUyRkkyNFUxc01KRlVSdng1WEtGZmxqUVExS1JSZXNROUlpRmRyNFpSRSUyRjR6NGRPMWRRVjRiTUUwV3lqVE5aQVFVWGQlMkIyNFFIUkpoeEhvNmY2MEh3WDYwMnlPV2JzbmJWRnNtejQzT3Q1YnV2c1lxUjQ0T0tTMkIyMVc2ZTglMkJKMTFKcm9JMW1BeTBQaGxOcXdOeUU4RFhTVktEdWxxVmFWSjBJVGwwZG9TMVo1d0EzMWFGOHBtMmJjWFZnUVNhTUl4b0pzN1doeUdGWEd4SFFVdm9BYSUyRlkyOWtDazUlMkJjQm1KRUNXSE1nWE5FTVlRWk1OTHElMkZmSTZiU3ZxR3BSaE5sNmhNQXBCZTlxaXE3Z25ZR0tLbFBOYVAlMkIlMkJpMHFwQXA0Y0w4JTJCMFh1blpiZlNtJTJGR0FnTDhQcGZrYXlKYVdPR3d5MjVldVdlaXIzN0ZGMk9OemRQeEFEWmc2eWFrOUYlMkZHUWd0ZGhValZPdHpCbXBJWDhsYTd6Z0tqcVhId0xmWHNRNUFobWJvNHAySyUyQm0wWHNJdWRYcHVUSFFtODBNbkM3SDdwR2QwJTJGQXJYT2U1aDNZbmVUbU9zWEpIWUxTc3QxSmVXQyUyRlZwUTJBY3o5M0JQaExBUjA1bk4zJTJCQmUyUFh1TGZlMklVUWg3OVNrNTBBODEwNTZ6ZVk4bDJIdTFJNmpaV00wZXhTenNlYWEyNCUyRjZ0WXMxQlpoOXRLVGhxd3M4c1pKWFoxREVmZUp4WVhnJTJCY2VvNjU5YUF0JTJCWURIbzFCN254ekE3Q3VCVUU5V0lSODhNc05ySUVpN0QlMkIlMkZ0U0l5eElCbm9DODV2eVp1cTZoMVlWZmNoWiUyQnJ6SU1obzVuWG5YVzFJMU1pSU45QnZWOWwlMkJ1UWQ0bjlNeldnT1FkbmRZR2d1dnZLRSUyRlV2ZlFjTWxIVXo5Z0p5Vk1RUXVnNzBCTWlQUEZPbjZCVnc0JTJCeWl2Z3BYVEJ5QXFqY3FWNVNFUCUyRndNUDB2MkN0VW8lMkJ5bVJYZjRUdERLWmtSSyUyQjFEc1l3RjdUVlpaSmtjOE5ZZ2QzJTJCUE8lMkZsWUVyMkpiVUs3a2xtZ0hoNGNjWE1zbmxHSUtqZW94MkR4TE9ZOW45R2M2ZEVXY1ZjczAlMkJMcXRaaEFxVlExekNsbzhXM3c3TEpFSGwwSW9TZVhadWlLVk5GNUNVM3RqVXNCQmlZR3htTWhaMW5oTjFqZEFqaDhLOGhvblZmb3d6WDQlMkYwVjUwbEVrZm84NjRhaGJPeXJJUWhYY0VDYkJmdG15T3A5Ym9GRndLczMxSVFwVkJBOTNUeXpmZHpSRWlBZXU5MVd5U01FZG94WUY1NXhBNmdiT3NFV1RQcXV3OFpsJTJCa3NkWFclMkZDR3ZtODRVUnI2ZGNSc1I3MW5YTnZaJTJGWGJ0cjRwTjZaNGRiNDlvZE5RREVSQ0E4NXh4S0xtOEhTUmpHWlhCN0NpdjR0QmY0V3FsS28lMkYlMkZPd3JUN2xwN1ZFRDdCNFRDNUJ5SnFhWVRtSWFSOG1VTXVKdEpmSExNSCUyQlJlekVPa0V6Uk5FJTJGbndiSm1TYTB0bE9hcG
tqTE8xNXVTTlBFdUJZRlVlYmh5UGlyeHkwOVlwSzZiQlR6Zml5eDNFYXdsa2tXWVFKZUNBZlJFZEI3UlRVZ3BqbWFiVFBaaEdYa3VUaHp4VFhqMm9PMUMxT2NubE9MWWxSdDZkdHE2MnpiMnhxZExOanVCZm01YWlZZ2JjcTJjYmJsQUJmZVFtNGpNaDVzeHAlMkI3MjhtZU9HMlpqTlFBJTJCeE5jbGdBZzdYMm1jRUxpMHU5czFMQnJPdEg1ZGl5Q1ZlMzZNYVg1UVN0SkZ4ZHE5VCUyQkFpR3docHpYWGF1YSUyRnhTUUZ3RVZpbzV6Zk85JTJCZlRPZzhPUGRadVNyOWxWS3dkeEg5U3olMkJzZkVQYmdpM3YyeGVMNU0lMkJCVVJnZml4NjBqYVRSSDdjWXlQRExteW9lJTJCU29uWDNkQVV4SFpSN0pmSWVwMm1jN1BFNTdqalhPZzZTaUNuNk5sMW01MXpPQ0lSd1djNVV5S2h0dWNPRTJCZW1OZWQ2a2M3c3NGeVFnM21laEclMkJwVEYySm4zOU4wdkhydHo0SDFvZFRUZnJWbWklMkJtb3JRb1NXNFgxYTdkYUk0QXBuMUJZWG56QTJMSlJ6a3dhS0ZrNEZaQUpQUURkajRqUmROVm5iZGxGOUM1TnBObW16N0VHZVFXJTJCOFp4VlVHMm44a0c0bm1mdjdxZ2x4Q0lINXRDV25BVDFaVFBlaURXWWthbVpvR3p2QnZwVFBlQXEyMEdVQlgybCUyQmY2JTJCblZ0eCUyQmNWJTJCZ2JsN2x0TTFTeTVIdiUyQmRRJTJGdmhjdEJYM0xBMjZ2c0x3d3JlNSUyRkxaTjBzbUUwZTRmJTJGZEJMMVBRZDcwJTJCSkxIcGNNdGtuOU5ZQ2E5QiUyQkR2YnR1UXBMbFVMQ3JZSHc4UE03aFhVZSUyQkI5azc5VGVLV0taWFRTNTdCWmRxMFg5enlqa3FmYTR2N3VabDYlMkIlMkJpJTJCZkpnbSUyRnFKZmR3U1QxMEFpRUxueXJWVGd6YU5iMTlSJTJGSnZXTm5QcCUyQk5oMWVyRWpYTFRJVWtMYzA1UHNlNTVLTFMlMkZPOU1aVlllR1ZSRXlybmVQbFhxMjV3b1ByNzN1JTJGekhlNG5ndlQ2NEtlT3N6eUxaRUhrd1RYdEJkeExTdzIyVDdHJTJCYWpIJTJCVVhmUFNqOG4lMkJSeiUyRjlFcnkxQ2o0RWl5UzM3S2pVbk10Zjk4YmJDS0R6JTJGZWl0b2pzdCUyRm1IM2IlMkJnSyUyRnpFejZ5dWtqOVFBME1QYW90ZExGVVFwa1dTRGljZHlDMmhZaE93JTJGZTl4TElNb3UlMkJRSW56JTJCT0xtN0pmVG1DU0pNVFExenJ6SHluYWIlMkJvMVBpYzE4Zm9TRlRLZmxTdlI4RiUyRnUlMkJGOTk4VmttZlV3UmFzYkRjcFF6RVdXUDNnM0ZYMmxaQmYzd2JnQWRuclA4YXgwaThSWHdmUVJSZzFzS3RSSUM1dnVmJTJGZm8lMkZJTHU4R09XVSUyQm9zcCUyRjZJS3klMkJOJTJGRENVU2xJZTMzTCUyRk9NQlRkdGFhY1J3YURoNSUyQiUyRnM4d3JjTHZSSUslMkZhNCUyQiUyQlgwZnltNlFuR3dBODM5UG9ZQUthaEFJYW0xUFBYRGRHR0NaTG1XYmI1WjZ2JTJCV21DMGMzc250cm1WWDA2NDZCeFZKM2klMkJsZndQZDklMkYlMkJXdDNpT0RlMXlXZWwlMkYzTnFnckNBZnY1d2QzN2gxNGRyY2YzNUhJbFB1ckw0OEY5eER2JTJGdmk1YzBBZVh1VENuOGM4clZDT1Nnc1U3OHEwVGx1dzVjdXU2JTJGcUFMZ3Q5a01KeVNKNCUyRnU5TGtZUnlaejhqNno2R1dUaWR5Nkl4dW5RVlA1MzlzayUyQmcxc1hXNnJnNUFnV0dPU3FMREJDNG
4ycmMwejR6MEVIUHoxRlNreGdjRllrMDEzNlVUWGpKVWxEeGVTQnZZTDdsUVlqdk85cERQODVDNkY3aEVBdVlSdThxclQ1Y005eWlhWEFqejNFS0xwcUMlMkJsUmZpNzZ6NWxmMVo4cEJ5Vzd2RnRqN0Y1T2RLREhCUkxEaEclMkZYc2RVWCUyQnM4cFlBM21nR2tWdXlsakY0cyUyRjlvTTE2NlI4JTJGYjElMkZPaWVDZVJBYUhFeGZMQmc3eUJxVCUyRnRmWktQTUM0S0Y0Mm1mVTEzJTJGTUtwQk1ZTnZnajhwUHZmem5Cc2toeVdVMWtvZHNPZ3ltJTJGR2VsQU1HJTJCbVowSTFPNDNhcWx5MTlwNTRUbzE1TkhPeWY5OUNtU1JBWHJEJTJCODB2V0phc2YyekJzQ0NpYmhoQk1rRjFVRnlqJTJCTThaR0IlMkZ1RGF5b1lMbnBldVhtNWR1UERxTyUyRnlNQnhJRmZCS3YlMkJZbGNhWElwaVZBSzdDVFJuNTk2d0V5UUFkbSUyRklSYzVuJTJGZlRKREF5WXppT2pVOHYlMkJhRklMeG9NTGpNRnBiTHY5ZHhaSDhwa0RBZklQOWV3cUVTU2xmRmlYdXptWVo4OThUSnpTd3BoJTJCWXR6NyUyRm1qa0JicHN4RWFNdGJHYiUyQjl3a1g0cDhKRjVyaFBPTDgxJTJGU1JnZUhUSmMlMkZrbDhYOGU1NEclMkJwdW5BZkw2JTJGejFQWSUyRnB6ZGlTdSUyRlQ5Tzd3akI5QTZVJTJGcFRoUDVmQU15bW9YOEd4eG5MJTJGUFMxRSUyQm50YVNKaUxmOXVpdjMyQ0RueENTcGp5dnlleVlDeVlUWElVRUtmOTAlMkY0OHYlMkI5bmRXZ3c2d0x6ZjVpREF1Ujhia1RFZlAlMkI1UFRxRE0wbVYzUGlybHY4OWMwWDhNM1BGJTJGQUgzZjllOVBNZUVBUXRZSjJqcHE2YjBnQyUyQndwSjJjanElMkZHeDF6cDhpSG5za1hvVTQlMkZMSUh6dXhSbXE1b3BYTDllTWxDMU1wJTJCYnFadVVTdUYxSmVWTVZ1QmRibkhsVGZrNXVWQmxwWEpNMzFYcWJBbnlPVGNsd2dMekh3c0ZCR0pEOXZsQldaNDd0dXI5NCUyQjV5JTJCWHg3QU1vN05JSU1iOGUlMkJEYkdyVkpldE9aeCUyRnZ3SHlsOGNJWVhyJTJGNDh2MzhPJTJCUVYxS1YlMkJNWUJuc09FRUZKUzFadlY4aGUyb01aeHBsOHVwbExMZHlpNFhNb1luaTQzQmR5JTJCbUR2Y1RUVndDbUxSTmNjTmVNJTJGY0hETUhIeFdRTlpSSU9rc01qWVNRWU9idzhHJTJCRkJIZGs0bFpIenh5a3REcCUyQjQ1JTJGem1udVBYZ0xvSmxsekZ1MzElMkI0WTdkSENsMDhTJTJGbGxaTlRqNWNwT2ZUZlQ0SXo4a3RwN3N1RllTMndZQ1AyejlXemU3SFNpa0JOdm5yVjltTSUyQlhIdDRqVmolMkJ5eVVpam9iYW1Zb0pUbVRaRXYzSXUlMkZOVzV5b0hjY3pvZmo2azlXbkhzYTV0M3cyZjl2cjB6VGUzajRkN3RnbXB2VjBZYXN6SzlFOWtqRm9Ma1Q0ejBiVk5NMCUyQm9BeUdaVDVRWVRzWSUyQjYxWTF1VDFudzByWEU5eGpUekc1UlI1S0pUdjhxVUhDMzglMkJBN1ZmS0ZFazNuQlFSUVJ4RlpwNlEyekV2VVVkOXBrNENFY0tIZmloelZ2Y3BnWjJTc0xZaDZjd1ZvdkZHJTJCRnBSSEhtOHNSc0Y3ZmxoNkxsQlBmV2hGNndwNXAwQ3o2WDRIakpiN1hsNldvVlRpNTBjalZTaFRzMkx0dXdLVklTeTElMkZBeE91JTJCMWExaHdQdjZaZFVRNnRZbEc4ZXJUZnFINFlBM0RWdVBsN3RDWUwxd0
pmdWN1ViUyRmZza0hWWTA4MnI5cTBXRG90eXRnblNHYTlUUVNJSXpYJTJCbkEyZDlxR2p3RCUyRiUyRmhaU01hZHNUbUxjbzZYZVR0NGU4dFF5cDVTd1l4YkxzTVZBNHNUT215V3FoMWxZcUZpMkxiZGVuUTNXaXJWT3FZd0lCOFBBMjAlMkZqbkhUNGElMkZQRUY5OFJhYTMyZzIxUVR1QWRRQTliTGM1YVM1a2ZnOTcydnVtZ20lMkJHbjJ3a2QzNXBnSjVqZ05sdDliVlElMkIlMkJyMVdlSFF0NW9ickdTZ0thRUZ0czZXYlUwckZOQyUyQnJLJTJGOSUyRlpxTTRxTHVFVHFyOFRYa290RzR2REsyalF2VE5vN20wRXF6empoRUd0U25qJTJGTHZtcnY5Ym0wYjklMkJJaTkwMmRBcXI4TmdadjdkNzlpdnVjZ0RwREdiM2s5VmZSS3o1MDFlVWtxNWZUdXhONm1PbmxKUk01aXRsQyUyQlJ5S3lQdkdJMmc1YmdKWUxIOERaeVJRVlM5RmU4TWVSUDFBNGhHcCUyQiUyRjA4N1ZTdWtGVTE2SU5SbGxJJTJCUllPNkFXZDdrV1lPZHk5VkJyZjdGQVRvQXBraEtsRXIzM2JhTjZoMkRYZHZOa3glMkJBWEtuWElvRSUyQiUyQmQxQnIzZzFyZXplTGJkZTVrJTJGMHVyYmVOaEhaU2ElMkZtd1hhdlRVQzdEWWJsJTJCSUlvRjF5WTRsa0JZc0pDQzglMkJpR3Z6V3Q4RDByaTVqUFJuNzNYcTd3JTJGMlY5ZG5aSjdwQVFua211dHFNeXRiWUNieklOZXJRVWRvbXJkOHl4NFRaTW1wTnBMaFlzN1BkcGtQdW9tUDJLOUlqZENLM0dkRTNvTHplYlQ0bWZwaE4wWlhIek02SXRMdEVYSm9idCUyRkRJU0t1SERGM3RaQmNQMmkyYmt2RnFPVzVuRFVWU2V4UGFpdEV3MW5qQ0ZJdURma0RWYSUyQnhpa1NUOU9nWVU3bEROYXlmakMwNDdDUmdVcSUyQmdNZG1aaiUyRjBlTTN5TFFVdDFNMHdpQ3BPSm5wZWgza3I5JTJCSnlKdFZ1UXlkcmJVbGs1NjVrNURYcERUUVZRVFNDJTJCUHRzeTZTSWFIUmdnVkYlMkZXZkJwbWIzYlA5aG5teXRkVzlZSHNCRWtkaDE4ZVJFOWlBV0E4ejIyNDYlMkJyQWhIMUpSYUlSVzdnMm5RclBONUoyaGRSeHFsR0owVVlRUlJ0SmJ6QmFJb2dYM3F6VjQ1NnUzZ1ZJWnZFYjNIMXZMNkp3USUyRlJYR2hkVUlTQjE5dTFrMmQyVkFqOWhjc01TJTJCOGhMMTcxWUtyWENtTmVpNCUyQnZ3bkFGRURsNUdWYVk1ekVoeWNhUXJMNjFURiUyRkhzME9sME00TXpLSVJMR3RLMGZlb3cyVmptajJmbDk5NVpnZiUyQlE2TnhxJTJCMTZNcnpyY2wzYlR6WjNsTmNkaFJ5MTZKRDBuOHlreDhieEhqS1lmTGNaZm0lMkZyS1E4VUNLdWJqJTJCRllYcGY2SWhGeUlWcWtzNmg0NE1ybzllajQ4aVZxSzlkeWVXbEZCUWw3YTR3RkZBY1JtMkdDTEV1ZyUyQlpkZCUyRldTMkFTRjZja2tvNyUyRlBlS0prWUVzaTNQSEdMWDRxY1NWMUM0JTJCUnZRQnl0TmZuaUNsTEElMkJBSlZKMHIyWW94eDVxd3NxUVB2S2xFeW1xc1puY0xYWXkyVkJ1UHZFVXJwTiUyRmZteU5zNXJGamJrNzRROHRNV0JCOSUyQmZFWFg3andGaSUyRjQzUHBvOVI3aGZnYkhlNUZmUDkydmpybjclMkI1dkJDbWklMkJYeFd4NFJpTHpkWU43VkNqR1c0S2RRZm9mR0xvdHRHR0NKdVRiY3k5RWdXM0JwJTJCMWFmUkZiQnZQUnJlejBWVG41RE
RFN2RPclUxaHQ4NFJkaEdwJTJCcDlaWksxazlQMmY3VnFnSEFHczRZOHBpMWphZDFXSzZGQTV5azlxSzMlMkJLdWMzN3hTcVh3VVgxJTJCZ3VJNFN2bjNYNDRKeDVVRHcyVkNnQjBOM0VjTm95TmElMkZXaGlIVG90TWNRaTlKbnklMkY1cWl2VktsakpNRlduZTBNcFZSaG5NeWpYZEJCUmRndjhqbUZWJTJCVHJkR1pQSHFqV1lqUGtRZGNBQWdwWnZhSm5lb2VZcG0xTjZPYmFxV3dvcjg4bGU4U2s2cE5kbmp2MUswakpzTlUxZDRlbkdtUyUyRmNoaENnemdYJTJCdXNvdU53JTJGRWpGQUJLMCUyRlB4elVNWWhQT2VNN0pkOHM5c25mRkozZWpTZyUyRmFnRHVlSkxJaE9IeWNJaEFjeHphUENoYlVlZERhNHJQY3liMkJWekkxZW8lMkJ0UW95aktxZTM1SHNQJTJCNTdlaWNnb3RPdFVmdUcxQmlVSzhBVFptVEVqcHpwMVVHeENMSkt5SWR5VW9VcmU2RTlnM3RJVmtVUkVIU0VOTTd2ZFd6Yjl6UWZWU0doNGVBREMxbVFscE9DcWcyNUtYRmNQMkNvdXRSZDIlMkI0Z0dTU1c2WlhrWWF2aEdhJTJCSE03ZTNWYlBIY25KRXV6NiUyQktRMUlCUiUyQjRCaWFhelE1S2pXMnE1Q1g5N2tDR1VkNnoxOE5jajVHVXVHRTd3RmdGUVhsUkxKd0olMkJxOUtiRzZ2T0hqdmlUMXR0QVZ2WWtlWmo4Qk9MNXJtU0pZWDNMUEdlaTJ1Q3lpR2ZBTzdzdHlPYmNzTzdtbERUaTdJaHYxYU82NzQ1NzNRbnMyOVhZSmVZc2hwcFcyOERjSHZwWXZJUU1ZZE10ZFFHTmhnUTJycUFPVmpiSkROdlVJeXI2c2dZTVpYZ0hqMnRmYmZEenc3TWdvWlc3aDVod1dlUzRGSzN5JTJGQ0dIN2RpNER0Zk00UWVtR3lLVkQ3eUF0ZjduQlVrJTJGUE1ONk0lMkJoRldZV1ZmaG10Tm5BczVXUTNWSlNBa2NDOTJlSnV4Rmt3VU00SExvcUFMS0p3aFJrWU53NnlBV1VBV3NWRUZwUiUyQkxkRFJFYXIxb1RTaWtTTnNGRDk1RkN1ZGZSOEloZVJVbU02JTJGdTdaUjZtYTN4TFVDTDM5YjZPaWdzWlYzT2hqZ3YwMWdhZGd5T0V4eVh2S0ZNT1FtSUpSUWttSFVod280S0lsa25rZEhIUXdLd3RWQjdTbEJLaGhkMGhoVTlQSW9rNFJjNGNMNEhsbVRxUWxHa0paMDhLcE1ZMVNEZFRpckFFclRWYiUyQkZjeUlydlElMkJTRGU2bklEVlpORFkybDYzVmhVSVlBOGFzdGpHWktYSU9BOGVDSFRSZGZucFhWbndKZnFFJTJGVFdEVXJXTVhSR3VQZXRZMEhqV2RXN3VybllObmlSUnQ3R2JmJTJCMG5uRUhLS3A0ZFgwampXalREenlIcm9YQmVBRjlJMk5Bd1c4dU5tUE45NHgxTzlsQ3lLSnNzVW1uJTJGdFQyNjZDTWJZbldHRHVYR1dtM21vdmNLZGxmcTYlMkJVMEtBTjdSU29aczFKcTFyelJ5Q0ltbkx0M21UV3VHT2RqMzglMkYlMkZlUjVCalE2YzZBbDZPY2JlWXN6SnZENjdlaU1kVTZVNFBaUU1CdWFYWXdjcjFlWTVzdENkZ0N1cUU2Y1hxS3NiSUElMkYzMnkyRXVNd0hySTVvU2w0NGN0bENmeHlPTTJscElnVWVoeUxtZWoxZ3pabU5HcmVQSno2dnFPeGM4TCUyRlJxUzBWbkRCJTJCcUlLVmhCODBWYkhmZUVUcU9VSnclMkZ0Q3BkdHJiU3J1SEVsJTJCcnZxQlVzb0RTWEp4dDRzOGd3azk3YUhsVCUyQmU1aW0lMkI3dXY3MXpOVU9QZlMlMk
J2ck1pNlRIZGU0Q2N3U1lGOTlVTng2ODI0cXVhM2JuTnF2SUZBZXhFTVRtWUNKaGdnZnElMkI3ckJXTkRwYnlmb1hiYUl4aXFYQUxWODNoMXBMT0pZQiUyRkdzdTRtbHFmdG5kRlY0RGpmb0lLa0NKZnoza3QlMkJLMjJZVW9oUGZMUXMxekJiWWdVb2lVYkhEWGglMkJsMGFTQjh1T1g5TlJZUkJ6YXllaXNMZE9Ja3doRTkyVjNXd3F0b0RDJTJGeGJLZ25LMWRNWSUyRmtpekslMkI5WnNTYjg5YjdCQ2lTRUszMkZoNU42eSUyQnNLTENMR2xkVDhrRU5QenhObXlFek8lMkZFMjUxVk5sdTEybjRjOTRRSGE1SDRsNEJzNkUzN2NQNkpFTmNOQ2hacDA1ZEZ0VW05NU5tZ2lPVWllNGtDcXlwVjdNTEVheTFmYVRnaFhWUDNFQVVqMVpoeVlxQmExTGg5JTJCVk5kYThDbXZGVmd1WHhISWYwJTJGcVpNdmU1Rk0xM2VIeWNRNyUyRkRGYlVybFh5JTJCOVpZdWMxQSUyRiUyQlQ2SCUyQllDWEwlMkZyVnlreVAlMkY4YU5WcHJJSDRaSjVmNUlQTiUyRmRMSW9SZER4WEZpYUJOeGxMRjhRVko0c0hrTnpjSm50b2RpdEQ4SHNjUjNPMXYybW9QZGJvVTZaMVd1MVRSTHk1ZzBqMThwMkZDZ2o5TUpMTUJ5NVpyRklwYUxqTnp3N1NhZXhJMHVxenclMkZpOFlqJTJGblJXV2Y3YjdJOVpvMFp3bDdPQ1JUeUIwRVFEUHljSzU1N2ZVMGJ2WFpFUzB4Z1VlaHN3MzMlMkZ3eHNkR2ZMeUQ4VjFNR0dGMEpnJTJCSSUyQnZxViUyRm5DbTROJTJGeHJtb2NPY2lHQ0pQdWdMUDBoakJNQ0RMbUQ4Z3BvSUYlMkZXSzZoSG0lMkJOJTJCS295ODN3eXlPVEVLZEklMkJWN2VTdVExb1JVeW4zME1vaHFPVmxnWlZJZ2dEQmp6ZDJVclY0TkZJVXVPYkc1bzlkJTJGZlUlMkI5MkFnOUJ6WFM0SG1JUGQ2MUtBQ0V6RWVNbW82N2VudkJzOGtqcTc4RVElMkZIUE1ZUFd3YWRrOUNpWjEyWTFtdnFFNXFJa2wxYmtQbmtBWmJLb05sWVJYbU5FZlFJb3psWkpTM001Qm9zZ0tqWlRPYTdzQTllemdKZm1hTFI3bjRNWnpyNHQlMkZESUsxdkFuJTJGbzclMkJRWHRqaUpaZEpaOUh5SnRRM2ZNUHo5bU1CVFdqNHJzd1ZJbE9CeVVjSUZvdnRQeERZVWxKbEk1ejQlMkZGWG9LQ3RQYXZrUzRXYkZjalFTNGQ2cEFXJTJCUnhNJTJCd1hES0RRdHhBSzJoUFJ6eThPeWdHNjg5VSUyRm5PME44YWJNTGVpTjZvSHNUaUZhV0xCcGp1VHZtJTJCaDdUR2JTRFlJVXBWWm1WMHpQNHpGd2wyJTJGTW1YdjRyTEE4N3g0QUk5WHRXZk5paGJ6aURRYTlSMXFENERLbUZqbENiQldtQ2pIeWs0RHp3RkhXOWcxZzJBdk9KbjJEcE8lMkJ0eDgwdnJQZlVaWFVqSmtDMmdZJTJGTGdRVnV4dG96MG5TQ3lGdm1KOVhXeWVVYiUyRm1nck8lMkJDUFc0eEVyRHdRWXdwSHU2c1pkYlh3c0dNOFBUdDZSSHlSRFg1dmt2Y1htRVdxUUdadTBrejRyTzBGUTJQMHhyd0c1UmJnUVZEJTJCUjNodlhVTlN6dFpNRmRzTDdKZXVCNEw2VUxSVnVWeGlCTHpCcWt4VzVQcHNyd0FLSlRMOHV4Y0tPZ3MwbE14U3ZmNFU1em5qd2oxRHJJRjA4MU50SlFHaSUyQkdockxmakR1JTJCaEZyR2RoYyUyRkJGcGsxbiUyQmIzUFJaM01uSU5JQzFaQ2s0M21JU0w0UEdqZTd4MUlXOE
xKVG9Ea1NZck1QbjFzbXhFbUFPRmZuSmVBb0ZEM2k3SWhmZGYwalVBMXNiWXYlMkZPdWpwSXFTTkx2WDhZT2YlMkY4aEpXJTJGdkdEbVR2R2oxRHhYNDBBbElOcTMwckxNdm9OJTJCUGg4RmpzOHRGNHUlMkZSRnJva09QQTMlMkIyTnU2a1I2emRyOTc3d0pnTTJhWHVMVFZMZndTWnVrdWcwemtXdUFIUUdXTDAlMkYxdG5GUGYlMkJINTM1JTJGMDFuR1BzbXBodGN0akt2N21WNnp2JTJGbzBQJTJGbzBQJTJGbzBQOHp2Mk5lZkVhbTdRYW5UdFdRUE5IbFE0UmtmU1NUbE1seEVuZURERVNUSGxVRUklMkYlMkJjUkJLMHN1RUtLYTlvZ0ZJeXZZTTlsNnZOeHJJQ2R5bklZZWtjaW96dTg0NWVocnZWc2RBVm9FbzI4eXd2T0Y0blppNTdNdlYwbkRVUUh0ZFdGNVdqcVA4TzlJekkxMXVTWHB2bmhBY0lwNUlnYUVMYkxPTXZHZEJRJTJCRlNNaUd0eGtES09JWmlSZ3A2UnZyaFVtcCUyQmNWM0lTb0VFejJ6czdTc3o1cHN1eVlFOWE1NFhJc0l3dmZGTSUyRjFxOXBLNmdnYzJnaGwwREhXQ0Zkc24wSCUyQmoxSEVJNG9RRTl5bEQlMkZRdVVDTW9VakRUTThVeEdUQUxEVUNqb29wd2lRd3VVZ1lkOE9tJTJCWWVNMGNGRkl2SnF6TVpkWnI2cXFoQjlzamZaSkJpTlpTRE1PUW5GcnpuNHIlMkJraiUyQlNHNnR0WG5wSGs0UHdDWHUlMkJibEg5UWQwViUyRnRhVVlXOWhuUW5ueSUyRmp4azVhSXclMkJQaEZYQkpBRlpZaVdEanp4MkxobWpQNUs4cENxWmRzN3VmQzlHNzlvU25zSkZQbm42cWhvVkpvbklMRkdZQXd2Y0pWTTdORHg2RDY1NXBPZmZpMkkxQWdPYzlxWWJPTEs5ME1KVzFTaiUyRk9Lenc4ZTRvZXBWV1ZoR1FKSHF5Mk0lMkJVYiUyQjhBSHFKOXdGayUyQkNFbWx5RHdJS0ljWk5CWThheHkwOWhDRlN0OEpyY2lBZ2ttJTJCVTZEbTNVMG9XTnglMkZucmVFY1luT2ZjTjZSVEtOT1NUYyUyQmppZk1qeFRoVW5QWGFmODl3TWhJVG5YR3ZBcE9SbW0zbDBNJTJCVUJvZWtyeTJLN3AlMkJkSzFQSG5POGdxd0xWJTJCRnZIbmVNYng3SXklMkZUZ2F3ejV1SDB1RHRSY05YQ3FKU3pJNzRkV1NsUnlVVSUyQjUzcUowSmZnZWw1UyUyRkRiaUQwJTJGaCUyQkxLRFNJamNCb3lsdU8ySU9KUHpnVmJIcmJuOXM2MGxEVEVRa2RCeTZOeG04USUyQkxwTFY0MGxlOCUyRmZ1V0IyNjBleWhtSzVpRlBOJTJCMHJMcTZ1M2RTMiUyQjAwTDZGdVdlN0FlS0k0TGNJUmtmS0JPZ1U0SFphcGp6NlFzenU0ZyUyQnBGa3lBTEhsM2p1TFRyM3lwR0xobWNiV056bDR6Wm8yWmklMkZMRmZWJTJCb0lSaXFvUCUyRm1oMXNFZWgwb1BvdWtaMThycGVtMDhCMmY1eVdtdjdjTE9zOHdkV2dVQzNKcWwxOVdHSnhnbjdJQnF3TVVFV1I1MEZEMFJvOWNjUzZUTU9JT1hOayUyQlhNT0R0dUpUT1FXeUhiUVV1QWdzTXhWSHEzSDNCcUclMkZYJTJCTzliVENXYk5SJTJGOHdiVFg2OWxLb3Y3ZlZybVdiR3VOcm92c05iZUpXYnpPYjczY1ducUw2ZlF0RzZqcmM0TjJ1aEh4Y3QzVWtLaWx1dTg4MnNtVlVnZEZNcDE5aHVrJTJCJTJCczJMU0dkV3RUViUyRkd1QUVseWJITXpBSDhISjRSOTBSZUsxSmswY2RPSCUyQlRlNz
hKZHpiVFc4Rll2OHFoemdYbm1CaHJBJTJGeXRwT3R2ZE44OWo0TzF3VnpDNTVteFI5ZGIzRVhvSUg5ZGpZVUZGbG83NXF5WHI1b2RPR0xvdDNxVTFsaWhTVWJOdmlLNzBKZU5HMkhzNUlma05GalElMkYxTkZZZXVOTVNrJTJGM3FhYmtrQ2oxR1J3bW11dndJYjlyVFhCRkV6RE9KOFVjUVB0MEFYc1lEWTlsVzlabUpmUnJMUGJDT1lHUko0cnpndWl2enQ3UGN5RXpxZSUyQlJkR0N4eVF4TkVHd015dXg0aTdFRDJSZkRHZ3ZwR2dMckNQZ3VXaTlMdDVYR3VZcXhtNjRWU2E3NTZ4ZkElMkZYTUxKZlFjZ21wV2hLTGZoM2JNUFBrS1VEeVBQaGxKenN6SHVqenlQbThZdWlqT0ZEZ3BoZ2I5ekRiUjdIbGc1MGdBYlRLNEolMkZSM0t4RjJmQ1hYJTJGUmVwS1VmbUU4MUtkbHQ5SGh6TGRScXZSbW1uNkRiZiUyQjZtQ0FhOTBvU3VFZFNGSmtWQjhuZGw3ZCUyRng3QVJlZ3pLUUw1VlZKQWtlNzhaNnVwMmVqM2dZSDdXUEY5RW0lMkIlMkZEckJhYjBhMXElMkJMNHhqeXlFTWRxajRKR3RWYmhJZ1hKVDRpUFNoQXlvcEs3MU5SUURSVEpiSXM3dnl4QWVHJTJCekNBNzdmemJyTWdrNkREQ3BXTVM1VU45WmJ6NFlCMmF4aGpOTDlON09ZTm1RTlRyOHZRc25CRmpCVCUyRmN0YnNLRzltYURIZ0RtZ1llMEZkSFBBOFczenlSdko4M3NDd1VRUWZ6a01BeiUyRkFsQVIyU3h2JTJCR2hjSzlVUTdVWUpDdlFxb2luUUNtWHN5R0w1QWFaSlhHQm5qNlJxbzhvNHBPUmlPRlVXU2hVWm9USzJtdm9HbzN5VFM5RHVpRHJCT2pTQnZQWnNtd2RLaVBJMXM2JTJGUU9WbDJVejhLajF4ZFFnNkVISlVmQURXd0tTYzNwbEFVaWxQaWE1UiUyRm9vd1hqTzl3JTJGOEV2TEtYc1l1MUFodm1xUmI0d0twTDl2TjlSJTJGJTJGVmVIUmdsNnBXbVFFQmNUVDhhcCUyRlZ3SDI1MTdwTWY4WWU4QnJKbXJud3lCOXQ2QVg1bDgySzJvWWVFdjBLdWo1eVk1RE5RJTJGWjdITFRJVG15cWxSc0p3REdFRVJmRCUyRiUyRmFOT1k1d1pLVjF1Z1VMOFc2ZnRZaENDS2ZzRDl0VDJQRHlpNUkxZXZPQWp1YUhYSFAxUUlmSiUyQktqUW1QS0VId3BZRzBTV0t5MkZHeTJSMVdYSVYzMXNjMWt4bEVDOWpra2VGenpzQXBGVHh4ZVclMkZCTVdBU1JJUDdmT040empYZFQ4QVJEU1Mza2pndlhGY2NQcSUyQklhJTJGaiUyQjlRWm9RbkxCdWFsNVJaZncxJTJGMnlPVDJnSTVDUUtUdEZUN1FTeFdtJTJCa1B1MkgxT3hPS1E5VWRkbVF2a0RiVTh6TzV2ZEd3UFE3Zk5OTzRTV1I4MWJrRFlmQmtwcjQwVnpKanZkTlNSTkJLU2x3ZnVSSnQyY2xNWEF3eVMlMkJYOUp3UmhEWlJRS283amRQZmtNMllHWnpDVks3QkU3MW1ReVMlMkZoSW0lMkZMV3FjM2tQZjgxRWFObmM1UzFxRmtmQ0s5cFRrM3p0bW1jZVREaEtSZVRZdlpuZyUyQjZnOTRHUUJsMDBSaVZOc2h6OXJCdkh0dERabENzJTJCcFFQSTkweTA3RUUlMkJsMzc1SE83OFJDTmFCNTdoRHYzTjlLT2xIUjQ0TjdBYlFGM0VqOCUyQmVFZFN1NnFlQ2o0ZUplVTBaZ01ZdW1pejN4NEp6JTJGY056Z1lTckc1SER5MiUyQmNpeWtGTlVUWWtQUzRrZUUxSGNwYnFCNzFnRnkwS2V6U0JYSGRKUH
JkZmhkWWVwZUFJMHZ1RDlwRUklMkZYTWdnYzlpVTlkRXVxSVZxdDByNnZ3TkVqc2UwSTNtU1k4V0pFZHQlMkJFWXRuMHFXSDJ1TkZrNkNVVkFORkJoJTJGYk9YekQlMkJXQjRpWW9LMkw1WnAzcHlqZ0g1ZkIlMkY3U1NNb2RzZmxuQmdVU0V0eXM0UmVWT0ElMkZCdDg4JTJCM0oyQkxkVEtNR1RFQk1iTXZqbTd2TDNPM2RWTW51Q2UwWG81YmRJN1pQQ3FMRCUyRlVBRHdWRGwlMkZSaGN1RUMzWmU4RGJGdDNGeXdBVklVRDVnQkV4TzZkbGZHV2JnM0xlOThLZUlaajglMkJ0Qm9SS1FLd1FPTTN1c3Zidnc3cjEyYlRHOSUyQm1KTU16JTJCcUg0JTJGemw3JTJGR1B6UVJkWVM3d1J0Slg1aUlEMFpuNWNZVnJvOEJ3TDclMkJCJTJGaDZBTHlIRHpXaVREYWlZeGdHeldqTkpKc1FVenolMkJ1QlklMkZ1c2xDUXJUZVRNOTZkUHlLN3B1aUlNQ1Zsc1AzTzNvenIlMkZrNVRnOGE0QzJhVXJyTjZyb0lDZiUyQnljaXk1SldzQzFoVXBLTFVWMjlvb1g4VG9sQ0RMUDZ1TUtYOTh3TDFIT1ZJd0RxaUE4dllKcG0zNDRkWE8zWTZtd1JtOTFTM1hlMWJLWklVejNOYnBidGlkOUhDNmU3eSUyRmdUenB2VUgyRGRZJTJCek5UZnNqZjFxZENVdEclMkJJZ0NvcHFIMU9tbDJGS3h3bzF1azlTJTJGSlhuJTJGN0J4dlJueVRVbnNEMkFzcTMlMkJpS0hjbkQxMUJzZWh2eDdjQUQyNDRZSDEwWkNTZ01uYjhtRW9nUWdPMXBvVGtNNCUyQjJoajNRaURjVjY4OVc1eXJDZ1dGSCUyRlolMkJnVGJ6QmFvQ1ZiVDZ1JTJCJTJCUEQ0R1M5ZUI4dnFMSWFMUHVvUmRGckNmVlVOcVZ3SE4lMkZESXJSMlRDQmkwNUZmZmJoeEVrQVlnS1hvRVliUjFMVUZveFBNdCUyQmg3RHE3ZGtSJTJCWHFOdCUyRlJpU1BodWlhNWpYbE0lMkJjZ2Y1R29Icm1CJTJCNjM0ajNWb3Z5RzMwb0wyJTJCbFJiNk9XUE9hUDQ5JTJGUHpvQmI5TCUyRjI1dmtHcHdHbWU4RmxCdUFub2FtQUFENHl0dDREZVolMkZmU3VqWTdQdCUyQmdBTE1BVzZiOWxqV2JLUVJQJTJGcGppRUlCTHRPTzNmS0dQdnVHYjhsN0tBNFUlMkZCSnhxQXduUXVKV1pnTmM5TG5iZ2p2VU42NUFNR0lrM01kZ3Z2NjJyJTJCSGp4MVF2JTJCMUV2Wkd0cE1wY0VtSVpVdTclMkJ4NjYlMkZCZk9kM0l5JTJGaWV4U2FDdVphZ3olMkJTWVNST1hjSWM3enNaZENlTVozVGdiaDltd1FyUEkwa2tNYTRDWXVRMHhwZiUyQmxsWUc4aVNGVGlETzVQQkV6TjBmTU9Ua3Z3ZzZ1N3VhT1pLd3cxTkFEQ3poTjM4aUplTGpGbkVFJTJGRnh3ZmIxZ1NZUkttSXVGWEM3Z0RMd3M1JTJGOVlRZmFyJTJCdWZhd3FnSE5KMlUzc1dNVmwxdzNOQUd2M2tpZUllSTV0bndhZ3lDR3pwQUIlMkJIcmpmVklCb0FDJTJCRTlYM1I4MzhzQXM2TUZaaGtlUTZRcDJkVTFqa3Z2NW5XV3lnZjdMamI4TTkzUmhJSjRBeXVkOG82ZEkwS2doRWRJOERpbmtJVks0UUs1a0kyU1VJejJuUU1vajhoRXpuQlhRYXNQejl3OVFBWGc0SE96eElQS0M3b0F4N3lFZGpFTnowUFJVYkRSeGxvdkpDOURURWJ5Zk9nQXlCWGpLQmdPSkpaMXliYmVYMEc2RSUyQk1OQVM5MXdYVDlBUDBSOTFxZTRCaTNlOHB2QWwwWD
BZMlFYeXV2NEFucCUyRmVtR29NQlhWMFpWV0pjU0tIbmFZYlFtWWtpZ1FDcDhSN1VLcmx2WWc4JTJGNGJHTDhIaGtPRWREWDhhRTlBTXpucGZ4OWdmcTk0cW1zMTRVbllJdzNibXZ2S0xVSVhRSW1BQkUycTJlNXRQRkFWN0FHc1BaOSUyRndGb0dVc3dRZjlmbDl2VVk2OFNpSVprZkglMkI4VFVkVzd5akxtQVVYYkd1TW9WTmVEZ24wWGppWjA3dGxWbWVaeVE3cU4xMXVLU0VxZURMWll5YjFyVDNUYUgzejZmT3ZrUEFUYXdpRFpqZEhJZWslMkJCZ1dvJTJCakd6YVFGJTJGVjRlSkowaHRYUVl2RklqeE02bDVCNnkzdCUyQmk1YUE0WHZBOXFJNDNvUjNTNWRpNCUyRnJDZDFJSFFvUUptckRNY2dvMHc5ekpEQSUyRmxzb2pJJTJGTjd2Y0dSUUpibDFMcmtZellCWHJTZmFKMHN1aUpBQUUlMkZYZFR6Qks1JTJCSXZtMllraW1GQXEwcGt4ck5sc3hxQVFnaEIwVFNFdmdPMXpuRSUyRllmb0s2aEtYRmtLb0VSTGpwVU50aTZtZzRzOUpPV1FsaTNKZlhRRk4zYUlzUmpvQk8zalhMcjJpM2klMkZpYzRDWGVQZjF0M3pjbmZIV1RFN3dMRkJDbVRuNiUyRkpnbVp4WXZ3TUlxcXhGQXZmNHd4dEhtV2tlNmU3QjhxQ2czS0FWR2p4diUyQng2bWI1RjNQZjV4OENhem9Gb2hVclBuS3dCZlR6Tk9mWktjUHVoeGZ2ZCUyQjM2THdRY2UlMkZNeExrM28xOXRmdFdMSEJadlNjJTJCSEw5bmY2NjNKTjJUaVVaSndZaDZjSVpKQVNIWGZ1aXV4bnZzM054TExRTEc2UUhUb3VXaiUyRkxLWGpyZ0JQYnR5dGRzVHltWDRGSGo1ZHlUd0Q2MzdFbnBqWFk5VzZ3VkpmJTJCRTglMkY2VXkwUzAlMkJwVFRxSk5BWSUyQnhhT2tjRkRVc2NxQnFzQ0xLSkhSZlJMQXRWTGQlMkJFZ2tsJTJGNXRLU2VWMElUcEE4ODBZUWJCbEtEREo1T2I0NzVCd3dBQVhsWVlraXRVZWx6bUk4VWdkbXNZQXlwTUQ5ZW9FVmxrNndCREs1MkpPWG52Nkt5MUNheVZyaFZPWWd5dCUyQk4lMkZ4WWpCY1FMYmg5YkZxJTJGbE5FbVZwamExdWNvUjRqZzBHdHR6WVNzUSUyQiUyRkhpYXIlMkJVNHJqajdZdjVDQ1hXUmNGZEs4blhKaXFMNHNGZ1olMkZaNzFqJTJCZHo3RWY4MkdldXB4WmJOWUdNY1l3azZRNGYzNlowNEMlMkJqYXFTemh1N3ZNVmJsTmd6QnlrQVJJb2NwWGRuelRnZE9FS08lMkZtOUhlSnVCalZiQjlubFEwUmclMkJYb2gwSkQyWWVYcHpmOEc2aVZ2VFNSd2t2U2wwMHlxQWYlMkJhMEtNWXNSUmElMkY5S0ZnQTdXRTRJbkw3MlZXSmJ4WjNYNTZjTyUyRmJUemE4Wm5vZ05VJTJGZmRSY3Y0eHFiNTJRcmdLdkEzOVVDbTBzblc5bnRJTE0xczE2a0k2UmhzOWNNeFd4SHRPRFp5WWhsMk1wZzBrNEpTSnNaRGNlc0g0OHVtUUx3UXdkJTJCbGp5ZFRsWDk0UWdtNzUlMkJINGQzc2UzcUJqVjhudERQWFFOSGNrc0ptMWozN3RDNXYyaXl6JTJGZXZ3bmhsYzclMkI2elhFYkFVb3lHWElyaGpRRDkyVFMwbDFIdHpFZThyY0dQQjVEZklxR2xqdk9GMDRBbXRnSCUyQjZ2TkNIMWJMSG8lMkJmeE9rODhSbmsyMzdpNUpkMTNRVWRoZTdmdmJ3OUZEOTlja0VkNm1IUEowM0V0SUw0b0tNQ0JmJTJCWnpzN05uMmMlMkIlMkZPeHhsQ3
RDRks4OExuQlVCcTVVMFdDb1hxWjRpTEE3OGVQZnNzeDFNTjREYTZQOTZBU3lHVmJNbm9xZ0RMOEFrWFclMkIlMkZKJTJGTklCR2hHWlcxJTJGZ3pUOTFscjZaVUhMbnNRZG0xbU9lbGp1Z3dXdVlwMiUyRlclMkIyeDIzVSUyRkQwOGxpSVBWNSUyQkZYOVh4cEZSeW1hbnV5TjJtajR6MldmR08yNCUyRnVZWENmclVBdVklMkJVdWdJbmdKVzBKNUhyNUxHS05PblIwJTJGRDE5WWVBNFlZNSUyRmQxJTJCV1A5dUlQTTFydUJPN2s3R0xVJTJGY0lmJTJGQzlXc1h6cDJXY2ZtZiUyRjFjRlI5dGQ1VjVjano4TjFNY2xjV09tMXNQTzE1T0F1TXpMZHYyYURvSkE4OG5nTSUyQk4lMkZLYXBKMGZ1ZSUyQjlnSDJOc2F2OTV3VUc3bmtCenNociUyRjFYYmk1M1klMkZSeXNCNkVrQmF1cWRVJTJCcSUyQjMlMkI5d0xuNHB3SmQ5NXRPcFMlMkJESm5McjJWV0JxSk83TTBYbWprcXJoWU92U2NIQlp1alZRNnhmQ0lZJTJGWklVbGtUUnlIVjJxZHZSYTVPJTJCREU3ZnNOVGpxWjZHJTJGQmNObmEyMTU3MTluJTJGNzJ6TVphRzdiSTM0VDBrdHVlRWduQkdoUWxnUDElMkZDbiUyRjI4ZjVRUTJRejJJUnBEN0JlWTZrOWZTSVRkNXRTY1VkaDk2Nk9lVE9Db2JmZThvaWd0N3Y3S3M5ZXF0SnZYZHQzbVZNWDNJanlCMmFCaHRnQndWbXBnJTJCZDc3b0lQN3JVSUl3QjFWMnZTJTJGN3R1eDdyTWVKT0pTZ3pQTzJNMiUyQjlYJTJCVlFEUjBjR2RXQjNRU2dzWnViYk5ZS2ZsSGpnT0E1UUtyd2VLYjFMN2NzJTJGNjJ4VEJqWkhjJTJGa3VlT285ZkFYdElTd1NZemVjZTNwQ3p6eEJaS0E3MEpQbEtHdk1RZmdXcCUyQlVPZ3JTSWZqa2dlayUyQjA0cDdMMm5LT2UzTU5WMHRhM2dMWkRoTHVRNHUlMkJGbTZidHd2enVlQjBKQXJDT1ZIdFFUbGs0cjF5JTJGN0psNTE2TGpQU3dOY2FaNmVYV3NEN2JZMENFTlZQRjh5ZXVkdUlRcWNIc1Foa2tqbG1hNUlvRXNoYU9LMjlsMTMxWHlRa29UJTJGSm5kSWpGaFBYczglMkJRekJ4NXUlMkJ6SjZZZUNxZDhoJTJCRUR4NDdGYWJFTmFYWjBCTEhiYlJvYjlVUXJOJTJCeDlnWkpJRDdtelZ6dndKQnd0RzNrNGVhek9FT0N2JTJGcGdkVjZiYjFkV01wNzJCUFFyJTJCWk1xdzlXYkw2MGNCcXJEJTJGWDNmYmJ6Y3RjbVU3QzFjM1pkRkNUaHdob2x3Nnp2Q2liclI4V0VVZU4lMkZERzMweW05ZyUyRmoxMnJRJTJGOXBLZHhWSjgyRG40dWgwJTJCZVdUMyUyQiUyQjRMYjklMkJ3dm1XMU1RMjlWT0Nxc2k5Wmh3QXQ3MGZwZ2J4TUZucTZONlA4VkZqJTJCdWphNFhvZWtFN1pBYUlhd1RTbFBuQnElMkI5S24yJTJCYW12d3laTUkzdUF2ckZBY2FPJTJCaTVJcjI1SW5Xd0Y4YiUyRlllN05sUjVWbFclMkZCcjdqc1F0STlDQ0JBZ1FQVHdSaXZSOSUyQjNYRjhHYzYlMkJ5OXp0bTVyYXpLcnRXMXN2MndMSE5sNWhRb0l0eDlqT0VlN29COGZsUGlxMDNNcHptM0ZtTHZkWG13eVdzMlQyT2glMkZYUXljWU5BekxVJTJCY1pjWncxUEhvVGxqckhDRzJmVDY4YjFvZFNHc01XTlVIa05rWWluU2RFNVlpT2kyQWxDZEwwSG9FbHdLTDcwJTJCMUwxeFY0YUdCaCUyQkpZQWZVZm
pWJTJCTDZJWjVEbXVHbWdHZVU3aDlGSEhqNVRkTXd4V2FXOUpRUjVjekNnNWxYWjdpY3hQYURRbHQlMkI1cmhLNjBHMUF4ZHolMkJpVjVxS0s1blZ0JTJGRHJoMkNuOUp3VjNhdUNWSU1KdXJsRlpacng3T3g5M1hydFR5eU13SHNpalJJbFRiJTJCeUFKZG1NdXhSOHdhREh6ZTl5OHRqck4zZjRrZHclMkJxJTJGN3NuQnlzWWJRaDVuRGolMkIxbElsenNTYVhjdTJIVmtNUGJ3SGxsUTZZVXcydWVoUE1RNHc5SVdScU0ybVlkMnRmTGx4JTJGb0lOQnZjRTJ0YmQ2UlcxdnJYSWE3R2k2TVdENWk3VXJpRFIydlliNGV3dDhIJTJGTDJZVDN5aFpBSmw4OGozcGRGdzMxNDdpVngwOEc1aE14TEJqSVpucWhQelNBRThHWW1aQ3g2a253MktNanlVa0hKaEwyOVJjb2FjOW8lMkJSUGlBWjFYRmJCdktFdXNWWmlHSlRwSllwdkNOcG9tSEtkTWpvc1ZybkpYR2R3eFVQekhyS3AwOU94UUxla0diQk4ydlhkUWcxbk41SmxLNVFSb1IlMkJoVDhVYkJacmFSVkFoJTJGdndXdXpJb1gycEVmYVF0WHZMMzhuN2Fha0l2QzdNWW5yM1BOSDB2bXRlTjJUUlEyU1NnWE5KNjJaWTRXU1R1TVlrVEgzMUV5SXRhcVZhdUt5eWJqZ24lMkJsWG4yZzFzRGtxd0g0RUl5dnowOU5pV3lOUE9SWG5GamwxTHg5Z1pGVFVuUSUyRjZ5M09ySGNpRUE3WFhZZjBTRGtKT2FyaVJPdFRYWTlBUWY5d0NMRzZCaEVpMFdXWGE0RFRmZzVMcFEzcVJnZ1dSVVpjYXNNZzFMTWZzV0ZlZkNrUXhkSVFJekQ1SGVsOWFUSSUyQk54YmpCUzNXTG1WVkd2ZUZ1Yk1PNlRHVjF1RzcxQ1hHeDhYZjIweVZuJTJCbzBXbVJ1Q2lHazh1Wks5JTJGaHJhYW1IMmM1V2ljQ2ZxS3lBaGJuQWlSaXNoNExuVkdqOWFwV2NoZ29xbnMzVU1GSTJFUmczNllzMkF5aklyQ0xua3NZJTJCWUU1U2RnekVHR3AlMkJpanlLRDFQSWUzSmxRSGNaY3M5VHdvJTJCUEZIWkVUd1ZXVGpIand2UGVRczklMkZuRWVFRCUyQndYak41bWJITGtXZk9BWEpHWld1anNvUjlSbkVqblZXUGwlMkZ3QlQyNmFsRTZkRkpSdE5GOWZsVlB4THc2NU9pY2hrY0dSdlg2STZvcXRpRWt1emVBQWRDNVBGUTFhYiUyRm9VQ2RiUnZReCUyQmhseDlYNlV0dzh0aUUxZGpEZ1RYaVVodW0xblVTd09oaHdjbFRObmF6MmZoc0ZCVlNCTHNQNGwxT2xjMTk2a3NGODNvJTJCdktuYm1pcldxYnhyUFBEY2JHTTdUQ1V5eTZYWHkxN2VqZWxoUXU4amlFMVQzeEdPZjhEc2lPMGIlMkZSRmE3cENER250c1NWZzVYRWNuJTJGRHRCSWpwRTZkTFlicm5wJTJCTWtkTTd4aExrN21oVm5UWW4xNEdDS0sxVk5ETnozZnQxMmZwaW1sMzhBSkh5JTJCbk4weGxJWW5YdGRYa2hadTBWTENhdXBvWldraTNhVkpvZ240NElpVWxVVW91M2UwQm02bVlrMFBDWEpTOXd3bUFuSFVRQWI5UExZekdHcThtUyUyQkdic2clMkJ1Z2M4bEJCOTBKJTJCS0hLSU1PMG9SNFZ5SEdIeHRkUkpmUnl1SU5idlRGR2haZFlqUjZNTkNFeVVCYTY1M09ldXIlMkIzNmZ1NW16VDlQa0hKaXF1NDJWSGQwOXU1TCUyQm5HUiUyRmozY281TmJ0NzJqVHg2Tm5sZ0tCaFdzM0g1UTV6MGo4RVROS1R6RnFUUHlSY29lemRrbjFYVD
R0UkFSdGVKUHBKWmhNdDFvVlg4UHFVcFRxSmNZM2gwZHZBR0FVS3lVU1AwY2NVQVZtU1FvWGJ4TUZuR3lReGN2dkdROTJVZ1hURWRYSGtpZFRkREV5ZVQ4RkZmVmwlMkZqaGFCR0lNVDBEZlhQWGhwY0tHbU1JakNmMHRXeDdMa2U3bHU4b0h0JTJGcmNEbFhFejJOdDNWUDQzVW81STVVSmg5YWo2cXp2aUhVM3MwQ2wyRWtpWDdSc1BLS3MlMkZaWU5ySVl1dGNKR1JhZUolMkZRYkFjYUExS0l2RUxYNTZ3TExKWGJWUnF5RThhTlhHS24wZnVVc3d3aWJYeUxWJTJCQ1lZWDNiNFVSZWFJU3pFQklIdUt2WXlVem5NNFZhUGVpQyUyRnloRVZxTiUyQkR0Tkt4NzhqUnFIMXNGM3B2aGNjZjBidFZuNGo1bVk1aXI5JTJCR29acUklMkJwOXdua0ZKWW5SMXZMWjMzTHlwSG5KRU1nSHRITSUyQiUyQnI5cnRSU0tjYzI4Wnc0Z0xjTkJQb2Z3TjElMkZjTXpGSG5hcVZnSlQ5a3N1UnlwRVNBVFRjRW1OODB4REhtbUJZTHlVRnc4NkElMkZUOU1MZEpTNlIzZGR6ZDlISUJBSSUyQklKVkh6TkxjdkxTT2ozbVhqViUyRjRwQjJOMERFZXF0Y2l2ODVWYXZ3MTZuNiUyRjVqTHBBSzhEU001MzhpZWYyd0QlMkY2TnRHSFVmelV2SVJ0bFhmJTJGNnlpMEtueTRqZUJ3RzljQ3Z6Ujl4cWwlMkI2azI0dVJRTTkwJTJGJTJGRk12dyUyRjN2b005M2FPdXFwMCUyRnN3ZERtTU41U2UlMkZSVXFuUCUyQjE4ZWhYNUF4V3dmN3Boa0MwQlJ6OGREeDJSUDUlMkJObCUyRm53OG41NzJJTmxmdDhTJTJCS3BUWWt4UzV3NUtmdTJ2eEt1WWxRa1k5dmExeHAlMkZ6a1FiVkQlMkZsc2YlMkI1V25NQjNTT3pWVUtpNzhkUkpwbU5WaHpZWERPdiUyRjBqdEx2UyUyRnhMNzVpck5qaGZRWUN3JTJCUjYlMkJlWWpRN09Ua2UwJTJGQVZLNDZtM0R0U2Q4cmJwZGlJWVhYYThRbkc0eWZEQ241cGlSUDBpU0hYUXBGZ3pNSzA0JTJCZktJeiUyRjlSTENGUDB3VWZUZk1ORTg4WkVrcFhtUXdRSFJQeWdFaCUyRkljWGJuODZUZWsyUTNQbDdqQWpFdUU1WDIyRlplSVpZWmtJN083ajlJa2hOMEpNMXFvU2JNUE9wY2p1aWNoN2R5TjVaREh5SVlJZjBIJTJCaUNaTUkwUVlrJTJCR0JwVVNyZEw0QUd2ejFBcWt3bVlnRTBkVFBDd3gzS3lsbU90Smkwbk8lMkJxRiUyRk11cjhlVTZoMG1SVXRoaFJPVG5wcFBnTkt6NE56dFQ3TlZISndUVHRoOTlRR21TSk5BTU5mUzhkdzhCRkZNJTJCdGhIemtuckh3bnI1TWhDNVhkTEclMkZSdzRDRjJibWJkZSUyQjRSMmU2M1NmU2FxanFnTWtuViUyRldWZEdtJTJCSlB0MkNPZnR2RnZ4M3AlMkY3U0t1MEVtZlBDOEpwSTFrY21ZcVVRNiUyQm1vckh0bmk3a0RuM1M3dVdhUEElMkZicmhveE9kNDB1TE04eEYwclNRWjJJOWZIUlFGNVJCdHB5alBTUWt0cDZ2aWltbnZhVU05SHJ3bTlzSU9Qd25FeHJGdWVaMmlkbjhvWERmdUZvTGJzVlJBWUZsUlBrSiUyQnNUSHlURkM1M3NSOE4lMkJZUW5oRkJ4dSUyQmVSZWFSalVwOHFESkphVGhjTm5iMU11MDhQNXFaYlZ1JTJCOXBxZzB3dXlZVkpYSjhMNFh1T041bnk1M2RUb2dYZCUyRmg5czVmcHg5ZVFKVzVhT1VRUjRjbnJGMXpvSkNMcENJbTcxUD
NIeko3U1BnbWRhOG5FUFliMWJCQVRud29uRTZFSUJnQXpoWSUyQk1OQlZ3M2txTGFRcWUzRGlBTWpKNllkQ3dOQ2FIRk9rUUFqQ1RTTVZEU25wellDNXY0eEdGSSUyRko0ZGVQMmtUaENtMGdaZDFqTU1IdG9obnZRYm5qbzdreDhqQTVxbGwlMkYxWU9CRWVpYUpSRVZDUXJaTUlrOFZaTVlzdzljY2U1WXZGMlBKNXptZlZ5SVpHUUhOSVRmQ3UzaVVKUFJSaW9RdHVFOSUyQkxmQ1k3NFJ0T3ZlOXlLR2NDdU0lMkZKaklDYmVyJTJGVldUVXZpSXUlMkJxSCUyQkhxU3UlMkZjdzQ4a1doZkJVQlNKJTJCVWtHY1d1R3piaXNGSXlWS1daMUZwU0hSUHVFRyUyRkpyQzNMelJlTkM1JTJGdFhVOSUyQmFMTmN4R0pRc1B5a29PdDk3VkVGaUJjb2Q4UnRnTW9xbHJ5dHZBejZraDA1OWZjZmolMkZaRWY5M0olMkZocnk3bnNFVHd0eXh4JTJGZmxGaEdEdVI1R0dGTzVYZklCaGhNcGNQUXBldlFXJTJGT1Z0ZyUyRmxYMzBNNjBNTmQycDBDZGt2MWdKN2RabExvNVFOdUlud1phOWNzcGMlMkJDMFMwOGFXWSUyRmVJQ2VSbjFrRDElMkJJTkR6bHg0aUFOd1R0Wkc3MXElMkJhSmJOQjk2S1lyNDhJJTJCZHF0UVYlMkJEendUN1BrS25mUkdNekxqdCUyRnZCd2NuUXgyZjFhSnh2ZDN1dDV0cEc2ekRqbnY2R0NnSCUyRiUyRmxYYiUyQkpiJTJGdnp1ZHk2Z0JiOTNDYlglMkJhb0pONW1BUjV0V1k2blhSWVlTQzM1N0tyJTJGZ0hkeG9lYngzNyUyQlRmM05BRHdiJTJGZ3JwNWFIbFpJWUtqT1ZSZGMxcTM1NjFmVUR6N3lZQUkzS0tueEFUMVpZdk9XNjk2TU9WbHoxbEpXSyUyQjE3SVduTkVVVUxPRmVkTFg2R010a3pTWUpZZkpkTmkxWWxxQm85MnRTUkRORktzZG5SU1pXcHpWeG96dUdMM1Z3cWVFMHEyYUVpZyUyQklJTnpEeUhIRVdKZmdkcTVJQ2VEcVE5MWZLTCUyRkZFMlNRN1JEaE1pT25rdFh3JTJCRjI5QXR5MFJrNFkwdWxvRW03MHhJJTJCSjVZWlNkQmljS3JveE04VkZ0clpjV3dZQnhCWTFnVEsyeXM0djExa2dLSjZCajRUeVlCc3NScmlZSlFaaHlvcTNIcEphWDBRVVRQZDd4N1hCa0pJdnRaUURaUUJsJTJGSHJhQWs4a21mNnFWWTRHazhyaVBwNndoY2VuUzVWbnhqcUtzUDVNJTJGUHdXd01EJTJGWDY2MUJtc05ZRTdpQThBZkFCOEVRTTNIREN2T2gzSkNyMFQlMkZlWHVNV0dkclNXVGozUjVCNmpmRTh3bWVsSFYlMkJrbVJLcnpoQTIyU2IlMkI1Uk9heFpYSklvUzdDeFI3VXBrcmdzWUVaQVZTYyUyRkpiaW5RJTJGSU96YzZVb29TVWslMkJ4WHRudDd2djNNeklMcnljTzE0RmNBJTJCQVFnNm96dnZWOHl5N00lMkZkRFJUUERQRklCT25BdnlCVTN5VXhQeDYlMkJ0YnpXQ1oxSGh2ME8zM1cwek9uZ1l6MHMyR3pJWWFZZ1hLSld2OU9Odm9STkhOaVJSUVlMejczbW1Ja1MlMkJoJTJCekpsanlxc2dZVUpyeU45NGJYRXFPM2dGYTA3TEdnZUJSNUpIWjBWVXNQTEhEWUUycXFUUWJXamNadVpjUGlkOUpDMUQ2c2xZMnhvQ0trNFF2QWhrdmNNc2FnUjdBM0JkV01Sb2ZTbmQyRVRJTkZQaHpLRFplMHdyQjlZVCUyRnBHVGpiMEowSTl1QTB1U2FGVFZkWkx0ZTVBRkwlMkJFdEV6SlVORl
QwakpIYW14VFdtSFJhY002QlFnb0FEQXl1a0taTHMwbSUyRmk3QVZ5Q2NuMjIlMkI2bnZ4NmRmWUYlMkJhTyUyRlB4V2duOSUyQldla0wyYmRmSjNaVjBQeDZOQ1BtS1pQdWYlMkY0bjRuUjZRT2hDSjNDOFZEbWJHUWlyZjFsVXlwc3pkbFg2OHF4QUZFb1luM1RMUEdncmtOaUowc0NSeWZuOWpSQjFSUkM4JTJCRUN3VWpQMHNtUmhQdVNaaWxnV2dmWWtxNGZSdTV0JTJCa0RGR2s0YWFMWGFFYnB0N3BJVmViUE50eXc3Rjh2c2hTYXdkNkRpZ2RNTm1YaCUyQlp0Tm9weVVURFFBS1lHRVVENHdPdURJdmhmb25zWEI1NFRMQVR5WWRJeXRoNlRub1lUTFhlTEpvRlBqb0wxUENKczIyRWVxS2t2bXV2dkx0Nlo4UTlNZEkyTnBPVzBHbno3Q0hEN2pBVHJCOE9neWk1ViUyRkR3b1IyRHYxWjQyZzQ2MDhLTld2aVVyb1l2MmhUc1RvVmg3NFVOcVhMb3NVQUlGMElKYnlHZjF2ZnclMkJtSExtSEVKU3lMcnFXa0NyUWh5RUtjSDBZbExWRzlNeHd5a3FJTmQyQmh3ejJBTXR1ZlF3NSUyQllXcTU1RmNvRWN3SSUyQiUyQmtVYlZoNCUyQkxuJTJCZ3pTMEo2QU45Z2FaMXlEVHdqRUdrTElWcVRBWHZNVzNCeUNLeHZqU2x1QzlqQnM0NmlmZW9OUXI5NDl5bWZVJTJCJTJCNmdMVVVJJTJGZVE4VTg5RkxhTXA4VUZsVzRSazNjWGJERVdJNjd5bFZod0tMdmhaSVB3R0kxeXlTejRkRnI4SGFUVFRXZzNmemt0dTVTb28xNmtTQ0pzbFIwd3l6enUyM2lSSjlSbTJuZXB5c2M3cVYzNEM3WCUyQmRLallvQjhCTm9Kb1NLWE5NMTZDcDN2NUoxY2xEYVp4cnhLUFhGTEJUVU1tWUFZbTYxSWdmUUU4VlRkVUtBRUZnekdXZzZYbDlHbTAwY1lYRDJxQ2QyOGpCUU5VVWhXRVIxa3I4R1Z0bXZBeGh0UTglMkZpT2tRVGE3WmRZbXBQVlhCZ3N6aGRubHZXZFdNSUhaYmxMaGRRNDh5VlZZY3hvZlNvWWRDeGF2WTdmVFNJMjRUSXpERHVBV0ZzcGpDNEtseVFCREpOQUlETkFTNndOTlF2ZHYzV1FNZmF4ZmxYQ05lWUEwbHlzNFd2a1hlR3NCbDcxT2t4WGNlZW5ROU9kQmpRYVBvNTNvbmozNlp1JTJGMlRxMkNyMHpCa2hOcUNwRXhTVEtkTXpGWTR2dCUyQjE1MiUyRnh6aDMlMkJ1VGFrUkxJVkJHSWNVSTY1QVpDQlF2NVFPdFNRUmRsNnUydkklMkJJTmw2WiUyQklVJTJCUU9uVUxuYkw2Q2FpeThsZWdpMU42ZzVmUiUyRnlZQkNUem5SaEdVdU5rWXExd0lvZUh5Y0hyR3pCaVMyTXN5eG1CWTYxZmc2cWRqTndmc1JqOUl2RmtSU0lhbmx1ciUyRmRqTHBySUUlMkZCUjhmbXRzNjRmQWZCaXpPZzNGUzF0T2ZoVEpvOEpjUFNjMmtCaWw3OHpLN0tQSm5ybDl1NFFhZ1daZlRQTWFteXFJJTJGbXEyNzVIbm1Xcm1YTWZlU3FiNXMlMkJ4SnZiY2tsZEJGN29obFNUcEZZR2J1dXZjOWFGZUNrUWVYNFl0UXBWcFg2SUYwblJWclBjMjhwaSUyQmdwOVpaUkJIYkV3UTZVTnR0bGhQSVolMkJSckZzVGNJS0lQT3MwWm82ZzJ0bkdoaU1CbWtyeFBEdTlVJTJGYzZMeDFpVEUlMkIzZmI4S1dGTXdra29GcTBqempiaEtYRnp0a3ljbEFKUHFFREVET29ITnFrNElWcG5OMWh4d0xJb0VuSWlZYklreiUyRnBPanB1cjFLdnlXM3FkOT
ltQTFoTHlpNGl4UHFta091Tm0wY2VlMlJsMSUyRlBHZ3pvMHdzM05xS0M2ZndaYTd4UFlhOXZFQ3NiU3VhYmFid1g4SHZGdSUyRkkzaDZKUUdrOEdQV3hSVjNlWUUlMkIwT1VDQlVqdzV0aE5GaUFGbkZEbEk1bXphamJ5eGh6cE85eDVvZFZPaVM4ZmNFeWI0czVrT1B0M1VnNmxJdDJsVDBSTExmS3pVVjYlMkZvQjN0V1c1QVc5azBGSVRYYWZReVc4SkRkcnFOeTdBOHRJdVluQlA3aVZEOTFMbFIwSEloNklHUlRNSTBteFQ2R3FROTJwZUduVkdRY21WY3VHS2F0dXlyeVNMSmlDZkY3NUU5VXE3eUhWMXlPcXRVUGx2MDZDd0pDZUVhMTVhWGx1NjBEeERiZVJ5R3hpNDlFTlFoRzkyT1ElMkJwY2MwTUxYNFRwMTg4WExNdkx2aXZGTkZLdVFlaWtNTTJqSmZYQVJKQjB1QiUyQlhHTElqMU1DeUdFbVNzJTJCOGtyR2VuSFA3JTJGaHhwTzh5elhoRWUyVG5RSEFrOGV4RUpoZEVZSnFQM2xoRmtsWmpUU1FZYXJSSE5Sd0RlQmZabDM1S3hRYjQyMFJ1JTJGVlA5eGZqRjZRMkxZSXZpeWVEekY4S3lFeUl5TktKUHFPa053T2tYZmw0U1pjQWRWcU5sTVY1MUhhZEVrYzRzVFB5OHowJTJCcjB6MGlScTh1WHY3dzh0NDVvNEcyOWZSeG1aNDF0VXFURk5xQUV5cmFQdTEyRGcxTVk0alJHZXNwc1lZbExXSWl2eHZPY1Q3WnFuSkM0M2VxMWRSZ2dNNVc4dGZROHZvS1ZvQ2hJN25DUlYyTSUyQjFKcDhsWHcyR0NxakRxUENaRHZkVWJ3alNkZjBZQ3lkSktGTFVXc2pRSUxESU1iJTJCWDZMcHdXTUh6enprNFNkQ3d6RUtqMXFWYkFFJTJCWU5ZY3VndExTJTJCeDdUWEFoUlZOUG9qdVp2VG50V2RlTkJnckVOSEpqdFdFV2hJY0lwTjdNZElUVWR5JTJGRkJvaVpPQ1pkZXUlMkZ4dUglMkJYRE1ZbE5hRTZyRHolMkJTanhSQjlZVlBBVVNET0lUWkhLSUJFVk1FRVNZQnRTdzVQSHpad3VmeWFkV2E4cXd6aUZNSiUyQjBraE9qcjdnS0RjY3lvVG8wZ2dYS01WQ0VPdiUyRnJvaWQwNGhZT25abENaajR5T1BZZEpHMVd6d1I1eG9waiUyRndhVjhVeGoxaUdsRDJiTEg0SDBlbHZaVHo4MWwlMkZkYk42U01DSzdHJTJCQ0tjQjV0QmRGajQyJTJCdFpvRkpwMmRieWNKV2hPd28lMkZMJTJCNW9iJTJCZGp0S0lQdW5pNkJFMTlHajcwSmtDcjRXNDV3WGNuek94THB0bVdEcU0lMkZlek9xJTJCMENtSkpZMzQ5SXBsTEVneTRPV0sxSlRSS1NkbmZNdlF4dmx3b2xaR2d3WWhPNVdOYk9Ga3dtJTJGZjBpdzMlMkZ4VFV1dXhUN2J1N2lNWVVDUDVMdnY3TVBJNmJpeGljOUQwT2NUcmVxS1dUOGdVVlR6eDIzU29FRGlha09yeHhXZGswM1REaHBGWVJYcEJsYXZ4azZ4bWRWaWZrTkNjS3Z1aVVxRXFSRmFGcU5xeURiZHZYYUN6NTFsTU1uNXpRdmVXU0FSVXc2bDVyQTlHdXAyTHE0aWo4dU1MVDVLTUExUng2dUpicGx1RnpVbkNhRVFRUSUyRkJqNFpsZWI3YUh0ZDhqQXRrZHd0V0JPSm5lVUdWJTJCc3glMkYxJTJGa0VFVXVGZkVuRzdhb2UlMkJyN2x4eHpqV2dVRGJYMGRpb09RalQ0WEFNQUlWQUU4cXNYNEJiJTJGZlJVQ0hGYk90aGdKVFEzJTJCN05HOTZNeXVXem9oNXpQbGlINTA2bzJIVEQlMkZNaElFYnFodU
IwcUdKQ1Z5bFYzR0pDeElhTUloRHh5RWJUZEp6MU5RSmNPVWlQMFlJVHNuWm12WXYzM0lua0dOaUVaJTJCbkFZMmo4R25HSzBpb0RCcSUyRm1BTDBKUEE5MEcycFdDSllpcWNxUFgwaklFOTJJJTJCeFpCOVBwd2l1OVBPWjFFJTJGUW5CYmw5UTAyenJ5QWNOYUVHN2pVdDlseTFwUUtYVHNWVnhNRE83ZUp4RlNxdGJBSVluVjBLWndhVUdGejRCNHpSZUJSYUk2dmVISHNxbE51bkYxb3NrcU0zQUt2akMyU01BWCUyQmRCOXB5M0F6bFVDMjF3TW1zYnR0VTNJVHVMc3IlMkI1V0pNS0VYMjFJUlU1Wm9ialY3dkpnSndrTiUyQnNMclMlMkZ1aSUyRjVDWkFwNjVUV0pGYjZVd2I3YUVtNU81T210JTJGaGpwTm9adkdaU09jMm12SVdYM2tNQnk1c0ZJRGo2eUNaWEJ3SXlHRmxaYWpBZ1ZCbnd4MDR2dm9mUiUyRmFOSkx2ZlVWTXkwbFluMVU1JTJGJTJCTDYyVWE2WGxDdEU1UTJyaGFNS2tYVlJMVTlFU1N5ZFklMkZmN0NJNG5UTjhLNzUlMkZBSExlTERISXZZenR4ZXAwSXh4VGVNZGhMRjV3c29Vc2VpSVptOHhQc1psQk5qaHU0dEV6SWhDUDZ1c2VZRElrS0tsZ0RGMUdCVndTQ1JOaHJaJTJGVG9RdE8wUjR4YmRDeEJkd2JJakVaYzA0bUZaM3d6Mk1wb2NWbUloczhBMmczV3U5NTlGeEkzTG9uUGZGcDhKZ3A0bjd3MHVhdXRwZWxHQWtMZGtvR3NUNXFwOCUyQiUyQkx0QzlJd3FPcVdkSmhLd0NJUSUyQjd1ZlpreWhYUGlLN0RwMUwlMkJNSk0lMkJVWklNUkFtbzIwQlExNkl5N2lZMnNGUmtocEFwWWV0dnZoMk5KVjFhQ1F3T0FZVEtyQWtBYzJreWxGTThaVGNuWVNLNmlxbjRjRVhyY2RXS21Pa1VqeEZXa0U2RHZBVzZHN1B0dEJYbTNidDRrQ0tLZWZlR0xVZGhaZ2piQ2tacnUlMkJhRHpDVDdEMzhWbzV4ZjBWUmpLRlR0NldCSndva0dwaFRMdFRzJTJCV3E5SHNPYXp1bDVVZjM5S1ZzTGZKdHlVWmN4TzJ3dERYWWR4Vk1WTEVnZlpqVFNHalA2ZENzJTJCcUhVVll6OTZXMDBhZ3NPVjdxSXFXa0NlSG0xMHRuSTdlaW0lMkY2NGtvYVk4ejJUcnRiQWVQZUdpdndBRzdJdERvdGYlMkI0ZzRoRHolMkZXY0hiYVE1OTglMkJna3RKbzN3eGNkdzBqOTVtMFQ3Z1V3bXNJSWRLVGV5TVFVaEJCbDhzaUQ2cGk5TGpWc2VmMFpQREd0aTgycko5c1dEZFZaR2VlRmF4ZmhOa3BONSUyRkFUanEybXYybG05dFZNUFJ0Y0pIN1olMkJlY21Ba05iaGpVSVhFbFE4Mml6UnVHbDltb2F1ciUyRm52ZkRPZkUlMkZOWSUyRiUyRlI5WTglMkZpZFAlMkJKODg0ZiUyRmhlY0olMkZYWFhWUFVLaUo1bnpUZUVLSXAlMkJmYWpKcXZRWmJuZVRlcjJEbEJQSmJnUDZ2S2lmQ0Iyb3hLUFZXSXZxTUwlMkYlMkJwZiUyRjlQJTJGZnQlMkZZc0YlMkZZc0YlMkZZc0YlMkZZc0glMkZ4bGpnbGpVbE1qZ1NFOGc4Y1BraHh1bnhGVTNNMjdFU0VzcmNRTmR4RFBYOUdOZ0kwUEhMZ0FXMVQyTXdyWEs2JTJGQjhVRWNDSXBSTTVldmtZc09mUFF0SmtxV05UQXozNzdzTFUxSkRDazFhNk9EMlMzTTQ0bzEzNm45ZURtUHJRV0QzalhMNHpzbmphSVNnRlo3R3VhNG5MZFlIOWdTSTQwZEJMbFpPb0hsQ0xwa1VNcFo2R0QlMk
Y5eSUyRkZvTklXVEgwZlR0b01qazR3YjlKRHVMbU9pOUklMkZoVW8wV21ic2V5YWclMkZCMDlhSVNmaEN6M2xHZzlvbHB6aWJRd3JxWThyNW1xR2ZDZng3WHh5UFp2QkVndlF0bEJhV09Va1M3dyUyRk9Falpxdk5hZ3VQOHFHNkFSME1aSXlDUU1GJTJCMkxORTFJVWxUVlIwQkJ2WnRBZmpNbXJaNXBoc0FWczhKdDBJTHl4VUpQVlB6SU9jaGdLbzc0bGZWRGI0cDFma1QlMkJMdVF2cXJudVBpY2gxUE5jTE1PbUNmaFJaR1ZtWFpTZlBNWjNYRGozdW1tVndKbUhSR0NlazBEYkFrazNyeTNoJTJGNnVXUmVtcEhrMUl2VHlvYlNLb2JyejZtdzNYYkZaVjBSTmxPNDBHRVllNW15UGxhckpEWiUyRkwwRDk3NVFIWDdmJTJGMDBQdTJyV3BLZ1RybFJWTFRTT0VwJTJGWnAxMjc3RHU1R2JFRXBSV29QaTFiRmtOcm9oWDlzMjduVjVQR2RySDlsVm5nS1p1MUt1eWRkTklMNkdpdE9qSndzR3ZpZUltJTJGRGE0RXFnelRMbUdmYWVvN290Z3hHVlYlMkZaWDJxOXVBVlFhaFB2RDBNTHdnNWRSdExqeEd1Tk9JbU1Ca21KVTh6U3pkcCUyRlA0MUJOYU1oS3MxYWJ2d0lLZjNnRGI5dzBUVmslMkZCMXR3ODNOVDhXOGJESm9Wdm1QVTdqcnVTTFglMkJMS3ZQTkpKT0FkRTB2OFo1ZSUyQkJFa2F5TktHcVl3cG1PeGlJUkdQRUhRdHhGVmVmeVpXWmg2eDBpTmlwWnNTdVo0eGQlMkY5ZDZJYjgwdElReExvcG92cTd5ak15ZGc0cmFXSklYdW0wQ2RGUkhPQmJzN2o2YmptUjRUM1ZPSjFCaFEyaVJPUVNKMGh1bEtvTlBZUW5sREljJTJGaGxPZW9YOCUyQlZoWVhLd3pMeGlSUjhNMWpUN0FFa25NVlBwUEF4dXNqUHhEeGpGVVg2ZzhLUE1QdEZqM0lqWGhWVSUyQjVWekNXVGVUT3ljcjh4Rkh1TDFlcHJOcXpVNTF3WXhVdDlRaWRwcGxKM0g2aHFhZXgyMllTanklMkYlMkY4aXVNYkJOVEZxN0s4WFFBZ05GNE4lMkJLaHJoMzJvcDZKN0xxejBrekxoMWppRVAyQUslMkJFdWFvTnhqZVZFQ042eEZEMU9LSGJSa0xsVDRSM1ZsaHZlZW1FYml6VGxMWlVBTVhwSHE4amFWSDFYcVIzcmkyb3ZuU1pxSVV0JTJCRmlVYTU5NWQ1QnY4Y2ZHT2pGUkdEcXhIQ1NsMzE3SnclMkJqMk9PMzBYRXBoWXVMd3NHajNRUXIzYjc0dk5aaSUyQjVva1k0UnNGeUlDMkpmb0lVcmNVTFhvaHQyeEFtR0p4c25GZXl5cUhNYjYlMkY3NlFhcFlDREtaRzN0TzRvWm55V3poYUpoYyUyRnVaT05FUTlheldyeDcwSzdZb2Naam4zNjFjMERzeVQlMkJkUjVRa2lleTUlMkJKTVNZRmhBNFA0MzlGUGVTeU93aDhwUmtnaUpQNVl2RWhJNU9PUFRuUVdvZDJndVFxVlJXJTJGT0RCSk8zRzFTWnM5N05BS3FxNjhZeG0wcTAxTEJrSklhQ0lsTGd2JTJGSDJaS3pXNElUQ0F1S2FDejlNMmVmcUdpTmFZamo5WkhlVGUlMkJQTXBFNlFzOUlyaUp4ZmlDSXdveTl6WGFRaHBSQjglMkZsVUZnbkRNVndXQ0kyVUdGU1JZYlZqMmZqN2R4bWdaaFdJT2JDb2dOaXJPZ0h6SjFFUkRqak9xdjU4VmtvSVROJTJGVW1EakdYSVFjdnJMYVg4ZWlwdGliZ29rME9aNXpMa09xTmVGZVdhcEdQaGM0WHk2czYyTUFFJTJCa0tnakI4UUJXSmtKSXRxQjVtWFRoR0
xhQ0l4UDV4RkZTakh0UzJ5a2ZHSm41UXV1NW9jdWRSeUhyMzlJSFpLdXFSeEI2SzVuVXkyY3klMkZwckZuTSUyQkl2R0dFWWhIT3owZnlLdnY2b08yS3ZxWUlDZFBObVJIajRMWlNuTkdSdG1XTW9saER1bWpiOVJ4c0Nnak9sYUdxa0VWc1J1ZkQwT21MWU5Cd3p6N0xlZGNXUG1DTnJBRjhVTGJoOVpXNkt5akNaM3hVUEtHZFZ0bWQ4RnhCMVBCOTVMQ1RCUExUTFJwSWdldm5HRU44WmdIY3RjZnhweXlQQ3QzWGZDMkEybEtrZ2JUakdvVkRLa3AlMkJkVUFUcTU2aEdvS29YamU2aFFtQUJxMURzb2tXQlVyaXdiMDNsbTAzUWF2TDdGZ3M5QmVjZUdIajFheEs5aXNma2RIZXo3aTltSW5oQkpvZ29SRTlEUXloVkdvS0MlMkYlMkZINmtEUG1XNzh0THpTJTJGb3BkTFR0SkpjRnl3MXVvbW0yNk1PempkUjhyZHN1NlA4WnR0eG5jTmZ5SGdua09yaWZEZkRhMnB5UWpJTTVncDJMRWNMWm50SHdydFZGS1JOalJDRUZvSjJwaFVORFUwaVN3UjhpdTNoVjFHeUYxMHlpd0d6MUpFalN5ZTFnN3NuNEJoNlUlMkJrYk9ma2V4cjJFcFhLOSUyRkdPSHElMkIlMkZheklvTnhGM3JPcVcwYSUyRlhFaE5jRWFYNkJYNHpMQ0l2bjVvS2VlMFolMkZ1aGh5JTJCUTRvY3RZJTJGanZnd0JXelVZUFhacm5hTTdiMXpTbnRFSFZoeTJna0d4Z2hUV3BlU1M5RlhYdXB6TnlDcmNnWTAlMkZORTNoeGY2cW0lMkZBa1E3TU1RJTJGMjdsU2tyVXRUMUVsa2lIUTZBOHppS3g0dzlRRVZjak9pYnJ6UUtkOTZpMjVNSE1Da3VLQ2ZFZlNhOSUyRkY3VXdaWFl4ZW4xVnFCQmFiRk82R2FKT1NNV2p2SjIlMkZrSDB6VXkzRjRjR09vUXp4dmVTanJ2N1VsJTJGJTJGaGVjNDgxJTJCam1zJTJGdiUyRmh0eGZkMjVTQ3JrSyUyRk01T3Y4dGpoUlpPbmdEdnRhcjdPeFk3Mjd6OGx5c3FyTWRiRmJRRDZPSDg3Zm80OTVzZXVzcmtDVm85ZkF5cnRxM1FybSUyRlRvMjhzMWtxTEo5cTNPNUpPRFNXNGpYJTJGeHc5cEVTWm1HZEoxZmZVdnl0UiUyQlpZaFNPN3VJb0ZwZ1pRSW83N0V0WWElMkZNWkRoQ0RVaGk1aDlad1lrSjFiS0R2QWMwRE5zMHVtOGF0cllQeTg3TkZPSTlpekZwTktaVWFOeEpibGo1RW5HelUybTBFNGFxdUl3WnJwV1lOamtMMiUyQjYzMGs5V3ByYWU2UTFkVGVrRjR3NTBmUCUyQk11bGhJQ2oySGRHM3dLdmtjN3VTckpkNkYzUUZ2TUExNFZKS3o3NXdHelBHMWI5cnlSc1ZIa3VxUEMyRUliNXVuTlEwUTFSZ2RZUW1vWUprRmdWMG4lMkZ6Nm1uRVBIb3pCaEEyMXdVeHFTb3Fha2lBWlQ1TzNoYW1NbTJncFklMkIlMkY4Wm94MHcwdSUyQml3bTRNYU9PZHhzUVJHWU5UVXd3RyUyRkpjTXRyJTJCMGlmbnNtVmNWN29rVXVzMWVsUWh5VGo1a3N6VmxWQVc5Q0ZEa1lCaHZZUFkxUEIwTXhUeSUyQmklMkZkdk9rR3ViczZRWnJpV0JGZE1mcnUxWkxpaTUwQjJrTTJpTDdsOCUyQk5BWEJWc29NTDBRRUlhJTJGJTJGemVndjk0WSUyQlo4VlpnSDF2Rmx2aWl5SXhxQWlGMVF2UHJIY1VPZjI4bmNtc1lMdVpNeFNROWVDaE56SnF4M2dzJTJCNFdIQjh4b2daNGZ5dzNzT21nSTduTFlTWCUyQndIdE04TUR5bVJnNV
NTN1V6M2h6WnglMkZZaUpvd2ZsdTFiJTJCY0FTc1plRmQ1QXFGSURQTkRzaXpPV1luaGM1TVMlMkZYUyUyQmElMkZtdGVhOVdzSG9YbmZuaHA2RGpGNElxRTN3Q0VKdnI0TTJMcnZPNHdkczFuY2hSM0RCVXJUb1hwanNyalBvSUlSR3UwQ2hGQWVmRmVhVnpHbXZ1bkxIQTk1JTJCdTl5ZnBnOUtueEtJaFBPSzVGRFklMkYwaXZveUIlMkJubjN4bE9Tek1Zamx0bUxQaHVNcGJzZUFxZmt4VHBJS1JzMkpoVHhnenJjY1Q2ZUJKeXV3a0paNEJJQ29NS21rZyUyQnBqWTZHWWxycHV0cndJampqazk5NEZWSEVMZGNzTzZUJTJCQU1QM2N1WU93OEFIbU9vdTh5dXM5M1RXbm5xMXdvSkRKNE1FZyUyRjhXRERvVXh4UUF5NHp3blpPM0RDcHpLaTAzWTJleG55U2FlU1p4RVJnUWNyVFY1eG1zOTM0ZzBPMnVONkN0bnE0NkJRY2lGQ09oSzFaeEIlMkJ2JTJGcnZPa2puT2pCRmNXV2ZrUFloZDFmMzU0MGZlN0NjQVZ6YnFTaXFMS3FOeENjendUWEY4YUNqRnIlMkZHbzRlOElJSHFIS0F3b0R0NFFlcWhsdiUyRndsbGhubmViWDZIV2tUWTJxOUxFVFVhYm9iV0drNkh2TXdFZmlHTTRrNGNSbXoxRk5SNUtOa04lMkJVT2JhaFRCUG9pYWlGNGthTkJMZzFFN0Z3RW5BJTJCWlhXb0dEYnElMkJvdWU5NkJpaE1WUDZsR3JQclQ5OElNN3VtQUNNZmRIUTRiV2pCR3d0WTE1VGVHRSUyRjZGZmRHNmgyJTJGQjZsb3F4RnlQZGJyZm5qWldNQnclMkJINHZLSlNOWDk4NVBQeFdVbDV2QnJKUzQ0Sm41TW9TQ1JEUDZCc0c0VGZudVNxa0ZJWmc1Yk04ZUw4VnUlMkJrcWdmWGVMbXJEMndCbzFDeWdaVGZSeTJIS2dWeFNhT3I2TVVsbXZhYTVhek9YV2UlMkJJeXFYYmt3UEklMkZPVSUyQmFNcGNXa1ZDMG1oJTJGdjNKSGI5ak5xb0hDR2dPbmxJZ1QxUG8lMkJ5OTIyY2R5RGoxZ2w2eFVzTnM5Y2lGVmUzQnVYdFROVXBCNDVzcWdPTXEyTWx4RTM0YW9TdGRXMHpuZ1czbTNsVXY1dll6UVU1b2h5ZHZUeHdUeWk1QUJnNWpWcmxMQjREcnBmJTJGemZRY0tzZ1o2U3JpS1h4MFcxamM4UnVtdldpJTJGM0lFa3BWaUxvU1ZJR3JXRiUyRmc4YWppM3J5V1hOeHZwUE90dlBzU1VFbmFlJTJCVHdxWXlJTTZqMFRubGNtNjdaJTJGNU5nUmlhU1BEMnFnTGFEZ3BQRVFnSWxCeURqaW1HJTJGM0ElMkZUM3gzZG0xNW0wR1BzbWwzYnhvT1FXYmJ1b3RPVzV3Yk53bFBkZ2lCTWVmV252TGthVnVOVUh3THIlMkZMc0Z1VzhqRktPRkJpREUwRjhUVWtOUmZWd1FDUHI1ZDglMkI2T1pHNUFlT1RzUVdDamlpem1YUjl2TmdNakdBMllaeiUyQkhhWm1VZmhUM04xaU1VVWtzWWdmc1ZvMU5oVmFUNVhGSE1uVzVtWDhsTk5OWUZ4Qkx3dndwWERoMHZXaGw3Mjg4QmVDRVhuYkROQW5lZmx4eXJBVWMxWElJTzBOb3NmcGR0d0R5cnBlMkg1aSUyQlZEcFkzeGUwSm96R2d1aiUyRm1HV1NuY2FrRVBxemZTQmZRZGF2WUYlMkJMdG1EMHZXV1BaJTJGbjlLRkJoWEUyaG9GUmt1UUZER21xbVlLV3hERUludzV2bCUyQjQlMkZWWEVlcTZ1NkVrYzFMVzhScnkycTlmUnNKVXZhN3ZUUFUxMGZGVVYxTTRJQk0lMkJ1ZW04OWRHSk9oUHJ2ND
Q5S2JxVzZvVHklMkZZWk1sM205JTJGRk1TY3JpOEZjYnhLd3owJTJCSlNmVFg3UU9hNXFVb3JDNGRzaG1CbmhKdFlpWWlCdXElMkJSWWdya0hFVUY5WW8lMkZ2clQ1cSUyRnRydVZ4bWNDNThEJTJCSWp0R2I3JTJGYXMwdjZqRmcwT24wUUJDUWlhQnR4dm5ROXZ4R3hPczB2NGNSMk1VdmZBSTJSMUR5OTNXUG9YTldvdVBVT2IwNCUyRnYzJTJGU2VZRWNQUkNIYmxVbiUyRlZydnE1a24lMkJyUHk0SnA3ZDJ6ZHkzazF1bHRBVlljZ096UzQ5SE54JTJGSk4lMkI3c0h3SExuMXZhbDVyS1h2OCUyQnlDZ3Z4UkhSc0NadVJoVnFRbm12eFN3bSUyQiUyRjE3ciUyRmxSSU9ZNU5YNTBQSnU5QkxYRGZ0RCUyRkt2bSUyRlklMkZEcjM1eVBQZ1RWZzBhOHVCSTI1V0JMQ1RpdkpickglMkJ4M0hHQzNxRFJ2RnpGVjNOS2hpRURjUGxlaGhTR2ZCQWtJZkRUZEhaUGI3WGJnMTFmYXVTU1V1MUdTZXlvR2NLUWRkcHNmOXlXaVpEanVwOENZNUltNFdvd1lLTlNsYmJ1TUwxa2JvJTJGTU12cEk0cVFvOVZQVnpyMUJuRlI5SUw5ek9oWFYzb042QW1ERVhIMXptQ01Jb0FxdjJQJTJGdVp2aHd3SzR6aTUxZHU4TXclMkY3dzN1MUZodUJMQU95NXNXUUlxbiUyRm1wUnpIdTZsMUlNaUhDMjNVZkdrMzBrbkttNkxZbE1JN3o0d05tam1rM0lic25GUVY1OG9FQ0h0VEdvem1aSEdVSnlZTmRuTXhqaG1TZXBwSGFFdVZhRUU1TXNqZUdGYVlrZVQyRExIUXlMbkVYbVZUQ2p5TyUyRjdEcnFwTWphS3h4UldQdVNpdHRoTWllJTJCNmpNRTVqdiUyQm5DdmFNM1d3REclMkIlMkZEM2JHbmljcTJwZ2ZsZjJBR09YS0ZWUVl1czdWa2IxZ2FRellFaHJTc3JGSEYyZ081UFJzUlRkVFJGUjJ6ZFlqOVpvZkczZVlEQ2psc21FSm83TCUyQnJBZWlUd2NqWEtDamwybWhhYlBVV0U5JTJGOVBobVRNdFlseENxenR1UU5LVVRMd2hhQ0JUZ29LaSUyQkhPakptSVZuN3lKUUElMkY5a2MyRWNZbE5lajJhd1J1NXlCc3BXd3RDY01BNnpaTEpKc1lBYU1YbUt3QVpVb0NFNlRwQUpQWlgwVG52TjNMRXdDdDZqOFZWekczMmxZQ0xuS3hpdyUyQiUyQjYlMkZQdiUyRjlNMjBaSHA3JTJCUGZ6NzZSJTJCJTJGeWdTYUpOTUlrT3NkTnZvQlNuJTJCZ254cVdEUkdxUXZadyUyRnR0ZXlod281S1NSSUVNZXRJSHdaT0xEOUFxeEdvdTAlMkJ0SThoZFBVZDlsMTdlclRseGIxUVlTYyUyRlljeU1HZFBKbG1IQjY4bXlESiUyRjhlWW94RGRGTWN5S1o2RTdyZGFLdDRUNFpkQWhXaWcxJTJCOHFvZ3ZjWkEwTXZ0TzN2diUyQjhpY2VLQTAlMkZjdEQyWDY3YnNqJTJGdFhWNHRnWWdveUN0OGc4QzB2SUVoSzNKdGhoQmFBJTJGJTJGVlZvZ1lMOVZiREdRdnRTSjd3aEdONkhVWVlKQW9FdUNCcVFWTHUycUElMkY5amMxanJiMWlLNWs4SUNSNDlqcmlvbEdGMDN1UUtvTkJKOG9JMVhlV1FyQ25sa1hvMGpVSzVraGZ6ZUVKTnhkbXVxYyUyRjNyWjNpS3pIdW5kRmZwa2VkcUJvcHVkJTJGZGFDQUxDZlZGOUdrQWxnMHU3T2VPSE1OcW1RcFp0UFklMkIlMkZhR0lKZmhZbG5EU1hWOXZTRXYlMkZRUzFSYXBob09lbEZTb2dya3VqT1RGdnRyVW
FZODBUMjFKOE5QeWNBWmNFR2YyS1pxaEIzZlZjVDdSRjk1SlluJTJGUTdOYXNuVk5pdWtsNzhTME5HeWVhZDZKJTJGeExubiUyQk9RUDVmOGZ2NXJrVkZUVGl2ZDhsJTJGJTJCcElNcHFWWWlMeHhOQzdaS3ozR2E2ZDBDUmo5ZnBzWDh3ZVoyRDFtdTRCUTJhWnhTZlh3S0VQRXI0UXRlWnJ4ejJhOVl5NTJhYk5YdlhuR0trM0tOSDJCT3YwJTJCZ3E3d2NEQzRiJTJGNndXUUwzcFlZbGFsMDhjRzZ2WTZNZklUVllCQjlQVlZ3QXBGV1prTFJTN0JKMXhqU1IybTBKMmtId3JrR25WTUc2OWI0OCUyQmc5REdTVlIlMkJ2QlRGTkRORHY1RksxWiUyQjBxWEpuMXliVnBEOWhXYSUyQlFseWFsdVMlMkZ4aFhzZFRENXI3a1lSMDFiJTJCWWZGMURVVHlVQ0hLJTJGS1puVmhLUzBUYTVBOEVQRXlmWFVYZTdSN2lJblpBTDF4Y2wza1Bjd2g4OExHMnR5WHNtUnRGZE1PMWFvZ2hqdiUyRlNzdkxhWUh2dlhmRXglMkJZSThvbGozbGVlU3J6Qm5oenNaRHhpOUVIJTJGYzA1MmtXaHNVdEZ6MjlpQWxtRjFBJTJGJTJGYkRVNHY0Slc3NmxHRDFXdDVoc09mMlluQjJta0tya0ZHejBGUnBIWjdCZ1hGblQ0eUZIR0QwdVZPOTVOanpqaVlpOHZBdDJKSndZZUFkSjdibHpPZG5uSVYxOHdWbjkyMW45cWZLaFlHNkpiNHp2cEcyNTI2JTJCaFFwa1lIJTJCOWltaTZTeEk1TWslMkZBWjZYOUpRSFAzJTJCSkJYZ0loUXglMkJYJTJCbE80SEI2cmpXZFQwd1FOUEJmY3F5c09RYlZYejU4UW5WaGQlMkZkQ1hUNU9kWCUyQktOQldyY04yeDFST3RlSUFOMHBvamdwZFJpczVJNnpaZnJxbTlqQXNlM2hmJTJGd0M0MEdzc2w1NkhHbUtmU25NWklTbUUxaXYlMkZQZllGOWNvazRva0ElMkY0JTJGbXNkdTZTTFRPaVAwbW5CMEVxTXdZWWk0cnVQVmU2MGp2JTJGQzg0dk9SODV2engzd1hSQ1NVVHB5T0RaYndhZlMwV2lFTkZza2R6MWhmeUxldWdSJTJCYXlWc3NzYyUyRmZ5RUt6clBlNHJWdlpZRnolMkZHWkVkeU1mRnUzUTZjR1RrWGlxNk9pb3A0ek1xdVZweWNpVTg3ekNCcHM2UFFZcng0QTRqMzdqM2daM3RyaVhlY3VMTFklMkJHVzhsbzVudWNkVU8lMkZVQkpnNjRROGlqY2Q1U1NwS25XMXFRalJlelVpOWh3R2RlZmdYalVzZVBqMTk2OThab25PQ28wM2pLTThqMFlHcjliWXdvRkU0NlEzeXhYWlhHU1VxaExQSklxWDBIelZkS2hQaDNHeFRSUE5HTkFjT0FoOG5YcmglMkIzQmtHTnVRSCUyQmg2YlIlMkJTbkgzOGJSUTlZcU1JaEpQaVFmdnBURlMlMkJPZnVTJTJGQ1F3djV3QThwUjlzbFU0WmNLamQlMkZLaURRN2FDVHJrc2M4QmlVbUh0dmdoSWpEb1VBaXJCT3cxTlF5cmZiRHhBSUpvb2h6MlZCb2JnRXpKZCUyQkFpY2NvUFpIYVJLUkxFc1BEOTg2THdZbFI2dlVDellrOTklMkJTR0R1SE5tOGhaMVpINHdDNFplQ0JLcW12cTRZRWlBVW9NbzJyMkhxeFVMa3pzaUUlMkZnN1QyV3JpcU9OTlBsSEZaUzhVZjIlMkZESlBMRCUyQjlmM3ExWk5UbEdJckR3M1BKVkFJV1FvemZmMXZSOGVNOSUyQk93cFR3Wjk5WklVMGYlMkZVUnYybk51cXYycWplVkJKczBBMGFpMUIzTEZ0Rmo5QlpIZE9NcXpvUDJ1Tj
ZUMU9JdFR1VlpNd2hFSHhyM0VTQjhCUVFDUjVFQTRkaHg4YnhVMDFEY1YzRklhNzZQeXNEclRYNlEyV2dYS1lBSFQ3bld4RCUyQkhRUWVXZzRvZXJ6U0YzMk53SG1neFdnd2glMkZlWmt0WXBrcHIyd1lDcGlYMzh2Nm5mNFVNMUNvRzVtSzhjJTJCVGUxRiUyQnVCRG03V0V6ZUZuRndJSXhCNnc1Smp4JTJGUVBFNklNaURPNVFmTWdXYTY2U3Qwdm4zdTFldFAlMkYlMkZQYkRIJTJCc2klMkI5SUZPcGdjcEtxSG4wNGdJTExjVVJTa2w3QjBpR2taOWhLMXc3aXdOUTZNQWJ2azJaQ2JINWt5bDZ6NjgyVE83NWZnMmYzUEtpaUxFY1IlMkZYUVdGZGJ5S2hnbzVobVNVVDV6NXdaTGdrTjVVb2lmVWVDVFJ5WDJaWlFqVkkwN3VJazViaHJ1eVZITCUyQiUyQlRJZHMzaDM0dWRpSTFkY2VlOTluJTJCYmYlMkYxbFJsOCUyRmMzeXZxaE13U0J3TGQ5UzVUZUhUT05PMmdCeWRMVU1WU3c1bUc3U0ZaSGxYQ3B3Mjg5dG1qWUJCZjBjdUhDc2ZWNCUyRkVldmJqc0pRNEpzUFNYRGhWR1p6Wk5JU3RJZXA2OVlveHlnN3Q4V0pmMXZSUFclMkY0MnpGREkzaEZVb05zME91N0ZZVE5EYXR2UmpqMDdtdzJ3dzZIQyUyQmJtanFmWFF2RGxGRmEwaFdxNTYwUVpIQyUyRmVodjNoVng0Mm9lYkhGYUd6WWNZc0ZjRDJ4MG1oY0JwTEN6SiUyQmZ1NE55eGpHQiUyRnVXcTZheTlMNHh6bHBWZkJBMkpiUk1aMkNrcUpNWXN1WDdSNEQ5Rm5KNGtvTFFmOVU2TjRXWHIxVDFjVyUyRnVxZWdFbHVEM2xSSGlyUE1DM2MwUXloWE01NzVJY2h5cnU1YkE2aHZTclVsS0lHdVoxQVNpc3N0SzNzSEdSaHc3anhKJTJGZU9FWTRuZzd4dXl5YzczWlY3SEl2TTZUVU54NnNMNWZTMTc1djY0MnU3JTJGZXRNVmhBY2MzR0RPcER0MmI4VmZBN3pPVVBsYmNwUXg4Y0pTVVpjZElHOEFxT0FnY3l2azVkTlJYRUNkVTJic2ZDZTlYQjNQS0IwaE5OY0Zic3p1cjA3NzclMkJ4bG4ySlQ4NXluc2NFRjlRek9vRDhoN0VNQjB4NGZuS1NnSEsxR093endUYkdVS1NrQng0UGFjcEk2WTczJTJGTWFhNTNaelVmbnNlJTJGSHo5MzZvRHZWQm5jUVdvZXVhNGg4bGNQdXR5M1FqQ0klMkZGcW9FRFNWbEFXa09OUDg3empUcmt6Z1p2a0pXM0l6M28lMkJCcTM5Rm5jMlZXR2lqSnFERGJ3NEJPcXBLMzBCWmhjSXhwY2JMcEpmVVNwcm1mdiUyRlduQ3NWTlhmcXVxemdPT1YlMkI2OWlZZEI3VSUyQm5OaHVWN1dQak1IaUM5elBSODZHNFlmSXZNczNoQzN0MlQlMkJ1SzZoJTJCczdiWHc3djY5dHklMkJrRVlGdHhRa2tvbkoxOVFNNWhOTDRpVzE3UnJTV05hVHQwaFBla3hodnRrdXU5R2VJaUVKdlN6WEszdjZrRXBRTmJQeFlTQTRZek5RWDA2ZlVQQ0tXb1JQMGRqU1Z2NCUyRnZybm5ZWVVtS0I2dFRYaTFJb2psR04wSk9nR2JRNkpSZ2R2dHFrRzdzJTJCR3ZNcDBOM0JjU05rVE00WlRrWWc1VnRKalVmMGFNSmJ4aFR2cyUyQnROWHRsJTJGZTVNejM3cktVU05yRkY3JTJGS3JrbEtiMmNUamF2ZnJuMkhmeWR6TG8weGZmNjI4UDloSk5meXRTUjlSZ3JwYU55eHd1T1I4MzZLTHJreG9vMExRS2NRTDdhdXcwMVJNOCUyQlJRTERKY2FkN2l5OU
lCZDQzMFhyM1M3Sm03NVk3S2VZZUEwYlN3cFhBQ0xmOFJqTDVjVyUyRlE1eGZEVUQ2ZHdCUGluZTlqeHB5SEVxbm4xJTJCbzBWb25GMkE1cW9sblc4R283TDZoVkNlWHNlVDBZNjUxMSUyRjlkQmVlSFE0VyUyRmNWb0h3emh1ZVNHJTJGWDAzMFhNdnZRak9zMlRqMzcxMCUyRjlyTCUyRk56TEpUeEpzc3ZCQXNEdjFQc3ZucDYxY1I0cDJuRGJxNk5SbEFsVSUyRktESEJoWlpnQXZ4YWNWUVNoRmxtcWR2NmZaR29DWSUyRkFBaUd0VTZZejJNS2cxbFBaSFFBVEZuSiUyQnZYVlZXNk8wYTdMeFNjR1VaeFVTVUlxaVppZ2NUOFNpV2tVSlJDVTAlMkJCSXNEQjJLYjZEU3gwcU1kTUolMkJPTyUyRjNTU1FockRNbFlIZGkxV0FtQzM0eFd3S2pjRE92SE55aUNRejQ1REVaWXZ4NG5ldFY3Ym9LYlE3VEt6YTN3aEdLYmsxdUM2ZG1ONjY2NTY1QlZzNWo0am5IcHZubFRKb2FWJTJCWW9mSkYzQ1BYcXl4QTZWSkYxeVdVaU9rNzd2SGVrcDlnJTJGSjNYdUhmbloxWEJrRFc3OGd6R0hTVGslMkZURXZHVmFGTUJQRHBqMktSRlRlVmpwdnd0VEklMkJ3bFZGdXJkeVAzcDRQJTJCRzVNeDZVUjBYU296UUVaU3ZuOTdEOFYlMkI5aHltWU50VDVGb2dRYVJ3NHc3Wm1oZzNpbkl3eTg1dDJOQlh4WlBuUkk5M0k0TFczOGFJTWtXNHFNUEk3T0xBR2pNVkxJcE1Fd2lFYVduaVEzakdIWFlaeWN0U2huNUpBd3ZtUUIlMkJ6QjlPOUEwQmxUZnljZ0thZ0U3dzRHY3JudjA4bGdHSFVlTGNTVGsyd2FydlgzJTJGYW02SWF4bE1OekNZNFduSTdWJTJCOW1jNjl5Y1pjQjN2RDNWQTlvMGdYZVJJcUN2TzNzSkpnZlVsQiUyQjdybHZ2YW5qdlprTEUzVEdsNHZETko1cGJkQ1dkSGhtaUcwR1lSbzZaTVZqMGdNUVNBM0c2SlJ1MDg1Y3plTWcyZDNySHI3cWFWekxVV3pOSDJUT0Z5c2dPcWJQZEJXR0tTeFZJcW5KVUxVUnQ0WkNabDhkJTJCNmZpTm9aUVFWQ2o5NyUyRmVGMTk5RDhxJTJCSTk2YTRzZ3dteU51cFZ3bWZQdFNQeG1HbFdDbUF0aUlLOCUyQlFDWUljM2puYmxhSnl4OUZkaUZRSjRZS0N0cFBEdFI0VVIzJTJGYXhRNUpOJTJGOUNvcEZRdjkxb0IyeEI2VzNHYVg1elZrQlRWSmszdmpFUVh4RmE1SVZFdFJIN3klMkZmNHRFWGVUVzVNa28lMkJtSTFJSU93aDclMkY2cVNKWlFmdHFzaGxGbmZVTXBtYmtLT3FxU0RTZmhBZzdJQ1lLdWpKVVZuQXR1UGNtUjl3V3RTUUo2ZDR4byUyQkc4WkNRRkFTZG9tSmN2cHVHWFNwMFRpeHdzMVFpU0k3OWpUMFZFaDZwSkVkMkV6bTVtOFdJTnFEUFdzaXYyZmh5NjdQSE5xcHFVNlYzcmJFWjlQd2JTJTJGMXhuRTJyZk1LekpGclRuS0EzJTJGeTU3cEJMcjdQV1RaR21zbXpIQXhHMUprcDZ1SmhTaHU3VDRrTHV5YkxpNFplaEolMkZaeFNlVHlaMGtLOGt2dWxFZ2lzdjlDaHVHbDduZkZEcHdySzhyOTcyU3oySSUyRnBrRmRxWlh6NkY3WjdiekNOOUlLWnc0RHhWJTJCOFBUVlFhUFhVYXlpRFVVTktUUHlDY205NDdZeGpyanFFeHFEUWh3UjlncENlUjRTQ1ZaSGRYdkVod0dUeUZUWWJBbEdhOWRWNGZOWGZFZ0xFajh3JTJCMFN2VllueCUyRnA5NDJSbWJlbCUyQm
VZMTdXMzFlZjYlMkZpdjJJUkFZS1J6elFpQUtyelRvZG11aGxhelQ4TWhpdnc3ZGhMMHRRRnZUYkdlJTJCenEzblVRRlZvUmt5NiUyRlc4UkFhaXdxMFZBYUtwZFJ3VkZGUk9PZFIxTHZKWEZBd3BrRkdvOW1YMWtXcThLMDBBcGhiYngwcXU1bHYyVHZNMTJwcjg4ZDg3ZXVhQWJGOUt1Tm5zb1RpJTJGS2dCMWJ6clNJRGlCWmxSeEJQSU9CRGZFYklnJTJCckVrYkRtQjFIdzFkJTJCb2syJTJGdVZvbU1kUHRVUUVoVFlMYjZ3akZQSDlja29qaFRackVKOWtaaTZ3MFpLZzNNZkpWaWhhdnlianVlOURhVDJaRm50UU85UWR5V3MzWWU2YTM5a0lrMlBmYmhnaTV2RURjOUJSbmgwJTJCZ3dGbWJkdmpIbEdObjUxZDFjUHp4am9pNlB1amNJcER2bVVSRDZKZlJnMGNEM0V4QWdNQ29zZEx0UkZPbml0aGZha0l3OXgxRVNaY3A2RnF4MiUyRmhYZ0hySkJjJTJGenh0d0tLaG1oU2RVZjgzNnlIOTU2YmIlMkYxJTJGVm5PMVpZbFJFWmY2d2NCOGJFJTJGa3NnTFFoRWFkcnluZ21QM0NDSEp5SDZlcDE0cm5TWTBobEJOcjVmZUhOWlFmWHVHWTRBbzJDdjNPOGJIYzNjNFRjVGZJN29neGltT2UlMkZzVm5OM25vdWV0JTJCR0U5bDVMVE43U3JXUkJpNUN4TURHZUZyQXl2RXJpVUhDSXdMOEFZSnRCU1pVYTZ1JTJCTDdmRmNJZkZ1OHlTUnoxa0hoMmR6WEpYU2duWnVVMmJ1VHpaeXJ1ZDVvelNsdUphd2pVbjhEZlolMkZvcEJ4SkQwMWs3Uko2MlFkQWxnWVl6MDg1ZjBUMTlOOXNvTVVkZlVjV3FvSkZiTyUyQnRnN1ElMkJBQWEzQjR1N2ZtYkZrd3k1V2RqODdJdlA3S2xkVWxBJTJGJTJCaDBKOFglMkYxWlNsbGN0TnAlMkZFeWJCOUY5eEclMkJYMTd0TGViJTJCM256cmZ3UjhNNHJIJTJGdjVhYTNhazFLMWFxUmxUaXNlRWUlMkZicmJ2ZHpNJTJCTkRUN1hYUlQ4cWRCdiUyQkZYTUc1JTJCODRkMHAzMjJldDd0OXUxMmZXVXIlMkJmVCUyQmY4ODkzanZqSFp6UTNVaFclMkJNJTJCRTk4SVBCSXNYS1ZiMzR3RHV4b2RhcU0zTDdadk45ZGM4SG1iZGJkYiUyRjFQaHZQSFl3SXBPaW1TZEV1VyUyRk55dVc3M09hcGw3UENwS0N0OGVxbWR2azNndSUyRjM4cWR2dFh0NVlXYnE3dDBicGt1VTA5ZGRTeGRPY2tJYWdzQWdsc3d5V2RpcDNodDBzR1l1M0NFTGZtVVUyazI4MyUyRmx3TyUyQjNPVFhsYzlsY2l5V1glMkI3a1Z6NCUyRmhUdERWalA0bzRRUHlzUm52JTJGNDhWVmdqNjNObko3YjE3NkIyRWRwVDdwTldNa3IwJTJCcGQ3M0Y5Sm12ZHJLdlhMSWxHU0p5JTJGdjlwWGdWaHJEZXAwcTdMVHIxRlZQRnk5TERncmtYbHlVRHRjNVFBajdpbVBCMmUwUG4lMkYyeEtzZ1h6MzFNS2VXbmJ4REh5eFhFeDZ1QyUyQnRMQWxKamdzVUpvZDRkeGElMkZXZzZXZktETW1CNTQ5VE9SMDI5TjIzWDdjemdoMGpQUHR3ZiUyQjFYMDhCNFNXV2d5MlBZZTBEYkxFWEVxR1N2OTJEbGJIZENMS3RlUThYUiUyRmM0Y1ZjV1VLdHdPTEtSMEk4cHhGeFN6WjVlc1pLSnYwWTdrTDhFRzJBWXFhMWYzUlZFWTRCMUNJczNiU0UlMkJ1SFdZTUV4OFRFVm41Ym5QVFVqdzNkZXJXanc0MXp0ODJ6Tmc0JTJGODZsVGYlMk
J4V3FlU1lYNkZvejBVQ3ZZU0xwcHB2UWNub2hPbDRicHNscmhuRCUyRkFlN3JHYTBUNVB1SW42YXpqb2FhOWdZcmdIVXoyb080NjlDdEZtZWc5NG5vT2pzS1VGUXNOeWZNN2tPSGh5Y1dmUVdRUFRFSTJXdFRzcGVVd3FIbFF6RFlRRExURU9YOXFGZyUyRnZXbjdmOTM5MURqN3YlMkJldGkwenFoSnUzZGolMkZQOXBKUEhvNmIwM2NqZUtyWHRzNDZrRXZ4ZjdYM0hrcXhLdDk3VDNLa0NiNFpBRmFZS1c0V2ZBUW1GOSUyRjdwUlZidmZjNyUyQnI0NUNFUXJOZEhyVVRTZEoyclclMkI1WW1TZVZnJTJGbWdZNW95eVZociUyQnJFbDlLQURYa1BsTjAlMkJ3dkNvZXlnQmdza1dYd3lEVXA2Q0Zpd05xJTJCdHl5ZG40eEVET1Z6bmsxNG5jOTRVbjdZJTJCdjFhTWYzMmVMWmRmSE9tU2pjSFhYS0tCNlRvTmluWkxDVnpDSnglMkJFR3ozUUFERnJMSUxVUmEwb25CMXpza2o0bVVxbGV6WXliSlFYMVp2T0tHWXN6JTJGcWdidXhOdVhZcFUxMUJKQyUyRiUyQjJZVGElMkZmSEhiZW1mdnBBRGFUViUyRkZTR2w4SGM2a3lNcVpUSDRGaFJBY1pUSjNLSGVrWXZHVWRuT0JXaWJmbWw5SVJrNnN1UUxndUVYbEFKMThUbWluVzdqZzVXNExIdFpMZU5ISFVmSlVhOCUyQmIlMkZIOFZEejZNJTJGMyUyQnJxWGNHNzY0R01qUFoza1VFbU5WTjk0NkdOaTdyN0o0dExIVDNhYWRPSU50UnYyQ1NqSUpRVFN1JTJCcDJjS0gzbVolMkZkUDdsY3E2bkJBblIzeFltbE9ZJTJGUEc3NzI5YmtuMHJqZk50JTJCZ01SZGFxcnV1MGJYRG9kcG9UaHBveFpRMzVRTWdwT0c0UkhqQVdVdzFLbTg0bVNYS1BXS0VXYm5McGI5cFVQaFlIWkQ0dnFxUE9pYWpzNWx1b0YwTGQzNyUyQm9sc0R6bkxOOEdzcnNGM1JHMmlwcWpnS1Q1RXg5Znc4Q3dxbnNyWjJyclpMRzE0MmpkTlBRJTJCSGVza0dnQWZSWGZ3cUl2OTB2ZXNLczclMkYzdDMzdmYwV2lFJTJCSGxBVzVYZmI4V2U5REtBTGx2OU5oVGs2Z2pHbVRmMlltRzVhaVlNVWp3TTlEc1VtVFcyQXlPRnBlUmNYQUJNUiUyRmNFRlhvcHpNUXE4WkU5M2VWSkl6OXlmUHZGTlk4M1RIcTRpOFFHTmJNM2RHYmFlaFZnUnY3dmRuUGFGOGtUc1VRWnglMkYwaE1qOXFVdm5mbzkzc1J2RVMlMkZLUEZEb05zSEs0bFR0R1hFUVVGRnNFSTZVU29FVlMxMCUyRkZJVFpSRmVRc0QlMkZPc2VFWDY3OXdVNFQlMkJQTzNGRFpwMXJMZWVPc3BCeXM4WjNZUGNtSDU2aHZOZm8lMkZSZGQlMkJZc2JlaGdiaHZxcGtNb0hRTnpnM3FmbFRXSTA4andwRCUyRnhlUDVWdmM2Y2ZIWWR4OGF3dEhHd0tQcGpaaEwycGloOU9jQVdpR3paNDBXS1NYZ05ZeFFFSGNBJTJCU2tZcTBXUzA5aWViNElmM2JuZnRQY2hkSjhhbzRCTTQ4T0ZJQiUyRmRhSmdpaWFHUTRRJTJGb21Ra1VuMUs3TzV6cE9PWjB3N3R4dThjTUFiV3l2UnVOOHpLa1JjdyUyQjEzbUs4OWhmeTR0S0pnZjZrWEd2M3d3RXQlMkZVOWlwazAwS0hGdTZvOHNjY1RaMlZNNSUyRjZUTGRlMHdhTUslMkZGYXglMkZ1YmNTaUJVME1hZUJwa3hldVBMeFcxV1E5JTJCeER4VzNkS0NXaHFrJTJGYUVTYWs3cjE5VHR5dm52SFo5S3Q0RGp1dz
kzcnUlMkJWYU5KMlI3VUZXcG5jQ0xhR3lLRk9SUUROdSUyRjJmdjNuVUFtN1VHbCUyQnc1R3g2cTV3d1RvZDRzV2l6NFZSbGdvZW5remg5eWtPTWVoSnNRNVVmWG1VQzJQRnFFNUVzVjREbjlPSmJZNnhjWjNBeHJwYnN2YXBrJTJGNWolMkZPdHhNbmJ4RTRpS0U0SHBHUW5JZCUyQlB2JTJGVTQ4V3BuUEhnUXZjaXdSS2IzWTlMJTJCdmkzeDM5NyUyRkxmSGYzdjh0OGQlMkZlJTJGeTN4Mzk3JTJGTGZIJTJGNWM5VGt1T1pWQ2xxQ1F4aFZBVFY4UzNXNmJUYnFJT21aOGp5WlJxdWJXV09OVDRTV0tYRGhBVnBpTjV0QnZNbHElMkJFbDR5c2Q0cjdwNHlzdU5mb2prdHdMdHQ5WkdESUhoaDdHTXJPQTklMkZNNlJsNUJPYVFyZVZ5Mkc5cWRybldWcWRQMXVtbFJCR0M5ZmM4Yk91U2d6eThONnV2dXZHJTJGdm1XVVdEVTNJbzBmSkpKNVZ1SXpna1lJcUlaQWMzTHZjbmN4JTJCZFNVd3Z0S0c3OWpqamFRdWg5MGttRzgyZk5yblM0alFVd1I1VzhKUjN4eGp4bVY3aDRYWGNpOUpEUXBRanpVYVVNWEtzOUM4cVAlMkI3JTJGZER2bFV0TmE3MW9sTkRIdkI0ckV3YzNlc0tWdWJ0YjIzWlY0bmhXaGVhbnI5Sm5CZUV6bDlxUTNaZWRWJTJGamZ0RkR5Z3lSbk01Z3BReCUyRmhPVzY3SzRadzd4JTJCMHI2M3dnaFZIanpmbDB3SW8zSEx2YVBKc2JZdjJVR1hzTkxUcVg1R1dSMSUyRjFRc0MxRW9kb0s5SHJjU0QyVGVIZjlvREZBb1NXTVZaN2tPUVpOWnJ5RjZrczh4UVRaT2oyOUNVanFlVk5UUDNaRTZnYngyYUNUTGpHeExaYVV3dFVjNXpUQ004Uzg0JTJCQ0ZibyUyRjFaJTJGdHV5REVYYTN6Y2FmTzI4Uk1rTEY0VWYlMkZsbEJnMzUlMkJPYko4azBlTzg1bENVSlgzck03RXBMdGJ2UG5MSEZJYjc4anRDNHN5dE1HV2swVWVteVZ4UDNjWG45bHBSb3NOVDFyUXdSc2NSMzc1cFh6JTJCWHVtM0tCVnFZT2l2SzBMbHVVN1ZQOFRjUzhTYW44V1NLd2d3aDNnQURKREclMkZId0d1OWY2TDF2bDFxaThwRHFWdGdaak5lalpWVnNuOEhRdmhiTm5UZ0prRDc1MCUyQklaODhYZ1RydDViMHVrN0JOU2M3JTJCaVllY1hUbWQlMkZTV2poMGxkbzRZbmRNRFVnU2F5MDZaMWZON3hMNFNaaFlQYU1VYUVySkx0aEthNVlhQjhzV2RnUm53WDJDaWRQelVjY2VQWSUyRiUyRkFBVTNrek5LdFdGYXUlMkZ2allnVmw4YURsenNkNzA2a1FKbWpFdXMwdFUzVE41U2xPYmJuQzhzdnFHeHRldW04Z2VWblBvMkRWc3g2bHA2bWlpQTd4Vkd6azJqNHhkTVAlMkZnNVlabGE4VmRQejdJQnlRMVAlMkZFJTJGbm1ZSzZMTmZma01yY2F3d1N4QnM1MGplYmRiUHlrNSUyRmE0JTJGczQwa2x0akl2OGlGQmszU3lEQzBVN0d1aENkWW1aJTJGcnd6a1pxWHNWVFJuOHduM0lCUCUyRmxEaTVtUEc0dWZUN1JJa25YS0ZETFoxTHVrdXc4RGFubXkyeTdYMHRMamVFeCUyRnFpeVpvVmlQZzMybnpOczRUczRIajg2V09LZmlUT0ZHJTJGWTdiS1M3eUYxbldpdWUlMkZxQVgzTHF6aGZxT0F1SFJRWDQ4WnlNYmw0WXlQdkxraTZiZGMyN0VUdUxyTHFXcGpDYTg0WG84RSUyQmdUakxPNXVoWGRVZGdhcUVaNkxmJTJGUzJMWTNFV1
hxVExJZWFRTG5RTXdncW1ZSzFTOHRXUTJLWDNhRUM1YlpyUGdjZDNYam9DOURoZmp5eUhUTHBld3dkak93anloNjlzV1lMZ0s0QThuMHhPSUFjdTVLQXh4VDRQdmYlMkJTNGQ5NXlKaGVKa3pzbUxQZnNWQmZVN1lqbE91S2FIT0VSSlJhYWZJY1p1TUd3RlQwZkhtWjBHRU4lMkJicnVFSTQzNWdCSDd5OCUyQlBDJTJGWGdpcGV2TWpHJTJCMTAlMkJSb09RTCUyRlZNbWhYMmg2dk5MY2w2alF4M0I1UHVKUWtVYVFQQTI3aVRHWG1CJTJGcjlMYzBicW5RTTRld2VpJTJCZEF0YUNtZU9PbiUyRlZ1akxkNTVON3JCS0p2UFZ1YzlCMURNV1RQZnQ1RzlLdTglMkJYdVBENmVlZWklMkJjb25aJTJGTCUyRnM3SU5BYk9RUU9qSTlnTTNHRlZFcDdOUjc2R2V2WWJ2UXFxVW1udlRLajVtYjVUcEpkeWNXNUFMYkR3aHhiNDlnSGVCMlllVEhSdCUyRkdBdk5PQURHbnRDRlJoZndlY3g4bElIdEdkcExHRDdaczdjUWpmaFAyY2FYMUNKcno0elRPWGlRWmJ4eWlhbUdTJTJGdXNMRCUyQjB4MiUyRlhrdHdvWGlkJTJCQml2SnQ1Ung2OWdkYmNuTkp6OEE0ZUZtWnh2YUZkcjVUUEg3MlNRZEpPb0tNUUo4NHdRaTUzZVppSFpsbGNWNTJzYnRPWHVhJTJGUUl1c1UzYW93VlpocjZ0MEppVnBtMngxSTVLeFhGY2YlMkJJaUlRcnI0ZDRReVMwQUpyNG4xcGdsWXV1R3pWNURyJTJGbUw1NiUyRmViSFFvaDUyVTY2cjBtVW10aUF0SGtvMHFtZVZFelNhcFJjR216MVRuc1dOQjJHeWZnTzlGaVI5TnJlY0JzQkt4b1NqNmtBbXAlMkJocmpJVnhUcURuQlR5NWI3ODBrUkhQZFolMkZuYU1XcG9ZTWQlMkZhVHNZY2IwNDVPaE5XSXlsajk4eHJ5QThXNWU2RWp6RHBDUEhxbCUyQnFpZjVJVnpyc1VlaGVReDI4bXJ3N1lrJTJGb2RZZFRCSmxud1lacVdXOElvODRpZGtrc2hHa2VqRXJXRmZmVDQ5bUp6TTZrV1JhbXhtTzhIeThJb2ZIVW5pUDl6OWpCSSUyRiUyRmVyU3NhelRRd3BQQ3Z1bTNvQUdDZmpQMzJQWmk5ZlNmVUJsbW9URVp0U1QwZnRwJTJGcWRMSFhzcCUyQmZ0UGg1dnZRMFlPWnRUbUN0S1lkeWM1bHlRSDZYV0JHbGtJdVRGb0hmeGllMFpyWnhrekhQcktONzZUWXh6dGVpa0dLTG5QbndqRENiN0VaeTY1c1lvZ00xZ09PcGxwZTBSMjZQa0U3VUlIaTdOeExmVllmOTJZRGR6QVV3U2pMdTY2b2ZzTTFNS0pQWCUyRjRScDN6Q3RQU1M2TGpGclclMkYzekpIMFN4VVhMSW93UkV5QyUyQndNOXBuQjdxSjA5VXVSb2p2eXdnNVZGekcyR1BHanZqamlHUjMxMkk5VEZWM2VjWHBnbjg4d0l1SVJraHZpRUN0cGRJdUpRT0hMdllsYTZSd0x2T00xS3o1Q2JqUWQlMkJEYjFaMEx5SEZ4ank3dHNUeFJrdUxiemVidjk1ZHdUcG1zNHJ0NFBNbEdaM2ZEbW1uN1Mxamg5SHZQeDRLanAxdFdDY0RjbVU4Y2xpRU9IbEM0SFpGNFBGOVNCdG1yQVg5SGprdFF4YXBPRDJPc2RURk5wJTJCWDlpUlNNNzMyYWJ0Mll4MTJZd0poWGklMkJPR3AxMnVGcXVBTEt3JTJCdzZ3QVdJVzhIUmdtckY3aVNwJTJCVFNNbkk1ejN1d2x6SG1mS3lxdTMzQ3k1bkdjNnozR3hjV0klMkJMZ2UzeCUyRnNmQXZpUWp4JTJCMnlzY2dYOT
lNcVAwOXNCU0lLbXN2NWFyYXBPV0N0UFU4dk5IUzA3JTJGUjB0bkZMeG1tVGYlMkZ2dDFRVmMwdlQ1Zmt0WUdBN1pZaVRHNyUyRmYlMkZSSGNVWDdHazhrTmh2V0hSJTJCUnNXRjZWc2J0dXVVJTJCeUw1bWozYVdXWU9mbUFoOXJ6VTZnRTJmazUyNkJmdXljTlhmZk9tT3dKMUlhbGxMYyUyRkF1RzJwTzdBWDUwS0lubUdHcnptNTI0dmtjSmhmbFB3ZTlEWjhwZTRadjllSnpDSkZ6ZjFKNERsUlFwMDBHWHFFZlJ5M05iTno1JTJGY1drSXpsSmIwMklNZGFHemlCUG1lVFQlMkJBTU1tZUsxeXRrUjhPT2Y4JTJGQnhmdGZwekRSeUQ3UTNudjI5UXRjNCUyQjZmQ3dTZ0tXT0xrZFh3OWRrQXpWaE1abmY2VzBsR0U1aDJxOHVKTExZJTJCZ3hZd1Q3NHd3M1VDeDZDdVQyRE1GRnlHNXRlJTJGNCUyRmJrSjloJTJGMmpyZVljRUNlQkNHVmtHNlBnUWN6cVkzUzBoQ2pCZUV3RWw4RVdiWnBvQ1kwY3BpVUs3em1FbCUyRk1aVExOQk5LbDgwMVAydEk3cWVmeiUyRkglMkZ3bnZqbVhUMGpjVU85JTJGZW4ybkJnTlJqdSUyQnVaZnJDUDFhVDJZUXUyTWJsenF0SjZpTTc2WWVUeHA0ekI2cmlXeG5jYjlXZGtqeVRiMzRTZWFSZDY5bGFOdk92clhnMU5lTVNKUWJ1RlgzMzZJT1NpeWdqZURCc3Q0eXM0R011YUc1bEpncEtPcjRxZjYyVkFxSWlFS09mSzYlMkJ3Q3ZoalFsZXkwY0dyJTJCWDFlZFR2QXNPaHdKQlhDNzRnT203VUxkSkFVbmZ4aDNmTkxxRExXdVg1ejZPOUJFQmkxOTRTOU5aOVVlaiUyQlV3QzRoQlk0JTJGTDNUZEhLOUxtJTJGUnRhR3M5ZkhtdiUyRkdLJTJCSG5CdTNCSkZIWERsN296akt2WXZtbGx1SXZsOXVmSUhaSFV6ZmxyRTQlMkZvdFZhbXVCbmlpN1M1QiUyQnVSTkhPcnNMZjVHSE5rZDFEbVRHOU1SaGxOdVRhRHF4cHRld3ZIbWxwUEVOcFNhanA5Q0xkWmplMUZ2QzlTM0h1STh2VmslMkJGYjlzYkRFcGhteGhXYzE4MTg1OGhXVTBGdSUyRnZRbFhmcXpIVDhpRiUyQjZ3bktiV0swbE5iUUUxcGh2azRSQ28zbmNUV1I3M2hYd0dXdkxEZEFRMmRWWmRrQWV1WjBDTlB3SjdyMU51RjJCRHFYelJCRWVhUDlJYlpTRUNNeXF3dlVKY01abjJkbEtWd05iJTJCYzJkdmVpbnRCaTlXczkxNm5JbGQlMkJxdVNwUnU2cU85RWJnWFQ1U0VIRFFGZU00RUhhN3NLOHdES3hNYk1UNEVGaHg3N1d6Qm8lMkZFaE9jMVJnJTJGSDJxRmVpTmVFTk5vem5qemJ0UElqUiUyQkV0YjdjUWJYTVFaaGpna2tid21Obk1lc1JtS0hvOUpVMFJPVlh0RGpqSzRhZ1lOR25DMEZrSUMwMCUyQjJod2MlMkIyM0lXQXZnZmVrUHpmJTJCYjZzeTZ1cmNjJTJGN21kQ2xobVNneFVwNWtOcWFwR2NXYjRyTUJlOURlRVV3WElOVlh3QlgyaGI3dVBlMFVaUlolMkZDQ01NN1JXQ1YlMkZsMm5UTmRqbnJ2SndvSWczek8wOSUyQnI2YUk2bGg0OXhDYlU3SzBWTjQlMkZmUW9tUEMxRWdhR0diejFiTGlHcUFHNHNSa0o1ZzMycSUyQmZHYTdEUFpjU01rRmc1YmxjM3dqanppY2JYZjkxbWVVODROa01BZ0taUm02b3BJZzB4SG1hODU5b0JZMXZsVEFERmpJbUFpU1hqVHlPbzhMRVRRNTkzdnVJJTJCZFc5d0pINT
RtaVFLWkJ1V1QlMkJwaEVPJTJGM2VlJTJCeXQwRXdqbTBiSUw1Z1BkeUdmd29VNmtGaGdNcjhLTExySFlrM3JaN3ZDRHljb2x0bU5aZSUyQk1lZ2I3WSUyQiUyQnMyRzIyWVBKbTB4VHV0SEVoODNEUHM0Q2JmNzV6UXEybGs3QyUyRmNFcThsd241aWMlMkJqZElwcnY4emY3VXglMkZvTHBsT0FGdFliOWh4Q2ZvMllxYWwydENuZ2VDMnZ6RUxjRzJMcDRHM292RTNOZk53M1VMRWNSdGZ4V25FbHJNVlNoU0R5c2NxcGV6R2N3d3VLZWklMkJwYlgxbWVQUmFjM1ZqVDdzVWoxeFAlMkJSV0VwN1lJeUtIbUtEZmhhMkc2dWdlcXd4a25jQnhjZmFXYms0M3ZVbmZLZWtNOTRoS1dMU0ZsQW9ObFNoWjVqMHdiWWkwQ3RkaHJsT1pxTG1JRm9WNXZ6R0ZPM2cyJTJCcGk1V3dkV3Z4QjdJdmpESHlybXZCcEp6OWV5TkpyODR5MXdvSHlCanZnU0xDV29mQWJyTGprQW1ISiUyRnA3Tk1WMjVpSU5zVHNkVUFsYVhtb2slMkZQZHJPUUIzdWJxeEYlMkZtdVhyWWRJdmoyNVBsRzRxazZZTFlpRnRnSjlac3MzUWMybm1OSGhKUjJ0c0JzMkJLRGViZlpxc1JCWTYyVlhvS0dTdXBjWllKODVZcG5IaEgwaGc3QWtPdVdpJTJCUkVQUEhNRmIlMkZScXA3Z0k3MyUyQkpnNlJzYzhpWGQlMkZucnk4S2VMMjNuWkk1TEhjdmtyUFV1JTJCN2RrNXBEZFdJSkZtUlREcU5XZjd5MDEydDhBSUFOODVPUm9nYTIyTTlCMVByTG5SV1ltT1dDcllZalNqeldERFp5ajA0dElObUFudElPJTJCOUdEaFlpQ2hlcE1jZm5HQVQ0OCUyQjJyQTd1cU9pZWpnT2FYVHNKZFBTelJvUzA3WVZ4dkVQa29ZakxHdE50cTBHMGVuR2JzSFdMUnRJUjZsellCd2s5bmQ0Wk1xZXBnJTJGc2l5akJCTiUyRnZmU0VjQ2p5dHl6Q2pKOUMwUmFJdHhPeThTODV1Q1NqZFVmUERTakdRbCUyRjVFZW9hY0hnaGRUMmJQTmZPdXdTRWdvNkclMkY5dlhQd2x4aFVsc1Fvd1VzelRnU1cyeEklMkZmRVpuRVdjNzFlTkczNktlZkpjZXJRUTdkakFvdFlQblhsM000YmFFYzV2UUZ1UUp5TWpFSHFSbkVXbWhPazUxWmpieU9nZmQlMkZlSkNmRnhIN2JmZWFCTkQ3aG9ZQlV1NzhVSk5YJTJCTFc5c3pON3drT1U3Z0tzUmlIUmFvdnNzODZINW1rTFUxVG0lMkJzUWh3eHB6QXlORkJuY0FGNEt6dW1ZeXBmZ3FEMzYlMkZrcnp0N1AzVXcxJTJGblVGbnVlMXZmbm50UmRVckhBT0x3bjA5RjUlMkJCZ3VLaTRrWXdZbjVhcElaMiUyQk05Q1hNaVJqbk8wbldGWW9hNGNBMjdXRHglMkYwZXdZdk5hMVNOJTJCTDFSa3U1WmZVRzUxblo3bUw4MlMxSnd1JTJCVTI5YzNlUzZBaUFKU0h6ejY2OWpTZEh0Nml4SlZzWmV4R0VpWUlVbVolMkJMSFhueDZkejM5b2VqR3VWM3B4YlNtVng3Y2QwWWN3aVdIUXl0bWJhOGdnTFdzcDRmUWhaZnRNdlEzUDhXTUh2bnJ4Q3pPNGhEZ1VZRWZ0TiUyRlN2cE1MejNiaG5IaEhQUnV5c0toa1Y4OWR0cHd2YXBzeGFQVDZCYUdMMGZZYkxiOW9PSzgwQmdXVEx1QXd3RnlMaDNRNkVYdTYySFRlcmM4Y2NvSyUyRjduRHlMcnlmMmU0bXVrMTQzSElzZDFyWWFmJTJGa0Y4aSUyRmxiVWtoTUNiQ2JxMzJBb2k5ZVgyUGFUOEpxMDl2dD
h3djZRM3BZV2t5SHIza3IlMkJiZTRGM0JvTVNjWG55d2E5bCUyQkR1SnZQZG13SlVDbDN5aGlrUlVLdXIxMGJMdGxiYnZmYjdVT0clMkZoaThLR2pPZ2slMkZrNjBzQmZIeTlKMUZEbG9CbCUyQnhRTDRvZiUyRjRmdjNTR0VjWTdMUnZ4c1JuWkVXJTJGJTJCeFc2ekpVRFlOWVZrbDNXT3VHdHRIdUJ5aDMxWmt0dFFXQ2c1c2laem9UR0xaVGxONUxhYVZLdzgxYVZJQ2Y0WVhRUUdTcWRMOEFhWFpLSzBod1ZZYVNEbW9IYVg3c2k2RGg5JTJGOHFnNTJFbGtLSm9NJTJGTXhYalpPanZhU0VDUXM4R3AlMkYzaDclMkJsY3Q5V2NqODJ5VFByNSUyRkFoVUdGYkdoUzdmdVBhWSUyRjRPbVdKZmNPSEZmeGVKZHVDZ3lKeXdkQjdUSlVPMCUyRjdyJTJGQTFSQkppayUyRm1JN2tYd3VkWGY5Y20lMkZwS3FuQm5uNHo5dyUyQnZTUDBsZjh1QW5NZiUyRmhUUHY3UnIlMkZab1JSVnMzbiUyRndrYXVkZ0I2V2ZIeTQ1WU5WSFVmQ3lEMyUyQnZIclclMkYlMkZaWWN4OXZUbjZwakRXOEU1aXZIaGYxOVdvVE8lMkYwdTBLbjRsOWV3eG5zd2h5eVhaeFclMkZYWFQybyUyRlVEQjdud0pZRDl5WU5EeSUyQkgwUzh4Ukh0WFhwZHZkYkhVRFNJT3F5ZFpnRndLeDRZMWtGenBnRTJJc0Q0NUFsNjY1cSUyQktubTJhbnNmSkF1YXNxWFNodjN1Q0xZN3ZIa3lqeXlpUG91YklvQ3M3NFZIV1pHNkoxRDNZbFNVcVJEJTJCdXRFNTRma1l0cXplbGVqakp3TnJjJTJGSzA5UnR6MTR4SVlJRlIlMkJFV0lJMmxnZXl4VU85VWxaM1hSUkVZUmgyY2R2MSUyQlVxejVqZ1ZRcmtMbkNXaUFqNzUzSzV1dmhNNzljdHdCWnlKTGtLMVhYS3F3SFBLUSUyQktXQVVlSzJhQSUyQjZqQTN1MldEY081d1dwbTRieE5CRWFneTl0UWlxTGVyd2ZEdTVHY0pBWlFlRiUyRmVmVGo3VlBlZHdONlIycndhV2pudDJNRHd1cVJ5SjBjYmpPTmlrdkYlMkZVdjZYVzJCOFRVdWhvMmNMYkxYb1NRJTJGalY0Tjh1dVVwJTJGRlhxYyUyQmg5S3cxM2lzTlJCJTJGMFJEdkZ5ZlFaS2Z6OXk1NG5QQkVVaVZLYnVOY05wRG9idSUyQnNaeHlzZ2hvZ3Z1ME55aGpSanlwTXBSR0RMSHB1bkVlUCUyQiUyRnlDU0dySTdzVUtoak1GNHkxTjFvaXBBalpQNEUlMkZVOFFiVXVoV1JLVlRoJTJGR3FvblNkVE8zN3FpVUlyMnR0OUR5WXlXZkVMMjlpZkg3aVhwOGxOcXl2WTNlMVVIYnVXdCUyRkk5VnM5dUxyQVFmOG9PampPMGlQeW4wNlVCNjljSXhqWXVjV1NQc3lxOW9ZcGNQdzhTVlo0dWtHUkNaOVhhbFZuWmdWdWNxcVBCa0lSJTJCQnpDZWZ5Yno2ekpRbWJOMGRCNWJLeldZdnVVQVhiMHAlMkJiWXNYallwM2I0SHNqWGlkYyUyRk5ldGJmaU53aU9TUlpRNzdFRVFIeWlLWEFFJTJCTzBEcUJ6a0J0VFFTU0dZaSUyRmpXdmswNE1pRjJqZVElMkJPNmNMa1BkODJ2bGZpRGFtTWNCMzZNTzVSJTJCcmpOYTVzZjF1Z3FOdHFhTk9xREdMY0pZMFgxUmczTUVoeml1c3o2b0ZmazAxSVZNTTc1NDVFUURBRlFBOHVBTFhtM3owZXA0WWp4eWRrT0hLdTR5RTlUcGF1TWFmZENQQW5laUV2Z3JQZkNxclIwVFhYSk5oNUdGdG5Ub3FyNkkzRkk0anJPZUQxNj
RoamJBcUE4RzBrNTJvQ0JiR2ZSTWc0RVVwa3ZOOEpJYWZPV2NPd1dUTlV2Z1hNS21oWnJJOVlRNU5XVGtTUnpQcFVFUTZiUHE1NHlCRFV0dlJMSyUyQkdLaGRnRkpydGw3TG5Qc2Vmb21ocXp0UEVwbkowcDZjTkR1QlJrZEpyUHF4eGpIZHlqTndMUGlTYTJnNUJOOUNJZ3RsOENpM2ROaGplUDNEMmo2dkNXeUFCbjBXMG5Ua3dXUjVwSGVZSmpWVWVOWVolMkJ5S1R0bk5ENkg3TzBQaUJHUElPY3pISjlXMWJRRXF6V25Eb3pveUFURjNjakVGN0hHN2xKVTR5TURxYnpka0w0VDFTWDY5STQ2dGJlUWhKRGclMkJyTTlVdEJXMkMlMkZPd1g5NTliZWwza3NYaGZBMFUzcnlRV21LMWNWTlExd0k3bFpGa1huV25mbEpaakxWc1doekR5ZWF1WXVtVGpEc2dOeFZSTU5pTTVrTkdFeWVUaHdKa2VJQXZCcG11V3gxbEFuOVZGU1c1UlcybTRGMGZRckh1b1AxJTJCMUNtNU9Jb0I4STdxempJWW5WNXpqZHNleWNjQ2FiNVQlMkZCbFZ3RUtSZU9NbGFYVHJORVpMUm1MeiUyRjRqWlNHNlcwUncwYW5lQmR3V1IzVDJoYk55dGdMdkNnNjJiTVpnSXdKeU96THBvb1lhOFh2T01pJTJCOGNkQjV2b3UzUjJUU2tKRHliOEhxT243MDQ3NDQ5Z2haRUgxNXJhdEdHQ0lsbFpFSSUyRnMxRXFraW80aG1nYW10eXh5OGlwMmUlMkJzNmlicXBSb2ZoTVZtSHZ5amtuVSUyQkpaaHhuRXlWbmVDJTJCWkljSmpiMTRoJTJCVE9OeW9UaG1meGtENlMyeURkQVYlMkZnSTJzZHN4ZWlTb2FXcFV0ZGJjc0lYT0lhWWFJcFBicWVlckR0am11Y0ttYVQlMkZNWXZ3bW9WREoydVpudGtwSXV3WTclMkIlMkZZYndHTnhBUHdMOUVJU2pkZjZUaDVtQTB5a3klMkJJOFJqbTJMekhFdmhpTGVEYWUlMkZUeGR4d3Y3MTM3bnRIUHElMkJCcThKb0pFOUJySVh4bXVxUkdiNE1tMGppaWc0WDFXeEltNiUyQkFQQnBDTyUyRnBqWU1IWEw5THElMkZQMzI2c004YWxkZzVPTG5LJTJCakttMjVOJTJGVTNTY3dYckJyViUyRmlyVWptVUVKJTJCUmFiZFpqRENuMW4yaWZLdk16QUVkczVra1V5UXF4WE9lSiUyRkg3d3NGVWppZGp2TkdoVGl2VFJTMGMyYmc3aDNiU2w1aWZpVHYlMkJBS05LelFJbE8wYmJQcU5VM1o2aUtuNHg1SHc4QTVZZkdOZWglMkJGb2slMkZXSFJ0JTJCNXo2TVZ5SXZ1VUlua004OEZQOVlvdlNISm1nZm1VOTJ5Z1pZSlpnU2pCTFBVMWFXdzNTQVppdmkzOCUyQkpkRGMlMkZmUHJOUkR5WXhBUm10dHBQQ0pNcWlOQzRubzZXWVRHWG9JaEhvRkhrNFRYenRwZkNnVDlES1UxelhwRjl2RDdnR2RHelIxMUtQWkpsb0VaWmRwekpsVGI1eTJ4dENtREpGNG1QcFJBS3l4RFkxcmNyWHo1OXRRR0RwbU9TN08zNnRzJTJGZlhPdCUyQmZGJTJCM0dZeVJsYU54ckRyS0VKWDVGWDZXQ0JUdHhRRUdJTHlhenhMTXRuVkxnb211ckpzMnIyZU1ZcEkxdno2OFdkczdFV3dobjNLZmVlaVptN0xOem5MazJoQUZhWjF5eTAxb25kZXV0dDVnTjQwMGRRNSUyQkdaOHliUVFSZ21vUlYlMkI5SyUyRmkzZWlHUEdpYSUyQm92cmh3UUQzVUVwVDJZYkR2Z1hvZFAxRW5TdGJyQyUyRkJITjZsJTJCZzRMVmk1VGVyTzE3UDdCaldqVGVIa1
dmU0RmN0xadlpCSVJ1Q1pGZkthOUlQOFclMkZHRWVUVFNVVThtZ3ZkclRtVlpURzFzeWcycDRjcUxSZXpQQ1ljNWtjVW0zZ0xUQVhUM3pyZTNPQ1VEUlpLRkJOZzJYVW0lMkJ6RDQ4ckk1OU9JRlowbmphTUdUUGNJdml4bWlPcDdNdUNpcW44TnolMkYzU0pjWjF6JTJCRG9IJTJCNjVjUjAzM2xzUW8lMkJ4SVl2UERDeDNCWEZFaUNrTW5YZHBYejNRdVdXUmZuUlFudnZOYyUyRlhPM0NDdGQxNjkwMFFUJTJGWE1RVlkxVnlZSzVjWHY3ZCUyQjdhdDRJYTRrRnZaNGN4dzFxViUyQnB5JTJCSEJCZXhjN1RmYXNDd2Q5ZExKN1gzbkJ0QUtleFlYWmxsa0VEYSUyRk1NdVhEYnd2TG5hZmVPRE9iNWVqbmRqUFklMkIzOWl6ZDlLa2dxYTVjOFBUZ2VQSG9IZzc3Y1V6S3RoUmZzZ2VlOCUyQnczc2t5cXUlMkZNdWFEMWNrcjIlMkZsWSUyQkk2Mnk5a0tQTCUyRmhBd0pnRGJSRDg4cDdyQnZWVHkxOXlhaUh2cHdidk5yVHNWcGYxd2lCMndqV1AlMkJBcmY3YkticjZscmJKWTBSUndCJTJCT1g4TzdNTyUyQnUlMkZkMlhxd2ZuR3FmYmptTGtvaHdPUXIzb21uJTJGMzVOODklMkJYZFAlMkZpJTJGMnBCaEhsdFYxVThlUkM3bVZpUXlOMmpDSjZQWUFzVG1pS1BwRnpDMU8wek5HVnklMkIlMkZBZG5wYVIybUxuN2xjYjhRRWFNRkFuNzklMkJOJTJCRTg1SEpOSWxDY2lNTGFJSm1ZdzFDd08zQ0IxbTdtaUw4WThnTlBGdE84NUQ5OVdYaEx1a2NxJTJGYlRsY2JId20zJTJGUWVEcmVlY0o4YnlrVFlxQ3ZoU1FSenloaW80JTJGbXA4V0dVUTJxMFlSJTJCdnFnOU9XTGtoZERDSCUyRnQ1VFc4Ujc1ZVcwN2pKNlRoJTJCSW44UFAxJTJCU1pndVNSQ2dQcSUyRjkxVnJnTnZ5U2pNRUlSNW1OSlAxWGEwN2d2ZXNJRzlTYU4zJTJCMTVybEFKYTZIS1BNem5KMWh0OSUyRnRyemFqZHNrem9iZGQwJTJGajdxendEWlhBSWNlaHZnbFRpOFZmN3o2dmhnVDJQU2xqJTJCJTJCVjFvMXdxdHI1V1FoUjh5d2YzZyUyRjVyaHlRMFh4cSUyRjAlMkZjOHZyOXJjU2QlMkJrNFBCRDdYS0FQMWJrQXpuWktYViUyRmZ0bHVQT1FHempVekx5QmglMkZya2VzR29VbnhFRWxNNEI1bEhxUyUyQlBnejl0eGpkZVRGQUpGJTJCUyUyRjg5bDlRcEwyRVpHUzlnRnk2d3ljWWh2NDg2cU14YmVlZlJ4aDhoTiUyRiUyRkN4ZWFYVXE3SnAxaFBsemsxd3NZZEx5Q3J4eSUyRiUyRnY3MTUxYUFPZjk1Uk9ENCUyRjRDMTd1SGpQQzAlMkIlMkJhJTJCT2NlWlgyMmo2ZWZENXEzY291JTJGOThFMEtFWFVqciUyQnE4JTJGeDY2YiUyRiUyRmlmTkVaOXJuVWdoUzMlMkJKdyUzRCUzRCUzQyUyRmRpYWdyYW0lM0UlM0MlMkZteGZpbGUlM0XUyG6AAAAgAElEQVR4XuydBXRVRxPHf0mIAEkIluDubsHdpVDcihSX0lJatBR3imuhuLtroUhwdw8EDRYgBIkn35n7CKR8lLyXvJe8B7vn5NAmK7P/nXvvf2dnZq3Cw8PDUUUhoBBQCHyhCDx9+pQrV67g6emJl5cX9+7dw9vbG/n98+fPefXqFW/fviU4OJj27dsze/bsLxQJNS2FgEJAIfD1INCxY0fmzJmDra0tCRIkwMnJiSRJkpA8eXJSpUpF2rRpyZgxI1myZCFnzpza77+0YqVI/pe2pG
o+CoGvF4HAwEAOHjzI0aNHOXHiBGfOnMHPz49cuXKRLVs27YWePn167QXv6uqqvfCdnZ21D4B8CFRRCCgEFAIKgS8LATHgiCFHvgVi2Hny5Ilm6Llz545m+Ll+/TqXL1/WvgUFCxbE3d2d4sWLU7p0aezt7S0aDEXyLXr5lPAKAYXAuXPn2LFjB7t372bv3r3ay7lkyZIUK1aMQoUKacReFYWAQkAhoBBQCHwOASH8p0+f5tixYxw+fFgzFlWoUIHKlStTvXp18ufPb3EAKpJvcUumBFYIKATOnj3L2rVrWb9+Pf7+/tSsWZNq1apRqVIl4sePrwBSCCgEFAIKAYVAjBCQb8s///zDzp072bZtm/ZtqVevHg0aNKBAgQIx6ju2GiuSH1tIq3EUAgqBGCEgx60LFy5kyZIlPHjwgMaNG2svW7HYq6IQUAgoBBQCCgFTIiAWfjEurVq1itSpU9OiRQtat26tuXuaa1Ek31xXRsmlEFAIaAhcu3aNWbNm8ddff2kWe3mpyr+qKAQUAgoBhYBCIC4QEMu+GJ3k3w4dOtCpUyeyZ88eF6J8dkxF8s1uSZRACgGFgCAgQbOTJk1iw4YN/PDDD9pLVIJmVVEIKAQUAgoBhYA5ICDBu2KEmj59OnXr1uXnn3/WgnfNpSiSby4roeRQCCgENASuXr3KmDFj2Lp1K7/88ov20nRwcFDoKAQUAgoBhYBCwCwRCAgI0IxSEyZMoFatWvTp04ccOXLEuayK5Mf5EigBFAIKAUHgzZs3DBkyhClTptC/f3/69u2r0loq1VAIKAQUAgoBi0FA0nWOHj2aESNG8NNPPzFo0CASJkwYZ/Irkh9n0KuBFQIKgQgE5s+fz++//06dOnUYPHgwbm5uChyFgEJAIaAQUAhYJAKPHz/WvmWbNm1i+PDhtGnTJk7moUh+nMCuBlUIKAQEgRs3btCrVy/t9tlRo0ZRtmxZBYxCQCGgEFAIKAS+CAQ8PDzo16+fdpvuH3/8QdasWWN1XorkxyrcajCFgEIgAgEJVhJ/eznOFNccVRQCCgGFgEJAIfAlIiAuPOKOKn77kkQitooi+bGFtBpHIaAQ0BB4/fo1Xbt21az406ZNo3DhwgoZhYBCQCGgEFAIfNEInDp1im7dumnW/BkzZuDo6Gjy+SqSb3KI1QAKAYVABAKHDh3ScgrLFeGShUAVhYBCQCGgEFAIfE0ISNa4HTt2aHe/lCpVyqRTVyTfpPCqzhUCCoEIBObNm0fHjh2RINuWLVsqYBQCCgGFgEJAIfBVIrB48WItGHf27Nm0bdvWZBgokm8yaFXHCgGFQAQC4ne/cuVK5MXm7u6ugFEIKAQUAgoBhcBXjcCJEyc0g1eTJk00f31TFEXyTYGq6lMhoBB4j4AEGd28eZMVK1aQLFkyhYxCQCGgEFAIKAQUAoCPjw9NmzYlc+bM2s25xi6K5BsbUdWfQkAh8B4BsVBYWVlpBF8VhYBCQCGgEFAIKAT+HwEh+uHh4dqJtzGLIvnGRFP1pRBQCLxH4Ntvv9VyA8+ZM0ehohBQCCgEFAIKAYXAZxBo3769dmfMxo0bjYaTIvlGg1J1pBBQCEQgIAQ/ZcqU/PnnnwoUhYBCQCGgEFAIKAT0QKBz5848fPjQaERfkXw9QFdVFAIKAf0REBcdJycngyz4wcHB+g+gaioEFAIKAYWAQsCMELCxscHa2tooEolF/9WrV0Zx3VEk3yhLojpRCCgEBAEJsn358qXBPviK5Cv9UQgoBBQCCgFLRcCYJF8wEB/9RIkSxTgYV5F8S9UoJbdCwMwQkDSZctnV7t27DZZMkXyDIVMNFAIKAYWAQsBMEDA2yZdpVa5cWbssKybpNRXJNxMFUWIoBCwZAbnoauzYsRw8eDBaaTIVybfk1VeyKwQUAgqBrxsBU5B8Sa9ZunRpevfuHe0LsxTJ/7r1Us1eIRBjBMR6X65cOY4cORLti64UyY/xMqgOFAIKAYWAQiCOEDAFyZ
epyIVZJUqUYP/+/ZpV39CiSL6hiKn6CgGFwHsEXr9+TdGiRenXr592c190iyL50UVOtVMIKAQUAgqBuEbAVCRf5iU3xY8aNYrjx4/j6Oho0FQVyTcILlVZIaAQiIxAq1atNPecCRMmxAgYRfJjBJ9qrBBQCCgEFAJxiIApSb5M65dfftFux120aJFBs1Qk3yC4VGWFgEIgAgG5gnvBggWam05MiyL5MUVQtVcIKAQUAgqBuELA1CRf5iVuO99//72WxU7foki+vkipegoBhcB7BG7cuEG+fPm0QNvChQvHGBlF8mMMoepAIaAQUAgoBOIIgdgg+adOndICcc+fP0/WrFn1mqki+XrBpCopBBQCkRGoW7cuxYsXp2/fvkYBRpF8o8CoOlEIKAQUAgqBOEAgNki+TGv06NEcPXqUDRs26DVLRfL1gklVUggoBCIQmD9/vnabrWTVMVYxhOT7XNnHjyPWMHD6eHImssfv2S3G9BtE3u9+p2m57Dz1Os/ytVtIkaccDauXQt87CAN8vVm1Yg2+jhlo1bQOLtZvWTB6IHcTleSn1rVwcbTnzOYJTPwnkHmT+hHPWJM3w37CQoK4cOwf/tp8it5DfiedXQhHNi1j63Fv6rRoRdGcqXju+Te9x29mxKjhuLkkivNZhAX7s23LJs4+fEu79m1IaRvKjpkj2XIvKb26f0f6FInw9dxAiz5rWL92CbYGSBwS+Iw6dVszdPUWijiG88jzInMnTMWlfAd+aOxOiN81pv25gDzFalKxXBm9dc4AEcyoaiD9evWhYPNu1M2fGTtrKxb+3Jh9zo2YO6QR1qG+HNy9iiOX/OnUrRvO9jZmJPunRAll+8bVnPV+Q5OmzcmUOD6HF49j1rEwRgzpQpqkToQ8P0SbzlMYMHUB2dzi6z2fsLC3dG/Xhiq/TqJOnpQ8uXmRpQuWYp+7Gh2blCde4F0m/TGVDMXqUq18CeLb6fu20lsEM6oYzITffiIkbxN6NilHeFgwexaPY52nM+OGdSNh+Cv2blnFsWv+NG7TjkzJ9cfZHCYZWyRf5ipZduRW3DZt2kQ5dUXyo4RIVVAIKAQiEHjz5g3ZsmVj+fLllC1b1mjAGELyg59dpErpujSevpeuFVJz3WMZFRr3plrHIcwd1p7ze9cxY8FayjX/lebV9HclCgt8zsLJ49h/x5Yev/Ulm7UXLVq05UJwJnavmkq6FImZ2Kog/7h0ZPOUH7Ay2uzNsKOwEC4d/Ztfek/k+8mraJQznMmjhzFpmQfjJo2n8TflODKrK7+uC2Lj8om4JnGO80mEhwWxZdFf/LXtCp0HjaRaZujapjXrD91m187V5M+ZmQMjq9NsQ1ruHZ9j0PqFh76lYspkZJ9yjT+bpObEwT3U/bYptVp1489Jg3l4YB59x63n2459aVjL8DR3cQ6eQQKE0a9hdc7n6saSvjVJnMCKvG4u+LsW5vzpPVj5XGHeHwM5Z12aKWN+xsHG3J+UcP5eOos/d1yhTfde1C6Sit5tGjF3y0k2791P8dwZuLmkExXHPObE8XWkSKA/EQ8LC+bH6sXwrTKaJb2qcOrALrq2bUeemm0ZO3IA1jc20em3BdT/sR8NqhXXNkxfbglndq+mzPXOhcfC3yHYnx/ruOPxIiVL1m8lj4M3syZN4Gp4ZoYM+BnX+PrjbA6YxSbJ9/DwoFmzZly/fp2ECRN+dvqK5JuDdigZFAIWgoBcyvHq1StmzpxpVIkNIfmEBzG0ZVkuZfyJ5YMbs2ZcO8Zuf0pSp5TMXD6FK2v/ZPW+O3TpP4gkr6+yctlqstRqzzcl8+D4WfNtGIc2zGHexhPU79CTTP4HGTJnB0cPeTJ1+xZqZg6jSpbc1JxzkXZ5A5k9dRJ33zqQr1h5WjWrjYV9k6JYv3Ceel1g0oBfCC/6C73qJGPy9NksXH2atv370aVlTSa1qM2dwj2Z1rkwHRt3Y+Wu1UbVCcM7C8fr8DrGTVlK7jo9aFkyiM6/Le
Dg3wf4be0WWpXMRJcCGfDvtpclbTNycPta9py/Rdm2g6iSJqrRwtjcozB9L9flwqbueGyeTqMf1lKpujv9p83k+l9DWXwhnJ97/wRnl7PzxDUePg9m8JSJZHC2LItkVEjI3y8s+JHG0/3YvW0qqYN24FxgInlcfem74wy5npxixLDxZGs5lHxvTxIUbkvN777DnA36j09vYeDYJeSr3YGONV1o2nosN88fod6kNfSrXYThtfNxpOBINg+ozPmDO/n75DVyVW5J/SKpPw9XeBh7J7ej+yZHDmwbxonNC/mh52wK16xCl54DCNw/hykevvTu1Y0Ed3axYvdZnj57TdcR43FPE/enY/rogiF1vHZOovxPW9h1cgdp/I5SolRHMhTKSLnWw2iUOZBJ0+fgVrwxZZL68NQmFSXLVSBZAkNGiLu6sUnyZZZdunTByclJu4Tyc0WR/LjTCTWyQsCiELh69SoFChTgzp07uLm5GVV2g0g+cGBxf9rMf8Tl7eP56ZvSpG01Ee/Nf1C45WD8z2/lUoArI/p9j9flm5zevpIkFVpQuWhenO0+L7b3FQ9GTVtMoQp14eJGPJOUJ/DQbIJL96NPqWBy1BzISc8jhJzfy27vJNTLBYePnyJjlU4UTW1ZlqeoFjDg1WPWLBrPnuv2dKmSiVV7LxOPEF5YJ6dlk28Y1KsXXSdMoXaOJFQr1pA95/dH1aXJ/x7w4gbjJ47HLmkBijjcZsWD9GR8soNDCesxo3s53IuUZbPXXfKE+7F17SaSJXyIn3tv6qSPWjTfWyvIX3UkZ/avZe7gn7icogEpH+3BpVofAs6twj95en5u/x3jZu6ldZ3CuD1eyvi3nRld1bC81lFLEvc1wl8dp0jJVqzctpfLY2sxxmkMnfxGsCJJX36vYsv46bPpP2Uet9bOJyDcloYdO+Fgxv5t4YH3GTlkELau7pRK/JiJZxJRx+kUE2/mZd/cn6iUKzW9Nl6kVkZHTu/ZQoDVWx65VKRV2YxRb5Zv7aROvb5MmjefExtmc+ZNKlzDHpO8RFPCr27gcYKMdGvfmr9mrKB1q9qkfP0PnbelY/GvpeN+oY0sQfjba1QpUoXv5x4ly/VpdN8WTq/S4SzzdKNn3SwsWrKa2l2G4nRrJ942aalQvSZunzdUG1nC6HcX2yT/8ePHpE+fnrNnz5IjR47/FFyR/OivqWqpEPiqEBD/v0yZMjFgwACjz9tQkv/25h6yle7Bos2j6NFxJmu2z2LvXyPZf8+BrInf4JyrDr+0qqHJuX/ReHwzV6WCe9QkP+TlPUYOG8ebREm4efkujTp0IMPrw/RZfJ+eFd7Sd5sDF7dM0voNDwvh6ukjHD1xnlrtfsA1ig2E0UEzcYfhwQHs2byGCUu2krdYEQiE1g0KMHHKWkrnd2LpjttMnfgH2VLHp2JR8yD5IvOMiRM45xfCm0ePKF29BrXzW9O04xym9HWnTve/uX91n4Zc8EtvLnks5W6+XnqR/KC3fuTJno8fl6zj7yF9Gbd2PoeXzOD81QckTGBD+lxV6dCmiU43QkPYNqI1Lp1mUcrtyyP5MsdaJQpReeJqltcrwthz98jkvYHG7WczqE9VFm/wZvb8aeyYO8MiSL7MZ86EEZx+bc+b66fIX7MpbSq6UrniTyxe2ZNK5fpy8s4NUjvFgzfenD+xh7PWpfQg+fDyuQ8dmtQiXf1fCTi2lga9BnBr5wq8nvjyzMeXvKXr0/q7eiS0tSI8PIzjf/3ChdzdaV8qqg2EiV8AJup+aKsyeObrSfqTI4jfYBIN0j+h/4AZFK9VmavnH9J37Fi8d89TJF8P/IcNG8atW7eQOLn/Kork6wGkqqIQ+NoROHPmDNWqVePBgwfY2hoSsqgfcoaSfML8aF08B94pi2KXrhKbx7fBY9MyuvafQpEy7jTqNITa7ukMJvniCrRu+nAWbtjNI2d3Jg/6kQLJ31C29k+kd7lPsoaLmNmllPYxfnz7Cjt27sataA
NqFIrS30M/IMyqVhjXj+9mxG+9uJCgKC1q1+SnZqXo9Ut/vG964J/nB6b+1ob0iQLMhuRDGEdWTmPKyq0ce+bK5N86U7NcHmpXrkWWlM846TqAw9ObR4vkh4UEMrJONpYnakiagEB2rJ3MumWLWDB5APZ569KwRXeaVsxKeGggF/evZP4lV4Z3rEwCezM2YcdA39b0qMpo33xc37KXxw9P4vvoHs0qFSJbiWJY5ejCjD7fsP7PaRZD8i9sns2o1Xs5cPoRUyeMpFbl4rSv5o6Lmy0bntfmxtbfsBOXeQNJfuBrXxYOaMGYC464J3NhyrwZnNg4n9ULp3DdtjA///wzDSvlwyosGK9T2xiz6RmTBrclvrnHK0dTd44v6k+TeZdwe+jJ1H1nSBNwi9G/tOT029QULPctY3/7nuOrZimSrwe+8t1MnTo1O3fupGDBgp9soUi+HkCqKgqBrx2B1q1bkzNnTqOlzPwYT4NJPrByaFO+H7uT0WtO0b16Bm6c2cePrVpjX7Q5EyeOJpOzLojNEEu+1L/isZLevw3Fzr0jY/q0J4ubNUOaVmD8roesveBFlVRWvHx6j7VLF/AyWXG+q14YRydnEjgYf/MT13r30vsKM0b8zNIrSRg6ahz1i7mxcWJvfp+4lAbj1tGjbkkShflQvvC3LN+9FqyscXB0IbGjfZyJ/vLmfn7/vT8HAkozfWRPSuVIzMredflp7kGG7LlD5wK6IGFDLfmEh3FlQ2+Kd91AqyGrmdqxADc81jO4Zzfelu7G8AF9ye1ixfWjGxiz9iY/t/0WV1c33JLFfVCyKRYj6PICslb6hST15nBqRn0Cnj9kZpcqzPZKzoh5a2mYJwlrZk7Dx8+f6s1bYO8Qn6QuTtjZmid7DXl8kl9+6cPWh9lZPO13SuRKhceEttQduIKuK84x4pt3eckNJPnhoQFc2TWNiu1mU7f7OKb3roP3iW0M6N+XB+lqM2ZATwqlT8y987sZNHMvP/7UjpRJk+Dq6vJFZmkKfuBBjrw1sS/VjdObRxP24iFLxvdi7Na79BkxiQ41C+GxahY33jpTtEx5kjnZkSyJC7bxzFNvIp6t2HbXiRhXUmqKK61cTPmpoki+Kd5+qk+FwBeEwLVr1yhatCjiA+jg4GCSmUWH5N8/vo62QzcwbcUisjmC76PbrFo8l9dJC9O9bV0iPglnd6zkdRp3CubIREI9jKqvHl5j6qwlpC5WgwZVSuIYD05vnsqw9fdZMW8MdmEh3D5/gJHTVpOvQG6ck6TEvUQZcmVMbhJs4rLTYP+X/L11HYe8gujSrRNp48OjS7sYN2cTzTt0o2Cu7FgF+dKlWSdylCmJjV18MhetSY0icXeyERL4gjlz5/MmSU5a1atBcnt4fnk9XUesZ+bc2SR+p8Mhb3zwOreLJ5maUSqFfig/fXSFH3qPo9fkubgnhmDfm6xatoCgRLlp+V1TbEKDGTVwICRwIlGiRNgmLUDHZl9qth0/mjf5ngbDF9IgqxOEvOXakaVMX3uNgSNHkSyBLfs2ruXAwSM4pkpLvOQ5qV+9JKmTmav7UgBzpk/lmVM2WtWtTkpne0IfedC4/WhGzltDdtd3EaABz7h59RSeVnmplj+lHooTzsMH1xg1cjI1uw6ieu4UhL+5x5L5c3njlJMmjeqTOIEtf47oy+NwF5ImcsLaKQvtWlfD/otMtuNP/7ZNcK7+G30aF4cQf84d2sKiTadp2b0vBdIl4tzejew+cp5QO2cckqSlWf2qJHcxV73RqUBckfyAgAAtRu748eNkz579//RRkXw9HlFVRSHwNSPwyy+/aOR+5MiRJoMhOiTfZMKojhUCCgGFgEJAIWAAAnFF8kXE3377DSH7EyZMUCTfgDVTVRUCXz0Cb9++1awEFy9e1CL5TVUUyTcVsqpfhYBCQCGgEDA1AnFJ8iXjXZ48eXjy5Anx4/87Za+y5Jt65VX/CgELRkDy4e/bt4+VK1eadBaK5JsUXt
W5QkAhoBBQCJgQgbgk+TKtJk2aUL58eS1/fuSiSL4JF111rRCwdATk+uz+/ftTs2ZNk05FkXyTwqs6VwgoBBQCCgETIhDXJH/btm2MGDGCQ4cOKZJvwnWO1a7Xrl3LmDFjuHDhgpbWsESJEkjeVAmSlNK3b1/t7ydOnKBIkSLa716/fq3dkvbtt9+SLFky5s6d+0mZ//jjD+33vXr1ev93uT5ZLkOaOHEi7u7uMe6/Z8+e7/vOkiULN2/e/KQsIr/sUN+8efN/f//111/p2rUruXPnpmLFimzdulWrc+rUKU3Gtm3bav//uXlGliNWF9DMB5NLNurWrcvt27dNLqki+SaHWA2gEFAIKAQUAiZCIK5JvkwrQ4YMbNiwQeNpEUVZ8k204KbuVi4/EAIrOVIbNmyI+E4vXbqU0NBQDhw4oBcJF3J8/vx5TVSx1qZIkYIff/xR+38h1eKmISS/ffv2WtT206dPmTFjBkL2b9y4oe0aP7eJiKr/iI1HBAl/8eIF9+/fZ/LkyVSoUOG99bhly5ZkzpyZpEmTvpcvAl8h8uXKlWP48OHaJU1C8sXqXKZMGW3zc/36de1GuM/NM7Icpl43S+pf8AwMDIzy2mxjzEmRfGOgqPpQCCgEFAIKgbhAwBxIfu/evbG3t9eMvYrkx4UWGGnMsLAwUqVKpREwSW/o6uqq9bx//36NnFeuXJldu3ZFaWmXHV9EcXR01AI3jh49+v5348aN00j+9u3bqV69uvb7QYMGMXToUPbu3cuOHTs+S/Kj6v9TcJw8eVLboPTp0wfJ//o5+SK3DwoKIl++fNqvfv/9d2RjICcOctFI5PKpeRppWb64bkQf5ASkWLFiJp+bIvkmh1gNoBBQCCgEFAImQsAcSP6xY8do166dlihDkXwTLXRsdHvp0iWNkMtizpkz519DCtEV4i+WfbHOf87SHhUJ/xTJlxRN4iKzefNmDh48GKP+DSX5WbNmZf369f9qJpsdOzs77Xey8RCXHXFdkrrnzp0jXrx/J0ZXJF8/DRXs6tev/58uVPr1on8tRfL1x0rVVAgoBBQCCgHzQsAcSL4gIl4P69atI3/+/BpAyl3HvPREL2kksKJ06dKaxTrysYw0Fou7XHH88uVLLa+5MUi+3KQmLjGSnkluPhXf+Xv37mmW8pj0byjJ/5RP/pkzZ977n8nGJl26dDx79kzz058+ffr/DaFIvl4qpq2ruE5NnTpVvwYxrKVIfgwBVM0VAgoBhYBCIM4QMBeSLy7XadKk0bwhFMmPM3WI2cByhXHOnDk1wv3xVcZiyff09NQs+bIJEL/5w4cPa0G5UoQAS8Bt48aN/5UW8XPuOpGlFUUWn/kffvghxv0bSvIl/kDIZ+Qivvtys6SUfv36aX+XHezly5e1Iyux6EcuiuTrp3tVqlShe/fufPPNN/o1iGEtRfJjCKBqrhBQCCgEFAJxhoC5kPwtW7ZoHE1cthXJjzN1iNnA4pMvFms/Pz/NNSdlSt3V2kLmJeWhZEQRtxbxp5agWbHGduvWTauzceNG7e/iyiPBqhHlcyR/4MCBFCxYUAvoEAItLjJSYtq/oST/45iByO0FB9ngyOZF4gYk207VqlU1tyJF8g3TN4n1kODqV69e/d/FGob1pH9tRfL1x0rVVAgoBBQCCgHzQsBcSL6/v7+WQVE8H4SzKXcd89ITvaVZsmSJFlwqhLtZs2aa5X7x4sXIBuD48eMayfXx8SFHjhzIoov/vvinCzEPCQnRMs9kypRJL5IfOfA2soAx7d9Qkv+p7Doy/+bNmyOW5yNHjmjZdOR3EmUuaUA/ll1Z8qNWsX/++UfbKEnMRWwVY5P88PBwTXQrKyuDpxCTtgYPZgYNZL7RwUlEj0nbuJp6TGSOSdu4mm9Mxo3JfGPSNiYym6ptTOYTk7ammo8p+43JfGPS1pRziqpvcyH5Iqe4cw8ZMoRKlSopkh/Vwpnz38VaP2rUKI2wy1XGsrBinY/IMiOyS/
pIIbySN15KoUKFNBeeiFz6EfPTN7vOx3jEpH9DSf6nfPIl80uPHj1o2rSpNq/ffvtN61as0OKqkzhxYi19pgTjSlEkP2qNFhwlpmPs2LFRVzZSDWOTfJFfPhYuLi4GSyipXOWF7ezsbHBbS2zg7e1N8uTJ3z8jhsxB4jbkJFHwspTi5eVFxowZDRZX9ElikeQU9WsoYhySe1XEvdPQTaAYnaS9vH+tra0tHi5Z+0ePHpEkSRLNOmpIEcObGMTErdTQtoaMYy515SRYvtXyrY1IiqGvbAEBAZreJEiQwOKwMieSL5xP9E08NpQlX1/tU/UUAl8JAuLOJadEDRo0iLUZG5Pky10Re/bsQfoUl62PMyx9blLSVm4OFHelsmXLGtQ21sAy4kAy32nTpmmxF3KyZwiZk7ZyWia6IvEyllCEcMkHUO6AiIjl0Vduma+ccElaYUPb6juGOdWT2C4PDw8ty5ahm2XJACcZuiQRhBBjSy+iN5MmTdLmkytXLoOmE3HKLumt5b6ZL73IJlqMimJQjOwtoM+8RefEKCeXORnaVp/+TVnHnEi+XJQqnh2SQVGRfFOuuupbIWCBCKRPn167CC061s7oTteYJF8ubRN3Nvm4SnC6ZBrQt0gGKQlml4+6tI2Id8JMSdQAACAASURBVNG3vaXVe/jwoZahSz6qrVq1wsHBQe8pCFaSwUGC+iX2xxIstiLz999/T4sWLTQ3P0OKJC2QCwjl1FBcJL/kEuH2OXv2bA0n7dhfT9c3aSt3tqxYsULDSTKz6dvWXDGVE40OHTpQsmRJLemEIbouOicbS4mXE7dZQ9qaKx7/JZesvdy1I+9fMRzIpkjf+UbozZo1a6hXr56WDlvftuaAkzmRfNloyabyzp07iuSbg3IoGRQC5oKAEORs2bIhLiuxWYxJ8iWt6qxZs7Rj306dOmkkVB+SIUfyJ06c0OJW5Nj4p59+0qxR+rSNTayMNZbMV04t5KZsOe2YMmWK3hZbaSu3Sy9atEhz1ZGNkbm7IojMQjwXLlyobfxERwxxM5K2Mk+xMEoyA0PaGmvNYqsfcXdbtmyZlqihRo0aWkriCJfHqGR4/vy51layfAhZa9OmjcFuG1GNEdt/3717N3JvjMR7yb/6nk6IzklKazkFyJIli+ZOa+ipSGzPNSbjSTIQydEuz4rojWyo9T31Ep1buXKl1l5Ok2VzaUkuk+ZE8mUNxVVOYhSVJT8mGq3aKgS+MATkeF5SkcpdDLFZjEXyhZwLcZ0/f75mjZcPhXwwxP0mqiJt5QMjBEU+zkJOatWqFWsZhqKSz9h/l02QEJaIW67FFUVum9ZnUyN+t3LztWyopEgch2TgMuciMsuJg2xkhTxIYJqkItanSLIC0SUhMRK/IL6uktTgSy23b9/Wbkz39fXVNrodO3YkQ4YMek1XUjwLmRU/dImZkudI37Z6DRAHlSQ7nbiSCEHv3LmzZiXVp8g76JdffkEwcXNz03AUi/6XWm7duqVtaIRcyrMlJxeSFU+fIhjNnDlTyxgoiUPkWdX3+dSnf1PXMTeSL3omMZuK5Jt65VX/CgELQmDevHkcOHBAI8mxWYxF8iWIVKytslmRUrhwYe1joY/rkQRVipU3IqtQkSJFtA+6Ie4+sYlZTMeSj6n449+4cUPrSlwR5G4NfY7IpY1sEOQ4WIqQOSHN5lyuXLmiuRcFBQVpJxeVK1fm559/1ktkma9cMiNFTizEfUVOer7EIhsayVQmlynKZlc2RN999x116tSJcgMo2Iqrn+TplhgGaSsuThIbo49emSOe4tImJ4IReiMngxKXoU9QqbyP5BREDAhSXzYHokf6noqYIx7/JVOE3kyYMOF98Ky4AIqhJKr5yvtfXLzkAksxPohRRtyj5Bk1JKYqLvEyN5Ivm+syZcookh+XSqHGVgiYGwISkCgvZLkbITaLMUi+EBIJ+JKPjLgMSBGSIZd6yYf5cyRDLG5yz8SMGTPet5Vcw3KqIf
7qlkpQ/msNZb5yYiGBWeJvLEWyYcjmTub9uSJtZSMl7hgS9yBFsmGIH678a45FdGP06NHaBlbkl9MK2fiNHz8+ypMaaSunFkJ8I4oEX8rphWQ1+9KKWO8FK8mcFlG+/fZbzfUiqvlKBhrBVDK+RZRGjRppcQz6nKaZI5bi1iWZ7CKK6I28F/TJsiQxDXI6GFlvfv31V4sJVDdkPcTFU94L4p4UUWRzJ/Evrq6un+1KTtckUPTvv/9+X0/cfWRzKdmdLKGYG8mXd5Z8V5Ul3xK0R8moEIglBORDLrcIS9BpbBZjkHxJ27Zp0ybNTzxyrnvJDiKXpH3ON1SIrnyMly9f/r6tzF/IiWQZior4xiZWxhhLiJz4lX/sliVWR7HYfq7Ix1zSq0a46kTU7dKlC0IGzbFISl2Zm5CJiCKuF3LKI9bCzxWxLApRiZzCV4iHWMrEov+lFbE+i+U68jMpcTpikZcN7+eKnHiIe0rktpJRRtxUxAXD0opsCEVHBJOIIu+Rhg0baj+fc22Tkwx5n0bWObnrRd4pEpSqj1ucJeElp3ri8icbvYiSNm1a7TRUTlQ/V8RVRzaWH7cVA42+7j5xjZW5kXw5ld67d68i+XGtGF/s+L6+uqlFI0/5F4uJBUxMLC9yFC2Xi8VmMQbJl5ztQlwldV/kIgFvQjw+l5JNshH8+eefn2wrVjtLSRGp75rJhXnimiW+1x9jJS48nyviziQW/wcPHvyrmtxLIfibY5Er3sUiG3FqITKKG4C4KEXcrfFfckvQpQQli7tGRJG2kjVGnpUvqYjLhWyUxQIducjpnvhXywbwv061xCVFLN5CLj5uKxlp5J1iacHKcjIo2adkbhFF5i/3zMjaf+50QtrKiaiQ/chtxWVHfPzN9dQrOvosz4YQSnlOIs9XnhPZ6Mim5r8ydwm28nzK+/fjtrI5lG+SIVm/oiO/MdqYG8kXTCXFsbLkG2N1VR//j8DgwbrfRfyrMLIIBCR4Uo5c8+fPH6vyGoPki4uOuNxIgKW4C8gHQyyPQlCEzH3u2FfS3AnxlbYnT57UPt4S9CVthczpmyEiVkGLwWDijy8/gruQOrkTQCzbMt+oLPmSA12CEIUQSrq7iOBk8VWXj7k5lmPHjmnWWLHMSvYkIaxiSZUgWvFb/VwRnRILo7SVTERi1Ze2kl5V3MC+pCJrKps4SRcqeImOiG4IgRGLqmzk/ovky7MjWMlzePfuXWTTLXEt4osuFzTKJtvSSL4YDCSYVN4l4p5WvHhxTWfkXSJz+xzJl7bynAgBlhgheRdJuxQpUmjBzFG5PlmSXsl75OLFixpWcnIhxgNZbzm5ECOLnOb8F1EXvbl8+bLW9vHjx5reSBpnwUr0Tdqae+YuWStzI/mif7LBUiTfkp4kS5FVrPgRN0p6eSlrvqWsG2gvV/nIyzFrbBZjkHwhYRH9iK+5EBY5Upci5DUqn/yItkLkJEWe+IRKEZLypR2tCzYRVjPJoiKWxYgA46g+qJHbSjuxdEq6NilRtY1NnYo8lhCtCBcucb+Sy2KkiE5EFRQY0VbwEpeVCEu1Pm3jar4xGVdIlxTZKMsphrg5ybqKVfZzJF3wjTjtkI2yuHNJnnyJ9YiqbUzkNWVbeSfIe0XWXoLSZf2FeMr7QPTmc++FiLbi5iWnYxKbIMRX9Ebw+JLeKbL28l4QrCSzjmyqxbCSOXNmTWfk57/mG7mtuO2cPn1aOymRzUFUbU259ob2bW4kXxJJlC5dWpF8QxdS1dcDAbHeR2TaGDRIWfP1gMxcqgi5FdeV2LZcG4PkR8ZQSJz0Kf6vhhYhcWKBMlertKHziaq+uDL16NEjWhs7OU4fM2bMe5If1Vjm8HdZV7HKGlqE6EkgoOQA/xqKWAIliFL8og3dvMmpmLiryG3IQvItvcjaS2Ym0XeJTzCkiIuYpJWUd5EQ1y+9yOmPBKkLwTR0vmLRF72RExPZTFlSMTeSL/cOSJC4suRbkhZZgqwRVv
zIPvnKmm8JK6fJKEeqEqQYlXXT2BNSJN/YiOrfnyL5+mGlSL5+OEktRfI/YKVIvv56o0i+/lhFVVO+qZIwQpH8qJBSfzcMgchW/IiWyppvGIZxWFusEZGDn2JLFEXyYwvp/x9HkXz9sFckXz+cFMn/N06K5OuvN4rk64+VPjU1N6nwCEdFfVqoOgqBzyEQYcVPlAjeXZJD+vTw8iUoa75F6I6kzpMsJLFdFMmPbcQ/jKdIvn7YK5KvH06K5CuSr9x19H9WTFlTvueK5JsS4a+t7wULdDP+/nuwstL9d3g4RP7914aJhc1XgqPiYt+vSH7cKYoi+fphr0i+fjgpkq9IviL5+j8rpqwp33NF8k2J8Nfcd2SS/zXjYGFzVyQfLXuKCrzVT3FV4K1+OFliLRV4+2HVVOCt/hqsAm/1x8rUNRXJNzXCX3P/iuRb5Oorkq9IviGKq0i+IWhZVl1F8hXJj47GKpIfHdRM00aRfNPgqnoVBBTJt0g9UCRfkXxDFFeRfEPQsqy6iuQrkh8djVUkPzqomaaNIvmmwVX1qki+xeqAIvmK5BuivIrkG4KWZdVVJF+R/OhorCL50UHNNG0UyTcSrl/SzXVGgoTwdx29C781VrdfTD9xEdyqD3iK5CuSr4+eRNRRJN8QtCyrriL5iuRHR2MVyY8OaqZpo0i+kXCNK2JkJPFN041y1/lPXM1ZX+JKNmNk1wkKCkLyLJ84cYIbN25oV6xnz57dYP2+dOkS8ePH166g/xrKvn37KFy4sHZxiqHl77//pmzZstolav9VnJ2dtWvq5fbFuCqiG3ILsuQsX79+PfXq1TNYFNmYb9u2jVq1ahnc1hIbPH36lNu3b1OoUCEk37Yh5dGjR8hPrly5sLOz+8+mLi4uNGzYEHM2lK1cuRJfX188PDzIly8fciu4IUXebXKLa44cOZD5funl+fPneHt7kyZNGoPn++zZMx4/fkzKlCk/e4u2rEH9+vXNSm/M7cZb0TNF8o30tMUVMTKS+KbpRpF8RfIN0CxjkHy5qXfdtt38uf8mT51zSP5W4sWLZ4AUuqohISHay9FQYmPwQGbSQAiw3HAcHaIVGBiInZ39+xCcj6dkExpIluBb9PimEOXLl4+zGYtu/NizL8eTVudtUMhnNyWfE1Lma29vH2fziM2BZZMsWWWic/u1vm2L3FnGooULsba2js2pGTRWu3btOOL2LQEhuvdJdGQNCgp+1/bLP9uWtZcfa2sbrK0Nm68+ba3DQynps5WZ06ZESzcNWnwDKn8VJD8wJJTLD19w4+lLbvn4cfvZK7xf+vP0tT++bwO1l2tASAghoeGEhYdjY22NrY01CexscLK3wyWhPa6O8UnjkpAMSR3JlNSZbG4uZHNNZADUsV9VkfxPYK5IviL5BjyKxiD5fn5+LNu8mz9OBvIgbVUDRldVTYWAbfBr8vn8w6AqaahaNe7WRHSjfffebM01kFCbr4Okm2pNjdlvNY/2rF+3NlrE2ZhyfK6v5s2bs7HgH4Ta/PdpVWzJosYBm7Ag6pz7jYVzZ332lCi2sfoiSf7zN4Hs9/TmwM1HHPN6wpWHz8nk6kLKxE4kc3IkqVMCEid0IFECBxI62BHfNh52tjbYSIJ+KyvCwsIJCQ0jIDSEtwHBvA4I4qV/AC9e+/P8tT9PXr7C+8Vrnr32J1+apBTP4ErpTCkomzUl9vEMOz405YIrkq9IviH6Zc76EleyGYvkLxWSfyqE++lrGLIkqq6JEIgX/JoCT/5mcOWUZkDy+7A57zBF8k201tHptsbeVhZB8tcXnkRovPjRmaJqY2QErEODqHempyL5euAaLXcdnzcBrD/jxYYLdzh5+wn507uSJUVysqRITCbXJAYfz+ghJ/5Bwdx68gLPR8/w9Pbh0gMfymdPTd18Gfg2X3qcHP7b50+f/mNaJ66IUUzlNml7ZclXlnwDFEyRfAPAsqCqiuRb0GLFgaiK5McB6BY+pCL5+i+gQST/gO
dD5h25xpaLdyiZNQ0FM6WmYPoU0fLj1F/ET9cMCA7h7O2HnPN6yLFbD6hfMBNtimenREa3mHYdrfaK5CtLviGKY876EleyKZJviAZZTl1F8i1nreJCUkXy4wJ1yx5TkXz9108vkr/j8j2m7LuIt58/ZXKkp0zODDjYGh7Mpr9YhtV8HRjEwSt3OXjtNmldEvJj2VxUz53OsE5iWDuuiFEMxTZtc2XJV5Z8AzRMkXwDwLKgqorkW9BixYGoiuTHAegWPqQi+fov4GdJ/vkHzxi+4wyePn5UL5CN4lnT6t9zHNU8euMe285cI7ebCwNrFia7W+ykq1Ik/xMLPmSI7peDBsWRNpjvsOasL3ElmyL55quvMZFMkfyYoPflt1Uk/8tfY2PPUJF8/RH9T5I/ae8FRuw4TePieaiWP4v+PZpJzS1nrrPu6CVG1S1Gh1I5TS6VocTo7lVvLh3x5JHXUwLeBplcPjWAcRFwSGBHiozJyV0iC+lypDK4c0P1xeABYtAgrmRTJD8Gi2bGTRXJN+PFMQPRFMk3g0WwMBEUydd/wf6P5EsKzE7LPLj70p/mZQrg6pxQ/97MrOb9534s9jhLkTSJmdq4tEmlM4QY7V99nAeej8mQMw3J0yTBPoFK52bSxTFB54FvA3l6/zm3r9wndRY3yjUqatAohuiLQR0boXJcyaZIvhEWzwy7UCTfDBfFjERSJN+MFsNCRFEkX/+F+hfJlxz2jebswtkxIc1K59e/FzOvOXfvSRLaWLHs+4omk1RfYiQE/+XzNxQoY/rTBZNNVnX8LwTOHrhCoiQJDSL6+upLXEAdV7Ipkh8Xq236MRXJNz3GljyCIvmWvHpxI7si+frj/i+S32Tebuwc4tOoeB79e7CQmvP3niJFAltmNitjEon1IUbionNw/SlKf+tuEhlUp3GHwMGNJyhdr7Derjv66EtczSauZFMkP65W3LTjKpJvWnwtvXdF8i19BWNffkXy9cf8Pckft/scezwf07lqMf1bW1jNMRv2075EdtqUyG50yfUhRtvne5AosRNpsqU0+viqw7hF4P71h7x88YoabcrqJYg++qJXRyaoFFeyKZJvgsU0gy4VyTeDRTBjERTJN+PFMVPRFMnXf2E0kv/w5Zvw3MNWMea7qhbtgx/VtG89fs6U7Ue5PKAxDrbGvS1XH2I0f+BaStcponzwo1ooC/y7+Ogf3HSSNkMb6CW9PvqiV0cmqBRXsimSH4PF9H8EJ9eAz13I2ADyFgEb477joiudIvnRRc5I7V7cg31TwSE1ZK8NmTIZqWPjdKNIvnFwNHovzzzhxDwITQI5q0Mm8/HwUCRf/9XWSP64XWfDT3q//KL88P8Lgr/+OcG3uVLT3sgZd/QhRjN7LqdW2wr6r46qaVEIbJ23ly7jmuklsz76oldHJqgUV7KZDcl/dRPOroW7N96hawX2icA1F2SrAG4ZwMoEwMekS98rsOwn8DoNxSdA7WZgF7e3gEdM54si+QEXYe3kDytlbQuOKSCtO2QuBk5JYrKKpml77wxMqQzO+aHSMChZyjTjRLPXr4Lk+9+AjWMh9B1I1vEggSukKQKZi0OiZBBxr0w0cTR6s9sesKQphKSHqr9DyVpGHyK6HSqSrz9yGsmvPHlzeMUCOcidJrn+LS205imvh5z1vMPGTtWMOgN9iJEi+UaF3Ow6UyQ/ZktiNiT/6XHYPgQuHP0wIRs7iJ8EXFJD3rZQrimYh6FcJ6Mi+VEqn5+fH+2792Fz3mGE2kQzo9nr3TCkyYexrKzBNgE4ukIKdyjdCTLnB+soxYm9Corkxxjr5s2bs77wJELjxY9eX6+OwMhvICTCbmAN0pfoTfJ8UKYLZC1kNqdvmpSK5Bu81jY2Nlhbm9PDL3tHK6zS/LYkfOx3VUnoYB6WH4ORNaCB79sABq/aw62h+llc9e1akXx9kfpy6ymSH7O1NUuSn/kbyFoMQn3g6na47wnx3aDCSChfJ2YTNmZrRf
KjRNOoJN8uMeRqDemSwtOLcHUv+PlBqopQpx9kyBWlPLFWQZH8GENtNJJvnRiy1oHMWeHZFbi2F54/A9fiUG8oZMxtPhZ9RfIN1huzJfkuv84LX9i1vsETstQGBy7c4I9vixhVfEXyjQqnRXamSH7Mls0sSX6JgVC+BSS0g4CnsLo5XLsBifNDl22Q2AEIg1f34cJ6uLAD3vpDqhJQrAWkzQ428XTAhAXBjW1wbBk8uQeJskD+xpCnEiRIoKsT6g9ee+HkavC+BvZukKcxFKkHCWUsKeHgdxtOLYZL+yF+OshRCk4ugfsX/u2uE/IWrm2E46vhxVNIkgMKNtPVt7eHZ/tg2xR44AMNh8C5JXDzOnQ6AIlitp4Rrb8od50IS36CNFB1LhTNBSEBcH0N7B4PT15D1YlQqj44xIPwcHhyBg78CXfPQ7ykkKMOFG0ELpFce17ehrMr4OIuCAjW6U/pzpA244dFCHgG51fCma3w5jW4FYTC30GmAmBn+6Hew4OwawI8fgTpykO28rCs2f+76/hehxNL4aoHhFhB+rJQ5gdILif6AfBwLSwYD5lLQ87yOv/s9A2gREtI8E6njaAiX4W7ToQl3y4tlBkM5apCSBDc3gy7J8G9B1BmCFSUd827u4meX4Yjc8DzOJAQslQH98aQPPUHd8HXD+DcSriwE16/hhRFoGRXyJj9Qx1/H7i8Ec5ugZfPIGkeKNwcsrrr3gER5fl5+GcC3LsJKYpCmjywvz+EfuSu4+cFp5fB5b3gHwxp3ulq6gw6vfHaBKtGQ+oSkLEA3NoJiUpC6daQJKkRNAaUu47+MGqWfCH5M9p+81VY8t8EBNF13hZejGujP0p61FQkXw+QvvAqiuTHbIHNkuSXGgaVWoOTfHjD4dYqmNkVbJ2h6iwoXwV8LsDfA+HCcQgN1hE78bmNnxIa/wXZC4F1GHj0gL/X6z7uYaFgZQN2zlCkHVT9BexDYN8wOLIaXr16V8caxFXIrQS0XwKODuDnCXuGwrFduvHENySere6/pd8In3zhYdtawZED78YUuazBPhmU6Q7l2sPLnbBqCNz2AqcEug1KWHz4/TY4x2w9v2iSnzANVF8CxfPqphn8EnaOgP1zIWlNaD5ER9Bvzodlo+CtH4SGgLj3yHpmrAT1p0EyZ3h2GLaNgssn3+mPLGk8sHWATicgbXLdJnJNE7jhBSEROmYD8RJAjdFQtAHYxQOf7fBnD3j1DMLCQGIGRDeC3v6b5N/dDNvHwe3rH8a0kbqO8PNJSGYH9xbAlN91sR22duD/Fkr3h0o/QIJIm4oYqsnXRfLTQfnhUOGdf3uYP2z/HQ4uAofi0H4KpM4I3lvgrx/BX57Hdz4+1naQrhzUGAYZs4DvOdgyBK4cheAg7fWEteiEA9RfAYVL6IwBO3vDmYPv3g/yDrABGweoMARKNdYZGHyPwfTm4CfvHdEbqWMLIf6QINsHn/zHHrBpGHhdfKeH73Q1fjJouhJyZILry2Bub90YYuAQ3cvbCap3h2TGcQlXJF//h+49ye9dp8xX4ZN/6f5Txm46oEi+/jqiauqJgCL5egL1H9XMn+QDb87DkApg7QCFfoa67eDYNNg8GZwyQ/H2kMYNDs0Az9OQsga0mQgBF+CPBiAf6hxNoHRduLoaDq0Ax1xQfQC4PIMtY+CBN+T8Dso0ggfbYNtMsLKHfF2hWU+4tAqW9gArJ8jbGApUhWvL4cxOHSmIIPmPN8KUzmDrBPnbQuFycHklHFkFzkXgm0GQ4sU7ku8J1vaQKo8uILDFEoim+/HHy/tFWvI/JvnhIfD3SPhnMsRLBm0XQvpMMK4KPL8PiQtD4+HwUojZYHhrA+7toEE/+Hs07J6sO5Gp0hNcnWBjH/B5AjbZYcQ/cGEILPkLZNwinSBrZjg2Gy7uAys3aDsHspeEiUXB+ybgAA3+hCS2sHcQeHp+IPnFssPmgXB4JSTKp9tgyqnNul/h2TNIXAP6zoYHC3UkX6
LMEyaH5FmgcBso+C3YGy8g5asm+fKw7BsBe2eCP9BhK2TKClOKg/cDcMkJtYdA6APYMxYePQP3DlC7HxwaAfvmAEmgcm9InRK29YH79yBhTvh1K3jNgSUjwSElFGoN2fPCmQVwcS+EOsP3yyB7EVhWHc6d1G3yyg+A9Knh+DS4ePQDyS9eGjb1hGMbwCETVO8DyRxhS2+4/wCSl4efFsDdFTqSHyYHEK6QNCPkagzu9cDZOMeDiuTr/619T/Ir5clEq7IF9G9poTUXeZzln4u3FMm30PUzZ7EVyY/Z6lgEyfe/BkNK6khUvs5QoyFs6gCXboN7e6jyI4iR88ZS2DkTfG2hxza4PhQ2rgXHrPDrUXAEnl2EfcPhYQAUbwZPDsNBsdYXhDZ/QsosOkDXVYEjZyG5O3ReBGcmwJZZkL4W1BKrXvpPBN42gbVl4bQXuBWGjkt1gcKPj8CugXDnFdQeBlmc35H8m1BpHlSqCbbGc8UQ8b8Okh8Ku4TkTwIrZ2i3BDgIy2bAa3/ocUVHpEN84dIUWL8MCjaFGs1h1xA4cQJqz4biYpEX/ZkAO7ZASAh03gATs8ILG8hRA9ot1OnF/XWwZTjcvAPNV0DefDC+BvjcgbzToVVTXb2PffJT3IVtI+DOQ2gwDXJX1p3wiI4uXwr2TjDgLDxepiP5yYpDtTFQwDQpFL96ku8xBvZMhzeB0G4bOF6GP/tBUADUXwN580KoH1xaBOunQI76UKEJHJ0Opz2gtJw2tgJHR3iwGlZO0a1hrXGwrzNcugK56kOjybqTwCf7YMNguHEBqk2G0pVgSll4+hwyd4ROo3SuPuKTv7jpB3edjHawcSjcvAxVxoJ7DbCzh/tLYc4QcEoJbTaA/34dyXeWDeRAcDd+RkFF8vX/1r4n+fIfEnzr6vzOH0z/Piym5hO/N/Re+jfh4eGK5FvMqlmOoIrkx2ytLILkvzoHwyp+sOSXrQBLvoXHATpXDPmJXKwcoaP42LeGE+cheUnovfn/gQp+Cpv6w7G1kK4ZNPkdkqfQ1bs0AhZM0Pnwf7cGrg+E3VsgfzuoMxKcrf+f5H9TGyZkh+dhukA+cQ2KXKwTQq0hkC0prBZ3ndvQ4iDkyWr0rEFfB8kPgZ0jYY9Y8l2h7Ty4NQY8DkGguD98YuOUU6ybFeDAALj5CJrPhoKfuGfj7XMYmhXCXCB3Z2jdS7eSYZ6wUSzyO6HScijpADO7gM8jaPsQcr5LpPExyY93EnaMglfBn9ZZByfoewJ8VuhIfobaUO9PSBURExKz5/zj1tEm+eHh+D25w5O3tmRIm5xXz5/yOtgat6ROPH3ig11CF5ImceHFw7u8DI1PulRJiWcTvcwnRgu8tfvIXUfA2CsWebHkW0GHLeCzADYvh+BgndvMx/l6M9WEQuXg3AK4dgFqT4ViDf7tXy/9vrwH82uA9xso2BGa9NNlfQq7D6t+hdO7oeBgqFkEJjWH135QaS5Ur6tboo8DbxM/g21/wCNv3fvk45SfkimoyXzgnI7kpygPNYdBduMHoSuSr/8z+J7kNyiWC6/HL+hes4T+rS2s5uRtR8jolpi1xy4rNG8n5AAAIABJREFUkm9ha2cJ4iqSH7NVMn+SHw6eS2FWd51PfpVpkNMVltaDx8GQJBO4ZfrwTRbffCsHnRX0cDM4ekZnFe29VVcnPBSC/SFUnGlfwbbB70h+I2gyCJK/uxn73EBYMh1cskGLVXBtKOzaAPmE5A+HRPE+QfLrwIQc8DwU4ieFjEUiySW8wR7cW0DyAEXyDVHbiMDbj911Ap/CjhFwcDEkqwzNhsPVPuBxEALDIUcVsIl0uYIsedoykDYF7BsAnt7Q7B3Jl2phgRAYqIsDET0ZlhVCE0HuDtC6n07i4CuwcRAc+weqLoXiiWBGB/B5CK3vQB45LvqEJT/eadgxEl4FQZqC4OwWSTfCwM4RGo37YMk3U5IfFhLEpsENGHc6Jevn9WP3vGHse+jGb+0rMm
PSFNKUakH71o2Y2bc9G57lYsWELqRKEj0jpslIfogf7JCN2jKwLwjtZsKD6bBxqY7kpysNTu/WUdZS9MatCKRLDcf+hKvnoNZkKNFIR/LDgiHwrY6AB/jColpw/xUUbA+N++t0MOQ2rO4FZ/ZAkcFQvThMbgJ+L6HiX1DjXRKW/yP5L2CbuAw9gJQFwcUVrN/ptLzrHBJD2R/hzSFF8g15p5i47nuSL9l1Jm0/glsiR5qVfBdMZOLBY7P75Ycv8Pjla36uUYLWM9Ypkh+b4MfRWKMnjOCa51Xmz1gcKxIokh8zmM2S5JccBOVbgqM9SFaJ7T3g3ClIkgfabAL7h7CpI1y+BfmaQ3Xxb06iI2lPxQ86PTgmhmM9Ye0CSJgZOm+CZEl0QXHXd4O/DWQqDNdXw/6FED8HNJ0E6SVzyyvY2BROnoWUJaH9Ajg/FTZOhrRVoMZgyJgRnhyFNb/DvcvvfPIbw7rKcOoaJC8ALedDcjcQ3/GXDyDcBpJmgGc73rnrKEu+XtobObtOtflQNDcEvwavzbBnCtx9COVGQblmcH8KrJwGb/yh9VbImhfEkuz/DF491OlQ0CXYMQSOH4BKQ6FUK4hvB3e3g9ctCLOCMm1gZmHwfqXLsNJiBtjbwt31sGMseN2DZisgX36YUBWe3oEC46BRM13At+cumNv2g09+igewbTjceQBVhkOJppDAEUID4d4RSFUG4od/CLw1U5IfHhbK7aMb+NvLme/rF+Pm2UNce2ZH1ZLZOXzwGI6pc+JeMBdn/1nHlZB01K+Qn4QO0QsYNh7JTwtlh0C56iCZr+7uhF2T4c5NKNofqrUF303wV19dfE2NGVDyG50LXcALeOUDjunAyhu2DIJTu6BwD6jSERIlgoeH4MppXQ7+vLVhZ0c4exqyfQN1x0BiZ3i4GzaPAq9rUGMalCwPMyuB90PI0gZaDgZba/DcDit/ADLqAm8zOsCmoeB5EUr2hwqtwCmRLmvY4wvglAMS2X8IvFWWfL1eKaau9C+SHxgcyqRth0mVxJmWZfKbeuxY63/xgXN4P/fj55olsbe1MS3J9/UFF5dPzs0Ul2E99XlCkXIf1iqeTTySJ0tOhbKV6N9zII6OTrGGs7kNpEh+9FZEn0xR0ev5863MkuRLnvxsxSBeGNzbAxcOgm1SXXBapeYQ9AJOz4Jt08A6GeSsAWmyQLAvXN4OWSWAtim8OQczm8LrIMjZArJmgydn4fRqcCoANQZAQh/YNhru3oF830H6nBBwD3ZPBWtnKNUfarSCG1th9a+6zUGOapAxHzw+Cpf2wiu/D4G3j9bCvJ4QaA2Za0POwiDZPO4cBduUULEHcFaRfEOUOYLk27lAru8hvRv4P4Rru+GeJ6QpDXUGQvq8EHwX5rXQ+TAnzKHbLEoGnOfX4f55KDkA8hSCQ5Ph7ylgmwMK1YTETnBC0po+hFQ14Mc5cG40rJkBCdJCwZbgkgCu74Cr+yGpOzT5Qzfmsvpw7gCEi092H7AJBq+tcP74B5JfJB3sHgWH1oNtRihSB5K6QdBLODgRyk6B0lXg/rvAWzMl+YYsW0zrGo3kS578LLUhWy7wfwLXd8HdqzrrvJzKybPMC1jQGK6ehQQZoXQbXfpc31vgfRXySBB9RTg2Bfb8CaFuULQ+uCSGy4vh2iVIVQHa/wVey2DdaLBODrm/hRQp4dY/cP0gxM8CjaZA5rywvQN4bATrRFD2J3C0gbv74NzeD4G37u7wzyg4vAbCk0CxZpDEDUJew/mlOn/+Kk3g1rvAW0XyY6p2Rmn/L5KvnQCGhvLXP6d4ExhE63IFLdpHX3zwF+4/Q0J7OzpUKoytjc4v1aSW/J9/hsSJoXv3/yP7piT5C2YuIUf2XISGhOB56wZ9B/WkcoWqDB8w2iiKYm6dSFyF/HzudjlF8qO3aorkf+LGWy3dZUJdnvn89aFMO7ATB9dweHEdDkkmir3w0keXKlEy1SRKDwVb6cidvR0cHa/Lpv
PssS6dndRJnAmKtIWijcEhFM4uh8ML4cltXVo8K0nFmRRyNYTKvSCJI7y6C0cmw/FN8OolWNlCskw66/DbF5FSaIbC/lFwerNuTEndKSkVE6aA7LV0FuJ41xTJN+Qx+fjGW/G7klSD8ZND+uJQqhNkkptLNX8suLIMDi2CO1d0qQTDrXRpU1Pkh7KdIF81eHwCDk2Hy4fhla8uBavcoiu5yqv9AdkywNtn79K07oQ379IcirVWbkst1QPyivXdDh7uhI0j4O4NnbuHvQs4ZwCfM+CUHyoNg5Kl4J4HHJ4NN47Da19d2kSb+LqTJ/c2ULHZh+w6iuRjNJIfceNthN44JIG0RaFEe8jirktXKuXOVtg7C7zOQ8AbnZuOrSMkzwWlOkKBWuB3FY7MgvP/gJ+Pbg0lparoRMUBkL8YvH4EB8fBme3g9wxCQ3WpLZPlhKKdoUg1iJ8Qnp2ATZKF6ZzuvSM66ugGgXLqlypSCs3jcGQuXDqgO1XQ9MYenNJB4VZQoSXcXqncdQx5p5i47v+R/Ijxtpy5zrpjl2lSIg/V8r/L8mBiYYzZ/c5znqw8cpH6xXLxTcFs/+rapCRfAtjk+Fys+UL4I5F9U5L8zat2kC/3B4v+4FEDuHLtMisXrNXm/sD7Pv2H9eXYiSM4OTlrlv4BvQa9t/Sv27SGabMnc9/7HsmSJqdD6060adFeazti3FBevHiOg4MD+w7uJSgoiGH9R/LwsTdLVi7i+YvndPi+E13addPqHzl+iOF/DOHmLU8SJEhAzarfMKjvMGxtbQkMDGTY2EFs2bGJsLAw8ucpwJD+I8mUIZPW9tzFswwdPZDLVy8RP358qlasztD+I7Gzs2PX3p0MHzuEZg2/Y8L0P9iwbCu5cuRmxpypLFg6j1ev/ChauBgjB40ldao0CMm/dduTnNlzs2DpXK3/5o1a0qfHb8ZUtfd9KXedmMFqNpb8tw/A0wOe3P8wISHb4qssH9DsxT6KhZPLqe6B12F4IWQ6RJer2iUzZCutc4WQEh4ElzeDzwMdAZOPY9KskLG47thbiljFbnrA41sQLMG8tpAgOeSsDYkiTuXCwPe2Tka/F7o6rtnB7z688YHU1SBbbhCjRsgbncX3qTeEBOpSeDqmhgzFIHkaeOMJl/fDS1/I2wZck/xfnF/MVvULy64T5AUeayJBIiTfDhKkgAxFwU0uBIpcwsH7ENy6CIFC1qzAPhGkzAeZ3T9g7XMJbp8Ev+cQHqYjdGnKQKZIQYtvn8ClLfD65QdCl1L8s/OBQ6RLjbx2wN3rOh0TEumSVZdVydYNMlaAtGl1Asr9DrdPw6vnECpjxn93QVsVsAoBv3NwfC8kygY5aoGTcbMuRaAU7cDbmCqmAe1jTPID78Oh5bq0klqx0j2L8mynF73JqMtwFLk8PQ2ep8BfNnWAnRO45YGMhT9sBnxvgNdx8H2iW0MxRMipQLaIGJxwePMYbuwB36c6A4Rs5lzzQcaCOoIfUR4fg+snITBAtzlMlBr870CALWQuA2my6mrKJWq3TsBLGTNU5xqUSNJkVgZ7K13WsLO7wTEDZCkPSY2TGz8yNCrwVn/l/U+SL13c8fFlw/ErPHn1htqFslM867uXg/79x3rNozfusfn0NVydElK3aE7SJ/t/1xmN5DvdNapsgwcPRn60smEDnD2r++9IZH/m8O3UamvcdFIR7jqRSf69+3dp1akZLZp8T7tWHTQxvm1ak0IFitCre18CAgLo0bebRubHj5zMrdu3qPhNaWZNnkeFMhU5fe4U37VvwrqlmzQSLmR58cqFzJu+iGJFivPH5NEsWj6fdq068nPXXzVS36J9U056nCexS2IKlcnLrz/2pkmDZjz1eUrHH9vQsG5jWjdvy8jxwzh7/jRT/5iJS6LETJ01kU3bNrJn6wFsrG0oWbkItWvWpccPPfHxeUqLDk35rnFLOrbpwr4De/ixVxe+qV6HHzv3IHnS5Pyzfxe/DenD3OkLyZ
g+E4NH/c7tO15sWL5Vk3vV+uV0bvvD/9g7C+gojy4MP3FXkgDBLbgGd7cWKKW4O8UhuLsG9+Du7i7FNcElBAIRSIjbxnb/830UWig/BHaTbMLMOZxSduTOe2d3n2/2zh3ateooP+B07t2eo7tPyuCv6SIgXz1FtQby1ZuGaP2ZAukqu47wrsYV+CkgX+Oq/dwdCshPuv+/CvkfunF/+YbjHs8Ii4mlRqGcVCmYE2MN51JOusn/ramIT+CvRy85+/AlViZG1CuejxI5/04/94WOZcif00WdIb+/bY4cbKo7mKoDW3x/26+0+AD5ZqZm6OrpyeE60THR/NGkBTMnz0GK0Zd2x5u1a8yjG8/l3XSp3Ll7mz/aNeHJnRfooENQ8DscpIN5f5e6v9WgQ+vOtGvZQYblS1f/QnqQkIq0m9+xZxs8Lj/C2sqahIR48hTPzqEdxylYoBBFy+Vn9pR5MoxLJTExET09PTm8pnDZfKxZuoHyZaRc4+9fK1LOibXLNsr/FhQchIW5hbxzL5VxU0cTHBLEYtflH8e9fPKGvFMvlY692uKUN798/kAqkh6Xrl6kUYMm8sPI8TNHOXvo4sd5latZkjFDJ8iva7p8D+RP0NH556FQU4aMH6+Rnn76cB2NqCg6+aCAgHyxFr6mgIB8sT6+VwEB+UlXLEmQ/6G7R76BnHv4khvPfSmfLyul82ShZI5MSJ2kdJGA8Y73G24+9+XqMx/K5MlC9UI5KZjl2z8NyZBv7q1RkydOnMj4D5Al7eR7eLzvXzrxLoXtDBxIcu7kSzvZBfIVlEHa/60/S1YuJCEhgY1uWzl4bD/9h/b+4nwvnbguA/PyNUvYf3gfYeGhsj8DAt8ycvBY+ZcACfKlOP9Vi9fJfVy9cVneYfd0/+fXkNzFsrJ93R7KlCrL+i1rmDxrAgWcClKtUg1+b/wHeXLllfssU/3LF665Tp1P899ayiE5K9YsxU/KxSul+g0Po2LZSqxctFaG/K59OvLc459xa/xSiS7te9C+Vcf/zE+y+9HTh6xfvvnja1XrV6Bfr4HyWJou3w35mjRA+hVJQD7h4eFsPniK2bcS8MnRQJMKi75+UAEB+T8o3E/STED+T+JoDU5TQH7SxfwuyP/QbbgilutPfbj90p+n/kEUzmZPQUcH8mayIbeDLbofcqcm3Y5v1lQqVXgFBOP5JoRHfgE8eB2IU+YMlMqZmbJOWbH8d0ziN3pLkZj8f8H9h2w7KRmTL8FxsQoF2LF+73tgnziM+1effFGZHXu2MmPeVNYu2ySH50il4R91aNa4xUfIf/7CUwbtD5Dfvkdrnt3550Hp35Av1ZF25E+dPc6JM8e5cPkcS+e6UbJYKZyrFuPo7lNyLP3nxevFc+r8VoNZk+bQtFEz+VDt5JnjeeXz6iPk/zmwG49uSte2vy8S5EtnB6RfHT4vXzp4q1WQ/yG865ur/xsVpH4E5MsiCchXdzFpvr2AfM1rmp56FJCfnryZMnMRkJ90nX8I8v/dfaQijgc+ATzxD+K5f5Acx5/NzgpHG0s5576dhSk2ZsZYmRpjZmyIiYE+hgZ66OnoyDvG0s5zokpFXHwiMfEJRCniCItWEBKl4F1EtJzb3i8knNfvwuT4+jyZM5A/cwYKZ3XA3Pjvk+hJn69cM1khX9q1/xCH/1kqzdSA/C2rd2BpYcWvLepx5fQtHDM5yhpIIT0xMTFksM3A0LGDiYuLZcHMJfJrkZERlK1REpd+w78b8kuXLCOH/kjx/h/KpJnjePX6lfxLQKGy+ZgyZrq8u/+h+Pi+JmuWbEiHf2fOn8q1M3c+vtaiY1OsLK3/L+RLYUPZs+Vk8phpchtp7C07N9OrS2/mLJr1nzz52gL5GguJmTjxPeALyBeQ/52fgylVXUB+SimdNscRkJ82/ZaaVgvIT7r6akP+50NJKTh9gsLxD43gbVgUQRFRhEbFEhajkAFeip+PS0gkQfk+BaJkgL6uDob6enKcv/QgYG
VijLWZERkszMhoZUZmawuyZrD8mAIz6dP7cs1khfxUypP/IYWmNGNp536J2wLuPbjLqQPnMTMzp3HLBmTKlJlZk+aip6uLlH1HCuuRHgLmLXGVs93s33qY+IQEho934dnzp9SpUY9RLmPlcJ2k7uRbWVrJY0m7/hXKVSIsLIz+w3qTP19+xg2fJB+8PXnmGKsXbyB7tuwykLsunMGVUze59/Aubbu14tieU+TIlpMFy+Zy7uIZ+VzB/m1H5HCdz3fyj5w4xLBxLqyYv4oCToWYMXcKTz2fyPW1eSdfQP5/35vi4K26n2za2V5Avnb6RVusEpCvLZ5IO3YIyE+6rzQO+Ukf+n3q15SP5k/mnfyvCJCcO/n/HjaDrR2lS5Zm+KDRciy8VKSMO2OnjpIzzOjp61OpXGWmjpsh77iHhIbQx6WnnPUmcyZHxg6bID8ASKksB/cbRlDQuyRDvhSTL+3IS2cCXvu+wtzMnFrV6jBh5GT5YUPK7COl0Dx8/KD894IFCjNu+EQ5lEcqYyaPYN/hvZiZmtKpbVc5pr9N1+aULV1eTn/5OeRLbRatWCBn+/l3Ck3plwEB+d/zbvynrsYeQL5zeAH53ylYGqkuID+NOCqVzBSQn0rCp+FhBeQn3XmpCvlJN1OzNZN1Jz+FIV+zyoje1FHgew7eagykRbjOJy4TMfnqrODkaSsgP3l0TS+9CshPL55MuXkIyE+61gLyk67VV2smBdqSYydfQ+aLbjSggIB89UQUO/nq6aetrQXka6tntMMuAfna4Ye0ZIWA/KR7S0B+0rUSkK8hrdJrNwLy1fOsgHz19NPW1gLytdUz2mGXgHzt8ENaskJAftK9layQ73HpHEvHDGLF2X+ypXwwbUSL+gS99cPtnMcnefYvHNzFnIHdmLRhH8UrVU/6TL6jpgjX+Q6xRNUkKyAgP8lSfbGigHz19NPW1gLytdUz2mGXgHzt8ENaskJAftK9laqQH+j3miELVlPQufxHi6f2bI3nvTsMnL1cQH7S/ShqaoECAvLVc4KAfPX009bWAvK11TPaYZeAfO3wQ1qyQkB+0r0lQ/6QfTdUVYrmS3qrJNb81k5+1jz5MDA0oudEV7nHqPAwBv5amUzZc/HHn4OTDfL/uveM2U1KJ3EWSasmYvKTplN6riUgXz3vagryt+0/ypJzLwi2K66eQaK1RhTQS1SQN+Yxw5o4U7duXY30+SOdSIeye/QbzBWHxih1DX6kC9EmGRRwfuLGnj275csPtbW0adOGS1laotT9sbt5tHVeadUuXWU8lfx3sX7tagwMtOe9rKenp3XrWIb8rKM2qWa1rSvnqNdk+RbkN+3Rn2VjB7P64n309PQ5tXMTLx7e4+WT+7ToMzRZIF/K1T9s8wleT22ryal+vNjra52Kg7calVzrOhOQr55LNAH5sbGxuHvc5fJNdxL1jNQzSLTWiAI6KhXWpgZUKV8aJycnjfT5I51Ia2Pzlq2ExOmgSpXkzT9idfpvk8EwkU6dOn0Stqtts163bh3vFDqgkxpJv7VNjdS3R/KCg6kObdu21Sqo1lrIr73goKpmiQIUzvrPLaWacOO3IL/DsAnsWjaXX9p3w7l6XcZ1+I02g0axcfbEZIP8Bz6BnHF/zMn+v2piih/7EDv5GpUzTXYmIF89t2kC8iULpH6io6PVM0a01qgC+vr6GBsbI30JpmaJiIhAqVSmpgli7M8UMDIykteGNpfIyEgSExO12cSfzjZDQ0NMTEy0at5aC/muJ91VN/3CaF1Zsz9vJwXyA31fcfPcSbqNmcbw5vVYfuY2o9v8kmyQv/WiB6UdrXCprdm5CsjXqvdaqhgjIF892TUF+epZIVoLBYQCQgGhgFDg+xXQWsj3D4tSFZ68g5lt6+Jgafb9M/s/LZIC+bkLFaNnjRI07z2YkMAA2g8Zl2yQHxAexfDNJ3gwtgWZLE01Nk+po6RA/tpxu6ncuDRGpiKMQKPia0FnsdGxXDxwk8
6TmiXJmqSslyR1JC7DSpJMopJQQCggFBAKCAWSUwGthXyVSqVyPeXBGc+39KpbTmMaJAXyC5Uuj+uArjy8cZnxa3eTI3+hZIP85SeuUTNvRoZoeBc/qZB/dO0FrGwsyOqUWWMai460QwGfp/6EhUTQoHPVJBkkIP+/Momd/CQtHVFJKCAUEAoIBbRQAa2GfEmvlmtOYWhsQvPyRTQinwT5Upy97mdxmDvu+zG2XROkmHwJ8m+cPsaG2RNYdOyqPG5yhOvsvHqfOEUM27vU1sjcPu8kKdD26rEfF/feonKTMslig+g09RS4uP8GlZs6k72AY5KMSMp6SVJHYic/STKJSkIBoYBQQCggFEhOBbQe8qPjEmi+6iSW5mYaj89PTmG/1bcUhx8eGcXObnUwNdT/VvUfej2p0HZ+53XCgqMoUaXgD40jGmmfAu5/PcLK1oxqzcsm2bikrpdvdigg/5sSfatCYkIc7/xfERimkLNnGBgaY5vBHhtrC/R1Uz6bxv1DcxmxzoMlC2aRI5MtL557YZs9L1bGqXto9Vs6psfXlZEBePkFoYhLlNeGsakFGTNnwSI1fBHly5lNE9h915QlSxYQFvSOqEQdbG1tMdZP+XWaHv2tsTnFhPD01Rvi4hPlUF4jE3PsM2bGykyzGQyTZG+0Pwc3zOfYI31GTZqIlSqM8FgVtjbWGCcTDyXJrnRYSeshX9I8NiGRnlsu8CoshjZVSmg0Rj+lfSrF4G/5y53sViasaFMVI/3k+5L8HmiTQN/X8y05C2bFPqutiNFP6YWhgfGkGPxAn2BePvIhS96M3wX40vDfs16+aq6AfLW9GfbmJfMGNWPhKV/sbCyJV0QRpTShy+BxuHRvhb1Fyn4xe+yZQt/lt9mwagmZdXxxdKpMl9V3cG0jNgbUdvZ3dhB5eirV/1xEUIIZhnq6xESFotB3YP3hMzQomvE7e1OzeuRrjq8expbbZqxfvwrXKZNZdD6IhTNG0sQ5hW1Rcyrpvrn7Ggo0HkmMngXGBrooosJJNLZnyoqttKtVhBR9JovyZdfK6Ry8p88UV1fOrJ3M/BOvcJ06hlrOedK9K1JygmkC8j8IMv/sPaYeu02L8kWoVzxvSuqkkbGOe3iy4+p9RtcvxcAaRTXS59c6+V5ok0J3Hlzx5M2LQBTRccluX0oP4Pj6Ibme3+JFHmf8shVK6eGTfTxjU0My5bKncIW8SQ7R+bdR37te/u+EBOSr7WsJ8heP6Y6X3S+snjEQVIncOr6FsdOXU7XTUHq1aYS1kR6oVMTFxhCtiEVHVx9jExOMDP7+ZVClIj4+FoUilkQVGBmbYGRogO7febWVCfFERUeTkKjCwMgIEymd5N+/EqhUSuJiY1HESv0a8PToHAatcpchP1c2B4ICgzCztcdIT0VcTDTxKl0M9EChiENXX0ojZ4y+3vuLhJSJCcTERBOXoMTQ0BgDnQQUibpYmms20YDaoqeRDiTIrzVgAzuOnCJH9myoEhM4urAXLqtvsuzwBarnsPyou0IRI+/c6hkYYWpihN6Hy51UStknsbHx6OgZYGJqguHfG07/9r0KXTkloKGBwT/p2FVKoqIi3/szLoALW8az7c57yFdI/67UwdTMDH1VLOGRCkxNTVDExBCfqJJDb81M/k7yoFKREB9HdEwMKh09jA31SIhPlOsYfFjDacQnP2JmQkKCnAJT+iOd/ZFSYkZFRREWFsajR4/w9fWlUqVKVKlSRTP5+mXIH8fqk+5Uym9HYlwM5zdPZfiyUwyat5HWlfLJNzUolYnEKmKIjUtA77P3svR5E6uIJkZaN7p6GBubYGioL7dTSZ83cdLnjQIlunIKUunzRvpekYq0rmKio+T1qB//jiMb53H4gYEM+RmNY1HEI69DA90EwiNiMDEzkz9bpHVmYGSMqYkx7z+eVCTExREjjaPSwcjIgPi4eIxMTOV1I34/+nQ1pinIl0y/6xvElGN38HwXTv0STpTPl+1H3l
8p2ubqs9ccc39KXjtLxtQvSbEsGVJkfI1BW4pYmwKDVK4Mly5BpUpw8WIKDJi2htDYehGQr7bj/wP58nebgs2zRrLlZjQTJ46mdKFsBPl6smv9Utx2HkfXIgetuvalQ7Pa2JsZEBrwisO7N7Np50ECFYbUatKajq3/IH82O5SKMC6fPsichUt4/lZBkfJ16dOnG+WK5MNIV0mgjydb1yxn66FzWOcqRUXHCPa7x7Jzw3JymQbjkNWZsaeD6VoojM3Te7HXOyP1iliyfd8ZLHM502dgf+pUKIKhKpYH104zc9Ycbr8IpUL9VjgnnGHnqxKc3TVDbZ1+xg4+h3xJg+iQt0xvW41r1m04sWUcCYpIHt06x+q1azh7+yU5i9WmT+9uVCuZByN9Hbwf3mDJvBmcuPYEy6wFadujP83rVcLGGN6+fs72LevYuf8YsQZ2NGvXiTbNGpE1gyW6Oiqeu19kwqjh3HgVReXKFSlk5o3HO0cZ8udNncze58ZMHTuAKhFrqdJxEQNc+rAco5LkAAAgAElEQVRv+26uPPShXKMezJ7Snyzm+kSGBHB4xzpWrNuKwjwHratm4cSpB3QdOpHGv1T7G+jSp4clID5//jweHh48e/YMT0/PjxOVPoetra35/fff5T8au8fhM8iXBox858OKMT05HV6ULeumYYGCZx6XWLN2DaeuPiZj/sr07duTWmULyrv//p53Wb9qIXtOXMfANhdN23ahXZM6ZLQx4Z3fC/ZtX8/2vUcJTTSnYfP2tG/xG3kcbWRfvrh/lQUzJnP6nj/OZcuQySAYv9isTHV15dLmmWy8FcmYYf2oYHSTMg2H0X38FO4eWMsZ95cUqtyUSRNdKORoQ1RoIKf2b2HV+m28U9nSrHYRjhy4TOeJ82hepzSpEbWmzas0zUH+BzGPPXzNwnP38QuPoUqBHFQpmBNjLXr6V8Qn8Nejl/z12BtHSxP6Vy9C/UIp+0CiMWjT5hX8PbblzAne3pAjB7x8+T0tf4q6GlsvAvLVXi9fhHzgwam1DF14gCGjxlGlaDbGdPuDyzG5GT9mMInPzzFr/hoqdJ3N2BZF2LZqAWsP36Vd917ksYxizbKlmJbpwLg+zfE6tpC2k/bQbfAYmpXLxt418znm8Y5xS9ZRNZsSt/kz2Xb6GW27diG7aRibVi7nWoANZw9s+A/krxvflpknQmjXuQdVCtmzZ70bARkqM27UIDKGXKJ19zE4lKhNn45NeH39EDPnLsehfHcB+T+4Sr4E+ar4KO4cGMfg5Q84deIIT/7az+w587FwbkbL2iU4uXUpB15kYMP8MRSwi6ZayYrY1+zM2AEd8L9ziAVrDlCl53wGVjfHdeYMTj1T4tK/KzaRT1k4dy5ZfxvNpN5/YKvzFqdMThT4rTcDejbD3/04ixaupGCFX78M+e2noHKsiMugfmSIeUrPnkMpOeE8m7rm4sSudfQb50brnn2pWsiOw5vd2H7sKUvWr6FJOod8yfWLFy/mxIkTxMV9+qu5hYUFv/zyi3xzqoGBwQ+uki80+wLkEx/O9cOLGLfsMos27gCvc8yctxS9PNVo80sFrh5Yw4YrMWxeM49iWXRpUaUswTkbMGpQD+JfX2HZun0UbNibEa1Lssx1BqcfhtK1e1ccdd6ydME8VMXas3zSn9jqh9KsekXCs9amb/eWRHldYcWyVWQp04RZX4L8+n1QZnGmb/9+5NR/y7RJU7CuM4wtE1tybu8GpsxdQ9lG7alVMhtnd29g88HbTF2/k5Z1BeR/7vk0C/kfJvKXpz9rrjzh0H1vKubLSsncWSiZI5Nmft76zreX9HR+x/sNd7x8ufzMh1+L5KBLhfxUyZs66Sk1Bm3fqYNWVg8NBXt7SEgAfX0IDARra600NbWM0th6EZCvtgv/H+R7XttJ3ymb6D98LAX1vKjVcRrT12+nWk4r4hWRHFk3hR1PrFg2tiXLZk7Dplx7xvRpiRQ4c+/sdo54hNO6UTmGtP
4dGs5mx4Sm8s/fIW88mdG/AyF52zOmZX5mu84ha+UODOnZCunU0M3tExi4yoONq5f+B/I3Tu3B6chibFs+FV1VPAeXT2HNSW96jxxNxKmZzD4Ryub1i8mdPZOsi1vXEmwNqy8g/wdXyRchX6nA/coi+o07ybGjO9m5ZgnbLvkwevRonKz0iAu8Sqe+UxgzbyU5X62nYt8zvPa9i4SQCdHB3D60gotvHCmVBcbM3EKLodPp37yUbOHdzUNoO+Es+47vw/rJbBx/30VojB/SvZ6qcG+OLB/Ejge2/wfyp+G26xQF8+WT+5pezZr1ZqO56fYLq0d24JphXda5TcVQT4eAs8v4c9Qa2o12/Skg//Xr14wcOZJ37959XAmmpqbUqVOHTp06af7m1C9Bviqamxc3MG7SfmasXo3H4XXsuuRNr74DKJnDFqIe0b5NbzpM2Ug9kwvkq+fKqafPKJPVDGVMCDdP7+GvF/oUz2nMkuXbqd1xID2aV8VAB7xOLeSXHqtZffwURUK2U7DxXHZfc6d8DktUEa/ZsXwqhx4bM+2LkN+P0RvO0aR8LhIVUWyZ2JmF97JwYGUfds2dyJXw7LjOnkAWSwNC7uynbV9XWo6ZJyD/C58paR7yP8zpXZSCvXdesO+eNzdfBlA8hwN5M9mTN5MNuR1s0U2GjBRKpQqvgGA834Tg+SYQD+8ASud04LeiOWhaMhd2Zql7LbbGoO0Hv4y0opkE9xMmvDdlwYJ/TBow4P3fpdcE7MtSaGy9CMhXe+n/P8h/dG4Dg+fuYdCIcZi/2k+bCXtp8VsjTA0NUCkT8PG6z0tFRka6NGXLzJk4t51Mn5aVP7En/vUNStZoTUe3qwytaSe/pggPYOvS4Rx57sD4tmWZN3855dqMoUeLavLr/z54+3m4zqbpvbigKM2mhWOliF6OrZnGqsNP6DFkGA82jOJQSEnWzncheyYrua8NfZxZ+7aOgPwfXCVfhPyEKG4dm8zg+R4c27uYzUvHs/bQa2rXqCE/4CmV0Rw6eprBK9dgvrkbf+5zwv/55k8tUCaydeN6Zu+7y4SZc2js9HdSiKAjVKo/gA3bjvFmVTNqLstMbOjR920/O3j7n3Cd9rNYs+s4+fK9P0O3pIkdi8IGcHZZQ8Z16YCixkTWT/1DDucIueRGzxGraT18VrqGfCn+/uXLlzx//pzLly9z69YtOS7f0NCQmjVryoAvhetovHwJ8hMiuHZiKRPmX2Deitmc3zaPjYeeUa5yZSyMpLM98Rw/tI/fJiyn6vNl1Jj4Bq93Z3D8LCHgke1rWLb/Bt0GDKdJuZzvTQ+/SpXyrXBxO0W+x7OoPtGTO55nyCodyfjs4O1/wnXqD2DqjovULZGFhNho9szqxay/rNi8rDvrpk4mJGsd5k3sgYkORNw/SJves/lj5FwB+V9YNOkG8v89t+CoWM57+vHX8zdcexHAI/9gcjtYk9nGAjsLczJYmGJjZoyVqTFmxoaYGOhjaKCHno7OR9BJlA60xScSE59AlCKOsGgFIVEKgiKieRcRiX9IBF4BoRTMbEu5XA5UyZOJankdsTXTnptjNQZtGv+0SeEOR4yAmTP/O+jw4TBDxAV/EEZj60VAvtoL/EuQr0pUcNBtCm6nfRg7bgy6z3bwx8gttGvXGmuTD9l29LDPmpfCeUxZNXMGhZqOYEC79/dwKKJCCY2MwzLeh1q1WlJ3+mEm/u4kvxYV4sfamQO4FFGI8W2cmTd/KcV/c+HPtnXkg2zuu6fQb8X77DpJh/wRvNg1nu0vcrB28RhyONrIY637sxTrA+sKyP/BVfIlyFeEB7J62O8cCHHmyOpBrF02iTWHX9KwQX2M3p9/RjpEW7ttWyLWdqTVKn38Xhx+f0hRlUhMeBBh0UqunDrKjC2XcJnqSotS70Ez+vEGaraawubdx4je14PSkxQowi6+bxv5imOrhrH1jvn/2cn/MuRfXPUrM3p34G3Rwaxz7YyeDgRfXEEvCf
JHzE6XkC+BvJ+fHw8ePJDj8KWDqaVLl2bp0qX4+PjIh2x79uyJvfSLc3KUL0B+TKg/22YPZtdTG9YuH8H+VTPZcOQxlWvUxPbv1JoqFZRt1By7a7Mo0/86j95cJ6eprrxuoiNCiYhR4nHhKEt2nqdtnyE0r1ZQXhsK72NUqdObCZvPUtB7MRUGXOTK44vkttB7D/lu0zlwX//LMfn/B/K3u/Vi84zJvLauxJwpfbHU1yH83nvIbz5KQP6Xlk26hPzPJyql4HzoH8KzwDC83oXzMigCv7AYAiNjCI2ORcrFr0hIkLNMKFUqOfuEvp4Oxvr6cg57a1Mj7M1NcLQyIWcGC3LbWZLP3opCmW2SNQWmuu9zjUGbuoakdnsB+UnygMbWi4D8JOn9tUrvIb8bnrYNWDG5LwnxCp7eucCCecuwLd2EQT3bYRBwiUatR9J21By6NaqAniqOV88f8DzMnDJZYcmc6fhblGTU4J7YGydwdt8WHkfb0aZRRdyGdeJUQnm2LxuJrbEunnfOM2n8ZAo1H0uf2pmYN2c2wValGdq/KxlN4tk5dxjzT7xh77bVSYf8oeOweLmTwQvPMWTmHOo55yMxwp+BzarxMlMbAfk/uEokyK85YD1b9h8je7asxMVEcmGPG9Pnb6bl5I30bpiXPZuXs27PLfqPmkCV4jlJUARz9Nx9qlUqjZ73AUo0nsSqExepni8DUe98OLBpNdFZa1I9WwxTZiwmb4MuuHRuhCGxbJvVn+VXddi5bg4OEaexL9mTtefv8WsRe0Jf3WfpuB68Mi71XZDvsa8TOxePYe0NY+YsmoZTBiP+2uzK2Nk7GTlvabqCfKVSSVBQEO7u7vLuvQT3+fPnp3jx4kjhOadPn+b48eP06dOHHNJ5seQqf0P+iiPXKZ8vA3HREVw7vo35y7dRsfMEhravxJFNy9h44JYM63XKF0QnIYITJ69RrmpVrEKvUKRCe4asO07nWgVRBPtz8uBegoydqFHQhLnzlpOxzK/06dgUa2MVh5ePY/Ieb7btXE/uxDuUrtqBbvP20K1uYSJ8H7Ji1lieKp2Y8cVwnS/v5B/aOZpTq+ax41oILuPHUDqnLe6HVzF40jp6z14ldvK/sHZ+Csj/nveMSgpb+J4GWlxXY9CmxXP8pmkiXOebEn2ooLH1IiA/yZr/v4oS5C8Y0podTwypUb4YqJQEBQaQIUdROnXvQvG8WdGLD2P7irm4HbpPIacs6KAkMVFJifrd6FLXiSun9rJ+52ESjO2wNEzE700YNZt2oE2TGoQ+Ps3oca6Y5SyMka6KqPBgEqwLM2JYf5zs4NzBbazecQQd80xyTv4g3wdcf23E8Z2rvgPyx1Mxt4o5k8ZwK8iILBmsMNLT5d6FfSjztBaQ/4OrRIL8Gn2WU7hCbSzMzVEpE3n7/CFlW/SjW8dm2Oip8H5ylyWLVvDUN4Rs2exBGUuwUWEmDWpLrkz6LJk4iAOeBhS0NyIuJoaQ4Gi6DJtEjQKmHNu6nP3n3THKkB09nUTuPg2iWdeBdGlcBnPjBFwHtWKHpxVlc1qhpxOP59Nb2GUs9l2Q//jsCJ7ePsmsmQsINs5BVmtTYt4+5tyVV8xauixdQL50Rk9KiSnBvZRFRwL63LlzU7RoUWxsbD6eG5R2+J8+fUrBgsl854QM+aMpWvUXMtmYIKXQDQ3wIV+1FnTt0Jxs1ib4Pr/PquVuPHwVhJ1DBtm/b+MyMXVsP/JkMWHTrOGsv/iGgjkzER8TTXSMkl879aNx+Wyc3bOBXSdvoGtqg7GhDk+e+lC3bV/6tKqBiaGSNVP6s/r8G0rkz4a+bgLPHrljl73cd+3knzg6j3fuZ1m02I1X0SZkz2hDXNBLTlx4zAS3zQLyBeT/4KdqGm2mMWhLo/P/xGwJ9u3sIDER9PRAOuyUHHGPaVgrja0XAflqrwLp8quHN85y/1WY3Jeunh4m5tYUKlqc3Nkyo/93CEZcZADnzl8hODxKzk1tZZ
uRUuUqktnaiJiIIB7f9+DZqwA5n7SlQzbKliqGnbU5qBJ4eP0CD14GEJ+oxNjMCqcipSmS5/0FRlJKxvv33PHyC0ZHzwh7B0siYw2oWbks5npx7Nx9mGJ1WpDXKh6v+9d4m2hL5TLF5KAQP8/7PPMNJ1/hYjjameHz5A63H3oRGZOAoakFN9cO5ZZZa05uGae2Tj9jBwlv7nHi6mNCot5nZdHTN8DKxp6K1Wpg9TEFfQKvvJ7h4X6fqNh4+e6C7AVLUzx/NkwN9VAEeXLsnDsxsXHoGhiTJUdeKpYtJsfvRwT6cO+uO68DI+Q1ZWGfkypli2FlIR21hdi3Dzh04T6x8UrMrCywymCJQaIBlSpV4NH9e7yO0KNE0QI4xD/n+KUHVKhaC0tLC7ntk3M7eBJfkMZ1ihIbGcKzezd56B2EUkcXXp5nxfbbDJjiSuMGldJ0Ck0p7l7KdX/mzBkZ7vPmzUuBAgXIlCkTuh/uKkjpxRvynANnbxMRI60bHfT09bGytadU2Ypk/LBwUPLa6wn37j2SQ5SlOzIy5ylO2WK5MTHUJyHcl5PnrhEWFYuOniGZs+XC2bk4ZoZ6RAX7c//+Pbz9gkhEFzNbR6pULIO1ubG8cRod6MWpv24RFZuAibkFpmYmWJhYUczZmUCv+3gFJ1C0cAHs9YI4cvoGJavWJ7ONiXwPxKuH13jw1oBatcqiqwjn2aN7PPR8TYJSF52gh8xddZL+sxfxR81SH8PTUlpebR1P7ORrq2c0YJfGoE0DtmhFFyKF5lfdoLH1IiBfK5a7NhgRE/iMBRvO0bhxPXJltSfk1R2a/foH5QbvZP6flbTBRGFDqiig5O7dexw9fpMu3VpgohfHhnmT2Hg9nmlTRlC9ZPY0+Yu6tHsvZc3ZsWOHDPPFihWTQ3MyZ86MvpTVTRQ1FVDyyP0Wt9yfUKnBL9ibwoGVM1l03IeZsyZRuVjuNP1wqKY4X2wuID85VNWSPjUGbVoyH7XNEJdhCcj/zkUk7ciJ8uMKJEYFMHvcWM57PCFCkSCHHdkWqMvS2SPIapu62cd+fFaipfoKqHjifp3ZY0fyPCKRhEQlCl1bOvXoQ7umNbAy1WB+ePWNTVIP0k21mzdvlg/XVqhQQQ7LkXbupaw5omhKARXe92/gtmguFx/7I513iFWZ0KbHADr+Xhsbc+1JfKKpGavbj4B8dRXU4vYfrpPWYhNT1LRzgJQQ8DxQPUVHTjuDSTtRahexk6+2hOmmA5WS0KAAQsOj5LAgHV09zKwykNnufTpNUX5eBeJjFQS/CyAyJg7pU0fPwAR7+wyYm6ath7/o6GiOHDnCuXPn5Gw59erVk2PupQO2omhegYT4WEKD3xEWGYP0daWrL4UT2mNuYoxOejlQqUHZBORrUEzRlZYrUL06nD8P1arBOQn5RUkWBQTkJ4usolOhgFBAexSQbqq9efOmvHufNWtWunfvjnRbrZGR2E3WHi8JSwTkizXw8yggID9lfC0gP2V0FqNolQKxsbFyxhQRnqFVbtG4MVKIyIsXL1ixYgXSLr6Li4uc+jLVDtRqfIaiw/SkgID89ORNMZevKyAgP2VWiID8ZNdZpVSiQidZbvL+j/EqJRLAxsbFgY4uRsYmGBn8XIcIpTA2aec2ISEBhUIhp0aMjIyUD1nev3+fu3fvypAngV+qQr7q/V0vUtyCdN9LShRpLcbHvw+5MTJKnyEqkv8l3799+5a9e/fy8OFD2rVrR9myZTEwSHvnB/77Hk+5daNMTCQqKlLO+GRiYvKfg7KJ8QqiomPRMzbFzCgdaJsSb8KvjCEgP5UdIIZPQQUE5KeM2ALyk1nnRDw9bhKq50DR/NkxMtBLvvFUSt543WX1iiXsP3GVOCNrmrTvS5/Ov+Ng9vN8AQcGBsoZUy5evEhISMh/9JYOWM6YMUM+aJmaRRERzJN7D9B1yE/RvA7JaooyMYHAN374vfLk3NF96BrbMGDUxGQdMzU6lx5w3717x5
UrV7h8+TIVK1akbt26WFpapoY5yTJmbEQIz556gmUWCuV1TNYMNY/u3qJVo7pkKdeCpSsWkNPm3weTVVxeM4a2o5dRdvBatg1p/PE+gWSZ+E/QqYD8n8DJYop/KyAgP2WWgoB8jegs7R4mJsQTn5Ao58k30DdAR0faUQ5nZs+W3Daty4KxXcieSbpYR0pRr5J3VKVMJVIO7Pf1QYIx6d90dXWQLt5RqsDQ0Ag93W/v9MZF+DJ2YH88Qi3o3bsHJiH3GD9+CkV7b2RFn5/n+Lq0e3/ixAnc3NzkHd1/l4wZMzJ06FAKFSqUYiEb0gVYcXHx8q69lCff0EBfzjTy/NY5pgwaglG98Swe3hBDw/cPYtIakDJFKZHCif7xvbRrGq/UQ3pOjItPQFdPX96ZTsLS4N1bf0YO6MFdL3/iIwOoWrcl8+fP1sja14ZOJL0CAgJ48OABly5dIkuWLDRo0IBs2bJpg3k/ZINKpZR/kVIqVejpGWBgqI/0S4y3+wVmT3WF4i2ZNaIVZgZ/f3YoE4mX1xkYGBqir/f+go7EhDjiE8FAX1deVzo6eu/XTRIWjgT5bZvUROFYjuGT59K+ZhH5fgapqGJD6N2uJTtOXKL2mK1sG9JIQP4PefqfRgLy1RRQNE9DCgjITxlnCchXW2cJygJee3L91l0iFHHo6BuTy6kITtlsuOd+neVTx/HCqBid2jSlU+tfMdHXxfeZB1fuPCYmXilfnFWwaAkK5s7C2+d3uffUm7gEiI4ORxGvxNYxLzWrlMHUUJ+Y8HcEhER9YrOeoTHW1jaEPj1J/4kradV3OE1rVkBCxiPz2tFjSwyvbuz++OWs9oS1vAPp4ejGjRssXLiQ4ODgj9ba2dnRpUsXKleunGJhOsr4GG5duYSX71viEkHf0JSSlWuTyTiWjW6L2bN+IwYlmjNlSAucnUuRGPGO23fu8MI3gESlCgu77NSpUQlTA11eXdnNWS9TMlrG8C48Gl0DUwoVLUGRAnnQJ5Z3QcFERb+/dOtjMbAih6M174JDWbvrOG3qFuHC+rFcC8mTLiBfeliS4P7x48fybbXSOYuaNWtSpEiRNA2cKmUcHtcu8sT7DfEJSgyMzSlQqiLZrHQ5sGEJ27YfhBwVGOXSGefixTBSRnLn1m28Xr8hXqnE1CoTtWpVx9JQhf/DK5zyCCKTvT7v3oWi0jXCqXAJihTMg4l+IoEBQUQrPl03ugbmZHHMwJO7t+jd6TcizPNQpsrvTB/bB2vj979Gvr6ymfbD1vDm9RNK9VvG5sG/pmnNteFjTUC+NnhB2JAyCgjITxmdBeSrrXNEkC/r5oxl+9VAShfPQ+BrLxTm2enWoTmBL+6ybvFC/A1yUb9+QyaN7g3BXowf5sLD2IwUyGzGG18fzLKWYuSIQfifX8XMJZtR2jqRO1sGFCE+XPB4w9TFq2herRCP/trK8t1XP7HZ2rEAvzRqgrn3fsa7naTP0HFUr1BCrvPozDxKddpK4KvrmKs9U+3uQPo1JTw8XAa+J0+e4OHhIR+6lA5cWltb07RpUxo2bChnVUmp8vLKLjoOmE+eYkWwsTThpfsJ9Mv1ZX6/X5k6bjSXTp1GL3dVxg7pSMN6Nbh4YD0Ltp7BysYOG8N4Tp+4yuD5W+hcvyDHh5ahwzYjGtQthqWZHs+fPkM/Q37GTxxLqWxRbNiyl1t3vT+Zmk6meswf0fCffwt5wtaFI9M85Eu+Dg0N5c6dOzx//pyIiAhKlixJtWrVUuwXmuRcQ28fnKZ911FkKuaMvYURfk9voshenyn9W7F96RSOnboMDoUYNLAHDWpX5/GFHcxcdRDrDA5Ym+hw8eQ5mo9bzcCmRbnqNoTmM69Qr0E5rI0NeOn5BF1bJ1xGDKeSkwGb1mzB/ckrEv81IdPM5Rg/rA0v7t1i5Mi+2BUuR4xfBL1GTaFKocygVLBoVG9OR+Yj0WMb1k2nsGGQgHx114SAfHUVFO3TjgIC8lPGVwLy1dRZhf
+Lewzr3JpMjScxtV8Tgrzvc+zcFQqUqkqFUrmZ3u13bpo2YOnEbmS0MeXsioF0X+nJngPbKWyvh8eFg8xZtJHqncdQQHGZmasP0W7QZJrVq4Ay/DXDu7bgmnFdrmyeiNfto+w8efcTmy0cclGlWg2U9zYzZd1f9B0+nmrli8l1PC8sp0iL5bx+4469mjPV5uZSWIN0qFYK15B273Pnzo2DgwOHDx+WD17WqVOH33//HXv7lFRBxcFxTRm+L569e93Im8uRwDtbWX7kBePHjsL33hXG9+mPUcMpLB1Rnxjf2wwcOgGrMi0Y3LU59qY6rBvUiKl3nfA6t4DjQ0vjci4j5y4dJYNhInfO7GX0hHlUaTmAUd0rcfDIGR4+9fsU8u0rMqxLlXQF+TExMVy/fh0vLy8Z9PPkySOH5qSLQ7V/e+ryin60munBgbN7KJLVlqDHJ1m7/zYdew8CX3emT5wOJdvgOqIlusFP6d9vMLpFfmdUr5ZktDLm6MzOtN8QzoMrm/HcNJjeG16wbvcuSme34NGlQ4ybuogSjboxsGN1zhw8xpOX/p9AvrF9UXp1bojXvVsMGzUc5watCLh+huxV2zGwU0PifK/Rs894qnQexiW3EejUHctGAflqf0QKyFdbwvTXQZSfL34XzhPsfptIXz8SFDHpYpI5nj0no58/bx0z450vT7qYk76xCeZZHLEtUQrHqtUwc8yS+vMSkK+2DyKC/di4YAqnHkZSonhBsuQqQJUqlciT1R49XcV/IH9S09yseZ6NLs1qyWOHB77m9t2H1GzZn2p2/izZc50uA0ZRt2IR+fW/VvWl44oXeN04THjAS575BH1is6GJJY6OWfE558aUdRc+gfxnF5ZTtOVyXvunT8iXQnOknVzpoKUEfFIMdvHixWXwk+LxL1y4gLe3t3z4UorTTunieX4rI6auJ0+J4phZ2lKkfA0aVHXGxFAXn88g/+WJpQwdNxu9nJUpVDCfbGrMs+MsP/6GkIBnnBhWlpFXC3Prr/Xya7H+N5k3dQQJmWoyZmR/Xnj7EBz6aSgXxplxLuSYLiBfepCTQnKePn0q/2IjhV9Jl1lZWaW/y9r87hzDZfQ8HIqUJoOFGflLVaZe9fJYmxni/+DqJ5Afem0LPVymoZOtDEXz50JfFxL9rjF7w1UuPHxO9KGRDN4ZxP5jO8hiCsqQx0yfMJZw20q4DOxKVIAvIRFRcsalD0Xf2I4ihXLw9O4tBg8byq9/jkX/wRFuvTNl6MiheB1cwMLjvoweP5KlQ36HWmPZOFjE5Kv7+SIgX10F01n7x2tX4nv6DJlLlSJDnryYOThgIG7u01ovx0vp/AICCHruif/t22SpVZMCnbunrr0C8tXWX6VMINDHizNnL/AmMIgXL7yIM3KgQ4d2lC+RnRmf7eQPrWXHSVVdujat8HFsPWMLSpUuT/zTY/+B/Oubh9B87j28by5HtcoAACAASURBVB3D/fgKZqz99HI42xzFaNm6HQ5vjzN2xTH6DBlDjYol5b7vn5xD2e67CHh5JV2F60ix2G/evJEP2EoZdHLmzCnv3kuA/+8igb9UpHCd1CjS2rh65igPvXwIDfDjpvt9chWpyJQJQ/D7DPIf7p3FyKnrsXeuT/EiOT+aq6NnTN8/u/4H8uODPFgwYyQKq4qMcWnBomUbuHTd85Np6mT7na2zW6RpyJce1iSwv337tvwglzlzZjkdpvTQlm7z3atUuF88xq3Hrwh/9xb3ew/InK8kg4e6kOh96xPI9z+zit6j3cjoXJVi+XOg9/GMvh6/t2rL820jPoF8VaQnsyeOIci0DIP7/MaOVWu4evf5Jzv5Ztmrs3hmL17eu8WgoUNoMmQxJeLvsHTbaRr+8Tsehzajk78hA7q1YEjLSlBnvIB8DXzACMjXgIjpoQtFcDAes6Zham1Nrmo1BNinQadKwP/i/FmiQ0MpPmwUxra2qTMLAfnq6a5MwPfxDZZsPUuzHoMp7qDk7q
XDTFu0hUq/d+LPdvWY3/13bpjWY+nE7nK4znqX2ix6WpBzexdgrq+LlIs6Pi4OHQMjru1eyMIdV+g8cDQNqhSDRAVL+zVipVd+3I8t5q3XHa58FndtbOlAocJF0A+8Rr+xy2jcYxAt61XGWEfFnqnNGXTUnBeX1qeLg7dSLHZYWBhHjx7F19dXBntp997Z2Vn7gE+ZwMI5c4h3akCP+kUwSQzjzOG1dB62kmvPH8GDy0zo0w/DBlNYOrIBwXf3MWTsQiq3caFd84YY6uqgTFAQFaeLhYm+DPnDLxXg1qVN6Oqo8L5xhCmjJ5KzVg9Gu/zOzVv38PH/LGWohRO/1SqUJiFfepDz9/eXs+X4+PjIl1gVLlyYXLlypfubajcvc+WFeWmGtKyCgTKac8e3MmzcIlz3nKWAwpMZE6ejKtEG15EtSfA6Q8/+UynTehCdm9fHwkjKwhNHRFQiZoZKLrkNYdC2AHYf20lOC10CHp5n4oRZ2JVrweBeTXjm4Y5vQOgnO/kGltmpV6MUzz5C/hLaFUpg1qw5PPb04R0O9B00hMY1StH5F2eoM0FAvnrfJHJrAfkaEDE9dHFtxBCss2YjZ6XK6WE6P/UcXl66SKjPa8rNcE0dHQTkq6e7Son/s9sM7d2LeKf6tGtcFR+Ps2w/eZeOfYfQ4bdqrB/ZhpXX4+nXuxuNm9Qh6tEpmrQaQOE6LWnWsCoBT29z2yuM39r3wODpXia6rsCySEPa/laTN/fPsnr9Pv6Yvo1RzUp/1da4qLfMHjGUC68UtGnfHqOQRyxcsJwqQ7czs2MZ9eapBa2lA7Rnz56VL7OSwL5AgQJyKkxTU1MtsO4LJqiU7J7ZhxlHntOqc3cq5cnAnrUz2X07Hnf3M8R73WLBkI7cNKzG9En9cMpux/JJozh63YsGrbtQIqcNB7duJDxbY1aOb8Hxoc602xpHxz/7Uru4A6d3r8UjzI7+Y6fwa6msSdMgDRy8/fAgJ8G9dLBWCr2SfqHJnj075ubp/fj4ezeeXz2GfsvP07xzL+oWz86xHUvYe8mfTYeOkC3mKYumjOJKZDZGjhxAibyZ2D5vItvPP6b6b+2oVDgzp3dv5pmeMyumd8F99SD+mHGePzp1p0G5XNw4vovLPvr86TKC36oU/tfO/3+XkJRC8/1O/hL+rJmbHSvnMmPWfPL82pdJw/pSMKst7eqXEpCftHffN2sJyP+mROm/ghSiE//2LU71GqT/yf4kM3x6/CgGGTOmTuiOgHy1V1lCXAxPbl9i/77dXH3si7ldTuo1+JVfalXAztoC/0eXmDjZFX/d7Li5zcTBENyvXmDP9i14eAdjn70A9Rs1pXr54jw+5sa8jScoWLwkYW9f8iJAQf2WXWnRpA4Zzb9xoZVKyduXj9mzdQOnrt8lVseMWk070rppAzJZJOMlXGor+PUOpFhsCfbOnDkjX2okhWrky5dP/ru2h2tEB3pz7cwBNh66gG9oLFlyFaNft3aULFaABEUkl4/uZN7yTVRt0YGeXTsQ6ePNpaO7OHT+Gm8joWCZOrRoUp8yhbNyfFgZ+h21YeyfNTh46gommfPTonNPqpbIh4XBt+9RkFUOe86+VVO5HZaTSZPGJbPnvr97yddSWM7u3btluJey5UjhOVLcvZQe82cpsaH+3LhwnLU7j+ITHEmWXEXo3KE9ZUsVxkAZw9WT+1i6aiOFajena4c2GEYFcu3MQfYc/wvfEAV5ilWldYsmODtl5OpKF3q4PeDPXr9z4/wFVFbZ+K1VB+pWKIaV6b8vt/qvus8e3WXcxEnU/3MaHas54XvnJFNd11CoYXs6taiPuYEufdvURVXVhcU9672/A0SUH1ZAQP4PS5c+GkqHbK8OG0K5Pn1FiE76cKk8Cyl059qSxZSf5Zryh3EF5GtkJUm58iPCggmLjEHP0BRbG2tM/r7mXZWYQNC7QKITdHB0zIi+jg7SBUlhwe8Ij1JgYGKGjY0NxgZ6/LV9vhyT36
7HAEoXzEK8ShdbOwdMjfRJ0venSkVURCjBoeGodA2wtbPH3Dht3nYrhWtIGVT2799PVFQUtWvXlnfupRSY0pdhWimJsVEEBoUQE5+Imbk1Dhn+OSgaGxNNUFAQRmYW2NhYyyFVcdERBIeGEZugwsI6AzYWpuiglMN1RlwpyMWjCwgKicTAxBxbWxsM/wnC/rYkyngiw4KJVRqQIUMqhQh+wUpp914Kv5o9ezYZMmSgRYsW8o3EPxvc/1sa6Y6FwMAgouMSMDGzwj6D9cdL8eIUMQQHB6FvbCafN9HX1SFeEUVwSCgxcYmYWdpgZ2OBKjaKi25DGLw9kG17V2MSG46uocnHz5tvLZj4uPf3L5hZZ8DSxBDJpuDgcAzNLLEwN5E/kwL8fcHUBgcrLf1F7VuT1KLXBeRrkTNSw5Rn27YQ7+dLnho1U2N4MWYyKvD87BkMHLOQr1WbZBzlC10LyE9Zvb8x2gfI/3d2Ha0yMAWMkYBPOkx78OBBrl69Kue2ly44MjIyQl9fPwUs0MIhVO8h/9/ZdbTQyh8yScqUM2nSJDnXfc+ePcmfP78cgvUz7dz/kHBJaKT8APn/yq6ThGaiSiopICA/lYTXlmGlWPycFSphnT27tpgk7NCQAqGvXvHyyqWUj80XkK8hD2qmmxuH1rDhqDvNu/ShqnN+zXSahnqRwjUuXrzIsmXLqF69Ot26dZNvp/3pgU+l4sL0xsy648ShnXPSkEf/v6nSGYv169dz/vx52rVrJ+e6l8Kvfnpfa9C7yrhobm6dxuTDIaxcv4RMJhrsXHSlcQUE5Gtc0rTV4en2bSjXu48I1UlbbkuStXLIztIl1Nq4JUn1NVZJQL7GpBQd/ZgC0s69dMHRy5cvmTZtmpxFZdiwYeky//mPKZT+Wt28eZMNGzbIZyu6dOmCmZlZ+pukmJFQ4DsVEJD/nYKlt+rHmzel+qgx6W1aYj5/K3Bu2hTq7dybsnoIyE9ZvcVoHxWQYu4jIyPx8/Nj48aNctx9v3795AOXoqRvBaRfbKRd+582/Cp9u1fM7gcVEJD/g8Kll2YC8tOLJ788DwH56vk3Pj5evQ5E6xRRQNq5l+KwJbiXUmK+fv2aX375hUqVKolQjRTxgBhEKCAU0EYFBORro1dS0CYB+SkodioMJSBfPdEF5KunX3K3luBeisP29vaW0yRK4TlSrvv69ev/NPnPk1tj0b9QQCiQdhUQkJ92facRywXka0RGre1EQL56rhGQr55+ydlaoVDIcP/gwQN5597W1pY6derIaRJFEQoIBYQCQgFx4+1PvwYE5KfvJSAgXz3/CshXT7/kaJ2QkCDnP5d27t++fSunwZTCcvLmzav1F1klhx6iT6GAUEAo8P8UEDv5P/naEJCfvheAgHz1/CsgXz39NNlaCs159+6dnA5T+q90wLJYsWLyHwODtHk5lyb1EX0JBYQCQoHPFRCQ/5OvCQH56XsBCMhXz78C8tXTTxOtJbiXQnNOnTqFj4+PvHMvxd1LcG9ubq6JIUQfQgGhgFAgXSogID9dujXpkxKQn3St0mJNAfnqeU1Avnr6qdta0v/GjRtcunRJznFfpEgRGfCl+HtRhAJCAaGAUODrCgjI/8lXiID871sAirg4rMtX5NKmDTgXKvR9jVOhtoB89UQXkK+efj/aOjExEU9PTw4ePIiU+75q1aryhVaZM2f+0S5FO6GAUEAo8NMpICD/p3P5pxPWVsjfcOAAy7bt4Pnr1yQqleTKkoUezf+Q/6RmEZCfBPXFZVhJEElU+ZICUmjOmzdv2L17N4GBgdSuXVuG+2zZsol892LJCAWEAkKB71RAQP53Cpbeqmsj5O88cYLek6ewdOwYqjo7y5Ifu3iJAdNnsGzcWFo3bJBqbhCQnwTpBeQnQSRR5d8KSHAfERHBoUOH5Kw51apVo0SJEnI6THGoVqwVoYBQQCjwYwoIyP8x3dJNK22E/N5TphISFsbW2bM+0fngufNkzGBL2a
JFGThjFuFRkZgYGXPm2jXiEuLp3aoVLh07yG0kGB82Zy67TpxEAgjnwoWYN2wY+XJkT9Lrz7xf0XfaNK563MXB1pbBHTvwZ8sWcr9SuM7GGdNYsHET9595UjB3blZPnkShPLm1bl2IcB31XCLCddTTLymtpUO1Usy9FJpTuHBh6tatK4flSHCvo6OTlC5EHaGAUEAoIBT4ggIC8n/yZaGNkL9oyxamr1zFdldXqjiX+qKHhrjOYdWu3WyYPo3GNapz79kzKrZtz54F86hToQKjFizk+r37bJw+FRsrK2asXMWO4ye4u3c3+np6X31dT1eXsq3ayGOP6dmDxy9e0KhPP7bMmkm1MqVlyJdeWzx6FJns7Gg1ZBgWZqbsmOOqdatJQL56LhGQr55+X2st5bt//Pgx69evx9jYmLZt2+Lk5CRy3Sef5KJnoYBQ4CdTQED+T+bwz6erjZCfkJjIiHnzcdu5S95Fr+LsTPUypWlSswbWFhbyFCTIP3n5Ch57dn2cUoNevSmQKydzhw3Fvko19syfR9XS78N9pLh+hyrV2LtgvgzoX3vdxNiY6p0643/+LJZmZnL7E5cvk9nennw5csiQLz1ctKhXV35t7d59zN+46RNbtGVZCchXzxMC8tXT7/+1lg7Wenh4sGbNGpo3by6H54giFBAKCAWEAppVQEC+ZvVMc71pI+R/EDEsMpILN29x6c4dDp2/QGBwMNvnzKZ6mTIy5Hu99pF37j+UbuMnEBEZxYKRw8lZt/4XfbFywnjqVKzw1dcNDQ3l/l+fOvGfPj6E6/y1YT1lihSWX9965Ajjlyzj6eGDWud/AfnquURAvnr6idZCAaGAUEAokHoKCMhPPe21YmRthvx/CyTF1XcZO44Hns+5vm2LDOHPvL3Zv2jhx2qdx4xDqUxktosL2evU5drWLRTP7/QfnQOCgr/6+vZjxxk8aza+Z079X8j/dwpNAfmfySQO3mrFe1sYIRQQCggFhAI/twIC8n9u/6NtkC+F1fSdOo22vzSkcqlP4/GXbd/BtJWr5B12CfKljDv39+356MFaXbtTrlhRpg3oj32VqiwYMYI2vzT8+Lq3nx85HB3l///a6zfu36dqx87yOHY2NnL9PadOy6FCFUuW+E+efAH5AvJ/8o8RMX2hgFBAKCAU0EIFBORroVNS0iRtg3xp7l3HjefcjRvMGjyY0oULyRk2btx/gMtsVxpVr8aiUSNlyF+/fz/TBgygQ+NG/HXrNk369efs2tVy9h3p4O3Bc+fYPW8eubJmYc2evUxYKoXUHJIPyX7tdXNTE8q0bE2x/E5MHzhAztXftP9AOQ7/w8FbsZP/lVUqdvJT8i0sxhIKCAWEAkIBocAXFRCQ/5MvDG2E/PiEBFzXrWfHseO88vdHV1eHnI5Z5F35vm1aY6CvL0O+79u3ODo4sPnQYaTDsgPatWVg+3ayR2NiY+UUmrtPnkIRG0tRJydchwymTJEiSXr9ycuX8i8KUoYeexsbOYVm71YtP6bQFJAvIP8n/+gQ0xcKCAWEAkIBLVdAQL6WOyi5zdNGyE/KnD9A/ue59JPS9meqIw7equdtcfBWPf1Ea6GAUEAoIBRIPQUE5Kee9loxsoB8rXBDshkhIF89aQXkq6efaC0UEAoIBYQCqaeAgPzU014rRhaQrxVuSDYjBOSrJ62AfPX0E62FAkIBoYBQIPUUEJCfetprxchpFfK1Qrw0YISAfPWcJCBfPf1Ea6GAUEAoIBRIPQUE5Kee9loxsoB8rXBDshkhIF89adMV5KtUxCoUxCtVmJiYoKero544onW6UUCZmEhsTAw6+voYGRsjVka6cW2yTkS6lyYuVoFKRw9DQyPxmZKsav9Y5wLyf0y3dNNKQP63XXng7Dl6TZqM39nT366sZTUE5KvnkPQF+aG4DhjI7jshuG13o4hjRgFz6i2PdNJaxat7t5nQvTOWtX5j4tRJWKWTmYlpJK8Cfk/cWTxlFG/tS+AydD
iFMouVk7yKf3/vAvK/X7N01SIlIH/H8RN0GDnqE93MTEwolCc3Y3r2pF6lilqtaVBoKE9felOhRHGttvNLxgnIV89l6Qvygxn+RwvWXAng8JWjlM6RBV315BGt04UCKjxvXqJ3/dpYN+nA8tVu2KaLeYlJJLcC3nevMrF/Z3wzVWDGLFdKZhcrJ7k1/97+BeR/r2LprH5KQX6viZO4t/ef22nDoyLZdPAQCzdvQco5X8zJKZ0p+/XpqFQqpD+6usmLWQLy1VtWKQn587qUZcK+MA55PiJv6F0Gt2+JQ4OhzOxVh3mzprLpcggzp06kQVlHti1zxdVtC/FWeRk/dxHNKuYnMsSLdlUq4OdQlhZVs3PTz5Rx08fhc3QNMxesJmO5GihunuPiaxVHUhjylQkK9i0dy3DXTSRY52XQqGn82aIyBjoQ9Poh6xbPZOXeKxSo/gdjXHrjnD8bKlU8t/atZMCE2XjHmNFrzEJGtKtBQqQ/25aPYMaet5QqXY7gV9506utCvcK6uE6dzPYjNyhQszWrZg8ns52legtAC1p7/7WF8WPHoNNwFOP6dMFjajNGb3/M0XOncbQ3w9ouK9mL9eLhpZm8efGQhZNHsfXsfco0bM9Ql76UzpOBPRO6M37VPsbOGc0S11U0cruCS2F9ZvVpxcrTzyhd9ReCDi3BNk1BvhLvu38xd9YM9lx8SskarZk2pi+FcmdCV0eF77VttO0xFs9oMzoPGM+YXk0x0odwn6esXzqbxTtP4liyEZNGu1ChaE70I05RuGQndBosoNabRRwPq4LHsTE8vnqE4UMn8iLakj8HutC9za+YSR1peQnxOMyQUeMJKtKGWS7dCdg5kvbTD7P74h1KZDOjfIGsxNo34dyFFcT7PGbVvGmsO3SFgtWbMdylD+ULZuN/7J0FVFVZF8d/dKed6GB3gC2KnVgj2IqCCSoWiCgpICIWit2FgdidWGD32GJggIpIx3vfes/PdmYe9UDn3rVYznD3qf/Z9/K7++5z7snFk7GbsQqbqVPZu2geFcYFE9C5DBtmOTFzwynK16yPUmQY8WWa/EKQL+bF3fPMneVH8MHLlKvXmRneTtT6v9+8uHkM+1ETuBCVRvfBY3EePYiCmookvH7M6kVzWLB+F/oVzPDwcMGshjEqyVepXbYFCfUGY1vhOaF3DdmxPYDn4QfxcpvOtVdiBo4cj03fLhTWVZO71wiQL3fJ81eD8oL8ER6evDkd9sPga/9pSc+2bXC2tZGeO3H+Ao6zZ3Pn0WPph66GdO8m/cDVJxieuXIVCzcFExcfT+PatQmcMpnSxYpJyy7evEV67vnr1xiXKsm0EcPpaGYmPfcyJgYbVzfOXb3GHyVL4j1mNJ1G2XF/3x4MdHUp0LgpW2cH4LdiJS+ioymgr8dyDw+qljPm63Sdi7du0bjfgB/GcTVkKxXLlJF+vGu0j6/0C7y62tq0a9KYGeMc0NXSYveJkzgGzGZw9254Bi3ixOpV1KyYuw83AuRn73qTJ+SHb3ah+cAAXIOv0EH9DI27jaVR1wH4ufRmmZcjd5Qb4zlxEFtmjGPVzjBUdfVQSEvhzbv3DJl7DO+ehelVrwbHokBdU59W3SxpUK0wK3ym81qkjYaaEu9j35OmZcyJCDlG8sVpLBzSEuctl6nTzgr9mPOcuB1Lp6E++PQqg/cEa9aejUZXR4vED3HoVerAynnTiNw2hcnLDiNS10NVQcSbt++p22kcuxYPY8vCiYx0D0FVXZ0ytTti09WEE3vXc+RCJLq66iS/f0e6XgUe3w5HT0Mle06Qx6WT3t1k2tQp7HxelSD3kQQ6DmLfsTNsvHwTc52TFC43DIvZV5nZ/AM9mzXmTqoWerqaJMXHo16hA7NmuKFw2IupQcE8TVFHW0ODaUdu8HBQGRbezMCwUEFIT+bt2zgs+v06kfzHxzfj6uTAsQ8lMK1QkNvXr/MME87uXwAnp1Bv1EZ0DAuhqijiTWwGjbqOZd
mE+gQ4j2LV6Zfo6GiTmviBDxlFCd6+ng61oqlSsz+PX31AV9+AUnVtCRptSLuezqho66OmmE5cXDL2jm44TRyDjlruBmiy7XbpT5niMJ7gW/qsmuvCZm9bVu84wbRtZxnTMAajsr2obbecQKuSjBv4J4fuxaGvp0VyQjwUb8zCud4Y3F7LKO/lPIlNQ1tLk8GLDlLm0BgcVkWgZ2CIsjhNek9p3KXPLwP5Ly4dwMl5CgfupdK0lhE3r1zhefofHDq1nWL3N1Ct/XiUdQugqaZA3Id0mnWzxW9CZ5Z4T2HZ7ovo6GqRnpRIXKouS3fswbJuCnVKm3E3KR0tvYJUb9iVyTbVGTnQgQ8quqirKBAfl0jvsV5McRhKSQP5gr4A+dm+kn7tCvIa8k2temNh3pypw4cR/fYtVbp0JXDKFHq0bsW9yEgs7EYzbfgw+lt0JvToUey9fQmZE4BxqdKMn+nP/SdPCFuzipDDRxjlNZ2QObOpW7WKFKj7O00mbO1q6lSuTOdR9ki+pLvO15vYDx+wdpnG+Rs3iDx0AD0dHfQbNKJlg/pIPq4lAfI+kxyl9lsCZn0D+ZLZTs/I+Dzptq5u0n4eX7USZSUlmg4YSP0aNXAfNVL6pV1JO4ULGLLM3Y0Dp88w0FkyttZMth1CYUNDVFVyF0AEyM/e9SlPyP9w7wRlarbDbMRsehSKYNj0bVSq1YjuPbpxcfNcSluMpoeJOq7OnijWsmWpxwhKKkViWqMZj/VNuH1qIYPr1eZJgcYs2rKd2hpRzPJ2ZfGhpwTM9KFXm/I49LRiTXgM++QZyc94z9g2rVl16z2HjoSiK04m/OJtjIyNSH1+G6vhbgwdNxlf58EEz/Nn6faTONj3w2fmXB6mFOP6sY0U0FdhfNPSbIgqybzdO4kLdcRxyV84TPFlypAmLHN1xGtBMENmzKRLs4Y83jKNITP34bAjCudmWtlzgjwuLUpNxMd1KrPXX2bu9G6sWrGayxHXqT56G17a82nlHc7e58/ROelGM9udWI2ezYrJbVm/YDaOzq6MnzWHwk9P4RG0BbPpu/Eb0Axt8Uu09MtSvHwLHt/cx93zpxnSthXFuv06kH926zqcxztQ1coG28GDeXr3L5JSU2jVug2tTWpw7ckH3sW9Jv19NG59O/O2ZDVatWrB2FFjsbZzxNNtIjc3eDDaYz5GdkuZbamKWQNrIlXMeXdnE6lvXuDQpjYX1P7AK3AR5RJvs3y+P6fV2jLT3Qmz8jp57Bn/3vwCT0dmLD/JrPmjWOozg6s3H1LYwofQDg8wHRPMzNBDVH+3h/aDltN15DSC3Pqzb/lsprp70tlhBrXSb+Aydy0VBs9j0xQrNFSTaPBHCd7q1GX/ySMoRYYzzW4Qr0v+OpH8G0e24eIyFbWaFowc0g/Rm4fEJKRiataeBYPrszjsDVvO3qVhwfes8HPibIwOtatUYFOgD40GuuAy0Y6E8JX0He2Jppk9IW4NMavUhqcG9bly7SAG8U8JmDCATbdFjHH2wLxEIvMD5hD2rhRB/q40rVXm3ycuBy0EyM9BMX/FqvIK8iUAvSp0hzTqfWrtaupWqULA6jXsO3WKQ0uXfJbSf9VqDpw+Lf1dF/vRVDE2xmfsGOn5V2/ecDQ8Ast2bek2eizljUoza+KEz2WbDRpMo1o1cR05ggKNmhA6fy6tGzaUnl+/ew9Dprl+A/kSwO/WsoX0/OodO6X9kUTo/27hreShw3rKVMI3bqBCGSMu3LxFi8FDiDl18jO8Sx4kzK2HEHvuDEfDw6UPLXf37Pr89iG3fUaA/OwpLE/IRxRN94rG3CzSiha697ilUJUSiX/xVFyY1Lev6T/Ji7qqdxjvOouaNgtxG9qJIlrx9KtVjq1RpThzewtTGpoQXbYDWw9sQPf5NXw9ndn1lxb+vl50bFggz3LyH53bxAiXRbx8+YroN7GYWAzFY0xfHkbsZ7BzEGMnueM61hKxSIRILCbmwTG69LYnoX
hPTqyfjKGuBtsdm2O7/jkTF++i4FUPpgW/xnH6Aka3LIS7kyOzlmygqJERerra0gXFYjG0nxaKR+fi2XOCPC+dwcEFnkybsYgm1fU59UKf1oWjWXP/D1qrnmbzGyPePT7HqTm9sFoTy9h5u3BqXYBTa+cy2WkyjVzmUOb5GeYs3saYPSexNq2DQtwpdAu1xMhsGvcOO/+SOfkZH56wZsVilqzbx5uY14gNKzB+/Hj6dm2JSdUKPIouStz7CNSVQLILDOIkdoWsYMCIJYx09MBzQnfSr6ynt507T2o6sW18AdqY2/Ki2mTe73Ug5sVz2plU5VGSImXKGqOipCD1qcKmPZhob0PzygXy3DP+rQNnNwQwwX0u9U3KsSviDQPNCuO/8x02TdNZF5HEidNHuL0GMgAAIABJREFUebHbA6u5lxnpHohrb1Muhy7Bcao7JS0dMVO6g8+CjXSbvRm37uaoco8qRU2gcn/CTwQRe+PXy8nPSHzFmqULWbNpD1Gvo0lRK4b9xMkMtGyPvXkJ9j4sStj969TQR3o/EouT2LkuiPFOQfSb4s2kUVaoPDxG+952PC3cjlOLutKiWnteVO3Li9OLeRd5F2frTmy9/JbipUqjo/4xtUuvQlM8nOxpWE2AfAUFBRTEkoRl4ch1BeQF+ZKFt5LFtp8OSZS7WKFC0oh3304dpb8e5eXN8pAvefufbCXpOBIwrt6tO/Z9+jC0558/6CI5N6p3L4ZbWn4+J4miJyUn4+swlkqdLbgRGkK50qWl528/fIgkVejrSP6ptWswqVpFen7j3r24LgiStvszyJc8YNTpaYXLsKGMsPrYZvD+A9JI/c+OO7t3cufxY3qMHceHiHO5Pq+fGhAgP3tSyxXygeUOzbBbdYOyhYoxcKo/ovBApq8+QbnarZk23YfKqRHYO3qgajKYAJehGKbdpXvjDvylX5+IE4GM+gryC8Y8YLavG8sPPcPV05OujYsweZA1my/Fsf+c/NJ1xGlJnD0bQZJYnUqFlVi1eCYzlh/ErPNgRnerzEA7TyyHTcR1XF9ePLrPs+h4yhUVM2zYOB6kVmTPtkDKGmTg0acRq+/oEBC8i4QDTl8gv20Jgrym4LskhBFTfbDp2xWV1FhuPXpHPZNqv8W2fm+vbGWsoytHTt2nRA9Plgww4E+babx/+QY9cy/u7RzF5S1utHTcTZ9Jc/EdVI/diwJwnT4Pa7/ZFHtyHJ+vIF856SGGhpUwLN+T8LClvL56HJsu3SnT49eJ5Ec9fcrt239RtKA+Lx9ews17NhduJ3D2/DHsrVoT8UDMlUdXKaWZzrkz59DU1yfmxV2G27phMXg8HlMG8WT/fEa7B1Ki/3zm9VenWePBnyH//esXjGhfj9sKJZm7ZC31KhXlxbMolNS0KV26OL/CDrSJ9w9hM8qJExF30Ww0nENejWnTcTDvEtLRrz2Yy/s9ub1nARbjVtFlmAveIztxcv0C3GfMp9VYL+qmX8d17trPkK+mGEuTcuV5odWI7ftXoxJ5EZcxw0gwNv9l0nWio55w9+59aSpXzKMrzPSdQdi1GNaGXeaMRxsWHY9j1ZEztDFW4+aVy6Sr6/D6wRW8XHyp23sMU8YNIv7CVqzHTUeryQhCPJpgVqntZ8iPj3rEzHH9CL2nhLP3XLo1KU901HMUNfQoXqyI9GFRnocQyZen2vmwLXlBvmTh7fngjZ8V6O/kjGn1asx1cvz8O3tvH2nuvCRF5mfHz0D+k53knF3v3gyz7PkV5E+VpsxMHzOaKhZdubljO8alSknPS4C7Zvc/v4F8yQJgyRsFWSC/25ixpKalsXtBIJKnUsmx5eBB7Ly8eXXy+E/7f/DMGXpPdPzp2oTccg0B8rOnrLwh//qeAEy7u1Cubhu27FjFre2B2Iz1oVGvUfh4e/CHwjN8PV1Zd/AavQf0R/TwFMv3XaPveB88rE3pVf9LJN8oLY7dG5cxxXsBRiatqF9enx1bg7n9Vpejco
R8UWIM44dZs+n0U3w8J5L6+iGuvkvoNsKZCd1rMM3BgTtphnRo25QLR/bxWrEEC3wncmbbcmat2kVb67HU1o5i6sw1tOhrz8LpNuxaMPEL5HeqwK0TW/Hy8OLcK10G9OuOWtxd1p2I4+Lx1air5P9Fkv/mpelJzxgzZjzLVu/Cff0ehnUwobNJLS48eI79jsfMbFuYyIfXsO7bl3ciQ/padeTcoZ3cjdXBz8edD0eDcFn0JZKvLkplRNfmBJ+5z6Dxrui+uciiZRto9uevA/mnDu7ExWkyxas1oG2bFmxcspDIRA1CgldxZb0vDgGraWYzhUaFk5gZsJT2/e0YZWnOrKkTuBmvRacOrbh6fBd/RWcwY8ECOlR+RbWaAz5DvigpjiMrpjHWfzsFKpvRq0MdTh07R22zDowZNQB1ZfnC2r/5yE/Pi94wYdgwlq3fzSD/YNyt2zK8dQ1Czj+h16y9rLYz5/G9K0waYcODeC0sOrfl+qn93I1Wws3XD83bWxjju/wL5CspMsfBkpnBp2lhaUt5tdesXbse42Z//jKQf/XkLtxd3RGXqEubFo05G7KE8KcQuHYrWn+tx2aiH0Ub/EnH2gXZtGYdFVr2Z0SvVoQEeXHkXgrt27fk6cWDnH+cyPjp8xhopopJmeafIV8h8T0nQpYyfvoSdI1q0a19Iy6fOkWl5t0ZNsiKglpKWZrKrBYSID+ryv0m5eQF+d8vvL129650AWvovLnSXHjJMWftOpZs2cqtnaGf1X395i26Otqoq6pKU10ki2bnOE2Snpfk8C/fHsq4Af2xHD9BCvBfp+s06T+A5qamONkMoVDTZuxbtFD6/x8hfh/WLlOzBPmStw0u8wK5uHmTdHHwp+PS7ds06ttfupi3ZJEi0l8nJCVJ3yYUNDBAgPysXzSSB6m8eLknb8hPjrqJo88SSlZryMShPbh/PYI1K0MwNmtD965t0VGAZzdPsnH7QSJfvAMFRXSKVMDJ2R7V5GiW+U7nQ4HqDBszhAIK8P7FfXbvDOXs9ccoqGiip5ZCAgUZM9EeowL6ctsn/+3dU8xasYPYD4nSPusVKkkv2zFUKyTmcvgJNm3ZS2J6BkoqGph16UWbRnVRSXjMwoWrefwyBpEYNHQL0W/MZKrqJRJ+fCu7LnygXdc+mFcvgjgjlVN7t3HwZDhvE9NQUFTCwKgObg4DUFLK5wskZbgsxKJ0tm4K5uTZ84yyG0mlCsZs9HHjTNR77GbPo6IKiFITuHVuD0u3hpGeIUJJWR3zDp1o17o5t3avZseJS7S0n0CjMqWRrARKeXaOSX7rSc8Qo6GujJ5aOkUqNaDvgH78EqsYkt9w5MAedh05T1qGCEVFJep0subPZjXQUU9jxpRpPH2fgBgFDIuWptvg0dQppsjl8DC2bNnF++SPflKjtSXdW9SnoNI9pnms4H3x9syxbym9Nj68fc3ONQs59yAGkQiUNAzo0KkzrcxMUf4VQvnAvq3rOHoqgp5DRlOnijFHV/qz80okPce50uyPQojSErh14RhrNh8kITUDRSUVGre1oF2LprwM38mGfaepbWlLp9oVUVFUJPHlTXz8l/A2OR0NNVXUVRUpbFyTbt17UMrwF/Cc1Pcc2bebAyfCSUjJkPpAxSbdsOnaBA012DTPm9N3oxFJUmwKl6JNl140r1GCG+En2LFrH1GxSdIyf9Rrh3VXcwzUYnB3nElcsfrMmNQXSUgh/u1LjuwM5sjFe2SIxCip6dLaogctGtVBS1W+9yMB8mW4wf7OJnkF+RJNXRcslObGX9wSjJ62thTaK3XuIoVy+759eBUTg9WEiXRp0YLJNkOki2slDwub/P2oXr48znPnc/vhA8LWrGbX8RMMdXNnx/y51KpUiS0HDjLM3UOaLy/ZIcdsoLV0x5zV3tOR7Hs/ZKorZ65cyTTkP3z2jHq9+jBvspN0LcCnQ0lRURrRlzxYlChchKBpU6VwIVkc/PzVa+kDhgD5Wb+S/i
uQn3WFhJKCAoICggKCAoIC3yogQP5/3CPyEvJTUlOlwCxJ25HsPiM5jkVEMHnOXG4/fIS+jo40X9/DbpR05xrJ4bt8BYuCN3+zhaZR8Y8L6yQLZQM3buL9hw9UKlsWr9F2mNerJz0n2YVnyDQ3bty7J128O3X4UOmbgaeHD6KjrS3dXUeWdJ35GzYw0T/gB68JmTubDk2b8vh5FGNnzJBuoSnps6R9yQOBZIcdAfKzfrH96pCfkZFBenp61gXIw5IS7ZWVlWX6poNIJEIyVsm/eX2oqqp+TqXL6778U/upqal58pYqrzSR+JNkS2SJT8l65JfrRwJMkp9PKZqy9j837P5rfpMbGv6sThUVlX+910neKkvu53l9n5Pl3ixAvrw8J5+2Iw/Izw9Dl1yMqenp0rQfyXH2ylXaDhtO7NnT/3pB54f+Z7UPQk5+VpX7WC4n0nVSUlK4dOkSZ86cyRTYZK/nOVdaokG3bt0wNjb+10rv3bvHiRMniI+Pz1MQkgDQ8OHD0dHJ39scSnxjxYoVSP7ND+D4rxOcAwYSQCpevDiWX22S8G/VHjx4kBs3bkgBO68OyYNGtWrVMDMzQ11dPa+68bndRYsWkZyc/J/xG3kILrnXmZubU7du3X9s7tmzZ0h8Mi4uLk/1l2YPNGlCnTp1/ra/AuTLw3PycRv/Fcjv5ziZdx/iWD3dSxo1k3wYS01Vlc2z/PPx7GS/awLkZ0/DnIB8CfAeOnSI+/fv06hRo+x1KA9Kh4aG0rNnT0xNTf/1D9rZs2elDzOVK1dGT08vD3r7sUkJAM2ePZuCBQvmWR9kaTghIYHRo0fTv39/JBHE3/2Q3HtfvXrF+fPn8fX1lXm4CxYskPpezZo1ZS6T04Y3b96U9qF3795oa2vndPWZrm/gwIH069cPTU3NTJcVCvxcgcOHD0t9rGvXrv8okeSBc8eOHVK41tXNm69qS66l06dPS++1FhYWAuQLTv1zBf4rkC9ZwDvSy4uTFy5Ko6nm9UwJmDSRIgXy/17H2fFdAfKzo17ORPIlkH/8+HE+fPhA9+7ds9ehPCgtgeUWLVrIDPl37tyhVatWFCpUKA96+7FJe3t7vL29fwnId3BwYObMmfkiOpzbEyYBk4cPH7JmzZpMQX5QUBBGRka0bNkyt7v4t/WHhYXx5MkT6RuI/AL5/v7+eQaZeTYRudjwqlWrKFKkiEyQf+zYMen9PK8CCZLshM2bN2NgYCBAfi76xC9f9X8F8n/5icriAATIz6Jw/y+WU5F8AfKzNw+ZLS1AfmYVk4+9APk5p7Mkki9Afs7pKalJgPyc1fPvahM+hiUfnaWtCJAvR7HzoCkB8rMnugD5SNNehEh+9vzo70pL0nWESP6/aytE8n/USID8f/ebzFoIkJ9ZxbJmL0B+1nTLUikB8rMk2y9TSID87E2VAPkC5GfPg/65tAD5sqkrQL4A+bJ5SvasBMjPnn6ylhYgX1alcsBOgPwcEDEfVyFAfvYmR4B8AfKz50EC5H+tgJCuk3PeJETyc07LTzUJkJ/zmv6sRgHy5aOztBUB8uUodh40JUB+9kQXIF+A/Ox5kAD5AuTnjgcJkJ/zugqQn/OaCpAvH03/tpUj/ftQf+QoVPLBvr95LMVv13xacjLhCxfQcu0G+Y7N3R3c3D7+uLrmSNu/8sewhN11csQFMlWJsPA2U3LJzViI5Oec1ALk55yWQiQ/57X8pxqFSL4c9Q53mkCZho3RL11ajq0KTclDgdgnT3h89jT1feX8LQAB8r+ZXgHy5eHt37YhQL78NZelRQHyZVFJNhsB8mXTKTNWQiQ/M2pl3VaA/Kxrl+mS9zZtIC3qOcbmLTJdViiQvxV4cOwoKsVLUL5XH/l2VIB8AfKFffJluuaEhbcyyYSw8PZHnQTIl813MmMlQH5m1Mq6rQD5Wdcu0yUTop5zbtIE6o+yE1J2Mq1e/i0gTdVZEEgDP3+0ipeQb0cFyBcgX4B8ma45AfJlkk
mA/J/IJEC+bL6TGSsB8jOjVtZtBcjPunZZKvnXyqWkvXpFhbbts1ReKJT/FLh7YB8qRYpQydpW/p0TIF+AfAHyZbruBMiXSSYB8gXIl81RsmklQH42BZSxuAD5MgqVk2aS3Hz9kqUo07hJTlYr1JUHCjw+fYrYZ0/ln4v/aawC5AuQL0C+TFe+APkyySRAvgD5sjlKNq0EyM+mgDIWFyBfRqFy0iz57Vuu+nmjqa9P2WbmQupOToorp7okKTqPThwjMTaWmpOcUTc0lFPL3zUjQL4A+QLky3TtCZAvk0wC5AuQL5ujZNNKgPxsCihjcQHyZRQqN8wkqTvPjxylWJ06FDAuh1bhwgLw54bQOVSnBOwTXr/mzYP7vLh0iRItW+RNis7X4xEgX4B8AfJlusIFyJdJJgHyBciXzVGyaSVAfjYFlLG4APkyCpVbZpLFuFEnT/D2yiXin0eRnpyUW03JtV7D6BgKR73gdfFivC1UUK5t51ZjyuoaaJcojmGtOhQ3ayb/RbY/G5gA+VmG/KWOPQi5lviDqgVKV6dnBzPu379PrcbNMG9QG8Xccqrv6p09ezYtWrTA1NQUyc35n46zZ89y584dWskI+Snxr9kfuo5FG48gqVndoAxj7YZj1qD6D808vXWMoeP9Pv9eQ68E3SwH0b/7jymGv9sWmk9PB7Nk1QYuPEv9QRcNQzOWBPTByW0m84MC0ZCTX0iaSXr/hhsXLkLZepj+of+vLcttC83E23TvPZGkdEUQiShboR5zAqaiqvS9/yYy09uHE6cukCH+f/dVKhGyfTYaSt8OJywsjCdPnmBpaYm2tva/jjW3DWRZeBt9ZR9zlm3g0oOYH7qjW9AET+8hrAucQ79xnlQoopPbXf5cf3LcW27f/gsMy1C7fPFv2k29e5CRnqsY7rYAE2MDufVJ0lCuQX76ezYtX8S2vadIyMhAjCajPWfTplZJlL65p4q4fSmMeX5+PH4v+jh2RV2sx02kU3MTNL/ySZFIxObNmzEwMMDCwuJvdVJSUkJRUV5/LWSbLgHyZdNJsMqsAk5OMGMGODqCr29mSwv2siogQP43SmVmn/xXj/8iJiGD2FdPWD3HA/X6QxnWtR4qalqoJUfhHbgec4ue9OzQnO8YRNbZybRdrkG+OJ3rpw8QMHcTJn3saV5emV3LF/Fcsyb29tZUKKr5TV+vHZ+LzfRwVs6Z8vFvn7IaBQoWpnAB3R/G9LtBfkpcDC9fRxOfIuJ+iCduu8TM8htLkUK6KKnooasWR0dLW8LCTyNP/Hx17xrzpnuhbDEd9+7l/9W35AX53r3bcLDkcBZYVyIu+iWL3VwpO3IJrj0rf9tH0UOs+42jcZte1DepjqLkGUBBk8qVy37876+OXxHy0xLeEfUymvjkNF6eWMrQ+XdYvNqLYlrqKKtoo6wUj6P9KJyDtlPHSH5AHf3oOsuWr0KxigWOfZp9o7MoKZaHT19RtNQfaGuo/KtP5aRBbkH+zeNbmb3hBE1btce0mhEXdy5k9R1Ddi+ehrrq12NMJmzfJlauOUZ3+0mU1ZP4ozJFipfAUE/7G58UID8nZ16o6/dQoHlzOHECmjWD48d/jzHlx1EIkJ9lyP9UMObZfWY5DUWr5VRcrM2lv46NvIyjzzJqmVTn2bVzhN2KYbjLDCybVkFZSYGMtGTO7VmGZ2AIRnVaMdZhDJWLaWXbQ3IL8jNSEzkQuoXVh68yd4E/RVUUuLl7AfO236f/SHua1DX+pu/hG8bgtr8I+9Y4/+uYfjfI/3rAN5YNx3atmM0bfShV/OO6m6jIW3T405rFi32ZOcUdBaNGjBk3niYVCkjPJ8fcZoZvAIcvRNJrvB+27WugqvxtdE8sTufSmUP4+c7gnVJJJk0YR6smdT42nfQKH1cXth46T/UWfVg1axJPIiNpZ96cN3FxKGiVxG7YCFych//j3MgL8k2qlMfn0Hlal9AnIyGGsAUOzLpWg61rJ6L2Nby/CaP/sC
UMHDOBFk1q/gD2vzrkf93/p7um09rlKgfCVmGkK3mAFvP4wS3G2w2n27AJHF23iFj9qtiPHk3zGqWlb9bS4yIJ8PFl56nbdBjqjlP/Zj95gyjiRsQR5s2ezaN4bewcJtC2qQnqKoqQ8o75011Yves0lZpYMMvbheSXD5hg24tTt18gVi3A8DETGWs/GH21//c26jjdBs1n2vIV1C6lR8qH5/hMGs/B61G07G6Nw4j+GGoo/+s9ICsGuQX525b4cvppOkNsR1C1dAGSbu3BxHIOG0/tooa++peupsdxOGQtW4+/YMosL0r9w2s5AfKzMsNCmd9XgdhYKFYMkpNBXR1evAD9f3+9/PsKkosjEyA/1yB/otNUTj5Rxd97CqqP9mPvt4eVxw7SqLAmm7z6EnhWjYXz3Lm+cxHB598zZUYADUp/9UckC9OeW5CflvSendtWs/P8K5bPnY7kz/bTsxvwXHqUPjajad6oxje93efZjjE7FFB9dw2lIlWZMNWX/u3/D6Hfjeu/B/k3qVy7Jaat+zPXbRB71s/jakoRPLw8KPpkN6MmzaJaD0f6NS1B7259KDd+B0v7/vFZNbEolUN7Q5gyO5gxU72oFB+Ol98i+ju40qNjedp0GESbEf4Ma1ebZSMaEnStBncurybyWjh+01xQ6TEL/95VUVH5Z/iSD+S/pWrFuhyIuElJPU1Ifc+1EA+cNiSwZdsitL4KnKbf3Eg/ly3cf3iHl9Ex1Go/mk0LndBW+/E92a8YyZcF8i1at6JI3Z54TRnCme1LOB+ry8RJjlRXuMGgYS6U6TCWMV2qMsKyO6kdZrHTuc1X1WZwYt9W/AI3YGEzAVPdKDx9AukwbBp9O1agf4dulO7hxvShbdnmM5iAgyL2HF6P6PF5AhcuRrH6n3gMaYuSspL0oUJ6PNlPEwsfZm0PpX5ZNSwa1OSP3gGM71SORR7jeaBahznzvSiavdvaT++EuQP5qawOcON2giG2w0ZgXFgLXoXToNkIPPedpnXZLyQvToxh1/rFTJ67FW21ZOIUijPR1YNe7Ruj+d2lJUB+Fv6YCUV+YwXc3EACn58OV1eQ/E44cl4BAfJzDfIlkXwzi55YdWiOcloUlqataTLrCKOqP8e0fl8mbI+gawVloh9cZs2qlejW7s2Yfi2zNce5CfmhW1ez78IrVsydLu3j0/BgPJccoc8Qux8gf8d0KxbercxSvxFcCF2C+7LjWE/0YaxlvR/G99+D/Ft0sLTlWPhpDBBxYtM6lm2/jNU0T26tmMJF9TpMHtGDCoaKJEb4YzryIo9u7fisW/zT66z2ceR24YEEullJfz/f1xtxlVr07dQBvYxU0tIzkEA66depXN2SyMhIXty+iI/zZJR7ziWgz3epMD/xOvlA/huqVDDh5PmbFJRCfhzXdnrjtOYdW7Yt/gby310OYZTPNtpbj6ZDVR2srSw5SzuenfZH7bs05t8V8sfbj8IpaDumRgZE7NjI0j1XsbAZScyBeeyJNcLVzgrjItpwfyNlWy7mzKMIjP+fE5b88g4r5s3hlkodZrjZoqUAWxf58lDDmAFdO1JIS5nUtI9+8+Hddfp2sWHG1rOUTLvHvMCFKNawxHNI62895WvILxlF08rtcdkSgVklPZKT4nkTHUfxMmXR/H6CsnWX+1g4tyB/5Sw37kkgf/gIykogP/oCDZoO/QHy0xPesCd4FVvORzPeYRiPj23Ad8UhRrh60bt9k2/eQgmQnwMTLlTxmyggieKXLQuSfz8dkij+o0dCND83pliA/FyF/M85+aKXWJq0pKHvAfoXuoSxuTWFihRC+f8hMXXDUgwZPhr7AZ2zNcu5Cfk7t61hZ8RLls2bjiTA+uTMRryWhdHHdiTNG1b7rt+SlZEfB5f4LpI1K+fy8EMJ/FzHC5AfeeurnHwRJ4LXszjkPD0nu3NyxTSCQ3egraH5OR1FSc2Am1fPftYt6nYEM6ZOQaPbbHz7fqu7OOUDi11Hsn
TvRRJSRSBOJSk5Ix9DfixVK9ZmX8RNSksgP+U9V7Z54xyczNZtc7+LiH7xKakYSWcobdSJ8LsvKaav+o1f/a6QP+GrnPyIXZtYtOsSXQaP5Oo2X9Zu3w+Kaij9f4GCgoIaW45epFqxj2Hl6PuXmDc/CFGVHkwf1u4bvcRpiWwLdMFr+QGS0zMQZaSirKrF2j2nKZn2QDbIL2tARLAfNu4rUdEvSmfLQVj37kKJQvp8l22WrXvcp8K5A/lprJntxq04fSnkSx6YxC/P06D5KLwPnqBl6X/Iycl4w4xJ43hfuDmjhvWhxOecJsl6cmHhbY5MulDJb6DApyh+zZpw9Sp8+leI5ufO5AqQL3fIty17jwZdp7H65EHqFtBALBKRnpEBSsqofL+CMJOznluQL0pN5ODOrSzfd4mAwFmU0lDgYsh8Fu19gvUoOxrVLvtVTzN4cvsmqsUqUlRfjcTY56xdGcT9OENmuo4TIP9vIN/S2YPLa9zIqGiCXX8rikoWMopFxKeBtuqXUPXbh5cJ8nEmpuYEZttJ3vyIiYp6gYqmFkqXArGYfJ41wUH8UaYYaW+PUq62dT6GfGhQrQJO28/QtXxB0j5EcyBgLMsi67F5+RhUv8rJ//D6GTHJyhQtWggNVSXSP9ygfMVWnL4VSfGvgEriYP81yH+8J4CnBtUZa92HkgaStT1i4pPT0Vb/ku8U9/QGixfM52VBc7wdrJBkOb1+GYVYRRO1h7swH7KWlfu2UauELm+fnaJHlxH4bztNyfQHzJ+/EIV/ieSbFhZx52ksf1T4g5Q3kazwcyHsmQ4z5s+lXMFvH8IyeVv7qXnuQD6ELvfj6INkbIaOoEaZQry7EkrTgYsJDttOVd0veUcZaSm8iXlFmrKu9EFGnP6eOdMm8sawASOG9KGEwRdbAfJzYsaFOn59BSTRewnkjx0reRf3MWVHAveDBsGcOR/PCbn5OTvPAuTLHfLHtlDH0aonH6r3p2/riohSk8hQ1qJMdVPK6mdvkVpuQT7idG6fP8asOWv4o1UfzMqpsn/TBpKLNWLU8L5oJzzgybt0alavirJSMm5WHXhcfSQ2LUoRF3WPU0f3Y9RsMMN6fZ0j/FH6/2K6zpfddb5E8q2memNwaQVzd92lrUVrqhkXJD0tkVcF6tGzmmTrjo+HKCmaY9vms+XoM/oPGQIKGazcEEq9du1pqf4XA10OMXqCDcWKGBB3bR0j/fbzMDKS2IfXWOA2kdtlBuE1sCHGxmX+8V4in3QdmDvyT9ZntGFW/6p8ePOaNbPn0mDSCkY1M+DZ4/uINQtTpowRF7YFMXPfE9q3bUrF4rrEnl+O5+7d/cYfAAAgAElEQVQUDu/bgPZ3DPlfg/zS0UfxWhtOp+7tqVC6kDQS/1Rcgj5mFb7Mceo7DocsZ9vR+7T7szcFtRTYumkDpRu2p1OpJAZNWMEIl/EY6WsSc3s/nvN2ELQjjArKz1kcOJe7StVwHtYdI6NSSNbpSo+v0nVqJpzArN9MxvnNoJS2iDs3Ijh7IZJx7j5ULpbz+0jlFuTfObebWav2U8OkEbUrl+bS3tUcfl+OzQHjyPjwkmv3X1KphgnKCc8IXrOICy/U6N21JRlvH7Jx62HqdrGhT6em6HzlkwLk5yy2CLX9Dgp8iugLEfzcnU0B8rMN+XExL9i5bhFqNXrSs8XH9ImE6EesDTlMNdNGNKpTFUVRLP6TXKnYbxqdaxny+v55XD3n8kGsjLKSIsY1GtN/4ADKGGYv4pVrkA+kJr7l+IFtrAk5KU3FUdcvhc3g/jSoXYmzm/3YfiWJqc4T0NHWIvrxBRynziFNURlFMVQyNWPI0MEU/rQrx1eq/86Q/+TwIoJOwHiHfhQ0/Ag6b6Of4j1rEV6+01FHzM3TJzh47gGNe/bDtCSsWxzEsTPnUVBVJiVVRAe7mfSpX/QrxcREP7rJthWBnH2aJP0eQpFqrRnUy4LKJVTwnubFrQePkCS3aP1hAq//Yu
7ixajFv+L8nqXM23WfNm07MLC/Zb6AfFIeM3joNFJECiAWU9q4Fh7THEh+cZNdm5aTUbwxVlZ/oqqUxIqFCwgLv45ILHn7JcLGYwHmxj9uyvCrQ37MhW24b3jEtOn2FNL4eNFEv3rKqqVL6DHMiT8KaXHvwikOnn+Iact21KtQgNAVC9gbdoF0JSVSklNp0Gsy9p2+TeeKfXaH0OC1HLv+VBLsx8C4PsMG9aRSaT2W+rhx6mak9BsE6oXLUlDpA33HuFPNMI3TB7awNCScZh160PNPC3Q/3aZiLjPFJ4SBjo5UKKzN1nmTCY14iopkq0llXRq3+5NB3ZtKF+rn9JFbkE9GArs2rWH3kbMkZYgRoYGtowdNKxfl5aVduC07xGi3OVQrAo9vX2TN0iXce5MqfetWzqQNAywtKFv0y0O59MFcSNfJ6ekX6vvlFRAgXz5TKEB+tiFfPhMlWyu5Cfmy9SDzVr8z5GdejfxTQl6R/NwY8a8O+bmhye9WZ65Bfi4IJUB+LogqVPmLKyBAvnwmMKuQL0mt+pvUKUlkUbqzh5yPtLS0bLeYmY9hZbuxXKhAgPxcEPX/VSYkJODg4MDMmTNRl2zt+5sfAuTn3ATL8sXbnGvtv1GTAPnymWfhi7fy0fm/14oA+fKZ86xCvmTdhIEBjBnzA+wLkC+fqftZKwLk5572AuTLpm1QUBBGRka0bJm97WBla+3nVkIkPzvq/RplBciXzzwJkC8fnf97rQiQL585zyrkP378catTSTRfAvxfwb4A+fKZOgHy5auzAPmy6S1A/o86CZF82XwnM1YC5GdGrazbCpCfde2Ekv+kgAD58vGPryE/sy2GhsKVKx9LfQX7CgYGQrpOZrXMIXshkp9DQv6kGgHyZdNWgHwB8mXzlOxZCZCfPf1kLS1AvqxKCXaZU0CA/MzplVXrT5Cf1fLflzMyomxkJI+EnPycUjRT9QiQnym5MmUsQL5scgmQL0C+bJ6SPSsB8rOnn6ylBciXVSnBLnMKCJCfOb2yai2B/KweX0fy9fQ+pu2MHYsQyc+qoNkvJ0B+9jX8uxoEyJdNWwHy8ynki1N48uAWL2IyqFHXBI2ku/j4LKFqq160bW4i/TjWr3QIkC+f2RIgXz46//daESA/f8/5p5z8r+D+0247Qk5+3k2dAPm5p70A+bJpK0B+PoX89Lfs2byE45eSmODmRpG0a9jZe2Haww7Lzs3QyI3N7GVzmSxZCZCfJdkyXUiA/ExLJhSQSQEB8mWSKc+MJFH7T3n4322lKUB+ns0KAuTnnvYC5MumrQD52YP8l9eP4eHrz+WnqQxynMmwjrUQpyZybfss3PaLsGmhwfxVe6jTrjejhg2ihO7H7VwfnN+Ll988Hifr4zN7Dg3KffmAWmJMJFvmTcZl+VFS0sS06DMMpyHmLJuxkBrNW3Pv8ilOXnvOaI8F9DWvJK0vPSWBiwfX4T0vmCI1WjDWwZ4qJb/9yJNsHpE7VgLk546u39cqQL58dP7vtSJAfv6ec2Gf/Hw5PwLk5960CJAvm7YC5Gcd8q9un8uYGduwmuiBRQ1NBvYagEI7D/ZP68ClNRPp4n2SnsPG08u0AEsDZlO0kRVOE4cTscYDt3XhjHN1xzjtMlZDPQg6dBXz8gWknRGLRaQlRhGyPojjF5NwnD6d0op3GGPnxMG7ikz3cadQ9CGsJwSx7uQt6pdUZPvMkcw8mc4cf2fuH1vLnqvvcJg6nQZ/FJTNEXLZSoD8XBb4/9ULkC8fnf97rQiQ/8vOuRDJz7upEyA/97QXIF82bQXIzyLkixNxshtKAdMu2Fh2wUBTlYwXBzGuPoA9l2+SeGA6k0IUCN3lj55SEuuWzuWvJ6nY2fdmxuRJlKjTlW6dWqOlpsQKB0sOG4/nqFeXL51Jf8OOdYEcOp/IFF8fiqVfx87Ok7rdRmHVxRxNFTE9zKrQxDWUIXXVsGzfkZ7uq+lYoySv70awZN1OSjftx4T+zVGUzR
Vy1UqA/FyV93PlAuTLR+e/bSUl9g5xD9aR+PQAKbF/IUr/kMc9ypnmNe+A5Cex4sef3+FQVNZBTb8SmqXaomvcDzX932Rg302OAPl5560C5Oee9gLky6atAPlZhPzU+9jbjKNel9H8adESDRUFIAHTsqXw23UR7fD5TApVZsdOP3QVUli/fD63H8czyqoxrtO8iXgcj76eDooKklSbRJTqjuTYvAH/DPnf5eRbNa9KfZctDKn2jPo1+6JfriqaaoqIRRmkKhakl/UwhvVrg6qka3l8CJAvnwkQIF8+Ov+0lddnRxN7ZyX6pbujXbgJanrlUVLWycMeCU3/kwIZ6R9IeX+P+NeniH0Sgn5Fawo3nPfbiSZAft5NqQD5uae9APmyaStAfhYhX/Qax5FjKFrPCuteHdHXVCH+ZTh1THqy8WQ4omMzfw75g9ozfVoAlTsMwLKjGZoqiohT4khXNURfW+1byF8fyKGIryL5fwP5NrUS6dF2KA5LQmlWqQCIM0jPEKGsqommhir5gPERIF+26zG7VgLkZ1fBLJRPT4zi+cGuqGqWpnBlOwHss6BhXheRAP/r24GkJj6hRJtQlDWL53WXcqx9AfJzTMpMVyRAfqYlk7mAAPmySSVAfhYhHzE7Vs1l88U3tGlSl6J66tw7tIIdz8uxZbkz9zZN+znkj3Pg4LoAIl4o0sS0DgW0VEiKukvVDraUK6zxpTMZcezevJR12y7Qf5wzjYySmero983uOp8i+SMblcLPeSRPtOvyZ9PKKGQkoqSuS4Ua9SldUFs2R8hlKwHyc1ng/1cvQL58dP6mlcjQemgZmlCw/JA8aF1oMicViLm3nIS3FzDqGpGT1eZpXQLk5538AuTnnvYC5MumrQD5WYV8ECXHsGr5Kq7deyZZLktqugojXVypWlCVR6e2sumSEg4OvdBQSOfM8YNExSTTqn1n1JMiWbUmmNtPo1FWEJOSqoK9qwcVC2l+1Rkxt66Es3HdevSrNKJbyxqc2nuUsqatqV+7EqpKEOjthLGFPe2rFiU68gpz5q/ig0gZJcSUrlSHrt17ULaQlmyOkMtWAuTnssAC5MtH4O9bkaToZHyIolj1yXnTAaHVHFfgxXUflHSK/zapOwLk57iLyFyhAPkyS5VpQwHyZZNMgPysQ76kpCT/PS0tHZEYlFRUUFH6uMxVlJ5GmghUVVWk6TIZ6RIbMcrKykjuuSJRBun/L6esooLy/8t93RvJLjsSG7GCgvR8RnoGCkrKn21TU5JRVFb9/P+SNtIz0gFFlFWUUVLMD0tuP45IgHzZrsfsWgmR/OwqmInykkW2kaEmGLfcJaToZEK3/G4qSd15cKQzRl0v/BaLcQXIzzuPEyA/97QXIF82bQXIzx7ky6ayYCVAvnx8QIB8+egsbSX64lTE759QuPJoObYqNCUPBV7fnoeCXmkK1fWUR3O52oYA+bkq7z9WLkB+7mkvQL5s2gqQL0C+bJ6SPSsB8rOnn6ylBciXVakcsJPk4hcqZ4tmgTo5UJtQRX5SIPHNJaLvL/0tcvMFyM87zxIgP/e0FyBfNm0FyBcgXzZPyZ6VAPnZ00/W0gLky6pUDtjdXaWLccudQqpODmiZ36r4mLJjQYVBcfmta5nujwD5mZYsxwoIkJ9jUv5QkQD5smkrQL4A+bJ5SvasBMjPnn6ylhYgX1alcsDur2UKVOp4IQdqEqrIjwr8tceESjbi/Ni1TPVJgPxMyZWjxgLk56ic31QmQL5s2gqQL0C+bJ6SPSsB8rOnn6ylBciXVakcsBMgPwdEzMdVCJCfvclJS0vLXgVAfHw8x48f58OHD3Tv3v0f6/MbYMrK8/E/2BQ2NsXGqj03b92kUZuOdDJvKPNn4NMTYjl34QaGRmWoUKYkypkcUW5CfkrcS0LWL8QnaDvpItAtVgWPqY60MfsxffDkkrHYzT9MuqIC6hoF6N53KC72fX46Gnt7e7y9vSlYsGAmRytfc1kh/8GhJc
yYvZCwRyk/dFCrUHv2bBzBoBGT2LZzO19vcIhYxIEgTxYdeYeT6zjq1yidawOMeXiLyw9iKFWtDpWK/Xzfc7FYzMOHD1mzZg2+vr4y9yXTkB9/ncYtBvI2MR0FFKhWuxVrV/mjpvTtJ5cSH5zA2TOAw5cfIcoQoaZbloP7t1BIV/2HvoWFhfHkyRMsLS3R1s77fd0HDhyIv78/urq6/6hj4rNbHLv+mroN6lLEQOdvPzqV+vIGqxavJb54Y0bZWvDVJ6+k9bsP6kxiJ19m/FlV5nn71QwFyJfPjAmQLx+dpa0IkC9HsfOgKQHysye6vCE/NSmRdJGYN88fMm+aPZrNHXHs3wwFRUUSX9zE2W8F5hY96dmhOUoyDi3xxV84ei2iepvODLBoiXomPy2Za5AvTuPysd3MCAjGwmkmnaursdbPnUup5XEYa0vVEl/2zo59eJaa7Uay6cQxquvAlRN7mbN4F309g+hWU/8HJX43yJdudZiaRoZYzM3VY7DbKGbNSg9KFTMABSViox/Q0dKWsPDTfI+facmJJKWK0dDUQEU597YrvLxjBf7br9J+qCP9Gv38Q3zygnyn1iacb+jHDsf6xEZFMs12MPqDlxIwoPpXvpKOq6MDqaVrY2vZjcLqSrhZtuRKY38OuzTLUciPi0+WbFGPrs7Hh4c375LQ1VH7/3ykEv0eCumpynhFfzSTFfKfH13MkHnXcfV2pH6VUn8bHEh5don5sxYRX7olTg5WfP+Y49CpHvGWi1k6oHam+vkrGQuQL5/ZEiBfPjoLkC9HnfOqKQHys6e8vCH/U29jnt1nltNQtFpOxcXaXPrr2MjLOPospVqtKjy5coawW68ZOsWffq1qoqqkQOKH12xe4M3iHacpU7sNDuPHUjThJv4zvNh48CrKqhr0mbyISQPbUERH9nh+bkF+RkoC+7ZvZu3xmwQF+WOoAHcPLMI/+Bb9R9jT1LT858kTZaQSn5iGro4E/EXciTjMnICF1LUNwKblH7895H89wBvLhmO7VszmjT6UKm4oPRUVeYv2PQaxINALPxd3KFUfhwmONK9amHOrXJkdoYzDmBE0rGDIy0u7GeXkzV/vNbGd4INdj3o/7H+enp5M6IbFBC1dh3qJuvh7OlGpnBEKCpDy5gFj7Ow5dyeGDlZDcR8/mP17djN65HDiktNR0ypNUKAfXSxa/jAv8oF8MTUqlWfRmcs0MtRBnPiGiGUTcA/7g+2bp6L2+SFXRHJSMopKKqioqEg2k2frhFY4xvThwRqbHyLeWY3kpzyJYNZ0Fx4XaIKL00TiD8zAxnUpkwO3076ZKSsHNcJx3zNuPIqkuI7sD2GyQP6upbPxnxfI9efv0dLWY1XIQZrVKs3jsyGM95xDVGpBBo6ewtCu9RFHXWb+rCAeJumjq/ySM7djGDFpChbmDdBSVeZryBelJ3Hn0lG8ps8hTvMPho0eR7v6FVBWzGQEIXu35xwvLUB+jkv60woFyJePzgLky1HnvGpKgPzsKZ/fIH+isxsXX2rgMXU8qk+PYe+1lSVHj9C0mAre1u25X6Qb3g5dObZlKZvPvGKidwBVFO7j5LWIqq07MrB7O/RUVKSwJuuRW5CflvSeHVtXs/vCK5bNnS5NI3p6bgOeS47Sx2Y0zRvV+K6LYlIS3vPs8UM2rwjkr/SSzJnljoHyj4P53SL5/w75N6lm0prmFta4OVixf8NCriQWwM3bi1frJuF7RoXJk8ZSK2UrjQcsZt6GUKrpxtK9ywCqDFtJ0AiTLw9UaXHMmjGDw3eScZ4wkuSItUyet5uNO0OoUDyOytW74rbmMGZFkpnnPIwHquZsXO5C+NbFzA69SpshExlsXlb6QaXvD/lA/luqVqjLofM3Ka6nCanvubbdA6f1CWzZtggtle96lZHC29gPPLm4l4FjvJi/9yxmZQv80PesQr7k4SE1NRUxiqiqqoI4naTkVFTV1FBWUiIjNYnENNDW1MjUdSkL5Kenpf
L4YBC2gddx8ZyAWe0K3N08je7+l9i0dQWasX/h6ORJjUEzmNJUkfm+rmy8q43jpLEUjDmHu/9qhroH8mfbBjh1bSCN5C/uU4VrBxYyasZBxnnOQC86gqAtYVgMGU//1jX4lTlfgHxZ/ypkz06A/Ozpl6nSQrpOpuT65YwFyM/elOU3yJ/kvZSmnXvSq6M5ymnPsTRtg1nAEexbFCb2zUtevfkgHfCja2fYvn0PzW29saiYwUT3BdRo15VBXVvlm3QdCeSHbl3N3guvWDF3ujRy+jQ8GM8lR+gzxO4HyBelp3B6ozvj5h1CXa8o7a1ssevXAV2NH2Hyvwf5t+hgacvhM6coqCjmWPA6loVeprerD3phUz9Dfqm/AukVcJ8F8yejb6CPVsZbXifrU72q0ecLJTpiM15ztlGty2hsrRpLvpfKVIcxNBrrROsSBXkR+ZCkDEXEohRuXDvA+k1nCAkJ4dL2ZczYdpkOw50Z2KTETy88+UD+G6pUMOHk+ZsUlEJ+HNd2euO0JvbnkB99mlETZ3H+5hOUi5myaJ43NcoY5BzkZ+8W9LelZYF8SeFnR4IYOPsqnn7ONKhSmvTkeJ48jZJ+2TY59jUH18zjvFpbljvUYdHMIOJLtcBxfG80EDNnki0Xdc3xH90D375mUsif07UsjgO6EF9vOC6Wpog/PGXz2nXEG5gyyXE4BpnLOsoldbJWrQD5WdMts6UEyM+sYtmwFyA/G+L9AkUFyM/eJOU3yHf0WfYlJ1/0EkuTljT0PcDYFobsWTufJdvPIvnyfPz7N6SqGDLcyQ+LSqJ8CfnpyXHs3LqW0PDnLJ3vLV3oF3l6PdOXn6aP7SiaN/yywE+UkUJicgbaWppIUneuhe1lzvy1mI/wY2Ar4x8m+b8I+V9y8kWcCF7P4pDzWE31wfCM22fIb1RekzlTxhF27w26RUpTr5E55maNqVLmU+RazPVdS/DeeIE2NlOwblHmG20z3j3E2d6ee4mqiEQZvH//CgODEvkM8uOoWrEWuyNuUFYC+SnvubzZkylbM9gaMhvNz4tZRCQnJ6OkpIqKijLpqcksGdWRJcm9ubjW5oc1L1mO5GfvFpTjkB91O4zJUwNIyBBLx/z6ZQxGzYex3KEui77Lyd/uM4Zlz8uxxM0a/0EtpJA/y8IIq9Z1eW9QldKGkhUgIpISkzBr1xPrQf0w1JQ95SiXpMlytQLkZ1m6TBUUID9TcmXPWID87OmX30sLkJ+9GfpVIH+MyXuaNvyTketO0qu2AbfP7ScwaCXNhnhjUUnMRPfAfBfJF6UlcXjnFpbuOo/PvADK6ShybvMclh15zRC7UTT8aieYuJfXWb/3OtaD+qCuKObZzTMs8J9N4dYTcOjTQID8yFtfLbz9O8gfQ4mUh7xULUXVUjokRT/A3c6ayLL27Flo838NxTw/G8z0JXuo03U0Nl1MpRB37lw4f1SviWjvWCo4vODF/W1oKKcRfmQZMxefkEL+5dBlzNh6mfZ5HskX06xGRWzXnqRfzaKkxEUT6mvH5nfmbFw4HNXP2V0JHD10BP3iFalWqYJ0XcvlhQNpt7wATy4EfJW7/1GaXxfyFzEw4DKeflNoULU0blZNuVh5HNucO5Me95LV08dzUrHVR8j3n8f74uY4jR+AlmIGCxyHcVajKbPGWeHbt5kU8md3KcOEAZ0p3NUF+86mKCqISU5NR1VNgwIGekK6Tvb+5MhcWiQSsXnzZgwMDLCwsPjbckpKSigq5q8HLwHyZZ7m7BsKkJ99DfNzDQLkZ292fh3IT6KrRW+q9RpPnSKKPLgezvHz9+k91hsrE21cpvmSrFMEq/5DqFu+JBqqst/0cysnH3EGdy+HMWfeCvRqtMKktCqn9u1Fu2JLbIdYoRAVwbWodFqZNyXp1VX6DnSgk81oimup8OTuda7dek6f8R6YVyssQL5MkD+a5JMLmX4yAxuLuuhqiHl4ZBkX1Xuwxu8T5IM47gErAmfz4J06pk
2aIkpLZOnmo3jM8qTotWXUHnqYRXPtIS2e6+d2c/1pBltCQnh6fCO+S3egXqUtEwa0oVSpH1N25JOuAyumDmHxk3JM6lqZxLfR7N4YTOvJy+hvqsHtKxcQ6RpRs4Yxq+Z48tc7JWpWr42WspiDy2Yiau3ForGtfsiP/1Uh/+2VrfR3DsasXSv69e5FiK8Noe+rM6J9VRLfvWL/1mBSjXuwYnITlvj7ce5NAbp0bI66QiIbgvfQbvBo+rdpyJTuDaWQv9CqIjsX+7AuPBbLbs1RV4aEDHUqV6tNnYrF/3aLzuzdieVTWojky0dnAfLlo7O0lfwE+ZWa9uDOg8gfRq+kpEj604hsq7L3yGnKlS1FhT9yb6/obHcyhysQID97guYV5Me/e83BkLWoVe5Ex0YVpYNIevOUbfvCqFCzLibVK6Ioes/i6QEYdx9Ly8o6nNiykL2XolBTVUNZVRl9w4LUbdyOxtUKcWTnDsIu3qSquSXtGldHV0PWDTgh1yAfSE18R/jJA+wLu4HkOlfUKECXLp2oVdmYm4dWcuB2GiNsB6CprszZ7UvZdfE5GupqZKBEdVMzurZrwk/W3fI7p+s8P7WONWfFDLPtjqH+x21GY99EMX/xWiY5O6KGmLvnz3HiciSmHbuh9WAX+x8q0b5tK4x04pgfuJJ3iRmoq2SQkqFMx8ETqF/66931xURePc3eQ0d5nQSitFSKVG9Dv44N0FVNxM3Z63/snQVUVcsagD+6VBS7UbHz2o3dLXYXdnd3Ynd3YGN3d129dncnijQc3trbK08U9cAJwPvvtd7yPs7U/mY4fHv2PzMEWFhjbmGJiV0CksQxp2VbV0xe32TXzu2cvu1Ng/p1yJEjyw+/fMaS/CCvewwfuwAsbEATRLykTrR3bUrI+/sc3uVBYIKcVKpQBq9Hf7Nh+36evffDJCQIP+LQs1cXEsb+MbA8pkp+iPdjli5dz70XH2jUujNJAm7gtnAnJpaWWJqbEjt2bGwTZKVJ1WycOHiAw1dfYG3ij79/AFaJs9G8fmVSJo7Luhlj8M/bmKaFUvLp7XPWL1/MA89AdRtQW4fUlClfkVzpk4jk6/YnR+vcMpOvNar/dsLoJvk1K5akVcPqYTpFOcwknWMKnTuqaPVW9OvUnCpli0WoLOUPk/K/6PbKS5ubEMnXhtLP00SV5IdoNPj7+YC5NdaWXxaWhmiC8fMPwNzCAgt15xINPl7emNnYYWWuLIQMxtfXB02IKRaWlpiEBGNiZq6mDQ4KwM8vADMLK6wso8fuOl+pa4KD8Pf3Iyg4BAsraywtLNRX/sGBfvgHoUr9lxOPlS0PfQkK1mBmboG1tfVPQwP+ZMlXdmPxDQBbW2tM/93KRImP9/bxVQ9pUqJRlF1VAgKDVZ4mwQpHE6zU3VxM0AQF4OPrjyYELK2ssbb6fruZLz0TGPBF9DA1x9bGKvT7TxPoj7evP6bmFmqZgYEBWCl9oTy0KXkCAtW++TJGw17GknylViXeXNnFBlMzrG1ssVAOwgoJJsA/gBATc6z+ve+gQH/19yokxAQrG1tVfMO7YqrkK/cS4O+n9ouNra26o0+An48aYmNhoezVD0HBYGlpTmBAAJqQL5yU8zqUflX6URlTvt6f1YcmG0tlgiCE4KAgdU2Dkl4ZR5YW5hHaHUi3b2bD5JaZfMNw/b5Umck3Dme1lugm+a0b1qBX+yY/JTBzyTomzlnB2/ee6oz8qL4dqFRa2QEC9Wft+o7h4PHzBAUHUThvTuaO749jymSUqtOOQyfOY21liUuVMrgN7krSXOW5c3IzTo4p1fxK2QtXeXBp/2q27j1Kz2FTaNO4JkPd5nJq+1JyZc2gpvlZ/UbsNq2rEsnXGlW4CaNK8nVrtX5zG3ImX78t/X9pf7LkG4qZMco1puTr+35isuTrm8WfWp5IvnF6ViTfOJxjnOQr4Tateo5g+/Kp5MySgV2HTlDXtR9XDq1VRb1xp8E8f/UG9zljsL
K0pGWPEfgHBKjplStuphKsnDFSncl/+frdLyV/18GTNOwwkLrVyjKoWysSJ3Rg/9Gzv6zfiN2mdVUi+VqjEsn/CSqRfN3G0K9ye3t70717d9zc3NTZ7z/9EsnXXw9ru4Wm/mr880sSyTdOH4vkG4dzjJP8yk26ki9XVob1dA0lVLFRFwrkzqb+7JOXt/rzL6diwsYdB+k4YBwv/9kbYcnffegkStkPz24jdYqkav7f1W/EbtO6KpF8rVGJ5Ivk6zZYIpFbJF87aHPmzCF16tSULiKWjaYAACAASURBVP3jKbralaB7KpnJ151hdC9BJN84PSSSbxzO0VLy7zx48kNcX7aMTmoITcaitbh9//EPdJrVrcLSqcO4evMeg8bP5srNuwQFfYlfVmbyPW8ejpTkV2veg4BHp0Pr+139Ruw2rasSydcalUi+SL5ugyUSuUXytYMmkv8jJ5nJ127sRCSVSH5EaEU+rUh+5NlFOGd0i8lXQmma160a5j6srS3VcJzMxV1o26QW3do0/OE+ldfAjvmrqvH5k4f1UBfrKXH1TbsM0VryZyxey6LVW9QHCmUm36VNXz7fPRZa16/qjzB4I2UQydcNtMTkG3Z3Hd165+e5JSbfUGR1K1fCdXTj921ukXz9sfxakki+/pmGV6JIvnE4q7VEN8n/1cLbqs26kyh+PBZNHhJK6PGzl6RImohnL9+QKm/lMAtph7jNZfoi93Al/+Onz2qM/rXD68iSIa1aXu+R09h35MxPJf9X9UfXnXdE8nX7ZRLJF8nXbQT9OrfM5GtHV2byf+Qkkq/d2IlIKpH8iNCKfFqR/Mizi3DOmCT5ysLbOq592bhwAmWLF+Tk+X9QxFtZWJsnR2YcMpdkxug+tGpQHY/dh5kweznn/7nOu2sH1Th9ZTedPh2a0aJ+VeLGiU2SnOXo27E53V0b8uDxc8rW70AsW9ufSv6v6i+aP1eE2Rsjg0i+bpRF8kXydRtBIvnfEpCZfP2NJpF8/bGUmXz9s/xViSL5RuQdkyRfwaLMzE+et0rdHSd1iiQM6NISJSZfuZas3cqAsbPU/cCrV3Bm4pBulHRpq26t+ejcdkZPW4zb7OWUcy6Ix5JJbN51iB7DpqiH8CjbcVYsVYR5KzZy9dC6cMN1fle/EbtN66pE8rVGFW5CkXyRfN1GkEi+SL5hRpBIvv65yky+/pmGV6JIvnE4q7VEJ8k34m3/Z6oSydetq0XyRfJ1G0Ei+SL5hhlBIvn65yqSr3+mIvnGYfrTWkTyo7gDDFy9SL5ugEXyRfJ1G0Ei+SL5hhlBIvn65yqSr3+mIvnGYSqSH8Wco6p6kXzdyIvki+TrNoJE8kXyDTOCRPL1z1UkX/9MRfKNw1QkP4o5R1X1Ivm6kRfJF8nXbQSJ5IvkG2YEieTrn6tIvv6ZiuQbh6lIfhRzjqrqRfJ1Iy+SL5Kv2wgSyRfJN8wIEsnXP1eRfP0zFck3DlORfB04dxsyiacvXrFhwQQdSomarCL5unEXyRfJ120EieSL5BtmBInk65+rSL7+mYrkG4dptJP8TMVqc+veo3Db5Ta4K73aN/klmeu37/P0xWt1O0x9Xz6+fqzcuBPXxrXUou8+fIK/fyBZM345NCsmXSL5uvWWSL5Ivm4jSCRfJN8wI0gkX/9cRfL1z1Qk3zhMo6Xk16xYklYNq//QtgQOcdXDqn51DRo/Gz//AHUv/O+v4GCNuvd9ZK/9x87Sb/QMzu9eEdkiok0+kXzdukIkXyRftxEkki+Sb5gRJJKvf64i+fpnKpJvHKbRUvJbN6zx0xn7a7fuk7dCY05uW8Jf2TKq7S9brwOpUyQledJEjJ62CFNTU5IlTsjlA+7YZ3Rm2bTh9Bw+hYFdW9KtTUNWbNjJmOmLefjkOYkTxqdH20Z0aVU/lMW4mUuZsWgtH70+U6zAX8wd3587D55QuXFXgoKDsbG24tyu5cxbsSlMuM7speuZsXit+ibByTEFI/q0o2rZ4m
q5+So2pXHtihw+eYErN+8SGBiE8maibrWyUdLTIvm6YRfJF8nXbQSJ5IvkG2YEieTrn6tIvv6ZiuQbh2mMk3ylwcMmzWfv4dOc2LpIPZ2244Dx3Di6QZ3lr9GiJ05pUqoz+b5+/timLUL5EoWYPqo3yRIn4PmrtyghQZsWuVGxVGFOX7hK2fodOLFlMflyZWHTzoO07zeOrUsnkz5tKroOnsidB485vX0pUxesZuXGXaEz+d/G5G/YfgDX3qPZtnyKWs7WPUep374/Z3YsI0+OzBSs0lw9jXfPmplkTJeaWUvWMXjCXN5dP4ByypuxL5F83YiL5Ivk6zaCRPJF8g0zgkTy9c9VJF//TEXyjcM0Wkr+g8fPsbAw/6Ftry7vxc7WhoDAQHKXa0TnlvVRZt0nDe1GrUql1PTfSr4StmOTpjBLpw6jWd0q6udKyM7rt+9JmjhBaPnZS9WjY/O6tGtam0qNu5A1Yzp1ll25FDE/cPws9auXZ8Zi959KfsVGXciQNhXTRvYKLbdw1ZYUyZ9TLUuR/Hw5szBjdB/18/uPnpGuUHVe/rOXxAkdjN7bIvm6IRfJF8nXbQSJ5IvkG2YEieTrn6tIvv6ZiuQbh2m0lPyfxeSnS50idNb7zN9XKVytJVXLFsNjyaTQ+whP8o9vWUSRfDnVNCEhIUyYvZw1m/fw4eMntbwXr94yflBnNZQnY9Fa6r/tm7n8wOZXM/lKPiXkp2OLuqH5mnQegrJYd+PCCark165Uit4dmqqfKyE9KfNU4sGZrTimTGb03hbJ1w25SL5Ivm4jSCRfJN8wI0gkX/9cRfL1z1Qk3zhMo6Xk/yom/2uD3bfspU2vUaRNlVwNn/k68x+e5Cvx83lzZlGzLnbfqi6e3bFimhpWo1zKW4GmdSqHSv73sv61zt9JftfWDejQvE4o08adBuPn769usalIvkvl0qFrDUTy9TPAlYc05cHN2JexJX9w1VTMPO6l3muIRgMmppiampA0YxG6NK/BxUsXKVmtDnUqlcDsGxiaoLc0K1qQZC0XMd7VWa+YpkyZQqlSpciXL99vQ85OnTrFrVu3KFOmDAkTJvxtO/w+vWT90qkMmbiSz/7BJElfgAmjB1PBOQ9hgttCQjiyrD+tR6zko08QdvGS06h1F4b1aIZ5OFFwnTt3ZsyYMSRI8P83eb9tTBQk8Pb2pnv37ri5uWFtbf3TFtzeNpkBI904cMePEE0wmhAwM/syAmIlqcWZ3T2pXLcNx86cIFYk7iP481O2L5/A7lsJmDNtSARKCObp03vcvfucEiVK/DafMq7v37/P8uXLGTdu3G/Tf00wZ84cUqdOTenSpbXKo/n4D4WL1+PeC09MLawpXKI2a5a6YW0RdkOGz49P0X/QKDwOXMQvSINDsuzs2e+BY3y7H+o5duwYjx8/pm7dusSKFRnKWjVd60Qi+Vqj0jqhSL7WqHRKqPw9NwmJir/oOjU7Zma+udCETJXPG73xSrz87yT/vecnMhd3YdWsUQwcN4tq5ZzVRbXK9TvJb9VjBP4BgaycOVJN/8nLm+S5KzKyTztV8pWwm3SOKZj5b1iNEtqzYJUHvTs0QVlY+7OY/MpNuuLkmDJMuE7+Sk0pVSQf4wZ2Fsk30Ej6r0g+/z7HvH16h0n922JXajCDWpRUqXo+vkjfsQtxrlKLKiXyownWYGMbCysLRfZCePHoDubxHEkYx1IVQX8/X/wCgrCwtMba2goz08itCTGY5GsCuXBoG25T3XEZOIVqOW1ZOm4IZ33S0q1ra7Kl+P8OW573T5K7QkcW7zlAsRR2nN+/gbHT1lGj/3SaO6f+YdT9aZL/dVwo/XxlUXtcV4SwbvVYUiZzQHkaev7ouir5h08cxsTbG8wssLGxweLfXcaU8eDj40NgkAYrWztsLMOGSX6V/F034jF1fG/8AoKxsrHB2soy9GEr0N8XHz9/NCEm2MWKjaUZBPq9Y9uutWzefIZZc+ZhY2vLdx4dpm
+MJfl9yuTkSpEJbB1Slo9P7zCgTUvsGs5kcvO/vnl4DGBYnx54JcpJt1aNSGarYWjdkmxK1IlrC5qFfcgERPIN9OUejYoVyTdOZ4jkG4ezWktUSn7tyqVp06jGD3drY22txq836zpU3Z1m9ezRXLh8g+I12/D33lXqgtYG7QcQEBjEgomD1F1wlIW3387kK4t2123dpy6kDQwKUt8GKHvrVyvvzIRBXVAW0LbuNVINscmROT19R83g2u176gJaZTedkVMWcmn/amLZ2apvBL4ehrVlzxFadBvOzpXTyJ0jE+4ee2nVcwQX964mW6Z0IvkGGrv/Gcn/l9/bp3eZ1M8Vu9LfSP6ji/QdPQfHTJkw9XrNlbsvKFK9BU2rF8PWLIgRrSqToO4s2pdNy9O7l9mxZRNnbz0nRaa8VKtalRzpU2BlHvGtZQ0l+cH+3uzavJYVh28wZ7YbDqZwe89cJq67TpN2nSmWL33oaHp2zQO3NbeYOqqv+kDz8NJRZk2ZSpraI+hQLfufL/nf3OHVhe1oo0j+mn8lny+SX7ZaY9wmj2Dv+k34xkpJzfpNKZ0nHWZBPjy+fZ61G3dx69FbsjrXpkG1kiSLaxVa6hfJH8vy/e+pVDwDRy8+JG+pyrhUr0SSuLHwevOcI7s2s+f4eT6GWFLKpS0uhRx5sMONznMP8uLFB6q37Uuj5q3IFf/nXwLGkfxgcmTOyNzjlygcPxYan3ecWdCTkSfTsXnNYKy+/goEP2fpopWkz16KAgXyYG5qwvGxNWl4LD/3d/T/4Q2RSL6BvtyjUbEi+cbpDJF843COcsn/2WFYpYvmp0/HptRt24+bRzeSJNGXvxrK7jqXr9/h6OYF7Dp4koYdB6qCf+v4JuwzOIeR/HcfPlKvbX/OXLxKymSJmTS0uyrq3YdOZkTvdup2mqOnLWbW0nV8/PT/LTSVuPnHz15S0qUtShnblk1h446DYbbQdJu9nGkL3fH85EXm9GkYN7ATSpuVS8J1DDN4RfLB89FFeg8azT3vOHR2bYr5q3MMmLCR6Tv24pzKgk5ls5K4rQdditqwaPIQboZkpX39EhzcuoHX1hlxbdmY9El/ff5EeL1nKMkP9P3Ilo3L2H72FQunj0aZW35yejWj5h+iQevOlCicI9zBFOjnxRGP1azefY0Oo93Im/z/svo1wx83k/9byb/GX4UqUrNxW5rUKMFhjxXc9IvPwFEjsbq+maEz1pGzbAPKZk/ApMF9sKw0jgWdvmz7q1yK5G9bMoyha57ToWs3Msd+y9xZ88jj0olWDWtzau4gFvwDg3o1w9b3IXXrtqOHx1Xqp3rHph0b2LbtHFNnzsHePi7WP+6lEFqPcST/A1kz5GbfuWsks7eFgI9c3jySfqu8Wb9pDnY/aZ/P28d0rV+fJB0WM6JWJpnJN8xXebQuVSTfON0jkm8czlEq+Ua8xf90VbLwVrfuN3ZM/tfW/nwmfx6FqtSlYdVSWAQ+xSV/eUpMPkDHEvahkt+pkDXLZ4zjsXUOalcsjJ2VKWaWdqRK7Ujc2DYRBmJQyd+wjB3nX7F42mhVqp6cWcvI+Qdp2KpjuJIfHOTPjdN7WLzEnYzlWtOmXinCezfx35P861Ss04bdJ46T1DyEg2tXstDjIg0Hj+T60oHsf2VL0zrlcIxngeeFxbSf+5InN3aGkfzty8ex62Z85kwfjgkhbBjbmxUPEjOgb1ucbN9y+fZzggP91PUii3u2ILDtfjZ0dGTfvjW4ux9n0aJFvx1bxpH8d2TJkJcj566RUJX8T1zeOoZ+yz1Zv2luuJIf4PmMDbNGsP56bBYtGIODreUP9yIz+b/t3hifQCTfOF0okm8cziL5RuQcVVWJ5OtGPtpJ/tiF/194q3lJ3bylKTRuD13LxA+V/EE103Pt3BHcN+zE38wGhwRJyJ63CEXz58Qh9o+z3r8jZCjJD/L7xLaNK9l06gkLZo5FWXb68NgKxiw5RSPXTjgX/LJg/uulxJ
U/vHyUBQsWETdHFbq0rIe1RfjrDP6Lkv//hbcajqxdxbxN56jTfzjHFg/hwsOnZE+fDmsTCAn24dX7YFYum/ed5IddeHtuUT+G7AumT/8emD/ZzfI9V7ExC8bMxITz29eRoONONnfLGA0l/zPZMuZgy9mrpLO3JcTfk4vuIxi02YQNGydh++2KdeUthvcrdqyYyaazb+g1ZARZHRP9MIuvgBLJ/903Rcz/XCTfOH0okm8cziL5RuQcVVWJ5OtGPuZJ/ma6lnTg7/NXSZIlHw4mH9ixej6HH5jQoXNnCmVLGWEghpJ8TaAvB7dvZN6mkwyfOpUsDmYcXTWR5cc/0qZTRwpkSx6mrW/unWXatBlYpy9DZ9fG2Ft9Z2vfpBbJ/yL5dQeM5NbqodwySUq7ZvVI7WBDiCaQl75m5EqbNKzkLxvDjptxmTNjDGZoWDWuL+ufJmZQT1dGuVYnd/dJ9CidAztLM1rlSsGHFl8l3x1392PRaCY/hLK5MlJzzkE6FEqBn+dr3Ee2Y5emOisnNyPMc2HAB/asX8TW049o2LY7BbKkUWPzw7tE8iP81RHjMojkG6fLRPKNw1kk34ico6oqkXzdyMdEyW9fwJT5U6fxxCIteTIk5PGV03ywcqSNqyu5nBJHGIihJJ+QYB5cPc2MmfPxS5CNHCmsOX/8JKkLVKV109p4XdvB8ftBNKhTHRO/9wzu0ZbdVz/TqnUTEthagKk56TJmIX+eHGG2E1VuUCT/i+TXGzwGxxe7mbx8H8nSpcUpVXw+f3iDVe4GtCvpGEbyty0dhduGp7jUroCVSRB79p+lQI2mtK1fnmmda/LIISf5HJMS5OPJ1oXTMa/nzt5hhTmwbytjRk2jVb8xFC5SBMc4Px9ixgnXgU1TezHmWDCtymdVJf/Y3iM0HLWYKhmCOXP0AEHxMuJcrDDHdixn1ITZpMlbioI5M2Jt/uXBsbJLfeJ9t7hAJD/CXx0xLoNIvnG6TCTfOJxF8o3IOaqqEsnXjXxUSb7Px3cc2+2BhVMJSuVJp96E34fn7D5ynrSZs5E9Y1pMNF6smb2YlOWbU8TJlq2LpxIrfxOcM8Tm5qXTHD13E8yU6GozMv9ViHx/ZSX2L2a/f0bKYJIPBPp+4urfJznx9231TABzOwecSziT0TE59097cPReEA1cqmLi84bJsxZjGTs+9nb/xkubWpAxa3aKFMzzn5L8V+c82HwphAb1K2D/7xqLT56vWL7GA9f2bbEkhAeXL3H22jOylypHJodAThw6wLW7jwkxNUOjCSFb8Vo4Z0sS2uUa/4/cvHiUXSeeEscO/AL8MY+dnLKlnUmXKhFP/t7PjpPXCAwxxdzcgoRWPnxKXJ5WlbPy9OF9tm3xIFa6vBQrVhxH+6iX/ECvJyxetokAE0tMQjTETeKIS41KmHg95eyxQwTYZ8C5WAGO7N3O+cu3iBU7Fpb/Cr7S+lqNmhNfeZD85hLJ1+27NCbkFsk3Ti+J5BuHs0i+ETlHVVUi+bqRjyrJVw7C8vP1wcTCGut/9zRXYtJ9/fwxt7DE0kLZIkSD96fPmNnEUmPTfT57YWplh7WF2b/75PsREBiMmYUl1taWmJlGfPtMhZ4hJV8pXxMchJ+fL0HBIVhYWWNlaYkSMREc4IdfENjaWCmJ+Oj1+bvONMHSygobG+sfYqj/5Jn8YH8ffALAzs5GPShNZagJxtvbh1ixY6ssggIC8A8MwtJa2SvfRB0Pfr6+BAQFY25pja21FSbfRqWEaAgM8CcgKAQz0xB13Ch5rSwt/mUbgq+Pt/pzC2tbLEMCCQgxx9bm65kMfgSFmGJtY8Ovdmk11ky+wiQowBcf3wAwNcPG1k7loIwj/4AAQkzMsLKyVH/HAvwD/n8Mwb8jLHYc+x/OlYjJkr9t3Qou3bhLYHDYAwVt48SjVNXq2Dy/wk2v+FSvVpQflxz//jv09f1rnPjnPmlz5Cdnuoi/LQy3huB3LJixggrNO5IybtgHrt
+3KHIpRPIjxy2iuUTyI0pMh/RRtU++Dk2WrBEgIJIfAVjhJI0qydet1frNbWjJ129rv5T2J0u+IXgZq0xjSr6+7ykmS/75E0d5+OQFQZogZroNJ0vtzpRKmxAr21hkyZaOG1sWsvtlGiZN6IRdJM7Mu3l8O3M3HqdYjWbUds6sNfpXdy+y9cglnHI7U/KvtGHzBT6jW5uBuI6fS5bEPz8NWuvKtEgokq8FJD0kEcnXA0RtixDJ15ZUzEwnkq9bv4nkG34mX7ceCj+3SL4hqOpepki+7gy/ltCsWTMmTpxInDi/WATxb+IAf3+CgzXqQXI1imentJsHnQs4obzOMfF5yfb5E9n+PB2zpnePlORfP+LBdPfDlKjThvqlsmp9k/fP7sJt+R7yV25Ci4p5wuYLCeTJo2ckTJ4K618do6x1bb9PKJL/e0b6SCGSrw+KWpZxe2kc0pXeipl5xA/J0bIKSRZFBIKDvLh3oBoZmn+Kohbor9r/2mFY+iOne0kyk687w5+V4O3tTffu3XFzc8Pa2jizlYa7m9+XLJL/e0bapoiI5H9bZsX8TpSbtpPuhTKoPw768AiPOWMZse4eaZIG8vjVJ5JkLc2A3h0pmsOR4A836dVrMMev3CckxIIcZVoya5RrmK1IFcmfOG8Jd9/4E/zxNX4hlpRv1Y/eTSoSz86KrdN7M9H9KEGmZljbJ6dehwEUSuLH1NEDOXblCfYJ09F38GCqlC2CzdfDyjS3qJK/AYM2HCdP4iAOr5vNqHmb8QrQkCBVdtp06EitMj+ux9GWX3jpRPJ1oad9XpF87VnpnPKRR34SOrXBNn5uncuSAqIXAZ93f/Pm7gJS1zgbvRoWidaI5EcCmp6yiOTrCWQ4xYjka8d2zpw5pE6dmtKlS2uXwQCpYnK4zm8lf9ZAFl4wZdjokTjavmL86Ak4FapDs0YujO5SlzepqzC0VVWsgj7SvnEdSg5cTacymUKLVSR/xIT5ZCzfmDb1y+J/ay9t+82h56RFlEv5HOdqAxmzchM5ksC5gyuZt/oK4+fNx+/aDiau2Eu+io1oU60Q5hbm/19fE3yF3ElLMPnEIzLbPqWXaxvKdp9J5RwOeKyaz+WnGjr27keGJPqboBTJN8AvTjhFiuQbh7Nay5sLgwn5+JhEmbsYsVapyhgEXt+Yjol9KhLmGWmM6gxah0i+QfH+snCRfMOxF8nXjq1I/o+c9DuT7/YlXGeGEq7jw5hBfbBLkYdmFdPQsHF/ekxehvNf6TAlhNUDG7AquCq7JzYNI/lhw3U0dG1chSRVe9C9VkkszeDUno2cuHiDx0/v8vChJxMWuGP19Ajjl+6iQJVmtKqcN+xNhkr+Y7LFekuPFnUwyVGHQd1akTq+HZoQsLRSFulHYhHBT4adSL52v4+6phLJ15VgBPL7e97ikUde0pXeJiE7EeAW3ZN+CdWpSuoa57GKmzG6N/e37RPJ/y0igyUQyTcYWkTytWMrkm9gyZ/7r+SrMfm+jB3UG1tF8gvHpnbr4Tz7HIKN1ZcdbkzMrXEq14F1o5v8QvJD6NuyBmal2jKgSnpqlaxAigqdaFurOK8enWHFsp0Mn6tI/lGtJL94WkvePL3Hni3uuG/ezQez5DRv14mmNZyx+dV2TtoNr9BUIvkRBBbJ5CL5kQQX2WyvT3Uh2Os5SbP3j2wRki+aEXhxZSxmsZORqND0aNayyDVHJD9y3PSRSyRfHxTDL0MkXzu2IvlRJPm1ctK2cVdqdJtA9TL5sTWHt29eYmqbAIfYVmEkf8qynRSo3pImlfIS8PkpHZq2pFi7UZTVHCJn91Nc/XsDSSwDOHV4AzNnb2L4HHdsnh1l7OLt5KvUlNZVC6B8z4deoTP5D8kT5yV7Dl+nULmyJLQJZuPSmRy8/ob2PQaS2zG+doNIi1Qi+VpA0kMSkXw9QIxoEUpsvp1DXhKkbxXRrJI+mhF4e2cR3u/P/xGx+F/RiuRH3SATyTcce5F87diK5E
eR5DdpyO65w3A/946mLRqQIhbs3OCOY7UeNC/5/zfESkz+mPFTMEnnTL1Kxbh7cit7rgYwbsoIMgX/Tbm6gyjbpic5EmjYvmENL/xsGDV9Ecl8rzFp5hJMkv5Fu+Z1SZEkAWZfj/P4JiY/m91jBvQcQNKCtSidIzGnDu3Cy9qR1q6uOCaw1W4QaZFKJF8LSHpIIpKvB4gRLSLI5znP9tbA0jYViTJ3ktCdiAKMBumVEJ3XN2YS4POY5OU8MLdNFg1apZ8miOTrh2NkShHJjww17fKI5GvHSSRff5I/rEcb/nIdTvVMX/4+BH9+zeldmzj7Pgnt29bAmgDWr1iMVXwnSpUqga15IOf2b2b1lv28D7ShfM2muJTPh/U3YTJPrp5m9+mrSiwPj26c55NpIho0a0reTKmxMAvh1vHtzFixCbO4aSldvgzvH10lX4U6ZElkzamDO9i4/TglazahTNE8WIfurvOQrk3609ptCVmTmPP07j+4r3LnxtOPZMlbjJrVKpE2WXz14Dx9XSL5+iL563JE8o3DOdxalNAdz1tLiJuqFrESFcXKPr0IfxT2x++qVsTe/+MdPr8+jufjTcTN2OKPCdH59t5F8n83Egz3uUi+4diK5GvHViRff5KvHfH/ZiqRfOP0u0i+cTj/tBZlMe6neyvxebIHf8+baIK8orhF+qve4h0E6i+ET38Ni2RJpuaxsYqbCduU5YmTrvEfscg2PBQi+ZEcIHrIJpKvB4g/KUIkXzu2Ivki+dqNFN1SieTrxk/b3CL52pKSdBEjsH07NGkCK1ZAlSoRyyupo5SASH7U4RfJNxx7kXzt2Irki+RrN1J0SyWSrxs/bXOL5GtLStJFjECJEnDkCDg7w+HDEcsrqaOUgEh+1OEXyTcce5F87diK5IvkazdSdEtlMMnX+HH2yD4OHjvHe29/bO0T49KkOVlTOPDthkIhQf7cv3GBrdv38uqjLxY29hQuVZGi+bMT2+rrYoUv96jRaFi3bh3x4sWjWrVqP71xMzMzTE2/rmbWjY++covk64uklPN/Ap6ekDQp+PmBcnz8ixcQN64QiiEERPKjrqNE8g3HXiRfO7Yi+SL52o0U3VIZSvKfXD7MtEUbiZPMiYyOibl6bBvPEzgzZ2BLLC3+L++vHt1kyeypvLFMQ/7sjjy9fp6r78xxbduGgtnS/v80YJF83Tpacv+BBIYNg+HD/39j0Jn4zwAAIABJREFUQ4eC8jO5YgQBkfyo6yaRfMOxF8nXjq1IfvSTfM83Lzly8BAJcpaiSKbEYRvo84yVG49QtHxlUie0DzNbrV2PR00qQ0n+rhVT2XH5LU1btSNvhqQ8O7GKSr22sHn/apy+njcQEsStS4dZMH8TlTsMwTlrIl78s59+UzZQuUlLXMoW5Nu5fJnJj5oxIrVGRwLKLH6aNKD8+/VSZvEfPJDZ/OjYX+G0SSQ/6jpKJN9w7EXytWMrkh89JH/1yB48zdyYPi65eXrnOjMmTyZdrV64ls0UpoGaDzcY5rYWlzbtyeaYWK/bXGo3YiKXyjCSH8TKacO58jEOrV3bkT5JbDTPTlCgTA8m7j2Mc0qbfxsbgp/PJ96+9cIhSTJsLU25fXwTE1cfo1aTVpQvlE1m8iPXrZLrjyfwdRY/Z0745x/4+q/M5seYrhfJj7quEsk3HHuRfO3YiuRHD8kfXq0ANwsPZ02/Cjy+dYVJY8eTvt4AOlXMEraBmgDee34mVmx7LC3MtOvkaJDKMJIfwJJJw7jj7UCbdu1Jk8gO3pynYDFXRu46Qdk0XyU/LADv1/eYMWU6XvFz0r51A1LEDZtOZvKjwYCRJkQDAsrsvSL53brB0qVfQnYUuW/eHKZO/fKZxOZHg476dRNE8qOui0TyDcdeJF87tiL5ukv+xa3zGDB5MfdefFAXdNbqNpHhzZ15cvMK08YM4pVNAm6fOolngBm1XHvQqWUDUn4jlu0bVmXf4dP42qWiaJGaDOpXnbmDen
LfJCHvb1/kvZ8JVZp3pmubJjgG/k3dllPpMGEqxbKl5NCCwfSbuYGPAQEkSpWD1j2G0qJCLu0634ipDCX5SycP49bnL5KfVpH81+cpWLwdI3cdC1fyA71esXL+TE48CqF91y78lTbRD29DRPKNODCkqhhC4OuMvszgx5AO+38zRfKjrstE8g3HXiRfO7Yi+bpJvv+DvVRtPJzGA6dQv1xePJ+cpUq15nRbuo9C1u/o3bELcYo0ZkLfRtw/sJLRay7QtGMXajmHDREZWjkPN4qMYt2AiupM/sAuXTDJWZuJA5rz9OQGRi8/Qb02HXHJ9J6q9cbTfdZ8Smb2JVfKwozacZVK2WJz1GM601ddZcrS5aSNb6HdADBSKsNIfgjus0dx7rU5bdq0J1PyuATeO0juKsNZfGIP+Rysw9xdsJ8n21fNZ92xh7j2GohztuTh3r1IvpEGhVQTgwiI5MegzgrbVJH8qOs6kXzDsRfJ146tSL5ukr97Ug/G/xOf5ZM6kzJhHEI0GuZ2rsxGmybMb5EFt7Fu/NVyJK6l0vLu4nb6uG3BuWErGlYJu9jze8lXw3XqDqBTpSx8uLqPvuPXU9ClGU3y+FOj/r+Sny0u1QvmIUm5zvRo40LiONaYmloQJ05szExNtBsARkplGMmHszuXM3vLReo0akrRnGk5t3Eyg7Z5ss/dDVtTDV7e/sSKY49J0GeObV/JvDUHqN62P5UKpFfj8M0trLCyssD0m/02RfKNNCikmhhEQCQ/BnWWSH506SyRfMP1hEi+dmxF8nWT/HXDO7P8XTbmD2tCMgdbQkI0rB9Yg2lvKrCsW2EmKZLfaiSuJdPy/tIOert54NxAS8n/Nyb/w7X99B2/joK1m9IkT0Co5JfIkpR3zx+xZ9t6PHYd4LN5fMrVbkXnhmWxMo9e+7cbSvKDvF8yd8pE9p69jV0sG54/fU2LwdNpXDobr86uo9XIDYxbvBYHz8uMG9iVW95xSZ8mOV/p5CxRk1oVipEwzv9n/UXytfvukFT/JQIi+TG2t2UmP+q6TiTfcOxF8rVjK5Kvm+T/s3kynZbdYeyovhTKkhrv98/o37weJlXG06NYrC8z+VpI/ohqebmQqz+bh9Xi6Z2rYRbe/kzyCyf6zKLNf1OzQS2S2Gk4snUhM5YdZez8lWROaqfdADBSKkNJvtL8ID9vXr54gaePP3HiJyF5oniYmZoS7PuRe0/ekjpdOkL8P/Pk4T28A0LC3LF9wmQkS5wgzEORSL6RBoVUE4MIiOTHoM6Smfzo0lki+YbrCZF87diK5Osm+RqfZwwfNIIPsTJQvnB2Xl4/xK4zLxg1fRZWb+8wQUvJ3zCkGZOuxGFCn7YkjRfCjHH/313nZ5LvnNKLZo27kaFCYwo6xef2xSPc/WBD38FDSG7/X4jJ126MRzSVSH5EiUn6P5+ASH6M7WOZyY+6rhPJNxx7kXzt2Irk6yb5Sm7vd0/Zumk9Zy7fI06KzDRs2ICMKR14++wRO7buJHUJF0pmTsjnx5dw33GBTAWdKfSXE99ugOn3+iaTpy5Ek+AvmtYrztEd20lSsBplciTH++k11m4/g1P+ohRxDGLKrO1UbNqSzKkS8P7hJVat2cDdl16kypSLatWqkyGFQ5h937UbCYZNZciZfH23XCRf30SlvJhPQCQ/xvahSH7UdZ1IvuHYi+Rrx1YkX3fJ1470fzuVSL5x+l/5e24SEhISNijJOHVLLX8yAZH8GNu7IvlR13Ui+YZjL5KvHVuRfJF87UaKbqlE8nXjp21ukXxtSUm6iBEQyY8Yr2iUWiQ/6jpDJN9w7EXytWMrki+Sr91I0S2VSL5u/LTNLZKvLSlJFzECIvkR4xWNUsdkyVdEbvfu3Rw/fhwnJ6doRFW7pvz999+0a9eOfPny/TbD6dOn2bFjB/b29tjYhH9c+28L0UOCw4cPM2/ePBwcHPRQmuGKUMZGx44dyZ07N2Zm30Y/G67OqC7Zy8uLz58/M2
rUKK2bokj+o0ePSJkypdZ59J3w2bNnpEqViiZNmmBnF/W7wjRr1oyJEycSJ04cfd/qf7Y8kXzjdL1IvnE4//dqEcmPsX0ekyU/KCgIRRDu378fI0UuODiYHDlyED9+/N+On3fv3nH37l38/PxQ+iyqrsDAQIoWLYqVlVVUNUGrepWxcerUKf5r0anKWMqaNatWjJREyu+OIvlR+SCkLHRMkiSJ+qBubm6uddsNlVAkX/9kRfL1zzS8EkXyjcP5v1eLSH6M7fOYLPkKdEUQFFmOqZciNdpIuyKryn1GB2m1sIhe2/P9rO8V0Y8OvIw5NhVZNzXV/iAkhY/CKaovpc1R+aDx7f0bSvKvnDzMPy/9KVKsKGkShn1jERL0gPHDF9OoxxBSxosZv18RGTMi+RGhFfm0IvmRZyc5f0VAJD/Gjo+YLvkxFrw0XAgIgWhJIKKSP2FIT5wqt6JavsyYm/78Lduu5XPYdtOH5q1akD9d2HA3TeA5nHM2Z+a+C+RM/v/TV6MloEg0SiQ/EtAikUUkPxLQ9JnF//NzPj0/hvfbS/h7P0cT5KfP4qOsLNvLr7G78gbv7AnxyZEoytqhz4pNza2xskuGXYJcxElWDKtYyfRZfLQpSyQ/2nSFNEQICIFoQCCikt/apSy5lFNty+XH0uznb1F2LJmBx3UfWrVtQ0Gn7yX/DAUzNmLe0Sv8lSLq1twYCr9IvqHIhi1XJN84nMOt5dWNJXg+PUTcxLmI7ZAWK7uEmJn/eU/sUYhYr1UHB/nh7/0Gr/f38Xx1ibgpSpI4cwu91hEdChPJjw69IG0QAkIguhDQWvI/XKRbj9Gs33kEC3sHstcbzbYRtfl74wTaDJnPS+8ArOKkpEm7PvRvX4MDy2awbOtOHr98w6N7DzG1S8jAWStoXT435ppzoZKfK4mGcwc30KPvaG6/+ETSDEUYOHgANcvkwSLqluPo1D0i+Trh0zqzSL7WqPSXMNDvA8/+noCVjT2JHIuJ2OsPrdFKUoT/9cNj+Pt+JHnuPlhYxzNa3YauSCTf0ISlfCEgBGISAa0l/9+balW7DLlajaJt+fyYm/jSsm4tavWaQelsSTm214MV2w7QZfhk3h5YxaQ1x2nerSd1SmXnqsdEmo8+yvq9G8mU4Eao5MfyvMawoUMp1mIYzUpnYdOiyRx/Bm3bdSBX6t8v0o+OrEXyjdMrIvnG4Rymlocn+xPLPjkJUhWKgtqlSn0SePv4FJ8/PsOx8Fh9FhulZYnkRyl+qVwICIFoRkAXyVfCdV4/fcita/9w88Fzbt++wa2XH+g4xI2gU+vDhutoXlGpSBm6ztxC2Rxvvkj+wdOEXF/FgKkHGThhEinszbh3cgcLDz6gUStXqhbOGM1oadcckXztOOmaSiRfV4IRzK+E6Gj8XpHUqWwEc0ry6Ergxd19mFon/mNCd0Tyo+tIk3YJASEQFQR0kXzzEC/a16iEb4bKNK9Zmhd3L7P9yAma9xsXjuR7UdW5CK0nbaLqX+++SP7+E2jOz6NO7+U4Zc2Czb/xOWZJctK9QwuK5UwTFUh0rlMkX2eEWhUgkq8VJv0kUhbZPjzVD6e8bSVERz9Io0UpSujO3fPzcCw07o9YjCuSHy2GlTRCCAiBaEIgopLfxqUsTg0H06VKESwDL5A3ZwOmbDtFgVRWHNi+keU7jtCy/ziCT29g5dF7NHRtT7ncjry8vY8GrcYwbeVm8qR5+EXyD5/H4vEuRk7YQPtRUyiSJTneH17x3kdDwkSJiW0TM7fXFMk3zuAWyTcOZ7WWN7fXovF7RuI0zkasVaoyBoFXD45gap2chBnqGaM6g9Yhkm9QvFK4EBACMYxARCV/6oA2XPBLiYtLA6rmt6djo2aYpy9BrrTxuHnxDLfeQcvew7G4tJnZa3eStWgZcqROwN/H9vE2Th6mDO9APMtLoTH5KU1fsGz2NJ4Exydfzgx4vXoK8dNRoXxZHBPGimE0vzRXJN843SaSbxzOai
1KLH6iVAWwtY+648KNeLv/qap8Pj7h9eMzf0Rsvkj+f2roys0KASHwGwIRlfyX10+yymM/5pnK07Vmfh5fPcGG7fv5bBqXjJmyYKHxxzFHYazeXOP6i7cE+fvw4PZdLBKkoVataqRJHA8TzUMmjVxG/W4DSB7HlPcvH3Bo/z6u3HtBvGTpKVWqJFnSJf/lPvzRuWNF8o3TOyL5xuGs1nJrXzOc8rpKqI4RmRurqi8hO/PJWHaZsao0WD0i+QZDKwULASEQAwlEVPJj4C0avcki+cZBLpJvHM5qLTd21SNz0T5GrNF4VfUbOoOr1++xff1UvVU6YepyRk1YiEuNMlSrVJzWnUby9uEBvZWv74JuHJ9A5opr9V2s0csTyTc6cqlQCAiBaExAJF//nSOSr3+m4ZUokm8czlEm+W/feTJqwiK27jzCsxdviGsfm0L5s9OvR3MK5suu093PXrCe1s1qYGlpgSEk3z65M2OHdaJF42rsOXBKJF+n3tI+s0i+9qwkpRAQAn8+AZF8/fexSL7+mYrkG4fpT2sx9kz+m7cfKFCyGebm5gzp15rsWZzw+uzDCvedLFm5lfXLx1O9cuQWAfv4+hE3RQnePz5ILDtbvUt+cLAG83j5OXVgifow4rH9sEi+kcavSL6RQEs1QkAIxAgCIvn67yaRfP0zFck3DtNoI/ltu45h++5j3LywkdixbMO0q1OvCazbtI+nN3eqM/H5nJvSuH5FDh+7wJVrdwkMDMJtVFfq1vpxP39//wDipSqJr68/drY2TB7bnfsPn3HrziNyZs/AjLnual2uLWqpM/HK5ecXQI8Bk9U6NZoQ8uXOwoyJfcjglOoHXt4+viROWw7lXxsbK+rWLEuNKiVUyb/7jwdJ05dn69oplC1ZIDRvqSrt1DLHj+gSZb0s4Tq6oQ8MDNStAMktBISAEDAAAZF8/UMVydc/U5F84zCNNpKfwLE0fbo1o0+3pj+06eWrd6os79o0nQplClOwVHOUn+3xmEnG9KmZNX8dg0fN5d2jAygzu99fly7f5q+iDfF6cTR0Jn/xiq1qXe1bu3Dk+N9UqdONi8dXqeLfZ/B0zpy/yprFo3GIZ88ot4W4b9irPoCYm5v9UH5QUDAWDgXCncmv17w/VpYWLJ8/Qs2nhCQlcSrHxeOryZ7VKcp6WSRfN/Qi+brxk9xCQAgYhoBIvv65iuTrn6lIvnGYRgvJ//jpsxpOs23dFKpUKBZum+KnLsWIge3o6FpXlfyvs+tKYmVmPl2O6ry8u5fEiRy0knwlpEaR9q9XikyVmDS6m/o2IE4yZ7UtJYrlUT9WwnGUmHtloe7Xn31bya8kf9e+k9Rt2o9X9/dia2PNwmUezJi3ln9OronSHhbJ1w2/SL5u/CS3EBAChiEgkq9/riL5+mcqkm8cptFC8pVQl1hJiuGxZtJP4+7jpSzJ6CEd6NCmjir5tauXonfXL7P+T5+9JmXmSjy4upWjJy7SssPw0Pv6+OwId+4++WEm//LVO+zcOD00nVPOGgzq04rypQuRLEOFcLksmTOUksXzqg8UXy8lFKdcqYI/nclXHhCUtk0c3Y2GdSpQsVYXSpfIR68uTaK0h0XydcMvkq8bP8ktBISAYQiI5Oufq0i+/pmK5BuHabSQfKURSjhOxzZ1VdH+/voarrNvy2zKlMyvSr5LjdKhovyt5Cs78ij//+uVJVMaLl+9+4Pkf7+F5lfJr1i2SGg4Ta4cGX5oixL/r8Tzf71Sp0qKjbXVTyVfSaeE/1y/eZ+VC0eq93nvny0kS5owSntYJF83/CL5uvGT3EJACBiGgEi+/rmK5OufqUi+cZhGG8nv1ncSazbs4c6lzcSJbRemXcpn6z328+jadjUm/leS75gq2Q/3FF5M/s8kv3mjqsRJVpxZk/vRpH6l0LIePn5OeGUrCX4VrqN8rgh+7mKN1cXBW3YcYf/W2VHcuyCSr1sXiOTrxk9yCwEhYBgCIvn65yqSr3+mIv
nGYRptJF+Jy89foinW1lYM6+9KtizpeP/hE6vW7mL+0s1scZ+khtIoV0QlX5l5z5SnNueOLCdjekdGuy364TCsrzP5iuQrM+9bdhxWd8VJ65icBUs3M2jkHB5d3/7Dzj/aSL6SRrm323cfM3V8T5Q6ovoSydetB0TydeMnuYWAEDAMAZF8/XMVydc/U5F84zCNNpKvNEQRfeUwrM3bDvH0+Wvs48SiaKFcDOjVgjy5Moe2NaKSr9FoqFCzMydO/8OwAa68e//xl5KvbLepbKG5fvN+detNZcedKeN6UCBvtnB5/W4mX8k0c/46+gyexqt7+8J9UDB2d4vk60ZcJF83fpJbCAgBwxAQydc/V5F8/TMVyTcO02gl+VF8ywatvkf/yeqbiaVzhxm0Hm0LF8nXllT46UTydeMnuYWAEDAMAZF8/XMVydc/U5F84zAVyTcwZ2XnoENHz1O/+QDOHl5GlkxpDVyjdsWL5GvH6WepRPJ14ye5hYAQMAwBkXz9cxXJ1z9TkXzjMBXJNzDnYuVbc+PWAyaN6U6zhlUMXJv2xYvka88qvJQi+brxk9xCQAgYhoBIvv65iuTrn6lIvnGYiuRHMeeoql4kXzfyIvm68ZPcQkAIGIaASL7+uYrk65+pSL5xmIrkRzHnqKpeJF838iL5uvGT3EJACBiGgEi+/rmK5OufqUi+cZiK5OuJs59fADaJCqvbdOb9K4ueSjVcMSL5urEVydeNn+QWAkLAMARE8vXPVSRf/0xF8o3DNMZJ/pKVW5k5bx137z8hOFhD2jTJad/KhfatXaKUmEh+1OA3MTEhJCTE6JWL5BsduVQoBISAFgRE8rWAFMEkIvkRBBbJ5Mrfc5OQqPiLHskGx+RsN3bVI3PRPtHqFtZu3EubzqNYMGMQzkXzqG3bufcEHXuOY+HMwTSqWzHK2iuSHzXoRfKjhrvUKgSEQPQkIJKv/34Rydc/U5nJNw7TGDWT79plNO8/fGTDiglh2r1lxxESJ3KgYL7sdOo1gU+fPmNjY83+Q2cICAykc9v69OnWNDSPcjDVxGkrePvOkwzpUzFqcAcqlSuifq7IunIQ1rpN+9BoQsiXOwszJvYhg1Mq9XPl1Np23cZw8sxlEid0oHe3pnRyravmU8J13JeMYdKMlVy5fpcsGdOyfP4IsmaOHttmfgtNwnV0+wWTmXzd+EluISAEDENAJF//XEXy9c9UJN84TGOU5E+dvZqR4xeyadVEnIvmDrft3fpOYt6SjaxZPIYaVUpw+eod8jo3Ydu6KZQvXUid+W/VcQTb108lZ7YM7Np3grrN+nHl9Fqc0qakz+DpnDl/lTWLR+MQz55Rbgtx37CXmxc2YmZmSq4iDXEukpuh/V3VrTGVk3TXrxhPyWJ5VclX2jV36gCSJklA7cZ91NNtN6+eGMW9+WP1Ivm6dYlIvm78JLcQEAKGISCSr3+uIvn6ZyqSbxymMUryg4KC6TVwKnMWbVBn7p2L5KFk8bzUqlaSuPax1XtRJH/PgVPcOL8h9N7KVOtA5gyO6ox8ZZeu5MuTlWH9XUM/r1irCwXyZWNovzbESeasPhCUKPYlHEiJ+7dP7qw+FNjaWlO4TAvePz5EnNh26ue7958kWZKEZHBKrUq+8nBR36Wc+tnCZR7qrP63bYnibg2tXiRft54QydeNn+QWAkLAMARE8vXPVSRf/0xF8o3DNEZJ/tfGfvz0mcPHLnDs5EW27jzK6zfv2bTKjVLO+VTJv/fgqSrqX6/m7YbxyctbTZMxdy015Ob7SzmoauywTiTLUCFcJkvmDMXKylIt/9W9vT+k+Rquc+bQMvLnyap+vnLtTgaNmMPDa9uiuDd/rF4kX7cuEcnXjZ/kFgJCwDAERPL1z1UkX/9MRfKNwzRGSv63jVbWYTdpM4SrN+5x6cRqVcJv333Ezo3TQ5MpnwcHB7N68Wgy53WhbctadOvQ8Id7f/X6PUmcynHx+Gpy5cjww+drNuyhS2833j
zY/1PJ/3YLTZF8ww9iWXhreMZSgxAQAjGHgEi+/vtKJF//TEXyjcM0xki+EjajLHhtUr8SxYuEjcdXFtIqsfrKDLsi+Urc/e2Lm0LvrXiFNuqi3Akju1C1bncSJYzHollDQj9//PQlKZIlwtTUlDjJijNrcj+1nq/Xw8fPcUyVTI3VL1S6hVpPwgTx1I83eBxQQ4WKFsr1wz75IvmGH8Qi+YZnLDUIASEQcwiI5Ou/r0Ty9c9UJN84TGOM5CsNbdZ2KAePnmPymB7ky5MFRfDOnr9G174TqV7ZmTlT+quSv3jFFiaM7EqLxlU5cuJvKtXuwvG9i1TRVx4A6jTpy8ZVEyhbsiAnz/yjir8Sc6+IurLwdsuOw2xdO4W0jslZsHQzg0bO4dH17cSysyFn4Qbkyp5BLV/Zq79KnW6sWTImdOGtzOQbd+CK5BuXt9QmBIRA9CYgkq///hHJ1z9TkXzjMI1Rkh8YGMT4KctQwmYePX6BqakJaRyTq7PuXds3wMLCXJX8p89fkTxpIpav2aEulu3RqRE9OzcOvdfpc92ZPGMVL1+/I3XKJAzo1RIlJl+5fH391S0012/er/53zuwZmDKuBwXyZlM/v3n7ofpG4fS5q+obgd5dm9K5bb3QLTRF8o07cEXyjctbahMCQiB6E4iI5N++foVXb98TrAl7oKCFtQ2O6TNg4fWat/42ZMyYCrNocttvH93gSWA8/nJKYrQWieQbB7UchmUczmot0fEwLG1u/6vkf7+XvjZ5/0tpZOGtbr0tC2914ye5hYAQMAyBiEj+rs3uXLl5n8BgDaePHSRRtoKkjWeHTZy4FC9TEs/TWzn2JgX9+jTGxsQw7f2x1CDu377OW88AsuTIRSxr8zBJTq6ZwlbPzIxtXwFjNUkk3zh9L5JvHM4i+UbkHFVVieTrRl4kXzd+klsICAHDEIiI5L989oSPn7zRhGhoV78SRfrPpUnO1JhbWBLPLoTDy6ay/Xk6Zk3vjp2xjBpftqxZxOW7n2nZsRvJHazDgHp0+RS3/eJTJn8GkfxwhpBGo2HdunXEixePatWq/XSQmZmZqesQo9Mlkm/E3pCZfCPCjoKqRPJ1gy6Srxs/yS0EhIBhCERE8r9tQcX8TpSbtpPuhb7sLBf04REec8Yz56gvFUqk5umLDyTNVIDa1crjlDw+Ib6vWL9mHWeu3iXYxJrcxavSoGpRLMN4o4Z7Vy+wa+dO7j57h7V9Eqq41Cd/FkfMA16zbNEyrj18jcbEjIw5C+JSqSgPzu9i7KyVvHrvT/kmXWlUoxxpE385B0e5jq+ezn7fzAxrVQYfr7d4rFrGuZuPMbGMTd4izlQoUwIHO0u9wpWZfL3i/GlhIvnG4azWElMl34iIYnRVIvm6dZ9Ivm78JLcQEAKGIaBXyZ81hKmHvGjZuhH2mlfquTSla7ekTtUybJw5iH2PzalWqgDmfm9Ztnw9rUbPp3KOZKE39u7BJeYvXMEH88QUyZuZ19eOcfCuFcNHdOfOtmksvgQtaxTF78Nzdm7bQ6HGvamcMYR5CxZw54kPDdv3pkiuDMT9RtqXdavJTK+SnJnXjnNbZzPS/TrNGlXD+9ktTl1+iHOtFjQol1uvs/wi+YYZq9+XKpJvHM4i+UbkHFVVieTrRl4kXzd+klsICAHDENCr5M8ey8YHyZg6rS8O5l6MHtgfh/TFaFbzL1ybdqRaNzdcSufFTOPHtC4uXErbnhV9/x8icnz7GlbvO0uVxh0olzstfu/vs2X33xSrUBEL75e88fLH8+1LXj5/wo717tg5d2V295KsXTKTv2950bH3AFLFtwkDKlTy57pyfOUIui+/y5SpY8mZMg5PnjzHOn4y0qVIKJIv4TqG+QX7U0r9r8/ke370Il7Kklw5vZZsWdLptVujw+JgkXzdulQkXzd+klsICAHDENCr5M91+yYm35exg3pjmyIPzYrExaX1EN5r7IhlY4WJCXx8/5aEpbqxb1qb0Bvbtm
YRey/eoWWnPvyVygFCNPj6+mFhZc3dg4vpNM6D4hXLk9QWju/ZhkXhdizsW0EryT+3sDMf3zzCY/0ajpy8wGuvEIpVqkXDujVJ6WCrV7gyk69XnD8tTGbyjcNZbzP5u/adxG3aci5cvEFQUDDp0qagReNqdG0m8/uoAAAgAElEQVRfXy8LPnx8/VjpvhPXFrX0TkZp7/FTl9T9+O1sw84kRLSy79up7K/v7x9I1sxpI1qU3tKL5OuGUiRfN36SWwgIAcMQMIrkV05Dw4YDcB0+ncLZHTFVFuWamGBuaUvc2P//e7l/4wo2nLhOw7bdKJ4xMYR48/fZq6TKmJ3+jZ1J1HAK/avnxs/zFYvH9uN2ynpaS/6Z+R15dOcaQXaJSWRnxv1/TjB/uQeOxevQvUUl9BmVL5JvmLH6faki+cbhrBfJn7toI516jadbh4bUqVkGG2srTp65zKCRs6lUrgjL54/Q+W72HzpLv6EzOH90hc5lGbKA6NhOkXzdelwkXzd+klsICAHDEDCK5Depx4zujTjqk545EweSzj6ExZOHYlqoAy1KZQy9sbc3jzF+0mwsMlWka8saXN06gxHrHjNz1ijmd67MlaS1WDOmDZf3r2Lg2CXkqDeAxQOqs2H5bA5ceEK7bgPIkdoBE/Up4sv1NVzn5KzWLB/ZBvfHqVg4dSBBD84wbuoSHJ0b0LdNZcJuvKkba5F83fhpm1skX1tSekinS7jO23eepMpSmb7dmjG0v2uY1pw+d4Wxk5awYsFI4sS24/HTl3TqOUE9edbKypLypQuph0/Zx4nFjj3H6dLbjRGD2jFh6nJev3lP3r+ysGrRKPWk28p1uqpvCJQHCOUQqozpU9N/2ExWrt3F+w8fyZjekcljulPKOZ/ahnzOTanvUo59B89w+dod4jvY475kLFNnr+bgkXMEB2tYOHMwZUrm59twnTSOyYiVpBhb3CerbX/+4g3x49uzfN6I0FCeFe47GTNxMQ8fPydxwvj06NyILu3qowj+9+2ct3iTemDX1738Zy9Yz4x5a3n67DVO6VIwYmA7qlYsHtrmxvUrcvjYBa5cu4tyIJjbqK7UrVVWp14WydcJHyL5uvGT3EJACBiGQGQlv3aJXJSesIEO+Z3UhgV5PmH7omnseZWGieM7Ymfiy6SRg7FN9hcNG9TFTvOepTPHM9d9Fx+DbKhUpx2DejUnYZidbTTcv3ySeTNnsvf0VZJlKUaffj0pnD0tPs/+oWf3Hhy68pxchcpRNK0FTy1yMrp/M15cPs7kCeN4YJeXwb07UiB9wlBYq/s1ZP7n4hye2ZbP754wf9IYVm47gsYuKbWatKR1o9oki6vb2/fve0Yk3zBj9ftSRfKNw1mtRRfJV06abdN5FB+eHMLWJuwet9/eQkhICDkLNyBf7ixMGddTPWG2XvN+xLWPjceaSezef5KaDXvRrqULk8d2x9vHl+wF6tO1Q331DYEi5yvdd4XO5C9c5sHAEbM5vHMejqmSMXP+WsZNXsqLO3uwtLSgYKnmfPz0mYPb55IooQMlK7fl+s37rF8+npLF8zJ0zDy27jzCxeOrw0i+U9qU2CQqTNmSBdiwcoL6cFKnaV9VuJV23r77mEx5arNplRsVyxZWT8MtW70DJ/YtVu/t+3Z+G5O/weMArl1Gs23dFDWtsntB/Rb9OXNoGXlyZVbb/PLVO/Z4zFQfYmbNX8fgUXN59+gAyi9EZC+R/MiS+5JPJF83fpJbCAgBwxCIrOQbpjV/Rqki+cbpR5F843DWWfKHjZ2P+4Y93Lyw8ZctPnP+KoVKt+DNg/3qrLpy7T14moq1uuD59DAnTl9S//vtwwOhnzd1HYKdnQ1zpvT/QZ79/AL47O1Dgvhx1bLevf9IAsfS3Di/gUwZHFVhLlb4L3UmXLmUUJ+de09w+ZS7+v+VWXflocLrxdFwJV+Zea9dvZSadvGKrep6A6Vs5Q2A8pYhaZIEofebvWA9OrapS7tWtX8p+c
r9ZXBKxbQJvULzFi7TkiIFc6rtVNqsyP+MiX3Uz+8/fEa6HNV5eXcviRM5RHpEiORHGp1Ivm7oJLcQEAIGJCCSr3+4Ivn6ZxpeiSL5xuGss+SPmrCIZau3c+fS5l+2eM2GPWo4jiL5X697D57ilLOGKt7PXrymdqM+eL86Hvp5604j1RCdpXOH/SDP7z98YuCIWWpoi4+Pn5pHCQdSZuZz5cigCnO92uXo3rGh+pnyMHLqzGV1lly5lIW2JSq5EvThbLiSf/bwclW4lWvl2p0MGjGHh9e2obyRUMKJlPv58OGTOsP+4tVbxo/oHO4bh29n8jPmrqWG9XR0rRt6j03aDEFZrLtx5QS1zcqDRe+uTdXPlZCelJkr8eDqVvVtRWQvkfzIkvuST2bydeMnuYWAEDAMAZF8/XMVydc/U5F84zD9aS26hOu4b9hLE9fB6gy8Elv//aWEuVhYmKtS3LXPRF7f3xeaRNl5Jn2umurWlUrcukvjvnx+eUwryVdm+e/ce6KGzSiz6p+8vLFP7hxG8pWYfCXU56vknz57hd2bZ2gl+Urcv7Im4HvJV2b1lbcCOzZMC30IyF2sEU0bVNZK8ru2b0CHNnVC77Fx68H4+furMfuK5LvUKE2vLk1E8sMZrcoDlfKQZexLJN/YxKU+ISAEtCEgkq8NpYilMZjkhwTx9MEdbt15gJdfIDaxHciVJw+J7G1/2OdfExzI+zcveHD/MTbxk5MuXRpswlldrNFoWLduHfFkn/yIdfJ/LbUukq/EvSfPUFENVZk4ulsYdP9cuU25Gp24cHSlOttdoGSzMOE6SvhMtXrd1XAdZWY9IpKvhLH079mC1s1qqHUqi2lLV21vcMlv1XGEuiXmyoUj1XqVhwvl/kcObvdbya/s0hUl5v/bcJ38JZqqi4XHDe8skv+bXzyR/P/aN5PcrxAQAr8iIJKv//FhKMn/8PQaixet5NazD9jFsuHdi2dkqdiKXo3KYm5mGnoj/t4fuXzuCEdPnOPM6QukLVKDzp1cSf7jHCoi+frv/z+yRF0kXwGydNU2WnUcSduWtWhSv5K617wi7YNHzaFR3YpMd+utclNmvJXZcWVhrafnZ3VBa+pUSXFfMkZdePsryVd2qRk5YSGXTqwmlp0tFWp1IlWKJGooz83bD+k9aBr7Dp1RF8cq23Yqs+KGmMlXwn7WbdrH6YNL1cW4yqJjZUFvtUrOTBjZhe/bqcz6f91dZ8uOI7RoP5ydG6eRO2cm3DfuRXloUEKMlEO4ZCb/179eIvl/5NeP3JQQEAKRJBCdJf/Nwxvc/2RBjoyO2Fjpc5PLSMLSMpuhJP/YpvmsOHCTui3a4pwjNX97TKfDglts95hD0m92KfL2fM3x/du5+8KTWxcvYJ6yMD17dxTJ17L/JFk4BHSVfKXIA4fPqrHq5y9eV2e6lcWvSlhKyyb/a+9MwGwu2zB+M8meJVmzVkRlrwgJWaIQX4U2UpYSlVL6soUkZUkpLSQlS6KvUGSpyJqIbJU1spSlSZYs3/V7+euYZsY5M+ecOWfmea9rrsF5/+9yv38z9/O89/M8/5S9Rp7T8dEBWrBopQuuJXUkxBjSfi6Sj96+VsP2LsCW7DSZMmVU6w69tXXbTlUsf7lGjegp4gOmfDLXSWm6Pj0kJCSf+e9o3V2Ll65W4Yvz6aXnHtUv23fp0acGu/SfyG181zn54zlnpdAkgHfYa+O1f3+sSpcqruf7dFKdG65xp2Ik30i+/YAyBAwBQ8BfBCKJ5O//daOW/LBZhS65QlcUz6dvxg/Vu+uyqdfDLVTgwnjc0P5uMsz9QkPyj+uDV/pp+W8ZdX+7jipVMIf+3jxX1zTsqdfmzFKV/P9kJjxx/JiOHjmi804c0OiXh2rD0aJ65HEj+WF+DVLXdOtn3atLK7dTzHkJp8BMXTtOO7s5fuywflr2hkrVHRP1mzZPftQfoW3AEDAEgohAJJH8zctmaegHs3XNTS3V6sZymvXa0x
q8IodG9u2gInlPZdSLhhYakn9U7wzurfV/5tYDHTqqRN6s0u6lqnJ9e/WbsUA3Fo8n1/+hnXpzyGAj+dHw0kT6Gjd/0115i1yrLDkKR/pSbX0BIvDXgW3avXWxil03IMAnI6+7kfzIOxNbkSFgCKQcAoGS/F9WL9C7E6Zo/dY9ypIrnxq2aKdGVS7Vnq2b9NH4d3UoZ35tX7Vc+46cpzqNb1PDOtWUK3MG7Vi3SO+N/0hrNu1U9rzFdHeHh3XNJf8UrdqwerneGvqcPl/2k/IWKasnuj2mIysmavjiw6p1aSZxi6+sedSyYzfdeFUBHTtyUD8snqvxU2doV+xxlbmmtpo3bqDi+S5IOTBPzxwqkj/6pd7acNCX5C9zJL/vjPmqayQ/xc89VS9gz4YJOnF4u/IVr5mq95kWN7dr05dKn6mQLip5R9Rv30h+1B+hbcAQMASCiEAgJP/Y3h/0SJfeyl2hvupee6X2blmiEWO/UM8R76jgX1v19MMPSVfW1z2Nq2nL0tma+9MxtevUUTcUPaquXXvpwvINdMPVl2nzgg/0/sITGjP2FeXLeipgNPbAPi2cNk6vT5mvcjWbqH2LBlox8QU9PWaxWt3fVlVKFdSiiYP18Z7K+mzc09q9boEGPv+6ytS5TWXySTM//0IXl62le1o2U87MMUFEKPChQkPyT+j94c9qxd7MeqBdR5UscIGOb/1ales/qVfmzlE1H7nOmRWbJz/ww7Mn4kfgyJ87tHnhU7q0cnuT7KSil+SUVGekilV9XhmzJT3PfqRAYiQ/Uk7C1mEIGAKRgEAgJH/hmGf11Mf7XbHFK4vn19+HDqh/26baVLGbnm1YQM/16a8ydz6jTo2u1O5lH+vpYTNU7552KrF3lp6avFMvv9hdVxXPr8MHtqtJvbrq9s481YWhn24/L5qm58d8pusat1abmyo5uc7z36TX8L6ddXmRPDr4zXCVve8TzVkwUT+Mf15vLs2gkSN66qJM6TTprWFauuu47nugg64oeKrAZUq10JB8afaEVzVu/jbd3/5BVb2yiNbOeE13DJyvmdNHKX+WjP/erpH8lHoFUue8u9aO1onDu1Tg0rqpc4NpcFe//jRL6TPlU77SbVLF7o3kp4pjtE0YAoZAkBAIhOSP79lJ42LLa2TPViqQK4tOnjyhqT2b6YXtdTS2aw29NGCQKrTtq3a1Suj3FdPUbdBU1WzZVjHLx6n/u59KGbOcTvV4UoePHFXPd2formtLJEryz9Lkfz9aJW59W5/O+1BzB96nPpNXK1+enEqXTjpy6C9dVruFenXroqt9ZEBBgimgYUJF8mN3/KB+fQfox/3nqXSJvFo0Z7aubdNPve+rr32rPlH7fh+p/8jRuiLP6XSaRvIDOjfr7AcCaPOz5SikPEWq+tHbukQyAr9tXag/D2xPFVp8D2cj+ZH8xtnaDAFDINwIBELy54zopme+yqT3X+6q4nlz6OSJExrQ+kYtLNZFw+4oqkE+JH/viml64jTJz7dxsp5fmk1vDuykohf9o5mPOe88pYehn27xefLjI/kzvp6i7975r8ZtK6X3h3VWptNDpEufXjHp07sq8inZQkXydfKk/vx9u5YsXqqNuw6oSJlKqlHhcmXOmEFHftuoqbNXqf6tTZTz/NO7P3ZQq779VnuPX6BKV5dXtgz/RsXy5KfkmxKFc/99eJ+2L39BGTPnUN5iNUy6E4VniERn9+avdeTQARWq2E0ZMuWKwl3Ev2Qj+anmKG0jhoAhEAQEAiH5x/asVMu7uqjMrV3U6fba2rJ0kjp0e02DJs1Usb+36YUESH7zijG6p82TurJpRz14ez39vWuVBr82RY8+O0CFc3mMVNr+3Uz1f+UDXXZ9c7W7va7mj+6joSt9suuc9uTP+W6ODq2doW49RqjJgz3U5LriWjhrug5myq+6Deorj0/O+CBAFPAQISP5Aa/k3A8YyT83RtYjHgSQ7uz/Za5y5iuv7LlLKGPWi4
zwR/CbArE/cnCPYvdu1P5dK5Tz4lqpRqLjC7uR/Ah+CW1phoAhEHYEAiH5LG776vl6aegr+mblj8pWqLQe7NpDTWuU0vYNa/T6iJG68o7H1bJqYe1fM0fPjZypKo3vUJM6FbTj+3ka8vLrWvj9RmXKc7FatX9CrW+pqgz/FGrViUO/6aP33tTIcbN1Z+f/qsD+pRq3Ibue63qXCuXJLq2bpBrtJuiDGR8qf8yf+nbe//TKm2O1YecfurxiLT3Qto2qlL1E5/mMGXZAKe75zjvKly+fmjZtmuj0q1ev1ty5c9WsWTPlyZMnJZZqFW9TBPVUMinBuH/s+FoHf1uhIwd36MSxw6liZzEH/3b7OJ41nruvKN1h+vMyKWPWgsqap7wuKFgjVQTZxncURvKj9AW1ZRsChkBIEAiU5IdkEalsUCP54TlQfp+nO3ny5MnwTGezpBkEevc+tVXve5rZePRv1Eh+9J+h7cAQMASCh4CR/OBh6Y1kJD/4mCbktDOSHx6s084s+/dLxYuf2u+mTVLOlE3VlXaAD85O27dvr5EjRwZnsABG+fvvU7c/1gwBQ8AQiCQEjOQH/zSM5Acf0/hG5Pe5kfzwYJ12ZsF736fPqf326mXe/Cg7+ZiYGB0/fjzsqzaSH3bIbUJDwBDwAwEj+X6AFGAXI/kBApbE7vw+N5KfRPDssXgQ8Lz4fKfhxTdvflS9KpkyZVJsbKwyZAhvPIWR/Kh6TWyxhkCaQcBIfvCP2kh+8DGNOyK/U7Nnz24kP/RQp6EZfL343rbNmx9VL0Du3Lm1adMm5ciRI6zrNpIfVrhtMkPAEPATgVCR/F1bN+rX2OMqWqyocsVJaXnyxD59PedblatWSzkyx/i50ujpZiQ/9Gd14MABFS9e3Eh+6KFOIzN4XnzI4ZYtpzZdtKh04IB586PoFShatKjmz5+vwoULh3XVRvLDCrdNZggYAn4iECjJnzZ5nPKWr6mKJQoqJpGiU19OGafZP/+l5rffpnJFznaqnDz2vVo06qY+Yz7W5fkz+rnS6OlmJD/0Z7Vt2zZVr17dSH7ooU4jM7zzzqmNtm4tV0ObRuIm339PI1BE8zYrVKjgchiXK1curNswkh9WuG0yQ8AQ8BOBQEl+53tv1ZV3Pa02tSspQ0zCCemnjR6uqWv+Utv2D6jKpbnPWs2JvxerSqk7NfKrVapwcWY/Vxo93Yzkh/6sVq5cqdatWxvJDz3UaXAGX5KfBrcfzVuuV6+ennjiCdWtWzes2zCSH1a4bTJDwBDwEwG/SX7sBg0eOkpvvjNOOYpdoar/eUJDOtbS1uUzNXzUBG3a/YeyXVRcTf5zp265obw+f2e4PvhypbJmz6z9O7YpJmdhte3cVTWvKqr0x5ecIfnlC8Ro09olGjVqrNZs/V2FS12jO1vdoUpliirmtD/Nz61ETDcj+aE/ilmzZmnQoEFG8kMPdRqcwUh+1B46ln+tWrXEL7ZwNiP54UTb5jIEDAF/EfCb5B8/rF937laXti1V+rZHdfdNN6pE/kx6sFVTXdawk+pXLKxv58/S/NW/6NEefbVp+jt6ccwM1b3jbjW45jIt++RNjV2aXu+PfUkXZ191huTnPbpFw154VhlLN1CTaiU153+TtC9LMd1/X2tdkje7v9uIqH5G8kN/HGPGjHHVgi27TuixTnszGMmP2jPv0aOHy6zTs2fPsO7BSH5Y4bbJDAFDwE8E/Cb5p8dr2/xGlW/bT+3rX6PzY9Lp9z27tW/XDv289RetXPGtFq3ZpI49XtDRbybqwxX7dF+H9qpW6iIdjV2rm+veqWfe+kTXl95xiuTPXar0m6aq35Dp6jVitMoUyKIV08fq1c/W6fZ771f9qy/xcxeR1c1IfujP49lnnxW/V43khx7rtDeDkfyoPfNRo0bp66+/1ujRo8O6ByP5YYXbJjMEDAE/EUgOyc+QThrataUmrD6mW+pWV+yuLVr76+96sOcgHVs4KY4m/281rVlZ9w
ycpKaV9p0i+bMW6PiSV3RTp9eUMWsWofA/efKEYorU0LABz6jJ9Vf4uYvI6mYkP/Tn0aZNG9WoUcNIfuihToMzGMmP2kP/6quv1L17dy1YsCCsezCSH1a4bTJDwBDwE4GkkPxy9/VVh/rXKubIYl1d/m6Nnve9yhbIpMVfzdJr701Qq8efcyT/o1V/6L727XRdyTw69uca1a/VQr1GTVeNM578xUq3YaL6vvKNBr79pi65MJNOnjim4yclCh2lTyR7j5/bS5FuRvJDD3u1atU0YMAAI/mhhzoNzmAkP2oPfc+ePSpZsqT27dsX1j0YyQ8r3DaZIWAI+IlAoCS/a+um+rt8C3W+vYGK59mrulVu1k2PvKg6pbNp2oTRmrvxiB7t86LSL5usQW9NVu2WbdWsVgV9OW6Ixq44Xx++95IKZvtHk5/n8I8a2LuHsle4Vfc2rqGdaxfq59hMuqFOPV2S/wI/dxFZ3Yzkh/48cuXKpQ0bNhjJDz3UaXAGI/lRfejkyp83b54rpBGuZiQ/XEjbPIaAIRAIAoGS/NWz3lP/V8fp/KodNKbbLVr86Vsa+vZkHUifR+XKV1DWDOl09U0tlGXLV/pq7Y/6848DWrNilc7PV0qPdntC15a6WOlOrNY9TZ5Wj1EfqmSedNq8ZolGvT1KS9dtV57CV6jVPfeoVpWyypwh4RSdgewx3H2N5IcWcQpa3nDDDdqyZYuR/NBCnUZHN5If1QfftGlT3X333WrevHnY9mEkP2xQ20SGgCEQAAKBkvwAhk6zXY3kh/boJ0+erLFjx2rq1KlG8kMLdRod3Uh+VB98//79RUnsF154IWz7MJIfNqhtIkPAEAgAASP5AYDlZ1cj+X4ClcRu3bp1U44cOfTf//7XSH4SMYzIx7755htlzJhRlSpVStn1GclPWfyTOfvs2bPVq1cvzZ8/P5kj+f+4kXz/sbKehoAhED4EjOQHH2sj+cHH1HfE6tWrq0+fPqpTp46R/NBCHd7RR44cqfz586tRo0Y677zzwju572xG8lMO+yDMfOTIEWXNmlWxsbHKnDk8JdWN5Afh4GwIQ8AQCDoCRvKDDqmM5AcfU2/EQ4cOKXv27Dp48KBz+lqe/NBhHdaRT548qUceeURly5Z1BK1FixZhnf+syYzkpxz2QZq5bt266tKli26++eYgjZj4MEbywwKzTWIIGAIBImAkP0DA/OhuJN8PkJLY5dNPP9WwYcM0a9YsN4KR/CQCGWmPHT9+3JH8Cy+80GVF4QdTirU+fU5N3atXii3BJk4eAgMHDtQvv/yi4cOHJ28gP582ku8nUNbNEDAEwoqAkfzgw20kP/iYeiM+/PDDuvjii/Xkk08ayQ8dzOEdGS/+7t273aHu379fl156qV588cXwLsJmS1UIrFy5Us2aNdPPP/8cln0ZyQ8LzDaJIWAIBIiAkfwAAfOju5F8P0BKYpdLLrlEH330kcqVK2ckP4kYRtxjkHzI2NNPP63Dhw+rcOHCevXVVyNunbag6ELgyiuv1Ntvv61rr7025As3kh9yiG0CQ8AQSAICRvKTANo5HjGSH3xMGXHx4sVq27atVq9efWYCk+uEBuuwjAq599qaNWv0zDPP6NixY+6q5rXXXgvLGmyS1ItAjx49RBBuOFJpGslPve+R7cwQiGYEjOQH//SM5AcfU0YkdSbBtn379jWSHxqIQzPqiRMnHHk///zzz5rgjz/+cEG26dOnFyQfT/7vv/+uEiVKaMyYMUrnBcCGZllRMyrGkGER+HGtWLFCFMbavHlz4A8H+ISR/AABs+6GgCEQFgSM5AcfZiP5wceUEYsVK6aPP/74jFSHfzNPfmiwDuqoe/fuFUSVoFrftnDhQlWoUMFZbqtWrVL37t01b948kSN1+vTpjvyndXILbr/99pvy5MmT5rFIyktZrVo1V1CjYcOGSXnc72eM5PsNlXU0BAyBMCJgJD/4YBvJDz6m06ZN03PPPacFCxacNbiR/O
BjHfQRP/vsMxUpUkRlypQ5a2xSJeXMmVPXXXedK3ywZMkSlzapVKlSTmJBIQSXJ/W0Rz+tebTZ78aNG5UlSxZXPyCtGzxJeTFff/11zZ07VxMmTEjK434/YyTfb6isoyFgCIQRASP5wQfbSH7wMb399ttVu3ZtdejQwUh+8OEN7Yh45SluUKNGjbMmGjdunJPnPPTQQ+rdu7dGjRqlHTt2uEJYPXv2dPKe9u3bu2chuNu2bVOhQoXSjIcfmdN3332nAgUKuC8j+YG/pxTWyJs3rwvkKVq0aOAD+PmEkXw/gbJuhoAhEFYEjOQHH24j+cHFdMuWLSJRxq5du5xT07eZJz+4WIdktMGDB+uqq64SBYq8BoEllzne6qeeekq9evXSG2+84VJpxsTEuCJGfMcw6NixozJkyCBP3nPgwAFHen2bF8SbEBEmJoA+jJMSjf1CBDFg2Jc/jWfQlbPXuJ58PqOxXyP/iaP52GOPKVOmTO4qMFTNSH6okLVxDQFDIDkIGMlPDnrxP2skP7iYEo+JQ27IkCH/GthIfnCxTtZoCclpkOJUrVpV9erVOzM+xa/Q4GfLls1l1SETyltvvXUWyYeY8zVixAgXkIFW64orrtD69evdeL5t3bp1Lr8+JDq+9tdffzmST6BvuBvz/vnnny6g5Prrr3cpQiHm55IfgREppfBAFyxY8CzZEoWe8uXLpz179rjbDWsJI8D7cs011zgvAWQ/FM1IfihQtTENAUMguQgYyU8ugv9+3kh+8DAlbTpOTLgOUu24zUh+8LBO1kgQ1p07d7rDIlCUIFsCZ2lIcSDl9evXP4vkd+nSRRdddJEj+I8//rjee+89R1p5Dg/+r7/+6rT8eGArVaqk2bNnq3z58s6jf8stt5zlwf7222+dARAfiWNtGzZscF58jAVvXd5iSLNIn6QSwHORdQyVDz74QKNHj3apQS+77DJnvEA6SRea2O0Ded558TEOvHXjxX/++eeddg2yX7Zs2WSdXVp4uHXr1rr88svdrVEompH8UKBqYxoChkByETCSn1wEjeQHH8F/RoTLrF271mVUjK8ZyQ8l+gGMDfFctmyZI+NESSupGhkAABhzSURBVFeuXNlp6fnCUw+5vv/++88i+Z06dXKedYJsW7VqpcmTJzvyC5kl6w4afGQ+3k0AQbmQYl6Im2666YxXnrkJ4sVb36JFi3+tGhI+duxYbd261VXVjSvZIZUnYxAEHGhj7O3btztvekJkfd++fe42ggBkCjPVrFnTGT38G5lfEpLvgAUkP1euXGrevPmZfnj4IavklMUQMpJ/7lMjtgEjk7MKhWTLSP65z8B6GAKGQPgRgOTjROPW3FpwEKAiK5VZSdGcWCMWjN/7tWrVUu7cuYMzeYCjwG1YAwqCxo0bJ/g0PCSuAzTAqQLuzu9NuNPnn3/uOJ+R/IAhDN8DEM8vvvhCN9xwg6tWi0efL15uSD6SE0i+9xLR/8EHH9RXX32lH374QS1btnQkn3+HLBN8QWYdCCzPEXlNlhT0/fzQatSokS644AK3QaQwGAIEbvBZ3AYRJ5CXsR944AEVL178rC6Q8JdfftndKAT6kkPEJ06cqFtvvdWRR573HYO5f/rpJw0YMEBIirht+OSTTzRnzhy3Jjz8PEc/vnyf5T8AEiausx5++OEzUiT2gbHyxBNPuOw7V199dYIypfC9AZE/U5s2bVwNBs452M1IfrARtfEMAUMgGAjw847feaFwbgRjfdE4BrVX4Dpxk4nE3Qv9kOnizITPpESDV6AaIIshKaUTailB8il6BYdB5ZBQM09+Srw18cwJycGzTADtiy++6KxcpDgQ0GHDhjkJD+Tf+0EDOSagdubMme6QIflYxxBYr0H2GQcvP31XrlwpiBrXO1iknucdkg7hJUUnQZbxkXw85szF+pD6eIG6nsyIUsrIhbC2/Q1k5VmyA7EW1s/fkdD4Fv3CiobQ45HnJgHDB5JP7AB/Ri8OJkSXkw
Umc+bMZ5Z/9OhRde3a1f1wwEjwsAMjsOazd99918l5MDKsJY4ARhZyL7AmniGYzUh+MNG0sQwBQyBYCOBNJqjRWnARgJucyzsP7vAbVAYp2SDwqCkSW2+4ST6GB85fkosgpTWSn5JviB9zQ0ibNGniCD3654oVK7qDK1mypPPAU9EWj7dnzdIfso30hoAL9PlxST7T8h8JaQtBush3mjVr5uQ9EN3OnTu7laHjr1Kliu677z4nY4krf4F8k39106ZNGjp0qCPleP/R4kPSY2NjnSHBZw0aNPA7+w3zoiObNGmSI4/o4yHzkHevYcxg3BBHALlkXrCA/BMDQKYgyPv48ePdrYfvs6wP4wEvDIaNr4F022236aWXXnL/Dn4YQNbOjQDGEedNbEQwm5H8YKJpYxkChoAhYAiEE4Fwk3wct8i54XOJNfPkh/MtSGQuJCV47fv16+d08chOIPn9+/fXl19+6Srafvjhh2eCW+lPmkwCYiGpEHQ87V5qSG8qiC0EHTnOzz//7Dz5BKFSARapC420mxgTePExMLzsNd4YjEmsABY1RgC6eBqeXbzsBPeSsx9ZkJefP+5W40vRuXz5cicxQqaEDAdZDsYK+nmvQdS5JWDvZMjBUIH40/Da79+/32FFCql27dq5DEFeO3jwoMOSGxGKOnk3BDyPBcy8yHYg+GCIdzruLQTkk1sUPgtUihQhr1ZQlwGmvCu8O7xHwWpG8oOFpI1jCBgChoAhEG4EwknykWnjwIT/nSvjoZH8cL8J8cwHiYZoE0ABUcWbDemBoA8fPtyRa4JxIaLegUK2ILQQTwgXOfR5xiPT3jTIWvDkI1fhhaA4FvMhT8GDTuPaxyP5d9xxhwvO5TmvWi79CeDlea6rIN54dJEVlS5d2lXaZV70dawzbjEG5kBqA9n2ldNgvHArAHnHcGB8bhrQz3s3FlzX0YfMQDzL3J4kif9UBP2yVuITMHwIEPaIOh7nu+66y9028Lw3JjiRvWjRokVuXm4ECEouV67cWSTfyypEui90mfHtKwJen7AvAf0fsQ5xy2cnZyFG8pODnj1rCBgChoAhkJIIhJPkExuAUxWn7bmakfxzIRSGz/EsIx3BS05gK5VrIbK8NHjXIdmeJp3rGRrkFpJPHyQneLt99fjesjECKAaFXh6vO2SffpB8vOgQWQwM0lI+8sgjLkDXI/8E/GJoMD+SF7zoXnArBgmZaSDUEDS+e/owAnrjesQxBGjcTuBR5+t///ufI+GQZ2Q+kPzq1au7LC433nijGwNpDp8xNyQfmZIvySeegDVyk4GsCcOBvdCQ93DbAWZ85hktrJeMO6yJ2xMwmjJlirvxiBv0S0AxYyJ3oq+1UwiQFQGJV7BSahrJtzfLEDAEDAFDIFoRCBfJh+/hoJw6dapfUBnJ9wum0HaC4JDLnnz4eJOR5kCsIblIZyDBpC5Es+5lxCHi3CP2aOuR+cSV6nirhgSjoUbXjlcakkyk+Pz5850Xn4Z2n8BV5EFYhxQ/wivPfBBrvuNFT6xBkPGIEzUfV9rCS0kAExIZbgHI5OOlwCSohj0S+AuRRkqEvIj9Y/AQjAtGcUk+cyBR4jnSiTI3Lz7zg8Urr7zicOVm4v3333c3JcyDocKNBHh40iPiHUgr6q2bPkiBkA9hDFFdGA2cv9V2Q/vGpPzoP/74o8vcxDuElCu5zUh+chG05w0BQ8AQMARSCoFwkHzqGeEI/f777884M8+1XyP550IoDJ9Doh966CEngYjbILp40fG2Q/Q9ko8HnNz2yHaQ0hBhHVeq440FcUUSkyNHDucZpx8SHggaxJuxCeKFXKPzIs0mOYFnzJjhvLXMwWfnIvnM9+yzzzrvbtx0Y9wioI9HQkNWG7L1oJPntsCLnEf6w/4g4OyNdfM5tw+Qdv4T8d1X31+nTh1H3tHVsxeIOOunD3p7PPHejQikHfkPMiNuHZDwEKzLmEhQuMWgr1dNd8eOHW7NyKTuvPNOF1
gMhtZOITBy5EhnNGIsJbcZyU8ugva8IWAIGAKGQEohEA6SD28jbhLZtb/NSL6/SIWwH1pyrDOstPga3meIKPIWtPc0iD8BoRBwCDnkObEGcYU879271xFgZDNIWMixP2/ePOepRkID+SVjDy8snnZuC5DlkB8dY+RcjVsASDRyGE+y41WsRVLj3SIgHyJrzpAhQ87IbzxpEDcKyHMg+aT29Eh7fHN7efW9Wwz+E0DemdvXcPKMDqQ7EFPkJnwOUWVN//nPf9wNAxmEWDuFJbg5IQiYgGUkQIyLQWHtHwTuueceZwByRtYMAUPAEDAEDAFDIPgIIN2G85H2O5BmJD8RtDzi6BVagjj6as35d39zwvtOEzfTDCQffT1kOqFGcCkBqXi1+TMkHxKakPc+vnE8DzWfYTjgTSeNJZlxeHm8/XhSIYgvhBiyjybdy2qT2AvmkWhuF5AA0TAi8NqTIYc5MCaIPfBSg8Ydj88h1EiGeLExBvxp7A8yj+cdPCHxcf9DYLxA1DEiKPxFoC77hagSk0DxDW4FkAghmyLwmX2zJm4N0O573v5Azj4570okS4RIaco58X7cfffd/hyT9TEEDAFDwBAwBAwBPxGAx6DFJ44w0MrLRvITARm9OvIOZBsUQoDskanFk79AzvECQ+AgzRBmvN0EuvJ3+kLQIJ6MgxedVIxIPhjTyzSDVIF0hDt37kxwNcyJPh9ijpeZnPqMkdTGupDw4KEnS4q3D9/xMCQg2G+88YYrQBVfYG/c+QluhezRlxgDJD6QfFJr+u6PuSHX8e0ZvJDpcLuBRIa5/W0Qb8g4WYrQ81OOOm7zjB0w9a2Uy7l4GYCQ6SBXQtLEOslqRA7+Rx991JFabjzAn7EwGjhDzp7x6IvRxJlz3vyn5O9gzhfvERIpxuNd8Yw7nmXvvHcUKuNZAqzpE8mN96dmzZpOtmPByZF8UrY2Q8AQMAQMgWhCYOnSpU5eTQKQxCruJrQnI/kRcNqQQeQgaO7ja5BRPMnklac6K3/HSw7JhFjyGQZHQo3+cYNyIbN46DE0INFk7/Ey+kBCeQYvOoGrSHogtv7oppG6kNWGcblxoDEeAb2QfhpEF8OBwF4KePk2PuM2AIOJP6PvP1exB29/XoVf4hN4Fk8+RN+30Zf0mYxP6kyMEJ4jEBmvNPuFsLJ2bkoI6qUPgcLECWBoWb78f79p4Mw5YRRxK2LNEDAEDAFDwBAwBJKOAM5BnJ1IiZE7J6UZyU8KakF+BhKJtpkML/E10l2SlQYpiVdWGWkJ1WIh93iEIdbxSXcgpGSXgXSj38eQoB9yGjzsXrlu5C0YCxgPyFX4MwSc9JWQf7zRrBMvNWQfr7cn8fFdc9++fd0LiUfak7MwH0YEEhiMDbLYIINB4hF3z9ws4C0n7z1rR99/LqlQo0aNXJYd1kkgLcYDJJ2MOl5xL7zlrImMMIMGDTrj7UdGxDoxQJiX5zw5DmutV6+evv76a5e/H2lP3IDiIL8KUT1cr1693K0Q76o1Q8AQMAQMAUPAEEg6AqQSx3tPtsGkNiP5SUUuiM9BmiGzVG1F7uLJPrwpIPRosiD5Xp589OIQUAgp3yH88aXQhCgjLWFsyDupNCHdEGPSTSIp4jluEiDYjz/+uEuhSWErSC1SFD7HQ490BCOAOblRQM7im+0GidKbb76p22677YwX39sDFdoaNmzojAV07eXLl3dZdiDZrMeTsuBRh3DjTadhvJB1xzNGfGGHnOOVHz9+vMsOBI5kHeKWADKOVAdZE154AnjBlfVXrFjRfSeTEHIkDBrWhIEQ10uPhxrp0PTp092arSWOAFH/Bw4ccGdizRAwBAwBQ8AQMAQCRwDJMM5UkoMkpxnJTw56QXrWK4aFx5qsL2TRwSvtBZIiO0EyAxH1qq6SdhLSjJykSJEijogmVAwL8g5ZRcYCsWZcyDVknmdILQmBpaormmrSRUKE8b5jdEDkIcrr1693xgCpJnn5CAImPoD1E2
+AMYHnHGIdN1gUvTY6dIwFpD+QcDztXq5/bihIeclcEHKPbCOr4ZYBA8M3cBjo6YPxQ8YhYhq4WSCnPddbzI8hxF6ILYB0QuYZgzmo8MttwbBhw5zBQ3ah+Ip4MS+efCxqr2JukI491Q5D1WSM0fhSwqbaTdvGDAFDwBAwBAyBICBANVsyJk6YMCHZoxnJTzaEyR8A0omXHY88nnaIKV5nJDZ4tiHg6MPx1ntEE+kMMh5IORVi8XzHl/0GTzcaaQg7Lw2SGQg1nnUvShstOsGqjA9xJmUk41MtFvkK66NoFIYGKSXR169bt871hVATGEJ+fNZBYSQId1ySv2zZMmcoeFVnQQ1PO+NhpJDFh0JgyIm8rDz0YR0YMFQE5nNw8mRJzIHXmO8YLEhFuJ0AL9bGLQWeZcaHxHtSG3DCOMEgQC4EJonpyONmQ0r+iaf+ETD0CBjGmLRmCBgChoAhYAgYAudGAD6CvJiCp8FoRvKDgWIQxoDMklYSQgtJxoONZIZqtqRNQqoTH8nHK04BKAJCkavgqfaysuDtJyobrzsSFvLfo12H7KOJx7PtEWl01F4xKjK7IJPBA+5loGEOAn2R4rRq1cp59ZHsMB+55fHqU2GW/r56fA8aClXR31fTjtHADQCNolvxpab0MvBgAGEgcBvhyZLozy0ExhBGAl599uHNAR4YTeyF9FPev3N7Qf/GjRu7GwJvn0E4RhvCBwGIPgHe5tG318IQMAQMAUPAEEgcATz4KBJwVgarGckPFpLJHAfvMt52tOUQWjzlBHsSUY1cBCMAIu0RVc+Tj4edtJpITQjChXRXrlzZrQbvOTcBEGiCYTds2OC+8PqTEtSTxECaIfCQX7zevrIY7894yflC4sINAN5t7/mtW7c6sk0wb0K5431TVXpQMS9ae8i7R/Djg5G9durUyd1mkEbK0+djpODJ53n+DYIPcfeaV4Trp59+csaThx3zYiwgPWIvgeS7T+Yxp7nHke6Ar2n009zR24YNAUPAEDAE/EQADT48KRgSHd8pjeT7eQCh7gbxhICjWYfkkz+eIFZIN0WkkKogbfGINV5qCDeZcyCwkCk0+3jh8ZjTkOEwHjcE3gsE4fKqxPruCZkMNwBe2kvfz3jx+Jw50d3HDU6F4NOHuULRMD6QL+H5RxKE9599IP+hSjDkPT4jgucojLVmzRoXK+Dtjb4YDHiZSaNpLbQIIJlCAgbRt/SaocXaRjcEDAFDwBCIHgTw3MPPkCknN8g2vl0byY+QdwHiiYac7xB2Mr1AzglWnTRpktOZQ/K95hWbwoNPMC0pOEl5SZEpAlVpeLkhsRSGIpNMYo25IekJ5YCHZNM8iY/vWHjaaaEMTAUXpDV46rlRYJ3gQQag+AwT1sMzGEvff/+9C5z1jRNgzRgHlvM+PP8BSK+Jh4LzsoJZ4cHcZjEEDAFDwBCIXASIZ6R4KJwvOWkyE9uhkfwIOn9IKd5ygkyRvtSuXVtITeIj+fTFc4/MBo8+GXgg9UhxMAjQ70NuMQIIlCXQ9FyNMSNdukLgLfp69s135EqJEXVuSMiyA55G6M/1BoT2czI8YXCOHj3a/WCzZggYAoaAIWAIpEUEiCFEjk28ZFILXfmDm5F8f1AKYx905IMHD3bklcBbZDLIdjy5ju9SIK9UGEUOgWb93nvvdSSd9JAQKlJxojtnPC+/fhi3EpKpwIfYA69oFd8TM0wwXMAJaY+R/JAcSUCDcgvDDRVF1ngvrRkChoAhYAgYAmkJgccee8xlF4TbUewqlM1IfijRTcLYkFLSDqIhJ2sMnmgsPjzyBOLGJfno1MlbTwEpL+c8qQuXL1/u5BHo9UmjGTelZRKWFpWPGMmPvGMjVoTbpx9//NFlZCLtqjVDwBAwBAwBQyA1I0AMIQ5ZnK8jRow4k8Y8lHs2kh9KdJM4NllxypYte0b//umnnzqy75s5hqFJiUngLRliIPTIeyD0NDT7kCnSZ6ZVgg8ORvKT+BKG4T
GCjKjmjF7/qaeeCsOMNoUhYAgYAoaAIRB+BJAXo7sfOnSoU1+EqxnJDxfSAcyDDp/c7R45//zzz11hIYi/byMPPoG1yFAOHjzoJCyhDH4NYAsR09VIfsQcRbwLwZtPhWPqQpAilhoR1gwBQ8AQMAQMgdSAAIVHu3fv7rL5DRo0yHnxw9mM5IcTbT/nilthdfbs2Y70lyhRIsERoiFo1s/tB70btxoYQpEeVBz0jUfRgATjIi3jtgrpGelcrRkChoAhYAgYAtGIwK5du9zvMmIj+/XrpzZt2qTINozkpwjsgU1Kukc8nUhzrAWOgBlAgWOWEk9wG8V15ssvv+wKvCHh8a2QnBJrsjkNAUPAEDAEDAF/ESBDItIcshp27tzZyVGzZs3q7+NB72ckP+iQBn9A9Ph4oc0THXxsbcTIQ4BqzwMHDtS0adNEFgJ0+1a0LPLOyVZkCBgChoAhcAoBsv6htydrXKNGjfTkk0/q8ssvT3F4jOSn+BHYAgwBQyA+BL777jv3Q3Pq1KmuDgTBSlRctmYIGAKGgCFgCEQCAlu2bHGVal999VU1bdrUOaX8qUsUrrUbyQ8X0jaPIWAIJAmB9evXux+i5BRu2LChqwfBd2uGgCFgCBgChkBKIDB9+nRXdJTv1H7BCVWqVKmUWEqicxrJj7gjsQUZAoZAfAiQTYqaEWPHjnVF4qgP0bx5c1c4zpohYAgYAoaAIRBKBBYvXqzJkydr4sSJKlSokO66664zNY1COW9yxjaSnxz07FlDwBBIEQRWrlypDz/8UFOmTNGhQ4ecZ79+/fqqU6eOKyJnzRAwBAwBQ8AQSA4C/G4hu+HMmTNdjBi/W2699VbnXCpfvnxyhg7bs0bywwa1TWQIGAKhQADCT4nwL774QnPnzlWVKlV03XXXOQ8/1XSLFSsWimltTEPAEDAEDIFUhMDmzZtFVdolS5ZowYIFWrhwoWrVqqW6deuqQYMGKleuXNTt1kh+1B2ZLdgQMAQSQoB0s/Pnz9eiRYu0dOlSEbz7xx9/qEyZMipZsqSrNVGkSBEVLFhQefPmVe7cuXXBBRcoS5Yslq7TXitDwBAwBFIhAqS1RO7J74K9e/dq9+7d2rFjh7Zu3aqNGzdqw4YNWrNmjbJnz66KFSuqcuXKqlq1qqpXrx71BUaN5KfCF9q2ZAgYAv8gQI2JtWvXikrSmzZtcj/Yf/31V1d7gh/4sbGx7hfA0aNHXQDVG2+8YfAZAoaAIWAIRDkC7dq1cwkbzj//fCe1waGTK1cu5+ApUKCAChcu7AqNUoW2dOnSriptamtG8lPbidp+DAFDIEkIWNG0JMFmDxkChoAhELEIpPWf60byI/bVtIUZAoaAIWAIGAKGgCFgCBgCSUPASH7ScLOnDAFDwBAwBAwBQ8AQMAQMgYhFwEh+xB6NLcwQMAQMAUPAEDAEDAFDwBBIGgL/B14/bTssWy+5AAAAAElFTkSuQmCC)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8h_cjNwOv4L1" + }, + "source": [ + "We encourage the readers not familiar enough with speech recognition to gain more familiarity with this technology before moving on. 
Beyond scientific papers, online you can find amazing tutorials and blog posts, such as:\n",
+ "- [An Intuitive Explanation of Connectionist Temporal Classification](https://towardsdatascience.com/intuitively-understanding-connectionist-temporal-classification-3797e43a86c)\n",
+ "- [Connectionist Temporal Classification](https://web.archive.org/web/20211017041333/https://machinelearning-blog.com/2018/09/05/753/)\n",
+ "- [Sequence-to-sequence learning with Transducers](https://lorenlugosch.github.io/posts/2020/11/transducer/)\n",
+ "- [Understanding Encoder-Decoder Sequence to Sequence Model](https://towardsdatascience.com/understanding-encoder-decoder-sequence-to-sequence-model-679e04af4346)\n",
+ "- [What is a Transformer?](https://medium.com/inside-machine-learning/what-is-a-transformer-d07dd1fbec04)\n",
+ "- [Attention? Attention!](https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html)\n",
+ "- [Attention and its Different Forms](https://towardsdatascience.com/attention-and-its-different-forms-7fc3674d14dc)\n",
+ "- [How to Implement a Beam Search Decoder for Natural Language Processing](https://machinelearningmastery.com/beam-search-decoder-natural-language-processing/)\n",
+ "- [An intuitive explanation of Beam Search](https://towardsdatascience.com/an-intuitive-explanation-of-beam-search-9b1d744e7a0f)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "t3vzfilfMros"
+ },
+ "source": [
+ "After this brief overview, let's now see how we can develop a speech recognition system (encoder-decoder + CTC) with SpeechBrain.\n",
+ "\n",
+ "For simplicity, training will be done with a small open-source dataset called [mini-librispeech](https://www.openslr.org/31/), which only contains a few hours of training data. In a real case, you need much more training material (e.g., 100 or even 1000 hours) to reach acceptable performance." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7M6IoxLlEovh" + }, + "source": [ + "## Installation\n", + "\n", + "To run the code fast enough, we suggest using a GPU (`Runtime => change runtime type => GPU`). In this tutorial, we will refer to the code in ```speechbrain/templates/ASR```.\n", + "\n", + "Before starting, let's install speechbrain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "beagAGw5t5bK" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH\n", + "\n", + "# Clone SpeechBrain repository\n", + "!git clone https://github.com/speechbrain/speechbrain/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eWpl9xgAIXKE" + }, + "source": [ + "## Which steps are needed?\n", + "\n", + "### 1. Prepare Your Data \n", + " - Create data manifest files (CSV or JSON format) specifying the location of speech data and corresponding text annotations.\n", + " - Utilize tools like [mini_librispeech_prepare.py](https://github.com/speechbrain/speechbrain/blob/develop/templates/speech_recognition/mini_librispeech_prepare.py) to generate these manifest files.\n", + "\n", + "### 2. Train a Tokenizer \n", + " - Decide on basic units for training the speech recognizer and language model (e.g., characters, phonemes, sub-words, words).\n", + " - Execute the tokenizer training script:\n", + " ```bash\n", + " cd speechbrain/templates/speech_recognition/Tokenizer\n", + " python train.py tokenizer.yaml\n", + " ```\n", + "\n", + "### 3. 
Train a Language Model \n", + " - Train a language model using a large text corpus (preferably within the same language domain as your target application).\n", + " - Example training script for a language model:\n", + " ```bash\n", + " pip install datasets\n", + " cd speechbrain/templates/speech_recognition/LM\n", + " python train.py RNNLM.yaml\n", + " ```\n", + "\n", + "### 4. Train the Speech Recognizer \n", + " - Train the speech recognizer using a chosen model (e.g., CRDNN) with an autoregressive GRU decoder and attention mechanism.\n", + " - Employ beamsearch along with the trained language model for sequence generation:\n", + " ```bash\n", + " cd speechbrain/templates/speech_recognition/ASR\n", + " python train.py train.yaml\n", + " ```\n", + "\n", + "### 5. Use the Speech Recognizer (Inference) \n", + " - After training, deploy the trained speech recognizer for inference.\n", + " - Leverage classes like EncoderDecoderASR in SpeechBrain to simplify the inference process.\n", + "\n", + "Each step is crucial for building an effective end-to-end speech recognizer.\n", + "\n", + "We will now provide a detailed description of all these steps.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rDgNu_b8k6qD" + }, + "source": [ + "## Step 1: Prepare Your Data \n", + "\n", + "Data preparation is a critical initial step in training an end-to-end speech recognizer. Its primary objective is to generate data manifest files, which instruct SpeechBrain on the locations of audio data and their corresponding transcriptions. 
These manifest files, written in widely-used CSV and JSON formats, play a crucial role in organizing the training process.\n", + "\n", + "### Data Manifest Files\n", + "\n", + "Let's delve into the structure of a data manifest file in JSON format:\n", + "\n", + "```json\n", + "{\n", + " \"1867-154075-0032\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/1867/154075/1867-154075-0032.flac\",\n", + " \"length\": 16.09,\n", + " \"words\": \"AND HE BRUSHED A HAND ACROSS HIS FOREHEAD AND WAS INSTANTLY HIMSELF CALM AND COOL VERY WELL THEN IT SEEMS I'VE MADE AN ASS OF MYSELF BUT I'LL TRY TO MAKE UP FOR IT NOW WHAT ABOUT CAROLINE\"\n", + " },\n", + " \"1867-154075-0001\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/1867/154075/1867-154075-0001.flac\",\n", + " \"length\": 14.9,\n", + " \"words\": \"THAT DROPPED HIM INTO THE COAL BIN DID HE GET COAL DUST ON HIS SHOES RIGHT AND HE DIDN'T HAVE SENSE ENOUGH TO WIPE IT OFF AN AMATEUR A RANK AMATEUR I TOLD YOU SAID THE MAN OF THE SNEER WITH SATISFACTION\"\n", + " },\n", + " \"1867-154075-0028\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/1867/154075/1867-154075-0028.flac\",\n", + " \"length\": 16.41,\n", + " \"words\": \"MY NAME IS JOHN MARK I'M DOONE SOME CALL ME RONICKY DOONE I'M GLAD TO KNOW YOU RONICKY DOONE I IMAGINE THAT NAME FITS YOU NOW TELL ME THE STORY OF WHY YOU CAME TO THIS HOUSE OF COURSE IT WASN'T TO SEE A GIRL\"\n", + " },\n", + "}\n", + "```\n", + "\n", + "This structure follows a hierarchical format where the unique identifier of the spoken sentence serves as the first key. 
Key fields such as the path of the speech recording, its length in seconds, and the sequence of words uttered are specified for each entry.\n", + "\n", + "A special variable, `data_root`, allows dynamic changes to the data folder from the command line or the YAML hyperparameter file.\n", + "\n", + "### Preparation Script\n", + "\n", + "Creating a preparation script for your specific dataset is essential, considering that each dataset has its own format. For instance, the [mini_librispeech_prepare.py](https://github.com/speechbrain/speechbrain/blob/develop/templates/speech_recognition/mini_librispeech_prepare.py) script, tailored for the mini-librispeech dataset, serves as a foundational template. This script automatically downloads publicly available data, searches for audio files and transcriptions, and creates the JSON file.\n", + "\n", + "Use this script as a starting point for custom data preparation on your target dataset. It offers a practical guide for organizing training, validation, and test phases through three separate data manifest files.\n", + "\n", + "### Copy Your Data Locally\n", + "\n", + "In an HPC cluster or similar environments, optimizing code performance involves copying data to the local folder of the computing node. While not applicable in Google Colab, this practice significantly accelerates code execution by fetching data from the local filesystem instead of the shared one.\n", + "\n", + "Take note of these considerations as you embark on the crucial journey of data preparation for training your speech recognizer. 🚀🎙️\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9To_-2fej2SA" + }, + "source": [ + "## Step 2: Tokenizer\n", + "\n", + "Choosing the basic tokens for your speech recognizer is a critical decision that impacts the model's performance. 
You have several options, each with its own set of advantages and challenges.\n", + "\n", + "### Using Characters as Tokens\n", + "One straightforward approach is to predict characters, converting the sequence of words into a sequence of characters. For example:\n", + "```\n", + "THE CITY OF MONTREAL => ['T','H','E', '_', 'C','I','T','Y','_', 'O', 'F', '_', 'M','O','N','T','R','E','A','L']\n", + "```\n", + "Advantages and disadvantages of this approach include a small total number of tokens, the chance to generalize to unseen words, and the challenge of predicting long sequences.\n", + "\n", + "### Using Words as Tokens\n", + "Predicting full words is another option:\n", + "```\n", + "THE CITY OF MONTREAL => ['THE','CITY','OF','MONTREAL']\n", + "```\n", + "Advantages include short output sequences, but the system can't generalize to new words, and tokens with little training material may be allocated.\n", + "\n", + "### Byte Pair Encoding (BPE) Tokens\n", + "A middle ground is Byte Pair Encoding (BPE), a technique inherited from data compression. It allocates tokens for the most frequent sequences of characters:\n", + "```\n", + "THE CITY OF MONTREAL => ['THE', '▁CITY', '▁OF', '▁MO', 'NT', 'RE', 'AL']\n", + "```\n", + "BPE finds tokens based on the most frequent character pairs, allowing for flexibility in token length.\n", + "\n", + "#### How Many BPE Tokens?\n", + "The number of tokens is a hyperparameter that depends on the available speech data. For reference, 1k to 10k tokens are reasonable for datasets like LibriSpeech (1000 hours of English sentences).\n", + "\n", + "### Train a Tokenizer\n", + "SpeechBrain leverages [SentencePiece](https://github.com/google/sentencepiece) for tokenization. 
To find the tokens for your training transcriptions, run the following code:\n", + "\n", + "```bash\n", + "cd speechbrain/templates/speech_recognition/Tokenizer\n", + "python train.py tokenizer.yaml\n", + "```\n", + "\n", + "This step is crucial in shaping the behavior of your speech recognizer. Experiment with different tokenization strategies to find the one that best suits your dataset and objectives. 🚀🔍\n", + "\n", + "Let's train the tokenizer:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tA4HMrnFJ33e" + }, + "outputs": [], + "source": [ + "%cd /content/speechbrain/templates/speech_recognition/Tokenizer\n", + "!python train.py tokenizer.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PYko19NiKdtK" + }, + "source": [ + "The code might take a while just because data are downloaded and prepared. As for all the other recipes in SpeechBrain, we have a training script (`train.py`) and a hyperparameter file (`tokenizer.yaml`). 
Let's take a closer look into the latter first:\n", + "\n", + "\n", + "\n", + "```yaml\n", + "# ############################################################################\n", + "# Tokenizer: subword BPE tokenizer with unigram 1K\n", + "# Training: Mini-LibriSpeech\n", + "# Authors: Abdel Heba 2021\n", + "# Mirco Ravanelli 2021\n", + "# ############################################################################\n", + "\n", + "\n", + "# Set up folders for reading from and writing to\n", + "data_folder: ../data\n", + "output_folder: ./save\n", + "\n", + "# Path where data-specification files are stored\n", + "train_annotation: ../train.json\n", + "valid_annotation: ../valid.json\n", + "test_annotation: ../test.json\n", + "\n", + "# Tokenizer parameters\n", + "token_type: unigram # [\"unigram\", \"bpe\", \"char\"]\n", + "token_output: 1000 # index(blank/eos/bos/unk) = 0\n", + "character_coverage: 1.0\n", + "annotation_read: words # field to read\n", + "\n", + "# Tokenizer object\n", + "tokenizer: !name:speechbrain.tokenizers.SentencePiece.SentencePiece\n", + " model_dir: !ref \n", + " vocab_size: !ref \n", + " annotation_train: !ref \n", + " annotation_read: !ref \n", + " model_type: !ref # [\"unigram\", \"bpe\", \"char\"]\n", + " character_coverage: !ref \n", + " annotation_list_to_check: [!ref , !ref ]\n", + " annotation_format: json\n", + "```\n", + "\n", + "The tokenizer is trained on training annotations only. We set here a vocabulary size of 1000. Instead of using the standard BPE algorithm, we use a variation of it based on unigram smoothing. 
See [sentencepiece](https://github.com/google/sentencepiece) for more info.\n", + "The tokenizer will be saved in the specified `output_folder`.\n", + "\n", + "Let's now take a look into the training script `train.py`:\n", + "\n", + "\n", + "\n", + "```python\n", + "if __name__ == \"__main__\":\n", + "\n", + " # Load hyperparameters file with command-line overrides\n", + " hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])\n", + " with open(hparams_file) as fin:\n", + " hparams = load_hyperpyyaml(fin, overrides)\n", + "\n", + " # Create experiment directory\n", + " sb.create_experiment_directory(\n", + " experiment_directory=hparams[\"output_folder\"],\n", + " hyperparams_to_save=hparams_file,\n", + " overrides=overrides,\n", + " )\n", + "\n", + " # Data preparation, to be run on only one process.\n", + " prepare_mini_librispeech(\n", + " data_folder=hparams[\"data_folder\"],\n", + " save_json_train=hparams[\"train_annotation\"],\n", + " save_json_valid=hparams[\"valid_annotation\"],\n", + " save_json_test=hparams[\"test_annotation\"],\n", + " )\n", + "\n", + " # Train tokenizer\n", + " hparams[\"tokenizer\"]()\n", + "```\n", + "\n", + "Essentially, we prepare the data with the `prepare_mini_librispeech` script and we then run the sentencepiece tokenizer wrapped in\n", + "`speechbrain.tokenizers.SentencePiece.SentencePiece`.\n", + "\n", + "Let's take a look at the files generated by the tokenizer. If you go into the specified output folder (`Tokenizer/save`), you can find two files:\n", + "+ *1000_unigram.model*\n", + "+ *1000_unigram.vocab*\n", + "\n", + "The first is a binary file containing all the information needed for tokenizing an input text. 
The second is a text file reporting the list of tokens allocated (with their log probabilities):\n", + "\n", + "```\n", + "▁THE -3.2458\n", + "S -3.36618\n", + "ED -3.84476\n", + "▁ -3.91777\n", + "E -3.92101\n", + "▁AND -3.92316\n", + "▁A -3.97359\n", + "▁TO -4.00462\n", + "▁OF -4.08116\n", + "....\n", + "```\n", + "\n", + "Let me now show how we can use the learned model to tokenize a text:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ik9hoxBUG03u" + }, + "outputs": [], + "source": [ + "import torch\n", + "import sentencepiece as spm\n", + "sp = spm.SentencePieceProcessor()\n", + "sp.load(\"/content/speechbrain/templates/speech_recognition/Tokenizer/save/1000_unigram.model\")\n", + "\n", + "# Encode as pieces\n", + "print(sp.encode_as_pieces('THE CITY OF MONTREAL'))\n", + "\n", + "# Encode as ids\n", + "print(sp.encode_as_ids('THE CITY OF MONTREAL'))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Oft-7K85LA86" + }, + "source": [ + "Note that the sentencepiece tokenizers also assign a unique index to each allocated token. These indexes will correspond to the output of our neural networks for language models and ASR." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nkYENC7BJ4K9" + }, + "source": [ + "## Step 3: Train a Language Model \n", + "\n", + "A Language Model (LM) plays a crucial role in enhancing the performance of a speech recognizer. In this tutorial, we adopt the concept of **shallow fusion**, incorporating language information within the beam searcher of the speech recognizer to rescore partial hypotheses. This involves scoring the partial hypotheses provided by the speech recognizer with language scores, penalizing sequences of tokens that are \"unlikely\" to be observed.\n", + "\n", + "### Text Corpus\n", + "Training a language model typically involves using large text corpora, predicting the most probable next token. 
If you lack a substantial text corpus for your application, you may choose to skip this part. Additionally, training a language model on a large text corpus is computationally demanding, so consider leveraging pre-trained models and fine-tuning if needed.\n", + "\n", + "For the purposes of this tutorial, we train a language model on the training transcriptions of mini-librispeech. Keep in mind that this is a simplified demonstration for educational purposes.\n", + "\n", + "### Train a LM\n", + "\n", + "We are going to train a simple RNN-based language model that estimates the next tokens given the previous ones.\n", + "\n", + "![SpeechBrain-Page-3 (1).png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfYAAACuCAYAAADEQS+SAAAGcHRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDYtMTJUMjIlM0E0MSUzQTMwLjU3NlolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY4MS4wLjQwNDQuMTIyJTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE0LjcuNyUyMiUyMGV0YWclM0QlMjJndnFCVmF6c3FUbk8wUWJZVndociUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjI3bkNMTmQ3LVdVaWZyTFBUbEZPTCUyMiUzRTdWcGRrOW9nRlAwMVByYVRrQSUyRmRSMnR0dHpQdHRsTjNwczlzd2lhMEpLUUVWJTJCMnZMekZnUHNEWmpDYTZxJTJCdExrZ3RjdU9mQzRaQTRjbWJKJTJCak9EV2Z5TmhvaU1nQld1Ujg3SEVRQzI0M25pVWxnMnBXVmklMkJhVWhZamlVbFNyREF2OUQwbWhKNnhLSEtHOVU1SlFTanJPbU1hQnBpZ0xlc0VIRzZLcFo3WkdTWnE4WmpKQm1XQVNRNk5aZk9PU3hqTUt6S3ZzdHdsR3Nlcll0V1pKQVZWa2E4aGlHZEZVek9mT1JNMk9VOHZJdVdjOFFLY0JUdUpUdFB1MHAzUTJNb1pSM2FRREtCayUyQlFMR1ZzY2x4OG80S05HRjFtc2hwaUhLMU5FTU1IVmQzU2gyRHZBaE16QXRFRWNiWVJWWlFqNjZac3N0bWhWVDZ2S21nOVMzWVgxMkVkT3pLbE1wM1J6bmNWc2JpUlFac0JjQXdBJTJCRVQwOE9HUml2SFhrZkQlMkZMcWtxZUpkdkolMkJWVVZMQW4yYm9xRkhkUmNmMTVkNmNjaVNHVXZzb1NEV0NCYnhxaVlqeTJLRjdGbUtORkJvT2lkQ1hXajdERlBDR3llRzhpNm9BRE0lMkJBU1lGJTJGSDk4YlM0VlhWamtIWFBSNWR6NFN1dU1La1FHYnI2NEhtTzRNcTdvUzh3SkEzNGMwNW8zJTJGUWpCTEtoQ1dsS1NxR2hBbHBtU0RCV
VNvZUE0RTVFdllQUlVhd1lJaXBMRWh3R0JiZEdQTlpaZHc2TXFXeWdWcDFhZ2w1anBaaTE1QmkwRU9LdmVjWkJLWGh0R0RkQWk4Qzh4d0hwam1OUW8xMG53MiUyRlFSRjZmTXJHRUlFY1B6WGRtNEtXUGZ5Z2VEczdKYnF1M1VJWE5EM2tkTWtDSkJ2VnlmWVpQNkRsaDBNV0lhNzUyV1pnRjNTbnBQaFhrQlRRQU5NYkg1aVRwaHN3SGlvbDQ3MVVtR2N3UFp3SzcyJTJGbk5ib3JmVjBMM1oyQTNpWlhwQTlVZzladWNrSzljTk9CdHdRakxlUWpaVHltRVUwaG1WZlcxblNyNm55bE5KTW8lMkZVYWNiJTJCUzVBaTQ1UFl6NnhMaTJMTk5Ra2lWajFDYVBqbk5uNXVzS20xTE1iJTJCVFN4JTJGUjN6eWVtYlB2eU4yN2I3MGxPYVk2RzAxTjJoM1B5Njg5TFA0cXE3V2M0U1dYdlA3d2ZSM3V6NmYzMTB0NHBhSzZIOXdLdlRWVUI5MnlxeXU1eVJuJTJCcHNrb0s4THFzS3FmUEtYU1Y2Uno5UmpBSExnRCUyRmpMcktkUHElMkJzUDBiVEhyU1ZacWpBWFdWNlhoOWNYbnBSMWUxJTJGUXlvcTB6SDhENW83OHZpZWxudkJDd0g5cjhHdUZoWjVmaG5rMVdneTJIOXBjb3FKY0hydXFyMkJYcFlYYVVHOUVZd1BTeUF5ZmxrbGZIciUyQllWdDM2N1ZrNnpTSEEwbnE0emYzUzh1TCUyRjNJcXJhZjRXU1Y4Vk41SDZ6MyUyRmU1NldlOFVMS2UlMkZCWGd2ZjVjTHNOdDZ1VDRjM3VLeCUyQnI5YnVheXFmdzA2OCUyRjglM0QlM0MlMkZkaWFncmFtJTNFJTNDJTJGbXhmaWxlJTNFVy14fgAAIABJREFUeF7tnQtwVdUVhlcEkgAlvAviAxUdHoIgCiKiVMGqgFQIKh1QoVVGWyQ8Y4BERIKkYCIPaQVbYxURWhBbQSqCDgpiAR8UxcCgI1VBh1J5yUsgnbXtjZeb17035+y17j7/melMMefstfb377P//TpJUnFxcTHhAgEQAAEQAAEQcIJAEozdCR1RCRAAARAAARAwBGDsaAggAAIgAAIg4BABGLtDYqIqIAACIAACIABjRxsAARAAARAAAYcIwNgdEhNVAQEQAAEQAAErxj5w4EBavHhxubTbt29PH374IWVnZ9OCBQvo888/L3Xv2LFj6eWXX6adO3ean3Xr1o3Wr19fbpnffvst1atXDwpbJrBkyRKaO3cuffDBB3Ts2DFq1qwZ9e7dmyZMmEBnn312qWwee+wxmjhxIj300EOUl5dnfv7UU0/RAw88UGHm0NeysJWEu/fee+mjjz6id99919x5+vRpeuKJJ+j555837/OJEyeM/n379qXc3FyqXbu2rgogm1IENm/eTNOnT6e3336b9u3bRw0aNKCrrrqKMjIy6IYbbii5f/Xq1XTjjTdSv3796KWXXjqjnKKiImrdujV98cUXdO6554KyJQJWjH3v3r303XffmSp9+eWXdO2119Kzzz5L3bt3N/8tOTnZGECsxt60aVN6/PHHy0R1/vnn01lnnWUJI8IwATbn/Px88+Knp6dTWloaffLJJ8awd+/eTRs3bqTzzjuvBNapU6fowgsvpOuvv55effVV0zZSUlLo0KFDpiMJXTyIu/XWW035oQv66mpzkcb+yCOP0JNPPknz5s2jrl27UvXq1YmN4v7776fLL7/cDNJx6SXwl7/8hQYNGkQ9evQwmrVo0YJ4ML1o0SKj6YwZM2j06NGmAmzsPGDj/rawsJBuv/32korB2GU0tmLs4VXj0Tt35itXrqSbb775jFrHauwXX3yxGSDgkifAps2j+YKCAho1atQZCR09epTuuOMOuu+++0wHELqWLVtGQ4YMMaN57jh4hjd48OBSlbnggguIV31CM3r52
iKDSAKRxs5m3q5dO2MC4RebOw/2WOekpCSAVEiADbx58+Z022230XPPPVcqw0mTJtHUqVPNCk2rVq2MsfNAfvLkyeYd3bZtm5nd8wVjlxEYxi7D3bmobOYvvPCCmZnz7Cyai2cDl1xyiVl659E/L+O+8847MPZo4Cm7J9LYeZa3fPlyswV3zTXXKMsW6VREgM38nnvuMVsobPCRF2+xNWrUiMaMGWPMnI2dV9R4VZa15nc6NCCAscu0NXXGziPBatWqlaLBe3YXXXTRGXvsGzZsoBo1apS696677qKnn35ahmhAo/KLffDgQVq7dm1UBHjW1qZNG9q0aRNdeeWVZpR/6aWXmr35Dh06nFEGZuxRIRW9KdLYDxw4YAZrfGaGTYA7fN5S4Vkgb6Pg0ksgJyeHZs+eTaxheVfHjh3NbH3hwoXG2Pv06WPO1PB7HNpqueWWWzBjF5JZnbHzHs1rr71WCgfv3fIhjvDDc40bN6Zp06aVurdu3bplHtQSYhyIsP3796dvvvmmwgON4SCGDx9O69atM4cmQxd3/mzu8+fPh7EnWKuJNPZQ+nxegldhWOvXX3+d3nvvPcrMzDRLubh0Enj00UfNHjprV97Fg29+V3mVLtzY+X6exT/zzDNmqf6rr77C4TkBmdUZeyyn4rHHLtBiygmZlZVl9lP37NlDqamppe76/vvvS1ZXuMM455xz6Pjx41SzZs2Se3nEz8v43Bnw4Cx0YcauR+fyMinP2CPv5zMxQ4cOpa1bt1Lbtm31VyyAGfL2CZ9p2bFjh1lWj7z4vW3YsCHxO8/noiKNnd/1K664whySHjFihJnZ41S83YYEY7fL29loW7ZsIV6e4xedR+zhFx+eu+6668wBG+4M+HM4PuHOy/Dhxn7y5Enq1KmTeZ47BBh74jSXcGPngdm4ceOIl3T5U6fwi82iZcuWZvbes2fPxKlggDLlvXLeLuFP2v7617+Wqjl/rjhlyhSzzM4HoSONnR/gw7S8Ascz97vvvhvGbrn9JLSxV/S5Gy/T41tZu62JX/iHH37YnH7nU898Mpb33Phb9cOHD5vl2CZNmpglPH7pI5fcOVv+VG7VqlXm5DSM3a5+VYkWbux8Hoa/kOA9Wt4q4wEfr8SwEXD7+Prrr+njjz+mWrVqVSUknvWRwCuvvEIDBgygXr16EW+b8SE6/myZZ/M8MJ8zZ475DI6vsoyd/zsfrnvxxRfNKh5m7D6KVUbRCW3sFf2CGv7FGGV9OmUXb/CirVixwrz0vJfKZs7frfNMnWfo/AuD3njjDfNtbFmH5JhW6FDdmjVrSn4JBpbi9bejyKV4PkjJps7fq/PvJ+C/Ds3bL3zIkjv8sn5Zkf5aBitDPv/Ce+18IJZNvX79+uZ3EvChSD4IGbrKM/YjR47QZZddRp9++imM3XLTsW7sluuHcCAAAiAAAiAQKAIw9kDJjcqCAAiAAAi4TgDG7rrCqB8IgAAIgECgCMDYAyU3KgsCIAACIOA6ARi76wqjfiAAAiAAAoEiAGMPlNyoLAiAAAiAgOsEYOyuK4z6gQAIgAAIBIoAjD1QcqOyIAACIAACrhOAsbuuMOoHAiAAAiAQKAIw9kDJjcqCAAiAAAi4TgDG7rrCqB8IgAAIgECgCMDYAyU3KgsCIAACIOA6gYQwdv773fw3nAsLC6lOnTquaxK4+kFftyWHvtDXbQL6apcQxj5+/HjKz883fxWK/2IULrcIQF+39IysDfSFvm4T0Fc79cbOo33+2+rHjx+nlJQU8+cDMWvX15DizQj6xksuMZ6DvomhU7xZQt94yfn7nHpj59F+QUEBnThxgpKTk83fAsas3d9GYbN06GuTtv1Y0Nc+c5sRoa9N2tHHUm3s4aPBUJUwa49eXO13Ql/tClUtP+hbNX7an4a+ehVSbezho8EQQsza9TamWDODvrESS6z7oW9i6RVrttA3VmL27ldr7DwabNiwIVWrVo1q165N+/btM/8+cuQInTx50
vwbe+32GorXkaCv10R1lQd9denhdTbQ12ui3pan1tj5FHx2djbl5eVRRkYGJSUlUXFxMc2aNYuysrIoNzfXnJLHlZgEoG9i6hZt1tA3WlKJeR/01a2bWmOPxBYydt04kV28BKBvvOQS4znomxg6xZsl9I2XnD/Pwdj94YpSYySAjiFGYAl2O/RNMMFiTBf6xgjM59th7D4DRvHREUDHEB2nRL0L+iaqctHlDX2j42TrLhi7LdKIUyEBdAxuNxDoC33dJqCrdjB2XXoENht0/G5LD32hr9sEdNUOxq5Lj8Bmg47fbemhL/R1m4Cu2sHYdekR2GzQ8bstPfSFvm4T0FU7GLsuPQKbDTp+t6WHvtDXbQK6agdj16VHYLNBx++29NAX+rpNQFftYOy69AhsNuj43ZYe+kJftwnoqh2MXZcegc0GHb/b0kNf6Os2AV21g7Hr0iOw2aDjd1t66At93Sagq3Ywdl16BDYbdPxuSw99oa/bBHTVDsauS4/AZoOO323poS/0dZuArtrB2HXpEdhs0PG7LT30hb5uE9BVOxi7Lj0Cmw06frelh77Q120CumoHY9elR2CzQcfvtvTQF/q6TUBX7WDsuvQIbDbo+N2WHvpCX7cJ6KodjF2XHoHNBh2/29JDX+jrNgFdtYOx69IjsNmg43dbeugLfd0moKt2MHZdegQ2G3T8bksPfaGv2wR01S5hjH3y5Mk0adIkXfSQjWcEoK9nKFUWBH1VyuJZUtDXM5SeFJQwxu5JbVEICIAACIAACDhOAMbuuMCoHgiAAAiAQLAIlGns27dvpwULFtDq1atp27ZtdPDgwWBRiaO2aWlp1KZNG+rZsycNHjyYWrZsGUcpdh6BvrFzhr6xM0ukJ6BvIqkVe66JpG/stSv9RCljHzlyJBUWFtKwYcOoT58+1L59e6pXr54XsZwuY//+/bRlyxZavnw5zZ8/n4YOHUozZ85UV2foG58k0Dc+bonyFPRNFKXiyzNR9I2vdhUY++7duyk9PZ3atWtH06dPh5lXgTA3oszMTNq6dSstXbqUmjVrVoXSvHkU+nrDkUuBvt6x1FgS9NWoinc5adTXu9r9UFLJjP3qq6+m3r17U3Z2ttcxAltebm4urVixgjZs2CDOAPp6LwH09Z6pphKhryY1vM9Fk75e184YOy/PHjlyxCwh4/KWAG9p1KpVS3RZHvp6q2l4adDXP7YaSoa+GlTwLwcN+vpRu6SioqLizp07065du7D87gNhXvZp3rw5bdy4UeRAHR+Ug74+CPv/IqGvf2w1lAx9NajgXw7S+vpVs6Ts7OziY8eO0YwZM/yKEfhyx40bR6mpqTRlyhTrLHJycgj6+osd+vrLV7p06CutgL/xJfX1q2ZJXbp0Kc7Ly6Pu3bv7FSPw5a5du5aysrJE9tp5bx36+tsEoa+/fKVLh77SCvgbX1Jfv2qWlJaWVoxleL/w/lBuaLnnwIED/gYqo/S6detim8Vn6tDXZ8DCxUNfYQF8Di+pr19VSyIiPj/nV/ko9/8EpP4IhlTcoAkvxVkqLvS1QwD6gnM8BGDs8VCL4xmpF1QqbhyIEvoRKc5ScRNarDiSl+IsFTcORAn9iGucYeyWmqNUw5GKawmrmjBSnKXiqgFvKREpzlJxLWFVE8Y1zjB2S01LquFIxbWEVU0YKc5ScdWAt5SIFGepuJawqgnjGmcYu6WmJdVwpOJawqomjBRnqbhqwFtKRIqzVFxLWNWEcY0zjN1S05JqOFJxLWFVE0aKs1RcNeAtJSLFWSquJaxqwrjGGcZuqWlJNRypuJawqgkjxVkqrhrwlhKR4iwV1xJWNWFc4wxjt9S0pBqOVFxLWNWEkeIsFVcNeEuJSHGWimsJq5owrnGGsVtqWlINRyquJaxqwkhxloqrBrylRKQ4S8W1hFVNGNc4w9gtNS2phiMV1xJWNWGkOEvFVQPeUiJSnKXiWsKqJoxrnGHslpqWVMORimsJq5owUpyl4qoBb
ykRKc5ScS1hVRPGNc4wdktNS6rhSMW1hFVNGCnOUnHVgLeUiBRnqbiWsKoJ4xpnGLulpiXVcKTiWsKqJowUZ6m4asBbSkSKs1RcS1jVhHGNM4zdUtOSajhScS1hVRNGirNUXDXgLSUixVkqriWsasK4xtmasZ88eZJq1KhRppA//elPqWXLlvTggw/SgAEDiCHzFXqG/71u3Trq2rVrqedzc3MpJyeHjh49SqmpqXE9Y6N1STUcW3GhbxL/mUQbTemMGNA31QpzW5wjK2MrLt5fmffXr8Zr3dg7depEw4YNK6kPd4Z79uyhxYsX07Zt22jixInEZh1u7Pz/O3ToQJs3b6Zq1aqdwaI8Y4/lGb/ghpdr6wWV7higr43W9GMMW+0q1PFDX+iL/tluG4gnmnVjv/POO2nRokWlcj1+/Dhxp7Fjxw7673//S7Vq1SqZfffv359eeuklmj17tpnVh1/lGXssz8QDLtZnbHXA0sYOfWNtGVW731a7Chk79K2aXrE+DX3dXpGJtT1Ee78aY+eER44cSbNmzaKdO3dSixYtSoy9oKCANmzYQKtWrTLGz0v3oas8Y4/lmWhhVeU+v17Q/Px8swJSp06dMtPzK25ksMo6fugbX+uBvpW/8/GRje0pv94j6Ou2vrG1Mu/uVmXsPXr0oLfeeosOHz5MKSkpJcael5dHgwYNolatWlF6ejr9+c9/rtTYY3nGO5zll+RXx1CzZk06deoUjRkzhiZMmFDK4P2KG4+xQ9/YWxr0rfydj51q7E/49R5BX7f1jb2lefOEdWPv168fzZ8/vyT70B47/7e5c+fSiBEjzKydr9AscNq0aZSVlUVs1mxebP7dunUz95Q3Y4/lGW9QVlyKXx0Ds2I2p0+fNocOR40adYbB+xW3PGOHvt62Juhb+TvvLfGyS/PrPYK+butro22WFcO6sZdX0QYNGpgl5SlTplD16tXLNPYTJ05Qu3btzOn3999/3xykq8zYo3nGBny/OgbOvVGjRrRv3z5TjeTk5DMMPi0tzcpp7YpO1XJe0Df+VgZ9K37n4ycb/ZN4f9E/R99a5O+0buw/+9nPzMn30MUn3cePH09z5syh4cOHn0EkcsbOP+R99ptuuolmzpxJGRkZlRp7NM/YkCH0CZ+NWByDPy3s27cvLV261KqxQ187CkPfHz9xtUEc76/7/bPE56p+tV3rxh55qpZhdunSxRyY44NxDRs2LKlrWcbOP+R99jVr1lBRURH96U9/ouzs7FLfsYeW4kOFVfSMX3DDyw3KiB/6et+aNM3YoS/0DRFA/+x9W/CqRHFj54qsX7/e7JnzUvy8efMqNfZ///vf1Lp1a+JP2i677DLKzMys1NgresYrmBWV45ex8x4dr3jwAToNe+xlfQ4FfeNvYdD3B3Z4f+NvQ9E8WdFXLXh/oyGo6x4Vxs5I+DfOLVu2jDZt2kQdO3Y0lMobEfLPpk6dambqv/zlL+nFF1+s1NgresaGJH4Ze6KcqoW+8bUy6Psjt/Le+fjIxvYU3l/0z9G0mAMHDpjtz1/96lfm9oULFxJ/DdSkSRN68803qX79+uaXrXlxX4UTSSLegvX/V2FW9p3zZ599ZmbhV1xxhZnB84tUkbHzL7Rp27Ytffrpp2YPOfJXykYuxTOE8p6JRrCq3uNXx5Ao38FC3/haEPT9kRve3/jaUDRPoX/25lfK8vYw+xLz5Ktp06a0ZMkSsyI9cOBA8zOekHpxX0IYOyc5duxY4o6ssLCQhgwZUqGx8/0rV66kXr16mfpFY+zlPRNNw6/qPX4Ze2V52YpbWccAfStTKr6fQ1+3fzMZ9HVb3/je+sqfsrYUX3kqbt9h6wWNpCgV1201S9dOirNUXOhrhwD0Bed4CMDY46EWxzNSL6hU3DgQJfQjUpyl4ia0WHEkL8VZKm4ciBL6Edc4w9gtNUephiMV1xJWNWGkOEvFVQPeUiJSnKXiWsKqJoxrnGHslpqWVMORimsJq5owU
pyl4qoBbykRKc5ScS1hVRPGNc4wdktNS6rhSMW1hFVNGCnOUnHVgLeUiBRnqbiWsKoJ4xpnGLulpiXVcKTiWsKqJowUZ6m4asBbSkSKs1RcS1jVhHGNM4zdUtOSajhScS1hVRNGirNUXDXgLSUixVkqriWsasK4xhnGbqlpSTUcqbiWsKoJI8VZKq4a8JYSkeIsFdcSVjVhXOMMY7fUtKQajlRcS1jVhJHiLBVXDXhLiUhxloprCauaMK5xhrFbalpSDUcqriWsasJIcZaKqwa8pUSkOEvFtYRVTRjXOMPYLTUtqYYjFdcSVjVhpDhLxVUD3lIiUpyl4lrCqiaMa5xh7JaallTDkYprCauaMFKcpeKqAW8pESnOUnEtYVUTxjXOMHZLTUuq4UjFtYRVTRgpzlJx1YC3lIgUZ6m4lrCqCeMaZxi7paYl1XCk4lrCqiaMFGepuGrAW0pEirNUXEtY1YRxjTOM3VLTkmo4UnEtYVUTRoqzVFw14C0lIsVZKq4lrGrCuMYZxm6paUk1HKm4lrCqCSPFWSquGvCWEpHiLBXXElY1YVzjnJSWlla8a9cuqlevnhrIriWyf/9+at68OR04cMB61erWrUvQ11/s0NdfvtKlQ19pBfyNL6mvXzVL6tKlS3FeXh51797drxiBL3ft2rWUlZVFGzZssM7i6quvJujrL3bo6y9f6dKhr7QC/saX1NevmiVlZ2cXHzt2jGbMmOFXjMCXO27cOEpNTaUpU6ZYZ5GTk0PQ11/s0NdfvtKlQ19pBfyNL6mvXzVLKioqKu7cuTOWa30iHFrm2bhxI7Vs2dKnKOUXu337doK+/mGHvv6x1VAy9NWggn85SOvrV82SiouLi0eOHElHjhyh+fPn+xUnsOUOGzaMatWqRTNnzhRjAH39Qw99/WOroWToq0EF/3LQoK8ftTPGzgXzXmzv3r0pOzvbjziBLDM3N5dWrFghsrceCRz6et8Eoa/3TDWVCH01qeF9Lpr09bp2Jca+e/duSk9Pp3bt2tH06dNxSr4KpHl5JzMzk7Zu3UpLly6lZs2aVaE0bx6Fvt5w5FKgr3csNZYEfTWq4l1OGvX1rnY/lFRi7KGCedm2sLCQeImiT58+1L59e5h8FNS5sWzZsoWWL19utjSGDh0quvxeXsrQNwoxy7gF+sbHLVGegr6JolR8eSaKvvHVrvRTpYydb+EDVwsWLKDVq1fTtm3b6ODBg17Fc7actLQ0atOmDfXs2ZMGDx4sclAuWrjQN1pSP94HfWNnlkhPQN9EUiv2XBNJ39hrF6Wxe1EwygABEAABEAABELBPoMwZu/00EBEEQAAEQAAEQMALAuqN/dChQ2a/mvf969Sp40WdUYYiAtBXkRg+pAJ9fYCqqEjoq0iMsFTUG/v48eMpPz+fxowZQ9OmTdNJEVnFTQD6xo0uIR6EvgkhU9xJQt+40fn6oGpj59Fg48aN6fjx45SSkkJ79+7FrN3X5mC3cOhrl7ftaNDXNnG78aCvXd6xRFNt7DwaLCgooBMnTlBycjKNHj0as/ZY1FV+L/RVLlAV04O+VQSo/HHoq1cgtcYePhoM4cOsXW9DijUz6BsrscS6H/omll6xZgt9YyVm9361xh4+GgwhwazdbuPwMxr09ZOufNnQV14DPzOAvn7SrXrZKo2dR4MNGzakatWqUe3atWnfvn3m3/yHak6ePGn+jRPyVRdfqgToK0XeTlzoa4ezVBToK0U++rgqjZ1PwfMfo8nLy6OMjAxKSkoi/ls1s2bNoqysLOJf3s+n5HElJgHom5i6RZs19I2WVGLeB33166bS2COxhYxdP05kGA8B6BsPtcR5BvomjlbxZAp946Hm7zMwdn/5ovQoCKBjiAJSAt8CfRNYvChSh75RQLJ8C4zdMnCEK00AHYPbrQL6Ql+3CeirHYxdnyaBywgdv9uSQ1/o6zYBfbWDsevTJHAZoeN3W3LoC33dJqCvdjB2fZoELiN0/G5LDn2hr
9sE9NUOxq5Pk8BlhI7fbcmhL/R1m4C+2sHY9WkSuIzQ8bstOfSFvm4T0Fc7GLs+TQKXETp+tyWHvtDXbQL6agdj16dJ4DJCx++25NAX+rpNQF/tYOz6NAlcRuj43ZYc+kJftwnoqx2MXZ8mgcsIHb/bkkNf6Os2AX21g7Hr0yRwGaHjd1ty6At93Sagr3Ywdn2aBC4jdPxuSw59oa/bBPTVDsauT5PAZYSO323JoS/0dZuAvtrB2PVpEriM0PG7LTn0hb5uE9BXOxi7Pk0ClxE6frclh77Q120C+moHY9enSeAyQsfvtuTQF/q6TUBf7WDs+jQJXEbo+N2WHPpCX7cJ6KsdjF2fJoHLCB2/25JDX+jrNgF9tYOx69MkcBmh43dbcugLfd0moK92CWHskydPpkmTJumjh4w8IQB9PcGothDoq1YaTxKDvp5g9LSQhDB2T2uMwkAABEAABEDAYQIwdofFRdVAAARAAASCRwDGHjzNUWMQAAEQAAGHCcDYHRYXVQMBEAABEAgeARh78DRHjUEABEAABBwmIGLs1atXp0WLFtGAAQMcRouqgQAIgAAIgIB9AjB2+8ydizhw4EBavHhxufVq3749ffjhh5SdnU0LFiygzz//vNS9Y8eOpZdffpl27txpftatWzdav359uWV+++23VK9ePedYaq/QkiVLaO7cufTBBx/QsWPHqFmzZtS7d2+aMGECnX322aXSf+yxx2jixIn00EMPUV5envn5U089RQ888ECFVYW+ulrCvffeSx999BG9++67JrHTp0/TE088Qc8//7x5n0+cOGH079u3L+Xm5lLt2rV1VSBg2Vgx9pMnT5pOu3Xr1nTppZeSnzP2oqIi+te//kX9+vWjGjVqBExOmeru3buXvvvuOxP8yy+/pGuvvZaeffZZ6t69u/lvycnJxgBiNfamTZvS448/Xmalzj//fDrrrLNkKhzQqGzO+fn5lJGRQenp6ZSWlkaffPKJMezdu3fTxo0b6bzzziuhc+rUKbrwwgvp+uuvp1dffdW0jZSUFDp06BDt27ev5D4exN16663G/EMX9NXVyCKN/ZFHHqEnn3yS5s2bR127djV9+ubNm+n++++nyy+/3PT3uOQI+Grs/LLPnz+fnn76aTr33HPphRdeoIsvvtg0glmzZtHf//53evvtt81Ij2dsoVE8DwSmTZtmZndfffUVNW/e3Pzst7/9LfFvsfrPf/5Dw4cPpzVr1tDhw4fpoosuovHjx9PgwYPps88+o0GDBtGuXbvovvvuo2HDhtE555wjRzhgkXn0zp35ypUr6eabbz6j9rEaO7cVHiDgkifApn3VVVdRQUEBjRo16oyEjh49SnfccYd533jGFrqWLVtGQ4YMoS+++IJatGhhZnj8jkZeF1xwAfGqT2hGL19bZBBJINLY2czbtWtnjD38YnPnwR7rzH01LhkCvhj7m2++Sb///e/pH//4B/Xv358efPBBuvLKK0tqyMbOI/I//OEP1LlzZ7OMy8b9xhtvmNE9/5a5P/7xj8QdA4/+2Pxvu+02mjp1qimLGxkbOD9Xv359WrFiBd15553Es3XuJPh67733zIiSlw5//vOfm0HBDTfcIEM5QFFh7G6KzWbOA3MerPP7G83Vo0cPuuSSS8zS++jRo80y7jvvvANjjwaesnsijZ1n5suXLzd98DXXXKMsW6TjqbHzvguP4nipjYXnEXzjxo1LUeaOYeTIkWcss7Zs2dIY8Jw5c6hRo0ZmBp+VlVXy7K9//WuzT8uGzct2vKfzyiuvmGVevjh2WUuzvOTHgwQeRNSsWdMs02OJ3r+GX5mx8+CsWrVqpRJg/XjlJXyPfcOGDWVqddddd5lVIFz2CPA7d/Dml+NUAAAENklEQVTgQVq7dm1UQXnW1qZNG9q0aZMZ1G/bts1sw/HefIcOHc4oAzP2qJCK3hRp7AcOHDCDNV5V5f6azZ23VHgCxpM2XLIEPDV23lNj87zllluMc
fPsu6yLjZ075qFDh5b8mJ9hk37uuefMoSierXMjCV2/+93vzPL8/v37aevWrWbJjzuanj170k033WSWAn/yk5+UGe+tt96i2bNnm6V/XroPDQZk0bsZvTJjLywspNdee61U5Xnvlldmwo2dB4WseeRVt27dMg9quUlUR6145e2bb76p8EBjeKa8VbZu3TozGA9d3PmzufP2XPgFY9ehcUVZRBp76F6exPEqDGv9+uuvm4lXZmamWV3FJUfAU2Pnanz99dfmxeX/sUHzC84zrPBTkmzszzzzDN19990lNe/VqxfVqVOn5Dk+fPGLX/yi5OfcwU+fPp34tCxfPIgINSZeDuJ9eV7qa9Kkifn5kSNHzGiSl+N51s577fy/sk7uyuF3L3Jlxh7LqXjssetpH7x6xvupe/bsodTU1FKJff/99yWrK9zZ87mW48ePm4F+6OJT9Pzu87kZHpyFLhi7Hp3Ly6Q8Y4+8n8/E8ISNJ19t27bVXzFHM/Tc2EOc2Hj/9re/mU9j3n//fbrnnnvMZy88C+OXmzsK/iwidLVq1Yr69OljlufZnHnGzwfiQhc/z3vo//znP83hOe4YQkvq3GHwATsuj2cWPFrkBsZLfr/5zW/MzD/afUFHdbZWLRi7NdRWA23ZsoU6duxovmzgv+YVfvHhueuuu86clOf3mt95PuHOy/Dhxs6D706dOpnnR4wYAWO3qmDVgoUbOw/Mxo0bRzk5OeZLp/Brx44dxNuqPHvn1VRcMgR8M/bw6mzfvt0cprv99tvNPgybLI/S+ZfU8OG4hQsXmtl7aD+OjZn3xPlwBu/Z86E6Nmfef+fRIM/keBDAh+zY4Hkv9sYbbzQnsXk2wYd82NAjG50M4mBF9dLYK/rcjQeI+FbWbtvigfPDDz9szs7wqecGDRqYvXP+Vp23uHgFjQflvNzOy+6RS+6cLX8qt2rVKnNyGjN2u/pVJVq4sfN5GP5CgvfZeSWVB3zcp/PEi9sHr9p+/PHHVKtWraqExLNVIGDF2MPz40bBh6f4QBsbOs/AuTPgkT53GHzxyJ5nBrzfzvvofKiKD2rwpzN88TLPmDFjzECAl/t4kMAzfF5qxyVLwEtjr+gX1PAvxijr0ynZ2rsfnb9A4QE276WymfN36zxT5xk6b73xIJxPw5d1SI7phA7V8aeqoa9UsBSvv91ELsVzv8ymzlum/PsJiouLzfYLH7LkvhlbnrKaWjd22eoiOgiAAAiAAAi4TQDG7ra+qB0IgAAIgEDACMDYAyY4qgsCIAACIOA2ARi72/qidiAAAiAAAgEjAGMPmOCoLgiAAAiAgNsE/gfr8i7I6452xwAAAABJRU5ErkJggg==)\n", + "\n", + "To train it, run the following code:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AtMw7x0ybFlI" + }, + "outputs": [], + "source": [ + "!pip install datasets\n", + "%cd /content/speechbrain/templates/speech_recognition/LM\n", + "!python train.py RNNLM.yaml #--device='cpu'" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b3tnXnrWc2My" + }, + "source": [ + "As evident from the output, both training and validation losses exhibit a consistent decrease over time.\n", + "\n", + "Before delving into the code, let's explore the contents generated within the specified 
`output_folder`:\n", + "\n", + "* `train_log.txt`: This file comprises statistics (e.g., train_loss, valid_loss) computed at each epoch.\n", + "* `log.txt`: A detailed logger providing timestamps for each fundamental operation.\n", + "* `env.log`: Displays all dependencies used along with their respective versions, facilitating replicability.\n", + "\n", + "* `train.py`, `hyperparams.yaml`: Copies of the experiment file along with corresponding hyperparameters, crucial for ensuring replicability.\n", + "\n", + "* `save`: The repository where the learned model is stored.\n", + "\n", + "Within the `save` folder, subfolders contain checkpoints saved during training, formatted as `CKPT+data+time`. Typically, two checkpoints reside here: the best (i.e., the oldest, representing optimal performance) and the latest (i.e., the most recent). If a single checkpoint is present, it indicates that the last epoch is also the best.\n", + "\n", + "Each checkpoint folder encompasses all information necessary for resuming training, including models, optimizers, schedulers, epoch counters, etc. The parameters of the RNNLM model are stored in the `model.ckpt` file, utilizing a binary format readable with `torch.load`.\n", + "\n", + "The hyperparameters section of the tutorial provides a comprehensive overview of the settings used for training the language model. 
Here's a refined version of the explanation:\n", + "\n", + "### Hyperparameters\n", + "\n", + "For a detailed look at the complete `RNNLM.yaml` file, please refer to [this link](https://github.com/speechbrain/speechbrain/blob/develop/templates/speech_recognition/LM/RNNLM.yaml).\n", + "\n", + "In the initial section, fundamental configurations such as the random seed, output folder paths, and training logger are defined:\n", + "\n", + "```yaml\n", + "seed: 2602\n", + "__set_seed: !apply:torch.manual_seed [!ref ]\n", + "output_folder: !ref results/RNNLM/\n", + "save_folder: !ref /save\n", + "train_log: !ref /train_log.txt\n", + "```\n", + "\n", + "The subsequent segment outlines the paths for the text corpora used in training, validation, and testing:\n", + "\n", + "```yaml\n", + "lm_train_data: data/train.txt\n", + "lm_valid_data: data/valid.txt\n", + "lm_test_data: data/test.txt\n", + "```\n", + "\n", + "Unlike other recipes, the Language Model (LM) directly processes large raw text corpora without the need for JSON/CSV files, leveraging the [HuggingFace dataset](https://huggingface.co/) for efficiency.\n", + "\n", + "Following this, the setup for the train logger and the specification of the tokenizer (utilizing the one trained in the previous step) are detailed:\n", + "\n", + "```yaml\n", + "train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger\n", + " save_file: !ref \n", + "\n", + "tokenizer_file: ../Tokenizer/save/1000_unigram.model\n", + "```\n", + "\n", + "Moving on, essential training hyperparameters, including epochs, batch size, and learning rate, are defined, along with critical architectural parameters such as embedding dimension, RNN size, layers, and output dimensionality:\n", + "\n", + "```yaml\n", + "number_of_epochs: 20\n", + "batch_size: 80\n", + "lr: 0.001\n", + "accu_steps: 1\n", + "ckpt_interval_minutes: 15\n", + "\n", + "emb_dim: 256\n", + "rnn_size: 512\n", + "layers: 2\n", + "output_neurons: 1000\n", + "```\n", + "\n", + 
"Subsequently, the objects for training the language model are introduced, encompassing the RNN model, cost function, optimizer, and learning rate scheduler:\n", + "\n", + "```yaml\n", + "model: !new:templates.speech_recognition.LM.custom_model.CustomModel\n", + " embedding_dim: !ref \n", + " rnn_size: !ref \n", + " layers: !ref \n", + "\n", + "compute_cost: !name:speechbrain.nnet.losses.nll_loss\n", + "\n", + "optimizer: !name:torch.optim.Adam\n", + " lr: !ref \n", + " betas: (0.9, 0.98)\n", + " eps: 0.000000001\n", + "\n", + "lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler\n", + " initial_value: !ref \n", + " improvement_threshold: 0.0025\n", + " annealing_factor: 0.8\n", + " patient: 0\n", + "```\n", + "\n", + "The YAML file concludes with the specification of the epoch counter, tokenizer, and checkpointer:\n", + "\n", + "```yaml\n", + "epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter\n", + " limit: !ref \n", + "\n", + "modules:\n", + " model: !ref \n", + "\n", + "tokenizer: !new:sentencepiece.SentencePieceProcessor\n", + "\n", + "checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer\n", + " checkpoints_dir: !ref \n", + " recoverables:\n", + " model: !ref \n", + " scheduler: !ref \n", + " counter: !ref \n", + "\n", + "pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer\n", + " loadables:\n", + " tokenizer: !ref \n", + " paths:\n", + " tokenizer: !ref \n", + "```\n", + "\n", + "The pre-trainer class facilitates the connection between the tokenizer object and the pre-trained tokenizer file.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mnCM5xuy85P4" + }, + "source": [ + "### Experiment file\n", + "Let's now take a look into how the objects, functions, and hyperparameters declared in the yaml file are used in `train.py` to implement the language model.\n", + "\n", + "Let's start from the main of the `train.py`:\n", + "\n", + "\n", + "```python\n", + "# Recipe begins!\n", + "if 
__name__ == \"__main__\":\n", + "\n", + " # Reading command line arguments\n", + " hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])\n", + "\n", + " # Initialize ddp (useful only for multi-GPU DDP training)\n", + " sb.utils.distributed.ddp_init_group(run_opts)\n", + "\n", + " # Load hyperparameters file with command-line overrides\n", + " with open(hparams_file) as fin:\n", + " hparams = load_hyperpyyaml(fin, overrides)\n", + "\n", + " # Create experiment directory\n", + " sb.create_experiment_directory(\n", + " experiment_directory=hparams[\"output_folder\"],\n", + " hyperparams_to_save=hparams_file,\n", + " overrides=overrides,\n", + " )\n", + "```\n", + "\n", + "We here do some preliminary operations such as parsing the command line, initializing the distributed data-parallel (needed if multiple GPUs are used), creating the output folder, and reading the yaml file.\n", + "\n", + "After reading the yaml file with `load_hyperpyyaml`, all the objects declared in the hyperparameter files are initialized and available in a dictionary form (along with the other functions and parameters reported in the yaml file).\n", + "For instance, we will have `hparams['model']`, `hparams['optimizer']`, `hparams['batch_size']`, etc.\n", + "\n", + "\n", + "#### Data-IO Pipeline\n", + "We then call a special function that creates the dataset objects for training, validation, and test.\n", + "\n", + "```python\n", + " # Create dataset objects \"train\", \"valid\", and \"test\"\n", + " train_data, valid_data, test_data = dataio_prepare(hparams)\n", + "```\n", + "\n", + "Let's take a closer look into that.\n", + "\n", + "\n", + "```python\n", + "def dataio_prepare(hparams):\n", + " \"\"\"This function prepares the datasets to be used in the brain class.\n", + " It also defines the data processing pipeline through user-defined functions.\n", + "\n", + " The language model is trained with the text files specified by the user in\n", + " the hyperparameter file.\n", + 
"\n", + " Arguments\n", + " ---------\n", + " hparams : dict\n", + " This dictionary is loaded from the `train.yaml` file, and it includes\n", + " all the hyperparameters needed for dataset construction and loading.\n", + "\n", + " Returns\n", + " -------\n", + " datasets : list\n", + " List containing \"train\", \"valid\", and \"test\" sets that correspond\n", + " to the appropriate DynamicItemDataset object.\n", + " \"\"\"\n", + "\n", + " logging.info(\"generating datasets...\")\n", + "\n", + " # Prepare datasets\n", + " datasets = load_dataset(\n", + " \"text\",\n", + " data_files={\n", + " \"train\": hparams[\"lm_train_data\"],\n", + " \"valid\": hparams[\"lm_valid_data\"],\n", + " \"test\": hparams[\"lm_test_data\"],\n", + " },\n", + " )\n", + "\n", + " # Convert huggingface's dataset to DynamicItemDataset via a magical function\n", + " train_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(\n", + " datasets[\"train\"]\n", + " )\n", + " valid_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(\n", + " datasets[\"valid\"]\n", + " )\n", + " test_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(\n", + " datasets[\"test\"]\n", + " )\n", + "\n", + " datasets = [train_data, valid_data, test_data]\n", + " tokenizer = hparams[\"tokenizer\"]\n", + "\n", + " # Define text processing pipeline. We start from the raw text and then\n", + " # encode it using the tokenizer. 
The tokens with bos are used for feeding\n", + " # the neural network, the tokens with eos for computing the cost function.\n", + " @sb.utils.data_pipeline.takes(\"text\")\n", + " @sb.utils.data_pipeline.provides(\"text\", \"tokens_bos\", \"tokens_eos\")\n", + " def text_pipeline(text):\n", + " yield text\n", + " tokens_list = tokenizer.encode_as_ids(text)\n", + " tokens_bos = torch.LongTensor([hparams[\"bos_index\"]] + (tokens_list))\n", + " yield tokens_bos\n", + " tokens_eos = torch.LongTensor(tokens_list + [hparams[\"eos_index\"]])\n", + " yield tokens_eos\n", + "\n", + " sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)\n", + "\n", + " # 4. Set outputs to add into the batch. The batch variable will contain\n", + " # all these fields (e.g, batch.id, batch.text, batch.tokens.bos,..)\n", + " sb.dataio.dataset.set_output_keys(\n", + " datasets, [\"id\", \"text\", \"tokens_bos\", \"tokens_eos\"],\n", + " )\n", + " return train_data, valid_data, test_data\n", + "```\n", + "\n", + "The first part is just a conversion from the HuggingFace dataset to the DynamicItemDataset used in SpeechBrain.\n", + "\n", + "You can notice that we expose the text processing function `text_pipeline`, which takes in input the text of one sentence and processes it in different ways.\n", + "\n", + "The text processing function converts the raw text into the corresponding tokens (in index form). We also create other variables such as the version of the sequence with the beginning of the sentence `` token in front and the one with the end of sentence `` as the last element. Their usefulness will be clear later.\n", + "\n", + "Before returning the dataset objects, the `dataio_prepare` specifies which keys we would like to output. 
As we will see later, these keys will be available in the brain class as `batch.id`, `batch.text`, `batch.tokens_bos`, etc.\n", + "[For more information on the data loader, please take a look into this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)\n", + "\n", + "\n", + "After the definition of the datasets, the main function can go ahead with the initialization of the brain class:\n", + "\n", + "```python\n", + " # Initialize the Brain object to prepare for LM training.\n", + " lm_brain = LM(\n", + " modules=hparams[\"modules\"],\n", + " opt_class=hparams[\"optimizer\"],\n", + " hparams=hparams,\n", + " run_opts=run_opts,\n", + " checkpointer=hparams[\"checkpointer\"],\n", + " )\n", + "```\n", + "The brain class implements all the functionalities needed for supporting the training and validation loops. Its `fit` and `evaluate` methods perform training and test, respectively:\n", + "\n", + "```python\n", + " lm_brain.fit(\n", + " lm_brain.hparams.epoch_counter,\n", + " train_data,\n", + " valid_data,\n", + " train_loader_kwargs=hparams[\"train_dataloader_opts\"],\n", + " valid_loader_kwargs=hparams[\"valid_dataloader_opts\"],\n", + " )\n", + "\n", + " # Load best checkpoint for evaluation\n", + " test_stats = lm_brain.evaluate(\n", + " test_data,\n", + " min_key=\"loss\",\n", + " test_loader_kwargs=hparams[\"test_dataloader_opts\"],\n", + " )\n", + "```\n", + "The training and validation data loaders are given in input to the fit method, while the test dataset is fed into the evaluate method.\n", + "\n", + "Let's now take a look into the most important methods defined in the brain class.\n", + "\n", + "#### Forward Computations\n", + "\n", + "Let's start with the `forward` function, which defines all the computations needed to transform the input text into the output predictions.\n", + "\n", + "\n", + "```python\n", + " def compute_forward(self, batch, stage):\n", + " \"\"\"Predicts the next word given the 
previous ones.\n", + "\n", + " Arguments\n", + " ---------\n", + " batch : PaddedBatch\n", + " This batch object contains all the relevant tensors for computation.\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.\n", + "\n", + " Returns\n", + " -------\n", + " predictions : torch.Tensor\n", + " A tensor containing the posterior probabilities (predictions).\n", + " \"\"\"\n", + " batch = batch.to(self.device)\n", + " tokens_bos, _ = batch.tokens_bos\n", + " pred = self.hparams.model(tokens_bos)\n", + " return pred\n", + "```\n", + "\n", + "In this case, the chain of computation is very simple. We just put the batch on the right device and feed the encoded tokens into the model. We feed the tokens with `` into the model.\n", + "When adding the `` token, in fact, we shift all the tokens by one element. This way, our input corresponds to the previous token while our model tries to predict the current one.\n", + "\n", + "#### Compute Objectives\n", + "\n", + "Let's take a look now into the `compute_objectives` method that takes in input the targets, the predictions, and estimates a loss function:\n", + "\n", + "```python\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " \"\"\"Computes the loss given the predicted and targeted outputs.\n", + "\n", + " Arguments\n", + " ---------\n", + " predictions : torch.Tensor\n", + " The posterior probabilities from `compute_forward`.\n", + " batch : PaddedBatch\n", + " This batch object contains all the relevant tensors for computation.\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.\n", + "\n", + " Returns\n", + " -------\n", + " loss : torch.Tensor\n", + " A one-element tensor used for backpropagating the gradient.\n", + " \"\"\"\n", + " batch = batch.to(self.device)\n", + " tokens_eos, tokens_len = batch.tokens_eos\n", + " loss = self.hparams.compute_cost(\n", + " predictions, tokens_eos, length=tokens_len\n", + " )\n", + " 
return loss\n", + "```\n", + "The predictions are those computed in the forward method. The cost function is evaluated by comparing these predictions with the target tokens. We here use the tokens with the special `` token at the end because we want to predict when the sentence ends as well.\n", + "\n", + "####**Other methods**\n", + "Beyond these two important functions, we have some other methods that are used by the brain class. In particular, the `fit_batch` trains each batch of data (by computing the gradient with the backward method and the updates with step one). The `on_stage_end`, is called at the end of each stage (e.g, at the end of each training epoch) and mainly takes care of statistic management, learning rate annealing, and checkpointing. [For a more detailed description of the brain class, please take a look into this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html). For more information on checkpointing, [take a look here](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/checkpointing.html)\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tFJ34alleSBH" + }, + "source": [ + "### Step 4: Training the Attention-Based End-to-End Speech Recognizer\n", + "\n", + "Now it's time to train our attention-based end-to-end speech recognizer. This offline recognizer employs a sophisticated architecture, utilizing a combination of convolutional, recurrent, and fully connected models in the encoder, and an autoregressive GRU decoder.\n", + "\n", + "The crucial link between the encoder and decoder is an attention mechanism. 
To enhance performance, the final sequence of words is obtained through beam search, coupled with the previously trained RNNLM.\n", + "\n", + "#### Architecture Overview:\n", + "- **Encoder:** Combines convolutional, recurrent, and fully connected models.\n", + "- **Decoder:** Autoregressive GRU decoder.\n", + "- **Attention Mechanism:** Enhances information flow between the encoder and decoder.\n", + "- **CTC (Connectionist Temporal Classification):** Jointly trained with the attention-based system, applied on top of the encoder.\n", + "- **Data Augmentation:** Employed techniques to augment data and improve overall system performance.\n", + "\n", + "\n", + "### Train the speech recognizer\n", + "To train the speech recognizer, run the following code:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "55c4jnVCeoGa" + }, + "outputs": [], + "source": [ + "%cd /content/speechbrain/templates/speech_recognition/ASR\n", + "!python train.py train.yaml --number_of_epochs=1 --batch_size=2 --enable_add_reverb=False --enable_add_noise=False #To speed up" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "z-JCc1qyXpFI" + }, + "source": [ + "Executing this code may require a considerable amount of time on Google Colab. Monitoring the log, you'll observe a progressive improvement in loss after each epoch.\n", + "\n", + "Similar to the RNNLM section, the specified `output_folder` will include the previously discussed files and folders. Additionally, a file named `wer.txt` is saved, providing a comprehensive report on the Word Error Rate (WER) achieved for each test sentence. 
This file not only captures the WER values but also includes the alignment information with the true transcription for enhanced analysis:\n", + "\n", + "\n", + "```\n", + "%WER 3.09 [ 1622 / 52576, 167 ins, 171 del, 1284 sub ]\n", + "%SER 33.66 [ 882 / 2620 ]\n", + "Scored 2620 sentences, 0 not present in hyp.\n", + "================================================================================\n", + "ALIGNMENTS\n", + "\n", + "Format:\n", + ", WER DETAILS\n", + " ; reference ; on ; the ; first ; line\n", + " I ; S ; = ; = ; S ; D \n", + " and ; hypothesis ; on ; the ; third ; \n", + "================================================================================\n", + "672-122797-0033, %WER 0.00 [ 0 / 2, 0 ins, 0 del, 0 sub ]\n", + "A ; STORY\n", + "= ; = \n", + "A ; STORY\n", + "================================================================================\n", + "2094-142345-0041, %WER 0.00 [ 0 / 1, 0 ins, 0 del, 0 sub ]\n", + "DIRECTION\n", + " = \n", + "DIRECTION\n", + "================================================================================\n", + "2830-3980-0026, %WER 50.00 [ 1 / 2, 0 ins, 0 del, 1 sub ]\n", + "VERSE ; TWO\n", + " S ; =\n", + "FIRST ; TWO\n", + "================================================================================\n", + "237-134500-0025, %WER 50.00 [ 1 / 2, 0 ins, 0 del, 1 sub ]\n", + "OH ; EMIL\n", + "= ; S \n", + "OH ; AMIEL\n", + "================================================================================\n", + "7127-75947-0012, %WER 0.00 [ 0 / 2, 0 ins, 0 del, 0 sub ]\n", + "INDEED ; AH\n", + " = ; =\n", + "INDEED ; AH\n", + "================================================================================\n", + "\n", + "```\n", + "\n", + "\n", + "\n", + "Let's now take a closer look into the hyperparameter (`train.yaml`) and experiment script (`train.py`).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dfHa8TQMYUle" + }, + "source": [ + "### Hyperparameters\n", + "\n", + "The hyperparameter 
file starts with the definition of basic things, such as seed and path settings:\n", + "\n", + "```yaml\n", + "# Seed needs to be set at top of yaml, before objects with parameters are instantiated\n", + "seed: 2602\n", + "__set_seed: !apply:torch.manual_seed [!ref ]\n", + "\n", + "# If you plan to train a system on an HPC cluster with a big dataset,\n", + "# we strongly suggest doing the following:\n", + "# 1- Compress the dataset in a single tar or zip file.\n", + "# 2- Copy your dataset locally (i.e., the local disk of the computing node).\n", + "# 3- Uncompress the dataset in the local folder.\n", + "# 4- Set data_folder with the local path\n", + "# Reading data from the local disk of the compute node (e.g. $SLURM_TMPDIR with SLURM-based clusters) is very important.\n", + "# It allows you to read the data much faster without slowing down the shared filesystem.\n", + "\n", + "data_folder: ../data # In this case, data will be automatically downloaded here.\n", + "data_folder_noise: !ref /noise # The noisy sequencies for data augmentation will automatically be downloaded here.\n", + "data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here.\n", + "\n", + "# Data for augmentation\n", + "NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1\n", + "RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1\n", + "\n", + "output_folder: !ref results/CRDNN_BPE_960h_LM/\n", + "test_wer_file: !ref /wer_test.txt\n", + "save_folder: !ref /save\n", + "train_log: !ref /train_log.txt\n", + "\n", + "# Language model (LM) pretraining\n", + "# NB: To avoid mismatch, the speech recognizer must be trained with the same\n", + "# tokenizer used for LM training. Here, we download everything from the\n", + "# speechbrain HuggingFace repository. 
However, a local path pointing to a\n", + "# directory containing the lm.ckpt and tokenizer.ckpt may also be specified\n", + "# instead. E.g if you want to use your own LM / tokenizer.\n", + "pretrained_path: speechbrain/asr-crdnn-rnnlm-librispeech\n", + "\n", + "\n", + "# Path where data manifest files will be stored. The data manifest files are created by the\n", + "# data preparation script\n", + "train_annotation: ../train.json\n", + "valid_annotation: ../valid.json\n", + "test_annotation: ../test.json\n", + "noise_annotation: ../noise.csv\n", + "rir_annotation: ../rir.csv\n", + "\n", + "skip_prep: False\n", + "\n", + "# The train logger writes training statistics to a file, as well as stdout.\n", + "train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger\n", + " save_file: !ref \n", + "```\n", + "\n", + "The `data_folder` corresponds to the path where the mini-librispeech is stored. If not available, the mini-librispeech dataset will be downloaded here. As mentioned, the script also supports data augmentation. To do it, we use the impulse responses and noise sequences of the open rir dataset (again, if not available it will be downloaded here).\n", + "\n", + "We also specify the folder where the language model is saved. In this case, we use the official pre-trained language model available on HuggingFace, but you can change it and use the one trained at the previous step (you should point to the checkpoint in the folder where the best `model.cpkt` is stored).\n", + "What is important is that the set of tokens used for the LM and the one used for training the speech recognizer match exactly.\n", + "\n", + "We also have to specify the data manifest files for training, validation, and test. 
If not available, these files will be created by the data preparation script called in `train.py`.\n", + "\n", + "After that, we define a bunch of parameters for training, feature extraction, model definition, and decoding:\n", + "\n", + "```yaml\n", + "# Training parameters\n", + "number_of_epochs: 15\n", + "number_of_ctc_epochs: 5\n", + "batch_size: 8\n", + "lr: 1.0\n", + "ctc_weight: 0.5\n", + "sorting: ascending\n", + "ckpt_interval_minutes: 15 # save checkpoint every N min\n", + "label_smoothing: 0.1\n", + "\n", + "# Dataloader options\n", + "train_dataloader_opts:\n", + " batch_size: !ref \n", + "\n", + "valid_dataloader_opts:\n", + " batch_size: !ref \n", + "\n", + "test_dataloader_opts:\n", + " batch_size: !ref \n", + "\n", + "\n", + "# Feature parameters\n", + "sample_rate: 16000\n", + "n_fft: 400\n", + "n_mels: 40\n", + "\n", + "# Model parameters\n", + "activation: !name:torch.nn.LeakyReLU\n", + "dropout: 0.15\n", + "cnn_blocks: 2\n", + "cnn_channels: (128, 256)\n", + "inter_layer_pooling_size: (2, 2)\n", + "cnn_kernelsize: (3, 3)\n", + "time_pooling_size: 4\n", + "rnn_class: !name:speechbrain.nnet.RNN.LSTM\n", + "rnn_layers: 4\n", + "rnn_neurons: 1024\n", + "rnn_bidirectional: True\n", + "dnn_blocks: 2\n", + "dnn_neurons: 512\n", + "emb_size: 128\n", + "dec_neurons: 1024\n", + "output_neurons: 1000 # Number of tokens (same as LM)\n", + "blank_index: 0\n", + "bos_index: 0\n", + "eos_index: 0\n", + "unk_index: 0\n", + "\n", + "# Decoding parameters\n", + "min_decode_ratio: 0.0\n", + "max_decode_ratio: 1.0\n", + "valid_beam_size: 8\n", + "test_beam_size: 80\n", + "eos_threshold: 1.5\n", + "using_max_attn_shift: True\n", + "max_attn_shift: 240\n", + "lm_weight: 0.50\n", + "ctc_weight_decode: 0.0\n", + "coverage_penalty: 1.5\n", + "temperature: 1.25\n", + "temperature_lm: 1.25\n", + "```\n", + "\n", + "For instance, we define the number of epochs, the initial learning rate, the batch size, the weight of the CTC loss, and many others.\n", + "\n", + "By 
setting sorting to `ascending`, we sort all the sentences in ascending order before creating the batches. This minimizes the need for zero paddings and thus makes training faster without losing performance (at least in this task with this model).\n", + "\n", + "Many other parameters, such as those for data augmentations, are defined. For the exact meaning of all of them, you can refer to the docstring of the function/class using this hyperparameter.\n", + "\n", + "In the next block, we define the most important classes that are needed to implement the speech recognizer:\n", + "\n", + "\n", + "```yaml\n", + "# The first object passed to the Brain class is this \"Epoch Counter\"\n", + "# which is saved by the Checkpointer so that training can be resumed\n", + "# if it gets interrupted at any point.\n", + "epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter\n", + " limit: !ref \n", + "\n", + "# Feature extraction\n", + "compute_features: !new:speechbrain.lobes.features.Fbank\n", + " sample_rate: !ref \n", + " n_fft: !ref \n", + " n_mels: !ref \n", + "\n", + "# Feature normalization (mean and std)\n", + "normalize: !new:speechbrain.processing.features.InputNormalization\n", + " norm_type: global\n", + "\n", + "# Added noise and reverb come from OpenRIR dataset, automatically\n", + "# downloaded and prepared with this Environmental Corruption class.\n", + "env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt\n", + " openrir_folder: !ref \n", + " babble_prob: 0.0\n", + " reverb_prob: 0.0\n", + " noise_prob: 1.0\n", + " noise_snr_low: 0\n", + " noise_snr_high: 15\n", + "\n", + "# Adds speech change + time and frequnecy dropouts (time-domain implementation).\n", + "augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment\n", + " sample_rate: !ref \n", + " speeds: [95, 100, 105]\n", + "\n", + "# The CRDNN model is an encoder that combines CNNs, RNNs, and DNNs.\n", + "encoder: !new:speechbrain.lobes.models.CRDNN.CRDNN\n", + " input_shape: [null, null, 
!ref ]\n", + " activation: !ref \n", + " dropout: !ref \n", + " cnn_blocks: !ref \n", + " cnn_channels: !ref \n", + " cnn_kernelsize: !ref \n", + " inter_layer_pooling_size: !ref \n", + " time_pooling: True\n", + " using_2d_pooling: False\n", + " time_pooling_size: !ref \n", + " rnn_class: !ref \n", + " rnn_layers: !ref \n", + " rnn_neurons: !ref \n", + " rnn_bidirectional: !ref \n", + " rnn_re_init: True\n", + " dnn_blocks: !ref \n", + " dnn_neurons: !ref \n", + " use_rnnp: False\n", + "\n", + "# Embedding (from indexes to an embedding space of dimension emb_size).\n", + "embedding: !new:speechbrain.nnet.embedding.Embedding\n", + " num_embeddings: !ref \n", + " embedding_dim: !ref \n", + "\n", + "# Attention-based RNN decoder.\n", + "decoder: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder\n", + " enc_dim: !ref \n", + " input_size: !ref \n", + " rnn_type: gru\n", + " attn_type: location\n", + " hidden_size: !ref \n", + " attn_dim: 1024\n", + " num_layers: 1\n", + " scaling: 1.0\n", + " channels: 10\n", + " kernel_size: 100\n", + " re_init: True\n", + " dropout: !ref \n", + "\n", + "# Linear transformation on the top of the encoder.\n", + "ctc_lin: !new:speechbrain.nnet.linear.Linear\n", + " input_size: !ref \n", + " n_neurons: !ref \n", + "\n", + "# Linear transformation on the top of the decoder.\n", + "seq_lin: !new:speechbrain.nnet.linear.Linear\n", + " input_size: !ref \n", + " n_neurons: !ref \n", + "\n", + "# Final softmax (for log posteriors computation).\n", + "log_softmax: !new:speechbrain.nnet.activations.Softmax\n", + " apply_log: True\n", + "\n", + "# Cost definition for the CTC part.\n", + "ctc_cost: !name:speechbrain.nnet.losses.ctc_loss\n", + " blank_index: !ref \n", + "\n", + "# Tokenizer initialization\n", + "tokenizer: !new:sentencepiece.SentencePieceProcessor\n", + "\n", + "# Objects in \"modules\" dict will have their parameters moved to the correct\n", + "# device, as well as having train()/eval() called on them by the Brain class\n", + 
"modules:\n", + " encoder: !ref \n", + " embedding: !ref \n", + " decoder: !ref \n", + " ctc_lin: !ref \n", + " seq_lin: !ref \n", + " normalize: !ref \n", + " env_corrupt: !ref \n", + " lm_model: !ref \n", + "\n", + "# Gathering all the submodels in a single model object.\n", + "model: !new:torch.nn.ModuleList\n", + " - - !ref \n", + " - !ref \n", + " - !ref \n", + " - !ref \n", + " - !ref \n", + "\n", + "# This is the RNNLM that is used according to the Huggingface repository\n", + "# NB: It has to match the pre-trained RNNLM!!\n", + "lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM\n", + " output_neurons: !ref \n", + " embedding_dim: !ref \n", + " activation: !name:torch.nn.LeakyReLU\n", + " dropout: 0.0\n", + " rnn_layers: 2\n", + " rnn_neurons: 2048\n", + " dnn_blocks: 1\n", + " dnn_neurons: 512\n", + " return_hidden: True # For inference\n", + "```\n", + "\n", + "For instance, we define the function for computing features and normalizing them. We define the class for environmental corruption and data augmentation ([please, see this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-augmentation.html)), the architecture of the encoder, decoder, and the other models needed by the speech recognizer.\n", + "\n", + "\n", + "We then report the parameters for beamsearch:\n", + "\n", + "```yaml\n", + "# Define scorers for beam search\n", + "\n", + "# If ctc_scorer is set, the decoder uses CTC + attention beamsearch. 
This\n", + "# improves the performance, but slows down decoding.\n", + "ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer\n", + " eos_index: !ref \n", + " blank_index: !ref \n", + " ctc_fc: !ref \n", + "\n", + "# If coverage_scorer is set, coverage penalty is applied based on accumulated\n", + "# attention weights during beamsearch.\n", + "coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer\n", + " vocab_size: !ref \n", + "\n", + "# If the lm_scorer is set, a language model\n", + "# is applied (with a weight specified in scorer).\n", + "rnnlm_scorer: !new:speechbrain.decoders.scorer.RNNLMScorer\n", + " language_model: !ref \n", + " temperature: !ref \n", + "\n", + "# Gathering all scorers in a scorer instance for beamsearch:\n", + "# - full_scorers are scorers which score on full vocab set, while partial_scorers\n", + "# are scorers which score on pruned tokens.\n", + "# - The number of pruned tokens is decided by scorer_beam_scale * beam_size.\n", + "# - For some scorers like ctc_scorer, ngramlm_scorer, putting them\n", + "# into full_scorers list would be too heavy. partial_scorers are more\n", + "# efficient because they score on pruned tokens at little cost of\n", + "# performance drop. For other scorers, please see the speechbrain.decoders.scorer.\n", + "test_scorer: !new:speechbrain.decoders.scorer.ScorerBuilder\n", + " scorer_beam_scale: 1.5\n", + " full_scorers: [\n", + " !ref ,\n", + " !ref ]\n", + " partial_scorers: [!ref ]\n", + " weights:\n", + " rnnlm: !ref \n", + " coverage: !ref \n", + " ctc: !ref \n", + "\n", + "valid_scorer: !new:speechbrain.decoders.scorer.ScorerBuilder\n", + " full_scorers: [!ref ]\n", + " weights:\n", + " coverage: !ref \n", + "\n", + "# Beamsearch is applied on the top of the decoder. For a description of\n", + "# the other parameters, please see the speechbrain.decoders.S2SRNNBeamSearcher.\n", + "\n", + "# It makes sense to have a lighter search during validation. 
In this case,\n", + "# we don't use scorers during decoding.\n", + "valid_search: !new:speechbrain.decoders.S2SRNNBeamSearcher\n", + " embedding: !ref \n", + " decoder: !ref \n", + " linear: !ref \n", + " bos_index: !ref \n", + " eos_index: !ref \n", + " min_decode_ratio: !ref \n", + " max_decode_ratio: !ref \n", + " beam_size: !ref \n", + " eos_threshold: !ref \n", + " using_max_attn_shift: !ref \n", + " max_attn_shift: !ref \n", + " temperature: !ref \n", + " scorer: !ref \n", + "\n", + "# The final decoding on the test set can be more computationally demanding.\n", + "# In this case, we use the LM + CTC probabilities during decoding as well,\n", + "# which are defined in scorer.\n", + "# Please, remove scorer if you need a faster decoder.\n", + "test_search: !new:speechbrain.decoders.S2SRNNBeamSearcher\n", + " embedding: !ref \n", + " decoder: !ref \n", + " linear: !ref \n", + " bos_index: !ref \n", + " eos_index: !ref \n", + " min_decode_ratio: !ref \n", + " max_decode_ratio: !ref \n", + " beam_size: !ref \n", + " eos_threshold: !ref \n", + " using_max_attn_shift: !ref \n", + " max_attn_shift: !ref \n", + " temperature: !ref \n", + " scorer: !ref \n", + "```\n", + "We here employ different hyperparameters for validation and test beamsearch. In particular, a smaller beam size is used for the validation stage. The reason is that validation is done at the end of each epoch and should thus be done quickly. 
Evaluation, instead, is done only once at the end and we can be more accurate.\n", + "\n", + "\n", + "Finally, we declare the last objects needed by the training recipes, such as lr_annealing, optimizer, checkpointer, etc:\n", + "\n", + "\n", + "```yaml\n", + "# This function manages learning rate annealing over the epochs.\n", + "# We here use the NewBoB algorithm, that anneals the learning rate if\n", + "# the improvements over two consecutive epochs is less than the defined\n", + "# threshold.\n", + "lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler\n", + " initial_value: !ref \n", + " improvement_threshold: 0.0025\n", + " annealing_factor: 0.8\n", + " patient: 0\n", + "\n", + "# This optimizer will be constructed by the Brain class after all parameters\n", + "# are moved to the correct device. Then it will be added to the checkpointer.\n", + "opt_class: !name:torch.optim.Adadelta\n", + " lr: !ref \n", + " rho: 0.95\n", + " eps: 1.e-8\n", + "\n", + "# Functions that compute the statistics to track during the validation step.\n", + "error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats\n", + "\n", + "cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats\n", + " split_tokens: True\n", + "\n", + "# This object is used for saving the state of training both so that it\n", + "# can be resumed if it gets interrupted, and also so that the best checkpoint\n", + "# can be later loaded for evaluation or inference.\n", + "checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer\n", + " checkpoints_dir: !ref \n", + " recoverables:\n", + " model: !ref \n", + " scheduler: !ref \n", + " normalizer: !ref \n", + " counter: !ref \n", + "\n", + "# This object is used to pretrain the language model and the tokenizers\n", + "# (defined above). 
In this case, we also pretrain the ASR model (to make\n", + "# sure the model converges on a small amount of data)\n", + "pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer\n", + " collect_in: !ref \n", + " loadables:\n", + " lm: !ref \n", + " tokenizer: !ref \n", + " model: !ref \n", + " paths:\n", + " lm: !ref /lm.ckpt\n", + " tokenizer: !ref /tokenizer.ckpt\n", + " model: !ref /asr.ckpt\n", + "```\n", + "\n", + "The final object is the pretrainer that links the language model, the tokenizer, and the acoustic speech recognition model with their corresponding files used for pre-training. We here pre-train the acoustic model as well. One such a small dataset, it is very hard to make an end-to-end speech recognizer converging and we thus use another model to pre-trained it (you should skip this part when training on a larger dataset)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6xcAJ4OlYZCh" + }, + "source": [ + "### Experiment file\n", + "Let's now see how the different elements declared in the yaml files are connected in the train.py.\n", + "The training script closely follows the one already described for the language model.\n", + "\n", + "The `main` function starts with the implementation of basic functionalities such as parsing the command line, initializing the distributed data-parallel (needed for multiple GPU training), and reading the yaml file.\n", + "\n", + "\n", + "\n", + "```python\n", + "\n", + "if __name__ == \"__main__\":\n", + "\n", + " # Reading command line arguments\n", + " hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])\n", + "\n", + " # Initialize ddp (useful only for multi-GPU DDP training)\n", + " sb.utils.distributed.ddp_init_group(run_opts)\n", + "\n", + " # Load hyperparameters file with command-line overrides\n", + " with open(hparams_file) as fin:\n", + " hparams = load_hyperpyyaml(fin, overrides)\n", + "\n", + " # Create experiment directory\n", + " 
sb.create_experiment_directory(\n", + " experiment_directory=hparams[\"output_folder\"],\n", + " hyperparams_to_save=hparams_file,\n", + " overrides=overrides,\n", + " )\n", + "\n", + " # Data preparation, to be run on only one process.\n", + " if not hparams[\"skip_prep\"]:\n", + " sb.utils.distributed.run_on_main(\n", + " prepare_mini_librispeech,\n", + " kwargs={\n", + " \"data_folder\": hparams[\"data_folder\"],\n", + " \"save_json_train\": hparams[\"train_annotation\"],\n", + " \"save_json_valid\": hparams[\"valid_annotation\"],\n", + " \"save_json_test\": hparams[\"test_annotation\"],\n", + " },\n", + " )\n", + " sb.utils.distributed.run_on_main(hparams[\"prepare_noise_data\"])\n", + " sb.utils.distributed.run_on_main(hparams[\"prepare_rir_data\"])\n", + "\n", + "```\n", + "The yaml file is read with the `load_hyperpyyaml` function. After reading it, we will have all the declared object initialized and available with the hparams dictionary along with the other functions and variables (e.g, `hparams['model']`, `hparams['test_search']`,`hparams['batch_size']`).\n", + "\n", + "After that, we run the data preparation that has the goal of creating the data manifest file (if not already available). This operation requires writing some files on a disk. For this reason, we have to use the `sb.utils.distributed.run_on_main` to make sure that this operation is executed by the main process only. This avoids possible conflicts when using multiple GPUs with DDP. 
For more info on multi-GPU training in SpeechBrain,
The tokens with BOS are used for feeding\n", + " # decoder during training, the tokens with EOS for computing the cost function.\n", + " # The tokens without BOS or EOS is for computing CTC loss.\n", + " @sb.utils.data_pipeline.takes(\"words\")\n", + " @sb.utils.data_pipeline.provides(\n", + " \"words\", \"tokens_list\", \"tokens_bos\", \"tokens_eos\", \"tokens\"\n", + " )\n", + " def text_pipeline(words):\n", + " \"\"\"Processes the transcriptions to generate proper labels\"\"\"\n", + " yield words\n", + " tokens_list = hparams[\"tokenizer\"].encode_as_ids(words)\n", + " yield tokens_list\n", + " tokens_bos = torch.LongTensor([hparams[\"bos_index\"]] + (tokens_list))\n", + " yield tokens_bos\n", + " tokens_eos = torch.LongTensor(tokens_list + [hparams[\"eos_index\"]])\n", + " yield tokens_eos\n", + " tokens = torch.LongTensor(tokens_list)\n", + " yield tokens\n", + "\n", + " # Define datasets from json data manifest file\n", + " # Define datasets sorted by ascending lengths for efficiency\n", + " datasets = {}\n", + " data_folder = hparams[\"data_folder\"]\n", + " for dataset in [\"train\", \"valid\", \"test\"]:\n", + " datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(\n", + " json_path=hparams[f\"{dataset}_annotation\"],\n", + " replacements={\"data_root\": data_folder},\n", + " dynamic_items=[audio_pipeline, text_pipeline],\n", + " output_keys=[\n", + " \"id\",\n", + " \"sig\",\n", + " \"words\",\n", + " \"tokens_bos\",\n", + " \"tokens_eos\",\n", + " \"tokens\",\n", + " ],\n", + " )\n", + " hparams[f\"{dataset}_dataloader_opts\"][\"shuffle\"] = False\n", + "\n", + " # Sorting traiing data with ascending order makes the code much\n", + " # faster because we minimize zero-padding. 
In most of the cases, this\n", + " # does not harm the performance.\n", + " if hparams[\"sorting\"] == \"ascending\":\n", + " datasets[\"train\"] = datasets[\"train\"].filtered_sorted(sort_key=\"length\")\n", + " hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n", + "\n", + " elif hparams[\"sorting\"] == \"descending\":\n", + " datasets[\"train\"] = datasets[\"train\"].filtered_sorted(\n", + " sort_key=\"length\", reverse=True\n", + " )\n", + " hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n", + "\n", + " elif hparams[\"sorting\"] == \"random\":\n", + " hparams[\"train_dataloader_opts\"][\"shuffle\"] = True\n", + " pass\n", + "\n", + " else:\n", + " raise NotImplementedError(\n", + " \"sorting must be random, ascending or descending\"\n", + " )\n", + " return datasets\n", + "```\n", + "\n", + "Within `dataio_prepare` we define subfunctions for processing the entries defined in the JSON files.\n", + "The first function, called `audio_pipeline` takes the path of the audio signal (`wav`) and reads it. It returns a tensor containing the read speech sentence. The entry in input to this function (i.e, `wav`) must have the same name of the corresponding key in the data manifest file:\n", + "\n", + "```json\n", + " \"1867-154075-0032\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/1867/154075/1867-154075-0032.flac\",\n", + " \"length\": 16.09,\n", + " \"words\": \"AND HE BRUSHED A HAND ACROSS HIS FOREHEAD AND WAS INSTANTLY HIMSELF CALM AND COOL VERY WELL THEN IT SEEMS I'VE MADE AN ASS OF MYSELF BUT I'LL TRY TO MAKE UP FOR IT NOW WHAT ABOUT CAROLINE\"\n", + " },\n", + "```\n", + "\n", + "Similarly, we define another function called `text_pipeline` for processing the signal transcriptions and put them in a format usable by the defined model. The function reads the string `words` defined in the JSON file and tokenizes it (outputting the index of each token). 
It returns the sequence of tokens with the special begin-of-sentence `` token in front, and the version with the end-of-sentence `` token at the end as well.
This operation should be done by a single process only even when using multiple GPUs with DDP).\n", + "\n", + "At this point we initialize the Brain class and use it for running training and evaluation:\n", + "\n", + "\n", + "```python\n", + "\n", + " # Trainer initialization\n", + " asr_brain = ASR(\n", + " modules=hparams[\"modules\"],\n", + " opt_class=hparams[\"opt_class\"],\n", + " hparams=hparams,\n", + " run_opts=run_opts,\n", + " checkpointer=hparams[\"checkpointer\"],\n", + " )\n", + "\n", + " # Training\n", + " asr_brain.fit(\n", + " asr_brain.hparams.epoch_counter,\n", + " datasets[\"train\"],\n", + " datasets[\"valid\"],\n", + " train_loader_kwargs=hparams[\"train_dataloader_opts\"],\n", + " valid_loader_kwargs=hparams[\"valid_dataloader_opts\"],\n", + " )\n", + "\n", + " # Load best checkpoint for evaluation\n", + " test_stats = asr_brain.evaluate(\n", + " test_set=datasets[\"test\"],\n", + " min_key=\"WER\",\n", + " test_loader_kwargs=hparams[\"test_dataloader_opts\"],\n", + " )\n", + "```\n", + "\n", + "For more information on how the Brain class works, [please see this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html)\n", + "Note that the `fit` and `evaluate` methods take in input the dataset objects as well. From this dataset, a pytorch dataloader is created automatically. The latter creates the batches used for training and evaluation.\n", + "\n", + "When speech sentences with **different lengths** are sampled, zero-padding is performed. To keep track of the real length of each sentence within each batch, the dataloader returns a special tensor containing **relative lengths** as well. 
For instance, let's assume `batch.sig[0]` to be variable that contains the input waveform as a [batch, time] tensor:\n", + "\n", + "```\n", + "tensor([[1, 1, 0, 0],\n", + " [1, 1, 1, 0],\n", + " [1, 1, 0, 0]])\n", + "```\n", + "The `batch.sig[1]` will contain the following relative lengths:\n", + "\n", + "```\n", + "tensor([0.5000, 0.7500, 1.0000])\n", + "```\n", + "\n", + "With this information, we can exclude zero-padded steps from some computations (e.g feature normalization, statistical pooling, loss, etc).\n", + "\n", + "### Why relative lengths instead of absolute lengths?\n", + "\n", + "The preference for relative lengths over absolute lengths stems from the dynamic nature of time resolution within a neural network. Several operations, including pooling, stride convolution, transposed convolution, FFT computation, and others, have the potential to alter the number of time steps in a sequence.\n", + "\n", + "By employing the relative position trick, the calculation of actual time steps at each stage of neural computations becomes more flexible. This is achieved by multiplying the relative length by the total length of the tensor. Consequently, the approach adapts to changes in time resolution introduced by various network operations, ensuring a more robust and adaptable representation of temporal information throughout the neural network's computations.\n", + "\n", + "\n", + "#### Forward Computations\n", + "In the Brain class we have to define some important methods such as:\n", + "- `compute_forward`, that specifies all the computations needed to transform the input waveform into the output posterior probabilities)\n", + "- `compute_objective`, which computes the loss function given the labels and the predictions performed by the model.\n", + "\n", + "Let's take a look into `compute_forward` first:\n", + "\n", + "\n", + "```python\n", + " def compute_forward(self, batch, stage):\n", + " \"\"\"Runs all the computation of the CTC + seq2seq ASR. 
It returns the\n", + " posterior probabilities of the CTC and seq2seq networks.\n", + "\n", + " Arguments\n", + " ---------\n", + " batch : PaddedBatch\n", + " This batch object contains all the relevant tensors for computation.\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.\n", + "\n", + " Returns\n", + " -------\n", + " predictions : dict\n", + " At training time it returns predicted seq2seq log probabilities.\n", + " If needed it also returns the ctc output log probabilities.\n", + " At validation/test time, it returns the predicted tokens as well.\n", + " \"\"\"\n", + " # We first move the batch to the appropriate device.\n", + " batch = batch.to(self.device)\n", + "\n", + " feats, self.feat_lens = self.prepare_features(stage, batch.sig)\n", + " tokens_bos, _ = self.prepare_tokens(stage, batch.tokens_bos)\n", + "\n", + " # Running the encoder (prevent propagation to feature extraction)\n", + " encoded_signal = self.modules.encoder(feats.detach())\n", + "\n", + " # Embed tokens and pass tokens & encoded signal to decoder\n", + " embedded_tokens = self.modules.embedding(tokens_bos.detach())\n", + " decoder_outputs, _ = self.modules.decoder(\n", + " embedded_tokens, encoded_signal, self.feat_lens\n", + " )\n", + "\n", + " # Output layer for seq2seq log-probabilities\n", + " logits = self.modules.seq_lin(decoder_outputs)\n", + " predictions = {\"seq_logprobs\": self.hparams.log_softmax(logits)}\n", + "\n", + " if self.is_ctc_active(stage):\n", + " # Output layer for ctc log-probabilities\n", + " ctc_logits = self.modules.ctc_lin(encoded_signal)\n", + " predictions[\"ctc_logprobs\"] = self.hparams.log_softmax(ctc_logits)\n", + "\n", + " elif stage != sb.Stage.TRAIN:\n", + " if stage == sb.Stage.VALID:\n", + " hyps, _, _, _ = self.hparams.valid_search(\n", + " encoded_signal, self.feat_lens\n", + " )\n", + " elif stage == sb.Stage.TEST:\n", + " hyps, _, _, _ = self.hparams.test_search(\n", + " encoded_signal, 
self.feat_lens\n", + " )\n", + "\n", + " predictions[\"tokens\"] = hyps\n", + "\n", + " return predictions\n", + "```\n", + "\n", + "\n", + "The function takes the batch variable and the current stage (that can be `sb.Stage.TRAIN`, `sb.Stage.VALID`, or `sb.Stage.TEST`). We then put the batch on the right device, compute the features, and encode them with our CRDNN encoder.\n", + "For more information on feature computation, [take a look into this tutorial](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-features.html), while for more details on the speech augmentation [take a look here](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-augmentation.html).\n", + "After that, we feed our encoded states into an autoregressive attention-based decoder that performs some predictions over the tokens.\n", + "At validation and test stages, we apply beamsearch on the top of the token predictions.\n", + "Our system applies an additional CTC loss on the top of the encoder. The CTC can be turned off after N epochs if desired.\n", + "\n", + "\n", + "#### Compute Objectives\n", + "\n", + "Let's take a look now into the compute_objectives function:\n", + "\n", + "\n", + "\n", + "```python\n", + "\n", + " def compute_objectives(self, predictions, batch, stage):\n", + " \"\"\"Computes the loss given the predicted and targeted outputs. 
We here\n", + " do multi-task learning and the loss is a weighted sum of the ctc + seq2seq\n", + " costs.\n", + "\n", + " Arguments\n", + " ---------\n", + " predictions : dict\n", + " The output dict from `compute_forward`.\n", + " batch : PaddedBatch\n", + " This batch object contains all the relevant tensors for computation.\n", + " stage : sb.Stage\n", + " One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.\n", + "\n", + " Returns\n", + " -------\n", + " loss : torch.Tensor\n", + " A one-element tensor used for backpropagating the gradient.\n", + " \"\"\"\n", + "\n", + " # Compute sequence loss against targets with EOS\n", + " tokens_eos, tokens_eos_lens = self.prepare_tokens(\n", + " stage, batch.tokens_eos\n", + " )\n", + " loss = sb.nnet.losses.nll_loss(\n", + " log_probabilities=predictions[\"seq_logprobs\"],\n", + " targets=tokens_eos,\n", + " length=tokens_eos_lens,\n", + " label_smoothing=self.hparams.label_smoothing,\n", + " )\n", + "\n", + " # Add ctc loss if necessary. 
The total cost is a weighted sum of\n", + " # ctc loss + seq2seq loss\n", + " if self.is_ctc_active(stage):\n", + " # Load tokens without EOS as CTC targets\n", + " tokens, tokens_lens = self.prepare_tokens(stage, batch.tokens)\n", + " loss_ctc = self.hparams.ctc_cost(\n", + " predictions[\"ctc_logprobs\"], tokens, self.feat_lens, tokens_lens\n", + " )\n", + " loss *= 1 - self.hparams.ctc_weight\n", + " loss += self.hparams.ctc_weight * loss_ctc\n", + "\n", + " if stage != sb.Stage.TRAIN:\n", + " # Converted predicted tokens from indexes to words\n", + " predicted_words = [\n", + " self.hparams.tokenizer.decode_ids(prediction).split(\" \")\n", + " for prediction in predictions[\"tokens\"]\n", + " ]\n", + " target_words = [words.split(\" \") for words in batch.words]\n", + "\n", + " # Monitor word error rate and character error rated at\n", + " # valid and test time.\n", + " self.wer_metric.append(batch.id, predicted_words, target_words)\n", + " self.cer_metric.append(batch.id, predicted_words, target_words)\n", + "\n", + " return loss\n", + "```\n", + "\n", + "Based on the predictions and the target we compute the Negative Log Likelihood loss (NLL) and, if needed, the Connectionist Temporal Classification (CTC) one as well. The two losses are combined with a weight (ctc_weight). At validation or test stages, we compute the word-error-rate (WER) and the character-error-rate (CER).\n", + "\n", + "### Other Methods\n", + "In addition to the primary functions `forward` and `compute_objective`, the code includes `on_stage_start` and `on_stage_end` functions. The former initializes statistic objects, such as Word Error Rate (WER) and Character Error Rate (CER). 
The latter oversees several critical aspects:\n", + "\n", + "- **Statistics Updates:** Manages the updating of statistics during training.\n", + "- **Learning Rate Annealing:** Handles the adjustment of learning rates over epochs.\n", + "- **Logging:** Facilitates logging of crucial information during the training process.\n", + "- **Checkpointing:** Manages the creation and storage of checkpoints for resumable training.\n", + "\n", + "By incorporating these functions, the code ensures a comprehensive and efficient training pipeline for the speech recognition system.\n", + "\n", + "\n", + "That's all. You can just run the code and train your speech recognizer.\n", + "\n", + "\n", + "## Pretrain and Fine-tune\n", + "\n", + "In scenarios where training from scratch might not be the optimal choice, the option to begin with a pre-trained model and fine-tune it becomes valuable.\n", + "\n", + "It's crucial to note that for this approach to work seamlessly, the architecture of your model must precisely match that of the pre-trained model.\n", + "\n", + "One convenient way to implement this is by utilizing the `pretrainer` class in the YAML file. 
If you aim to pretrain the encoder of the speech recognizer, the following code snippet can be employed:\n", + "\n", + "```yaml\n", + "pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer\n", + " loadables:\n", + " encoder: !ref \n", + " paths:\n", + " encoder: !ref \n", + "```\n", + "\n", + "Here, `!ref ` points to the encoder model defined earlier in the YAML file, while `encoder_ptfile` denotes the path where the pre-trained model is stored.\n", + "\n", + "To execute the pre-training process, ensure that you call the pre-trainer in the `train.py` file:\n", + "\n", + "```python\n", + "run_on_main(hparams[\"pretrainer\"].collect_files)\n", + "hparams[\"pretrainer\"].load_collected(device=run_opts[\"device\"])\n", + "```\n", + "\n", + "It's essential to invoke this function before the `fit` method of the Brain class.\n", + "\n", + "For a more comprehensive understanding and practical examples, please refer to our [tutorial on pre-training and fine-tuning](https://speechbrain.readthedocs.io/en/latest/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.html). This resource provides detailed insights into leveraging pre-trained models effectively in your speech recognition system.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4LnRq1_cpPXZ" + }, + "source": [ + "## Step 5: Inference\n", + "\n", + "At this point, we can use the trained speech recognizer. For this type of ASR model, speechbrain made available some classes ([take a look here](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/inference/ASR.py)) such as the `EncoderDecoderASR` one that can make inference easier. 
For instance, we can transcribe an audio file with a pre-trained model hosted in our [HuggingFace repository](https://huggingface.co/speechbrain) in solely 4 lines of code:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uvvY0dCbx5Sv" + }, + "outputs": [], + "source": [ + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "\n", + "asr_model = EncoderDecoderASR.from_hparams(source=\"speechbrain/asr-crdnn-rnnlm-librispeech\", savedir=\"/content/pretrained_model\")\n", + "audio_file = 'speechbrain/asr-crdnn-rnnlm-librispeech/example.wav'\n", + "asr_model.transcribe_file(audio_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2Dyv9x10gGzV" + }, + "source": [ + "But, how does this work with your custom ASR system?\n", + "\n", + "### Utilizing Your Custom Speech Recognizer\n", + "\n", + "At this point, you have two options for training and deploying your speech recognizer on your data:\n", + "\n", + "\n", + "1. **Utilizing Available Interfaces (e.g., `EncoderDecoderASR`):**\n", + " - Considered the most elegant and convenient option.\n", + " - Your model should adhere to certain constraints to fit the proposed interface seamlessly.\n", + " - This approach streamlines the integration of your custom ASR model with existing interfaces, enhancing adaptability and maintainability.\n", + "\n", + "2. 
**Building Your Own Custom Interface:**\n", + " - Craft an interface tailored precisely to your custom ASR model.\n", + " - Provides the flexibility to address unique requirements and specifications.\n", + " - Ideal for scenarios where existing interfaces do not fully meet your needs.\n", + "\n", + "**Note:** These solutions are not exclusive to ASR and can be extended to other tasks such as speaker recognition and source separation.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "J6N0Fb51pFnZ" + }, + "source": [ + "#### Using the `EndoderDecoderASR` interface\n", + "\n", + "The EncoderDecoderASR class interface allows you to decouple your trained model from the training recipe and to infer (or encode) on any new audio file in few lines of code. The class has the following methods:\n", + "\n", + "- *encode_batch*: apply the encoder to an input batch and returns some encoded features.\n", + "- *transcribe_file*: transcribes the single audio file in input.\n", + "- *transcribe_batch*: transcribes the input batch.\n", + "\n", + "In fact, if you fulfill few constraints that we will detail in the next paragraph, you can simply do:\n", + "\n", + "```python\n", + "from speechbrain.inference.ASR import EncoderDecoderASR\n", + "\n", + "asr_model = EncoderDecoderASR.from_hparams(source=\"your_local_folder\", hparams_file='your_file.yaml', savedir=\"pretrained_model\")\n", + "audio_file = 'your_file.wav'\n", + "asr_model.transcribe_file(audio_file)\n", + "```\n", + "\n", + "Nevertheless, to allow such a generalization over all the possible EncoderDecoder ASR pipelines, you will have to consider a few constraints when deploying your system:\n", + "\n", + "1. **Necessary modules.** As you can see in the `EncoderDecoderASR` class, the modules defined in your yaml file MUST contain certain elements with specific names. In practice, you need a tokenizer, a decoder, and a decoder. 
The encoder can simply be a `speechbrain.nnet.containers.LengthsCapableSequential` composed with a sequence of features computation, normalization and model encoding.\n", + "```python\n", + " HPARAMS_NEEDED = [\"tokenizer\"]\n", + " MODULES_NEEDED = [\n", + " \"encoder\",\n", + " \"decoder\",\n", + " ]\n", + "```\n", + "\n", + "You also need to declare these entities in the YAML file and create the following dictionary called `modules`:\n", + "\n", + "```\n", + "encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential\n", + " input_shape: [null, null, !ref ]\n", + " compute_features: !ref \n", + " normalize: !ref \n", + " model: !ref \n", + "\n", + "decoder: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder\n", + " enc_dim: !ref \n", + " input_size: !ref \n", + " rnn_type: gru\n", + " attn_type: location\n", + " hidden_size: !ref \n", + " attn_dim: 1024\n", + " num_layers: 1\n", + " scaling: 1.0\n", + " channels: 10\n", + " kernel_size: 100\n", + " re_init: True\n", + " dropout: !ref \n", + "\n", + "\n", + "modules:\n", + " encoder: !ref \n", + " decoder: !ref \n", + " lm_model: !ref \n", + "```\n", + "\n", + "In this case, `enc` is a CRDNN, but could be any custom neural network for instance.\n", + "\n", + " **Why do you need to ensure this?** Well, it simply is because these are the modules we call when inferring on the `EncoderDecoderASR` class. Here is an example of the `encode_batch()` function.\n", + "```python\n", + "[...]\n", + " wavs = wavs.float()\n", + " wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)\n", + " encoder_out = self.modules.encoder(wavs, wav_lens)\n", + "return encoder_out\n", + "```\n", + " **What if I have a complex asr_encoder structure with multiple deep neural networks and stuffs ?** Simply put everything in a torch.nn.ModuleList in your yaml:\n", + "```yaml\n", + "asr_encoder: !new:torch.nn.ModuleList\n", + " - [!ref , my_different_blocks ... ]\n", + "```\n", + "\n", + "2. 
**Call to the pretrainer to load the checkpoints.** Finally, you need to define a call to the pretrainer that will load the different checkpoints of your trained model into the corresponding SpeechBrain modules. In short, it will load the weights of your encoder, language model or even simply load the tokenizer.\n", + "```yaml\n", + "pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer\n", + " loadables:\n", + " asr: !ref \n", + " lm: !ref \n", + " tokenizer: !ref \n", + " paths:\n", + " asr: !ref \n", + " lm: !ref \n", + " tokenizer: !ref \n", + "```\n", + "The loadable field creates a link between a file (e.g. `lm` that is related to the checkpoint in ``) to a yaml instance (e.g. ``) that is nothing more than your lm.\n", + "\n", + "If you respect these two constraints, it should works! Here, we give a complete example of a yaml that is used for inference only:\n", + "\n", + "```yaml\n", + "\n", + "# ############################################################################\n", + "# Model: E2E ASR with attention-based ASR\n", + "# Encoder: CRDNN model\n", + "# Decoder: GRU + beamsearch + RNNLM\n", + "# Tokens: BPE with unigram\n", + "# Authors: Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, Peter Plantinga 2020\n", + "# ############################################################################\n", + "\n", + "\n", + "# Feature parameters\n", + "sample_rate: 16000\n", + "n_fft: 400\n", + "n_mels: 40\n", + "\n", + "# Model parameters\n", + "activation: !name:torch.nn.LeakyReLU\n", + "dropout: 0.15\n", + "cnn_blocks: 2\n", + "cnn_channels: (128, 256)\n", + "inter_layer_pooling_size: (2, 2)\n", + "cnn_kernelsize: (3, 3)\n", + "time_pooling_size: 4\n", + "rnn_class: !name:speechbrain.nnet.RNN.LSTM\n", + "rnn_layers: 4\n", + "rnn_neurons: 1024\n", + "rnn_bidirectional: True\n", + "dnn_blocks: 2\n", + "dnn_neurons: 512\n", + "emb_size: 128\n", + "dec_neurons: 1024\n", + "output_neurons: 1000 # index(blank/eos/bos) = 0\n", + "blank_index: 0\n", + "\n", + "# 
Decoding parameters\n", + "bos_index: 0\n", + "eos_index: 0\n", + "min_decode_ratio: 0.0\n", + "max_decode_ratio: 1.0\n", + "beam_size: 80\n", + "eos_threshold: 1.5\n", + "using_max_attn_shift: True\n", + "max_attn_shift: 240\n", + "lm_weight: 0.50\n", + "coverage_penalty: 1.5\n", + "temperature: 1.25\n", + "temperature_lm: 1.25\n", + "\n", + "normalize: !new:speechbrain.processing.features.InputNormalization\n", + " norm_type: global\n", + "\n", + "compute_features: !new:speechbrain.lobes.features.Fbank\n", + " sample_rate: !ref \n", + " n_fft: !ref \n", + " n_mels: !ref \n", + "\n", + "enc: !new:speechbrain.lobes.models.CRDNN.CRDNN\n", + " input_shape: [null, null, !ref ]\n", + " activation: !ref \n", + " dropout: !ref \n", + " cnn_blocks: !ref \n", + " cnn_channels: !ref \n", + " cnn_kernelsize: !ref \n", + " inter_layer_pooling_size: !ref \n", + " time_pooling: True\n", + " using_2d_pooling: False\n", + " time_pooling_size: !ref \n", + " rnn_class: !ref \n", + " rnn_layers: !ref \n", + " rnn_neurons: !ref \n", + " rnn_bidirectional: !ref \n", + " rnn_re_init: True\n", + " dnn_blocks: !ref \n", + " dnn_neurons: !ref \n", + "\n", + "emb: !new:speechbrain.nnet.embedding.Embedding\n", + " num_embeddings: !ref \n", + " embedding_dim: !ref \n", + "\n", + "dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder\n", + " enc_dim: !ref \n", + " input_size: !ref \n", + " rnn_type: gru\n", + " attn_type: location\n", + " hidden_size: !ref \n", + " attn_dim: 1024\n", + " num_layers: 1\n", + " scaling: 1.0\n", + " channels: 10\n", + " kernel_size: 100\n", + " re_init: True\n", + " dropout: !ref \n", + "\n", + "ctc_lin: !new:speechbrain.nnet.linear.Linear\n", + " input_size: !ref \n", + " n_neurons: !ref \n", + "\n", + "seq_lin: !new:speechbrain.nnet.linear.Linear\n", + " input_size: !ref \n", + " n_neurons: !ref \n", + "\n", + "log_softmax: !new:speechbrain.nnet.activations.Softmax\n", + " apply_log: True\n", + "\n", + "lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM\n", + 
" output_neurons: !ref \n", + " embedding_dim: !ref \n", + " activation: !name:torch.nn.LeakyReLU\n", + " dropout: 0.0\n", + " rnn_layers: 2\n", + " rnn_neurons: 2048\n", + " dnn_blocks: 1\n", + " dnn_neurons: 512\n", + " return_hidden: True # For inference\n", + "\n", + "tokenizer: !new:sentencepiece.SentencePieceProcessor\n", + "\n", + "asr_model: !new:torch.nn.ModuleList\n", + " - [!ref , !ref , !ref , !ref , !ref ]\n", + "\n", + "# We compose the inference (encoder) pipeline.\n", + "encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential\n", + " input_shape: [null, null, !ref ]\n", + " compute_features: !ref \n", + " normalize: !ref \n", + " model: !ref \n", + "\n", + "ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer\n", + " eos_index: !ref \n", + " blank_index: !ref \n", + " ctc_fc: !ref \n", + "\n", + "coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer\n", + " vocab_size: !ref \n", + "\n", + "rnnlm_scorer: !new:speechbrain.decoders.scorer.RNNLMScorer\n", + " language_model: !ref \n", + " temperature: !ref \n", + "\n", + "scorer: !new:speechbrain.decoders.scorer.ScorerBuilder\n", + " scorer_beam_scale: 1.5\n", + " full_scorers: [\n", + " !ref ,\n", + " !ref ]\n", + " partial_scorers: [!ref ]\n", + " weights:\n", + " rnnlm: !ref \n", + " coverage: !ref \n", + " ctc: !ref \n", + "\n", + "decoder: !new:speechbrain.decoders.S2SRNNBeamSearcher\n", + " embedding: !ref \n", + " decoder: !ref \n", + " linear: !ref \n", + " bos_index: !ref \n", + " eos_index: !ref \n", + " min_decode_ratio: !ref \n", + " max_decode_ratio: !ref \n", + " beam_size: !ref \n", + " eos_threshold: !ref \n", + " using_max_attn_shift: !ref \n", + " max_attn_shift: !ref \n", + " temperature: !ref \n", + " scorer: !ref \n", + "\n", + "modules:\n", + " encoder: !ref \n", + " decoder: !ref \n", + " lm_model: !ref \n", + "\n", + "pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer\n", + " loadables:\n", + " asr: !ref \n", + " lm: !ref \n", + " tokenizer: 
!ref \n", + "\n", + "\n", + "```\n", + "\n", + "As you can see, it is a standard YAML file, but with a pretrainer that loads the model. It is similar to the yaml file used for training. We only have to remove all the parts that are training-specific (e.g., training parameters, optimizers, checkpointers, etc.) and add the pretrainer and `encoder`, `decoder` elements that link the needed modules with their pre-trained files.\n", + "\n", + "#### Developing your own inference interface\n", + "\n", + "While the `EncoderDecoderASR` class has been designed to be as generic as possible, you might require a more complex inference scheme that better fits your needs. In this case, you have to develop your own interface. To do so, follow these steps:\n", + "\n", + "1. Create your custom interface inheriting from `Pretrained` (code [here](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/inference/interfaces.py)):\n", + "\n", + "\n", + "```python\n", + "class MySuperTask(Pretrained):\n", + " # Here, do not hesitate to also add some required modules\n", + " # for further transparency.\n", + " HPARAMS_NEEDED = [\"mymodule1\", \"mymodule2\"]\n", + " MODULES_NEEDED = [\n", + " \"mytask_enc\",\n", + " \"my_searcher\",\n", + " ]\n", + " def __init__(self, *args, **kwargs):\n", + " super().__init__(*args, **kwargs)\n", + " # Do whatever is needed here w.r.t your system\n", + "```\n", + "\n", + "This will enable your class to call useful functions such as `.from_hparams()` that fetches and loads based on a HyperPyYAML file, `load_audio()` that loads a given audio file. Likely, most of the methods that we coded in the Pretrained class will fit your need. If not, you can override them to implement your custom functionality.\n", + "\n", + "\n", + "2. Develop your interface and the different functionalities. Unfortunately, we can't provide a generic enough example here. 
You can add **any** function to this class that you think can make inference on your data/model easier and natural. For instance, we can create here a function that simply encodes a wav file using the `mytask_enc` module.\n", + "```python\n", + "class MySuperTask(Pretrained):\n", + " # Here, do not hesitate to also add some required modules\n", + " # for further transparency.\n", + " HPARAMS_NEEDED = [\"mymodule1\", \"mymodule2\"]\n", + " MODULES_NEEDED = [\n", + " \"mytask_enc\",\n", + " \"my_searcher\",\n", + " ]\n", + " def __init__(self, *args, **kwargs):\n", + " super().__init__(*args, **kwargs)\n", + " # Do whatever is needed here w.r.t your system\n", + " \n", + " def encode_file(self, path):\n", + " waveform = self.load_audio(path)\n", + " # Fake a batch:\n", + " batch = waveform.unsqueeze(0)\n", + " rel_length = torch.tensor([1.0])\n", + " with torch.no_grad():\n", + " rel_lens = rel_length.to(self.device)\n", + " encoder_out = self.encode_batch(batch, rel_lens)\n", + " \n", + " return encoder_out\n", + "```\n", + "\n", + "Now, we can use your Interface in the following way:\n", + "```python\n", + "from speechbrain.pretrained import MySuperTask\n", + "\n", + "my_model = MySuperTask.from_hparams(source=\"your_local_folder\", hparams_file='your_file.yaml', savedir=\"pretrained_model\")\n", + "audio_file = 'your_file.wav'\n", + "encoded = my_model.encode_file(audio_file)\n", + "\n", + "```\n", + "\n", + "As you can see, this formalism is extremely flexible and enables you to create a holistic interface that can be used to do anything you want with your pretrained model.\n", + "\n", + "We provide different generic interfaces for E2E ASR, speaker recognition, source separation, speech enhancement, etc. 
Please have a look [here](https://github.com/speechbrain/speechbrain/blob/develop/recipes/CommonVoice/ASR/seq2seq/train.py) if interested!\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "z3pu0M42Pqju" + }, + "source": [ + "## Customize your speech recognizer\n", + "In a general case, you might have your own data and you would like to use your own model. Let's comment a bit more on how you can customize your recipe.\n", + "\n", + "**Suggestion**: start from a recipe that is working (like the one used for this template) and only do the minimal modifications needed to customize it. Test your model step by step. Make sure your model can overfit on a tiny dataset composed of few sentences. If it doesn't overfit there is likely a bug in your model." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tImuOg5XP3CY" + }, + "source": [ + "### Train with your data\n", + "All you have to do when changing the dataset is to update the data preparation script such that we create the JSON files formatted as expected. 
The `train.py` script expects the JSON file to be like this:\n", + "\n", + "\n", + "\n", + "```json\n", + "{\n", + " \"1867-154075-0032\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/1867/154075/1867-154075-0032.flac\",\n", + " \"length\": 16.09,\n", + " \"words\": \"AND HE BRUSHED A HAND ACROSS HIS FOREHEAD AND WAS INSTANTLY HIMSELF CALM AND COOL VERY WELL THEN IT SEEMS I'VE MADE AN ASS OF MYSELF BUT I'LL TRY TO MAKE UP FOR IT NOW WHAT ABOUT CAROLINE\"\n", + " },\n", + " \"1867-154075-0001\": {\n", + " \"wav\": \"{data_root}/LibriSpeech/train-clean-5/1867/154075/1867-154075-0001.flac\",\n", + " \"length\": 14.9,\n", + " \"words\": \"THAT DROPPED HIM INTO THE COAL BIN DID HE GET COAL DUST ON HIS SHOES RIGHT AND HE DIDN'T HAVE SENSE ENOUGH TO WIPE IT OFF AN AMATEUR A RANK AMATEUR I TOLD YOU SAID THE MAN OF THE SNEER WITH SATISFACTION\"\n", + " },\n", + "```\n", + "\n", + "You have to parse your dataset and create JSON files with a unique ID for each sentence, the path of the audio signal (wav), the length of the speech sentence in seconds (length), and the word transcriptions (\"words\"). That's all!\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IVCCe6cXPzJ0" + }, + "source": [ + "### Train with your own model\n", + "At some point, you might have your own model and you would like to plug it into the speech recognition pipeline.\n", + "For instance, you might want to replace our CRDNN encoder with something different. To do that, you have to create your own class and specify there the list of computations for your neural network. You can take a look into the models already existing in [speechbrain.lobes.models](https://github.com/speechbrain/speechbrain/tree/develop/speechbrain/lobes/models). If your model is a plain pipeline of computations, you can use the [sequential container](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/lobes/models/CRDNN.py#L14). 
If the model is a more complex chain of computations, you can create it as an instance of `torch.nn.Module` and define there the `__init__` and `forward` methods like [here](https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/lobes/models/Xvector.py#L18).\n", + "\n", + "Once you have defined your model, you only have to declare it in the yaml file and use it in `train.py`\n", + "\n", + "\n", + "**Important:** \n", + "When plugging a new model, you have to tune again the most important hyperparameters of the system (e.g., learning rate, batch size, and the architectural parameters) to make it work well.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "W4pPJ0k3lJZj" + }, + "source": [ + "\n", + "\n", + "## Conclusion\n", + "\n", + "In this tutorial, we showed how to create an end-to-end speech recognizer from scratch using SpeechBrain. The proposed system contains all the basic ingredients to develop a state-of-the-art system (i.e., data augmentation, tokenization, language models, beamsearch, attention, etc)\n", + "\n", + "We described all the steps using a small dataset only. In a real case you have to train with much more data (see for instance our [LibriSpeech recipes](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech))." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P-Trg_abjUTd" + }, + "source": [ + "## Related Tutorials\n", + "1. [YAML hyperparameter specification](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/hyperpyyaml.html)\n", + "2. [Brain Class](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/brain-class.html)\n", + "3. [Checkpointing](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/checkpointing.html)\n", + "4. [Data-io](https://speechbrain.readthedocs.io/en/latest/tutorials/basics/data-loading-pipeline.html)\n", + "5. 
[Tokenizer](https://speechbrain.readthedocs.io/en/latest/tutorials/advanced/text-tokenizer.html)\n", + "6. [Speech Features](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-features.html)\n", + "7. [Speech Augmentation](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/speech-augmentation.html)\n", + "8. [Environmental Corruption](https://speechbrain.readthedocs.io/en/latest/tutorials/preprocessing/environmental-corruption.html)\n", + "9. [MultiGPU Training](https://speechbrain.readthedocs.io/en/latest/multigpu.html)\n", + "10. [Pretrain and Fine-tune](https://speechbrain.readthedocs.io/en/latest/tutorials/advanced/pre-trained-models-and-fine-tuning-with-huggingface.html)\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " 
title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/tutorials/tasks/voice-activity-detection.ipynb b/docs/tutorials/tasks/voice-activity-detection.ipynb new file mode 100644 index 0000000000..356bba4780 --- /dev/null +++ b/docs/tutorials/tasks/voice-activity-detection.ipynb @@ -0,0 +1,2274 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_header", + "tags": [ + "sb_auto_header" + ] + }, + "source": [ + "\n", + "\n", + "\n", + "[\"Open](https://colab.research.google.com/github/speechbrain/speechbrain/blob/develop/docs/tutorials/tasks/voice-activity-detection.ipynb)\n", + "to execute or view/download this notebook on\n", + "[GitHub](https://github.com/speechbrain/speechbrain/tree/develop/docs/tutorials/tasks/voice-activity-detection.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "35SnKxL6eBrD" + }, + "source": [ + "# Voice 
Activity Detection\n", + "\n", + "The goal of Voice Activity Detection (VAD) is to **detect** the **segments** containing **speech** within an audio recording.\n", + "\n", + "As shown in the following picture, the input of a VAD is an audio signal (or its corresponding features). The output could be a sequence that is \"1\" for the time frames containing speech and \"0\" for non-speech frames." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hI0Rj1DOf-Rr" + }, + "source": [ + "![SpeechBrain-VAD.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAL8AAAC2CAYAAACI5VzOAAAgAElEQVR4Xu2dB3jT1dfHD3sUKHuPggzZiCBDEFBZCgrIkI0goIICKkP2EGRvFBBkTwFlKNM/InuILEEQoSB7b8p+n8/lvTENSZsmadKk9z5Pn7TJned+zznfc+79pXGePHnyREwxEoiFEohjwB8Ld90sWUnAgN8AIdZKwIA/1m69WbgBv8FArJWAAX+s3Xqz8GgHf/v27WXChAlK0m3btpXRo0fL+++/L8uWLZOgoCDp3bu3fPjhh3Z3YsiQITJy5Ei5c+eONGrUSL7++muJFy/eM3Wdrbdz505p3Lix/P3338LvJUqUsDtuoNRzVi7u1Js3b540bdpUybFgwYJy4MABv9Eqr4B/4sSJsmnTJsmQIYNMnTpVBg0aJN27d5fdu3fLqlWrZO/evVK4cOFwQlu3bp1UrlxZGjRoIBkzZpQxY8bI+PHjpV27di7VW7lypdSqVUty5swphw8fdgj+QKnnafk56o/9OXr0qHTq1Elu3rxpwG+NTiw/4H/48KF6u0CBApIiRQrZtm2bhIaGKjD2799fevXqFQ7UH330kUyePFmuX7+uPERISIg899xz8ssvv7hUb+nSpRIcHCy7du2Szp07OwR/oNTztPwi669atWpy6tQpA/6IwJ8oUSKpXbu2zJ8/Xx4/fizx48eXJk2ayMyZM8OBukqVKrJv3z45d+6cer9ixYpy7NgxOXnypEv1dKPhw4dHCP5Aqedp+UXWnwG/HbZnbfkfPHggCRMmlGbNmsmMGTNUbf6uWbOmLF68OFzrl19+WVmSEydOqPcR/u+//y6XL192qV6ggNrZdXhafpH1Z8AfCfg12OvUqaMs/6NHjyRBggQqYNLKoLuA7+/fv99i+V955RWlCFoZolrPWdAESj1Pyy+y/gz4nQB/oUKFJHny5LJ161Y5cuSI5MuXTwYOHKgCYHt0SXP+LFmyqGzCmjVrXKoXKKB2dh3a43pKfpH1Z8DvBPj79esn/PTo0UMFnWQRDh48KHnz5lWKkD17dlm7dq1s2LBB8fyGDRtKmjRpVKaHTFHLli1dqscY/CxfvlzFF6T3cuXKpShXkSJFLOMGSj1Pyy+i/th2A34nwH/v3j1p3bq14vipUqVSaU9iAAopTTI6mzdvVn8PGzZMgZRYoVWrVjJixAiJEyeOS/X69u2rlM62nD17VooVK2YZN1DqeVp+EfVnwG8H+Lxlm+p0UE29jRcgLbpo0aKIqpl6DqTjK/kZ8EcCfn3IRV7fUeEUt2rVqtK8efMIwW/q2RePL+Ry6dIlc8jlCK221xuw7KYEjgRmz55trjcEznaalcQWCUT73Z7YIkizTv+TgAG//+2ZmbGHJGDA7yFBmm78TwIG/P63Z2bGHpKAAb+HBGm68T8JGPD7356ZGXtIAgb8HhKk6cb/
JGDA7397ZmbsIQkY8HtIkKYb/5OAAb//7ZmZsYckYMDvIUGabvxPAgb8ntiz0FCRkBBP9GT68KIEDPjdFfaKFSJ8adOsWSI1arjbm2nvRQkY8Lsr7IoVRTZsEKlQQeTXX93tzbT3ogQM+N0R9rVrIpkyiYSFiSROLHL2rEjKlO70aNp6UQIG/O4Iu29fEevngvv0EeE9U/xCAgb8rm4TVp9HMnnVBat//Lix/q7K1MvtDPhdFbi2+kWLiuzdK6JfjfV3VaJeb2fA74rIsfaAv2NHkenTn1IfQN+ihcjo0U8/M9zfFcl6tY0Bv7vi1h7AWHx3Jen19gb87orcgN9dCfqsvQG/u6I34HdXgj5rb8DvrugN+N2VoM/aG/C7K3oDfncl6LP2Bvzuit6A310J+qy9Ab+7ojfgd1eCPmtvwO+u6A343ZWgz9ob8LsregN+dyXos/YG/O6K3oDfXQn6rL0Bv7uiN+B3V4I+a2/A767oDfjdlaDP2hvwuyt6A353Jeiz9l4D/71bZ+TGmY1y+9IeuXf7jDx+GOazRZuBI5ZA3PiJJVFQZglKW0xSZC4viZJlDkiReQX85w9Nk2un1kvKDMUkeepckigoncSLnzggBRoIi3r0MEzu3b4oN68ck2vn90jKrJUkQ/73AmFp4dYQreB/EHZVTu8eKomSBEv6kPIG8H4IHxThQuhGuXf3umQp3kUSJE7lh6uwP+VoBX/oli8kWXAWSZu9TMAILLYu5NLJrXLr+mkJKftVwIgg2sAP1Xkcdl4y5a4cMMKK7Qs5e3StxE2cIWAoULSAn+A2dGs3yV2iraE6AaQxUKCjuyZJSJnBAREERwv4Lx5ZII/DTkuGnBUCaOvNUpDA+eMbJG7iLJIubwO/F0i0gB+unz57KUkanM3vBWQWEF4Cd67/KxdObg8I7h8t4D+8trnkLtHGUJ4A1Jyn1Gey5Ks8w+9XFy3gP7SygeQv18XvhWMWYF8ChzYNlfzVF/i9eAz4/X4Lvb8AA/4IZG4sv/cB6c0RDfgN+L2Jtxg1lgG/AX+MAqQ3J2PAb8DvTbzFqLEM+A34YxQgvTkZA34Dfm/iLUaNZcDvJ+C/dv2mpMpWSfZvWyCFCjwXo0BkO5liLzeS95vXkvZt6kc4T2frRddiDfi9AP7bd+5KptxVZeywztKicc1nRuzae6ysWLVJ/tyx0OFsHj58JJu27pGSLxaQoKRJogUPGzbtlopvtJFmDd+UGZP6OT3Gwb+OyakzF6TKq6VVm11/HJRMGdJKlszpI+zDup5tH04P7kZFA34vgJ8h2nYYJH8dCZUNKyeHG/HRo8eSvcCb0rlDU+n4USM3ttL9pk1b95ZHjx7J0p82yNmjqyVF8iCnOu054GsJC7svwwd2dKq+vUqe6COqgxvwewn8WLmXKjaXv/f8IM/lzGoZdeXaLVK70edy+vBKSZoksXTpNUaW/rxBbt26Ky++8LyMG9ZFns8bIra059btO9L+s6Hyw/L1Ej9+PKlX+3UZM+RzSZQooQLip91HysIla+Xx4ydSsngBGTe8i+TNnd3hauk/c95qsmP9TGnWtre0fe8daduyjqW+o/EGjZgmA4dNlbhx40rmjOkk9M/lounMyX/PydYd+2Tj6imWftb8b5u81eBTOf/PGqlQvY2iRxcvXQ3XR4N3Kisvt3ntd5Z2v23eLVVqtZdzR1dLyuDkUcW53foG/F4CP8O8UK6R1KhWXgb0/NAyav3m3SR+vHgy97uB0v7zobJtx35ZPGeopE2TUvoMnCRzFq6UY/uWyb3798Nx/k86D5Pde/9S7fAetRp+JtUrl5XB/T6WLr3GyvZdB2TedwMldapg+XLYFJm/aI389ftipSj2yvjJC2XKjB9lz+a5MmLcbFmweI3s+HWmpWpE4zF27lzZLJZfg790yUJSqlILOfv3KkmfLrXq64OOg+TCxauyZM4wi5IQG1j3cejwcSn4Un05uvdHyRWSRbXr0GW4olaL
Zw/1CPDpxIDfi+CfMHmhDBk9Q0IPLFeW8uq1G5IpTzVZuXisVCz/ogRlLCdzpnwptWtWUrPC2qbJ8ZoCysuli1rAXzB/LkmWsbwsnDlY3qxaTtXds++IXLh0RSpXKiUpMleQ5QtHqT4pKEdwlgqy4vvRlvdsl41iNmnwhnz2cRM5d/6yZH2+uvyxaa4ULphbnjx54nA8eL4j8APq54q8Ld0/bymtmr0tjx8/liz5qisPVb9OZYfgZ26lKjVXa+vdrbUaH2o4dmhni2w8oQEG/F4E//UbtyRTnqqydP5IBVKUYdSEuYoKAThox4HtCwVw65It/xvSpWNzafruGxbwp0kdrOoe3Pm95M+XM9wKzp67pD6zV6Z908duwL1z90Ep81oL+ffQz5IpY1rVtFrtjyVfnhwyZujnovu0Nx51IwJ/tz7j5M9Dx5QyQmWq1/lEzh9boyiedbbHto9vpiyS0V/PlcO7lygvRjsoT8KECTyBe9WHAb8Xwc9Qzdv2kQcPHiq6UrJCM3nn7Vel26ctLAAj41Pg+f/An/X5N9TnTRpUfwb8topC/+cvXJGMuasoq12sSF6ngEIw/u30HxQgdbn/4IEkT5ZUzhxZJVeu3rCrmLpuROCHmr1cuaVcCv1Feg34Rs1vztQvVdOIwK+94qY1U2ThknVy4+YtmTi6u1PrcbaSAb+XwY/1q1KrnWxZO01KVmyqrG3GDGmUa0+e6RWZ9W1/i2vHU6TL+bosWzBSSpcs/AztIR1Zt9ZragVY730H/lb0IkXmV2TCyG7KW+gSevKMhGR/9kubSMNmzlNNxQqVXy1lqY+Cln61hUwe20NRFGiWo/EiAj8d5ilWW4YO+EQ+7zFaeRLinsjAz+fEQ3D+739Yp8YuV6aYs7h2qp4Bv5fBz3AFStZTVpU8OHxel07dRipq8MPc4RIcnEzI//+0apOiRXfuhoULeAmON275QxbOGKyC2PrNusnrlV6SIf0/UQHv0p9+lWULRinwYNV7DvhGThxcoca1Lt/NWiade45WFp5MkXXBIxwPPS1rlk5Qwbij8Rq27C737z+Ub8f1lFQpk8sL5RqHO+Tq3m+Cyvrs3f93OOpibflt+4gTJ478vGazNGrZQ1KlTCHH9i8V3vNkMeD3AfhHjp8jn3UfJT8tGiNvVHnZMgOsMFmVJcvWS9y4caTMS0XUwRgAtk113rx1R9p9Olh+XPGrJEgQX+rWeprqTJw4ody9e0+lOrGY/F60cF4ZNfhTKVWi0DOrLft6S5UKxSLblh2//6msP8BLkzqlw/E0SJMkSSQn/lwhL1VqHg78e/cfURQHrzRlfC/LMNbgt+0Dbk+gniVfNWndona4DJmnFMCA3wfg99TmBXo/Fy5ekZBCNVUSQKc8PblmA34Dfk/iySN9cZXj8pXr8n77AYqmkRyIjmLAb8AfHbhyq8/Vv2xVp8Dly76gYprUqVK41Z+jxgb8BvzRAix/6NSA34DfH3AaLXM04DfgdwlY/vR8gaE9LmyxL7+6hIto5L51SRaUVN3ubFivqnzctoFKbzpTPHlPnnQkF9i4HeqN5wucWZ87dYzlj6GWH/C3atdf3W2hnDl3Ud1x6T/4W3XZbNWScU7dc/HkPflyVVqpqxb6hNYd4MWEtgb8MRj8pPpundsYbob/HD8lhUs3kEF92lkefuE68vAxs+TS5WuSN092+bLXR+rwrM+gSc/ctY/srv+RoyfVteMt2/dJhnSppXPHZupxxFdrfCDrf9ulDtHqvv26jBveOdyJ88lT59TzBVu271UnxVVfK6MO1oJTJJOfVm9Sh3f9e34gQ0fPFPL3JV4ooO74OPvATHQoiwG/n4Gf6bb5ZKD8eegf9bAHVAQPwXXlooXyysq1m9WdGJ71haLY3ruJ6K5/vHhx1UlshZeLS58v2gj36rnd+f2sIepZgZRZK8rsKQOU5bfm/NxCLVq2oTopHjX4M3Wq3KBFN/XQyY/zRsiqdU8f2PmgZV0Z
+VUn4SS7cKl3pcNH7/r06TUDfj8EP1d9h4yaIWf/Xi1v1u0gJV8sKH2/aGNZCdd/S5UspN6zBj+X5yK66580aWIp+/p7cuXkeotFBrg8oVWkUB6H4AfMZV57Ty4eXydct6bwxBbzuHbqV9m87elVZm526s+btektQUFJ5JtRX0SHUXeqTwN+PwT/mG/myajxc9Ujg/mK1xGoim1p3qiGTJ/YNxz4I7vrD13p2HWEesTQXnFk+fcfPKpoDeDXBXqWu2gt2bd1vpw+e0HeadxFbp/fZPkcSkfQzBx9VQz4/RD8PGgOb17943jJX6KuetbW0cPv1pY/srv+8xatfgbE1uKJCPw8Znjh2FpL9aPH/lVXmaFfp86cl7pNuoaLXwz4PafyAfcV5WR77AW8fAMEjxxOGtNDfcVIzfqdJH26VDJ1Qm+LNAk+s2ZOrx6VtOX8Ed31J5sEfcHyp0v79F91LvrxF8XduS7tCPx3w+6pxw6taQ+xyFsNOinawzVtA37Pgd22p4AEv3Wqk+d5uU/PvfyXXiyoHnDRd97rNe2qHnqvXKm0yragEATAPPxhe0++a+9xDu/6JwtKogLXYoXzytABHQTrXaNeR5k3bZAKeHkEk0cq32vy9LuHrL9Eq3j5xiqDQ0B77dotqdesq+TInknmTxukAl4DfgN+pyVge8hFipHsDVy+U7vGQmZGl7ET58vIcXPk3IXLkiNbRvXAOPUotvfkuSMf0V1/PAupzm07DyiP0rlDM3WoRiF1OmzMTPXlVHB1a/CjKB92+ko2b9urgtqa1V9RT29xOGfA7/S2u1Qx4Cy/S1IwjaIkARPwxtCAN0q7aCq7JAEDfgN+l4ATCI0M+A34AwHHLq3BgN+A3yXgBEIjA34D/kDAsUtrMOA34HcJOIHQyIDfgD8QcOzSGgz4IxDb4bXNJXeJNhIv/n/fYemSlE2jGCeBRw/D5OiuyZKv8owYN7eoTihaDrlCt3wh6bOXkqTB2aI6H1M/hkvgzvV/5cLJ7RJS9qsYPtPIpxct4L94ZIE8DjstGXJWiHwGpoZfSeD88Q0SN3EWSZf36dUNfy7RAv57t85I6NZukrtEW0N9/BkdNnN/SnkmSUiZwZIo2bPfXO1vS40W8COE84emyeOw85Ipd2V/k4mZrwMJnD26VuImziAZ8r8XEDKKNvAjHbh/suAskjZ7mYAQVmxexKWTW+XW9dMBwfX1PkYr+B+EXZXTu4dKoiTBkj6kvKFAfqg9UJ0LoRvl3t3rkqV4F0mQ+OnDOoFQohX8WkBQoGun1kvKDMUkeepckigonVGEGIweAH/v9kW5eeWYXDu/R1JmrRQwVMda7F4BPwMSBN84s1FuX9oj926fkccPw2Lw9js/tXi3H6jKj4I89w/fnB89emrGjZ9YEgVllqC0xSRF5vIBEdzak5TXwB892xQDeu37/9+ioF9jwJTMFJyTgAG/c3KyX+vaNZGc//8vTY8fF0mZ0p3eTFsvS8CA3x2BY+379XvaQ58+Isb6uyNNr7c14HdV5Nrq80rB6hvr76o0fdLOgN9VsVtbfd2Hsf6uStMn7Qz4XRG7tvrBwSInTjztIUcOkevXjfV3RZ4+amPA74rgp09/2qpFCxH9D56fPBGxft+Vfk0br0rAgN9dcVuD392+THuvSsCA311xG/C7K0GftTfgd1f0BvzuStBn7Q343RW9Ab+7EvRZewN+d0VvwO+uBH3W3oDfXdEb8LsrQZ+1N+B3V/QG/O5K0GftDfjdFb0Bv7sS9Fl7A353RW/A764Efdbeb8G/ZcsWSZQokbz44os+E54a2IDft/J3Y3S/Bf+kSZMkY8aM8uabb0r8+PHdEIGbTQ343RSg75r7Jfj5p9AdO3aUIkWKSFBQkLz77rs+lGCcp2Nzt8cUv5KAX4L/0aNHCvxp0qSRnDlzSvPmzX0ndOuHWXw3CzOyCxLwO/Bj9S9cuCBdu3aVa9euSe7cuWX48OEuLN00ie0S8Evw
//PPP9K9e3cJCwuTbNmyyYQJE2L7Ppr1uyABvwE/Fl+XgwcPSs+ePeXhw4eSNWtW+eabb1xYumkS2yUQ48D/+PFjBeqECROG25sbN26o4DZu3LgC+LH8ly9flly5csmMGTPUf1U3hbj7iZGFk0CIceC/cuWK2kCCWeuydetWeeGFF1Ruf//+/fLFF1/Ir7/+KuXKlZOff/5ZKUVsVwDkdunSJUmbNm2sl4Uz+I9x4F+1apVkz55dChQoEG7+K1askJQpU0rZsmWlX79+smPHDlm7dq3ky5dPhg4dKq+99ppSDK0Asc0Cst5jx45J0qRJ1flHbDcEfgl+rHjy5MmlfPny4eY/d+5cRXPatWsnffv2le+++07OnDmjDrh69+6taFLbtm1VWzb+33//lSxZssQajwBd/OOPPyRTpkzqx4A/cvjHOMs/cuRIKVy4sFSu/N/3+rOxQ4YMUXSoW7du0qdPH5k8ebJKecaLF09q1KihXlGYDz/8UBIkSCCaJl2/fl2Bwbro4NkRQIg5qEM/viis98GDB0qxWZczhTZ79uxRa7W1/HxGYb1GKf6Tps/A74iWQGnKlCkjVapUscySQy04frJkyVSWp1evXjJlypRw4Aew/Hz99dcSEhIimzdvloIFC8rhw4dVf9blr7/+UucDjq5F3LlzR4GfANvbhXFv3bolS5culVdeeUWlcgFsZDQOGW3fvl1y5MghmTNnDkf/Tp06JRkyZJCLFy8qb2jKUwn4BPxs5Llz55SFIkAjuCVgpUBpAGvVqlXDgb9Dhw6SLl06BfzPP/9cZs+erTaTdlj8s2fPqlhh0KBB6rLbL7/8IsWKFVMeoGbNmuEs3u+//64UI3HiZ/9bJHM7cuSIsvookZ6Xnsy9e/cUEO21dQZUkYEYBZ43b55MmzZNpXDz5MmjlPr8+fMqrRuRt5o6daqKgVAaPW+s/uDBg+WDDz4QlIArIab4EPxsyK5duxRIf/rpJylRooTi6vxg2QHd+++/Hw787du3V5aY4LZRo0ayePFiBQo2mSwQHB+6pD0HwTBgOXTokFSvXt1ixRmb4Bnrbu9OEOCcNWuWnDx5Up0i21IfUq70QfAd1ULfp0+fVtbXEYivXr2qvBeBf6lSpaRChQrKGPBejx49HNIgZAH4U6VKJe+8846lHh4BqtilSxdlIAz4fUx72JB169ZJxYoV1eksHoCfSpUqKfDjugG/tl7U/+ijj+S3336TP//8Uxo2bKjAz/uAiAwHmR42lnb169eX9evXC/ED9364+ZkiRQq1aigFClKoUCG7d4IAKAE0fbdu3VrdHbIugHPs2LHKA9l6hciUAYAuXLhQateurZSK9tZ9MPbRo0flq6++EqgZ3mn58uXyv//9T80Jj0A76vFj3ZYYASrIqffHH39soXSsAyXu3LmzygaVLFnSt7dgIxOSFz/3Ce1ho7BEBK7cy3nuuecUpWFjxowZo6gQSqGtLqAhkF2zZo3aQMC/ZMkSBVBdUAL6wStQd+/evfLee+8pl//WW29ZLDXgBQikUj/99NNnRA2osLCMxfygTDpA1nStVatWinalTp3a6QCStmSrmAvz52+oiPVhHh4FoGPB8TwYBMBPbMLvxC/I5MSJE5I+fXpJkiSJZf7379+Xzz77TBkBlEfLDhkhaz6bOXOmokUonyk+4vxs1Ntvv62A3qJFCylevLg8//zzkjdvXmWxOcHFQrKRFOoDQigMQR383xb81AP8UASCY2hQnTp1FE0CAJ988onqizihdOnS0rJlS0UHbLMpgPLVV1+V48ePy+jRoxVY8RZwfcB78+ZNpWB8Vq1aNaezMYzLSfT333+vYhH4NyAH1Lqg5Cg9cQoAZ1xkgVIQY5C5AtTz589XXtK6LfNDqfBUKLy14ahXr56MGDFCvY/8MAym+Aj8uGas/Jdffql4N+4b8A8cOFA2bNigTnAXLVpkCSqpTzqTQJTNA7hYZp3C0xvJhgNcaA2X37D8BH+c
eEIZKKRHUTKsPoqnsym6D/okFsHDoBzwbgo0BKtMUM2ZA/RKny/YAsleKnX37t2KqkH3oDPQG5QYfq4LAMarsHYyNigwCkHBynOLFVlxtaNNmzYqY6XL7du3lSzxoBMnTrR4FNpDIxkX+gPwkSHZH9u4A4+M1+WzqFI6f1Qmr9MewAUACfrYQKwfQge448aNU6AjCGaDdKqRjWWj2RBAzBkAbawvuyF86AGWH7ePonDoxXi4eSwuhayJBn+DBg1UUEw7fTpMfQJn2kNrACS0AXqWP39+dbLMuGSYmCfxhm2BsgBCa1qCUuNFADUKRf94Jvi59nB3795VdchU0ZaxNbXDQxFsM1fiHwwCgbkGMB6pSZMmyjvRXveJnMimbdu2TY2LByEZULRo0XDg11mu6dOnq3jG3rr8EeARzdnr4McS4YKxqgSUnNSywWwu1hjwac5L9ofCpgN+6uC6sY7WfF8vEOXgkAc+jpVGCagH+LG6bDCKR/qQh2EIjLVSEGijgIwPdcDq6qASRSVTAtAAE69YUw6VCKRtLSgKQsGbwen5WbZsmQInoIIuAX7uJZHSff3111UfUBw+Y2zAD92zBj/xCnPE80EPUSjWQoEm4R2RGZ9pZWa+ZICYE94WGf3www/KQ9oG2wTy9AltpG6gF6+Dn80gF08+H+sDxQFwbD4UBHCQDoQT6wwNBz4a8HB36JIt5bGmPuTH4c1YMcDDfaBNmzYpq08hNiBghGZBjV566SVlxRkPwPGK1Y2oABwsKBkrW4qAlT1w4ICiGngNMks6VUmKlTUScAMwKBk0jfVjCAiCkZEt+BkDqkc70r6M/eOPP6rxkcX48eOVXPFkc+bMUZ6VcVBgPBjy0BSOeIr0r543daBU0DCMBKfpJA2cPV32VyXxOvgBF/dzSMvZFgCA1cU6owAa/FhMcvPQHygJFteW8lhbf6hFcHCwsqTUgwoBfgBJ3wTPgI4AkXQoJ8crV65UgTBj8Flk4Ge8/v37Kw9mexaA14F/Q0XIspA9gofjXQA/BQrF+gAmawOIfI63AswAj1fr+IHLe4Aa3s5aACjzpw58HsutPShghkZB1/BSUCGCZPrkAA2vR119esw9KeYM3WzcuLEK6JFhIBevgx+uirvnlNVewVqxQdAEfb8HhSAQA5gAFVBFyOXixFGg0tejoR9QAc4IuAaNZYOKAAoySIAAy4x3gd7wjABKGlnBawAuaIWmPvqEFmqivQ40jCzOqFGjLDRGUyw8EDQH8JOC1WC2N7Y+F9BeD6UG1IxtbVC0MkKB8H61atVSn/ONF8ypbt26yiOR0WLuHBLiaQm+SRRApeg30K9CWMCvBaoPUGwvQUV2LO8IKLaZD8APfwdkjgpBHYEgVpDfAT+b48ja2+tHWzQ+Q6GwvqQbydSgSHo9mnIBCICCEsB5dZYlIgXQ4MIbQaUoKBdWnowNY6BkxDY6hWvbH58DNKgXMY+zj2SyPkCOpUaegJs8vnVBqQEwysVhHwEy68WzEfOEhoYqLy1duaIAAAgkSURBVALVgn6ScNAPEuFliA20d4jKhTh3sOJNqmUBP3wYN4n743oBQiBzgLWBRgBarAYLA0wACetIgMnf1GXibAj9YHVJmeE66VNnPuCzpB+52+OoMCb8H8BilTgToA9XC/NiDVh0LrzpdVj3h4IBPG6LcrBkL6C2HZ+gsmnTpqouMQxUCfCTArVeH2MDOntrRl7QHbwhVIOxnS0AEpCSNSNe4EqEbdFGAJlanwyzLzojBd2B9kENmSdZNs4QOnXqpJQSD4n86QtlYg/Ze/qjLsaEPWe/8cz8jcz5AUdQTfoDK9ro0Za1gzuuitCWxIbtDVxnZeFKPa/THoSEW4XTO3LtWEPy4pxGsmlYVYSPwPkMRYxIcWyDYTYZi44CAi6ySTrDpO8HYXUJGKFGbDgbHFmBMpBloV99Q5T+CKRRBgoAQKEIqDmYs7XMeA8MCfWIHziUi6ggD+2tkAvxD22x
/CiAdaEuaU76J8WJcgJgEgAcoLFe7g4xdzwrwTR1CNCJQzBAgZzv9zr4EW6zZs3UCa69QlqSLAkuGd5OwUVzOgrosSAAzh4FYqPIdgBG4gMUjHrQEiwyikeBJqBEgAe3z+8AkzQjSoH1Yp5YNZQAK6mpkvWcBwwYoLgzFsz6CTKUCyoBSMmqQCegUrZrxhNhXcnbM3fih8goF/eUyPowTwJYlArwkuHRh3ZYV+bEXadhw4ZZvAN0jHmimIyrnxfgPebKNfKNGzeq8wcokq+eZ4jM6Hjqc6+DHzCxyZxSQhu0+9QLAuhwV8Cv8/zwUTYGC8crimAv1QmAcNH0DahJeQJ+AENaEGpGOzwPwONqNEErB1ZsNi6dz7HouGCUgzHxQNAC6+wLVO/bb78Vrg7YPhfABbw33nhDKRG8mesMZH0AH/PRlAALDBCxvhSUmiyQVlLrTQagWHGuNpCtQo5kwfAqgBTKAz3EahM4I1fmz9URXslsQetQdOaE4thadTwHFIyn6ZhzoBevg18fcmHhyEKQ1dG3M9lE3DfUgw3Sp4ykBwETbpnrBWyQo0MuQM0mQgcAHKABdICcNqQA2VhOMcmzk9YDIFhrlBGAAyAukaEkpASx/ATfxB/Mn3gGJcPSAjjbII2cOjwXJYJCsS4ssz6rwKORmmQsgKpBCD3BK6F41gE7IKQORoEMGDETnoicPLEC42MgWAuxCwoCyLVF50Qb78JdKgwB2S57h3OMi+Xn0E2fEAeyAngd/Gw4VhkLjmVmw7BSUBUsob70hXXXGwAFgQ4BVk5EsZT2sjFYRjIZAJl0KNQDoGGJscIUuC5BIv0DKFJ79M/pKBac+XEYhAKS+oO/c6+HugBt586dKr/PPLgDBBBtwc+zCiiQ9QP1WGb6Q3nJKnHABy3TWSLmxjxQbE7A+Rw5aXrHGFxs4xVFJnDHmyEv5oZX4zoH/QNu64ttKC2KAu1CJsjIUbF3LylQFcDr4NebTPqPjQY8WDyoB6e3HMNDeeyBHyvKwQ6BmL5moLMEeAfy3lhpqAD5e7gxSgDn1leHARgxhT5kItMA3cBi6owIYxBgQ2m4wYkXgPpgScmN4wU4UaW+Nd/XIOEAivrWnBllwmNQOEyzl0LUGSEMA4qD99L0jvp4LYwEyoMXYB16DOSBMWEt1rc68XbU584Qll2vM1ABHZV1+QT8+qkjuCsbjWUlyOJgCLcLQK3vpGvLj0Um/YnLJvgFjDwFRsHa4jkAFkEod2f4wUtYP44ImAA2oMBKWtML/TtWlR+oAh5D3/FhHC6tAUKCaEe5b+uUot4MxoXLA2oNfHsbxVq5vkD2hXs2mv+jvFh+2vMewAfQuujDNW6L6ntKfMa4KBEUjrVEJV8fFSD5Y12fgJ8NAZhwYsBP/pvgETByOITLhyJoLoxVA4hkcthYbmMSE2C1sbCaztAfHoW8tT5o0aei1psD3cBj2HuAnXZ8zpjwetugEOBTh7Gio6CU0EA8BdQKbwFgoVGcijt6kks/wM6VDmIRvTbmiiKR7nX1uePoWGdM6NMn4GdD4Ki8AmQyD4CWIFE/7GF9z10fImHxCWJJlZKa5PBI32nHKrK5HPiQ2YioMDbgdZTD1lcbbL8yUVM2XqMzIEQuUBQsOx6IeSIPMlKOvnGCNhiRffv2qYDVOg7Bm+jHJmMC6GLKHHwCfhbPZmFdCe6gEFyxxWXbAz91sfTQFTwAGSHADqUh4CM+YNNRDgJUArzIiqtH8JH168nPCXjh76ybV2hfRIdOeFSyPsgzkA+nPCVjn4GfBcBTucjFphLwQjegP5r2WC+STeX4nYwGnJi7KtAB0nikNkmZwmvpT58PeEpIvuoH+RDb6MMoXiPi7Cg0coIiGfBHvms+BT+bRQ4ejkoWA8tFFgMLTgBsC354MHl3DoZ0zpy7IFyFWLBggYoHSHd683JU5CL2Xg0D/qjJ2qfgZ6pkaTiG1/yaOyUogXUmg3qkLgl4yVgAdGiS/iZn
YgLy96Q5YyvwNZU0lt95BfA5+OH55J41aFevXq1u9tl+uRJ5fAJa3Dn3dvRzt84vNfBrGssftT32OfhtTxS5244ycE3BUfGHYDVq2+C52nhBDITJ50cuU5+D33aKpOU48YXimBJ1CRjD4LzMYhz49V11Y7mc30RT0zUJxDjwu7YM08pIIOoSMOCPusxMiwCRgAF/gGykWUbUJWDAH3WZmRYBIgED/gDZSLOMqEvAgD/qMjMtAkQCBvwBspFmGVGXgAF/1GVmWgSIBAz4A2QjzTKiLgED/qjLzLQIEAkY8AfIRpplRF0C/weT4aN0FgtirwAAAABJRU5ErkJggg==)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lxWeLSUfgjq8" + }, + "source": [ + "As an alternative, the VAD could provide in the output the **boundaries** where speech activity is detected. For instance:\n", + "\n", + "\n", + "\n", + "```\n", + "segment_001 0.00 2.57 NON_SPEECH\n", + "segment_002 2.57 8.20 SPEECH\n", + "segment_003 8.20 9.10 NON_SPEECH\n", + "segment_004 9.10 10.93 SPEECH\n", + "segment_005 10.93 12.00 NON_SPEECH\n", + "segment_006 12.00 14.40 SPEECH\n", + "segment_007 14.40 15.00 NON_SPEECH\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Z9R7GTLJlBhI" + }, + "source": [ + "## What is VAD useful for?\n", + "\n", + "A VAD plays a crucial role in many speech processing pipelines. It is used when we would like to apply the processing algorithms to the **speech parts** of the audio recording only. \n", + "\n", + "For instance, it is often employed as a **pre-processing step** for speech recognition, speech enhancement, speaker diarization, and many others systems.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "evSG546AhLd_" + }, + "source": [ + "## Why is challenging?" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "99KjKv3shSe2" + }, + "source": [ + "Discriminating speech from non-speech signals is very natural for humans. However, for a machine, this is much tricker. A good VAD should precisely detect speech activity even in **noisy** and **reverberant** conditions. 
The number of possible noise sources is huge in **real-life** conditions (e.g., music, telephone rings, alarms, etc.), making the problem challenging for a machine.\n", + "\n", + "Moreover, a good VAD should be able to process both short and very **long recordings** (e.g., meetings) and, ideally, shouldn't be too computationally expensive.\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "E01ifcGqmKkQ" + }, + "source": [ + "## Pipeline description\n", + "\n", + "Robust Voice Activity Detection has been a very active research field for decades. Today, deep learning plays a crucial role in this problem as well.\n", + "\n", + "In this tutorial, we employ a neural network that provides speech/non-speech predictions for each input frame. The frame-level posterior probabilities are then post-processed to retrieve the final speech boundaries, as shown in the following figure:\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "E2xcYqTVp2it" + }, + "source": [ + 
"![SpeechBrain-VAD_CRDNN.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAL8AAAFCCAYAAACgkQx1AAAgAElEQVR4Xu2dBXSUx9fGH9whheLuVqQ4xaG4u7sVgrsTtMGlWEtxLcWlSLFQ3NpAi7sEp8ETkpTvPMP37n+zJJtk33XunJOzsDt65zd37r0z7260Dx8+fIAkkcBnKIFoAv9nOOsyZCUBgV9A+GwlIPB/tlMvAxf4hYHPVgIC/2c79TJwm8Pfo0cPzJ07V0m6a9eumDlzJjp16oStW7ciQYIEGDVqFLp16xbmTEyaNAnTp0/H27dv0aJFC8ybNw8xYsT4JG9k8506dQotW7bE1atXwX8XKVIkzHbdJV9k5aIn35o1a9C6dWslx7x58+Lvv/92mVVlF/gXLFiAw4cPI2XKlFi0aBEmTpyIYcOG4ezZs9i1axd8fX2RL1++UELbu3cvKleujKZNmyJVqlSYNWsW5syZA09PT4vy7dy5E/Xq1UPmzJlx+fLlcOF3l3zWll949XF+rl27hr59++LVq1cCvzGd1PyEPzg4WL2dJ08eJE6cGMePH8etW7cUjGPHjsXIkSNDQd29e3f89NNPePHihdohMmXKhKxZs2Lfvn0W5duyZQuSJEmC06dPY+DAgeHC7y75rC2/iOqrVq0a7t27J/Cbgz9OnDioX78+1q5di//++w8xY8ZEq1atsHz58lBQV6lSBefOncPDhw/V++XLl8eNGzdw584di/JphaZOnWoWfnfJZ235RVSfwB+GtWes+YOCghA7dmy0adMGy5YtU7n5/9q1a2PDhg2hSpcqVUppktu3b6v3KfwzZ87g2bNnFuVzF6gjOw5ryy+i+gT+CODXYG/QoIHS/CEhIYgVK5ZymLTFoFVBe//8+fMGzV+2bFm1ELTFENV8kYXGXfJZW34R1SfwRwL+r776CokSJcKxY8dw5coV5MyZExMmTFAOcFjmkmbzp02bVkUT9uzZY1E+d4E6suPQdlxryS+i+gT+SMA/ZswY8G/48OHK6WQU4cKFC8iRI4daCBkyZMDvv/8OHx8fZec3b94cyZIlU5EeRoo6dOhgUT62wb9t27Yp/4LhvSxZsiiTK3/+/IZ23SWfteVnrj5Ou8AfCfgDAwPRuXNnZeN/8cUXKuxJH4CJIU1GdI4cOaL+P2XKFAUpfYWOHTti2rRpiBYtmkX5vLy81KIzTQ8ePEDBggUN7bpLPmvLz1x9An8Y4PMt01BnONnU29wFGBZdv369uWySLxzpOEp+An8E8GuHXIzrh5d4ilu1alW0bdvWLPySL2zxOEIuT58+lUOu8Gg1vd5AzS7JfSSwcuVKud7gPtMpI/lcJGDzuz2fiyBlnK4nAYHf9eZMemwlCQj8VhKkVON6EhD4XW/OpMdWkoDAbyVBSjWuJwGB3/XmTHpsJQkI/NYQpL//x1o8PKxRm9RhJwkI/NYQtJfXx1q0V2vUKXXYXAICv14RU+trVzZu3hTtr1eediwv8OsVNrW9dlt09GjR/nrlacfyAr8eYWta39jmF+2vR6J2LSvw6xG3sdbX6hHtr0eidi0r8Fsqbk3rJ0kC/P9D9siYEXjxAhDtb6lU7VpO4LdU3EuXfizZrh0QLdrHf/OnDozft7RuKWcXCQj81hCzMfzWqE/qsIsEBH5riFngt4YU7V6HwG8NkQv81pCi3esQ+K0hcoHfGlK0ex0CvzVELvBbQ4p2r0Pgt4bIBX5rSNHudQj81hC5wG8NKdq9DoHfGiIX+K0hRbvXIfBbQ+QCvzWkaPc6BH5riFzgt4YU7V6HwG8NkQv81pCi3esQ+K0hcoHfGlK0ex0CvzVEbvwwizXqkzrsIgGbw3/nkh/+OXYND28+QcDb93YZlDRiPQnEjR8bqTInR96S2ZAhVxrrVewENdkUfp9fT+L+tUfIlDsdkqdLijjx4zjBkKULUZFA4NtAPLn3HLcu3kPa
bClRrnGxqBR36rw2g5/gv3j+BgXL5HZqAUjnIi+Bv/64iCRJE7jNArAJ/DR1Dm86g9J1i0ZespLTJSRweMsplK5f2C1MIJvAv3PJIST5IhHS5UjtEhMqnYy8BO5deYAX/75C9fZlI1/ISXPaBP4lozagdJ0iYuM76aTr6RZ9gMNbT6P92IZ6qnGKsjaBf/6ANajZoYJTDFA6YX0J7Fh8AN2mNrd+xXauUeC3s8DdoTmB38wsiuZ3B8TDH4PAL/C7N+FmRifwC/wCv4tLQGx+F59AR3RfNL9ofkdw5xRtCvwCv1OA6IhOCPwCvyO4c4o2BX47wb973y4MHtUPfx254BQTH1EnXr56iXwlcmLP5gPImT1XRNkt+ty0jSPH/0CfIT0RN05c7Fi/x+btC/xWgH/rb5vRc2A3Q01x48ZFmtRp0aR+M3Tr2EO9//zf57hx6zqKfO0cl+Su3biKmfOm4eiJI3j56gVSpkiFKhWroXe3fvBI4gF7wB8cEozTZ08i/1cFET9efHTq0Q7x4yfAxFHeiBsvXqjPLFpdERQS+K0E/6CR/XDgtyOqtsDAAAXVyHFDMWvyXNSqVscWc6fqJEAxY8SMUv3n/zmHpu0aoGSxUujQuhOSf5kC129ew6z5M/D+faDSukFBQTbXvKadbtS6LiqW+xbdO/WM0nhMM4eEhCBGjBgR1iHwWwn+waP64+Lp66Fqq1a/EqpVroE+3fvD2OzZ77MXoyeOQP+eg7Bg0Vw8ff4U+fMWwOzJc5EwYSJVx8at6zHnp1m453cXXyZLjs5tu6J9q07qs3GTRuPl61d48eJfnDxzAhnTZ0LpkmUxsPcQQ/uz5k8H29my9rdPRlinaXUkSJAQqxetQzTtuV0Ar169RI+B3TCw1xBkSJ8xFPzcuYaNGYSjJw4jOCQERQoWwcTRk5EubXpV//I1S/DT0gV49Pih2kXY37YtOpj9zHh3GTFuCE7/eUpBmzplmk/Mnvt+9zB83BCcOHUMiRIlRoWylTBy4Gglr9evXyFv8RyYPnE2xk/xQo8ufdCxTWeBP0IJ2Ah+QsJtfM2S9SjwVcFQ8B88fABderVHq6ZtMHLQGLx99xZV6lVAh1ad1aTduHUDFWuVxo+zFqNCmYo463sGLTs1xcZVW1Vd308fj03bNqBbR0+1q+zauxPzF83BkT0nDTBz4TVr1ALtWnYMNUK/h34oWamwAr9UiTLhjt7U7Ok92BOPHj/C3GkLEDtWbAwY0Rfvg95jybwVuHLtMmo1qYpNq7YjV47cOH/hHNp0aY51yzYhevTo4X5G09DYr2jYqg4qla+sNL9p+3Wb1UChgkXUAg8ICEDfIT2UUpg2cZb6f87CmVGuVHl4DZuAlClSIkH8BBFOvWh+K8FPm58aSTN7+DpykBfaNG+v3jPW/IS/bdcWyvn9wuML9XnfoT2V3Tth1CRw2372/ClSJE9p6B0XB+vigvGePgHbdm1RsDMRlMJl82Hlwl9QvEgJ3LpzE5VqlcXJg38hWdJkoUZ4/NRRNG3XEKd9fJW5E14yhY/alUnbmXb+vgMjxg3FmUPn8KfvGTRt3xAHth9G2jTpVD7N9DD3mWkb4cEfEBgAfnbx1HXEihVL1f/nubNo1KouLv95EyHBIchRKJNaCI3qNokQei2DwG8l+Gnz793qo2oLCg4CHcqJU8eiXq2Gyok0hf+73h1x6cwNQ+uDRvVHSHCwmsAPHz5gweK52LJjM1689Ffa/PGTRxjab6TaGQg/teuqn38xlPfs3xWJEiaC95ipahegOUStbJpOnT0J2tYn9v+JVClTRRr+y1cvYersSbh05aLyMwIDA5Xm//v4ZQV6/+G9sWP3NhQvUhLly1REwzqN1cI291lk4b909SJ6DeweZl+pALiICf+GlVujFFAQ+K0Ef1g2P4H37N9FQb7v4F5DqJOav1ufTqF8BGP4121cA+8ZE7Bk/kpl5jDVaFQZDes0McB/+dqlUHAfOLQPNE3O
/nEeDVrWQae2XVGnRr1PRsdFVLR8QVWWzqVpCg4OQsyYsUKZHTmy5cQ3lYuiIu3sQWPAaNbvB3aj79BeCn4t3bh5Xb3PXcnvgR+2/bLTsBOE9Rl3ysiYPTSrho4ZFKot435zIRL+bet2Kd8pskngtyX8e3eie78uCvIDf+yPNPwDR/ZTUZdZk+aq3tHkKFbha/TvOThc+KlhS1QqjL6eAzBh6lic8TmnIA0r0YRgfmpK46jImzevUbtpdQzuOxwli31jAJOQ0k/w2XkUmTJkVlVO+2EylqxapIDkgnn1+rXBhOPOVbNxFTSo0xjtWrQP9zOGgiMD//v375XfcGzfGaRJ9fFrR+gnvXv3Tpl1Aj8lbuUU2fv8jPMbhzr/CwlRMf0x3qOQJVMW/DR7ySdmjznNP2PuVGzftRVb1uxAUHAwBo/uj6vXr6ByhaoY1n+kMntMNT+HPnHaOKz+dSWqVqymzKfwEk2XRm3qIV+e/Piuo6cCSov7x4kdB78u34zA94EGMBlNyv9NLowdNgFNG7bAnn27MH/xXJz/2xe+Ry9g47YNWLpqEX7+YSkyZcyMm7dvKL9i4uhJePjoYbiflSj6vwXGgzRzDi8jVKlSpcbksdMRI3p0eH0/Eg8ePVCOu8DvYPiND7moTRnuI6wDeg1G4kSJowT/v/7/gjb8X+fOInWqNMpx5kSP9R6Ffj0H4dmzp2HCT7ucjnFEkRwuCjrFDIcePvYH/F/8izSp0qJ2jbrw7NwL8eLG+yTasm7TWkye+b06w+Bh2PCBo9GsfUN1eHd4zwnMnDsNm7avV/+no96ySWsVteEOM2WWd5ifRdbm58K4e+8ORk4YpkKdMWLGRKnipTFhlLeK+Aj8DoTfyhuOxdXR3qZG/GPXcRVilGReAmLzW8HmdzRktImpyTt6tkW3Tj3QonErR3fJJdoX+N0A/qmzvbFw2Y9o2qAFxgwbH+rU1iUodFAnBX43gN9B7Lh8swK/wO/yEFs6AIFf4LeUHZcvJ/AL/C4PsaUDEPgFfkvZcflyAr/A7/IQWzoAgd+M5ORbmi3FyvnLybc0RzBH8v38zg+xpT2U7+ePQHLyyyyWouX85eSXWSIxR/KbXJEQkotlkd/kisKEya8xRkFYTppVfo1Rx8TI7/DqEJ4TFJXf4XWCSXDaLgwZAkyaBAweDHh7O203pWOfSsAmX1H+WQm6fHnAxwcoVw44ePCzGrqrD1bg1zOD/v5A6tRAQADA534fPAA8PPTUKGXtKAGBX4+wvbyAMWP+V8Po0QDfk+QSEhD4LZ0mav3MmQG+aola/+ZN0f6WytTO5QR+SwWuaf0CBQBfX0B7Fe1vqUTtXk7gt0Tk1PaEv08fYOnSj6YPoW/XDpg58+NnYvtbIlm7lhH49Ypb2wFE4+uVpN3LC/x6RS7w65Wgw8oL/HpFL/DrlaDDygv8ekUv8OuVoMPKC/x6RS/w65Wgw8oL/HpFL/DrlaDDygv8ekUv8OuVoMPKC/x6RS/w65Wgw8oL/HpFL/DrlaDDygv8ekUv8OuVoMPKC/x6RS/w65Wgw8oL/HpFL/DrlaDDytsN/jd+9+F3yAfP/zqL1/f9EBzwzmGDlobNSyBm3HhImDYNkhYshDRlyyFBmrRuKTK7wH9pyULc37cfqQsVQrKs2ZAgRQrECucXD91Syi42qKCAALx5/BjPrl/Dg7NnkbZSReRq39nFRhFxd20Kf8Dz5/CdPBHxPTyQuVwFAT7i+XC6HFwIN30O4K2/PwoMGoa4SZM6XR8t7ZBN4T8xZAA80qVHplKlLe2flHMSCdw6chj+9+6iuPdUJ+mR/m7YDH6aOkGPHiFH1er6eyk1OIUEruzeiVgpU7qNCWQT+OncHh80AMU9e4ip4xTYWqcTNIFOzJ2DEpOnuoUTbBP4r65djSC/+8haoaJ1pC61OI0Erh/Yj1hp0iJ7sxZO0ydLO2IT+GnrZypZCh4ZMljaLynnpBLwv3MHt44dcQvb3ybw
72vdAsW7e4rJ46QA6+mWMn3mzUWlFav1VOMUZW0C/+7G9VF+2AinGKB0wvoSODhxPKr+usn6Fdu5RoHfzgJ3h+YEfjOzKJrfHRAPfwwCv8Dv3oSbGZ3AL/AL/C4uAbH5XXwCHdF90fyi+R3BnVO0KfAL/E4BoiM6IfAL/I7gzinaFPgFfqcA0RGdEPgFfkdw5xRtCvwCv1OA6IhOCPwCvyO4c4o2BX6B3ylAdEQnBH6B3xHcOUWbAr/A7xQgOqITAr/A7wjunKJNgd/F4N995CimL1+OPy9cRHBICLKkS4c2deugR/NmiB49Oh49e4aMlauGGlWsmDGRKW0adGrYEL1atkC0aNHU5/kbNMSVW7fVv2PGiIEUyZKhTKGv0b9dW+TPkcNQB/P5v3qFcxs3wCNRIsP7Zy5cQMUOnfDi+FH1Xo8JE7Fo4yYcWLwIJQrkD9WHZKXKYOeCeSiWL59TgM9OCPwuBP/C9RvQx3sSerZsgYaVv0XcOHFw3PccRs+dh2qlS2HxuLEG+DfPnoV82bOr0QW8D8TRP/9Cr++94d23D7o2aWyAv+G336pF8S4wANfv3sPPGzaAC+yXqVNQvczH7yki/E//9UeTqlUxc8ggs/Bv3LsP6VKmxLHVKxEjenRDXoHfdmve7W91PvP3R7bqNZVWHtG1SyhJnjx/HpMXL8Hi8ePwLiBAaf4jK5ejcJ48ofL19p6EyzdvYdeP8w1Qt69XD33btA6Vz3P8RGz38cHlHdsQN3ZsBX+H+vXhNXceDixdjK9z5VL5w9L8cWLHxq7DR9CtaRP0aNFc4Lcd84aa3R7+Vdt3oNu48XjocwDxzXw/qGb2hAV//ylT4Xv5Cvb+/JNZ+B88eYLMVatjBx/wLlFcwT+hVy8c9/XFoTNn4bN0sTKxwoI/Xpy4qPxNCbQaPBTnNm1Aqi+/VG2J5rfdKnB7+Mf/+BPW7d6t7G5zKSz4P3z4gGO+vmjQuy8Gd+xg0PSEOizNz/pTlauA8b16olPDBgr+8T17qoVQoEEjDO3cER0bNAgTfppiUwf0R9P+AxE3TmwsmzhB4Lcd96pmt4f/+58XYeW27fhni/lvG9DgjxcnjtLOTEHBwcp86diwgQJas8Ujgn9K/35oXae2Af46FcqDNn2PCRNwftNG3PLz+8Th1eC/+/AhCjZsjI0zZ6Bc0SKi+W24ANwe/nW796DDiJG4f2AfkiRM+IkoCTijOhr866ZNRd5sWVW+WStX4cS5czi8YrnKo6Xw4L9x7x7y1KmnzJvi+fOHgp9la3n2UE5tl8aNUKF9x1DRHg1+5qMfsnrHbzj1yxqkLl8Rv82fK9EeGywCt4f/xevXyFK1Ojo3aqgiNsbp3JUrqNm9B46vWomYMWN84vC+efcOhRo3RfPq1eDl2T1C+Dt7jcHhM2dxYetmFRbVzB5qfqZrd+6gSJNmmDpwAOhHGIc6jeF/HxSEIk2boXXt2pixfDkYgZJQp/Xpd3v4KbIVW7eh69hxyg5vWbMG4seLp0KYY+bPR7Pq1TF90ECD5jd1ePcdP4G6PXvhwJLFKPpVXoPDq4U6//vwH+48eIAFv/yqIj3b5v6A0oUKGfLR5tfg55uM/CzevBkvXr0OF37m23/iJBr3668WkWh+64P/Wdj8mtgOnDyJacuW4+yFCwh8H4ScmTKpuH3bunVUFnPRHmr0k+fO48Ta1YYQpnbIxbJffvEFyhYuhKGdOxnOCPi+qebne+8CA5VN//DpU7PwMy8jP+t//x2Hli0RzW8D/j8LzW8DuX3WVcoJr5npl29sc++1IfAL/O5NuJnRCfwCv8Dv4hIQm9/FJ9AR3RfNL5rfEdw5RZsCv8DvFCA6ohMCv8DvCO6cok2BX+B3ChAd0QmBX+B3BHdO0abAL/A7BYiO6ITAL/A7gjunaFPgF/idAkRHdELgF/gdwZ1TtCnwC/xOAaIjOiHwC/yO4M4p2hT4zUzDvtYtULy7J2KZ+aoQp5hF6USUJRAU
EIAT/GqWFaujXNbZCtjkYtuJIQOQqWQpeGTI4Gzjlf7olID/nTu4dewIintP1VmT44vbBP6ra1cjyO8+slao6PgRSg+sKoHrB/YjVpq0yN6shVXrdURlNoH/jd99HB80AMU9e4jp44hZtVGbyuSZOwclJk9FgjRpbdSK/aq1Cfzs/qUlCxH06BFyVK1uv9FISzaVwJXdOxErZUrkat/Zpu3Yq3Kbwc8B0Pb3SJcemUp9/NZiSa4rgVtHDsP/3l23sPW1WbAp/AHPn8N38kTE9/BA5nIVxARyQfZp6tz0OYC3/v4oMGgY4iZN6oKjCLvLNoVfa5Im0P19+5G6UCEky5oNCVKkkIXgxAgR+DePH+PZ9Wt4cPYs0laq6DamjrHY7QI/G6QT7HfIB8//OovX9/0QHPDOiaf/8+5azLjxkDBtGiQtWAhpypZzC+c2rBm1G/xui9OQIcCkScDgwYC3t9sO0x0HJvDrndXy5QEfH6BcOeDgQb21SXk7SkDg1yNsf38gdWogIADgVY4HDwAPDz01Slk7SkDg1yNsLy9gzJj/1TB6NMD3JLmEBAR+S6eJWj9zZoCvWqLWv3lTtL+lMrVzOYHfUoFrWr9AAcDXF9BeRftbKlG7lxP4LRE5tT3h79MHWLr0o+lD6Nu1A2bO/PiZ2P6WSNauZQR+veLWdgDR+HolaffyAr9ekQv8eiXosPICv17RC/x6Jeiw8gK/XtEL/Hol6LDyAr9e0Qv8eiXosPICv17RC/x6Jeiw8gK/XtEL/Hol6LDyAr9e0Qv8eiXosPICv17RC/x6Jeiw8gK/XtEL/Hol6LDyAr9e0Qv8eiXosPICv17RC/x6Jeiw8naDP9D/Ml5eX4m3d3cj0P8S/gt+5bBBS8PmJRA9ZiLE8ciF+OmrInHWVojjkdMtRWYX+B8f6wX/y0vgkaEBEqYojThJsiNGzERuKVB3GFRI8CsEvriK148Pw//ORnjkbI8UJWe7w9BCjcGm8Ae/9cP9PfUQO34GpMjdQ4B3QXy4EB5fnIP3b+8gbZXNiBk/jQuOIuwu2xT+25uLIUHSIvgye0e3EdjnOpCnVxfhzfPTyFjvpNuIwGbw09QJeeWH1PmGuo2wPveBPDj/PWIkSuM2JpBN4Kdze3tzEWSttE1MHTdaMTSBru+rjYz1TruFE2wT+J+cGYkPL+4gRe5ebjT1MhRK4PHF2YiWJAOSFx7n8gKxCfy09ZNn64z4yQq5vIBkAKEl8PbZWTy5ttAtbH+bwH9laWJkrbRVTB43XDkfTZ86yNHupcuPzibwX/o5GnLVPO3ywpEBhC2BSzuKIFenDy4vHoHf5afQ/gMQ+M3IXDS//YG0Z4sCv8BvT96cqi2BX+B3KiDt2RmBX+C3J29O1ZbAL/A7FZD27IzAL/DbkzenakvgF/idCkh7dkbgdxH4c5VpiMvXb3/S2xgxoiP4rv7rub/tO4JsmdMjR5YM9uTPoW0J/C4Ef/3qFdCxRd1QPY6GaMiaKZ1uiErX7YghPdqhVuUyUarrw4cP4F/06NGjVM4ZMgv8LgR/pxb1MKBb63B7PGfJOkydvwJPn/srDT5+cHfUqFRK5ed73w2eiP2HTyM4JBjfFCmABZOGIlP6NKjY+DscOHIacePERqNa32LKyN5IXbAqrh7dhGyZ0qvyrPvnVZvx197V2LrnEPp7zUDnVvUxesoCHNu+FAXz5lB5wmvfGWA37YPA7ybw02zp2H8sti+fiQJ5cmDngSNo0mUIzh/4RQHcqsdI+D16grXzJyJO7Njo0G8sAt+/V/mZPHKVx8ofxinN//DxM7Pw79x/FC26D0eTOpUxok9HpEyeFHsPnTTbvsBvOwm4/d0e2vzmNH/N1r1RtGBeePXvYpBy9Za9ULzQV+q9l6/eqPcTJ0qgXjfs2A/PYd546LsnyvDvOnAUrPvWyW3ImC61Kh9R+7abestrFs3vQpr/6s27iBYtdIe/yplNmSI5SzfAlRt3PhlN2ya1
sHSmF/6+dB0jJs3D+UvXEBwcgoDA90rz+1/6+IPTUdH8hL9Ou354f/u4ob2I2rccUduVFPhdCH6aJO2a1A7V47hxYyuzJnfZRujaugH6dG7xyYjokGYqVlvZ/9O9+iFe3DjKbm/Ta1Sk4f9h8S9YtHqLWmiEv1HnwXh97Q9DW+batx2++moW+F0IfnNmT+22fZEi2RdYNH2UYUR37j9EutQpcP/hE2QoUjOUAztqygLMXrQ2TPhfvHytdoJ/Dq5DnhxZVH0Dx83C7z4nwoXfXPvOGgkS+N0Efjq8jbsMxoafJ6Ny2RI4etoXBJIObeH8uZE0dwX8MGEQOjavi827DmLyvOU47XsBz/7Zr/wARncGdW+L9s1qwyNxIqQqUAWDPduhb5cWuHnHD5WbdUfC+PHDhd9c+6WLFdSnom1UWuB3E/g5DGry6T+uUtGajOlSYVivDqDNz7Tkl60Y9v1cBAS8R91q5TB1VB9UaNRVhUBvn9qOCbMWY8q85ahSrgQ2L5mGTTsPoJ/XDPAQjWHT6hVL4ccVG/D3gXVhmj0RtW8jfnVVK/C7CPy6ZlkKhykBgV/g/2yXhsAv8Av8Li4Btz/kcvH5ccrui+YXze+UYNqjUwK/wK+Lsz6jpuHeg0dYv3CyrnocUVjgd0L4w7u7z67yxqW5m53Mc+HKDdx78FiFLa2d3r4LwMoNv6FLqwaq6mu37iIwMAh5c348DHOlJPA7Kfxh3d1nV79M6qEOocwl3uHh3R3G8k1TSMh/KnZvadr7x0kMmfADTu9aYWkVTlNO4HdS+M1dZfjn8g0UqdYKR7ctwddfffydqcpNu6sblmlTp8CEWYvUwyVpUibHuX1rkSRnOSybNQb9x8zA8N4d1P2fFet/w8TZi3Hrrh9SJk+Gfl1bolfHZgZpeM9Zih8W/YIXr16jTPGv1d1/Xqyr2ao3gkNC1P2gUzuX48cVG0OZPfOW/greA+LOky1TOowd9B1qVy6r6i1avQ1aNayOg0fPqAt2QUHBaifj1WhHJIHfBWnuX9AAABphSURBVOFnl72m/YQ9B4/jyNZF6jTWc9gkXDy0Xu0K9dr3V48kUvO/CwhE/CylULV8ScwePxBpUn4Jv0dPQdNq46IpqF7xGxw/87e6vnBky2IULZgHG3/bj25DvLF16XRkz5IBvUdOxdWbd3B8+1LMXLgaKzfsNGh+Y5t//fZ96DJwArYtn6Hq2br7EJp1G4oTO5apKxYlarVTp8+718xBzqwZMXfJOoycvADPLuxDNNPrqnZYDQK/k8LP+zSxYsX8pHePzu1Bgvjx8D4oCIWqtETPDs1ALT1tdB80qFFR5TeGn+ZPvMzfqGvN2lUHmj6Pnz5H6pRfGurPV7EpPNs1wXdtGqJGq17ImzOr0spMBHbf4ZNoVrcqfli8Nlz4ecefVyFmjRtgqPeb2h1QqlgBVRfhL1ogj7pjxHTj9n1kLVlXPVPAB2LsnQR+J4U/PJs/a8Z0Bi154uzf+KZOB9SuXEbdx9FSWPAf3rIIpYoWUFl4xZkX29Zs2o1/X7xU9T149BSTRvRUJhHv5vO1W9tGn0jHnOZnOZpOnu2bGMq17jkKdJJ54Y7wN6xREQO7t1Gf0zRKX7gGbp7Yqh6ntHcS+J0U/oie12W3127Zg84DxiNLhrTKDNF2irDgp31epEAeNdrFa7cqp3XHilnKPGHiLtKmcU0D/KYQa2KKCP7enZqje7vGBqny8cmAwEAVCiX8jWpWMkSrBH7rLHe3OuGN6JFFiuy5/0v1AMuqueMx3Hsu6lQpp5zZ8MweY/g7qud3g7Byzsef5OEjjmkLVce4Qd8p+Gm+8Bsh5vy/eUITaeGqzRjYvTXo0IZn8/NRRj5YY2z2FKvRBhVLFYX38J4Cv3VY/6QWt4O/Yc1K6Nyy3icDjRc3rrKP2/YeraIlq+dNwJlzF1G2fmec3bNKOZLNuw3D+6BgLJw6QkVl6PAaw09ned3W35UDGxQcrHYPng3U
qVoOk0f0Ah3XTgPGKVMlf+7sGDz+B/xz5bpyXBndGTfjZ3WvP2GC+GoH0Q65tuz2Qfs+Y/DbylkolD8X1m7eox5q/3PPanyVK6vAL/BHLAFzh1yVShfDIM82aNJ1CC4d2oBUKZKpChntOXfhKg5tWgj17QqewxX4lw9vRJIc5ULB/+zfF2jadShO/Pk30qdJiWmj+yqA+46ejrEDv1NhT97vn7t0HfhUlxbqpF3Op8P4HADr2LZshnoQ3viEl88EzPp5LfxfvkLu7JnhPbwH2GcmMXsinntLcriV5rdEAFIm6hIQh9cJHd6oT6OUsEQCAr/Abwk3blFG4Bf43QJkSwYh8Av8lnDjFmUEfoHfLUC2ZBACv8BvCTduUUbgF/jdAmRLBiHwm5HalaWJkbXSVsSIaf7hEUsEL2UcK4GQ4Fe4vq8OcrR76diOWKF1mxxy3d5cDMmzdUb8ZIWs0EWpwpkk8PbZWTy5thAZ6+n/SSdHj8sm8D85MxIfXtxBity9HD0+ad/KEnh8cTaiJcmA5IU/Xu5z5WQT+AP9L+P25iLIWmmbmD6uTIdJ3z+aPLWRsd5pxPH4+BioKyebwE+BPD7WCyGv/JA631BXlo/03UgCD85/jxiJ0iBFydluIRebwU/p0PZPkLQIvsze0S2E9TkP4unVRXjz/LRb2PraPNoU/uC3fri/px5ix8+AFLl7iAnkgquHps7ji3Pw/u0dpK2yGTHj2/+xSVuJzabwa52mCeR/eQk8MjRAwhSlESdJdlkItppRK9RL4ANfXMXrx4fhf2cjPHK2dxtTx1g8doGfDdIJfnl9Jd7e3Y1A/0v4L/iVFaZJqrCFBKLHTIQ4HrkQP31VJM7ayi2c27DkZDf4bTFJTlHn9u1A69bAihVArY+/5iLJNSQg8Oudp/LlAR8foFw54ODHnyeV5BoSEPj1zJO/P5A6NRAQAMSNCzx4AHh46KlRytpRAgK/HmF7eQFjxvyvhtGjAb4nySUkIPBbOk3U+pkzA3zVErX+zZui/S2VqZ3LCfyWClzT+gUKAL6+gPYq2t9Sidq9nMBvicip7Ql/nz7A0qUfTR9C364dMHPmx8/E9rdEsnYtI/DrFbe2A4jG1ytJu5cX+PWKXODXK0GHlRf49Ype4NcrQYeVF/j1il7g1ytBh5UX+PWKXuDXK0GHlRf49Ype4NcrQYeVF/j1il7g1ytBh5UX+PWKXuDXK0GHlRf49Ype4NcrQYeVF/j1il7g1ytBh5UX+PWKXuDXK0GHlRf49Ype4NcrQYeVF/j1il7g1ytBh5UX+PWKno8u8o+PM/JPkstIwGXhP3r0KOLEiYPChQu7jLClo84lAZeF/8cff0SqVKlQs2ZNxIwZ07mkKr1xCQm4JPwfPnxAnz59kD9/fiRIkADNmjVzCWFLJ51LAi4Jf0hIiII/WbJkyJw5M9q2betcUpXeuIQEXA5+av3Hjx9j8ODB8Pf3R7Zs2TB16lSXELZ00rkk4JLwX79+HcOGDUNAQADSp0+PuXPnOpdUpTcuIQGXgZ8aX0sXLlzAiBEjEBwcjHTp0mH+/PkuIWzppHNJwOng/++//xTUsWPHDiWply9fKuc2evToIPzU/M+ePUOWLFmwbNkyRIsWzbkk66DeUEmILCInfKeD//nz5+AE0pk1TseOHcPXX3+tYvvnz5/H0KFDcfDgQZQuXRq//fabWhSf+6RTbk+fPsWXX3752csiMvg7Hfy7du1ChgwZkCdPnlD93759Ozw8PPDNN99gzJgxOHnyJH7//XfkzJkTkydPRqVKldTC0BbA56YBOd4bN24gfvz46vzjc1cELgk/tXiiRIlQpkyZUP1fvXq1MnM8PT3h5eWFxYsXw8/PTx1wjRo1SplJXbt2VWU58Xfv3kXatGk/mx2B5uKff/6J1KlTqz+BP2L8nU7zT58+Hfny5UPlypUNvefETpo0SZlDQ4YMwejRo/HTTz+pkGeMGDFQq1Yt9coF061bN8SK
FQuamfTixQsFg3HSnOfwAKHPwTysxxGJ4w0KClILm+OKTGKZv/76S43VVPPzMyaOVxbF/6TpMPjDM0to0pQsWRJVqlQx9JKHWrTxEyZMqKI8I0eOxM8//xwKfgLLv3nz5iFTpkw4cuQI8ubNi8uXL6v6jNOlS5fU+UB41yLevn2r4KeDbe/Edl+/fo0tW7agbNmyKpRLYCMy4yijEydOIGPGjEiTJk0o8+/evXtImTIlnjx5onZDSR8l4BD4OZEPHz5UGooOGp1bOqxMNGkIa9WqVUPB37t3byRPnlyBP2DAAKxcuVJNJstR4z948ED5ChMnTlSX3fbt24eCBQuqHaB27dqhNN6ZM2fUwojL79Q3SezblStXlNbnItL6pWULDAxUIIZVNjJQRQQxF/CaNWuwZMkSFcLNnj27WtSPHj1SYV1zu9WiRYuUD8RFo/WbWt/b2xvfffcduAh4JUSSA+HnhJw+fVpBumPHDhQpUkTZ6vyjZid0nTp1CgV/jx49lCamc9uiRQts2LBBQcFJZhSINj7NJW3noDNMWC5evIjq1asbtDjbpvNM7R7WnSDCuWLFCty5c0edIpuaPgy5sg4631FNrPv+/ftK+4YH8b///qt2Lzr+xYsXR7ly5ZQy4HvDhw8P1wyiLAj/F198gYYNGxrycUegqTho0CClIAR+B5s9nJC9e/eifPny6nSWOwD/KlSooODn1k34Ne3F/N27d8ehQ4fwzz//oHnz5gp+vk+IGOFgpIcTy3JNmjTBgQMHQP+B93548zNx4sRq1DQpuEC++uqrMO8EEVA60Ky7c+fO6u6QcSKcs2fPVjuQ6a4Q0WIgoOvWrUP9+vXVomJ54zrY9rVr1/D999+Dphl3p23btmH//v2qT9wRWI75+Gdclj4CTUGeevfs2dNg0nEcXMQDBw5U0aCiRYvKLdj/nyiHmD2cKGoiOq68l5M1a1Zl0nBiZs2apUwhLgpN6xIaOrJ79uxRE0j4N27cqADVEhcB6+GuwLy+vr5o37692vLr1Klj0NSElyAwlNqvX78wzR5qWLbF/tFk0hxkzVzr2LGjMruSJk0aaQeSZRmtYl/Yf/6fpojxYR53FIJODc6dhwqB8NM34b/pv1Amt2/fRooUKRAvXjxD/9+/f4/+/fsrJcDFo8mOMqKs+dny5cuVWcTFJ8lBNj8nqm7dugr0du3aoVChQsiVKxdy5MihNDZPcKkhOZFMzE8IacLQqaP9bwo/8xF+mgh0jmkGNWjQQJlJBKBXr16qLvoJJUqUQIcOHZQ5YBpNIZQVK1bEzZs3MXPmTAUrdwva+oT31atXaoHxs2rVqkU6GsN2eRL966+/Kl+E9jchJ9Ra4iLnoqefQsDZLmXBRUEfg5ErQr127Vq1SxqXZf+4qLhTccEbK47GjRtj2rRp6n3Kj4pBkoPg59ZMLT9+/Hhld3P7JvwTJkyAj4+POsFdv369walkfoYz6Yhy8gguNbMWwtMmkhNOcGnW8PIbNT+dP5540mRgYniUi4xanwtPi6ZodbBO+iLcYbg4aHcz0QyhVqZTzTMHmlfa+YIpSGGFUs+ePatMNZp7NGdo3nAR0z7XEgHmrsKxM2LDBcwFwUQtz1uslBWvdnTp0kVFrLT05s0bJUvuoAsWLDDsKCxPM5Lt0vwh+JQhoz+mfgd3ZO66/CyqJp0rLia7mz2EiwDS6eMEUvtR6AT3hx9+UNDRCeYEaaFGTiwnmhNCiHkGwDLGl90ofJoH1Pzc9rlQeOjF9rjNU+MyMWqiwd+0aVPlFLOcdjrM/HScWZ5mDYGk2UDzLHfu3Opkme0ywsR+0t8wTTRZCKGxWcJFzV2EUHNBsX7uTLTPtR3u3bt3Kg8jVSzLtjXTjjsUnW32lf4PFQIdcw1g7kitWrVSuxPLa3VSToymHT9+XLXLHYTBgAIFCoSCX4tyLV26VPkzYY3LFQE312e7w09NxC2YWpUOJU9qOcGcXGpjwqfZvIz+MHHSCT/z
cOumdjS297UBcnHwkIf2OLU0FwHzEX5qXU4wFx7Dh3wYho6xtijoaHMBsn2aDtS6mlPJhcpICUEjTHylNuWhEh1pUw3KBcLE3Yw2Pf+2bt2q4CRUNJcIP+8lMaT77bffqjpo4vAztk34ae4Zw09/hX3kzkfzkAuKY2GimcTdkTLjZ9piZn8ZAWKfuNtSRps2bVI7pKmzTUeeddJsZF53T3aHn5PBWDzj+dQ+NHEIHCefJgjhYDiQNrEWoeGBjwY8bXeaS6Ymj7Hpw/g47WZqMcLD+0CHDx9WWp+JvgEdRppZNI2KFSumtDjbI3B8pdY1lwgONSgjVqYmArXs33//rUwN7hqMLGmhSoZYOUY63ASMJhnNNI6fioBOMGVkCj/boKnHcgz7su3Nmzer9imLOXPmKLlyJ1u1apXaWdkOFzB3MMpDM+HoTzH8q/WbeWhS0QyjkuBpOoMGkT1ddtVFYnf4CRfv5zAsZ5oIALUutTMXgAY/NSZj8zR/aJJQ45qaPMban6ZFkiRJlCZlPppChJ9Asm46z4SODiLDoTw53rlzp3KE2QY/iwh+tjd27Fi1g5meBXDXof1NU4RRFkaPaIdzdyH8TDShOD6CybERRH7O3YowEzy+GvsPvLxHqGm3cywElP1nHtrz1NzaDkqYaUbRXOMuRVOITjLr5AEadz3m1U6PeU+Kfaa52bJlS+XQU4bunOwOP21Vbvc8ZQ0rUVtxgmgmaPd7uCDoiBFMgkqozNpy0aIpqLTr0TQ/aArwjIDXoKnZaIoQCkaQCAE1M3cXmjd8RoCLNKLEXYNw0azQTB/thJamibbr0AxjFGfGjBkGM0YzsbgD0cwh/AzBajCH1bZ2LqDtelzUhJptGysUbTHSBOLuV69ePfU5v/GCfWrUqJHakRjRYt95SMidls43AwU0pVivu1+FMMCvCVQ7QDG9BBXRsXx4oJhGPgg/7XdCFl6iU0dHkFqQ/yb8nJzwtH1Y9WgajZ9xQVH7MtzISA0XkjYezeQiEASFi4A2rxZlMbcANLi4G9GUYuLiopZnxIZtcJHRt9FCuKb18XOCRtOLPk9kH8nk+Ag5NTXlSbgZxzdOXNQEmIuLh310kDle7mz0eW7duqV2EZpaND8ZcNAeJOIuQ99A2x2iciFODyv2NLUM8NMe5jbJ7Y/XCygERg6obWhGEFpqDQ6MMBEkakc6mPw/87LjnBDWQ63LkBm3TtapRT5ozzL8yLs94SW2SfufwFIr8UyAdVia2C+OgRqdF960cRjXxwVG8HhblAdLYTnUpu3TqWzdurXKSx+GphLhZwjUeHxsm9CFNWbKi+YOd0OaGmw7solAElJGzegv8EqEadKUAGVqfDLMedEiUjR3aPbRNGQ/GWXjGULfvn3VouQOSfmzLi4mziHnnvUxL5UJ55zzzZ2Z/6fM+UeOaGqyPrKiKT2W5djJHa+KsCwDG6Y3cCMrC0vy2d3soZC4rdKmD29rpzZkXJynkZw0alUKnwLnZ1yI5haOqTPMSaZG5wIkXIwmaREm7X4QtS4dRppGnHBOcESJJgOjLKxXuyHK+uhIczEwEQAuKDrUPJgz1czcPahImI/+Aw/lzCXKQ9utKBf6PyxLzc8FYJyYl2FO1s8QJxcnAWYAgAdoHC/vDrHv3FnpTDMPHXT6IVRA7hzvtzv8FG6bNm3UCW5YiWFJRkm4JdNuZ+IWzdNRQk8NQuDCMoE4UYx2EEb6B1xgzEezhBqZC4+JZgIXEeHhts9/E0yGGbkoqL3YT2o1LgJqSc1UMu7zuHHjlO1MDWb8BBkXF00JQsqoCs0JmlKmY+ZORO3KuD37Tv8hIpOL95QY9WE/6cByURFeRni0QztqV/aJd52mTJli2B1ojrGfXJhsV3tegO+xr7xG/scff6jzB5pIjnqeISKlY63P7Q4/YeIk85SSZoO2fWoDIui0XQm/FuenPcqJoYbjKxdCWKFOAsQtmnUTaoY8CT+BYViQphnLcecheLwa
TaeVB1acbG7p/JwanVswFwfb5A5Es8A4+kJTb+HCheDVAdPnAngBr0aNGmoR0W7mdQZGfQgf+6OZBNTABJHal4mLmlEgbZEaTzIBpRbn1QZGqyhHRsG4qxBSmjw0D6m16ThTruw/r47wlZEtmnVc6OwTF46pVufOQROMT9Oxz+6e7A6/dshFDccoBKM62u1MTiK3b5oenCDtlJHhQcLEbZnXCzhB4R1yEWpOIs0BAkdoCB0hZxmGADmxPMVknJ1hPQJCbc3FSMAJEC+RcZEwJEjNT+eb/gf7T3+Gi4yalsCZOmmMqdPO5SKiCcVxUTNrZxXc0RiaZFsEVYOQ5gl3JS48Y4edEDIPlQIjYPSZuBMxJk9fge1TQXAs9F24QAi5ptF5os3dhXepqAgY7QrrcI7tUvPz0E07IXbnBWB3+Dnh1MrU4NTMnDBqKZoq1ITapS9qd20CaILQHCKsPBGlpgwrGkPNyEgGQWY4lKYHQaMmphZmoq1LJ5H1EyiG9lg/T0epwdk/HgZxATL0R/ud93qYl6CdOnVKxffZD94BIoim8PNZBS4g4wfqqZlZHxcvo0o84KNZpkWJ2Df2gwubJ+D8nHLSzDu2wYttfOVCpuPO3YzyYt+4q/E6B+sn3MYX27houVBodlEmlFF4Kax7Se66AOwOvzbJDP9xogkPNR5ND57e8hieJk9Y8FOL8mCHjph2zUCLEnB3YNybWpqmAOP3tI25CGhza1eHCRh9Cu2QiZEGmhvUmFpEhG3QwaZJwxuc3AVo+lCTMjbOXYAnqsxvbO9rkPAAivmNbWYuJu4YTDxMCyuEqEWEqBi4cLh7aeYd83PXopLg4uEuwHFobVAeVCYci/GtTu52zM87Q9Ts2jjdFeiojMsh8GtPHdF25URTs9LJ4sEQt10CanwnXdP81MgMf3LLpvNLGPkUGBO1LXcOgkUnlHdn+MddwvhxRMJEsAkFtaSxeaH9m1qVfzQVuGNod3zYDi+tEUI60eHFvo1DitpksF3a8oRaAz+sieJYeX2B0Rfes9Hsfy5ean6W53sEn0BrSTtc421R7Z4SP2O7XEQ04TiWqMTrowKSK+Z1CPycEIJJm5jwM/5N55Ew8nCIWz5NBM0WplYjiIzkcGJ5G5M+AbU2NaxmzrA+7iiMW2sHLdqpqPHk0NzgjhHWA+wsx8/ZJu16U6eQ4DMP27JF4qKkGcidgqYVdwsCSzOKp+LhPcmlPcDOKx30RbSxsa9cSAz3WvrcsS3G6Qx1OgR+TghtVL4SZEYeCC2dRO1hD+N77tohEjU+nViGShma5OGRdqedWpGTywMfRjbMJbZNeMOLYWtXG0y/MlEz2fhqS4eQcqGJQs3OHYj9pDwYkQrvGydYhkrk3LlzymE19kO4m2iPTToDdM7SB4fAz8Fzsqhd6dzRhOAVW27ZYcHPvNT0NFe4AzAiRNhp0tDho3/ASefioINKBy+iZOkRfET1WvNzOry03zluvtLsM3foxB2VUR/K050Pp6wlY4fBzwHQTuVFLk4qHV6aGzR/NLPHeJCcVB6/M6JBm5h3VWgOMIzH0CZDprRrWZ92PmAtITmqHsqHvo12GMVXczY7FzTlRBNJ4I941hwKPyeLMXjaqIxiUHMxikENTgfYFH7awYy782BIi5nzLgivQvzyyy/KH2C4056XoyIWsf1yCPxRk7VD4WdXGaXhMbxmX/NOCReBcSSD+Ri6pMPLiAVBp5mkfZMzfQLG7xnm/FzB10xJ0fyRXwAOh592PmPPGrS7d+9WN/tMv1yJcXw6tNzOeW9He+428kN1/5yi+aM2xw6H3/REkXfbuRh4TSG85ArOatSmwXq5uQtSQUg8P2KZOhx+0y4yLMcTX5o4kqIuAVEMkZeZ08Gv3VUXzRX5SZSclknA6eC3bBhSSiQQdQkI/FGXmZRwEwkI/G4ykTKMqEtA4I+6zKSEm0hA4HeTiZRhRF0CAn/UZSYl3EQCAr+bTKQMI+oSEPijLjMp
4SYSEPjdZCJlGFGXgMAfdZlJCTeRgMDvJhMpw4i6BP4PDoqydYiPUbYAAAAASUVORK5CYII=)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zavj8gDZp2vi" + }, + "source": [ + "More precisely, we here compute the standard FBANK features and we plug them into a CRDNN model (which combines convolutional, recurrent, and fully connected layers). The output is processed with a **sigmoid** to perform a **binary classification**. The network is trained with **binary cross-entropy**. The predictions will be close to one for speech and close to zero for non-speech frames.\n", + "\n", + "At inference time, the binary predictions are post-processed. For instance, we apply a **threshold** on them to identify the candidate speech regions. After that, we can apply other types of post-processing, such as merging segments that are close or remove segments that are too short. We will describe this in detail in the inference section.\n", + "\n", + "Right now, let's briefly discuss how we can train such a model with SpeechBrain." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "819mitozv-Vv" + }, + "source": [ + "## Training\n", + "SpeechBrain has a recipe for training a VAD with the [LibriParty Dataset](https://www.dropbox.com/s/ebo987wu3hie3zm/LibriParty.tar.gz?e=1&st=bjjs0754&dl=1). This is a dataset that we created for tasks such a VAD training. 
It contains several simulated acoustic scenes with speech and noisy sequences periodically active (alone or simultaneously).\n", + "\n", + "Beyond that, the training recipe creates **on-the-fly** several other **simulated acoustic scenes** using the Musan (that contains speech, noise, and music signals), CommonLanguage (that contains speech from 48 languages), and open-rir (that contains noise and impulse responses).\n", + "\n", + "The acoustic scene simulated on the fly explores different scenarios such as noise + speech, speech to noise transitions, noise to speech transitions, etc.\n", + "\n", + "Similarly to the other SpeechBrain recipes, one can train the model with the following commands:\n", + "\n", + "\n", + "\n", + "```bash\n", + "cd recipes/LibriParty/VAD\n", + "python train.py hparams/train.yaml\n", + "```\n", + "\n", + "Please, follow the README file available within the recipe and make sure you have downloaded all the data before starting the training.\n", + "\n", + "Apart from the **massive use of speech augmentation/contamination**, the recipe has nothing special. Let's thus focus more on the inference part, which relies on some custom components.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "x8g30rxUz2Eg" + }, + "source": [ + "## Inference\n", + "\n", + "We can now focus on the inference part. The inference part is a bit more elaborated than usual because we designed it to work on very **long recordings** and to support several techniques for **post-processing** the network predictions.\n", + "\n", + "We will address all of the aforementioned aspects. 
But, let's first install speechbrain:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "executionInfo": { + "elapsed": 35145, + "status": "ok", + "timestamp": 1708531523430, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "rlXT2DWJ9Emk" + }, + "outputs": [], + "source": [ + "%%capture\n", + "# Installing SpeechBrain via pip\n", + "BRANCH = 'develop'\n", + "!python -m pip install git+https://github.com/speechbrain/speechbrain.git@$BRANCH" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "executionInfo": { + "elapsed": 149, + "status": "ok", + "timestamp": 1708531523439, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "SIrH6negd-L3" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!wget -O /content/example_vad_music.wav \"https://www.dropbox.com/scl/fi/vvffxbkkuv79g0d4c7so3/example_vad_music.wav?rlkey=q5m5wc6y9fsfvt43x5yy8ohrf&dl=1\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WltXFcFC0OpH" + }, + "source": [ + "Let's read a speech signal:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 486 + }, + "executionInfo": { + "elapsed": 25521, + "status": "ok", + "timestamp": 1708531548864, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "iix1OohC2Pam", + "outputId": "e405b50c-b5f9-43e2-d0f9-abbd9ba96a25" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAi8AAAGdCAYAAADaPpOnAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABVHklEQVR4nO3dd3hUZfo38O9MKoEUQhqBQOihlwRCQHqkirCiArJSFtFVoiLoT3BFrC8WVFZlRZGiawFZFBU1Sgs1EiCg1NBCDUmAkE7qnPePkGEmmT7nzJkz8/1cV64rMznlnpwp9zzlflSCIAggIiIiUgi13AEQERERWYPJCxERESkKkxciIiJSFCYvREREpChMXoiIiEhRmLwQERGRojB5ISIiIkVh8kJERESK4il3AGLTaDTIysqCv78/VCqV3OEQERGRBQRBQFFRESIjI6FWm25bcbnkJSsrC1FRUXKHQURERDa4dOkSmjdvbnIbl0te/P39AdQ8+ICAAJmjISIiIksUFhYiKipK+zluisslL7VdRQEBAUxeiIiIFMaSIR8csEtERESKwuSFiIiIFIXJCxERESkKkxciIiJSFCYvREREpChMXoiIiEhRmLwQERGRojB5ISIiIkVh8kJERESKwuSFiIiIFIXJCxERESkKkxciIiJSFCYvRESkeDtPXcN36ZflDoMcxOVWlSYiIvczdVUaAKBHVBBahzaSORqSGlteiIjIZVwrKpc7BHIAJi9ERESkKExeiIiISFGYvBAREZGiMHkhIiIiRWHyQjYruFUJQRDkDoOIiNwMkxeyyZ4z19H9ld+x4LsjcodCRERuhskL2eT9zacAAGv3X5I5EiIicjdMXoiIiEhRmLwQERGRojB5ISIiIkVh8kJERESKwuSFiIiIFIXJCxERESkKkxciIiJSFCYvREREpChMXoiIiEhRmLwQEZGiHcsqkDsEcjAmL2QTlUruCIiIaly+eUvuEMjBmLwQERGRojB5ISIiIkVh8kJERESKwuSFiIiIFIXJC9nk5NUiuUMgIiI3xeSFbFJUXiV3CERE5KaYvBAREZGiMHkhIiIiRWHyQkREiiYIckdAjsbkhYiIiBSFyQsREREpCpMXIiIiUhQmL0RERKQoTF6IiIhIUZi8EBERkaIweSEiIiJFYfJCREREisLkhYiIiBSFyQsRESkcS+y6GyYvREROJP3iTQxdkoLtGblyh0LktBySvCxbtgzR0dHw9fVFfHw80tLSLNpv7dq1UKlUGD9+vLQBEhE5iYc/24dz10swY/V+uUMhclqSJy/r1q3D3LlzsWjRIqSnp6N79+4YMWIEcnNNf6s4f/48nn32WQwYMEDqEF3CD4evYM7aQyivqpY7FCKyQ0kFX8NE5kievLz33nuYNWsWZsyYgU6dOmH58uXw8/PDqlWrjO5TXV2NKVOm4JVXXkHr1q2lDtElPL32MDYezsKXf1yUOxQiIiJJSZq8VFRU4ODBg0hMTLxzQrUaiYmJSE1NNbrfq6++irCwMMycOdPsOcrLy1FYWKj3487ySsrlDoGIiEhSkiYv169fR3V1NcLDw/XuDw8PR3Z2tsF9du/ejZUrV2LFihUWnWPx4sUIDAzU/kRFRdkdNxERETkvp5ptVFRUhIcffhgrVqxASEiIRfssWLAABQUF2p9Lly5JHCUdz3Lv1i0iIpKXp5QHDwkJgYeHB3JycvTuz8nJQURERL3tz549i/Pnz2Ps2LHa+zQaTU2gnp7IyMhAmzZt9Pbx8fGBj4+PBNGTMf9Yw1kQREQkH0lbXry9vREbG4utW7dq79NoNNi6dSsSEhLqbR8TE4MjR47g8OHD2p97770XQ4YMweHDh9kl5CRullbIHQIREbkxSVteAGDu3LmYNm0a4uLi0KdPHyxduhQlJSWYMWMGAGDq1Klo1qwZFi9eDF9fX3Tp0kVv/6CgIACodz8RERG5J8mTl4kTJ+LatWt46aWXkJ2djR49eiA5OVk
7iPfixYtQq51q6A0RESmIwNUB3I7kyQsAJCUlISkpyeDfUlJSTO67Zs0a8QMiIiIixWKTh4sqq2SVTiIick1MXlzQR9tOI2ZhMnadviZ3KEREDpVbxEKd7oDJiwta8vspAMCLG4/KHAkRkWNdL2by4g6YvBAREZGiMHlxYRdulEpy3PIqjSTHFcsPh68gev7PLKZHROSimLyQy3l67WEAwLaTufIGQkREkmDyQnYrraiSOwQil1RV7dytnERyYfJCdluxM1PuEIhcUteXf0d2QZncYRA5HSYvZLe8Eo7uJ5LCrcpqrNl7Xu4wnB4L7LofJi9ERESkKExeiIiISFGYvJDd2GRLRESOxOSFiMiJqVRyR0DkfJi8EBGRogls/nU7TF6IiEjRNv2VJXcI5GBMXoiISNF+PZotdwjkYExeyG5yNtm+9MNRLPjuiHwBEEmMQ16I6mPy4mKqqt2n87eorBJfpF7AN2kXkVvEKqRERO6CyYuL+WTnOblDcBiNxvDvRETk2pi8EBERkaIweSEicmKs80JUH5MXspvAGrtERORATF4U7uKNUrzwvbyzbTKvl8hzYp1vpOYSqCOXC/DfPy5AYDUrIofIK6nAjeJyPPhJKr4/dFnucMjFeModANlnzIe7UFRWJWsMe87ckPX8lhj70W4AQLCfN8Z0aypzNESWUylwsvR7m0/hg62ntbfTMvPwt57NHXJu5f23yBZseVE4uRMXpcnIKZI7BCJJnbtWjMW/nsCN4nLZYtBNXIikwOTFxZVVVssdAhE50JgPduOTHefw3P/+kjsUs85dK0b0/J+xanem3KGQwjB5cXEpGblyh0BEDnTr9heWQxdvyhyJeUPf3QEAeHXTcZkjIaVh8uLyXLcHWHcKqaXjcM/kstuIlMXWqdI3SyvFDYTIiTB5IcWyZeLQL0e4gBuRK6vScEahO2DyQoq15XiO3CEQuYS9Z67LV/JAZN+kXZQ7BHIATpV2ea77LSQr/5bcIRBJTuqO32NZBXjos30AgPNvjpH4bNI75yJJGJnGlhcy6/LNUjz/v79wmtOMiRxP4vUBjmUVan8/eOEmKqu5yik5PyYvZNYjnx/AugOXMG7ZHtliOHqlAAu++wu5hWXa+/QG7JrYt5p94EQWmfDxXrzwnbwVu4ksweSFzDqZXdPiUlohX82Yez7cjW/SLmHe+j+t3vc5G/YhclfrD7KUPzk/Ji8uztWW8jml03WlsrA5/btDV6QKh0hy9nQaWVKkUuNiLZOu9p5HhjF5ISJyAVcLbmFDnVaTFTvPmd3PUZ/1XBSVxMTZRqQofP8jMmz4+zvrrXV2IrvQyNaOd7O0EsENvSU/j8Tjm8lJsOWFFOWWkWZwfqsjRxAEAUcuF6C0wvkWROUireROmLy4OFf7SNd9gz7Peg7kQIIg4Ke/rmLsR7vxt2V7HXZee1oSLMnpHdVQsev0NQedidwBu41c3HfplzG6a1PRjnejuFy0Y9mLsyLIUTQaAX/7zx78ebkAAJDhwJpHrtKoePkmi0qSeNjy4uK2nBB3VeknvkoX9XhSY3cSieFCXqk2cXE0e5/DZZXVJr90cIwIKRGTF7LKvsw8q/cpq6zGT39mIa+kQoKITNt//qbDz0kkpg+3n7F5X0EAEhZvRezrW5CjU+BRDr8dy8a5a8WyxkCug8kLGZVbVGZRnQhz3v09A09+cwiTP/1DhKgMq6w2/O20vEq+wnpEYrCn4eXstWLcLK0EYNsXDzH9dbkAQ9/dIfl52JDkHpi8kEGX8krR542tGPxOit3H2vTXVQDSjhNYf+CSZMcmUqozbOkgF8XkhQzanlEzViZb5qZmS+Xfqvl2WVzO6aJEtSybbaSMtor80gpcK3KeCQMkLyYv5FLKLejmuiXjGk3kOk7lFInSrUqW6fHqZvR+YwuuFnDWEjF5IQfIv93n7iw+3nFW7hDIBQx/fyd6vPq73GFYzGj7ijIaXrRe+uGY3CGQE2DyQpIzVhV
XLln5/OZG1jE2XbmsUoNqOxc2rKzW4I9zN1BeVW3ztGh3agFyp8dKxjF5IacnxxRrIl3fHjBeEPHyzVK7jv3qT8cx6dM/sGDDERy5YlstmSoXWxnaHpauNk/K5pDkZdmyZYiOjoavry/i4+ORlpZmdNsVK1ZgwIABaNy4MRo3bozExEST25M0nOnlv2p3ptwhkJtbLmFX43//uAAA+O7QFaNT/sVg7DM9JUPcQpZEjiB58rJu3TrMnTsXixYtQnp6Orp3744RI0YgN9fwCyYlJQWTJ0/G9u3bkZqaiqioKAwfPhxXrlyROlTFyS+VrkVC7poQuo5fdZ6VcYnk0OeNLUg9e0PvvmqNgOmr0/DmryftOvYvR7Lt2p9IDpInL++99x5mzZqFGTNmoFOnTli+fDn8/PywatUqg9t/9dVXeOKJJ9CjRw/ExMTgs88+g0ajwdatW6UOVXEW/SjdwLXa2izOwJlagYjkkFtUjskr9Is87j5zHSkZ1yRtFSJyVpImLxUVFTh48CASExPvnFCtRmJiIlJTUy06RmlpKSorKxEcHGzw7+Xl5SgsLNT7cRfHstznsZpjbJyjUmpYEFmiqlqj/f3zveflC0RGGq5XRpA4ebl+/Tqqq6sRHh6ud394eDiysy1rqnz++ecRGRmplwDpWrx4MQIDA7U/UVFRdsdNzqXUgrosVziDiNzA6z+fwJncmqq5205aN1ZFN5E/k1uM9zefQmGZc5UxsMSeMzfMb0Quz1PuAEx58803sXbtWqSkpMDX19fgNgsWLMDcuXO1twsLC5nAuJjUc+bfrHaeumbx8dgWQ2JyZEPAmr3n8UXqeZxbPEbv/lNWLr2R+F7NGkMsG0BKJWnLS0hICDw8PJCTk6N3f05ODiIiIkzuu2TJErz55pv4/fff0a1bN6Pb+fj4ICAgQO+HyBQ2OpOY3vktw6HnMzQr+vDFfJuOdfiSbfs5M345cQ+SJi/e3t6IjY3VG2xbO/g2ISHB6H5vv/02XnvtNSQnJyMuLk7KEBXN1oJWUikpr8KB83nQyFRzoqJKY34jIpH9fMR5Bre7Kmd7ryP5ST7baO7cuVixYgU+//xznDhxAo8//jhKSkowY8YMAMDUqVOxYMEC7fZvvfUWFi5ciFWrViE6OhrZ2dnIzs5GcTFXR63rwg37imOJbeKnqbh/eSq+Trsoy/kt6V4icmZS1lerPbbu4qWnc/m+Ssok+ZiXiRMn4tq1a3jppZeQnZ2NHj16IDk5WTuI9+LFi1Cr7+RQH3/8MSoqKnD//ffrHWfRokV4+eWXpQ5XUZytqubRKzWznzakX8bf+7YEABzLsq1iqCnlVdXw8fSodz+bi4nMK1LgIF2iuhwyYDcpKQlJSUkG/5aSkqJ3+/z589IHRJLKvF6i/X3O2sOiH39t2iVM6xdd735D31qzDKxAW1xWVX9DIhfH5J5cCdc2ItHpriItRbO0sW+Ohmq6zN/wV737Uk6xHDq5nxtcI4xcCJMXcigxVoTNKSy3eFsn61kjkoQlT/PPdp2r2dbFXxNcl9E9MHkhh4pZmIyScvu6bWoXsqvL0jctVt0lV+NMs3HqrsFUlytOzybHY/JCDvfB1tOSHLeimlOlSdmqJFxV+ryDZieu2mN6FfirtwvjXcqzPB4nys3ISTB5IYf7ZOc5SY67es95SY5L7ufVn44jev7POHjhpkPP+3//+9Om/S7fVF6l3FsidCGT+2LyQi7jVLZ1JdKJjKltPZjw8V6HntfW1pFqKwZ3sRGDXAGTF3IZHKhHcll/4JLcIViskpWoyQUweSFFKjYw6Pd6cTnSLVjj5VZlNf71/RGctnIxOyJjnvtf/Sn5jiRY0Z6yTuZEqzZSaxZTtaa1qFLCcUPkPJi8kCh0C9M5wk0DNSsqqwUs237Gov2/2ncR9360R+ywiJzakcsFyCt2jnovP/1lfk2otMw8HLyQ54BoSGmYvJAopFgGQGocMEiWMNTK52xKyi17Lp+
9VmxVK40tLlo4bufPOlOm/3fwst7t4vIqPPhJKiZ8nIrKOjMJ5337Jz7dedauOEnZmLyQInHqJDnKtSLLiyLK5ftDVyzeVurXToaN3bHPrtefaVV4604l7fI643Q2pF/G//vlpE3nIdfA5IVEYUvht7TMPExblYbzNnQ5VWkMDzrkoF0i0+TO++u2oliibiuNM8gvrcApjpuTDZMXEoUtScODn6Rix6lreOKrdKv33WOmiieRrXafvq53+2apc4wREYvcrZbWTOuudeSK83VL93xtM4a/vxMnrhbKHYpbYvJCorCnwSO7sMz6fQysFk0khrRM/cT4pz+zZIpEfJ/tPocN6ZfNbyihlAzLZhnppjgaJ1ykrDYJ3MsvUrJg8kKisKe7xpa1jpZt52A9ksYH2yybsaZER6/I30rwow3JYIoV06odTc2ualkweSFR1B1Q56h966pgAS4SmdzdLO7oxz+z8O8tp7S3Syucd2bgKz8dlzsEi5S52OxKJi9uwBF9sj8etq9p/apI3UDHsuT/Zkmu5ey1YrlDcDhbxqWIoer2YN6nvjmEbw/c6d5yplWzDUm/6Ng1sCxVXF4FQRCw+JcTiFmYjLRM16mZw+TFDWTlSz8+5I9z9vX7llWyxYSck5N/bkpiw8H642Iu3CjBnjPXDWxtPWNdxYt/NTz9WeXk0wjv+4/j1sC6VlSOKwbe0yurNfgi9TzO5NYk2+kXb6LLot/w/Ia/tIvhvvnrCYfFKTUmL1SPLYPjSpy4WdcUW6ZtknuRuqibMzpjoLVp0DspmPLZPhwWYdry2v2GlyhYuTvT4P3GWl6q3Oj1KwgCisoq0fuNLej/5jYUlVXq/X3l7ky89MMxJL63A2WV1dqESrcFy5UweXED1n5z3J6RK00gJsjVLFxU5vzVU0leRkoKua0jl/PtPsaZXOvqoxh7ezhwwbHdNfmlFTZNMNB1teAWPtt1Dpdv3qlEfK2o3OyYlMe/TEfXl3/X3r6Yp1/J+MD5O/+L3q9vMXgMZ2/BsgaTF6rnZmml+Y3McNY+YCJrpdrZJapEdb9M6BWSFOED8Js06xaHtLVqr5iKy6vQ49XN6LzoN7uOk7B4G17/+QTuems7gJpkpvcbW9B38VaT+yUfy9a7vfNU3S68O9esyEiCdfDCTWQXWF+awhkxeXEDcrRpuMoLhMgd1e05Lqu60yqgQs2A3g+2nsbes+KMgVGCcxIN3E69XScm38ovjbUzKwVBwHubT2HLCctazN/+zTWWVWDy4gbkeINxx0GORK5Kd/ZRbmEZ5m/4C+9tPoWHVuxDfmmF080GqjseRGz7z1s2a+folQLc8+Eu9H9zm8F9zP3f9p/Pw3Pr/8TNkvpVnjW3992XmYcPtp62KB6gpvXFFXjKHQBJT6wZAtYwN8hx8DvbHRSJaa7TA0wknrqfqS98d0T7e90ifj1e3YyRnSOw/OFYUc5tzTRtY6/fCzdK0aVZoCjx1FLrdJcZWtNIoxGgrlOx7p4Pd2t/f2B5Ks6/OcbkOQpuVeLhlfswtlskZt7VCg8sTwUAVBgYmFz7X7pRbN3yFRd0Vv2+WVKB349no7SiGpP7tICvl4dVx5ITkxc3IMeXInPnPH9Df7CZXN/bXGj8GumoqNIgu6AMLZr4yR2KItX98vHnZdNrC9Udj2GPud8etnhbAUB5lf0zHfNKKtDYz0s7oLVaI+BiXilahTQ0fF4Db1g//pmF8T2bmTxP3ZYWQQD26wy0nboqDX9dLsBflwvwxi93pjX/YKiO1u1j2TNj8u73d+J6cc2q6Vdu3sKL93QyuF1VtQZqlapeciYndhu5ATkSAwHOuR4JKYsgCJiz9hBe+emYVftN+jQVA9/ZjhQZZs65AkEAcovKsPfMdYu7hPq/uU2U1Z8NflAbkfR1Oj7bZXh6taV+PXIVvV7bjJd/vPMcS/o6HUOWpGD9AcMDiw39T+asO4xdp6/h/PUSo+99dQsejvr3LnyTdlF725r
/X+0Z/pNi+3IWtYkLAHxmZJp6VbUGA9/erm1FulVRjU92nJW9eCNbXqiewlv29xcLgoBfjl4VIRppqdhx5NTOXivBxtsfZs+PjLG4WTv9Yj4AYN3+SxjcIUyq8GSj+6EjlYTF21CtEbBiapxF21/Jv4VZXxyQOCp914srcOj2tbbVm8k1A1g/T72AV8Z1AQD8erSmJenTnefwQFwUAP1uI2Pp3MMr0wAADbw80D68Ub2/181p7JlFVZs/ncqRNonIvF6CrIIyZN2ehLF06yl8suMcFv960mw3mJTY8uIGrB1M9+omcdbqyC20/A22bow3issR+9pmmxZxI9eh2yXw5R8XrN5/28lcLN1yyukGlNpr0Q/WtURZK6+kQjv2xFC1XWMMjc2Qmr09GbpJydvJJ43+rUqn4I+5VuVbldUGu9rE/Kp0q7La4cs4CIKAQxfyHXpOY5i8uAFZuo0Ew4PajNmQfkXvdvz/24obJRV46ptDYodGEjueVYgZq9PwyOf77V4oU7dAXG3Zc2uUV2mwdMtpbDvpnN1Hl+oUGrPUz0ekbdXMKbxT6sCa8SxydBWfNvK8KKusNpu0CoKgN+7tPyn6q9VXCwJO5RRBEAR8q9OFdMHG6ybmGLuVuzPR5oVfxDugAf9JOYMvUu98aRAE5xknyOTFHciQvVwvLtcb1W7O8ToLKlY56k3QSV6IYhAEweYPw7rs+RAa/cEubM+4hi0ncrHx8BXzO5igO2bF2joYunKLpO9mST5q/aDVOesOix+IjAplqFidqVtA77a8kgp0XvQbJn36BwBg75nr+CL1PIA7rbync4oQ9/oWnLtWf/9aZ3KLMfz9nVh/4DIKb915bKv3nLcp1iv5zlH/ylhSp9sdebXgFt5OzsB/dVo8NYKg1xolJyYvJIk3jSyw5myc5HUoihc3HsWAt7dr36RttTbtIrq8/JsoK9DesnPNq3c3n9L+bum1MpR4qVU1Aw/X7Mm0qkXQGv/88iAA4MD5vHrdD8ZYk+A7Unahc3zI2mrqqjRUawTsu/0cfuizfXjph2PYc+Y6Rizdiej5P+Pu93fihoH6KYas3nsehy7ZXx9l2qo0u48hhr6LtxpsyayqvvPaKTXw2tWw5YUcydCTUGpVGsGqsurO8oJQsq/21cxaeOe3DLuOM/+7IyitqMbsr9P17n/v9wyrZzbsPHXNrlh0FZVVYfvJXBwxM23X0LgLlUqF//5xAS//dBzD398pWkx1FZVV4v7lqfhPylk8fjuZMcURA29t4axJlb1+OHzF7ADX3KL6iduN4nJcyqu/krNS5RSW48WNR+rd/3XaRfxlYu2qez7chZPZ8i/VADB5cQvZhWVOv3qyXOMpXTFnkqJZ92rBLXyw7QzeTs6w6gN36+1kw5ZuqI2H9Lucdp+5jhlr9mPsRzVTNm8Ul+Pt5JPadXe+SbuIEe/vxFsGWj3UKpUo03jN0V0479ej2chVeAuGq7luQUG32tWYdTmi29HR/jhXv2X1g62nce9He3CrohrXDDzmUznFyLOwtUpqTF7chLVVGN2FK62yWqt29oW9M2x0/zPHrtwZkxR3e8XaizdK8cDyvdhyPMfkccZ+tBvDl+60KIE+f70E7/6egZslFWbHg8xb/yf+k3IWY2/Xn1jw3RFk5BQZHI9QXlWtnXJtitizN2prlrDmkXx0lwqwZOD25Zuu08Jiq5KKKqw0UvfFWTB5cRMu+BktqvKqajy99pBV00KdlUqlwlPfHEKrBb/YNb4jv7QSyUevGk065q0/jP3nb+KRLw6gqlqDvWeuo7TC8IDNM7nFFtXjGPvhbny47Qye3/CX2W0P3q5MamwFXV1134hLjOzzp4kmc1tUajR4f/MptH7hl3rjwE5cLTSyF4lp2faz5jciPVNXpmGzmS8lcmPy4iacpanPGLm+l9YOKP1kxzn8cDgL89b/KVMk4skrqdDWxxmrs7aKtSqqNfjnl+lYtv2MweRXd7DjB9vO4KH
P9uGRz40XKUv6Oh2f7z2PTX9lIb/0zr6ncorwzLrDOH+9RJuI/G7JG6cVCfn1Ok3g/d/aBgDYd+4GJn2aqk3y7J3aXdfbyRn49+1F85bvOIu0zDztQMlR/94l6rnIsOU7mLxY67gCEmsmL27C3BtlWaXjB/Xq2nnqmizjcnq/sQVlldX4Ll2cFpfKag12nrpm9Ju9pU5mF+LlH4/ZPaCz3MCHcWW1Bg+v3Id3f7dsYO+mv64aTF50r1ftqrZ7zxofpJ1bVI5FPx5D0teH0OPVzQBqFqIb/v5OfH/oCqavlm4mRt0pvLXTrid++gf+OJenTbpqp9ZK5cFPUpH43g5Jz0HkDpi8uKEbxeV6ffDv/p6BmIXJ2CvD6tO6tp6Qp5DYgfM30adVsPa2sbEiOYVl+PKPC0a7RgBg6ZZTmLoqze4y6SOX7sKavecx34LuE0sIgqBNNjYfz8Gu09fx4e3VgWvrwzy8ch92n67/HDiTW4x3fjtV7357Z1/sPXsd3V+5M8C17mKdptz/8V4U6SQkqSaSJmMOXrgzYPGiSPVxiMgxmLzYqapagzd+Po7tIi0At+fMdYz5YJfJ6Wq2ulFcjsOX8hH7+hZMX7Nfe3/th5hYywLYqlojoFojiFJfxBpNg3zx7YE7LS+6C7Tpuu8/e/HixqN46YdjBluJKqs12v51Uy0QGw9dwYaDl1FeVY1n1h2uN6tG19Er+s23xeVVVrdQHc8qxPTV+9H+xV/xyOf78cRXd6ZAZ14vQe83tmLA29ux6/R1/H3lPoPHqDs+w96WJQB44bv6UzUtdeCCfs2NySusbzGZ8HGq3u1DF+2v42Gped8qv3uSSE5MXuy07sAlrNiViRmr95vfGMDBCzex6a87sx4EQcC+czdw43b3wJTP9uFYViGm1ilmJAgCnl3/J1756Rg0GgE/2bDmz4W8Uvz3dqlnQ/U3NDKv/3L+Rgk+2XkWD36San5jEdXtEflcpxy2riv5NS0N/zt4GR1e/FVv3R0A6Hm7K8SUkvIqzFl3GPPW/4kOLybj+0NXMGfdYaOtPYLOaKCC0kp0WfQbBr+TgmqNYPFsoqfXHsKOU9cgCMCWOq1bQ5ak2NQ11XnRb1bvU5c1LS2O8OnOcw471waRuimJ3BVXlbbTRZ034CW/ZeDZER1Mbj/h45oaAm1CG6Fj0wBsO5mLmZ8fQENvD3w9q692u/zSShy+lI/xy/aga7NALL6vK/53eyaMraWprxWV1/vA0+0+OpVTLOsCdvYWV7NVSbn14300AnDiahG6NgtElUYDbw81iuu0RpRVVuPwpXzEtWwMTw/17XMZbrHo/+Y2jOraFAvv6WT0nPvP17RIXcm/hUHvbEeb0Eb4/B99zMZqbO0X0verDeX9iUgeTF7sUFBaiU90vq19tP2MXvJy8MJNfL3vIhaMjkFIIx9Ez/9Z+7cLN0q1yQsAlFRUY9yyPXrHH3/79pErBaKMmH/sv/oVP+euO4xNdRZ4c9YF7KRkagwLUJNwGCpw5qFSYei7KbhZUoH7Y6Pq/T1mYbLe7Q2P98PvRha5yyoow8rdmfWSl5zCctwsqcBnu8/pTfm8fPMWLt+8hcKySgT4epmMn4jI1TB5sdFvx7LrJQMAsHpPJmb0bwXgTiuLoSbi7SdzceRKPn60oHAWIM1soO8MjLXIkGjdF2f2mZliTEOWpBissKlS3SmjvmqP+YJOtc8Ha/V8zXh3VLeXf8f0ftFIGtrWpmMTESkRkxcbCIJgMHEBgFd+Og4vDzUGtQ81eYx1OsurW6LuWAWpiF3nQgkMFWOqrNbA63ZXj7HS4E99c0j0WIrLq5B+wbqBo2v2nseavedFj4WIyFkxebGCIAg4lVOMY1mmF4Z7ceNRB0UkvqVbTssdglP48o8LGNwhDP/63viMmHO319QRUxcRBsISEbk6Ji9W+OeXB/HbMecumUzieOWn49iQfrneVGUiIpIfp0pbgYmLe8kpdL2VZImIXAGTFyI
jZC57Q0RERjB5ITLC3nWFiIhIGg5JXpYtW4bo6Gj4+voiPj4eaWmmF2Bbv349YmJi4Ovri65du+KXX35xRJhERESkAJInL+vWrcPcuXOxaNEipKeno3v37hgxYgRycw1P/d27dy8mT56MmTNn4tChQxg/fjzGjx+Po0eVO4OHiIiIxKMSJK4HHx8fj969e+Ojjz4CAGg0GkRFReHJJ5/E/Pnz620/ceJElJSUYNOmTdr7+vbtix49emD58uVmz1dYWIjAwEAUFBQgICBAtMdRXlWNDi8mm9+QiIjIDZx/c4yox7Pm81vSlpeKigocPHgQiYmJd06oViMxMRGpqYYX30tNTdXbHgBGjBhhdPvy8nIUFhbq/Uih1Ib1b4iIiEh8kiYv169fR3V1NcLDw/XuDw8PR3a24TVesrOzrdp+8eLFCAwM1P5ERdVfY0YM3+y/KMlxiYiIyDqKn220YMECFBQUaH8uXbKu7L6lBrYzXe6fiIiIHEPSCrshISHw8PBATo5+cbecnBxEREQY3CciIsKq7X18fODj4yNOwEREROT0JG158fb2RmxsLLZu3aq9T6PRYOvWrUhISDC4T0JCgt72ALB582aj2ztKtYYVy4iIiJyB5GsbzZ07F9OmTUNcXBz69OmDpUuXoqSkBDNmzAAATJ06Fc2aNcPixYsBAE8//TQGDRqEd999F2PGjMHatWtx4MABfPrpp1KHalJhWaWs5yciIqIakicvEydOxLVr1/DSSy8hOzsbPXr0QHJysnZQ7sWLF6FW32kA6tevH77++mu8+OKLeOGFF9CuXTts3LgRXbp0kTpUk1oGN5T1/ERERFRD8jovjiZVnRdBENBqASv9EhERAS5c58WVqFQqLHmgu9xhEBERuT0mL1YY1yNS7hBIYs8ktjf59x5RQY4JhIiIjGLyYgUvD/P/rv/O7OOASEgKnzwciw4RjUxu0yrEMWOfhncKN78REZGbYvJipfCAmpoyahXw4piOiAjw1f5tTmI7DLCzmF1koC9OvjbSrmNYYvF9XfVu+3rxqTCicwRUKpXJbcz8WTS9Wja2avvoJn4SRUJE5Hwkn23kan5+agB+PXIV43o2Q4CvFx4Z0Brnr5dgx6lrmNTHuqUJujQLwNErhRjdNQJnc0uw4Yl+aODlAQ91/U/Id+7vhgPnb2LdAXEqCPv73rn0/53ZB52aBiD29S2iHFvJPOpkJ9FN/HD+RikAYMPj/RAe4IPv0q+Ift7PpsbhkS8OaG8Pah+KN389qbfN44Pb4OOUs3r3pTw7GAcv3ESfVsEY8PZ20eNyhMZ+XrhZylIERGQ5Ji9WCmnkg4cTovXuiw5piGid7oTf5gxEfmkF0jLz8O7mU3rbBvh6orCsCgCw8Yn+KCyrQnBD73rnmZPYDku3nAZwZ0T3/bHNsXBsJ9yqqEbvN2xPNNb/U7/gX0LrJvC0oEvMUfx9PFFUXuWQc03qHYW1++8khHUTx7HdI/HhtjMAgNjbrSGZi0fjs12ZeOOXE3rbRgU3wKW8W9rbA9qFYNfp6wbP+0xiewzrGIaZn+9HqL8PBnUIRauQhsi8XgIAiG6i3z2VvvBuBDXwwj3dmuKF747gz8sFaBroq33u2TJpUDcxk9OKqXG4f7nhhVeJiAxxnk8sF9Ihwh/xrZvgkQGtkdgxDE8Pa4fmjRtg/qgY/HdmPDpHBuDrR+Lh6aE2mLgAwKwBrdG1WSDmJLbT3qdSqdDIxxOh/j7Y9X9DbIrN10uN3tHBei0vhlp65PTaeMfV9PHz1s/f1XX+F3+cu1FvH5VKhVkDW6NLM/2pfJGBDfRuT4lvidlD2hg879OJ7dClWSD2vZCITU8OgJeHGr8/MxAHX0zEXy8PRwNvDwxoF6LdPrihN9RqFTpHBuLTqXF4bGBrfPvYnSRUpVJZPW1RALDpybus2kdscxLbIS46GF/8ow/u7R6JjbP7yxoPEVlmdFfDS/Y4ClteJNTA2wOfTesNAHjm7ju
zWH5+aoDZfRv6eOInEx8sUcG2jXGo7RZpE9oIIY18ENjAeZ4C/r6e+GZWX3RpFog56w475JyaOi0Wft4eerdjWwZj//mbBvf9amZf/HL0KhZ8d8Tg3z3VKjw3IgaPDWoDH081fvrzKp5d/6fRF72XhxpNGt1Zp2tgu1CDLTfhAb5YMLqjycdlSNuwRjiTW6y9LQhAl2aBOP7qCJSUV+NiXikmfLzX5DGW/70X/r31DE5cLbT6/IbUDoIf2D4UA9tz8VMipZg3vIOs53eeTy5yiNqWBS8PNVIXDIVapdIOUk371zBM+HivXteHI/l6eaBLs0CHnrNu8hJXZ6DsPd2aIr+0wuAU6UA/L0zu00KbvIQH+OLPl4bj+Q1/oeBWJQZ3qPkwDvD1AlDT7RfXsrHFief0/tFo6OOJhDZNrH1YWs0bN8DlmzXXc8vcQYie/7P2b29N6AagpvXJz9sTTYy0Auoa2aUpRnZpqnccInIv80fFoE2o6ZmZUmO3kZt5fPCdbgwvD7Vel1GYvy92/d9QOcICAJvGbdirbvKiUqnw/RP99O57c0I3TOrTwugxVk/vjcSO4Vh4TycE+nlh+cOx+ObRvgbHEUWHNLS4m87LQ42H4lvYNT17dNemBu8P8vOqlxSp1SpseLyfwe0B/UHeup4bYd03sMEdQrVdYuN7NrNqXzEteaA7lj3UC439vNC8cQMcWni3bLEQKUkLG1v+xcSWFzfz+CDDYzCcwawBrR1+zmpN/ft0W1nqJjeGDIkJw5CYMBGjEs+CUTFIy8zDPd1qkpiJcVFYd+ASVk/vbXD7Xi2CjB7r92cGGrx/9pC2mNYvGl0W/WZRTG1DG+FfYzriVmV1vTFH1piT2A5jujbF3e/vtGn/lk380Ds6GGO6GU7wpJTy7GDcKKkw201H5IyaBTUwv5HE2PLiRtQqmK1jIqdHBzo+eamt26NLpVIhvlUwopv4oWNT8dbHcoTe0TXdXsse6oWDLyZCpVJh4+z+eOR2YvjmhK44+soI9GxhuI6MSqUyOvakqc6A5KeH1QwkT11Q01LXyMfyJGT2kLZQqVR2JS5ATctUoJ+X9vamJ+8yOWj561nx2laitmGN0MvI/0BqL4yOQXRIQ6MtWUTOYsXUOPxgYBB9Iyd47sofATnMUCdtHaglR2L16MDWuJR3C6O66A+iXftoX2gE55uJZc66RxNQVF6FwAZeBv9eO2PNFG8Lps0/c3d7vUHoAPDG37rgX98fNbh9bU0jAGhswdiaejF5qlFRpd9MJggC1DrPmaaBvnV309OvTQj6tQnB7CFtjZ/HQ40KQ81xdurUNAA/JvXHmWvF6BDuDwAI86+fOBM5i6ExYbjbSKVvuce7AGx5cRsvjumIdx/oIXcYTsfP2xPvPtgdiXVepCqVSnGJC1AzbsVY4iK1yb1bYPUMw91RP8y+C/+d2QdHXxlh07G/eiQezRvrN1VrBMDH885bWO0Yo27N6w/6NrdmVa27dKanW+v+2OYG74+J8MfK6XHw9FAjJiJAm6QH+XnjxTHWzxoj6/xrdEccWni36Csgu4IhHe60su54brDe33Tf/dbovK7/WDBM4qgsw5YXN/GIDONJSPn6t22CPWduWNTHrVarMKRD/da99yd2h4daZdfSGb2jg7H7+aHQaAS0fuEXAEC1RoC/rxdeuqcTBECbtBmaNfW0Tr0kUzztSFi9PFRoHdoQ566V6N2fPMfwWCEARrvvSDw9WwTZ1NontmUP9cLsr9PlDkOPbmt3yyYNMbB9KHaeugZAf7zf4A5hOPX6KJRWVCHIT/7/JcCWF3JSk03M7iHH+eqRvtg7fyi2zhtk8zH+1tNwi4QtdIsI1s5O+8ddrTDzrlZG9/nooZ4WH793dLDtwQE1lf+s4MRD0FyGqZXgWxpZE6y1yAuwDo0Jw5huTUU/rr0G1RnftnRiD+1z8rkRMXp/8/ZUO03iAjB5ISdRd2HIv8k4hdbdPTaoppWudhZOZFA
D+Hp5mNpFz6Te1q3xZU7dKsBT4lsgwNez3jIdhgzuEIp7ukVafC57auoAwCidAoT+Pp56pQkMUULukrl4tNwh2EW3dUG3W/OnpLuw47n6lcrXPtoXW+YOwtCYMAT5eWHbvEHapUGstXpGb+z6vyFYOS0OgNW5reQ8PfSfgcENvZG5eAxOvzEKnSKde7ICu42onqaBvrhaUObQc2rqvKr7tLLzGzDZrHd0MA6/dLfNY2ciRZ5GWbdw4Rt/64pXx3WxaEzSZ1Pj7DqXtTo1vbP/n4uG11tuoi5nnv1XSwkxmqIb/ZAOYWbHvsRE+EOtVmGVTjkBa7oT+7QKxjez+hp8ftb9EhDYwAsFt+RblNRYJQgvJ1rrzhjnj5DsFm9lIiDLW5WzfSVxc0F+3jZ/aD3ct6Vd566d7m2KqcQlvvWd1hNHLjiqUqkg6DyRzSUugO2vtQ8nW94VpjT+Vky7N2bR2E5W7xPbsjHS/jXMYNdIDxP1j+r69rEEo8/PpRN76N3+MenONOTBHRy/PIaS33bZ8uIGQqyckinHNy0vD2V/u6M77B0cae/zb+ZdrRDUwAv92tg+c8gWzYIaWP2N1daH6ogPnZ+S7kJ0iPyVVG3h73un1dDS//GcxHYI8zc83X7OsPYIbOCFxI7hCPLzwoj3d+Jmaf0Wk+FGphbX6hDhj4zXRyL5aDb6tw1BSCP99+YfZvfHuGV7tLebN26AJ4e2xV3tQvHyj8ew+XiOZQ/GUjJUNRcLW17cgC3fQhxtxbT6zfsD7Ji2Sspl76BGLw81JvVpgRZGBmNKaWhMGPq1aYJ/WljJWuWko16aN26Ars0D9ZIAXT3rtEQ8JmKByVAR6t/YMnapoYkWnwbeHnhicFu0D/dHmL8vquv2c9822MBsu7p8PD0wrkezeomLIADd6wwuDm7ojYm9W6BZUAMsHCPu+/gTg9soesQ4kxc3YOzbhDFyPJ8NzQj44h99sGWu7bNcSJlMfYg4Oy8PNb6e1RfzR8WY3xjOWwRx9/PWrXEmZgFMc4OcLREZ6IsvZ8Zjw+MJFrfkWdMI0cC7/gD2fm2aYKINg9Vri0bWVrZO0On21F2brEUTPxx/dQSm94u2+hyGdG0WiPt6NkOrkIaYmmBfV68cmLxQPcse6uXwcxp641BqoTiyr9XMna54TIS/3CHYJFznC9HkPi0Q37oJGlgxI82U+3pZN7X+60fi692nUqlwV7sQxLa0fLyfbsFDc1ZMjUPr0IZYMTUO/+jfCq1CGuLTqXE2vV9tmTsIHz3UE9NuJxBfPRKPnc/VzFB6pE4JAD9vT8wd3h4T4+yf0TesYzga+nhi27xBeHVcF7uP52jK/YpDkqnbdCknd/ogcyVizVYwtcq1s+lnQ1eFJYN6DQmSqYpyrVfHdcalm6XIyr+l7ZY+9NLdKK/SoPsrv9t1bGv/Jf3a2te9PHtIG2Tll6GzFVODuzUPwrZ5gwEAd3cKx8J7Oto8Visi0FdvOr9arUKLJn5Guz0DfL3w1v3dsO7AJZvOB9RMB/e+nawpdTYZkxdyCn4GmmFN3U/OTbBjIKDue6mt9TXk4MhquXK2SG54PAFhAb74KekuVAuCNlH19fKwqh6Qs6hbjM0WSksAXKFFm91GJIn+ba37FmrsxR8WYN14HVI+udZmUhI5k/rarhi1WiVJPRClJQJKFNVYmbPIdDF5IVGM7a5fxfSpoZatJUOu6dkRHQAAswYYL9tvzD/uaoUhHULx1oSuYoflMkyVvFeCv14ebvLv7cPlX7XYlUWYWYFdCdhtRKJoW2eJdDma+2Mi/HEyu8jh56X6OkcG4tTro7T96tbw8/bE6hl9JIjKdSi9dSLAyBTsWuamkHuoVajWCExyblv/zwQ8sDxV7jAcii0vJAlPD7XD67Qo/Q3d1diSuBBZ4qekuzC+RyQ+m9rb/MZuwJoFRc0tj6AUfHchURjKG3TXBnGEMTqL4hGReD55OFa0Y4kxm7FTZACWTuopSyF
Ccg5MXkgyjl7cy94pk0RkWBM7l3zQ9fzIDqIdy53V1oV5MM7yujjje1i+wrqz45gXEkVjP84QIXJVoq4UrtzldJzKwns64d4ekejaLMjifZZOcp0FPdnyQqL4m5VVMYlIOSKDGuCLf/TBD7P7m9+YHMLTQ43YlsFuO7bMPR81ic7TBYoeEZFxA9uHylZ9+9DCu2U5r1L5ern+R7vrP0IiIok1byxit4qLs6XXqLGIY25cXc8WQVjyQHe5w5AckxcF48xgIudgy7pGZJkPJ7vOOA0prZgah14tgvD+gz0wvFP9mZd1C4kqHQfsKlhcy8bYf/6m3GEAsC6R6tMqGGmZeaLH0NCbT2cipZrRPxp3dwoHAAztGIaMnCKENPJ2uQ9dqdzdKVz7/zPEy8W69vluTw7nLdEU6g4R/pIcl8icu9qFyh2CYhirrjuicwT6tq5pwZqT2A5tQhs5vNClK4sOaSh3CKJit5GCmSuh7UhqK5peBDvnSrYIZmEqkk5II/3xFZZ0W4zt1lSqcETj7aHGk0PbSn6e8AAfk3/v2jzQ7DF8PD1wf2xzhHNhVtG4WMMLkxcShzUF6e5qa9+31NlD2tS77ykHvCmTe6i7qvU9FiQmiliaQgXMSWyPtyd0k/Q01nyR0aWA/yA5ESYv5HAPxjXHx1N62bz/kJiwevf5eHnYExKRUVInJk0duMKvh1qFXjIsmkokNiYvSuakX1XMrSjtoVZhVFfnb2Yn9+ToArCvjuvikPMMal/T4slp3e7p/tgouUMQFZMXEp2PmYqPDX3sGyduqFlaEAx/5HS3oH+dyB08GFfz4eUrcSvlUAMtoySfyX2icPK1kYhwYAufIzB5IYd6fXwXuxds9PO2/M3X1NRBIkMc3aBpLPG2xt96NjO7jaMGbI7ozNXdnUmzoAaSJ6xyYPKiZE66wFnrUONT8sL8Tc9EsIQf67kQ6QmyYGHUgAaGtxF7OrKtQ4Q8PZy0H5ycEpMXBbN3yrHYNjzeD9MSWuL/RsYY3SYuOrjefa1FqD8gwpdXIlk46qkbZ2Qsmr0toWLpGcWBxGKaltASof4+mBLfUu5QJMGvsCSa2JaNzQ7WDTa0RokIX7hs+QDgwEUypEOEP85eK5E7DKtEBpp/LjtqOretXyTUrlaIRGavjOuCRWM7u+z/1TlSbrLJA3EuMnpchK+e1r5hPjaoNb6Z1df+E5PLec1Bs39qtQltZPcxfNxgFWGynqsmLgCTF0XrHBkgdwhmvXlfV4ecx9outAWjOiKKlXrJgCaN7B+XZY22YfYnL/Z0m3pxrAkpkKTJS15eHqZMmYKAgAAEBQVh5syZKC4uNrn9k08+iQ4dOqBBgwZo0aIFnnrqKRQUFEgZJkloUp8WDjlP3aqotTgWhtzFvTYuYChGyw+Ro0mavEyZMgXHjh3D5s2bsWnTJuzcuROPPvqo0e2zsrKQlZWFJUuW4OjRo1izZg2Sk5Mxc+ZMKcNULKV8MP/vnwmiH3Mkp2OSCxrY3valMz6wYA0mQ0JFmAGoSwkrJZDySTZg98SJE0hOTsb+/fsRFxcHAPjwww8xevRoLFmyBJGR9b8ldOnSBRs2bNDebtOmDd544w38/e9/R1VVFTw9Ob5YiTwlmM3w7oPdkbwoW/TjEgHAkZeH4//9cgLje5ivnyKG3c8PQXF5FVbszLRp/wZ21PFgrkFKJFnLS2pqKoKCgrSJCwAkJiZCrVZj3759Fh+noKAAAQEBTFxIj6VVesX+Vknuwd/XC4vv64b41k0ccr7mjf0QE2HZGLYQA2NyDK33ZSm5FpVsH87uKrKdZBlBdnY2wsL0X1Cenp4IDg5GdrZl35ivX7+O1157zWRXU3l5OcrLy7W3CwsLbQtYgTRK6TdygHFGviFzUC5JbVQXx3ZhGipI52FkVomPpxr/GtMRoVYMQjZ2LLG9cm8XTF7xh0PORa7H6paX+fPnQ6VSmfw5efKk3YEVFhZizJgx6NSpE15++WWj2y1
evBiBgYHan6goF5k+7Ab6tKopWPdQvP2Deg3WjyFygF4txCuuZkkjSO1yArpdRUFGBqw/PrgNpiZEm1wIte45xaiCbQlO7yZ7WN3yMm/ePEyfPt3kNq1bt0ZERARyc3P17q+qqkJeXh4iIkx/UykqKsLIkSPh7++P77//Hl5exktfL1iwAHPnztXeLiwsdJsERukNL5/P6IPjVwtYWZMUTa5K17oNJMbqeTw1tJ3Z49Td8/9GdrAjKnbVkmNYnbyEhoYiNNT8iPiEhATk5+fj4MGDiI2NBQBs27YNGo0G8fHxRvcrLCzEiBEj4OPjgx9//BG+vqZXwvTx8YGPj3u+WBSeu6CBtwdiW9ZfLoCIjKt93VsyVsWWImXh/vatPmzp2B0OFCZ7SNZu17FjR4wcORKzZs1CWloa9uzZg6SkJEyaNEk70+jKlSuIiYlBWloagJrEZfjw4SgpKcHKlStRWFiI7OxsZGdno7q6WqpQSWJyvklFBnEJAFIOp2hNFeEFG93E/FgzuQYKk2uQdArPV199haSkJAwbNgxqtRoTJkzABx98oP17ZWUlMjIyUFpaCgBIT0/XzkRq27at3rEyMzMRHR0tZbiKIzjFO51zayXCoo9ELk2CJCJ5zkDkFpZj4DvbjW7TtVmg6Ocl9yFp8hIcHIyvv/7a6N+jo6P1PoAHDx7MD2Qr8D9lm8cGtZY7BFKwcT0i8cPhLO1tP28Hl3G4/cIX671SivYPXy8PtDDT+uKoWU3kmjjcm1xKm1DzLS0tOH2a7ODjqf+2qfTeD6XF37Gp86/pRtJj8qJg4QH2DayzhNLe2GJbcuYSOZa9A1x1GXu9vXJv53r32bOUgK5GFhZ8dBaJHU0X5JsY5x6zTd0dkxcFa8bBqDZRcZ4D2cn3do2SyX2iMMzMh6kYpvWLRsLtar+1dZFGmFnf6/dnBlp07NF1asCI+foQs2uoe1QQgJpuO1PasXKvW1BWyk1EJDMVVEhfeDeKy6sQJmKrizmrZ/TG8auF6NE8qCYOM3lB+3B/i47rKeHYk0Y+nii4VSnKsTb8MwGFZVUsSEkA2PJCZiihjWKMieqhhshVVIxch5+3p0MTF6BmEGyvFo1tqt0iFzHHl3l6qC1KXDgF2z0weVG4HrebUp1Zx6YBCPP3Qbfm0kyN1B1AyS4hkpqli4IqhSM/7C0ZUE9kCSYvCvfscPtKeTuCt6cae+cPxcYn+ssdCpHdmjSSrtsiTqQB53Pvbm/zviESPj4lvF+RMjB5UThvT2kuYfztRRPv69VclON5eqila+7WOawlXyLZOkPO6gErZsoM7xSBlk38cF+v+iuqN29s+2D+dhaOlbGFpwc/ckgcrtX+SfXYOg1yxbQ47Mi4hsSO4fjfwcsiR0VEhlgzO6eBtwdSnh2s1+2zdGIP7Mu8gXu7m56R48pY6NQ9MHlxcQPbh9i0X4CvF8Yq8A2Q71vkTuqOVxnfsxnG96zfEuMs2PBCYuFTycX1a2Nb8qIEQzrUFOmamhAtbyBEZJGB7cQprGcKZxu5B7a8uDhXfh2vnNYb+bcqra774Mr/EyJntenJuzjmhUTDZ5LCuXP/rlqtYsEqIoXowlWkSURMXoiIiEhRmLy4OLEbZpx9GXt2CRERuT4mL+RSLElewvx9pA+EiIgkw+SF3M7QGOlXASYi4PXxXeDlocLzI2PkDoVcDGcbkVVcoVeGUymJakQESLu4ZPeoIJx4dSRnGZHo+Iwiq7jv3CYi1/P44DaSn4OJC0mBzyqyysN9W8odAhGJJCrY9jWQnJU7l49wJ+w2Iqt0igyQOwQistPqGb1xPKsQQzroj/9q6O0hU0RE1mHy4uL4HYSI6hrSIaxe4gIAAQ28ZIhGXBzT5h7YbaRwbAnRN6JzhNwhECkWP/ZJKZi8KJy/r2O/KTn7m1ub0EZyh0BERBJj8uLi2oQ2lDsEhwpwcDJH5Eoc3c08qktNS2ntCvFEluKYFxfnbh/mgX7u9XiJxNS
vTYhDz7fkge4Y2SVC1MKRnG3kHtjy4mJeHddZ7hBE17yx603nJHJG3p6O/Uho6OOJcT2aObz7m5SPyYuLCWnEdXuIxLb20b5yh+AQsS0byx2C3TjbyD2w28jFsQWVyH59WzeROwRJpTw7GIcu3cS47s3kDoXIIkxeyK388tQAuUMgcjrRIQ0RHeJeg/tJ2dhtRG6FdXGIiJSPyQuZ5almHzKRXHq1CJI7BCKnw+SFzJo9pK3cIdiE4/ZITC2C/QAAIzqHy3JeIrqDY17IrJFdIvDvraflDsNqswcrM+ki57R57kDkl1YiPMBX7lCI3B6TFxdTt7GhgRuuErvjucFIybiGib2j5A6FXIiPpwfCA9zv9UTkjJi8uLi2Ycpf68fa7p+WTRpiWj/OnCAiclUc80JmNWnorf3d0RU4idwdi64R1cdPIhfj6yV+s3aYTh9/Q29PjO8RKfo5TFE5/VrWRETkSExeXIwjKoE+nBAt+TmIiIiMYfLiYnRbmCMkmBUR09S/3hgUtmoTEZEjMXlxQUsn9kBIIx98/Pdeoh0zdcFQ/PLUADRvXL/mxF1tQ0Q7jyECuEATERHdwdlGLmh8z2YY1yNS1IF+TQMboGlgA4N/6xMdLNp5DKmqZvJCRER3sOXFRTlyhsKwjtJWHL1aUCbp8YmISFmYvLgA3VouXh68pESuhEPKiOrjJ50L+OegNtrfPRywiGLzIMPdR0REcujbWtqua3I+TF5cwD3dmqJniyA8MbiN+Y1FEBbgq1e4jgNqiUhOjXy85A6BHIwDdl2Ar5cHvn+iv0PP2atlY2w+nuPQcxIRGcJyDe6HLS9ERESkKExeiIhI0djw4n6YvJBNdN8sWoVIu4JzdJP6hfGIiGpx1J37kTR5ycvLw5QpUxAQEICgoCDMnDkTxcXFFu0rCAJGjRoFlUqFjRs3Shkm2cnPm0OniIjIcSRNXqZMmYJjx45h8+bN2LRpE3bu3IlHH33Uon2XLl3KpeCJiIioHsm+Mp84cQLJycnYv38/4uLiAAAffvghRo8ejSVLliAyMtLovocPH8a7776LAwcOoGnTplKFSERERAokWctLamoqgoKCtIkLACQmJkKtVmPfvn1G9ystLcVDDz2EZcuWISIiwux5ysvLUVhYqPdDpGt4p5rlC/q0YiErIlfENnr3I1nykp2djbCwML37PD09ERwcjOzsbKP7PfPMM+jXrx/GjRtn0XkWL16MwMBA7U9UVJRdcZNllNSjt+TB7nhrQld8+nCs3KEQWc3fl2PKzOkt8eKw5HysTl7mz58PlUpl8ufkyZM2BfPjjz9i27ZtWLp0qcX7LFiwAAUFBdqfS5cu2XRucl0Bvl6Y2LsFgvy8zW9M5GQiuRyHWVHBnJHobqxO6efNm4fp06eb3KZ169aIiIhAbm6u3v1VVVXIy8sz2h20bds2nD17FkFBQXr3T5gwAQMGDEBKSkq9fXx8fODj42PNQyAiIpfCydLuxurkJTQ0FKGhoWa3S0hIQH5+Pg4ePIjY2Jrm+m3btkGj0SA+Pt7gPvPnz8cjjzyid1/Xrl3x/vvvY+zYsdaGSkRERC5Iss7Ujh07YuTIkZg1axaWL1+OyspKJCUlYdKkSdqZRleuXMGwYcPwxRdfoE+fPoiIiDDYKtOiRQu0atVKqlCJiIhIQSSt8/LVV18hJiYGw4YNw+jRo3HXXXfh008/1f69srISGRkZKC0tlTIMkoDKgeP7AxvcWTHW25NFoYmoLgXNICBRSDqMPTg4GF9//bXRv0dHR0MQTPdVmvs7ub7HBrXBE1+lAwBaS7wUAREROT9+jSWn18iHU0WJiOgOJi9ERKRoft4ecodADsbkhWziyCJ1of53psIndgx33ImJSBH6tw2ROwRyMLbHk00cmbx0bBqAJ4e2Reb1Ejw5rK3jTkxEiqDmeF23w+SFFGHe8A5yh0BETkqlpPVKSBTsNiIicmL8XCaqj8kLERERKQqTFyIiIlIUJi9
ERESkKExeyCaOXB6AiIhIF5MXIiIiUhQmL0RERKQoTF6IiIhIUZi8EBERkaIweSHbcLwuERHJhMkLEZET8/PmKi5EdTF5ISJyUoENvPBAXHO5wyByOkxeiIic1OoZveHj6SF3GEROh8kLERERKQqTF7IJx+sSSY+vMyLDmLwQERGRojB5ISIil9HIh2OE3AGTFyIichktmzSUOwRyACYvZJPhnSMAAMENvWWOhIiI3A2rH5FNxnZripBG3ugYESB3KERE5GaYvJBNVCoV+rUJkTsMIiJyQ+w2IiIiIkVh8kJE5KQ6RPjLHYLisDaOe2DyQkTkpLgoI5FhTF6IiIhIUZi8EBERkaIweSEiIpfh68UKu+6AHapERKR4zw5vj8s3b6Fb80C5QyEHYPJCRESKlzS0ndwhkAOx24iIiIgUhckLERERKQqTFyIiIlIUJi9ERESkKExeiIiISFGYvBAREZGiMHkhIiIiRWHyQkRERIrC5IWIiIgUhckLERERKQqTFyIiJ9KpaYDcIRA5Pa5tRETkRH5M6o8Xvj+CoTFhcodC5LSYvBARORFPDzXevr+73GEQOTV2GxEREZGiMHkhIiIiRZEsecnLy8OUKVMQEBCAoKAgzJw5E8XFxWb3S01NxdChQ9GwYUMEBARg4MCBuHXrllRhEhERkcJIlrxMmTIFx44dw+bNm7Fp0ybs3LkTjz76qMl9UlNTMXLkSAwfPhxpaWnYv38/kpKSoFazgYiIiIhqqARBEMQ+6IkTJ9CpUyfs378fcXFxAIDk5GSMHj0aly9fRmRkpMH9+vbti7vvvhuvvfaazecuLCxEYGAgCgoKEBDAKYdERERKYM3ntyRNGqmpqQgKCtImLgCQmJgItVqNffv2GdwnNzcX+/btQ1hYGPr164fw8HAMGjQIu3fvNnmu8vJyFBYW6v0QERGR65IkecnOzkZYmH6NAk9PTwQHByM7O9vgPufOnQMAvPzyy5g1axaSk5PRq1cvDBs2DKdPnzZ6rsWLFyMwMFD7ExUVJd4DISIiIqdjVfIyf/58qFQqkz8nT560KRCNRgMAeOyxxzBjxgz07NkT77//Pjp06IBVq1YZ3W/BggUoKCjQ/ly6dMmm8xMREZEyWFWkbt68eZg+fbrJbVq3bo2IiAjk5ubq3V9VVYW8vDxEREQY3K9p06YAgE6dOund37FjR1y8eNHo+Xx8fODj42NB9EREROQKrEpeQkNDERoaana7hIQE5Ofn4+DBg4iNjQUAbNu2DRqNBvHx8Qb3iY6ORmRkJDIyMvTuP3XqFEaNGmVNmEREROTCJBnz0rFjR4wcORKzZs1CWloa9uzZg6SkJEyaNEk70+jKlSuIiYlBWloaAEClUuG5557DBx98gP/97384c+YMFi5ciJMnT2LmzJlShElEREQKJNnaRl999RWSkpIwbNgwqNVqTJgwAR988IH275WVlcjIyEBpaan2vjlz5qCsrAzPPPMM8vLy0L17d2zevBlt2rSRKkwiIiJSGEnqvMiJdV6IiIiUx5rPb5dbVbo2F2O9FyIiIuWo/dy2pE3F5ZKXoqIiAGC9FyIiIgUqKipCYGCgyW1crttIo9EgKysL/v7+UKlUoh67sLAQUVFRuHTpkst1SfGxKRMfmzK58mMDXPvx8bFJRxAEFBUVITIy0uyahi7X8qJWq9G8eXNJzxEQEOByT9pafGzKxMemTK782ADXfnx8bNIw1+JSi8s1ExERkaIweSEiIiJFYfJiBR8fHyxatMgllyPgY1MmPjZlcuXHBrj24+Njcw4uN2CXiIiIXBtbXoiIiEhRmLwQERGRojB5ISIiIkVh8kJERESKwuSljmXLliE6Ohq+vr6Ij49HWlqaye3Xr1+PmJgY+Pr6omvXrvjll18cFKnlFi9ejN69e8Pf3x9hYWEYP348MjIyTO6zZs0aqFQqvR9fX18HRWy5l19+uV6cMTExJvdRwjUDgOjo6HqPTaVSYfbs2Qa3d/Z
rtnPnTowdOxaRkZFQqVTYuHGj3t8FQcBLL72Epk2bokGDBkhMTMTp06fNHtfa16wUTD22yspKPP/88+jatSsaNmyIyMhITJ06FVlZWSaPactzWwrmrtv06dPrxTly5Eizx3X26wbA4OtPpVLhnXfeMXpMZ7lulrzvl5WVYfbs2WjSpAkaNWqECRMmICcnx+RxbX2dio3Ji45169Zh7ty5WLRoEdLT09G9e3eMGDECubm5Brffu3cvJk+ejJkzZ+LQoUMYP348xo8fj6NHjzo4ctN27NiB2bNn448//sDmzZtRWVmJ4cOHo6SkxOR+AQEBuHr1qvbnwoULDorYOp07d9aLc/fu3Ua3Vco1A4D9+/frPa7NmzcDAB544AGj+zjzNSspKUH37t2xbNkyg39/++238cEHH2D58uXYt28fGjZsiBEjRqCsrMzoMa19zUrF1GMrLS1Feno6Fi5ciPT0dHz33XfIyMjAvffea/a41jy3pWLuugHAyJEj9eL85ptvTB5TCdcNgN5junr1KlatWgWVSoUJEyaYPK4zXDdL3vefeeYZ/PTTT1i/fj127NiBrKws3HfffSaPa8vrVBICafXp00eYPXu29nZ1dbUQGRkpLF682OD2Dz74oDBmzBi9++Lj44XHHntM0jjtlZubKwAQduzYYXSb1atXC4GBgY4LykaLFi0SunfvbvH2Sr1mgiAITz/9tNCmTRtBo9EY/LtSrpkgCAIA4fvvv9fe1mg0QkREhPDOO+9o78vPzxd8fHyEb775xuhxrH3NOkLdx2ZIWlqaAEC4cOGC0W2sfW47gqHHNm3aNGHcuHFWHUep123cuHHC0KFDTW7jjNdNEOq/7+fn5wteXl7C+vXrtducOHFCACCkpqYaPIatr1MpsOXltoqKChw8eBCJiYna+9RqNRITE5Gammpwn9TUVL3tAWDEiBFGt3cWBQUFAIDg4GCT2xUXF6Nly5aIiorCuHHjcOzYMUeEZ7XTp08jMjISrVu3xpQpU3Dx4kWj2yr1mlVUVODLL7/EP/7xD5MLjirlmtWVmZmJ7OxsvWsTGBiI+Ph4o9fGltessygoKIBKpUJQUJDJ7ax5bsspJSUFYWFh6NChAx5//HHcuHHD6LZKvW45OTn4+eefMXPmTLPbOuN1q/u+f/DgQVRWVupdh5iYGLRo0cLodbDldSoVJi+3Xb9+HdXV1QgPD9e7Pzw8HNnZ2Qb3yc7Otmp7Z6DRaDBnzhz0798fXbp0Mbpdhw4dsGrVKvzwww/48ssvodFo0K9fP1y+fNmB0ZoXHx+PNWvWIDk5GR9//DEyMzMxYMAAFBUVGdxeidcMADZu3Ij8/HxMnz7d6DZKuWaG1P7/rbk2trxmnUFZWRmef/55TJ482eTid9Y+t+UycuRIfPHFF9i6dSveeust7NixA6NGjUJ1dbXB7ZV63T7//HP4+/ub7VZxxutm6H0/Ozsb3t7e9RJoc595tdtYuo9UXG5VaTJt9uzZOHr0qNk+2ISEBCQkJGhv9+vXDx07dsQnn3yC1157TeowLTZq1Cjt7926dUN8fDxatmyJb7/91qJvSEqxcuVKjBo1CpGRkUa3Uco1c2eVlZV48MEHIQgCPv74Y5PbKuW5PWnSJO3vXbt2Rbdu3dCmTRukpKRg2LBhMkYmrlWrVmHKlClmB8E743Wz9H1fSdjycltISAg8PDzqjbTOyclBRESEwX0iIiKs2l5uSUlJ2LRpE7Zv347mzZtbta+Xlxd69uyJM2fOSBSdOIKCgtC+fXujcSrtmgHAhQsXsGXLFjzyyCNW7aeUawZA+/+35trY8pqVU23icuHCBWzevNlkq4sh5p7bzqJ169YICQkxGqfSrhsA7Nq1CxkZGVa/BgH5r5ux9/2IiAhUVFQgPz9fb3tzn3m121i6j1SYvNzm7e2N2NhYbN26VXufRqPB1q1b9b7N6kpISNDbHgA2b95sdHu5CIKApKQkfP/999i2bRtatWpl9TG
qq6tx5MgRNG3aVIIIxVNcXIyzZ88ajVMp10zX6tWrERYWhjFjxli1n1KuGQC0atUKERERetemsLAQ+/btM3ptbHnNyqU2cTl9+jS2bNmCJk2aWH0Mc89tZ3H58mXcuHHDaJxKum61Vq5cidjYWHTv3t3qfeW6bube92NjY+Hl5aV3HTIyMnDx4kWj18GW16lkHDo82MmtXbtW8PHxEdasWSMcP35cePTRR4WgoCAhOztbEARBePjhh4X58+drt9+zZ4/g6ekpLFmyRDhx4oSwaNEiwcvLSzhy5IhcD8Ggxx9/XAgMDBRSUlKEq1evan9KS0u129R9bK+88orw22+/CWfPnhUOHjwoTJo0SfD19RWOHTsmx0Mwat68eUJKSoqQmZkp7NmzR0hMTBRCQkKE3NxcQRCUe81qVVdXCy1atBCef/75en9T2jUrKioSDh06JBw6dEgAILz33nvCoUOHtDNu3nzzTSEoKEj44YcfhL/++ksYN26c0KpVK+HWrVvaYwwdOlT48MMPtbfNvWad4bFVVFQI9957r9C8eXPh8OHDeq/B8vJyo4/N3HPbGR5bUVGR8OyzzwqpqalCZmamsGXLFqFXr15Cu3bthLKyMqOPTQnXrVZBQYHg5+cnfPzxxwaP4azXzZL3/X/+859CixYthG3btgkHDhwQEhIShISEBL3jdOjQQfjuu++0ty15nToCk5c6PvzwQ6FFixaCt7e30KdPH+GPP/7Q/m3QoEHCtGnT9Lb/9ttvhfbt2wve3t5C586dhZ9//tnBEZsHwODP6tWrtdvUfWxz5szR/h/Cw8OF0aNHC+np6Y4P3oyJEycKTZs2Fby9vYVmzZoJEydOFM6cOaP9u1KvWa3ffvtNACBkZGTU+5vSrtn27dsNPg9rH4NGoxEWLlwohIeHCz4+PsKwYcPqPe6WLVsKixYt0rvP1GvWUUw9tszMTKOvwe3bt2uPUfexmXtuO8NjKy0tFYYPHy6EhoYKXl5eQsuWLYVZs2bVS0KUeN1qffLJJ0KDBg2E/Px8g8dw1utmyfv+rVu3hCeeeEJo3Lix4OfnJ/ztb38Trl69Wu84uvtY8jp1BNXt4IiIiIgUgWNeiIiISFGYvBAREZGiMHkhIiIiRWHyQkRERIrC5IWIiIgUhckLERERKQqTFyIiIlIUJi9ERESkKExeiIiISFGYvBAREZGiMHkhIiIiRWHyQkRERIry/wEFnK3aCL4JNgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import torch\n", + "import torchaudio\n", + "import matplotlib.pyplot as plt\n", + "\n", + "audio_file = \"/content/example_vad_music.wav\"\n", + "signal, fs = torchaudio.load(audio_file)\n", + "signal = signal.squeeze()\n", + "time = torch.linspace(0, signal.shape[0]/fs, steps=signal.shape[0])\n", + "\n", + "plt.plot(time, signal)\n", + "\n", + "from IPython.display import Audio\n", + "Audio(audio_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tGuWJXKo3705" + }, + "source": [ + "We can now use the VAD trained at the previous step in the following way:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 200 + }, + "executionInfo": { + "elapsed": 4308, + "status": "ok", + "timestamp": 1708531553082, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "LssN_iSd7p2W", + "outputId": "fbba6722-fee9-452e-cc56-1ce062226b56" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "81addb3add394558b4965ae64ccd28e2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "hyperparams.yaml: 0%| | 0.00/2.29k [00:00]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiMAAAGdCAYAAADAAnMpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABrVElEQVR4nO3deXwb1bk//s9o976vsRNnDyEbSUgweyElUMrSjZTyI2lK4dKSXiAtpaEFLvReQhcovb3QtBSa9ttSUlqWFmgoBMJqCCSEJGRfncX7Jq9az+8PaUaSLduSPfJIM5/365VXEnlkHVnW6JnnPOc5khBCgIiIiEgjJq0HQERERMbGYISIiIg0xWCEiIiINMVghIiIiDTFYISIiIg0xWCEiIiINMVghIiIiDTFYISIiIg0ZdF6ALHw+/04deoUsrKyIEmS1sMhIiKiGAgh0NnZifLycphMg+c/UiIYOXXqFCorK7UeBhEREY3A8ePHUVFRMejXUyIYycrKAhB4MtnZ2RqPhoiIiGLhdDpRWVmpfI4PJiWCEXlqJjs7m8EIERFRihmuxIIFrERERKQpBiNERESkKQYjREREpCkGI0RERKQpBiNERESkKQYjREREpCkGI0RERKQpBiNERESkKQYjREREpKm4g5G33noLV1xxBcrLyyFJEp5//vlh77N582bMnz8fdrsdU6ZMwfr160cwVCIiItKjuIOR7u5uzJ07F48++mhMxx85cgSXX345PvOZz2D79u247bbb8M1vfhOvvPJK3IMlIiIi/Yl7b5rLLrsMl112WczHr1u3DhMnTsRDDz0EADjttNPwzjvv4Be/+AWWLl0a78MTERGRziR8o7yamhosWbIk4ralS5fitttuG/Q+LpcLLpdL+b/T6UzU8IiIVOf1+bGnrhOHmrpwoq0HmXYLLjm9FOW5aUPer6nTBY/PjyyHBZ+ecuJgYxcy7YHTdEGmDXMrcwEAHx5pxbzKXBRk2hP9VIjGRMKDkfr6epSUlETcVlJSAqfTid7eXqSlDXxzrl27Fvfdd1+ih5a0Gp19ONLcjR0nOjC9NAsLJuTBbJLgsJq1HhqRbjj7PHh26wm8faAZJpOE+686HWU5QwcLsfD5Ba59/H18eLQt4vb7XtyNORW5+MY5VbhiTjlMpshdTP+29QS+/7dP4BeDf2+TBOXrmXYLfnP9AljNJhRl2fGXLbW4cm45Zo3LGfVzIBprCQ9GRmLNmjVYvXq18n+n04nKykoNRzR23j3YjJXrP4Tb64+4PdNuwR1Lp2PF2VXaDGwYfR4fzCYJVjMXaFHy+8cnp/Cff/k44ra2bjeevuksWEb5O7zrZIcSiCyamI/KvHQcbOzEJyc68Mnxdtz69HY88PIe3L5kGr66aDyAwAXIvS/sighEynMcmFyciT6PD1azCcdaenCyvVf5epfLi+t+90HEYz/38Um88b0L0dbtxqu7G3D1GeOQn2Eb1fMZSmefBx8ebUVxlkMJgoQQ2HKkFW/sa0Kfx4cFE/Kw9PRS2Cw8N9DgEh6MlJaWoqGhIeK2hoYGZGdnR82KAIDdbofdbsz0469ePzAgEAECJ557//EpLppRjMr8dA1GNrj2Hjcu/993YDVL+PftF/CkQ0mty+XFbU+HApGvn12Fv350HB8da8OUH/4L5TkOrP/GIkwryRrR9//0VGBa+byphfh/NywGEPiAfv9wKzbva8RTH9SiwenCD57diW63D9cuqsSD/9qLbrcPcytz8acbFgEAshzWiO/r8vqw5u878f7hFjzwxdn4ycZ92FMXOYXd1OnC5n2NWP/uUXx0rA2765xY+8XZuOvZndh5sgP/97UzMKU49Ly6XF78+9N6nD25EKU5jrie5546J27840c40RYIkKonFWDRxHy88mk99tZ3Ksetf+8oynIcuPmCyVh2ZiUzvBRVwoOR6upqvPzyyxG3vfrqq6iurk70Q6ck+US28bbzMKM0G4eautDc6cIdf9u
B2tYenPfTN7B+5Zm4cHqxZmPs8/iC89qBk+U7B5uVK7YtR1px7tRCzcZGNJzfvnlIyUB89KMlKMy0Y3JRBu5+4VMAwKmOPvz+3aNY+8XZI/r+e+sD7+GZ5dnKbZIkoXpyAaonF2D1JdOw5tmdeHbbSfz4xd348Yu7lePuu/L0AUGIzG4x4+Fl85T/TyjIwPp3j6DH7cO+hk54fQK765zYtKcRHx0LZGbe2NuIP79/DM9sPQEAuOn/bcXfbz4beRk2PPHOEeWxZ5RmYeNt58f0/Px+gVs3bMc/PzkFAMiwmeH2+VFzuAU1h1sABKaTrj5jHLIdVry8sw51HX249x+f4icb92JeZS6ONHejq8+LycWZuHhGMV7d04DCTDv+5wuzVJkqo9QT9yVsV1cXtm/fju3btwMILN3dvn07amtrAQSmWJYvX64cf/PNN+Pw4cP4/ve/j7179+Kxxx7DX//6V9x+++3qPAMd8fkFOvu8AICiYGHa5KJMLJ5UgG9dOFk57qF/7x+T8eyr78RFP9+M5z4+odzm8fnxlXU1OHvt62js7MP9/9yNVU+FrjK3HG1N+Lj6PD48/tZhbD3WiuYuF4QYYpKdKMzWY614bPMhAMBj181HYfB99qUFFchNDwUBO0+2j/gx6jr6AACVedEzmHaLGQ98YTauP2tCxO1fWVCBecEC1VhMLMzAfVfNws++Mhf/WHUuvnDGOADAK5/WK8f4hcBzH59U/n+4qRs/fH4nDjd1RQRBe+s7UdcRmgIajM8vcFtYIFI9qQCbvnshXv/uhbhmYQVmj8vBggl5eHz5Qjx8zTz815Wn463vfwY/vnoWxuWmocftw3uHWlDX0YdOlxfbj7fjoVf3Y8eJDry+txH//eKemJ8/6UvcmZGPPvoIn/nMZ5T/y7UdK1aswPr161FXV6cEJgAwceJEvPTSS7j99tvxy1/+EhUVFfjd737HZb1RdAUDEWBgivarZ1bCYpJwx992YG+9E16ff9Rz27ItR1rxs1f24oEvzMbUsNT0nX/fgcPN3bh9wyf4whkVAICnt9Ri58kOAMCi/9k04Hv96f1j+OZ5E5E9yNWdGr77zCd4aUed8v+vn12F/7ry9IQ9HunHLzcdhNcvcPmcMlw2q1S5Pd1mwVPfPAub9jTgoVf342hzz4gfo7EzsBKwKGvwqWaH1YwfXz0LK86uwtJH3oLNbMJtn5024scEgMr8QEahx+1Tbmvr8aCjN/B+fey6+bjlqW14eWd91Kngrcfa8Pk5g2clfH6B7z3zCf4RDER++uU5uGZhqJbvp1+eG/V+DqsZ1581AcsWVmL9e0fQ3OXGOVMK0ev2YdVT2+D1C8ytyMEnJzqwaW8D+jw+TuUYUNzByIUXXjjklWi07qoXXnghPv7444EHUwRnnwcA4LCaBtRdSJKEL82vwA+f3wW314+6jr5R1Y40OvvQ1OXC6eU5+Opva+AXwLf/vA2vrr5AOeZQU5fy77ue2wmP16+kewfT2u3G6g3b8dvrFw5YLaCG9h43Nu6qj7ht/XtHcdW8cpwxPk/1xyP96HF78c6BJgDAHZdMhyRF/n7OLM/G+IJ0PPTqfnS5vOhxe5Fui38mu8kZyIwUDxGMyKYUZ+KZm6uRZbdg3DDLfocz2PnALwLnlEtPL8XnZpXhpZ11eG1PIwDgf689A+8eaMaGj47jQENX1PsDgamZW/68DRs/rYfZJOFX156Bz80ui2t8NosJN50/OeK2935wEfY1dOKcyYU4a+0mNHa6sONEBxZNzI/re1PqY6VhEul2BzIjGYOcAE0mCaXZgSKz+uAJbyS+85ePseiBTbj8f9/BG3sblfnzA41d2PBhLX66cS9++doBWMKCiac+qFUCkfH56Vg4IfTBf9W8ctz1uRn4wzcChXev7WlUrp7UtnlfE3x+gWklgZO4OTjGLzz2Hho7R/4zIf379JQTfgGUZNtRVZgR9ZgMmxkOa+C02NzpjvsxhBBo6gpkRoq
zYysInT8+LyIjOVJTijMHLR7v8/hhMklYddEUyDFYlsOCi2cUY2JR4GdxtKV70O+9/r2j2PhpPSQJIwpEBlOc7cB5U4tgMknKapyDjYMHRaRfDEaSSJ8nkDodKkUpV7zL89Lxau12K/O9ALDuzUMRX7/z7zvx2OZD+MVr+9HW4xlw/5JsO/78zcX4xbJ5KMiwYU5FDn765Tm46fzJuGBaEW6+IHDl88zW4yMa33D+vTuQFfnszBKcWZWPn39ljvK1bcfaBrsbET453g4AmFORO+gxkiQp0ytyUBGPth4PPL5AdF80xg3J7BYzpocFNXPD6k++emZgOuW0smw89rX5+MqCCjz59TORYbdgYjAwO9IcPRjx+Pz41esHAAQKbNUKRPorz5XPbcPXrpD+JGWfEaPq8wTmeuUrs2jkzEjDCIOR/lcdHxwZuuBUnoc+3ho4QfzzO+eiOCswhvfWXASzJEXUrlw5txzr3jyEXSfV75rb6/bh1d2BZeKXzQqcEL9wRgWe3XYSbx9oHnGARsawK1jrNGeYpmCFmXYcb+1FU2f8wYicnctLt2qyxP3LCyqw82QHzp5cgF9dewZ++9ZhAMB/XjxVOeay2WW4LCygmCQHI03dEEIMmL5650Az2no8KMy04WvBviiJUJQZOK80d8WfkaLUx2AkifQqwcjgmZGyUWRG/H6hTO9U5qcpAcZQLphWhG3H2gH0Is1qVgIRIHAl1t+4vEDw0tHrGVUh2pHmblhMUsQ8+L6GTnh8AoWZNpwetmxyYmEG3j7QjOYRXMmScRxqClz5TysdekpEzmiM5Pep0RmcosmKr2eHWpZXT8AZ43MxpTgT6TYL1nzutGHvU5mfDkkCOl1eNHe5BxTePhtcjfP5OeWqFc1Hk2EPnCt63N5hjiQ9YjCSRFwxBCMlcmYkzpoRIQS+8Ni7+ORE4Opwekk2Gp0uuPpV1T/0lbn40oIK7DjRjncONuP6syagtrUHdz27M2J58WCyHRY4rCb0efxodLowviD+ItvjrT245BdvwuMTuOn8SbgreEKVGzydVpYdcfUmL88cyRw/GUOfx6esAptclDnksYXyNM2IMiNyvYg2TRslSRpyGioah9WMcblpONHWi9+8eQjpNjN2nOzArPIc3PKZKag51AwAuHxOYqZnZBnBPXi6Xb5hjiQ9YjCSREI1I4Nffcjzqifa4lt62NTpUgIRAMjPsOKx6+bjhj98BAD49XXzUZmfrhSRzanIVU5qp5fn4IVV58b0OJIkoSTbgWMtPWjo7Bs0GPH7BVq6B16FAcDm/U3KvPtv3zqML82vwPTSLOw+FQpGwhWO4kqWjGFTcPWIJAUKsIcyqsxIcJpmqGW9yei0smycaOvF7945oty2eV8T9tZ3ornLDatZwuwE73mTbmNmxMgYjCQRuWYkbYjMyKTgVd3hQeZ3B/PPsL4cAJCXbsN5U4tw1bxypNssWHp6qWpLcUuyAsFIfb+ppJd21OHlnXX46Zfn4Ccb9+KPNcew4aazsHhSAT443IL/fPpjXH3GONS2RAZa/9pVh+mlWUpmZGa/YEQ+8bNmhAbzyYl2AMBls4bfI2VUmRGNp2lG6j/On4QDDZ3wC6AiLw0enx8fHm3Da3sCNVozy3MS3vtDXkXY7WZmxIgYjCQRORixD/Gmn1CQDlNwfrep0xXz8sGH/70v4v95GTbYLCb88qtnjHzAgyjJiT6VdMtT2wAEtkL/Y80xAMCjmw9h8aQCPPfxSTQ4XfjNm4eV47+8oAJ/23oCH9e2w+8XEdM04eT6kb31Tri8vqi1LGRsclbtwmnDb6MwmsyIHMDE0mMkmSysysfmO0LNLLtcXsy979/wBdf9nzUGfT/S5ZoRFzMjRsSlvUmkV56mGeLD1G4xK2nmg02xr8fv3xApPz1xO3kOV2QrByJAYGdSIHpvgf8v2C77o6Ot2HmyA91uH2xmEyYVRfaIKMtxwGqW4BesxKfo5OmT8hgaixVlBd4
bI1naKz+OVjUjasm0WzCnIjQtc9GMxO+FJWdGepgZMSQGI0kklqW9QKgAT14dEIv+GZS8BG4rXhJHY7aWbjf8foF9Ybt8AsAvvzoPU4oDz7Pb7cNVj74LAJhWmgmreWB3WvlqdiSpddI/+feiMGv433tliWmnO+59j5QC1hSbponmu5+djqqCdKz6zJQx6Ygqr6bpZs2IIXGaJon0eYdfTQMAk4oysGkvcDiOzIi8UkeWl564vWPkzEh4zchgJ/U9dU7srnOiM5iavXJuOa5bPB6LJxVEPf6CaUVRby/KsuNURx8aR9GZlvTJ4/MrDfxiaUQm1yD1enzodHnj2mepNZiZK8hMXLA/Vs6dWhgxdZNocuv9bk7TGBIzI0nEFZymGaqAFYgsYo35e4ct4Z1ekjWg7kJNpVGCEXl1TH8n2nrx+V+9AwBYVJWP/732jEEDEQA4d8rgwQgQujIlkrV2BwIEs0lCXgzTk2k2M/KDmcMTMfTikfn8Qgmqc9ISF+zrlTxN4/GJqBv5kb4xGEkisU7TTBqmfXM0cjDyx28swiu3n6+s6U+E0rBeKP5gAZzbN/zJZWKU/UL+/q2zI/4/rSR6j4gJBYH7fnqqI+rXybjkKZr8DFvMK8Yqg837jsexhD581+1E7lqtV3IBK8DlvUbEYCSJxNKBFQgV4dU7+2Ke03YHp4DsY9CiuijLDkkCvMFeIoHHHz4YueG8iQNuWzAhD9+/dLry/4JB0uzzgzv27h9i51EyJrkQtTCOvWIqggXfx1tjD0Y6egNTQWlWsyat4FOd1RzarZzLe42HNSNJJJalvUCoUt/tDcyF58dQjCpnRsbiJGk1m1CYaUdTpwv1HX0oyrJHDUa+NL8Cf992At+/dDq+eEaFMr3T3zfOmYj6jj4sCNspuL/QHjrxNYMjfRNC4O39gQ6i8TQiq8yLPxhx9gWCkew0nlZHKsNmhtvr5/JeA+K7Jon0xVgzYreYUZBhQ0u3G/UdfXEFI2PVg6M02xEIRpx9mI2cqMHIsjMr8dA1c4f9Xg6rGfdfNWvIYyqCHx6Nna5R7YlD+vLM1hN48t1AV9HCOIpKleC2LfaaEWcwM8IpmpFLt1nQ1uNhZsSAmEtMIrHWjADhy2djO1nKwYA9hu+thv7Le6PVjAwXdMUjL92KLEcgto6nsJf07YXtJ5V/x7KSRja6zAiDkZFiS3jjYjCSRPq8wzc9k5UEp2rk9tPDcQVrRmwJ3HUzXGlOYHwNwRU10TIjaTb1xiJJUkQnViIg8vc9nt468u7T/bc0GEpnsIA1M4HF4XonXyz138CT9I/BSBJxKTUjsWdGGmIIRoQQoWmaMcqMlOWEimyBQTIjNnVP2uXBx4zlZ0LG0BVWe/Cl+RUx309eAtzp8sITw0owIJTZlK/uKX7yhVj/vkikfwxGkog3uAy2f4fRaOSOqnL76aF4fALyopuxqhkpyY7cnyZqZkTluo4iOVsUw8+EjEHuO/PMzdVxFbBmO0KBslwLMpzeGDa6pKExM2JcDEaSiDd4BWaJoReCvBFXLFmA8KzEWCztBUK9RuqHmqZR+aQtN5py9nK+mQJNyOT9kUribM9uMZuUGqT2GIMReU8VBzMjIxbKjDAYMRoGI0lEzoxYYsiMjAv2GjnZPnwBa3jKc6xqRuQ9QOTul27fwLSr2oFRVnAVQ2dfbB8epG/HW3vg9vrhsJpQkTf8Bnn95Qa3TGjviS8zks7MyIjJmRF5awwyDgYjScQbbJkeS2ZkfEGg2v9YS/ewjc/klKfVLMXcgXK0QoFBIEvh9kaO0WE1qT4WObXe2cfMCAFHWgKrqqoKMkb0uybXjXT0xrYTdF8wM5LGzMiIydPIfawZMRwGI0kklBkZ/sRZkZcGkxRIDQ+31bkrjlU6apFXFLh9fvR5fAMKWBMxry4/Jnf9JAA4FtwuYUIwcI+XPO0Xb2aEPW5Gzho898nnQjIOBiNJxOuPvWbEbjErbeGPtQzdC0Fe1jt
cZ1c1hS9v7OzzDqgZSUQwIn8IcL6ZAOBYsEeIvG9RvJTgNsZuoL0xNi2kwZmD5z7fIBtrkn4xGEkiPmWaJraXpSp4kj06zIZ5cmfXsSpeBQInFfmk3OfxDQxGEpDKlp+fi/PNhFC9UnEcq2jCyZtJdrli+33qDWbkOE0zcnIwwsyI8TAYSSKeYGbEHOP8dllO5PLZwcTTv0RN4cv03P0ChEScsB1K8MPMCIVqh0banj0j+Dsae2aEfUZGS74Q88e4ASjpB4ORJOKLo88IEHvjs7Hel0Ymr9xxeX3w9Eu7JiKVzcwIhZP7g2Q5RtZcL5QZiTEYcbNmZLRMEjMjRsVgJEkIIZQP7FgzI3JL+GEzI3IBq4aZkf4nl0SseJGDLTZMIiAsMzLCvWLkgDnW4FauGWEwMnJy8b6PwYjhMBhJEuHvPWsMq2mAUBfWhs6hMyPyMrmxrBkJPF6ooLR/2nVvfafqj+dg90YKI29cN9LMiNzvp39WbzByzQinaUZOKWBlMGI4DEaSRPj+F7FnRoIt4WPMjIz1NE34tIl3DKrjbcHH8/mF0s2WjEvOjGSNsGZEWWYa4+8S28GPnlliMGJUDEaSRPibL/aakcA0TVOnC/4h3rzK0t4xz4wEHs/t9cMXzIyMz0+HxSTh/qtOV/3xwoM4HwvgDM3nF0qtR/YIMyPWODMjPWx6NmrMjBgX97pOEuGZg1gzI4WZdkhSoNirtceNwszoSxjlvhtj2WcECJ3M3T4/fMGVQhfNKMYPLpuRkHn18CXRXp8Ad3I3rq6wmqSRZkbk+gXu2jt2LFzaa1jMjCQJueEZEFvTMyDwYV+QEWhZPVQRa6gD69i+3HIw4vUJyOdzs0lKWIFfeBDHk5mxyfUiDqtJmb6Ll/L7G8PvksfnVzIonKYZOZOSGeE0q9EwGEkS8gnPbJIgSbHvo1GWE+jCerhp8MZnfRr1GZGvLMMzI7EGWiN6vPBpGgYjhhYqXh1ZVgQI1YzEkhnpDdtLhatpRs6iBCMaD4TGHIORJBEejMRjwYQ8AMDWY22DHqNVAas8bRKeGUnkRn0mkwT523t5ZWVozt7R1YsAod/fWIIReZM8kzT2tVl6YmZmxLD4rkkScsW+Nc4P65nl2QCAA42DL5XVqoA1tOnV2GRGAt8/tKKGjKtTlcxI7AWsSvGq1RxXZpMisR28cTEYSRIjzYxML8kCAOwbom9HqOmZNgWsHp9QVreYEnyiVk5m3GjL0JyjbHgGxLe0V1nWa2PV9GjIFytsB288DEaShPzhGeuyXtnUkkwAQHOXG81d0ZufadX0zBJ2MpczFYnPjPDKisIzI6OYphlJZsTGU+pomMOmdslY+M5JEt44N8mTpdssGJ+fDgDY3xA9OxKqGRnjaRpTaDWCHIwksmYEAMxmzjlTeM3IyDMjljh6XijLeq3MjIyGfC3GaVbjYTCSJOLdJC/ctOBUzf5Bpmq06jMS3qfBO2aZkdiXY5J+yZmR0RSwylOKsTTQUzbJY4+RUZEzI2xaaDwMRpJEvJvkhZtRGqwbaeiK+nXtClhDKVf/CGti4mVhzQghtLR3NDUj8Wza1qO0gucpdTTiyUaRvvCdkySUmooYN8kLN00ORuqdUb+uVQGrfGLx+P2QY4NEByNsJ01A+L40KmRGYpmmccvdVzlNMxomXkwYFoORJCFX7I9kGmNSYQYAoLa1J+rXXZoVsIb3GRlZTUz8jxlaTkzGpWRGxqhmpCe4Yy+7r46O8jPnNI3hMBhJEq5gMDKS1tWVeYEC1uYut3JSjPjeGjU9s0ZZTTNWmRFeWRmbGpmReLJsvcG6LG6SNzrMbBoXg5Ek4Q4GDLYRFLDmpFuVk+6Jtt4BX1eCkbFuBy93sAxbTWNOcJ8RK5ueEQBn7+hrRuJpwKX0GWFmZFTk8wML0I2HwUiSUIKREU6lyNmR41GmarSbptEwM8KTmaE5VciMxNOAq1eepmFmZFTkpfl+vn8Nh8FIkggFIyM7mVX
mBzbMi5YZ6dOsA6u8tFeMuMNsvOJZAUH65PMLtPe4AQD5wV2tRyJUTBlHB1ZmRkaFmRHjYjCSJFyjmKYBkjQzErbRmHx1OVaZkVg2NyN9au12wy8ASQLy00cejIQyI8MfG+rAymBkNEJFw3z/Gg2DkSThlnuBjLCuozLYhfV4W5RgRPMCVqEUlI5VnxFmRozraEs3AKAky6Gs6BoJkxT7yiylAyuDkVFhAatxMRhJEu7glbx9hCfPirzANM3x1shpGm9Y91PNmp75Q5kRdmClRNtxogMAMGtczqi+j0WpXxj+2C4X+4yogcGIcTEYSRKjLmANZkZO9MuMuMOmK8a86VnYRmNycJDoXXtZM0I7T7QDAOZWjC4YMceRGVFW74yiYJZYgG5kDEaSxGiDkXG5gcyIs8+r7MsBAH2e0Il0pN97pKxhDcj8o+gwGw+ezOhAY2BbhNPKskf1fcxhNSNimBU1oV2CR76UmEKZTa6mMR4GI0litAWsGXYLctMDJ8KT7aGpGnlfGqtZSni9Rn+hAtYxzIzEsQKC9EnetG40y3qByPqm4TJtajRZIyB4yuDFhAExGEkSajQmk7Mjp8KDEY82xatAKBPj8ob6jMgBSqIwM0JyMelopyUjgpFhMiNy+/mcUTRZo/BsFN+/RsNgJEnItR0288hPoFXBPWr++UmdcltoJc3Yv9SZ9sBVYrfLqwQjCY5FlDoV1owYV59KHYdjzYx4fH5lOpSZkdExx7E5IenLiN6tjz76KKqqquBwOLB48WJs2bJlyOMfeeQRTJ8+HWlpaaisrMTtt9+Ovr6+EQ1Yr0ZbMwIAn5tVBgB4cccpZY8ata4SR0I+MXf2eZQry0RnRizMjBie8js/ymxgrMGIPEUDhAJwGhkTV9MYVtyfDBs2bMDq1atx7733Ytu2bZg7dy6WLl2KxsbGqMc/9dRT+MEPfoB7770Xe/bswRNPPIENGzbgrrvuGvXg9USNYOTyOWXISbPC4xN4a38TAG0zI3Iw0tXnDWsHn9jHDF1ZsWbEqFwqdRwO30dpqA9HeSVNhs08qr4mFPqZMxYxnrjfOQ8//DBuvPFGrFy5EjNnzsS6deuQnp6OJ598Murx7733Hs455xx87WtfQ1VVFS655BJce+21w2ZTjEYuNB3tipepxZkAgJv/tA2HmrpU+74joUzTuH3wBD8gzAnOjEjBkxmnnI3JE7YPkmOMpmlCxausFxkt1owYV1zvVrfbja1bt2LJkiWhb2AyYcmSJaipqYl6n7PPPhtbt25Vgo/Dhw/j5Zdfxuc+97lBH8flcsHpdEb80Ts5MzLSpmey+RPylH8//tbhUAGrBtM0mWHz5x3Bq8dE79orf34MV3BI+iRP0QCjz4xIkhT6fRoyGJGX9XKKZrSkGH7epE9xffI1NzfD5/OhpKQk4vaSkhLU19dHvc/XvvY13H///Tj33HNhtVoxefJkXHjhhUNO06xduxY5OTnKn8rKyniGmZKUDqyjvJpb/dlp+P/OGg8AePtAcyhlrUFmxG4xKxmZ7uByy0QvLzYxM2Jo4X111JiajKWjr7ySJpsraUaNmRHjSvgn1ObNm/HAAw/gsccew7Zt2/Dss8/ipZdewo9//ONB77NmzRp0dHQof44fP57oYWrOPco+IzKH1Yw7ls4AEOg30trtAqBNZgQY2JEy0U3P5AI4Nk0ypr6wTSElFbJw8qziUFfq7T1c1qsWrqYxrrjyioWFhTCbzWhoaIi4vaGhAaWlpVHvc/fdd+P666/HN7/5TQDA7Nmz0d3djZtuugk//OEPYYpSQ2C322G32+MZWspTo4BVlpNmRbbDAmefF4eaApuGaVHACgTqRpq73Mr/E58ZCfzNc5kxyTVSaq0eC2RG/EN+OLZ0B36/CzNHvkMwBZj6db1VI6Ck1BDXJ5TNZsOCBQuwadMm5Ta/349Nmzahuro66n16enoGBBzmYC+N4VosG4lLxWAEAMblBfaqOdKscTDSLzO
S+JoRpnmNTJ6mGW3xqiyWGqTmrkD2sSDTWBdQiRB+fuAFhbHEXXG1evVqrFixAgsXLsSiRYvwyCOPoLu7GytXrgQALF++HOPGjcPatWsBAFdccQUefvhhnHHGGVi8eDEOHjyIu+++G1dccYUSlJB60zSycbkO7Klz4nBzYJ8OLTqwAkCWPTJ1PVaZEQa6xhSaplEpMxJDEz0581eQwczIaJlM4cGIgBnMjBhF3MHIsmXL0NTUhHvuuQf19fWYN28eNm7cqBS11tbWRmRCfvSjH0GSJPzoRz/CyZMnUVRUhCuuuAL/8z//o96z0IFQO3h1TqJya/jjrYHW8GpdKcarf2Yk0TUjEvsUGFqox4g6v++xbGnfEsyMFDIzMmrh1yo+v4BGpW6kgRGtRVu1ahVWrVoV9WubN2+OfACLBffeey/uvffekTyUYYTawatzEi0PBiMyrTIj/TtSjtVqGk7TGJPaHYdjKahskTMjrBkZNXO/zAgZB9sFJgk1C1gBYFxev2BEo8xI/w+FseozwsyIMSk1IyoF3zFlRoIr1goymBkZLVOMXW9JfxiMJAm3ym3bB2ZGtHmp0/oFI4nem0aec2bNiDEpNSMqT9MM1mfE7xdo7WZmRC0RmRHu6GAoDEaShNpt2yuSZJomzdZ/JVWia0YCfzPFa0x9qi/tHXraz9nnUbJweekMRkYrYj8gvocNhcFIEvD6/MoJTa0MRmGmPSIrkW7TKBgZkBkZq5qRhD4MJanQ0l51ft/lTJvXF/0XSm54lmEza7L/k970X01DxsF3TxKQi1cB9TIjJpOEb543Ufl/dpo2+2b0/1AwjVnNCE9kRqQUsKr0PhouM9Ie3HMpl1kR1SjvYV5RGAqDkSQg14sA6q2mAYBvXzgl6mOMpTSbRpkRnsgMyaXyahr592mwmpG2nkC9CFvBq0cpGuYFhaEwGEkCcqBgkkJNltQQHghMKMhQ7fvGI3xVgyRFpmETgX1GjM2lciG43BdnsOC2IzhNk5fBYEQtJu5PY0jc8zoJqN0KPtyL3zkX+xs6sXhivurfOxbhAVGisyIAp2mMTu0+I8NlRtqDmZHcNE7TqEXZuZeraQyFwUgScKncCj7crHE5mDUuR/XvG6vwAtZE14sAoWp8XlQZk9p701iG6TPS1iPXjDAzohal0RwvKAyF0zRJwK1yK/hkEn6FOiaZEfYZMTS1l/aahglGOnoZjKjNNEzRMOkTg5EkoHYr+GQSPk2T6FbwAPuMGF2o6Zm6fUYGu0rnNI36uJrGmPT36ZeC1O6+mkzCp2nGIhhhnxFjC7WDV3ujvOgFDJymUR9X0xiT/j79UpDa3VeTSWQwkvjnxwJWY1N9o7zhmp6xz4jquJrGmPT36ZeC1N4kL5k4wtrBj8UslHwiYyxiTHIxuNq79g4W3HbI0zTMjKiGq2mMSX+ffinIKNM0Y3GhIw3z4UH6FsqMJH6jPCEEGjvlHXuZGVGLiatpDEl/n34pSClg1WEwEn6FOhZdYEPTNAl/KEpCoaZn6k7TRCum7Oj1oMcdCH7675JNI2fmahpD0t+nXwrqDZ7Q+m8qpwfWsLkZj28sghGeyIxsLDMjJ9p6AQQ2pVRrWoi4msaoGIwkge5gMJJu03cPurEJRgJ/80RmTIkqYI1WTHmirQcAUJnPrIiahuvtQvrEYCQJ9Li8AIAMu76vrjyDrEhQE2tGjC20tHcsgpFAZqQiL12Vx6IAdmA1JgYjScAomZGxwD4jxiWECOvAqm47+KGmaSrymBlRE1fTGBODkSTQ4w5mRmz6zoyMBXmahu3gjcft8ytLutXqwCoXwrqiFF/L0zQMRtTF1TTGxGAkCcj7W2SnsVfBaDEzYlzhAYNamRF5OwO5FiUcp2kSg6tpjInBSBJo72EXR7VwbxrjkgMGSVJvnye5EFZe8SYTQuB4KzMjicAidGNiMJIEQpttMTMyWqGrKo0HQmPO5Qk1D5QLmUdLXm7f2y8z0t7jUWq9xrH
HiKq4msaYGIwkgXZuQ66aUDt4nsiMRu1lvQCQFpzu6R+MyFM0xVnsMaK24Vrwkz4xGEkC7dz5UzWcpjEutZf1AmE1I+7+wQinaBIllBnReCA0phiMaMznF3D2sWZELUoBK09khqP2st7A94o+TXNMqRdh8ara2GfEmBiMaKyzz6MsR8zRac3IaWXZAIBZ47IT/lhsB29ciZmmiR6M7G/oBABMK8lU7bEowBT8VOJUq7Gwy5bG5CmaTLslYh8XPfndioX40/vHsLx6QsIfK9RnJOEPRUlGnqZRq8cIEL60NzLVtrdODkayVHssClD6jLCA1VAYjGisLbiSRq9ZESCw2uDOS2eMyWOxHbxxueRpGhV3v5YzI+F9Rhqcfdhd54QkAXMrc1V7LArgijhj0ueleArhShp1yZkRzjcbT4+8+7WKnYzlKR+5SzIAbN7XCACYW5GLkmyHao9FAaG6L76HjYTBiMY6uJJGVezAaly9yh5P6gUjmfZA8rirLxSM7DrpBACcNalAtcehELaDV8/mfY248Gdv4MOjrVoPZVgMRjQWanjGlTRqYPGbcfUkYMNJefq02+2DN7jWtK6DG+Qlklw6x6nW0fv67z/E0ZYeXPObGq2HMiwGIxrr6A1ccXFfGnWwZsS45KkUNTMjWY5QYOMMZkfqOvoAAOW5nKJJBE7TqC8VTocMRjTWGewxku1gLbEa2GfEuBJRM2Ixm5SpGnlDSzkYKc1mZiQR2A7emBiMaKwzeLWVxWBEFSZ2YDUsORjJUHGaBghN1XT0etDn8aG1OzC1ysxIYphZ92VIDEY01ukKXG1lOThNo4bQ3jQaD4TGXCKmaYDQFGpHrwf1wayIw2rS9XJ8LfGCQj3yMulUwGBEY8yMqIsdWI0rEdM0AJCTFpqmUepFctJU2xmYInGaRj1pKbSJIz8BNeZUghFeZamBV1XG1TsG0zTyipoyTtEkDKdp1OOwmtDl0noUsWEwojG5gJWZEXXIV1WMRYynOzhNo3ZmJDt4oeDs9cAZLGJl8WriMLupHjX3aUo0fgJqjNM06mJmxLgS0fQMCGVGnL0eJeBh8WricJpGPQxGKGahpb2cplGDxBSvYSWi6RkQOU3THMx5l+YwGEkUNj1TTyrVjLCAVUMen1/ZDZSZEXUwxWtciVpNk5MeCkZOtYcKWCkx2PRMPQ5r6nzEp85IdagzbL8LubESjY4yTcMTmeH0JHiapqPXg3pnsOEZMyMJw71p1BM+TXPLn7fhJxv3ajiaofETUEPyFE26zQyLmXGhGrhRnjEJIdDrSczSXrnPSIOzL9TwjJmRhOF7WD3hwchLO+sAADecOxGFmXathjQofgJqiMWr6pNYwGpIfR6/soJK7ZqR/PTAJpaHmroBABk2M7LT+J5NFKVmhNHIqEWrGTna3K3BSIbHYERDzj52X1Ubr6qMSa4XAdQv2stLj9xRe3xBBhueJRBX06gnWs2IPNWYbBiMaKiLmRHVhdrB80RmJPIUjd1iUr0Fdl5G5MXC+HxO0SQSLyjUY40y/S9vaZBsGIxoSJ6mYfGqethnxJh6E9QKHgi8P63mUIAzoSBD9cegEDNXxCUUgxEagD1G1Mc+I8akrKRJQF8FSZIipmoq89NVfwwKkadpGIwkBqdpaAAWsKqPmRFjStRKGllB2OqDCQxGEkp+D7NmJDGYGaEBOl0MRtQWqhnReCA0phI5TQMAZWF9RcYzGEkoTtMkFjMjNEAnV9OozswUryHJmZF0a2IC+4ywuq5xeSxgTSRlmsav8UB0INpZsMHZl5TLphmMaMjJaRrVsc+IMck1I44EZUZWVE+ASQLmVuZGXaFA6mEH1sSRJMDjE2jtcWs9lAH4rtJQqGaEmRG1cFmgMfXK+9IkaGOwhVX5eOv7n8GfbliUkO9PIWx6lhjf/ew0pfNqMtaNMBjRUGiahpkRtbDPiDEluoAVACry0nnhMAa42aX6Vn92Gr5z8VSl+Hp3nVPjEQ3EYER
DXE2jPlbiG1O3KzGb5NHYC03TaDwQHegfz80szwYA1Lb0aDCaoY0oGHn00UdRVVUFh8OBxYsXY8uWLUMe397ejltuuQVlZWWw2+2YNm0aXn755RENWE/YZ0R97DNiTEoBK4ORlKcUofNNrBq5ZZ+8A7W8FUkyifuSfMOGDVi9ejXWrVuHxYsX45FHHsHSpUuxb98+FBcXDzje7Xbjs5/9LIqLi/G3v/0N48aNw7Fjx5Cbm6vG+FMaMyPqC+8ELoTgHiIGIe9Nk6byJnk09tj0LHHkYKSjVwfByMMPP4wbb7wRK1euBACsW7cOL730Ep588kn84Ac/GHD8k08+idbWVrz33nuwWgM/iKqqqtGNWge8Pr+yAoDz0OoxhQUffgGYGYsYgtKBlZmRlMep1sSRs/DOJAxG4pqmcbvd2Lp1K5YsWRL6BiYTlixZgpqamqj3+cc//oHq6mrccsstKCkpwaxZs/DAAw/A5/MN+jgulwtOpzPij950uUK7jDIzop7IYIQnM6PoZTCiG2x6pqbIn2F2EmdG4gpGmpub4fP5UFJSEnF7SUkJ6uvro97n8OHD+Nvf/gafz4eXX34Zd999Nx566CH893//96CPs3btWuTk5Ch/Kisr4xlmSpCnaBxWE/sWqEgK+1HyZGYc3UowwsA+1YWmaTQeiI7I12jZaYH3h9zjKpkk/FPQ7/ejuLgYv/3tb7FgwQIsW7YMP/zhD7Fu3bpB77NmzRp0dHQof44fP57oYY45J7uvJkR4ZoSxiHEofUaYGUl5ymoaRiOq003NSGFhIcxmMxoaGiJub2hoQGlpadT7lJWVwWq1wmwOnSROO+001NfXw+12w2azDbiP3W6H3W4fcLuesHg1McILWJkZMY6eBO9NQ2NHaXrG96/q5JqRZAxG4sqM2Gw2LFiwAJs2bVJu8/v92LRpE6qrq6Pe55xzzsHBgwfhD9toYP/+/SgrK4saiBgFu68mRv8CVjIGpWYkQR1Yaeyw6Zl6+v8Ic9IDnzdurx99nsHrNrUQ9zTN6tWr8fjjj+MPf/gD9uzZg29961vo7u5WVtcsX74ca9asUY7/1re+hdbWVtx6663Yv38/XnrpJTzwwAO45ZZb1HsWKSjUY4SZETVJzIwYUrcyTcP3U6rjNI365BYHmTaLco5MthU1cb9zly1bhqamJtxzzz2or6/HvHnzsHHjRqWotba2FiZTKMaprKzEK6+8gttvvx1z5szBuHHjcOutt+LOO+9U71mkIE7TJIY5vGaEu34ahrK0187MSKozc9fehDGZJGQ7rOjo9cDZ50FxtkPrISlG9Em4atUqrFq1KurXNm/ePOC26upqvP/++yN5KN1S9qWxc5pGTVzaa0xc2qsfct0X37+JkZMWCEaSrW6Ea0o1wsxIYoRP03ALcmNwe/3wBlP66Va+n1JdaG8avn9HK9qPUFne25tcy3sZjGjEyQLWhJAkSQlIeGVlDHJWBOBqGj3g3jSJlazLexmMaETuwMrMiPrkKyvGIsYgF69aTBJsFp7SUp0luLbXy2AkIZSW8Em2WR7fuRqRa0YyGYyojnPOxsJ9afTFEnwDe318/yaCkhnpYTBCCNWMcGmv+iSJ7aSNpJet4HVFCUa4nGbUBAaeBOX9aZgZIQBhq2lYM6I6JTPCaMQQetgKXlcsZjkY4fs3EVgzQhG4miZxWDNiLGwFry+WYJ8qTtOoJ3yVoZIZ4WoaAtgOPpHYTtpYupkZ0RUzp2kSSi4NYGaE4PMLrqZJIC7tNZbWbjcAoCBD35trGoU1uJqG7eBHL9opMIc1IySTAxGAwUgimFjAaihNnS4AQFEWgxE9kDMjHk7TJEQ2a0ZIJhev2iwm2C1MLatNLmAVzIwYQqOTwYieWM3cKE9tEkJFI0pmhMEIcVlvYjEzYixNXQxG9CSUGWHNSCLITc86Xd6kWnHIYEQDLF5NLIkFrIYiT9MUMxjRBdaMqCfaT1AuDRAiVPydDBiMaCDUY4S
ZkUQInst4MjMI1ozoS2g1jeBUawLYLSalsVy3yzfM0WOHwYgG2GMksdhnxDh8foHm4DRNYSaDET2wmkIfS7ygUEd4nxFJkpRl8OGLKbTGYEQDSmbEzmmaRGCfEeOo6+iF1y9gNUsoyXZoPRxSgdkc+uRkF9bEyLQHLoS7GYwYm5OZkYRinxHjqG3pAQBU5qUr6X1KbRYTgxG1DHYKLAoG7rvrnGM4mqExGNEAC1gTi6tpjONoMBiZUJCu8UhILRHBCFfUJMQZlbkAgGPB908yYDAShRAC//zkFN472JyQAioWsCYW+4wYR21r4GQ6Pp/BiF6YmRlRXf+coVzsLddbJQN+GkaxeV8TvvOXjwEAGTYz/vsLs/CFMypU+/4sYE0sZkaM43hbMBgpyNB4JKQWSZJgMUnw+gU3y0uQwkwbAOBvW0/gls9MwcRC7d8/zIz04/X5cf+Lu5X/d7t9+J+X9qha1S1nRrI5TZMQrBkxjuOtcs1ImsYjITVxszx1iKidRiL3cXrsjYNjNZwhMRjpZ19DJ440dwMA/t8NiwAAzV1uvHeoWbXHaO/lNE0icTWNcSjTNKwZ0RW58RkzI4mRl2FT/m21JEcYkByjSCKHmwKByJlVeThvahFWVE8AADz6xkFVahC8Pj/21nUCAKYUZ476+9FA7DNiDM4+D9p7AoF9ZR6DET0Jb3xGoyf1KxqZPS5H+bfNnBxhQHKMIomcbO8FAIzLDaR9/+OCybCZTXj/cCve2Nc46u+/t74TvR4fsh0WTC5iMJIInKYxhoaOPgCBjb8y7Mwy6om8WR6naRLDZjHhe5dMAwD0JElLeAYj/ZxsCwYjwTno8tw0rDg7kB3Z8OHxUX//7cfbAQBzK3NhYl+EhGABqzE0sg28bimZEU7TjM4QP740WyCA7/UkR8DHYKSfrcfaAADjckNp34tPKwEA7Do5+gYxcsEdp2gSR+4mzcyIvnGDPP2yBN/EnKZJHLklfC8zI8nn1qc/VjrSnVmVp9w+szwbQGAKp63bParH6D8NROpTMiM8kelaY2dgmobBiP5Y5GkaNj1ThTSg00goGOlxJ8dmeQxGgtq63Xhh+ynl/1NLspR/ZzusSofHT0+NLjvCYCTxOE1jDI1OTtPolYUFrAmXZg0EI+8dasHuUX6uqYHBSJC8RHAws8oD1ce7TnWM6nFOtUfWpJD6TCxgNYSmLnmahhvk6Y28tJe79o7OUD+9gszQ8t7Vf92e8LEMh8FI0LGwYOSKueUDvj6jNJApOdjYNeLHcHv9StFdOTMjCRNa2ssTmZ4xM6JfcgGrh9M0CRMexCfDHjUMRoLe2t8EAFg0MR8PXzN3wNergu1yj7V0j/gx6jv6IARgt5hQENZ0htQlByM8j+lbKDPCYERvLMyMqKp/nxEg8oI4GRZUMBgJevtAIBj5zkVTlBRhOLlm5OgoIsgT7YH7jstNgxTtt4NUwVbSxtDoDBawZjMY0RuLkhlhMJIoZpOEv/5HNYBAA0GtMRhBYNVFc1dglcy0sMLVcBPyA5mRpk4Xul0jWwolp5VLsjnHnUi2YHtjnsj0q8/jgzO44WRhJoMRvbHwgkIVw01VyxvmtXSNbpWoGhiMILCLrpwOzE2PvnldTroVecGvjXR+rSW4LDi8cIjUJ2e2ON+sX/LW5zazCTlp3HBSb1jAOjYKg1OcXS4v+jzaLvFlMAKgpTtwYsu0W2C3mAc9bkJwm/La1pHVjbQET6CsF0ksm4XFb3onZzILM22c8tQhM6dpxkSW3aLsTdMyyh5ao8VgBEBbT+BFyB8mSKgaZd1Iq5IZYVo5keSrKreXwYheNbEVvK7Je9P4OE2TUJIkId0ud2JlZkRzrd2B4p28YYKR8cHMyNHmEWZGumMLemh0QtM0vKrSKwYj+sbMiDpi+enJzc8YjCQBucV7/iD1IrKSYNV+6wjTWfIJtJA1IwnFmhH9YzCib/LSXraDTzwlGGHNiPbqgluRD5cZUTYWGuG
LdkLeEThsEz5Sn83MmhG9kwtYuZJGn9gOfuykjfJzTS2GD0aOt/bgF6/tBzB8YWmaNbDl8kiW9va6fcoJtDKf3VcTiTUj+sfMiL5x1151DVXkHZqm0Xb3XsMHI0+8c0T5d256bJmRkexyeKItUPSaZbdwKWKCyX1G3MyM6JbcfbWImRFdChWwMhgZjVh2xGBmJEnIhVIAhl1nnWEf+Yt2PBiMVOSncyligrFmRP/kLCMzI/rEvWnGjpwZGclFtpoMH4zUB+tFgNinafq/aD97ZS/OefB17G/oHPS+x1sD9SKV3K034ZQOrF5eVelVqBicwYgeWZUCVr6HE03JjDAY0Y4QAjWHWwAApdkOfHXR+CGPV6ZpwmpGetxePPrGIZxs78Wf3z826H2PB3cFrshj8WqiWVnAqmvdLq9yQcDMiD6ZWcCqqqFy8XJmhB1YNXSqow+t3W6YJGDzHRfCYR28+yoQFox4fErP/+3H25Wvf3KiY9D7yitpWLyaeEoBK4MRXZKzIuk2MzLsFo1HQ4lgCV5QcGnv6MTUZ2QUtZBqMnQwsiMYSJxenjNsIAIA6cETnxCAy+vHb948hK89/oHy9e3H2zHth/9C1Q9eGtAYTa4ZqWRmJOFYM6JvrBfRPytX04wZ9hlJAn3ewA9/sM3x+ksLC1h2nOjA2n/tHXCMfDX+9d9vUW7z+Pw40NgFAJhUlDHi8VJsbOzAqmv1zkCdF1fS6JeZu/aOGU7TJAH5w0q+kh6O2STBHiyOvH3DduX2/7piJr5z0RSlcBII7F/TE1y3va++E26vH9kOC6oKGIwkmpUb5elabbD+anw+s4x6ZVWmaXhBoYahFnAmyzSNoSdc5Q8riyn2pbbpNjNcXj9OtgdqQP7zoin4+jkTAQDfvWQ6AGDxA6+hwenC7lNOLKzKxyOvHQAAzKnIhSmOx6KRsZkDby42PdOn2uBGlZUMRnTLzGkaVYgYGo1wNU0S8MaZGQGA7n4v2M0XTh5wzJyKXADA89tP4upH38VrexqCt+eMcKQUj0xHIMaWawtIX5gZ0T8rC1jHzGi3OVGLoYMROTMi/+LHIvxqe+d/XYJ028Dk0oIJeQCAP71fG7Ha5syq/BGOlOJxWlkWAOBwc7cyVUb6IQcjEwoYjOgV96YZO3LNyN76Tk3rRgwejAR+0S1xZEZkcytykOWIXvg6tThzwG2fn1OGC6cXxf04FL/iLAdKsu0QAth9yqn1cEhFHp8fp4JTpMyM6JeZTc9UNdTltjzd2dTpws6Tg7enSDSDByNyZiT2H8PKc6pgM5vwky/PGfSYyUWRwciM0iw8smwe28CPodnjAlNi//jklMYjITWdau+FXwB2i4lLe3VMzoz4YtlchQYVy09vRmk2LphWhAybOWLF6FhjASvim6a55/MzceelM4bsS1KZn465lblweXz44zcWoSDTHrEHDiXesjPH47U9jXjl03rcf9UsrYdDKpELx8flpjG41zH5fMmN8hLPbJLwh28s0noYRg9G4i9glSRp2AZpZpOE5799tnI8jb1FEwP1OQ1OF7pdXnbq1IlGZ6AouSTbofFIKJHMEmtGjIbTNAi1HlaTJEkMRDSUk2ZFfnDjw6Mt3cMcTamiIdjwrCSbUzR6Jp+TfWx6popU+CwaUTDy6KOPoqqqCg6HA4sXL8aWLVuGvxOAp59+GpIk4eqrrx7Jw6rOH5yPNKfAC0Xxkwsc5X2BKPXJ3VdLcpgZ0TNO06gkhX58cQcjGzZswOrVq3Hvvfdi27ZtmDt3LpYuXYrGxsYh73f06FF873vfw3nnnTfiwapNro0yMRjRpaxgv5FuF5f36oUyTZPFYETPLAxGDCfuYOThhx/GjTfeiJUrV2LmzJlYt24d0tPT8eSTTw56H5/Ph+uuuw733XcfJk2aNKoBq0nuTsdYRJ8ygj1g+jeqo9QlZ0ZKmRnRNXZgNZ64ghG3242tW7diyZIloW9gMmHJkiWoqakZ9H73338/iou
LccMNN8T0OC6XC06nM+JPIsi/56kwn0bxS7cH91xgZkQ3WDNiDPKaAmZG1JEKH3FxBSPNzc3w+XwoKSmJuL2kpAT19fVR7/POO+/giSeewOOPPx7z46xduxY5OTnKn8rKyniGGTO5ZoSrbvWJmRF9EUIo0zTFnKbRNSUzwqZnhpHQ1TSdnZ24/vrr8fjjj6OwsDDm+61ZswYdHR3Kn+PHjydkfEpmZMj+dJSqmBnRl7YeD9zBFXDFzIzomlwz4mfTs1ERKVTBGlfzhcLCQpjNZjQ0NETc3tDQgNLS0gHHHzp0CEePHsUVV1yh3OYPLtWyWCzYt28fJk8euNGc3W6H3T4WJxtmRvRMzoz0aLwBFKlDnqLJz7DBbtGuUyQlnpl70xhOXJkRm82GBQsWYNOmTcptfr8fmzZtQnV19YDjZ8yYgZ07d2L79u3KnyuvvBKf+cxnsH379oRNv8RKXsJuYjSiS/JulMyM6IOyrJcNz3SPq2nUlQqfcHG3pVy9ejVWrFiBhQsXYtGiRXjkkUfQ3d2NlStXAgCWL1+OcePGYe3atXA4HJg1K7IVd25uLgAMuF0Lfq6m0bV01ozoSiOLVw3DpGRG2PTMKOIORpYtW4ampibcc889qK+vx7x587Bx40alqLW2thYmU2o0dpVjbtaM6FNGsGaEfUb0oYE9RgxDyYywgHVUUqnkZkQbdqxatQqrVq2K+rXNmzcPed/169eP5CETgqtp9K0iLw0AsL+hU+ORkBpau90AgIJMm8YjoUQzc9dew0mNFEaCsAOrvk0uygQANHe5lX2IKHV19HoAALnpVo1HQolmCWbXWTOikhT4jDN0MMKaEX3LDNupt7OPUzWprr0nkBnJTWNmRO+4msZ4DB2MCHZg1TWL2aQEJM7gVTWlrvbga5jDzIjumVkzoopUmuUydDDCmhH9k4MRZkZSX0dPcJomjcGI3lmYGTEcQwcjrBnRP6sl8Np6uEQw5TEzYhwsYFVXKnzCGTsYYQdW3bMGC+E8XgYjqUwIESpgZc2I7smZEbfXjx8+txO7TnZoPCJKNEMHI8rFMjMjumUxM92rB10ur7Kygqtp9M8cdoX45w9q8flfvaPhaFJXKu1NY+xghDUjuicvEeTS3tTWHqwXsVtMcFi5L43emXlSNhyDByOBv1kzol9WC7ci1wP2GDGWaMGIYP2Irhk6GOGuvfpn5R4XutDew3oRI7FE2VKkl7tvj1gqXG8bOhiRMyPcm0a/5JoRDzMjKa29N9DwjCtpjCFaZkTeDoBil0rJJIMHI+zAqndWc3CahpmRlPadv3wMIDWWKNLoWaIEI23dbFyoZwYPRgJ/s2ZEv+STGjMjqaup06Vc4TnZvM4QTFGCkZZulwYjobFi6GBELoiKMj1JOmExs4A11f3uncPKv//nC7M0HAmNpf7Zkbbg3kQn23txvLVHiyGlrFQoRbAMf4h+CdaM6J7VzALWVLfjeKDh1aWnl2L++DyNR0NjxWYxwesOFa3uPOFEzaFP8NePTiAv3YqaNRdzmfcwUukSzNDBCGtG9E+uynezA2vKOtrSDQC46YJJGo+ExpLdYkJPWDDy5LtHlH+39XhQ39GHqsIMLYZGCWDoCQruTaN/7MCa2oQQaOoM1AqU5Tg0Hg2NJbtl6KxHcxdrSPTE0MFIqAMrgxG9kvem8bIDa0rqcfuUQJI9RozFZhn646mth6trYpUKH3GGDkaUmpEUeKFoZJRde1nAmpLknXptZhMcVkOfrgzHPkww0uPmyqrhsM9IiuDeNPon14ywgDU1tQUbXeWmWyHxqsFQ7MMEn71udmTVE0MHI3LQyJOcfimraZgZSUly1838DE7RGM1gNSNTizMBAN0MRnTF0MEIa0b0T+4zwmma1CQHIwWZDEaMJjxjHd5zpDRYyNzLaZqYpcInnKGX9n71zEqcP7UIk4q4PEyvuFFeamsJBiN56QxGjCZ8Of6EgnQcagos8S7KtANAxLJfGkzqXIQ
ZOhhZduZ4rYdACcbMSGprDbYAL+A0jeG4w96ztrApm7LcQGaEwYi+GHqahvRP6TPCpb0pKVQzYtd4JDTW3N5QsHFmVaDzblmOA+m2wDU0V9Poi6EzI6R/tmBmxM1gJCW1dAWDEdaMGE54NvP7l86AXwicNalAaYLHAtbYpUJZJIMR0rU0WyC9y5RualIKWDlNYzh9ntB7NtNuwX9fPRsA8NcPjwPg0t5YsM8IUZLItDOlm8q4tNe45IZ3/YUuMPie1hMGI6Rr8vxyl4tXUamohZkRw3r4mrkwmyT87MtzIm5PZ7ZTlzhNQ7qWYQ+cuLpdvIpKNR6fHx3Bq2NmRozn83PKseS0Ejiskc3PQgWsDEZiJaVApxFmRkjX5GkaBiOpp60nkBWRJCCXfUYMqX8gAoRlRvieHlYKlYwwGCF9y2AwkrJawxqembmBFAXJ2c4eDzMjesJghHQtI5jS7Xb7IFKptJzQ2sXiVRoojdM0usRghHRNvory+QVcXvYaSSVy8Wo+p2goTHpw6sbt9bOZYaxSILHIYIR0LcNmUTbccg6yVJCSE5f1UjTp9lAdCadqhpZK2WAGI6RrJpOEoqxAK/G6jj6NR0PxaOwMvF6FWQxGKMRmNik1RD1csq8bDEZI98py0gAAdR29Go+E4lHbGni9xuenazwSSiaSJIX1GmFhul4wGCHdKw/u8nmqnZmRVFLbEtgyfnx+hsYjoWSTFqwb6eU0TUxSoGSEwQjpX0Ve4Mq6trVH45FQPI4FX68JBcyMUCS5JXwfg5EhpU7FCIMRMoAZpVkAgN11To1HQrHq6PWgvSdQcFzJaRrqR8mMuLmaRi8YjJDuzSzPBgB8erKDV1Ip4ngwK1KYaVO66BLJHJym0R0GI6R704qzUJGXhm63D5v2NGo9HIrBsZZAMMLiVYqGNSPxkaTkrxphMEK6ZzJJOLMqHwBwsp11I6ngWGugeHVCAYtXaSClZoRdWIeUQm1GGIyQMeSkWQFA2QWWktunpwL1PVUMRigKZkb0h8EIGUI2g5GU0efx4Y29gem0C6YXaTwaSkasGdEfBiNkCKHMCJskJbutx9rQ4/ahNNuBuRU5Wg+HklCaLfDR1ctpGt1gMEKGkO0IrMhgZiT57W/oBADMqchJicI7GnvyNA1Xxw1NLhlJhXcRgxEyBNaMpI79DV0AgKklmRqPhJIVa0b0h8EIGYIcjHQyGEl6BxsDmZFpJVkaj4SSVZotkOns5kZ5usFghAwhJ52ZkVQghFAyI1OKmRmh6LLTOO2qNwxGyBDCp2lEKi2+N5imLhc6ej0wScDkIgYjFJ38fnYyGBmSfK5LhdIrBiNkCPLJy+sX6GEFftI6GMyKjM9PV5ZvEvXHGjD9YTBChpBmNcNqDlwe8ASWvA40ylM0rBehwTEY0R8GI2QIkiQh28ETWLJ7/3ALAGB6KadoaHAMRvSHwQgZBk9gya3X7cOruxsAAFfMLdd4NJTM5Pdyr8cHl5fTrsNhzQhREmFL+OR2sLELXr9AQYYNM0qztR4OJbFshxV2S+Djq76jT+PRkBpGFIw8+uijqKqqgsPhwOLFi7Fly5ZBj3388cdx3nnnIS8vD3l5eViyZMmQxxMlCjMjye1oS2Cn3klF3ByPhmYySZhQkA4AONrCnbj1IO5gZMOGDVi9ejXuvfdebNu2DXPnzsXSpUvR2NgY9fjNmzfj2muvxRtvvIGamhpUVlbikksuwcmTJ0c9eKJ4cDlgcqttDXyoVOalazwSSgUTgjs6HwsGsZTa4g5GHn74Ydx4441YuXIlZs6ciXXr1iE9PR1PPvlk1OP//Oc/49vf/jbmzZuHGTNm4He/+x38fj82bdo06sETxYPBSHI70RYMRvIZjNDwqoKZkSPNDEYGI7dUklJgd5q4ghG3242tW7diyZIloW9gMmHJkiWoqamJ6Xv09PTA4/EgPz9/0GNcLhecTmfEH6LR4jRNclMyIwxGKAahzAinafQgrmCkubk
ZPp8PJSUlEbeXlJSgvr4+pu9x5513ory8PCKg6W/t2rXIyclR/lRWVsYzTKKo2EI6uR1v7QUQaHhGNJyJhYFg5CinaXRhTFfTPPjgg3j66afx3HPPweFwDHrcmjVr0NHRofw5fvz4GI6S9ErOjLQzGEk6Xp8fp9oDwUhlfprGo6FUIG8XcLS5G10ur8ajodGyxHNwYWEhzGYzGhoaIm5vaGhAaWnpkPf9+c9/jgcffBCvvfYa5syZM+Sxdrsddrs9nqERDaswM/A71dzl0ngk1F9dRx+8fgGb2YSSrMEvVIhkpTkOFGTY0NLtRm1LD2aWczl4fwI63ZvGZrNhwYIFEcWncjFqdXX1oPf76U9/ih//+MfYuHEjFi5cOPLREo1CaU7gQ459CZLP8WDxakVeGkymFDhzUlLIy7ABANp73RqPhEYrrswIAKxevRorVqzAwoULsWjRIjzyyCPo7u7GypUrAQDLly/HuHHjsHbtWgDAT37yE9xzzz146qmnUFVVpdSWZGZmIjOTLZ9p7JTlBNL/zV1uNHW6UJTF7FuyOB4sXq1gvQjFIS89OPXaw6nXVBd3zciyZcvw85//HPfccw/mzZuH7du3Y+PGjUpRa21tLerq6pTjf/3rX8PtduPLX/4yysrKlD8///nP1XsWRDGQT1wAcM1vYlv9RWNDXhFRmcd6EYpdTlowM8JgJOXFnRkBgFWrVmHVqlVRv7Z58+aI/x89enQkD0GkOils4pS9CZLLuwebAQCzx+VoPBJKJfIFRlsPp2mikfuMpALuTUOGsvqz07QeAvXT2NmHT050AAAumlGs8WgoleSms3eQXjAYIUNZXj1B+bfb69dwJCTbU9cJAJhanInibK6kodgpjQw5TZPyGIyQoWQ5rMoyN3lqgLR1sk3uL8LiVYpPhj1QadDtZp+RVMdghAzFbJJgCS4dXbn+Q7x3iAGJ1k62h5b1EsUjwxYIRnrcPo1HkpyUvWlSoNEIgxEyHI8vVNW1/t2j2g2EAAC1wTbw43IZjFB85MwIO7CmPgYjZDiLJoY2aXz/cAt8/hQqOdcZIQR2nGgHAEwrzdJ2MJRy0u1mAEA3g5GUx2CEDOehr8zFTedPAgA4+7zYdbJD4xEZ16GmLhxr6YHdYsLCCXlaD4dSTJo1EIz0eThNk+oYjJDhVOan467PnYbPzgw06vvgSIvGIzKuLUfaAADzx+chy2Ed5miiSA4lGOHKuGiUvWk0HkcsGIyQYZ1WFthYiw3QtPPUlmMAgDOrmBWh+DmsgY8wl5eZkVTHYIQMq6ogsJT0aHOPxiMxpvYeN3addAIAzp9WpPFoKBXZLcyM6AWDETKsCcFgpLaVwYgWPq5tBwCU5ziwsCp/6IOJopAzI6wZSX0MRsiwxudnAABOtveyiFUD8s88fHUTUTwcwcyI1y/g9TE70l+oz4i244gFgxEyrMJMm/LvX7y6X8ORGNPBpi4AwNQSLumlkZELWAGgj9s7pDQGI2RYkiThR5efBgDYWtsGP/uNjKmDjYFgZEpxpsYjoVRlt4Q+wjhVk9oYjJChrTi7CmlWM9p7PNha26b1cAzD7xc41MRghEbHZJJgs8grapgZSWUMRsjQrGaT0m/kN28e0ng0xnGyvRd9Hj+sZgkTuEEejYLDwiLWwaRSrpfBCBnejecFurHWHGqBh0VwY0KuF6kqyIDFzNMQjZyDXViHJaVA2zOeBcjwTi/PRqbdgm63D0fZAG1MHGK9CKmEXVj1gcEIGZ7JJKEyOFVwoq1X49EYA4tXSS1KF1ZmRlIagxEiABV5ge3rT7SxAVoiHWrqwse1bQxGSDVKZoQt4QdKoaIRi9YDIEoGcjBynJmRhLr4oTcj/j+5iMEIjY6DLeGHxaZnRClifHCa5lgLa0bGiiQxGKHRs7MlvC4wGCECUFUYaA3PTfPGzrjcNKTZzMMfSDQEbpanDwxGiABMLAgGIy3d7MSaIP1/rqwXITVws7zBiRQqGmEwQgR
gXF4azCYJLq8fDZ19Wg9Hl3wi8sR4zuRCjUZCesIC1uGlQMkIgxEiINCJtTJYxHqEvUYSwheWGanIS8P11RM0HA3pRW6aFQDQ3uPReCQ0GgxGiIJYN5JY4d1tN952fsSOq0QjVZRlBwA0d7o0HgmNBoMRoqCqsLoRUl94ZsRh4amH1FGYGQhGmroYjPQnUqdkhMEIkWxysKBy2zHu3psI3rBgxGxKhVlsSgVyZqSJmZFBsc8IUQo5f2qgoPKTE+3wcsM81cmZEYtJgpQKZ0dKCXJmpJmZkZTGYIQoqDIvHQ6rCR6fYCfWBJBrRpgVITXJmZHWbnfEVCClFgYjREEmk4RJhYGpmr11To1Hoz/hmREiteRn2GCSAL8AWrqZHUlVDEaIwiysygMAvLGvUeOR6I9cM2Ix87RD6jGbJORnyCtq3BqPJrmE8kTJfwHAswJRmEtnlQIAXtxRh/YentjUJGdGOE1DaivMtAHgippUxmCEKEz1pALMKM1Cj9uHv2w5rvVwdIXBCCUKe42kPgYjRGEkScLy6ioAwKY9DdoORmf8waYHjEVIbUXsNZLyGIwQ9XPulNASX26+pR65AZOJy3pJZXJm5EQbuyeHE8E3XSq85RiMEPVTmZ+G0mwHPD6Bj2vbtR6ObsjTNAxGSG1njA8Unv9t6wkcb2VAkooYjBD1I0kSTi/PBgAcbu7SeDT64U+hqzRKLZfMLMGiifno8/jx+NuHtR4OjQCDEaIoKvPTAQDHW9n8TC1yPyoWsJLaTCYJt108FQDwx5pj2HqsVeMRUbwYjBBFUZGXBoBz0GoSgtM0lDhnTynEktOKAQAvbD+l8WiSg9xnJBXecQxGiKKoyAtkRk6wLbxq5MwIYxFKlItPKwHA920qYjBCFEV5rgMAUNfBk5pa/MyMUIKV5wYymqfa+b5NNQxGiKIoywmc1Bo7XXB7uYOvGvx+9hmhxBoXvIg42darTAtSamAwQhRFQYYNNrMJQgANzj6th6MLfvYZoQSryEuHxSSh0+XFrpPc7FIoU6PJ/55jMEIUhckkoUyZqok9GBFC8IpsEKGlvcl/YqTU5LCaceH0QBHryvUfopkdWVMGgxGiQZTlBFO+7YOvqPnwaCvueWEXNu6qQ82hFpzx41dx7ePvc2onCraDp7Hwky/NxuSiDDR3ufCVdTW445lP8NKOOq2HRcOwaD0AomQ1qSgT7x9uxe0bPsE7B1rw0DVzla9tPdaKb/95GxqcgSuvP9YcU772/uFWbDnSinOnFo75mJMZ28HTWCjItONX187Hdb97H0eau3GkuRvPbD2B7LRFOG9qkdbDo0EwM0I0iMtnlyn//vu2ExGNlO7/524lEIlmTx3nq/tTMiNMjVCCzSzPxqbvXoiHr5mL84IXBT9+cTe8PmNlLNlnhEgHzplSiOdvOQdzK3IAAE++cxQA0Ov2YdepQLDxpfkV+OmX5gAAMmxmZZO9Wu6PMUCogFXbcZAx5GfY8MX5Ffi/a+cjN92K/Q1d+MuWWq2HRYNgMEI0hHmVuVj7xUCw8e/d9WjtdmP78Xb4/AKl2Q78/Ctz8JWFFfjjNxZh423n44q5gWwKg5GBuFEeaSEn3YrVn50GAHj41f3o6PFoPCKKhsEI0TBmlmdj1rhseHwCL2w/iTf3NwEAFlblQZIkSJKE86cVoTI/XdnT5mhLt5ZDTkqCBaykka8tGo+pxZlo6/Hgl5sOaD0cioLBCFEMrllYCQC475+7se7NQwAia0pkM8sCu/0ea+nBJ8fbx2x8qcCfQj0PSF8sZhPu/vxMAMAfa47iUJNBduNOoTYDDEaIYnDNwkpMKsxQ/j+3IgefnVky4LjcdJsSpPy/948N+LqRcWkvaen8aUW4eEYxvH6Bu5/fhT6PT+shjZlUiP+5tJcoBg6rGc9++2y8ub8JZlNgWsZijh7Lf2VhBV7aGeg7QiHcm4a0dtflp+Htg81471ALLvvl21hePQHXLhoPu8XEjJ3GmBk
hilFuug1XzRuHz88pR7bDOuhxZ1blw2KScLK9FztPdIzhCJMb+4yQ1iYXZWL9yjOR7bDgSHM37vvnbsy4eyMW/vdreGH7SXZP1tCIgpFHH30UVVVVcDgcWLx4MbZs2TLk8c888wxmzJgBh8OB2bNn4+WXXx7RYIlSQYbdggunB5or3fjHj9DCltQAwlbTcJ6GNHT25EL8Y9W5uOHcicjPsAEAWrrduPXp7bj0kbfx0L/34bhOVsOlUmgV9zTNhg0bsHr1aqxbtw6LFy/GI488gqVLl2Lfvn0oLi4ecPx7772Ha6+9FmvXrsXnP/95PPXUU7j66quxbds2zJo1S5UnQZRsfvKlObjmNzU41NSNm/+0FX/8xmKk2cxaD0tTrBmhZFFVmIG7Pz8T3790Ok629eKPNcfw1JZa7GvoxL6GTvzq9YMoyrKjIMOGoiw7ZpRm4dypRZhXkQu/ENh6rA0n2nqQk25FaXYaynMdKMl2wG4xodPlhcUkId2WPFUQqZCMlESceanFixfjzDPPxP/93/8BAPx+PyorK/Gd73wHP/jBDwYcv2zZMnR3d+PFF19UbjvrrLMwb948rFu3LqbHdDqdyMnJQUdHB7Kzs+MZLpFm9jd04kuPvYdOlxdnTcrH5XPKYTebYLeakJ9hw4T8DGQ5LPD4/HD7/PD4BFxeH7pdPoT3TsxJsyI/w4YshwXWQepUUsFfPzyO7/99By6aUYwnv36m1sMhitDe48brexvx3Mcn8faB5hF9D6tZgscXeO9mOywozXHAYTXDLwSkYB/Upk4XOvs8SLNZUJhpQ1mOA6U5aWjq7IPPL9Dl8iLLYUVuuhV56TbkpVuRm26L/HdG4GsO69AXOFf+3zvYcaIDT359IS6aMbDgfizE+vkdV+jmdruxdetWrFmzRrnNZDJhyZIlqKmpiXqfmpoarF69OuK2pUuX4vnnn4/noYlSzrSSLPx2+UJc/8QHeP9wK94/3Dr8nYaRZjUjy2FBlsOC7DQrHBYzBASEGDolG+3CKNrVkhTlSAEBvz+Q2RAAzJIEkwkwmySYJAlmkxS8TVK+Fu32F3ecAsDMCCWn3PRAx9Yvzq9Ao7MPjZ0utHa7Ud/Rhw+PtqLmcAtOtPUCACYXZWBaSRY6ej2o7+jDqY5e9Hn8SiACAM4+L5x9gy8h7nb70Nzlwt76zhGP2WoOZGAybGZk2C1It1uQaTcj3WZBmtWMHSlUsxZXMNLc3Ayfz4eSksgIq6SkBHv37o16n/r6+qjH19fXD/o4LpcLLldont3p5D4flJqqJxfghVXn4IXtp3C4qRt+IdDj9qKly42jLd3w+ATMJglWswSr2QS7xYR0m0X5wPYJgfYeDzr7vACAXo8PvR4fGjtTtw6lONuh9RCIhlSc7Yj4Pb3mzECfIWefB8If6OoaTgiBjl4Petw+5GfY4PH5Ud/Rh3pnHzw+P0ySFAjmBVCUZUeWw4oetxdNnS6cbO9FXXsfSnIcsJklZNgt6Orzoq3Hg/YeN9p63GH/9qA9+G+vX8DjCzxuR+/QXWXzM+zq/5BUljyTWmHWrl2L++67T+thEKni9PIcnF6eM+B2uaDTHEOqwOvzo8vlhbPXC2efB86+QIDS5/HBJEmQpEBWI1q2I9pErIiSR4l+XCCTYZIkZRWMXwj4/EL5O/TvwNcGu93nF8jLsOHL8yuGfb5EyWiwVXSSJCE33YbcQANmOKxmZDmsmFqSlZBxCBGYzulyedHt8qHb5UW3O/DvHnfg9l63DxaThMnFmZhXmZuQcagprmCksLAQZrMZDQ0NEbc3NDSgtLQ06n1KS0vjOh4A1qxZEzG143Q6UVlZGc9QiZJeLEGIzGI2BU92tgSOiIhSgSRJyHJYkTVEi4FUE1c1nM1mw4IFC7Bp0yblNr/fj02bNqG6ujrqfaqrqyOOB4BXX3110OMBwG63Izs7O+IPERER6VPc0zSrV6/GihUrsHDhQixatAiPPPIIuru7sXLlSgD
A8uXLMW7cOKxduxYAcOutt+KCCy7AQw89hMsvvxxPP/00PvroI/z2t79V95kQERFRSoo7GFm2bBmamppwzz33oL6+HvPmzcPGjRuVItXa2lqYTKGEy9lnn42nnnoKP/rRj3DXXXdh6tSpeP7559ljhIiIiACMoM+IFthnhIiIKPXE+vmduh2UiIiISBcYjBAREZGmGIwQERGRphiMEBERkaYYjBAREZGmGIwQERGRphiMEBERkaYYjBAREZGmGIwQERGRpuJuB68FuUms0+nUeCREREQUK/lze7hm7ykRjHR2dgIAKisrNR4JERERxauzsxM5OTmDfj0l9qbx+/04deoUsrKyIEmSat/X6XSisrISx48f192eN3p+boC+nx+fW2ric0tNfG6JJYRAZ2cnysvLIzbR7S8lMiMmkwkVFRUJ+/7Z2dm6+yWU6fm5Afp+fnxuqYnPLTXxuSXOUBkRGQtYiYiISFMMRoiIiEhThg5G7HY77r33Xtjtdq2Hojo9PzdA38+Pzy018bmlJj635JASBaxERESkX4bOjBAREZH2GIwQERGRphiMEBERkaYYjBAREZGmDB2MPProo6iqqoLD4cDixYuxZcsWrYc0pLVr1+LMM89EVlYWiouLcfXVV2Pfvn0Rx1x44YWQJCniz8033xxxTG1tLS6//HKkp6ejuLgYd9xxB7xe71g+laj+67/+a8DYZ8yYoXy9r68Pt9xyCwoKCpCZmYkvfelLaGhoiPgeyfrcqqqqBjw3SZJwyy23AEit1+2tt97CFVdcgfLyckiShOeffz7i60II3HPPPSgrK0NaWhqWLFmCAwcORBzT2tqK6667DtnZ2cjNzcUNN9yArq6uiGN27NiB8847Dw6HA5WVlfjpT3+a6Kc25HPzeDy48847MXv2bGRkZKC8vBzLly/HqVOnIr5HtNf6wQcfTOrnBgBf//rXB4z70ksvjTgmFV83AFHfe5Ik4Wc/+5lyTLK+brGc99U6N27evBnz58+H3W7HlClTsH79+kQ/vRBhUE8//bSw2WziySefFJ9++qm48cYbRW5urmhoaNB6aINaunSp+P3vfy927doltm/fLj73uc+J8ePHi66uLuWYCy64QNx4442irq5O+dPR0aF83ev1ilmzZoklS5aIjz/+WLz88suisLBQrFmzRounFOHee+8Vp59+esTYm5qalK/ffPPNorKyUmzatEl89NFH4qyzzhJnn3228vVkfm6NjY0Rz+vVV18VAMQbb7whhEit1+3ll18WP/zhD8Wzzz4rAIjnnnsu4usPPvigyMnJEc8//7z45JNPxJVXXikmTpwoent7lWMuvfRSMXfuXPH++++Lt99+W0yZMkVce+21ytc7OjpESUmJuO6668SuXbvEX/7yF5GWliZ+85vfaPbc2tvbxZIlS8SGDRvE3r17RU1NjVi0aJFYsGBBxPeYMGGCuP/++yNey/D3aDI+NyGEWLFihbj00ksjxt3a2hpxTCq+bkKIiOdUV1cnnnzySSFJkjh06JByTLK+brGc99U4Nx4+fFikp6eL1atXi927d4tf/epXwmw2i40bNyb0+ckMG4wsWrRI3HLLLcr/fT6fKC8vF2vXrtVwVPFpbGwUAMSbb76p3HbBBReIW2+9ddD7vPzyy8JkMon6+nrltl//+tciOztbuFyuRA53WPfee6+YO3du1K+1t7cLq9UqnnnmGeW2PXv2CACipqZGCJHcz62/W2+9VUyePFn4/X4hROq+bv1P/H6/X5SWloqf/exnym3t7e3CbreLv/zlL0IIIXbv3i0AiA8//FA55l//+peQJEmcPHlSCCHEY489JvLy8iKe25133immT5+e4GcUEu1Drb8tW7YIAOLYsWPKbRMmTBC/+MUvBr1Psj63FStWiKuuumrQ++jpdbvqqqvERRddFHFbKrxuQgw876t1bvz+978vTj/99IjHWrZsmVi6dGmin5IQQghDTtO43W5s3boVS5Y
sUW4zmUxYsmQJampqNBxZfDo6OgAA+fn5Ebf/+c9/RmFhIWbNmoU1a9agp6dH+VpNTQ1mz56NkpIS5balS5fC6XTi008/HZuBD+HAgQMoLy/HpEmTcN1116G2thYAsHXrVng8nojXbMaMGRg/frzymiX7c5O53W786U9/wje+8Y2IjR9T+XWTHTlyBPX19RGvU05ODhYvXhzxOuXm5mLhwoXKMUuWLIHJZMIHH3ygHHP++efDZrMpxyxduhT79u1DW1vbGD2b4XV0dECSJOTm5kbc/uCDD6KgoABnnHEGfvazn0Wkw5P5uW3evBnFxcWYPn06vvWtb6GlpUX5ml5et4aGBrz00ku44YYbBnwtFV63/ud9tc6NNTU1Ed9DPmasPhNTYqM8tTU3N8Pn80W8MABQUlKCvXv3ajSq+Pj9ftx2220455xzMGvWLOX2r33ta5gwYQLKy8uxY8cO3Hnnndi3bx+effZZAEB9fX3U5y1/TUuLFy/G+vXrMX36dNTV1eG+++7Deeedh127dqG+vh42m23ASb+kpEQZdzI/t3DPP/882tvb8fWvf125LZVft3DyWKKNNfx1Ki4ujvi6xWJBfn5+xDETJ04c8D3kr+Xl5SVk/PHo6+vDnXfeiWuvvTZiE7L//M//xPz585Gfn4/33nsPa9asQV1dHR5++GEAyfvcLr30Unzxi1/ExIkTcejQIdx111247LLLUFNTA7PZrJvX7Q9/+AOysrLwxS9+MeL2VHjdop331To3DnaM0+lEb28v0tLSEvGUFIYMRvTglltuwa5du/DOO+9E3H7TTTcp/549ezbKyspw8cUX49ChQ5g8efJYDzMul112mfLvOXPmYPHixZgwYQL++te/JvyNMJaeeOIJXHbZZSgvL1duS+XXzYg8Hg+uueYaCCHw61//OuJrq1evVv49Z84c2Gw2/Md//AfWrl2b1G25v/rVryr/nj17NubMmYPJkydj8+bNuPjiizUcmbqefPJJXHfddXA4HBG3p8LrNth5Xw8MOU1TWFgIs9k8oNq4oaEBpaWlGo0qdqtWrcKLL76IN954AxUVFUMeu3jxYgDAwYMHAQClpaVRn7f8tWSSm5uLadOm4eDBgygtLYXb7UZ7e3vEMeGvWSo8t2PHjuG1117DN7/5zSGPS9XXTR7LUO+t0tJSNDY2Rnzd6/WitbU1JV5LORA5duwYXn311WG3Zl+8eDG8Xi+OHj0KILmfW7hJkyahsLAw4ncwlV83AHj77bexb9++Yd9/QPK9boOd99U6Nw52THZ29phcDBoyGLHZbFiwYAE2bdqk3Ob3+7Fp0yZUV1drOLKhCSGwatUqPPfcc3j99dcHpAyj2b59OwCgrKwMAFBdXY2dO3dGnFTkE+rMmTMTMu6R6urqwqFDh1BWVoYFCxbAarVGvGb79u1DbW2t8pqlwnP7/e9/j+LiYlx++eVDHpeqr9vEiRNRWloa8To5nU588MEHEa9Te3s7tm7dqhzz+uuvw+/3K0FYdXU13nrrLXg8HuWYV199FdOnT9c01S8HIgcOHMBrr72GgoKCYe+zfft2mEwmZYojWZ9bfydOnEBLS0vE72Cqvm6yJ554AgsWLMDcuXOHPTZZXrfhzvtqnRurq6sjvod8zJh9Jo5JmWwSevrpp4Xdbhfr168Xu3fvFjfddJPIzc2NqDZONt/61rdETk6O2Lx5c8Tys56eHiGEEAcPHhT333+/+Oijj8SRI0fECy+8ICZNmiTOP/985XvIS7wuueQSsX37drFx40ZRVFSUFMtfv/vd74rNmzeLI0eOiHfffVcsWbJEFBYWisbGRiFEYPna+PHjxeuvvy4++ugjUV1dLaqrq5X7J/NzEyKwYmv8+PHizjvvjLg91V63zs5O8fHHH4uPP/5YABAPP/yw+Pjjj5UVJQ8++KDIzc0VL7zwgtixY4e46qqroi7tPeOMM8QHH3wg3nnnHTF16tSIJaLt7e2ipKREXH/99WL
Xrl3i6aefFunp6QlfRjnUc3O73eLKK68UFRUVYvv27RHvQXlFwnvvvSd+8YtfiO3bt4tDhw6JP/3pT6KoqEgsX748qZ9bZ2en+N73vidqamrEkSNHxGuvvSbmz58vpk6dKvr6+pTvkYqvm6yjo0Okp6eLX//61wPun8yv23DnfSHUOTfKS3vvuOMOsWfPHvHoo49yae9Y+dWvfiXGjx8vbDabWLRokXj//fe1HtKQAET98/vf/14IIURtba04//zzRX5+vrDb7WLKlCnijjvuiOhXIYQQR48eFZdddplIS0sThYWF4rvf/a7weDwaPKNIy5YtE2VlZcJms4lx48aJZcuWiYMHDypf7+3tFd/+9rdFXl6eSE9PF1/4whdEXV1dxPdI1ucmhBCvvPKKACD27dsXcXuqvW5vvPFG1N/DFStWCCECy3vvvvtuUVJSIux2u7j44osHPOeWlhZx7bXXiszMTJGdnS1WrlwpOjs7I4755JNPxLnnnivsdrsYN26cePDBBzV9bkeOHBn0PSj3i9m6datYvHixyMnJEQ6HQ5x22mnigQceiPhAT8bn1tPTIy655BJRVFQkrFarmDBhgrjxxhsHXJyl4usm+81vfiPS0tJEe3v7gPsn8+s23HlfCPXOjW+88YaYN2+esNlsYtKkSRGPkWiSEEIkKOlCRERENCxD1owQERFR8mAwQkRERJpiMEJERESaYjBCREREmmIwQkRERJpiMEJERESaYjBCREREmmIwQkRERJpiMEJERESaYjBCREREmmIwQkRERJpiMEJERESa+v8BlDK75sPdWvUAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "prob_chunks = VAD.get_speech_prob_file(audio_file)\n", + "plt.plot(prob_chunks.squeeze())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zrPK_BApAb2q" + }, + "source": [ + "As expected, we have high values for speech regions and low values for the music one.\n", + "\n", + "The `get_speech_prob_file function` is designed to process **long audio recordings**. It computes posterior probabilities on large chunks (e.g., 30 sec), that are read sequentially to avoid storing long signals in memory.\n", + "Each large chunk is, in turn, split into smaller ones (e.g., 10 seconds)\n", + "that are processed in parallel.\n", + "\n", + "You can tune the `large_chunk_size` and `small_chunk_size` based on your memory constraints. If you have enough memory, you can use it to store a larger chunk of the signal (e.g., 5 minutes). This can be done by increasing `large_chunk_size` and will make the VAD (slightly) faster.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sCsvTNUAChzd" + }, + "source": [ + "### 2- Apply a Threshold\n", + "\n", + "Now, we can detect the candidate speech segments by applying a threshold.\n", + "\n", + "To do it, you can use the following function:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 467 + }, + "executionInfo": { + "elapsed": 637, + "status": "ok", + "timestamp": 1708531554324, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "DjD-ooAW9tl8", + "outputId": "2a3c54e9-b40a-4c18-aa2b-b7532b0f05bb" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiMAAAGdCAYAAADAAnMpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAro0lEQVR4nO3dfXRU1aH+8WcSyIQUwouRCYFAQC1IeQ8lN1pfWnMN1IXa9q5SZAlNFa+WrIumtTS+kKK9hluvyF29KK0Vca3WQtultL9CcWEk11KilEBqqULlzVAhAaRJeE0g2b8/bMZOyExyYsKePfP9rDVrwZlzZvY5O7PnmX32PsdnjDECAACwJMF2AQAAQHwjjAAAAKsIIwAAwCrCCAAAsIowAgAArCKMAAAAqwgjAADAKsIIAACwqpftAnRGS0uLDh8+rH79+snn89kuDgAA6ARjjE6ePKmMjAwlJITv/3AijBw+fFiZmZm2iwEAALrg0KFDGjZsWNjnnQgj/fr1k/TRzqSmplouDQAA6IyGhgZlZmYGv8fDcSKMtJ6aSU1NJYwAAOCYjoZYMIAVAABYRRgBAABWEUYAAIBVhBEAAGAVYQQAAFhFGAEAAFYRRgAAgFWEEQAAYBVhBAAAWOU5jLzxxhuaOXOmMjIy5PP5tG7dug63KS8v15QpU+T3+3XllVdq9erVXSgqAACIRZ7DyOnTpzVx4kStWLGiU+sfOHBAt9xyiz7/+c+rqqpK999/v+6++269+uqrngsLAABij+d708yYMUMzZszo9PorV67UyJEj9dRTT0mSrr76am3ZskVPP/208vPzvb49AACIMT1+o7yKigrl5eWFLMvPz9f9998fdpvGxkY1NjYG/9/Q0NBTxbvI/mOntPaPh9TU3HLJ3hOXxmWfStI3PjdSKUlO3B8SHTjf3KJVWw6opuGc7aLAgtGBfvratOG2ixHVjjac04sVB3WmqblT63/j2pHKHJTSw6VqX4+3yjU1NQoEAiHLAoGAGhoadPbsWfXp0+eibUpLS7VkyZKeLlq7lm36q3779hEr742eN3RgH31p8jDbxUA32LrvQ5X+brftYsCia69Ms/bl6YIXth7Us+X7Or3+zIkZsRtGuqK4uFhFRUXB/zc0NCgzM/OSvHdrgvzCmMG6eki/S/Ke6Hm/+3ON9h8/rdONnfuFgOh3pvGCJGlI/2R9ecpQy6XBpfT8lgM6d75FZ8/zeY6k9TMydcRA5Ywa1OH6gdTkni5SWD0eRtLT01VbWxuyrLa2Vqmpqe32ikiS3++X3+/v6aJFNH1cur469dIEIPS8vUdPaf/x07aLgR4wbGAfPZg/xnYxcAmt2XZI58432S6GM6654jIV3TzadjEi6vHrjOTm5qqsrCxk2aZNm5Sbm9vTbw0AABzgOYycOnVKVVVVqqqqkvTR1N2qqipVV1dL+ugUy9y5c4Pr33vvvdq/f7++853vaPfu3XrmmWf0i1/8Qg888ED37AEAAHCa5zCyfft2TZ48WZMnT5YkFRUVafLkyVq8eLEk6ciRI8FgIkkjR47U+vXrtWnTJk2cOFFPPfWUfvKTnzCtFwAASOrCmJEbb7xRxpiwz7d3ddUbb7xRO3fu9PpWVkTaN7iP2o0d1CVoriNz6fBwbxoAAGAVYSQMn+0CoFv5qNGYRd3GHx9V7o0DB4wwAgAArCKMAAAAqwgjAADAKsJIGy6NPkYXMPw+ZlCVMLTYEbn0GSGMAAAAqwgjYfgcGH2MzqM6Yxh1G4eodC9cOFqEEQAAYBVhBAAAWEUYAQAAVhFG2nBp9DG8o3pjBzMpQHsdmUufEcIIAACwijAShgujj9F5zKaJXVRt/OHz7I0Lx4swAgAArCKMAAAAqwgjbbgz3AcA4hsDWCNz6fgQRhBXXPpwIjLqEogdhBEAAGAVYSQMF0Yfo/N8zLmIWXxW4w9V7o0L7R9hBAAAWEUYAQAAVhFG2jCMigMAJ7h0uXMbXDo6hBHEFcJ
m7KAmgdhBGEF8iP7xW+giFwbnAYiMMBIGI/QBIDrRPnvjwvEijAAAAKsIIwAAwCrCCADASYxHj8yl40MYQVxx6LOJDjAzCogdhBHEBQfGb6GLXBicByAywkgYTBcEgOhE++yNC0eLMAIAAKwijAAAAKsII20wJg4AEBvc+UIjjCCuEDYBIPoQRhAXfEy5iFlULeA+wkgYNHAAEJ1on71x4XgRRgAAgFWEEQAAYBVhpA3j0OhjAIhnDEiPzKXjQxhBXHHos4kOuNTQAoiMMIK44MD4LXQRlwYH3EcYAQA4hfjpjQuXNiCMAAAAqwgjAADAKsJIGwyKAwA3MPsxMpe+zwgjiCvGpU8nIuKLCIgdhBHEBQfGb6GLqFvAfYSRMFwYfQwA8Yj2OfYQRgAAgFWEEQAAYBVhpA3GNwKAG2ivI3NpkDdhBICT+CICYgdhBHGB4W4AEL0II2Hw5QUAiAUuTD4ijAAAAKu6FEZWrFihrKwsJScnKycnR9u2bYu4/vLlyzV69Gj16dNHmZmZeuCBB3Tu3LkuFRgAAMQWz2Fk7dq1KioqUklJiXbs2KGJEycqPz9fR48ebXf9l156Sd/97ndVUlKid999V88//7zWrl2rhx566BMXvie4NPoYAOIZrXVkLg3y9hxGli1bpvnz56ugoEBjx47VypUrlZKSolWrVrW7/tatW3XttdfqjjvuUFZWlm6++WbNnj27w94UoCe49OFEZNQlEDs8hZGmpiZVVlYqLy/v4xdISFBeXp4qKira3eaaa65RZWVlMHzs379fGzZs0Be/+MWw79PY2KiGhoaQB/BJcPno2EXdAu7r5WXl48ePq7m5WYFAIGR5IBDQ7t27293mjjvu0PHjx/W5z31OxhhduHBB9957b8TTNKWlpVqyZImXonU72jcAiE60z974HJgf2uOzacrLy/XEE0/omWee0Y4dO/Tyyy9r/fr1evzxx8NuU1xcrPr6+uDj0KFDPV1MAABgiaeekbS0NCUmJqq2tjZkeW1trdLT09vd5tFHH9Wdd96pu+++W5I0fvx4nT59Wvfcc48efvhhJSRcnIf8fr/8fr+XonUbzkMDgBsMDXZELh0dTz0jSUlJys7OVllZWXBZS0uLysrKlJub2+42Z86cuShwJCYmSuIPCQAAeOwZkaSioiLNmzdPU6dO1bRp07R8+XKdPn1aBQUFkqS5c+dq6NChKi0tlSTNnDlTy5Yt0+TJk5WTk6O9e/fq0Ucf1cyZM4OhBLhUmLodO6hJIHZ4DiOzZs3SsWPHtHjxYtXU1GjSpEnauHFjcFBrdXV1SE/II488Ip/Pp0ceeUQffPCBLr/8cs2cOVP/+Z//2X17AXQg+odvoauoW8B9nsOIJBUWFqqwsLDd58rLy0PfoFcvlZSUqKSkpCtvZY0Lo48BIB4xm8YbF44X96YBAABWEUba4Dw0ALiB9joyl+aIEEYAAIBVhBHEh3+cM3XplwIia700gAvnwwFERhgBAABWEUbC4NcWAEQnZjt648LRIowAAACrCCNtMaYAAJzAGLDIXLriNGEEAABYRRhBXGg9x+zO7wR0pLUuXTgfDiAywggAALCKMBIGv7YAIDox29EbF44XYQQAAFhFGGnDpdHHABDfaK8jcujwEEYAAIBVhBHEBR/3pok9/6hLnwsnxAFERBgBAABWEUbC4McWAEQnmmdvXLiXD2EEAABYRRhpgzEFAOAG2uvIXDo8hBEAAGAVYQRxofWMKdeRiR2tdRn9Z8MBdIQwAgAArCKMhMXvLQCIRlxbxhsXDhdhBAAAWEUYaYMRBQDgBtrryIxD040IIwAAwCrCCOIC96aJPSZ4bxq75QDwyRFGAACAVYSRMPi1BQDRieY59hBGAACAVYSRNlwafQwA8YzmOjKXDg9hBAAAWEUYQVzwcZY55nz8q4+6BVxHGAEAAFYRRsLgtxYARCkaaE9cuJcPYQQAAFhFGGnDpdHHABDPmP0YmUuHhzACAAC
sIowgLnx8bxqHfiogIu5NA8QOwggAALCKMBKGC6OPASAe0Tp748LxIowAAACrCCNtMKQAANxAcx2ZS8eHMAIAAKwijCAufDybxm450H3MP373uXA+HEBkhBEAAGAVYSQMfm0BQHRitqM3Lhwuwkgb9OIDgBs47RqZSxd5JIwAAACrCCOIEw70U6JLXOiCBhAZYQRxxZ1OS3TEoR5oAB0gjAAAAKsII2HQ9QsA0Ynm2RsXjhdhpC36fgHACYYTrxG5dHQIIwAAwKouhZEVK1YoKytLycnJysnJ0bZt2yKuX1dXpwULFmjIkCHy+/369Kc/rQ0bNnSpwEBXcNotdvmc6IQGEEkvrxusXbtWRUVFWrlypXJycrR8+XLl5+drz549Gjx48EXrNzU16V//9V81ePBg/epXv9LQoUP1/vvva8CAAd1RfsATzsLFDqoSiB2ew8iyZcs0f/58FRQUSJJWrlyp9evXa9WqVfrud7970fqrVq3SiRMntHXrVvXu3VuSlJWV9clKDQAAYoan0zRNTU2qrKxUXl7exy+QkKC8vDxVVFS0u81vfvMb5ebmasGCBQoEAho3bpyeeOIJNTc3h32fxsZGNTQ0hDwuNbr1ASA60T5748K9fDyFkePHj6u5uVmBQCBkeSAQUE1NTbvb7N+/X7/61a/U3NysDRs26NFHH9VTTz2l73//+2Hfp7S0VP379w8+MjMzvRTzE6HrFwAcQYMdmUPHp8dn07S0tGjw4MH68Y9/rOzsbM2aNUsPP/ywVq5cGXab4uJi1dfXBx+HDh3q6WICAABLPI0ZSUtLU2Jiompra0OW19bWKj09vd1thgwZot69eysxMTG47Oqrr1ZNTY2ampqUlJR00TZ+v19+v99L0YCIor+TEl3lQA80gA546hlJSkpSdna2ysrKgstaWlpUVlam3Nzcdre59tprtXfvXrW0tASX/fWvf9WQIUPaDSJAT+IiSTGEqVFAzPB8mqaoqEjPPfecXnzxRb377ru67777dPr06eDsmrlz56q4uDi4/n333acTJ05o4cKF+utf/6r169friSee0IIFC7pvLwAAgLM8T+2dNWuWjh07psWLF6umpkaTJk3Sxo0bg4Naq6urlZDwccbJzMzUq6++qgceeEATJkzQ0KFDtXDhQi1atKj79qIHcCElAIhOtM/euHAq03MYkaTCwkIVFha2+1x5eflFy3Jzc/Xmm2925a0uOXp+AcANNNeRuXRamnvTAAAAqwgjiAsudFOia6hbwH2EEcQVTsPFDqoSiB2EEQAAYBVhJBy6fgEgKnFqzhsXDhdhpA2XRh8DQDzjtGtkLh0fwggAALCKMIK4wEWSYhd1C7iPMIK44lCvJTrgUhc0gMgIIwAAwCrCSBh0/AIAYoID048II23Q9QsAbmD2Y2QufZ8RRgAAgFWEEcSFYC+lSz8VEJFprcvo74EG0AHCCAAAsIowAgAArCKMhOFzYPQxAMQj2mdvXDhahJE2GFIAAG6gvY7MpdlGhBEAAGAVYQRxITiZxmop0J1a69KFLmgAkRFGAACAVYQRAABgFWEkDLp+ASA60T5748LkI8JIG4wpAAA30F5H5tJsI8IIAACwijCCuNB6kSSXfikgsuCtaVzogwYQEWEEAABYRRgJgx9bABCdaJ+98Tkw5Jcw0oahHx8AnEB7HZlLR4cwAgAArCKMAHBa9HdAA+gIYQRxxaW7WCIyahKIHYQRAABgFWEkDBdGHwNAPGI2jTcuHC/CCADASZyqi8ylyUaEEQAAYBVhBHHBhW5KdA11C7iPMIK44lK3JSLjgldA7CCMAAAAqwgjYdD1CwDRidmO3rhwtAgjbdDzCwCOoL3ugDsHiDACAACsIowgLtCtG7uoWcB9hBHEFXc6LQEgfhBGAACAVYSRMOj6BYDoxGxHb1w4XoSRNrjFPAC4gfY6MpdmhxJGAACAVYQRxAUXuinRNT4qF3AeYQRxxaVuS0RGXQKxgzACAAC
sIoyEQ88vAEQlmmdvXLjoI2GkDbp+AcANtNeRuXR4CCMAAMAqwgjiQvR3UqKrqFvAfYQRxBUukhQ7qEsgdnQpjKxYsUJZWVlKTk5WTk6Otm3b1qnt1qxZI5/Pp9tvv70rbwsAAGKQ5zCydu1aFRUVqaSkRDt27NDEiROVn5+vo0ePRtzu4MGD+va3v63rrruuy4W9lFwYfQwAcYkL3XnjwOHyHEaWLVum+fPnq6CgQGPHjtXKlSuVkpKiVatWhd2mublZc+bM0ZIlSzRq1KhPVOCeRscvALiB2TSRGYcOkKcw0tTUpMrKSuXl5X38AgkJysvLU0VFRdjtHnvsMQ0ePFh33XVXp96nsbFRDQ0NIQ8AABCbPIWR48ePq7m5WYFAIGR5IBBQTU1Nu9ts2bJFzz//vJ577rlOv09paan69+8ffGRmZnopJnARenVjGHULOK9HZ9OcPHlSd955p5577jmlpaV1ervi4mLV19cHH4cOHerBUiKuuNNriQ441AMNoAO9vKyclpamxMRE1dbWhiyvra1Venr6Revv27dPBw8e1MyZM4PLWlpaPnrjXr20Z88eXXHFFRdt5/f75ff7vRQNAAA4ylPPSFJSkrKzs1VWVhZc1tLSorKyMuXm5l60/pgxY/TnP/9ZVVVVwcett96qz3/+86qqqorq0y906wNAdKJ59saF4+WpZ0SSioqKNG/ePE2dOlXTpk3T8uXLdfr0aRUUFEiS5s6dq6FDh6q0tFTJyckaN25cyPYDBgyQpIuWRwuXRh8DQDyjtY7MpePjOYzMmjVLx44d0+LFi1VTU6NJkyZp48aNwUGt1dXVSkjgwq4AAKBzPIcRSSosLFRhYWG7z5WXl0fcdvXq1V15S+AT8XHeLWZxgULAfXRhIK641G2JyKhLIHYQRgAAgFWEkTDo+AWA6MRZV29cOE1NGGmDrl8AcAOzHyNz6fAQRgAAgFWEEcSF6O+kRFc50AMNoAOEEcQVunVjB1UJxA7CCAAAsIow0tY/fm25MPoYAOJRa+tM51hkrcfHhW8zwggAALCKMIL44MJPA3QJVQu4jzACAACsIowgrjADI3YYRgwAMYMwAgAArCKMtBEcfcyJaACISq2zHenpjKz1ukoufJ8RRgAAgFWEEcQFH3MuYpYLv/oAREYYAQAAVhFGEFc4xRw7GC8AxA7CCAAAsIow0kZw9LHlcgAA2vdx+0z3WGe4MK6KMAIAAKwijCAuuPDLAF3DTCnAfYQRAABgFWEEcYUZGAAQfQgjAADAKsJIG9ybBgCiW2v7TE9nZK3Hx4VxVYQRAABgFWEEcSH6fxegq+jFBNxHGAEAAFYRRhBXDFdsjBmGAQNAzCCMAAAAqwgjbXz8Y4sT0QAQjVpnh9A3FllrT7AL46oIIwAAwCrCCOKCC78M0DXULeA+wggAALCKMIK4wgSM2EFdArGDMAIAAKwijLTh0uhjAIhL3JumU1w6PoQRAABgFWEEccGFu1aiq6hbwHWEEQAAYBVhBICTHDodDqADhBEAAGAVYaSN1tHHnIUGgOjU2j5zF+7Igt9nDkwPJYwAAACrCCOICw78MEAXUbeA+wgjAADAKsII4opx6ZKEiIiqBGIHYQQAAFhFGGnDpdHHABCPfNybplOC91qzXI7OIIwAAACrCCOICy78MkDXULeA+wgjAADAKsII4gqnmGMHV98EYgdhBADgJOJoZC4N8O1SGFmxYoWysrKUnJysnJwcbdu2Ley6zz33nK677joNHDhQAwcOVF5eXsT1owXnoQEgOvlooT1xYXKo5zCydu1aFRUVqaSkRDt27NDEiROVn5+vo0ePtrt+eXm5Zs+erc2bN6uiokKZmZm6+eab9cEHH3ziwgOd5sKnEV1C1QLu8xxGli1bpvnz56ugoEBjx47VypUrlZKSolWrVrW7/s9+9jN985vf1KRJkzRmzBj95Cc/UUtLi8rKyj5x4QEAgPs8hZGmpiZVVlYqLy/v4xdISFB
eXp4qKio69RpnzpzR+fPnNWjQoLDrNDY2qqGhIeQBAABik6cwcvz4cTU3NysQCIQsDwQCqqmp6dRrLFq0SBkZGSGBpq3S0lL1798/+MjMzPRSTCAslwZ0ITLqEogdl3Q2zdKlS7VmzRq98sorSk5ODrtecXGx6uvrg49Dhw5dsjJyIzUAcAPtdWQuHZ1eXlZOS0tTYmKiamtrQ5bX1tYqPT094rb//d//raVLl+q1117ThAkTIq7r9/vl9/u9FK3bMSgOAKIT7bM3Lsw+8tQzkpSUpOzs7JDBp62DUXNzc8Nu94Mf/ECPP/64Nm7cqKlTp3a9tEAXRf9HEV3lQkMLIDJPPSOSVFRUpHnz5mnq1KmaNm2ali9frtOnT6ugoECSNHfuXA0dOlSlpaWSpP/6r//S4sWL9dJLLykrKys4tqRv377q27dvN+4KAABwkecwMmvWLB07dkyLFy9WTU2NJk2apI0bNwYHtVZXVysh4eMOl2effVZNTU36t3/7t5DXKSkp0fe+971PVnoAAOA8z2FEkgoLC1VYWNjuc+Xl5SH/P3jwYFfeAugR3M8kdlCTQOzg3jRt0MABAGKCQ19ohJEwGBQHANGJ2TTeuHC8CCOICy58GNE11C3gPsIIAACwijACAACsIowgrnD16BhCZQIxgzDSBu0bALiB9joyly5lQBgJg0FxABCdmO3ojQtHizCCuEDjFbuoWcB9hBEAAGAVYQQAAFhFGEFccWc4FzpCXQKxgzDShkujjwEgntFeR+bSbCPCCADAKcx29MaF40UYQVxw4cOIrvFRuYDzCCMAAMAqwggAALCKMIK44tKALkRGXQKxgzDSBg0cALiB9joylw4PYSQMxsQBAGJD9H+hEUYQF6L/owgA8YswAgAArCKMAAAAqwgjiDMuDelCJFwKHIgdhJE2aN4AwA3MponMOHSACCNh+BjyCABRiVsAeOPC4SKMIC648GFE11C3gPsIIwAAwCrCCAAAsIowgrji0HgudIC6BGIHYaQNGjgAcAPNdWQuHR/CSBgMigOA6ETz7I0Lx4swgrjAVMDYxTR8wH2EEQAAYBVhBAAAWEUYuYhLQ37gFQOUYwdVCZcud26DS4eHMAIAAKwijITBeEfADXxW4w917o0LA/gJIwAAwCrCCAAAsIowAgAArCKMtOHS6GN4Z5iDETP4rII/gchcOj6EEQAAYBVhJAwuMR1bHBhMji6iauMPde6NC8eLMAIAAKwijAAAAKsIIwAAwCrCSBsujT6Gd8zAiB3MjAJ/Ah1wqMEjjAAAAKsII2Ew+yK2MDsqdvFZjT8u3GslmrhwuAgjAADAKsIIAACwijACAACsIoy0YRwafQzvqN0YQmXGPWZURebS0SGMAAAAqwgjYTgw+BgeuDCaHF3DzIr4Q41748JHpEthZMWKFcrKylJycrJycnK0bdu2iOv/8pe/1JgxY5ScnKzx48drw4YNXSosAACIPZ7DyNq1a1VUVKSSkhLt2LFDEydOVH5+vo4ePdru+lu3btXs2bN11113aefOnbr99tt1++23a9euXZ+48AAAwH2ew8iyZcs0f/58FRQUaOzYsVq5cqVSUlK0atWqdtf/n//5H02fPl0PPvigrr76aj3++OOaMmWK/vd///cTFx4AALivl5eVm5qaVFlZqeLi4uCyhIQE5eXlqaKiot1tKioqVFRUFLIsPz9f69atC/s+jY2NamxsDP6/oaHBSzE77fktB/S3v58JWXa6qblH3gvRoepQnZb8v7/YLga6wR8PnrBdBFj2mz8d1u6ak7aLEbUO1521XYRO8xRGjh8/rubmZgUCgZDlgUBAu3fvbnebmpqadtevqakJ+z6lpaVasmSJl6J1yfq3D2tHdV27z33K7+nQIMr1S/6oPvcePaW9R09ZLg26U18+q3Gn9fP8h70f6g97P7RcmujX19/bdhE6FJWf4uLi4pDelIaGBmVmZnb7+3wle5hyr7jsouWj01OVMaB
Pt78f7Llt0lCdaWxW3dkm20VBN/qUv5dmf3a47WLgEvvWzaOVlfYpnW9usV2UqDekfx9NHTHQdjE65CmMpKWlKTExUbW1tSHLa2trlZ6e3u426enpntaXJL/fL7/f76VoXTInZ0SPvweiQ19/L82/fpTtYgDoBpmDUnR/3qdtFwPdyNMA1qSkJGVnZ6usrCy4rKWlRWVlZcrNzW13m9zc3JD1JWnTpk1h1wcAAPHF82maoqIizZs3T1OnTtW0adO0fPlynT59WgUFBZKkuXPnaujQoSotLZUkLVy4UDfccIOeeuop3XLLLVqzZo22b9+uH//4x927JwAAwEmew8isWbN07NgxLV68WDU1NZo0aZI2btwYHKRaXV2thISPO1yuueYavfTSS3rkkUf00EMP6aqrrtK6des0bty47tsLAADgLJ9x4M5wDQ0N6t+/v+rr65Wammq7OAAAoBM6+/3NvWkAAIBVhBEAAGAVYQQAAFhFGAEAAFYRRgAAgFWEEQAAYBVhBAAAWEUYAQAAVhFGAACAVZ4vB29D60ViGxoaLJcEAAB0Vuv3dkcXe3cijJw8eVKSlJmZabkkAADAq5MnT6p///5hn3fi3jQtLS06fPiw+vXrJ5/P122v29DQoMzMTB06dCjm7nkTy/smxfb+sW9uYt/cxL71LGOMTp48qYyMjJCb6LblRM9IQkKChg0b1mOvn5qaGnN/hK1ied+k2N4/9s1N7Jub2LeeE6lHpBUDWAEAgFWEEQAAYFVchxG/36+SkhL5/X7bRel2sbxvUmzvH/vmJvbNTexbdHBiACsAAIhdcd0zAgAA7COMAAAAqwgjAADAKsIIAACwKq7DyIoVK5SVlaXk5GTl5ORo27ZttosUUWlpqT772c+qX79+Gjx4sG6//Xbt2bMnZJ0bb7xRPp8v5HHvvfeGrFNdXa1bbrlFKSkpGjx4sB588EFduHDhUu5Ku773ve9dVPYxY8YEnz937pwWLFigyy67TH379tVXvvIV1dbWhrxGtO5bVlbWRfvm8/m0YMECSW7V2xtvvKGZM2cqIyNDPp9P69atC3neGKPFixdryJAh6tOnj/Ly8vTee++FrHPixAnNmTNHqampGjBggO666y6dOnUqZJ23335b1113nZKTk5WZmakf/OAHPb1rEfft/PnzWrRokcaPH69PfepTysjI0Ny5c3X48OGQ12ivrpcuXRrV+yZJX//61y8q9/Tp00PWcbHeJLX72fP5fHryySeD60RrvXWm3e+utrG8vFxTpkyR3+/XlVdeqdWrV/f07n3MxKk1a9aYpKQks2rVKvOXv/zFzJ8/3wwYMMDU1tbaLlpY+fn55oUXXjC7du0yVVVV5otf/KIZPny4OXXqVHCdG264wcyfP98cOXIk+Kivrw8+f+HCBTNu3DiTl5dndu7caTZs2GDS0tJMcXGxjV0KUVJSYj7zmc+ElP3YsWPB5++9916TmZlpysrKzPbt282//Mu/mGuuuSb4fDTv29GjR0P2a9OmTUaS2bx5szHGrXrbsGGDefjhh83LL79sJJlXXnkl5PmlS5ea/v37m3Xr1pk//elP5tZbbzUjR440Z8+eDa4zffp0M3HiRPPmm2+a3//+9+bKK680s2fPDj5fX19vAoGAmTNnjtm1a5f5+c9/bvr06WN+9KMfWdu3uro6k5eXZ9auXWt2795tKioqzLRp00x2dnbIa4wYMcI89thjIXX5z5/RaNw3Y4yZN2+emT59eki5T5w4EbKOi/VmjAnZpyNHjphVq1YZn89n9u3bF1wnWuutM+1+d7SN+/fvNykpKaaoqMi888475oc//KFJTEw0Gzdu7NH9axW3YWTatGlmwYIFwf83NzebjIwMU1paarFU3hw9etRIMv/3f/8XXHbDDTeYhQsXht1mw4YNJiEhwdTU1ASXPfvssyY1NdU0Njb2ZHE7VFJSYiZOnNjuc3V1daZ3797ml7/8ZXDZu+++aySZiooKY0x
071tbCxcuNFdccYVpaWkxxrhbb20b/paWFpOenm6efPLJ4LK6ujrj9/vNz3/+c2OMMe+8846RZP74xz8G1/nd735nfD6f+eCDD4wxxjzzzDNm4MCBIfu2aNEiM3r06B7eo4+196XW1rZt24wk8/777weXjRgxwjz99NNht4nWfZs3b5657bbbwm4TS/V22223mS984Qshy1yoN2Mubve7q238zne+Yz7zmc+EvNesWbNMfn5+T++SMcaYuDxN09TUpMrKSuXl5QWXJSQkKC8vTxUVFRZL5k19fb0kadCgQSHLf/aznyktLU3jxo1TcXGxzpw5E3yuoqJC48ePVyAQCC7Lz89XQ0OD/vKXv1yagkfw3nvvKSMjQ6NGjdKcOXNUXV0tSaqsrNT58+dD6mzMmDEaPnx4sM6ifd9aNTU16ac//am+8Y1vhNz40eV6a3XgwAHV1NSE1FP//v2Vk5MTUk8DBgzQ1KlTg+vk5eUpISFBb731VnCd66+/XklJScF18vPztWfPHv3973+/RHvTsfr6evl8Pg0YMCBk+dKlS3XZZZdp8uTJevLJJ0O6w6N538rLyzV48GCNHj1a9913nz788MPgc7FSb7W1tVq/fr3uuuuui55zod7atvvd1TZWVFSEvEbrOpfqO9GJG+V1t+PHj6u5uTmkYiQpEAho9+7dlkrlTUtLi+6//35de+21GjduXHD5HXfcoREjRigjI0Nvv/22Fi1apD179ujll1+WJNXU1LS7363P2ZSTk6PVq1dr9OjROnLkiJYsWaLrrrtOu3btUk1NjZKSki5q9AOBQLDc0bxv/2zdunWqq6vT17/+9eAyl+vtn7WWpb2y/nM9DR48OOT5Xr16adCgQSHrjBw58qLXaH1u4MCBPVJ+L86dO6dFixZp9uzZITch+4//+A9NmTJFgwYN0tatW1VcXKwjR45o2bJlkqJ336ZPn64vf/nLGjlypPbt26eHHnpIM2bMUEVFhRITE2Om3l588UX169dPX/7yl0OWu1Bv7bX73dU2hlunoaFBZ8+eVZ8+fXpil4LiMozEggULFmjXrl3asmVLyPJ77rkn+O/x48dryJAhuummm7Rv3z5dccUVl7qYnsyYMSP47wkTJignJ0cjRozQL37xix7/IFxKzz//vGbMmKGMjIzgMpfrLR6dP39eX/3qV2WM0bPPPhvyXFFRUfDfEyZMUFJSkv793/9dpaWlUX1Z7q997WvBf48fP14TJkzQFVdcofLyct10000WS9a9Vq1apTlz5ig5OTlkuQv1Fq7djwVxeZomLS1NiYmJF402rq2tVXp6uqVSdV5hYaF++9vfavPmzRo2bFjEdXNyciRJe/fulSSlp6e3u9+tz0WTAQMG6NOf/rT27t2r9PR0NTU1qa6uLmSdf64zF/bt/fff12uvvaa777474nqu1ltrWSJ9ttLT03X06NGQ5y9cuKATJ044UZetQeT999/Xpk2bOrw1e05Oji5cuKCDBw9Kiu59+2ejRo1SWlpayN+gy/UmSb///e+1Z8+eDj9/UvTVW7h2v7vaxnDrpKamXpIfg3EZRpKSkpSdna2ysrLgspaWFpWVlSk3N9diySIzxqiwsFCvvPKKXn/99Yu6DNtTVVUlSRoyZIgkKTc3V3/+859DGpXWBnXs2LE9Uu6uOnXqlPbt26chQ4YoOztbvXv3DqmzPXv2qLq6OlhnLuzbCy+8oMGDB+uWW26JuJ6r9TZy5Eilp6eH1FNDQ4PeeuutkHqqq6tTZWVlcJ3XX39dLS0twRCWm5urN954Q+fPnw+us2nTJo0ePdpqV39rEHnvvff02muv6bLLLutwm6qqKiUkJARPcUTrvrX1t7/9TR9++GHI36Cr9dbq+eefV3Z2tiZOnNjhutFSbx21+93VNubm5oa8Rus6l+w78ZIMk41Ca9asMX6/36xevdq888475p577jEDBgwIGW0cbe677z7Tv39/U15eHjL97MyZM8YYY/b
u3Wsee+wxs337dnPgwAHz61//2owaNcpcf/31wddoneJ18803m6qqKrNx40Zz+eWXR8X0129961umvLzcHDhwwPzhD38weXl5Ji0tzRw9etQY89H0teHDh5vXX3/dbN++3eTm5prc3Nzg9tG8b8Z8NGNr+PDhZtGiRSHLXau3kydPmp07d5qdO3caSWbZsmVm586dwRklS5cuNQMGDDC//vWvzdtvv21uu+22dqf2Tp482bz11ltmy5Yt5qqrrgqZIlpXV2cCgYC58847za5du8yaNWtMSkpKj0+jjLRvTU1N5tZbbzXDhg0zVVVVIZ/B1hkJW7duNU8//bSpqqoy+/btMz/96U/N5ZdfbubOnRvV+3by5Enz7W9/21RUVJgDBw6Y1157zUyZMsVcddVV5ty5c8HXcLHeWtXX15uUlBTz7LPPXrR9NNdbR+2+Md3TNrZO7X3wwQfNu+++a1asWMHU3kvlhz/8oRk+fLhJSkoy06ZNM2+++abtIkUkqd3HCy+8YIwxprq62lx//fVm0KBBxu/3myuvvNI8+OCDIderMMaYgwcPmhkzZpg+ffqYtLQ0861vfcucP3/ewh6FmjVrlhkyZIhJSkoyQ4cONbNmzTJ79+4NPn/27FnzzW9+0wwcONCkpKSYL33pS+bIkSMhrxGt+2aMMa+++qqRZPbs2ROy3LV627x5c7t/h/PmzTPGfDS999FHHzWBQMD4/X5z0003XbTPH374oZk9e7bp27evSU1NNQUFBebkyZMh6/zpT38yn/vc54zf7zdDhw41S5cutbpvBw4cCPsZbL1eTGVlpcnJyTH9+/c3ycnJ5uqrrzZPPPFEyBd6NO7bmTNnzM0332wuv/xy07t3bzNixAgzf/78i36cuVhvrX70ox+ZPn36mLq6uou2j+Z666jdN6b72sbNmzebSZMmmaSkJDNq1KiQ9+hpPmOM6aFOFwAAgA7F5ZgRAAAQPQgjAADAKsIIAACwijACAACsIowAAACrCCMAAMAqwggAALCKMAIAAKwijAAAAKsIIwAAwCrCCAAAsIowAgAArPr/J9kM6BhT2eQAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "prob_th = VAD.apply_threshold(prob_chunks, activation_th=0.5, deactivation_th=0.25).float()\n", + "plt.plot(prob_th.squeeze())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MuIWIG7EDY2V" + }, + "source": [ + "Instead of applying a single threshold (e.g., 0.5), we allow users to set two different thresholds, one to decide when to start a speech segment (`activation_th`) and one to detect when to stop it (`deactivation_th`).\n", + "\n", + "According to our experience, it makes sense to set `activation_th` higher than than `deactivation_th` (e.g, `aactivation_th=0.5`, `deactivation_th=0.25`).\n", + "\n", + "Users, however, can play with these hyperparameters to make the VAD more or less selective.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JNcVQj91ErqH" + }, + "source": [ + "### 3- Get the Boundaries\n", + "Now, we can derive the boundaries of the speech segments from the thresholded posteriors:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 88, + "status": "ok", + "timestamp": 1708531554326, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "wo2_ts-f96qn", + "outputId": "17d3ef0f-e94a-47bc-983e-a9c77146012f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "segment_001 0.00 0.23 NON_SPEECH\n", + "segment_002 0.23 5.58 SPEECH\n", + "segment_003 5.58 10.90 NON_SPEECH\n", + "segment_004 10.90 16.63 SPEECH\n", + "segment_005 16.63 20.00 NON_SPEECH\n", + "segment_006 20.00 20.43 SPEECH\n", + "segment_007 20.43 20.45 NON_SPEECH\n" + ] + } + ], + "source": [ + "boundaries = VAD.get_boundaries(prob_th)\n", + "VAD.save_boundaries(boundaries, audio_file=audio_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + 
"id": "_o9FoIrfFVcu" + }, + "source": [ + "The `boundaries` tensors contain the beginning and end second of each speech segment.\n", + "The method `save_boundaries` can be used to plot the boundaries in a human-readable format and/or to save them on a file (with the `save_path` argument)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CnUBGT-fGEAj" + }, + "source": [ + "### 4- Energy-based VAD (optional)\n", + "\n", + "The trained neural VAD tends to detect large speech segments, where smaller speech segments that are close to each other are merged.\n", + "\n", + "If the users want more resolution, one possible approach is to apply an **energy-based VAD** within the detected speech segments. The energy-based VAD processes the speech segments with sliding windows that compute the energy within each chunk. The energy profile is normalized such that we have 0.5 and +-0.5 of standard deviation. We then apply a threshold and split the speech original segment into smaller ones.\n", + "\n", + "This is done in the following way:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 67, + "status": "ok", + "timestamp": 1708531554327, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "phzPKnw1H-Dp", + "outputId": "06dd4176-c33e-4a45-9a28-40bd2101c605" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "segment_001 0.00 1.66 NON_SPEECH\n", + "segment_002 1.66 2.13 SPEECH\n", + "segment_003 2.13 2.70 NON_SPEECH\n", + "segment_004 2.70 4.03 SPEECH\n", + "segment_005 4.03 4.27 NON_SPEECH\n", + "segment_006 4.27 5.26 SPEECH\n", + "segment_007 5.26 11.37 NON_SPEECH\n", + "segment_008 11.37 11.94 SPEECH\n", + "segment_009 11.94 12.63 NON_SPEECH\n", + "segment_010 12.63 13.12 SPEECH\n", + "segment_011 13.12 13.26 NON_SPEECH\n", + "segment_012 13.26 14.28 
SPEECH\n", + "segment_013 14.28 14.99 NON_SPEECH\n", + "segment_014 14.99 15.67 SPEECH\n", + "segment_015 15.67 15.79 NON_SPEECH\n", + "segment_016 15.79 16.06 SPEECH\n", + "segment_017 16.06 16.30 NON_SPEECH\n", + "segment_018 16.30 16.42 SPEECH\n", + "segment_019 16.42 20.02 NON_SPEECH\n", + "segment_020 20.02 20.10 SPEECH\n", + "segment_021 20.10 20.18 NON_SPEECH\n", + "segment_022 20.18 20.18 SPEECH\n", + "segment_023 20.18 20.22 NON_SPEECH\n", + "segment_024 20.22 20.22 SPEECH\n", + "segment_025 20.22 20.29 NON_SPEECH\n", + "segment_026 20.29 20.35 SPEECH\n", + "segment_027 20.35 20.37 NON_SPEECH\n", + "segment_028 20.37 20.37 SPEECH\n", + "segment_029 20.37 20.42 NON_SPEECH\n", + "segment_030 20.42 20.42 SPEECH\n", + "segment_031 20.42 20.45 NON_SPEECH\n" + ] + } + ], + "source": [ + "boundaries = VAD.energy_VAD(audio_file,boundaries, activation_th=0.8, deactivation_th=0.0)\n", + "VAD.save_boundaries(boundaries, audio_file=audio_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PyTD2EhwJYwa" + }, + "source": [ + "Users can have a more or less selective VAD by playing with `activation_th` and `deactivation_th`.\n", + "\n", + "Differently from the neural VAD, the energy VAD tends to oversegment the input. We improve that by post-processing the boundaries, as will be shown below." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xrK19_VBLQ1W" + }, + "source": [ + "### 5- Merge close segments\n", + "The users might need to select the desired resolution for the VAD (the optimal level of granularity might depend on the task).\n", + "\n", + "For instance, it could make sense to **merge segments** that are too close to each other.\n", + "\n", + "This is done with the following method:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 52, + "status": "ok", + "timestamp": 1708531554328, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "EaWe-zNUJabo", + "outputId": "1710fa91-1510-4e21-a6c8-5b7d139a867b" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "segment_001 0.00 1.66 NON_SPEECH\n", + "segment_002 1.66 2.13 SPEECH\n", + "segment_003 2.13 2.70 NON_SPEECH\n", + "segment_004 2.70 5.26 SPEECH\n", + "segment_005 5.26 11.37 NON_SPEECH\n", + "segment_006 11.37 11.94 SPEECH\n", + "segment_007 11.94 12.63 NON_SPEECH\n", + "segment_008 12.63 14.28 SPEECH\n", + "segment_009 14.28 14.99 NON_SPEECH\n", + "segment_010 14.99 16.42 SPEECH\n", + "segment_011 16.42 20.02 NON_SPEECH\n", + "segment_012 20.02 20.42 SPEECH\n", + "segment_013 20.42 20.45 NON_SPEECH\n" + ] + } + ], + "source": [ + "# 5- Merge segments that are too close\n", + "boundaries = VAD.merge_close_segments(boundaries, close_th=0.250)\n", + "VAD.save_boundaries(boundaries, audio_file=audio_file)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EdV4YlP3Mo58" + }, + "source": [ + "In this case, we merged segments that are closer than 250 ms. Users can play with `close_th` and tune it according to their needs." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "egnlueSCM37M" + }, + "source": [ + "### 6- Remove short segments\n", + "It could also make sense to remove short isolated segments that might be misclassified as speech:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 41, + "status": "ok", + "timestamp": 1708531554329, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "qXt6TyP8M3c6", + "outputId": "616583a0-928b-4dcf-fc80-c5ae11fa4b3e" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "segment_001 0.00 1.66 NON_SPEECH\n", + "segment_002 1.66 2.13 SPEECH\n", + "segment_003 2.13 2.70 NON_SPEECH\n", + "segment_004 2.70 5.26 SPEECH\n", + "segment_005 5.26 11.37 NON_SPEECH\n", + "segment_006 11.37 11.94 SPEECH\n", + "segment_007 11.94 12.63 NON_SPEECH\n", + "segment_008 12.63 14.28 SPEECH\n", + "segment_009 14.28 14.99 NON_SPEECH\n", + "segment_010 14.99 16.42 SPEECH\n", + "segment_011 16.42 20.02 NON_SPEECH\n", + "segment_012 20.02 20.42 SPEECH\n", + "segment_013 20.42 20.45 NON_SPEECH\n" + ] + } + ], + "source": [ + "# 6- Remove segments that are too short\n", + "boundaries = VAD.remove_short_segments(boundaries, len_th=0.250)\n", + "VAD.save_boundaries(boundaries, audio_file=audio_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aDNix4y8NZNa" + }, + "source": [ + "In this case, we remove segments that are shorter than 250 ms. Note that we first merged segments that are close and only after we removed the short ones. This helps to remove short \"isolated\" segments only." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a6SZ2KWkOZ8f" + }, + "source": [ + "### 7- Double check speech segments (optional)\n", + "\n", + "At this point, we can take the post-processed speech segments and double-check if they really contain speech. This is done in this way:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 474, + "status": "ok", + "timestamp": 1708531554776, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "dq0P8ua0Mlpn", + "outputId": "a5b16584-6c84-4e5d-facc-3fff3cea04c4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "segment_001 0.00 1.66 NON_SPEECH\n", + "segment_002 1.66 2.13 SPEECH\n", + "segment_003 2.13 2.70 NON_SPEECH\n", + "segment_004 2.70 5.26 SPEECH\n", + "segment_005 5.26 11.37 NON_SPEECH\n", + "segment_006 11.37 11.94 SPEECH\n", + "segment_007 11.94 12.63 NON_SPEECH\n", + "segment_008 12.63 14.28 SPEECH\n", + "segment_009 14.28 14.99 NON_SPEECH\n", + "segment_010 14.99 16.42 SPEECH\n", + "segment_011 16.42 20.45 NON_SPEECH\n" + ] + } + ], + "source": [ + "# 7- Double-check speech segments (optional).\n", + "boundaries = VAD.double_check_speech_segments(boundaries, audio_file, speech_th=0.5)\n", + "VAD.save_boundaries(boundaries, audio_file=audio_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wtXQEO-rOv6-" + }, + "source": [ + "The method uses one more time the neural VAD on the detected speech segments. If the **average posterior probability within the segment** is larger than `speech_th` (in this case, `speech_th=0.5`), the speech segment is confirmed. 
Otherwise, it is removed.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QWkfOk8-PUYZ" + }, + "source": [ + "## Visualization\n", + "\n", + "We also implemented some utilities to help users visualizing the output of the VAD:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "executionInfo": { + "elapsed": 11, + "status": "ok", + "timestamp": 1708531554778, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "92s0HEapPfpt" + }, + "outputs": [], + "source": [ + "upsampled_boundaries = VAD.upsample_boundaries(boundaries, audio_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xMtwEuBKQDBY" + }, + "source": [ + "This function creates a \"VAD signal\" with the **same dimensionality** as the original audio recording. This way, one can plot them jointly:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 467 + }, + "executionInfo": { + "elapsed": 33160, + "status": "ok", + "timestamp": 1708531587929, + "user": { + "displayName": "adel moumen", + "userId": "01620107593621714109" + }, + "user_tz": -60 + }, + "id": "SSOeT58mQM1m", + "outputId": "f0fb1160-021d-488c-b819-ed8e715f3612" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAi8AAAGdCAYAAADaPpOnAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABe/klEQVR4nO3deXxM5/4H8M9km1iyiMhGiNgSRRBELKWk1qvVVdWvliq3rdzbSttbqqhqS1tUF6VVqret0tVtq40SQhFCQosS+y6J0Oxkm/P7IzIyyewzZ+acM5/365UXmZzlOTPnnPmeZ/k+KkEQBBARERHJhJuzC0BERERkCQYvREREJCsMXoiIiEhWGLwQERGRrDB4ISIiIllh8EJERESywuCFiIiIZIXBCxEREcmKh7MLYG8ajQaXL1+Gj48PVCqVs4tDREREZhAEAUVFRQgLC4Obm/G6FcUFL5cvX0Z4eLizi0FERERWuHDhAlq0aGF0GcUFLz4+PgCqD97X19fJpSEiIiJzFBYWIjw8XPs9bozigpeapiJfX18GL0RERDJjTpcPdtglIiIiWWHwQkRERLLC4IWIiIhkhcELERERyQqDFyIiIpIVBi9EREQkKwxeiIiISFYYvBAREZGsMHghIiIiWRE1eNmxYwdGjRqFsLAwqFQqbNiwweQ6qamp6N69O9RqNdq2bYs1a9aIWUQiIiKSGVGDl5KSEsTExGDZsmVmLX/mzBmMHDkSd911Fw4ePIhnn30WTzzxBDZt2iRmMYmIiEhGRJ3baPjw4Rg+fLjZy69YsQKtW7fG4sWLAQDR0dHYuXMn3nnnHQwdOlSsYhIREZGMSGpixrS0NCQkJOi8NnToUDz77LMG1ykrK0NZWZn298LCQrGKJz03C4E9y4Ebf5teVqUCokcBrfqYt+2Sa8DeFUBZkW1llLrofwAR/ZxdCpKSyjJgz4dAUY5l6zXrAPSYZPn+irKB9I+B8lLL1xWLmzvQZQwQ2sXydYuvAukfAWXF9i+XudrdDbQd7Lz9k+gkFbxkZ2cjODhY57Xg4GAUFhbixo0baNCgQb11FixYgHnz5jmqiNJy5Hsg9Q3zlz/xG/CvDPOWPfBfYMdb1pVLTo4nA88cdHYpSEpObgG2vGLduq3vBJq2sWydvSuAne9Ytz8xZR8CJvxo+Xr7VwE73rZ/eSxx6GvgP6edWwYSlaSCF2vMnDkTSUlJ2t8LCwsRHh7uxBI5UHlJ9b/NooCokYaXK84BDnxxe3lLth3aVZlPMMW5wIHPLXtPyDXUnBP+LYHOD5m3zp4VQEWJdedTzTrhvYGIvpavb2/XTgF/bbD+2ii/VePSPBaIHGivUpnnZiGwbyWvaxcgqeAlJCQEOTm6VbU5OTnw9fXVW+sCAGq1Gmq12hHFk66QLsDgOYb/fuXP6uDFGuFxxrctV9mHq4MXIkMC2ph/7h9cWx282KL1ncCgWbZtwx6yfq0OXmzVqo/j7x3556uDF1I8SeV5iY+PR0pKis5rmzdvRnx8vJNKRERERFIjavBSXFyMgwcP4uDBgwCqh0IfPHgQ58+fB1Dd5DN+/Hjt8k8++SROnz6N//znPzh27Bg+/PBDfP3115g+fbqYxSQiIiIZETV42b9/P7p164Zu3boBAJKSktCtWzfMmVNdlXjlyhVtIAMArVu3xsaNG7F582bExMRg8eLF+OSTTzhMmoiIiLRE7fMycOBACIJg8O/6sucOHDgQBw4cELFUREREJGeS6vNCFjISGEp620R0G681IosxeHElvEnqwfeE6rDpOlHS+WTlsUjhPiOFMpCoGLwogUrl7BLID98zkhrFnZPOOB6lvYdkCIMXV2DLTVFxN1QiM1l07ivpOlHSsZBSMXghIiIiWWHwQkRERLLC4IWIiIhkhcELERERyQqDF1mzdDigJcu7yFBDDqmkemw4J6w6nyR6Dsr62pBz2ckcDF6IiIhIVhi8KIKpoY22DH1U6rBJpR4X2Y8F54h
dUgpI5Jy0V3oEZ6RZYGoHl8HghYiIiGSFwQsRERHJCoMXIiIikhUGL0RERCQrDF7kTMyhjLIeJkkkI7zWiCzG4MWV8CapB98TqsOm60RJ55OVxyKF+4wUykCiYvCiBBweaDm+ZyQ1ijsnnXE8SnsPyRAGL67Alpui4m6oRGay6NxX0nWipGMhpWLwQkRERLLC4IWIiIhkhcELERERyQqDF1kTs0c9e+sTOQavNSJLMXhxKbxJ1sMhlVSPDeeEks4nq49FCu+BFMpAYmLwoggcHWA5vmckNQo7JzmrNImIwYtLsOWC5s2AXJUF576SvjSVdCykWAxeiIiISFYYvBAREZGsMHghIiIiWWHwQkRERLLC4EXOxByWqaQhn0RSxmuNyGIMXlwJb5J68D2hOmy6TpR0Pll5LFK4z0ihDCQqhwQvy5YtQ0REBLy9vREXF4f09HSjyy9duhQdOnRAgwYNEB4ejunTp+PmzZuOKKo8cWij5fiekdQo7px0xvEo7T0kQ0QPXtavX4+kpCTMnTsXmZmZiImJwdChQ5Gbm6t3+bVr12LGjBmYO3cujh49ilWrVmH9+vV46aWXxC6qctlyU1TcDZXITBad+0q6TpR0LKRUogcvS5YswZQpUzBp0iR07NgRK1asQMOGDbF69Wq9y+/evRt9+/bFo48+ioiICAwZMgRjx441WVtDRERErkHU4KW8vBwZGRlISEi4vUM3NyQkJCAtLU3vOn369EFGRoY2WDl9+jR++eUXjBgxQu/yZWVlKCws1PkhIiIi5fIQc+N5eXmoqqpCcHCwzuvBwcE4duyY3nUeffRR5OXloV+/fhAEAZWVlXjyyScNNhstWLAA8+bNs3vZiYiISJokN9ooNTUVb7zxBj788ENkZmbi+++/x8aNGzF//ny9y8+cORMFBQXanwsXLji4xM4kZo969tYncgxea0SWErXmJTAwEO7u7sjJydF5PScnByEhIXrXmT17Nh577DE88cQTAIDOnTujpKQEU6dOxaxZs+DmphtvqdVqqNVqcQ6AiIiIJEfUmhcvLy/ExsYiJSVF+5pGo0FKSgri4+P1rlNaWlovQHF3dwcACBy7b4C5owP4/t126z3jOUX12HBO2HQ6SWyUj9XXxq31nDFSUbtPXtdKJ2rNCwAkJSVhwoQJ6NGjB3r16oWlS5eipKQEkyZNAgCMHz8ezZs3x4IFCwAAo0aNwpIlS9CtWzfExcXh5MmTmD17NkaNGqUNYoiIiMh1iR68jBkzBlevXsWcOXOQnZ2Nrl27Ijk5WduJ9/z58zo1LS+//DJUKhVefvllXLp0Cc2aNcOoUaPw+uuvi11UBZPYEx2RLFhw3SjpElPSsZBiiR68AEBiYiISExP1/i01NVXndw8PD8ydOxdz5851QMmIiIhIbiQ32oiIiIjIGAYvcsZZpYnkj9cakcUYvBAREZGsMHhRArNHSvMJT4tDKskQm64TG9aVXEdZK49F+/45cVZp3usUj8ELERERyQqDF1dgS7IoZySaIpICi859JV0nSjoWUioGL0RERCQrDF6IiIhIVhi8EBERkawweJE1MXvUs7c+kWPwWiOyFIMXIiIikhUGL4pgdqIXUUshLzX5IJxbCpIiG04Km/KLSGyUj9WHcmtFZ4xUZP4ml8HghYiIiGSFwYtLsOUJSGJPg0QOY8G5r6R8SEo6FlIsBi9EREQkKwxeiIiISFYYvMiZmJOPcWIzIsfgtUZkMQYvREREJCsMXpTA3A52fMK7jUMqyRCbrhMb1pVcR1krj0X7/jnjeKT2HpJYGLwQERGRrDB4cQW2PNFJ7mmQyEEsOveVdJ0o6VhIqRi8EBERkawweCEiIiJZYfAia+xsSiR/vI6JLMXghYiIiGSFwYsisIMdkfwp7Dp26qzSpHQMXlwKq6frYe4bqseGc0JJ55PVxyKR90BJnwXVw+CFiIiIZIXBi0uwpSqV1bDkqiw495XUXKGkYyHFYvBCREREssL
ghYiIiGSFwYucidkfjZ3diByDlxqRxRwSvCxbtgwRERHw9vZGXFwc0tPTjS6fn5+PadOmITQ0FGq1Gu3bt8cvv/ziiKISERGRxHmIvYP169cjKSkJK1asQFxcHJYuXYqhQ4ciKysLQUFB9ZYvLy/H3XffjaCgIHz77bdo3rw5zp07B39/f7GLKl/sYGc5vmckNYo7J51xPEp7D8kQ0YOXJUuWYMqUKZg0aRIAYMWKFdi4cSNWr16NGTNm1Ft+9erVuH79Onbv3g1PT08AQEREhNjFdA2sntaDbwrVYVOTqZLOJyuPRSpNzoKgwICQaojabFReXo6MjAwkJCTc3qGbGxISEpCWlqZ3nR9//BHx8fGYNm0agoOD0alTJ7zxxhuoqqrSu3xZWRkKCwt1foiIiEi5RA1e8vLyUFVVheDgYJ3Xg4ODkZ2drXed06dP49tvv0VVVRV++eUXzJ49G4sXL8Zrr72md/kFCxbAz89P+xMeHm7345A9W54++ORCrsqic19J14mSjoWUSnKjjTQaDYKCgvDxxx8jNjYWY8aMwaxZs7BixQq9y8+cORMFBQXanwsXLji4xERERORIovZ5CQwMhLu7O3JycnRez8nJQUhIiN51QkND4enpCXd3d+1r0dHRyM7ORnl5Oby8vHSWV6vVUKvV9i+8LIg6VlrEbRPRbbzWiCwlas2Ll5cXYmNjkZKSon1No9EgJSUF8fHxetfp27cvTp48CY1Go33t+PHjCA0NrRe4EBERkesRvdkoKSkJK1euxGeffYajR4/iqaeeQklJiXb00fjx4zFz5kzt8k899RSuX7+OZ555BsePH8fGjRvxxhtvYNq0aWIXVcbYRm05vmckNQo7J53RX4599FyG6EOlx4wZg6tXr2LOnDnIzs5G165dkZycrO3Ee/78ebi53Y6hwsPDsWnTJkyfPh1dunRB8+bN8cwzz+DFF18Uu6gugNXT9UhlWCdJiA3nhJLOJ6uPRSrvgVTKQWIQPXgBgMTERCQmJur9W2pqar3X4uPjsWfPHpFLRURERHIkudFGJDWshiUySUnNFUo6FlIsBi9EREQkKwxe5EzM9nUltd0TSRmvNSKLMXghIiIiWXFIh10SGduoLcf3jKRGceekY49nzv8OIzfnCvTnYielYfBCRESy99+0c/BDMeDt7JKQI7DZyJWwbV0PvidUh03XiZLOJyuPRSr3GamUg0TB4IWIiIhkhcGLK7ClLV1x7fBEZrLo3FfSdaKkYyGlYvBCVim4UYFXfjyCgxfynV0UIiJyMQxeZE3MNl3j217461Gs2X0Wo5ftErEMRK6AfTOILMXghaxyIqfY2UUgIiIXxeBFEdhGbTm+ZyQ1CjsnndBfjnVYroPBi0vhpV0Ph1NSPTacE0o6n6w+Fqm8B1IpB4mBwQsRERHJCoMXl2BL9a3CqrKJzGbBua+klAJKOhZSLAYvREREJCsMXuRMzPZ1JbXdE0kZrzWbFdyocHYRyMEYvBARkaztOX3N2UUgB2PwogROaKOWfbO47A+AFEdx56Qzjkdp7yEZwuCFiIiIZIXBiyuxY9t6lUYp7fRKOQ6yG5uuEyWdT1Yei1T68EilHCQKBi9klczz+c4uAhERuSgGL67AlrZ0xbXDE5nJonNfSdeJko6FlIrBi6w5b1ZpIrIXXmtElmLwQkREssbuLa6HwYsisJrXcnzPSGoUdk464XAEpb2HZBCDFyIiIpIVBi9EREQkKwxeXAobhuthYznZk5LOJ6uPRSrvgVTKQWJg8EJERESywuDFJdjSiY0d4MhVWXDuKykfkpKOhRTLIcHLsmXLEBERAW9vb8TFxSE9Pd2s9datWweVSoXRo0eLW0C5ErOKWknV30RSVudaO5FThMdW7UXm+b+dVCA54v3K1YgevKxfvx5JSUmYO3cuMjMzERMTg6FDhyI3N9foemfPnsXzzz+P/v37i11ERSgpq8Spq8XOLgYR2Wjip/vw+4k83P/hbmc
XhUiyRA9elixZgilTpmDSpEno2LEjVqxYgYYNG2L16tUG16mqqsK4ceMwb948REZGil1E+VOpMHBRKgYv3o6DF/KdXRp5YNU4Sc2tc/JS/g0nF8ReHH+Nsf7FdYgavJSXlyMjIwMJCQm3d+jmhoSEBKSlpRlc79VXX0VQUBAmT55sch9lZWUoLCzU+XFFV4vKAACb/8p2ckmIiIjEJWrwkpeXh6qqKgQHB+u8HhwcjOxs/V+yO3fuxKpVq7By5Uqz9rFgwQL4+flpf8LDw20ut2LZqR9L4c0Ku2xHGvisRnXYdJ0o6Xyy8lik0l9OKuUgUUhqtFFRUREee+wxrFy5EoGBgWatM3PmTBQUFGh/Lly4IHIpaeZ3h5xdBCIicmEeYm48MDAQ7u7uyMnJ0Xk9JycHISEh9ZY/deoUzp49i1GjRmlf02g01QX18EBWVhbatGmjs45arYZarRah9ApiS/8OPetuOZqjZ0EihbHoulFSHyolHQsplag1L15eXoiNjUVKSor2NY1Gg5SUFMTHx9dbPioqCocOHcLBgwe1P/fccw/uuusuHDx4kE1C9YhZLcoqVyLH4LVmK7YQuR5Ra14AICkpCRMmTECPHj3Qq1cvLF26FCUlJZg0aRIAYPz48WjevDkWLFgAb29vdOrUSWd9f39/AKj3OhEREbkm0YOXMWPG4OrVq5gzZw6ys7PRtWtXJCcnazvxnj9/Hm5ukup6I0O61bx/XszHl3vO47mh7RHk4+2kMkkdq8ZJahR2TjohHYGgtPeQDBI9eAGAxMREJCYm6v1bamqq0XXXrFlj/wIp3D0f7AIAXC0uw+qJPZ1cGiIiIvtilYeC7T97XZTtaiTewFxQWoGxH+/BoYsFzi4KERGJgMGLghXerKzzin2CjooqaQcvMa/+hrTT1zDqg52mF5Z4IEbOYMM5oaTzyepjkcp7IJVykBgYvJDN/i4pd3YRiBRpy185EJQUEBHZCYMXOTP7pmZFJzbttk2v+2HqScu3TyR5Flw3tnRONXIdP/Hf/dh6zPgktnYnw3m/GN65HgYvZLPySo2zi0CkWPvO/u3sIhBJDoMXJZDhk5LT8T0jqVHcOcmh0iQeBi9kM1bZEhGRIzF4ISKSMMVVyIjgRnmVs4tADsbgxZVw1IIefE+oDpuuEyWdT1YeixPegve3ntBTDiV9FlQXgxeymTPvES9++yeS1h90XgGIRMaKF9POXit1dhHIwRi8KMz56zfqv2hLvbOE66wLb1Zg/f4L+P7AJeQW3nR2cUhpLDr3pXudWE5Jx0JKxeBF1upXefz0x2XRti01Qq0R2hrpF5fIAJ68RJZi8EI2E3jzJRKNhCs/iZyGwYsC/HWlyKn7/7ukwqn7B0wHUBf/LsW2rNqZSvmNQFKjrHPyRqWAmxVVWPjrMWScc0yiPT5GuQ4GLwqw5/Q1p+5/46ErztmxBff6fm9uw6RP9+H3E1fFKw+RRFRJoB313S3H0e/NrVix/RQeWL7b2cUhhWHwQi6FqdZJblQW1sh8lX4e0XOSsftUnkglMl9esfFJW8srNXjlxyM4dbXYQSUipWDwonC61bXOfxqTHOaCoHpsOCckcD7N/P4Qyis1mPZlpm0bsvpYzF8vZt5vWLP7LAYv3m7lvuxTDpIfBi8Kd7WozNlFEE3tjozm3mdTjuaIUxgikVjbYffvUuf3RTPlRgUz45J1GLzImdlPRlbc/bTblm4nwvNWJKY6crlQhJKQMllw7tsyJEgCtTV/XMjHlYJbOaI4vIlkgMGL4tnnxlh0U3pPcduO5ZpeiEjmxA4lTuQU4d5luxC/YKvIeyKyHwYvCiD2NPDfZlxE51d+w4rtp0TdjzH5peXY+OcVlFXqr2a2OETj0yVJjaFzUuRz9cCFfO3/7/9wF45esU/tpNj3Jansk5yDwQuZ9Pw3fwAAFv56zGllGLtyL6atzcTbyVna18y9p3+YelKkUhEpS+b5fMz64ZCzi0FkEoMXhZN
Ac7pd1DwN/vTn7ekPVGZGL2/VCniI5MbcugRBEOo172YXmJ7zSyOBnDBElmLwQrKilGCMyN7m/O8IOr/ym85rbyabri111CVVWl7poD2RK2Dw4koU8M2fb2D4p+CAnBTkIoycSzcrqrB0y3EcvlRgaGVxymSGz/ecq/eaoT5i5rHyWAy8fyVlDh4WrYD7HRnG4EXWTF+c1g8N1r9t226Gtiuvuj2V9LtbTjixJORqlqeeQs/Xt2DplhP4x/s77bhl8b5kzfn+dlQX11+cNY0IKRKDF4X7YNtJ20Yr1Fn3lR+P2Fgi+6kdyBjC9nyyWq1zP6fwJt5MPoaim4aaPsQLAayvVax2Jq8EaafMn/9MrBE7xWVsNiL7YfCiAI78ev4q/YLF65y+WoyHVuxGapZ4eVkM3d/3nb1uYA0OqSTzlZY7osZR/zn53lbrR8tVaQTctSgVY1fuwfEc/bPPizUSu+4l+famLHy664w4O9Puk9e1q2DwQnpVVFXferbaIRHcv9cdwL6zf2Pip/ts3pYhhoIUc2pniJRqR61Z1I9l6w9eHGneT385uwikEAxeSK8zeSUAgAPnbZ+F+ZqJmWXtYf+tCShLWDVNpHWzwnTwbums1c7yxZ5zmPHdn84uBkkEgxcywH6NUeWVjqv9uGnGRG+Gm5KIzNdu1i9Ynuq8rNN2I4/YBS9vOIx1+y7g6/2WN12T8jgkeFm2bBkiIiLg7e2NuLg4pKenG1x25cqV6N+/P5o0aYImTZogISHB6PJkCed0Xr1WIn7NiyXW76t18+NwSqqn/jlRqaf5saJKwJvJx3Q7hVtxPtUE98VllVZ3MDfnAcHiGEWi6Qd++uOy6YUcUA5yLtGDl/Xr1yMpKQlz585FZmYmYmJiMHToUOTm6u9LkZqairFjx2Lbtm1IS0tDeHg4hgwZgkuXLoldVJIoQx0NiRylZooMfS78bfns5rVlns8HAHy59xz+NJg/xjhb+nYVGMidRCRlogcvS5YswZQpUzBp0iR07NgRK1asQMOGDbF69Wq9y3/55Zd4+umn0bVrV0RFReGTTz6BRqNBSkqK2EWVHXOaSKpZXi+cV1QGQBq995f8dtzZRSCXdPvc/+OiiaDChsskr7g6hf+l/BuoMlDzct+Hu+olxhMEAS988wdW7jht1n4MjSpaWCcLrxSueSJTRA1eysvLkZGRgYSEhNs7dHNDQkIC0tLSzNpGaWkpKioqEBAQIFYxZWvXyTzRtn32Wolo27ZUBUcMkYs7cD6/XmK8Xw5l45uMi3j9l6M2bdtQwEQkZaIGL3l5eaiqqkJwcLDO68HBwcjOzjZrGy+++CLCwsJ0AqDaysrKUFhYqPPjKq5KqHZETBVm3FwLb+iv+jY4kkKs5BZEVjLnOv5izznkFlXX1Exbm2n1vnKLbuL7zIuiZswW67504FYzG7k2SY82WrhwIdatW4cffvgB3t7eepdZsGAB/Pz8tD/h4eEOLiWJbcfxqyaX+flPph4n5zA035YYXt5wGA8s313v9bN5pmtKawfyoz/YhaSv/8BSGU6xwUy9BIgcvAQGBsLd3R05OTk6r+fk5CAkJMTouosWLcLChQvx22+/oUuXLgaXmzlzJgoKCrQ/Fy64zjA6QWK96QVBQOFNdv4j1zL18/0O3d+F6zfqvZZ+xrLh/5cLqmtvtvyVY2JJImkSNXjx8vJCbGysTmfbms638fHxBtd76623MH/+fCQnJ6NHjx5G96FWq+Hr66vz4yrKzEhA5Uhz/ncEXV75DTtPiNcXxxjDM/0SiSensMzZRTBLTUtp7aaiE7nFTiqNZWyd34mUR/Rmo6SkJKxcuRKfffYZjh49iqeeegolJSWYNGkSAGD8+PGYOXOmdvk333wTs2fPxurVqxEREYHs7GxkZ2ejuFgeF5kj3TB7tNEtIt8APt9zDgDw9m9Z2teSD5vXt8kSNX196rquJ59Mlclj5k2R6rDpOrHtfHJEVyx914l+Vh6LVAI
NqZSDRCF68DJmzBgsWrQIc+bMQdeuXXHw4EEkJydrO/GeP38eV67c7q+wfPlylJeX48EHH0RoaKj2Z9GiRWIXVXbEvM/Zsu0/LuRr///kFxk2l6Wu9fvO631d341/1c76E8HVLh+Rs6kcFECL2TmXyNE8HLGTxMREJCYm6v1bamqqzu9nz54Vv0CuxobHOXuPGEjNysXADkE2beNMnv6kYPpGFunr7GtrUjFyERZdN84dvWZO+LNm9znc162FyQoJW0MpwcRdQxAEqDjaj2wk6dFGZB45DZWe+Ok+ZJyzbbLH7zIv6n3d3PthdZAjn/eMXIMt17E5fUIcVeN4Mrd65JOh49l5Kz+V+Uk2zW8BktO9kGzD4IUcTt9QT3v45RCHS5P9fH/gEjLtMKu6JZZtPWnVelKaPyyv2HgH5uKb1UOdz19nDShZj8ELKUbKUf3zZRFZ6/4PzQu0y6vs028l5Zh153BNQGAOdmMlJWDwQrKkL6V5pUbAlVv5K4y5UVGFj3acEqNY5KKcPXmoJTmf9p+1LCeMWC7n189XYwgDLqqLwYtLEe8WcMaMDJ/2pO/Gl1dchgW/mjfPy8dmTmZHrsiG60Ti37JHLhdg98lr5i1s9bEYX7HmrzO+O6Tzur4Elx+mntQ78eTl/BsoMDAliLnlIHlj8CJr4l2clg7fPHLZsQniDHXgKynjcFCyr5q5hMRijy6mu8wMSE7kFIuemduc2k8AyC7UXW76uoM6v18rLsNbyVl4/Zej9XJa9Vm4FTHzfrOpnCRvDF5cgi1Dpc3dg+X72H0yD498nIZTVy1PQFipkVZ2YVKOmpE7Nee+OQGxs5/xLan5NDXPqdgjdgzNYl23v09Z5e1r/ISTm+X0OZZdiJ/+uOzsYrgsh+R5IXE5+8YJWJdK5tFP9gIAnv4iE5um32nRurtO6X/SNLccHFJJhmTlFCGq9u/Zypmp/vTVYhy66JhaUkP3JVOjkfStv9vA9V5/Hcdd18OW/g4AaNrIC33aBjpsv1SNNS9kF7bcMupWH5tj9obDNuyRyLAdx3Xn5tpr4aSHUvbe1pPIcnItxryf/rJ4naxs6dW81PjrinKCWzlh8OICHDH7tC2zSRfcqLDbxGsFpZzVmmyTW2furIoqNlE62oJfj+KxWzWzgLQnkHxt41FoTLXFOdmF66X4Ys85ixIDSh2DFxew/6z4ibZsHb1z7pp9EladdvCoJ1K+swamo3A1+aXldut7YqjfS80w7o+2n9a5lqU+q/S/vjrgsH0Vl1Uiv7R+UkKNRsD241dx7Vaz3LXiMkz6NB3Jh7MxeMl2vLzhMD6wMgmiFDF4cQGWRtuGbizGnLpqW9Ag7VsTuTJH1FzKQddXN+Pud3bYJafNz3/q7+j66Mq9el83RCq1YhsdkN07p/AmUrNy0WnuJnR9dTNKy3UTE36XeRETVqcjYcl25BbeROxrW7At6yqe/CID5bc6P+8+ladv07LE4MUFaB9azHx6+e1Its37tDRgcuqTlcSf6six6qYJuFZsSep9JZ1Lt4+l9vW897TxzrPmpFn4aLv+mtpyA8HIMQN9XozOkybCdf1m8jGs1jNTvSU+230WETM2otur1UO9NRoBszccxvp9542uF78gBRM/3af9ve6xbzqSAwD4u7QCvd5I0buNzPP52kBG7hi8yJi5uVgsvYRLyqts7rO/1co050RSY+iL014szankDGev1apZVam0X7hpZo4CqkuOnVxP5hZheeopvPqz5R2OyypvB39zfzwCoDrIAIDU47n4fM85vFgnaV9ddSvEM8/lA6h+8Ptg6wlsOZpjVlnW7LYt+JIKBi8uYKe5GTX1si6MMfXQc4GTspHkmX/uK2novb5jOVmrw+z2rFw8uGI3Pt9zDmNX7sGybSfx12X5BSOWKi2/HYDsMVH7VOPHPy4jYsZGdHg5GV/vv1Dv74IgIL/WIANBEJCalavNIL4u/Tz6vblVby6smubMfWf/xqLfjpt
9HG/8cqxeGUrKzJ8bSyqY50UBTN04D15w7My4gOl+Av3f2lZneceS/rMu2aKySoPcojKE+TdwdlHMJuUAKHHt7Q6pW+pMgPr2piy8vSkLZxaMgEon0ZJ1x/Orhf1HavfRs/a6/rukHP4NPbXl12gEXC64gRZNGmqXcat1bCf1jH7ad/Y6ekYE6Lz271odef/z7Z94uEe4zt8FQXd27ZW/n64XXADA4MXb671Wc9jWpJqosTz1FN5Mrt7ffx/vhTvbN9O7XEWVBm4qFdzdpHOOsubFBTjji1ojQDFtq+Q8Go2AR1fuwWOrLOvI+diqdPRZuBW7Tiqng6KjlVVq8H7KCbM7xXabv9kukz4+9WWm2csmrj2AtenG+4qYknz4CrrN36yTfybp64Po9+Y2/O/gJb3r6Ouj99CKNCzalIXFv2Xhcv4NnNUz8vHi37o1zvELU7B0ywnt7/oCF0NqyvDZ7rNmr1NXTeACAONXp+tdpqJKg7g3UnD3kuoA6u+SciStP2h1k6G9MHhxBRZGLxnnbL8BCYKADQYufHNUVGnw6a4zyLHhqYLk70RuMXafuobfT+ThZK75fU/SblXrv5V8DPskMouyPe04flX0fVwtLsPizcfxedo5s5bPL63AE//dL3KpdOUVl2F7lm3vxcJfq7/A1+w+i4MX8gEAGw5Wj4aqPbS4dqWSoVvqB9tO4v2tJ9Fn4VYMXJRa7++1m54AIKfQvGzD+mhuBS9GOy3bwdm8ElwvKcfpvBIIgoDXNh7F9wcuYezKPaLu1xQGLy7A0pqXr9Lrt81aw5I5i+q2IT/1RSbm/fQX4gz0mifpSj9zHQ9/lIbEtZk2D2Wtvf4Xeyx/wv7jYgEeWpHm8IlDzXVJz+zo5jD0lCwGSzqoljphYtSrBibOPH+91GTSyot/l+rcH0cv26W77eIypBzNQWWVBr+fuF2LZ23waM9Gl2XbTiFixkY7blHfPk7is7Sz2t8FAbjwtzT6KzJ4cQVOaDc6fKkAhy+Z/4Xx2xHdnvLm9pyn207mFmPeT0dsngX5cv4NrN55BsVWduJ7+KM0pJ+5jp//vIIfD9o2cd3SLbc7ImabOVuxPn/ems/n75Jy0bOhVlRpUHDDvEzP09cfFLUsjmZoqLOY/jAwV9M/3t+JmFvDkd/edAxjPkpDeaUGvxy6go93nMI7m4+j35vbjCbIzC+twOTP9uOjHad1OiXX7fdjrtpNRM70Vfp5nRFQNa7VmnfqSsENvL0pS+ehQSMIcLdmIjsRsMOujJl7ChVpv4TMv2nbOnxz5e+WDcdz6vUgCE4ugH2MePd3lFdpcDynCF8+0dvq7dz/4W5kF97EkcuFWPxwjPb1L/eeg7eHOx6IbWH2trZl5Vq0fF21vyQqqjTYe/oamjb2QtsgH73LC4KASj3BibtKhYMX8jF62S4MigrC6ok9De7TlnP/Znklomb9CgB4f2w3jIoJM7p8+pnreNzT6t2Jytr3QTpDv2+XY9m2UwCAj3ecMjkyR1/g+V3GRbtk73ZEMjtzzPz+kN6HgZ0n8xAd6ov2wT71mrgA4PVfjkqmKZ/Biwu4XlIOeDu7FCS2mqdeW2cNrhm9sOPE7arxnMKbmPVD9WSYgT5qDGjfDIIgICunCG2aNYanu/5K3J//vIKhd1xGmH8DxLZqYtb+yys18PJww5LNul8yKcdykXIrf9DZhSNxJq8Er/x4BImD2qJnRABWbD+l7b9Ql0oFLNqUBUDcHERPfpkBoBuA6pTx3Vs1QXMZjXhSul8Pm07A2fO1LfVeU+K0I++m1K8FembdQQDAsfnDkKMnuPl011mRS2U+NhspgvFaA1uegxw1fNPRSW7rHpcgCPg24yKOyjB5Vl3ubipsP34Vj6/ZZ3bzhT4ajYDTt/ot1Q6IJtzqb/FV+gUMW/o7pt7qpHml4IbeURj/+uoAHli+26xmn1U7z6D9y78iNSsX7+m5udb25OcZ2H78Kh5akQYABgMXADh6pQg
7a408MtR0lFer2tySc9/Qshv/vIyMc9cx/N3fsb1OP4m6o/GkMlTaXpeiM45H3z5rj4A6YkY+Gmc0fUnNR9tP45Wfjji7GEYxeCFJ0DfRmCN9n3kJz3/zB4a/+7tTy1GbtVMm/F1agQmr07H1WC6e/jJD7zKFNytMdqa9VlKOQYu345v9F/S2qn2yszrF+7asq/h63wXEL9iKOf8zfMN7YPluvLP5ON5MPqYTJB69UohpX2bi1NVizL/VObR2GnRDLlvQ2XX1Lt1mzJqREulnrmPsx3u0Ew5m2nnkxhu/HMMDy9Nw9EohJqxOx/8OXtKm1+8+f7Nd90X6PXgruCXzvbPlOI7nSHcmb4DBCwE4nlOExb9lofCm9U/ptvrjYgGuFlk/bNAWf5eWY40ZuRIEQdB5Mtfn/LVSTF6zz+bhud9lXETP11Pwx62hm9bapSe7cl5xGbq88huGvLND+1p5pQabjmTrHZ3x+i9H9Y4c+7vkdsD5n+/+BAB8vsfwsNpL+TfwbsoJLE89heHv/o7iskq8+O2fGP7u79h46ArGrzJ/BM2vh67U6sulv5+CMXvPXEdJWSUe/igNaaevYdwn1Xlkpn6uP9izl2fWHcSYj6sDJ2s7RBMR+7zYTKMR8NGO0+gR0aRedkVrHLlcgOWpp/D8kA6ICGyk87eaJ3GVDZ1LcwtvYuzKPRjbqyWe6B8JANovsdzCMrz5YBert22r9DPXMaxTCPaecWzyo9N5xThUa2RU8uErGNYptN5y8376C2t2n8XLI6Mxpmc4fLx1e1pWVmlw59vVmYNTjuXi7MKRevf366ErKK/SYFinECzdcgK9I5tiQJ3Mls998wcA4OkvM7FrxiDt61eLytDQyx2N1OZfuheul+K7zIvYcjQHPSMCtO3WZ/JKcLWoDNPXH8RfVwqr+0bpkV9aUS951vWScu3cLNZ67uuD2snkAMuGDddNZBYz7zeL93/H3E3a/+cWldk8SssStUdREZHlGLzY6PsDl7RZCtNmDkKon/HOeZ/tPosTuUWYf28nqFQqlFdqsPi3LAxo3wxxkU0x8r2dAKqr0lOeG4jcopu4WlSG6BBfPLhiN3wbeOKFoR2w5LfjiLOivO+mnMCpqyV4beNRbfBSI/O846cRqC35SDaycopM9nWwtyYNvXR+f/KLTL2BR03tzGsbj+K1jUdxZN5QnSCi8yumv0BLyyvrffEuTz2FTyf1RNtmjREe0FDnb5Wa2007ecVl6Pn6Fni6q/DpxF7wb+iJTs39TO5zzEdpuHyrv8nhS7pt/j1fr9850Rz2aPLYdERaw+HveX+X6YXsRCpDZonkisGLDW5WVOHjHae0vw9evB1/vTpM+/vl/BtIzbqK+7s3h7enO2b9cAhf7q0eMz+iUyj6tA3EZ7vP4qMdp/HRjtOYEN9Ku+6pqyU6CYjeuK8zMs/nAwBSb2WU7O1heZ+Imv0D1ZOGpdYaeXEit1j79O2M4Y4//WFbThBrmepvU6UR9E4keTK3GLtO5eFacTnG9AzHjQrdoYUPr0jDHxfz8VjvVvjhwCVsThpgsDlp0q0+HnWDppzCMpRVVmHniTxMW1sd9FRUCfi/W+nyT7w+3OBInxqXbciP4kpqRlnZ0s3UmnWlM7RYl/VDpaVBKuUgcTB4sdLavefx0g+6U5iXlldhza4zmNi3NQBg1Ps7ca2kvN5yAPDJzjP46c8r+KrWvByfGUnD/dtfpof4War2pGE1vrJxnhA5+mCr8afgQYtT9SaycndT4a3k6uG3q3bWz2uTfitQ+eTW38ytrajbUbfDy8kGl20361dM7tcaUSH6854QESkRgxcLVVZpUFJWpTcgAYBXfvoLn6Wdw4XrpXqTZdWwNNdEqpH5O0w9H1kyZDH9jG7NgDSfCW1X+z1JPZ6Hun3Xj+cUoWVAQyQfzjaYgfMf7++0e7lW7zxjUTp2QH/gRLazZLCXPa4TqVxr9hri7IzjkcpwcxIfgxcLLNqUhQ+
2nTS53BkZJzSqm4vCVQ15Zwcm9Y1weFImSwMXIiJXxKHSFjAncCHl+PlPaaTyJiIiXQ4JXpYtW4aIiAh4e3sjLi4O6enG8zl88803iIqKgre3Nzp37oxffvnFEcUk0uGsvDNERGSc6MHL+vXrkZSUhLlz5yIzMxMxMTEYOnQocnP19/nYvXs3xo4di8mTJ+PAgQMYPXo0Ro8ejcOHD4tdVCIiIpIB0YOXJUuWYMqUKZg0aRI6duyIFStWoGHDhli9erXe5d99910MGzYML7zwAqKjozF//nx0794dH3zwgdhFJSIiIhkQtcNueXk5MjIyMHPmTO1rbm5uSEhIQFqa/vkm0tLSkJSUpPPa0KFDsWHDBr3Ll5WVoazsdvV+YaE4E+vlXzmDrzxfE2Xb1mrlZvnw6XVe8yEIpnvkt3W7ZE2RZGmt1+tmvSfkGsJUeaYXMuBFj3WYKmw0vWAtHdykmZ6gKQqtuue1c7soQmks94nXIlQKHJMilssIAKA/i7gjiPrJ5uXloaqqCsHBwTqvBwcH49gx/TPAZmdn610+O1v/F/WCBQswb948+xTYiH0nL+Fud2mOBMkRmhj9ewm8USQ0gI/qBnq7HbXrtuXqBtQoFBrA14r3hFxDDsw/93OEAACnEeV2wfr9SeRauyr4o0pQQa2qRLwN9zxnHI8GKlwV/NBMVYCebpyCQUynNPWnUHEk2YelM2fO1KmpKSwsRHh4uN334xPYAtPK/2337dqqBN7YqelkdJkyeOGe8tfQUWU4CZ4+BWiE3Zo7bCmeZJXDE/eUv4Y7LHxPyDXchCd2ajqbvfwLFf/ED1X94A7jM3Ubcg2+2KuJsmpde8tFE4wqfx2tVdYnxvwbjZGm6WjHUplLhfvKX0WM6pTpRckmJfDGGifuX9TgJTAwEO7u7sjJ0Z3DJCcnByEhIXrXCQkJsWh5tVoNtVptnwIb4esXgI2a3qLvRyxnhFCcEZwbKUvNWSEUZ/mekB0UohGSNb2cXQy7+UuIwF9ChLOLYZWLQjNcFJqZXpBkTdQOu15eXoiNjUVKSor2NY1Gg5SUFMTHx+tdJz4+Xmd5ANi8ebPB5R3Fy4N9IoiIiKRA9GajpKQkTJgwAT169ECvXr2wdOlSlJSUYNKkSQCA8ePHo3nz5liwYAEA4JlnnsGAAQOwePFijBw5EuvWrcP+/fvx8ccfi11Uo0JMzBZNREREjiF68DJmzBhcvXoVc+bMQXZ2Nrp27Yrk5GRtp9zz58/Dze12BVCfPn2wdu1avPzyy3jppZfQrl07bNiwAZ06Ge/XIbbGag883KMFvt4vjZ70RERErkol1J3CVuYKCwvh5+eHgoIC+Pr62nXbGo2AyJeY7VepvNzd8J9hHfDaRsOjj/q1DcTOk9YPpTVXZGAjnJbxHFlEpHxnF9p3qLQl39+c28gCbm7G+708GNsCJ14fbvN+Vvxfd5u3YUp8ZFPR9yE3u2cOQsuAhkaXCTfxd3sZFRPmkP0QEckRgxcL7Z4xCIl3tUXGywk4u3Ak/pgzBL8+0x/TE9pj/r2d4Ol++y31dDce7DzSMxw+3h54vG9r9Ixogr0vDcauGYMwrFP9ETDvj+2GF4Z2sNtxTOgTof3/mw90xtbnBtht23IV2FgN9zoBakMvd+3/P53UE9MT2omy708n9tT5fXS35vWWefvBLvVeWze1Nwa0b4a9Lw0WpVyOUPs9JiIyB4MXC4X5N8DzQzugaePq4dl+DT0RHeqLZxLaocGtm/DheUOxb1YCfkzsp7NuqJ83/jWoLYDqwGXhA11wYPbdmDOqI755sg+Cfb3R3L+6Y/CPiX0BAA083XF24UiMignDtLva4syCETj5+nA8GtfS6mM4OOdutGp6uwbh4R7hiGzW2Ort2duIzvqHxYthUt8Ind/rBi/92wVq/39XhyAE+Xrj7MKR2Dz9znrb6t7SX+f3uaMM57lYOyUOR18dhldGdcTsf3TEXVFBeOXW8pP7tUbrwEa
IDGykXf7UGyPwUI9wHJ43FMvHVdfMLby/M3pHNsVnj/dCsK83nujX2qxjrtGqaUNMu6uNReuIIXP23fBr4Amg/udBRNIU2Fj8FCXGyD5JnRQ1VnugsdoDzXzUSJs5CAGNvHD4UgHaNvOBj7cHhnUKQVRIdXueh7v++LFLC38kP9sfob66o5xUKhU83FV4477OWLvX8rTijdUe8G/ohQZe7lB7uMG/oaf2b57uKlRUObcLVO/IACx7tDtaz3RM36K6Pb6a+ehekJ3C/LDpiG7eIQBoF+yD1OcHYsX2U1i3rzqrqqe7m857GBXii9NvjMDGQ1dwvaQcxWWVeHtTFv7RJRR92lQHRRP73g44JvZtrfP7E/0j8dIPh+Dj7aENqhqrPTC8cyiOvzYcXh66587L/+iIT3aeMfvYmzVW44WhUejc3A/pZ/7G8M4heGiF/mk7agzvFII9p6/h79IKs/djzAtDO8Db0x1/zB2CKo0AdzcVPt111i7bJiLxrJvq3LxnDF5EFnpriHVsqwDta3eE+Zm1bk2AY0+dmldvU+3hjj9fGQI3lQoqVfUXY+bsu/HCN38i+Yj1mTVtEdhYjXVTHZvPp0qjG73U/WwGdgiCu7sK0Xo+i4jARlj4QBccvJCPY9lFuL97c3wyoQc+3XUWJWWV6B0ZAJVKpdN/ZVSXMDRvYt6w+0d6hqNpYy90C/ev97e6gYsh3Vv6I/N8PoDqznURM27Pu7Pk4a4AgGGdQrVNlZHNGuH0VcMdhZf/XywEQbBbcNm6Vu1S3VovIpKml0dGo22Qc2vrGby4mPce6ab9v9pDt6+Bj7cnVjwWq/MF50gPxrZw+D4rNfVrmrJeG4YOLycDAFQq4OmBbY1u49un+uDYlUJ0b9kEbm4q/Huw4X4xLZua3+HXzU2FoXfY1oT22eO90PmV39C0kRcAoGOoL/66Uoj3xnbTW5ZNz96JdrN+1but1RN7AIA22K2RPmswDp7Px9TPM8wq09MD22BwdDAOXsjH8E7WH19UiA8GdGiGj7aftmr975/ug6gQHxy9UoibFRr0bRvosHN/1YQeOHqlEIt+4/w7JD/dWzl/Hi72eXEhag83BPl6O7sYBiXd3d7h++zTpv6oK7WHO+7v1hz92wWiY6jp2q/Gag/0iAgwORrNEabeGQn/hp54YWgHbH1uAHy8PXF24UhkzL4bQPUX9qZn78Q9BkYzebq76X1PAGBQ1O0JU2cMr56HZ8/MwQjy8cYQC4KsJwe2QWyrJpjcr3W9QMgSo2LC8ES/SO3vnz3eC6ffGGFw+e+eisdro6vzRfVvF4hu4f5o6OWB2FYB6Ns20OB69hLmV33tvT+2GwZHB2NEZ05NQdL21oNd9DYPhTdxzKhLY1jz4iKCfdV4f6z4Q7BtYW5TiD39o0sovD3dcUeYbpCyZExXh5fFHl4aEY2Zw6MMBgXenu7oEOJjdBsNvUzfFp4c0AZPDtDt7Ltn5mA8+UUGDl7Ir7f82ifi8OgnezHvnjvg6+1Z7++mJN7VFh9sO6nzWpVG0DlnYlr4wc1NhVA/b1wpuFlvG7GtAhDbKgD/17uVwf10DffXW35bffRYLIbeEYLySo22zK0DGyHYV42cwjK774/IHG2DGuNkbjEA4J8DInVqMQdHBeHhHtWTHD8U2wLfZFQnaH02oV29voHOwODFRex9KcHZRbCImwrQ06JjdyqVCnd3DDa9oIzYUptRV00fmCgTAQ8AhPh5Y8O0vvWaXuaO6og+bQNtSmiVdHd7DL0jBL4NPDDg7VQA1cGLXwNP/PPOSFRpBPg3rG4aiwrxqRe8HJs/zKz9RAY2sjp4GdsrHD//eQVFNyt1Xj/5+nBtx/zawZZKpcKH42LxwPLdVu2PzPPrM/0RfasG1VlN4oD+ANzZ7gjz1QYvM4dH41pxOb69FaTUHk305gNd0LWlP25WaDDZwlGNYmGzEUnSrJGGhxmTuGJa3O60vPW5gch4OQE//aufkTV01Qz
3rzGpr203u5GdQ+HmpkLnFn5o1fR2B9+a2rKZI6Lx8j8Mny8/JfaDt6d5uWTuigqyqaw9IwLqvWZoRCFQHaSTuNoH3w6872yvO9v0xFr5rmqr3W9tQnwrBDb2sqkMs0ZE4/mhHXTSH0hBr9a65+v8ezvh/u7NER/ZFP8ZdjuvmJubCuPiWkkmcAFY80J6BDZWI6/YsVXZdZuMHu8bgfk//+XQMlC1qQMiofZ0w4D21V/kTS3M5zChTyu88csxu5XnvbHddH7fknQnDl8qNKvGrPZTtzlGdg7Fv746YHEZa9zXrTm2HssFALwzJgbdwo13bHSzYy2ZWOqOUpOb2u/wh+O64/fjV7Xnt7ubCmt2n9VZft+sBDTzUWNIx2B4e7qjbVBjHMsuQl7xdbP25+XhhtUTekIjCOjfLlCnJjTQR60z7UfTRl64VlJuy+HZpG6qiAZe7tpRiFLHmhcXENe6/tOgMV4mMgOLos5FZM+mD7KM2sMdU+9sY7JvjCHG+pSYo0edkQx1h1C3DfLB6G7NDZ4jNdmj+7cLtChwAUxPAWKMj7cnahfpvm4tEGHiSdva0/zdR7pat6KF6j6ZO4KP2vZnakMJImvyJA2KCq53XsW08MPm6Xdq+3N0au6nHQ48zoJzOmv+MPRrF4g72zerd44ufihG5/cN0/pq/39n+2b1ai1rqEXqDyjniQ1Z8+ICnrEwpb0zAodpdxkfjkzyYU6HX2OaN2mA/ef+tnr9gR2CsHvGIAQ7eGSdXwNP9LrVbGTuF7AK1l1rjrhG9SVCrBEd6oujVwpF2a9vA08UlVWaXtCI2qPHzH2r5oy6A+2C9Qfs98SEISrEB62aNoSXuxti5v2Gwpv1yzg9ob3RzyY8oCHOLhyJvOIyNG3kpbOsCsCWpAGInpOsfa1zcz+snRKHRl4e+P1kHiasTjfvYMzQPrixlWefNLDmxQXUZHM1lzOSDz3Rv35b6p6Zg/HxY7EOLws5lz3Sjof5N3BK0rsgX2/sm5WA9FnmdZCvmRZBajJeTjA6+s/bU/dvvz7T3277fvXeO2zeRrugxpg/uhPefaSr3QK99sE+UHu4Q6Wq7n9VV8dQX/xzQKSeNesLbKzWlqsmHcP93ZujgZc7Rna5PYR+fHwr+Hh7ws1NhQHtm2Hd1N4WTwNiyLx7OuG+bs0R2ayRwb4/UsaaF6rn7Qe7oNcbKc4uBkL8vHGzwrqmC3KuQVFB2r4flqr9VXO/ngkqpaqmyt+SYaSWJC2srYGZHZCtZaqf0/j4VjhwK3Pz+2O7ITrUF/8e1BYX82/g+8xLNu37rg6WdZre+twADFq8Xec1lUqFxyxsvmxtQWfadx7uiuXbT2FcXEucyCnG/nN/Y9aIaKuaHb99Kh6ncku02c8/GNsNC+/vjEv5N9ChTk1Q78imiG3VBG2CGmPm94cs3leNlgENtRnAU5IGyLKZnsEL1eOMRHaGbsaNvXmKypFQtyegld6u00dAygwl/hODM2fifmFoB9zXrQVaNW2EghsV2mAjaUj16BRbgxdLv0dtnVQ2fdZg3CivQkAj80cUBfl6Y+6o6hqitkE+GG5DwsGGXh46NTkqlQo+3p6ICtFfK+fp7oaxvVraFLwsfjhGG7DIMXAB2GxEIhlzK7mRuQw9sTh75lKyjrnzd+nTO/J2hl85zXfkyAzLkc2cN+S2pn9a95ZNLK4lqfFwD8NTgTj6yzTIx1tnCL4r6Nzc+utTKhi8kF08PVA32+rMEVEOL4OHjL7olC5xUFtMT2iPny3ID1NjcHQQVk3ogZ0v3iVCyZShZsJXuXrrQeM1anWbSwyRWt4UZ5k53LL7rbl5j6SMwQvZRd1JHv0bemFS3wiHlqFbS3+H7o8M8/Z0xzMJ7dDJiic8lUqFwdHBaCGB+VNImn5K7Idhd4Rg1cSezi6KJPyzzlQdxvz5yhARS+I4DF7ILvTV9Na0CTsKh1sTiWPtlDi7bcvcETnGdG7hhxWPxVrUyVZpYsL
9AVjehGjN3GJSxN6QZBeGkis5kq9Eh50SyV1koP3SJwxo18z0QmTSysdi8fmecxjbq6XZ62x7fqB4BXIw1ryQXYywobc9EUlbiJ83vnkyHpuevdMp+x8fb1vWZiUK8vXGc0M6IMyCB0cl1VQxeCG7kOloOyK7kNOoKGv1jAiwesqI2qwZRP/qvZ1s3q+r6NGqCd64r7OziyE6Bi8yJnaiKrG4wH2eXMwD3eWTTE8Oamf3XTWhhxNLIh+fPd4Lfds2xTtjuuodil47c68SMHiRsZqMjFJgSc2LpdMVmMvc4ZVE9mZrojRXYmjagbcf7IL0WYMBVCdRCw9ogEUPxWBwtOnZwwkY0L4ZvnyiN8IDGsLDvf573KKJ8/sl2hODFxmzdlI3MbhbEL0INs5l2j5Y/xdFIzvMRkvkWyer89IxXU2u4+i0ANZ6YWgH0fdhKvtv3VnDa4QHNESQT3V27/bBPvj9P4PwYKzhZHZkGXvM1i0lDF7kTDqxi95I35DpCe3h6W594Rc/1FXvNonsoe7cRPd2NZ32v26eIyny8nDD0wPb4P2x3UTdj6nJJg1l0JXQ7YxkgMEL2Z2p5pu2QY1x9NVhVm8/2K/+lAEeNgRDRLXVrRcUO129I2pDgOppF1QqFe6Ksi6lP8mb0prfGLyQ3TVtbHyCM19vT4tqaurycKu/rqGJAJXWSY2Up12QY/rLjIurzgfSWOTmg4l9IkTdPllmfHwr/P6fuxAdKp0+kvbA4EXO7DNxr90F+RieTHHF/3W3eQK7Jg3NT0YXbYehneRa5FiHN8GMPCiGghZ7B08dw6z7knTkxJauJMhHjfAA5U21weBFxmzt+Gpvn07siSEdg/HyPzoaXCY+sv5Io1ZNLbuw9FXjG6h4IZI8e5y6hkbw1BZfa7bu2uz9xWbttdi9pf6OvGSd/+vdEk0beeHROGUm+GPwImPtJTY0+K6oIHw8vgcCGxuuefHTU2tiyUglQ6y5Xz4/hJ18qb5/D27n0P35eNvejGPOJJaGajYMNbk6misk+nOk10Z3xr5ZCQhoZLwZX65EDV6uX7+OcePGwdfXF/7+/pg8eTKKi4uNLv+vf/0LHTp0QIMGDdCyZUv8+9//RkFBgZjFlK1Hepo/p4WzOKr929LbXsbLCUgc5NgvKZKHe7s6NuGcoRoRR/G0of8ZSZuSm+JEPWvHjRuHI0eOYPPmzfj555+xY8cOTJ061eDyly9fxuXLl7Fo0SIcPnwYa9asQXJyMiZPnixmMWVLDin5X7nnDoN5HWxR95psbODp1dBDZVMjtUNEjlS7GTTQRGd3Y6ydrbmbnZtr5HBfIvkTLXg5evQokpOT8cknnyAuLg79+vXD+++/j3Xr1uHy5ct61+nUqRO+++47jBo1Cm3atMGgQYPw+uuv46effkJlZaVYRSWRGesDY63vnupj920S1XjvVi6UJ/q1dsj+nh/SHmN6hOPO9tbNuOzl4YaZw6OtWreBJ2teSH5EGzOXlpYGf39/9Ohxe16KhIQEuLm5Ye/evbjvvvvM2k5BQQF8fX3h4aG/qGVlZSgrK9P+XlhYaFvBSRbqPi02aaj/ibWlhZ2BiQDgnpgwjOwc6rB+GDVNmM99/YfJZesm0QOAwdHW524RO4+NIf3bBeL3E3lO2TfJn2ghd3Z2NoKCdC8oDw8PBAQEIDs726xt5OXlYf78+UabmhYsWAA/Pz/tT3h4uE3llhONRDra2apTcz+btzEqRn8WVGOdh4mMMTdweXpgG5FLoqtm+oLaxfNR608f0L9dINZP7Y1fn+lvcHt1YxdHpZF/llmxyQYWBy8zZsyASqUy+nPs2DGbC1ZYWIiRI0eiY8eOeOWVVwwuN3PmTBQUFGh/Lly4YPO+5ULuscumZ+/EPwdE4tV777B5WxypQM5iqNbPGpZUgtSeVb6BgfmE3hnTFXGRTY0mKKu7y/8Msy3jb6ift1nLsW8M2cLiEPu5557DxIkTjS4TGRmJkJAQ5Ob
m6rxeWVmJ69evIyQkxOj6RUVFGDZsGHx8fPDDDz/A09NwUjK1Wg212jWfruUSuxi6R3UI8bG6nd5cHElBYrNnviVzHkhqFjGnuceamsc2Ns6Q3TbIvBQOjF3IFhYHL82aNUOzZqY7lcXHxyM/Px8ZGRmIjY0FAGzduhUajQZxcXEG1yssLMTQoUOhVqvx448/wtvbvCjeFUklP4OUiTHSiag22V+GdYMgO0QVqyb0wLasXHyx57zBZSJtDJLItYn2WBodHY1hw4ZhypQpSE9Px65du5CYmIhHHnkEYWHV/RMuXbqEqKgopKenA6gOXIYMGYKSkhKsWrUKhYWFyM7ORnZ2NqqqqsQqqmzJ/Z7pCPryHDhqBAkp08jOuvNlNXJQHxGtWxe+vR5exKgBGRwdjNdGdza6jKnZp4mMEbVO/csvv0RUVBQGDx6MESNGoF+/fvj444+1f6+oqEBWVhZKS0sBAJmZmdi7dy8OHTqEtm3bIjQ0VPvjSn1ZzCX2BGuA/NqlE8wYddG6WSMHlISUqpHa3ejvtjB0vfnqyWMUEWif89hLZk2rD3RvYfTvXVrYPgCApE/UszYgIABr165FUVERCgoKsHr1ajRufLuqMCIiAoIgYODAgQCAgQMHQhAEvT8RERFiFlWWpDY9gBSYkwpbxdZ2spGne/U5FBPuj3900T/SzZ7SZyVoO90+dmsSRlPZq1eO72H07zXqjtST6vURdWuS1al3Gk/Gd4+BkYekLA6u7ySyv+b+DZxdBHIhKqhwYM4QFN+sRIiZI2ts5e3pjh+e7oPjOUXofCu1gKnJGO/uGGzmtsV7ho1o2hBnr5XaZVs//asf/i4tR5AP+0ESJ2aUvfbB4nZ6s8czmL2qtw2Jax2g/b85T41Sm42b5KWh2h2N1R4OC1xqeHu6o0sLf6cllbOGj7duv5amNkwS6OnuZlbgIqf3h6zH4EXmXhoh7lDjRl62V875NfDEnpmDcWD23XYokR617lXm3LekWi1O8iBm4sP7u5k/KaS+TLs1nk0wf9LRul/2bUTsEzb3HttzOhEBDF5kr6Edggt91kzqiTbNGuG/k3vZZXshft5ootCp2YnspU/bQLOXjY9siueHtMdHj8VqX4u51Vn1oR7WZxoP8hWvRql2Yj2xMIWEa2CfF4W7z4InudoGdgjCwA7Wz5fiSLVHXfG+Ra5CpVJp50Sq8f3TfVFSXglfb2kOQ+bwaLIX1rwoXKyCk7S99UAX9IxowjlSiG5xd1NJNnABgJ4R4t+P2OfFNbDmReZMVZEq+Tp+uGc4Hu5pefW4kt8TIqk6s2AEAwuyG9a8EBGR6BwVuLDPi2tg8KJw9r6OpT57szn3RylXqxMRkWkMXkhRzAlehnUyPqs5EdnHG/d1hrenG14aEeWwfbJpyjWwzwu5HKnXHhE5yv/1binq9ju38MORecN4zZHdseaFLGLt0Gsikp4hHcWvhXR04MI+L66BNS8KZ+/LuFfrAHybcdHOWyUiR1o7JQ7HrhShfzvdpHjGsvYSSQlrXmSuZdOGzi6CpHRrqdy8NkT20qdNIB7v17pe/xAPBTTvsM+La2DwInOhfsZnVPZyt++FLPXbQnxkU2cXgUi2pH59E9Vg8KJwHUP9nF0Eh2rubzyYIyLDWos4KaM+/328FwIbq7F6Yg+7bZN9XlwDgxeFmdgnQud3JdSgWjIfipsCqr2JnKVlgGODlzvbN8O+WYMxKCrYofsl+WPwojC9Wgfo/K6EhxAfb/YrJ+f69Zn+zi6CqBKiq4OH8fGtHL5ve/dRYZ8X18BvBXIp80d3cnYRSIaiQ32dXQRRrRwfi6Iy6c5GTVQXa17IpTzW2/FPlkRSp1JJezZqS7DPi2tg8EImDezQzNlFIJKMDsE+Dt3f3R3ZH4SoLjYbkUnPJrRHatZVZxfDYv8Z1sHZRSAF+flf/fDX5UIMjg5y6H4bebk7dH9yxz4vroHBi8LUvWxbBtiexE7tIa8KuvSXBmP
vmeucgJHsqlNzP3Rq7lqpB4ikisGLwvk1tL0du0lDL+3/vZwQyFj6IBXk641RMWHiFIbIwViTYBn2eXEN8nqkJpPC7VDTUleIn7f2/428PDC6q2MDAxXzfhIRUS0MXhSmbVBj7f/FeGAL8fPG+DqJ8IiIpII1Va6BwYsCvfVgF/ioPfDNP+Ptts3PJ/fCwvs7623z7x0ZoGcN+xHsPjc2ERHJGfu8KNDDPcLxYPcWdk2V37+d4eHSTw5oY7f96HPh+g1Rt09ERPLCmheFqZnS3pFz/AT5eJteiIiIyE4YvCjAuqm9tf/3cBf/I40KcWySLiJXxh4cpvVp09TZRSAHY7ORAvSObIq3HuyCNs0am17YDhp6eaBv26bYdfIaAPZJISLnaqTmV5mr4SeuEA/3CHfo/hp68dQhIiLnELWN4fr16xg3bhx8fX3h7++PyZMno7i42Kx1BUHA8OHDoVKpsGHDBjGLSTaqncRODAGNxN0+Eckbm9Zcj6jBy7hx43DkyBFs3rwZP//8M3bs2IGpU6eate7SpUs5Xl/Can8yYf4NRN2XrzdreYjIMDZcux7RvhWOHj2K5ORk7Nu3Dz169AAAvP/++xgxYgQWLVqEsDDDWVoPHjyIxYsXY//+/QgNDRWriERERCRDotW8pKWlwd/fXxu4AEBCQgLc3Nywd+9eg+uVlpbi0UcfxbJlyxASYnpivbKyMhQWFur8ENUW26oJACAysJGTS0JERPYgWvCSnZ2NoCDdqeM9PDwQEBCA7Oxsg+tNnz4dffr0wb333mvWfhYsWAA/Pz/tT3i4Yzuuuio5tegt/7/u+PfgdvjiiThnF4XIYs181M4uguQNaG84iSYpk8XBy4wZM6BSqYz+HDt2zKrC/Pjjj9i6dSuWLl1q9jozZ85EQUGB9ufChQtW7ZuUK8jHG0l3txe9bw6RGNhh3bTAxnyPXI3FfV6ee+45TJw40egykZGRCAkJQW5urs7rlZWVuH79usHmoK1bt+LUqVPw9/fXef2BBx5A//79kZqaWm8dtVoNtZpPJkRERK7C4uClWbNmaNbMdBVdfHw88vPzkZGRgdjYWADVwYlGo0FcnP7q+xkzZuCJJ57Qea1z58545513MGrUKEuLSkRERAokWp+X6OhoDBs2DFOmTEF6ejp27dqFxMREPPLII9qRRpcuXUJUVBTS09MBACEhIejUqZPODwC0bNkSrVu3FquoZAWVAzMrPN7v9mffLsgxWYSJSE5k1AmP7ELUPC9ffvkloqKiMHjwYIwYMQL9+vXDxx9/rP17RUUFsrKyUFpaKmYxSOYimt4eJeTuwAkniYhImkTN/hUQEIC1a9ca/HtERAQEwXh6IVN/J+VrpHbX/r99MCeFJCJdNekQyHVwVmmyiiOHSndv2QQxLfwAAPPuucNxOyYiWeBoI9fDvOtkFUcGLyqVCv9L7Oe4HRKRrHAqGdfDmhciIgnj9zJRfQxeiIiISFYYvJBVHDlUmoiIqDYGL0RERCQrDF6IiIhIVhi8EBERkawweCHrsMsLERE5CYMXIiIJG94p1NlFIJIcBi9ERBL16cSeCA9o6OxiEEkOgxeyCluNiMTn19DT2UUgkiQGL0REEsWHBCL9GLwQEZFiNPdv4OwikAMweCGr9OAU9EQkQf5sanMJnFWarPJ/vVvBy8MdvSMDnF0UIiJyMQxeyCoe7m54NK6ls4tBREQuiM1GREQS1S7Yx9lFkB12cnYNDF6IiCSqsZqV40T6MHghIiIiWWHwQkREihHqx6HSroB1kkREJHvfPhmPayXlaNmU0ym4AgYvREQkez0imLbBlbDZiIiIiGSFwQsRERHJCoMXIiIikhUGL0RERCQrDF6IiIhIVhi8EBERkawweCEikhAfTglAZBKvEiIiCcmcczeeXX8QIzqFOrsoRJIlWs3L9evXMW7cOPj6+sLf3x+TJ09GcXGxyfXS0tIwaNAgNGrUCL6+vrjzzjtx48YNsYpJRCQ
pnu5uWPZod4zswuCFyBDRgpdx48bhyJEj2Lx5M37++Wfs2LEDU6dONbpOWloahg0bhiFDhiA9PR379u1DYmIi3NzYukVERETVVIIgCPbe6NGjR9GxY0fs27cPPXr0AAAkJydjxIgRuHjxIsLCwvSu17t3b9x9992YP3++1fsuLCyEn58fCgoK4Ovra/V2iIiIyHEs+f4WpUojLS0N/v7+2sAFABISEuDm5oa9e/fqXSc3Nxd79+5FUFAQ+vTpg+DgYAwYMAA7d+4Uo4hEREQkU6IEL9nZ2QgKCtJ5zcPDAwEBAcjOzta7zunTpwEAr7zyCqZMmYLk5GR0794dgwcPxokTJwzuq6ysDIWFhTo/REREpFwWBS8zZsyASqUy+nPs2DGrCqLRaAAA//znPzFp0iR069YN77zzDjp06IDVq1cbXG/BggXw8/PT/oSHh1u1fyIiIpIHi4ZKP/fcc5g4caLRZSIjIxESEoLc3Fyd1ysrK3H9+nWEhIToXS80tLpnfceOHXVej46Oxvnz5w3ub+bMmUhKStL+XlhYyACGiIhIwSwKXpo1a4ZmzZqZXC4+Ph75+fnIyMhAbGwsAGDr1q3QaDSIi4vTu05ERATCwsKQlZWl8/rx48cxfPhwg/tSq9VQq9UWHAURERHJmSh9XqKjozFs2DBMmTIF6enp2LVrFxITE/HII49oRxpdunQJUVFRSE9PBwCoVCq88MILeO+99/Dtt9/i5MmTmD17No4dO4bJkyeLUUwiIiKSIdEy7H755ZdITEzE4MGD4ebmhgceeADvvfee9u8VFRXIyspCaWmp9rVnn30WN2/exPTp03H9+nXExMRg8+bNaNOmjVjFJCIiIpkRJc+LMzHPCxERkfw4Pc8LERERkVgYvBAREZGsMHghIiIiWRGtw66z1HThYaZdIiIi+aj53janK67igpeioiIAYKI6IiIiGSoqKoKfn5/RZRQ32kij0eDy5cvw8fGBSqWy67ZrsvdeuHBBcSOZeGzyxGOTJyUfG6Ds4+OxiUcQBBQVFSEsLAxubsZ7tSiu5sXNzQ0tWrQQdR++vr6KO2lr8NjkiccmT0o+NkDZx8djE4epGpca7LBLREREssLghYiIiGSFwYsF1Go15s6dq8iJIHls8sRjkyclHxug7OPjsUmD4jrsEhERkbKx5oWIiIhkhcELERERyQqDFyIiIpIVBi9EREQkKwxe6li2bBkiIiLg7e2NuLg4pKenG13+m2++QVRUFLy9vdG5c2f88ssvDiqp+RYsWICePXvCx8cHQUFBGD16NLKysoyus2bNGqhUKp0fb29vB5XYfK+88kq9ckZFRRldRw6fGQBERETUOzaVSoVp06bpXV7qn9mOHTswatQohIWFQaVSYcOGDTp/FwQBc+bMQWhoKBo0aICEhAScOHHC5HYtvWbFYOzYKioq8OKLL6Jz585o1KgRwsLCMH78eFy+fNnoNq05t8Vg6nObOHFivXIOGzbM5Hal/rkB0Hv9qVQqvP322wa3KZXPzZz7/s2bNzFt2jQ0bdoUjRs3xgMPPICcnByj27X2OrU3Bi+1rF+/HklJSZg7dy4yMzMRExODoUOHIjc3V+/yu3fvxtixYzF58mQcOHAAo0ePxujRo3H48GEHl9y47du3Y9q0adizZw82b96MiooKDBkyBCUlJUbX8/X1xZUrV7Q/586dc1CJLXPHHXfolHPnzp0Gl5XLZwYA+/bt0zmuzZs3AwAeeughg+tI+TMrKSlBTEwMli1bpvfvb731Ft577z2sWLECe/fuRaNGjTB06FDcvHnT4DYtvWbFYuzYSktLkZmZidmzZyMzMxPff/89srKycM8995jcriXntlhMfW4AMGzYMJ1yfvXVV0a3KYfPDYDOMV25cgWrV6+GSqXCAw88YHS7UvjczLnvT58+HT/99BO++eYbbN++HZcvX8b9999vdLvWXKeiEEirV69ewrRp07S/V1VVCWFhYcKCBQv0Lv/
www8LI0eO1HktLi5O+Oc//ylqOW2Vm5srABC2b99ucJlPP/1U8PPzc1yhrDR37lwhJibG7OXl+pkJgiA888wzQps2bQSNRqP373L5zARBEAAIP/zwg/Z3jUYjhISECG+//bb2tfz8fEGtVgtfffWVwe1Yes06Qt1j0yc9PV0AIJw7d87gMpae246g79gmTJgg3HvvvRZtR66f27333isMGjTI6DJS/NwEof59Pz8/X/D09BS++eYb7TJHjx4VAAhpaWl6t2HtdSoG1rzcUl5ejoyMDCQkJGhfc3NzQ0JCAtLS0vSuk5aWprM8AAwdOtTg8lJRUFAAAAgICDC6XHFxMVq1aoXw8HDce++9OHLkiCOKZ7ETJ04gLCwMkZGRGDduHM6fP29wWbl+ZuXl5fjiiy/w+OOPG51wVC6fWV1nzpxBdna2zmfj5+eHuLg4g5+NNdesVBQUFEClUsHf39/ocpac286UmpqKoKAgdOjQAU899RSuXbtmcFm5fm45OTnYuHEjJk+ebHJZKX5ude/7GRkZqKio0PkcoqKi0LJlS4OfgzXXqVgYvNySl5eHqqoqBAcH67weHByM7OxsvetkZ2dbtLwUaDQaPPvss+jbty86depkcLkOHTpg9erV+N///ocvvvgCGo0Gffr0wcWLFx1YWtPi4uKwZs0aJCcnY/ny5Thz5gz69++PoqIivcvL8TMDgA0bNiA/Px8TJ040uIxcPjN9at5/Sz4ba65ZKbh58yZefPFFjB071ujkd5ae284ybNgw/Pe//0VKSgrefPNNbN++HcOHD0dVVZXe5eX6uX322Wfw8fEx2awixc9N330/OzsbXl5e9QJoU995NcuYu45YFDerNBk3bdo0HD582GQbbHx8POLj47W/9+nTB9HR0fjoo48wf/58sYtptuHDh2v/36VLF8TFxaFVq1b4+uuvzXpCkotVq1Zh+PDhCAsLM7iMXD4zV1ZRUYGHH34YgiBg+fLlRpeVy7n9yCOPaP/fuXNndOnSBW3atEFqaioGDx7sxJLZ1+rVqzFu3DiTneCl+LmZe9+XE9a83BIYGAh3d/d6Pa1zcnIQEhKid52QkBCLlne2xMRE/Pzzz9i2bRtatGhh0bqenp7o1q0bTp48KVLp7MPf3x/t27c3WE65fWYAcO7cOWzZsgVPPPGERevJ5TMDoH3/LflsrLlmnakmcDl37hw2b95stNZFH1PntlRERkYiMDDQYDnl9rkBwO+//46srCyLr0HA+Z+boft+SEgIysvLkZ+fr7O8qe+8mmXMXUcsDF5u8fLyQmxsLFJSUrSvaTQapKSk6DzN1hYfH6+zPABs3rzZ4PLOIggCEhMT8cMPP2Dr1q1o3bq1xduoqqrCoUOHEBoaKkIJ7ae4uBinTp0yWE65fGa1ffrppwgKCsLIkSMtWk8unxkAtG7dGiEhITqfTWFhIfbu3Wvws7HmmnWWmsDlxIkT2LJlC5o2bWrxNkyd21Jx8eJFXLt2zWA55fS51Vi1ahViY2MRExNj8brO+txM3fdjY2Ph6emp8zlkZWXh/PnzBj8Ha65T0Ti0e7DErVu3TlCr1cKaNWuEv/76S5g6darg7+8vZGdnC4IgCI899pgwY8YM7fK7du0SPDw8hEWLFglHjx4V5s6dK3h6egqHDh1y1iHo9dRTTwl+fn5CamqqcOXKFe1PaWmpdpm6xzZv3jxh06ZNwqlTp4SMjAzhkUceEby9vYUjR4444xAMeu6554TU1FThzJkzwq5du4SEhAQhMDBQyM3NFQRBvp9ZjaqqKqFly5bCiy++WO9vcvvMioqKhAMHDggHDhwQAAhLliwRDhw4oB1xs3DhQsHf31/43//+J/z555/CvffeK7Ru3Vq4ceOGdhuDBg0S3n//fe3vpq5ZKRxbeXm5cM899wgtWrQQDh48qHMNlpWVGTw2U+e2FI6tqKhIeP7554W0tDThzJkzwpYtW4Tu3bsL7dq1E27evGnw2OTwudU
oKCgQGjZsKCxfvlzvNqT6uZlz33/yySeFli1bClu3bhX2798vxMfHC/Hx8Trb6dChg/D9999rfzfnOnUEBi91vP/++0LLli0FLy8voVevXsKePXu0fxswYIAwYcIEneW//vproX379oKXl5dwxx13CBs3bnRwiU0DoPfn008/1S5T99ieffZZ7fsQHBwsjBgxQsjMzHR84U0YM2aMEBoaKnh5eQnNmzcXxowZI5w8eVL7d7l+ZjU2bdokABCysrLq/U1un9m2bdv0noc1x6DRaITZs2cLwcHBglqtFgYPHlzvuFu1aiXMnTtX5zVj16yjGDu2M2fOGLwGt23bpt1G3WMzdW5L4dhKS0uFIUOGCM2aNRM8PT2FVq1aCVOmTKkXhMjxc6vx0UcfCQ0aNBDy8/P1bkOqn5s59/0bN24ITz/9tNCkSROhYcOGwn333SdcuXKl3nZqr2POdeoIqluFIyIiIpIF9nkhIiIiWWHwQkRERLLC4IWIiIhkhcELERERyQqDFyIiIpIVBi9EREQkKwxeiIiISFYYvBAREZGsMHghIiIiWWHwQkRERLLC4IWIiIhkhcELERERycr/A6cKg4v2KV3HAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "time = torch.linspace(0, signal.shape[0]/fs, steps=signal.shape[0])\n", + "plt.plot(time, signal)\n", + "plt.plot(time, upsampled_boundaries.squeeze())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "l3IxNZ5nlxcR" + }, + "source": [ + "For more details, one can also upsample and visualize VAD scores:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "background_save": true + }, + "id": "nPIgz2ZvlykV" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAi8AAAGdCAYAAADaPpOnAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAACUPElEQVR4nO2dd3wU5dbHf7Mlu+khpIdACDUIhB4DKAiRKnZF5CooYgOvitdXsGHHguVasSFeK/YGBinSA6FLDTUQIJWQXjbZnfePzUx2s313Zmdn93w/n8Ds7DPznNmZZ+bMeU5hWJZlQRAEQRAEIRMUUgtAEARBEAThCqS8EARBEAQhK0h5IQiCIAhCVpDyQhAEQRCErCDlhSAIgiAIWUHKC0EQBEEQsoKUF4IgCIIgZAUpLwRBEARByAqV1AIIjcFgwPnz5xEeHg6GYaQWhyAIgiAIJ2BZFjU1NUhKSoJCYd+24nfKy/nz55GSkiK1GARBEARBuEFhYSE6depkt43fKS/h4eEAjAcfEREhsTQEQRAEQThDdXU1UlJS+Oe4PfxOeeGmiiIiIkh5IQiCIAiZ4YzLBznsEgRBEAQhK0h5IQiCIAhCVpDyQhAEQRCErCDlhSAIgiAIWUHKC0EQBEEQsoKUF4IgCIIgZAUpLwRBEARByApSXgiCIAiCkBWkvBAEQRAEIStEVV42btyIKVOmICkpCQzD4JdffnG4zfr16zFo0CBoNBp0794dy5YtE1NEgiAIgiBkhqjKS11dHTIyMvDee+851f7UqVOYPHkyrrjiCuzduxcPPfQQ7rrrLqxatUpMMQmCIAiCkBGi1jaaOHEiJk6c6HT7JUuWoGvXrnj99dcBAOnp6di8eTPefPNNjB8/XiwxCYIgCIKQET5VmDE3NxfZ2dlm68aPH4+HHnrI5jZNTU1oamriP1dXV4slnu/RWA1s+wBouOi4LcMA6VOALsOd23fdBWD7EqCpxjMZfZ30q4DUkVJLQfgSLU3AtveBmhLXtovtBQy5w/X+aopxIfdtnGwoQ6VBh0boEa/QYqA6GmrGtnGcZVlUsc1oYg3QsXqUG5pQx7YgiFEgiFEihFEiTRmGCwYdqlgdeijDnSp4BwBQKIH+U4HE/q4fT20ZkPch0FTr+rZC0eNKoPtY6fonRMenlJfi4mLEx8ebrYuPj0d1dTUaGhoQHBxssc2iRYvw7LPPektE3+LgTzi55VWsCA1FtVKBGoUCyc0tSG5pQaTBgKyGRoSwbFv7Y3
8BD+xybt97/gdsfFUcuX2JoznAg3ulloLwJY6vAdY8Y7ZqQ7AWn0RFQs2yOK9SoUHB4IPiUvTRNZtv2/VyoGM3l7rL+ftJPHpxu8V6Jcuit06HrIZGDGtoxKWNTeBUj5NqFa7plORSPwAQqdcjq6ERO7Ra9GzWYVZlNTIbm9AMgAUQZNq4eD8w4zeX+8DOT4GNr7m0SRMD1DAKhLAsglkWTqpYttn/HfB/Jz3dC+HD+JTy4g4LFizAvHnz+M/V1dVISUmRUCLvwTbVOryBbU+6DiH1F4A9XwK6Oud3zrVNHCD4G8xh3UXMr9iOIZpYPNVhsKD7dpraUmDPF679JkRgwF0TUZ2Bfjfhy5pjeKVqr0WzqcmJSFdH4XJtIuYc3gymuc6t68lUcRkQ1BFqRoEDugo0QI+DGg0OajT4JCoSAPBdXDZCFWpcU/wnv40KDNSMAhGKIEQqgtDCGqBjDTirt5SlSqlETlgoACBXFYzc4GD8Fj8BV5fkAAC+jRuLxspCfF2xBw2Gc3iXNUBhYv3ZWbwTJ6tO4roe10GtUFs/IF2rxSV5MJA2GgBQqW/Cr/WnsbTmCEIZFWKVwegbFI1GtgUHdBdxtLkSLTC+aMUpg3GFNglDNbG4XJsINaNApUEHBkC0QgMA0IOFyppVqrEa2PExjesAwKeUl4SEBJSUmJtqS0pKEBERYdXqAgAajQYajcYb4vkch5rK+eWbe96MupY6lNWXobyhHCerjG8dmed/xnOX3I3r9rjZSUomMPZpp5oW1RZhy/ktmJw2GcEq4/nS6XV4ZuszuLzT5RiXOg4bz27EA+seAACcbKnB/ZOXomNwRzeFcw6WZXG+7jwSQhKgVCiNK4sPGJUXgrBFdDds7DUar6z9nl91b8a9YMDgg30fAAAON1ficHMlukRGYkq56w/MFkMLv/x6xxEYd9USAECzvhmHKg5hR/EO7CzeiS3ntwAAbi5dY7b9U5c+hZt73Wx130W1Rfj1xK+o0dVgUtok3PLHLVbb3VT+N7+8Jr4rPildC4SFAtAh438ZWHPjGuQV5+HxzY/z7f46/Rc+GfeJ/YPrMhzNoxdg1l+zsKe07QZUgSYU6uuwW1dudbNSfQOW153A8roTAACVQmX2O3EEq4KRc0MOorXRbSsrzxiVF8Lv8SnlJSsrCytXrjRbt3r1amRlZUkkkW9TpW/kl5/Kesrsuxl/zsDu0t0AgNfyv8R1Ava77MAy5Bbl4o3RbyBUHcqvH/fjOADAylMr8dGVH6GxpRFZ3xjP3e8nf8ejGx+12Nfo70Zj9227bb/FeUhjSyOGfjXUbN1zw5/DdeE9ROmP8B/Oss2Ys3YO//nXa35FWlQaAIBhGLy/933+uxUaBaa40cfBCwf55bHByfyyWqlGRmwGMmIzcFe/u/Dj0R+xPH85Dlcc5ts8MPABm4oLACSGJeLejHv5z/tu3wc9q0eNrgZqhRrDvzH6vzXp23wGP9lvqZBk/5BtsW570XYY2lll2rO28Twe+nKQ+b46Z+Omnjfh6MWj2HhuI9QKNYIUQbir/13oF9MPNboabC/aju1F27Hi1ArUNddZVVwAoKGlAfetuQ/Lr1puUwbCfxFVeamtrcXx48f5z6dOncLevXsRHR2Nzp07Y8GCBTh37hz+97//AQDuvfdevPvuu/i///s/3HnnnVi3bh2+++47rFixQkwxZcu+RqOVqp8i1OK7ZROW4cdjP+LZ3GdR01Lvdh8sy+Kd3W/jmyPfYGDcQLww8gW8vssYDfbM1mdwd/+7UVRXhDB1GL/NjuIdGPjFQKf7GPTFIGy/dTtC1CFuy2mLu1ffbbHu6a1P43y36zHHSnuC4HgGZfzyT1f/xCsuAHBfxn24sceNeHvP2/jl+C/IV7nnpbH1/FZ+WWlHEbih5w24oecNWF+4Hh/u+xBju4zFXf3ucqkvBaOAglHwloruUd1xvPK41bbRej2uRBiWKxvM1j88+GG8uetNAMCxi8fQK7
qXdXmTEnD04g7+8zXdrsFzI57jlZ3hycMxs+9Mi+0iNZEYlzoO41LHYX7mfBwsP4iIoAikRqZif/l+vLXrLYQHheNU1SkUVBfg0IVDYFnWeUdkwm8QNc/Lzp07MXDgQAwcaHyQzZs3DwMHDsTTTxunIYqKinDmzBm+fdeuXbFixQqsXr0aGRkZeP311/HJJ59QmLQNNIxxCuQia/lmwjAMsju3vTFdVLh3qn9sPIuP93+M2uZabDq3CaOWj+K/yynIwfW/XY85a+dgRs4Mm/swM+sCmN1vNt4a/RaGJ7VFPj2bK7zTdY2uhjdXR2micFuf2/jvlpz4CfV0wyPssB3GB/fI5JHo0cHSUhcbEotLEy8FALAW3zrH6erTAID0Jp1T7UenjMY3V33jsuJijUldJ9n87s7KajypD0d6dDq/LrtzNu7seyf/eVWB9fxbb1YfxFGN0fU3MTQRP1/9M14Y+YJdK4011Ao1BsQNQFpUGhSMAhmxGfhswmd4e8zb+HLSl3y7soYyO3sh/BVRLS+jR48Gy9oe1tay544ePRp79rjroBFY6FgDACBTGWn1+yhtFL+8S6uBpfHXPjUMg9VNxS5t0yWiC39DBoDfr/0dqZGpKG8ox7dHvkVWUhYGxxuddMd2GYux349FaX0pNp7d6KJ0bbAsi2OVx5Aclmw2jbV452J++c/r/0RYUBgmdZ2EaSumAQAKVSpYf28kAp1ipZJffmTwIzbbpXc0PtwvKBgY4Prb4IWGCwCADJN0D95iZt+ZeHvP2wCMlhEA+PXEr0gPScKtp84A4cB3U77DmeozUClUSAozBgcMiR+CnSU7kVOQg38P+rfZPlsMLVha12bN+evGv0SRPVLTds87XX0acSFxovRD+C4+5fNCuEYzqwcABNmxIISoQlDfUo88rRbZzTabWXC4qQI3p6YAzUanuk5hnXC29qzVttHaaFQ0VgAAHs98HC/nvYxTVadwdberkRqZCgCICY7B3IFzLbZ9JusZ3L/2fpvz2s7w4vYXsTzfOO89JmUM/jvmvwCAn479BABIi0xDWJBxWqtvTF9ewTqsCUIvd1+ZCb/m1Y4d+OXuHbrbbJcc1uanUqxSwtXg5W1F24x9tA+59gJqhRpbpm1BSV0Jukd1B8MweGHkC0D+n8DBNufezhGdzba7IuUK7CzZicKaQsxbPw8l9SVoaGlAp7BO/HgHgJWxrr4uuUZaZBpOVp1EQ0uD48aE30HKi4zZUX8eABBk531vSMIQbDy7EbnBWkDn/JP6kQu5Zp8fGvwQHt/0OHQGo3l7wbAFMLAG/KvPvwAAhlYrkIJR4NvJ3+Jk1Un06djHYT/cm2ujvhENLQ18lBIAfHX4KwDArb1vxaMbH0VlUyU+GPsB1Eo1cgpysPzIckxOm8wrLgCwrnAdDpQfQJeILvy6x4Y9ZtZnjc6YeO+XsFBcW0PaC9EOlsXqUKP/1YikEXabapQaBKuC0dDSgMNBQUhycwKpU4v7yrsnRARFICIowsa31o9lUtokvLbTmMdl9enV/PpjF48BhcblcL0BKSpLXzwh4eTeV7YPl3e63PxLOxZ/wj8g5UXGRCu1AIAqKz4vHCOSRmDj2Y0oCFIDcG5eHQAK9ebZMaO10Vg/dT2+OPQFhicNx4C4AWbfm85nh6hD0Demr1P9mPrDnKw6iUs6XgLAaE5/Oe9lAEDu+VxsOLsBgDE6IyM2A49uMEYu7SzZabHPpQeWmj10shLNo9UyEzLxZ8GfqFEoAOidkpMIHAoaL/DLzwx/xmF7lcJ4G92j1cCVjEh1zW2h1T11OmMWbBkQExyDD7M/xP7y/YjQRCA8KBwLNi0wa7PgQgXQSdzjqW023qO0rfdBI/L4DQnPEdVhlxCXxtZpoyEqW29OwLjUcfzySaXNZg7pFtUN4UHhuH/A/RaKiycoGAUSQxMBAAfL28JGq3RV/DKnuADAL8d/weZzmy320zm8M8Z2Nj46Vp9ejWdynwEAxA
XHWUQiXNvjWgDgnQoJwpR99W3TowmhCQ7bd4s0ZtQ9GuRauP/Ri0f55Ri9waVtxcWxAjA8eTjuybgH03pPw1VpV+G7q74z+/6qOvcjHJ1lWMIwAG2WVCKwIOVFxmypN9potbCtlcQEx/DLv2md0170BktrRHhQuIvSOQ/nj8LN/wPAicoTVtv+eOxH3L/2frN1GbEZ+H7K91ZzXswbMs9iXfeoNh+GOnpRI9qxr+4cACAdziW/HJ5sjJrbrtU6aGnO2RqjkqQGI3t7QXrHdCzJXoLnhj+HrXETvXI8XG4o0/sGETjQtJGMiVOGoFRfD62DEMRBHXpj98Uj2K127pbC+bUAQA9lOMb1u120JHIAkB6djmMXj2FHcVteCGcceKf1nobHM9uyfnJvYqZMTptssc40MuEPrRJTXRWY8GsMrb4eUU6+2w2MM6aCMDAMdIZmOGvP21e2DwDQT2U9WlBujEhunao9aTmVKwbcVHV8SLyDloQ/QpYXGdMMo6k5mbH/hjiio7Ey7B61c6e7pK6tRMPy6BFmWTrFYGSysapzZVMlH1qv0zv2z3lg4ANmn1UKFf6+uS3VOWfOt8dqjQdzaYTf0djSiB8vGFM1jGCcS5o4NL4tg3N+zRk7Lc2Rv71FWnpG9wRgdPYnAg9SXmSMzolQaQAYFu046seUepOMvGoXE0u5A5foCwAfcm1q/QGMvisvjHgB/WP7Y9mEZdh8y2arU1kxwTHIuSEH9/S/B+9nv2/xPceMVKNFZnsQKS9EG/esvodfDnby9qhUKJGoNyrdO03S9zviQqtj8MigWBckJDg0SuNLG00bBSakvMiYOoMxN4TawWnsGd6Wp6G4znHSOW7KJrnZO+GbHbRtOTW4Wi+7SnaZN2KAa7pfg68mfYXB8YPNklS1JzksGXMHzuWTalkjOz6TX67V1dpsRwQOBtbA1wMDgP5w3ocltDUyN6/ikNPbcNmfHb18SIaPhxuHqIyWMVO/vjZ8W3bCc0h5kSlVTW3ROKGMfetBiKrtJpxfke9w382cUuTFmxeX7Gtv6V4AliUFOIuMUGREtaV7P3DhgKD7JuRJYU2h2eeeTnuvAIOajWOlwoXIl9hgo8UlghHPn8yfSQwzRik6M8VM+B+kvMiU+ua2qR3HNz+GV0ScMbFylhcVWHgrbwIXLv1P2T8ALCOe2udq8RTGZDqstL5U0H0T8sQ0BH/z6UKXavH0bb1cCxtK7Dc0gQvx7aTkEjP6iAVGKEuQyBalIIVRueRetrzRJ+E7kPIiU1paE9MFG5zLDzGswejUZi1HSnu4/BPevA10jewKANCojPPY7aONuERgQjK83phWnJQXAgB+PvYzACBOHYZIg2tWxy6tPi+NLlgBuHIbGroNuwUXAUnlAQITGjUyhbNMqJy8x45pfVAXVBc4bMspCheU3nNmHZpgjNg4ftFY1E3PmltexAjV1rZao2zllCECi1NVpwAAoyN6urxtbOs7RDPb4tQ0hqlynqBwLT8MYcTU741eQAIPUl5kCl9LyEnHtDEmGS8d+Y9wN9+sBu+FIHLKyfk6Y70mM1MwYJElVwhCW9+uOcc/InAxsAY+wu26jhkub9/JxABq6o9mC1MFJ1zEHEr+jNbEl4/8XgIPUl5kCjdt5KxtJMZkeunwBfvhnMcrjdYPjRcddlPCUwC0vU2ZJqwDgCZ9k+B9pjUbFaT2YdlE4HGx8SK/3FMbZ6eldRgAIa1jzJlpDNOHrb3CqoR9wtTG7NztLbWE/0OjRqa0TRs5r2B0bTHeXPeX77fbrrHFaHExFi70Dpz1o1lvVChiQ8xzX/SP6S94n5wTc3srDxF4mE6nBrnpX8W9HnDTT/YobWib5qBMQ+7DOVWT8hJ4kPIiU7g3RVdufJGtd1fTAojW4Hxe0nTee6irlUbTuS1FgptOEpKgVuXl7zN/O2hJ+Dvna43XV1Joktv5TRpblX1uStce1U3VbvXhXdy0vHrRYsvdqyzqsfl4jh
rCc0h5kSnVzcabX5FK5XR4YNfWyrWO0mlzCkS03ntvM5zPS7OhGQbWAEO7KKou4V2E7ZBhYGj92VIjU4XdNyE7iuqKAABBSvcrjQ9qNI4rZ6wA3LRRrw692lb6XZiv+MfDWV7aFEZ/+w0JW1BhRrnS+mLRv7EJiHDQtvWmmKkz4Odgx7leuKkblcm2YhOhaTuI87Xn+QfAvwf+GwbWgJt63SR4n11bMwhbq6JNBBacn0qXCBMl2aVrn4GydUxy/mj24MOklRofTAYrHwVA2Zqg05nfnPAvSHmRKdxgDXHCRM3RXd92lyysKeSdZNvDpUj3ZoZdtUINBgxYsEbLS+txXdLxEgxPHi5Kn8rW46P5ciKvOA8AbI4JZ1C2aiHOVESv1hktp2UNZYCWqiK7C6e8yGMajhASmjaSKdzDXemCftGrpa3xh/s+tNmOc55t8fILWFiQMXKghW3hFQqFiE7DnL+QMw8bwr+J1hjLUXiSDJHLueSMJY+rnn5Jx0vc7o9o84XjilwSgQMpLzKFu0G6GqkwPMloxfj1xK822ygVxr1203n3oa5ijA8Og6HN8qJ0ULfJo/5aHyDOOFgS/g1XCLR3dG+398FdT1yqAXtwof/WiwoSztKjg7FGmZj3CcI3IeVFpvCWCRendm7q2eY7YusNkbNEeDPPC2Ae9sgn4XOhvoyrcFYrmjYiuCSInOLuDmfURqdzZx6kW89vBcA5CPuc04ts6KjtCIBeQAIRUl5kinuWFxajU0bzn1afXm21FRdtpPLyTZW76RtYA593Q8w3Ks5H4VztOdH6IORFj6gecFeZGNoabeRMNui4EGMiPJ+uy+P2y4v37huW0Ubel4GQBlJeZEqVzpiC3Oh06rxziumcfk5BjtU25Q3lxrbetrwoLBNOiVEWoHXPiDIJx6aIo8BFb9DzDrSelIoIcmEakps26h9rmnxRPlE+TuGFSEUL5cXvws0JW5DyIlNOV58G0JYYyz7mA/qGHjcAaDNdm2Kahj/YRcXIUzgri57Vg2ntNyk0SbT+wkyUFzI7By4l9SX8ckyIqQ+KC9c+0xYq7cy1tLvEGNGnUWqc78NbyEgBsG15IfwdUl5kSkSQMS+KO34pYzqPAWA0WbPttq9vbivgGN/iXWsEdyM6X3sebKvZ1xMfBEeYRmqR30vgwlldwtXhHlUv5x75zjxIuetaPMtiYEDKS+BCyotM4R62Xd1I4T8obhC/zCXL4uD8XZRgvF5zpbCmEIB50TqFiJeo6Z7p5he4HLt4DIDnigTnQ+WU8tJqZewW2c2jPgMd7v5ALx+BBykvMoUvzOiGYxqXTwWwrOvDKQ5BIkb52GJk8kijDCZVnsXM86Iw+e3o5he47CndAwDooO3g0X5csbxUNlUCALQqrUd9BjqcBau9BZnwf0h5kSncw1bl5pjlfEl2FO8wW88le1JJcGlwJnuvWV5MfjuyvAQu3x/9HgCQGpHq0X6cDb2vaqril8PV4R71GehwvnH08hF4kPIiU7hcLEo3QwI5K0f7aSPuxlrDeq+iNAcXCWXqNCxqnheTZbr5BSY1uhp+eWqvqR7ti7PkObICcFYXAIjSRlEFZA/gpt9YCo0OOEh5kSmnqk8BcK08gOlN8tKkSwFYZgPlijL2V0d7JqAbcJaX/Ip8fp2YDrvk80KcqDzBL3MKvbvKBHc9OVKEz1SfAQDEBce51Y/3cFMh8KIyxvkp7S3dK5kMhDR4RXl57733kJqaCq1Wi8zMTOTl5dlt/9Zbb6FXr14IDg5GSkoKHn74YTS2JoAijHCFyJoYxq3QxoFxA/llLmwTaPM30UqQbruisQKAea0h0aaNuIyqVCIgoDl68SgAoHtUd88ddlufl5wDsC1qm2sBAKUNpeZf+F3kkfjHU1xXDMC0zIK//YaELURXXpYvX4558+Zh4cKF2L17NzIyMjB+/HiUlpZabf/1119j/vz5WLhwIQ4fPoxPP/0Uy5cvx+OPPy62qLIiTG
10uk1ucaL+kJWbomlNldl/zeaXD1ccBgCouW28eEMdEDcAgLlZXcxpI6DtVsc9UIjAgpuiLKkrsfzSpWufQbWTzuWcZXF0p9Eu7N+byEcBGBw/GAC9fAQioisvb7zxBmbPno077rgDffr0wZIlSxASEoKlS5dabb9161aMGDECt956K1JTUzFu3DhMmzbNobUm0OBCmjsY3B+0r17+KgCjtWX08tHQG/QIUgQBAM611NvbVBQigyIBAHnFbedabOWlpfUBVdlYKWo/hG/CKS9XdL7C4311bTaOyWBVsN129a1jq6a5xm47wjGm9dCIwELUJ4NOp8OuXbuQnZ3d1qFCgezsbOTm5lrdZvjw4di1axevrJw8eRIrV67EpEmTrLZvampCdXW12V8gwCkvag/mdid2ncgvX2i8gFd3vMpPG12uTfBMQDeI1BiVF666NCB+Eq9OLUbljxz+AhPOYdeRwuEMwdwUJOy/UJypMfq89I/pb7cd4RjeYZd8XAIOUZWX8vJy6PV6xMfHm62Pj49HcXGx1W1uvfVWPPfccxg5ciTUajW6deuG0aNH25w2WrRoESIjI/m/lJQUwY/DFzlZdRKAZ8oLAHwx8Qt+eVXBKt5hV4o8LynhxnPXwjoxFSYQKicjRAj/hMvxIoSFj6vw7qhO1tkaY4RfhCbC4z4DHbK8BC4+F220fv16vPTSS3j//fexe/du/PTTT1ixYgWef/55q+0XLFiAqqoq/q+wsNDLEnsf0wdtqMGzh+6AuAH4MPtDAEbry7aibQCkUV7a38w7hXUSvU/OrkOWl8DE1PfLU7gR4+ha4uqSdQrnrm+69tyFygMELirHTdwnJiYGSqUSJSXmznAlJSVISLA+LfHUU0/htttuw1133QUA6NevH+rq6nD33XfjiSeesMi4qtFooNH4YHEzETGNxonXe26lGBg/0GJdjcH7eV7aKyti+7sAbYnqyPISmHCWxl4denm8L+5qtfcgNf0uLTLN4z4DHVJeAhdRnw5BQUEYPHgw1q5dy68zGAxYu3YtsrKyrG5TX19voaAolTSvaYppEjeNS5Wfrf9+wapgaJXGNOVc3pf+QdLkeWFMjkVc5YUx+dexnwLhn3BFGYOUQSZr3czz4kRV6aK6In65a0TXdt/6WJSP2/fb1u28EKnI+bzwvznTZksl/BtRLS8AMG/ePMyYMQNDhgzBsGHD8NZbb6Gurg533HEHAOD2229HcnIyFi1aBACYMmUK3njjDQwcOBCZmZk4fvw4nnrqKUyZMoVXYgKdi40X+WW1QGO0vZ9JkAQzigzDmJnclV7INcPf6kgxDkh2lxpzHAlxrSmcKMxomgNGrXS/gjVhhHxeAhfRlZepU6eirKwMTz/9NIqLizFgwADk5OTwTrxnzpwxs7Q8+eSTYBgGTz75JM6dO4fY2FhMmTIFL774otiiyoaKpgp+2TkVw/Eb0E09b8I3R77hPwdJkKQOAHpH98aRiiMAxC3KyMH7KZDyEpAkhCaguK7YmKbfAhcsB4xzhRk5y6ZPTxn5mAHIHlwG7kMXDkksCeFtRFdeAGDu3LmYO3eu1e/Wr19v9lmlUmHhwoVYuHChFySTJ9w8fVd1pGD77Nmhp9nnYImUF1PzvZhFGTnIYTew4caSEI67zjjs7izeCQCID4m32YZwnlqdMbmk+bQfEQj4XLQR4Rg+x4uAPiHXdL/G7HMXVZhg+3YJk/u+Nxx2nXlbJvwXLq8RV1fLE/hQaTtTGNx1lhDq/TxK/khalNGCxfnsEYEDKS8yhMvQqRbQOqJWqPHlpC/5z+GMV4xyFjSbRDl5xeeFizYiy0tAwiWpE0R5af3f3hQkl4pgaMLQtpU0Zek2XEZwevkIPEh5kSHHLxrnzVsEHrAZsRl4aeRLeHfMu9BING3UP7Yt66hXQqVb/yefl8CDizQCgPCgcI/3x11L7Su1m8JNT9G0kTBwGbgpWjDwkOb1mvAIrcpoIuWVF6cjpR0/oK
d0m2JcOLzGDck8x3Tuul7M+koMFyrdmmGXLC8BR31z2/XFlaYA4LYlJKS1zpitKQyWZVHWUAbAxrSRzznKujkm+N9P/APiXnDY9n3Sy4jfQ5YXGcJNrfTXxkosifA0tjR6tT/yeQlcuHEkRF0jAOioN15DtnxeSutL+eW4kDhB+gx0OKd+evkIPEh5kSHcTVfl7OnzJFmUFxJNmcLVNwKMYdNiQ9FGgQuXqdqmv4tL1z7D18kyzYBtSkWjMcWBRqnhrae+ic+ZgGzCTxvRy0fAQcqLDCmsNtZvEjLayFeIDWmzJpHPCyEmXKZqlUKY2XOlifO3tYcplxCPHrTCQeUBAhf/e/oFANwbXKMXqy97C42yrU6VUA8Ve5DlJXApqC4AADS0NAiyP6XJNWStsvTRi0cBAF0iugjSH2HN54UIFEh5kSGcU2uSVLlYRIQLfQS8lOfFiXo0hH/C1dFSCZQWQGXy/GxfbgNoS1CXEZshSH9E2zmkaKPAg5QXGXKw/CAAIFEVKmIv0rzJdApvqyztzdpG9sJbCf+E8x27JOYSQfanMnn7r26qtvj+TM0ZAEB6dHq7b8hq4C6czwtZXgIPUl5kSIPeaOZWyMixzllMnSe9obwUqIxDIFRURZDwRRw67LqIaYL6i00Xzb4znUbqF9tPkP4I8nkJZEh5kSFcVtDuQVGta5xO9CKGOIJiVttI1Gkj4282vMn4ACOfl8AjvyIfgDXlxf1rIUnbEQCg0+vM1nOZdQHLOmJt+NjLiNs/Q+uGXohU5KeN+JxXbV5shH9DyovMME2s1cEP63mY5twoqivyWr+kvAQenGVPyOssqNXJ3LTMBQD8cfIPftkbjuiBAu+wS+M34CDlRWZcaLzAL8c7PdXhyRuQd98GTTOddo7oLHp/3AAgs3PgwSWTuzTxUhstXLj2GXPn3/YK0dbzWwEAWYlZrgkpBV7O7eQJnPJyrvacxJIQ3oaUF5nB1TWK1kZLLIn4BCuFyXxqD4Ze2AIWbmpHqAy7AFDSZExjYOqwa2ANfHqDG3veKFhfBFWTDmRIeZEZJfUlAKxHM/gLs/rOQufwzriux3Wi98XPkFO0QsBx8IIxak+tFMZhFwAGR/UCYJ5l9/ODn/PLI5NHCtYXAUQHt73E0RgOLEh5kRlcoqvMpExxi49JeCN4aPBDWHH9CuvF6wSGahsFLpyfhJD1tDqqIwAAO4p38Ove3vM2AKOFJ0QdYkUQeui6C2MytUdj2HPKG8rNanD5MuQ5JjNOVJ4AAES03iQJz6Cq0oEL55/SvUN3wfZZ3VoJPSzImECySd/EW2EWjVwkWD+EEdOIRBrDntGsb8aUn6egSd+Ezbdstq5o+xBkeZEZXD2WLpEmKcaddbCjN7w2Wn8zKg8QuHBjyeJFwINxcmlrArrdJcY6Rv+U/cN/NzpltP2Nfc5R1s3fgf/9vBAqbfKbGaeNfO03lA+1zbWoba5Fs6EZ5Q3lUovjEFJeZAY3T2+ZpZNwB/J5CVzyL7bmeRHQ56V9unpumreDpgOUCvGTLgYaCpNHGJUIEI72eYp8EVJeZErH4I7ON/bkjc7n3gaFhSwvgQnLsvyUg2l4vhkuXfvGtr3CUgAAxXXFANqS06VFpbknqCTIZ8ybThuRz4tw6AykvBACYqoNp0akSieIH0GWl8Ck2dDMP+yEdAzvGNQ2BdXY0ogjFUcAAMlhyYL1QbRhOW1ECMGpqlNSi+AQUl5kxNnas/xyqJpq8QgBVZUOTEzn9IXMJ8SVBwCAyqZK3gIzJH6IYH0QbZDDrnCY/n5y+C1JeZERJXUl/LIxxbjvX2C+Dg2AwMRUeRHU54VhEKQw1ufaW7aXX287iy9A49h9zHxe6AVEMJr1zY4bSQzdu2UEZ4LuHiVcaGegQz4vgQkXaZQWKbwvCnctbT67mV+XGJYoeD8EzNxzSHkRjmOVx/D6ztd9OucLKS8ygqvfYVnYTT4Odr6H8UFDN77AgqsorVFqBN/3oL
hBAIBfT/wKoC2fjGP8bBx7wdnf1PLCsqzfBxh4iy8OfYFlB5dh1qpZUotiE1JeZARn6nbf8kLWhfaQ5SWw4cptmOPBtcCyUCnNlZVe0b3c35+UuO0A672xZBZt1D5Umhx4PaagukBqEWxCyouM4LzpvVFtOVBQcPc3us8FFFyhRIeJ49ygT3Qfs89ZSTKoJC1TKNpIOGz9fr5qlSblRUasK1wHwJ2wS09Mqf5thqXaRoHJ/vL9AMxr41jiwrVv8hBt798yPGm4K6JJj8ymXjjrC1lPxaG2uVZqEaxCyouMSApNAkBh0kJC00aBSZQmCgCgVggXacTRs0NPs88DYgcI3gdhycXGi1KL4JccvnBYahGsQsqLjCiqKwIAdA6naSOh4JSXuuY6SeUgvAuX8LFHhx6C77tXB3MfFyFDsQlLOKtpfWtRTEJYNp3dJLUIViHlRSbo9DreOhCsak2qJaaxIEDmj5tbtZdDFw5JKwjhVbgXATEsL1qVls+APauvE9EagTHURIN7maOpX3FYcWqF1CJYxSvKy3vvvYfU1FRotVpkZmYiLy/PbvvKykrMmTMHiYmJ0Gg06NmzJ1auXOkNUX2WyqZKfplSjQuHsvXBQb9pYHGi8gQAc4dPIfn12l/x3VXf4YGBD4iyf6INzueFlBfPMJ06v7rb1ZjRZwYAY5Rri6FFKrFsIrrysnz5csybNw8LFy7E7t27kZGRgfHjx6O01HryG51OhyuvvBIFBQX44YcfkJ+fj48//hjJyYH9cKlvNppEQ9WhljdcmTnY+QStv1lKix4A3fgCjZjgGABAYqg4yeMUjALpHdNdqyTtd+PYO8djrrz422/ofRgweHHki7h/wP38OtOM1L6Cs9mT3OaNN97A7NmzcccddwAAlixZghUrVmDp0qWYP3++RfulS5eioqICW7duhVptNOmmpqaKLabPc7LqJAAPH7JknraA094tckQQfg2XYbeDtoPllx5NmfrTIHPzWLw85WzT8kJJ6zwiRB2CiKAIVOuqUVhTKGgBUyEQ1fKi0+mwa9cuZGdnt3WoUCA7Oxu5ublWt/ntt9+QlZWFOXPmID4+Hn379sVLL70EvV5vtX1TUxOqq6vN/vwRXzTb+QP8rc2fnjmEQy40XgAgToZdwrtwyouetf6MINynWmd8nnJ5kXwJUZWX8vJy6PV6xMfHm62Pj49HcXGx1W1OnjyJH374AXq9HitXrsRTTz2F119/HS+88ILV9osWLUJkZCT/l5KSIvhx+AJcJlAu9bhLePL24edvLgquPABZXgKGysZKfrmjSRVoC1y69v1pnMjrWPg8LwESZOBNRnUaBQDYUbxDYkks8bloI4PBgLi4OHz00UcYPHgwpk6diieeeAJLliyx2n7BggWoqqri/woLC70ssXcorDEeF1lghIWS1AUejfpGfjksKExCSQghIMuLeHCWlyBlkMSSWCKq8hITEwOlUomSEvP6ISUlJUhIsD5/lpiYiJ49e0KpbHN0S09PR3FxMXQ6nUV7jUaDiIgIsz9/RMkYfw/zOXpRY6VF3LfvwJUHoLe2wKGxxai8hKl9RXGha88TuOKMNIaFwTQgZETSCAC+metFVOUlKCgIgwcPxtq1a/l1BoMBa9euRVaW9XofI0aMwPHjx2EwtL0JHz16FImJiQgK8j3tz1usO2MsDeB+UUbCGpRhN/Dgis3ZLw1AyAWFgiwvYsH9plYd2yVG9GmjefPm4eOPP8bnn3+Ow4cP47777kNdXR0ffXT77bdjwYIFfPv77rsPFRUVePDBB3H06FGsWLECL730EubMmSO2qD6NVqUFAKgU1gLE6CbsOsbfjI82ommjgKHZ0AzAF2u2+Nk49pK/HGd5qWqq8nsfPW/TL6YfgDZrpS8heqj01KlTUVZWhqeffhrFxcUYMGAAcnJyeCfeM2fO8JozAKSkpGDVqlV4+OGH0b9/fyQnJ+PBBx/EY489JraoPg0XKt2nYx8HLe1B1o
X2MK2mZjI5Bw4ldcZpbNvVnj24FvzpOnL7WLz7G3CRMGUNZZLK4Y9wL82HK3yvvpHoygsAzJ07F3PnzrX63fr16y3WZWVlYdu2bSJLJR9MrQJdIrpIKIn/QXleAg/O+Z2zwBDyJiY4BmdqzlDYu4dYe4GLD2mLFDawBt452hfwHUkIm3BvigAQrY32cu/+bYalaKPAg5t67aARcB7fn6YrZHYsncI7ASCfFzGIC4njl30t1wspLzLA9KLhzHiEMHADgKaNAof8i/kAgG5R3SSWhBACLhKTXkCEwdSR3dSadbbmrBTi2ISUFxnA5aXgKtXyiPnADZCHOR8qTfPjAUNVUxUAHzrnATLWxILP82Igy4vQMAyDHh16AAD+KftHYmnMIeVFBuRXGN8UfTFRkNxhWh9g3G9M+D9HKo4AoLQD/gJZXsSlVudrUXlGSHmREaX11itxy22O2ido/c1auP8pc3FAYPp23ju6t4SSWMHvxrF3q0qvPVKMe7/c5ZU+/RFblsgRycZEdTtLdnpTHId4JdqI8IzztecBAJd3ulxiSfyP5BbjwyxSEymxJIQ3OF55nF9OCkuSUBJCKDgH7H/OVSC4rAIgt0BB4aZZfc3fkiwvMuBMzRlhdkRz6xaoW38TMjkHBgfKDwAAVIwKaoXaeiOPxok/jTE3j8XL9xnO8qJQX5RUDn+hfebpzIRMAIBOb1meR0pIeZEBnK+L3Qq4hFvw0UZ+9dAhbLGtyJg/qmMwjSV/oa65DgDAqOoklsQ/CVYHAwDWnlnroKV3IeVFIMR8c/+r4C8AQNfIru7twJO5dL+bhzeHGwCUIyIw4GqE2c6ua4JL174/jRN5HQuXi4TV+9a0hr+QHp3OL/tSrhdSXgTg7d1vY8D/BmDU8lF4fNPjZknlhCA5LBkAEKwKFnS/nlDV0IxnfjuIvYWVUoviEZSkLnBoMbRAZzCavsd2HiuxNIRQxIbEGhcYGsOeYCvXFRcqDQCLdyz2ljgOIeXFQ/aW7sXH+z8GCxYVjRX4/eTvyP4hW9A+ztYakwNZWl7EnOqwv++X/zyMZVsLcO17W0SUQXyUVNsoYOBCpAFgRNIICSVpD117nqBijA67DJX4EJ3fT/4utQg8pLx4yDdHvuGXhyUM45en/TFNkP0fv9gWHeFL8/THSnwz9t9VOMsLTRv5P1vPb+WX1UobzrqE7OCijcjyIhBWZg0zEzP5ZS76SGpIefGQgxcOAgCmpE3Bp+M/RXhQOADgwIUD2Hh2o8f7f33X6/xyTHCMjVbymqP2DYy/GZUHCBz2lu4FAPTq0EtaQWziZ+PYS/5yXJI6MAayYYnEE5lP8MtFdUUSStIGKS8ecKHhAk5XnwYAjEweCQDYNHUT//1Dfz/kcR+553PN9u8ZNLTbo6BQ6YChttloLbw08VIHLT0YJ/6kBLt9LN79DZQKo/KiDG6fUsKPzoXEdI3syvteNrY0SiyNEVJe3ORM9RmM/m40/3lkJ6NyoVQoMX/YfABAs6HZo4ciy7L8dMadfe90X1jCJrzDLs2X+z17SvcAAAbGDZRYEkJI+KzJrFJaQWSOo3QRWqUxmsvUVUJKSHlxk2/zvzX7HBEUwS/f3PNmfnl94Xq3+yioLuCX+8f2d3s/npmj/cyU3Q5l63gly4t/Yzot6HzKAReufX9KKSCzY+kS2QUAwBrIj0lMTlSdAACsPLUSlY2V0goDUl7c5otDX/DLjw19zOw7U2dATxL7cHP0gHlpckI4uMKMBtZAfi9+DDe9CwDJ4ckSSkIIDZ8pmRx2BaF9hl2Oy5Iv45d9oc4RKS9uUFxXzC//39D/w/T06RZtJqROANCW0dMdthdvBwDEBcdZbyDmwzZAHuThhrbjtFn4kpA9By4c4Jd97kUgQMaaWHDRRhQqLS7/N/T/+GVuClZKSHlxg/3l+/nl2/rcBsaKmfWKlCsAGB+I7k5JnKk2OqBlxGW4tT3hGNPHWAtLlaX9lVNVpwAAoepQiS
UhhIbL8wKG0h2ISWpkKu/szhULlhJSXtygvKEcgP1KxKNTRvPLhy4ccqufsoYyAMDQhKH2G0owRy2zaXFLTA4gTB0GwMTxj/A7uGkjn67MLvtB1R4vhUor2kKl/d1HT2p6R/cG0PZskhJSXtyA0zoHxtqOWghRh/DLm89tdqsfbnqqe1R3t7YnnIO7+ZHlxX/ZdNaYwqBTWCeJJSGEhs+wq2imAqsikxKeAqCtWLCUkPLiBicqjV7XUdoou+1SI1IBGEOmXaW+uZ5f5i4YjxFwbl1v8JebBMsnuSLLi/9S32IcT069CHg0TvxlXABuH4uXfXiitdFtH1Qmmb/Jl8gtbDnsAm2JUnV6nbfEsQkpLy7y5aEvsemc8S0uITTBblvORN2sd115OVDe5mDoqB8p2H2mUmoRBINXXqhEgF9imlSrX2w/CSUhxMC81AM57YoJ5+y+r2yfxJKQ8uIy7+99n1/u2aGn3bbciW7Um2ckdCYsd8t5Y8FD3hnNEzyZS/e7eXhLuGkjUl78k3O15/hll6aNXLr2/WmcyO9Y1IpW13uGrC1iwpW/8QXHd1JeXEBv0KOmuYb/7MgEzc0LmtY4qmysRMb/MtD/f/3Nck+0Z92ZdQCAQfGD7PQgXVVpf4KzvBRWF0osCSEGpskerUUGSk/gjDWxUIDLrkuWF3dxJs9VXIgxbQdNG8mMH47+wC9fkXIF79NiC87XxdRnZfHOxfzyW7veQnlDOVoMlo6i3A03KynLA4kJZ+DezCnLrn/CJXscEDtAUjkI8VAwrY8ySlQnKlxCwGZDs+RJPUl5cYH8i/n88ttj3nb4FsdNKzUbmlFQVYCR347Eryd+5b9fc2YNrvjuCgz8YiByCnL49TW6NutOdudsJyTzxbdJX6ftN+OKXuoM0r9NEMKzu2Q3AECr0kosiSP8bBx78XAYepQJhr3nmql/kdT3SzrjLsBZUqb1nuZUey7zY1VTFab8MgVVTVU22z664VF++fODn/PLXSK6uCMq4QJBCuP0ni+YQgnhOXjhIADgko6XSCwJIRZkefEOIaq2FCBST7ML4A0aOHDTO1xpcEdwJrbjlcf5dUPih+DtMW9jd8lu5F/MR0VjBb46/BUAYEfxDnx64FNsObeF78c35+j9C843aeWplbi5180OWhNywrQy+4jkERJLQ4gFr7yQ/5CoqBQqKBkl9KzeIhDF67JI2rvM4PKAcA6ejuAsLxwjkkfg/bHvQ8EoMCplFEaljAIAXnm5c9WdZu0fHfIohIUGtgUsyye22lWyS2JhCKExDenMiPVCmQ1/yi3i9rF4/zdgOIddVQ3ABw360bnwAs4m+Osc0Rmnqk5h3Zl16BvTV2SpbEPTRi7AZWDl01E7oKO2I788u99sLMleYvKG0Mbg+MEW665IuQJjOo9xU1LCFa7vfj2/TAqMf7GqYBW/7AtZQQlxqGpuTVevoCzZYsP5ZK49s1ZSOUh5cQFu2qi9RcUWvaJ74epuV+Pqbldj7sC5Ntvdln6b2ed/D/y3Uw7BzuPJfvx/2so0ouuFbS9IKAkhNP+U/QMASI9Od2NrF659f5releGxpIVxyQfJ2uIp9jLsAsA9/e8B4PxzUCy8ory89957SE1NhVarRWZmJvLy8pza7ttvvwXDMLj22mvFFdBJNpzdAMC1xHEvjnwRL4580arFhWNsl7H4atJX+OuGv7B/xn7M7j/buZ2LaaL2J/O3AxiGwV397gJg7p9EyJ8qndFJflzqOIklsUO7sXaspAa3fbodu89clEgg+cGQw67X8JUgEtGVl+XLl2PevHlYuHAhdu/ejYyMDIwfPx6lpaV2tysoKMB//vMfXHbZZWKL6DTdoroBECcTa//Y/kgMS3R7+7qmFpwoq3XckLDK9PTp/HK1rlpCSQihYFmWTwQ5KM5eskffYuZnO7DpWDmuf3+r1KLIBgXIYTfQEF15eeONNzB79mzccccd6NOnD5YsWYKQkBAsXbrU5jZ6vR
7Tp0/Hs88+i7S0NLFFdBouKY9ghRKFgmEwevF6jH19A/YWVkotjTxoZxrnCo4BQHUTKS/+gKkS2r2DDCqzt16T5yobJBZEKLw3/cS0BlGwZHlxG7lV5BZVedHpdNi1axeys9sSrSkUCmRnZyM3N9fmds899xzi4uIwa9Ysh300NTWhurra7E8sOOXF3hSQVJTVNAEAVh8qllgS+RKliQIANLT4y8MjsDl68Si/HBEUIaEkhNiQ5SXwEPUpXF5eDr1ej/j4eLP18fHxKC62/pDdvHkzPv30U3z88cdO9bFo0SJERkbyfykp4llFDHKvmyGQH0t1o+tVsn2Xtt+Ec1Q7UXVCKmEIAckrNvrWaZUuZtb1aJz408PTzWORwF/OapK6APLbExK55BbzKRNCTU0NbrvtNnz88ceIiYlxvAGABQsWoKqqiv8rLBQv6x9neXHkje3vLPhxv9QiiAKXdMlgkLmSSgAA8oqMykuPDj0kloQQG4aS1AUcosY6xcTEQKlUoqSkxGx9SUkJEhISLNqfOHECBQUFmDJlCr+Oe5CoVCrk5+ejW7duZttoNBpoNBoRpLeNL04b2cUTTdrKtmsOl1hpKH+GJQzDhrMbUNZQJrUohABwFjS3nXVdGjf+9EIjv2OhaaPAQ9SncFBQEAYPHoy1a9uS2RgMBqxduxZZWZbVknv37o39+/dj7969/N/VV1+NK664Anv37hV1SsgZuKrDvmNWE3OgBt5NgCvc99OxnySWhBCCZr1xenNQvK9HGgXeWBMavjBj8HlpBZExUleJdhXRs8zMmzcPM2bMwJAhQzBs2DC89dZbqKurwx133AEAuP3225GcnIxFixZBq9Wib1/zdMNRUVEAYLFeCjhv7ECfNvJXglXBAIDwoHCJJSE8hWVZ1LfUAwB6R/eWWBpCbC7qWq3Bel+vHE4IhejzH1OnTsXixYvx9NNPY8CAAdi7dy9ycnJ4J94zZ86gqKhIbDEEwfcsLxzm8vxzthKP/fAPSmukLZzl21iew8lpkwGY18Mh5MnZ2rP8crQ2WkJJXMHX7ise4sX7ZLfwAV7ri/ANvJLfd+7cuZg713p6/PXr19vddtmyZcIL5CG+bnm5+l1jVeqy2iYsnTlUYmnkQ6gqVGoRCIE4UdkWMcZNBxL+C+/zQnleAgaZeZ5Kiy/nebHGzoIKUfZr8PG50ar6Zkz7aBv2n61yaTsuw7GvK6eEY6qaXDv3hLzho4067MB98bGo9TnruP8hdVI7eTyFfQQuz4tcHm7Vje0rrApzsTXrfVt5yXjuL+SevIAp72523NhEEVNyWTrB8lOEhDzZX24M57+yy5VubO3B9e3jir1LuH0s0uV5YRgWm0OCsahjB0nkkDPOKiO+4jZByosL8HlefOTk+QoX63RSiyAISoWSXxajfhXhPbixKncLzJpDJbKLApECBZRmnwvUaokkIbwFKS8u4HNJ6py+qbkhL79vx9u+v94/KjFzlhcA0BtIeZEzm85tAgBkJVmmZHAeF8aNJy80dsbxXf/biXVH7BexFRwZvpwx7abyT6u94s7pl/jM880BpLy4AB8qLcPBLSa6Fv+YYjFTXsjyIlv2le1DUZ0xglGj9G4CSzHYUXBRahF8nvZ+iF2a20+ZE/4GKS8u4LN5XkiZch0rv5nptFGLgW5+cuVfK//FLw9NkFG0nd+NYy9WlW73KPtHqzHm+mmux1eHv0JxHRWs9TfItuYCvpvnRVr8ZUbe1PJS21yLSE2khNIQ7nCy8iS/PLrTaEpQFyDoWcuXjTvX3oedpbsBAHtK92DxqMXeFkteyOxGTpYXN1DQz+aXmJqeLzaSqV6OHK44zC+/M/YdCSURDnpXckwHVWeLdZziAgCrClZ5UxzCC9BT2AVkb3mhqAUrmP8myWHJAGjaSK5wyenSo9Pd34lH48SfxpibxyLBT/DHP/aLxYaqKQGls8jl+UbKi5Po9DpUNlUC8EGfF4mRUid67Id/MG/5Xs
H2p1YYQyzJYVeecGM0ShMlqRxCQncbx5RUNdv9vqmlyUuSEN6ClBcnOV/bVq3UlzXTMxUNlis9kdeHj7W6sRnLdxbipz3nUFotTB0nlcLoBkaWF3nCJafr2aGn5ztz6dr33XHiOjI8Fta++2YL24LGFqr15k+Q8uIkSkaJ2OBYjEgegc7hlvOr0mBp8vh9n1Al4X3f/G2aBNcgkLic0y7leZEnXC6mKG2UtIK4hO+PNV+HNQRZXd+3Y19+ubCm0FviyBKp0/27CkUbOUlKRArW3bxOajF8Erld9PbgLC81zTUSS0K4Q/7FfABAn459JJZEOHzY+OkzGFraIgMzGpuwT2vM7xMdHI1obTQqGivI8uJnkOXFDzhUJO2D9mKd/flmb+BIgTp7sR5/55tmKrX+RGhoMU670Vua/Gg2tF2HCSEJEkriLv6lpTS0sGhs1uPlP49g12mRo/cMbeUARjQ04LqaWnSNSMV9GfchIigCgHkkGmEbZ306pS5bQZYXP2DbyQuS9r9ifxHek6JjF+71I1/5GwDwxaxhuKxHrM12wapgAG2Ou4R8KKgq4Je7RHSRThAfQG9goVRIqwz9d81R/LBlHcprdViy4QQKXp4sXmdsW44mBsBz5RXA3cuBoBBcbDIqTr7sqygnfCVghSwvREDhKNU6l9SsSU/RCXKDKwkAmGdLljuuPiy+yTuD9KdzsPVEuUgSOU95rf2irboWA5757SBOlNV62JMS+vrOYFkFbqk239fwpOEAgOqmag/7IHwJUl78HHNzrf/4pghGO9NnkNLo+Jd7PlcKaQgPOHjhIAAPc7wA8Gic+EAupQU/7YeuxYA5X+123Ngebh+L89tlPPsXlm0twNjXN7jZVxv1p++FPn8BogycJ79RDhVjnGDYW7rX4z78Gbn5LpLy4ueU1fivBcHUCuzsfXbtYfvJrDifl2httLtiERLBPZy4qT9/wd3Zjov10vuiOaKhWcioPgXAWk73GmBUZuQVgUY4gpQXOeP0m5Ebdz9+374xv2mNMxfqXd7m4Hn7puMh8UMAAKtPr3ZLJkI6anRGx/WukV0F2qML174n/hQ+YK3ZV1iJoqrWHFF+5hvCWeJ0evtTWIQRX/FpcQQpL36PMDfGmkbfe4v7+0ip40YuEqYOAyA/EyrRlqBuWMIwiSURFrEfJcdKanDNe1uQtcg/U0FwU8EXGqQNbCCEhZQXP4AV+fb2w66z6PfMX1iy4YSo/dijsl6HFf8UoanFupnZZVXDxttl745tVYjrm1237BDSYHquenToIaEkHmDL4iGyJWRPYSW/fP37W3C4SBjHVrHvS872yRXSPXDhgLfFIUSElBfCIf/5fh8A4OU/j0gmw7SPt2PO17vxWk4+v87Ze/r764873U9SaBK//PPxn53ejpCWnSU7+eXuUd0llETe7D5TiSd+3i+1GILC+brIM/eP95A6b4urkPLi58jserQJ9zb4+z+u15h61UThcQTDMHxSq5fzXnZBQkJK/i405vEJVgX7XT4PZ4+GZVmL6d3iKsdZZQ1C1dbwUeJC4gCYJzEk5A8pL4Ss8IYydm/GvfwyzZPLg3VnjP4aI5NHSiyJdDz960H0e+Yvs3Wv5Di2lnpLdanXSVPsNEhh9Hkh5cVJZKL7k/ISSPiBGabSRvin+yZPy+3+lf4vfvls7Vk390t4C71Bj4rGCgDAiKQRnu/QzrXU2KzHW2uO4sC5Klsbe96/m3yx7bTFOls+Ys7h5rHY+P3qmrxc7LRVDq5emWkSQ0L+kPIiaxzfXByFBru6b89uhp6j07eVkv7vmmOi9MEwDO/0mV/h/JQTIQ2PbnyUXx6fOl60fj5YfwJDX1yDt9Ycw1XvbBZwz+IpPM7o9N560V65XxrlIVLTVrSRy+NEyB9SXvycd/8+7lm0Qrttn/ntoIcSCYepImMLd+fzi+uKAZCpWQ5wOXmitdEICwoTbscm135JdSNeyTmCmkZbUx/iqQCeOlKeKq
9D7gnnpz/FihKqbZJm2ojzeQEogtCfIOXFD/CmofqbPNerLZ8sq8VNS7Zifb7weVk4bN3fdxRU2NjC/g36yi5XGrcv3uGBVITYcEomAHwz+RvR+qnXecPiaP2afHud89Fy7dEbWFyxeD2mfbwNR0usV58Xy7+5/ZB8bVU+PttySpzO+D6thEozCmiVWgDm1wthjrO5rXwliR0pL4RVmvXGC3mdAIng/v3tHuwouIiZn4mnCNhSUpyxzliDy9aqUWrclokQn/WF6/nlpLAkm+0ClY3HyvjlI8XWlRdv8uzvhyTpt1FvjLriKkwTtvEV5cQRpLwQVjlVXgcA2HPG88F+wUFlWSHY2VqAsk4g0/SliZcCoGJuvs6KkysAAMlhyRJL4ps0NjtW3uXysPpy22nM//Eft7YdFDcIAHDsojh+coT3IeWFsIFwk1G6FvesH+7Q6EShN9tTSW0oGSUAoMUgzTw94Rx7y/YCkKYkQI8nVuKD9dJlnRYMeeguePKXA/h2RyG+2+n61DVncalvIZ8Xf8Eryst7772H1NRUaLVaZGZmIi8vz2bbjz/+GJdddhk6dOiADh06IDs72257whWkCeO8UOdbBdGW7zC5+dlwlpFtivkAYlXBKn555iUzBdyz5TXRYmX6sVnP4pWcI+ZO4W4413LKfW1Ti9sO5s68ILisowiYfkBIft933nGjdnJwltTc87kiSERIgejKy/LlyzFv3jwsXLgQu3fvRkZGBsaPH4/SUuu+FOvXr8e0adPw999/Izc3FykpKRg3bhzOnTsntqiEj2LL0VBMuKiV0oZSGFjvWY4I51l2YBm/nBaVJmpfXIkMaxRe9OxtfveZSgDAV9tP4x+b+WPs465vFwBU2cid5E9wDrsHy30nWtLXkFsxWtGVlzfeeAOzZ8/GHXfcgT59+mDJkiUICQnB0qVLrbb/6quvcP/992PAgAHo3bs3PvnkExgMBqxdu1ZsUWWHM1MkRly3C5fXNAGQprhae97466jX+zT1objYSE5+vghXaG92v9ki9dB27e8760Cp8GCYlNcanUnPVTZAb8Pyct37WywS47Esi0e/34ePN550qh9bUUUvt8vC6wtjXmhGJBuTF7awNA3sCLmU1xBVedHpdNi1axeys7PbOlQokJ2djdxc58x39fX1aG5uRnR0tFhiypYtx8tF23fBhTrR9u0qzR68VbqLRqnh39aqdcJU2SWEw7RswzXdr5FQEu+w50ylRWK8lfuL8f2us3hx5WGP9m1LYfIn0jum88tl9WV2WhJyQVTlpby8HHq9HvHx8Wbr4+PjUVzsXLz9Y489hqSkJDMFyJSmpiZUV1eb/QUKZT5kHRGTZidurtUN1k3fNiMpnHi70LNGyxZFHPkepvl3ukR0kVAS4XBmHH+57TRKa4yWmjlf73a7r9KaRvy0+6yoGbPFui/taZ1mcwWu2CoAfHrgUwGlIaTCp6ONXn75ZXz77bf4+eefodVqrbZZtGgRIiMj+b+UlBQvS0mIzcajjt+U/vhH+NTjscGxAICjF70/bUXYZ1+Z0QclLjjOQUvxsVVvSwye/OUAbvhgq8X6gnLHllJTRf7ad7dg3nf78JZIJTbExN1MvZ3DOwMAtpzbIqQ4hESIqrzExMRAqVSipKTEbH1JSQkSEhLsbrt48WK8/PLL+Ouvv9C/f3+b7RYsWICqqir+r7DQ9TA6ueJrDlYsy6K60X+c//rHGq+7zeeErGNDCMG2om0AgH6x/SSWBLj7i51e7a+wwrI+T94px+H/ppyvMlpv1hwqcdDSf/j3oH8DAAqqC6QVxFfxrceJQ0RVXoKCgjB48GAzZ1vO+TYrK8vmdq+++iqef/555OTkYMiQIXb70Gg0iIiIMPsLFJqcSEDlTZ7+9SD6P/MXNh8TzxfHHrYr/boHN09eUF2AP07+Iei+Cc84XmlMmd83pq/EkgAl1U1Si+AU3Eyp6VTRsdJaiaRxDU/rOwHm6Q+oZplt5J
K0UPRpo3nz5uHjjz/G559/jsOHD+O+++5DXV0d7rjjDgDA7bffjgULFvDtX3nlFTz11FNYunQpUlNTUVxcjOLiYtTWymOQeZMGp6ONWhHgBmCPL7adBgC89ldbJeacA8LXEuF8fdpTYSWfjN7hMdv+/oqUK/jlT/75xCnZCPFZeXIlvzy+iwhVpD0aJ56NMW8EelgbJ9Zx81hEvs84TTs5uGkjgAo0+gOiKy9Tp07F4sWL8fTTT2PAgAHYu3cvcnJyeCfeM2fOoKiozV/hgw8+gE6nw4033ojExET+b/HixWKLKjvEvM95su99hZX88r1f7vJYlvYs33HG6nprN/5PN1sWgjOVzx5dI7siRBUCgEzNvsRPx3/il1Mi5O/jxnjJXi+mc64cUClU/PKpKnELRAYCQljDPMErDrtz587F6dOn0dTUhO3btyMzM5P/bv369Vi2bBn/uaCgACzLWvw988wz3hDVP/HgdU7oiAEhKkufKrf+1mTN3GnN2deVpGLzh80HYIw8uuG3G9CsJ3Oz1OwrNTrripffpRWXxo20pnZnHiPLthoto46eOZ4+khz54kn90API8uIJvpIHxqejjQjnkFOo9MzPdmDXac+Svv24+6zV9c6OKaOS41zjPh378MtHLx7Fg38/6FwnhCg06Zv4CsGjUkZJLI2weDKOnVEInLU4esrxUmPkk63j2dyan8r5JJvOz0Q5+g0zYjMAABcaL9htF4j4WgCII0h5IbyOtVBPIVi5X/hw6U7hncw+bzq3CXpDYJvfpeS53Of45b4dxXXW/WnPOewWoKq6K7y37rhb2/lS/bDyWvsOzLWNxlDnMxXet37o9MbfiaaNbEMOuwThZdYe9nxKqj2h6lD8MOUHvDTyJX7dwQtUH0UKDKwBv534DYCxfINSoRS9z+vfd07R1umFeWtde8S9a5hTCJxBXu/XwhKiNvqwaZQaiSUhPIWUF0KWWEtp3mJgUdSav8IeDc16fLjxhNN99YruhSndpvCfN57d6PS2hHB8eehLfvmD7A8klMQSKYqHmuKKyX9ngWs5YcTifKVlvhpbCKVw9YgyhktTqLT8IeUloBDvneuUExk+hcTaja+8tgmL/nSuzstHThazM2VA7AAAwIf/fOgTToeBxms7XwNgtIZ1jewqYk8enFsfvywOnq/C1uNO+nu4fSwOHHZb/5//436z9dYSXL6//rjVwpPnKxtQZaMkiD051Eo1AFJe/AFSXmSNeHdKV8M3D54XNkGcI2zpDnVN4vmjjE4ZzS+/uetN0fohLKlqaru+nhv+nJ2WwsPVEhILITwMtjipkBwrqRXdMdMZ6ycAFFebt3v4271mny/UNuHVnHy8uPKwRU6r4S+vQ8azf7ksW5AiCACQe965wsCBhNxeyEh5CQg8CZV2tgfX+9h6vBy3fJSLE2WuJyBsMXg/u/D09On88mcHP6O3Ny/y56k/+eVxqeNE7Yu7iXPXvjMKsdS3fVcsn47qnIodvWirinV7f5+mlrYxfkygabnaZuO9Jj403kFLxxwprsbv+857vB9fw1dCoR1ByosfIPWNE3Avlcytn2zHtpMVuP9L16vjbjlh/U3TWTncuUFrVVp8M/kb/vNTW55yeR+Ee2w6twkAEKWJEr2v/HYPyvxi/6lUf7KsFvvPesdKauu+5Cgaydr2W22Md8tt7I9rrl6ZEPmaJry1CQ98swdbj0tTDiXQIeWFEARPdPX25mNneOqXAx706D59Y/qig6YDAGDFyRWSyBCIcGb+8akilANox8aj5g+j7S4WPfRl3l533EI58zbP/n7I5W3yi4WRWa0w+rzoDMKFlh8q8h/lVk6Q8hIAeCP5kCfVpKsamgWbb62qF38q59Pxn7b11+RdX59Ao7GlEQ0tDfwUXXaXbNH7LG1XO6tZ71sFUAOBRX8exm2fbOc/C1VAkvN52VG8Q5D9AcALKw7D4GguTmIKK+rx5bbTLiUG9HVIeQkAdhaIn2jLne
gdU05fECZh1UkvRD2ZVqfdV7ZP9P4ClZNVJ3HZt5dh+NfD+XVD44d6XY4CG+UoAo3Kep1gvie2/F64MO4PN5w0G8tCvdxw0UaRmkhB9sfxwDd7BN2fPWqbWlBZb2k5MhhYbDhahgut03IXaptwx2d5yDlQjLFvbMCTvxzAu3aSIFKGXcLncFXbtnVjsceJMs+UBnkNm7Y3uA2FGySWxH9ZXbAajfpGtLDGBGwRQRFeSUzXHrnd1MViwHOrceWbGwXJafPHP9YdXW/9eLvV9bZw1SrWJaILAAieJXuFCNm921NS3Yj1+aXou3AVBjy3GvU688SEP+4+ixlL85D9xgaUVjdi8Atr8Hd+Ge79chd0rc7PW08I558j9bgg5SUA4F9anHx7+etgscd9uqowSRqm50bfl3W6DACw6vQqoaUhWokLiTP7PDJ5pFf6bZ8m4EKtK/4R/qTotB2L6XjeftK+86wzaRY+3GDdUquzoYwcseHzYrdOmpVxzWXW5aKOXOWVnCNYaqVSvSt8vrUAqfNXYOBzxlBvg4HFU78cwPIdZ+xul7VoLWZ+1jbd1f7YVx0sAQBcrG/GsJfWWt3H7jOVvCIjd0h5kTHO5mJx9XZap9N7HCy5zs0053KB872oaqqSXX4EudA+ZHNa72mSyGHrwSkUruZUkoKCCyaWVYbhH7i5TkYBtUcqJ1euPAAAXGx0bTr9eGkNPlh/As/94brDcVNLm/K38DdjeZGLrf5564+W4ottp/FYu6R97WlvEN99uhKA8cXv3XXHsOZwiVOyLNvqH3WdSHkJADY7m1HTKu6pMY6e54USFGUTkitSruCXz9Wek1AS/8XAtr0hLhi2AAPiBnhZAuevfTlVdneEtWM5buIwuyG/FDcu2Yovtp3GtI+34b2/j+PQeXlE3EQERfDL+RfzXdq2XtemgGxzYH3i+G3feaTOX4FeT+bgu52FFt+zLItKkyADlmWxPr+UzyD+bd4ZjHxlndVcWNy0zY6Ci1j811Gnj+OllUcsZKhrcr42lq9Ayosf4OjGubfQu5VxAcfzoZe9+ne79t7F0/5C1aH88mcHPvNwb4Q1WgzGG+qYlDG4Nf1W17bVG1yqneML+LICNPfrNofUNYdLsftMJf/5tVX5mPT2JisWSPeO508X/UdMffScGdcqRgUAOFHZVt/sYp3OTH6DgcXZi+YvWAoTS+BxK9FPO6zUjPq3iSPv//3wj8X3LGteXfvjTScx87MdGP7yOqTOX4H5P+3H2YsNGPu6pW8dd9jupJrg+GD9CXRdsBKXLFxlt+ZVs97gli+kmJDyEgBIcckZWPjN3Kot+sX0AwCsPLVSYkn8E87ysvN0FW771DVHzts+zcPwl9dhCyUQc5umFgPeWXvMaafYgc+vFqTo431fOZ+0cu7Xe/B1nn1fkfZkJmUCAA5dME7/5BwowsDnV5vln5n33V6MfOVv/LrXulXV2lTxTUtysXhVPl7/Kx/nKxtQYCXysb1ClPXyWry15hj/ub1VxB6cDJ9vLXB6m/a8ktPW3xOtubPaZ0tv1huQ+dJaXPmGUYHiKpg36KQNuyblJRBwUXvZddrzGxDLsvjFxsB3hma9AZ9tOYUSD94qxObejHsBGJ3/Glt8V065omeNN8cLNc3YdKwcx0ud9z3JbTXrv5pzxOobsdzZeLRM9D7Kapvw+uqj+CL3tFPtK+ubcdf/dooslTnltU3YkO/abxEXbHQE59IcvPyn8QG+bGsB9hZWAgB+2WuMhjINLTZ1wbJ1S3337+N4Z91xDH95HUYvXm/xfX27B35JtXPZhq1haFVe7DotC0BBeR0q6nQ4WV4HlmXxv9br4azElk1SXgIAVy0v3+RZzs26gys1i9rPId/35W48+/shZNrwmvcFspKy+OUrf7hSQkl8i7xTFbj5w1zM/Xq3RwnejldyDw7jberLba69YQPAvrNVuGlJrtcLhzrLOTcfALcvzRNYEtu44qBaL2JhVFuU2S
iceaai3mrSym5R3QAAWqUWZy/Wm90fr31vi/m+a5uw9nAJWvQGbDrWZsVzV3kUcmLwvb9PIHW+uFm+3/v7OD7PLeA/s6z4hUqdhZSXQECCeaMD56pw4JzzD4y/Dpp7yjvrOS8laoUaozqNAgBUNlViZ7F33zrbc7y0Fs/+ftDjm8v5ygYs3XwKtW468d38YS7yTlXgj3+K8Nte9wvXKRljThdGbXyzLHayWrE1/mmt53OxTid6NtRmvQFVDc5len54+V5RZfE2tkKdxWSfjVpNV72zGRmt4civrTqCqR/mQtdiQF11MgCjw+7IV9bZTZBZWd+MWZ/vxIcbT5o5Ja857F40pekUkZR8k3fGLALKGkVVDXhtVb7ZS4OBZaH0kcKNpLzIGGcvoRr+IeT8TdvT8M2PN53CFheinCQdDx6EOr8z5h1++T8b/iOENG4z6b+b8NmWAo8fiNe/vxXP/XEIC389aLb+q+2n8eOusy7t6+9890PmOeXF0NAZgFEp2H7ygt3pI5ZlrVp7lAyDvYWVGPj8aodTG55c+426FvR44k9kPPuXUxWH805V+Kybrru/g++EfrfJ8d7fJ7D9VAU+2ngCr/3Rdl9i1Jb3KGuK54+7zuI3ASpIeyOZnTMs+Gk/3v/7RLu1xt+Lc8xtP8UFAC+uPIwKK9l9pYCUlwCgos43LjZ/hGEY3vflQuMF6PTS/dbcW6+nVYO56IWNx9pM4yXVjXji5wN45Pt92NBqMmdZFkeKq+1ODf3xTxF+33fepXl5ztF7T2uUHMsaU7qvPVKKqR9tQ/YbGwEAp8rrMGNpHu/TsmSDMXKixxN/WuyTYYDFq4yhsWLmILr3q1388gPf7HF7WogQhz8PFIPVh4E1GCOOwrovBmB+/Q59YY3Fdt4oO+Jt/rvWuhWool6HxmY9SqxYOj/bUoAiDyygQkLKi19g/93Nk/cgb4VvejvPW/vjYlkWP+w6i8NuJM+a3W82v7y71PlICbFQKhhsOFqGO5ftcHr6whoGA4uTrX5LpgrRjFZ/i2/yCjHhrU24u9WSUVTVYDUK44Fv9uCGD7Y6Ne3z6eZT6Pnkn1ifX4p9XIg/a/0avPeLXdhwtAw3LTFWnOYcL61xuKgGm00ij2xNHZXXtjlQunLt22q74p/z2HW6AhP/u4lX+jjaR+P5Sqi0UENRiuOx1qdpBNTB1qmfppKr+XWhPV4way/F1JdvweDDDSfxzO8HHTeVEFJeCJ/AWqExb/LT7nP4z/f7MPG/m1zeNkgZxIcXbj2/VTCZ3M3ce7G+GTOW5mHdkVLcb2IJMKW6sdmhM+2FOh3GvL4B3+8stDqt98lmY4r3v/PL8N2OQmQtWoenf7V9w7vhg614c/VRvJJzxExJPFxUjTlf7caJslo83+ocOvOzHQDDHb/1h6AreVyWbjHPKjrt420AjNM20z7axhcc3C1w5MZLK4/ghg9ycbioGjOW5uHXvef49PqDnl8taF+EdW5sVW5Naa4chuZqY6oDhaoeCo3nU0L+xJtrjuJoiTCVvMWClBcCR0tq8Ppf+ahudP8t3VP2na1CWY37YYOecLFeh2VO5EpgWdbszdwULvvr3uKDmLVsh8fhuT/uOouhL67FvtbQTXex5ndUXtuE/s/8hXFvbuTX6VoMWHWw2Gp0xosrD1uNHLtoMh35fz8aE3B9sc12WO25ygb8d+0xfLD+BCb+dxNqm1rw2A//YOJ/N2HF/iLc/mn7CBrbytuf+4tMfLms+ynYY/upCtQ1teDmD3ORe/ICpn9izCNz9xfWlT2hePDbvZj6kVFxctchmhCGxnPT+WVVmPMZagkOKswoawwGFh+sPyFYLomD56sw9+vdVhMcsSzrcR2d0upGjH19PT7Z1FYYbdybG/HOuuN48Y/DHu3bU/JOVUBvYAWtfOoMJ8trsd8kMirngHWnumd/P4QhL6zBJ5tOoqadopeVaAyb3nn2NNYeKeWnMqzx5/4i/Lr3HJpa9Hgl54
jFdAIAPPL9PpTXNuH+dgm7ymqaXE7lXVhRj7fWHMVV72zCs78fxJDWOf1T5XUoq2nCvz7ZjksXrcU9X+ziozNMqaxvtkieVVGn42uzuMsj3+3FcpOU6Tb9Q6xMG7VPZJbxrKXcjrhkYVtRzdKaJq+GgL61hh6WvoC+wRh5pInLkVgSX8DJZ4uP+GOrpBZA7vy05xyfpTB3wRgkRgbbbf/51gIcK63B89f0BcMw0LUY8Ppf+RjVMxaZaR0x+e3NAIym9LWPjEZpTSPKapqQnhCBG5dsRUSwGo+O74U3/jqKTDfk/e/aYzhRVocXVhzGXZelmX23+4z3ywiYknOwGPklNXjbhiOZWHQICTL7fO+Xu1Hw8mSLdpx15oUVh/HCisM4+Ox4hGqMQ6hPxz4AAKXWfoh3RX0t7v92I1h9GKBohDL4NJZsSsbS269A99gwpESHmLVvMbRN7ZTXNmHoi2ugVjL4bOYwRIWo0Tc50uHxTf0wF+db/U0OnDP36Rn6oqVzojMIMeWx6qCDcHiGO3bvvGNd/c4Wx40EwldCZgOd5ur+UAYbk2mG9ngBDafvhqG5A9DqJE74LqS8eEBjsx4fbWwLNxv7+gYcem4C//l8ZQPW55fh+kHJ0KqVeOLn/fhquzFmflLfRAzvHoPPtxbgw40n8eHGk5iR1YXf9kRZnVkCopeu68fXE1nfmlHyUpXrKjDXP2AsGrbeJPLiWGktH5kkRbijM6GlYuDI30ZvYK0WkjxeWostJ8pxoVaHCRld+fWhaYvRVDYBNy+Jxr6zlbjt0i748cif6NFzBw5fPIiwnpZ93PFZKADGQmkqqW5CU4sem4+VY87XRmtDs57Fv1rT5R97cSLUSvsP9/M+Eh3gKm3XoHccP7koK096c2db3wktNsf9UGnfwBk5misugzpiP5TBZ6FQ1SK02xsAAENLGOqOLQCgFFVGn8Q3L0cLSHlxk6+3n8HjP5uXMK/X6bFsyynMHGF8kE15ZzMu1Oks2gHAJ5tP4fd/ivCNSV2Oz+2k4f7rULFAkrdhWjSM4xsX64T4A++us/8WPOb19VYTWSkVDF7NMYbffroZCOulBKPQQ6EpR3CnL7HzRBwMLXH4dPsOhHVfhsN2DFuKoFIYdPEALB11ez1p26Td44k/MWtkV/ROCLd7DARBWEOB+oK5CIr9C6qwg1BoysAwBihUtQjr9TRq81+UWkDCBqS8uEiL3oC6Jr1VhQQAnvn9ED7PPY3Cinq02Mnk6WquifV26nc4UpRdCVnMO2XuuyMTJdxlTH+T9UfL0X5q4mhJDTpHhyDnQLHNDJxXvbPZ7HN9wQNQd9iKoA5Gx9PQbm+g5vDL0Cb8yrfRVQ6BOnIPGEYPtiUEjMq4b1XEAejK47F08ymX0rEDxvBi/8R49bE2QqVF792Fi1+IceIrY02oEGcpjsdd2XVl46ArGweARUjX/0KpLQaj0CO4yxI0nL5XWCEJQSDlxQUWr8rHu38fd9julIwTGllzHg1Exr25EXeMSMVnWwqc3sbQlICm4uvBNkdBE2d0IA3p8j6UIUZrVkttDzQV3Yimomuh0JTB0BSPkNT3oQw+C03saujKx7qsuPg1DkKlCUJ4GNSfegjh6fMBAKqQAmiTlqPx/FSJ5SLaQ9FGLuCM4kL4D3/8414qb92FMfwyp7gAQGPRTa1LKhiaEgEo0Fw5hP9eFb7Prf78F1JeCGmoOdw2XaSO3ANN3EoAFNruS3hFeXnvvfeQmpoKrVaLzMxM5OXZr4j6/fffo3fv3tBqtejXrx9WrlzpDTEJwgxP8s7UHn0SzTV90FLXFc2Vg1B/5g6wLREW7Zor22LGgjt9g/apygObVuVFomkjIpBRouZIW+bdoI4bEZ7+JIJTPjX6x0Tugv8qM/IYb6IrL8uXL8e8efOwcOFC7N69GxkZGRg/fjxKS637fGzduhXTpk3DrFmzsGfPHlx77bW49tprceDAAbFFJQjBYPVhaDx7OxrO3I
PGopuhr+tloyWDxqLr+E9hvZ8Eo7RdeDCwIMsLISGsCjVHnofuwkh+lSrsGDQx6xCc9D3Cej2H4C4fICg2B6rw/VCGHIdCcx6Mqgq+48Hkv4ju8/LGG29g9uzZuOOOOwAAS5YswYoVK7B06VLMnz/fov1///tfTJgwAY8++igA4Pnnn8fq1avx7rvvYsmSJWKLSxBep7kyE8rg01BH7QbDGBDW80U0Fl8NtjkSLKsCWCVYgwZsSzgABcC0AIweDAytyyzAKgBWBZb/Xw3ogyHrmWHyeSGkhlWjqfQqNJWNh0JTClXYYSiCyqAKOwpG2QBVyGmoQiyjRFl9MAy6aLD6UDDKOjDqKpMxqgLbEgqwQYCisXV9EAwt4YAhCIABYAxgGH1rriM9AAVYgxowaGBoCQfDsADTbBz/UACG1jFv0ILVBxv/DFrAEGS8hxiCWr9XwV/Cv0VVXnQ6HXbt2oUFCxbw6xQKBbKzs5Gbaz0DaW5uLubNm2e2bvz48fjll1+stm9qakJTU5t5v7ra9cJ6zlBZdArfqF9w3NCLdFG4Hj79bdDzTkVvdFecc0ckWfJ10IuSRbTwlAMbdRp8GWe8lrUJv3m8S4YFlGzbo59TYxgAaF3P/XHrGZYx/2yyL8u25jM6pu+abPvPjJ3vzNqx/OdGhfG2fYcyB1eo1zk6XMFIYtzP8PyY6lvcza5w3NCEXgrfTE/QEdVu3fN6KM6KII3rfBK0GC2sgI84PYDWRNwtZQqcDwrGGY0BB0P0uKgyoEHJok4B1CpZGJQNfPI7q2iEE8tVFCwQZADULIOg1nuEgQGaGePhRTM1+MTOeT+q0mMxgHhG2qSmoiov5eXl0Ov1iI+PN1sfHx+PI0esV4AtLi622r642PqDetGiRXj22WeFEdgOO46fw5VK34wEKWE72P2+DlrUsMEIZxpwqcK1EgCO9i1XGqBBNRuMCDd+E7HIqgNuLVThm4gwHAtSo55RoJlhoGOAWoUCFUolDADULAs1C6jBQsUag0P1YNDMAC0Mg2Yw0CkYsAzQ4rJO5lvmbgXL4sqWQgxVer/uVQmcv/ZL2GgAJ9FbUeiwre19+MZYK2OjoGcZaJgWZHlwz5PieAxgUMZGIpapwlCFyCUY9ADqW/9MaGKAk2o1ilUqXFQoEGkwILmlBSyAJoZBA8OgUqlEPcMg3GCAgWFQxzC4oFSiUcFAxQLK1rGtYgEVWOjBoEHBoEahQLlSCTXLQsOyULMsDGDQxDBoVDCoVShQpVCgtrVtI8N912aBNTBAoxJotDHWezU3IEtpO+eYWqkBEA8tpKuFB/hBqPSCBQvMLDXV1dVISUkRvJ/wmE6Yo/u34Pv1lDposdnQ126bJgThat0L6MPYviCtUYVQbDVc4ol4PosOalytewGXuPibiI4OQD0QBOOfuxhgQItSBwOfYp81sX6w5v8yANvOFsIy7dsB4K0i3L8sGIspHZM1rPFz2zfm7Vp3aeNb42eVXo1lei2W2TpQkWiEGpsN/Zxu/2jzPfhZPxJKNx2uLyAC2w293dpWaErRAVN0L6Ir435izIsIQ66hj4BSOQuD63TPIYM54bipmEhTYxYAENH6ZwoLFgbGAAOjh0GhN/7P6KFXGMAyhlaLKwOGZaBuisAcO9PNOn0T0s+Xo9lgvxSO2IiqvMTExECpVKKkxLyGSUlJCRISEqxuk5CQ4FJ7jUYDjUZ8G1xEZDRWGC4VvR+xOMUm4hSbKLUYPkUBm4gCf/5NKHDJa1QjFDmGYVKLIRiH2FQcYlOlFsMtzrKxOMvGSi2G/2IAP30mJaJ68wUFBWHw4MFYu3Ytv85gMGDt2rXIysqyuk1WVpZZewBYvXq1zfbeIkhFToMEQRAE4QuIPm00b948zJgxA0OGDMGwYcPw1ltvoa6ujo8+uv3225GcnIxFixYBAB588EGMGjUKr7/+OiZPnoxvv/0WO3fuxEcffSS2qHZJcFAtmiAIgiAI7yC68jJ16lSUlZ
Xh6aefRnFxMQYMGICcnBzeKffMmTNQmDgTDR8+HF9//TWefPJJPP744+jRowd++eUX9O1r369DbMI0Ktw8pBO+2+kbnvQEQRAEEagwbPsStjKnuroakZGRqKqqQkSEZUZTTzAYWKQ9Ttl+/ZUgpQL/N6EXXlhhO/poZPcYbD7ufiits6TFhOKkjGtkEQTh/xS8PFnQ/bny/JZxBivvo1DY93u5cXAnHHtxosf9LPnXII/34YistI6i9yE3ti4Yg87RIXbbpDj4XiimZCR5pR+CIAg5QsqLi2ydPwZzr+iOXU9mo+Dlydj39Dj8+eBleDi7J56/pi/UyrafVK20r+zcMjQF4VoV7hzRFUNTO2D742OxZf4YTOhrGQHzzrSBeHS8rRTzrjNjeCq//MoN/bDukVGC7VuuxIRpoGynoIYEtWWj/OyOoXg4u4cofX82c6jZ52sHJlu0ee3G/hbrvr37UozqGYvtj48VRS5vYPobEwRBOAMpLy6SFBWM/4zvhY5hxvDsyBA10hMj8GB2DwS33oQPPDseO57Ixm9zR5ptmxipxQNjugMwKi4v39Afe566Ek9P6YPv7x2O+AgtkqOMjsG/zR0BAAhWK1Hw8mRMyUjCnCu649SiSTj+4kTcmtnZ7WPY+/SV6NKxzYJw85AUpMWGub0/oZnUz3pYvBjcMSLV7HN75eWyHjH88hW94hAXoUXBy5Ox+uHLLfY1qHOU2eeFU2znufh6diYOPzcBz0zpg6eu6oMresfhmdb2s0Z2RdeYUKTFhPLtT7w0CTcNScGBZ8fjg+lGy9zL1/fDpWkd8fmdwxAfocVdI7s6dcwcXTqGYM4V3VzaRgx2P3UlIoPVACzPB0EQvklMmIRpguEHSep8kTCNCmEaFWLDNchdMAbRoUE4cK4K3WPDEa5VYULfBPROMM7nqZTW9cf+naKQ89BlSIwwj3JiGAYqJYOXruuHr7e7nlY8TKNCVEgQgoOU0KgUiApR89+plQya9dK6QF2aFo33bh2Ergu841vU3uMrNtx8QPZNisSqg+Z5hwCgR3w41v9nNJZsOIFvdxizqqqVCrPfsHdCBE6+NAkr9hehok6H2qYWvLYqH1f1T8TwbkalaOaINoVj5oiuZp/vuiwNj/+8H+FaFa9UhWlUmNgvEUdfmIgglfm18+RVffDJ5lNOH3tsmAaPju+NfsmRyDt1ERP7JeCmJdbLdnBM7JuAbScv4GK9MNk1Hx3fC1q1EvsWjoPewEKpYPDZlgJB9k0QhHh8e7e0ec9IeRGZxNYQ68Fdovl1lyRFOrUtp+AISd9k4z41KiX+eWYcFAwDhjE+GHc/dSUe/f4f5Bx0P7OmJ8SEafDt3d7N56M3mGsv7c/N6F5xUCoZpFs5F6kxoXj5hv7YW1iJI8U1uH5QMj6ZMQSfbSlAXVMLLk2LBsMwZv4rU/onIbmDc2H3twxNQcewIAxMibL4rr3iYotBnaOw+0wlAKNzXer8tro7b9w8AAAwoW8iP1WZFhuKk2W2HYU/+NdgsCwrmHLZ1cS61N7qRRCEb/Lk5HR0j5PWWk/KS4Dx9i0D+WWNytzXIFyrxpLbBps94LzJjYM7eb3PFoOlpSn/hQno9WQOAIBhgPtHd7e7jx/uG44jRdUY1LkDFAoG/x5r2y+mc0fnHX4VCgbjL/FsCu3zO4eh3zN/oWOosdhAn8QIHCqqxtvTBlqVZdVDl6PHE39a3dfSmUMAgFd2OfKeGIu9Zypx9xe7nJLp/tHdMDY9HnsLKzGxr/vH1zshHKN6xeLDDSfd2v6n+4ejd0I4DhdVo7HZgBHdY7x27X86YwgOF1Vj8V8i198hCBEY1EX6Olzk8xJAaFQKxEVopRbDJvOu7On1Pod3s4y60qiUuH5gMi7rEYM+iY6tX2EaFYakRjuMRvMGd1+ehqgQNR4d3wvrHhmFcK0aBS9Pxq6nrgRgfGCveuhyXG0jmkmtVFj9TQBgTO+2gqnzJxrr8GxbMBZx4V
qMc0HJund0Nwzu0gGzRna1UIRcYUpGEu4amcZ//vzOYTj50iSb7X+8LwsvXGvMF3VZjxgMTIlCSJAKg7tEY0T3GJvbCUVSpHHsvTNtIMamx2NSPz8uTUH4Ba/e2N/q9FBKB+9EXdqDLC8BQnyEBu9MEz8E2xOcnQoRkqv6J0KrVuKSJHMl5Y2pA7wuixA8PikdCyb2tqkUaNVK9EoIt7uPkCDHt4V7R3XDvaPMnX23LRiLe7/chb2FlRbtv74rE7d+sh3PXn0JIrRqi+8dMfeK7nj37+Nm6/QG1uyayegUCYWCQWKkFkVVjRb7GNwlGoO7RONfl3ax2c+AlCir8nvKh7cNxvhLEqBrMfAyd40JRXyEBiXVElbxIwKa7nFhOF5aCwC4Z1SamRVzbO843DzEWOT4psGd8P0uY4LWh7J7WPgGSgEpLwHC9sezpRbBJRQMYGVGR3AYhsGVfeIdN5QRnlgz2sP5wPR2oPAAQEKkFr/MGWEx9bJwSh8M7x7jUUKreVf2xPhLEhARrMKo19YDMCovkcFq3HN5GvQGFlEhxqmx3gnhFsrLkecnONVPWkyo28rLtGEp+OOfItQ0tpitP/7iRN4x31TZYhgG708fjBs+2OpWf4Rz/PngZUhvtaBKNSUOWFfApeaSpAheeVkwMR0XanX4oVVJMY0meuWG/hjQOQqNzQbMcjGqUSxo2ojwSZ6YbDvMmBCXjE5tTsvrHhmNXU9m4/cHRtrZwhwu3J/jjhGe3ewm90uEQsGgX6dIdOnY5uDLWcsWTErHk1fZvl5+nzsSWrVzuWSu6B3nkaxDU6Mt1tmKKASMSjohLj3j2xTvy3uaV5ueaZLvyhRTv7UZWV0QExbkkQxPTErHf8b3Mkt/4AsM62p+vT5/TV9cPygZWWkd8X8T2vKKKRQMpmd28RnFBSDLC2GFmDANymu9a8puP2V054hUPP/HIa/KQBi5e1QaNGoFRvU0Psg7upjPYcbwLnhp5RHB5Hl72kCzz2vmXY4D56qdspiZvnU7w+R+iXjgmz0uy8hx3cBkrDtSCgB4c2oGBqbYd2xUCGglE4v2UWpyw/QXfn/6IGw6WsZf30oFg2VbC8za73giG7HhGozrEw+tWonucWE4UlyD8toKp/oLUimwdMZQGFgWl/WIMbOExoRrzMp+dAwNwoU6nSeH5xHtU0UEByn5KERfhywvAUBmV8u3QXsEOcgMLArtBpGQUx+Ea2hUStx9eTeHvjG2sOdT4gxD2kUytA+h7h4XjmsHJtu8Rrjs0Zf1iHFJcQEclwCxR7hWDVORrhvYCakO3rTdvcz/e8sA9zZ0kfZv5t4gXOP5O7WtBJFcnqQxveMtrquMTpFY/fDlvD9H3+RIPhx4ugvXdP7zEzCyRwwu7xlrcY2+flOG2edf5ozgly/vGWthteTQiOQPKOfChmR5CQAedDGlvRSKw5wr7IcjE/LBGYdfeyR3CMbO0xfd3n50rzhsnT8G8V6OrIsMVmNY67SRsw9gBu6NNW+MUWuJEDnSEyNwuKhalH4jgtWoaWpx3NAOptFjzv5UT0+5BD3irSvsV2ckoXdCOLp0DEGQUoGMZ/9CdaOljA9n97R7blKiQ1Dw8mSU1zahY2iQWVsGwJp5o5D+dA6/rl9yJL6enYnQIBU2HS/HjKV5zh2ME/SMD3Pz6vMNyPISAHDZXJ1FiuRDd11mOZe6bcFYfHTbYK/LQkiLEGnHk6KCJUl6FxehxY4nspH3hHMO8lxZBF9j15PZdqP/tGrz7/588DLB+n7umks83kePuDA8f21f/PeWAYIpej3jw6FRKcEwRv+r9vRJjMA9o9KsbGlJTJiGl4tLx3D9oGQEBykxuX9bCP3tWV0QrlVDoWAwqmcsvr37UpfLgNji2av74rqByUiLDbXp++PLkOWFsOC1G/tj2EtrpRYDCZFaNDa7N3VBSMuY3nG874ermD5qrrdSoNJX4Uz+roSRupK00JRgJx2Q3cWRn9PtWV
2wpzVz8zvTBiI9MQL/HtMdZysb8NPucx71fUUv15ym1z0yCmNe32C2jmEY3Obi9GVXF5xp37x5AD7YcALTMzvjWEktdp6+iCcmpbs17fjDfVk4UVrHZz9/d9pAvHx9P5yrbECvdpagS9M6YnCXDugWF4YFP+13uS+OztEhfAbwtfNGyXKanpQXwgIpEtnZuhmHaekSlSNse09AN3mtnY+AL2Mr8Z8YSFmJ+9HxvXDdwE7o0jEUVQ3NvLIxb5wxOsVT5cXV56inRWXznhiLBp0e0aHORxTFRWixcIrRQtQ9LhwTPUg4GBKkMrPkMAyDcK0avROsW+XUSgWmDevskfLy+s0ZvMIiR8UFoGkjQiSmtiY3chZbbyxSVy4l3MPZ+l3WuDStLcOvnOodeTPDclqsdCG3nH/aoM4dXLaScNw8xHYpEG8/TOPCtWYh+IFAv2T3x6evQMoLIQj3jzbPtrpgUm+vy6CS0YPO35k7pjsezu6JP1zID8MxNj0On84Ygs2PXSGCZP4BV/BVrrx6o32LWvvpElv4Wt4UqVgw0bX7rbN5j3wZUl4IQWhf5DEqJAh3jEj1qgwDO0d5tT/CNlq1Eg9m90BfN97wGIbB2PR4dPKB+imEb/L73JGYcEkCPp05VGpRfIJ72pXqsMc/z4wTURLvQcoLIQjWLL3cnLC3oHBrghCHr2dnCrYvZyNy7NGvUySW3DbYJSdbfyMjJQqA61OI7tQW80XIG5IQBFvJlbxJhI+GnRKE3EmLES59wqgesY4bEQ75+LbB+GLbaUwb1tnpbf7+z2jxBPIyZHkhBGGSB972BEH4NgmRWnx/bxZWPXS5JP3fnuVZ1mZ/JC5Ci0fG9UKSCy+O/mSpIuWFEASZRtsRhCDIKSrKXYamRrtdMsIUd4Lon7umr8f9BgpDunTAS9f1k1oM0SHlRcaInahKLALgPk8EGDcMkk8yPTlgmt330xlDJJREPnx+5zCM6N4Rb04dYDUU3TRzrz9AyouM4TIy+gKuWF5cLVfgLM6GVxKE0HiaKC2QsFV24LUb+yPvibEAjEnUUqKDsfimDIxNd1w9nABG9YzFV3ddipToEKiUlr9xpw7S+yUKCSkvMsbdom5ioHRBe2E9rGXaM976gyJUgGq0BBHRLqvzW1MHONzG22kB3OXR8b1E78NR9t/2VcM5UqJDEBduzO7dMz4cm/5vDG4cbDuZHeEaQlTr9iVIeZEzvqO7WNX0bfFwdk+ole4L//pNA6zukyCEoH1tomsGOE773z7PkS8SpFLg/tHd8M60gaL246jYpK0Muj50OyNkACkvhOA4mr7pHheGw89NcHv/8ZGWJQNUHihDBGFKe7ug2OnqvWENAYxlFxiGwRW93UvpT8gbf5t+I+WFEJyOYfYLnEVo1S5ZatqjUlhua6sQoL85qRH+R4847/jLTM805gMJE3n6YObwVFH3T7jG7VldsOn/rkB6ou/4SAoBKS9yRpjCvYITF267mOKSfw3yuIBdhxDnk9GlCxDaSQQWcrThzXAiD4otpUVo5alPknsPSW8Wtgwk4sI1SIn2v1IbpLzIGE8dX4Xms5lDMa5PPJ68qo/NNllplpFGXTq6NrCsmfFtGF4IwucR4tK1FcFjSpZJtW5ThH6wuTsWB3W27shLuMe/Lu2MjqFBuDXTPxP8kfIiY3r6WGjwFb3j8NHtQxATZtvyEmnFauJKpJIt3Llf/mccOfkSlvx7bA+v9heu9Xwax5kilrYsG7amXL1NICT68yYvXNsPO57IRnSo/Wl8uSKq8lJRUYHp06cjIiICUVFRmDVrFmpra+22f+CBB9CrVy8EBwejc+fO+Pe//42qqioxxZQttwx1vqaFVHhr/tvV296uJ7Mxd4x3H1KEPLhmgHcTztmyiHgLtQf+Z4Rv489TcaJetdOnT8fBgwexevVq/PHHH9i4cSPuvvtum+3Pnz+P8+fPY/HixThw4ACWLVuGnJwczJo1S0wxZYscUvI/c/
UlNvM6eEL7MRlm4+3V1ktlRzvWIYLwJqbToDEOnN3t4W615oECT9fI4b5EyB/RlJfDhw8jJycHn3zyCTIzMzFy5Ei88847+Pbbb3H+/Hmr2/Tt2xc//vgjpkyZgm7dumHMmDF48cUX8fvvv6OlpUUsUQmRsecD4y4/3jdc8H0SBMfbrblQ7hrZ1Sv9/WdcT0wdkoLLe7pXcTlIpcCCielubRusJssLIT9Ei5nLzc1FVFQUhgxpq0uRnZ0NhUKB7du347rrrnNqP1VVVYiIiIBKZV3UpqYmNDU18Z+rq6s9E5yQBe3fFjuEWH9j7eyiMzBBAMDVGUmY3C/Ra34Y3BTmI9/tc9i2fRI9ABib7n7uFrHz2Njish4x2HSsXJK+CfkjmspdXFyMuDjzAaVSqRAdHY3i4mKn9lFeXo7nn3/e7lTTokWLEBkZyf+lpKR4JLecMPiIo52n9E2O9HgfUzKsZ0G15zxMEPZwVnG5f3Q3kSUxhytfYCpeuMZ6+oDLesRg+d2X4s8HL7O5v/a6i7fSyD9EWbEJD3BZeZk/fz4YhrH7d+TIEY8Fq66uxuTJk9GnTx8888wzNtstWLAAVVVV/F9hYaHHfcsFuesuqx66HPeMSsNz11zi8b4oUoGQCltWP3dwxQhiWlU+2EY9oTenDkBmWke7Ccrad/l/EzzL+JsYqXWqHfnGEJ7gsor9yCOPYObMmXbbpKWlISEhAaWlpWbrW1paUFFRgYSEBLvb19TUYMKECQgPD8fPP/8Mtdp2UjKNRgONJjDfruWiu9i6R/VKCHd7nt5ZKJKCEBsh8y0580LCNXFmuscdy2M3Dytkd49zLoUD6S6EJ7isvMTGxiI21rFTWVZWFiorK7Fr1y4MHjwYALBu3ToYDAZkZmba3K66uhrjx4+HRqPBb7/9Bq3WOS0+EPGV/Ay+jBiRTgRhiuyHYXslSACt4tMZQ/B3fim+3HbGZps0D5UkIrAR7bU0PT0dEyZMwOzZs5GXl4ctW7Zg7ty5uOWWW5CUZPRPOHfuHHr37o28vDwARsVl3LhxqKurw6efforq6moUFxejuLgYer1eLFFli9zvmd7AWp4Db0WQEP7J5H7m9bJCveQjwtM68IV6eRHDAjI2PR4vXNvPbhtH1acJwh6i2tS/+uor9O7dG2PHjsWkSZMwcuRIfPTRR/z3zc3NyM/PR319PQBg9+7d2L59O/bv34/u3bsjMTGR/wskXxZnEbvAGiC/eelsJ6IuusaGekESwl8J1SjtfvYEW+Mtwkoeo9QYYa7jIJlNrd4wqJPd7/t38jwAgPB9RL1qo6Oj8fXXX6OmpgZVVVVYunQpwsLaTIWpqalgWRajR48GAIwePRosy1r9S01NFVNUWeJr5QF8AWdSYTM02054iFppvIYyUqJwVX/rkW5CkvdENu90e1trEUZH2as/vn2I3e852kfq+er46N1aZPXuy+0n47vaRuQh4V942d5JEMKTHBUstQhEAMGAwZ6nx6G2sQUJTkbWeIpWrcTP9w/H0ZIa9GtNLeCoGOOVfeKd3Ld477CpHUNQcKFekH39/sBIXKzXIS6c/CAJKswoe3rGi+v0JsQ7mFDmbVtkdo3ml515a/S1atyEvAjRKBGmUXlNceHQqpXo3ylKsqRy7hCuNfdr6ehBkUC1UuGU4iKn34dwH1JeZM7jk8QNNQ4N8tw4FxmsxrYFY7HnqSsFkMgKJvcqZ+5bvmoWJ+SBmIkPrx/ofFFIa5l2OR7Kdr7oaPuHfTcRfcIWXu15TieCAEh5kT0hAigX1lh2x1B0iw3F/2YNE2R/CZFadPDT0uwEIRTDu8c43TYrrSP+M64nPrxtML8uo9VZ9aYh7mcaj4sQz6JkmlhPLCiFRGBAPi9+znUuvMmZMrpXHEb3cr9eijcxjbqi+xYRKDAMw9dE4vjp/hGo07UgQuubYcgUHk0IBVle/JzBfpyk7dUb+mNoageqkUIQrSgVjM8qLgAwNFX8+xH5vA
QGZHmROY5MpP48jm8emoKbh7puHvfn34QgfJVTiyaRYkEIBlleCIIgCNHxluJCPi+BASkvfo7Q49jXqzc7c3/0ZbM6QRAE4RhSXgi/whnlZUJf+1XNCYIQhpeu6wetWoHHJ/X2Wp80NRUYkM8LEXD4uvWIILzFvy7tLOr++3WKxMFnJ9CYIwSHLC+ES7gbek0QhO8xro/4VkhvKy7k8xIYkOXFzxF6GA/rGo0fdp0VeK8EQXiTr2dn4khRDS7rYZ4Uz17WXoLwJcjyInM6dwyRWgSfYmBn/81rQxBCMbxbDO4c2dXCP0TlB9M75PMSGJDyInMSI+1XVA5SCjuQff22kJXWUWoRCEK2+Pr4JggOUl78nD6JkVKL4FWSo+wrcwRB2KariEUZrfG/O4chJkyDpTOHCLZP8nkJDEh58TNmDk81++wPFlRX6qEo/MDsTRBS0Tnau8rL5T1jseOJsRjTO96r/RLyh5QXP2NY12izz/7wEhKuJb9yQlr+fPAyqUUQlex0o/Jwe1YXr/cttI8K+bwEBvRUIAKK56/tK7UIhAxJT4yQWgRR+fj2wahp8t1q1ATRHrK8EAHFbZd6/82SIHwdhvHtatSuQD4vgQEpL4RDRveKlVoEgvAZesWHe7W/K/uQPwhBtIemjQiHPJTdE+vzy6QWw2X+b0IvqUUg/Ig/HhiJQ+erMTY9zqv9hgYpvdqf3CGfl8CAlBc/o/2w7RzteRI7jUpeBrq8x8di+6kKKsBICErf5Ej0TQ6s1AME4auQ8uLnRIZ4Po/dISSIXw6SQJFx9UUqLkKLKRlJ4ghDEF6GLAmuQT4vgYG8XqkJh6QIYGlpT0Kkll8ODVLh2gHeVQwYyvtJEARBmEDKi5/RPS6MXxbjhS0hUovb2yXCIwiC8BXIUhUYkPLih7x6Y3+Ea1T4/p4swfb5xaxhePn6flbn/C9Ni7ayhXCwgtfGJgiCIOQM+bz4ITcPScGNgzoJmir/sh62w6XvHdVNsH6sUVjRIOr+CYIgCHlBlhc/gytp780aP3HhWseNCIIgCEIgSHnxA769+1J+WaUU/5T2TvBuki6CCGTIg8Mxw7t1lFoEwsvQtJEfcGlaR7x6Y390iw1z3FgAQoJUGNG9I7YcvwCAfFIIgpCWUA09ygINOuN+ws1DUrzaX0gQXToEQRCENIg6x1BRUYHp06cjIiICUVFRmDVrFmpra53almVZTJw4EQzD4JdffhFTTMJDTJPYiUF0qLj7JwhC3tDUWuAhqvIyffp0HDx4EKtXr8Yff/yBjRs34u6773Zq27feeovi9X0Y0zOTFBUsal8RWrLyEARhG5q4DjxEeyocPnwYOTk52LFjB4YMGQIAeOeddzBp0iQsXrwYSUm2s7Tu3bsXr7/+Onbu3InExESxRCQIgiAIQoaIZnnJzc1FVFQUr7gAQHZ2NhQKBbZv325zu/r6etx666147733kJDguLBeU1MTqqurzf4IwpTBXToAANJiQiWWhCAIghAC0ZSX4uJixMWZl45XqVSIjo5GcXGxze0efvhhDB8+HNdcc41T/SxatAiRkZH8X0qKdx1XAxU5zeh98K9B+PfYHvjyrkypRSEIl4kN10gtgs8zqqftJJqEf+Ky8jJ//nwwDGP378iRI24J89tvv2HdunV46623nN5mwYIFqKqq4v8KCwvd6pvwX+LCtZh3ZU/RfXMIQgzIYd0xMWH0GwUaLvu8PPLII5g5c6bdNmlpaUhISEBpaanZ+paWFlRUVNicDlq3bh1OnDiBqKgos/U33HADLrvsMqxfv95iG41GA42G3kwIgiAIIlBwWXmJjY1FbKxjE11WVhYqKyuxa9cuDB48GIBROTEYDMjMtG6+nz9/Pu666y6zdf369cObb76JKVOmuCoqQRAEQRB+iGg+L+np6ZgwYQJmz56NvLw8bNmyBXPnzsUtt9zCRxqdO3cOvXv3Rl5eHgAgISEBffv2NfsDgM6dO6Nr165iiUq4Ae
PFzAp3jmw79z3ivJNFmCAIOSEjJzxCEETN8/LVV1+hd+/eGDt2LCZNmoSRI0fio48+4r9vbm5Gfn4+6uvrxRSDkDmpHduihJReLDhJEARB+CaiZv+Kjo7G119/bfP71NRUsKz99EKOvif8n1CNkl/uGU9FIQmCMIdLh0AEDlRVmnALb4ZKD+rcARmdIgEAz159ifc6JghCFlC0UeBBedcJt/Cm8sIwDH6dO9J7HRIEISuolEzgQZYXgiAIH4aeywRhCSkvBEEQBEHIClJeCLfwZqg0QRAEQZhCygtBEARBELKClBeCIAiCIGQFKS8EQRAEQcgKUl4I9yCXF4IgCEIiSHkhCILwYSb2TZRaBILwOUh5IQiC8FE+mzkUKdEhUotBED4HKS+EW9CsEUGIT2SIWmoRCMInIeWFIAjCR6GXBIKwDikvBEEQhN+QHBUstQiEFyDlhXCLIVSCniAIHySKptoCAqoqTbjFvy7tgiCVEpemRUstCkEQBBFgkPJCuIVKqcCtmZ2lFoMgCIIIQGjaiCAIwkfpER8utQiyg5ycAwNSXgiCIHyUMA0ZxwnCGqS8EARBEAQhK0h5IQiCIPyGxEgKlQ4EyCZJEARByJ4f7s3ChTodOnekcgqBACkvBEEQhOwZkkppGwIJmjYiCIIgCEJWkPJCEARBEISsIOWFIAiCIAhZQcoLQRAEQRCygpQXgiAIgiBkBSkvBEEQBEHIClJeCIIgfIhwKglAEA6hUUIQBOFD7H76Sjy0fC8m9U2UWhSC8FlEs7xUVFRg+vTpiIiIQFRUFGbNmoXa2lqH2+Xm5mLMmDEIDQ1FREQELr/8cjQ0NIglJkEQhE+hVirw3q2DMLk/KS8EYQvRlJfp06fj4MGDWL16Nf744w9s3LgRd999t91tcnNzMWHCBIwbNw55eXnYsWMH5s6dC4WCZrcIgiAIgjDCsCzLCr3Tw4cPo0+fPtixYweGDBkCAMjJycGkSZNw9uxZJCUlWd3u0ksvxZVXXonnn3/e7b6rq6sRGRmJqqoqREREuL0fgiAIgiC8hyvPb1FMGrm5uYiKiuIVFwDIzs6GQqHA9u3brW5TWlqK7du3Iy4uDsOHD0d8fDxGjRqFzZs3iyEiQRAEQRAyRRTlpbi4GHFxcWbrVCoVoqOjUVxcbHWbkydPAgCeeeYZzJ49Gzk5ORg0aBDGjh2LY8eO2eyrqakJ1dXVZn8EQRAEQfgvLikv8+fPB8Mwdv+OHDniliAGgwEAcM899+COO+7AwIED8eabb6JXr15YunSpze0WLVqEyMhI/i8lJcWt/gmCIAiCkAcuhUo/8sgjmDlzpt02aWlpSEhIQGlpqdn6lpYWVFRUICEhwep2iYlGz/o+ffqYrU9PT8eZM2ds9rdgwQLMmzeP/1xdXU0KDEEQBEH4MS4pL7GxsYiNjXXYLisrC5WVldi1axcGDx4MAFi3bh0MBgMyMzOtbpOamoqkpCTk5+ebrT969CgmTpxosy+NRgONRuPCURAEQRAEIWdE8XlJT0/HhAkTMHv2bOTl5WHLli2YO3cubrnlFj7S6Ny5c+jduzfy8vIAAAzD4NFHH8Xbb7+NH374AcePH8dTTz2FI0eOYNasWWKISRAEQRCEDBEtw+5XX32FuXPnYuzYsVAoFLjhhhvw9ttv8983NzcjPz8f9fX1/LqHHnoIjY2NePjhh1FRUYGMjAysXr0a3bp1E0tMgiAIgiBkhih5XqSE8rwQBEEQhPyQPM8LQRAEQRCEWJDyQhAEQRCErCDlhSAIgiAIWSGaw65UcC48lGmXIAiCIOQD99x2xhXX75SXmpoaAKBEdQRBEAQhQ2pqahAZGWm3jd9FGxkMBpw/fx7h4eFgGEbQfXPZewsLC/0ukomOTZ7QsckTfz42wL+Pj45NPFiWRU1NDZKSkqBQ2Pdq8TvLi0KhQKdOnUTtIyIiwu8uWg46NnlCxyZP/PnYAP8+Pjo2cXBkceEgh12CIAiCIGQFKS
8EQRAEQcgKUl5cQKPRYOHChX5ZCJKOTZ7QsckTfz42wL+Pj47NN/A7h12CIAiCIPwbsrwQBEEQBCErSHkhCIIgCEJWkPJCEARBEISsIOWFIAiCIAhZQcpLO9577z2kpqZCq9UiMzMTeXl5dtt///336N27N7RaLfr164eVK1d6SVLnWbRoEYYOHYrw8HDExcXh2muvRX5+vt1tli1bBoZhzP60Wq2XJHaeZ555xkLO3r17291GDucMAFJTUy2OjWEYzJkzx2p7Xz9nGzduxJQpU5CUlASGYfDLL7+Yfc+yLJ5++mkkJiYiODgY2dnZOHbsmMP9ujpmxcDesTU3N+Oxxx5Dv379EBoaiqSkJNx+++04f/683X26c22LgaPzNnPmTAs5J0yY4HC/vn7eAFgdfwzD4LXXXrO5T185b87c9xsbGzFnzhx07NgRYWFhuOGGG1BSUmJ3v+6OU6Eh5cWE5cuXY968eVi4cCF2796NjIwMjB8/HqWlpVbbb926FdOmTcOsWbOwZ88eXHvttbj22mtx4MABL0tunw0bNmDOnDnYtm0bVq9ejebmZowbNw51dXV2t4uIiEBRURH/d/r0aS9J7BqXXHKJmZybN2+22VYu5wwAduzYYXZcq1evBgDcdNNNNrfx5XNWV1eHjIwMvPfee1a/f/XVV/H2229jyZIl2L59O0JDQzF+/Hg0Njba3KerY1Ys7B1bfX09du/ejaeeegq7d+/GTz/9hPz8fFx99dUO9+vKtS0Wjs4bAEyYMMFMzm+++cbuPuVw3gCYHVNRURGWLl0KhmFwww032N2vL5w3Z+77Dz/8MH7//Xd8//332LBhA86fP4/rr7/e7n7dGaeiwBI8w4YNY+fMmcN/1uv1bFJSErto0SKr7W+++WZ28uTJZusyMzPZe+65R1Q5PaW0tJQFwG7YsMFmm88++4yNjIz0nlBusnDhQjYjI8Pp9nI9ZyzLsg8++CDbrVs31mAwWP1eLueMZVkWAPvzzz/znw0GA5uQkMC+9tpr/LrKykpWo9Gw33zzjc39uDpmvUH7Y7NGXl4eC4A9ffq0zTauXtvewNqxzZgxg73mmmtc2o9cz9s111zDjhkzxm4bXzxvLGt536+srGTVajX7/fff820OHz7MAmBzc3Ot7sPdcSoGZHlpRafTYdeuXcjOzubXKRQKZGdnIzc31+o2ubm5Zu0BYPz48Tbb+wpVVVUAgOjoaLvtamtr0aVLF6SkpOCaa67BwYMHvSGeyxw7dgxJSUlIS0vD9OnTcebMGZtt5XrOdDodvvzyS9x55512C47K5Zy159SpUyguLjY7N5GRkcjMzLR5btwZs75CVVUVGIZBVFSU3XauXNtSsn79esTFxaFXr1647777cOHCBZtt5XreSkpKsGLFCsyaNcthW188b+3v+7t27UJzc7PZeejduzc6d+5s8zy4M07FgpSXVsrLy6HX6xEfH2+2Pj4+HsXFxVa3KS4udqm9L2AwGPDQQw9hxIgR6Nu3r812vXr1wtKlS/Hrr7/iyy+/hMFgwPDhw3H27FkvSuuYzMxMLFu2DDk5Ofjggw9w6tQpXHbZZaipqbHaXo7nDAB++eUXVFZWYubMmTbbyOWcWYP7/V05N+6MWV+gsbERjz32GKZNm2a3+J2r17ZUTJgwAf/73/+wdu1avPLKK9iwYQMmTpwIvV5vtb1cz9vnn3+O8PBwh9MqvnjerN33i4uLERQUZKFAO3rmcW2c3UYs/K6qNGGfOXPm4MCBAw7nYLOyspCVlcV/Hj58ONLT0/Hhhx/i+eefF1tMp5k4cSK/3L9/f2RmZqJLly747rvvnHpDkguffvopJk6ciKSkJJtt5HLOApnm5mbcfPPNYFkWH3zwgd22crm2b7nlFn65X79+6N+/P7p164b169dj7NixEkomLEuXLsX06dMdOsH74nlz9r4vJ8jy0kpMTAyUSqWFp3VJSQkSEhKsbpOQkOBSe6mZO3cu/vjjD/
z999/o1KmTS9uq1WoMHDgQx48fF0k6YYiKikLPnj1tyim3cwYAp0+fxpo1a3DXXXe5tJ1czhkA/vd35dy4M2alhFNcTp8+jdWrV9u1uljD0bXtK6SlpSEmJsamnHI7bwCwadMm5OfnuzwGAenPm637fkJCAnQ6HSorK83aO3rmcW2c3UYsSHlpJSgoCIMHD8batWv5dQaDAWvXrjV7mzUlKyvLrD0ArF692mZ7qWBZFnPnzsXPP/+MdevWoWvXri7vQ6/XY//+/UhMTBRBQuGora3FiRMnbMopl3NmymeffYa4uDhMnjzZpe3kcs4AoGvXrkhISDA7N9XV1di+fbvNc+POmJUKTnE5duwY1qxZg44dO7q8D0fXtq9w9uxZXLhwwaaccjpvHJ9++ikGDx6MjIwMl7eV6rw5uu8PHjwYarXa7Dzk5+fjzJkzNs+DO+NUNLzqHuzjfPvtt6xGo2GXLVvGHjp0iL377rvZqKgotri4mGVZlr3tttvY+fPn8+23bNnCqlQqdvHixezhw4fZhQsXsmq1mt2/f79Uh2CV++67j42MjGTXr1/PFhUV8X/19fV8m/bH9uyzz7KrVq1iT5w4we7atYu95ZZbWK1Wyx48eFCKQ7DJI488wq5fv549deoUu2XLFjY7O5uNiYlhS0tLWZaV7znj0Ov1bOfOndnHHnvM4ju5nbOamhp2z5497J49e1gA7BtvvMHu2bOHj7h5+eWX2aioKPbXX39l//nnH/aaa65hu3btyjY0NPD7GDNmDPvOO+/wnx2NWV84Np1Ox1599dVsp06d2L1795qNwaamJpvH5uja9oVjq6mpYf/zn/+wubm57KlTp9g1a9awgwYNYnv06ME2NjbaPDY5nDeOqqoqNiQkhP3ggw+s7sNXz5sz9/17772X7dy5M7tu3Tp2586dbFZWFpuVlWW2n169erE//fQT/9mZceoNSHlpxzvvvMN27tyZDQoKYocNG8Zu27aN/27UqFHsjBkzzNp/9913bM+ePdmgoCD2kksuYVesWOFliR0DwOrfZ599xrdpf2wPPfQQ/zvEx8ezkyZNYnfv3u194R0wdepUNjExkQ0KCmKTk5PZqVOnssePH+e/l+s541i1ahULgM3Pz7f4Tm7n7O+//7Z6HXLHYDAY2KeeeoqNj49nNRoNO3bsWIvj7tKlC7tw4UKzdfbGrLewd2ynTp2yOQb//vtvfh/tj83Rte0Lx1ZfX8+OGzeOjY2NZdVqNdulSxd29uzZFkqIHM8bx4cffsgGBwezlZWVVvfhq+fNmft+Q0MDe//997MdOnRgQ0JC2Ouuu44tKiqy2I/pNs6MU2/AtApHEARBEAQhC8jnhSAIgiAIWUHKC0EQBEEQsoKUF4IgCIIgZAUpLwRBEARByApSXgiCIAiCkBWkvBAEQRAEIStIeSEIgiAIQlaQ8kIQBEEQhKwg5YUgCIIgCFlBygtBEARBELKClBeCIAiCIGQFKS8EQRAEQciK/wcyvlHjg7z9pwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "upsampled_vad_scores = VAD.upsample_VAD(prob_chunks, audio_file)\n", + "\n", + "plt.plot(time, signal)\n", + "plt.plot(time, upsampled_boundaries.squeeze())\n", + "plt.plot(time, upsampled_vad_scores.squeeze())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BV6xjBmBQnLr" + }, + "source": [ + "As an alternative, users can save the VAD file and open it with the original one using an audio visualization software like audacity.\n", + "\n", + "\n", + "**That's all! Happy VAD!**\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "L248uu8jt4K-" + }, + "source": [ + "\n", + "\n", + "---\n", + "\n", + "## Appendix: on using energy-based VAD\n", + "\n", + "If energy-based VAD is used, the order of the merge, remove, double-check operations matters. Let's use `double_check_speech_segments` right away after energy-based VAD and then `merge_close_segments`. Some speech frames are dropped:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "background_save": true + }, + "id": "S6JtQmHptzee" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Exception ignored in: \n", + "Traceback (most recent call last):\n", + " File \"/usr/local/lib/python3.10/dist-packages/jax/_src/lib/__init__.py\", line 97, in _xla_gc_callback\n", + " def _xla_gc_callback(*args):\n", + "KeyboardInterrupt: \n" + ] + } + ], + "source": [ + "# plotted boundaries may be scaled down to compare many at once\n", + "def plot_boundaries(b, color):\n", + " upsampled = VAD.upsample_boundaries(b, audio_file)\n", + " plt.plot(time, upsampled.squeeze(), color)\n", + "\n", + "\n", + "# first figures - from CRDNN VAD to energy VAD\n", + "fig, axs = plt.subplots(3, 3, figsize=(16, 12))\n", + "plt.sca(axs[0, 0])\n", + "plt.title('1a. 
CRDNN VAD scores')\n", + "plt.plot(time, signal)\n", + "\n", + "plt.plot(time, upsampled_vad_scores.squeeze(), 'green')\n", + "\n", + "# CRDNN boundaries\n", + "plt.sca(axs[1, 0])\n", + "plt.title('1b. CRDNN VAD boundaries')\n", + "plt.plot(time, signal)\n", + "\n", + "boundaries = VAD.get_boundaries(prob_th)\n", + "plot_boundaries(boundaries, 'orange')\n", + "\n", + "# energy VAD boundaries\n", + "plt.sca(axs[2, 0])\n", + "plt.title('1c. Energy VAD boundaries based on CRDNN')\n", + "plt.plot(time, signal)\n", + "\n", + "boundaries_energy = VAD.energy_VAD(audio_file, boundaries, activation_th=0.8, deactivation_th=0.0)\n", + "plot_boundaries(boundaries_energy, 'purple')\n", + "\n", + "# second figure - double-check, then merge\n", + "plt.sca(axs[0, 1])\n", + "plt.title('2a. Energy VAD (same as 1c)')\n", + "plt.plot(time, signal)\n", + "\n", + "plot_boundaries(boundaries_energy, 'purple')\n", + "\n", + "# double-check\n", + "plt.sca(axs[1, 1])\n", + "plt.title('2b. Double-check (too early)')\n", + "plt.plot(time, signal)\n", + "\n", + "boundaries = VAD.double_check_speech_segments(boundaries_energy, audio_file, speech_th=0.5)\n", + "plot_boundaries(boundaries, 'red')\n", + "\n", + "# merge (too late)\n", + "plt.sca(axs[2, 1])\n", + "plt.title('2c. Merge short segments (too late)')\n", + "plt.plot(time, signal)\n", + "\n", + "boundaries = VAD.merge_close_segments(boundaries, close_th=0.250)\n", + "plot_boundaries(boundaries, 'black')\n", + "\n", + "# third figure - merge, remove, double-check\n", + "plt.sca(axs[0, 2])\n", + "plt.title('3a. Energy VAD (same as 1c)')\n", + "plt.plot(time, signal)\n", + "\n", + "plot_boundaries(boundaries_energy, 'purple')\n", + "\n", + "# merge\n", + "plt.sca(axs[1, 2])\n", + "plt.title('3b. 
Merge short segments (as above)')\n", + "plt.plot(time, signal)\n", + "\n", + "boundaries = VAD.merge_close_segments(boundaries_energy, close_th=0.250)\n", + "plot_boundaries(boundaries, 'black')\n", + "\n", + "# merge\n", + "plt.sca(axs[2, 2])\n", + "plt.title('3c. Remove short segments & double-check (as above)')\n", + "plt.plot(time, signal)\n", + "\n", + "boundaries = VAD.remove_short_segments(boundaries, len_th=0.250)\n", + "boundaries = VAD.double_check_speech_segments(boundaries, audio_file, speech_th=0.5)\n", + "plot_boundaries(boundaries, 'red')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sb_auto_footer", + "tags": [ + "sb_auto_footer" + ] + }, + "source": [ + "## Citing SpeechBrain\n", + "\n", + "If you use SpeechBrain in your research or business, please cite it using the following BibTeX entry:\n", + "\n", + "```bibtex\n", + "@misc{speechbrainV1,\n", + " title={Open-Source Conversational AI with {SpeechBrain} 1.0},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve},\n", + " year={2024},\n", + " eprint={2407.00463},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.LG},\n", + " url={https://arxiv.org/abs/2407.00463},\n", + "}\n", + "@misc{speechbrain,\n", + " title={{SpeechBrain}: A General-Purpose Speech Toolkit},\n", + " author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman 
Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},\n", + " year={2021},\n", + " eprint={2106.04624},\n", + " archivePrefix={arXiv},\n", + " primaryClass={eess.AS},\n", + " note={arXiv:2106.04624}\n", + "}\n", + "```" + ] + } + ], + "metadata": { + "colab": { + "name": "", + "version": "" + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "035d7ada3c66442b868537532aeda6c9": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": 
null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "05999aef47c342f6a4bd495dec80bfa3": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "08881ce464d9473eb24c24ebea5b6b36": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "1524b323bee54e27976c43f11e9044d2": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_dd782e4705934cc9a3abddd64cb61c70", + "IPY_MODEL_64da73ccf9044b018a518089902d957c", + "IPY_MODEL_66f17fcd37204c0aa92c144601cf26c3" + ], + "layout": "IPY_MODEL_035d7ada3c66442b868537532aeda6c9" + } + }, + "1a309e6a0669457dacfed56803dceff0": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "1a5ce622ea784769b0f90200300a32d0": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { 
+ "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "36e076f9516e40be9af39b9bb785eae5": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "394b510120b94a038dd7266b4bbe4c52": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": 
null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "3fe8357c32814d14a4fe5b4b0638b358": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "4fe3d5af87174832ada339a7639a0a5a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "510e43a2d4b84098ac8c4b78ec7ef4a0": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + 
"_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ebcaceeffbb14ebab4a952e3f846325d", + "max": 2286, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_08881ce464d9473eb24c24ebea5b6b36", + "value": 2286 + } + }, + "5f4600f7732945bf8eaeec68dc049791": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_1a5ce622ea784769b0f90200300a32d0", + "placeholder": "​", + "style": "IPY_MODEL_36e076f9516e40be9af39b9bb785eae5", + "value": "mean_var_norm.ckpt: 100%" + } + }, + "64da73ccf9044b018a518089902d957c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_aea9b0b7a4c34fdc90a86381b6fd131f", + "max": 452671, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_3fe8357c32814d14a4fe5b4b0638b358", + "value": 452671 + } + }, + "64ee2cc1530a44649b2eaefc2d626489": { + "model_module": "@jupyter-widgets/base", + "model_module_version": 
"1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "66f17fcd37204c0aa92c144601cf26c3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_1a309e6a0669457dacfed56803dceff0", + "placeholder": "​", + "style": "IPY_MODEL_a59f79ff3fd3407cb7dd97bea40a7fa0", + "value": " 453k/453k [00:00<00:00, 759kB/s]" + } + }, + "6bdac215350a47459419cf0648917b33": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": 
"@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7f4ccfc7e07e44c091dcefb3c5321fcf": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "814b6bb87f2b41d6baa8686d54093eb9": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + 
"description_tooltip": null, + "layout": "IPY_MODEL_64ee2cc1530a44649b2eaefc2d626489", + "placeholder": "​", + "style": "IPY_MODEL_7f4ccfc7e07e44c091dcefb3c5321fcf", + "value": " 1.06k/1.06k [00:00<00:00, 67.3kB/s]" + } + }, + "81addb3add394558b4965ae64ccd28e2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_c6a1d60818dc413d8cb2871ef390c68e", + "IPY_MODEL_510e43a2d4b84098ac8c4b78ec7ef4a0", + "IPY_MODEL_ca7be228b4d34867a058553bc88ee621" + ], + "layout": "IPY_MODEL_05999aef47c342f6a4bd495dec80bfa3" + } + }, + "98293ef05bc5486bb7f00a7e14d5d4e3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "a59f79ff3fd3407cb7dd97bea40a7fa0": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "abc96fd17c8845199c7f60ecdcefd04f": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + 
"state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "aea9b0b7a4c34fdc90a86381b6fd131f": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": 
null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "bcbee7bbed0b44328c96c2c1c7829002": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "c6a1d60818dc413d8cb2871ef390c68e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_394b510120b94a038dd7266b4bbe4c52", + "placeholder": "​", + "style": "IPY_MODEL_bcbee7bbed0b44328c96c2c1c7829002", + "value": "hyperparams.yaml: 100%" + } + }, + "ca7be228b4d34867a058553bc88ee621": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": 
"IPY_MODEL_e09296d8813b4cd58eedd067fbb32ed6", + "placeholder": "​", + "style": "IPY_MODEL_98293ef05bc5486bb7f00a7e14d5d4e3", + "value": " 2.29k/2.29k [00:00<00:00, 110kB/s]" + } + }, + "cdaa7435db634dfbbf1bd1b2439a8671": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d56ee8c3d08349cfb73ce60c48bfcb97": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_5f4600f7732945bf8eaeec68dc049791", + "IPY_MODEL_da6323682f01404b8bc5969903b8702c", + 
"IPY_MODEL_814b6bb87f2b41d6baa8686d54093eb9" + ], + "layout": "IPY_MODEL_cdaa7435db634dfbbf1bd1b2439a8671" + } + }, + "da6323682f01404b8bc5969903b8702c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_abc96fd17c8845199c7f60ecdcefd04f", + "max": 1063, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_f59c2c9fe77245b7997917d051c5bb28", + "value": 1063 + } + }, + "dd782e4705934cc9a3abddd64cb61c70": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6bdac215350a47459419cf0648917b33", + "placeholder": "​", + "style": "IPY_MODEL_4fe3d5af87174832ada339a7639a0a5a", + "value": "model.ckpt: 100%" + } + }, + "e09296d8813b4cd58eedd067fbb32ed6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": 
null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ebcaceeffbb14ebab4a952e3f846325d": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + 
}, + "f59c2c9fe77245b7997917d051c5bb28": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/lint-requirements.txt b/lint-requirements.txt index 41d37cf723..e25a4809a4 100644 --- a/lint-requirements.txt +++ b/lint-requirements.txt @@ -1,6 +1,4 @@ -black==19.10b0 -click==8.0.4 -flake8==3.7.9 -pycodestyle==2.5.0 -pytest==5.4.1 -yamllint==1.23.0 +codespell>=2.3.0 +pytest==7.4.0 +ruff==v0.12.4 +yamllint==1.35.1 diff --git a/pyproject.toml b/pyproject.toml index b3e5d7c11f..a5ae1a2bcf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,15 +1,131 @@ -[tool.black] +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "speechbrain" +dynamic = ["version"] +description = "All-in-one speech toolkit in pure Python and Pytorch" +readme = "README.md" +license = {text = "Apache-2.0"} +authors = [ + {name = "Mirco Ravanelli, Titouan Parcollet, Adel Moumen, Sylvain de Langen, Cem Subakan, Peter Plantinga, Yingzhi Wang, Pooneh Mousavi, Luca Della Libera, Artem Ploujnikov, Francesco Paissan, Davide Borra, Salah Zaiem, Zeyu Zhao, Shucong Zhang, Georgios Karakasidis, Sung-Lin Yeh, Pierre Champion, Aku Rouhe, Rudolf Braun, Florian Mai, Juan Zuluaga-Gomez, Seyed Mahed Mousavi, Andreas Nautsch, Ha Nguyen, Xuechen Liu, Sangeet Sagar, Jarod Duret, Salima Mdhaffar, Gaelle Laperriere, Mickael Rouvier, Renato De Mori, Yannick Esteve"} +] +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", +] +requires-python = 
">=3.8.1" +dependencies = [ + "hyperpyyaml>=0.0.1", + "joblib>=0.14.1", + "numpy>=1.17.0", + "packaging", + "requests>=2.20.0", + "scipy>=1.4.1", + "sentencepiece>=0.1.91", + "soundfile>=0.12.1", + "torch>=2.1.0", + "torchaudio>=2.1.0", + "tqdm>=4.42.0", + "huggingface_hub>=0.8.0", +] +keywords = ["speech", "audio", "pytorch", "deep-learning"] + +[project.urls] +Homepage = "https://speechbrain.github.io/" + +[project.optional-dependencies] +dev = [ + "ruff==0.12.4", + "pytest==7.4.0", + "yamllint==1.35.1", + "pre-commit>=2.3.0", + "pandas>=1.0.1", + "transformers>=4.30.0", + "codespell>=2.3.0", +] + +[tool.setuptools.dynamic] +version = {file = "speechbrain/version.txt"} + +[tool.setuptools.packages.find] +exclude = ["tests", "tests.*"] + +[tool.setuptools.package-data] +speechbrain = ["version.txt", "log-config.yaml"] + +[tool.ruff] +target-version = "py38" line-length = 80 -target-version = ['py38'] -exclude = ''' - -( - /( - \.eggs # exclude a few common directories in the - | \.git # root of the project - | \.mypy_cache - | \.tox - | \.venv - )/ -) -''' +exclude = [ + ".eggs", + ".git", + ".mypy_cache", + ".tox", + ".venv", + "tests/tmp", +] + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort (import sorting) + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "UP", # pyupgrade +] +ignore = [ + "B007", # Loop control variable not used within loop body + "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` + "B028", # No explicit `stacklevel` keyword argument found + "C400", # Unnecessary generator (rewrite as a `list` comprehension) + "C401", # Unnecessary generator (rewrite as a `set` comprehension) + "C403", # Unnecessary `list` comprehension (rewrite as a `set` comprehension) + "C404", # Unnecessary `list` call within `sorted()` + "C405", # Unnecessary `list` literal (rewrite as a `set` literal) + "C408", # Unnecessary `dict`/`list` call (rewrite as literal) + "C409", # Unnecessary `tuple` literal passed to `tuple()` + "C414", # Unnecessary `list` call within `sorted()` + "C416", # Unnecessary `list` comprehension (rewrite using `list()`) + "C417", # Unnecessary `map` usage (rewrite using a `list` comprehension) + "C419", # Unnecessary list comprehension + "E203", # whitespace before ':' (black compatibility) + "E266", # too many leading '#' for block comment + "E501", # line too long (handled by formatter) + "E721", # Do not compare types, use `isinstance()` + "F601", # Dictionary key literal repeated + "UP008", # Use `super()` instead of `super(__class__, self)` + "UP028", # Replace `yield` over `for` loop with `yield from` + "UP030", # Use implicit references for positional format fields + "UP031", # Use format specifiers instead of percent format + "B008", # do not perform function calls in argument defaults + "B006", # do not use mutable data structures for argument defaults + "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` + "B019", # Use of `functools.lru_cache` or `functools.cache` on methods can lead to memory leaks + "B023", # Function definition does not bind loop variable + "B026", # Star-arg unpacking after a keyword argument is strongly discouraged +] + +[tool.ruff.lint.isort] +# Import sorting configuration (black-compatible) +combine-as-imports = true +force-wrap-aliases = true +known-first-party = ["speechbrain"] +known-third-party = ["torch", "torchaudio", "numpy", "scipy", "hyperpyyaml", 
"joblib", "packaging", "requests", "sentencepiece", "tqdm", "huggingface_hub"] +split-on-trailing-comma = false + +[tool.ruff.lint.per-file-ignores] +"__init__.py" = ["F401"] + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" +docstring-code-format = true + +[tool.codespell] +skip = "./tests/tmp,./**/result,*.csv,*train.txt,*test.txt" diff --git a/pytest.ini b/pytest.ini index 663511ccc6..905d7c877e 100644 --- a/pytest.ini +++ b/pytest.ini @@ -6,4 +6,4 @@ python_files = check_*.py example_*.py -norecursedirs = results +norecursedirs = results tests/tmp tests/utils tests/templates diff --git a/recipes/AISHELL-1/ASR/CTC/README.md b/recipes/AISHELL-1/ASR/CTC/README.md index 817b568c12..90cecc0d59 100644 --- a/recipes/AISHELL-1/ASR/CTC/README.md +++ b/recipes/AISHELL-1/ASR/CTC/README.md @@ -6,7 +6,6 @@ This folder contains a CTC-wav2vec2 recipe for speech recognition with [AISHELL- A pretrained tokenizer from [huggingface](https://huggingface.co/bert-base-chinese) is used and can be downloaded automatically. -If not present in the specified data_folder, the dataset will be automatically downloaded there. This step is not mandatory. We will use the official tokenizer downloaded from the web if you do not specify a different tokenizer in the speech recognition recipe. @@ -24,7 +23,7 @@ Results are reported in terms of Character Error Rate (CER). 
|:--------------------------:|:-----:| :-----:| :-----:| :-----: | | train_with_wav2vec.yaml | No | 5.06 | 4.52 | 1xRTX 8000 Ti 48GB | -You can checkout our results (models, training logs, etc,) [here](https://drive.google.com/drive/folders/1GTB5IzQPl57j-0I1IpmvKg722Ti4ahLz?usp=sharing) +You can checkout our results (models, training logs, etc,) [here](https://www.dropbox.com/sh/e4bth1bylk7c6h8/AADFq3cWzBBKxuDv09qjvUMta?dl=0) # Training Time It takes about 2h on 1 RTX 8000 (48GB) @@ -43,6 +42,15 @@ You can find the pre-trained model with an easy-inference function on HuggingFac Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git 
a/recipes/AISHELL-1/ASR/CTC/extra_requirements.txt b/recipes/AISHELL-1/ASR/CTC/extra_requirements.txt deleted file mode 100644 index 5fae033300..0000000000 --- a/recipes/AISHELL-1/ASR/CTC/extra_requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -# For wav2vec2 recipe (HuggingFace) -transformers diff --git a/recipes/AISHELL-1/ASR/CTC/hparams/train_with_wav2vec.yaml b/recipes/AISHELL-1/ASR/CTC/hparams/train_with_wav2vec.yaml index c485d556af..d03b3aa7fb 100644 --- a/recipes/AISHELL-1/ASR/CTC/hparams/train_with_wav2vec.yaml +++ b/recipes/AISHELL-1/ASR/CTC/hparams/train_with_wav2vec.yaml @@ -9,8 +9,8 @@ # ############################################################################ seed: 2 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/ctc_wav2vec/ +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/ctc_wav2vec2/ cer_file: !ref /cer.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -19,36 +19,40 @@ train_log: !ref /train_log.txt data_folder: !PLACEHOLDER # e,g./path/to/aishell skip_prep: False +remove_compressed_wavs: False ckpt_interval_minutes: 15 # save checkpoint every N min train_data: !ref /train.csv valid_data: !ref /dev.csv test_data: !ref /test.csv wav2vec2_hub: TencentGameMate/chinese-wav2vec2-large +wav2vec2_folder: !ref /wav2vec2_checkpoint + +####################### Training Parameters #################################### -# Training parameters number_of_epochs: 80 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs # Must be 8 per GPU to fit 32GB of VRAM batch_size: 10 -test_batch_size: 4 +test_batch_size: 1 dynamic_batching: False +max_batch_length: 15 # in terms of "duration" in annotations by default, second here +shuffle: False # if true re-creates batches at each epoch shuffling examples. 
+num_buckets: 10 # floor(log(max_batch_len/left_bucket_len, multiplier)) + 1 +batch_ordering: ascending dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 15 # in terms of "duration" in annotations by default, second here - left_bucket_len: 200 # old implementation attributs - multiplier: 1.1 # old implementation attributs - shuffle_ex: False # if true re-creates batches at each epoch shuffling examples. - num_buckets: 10 # floor(log(max_batch_len/left_bucket_len, multiplier)) + 1 - batch_ordering: ascending + max_batch_length: !ref + shuffle: !ref + num_buckets: !ref + batch_ordering: !ref num_workers: 6 @@ -73,36 +77,69 @@ tokenizer: !apply:transformers.BertTokenizer.from_pretrained # bert-base-chinese tokens length output_neurons: 21128 -# Decoding parameters +############################## Decoding ######################################## + # Be sure that the bos and eos index match with the BPEs ones +# Decoding parameters +test_searcher: !name:speechbrain.decoders.CTCBeamSearcher blank_index: 0 +beam_size: 100 +beam_prune_logp: -12.0 +token_prune_min_logp: -1.2 +prune_history: True +topk: 1 +alpha: 1.0 +beta: 0.5 +# can be downloaded from here https://www.openslr.org/11/ or trained with kenLM +# It can either be a .bin or .arpa ; note: .arpa is much slower at loading +# If you don't want to use an LM, comment it out or set it to null +# kenlm_model_path: none + # AISHELL-1 has spaces between words in the transcripts, # which Chinese writing normally does not do. # If remove_spaces, spaces are removed # from the transcript before computing CER. 
-# (e.g., 祝 可爱 的 你 —> 祝可爱的你) remove_spaces: True split_tokens: !apply:operator.not_ [!ref ] epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -SpeedPerturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [90, 100, 110] - -SpecAugment: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 35 + drop_length_high: 45 + drop_count_low: 2 + drop_count_high: 2 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] @@ -126,11 +163,11 @@ enc: !new:speechbrain.nnet.containers.Sequential bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref ctc_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref @@ -170,6 +207,8 @@ 
lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.9 patient: 0 +############################## Logging and Pretrainer ########################## + checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: diff --git a/recipes/AISHELL-1/ASR/CTC/train_with_wav2vec.py b/recipes/AISHELL-1/ASR/CTC/train_with_wav2vec.py index 89aa560721..3fd8f7a8f6 100644 --- a/recipes/AISHELL-1/ASR/CTC/train_with_wav2vec.py +++ b/recipes/AISHELL-1/ASR/CTC/train_with_wav2vec.py @@ -15,13 +15,15 @@ """ import sys + import torch -import logging +from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -35,19 +37,14 @@ def compute_forward(self, batch, stage): # Add augmentation if specified if stage == sb.Stage.TRAIN: if hasattr(self.hparams, "SpeedPerturb"): - wavs = self.hparams.SpeedPerturb(wavs, wav_lens) - - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) + wavs = self.hparams.speed_perturb(wavs, wav_lens) # Forward pass - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "SpecAugment"): - feats = self.hparams.SpecAugment(feats) + # Add feature augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) x = self.modules.enc(feats) logits = self.modules.ctc_lin(x) @@ -61,19 +58,23 @@ def compute_objectives(self, predictions, batch, stage): ids = batch.id tokens, tokens_lens = batch.tokens - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens = torch.cat([tokens, tokens], dim=0) - tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if stage == sb.Stage.TRAIN: + if hasattr(self.hparams, "fea_augment"): + tokens = self.hparams.fea_augment.replicate_labels(tokens) + tokens_lens = self.hparams.fea_augment.replicate_labels( + tokens_lens + ) loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) - if stage != sb.Stage.TRAIN: + if stage == sb.Stage.VALID: # Decode token terms to words sequences = sb.decoders.ctc_greedy_decode( p_ctc, wav_lens, blank_id=self.hparams.blank_index ) predicted_words_list = [] - target_words_list = [list(wrd) for wrd in batch.wrd] for sequence in sequences: # Decode token terms to words @@ -92,36 +93,31 @@ def compute_objectives(self, predictions, batch, stage): predicted_words_list.append(predicted_words) + elif stage == sb.Stage.TEST: + p_tokens = test_searcher(p_ctc, wav_lens) + # select one-best + text_hyps = [hyp[0].text for hyp in p_tokens] + + predicted_words_list = [] + preds = [] + for seq in text_hyps: + seq = seq.replace("[CLS]", "") + seq = seq.replace("[SEP]", "") + seq = seq.replace("[PAD]", "") + for c in seq: + preds.append(c) + predicted_words_list.append(preds) + + if stage != sb.Stage.TRAIN: + target_words_list = [list(wrd) for wrd in batch.wrd] self.cer_metric.append( - ids=ids, predict=predicted_words_list, target=target_words_list, + ids=ids, + predict=predicted_words_list, + target=target_words_list, ) 
return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - - if self.check_gradients(loss): - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.step() - self.model_optimizer.step() - - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" self.batch_idx = 0 @@ -162,14 +158,15 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"CER": stage_stats["CER"]}, min_keys=["CER"], + meta={"CER": stage_stats["CER"]}, + min_keys=["CER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.cer_file, "w") as w: + with open(self.hparams.cer_file, "w", encoding="utf-8") as w: self.cer_metric.write_stats(w) def init_optimizers(self): @@ -192,14 +189,34 @@ def init_optimizers(self): if self.checkpointer is not None: self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + if not self.hparams.wav2vec2.freeze: + self.optimizers_dict = { + "wav2vec_optimizer": self.wav2vec_optimizer, + "model_optimizer": self.model_optimizer, + } + else: + self.optimizers_dict = {"model_optimizer": self.model_optimizer} + + def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} 
+ if not self.hparams.wav2vec2.freeze: + valid_optimizers["wav2vec_optimizer"] = optimizers[ + "wav2vec_optimizer" + ] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers + def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_data"], replacements={"data_root": data_folder}, + csv_path=hparams["train_data"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -224,12 +241,14 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_data"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_data"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_data"], replacements={"data_root": data_folder}, + csv_path=hparams["test_data"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -262,7 +281,8 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens"], ) # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
@@ -272,24 +292,17 @@ def text_pipeline(wrd): from speechbrain.dataio.sampler import DynamicBatchSampler # noqa dynamic_hparams = hparams["dynamic_batch_sampler"] - num_buckets = dynamic_hparams["num_buckets"] train_batch_sampler = DynamicBatchSampler( train_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, + **dynamic_hparams, length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], ) valid_batch_sampler = DynamicBatchSampler( valid_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, + **dynamic_hparams, length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], ) return ( @@ -303,13 +316,11 @@ def text_pipeline(wrd): if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -330,6 +341,7 @@ def text_pipeline(wrd): "data_folder": hparams["data_folder"], "save_folder": hparams["output_folder"], "skip_prep": hparams["skip_prep"], + "remove_compressed_wavs": hparams["remove_compressed_wavs"], }, ) @@ -351,8 +363,22 @@ def text_pipeline(wrd): checkpointer=hparams["checkpointer"], ) - # adding objects to trainer: asr_brain.tokenizer = tokenizer + vocab_list = [ + tokenizer.convert_ids_to_tokens(i) for i in range(tokenizer.vocab_size) + ] + test_searcher = hparams["test_searcher"]( + blank_index=hparams["blank_index"], + vocab_list=vocab_list, + alpha=hparams["alpha"], + beta=hparams["beta"], + beam_size=hparams["beam_size"], + beam_prune_logp=hparams["beam_prune_logp"], + token_prune_min_logp=hparams["token_prune_min_logp"], + prune_history=hparams["prune_history"], + 
topk=hparams["topk"], + kenlm_model_path=hparams.get("kenlm_model_path"), + ) # Changing the samplers if dynamic batching is activated train_dataloader_opts = hparams["train_dataloader_opts"] diff --git a/recipes/AISHELL-1/ASR/seq2seq/README.md b/recipes/AISHELL-1/ASR/seq2seq/README.md index 86ebeaf3b2..9eff6726a3 100644 --- a/recipes/AISHELL-1/ASR/seq2seq/README.md +++ b/recipes/AISHELL-1/ASR/seq2seq/README.md @@ -9,9 +9,8 @@ To train a full recipe: ``` cd ../../Tokenizer -python train.py hparams/tokenizer_bpe5000.yaml --data_folder=/localscratch/aishell/ +python train.py hparams/tokenizer_bpe5000.yaml --data_folder=/path/to/aishell/ ``` -If not present in the specified data_folder, the dataset will be automatically downloaded there. This step is not mandatory. We will use the official tokenizer downloaded from the web if you do not specify a different tokenizer in the speech recognition recipe. @@ -30,7 +29,7 @@ Results are reported in terms of Character Error Rate (CER). It is not clear fro | Base (keep spaces) | 7.51 | You can checkout our results (models, training logs, etc,) here: -https://drive.google.com/drive/folders/1zlTBib0XEwWeyhaXDXnkqtPsIBI18Uzs?usp=sharing +https://www.dropbox.com/sh/kefuzzf6jaljqbr/AADBRWRzHz74GCMDqJY9BES4a?dl=0 # Training Time It takes about 1h 30 minutes on a NVIDIA V100 (32GB). @@ -45,6 +44,15 @@ It takes about 1h 30 minutes on a NVIDIA V100 (32GB). Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/AISHELL-1/ASR/seq2seq/hparams/train.yaml b/recipes/AISHELL-1/ASR/seq2seq/hparams/train.yaml index 7acbfd00ea..3d289fb05f 100644 --- a/recipes/AISHELL-1/ASR/seq2seq/hparams/train.yaml +++ b/recipes/AISHELL-1/ASR/seq2seq/hparams/train.yaml @@ -10,42 +10,45 @@ # ############################################################################ seed: 1 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/base/ cer_file: !ref /cer.txt save_folder: !ref /save train_log: !ref /train_log.txt # Data files +NOISE_DATASET_URL: 
https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 data_folder: !PLACEHOLDER # e,g./path/to/aishell -# noise/ris dataset will automatically be downloaded -data_folder_rirs: !ref # Change this is needed +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. skip_prep: False +remove_compressed_wavs: False ckpt_interval_minutes: 15 # save checkpoint every N min train_data: !ref /train.csv valid_data: !ref /dev.csv test_data: !ref /test.csv +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script tokenizer_file: speechbrain/asr-transformer-aishell/tokenizer.ckpt -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 40 number_of_ctc_epochs: 10 batch_size: 16 lr: 0.0003 ctc_weight: 0.5 sorting: ascending +precision: fp32 # bf16, fp16 or fp32 dynamic_batching: True +max_batch_length: 15 # in terms of "duration" in annotations by default, second here +shuffle: False # if true re-creates batches at each epoch shuffling examples. +num_buckets: 10 # floor(log(max_batch_len/left_bucket_len, multiplier)) + 1 +batch_ordering: ascending dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 15 # in terms of "duration" in annotations by default, second here - left_bucket_len: 200 # old implementation attributs - multiplier: 1.1 # old implementation attributs - shuffle_ex: False # if true re-creates batches at each epoch shuffling examples. 
- num_buckets: 10 # floor(log(max_batch_len/left_bucket_len, multiplier)) + 1 - batch_ordering: ascending - -num_workers: 6 + max_batch_length: !ref + shuffle: !ref + num_buckets: !ref + batch_ordering: !ref # Feature parameters sample_rate: 16000 @@ -56,6 +59,7 @@ opt_class: !name:torch.optim.Adam lr: !ref # Dataloader options +num_workers: 4 train_dataloader_opts: batch_size: !ref num_workers: !ref @@ -68,7 +72,7 @@ test_dataloader_opts: batch_size: !ref num_workers: !ref -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 2 @@ -85,9 +89,11 @@ dnn_neurons: 512 emb_size: 128 dec_neurons: 1024 output_neurons: 5000 # Number of tokens +# we need to have blank_index != bos_index != eos_index when using CTCScorer blank_index: 0 -bos_index: 0 -eos_index: 0 +bos_index: 1 +eos_index: 2 +label_smoothing: 0.1 # Decoding parameters min_decode_ratio: 0.0 @@ -98,12 +104,11 @@ using_max_attn_shift: True max_attn_shift: 240 coverage_penalty: 1.5 temperature: 1.25 - +scorer_beam_scale: 0.5 # AISHELL-1 has spaces between words in the transcripts, # which Chinese writing normally does not do. # If remove_spaces, spaces are removed # from the transcript before computing CER. 
-# (e.g., 祝 可爱 的 你 —> 祝可爱的你) remove_spaces: True split_tokens: !apply:operator.not_ [!ref ] @@ -113,23 +118,64 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global +############################## Augmentations ################################### + compute_features: !new:speechbrain.lobes.features.Fbank sample_rate: !ref n_fft: !ref n_mels: !ref -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Models ########################################## + enc: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] activation: !ref @@ -183,7 +229,7 @@ ctc_cost: !name:speechbrain.nnet.losses.ctc_loss blank_index: !ref seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 + label_smoothing: !ref # Models modules: @@ -193,8 +239,6 @@ modules: ctc_lin: !ref seq_lin: !ref normalize: !ref - env_corrupt: !ref - #lm_model: !ref model: !new:torch.nn.ModuleList - [!ref , !ref , !ref , !ref , !ref ] @@ -208,22 +252,37 @@ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer paths: tokenizer: !ref +############################## Decoding ######################################## + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer + vocab_size: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + coverage: !ref + ctc: !ref + scorer_beam_scale: !ref + beam_search: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref + temperature: !ref using_max_attn_shift: !ref max_attn_shift: !ref - coverage_penalty: !ref - temperature: !ref + scorer: !ref lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler 
initial_value: !ref @@ -231,6 +290,8 @@ lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.8 patient: 0 +############################## Logging and Pretrainer ########################## + checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: diff --git a/recipes/AISHELL-1/ASR/seq2seq/train.py b/recipes/AISHELL-1/ASR/seq2seq/train.py index ea3162b16c..9e3a5916da 100644 --- a/recipes/AISHELL-1/ASR/seq2seq/train.py +++ b/recipes/AISHELL-1/ASR/seq2seq/train.py @@ -1,18 +1,18 @@ #!/usr/bin/env/python3 """ - AISHELL-1 seq2seq model recipe. (Adapted from the LibriSpeech recipe.) - """ import sys + import torch -import logging +from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -24,16 +24,10 @@ def compute_forward(self, batch, stage): tokens_bos, _ = batch.tokens_bos wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) - - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) # Forward pass feats = self.hparams.compute_features(wavs) @@ -47,42 +41,39 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs + p_ctc, p_tokens = None, None if stage == sb.Stage.TRAIN: current_epoch = self.hparams.epoch_counter.current if current_epoch <= self.hparams.number_of_ctc_epochs: # Output layer for ctc log-probabilities logits = self.modules.ctc_lin(x) p_ctc = self.hparams.log_softmax(logits) - return p_ctc, p_seq, wav_lens - else: - return p_seq, wav_lens else: - p_tokens, scores = self.hparams.beam_search(x, wav_lens) - return p_seq, wav_lens, p_tokens + p_tokens, _, _, _ = self.hparams.beam_search(x, wav_lens) + + return p_ctc, p_seq, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" current_epoch = self.hparams.epoch_counter.current - if stage == sb.Stage.TRAIN: - if current_epoch <= self.hparams.number_of_ctc_epochs: - p_ctc, p_seq, wav_lens = predictions - else: - p_seq, wav_lens = predictions - else: - p_seq, wav_lens, predicted_tokens = predictions + p_ctc, p_seq, wav_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = 
self.hparams.wav_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens ) - tokens = torch.cat([tokens, tokens], dim=0) - tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens @@ -117,24 +108,6 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.batch_idx += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" self.batch_idx = 0 @@ -160,24 +133,27 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"CER": stage_stats["CER"]}, min_keys=["CER"], + meta={"CER": stage_stats["CER"]}, + min_keys=["CER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.cer_file, "w") as w: + with open(self.hparams.cer_file, "w", encoding="utf-8") as w: self.cer_metric.write_stats(w) def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. 
+ """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_data"], replacements={"data_root": data_folder}, + csv_path=hparams["train_data"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -202,12 +178,14 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_data"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_data"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_data"], replacements={"data_root": data_folder}, + csv_path=hparams["test_data"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -245,7 +223,8 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], ) # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
@@ -255,24 +234,17 @@ def text_pipeline(wrd): from speechbrain.dataio.sampler import DynamicBatchSampler # noqa dynamic_hparams = hparams["dynamic_batch_sampler"] - num_buckets = dynamic_hparams["num_buckets"] train_batch_sampler = DynamicBatchSampler( train_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, + **dynamic_hparams, length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], ) valid_batch_sampler = DynamicBatchSampler( valid_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, + **dynamic_hparams, length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], ) return ( @@ -286,13 +258,11 @@ def text_pipeline(wrd): if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -313,8 +283,10 @@ def text_pipeline(wrd): "data_folder": hparams["data_folder"], "save_folder": hparams["output_folder"], "skip_prep": hparams["skip_prep"], + "remove_compressed_wavs": hparams["remove_compressed_wavs"], }, ) + run_on_main(hparams["prepare_noise_data"]) # here we create the datasets objects as well as tokenization and encoding ( @@ -327,8 +299,8 @@ def text_pipeline(wrd): ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Trainer initialization asr_brain = ASR( @@ -365,5 +337,7 @@ def text_pipeline(wrd): # Testing asr_brain.evaluate( - test_data, 
test_loader_kwargs=hparams["test_dataloader_opts"] + test_data, + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="CER", ) diff --git a/recipes/AISHELL-1/ASR/transformer/README.md b/recipes/AISHELL-1/ASR/transformer/README.md index 4ba6aad68e..5f5ddf4d94 100644 --- a/recipes/AISHELL-1/ASR/transformer/README.md +++ b/recipes/AISHELL-1/ASR/transformer/README.md @@ -6,9 +6,8 @@ This folder contains recipes for tokenization and speech recognition with [AISHE ``` cd ../../Tokenizer -python train.py hparams/train_transformer_tokenizer_bpe5000.yaml --data_folder=/localscratch/aishell/ +python train.py hparams/train_transformer_tokenizer_bpe5000.yaml --data_folder=/path/to/aishell ``` -If not present in the specified data_folder, the dataset will be automatically downloaded there. This step is not mandatory. We will use the official tokenizer downloaded from the web if you do not specify a different tokenizer in the speech recognition recipe. @@ -17,7 +16,7 @@ specify a different tokenizer in the speech recognition recipe. python train.py hparams/train_ASR_transformer.yaml --data_folder=/localscratch/aishell/ ``` -Make sure to have "transformers" installed if you use the wav2vec2 recipe (see extra-requirements.txt) +Make sure to have `transformers` installed if you use the wav2vec2 recipe (see extra-requirements.txt) # Performance summary Results are reported in terms of Character Error Rate (CER). @@ -28,7 +27,7 @@ Results are reported in terms of Character Error Rate (CER). 
| train_ASR_transformer_with_wav2vect.yaml | No | 5.58 | 5.19 | 1xRTX 8000 Ti 48GB | You can checkout our results (models, training logs, etc,) here: -https://drive.google.com/drive/folders/1xKo_6Pxk0saPXjGZg8um68b_l0Tgfdjy?usp=sharing +https://www.dropbox.com/sh/tp6tjmysorgvsr4/AAD7KNqi1ot0gR4N406JbKM6a?dl=0 # Training Time It takes about 1h 10 minutes on a NVIDIA V100 (32GB) for train_ASR_transformer.yaml, @@ -51,6 +50,15 @@ You can find the pre-trained model with an easy-inference function on HuggingFac Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/AISHELL-1/ASR/transformer/extra_requirements.txt 
b/recipes/AISHELL-1/ASR/transformer/extra_requirements.txt deleted file mode 100644 index 78949924f4..0000000000 --- a/recipes/AISHELL-1/ASR/transformer/extra_requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -# For wav2vect recipe (HuggingFace) -transformers diff --git a/recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer.yaml b/recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer.yaml index 7916146b48..bdcea59159 100644 --- a/recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer.yaml +++ b/recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer.yaml @@ -9,42 +9,50 @@ # ############################################################################ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 8886 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/transformer/ cer_file: !ref /cer.txt save_folder: !ref /save train_log: !ref /train_log.txt +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 + # Data files data_folder: !PLACEHOLDER # e,g./path/to/aishell -# noise/ris dataset will automatically be downloaded -data_folder_rirs: !ref # Change this is needed +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. 
skip_prep: False +remove_compressed_wavs: False ckpt_interval_minutes: 15 # save checkpoint every N min -train_data: !ref /train.csv -valid_data: !ref /dev.csv -test_data: !ref /test.csv +train_data: !ref /train.csv +valid_data: !ref /dev.csv +test_data: !ref /test.csv +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script tokenizer_file: speechbrain/asr-transformer-aishell/tokenizer.ckpt -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 50 batch_size: 8 ctc_weight: 0.3 -gradient_accumulation: 4 +grad_accumulation_factor: 4 loss_reduction: 'batchmean' sorting: random +avg_checkpoints: 10 # Number of checkpoints to average for evaluation +precision: fp32 # bf16, fp16 or fp32 dynamic_batching: False +max_batch_length: 15 # in terms of "duration" in annotations by default, second here +shuffle: False # if true re-creates batches at each epoch shuffling examples. +num_buckets: 10 # floor(log(max_batch_len/left_bucket_len, multiplier)) + 1 +batch_ordering: ascending dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 15 # in terms of "duration" in annotations by default, second here - left_bucket_len: 200 # old implementation attributs - multiplier: 1.1 # old implementation attributs - shuffle_ex: False # if true re-creates batches at each epoch shuffling examples. 
- num_buckets: 10 # floor(log(max_batch_len/left_bucket_len, multiplier)) + 1 - batch_ordering: ascending + max_batch_length: !ref + shuffle: !ref + num_buckets: !ref + batch_ordering: !ref -num_workers: 6 +num_workers: 4 # stages related parameters stage_one_epochs: 40 @@ -59,15 +67,18 @@ n_mels: 80 # Dataloader options train_dataloader_opts: batch_size: !ref + num_workers: !ref shuffle: True valid_dataloader_opts: batch_size: !ref + num_workers: !ref test_dataloader_opts: batch_size: !ref + num_workers: !ref -####################### Model parameters ########################### +####################### Model Parameters ####################################### # Transformer d_model: 256 nhead: 4 @@ -93,7 +104,7 @@ valid_beam_size: 10 test_beam_size: 10 ctc_weight_decode: 0.40 -############################## models ################################ +############################## Models ########################################## CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd input_shape: (8, 10, 80) @@ -115,6 +126,7 @@ Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.Transforme dropout: !ref activation: !ref normalize_before: True + causal: False tokenizer: !new:sentencepiece.SentencePieceProcessor @@ -126,20 +138,12 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 modules: CNN: !ref Transformer: !ref seq_lin: !ref ctc_lin: !ref - env_corrupt: !ref model: !new:torch.nn.ModuleList - [!ref , !ref , !ref , !ref ] @@ -155,30 +159,39 @@ SGD: !name:torch.optim.SGD momentum: 0.99 nesterov: True +############################## Decoding & optimiser ############################ -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] - bos_index: !ref +ctc_scorer: 
!new:speechbrain.decoders.scorer.CTCScorer eos_index: !ref blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False length_normalization: True + scorer: !ref -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False length_normalization: True + scorer: !ref log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -211,23 +224,72 @@ normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global update_until_epoch: 4 -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 compute_features: !new:speechbrain.lobes.features.Fbank sample_rate: !ref n_fft: !ref n_mels: !ref +############################## Augmentation #################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Augmenter: Combines previously defined augmentations to perform data 
augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 1 + max_augmentations: 1 + augment_prob: 1.0 + augmentations: [ + !ref ] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 0 + drop_length_high: 100 + drop_count_low: 2 + drop_count_high: 2 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 30 + drop_length_high: 40 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 1 + max_augmentations: 1 + augment_start_index: !ref # This leaves original inputs unchanged + concat_end_index: !ref # This leaves original inputs unchanged + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref @@ -235,7 +297,6 @@ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger # which Chinese writing normally does not do. # If remove_spaces, spaces are removed # from the transcript before computing CER. 
-# (e.g., 祝 可爱 的 你 —> 祝可爱的你) remove_spaces: True split_tokens: !apply:operator.not_ [!ref ] diff --git a/recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer_with_wav2vect.yaml b/recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer_with_wav2vect.yaml index 349d7ab646..da4774df1e 100644 --- a/recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer_with_wav2vect.yaml +++ b/recipes/AISHELL-1/ASR/transformer/hparams/train_ASR_transformer_with_wav2vect.yaml @@ -9,7 +9,7 @@ # ############################################################################ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 8886 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/transformer_with_wav2vect/ cer_file: !ref /cer.txt save_folder: !ref /save @@ -17,62 +17,63 @@ train_log: !ref /train_log.txt # Data files data_folder: !PLACEHOLDER # e,g./path/to/aishell -# noise/ris dataset will automatically be downloaded -data_folder_rirs: !ref # Change this is needed skip_prep: False +remove_compressed_wavs: False ckpt_interval_minutes: 30 # save checkpoint every N min -train_data: !ref /train.csv -valid_data: !ref /dev.csv -test_data: !ref /test.csv +train_data: !ref /train.csv +valid_data: !ref /dev.csv +test_data: !ref /test.csv tokenizer_file: speechbrain/asr-transformer-aishell/tokenizer.ckpt - +sample_rate: 16000 # Self-supervised pre-training wav2vec2_hub: facebook/wav2vec2-large-100k-voxpopuli +wav2vec2_folder: !ref /wav2vec2_checkpoint freeze_wav2vec: False -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 80 batch_size: 2 -ctc_weight: 0.3 -gradient_accumulation: 16 +grad_accumulation_factor: 16 loss_reduction: 'batchmean' sorting: random +ctc_weight: 0.3 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation +precision: fp32 # bf16, fp16 or fp32 
dynamic_batching: False +max_batch_length: 15 # in terms of "duration" in annotations by default, second here +shuffle: False # if true re-creates batches at each epoch shuffling examples. +num_buckets: 10 # floor(log(max_batch_len/left_bucket_len, multiplier)) + 1 +batch_ordering: ascending dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 15 # in terms of "duration" in annotations by default, second here - left_bucket_len: 200 # old implementation attributs - multiplier: 1.1 # old implementation attributs - shuffle_ex: False # if true re-creates batches at each epoch shuffling examples. - num_buckets: 10 # floor(log(max_batch_len/left_bucket_len, multiplier)) + 1 - batch_ordering: ascending + max_batch_length: !ref + shuffle: !ref + num_buckets: !ref + batch_ordering: !ref -num_workers: 6 +num_workers: 4 # stages related parameters stage_one_epochs: 40 lr_adam: 1.0 lr_sgd: 0.000025 -# lr_wav2vec: 0.0001 - -# Feature parameters -# sample_rate: 16000 -# n_fft: 400 -# n_mels: 80 # Dataloader options train_dataloader_opts: batch_size: !ref + num_workers: !ref shuffle: True valid_dataloader_opts: batch_size: !ref + num_workers: !ref test_dataloader_opts: batch_size: !ref + num_workers: !ref -####################### Model parameters ########################### +####################### Model Parameters ####################################### # Transformer d_model: 256 nhead: 4 @@ -98,13 +99,13 @@ valid_beam_size: 10 test_beam_size: 10 ctc_weight_decode: 0.40 -############################## models ################################ +############################## Models ########################################## -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint 
disable-line rule:line-length @@ -118,6 +119,7 @@ Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.Transforme dropout: !ref activation: !ref normalize_before: True + causal: False tokenizer: !new:sentencepiece.SentencePieceProcessor @@ -129,24 +131,48 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 modules: wav2vec2: !ref Transformer: !ref seq_lin: !ref ctc_lin: !ref - env_corrupt: !ref model: !new:torch.nn.ModuleList - [!ref , !ref , !ref ] +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Decoding & optimiser ############################ + # define two optimizers here for two-stage training Adam: !name:torch.optim.Adam lr: 0 @@ -163,30 +189,38 @@ wav2vec_opt_class: !name:torch.optim.Adam betas: (0.9, 0.98) eps: 0.000000001 +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False length_normalization: True + scorer: !ref -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False length_normalization: True + scorer: !ref log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -209,6 +243,7 @@ noam_annealing_wav2vect: !new:speechbrain.nnet.schedulers.NoamScheduler n_warmup_steps: 25000 model_size: !ref +############################## Logging and Pretrainer ########################## checkpointer: 
!new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref @@ -223,19 +258,6 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 - - train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref @@ -243,7 +265,6 @@ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger # which Chinese writing normally does not do. # If remove_spaces, spaces are removed # from the transcript before computing CER. -# (e.g., 祝 可爱 的 你 —> 祝可爱的你) remove_spaces: True split_tokens: !apply:operator.not_ [!ref ] diff --git a/recipes/AISHELL-1/ASR/transformer/train.py b/recipes/AISHELL-1/ASR/transformer/train.py index f27779488d..8cb57062b1 100644 --- a/recipes/AISHELL-1/ASR/transformer/train.py +++ b/recipes/AISHELL-1/ASR/transformer/train.py @@ -3,16 +3,21 @@ AISHELL-1 transformer model recipe. (Adapted from the LibriSpeech recipe.) 
+Authors + * Jianyuan Zhong 2021 + * Titouan Parcollet 2021 """ import sys + import torch -import logging +from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -23,23 +28,20 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, _ = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) # compute features feats = self.hparams.compute_features(wavs) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + current_epoch = self.hparams.epoch_counter.current feats = self.hparams.normalize(feats, wav_lens, epoch=current_epoch) - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - feats = self.hparams.augmentation(feats) - # forward modules src = self.modules.CNN(feats) enc_out, pred = self.modules.Transformer( @@ -56,36 +58,53 @@ def compute_forward(self, batch, stage): # Compute outputs hyps = None - if stage == sb.Stage.TRAIN: - hyps = None - elif stage == sb.Stage.VALID: - hyps = None - current_epoch = self.hparams.epoch_counter.current - if current_epoch % self.hparams.valid_search_interval == 0: - # for the 
sake of efficiency, we only perform beamsearch with limited capacity - # and no LM to give user some idea of how the AM is doing - hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens) - elif stage == sb.Stage.TEST: - hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if is_valid_search: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + elif is_test_search: + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) return p_ctc, p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" - (p_ctc, p_seq, wav_lens, hyps,) = predictions + (p_ctc, p_seq, wav_lens, hyps) = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 - ) - tokens = torch.cat([tokens, tokens], dim=0) - tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if hasattr(self.hparams, "wav_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.wav_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens + ) + + if hasattr(self.hparams, "fea_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens + ) loss_seq = 
self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens @@ -117,37 +136,17 @@ def compute_objectives(self, predictions, batch, stage): self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" + def on_fit_batch_start(self, batch, should_step): + """Gets called at the beginning of each fit_batch.""" # check if we need to switch optimizer # if so change the optimizer from Adam to SGD self.check_and_reset_optimizer() - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - # normalize the loss by gradient_accumulation step - (loss / self.hparams.gradient_accumulation).backward() - - if self.step % self.hparams.gradient_accumulation == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.optimizer.step() - self.optimizer.zero_grad() - - # anneal lr every update + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: self.hparams.noam_annealing(self.optimizer) - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - with torch.no_grad(): - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -171,8 +170,7 @@ def on_stage_end(self, stage, stage_loss, epoch): stage_stats["CER"] = self.cer_metric.summarize("error_rate") # log stats and save checkpoint at end-of-epoch - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): - + if stage == sb.Stage.VALID: # report different epoch stages according current stage current_epoch = self.hparams.epoch_counter.current if 
current_epoch <= self.hparams.stage_one_epochs: @@ -198,7 +196,7 @@ def on_stage_end(self, stage, stage_loss, epoch): self.checkpointer.save_and_keep_only( meta={"ACC": stage_stats["ACC"], "epoch": epoch}, max_keys=["ACC"], - num_to_keep=10, + num_to_keep=self.hparams.avg_checkpoints, ) elif stage == sb.Stage.TEST: @@ -206,7 +204,7 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.cer_file, "w") as w: + with open(self.hparams.cer_file, "w", encoding="utf-8") as w: self.cer_metric.write_stats(w) # save the averaged checkpoint at the end of the evaluation stage @@ -250,25 +248,23 @@ def on_fit_start(self): # Load latest checkpoint to resume training if interrupted if self.checkpointer is not None: - # do not reload the weights if training is interrupted right before stage 2 group = current_optimizer.param_groups[0] if "momentum" not in group: return - self.checkpointer.recover_if_possible( - device=torch.device(self.device) - ) + self.checkpointer.recover_if_possible() def on_evaluate_start(self, max_key=None, min_key=None): - """perform checkpoint averge if needed""" + """perform checkpoint average if needed""" super().on_evaluate_start() ckpts = self.checkpointer.find_checkpoints( max_key=max_key, min_key=min_key ) ckpt = sb.utils.checkpoints.average_checkpoints( - ckpts, recoverable_name="model", device=self.device + ckpts, + recoverable_name="model", ) self.hparams.model.load_state_dict(ckpt, strict=True) @@ -277,11 +273,13 @@ def on_evaluate_start(self, max_key=None, min_key=None): def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. 
+ """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_data"], replacements={"data_root": data_folder}, + csv_path=hparams["train_data"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -306,14 +304,16 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_data"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_data"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_data"], replacements={"data_root": data_folder}, + csv_path=hparams["test_data"], + replacements={"data_root": data_folder}, ) - test_data = test_data.filtered_sorted(sort_key="duration") + test_data = test_data.filtered_sorted(sort_key="duration", reverse=True) datasets = [train_data, valid_data, test_data] @@ -349,7 +349,8 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], ) # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
@@ -359,24 +360,13 @@ def text_pipeline(wrd): from speechbrain.dataio.sampler import DynamicBatchSampler # noqa dynamic_hparams = hparams["dynamic_batch_sampler"] - num_buckets = dynamic_hparams["num_buckets"] train_batch_sampler = DynamicBatchSampler( - train_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, - length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + train_data, **dynamic_hparams, length_func=lambda x: x["duration"] ) valid_batch_sampler = DynamicBatchSampler( - valid_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, - length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + valid_data, **dynamic_hparams, length_func=lambda x: x["duration"] ) return ( @@ -390,13 +380,11 @@ def text_pipeline(wrd): if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -417,8 +405,10 @@ def text_pipeline(wrd): "data_folder": hparams["data_folder"], "save_folder": hparams["output_folder"], "skip_prep": hparams["skip_prep"], + "remove_compressed_wavs": hparams["remove_compressed_wavs"], }, ) + run_on_main(hparams["prepare_noise_data"]) # here we create the datasets objects as well as tokenization and encoding ( @@ -431,8 +421,8 @@ def text_pipeline(wrd): ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Trainer initialization asr_brain = ASR( diff --git 
a/recipes/AISHELL-1/ASR/transformer/train_with_wav2vect.py b/recipes/AISHELL-1/ASR/transformer/train_with_wav2vect.py index 3c7c85761f..dffa78de26 100644 --- a/recipes/AISHELL-1/ASR/transformer/train_with_wav2vect.py +++ b/recipes/AISHELL-1/ASR/transformer/train_with_wav2vect.py @@ -7,13 +7,15 @@ """ import sys + import torch -import logging +from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -24,22 +26,15 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, _ = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) # compute features - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) current_epoch = self.hparams.epoch_counter.current - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - feats = self.hparams.augmentation(feats) - # forward modules enc_out, pred = self.hparams.Transformer( feats, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index @@ -55,36 +50,43 @@ def compute_forward(self, batch, stage): # Compute outputs hyps = None - if stage == sb.Stage.TRAIN: - hyps = None - elif stage == sb.Stage.VALID: - hyps = None - current_epoch = self.hparams.epoch_counter.current - if current_epoch % self.hparams.valid_search_interval == 0: - # for the sake of efficiency, we only perform beamsearch with limited capacity - # and no LM to give user some idea of how the AM is doing - hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens) - elif stage == sb.Stage.TEST: - hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if is_valid_search: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + elif is_test_search: + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) return p_ctc, p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" - (p_ctc, p_seq, wav_lens, hyps,) = predictions + (p_ctc, p_seq, wav_lens, hyps) = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens - if hasattr(self.modules, "env_corrupt") and 
stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 - ) - tokens = torch.cat([tokens, tokens], dim=0) - tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if hasattr(self.hparams, "wav_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.wav_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens + ) loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens @@ -116,40 +118,18 @@ def compute_objectives(self, predictions, batch, stage): self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" + def on_fit_batch_start(self, batch, should_step): + """Gets called at the beginning of each fit_batch.""" # check if we need to switch optimizer # if so change the optimizer from Adam to SGD self.check_and_reset_optimizer() - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - # normalize the loss by gradient_accumulation step - (loss / self.hparams.gradient_accumulation).backward() - - if self.step % self.hparams.gradient_accumulation == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.optimizer.step() - self.optimizer_wav2vect.step() - self.optimizer.zero_grad() - self.optimizer_wav2vect.zero_grad() - - # anneal lr every update + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: self.hparams.noam_annealing(self.optimizer) self.hparams.noam_annealing_wav2vect(self.optimizer_wav2vect) - 
return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - with torch.no_grad(): - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -173,8 +153,7 @@ def on_stage_end(self, stage, stage_loss, epoch): stage_stats["CER"] = self.cer_metric.summarize("error_rate") # log stats and save checkpoint at end-of-epoch - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): - + if stage == sb.Stage.VALID: # report different epoch stages according current stage current_epoch = self.hparams.epoch_counter.current if current_epoch <= self.hparams.stage_one_epochs: @@ -200,7 +179,7 @@ def on_stage_end(self, stage, stage_loss, epoch): self.checkpointer.save_and_keep_only( meta={"ACC": stage_stats["ACC"], "epoch": epoch}, max_keys=["ACC"], - num_to_keep=10, + num_to_keep=self.hparams.avg_checkpoints, ) elif stage == sb.Stage.TEST: @@ -208,7 +187,7 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.cer_file, "w") as w: + with open(self.hparams.cer_file, "w", encoding="utf-8") as w: self.cer_metric.write_stats(w) # save the averaged checkpoint at the end of the evaluation stage @@ -252,7 +231,6 @@ def on_fit_start(self): # Load latest checkpoint to resume training if interrupted if self.checkpointer is not None: - # do not reload the weights if training is interrupted right before stage 2 group = current_optimizer.param_groups[0] if "momentum" not in group: @@ -263,14 +241,14 @@ def on_fit_start(self): ) def on_evaluate_start(self, max_key=None, min_key=None): - """perform checkpoint averge if needed""" + """perform checkpoint average if needed""" super().on_evaluate_start() 
ckpts = self.checkpointer.find_checkpoints( max_key=max_key, min_key=min_key ) ckpt = sb.utils.checkpoints.average_checkpoints( - ckpts, recoverable_name="model", device=self.device + ckpts, recoverable_name="model" ) self.hparams.model.load_state_dict(ckpt, strict=True) @@ -289,14 +267,21 @@ def init_optimizers(self): ) self.checkpointer.add_recoverable("modelopt", self.optimizer) + self.optimizers_dict = { + "wav2vect_optimizer": self.optimizer_wav2vect, + "model_optimizer": self.optimizer, + } + def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_data"], replacements={"data_root": data_folder}, + csv_path=hparams["train_data"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -321,12 +306,14 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_data"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_data"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_data"], replacements={"data_root": data_folder}, + csv_path=hparams["test_data"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -364,7 +351,8 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], ) # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
@@ -374,24 +362,13 @@ def text_pipeline(wrd): from speechbrain.dataio.sampler import DynamicBatchSampler # noqa dynamic_hparams = hparams["dynamic_batch_sampler"] - num_buckets = dynamic_hparams["num_buckets"] train_batch_sampler = DynamicBatchSampler( - train_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, - length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + train_data, **dynamic_hparams, length_func=lambda x: x["duration"] ) valid_batch_sampler = DynamicBatchSampler( - valid_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, - length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + valid_data, **dynamic_hparams, length_func=lambda x: x["duration"] ) return ( @@ -405,13 +382,11 @@ def text_pipeline(wrd): if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -432,6 +407,7 @@ def text_pipeline(wrd): "data_folder": hparams["data_folder"], "save_folder": hparams["output_folder"], "skip_prep": hparams["skip_prep"], + "remove_compressed_wavs": hparams["remove_compressed_wavs"], }, ) @@ -446,8 +422,8 @@ def text_pipeline(wrd): ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Trainer initialization asr_brain = ASR( diff --git a/recipes/AISHELL-1/Tokenizer/README.md b/recipes/AISHELL-1/Tokenizer/README.md index 6ff4250a6c..b03ebf51a5 100644 --- 
a/recipes/AISHELL-1/Tokenizer/README.md +++ b/recipes/AISHELL-1/Tokenizer/README.md @@ -7,7 +7,7 @@ The tokenizer is trained on the top of the AISHELL training transcriptions. `python train.py hparams/train_transformer_tokenizer_bpe5000.yaml` -The output folder with the tokenizers and logs is available [here](https://drive.google.com/drive/folders/15wOIkFMHB-wwR1OW6NupcLcHbiJZJ_CU?usp=sharing). +The output folder with the tokenizers and logs is available [here](https://www.dropbox.com/sh/gh1qyf833t7h3op/AADG0y1bGGIL4yufsXtuBgXma?dl=0). # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -19,6 +19,15 @@ The output folder with the tokenizers and logs is available [here](https://drive Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François 
Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/AISHELL-1/Tokenizer/hparams/tokenizer_bpe5000.yaml b/recipes/AISHELL-1/Tokenizer/hparams/tokenizer_bpe5000.yaml index 93163a27d1..d2cb230189 100644 --- a/recipes/AISHELL-1/Tokenizer/hparams/tokenizer_bpe5000.yaml +++ b/recipes/AISHELL-1/Tokenizer/hparams/tokenizer_bpe5000.yaml @@ -9,11 +9,13 @@ output_folder: !ref results/tokenizer_bpe5000/ # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/aishell +skip_prep: False +remove_compressed_wavs: False train_csv: !ref /train.csv valid_csv: !ref /dev.csv -# Training parameters +####################### Training Parameters #################################### token_type: unigram # ["unigram", "bpe", "char"] token_output: 5000 # index(blank/eos/bos/unk) = 0 character_coverage: 1.0 diff --git a/recipes/AISHELL-1/Tokenizer/hparams/train_transformer_tokenizer_bpe5000.yaml b/recipes/AISHELL-1/Tokenizer/hparams/train_transformer_tokenizer_bpe5000.yaml index 33bd5e3548..973df9a119 100644 --- a/recipes/AISHELL-1/Tokenizer/hparams/train_transformer_tokenizer_bpe5000.yaml +++ b/recipes/AISHELL-1/Tokenizer/hparams/train_transformer_tokenizer_bpe5000.yaml @@ -9,11 +9,13 @@ output_folder: !ref results/transformer_tokenizer_bpe5000/ # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/aishell +skip_prep: False +remove_compressed_wavs: False train_csv: !ref /train.csv valid_csv: !ref /dev.csv -# Training parameters +####################### Training Parameters #################################### token_type: unigram # ["unigram", "bpe", "char"] token_output: 5000 # index(blank/eos/bos/unk) = 0 character_coverage: 1.0 diff --git a/recipes/AISHELL-1/Tokenizer/pretrained.py b/recipes/AISHELL-1/Tokenizer/pretrained.py index 220c3ad4a6..c423f4ce23 100644 --- a/recipes/AISHELL-1/Tokenizer/pretrained.py +++ b/recipes/AISHELL-1/Tokenizer/pretrained.py @@ -5,10 +5,13 @@ * Mirco Ravanelli 2020 * Abdel Heba 2020 """ + import 
os -from speechbrain.utils.data_utils import download_file + import sentencepiece as spm +from speechbrain.utils.data_utils import download_file + class tokenizer: """Downloads and loads the pretrained tokenizer. @@ -28,7 +31,9 @@ def __init__(self, tokenizer_file, save_folder="tokenizer_model"): save_file = os.path.join(save_folder, "tok.model") download_file( - source=tokenizer_file, dest=save_file, replace_existing=True, + source=tokenizer_file, + dest=save_file, + replace_existing=True, ) tokenizer_file = save_file diff --git a/recipes/AISHELL-1/Tokenizer/train.py b/recipes/AISHELL-1/Tokenizer/train.py index 25c27ac101..a1af283ff2 100644 --- a/recipes/AISHELL-1/Tokenizer/train.py +++ b/recipes/AISHELL-1/Tokenizer/train.py @@ -1,6 +1,6 @@ #!/usr/bin/env/python3 """Recipe for training a BPE tokenizer with AISHELL-1. -The tokenizer coverts transcripts into sub-word units that can +The tokenizer converts transcripts into sub-word units that can be used to train a language (LM) or an acoustic model (AM). 
To run this recipe, do the following: @@ -14,18 +14,18 @@ """ import sys -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -45,6 +45,8 @@ kwargs={ "data_folder": hparams["data_folder"], "save_folder": hparams["output_folder"], + "skip_prep": hparams["skip_prep"], + "remove_compressed_wavs": hparams["remove_compressed_wavs"], }, ) diff --git a/recipes/AISHELL-1/aishell_prepare.py b/recipes/AISHELL-1/aishell_prepare.py index 290ba03b1b..ace82a54a4 100644 --- a/recipes/AISHELL-1/aishell_prepare.py +++ b/recipes/AISHELL-1/aishell_prepare.py @@ -1,102 +1,219 @@ +""" +Data preparation. + +Download: https://www.openslr.org/33/ + +Authors +------- + * Adel Moumen 2023 +""" + +import csv +import functools +import glob import os import shutil -import logging -from speechbrain.dataio.dataio import read_audio -from speechbrain.utils.data_utils import download_file -import glob -import csv -logger = logging.getLogger(__name__) +from speechbrain.dataio.dataio import read_audio_info +from speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map + +logger = get_logger(__name__) + + +def extract_and_cleanup_wav_files( + tgz_list, wav_dir, splits, remove_compressed_wavs +): + """This function extracts the wav files in the AISHELL-1 dataset. + + Arguments + --------- + tgz_list: list + list of paths to the tar.gz files. + wav_dir: str + path to the wav directory. + splits: list + list of splits. 
+ remove_compressed_wavs: bool + If True, remove compressed wav files after extraction. + """ + if len(tgz_list) > 0: + logger.info(f"Extracting wav files in {wav_dir}...") + + decompress_processor = functools.partial( + shutil.unpack_archive, extract_dir=wav_dir + ) + + for split in splits: + os.makedirs(os.path.join(wav_dir, split), exist_ok=True) + + for _ in parallel_map(decompress_processor, tgz_list, chunk_size=64): + pass + + if remove_compressed_wavs: + for tgz in tgz_list: + os.remove(tgz) + + +def process_line(wav, filename2transcript): + """This function processes a line of the csv file. + + This function is being used in the context of multi-processing. + + Arguments + --------- + wav: str + path to the wav file. + filename2transcript: dict + dictionary mapping filenames to transcripts. + + Returns + ------- + list + list containing the duration, the path to the wav file and the transcript. + """ + filename = wav.split("/")[-1].split(".wav")[0] + + info = read_audio_info(wav) + duration = info.num_frames / info.sample_rate + + transcript_ = filename2transcript[filename] + + return [str(duration), wav, transcript_] -def prepare_aishell(data_folder, save_folder, skip_prep=False): +def skip(splits, save_folder): + """Detect when the AiSHELL-1 data preparation can be skipped. + + Arguments + --------- + splits : list + A list of the splits expected in the preparation. + save_folder : str + The location of the save directory + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. """ - This function prepares the AISHELL-1 dataset. - If the folder does not exist, the zip file will be extracted. If the zip file does not exist, it will be downloaded. 
+ # Checking csv files + skip = True + + for split in splits: + if not os.path.isfile(os.path.join(save_folder, split + ".csv")): + skip = False + + return skip + + +def prepare_aishell( + data_folder, save_folder, skip_prep=False, remove_compressed_wavs=True +): + """This function prepares the AISHELL-1 dataset. - data_folder : path to AISHELL-1 dataset. - save_folder: path where to store the manifest csv files. - skip_prep: If True, skip data preparation. + Arguments + --------- + data_folder: str + path to AISHELL-1 dataset. + save_folder: str + path where to store the manifest csv files. + skip_prep: bool + If True, skip data preparation. + remove_compressed_wavs: bool + If True, remove compressed wav files after extraction. + Returns + ------- + None """ + if skip_prep: return - # If the data folders do not exist, we need to extract the data - if not os.path.isdir(os.path.join(data_folder, "data_aishell/wav")): - # Check for zip file and download if it doesn't exist - zip_location = os.path.join(data_folder, "data_aishell.tgz") - if not os.path.exists(zip_location): - url = "https://www.openslr.org/resources/33/data_aishell.tgz" - download_file(url, zip_location, unpack=True) - logger.info("Extracting data_aishell.tgz...") - shutil.unpack_archive(zip_location, data_folder) - wav_dir = os.path.join(data_folder, "data_aishell/wav") - tgz_list = glob.glob(wav_dir + "/*.tar.gz") - for tgz in tgz_list: - shutil.unpack_archive(tgz, wav_dir) - os.remove(tgz) + wav_dir = os.path.join(data_folder, "wav") + tgz_list = glob.glob(wav_dir + "/*.tar.gz") + + splits = ["train", "dev", "test"] + + if skip(splits, save_folder): + return + + extract_and_cleanup_wav_files( + tgz_list, wav_dir, splits, remove_compressed_wavs=remove_compressed_wavs + ) # Create filename-to-transcript dictionary filename2transcript = {} - with open( - os.path.join( - data_folder, "data_aishell/transcript/aishell_transcript_v0.8.txt" - ), - "r", - ) as f: + path_to_transcript = os.path.join( + 
data_folder, "transcript/aishell_transcript_v0.8.txt" + ) + + with open(path_to_transcript, encoding="utf-8") as f: lines = f.readlines() for line in lines: key = line.split()[0] value = " ".join(line.split()[1:]) filename2transcript[key] = value - splits = [ - "train", - "dev", - "test", - ] - ID_start = 0 # needed to have a unique ID for each audio + line_processor = functools.partial( + process_line, filename2transcript=filename2transcript + ) + for split in splits: - new_filename = os.path.join(save_folder, split) + ".csv" - if os.path.exists(new_filename): - continue - logger.info("Preparing %s..." % new_filename) + final_csv = os.path.join(save_folder, split) + ".csv" + tmp_csv = os.path.join(save_folder, split) + ".tmp" - csv_output = [["ID", "duration", "wav", "transcript"]] - entry = [] + logger.info("Preparing %s..." % final_csv) all_wavs = glob.glob( - os.path.join(data_folder, "data_aishell/wav") - + "/" - + split - + "/*/*.wav" + os.path.join(data_folder, "wav") + "/" + split + "/*/*.wav" ) - for i in range(len(all_wavs)): - filename = all_wavs[i].split("/")[-1].split(".wav")[0] - if filename not in filename2transcript: - continue - signal = read_audio(all_wavs[i]) - duration = signal.shape[0] / 16000 - transcript_ = filename2transcript[filename] - csv_line = [ - ID_start + i, - str(duration), - all_wavs[i], - transcript_, - ] - entry.append(csv_line) - - csv_output = csv_output + entry - - with open(new_filename, mode="w") as csv_f: + # only keep the files that are in the transcript + transcript_wavs = [ + wav + for wav in all_wavs + if wav.split("/")[-1].split(".wav")[0] in filename2transcript + ] + + total_line = 0 + total_duration = 0 + id = 0 + with open(tmp_csv, mode="w", newline="", encoding="utf-8") as csv_f: csv_writer = csv.writer( csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL ) - for line in csv_output: - csv_writer.writerow(line) + csv_writer.writerow(["ID", "duration", "wav", "transcript"]) + for row in parallel_map( + 
line_processor, transcript_wavs, chunk_size=4092 + ): + if row is None: + continue + + row = [str(id)] + row + csv_writer.writerow(row) + + total_line += 1 + total_duration += float(row[1]) + id += 1 + + msg = f"Number of samples: {total_line} " + logger.info(msg) + msg = "Total duration: %s Hours" % ( + str(round(total_duration / 3600, 2)) + ) - msg = "\t%s successfully created!" % (new_filename) logger.info(msg) - ID_start += len(all_wavs) + os.replace(tmp_csv, final_csv) + + msg = "\t%s successfully created!" % (final_csv) + logger.info(msg) + + msg = f"Number of samples: {total_line} " + logger.info(msg) + msg = "Total duration: %s Hours" % ( + str(round(total_duration / 3600, 2)) + ) + logger.info(msg) diff --git a/recipes/AMI/Diarization/README.md b/recipes/AMI/Diarization/README.md index ee392583b3..f7098b9397 100644 --- a/recipes/AMI/Diarization/README.md +++ b/recipes/AMI/Diarization/README.md @@ -1,9 +1,13 @@ # Speaker Diarization on AMI corpus This directory contains the scripts for speaker diarization on the AMI corpus (http://groups.inf.ed.ac.uk/ami/corpus/). -## Extra requirements -The code requires scikit-learn as an additional dependency. -To install it, type: `pip install scikit-learn` +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` ## How to run Use the following command to run diarization on AMI corpus. 
diff --git a/recipes/AMI/Diarization/experiment.py b/recipes/AMI/Diarization/experiment.py index 2026e5e7a4..5af60afaca 100755 --- a/recipes/AMI/Diarization/experiment.py +++ b/recipes/AMI/Diarization/experiment.py @@ -18,29 +18,30 @@ * Nauman Dawalatabad 2020 """ +import glob +import json import os -import sys -import torch -import logging import pickle -import json -import glob import shutil +import sys + import numpy as np -import speechbrain as sb -from tqdm.contrib import tqdm +import torch from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio.dataio import read_audio, read_audio_multichannel +from speechbrain.integrations.alignment import diarization as diar from speechbrain.processing.PLDA_LDA import StatObject_SB -from speechbrain.processing import diarization as diar from speechbrain.utils.DER import DER -from speechbrain.dataio.dataio import read_audio -from speechbrain.dataio.dataio import read_audio_multichannel +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger np.random.seed(1234) # Logger setup -logger = logging.getLogger(__name__) +logger = get_logger(__name__) current_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.dirname(current_dir)) @@ -62,12 +63,12 @@ def compute_embeddings(wavs, lens): """Definition of the steps for computation of embeddings from the waveforms.""" with torch.no_grad(): - wavs = wavs.to(params["device"]) + wavs = wavs.to(run_opts["device"]) feats = params["compute_features"](wavs) feats = params["mean_var_norm"](feats, lens) emb = params["embedding_model"](feats, lens) emb = params["mean_var_norm_emb"]( - emb, torch.ones(emb.shape[0], device=params["device"]) + emb, torch.ones(emb.shape[0], device=run_opts["device"]) ) return emb @@ -109,7 +110,7 @@ def embedding_computation_loop(split, set_loader, stat_file): modelset = 
np.array(modelset, dtype="|O") segset = np.array(segset, dtype="|O") - # Intialize variables for start, stop and stat0. + # Initialize variables for start, stop and stat0. s = np.array([None] * embeddings.shape[0]) b = np.array([[1.0]] * embeddings.shape[0]) @@ -153,7 +154,7 @@ def prepare_subset_json(full_meta_data, rec_id, out_meta_file): if k.startswith(rec_id): subset[key] = full_meta_data[key] - with open(out_meta_file, mode="w") as json_f: + with open(out_meta_file, mode="w", encoding="utf-8") as json_f: json.dump(subset, json_f, indent=2) @@ -191,8 +192,7 @@ def diarize_dataset(full_meta, split_type, n_lambdas, pval, n_neighbors=10): if len(all_rec_ids) <= 0: msg = "No recording IDs found! Please check if meta_data json file is properly generated." - logger.error(msg) - sys.exit() + raise ValueError(msg) # Diarizing different recordings in a dataset. for rec_id in tqdm(all_rec_ids): @@ -236,10 +236,10 @@ def diarize_dataset(full_meta, split_type, n_lambdas, pval, n_neighbors=10): diary_set_loader = dataio_prep(params, meta_per_rec_file) # Putting modules on the device. - params["compute_features"].to(params["device"]) - params["mean_var_norm"].to(params["device"]) - params["embedding_model"].to(params["device"]) - params["mean_var_norm_emb"].to(params["device"]) + params["compute_features"].to(run_opts["device"]) + params["mean_var_norm"].to(run_opts["device"]) + params["embedding_model"].to(run_opts["device"]) + params["mean_var_norm_emb"].to(run_opts["device"]) # Compute Embeddings. diary_obj = embedding_computation_loop( @@ -268,7 +268,7 @@ def diarize_dataset(full_meta, split_type, n_lambdas, pval, n_neighbors=10): num_spkrs = diar.get_oracle_num_spkrs(rec_id, spkr_info) else: if params["affinity"] == "nn": - # Num of speakers tunned on dev set (only for nn affinity). + # Num of speakers tuned on dev set (only for nn affinity). num_spkrs = n_lambdas else: # Num of speakers will be estimated using max eigen gap for cos based affinity. 
@@ -277,7 +277,7 @@ def diarize_dataset(full_meta, split_type, n_lambdas, pval, n_neighbors=10): if params["backend"] == "kmeans": diar.do_kmeans_clustering( - diary_obj, out_rttm_file, rec_id, num_spkrs, pval, + diary_obj, out_rttm_file, rec_id, num_spkrs, pval ) if params["backend"] == "SC": @@ -302,11 +302,11 @@ def diarize_dataset(full_meta, split_type, n_lambdas, pval, n_neighbors=10): # This is not needed but just staying with the standards. concate_rttm_file = out_rttm_dir + "/sys_output.rttm" logger.debug("Concatenating individual RTTM files...") - with open(concate_rttm_file, "w") as cat_file: + with open(concate_rttm_file, "w", encoding="utf-8") as cat_file: for f in glob.glob(out_rttm_dir + "/*.rttm"): if f == concate_rttm_file: continue - with open(f, "r") as indi_rttm_file: + with open(f, encoding="utf-8") as indi_rttm_file: shutil.copyfileobj(indi_rttm_file, cat_file) msg = "The system generated RTTM file for %s set : %s" % ( @@ -349,7 +349,7 @@ def dev_pval_tuner(full_meta, split_type): # p_val is needed in oracle_n_spkr=False when using kmeans backend. break - # Take p_val that gave minmum DER on Dev dataset. + # Take p_val that gave minimum DER on Dev dataset. tuned_p_val = prange[DER_list.index(min(DER_list))] return tuned_p_val @@ -384,7 +384,7 @@ def dev_ahc_threshold_tuner(full_meta, split_type): if params["oracle_n_spkrs"] is True: break # no need of threshold search. - # Take p_val that gave minmum DER on Dev dataset. + # Take p_val that gave minimum DER on Dev dataset. tuned_p_val = prange[DER_list.index(min(DER_list))] return tuned_p_val @@ -398,11 +398,10 @@ def dev_nn_tuner(full_meta, split_type): DER_list = [] pval = None - # Now assumming oracle num of speakers. + # Now assuming oracle num of speakers. n_lambdas = 4 for nn in range(5, 15): - # Process whole dataset for value of n_lambdas. 
concate_rttm_file = diarize_dataset( full_meta, split_type, n_lambdas, pval, nn @@ -430,14 +429,13 @@ def dev_nn_tuner(full_meta, split_type): def dev_tuner(full_meta, split_type): """Tuning n_components on dev set. Used for nn based affinity matrix. - Note: This is a very basic tunning for nn based affinity. + Note: This is a very basic tuning for nn based affinity. This is work in progress till we find a better way. """ DER_list = [] pval = None for n_lambdas in range(1, params["max_num_spkrs"] + 1): - # Process whole dataset for value of n_lambdas. concate_rttm_file = diarize_dataset( full_meta, split_type, n_lambdas, pval @@ -454,7 +452,7 @@ def dev_tuner(full_meta, split_type): DER_list.append(DER_) - # Take n_lambdas with minmum DER. + # Take n_lambdas with minimum DER. tuned_n_lambdas = DER_list.index(min(DER_list)) + 1 return tuned_n_lambdas @@ -468,7 +466,8 @@ def dataio_prep(hparams, json_file): # 1. Datasets data_folder = hparams["data_folder"] dataset = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=json_file, replacements={"data_root": data_folder}, + json_path=json_file, + replacements={"data_root": data_folder}, ) # 2. Define audio pipeline. @@ -505,32 +504,32 @@ def audio_pipeline(wav): # Begin experiment! if __name__ == "__main__": # noqa: C901 - # Load hyperparameters file with command-line overrides. 
params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:]) - with open(params_file) as fin: + with open(params_file, encoding="utf-8") as fin: params = load_hyperpyyaml(fin, overrides) - # Dataset prep (peparing metadata files) + # Dataset prep (preparing metadata files) from ami_prepare import prepare_ami # noqa - run_on_main( - prepare_ami, - kwargs={ - "data_folder": params["data_folder"], - "save_folder": params["save_folder"], - "ref_rttm_dir": params["ref_rttm_dir"], - "meta_data_dir": params["meta_data_dir"], - "manual_annot_folder": params["manual_annot_folder"], - "split_type": params["split_type"], - "skip_TNO": params["skip_TNO"], - "mic_type": params["mic_type"], - "vad_type": params["vad_type"], - "max_subseg_dur": params["max_subseg_dur"], - "overlap": params["overlap"], - }, - ) + if not params["skip_prep"]: + run_on_main( + prepare_ami, + kwargs={ + "data_folder": params["data_folder"], + "save_folder": params["save_folder"], + "ref_rttm_dir": params["ref_rttm_dir"], + "meta_data_dir": params["meta_data_dir"], + "manual_annot_folder": params["manual_annot_folder"], + "split_type": params["split_type"], + "skip_TNO": params["skip_TNO"], + "mic_type": params["mic_type"], + "vad_type": params["vad_type"], + "max_subseg_dur": params["max_subseg_dur"], + "overlap": params["overlap"], + }, + ) # Create experiment directory. sb.core.create_experiment_directory( @@ -552,23 +551,20 @@ def audio_pipeline(wav): # We download the pretrained Model from HuggingFace (or elsewhere depending on # the path given in the YAML file). run_on_main(params["pretrainer"].collect_files) - params["pretrainer"].load_collected(device=(params["device"])) + params["pretrainer"].load_collected() params["embedding_model"].eval() - params["embedding_model"].to(params["device"]) + params["embedding_model"].to(run_opts["device"]) # AMI Dev Set: Tune hyperparams on dev set. 
# Read the meta-data file for dev set generated during data_prep - dev_meta_file = os.path.join( - params["meta_data_dir"], - "ami_dev." + params["mic_type"] + ".subsegs.json", - ) - with open(dev_meta_file, "r") as f: + dev_meta_file = params["dev_meta_file"] + with open(dev_meta_file, encoding="utf-8") as f: meta_dev = json.load(f) full_meta = meta_dev # Processing starts from here - # Following few lines selects option for different backend and affinity matrices. Finds best values for hyperameters using dev set. + # Following few lines selects option for different backend and affinity matrices. Finds best values for hyperparameters using dev set. best_nn = None if params["affinity"] == "nn": logger.info("Tuning for nn (Multiple iterations over AMI Dev set)") @@ -603,11 +599,8 @@ def audio_pipeline(wav): # Load 'dev' and 'eval' metadata files. full_meta_dev = full_meta # current full_meta is for 'dev' - eval_meta_file = os.path.join( - params["meta_data_dir"], - "ami_eval." + params["mic_type"] + ".subsegs.json", - ) - with open(eval_meta_file, "r") as f: + eval_meta_file = params["eval_meta_file"] + with open(eval_meta_file, encoding="utf-8") as f: full_meta_eval = json.load(f) # Tag to be appended to final output DER files. Writing DER for individual files. diff --git a/recipes/AMI/Diarization/hparams/ecapa_tdnn.yaml b/recipes/AMI/Diarization/hparams/ecapa_tdnn.yaml index a373bc663f..5c3a300af5 100755 --- a/recipes/AMI/Diarization/hparams/ecapa_tdnn.yaml +++ b/recipes/AMI/Diarization/hparams/ecapa_tdnn.yaml @@ -6,7 +6,7 @@ # ################################################# seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Directories: Replace !PLACEHOLDER with full path of the directory. 
# Download data from: http://groups.inf.ed.ac.uk/ami/download/ @@ -17,7 +17,7 @@ manual_annot_folder: !PLACEHOLDER # e.g., /path/to/ami_public_manual_1.6.2/ output_folder: results/ami/ecapa/ save_folder: !ref /save -device: 'cuda:0' +skip_prep: False # Embedding model # Here, the pretrained embedding model trained with train_speaker_embeddings.py hparams/train_ecapa_tdnn.yaml @@ -43,6 +43,9 @@ n_mels: 80 # ECAPA-TDNN model emb_dim: 192 +emb_channels: [1024, 1024, 1024, 1024, 3072] +emb_attention_channels: 128 +emb_lin_neurons: 192 batch_size: 512 # AMI data_prep parameters @@ -50,6 +53,8 @@ split_type: 'full_corpus_asr' skip_TNO: True # Options for mic_type: 'Mix-Lapel', 'Mix-Headset', 'Array1', 'Array1-01', 'BeamformIt' mic_type: 'Mix-Headset' +dev_meta_file: !ref /ami_dev..subsegs.json +eval_meta_file: !ref /ami_eval..subsegs.json vad_type: 'oracle' max_subseg_dur: 3.0 overlap: 1.5 @@ -83,11 +88,11 @@ mean_var_norm: !new:speechbrain.processing.features.InputNormalization embedding_model: !new:speechbrain.lobes.models.ECAPA_TDNN.ECAPA_TDNN input_size: !ref - channels: [1024, 1024, 1024, 1024, 3072] + channels: !ref kernel_sizes: [5, 3, 3, 3, 1] dilations: [1, 2, 3, 4, 1] - attention_channels: 128 - lin_neurons: 192 + attention_channels: !ref + lin_neurons: !ref mean_var_norm_emb: !new:speechbrain.processing.features.InputNormalization norm_type: global diff --git a/recipes/AMI/Diarization/hparams/xvectors.yaml b/recipes/AMI/Diarization/hparams/xvectors.yaml index 4d2b194975..ef33aba9ca 100755 --- a/recipes/AMI/Diarization/hparams/xvectors.yaml +++ b/recipes/AMI/Diarization/hparams/xvectors.yaml @@ -6,7 +6,7 @@ # ################################################# seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Folders # Download data from: http://groups.inf.ed.ac.uk/ami/download/ @@ -17,7 +17,7 @@ manual_annot_folder: !PLACEHOLDER # e.g., /path/to/ami_public_manual_1.6.2/ output_folder: 
results/ami/xvect/ save_folder: !ref /save/ -device: 'cuda:0' +skip_prep: False # Embedding model # Here, the pretrained embedding model trained with train_speaker_embeddings.py hparams/train_ecapa_tdnn.yaml @@ -42,6 +42,7 @@ n_mels: 24 # Xvector model emb_dim: 512 +emb_tdnn_channels: [512, 512, 512, 512, 1500] batch_size: 512 # AMI data_prep parameters @@ -49,6 +50,8 @@ split_type: 'full_corpus_asr' skip_TNO: True # Options for mic_type: 'Mix-Lapel', 'Mix-Headset', 'Array1', 'Array1-01', 'BeamformIt' mic_type: 'Mix-Headset' +dev_meta_file: !ref /ami_dev..subsegs.json +eval_meta_file: !ref /ami_eval..subsegs.json vad_type: 'oracle' max_subseg_dur: 3.0 overlap: 1.5 @@ -79,7 +82,7 @@ embedding_model: !new:speechbrain.lobes.models.Xvector.Xvector in_channels: !ref activation: !name:torch.nn.LeakyReLU tdnn_blocks: 5 - tdnn_channels: [512, 512, 512, 512, 1500] + tdnn_channels: !ref tdnn_kernel_sizes: [5, 3, 3, 1, 1] tdnn_dilations: [1, 2, 3, 1, 1] lin_neurons: !ref diff --git a/recipes/AMI/ami_prepare.py b/recipes/AMI/ami_prepare.py index d1d38dfc68..6c32ecfc6b 100644 --- a/recipes/AMI/ami_prepare.py +++ b/recipes/AMI/ami_prepare.py @@ -6,19 +6,17 @@ Prepares metadata files (JSON) from manual annotations "segments/" using RTTM format (Oracle VAD). 
""" -import os -import logging -import xml.etree.ElementTree as et import glob import json +import os +import xml.etree.ElementTree as et + from ami_splits import get_AMI_split -from speechbrain.dataio.dataio import ( - load_pkl, - save_pkl, -) +from speechbrain.dataio.dataio import load_pkl, save_pkl +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) SAMPLERATE = 16000 @@ -64,15 +62,21 @@ def prepare_ami( overlap : float Overlap duration in seconds between adjacent subsegments + Returns + ------- + None + Example ------- >>> from recipes.AMI.ami_prepare import prepare_ami - >>> data_folder = '/network/datasets/ami/amicorpus/' - >>> manual_annot_folder = '/home/mila/d/dawalatn/nauman/ami_public_manual/' - >>> save_folder = 'results/save/' - >>> split_type = 'full_corpus_asr' - >>> mic_type = 'Lapel' - >>> prepare_ami(data_folder, manual_annot_folder, save_folder, split_type, mic_type) + >>> data_folder = "/network/datasets/ami/amicorpus/" + >>> manual_annot_folder = "/home/mila/d/dawalatn/nauman/ami_public_manual/" + >>> save_folder = "results/save/" + >>> split_type = "full_corpus_asr" + >>> mic_type = "Lapel" + >>> prepare_ami( + ... data_folder, manual_annot_folder, save_folder, split_type, mic_type + ... 
) """ # Meta files @@ -176,8 +180,7 @@ def prepare_ami( def get_RTTM_per_rec(segs, spkrs_list, rec_id): - """Prepares rttm for each recording - """ + """Prepares rttm for each recording""" rttm = [] @@ -234,11 +237,9 @@ def get_RTTM_per_rec(segs, spkrs_list, rec_id): def prepare_segs_for_RTTM( list_ids, out_rttm_file, audio_dir, annot_dir, split_type, skip_TNO ): - RTTM = [] # Stores all RTTMs clubbed together for a given dataset split for main_meet_id in list_ids: - # Skip TNO meetings from dev and eval sets if ( main_meet_id.startswith("TS") @@ -264,12 +265,9 @@ def prepare_segs_for_RTTM( list_spkr_xmls = glob.glob(f) list_spkr_xmls.sort() # A, B, C, D, E etc (Speakers) segs = [] - spkrs_list = ( - [] - ) # Since non-scenario recordings contains 3-5 speakers + spkrs_list = [] # Since non-scenario recordings contains 3-5 speakers for spkr_xml_file in list_spkr_xmls: - # Speaker ID spkr = os.path.basename(spkr_xml_file).split(".")[1] spkr_ID = rec_id + "." + spkr @@ -296,7 +294,7 @@ def prepare_segs_for_RTTM( RTTM = RTTM + rttm_per_rec # Write one RTTM as groundtruth. For example, "fullref_eval.rttm" - with open(out_rttm_file, "w") as f: + with open(out_rttm_file, "w", encoding="utf-8") as f: for item in RTTM: f.write("%s\n" % item) @@ -310,6 +308,10 @@ def is_overlapped(end1, start2): End time of the first segment. start2 : float Start time of the second segment. + + Returns + ------- + overlapped : bool """ if start2 > end1: @@ -319,8 +321,7 @@ def is_overlapped(end1, start2): def merge_rttm_intervals(rttm_segs): - """Merges adjacent segments in rttm if they overlap. 
- """ + """Merges adjacent segments in rttm if they overlap.""" # For one recording # rec_id = rttm_segs[0][1] rttm_segs.sort(key=lambda x: float(x[3])) @@ -351,8 +352,7 @@ def merge_rttm_intervals(rttm_segs): def get_subsegments(merged_segs, max_subseg_dur=3.0, overlap=1.5): - """Divides bigger segments into smaller sub-segments - """ + """Divides bigger segments into smaller sub-segments""" shift = max_subseg_dur - overlap subsegments = [] @@ -406,7 +406,7 @@ def prepare_metadata( # Read RTTM RTTM = [] - with open(rttm_file, "r") as f: + with open(rttm_file, encoding="utf-8") as f: for line in f: entry = line[:-1] RTTM.append(entry) @@ -438,12 +438,12 @@ def prepare_metadata( segs_file = save_dir + "/" + filename + ".segments.rttm" subsegment_file = save_dir + "/" + filename + ".subsegments.rttm" - with open(segs_file, "w") as f: + with open(segs_file, "w", encoding="utf-8") as f: for row in MERGED_SEGMENTS: line_str = " ".join(row) f.write("%s\n" % line_str) - with open(subsegment_file, "w") as f: + with open(subsegment_file, "w", encoding="utf-8") as f: for row in SUBSEGMENTS: line_str = " ".join(row) f.write("%s\n" % line_str) @@ -510,7 +510,7 @@ def prepare_metadata( } out_json_file = save_dir + "/" + filename + "." + mic_type + ".subsegs.json" - with open(out_json_file, mode="w") as json_f: + with open(out_json_file, mode="w", encoding="utf-8") as json_f: json.dump(json_dict, json_f, indent=2) msg = "%s JSON prepared" % (out_json_file) @@ -522,6 +522,17 @@ def skip(save_folder, conf, meta_files, opt_file): Detects if the AMI data_preparation has been already done. If the preparation has been done, we can skip it. + Arguments + --------- + save_folder : str + The folder containing the generated files. + conf : dict + Configuration to check against saved config. + meta_files : list + List of file paths to check. + opt_file : str + One more file to check. 
+ Returns ------- bool diff --git a/recipes/AMI/ami_splits.py b/recipes/AMI/ami_splits.py index 24161a2af7..7407ec5a1f 100644 --- a/recipes/AMI/ami_splits.py +++ b/recipes/AMI/ami_splits.py @@ -19,7 +19,7 @@ def get_AMI_split(split_option): Returns ------- - Meeting IDs for train, dev, and test sets for given split_option + Meeting IDs for train, dev, and test sets for given split_option """ if split_option not in ALLOWED_OPTIONS: @@ -30,7 +30,6 @@ def get_AMI_split(split_option): return if split_option == "scenario_only": - train_set = [ "ES2002", "ES2005", diff --git a/recipes/Aishell1Mix/extra-dependencies.txt b/recipes/Aishell1Mix/extra-dependencies.txt deleted file mode 100644 index 6a02336d91..0000000000 --- a/recipes/Aishell1Mix/extra-dependencies.txt +++ /dev/null @@ -1,10 +0,0 @@ -mir-eval==0.6 -pyloudnorm -soundfile>=0.10.3.post1 -tqdm>=4.46.1 -pysndfx>=0.3.6 -pandas>=1.0.1 -numpy>=1.18.1 -pyloudnorm>=0.1.0 -scipy>=1.4.1 -matplotlib>=3.1.3 \ No newline at end of file diff --git a/recipes/Aishell1Mix/extra_requirements.txt b/recipes/Aishell1Mix/extra_requirements.txt new file mode 100644 index 0000000000..bca598898e --- /dev/null +++ b/recipes/Aishell1Mix/extra_requirements.txt @@ -0,0 +1,6 @@ +matplotlib>=3.1.3 +mir-eval==0.6 +pyloudnorm +pyloudnorm>=0.1.0 +pysndfx>=0.3.6 + diff --git a/recipes/Aishell1Mix/meta/preprocess_dynamic_mixing.py b/recipes/Aishell1Mix/meta/preprocess_dynamic_mixing.py index bd71d861d4..0e9ad7fd18 100644 --- a/recipes/Aishell1Mix/meta/preprocess_dynamic_mixing.py +++ b/recipes/Aishell1Mix/meta/preprocess_dynamic_mixing.py @@ -8,18 +8,19 @@ Samuele Cornell, 2020 """ -import os import argparse +import glob +import os from pathlib import Path + +import numpy as np +import torch import tqdm -import torchaudio -import glob # from oct2py import octave from scipy import signal -import numpy as np -import torch +from speechbrain.dataio import audio_io parser = argparse.ArgumentParser( "utility for resampling all audio files in a folder 
recursively" @@ -45,7 +46,7 @@ def resample_folder(input_folder, output_folder, fs, regex): Path of the output folder with the resampled data. fs : int Target sampling frequency. - reg_exp: str + regex : str Regular expression for search. """ # filedir = os.path.dirname(os.path.realpath(__file__)) @@ -54,8 +55,7 @@ def resample_folder(input_folder, output_folder, fs, regex): files = glob.glob(os.path.join(input_folder, regex), recursive=True) for f in tqdm.tqdm(files): - - audio, fs_read = torchaudio.load(f) + audio, fs_read = audio_io.load(f) audio = audio[0].numpy() audio = signal.resample_poly(audio, fs, fs_read) @@ -70,8 +70,7 @@ def resample_folder(input_folder, output_folder, fs, regex): relative_path = os.path.join( Path(f).relative_to(Path(input_folder)).parent, - Path(f).relative_to(Path(input_folder)).stem - + "_peak_{}.wav".format(peak), + Path(f).relative_to(Path(input_folder)).stem + f"_peak_{peak}.wav", ) os.makedirs( @@ -83,7 +82,7 @@ def resample_folder(input_folder, output_folder, fs, regex): exist_ok=True, ) - torchaudio.save( + audio_io.save( os.path.join(output_folder, relative_path), audio.reshape(1, -1), fs, @@ -91,7 +90,6 @@ def resample_folder(input_folder, output_folder, fs, regex): if __name__ == "__main__": - args = parser.parse_args() resample_folder( args.input_folder, args.output_folder, int(args.fs), args.regex diff --git a/recipes/Aishell1Mix/prepare_data.py b/recipes/Aishell1Mix/prepare_data.py index b272ca310b..092b6441c5 100644 --- a/recipes/Aishell1Mix/prepare_data.py +++ b/recipes/Aishell1Mix/prepare_data.py @@ -5,16 +5,17 @@ * Cem Subakan 2020 """ -import os import csv +import functools +import glob +import os import tarfile import zipfile -import glob -import tqdm.contrib.concurrent +from urllib.request import urlretrieve + import soundfile as sf -import functools +import tqdm.contrib.concurrent from pysndfx import AudioEffectsChain -from urllib.request import urlretrieve def prepare_aishell1mix( @@ -40,6 +41,8 @@ def 
prepare_aishell1mix( skip_prep (bool): If True, skip data preparation aishell1mix_addnoise: If True, add whamnoise to aishell1mix datasets """ + if skip_prep: + return # create the datapath folder if it does not exist if not os.path.exists(datapath): @@ -51,7 +54,6 @@ def prepare_aishell1mix( aishell1mix_outdir = os.path.join(datapath, "aishell1mix") if not os.path.exists(aishell1_dir): - print("Download Aishell1 into %s" % datapath) urlretrieve( "https://us.openslr.org/resources/33/data_aishell.tgz", @@ -70,10 +72,9 @@ def prepare_aishell1mix( extracttar(os.path.join(datapath, "resource_aishell.tgz")) if not os.path.exists(wham_dir): - print("Download Wham noise dataset into %s" % datapath) urlretrieve( - "https://storage.googleapis.com/whisper-public/wham_noise.zip", + "https://my-bucket-a8b4b49c25c811ee9a7e8bba05fa24c7.s3.amazonaws.com/wham_noise.zip", os.path.join(datapath, "wham_noise.zip"), reporthook=reporthook, ) @@ -158,22 +159,19 @@ def prepare_aishell1mix( datatypes, ) - if skip_prep: - return - if "Aishell1" in aishell1mix_outdir: # Aishell1 Mix2/3 datasets if n_spks == 2: - assert ( - "Aishell1Mix2" in aishell1mix_outdir - ), "Inconsistent number of speakers and datapath" + assert "Aishell1Mix2" in aishell1mix_outdir, ( + "Inconsistent number of speakers and datapath" + ) create_aishell1mix2_csv( aishell1mix_outdir, savepath, addnoise=aishell1mix_addnoise ) elif n_spks == 3: - assert ( - "Aishell1Mix3" in aishell1mix_outdir - ), "Inconsistent number of speakers and datapath" + assert "Aishell1Mix3" in aishell1mix_outdir, ( + "Inconsistent number of speakers and datapath" + ) create_aishell1mix3_csv( aishell1mix_outdir, savepath, addnoise=aishell1mix_addnoise ) @@ -229,14 +227,16 @@ def create_aishell1mix2_csv( ] with open( - savepath + "/aishell1mix2_" + set_type + ".csv", "w" + savepath + "/aishell1mix2_" + set_type + ".csv", + "w", + newline="", + encoding="utf-8", ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) 
writer.writeheader() for i, (mix_path, s1_path, s2_path, noise_path) in enumerate( zip(mix_fl_paths, s1_fl_paths, s2_fl_paths, noise_fl_paths) ): - row = { "ID": i, "duration": 1.0, @@ -307,7 +307,10 @@ def create_aishell1mix3_csv( ] with open( - savepath + "/aishell1mix3_" + set_type + ".csv", "w" + savepath + "/aishell1mix3_" + set_type + ".csv", + "w", + newline="", + encoding="utf-8", ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() @@ -369,8 +372,7 @@ def apply_fx(sound_path, speed): s = fx(s) # Write the file sf.write( - f"""{sound_path.replace( - '.wav',f"sp{str(speed).replace('.','')}" +'.wav')}""", + f"""{sound_path.replace(".wav", f"sp{str(speed).replace('.', '')}" + ".wav")}""", s, rate, ) diff --git a/recipes/Aishell1Mix/separation/README.md b/recipes/Aishell1Mix/separation/README.md index 2109095028..b11eab5d84 100644 --- a/recipes/Aishell1Mix/separation/README.md +++ b/recipes/Aishell1Mix/separation/README.md @@ -3,6 +3,11 @@ This folder contains some popular recipes for the Aishell1Mix dataset similar to * This recipe supports train with several source separation models on Aishell1Mix, including [Sepformer](https://arxiv.org/abs/2010.13154), [DPRNN](https://arxiv.org/abs/1910.06379), [ConvTasnet](https://arxiv.org/abs/1809.07454), [DPTNet](https://arxiv.org/abs/2007.13975). +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + + Make sure that SoX is installed on your machine. 
* For windows : @@ -15,17 +20,23 @@ conda install -c conda-forge sox ``` Additional dependencies: ``` -pip install -r ../extra-dependencies.txt +pip install -r ../extra_requirements.txt ``` To run it: -``` +```shell python train.py hparams/sepformer-aishell1mix2.yaml --data_folder /yourdatapath python train.py hparams/sepformer-aishell1mix3.yaml --data_folder /yourdatapath ``` Note that during training we print the negative SI-SNR (as we treat this value as the loss). +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: + +```shell +python train.py hparams/sepformer-aishell1mix2.yaml --data_folder /yourdatapath --test_only +python train.py hparams/sepformer-aishell1mix3.yaml --data_folder /yourdatapath --test_only +``` # Aishell1Mix2/3 * Your data folder should contain data_aishell (aishell1), resource_aishell (aishell1), wham_noise and aishell1mix, which can be created using the scripts at `https://github.com/huangzj421/Aishell1Mix`. Otherwise train.py will download and prepare data into your data path automatically. @@ -50,7 +61,7 @@ Here are the SI - SNRi results (in dB) on the test set of Aishell1Mix dataset wi | NoDynamicMixing | 8.1 | | DynamicMixing | 11.2 | -The output folders with model checkpoints and logs is available [here](https://drive.google.com/drive/folders/1GvJiUxhdN5bfbuBdxclPzdAPd2op1PCZ?usp=sharing). +The output folders with model checkpoints and logs are available [here](https://www.dropbox.com/sh/6x9356yuybj8lue/AABPlpS03Vcci_E3jA69oKoXa?dl=0).
# Example calls for running the training scripts @@ -66,10 +77,10 @@ The output folders with model checkpoints and logs is available [here](https://d You can run the following command to train the model using Distributed Data Parallel (DDP) with 2 GPUs: +```bash +torchrun --nproc_per_node=2 train.py hparams/sepformer.yaml --data_folder /yourdatapath ``` - python -m torch.distributed.launch --nproc_per_node=2 train.py hparams/sepformer.yaml --data_folder /yourdatapath --distributed_launch --distributed_backend='nccl' -``` -You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at this [tutorial](https://colab.research.google.com/drive/13pBUacPiotw1IvyffvGZ-HrtBr9T6l15?usp=sharing). +You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at [our documentation](https://speechbrain.readthedocs.io/en/latest/multigpu.html). # **About SpeechBrain** @@ -82,6 +93,15 @@ You can add the other runtime options as appropriate. For more complete informat Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Aishell1Mix/separation/dynamic_mixing.py b/recipes/Aishell1Mix/separation/dynamic_mixing.py index 990aca19bb..102f6b14ed 100644 --- a/recipes/Aishell1Mix/separation/dynamic_mixing.py +++ b/recipes/Aishell1Mix/separation/dynamic_mixing.py @@ -1,22 +1,25 @@ -import speechbrain as sb -import numpy as np -import torch -import torchaudio -import glob -import os -from speechbrain.dataio.batch import PaddedBatch -from tqdm import tqdm -import warnings -import pyloudnorm -import random - """ -The functions to implement Dynamic Mixing For SpeechSeparation +The file implement Dynamic Mixing For SpeechSeparation + Authors * Samuele Cornell 2021 * Cem Subakan 2021 """ 
+import glob +import os +import random +import warnings + +import numpy as np +import pyloudnorm +import torch +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.batch import PaddedBatch + def build_spk_hashtable_aishell1mix(hparams): """ @@ -29,11 +32,10 @@ def build_spk_hashtable_aishell1mix(hparams): # just for one file check if the sample rate is correct assert ( - torchaudio.info(aishell1_utterances[0]).sample_rate + audio_io.info(aishell1_utterances[0]).sample_rate == hparams["sample_rate"] ) for utt in tqdm(aishell1_utterances): - path = os.path.normpath(utt) path_list = path.split(os.sep) spk_id = path_list[-2] @@ -115,7 +117,7 @@ def audio_pipeline( if hparams["use_wham_noise"]: noise_file = np.random.choice(noise_files, 1, replace=False) - noise, fs_read = torchaudio.load(noise_file[0]) + noise, fs_read = audio_io.load(noise_file[0]) noise = noise.squeeze() # select two speakers randomly @@ -126,7 +128,7 @@ def audio_pipeline( ] minlen = min( - *[torchaudio.info(x).num_frames for x in spk_files], + *[audio_io.info(x).num_frames for x in spk_files], hparams["training_signal_len"], ) @@ -161,15 +163,17 @@ def normalize(signal, is_noise=False): for i, spk_file in enumerate(spk_files): # select random offset - length = torchaudio.info(spk_file).num_frames + length = audio_io.info(spk_file).num_frames start = 0 stop = length if length > minlen: # take a random window start = np.random.randint(0, length - minlen) stop = start + minlen - tmp, fs_read = torchaudio.load( - spk_file, frame_offset=start, num_frames=stop - start, + tmp, fs_read = audio_io.load( + spk_file, + frame_offset=start, + num_frames=stop - start, ) tmp = tmp[0].numpy() tmp = normalize(tmp) diff --git a/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2-wham.yaml b/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2-wham.yaml index 3d58aa5862..2947e783ff 100644 --- 
a/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2-wham.yaml +++ b/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2-wham.yaml @@ -8,18 +8,20 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 12345 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params # this is the storage folder for the whole needed dataset e.g. /yourpath # it should contain at least 250G space for aishell1, wham_noise and aishell1mix. +num_spks: 2 data_folder: !PLACEHOLDER +data_folder_nspks: !ref /aishell1mix/Aishell1Mix data_freqs: ['8k'] # or if you want the whole dataset ['8k','16k'] data_modes: ['min'] # or if you want the whole dataset ['min', 'max'] # this is the base folder for dynamic mixing, usually not changed. -base_folder_dm: !ref /data_aishell/wav/train +base_folder_dm: !ref /data_aishell/wav/train experiment_name: sepformer-aishell1mix2-whamnoise output_folder: !ref results// @@ -33,14 +35,12 @@ skip_prep: False ckpt_interval_minutes: 60 # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False -num_spks: 2 +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -63,18 +63,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + 
+speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -85,6 +105,7 @@ N_encoder_out: 256 out_channels: 256 kernel_size: 16 kernel_stride: 8 +d_ffn: 1024 # Dataloader options dataloader_opts: @@ -102,7 +123,7 @@ SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True @@ -111,7 +132,7 @@ SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True @@ -161,7 +182,6 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer decoder: !ref masknet: !ref counter: !ref - # lr_scheduler: !ref train_logger: 
!new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref diff --git a/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2.yaml b/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2.yaml index 3017df4880..141aad5365 100644 --- a/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2.yaml +++ b/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix2.yaml @@ -8,18 +8,20 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params # this is the storage folder for the whole needed dataset e.g. /yourpath # it should contain at least 250G space for aishell1, wham_noise and aishell1mix. +num_spks: 2 data_folder: !PLACEHOLDER +data_folder_nspks: !ref /aishell1mix/Aishell1Mix data_freqs: ['8k'] # or if you want the whole dataset ['8k','16k'] data_modes: ['min'] # or if you want the whole dataset ['min', 'max'] # this is the base folder for dynamic mixing, usually not changed. 
-base_folder_dm: !ref /data_aishell/wav/train +base_folder_dm: !ref /data_aishell/wav/train experiment_name: sepformer-aishell1mix2 output_folder: !ref results// @@ -33,14 +35,12 @@ skip_prep: False ckpt_interval_minutes: 60 # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False -num_spks: 2 +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -63,18 +63,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -85,6 +105,7 @@ N_encoder_out: 256 out_channels: 256 kernel_size: 16 kernel_stride: 8 +d_ffn: 1024 # Dataloader options dataloader_opts: @@ -102,7 +123,7 @@ SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True @@ -111,7 +132,7 @@ SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True @@ -161,7 +182,6 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer decoder: !ref masknet: !ref counter: !ref - # lr_scheduler: !ref train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref diff --git a/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3-wham.yaml b/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3-wham.yaml index 98a8d81127..56bb5908d6 100644 --- a/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3-wham.yaml +++ b/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3-wham.yaml @@ -8,18 +8,20 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 12345 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params # this is the storage folder for the whole needed dataset e.g. 
/yourpath # it should contain at least 250G space for aishell1, wham_noise and aishell1mix. +num_spks: 3 data_folder: !PLACEHOLDER +data_folder_nspks: !ref /aishell1mix/Aishell1Mix data_freqs: ['8k'] # or if you want the whole dataset ['8k','16k'] data_modes: ['min'] # or if you want the whole dataset ['min', 'max'] # this is the base folder for dynamic mixing, usually not changed. -base_folder_dm: !ref /data_aishell/wav/train +base_folder_dm: !ref /data_aishell/wav/train experiment_name: sepformer-aishell1mix3-whamnoise output_folder: !ref results// @@ -33,14 +35,12 @@ skip_prep: False ckpt_interval_minutes: 60 # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False -num_spks: 3 +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -63,18 +63,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -85,6 +105,7 @@ N_encoder_out: 256 out_channels: 256 kernel_size: 16 kernel_stride: 8 +d_ffn: 1024 # Dataloader options dataloader_opts: @@ -102,7 +123,7 @@ SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True @@ -111,7 +132,7 @@ SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True diff --git a/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3.yaml b/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3.yaml index c91572fb4a..d0a99478b0 100644 --- a/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3.yaml +++ b/recipes/Aishell1Mix/separation/hparams/sepformer-aishell1mix3.yaml @@ -8,18 +8,20 @@ # Seed needs to be set at top of 
yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params # this is the storage folder for the whole needed dataset e.g. /yourpath # it should contain at least 250G space for aishell1, wham_noise and aishell1mix. +num_spks: 3 data_folder: !PLACEHOLDER +data_folder_nspks: !ref /aishell1mix/Aishell1Mix data_freqs: ['8k'] # or if you want the whole dataset ['8k','16k'] data_modes: ['min'] # or if you want the whole dataset ['min', 'max'] # this is the base folder for dynamic mixing, usually not changed. -base_folder_dm: !ref /data_aishell/wav/train +base_folder_dm: !ref /data_aishell/wav/train experiment_name: sepformer-aishell1mix3 output_folder: !ref results// @@ -33,14 +35,12 @@ skip_prep: False ckpt_interval_minutes: 60 # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False -num_spks: 3 +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -63,18 +63,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -85,6 +105,7 @@ N_encoder_out: 256 out_channels: 256 kernel_size: 16 kernel_stride: 8 +d_ffn: 1024 # Dataloader options dataloader_opts: @@ -102,7 +123,7 @@ SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True @@ -111,7 +132,7 @@ SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True diff --git a/recipes/Aishell1Mix/separation/prepare_data.py b/recipes/Aishell1Mix/separation/prepare_data.py new file mode 120000 index 0000000000..1a7125c969 --- /dev/null +++ b/recipes/Aishell1Mix/separation/prepare_data.py @@ -0,0 +1 @@ +../prepare_data.py \ No newline at end of file diff --git a/recipes/Aishell1Mix/separation/scripts/create_aishell1_metadata.py 
b/recipes/Aishell1Mix/separation/scripts/create_aishell1_metadata.py index be7553ea94..fa7380dc6a 100644 --- a/recipes/Aishell1Mix/separation/scripts/create_aishell1_metadata.py +++ b/recipes/Aishell1Mix/separation/scripts/create_aishell1_metadata.py @@ -1,8 +1,9 @@ -import os import argparse -import soundfile as sf -import pandas as pd import glob +import os + +import pandas as pd +import soundfile as sf from tqdm import tqdm # Global parameter @@ -30,13 +31,13 @@ def main(args): def create_aishell1_metadata(aishell1_dir, md_dir): - """ Generate metadata corresponding to downloaded data in aishell1 """ + """Generate metadata corresponding to downloaded data in aishell1""" # Get speakers metadata speakers_metadata = create_speakers_dataframe(aishell1_dir) filename2transcript = {} with open( os.path.join(aishell1_dir, "transcript/aishell_transcript_v0.8.txt"), - "r", + encoding="utf-8", ) as f: lines = f.readlines() for line in lines: @@ -65,8 +66,8 @@ def create_aishell1_metadata(aishell1_dir, md_dir): def create_speakers_dataframe(aishell1_dir): - """ Read metadata from the aishell1 dataset and collect infos - about the speakers """ + """Read metadata from the aishell1 dataset and collect infos + about the speakers""" print("Reading speakers metadata") # Read SPEAKERS.TXT and create a dataframe speakers_metadata_path = os.path.join( @@ -105,8 +106,8 @@ def check_already_generated(md_dir, aishell1_dir): def create_aishell1_dataframe( aishell1_dir, subdir, speakers_md, filename2transcript ): - """ Generate a dataframe that gather infos about the sound files in a - aishell1 subdirectory """ + """Generate a dataframe that gather infos about the sound files in a + aishell1 subdirectory""" print( f"Creating {subdir} metadata file in " diff --git a/recipes/Aishell1Mix/separation/scripts/create_aishell1mix_from_metadata.py b/recipes/Aishell1Mix/separation/scripts/create_aishell1mix_from_metadata.py index 2232a84c45..23db1e6f41 100644 --- 
a/recipes/Aishell1Mix/separation/scripts/create_aishell1mix_from_metadata.py +++ b/recipes/Aishell1Mix/separation/scripts/create_aishell1mix_from_metadata.py @@ -1,14 +1,15 @@ -import os import argparse -import soundfile as sf -import pandas as pd -import numpy as np import functools -from scipy.signal import resample_poly -import tqdm.contrib.concurrent import glob +import os import shutil +import numpy as np +import pandas as pd +import soundfile as sf +import tqdm.contrib.concurrent +from scipy.signal import resample_poly + # eps secures log and division EPS = 1e-10 # Rate of the sources in aishell1 @@ -46,13 +47,13 @@ "--freqs", nargs="+", default=["8k", "16k"], - help="--freqs 16k 8k will create 2 directories wav8k " "and wav16k", + help="--freqs 16k 8k will create 2 directories wav8k and wav16k", ) parser.add_argument( "--modes", nargs="+", default=["min", "max"], - help="--modes min max will create 2 directories in " "each freq directory", + help="--modes min max will create 2 directories in each freq directory", ) parser.add_argument( "--types", @@ -98,7 +99,7 @@ def main(args): def create_aishell1mix( aishell1_dir, wham_dir, out_dir, metadata_dir, freqs, n_src, modes, types ): - """ Generate sources mixtures and saves them in out_dir""" + """Generate sources mixtures and saves them in out_dir""" # Get metadata files md_filename_list = [ file for file in os.listdir(metadata_dir) if "info" not in file @@ -121,7 +122,7 @@ def create_aishell1mix( def process_metadata_file( csv_path, freqs, n_src, aishell1_dir, wham_dir, out_dir, modes, types ): - """ Process a metadata generation file to create sources and mixtures""" + """Process a metadata generation file to create sources and mixtures""" md_file = pd.read_csv(csv_path, engine="python") for freq in freqs: # Get the frequency directory path @@ -161,8 +162,7 @@ def process_metadata_file( continue shutil.rmtree(dir_path, ignore_errors=True) print( - f"Creating mixtures and sources from {csv_path} " - f"in 
{dir_path}" + f"Creating mixtures and sources from {csv_path} in {dir_path}" ) # Create directories accordingly for subdir in subdirs: @@ -301,7 +301,7 @@ def process_utterance( def create_empty_metrics_md(n_src, subdir): - """ Create the metrics dataframe""" + """Create the metrics dataframe""" metrics_dataframe = pd.DataFrame() metrics_dataframe["mixture_ID"] = {} if subdir == "mix_clean": @@ -310,15 +310,15 @@ def create_empty_metrics_md(n_src, subdir): elif subdir == "mix_both": for i in range(n_src): metrics_dataframe[f"source_{i + 1}_SNR"] = {} - metrics_dataframe[f"noise_SNR"] = {} + metrics_dataframe["noise_SNR"] = {} elif subdir == "mix_single": metrics_dataframe["source_1_SNR"] = {} - metrics_dataframe[f"noise_SNR"] = {} + metrics_dataframe["noise_SNR"] = {} return metrics_dataframe def create_empty_mixture_md(n_src, subdir): - """ Create the mixture dataframe""" + """Create the mixture dataframe""" mixture_dataframe = pd.DataFrame() mixture_dataframe["mixture_ID"] = {} mixture_dataframe["mixture_path"] = {} @@ -328,16 +328,16 @@ def create_empty_mixture_md(n_src, subdir): elif subdir == "mix_both": for i in range(n_src): mixture_dataframe[f"source_{i + 1}_path"] = {} - mixture_dataframe[f"noise_path"] = {} + mixture_dataframe["noise_path"] = {} elif subdir == "mix_single": mixture_dataframe["source_1_path"] = {} - mixture_dataframe[f"noise_path"] = {} + mixture_dataframe["noise_path"] = {} mixture_dataframe["length"] = {} return mixture_dataframe def read_sources(row, n_src, aishell1_dir, wham_dir): - """ Get sources and info to mix the sources """ + """Get sources and info to mix the sources""" # Get info about the mixture mixture_id = row["mixture_ID"] sources_path_list = get_list_from_csv(row, "source_path", n_src) @@ -368,7 +368,7 @@ def read_sources(row, n_src, aishell1_dir, wham_dir): def get_list_from_csv(row, column, n_src): - """ Transform a list in the .csv in an actual python list """ + """Transform a list in the .csv in an actual python 
list""" python_list = [] for i in range(n_src): current_column = column.split("_") @@ -379,7 +379,7 @@ def get_list_from_csv(row, column, n_src): def extend_noise(noise, max_length): - """ Concatenate noise using hanning window""" + """Concatenate noise using hanning window""" noise_ex = noise window = np.hanning(RATE + 1) # Increasing window @@ -401,7 +401,7 @@ def extend_noise(noise, max_length): def transform_sources(sources_list, freq, mode, gain_list): - """ Transform aishell1 sources to aishell1mix """ + """Transform aishell1 sources to aishell1mix""" # Normalize sources sources_list_norm = loudness_normalize(sources_list, gain_list) # Resample the sources @@ -412,7 +412,7 @@ def transform_sources(sources_list, freq, mode, gain_list): def loudness_normalize(sources_list, gain_list): - """ Normalize sources loudness""" + """Normalize sources loudness""" # Create the list of normalized sources normalized_list = [] for i, source in enumerate(sources_list): @@ -421,7 +421,7 @@ def loudness_normalize(sources_list, gain_list): def resample_list(sources_list, freq): - """ Resample the source list to the desired frequency""" + """Resample the source list to the desired frequency""" # Create the resampled list resampled_list = [] # Resample each source @@ -431,7 +431,7 @@ def resample_list(sources_list, freq): def fit_lengths(source_list, mode): - """ Make the sources to match the target length """ + """Make the sources to match the target length""" sources_list_reshaped = [] # Check the mode if mode == "min": @@ -472,7 +472,7 @@ def write_noise(mix_id, transformed_sources, dir_path, freq): def mix(sources_list): - """ Do the mixing """ + """Do the mixing""" # Initialize mixture mixture = np.zeros_like(sources_list[0]) for source in sources_list: @@ -500,11 +500,11 @@ def compute_snr_list(mixture, sources_list): def snr_xy(x, y): - return 10 * np.log10(np.mean(x ** 2) / (np.mean(y ** 2) + EPS) + EPS) + return 10 * np.log10(np.mean(x**2) / (np.mean(y**2) + EPS) + EPS) 
def add_to_metrics_metadata(metrics_df, mixture_id, snr_list): - """ Add a new line to metrics_df""" + """Add a new line to metrics_df""" row_metrics = [mixture_id] + snr_list metrics_df.loc[len(metrics_df)] = row_metrics @@ -518,7 +518,7 @@ def add_to_mixture_metadata( length, subdir, ): - """ Add a new line to mixture_df """ + """Add a new line to mixture_df""" sources_path = abs_sources_path noise_path = [abs_noise_path] if subdir == "mix_clean": diff --git a/recipes/Aishell1Mix/separation/scripts/create_aishell1mix_metadata.py b/recipes/Aishell1Mix/separation/scripts/create_aishell1mix_metadata.py index ffd5fefcdb..27a50504fa 100644 --- a/recipes/Aishell1Mix/separation/scripts/create_aishell1mix_metadata.py +++ b/recipes/Aishell1Mix/separation/scripts/create_aishell1mix_metadata.py @@ -73,7 +73,7 @@ def main(args): md_dir = args.metadata_outdir if md_dir is None: root = os.path.dirname(aishell1_dir) - md_dir = os.path.join(root, f"aishell1mix/metadata") + md_dir = os.path.join(root, "aishell1mix/metadata") os.makedirs(md_dir, exist_ok=True) create_aishell1mix_metadata( aishell1_dir, aishell1_md_dir, wham_dir, wham_md_dir, md_dir, n_src @@ -83,7 +83,7 @@ def main(args): def create_aishell1mix_metadata( aishell1_dir, aishell1_md_dir, wham_dir, wham_md_dir, md_dir, n_src ): - """ Generate aishell1mix metadata according to aishell1 metadata """ + """Generate aishell1mix metadata according to aishell1 metadata""" # Dataset name dataset = f"aishell1mix{n_src}" @@ -164,7 +164,7 @@ def check_already_generated(md_dir, dataset, to_be_ignored, aishell1_md_files): def create_aishell1mix_df( aishell1_md_file, aishell1_dir, wham_md_file, wham_dir, n_src ): - """ Generate aishell1mix dataframe from a aishell1 and wha md file""" + """Generate aishell1mix dataframe from a aishell1 and wha md file""" # Create a dataframe that will be used to generate sources and mixtures mixtures_md = pd.DataFrame(columns=["mixture_ID"]) @@ -197,7 +197,7 @@ def create_aishell1mix_df( # Do the 
mixture mixture_max = mix(sources_list_norm) # Check the mixture for clipping and renormalize if necessary - renormalize_loudness, did_clip = check_for_cliping( + renormalize_loudness, did_clip = check_for_clipping( mixture_max, sources_list_norm ) clip_counter += int(did_clip) @@ -213,7 +213,7 @@ def create_aishell1mix_df( def set_pairs(aishell1_md_file, wham_md_file, n_src): - """ set pairs of sources to make the mixture """ + """set pairs of sources to make the mixture""" # Initialize list for pairs sources utt_pairs = [] noise_pairs = [] @@ -386,7 +386,7 @@ def add_noise(wham_md_file, wham_dir, pair_noise, sources_list, sources_info): def set_loudness(sources_list): - """ Compute original loudness and normalise them randomly """ + """Compute original loudness and normalise them randomly""" # Initialize loudness loudness_list = [] # In aishell1 all sources are at 16KHz hence the meter @@ -424,7 +424,7 @@ def set_loudness(sources_list): def mix(sources_list_norm): - """ Do the mixture for min mode and max mode """ + """Do the mixture for min mode and max mode""" # Initialize mixture mixture_max = np.zeros_like(sources_list_norm[0]) for i in range(len(sources_list_norm)): @@ -432,7 +432,7 @@ def mix(sources_list_norm): return mixture_max -def check_for_cliping(mixture_max, sources_list_norm): +def check_for_clipping(mixture_max, sources_list_norm): """Check the mixture (mode max) for clipping and re normalize if needed.""" # Initialize renormalized sources and loudness renormalize_loudness = [] @@ -453,7 +453,7 @@ def check_for_cliping(mixture_max, sources_list_norm): def compute_gain(loudness, renormalize_loudness): - """ Compute the gain between the original and target loudness""" + """Compute the gain between the original and target loudness""" gain = [] for i in range(len(loudness)): delta_loudness = renormalize_loudness[i] - loudness[i] @@ -462,7 +462,7 @@ def compute_gain(loudness, renormalize_loudness): def get_row(sources_info, gain_list, n_src): - """ Get 
new row for each mixture/info dataframe """ + """Get new row for each mixture/info dataframe""" row_mixture = [sources_info["mixtures_id"]] row_info = [sources_info["mixtures_id"]] for i in range(n_src): diff --git a/recipes/Aishell1Mix/separation/scripts/create_wham_metadata.py b/recipes/Aishell1Mix/separation/scripts/create_wham_metadata.py index 203e344a01..8a8dfb0bd6 100644 --- a/recipes/Aishell1Mix/separation/scripts/create_wham_metadata.py +++ b/recipes/Aishell1Mix/separation/scripts/create_wham_metadata.py @@ -1,8 +1,9 @@ -import os import argparse -import soundfile as sf -import pandas as pd import glob +import os + +import pandas as pd +import soundfile as sf from tqdm import tqdm # Global parameter @@ -30,7 +31,7 @@ def main(args): def create_wham_noise_metadata(wham_noise_dir, md_dir): - """ Generate metadata corresponding to downloaded data in wham_noise """ + """Generate metadata corresponding to downloaded data in wham_noise""" # Check already generated files not_already_processed_dir = check_already_generated(md_dir) @@ -52,12 +53,12 @@ def create_wham_noise_metadata(wham_noise_dir, md_dir): dir_metadata = dir_metadata[dir_metadata["length"] >= num_samples] # Create save path save_path = os.path.join(md_dir, name + ".csv") - print(f"Medatada file created in {save_path}") + print(f"Metadata file created in {save_path}") dir_metadata.to_csv(save_path, index=False) def check_already_generated(md_dir): - """ Check if files have already been generated """ + """Check if files have already been generated""" # Get the already generated files already_generated_csv = os.listdir(md_dir) # Data directories in wham_noise @@ -78,8 +79,8 @@ def check_already_generated(md_dir): def create_wham_noise_dataframe(wham_noise_dir, subdir): - """ Generate a dataframe that gather infos about the sound files in a - wham_noise subdirectory """ + """Generate a dataframe that gather infos about the sound files in a + wham_noise subdirectory""" print(f"Processing files from 
{subdir} dir") # Get the current directory path diff --git a/recipes/Aishell1Mix/separation/train.py b/recipes/Aishell1Mix/separation/train.py index cf19a8cee0..ae0138ffba 100644 --- a/recipes/Aishell1Mix/separation/train.py +++ b/recipes/Aishell1Mix/separation/train.py @@ -21,27 +21,512 @@ * Jianyuan Zhong 2020 """ +import csv import os import sys + +import numpy as np +import torch +import torch.nn.functional as F +from hyperpyyaml import load_hyperpyyaml +from tqdm import tqdm + import speechbrain as sb +import speechbrain.nnet.schedulers as schedulers +from speechbrain.dataio import audio_io from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml -import logging +from speechbrain.utils.logger import get_logger -if __name__ == "__main__": +# from: recipes/LibriMix/separation/train.py +class Separation(sb.Brain): + def compute_forward(self, mix, targets, stage, noise=None): + """Forward computations from the mixture to the separated signals.""" + + # Unpack lists and put tensors in the right device + mix, mix_lens = mix + mix, mix_lens = mix.to(self.device), mix_lens.to(self.device) + + # Convert targets to tensor + targets = torch.cat( + [targets[i][0].unsqueeze(-1) for i in range(self.hparams.num_spks)], + dim=-1, + ).to(self.device) + + # Add speech distortions + if stage == sb.Stage.TRAIN: + with torch.no_grad(): + if self.hparams.use_speedperturb or self.hparams.use_rand_shift: + mix, targets = self.add_speed_perturb(targets, mix_lens) + + mix = targets.sum(-1) + + if self.hparams.use_wham_noise: + noise = noise.to(self.device) + len_noise = noise.shape[1] + len_mix = mix.shape[1] + min_len = min(len_noise, len_mix) + + # add the noise + mix = mix[:, :min_len] + noise[:, :min_len] + + # fix the length of targets also + targets = targets[:, :min_len, :] + + if self.hparams.use_wavedrop: + mix = self.hparams.drop_chunk(mix, mix_lens) + mix = self.hparams.drop_freq(mix) + + if self.hparams.limit_training_signal_len: + 
mix, targets = self.cut_signals(mix, targets) + + # Separation + mix_w = self.hparams.Encoder(mix) + est_mask = self.hparams.MaskNet(mix_w) + mix_w = torch.stack([mix_w] * self.hparams.num_spks) + sep_h = mix_w * est_mask + + # Decoding + est_source = torch.cat( + [ + self.hparams.Decoder(sep_h[i]).unsqueeze(-1) + for i in range(self.hparams.num_spks) + ], + dim=-1, + ) + + # T changed after conv1d in encoder, fix it here + T_origin = mix.size(1) + T_est = est_source.size(1) + if T_origin > T_est: + est_source = F.pad(est_source, (0, 0, 0, T_origin - T_est)) + else: + est_source = est_source[:, :T_origin, :] + + return est_source, targets + + def compute_objectives(self, predictions, targets): + """Computes the si-snr loss""" + return self.hparams.loss(targets, predictions) + + def fit_batch(self, batch): + """Trains one batch""" + + # Unpacking batch list + mixture = batch.mix_sig + targets = [batch.s1_sig, batch.s2_sig] + if self.hparams.use_wham_noise: + noise = batch.noise_sig[0] + else: + noise = None + + if self.hparams.num_spks == 3: + targets.append(batch.s3_sig) + + with self.training_ctx: + predictions, targets = self.compute_forward( + mixture, targets, sb.Stage.TRAIN, noise + ) + loss = self.compute_objectives(predictions, targets) + + # hard threshold the easy dataitems + if self.hparams.threshold_byloss: + th = self.hparams.threshold + loss_to_keep = loss[loss > th] + if loss_to_keep.nelement() > 0: + loss = loss_to_keep.mean() + else: + loss = loss.mean() + + if loss < self.hparams.loss_upper_lim and loss.nelement() > 0: + self.scaler.scale(loss).backward() + if self.hparams.clip_grad_norm >= 0: + self.scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), + self.hparams.clip_grad_norm, + ) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + self.nonfinite_count += 1 + logger.info( + f"infinite loss or empty loss! 
it happened {self.nonfinite_count} times so far - skipping this batch" + ) + loss.data = torch.tensor(0.0).to(self.device) + self.optimizer.zero_grad() + + return loss.detach().cpu() + + def evaluate_batch(self, batch, stage): + """Computations needed for validation/test batches""" + snt_id = batch.id + mixture = batch.mix_sig + targets = [batch.s1_sig, batch.s2_sig] + if self.hparams.num_spks == 3: + targets.append(batch.s3_sig) + + with torch.no_grad(): + predictions, targets = self.compute_forward(mixture, targets, stage) + loss = self.compute_objectives(predictions, targets) + + # Manage audio file saving + if stage == sb.Stage.TEST and self.hparams.save_audio: + if hasattr(self.hparams, "n_audio_to_save"): + if self.hparams.n_audio_to_save > 0: + self.save_audio(snt_id[0], mixture, targets, predictions) + self.hparams.n_audio_to_save += -1 + else: + self.save_audio(snt_id[0], mixture, targets, predictions) + + return loss.mean().detach() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"si-snr": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + + # Perform end-of-iteration things, like annealing, logging, etc. 
+ if stage == sb.Stage.VALID: + # Learning rate annealing + if isinstance( + self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau + ): + current_lr, next_lr = self.hparams.lr_scheduler( + [self.optimizer], epoch, stage_loss + ) + schedulers.update_learning_rate(self.optimizer, next_lr) + else: + # if we do not use the reducelronplateau, we do not change the lr + current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"] + + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch, "lr": current_lr}, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"si-snr": stage_stats["si-snr"]}, + min_keys=["si-snr"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + + def add_speed_perturb(self, targets, targ_lens): + """Adds speed perturbation and random_shift to the input signals""" + + min_len = -1 + recombine = False + + if self.hparams.use_speedperturb: + # Performing speed change (independently on each source) + new_targets = [] + recombine = True + + for i in range(targets.shape[-1]): + new_target = self.hparams.speed_perturb( + targets[:, :, i], + ) + new_targets.append(new_target) + if i == 0: + min_len = new_target.shape[-1] + else: + if new_target.shape[-1] < min_len: + min_len = new_target.shape[-1] + + if self.hparams.use_rand_shift: + # Performing random_shift (independently on each source) + recombine = True + for i in range(targets.shape[-1]): + rand_shift = torch.randint( + self.hparams.min_shift, self.hparams.max_shift, (1,) + ) + new_targets[i] = new_targets[i].to(self.device) + new_targets[i] = torch.roll( + new_targets[i], shifts=(rand_shift[0],), dims=1 + ) + + # Re-combination + if recombine: + if self.hparams.use_speedperturb: + targets = torch.zeros( + targets.shape[0], + min_len, + targets.shape[-1], + device=targets.device, + dtype=torch.float, + ) + for 
i, new_target in enumerate(new_targets): + targets[:, :, i] = new_targets[i][:, 0:min_len] + + mix = targets.sum(-1) + return mix, targets + + def cut_signals(self, mixture, targets): + """This function selects a random segment of a given length within the mixture. + The corresponding targets are selected accordingly""" + randstart = torch.randint( + 0, + 1 + max(0, mixture.shape[1] - self.hparams.training_signal_len), + (1,), + ).item() + targets = targets[ + :, randstart : randstart + self.hparams.training_signal_len, : + ] + mixture = mixture[ + :, randstart : randstart + self.hparams.training_signal_len + ] + return mixture, targets + + def reset_layer_recursively(self, layer): + """Reinitializes the parameters of the neural networks""" + if hasattr(layer, "reset_parameters"): + layer.reset_parameters() + for child_layer in layer.modules(): + if layer != child_layer: + self.reset_layer_recursively(child_layer) + + def save_results(self, test_data): + """This script computes the SDR and SI-SNR metrics and saves + them into a csv file""" + + # This package is required for SDR computation + from mir_eval.separation import bss_eval_sources + + # Create folders where to store audio + save_file = os.path.join(self.hparams.output_folder, "test_results.csv") + + # Variable init + all_sdrs = [] + all_sdrs_i = [] + all_sisnrs = [] + all_sisnrs_i = [] + csv_columns = ["snt_id", "sdr", "sdr_i", "si-snr", "si-snr_i"] + + test_loader = sb.dataio.dataloader.make_dataloader( + test_data, **self.hparams.dataloader_opts + ) + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: + writer = csv.DictWriter(results_csv, fieldnames=csv_columns) + writer.writeheader() + + # Loop over all test sentence + with tqdm(test_loader, dynamic_ncols=True) as t: + for i, batch in enumerate(t): + # Apply Separation + mixture, mix_len = batch.mix_sig + snt_id = batch.id + targets = [batch.s1_sig, batch.s2_sig] + if self.hparams.num_spks == 3: + targets.append(batch.s3_sig) + + 
with torch.no_grad(): + predictions, targets = self.compute_forward( + batch.mix_sig, targets, sb.Stage.TEST + ) + + # Compute SI-SNR + sisnr = self.compute_objectives(predictions, targets) + + # Compute SI-SNR improvement + mixture_signal = torch.stack( + [mixture] * self.hparams.num_spks, dim=-1 + ) + mixture_signal = mixture_signal.to(targets.device) + sisnr_baseline = self.compute_objectives( + mixture_signal, targets + ) + sisnr_i = sisnr - sisnr_baseline + + # Compute SDR + sdr, _, _, _ = bss_eval_sources( + targets[0].t().cpu().numpy(), + predictions[0].t().detach().cpu().numpy(), + ) + + sdr_baseline, _, _, _ = bss_eval_sources( + targets[0].t().cpu().numpy(), + mixture_signal[0].t().detach().cpu().numpy(), + ) + + sdr_i = sdr.mean() - sdr_baseline.mean() + + # Saving on a csv file + row = { + "snt_id": snt_id[0], + "sdr": sdr.mean(), + "sdr_i": sdr_i, + "si-snr": -sisnr.item(), + "si-snr_i": -sisnr_i.item(), + } + writer.writerow(row) + + # Metric Accumulation + all_sdrs.append(sdr.mean()) + all_sdrs_i.append(sdr_i.mean()) + all_sisnrs.append(-sisnr.item()) + all_sisnrs_i.append(-sisnr_i.item()) + + row = { + "snt_id": "avg", + "sdr": np.array(all_sdrs).mean(), + "sdr_i": np.array(all_sdrs_i).mean(), + "si-snr": np.array(all_sisnrs).mean(), + "si-snr_i": np.array(all_sisnrs_i).mean(), + } + writer.writerow(row) + + logger.info(f"Mean SISNR is {np.array(all_sisnrs).mean()}") + logger.info(f"Mean SISNRi is {np.array(all_sisnrs_i).mean()}") + logger.info(f"Mean SDR is {np.array(all_sdrs).mean()}") + logger.info(f"Mean SDRi is {np.array(all_sdrs_i).mean()}") + + def save_audio(self, snt_id, mixture, targets, predictions): + "saves the test audio (mixture, targets, and estimated sources) on disk" + + # Create output folder + save_path = os.path.join(self.hparams.save_folder, "audio_results") + if not os.path.exists(save_path): + os.mkdir(save_path) + + for ns in range(self.hparams.num_spks): + # Estimated source + signal = predictions[0, :, ns] + signal = 
signal / signal.abs().max() + save_file = os.path.join( + save_path, f"item{snt_id}_source{ns + 1}hat.wav" + ) + audio_io.save( + save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate + ) + + # Original source + signal = targets[0, :, ns] + signal = signal / signal.abs().max() + save_file = os.path.join( + save_path, f"item{snt_id}_source{ns + 1}.wav" + ) + audio_io.save( + save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate + ) + + # Mixture + signal = mixture[0][0, :] + signal = signal / signal.abs().max() + save_file = os.path.join(save_path, f"item{snt_id}_mix.wav") + audio_io.save( + save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate + ) + + +# from: recipes/LibriMix/separation/train.py +def dataio_prep(hparams): + """Creates data processing pipeline""" + + # 1. Define datasets + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_data"], + replacements={"data_root": hparams["data_folder_nspks"]}, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_data"], + replacements={"data_root": hparams["data_folder_nspks"]}, + ) + + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_data"], + replacements={"data_root": hparams["data_folder_nspks"]}, + ) + + datasets = [train_data, valid_data, test_data] + + # 2. 
Provide audio pipelines + + @sb.utils.data_pipeline.takes("mix_wav") + @sb.utils.data_pipeline.provides("mix_sig") + def audio_pipeline_mix(mix_wav): + mix_sig = sb.dataio.dataio.read_audio(mix_wav) + return mix_sig + + @sb.utils.data_pipeline.takes("s1_wav") + @sb.utils.data_pipeline.provides("s1_sig") + def audio_pipeline_s1(s1_wav): + s1_sig = sb.dataio.dataio.read_audio(s1_wav) + return s1_sig + + @sb.utils.data_pipeline.takes("s2_wav") + @sb.utils.data_pipeline.provides("s2_sig") + def audio_pipeline_s2(s2_wav): + s2_sig = sb.dataio.dataio.read_audio(s2_wav) + return s2_sig + + if hparams["num_spks"] == 3: + + @sb.utils.data_pipeline.takes("s3_wav") + @sb.utils.data_pipeline.provides("s3_sig") + def audio_pipeline_s3(s3_wav): + s3_sig = sb.dataio.dataio.read_audio(s3_wav) + return s3_sig + + if hparams["use_wham_noise"]: + + @sb.utils.data_pipeline.takes("noise_wav") + @sb.utils.data_pipeline.provides("noise_sig") + def audio_pipeline_noise(noise_wav): + noise_sig = sb.dataio.dataio.read_audio(noise_wav) + return noise_sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_mix) + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s1) + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s2) + if hparams["num_spks"] == 3: + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s3) + + if hparams["use_wham_noise"]: + print("Using the WHAM! 
noise in the data pipeline") + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_noise) + + if (hparams["num_spks"] == 2) and hparams["use_wham_noise"]: + sb.dataio.dataset.set_output_keys( + datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "noise_sig"] + ) + elif (hparams["num_spks"] == 3) and hparams["use_wham_noise"]: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"], + ) + elif (hparams["num_spks"] == 2) and not hparams["use_wham_noise"]: + sb.dataio.dataset.set_output_keys( + datasets, ["id", "mix_sig", "s1_sig", "s2_sig"] + ) + else: + sb.dataio.dataset.set_output_keys( + datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig"] + ) + + return train_data, valid_data, test_data + + +if __name__ == "__main__": # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - run_opts["auto_mix_prec"] = hparams["auto_mix_prec"] # Initialize ddp (useful only for multi-GPU DDP training) sb.utils.distributed.ddp_init_group(run_opts) # Logger info - logger = logging.getLogger(__name__) + logger = get_logger(__name__) + + # If device is cpu use precision='bf16' # Create experiment directory sb.create_experiment_directory( @@ -50,13 +535,12 @@ overrides=overrides, ) - # Check if storage folder for dataset exists - if not hparams["data_folder"]: - print("Please, specify a valid data_folder for dataset storage") - sys.exit(1) + # Update precision to bf16 if the device is CPU and precision is fp16 + if run_opts.get("device") == "cpu" and hparams.get("precision") == "fp16": + hparams["precision"] = "bf16" # Data preparation - from recipes.Aishell1Mix.prepare_data import prepare_aishell1mix + from prepare_data import prepare_aishell1mix run_on_main( prepare_aishell1mix, @@ -71,11 +555,8 @@ "datamodes": 
hparams["data_modes"], }, ) - hparams["data_folder"] += f'/aishell1mix/Aishell1Mix{hparams["num_spks"]}' # Create dataset objects - from recipes.LibriMix.separation.train import dataio_prep - if hparams["dynamic_mixing"]: from dynamic_mixing import ( dynamic_mix_data_prep_aishell1mix as dynamic_mix_data_prep, @@ -116,10 +597,10 @@ os.path.normpath(hparams["base_folder_dm"]) + "_processed" ) - # Colleting the hparams for dynamic batching + # Collecting the hparams for dynamic batching dm_hparams = { "train_data": hparams["train_data"], - "data_folder": hparams["data_folder"], + "data_folder": hparams["data_folder_nspks"], "base_folder_dm": hparams["base_folder_dm"], "sample_rate": hparams["sample_rate"], "num_spks": hparams["num_spks"], @@ -128,14 +609,6 @@ } train_data = dynamic_mix_data_prep(dm_hparams) - - # Inheriting data preparation from librimix. It uses these variables: - # hparams["data_folder"] - # hparams["train_data"] - # hparams["valid_data"] - # hparams["test_data"] - # hparams["num_spks"] - # hparams["use_wham_noise"] _, valid_data, test_data = dataio_prep(hparams) else: train_data, valid_data, test_data = dataio_prep(hparams) @@ -145,39 +618,7 @@ run_on_main(hparams["pretrained_separator"].collect_files) hparams["pretrained_separator"].load_collected() - from recipes.LibriMix.separation.train import Separation - # Brain class initialization - # Inheriting the Separation class from librimix. 
It uses these variables: - # hparams["num_spks"] - # hparams["use_speedperturb"] - # hparams["use_rand_shift"] - # hparams["use_wham_noise"] - # hparams["use_wavedrop"] - # hparams["wavedrop"] - # hparams["limit_training_signal_len"] - # hparams["Encoder"] - # hparams["MaskNet"] - # hparams["Decoder"] - # hparams["loss"] - # hparams["threshold_byloss"] - # hparams["threshold"] - # hparams["loss_upper_lim"] - # hparams["clip_grad_norm"] - # hparams["save_audio"] - # hparams["n_audio_to_save"] - # hparams["lr_scheduler"] - # hparams["optimizer"] - # hparams["train_logger"] - # hparams["epoch_counter"] - # hparams["speedperturb"] - # hparams["min_shift"] - # hparams["max_shift"] - # hparams["output_folder"] - # hparams["dataloader_opts"] - # hparams["save_folder"] - # hparams["sample_rate"] - separator = Separation( modules=hparams["modules"], opt_class=hparams["optimizer"], @@ -190,15 +631,15 @@ if "pretrained_separator" not in hparams: for module in separator.modules.values(): separator.reset_layer_recursively(module) - if not hparams["test_only"]: - # Training - separator.fit( - separator.hparams.epoch_counter, - train_data, - valid_data, - train_loader_kwargs=hparams["dataloader_opts"], - valid_loader_kwargs=hparams["dataloader_opts"], - ) + + # Training + separator.fit( + separator.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_opts"], + valid_loader_kwargs=hparams["dataloader_opts"], + ) # Eval separator.evaluate(test_data, min_key="si-snr") diff --git a/recipes/AudioMNIST/audiomnist_prepare.py b/recipes/AudioMNIST/audiomnist_prepare.py new file mode 100644 index 0000000000..c5b1391300 --- /dev/null +++ b/recipes/AudioMNIST/audiomnist_prepare.py @@ -0,0 +1,814 @@ +""" +Data preparation for the AudioMNIST dataset + +Data download: https://github.com/soerenab/AudioMNIST.git +Meta info download: https://www.dropbox.com/scl/fi/ekibujzmvakufvm31ptrf/audiominist-meta.zip?rlkey=69vwmqcoc1xl7t5j94yjilxoc&dl=1 + +By default, 
the script will automatically download the dataset and the meta information. + +Author +------ +Artem Ploujnikov 2023 +Mirco Ravanelli 2023 +""" + +import csv +import json +import math +import os +from functools import partial +from glob import glob +from subprocess import list2cmdline + +import torchaudio +from torchaudio import functional as F +from tqdm.auto import tqdm + +import speechbrain as sb +from speechbrain.dataio.dataio import load_pkl, save_pkl +from speechbrain.utils.data_utils import download_file +from speechbrain.utils.logger import get_logger +from speechbrain.utils.superpowers import run_shell + +DEFAULT_SPLITS = ["train", "valid", "test"] +DEFAULT_AUDIOMNIST_REPO = "https://github.com/soerenab/AudioMNIST.git" +DEFAULT_METADATA_REPO = "https://www.dropbox.com/scl/fi/ekibujzmvakufvm31ptrf/audiominist-meta.zip?rlkey=69vwmqcoc1xl7t5j94yjilxoc&dl=1" +DEFAULT_SRC_SAMPLE_RATE = 48000 +DEFAULT_TGT_SAMPLE_RATE = 48000 +DB_BASE = 10.0 +DB_MULTIPLIER = 0.05 +OPT_FILE = "opt_audiomnist_prepare.pkl" + + +logger = get_logger(__name__) + + +def prepare_audiomnist( + data_folder, + save_folder, + train_json, + valid_json, + test_json, + metadata_folder=None, + splits=DEFAULT_SPLITS, + download=True, + audiomnist_repo=None, + metadata_repo=None, + src_sample_rate=DEFAULT_SRC_SAMPLE_RATE, + tgt_sample_rate=DEFAULT_TGT_SAMPLE_RATE, + trim=True, + trim_threshold=-30.0, + norm=True, + highpass=True, + process_audio=None, + skip_prep=False, +): + """Auto-downloads and prepares the AudioMNIST dataset + + Arguments + --------- + data_folder: str + the folder where the original dataset exists. It assumes the data are stored + in data_folder/audiomnist_original. If not, data will be automatically downloaded here. + save_folder: str + the destination folder + train_json: str + the destination of the training data manifest JSON file. + valid_json: str + the destination of the valid data manifest JSON file. 
+ test_json: str + the destination of the test data manifest JSON file. + metadata_folder: str + the folder for additional metadata + splits: list + List of splits to prepare. + download: bool + whether the dataset and meta info should be auto-downloaded (enabled by default) + audiomnist_repo: str + the URL of the AudioMNIST repository + metadata_repo: str + the URL of the repository with meta information (e.g., train, valid, test splits) + src_sample_rate: int + the source sampling rate + tgt_sample_rate: int + the target sampling rate + trim: bool + whether to trim silence from the beginning and the end. + Ignored if process_audio is provided + trim_threshold: bool + the trimming threshold, in decibels. + Ignored if process_audio is provided + norm: bool + whether to normalize the amplitude between -1. and 1. + Ignored if process_audio is provided + highpass: bool + Whether to apply a highpass (> 70 Hz) filter + process_audio: callable + a custom function used to process audio files - instead of + the standard transform (resample + normalize + trim) + skip_prep: bool + whether preparation should be skipped + + Returns + ------- + None + """ + if skip_prep: + return + + # Create a dictionary with all the data-manifest files. + json_files = {"train": train_json, "valid": valid_json, "test": test_json} + + # Check if the target folder exists. Create it if it does not. 
+ if not os.path.exists(save_folder): + os.makedirs(save_folder) + + if metadata_folder is None: + metadata_folder = os.path.join(data_folder, "metadata") + + conf = { + "trim_threshold": trim_threshold, + "norm": norm, + "tgt_sample_rate": tgt_sample_rate, + } + + save_opt = os.path.join(save_folder, OPT_FILE) + + if skip(json_files, save_opt, conf): + logger.info("Skipping preparation, completed in previous run.") + return + else: + logger.info("Data_preparation...") + + # Path where the original dataset will be downloaded + data_folder_original = os.path.join(data_folder, "audiomnist_original") + if not os.path.exists(data_folder_original): + os.makedirs(data_folder_original) + + # Download AudioMNIST if not present + if not os.path.exists(data_folder) or not os.listdir(data_folder_original): + if download: + if not audiomnist_repo: + audiomnist_repo = DEFAULT_AUDIOMNIST_REPO + download_dataset(data_folder_original, audiomnist_repo) + else: + raise ValueError(f"AudioMNIST not found in {data_folder}") + + # Download meta info (needed for train, valid, and test splits) + if not os.path.exists(metadata_folder) or not os.listdir(metadata_folder): + if download: + if not metadata_repo: + metadata_repo = DEFAULT_METADATA_REPO + metadata_file = os.path.join(metadata_folder, "metadata.zip") + download_file(metadata_repo, metadata_file, unpack=True) + else: + raise ValueError(f"Metadata not found in {metadata_folder}") + + # Set up the audio preprocessing function + if not process_audio: + process_audio = partial( + process_audio_default, + src_sample_rate=src_sample_rate, + tgt_sample_rate=tgt_sample_rate, + trim=trim, + trim_threshold=trim_threshold, + norm=norm, + highpass=highpass, + ) + + # Get file lists for train/valid/test splits + splits = get_splits(metadata_folder, splits) + json_files = {"train": train_json, "valid": valid_json, "test": test_json} + + digit_lookup_file_name = os.path.join(metadata_folder, "digits.csv") + + # Read the digit look-up file 
providing annotations text and + # phonemes + lookup = read_digit_lookup(digit_lookup_file_name) + + # Convert the dataset + convert_dataset( + src=data_folder_original, + tgt=save_folder, + splits=splits, + json_files=json_files, + lookup=lookup, + process_audio=process_audio, + sample_rate=tgt_sample_rate, + ) + + # saving options + save_pkl(conf, save_opt) + + +def skip(json_files, save_opt, conf): + """ + Detect when the librispeech data prep can be skipped. + + Arguments + --------- + json_files : dict + Dictionary containing the paths where json files will be stored for train, valid, and test. + + save_opt: str + Path to the file where options will be saved. + + conf : dict + The configuration options to ensure they haven't changed. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + + # Checking csv files + skip = any(not os.path.isfile(json_file) for json_file in json_files) + + # Checking saved options + if skip is True: + if os.path.isfile(save_opt): + opts_old = load_pkl(save_opt) + if opts_old == conf: + skip = True + else: + skip = False + else: + skip = False + + return skip + + +def get_splits(metadata_folder, splits): + """Retrieves the train/valid/test file splits + + Arguments + --------- + metadata_folder: str + the path to auxiliary data + + splits: list + the list of splits to prepare + + Returns + ------- + result: dict + a dictionary of file splits + """ + split_files = { + split: os.path.join(metadata_folder, f"{split}.txt") for split in splits + } + return { + split: read_file_list(file_path) + for split, file_path in split_files.items() + } + + +def read_file_list(file_name): + """Reads a file list with files being listed one per line + + Arguments + --------- + file_name: str + the file_name + + Returns + ------- + result: lists + the file list + """ + with open(file_name, encoding="utf-8") as list_file: + return [line.strip() for line in list_file] + + +def 
download_dataset(data_folder, repo_url): + """Downloads the dataset from a GIT repository + + Arguments + --------- + data_folder: str + the destination folder + repo_url: str + the repository URL + """ + cmd = list2cmdline(["git", "clone", repo_url, data_folder]) + output, err, return_code = run_shell(cmd) + if return_code != 0: + raise DownloadError(output, err) + + +class DownloadError(Exception): + """Thrown when a download attempt fails + + Arguments + --------- + output: str + the command output + err: str + stderr contents + """ + + FORMAT_MSG = "Unable to download the dataset: {output} - {err}" + + def __init__(self, output, err): + msg = self.FORMAT_MSG.format(output=output, err=err) + super().__init__(msg) + + +SPEAKER_META_MAP = { + "native speaker": "native_speaker", + "recordingdate": "recording_date", + "recordingroom": "recording_room", +} + +BOOL_MAP = {"yes": True, "no": False} + + +def to_bool(value): + """Converts a yes/no value to a Boolean + + Arguments + --------- + value: str + A string: "yes" or "no + + Returns + ------- + result: bool + True if the value is "yes" + False if the value is "no" + """ + return BOOL_MAP[value] + + +def convert_date(value): + """Converts a date as recorded in AudioMNIST to an ISO + date string + + + Arguments + --------- + value: str + a value, as encountered in AudioMNIST + Example: 17-06-26-17-57-29 + + Returns + ------- + result: str + an ISO date string corresponding to the date provided + """ + year, month, day, hour, minute, second = value.split("-") + return f"20{year}-{month}-{day}T{hour}:{minute}:{second}" + + +SPEAKER_META_VALUES_MAP = { + "native_speaker": to_bool, + "recording_date": convert_date, +} + + +def read_meta(file_name): + """Reads a metadata file + + Arguments + --------- + file_name: str + the metadata file name + + Returns + ------- + result: dict + raw metadata + """ + with open(file_name, encoding="utf-8") as meta_file: + return json.load(meta_file) + + +def convert_value(key, value, 
conversion_map): + """Converts a value using a map + + Arguments + --------- + key: str + the item key + value: object + the value + conversion_map: dict + a dictionary with keys corresponding to keys in the original + dataset and conversion function as values + + Returns + ------- + value: object + the converted value (or the original value if no conversion + function is found in the map) + """ + conv_fn = conversion_map.get(key) + if conv_fn: + value = conv_fn(value) + return value + + +def convert_speaker_meta_keys(speaker_meta): + """Converts the speaker metadata keys to the target format + + Arguments + --------- + speaker_meta: dict + raw speaker metadata + + Returns + ------- + result: dict + Mapped metadata + """ + return { + SPEAKER_META_MAP.get(key, key): value + for key, value in speaker_meta.items() + } + + +def convert_speaker_meta_values(speaker_meta): + """Convert the speaker metadata values to the target format + + Arguments + --------- + speaker_meta: dict + raw speaker metadata + + Returns + ------- + result: dict + the converted metadata + """ + return { + key: convert_value(key, value, SPEAKER_META_VALUES_MAP) + for key, value in speaker_meta.items() + } + + +def convert_speaker_meta(speaker_meta): + """Converts speaker metadata to the target format + + Arguments + --------- + speaker_meta: dict + the raw speaker metadata + + Returns + ------- + speaker_meta: dict + the converted metadata + """ + speaker_meta = convert_speaker_meta_keys(speaker_meta) + speaker_meta = convert_speaker_meta_values(speaker_meta) + return speaker_meta + + +def get_wav_files(tgt_split_path): + """Returns all wave files at the specified path + + Arguments + --------- + tgt_split_path: str + the path to the target data split + + Returns + ------- + result: list + a list of file names + """ + wavs_pattern = os.path.join(tgt_split_path, "**", "*.wav") + return sorted(glob(wavs_pattern)) + + +def process_files(wav_files, process_audio, sample_rate): + """Applies 
post-processing to a data split + + Arguments + --------- + wav_files: list + a list of (src_file_name, tgt_file_name) tuples + process_audio: callable + the audio processing function + sample_rate: int + the sample rate + + Yields + ------ + tgt_file_name: str + The name of the file + result: dict + extra metadata + """ + folders = set( + os.path.dirname(tgt_file_name) for _, tgt_file_name in wav_files + ) + for folder in folders: + if not os.path.exists(folder): + os.makedirs(folder) + for src_file_name, tgt_file_name in tqdm(wav_files): + result = process_file( + src_file_name, tgt_file_name, process_audio, sample_rate + ) + yield tgt_file_name, result + + +def process_file(src_file_name, tgt_file_name, process_audio, sample_rate): + """Processes a single audio file + + Arguments + --------- + src_file_name: str + the source file name + tgt_file_name: str + the target file name + process_audio: callable + the audio processing function + sample_rate: int + the sampling rate + + Returns + ------- + metadata : dict + Includes the length in samples "len" and in seconds "len_s" + """ + sig = sb.dataio.dataio.read_audio(src_file_name) + sig = process_audio(sig) + + sb.dataio.dataio.write_audio(tgt_file_name, sig, sample_rate) + + return {"len": len(sig), "len_s": len(sig) / sample_rate} + + +def get_item_id(file_name): + """Returns the item ID, which is the file name without the extension + + Arguments + --------- + file_name: str + the file name + + Returns + ------- + item_id: str + the item ID corresponding to the file name + """ + _, file_name = os.path.split(file_name) + file_base_name = os.path.basename(file_name) + file_base_name_noext, _ = os.path.splitext( + file_base_name + ) # cspell:ignore noext + return file_base_name_noext + + +def get_file_metadata(meta, split, file_list, lookup): + """Returns a generator with metadata for each file + + Arguments + --------- + meta: dict + the speaker metadata dictionary + split: str + the split identifier ("train", 
"valid" or "test") + file_list: list + the list of files + lookup: dict + the digit metadata lookup (for text/phoneme transcriptions) + + Yields + ------ + item_id: str + the ID of the item + file_meta: dict + the metadata - to be saved + """ + for file_path in file_list: + item_id = get_item_id(file_path) + file_name = os.path.basename(file_path) + digit, speaker_id, _ = item_id.split("_") + speaker_meta = meta[speaker_id] + file_meta = { + "file_name": f"{{data_root}}/dataset/{split}/{speaker_id}/{file_name}", + "digit": digit, + "speaker_id": speaker_id, + } + file_meta.update(convert_speaker_meta(speaker_meta)) + digit_data = lookup[digit] + file_meta.update(digit_data) + yield item_id, file_meta + + +def convert_split( + src, + tgt, + split, + file_list, + meta, + metadata_file_path, + lookup, + process_audio, + sample_rate, +): + """ + Converts a single split of data + + src: str + the source path + tgt: str + the target path + split: str + the split identifier + file_list: list + the list of files in the data split + meta: dict + the metadata dictionary + metadata_file_path: str + the path where to store the data-manifest JSON file + lookup: dict + the digit look-up file + process_audio: callable + the function that will be applied to each audio file for processing + sample_rate: int + the target sample rate + + """ + metadata = dict(get_file_metadata(meta, split, file_list, lookup)) + + wav_files = [ + ( + os.path.join(src, file_name), + os.path.join( + tgt, metadata[get_item_id(file_name)]["file_name"] + ).replace("{data_root}", ""), + ) + for file_name in file_list + ] + for file_path, process_meta in process_files( + wav_files, process_audio, sample_rate + ): + item_id = get_item_id(file_path) + metadata[item_id].update(process_meta) + + logger.info(f"Saving metadata to {metadata_file_path}") + with open(metadata_file_path, "w", encoding="utf-8") as metadata_file: + json.dump(metadata, metadata_file, indent=2) + + +def convert_dataset( + src, tgt, 
splits, json_files, lookup, process_audio, sample_rate +): + """Converts the dataset from the original format to the SpeechBrain-friendly + format + + Arguments + --------- + src: str + the source path + + tgt: str + the target path + + splits: dict + a dictionary with split identifiers as keys + and the file list for the split corresponding to the + key as the value + json_files: dict + a dictionary containing the path where to store the data manifest files + for each split + lookup: dict + the digit look-up + + process_audio: callable + the audio processing function + + sample_rate: int + the target sample rate + """ + if not os.path.exists(tgt): + print(f"Creating directory {tgt}") + + meta_file_name = os.path.join(src, "data", "audioMNIST_meta.txt") + meta = read_meta(file_name=meta_file_name) + + for split, file_list in splits.items(): + logger.info("Converting split %s", split) + convert_split( + src=src, + tgt=tgt, + split=split, + file_list=file_list, + meta=meta, + metadata_file_path=json_files[split], + lookup=lookup, + process_audio=process_audio, + sample_rate=sample_rate, + ) + + +def trim_sig(sig, threshold): + """A simple energy threshold implementation to remove silence at the + beginning and at the end of a file + + Arguments + --------- + sig: torch.Tensor + raw audio + threshold: float + the decibel threshold + + Returns + ------- + sig: torch.Tensor + The trimmed signal. 
+ """ + threshold_amp = math.pow(DB_BASE, threshold * DB_MULTIPLIER) + sig = sig / sig.abs().max() + en_sig = sig**2 + sound_pos = (en_sig > threshold_amp).nonzero() + first, last = sound_pos[0], sound_pos[-1] + return sig[first:last] + + +def process_audio_default( + sig, + norm=True, + trim=True, + highpass=True, + src_sample_rate=48000, + tgt_sample_rate=22050, + trim_threshold=-30.0, +): + """Standard audio preprocessing / conversion + + Arguments + --------- + sig: torch.Tensor + Signal to process + norm: bool + whether to normalize + trim: bool + whether to trim silence at the beginning and at the end + highpass: bool + whether to apply a highpass filter (> 70 Hz) + src_sample_rate: int + the sample rate at which the files are recorded + tgt_sample_rate: int + the target sample rate + trim_threshold: float + the decibels threshold for trimming the file + + Returns + ------- + sig : torch.Tensor + The processed signal + """ + # Resample + if src_sample_rate != tgt_sample_rate: + sig = F.resample(sig, src_sample_rate, tgt_sample_rate) + # VAD + if trim: + sig = trim_sig(sig, trim_threshold) + + # High pass filter (> 70 Hz) + if highpass: + effects = [["highpass", "-2", "70"]] + sig, _ = torchaudio.sox_effects.apply_effects_tensor( + sig.unsqueeze(0), tgt_sample_rate, effects, channels_first=True + ) + sig = sig.squeeze(0) + + # Normalize + if norm: + sig = sig / sig.abs().max() + return sig + + +def read_digit_lookup(file_name): + """Reads the digit look-up CSV file + + Arguments + --------- + file_name: str + the file name + + Returns + ------- + result: dict + a dictionary similar the following + { + "2": { + "char": "two", + "phn": ["T", "UW"] + } + } + + """ + with open(file_name, encoding="utf-8") as lookup_file: + reader = csv.DictReader(lookup_file) + lookup = {row["digit"]: row for row in reader} + for value in lookup.values(): + del value["digit"] + value["phn"] = value["phn"].split(" ") + return lookup diff --git 
a/recipes/AudioMNIST/diffusion/README.md b/recipes/AudioMNIST/diffusion/README.md new file mode 100644 index 0000000000..010f574b1d --- /dev/null +++ b/recipes/AudioMNIST/diffusion/README.md @@ -0,0 +1,104 @@ +# Denoising Diffusion Probabilistic Model +This folder contains scripts for running a Denoising Diffusion Probabilistic Model +generative model with the [AudioMNIST](https://huggingface.co/datasets/flexthink/audiomnist) dataset, which contains recordings +of spoken English digits in a variety of voices and accents. + +https://arxiv.org/pdf/2006.11239.pdf + +Denoising Diffusion Probabilistic Models are a class of generative +models based on the key idea of training a neural network model, which, given a sample with added noise, can identify that noise and, +during inference, can generate a sample by reconstructing it from +pure noise in a stepwise manner. The method is inspired by Langevin +dynamics. + +Generation can be unconditioned, where the model will generate an +arbitrary sample from the target distribution, or conditioned, where +it will generate samples given a class label or a prompt. + +Diffusion models can operate in the original sample space, but for +high-dimensional samples, such as full-resolution images, this can +be too slow during inference time. A common approach to addressing +this is latent diffusion (https://arxiv.org/abs/2112.10752). This +recipe supports both sample-space (spectrogram-space) and latent +diffusion. + +This recipe implements a basic DDPM to generate speech samples using +the AudioMNIST dataset. It can be used to train an unconditioned model, a model conditioned on the speaker or on the digit label. + +# Training + +## Unconditioned Model +To train the unconditioned model, run the following command: + + ```bash + python train.py hparams/train.yaml --data_folder=your/data/folder + ``` + +The required data will be automatically downloaded into the specified data folder.
Keep in mind that AudioMNIST is a relatively small dataset, which may pose challenges in training a diffusion model capable of generating extremely high-quality samples. Nonetheless, the generated samples should maintain intelligibility and exhibit digit-like sounds. + + +## Speaker-Conditioned Model +To train the model with speaker conditioning, execute the following command: + +```bash +python train.py hparams/train.yaml --speaker_conditioned true --data_folder=your/data/folder +``` + +In this case, you should generate intelligible digits. When the generation process is conditioned on the same speaker, the digits sound as if they were generated by a speaker with the same (or very similar) speaker characteristics. + + +## Digit-Conditioned Model (Simplified TTS) +For a model focused on digit conditioning, useful for a simplified Text-to-Speech (TTS) use case, run the following command: + +```bash +python train.py hparams/train.yaml --digit_conditioned true --data_folder=your/data/folder +``` +In this case, you should generate intelligible digits. When the generation process is conditioned on the same digit, you should generate the same digit (normally from different speakers). + +## Latent Diffusion Model +To train the latent diffusion model, use the following command: + +```bash +python train.py hparams/train_latent.yaml --data_folder=your/data/folder +``` + +The quality of the generated digit is lower with latent diffusion. The generated signals should, however, sound like a digit. + + +## Samples, checkpoints and Training logs +The training scripts will produce results that can be found in the `/samples` directory after each training epoch.
+ +The output folder containing the generated samples, model checkpoints and training logs for all the aforementioned experiments can be found here: +https://www.dropbox.com/sh/szpmkp8aok1nquf/AABziohiZ8UhYBJz5TXscu93a?dl=0 + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` + + diff --git 
a/recipes/AudioMNIST/diffusion/audiomnist_prepare.py b/recipes/AudioMNIST/diffusion/audiomnist_prepare.py new file mode 120000 index 0000000000..038d4861e4 --- /dev/null +++ b/recipes/AudioMNIST/diffusion/audiomnist_prepare.py @@ -0,0 +1 @@ +../audiomnist_prepare.py \ No newline at end of file diff --git a/recipes/AudioMNIST/diffusion/hparams/train.yaml b/recipes/AudioMNIST/diffusion/hparams/train.yaml new file mode 100644 index 0000000000..9d6999c7a2 --- /dev/null +++ b/recipes/AudioMNIST/diffusion/hparams/train.yaml @@ -0,0 +1,256 @@ +# ################################# +# Basic training parameters for a spectrogram-based +# diffusion model +# +# Author: +# * Artem Ploujnikov 2022 +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +data_folder: !PLACEHOLDER +metadata_folder: null +output_folder: !ref ./results/diffusion/baseline/ +save_folder: !ref /save +data_save_folder: !ref /audiomnist_prepared +sample_folder: !ref /samples +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json +train_log: !ref /train_log.txt +skip_prep: False + +# The train logger writes training statistics to a file, as well as stdout. +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +ckpt_interval_minutes: 30 # save checkpoint every N min + +# Preparation Parameters +data_prepare_norm: False +data_prepare_trim: False +data_prepare_trim_threshold: -30. 
+data_prepare_sample_rate_src: 48000 +data_prepare_sample_rate_tgt: 16000 + +# Training Parameters +diffusion_mode: simple +train_len: 28520 +sort: len +batch_shuffle: True +number_of_epochs: 20 +batch_size: 16 # If GPU memory exceeds 32 GB, consider using batch_size: 32 +lr: 0.00020 +max_grad_norm: 0.05 +lr_warmup_steps: 500 +lr_cooldown_steps: 500 +lr_total_steps: !ref ( * ) // +lr_decay_every: 1000 +train_timesteps: 250 +adam_beta1: 0.95 +adam_beta2: 0.999 +adam_weight_decay: 0.000001 +adam_epsilon: 0.00000001 +downsample_factor: 8 +enable_train_metrics: True +enable_reference_samples: True +loss_l2_steps: 100000 +train_log_interval: 10 +train_diffusion_start_epoch: 1 +dropout: 0.0 +overfit_test: False +overfit_test_sample_count: 1 +overfit_test_epoch_data_count: 1000 +train_data_count: null +dataloader_options: + batch_size: !ref +use_tensorboard: True +tensorboard_logs: !ref /logs/ +rand_amplitude: True +min_amp: 0.1 +max_amp: 0.4 + +# Spectrogram Parameters +spec_n_fft: 1024 +spec_f_min: 0 +spec_f_max: 8000 +spec_n_mels: 80 +spec_power: 1 +spec_ref: 10.0 +spec_hop_length: 256 +spec_win_length: 1024 +spec_norm: "slaney" +spec_mel_scale: "slaney" +spec_norm_mean: 0. +spec_norm_std: 0.5 +spec_sample_size: 80 +spec_sample_min: -4.7 +spec_sample_max: 3.0 +min_level_db: -80.0 +pad_level_db: -50. 
+ +# Model Parameters +model_channels: 128 +model_num_res_blocks: 4 +diffusion_channels: 1 + +# Conditioning +emb_dim: !ref * 4 +digit_conditioned: False +digit_sample_count: 3 +digit_count: 10 +digit_emb_dim: !ref +speaker_conditioned: False +speaker_count: 60 +speaker_emb_dim: !ref +speaker_sample_count: 5 + + +# Vocoder Settings +vocoder_model: speechbrain/tts-hifigan-libritts-16kHz + +# Evaluation Parameters +eval_num_samples: 10 +samples_interval: 5 +eval_generate_audio: True +eval_show_progress: True +norm_out_sample: False +eval_time_steps: 40 + +# Feature extraction +compute_features: !new:speechbrain.nnet.containers.Sequential + spec: !new:torchaudio.transforms.MelSpectrogram + n_fft: !ref + f_min: !ref + f_max: !ref + n_mels: !ref + power: !ref + hop_length: !ref + win_length: !ref + norm: !ref + mel_scale: !ref + amp2db: !new:torchaudio.transforms.AmplitudeToDB + +min_level_norm: !new:speechbrain.processing.features.MinLevelNorm + min_level_db: !ref + +global_norm: !new:speechbrain.processing.features.GlobalNorm + norm_mean: !ref + norm_std: !ref + +dynamic_range_compression: !new:speechbrain.processing.features.DynamicRangeCompression + +compute_cost: !new:speechbrain.nnet.schedulers.ScheduledLoss + schedule: + - loss_fn: !name:speechbrain.nnet.losses.mse_loss + steps: !ref + - loss_fn: !name:speechbrain.nnet.losses.l1_loss + +use_cond_emb: + speaker: !ref + digit: !ref + +cond_emb: + speaker: + emb: !ref + emb_dim: !ref + key: speaker_label + sample_count: !ref + count: !ref + digit: + emb: !ref + emb_dim: !ref + key: digit_label + sample_count: !ref + count: !ref + +# To design a custom model, either just edit the simple CustomModel +# class that's listed here, or replace this `!new` call with a line +# pointing to a different file you've defined. 
+unet: !new:speechbrain.nnet.unet.UNetModel + in_channels: 1 + model_channels: !ref + out_channels: 1 + num_res_blocks: !ref + norm_num_groups: 32 + attention_resolutions: [8] + cond_emb: !ref + use_cond_emb: !ref + dropout: !ref + +noise: !new:speechbrain.nnet.diffusion.LengthMaskedGaussianNoise + length_dim: 2 + +emb_digit: !new:speechbrain.nnet.embedding.Embedding + num_embeddings: !ref + embedding_dim: !ref + +emb_speaker: !new:speechbrain.nnet.embedding.Embedding + num_embeddings: !ref + embedding_dim: !ref + +diffusion: !new:speechbrain.nnet.diffusion.DenoisingDiffusion + model: !ref + timesteps: !ref + noise: !ref + show_progress: !ref + sample_min: !ref + sample_max: !ref + +diffusion_sample_channels: !ref + +# The first object passed to the Brain class is this "Epoch Counter" +# which is saved by the Checkpointer so that training can be resumed +# if it gets interrupted at any point. +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +vocoder: !name:speechbrain.inference.vocoders.HIFIGAN.from_hparams + source: !ref + + +# Objects in "modules" dict will have their parameters moved to the correct +# device, as well as having train()/eval() called on them by the Brain class. +modules: + unet: !ref + diffusion: !ref + diffusion_sample: !ref + compute_features: !ref + dynamic_range_compression: !ref + min_level_norm: !ref + global_norm: !ref + emb_digit: !ref + emb_speaker: !ref + +# This optimizer will be constructed by the Brain class after all parameters +# are moved to the correct device. Then it will be added to the checkpointer. +opt_class: !name:torch.optim.Adam + lr: !ref + betas: !ref (, ) + weight_decay: !ref + eps: !ref + +# This function manages learning rate annealing over the epochs. +# We here use the simple lr annealing method that linearly decreases +# the lr from the initial value to the final one. 
+lr_annealing: !new:speechbrain.nnet.schedulers.WarmCoolDecayLRSchedule + lr: !ref + warmup: !ref + cooldown: !ref + total_steps: !ref + decay_every: !ref + +# This object is used for saving the state of training both so that it +# can be resumed if it gets interrupted, and also so that the best checkpoint +# can be later loaded for evaluation or inference. +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + unet: !ref + counter: !ref + lr_annealing: !ref + global_norm: !ref + emb_digit: !ref + emb_speaker: !ref diff --git a/recipes/AudioMNIST/diffusion/hparams/train_latent.yaml b/recipes/AudioMNIST/diffusion/hparams/train_latent.yaml new file mode 100644 index 0000000000..3c5b09d93f --- /dev/null +++ b/recipes/AudioMNIST/diffusion/hparams/train_latent.yaml @@ -0,0 +1,333 @@ +# ################################# +# Basic training parameters for a spectrogram-based +# diffusion model +# +# Author: +# * Artem Ploujnikov 2022 +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +data_folder: !PLACEHOLDER +metadata_folder: null +output_folder: !ref ./results/diffusion-latent-ae/ +save_folder: !ref /save +data_save_folder: !ref /audiomnist_prepared +sample_folder: !ref /samples +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json +train_log: !ref /train_log.txt +skip_prep: False + +# The train logger writes training statistics to a file, as well as stdout. +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Preparation Parameters +data_prepare_norm: True +data_prepare_trim: True +data_prepare_trim_threshold: -30. 
+data_prepare_sample_rate_src: 48000 +data_prepare_sample_rate_tgt: 16000 + +# Training Parameters +diffusion_mode: latent +train_len: 28520 +sort: null +batch_shuffle: True +number_of_epochs: 20 +batch_size: 16 # If GPU memory exceeds 32 GB, consider using batch_size: 32 +lr: 0.0005 +lr_warmup_steps: 500 +lr_cooldown_steps: 500 +lr_total_steps: !ref * +lr_decay_every: 1000 +lr_autoencoder: 0.001 +lr_autoencoder_warmup_steps: 500 +lr_autoencoder_cooldown_steps: 500 +lr_autoencoder_total_steps: !ref * +lr_autoencoder_decay_every: 1000 +max_grad_norm: 0.05 +train_timesteps: 250 +adam_beta1: 0.95 +adam_beta2: 0.999 +adam_weight_decay: 0.000001 +adam_epsilon: 0.00000001 +downsample_factor: 8 +latent_downsample_factor: 4 +enable_train_metrics: True +enable_reference_samples: True +enable_reconstruction_sample: True +loss_l2_steps: 100000 +loss_laplacian_weight: 0.1 +autoencoder_rec_loss_l2_steps: 1000 +train_log_interval: 50 +train_diffusion_start_epoch: 2 +train_autoencoder_stop_epoch: 10 +latent_mask_recompute_steps: 20 +latent_mask_offset: 3 +overfit_test: False +overfit_test_sample_count: 1 +overfit_test_epoch_data_count: 1000 +train_data_count: null +dataloader_options: + batch_size: !ref +use_tensorboard: True +tensorboard_logs: !ref /logs/ +rand_amplitude: True +min_amp: 0.1 +max_amp: 0.4 + +# Spectrogram Parameters +spec_n_fft: 1024 +spec_f_min: 0 +spec_f_max: 8000 +spec_n_mels: 80 +spec_power: 1 +spec_ref: 10.0 +spec_hop_length: 256 +spec_win_length: 1024 +spec_norm: "slaney" +spec_mel_scale: "slaney" +spec_norm_mean: 0. +spec_norm_std: 0.5 +spec_sample_size: 20 +spec_min_sample_size: 10 +spec_sample_min: -4.7 +spec_sample_max: 3.0 +min_level_db: -80.0 +pad_level_db: -50. +done_random_start_offset: 0. +done_random_end_offset: .5 + + +# Model Parameters +model_channels: 64 +model_norm_num_groups: 32 +model_num_res_blocks: 2 +model_dropout: 0. 
+autoencoder_channels: 32 +autoencoder_norm_num_groups: 32 +autoencoder_num_res_blocks: 1 +autoencoder_encoder_out_channels: 32 +autoencoder_latent_channels: 2 +autoencoder_dropout: 0.1 +latent_mask_value: -3. +autoencoder_use_fixup_norm: False +diffusion_channels: !ref +done_cnn_blocks: 2 +done_cnn_kernelsize: 3 +done_cnn_channels: [32, 32] +done_rnn_layers: 2 +done_rnn_neurons: 32 +done_dnn_blocks: 1 +done_dnn_neurons: 32 + +# Vocoder Settings +vocoder_model: speechbrain/tts-hifigan-libritts-16kHz + +# Evaluation Parameters +eval_num_samples: 10 +samples_interval: 5 +eval_generate_audio: True +eval_show_progress: True +norm_out_sample: True +eval_time_steps: 6 + +# Feature extraction +compute_features: !new:speechbrain.nnet.containers.Sequential + spec: !new:torchaudio.transforms.MelSpectrogram + n_fft: !ref + f_min: !ref + f_max: !ref + n_mels: !ref + power: !ref + hop_length: !ref + win_length: !ref + norm: !ref + mel_scale: !ref + amp2db: !new:torchaudio.transforms.AmplitudeToDB + +min_level_norm: !new:speechbrain.processing.features.MinLevelNorm + min_level_db: !ref + +global_norm: !new:speechbrain.processing.features.GlobalNorm + norm_mean: !ref + norm_std: !ref + +dynamic_range_compression: !new:speechbrain.processing.features.DynamicRangeCompression + +compute_cost_autoencoder_rec: !new:speechbrain.nnet.schedulers.ScheduledLoss + schedule: + - loss_fn: !name:speechbrain.nnet.losses.mse_loss + steps: !ref + - loss_fn: !name:speechbrain.nnet.losses.l1_loss + +compute_cost_autoencoder: !new:speechbrain.nnet.losses.AutoencoderLoss + rec_loss: !ref + +compute_cost_laplacian: !new:speechbrain.nnet.losses.LaplacianVarianceLoss + len_dim: 2 + +compute_cost_done: !name:speechbrain.nnet.losses.distance_diff_loss + +compute_cost: !new:speechbrain.nnet.schedulers.ScheduledLoss + schedule: + - loss_fn: !name:speechbrain.nnet.losses.mse_loss + steps: !ref + - loss_fn: !name:speechbrain.nnet.losses.l1_loss + + +# To design a custom model, either just edit the simple 
CustomModel +# class that's listed here, or replace this `!new` call with a line +# pointing to a different file you've defined. +unet: !new:speechbrain.nnet.unet.UNetModel + in_channels: !ref + model_channels: !ref + norm_num_groups: !ref + out_channels: !ref + num_res_blocks: !ref + attention_resolutions: [1, 2] + channel_mult: [1, 2] + dropout: !ref + +autoencoder: !new:speechbrain.nnet.unet.UNetNormalizingAutoencoder + in_channels: 1 + channel_mult: [1, 2, 4] + model_channels: !ref + norm_num_groups: !ref + encoder_num_res_blocks: !ref + encoder_attention_resolutions: [1, 2, 4] + decoder_num_res_blocks: !ref + decoder_attention_resolutions: [1, 2, 4] + encoder_out_channels: !ref + latent_channels: !ref + resblock_updown: True + len_dim: 2 + latent_mask_value: !ref + dropout: !ref + use_fixup_norm: !ref + +noise: !new:speechbrain.nnet.diffusion.LengthMaskedGaussianNoise + length_dim: 2 + +diffusion: !new:speechbrain.nnet.diffusion.DenoisingDiffusion + model: !ref + timesteps: !ref + noise: !ref + show_progress: !ref + sample_min: !ref + sample_max: !ref + +diffusion_latent: !new:speechbrain.nnet.diffusion.LatentDiffusion + autoencoder: !ref + diffusion: !ref + latent_downsample_factor: !ref + latent_pad_dim: [2, 3] + + +diffusion_sample_channels: !ref + + +done_detector: !new:speechbrain.nnet.utils.DoneDetector + model: !new:speechbrain.nnet.containers.Sequential + input_shape: [null, null, !ref ] + crdnn: !new:speechbrain.lobes.models.CRDNN.CRDNN + input_size: !ref + cnn_blocks: !ref + cnn_kernelsize: !ref + cnn_channels: !ref + rnn_layers: !ref + rnn_neurons: !ref + dnn_blocks: !ref + dnn_neurons: !ref + out: !new:torch.nn.Linear + in_features: !ref + out_features: 1 + act: !new:torch.nn.Sigmoid + out: !new:speechbrain.nnet.activations.Softmax + apply_log: False + dim: 1 + reshape: False + +# The first object passed to the Brain class is this "Epoch Counter" +# which is saved by the Checkpointer so that training can be resumed +# if it gets interrupted at any 
point. +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +vocoder: !name:speechbrain.inference.vocoders.HIFIGAN.from_hparams + source: !ref + + +# Objects in "modules" dict will have their parameters moved to the correct +# device, as well as having train()/eval() called on them by the Brain class. +modules: + unet: !ref + autoencoder: !ref + diffusion: !ref + diffusion_latent: !ref + diffusion_sample: !ref + compute_features: !ref + compute_cost_laplacian: !ref + dynamic_range_compression: !ref + min_level_norm: !ref + global_norm: !ref + done_detector: !ref + +# This optimizer will be constructed by the Brain class after all parameters +# are moved to the correct device. Then it will be added to the checkpointer. +opt_class: !name:torch.optim.AdamW + lr: !ref + betas: !ref (, ) + weight_decay: !ref + eps: !ref + +opt_class_autoencoder: !name:torch.optim.AdamW + lr: !ref + betas: !ref (, ) + weight_decay: !ref + eps: !ref + +opt_class_done: !name:torch.optim.AdamW + lr: !ref + betas: !ref (, ) + weight_decay: !ref + eps: !ref + +# This function manages learning rate annealing over the epochs. +# We here use the simple lr annealing method that linearly decreases +# the lr from the initial value to the final one. +lr_annealing: !new:speechbrain.nnet.schedulers.WarmCoolDecayLRSchedule + lr: !ref + warmup: !ref + cooldown: !ref + total_steps: !ref + decay_every: !ref + +lr_annealing_autoencoder: !new:speechbrain.nnet.schedulers.WarmCoolDecayLRSchedule + lr: !ref + warmup: !ref + cooldown: !ref + total_steps: !ref + decay_every: !ref + + +# This object is used for saving the state of training both so that it +# can be resumed if it gets interrupted, and also so that the best checkpoint +# can be later loaded for evaluation or inference. 
+checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + unet: !ref + autoencoder: !ref + counter: !ref + lr_annealing: !ref + lr_annealing_autoencoder: !ref + global_norm: !ref + done_detector: !ref diff --git a/recipes/AudioMNIST/diffusion/train.py b/recipes/AudioMNIST/diffusion/train.py new file mode 100644 index 0000000000..fd1c774bc1 --- /dev/null +++ b/recipes/AudioMNIST/diffusion/train.py @@ -0,0 +1,1662 @@ +#!/usr/bin/env python3 +"""Recipe for training a diffusion model on spectrogram data + +To run this recipe, do the following: +> python train.py hparams/train.yaml + +To read the code, first scroll to the bottom to see the "main" code. +This gives a high-level overview of what is going on, while the +Brain class definition provides the details of what happens +for each batch during training. + +Authors + * Artem Ploujnikov 2022 +""" + +import os +import sys +from collections import namedtuple +from enum import Enum + +import torch +from audiomnist_prepare import prepare_audiomnist +from hyperpyyaml import load_hyperpyyaml +from torchaudio import functional as AF + +import speechbrain as sb +from speechbrain.dataio.dataio import length_to_mask, write_audio +from speechbrain.dataio.dataset import apply_overfit_test +from speechbrain.utils import data_utils +from speechbrain.utils.data_utils import ( + dict_value_combinations, + dist_stats, + masked_max, + masked_mean, + masked_min, + masked_std, + match_shape, +) +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger +from speechbrain.utils.train_logger import plot_spectrogram + +logger = get_logger(__name__) + + +class DiffusionMode(Enum): + SIMPLE = "simple" + LATENT = "latent" + + +DiffusionPredictions = namedtuple( + "DiffusionPredictions", + [ + "pred", + "noise", + "noisy_sample", + "feats", + "lens", + "autoencoder_output", + "feats_done", + "lens_done", + "pred_done", + ], +) + + +# Brain class for 
speech enhancement training +class DiffusionBrain(sb.Brain): + """Class that manages the training loop. See speechbrain.core.Brain. + + Arguments + --------- + modules : dict of str:torch.nn.Module pairs + These modules are passed to the optimizer by default if they have + trainable parameters, and will have ``train()``/``eval()`` called on them. + opt_class : torch.optim class + A torch optimizer constructor that takes only the list of + parameters (e.g. a lambda or partial function definition). By default, + this will be passed all modules in ``modules`` at the + beginning of the ``fit()`` method. This behavior can be changed + by overriding the ``configure_optimizers()`` method. + hparams : dict + Each key:value pair should consist of a string key and a hyperparameter + that is used within the overridden methods. These will + be accessible via an ``hparams`` attribute, using "dot" notation: + e.g., self.hparams.model(x). + run_opts : dict + A set of options to change the runtime environment. 
+ checkpointer : Checkpointer + """ + + def __init__( + self, + modules=None, + opt_class=None, + hparams=None, + run_opts=None, + checkpointer=None, + ): + super().__init__(modules, opt_class, hparams, run_opts, checkpointer) + self.diffusion_mode = DiffusionMode(self.hparams.diffusion_mode) + self.use_done_detector = "done_detector" in self.modules + + def init_optimizers(self): + """Initializes the diffusion model optimizer - and the + autoencoder optimizer, if applicable""" + self.optimizers_dict = {} + if self.opt_class is not None: + self.optimizer = self.opt_class(self.modules.unet.parameters()) + if self.checkpointer is not None: + self.checkpointer.add_recoverable("optimizer", self.optimizer) + self.optimizers_dict["opt_class"] = self.optimizer + + if self.use_done_detector: + self.optimizer_done = self.hparams.opt_class_done( + self.modules.done_detector.parameters() + ) + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "optimizer_done", self.optimizer + ) + self.optimizers_dict["opt_class_done"] = self.optimizer_done + + if self.diffusion_mode == DiffusionMode.LATENT: + self.autoencoder_optimizer = self.hparams.opt_class_autoencoder( + self.modules.autoencoder.parameters() + ) + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "autoencoder_optimizer", self.autoencoder_optimizer + ) + self.optimizers_dict["opt_class_autoencoder"] = ( + self.autoencoder_optimizer + ) + + def compute_forward(self, batch, stage): + """Runs all the computation of that transforms the input into the + output probabilities over the N classes. + + Arguments + --------- + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + + Returns + ------- + predictions : torch.Tensor + torch.Tensor that contains the posterior probabilities over the N classes. + """ + + # We first move the batch to the appropriate device. 
+ batch = batch.to(self.device) + + # Compute features, embeddings, and predictions + feats, lens = self.prepare_features(batch, stage) + + autoencoder_out = None + cond_emb = None + if self.is_conditioned: + cond_labels = self.get_cond_labels(batch) + cond_emb = self.compute_cond_emb(cond_labels) + if self.diffusion_mode == DiffusionMode.LATENT: + mask_value = self.modules.global_norm.normalize( + self.mask_value_norm + ) + latent_mask_value = self.get_latent_mask_value(mask_value) + ( + train_sample_diffusion, + autoencoder_out, + ) = self.modules.diffusion_latent.train_sample_latent( + feats, + length=lens, + out_mask_value=mask_value, + cond_emb=cond_emb, + latent_mask_value=latent_mask_value, + ) + pred, noise, noisy_sample = train_sample_diffusion + else: + pred, noise, noisy_sample = self.modules.diffusion.train_sample( + feats, length=lens, cond_emb=cond_emb + ) + + pred_done, feats_done, lens_done = None, None, None + if self.use_done_detector: + feats_done, lens_done = self.prepare_features_done( + batch, feats, lens + ) + pred_done = self.modules.done_detector( + feats_done.squeeze(1), lens_done + ) + + # NOTE: lens can change because of the additional padding needed to account + # NOTE: for downsampling + return DiffusionPredictions( + pred, + noise, + noisy_sample, + feats, + lens, + autoencoder_out, + feats_done, + lens_done, + pred_done, + ) + + def compute_latent_mask_value(self, mask_value): + """Computes the value with which to mask the latent + space. 
The core idea is that masked space should + not produce any sound + + Arguments + --------- + mask_value: float + the value to be used for the mask in the original space + + Returns + ------- + latent_mask_value: float + the value that will be used in the latent space + """ + with torch.no_grad(): + fake_feats = ( + torch.ones( + 1, + 1, + self.hparams.spec_min_sample_size, + self.hparams.spec_n_mels, + ).to(self.device) + * mask_value + ) + length = torch.tensor([1.0]).to(self.device) + latent = self.modules.autoencoder.encode(fake_feats, length=length) + latent_mask_value = ( + latent[:, :, : self.hparams.latent_mask_offset, :].mean().item() + ) + return latent_mask_value + + def get_latent_mask_value(self, mask_value): + """Returns the latent mask value, recomputing it if necessary + + Arguments + --------- + mask_value: float + the value to be used for the mask in the original space + + Returns + ------- + latent_mask_value: float + the value that will be used in the latent space + """ + if ( + not self.latent_mask_value + or self.step < self.hparams.latent_mask_recompute_steps + ): + self.latent_mask_value = self.compute_latent_mask_value(mask_value) + return self.latent_mask_value + + def compute_cond_emb(self, labels): + """Computes conditioning embeddings for a set + of labels + + Arguments + --------- + labels: dict + A key -> label dictionary + + Returns + ------- + emb: dict + A key -> embedding dictionary + """ + cond_emb = {} + for key, emb_config in self.get_active_cond_emb().items(): + emb_module = emb_config["emb"] + emb = emb_module(labels[key]) + cond_emb[key] = emb + return cond_emb + + def get_cond_labels(self, batch): + """Returns the conditioning labels for the batch provided + based on information from the hparams file on which + conditioning labels are enabled + + Arguments + --------- + batch: PaddedBatch + a batch + + Returns + ------- + result: dict + the result + """ + return { + key: getattr(batch, emb_config["key"]) + for key, 
emb_config in self.hparams.cond_emb.items() + if self.hparams.use_cond_emb[key] + } + + def fit_batch(self, batch): + """Train the parameters given a single batch in input""" + if self.reference_batch is None: + self.reference_batch = batch + + should_step = self.step % self.grad_accumulation_factor == 0 + outputs = self.compute_forward(batch, sb.Stage.TRAIN) + loss, loss_autoencoder, loss_done = self.compute_objectives( + outputs, batch, sb.Stage.TRAIN + ) + if self.train_diffusion: + with self.no_sync(not should_step): + (loss / self.grad_accumulation_factor).backward( + retain_graph=True + ) + # Done loss - iff applicable + if self.use_done_detector: + with self.no_sync(not should_step): + (loss_done / self.grad_accumulation_factor).backward( + retain_graph=True + ) + + if should_step: + if self.train_diffusion: + self.optimizer.step() + self.optimizer.zero_grad() + + if self.use_done_detector: + self.optimizer_done.step() + self.optimizer_done.zero_grad() + + # Latent diffusion: Step through the autoencoder + if ( + self.diffusion_mode == DiffusionMode.LATENT + and loss_autoencoder is not None + ): + with self.no_sync(not should_step): + (loss_autoencoder / self.grad_accumulation_factor).backward() + if should_step: + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.autoencoder_optimizer.step() + self.autoencoder_optimizer.zero_grad() + + self.optimizer_step += 1 + self.hparams.lr_annealing(self.optimizer, self.optimizer_step) + if self.diffusion_mode == DiffusionMode.LATENT: + self.hparams.lr_annealing_autoencoder( + self.autoencoder_optimizer, self.optimizer_step + ) + if ( + self.hparams.enable_train_metrics + and self.hparams.use_tensorboard + and ( + self.step == 1 + or self.step % self.hparams.train_log_interval == 0 + ) + ): + self.log_batch(outputs) + return loss + + def evaluate_batch(self, batch, stage): + """Evaluate one batch, override for different procedure than train. 
+ + The default implementation depends on two methods being defined + with a particular behavior: + + * ``compute_forward()`` + * ``compute_objectives()`` + + Arguments + --------- + batch : list of torch.Tensors + Batch of data to use for evaluation. Default implementation assumes + this batch has two elements: inputs and targets. + stage : Stage + The stage of the experiment: Stage.VALID, Stage.TEST + + Returns + ------- + detached loss + """ + + out = self.compute_forward(batch, stage=stage) + loss, _, _ = self.compute_objectives(out, batch, stage=stage) + return loss.detach().cpu() + + def log_batch(self, predictions): + """Saves information from a single batch to the log + + Arguments + --------- + predictions: DiffusionPredictions + the predictions from compute_forward + """ + loss_stats = self.loss_metric.summarize() + stats = { + "loss": loss_stats["average"], + "lr": self.optimizer.param_groups[0]["lr"], + } + stats.update( + self.extract_dist_stats(self.data_dist_stats_metric, prefix="data") + ) + if self.use_done_detector: + stats["done_loss"] = self.done_loss_metric.summarize( + field="average" + ) + + if ( + self.diffusion_mode == DiffusionMode.LATENT + and self.train_autoencoder + ): + stats.update( + self.autoencoder_loss_metric.summarize(field="average") + ) + stats["laplacian_loss"] = ( + self.autoencoder_laplacian_loss_stats_metric.summarize( + field="average" + ) + ) + stats["weighted_laplacian_loss"] = ( + self.hparams.loss_laplacian_weight * stats["laplacian_loss"] + ) + stats["lr_autoencoder"] = self.autoencoder_optimizer.param_groups[ + 0 + ]["lr"] + stats.update( + self.extract_dist_stats( + self.autoencoder_rec_dist_stats_metric, + prefix="autoencoder_rec", + ) + ) + stats.update( + self.extract_dist_stats( + self.autoencoder_latent_dist_stats_metric, + prefix="autoencoder_latent", + ) + ) + + self.hparams.tensorboard_train_logger.log_stats( + stats_meta={"step": self.step}, train_stats=stats + ) + if ( + self.diffusion_mode == 
DiffusionMode.LATENT + and self.hparams.enable_reconstruction_sample + ): + self.hparams.tensorboard_train_logger.log_figure( + "train_ref_spectrogram", predictions.feats[0] + ) + self.hparams.tensorboard_train_logger.log_figure( + "train_rec_spectrogram", predictions.autoencoder_output.rec[0] + ) + latent = predictions.autoencoder_output.latent[0] + latent = latent.view( + latent.size(0) * latent.size(1), latent.size(2) + ) + self.hparams.tensorboard_train_logger.log_figure( + "train_rec_latent", latent + ) + + def extract_dist_stats(self, dist_stats_metric, prefix): + """Extracts stats from a MultiMetricStats instance with a dist_stats metric + into a flattened dictionary, converting the keys to _ for the average, + __(min|max) for the minimum and the maximum + + Arguments + --------- + dist_stats_metric: speechbrain.utils.metric_stats.MultiMetricStats + the metric for which statistics will be extracted + prefix: str + The string prefix. + + Returns + ------- + Extracted stats + """ + dist_stats = dist_stats_metric.summarize() + return { + self.get_stat_key(prefix, stat, metric_key): value + for stat, stat_details in dist_stats.items() + for metric_key, value in stat_details.items() + if metric_key in {"average", "min_score", "max_score"} + } + + def get_stat_key(self, prefix, stat, metric_key): + """Returns the statistics key for the specified metric and statistics + + Arguments + --------- + prefix: str + the prefix to be used + stat: str + the name of the statistic + metric_key: str + the metric key + + Returns + ------- + key: str + the key to be used + """ + suffix = "" + if metric_key != "average": + suffix = "_" + metric_key.replace("_score", "") + return f"{prefix}_{stat}{suffix}" + + def prepare_features(self, batch, stage): + """Prepare the features for computation, including augmentation. + + Arguments + --------- + batch: PaddedData + An input batch + stage : sb.Stage + The current stage of training. 
+ + Returns + ------- + feats: torch.Tensor + features (normalized spectrograms) + + lens: torch.Tensor + item lengths + + done: torch.Tensor + a tensor indicating whether the sequence/spectrogram + is finished + + """ + wavs, lens = batch.sig + + feats, feats_raw, lens = self.sig_to_feats(wavs, lens) + + # Compute metrics + if self.hparams.enable_train_metrics: + max_len = feats.size(2) + mask = length_to_mask(lens * max_len, max_len)[ + :, None, :, None + ].bool() + self.data_dist_stats_metric.append( + batch.file_name, feats_raw, mask=mask + ) + + return feats, lens + + def sig_to_feats(self, wavs, lens): + """Performs feature extraction on the raw signal: MEL spectrogram + + normalization + padding to fit UNets + + Arguments + --------- + wavs: torch.Tensor + raw waveforms + lens: torch.Tensor + feature lengths + + Returns + ------- + feats: torch.Tensor + Global normed features + feats_raw: torch.Tensor + Unnormalized features + lens: torch.Tensor + Corresponding lengths of features + """ + # Compute features + feats = self.modules.compute_features(wavs) + feats = feats.transpose(-1, -2) + feats = feats.unsqueeze(1) + + # UNet downsamples features in multiples of 2. 
Reshape to ensure + # there are no mismatched tensors due to ambiguity + feats, lens = data_utils.pad_divisible( + feats, lens, factor=self.hparams.downsample_factor, len_dim=2 + ) + + feats, _ = data_utils.pad_divisible( + feats, factor=self.hparams.downsample_factor, len_dim=3 + ) + + # Min Level Norm + feats_raw = self.modules.min_level_norm(feats) + + # Global Norm + + feats = self.modules.global_norm( + feats_raw, lens, mask_value=self.mask_value_norm + ) + return feats, feats_raw, lens + + def prepare_features_done(self, batch, feats, lens): + """Prepares features for the done detector (a concatenation of one sample + and a random sample) + + Arguments + --------- + batch: PaddedBatch + a single batch of data + feats: torch.Tensor + spectrogram features + lens: torch.Tensor + feature lengths + + Returns + ------- + feats_done: torch.Tensor + features for the done detector (a concatenation) + lens_done: torch.Tensor + relative lengths of these features + + """ + wavs_random, lens_random = batch.sig_random + feats_random, _, lens_random = self.sig_to_feats( + wavs_random, lens_random + ) + feats_done, lens_done = data_utils.concat_padded_features( + feats=[feats, feats_random], + lens=[lens, lens_random], + feats_slice_start=[0.0, self.hparams.done_random_start_offset], + feats_slice_end=[0.0, self.hparams.done_random_end_offset], + dim=2, + ) + return feats_done, lens_done + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted outputs. + + Arguments + --------- + predictions : tensor + The output tensor from `compute_forward`. + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + + Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient. 
+ """ + + ( + preds, + noise, + noisy_sample, + feats, + lens, + autoencoder_out, + feats_done, + lens_done, + pred_done, + ) = predictions + if self.train_diffusion: + # NOTE: Padding of the latent space can affect the lengths + lens_diffusion = ( + autoencoder_out.latent_length + if self.diffusion_mode == DiffusionMode.LATENT + else lens + ) + loss = self.hparams.compute_cost( + reshape_feats(preds), + reshape_feats(noise), + length=lens_diffusion, + ) + else: + loss = torch.tensor(0.0, device=self.device) + + # Append this batch of losses to the loss metric for easy + self.loss_metric.append( + batch.file_name, preds, noise, lens, reduction="batch" + ) + + if self.use_done_detector: + max_len = feats.size(2) + lens_target = (lens * max_len).int() - 1 + loss_done = self.hparams.compute_cost_done( + pred_done.squeeze(-1), lens_target, length=lens_done + ) + self.done_loss_metric.append( + batch.file_name, + pred_done.squeeze(-1), + lens_target, + length=lens_done, + reduction="batch", + ) + else: + loss_done = None + + loss_autoencoder = None + if ( + self.diffusion_mode == DiffusionMode.LATENT + and self.train_autoencoder + ): + loss_autoencoder = self.hparams.compute_cost_autoencoder( + autoencoder_out, feats, length=lens + ) + self.autoencoder_loss_metric.append( + batch.file_name, + autoencoder_out, + feats, + length=lens, + reduction="batch", + ) + loss_laplacian = self.modules.compute_cost_laplacian( + autoencoder_out.rec, length=lens + ) + + self.autoencoder_laplacian_loss_stats_metric.append( + batch.file_name, + autoencoder_out.rec, + length=lens, + reduction="batch", + ) + + loss_autoencoder += ( + self.hparams.loss_laplacian_weight * loss_laplacian + ) + + max_len = autoencoder_out.rec.size(2) + rec_mask = length_to_mask(lens * max_len, max_len).unsqueeze(1) + rec_mask = match_shape(rec_mask, autoencoder_out.rec) + rec_denorm = self.modules.global_norm.denormalize( + autoencoder_out.rec + ) + self.autoencoder_rec_dist_stats_metric.append( + 
batch.file_name, rec_denorm, mask=rec_mask + ) + max_len = autoencoder_out.latent.size(2) + latent_mask = length_to_mask(lens * max_len, max_len).unsqueeze(1) + latent_mask = match_shape(latent_mask, autoencoder_out.latent) + self.autoencoder_latent_dist_stats_metric.append( + batch.file_name, autoencoder_out.latent, mask=latent_mask + ) + + return loss, loss_autoencoder, loss_done + + def generate_samples(self): + """Generates spectrogram and (optionally) audio samples using the + denoising diffusion model + """ + labels, samples = self.generate_spectrograms() + + wav = None + if self.hparams.eval_generate_audio: + samples_denorm = self.denormalize(samples) + wav = self.generate_audio(samples_denorm) + + return labels, samples, samples_denorm, wav + + def cut_samples(self, samples, wav): + """Uses the done predictor to "chop" a batch of samples + according to when it believes generation to be finished + at a given state + + Arguments + --------- + samples: torch.Tensor + a tensor of samples + + wav: torch.Tensor + a tensor of generated audio (optional) + + Returns + ------- + done_pred: torch.Tensor + the raw output of the "done" predictor + samples_cut: list + a list of samples + + """ + done_in = samples.squeeze(1)[:, :, : self.hparams.spec_n_mels] + done_pred = self.modules.done_detector(done_in) + lens_pred = done_pred.squeeze().argmax(dim=-1) + # NOTE: A poorly trained "done detector" may not cross the threshold + # at all - in this case the sample will not be "cut" + lens_pred[lens_pred == 0] = samples.size(2) + samples_cut = [ + sample[:, :length, :] for sample, length in zip(samples, lens_pred) + ] + wav_lens_pred = (lens_pred / samples.size(2) * wav.size(-1)).int() + wav_cut = [ + sample[:length] for sample, length in zip(wav, wav_lens_pred) + ] + return samples_cut, wav_cut + + def generate_spectrograms(self): + """Generates sample spectrograms""" + if self.is_conditioned: + logger.info("Conditioned sampling") + sample = 
self.generate_spectrograms_conditioned() + else: + logger.info("Unconditioned sampling") + sample = self.generate_spectrograms_unconditioned() + return sample + + def generate_spectrograms_unconditioned(self): + """Generates spectrograms without conditioning""" + sample = self.modules.diffusion_sample.sample( + ( + self.hparams.eval_num_samples, + self.hparams.diffusion_channels, + self.hparams.eval_time_steps, + self.hparams.spec_sample_size, + ) + ) + labels = [str(idx) for idx in range(1, len(sample) + 1)] + sample = self.modules.global_norm.denormalize(sample) + return labels, sample + + def generate_spectrograms_conditioned(self): + """Generates spectrograms with label conditioning""" + sample_labels = self.sample_cond_labels() + samples = [ + (label, idx, sample) + for label in sample_labels + for idx, sample in enumerate( + self.generate_spectrograms_for_label(label) + ) + ] + labels = [ + self.get_sample_label(label, idx) for label, idx, _ in samples + ] + samples = [sample for _, _, sample in samples] + return labels, samples + + def sample_cond_labels(self): + """Generates a sample of conditioning labels + based on hparams + + Returns + ------- + result: list + a list of dictionaries with speaker/digit + combinations + """ + label_samples = {} + for key, cond_config in self.get_active_cond_emb().items(): + sample_count = cond_config["sample_count"] + if sample_count is None: + sample = torch.arange(cond_config["count"], device=self.device) + else: + sample = torch.randperm( + cond_config["count"], device=self.device + )[:sample_count] + label_samples[key] = sample + + samples = dict_value_combinations(label_samples) + return samples + + def get_active_cond_emb(self): + """Returns conditional embeddings that have been enabled + in hyperparameters + + Returns + ------- + cond_emb: dict + all enabled conditional embedding configurations + """ + return { + key: value + for key, value in self.hparams.cond_emb.items() + if self.hparams.use_cond_emb[key] + } + + 
def generate_spectrograms_for_label(self, label): + """Generates samples for a specific label + + Arguments + --------- + label: dict + a dictionary of labels with values to compute + the embeddings + + Returns + ------- + sample: torch.tensor + a batch of spectrograms + """ + label_msg = ", ".join( + f"{key} = {value.item()}" for key, value in label.items() + ) + logger.info("Generating samples for labels %s", label_msg) + cond_emb = self.compute_cond_emb(label) + sample = self.modules.diffusion_sample.sample( + ( + self.hparams.eval_num_samples, + self.hparams.diffusion_sample_channels, + self.hparams.eval_time_steps, + self.hparams.spec_sample_size, + ), + cond_emb=cond_emb, + ) + return sample + + def get_sample_label(self, label, idx): + """Gets a filename label for the specified sample + + Arguments + --------- + label: dict + a dictionary similar to the following: + {"digit": 4, "speaker": 10} + idx: int + the item index (will be appended) + + Returns + ------- + result: str + a formatted label. For the example above, it will + be "digit_4_speaker_10" + """ + label_str = "_".join(f"{key}_{value}" for key, value in label.items()) + return f"{label_str}_{idx}" + + def generate_rec_samples(self): + predictions = self.compute_forward(self.reference_batch, sb.Stage.VALID) + feats = predictions.autoencoder_output.rec + if self.hparams.eval_generate_audio: + wav = self.generate_audio(feats) + return feats, wav + + def save_spectrograms(self, samples, path, folder="spec", labels=None): + """Saves sample spectrograms to filesystem files + + Arguments + --------- + samples: torch.Tensor + a tensor of sample spectrograms + path: str + the path to samples for a given epoch + folder: str + the name of the folder where the spectrograms + will be saved + labels: list + a list of labels - for saving. 
If omitted, sequential + samples will be used + """ + spec_sample_path = os.path.join(path, folder) + if not os.path.exists(spec_sample_path): + os.makedirs(spec_sample_path) + if labels is None: + labels = range(len(samples)) + for label, sample in zip(labels, samples): + spec_file_name = os.path.join(spec_sample_path, f"spec_{label}.png") + self.save_spectrogram_sample(sample, spec_file_name, label=label) + + def save_raw(self, path=".", **kwargs): + """Saves generated audio samples and spectrograms in + raw form, for further analysis. + + This method accepts keywords arguments, and each argument + becomes a key in the dictionary to be saved. + + Arguments + --------- + path: str + the path + **kwargs: dict + The data to save + """ + file_name = os.path.join(path, "raw.pt") + data = { + key: value for key, value in kwargs.items() if value is not None + } + torch.save(data, file_name) + + def save_spectrogram_sample(self, sample, file_name, label=None): + """Saves a single spectrogram sample as an image + + Arguments + --------- + sample: torch.Tensor + a single generated spectrogram (2D tensor) + file_name: str + the destination file name + label: str + The sample label to add to the title. 
+ """ + fig = plot_spectrogram(sample.transpose(-1, -2)) + if fig is not None: + ax = fig.axes[0] + if label: + ax.set_title(f"Spectrogram Sample {label}") + ax.set_xlabel("Time") + ax.set_ylabel("Features") + fig.savefig(file_name) + + def denormalize(self, samples): + """Undoes the normalization performed on spectrograms + + Arguments + --------- + samples: torch.Tensor + normalized samples + + Returns + ------- + result: torch.Tensor + denormalized samples""" + if not torch.is_tensor(samples): + samples = torch.stack(samples) + samples = samples[:, :, :, : self.hparams.spec_n_mels] + samples = self.modules.min_level_norm.denormalize(samples) + samples = AF.DB_to_amplitude( + samples, ref=self.hparams.spec_ref, power=1.0 + ) + samples = self.modules.dynamic_range_compression(samples) + return samples + + def generate_audio(self, samples): + """Generates audio from spectrogram samples using a vocoder + + Arguments + --------- + samples: torch.Tensor + a batch of generated spectrograms + + Returns + ------- + audio: torch.Tensor + generated audio for the samples (vocoder output) + """ + vocoder_in = samples + vocoder_in = vocoder_in.transpose(-1, -2) + vocoder_in = vocoder_in.squeeze(1) + return self.vocoder(vocoder_in) + + def save_audio(self, wav, path, folder="wav", labels=None): + """Saves a batch of audio samples + + wav: torch.Tensor + a batch of audio samples + + path: str + the destination directory + + folder: str + the subfolder within the destination directory + + labels: list + a list of labels, for each sample. 
If omitted, + sequential labels will be generated + """ + wav_sample_path = os.path.join(path, folder) + if not os.path.exists(wav_sample_path): + os.makedirs(wav_sample_path) + + if labels is None: + labels = range(len(wav)) + + for label, sample in zip(labels, wav): + wav_file_name = os.path.join(wav_sample_path, f"sample_{label}.wav") + if self.hparams.norm_out_sample: + max_samp, _ = sample.abs().max(1) + sample = sample / max_samp + self.save_audio_sample(sample.squeeze(0), wav_file_name) + + def compute_sample_metrics(self, samples): + """Computes metrics (mean/std) on samples + + Arguments + --------- + samples: torch.Tensor + a tensor of samples + """ + sample_ids = torch.arange(1, len(samples) + 1) + self.sample_mean_metric.append(sample_ids, samples) + self.sample_std_metric.append(sample_ids, samples) + + def save_audio_sample(self, sample, file_name): + """Saves a single audio sample + + Arguments + --------- + sample: torch.Tensor + an audio sample + file_name: str + the file name to save + """ + write_audio( + file_name, sample, self.hparams.data_prepare_sample_rate_tgt + ) + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch. + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. 
+ """ + + self.train_diffusion = (epoch is None) or ( + epoch >= self.hparams.train_diffusion_start_epoch + ) + self.train_autoencoder = ( + (epoch is not None) + and (self.diffusion_mode == DiffusionMode.LATENT) + and (epoch <= self.hparams.train_autoencoder_stop_epoch) + ) + + # Set up statistics trackers for this stage + self.loss_metric = sb.utils.metric_stats.MetricStats( + metric=self.hparams.compute_cost + ) + if self.use_done_detector: + self.done_loss_metric = sb.utils.metric_stats.MetricStats( + metric=self.hparams.compute_cost_done + ) + + self.mask_value_norm = self.modules.min_level_norm( + torch.tensor(self.hparams.pad_level_db, device=self.device) + ) + + if self.hparams.enable_train_metrics: + self.data_dist_stats_metric = ( + sb.utils.metric_stats.MultiMetricStats( + metric=dist_stats, batch_eval=True + ) + ) + + if self.diffusion_mode == DiffusionMode.LATENT: + self.autoencoder_loss_metric = ( + sb.utils.metric_stats.MultiMetricStats( + metric=self.hparams.compute_cost_autoencoder.details, + batch_eval=True, + ) + ) + self.autoencoder_rec_dist_stats_metric = ( + sb.utils.metric_stats.MultiMetricStats( + metric=dist_stats, batch_eval=True + ) + ) + self.autoencoder_latent_dist_stats_metric = ( + sb.utils.metric_stats.MultiMetricStats( + metric=dist_stats, batch_eval=True + ) + ) + self.autoencoder_laplacian_loss_stats_metric = ( + sb.utils.metric_stats.MetricStats( + metric=self.hparams.compute_cost_laplacian, batch_eval=True + ) + ) + + self.sample_mean_metric = sb.utils.metric_stats.MetricStats( + metric=masked_mean + ) + self.sample_std_metric = sb.utils.metric_stats.MetricStats( + metric=masked_std + ) + self.sample_min_metric = sb.utils.metric_stats.MetricStats( + metric=masked_min + ) + self.sample_max_metric = sb.utils.metric_stats.MetricStats( + metric=masked_max + ) + self.sample_metrics = [ + self.sample_mean_metric, + self.sample_std_metric, + self.sample_min_metric, + self.sample_max_metric, + ] + if stage == sb.Stage.TRAIN: + 
self.modules.global_norm.unfreeze() + else: + self.modules.global_norm.freeze() + + if ( + self.hparams.enable_reference_samples or stage != sb.Stage.TRAIN + ) and not hasattr(self, "vocoder"): + self.vocoder = self.hparams.vocoder() + if not hasattr(self, "reference_batch"): + self.reference_batch = None + self.reference_samples_needed = False + self.is_conditioned = hasattr(self.hparams, "use_cond_emb") and any( + self.hparams.use_cond_emb.values() + ) + self.latent_mask_value = None + self.use_done_detector = "done_detector" in self.modules + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of an epoch. + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + """ + + # Store the train loss until the validation stage. + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + + # Summarize the statistics from the stage for record-keeping. + else: + stats = {"loss": stage_loss} + + # At the end of validation... + if stage == sb.Stage.VALID: + # The train_logger writes a summary to stdout and to the logfile. + lr = self.optimizer.param_groups[0]["lr"] + self.hparams.train_logger.log_stats( + {"Epoch": epoch, "lr": lr}, + train_stats={"loss": self.train_loss}, + valid_stats=stats, + ) + + # Save the current checkpoint and delete previous checkpoints, + self.checkpointer.save_and_keep_only(meta=stats, min_keys=["loss"]) + + # We also write statistics about test data to stdout and to the logfile. 
+ if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stats, + ) + + if stage == sb.Stage.TRAIN and self.hparams.enable_reference_samples: + self.generate_reference_samples(self.reference_batch) + + if ( + stage != sb.Stage.TRAIN + and epoch is not None + and epoch % self.hparams.samples_interval == 0 + ): + labels, samples, samples_denorm, wav = self.generate_samples() + samples_rec, wav_rec = None, None + data = { + "labels": labels, + "samples": samples, + "samples_denorm": samples_denorm, + "wav": wav, + } + if self.diffusion_mode == DiffusionMode.LATENT: + samples_rec, wav_rec = self.generate_rec_samples() + data["samples_rec"] = samples_rec + data["wav_rec"] = wav_rec + if self.use_done_detector: + samples_cut, wav_cut = self.cut_samples(samples, wav) + data["samples_cut"] = samples_cut + data["wav_cut"] = wav_cut + self.log_epoch(data, epoch, stage) + + def generate_reference_samples(self, batch): + """Generate an audio sample from one of the spectrograms + using the same normalization techniques + + Arguments + --------- + batch: speechbrain.dataio.batch.PaddedBatch + a batch of audio + """ + feats, lens = self.prepare_features(batch, sb.Stage.VALID) + feats = self.modules.global_norm.denormalize(feats) + feats_denorm = self.denormalize(feats) + wav = self.generate_audio(feats_denorm) + self.log_samples( + spectrogram_samples=feats, + wav_samples=wav, + lens=lens, + key_prefix="reference_", + ) + ref_sample_path = os.path.join(self.hparams.sample_folder, "ref") + self.save_spectrograms(feats, ref_sample_path) + self.save_audio(wav, path=ref_sample_path) + + def log_epoch(self, data, epoch, stage): + """Saves end-of-epoch logs + + Arguments + --------- + data: dict + the data to be logged, with the following keys + samples: generated samples + wav: generated waveform + samples_rec: reconstruction samples (to assess autoencoder quality) + samples_wav: reconstruction audio (to 
assess autoencoder quality) + epoch: int + the epoch number + stage: speechbrain.Stage + the training stage + + """ + epoch_sample_path = os.path.join(self.hparams.sample_folder, str(epoch)) + samples, samples_denorm, wav, labels, samples_rec, wav_rec = ( + data.get(key) + for key in [ + "samples", + "samples_denorm", + "wav", + "labels", + "samples_rec", + "wav_rec", + ] + ) + if not torch.is_tensor(samples): + samples = torch.stack(samples) + samples_log = data.get("samples_cut", samples) + wav_log = data.get("wav_cut", wav) + self.save_spectrograms(samples_log, epoch_sample_path, labels=labels) + sample_ids = torch.arange(1, len(samples) + 1) + for metric in self.sample_metrics: + metric.append(sample_ids, samples) + if wav is not None: + self.save_audio(wav_log, epoch_sample_path, labels=labels) + if self.diffusion_mode == DiffusionMode.LATENT: + self.save_spectrograms( + samples_rec, epoch_sample_path, folder="spec_rec" + ) + if wav_rec is not None: + self.save_audio(wav_rec, epoch_sample_path, folder="wav_rec") + + self.save_raw( + spec=samples, + spec_denorm=samples_denorm, + wav=wav, + spec_rec=samples_rec, + wav_rec=wav_rec, + path=epoch_sample_path, + ) + if self.hparams.use_tensorboard: + sample_mean_stats = self.sample_mean_metric.summarize() + sample_std_stats = self.sample_std_metric.summarize() + sample_min_stats = self.sample_min_metric.summarize() + sample_max_stats = self.sample_max_metric.summarize() + stats = { + "sample_mean": sample_mean_stats["average"], + "sample_mean_min": sample_mean_stats["min_score"], + "sample_mean_max": sample_mean_stats["max_score"], + "sample_std": sample_std_stats["average"], + "sample_std_min": sample_std_stats["min_score"], + "sample_std_max": sample_std_stats["max_score"], + "sample_min": sample_min_stats["min_score"], + "sample_max": sample_max_stats["max_score"], + } + stats_args = {f"{stage.name.lower()}_stats": stats} + self.hparams.tensorboard_train_logger.log_stats( + stats_meta={"step": self.step}, 
**stats_args + ) + self.log_samples( + spectrogram_samples=samples_log, wav_samples=wav_log + ) + if self.diffusion_mode == DiffusionMode.LATENT: + self.log_samples( + spectrogram_samples=samples_rec, + wav_samples=wav_rec, + key_prefix="rec_", + ) + + def log_samples( + self, + spectrogram_samples=None, + wav_samples=None, + key_prefix=None, + lens=None, + ): + """Logs a set of audio and spectrogram samples + + Arguments + --------- + spectrogram_samples: torch.Tensor + a tensor of spectrogram samples + + wav_samples: torch.Tensor + a tensor of audio samples + + key_prefix: str + the prefix to use for keys in Tensorboard logging (if applicable) + + lens: torch.Tensor + relative sample lengths + + """ + if key_prefix is None: + key_prefix = "" + if lens is None: + lens = torch.ones(len(spectrogram_samples), device=self.device) + if self.hparams.use_tensorboard: + for sample in spectrogram_samples: + self.hparams.tensorboard_train_logger.log_figure( + f"{key_prefix}spectrogram", sample + ) + if wav_samples is not None: + max_len = max(sample.size(-1) for sample in wav_samples) + lens_full = (lens * max_len).int() + for wav_sample, sample_len in zip(wav_samples, lens_full): + self.tb_writer.add_audio( + f"{key_prefix}audio", wav_sample[:, :sample_len] + ) + + @property + def tb_writer(self): + """Returns the raw Tensorboard logger writer""" + return self.hparams.tensorboard_train_logger.writer + + +DATASET_SPLITS = ["train", "valid", "test"] + + +def reshape_feats(feats): + """Reshapes tensors of shape (batch x channels x features x length) + to (batch x length x features), suitable for standard SpeechBrain + losses, such as `mse_loss` + + Arguments + --------- + feats: torch.Tensor + a feature tensor of shape (batch x channels x features x length) + + Returns + ------- + result: torch.Tensor + a feature tensor of shape (batch x length x features) + """ + return feats.squeeze(1).transpose(1, -1) + + +def apply_sort(hparams, dataset): + if hparams["sort"]: + dataset = 
dataset.filtered_sorted(sort_key=hparams["sort"]) + if hparams["batch_shuffle"]: + dataset = dataset.batch_shuffle(hparams["batch_size"]) + return dataset + + +def load_dataset(hparams): + dataset_splits = {} + data_folder = hparams["data_save_folder"] + for split_id in DATASET_SPLITS: + split_path = hparams[f"{split_id}_json"] + dataset_split = sb.dataio.dataset.DynamicItemDataset.from_json( + split_path, replacements={"data_root": data_folder} + ) + dataset_split = apply_sort(hparams, dataset_split) + dataset_splits[split_id] = dataset_split + return dataset_splits + + +def dataio_prep(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + We expect `prepare_mini_librispeech` to have been called before this, + so that the `train.json`, `valid.json`, and `valid.json` manifest files + are available. + + Arguments + --------- + hparams : dict + This dictionary is loaded from the `train.yaml` file, and it includes + all the hyperparameters needed for dataset construction and loading. + + Returns + ------- + datasets : dict + Contains two keys, "train" and "valid" that correspond + to the appropriate DynamicItemDataset object. + """ + + # Define audio pipeline + @sb.utils.data_pipeline.takes("file_name") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Load the signal, and output it""" + return read_audio(wav, hparams) + + @sb.utils.data_pipeline.takes("digit", "speaker_id") + @sb.utils.data_pipeline.provides("digit_label", "speaker_label") + def labels_pipeline(digit, speaker_id): + yield int(digit) + yield int(speaker_id) - 1 + + # Define datasets. We also connect the dataset with the data processing + # functions defined above. 
+ + dataset_splits = load_dataset(hparams) + dataset_splits_values = dataset_splits.values() + + output_keys = ["file_name", "sig", "digit_label", "speaker_label"] + if "done_detector" in hparams: + output_keys += ["file_name_random", "sig_random"] + + sb.dataio.dataset.set_output_keys( + dataset_splits_values, + output_keys, + ) + sb.dataio.dataset.add_dynamic_item(dataset_splits_values, audio_pipeline) + sb.dataio.dataset.add_dynamic_item(dataset_splits_values, labels_pipeline) + for dataset_split in dataset_splits_values: + enhance_with_random(dataset_split, hparams) + + train_split = dataset_splits["train"] + data_count = None + train_split = apply_overfit_test( + hparams["overfit_test"], + hparams["overfit_test_sample_count"], + hparams["overfit_test_epoch_data_count"], + train_split, + ) + + if hparams["train_data_count"] is not None: + data_count = hparams["train_data_count"] + train_split.data_ids = train_split.data_ids[:data_count] + dataset_splits["train"] = train_split + + return dataset_splits + + +def read_audio(wav, hparams): + """Reads an audio file, applying random amplitude + + Arguments + --------- + wav: str + the file name (absolute or relative) + hparams: dict + hyperparameters + + Returns + ------- + sig: torch.Tensor + The loaded audio. 
+ """ + sig = sb.dataio.dataio.read_audio(wav) + + # To Support random amplitude + if hparams["rand_amplitude"]: + rand_amp = (hparams["max_amp"] - hparams["min_amp"]) * torch.rand( + 1 + ) + hparams["min_amp"] + sig = sig / sig.abs().max() + sig = sig * rand_amp + return sig + + +def enhance_with_random(dataset, hparams): + """Enhances the pipeline with an additional randomly chosen sample for + each sample - used for training the Done detector - to determine word + boundaries + + Arguments + --------- + dataset: DynamicItemDataset + the dataset to be enhanced + hparams: dict + the hyperparameters dictionary + """ + item_count = len(dataset) + + @sb.utils.data_pipeline.provides("file_name_random", "sig_random") + def extra_random_sample(): + idx = torch.randint(item_count, (1,)).item() + data_id = dataset.data_ids[idx] + data_item = dataset.data[data_id] + wav_random = data_item["file_name"] + return wav_random, read_audio(wav_random, hparams) + + dataset.add_dynamic_item(extra_random_sample) + + +def check_tensorboard(hparams): + """Checks whether Tensorboard is enabled and initializes the logger if it is + + Arguments + --------- + hparams: dict + the hyperparameter dictionary + """ + if hparams["use_tensorboard"]: + try: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs"] + ) + except ImportError: + logger.warning( + "Could not enable Tensorboard logging - Tensorboard is not available" + ) + hparams["use_tensorboard"] = False + + +# Recipe begins! +if __name__ == "__main__": + # Reading command line arguments. + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Initialize ddp (useful only for multi-GPU DDP training). + sb.utils.distributed.ddp_init_group(run_opts) + + # Load hyperparameters file with command-line overrides. 
+ with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Check whether Tensorboard is available and enabled + check_tensorboard(hparams) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + run_on_main( + prepare_audiomnist, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["data_save_folder"], + "train_json": hparams["train_json"], + "valid_json": hparams["valid_json"], + "test_json": hparams["test_json"], + "metadata_folder": hparams["metadata_folder"], + "norm": hparams["data_prepare_norm"], + "trim": hparams["data_prepare_trim"], + "trim_threshold": hparams["data_prepare_trim_threshold"], + "src_sample_rate": hparams["data_prepare_sample_rate_src"], + "tgt_sample_rate": hparams["data_prepare_sample_rate_tgt"], + "skip_prep": hparams["skip_prep"], + }, + ) + + # Create dataset objects "train", "valid", and "test". + diffusion_datasets = dataio_prep(hparams) + + # Initialize the Brain object to prepare for mask training. + diffusion_brain = DiffusionBrain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # The `fit()` method iterates the training loop, calling the methods + # necessary to update the parameters of the model. Since all objects + # with changing state are managed by the Checkpointer, training can be + # stopped at any point, and will be resumed on next call. 
+ diffusion_brain.fit( + epoch_counter=diffusion_brain.hparams.epoch_counter, + train_set=diffusion_datasets["train"], + valid_set=diffusion_datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["dataloader_options"], + ) + + # Load the best checkpoint for evaluation + test_stats = diffusion_brain.evaluate( + test_set=diffusion_datasets["test"], + min_key="error", + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/BinauralWSJ0Mix/extra-dependencies.txt b/recipes/BinauralWSJ0Mix/extra-dependencies.txt deleted file mode 100644 index 850bc02a8b..0000000000 --- a/recipes/BinauralWSJ0Mix/extra-dependencies.txt +++ /dev/null @@ -1,2 +0,0 @@ -mir-eval==0.6 -pyroomacoustics==0.3.1 diff --git a/recipes/BinauralWSJ0Mix/extra_requirements.txt b/recipes/BinauralWSJ0Mix/extra_requirements.txt new file mode 100644 index 0000000000..3cfaa1241d --- /dev/null +++ b/recipes/BinauralWSJ0Mix/extra_requirements.txt @@ -0,0 +1,4 @@ +gitpython==3.1.37 +mir-eval==0.6 +pyroomacoustics>=0.7.3 + diff --git a/recipes/BinauralWSJ0Mix/prepare_data.py b/recipes/BinauralWSJ0Mix/prepare_data.py index 64e06545bb..84e1f5ff69 100644 --- a/recipes/BinauralWSJ0Mix/prepare_data.py +++ b/recipes/BinauralWSJ0Mix/prepare_data.py @@ -1,14 +1,14 @@ """ -The .csv preperation functions for Binaural-WSJ0Mix. +The .csv preparation functions for Binaural-WSJ0Mix. 
Author * Cem Subakan 2020 * Zijian 2022 - """ +""" -import os import csv +import os def prepare_binaural_wsj0mix( @@ -69,15 +69,14 @@ def create_binaural_wsj0mix2_csv( raise ValueError("Unsupported sampling rate") for set_type in set_types: - mix_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "mix/", + datapath, f"wav{sample_rate}", version, set_type, "mix/" ) s1_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s1/", + datapath, f"wav{sample_rate}", version, set_type, "s1/" ) s2_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s2/", + datapath, f"wav{sample_rate}", version, set_type, "s2/" ) files = os.listdir(mix_path) @@ -101,14 +100,16 @@ def create_binaural_wsj0mix2_csv( ] with open( - os.path.join(savepath, savename + set_type + ".csv"), "w" + os.path.join(savepath, savename + set_type + ".csv"), + "w", + newline="", + encoding="utf-8", ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for i, (mix_path, s1_path, s2_path) in enumerate( zip(mix_fl_paths, s1_fl_paths, s2_fl_paths) ): - row = { "ID": i, "duration": 1.0, @@ -148,18 +149,17 @@ def create_binaural_wsj0mix3_csv( raise ValueError("Unsupported sampling rate") for set_type in set_types: - mix_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "mix/", + datapath, f"wav{sample_rate}", version, set_type, "mix/" ) s1_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s1/", + datapath, f"wav{sample_rate}", version, set_type, "s1/" ) s2_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s2/", + datapath, f"wav{sample_rate}", version, set_type, "s2/" ) s3_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s3/", + datapath, f"wav{sample_rate}", version, set_type, "s3/" ) files = os.listdir(mix_path) @@ -187,14 +187,16 @@ def 
create_binaural_wsj0mix3_csv( ] with open( - os.path.join(savepath, savename + set_type + ".csv"), "w" + os.path.join(savepath, savename + set_type + ".csv"), + "w", + newline="", + encoding="utf-8", ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for i, (mix_path, s1_path, s2_path, s3_path) in enumerate( zip(mix_fl_paths, s1_fl_paths, s2_fl_paths, s3_fl_paths) ): - row = { "ID": i, "duration": 1.0, @@ -241,22 +243,21 @@ def create_binaural_wsj0mix2_noise_csv( raise ValueError("Unsupported sampling rate") for set_type in set_types: - mix_path = os.path.join( datapath, - "wav{}".format(sample_rate), + f"wav{sample_rate}", version, set_type, "mix_both/", ) s1_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s1/", + datapath, f"wav{sample_rate}", version, set_type, "s1/" ) s2_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s2/", + datapath, f"wav{sample_rate}", version, set_type, "s2/" ) noise_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "noise/" + datapath, f"wav{sample_rate}", version, set_type, "noise/" ) files = os.listdir(mix_path) @@ -284,14 +285,16 @@ def create_binaural_wsj0mix2_noise_csv( ] with open( - os.path.join(savepath, savename + set_type + ".csv"), "w" + os.path.join(savepath, savename + set_type + ".csv"), + "w", + newline="", + encoding="utf-8", ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() - for (i, (mix_path, s1_path, s2_path, noise_path),) in enumerate( - zip(mix_fl_paths, s1_fl_paths, s2_fl_paths, noise_fl_paths,) + for i, (mix_path, s1_path, s2_path, noise_path) in enumerate( + zip(mix_fl_paths, s1_fl_paths, s2_fl_paths, noise_fl_paths) ): - row = { "ID": i, "duration": 1.0, @@ -334,15 +337,14 @@ def create_binaural_wsj0mix2_reverb_csv( raise ValueError("Unsupported sampling rate") for set_type in set_types: - mix_path = os.path.join( - datapath, 
"wav{}".format(sample_rate), version, set_type, "mix/", + datapath, f"wav{sample_rate}", version, set_type, "mix/" ) s1_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s1/", + datapath, f"wav{sample_rate}", version, set_type, "s1/" ) s2_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "s2/", + datapath, f"wav{sample_rate}", version, set_type, "s2/" ) files = os.listdir(mix_path) @@ -366,14 +368,16 @@ def create_binaural_wsj0mix2_reverb_csv( ] with open( - os.path.join(savepath, savename + set_type + ".csv"), "w" + os.path.join(savepath, savename + set_type + ".csv"), + "w", + newline="", + encoding="utf-8", ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for i, (mix_path, s1_path, s2_path) in enumerate( zip(mix_fl_paths, s1_fl_paths, s2_fl_paths) ): - row = { "ID": i, "duration": 1.0, diff --git a/recipes/BinauralWSJ0Mix/separation/README.md b/recipes/BinauralWSJ0Mix/separation/README.md index f4ff7d3e44..2070164089 100644 --- a/recipes/BinauralWSJ0Mix/separation/README.md +++ b/recipes/BinauralWSJ0Mix/separation/README.md @@ -2,12 +2,14 @@ This folder contains some recipes for the Binaural-WSJ0Mix task (2/3 sources). Please refer to [Real-time binaural speech separation with preserved spatial cues](https://arxiv.org/abs/2002.06637) [1] for details. -Additional dependency: -``` -pip install mir_eval -pip install pyroomacoustics==0.3.1 +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
To do this, simply run the following command in your terminal: + ``` +pip install -r ../extra_requirements.txt +``` To run it: ``` @@ -18,7 +20,13 @@ python train.py hparams/convtasnet-parallel.yaml The training data will be automatically created from the `wsj_root`, which is the root folder that contains Note that during training we print the negative SNR instead of SI-SNR because the scale-invariance property of SI-SNR makes it insensitive to power rescaling of the estimated signal, which may fail in preserving the ILD between the outputs. - +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: +``` +python train.py hparams/convtasnet-parallel.yaml + --data_folder yourpath/binaural-wsj0mix/2speakers + --wsj_root yourpath/to/wsj/ + --test_only +``` # Binaural WSJ0-2mix and WSJ0-3mix dataset creation * The training data generation scripts can be found from [https://github.com/huangzj421/Binaural-WSJ0Mix](https://github.com/huangzj421/Binaural-WSJ0Mix). But the `train.py` also automatically downloads and generates the data. It puts the data under the path specified in `data_folder`. * The default command to run that automatically generate the data given wsj0 folder:`python train.py hparams/convtasnet-parallel.yaml --data_folder yourpath/binaural-wsj0mix/2speakers --wsj_root yourpath/wsj0-mix/wsj0` @@ -47,7 +55,7 @@ Here are the SNRi results (in dB) as well as ITD and ILD errors as the metric fo * ConvTasnet-parallel-noise.yaml refers to the above Tasnet applied to 2 speakers with DEMAND noise. * ConvTasnet-parallel-reverb.yaml refers to the above Tasnet applied to 2 speakers with reverberance(RT60) from the [BRIR Sim Set](http://iosr.uk/software/index.php). 
-The output folders with the checkpoints, logs, etc are available [here](https://drive.google.com/drive/folders/17FFwlIq6MQLHT9RXPgeYssti5TEeEXsx?usp=sharing) +The output folders with the checkpoints, logs, etc are available [here](https://www.dropbox.com/sh/i7fhu7qswjb84gw/AABsX1zP-GOTmyl86PtU8GGua?dl=0) # Example calls for running the training scripts @@ -61,10 +69,10 @@ The output folders with the checkpoints, logs, etc are available [here](https:// You can run the following command to train the model using Distributed Data Parallel (DDP) with 2 GPUs: +```bash +torchrun --nproc_per_node=2 train.py hparams/convtasnet-parallel.yaml --data_folder /yourdatapath ``` - python -m torch.distributed.launch --nproc_per_node=2 train.py hparams/convtasnet-parallel.yaml --data_folder /yourdatapath --distributed_launch --distributed_backend='nccl' -``` -You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at this [tutorial](https://colab.research.google.com/drive/13pBUacPiotw1IvyffvGZ-HrtBr9T6l15?usp=sharing). +You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at [our documentation](https://speechbrain.readthedocs.io/en/latest/multigpu.html). @@ -73,6 +81,15 @@ You can add the other runtime options as appropriate. For more complete informat Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/BinauralWSJ0Mix/separation/dynamic_mixing.py b/recipes/BinauralWSJ0Mix/separation/dynamic_mixing.py index 5ef652b6f3..afc87a6887 100644 --- a/recipes/BinauralWSJ0Mix/separation/dynamic_mixing.py +++ b/recipes/BinauralWSJ0Mix/separation/dynamic_mixing.py @@ -1,14 +1,17 @@ -import speechbrain as sb -import numpy as np -import torch -import torchaudio import glob import os import random -from speechbrain.processing.signal_processing import rescale -from speechbrain.dataio.batch import PaddedBatch + +import numpy as np +import torch +import torchaudio from scipy.signal import fftconvolve +import speechbrain as sb +from speechbrain.dataio import audio_io +from 
speechbrain.dataio.batch import PaddedBatch +from speechbrain.processing.signal_processing import rescale + """ The functions to implement Dynamic Mixing For SpeechSeparation @@ -63,7 +66,7 @@ def audio_pipeline( if "noise" in hparams["experiment_name"]: noise_file = np.random.choice(noise_files, 1, replace=False) - noise, fs_read = torchaudio.load(noise_file[0]) + noise, fs_read = audio_io.load(noise_file[0]) noise = noise.squeeze() # select two speakers randomly @@ -76,22 +79,23 @@ def audio_pipeline( ] minlen = min( - *[torchaudio.info(x).num_frames for x in spk_files], + *[audio_io.info(x).num_frames for x in spk_files], hparams["training_signal_len"], ) for i, spk_file in enumerate(spk_files): - # select random offset - length = torchaudio.info(spk_file).num_frames + length = audio_io.info(spk_file).num_frames start = 0 stop = length if length > minlen: # take a random window start = np.random.randint(0, length - minlen) stop = start + minlen - tmp, fs_read = torchaudio.load( - spk_file, frame_offset=start, num_frames=stop - start, + tmp, fs_read = audio_io.load( + spk_file, + frame_offset=start, + num_frames=stop - start, ) tmp = tmp[0] # * peak # remove channel dim and normalize @@ -116,9 +120,9 @@ def audio_pipeline( hrtf_file = os.path.join( hparams["hrtf_wav_path"], reverb_time, - "CATT_{}_{}.wav".format(reverb_time, azimuth), + f"CATT_{reverb_time}_{azimuth}.wav", ) - hrtf, sr = torchaudio.load(hrtf_file) + hrtf, sr = audio_io.load(hrtf_file) transform = torchaudio.transforms.Resample(sr, fs_read) hrtf = transform(hrtf) tmp_bi = torch.from_numpy( @@ -136,14 +140,13 @@ def audio_pipeline( azimuth = np.random.choice(azimuth_list) for i, loc in enumerate(["left", "right"]): - hrtf_file = os.path.join( subject_path, "{}az{}.wav".format( azimuth.astype("str").replace("-", "neg"), loc ), ) - hrtf, sr = torchaudio.load(hrtf_file) + hrtf, sr = audio_io.load(hrtf_file) transform = torchaudio.transforms.Resample(sr, fs_read) hrtf = transform(hrtf[:, 
np.random.randint(50)]) tmp_bi[:, i] = torch.from_numpy( @@ -152,7 +155,7 @@ def audio_pipeline( # Make relative source energy same with original spatial_scaling = torch.sqrt( - torch.sum(tmp ** 2) * 2 / torch.sum(tmp_bi ** 2) + torch.sum(tmp**2) * 2 / torch.sum(tmp_bi**2) ) sources.append(tmp_bi * spatial_scaling) diff --git a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-cross.yaml b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-cross.yaml index 4e13246367..c24f50a50a 100644 --- a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-cross.yaml +++ b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-cross.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,14 +37,13 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix save_audio: True # Save estimated sources on disk n_audio_to_save: 10 sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -66,18 +65,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-independent.yaml b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-independent.yaml index 3e69d4ac92..c65a444bdc 100644 --- a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-independent.yaml +++ b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-independent.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,14 +37,13 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix save_audio: True # Save estimated sources on disk n_audio_to_save: 10 sample_rate: 8000 -# Training parameters +####################### Training Parameters 
#################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -66,18 +65,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-noise.yaml b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-noise.yaml index 5c2afcc9bb..da598cfef0 100644 --- a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-noise.yaml +++ b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-noise.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -23,7 +23,7 @@ data_modes: ['min'] # or if you want the whole dataset ['min', 'max'] base_folder_dm: !ref /wsj0/si_tr_s/ # the path for binaural-wsj0mix datasets generation automatically -datasets_generation: !ref /Binaural-WSJ0Mix-main +datasets_generation: !ref /BinauralWSJ0Mix-main hrtf_wav_path: !ref /CIPIC_hrtf_database/wav_database/ experiment_name: convtasnet-parallel-noise @@ -37,14 +37,13 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix save_audio: True # Save estimated sources on disk n_audio_to_save: 10 sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -66,18 +65,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 
-speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-reverb.yaml b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-reverb.yaml index 9d54f44aa2..7af696caff 100644 --- a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-reverb.yaml +++ b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel-reverb.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,14 +37,13 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix save_audio: True # Save estimated sources on disk n_audio_to_save: 10 sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -66,18 +65,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of 
speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel.yaml b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel.yaml index 9051f21798..aa7e768254 100644 --- a/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel.yaml +++ b/recipes/BinauralWSJ0Mix/separation/hparams/convtasnet-parallel.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,14 +37,13 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 
num_spks: 2 # set to 3 for wsj0-3mix save_audio: True # Save estimated sources on disk n_audio_to_save: 10 sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -66,18 +65,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/BinauralWSJ0Mix/separation/prepare_data.py b/recipes/BinauralWSJ0Mix/separation/prepare_data.py new file mode 120000 index 0000000000..1a7125c969 --- /dev/null +++ b/recipes/BinauralWSJ0Mix/separation/prepare_data.py @@ -0,0 +1 @@ +../prepare_data.py \ No newline at end of file diff --git a/recipes/BinauralWSJ0Mix/separation/train.py b/recipes/BinauralWSJ0Mix/separation/train.py index 4d097ed5bd..b83c291f03 100644 --- a/recipes/BinauralWSJ0Mix/separation/train.py +++ b/recipes/BinauralWSJ0Mix/separation/train.py @@ -21,25 +21,26 @@ * Zijian Huang 2022 """ +import csv import os import sys + +import numpy as np import torch import torch.nn.functional as F -import torchaudio -import speechbrain as sb -import speechbrain.nnet.schedulers as schedulers -from speechbrain.utils.distributed import run_on_main -from torch.cuda.amp import autocast from hyperpyyaml import load_hyperpyyaml -import numpy as np -from tqdm import tqdm -import csv -import logging from pyroomacoustics.experimental.localization import tdoa -from speechbrain.processing.features import STFT, spectral_magnitude from torch.nn import Conv1d -from speechbrain.pretrained.fetching import fetch -import zipfile +from tqdm import tqdm + +import speechbrain as sb +import speechbrain.nnet.schedulers as schedulers +from speechbrain.dataio import audio_io +from speechbrain.processing.features import STFT, spectral_magnitude +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger 
import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -72,13 +73,16 @@ def compute_forward(self, mix, targets, stage, noise=None): min_len = min(len_noise, len_mix) # add the noise + if noise.ndim == 2: + noise = noise.unsqueeze(-1) mix = mix[:, :min_len] + noise[:, :min_len] # fix the length of targets also targets = targets[:, :min_len, :] if self.hparams.use_wavedrop: - mix = self.hparams.wavedrop(mix, mix_lens) + mix = self.hparams.drop_chunk(mix, mix_lens) + mix = self.hparams.drop_freq(mix) if self.hparams.limit_training_signal_len: mix, targets = self.cut_signals(mix, targets) @@ -195,6 +199,7 @@ def compute_objectives(self, predictions, targets): def fit_batch(self, batch): """Trains one batch""" + # Unpacking batch list mixture = batch.mix_sig targets = [batch.s1_sig, batch.s2_sig] @@ -206,72 +211,37 @@ def fit_batch(self, batch): if "noise" in self.hparams.experiment_name: noise = batch.noise_sig[0] - if self.auto_mix_prec: - with autocast(): - predictions, targets = self.compute_forward( - mixture, targets, sb.Stage.TRAIN, noise - ) - loss = self.compute_objectives(predictions, targets) - - # hard threshold the easy dataitems - if self.hparams.threshold_byloss: - th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() - else: - loss = loss.mean() - - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - self.scaler.scale(loss).backward() - if self.hparams.clip_grad_norm >= 0: - self.scaler.unscale_(self.optimizer) - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm, - ) - self.scaler.step(self.optimizer) - self.scaler.update() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! 
it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) - ) - loss.data = torch.tensor(0).to(self.device) - else: + with self.training_ctx: predictions, targets = self.compute_forward( mixture, targets, sb.Stage.TRAIN, noise ) loss = self.compute_objectives(predictions, targets) + # hard threshold the easy dataitems if self.hparams.threshold_byloss: th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() + loss = loss[loss > th] + if loss.nelement() > 0: + loss = loss.mean() else: loss = loss.mean() - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - loss.backward() - if self.hparams.clip_grad_norm >= 0: - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm - ) - self.optimizer.step() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) + if loss.nelement() > 0 and loss < self.hparams.loss_upper_lim: + self.scaler.scale(loss).backward() + if self.hparams.clip_grad_norm >= 0: + self.scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), + self.hparams.clip_grad_norm, ) - loss.data = torch.tensor(0).to(self.device) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + self.nonfinite_count += 1 + logger.info( + f"infinite loss or empty loss! it happened {self.nonfinite_count} times so far - skipping this batch" + ) + loss.data = torch.tensor(0.0).to(self.device) self.optimizer.zero_grad() return loss.detach().cpu() @@ -308,7 +278,6 @@ def on_stage_end(self, stage, stage_loss, epoch): # Perform end-of-iteration things, like annealing, logging, etc. 
if stage == sb.Stage.VALID: - # Learning rate annealing if isinstance( self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau @@ -327,7 +296,8 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"snr": stage_stats["snr"]}, min_keys=["snr"], + meta={"snr": stage_stats["snr"]}, + min_keys=["snr"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( @@ -347,9 +317,7 @@ def add_speed_perturb(self, targets, targ_lens): recombine = True for i in range(targets.shape[-1]): - new_target = self.hparams.speedperturb( - targets[:, :, :, i], targ_lens - ) + new_target = self.hparams.speed_perturb(targets[:, :, :, i]) new_targets.append(new_target) if i == 0: min_len = new_target.shape[1] @@ -424,7 +392,7 @@ def cal_interaural_error(self, predictions, targets): s_target[:, 1, i].cpu().numpy(), fs=self.hparams.sample_rate, ) - * 10 ** 6 + * 10**6 for i in range(s_target.shape[-1]) ] ITD_prediction = [ @@ -433,7 +401,7 @@ def cal_interaural_error(self, predictions, targets): s_prediction[:, 1, i].cpu().numpy(), fs=self.hparams.sample_rate, ) - * 10 ** 6 + * 10**6 for i in range(s_prediction.shape[-1]) ] ITD_error1 = np.mean( @@ -478,14 +446,13 @@ def save_results(self, test_data): test_data, **self.hparams.dataloader_opts ) - with open(save_file, "w") as results_csv: + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: writer = csv.DictWriter(results_csv, fieldnames=csv_columns) writer.writeheader() # Loop over all test sentence with tqdm(test_loader, dynamic_ncols=True) as t: for i, batch in enumerate(t): - # Apply Separation mixture, mix_len = batch.mix_sig snt_id = batch.id @@ -541,32 +508,27 @@ def save_results(self, test_data): } writer.writerow(row) - logger.info("Mean SNR is {}".format(np.array(all_snrs).mean())) - logger.info("Mean SNRi is {}".format(np.array(all_snrs_i).mean())) - logger.info( - "Mean Delta ITD is {}".format(np.array(all_delta_ITDs).mean()) - ) 
- logger.info( - "Mean Delta ILD is {}".format(np.array(all_delta_ILDs).mean()) - ) + logger.info(f"Mean SNR is {np.array(all_snrs).mean()}") + logger.info(f"Mean SNRi is {np.array(all_snrs_i).mean()}") + logger.info(f"Mean Delta ITD is {np.array(all_delta_ITDs).mean()}") + logger.info(f"Mean Delta ILD is {np.array(all_delta_ILDs).mean()}") def save_audio(self, snt_id, mixture, targets, predictions): "saves the test audio (mixture, targets, and estimated sources) on disk" - # Create outout folder + # Create output folder save_path = os.path.join(self.hparams.save_folder, "audio_results") if not os.path.exists(save_path): os.mkdir(save_path) for ns in range(self.hparams.num_spks): - # Estimated source signal = predictions[0, :, :, ns] signal = signal / signal.abs().max(0).values save_file = os.path.join( - save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1) + save_path, f"item{snt_id}_source{ns + 1}hat.wav" ) - torchaudio.save( + audio_io.save( save_file, signal.permute(1, 0).cpu(), self.hparams.sample_rate ) @@ -574,17 +536,17 @@ def save_audio(self, snt_id, mixture, targets, predictions): signal = targets[0, :, :, ns] signal = signal / signal.abs().max(0).values save_file = os.path.join( - save_path, "item{}_source{}.wav".format(snt_id, ns + 1) + save_path, f"item{snt_id}_source{ns + 1}.wav" ) - torchaudio.save( + audio_io.save( save_file, signal.permute(1, 0).cpu(), self.hparams.sample_rate ) # Mixture signal = mixture[0][0, :] signal = signal / signal.abs().max(0).values - save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id)) - torchaudio.save( + save_file = os.path.join(save_path, f"item{snt_id}_mix.wav") + audio_io.save( save_file, signal.permute(1, 0).cpu(), self.hparams.sample_rate ) @@ -668,18 +630,14 @@ def audio_pipeline_noise(noise_wav): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with 
open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) sb.utils.distributed.ddp_init_group(run_opts) - # Logger info - logger = logging.getLogger(__name__) - # Create experiment directory sb.create_experiment_directory( experiment_directory=hparams["output_folder"], @@ -691,62 +649,58 @@ def audio_pipeline_noise(noise_wav): if hparams["dynamic_mixing"] and not os.path.exists( hparams["base_folder_dm"] ): - print( + raise ValueError( "Please, specify a valid base_folder_dm folder when using dynamic mixing" ) - sys.exit(1) - - if not os.path.exists(hparams["datasets_generation"]): - print("Download Datasets Generation scripts") - fetch( - filename="main.zip", - source="https://github.com/huangzj421/Binaural-WSJ0Mix/archive/refs/heads", - savedir=hparams["data_folder"], - save_filename="Binaural-WSJ0Mix-main.zip", - ) - file = zipfile.ZipFile( - os.path.join(hparams["data_folder"], "Binaural-WSJ0Mix-main.zip") - ) - file.extractall(path=hparams["data_folder"]) - sys.path.append(hparams["datasets_generation"]) - if "noise" in hparams["experiment_name"]: - from create_wav_2speakers_noise import create_binaural_wsj0mix + if not hparams["skip_prep"]: + if not os.path.exists(hparams["datasets_generation"]): + from git import Repo - hparams["data_folder"] = os.path.join(hparams["data_folder"], "noise") - elif "reverb" in hparams["experiment_name"]: - from create_wav_2speakers_reverb import create_binaural_wsj0mix + git_url = "https://github.com/huangzj421/BinauralWSJ0Mix" + repo_dir = hparams["datasets_generation"] + Repo.clone_from(git_url, repo_dir) - hparams["data_folder"] = os.path.join(hparams["data_folder"], "reverb") - elif hparams["num_spks"] == 2: - from create_wav_2speakers import create_binaural_wsj0mix + sys.path.append(hparams["datasets_generation"]) + if "noise" in hparams["experiment_name"]: + from create_wav_2speakers_noise import create_binaural_wsj0mix - 
hparams["data_folder"] = os.path.join( - hparams["data_folder"], "2speakers" - ) - else: - from create_wav_3speakers import create_binaural_wsj0mix + hparams["data_folder"] = os.path.join( + hparams["data_folder"], "noise" + ) + elif "reverb" in hparams["experiment_name"]: + from create_wav_2speakers_reverb import create_binaural_wsj0mix - hparams["data_folder"] = os.path.join( - hparams["data_folder"], "3speakers" - ) + hparams["data_folder"] = os.path.join( + hparams["data_folder"], "reverb" + ) + elif hparams["num_spks"] == 2: + from create_wav_2speakers import create_binaural_wsj0mix - if not os.path.exists(os.path.join(hparams["data_folder"], "wav8k")): - print("Generate Binaural WSJ0Mix dataset automatically") - run_on_main( - create_binaural_wsj0mix, - kwargs={ - "wsj_root": hparams["wsj_root"], - "output_root": hparams["data_folder"], - "datafreqs": hparams["data_freqs"], - "datamodes": hparams["data_modes"], - }, - ) + hparams["data_folder"] = os.path.join( + hparams["data_folder"], "2speakers" + ) + else: + from create_wav_3speakers import create_binaural_wsj0mix + + hparams["data_folder"] = os.path.join( + hparams["data_folder"], "3speakers" + ) + + if not os.path.exists(os.path.join(hparams["data_folder"], "wav8k")): + print("Generate Binaural WSJ0Mix dataset automatically") + run_on_main( + create_binaural_wsj0mix, + kwargs={ + "wsj_root": hparams["wsj_root"], + "output_root": hparams["data_folder"], + "datafreqs": hparams["data_freqs"], + "datamodes": hparams["data_modes"], + }, + ) # Data preparation - from recipes.BinauralWSJ0Mix.prepare_data import ( - prepare_binaural_wsj0mix, - ) # noqa + from prepare_data import prepare_binaural_wsj0mix # noqa run_on_main( prepare_binaural_wsj0mix, @@ -835,15 +789,14 @@ def audio_pipeline_noise(noise_wav): for module in separator.modules.values(): separator.reset_layer_recursively(module) - if not hparams["test_only"]: - # Training - separator.fit( - separator.hparams.epoch_counter, - train_data, - valid_data, - 
train_loader_kwargs=hparams["dataloader_opts"], - valid_loader_kwargs=hparams["dataloader_opts"], - ) + # Training + separator.fit( + separator.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_opts"], + valid_loader_kwargs=hparams["dataloader_opts"], + ) # Eval separator.evaluate(test_data, min_key="snr") diff --git a/recipes/CVSS/S2ST/README.md b/recipes/CVSS/S2ST/README.md new file mode 100644 index 0000000000..9a4e1ddc16 --- /dev/null +++ b/recipes/CVSS/S2ST/README.md @@ -0,0 +1,74 @@ +# Speech-to-Speech Translation (with CVSS) +This folder contains the recipe for training a speech-to-unit translation (S2UT) model using a pre-trained Wav2Vec 2.0 encoder and a transformer decoder on the CVSS dataset. +The implementation is based on [Textless Speech-to-Speech Translation](https://arxiv.org/abs/2112.08352) and [Enhanced Direct Speech-to-Speech Translation Using Self-supervised Pre-training and Data Augmentation](https://arxiv.org/abs/2204.02967) papers. + +## Dataset +[CVSS](https://github.com/google-research-datasets/cvss) is a massively multilingual-to-English speech-to-speech translation corpus. It covers pairs from 21 languages into English. CVSS is derived from the Common Voice speech corpus and the CoVoST 2 speech-to-text translation corpus. +The CVSS dataset includes two versions of spoken translation: CVSS-C and CVSS-T. While both versions can be utilized to train the S2UT model, we recommend using CVSS-C because of its superior speech quality. + +The first step is to select a source language and download [Common Voice (version 4)](https://commonvoice.mozilla.org/en/datasets) for the chosen language code. In this recipe, we've chosen French as the source language. +The next step is to pair translation audio clips with the source speech by downloading the corresponding subset of the [CVSS dataset](https://github.com/google-research-datasets/cvss). 
In our case, we have to download the French CVSS-C subset, which corresponds to the English translation of the French portion of the Common Voice dataset. + +At this point, you should have two distinct folders: the first one containing the Common Voice data and the second one containing the CVSS data. + +> Note: In the recipe, we frequently employ the terms `src_data` and `tgt_data`. +> `src_data` refers to the source language data (Common Voice). +> `tgt_data` refers to the target language data (CVSS). + +## Installing Extra Dependencies +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: +``` +pip install -r extra_requirements.txt +``` + +## How to Run +Before training the speech-to-unit translation (S2UT) model, we have to quantize the target speech into discrete speech units. This is achieved by training a k-means model on raw speech features, which will then serve as the target for training the S2UT model. By default, we use a pre-trained model with `k=100` trained on the 6th layer of HuBERT. For instructions on training a new quantization model, please refer to `recipes/LJSpeech/quantization`. + +To train the S2UT model on French-English, simply run the following command: +``` +python train.py hparams/train_fr-en.yaml --src_data_folder=/corpus/CommonVoice/fr --tgt_data_folder=/corpus/CVSS/fr --bfloat16_mix_prec +``` + +> Dynamic batch settings are optimized for a 40GB VRAM GPU. Don't hesitate to adjust max_batch_len and max_batch_len_val to fit your GPU's capabilities. + + +# Performance summary +Results are reported in terms of sacrebleu. 
+ +| hyperparams file | valid | test | Model | Training logs | GPUs | +|:----------------:|:-----:| :-----:|:-------: | :-----------: |:---------: | +| train_fr-en.yaml | 24.25 | 24.47 | [dropbox](https://www.dropbox.com/sh/woz4i1p8pkfkqhf/AACmOvr3sS7p95iXl3twCj_xa?dl=0) | [wandb](https://wandb.ai/jar0d/s2ut_cvss_sb/runs/uh4tvc8c?workspace=user-jar0d) |1xA100 80GB | + +Training requires about 1 hour and 5 minutes for each epoch on an NVIDIA A100 GPU. A total of 30 epochs are needed. + +To synthesize speech from the predicted speech units, you need to train a unit-based HiFi-GAN vocoder. If you haven't done this already, please refer to the `LJSpeech/TTS/vocoder/unit_hifi_gan` recipe. + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and 
Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/CVSS/S2ST/cvss_prepare.py b/recipes/CVSS/S2ST/cvss_prepare.py new file mode 120000 index 0000000000..7f1db79de6 --- /dev/null +++ b/recipes/CVSS/S2ST/cvss_prepare.py @@ -0,0 +1 @@ +../cvss_prepare.py \ No newline at end of file diff --git a/recipes/CVSS/S2ST/extra_requirements.txt b/recipes/CVSS/S2ST/extra_requirements.txt new file mode 100644 index 0000000000..454a21c035 --- /dev/null +++ b/recipes/CVSS/S2ST/extra_requirements.txt @@ -0,0 +1 @@ +sacrebleu diff --git a/recipes/CVSS/S2ST/extract_code.py b/recipes/CVSS/S2ST/extract_code.py new file mode 100644 index 0000000000..5774e15c0c --- /dev/null +++ b/recipes/CVSS/S2ST/extract_code.py @@ -0,0 +1,239 @@ +""" +Apply K-means clustering over acoustic features to extract speech units for training the speech-to-unit translation model. 
+ +Authors + * Jarod Duret 2023 + """ + +import json +import logging +import pathlib as pl + +import joblib +import numpy as np +import torch +import torchaudio +from huggingface_hub import hf_hub_download +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_pkl, save_pkl +from speechbrain.integrations.huggingface.wav2vec2 import Wav2Vec2 +from speechbrain.utils.logger import get_logger + +OPT_FILE = "opt_cvss_extract.pkl" +TRAIN_JSON = "train.json" +VALID_JSON = "valid.json" +VALID_SMALL = "valid_small.json" +TEST_JSON = "test.json" + + +def setup_logger(): + """Set up a logger with a log format and logging level.""" + log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" + logging.basicConfig(format=log_format, level=logging.INFO) + logger = get_logger(__name__) + return logger + + +def get_device(use_cuda): + """Determine and return the appropriate device for computation.""" + use_cuda = use_cuda and torch.cuda.is_available() + print("\n" + "=" * 30) + print(f"USE_CUDA SET TO: {use_cuda}") + print(f"CUDA AVAILABLE?: {torch.cuda.is_available()}") + print("=" * 30 + "\n") + return torch.device("cuda" if use_cuda else "cpu") + + +def np_array(tensor): + """Convert a Pytorch tensor to a Numpy array.""" + tensor = tensor.squeeze(0) + tensor = tensor.detach().cpu() + return tensor.numpy() + + +def skip(splits, save_folder, conf): + """ + Detects if the CVSS code extraction has already been done. + If the extraction has been done, we can skip it. + + Arguments + --------- + splits: list + The portions of the data to check. + save_folder: str + Path to folder with generated files to check. + conf: dict + Configuration for checking against old config. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. 
+ """ + # Checking json files + skip = True + + split_files = { + "train": TRAIN_JSON, + "valid": VALID_JSON, + "valid_small": VALID_SMALL, + "test": TEST_JSON, + } + + for split in splits: + if not (save_folder / split_files[split]).exists(): + skip = False + + code_folder = save_folder / "codes" + if not code_folder.exists(): + skip = False + + # Checking saved options + save_opt = save_folder / OPT_FILE + if skip is True: + if save_opt.is_file(): + opts_old = load_pkl(save_opt.as_posix()) + if opts_old == conf: + skip = True + else: + skip = False + else: + skip = False + return skip + + +def extract_cvss( + data_folder, + splits, + kmeans_folder, + encoder, + layer, + save_folder, + sample_rate=16000, + skip_extract=False, +): + """ + Extract speech units for HiFi-GAN training on the CVSS datasets. + + Arguments + --------- + data_folder : str + Path to the folder where the original CVSS dataset is stored. + splits : list + List of splits to prepare. + kmeans_folder: str + Path to the folder where the k-means model checkpoint is stored. + encoder: str + Url to the model used as feature extractor. + layer: int + Layer from which features are extracted. + save_folder: str + Path to the folder where the speech units are stored. + sample_rate: int + CVSS dataset sample rate + skip_extract: Bool + If True, skip extraction. + + Returns + ------- + None + + Example + ------- + >>> from recipes.CVSS.S2ST.extract_code import extract_cvss + >>> data_folder = "data/CVSS/" + >>> splits = ["train", "valid"] + >>> kmeans_folder = ./Quantization/results/kmeans/4321/save + >>> encoder = facebook / hubert - base - ls960 + >>> layer = 6 + >>> save_folder = "save/" + >>> extract_cvss( + ... data_folder, splits, kmeans_folder, encoder, layer, save_folder + ... 
) + """ + logger = setup_logger() + + if skip_extract: + return + # Create configuration for easily skipping code extraction stage + conf = { + "data_folder": data_folder, + "splits": splits, + "save_folder": save_folder, + "kmeans_folder": kmeans_folder, + "encoder": encoder, + "layer": layer, + } + + save_folder = pl.Path(save_folder) + # Check if this phase is already done (if so, skip it) + if skip(splits, save_folder, conf): + logger.info("Skipping code extraction, completed in previous run.") + return + + # Fetch device + device = get_device(use_cuda=True) + + save_opt = save_folder / OPT_FILE + data_folder = pl.Path(data_folder) + + # Fetch K-means model + kmeans_folder = pl.Path(kmeans_folder) + kmeans_ckpt = kmeans_folder / "kmeans.ckpt" + if not kmeans_ckpt.exists(): + logger.info("K-means checkpoint not found, downloading it from HF.") + kmeans_download_path = save_folder / "pretrained_models/quantization" + kmeans_download_path.mkdir(exist_ok=True, parents=True) + hf_hub_download( + repo_id=kmeans_folder.as_posix(), + filename="kmeans.ckpt", + local_dir=kmeans_download_path, + ) + kmeans_ckpt = kmeans_download_path / "kmeans.ckpt" + + encoder_save_path = save_folder / "pretrained_models" + code_folder = save_folder / "codes" + code_folder.mkdir(parents=True, exist_ok=True) + + logger.info(f"Loading encoder: {encoder} ...") + encoder = Wav2Vec2( + encoder, + encoder_save_path.as_posix(), + output_all_hiddens=True, + output_norm=False, + freeze_feature_extractor=True, + freeze=True, + ).to(device) + + # K-means model + logger.info(f"Loading K-means model from {kmeans_ckpt} ...") + kmeans_model = joblib.load(open(kmeans_ckpt, "rb")) + kmeans_model.verbose = False + + for split in splits: + dataset_path = data_folder / f"{split}.json" + logger.info(f"Reading dataset from {dataset_path} ...") + meta_json = json.load(open(dataset_path, encoding="utf-8")) + for key in tqdm(meta_json.keys()): + item = meta_json[key] + wav = item["tgt_audio"] + with 
torch.no_grad(): + info = audio_io.info(wav) + audio = sb.dataio.dataio.read_audio(wav) + audio = torchaudio.transforms.Resample( + info.sample_rate, + sample_rate, + )(audio) + audio = audio.unsqueeze(0).to(device) + feats = encoder.extract_features(audio) + feats = feats[layer] + feats = np_array(feats) + pred = kmeans_model.predict(feats) + np.save(code_folder / f"{key}_tgt.npy", pred) + + logger.info("Extraction completed.") + save_pkl(conf, save_opt) diff --git a/recipes/CVSS/S2ST/hparams/train_fr-en.yaml b/recipes/CVSS/S2ST/hparams/train_fr-en.yaml new file mode 100644 index 0000000000..4a9fb10286 --- /dev/null +++ b/recipes/CVSS/S2ST/hparams/train_fr-en.yaml @@ -0,0 +1,233 @@ +############################################################################ +# Model: Speech-to-Unit Translation (S2UT) +# Language: French-English (Fr-En) +# Training: CVSS +# Authors: Jarod Duret +# ############################################################################ + +################################### +# Experiment Parameters and setup # +################################### +seed: 888 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/s2ut/ +save_folder: !ref /save +train_log: !ref /train_log.txt +epochs: 30 + +progress_samples: True +progress_sample_path: !ref /samples +progress_samples_interval: 1 +progress_batch_sample_size: 4 + +evaluation_interval: 4 + +################################# +# Data files and pre-processing # +################################# +src_data_folder: !PLACEHOLDER # e.g, /corpus/CommonVoice/fr (French Data) +tgt_data_folder: !PLACEHOLDER # e.g, /corpus/CV4/fr (English Data) +sample_rate: 16000 + +train_json: !ref /train.json +valid_json: !ref /valid.json +valid_small_json: !ref /valid_small.json +test_json: !ref /test.json +splits: ["train", "valid_small", "valid", "test"] +skip_prep: False + +# SSL model used to encode target features +encoder_source: facebook/hubert-base-ls960 +layer: 6 +kmeans_source: 
speechbrain/tts-hifigan-unit-hubert-l6-k100-ljspeech +codes_folder: !ref /codes +skip_extract: False + +# Vocoder model used for evaluation +vocoder_source: speechbrain/tts-hifigan-unit-hubert-l6-k100-ljspeech +vocoder_download_path: !ref /pretrained_models/vocoder + +# ASR model used for evaluation +asr_source: speechbrain/asr-transformer-transformerlm-librispeech +asr_download_path: !ref /pretrained_models/asr + +# Wav2vec2 encoder +wav2vec2_source: LeBenchmark/wav2vec2-FR-7K-large +wav2vec2_download_path: !ref /pretrained_models + +# wav2vec2 encoder specific parameters +wav2vec2_frozen: False +wav2vec2_freeze_steps: 10000 + +####################### Training Parameters #################################### +lr: 0.0005 +lr_wav2vec: 0.00001 +loss_reduction: batchmean + +# Outputs +# blank_index: 102 +bos_index: 100 +eos_index: 101 +pad_index: 102 +label_smoothing: 0.2 + +# Dynamic batching +sorting: random +num_workers: 4 +dynamic_batching: True +max_batch_len: 180 # 40 GB GPU +num_bucket: 200 + +train_batch_size: 32 # if not using dynamic batching +valid_batch_size: 16 + +dynamic_batch_sampler: + max_batch_len: !ref + num_buckets: !ref + shuffle_ex: True # if true re-creates batches at each epoch shuffling examples. 
+ batch_ordering: random + max_batch_ex: 128 + +train_dataloader_opts: + batch_size: !ref + drop_last: False + num_workers: !ref + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: !ref + +valid_dataloader_opts: + batch_size: !ref + num_workers: !ref + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: !ref + +################################ +# Model Parameters and model # +################################ + +# Feature parameters (W2V2 etc) +features_dim: 1024 # large wav2vec output dimension, for base replace by 768 + +# Length Regulator +enc_kernel_size: 3 +enc_stride: 2 + +# Transformer +embedding_size: 512 +d_model: 512 +nhead: 8 +num_encoder_layers: 0 +num_decoder_layers: 6 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 103 # /!\ needs to be changed accordingly to the vocabulary +attention_type: "regularMHA" # "RelPosMHAXL" or "regularMHA" + +# Decoding parameters +test_bs: 10 +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 + +############################## models ################################ +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True ### Test in baseline_v2 + freeze: !ref + freeze_feature_extractor: False + save_path: !ref + apply_spec_augment: False + +enc: !new:speechbrain.nnet.CNN.Conv1d + input_shape: [null, null, !ref ] + out_channels: !ref + kernel_size: !ref + stride: !ref + +transformer: !new:speechbrain.lobes.models.transformer.TransformerST.TransformerST # yamllint disable-line rule:line-length + input_size: !ref + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + attention_type: !ref + normalize_before: True + causal: False + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref 
+ +modules: + wav2vec2: !ref + enc: !ref + transformer: !ref + seq_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref ] + +opt_class: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + +wav2vec_opt_class: !name:torch.optim.AdamW + lr: !ref + +seq_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 5000 + +wav2vec_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.98 + +#epoch object +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +valid_search: !new:speechbrain.decoders.seq2seq.S2STransformerGreedySearcher + modules: [!ref , !ref , null] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + temperature: 1.0 + +test_search: !new:speechbrain.decoders.seq2seq.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats +bleu_computer: !name:speechbrain.integrations.nlp.bleu.BLEUStats + +#checkpointer +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + wav2vec2: !ref + counter: !ref + noam_scheduler: !ref + wav2vec_scheduler: !ref diff --git a/recipes/CVSS/S2ST/train.py b/recipes/CVSS/S2ST/train.py new file mode 100755 index 0000000000..aceae911a5 --- /dev/null +++ b/recipes/CVSS/S2ST/train.py @@ -0,0 +1,639 @@ +""" +Recipe for training the speech-to-unit translation (S2UT) model, the implementation is based on the following papers: +- Direct speech-to-speech translation with discrete units: (https://arxiv.org/abs/2006.04558) +- Enhanced Direct 
Speech-to-Speech Translation Using Self-supervised Pre-training and Data Augmentation: (https://arxiv.org/abs/2204.02967) +To run this recipe, do the following: +# python train.py hparams/train_fr-en.yaml --src_data_folder=/corpus/CommonVoice/fr --tgt_data_folder=/corpus/CVSS/fr + +Authors +* Jarod Duret 2023 +""" + +import pathlib as pl +import sys + +import numpy as np +import torch +import torchaudio +import tqdm +from hyperpyyaml import load_hyperpyyaml +from torch.nn.parallel import DistributedDataParallel + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.inference.ASR import EncoderDecoderASR +from speechbrain.inference.vocoders import UnitHIFIGAN +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +class S2UT(sb.core.Brain): + def compute_forward(self, batch, stage): + """Computes the forward pass. + + Arguments + --------- + batch : torch.Tensor or tensors + An element from the dataloader, including inputs for processing. + stage : Stage + The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST + + Returns + ------- + (torch.Tensor or torch.Tensors, list of float or None, list of str or None) + The outputs after all processing is complete. 
+ """ + batch = batch.to(self.device) + wavs, wav_lens = batch.src_sig + tokens_bos, _ = batch.code_bos + + # Use default padding value for wav2vec2 + wavs[wavs == self.hparams.pad_index] = 0.0 + + # compute features + enc_out = self.modules.wav2vec2(wavs, wav_lens) + + # dimensionality reduction + enc_out = self.modules.enc(enc_out) + + if isinstance(self.modules.transformer, DistributedDataParallel): + dec_out = self.modules.transformer.module.forward_mt_decoder_only( + enc_out, tokens_bos, pad_idx=self.hparams.pad_index + ) + else: + dec_out = self.modules.transformer.forward_mt_decoder_only( + enc_out, tokens_bos, pad_idx=self.hparams.pad_index + ) + + # logits and softmax + pred = self.modules.seq_lin(dec_out) + p_seq = self.hparams.log_softmax(pred) + + hyps = None + wavs = None + transcripts = None + if stage != sb.Stage.TRAIN: + if ( + stage == sb.Stage.TEST + or self.hparams.epoch_counter.current + % self.hparams.evaluation_interval + == 0 + ): + ids = batch.id + tgt_text = batch.tgt_text + + search = ( + self.hparams.valid_search + if stage == sb.Stage.VALID + else self.hparams.test_search + ) + hyps, _, _, _ = search(enc_out.detach(), wav_lens) + + # generate speech and transcriptions + wavs = [] + for hyp in hyps: + if len(hyp) > 10: + code = torch.LongTensor(hyp[:-1]) + wav = self.test_vocoder.decode_unit(code.unsqueeze(-1)) + wavs.append(wav.squeeze(0)) + else: + logger.warning( + f"Encountered hyp {hyp} too short for decoding, using fake blank audio for testing" + ) + wavs.append(torch.zeros(40000)) # on cpu device + if wavs: + wavs, wav_lens = sb.utils.data_utils.batch_pad_right(wavs) + transcripts, _ = self.test_asr.transcribe_batch( + wavs, wav_lens + ) + transcripts = [ + transcript.lower() for transcript in transcripts + ] + + self.bleu_metric.append(ids, transcripts, [tgt_text]) + + return ( + p_seq, + wavs, + transcripts, + ) + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted 
outputs. + Arguments + --------- + predictions : torch.Tensor + The model generated spectrograms and other metrics from `compute_forward`. + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient. + """ + (p_seq, wavs, transcripts) = predictions + tokens_eos, tokens_eos_lens = batch.code_eos + ids = batch.id + + # speech translation loss + loss = self.hparams.seq_cost(p_seq, tokens_eos, length=tokens_eos_lens) + + if stage != sb.Stage.TRAIN: + if ( + stage == sb.Stage.TEST + or self.hparams.epoch_counter.current + % self.hparams.evaluation_interval + == 0 + ): + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) + + tgt_wavs, _ = batch.tgt_sig + tgt_transcripts = batch.tgt_text + + # Save last batch + wavs = [wav.cpu() for wav in wavs] + tgt_wavs = [wav.cpu() for wav in tgt_wavs] + self.last_batch = [ + ids, + (wavs, transcripts), + (tgt_transcripts, tgt_wavs), + ] + + return loss + + def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} + if ( + not self.hparams.wav2vec2_frozen + and self.optimizer_step >= self.hparams.wav2vec2_freeze_steps + ): + valid_optimizers["wav2vec_optimizer"] = optimizers[ + "wav2vec_optimizer" + ] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers + + def init_optimizers(self): + """Called during ``on_fit_start()``, initialize optimizers + after parameters are fully configured (e.g. DDP, jit). 
+ """ + self.optimizers_dict = {} + + # Initializes the wav2vec2 optimizer if the model is not wav2vec2_frozen + if not self.hparams.wav2vec2_frozen: + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.wav2vec2.parameters() + ) + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + + self.model_optimizer = self.hparams.opt_class( + self.hparams.model.parameters() + ) + self.optimizers_dict["model_optimizer"] = self.model_optimizer + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "wav2vec_optimizer", self.wav2vec_optimizer + ) + self.checkpointer.add_recoverable( + "model_optimizer", self.model_optimizer + ) + + def on_fit_batch_start(self, batch, should_step): + """Called at the beginning of ``fit_batch()``. + + Arguments + --------- + batch : list of torch.Tensors + Batch of data to use for training. Default implementation assumes + this batch has two elements: inputs and targets. + should_step : boolean + Whether optimizer.step() was called or not. + """ + if self.optimizer_step == self.hparams.wav2vec2_freeze_steps: + logger.warning( + "speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is unfrozen." + ) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """Called after ``fit_batch()``, meant for calculating and logging metrics. + + Arguments + --------- + batch : list of torch.Tensors + Batch of data to use for training. Default implementation assumes + this batch has two elements: inputs and targets. + outputs : list or dictionary of torch.Tensors + Returned value of compute_forward(). + loss : torch.Tensor + Returned value of compute_objectives(). + should_step : boolean + Whether optimizer.step() was called or not. + """ + if should_step: + # anneal model lr every update + self.hparams.noam_annealing(self.model_optimizer) + + def on_stage_start(self, stage, epoch): + """Gets called when a stage starts. 
+ + Arguments + --------- + stage : Stage + The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST + epoch : int + The current epoch count. + + Returns + ------- + None + """ + if stage != sb.Stage.TRAIN: + if ( + stage == sb.Stage.VALID + and epoch % self.hparams.evaluation_interval != 0 + ): + return + + self.acc_metric = self.hparams.acc_computer() + self.bleu_metric = self.hparams.bleu_computer() + self.last_batch = None + + logger.info("Loading pretrained HiFi-GAN ...") + self.test_vocoder = UnitHIFIGAN.from_hparams( + source=self.hparams.vocoder_source, + savedir=self.hparams.vocoder_download_path, + run_opts={"device": "cpu"}, + ) + + logger.info("Loading pretrained ASR ...") + self.test_asr = EncoderDecoderASR.from_hparams( + source=self.hparams.asr_source, + savedir=self.hparams.asr_download_path, + run_opts={"device": "cpu"}, + ) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch. + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. 
+ """ + if stage == sb.Stage.TRAIN: + self.train_stats = stage_loss + + # At the end of validation, we can write + elif ( + stage == sb.Stage.VALID + and epoch % self.hparams.evaluation_interval == 0 + ): + # delete vocoder and asr to free memory for next training epoch + del self.test_vocoder + del self.test_asr + + stage_stats = {"loss": stage_loss} + stage_stats["ACC"] = self.acc_metric.summarize() + stage_stats["BLEU"] = self.bleu_metric.summarize("BLEU") + + output_progress_sample = ( + self.hparams.progress_samples + and epoch % self.hparams.progress_samples_interval == 0 + ) + + if output_progress_sample: + self._save_progress_sample(epoch) + + current_epoch = self.hparams.epoch_counter.current + lr_model = self.hparams.noam_annealing.current_lr + lr_wav2vec = 0.0 + + if not self.hparams.wav2vec2_frozen: + (lr_wav2vec, new_lr_wav2vec) = self.hparams.wav2vec_annealing( + stage_stats["ACC"] + ) + sb.nnet.schedulers.update_learning_rate( + self.wav2vec_optimizer, new_lr_wav2vec + ) + + self.hparams.train_logger.log_stats( + stats_meta={ + "epoch": current_epoch, + "lr_model": lr_model, + "lr_wav2vec": lr_wav2vec, + }, + train_stats={"loss": self.train_stats}, + valid_stats=stage_stats, + ) + + # Save the current checkpoint and delete previous checkpoints. 
+ self.checkpointer.save_and_keep_only( + meta={ + "ACC": stage_stats["ACC"], + "BLEU": stage_stats["BLEU"], + "epoch": epoch, + }, + max_keys=["BLEU"], + num_to_keep=10, + ) + + elif stage == sb.Stage.TEST: + stage_stats = {"loss": stage_loss} + stage_stats["BLEU"] = self.bleu_metric.summarize("BLEU") + + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + + logger.info( + f"BLEU score: {round(self.bleu_metric.summarize('BLEU'), 2)}" + ) + bleu_file = pl.Path(self.hparams.output_folder) / "bleu.txt" + with open(bleu_file, "a+", encoding="utf-8") as w: + self.bleu_metric.write_stats(w) + + def _save_progress_sample(self, epoch): + """Save samples and BLEU score from last batch for current epoch. + + Arguments + --------- + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + + Returns + ------- + None + """ + if self.last_batch is None: + return + + ( + ids, + (wavs, transcripts), + (tgt_transcripts, tgt_wavs), + ) = self.last_batch + + save_folder = pl.Path(self.hparams.progress_sample_path) / f"{epoch}" + save_folder.mkdir(parents=True, exist_ok=True) + + sample_size = self.hparams.progress_batch_sample_size + if len(ids) < sample_size: + sample_size = len(ids) + + for i in tqdm.tqdm(range(sample_size)): + utt_id = ids[i] + wav = wavs[i] + transcript = transcripts[i] + tgt_transcript = tgt_transcripts[i] + tgt_wav = tgt_wavs[i] + + sample_path = save_folder / f"{utt_id}_pred.wav" + sb.dataio.dataio.write_audio( + sample_path, wav, self.hparams.sample_rate + ) + + sample_path = save_folder / f"{utt_id}_ref.wav" + sb.dataio.dataio.write_audio( + sample_path, tgt_wav, self.hparams.sample_rate + ) + + sample_path = save_folder / f"{utt_id}.txt" + with open(sample_path, "w", encoding="utf-8") as file: + file.write(f"pred: {transcript}\n") + file.write(f"ref: {tgt_transcript}\n") + + self.bleu_metric.append( + ids[:sample_size], + 
transcripts[:sample_size], + [tgt_transcripts[:sample_size]], + ) + + bleu_path = save_folder / "bleu.txt" + with open(bleu_path, "w", encoding="utf-8") as file: + file.write( + f"BLEU score: {round(self.bleu_metric.summarize('BLEU'), 2)}\n" + ) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + codes_folder = pl.Path(hparams["codes_folder"]) + + # Define audio pipeline. In this case, we simply read the audio contained + # in the variable src_audio with the custom reader. + @sb.utils.data_pipeline.takes("src_audio") + @sb.utils.data_pipeline.provides("src_sig") + def src_audio_pipeline(wav): + """Load the source language audio signal. + This is done on the CPU in the `collate_fn` + """ + info = audio_io.info(wav) + sig = sb.dataio.dataio.read_audio(wav) + sig = torchaudio.transforms.Resample( + info.sample_rate, hparams["sample_rate"] + )(sig) + return sig + + @sb.utils.data_pipeline.takes("tgt_audio") + @sb.utils.data_pipeline.provides("tgt_sig") + def tgt_audio_pipeline(wav): + """Load the target language audio signal. + This is done on the CPU in the `collate_fn`. 
+ """ + info = audio_io.info(wav) + sig = sb.dataio.dataio.read_audio(wav) + sig = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(sig) + return sig + + @sb.utils.data_pipeline.takes("id") + @sb.utils.data_pipeline.provides("code_bos", "code_eos") + def unit_pipeline(utt_id): + """Load target codes""" + code = np.load(codes_folder / f"{utt_id}_tgt.npy") + code = torch.LongTensor(code) + code = torch.unique_consecutive(code) + code_bos = torch.cat((torch.LongTensor([hparams["bos_index"]]), code)) + yield code_bos + code_eos = torch.cat((code, torch.LongTensor([hparams["eos_index"]]))) + yield code_eos + + datasets = {} + for split in hparams["splits"]: + datasets[split] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams[f"{split}_json"], + dynamic_items=[ + src_audio_pipeline, + tgt_audio_pipeline, + unit_pipeline, + ], + output_keys=[ + "id", + "src_sig", + "tgt_sig", + "duration", + "code_bos", + "code_eos", + "tgt_text", + ], + ) + + # Sorting training data with ascending order makes the code much + # faster because we minimize zero-padding. In most of the cases, this + # does not harm the performance. 
+ if hparams["sorting"] == "ascending": + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="duration" + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + sort_key="duration" + ) + + hparams["train_dataloader_opts"]["shuffle"] = False + hparams["valid_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="duration", reverse=True + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + sort_key="duration", reverse=True + ) + + hparams["train_dataloader_opts"]["shuffle"] = False + hparams["valid_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + hparams["train_dataloader_opts"]["shuffle"] = True + hparams["valid_dataloader_opts"]["shuffle"] = False + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + # Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams = hparams["dynamic_batch_sampler"] + num_buckets = dynamic_hparams["num_buckets"] + + train_batch_sampler = DynamicBatchSampler( + datasets["train"], + dynamic_hparams["max_batch_len"], + num_buckets=num_buckets, + length_func=lambda x: x["duration"], + shuffle=dynamic_hparams["shuffle_ex"], + batch_ordering=dynamic_hparams["batch_ordering"], + max_batch_ex=dynamic_hparams["max_batch_ex"], + ) + + return datasets, train_batch_sampler + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If distributed_launch=True then + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment 
directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + sys.path.append("../") + from cvss_prepare import prepare_cvss + + sb.utils.distributed.run_on_main( + prepare_cvss, + kwargs={ + "src_data_folder": hparams["src_data_folder"], + "tgt_data_folder": hparams["tgt_data_folder"], + "save_folder": hparams["save_folder"], + "splits": hparams["splits"], + "seed": hparams["seed"], + "skip_prep": hparams["skip_prep"], + }, + ) + + from extract_code import extract_cvss + + sb.utils.distributed.run_on_main( + extract_cvss, + kwargs={ + "data_folder": hparams["save_folder"], + "splits": hparams["splits"], + "kmeans_folder": hparams["kmeans_source"], + "encoder": hparams["encoder_source"], + "layer": hparams["layer"], + "save_folder": hparams["save_folder"], + "sample_rate": hparams["sample_rate"], + "skip_extract": hparams["skip_extract"], + }, + ) + + datasets, train_bsampler = dataio_prepare(hparams) + + s2ut_brain = S2UT( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + "collate_fn": hparams["train_dataloader_opts"]["collate_fn"], + } + + s2ut_brain.fit( + s2ut_brain.hparams.epoch_counter, + datasets["train"], + datasets["valid_small"], + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + test_dataloader_opts = { + "batch_size": 1, + } + + for dataset in ["valid", "test"]: + s2ut_brain.evaluate( + datasets[dataset], + max_key="BLEU", + test_loader_kwargs=test_dataloader_opts, + ) diff --git a/recipes/CVSS/cvss_prepare.py b/recipes/CVSS/cvss_prepare.py new file mode 
100644 index 0000000000..ab5231dbb4 --- /dev/null +++ b/recipes/CVSS/cvss_prepare.py @@ -0,0 +1,257 @@ +""" +CVSS data preparation. +Download: https://github.com/google-research-datasets/cvss + +Authors + * Jarod DURET 2023 +""" + +import csv +import json +import logging +import os +import pathlib as pl +import random + +import tqdm + +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_pkl, save_pkl +from speechbrain.utils.logger import get_logger + +OPT_FILE = "opt_cvss_prepare.pkl" + +SRC_METADATA = "validated.tsv" +TGT_METADATA = { + "train": "train.tsv", + "valid": "dev.tsv", + "test": "test.tsv", +} + +# Need to be set according to your system +SRC_AUDIO = "clips" +TGT_AUDIO = { + "train": "train", + "valid": "dev", + "test": "test", +} + +# Number of samples for the small evaluation subset +SMALL_EVAL_SIZE = 1000 + +log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" +logging.basicConfig(format=log_format, level=logging.INFO) +logger = get_logger(__name__) + + +def prepare_cvss( + src_data_folder, + tgt_data_folder, + save_folder, + splits=["train", "valid", "test"], + seed=1234, + skip_prep=False, +): + """ + Prepares the json files for the CVSS datasets. + + Arguments + --------- + src_data_folder : str + Path to the folder where the original source CV data is stored. + tgt_data_folder : str + Path to the folder where the original target CVSS data is stored. + save_folder : str + The directory where to store the json files. + splits : list + List of splits to prepare. + seed : int + Random seed + skip_prep: Bool + If True, skip preparation. + + Returns + ------- + None + """ + # setting seeds for reproducible code.
+ random.seed(seed) + + if skip_prep: + return + + # Create configuration for easily skipping data_preparation stage + conf = { + "src_data_folder": src_data_folder, + "tgt_data_folder": tgt_data_folder, + "splits": splits, + "save_folder": save_folder, + "seed": seed, + } + + if not os.path.exists(save_folder): + os.makedirs(save_folder) + + src_validated = pl.Path(src_data_folder) / SRC_METADATA + tgt_train = pl.Path(tgt_data_folder) / TGT_METADATA["train"] + tgt_valid = pl.Path(tgt_data_folder) / TGT_METADATA["valid"] + tgt_test = pl.Path(tgt_data_folder) / TGT_METADATA["test"] + + src_audio = pl.Path(src_data_folder) / SRC_AUDIO + tgt_audio_train = pl.Path(tgt_data_folder) / TGT_AUDIO["train"] + tgt_audio_valid = pl.Path(tgt_data_folder) / TGT_AUDIO["valid"] + tgt_audio_test = pl.Path(tgt_data_folder) / TGT_AUDIO["test"] + + save_opt = pl.Path(save_folder) / OPT_FILE + save_json_train = pl.Path(save_folder) / "train.json" + save_json_valid = pl.Path(save_folder) / "valid.json" + save_json_valid_small = pl.Path(save_folder) / "valid_small.json" + save_json_test = pl.Path(save_folder) / "test.json" + + # Check if this phase is already done (if so, skip it) + if skip(splits, save_folder, conf): + logger.info("Skipping preparation, completed in previous run.") + return + + msg = "\tCreating json file for CVSS Dataset.." 
+ logger.info(msg) + + # Prepare csv + if "train" in splits: + prepare_json( + save_json_train, + src_audio, + tgt_audio_train, + src_validated, + tgt_train, + ) + if "valid" in splits: + prepare_json( + save_json_valid, + src_audio, + tgt_audio_valid, + src_validated, + tgt_valid, + ) + prepare_json( + save_json_valid_small, + src_audio, + tgt_audio_valid, + src_validated, + tgt_valid, + limit_to_n_sample=SMALL_EVAL_SIZE, + ) + if "test" in splits: + prepare_json( + save_json_test, + src_audio, + tgt_audio_test, + src_validated, + tgt_test, + ) + + save_pkl(conf, save_opt) + + +def skip(splits, save_folder, conf): + """ + Detects if the cvss data_preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + splits: list + The dataset portions to check. + save_folder: str + The path to the location of generated files. + conf: dict + The configuration to check against the saved config. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + # Checking json files + skip = True + + split_files = { + "train": "train.json", + "valid": "valid.json", + "valid_small": "valid_small.json", + "test": "test.json", + } + + for split in splits: + if not os.path.isfile(os.path.join(save_folder, split_files[split])): + skip = False + + # Checking saved options + save_opt = os.path.join(save_folder, OPT_FILE) + if skip is True: + if os.path.isfile(save_opt): + opts_old = load_pkl(save_opt) + if opts_old == conf: + skip = True + else: + skip = False + else: + skip = False + return skip + + +def prepare_json( + json_file, + src_audio_folder, + tgt_audio_folder, + src_validated, + tgt_split, + limit_to_n_sample=None, +): + """ + Creates json file. 
+ + """ + + json_dict = {} + tgt_meta = list( + csv.reader( + open(tgt_split, newline="", encoding="utf-8"), + delimiter="\t", + quoting=csv.QUOTE_NONE, + ) + ) + + limit_to_n_sample = ( + len(tgt_meta) if not limit_to_n_sample else limit_to_n_sample + ) + + for i in tqdm.tqdm(range(limit_to_n_sample)): + session_id = tgt_meta[i][0].split(".")[0] + + tgt_audio = f"{tgt_audio_folder}/{session_id}.mp3.wav" + src_audio = f"{src_audio_folder}/{session_id}.mp3" + + src_sig, sr = audio_io.load(src_audio) + duration = src_sig.shape[1] / sr + + # src_text = meta_dict[session_id]["sentence"] + tgt_text = tgt_meta[i][1] + + if duration < 1.5 or len(tgt_text) < 10: + continue + + json_dict[session_id] = { + "src_audio": src_audio, + "tgt_audio": tgt_audio, + "duration": duration, + # "src_text": src_text, + "tgt_text": tgt_text, + } + + # Writing the dictionary to the json file + with open(json_file, mode="w", encoding="utf-8") as json_f: + json.dump(json_dict, json_f, indent=2) + + logger.info(f"{json_file} successfully created!") diff --git a/recipes/CoVoST/AST/README.md b/recipes/CoVoST/AST/README.md new file mode 100644 index 0000000000..49d7b67acd --- /dev/null +++ b/recipes/CoVoST/AST/README.md @@ -0,0 +1,44 @@ +# CoVoST speech to text translation + +This folder contains the scripts necessary to run automatic speech translation with the [CoVoST dataset](https://github.com/facebookresearch/covost) based on [CommonVoice](https://commonvoice.mozilla.org/en/datasets). + +Two approaches are available: +1. Training from scratch with a conformer encoder-decoder model and multitask speech recognition plus speech translation training. +2. SpeechLLM fine-tuning based on SSL speech encoders and LLaMA large language models (with and without adapters). + +# How to run +```shell +python train{_xlsr_llama}.py hparams/{hparam_file}.yaml +``` + +# Data preparation +It is important to note that CommonVoice initially offers mp3 audio files.
It is feasible to convert these files to .wav during data preparation; this will speed up training but also make the first data preparation pretty slow. Audio files are downsampled on the fly within the dataio function of the training script. + +# Languages +While CoVoST offers multiple languages, this recipe was only tested on English to German translation. However, there is nothing special to do to select another language pair aside from adding a proper text normalisation in the covost_prepare.py file. + +# Results +| Language | hyperparams file | Encoder | LLM | Test BLEU | Hugging Face link | Model link | GPUs | +| ------------- |:-------------:|:---------------------------:| -----:| -----:| -----:| -----:| -----:| +| English - German | conformer.yaml | conformer | None | 13.9 | None | None | 2x A40 | +| English - German | w2v2_llama3.yaml| wavlm-large | LLaMA 3.1 7B | 27.2 | None | None | 2x A100 | + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please cite SpeechBrain if you use it for your research or business.
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} diff --git a/recipes/CoVoST/AST/covost_prepare.py b/recipes/CoVoST/AST/covost_prepare.py new file mode 120000 index 0000000000..a234a0103d --- /dev/null +++ b/recipes/CoVoST/AST/covost_prepare.py @@ -0,0 +1 @@ +../covost_prepare.py \ No newline at end of file diff --git a/recipes/CoVoST/AST/extra_requirements.txt b/recipes/CoVoST/AST/extra_requirements.txt new file mode 100644 index 0000000000..454a21c035 --- /dev/null +++ b/recipes/CoVoST/AST/extra_requirements.txt @@ -0,0 +1 @@ +sacrebleu diff --git a/recipes/CoVoST/AST/hparams/conformer.yaml b/recipes/CoVoST/AST/hparams/conformer.yaml new file mode 100644 index 0000000000..a0bcb3c3ee --- /dev/null +++ b/recipes/CoVoST/AST/hparams/conformer.yaml @@ -0,0 +1,268 @@ +# ############################################################################ +# Model: E2E AST with Transformer +# Encoder: Conformer Encoder +# Decoder: Transformer Decoder +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Authors: Titouan Parcollet +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are 
made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_en/ +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-4.0-en/fr +train_tsv_file: !PLACEHOLDER # Standard CoVoST .tsv files +dev_tsv_file: !PLACEHOLDER # Standard CoVoST .tsv files +test_tsv_file: !PLACEHOLDER # Standard CoVoST .tsv files +src_language: en +tgt_language: de +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv +skip_prep: False # Skip data preparation +convert_to_wav: True # Switch this to True to convert all mp3 files to wav. + +# We remove utterance slonger than 10s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". +avoid_if_longer_than: 10.0 +avoid_if_shorter_than: 1.0 + +# THIS IS TERRIBLE BUT WE HAVE NO CHOICE. +# Some version of the CV dataset may contain one or two files of more than +# 2 min in the validation and or test. This is an error by design of the dataset +# as these files contain 90% of silence. We exclude them. +avoid_if_longer_than_val_test: 90.0 + +ckpt_interval_minutes: 15 # save checkpoint every N min + +####################### Training Parameters #################################### +number_of_epochs: 200 +optimizer_step_limit: 150000 +batch_size: 32 # Only used if dynamic batching is not used. +ctc_weight: 0.3 +grad_accumulation_factor: 1 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +precision: fp16 # bf16, fp16 or fp32 + +# stages related parameters +lr_adam: 0.0008 +weight_decay: 0.01 +asr_warmup_steps: !ref +warmup_steps: 20000 +augment_warmup: 25000 + +# BPE parameters +token_type: unigram # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for A40 46GB GPU, adapts it to your needs. 
+# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 300 +max_batch_length_val: 300 +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 8 + +test_dataloader_opts: + batch_size: 8 + + +####################### Model Parameters ########################### +# Transformer +d_model: 256 +nhead: 4 +num_encoder_layers: 12 +num_decoder_layers: 6 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 2048 +asr_output_neurons: 1024 + +# Outputs +blank_index: 0 +label_smoothing: 0.0 +pad_index: 1 +bos_index: 2 +eos_index: 3 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 5 # We do greedy here so it's faster to decode ... 
+test_beam_size: 80 + +############################## models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + conformer_activation: !ref + activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +# We define two optimizers as we have two stages (training + finetuning) +Adam: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: True + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: 
!new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + +############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 3 + drop_count_high: 3 + replace: "zeros" + dim: 1 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats +bleu_computer: !name:speechbrain.utils.bleu.BLEUStats diff --git a/recipes/CoVoST/AST/hparams/w2v2_llama3.yaml b/recipes/CoVoST/AST/hparams/w2v2_llama3.yaml new file mode 100644 index 0000000000..12912719d2 --- /dev/null +++ b/recipes/CoVoST/AST/hparams/w2v2_llama3.yaml @@ -0,0 +1,246 @@ +# ############################################################################ +# Model: E2E AST with XLS-R and Llama3 +# Encoder: XLS-R +# Decoder: LLama3 +# Tokens: unigram +# losses: KLdiv (Label Smoothing loss) +# Authors: Titouan Parcollet +# ############################################################################ +# Seed needs to be set at top of yaml, before 
objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/w2v2_llama/ +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-4.0-en/fr +train_tsv_file: !PLACEHOLDER # Standard CoVoST .tsv files +dev_tsv_file: !PLACEHOLDER # Standard CoVoST .tsv files +test_tsv_file: !PLACEHOLDER # Standard CoVoST .tsv files +src_language: en +tgt_language: de +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv +skip_prep: False # Skip data preparation +convert_to_wav: True # Switch this to True to convert all mp3 files to wav. + +# URL for the HuggingFace model we want to load (BASE here) +wav2vec2_hub: microsoft/wavlm-large +wav2vec2_folder: !ref /wav2vec2_checkpoint +wav2vec2_frozen: False + +# LLM options +llm_path: !PLACEHOLDER # e.g llama-2/llama-2-7b-hf +llm_prompt: " Translate English speech to German text: " +llm_emb_size: 3072 + +# We remove utterance slonger than 10s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". +avoid_if_longer_than: 10.0 +avoid_if_shorter_than: 1.0 + +# THIS IS TERRIBLE BUT WE HAVE NO CHOICE. +# Some version of the CV dataset may contain one or two files of more than +# 40 sec in the validation and or test. This is an error by design of the dataset +# as these files contain 90% of silence. We exclude them. +avoid_if_longer_than_val_test: 40.0 + +ckpt_interval_minutes: 15 # save checkpoint every N min + +####################### Training Parameters #################################### +number_of_epochs: 15 +optimizer_step_limit: 80000 +batch_size: 32 # Only used if dynamic batching is off. 
+grad_accumulation_factor: 1 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +precision: fp16 # bf16, fp16 or fp32 + +# stages related parameters +lr_adam: 0.0005 +lr_wav2vec: 0.00002 + +weight_decay: 0.001 +warmup_steps: 5000 +augment_warmup: 7500 + +# Feature parameters +sample_rate: 16000 +downsampling_factor: 5 # Used to downsample frames before llm projection. + +# This setup works well for A100 80GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 250 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: 128256 + +valid_dataloader_opts: + batch_size: 8 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: 128256 + +test_dataloader_opts: + batch_size: 8 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: 128256 + + +####################### Model Parameters ########################### +activation: !name:torch.nn.GELU + +# Frames - LLM projector params +dnn_layers: 2 +dnn_neurons: !ref + +# Decoding parameters +valid_beam_size: 1 # We do greedy here so it's faster to decode ... 
+test_beam_size: 5 + + +############################## models ################################ + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + +#wav2vec model +wav2vec2: !new:speechbrain.lobes.models.huggingface_transformers.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + normalize_wav: False + +proj: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, 5120] # 5 x 1024 + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +llm: !new:speechbrain.lobes.models.huggingface_transformers.llama.LLaMA + source: !ref + save_path: !ref + freeze: True + +# Simply uncomment if you want to use LoRA adaptation. +# lora_rank: 16 +# llm: !new:speechbrain.nnet.adapters.AdaptedModel +# model_to_adapt: !ref +# adapter_class: !name:speechbrain.nnet.adapters.LoRA +# all_linear: True +# adapter_kwargs: +# rank: !ref + +feat_downsampler: !new:speechbrain.lobes.downsampling.ConcatDownsampler + downsampling_factor: !ref + +modules: + wav2vec2: !ref + feat_downsampler: !ref + llm: !ref + proj: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref ] + +# We define two optimizers as we have two stages (training + finetuning) +Adam: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +Adam_wav2vec2: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +nll_loss: !name:speechbrain.nnet.losses.nll_loss + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: !ref + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 1 + +############################## Augmentations ################################### + +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 3 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 1 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref ] + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + proj: !ref + noam_scheduler: !ref + lr_annealing_wav2vec: !ref + counter: !ref + wav2vec2: !ref + llm: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats +bleu_computer: !name:speechbrain.utils.bleu.BLEUStats diff --git a/recipes/CoVoST/AST/train.py b/recipes/CoVoST/AST/train.py new file mode 100644 index 0000000000..61c20a417a --- /dev/null +++ b/recipes/CoVoST/AST/train.py @@ -0,0 +1,498 @@ +#!/usr/bin/env python3 +"""Recipe for training a Transformer AST system with CoVoST +The system employs an encoder, a decoder, and an attention mechanism +between them. An additional CTC loss can be used for warmup with an ASR task. 
+ +To run this recipe, do the following: +> python train.py hparams/conformer_large.yaml + +Author +------ + * Titouan Parcollet 2025 +""" + +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml +from sacremoses import MosesDetokenizer + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class AST(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_bos, _ = batch.tokens_bos + + # compute features + feats = self.hparams.compute_features(wavs) + feats = self.hparams.normalize(feats, wav_lens) + + # Add feature augmentation if specified. + if ( + stage == sb.Stage.TRAIN + and hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + feats, _ = self.hparams.fea_augment(feats, wav_lens) + + # forward modules + src = self.modules.CNN(feats) + enc_out, pred = self.modules.Transformer( + src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index + ) + + # output layer for ctc ASR log-probabilities if warming up + if self.optimizer_step < self.hparams.asr_warmup_steps: + logits = self.modules.ctc_lin(enc_out) + p_ctc = self.hparams.log_softmax(logits) + else: + p_ctc = None + + # output layer for seq2seq log-probabilities + pred = self.modules.seq_lin(pred) + p_seq = self.hparams.log_softmax(pred) + + # Compute outputs + hyps = None + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if is_valid_search: + hyps, _, _, _ = 
self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + + elif is_test_search: + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + + return p_ctc, p_seq, wav_lens, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + (p_ctc, p_seq, wav_lens, predicted_tokens) = predictions + + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + tokens_asr, tokens_asr_lens = batch.tokens_asr + + loss_seq = self.hparams.seq_cost( + p_seq, tokens_eos, length=tokens_eos_lens + ) + + # ASR warmup with CTC if specified. + if self.optimizer_step < self.hparams.asr_warmup_steps: + loss_ctc = self.hparams.ctc_cost( + p_ctc, tokens_asr, wav_lens, tokens_asr_lens + ) + loss = ( + self.hparams.ctc_weight * loss_ctc + + (1 - self.hparams.ctc_weight) * loss_seq + ) + else: + loss = loss_seq + + if stage != sb.Stage.TRAIN: + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if current_epoch % valid_search_interval == 0 or ( + stage == sb.Stage.TEST + ): + predictions = [ + self.tgt_detokenizer.detokenize( + self.tokenizer.sp.decode_ids(utt_seq).split(" ") + ) + for utt_seq in predicted_tokens + ] + + detokenized_translation = [ + self.tgt_detokenizer.detokenize(translation.split(" ")) + for translation in batch.translation + ] + + # it needs to be a list of list due to the extend on the bleu implementation + targets = [detokenized_translation] + + self.bleu_metric.append(ids, predictions, targets) + + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) + + return loss + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each 
epoch""" + + self.tgt_detokenizer = MosesDetokenizer(lang=self.hparams.tgt_language) + + if stage != sb.Stage.TRAIN: + self.acc_metric = self.hparams.acc_computer() + self.bleu_metric = self.hparams.bleu_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + or stage == sb.Stage.TEST + ): + stage_stats["BLEU"] = self.bleu_metric.summarize(field="BLEU") + stage_stats["BLEU_extensive"] = self.bleu_metric.summarize() + stage_stats["ACC"] = self.acc_metric.summarize() + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID: + # report different epoch stages according current stage + current_epoch = self.hparams.epoch_counter.current + lr = self.hparams.noam_annealing.current_lr + steps = self.hparams.noam_annealing.n_steps + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + } + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"ACC": stage_stats["ACC"], "epoch": epoch}, + max_keys=["ACC"], + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + + +# Define custom data procedure +def dataio_prepare(hparams, tokenizer_ast, tokenizer_asr): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + + # 1. 
Define datasets + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + train_data = train_data.filtered_sorted( + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + key_min_value={"duration": hparams["avoid_if_shorter_than"]}, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + # We also sort the validation data so it is faster to validate + valid_data = valid_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than_val_test"]}, + ) + + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, + ) + + # We also sort the validation data so it is faster to validate + test_data = test_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than_val_test"]}, + ) + + datasets = [train_data, valid_data, test_data] + + # 2. 
Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + info = audio_io.info(wav) + sig = sb.dataio.dataio.read_audio(wav) + resampled = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(sig) + return resampled + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. Define AST and ASR text pipelines: + @sb.utils.data_pipeline.takes("translation") + @sb.utils.data_pipeline.provides( + "translation", + "tokens_list", + "tokens_bos", + "tokens_eos", + "tokens", + ) + def st_text_pipeline(translation): + yield translation + tokens_list = tokenizer_ast.sp.encode_as_ids(translation) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + @sb.utils.data_pipeline.takes("transcription") + @sb.utils.data_pipeline.provides( + "transcription", + "tokens_asr_list", + "tokens_asr_bos", + "tokens_asr_eos", + "tokens_asr", + ) + def asr_text_pipeline(transcription): + yield transcription + tokens_asr_list = tokenizer_asr.sp.encode_as_ids(transcription) + yield tokens_asr_list + tokens_asr_bos = torch.LongTensor( + [hparams["bos_index"]] + (tokens_asr_list) + ) + yield tokens_asr_bos + tokens_asr_eos = torch.LongTensor( + tokens_asr_list + [hparams["eos_index"]] + ) + yield tokens_asr_eos + tokens_asr = torch.LongTensor(tokens_asr_list) + yield tokens_asr + + sb.dataio.dataset.add_dynamic_item(datasets, st_text_pipeline) + sb.dataio.dataset.add_dynamic_item(datasets, asr_text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + [ + "id", + "sig", + "translation", + "tokens_bos", + "tokens_eos", + "tokens", + "tokens_asr_bos", + "tokens_asr_eos", + "tokens_asr", + ], + ) + + # 5. 
If Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_data, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Dataset preparation (parsing CommonVoice) + from covost_prepare import prepare_covost # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Due to DDP, we do the preparation ONLY on the main python process + run_on_main( + prepare_covost, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "train_tsv_file": hparams["train_tsv_file"], + "dev_tsv_file": hparams["dev_tsv_file"], + "test_tsv_file": hparams["test_tsv_file"], + "src_language": hparams["src_language"], + "tgt_language": hparams["tgt_language"], + "skip_prep": hparams["skip_prep"], + "convert_to_wav": hparams["convert_to_wav"], + }, + ) + + # Defining tokenizer and loading it + tokenizer_ast = SentencePiece( + model_dir=hparams["save_folder"], + vocab_size=hparams["output_neurons"], + 
annotation_train=hparams["train_csv"], + annotation_read="translation", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], + text_file="target", + ) + + tokenizer_asr = SentencePiece( + model_dir=hparams["save_folder"], + vocab_size=hparams["asr_output_neurons"], + annotation_train=hparams["train_csv"], + annotation_read="transcription", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], + text_file="source", + ) + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_data, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams, tokenizer_ast, tokenizer_asr) + + # Trainer initialization + ast_brain = AST( + modules=hparams["modules"], + opt_class=hparams["Adam"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # adding objects to trainer: + ast_brain.tokenizer = tokenizer_ast + + # Manage dynamic batching + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + + # Training + ast_brain.fit( + ast_brain.hparams.epoch_counter, + train_data, + valid_data, + 
train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + + ast_brain.evaluate( + valid_data, + test_loader_kwargs=hparams["test_dataloader_opts"], + ) + + ast_brain.evaluate( + test_data, + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/CoVoST/AST/train_w2v2_llama.py b/recipes/CoVoST/AST/train_w2v2_llama.py new file mode 100644 index 0000000000..c427a3432e --- /dev/null +++ b/recipes/CoVoST/AST/train_w2v2_llama.py @@ -0,0 +1,544 @@ +#!/usr/bin/env python3 +"""Recipe for training a wavlm-large plus LLaMA speech translation system on CoVoST. +The system employs a wavlm-large encoder and a LLaMA decoder. +A simple projection concatenating frames is trained between wavlm-large and LLaMA. + +Author +------ + * Titouan Parcollet 2025 +""" + +import sys + +import torch +import torchaudio +import transformers +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import length_to_mask +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class AST(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_prompt_translation, tokens_prompt_translation_len = ( + batch.tokens_prompt_translation + ) # Includes prompt and translation + prompt_len = batch.prompt_len + + # Turn padding in the speech to zero. We need to do this in case of leaks, + # because LLAMA padding is using int of value 120k+ which will corrupt A LOT the signal in case of leak. + audio_len = wavs.shape[1] + abs_len = torch.round(wav_lens * audio_len) + audio_attn_mask = length_to_mask(abs_len) + wavs = wavs * audio_attn_mask + + # Add waveform augmentation if specified. 
+ if ( + stage == sb.Stage.TRAIN + and hasattr(self.hparams, "wav_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + + wavs = self.hparams.normalize(wavs, wav_lens) + + # Forward Speech Modules + feats = self.modules.wav2vec2(wavs, wav_lens) + down_feats = self.modules.feat_downsampler(feats) + down_feats_proj = self.modules.proj(down_feats) + + # Format input for LLM; [ audio emb ] + [ prompt emb ] + [ translation emb ] + # First get relevant lengths + audio_len = down_feats_proj.shape[1] + text_len = tokens_prompt_translation.shape[1] + audio_prompt_len = (audio_len + prompt_len)[0] + + # Then for the full embedding prompt sequence + if hasattr(self.modules.llm, "module"): + embeddings = self.modules.llm.module.model.get_input_embeddings() + else: + embeddings = self.modules.llm.model.get_input_embeddings() + + inputs_embeds = torch.cat( + (down_feats_proj, embeddings(tokens_prompt_translation)), dim=1 + ) + + # Prepare attn_mask for audio and text and combine them. + # This is not streaming compatible. + # For HF to work, masked frames should be 0. 
+ text_abs_len = torch.round(tokens_prompt_translation_len * text_len) + abs_len = torch.round(wav_lens * audio_len) + audio_attn_mask = length_to_mask(abs_len) + text_attn_mask = length_to_mask(text_abs_len) + attn_mask = torch.cat([audio_attn_mask, text_attn_mask], dim=-1) + + # LLM forward + llm_logits = self.modules.llm( + inputs_embeds=inputs_embeds, attention_mask=attn_mask + ).logits + + # output layer for seq2seq log-probabilities + p_seq = self.hparams.log_softmax(llm_logits) + + if hasattr(self.modules.llm, "module"): + gen_func = self.modules.llm.module.model.generate + else: + gen_func = self.modules.llm.model.generate + + # Running decoding if not training + if stage == sb.Stage.TRAIN: + hyps = None + + elif stage == sb.Stage.VALID: + hyps = gen_func( + inputs_embeds=inputs_embeds[ + :, :audio_prompt_len + ], # give model audio features and prompt for inference + attention_mask=attn_mask[:, :audio_prompt_len], + generation_config=self.val_decoding_config, + ) + elif stage == sb.Stage.TEST: + hyps = gen_func( + inputs_embeds=inputs_embeds[ + :, :audio_prompt_len + ], # give model audio features and prompt for inference + attention_mask=attn_mask[:, :audio_prompt_len], + generation_config=self.test_decoding_config, + ) + + return p_seq, wav_lens, hyps, audio_prompt_len + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + ( + p_seq, + wav_lens, + predicted_tokens, + audio_prompt_len, + ) = predictions + + ids = batch.id + tokens_translation, tokens_translation_len = batch.tokens_translation + + # Translation loss + # We are only interested in computing the loss over the logits after + # the audio + prompt embeddings. 
Tokens_translation does not start with bos, + # so we just need to make sure to shift the logits to the last token of the prompt + # (to ensure next word prediction) + p_seq_translation_only = p_seq[:, audio_prompt_len - 1 :] + + loss = self.hparams.nll_loss( + p_seq_translation_only, + tokens_translation, + length=tokens_translation_len, + ) + + if stage != sb.Stage.TRAIN: + # Removing the eos + predictions = self.tokenizer.batch_decode(predicted_tokens) + targets = self.tokenizer.batch_decode(tokens_translation) + predictions = remove_after_eos(predictions) + targets = remove_after_eos(targets) + + self.bleu_metric.append(ids, predictions, [targets]) + + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append( + p_seq_translation_only, + tokens_translation, + tokens_translation_len, + ) + + return loss + + def init_optimizers(self): + self.optimizer = self.hparams.Adam(self.hparams.model.parameters()) + + self.optimizers_dict = {"model_optimizer": self.optimizer} + + # Initializes the wav2vec2 optimizer if the model is not wav2vec2_frozen + if not self.hparams.wav2vec2_frozen: + self.wav2vec_optimizer = self.hparams.Adam_wav2vec2( + self.modules.wav2vec2.parameters() + ) + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + + # Define generation config depending on runtime values + self.val_decoding_config = transformers.GenerationConfig( + num_beams=self.hparams.valid_beam_size, + pad_token_id=self.tokenizer.pad_token_id, + eos_token_id=self.tokenizer.eos_token_id, + max_new_tokens=500, + ) + + # Define generation config depending on runtime values + self.test_decoding_config = transformers.GenerationConfig( + 
num_beams=self.hparams.test_beam_size, + pad_token_id=self.tokenizer.pad_token_id, + eos_token_id=self.tokenizer.eos_token_id, + max_new_tokens=500, + ) + + if stage != sb.Stage.TRAIN: + self.acc_metric = self.hparams.acc_computer() + self.bleu_metric = self.hparams.bleu_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["BLEU"] = self.bleu_metric.summarize(field="BLEU") + stage_stats["BLEU_extensive"] = self.bleu_metric.summarize() + stage_stats["ACC"] = self.acc_metric.summarize() + + if ( + self.optimizer_step > self.hparams.warmup_steps + and not self.hparams.wav2vec2_frozen + ): + ( + old_lr_wav2vec, + new_lr_wav2vec, + ) = self.hparams.lr_annealing_wav2vec(stage_stats["ACC"]) + sb.nnet.schedulers.update_learning_rate( + self.wav2vec_optimizer, new_lr_wav2vec + ) + else: + old_lr_wav2vec = ( + self.hparams.lr_annealing_wav2vec.hyperparam_value + ) + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID: + # report different epoch stages according current stage + lr = self.hparams.noam_annealing.current_lr + steps = self.hparams.noam_annealing.n_steps + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "lr_wav2vec": old_lr_wav2vec, + "steps": steps, + } + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"ACC": stage_stats["ACC"], "epoch": epoch}, + max_keys=["ACC"], + num_to_keep=3, + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + + +def remove_after_eos(list_of_str, eos_wrd="<|end_of_text|>"): + """Remove all the text after EOS to obtain the clean translation. 
Receives a list of string e.g. ['the cat<|end_of_text|>[PAD]']""" + cleaned = [] + for line in list_of_str: + index = line.find(eos_wrd) + if index != -1: + cleaned.append(line[:index]) + else: + cleaned.append(line) + return cleaned + + +# Define custom data procedure +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + + # 1. Define datasets + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! 
otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + train_data = train_data.filtered_sorted( + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + key_min_value={"duration": hparams["avoid_if_shorter_than"]}, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + # We also sort the validation data so it is faster to validate + valid_data = valid_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than_val_test"]}, + ) + + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, + ) + + # We also sort the validation data so it is faster to validate + test_data = test_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than_val_test"]}, + ) + + datasets = [train_data, valid_data, test_data] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + info = audio_io.info(wav) + sig = sb.dataio.dataio.read_audio(wav) + resampled = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(sig) + return resampled + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. Define text pipeline: + + # Get the prompt from yaml and tokenize it + prompt = hparams["llm_prompt"] + logger.info(f"Using the following prompt: {repr(prompt)}") + + # Don't add EOS after prompt, only add EOS after transcripts + # Always manually add eos and bos because HF is not consistent. 
+ eos_token_id = torch.LongTensor([tokenizer.eos_token_id]) + bos_token_id = torch.LongTensor([tokenizer.bos_token_id]) + + prompt_ids = tokenizer( + prompt, return_tensors="pt", add_special_tokens=False + ).input_ids.squeeze() + + prompt_ids = torch.cat([prompt_ids, bos_token_id]) + + # We want BOS + prompt + translation + EOS + @sb.utils.data_pipeline.takes("translation") + @sb.utils.data_pipeline.provides( + "translation", + "tokens_translation", + "tokens_prompt_translation", + "prompt_len", + ) + def st_text_pipeline(translation): + yield translation + tokens_translation = tokenizer( + translation, return_tensors="pt", add_special_tokens=False + ).input_ids.squeeze() + no_eos_trans = tokens_translation + tokens_translation = torch.cat([tokens_translation, eos_token_id]) + yield tokens_translation + tokens_prompt_translation = torch.cat((prompt_ids, no_eos_trans)) + yield tokens_prompt_translation + prompt_len = prompt_ids.size(0) + yield prompt_len + + sb.dataio.dataset.add_dynamic_item(datasets, st_text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + [ + "id", + "sig", + "translation", + "tokens_translation", + "tokens_prompt_translation", + "prompt_len", + ], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
+ train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_data, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Dataset preparation (parsing CommonVoice) + from covost_prepare import prepare_covost # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Due to DDP, we do the preparation ONLY on the main python process + run_on_main( + prepare_covost, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "train_tsv_file": hparams["train_tsv_file"], + "dev_tsv_file": hparams["dev_tsv_file"], + "test_tsv_file": hparams["test_tsv_file"], + "src_language": hparams["src_language"], + "tgt_language": hparams["tgt_language"], + "skip_prep": hparams["skip_prep"], + "convert_to_wav": hparams["convert_to_wav"], + }, + ) + + # Defining tokenizer and loading it + tokenizer = hparams["modules"]["llm"].tokenizer + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_data, + train_bsampler, 
+ valid_bsampler, + ) = dataio_prepare(hparams, tokenizer) + + # Trainer initialization + ast_brain = AST( + modules=hparams["modules"], + opt_class=hparams["Adam"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # adding objects to trainer: + ast_brain.tokenizer = tokenizer + + # Manage dynamic batching + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + test_dataloader_opts = hparams["test_dataloader_opts"] + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + + # Training + ast_brain.fit( + ast_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + ast_brain.evaluate( + valid_data, + max_key="ACC", + test_loader_kwargs=test_dataloader_opts, + ) + + ast_brain.evaluate( + test_data, + max_key="ACC", + test_loader_kwargs=test_dataloader_opts, + ) diff --git a/recipes/CoVoST/covost_prepare.py b/recipes/CoVoST/covost_prepare.py new file mode 100644 index 0000000000..70a1be7df6 --- /dev/null +++ b/recipes/CoVoST/covost_prepare.py @@ -0,0 +1,496 @@ +""" +Data preparation for the CoVoST dataset. This is heavily inspired +by the CommonVoice data preparation. 
+ +GitHub: https://github.com/facebookresearch/covost +Download: https://commonvoice.mozilla.org/en/datasets + +Author +------ + * Titouan Parcollet 2025 +""" + +import csv +import functools +import os +import re +from dataclasses import dataclass + +from speechbrain.dataio.dataio import read_audio_info +from speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map + +logger = get_logger(__name__) + +SAMPLING_RATE = 16000 +VERBOSE = False + + +def prepare_covost( + data_folder, + save_folder, + train_tsv_file, + dev_tsv_file, + test_tsv_file, + src_language="en", + tgt_language="de", + skip_prep=False, + convert_to_wav=True, +): + """ + Prepares the csv files for the CoVoST dataset. + GitHub: https://github.com/facebookresearch/covost + Download: https://commonvoice.mozilla.org/en + + Arguments + --------- + data_folder : str + Path to the folder where the original Common Voice dataset is stored. + This path should include the lang: /datasets/CommonVoice// + save_folder : str + The directory where to store the csv files. + train_tsv_file : str, optional + Path to the Train Common Voice .tsv file (cs) + dev_tsv_file : str, optional + Path to the Dev Common Voice .tsv file (cs) + test_tsv_file : str, optional + Path to the Test Common Voice .tsv file (cs) + src_language: str, (default 'en') + Specify the source language for text normalization. + tgt_language: str, (default 'de') + Specify the target language for text normalization. + skip_prep: bool + If True, skip data preparation. + convert_to_wav: bool + If True, `.mp3` files are converted (duplicated) to uncompressed `.wav`. + Uncompressed `wav`s can be much faster to decode than MP3, at the cost + of much higher disk usage and bandwidth. This might be useful if you are + CPU-limited in workers during training. + This invokes the `ffmpeg` commandline, so ffmpeg must be installed. 
+ + Returns + ------- + None + + Example + ------- + >>> from recipes.CoVoST.covost_prepare import prepare_covost + >>> data_folder = '/datasets/CommonVoice/en' + >>> save_folder = 'exp/CommonVoice_exp' + >>> train_tsv_file = '/datasets/CommonVoice/en/train.tsv' + >>> dev_tsv_file = '/datasets/CommonVoice/en/dev.tsv' + >>> test_tsv_file = '/datasets/CommonVoice/en/test.tsv' + >>> prepare_covost( \ + data_folder, \ + save_folder, \ + train_tsv_file, \ + dev_tsv_file, \ + test_tsv_file, \ + ) + """ + + if skip_prep: + return + + # Setting the save folder + os.makedirs(save_folder, exist_ok=True) + + # Setting output files + save_csv_train = save_folder + "/train.csv" + save_csv_dev = save_folder + "/dev.csv" + save_csv_test = save_folder + "/test.csv" + + # If csv already exists, we skip the data preparation + if skip(save_csv_train, save_csv_dev, save_csv_test): + msg = "%s already exists, skipping data preparation!" % (save_csv_train) + logger.info(msg) + + msg = "%s already exists, skipping data preparation!" % (save_csv_dev) + logger.info(msg) + + msg = "%s already exists, skipping data preparation!" % (save_csv_test) + logger.info(msg) + + return + + # Creating csv files for {train, dev, test} data + file_pairs = zip( + [train_tsv_file, dev_tsv_file, test_tsv_file], + [save_csv_train, save_csv_dev, save_csv_test], + ) + for tsv_file, save_csv in file_pairs: + create_csv( + convert_to_wav, + tsv_file, + save_csv, + data_folder, + src_language, + tgt_language, + ) + + +def create_csv( + convert_to_wav, + orig_tsv_file, + csv_file, + data_folder, + src_language="en", + tgt_language="de", +): + """ + Creates the csv file given a list of wav files. + + Arguments + --------- + convert_to_wav : bool + If True, `.mp3` files are converted (duplicated) to uncompressed `.wav`. + Uncompressed `wav`s can be much faster to decode than MP3, at the cost + of much higher disk usage and bandwidth. This might be useful if you are + CPU-limited in workers during training. 
+ This invokes the `ffmpeg` commandline, so ffmpeg must be installed. + orig_tsv_file : str + Path to the Common Voice tsv file (standard file). + csv_file : str + New csv file to be generated. + data_folder : str + Path of the CommonVoice dataset. + src_language : str, (default 'en') + Source language code, e.g. "en". + tgt_language : str, (default 'de') + Target language code, e.g. "en". + """ + + # Check if the given files exists + if not os.path.isfile(orig_tsv_file): + msg = "\t%s doesn't exist, verify your dataset!" % (orig_tsv_file) + logger.info(msg) + raise FileNotFoundError(msg) + + # We load and skip the header + csv_lines = open(orig_tsv_file, encoding="utf-8").readlines() + csv_data_lines = csv_lines[1:] + nb_samples = len(csv_data_lines) + + msg = "Preparing CSV files for %s samples ..." % (str(nb_samples)) + logger.info(msg) + + # Adding some Prints + msg = "Creating csv lists in %s ..." % (csv_file) + logger.info(msg) + + # Process and write lines + total_duration = 0.0 + + line_processor = functools.partial( + process_line, + convert_to_wav=convert_to_wav, + data_folder=data_folder, + src_language=src_language, + tgt_language=tgt_language, + ) + + # Stream into a .tmp file, and rename it to the real path at the end. + csv_file_tmp = csv_file + ".tmp" + + with open(csv_file_tmp, mode="w", newline="", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + csv_writer.writerow( + ["ID", "duration", "wav", "transcription", "translation"] + ) + + for row in parallel_map(line_processor, csv_data_lines): + if row is None: + continue + + total_duration += row.duration + csv_writer.writerow( + [ + row.snt_id, + str(row.duration), + row.audio_path, + row.transcription, + row.translation, + ] + ) + + os.replace(csv_file_tmp, csv_file) + + # Final prints + msg = "%s successfully created!" 
% (csv_file) + logger.info(msg) + msg = "Number of samples: %s " % (str(len(csv_data_lines))) + logger.info(msg) + print(msg) + msg = "Total duration: %s Hours" % (str(round(total_duration / 3600, 2))) + print(msg) + logger.info(msg) + + +@dataclass +class CoVoSTRow: + snt_id: str + duration: float + audio_path: str + transcription: str + translation: str + + +def process_line(line, convert_to_wav, data_folder, src_language, tgt_language): + """Process a line of CoVoST tsv file. + + Arguments + --------- + line : str + A line of the CoVoST tsv file. + convert_to_wav : bool + If True, `.mp3` files are converted (duplicated) to uncompressed `.wav`. + Uncompressed `wav`s can be much faster to decode than MP3, at the cost + of much higher disk usage and bandwidth. This might be useful if you are + CPU-limited in workers during training. + This invokes the `ffmpeg` commandline, so ffmpeg must be installed. + data_folder : str + Path to the CommonVoice dataset. + src_language : str + Source language code, e.g. "en" + tgt_language : str + Target language code, e.g. "en" + + Returns + ------- + CoVoSTRow + A dataclass containing the information about the line. + """ + + columns = line.strip().split("\t") + audio_path_filename = columns[0] + transcription = str(columns[1]) + translation = columns[2] + + if src_language == "en": + # Corrupted files in english. + if audio_path_filename in [ + "common_voice_fr_19528232.mp3", + "common_voice_fr_19528233.mp3", + "common_voice_en_19817845.mp3", + "common_voice_en_19504777.mp3", + ]: + return None + + # Path is at indice 1 in Common Voice tsv files. 
And .mp3 files + # are located in datasets/lang/clips/ + audio_path = data_folder + "/clips/" + audio_path_filename + + if convert_to_wav: + audio_path = convert_mp3_to_wav(audio_path) + + file_name = audio_path.split(".")[-2].split("/")[-1] + snt_id = file_name + + # Reading the signal (to retrieve duration in seconds) + if os.path.isfile(audio_path): + info = read_audio_info(audio_path) + else: + msg = "\tError loading: %s" % (str(len(file_name))) + logger.info(msg) + return None + + duration = info.num_frames / info.sample_rate + + # Getting transcript + # !! Language specific cleaning !! + transcription = language_specific_preprocess(src_language, transcription) + translation = language_specific_preprocess(tgt_language, translation) + + if transcription is None or translation is None: + return None + elif len(transcription.split(" ")) < 4 or len(translation.split(" ")) < 4: + return None + + # Composition of the csv_line + return CoVoSTRow(snt_id, duration, audio_path, transcription, translation) + + +def skip(save_csv_train, save_csv_dev, save_csv_test): + """ + Detects if the CoVoST data preparation has been already done. + If the preparation has been done, we can skip it. + + Parameters + ---------- + save_csv_train : str + Path to train csv file. + + save_csv_dev : str + Path to dev csv file. + save_csv_test : str + Path to test csv file. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + + # Checking folders and save options + skip = False + + if ( + os.path.isfile(save_csv_train) + and os.path.isfile(save_csv_dev) + and os.path.isfile(save_csv_test) + ): + skip = True + + return skip + + +def language_specific_preprocess(language, sentence): + """ + Preprocess text based on language. This must be extended with + other languages if needed. + + Parameters + ---------- + language : str + The code of the language to use for normalisation. E.g. "en", "de". 
+ sentence : str + The string to normalise. + Returns + ------- + str + The normalised sentence. Returns None if it was not possible to + normalise the sentence. + + """ + + STOP_ACCENTED_CHAR_LANGUAGES = ["en", "de"] + + if language == "en": + final_characters = set(" abcdefghijklmnopqrstuvwxyz1234567890-&'") + if language == "de": + final_characters = set( + " abcdefghijklmnopqrstuvwxyz1234567890-&ÄäÖöÜüẞß'" + ) + else: # Default to english set. + final_characters = set(" abcdefghijklmnopqrstuvwxyz1234567890-&'") + + if language in STOP_ACCENTED_CHAR_LANGUAGES: + if language == "en": + stop_characters = ( + "[" + "áÁàăâåäÄãÃāảạæćčČçÇðéÉèÈêěëęēəğíîÎïīịıłṃńňñóÓòôőõøØōŌœŒřšŠşșȘúÚûūụýžþ" + # Suggests the sentence is not English but German. + "öÖßüÜ" + # All sorts of languages: Greek, Arabic... + "\u0370-\u1aaf" + # Chinese/Japanese/Korean. + "\u4e00-\u9fff" + # Technical symbols. + "\u2190-\u23ff" + # Symbols that could be pronounced in various ways. + "]" + ) + elif language == "de": + stop_characters = ( + "[" + "áÁàăâåãÃāảạæćčČçÇðéÉèÈêěëęēəğíîÎïīịıłṃńňñóÓòôőõøØōŌœŒřšŠşșȘúÚûūụýžþ" + # All sorts of languages: Greek, Arabic... + "\u0370-\u1aaf" + # Chinese/Japanese/Korean. + "\u4e00-\u9fff" + # Technical symbols. + "\u2190-\u23ff" + # Symbols that could be pronounced in various ways. + "]" + ) + else: + stop_characters = ( + "[" + "áÁàăâåãÃāảạæćčČçÇðéÉèÈêěëęēəğíîÎïīịıłṃńňñóÓòôőõøØōŌœŒřšŠşșȘúÚûūụýžþ" + # All sorts of languages: Greek, Arabic... + "\u0370-\u1aaf" + # Chinese/Japanese/Korean. + "\u4e00-\u9fff" + # Technical symbols. + "\u2190-\u23ff" + # Symbols that could be pronounced in various ways. + "]" + ) + + if re.search(stop_characters, sentence) is not None: + return None + + # These characters mark word boundaries. + split_character_regex = '[ ",:;!?¡\\.…()\\-—–‑_“”„/«»]' + + # These could all be used as apostrophes in the middle of words. + # If at the start or end of a word, they will be removed. 
+ apostrophes_or_quotes = "['`´ʻ‘’]" + + # Some punctuation that indicates a word boundary. + words_split = re.split(split_character_regex, sentence) + words_quotes = [ + # Use ' as apostrophe. + # Remove apostrophes at the start and end of words (probably quotes). + # Word-internal apostrophes, even where rotated, are retained. + re.sub(apostrophes_or_quotes, "'", word).strip("'") + for word in words_split + ] + + # Processing that does not change the length. + words_lower = [word.lower() for word in words_quotes] + + words_mapped = [ + # word.translate(character_mapping) + word + for word in words_lower + # Previous processing may have reduced words to nothing. + # Remove them. + if word != "" + ] + + # removing empty strings + words_mapped = [x for x in words_mapped if x.strip()] + result = " ".join(words_mapped).rstrip() + + character_set = set(result) + + if not character_set <= final_characters: + return None + else: + return result + + +def convert_mp3_to_wav(audio_mp3_path): + """Convert an mp3 file to a wav file. + + Parameters + ---------- + audio_mp3_path : str + The path to the opus file to be converted. + + Returns + ------- + str + The path to the converted wav file. + + Raises + ------ + subprocess.CalledProcessError + If the conversion process fails. 
+ """ + audio_wav_path = audio_mp3_path.replace(".mp3", ".wav") + if not os.path.isfile(audio_wav_path): + if VERBOSE: + os.system( + f"ffmpeg -y -i {audio_mp3_path} -ac 1 -ar {SAMPLING_RATE} {audio_wav_path}" + ) + else: + os.system( + f"ffmpeg -y -i {audio_mp3_path} -ac 1 -ar {SAMPLING_RATE} {audio_wav_path} > /dev/null 2>&1" + ) + return audio_wav_path diff --git a/recipes/CommonLanguage/README.md b/recipes/CommonLanguage/README.md index 08ced044a5..68fc08296a 100644 --- a/recipes/CommonLanguage/README.md +++ b/recipes/CommonLanguage/README.md @@ -58,7 +58,7 @@ This dataset is composed of speakers of 45 languages that were carefully selecte * Tamil * Tatar * Turkish -* Ukranian +* Ukrainian * Welsh ## Other information diff --git a/recipes/CommonLanguage/common_language_prepare.py b/recipes/CommonLanguage/common_language_prepare.py index 4f9d805a4d..daad58ef4a 100644 --- a/recipes/CommonLanguage/common_language_prepare.py +++ b/recipes/CommonLanguage/common_language_prepare.py @@ -1,5 +1,5 @@ """ -Data preparation of CommonLangauge dataset for LID. +Data preparation of CommonLanguage dataset for LID. Download: https://zenodo.org/record/5036977#.YNo1mHVKg5k @@ -8,14 +8,16 @@ Pavlo Ruban 2021 """ -import os import csv -import logging -import torchaudio +import os + from tqdm.contrib import tzip + +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import get_all_files +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) LANGUAGES = [ @@ -62,7 +64,7 @@ "Tamil", "Tatar", "Turkish", - "Ukranian", + "Ukrainian", "Welsh", ] @@ -70,7 +72,7 @@ def prepare_common_language(data_folder, save_folder, skip_prep=False): """ Prepares the csv files for the CommonLanguage dataset for LID. 
- Download: https://drive.google.com/uc?id=1Vzgod6NEYO1oZoz_EcgpZkUO9ohQcO1F + Download: https://www.dropbox.com/s/qqpmqay3q9xb1vf/common_voice_kpd.tar.gz?dl=0 Arguments --------- @@ -79,11 +81,13 @@ def prepare_common_language(data_folder, save_folder, skip_prep=False): This path should include the multi: /datasets/CommonLanguage save_folder : str The directory where to store the csv files. - max_duration : int, optional - Max duration (in seconds) of training uterances. skip_prep: bool If True, skip data preparation. + Returns + ------- + None + Example ------- >>> from recipes.CommonLanguage.common_language_prepare import prepare_common_language @@ -102,7 +106,7 @@ def prepare_common_language(data_folder, save_folder, skip_prep=False): # Setting the save folder os.makedirs(save_folder, exist_ok=True) - # Setting ouput files + # Setting output files save_csv_train = os.path.join(save_folder, "train.csv") save_csv_dev = os.path.join(save_folder, "dev.csv") save_csv_test = os.path.join(save_folder, "test.csv") @@ -140,6 +144,15 @@ def skip(save_csv_train, save_csv_dev, save_csv_test): If the preparation has been done, we can skip it. + Arguments + --------- + save_csv_train : str + The train csv file + save_csv_dev : str + The dev csv file + save_csv_test : str + The test csv file + Returns ------- bool @@ -165,8 +178,8 @@ def create_sets(data_folder, extension): --------- data_folder : str Path of the CommonLanguage dataset. 
- extension: list of file extentions - List of strings with file extentions that correspond to the audio files + extension: list of file extensions + List of strings with file extensions that correspond to the audio files in the CommonLanguage dataset Returns @@ -234,7 +247,7 @@ def create_csv(wav_list, csv_file): # Peeking at the signal (to retrieve duration in seconds) if os.path.isfile(wav_file): - info = torchaudio.info(wav_file) + info = audio_io.info(wav_file) else: msg = "\tError loading: %s" % (str(len(file_name))) logger.info(msg) @@ -246,7 +259,7 @@ def create_csv(wav_list, csv_file): # Actual name of the language language = path_parts[-4] - # Create a row with whole utterences + # Create a row with whole utterances csv_line = [ idx, # ID wav_file, # File name @@ -264,7 +277,7 @@ def create_csv(wav_list, csv_file): # CSV column titles csv_header = ["ID", "wav", "wav_format", "duration", "language"] - # Add titles to the list at indexx 0 + # Add titles to the list at index 0 csv_lines.insert(0, csv_header) # Writing the csv lines @@ -277,7 +290,7 @@ def create_csv(wav_list, csv_file): csv_writer.writerow(line) # Final prints - msg = f"{csv_file} sucessfully created!" + msg = f"{csv_file} successfully created!" logger.info(msg) msg = f"Number of samples: {len(wav_list)}." logger.info(msg) @@ -291,9 +304,10 @@ def check_common_language_folder(data_folder): If not, raises an error. - Returns - ------- - None + Arguments + --------- + data_folder : str + The path to the folder containing the data. Raises ------ diff --git a/recipes/CommonLanguage/lang_id/README.md b/recipes/CommonLanguage/lang_id/README.md index 34dd784878..21c2ca750e 100644 --- a/recipes/CommonLanguage/lang_id/README.md +++ b/recipes/CommonLanguage/lang_id/README.md @@ -9,12 +9,12 @@ Similar to the X-Vector a bigger and more powerful ECAPA-TDNN model can be used. 
The experiment is also fine-tuning of the trained speaker embeddings done for Speaker Identification task on VoxCeleb, and can be accessed on [HuggingFace](https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb). Therefore, most of the architecture choices come from that task. Data augmentation and environmental corruption are done by concatenating waveforms, dropout, speed change, reverberation, noise, and noise+rev. The batch is double size of the original one. This may lead to -better performance, at the cost of longer training time and higher compute resourses. +better performance, at the cost of longer training time and higher compute resources. # Performance | Release | hyperparams file | Val. Err | Test Err | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| :-----------:| -| 21-06-28 | train.yaml | 13. 5 | 15.1 | https://drive.google.com/drive/folders/1btxc_H27AP_f6u4X47FM0LSteUdzhfFR?usp=sharing | 1xV100 16GB | +| 21-06-28 | train.yaml | 13. 5 | 15.1 | https://www.dropbox.com/sh/1fxpzyv67ouwd2c/AAAeMUWYP2f1ycpE1Lp1CwEla?dl=0 | 1xV100 16GB | Each epoch takes approximately 14 minutes on an NVIDIA V100. @@ -26,7 +26,7 @@ Basically, you can run inference with only few lines of code: ```python import torchaudio -from speechbrain.pretrained import EncoderClassifier +from speechbrain.inference import EncoderClassifier classifier = EncoderClassifier.from_hparams(source="speechbrain/lang-id-commonlanguage_ecapa", savedir="pretrained_models/lang-id-commonlanguage_ecapa") # Italian Example @@ -51,6 +51,15 @@ print(text_lab) Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/CommonLanguage/lang_id/hparams/train_ecapa_tdnn.yaml b/recipes/CommonLanguage/lang_id/hparams/train_ecapa_tdnn.yaml index f9f963a42c..7342a23184 100644 --- a/recipes/CommonLanguage/lang_id/hparams/train_ecapa_tdnn.yaml +++ b/recipes/CommonLanguage/lang_id/hparams/train_ecapa_tdnn.yaml @@ -9,18 +9,27 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Set up folders for reading from and writing to # Dataset will be downloaded to the `data_folder` data_folder: !PLACEHOLDER # e.g. 
/localscratch/common_voice_kpd/ output_folder: !ref results/ECAPA-TDNN/ save_folder: !ref /save -rir_folder: !ref train_log: !ref /train_log.txt -device: 'cuda:0' +train_csv: !ref /train.csv +dev_csv: !ref /dev.csv +test_csv: !ref /test.csv skip_prep: False +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv + # The train logger writes training statistics to a file, as well as stdout. train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref @@ -29,43 +38,78 @@ error_stats: !name:speechbrain.utils.metric_stats.MetricStats metric: !name:speechbrain.nnet.losses.classification_error reduction: batch +####################### Training Parameters #################################### + # Feature parameters btw: 40 - 80 n_mels: 80 - -# Training Parameters sample_rate: 16000 number_of_epochs: 30 batch_size: 4 n_languages: 45 emb_dim: 192 # dimensionality of the embeddings +emb_channels: [1024, 1024, 1024, 1024, 3072] +emb_attention_channels: 128 # Dataloaders +num_workers: 4 +drop_last: True train_dataloader_options: + num_workers: !ref batch_size: !ref - drop_last: True + drop_last: !ref shuffle: True test_dataloader_options: + num_workers: !ref batch_size: !ref shuffle: True -# Added noise and reverb come from OpenRIR dataset, automatically -# downloaded and prepared with this Environmental Corruption class. 
-env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - babble_prob: 0.0 - reverb_prob: 1.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -# Adds speech change + time and frequency dropouts (time-domain implementation) -# A small speed change help to improve the performance of speaker-id as well. -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + shuffle_augmentations: True + min_augmentations: 1 + max_augmentations: 3 + augmentations: [ + !ref , + !ref , + !ref ] # Feature extraction compute_features: !new:speechbrain.lobes.features.Fbank @@ -76,6 +120,8 @@ mean_var_norm_input: 
!new:speechbrain.processing.features.InputNormalization norm_type: sentence std_norm: False +############################## Models ########################################## + # To design a custom model, either just edit the simple CustomModel # class that's listed here, or replace this `!new` call with a line # pointing to a different file you've defined. @@ -84,13 +130,13 @@ mean_var_norm_input: !new:speechbrain.processing.features.InputNormalization embedding_model: !new:speechbrain.lobes.models.ECAPA_TDNN.ECAPA_TDNN input_size: !ref activation: !name:torch.nn.LeakyReLU - channels: [1024, 1024, 1024, 1024, 3072] + channels: !ref kernel_sizes: [5, 3, 3, 3, 1] dilations: [1, 2, 3, 4, 1] - attention_channels: 128 + attention_channels: !ref lin_neurons: !ref -# Classifier baseed on cosine distance +# Classifier based on cosine distance classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier input_size: !ref out_neurons: !ref @@ -105,8 +151,6 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter # device, as well as having train()/eval() called on them by the Brain class. modules: compute_features: !ref - env_corrupt: !ref - augmentation: !ref embedding_model: !ref mean_var_norm_input: !ref classifier: !ref @@ -135,6 +179,8 @@ lr_annealing: !new:speechbrain.nnet.schedulers.LinearScheduler final_value: !ref epoch_count: !ref +############################## Logging and Pretrainer ########################## + # This object is used for saving the state of training both so that it # can be resumed if it gets interrupted, and also so that the best checkpoint # can be later loaded for evaluation or inference. 
diff --git a/recipes/CommonLanguage/lang_id/train.py b/recipes/CommonLanguage/lang_id/train.py index 93e2a205c7..a0e312c1ad 100644 --- a/recipes/CommonLanguage/lang_id/train.py +++ b/recipes/CommonLanguage/lang_id/train.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 import os import sys -import torch -import logging -import torchaudio -import speechbrain as sb -from hyperpyyaml import load_hyperpyyaml + from common_language_prepare import prepare_common_language +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.logger import get_logger """Recipe for training a LID system with CommonLanguage. @@ -19,7 +20,7 @@ * Pavlo Ruban 2021 """ -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Brain class for Language ID training @@ -33,18 +34,19 @@ def prepare_features(self, wavs, stage): Input signals (tensor) and their relative lengths (tensor). stage : sb.Stage The current stage of training. + + Returns + ------- + feats : torch.Tensor + Computed features. + lens : torch.Tensor + The length of the corresponding features. """ wavs, lens = wavs - # Add augmentation if specified. In this version of augmentation, we - # concatenate the original and the augment batches in a single bigger - # batch. This is more memory-demanding, but helps to improve the - # performance. Change it if you run OOM. - if stage == sb.Stage.TRAIN: - wavs_noise = self.modules.env_corrupt(wavs, lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - lens = torch.cat([lens, lens], dim=0) - wavs = self.hparams.augmentation(wavs, lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, lens = self.hparams.wav_augment(wavs, lens) # Feature extraction and normalization feats = self.modules.compute_features(wavs) @@ -65,8 +67,8 @@ def compute_forward(self, batch, stage): Returns ------- - predictions : Tensor - Tensor that contains the posterior probabilities over the N classes. + predictions : torch.Tensor + torch.Tensor that contains the posterior probabilities over the N classes. """ # We first move the batch to the appropriate device. @@ -103,11 +105,10 @@ def compute_objectives(self, inputs, batch, stage): # Concatenate labels (due to data augmentation) if stage == sb.Stage.TRAIN: - targets = torch.cat([targets, targets], dim=0) - lens = torch.cat([lens, lens], dim=0) - - if hasattr(self.hparams.lr_annealing, "on_batch_end"): - self.hparams.lr_annealing.on_batch_end(self.optimizer) + if hasattr(self.hparams, "wav_augment"): + targets = self.hparams.wav_augment.replicate_labels(targets) + if hasattr(self.hparams.lr_annealing, "on_batch_end"): + self.hparams.lr_annealing.on_batch_end(self.optimizer) loss = self.hparams.compute_cost(predictions, targets) @@ -159,7 +160,6 @@ def on_stage_end(self, stage, stage_loss, epoch=None): # At the end of validation... if stage == sb.Stage.VALID: - old_lr, new_lr = self.hparams.lr_annealing(epoch) sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) @@ -182,7 +182,7 @@ def on_stage_end(self, stage, stage_loss, epoch=None): def dataio_prep(hparams): - """ This function prepares the datasets to be used in the brain class. + """This function prepares the datasets to be used in the brain class. It also defines the data processing pipeline through user-defined functions. We expect `prepare_common_language` to have been called before this, so that the `train.csv`, `dev.csv`, and `test.csv` manifest files @@ -201,7 +201,7 @@ def dataio_prep(hparams): to the appropriate DynamicItemDataset object. 
""" - # Initialization of the label encoder. The label encoder assignes to each + # Initialization of the label encoder. The label encoder assigns to each # of the observed label a unique index (e.g, 'lang01': 0, 'lang02': 1, ..) language_encoder = sb.dataio.encoder.CategoricalEncoder() @@ -211,7 +211,7 @@ def dataio_prep(hparams): def audio_pipeline(wav): """Load the signal, and pass it and its length to the corruption class. This is done on the CPU in the `collate_fn`.""" - sig, _ = torchaudio.load(wav) + sig, _ = audio_io.load(wav) sig = sig.transpose(0, 1).squeeze(1) return sig @@ -229,7 +229,7 @@ def label_pipeline(language): datasets = {} for dataset in ["train", "dev", "test"]: datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=os.path.join(hparams["save_folder"], dataset + ".csv"), + csv_path=hparams[f"{dataset}_csv"], replacements={"data_root": hparams["data_folder"]}, dynamic_items=[audio_pipeline, label_pipeline], output_keys=["id", "sig", "language_encoded"], @@ -237,7 +237,7 @@ def label_pipeline(language): # Load or compute the label encoder (with multi-GPU DDP support) # Please, take a look into the lab_enc_file to see the label to index - # mappinng. + # mapping. language_encoder_file = os.path.join( hparams["save_folder"], "language_encoder.txt" ) @@ -252,7 +252,6 @@ def label_pipeline(language): # Recipe begins! if __name__ == "__main__": - # Reading command line arguments. hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) @@ -260,7 +259,7 @@ def label_pipeline(language): sb.utils.distributed.ddp_init_group(run_opts) # Load hyperparameters file with command-line overrides. 
- with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -279,13 +278,16 @@ def label_pipeline(language): "skip_prep": hparams["skip_prep"], }, ) + # Data preparation for augmentation + sb.utils.distributed.run_on_main(hparams["prepare_noise_data"]) + sb.utils.distributed.run_on_main(hparams["prepare_rir_data"]) # Create dataset objects "train", "dev", and "test" and language_encoder datasets, language_encoder = dataio_prep(hparams) - # Fetch and laod pretrained modules - sb.utils.distributed.run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + # Fetch and load pretrained modules + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Initialize the Brain object to prepare for mask training. lid_brain = LID( diff --git a/recipes/CommonVoice/ASR/CTC/README.md b/recipes/CommonVoice/ASR/CTC/README.md index 1736d16f68..239fdd4d6f 100644 --- a/recipes/CommonVoice/ASR/CTC/README.md +++ b/recipes/CommonVoice/ASR/CTC/README.md @@ -1,9 +1,18 @@ # CommonVoice ASR with CTC based Seq2Seq models. -This folder contains scripts necessary to run an ASR experiment with the CommonVoice dataset: [CommonVoice Homepage](https://commonvoice.mozilla.org/) +This folder contains scripts necessary to run an ASR experiment with the CommonVoice 14.0 dataset # How to run python train.py hparams/{hparam_file}.yaml +To use an n-gram Language Model (LM) for decoding, follow these steps: +1. Uncomment the line `kenlm_model_path: none` in the `test_beam_search` entry in the yaml file. +2. Set a path to an ARPA or bin file containing the n-gram LM. + +For training an n-gram LM in ARPA (or bin) format, refer to the LM recipe in recipes/CommonVoice/LM. 
+Alternatively, you can download a pre-trained n-gram LM from our Dropbox repository at this link: [Pretrained n-gram LMs](https://www.dropbox.com/scl/fo/zw505t10kesqpvkt6m3tu/h?rlkey=6626h1h665tvlo1mtekop9rx5&dl=0). + +These models are trained on the Commonvoice audio transcriptions available in the training set. + # Data preparation It is important to note that CommonVoice initially offers mp3 audio files at 42Hz. Hence, audio files are downsampled on the fly within the dataio function of the training script. @@ -14,18 +23,31 @@ Here is a list of the different languages that we tested within the CommonVoice - French - Italian - Kinyarwanda +- Arabic +- Spanish +- Portuguese +- Chinese(china) + +>>Note: +>In our experiments, we use CTC beam search and also boost the performance using the 5-gram model previously trained +on the transcription of the training data.(Refer to LM recipe: recipes/CommonVoice/LM). + +>>Note: +> For Chinese the concept of word is not well-defined, hence, we consider the character error rate instead of the word error rate. For the same reason, we don't also employ 5-gram. # Results | Language | CommonVoice Release | hyperparams file | LM | Val. CER | Val. WER | Test CER | Test WER | HuggingFace link | Model link | GPUs | | ------------- |:-------------:|:---------------------------:| -----:| -----:| -----:| -----:| -----:| :-----------:| :-----------:| :-----------:| -| English | 2020-12-11 | train_en_with_wav2vec.yaml | No | 5.01 | 12.57 | 7.32 | 15.58 | Not Avail. 
| [model](https://drive.google.com/drive/folders/1tYO__An68xrM5pR1UIXzEkwzvKX2Tz2o?usp=sharing) | 2xV100 32GB | -| German | 2022-08-16 | train_de_with_wav2vec.yaml | No | 1.90 | 8.02 | 2.40 | 9.54 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-de) | [model](https://drive.google.com/drive/folders/19G2Zm8896QSVDqVfs7PS_W86-K0-5xeC?usp=sharing) | 1xRTXA6000 48GB | -| French | 2020-12-11 | train_fr_with_wav2vec.yaml | No | 2.60 | 8.59 | 3.19 | 9.96 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-fr) | [model](https://drive.google.com/drive/folders/1T9DfdZwcNI9CURxhLCi8GA5JVz8adiY8?usp=sharing) | 2xV100 32GB | -| Italian | 2020-12-11 | train_it_with_wav2vec.yaml | No | 2.77 | 9.83 | 3.16 | 10.85 | Not Avail. | [model](https://drive.google.com/drive/folders/1JhlxeA04tWg_vKcNChOoXSnjBe4luRby?usp=sharing) | 2xV100 32GB | -| Kinyarwanda | 2020-12-11 | train_rw_with_wav2vec.yaml | No | 6.20 | 20.07 | 8.25 | 23.12 | Not Avail. | [model](https://drive.google.com/drive/folders/12_BDenvOqEERDZLAN-KdiAHklvuo35tx?usp=sharing) | 2xV100 32GB | +| English | 2024-01-05 | train_en_with_wav2vec.yaml | 5-gram | 3.79 | 10.79 | 4.96 | 11.37 | | [model](https://www.dropbox.com/scl/fo/gx0szpbectig2r6r6p9vk/APdoN_wWWq_wP4My7w6SvMo?rlkey=v8fhd887bn947yjb45i99wm8p&st=6muft51b&dl=0) | 4xA40 46GB | +| German | 2023-08-15 | train_de_with_wav2vec.yaml | No | 1.74 | 7.40 | 2.18 | 8.39 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-de) | [model](https://www.dropbox.com/sh/dn7plq4wfsujsi1/AABS1kqB_uqLJVkg-bFkyPpVa?dl=0) | 1xV100 32GB | +| French | 2023-08-15 | train_fr_with_wav2vec.yaml | No | 2.59 | 8.47 | 3.36 | 9.71 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-fr) | [model](https://www.dropbox.com/sh/0i7esfa8jp3rxpp/AAArdi8IuCRmob2WAS7lg6M4a?dl=0) | 1xV100 32GB | +| Italian | 2023-08-15 | train_it_with_wav2vec.yaml | No | 2.10 | 7.77 | 2.30 | 7.99 
|[model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-it) | [model](https://www.dropbox.com/sh/hthxqzh5boq15rn/AACftSab_FM6EFWWPgHpKw82a?dl=0) | 1xV100 32GB | +| Kinyarwanda | 2023-08-15 | train_rw_with_wav2vec.yaml | No | 5.47 | 19.58 | 7.30 | 22.52 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-rw) | [model](https://www.dropbox.com/sh/4iax0l4yfry37gn/AABuQ31JY-Sbyi1VlOJfV7haa?dl=0) | 1xV100 32GB | +| Arabic | 2023-08-15 | train_ar_with_wav2vec.yaml | No | 6.45 | 20.80 | 9.65 | 28.53 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-ar) | [model](https://www.dropbox.com/sh/7tnuqqbr4vy96cc/AAA_5_R0RmqFIiyR0o1nVS4Ia?dl=0) | 1xV100 32GB | +| Spanish | 2023-08-15 | train_es_with_wav2vec.yaml | No | 3.36 | 12.61 | 3.67 | 12.67 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-es) | [model](https://www.dropbox.com/sh/ejvzgl3d3g8g9su/AACYtbSWbDHvBr06lAb7A4mVa?dl=0) | 1xV100 32GB | +| Portuguese | 2023-08-15 | train_pt_with_wav2vec.yaml | No | 6.26 | 21.05 | 6.63 | 21.69 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-pt) | [model](https://www.dropbox.com/sh/80wucrvijdvao2a/AAD6-SZ2_ZZXmlAjOTw6fVloa?dl=0) | 1xV100 32GB | +| Chinese(china) | 2023-08-15 | train_zh-CN_with_wav2vec.yaml | No | 25.03 | - | 23.17 | - | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-14-zh-CN) | [model](https://www.dropbox.com/sh/2bikr81vgufoglf/AABMpD0rLIaZBxjtwBHgrNpga?dl=0) | 1xV100 32GB | -*For German, it takes around 5.5 hrs an epoch.*
-The output folders with checkpoints and logs can be found [here](https://drive.google.com/drive/folders/11NMzY0zV-NqJmPMyZfC3RtT64bYe-G_O?usp=sharing). ## How to simply use pretrained models to transcribe my audio file? @@ -41,6 +63,15 @@ SpeechBrain provides a simple interface to transcribe audio files with pretraine Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/CommonVoice/ASR/CTC/extra_requirements.txt b/recipes/CommonVoice/ASR/CTC/extra_requirements.txt deleted file mode 100644 index 7bb0f523d7..0000000000 --- a/recipes/CommonVoice/ASR/CTC/extra_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -transformers==4.13 diff --git 
a/recipes/CommonVoice/ASR/seq2seq/hparams/train_it_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_ar_with_wav2vec.yaml similarity index 53% rename from recipes/CommonVoice/ASR/seq2seq/hparams/train_it_with_wav2vec.yaml rename to recipes/CommonVoice/ASR/CTC/hparams/train_ar_with_wav2vec.yaml index e9f2bf3cd6..8c9f7bec31 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_it_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_ar_with_wav2vec.yaml @@ -1,28 +1,28 @@ # ################################ -# Model: wav2vec2 + DNN + CTC/Attention +# Model: wav2vec2 + DNN + CTC # Augmentation: SpecAugment -# Authors: Titouan Parcollet 2021 -# Mirco Ravanelli 2021 +# Authors: Pooneh Mousavi 2023 # ################################ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] -output_folder: !ref results/wav2vec2_ctcatt_it/ -wer_file: !ref /wer.txt +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/wav2vec2_ctc_ar/ +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt -# URL for the biggest Fairseq english wav2vec2 model. 
-wav2vec2_hub: facebook/wav2vec2-large-100k-voxpopuli +# URL for the biggest Fairseq multilingual +wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files -data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-6.1-2020-12-11/it +data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files accented_letters: True -language: it # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english +language: ar # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english train_csv: !ref /train.csv valid_csv: !ref /dev.csv test_csv: !ref /test.csv @@ -30,23 +30,22 @@ skip_prep: False # Skip data preparation # We remove utterance slonger than 10s in the train/dev/test sets as # longer sentences certainly correspond to "open microphones". -avoid_if_longer_than: 8.0 +avoid_if_longer_than: 10.0 -# Training parameters -number_of_epochs: 45 -number_of_ctc_epochs: 15 +####################### Training Parameters #################################### + +number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 -ctc_weight: 0.3 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min - # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs -# Must be 6 per GPU to fit 16GB of VRAM +# Must be 8 per GPU to fit 32GB of VRAM +dynamic_batching: False batch_size: 12 test_batch_size: 4 @@ -61,31 +60,38 @@ test_dataloader_options: token_type: unigram # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters -activation: !name:torch.nn.LeakyReLU +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 -dnn_layers: 2 dnn_neurons: 1024 -emb_size: 128 
-dec_neurons: 1024 freeze_wav2vec: False +freeze_feature_extractor: False +dropout: 0.15 +warmup_steps: 500 # Outputs -output_neurons: 500 # BPE size, index(blank/eos/bos) = 0 +output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0 # Decoding parameters # Be sure that the bos and eos index match with the BPEs ones blank_index: 0 bos_index: 1 eos_index: 2 -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 80 -eos_threshold: 1.5 -using_max_attn_shift: True -max_attn_shift: 140 -# ctc_weight_decode: 0.0 -temperature: 1.50 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. + # kenlm_model_path: none + # # Functions and classes @@ -93,21 +99,68 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] -enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] - activation: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: !name:speechbrain.nnet.normalization.BatchNorm1d + activation: !new:torch.nn.LeakyReLU + drop: !new:torch.nn.Dropout + p: !ref + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.BatchNorm1d + activation2: !new:torch.nn.LeakyReLU + drop2: !new:torch.nn.Dropout + p: !ref + linear3: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.BatchNorm1d + activation3: !new:torch.nn.LeakyReLU + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + freeze_feature_extractor: !ref + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead @@ -122,51 +175,23 @@ wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 # save_path: !ref /wav2vec2_checkpoint/model.pt ##### -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - 
hidden_size: 1024 - attn_dim: 1024 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.15 - ctc_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True ctc_cost: !name:speechbrain.nnet.losses.ctc_loss blank_index: !ref -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - modules: wav2vec2: !ref enc: !ref - emb: !ref - dec: !ref ctc_lin: !ref - seq_lin: !ref model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] + - [!ref , !ref ] model_opt_class: !name:torch.optim.Adadelta lr: !ref @@ -188,22 +213,6 @@ lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.9 patient: 0 -beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - eos_threshold: !ref - using_max_attn_shift: !ref - max_attn_shift: !ref - temperature: !ref - checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: diff --git a/recipes/CommonVoice/ASR/CTC/hparams/train_de_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_de_with_wav2vec.yaml index 0f53c0c32e..fe97bc10dc 100644 --- a/recipes/CommonVoice/ASR/CTC/hparams/train_de_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_de_with_wav2vec.yaml @@ -7,14 +7,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 8200 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_de/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref 
/train_log.txt # URL for the LARGE Fairseq German wav2vec2 model. wav2vec2_hub: facebook/wav2vec2-large-xlsr-53-german +wav2vec2_folder: !ref /wav2vec2_checkpoint # Dataset prep parameters data_folder: !PLACEHOLDER @@ -32,40 +33,44 @@ skip_prep: False # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 45 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs # Must be 6 per GPU to fit 16GB of VRAM +dynamic_batching: False batch_size: 8 test_batch_size: 8 dataloader_num_workers: 8 test_num_workers: 8 dataloader_options: - batch_size: !ref - num_workers: !ref + batch_size: !ref + num_workers: !ref test_dataloader_options: - batch_size: !ref - num_workers: !ref + batch_size: !ref + num_workers: !ref # BPE parameters token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### # activation: !name:torch.nn.LeakyReLU dnn_neurons: 1024 wav2vec_output_dim: !ref freeze_wav2vec: False +freeze_feature_extractor: False dropout: 0.15 +warmup_steps: 500 # Outputs output_neurons: 32 # BPE size, index(blank/eos/bos) = 0 @@ -75,41 +80,85 @@ blank_index: 0 bos_index: 1 eos_index: 2 +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. 
+ # kenlm_model_path: none + # Functions and classes epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] + limit: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## enc: !new:speechbrain.nnet.containers.Sequential - input_shape: [null, null, !ref ] - linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: !ref - bias: True - bn1: !name:speechbrain.nnet.normalization.BatchNorm1d - activation: !new:torch.nn.LeakyReLU - drop: !new:torch.nn.Dropout - p: !ref - linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: !ref - bias: True - bn2: !name:speechbrain.nnet.normalization.BatchNorm1d - activation2: !new:torch.nn.LeakyReLU - drop2: !new:torch.nn.Dropout - p: !ref - linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: !ref - bias: True - bn3: !name:speechbrain.nnet.normalization.BatchNorm1d - activation3: !new:torch.nn.LeakyReLU - -wav2vec2: 
!new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 - source: !ref - output_norm: True - freeze: !ref - save_path: !ref /wav2vec2_checkpoint + input_shape: [null, null, !ref ] + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: !name:speechbrain.nnet.normalization.BatchNorm1d + activation: !new:torch.nn.LeakyReLU + drop: !new:torch.nn.Dropout + p: !ref + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.BatchNorm1d + activation2: !new:torch.nn.LeakyReLU + drop2: !new:torch.nn.Dropout + p: !ref + linear3: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.BatchNorm1d + activation3: !new:torch.nn.LeakyReLU + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + freeze_feature_extractor: !ref + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead @@ -124,56 +173,56 @@ wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 # save_path: !ref /wav2vec2_checkpoint/model.pt ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref + input_size: !ref + n_neurons: !ref log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True + apply_log: True ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref + blank_index: !ref modules: - wav2vec2: !ref - enc: !ref - ctc_lin: !ref + wav2vec2: !ref + enc: !ref + ctc_lin: !ref model: !new:torch.nn.ModuleList - - [!ref , !ref ] + - [!ref , !ref ] model_opt_class: !name:torch.optim.Adadelta - lr: !ref - rho: 0.95 - eps: 1.e-8 + lr: !ref + rho: 0.95 + eps: 1.e-8 wav2vec_opt_class: !name:torch.optim.Adam - lr: !ref + lr: !ref lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 
- patient: 0 + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.9 - patient: 0 + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - wav2vec2: !ref - model: !ref - scheduler_model: !ref - scheduler_wav2vec: !ref - counter: !ref + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref + save_file: !ref error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats - split_tokens: True + split_tokens: True diff --git a/recipes/CommonVoice/ASR/CTC/hparams/train_en_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_en_with_wav2vec.yaml index 1720b65a78..e7efc890ee 100644 --- a/recipes/CommonVoice/ASR/CTC/hparams/train_en_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_en_with_wav2vec.yaml @@ -6,14 +6,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] -output_folder: !ref results/wav2vec2_ctc_en/ -wer_file: !ref /wer.txt +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/wavlm_ctc_en/ +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # URL for the biggest Fairseq english wav2vec2 model. 
-wav2vec2_hub: facebook/wav2vec2-large-lv60 +wav2vec2_hub: microsoft/wavlm-large +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr @@ -31,41 +32,69 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 30 +optimizer_step_limit: 75000 lr: 1.0 lr_wav2vec: 0.0001 -sorting: ascending -auto_mix_prec: False +precision: fp16 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min -# With data_parallel batch_size is split into N jobs -# With DDP batch_size is multiplied by N jobs -# Must be 8 per GPU to fit 32GB of VRAM +# We use Dynamic Batching here as CV english is quite big. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 200 +max_batch_length_val: 50 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+batch_ordering: random +max_batch_ex: 256 +num_workers: 4 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# If dynamic batching is False, we use these instead batch_size: 12 test_batch_size: 4 +sorting: ascending dataloader_options: batch_size: !ref - num_workers: 6 + num_workers: !ref + test_dataloader_options: batch_size: !ref num_workers: 6 # BPE parameters -token_type: unigram # ["unigram", "bpe", "char"] +token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### # activation: !name:torch.nn.LeakyReLU wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False -dropout: 0.15 +freeze_feature_extractor: False +dropout: 0.1 +warmup_steps: 500 # Outputs -output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0 +output_neurons: 28 # BPE size, index(blank/eos/bos) = 0 # Decoding parameters # Be sure that the bos and eos index match with the BPEs ones @@ -73,16 +102,59 @@ blank_index: 0 bos_index: 1 eos_index: 2 +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: False + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. 
+ # kenlm_model_path: none + # # Functions and classes # epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear @@ -105,11 +177,12 @@ enc: !new:speechbrain.nnet.containers.Sequential bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + freeze_feature_extractor: !ref + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead @@ -147,8 +220,9 @@ model_opt_class: !name:torch.optim.Adadelta rho: 0.95 eps: 1.e-8 -wav2vec_opt_class: 
!name:torch.optim.Adam +wav2vec_opt_class: !name:torch.optim.AdamW lr: !ref + weight_decay: 0.01 lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler initial_value: !ref diff --git a/recipes/CommonVoice/ASR/seq2seq/hparams/train_rw_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_es_with_wav2vec.yaml similarity index 56% rename from recipes/CommonVoice/ASR/seq2seq/hparams/train_rw_with_wav2vec.yaml rename to recipes/CommonVoice/ASR/CTC/hparams/train_es_with_wav2vec.yaml index 5ac420a20c..1ceb912f2c 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_rw_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_es_with_wav2vec.yaml @@ -1,27 +1,28 @@ # ################################ -# Model: wav2vec2 + DNN + CTC/Attention +# Model: wav2vec2 + DNN + CTC # Augmentation: SpecAugment -# Authors: Titouan Parcollet 2021 +# Authors: Pooneh Mousavi 2023 # ################################ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] -output_folder: !ref results/wav2vec2_ctcatt_rw/ -wer_file: !ref /wer.txt +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/wav2vec2_ctc_es/ +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt -# URL for the biggest HuggingFace multilingual w2v2 from XLSR. 
+# URL for the biggest Fairseq multilingual wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files -accented_letters: False -language: rw # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english +accented_letters: True +language: es # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english train_csv: !ref /train.csv valid_csv: !ref /dev.csv test_csv: !ref /test.csv @@ -31,21 +32,20 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 30 -number_of_ctc_epochs: 20 lr: 1.0 lr_wav2vec: 0.0001 -ctc_weight: 0.3 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min - # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs -# Must be 6 per GPU to fit 32GB of VRAM +# Must be 8 per GPU to fit 32GB of VRAM +dynamic_batching: False batch_size: 12 test_batch_size: 4 @@ -60,14 +60,13 @@ test_dataloader_options: token_type: unigram # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters -activation: !name:torch.nn.LeakyReLU +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 -dnn_layers: 2 dnn_neurons: 1024 -emb_size: 128 -dec_neurons: 1024 freeze_wav2vec: False +freeze_feature_extractor: False +dropout: 0.15 +warmup_steps: 500 # Outputs output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0 @@ -77,14 +76,21 @@ output_neurons: 1000 # BPE size, 
index(blank/eos/bos) = 0 blank_index: 0 bos_index: 1 eos_index: 2 -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 80 -eos_threshold: 1.5 -using_max_attn_shift: True -max_attn_shift: 140 -# ctc_weight_decode: 0.0 -temperature: 1.50 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. + # kenlm_model_path: none # # Functions and classes @@ -92,21 +98,68 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] -enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] - activation: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: !name:speechbrain.nnet.normalization.BatchNorm1d + activation: !new:torch.nn.LeakyReLU + drop: !new:torch.nn.Dropout + p: !ref + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.BatchNorm1d + activation2: !new:torch.nn.LeakyReLU + drop2: !new:torch.nn.Dropout + p: !ref + linear3: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.BatchNorm1d + activation3: !new:torch.nn.LeakyReLU + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + freeze_feature_extractor: !ref + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead @@ -121,51 +174,23 @@ wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 # save_path: !ref /wav2vec2_checkpoint/model.pt ##### -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - 
hidden_size: 1024 - attn_dim: 1024 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.15 - ctc_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True ctc_cost: !name:speechbrain.nnet.losses.ctc_loss blank_index: !ref -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - modules: wav2vec2: !ref enc: !ref - emb: !ref - dec: !ref ctc_lin: !ref - seq_lin: !ref model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] + - [!ref , !ref ] model_opt_class: !name:torch.optim.Adadelta lr: !ref @@ -187,22 +212,6 @@ lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.9 patient: 0 -beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - eos_threshold: !ref - using_max_attn_shift: !ref - max_attn_shift: !ref - temperature: !ref - checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: diff --git a/recipes/CommonVoice/ASR/CTC/hparams/train_fr_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_fr_with_wav2vec.yaml index 147fe2a756..6b9f391d02 100644 --- a/recipes/CommonVoice/ASR/CTC/hparams/train_fr_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_fr_with_wav2vec.yaml @@ -6,14 +6,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_fr/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: 
!ref /train_log.txt # URL for the biggest LeBenchmark wav2vec french. wav2vec2_hub: LeBenchmark/wav2vec2-FR-7K-large +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr @@ -31,18 +32,20 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs # Must be 6 per GPU to fit 16GB of VRAM +dynamic_batching: False batch_size: 12 test_batch_size: 4 @@ -57,11 +60,14 @@ test_dataloader_options: token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### # activation: !name:torch.nn.LeakyReLU wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False +freeze_feature_extractor: False +dropout: 0.15 +warmup_steps: 500 # The wav2vec 2 model isn't updated for this amount of steps # Outputs output_neurons: 76 # BPE size, index(blank/eos/bos) = 0 @@ -72,43 +78,86 @@ blank_index: 0 bos_index: 1 eos_index: 2 +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. 
+ # kenlm_model_path: none # # Functions and classes # epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn1: !name:speechbrain.nnet.normalization.BatchNorm1d activation: !new:torch.nn.LeakyReLU drop: !new:torch.nn.Dropout - p: 0.15 + p: !ref linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn2: !name:speechbrain.nnet.normalization.BatchNorm1d activation2: !new:torch.nn.LeakyReLU drop2: !new:torch.nn.Dropout - p: 0.15 + p: !ref linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: 
!new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref - output_norm: True + output_norm: False freeze: !ref - save_path: !ref /wav2vec2_checkpoint + freeze_feature_extractor: !ref + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/CommonVoice/ASR/CTC/hparams/train_it_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_it_with_wav2vec.yaml index 7608dcf009..2a1afffc26 100644 --- a/recipes/CommonVoice/ASR/CTC/hparams/train_it_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_it_with_wav2vec.yaml @@ -7,14 +7,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_it/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # URL for the biggest Fairseq english wav2vec2 model. wav2vec2_hub: facebook/wav2vec2-large-it-voxpopuli +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/it @@ -32,18 +33,20 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". 
avoid_if_longer_than: 8.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 45 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs # Must be 8 per GPU to fit 32GB of VRAM +dynamic_batching: False batch_size: 12 test_batch_size: 4 @@ -58,12 +61,14 @@ test_dataloader_options: token_type: unigram # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### # activation: !name:torch.nn.LeakyReLU wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False +freeze_feature_extractor: False dropout: 0.15 +warmup_steps: 500 # Outputs output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0 @@ -74,16 +79,58 @@ blank_index: 0 bos_index: 1 eos_index: 2 +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. + #kenlm_model_path: none # # Functions and classes # epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear @@ -106,11 +153,12 @@ enc: !new:speechbrain.nnet.containers.Sequential bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + freeze_feature_extractor: !ref + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/CommonVoice/ASR/seq2seq/hparams/train_en_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_pt_with_wav2vec.yaml similarity index 56% rename from recipes/CommonVoice/ASR/seq2seq/hparams/train_en_with_wav2vec.yaml rename to recipes/CommonVoice/ASR/CTC/hparams/train_pt_with_wav2vec.yaml index c0131485ff..e1ba8fc5e3 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_en_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_pt_with_wav2vec.yaml @@ -1,27 +1,27 @@ # ################################ -# Model: wav2vec2 + DNN + CTC/Attention +# Model: wav2vec2 + 
DNN + CTC # Augmentation: SpecAugment -# Authors: Titouan Parcollet 2021 +# Authors: Pooneh Mousavi 2023 # ################################ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] -output_folder: !ref results/wav2vec2_ctcatt_en/ -wer_file: !ref /wer.txt +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/wav2vec2_ctc_pt/ +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt -# URL for the biggest HuggingFace english wav2vec2 model. -wav2vec2_hub: facebook/wav2vec2-large-lv60 - +# URL for the biggest Fairseq multilingual +wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files -accented_letters: False -language: en # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english +accented_letters: True +language: pt # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english train_csv: !ref /train.csv valid_csv: !ref /dev.csv test_csv: !ref /test.csv @@ -31,21 +31,20 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". 
avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 30 -number_of_ctc_epochs: 20 lr: 1.0 lr_wav2vec: 0.0001 -ctc_weight: 0.3 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min - # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs -# Must be 6 per GPU to fit 32GB of VRAM +# Must be 8 per GPU to fit 32GB of VRAM +dynamic_batching: False batch_size: 12 test_batch_size: 4 @@ -60,14 +59,13 @@ test_dataloader_options: token_type: unigram # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters -activation: !name:torch.nn.LeakyReLU +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 -dnn_layers: 2 dnn_neurons: 1024 -emb_size: 128 -dec_neurons: 1024 freeze_wav2vec: False +freeze_feature_extractor: False +dropout: 0.15 +warmup_steps: 500 # Outputs output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0 @@ -77,14 +75,21 @@ output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0 blank_index: 0 bos_index: 1 eos_index: 2 -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 80 -eos_threshold: 1.5 -using_max_attn_shift: True -max_attn_shift: 140 -# ctc_weight_decode: 0.0 -temperature: 1.50 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. 
+ # kenlm_model_path: none # # Functions and classes @@ -92,21 +97,68 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] -enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] - activation: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: !name:speechbrain.nnet.normalization.BatchNorm1d + activation: !new:torch.nn.LeakyReLU + drop: !new:torch.nn.Dropout + p: !ref + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.BatchNorm1d + activation2: !new:torch.nn.LeakyReLU + drop2: !new:torch.nn.Dropout + p: !ref + linear3: 
!name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.BatchNorm1d + activation3: !new:torch.nn.LeakyReLU + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + freeze_feature_extractor: !ref + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead @@ -121,51 +173,24 @@ wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 # save_path: !ref /wav2vec2_checkpoint/model.pt ##### -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: 1024 - attn_dim: 1024 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.15 ctc_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True ctc_cost: !name:speechbrain.nnet.losses.ctc_loss blank_index: !ref -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - modules: wav2vec2: !ref enc: !ref - emb: !ref - dec: !ref ctc_lin: !ref - seq_lin: !ref model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] + - [!ref , !ref ] model_opt_class: !name:torch.optim.Adadelta lr: !ref @@ -187,22 +212,6 @@ lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.9 patient: 0 -beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - eos_threshold: !ref - using_max_attn_shift: 
!ref - max_attn_shift: !ref - temperature: !ref - checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: diff --git a/recipes/CommonVoice/ASR/CTC/hparams/train_rw_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_rw_with_wav2vec.yaml index e383fb94d1..4baa50005b 100644 --- a/recipes/CommonVoice/ASR/CTC/hparams/train_rw_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_rw_with_wav2vec.yaml @@ -6,14 +6,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_rw/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # URL for the biggest Fairseq multilingual wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr @@ -31,12 +32,13 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". 
avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min @@ -44,6 +46,7 @@ ckpt_interval_minutes: 30 # save checkpoint every N min # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs # Must be 6 per GPU to fit 32GB of VRAM +dynamic_batching: False batch_size: 12 test_batch_size: 4 @@ -58,11 +61,14 @@ test_dataloader_options: token_type: unigram # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### # activation: !name:torch.nn.LeakyReLU wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False +freeze_feature_extractor: False +dropout: 0.15 +warmup_steps: 500 # Outputs output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0 @@ -73,43 +79,86 @@ blank_index: 0 bos_index: 1 eos_index: 2 +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. + # kenlm_model_path: none # # Functions and classes # epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn1: !name:speechbrain.nnet.normalization.BatchNorm1d activation: !new:torch.nn.LeakyReLU drop: !new:torch.nn.Dropout - p: 0.15 + p: !ref linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn2: !name:speechbrain.nnet.normalization.BatchNorm1d activation2: !new:torch.nn.LeakyReLU drop2: !new:torch.nn.Dropout - p: 0.15 + p: !ref linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + freeze_feature_extractor: !ref + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/CommonVoice/ASR/CTC/hparams/train_zh-CN_with_wav2vec.yaml b/recipes/CommonVoice/ASR/CTC/hparams/train_zh-CN_with_wav2vec.yaml new file mode 100644 
index 0000000000..32e68e6c15 --- /dev/null +++ b/recipes/CommonVoice/ASR/CTC/hparams/train_zh-CN_with_wav2vec.yaml @@ -0,0 +1,231 @@ +# ################################ +# Model: wav2vec2 + DNN + CTC +# Augmentation: SpecAugment +# Authors: Pooneh Mousavi 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/wav2vec2_ctc_zh-CN/ +test_wer_file: !ref /wer_test.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq multilingual +wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr +train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files +dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files +test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files +accented_letters: True +language: zh-CN # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv +skip_prep: False # Skip data preparation + +# We remove utterances longer than 10s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". 
+avoid_if_longer_than: 10.0 + +####################### Training Parameters #################################### + +number_of_epochs: 30 +lr: 1.0 +lr_wav2vec: 0.0001 +sorting: ascending +precision: fp32 # bf16, fp16 or fp32 +sample_rate: 16000 +ckpt_interval_minutes: 30 # save checkpoint every N min + + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 8 per GPU to fit 32GB of VRAM +dynamic_batching: False +batch_size: 12 +test_batch_size: 4 + +dataloader_options: + batch_size: !ref + num_workers: 6 +test_dataloader_options: + batch_size: !ref + num_workers: 6 + +# BPE parameters +token_type: unigram # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +####################### Model Parameters ####################################### +wav2vec_output_dim: 1024 +dnn_neurons: 1024 +freeze_wav2vec: False +freeze_feature_extractor: False +dropout: 0.15 +warmup_steps: 500 + +# Outputs +output_neurons: 4652 # BPE size, index(blank/eos/bos) = 0 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +blank_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: 100 + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + topk: 1 + alpha: 1.0 + beta: 0.5 + # To use n-gram LM for decoding, follow steps in README.md. + # kenlm_model_path: none + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +enc: !new:speechbrain.nnet.containers.Sequential + input_shape: [null, null, !ref ] + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: !name:speechbrain.nnet.normalization.BatchNorm1d + activation: !new:torch.nn.LeakyReLU + drop: !new:torch.nn.Dropout + p: !ref + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.BatchNorm1d + activation2: !new:torch.nn.LeakyReLU + drop2: !new:torch.nn.Dropout + p: !ref + linear3: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.BatchNorm1d + activation3: !new:torch.nn.LeakyReLU + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + freeze_feature_extractor: !ref + save_path: !ref + +##### +# Uncomment this block if you prefer to use a Fairseq pretrained model instead +# of a HuggingFace one. Here, we provide an URL that is obtained from the +# Fairseq github for the multilingual XLSR. 
+# +#wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr_53_56k.pt +#wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 +# pretrained_path: !ref +# output_norm: True +# freeze: False +# save_path: !ref /wav2vec2_checkpoint/model.pt +##### + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +model_opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/CommonVoice/ASR/CTC/train_with_wav2vec.py b/recipes/CommonVoice/ASR/CTC/train_with_wav2vec.py index 32dd71142c..bc6b2c53ad 100644 --- a/recipes/CommonVoice/ASR/CTC/train_with_wav2vec.py +++ b/recipes/CommonVoice/ASR/CTC/train_with_wav2vec.py @@ -1,14 +1,4 @@ #!/usr/bin/env python3 -import sys -import torch -import logging -import speechbrain as sb -import torchaudio -from hyperpyyaml import load_hyperpyyaml -from 
speechbrain.tokenizers.SentencePiece import SentencePiece -from speechbrain.utils.data_utils import undo_padding -from speechbrain.utils.distributed import run_on_main - """Recipe for training a sequence-to-sequence ASR system with CommonVoice. The system employs a wav2vec2 encoder and a CTC decoder. Decoding is performed with greedy decoding (will be extended to beam search). @@ -17,7 +7,7 @@ > python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml With the default hyperparameters, the system employs a pretrained wav2vec2 encoder. -The wav2vec2 model is pretrained following the model given in the hprams file. +The wav2vec2 model is pretrained following the model given in the hparams file. It may be dependent on the language. The neural network is trained with CTC on sub-word units estimated with @@ -33,7 +23,20 @@ * Titouan Parcollet 2021 """ -logger = logging.getLogger(__name__) +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -46,37 +49,49 @@ def compute_forward(self, batch, stage): tokens_bos, _ = batch.tokens_bos wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) # Forward pass - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) x = self.modules.enc(feats) logits = self.modules.ctc_lin(x) p_ctc = self.hparams.log_softmax(logits) - return p_ctc, wav_lens + p_tokens = None + if stage == sb.Stage.VALID: + p_tokens = sb.decoders.ctc_greedy_decode( + p_ctc, wav_lens, blank_id=self.hparams.blank_index + ) + elif stage == sb.Stage.TEST: + p_tokens = test_searcher(p_ctc, wav_lens) + + return p_ctc, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC) given predictions and targets.""" - p_ctc, wav_lens = predictions + p_ctc, wav_lens, p_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + tokens_lens = self.hparams.wav_augment.replicate_labels(tokens_lens) + loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) - if stage != sb.Stage.TRAIN: - # Decode token terms to words - sequence = sb.decoders.ctc_greedy_decode( - p_ctc, wav_lens, blank_id=self.hparams.blank_index - ) + if stage == sb.Stage.VALID: + # Convert token indices to words + predicted_words = self.tokenizer(p_tokens, task="decode_from_list") - predicted_words = self.tokenizer(sequence, task="decode_from_list") + elif stage == sb.Stage.TEST: + predicted_words = [hyp[0].text.split(" ") for hyp in p_tokens] + if stage != sb.Stage.TRAIN: # Convert indices to words target_words = undo_padding(tokens, tokens_lens) target_words = self.tokenizer(target_words, task="decode_from_list") @@ -86,53 +101,6 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - 
if self.auto_mix_prec: - - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - - with torch.cuda.amp.autocast(): - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - - self.scaler.scale(loss).backward() - if not self.hparams.wav2vec2.freeze: - self.scaler.unscale_(self.wav2vec_optimizer) - self.scaler.unscale_(self.model_optimizer) - - if self.check_gradients(loss): - if not self.hparams.wav2vec2.freeze: - self.scaler.step(self.wav2vec_optimizer) - self.scaler.step(self.model_optimizer) - - self.scaler.update() - else: - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - loss.backward() - - if self.check_gradients(loss): - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.step() - self.model_optimizer.step() - - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -174,15 +142,19 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + 
self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def init_optimizers(self): "Initializes the wav2vec2 optimizer and model optimizer" @@ -204,17 +176,38 @@ def init_optimizers(self): if self.checkpointer is not None: self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + if not self.hparams.wav2vec2.freeze: + self.optimizers_dict = { + "wav2vec_optimizer": self.wav2vec_optimizer, + "model_optimizer": self.model_optimizer, + } + else: + self.optimizers_dict = {"model_optimizer": self.model_optimizer} + + def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} + if not self.hparams.wav2vec2.freeze: + if self.optimizer_step >= self.hparams.warmup_steps: + valid_optimizers["wav2vec_optimizer"] = optimizers[ + "wav2vec_optimizer" + ] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers + # Define custom data procedure def dataio_prepare(hparams, tokenizer): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # 1. 
Define datasets data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -244,13 +237,15 @@ def dataio_prepare(hparams, tokenizer): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate @@ -262,10 +257,11 @@ def dataio_prepare(hparams, tokenizer): @sb.utils.data_pipeline.takes("wav") @sb.utils.data_pipeline.provides("sig") def audio_pipeline(wav): - info = torchaudio.info(wav) + info = audio_io.info(wav) sig = sb.dataio.dataio.read_audio(wav) resampled = torchaudio.transforms.Resample( - info.sample_rate, hparams["sample_rate"], + info.sample_rate, + hparams["sample_rate"], )(sig) return resampled @@ -290,19 +286,45 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], ) - return train_data, valid_data, test_data + # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
+ train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa -if __name__ == "__main__": + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_data, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -342,7 +364,13 @@ def text_pipeline(wrd): ) # Create the datasets objects as well as tokenization and encoding :-D - train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer) + ( + train_data, + valid_data, + test_data, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams, tokenizer) # Trainer initialization asr_brain = ASR( @@ -354,18 +382,53 @@ def text_pipeline(wrd): # Adding objects to trainer. 
asr_brain.tokenizer = tokenizer + vocab_list = [ + tokenizer.sp.id_to_piece(i) for i in range(tokenizer.sp.vocab_size()) + ] + + from speechbrain.decoders.ctc import CTCBeamSearcher + + test_searcher = CTCBeamSearcher( + **hparams["test_beam_search"], + vocab_list=vocab_list, + ) + + # Manage dynamic batching + train_dataloader_opts = hparams["dataloader_options"] + valid_dataloader_opts = hparams["test_dataloader_options"] + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn # Training asr_brain.fit( asr_brain.hparams.epoch_counter, train_data, valid_data, - train_loader_kwargs=hparams["dataloader_options"], - valid_loader_kwargs=hparams["test_dataloader_options"], + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, ) # Test - asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt" asr_brain.evaluate( test_data, min_key="WER", diff --git a/recipes/CommonVoice/ASR/seq2seq/README.md b/recipes/CommonVoice/ASR/seq2seq/README.md index bf296096f2..7381c3e16e 100644 --- a/recipes/CommonVoice/ASR/seq2seq/README.md +++ b/recipes/CommonVoice/ASR/seq2seq/README.md @@ -1,6 +1,5 @@ # CommonVoice ASR with CTC + Attention based Seq2Seq models. 
-This folder contains scripts necessary to run an ASR experiment with the CommonVoice dataset: [CommonVoice Homepage](https://commonvoice.mozilla.org/) - +This folder contains scripts necessary to run an ASR experiment with the CommonVoice 14.0 dataset: [CommonVoice Homepage](https://commonvoice.mozilla.org/) and pytorch 2.0 # How to run python train.py hparams/{hparam_file}.py @@ -15,22 +14,20 @@ Here is a list of the different languages that we tested within the CommonVoice - Kinyarwanda - Italian - English +- German +- Spanish # Results | Language | CommonVoice Release | hyperparams file | LM | Val. CER | Val. WER | Test CER | Test WER | HuggingFace link | Model link | GPUs | | ------------- |:-------------:|:---------------------------:| -----:| -----:| -----:| -----:| -----:| :-----------:| :-----------:| :-----------:| -| French | 2020-12-11 | train_fr.yaml | No | 5.22 | 13.92 | 6.43 | 15.99 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-fr) | [model](https://drive.google.com/drive/folders/1GShpLaX9AOLklwBAOLB9B9lLTn0HsWwS?usp=sharing) | 2xV100 16GB | -| French | 2020-12-11 | train_fr_with_wav2vec.yaml | No | 6.13 | 11.82 | 9.78 | 13.34 | Not Avail. | 2xV100 32GB | -| Kinyarwanda | 2020-12-11 | train_rw.yaml | No | 7.30 | 21.36 | 9.55 | 24.27 | Not Avail. | [model](https://drive.google.com/drive/folders/122efLUMYoc1LGoK7O6LIWkSklmjKVGxM?usp=sharing) | 2xV100 32GB | -| Kinyarwanda | 2020-12-11 | train_rw_with_wav2vec.yaml | No | 5.08 | 15.88 | 8.33 | 18.91 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-rw) | [model](https://drive.google.com/drive/folders/1ceHxyNojY0wXmXyPoyn9xUiH_5B5qgE4?usp=sharing) | 2xV100 16GB | -| English | 2020-12-11 | train_en.yaml | No | 8.66 | 20.16 | 12.93 | 24.89 | Not Avail. 
| [model](https://drive.google.com/drive/folders/1FAKRhfu_1gLnkshYGKp-6G9ZVMIUlv9n?usp=sharing) | 2xV100 16GB | -| English | 2020-12-11 | train_en_with_wav2vec.yaml | No | 14.50 | 13.21 | 24.65 | 15.69 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-en) | [model](https://drive.google.com/drive/folders/1EfIZiJi8ch53mil9K4tn46OrmTJq5WYj?usp=sharing) | 2xV100 32GB | -| Italian | 2020-12-11 | train_it.yaml | No | 5.14 | 15.59 | 15.40 | 16.61 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-it) | [model](https://drive.google.com/drive/folders/1asxPsY1EBGHIpIFhBtUi9oiyR6C7gC0g?usp=sharing) | 2xV100 16GB | -| Italian | 2020-12-11 | train_it_with_wav2vec.yaml | No | 3.11 | 8.30 | 5.75 | 9.86 | [model](https://huggingface.co/speechbrain/asr-wav2vec2-commonvoice-it) | [model](https://drive.google.com/drive/folders/1LKA50Qsr1fM1E3t4PHMWUjlBMS2QGFHj?usp=sharing) | 2xV100 16GB | -| German | 2021-10-28 | train_de.yaml | No | 4.32 | 13.99 | 4.93 | 15.37 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-de) | -- | 1x V100 16GB | - -The output folders with checkpoints and logs can be found [here](https://drive.google.com/drive/folders/11NMzY0zV-NqJmPMyZfC3RtT64bYe-G_O?usp=sharing). 
+| French | 2023-08-15 | train_fr.yaml | No | 4.40 | 12.17 | 5.93 | 14.88 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-fr) | [model](https://www.dropbox.com/sh/07a5lt21wxp98x5/AABhNwmWFaNFyA734bNZUO03a?dl=0) | 1xV100 32GB | +| Kinyarwanda | 2023-08-15 | train_rw.yaml | No | 6.75 | 23.66 | 10.80 | 29.22 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-rw) | [model](https://www.dropbox.com/sh/i1fv4f8miilqgii/AAB3gE97kmFDA0ISkIDSUW_La?dl=0) | 1xV100 32GB | +| English | 2023-08-15 | train_en.yaml | No | 9.75 | 20.23 | 12.76 | 23.88 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-en) | [model](https://www.dropbox.com/sh/h8ged0yu3ztypkh/AAAu-12k_Ceg-tTjuZnrg7dza?dl=0) | 1xV100 32GB | +| Italian | 2023-08-15 | train_it.yaml | No | 5.89 | 15.99 | 6.27 | 17.02 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-it) | [model](https://www.dropbox.com/sh/ss59uu0j5boscvp/AAASsiFhlB1nDWPkFX410bzna?dl=0) | 1xV100 32GB | +| German | 2023-08-15 | train_de.yaml | No | 2.90 | 10.21 | 3.82 | 12.25 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-de) | [model](https://www.dropbox.com/sh/zgatirb118f79ef/AACmjh-D94nNDWcnVI4Ef5K7a?dl=0) | 1xV100 32GB | +| Spanish | 2023-08-15 | train_es.yaml | No | 4.10 | 14.10 | 4.68 | 14.77 | [model](https://huggingface.co/speechbrain/asr-crdnn-commonvoice-14-es) | [model](https://www.dropbox.com/sh/r3w0b2tm1p73vft/AADCxdhUwDN6j4PVT9TYe-d5a?dl=0) | 1xV100 32GB | + ## How to simply use pretrained models to transcribe my audio file? @@ -47,6 +44,15 @@ SpeechBrain provides a simple interface to transcribe audio files with pretraine Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/CommonVoice/ASR/seq2seq/extra_requirements.txt b/recipes/CommonVoice/ASR/seq2seq/extra_requirements.txt deleted file mode 100644 index 78949924f4..0000000000 --- a/recipes/CommonVoice/ASR/seq2seq/extra_requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -# For wav2vect recipe (HuggingFace) -transformers diff --git a/recipes/CommonVoice/ASR/seq2seq/hparams/train_de.yaml b/recipes/CommonVoice/ASR/seq2seq/hparams/train_de.yaml index c834f3df1c..b26b6a4159 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_de.yaml +++ b/recipes/CommonVoice/ASR/seq2seq/hparams/train_de.yaml @@ -8,9 +8,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are 
made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_de/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -30,12 +30,14 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 25 number_of_ctc_epochs: 20 lr: 1.0 ctc_weight: 0.3 sorting: ascending +precision: fp32 # bf16, fp16 or fp32 ckpt_interval_minutes: 30 # With data_parallel batch_size is split into N jobs @@ -61,7 +63,7 @@ sample_rate: 16000 n_fft: 400 n_mels: 80 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 3 @@ -77,6 +79,8 @@ dnn_blocks: 2 dnn_neurons: 1024 emb_size: 128 dec_neurons: 1024 +dec_hidden_size: !ref +dec_attn_dim: !ref # Outputs output_neurons: 500 # BPE size, index(blank/eos/bos) = 0 @@ -101,18 +105,34 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Frequency domain SpecAugment -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 +############################## Augmentations ################################### + + # Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + 
drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global @@ -122,6 +142,8 @@ compute_features: !new:speechbrain.lobes.features.Fbank n_fft: !ref n_mels: !ref +############################## Models ########################################## + enc: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] activation: !ref @@ -146,12 +168,12 @@ emb: !new:speechbrain.nnet.embedding.Embedding embedding_dim: !ref dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref + enc_dim: !ref input_size: !ref rnn_type: gru attn_type: location - hidden_size: 1024 - attn_dim: 1024 + hidden_size: !ref + attn_dim: !ref num_layers: 1 scaling: 1.0 channels: 10 diff --git a/recipes/CommonVoice/ASR/seq2seq/hparams/train_en.yaml b/recipes/CommonVoice/ASR/seq2seq/hparams/train_en.yaml index cc2165818e..da912afe41 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_en.yaml +++ b/recipes/CommonVoice/ASR/seq2seq/hparams/train_en.yaml @@ -7,9 +7,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_it/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -29,12 +29,14 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". 
avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 25 number_of_ctc_epochs: 10 lr: 1.0 ctc_weight: 0.3 sorting: ascending +precision: fp32 # bf16, fp16 or fp32 # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs @@ -52,13 +54,14 @@ test_dataloader_options: # BPE parameters token_type: unigram # ["unigram", "bpe", "char"] character_coverage: 1.0 +label_smoothing: 0.1 # Feature parameters (FBANKS etc) sample_rate: 16000 n_fft: 400 n_mels: 80 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 3 @@ -74,6 +77,8 @@ dnn_blocks: 2 dnn_neurons: 1024 emb_size: 128 dec_neurons: 1024 +dec_hidden_size: !ref +dec_attn_dim: !ref # Outputs output_neurons: 500 # BPE size, index(blank/eos/bos) = 0 @@ -98,18 +103,34 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Frequency domain SpecAugment -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 +############################## Augmentations ################################### + + # Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + 
augmentations: [ + !ref , + !ref , + !ref ] normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global @@ -119,6 +140,8 @@ compute_features: !new:speechbrain.lobes.features.Fbank n_fft: !ref n_mels: !ref +############################## Models ########################################## + enc: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] activation: !ref @@ -143,12 +166,12 @@ emb: !new:speechbrain.nnet.embedding.Embedding embedding_dim: !ref dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref + enc_dim: !ref input_size: !ref rnn_type: gru attn_type: location - hidden_size: 1024 - attn_dim: 1024 + hidden_size: !ref + attn_dim: !ref num_layers: 1 scaling: 1.0 channels: 10 @@ -171,7 +194,7 @@ ctc_cost: !name:speechbrain.nnet.losses.ctc_loss blank_index: !ref seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 + label_smoothing: !ref modules: enc: !ref diff --git a/recipes/CommonVoice/ASR/seq2seq/hparams/train_fr_with_wav2vec.yaml b/recipes/CommonVoice/ASR/seq2seq/hparams/train_es.yaml similarity index 57% rename from recipes/CommonVoice/ASR/seq2seq/hparams/train_fr_with_wav2vec.yaml rename to recipes/CommonVoice/ASR/seq2seq/hparams/train_es.yaml index 646610870a..566f9d76ad 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_fr_with_wav2vec.yaml +++ b/recipes/CommonVoice/ASR/seq2seq/hparams/train_es.yaml @@ -1,26 +1,26 @@ # ################################ -# Model: wav2vec2 + DNN + CTC +# Model: VGG2 + LSTM + time pooling # Augmentation: SpecAugment -# Authors: Titouan Parcollet 2021 +# Authors: Titouan Parcollet, Mirco Ravanelli, Peter Plantinga, Ju-Chieh Chou, +# and Abdel HEBA 2020 +# edited: Andreas Nautsch, 2021 # ################################ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] -output_folder: !ref results/wav2vec2_ctcatt_fr/ -wer_file: !ref 
/wer.txt +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/CRDNN_es/ +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt -# URL for the biggest HuggingFace LeBenchmarh french w2v2 -wav2vec2_hub: LeBenchmark/wav2vec2-FR-7K-large # Data files -data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr +data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-7.0-2021-07-21/de train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files accented_letters: True -language: fr # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english +language: es # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english train_csv: !ref /train.csv valid_csv: !ref /dev.csv test_csv: !ref /test.csv @@ -30,23 +30,20 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". 
avoid_if_longer_than: 10.0 -# Training parameters -number_of_epochs: 30 -number_of_ctc_epochs: 15 +####################### Training Parameters #################################### + +number_of_epochs: 25 +number_of_ctc_epochs: 20 lr: 1.0 -lr_wav2vec: 0.0001 ctc_weight: 0.3 sorting: ascending -auto_mix_prec: False -sample_rate: 16000 -ckpt_interval_minutes: 30 # save checkpoint every N min - +precision: fp32 # bf16, fp16 or fp32 # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs # Must be 6 per GPU to fit 16GB of VRAM -batch_size: 12 -test_batch_size: 4 +batch_size: 8 +test_batch_size: 6 dataloader_options: batch_size: !ref @@ -58,15 +55,31 @@ test_dataloader_options: # BPE parameters token_type: unigram # ["unigram", "bpe", "char"] character_coverage: 1.0 +label_smoothing: 0.1 -# Model parameters +# Feature parameters (FBANKS etc) +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU -wav2vec_output_dim: 1024 -dnn_layers: 2 +dropout: 0.15 +cnn_blocks: 3 +cnn_channels: (128, 200, 256) +inter_layer_pooling_size: (2, 2, 2) +cnn_kernelsize: (3, 3) +time_pooling_size: 4 +rnn_class: !name:speechbrain.nnet.RNN.LSTM +rnn_layers: 5 +rnn_neurons: 1024 +rnn_bidirectional: True +dnn_blocks: 2 dnn_neurons: 1024 emb_size: 128 dec_neurons: 1024 -freeze_wav2vec: False +dec_hidden_size: !ref +dec_attn_dim: !ref # Outputs output_neurons: 500 # BPE size, index(blank/eos/bos) = 0 @@ -74,8 +87,8 @@ output_neurons: 500 # BPE size, index(blank/eos/bos) = 0 # Decoding parameters # Be sure that the bos and eos index match with the BPEs ones blank_index: 0 -bos_index: 1 -eos_index: 2 +bos_index: 0 +eos_index: 0 min_decode_ratio: 0.0 max_decode_ratio: 1.0 beam_size: 80 @@ -91,52 +104,81 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: 
!new:speechbrain.lobes.augment.TimeDomainSpecAugment +############################## Augmentations ################################### + + # Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + +compute_features: !new:speechbrain.lobes.features.Fbank sample_rate: !ref - speeds: [95, 100, 105] + n_fft: !ref + n_mels: !ref + +############################## Models ########################################## -enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN - input_shape: [null, null, !ref ] +enc: !new:speechbrain.lobes.models.CRDNN.CRDNN + input_shape: [null, null, !ref ] activation: !ref - dnn_blocks: !ref + dropout: !ref + cnn_blocks: !ref + cnn_channels: !ref + cnn_kernelsize: !ref + inter_layer_pooling_size: !ref + time_pooling: True + using_2d_pooling: False + time_pooling_size: !ref + rnn_class: !ref + rnn_layers: !ref + rnn_neurons: !ref + rnn_bidirectional: !ref + rnn_re_init: True + dnn_blocks: !ref dnn_neurons: !ref -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 - source: !ref - output_norm: True - freeze: !ref - save_path: !ref /wav2vec2_checkpoint - -##### -# Uncomment this block if you prefer to use a Fairseq pretrained model instead -# of a HuggingFace one. Here, we provide an URL that is obtained from the -# Fairseq github for the multilingual XLSR. 
-# -#wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr_53_56k.pt -#wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 -# pretrained_path: !ref -# output_norm: True -# freeze: False -# save_path: !ref /wav2vec2_checkpoint/model.pt -##### - emb: !new:speechbrain.nnet.embedding.Embedding num_embeddings: !ref embedding_dim: !ref dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref + enc_dim: !ref input_size: !ref rnn_type: gru attn_type: location - hidden_size: 1024 - attn_dim: 1024 + hidden_size: !ref + attn_dim: !ref num_layers: 1 scaling: 1.0 channels: 10 kernel_size: 100 re_init: True - dropout: 0.15 + dropout: !ref ctc_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref @@ -153,47 +195,36 @@ ctc_cost: !name:speechbrain.nnet.losses.ctc_loss blank_index: !ref seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 + label_smoothing: !ref modules: - wav2vec2: !ref enc: !ref emb: !ref dec: !ref ctc_lin: !ref seq_lin: !ref + normalize: !ref model: !new:torch.nn.ModuleList - [!ref , !ref , !ref , !ref , !ref ] -model_opt_class: !name:torch.optim.Adadelta +opt_class: !name:torch.optim.Adadelta lr: !ref rho: 0.95 eps: 1.e-8 -wav2vec_opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler +lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler initial_value: !ref improvement_threshold: 0.0025 annealing_factor: 0.8 patient: 0 -lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.9 - patient: 0 - beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref @@ -205,10 +236,9 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher checkpointer: 
!new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: - wav2vec2: !ref model: !ref - scheduler_model: !ref - scheduler_wav2vec: !ref + scheduler: !ref + normalizer: !ref counter: !ref train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger diff --git a/recipes/CommonVoice/ASR/seq2seq/hparams/train_fr.yaml b/recipes/CommonVoice/ASR/seq2seq/hparams/train_fr.yaml index b0f0c746e7..61192ad528 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_fr.yaml +++ b/recipes/CommonVoice/ASR/seq2seq/hparams/train_fr.yaml @@ -7,9 +7,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_it/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -29,12 +29,14 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". 
avoid_if_longer_than: 10.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 25 number_of_ctc_epochs: 20 lr: 1.0 ctc_weight: 0.3 sorting: ascending +precision: fp32 # bf16, fp16 or fp32 # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs @@ -59,7 +61,7 @@ sample_rate: 16000 n_fft: 400 n_mels: 80 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 3 @@ -75,6 +77,8 @@ dnn_blocks: 2 dnn_neurons: 1024 emb_size: 128 dec_neurons: 1024 +dec_hidden_size: !ref +dec_attn_dim: !ref # Outputs output_neurons: 500 # BPE size, index(blank/eos/bos) = 0 @@ -99,18 +103,34 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Frequency domain SpecAugment -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 +############################## Augmentations ################################### + + # Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global @@ -120,6 +140,8 @@ compute_features: 
!new:speechbrain.lobes.features.Fbank n_fft: !ref n_mels: !ref +############################## Models ########################################## + enc: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] activation: !ref @@ -144,12 +166,12 @@ emb: !new:speechbrain.nnet.embedding.Embedding embedding_dim: !ref dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref + enc_dim: !ref input_size: !ref rnn_type: gru attn_type: location - hidden_size: 1024 - attn_dim: 1024 + hidden_size: !ref + attn_dim: !ref num_layers: 1 scaling: 1.0 channels: 10 diff --git a/recipes/CommonVoice/ASR/seq2seq/hparams/train_it.yaml b/recipes/CommonVoice/ASR/seq2seq/hparams/train_it.yaml index 1125457348..df6503cfc0 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_it.yaml +++ b/recipes/CommonVoice/ASR/seq2seq/hparams/train_it.yaml @@ -7,9 +7,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_it/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -29,12 +29,14 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". 
avoid_if_longer_than: 8.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 50 number_of_ctc_epochs: 40 lr: 1.0 ctc_weight: 0.3 sorting: ascending +precision: fp32 # bf16, fp16 or fp32 # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs @@ -58,7 +60,7 @@ sample_rate: 16000 n_fft: 400 n_mels: 80 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 3 @@ -74,6 +76,8 @@ dnn_blocks: 2 dnn_neurons: 1024 emb_size: 128 dec_neurons: 1024 +dec_hidden_size: !ref +dec_attn_dim: !ref # Outputs output_neurons: 500 # BPE size, index(blank/eos/bos) = 0 @@ -98,18 +102,34 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Frequency domain SpecAugment -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 +############################## Augmentations ################################### + + # Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global @@ -119,6 +139,8 @@ compute_features: 
!new:speechbrain.lobes.features.Fbank n_fft: !ref n_mels: !ref +############################## Models ########################################## + enc: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] activation: !ref @@ -143,12 +165,12 @@ emb: !new:speechbrain.nnet.embedding.Embedding embedding_dim: !ref dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref + enc_dim: !ref input_size: !ref rnn_type: gru attn_type: location - hidden_size: 1024 - attn_dim: 1024 + hidden_size: !ref + attn_dim: !ref num_layers: 1 scaling: 1.0 channels: 10 diff --git a/recipes/CommonVoice/ASR/seq2seq/hparams/train_rw.yaml b/recipes/CommonVoice/ASR/seq2seq/hparams/train_rw.yaml index 430f03cd69..c1eadd52b9 100644 --- a/recipes/CommonVoice/ASR/seq2seq/hparams/train_rw.yaml +++ b/recipes/CommonVoice/ASR/seq2seq/hparams/train_rw.yaml @@ -7,9 +7,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_it/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -29,12 +29,14 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". 
avoid_if_longer_than: 8.0 -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 25 number_of_ctc_epochs: 20 lr: 1.0 ctc_weight: 0.3 sorting: ascending +precision: fp32 # bf16, fp16 or fp32 # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs @@ -58,7 +60,7 @@ sample_rate: 16000 n_fft: 400 n_mels: 80 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 3 @@ -74,6 +76,8 @@ dnn_blocks: 2 dnn_neurons: 1024 emb_size: 128 dec_neurons: 1024 +dec_hidden_size: !ref +dec_attn_dim: !ref # Outputs output_neurons: 500 # BPE size, index(blank/eos/bos) = 0 @@ -98,18 +102,35 @@ temperature: 1.50 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Frequency domain SpecAugment -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 + +############################## Augmentations ################################### + + # Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global @@ -119,6 +140,8 @@ compute_features: 
!new:speechbrain.lobes.features.Fbank n_fft: !ref n_mels: !ref +############################## Models ########################################## + enc: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] activation: !ref @@ -143,12 +166,12 @@ emb: !new:speechbrain.nnet.embedding.Embedding embedding_dim: !ref dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref + enc_dim: !ref input_size: !ref rnn_type: gru attn_type: location - hidden_size: 1024 - attn_dim: 1024 + hidden_size: !ref + attn_dim: !ref num_layers: 1 scaling: 1.0 channels: 10 diff --git a/recipes/CommonVoice/ASR/seq2seq/train.py b/recipes/CommonVoice/ASR/seq2seq/train.py index 5444e2d8e6..3403403708 100644 --- a/recipes/CommonVoice/ASR/seq2seq/train.py +++ b/recipes/CommonVoice/ASR/seq2seq/train.py @@ -1,14 +1,3 @@ -#!/usr/bin/env python3 -import sys -import torch -import logging -import speechbrain as sb -import torchaudio -from hyperpyyaml import load_hyperpyyaml -from speechbrain.tokenizers.SentencePiece import SentencePiece -from speechbrain.utils.data_utils import undo_padding -from speechbrain.utils.distributed import run_on_main - """Recipe for training a sequence-to-sequence ASR system with CommonVoice. The system employs an encoder, a decoder, and an attention mechanism between them. Decoding is performed with beamsearch. @@ -23,11 +12,25 @@ different encoders, decoders, tokens (e.g, characters instead of BPE), training languages (all CommonVoice languages), and many other possible variations. 
+ Authors * Titouan Parcollet 2020 """ -logger = logging.getLogger(__name__) +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -40,14 +43,19 @@ def compute_forward(self, batch, stage): tokens_bos, _ = batch.tokens_bos wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + # Forward pass feats = self.hparams.compute_features(wavs) feats = self.modules.normalize(feats, wav_lens) - ## Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - feats = self.hparams.augmentation(feats) + # Add feature augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels(tokens_bos) x = self.modules.enc(feats.detach()) e_in = self.modules.emb(tokens_bos) # y_in bos + tokens @@ -57,40 +65,57 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs + p_ctc, p_tokens = None, None if stage == sb.Stage.TRAIN: current_epoch = self.hparams.epoch_counter.current if current_epoch <= self.hparams.number_of_ctc_epochs: # Output layer for ctc log-probabilities logits = self.modules.ctc_lin(x) p_ctc = self.hparams.log_softmax(logits) - return p_ctc, p_seq, wav_lens - else: - return p_seq, wav_lens else: - p_tokens, scores = self.hparams.beam_searcher(x, wav_lens) - return p_seq, wav_lens, p_tokens + p_tokens, _, _, _ = self.hparams.beam_searcher(x, wav_lens) + + return p_ctc, p_seq, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" - current_epoch = self.hparams.epoch_counter.current - if stage == sb.Stage.TRAIN: - if current_epoch <= self.hparams.number_of_ctc_epochs: - p_ctc, p_seq, wav_lens = predictions - else: - p_seq, wav_lens = predictions - else: - p_seq, wav_lens, predicted_tokens = predictions + p_ctc, p_seq, wav_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens + if stage == sb.Stage.TRAIN: + if hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + tokens_lens = self.hparams.wav_augment.replicate_labels( + tokens_lens + ) + tokens_eos = self.hparams.wav_augment.replicate_labels( + tokens_eos + ) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens + ) + if hasattr(self.hparams, "fea_augment"): + tokens = self.hparams.fea_augment.replicate_labels(tokens) + 
tokens_lens = self.hparams.fea_augment.replicate_labels( + tokens_lens + ) + tokens_eos = self.hparams.fea_augment.replicate_labels( + tokens_eos + ) + tokens_eos_lens = self.hparams.fea_augment.replicate_labels( + tokens_eos_lens + ) + loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens ) # Add ctc loss if necessary + current_epoch = self.hparams.epoch_counter.current if ( stage == sb.Stage.TRAIN and current_epoch <= self.hparams.number_of_ctc_epochs @@ -118,23 +143,6 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -161,27 +169,33 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) # Define custom data procedure def dataio_prepare(hparams, tokenizer): """This 
function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # 1. Define datasets data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -211,13 +225,15 @@ def dataio_prepare(hparams, tokenizer): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate @@ -229,12 +245,13 @@ def dataio_prepare(hparams, tokenizer): @sb.utils.data_pipeline.takes("wav") @sb.utils.data_pipeline.provides("sig") def audio_pipeline(wav): - info = torchaudio.info(wav) + info = audio_io.info(wav) sig = sb.dataio.dataio.read_audio(wav) if info.num_channels > 1: sig = torch.mean(sig, dim=1) resampled = torchaudio.transforms.Resample( - info.sample_rate, hparams["sample_rate"], + info.sample_rate, + hparams["sample_rate"], )(sig) return resampled @@ -259,19 +276,18 @@ def text_pipeline(wrd): # 4. 
Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], ) return train_data, valid_data, test_data if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -335,7 +351,6 @@ def text_pipeline(wrd): ) # Test - asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt" asr_brain.evaluate( test_data, min_key="WER", diff --git a/recipes/CommonVoice/ASR/seq2seq/train_with_wav2vec.py b/recipes/CommonVoice/ASR/seq2seq/train_with_wav2vec.py index f8f4b33f7a..56cbc7e3bd 100644 --- a/recipes/CommonVoice/ASR/seq2seq/train_with_wav2vec.py +++ b/recipes/CommonVoice/ASR/seq2seq/train_with_wav2vec.py @@ -1,14 +1,4 @@ #!/usr/bin/env python3 -import sys -import torch -import logging -import speechbrain as sb -import torchaudio -from hyperpyyaml import load_hyperpyyaml -from speechbrain.tokenizers.SentencePiece import SentencePiece -from speechbrain.utils.data_utils import undo_padding -from speechbrain.utils.distributed import run_on_main - """Recipe for training a sequence-to-sequence ASR system with CommonVoice. The system employs a wav2vec2 encoder, a decoder, and an attention mechanism between them. Decoding is performed with beamsearch. @@ -17,7 +7,7 @@ > python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml With the default hyperparameters, the system employs a pretrained wav2vec2 encoder. 
-The wav2vec2 model is pretrained following the XSLR French HuggingFace model: +The wav2vec2 model is pretrained following the XLSR French HuggingFace model: facebook/wav2vec2-large-xlsr-53-french The decoder is based on a standard GRU and BeamSearch (no LM). @@ -35,7 +25,20 @@ * Titouan Parcollet 2020 """ -logger = logging.getLogger(__name__) +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -48,12 +51,13 @@ def compute_forward(self, batch, stage): tokens_bos, _ = batch.tokens_bos wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) # Forward pass - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) x = self.modules.enc(feats) e_in = self.modules.emb(tokens_bos) # y_in bos + tokens @@ -63,40 +67,42 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs + p_ctc, p_tokens = None, None if stage == sb.Stage.TRAIN: current_epoch = self.hparams.epoch_counter.current if current_epoch <= self.hparams.number_of_ctc_epochs: # Output layer for ctc log-probabilities logits = self.modules.ctc_lin(x) p_ctc = self.hparams.log_softmax(logits) - return p_ctc, p_seq, wav_lens - else: - return p_seq, wav_lens else: - p_tokens, scores = self.hparams.beam_searcher(x, wav_lens) - return p_seq, wav_lens, p_tokens + p_tokens, _, _, _ = self.hparams.beam_searcher(x, wav_lens) + + return p_ctc, p_seq, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" - current_epoch = self.hparams.epoch_counter.current - if stage == sb.Stage.TRAIN: - if current_epoch <= self.hparams.number_of_ctc_epochs: - p_ctc, p_seq, wav_lens = predictions - else: - p_seq, wav_lens = predictions - else: - p_seq, wav_lens, predicted_tokens = predictions + p_ctc, p_seq, wav_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens + # Augment Labels + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + tokens_lens = self.hparams.wav_augment.replicate_labels(tokens_lens) + tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens + ) + loss_seq = 
self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens ) # Add ctc loss if necessary + current_epoch = self.hparams.epoch_counter.current if ( stage == sb.Stage.TRAIN and current_epoch <= self.hparams.number_of_ctc_epochs @@ -124,53 +130,6 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - if self.auto_mix_prec: - - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - - with torch.cuda.amp.autocast(): - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - - self.scaler.scale(loss).backward() - if not self.hparams.wav2vec2.freeze: - self.scaler.unscale_(self.wav2vec_optimizer) - self.scaler.unscale_(self.model_optimizer) - - if self.check_gradients(loss): - if not self.hparams.wav2vec2.freeze: - self.scaler.step(self.wav2vec_optimizer) - self.scaler.step(self.model_optimizer) - - self.scaler.update() - else: - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - loss.backward() - - if self.check_gradients(loss): - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.step() - self.model_optimizer.step() - - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -212,15 +171,19 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) 
self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def init_optimizers(self): "Initializes the wav2vec2 optimizer and model optimizer" @@ -239,17 +202,37 @@ def init_optimizers(self): if self.checkpointer is not None: self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + if not self.hparams.wav2vec2.freeze: + self.optimizers_dict = { + "wav2vec_optimizer": self.wav2vec_optimizer, + "model_optimizer": self.model_optimizer, + } + else: + self.optimizers_dict = {"model_optimizer": self.model_optimizer} + + def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} + if not self.hparams.wav2vec2.freeze: + valid_optimizers["wav2vec_optimizer"] = optimizers[ + "wav2vec_optimizer" + ] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers + # Define custom data procedure def dataio_prepare(hparams, tokenizer): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # 1. 
Define datasets data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -279,13 +262,15 @@ def dataio_prepare(hparams, tokenizer): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate @@ -297,10 +282,11 @@ def dataio_prepare(hparams, tokenizer): @sb.utils.data_pipeline.takes("wav") @sb.utils.data_pipeline.provides("sig") def audio_pipeline(wav): - info = torchaudio.info(wav) + info = audio_io.info(wav) sig = sb.dataio.dataio.read_audio(wav) resampled = torchaudio.transforms.Resample( - info.sample_rate, hparams["sample_rate"], + info.sample_rate, + hparams["sample_rate"], )(sig) return resampled @@ -325,19 +311,18 @@ def text_pipeline(wrd): # 4. 
Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], ) return train_data, valid_data, test_data if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -400,7 +385,6 @@ def text_pipeline(wrd): ) # Test - asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt" asr_brain.evaluate( test_data, min_key="WER", diff --git a/recipes/CommonVoice/ASR/transducer/README.md b/recipes/CommonVoice/ASR/transducer/README.md index 36fe384956..212c83d67e 100644 --- a/recipes/CommonVoice/ASR/transducer/README.md +++ b/recipes/CommonVoice/ASR/transducer/README.md @@ -1,33 +1,82 @@ # CommonVoice ASR with Transducers. -This folder contains scripts necessary to run an ASR experiment with the CommonVoice dataset: [CommonVoice Homepage](https://commonvoice.mozilla.org/) +This folder contains scripts necessary to run an ASR experiment with the CommonVoice 14.0 dataset: [CommonVoice Homepage](https://commonvoice.mozilla.org/) and pytorch 2.0 # Extra-Dependencies -This recipe support two implementation of Transducer loss, see `use_torchaudio` arg in Yaml file: -1- Transducer loss from torchaudio (if torchaudio version >= 0.10.0) (Default) -2- Speechbrain Implementation using Numba lib. (this allow you to have a direct access in python to the Transducer loss implementation) +This recipe supports two implementations of the transducer loss, see `use_torchaudio` arg in the yaml file: +1. Transducer loss from torchaudio (this requires torchaudio version >= 0.10.0). +2. 
Speechbrain implementation using Numba. To use it, please set `use_torchaudio=False` in the yaml file. This version is implemented within SpeechBrain and allows you to directly access the python code of the transducer loss (and directly modify it if needed). + +The Numba implementation is currently enabled by default as the `use_torchaudio` option is incompatible with `bfloat16` training. + Note: Before running this recipe, make sure numba is installed. Otherwise, run: ``` pip install numba ``` -# How to run -python train.py hparams/{hparam_file}.py +# How to run it +```shell +python train.py hparams/conformer_transducer.yaml +``` -# Data preparation -It is important to note that CommonVoice initially offers mp3 audio files at 42Hz. Hence, audio files are downsampled on the fly within the dataio function of the training script. +## Precision Notes +If your GPU effectively supports fp16 (half-precision) computations, it is recommended to execute the training script with the `--precision=fp16` (or `--precision=bf16`) option. +Enabling half precision can significantly reduce the peak VRAM requirements. For example, in the case of the Conformer Transducer recipe trained with Librispeech, the peak VRAM decreases from 39GB to 12GB when using fp16. +According to our tests, the performance is not affected. # Languages Here is a list of the different languages that we tested within the CommonVoice dataset with our transducers: - French +- Italian +- English + +# Results (non-streaming) + +Results are obtained with beam search and no LM (no-streaming i.e. full context). + +| Language | Release | LM | Val. CER | Val. 
WER | Test CER | Test WER | Model link | GPUs | +| ------------- |:-------------:| -----:| -----:| -----:| -----:| -----:| :-----------:| :-----------:| +| French | 2024-03-22 | No | 3.51 | 10.30 | 4.64 | 12.47 | [model](https://www.dropbox.com/scl/fo/kue72ik3vc55xu6u8zjr7/h?rlkey=ie98ktqf9gbunn4x9i3pskedq&dl=0) | 4xV100 32GB | +| Italian | 2024-03-22 | No | 2.47 | 8.49 | 2.69 | 8.92 | [model](https://www.dropbox.com/scl/fo/uyqfo3kwcpkaq26au2foj/h?rlkey=gxlj7xn6bnhjfb5jds8p80fe6&dl=0) | 4xV100 32GB | + +The output folders with checkpoints and logs can be found [here](https://www.dropbox.com/sh/852eq7pbt6d65ai/AACv4wAzk1pWbDo4fjVKLICYa?dl=0). + +## Streaming model + +### WER vs chunk size & left context -# Results +The following matrix presents the Word Error Rate (WER%) achieved on CommonVoice +`test` with various chunk sizes (in ms). -| Language | Release | hyperparams file | LM | Val. CER | Val. WER | Test CER | Test WER | Model link | GPUs | -| ------------- |:-------------:|:---------------------------:| -----:| -----:| -----:| -----:| -----:| :-----------:| :-----------:| -| French | 2020-06-22 | train_fr.yaml | No | 6.70 | 18.97 | 7.41 | 20.18 | [model](https://drive.google.com/drive/folders/1ZwY2FaRl1gfFbupodph_xRiGj4h25I08?usp=sharing) | 2xV100 16GB | +The relative difference is not trivial to interpret, because we are not testing +against a continuous stream of speech, but rather against utterances of various +lengths. This tends to bias results in favor of larger chunk sizes. -The output folders with checkpoints and logs can be found [here](https://drive.google.com/drive/folders/11NMzY0zV-NqJmPMyZfC3RtT64bYe-G_O?usp=sharing). +The chunk size might not accurately represent expected latency due to slight +padding differences in streaming contexts. + +The left chunk size is not representative of the receptive field of the model. 
+Because the model caches the streaming context at different layers, the model +may end up forming indirect dependencies to audio many seconds ago. + +| | full | cs=32 (1280ms) | 16 (640ms) | 8 (320ms) | +|:-----:|:----:|:-----:|:-----:|:-----:| +| it full | 8.92 | - | - | - | +| it lc=32 | - | 10.04 | 10.82 | 12.01 | +| fr full | 12.47 | - | - | - | +| fr lc=32 | - | 13.92 | 14.88 | 16.22 | + +### Inference + +Once your model is trained, you need a few manual steps in order to use it with the high-level streaming interfaces (`speechbrain.inference.ASR.StreamingASR`): + +1. Create a new directory where you want to store the model. +2. Copy `results/conformer_transducer//lm.ckpt` (optional; currently, for streaming rescoring LMs might be unsupported) and `tokenizer.ckpt` to that directory. +3. Copy `results/conformer_transducer//save/CKPT+????/model.ckpt` and `normalizer.ckpt` to that directory. +4. Copy your hyperparameters file to that directory. Uncomment the streaming specific keys and remove any training-specific keys. Alternatively, grab the inference hyperparameters YAML for this model from HuggingFace and adapt it to any changes you may have done. +5. You can now instantiate a `StreamingASR` with your model using `StreamingASR.from_hparams("/path/to/model/")`. + +The contents of that directory may be uploaded as a HuggingFace model, in which case the model source path can just be specified as `youruser/yourmodel`. # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -39,6 +88,15 @@ The output folders with checkpoints and logs can be found [here](https://drive.g Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/CommonVoice/ASR/transducer/hparams/conformer_transducer.yaml b/recipes/CommonVoice/ASR/transducer/hparams/conformer_transducer.yaml new file mode 100644 index 0000000000..3b0cc6247b --- /dev/null +++ b/recipes/CommonVoice/ASR/transducer/hparams/conformer_transducer.yaml @@ -0,0 +1,339 @@ +# ############################################################################ +# Model: E2E ASR with transformer and transducer +# Encoder: Conformer +# Decoder: LSTM + beamsearch +# Tokens: BPE with unigram +# losses: Transducer + CTC + CE (optional) +# Training: CommonVoice +# Authors: Titouan Parcollet 2024 +# 
############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_transducer_large/ +save_folder: !ref /save +train_log: !ref /train_log.txt +test_wer_file: !ref /wer_test.txt + +# Data files +data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr +train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files +dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files +test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files +accented_letters: False +language: fr # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv +skip_prep: False # Skip data preparation +avoid_if_longer_than: 10 + +# BPE parameters +token_type: unigram # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +####################### Training Parameters #################################### + +number_of_epochs: 100 # Will not be reached due to next hparams +optimizer_step_limit: 90000 +warmup_steps: 25000 +augment_warmup: 5000 # Data augmentation is switched on after these steps. +num_workers: 4 +batch_size_valid: 4 +batch_size_test: 4 +lr: 0.0008 +weight_decay: 0.01 +number_of_ctc_epochs: 10 +ctc_weight: 0.4 # Multitask with CTC for the encoder (0.0 = disabled) +ce_weight: 0.0 # Multitask with CE for the decoder (0.0 = disabled) +max_grad_norm: 10.0 +loss_reduction: 'batchmean' +precision: fp32 # bf16, fp16 or fp32 + +# The batch size is used if and only if dynamic batching is set to False +# Validation and testing are done with fixed batches and not dynamic batching. 
+batch_size: 1 +grad_accumulation_factor: 1 +sorting: random +avg_checkpoints: 5 # Number of checkpoints to average for evaluation + +# Feature parameters +sample_rate: 16000 +n_fft: 512 +n_mels: 80 +win_length: 32 + +# Streaming & dynamic chunk training options +# At least for the current architecture on LibriSpeech, we found out that +# non-streaming accuracy is very similar between `streaming: True` and +# `streaming: False`. +streaming: True # controls all Dynamic Chunk Training & chunk size & left context mechanisms + +# Configuration for Dynamic Chunk Training. +# In this model, a chunk is roughly equivalent to 40ms of audio. +dynchunktrain_config_sampler: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler # yamllint disable-line rule:line-length + chunkwise_prob: 0.6 # Probability during a batch to limit attention and sample a random chunk size in the following range + chunk_size_min: 8 # Minimum chunk size (if in a DynChunkTrain batch) + chunk_size_max: 32 # Maximum chunk size (if in a DynChunkTrain batch) + limited_left_context_prob: 0.75 # If in a DynChunkTrain batch, the probability during a batch to restrict left context to a random number of chunks + left_context_chunks_min: 2 # Minimum left context size (in # of chunks) + left_context_chunks_max: 32 # Maximum left context size (in # of chunks) + # If you specify a valid/test config, you can optionally have evaluation be + # done with a specific DynChunkTrain configuration. + # valid_config: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig + # chunk_size: 24 + # left_context_size: 16 + # test_config: ... + # chunk_size: 24 + # left_context_size: 16 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + num_workers: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +# This setup works well for 3090 24GB GPU, adapt it to your needs. 
+# Adjust grad_accumulation_factor depending on the DDP node count (here 3) +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_len: 200 # Should fit 32GB of VRAM +max_batch_len_val: 150 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 + +dynamic_batch_sampler: + max_batch_len: !ref + max_batch_len_val: !ref + num_buckets: !ref + shuffle_ex: True # if true re-creates batches at each epoch shuffling examples. + batch_ordering: random + max_batch_ex: 256 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 512 +joint_dim: 512 +nhead: 8 +num_encoder_layers: 12 +num_decoder_layers: 0 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 1024 +dec_dim: 512 +dec_emb_dropout: 0.1 +dec_dropout: 0.1 +attention_type: RelPosMHAXL + +# Decoding parameters +blank_index: 0 +bos_index: 1 +eos_index: 2 +pad_index: 0 + +# If True uses torchaudio loss. Otherwise, the numba one +use_torchaudio: False + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + win_length: !ref + +############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 3 + drop_count_high: 3 + replace: "zeros" + dim: 1 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + 
concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: conformer + attention_type: !ref + normalize_before: True + causal: False + max_length: 6000 # For absolute positional encoding + +# We must call an encoder wrapper so the decoder isn't run (we don't have any) +enc: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper + transformer: !ref + +# For MTL CTC over the encoder +proj_ctc: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +# Define some projection layers to make sure that enc and dec +# output dim are the same before joining +proj_enc: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +proj_dec: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +# Uncomment for MTL with CTC +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +emb: !new:speechbrain.nnet.embedding.Embedding + num_embeddings: !ref + consider_as_one_hot: True + blank_id: !ref + +dec: !new:speechbrain.nnet.RNN.LSTM + input_shape: [null, null, !ref - 1] + hidden_size: !ref + num_layers: 1 + re_init: True + +# For MTL with LM over the decoder (need to uncomment to activate) +# dec_lin: 
!new:speechbrain.nnet.linear.Linear +# input_size: !ref +# n_neurons: !ref +# bias: False + +# For MTL +ce_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: 0.1 + +Tjoint: !new:speechbrain.nnet.transducer.transducer_joint.Transducer_joint + joint: sum # joint [sum | concat] + nonlinearity: !ref + +transducer_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +transducer_cost: !name:speechbrain.nnet.losses.transducer_loss + blank_index: !ref + use_torchaudio: !ref + +# for MTL +# update model if any HEAD module is added +modules: + CNN: !ref + enc: !ref + emb: !ref + dec: !ref + Tjoint: !ref + transducer_lin: !ref + normalize: !ref + proj_ctc: !ref + proj_dec: !ref + proj_enc: !ref +# dec_lin: !ref + +# for MTL +# update model if any HEAD module is added +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref , !ref , !ref , !ref , !ref ] + +############################## Decoding & optimiser ############################ + +Greedysearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher + decode_network_lst: [!ref , !ref , !ref ] + tjoint: !ref + classifier_network: [!ref ] + blank_id: !ref + beam_size: 1 + nbest: 1 + +opt_class: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 1.e-8 + weight_decay: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.WarmAndExpDecayLRSchedule + lr: !ref + n_warmup_steps: !ref + total_steps: !ref + decay_factor: 0.05 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + scheduler: !ref + normalizer: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git 
a/recipes/CommonVoice/ASR/transducer/hparams/train_fr.yaml b/recipes/CommonVoice/ASR/transducer/hparams/train_fr.yaml deleted file mode 100644 index 3743e9e0fa..0000000000 --- a/recipes/CommonVoice/ASR/transducer/hparams/train_fr.yaml +++ /dev/null @@ -1,245 +0,0 @@ -# ############################################################################ -# Model: E2E ASR with attention-based ASR -# Encoder: CRDNN model -# Decoder: GRU + beamsearch + RNNLM -# Tokens: BPE with unigram -# losses: Transducer -# Training: Librispeech 100h -# Authors: Abdel HEBA, Mirco Ravanelli, Sung-Lin Yeh 2020 -# ############################################################################ - -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] -output_folder: !ref results/cv_transducer/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr -train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files -dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files -test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files -accented_letters: True -language: fr # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english -train_csv: !ref /train.csv -valid_csv: !ref /dev.csv -test_csv: !ref /test.csv -skip_prep: False # Skip data preparation - -# We remove utterance slonger than 10s in the train/dev/test sets as -# longer sentences certainly correspond to "open microphones". 
-avoid_if_longer_than: 10.0 - -# Training parameters -number_of_epochs: 30 -batch_size: 6 -batch_size_valid: 1 -lr: 1.0 -sorting: ascending -ckpt_interval_minutes: 15 # save checkpoint every N min -# MTL for encoder with CTC (uncomment enc_lin layer) -#number_of_ctc_epochs: 2 -#ctc_weight: 0.33 -# MTL for decoder with CE (uncomment dec_lin layer) -#number_of_ce_epochs: 2 -#ce_weight: 0.33 - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 80 - -opt_class: !name:torch.optim.Adadelta - lr: !ref - rho: 0.95 - eps: 1.e-8 - -# BPE parameters -token_type: unigram # ["unigram", "bpe", "char"] -character_coverage: 1.0 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.15 -cnn_blocks: 3 -cnn_channels: (128, 200, 256) -inter_layer_pooling_size: (2, 2, 2) -cnn_kernelsize: (3, 3) -time_pooling_size: 4 -rnn_class: !name:speechbrain.nnet.RNN.LSTM -rnn_layers: 5 -rnn_neurons: 1024 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 1024 -dec_neurons: 1024 -output_neurons: 1000 # index(blank/eos/bos) = 0 -joint_dim: 1024 -blank_index: 0 - -# Decoding parameters -beam_size: 4 -nbest: 1 -# by default {state,expand}_beam = 2.3 as mention in paper -# https://arxiv.org/abs/1904.02619 -state_beam: 2.3 -expand_beam: 2.3 - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -# Frequency domain SpecAugment -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 - -enc: 
!new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - inter_layer_pooling_size: !ref - time_pooling: True - using_2d_pooling: False - time_pooling_size: !ref - rnn_class: !ref - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - rnn_re_init: True - dnn_blocks: !ref - dnn_neurons: !ref - -# For MTL CTC over the encoder -enc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - -# Uncomment for MTL with CTC -# ctc_cost: !name:speechbrain.nnet.ctc_loss -# blank_index: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - consider_as_one_hot: True - blank_id: !ref - -dec: !new:speechbrain.nnet.RNN.GRU - input_shape: [null, null, !ref - 1] - hidden_size: !ref - num_layers: 1 - re_init: True - -# For MTL with LM over the decoder -dec_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - bias: False - -# For MLT with CTC -#ce_cost: !name:speechbrain.nnet.losses.nll_loss -# label_smoothing: 0.1 - -Tjoint: !new:speechbrain.nnet.transducer.transducer_joint.Transducer_joint - joint: sum # joint [sum | concat] - nonlinearity: !ref - -transducer_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - bias: False - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -transducer_cost: !name:speechbrain.nnet.losses.transducer_loss - use_torchaudio: True - blank_index: !ref - -# for MTL -# update model if any HEAD module is added -modules: - enc: !ref - enc_lin: !ref - emb: !ref - dec: !ref - dec_lin: !ref - Tjoint: !ref - transducer_lin: !ref - normalize: !ref - augmentation: !ref - -# for MTL -# update model if any HEAD module is added -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref ] - -# greedy_searcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher -# decode_network_lst: [!ref , !ref ] -# 
tjoint: !ref -# classifier_network: [!ref ] -# blank_id: !ref -# beam_size: 1 -# nbest: 1 - -beam_searcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher - decode_network_lst: [!ref , !ref ] - tjoint: !ref - classifier_network: [!ref ] - blank_id: !ref - beam_size: !ref - nbest: !ref - state_beam: !ref - expand_beam: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - scheduler: !ref - normalizer: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats - -cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats - split_tokens: True diff --git a/recipes/CommonVoice/ASR/transducer/train.py b/recipes/CommonVoice/ASR/transducer/train.py index cf93499247..3126532745 100644 --- a/recipes/CommonVoice/ASR/transducer/train.py +++ b/recipes/CommonVoice/ASR/transducer/train.py @@ -1,29 +1,24 @@ #!/usr/bin/env/python3 -"""Recipe for training a Transducer ASR system with librispeech. +"""Recipe for training a Transducer ASR system with CommonVoice. The system employs an encoder, a decoder, and an joint network -between them. Decoding is performed with beamsearch coupled with a neural -language model. +between them following Dynamic Chunk Training for streaming speech recognition. To run this recipe, do the following: -> python train.py hparams/train.yaml - -With the default hyperparameters, the system employs a CRDNN encoder. -The decoder is based on a standard GRU. Beamsearch coupled with a RNN -language model is used on the top of decoder probabilities. 
+> python train.py hparams/conformer_transducer_large.yaml The neural network is trained on both CTC and negative-log likelihood targets and sub-word units estimated with Byte Pairwise Encoding (BPE) -are used as basic recognition tokens. Training is performed on the full -LibriSpeech dataset (960 h). +are used as basic recognition tokens. The experiment file is flexible enough to support a large variety of different systems. By properly changing the parameter files, you can try different encoders, decoders, tokens (e.g, characters instead of BPE), -training split (e.g, train-clean 100 rather than the full one), and many -other possible variations. +training CommonVoice language, different streaming, parameters, +and many other possible variations. Authors + * Titouan Parcollet 2024 * Abdel Heba 2020 * Mirco Ravanelli 2020 * Ju-Chieh Chou 2020 @@ -31,16 +26,19 @@ """ import sys + import torch -import logging -import speechbrain as sb import torchaudio -from speechbrain.utils.distributed import run_on_main -from speechbrain.utils.data_utils import undo_padding -from speechbrain.tokenizers.SentencePiece import SentencePiece from hyperpyyaml import load_hyperpyyaml -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -51,20 +49,54 @@ def compute_forward(self, batch, stage): batch = batch.to(self.device) wavs, wav_lens = batch.sig tokens_with_bos, token_with_bos_lens = batch.tokens_bos - # wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) - # Forward pass feats = self.hparams.compute_features(wavs) - feats = self.modules.normalize(feats, wav_lens) - # Add augmentation if specified - if stage == 
sb.Stage.TRAIN: - if hasattr(self.modules, "augmentation"): - feats = self.modules.augmentation(feats) + # Add feature augmentation if specified. + if ( + stage == sb.Stage.TRAIN + and hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_with_bos = self.hparams.fea_augment.replicate_labels( + tokens_with_bos + ) + + current_epoch = self.hparams.epoch_counter.current + + # Old models may not have the streaming hparam, we don't break them in + # any other way so just check for its presence + if hasattr(self.hparams, "streaming") and self.hparams.streaming: + dynchunktrain_config = self.hparams.dynchunktrain_config_sampler( + stage + ) + else: + dynchunktrain_config = None + + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + src = self.modules.CNN(feats) + x = self.modules.enc( + src, + wav_lens, + pad_idx=self.hparams.pad_index, + dynchunktrain_config=dynchunktrain_config, + ) + x = self.modules.proj_enc(x) - x = self.modules.enc(feats.detach()) e_in = self.modules.emb(tokens_with_bos) + e_in = torch.nn.functional.dropout( + e_in, + self.hparams.dec_emb_dropout, + training=(stage == sb.Stage.TRAIN), + ) h, _ = self.modules.dec(e_in) + h = torch.nn.functional.dropout( + h, self.hparams.dec_dropout, training=(stage == sb.Stage.TRAIN) + ) + h = self.modules.proj_dec(h) + # Joint network # add labelseq_dim to the encoder tensor: [B,T,H_enc] => [B,T,1,H_enc] # add timeseq_dim to the decoder tensor: [B,U,H_dec] => [B,1,U,H_dec] @@ -75,116 +107,83 @@ def compute_forward(self, batch, stage): # Compute outputs if stage == sb.Stage.TRAIN: - return_CTC = False - return_CE = False - current_epoch = self.hparams.epoch_counter.current + p_ctc = None + p_ce = None + if ( - hasattr(self.hparams, "ctc_cost") + self.hparams.ctc_weight > 0.0 and current_epoch <= self.hparams.number_of_ctc_epochs ): - return_CTC = True # Output layer for ctc 
log-probabilities - out_ctc = self.modules.enc_lin(x) + out_ctc = self.modules.proj_ctc(x) p_ctc = self.hparams.log_softmax(out_ctc) - if ( - hasattr(self.hparams, "ce_cost") - and current_epoch <= self.hparams.number_of_ce_epochs - ): - return_CE = True + + if self.hparams.ce_weight > 0.0: # Output layer for ctc log-probabilities p_ce = self.modules.dec_lin(h) p_ce = self.hparams.log_softmax(p_ce) - if return_CE and return_CTC: - return p_ctc, p_ce, logits_transducer, wav_lens - elif return_CTC: - return p_ctc, logits_transducer, wav_lens - elif return_CE: - return p_ce, logits_transducer, wav_lens - else: - return logits_transducer, wav_lens - - elif stage == sb.Stage.VALID: - best_hyps, scores, _, _ = self.hparams.beam_searcher(x) - return logits_transducer, wav_lens, best_hyps + + return p_ctc, p_ce, logits_transducer, wav_lens + else: - ( - best_hyps, - best_scores, - nbest_hyps, - nbest_scores, - ) = self.hparams.beam_searcher(x) + best_hyps, scores, _, _ = self.hparams.Greedysearcher(x) return logits_transducer, wav_lens, best_hyps def compute_objectives(self, predictions, batch, stage): """Computes the loss (Transducer+(CTC+NLL)) given predictions and targets.""" ids = batch.id - current_epoch = self.hparams.epoch_counter.current tokens, token_lens = batch.tokens tokens_eos, token_eos_lens = batch.tokens_eos + # Train returns 4 elements vs 3 for val and test + if len(predictions) == 4: + p_ctc, p_ce, logits_transducer, wav_lens = predictions + else: + logits_transducer, wav_lens, predicted_tokens = predictions + if stage == sb.Stage.TRAIN: - if len(predictions) == 4: - p_ctc, p_ce, logits_transducer, wav_lens = predictions + if ( + hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + tokens = self.hparams.fea_augment.replicate_labels(tokens) + token_lens = self.hparams.fea_augment.replicate_labels( + token_lens + ) + tokens_eos = self.hparams.fea_augment.replicate_labels( + tokens_eos + ) + token_eos_lens = 
self.hparams.fea_augment.replicate_labels( + token_eos_lens + ) + + if stage == sb.Stage.TRAIN: + CTC_loss = 0.0 + CE_loss = 0.0 + if p_ctc is not None: CTC_loss = self.hparams.ctc_cost( p_ctc, tokens, wav_lens, token_lens ) + if p_ce is not None: CE_loss = self.hparams.ce_cost( p_ce, tokens_eos, length=token_eos_lens ) - loss_transducer = self.hparams.transducer_cost( - logits_transducer, tokens, wav_lens, token_lens - ) - loss = ( - self.hparams.ctc_weight * CTC_loss - + self.hparams.ce_weight * CE_loss - + (1 - (self.hparams.ctc_weight + self.hparams.ce_weight)) - * loss_transducer - ) - elif len(predictions) == 3: - # one of the 2 heads (CTC or CE) is still computed - # CTC alive - if current_epoch <= self.hparams.number_of_ctc_epochs: - p_ctc, p_transducer, wav_lens = predictions - CTC_loss = self.hparams.ctc_cost( - p_ctc, tokens, wav_lens, token_lens - ) - loss_transducer = self.hparams.transducer_cost( - logits_transducer, tokens, wav_lens, token_lens - ) - loss = ( - self.hparams.ctc_weight * CTC_loss - + (1 - self.hparams.ctc_weight) * loss_transducer - ) - # CE for decoder alive - else: - p_ce, logits_transducer, wav_lens = predictions - CE_loss = self.hparams.ce_cost( - p_ce, tokens_eos, length=token_eos_lens - ) - # Transducer loss use logits from RNN-T model. - loss_transducer = self.hparams.transducer_cost( - logits_transducer, tokens, wav_lens, token_lens - ) - loss = ( - self.hparams.ce_weight * CE_loss - + (1 - self.hparams.ctc_weight) * loss_transducer - ) - else: - logits_transducer, wav_lens = predictions - # Transducer loss use logits from RNN-T model. 
- loss = self.hparams.transducer_cost( - logits_transducer, tokens, wav_lens, token_lens - ) + loss_transducer = self.hparams.transducer_cost( + logits_transducer, tokens, wav_lens, token_lens + ) + loss = ( + self.hparams.ctc_weight * CTC_loss + + self.hparams.ce_weight * CE_loss + + (1 - (self.hparams.ctc_weight + self.hparams.ce_weight)) + * loss_transducer + ) else: - logits_transducer, wav_lens, predicted_tokens = predictions - # Transducer loss use logits from RNN-T model. loss = self.hparams.transducer_cost( logits_transducer, tokens, wav_lens, token_lens ) if stage != sb.Stage.TRAIN: - # Decode token terms to words predicted_words = self.tokenizer( predicted_tokens, task="decode_from_list" @@ -199,22 +198,10 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" @@ -224,6 +211,7 @@ def on_stage_start(self, stage, epoch): def on_stage_end(self, stage, stage_loss, epoch): """Gets called at the end of a epoch.""" + # Compute/store important stats stage_stats = {"loss": stage_loss} if stage == sb.Stage.TRAIN: @@ -234,36 +222,60 @@ def on_stage_end(self, stage, 
stage_loss, epoch): # Perform end-of-iteration things, like annealing, logging, etc. if stage == sb.Stage.VALID: - old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"]) - sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + lr = self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } self.hparams.train_logger.log_stats( - stats_meta={"epoch": epoch, "lr": old_lr}, + stats_meta=epoch_stats, train_stats=self.train_stats, valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"], "epoch": epoch}, + min_keys=["WER"], + num_to_keep=self.hparams.avg_checkpoints, ) + elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + # save the averaged checkpoint at the end of the evaluation stage + # delete the rest of the intermediate checkpoints + # WER is set to -0.1 so checkpointer only keeps the averaged checkpoint + self.checkpointer.save_and_keep_only( + meta={"WER": -0.1, "epoch": epoch}, + min_keys=["WER"], + num_to_keep=1, + ) -# Define custom data procedure def dataio_prepare(hparams, tokenizer): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # 1. 
Define datasets data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -274,32 +286,29 @@ def dataio_prepare(hparams, tokenizer): ) # when sorting do not shuffle in dataloader ! otherwise is pointless hparams["train_dataloader_opts"]["shuffle"] = False - elif hparams["sorting"] == "descending": train_data = train_data.filtered_sorted( - sort_key="duration", - reverse=True, - key_max_value={"duration": hparams["avoid_if_longer_than"]}, + sort_key="duration", reverse=True ) # when sorting do not shuffle in dataloader ! otherwise is pointless hparams["train_dataloader_opts"]["shuffle"] = False elif hparams["sorting"] == "random": pass - else: raise NotImplementedError( "sorting must be random, ascending or descending" ) - valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) - # We also sort the validation data so it is faster to validate valid_data = valid_data.filtered_sorted(sort_key="duration") + # test is separate test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, ) # We also sort the test data so it is faster to test @@ -311,10 +320,11 @@ def dataio_prepare(hparams, tokenizer): @sb.utils.data_pipeline.takes("wav") @sb.utils.data_pipeline.provides("sig") def audio_pipeline(wav): - info = torchaudio.info(wav) + info = audio_io.info(wav) sig = sb.dataio.dataio.read_audio(wav) resampled = torchaudio.transforms.Resample( - info.sample_rate, hparams["sample_rate"], + info.sample_rate, + hparams["sample_rate"], )(sig) return 
resampled @@ -329,9 +339,9 @@ def text_pipeline(wrd): yield wrd tokens_list = tokenizer.sp.encode_as_ids(wrd) yield tokens_list - tokens_bos = torch.LongTensor([hparams["blank_index"]] + (tokens_list)) + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) yield tokens_bos - tokens_eos = torch.LongTensor(tokens_list + [hparams["blank_index"]]) + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) yield tokens_eos tokens = torch.LongTensor(tokens_list) yield tokens @@ -340,19 +350,53 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], ) - return train_data, valid_data, test_data + # 5. If Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams = hparams["dynamic_batch_sampler"] + num_buckets = dynamic_hparams["num_buckets"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + dynamic_hparams["max_batch_len"], + num_buckets=num_buckets, + length_func=lambda x: x["duration"], + shuffle=dynamic_hparams["shuffle_ex"], + batch_ordering=dynamic_hparams["batch_ordering"], + ) + + valid_batch_sampler = DynamicBatchSampler( + valid_data, + dynamic_hparams["max_batch_len_val"], + num_buckets=num_buckets, + length_func=lambda x: x["duration"], + shuffle=dynamic_hparams["shuffle_ex"], + batch_ordering=dynamic_hparams["batch_ordering"], + ) + + return ( + train_data, + valid_data, + test_data, + tokenizer, + train_batch_sampler, + valid_batch_sampler, + ) -if __name__ == "__main__": +if __name__ == "__main__": # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = 
load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -389,10 +433,19 @@ def text_pipeline(wrd): annotation_read="wrd", model_type=hparams["token_type"], character_coverage=hparams["character_coverage"], + bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], ) # here we create the datasets objects as well as tokenization and encoding - train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer) + ( + train_data, + valid_data, + test_data, + tokenizer, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams, tokenizer) # Trainer initialization asr_brain = ASR( @@ -403,20 +456,31 @@ def text_pipeline(wrd): checkpointer=hparams["checkpointer"], ) - # adding objects to trainer: + # We dynamically add the tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for the LM!! asr_brain.tokenizer = tokenizer + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if valid_bsampler is not None: + valid_dataloader_opts = {"batch_sampler": valid_bsampler} # Training asr_brain.fit( asr_brain.hparams.epoch_counter, train_data, valid_data, - train_loader_kwargs=hparams["train_dataloader_opts"], - valid_loader_kwargs=hparams["valid_dataloader_opts"], + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, ) - # Test - asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt" + # Testing asr_brain.evaluate( test_data, min_key="WER", diff --git a/recipes/CommonVoice/ASR/transformer/README.md b/recipes/CommonVoice/ASR/transformer/README.md index e5ca8565d9..5fa9a081bf 100644 --- a/recipes/CommonVoice/ASR/transformer/README.md +++ 
b/recipes/CommonVoice/ASR/transformer/README.md @@ -1,8 +1,19 @@ # CommonVoice ASR with CTC + Attention based Seq2Seq models. -This folder contains scripts necessary to run an ASR experiment with the CommonVoice dataset: [CommonVoice Homepage](https://commonvoice.mozilla.org/) - +This folder contains scripts necessary to run an ASR experiment with the CommonVoice 14.0 dataset: [CommonVoice Homepage](https://commonvoice.mozilla.org/) and pytorch 2.0 # How to run +```shell python train.py hparams/{hparam_file}.py +``` + +# How to run on test sets only +```shell +python train.py hparams/{hparam_file}.py --test_only +``` +## For Whisper finetuning: + +python train_with_whisper.py hparams/train__hf_whisper.yaml e.g. train__hf_whisper + +Note: When using whisper large model, to improve memory usage during model recovery. You could use (see https://github.com/speechbrain/speechbrain/pull/1743) # Data preparation It is important to note that CommonVoice initially offers mp3 audio files at 42Hz. Hence, audio files are downsampled on the fly within the dataio function of the training script. @@ -10,16 +21,50 @@ It is important to note that CommonVoice initially offers mp3 audio files at 42H # Languages Here is a list of the different languages that we tested within the CommonVoice dataset with our transformers: +- Italian - French -# Results +For Whisper-large-v2 and medium finetuning, here is list of the different language that we tested within the CommonVoice.14_0 dataset: +- Hindi +- Arabic +- Persian +- Serbian +- Mongolian +- French +- Italian + -| Language | Release | hyperparams file | LM | Val. CER | Val. 
WER | Test CER | Test WER | Model link | GPUs | -| ------------- |:-------------:|:---------------------------:| -----:| -----:| -----:| -----:| -----:| :-----------:| :-----------:| -| French | 2020-06-22 | train_fr.yaml | No | 5.15 | 17.80 | 6.01 | 19.21 | [model](https://drive.google.com/drive/folders/12ny6daoz1Ze1MmgLrsqf352AXvhwob6d?usp=sharing) | 1xV100 16GB | +# Results +## Transformer +| Language | CV version | hyperparams file | LM | Val. CER | Val. WER | Test CER | Test WER | Hugging Face link | Model link | GPUs | +| ------------- |:-------------:|:---------------------------:| -----:| -----:| -----:| -----:| -----:|:-----------:| :-----------:| :-----------:| +| English | 16.1 | conformer_large.yaml | No | 4.48 | 10.48 | 6.42 | 13.39 | - | [model](https://www.dropbox.com/scl/fo/3w24pxln0fjyofl6xbfv1/AJJqzWfCtGFFTRLwM3DeZG8?rlkey=wpzzhizreedptts64d2m9jq4u&st=xu5g9an8&dl=0) | 4xA40 46GB | +| Italian | 14.0 | conformer_large.yaml | No | 2.91 | 9.79 | 2.68 | 9.27 | - | [model](https://www.dropbox.com/scl/fo/tf44itp8f4icf2z5qlxpm/AIOYS_CMov5ss5Q9AonFEno?rlkey=xek5ikbhqoovcao31iniqimrr&dl=0) | 2xV100 32GB | +| French | 14.0 | conformer_large.yaml | No | 2.64 | 7.62 | 3.55 | 9.48 | - | [model](https://www.dropbox.com/scl/fo/y862nl95zoe4sj3347095/ACxmT3_uw1ScLoYs0DSbGRM?rlkey=q66dk13w5nu1lkphtdinnnigm&dl=0) | 2xV100 32GB | -The output folders with checkpoints and logs can be found [here](https://drive.google.com/drive/folders/11NMzY0zV-NqJmPMyZfC3RtT64bYe-G_O?usp=sharing). +## Whisper Finetuning +Following table contains whisper-finetuning results for 1 epoch using Whisper model, freezing encoder and finetuning decoder. +| Language | Release | Model | commit hash | hyperparams file | LM | Val. CER | Val. 
WER | Test CER | Test WER | HuggingFace link | Model link | GPUs | +| ------------- |:-------------:| -----:|-----:|:---------------------------:| -----:| -----:| -----:| -----:| -----:| :-----------: |:-----------:| :-----------:| +| French | 2024-03-28 | large-v3 | [e4e2e13](https://github.com/speechbrain/speechbrain/pull/2450/commits/e4e2e135e9edafc6a26fc9aa4df9a94eaf86de41) | train_hf_whisper.yaml | No | 2.31% | 7.38% | 3.11% | 9.09% | x | [DropBox](https://www.dropbox.com/scl/fo/erwh83bg2jbzf3bf8v6ur/AHmQ5i8uWRaieXCOe5DSRUk?rlkey=kjivz2hx3o1pi7wbzadjznpid&dl=0) | 2xV100 32GB | +| Italian | 2024-03-28 | large-v3 | [e4e2e13](https://github.com/speechbrain/speechbrain/pull/2450/commits/e4e2e135e9edafc6a26fc9aa4df9a94eaf86de41) | train_hf_whisper.yaml | No | 1.27% | 4.85% | 1.62% | 5.47% | x | [DropBox](https://www.dropbox.com/scl/fo/gtfo3qoz1ceg4xg0dfq1d/AIabz2J9NxkNAEbGF7rHCHU?rlkey=eokq2a2z07ke48scazqnn5v73&dl=0) | 2xV100 32GB | +| French | 2024-03-28 | medium | [e4e2e13](https://github.com/speechbrain/speechbrain/pull/2450/commits/e4e2e135e9edafc6a26fc9aa4df9a94eaf86de41) | train_hf_whisper.yaml | No | 2.92% | 8.90% | 4.02% | 11.07% | x | [DropBox](https://www.dropbox.com/scl/fo/72aiaflc9w6168rk9jv6u/AGIVW5ml74wZYED7HUFjX-U?rlkey=nz7eo6i6gbze7rwv8la6sxobx&dl=0) | 2xV100 32GB | +| Italian | 2024-03-28 | medium | [e4e2e13](https://github.com/speechbrain/speechbrain/pull/2450/commits/e4e2e135e9edafc6a26fc9aa4df9a94eaf86de41) | train_hf_whisper.yaml | No | 2.05% | 7.17% | 2.31% | 7.79% | x | [DropBox](https://www.dropbox.com/scl/fo/sso9k4n6hma9cub44oi2p/AKINkGK0XMCYND-JrMQh4LQ?rlkey=gywsgxle4k473z9c7tf4l1m7n&dl=0) | 2xV100 32GB | +| French | 2024-03-28 | small | [e4e2e13](https://github.com/speechbrain/speechbrain/pull/2450/commits/e4e2e135e9edafc6a26fc9aa4df9a94eaf86de41) | train_hf_whisper.yaml | No | 4.34% | 12.57% | 5.89% | 15.46% | x | [DropBox](https://www.dropbox.com/scl/fo/h8idsgzp8xz5vsupqv0q8/ACS13H9awYU2G7DeTcyxiV0?rlkey=bbqpx0lbf5aify6ib029g2gn0&dl=0) 
| 2xV100 32GB | +| Italian | 2024-03-28 | small | [e4e2e13](https://github.com/speechbrain/speechbrain/pull/2450/commits/e4e2e135e9edafc6a26fc9aa4df9a94eaf86de41) | train_hf_whisper.yaml | No | 3.20% | 11.40% | 3.71% | 12.25% | x | [DropBox](https://www.dropbox.com/scl/fo/o4objjm5c65c5hzy1vvk4/ABXA2V1Gy1GCg7FGS6Ty9yc?rlkey=4kbjmmljdznvureyxfip5tw8q&dl=0) | 2xV100 32GB | +| Arabic | 2023-08-15 | large-v2 | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) | train_ar_hf_whisper.yaml | No | 4.02 | 12.47 | 5.20 | 16.96 | [model](https://huggingface.co/speechbrain/asr-whisper-large-v2-commonvoice-ar) | [model](https://www.dropbox.com/sh/45o3xkxdheksdfi/AAAs1zxCw76mcAbudYEonzg0a?dl=0) | 1xV100 16GB | +| Persian | 2023-08-15 | large-v2 | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_fa_hf_whisper.yaml | No | 6.91 | 25.30 | 9.38 | 31.75 | [model](https://huggingface.co/speechbrain/asr-whisper-large-v2-commonvoice-fa) | [model](https://www.dropbox.com/sh/a2vd6nn0icybdcz/AAC7z41jcheW1R9aNNK4-lHha?dl=0) | 1xV100 16GB | +| Mongolian | 2023-08-15 | large-v2 | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_mn_hf_whisper.yaml | No | 24.05 | 62.37 | 25.73 | 64.92 | [model](https://huggingface.co/speechbrain/asr-whisper-large-v2-commonvoice-mn) | [model](https://www.dropbox.com/sh/2t0srpb2nt2wst5/AACRJQCwooRaLxPoLkmTvKq8a?dl=0) | 1xV100 16GB | +| Hindi | 2023-08-15 | large-v2 | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_hi_hf_whisper.yaml | No | 4.54 | 10.46 | 7.00 | 15.27 | [model](https://huggingface.co/speechbrain/asr-whisper-large-v2-commonvoice-hi) | [model](https://www.dropbox.com/sh/qkcm86bzzb1y4sj/AABjA_ckw_hPwJCBzUiXLWrBa?dl=0) | 1xV100 16GB | +| Serbian | 2023-08-15 | large-v2 | 
[b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_sr_hf_whisper.yaml | No | 8.92 | 27.12 | 7.60 | 23.63 | [model](https://huggingface.co/speechbrain/asr-whisper-large-v2-commonvoice-sr) | [model](https://www.dropbox.com/sh/a798gw3k2ezerp5/AADz7UxvQRQDOH4DnCJ4J4dja?dl=0) | 1xV100 16GB | +| Arabic | 2023-08-15 | Medium | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_ar_hf_whisper.yaml | No | 4.95 | 14.82 | 6.51 | 20.24 | [model](https://huggingface.co/speechbrain/asr-whisper-medium-commonvoice-ar) | [model](https://www.dropbox.com/sh/0e4vtvbg6hf2e13/AAD-tfzCZGUrh85aeAeJj8I9a?dl=0) | 1xV100 16GB | +| Persian | 2023-08-15 | Medium | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_fa_hf_whisper.yaml | No | 8.58 | 35.48 | 11.27 | 35.48 |[model](https://huggingface.co/speechbrain/asr-whisper-medium-commonvoice-fa) | [model](https://www.dropbox.com/sh/w1urihacmtoulmi/AADMtK3qeAF5mLYk5LMHyiOra?dl=0) | 1xV100 16GB | +| Mongolian | 2023-08-15 | Medium | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_mn_hf_whisper.yaml | No | 27.08 | 67.41 | 27.69 | 67.84 | [model](https://huggingface.co/speechbrain/asr-whisper-medium-commonvoice-mn) | [model](https://www.dropbox.com/sh/6fbhmey7q1udykf/AAAiGObWTTe2cdXHt2Uv2VQXa?dl=0) | 1xV100 16GB | +| Hindi | 2023-08-15 | Medium | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_hi_hf_whisper.yaml | No | 5.82 | 12.51 | 8.16 | 17.04 | [model](https://huggingface.co/speechbrain/asr-whisper-medium-commonvoice-hi) | [model](https://www.dropbox.com/sh/z9vriyy3i6xqvif/AAB7ql-40yWTjKEQJiuhYUr5a?dl=0) | 1xV100 16GB | +| Serbian | 2023-08-15 | Medium | 
[b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_sr_hf_whisper.yaml | No | 8.63 | 25.10 | 7.25 | 22.29 | [model](https://huggingface.co/speechbrain/asr-whisper-medium-commonvoice-sr) | [model](https://www.dropbox.com/sh/5lhk230q45sd97z/AAD-U9b_Ws_vFPs-cazsbOY0a?dl=0) | 1xV100 16GB | +| French | 2023-08-15 | Medium | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_fr_hf_whisper.yaml | No | 3.26 | 9.65 | 4.30 | 11.79 | [model](https://huggingface.co/speechbrain/asr-whisper-medium-commonvoice-fr) | [model](https://www.dropbox.com/sh/7zlk07yxnslk4yy/AAANcI3EaG0ZFy6UrKk1Mm2Ga?dl=0) | 1xV100 16GB | +| Italian | 2023-08-15 | Medium | [b112860](https://github.com/speechbrain/speechbrain/pull/2254/commits/b1128604e040d43e80e9a3214c5116f34d5806db) |train_it_hf_whisper.yaml | No | 2.42 | 8.26 | 3.03 | 9.63 | [model](https://huggingface.co/speechbrain/asr-whisper-medium-commonvoice-it) | [model](https://www.dropbox.com/sh/u5tex3nvzzs5pex/AAD-J7cOBE_fNfBono8waTKCa?dl=0) | 1xV100 16GB | # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -31,6 +76,15 @@ The output folders with checkpoints and logs can be found [here](https://drive.g Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/CommonVoice/ASR/transformer/hparams/train_fr.yaml b/recipes/CommonVoice/ASR/transformer/hparams/conformer_large.yaml similarity index 52% rename from recipes/CommonVoice/ASR/transformer/hparams/train_fr.yaml rename to recipes/CommonVoice/ASR/transformer/hparams/conformer_large.yaml index cb35f24dcc..7d690941ab 100644 --- a/recipes/CommonVoice/ASR/transformer/hparams/train_fr.yaml +++ b/recipes/CommonVoice/ASR/transformer/hparams/conformer_large.yaml @@ -7,10 +7,10 @@ # Authors: Titouan Parcollet and Jianyuan Zhong # ############################################################################ # Seed needs to be set at top of yaml, before objects with parameters 
are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/transformer/ -wer_file: !ref /wer.txt +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_en/ +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt @@ -19,31 +19,42 @@ data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files -accented_letters: True -language: fr # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english -train_csv: !ref /train.csv -valid_csv: !ref /dev.csv -test_csv: !ref /test.csv +accented_letters: False +language: en # use 'it' for Italian, 'rw' for Kinyarwanda, 'en' for english +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv skip_prep: False # Skip data preparation +convert_to_wav: False # Switch this to True to convert all mp3 files to wav. # We remove utterance slonger than 10s in the train/dev/test sets as # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 10.0 +# THIS IS TERRIBLE BUT WE HAVE NO CHOICE. +# Some version of the CV dataset may contain one or two files of more than +# 2 min in the validation and or test. This is an error by design of the dataset +# as these files contain 90% of silence. We exclude them. +avoid_if_longer_than_val_test: 90.0 + ckpt_interval_minutes: 15 # save checkpoint every N min -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 50 +optimizer_step_limit: 250000 batch_size: 32 # This works with a 32GB GPU ! (bs * nb_gpu * accum) > 128 ! 
ctc_weight: 0.3 -gradient_accumulation: 4 +grad_accumulation_factor: 4 loss_reduction: 'batchmean' sorting: random +num_workers: 4 +precision: fp16 # bf16, fp16 or fp32 # stages related parameters -stage_one_epochs: 40 -lr_adam: 1.0 -lr_sgd: 0.00003 +lr_adam: 0.0008 +weight_decay: 0.01 +warmup_steps: 25000 +augment_warmup: 8000 # BPE parameters token_type: unigram # ["unigram", "bpe", "char"] @@ -54,30 +65,53 @@ sample_rate: 16000 n_fft: 400 n_mels: 80 +# This setup works well for A100 80GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 300 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + # Dataloader options train_dataloader_opts: batch_size: !ref shuffle: True - num_workers: 6 + num_workers: !ref valid_dataloader_opts: - batch_size: !ref - num_workers: 6 + batch_size: 1 test_dataloader_opts: - batch_size: !ref - num_workers: 6 + batch_size: 1 -####################### Model parameters ########################### + +####################### Model Parameters ########################### # Transformer -d_model: 768 +d_model: 512 nhead: 8 num_encoder_layers: 12 num_decoder_layers: 6 -d_ffn: 3072 -transformer_dropout: 0.0 +d_ffn: 2048 +transformer_dropout: 0.1 activation: !name:torch.nn.GELU -output_neurons: 500 +output_neurons: 5120 # Outputs blank_index: 0 @@ -89,24 +123,25 @@ eos_index: 2 # Decoding parameters min_decode_ratio: 0.0 max_decode_ratio: 1.0 -valid_search_interval: 5 -valid_beam_size: 10 -# test_beam_size: 80 +valid_search_interval: 10 
+valid_beam_size: 1 # We do greedy here so it's faster to decode ... +test_beam_size: 80 ctc_weight_decode: 0.3 +scorer_beam_scale: 0.3 ############################## models ################################ CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd input_shape: (8, 10, 80) - num_blocks: 3 + num_blocks: 2 num_layers_per_block: 1 - out_channels: (128, 200, 256) - kernel_sizes: (3, 3, 1) - strides: (2, 2, 1) - residuals: (False, False, False) + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length - input_size: 5120 + input_size: 640 tgt_vocab: !ref d_model: !ref nhead: !ref @@ -114,8 +149,12 @@ Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.Transforme num_decoder_layers: !ref d_ffn: !ref dropout: !ref + conformer_activation: !ref activation: !ref - normalize_before: False + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False ctc_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref @@ -135,27 +174,43 @@ model: !new:torch.nn.ModuleList - [!ref , !ref , !ref , !ref ] # We define two optimizers as we have two stages (training + finetuning) -Adam: !name:torch.optim.Adam - lr: 0 - betas: (0.9, 0.98) - eps: 0.000000001 - -SGD: !name:torch.optim.SGD - lr: !ref - momentum: 0.99 - nesterov: True - -beam_searcher: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] - bos_index: !ref +Adam: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer eos_index: !ref blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + scorer_beam_scale: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + 
bos_index: !ref + eos_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: True + scorer: !ref log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -170,8 +225,7 @@ seq_cost: !name:speechbrain.nnet.losses.kldiv_loss noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler lr_initial: !ref - n_warmup_steps: 25000 - model_size: !ref + n_warmup_steps: !ref checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref @@ -186,19 +240,39 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global - update_until_epoch: 3 - -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 + update_until_epoch: 4 + +############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 3 + drop_count_high: 3 + replace: "zeros" + dim: 1 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] 
compute_features: !new:speechbrain.lobes.features.Fbank sample_rate: !ref diff --git a/recipes/CommonVoice/ASR/transformer/hparams/train_hf_whisper.yaml b/recipes/CommonVoice/ASR/transformer/hparams/train_hf_whisper.yaml new file mode 100644 index 0000000000..1534d842b8 --- /dev/null +++ b/recipes/CommonVoice/ASR/transformer/hparams/train_hf_whisper.yaml @@ -0,0 +1,165 @@ +# ################################ +# Model: Whisper (Encoder-Decoder) + NLL +# Augmentation: TimeDomainSpecAugment +# Authors: Pooneh Mousavi 2022 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/train_whisper// +test_wer_file: !ref /wer_test.txt +valid_wer_file: !ref /wer_valid.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english whisper model. +whisper_hub: openai/whisper-medium + +# Normalize inputs with the same normalization done in the paper (https://cdn.openai.com/papers/whisper.pdf). Refer to Appendix C for further information. +normalized_transcripts: True + +# Data files +language: fr # use 'it' for italian, 'fr' for french, 'en' for english , It is a language for common-voice data. +data_folder: !PLACEHOLDER +train_tsv_file: !ref /train.tsv # Standard CommonVoice .tsv files +dev_tsv_file: !ref /dev.tsv # Standard CommonVoice .tsv files +test_tsv_file: !ref /test.tsv # Standard CommonVoice .tsv files +accented_letters: True +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv +skip_prep: False # Skip data preparation + +# We remove utterance slonger than 10s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". 
+avoid_if_longer_than: 10.0 + +ckpt_interval_minutes: 30 # save checkpoint every N min + +####################### Training Parameters #################################### +freeze_whisper: False +freeze_encoder: True +number_of_epochs: 1 +weight_decay: 0.01 +lr_whisper: 1e-5 +warmup_steps: 500 +max_grad_norm: 2.0 +sorting: ascending +precision: fp16 # bf16, fp16 or fp32 +eval_precision: fp16 +sample_rate: 16000 + +# With data_parallel batch_size is split into N jobs +batch_size: 8 +test_batch_size: 8 +grad_accumulation_factor: 2 + + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +test_beam_size: 8 + +####################### Model Parameters ####################################### +train_loader_kwargs: + batch_size: !ref + +valid_loader_kwargs: + batch_size: !ref + +test_loader_kwargs: + batch_size: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# UNCOMMENT THIS SECTION TO ADD AUGMENTATIONS +# speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb +# orig_freq: !ref +# speeds: [95, 100, 105] + +# # Frequency drop: randomly drops a number of frequency bands to zero. +# drop_freq: !new:speechbrain.augment.time_domain.DropFreq +# drop_freq_low: 0 # Min frequency band dropout probability +# drop_freq_high: 1 # Max frequency band dropout probability +# drop_freq_count_low: 1 # Min number of frequency bands to drop +# drop_freq_count_high: 3 # Max number of frequency bands to drop +# drop_freq_width: 0.05 # Width of frequency bands to drop + +# # Time drop: randomly drops a number of temporal chunks. 
+# drop_chunk: !new:speechbrain.augment.time_domain.DropChunk +# drop_length_low: 1 +# drop_length_high: 5 +# drop_count_low: 1000 +# drop_count_high: 2000 + +# # Augmenter: Combines previously defined augmentations to perform data augmentation +# wav_augment: !new:speechbrain.augment.augmenter.Augmenter +# concat_original: True +# min_augmentations: 3 +# max_augmentations: 3 +# augment_prob: 1.0 +# augmentations: [ +# !ref , +# !ref , +# !ref ] + +############################## Models ########################################## + + +whisper: !new:speechbrain.integrations.huggingface.whisper.Whisper + source: !ref + freeze: !ref + freeze_encoder: !ref + save_path: !ref /whisper_checkpoint + language: !ref + task: "transcribe" + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +nll_loss: !name:speechbrain.nnet.losses.nll_loss + +modules: + whisper: !ref + +############################## Decoding & optimiser ############################ + +whisper_opt_class: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +valid_search: !new:speechbrain.decoders.seq2seq.S2SWhisperGreedySearcher + model: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + +test_search: !new:speechbrain.decoders.seq2seq.S2SWhisperBeamSearcher + module: [!ref ] + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + +lr_annealing_whisper: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: !ref + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + whisper: !ref + scheduler_whisper: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git 
a/recipes/CommonVoice/ASR/transformer/train.py b/recipes/CommonVoice/ASR/transformer/train.py index f2a4f42c97..522bbcf0a0 100644 --- a/recipes/CommonVoice/ASR/transformer/train.py +++ b/recipes/CommonVoice/ASR/transformer/train.py @@ -4,38 +4,30 @@ between them. Decoding is performed with (CTC/Att joint) beamsearch. To run this recipe, do the following: -> python train.py hparams/transformer.yaml - -With the default hyperparameters, the system employs a convolutional frontend (ContextNet) and a transformer. -The decoder is based on a Transformer decoder. - -The neural network is trained on both CTC and negative-log likelihood -targets and sub-word units estimated with Byte Pairwise Encoding (BPE) -are used as basic recognition tokens. - -The experiment file is flexible enough to support a large variety of -different systems. By properly changing the parameter files, you can try -different encoders, decoders, tokens (e.g, characters instead of BPE), -training split (e.g, train-clean 100 rather than the full one), and many -other possible variations. 
+> python train.py hparams/conformer_large.yaml Authors - * Titouan Parcollet 2021 + * Titouan Parcollet 2021, 2024 * Jianyuan Zhong 2020 + * Pooneh Mousavi 2023 """ + +import os import sys + import torch import torchaudio -import logging -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.tokenizers.SentencePiece import SentencePiece -from speechbrain.utils.distributed import run_on_main from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger - -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -47,15 +39,28 @@ def compute_forward(self, batch, stage): wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) tokens_bos, _ = batch.tokens_bos + # Add waveform augmentation if specified. + if ( + stage == sb.Stage.TRAIN + and hasattr(self.hparams, "wav_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + # compute features feats = self.hparams.compute_features(wavs) current_epoch = self.hparams.epoch_counter.current feats = self.hparams.normalize(feats, wav_lens, epoch=current_epoch) - # Augmentation - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - feats = self.hparams.augmentation(feats) + # Add feature augmentation if specified. 
+ if ( + stage == sb.Stage.TRAIN + and hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels(tokens_bos) # forward modules src = self.modules.CNN(feats) @@ -73,27 +78,61 @@ def compute_forward(self, batch, stage): # Compute outputs hyps = None - if stage == sb.Stage.TRAIN: - hyps = None - elif stage == sb.Stage.VALID: - hyps = None - current_epoch = self.hparams.epoch_counter.current - if current_epoch % self.hparams.valid_search_interval == 0: - hyps, _ = self.hparams.beam_searcher(enc_out.detach(), wav_lens) - elif stage == sb.Stage.TEST: - hyps, _ = self.hparams.beam_searcher(enc_out.detach(), wav_lens) + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if is_valid_search: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + + elif is_test_search: + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) return p_ctc, p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" - (p_ctc, p_seq, wav_lens, predicted_tokens,) = predictions + (p_ctc, p_seq, wav_lens, predicted_tokens) = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens + # Augment Labels + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if ( + hasattr(self.hparams, "wav_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.wav_augment.replicate_multiple_labels( + tokens, 
tokens_lens, tokens_eos, tokens_eos_lens + ) + if ( + hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens + ) + loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens ) @@ -126,38 +165,11 @@ def compute_objectives(self, predictions, batch, stage): self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - - # check if we need to switch optimizer - # if so change the optimizer from Adam to SGD - self.check_and_reset_optimizer() - - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - # normalize the loss by gradient_accumulation step - (loss / self.hparams.gradient_accumulation).backward() - - if self.step % self.hparams.gradient_accumulation == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.optimizer.step() - self.optimizer.zero_grad() - - # anneal lr every update + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: self.hparams.noam_annealing(self.optimizer) - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - with torch.no_grad(): - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -183,24 +195,16 @@ def on_stage_end(self, stage, stage_loss, epoch): stage_stats["CER"] = self.cer_metric.summarize("error_rate") # log stats and save 
checkpoint at end-of-epoch - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): - + if stage == sb.Stage.VALID: # report different epoch stages according current stage current_epoch = self.hparams.epoch_counter.current - if current_epoch <= self.hparams.stage_one_epochs: - lr = self.hparams.noam_annealing.current_lr - steps = self.hparams.noam_annealing.n_steps - optimizer = self.optimizer.__class__.__name__ - else: - lr = self.hparams.lr_sgd - steps = -1 - optimizer = self.optimizer.__class__.__name__ + lr = self.hparams.noam_annealing.current_lr + steps = self.hparams.noam_annealing.n_steps epoch_stats = { "epoch": epoch, "lr": lr, "steps": steps, - "optimizer": optimizer, } self.hparams.train_logger.log_stats( stats_meta=epoch_stats, @@ -217,76 +221,25 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) - - def check_and_reset_optimizer(self): - """reset the optimizer if training enters stage 2""" - current_epoch = self.hparams.epoch_counter.current - if not hasattr(self, "switched"): - self.switched = False - if isinstance(self.optimizer, torch.optim.SGD): - self.switched = True - - if self.switched is True: - return - - if current_epoch > self.hparams.stage_one_epochs: - self.optimizer = self.hparams.SGD(self.modules.parameters()) - - if self.checkpointer is not None: - self.checkpointer.add_recoverable("optimizer", self.optimizer) - - self.switched = True - - def on_fit_start(self): - """Gets called at the beginning of ``fit()``, on multiple processes - if ``distributed_count > 0`` and backend is ddp. - - Default implementation compiles the jit modules, initializes - optimizers, and loads the latest checkpoint to resume training. - """ - # Run this *after* starting all processes since jit modules cannot be - # pickled. 
- self._compile_jit() - - # Wrap modules with parallel backend after jit - self._wrap_distributed() - - # Initialize optimizers after parameters are configured - self.init_optimizers() - - # Load latest checkpoint to check to current epoch number - if self.checkpointer is not None: - self.checkpointer.recover_if_possible( - device=torch.device(self.device) - ) - - # if the model is resumed from stage two, reinitialize the optimizer - current_epoch = self.hparams.epoch_counter.current - if current_epoch > self.hparams.stage_one_epochs: - self.optimizer = self.hparams.SGD(self.modules.parameters()) - - if self.checkpointer is not None: - self.checkpointer.add_recoverable("optimizer", self.optimizer) - - # Load latest checkpoint to resume training if interrupted - if self.checkpointer is not None: - self.checkpointer.recover_if_possible( - device=torch.device(self.device) - ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) # Define custom data procedure def dataio_prepare(hparams, tokenizer): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # 1. 
Define datasets data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -316,17 +269,25 @@ def dataio_prepare(hparams, tokenizer): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate - valid_data = valid_data.filtered_sorted(sort_key="duration") + valid_data = valid_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than_val_test"]}, + ) test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate - test_data = test_data.filtered_sorted(sort_key="duration") + test_data = test_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than_val_test"]}, + ) datasets = [train_data, valid_data, test_data] @@ -334,10 +295,11 @@ def dataio_prepare(hparams, tokenizer): @sb.utils.data_pipeline.takes("wav") @sb.utils.data_pipeline.provides("sig") def audio_pipeline(wav): - info = torchaudio.info(wav) + info = audio_io.info(wav) sig = sb.dataio.dataio.read_audio(wav) resampled = torchaudio.transforms.Resample( - info.sample_rate, hparams["sample_rate"], + info.sample_rate, + hparams["sample_rate"], )(sig) return resampled @@ -363,18 +325,45 @@ def text_pipeline(wrd): # 4. 
Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_data, + train_batch_sampler, + valid_batch_sampler, ) - return train_data, valid_data, test_data if __name__ == "__main__": # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -393,13 +382,14 @@ def text_pipeline(wrd): prepare_common_voice, kwargs={ "data_folder": hparams["data_folder"], - "save_folder": hparams["save_folder"], + "save_folder": hparams["output_folder"], "train_tsv_file": hparams["train_tsv_file"], "dev_tsv_file": hparams["dev_tsv_file"], "test_tsv_file": hparams["test_tsv_file"], "accented_letters": hparams["accented_letters"], "language": hparams["language"], "skip_prep": hparams["skip_prep"], + "convert_to_wav": hparams["convert_to_wav"], }, ) @@ -411,10 +401,18 @@ def text_pipeline(wrd): annotation_read="wrd", model_type=hparams["token_type"], character_coverage=hparams["character_coverage"], + 
bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], ) # here we create the datasets objects as well as tokenization and encoding - train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer) + ( + train_data, + valid_data, + test_data, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams, tokenizer) # Trainer initialization asr_brain = ASR( @@ -428,26 +426,58 @@ def text_pipeline(wrd): # adding objects to trainer: asr_brain.tokenizer = tokenizer + # Manage dynamic batching + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + # Training asr_brain.fit( asr_brain.hparams.epoch_counter, train_data, valid_data, - train_loader_kwargs=hparams["train_dataloader_opts"], - valid_loader_kwargs=hparams["valid_dataloader_opts"], + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, ) - # Test - asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt" + # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "wer_valid.txt" + ) asr_brain.evaluate( - test_data, - min_key="WER", + valid_data, + max_key="ACC", test_loader_kwargs=hparams["test_dataloader_opts"], ) - asr_brain.hparams.wer_file = hparams["output_folder"] 
+ "/wer_valid.txt" + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "wer_test.txt" + ) asr_brain.evaluate( - valid_data, - min_key="WER", + test_data, + max_key="ACC", test_loader_kwargs=hparams["test_dataloader_opts"], ) diff --git a/recipes/CommonVoice/ASR/transformer/train_with_whisper.py b/recipes/CommonVoice/ASR/transformer/train_with_whisper.py new file mode 100644 index 0000000000..3562f23170 --- /dev/null +++ b/recipes/CommonVoice/ASR/transformer/train_with_whisper.py @@ -0,0 +1,336 @@ +#!/usr/bin/env python3 +"""Recipe for training a whisper-based ASR system with CommonVoice. +The system employs whisper from OpenAI (https://cdn.openai.com/papers/whisper.pdf). +This recipe take the whisper encoder-decoder to fine-tune on. + +To run this recipe, do the following: +> python train_with_whisper.py hparams/train_hf_whisper.yaml + +Authors + * Pooneh Mousavi 2022 + * Adel Moumen 2024 +""" + +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + bos_tokens, bos_tokens_lens = batch.tokens_bos + + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + bos_tokens = self.hparams.wav_augment.replicate_labels(bos_tokens) + bos_tokens_lens = self.hparams.wav_augment.replicate_labels( + bos_tokens_lens + ) + + # We compute the padding mask and replace the values with the pad_token_id + # that the Whisper decoder expect to see. + abs_tokens_lens = (bos_tokens_lens * bos_tokens.shape[1]).long() + pad_mask = ( + torch.arange(abs_tokens_lens.max(), device=self.device)[None, :] + < abs_tokens_lens[:, None] + ) + bos_tokens[~pad_mask] = self.tokenizer.pad_token_id + + # Forward encoder + decoder + enc_out, logits, _ = self.modules.whisper(wavs, bos_tokens) + log_probs = self.hparams.log_softmax(logits) + + hyps = None + if stage == sb.Stage.VALID: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + elif stage == sb.Stage.TEST: + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + + return log_probs, hyps, wav_lens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss NLL given predictions and targets.""" + + (log_probs, hyps, wav_lens) = predictions + batch = batch.to(self.device) + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + + # Augment Labels + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens + ) + + loss = self.hparams.nll_loss( + log_probs, tokens_eos, length=tokens_eos_lens + ) + + if stage != sb.Stage.TRAIN: + tokens, tokens_lens = batch.tokens + + # Decode token terms to words + predicted_words = [ + self.tokenizer.decode(t, skip_special_tokens=True).strip() + for t in hyps + ] + + # Convert indices to words + target_words = undo_padding(tokens, tokens_lens) + target_words = self.tokenizer.batch_decode( + target_words, 
skip_special_tokens=True + ) + + if hasattr(self.hparams, "normalized_transcripts"): + predicted_words = [ + self.tokenizer.normalize(text).split(" ") + for text in predicted_words + ] + + target_words = [ + self.tokenizer.normalize(text).split(" ") + for text in target_words + ] + else: + predicted_words = [text.split(" ") for text in predicted_words] + target_words = [text.split(" ") for text in target_words] + + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + lr = self.hparams.lr_annealing_whisper.current_lr + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch, "lr": lr}, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. 
+ It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_loader_kwargs"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_loader_kwargs"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, + ) + + datasets = [train_data, valid_data, test_data] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + info = audio_io.info(wav) + sig = sb.dataio.dataio.read_audio(wav) + if info.sample_rate != hparams["sample_rate"]: + sig = torchaudio.transforms.Resample( + info.sample_rate, hparams["sample_rate"] + )(sig) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + if hasattr(hparams, "normalized_transcripts"): + wrd = tokenizer.normalize(wrd) + yield wrd + tokens_list = tokenizer.encode(wrd, add_special_tokens=False) + yield tokens_list + tokens_list = tokenizer.build_inputs_with_special_tokens(tokens_list) + tokens_bos = torch.LongTensor(tokens_list[:-1]) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list[1:]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "tokens_list", "tokens_bos", "tokens_eos", "tokens"], + ) + + return train_data, valid_data, test_data + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + from common_voice_prepare import prepare_common_voice # noqa + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_common_voice, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "train_tsv_file": hparams["train_tsv_file"], + "dev_tsv_file": hparams["dev_tsv_file"], + "test_tsv_file": hparams["test_tsv_file"], + "accented_letters": hparams["accented_letters"], + "language": hparams["language"], + "skip_prep": hparams["skip_prep"], + }, + ) + # Defining tokenizer and loading it + tokenizer = 
hparams["whisper"].tokenizer + + # here we create the datasets objects as well as tokenization and encoding + train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + opt_class=hparams["whisper_opt_class"], + ) + + # We load the pretrained whisper model + if "pretrainer" in hparams.keys(): + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected(asr_brain.device) + + # We dynamically add the tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for Whisper. + asr_brain.tokenizer = tokenizer + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["train_loader_kwargs"], + valid_loader_kwargs=hparams["valid_loader_kwargs"], + ) + + # Testing + asr_brain.hparams.test_wer_file = hparams["test_wer_file"] + asr_brain.evaluate( + test_data, + min_key="WER", + test_loader_kwargs=hparams["test_loader_kwargs"], + ) + + asr_brain.hparams.test_wer_file = hparams["valid_wer_file"] + asr_brain.evaluate( + valid_data, + min_key="WER", + test_loader_kwargs=hparams["test_loader_kwargs"], + ) diff --git a/recipes/CommonVoice/LM/README.md b/recipes/CommonVoice/LM/README.md new file mode 100644 index 0000000000..12c9688e1d --- /dev/null +++ b/recipes/CommonVoice/LM/README.md @@ -0,0 +1,78 @@ + +# Training KenLM +This folder contains recipes for training the kenLM-gram model for the CommonVoice Dataset. +Using Wav2Vec2 in combination with a language model can yield a significant improvement, especially when the model is fine-tuned on small speech datasets. This is a guide to explain how one can create an n-gram language model and combine it with an existing fine-tuned Wav2Vec2. 
+ + +You can download CommonVoice at https://commonvoice.mozilla.org/en + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +We will use the popular KenLM library to build an n-gram. Let's start by installing the Ubuntu library prerequisites. For a complete guide on how to install required dependencies, please refer to [this](https://kheafield.com/code/kenlm/dependencies/) link: + ``` + sudo apt install build-essential cmake libboost-system-dev libboost-thread-dev libboost-program-options-dev libboost-test-dev libeigen3-dev zlib1g-dev libbz2-dev liblzma-dev + ``` + + Next, we need to start downloading and unpacking the KenLM repo. + ``` + wget -O - https://kheafield.com/code/kenlm.tar.gz | tar xz + ``` + +KenLM is written in C++, so we'll make use of cmake to build the binaries. + ``` +mkdir kenlm/build && cd kenlm/build && cmake .. && make -j2 + ``` + +Now, make sure that the executables are added to your .bashrc file. To do it, +- Open the ~/.bashrc file in a text editor. +- Scroll to the end of the file and add the following line: ```export PATH=$PATH:/your/path/to/kenlm/build/bin ``` +- Save it and type: `source ~/.bashrc ` + + ``` +# How to run: +```shell +python train.py hparams/train_kenlm.yaml --data_folder=your/data/folder +``` + +# Results +The script trains a n-gram language model, which is stored in the popular ARPA format. +The output folders with checkpoints and logs can be found [here](https://www.dropbox.com/scl/fo/zw505t10kesqpvkt6m3tu/h?rlkey=6626h1h665tvlo1mtekop9rx5&dl=0). + + + + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/CommonVoice/LM/common_voice_prepare.py b/recipes/CommonVoice/LM/common_voice_prepare.py new file mode 120000 index 0000000000..5dacdbfdd1 --- /dev/null +++ b/recipes/CommonVoice/LM/common_voice_prepare.py @@ -0,0 +1 @@ +../common_voice_prepare.py \ No newline at end of file diff --git a/recipes/CommonVoice/LM/hparams/train_kenlm.yaml b/recipes/CommonVoice/LM/hparams/train_kenlm.yaml new file mode 100644 index 0000000000..35414d909c --- /dev/null +++ b/recipes/CommonVoice/LM/hparams/train_kenlm.yaml @@ -0,0 +1,22 @@ +######### +# 
Recipe for Training kenLM on CommonVoice Data
+# It is used to boost Wav2Vec2 with n-grams.
+#
+# Author: Pooneh Mousavi (2023)
+################################
+# Seed needs to be set at top of yaml, before objects with parameters are made
+seed: 1986
+__set_seed: !apply:speechbrain.utils.seed_everything [!ref <seed>]
+output_folder: !ref results/CommonVoice/ngrams/<language>/<seed>
+
+# Data files
+data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-14.0-2023-06-23/en
+train_tsv_file: !ref <data_folder>/train.tsv
+language: en
+# accented_letters should be set according to the language
+accented_letters: True
+train_csv: !ref <output_folder>/train.csv
+skip_prep: False
+text_file: !ref <output_folder>/train.txt
+ngram: 5
+ngram_file: !ref <output_folder>/<ngram>_gram.arpa
diff --git a/recipes/CommonVoice/LM/train.py b/recipes/CommonVoice/LM/train.py
new file mode 100644
index 0000000000..89c151cc93
--- /dev/null
+++ b/recipes/CommonVoice/LM/train.py
@@ -0,0 +1,94 @@
+"""
+Recipe to train kenlm ngram model to combine an n-gram with Wav2Vec2.
+https://huggingface.co/blog/wav2vec2-with-ngram
+
+To run this recipe, do the following:
+> python train.py hparams/train.yaml --data_folder=/path/to/CommonVoice
+Author
+ * Pooneh Mousavi 2023
+"""
+
+import csv
+import os
+import sys
+
+from hyperpyyaml import load_hyperpyyaml
+
+import speechbrain as sb
+from speechbrain.utils.distributed import run_on_main
+from speechbrain.utils.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+def csv2text():
+    """Read CSV file and convert specific data entries into text file."""
+    annotation_file = open(hparams["train_csv"], newline="", encoding="utf-8")
+    reader = csv.reader(annotation_file)
+    headers = next(reader, None)
+    text_file = open(hparams["text_file"], "w+", encoding="utf-8")
+    index_label = headers.index("wrd")
+    row_idx = 0
+    for row in reader:
+        row_idx += 1
+        sent = row[index_label]
+        text_file.write(sent + "\n")
+    text_file.close()
+    annotation_file.close()
+    logger.info("Text file created at: " + hparams["text_file"])
+
+
+if __name__ 
== "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + from common_voice_prepare import prepare_common_voice # noqa + + # multi-gpu (ddp) save data preparation + if not os.path.exists(hparams["text_file"]): + run_on_main( + prepare_common_voice, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "train_tsv_file": hparams["train_tsv_file"], + "accented_letters": hparams["accented_letters"], + "language": hparams["language"], + "skip_prep": hparams["skip_prep"], + }, + ) + csv2text() + + logger.info(f"Start training {hparams['ngram']}-gram kenlm model.") + tmp_ngram_file = "ngram.arpa" + cmd = f'lmplz -o {hparams["ngram"]} <"{hparams["text_file"]}" > "{tmp_ngram_file}"' + os.system(cmd) + with open(tmp_ngram_file, encoding="utf-8") as read_file, open( + hparams["ngram_file"], "w", encoding="utf-8" + ) as write_file: + has_added_eos = False + for line in read_file: + if not has_added_eos and "ngram 1=" in line: + count = line.strip().split("=")[-1] + write_file.write(line.replace(f"{count}", f"{int(count) + 1}")) + elif not has_added_eos and "" in line: + write_file.write(line) + write_file.write(line.replace("", "")) + has_added_eos = True + else: + write_file.write(line) + os.remove(tmp_ngram_file) + logger.info( + f"{hparams['ngram']}-gram kenlm model was built and saved in {hparams['ngram_file']}." 
+ ) diff --git a/recipes/CommonVoice/SENSE/README.md b/recipes/CommonVoice/SENSE/README.md new file mode 100644 index 0000000000..cfeff0b671 --- /dev/null +++ b/recipes/CommonVoice/SENSE/README.md @@ -0,0 +1,95 @@ +# Multilingual SENSE on Common Voice + +This folder contains scripts to train **SENSE** models that align a self-supervised speech encoder (wav2vec-BERT) with multilingual text embeddings (BGE-M3) on the [Common Voice](https://commonvoice.mozilla.org/en/datasets) corpus. The resulting speech embeddings live in a shared semantic space, similar in spirit to **SAMU-XLSR** and **Meta's SONAR** multilingual speech–text encoders. They follow the SENSE framework (*"SENSE models: an open source solution for multilingual and multimodal semantic-based tasks"*) described in [https://arxiv.org/abs/2509.12093](https://arxiv.org/abs/2509.12093). + + +A **single multilingual model** is trained on **90 Common Voice languages**. + +Two components are used: +1. A **student audio encoder**: wav2vec-BERT followed by attention pooling to obtain utterance-level embeddings. +2. A **teacher text encoder**: BGE-M3 sentence embeddings computed on-the-fly from the reference transcripts. + +# How to run +```shell +python train.py hparams/train_sense.yaml +``` + +# Data + +This recipe uses the multilingual **Common Voice** dataset. A large number of languages is selected, and all of them are merged into a single multilingual training setup. + +Any Common Voice language can be used by editing the `languages` field in `hparams/train_sense.yaml`. 
+In this configuration, the model was trained on the following **90 Common Voice languages**: + +`af`, `am`, `ar`, `as`, `ast`, `az`, `ba`, `be`, `bg`, `bn`, `br`, `ca`, +`ckb`, `cs`, `cv`, `cy`, `da`, `de`, `dv`, `el`, `en`, `eo`, `es`, `et`, +`fa`, `fi`, `fr`, `fy-NL`, `ga-IE`, `gl`, `gn`, `he`, `hi`, `hsb`, `ht`, +`hu`, `ia`, `id`, `is`, `it`, `ja`, `ka`, `kab`, `kk`, `ko`, `ky`, `lt`, +`lo`, `lv`, `ml`, `mn`, `mhr`, `mk`, `mr`, `mt`, `ne-NP`, `nl`, `nn-NO`, +`oc`, `or`, `os`, `pa-IN`, `pl`, `ps`, `pt`, `ro`, `ru`, `sah`, `sc`, +`sk`, `sl`, `sr`, `sv-SE`, `sw`, `ta`, `te`, `th`, `ti`, `tk`, `tr`, `tt`, +`ug`, `uk`, `ur`, `uz`, `vi`, `yi`, `yo`, `zh-HK`, `zu`. + +## Multilingual sampling ratios + +Common Voice languages do **not** have the same number of utterances: some are very high-resource, others much smaller. To avoid that high-resource languages dominate the training batches, we compute a **sampling ratio** for each language. + +This multilingual smoothing strategy follows the rebalancing scheme introduced in **SAMU-XLSR** (“SAMU-XLSR: Semantically-Aligned Multimodal Utterance-level Cross-Lingual Speech Representation”, Khurana et al., 2022, Eq. (3), see https://arxiv.org/abs/2205.08180). + +For the train split, let: + +- $N_l$ be the number of utterances in language $l$, +- $N_{\text{total}} = \sum_l N_l$ be the total number of utterances over all languages, +- $p_l = \frac{N_l}{N_{\text{total}}}$ be the empirical probability of language $l$. + +The sampling ratio $r_l$ used by the sampler is then defined as: + +$$ +r_l = \frac{1}{p_l} \cdot \frac{p_l^\alpha}{\sum_k p_k^\alpha}, +$$ + +where $\alpha$ is the hyperparameter `sampling_alpha` (e.g. $\alpha = 0.05$). + +- $p_l$ reflects how frequent a language is in the corpus. +- $r_l$ is the **sampling ratio** used as a weight in the sampler: + - high-resource languages (large $p_l$) are down-weighted, + - low-resource languages (small $p_l$) are up-weighted. 
+ + +These ratios are saved to `/language_ratios.json` and stored in a `ratio` column inside `train.csv`. During training, `ReproducibleWeightedRandomSampler` uses this `ratio` column to build multilingual batches where smaller languages are seen more often and larger languages are not over-represented. + +## Pretrained checkpoint + +Download SENSE model: +[https://www.dropbox.com/scl/fi/ju3nhhu2lfs8521778y3c/SENSE.7z?rlkey=h62fkfpwbc52vg20ayyzz2ciz&st=5wv9xfts&dl=0](https://www.dropbox.com/scl/fi/ju3nhhu2lfs8521778y3c/SENSE.7z?rlkey=h62fkfpwbc52vg20ayyzz2ciz&st=5wv9xfts&dl=0) +(Trained on 32×A100 GPUs) + + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and 
Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},
+    year={2021},
+    eprint={2106.04624},
+    archivePrefix={arXiv},
+    primaryClass={eess.AS},
+    note={arXiv:2106.04624}
+}
+```
diff --git a/recipes/CommonVoice/SENSE/common_voice_sense_prepare.py b/recipes/CommonVoice/SENSE/common_voice_sense_prepare.py
new file mode 120000
index 0000000000..58a8a42f65
--- /dev/null
+++ b/recipes/CommonVoice/SENSE/common_voice_sense_prepare.py
@@ -0,0 +1 @@
+../common_voice_sense_prepare.py
\ No newline at end of file
diff --git a/recipes/CommonVoice/SENSE/hparams/train_sense.yaml b/recipes/CommonVoice/SENSE/hparams/train_sense.yaml
new file mode 100644
index 0000000000..f039d4ef51
--- /dev/null
+++ b/recipes/CommonVoice/SENSE/hparams/train_sense.yaml
@@ -0,0 +1,226 @@
+# ###############################################
+# Model: w2v-BERT + attention pooling
+# Training: Multilingual SENSE
+# Loss: cosine similarity
+# Authors: Maryem Bouziane 2025, Salima Mdhaffar 2025,
+# Haroun Elleuch 2025, Yannick Estève 2025
+# ###############################################
+
+# Seed must be set before any object with parameters is instantiated.
+seed: 2422
+__set_seed: !apply:speechbrain.utils.seed_everything [!ref <seed>]
+
+############################## Data ####################################
+
+# Root folder containing the multilingual Common Voice data tree.
+data_folder: !PLACEHOLDER # e.g. /localscratch/common_voice_langs/
+output_folder: !ref results/SENSE_training/<seed>
+save_folder: !ref <output_folder>/save
+train_log: !ref <output_folder>/train_log.txt
+# Exponent used in the language sampling formula for SENSE.
+# Small alpha oversamples low-resource languages
+# and undersamples high-resource ones.
+sampling_alpha: 0.05
+
+# Paths for the multilingual CSV manifests produced by prepare_sense(). 
+language_ratios_file: !ref <output_folder>/language_ratios.json
+train_csv: !ref <output_folder>/train.csv
+valid_csv: !ref <output_folder>/dev.csv
+
+# If True, data preparation (TSV → CSV) is skipped.
+skip_prep: False
+
+# If True, Common Voice .mp3 clips are converted to .wav before training.
+convert_to_wav: True
+
+# Languages included in the multilingual SENSE training.
+languages:
+    - "af"
+    - "am"
+    - "ar"
+    - "as"
+    - "ast"
+    - "az"
+    - "ba"
+    - "be"
+    - "bg"
+    - "bn"
+    - "br"
+    - "ca"
+    - "ckb"
+    - "cs"
+    - "cv"
+    - "cy"
+    - "da"
+    - "de"
+    - "dv"
+    - "el"
+    - "en"
+    - "eo"
+    - "es"
+    - "et"
+    - "fa"
+    - "fi"
+    - "fr"
+    - "fy-NL"
+    - "ga-IE"
+    - "gl"
+    - "gn"
+    - "he"
+    - "hi"
+    - "hsb"
+    - "ht"
+    - "hu"
+    - "ia"
+    - "id"
+    - "is"
+    - "it"
+    - "ja"
+    - "ka"
+    - "kab"
+    - "kk"
+    - "ko"
+    - "ky"
+    - "lt"
+    - "lo"
+    - "lv"
+    - "ml"
+    - "mn"
+    - "mhr"
+    - "mk"
+    - "mr"
+    - "mt"
+    - "ne-NP"
+    - "nl"
+    - "nn-NO"
+    - "oc"
+    - "or"
+    - "os"
+    - "pa-IN"
+    - "pl"
+    - "ps"
+    - "pt"
+    - "ro"
+    - "ru"
+    - "sah"
+    - "sc"
+    - "sk"
+    - "sl"
+    - "sr"
+    - "sv-SE"
+    - "sw"
+    - "ta"
+    - "te" # codespell:ignore te
+    - "th"
+    - "ti"
+    - "tk"
+    - "tr"
+    - "tt"
+    - "ug"
+    - "uk"
+    - "ur"
+    - "uz"
+    - "vi"
+    - "yi"
+    - "yo"
+    - "zh-HK"
+    - "zu"
+
+############################## Text encoder: BGE-M3 ####################
+
+bge_path: "BAAI/bge-m3"
+
+bge_model: !new:speechbrain.integrations.nlp.bgeM3_embeddings.BGEM3SentenceEmbeddings
+    source: !ref <bge_path>
+    use_fp16: True
+    return_dense: True
+
+############################## Speech encoder: w2v-BERT ################
+
+# Hugging Face model name for the w2v-BERT encoder.
+wav2vec2_hub: facebook/w2v-bert-2.0
+
+# If True, the w2v-BERT encoder is kept frozen during training.
+wav2vec2_frozen: False
+
+############################## Training ################################
+
+number_of_epochs: 100
+
+# Learning rates for the attention head and the w2v-BERT encoder.
+lr: 1.5
+lr_wav2vec: 0.00001
+
+batch_size: 2
+
+epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter
+    limit: !ref <number_of_epochs>
+
+dataloader_options:
+    batch_size: !ref <batch_size>
+    num_workers: 8
+
+############################## Architecture ############################
+
+# Dimensionality of the w2v-BERT hidden representations (input to attention).
+d_model: 1024
+
+# Global scaling factor applied to the cosine loss.
+loss_scale: 50
+
+wav2vec2: !new:speechbrain.integrations.huggingface.w2v_bert.W2VBert
+    source: !ref <wav2vec2_hub>
+    save_path: !ref <save_folder>/wav2vec2_checkpoint
+    freeze: !ref <wav2vec2_frozen>
+
+attn_pooling: !new:speechbrain.nnet.pooling.AttentionPooling
+    input_dim: !ref <d_model>
+
+modules:
+    wav2vec2: !ref <wav2vec2>
+    attn_pooling: !ref <attn_pooling>
+    bge_model: !ref <bge_model>
+
+# Only the attention pooling head
+# is grouped under "model" for the main optimizer.
+model: !new:torch.nn.ModuleList
+    modules: [!ref <attn_pooling>]
+
+############################## Optimizers & schedulers #################
+
+# Optimizer for the attention pooling head.
+adam_opt_class: !name:torch.optim.Adadelta
+    lr: !ref <lr>
+    rho: 0.95
+    eps: 0.00000001
+
+# Optimizer for the w2v-BERT encoder.
+wav2vec_opt_class: !name:torch.optim.Adam
+    lr: !ref <lr_wav2vec>
+
+# Learning-rate scheduler for the attention pooling head.
+lr_annealing_adam: !new:speechbrain.nnet.schedulers.NewBobScheduler
+    initial_value: !ref <lr>
+    improvement_threshold: 0.0025
+    annealing_factor: 0.5
+    patient: 2
+
+# Learning-rate scheduler for the w2v-BERT encoder.
+lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler
+    initial_value: !ref <lr_wav2vec>
+    improvement_threshold: 0.0025
+    annealing_factor: 0.9
+
+############################## Checkpointer & logger ###################
+
+checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+    checkpoints_dir: !ref <save_folder>
+    recoverables:
+        model: !ref <model>
+        wav2vec2: !ref <wav2vec2>
+        lr_annealing_adam: !ref <lr_annealing_adam>
+        lr_annealing_wav2vec: !ref <lr_annealing_wav2vec>
+        counter: !ref <epoch_counter>
+
+train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+    save_file: !ref <train_log>
diff --git a/recipes/CommonVoice/SENSE/train.py b/recipes/CommonVoice/SENSE/train.py
new file mode 100644
index 0000000000..98bb967491
--- /dev/null
+++ b/recipes/CommonVoice/SENSE/train.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python3
+"""Recipe for training a w2v-BERT-based SENSE model on Common Voice.
+
+The system fine-tunes a w2v-BERT encoder with an attention-pooling head
+to predict BGE-M3 sentence embeddings for each utterance, so that speech
+and text share a common semantic space.
+ +To run this recipe, do the following: +> python train.py hparams/train_sense.yaml + +Authors + * Maryem Bouziane 2025 + * Salima Mdhaffar 2025 + * Haroun Elleuch 2025 + * Yannick Estève 2025 + * Ha Nguyen 2023 +""" + +import sys + +import pandas as pd +import torch +import torch.nn.functional as F +from common_voice_sense_prepare import prepare_sense +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class SenseBrain(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from waveform batches to speech and text embeddings.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + + # Student speech encoder: w2v-BERT + feats = self.modules.wav2vec2(wavs, wav_lens) + uttr_embeddings = self.modules.attn_pooling(feats) + # L2-normalise speech embeddings. 
+ uttr_embeddings = F.normalize(uttr_embeddings, p=2, dim=-1) + + # Teacher text encoder: BGE-M3 + src_text = batch.wrd + text_embeddings = self.modules.bge_model(src_text) + + return uttr_embeddings, text_embeddings + + def compute_objectives(self, predictions, batch, stage): + """Cosine-based loss used for semantic alignment between speech and text.""" + uttr_embeddings, text_embeddings = predictions + + cosine_sim = torch.sum( + uttr_embeddings.float() * text_embeddings.float(), dim=-1 + ) + + loss = 1.0 - cosine_sim + loss = loss.sum() + loss *= self.hparams.loss_scale + return loss + + def init_optimizers(self): + """Initializes optimizers for the attention head and the w2v-BERT encoder.""" + # Optimizer for the attention pooling + self.adam_optimizer = self.hparams.adam_opt_class( + self.hparams.model.parameters() + ) + self.optimizers_dict = {"model_optimizer": self.adam_optimizer} + + # Separate optimizer for w2v-BERT if not frozen + if not self.hparams.wav2vec2_frozen: + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.wav2vec2.parameters() + ) + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + + def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps.""" + valid_optimizers = {} + if not self.hparams.wav2vec2_frozen: + valid_optimizers["wav2vec_optimizer"] = optimizers[ + "wav2vec_optimizer" + ] + return valid_optimizers + + def on_stage_start(self, stage, epoch): + """Gets called when a stage (either training or validation) starts.""" + return + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of each stage. + + For validation, applies learning rate scheduling and handles + logging and checkpointing. 
+ """ + if stage == sb.Stage.TRAIN: + self.train_stats = stage_loss + return + + # VALID + stage_stats = {"loss": stage_loss} + current_epoch = self.hparams.epoch_counter.current + + if stage == sb.Stage.VALID: + # Scheduler for the attention pooling + old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate( + self.adam_optimizer, new_lr_adam + ) + + stats_meta = { + "epoch": current_epoch, + "lr_adam": old_lr_adam, + } + + # Scheduler for w2v-BERT if not frozen + if not self.hparams.wav2vec2_frozen: + ( + old_lr_wav2vec, + new_lr_wav2vec, + ) = self.hparams.lr_annealing_wav2vec(stage_stats["loss"]) + sb.nnet.schedulers.update_learning_rate( + self.wav2vec_optimizer, new_lr_wav2vec + ) + stats_meta["lr_wav2vec"] = old_lr_wav2vec + + # Log validation statistics and save the checkpoint + self.hparams.train_logger.log_stats( + stats_meta=stats_meta, + train_stats={"loss": self.train_stats}, + valid_stats=stage_stats, + ) + + meta = {"loss": stage_stats["loss"], "epoch": current_epoch} + name = "checkpoint_epoch" + str(current_epoch) + self.checkpointer.save_and_keep_only( + meta=meta, name=name, num_to_keep=10, min_keys=["loss"] + ) + + +def dataio_prepare(hparams): + """Prepares the datasets and data pipelines used by the SenseBrain.""" + + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Audio pipeline: reads the waveform.""" + sig = sb.dataio.dataio.read_audio(wav) + return sig + + datasets = {} + + # TRAIN + train_csv = hparams["train_csv"] + datasets["train"] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=train_csv, + dynamic_items=[audio_pipeline], + output_keys=[ + "id", + "lang", + "sig", + "duration", + "wrd", + ], + ) + + # VALID + valid_csv = hparams["valid_csv"] + datasets["valid"] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=valid_csv, + dynamic_items=[audio_pipeline], + output_keys=[ + "id", + 
"lang", + "sig", + "duration", + "wrd", + ], + ) + + return datasets + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset preparation (TSV -> multilingual train/dev CSV) + if not hparams["skip_prep"]: + run_on_main( + prepare_sense, + kwargs={ + "data_folder": hparams["data_folder"], + "output_folder": hparams["output_folder"], + "languages": hparams["languages"], + "sampling_alpha": hparams["sampling_alpha"], + "language_ratios_file": hparams["language_ratios_file"], + "train_csv": hparams["train_csv"], + "valid_csv": hparams["valid_csv"], + "convert_to_wav": hparams["convert_to_wav"], + }, + ) + + # Create main experiment class + sense_brain = SenseBrain( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Datasets + datasets = dataio_prepare(hparams) + + # Load sampling ratios from train.csv + logger.info("Loading language ratios from train.csv ...") + manifest = pd.read_csv(hparams["train_csv"]) + if "ratio" not in manifest.columns: + raise RuntimeError( + "Column 'ratio' is missing in train.csv. " + "Check that the preparation step ran correctly." 
+ ) + + sample_ratios = list(manifest["ratio"]) + num_samples = len(sample_ratios) + + # Create weighted sampler for the training dataloader + train_sampler = ReproducibleWeightedRandomSampler( + sample_ratios, + replacement=True, + num_samples=num_samples, + ) + + # inject the sampler in the training dataloader + train_loader_kwargs = dict(hparams["dataloader_options"]) + train_loader_kwargs["sampler"] = train_sampler + + # Training + logger.info("Start of model training:") + sense_brain.fit( + sense_brain.hparams.epoch_counter, + datasets["train"], + datasets["valid"], + train_loader_kwargs=train_loader_kwargs, + valid_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/CommonVoice/common_voice_prepare.py b/recipes/CommonVoice/common_voice_prepare.py index 0bffc52b3f..fb3e2d9478 100644 --- a/recipes/CommonVoice/common_voice_prepare.py +++ b/recipes/CommonVoice/common_voice_prepare.py @@ -1,22 +1,30 @@ """ Data preparation. - -Download: https://voice.mozilla.org/en/datasets - +Download: https://commonvoice.mozilla.org/en/datasets Author ------ -Titouan Parcollet +Titouan Parcollet 2021, 2022, 2024 +Luca Della Libera 2022 +Pooneh Mousavi 2022 +Salima Mdhaffar 2023 +Adel Moumen 2024 """ -import os import csv +import functools +import os import re -import logging -import torchaudio import unicodedata -from tqdm.contrib import tzip +from dataclasses import dataclass -logger = logging.getLogger(__name__) +from speechbrain.dataio.dataio import read_audio_info +from speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map + +logger = get_logger(__name__) + +VERBOSE = False +SAMPLING_RATE = 16_000 def prepare_common_voice( @@ -28,16 +36,17 @@ def prepare_common_voice( accented_letters=False, language="en", skip_prep=False, + convert_to_wav=False, ): """ Prepares the csv files for the Mozilla Common Voice dataset. 
- Download: https://voice.mozilla.org/en/datasets + Download: https://commonvoice.mozilla.org/en Arguments --------- data_folder : str Path to the folder where the original Common Voice dataset is stored. - This path should include the lang: /datasets/CommonVoice/en/ + This path should include the lang: /datasets/CommonVoice// save_folder : str The directory where to store the csv files. train_tsv_file : str, optional @@ -53,6 +62,16 @@ def prepare_common_voice( Specify the language for text normalization. skip_prep: bool If True, skip data preparation. + convert_to_wav: bool + If True, `.mp3` files are converted (duplicated) to uncompressed `.wav`. + Uncompressed `wav`s can be much faster to decode than MP3, at the cost + of much higher disk usage and bandwidth. This might be useful if you are + CPU-limited in workers during training. + This invokes the `ffmpeg` commandline, so ffmpeg must be installed. + + Returns + ------- + None Example ------- @@ -95,17 +114,15 @@ def prepare_common_voice( test_tsv_file = test_tsv_file # Setting the save folder - if not os.path.exists(save_folder): - os.makedirs(save_folder) + os.makedirs(save_folder, exist_ok=True) - # Setting ouput files + # Setting output files save_csv_train = save_folder + "/train.csv" save_csv_dev = save_folder + "/dev.csv" save_csv_test = save_folder + "/test.csv" # If csv already exists, we skip the data preparation if skip(save_csv_train, save_csv_dev, save_csv_test): - msg = "%s already exists, skipping data preparation!" 
% (save_csv_train) logger.info(msg) @@ -119,7 +136,6 @@ def prepare_common_voice( # Additional checks to make sure the data folder contains Common Voice check_commonvoice_folders(data_folder) - # Creating csv files for {train, dev, test} data file_pairs = zip( [train_tsv_file, dev_tsv_file, test_tsv_file], @@ -127,16 +143,29 @@ def prepare_common_voice( ) for tsv_file, save_csv in file_pairs: create_csv( - tsv_file, save_csv, data_folder, accented_letters, language, + convert_to_wav, + tsv_file, + save_csv, + data_folder, + accented_letters, + language, ) def skip(save_csv_train, save_csv_dev, save_csv_test): """ Detects if the Common Voice data preparation has been already done. - If the preparation has been done, we can skip it. + Arguments + --------- + save_csv_train : str + The train csv file + save_csv_dev : str + The dev csv file + save_csv_test : str + The test csv file + Returns ------- bool @@ -157,25 +186,137 @@ def skip(save_csv_train, save_csv_dev, save_csv_test): return skip +@dataclass +class CVRow: + snt_id: str + duration: float + audio_path: str + spk_id: str + words: str + + +def process_line( + line, convert_to_wav, data_folder, language, accented_letters, header_map +): + """Process a line of CommonVoice tsv file. + + Arguments + --------- + line : str + A line of the CommonVoice tsv file. + convert_to_wav : bool + If True, `.mp3` files are converted (duplicated) to uncompressed `.wav`. + Uncompressed `wav`s can be much faster to decode than MP3, at the cost + of much higher disk usage and bandwidth. This might be useful if you are + CPU-limited in workers during training. + This invokes the `ffmpeg` commandline, so ffmpeg must be installed. + data_folder : str + Path to the CommonVoice dataset. + language : str + Language code, e.g. "en" + accented_letters : bool + Defines if accented letters will be kept as individual letters or + transformed to the closest non-accented letters. 
+ header_map : Dict[str, int] + Map from column name to column indices + + Returns + ------- + CVRow + A dataclass containing the information about the line. + """ + + columns = line.strip().split("\t") + spk_id = columns[header_map["client_id"]] + audio_path_filename = columns[header_map["path"]] + words = columns[header_map["sentence"]] + + # Path is at indice 1 in Common Voice tsv files. And .mp3 files + # are located in datasets/lang/clips/ + audio_path = data_folder + "/clips/" + audio_path_filename + + if convert_to_wav: + audio_path = convert_mp3_to_wav(audio_path) + + file_name = audio_path.split(".")[-2].split("/")[-1] + snt_id = file_name + + # Reading the signal (to retrieve duration in seconds) + if os.path.isfile(audio_path): + info = read_audio_info(audio_path) + else: + msg = "\tError loading: %s" % (str(len(file_name))) + logger.info(msg) + return None + + duration = info.num_frames / info.sample_rate + + # Getting transcript + + # Unicode Normalization + words = unicode_normalisation(words) + + # !! Language specific cleaning !! 
+ words = language_specific_preprocess(language, words) + + # Remove accents if specified + if not accented_letters: + words = strip_accents(words) + words = words.replace("'", " ") + words = words.replace("’", " ") + + # Remove multiple spaces + words = re.sub(" +", " ", words) + + # Remove spaces at the beginning and the end of the sentence + words = words.lstrip().rstrip() + + # Getting chars + chars = words.replace(" ", "_") + chars = " ".join([char for char in chars][:]) + + # Remove too short sentences (or empty): + if language in ["ja", "zh-CN"]: + if len(chars) < 3: + return None + else: + if len(words.split(" ")) < 3: + return None + + # Composition of the csv_line + return CVRow(snt_id, duration, audio_path, spk_id, words) + + def create_csv( - orig_tsv_file, csv_file, data_folder, accented_letters=False, language="en" + convert_to_wav, + orig_tsv_file, + csv_file, + data_folder, + accented_letters=False, + language="en", ): """ Creates the csv file given a list of wav files. Arguments --------- + convert_to_wav : bool + If True, `.mp3` files are converted (duplicated) to uncompressed `.wav`. + Uncompressed `wav`s can be much faster to decode than MP3, at the cost + of much higher disk usage and bandwidth. This might be useful if you are + CPU-limited in workers during training. + This invokes the `ffmpeg` commandline, so ffmpeg must be installed. orig_tsv_file : str Path to the Common Voice tsv file (standard file). + csv_file : str + New csv file. data_folder : str Path of the CommonVoice dataset. accented_letters : bool, optional Defines if accented letters will be kept as individual letters or transformed to the closest non-accented letters. - - Returns - ------- - None + language : str + Language code, e.g. 
"en" """ # Check if the given files exists @@ -185,8 +326,15 @@ def create_csv( raise FileNotFoundError(msg) # We load and skip the header - loaded_csv = open(orig_tsv_file, "r").readlines()[1:] - nb_samples = str(len(loaded_csv)) + csv_lines = open(orig_tsv_file, encoding="utf-8").readlines() + header_line = csv_lines[0] + csv_data_lines = csv_lines[1:] + nb_samples = len(csv_data_lines) + + header_map = { + column_name: index + for index, column_name in enumerate(header_line.split("\t")) + } msg = "Preparing CSV files for %s samples ..." % (str(nb_samples)) logger.info(msg) @@ -195,157 +343,215 @@ def create_csv( msg = "Creating csv lists in %s ..." % (csv_file) logger.info(msg) - csv_lines = [["ID", "duration", "wav", "spk_id", "wrd"]] - - # Start processing lines + # Process and write lines total_duration = 0.0 - for line in tzip(loaded_csv): - - line = line[0] - - # Path is at indice 1 in Common Voice tsv files. And .mp3 files - # are located in datasets/lang/clips/ - mp3_path = data_folder + "/clips/" + line.split("\t")[1] - file_name = mp3_path.split(".")[-2].split("/")[-1] - spk_id = line.split("\t")[0] - snt_id = file_name - - # Setting torchaudio backend to sox-io (needed to read mp3 files) - if torchaudio.get_audio_backend() != "sox_io": - logger.warning("This recipe needs the sox-io backend of torchaudio") - logger.warning("The torchaudio backend is changed to sox_io") - torchaudio.set_audio_backend("sox_io") - - # Reading the signal (to retrieve duration in seconds) - if os.path.isfile(mp3_path): - info = torchaudio.info(mp3_path) - else: - msg = "\tError loading: %s" % (str(len(file_name))) - logger.info(msg) - continue - - duration = info.num_frames / info.sample_rate - total_duration += duration - - # Getting transcript - words = line.split("\t")[2] - - # Unicode Normalization - words = unicode_normalisation(words) - - # !! Language specific cleaning !! - # Important: feel free to specify the text normalization - # corresponding to your alphabet. 
- - if language in ["en", "fr", "it", "rw"]: - words = re.sub( - "[^’'A-Za-z0-9À-ÖØ-öø-ÿЀ-ӿéæœâçèàûî]+", " ", words - ).upper() - - if language == "de": - # this replacement helps preserve the case of ß - # (and helps retain solitary occurrences of SS) - # since python's upper() converts ß to SS. - words = words.replace("ß", "0000ß0000") - words = re.sub("[^’'A-Za-z0-9öÖäÄüÜß]+", " ", words).upper() - words = words.replace("'", " ") - words = words.replace("’", " ") - words = words.replace( - "0000SS0000", "ß" - ) # replace 0000SS0000 back to ß as its initial presence in the corpus - - if language == "fr": - # Replace J'y D'hui etc by J_ D_hui - words = words.replace("'", " ") - words = words.replace("’", " ") - - elif language == "ar": - HAMZA = "\u0621" - ALEF_MADDA = "\u0622" - ALEF_HAMZA_ABOVE = "\u0623" - letters = ( - "ابتةثجحخدذرزسشصضطظعغفقكلمنهويىءآأؤإئ" - + HAMZA - + ALEF_MADDA - + ALEF_HAMZA_ABOVE - ) - words = re.sub("[^" + letters + " ]+", "", words).upper() - elif language == "ga-IE": - # Irish lower() is complicated, but upper() is nondeterministic, so use lowercase - def pfxuc(a): - return len(a) >= 2 and a[0] in "tn" and a[1] in "AEIOUÁÉÍÓÚ" - - def galc(w): - return w.lower() if not pfxuc(w) else w[0] + "-" + w[1:].lower() - - words = re.sub("[^-A-Za-z'ÁÉÍÓÚáéíóú]+", " ", words) - words = " ".join(map(galc, words.split(" "))) - - # Remove accents if specified - if not accented_letters: - words = strip_accents(words) - words = words.replace("'", " ") - words = words.replace("’", " ") - - # Remove multiple spaces - words = re.sub(" +", " ", words) - - # Remove spaces at the beginning and the end of the sentence - words = words.lstrip().rstrip() - # Getting chars - chars = words.replace(" ", "_") - chars = " ".join([char for char in chars][:]) - - # Remove too short sentences (or empty): - if len(words.split(" ")) < 3: - continue - - # Composition of the csv_line - csv_line = [snt_id, str(duration), mp3_path, spk_id, str(words)] + line_processor = 
functools.partial(
+        process_line,
+        convert_to_wav=convert_to_wav,
+        data_folder=data_folder,
+        language=language,
+        accented_letters=accented_letters,
+        header_map=header_map,
+    )
 
-        # Adding this line to the csv_lines list
-        csv_lines.append(csv_line)
+    # Stream into a .tmp file, and rename it to the real path at the end.
+    csv_file_tmp = csv_file + ".tmp"
 
-    # Writing the csv lines
-    with open(csv_file, mode="w", encoding="utf-8") as csv_f:
+    with open(csv_file_tmp, mode="w", newline="", encoding="utf-8") as csv_f:
         csv_writer = csv.writer(
             csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
         )
-        for line in csv_lines:
-            csv_writer.writerow(line)
+        csv_writer.writerow(["ID", "duration", "wav", "spk_id", "wrd"])
+
+        for row in parallel_map(line_processor, csv_data_lines):
+            if row is None:
+                continue
+
+            total_duration += row.duration
+            csv_writer.writerow(
+                [
+                    row.snt_id,
+                    str(row.duration),
+                    row.audio_path,
+                    row.spk_id,
+                    row.words,
+                ]
+            )
+
+    os.replace(csv_file_tmp, csv_file)
 
     # Final prints
     msg = "%s successfully created!" % (csv_file)
     logger.info(msg)
-    msg = "Number of samples: %s " % (str(len(loaded_csv)))
+    msg = "Number of samples: %s " % (str(len(csv_data_lines)))
     logger.info(msg)
     msg = "Total duration: %s Hours" % (str(round(total_duration / 3600, 2)))
     logger.info(msg)
 
 
+def convert_mp3_to_wav(audio_mp3_path):
+    """Convert an mp3 file to a wav file.
+
+    Parameters
+    ----------
+    audio_mp3_path : str
+        The path to the mp3 file to be converted.
+
+    Returns
+    -------
+    str
+        The path to the converted wav file.
+
+    Raises
+    ------
+    subprocess.CalledProcessError
+        If the conversion process fails.
+ """ + audio_wav_path = audio_mp3_path.replace(".mp3", ".wav") + + if VERBOSE: + os.system( + f"ffmpeg -y -i {audio_mp3_path} -ac 1 -ar {SAMPLING_RATE} {audio_wav_path}" + ) + else: + os.system( + f"ffmpeg -y -i {audio_mp3_path} -ac 1 -ar {SAMPLING_RATE} {audio_wav_path} > /dev/null 2>&1" + ) + return audio_wav_path + + +def language_specific_preprocess(language, words): + # !! Language specific cleaning !! + # Important: feel free to specify the text normalization + # corresponding to your alphabet. + + if language in ["en", "fr", "it", "rw"]: + words = re.sub( + "[^’'A-Za-z0-9À-ÖØ-öø-ÿЀ-ӿéæœâçèàûî]+", " ", words + ).upper() + + if language == "de": + # this replacement helps preserve the case of ß + # (and helps retain solitary occurrences of SS) + # since python's upper() converts ß to SS. + words = words.replace("ß", "0000ß0000") + words = re.sub("[^’'A-Za-z0-9öÖäÄüÜß]+", " ", words).upper() + words = words.replace("'", " ") + words = words.replace("’", " ") + words = words.replace( + "0000SS0000", "ß" + ) # replace 0000SS0000 back to ß as its initial presence in the corpus + + elif language == "fr": # SM + words = re.sub("[^’'A-Za-z0-9À-ÖØ-öø-ÿЀ-ӿéæœâçèàûî]+", " ", words) + words = words.replace("’", "'") + words = words.replace("é", "é") + words = words.replace("æ", "ae") + words = words.replace("œ", "oe") + words = words.replace("â", "â") + words = words.replace("ç", "ç") + words = words.replace("è", "è") + words = words.replace("à", "à") + words = words.replace("û", "û") + words = words.replace("î", "î") + words = words.upper() + + # Case of apostrophe collés + words = words.replace("L'", "L' ") + words = words.replace("L' ", "L' ") + words = words.replace("S'", "S' ") + words = words.replace("S' ", "S' ") + words = words.replace("D'", "D' ") + words = words.replace("D' ", "D' ") + words = words.replace("J'", "J' ") + words = words.replace("J' ", "J' ") + words = words.replace("N'", "N' ") + words = words.replace("N' ", "N' ") + words = words.replace("C'", 
"C' ") + words = words.replace("C' ", "C' ") + words = words.replace("QU'", "QU' ") + words = words.replace("QU' ", "QU' ") + words = words.replace("M'", "M' ") + words = words.replace("M' ", "M' ") + + # Case of apostrophe qui encadre quelques mots + words = words.replace(" '", " ") + words = words.replace("A'", "A") + words = words.replace("B'", "B") + words = words.replace("E'", "E") + words = words.replace("F'", "F") + words = words.replace("G'", "G") + words = words.replace("K'", "K") + words = words.replace("Q'", "Q") + words = words.replace("V'", "V") + words = words.replace("W'", "W") + words = words.replace("Z'", "Z") + words = words.replace("O'", "O") + words = words.replace("X'", "X") + words = words.replace( + "AUJOURD' HUI", + "AUJOURD'HUI", # cspell:disable-line + ) + elif language == "ar": + HAMZA = "\u0621" + ALEF_MADDA = "\u0622" + ALEF_HAMZA_ABOVE = "\u0623" + letters = ( + "ابتةثجحخدذرزژشسصضطظعغفقكلمنهويىءآأؤإئ" # cspell:disable-line + + HAMZA + + ALEF_MADDA + + ALEF_HAMZA_ABOVE + ) + words = re.sub("[^" + letters + " ]+", "", words).upper() + elif language == "fa": + HAMZA = "\u0621" + ALEF_MADDA = "\u0622" + ALEF_HAMZA_ABOVE = "\u0623" + letters = ( + "ابپتةثجحخچدذرزژسشصضطظعغفقگکلمنهویىءآأؤإئ" # cspell:disable-line + + HAMZA + + ALEF_MADDA + + ALEF_HAMZA_ABOVE + ) + words = re.sub("[^" + letters + " ]+", "", words).upper() + elif language == "ga-IE": + # Irish lower() is complicated, but upper() is nondeterministic, so use lowercase + def pfxuc(a): + return len(a) >= 2 and a[0] in "tn" and a[1] in "AEIOUÁÉÍÓÚ" + + def galc(w): + return w.lower() if not pfxuc(w) else w[0] + "-" + w[1:].lower() + + words = re.sub("[^-A-Za-z'ÁÉÍÓÚáéíóú]+", " ", words) + words = " ".join(map(galc, words.split(" "))) + elif language == "es": + # Fix the following error in dataset large: + # KeyError: 'The item En noviembre lanzaron Queen Elizabeth , coproducida por Foreign Noi$e . requires replacements which were not supplied.' 
+ # cspell:ignore noviembre lanzaron coproducida + words = words.replace("$", "s") + return words + + def check_commonvoice_folders(data_folder): """ Check if the data folder actually contains the Common Voice dataset. - If not, raises an error. - Returns - ------- - None + Arguments + --------- + data_folder : str + The folder containing the data to check Raises ------ FileNotFoundError If data folder doesn't contain Common Voice dataset. """ - files_str = "/clips" - # Checking clips if not os.path.exists(data_folder + files_str): - err_msg = ( "the folder %s does not exist (it is expected in " "the Common Voice dataset)" % (data_folder + files_str) @@ -354,20 +560,13 @@ def check_commonvoice_folders(data_folder): def unicode_normalisation(text): - - try: - text = unicode(text, "utf-8") - except NameError: # unicode is a default on python 3 - pass return str(text) def strip_accents(text): - text = ( unicodedata.normalize("NFD", text) .encode("ascii", "ignore") .decode("utf-8") ) - return str(text) diff --git a/recipes/CommonVoice/common_voice_sense_prepare.py b/recipes/CommonVoice/common_voice_sense_prepare.py new file mode 100644 index 0000000000..6b9b424908 --- /dev/null +++ b/recipes/CommonVoice/common_voice_sense_prepare.py @@ -0,0 +1,473 @@ +""" +Data preparation for SENSE using multilingual Common Voice TSV files. 
+ +Download: https://commonvoice.mozilla.org/lang/datasets + +Authors +------- + * Maryem Bouziane 2025 + * Salima Mdhaffar 2025 + * Haroun Elleuch 2025 + * Yannick Estève 2025 +""" + +import csv +import functools +import json +import os +from os import path +from typing import Any, Dict, List + +from tqdm import tqdm + +from recipes.CommonVoice.common_voice_prepare import ( + check_commonvoice_folders, + process_line, +) +from speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map + +logger = get_logger(__name__) + +# Maximum duration (in seconds) +DURATION_MAX = 10.0 + + +def prepare_sense( + data_folder, + output_folder, + languages, + sampling_alpha, + language_ratios_file, + train_csv, + valid_csv, + convert_to_wav: bool = False, +) -> None: + """ + Prepares multilingual train/dev CSV files for SENSE from Common Voice TSVs. + + This function iterates over the selected languages and splits, builds + multilingual CSV manifests for the train and dev sets, and optionally + computes and saves language sampling ratios used by the training sampler. + + Arguments + --------- + data_folder : str + Root Common Voice folder containing the per-language subfolders. + output_folder : str + Directory where the combined CSV files will be stored. + languages : list of str + List of Common Voice language codes to include in the multilingual + split (e.g., ["fr", "de", "ar"]). + sampling_alpha : float + Exponent used in the language sampling ratio formula for the train + split. Values close to 0 yield more uniform sampling, values closer + to 1 follow the empirical language distribution. + language_ratios_file : str + Path to the JSON file where language sampling ratios for the train + split will be saved. If the path is empty or None, ratios are not + written to disk. + train_csv : str + Output path for the combined train CSV file. + valid_csv : str + Output path for the combined dev/validation CSV file. 
+ convert_to_wav : bool, optional + If True, `.mp3` files are converted to `.wav` in ``process_line``. + + Returns + ------- + None + """ + create_directory(output_folder) + + if skip_prepared_splits(train_csv, valid_csv): + return + + # Train split + build_combined_split( + data_folder=data_folder, + languages=languages, + split="train", + out_csv=train_csv, + alpha=sampling_alpha, + language_ratios_file=language_ratios_file, + convert_to_wav=convert_to_wav, + ) + + # Dev split + build_combined_split( + data_folder=data_folder, + languages=languages, + split="dev", + out_csv=valid_csv, + alpha=sampling_alpha, + language_ratios_file=None, + convert_to_wav=convert_to_wav, + ) + + +def create_directory(dir_path: str) -> None: + """ + Creates a directory if it does not already exist. + + Arguments + --------- + dir_path : str + Path of the directory to create. If empty or None, nothing is done. + + Returns + ------- + None + """ + if dir_path: + os.makedirs(dir_path, exist_ok=True) + + +def save_json(content, filename: str) -> None: + """ + Saves a Python object to a JSON file. + + The parent directory is created if it does not exist. + + Arguments + --------- + content : object + Serializable Python object to be written as JSON. + filename : str + Path of the JSON file to create. If empty or None, the function + returns without writing anything. + + Returns + ------- + None + """ + if not filename: + return + + directory = os.path.dirname(filename) + if directory: + create_directory(directory) + + with open(filename, "w", encoding="utf-8") as f: + json.dump(content, f, ensure_ascii=False, indent=2) + + +def skip_prepared_splits(train_csv: str, dev_csv: str) -> bool: + """ + Detects if the train and dev CSV files already exist. + + If both files are present, the data preparation step can be safely + skipped. + + Arguments + --------- + train_csv : str + Path to the train CSV file. + dev_csv : str + Path to the dev/validation CSV file. 
+ + Returns + ------- + bool + True if both CSV files already exist and preparation can be skipped, + False otherwise. + """ + if all(path.isfile(p) for p in [train_csv, dev_csv]): + msg = "%s and %s already exist, skipping data preparation!" % ( + train_csv, + dev_csv, + ) + logger.info(msg) + return True + return False + + +def read_split_lang_to_rows( + data_folder: str, + lang: str, + split: str, + convert_to_wav: bool = False, + duration_max: float = DURATION_MAX, +) -> List[Dict[str, Any]]: + """ + Reads one split (train/dev) for a given language and returns a list of + utterance dictionaries. + + The function: + * loads the corresponding Common Voice TSV file, + * processes audio lines with ``process_line``, + * filters out utterances with invalid or too long durations, + * attaches the raw transcription from the TSV file. + + Each dictionary has the following keys: + + ID, duration, wav, spk_id, wrd + + Arguments + --------- + data_folder : str + Root Common Voice directory containing the per-language subfolders. + lang : str + Language code to process (e.g., "fr", "br", "sv-SE"). + split : str + Name of the split to load ("train" or "dev"). + convert_to_wav : bool, optional + If True, audio files are converted to WAV by ``process_line``. + duration_max : float, optional + Maximum allowed duration in seconds. Utterances with + ``duration >= duration_max`` are discarded. + + Returns + ------- + list of dict + List of utterance dictionaries for this language and split. Returns + an empty list if no valid samples are found. + + Raises + ------ + FileNotFoundError + If the expected TSV file for this language and split does not exist. + """ + data_folder_lang = path.join(data_folder, lang) + orig_tsv_file = path.join(data_folder_lang, f"{split}.tsv") + + # If a language is listed in `languages`, its TSV file is expected to exist. + if not path.isfile(orig_tsv_file): + msg = "%s doesn't exist, verify your dataset!" 
% (orig_tsv_file) + logger.info(msg) + raise FileNotFoundError(msg) + + check_commonvoice_folders(data_folder_lang) + + msg = "Reading %s.tsv for language %s: %s" % ( + split, + lang, + orig_tsv_file, + ) + logger.info(msg) + with open(orig_tsv_file, encoding="utf-8") as f: + csv_lines = f.readlines() + + if len(csv_lines) <= 1: + msg = "No usable data in %s" % (orig_tsv_file) + logger.warning(msg) + return [] + + header_line = csv_lines[0] + data_lines = csv_lines[1:] + + header_map = { + column_name: index + for index, column_name in enumerate(header_line.split("\t")) + } + + if "sentence" not in header_map: + raise KeyError( + "Expected 'sentence' column in Common Voice TSV header, " + "got: %s" % (list(header_map.keys())) + ) + + # Map sentence id (snt_id) → raw transcription from TSV + raw_wrd_by_id: Dict[str, str] = {} + for line in data_lines: + cols = line.rstrip("\n").split("\t") + audio_path_filename = cols[header_map["path"]] + snt_id = audio_path_filename.split(".")[-2].split("/")[-1] + raw_sentence = cols[header_map["sentence"]] + raw_wrd_by_id[snt_id] = raw_sentence + + # Audio processing and duration filtering + line_processor = functools.partial( + process_line, + convert_to_wav=convert_to_wav, + data_folder=data_folder_lang, + language=lang, + accented_letters=True, + header_map=header_map, + ) + + processed_rows = parallel_map(line_processor, data_lines) + + rows: List[Dict[str, Any]] = [] + total_duration = 0.0 + + for row in processed_rows: + if row is None: + continue + if row.duration == 0.0: + continue + if row.duration >= duration_max: + continue + + full_id = row.snt_id + raw_sentence = raw_wrd_by_id.get(row.snt_id, "") + + rows.append( + { + "ID": full_id, + "duration": row.duration, + "wav": row.audio_path, + "spk_id": row.spk_id, + "wrd": raw_sentence, + } + ) + total_duration += row.duration + + if not rows: + msg = "No valid samples for %s / %s" % (lang, split) + logger.warning(msg) + return [] + + msg = "%s / %s: kept %s utterances 
for a total of %.2f hours" % ( + lang, + split, + len(rows), + total_duration / 3600.0, + ) + logger.info(msg) + + return rows + + +def build_combined_split( + data_folder: str, + languages, + split: str, + out_csv: str, + alpha: float = 0.05, + language_ratios_file=None, + convert_to_wav: bool = False, +) -> str: + """ + Builds a multilingual split (train or dev) from Common Voice TSV files. + + For each language in ``languages``, this function: + * reads the corresponding TSV file, + * processes and filters utterances using ``read_split_lang_to_rows``, + * concatenates all languages into a single CSV manifest. + + For the ``train`` split, it also computes per-language sampling ratios + according to: + + p_l = N_l / N_total + r_l = (1 / p_l) * (p_l ** alpha / sum_k p_k ** alpha) + + where ``p_l`` is the empirical probability of language ``l`` and ``r_l`` + is the sampling ratio used by the sampler. In practice, this makes + low-resource languages appear more often (oversampling) and + high-resource languages less often (undersampling) during training, + while still taking the original data distribution into account. + + Arguments + --------- + data_folder : str + Root Common Voice directory containing the per-language subfolders. + languages : list of str + List of language codes to include in the multilingual split. + split : str + Name of the split to build ("train" or "dev"). + out_csv : str + Path to the output CSV file that will contain all selected utterances + for this split. + alpha : float, optional + Smoothing exponent used in the language sampling ratio formula for + the train split. Defaults to 0.05. + language_ratios_file : str or None, optional + Path to the JSON file where the language ratios will be saved for the + train split. If None, ratios are not written to disk. + convert_to_wav : bool, optional + If True, audio files are converted to WAV inside + ``read_split_lang_to_rows`` via ``process_line``. 
+ + Returns + ------- + str + Path to the output CSV file that has been written. + + Raises + ------ + RuntimeError + If no valid samples are found for the given split across all + selected languages. + """ + msg = "Building multilingual %s split..." % (split) + logger.info(msg) + create_directory(os.path.dirname(out_csv)) + + all_rows: List[Dict[str, Any]] = [] + language_counts: Dict[str, int] = {} + + for lang in tqdm(languages, desc="Split %s" % (split)): + rows_lang = read_split_lang_to_rows( + data_folder=data_folder, + lang=lang, + split=split, + convert_to_wav=convert_to_wav, + duration_max=DURATION_MAX, + ) + if not rows_lang: + continue + + for r in rows_lang: + r["lang"] = lang + + language_counts[lang] = len(rows_lang) + all_rows.extend(rows_lang) + + if not all_rows: + raise RuntimeError( + "No valid samples for split %s across the selected languages." + % (split) + ) + + if split == "train": + total = len(all_rows) + if total == 0: + raise RuntimeError("Combined train split is empty after filtering.") + + ratio_map: Dict[str, float] = {} + ps: Dict[str, float] = {} + p_alphas: Dict[str, float] = {} + + # p_l = N_l / N_total, then p_l**alpha + for lang, count in language_counts.items(): + p = count / total + ps[lang] = p + p_alphas[lang] = p**alpha + + # Sum over all languages once p_l**alpha is known + p_alpha_sum = sum(p_alphas.values()) + + # r_l = (1 / p_l) * (p_l**alpha / sum_k p_k**alpha) + for lang, p in ps.items(): + if p > 0: + ratio_map[lang] = (1.0 / p) * (p_alphas[lang] / p_alpha_sum) + else: + msg = "Language %s has no valid samples in the train split." 
% ( + lang + ) + logger.warning(msg) + + if language_ratios_file is not None: + save_json(content=ratio_map, filename=language_ratios_file) + msg = "Language ratios saved to %s" % (language_ratios_file) + logger.info(msg) + msg = "Language ratios: %s" % (ratio_map) + logger.info(msg) + + for r in all_rows: + r["ratio"] = ratio_map[r["lang"]] + + fieldnames = ["ID", "duration", "wav", "spk_id", "wrd", "lang"] + if split == "train": + fieldnames.append("ratio") + + with open(out_csv, "w", newline="", encoding="utf-8") as f: + writer = csv.DictWriter(f, fieldnames=fieldnames) + writer.writeheader() + for r in all_rows: + writer.writerow(r) + + msg = "Multilingual %s.csv written to: %s" % (split, out_csv) + logger.info(msg) + return out_csv diff --git a/recipes/CommonVoice/self-supervised-learning/wav2vec2/README.md b/recipes/CommonVoice/self-supervised-learning/wav2vec2/README.md index 1a6999693b..690c8a87b8 100644 --- a/recipes/CommonVoice/self-supervised-learning/wav2vec2/README.md +++ b/recipes/CommonVoice/self-supervised-learning/wav2vec2/README.md @@ -4,11 +4,7 @@ This folder contains the scripts to train a wav2vec2 based system using CommonVo # SpeechBrain VS HuggingFace wav2vec2 training ?? As usual, our goal at SpeechBrain remains to offer as much flexibility to the user as possible. Hence, wav2vec2 pretraining can be achieved in two different ways: fully with SpeechBrain, or following our HuggingFace interface. Both approaches give similar results. Indeed we tested both with a BASE model pretrained on LibriSpeech and fine-tuned on LibriSpeech for ASR, IEMOCAP for emotion recognition and VoxCeleb 1 for speaker identification. Therefore, it is up to the user to decide what training scheme he/she wish to follow. A full SpeechBrain training offers a unique flexibility for further research (e.g. 
changing the loss, changing the architecture, modifying absolutely everything with wav2vec2), while the HuggingFace pretraining offers a good interfacing with the transformers library. -**On CommonVoice, we officialy provide only a fully HuggingFace recipe. If you wish to use the HuggingFace pretraining, please go to our [LibriSpeech recipe](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/self-supervised-learning/wav2vec2)** - -# Requirements -The HuggingFace *transformers* library must be installed first. -`pip install -r extra_requirements.txt` +**On CommonVoice, we officially provide only a fully HuggingFace recipe. If you wish to use the HuggingFace pretraining, please go to our [LibriSpeech recipe](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/self-supervised-learning/wav2vec2)** # Principle The idea is extremely simple. drawing provides a wav2vec 2.0 loss calculation. In practice, it means that forwarding throughout their wav2vec 2.0 models returns the loss. Hence, we simply use this interface as a lobes wrapper in SpeechBrain so anyone can fully pretrain a wav2vec 2.0 model. @@ -26,13 +22,13 @@ Do not forget to replace the `!PLACEHOLDER` variables in the yaml corresponding # Use a pretrained model for fine-tuning with SpeechBrain -The checkpoint generated by this pretraining is a standard PyTorch checkpoint. If you wish to use it as any pretrained HuggingFace model, as you would do for all the recipes that we currently have for wav2vec 2.0 finetuning, you simply need to copy this checkpoint to a folder that contains the corresponding `config.json` and `preprocessor_config.json`. Indeed, SpeechBrain depends (for now) from HuggingFace to train the wav2vec 2.0 model, and these files are the way HuggingFace defines all the parameters of the model. They usually can be found directly on the HuggingFace repository. 
Then, you just have to use the `speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec` (e.g., [CommonVoice FR ASR](https://github.com/speechbrain/speechbrain/blob/develop/recipes/CommonVoice/ASR/CTC/hparams/train_fr_with_wav2vec.yaml)) class and give the `wav2vec2_hub:/my/path/to/my/speechbrain_wav2vec2_model` parameter, and your pretrained model will be loaded directly for downstream training! +The checkpoint generated by this pretraining is a standard PyTorch checkpoint. If you wish to use it as any pretrained HuggingFace model, as you would do for all the recipes that we currently have for wav2vec 2.0 finetuning, you simply need to copy this checkpoint to a folder that contains the corresponding `config.json` and `preprocessor_config.json`. Indeed, SpeechBrain depends (for now) from HuggingFace to train the wav2vec 2.0 model, and these files are the way HuggingFace defines all the parameters of the model. They usually can be found directly on the HuggingFace repository. Then, you just have to use the `speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2` (e.g., [CommonVoice FR ASR](https://github.com/speechbrain/speechbrain/blob/develop/recipes/CommonVoice/ASR/CTC/hparams/train_fr_with_wav2vec.yaml)) class and give the `wav2vec2_hub:/my/path/to/my/speechbrain_wav2vec2_model` parameter, and your pretrained model will be loaded directly for downstream training! # Advices Training wav2vec 2.0 models is crazy w.r.t compute resources. For instance, this recipe only trains a BASE wav2vec 2.0 architecture, and it already requires 16 Tesla V100 for 7 days. Of course, you can scale this to your needs (e.g., you can work with 2 GPUs only), but it will take ages! Welcome to the wav2vec 2.0 world! Here is a list of the most important advices: -- To train w2v2 models, it is **extremely** important to have an effective batch size as high as possible. For instance, the original BASE model is trained with batches containing 1.6H of speech. 
This means that (duration_per_minibatch * nb_gpu * gradient_accumulation) must be at least equal to 1.6H. +- To train w2v2 models, it is **extremely** important to have an effective batch size as high as possible. For instance, the original BASE model is trained with batches containing 1.6H of speech. This means that (duration_per_minibatch * nb_gpu * grad_accumulation_factor) must be at least equal to 1.6H. - Do not train on sequences longer than 20s, this will blow your VRAM up and is useless for now. Indeed training with shorter sentences (10s) may work just as well. - Set the `n_warmup_steps` steps in such a way that it corresponds to 10% of the total training steps. The number of steps correspond to the actual number of call to .backward w.r.t the batch size. diff --git a/recipes/CommonVoice/self-supervised-learning/wav2vec2/extra_requirements.txt b/recipes/CommonVoice/self-supervised-learning/wav2vec2/extra_requirements.txt deleted file mode 100644 index d84a124dbc..0000000000 --- a/recipes/CommonVoice/self-supervised-learning/wav2vec2/extra_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -transformers==4.15 diff --git a/recipes/CommonVoice/self-supervised-learning/wav2vec2/hparams/wav2vec2_base.yaml b/recipes/CommonVoice/self-supervised-learning/wav2vec2/hparams/wav2vec2_base.yaml index 1f72675373..632ffd60ad 100644 --- a/recipes/CommonVoice/self-supervised-learning/wav2vec2/hparams/wav2vec2_base.yaml +++ b/recipes/CommonVoice/self-supervised-learning/wav2vec2/hparams/wav2vec2_base.yaml @@ -5,13 +5,14 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_pretraining/ save_folder: !ref /save train_log: !ref /train_log.txt # URL for the HuggingFace model we want to pretrain (BASE here) wav2vec2_hub: facebook/wav2vec2-base +wav2vec2_folder: !ref /wav2vec2_checkpoint # 
Data files data_folder: !PLACEHOLDER # e.g, /localscratch/cv-corpus-5.1-2020-06-22/fr @@ -26,11 +27,11 @@ skip_prep: False # We remove utterance slonger than 10s in the train/dev/test sets as -# longer sentences certainly correspond to "open microphones". +# longer sentences certainly correspond to open microphones. avoid_if_longer_than: 10.0 avoid_if_shorter_than: 1.0 -# Training parameters +####################### Training Parameters #################################### # Parameters are corresponding the the ones reported in the official wav2vec2 # paper (for the masking). mask_length: 10 @@ -41,24 +42,23 @@ number_of_epochs: 100 lr_adam: 2.0 # This will get reduced by the training scheduler weight_decay: 0.01 d_model: 768 # Needed by the scheduler. 768 is for the BASE w2v2 -sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs # Must be 12 per GPU to fit 32GB of VRAM -# IMPORTANT: To train w2v2 model, we recommand to have the effective batch_size -# higher than 100 (batch_size * nb_gpu * gradient_accumulation) +# IMPORTANT: To train w2v2 model, we recommend to have the effective batch_size +# higher than 100 (batch_size * nb_gpu * grad_accumulation_factor) # Examples are: -# 32 Tesla V100 32GB — 12 * 32 * 1 -# 4 Tesla V100 32GB — 12 * 4 * {6-8} +# 32 Tesla V100 32GB = 12 * 32 * 1 +# 4 Tesla V100 32GB = 12 * 4 * (6-8) batch_size: 12 test_batch_size: 8 -gradient_accumulation: 8 +grad_accumulation_factor: 8 num_workers: 4 - +sorting: ascending dataloader_options: batch_size: !ref num_workers: !ref @@ -71,19 +71,22 @@ test_dataloader_options: # Instead of the default setting. While the recipe will work directly by setting # it to True, you will first need to read the tutorial on dynamic batching to # properly adapt the hyperparameters to your GPU memory! 
Using Dynamic Batching -# will drastically optimise your GPU utilization and decrease your training time. +# will drastically optimise your GPU utilization and decrease your training time. # Be careful to also adjust the gradient accumulation when using dynamic batching. # This setup will work with 32GB GPUs. # Dynamic Batching parameters, if used are: dynamic_batching: False -dyn_batch_len: 120 # Cumulative length of each batch, per gpu. -max_batch_size: 64 # Max number of samples per batch, per gpu. +max_batch_length: 120 # Cumulative length of each batch, per gpu. +max_batch_ex: 64 # Max number of samples per batch, per gpu. +shuffle: True +num_buckets: 30 + dynamic_batch_sampler: - max_batch_len: !ref - max_batch_ex: !ref - shuffle_ex: True + max_batch_length: !ref + max_batch_ex: !ref + shuffle: !ref batch_ordering: !ref - num_buckets: 30 + num_buckets: !ref # # Functions and classes @@ -91,9 +94,9 @@ dynamic_batch_sampler: epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2Pretrain +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2Pretrain source: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref mask_prob: !ref mask_length: !ref @@ -101,7 +104,7 @@ modules: wav2vec2: !ref opt_class: !name:torch.optim.AdamW - lr: 0 # Will be changed by the scheduler, but we start at 0! 
+ lr: 0 # Will be changed by the scheduler, but we start at 0 betas: (0.9, 0.98) eps: 0.000000001 weight_decay: !ref diff --git a/recipes/CommonVoice/self-supervised-learning/wav2vec2/train_hf_wav2vec2.py b/recipes/CommonVoice/self-supervised-learning/wav2vec2/train_hf_wav2vec2.py index 1698b71349..e95ffadc6b 100644 --- a/recipes/CommonVoice/self-supervised-learning/wav2vec2/train_hf_wav2vec2.py +++ b/recipes/CommonVoice/self-supervised-learning/wav2vec2/train_hf_wav2vec2.py @@ -1,13 +1,4 @@ #!/usr/bin/env python3 - -import sys -import torch -import logging -import speechbrain as sb -import torchaudio -from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main - """Recipe for pretraining a wav2vec 2.0 model on CommonVoice EN. Note that it can be trained with ANY dataset as long as you provide the correct JSON or CSV file. @@ -36,7 +27,18 @@ * Yan Gao 2021 """ -logger = logging.getLogger(__name__) +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -51,7 +53,7 @@ def compute_forward(self, batch, stage): # Forward on w2v2 and take the loss. # It has to be on train mode even for eval. Otherwise it would deactivate # the loss computation ... - out, mask = self.modules.wav2vec2(wavs) + out, mask = self.modules.wav2vec2(wavs, wav_lens) loss = out.loss if stage != sb.Stage.TRAIN: @@ -64,10 +66,10 @@ def compute_objectives(self, predictions, batch, stage): if stage == sb.Stage.TRAIN: # We don't have to compute anything as the HF model directly returns - # the constrative loss. + # the contrastive loss. loss = predictions else: - # We compute the accuracy between embeddings with cosing sim. + # We compute the accuracy between embeddings with cosine_sim. 
loss, out, mask_time_indices = predictions cosine_sim = torch.cosine_similarity( out.projected_states, out.projected_quantized_states, dim=-1 @@ -80,51 +82,10 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - - # Here we manage mixed precision - if self.auto_mix_prec: - with torch.cuda.amp.autocast(): - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives( - predictions, batch, sb.Stage.TRAIN - ) - - # normalize the loss by gradient_accumulation step - self.scaler.scale( - loss / self.hparams.gradient_accumulation - ).backward() - - if self.step % self.hparams.gradient_accumulation == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.scaler.unscale_(self.optimizer) - self.scaler.step(self.optimizer) - self.scaler.update() - self.optimizer.zero_grad() - - # anneal lr every update - self.hparams.noam_annealing(self.optimizer) - else: - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - # normalize the loss by gradient_accumulation step - (loss / self.hparams.gradient_accumulation).backward() - - if self.step % self.hparams.gradient_accumulation == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.optimizer.step() - self.optimizer.zero_grad() - - # anneal lr every update - self.hparams.noam_annealing(self.optimizer) - - return loss.detach() + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" @@ -172,13 +133,15 @@ def on_stage_end(self, stage, stage_loss, epoch): # Define custom data procedure def dataio_prepare(hparams): 
"""This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # 1. Define datasets data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -215,13 +178,15 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate @@ -235,12 +200,12 @@ def dataio_prepare(hparams): @sb.utils.data_pipeline.takes("wav") @sb.utils.data_pipeline.provides("sig") def audio_pipeline(wav): - info = torchaudio.info(wav) + info = audio_io.info(wav) sig = sb.dataio.dataio.read_audio(wav) if info.num_channels > 1: sig = torch.mean(sig, dim=1) resampled = torchaudio.transforms.Resample( - info.sample_rate, hparams["sample_rate"], + info.sample_rate, hparams["sample_rate"] )(sig) return resampled @@ -248,9 +213,7 @@ def audio_pipeline(wav): sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) # 4. Set output: - sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig"], - ) + sb.dataio.dataset.set_output_keys(datasets, ["id", "sig"]) # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
train_batch_sampler = None @@ -259,24 +222,13 @@ def audio_pipeline(wav): from speechbrain.dataio.sampler import DynamicBatchSampler # noqa dynamic_hparams = hparams["dynamic_batch_sampler"] - num_buckets = dynamic_hparams["num_buckets"] train_batch_sampler = DynamicBatchSampler( - train_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, - length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + train_data, **dynamic_hparams, length_func=lambda x: x["duration"] ) valid_batch_sampler = DynamicBatchSampler( - valid_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, - length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + valid_data, **dynamic_hparams, length_func=lambda x: x["duration"] ) return ( @@ -289,13 +241,11 @@ def audio_pipeline(wav): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) diff --git a/recipes/DNS/README.md b/recipes/DNS/README.md new file mode 100644 index 0000000000..f7f0143ff3 --- /dev/null +++ b/recipes/DNS/README.md @@ -0,0 +1,152 @@ +# **Speech Enhancement for Microsoft Deep Noise Suppression (DNS) Challenge – ICASSP 2022** +This repository contains training recipes for a speech enhancement system designed for the 4th Deep Noise Suppression Challenge, organized by Microsoft at Interspeech 2022.
+The Deep Noise Suppression Challenge features two distinct tracks: +1. **Real Time Non-Personalized DNS** +2. Real Time Personalized DNS (PDNS) for Fullband Audio + +We focus on implementing solutions only for the first track, which involves real-time non-personalized DNS. + +- **Model and Data** : For this challenge, we employ the [Sepformer model](https://arxiv.org/abs/2010.13154v2) to train our speech enhancement system. Our training utilizes 500 hours of fullband audio. + +- **Evaluation Strategy** : We follow the official evaluation strategy outlined by the ITU-T P.835 subjective test framework. It measures speech quality, background noise quality, and overall audio quality. This is done using [DNSMOS P.835](https://arxiv.org/pdf/2110.01763.pdf), a machine learning-based model capable of predicting SIG (Speech Quality), BAK (Background Noise Quality), and OVRL (Overall Audio Quality). + +**Related links** +- [Official Website](https://www.microsoft.com/en-us/research/academic-program/deep-noise-suppression-challenge-icassp-2022/) +- [DNS-4 ICASSP 2022 github repository](https://github.com/microsoft/DNS-Challenge/tree/5582dcf5ba43155621de72a035eb54a7d233af14) + +## **DNS-4 dataset** +DNS-4 dataset once decompressed, the directory structure and sizes of datasets are: +``` +datasets_fullband 892G ++-- dev_testset 1.7G ++-- impulse_responses 5.9G ++-- noise_fullband 58G +\-- clean_fullband 827G + +-- emotional_speech 2.4G + +-- french_speech 62G + +-- german_speech 319G + +-- italian_speech 42G + +-- read_speech 299G + +-- russian_speech 12G + +-- spanish_speech 65G + +-- vctk_wav48_silence_trimmed 27G + \-- VocalSet_48kHz_mono 974M +``` + +### **Required disk space** +The `dns_download.py` download script downloads the Real-time DNS track data and de-compresses it. The compressed data takes around 550 GB of disk space and when de-compressed you would need 1 TB to store audio files. We bundle this decompressed audio into larger archives called as shards. 
+However this is not the end, the downloaded clean-audio files, RIRs, and noisy-audio files are further used to synthesize clean-noisy audio pairs for training. Once again, we bundle the synthesized data into shards for efficient and faster accessibility. This means further space will be needed to store the synthesized clean-noisy-noise shards. + +**NOTE** +- This dataset download process can be extremely time-consuming. With a total of 126 splits (train, noise and dev data), the script downloads each split in a serial order. The script also allows concurrent data download (by enabling `--parallel_download` param) by using multiple threads (equal to number of your CPU cores). This is helpful especially when you have access to a large cluster. (Alternatively, you can download all 126 splits and decompress them at once by using array job submission.) + +## **Installing Extra Dependencies** +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +## **Getting started** +- STEP 1: Download DNS dataset. +- STEP 2: Synthesize noisy data. +- STEP 3: Begin training. + +## Step 1: **Downloading Real-time DNS track dataset and create the Webdataset shards** +The DNS dataset can be downloaded by running the script below +``` +python dns_download.py --compressed_path DNS-dataset --decompressed_path DNS-compressed +``` +To use parallel downloading +``` +python dns_download.py --compressed_path DNS-dataset --decompressed_path DNS-compressed --parallel_download +``` +The compressed files are downloaded in `DNS-compressed` and further decompressed audio files can be found in `DNS-dataset`. + +Next, create webdataset shards +``` +## webdataset shards for clean_fullband (choose one one language i.e. read, german etc. 
at a time) +python create_wds_shards.py DNS-dataset/datasets_fullband/clean_fullband// DNS-shards/clean_fullband/ + +## webdataset shards for noise_fullband +python create_wds_shards.py DNS-dataset/datasets_fullband/noise_fullband/ DNS-shards/noise_fullband + +## webdataset shards for baseline dev-set +python create_wds_shards.py DNS-dataset/datasets_fullband/dev_testset/noisy_testclips/ DNS-shards/devsets_fullband +``` +## Step 2: **Synthesize noisy data and create the Webdataset shards** +To synthesize clean-noisy audio for speech enhancement training (we add noise, RIR to clean fullband speech to synthesize clean-noisy pairs) +``` +cd noisyspeech_synthesizer + +## synthesize read speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards --split_name read_speech --synthesized_data_dir synthesized_data_shards + +## synthesize German speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards --split_name german_speech --synthesized_data_dir synthesized_data_shards + +## synthesize Italian speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards --split_name italian_speech --synthesized_data_dir synthesized_data_shards + +## similarly do for spanish, russian and french. 
+``` +*For more, please see `noisyspeech_synthesizer` on how to synthesize noisy files from clean audio and noise audio files.* + +## Step 3: **Begin training** +To start training +``` +cd enhancement +python train.py hparams/sepformer-dns-16k.yaml --data_folder --baseline_noisy_shards_folder +``` +*For more details and how to perform evaluation, see `enhancement` folder on details about the main training script* + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and 
################################################################################
#
# Converts the uncompressed DNS folder
#   {french,german,...}_speech/../<*.wav>
# structure of DNS into a WebDataset format
#
# Author(s): Tanel Alumäe, Nik Vaessen, Sangeet Sagar (2023)
################################################################################

import argparse
import json
import os
import pathlib
import random
from collections import defaultdict

import librosa
import torch
import webdataset as wds
from tqdm import tqdm

from speechbrain.dataio import audio_io

################################################################################
# methods for writing the shards

ID_SEPARATOR = "&"


def load_audio(audio_file_path: pathlib.Path) -> torch.Tensor:
    """Read an audio file and return its samples as a tensor.

    Arguments
    ---------
    audio_file_path : pathlib.Path
        Path of the audio file to load.

    Returns
    -------
    torch.Tensor
        The audio samples; the sample rate reported by the loader is
        discarded.
    """
    samples, _sample_rate = audio_io.load(audio_file_path)

    return samples


def write_shards(
    dns_folder_path: pathlib.Path,
    shards_path: pathlib.Path,
    seed: int,
    samples_per_shard: int,
    min_dur: float,
):
    """Collect all WAV files below ``dns_folder_path`` and pack them into
    WebDataset tar shards, writing a ``meta.json`` with statistics.

    Arguments
    ---------
    dns_folder_path: pathlib.Path
        folder where extracted DNS data is located
    shards_path: pathlib.Path
        folder to write shards of data to
    seed: int
        random seed used to initially shuffle data into shards
    samples_per_shard: int
        number of data samples to store in each shards.
    min_dur: float
        Smallest possible duration.
    """
    # make sure output folder exists (idempotent, also covers shard writing)
    shards_path.mkdir(parents=True, exist_ok=True)

    # find all audio files
    audio_files = sorted(dns_folder_path.rglob("*.wav"))

    # create tuples (unique_sample_id, language_id, path_to_audio_file, duration)
    data_tuples = []

    # track statistics on data
    all_language_ids = set()
    sample_keys_per_language = defaultdict(list)

    # Pick the path delimiter (and, where it is fixed, the language id)
    # based on which DNS subset this folder holds.
    folder_posix = dns_folder_path.as_posix()
    if "clean" in folder_posix:
        delim = "clean_fullband/"
    elif "noise" in folder_posix:
        delim = "noise_fullband/"
        lang = "noise"
    elif "dev_testset" in folder_posix:
        delim = "dev_testset/"
        lang = "baseline_noisytestset"
    else:
        delim = os.path.basename(folder_posix)
        lang = delim

    for f in tqdm(audio_files):
        # path should be
        # {french,german,...}_speech/../<*.wav>
        sub_path = f.as_posix().split(delim)[1]

        loc = f.as_posix()
        key = os.path.splitext(os.path.basename(sub_path))[0]
        if "clean_fullband" in folder_posix:
            # for clean speech the language is encoded in the file name
            lang = key.split("_speech")[0]

        dur = librosa.get_duration(path=loc)

        # Period is not allowed in a WebDataset key name
        key = key.replace(".", "_")
        if dur > min_dur:
            # store statistics
            all_language_ids.add(lang)
            sample_keys_per_language[lang].append(key)
            data_tuples.append((key, lang, loc, dur))

    all_language_ids = sorted(all_language_ids)

    # write a meta.json file which contains statistics on the data
    # which will be written to shards
    meta_dict = {
        "language_ids": all_language_ids,
        "sample_keys_per_language": sample_keys_per_language,
        "num_data_samples": len(data_tuples),
    }

    with (shards_path / "meta.json").open("w", encoding="utf-8") as f:
        json.dump(meta_dict, f, indent=4)

    # shuffle the tuples so that each shard has a large variety in languages
    random.seed(seed)
    random.shuffle(data_tuples)

    # write shards
    all_keys = set()
    pattern = str(shards_path / "shard") + "-%06d.tar"

    with wds.ShardWriter(pattern, maxcount=samples_per_shard) as sink:
        for key, language_id, f, duration in data_tuples:
            # load the audio tensor
            tensor = load_audio(f)

            # verify key is unique
            assert key not in all_keys
            all_keys.add(key)

            # create sample to write
            sample = {
                "__key__": key,
                "audio.pth": tensor,
                "language_id": language_id,
            }

            # write sample to sink
            sink.write(sample)


################################################################################
# define CLI

parser = argparse.ArgumentParser(
    description="Convert DNS-4 to WebDataset shards"
)

parser.add_argument(
    "dns_decompressed_path",
    type=pathlib.Path,
    help="directory containing the (decompressed) DNS dataset",
)
parser.add_argument(
    "shards_path", type=pathlib.Path, help="directory to write shards to"
)
parser.add_argument(
    "--seed",
    type=int,
    default=12345,
    help="random seed used for shuffling data before writing to shard",
)
parser.add_argument(
    "--samples_per_shard",
    type=int,
    default=5000,
    help="the maximum amount of samples placed in each shard. The last shard "
    "will most likely contain fewer samples.",
)
parser.add_argument(
    "--min-duration",
    type=float,
    default=3.0,
    help="Minimum duration of the audio",
)


################################################################################
# execute script

if __name__ == "__main__":
    args = parser.parse_args()

    write_shards(
        args.dns_decompressed_path,
        args.shards_path,
        args.seed,
        args.samples_per_shard,
        args.min_duration,
    )
+ +Authors + * Sangeet Sagar 2022 +""" + +import argparse +import fileinput +import os +import shutil +import ssl +import tarfile +import urllib.request +import zipfile +from concurrent.futures import ThreadPoolExecutor + +import certifi +import requests +from tqdm.auto import tqdm + +BLOB_NAMES = [ + "clean_fullband/datasets_fullband.clean_fullband.VocalSet_48kHz_mono_000_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.emotional_speech_000_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_000_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_001_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_002_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_003_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_004_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_005_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_006_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_007_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.french_speech_008_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_000_0.00_3.47.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_001_3.47_3.64.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_002_3.64_3.74.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_003_3.74_3.81.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_004_3.81_3.86.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_005_3.86_3.91.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_006_3.91_3.96.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_007_3.96_4.00.tar.bz2", + 
"clean_fullband/datasets_fullband.clean_fullband.german_speech_008_4.00_4.04.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_009_4.04_4.08.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_010_4.08_4.12.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_011_4.12_4.16.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_012_4.16_4.21.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_013_4.21_4.26.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_014_4.26_4.33.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_015_4.33_4.43.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_016_4.43_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_017_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_018_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_019_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_020_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_021_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_022_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_023_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_024_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_025_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_026_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_027_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_028_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_029_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_030_NA_NA.tar.bz2", + 
"clean_fullband/datasets_fullband.clean_fullband.german_speech_031_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_032_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_033_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_034_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_035_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_036_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_037_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_038_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_039_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_040_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_041_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.german_speech_042_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.italian_speech_000_0.00_3.98.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.italian_speech_001_3.98_4.21.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.italian_speech_002_4.21_4.40.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.italian_speech_003_4.40_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.italian_speech_004_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.italian_speech_005_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_000_0.00_3.75.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_001_3.75_3.88.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_002_3.88_3.96.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_003_3.96_4.02.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_004_4.02_4.06.tar.bz2", + 
"clean_fullband/datasets_fullband.clean_fullband.read_speech_005_4.06_4.10.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_006_4.10_4.13.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_007_4.13_4.16.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_008_4.16_4.19.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_009_4.19_4.21.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_010_4.21_4.24.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_011_4.24_4.26.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_012_4.26_4.29.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_013_4.29_4.31.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_014_4.31_4.33.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_015_4.33_4.35.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_016_4.35_4.38.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_017_4.38_4.40.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_018_4.40_4.42.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_019_4.42_4.45.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_020_4.45_4.48.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_021_4.48_4.52.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_022_4.52_4.57.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_023_4.57_4.67.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_024_4.67_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_025_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_026_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_027_NA_NA.tar.bz2", + 
"clean_fullband/datasets_fullband.clean_fullband.read_speech_028_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_029_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_030_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_031_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_032_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_033_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_034_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_035_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_036_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_037_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_038_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.read_speech_039_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.russian_speech_000_0.00_4.31.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.russian_speech_001_4.31_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_000_0.00_4.09.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_001_4.09_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_002_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_003_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_004_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_005_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_006_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_007_NA_NA.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.spanish_speech_008_NA_NA.tar.bz2", + 
"clean_fullband/datasets_fullband.clean_fullband.vctk_wav48_silence_trimmed_000.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.vctk_wav48_silence_trimmed_001.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.vctk_wav48_silence_trimmed_002.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.vctk_wav48_silence_trimmed_003.tar.bz2", + "clean_fullband/datasets_fullband.clean_fullband.vctk_wav48_silence_trimmed_004.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.audioset_000.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.audioset_001.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.audioset_002.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.audioset_003.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.audioset_004.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.audioset_005.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.audioset_006.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.freesound_000.tar.bz2", + "noise_fullband/datasets_fullband.noise_fullband.freesound_001.tar.bz2", + "datasets_fullband.dev_testset_000.tar.bz2", +] + +AZURE_URL = ( + "https://dns4public.blob.core.windows.net/dns4archive/datasets_fullband" # noqa ignore-url-check +) + +# Impulse response and Blind testset +OTHER_URLS = { + "impulse_responses": [ + "https://www.openslr.org/resources/26/sim_rir_16k.zip", + "https://www.openslr.org/resources/28/rirs_noises.zip", + ], + "blind_testset": [ + "https://dns4public.blob.core.windows.net/dns4archive/blind_testset_bothtracks.zip" + ], +} + +RIR_table_simple_URL = "https://raw.githubusercontent.com/microsoft/DNS-Challenge/0443a12f5e6e7bec310f453cf0d9637ca28e0eea/datasets/acoustic_params/RIR_table_simple.csv" + +SPLIT_LIST = [ + "dev_testset", + "impulse_responses", + "noise_fullband", + "emotional_speech", + "french_speech", + "german_speech", + "italian_speech", + "read_speech", + "russian_speech", + "spanish_speech", 
+ "vctk_wav48_silence_trimmed", + "VocalSet_48kHz_mono", +] + + +def prepare_download(): + """ + Downloads and prepares various data files and resources. It + downloads real-time DNS track data files (train set and dev + noisy set). + """ + # Real-time DNS track (train set + dev noisy set) + for file_url in BLOB_NAMES: + for split in SPLIT_LIST: + if split in file_url: + split_name = split + + split_path = os.path.join(COMPRESSED_PATH, split_name) + if not os.path.exists(split_path): + os.makedirs(split_path) + if not os.path.exists(DECOMPRESSED_PATH): + os.makedirs(DECOMPRESSED_PATH) + + filename = file_url.split("/")[-1] + download_path = os.path.join(split_path, filename) + download_url = AZURE_URL + "/" + file_url + + if not validate_file(download_url, download_path): + if os.path.exists(download_path): + resume_byte_pos = os.path.getsize(download_path) + else: + resume_byte_pos = None + + download_file( + download_url, + download_path, + split_name, + filename, + resume_byte_pos=resume_byte_pos, + ) + else: + print(", \tDownload complete. Skipping") + decompress_file(download_path, DECOMPRESSED_PATH, split_name) + + # Download RIR (impulse response) & BLIND testset + rir_blind_test_download() + + +def rir_blind_test_download(): + """ + Download the RIRs (room impulse responses), and the blind + test set. 
+ """ + # RIR (impulse response) & BLIND testset + for split_name, download_urls in OTHER_URLS.items(): + for file_url in download_urls: + split_path = os.path.join(COMPRESSED_PATH, split_name) + if not os.path.exists(split_path): + os.makedirs(split_path) + + filename = file_url.split("/")[-1] + download_path = os.path.join(split_path, filename) + + if not validate_file(file_url, download_path): + if os.path.exists(download_path): + resume_byte_pos = os.path.getsize(download_path) + else: + resume_byte_pos = None + + download_file( + file_url, + download_path, + split_name, + filename, + resume_byte_pos=resume_byte_pos, + ) + else: + print(", \tDownload complete. Skipping") + decompress_file( + download_path, + os.path.join(DECOMPRESSED_PATH, split_name), + split_name, + ) + + # Download RIRs simple table + file_path = os.path.join( + DECOMPRESSED_PATH, "impulse_responses", "RIR_table_simple.csv" + ) + response = requests.get(RIR_table_simple_URL) + if response.status_code == 200: + with open(file_path, "wb") as file: + file.write(response.content) + print("\nRIR_simple_table downloaded successfully.") + + else: + print( + f"\nFailed to download RIR_simple_table. Status code: {response.status_code}" + ) + + +def download_file( + download_url, download_path, split_name, filename, resume_byte_pos=None +): + """ + Download file from given URL + + Arguments + --------- + download_url : str + URL of file being downloaded + download_path : str + Full path of the file that is to be downloaded + (or already downloaded) + split_name : str + Split name of the file being downloaded + e.g. read_speech + filename : str + Filename of the file being downloaded + resume_byte_pos: (int, optional) + Starting byte position for resuming the download. + Default is None, which means a fresh download. + + Returns + ------- + bool + If True, the file need not be downloaded again. + Else the download might have failed or is incomplete. 
+ """ + print("Downloading:", split_name, "=>", filename) + resume_header = ( + {"Range": f"bytes={resume_byte_pos}-"} if resume_byte_pos else None + ) + response = requests.get(download_url, headers=resume_header, stream=True) + file_size = int(response.headers.get("Content-Length")) + + mode = "ab" if resume_byte_pos else "wb" + initial_pos = resume_byte_pos if resume_byte_pos else 0 + + with open(download_path, mode, encoding="utf-8") as f: + with tqdm( + total=file_size, + unit="B", + unit_scale=True, + unit_divisor=1024, + initial=initial_pos, + miniters=1, + ) as pbar: + for chunk in response.iter_content(32 * 1024): + f.write(chunk) + pbar.update(len(chunk)) + + # Validate downloaded file + if validate_file(download_url, download_path): + return True + else: + print("Download failed. Moving on.") + return False + + +def download_file_parallel(args): + """ + Downloads a file in parallel using the provided arguments. It + makes use of `download_file` function to download the required file. + + Arguments + --------- + args : tuple + Tuple containing the download URL, download path, split + name, filename, and required bytes to be downloaded. + """ + download_url, download_path, split_name, filename, resume_byte_pos = args + download_file( + download_url, + download_path, + split_name, + filename, + resume_byte_pos=resume_byte_pos, + ) + + +def parallel_download(): + """ + Perform parallel download of files using `using ThreadPoolExecutor`. 
+ """ + with ThreadPoolExecutor() as executor: + futures = [] + for file_url in BLOB_NAMES: + for split in SPLIT_LIST: + if split in file_url: + split_name = split + split_path = os.path.join(COMPRESSED_PATH, split_name) + if not os.path.exists(split_path): + os.makedirs(split_path) + if not os.path.exists(DECOMPRESSED_PATH): + os.makedirs(DECOMPRESSED_PATH) + + filename = file_url.split("/")[-1] + download_path = os.path.join(split_path, filename) + download_url = AZURE_URL + "/" + file_url + + if not validate_file(download_url, download_path): + if os.path.exists(download_path): + resume_byte_pos = os.path.getsize(download_path) + else: + resume_byte_pos = None + args = ( + download_url, + download_path, + split_name, + filename, + resume_byte_pos, + ) + futures.append(executor.submit(download_file_parallel, args)) + # download_file(download_url, download_path, split_name, filename) + # decompress_file(download_path, DECOMPRESSED_PATH) + else: + print(", \tDownload complete. Skipping") + decompress_file(download_path, DECOMPRESSED_PATH, split_name) + + for future in futures: + future.result() + + # Download RIR (impulse response) & BLIND testset + rir_blind_test_download() + + +def decompress_file(file, decompress_path, split_name): + """ + Decompress the downloaded file if the target folder does not exist. + + Arguments + --------- + file : str + Path to the compressed downloaded file + decompress_path : str + Path to store the decompressed audio files + split_name : str + The portion of the data to decompress + + Returns + ------- + True if decompression skipped. + """ + for _, dirs, _ in os.walk(decompress_path): + if split_name in dirs: + print("\tDecompression skipped. Folder already exists.") + return True + + if "sim_rir_16k" in file: + slr26_dir = os.path.join(decompress_path, "SLR26") + if os.path.exists(slr26_dir): + print("\tDecompression skipped. 
Folder already exists.") + return True + + if "rirs_noises" in file: + slr28_dir = os.path.join(decompress_path, "SLR28") + if os.path.exists(slr28_dir): + print("\tDecompression skipped. Folder already exists.") + return True + + print("\tDecompressing...") + file_extension = os.path.splitext(file)[-1].lower() + if file_extension == ".zip": + zip = zipfile.ZipFile(file, "r") + zip.extractall(decompress_path) + rename_rirs(decompress_path) + + elif file_extension == ".bz2": + tar = tarfile.open(file, "r:bz2") + tar.extractall(decompress_path) + tar.close() + else: + print("Unsupported file format. Only zip and bz2 files are supported.") + + +def rename_rirs(decompress_path): + """ + Rename directories containing simulated room impulse responses + (RIRs). + + Arguments + --------- + decompress_path : str + The path to the directory containing the RIRs + """ + try: + os.rename( + os.path.join(decompress_path, "simulated_rirs_16k"), + os.path.join(decompress_path, "SLR26"), + ) + except Exception: + pass + try: + os.rename( + os.path.join(decompress_path, "RIRS_NOISES"), + os.path.join(decompress_path, "SLR28"), + ) + except Exception: + pass + + +def validate_file(download_url, download_path): + """ + Validate the downloaded file and resume the download if needed. + + Arguments + --------- + download_url : str + URL of the file being downloaded + download_path : str + Full path of the file that is to be downloaded + (or already downloaded) + + Returns + ------- + bool + If True, the file need not be downloaded again. + Else, either the file is not yet downloaded or + partially downloaded, thus resume the download. 
+ """ + if not os.path.isfile(download_path): + # File not yet downloaded + return False + + # Get file size in MB + actual_size = urllib.request.urlopen( + download_url, + context=ssl.create_default_context(cafile=certifi.where()), + ).length + + download_size = os.path.getsize(download_path) + + print( + "File: {}, \t downloaded {} MB out of {} MB".format( + download_path.split("/")[-1], + download_size // (1024 * 1024), + actual_size // (1024 * 1024), + ), + end="", + ) + # Set a margin of 100 MB. We skip re-downloading the file if downloaded + # size differs from actual size by max 100 MB. More than this margin, + # re-download is to attempted. + if actual_size - download_size < 100 * 1024 * 1024: + return True + else: + print(", \tIncomplete download. Resuming...") + return False + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Download and extract DNS dataset." + ) + parser.add_argument( + "--compressed_path", + type=str, + default="DNS-compressed", + help="Path to store the compressed data.", + ) + parser.add_argument( + "--decompressed_path", + type=str, + default="DNS-dataset", + help="Path to store the decompressed data.", + ) + + parser.add_argument( + "--parallel_download", + action="store_true", + help="Use parallel download.", + ) + + args = parser.parse_args() + + COMPRESSED_PATH = args.compressed_path + DECOMPRESSED_PATH = args.decompressed_path + + if args.parallel_download: + parallel_download() + else: + prepare_download() + + # Modify contents inside RIR_simple_table.csv + file_path = os.path.join( + DECOMPRESSED_PATH, "impulse_responses", "RIR_table_simple.csv" + ) + full_path = os.path.abspath(os.path.dirname(file_path)) + + replacements = { + "datasets/impulse_responses/SLR26/simulated_rirs_16k": os.path.join( + full_path, "SLR26" + ), + "datasets/impulse_responses/SLR28/RIRS_NOISES": os.path.join( + full_path, "SLR28" + ), + } + + # Perform the replacements directly in the file using fileinput module + with 
fileinput.FileInput(file_path, inplace=True) as file: + for line in file: + for original, replacement in replacements.items(): + line = line.replace(original, replacement) + print(line, end="") + + if not os.path.exists( + os.path.join("noisyspeech_synthesizer", "RIR_table_simple.csv") + ): + shutil.move(file_path, "noisyspeech_synthesizer") diff --git a/recipes/DNS/enhancement/README.md b/recipes/DNS/enhancement/README.md new file mode 100644 index 0000000000..8ef4c2c45e --- /dev/null +++ b/recipes/DNS/enhancement/README.md @@ -0,0 +1,83 @@ +# **Speech enhancement with Microsoft DNS dataset** +This folder contains the recipe for speech enhancement on Deep Noise Suppression (DNS) Challenge 4 (ICASSP 2022) dataset using SepFormer. + +For data download and prepration, please refer to the `README.md` in `recipes/DNS/` + +## **Start training** +``` +python train.py hparams/sepformer-dns-16k.yaml --data_folder --baseline_noisy_shards_folder +``` +## **DNSMOS Evaluation on baseline-testclips** +*Reference: [Official repo](https://github.com/microsoft/DNS-Challenge/tree/master/DNSMOS)
* +Download the evaluation models from [Official repo](https://github.com/microsoft/DNS-Challenge/tree/master/DNSMOS) and save them under `DNSMOS`. Then, run DNSMOS evaluation on the baseline-testclips saved in the above step: +``` +# Model=SepFormer +python dnsmos_local.py -t results/sepformer-enhancement-16k/1234/save/baseline_audio_results/enhanced_testclips/ -o dnsmos_enhance.csv + +# Model=Noisy +python dnsmos_local.py -t -o dnsmos_noisy.csv +``` + +## **Results** +1. The DNS challenge doesn't provide the ground-truth clean files for dev test. Therefore, we randomly separate out 5% of the training set as a valid set so that we can compute valid stats like Si-SNR and PESQ during validation. Here we show validation performance. + + | Sampling rate | Valid Si-SNR | Valid PESQ | HuggingFace link | Full Model link | + |---------------|--------------|------------|-------------------|------------| + | 16k | -10.6 | 2.06 | [HuggingFace](https://huggingface.co/speechbrain/sepformer-dns4-16k-enhancement) | https://www.dropbox.com/sh/d3rp5d3gjysvy7c/AACmwcEkm_IFvaW1lt2GdtQka?dl=0 | + +2. Evaluation on DNS4 2022 baseline dev set using DNSMOS. + + | Model | SIG | BAK | OVRL | + |------------|--------|--------|--------| + | Noisy | 2.984 | 2.560 | 2.205 | + | Baseline: NSNet2| 3.014 | 3.942 | 2.712 | + | **SepFormer** | 2.999 | 3.076 | 2.437 | + +We performed 45 epochs of training for the enhancement using an 8 X RTXA6000 48GB GPU. On average, each epoch took approximately 9.25 hours to complete. **Consider training it for at least 90-100 epochs for superior performance.** + +**NOTE** +- Refer to [NSNet2](https://github.com/microsoft/DNS-Challenge/tree/5582dcf5ba43155621de72a035eb54a7d233af14/NSNet2-baseline) on how to perform enhancement on the baseline dev set (noisy testclips) using the baseline model, NSNet2.
+ +## **Computing power** +Kindly be aware that in terms of computational power, training can be extremely resource demanding due to the dataset's large size and the complexity of the SepFormer model. To handle the size of 1300 hours of clean-noisy pairs, we employed a multi-GPU distributed data-parallel (DDP) training scheme on an Nvidia 8 X RTXA6000 48GB GPU. The training process lasted for 17 days, for just 45 epochs. + +## **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +## **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De 
Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` + + +**Citing SepFormer** +```bibtex +@inproceedings{subakan2021attention, + title={Attention is All You Need in Speech Separation}, + author={Cem Subakan and Mirco Ravanelli and Samuele Cornell and Mirko Bronzi and Jianyuan Zhong}, + year={2021}, + booktitle={ICASSP 2021} +} +``` diff --git a/recipes/DNS/enhancement/composite_eval.py b/recipes/DNS/enhancement/composite_eval.py new file mode 100644 index 0000000000..7f90fd0f2e --- /dev/null +++ b/recipes/DNS/enhancement/composite_eval.py @@ -0,0 +1,465 @@ +"""Composite objective enhancement scores in Python (CSIG, CBAK, COVL) + +Taken from https://github.com/facebookresearch/denoiser/blob/master/scripts/matlab_eval.py + +Authors + * adiyoss (https://github.com/adiyoss) +""" + +import os +import sys + +import librosa +import numpy as np +from pesq import pesq +from scipy.linalg import toeplitz +from tqdm import tqdm + + +def eval_composite(ref_wav, deg_wav, sample_rate): + """Evaluate audio quality metrics based on reference + and degraded audio signals. + This function computes various audio quality metrics, + including PESQ, CSIG, CBAK, and COVL, based on the + reference and degraded audio signals provided. 
+ """ + ref_wav = ref_wav.reshape(-1) + deg_wav = deg_wav.reshape(-1) + + alpha = 0.95 + len_ = min(ref_wav.shape[0], deg_wav.shape[0]) + ref_wav = ref_wav[:len_] + deg_wav = deg_wav[:len_] + + # Compute WSS measure + wss_dist_vec = wss(ref_wav, deg_wav, sample_rate) + wss_dist_vec = sorted(wss_dist_vec, reverse=False) + wss_dist = np.mean(wss_dist_vec[: int(round(len(wss_dist_vec) * alpha))]) + + # Compute LLR measure + LLR_dist = llr(ref_wav, deg_wav, sample_rate) + LLR_dist = sorted(LLR_dist, reverse=False) + LLRs = LLR_dist + LLR_len = round(len(LLR_dist) * alpha) + llr_mean = np.mean(LLRs[:LLR_len]) + + # Compute the SSNR + snr_mean, segsnr_mean = SSNR(ref_wav, deg_wav, sample_rate) + segSNR = np.mean(segsnr_mean) + + # Compute the PESQ + pesq_raw = PESQ(ref_wav, deg_wav, sample_rate) + + Csig = 3.093 - 1.029 * llr_mean + 0.603 * pesq_raw - 0.009 * wss_dist + Csig = trim_mos(Csig) + Cbak = 1.634 + 0.478 * pesq_raw - 0.007 * wss_dist + 0.063 * segSNR + Cbak = trim_mos(Cbak) + Covl = 1.594 + 0.805 * pesq_raw - 0.512 * llr_mean - 0.007 * wss_dist + Covl = trim_mos(Covl) + + return {"pesq": pesq_raw, "csig": Csig, "cbak": Cbak, "covl": Covl} + + +# ----------------------------- HELPERS ------------------------------------ # +def trim_mos(val): + """Trim a value to be within the MOS (Mean Opinion Score) + range [1, 5]. + """ + return min(max(val, 1), 5) + + +def lpcoeff(speech_frame, model_order): + """Calculate linear prediction (LP) coefficients using + the autocorrelation method. 
+ """ + # (1) Compute Autocor lags + winlength = speech_frame.shape[0] + R = [] + for k in range(model_order + 1): + first = speech_frame[: (winlength - k)] + second = speech_frame[k:winlength] + R.append(np.sum(first * second)) + + # (2) Lev-Durbin + a = np.ones((model_order,)) + E = np.zeros((model_order + 1,)) + rcoeff = np.zeros((model_order,)) + E[0] = R[0] + for i in range(model_order): + if i == 0: + sum_term = 0 + else: + a_past = a[:i] + sum_term = np.sum(a_past * np.array(R[i:0:-1])) + rcoeff[i] = (R[i + 1] - sum_term) / E[i] + a[i] = rcoeff[i] + if i > 0: + a[:i] = a_past[:i] - rcoeff[i] * a_past[::-1] + E[i + 1] = (1 - rcoeff[i] * rcoeff[i]) * E[i] + acorr = np.array(R, dtype=np.float32) + refcoeff = np.array(rcoeff, dtype=np.float32) + a = a * -1 + lpparams = np.array([1] + list(a), dtype=np.float32) + acorr = np.array(acorr, dtype=np.float32) + refcoeff = np.array(refcoeff, dtype=np.float32) + lpparams = np.array(lpparams, dtype=np.float32) + + return acorr, refcoeff, lpparams + + +# -------------------------------------------------------------------------- # + + +# ---------------------- Speech Quality Metric ----------------------------- # +def PESQ(ref_wav, deg_wav, sample_rate): + """Compute PESQ score.""" + psq_mode = "wb" if sample_rate == 16000 else "nb" + return pesq(sample_rate, ref_wav, deg_wav, psq_mode) + + +def SSNR(ref_wav, deg_wav, srate=16000, eps=1e-10): + """Segmental Signal-to-Noise Ratio Objective Speech Quality Measure + This function implements the segmental signal-to-noise ratio + as defined in [1, p. 45] (see Equation 2.12). + """ + clean_speech = ref_wav + processed_speech = deg_wav + clean_length = ref_wav.shape[0] + + # scale both to have same dynamic range. Remove DC too. 
+ clean_speech -= clean_speech.mean() + processed_speech -= processed_speech.mean() + processed_speech *= np.max(np.abs(clean_speech)) / np.max( + np.abs(processed_speech) + ) + + # Signal-to-Noise Ratio + dif = ref_wav - deg_wav + overall_snr = 10 * np.log10(np.sum(ref_wav**2) / (np.sum(dif**2) + 10e-20)) + # global variables + winlength = int(np.round(30 * srate / 1000)) # 30 msecs + skiprate = winlength // 4 + MIN_SNR = -10 + MAX_SNR = 35 + + # For each frame, calculate SSNR + num_frames = int(clean_length / skiprate - (winlength / skiprate)) + start = 0 + time = np.linspace(1, winlength, winlength) / (winlength + 1) + window = 0.5 * (1 - np.cos(2 * np.pi * time)) + segmental_snr = [] + + for frame_count in range(int(num_frames)): + # (1) get the frames for the test and ref speech. + # Apply Hanning Window + clean_frame = clean_speech[start : start + winlength] + processed_frame = processed_speech[start : start + winlength] + clean_frame = clean_frame * window + processed_frame = processed_frame * window + + # (2) Compute Segmental SNR + signal_energy = np.sum(clean_frame**2) + noise_energy = np.sum((clean_frame - processed_frame) ** 2) + segmental_snr.append( + 10 * np.log10(signal_energy / (noise_energy + eps) + eps) + ) + segmental_snr[-1] = max(segmental_snr[-1], MIN_SNR) + segmental_snr[-1] = min(segmental_snr[-1], MAX_SNR) + start += int(skiprate) + return overall_snr, segmental_snr + + +def wss(ref_wav, deg_wav, srate): + """Calculate Weighted Spectral Slope (WSS) distortion + measure between reference and degraded audio signals. + This function computes the WSS distortion measure using + critical band filters and spectral slope differences. 
+ """ + clean_speech = ref_wav + processed_speech = deg_wav + clean_length = ref_wav.shape[0] + processed_length = deg_wav.shape[0] + + assert clean_length == processed_length, clean_length + + winlength = round(30 * srate / 1000.0) # 240 wlen in samples + skiprate = np.floor(winlength / 4) + max_freq = srate / 2 + num_crit = 25 # num of critical bands + + n_fft = int(2 ** np.ceil(np.log(2 * winlength) / np.log(2))) + n_fftby2 = int(n_fft / 2) + Kmax = 20 + Klocmax = 1 + + # Critical band filter definitions (Center frequency and BW in Hz) + cent_freq = [ + 50.0, + 120, + 190, + 260, + 330, + 400, + 470, + 540, + 617.372, + 703.378, + 798.717, + 904.128, + 1020.38, + 1148.30, + 1288.72, + 1442.54, + 1610.70, + 1794.16, + 1993.93, + 2211.08, + 2446.71, + 2701.97, + 2978.04, + 3276.17, + 3597.63, + ] + bandwidth = [ + 70.0, + 70, + 70, + 70, + 70, + 70, + 70, + 77.3724, + 86.0056, + 95.3398, + 105.411, + 116.256, + 127.914, + 140.423, + 153.823, + 168.154, + 183.457, + 199.776, + 217.153, + 235.631, + 255.255, + 276.072, + 298.126, + 321.465, + 346.136, + ] + + bw_min = bandwidth[0] # min critical bandwidth + + # set up critical band filters. Note here that Gaussianly shaped filters + # are used. Also, the sum of the filter weights are equivalent for each + # critical band filter. Filter less than -30 dB and set to zero. 
+ min_factor = np.exp(-30.0 / (2 * 2.303)) # -30 dB point of filter + + crit_filter = np.zeros((num_crit, n_fftby2)) + all_f0 = [] + for i in range(num_crit): + f0 = (cent_freq[i] / max_freq) * (n_fftby2) + all_f0.append(np.floor(f0)) + bw = (bandwidth[i] / max_freq) * (n_fftby2) + norm_factor = np.log(bw_min) - np.log(bandwidth[i]) + j = list(range(n_fftby2)) + crit_filter[i, :] = np.exp( + -11 * (((j - np.floor(f0)) / bw) ** 2) + norm_factor + ) + crit_filter[i, :] = crit_filter[i, :] * (crit_filter[i, :] > min_factor) + + # For each frame of input speech, compute Weighted Spectral Slope Measure + num_frames = int(clean_length / skiprate - (winlength / skiprate)) + start = 0 # starting sample + time = np.linspace(1, winlength, winlength) / (winlength + 1) + window = 0.5 * (1 - np.cos(2 * np.pi * time)) + distortion = [] + + for frame_count in range(num_frames): + # (1) Get the Frames for the test and reference speech. + # Multiply by Hanning window. + clean_frame = clean_speech[start : start + winlength] + processed_frame = processed_speech[start : start + winlength] + clean_frame = clean_frame * window + processed_frame = processed_frame * window + + # (2) Compute Power Spectrum of clean and processed + clean_spec = np.abs(np.fft.fft(clean_frame, n_fft)) ** 2 + processed_spec = np.abs(np.fft.fft(processed_frame, n_fft)) ** 2 + clean_energy = [None] * num_crit + processed_energy = [None] * num_crit + + # (3) Compute Filterbank output energies (in dB) + for i in range(num_crit): + clean_energy[i] = np.sum(clean_spec[:n_fftby2] * crit_filter[i, :]) + processed_energy[i] = np.sum( + processed_spec[:n_fftby2] * crit_filter[i, :] + ) + clean_energy = np.array(clean_energy).reshape(-1, 1) + eps = np.ones((clean_energy.shape[0], 1)) * 1e-10 + clean_energy = np.concatenate((clean_energy, eps), axis=1) + clean_energy = 10 * np.log10(np.max(clean_energy, axis=1)) + processed_energy = np.array(processed_energy).reshape(-1, 1) + processed_energy = 
np.concatenate((processed_energy, eps), axis=1) + processed_energy = 10 * np.log10(np.max(processed_energy, axis=1)) + + # (4) Compute Spectral Shape (dB[i+1] - dB[i]) + clean_slope = clean_energy[1:num_crit] - clean_energy[: num_crit - 1] + processed_slope = ( + processed_energy[1:num_crit] - processed_energy[: num_crit - 1] + ) + + # (5) Find the nearest peak locations in the spectra to each + # critical band. If the slope is negative, we search + # to the left. If positive, we search to the right. + clean_loc_peak = [] + processed_loc_peak = [] + for i in range(num_crit - 1): + if clean_slope[i] > 0: + # search to the right + n = i + while n < num_crit - 1 and clean_slope[n] > 0: + n += 1 + clean_loc_peak.append(clean_energy[n - 1]) + else: + # search to the left + n = i + while n >= 0 and clean_slope[n] <= 0: + n -= 1 + clean_loc_peak.append(clean_energy[n + 1]) + # find the peaks in the processed speech signal + if processed_slope[i] > 0: + n = i + while n < num_crit - 1 and processed_slope[n] > 0: + n += 1 + processed_loc_peak.append(processed_energy[n - 1]) + else: + n = i + while n >= 0 and processed_slope[n] <= 0: + n -= 1 + processed_loc_peak.append(processed_energy[n + 1]) + + # (6) Compute the WSS Measure for this frame. This includes + # determination of the weighting function + dBMax_clean = max(clean_energy) + dBMax_processed = max(processed_energy) + + # The weights are calculated by averaging individual + # weighting factors from the clean and processed frame. + # These weights W_clean and W_processed should range + # from 0 to 1 and place more emphasis on spectral + # peaks and less emphasis on slope differences in spectral + # valleys. This procedure is described on page 1280 of + # Klatt's 1982 ICASSP paper. 
+ clean_loc_peak = np.array(clean_loc_peak) + processed_loc_peak = np.array(processed_loc_peak) + Wmax_clean = Kmax / (Kmax + dBMax_clean - clean_energy[: num_crit - 1]) + Wlocmax_clean = Klocmax / ( + Klocmax + clean_loc_peak - clean_energy[: num_crit - 1] + ) + W_clean = Wmax_clean * Wlocmax_clean + Wmax_processed = Kmax / ( + Kmax + dBMax_processed - processed_energy[: num_crit - 1] + ) + Wlocmax_processed = Klocmax / ( + Klocmax + processed_loc_peak - processed_energy[: num_crit - 1] + ) + W_processed = Wmax_processed * Wlocmax_processed + W = (W_clean + W_processed) / 2 + distortion.append( + np.sum( + W + * ( + clean_slope[: num_crit - 1] + - processed_slope[: num_crit - 1] + ) + ** 2 + ) + ) + + # this normalization is not part of Klatt's paper, but helps + # to normalize the measure. Here we scale the measure by the sum of the + # weights + distortion[frame_count] = distortion[frame_count] / np.sum(W) + start += int(skiprate) + return distortion + + +def llr(ref_wav, deg_wav, srate): + """Calculate Log Likelihood Ratio (LLR) distortion measure + between reference and degraded audio signals. This function + computes the LLR distortion measure between reference and + degraded audio signals using LPC analysis and autocorrelation + logs. 
+ """ + clean_speech = ref_wav + processed_speech = deg_wav + clean_length = ref_wav.shape[0] + processed_length = deg_wav.shape[0] + assert clean_length == processed_length, clean_length + + winlength = round(30 * srate / 1000.0) # 240 wlen in samples + skiprate = np.floor(winlength / 4) + if srate < 10000: + # LPC analysis order + P = 10 + else: + P = 16 + + # For each frame of input speech, calculate the Log Likelihood Ratio + num_frames = int(clean_length / skiprate - (winlength / skiprate)) + start = 0 + time = np.linspace(1, winlength, winlength) / (winlength + 1) + window = 0.5 * (1 - np.cos(2 * np.pi * time)) + distortion = [] + + for frame_count in range(num_frames): + # (1) Get the Frames for the test and reference speech. + # Multiply by Hanning window. + clean_frame = clean_speech[start : start + winlength] + processed_frame = processed_speech[start : start + winlength] + clean_frame = clean_frame * window + processed_frame = processed_frame * window + + # (2) Get the autocorrelation logs and LPC params used + # to compute the LLR measure + R_clean, Ref_clean, A_clean = lpcoeff(clean_frame, P) + R_processed, Ref_processed, A_processed = lpcoeff(processed_frame, P) + A_clean = A_clean[None, :] + A_processed = A_processed[None, :] + + # (3) Compute the LLR measure + numerator = A_processed.dot(toeplitz(R_clean)).dot(A_processed.T) + denominator = A_clean.dot(toeplitz(R_clean)).dot(A_clean.T) + + if (numerator / denominator) <= 0: + print(f"Numerator: {numerator}") + print(f"Denominator: {denominator}") + + log_ = np.log(numerator / denominator) + distortion.append(np.squeeze(log_)) + start += int(skiprate) + return np.nan_to_num(np.array(distortion)) + + +# -------------------------------------------------------------------------- # + +if __name__ == "__main__": + clean_path = sys.argv[1] + enhanced_path = sys.argv[2] + csig, cbak, covl, count = 0, 0, 0, 0 + for _file in tqdm(os.listdir(clean_path)): + if _file.endswith("wav"): + clean_path_f = 
os.path.join(clean_path, _file) + enhanced_path_f = os.path.join( + enhanced_path, _file[:-4] + "_enhanced.wav" + ) + clean_sig = librosa.load(clean_path_f, sr=None)[0] + enhanced_sig = librosa.load(enhanced_path_f, sr=None)[0] + res = eval_composite(clean_sig, enhanced_sig) + csig += res["csig"] + cbak += res["cbak"] + covl += res["covl"] + pesq += res["pesq"] + count += 1 + print(f"CSIG: {csig / count}, CBAK: {cbak / count}, COVL: {covl / count}") diff --git a/recipes/DNS/enhancement/dnsmos_local.py b/recipes/DNS/enhancement/dnsmos_local.py new file mode 100644 index 0000000000..1bda5285ac --- /dev/null +++ b/recipes/DNS/enhancement/dnsmos_local.py @@ -0,0 +1,191 @@ +""" +Usage: + python dnsmos_local.py -t path/to/sepformer_enhc_clips -o dnsmos_enhance.csv + +Ownership: Microsoft +""" + +import argparse +import concurrent.futures +import glob +import os + +import librosa +import numpy as np +import onnxruntime as ort +import pandas as pd +import soundfile as sf +from tqdm import tqdm + +SAMPLING_RATE = 16000 +INPUT_LENGTH = 9.01 + + +class ComputeScore: + """A class for computing MOS scores using an ONNX model and polynomial fitting.""" + + def __init__(self, primary_model_path) -> None: + self.onnx_sess = ort.InferenceSession(primary_model_path) + + def get_polyfit_val(self, sig, bak, ovr, is_personalized_MOS): + """Calculate MOS scores using polynomial fitting. + Returns a tuple containing MOS scores for speech, + background, and overall quality. 
+ """ + # if is_personalized_MOS: + # p_ovr = np.poly1d([-0.00533021, 0.005101 , 1.18058466, -0.11236046]) + # p_sig = np.poly1d([-0.01019296, 0.02751166, 1.19576786, -0.24348726]) + # p_bak = np.poly1d([-0.04976499, 0.44276479, -0.1644611 , 0.96883132]) + # else: + p_ovr = np.poly1d([-0.06766283, 1.11546468, 0.04602535]) + p_sig = np.poly1d([-0.08397278, 1.22083953, 0.0052439]) + p_bak = np.poly1d([-0.13166888, 1.60915514, -0.39604546]) + + sig_poly = p_sig(sig) + bak_poly = p_bak(bak) + ovr_poly = p_ovr(ovr) + + return sig_poly, bak_poly, ovr_poly + + def __call__(self, fpath, sampling_rate, is_personalized_MOS): + """Compute MOS scores for an audio segment.""" + aud, input_fs = sf.read(fpath) + fs = sampling_rate + if input_fs != fs: + audio = librosa.resample(aud, input_fs, fs) + else: + audio = aud + actual_audio_len = len(audio) + len_samples = int(INPUT_LENGTH * fs) + while len(audio) < len_samples: + audio = np.append(audio, audio) + + num_hops = int(np.floor(len(audio) / fs) - INPUT_LENGTH) + 1 + hop_len_samples = fs + predicted_mos_sig_seg_raw = [] + predicted_mos_bak_seg_raw = [] + predicted_mos_ovr_seg_raw = [] + predicted_mos_sig_seg = [] + predicted_mos_bak_seg = [] + predicted_mos_ovr_seg = [] + + for idx in range(num_hops): + audio_seg = audio[ + int(idx * hop_len_samples) : int( + (idx + INPUT_LENGTH) * hop_len_samples + ) + ] + if len(audio_seg) < len_samples: + continue + + input_features = np.array(audio_seg).astype("float32")[ + np.newaxis, : + ] + oi = {"input_1": input_features} + mos_sig_raw, mos_bak_raw, mos_ovr_raw = self.onnx_sess.run( + None, oi + )[0][0] + mos_sig, mos_bak, mos_ovr = self.get_polyfit_val( + mos_sig_raw, mos_bak_raw, mos_ovr_raw, is_personalized_MOS=0 + ) + predicted_mos_sig_seg_raw.append(mos_sig_raw) + predicted_mos_bak_seg_raw.append(mos_bak_raw) + predicted_mos_ovr_seg_raw.append(mos_ovr_raw) + predicted_mos_sig_seg.append(mos_sig) + predicted_mos_bak_seg.append(mos_bak) + predicted_mos_ovr_seg.append(mos_ovr) + + 
clip_dict = { + "filename": fpath, + "len_in_sec": actual_audio_len / fs, + "sr": fs, + } + clip_dict["num_hops"] = num_hops + clip_dict["OVRL_raw"] = np.mean(predicted_mos_ovr_seg_raw) + clip_dict["SIG_raw"] = np.mean(predicted_mos_sig_seg_raw) + clip_dict["BAK_raw"] = np.mean(predicted_mos_bak_seg_raw) + clip_dict["OVRL"] = np.mean(predicted_mos_ovr_seg) + clip_dict["SIG"] = np.mean(predicted_mos_sig_seg) + clip_dict["BAK"] = np.mean(predicted_mos_bak_seg) + return clip_dict + + +def main(args): + models = glob.glob(os.path.join(args.testset_dir, "*")) + audio_clips_list = [] + + if args.personalized_MOS: + primary_model_path = os.path.join("pDNSMOS", "sig_bak_ovr.onnx") + else: + primary_model_path = os.path.join("DNSMOS", "sig_bak_ovr.onnx") + + compute_score = ComputeScore(primary_model_path) + + rows = [] + clips = [] + clips = glob.glob(os.path.join(args.testset_dir, "*.wav")) + is_personalized_eval = args.personalized_MOS + desired_fs = SAMPLING_RATE + for m in tqdm(models): + max_recursion_depth = 10 + audio_path = os.path.join(args.testset_dir, m) + audio_clips_list = glob.glob(os.path.join(audio_path, "*.wav")) + while len(audio_clips_list) == 0 and max_recursion_depth > 0: + audio_path = os.path.join(audio_path, "**") + audio_clips_list = glob.glob(os.path.join(audio_path, "*.wav")) + max_recursion_depth -= 1 + clips.extend(audio_clips_list) + + with concurrent.futures.ThreadPoolExecutor() as executor: + future_to_url = { + executor.submit( + compute_score, clip, desired_fs, is_personalized_eval + ): clip + for clip in clips + } + for future in tqdm(concurrent.futures.as_completed(future_to_url)): + clip = future_to_url[future] + try: + data = future.result() + except Exception as exc: + print("%r generated an exception: %s" % (clip, exc)) + else: + rows.append(data) + + df = pd.DataFrame(rows) + if args.csv_path: + csv_path = args.csv_path + df.to_csv(csv_path) + else: + print(df.describe()) + + print("======== DNSMOS scores ======== ") + print("SIG:", 
df.loc[:, "SIG"].mean()) + print("BAK:", df.loc[:, "BAK"].mean()) + print("OVRL:", df.loc[:, "OVRL"].mean()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-t", + "--testset_dir", + default=".", + help="Path to the dir containing audio clips in .wav to be evaluated", + ) + parser.add_argument( + "-o", + "--csv_path", + default=None, + help="Dir to the csv that saves the results", + ) + parser.add_argument( + "-p", + "--personalized_MOS", + action="store_true", + help="Flag to indicate if personalized MOS score is needed or regular", + ) + + args = parser.parse_args() + + main(args) diff --git a/recipes/DNS/enhancement/extra_requirements.txt b/recipes/DNS/enhancement/extra_requirements.txt new file mode 100644 index 0000000000..5a9141099c --- /dev/null +++ b/recipes/DNS/enhancement/extra_requirements.txt @@ -0,0 +1,8 @@ +librosa +mir_eval +onnxruntime +pesq +pyroomacoustics>=0.7.3 +pystoi +tensorboard +webdataset diff --git a/recipes/DNS/enhancement/hparams/sepformer-dns-16k.yaml b/recipes/DNS/enhancement/hparams/sepformer-dns-16k.yaml new file mode 100644 index 0000000000..78d0685194 --- /dev/null +++ b/recipes/DNS/enhancement/hparams/sepformer-dns-16k.yaml @@ -0,0 +1,203 @@ +# ################################ +# Model: SepFormer model for speech enhancement +# https://arxiv.org/abs/2010.13154 +# +# Author: Sangeet Sagar 2022 +# Dataset : Microsoft-DNS 4 +# ################################ + +# Basic parameters +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/sepformer-enhancement-16k/ +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data params +data_folder: !PLACEHOLDER # ../noisyspeech_synthesizer/synthesized_data_shards/ +train_data: !ref /train_shards/ +valid_data: !ref /valid_shards/ +baseline_noisy_shards_folder: !PLACEHOLDER # ../DNS-shards/devsets_fullband/ 
+baseline_shards: !ref /shard-{000000..999999}.tar + +# Set to a directory on a large disk if using Webdataset shards hosted on the web. +shard_cache_dir: + +# Basic parameters +use_tensorboard: True +tensorboard_logs: !ref /logs/ +dereverberate: False + +# Experiment params +precision: fp16 # bf16, fp16 or fp32 +test_only: False +num_spks: 1 +noprogressbar: False +save_audio: True # Save estimated sources on disk +sample_rate: 16000 +audio_length: 4 # seconds +n_audio_to_save: 20 + +####################### Training Parameters #################################### +N_epochs: 100 +batch_size: 4 +batch_size_test: 1 +lr: 0.00015 +clip_grad_norm: 5 +loss_upper_lim: 999999 # this is the upper limit for an acceptable loss +# if True, the training sequences are cut to a specified length +limit_training_signal_len: False +# this is the length of sequences if we choose to limit +# the signal length of training sequences +training_signal_len: 32000 +ckpt_interval_minutes: 60 + +# Parameters for data augmentation +use_wavedrop: False +use_speedperturb: True +use_rand_shift: False +min_shift: -8000 +max_shift: 8000 + +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + +# loss thresholding -- this thresholds the training loss +threshold_byloss: True +threshold: -30 + +# Encoder parameters +N_encoder_out: 256 +out_channels: 256 +kernel_size: 16 +kernel_stride: 8 + +# Dataloader options +dataloader_opts: + batch_size: !ref + num_workers: 3 + +dataloader_opts_valid: + batch_size: !ref + num_workers: 3 + +dataloader_opts_test: + batch_size: !ref + num_workers: 3 + +# Specifying the network +Encoder: !new:speechbrain.lobes.models.dual_path.Encoder + kernel_size: !ref + out_channels: !ref + +SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock + num_layers: 8 + d_model: !ref + nhead: 8 + d_ffn: 1024 + dropout: 0 + use_positional_encoding: True + norm_before: True + +SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock + num_layers: 8 + d_model: !ref + nhead: 8 + d_ffn: 1024 + dropout: 0 + use_positional_encoding: True + norm_before: True + +MaskNet: !new:speechbrain.lobes.models.dual_path.Dual_Path_Model + num_spks: !ref + in_channels: !ref + out_channels: !ref + num_layers: 2 + K: 250 + intra_model: !ref + inter_model: !ref + norm: ln + linear_layer_after_inter_intra: False + skip_around_intra: True + +Decoder: !new:speechbrain.lobes.models.dual_path.Decoder + in_channels: !ref + out_channels: 1 + kernel_size: !ref + stride: !ref + bias: False + +optimizer: !name:torch.optim.Adam + lr: !ref + weight_decay: 0 + +loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper + +lr_scheduler: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau + factor: 0.5 + patience: 2 + 
dont_halve_until_epoch: 85 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +modules: + encoder: !ref + decoder: !ref + masknet: !ref + +save_all_checkpoints: False +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + encoder: !ref + decoder: !ref + masknet: !ref + counter: !ref + lr_scheduler: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +## Uncomment to fine-tune a pre-trained model. +# pretrained_enhancement: !new:speechbrain.utils.parameter_transfer.Pretrainer +# collect_in: !ref +# loadables: +# encoder: !ref +# decoder: !ref +# masknet: !ref +# paths: +# encoder: !PLACEHOLDER +# decoder: !PLACEHOLDER +# masknet: !PLACEHOLDER diff --git a/recipes/DNS/enhancement/train.py b/recipes/DNS/enhancement/train.py new file mode 100755 index 0000000000..907c626751 --- /dev/null +++ b/recipes/DNS/enhancement/train.py @@ -0,0 +1,833 @@ +#!/usr/bin/env/python3 +"""Recipe for training a speech enhancement system on Microsoft DNS +(Deep Noise Suppression) challenge dataset using SepFormer architecture. +The system employs an encoder,a decoder, and a masking network. + +To run this recipe, do the following: +python train.py hparams/sepformer-dns-16k.yaml --data_folder --baseline_noisy_shards_folder + +The experiment file is flexible enough to support different neural +networks. By properly changing the parameter files, you can try +different architectures. 
+ +Authors + * Sangeet Sagar 2022 + * Cem Subakan 2020 + * Mirco Ravanelli 2020 + * Samuele Cornell 2020 + * Mirko Bronzi 2020 + * Jianyuan Zhong 2020 +""" + +import csv +import glob +import json +import os +import sys +from functools import partial +from typing import Dict + +import braceexpand +import numpy as np +import torch +import torch.nn.functional as F +import webdataset as wds +from composite_eval import eval_composite +from hyperpyyaml import load_hyperpyyaml +from pesq import pesq +from pystoi import stoi +from tqdm import tqdm + +import speechbrain as sb +import speechbrain.nnet.schedulers as schedulers +from speechbrain.dataio import audio_io +from speechbrain.dataio.batch import PaddedBatch +from speechbrain.processing.features import spectral_magnitude +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger +from speechbrain.utils.metric_stats import MetricStats + + +# Define training procedure +class Enhancement(sb.Brain): + def compute_forward(self, noisy, clean, stage, noise=None): + """Forward computations from the noisy to the separated signals.""" + # Unpack lists and put tensors in the right device + noisy, noisy_lens = noisy + noisy, noisy_lens = noisy.to(self.device), noisy_lens.to(self.device) + # Convert clean to tensor + clean = clean[0].unsqueeze(-1).to(self.device) + + # Add speech distortions + if stage == sb.Stage.TRAIN: + with torch.no_grad(): + if self.hparams.use_speedperturb or self.hparams.use_rand_shift: + noisy, clean = self.add_speed_perturb(clean, noisy_lens) + + # Reverb already added, not adding any reverb + clean_rev = clean + noisy = clean.sum(-1) + # if we reverberate, we set the clean to be reverberant + if not self.hparams.dereverberate: + clean = clean_rev + + noise = noise.to(self.device) + len_noise = noise.shape[1] + len_noisy = noisy.shape[1] + min_len = min(len_noise, len_noisy) + + # add the noise + noisy = noisy[:, :min_len] + noise[:, :min_len] + + # fix the 
length of clean also + clean = clean[:, :min_len, :] + + if self.hparams.use_wavedrop: + noisy = self.hparams.drop_chunk(noisy, noisy_lens) + noisy = self.hparams.drop_freq(noisy) + + if self.hparams.limit_training_signal_len: + noisy, clean = self.cut_signals(noisy, clean) + + # Enhancement + if self.use_freq_domain: + noisy_w = self.compute_feats(noisy) + est_mask = self.modules.masknet(noisy_w) + + sep_h = noisy_w * est_mask + est_source = self.hparams.resynth(torch.expm1(sep_h), noisy) + else: + noisy_w = self.hparams.Encoder(noisy) + est_mask = self.modules.masknet(noisy_w) + + sep_h = noisy_w * est_mask + est_source = self.hparams.Decoder(sep_h[0]) + + # T changed after conv1d in encoder, fix it here + T_origin = noisy.size(1) + T_est = est_source.size(1) + est_source = est_source.squeeze(-1) + if T_origin > T_est: + est_source = F.pad(est_source, (0, T_origin - T_est)) + else: + est_source = est_source[:, :T_origin] + + return [est_source, sep_h], clean.squeeze(-1) + + def compute_feats(self, wavs): + """Feature computation pipeline""" + feats = self.hparams.Encoder(wavs) + feats = spectral_magnitude(feats, power=0.5) + feats = torch.log1p(feats) + return feats + + def compute_objectives(self, predictions, clean): + """Computes the si-snr loss""" + predicted_wavs, predicted_specs = predictions + + if self.use_freq_domain: + target_specs = self.compute_feats(clean) + return self.hparams.loss(target_specs, predicted_specs) + else: + return self.hparams.loss( + clean.unsqueeze(-1), predicted_wavs.unsqueeze(-1) + ) + + def fit_batch(self, batch): + """Trains one batch""" + + # Unpacking batch list + noisy = batch.noisy_sig + clean = batch.clean_sig + noise = batch.noise_sig[0] + + with self.training_ctx: + predictions, clean = self.compute_forward( + noisy, clean, sb.Stage.TRAIN, noise + ) + loss = self.compute_objectives(predictions, clean) + + # hard threshold the easy dataitems + if self.hparams.threshold_byloss: + th = self.hparams.threshold + loss_to_keep = 
loss[loss > th] + if loss_to_keep.nelement() > 0: + loss = loss_to_keep.mean() + else: + loss = loss.mean() + + if loss < self.hparams.loss_upper_lim and loss.nelement() > 0: + self.scaler.scale(loss).backward() + if self.hparams.clip_grad_norm >= 0: + self.scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), + self.hparams.clip_grad_norm, + ) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + self.nonfinite_count += 1 + logger.info( + f"infinite loss or empty loss! it happened {self.nonfinite_count} times so far - skipping this batch" + ) + loss.data = torch.tensor(0.0).to(self.device) + self.optimizer.zero_grad() + + return loss.detach().cpu() + + def evaluate_batch(self, batch, stage): + """Computations needed for validation/test batches""" + + snt_id = batch.id + noisy = batch.noisy_sig + clean = batch.clean_sig + + with torch.no_grad(): + predictions, clean = self.compute_forward(noisy, clean, stage) + loss = self.compute_objectives(predictions, clean) + loss = torch.mean(loss) + + if stage != sb.Stage.TRAIN: + self.pesq_metric.append( + ids=batch.id, predict=predictions[0].cpu(), target=clean.cpu() + ) + + # Manage audio file saving + if stage == sb.Stage.TEST and self.hparams.save_audio: + if hasattr(self.hparams, "n_audio_to_save"): + if self.hparams.n_audio_to_save > 0: + self.save_audio(snt_id[0], noisy, clean, predictions[0]) + self.hparams.n_audio_to_save += -1 + else: + self.save_audio(snt_id[0], noisy, clean, predictions[0]) + + return loss.detach() + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + # Define function taking (prediction, target) for parallel eval + def pesq_eval(pred_wav, target_wav): + """Computes the PESQ evaluation metric""" + psq_mode = "wb" if self.hparams.sample_rate == 16000 else "nb" + try: + return pesq( + fs=self.hparams.sample_rate, + ref=target_wav.numpy(), + deg=pred_wav.numpy(), + 
mode=psq_mode, + ) + except Exception: + print("pesq encountered an error for this data item") + return 0 + + self.pesq_metric = MetricStats( + metric=pesq_eval, n_jobs=1, batch_eval=False + ) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"si-snr": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stats = { + "si-snr": stage_loss, + "pesq": self.pesq_metric.summarize("average"), + } + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + # Save valid logs in torch.TensorBoard + valid_stats = { + "Epochs": epoch, + "Valid SI-SNR": stage_loss, + "Valid PESQ": self.pesq_metric.summarize("average"), + } + if self.hparams.use_tensorboard: + self.hparams.tensorboard_train_logger.log_stats(valid_stats) + + # Learning rate annealing + if isinstance( + self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau + ): + current_lr, next_lr = self.hparams.lr_scheduler( + [self.optimizer], epoch, stage_loss + ) + schedulers.update_learning_rate(self.optimizer, next_lr) + else: + # if we do not use the reducelronplateau, we do not change the lr + current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"] + + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch, "lr": current_lr}, + train_stats=self.train_stats, + valid_stats=stats, + ) + if ( + hasattr(self.hparams, "save_all_checkpoints") + and self.hparams.save_all_checkpoints + ): + self.checkpointer.save_checkpoint(meta={"pesq": stats["pesq"]}) + else: + self.checkpointer.save_and_keep_only( + meta={"pesq": stats["pesq"]}, + max_keys=["pesq"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stats, + ) + + def add_speed_perturb(self, clean, targ_lens): + """Adds speed perturbation and random_shift to the input signals""" + + 
min_len = -1 + recombine = False + + if self.hparams.use_speedperturb: + # Performing speed change (independently on each source) + new_clean = [] + recombine = True + + for i in range(clean.shape[-1]): + new_target = self.hparams.speed_perturb(clean[:, :, i]) + new_clean.append(new_target) + if i == 0: + min_len = new_target.shape[-1] + else: + if new_target.shape[-1] < min_len: + min_len = new_target.shape[-1] + + if self.hparams.use_rand_shift: + # Performing random_shift (independently on each source) + recombine = True + for i in range(clean.shape[-1]): + rand_shift = torch.randint( + self.hparams.min_shift, self.hparams.max_shift, (1,) + ) + new_clean[i] = new_clean[i].to(self.device) + new_clean[i] = torch.roll( + new_clean[i], shifts=(rand_shift[0],), dims=1 + ) + + # Re-combination + if recombine: + if self.hparams.use_speedperturb: + clean = torch.zeros( + clean.shape[0], + min_len, + clean.shape[-1], + device=clean.device, + dtype=torch.float, + ) + for i, new_target in enumerate(new_clean): + clean[:, :, i] = new_clean[i][:, 0:min_len] + + noisy = clean.sum(-1) + return noisy, clean + + def cut_signals(self, noisy, clean): + """This function selects a random segment of a given length within the noisy. 
+ The corresponding clean are selected accordingly""" + randstart = torch.randint( + 0, + 1 + max(0, noisy.shape[1] - self.hparams.training_signal_len), + (1,), + ).item() + clean = clean[ + :, randstart : randstart + self.hparams.training_signal_len, : + ] + noisy = noisy[ + :, randstart : randstart + self.hparams.training_signal_len + ] + return noisy, clean + + def reset_layer_recursively(self, layer): + """Reinitializes the parameters of the neural networks""" + if hasattr(layer, "reset_parameters"): + layer.reset_parameters() + for child_layer in layer.modules(): + if layer != child_layer: + self.reset_layer_recursively(child_layer) + + def save_results(self, valid_data): + """This script calculates the SDR and SI-SNR metrics + and stores them in a CSV file. As this evaluation + method depends on a gold-standard reference signal, + it is applied exclusively to the valid set and excludes + the baseline data. + """ + # This package is required for SDR computation + from mir_eval.separation import bss_eval_sources + + # Create folders where to store audio + save_file = os.path.join( + self.hparams.output_folder, "valid_results.csv" + ) + + # Variable init + all_sdrs = [] + all_sdrs_i = [] + all_sisnrs = [] + all_sisnrs_i = [] + all_pesqs = [] + all_stois = [] + all_csigs = [] + all_cbaks = [] + all_covls = [] + csv_columns = [ + "snt_id", + "sdr", + "sdr_i", + "si-snr", + "si-snr_i", + "pesq", + "stoi", + "csig", + "cbak", + "covl", + ] + + valid_loader = sb.dataio.dataloader.make_dataloader( + valid_data, **self.hparams.dataloader_opts_test + ) + + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: + writer = csv.DictWriter(results_csv, fieldnames=csv_columns) + writer.writeheader() + + # Loop over all test sentence + with tqdm(valid_loader, dynamic_ncols=True) as t: + for i, batch in enumerate(t): + # Apply Enhancement + noisy, noisy_len = batch.noisy_sig + snt_id = batch.id + clean = batch.clean_sig + + with torch.no_grad(): + predictions, 
clean = self.compute_forward( + batch.noisy_sig, clean, sb.Stage.TEST + ) + + # Compute PESQ + psq_mode = ( + "wb" if self.hparams.sample_rate == 16000 else "nb" + ) + + try: + # Compute SI-SNR + sisnr = self.compute_objectives(predictions, clean) + + # Compute SI-SNR improvement + noisy_signal = noisy + + noisy_signal = noisy_signal.to(clean.device) + sisnr_baseline = self.compute_objectives( + [noisy_signal.squeeze(-1), None], clean + ) + sisnr_i = sisnr - sisnr_baseline + + # Compute SDR + sdr, _, _, _ = bss_eval_sources( + clean[0].t().cpu().numpy(), + predictions[0][0].t().detach().cpu().numpy(), + ) + + sdr_baseline, _, _, _ = bss_eval_sources( + clean[0].t().cpu().numpy(), + noisy_signal[0].t().detach().cpu().numpy(), + ) + + sdr_i = sdr.mean() - sdr_baseline.mean() + + # Compute PESQ + psq = pesq( + self.hparams.sample_rate, + clean.squeeze().cpu().numpy(), + predictions[0].squeeze().cpu().numpy(), + mode=psq_mode, + ) + # Compute STOI + stoi_score = stoi( + clean.squeeze().cpu().numpy(), + predictions[0].squeeze().cpu().numpy(), + fs_sig=self.hparams.sample_rate, + extended=False, + ) + # Compute CSIG, CBAK, COVL + composite_metrics = eval_composite( + clean.squeeze().cpu().numpy(), + predictions[0].squeeze().cpu().numpy(), + self.hparams.sample_rate, + ) + except Exception: + # this handles all sorts of error that may + # occur when evaluating an enhanced file. 
+ continue + + # Saving on a csv file + row = { + "snt_id": snt_id[0], + "sdr": sdr.mean(), + "sdr_i": sdr_i, + "si-snr": -sisnr.item(), + "si-snr_i": -sisnr_i.item(), + "pesq": psq, + "stoi": stoi_score, + "csig": composite_metrics["csig"], + "cbak": composite_metrics["cbak"], + "covl": composite_metrics["covl"], + } + writer.writerow(row) + + # Metric Accumulation + all_sdrs.append(sdr.mean()) + all_sdrs_i.append(sdr_i.mean()) + all_sisnrs.append(-sisnr.item()) + all_sisnrs_i.append(-sisnr_i.item()) + all_pesqs.append(psq) + all_stois.append(stoi_score) + all_csigs.append(composite_metrics["csig"]) + all_cbaks.append(composite_metrics["cbak"]) + all_covls.append(composite_metrics["covl"]) + + row = { + "snt_id": "avg", + "sdr": np.array(all_sdrs).mean(), + "sdr_i": np.array(all_sdrs_i).mean(), + "si-snr": np.array(all_sisnrs).mean(), + "si-snr_i": np.array(all_sisnrs_i).mean(), + "pesq": np.array(all_pesqs).mean(), + "stoi": np.array(all_stois).mean(), + "csig": np.array(all_csigs).mean(), + "cbak": np.array(all_cbaks).mean(), + "covl": np.array(all_covls).mean(), + } + writer.writerow(row) + + logger.info(f"Mean SISNR is {np.array(all_sisnrs).mean()}") + logger.info(f"Mean SISNRi is {np.array(all_sisnrs_i).mean()}") + logger.info(f"Mean SDR is {np.array(all_sdrs).mean()}") + logger.info(f"Mean SDRi is {np.array(all_sdrs_i).mean()}") + logger.info(f"Mean PESQ {np.array(all_pesqs).mean()}") + logger.info(f"Mean STOI {np.array(all_stois).mean()}") + logger.info(f"Mean CSIG {np.array(all_csigs).mean()}") + logger.info(f"Mean CBAK {np.array(all_cbaks).mean()}") + logger.info(f"Mean COVL {np.array(all_covls).mean()}") + + def save_audio(self, snt_id, noisy, clean, predictions): + "saves the test audio (noisy, clean, and estimated sources) on disk" + print("Saving enhanced sources (valid set)") + + # Create output folders + save_path = os.path.join( + self.hparams.save_folder, "valid_audio_results" + ) + save_path_enhanced = os.path.join(save_path, "enhanced_sources") 
+ save_path_clean = os.path.join(save_path, "clean_sources") + save_path_noisy = os.path.join(save_path, "noisy_sources") + + for path in [save_path_enhanced, save_path_clean, save_path_noisy]: + if not os.path.exists(path): + os.makedirs(path) + + # Estimated source + signal = predictions[0, :] + signal = signal / signal.abs().max() + save_file = os.path.join( + save_path_enhanced, f"item{snt_id}_sourcehat.wav" + ) + audio_io.save( + save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate + ) + + # Original source + signal = clean[0, :] + signal = signal / signal.abs().max() + save_file = os.path.join(save_path_clean, f"item{snt_id}_source.wav") + audio_io.save( + save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate + ) + + # Noisy source + signal = noisy[0][0, :] + signal = signal / signal.abs().max() + save_file = os.path.join(save_path_noisy, f"item{snt_id}_noisy.wav") + audio_io.save( + save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate + ) + + +def dataio_prep(hparams): + """Creates data processing pipeline""" + speech_dirs = [ + "read_speech", + "german_speech", + "french_speech", + "italian_speech", + "spanish_speech", + "russian_speech", + ] + audio_length = hparams["audio_length"] + + train_shard_patterns = [] + for dir in speech_dirs: + if not os.path.exists(os.path.join(hparams["train_data"], dir)): + dir = "" + shard_pattern = os.path.join(hparams["train_data"], dir, "shard-*.tar") + shard_files = glob.glob(shard_pattern) + train_shard_patterns.extend(shard_files) + + valid_shard_patterns = [] + for dir in speech_dirs: + if not os.path.exists(os.path.join(hparams["valid_data"], dir)): + dir = "" + shard_pattern = os.path.join(hparams["valid_data"], dir, "shard-*.tar") + shard_files = glob.glob(shard_pattern) + valid_shard_patterns.extend(shard_files) + + def meta_loader(split_path): + # Initialize the total number of samples + total_samples = 0 + + # Walk through the all subdirs + # eg. german_speech, read_speech, ... 
+ for root, _, files in os.walk(split_path): + for file in files: + if file == "meta.json": + meta_json_path = os.path.join(root, file) + with open(meta_json_path, "rb") as f: + meta = json.load(f) + total_samples += meta.get("num_data_samples", 0) + + return total_samples + + def train_audio_pipeline(sample_dict: Dict, random_chunk=True): + key = sample_dict["__key__"] + clean_wav = sample_dict["clean_file"] + noise_wav = sample_dict["noise_file"] + noisy_wav = sample_dict["noisy_file"] + clean_sig = sample_dict["clean_audio.pth"].squeeze() + noise_sig = sample_dict["noise_audio.pth"].squeeze() + noisy_sig = sample_dict["noisy_audio.pth"].squeeze() + + return { + "id": key, + "clean_wav": clean_wav, + "clean_sig": clean_sig, + "noise_wav": noise_wav, + "noise_sig": noise_sig, + "noisy_wav": noisy_wav, + "noisy_sig": noisy_sig, + } + + def baseline_audio_pipeline(sample_dict: Dict, random_chunk=True): + key = sample_dict["__key__"] + noisy_sig = sample_dict["audio.pth"].squeeze() + + return { + "id": key, + "noisy_wav": key, + "noisy_sig": noisy_sig, + } + + def create_combined_dataset(shard_patterns, cache_dir): + # mix multiple datasets, where each dataset consists of multiple shards + # e.g. combine read_speech, german_speech etc. each with multiple shards. 
+ urls = [ + url + for shard in shard_patterns + for url in braceexpand.braceexpand(shard) + ] + + combined_dataset = ( + wds.WebDataset( + urls, + shardshuffle=True, + cache_dir=cache_dir, + ) + .repeat() + .shuffle(1000) + .decode("pil") + .map(partial(train_audio_pipeline, random_chunk=True)) + ) + + return combined_dataset + + train_data = create_combined_dataset( + train_shard_patterns, hparams["shard_cache_dir"] + ) + train_samples = meta_loader(hparams["train_data"]) + logger.info(f"Training data- Number of samples: {train_samples}") + logger.info( + f"Training data - Total duration: {train_samples * audio_length / 3600:.2f} hours" + ) + + valid_data = create_combined_dataset( + valid_shard_patterns, hparams["shard_cache_dir"] + ) + valid_samples = meta_loader(hparams["valid_data"]) + logger.info(f"Valid data- Number of samples: {valid_samples}") + logger.info( + f"Valid data - Total duration: {valid_samples * audio_length / 3600:.2f} hours" + ) + + baseline_data = ( + wds.WebDataset( + hparams["baseline_shards"], + cache_dir=hparams["shard_cache_dir"], + ) + .repeat() + .shuffle(1000) + .decode("pil") + .map(partial(baseline_audio_pipeline, random_chunk=True)) + ) + + return train_data, valid_data, train_samples, valid_samples, baseline_data + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Logger info + logger = get_logger(__name__) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Update precision to bf16 if the device is CPU and precision is fp16 + if run_opts.get("device") == "cpu" and 
hparams.get("precision") == "fp16": + hparams["precision"] = "bf16" + + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs"] + ) + + ( + train_data, + valid_data, + num_train_samples, + num_valid_samples, + baseline_data, + ) = dataio_prep(hparams) + + # add collate_fn to dataloader options + hparams["dataloader_opts"]["collate_fn"] = PaddedBatch + hparams["dataloader_opts_valid"]["collate_fn"] = PaddedBatch + hparams["dataloader_opts_test"]["collate_fn"] = PaddedBatch + + hparams["dataloader_opts"]["looped_nominal_epoch"] = ( + num_train_samples // hparams["dataloader_opts"]["batch_size"] + ) + hparams["dataloader_opts_valid"]["looped_nominal_epoch"] = ( + num_valid_samples // hparams["dataloader_opts_valid"]["batch_size"] + ) + hparams["dataloader_opts_test"]["looped_nominal_epoch"] = ( + num_valid_samples // hparams["dataloader_opts_test"]["batch_size"] + ) + + # Load pretrained model if pretrained_enhancement is present in the yaml + if "pretrained_enhancement" in hparams: + run_on_main(hparams["pretrained_enhancement"].collect_files) + hparams["pretrained_enhancement"].load_collected() + + # Brain class initialization + enhancement = Enhancement( + modules=hparams["modules"], + opt_class=hparams["optimizer"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # re-initialize the parameters if we don't use a pretrained model + if "pretrained_enhancement" not in hparams: + for module in enhancement.modules.values(): + enhancement.reset_layer_recursively(module) + + # determine if frequency domain enhancement or not + use_freq_domain = hparams.get("use_freq_domain", False) + enhancement.use_freq_domain = use_freq_domain + + if not hparams["test_only"]: + # Training + enhancement.fit( + enhancement.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_opts"], 
+ valid_loader_kwargs=hparams["dataloader_opts_valid"], + ) + + ## Evaluation on valid data + # (since our test set is blind) + enhancement.evaluate( + valid_data, + max_key="pesq", + test_loader_kwargs=hparams["dataloader_opts_valid"], + ) + enhancement.save_results(valid_data) + + ## Save enhanced sources of baseline noisy testclips + def save_baseline_audio(snt_id, predictions): + "saves the estimated sources on disk" + # Create output folder + save_path = os.path.join( + hparams["save_folder"], "baseline_audio_results" + ) + save_path_enhanced = os.path.join(save_path, "enhanced_testclips") + + if not os.path.exists(save_path_enhanced): + os.makedirs(save_path_enhanced) + + # Estimated source + signal = predictions[0, :] + signal = signal / signal.abs().max() + save_file = os.path.join(save_path_enhanced, snt_id) + ".wav" + + audio_io.save( + save_file, signal.unsqueeze(0).cpu(), hparams["sample_rate"] + ) + + test_loader = sb.dataio.dataloader.make_dataloader( + baseline_data, **hparams["dataloader_opts_test"] + ) + + # Loop over all noisy baseline shards and save the enhanced clips + print("Saving enhanced sources (baseline set)") + with tqdm(test_loader, dynamic_ncols=True) as t: + for i, batch in enumerate(t): + # Apply Enhancement + snt_id = batch.id[0] + + with torch.no_grad(): + # Since only noisy sources are provided for baseline + # we use the compute_forward function with the same noisy + # signal for all inputs. 
(ugly hack) + predictions, clean = enhancement.compute_forward( + batch.noisy_sig, + batch.noisy_sig, + batch.noisy_sig, + sb.Stage.TEST, + ) + predictions = predictions[0] + + # Write enhanced wavs + save_baseline_audio(snt_id, predictions) diff --git a/recipes/DNS/noisyspeech_synthesizer/README.md b/recipes/DNS/noisyspeech_synthesizer/README.md new file mode 100644 index 0000000000..b4c3e6870f --- /dev/null +++ b/recipes/DNS/noisyspeech_synthesizer/README.md @@ -0,0 +1,34 @@ +# **DNS: Noisy speech synthesizer** +This folder contains scripts to synthesize noisy audio for training. +Scripts have been taken from [official GitHub repo](https://github.com/microsoft/DNS-Challenge). + +Modify parameters like `sampling_rate`, `audio_length` , `total_hours` etc in the YAML file as per your requirement. + +## Synthesize clean-noisy data and create the Webdataset shards +Synthesize clean-noisy data and create WebDataset shards. + +### **Usage** +To create noisy dataset, run +``` +## synthesize read speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards --split_name read_speech --synthesized_data_dir synthesized_data_shards + +## synthesize German speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards --split_name german_speech --synthesized_data_dir synthesized_data_shards + +## synthesize Italian speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards --split_name italian_speech --synthesized_data_dir synthesized_data_shards + +## synthesize French speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards --split_name french_speech --synthesized_data_dir synthesized_data_shards + +## synthesize Spanish speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards 
--split_name spanish_speech --synthesized_data_dir synthesized_data_shards + +## synthesize Russian speech +python noisyspeech_synthesizer_singleprocess.py noisyspeech_synthesizer.yaml --input_shards_dir ../DNS-shards --split_name russian_speech --synthesized_data_dir synthesized_data_shards +``` + +It's recommended to execute these commands in parallel for quicker synthesis. + +**Time** : It takes about 140 HRS to synthesize a dataset of 500 HRS. This calls the need for dynamic mixing. diff --git a/recipes/DNS/noisyspeech_synthesizer/audiolib.py b/recipes/DNS/noisyspeech_synthesizer/audiolib.py new file mode 100644 index 0000000000..44dc1c36ea --- /dev/null +++ b/recipes/DNS/noisyspeech_synthesizer/audiolib.py @@ -0,0 +1,347 @@ +""" +Source: https://github.com/microsoft/DNS-Challenge +Ownership: Microsoft + +* Author + chkarada +""" + +import glob +import os +import subprocess + +import librosa +import numpy as np +import soundfile as sf + +EPS = np.finfo(float).eps +np.random.seed(0) + + +def is_clipped(audio, clipping_threshold=0.99): + """Check if an audio signal is clipped.""" + return any(abs(audio) > clipping_threshold) + + +def normalize(audio, target_level=-25): + """Normalize the signal to the target level""" + rms = (audio**2).mean() ** 0.5 + scalar = 10 ** (target_level / 20) / (rms + EPS) + audio = audio * scalar + return audio + + +def normalize_segmental_rms(audio, rms, target_level=-25): + """Normalize the signal to the target level + based on segmental RMS""" + scalar = 10 ** (target_level / 20) / (rms + EPS) + audio = audio * scalar + return audio + + +def audioread(path, norm=False, start=0, stop=None, target_level=-25): + """Function to read audio""" + + path = os.path.abspath(path) + if not os.path.exists(path): + raise ValueError(f"[{path}] does not exist!") + try: + audio, sample_rate = sf.read(path, start=start, stop=stop) + except RuntimeError: # fix for sph pcm-embedded shortened v2 + print("WARNING: Audio type not supported") + return 
(None, None) + + if len(audio.shape) == 1: # mono + if norm: + rms = (audio**2).mean() ** 0.5 + scalar = 10 ** (target_level / 20) / (rms + EPS) + audio = audio * scalar + else: # multi-channel + audio = audio.T + audio = audio.sum(axis=0) / audio.shape[0] + if norm: + audio = normalize(audio, target_level) + + return audio, sample_rate + + +def audiowrite( + destpath, + audio, + sample_rate=16000, + norm=False, + target_level=-25, + clipping_threshold=0.99, + clip_test=False, +): + """Function to write audio""" + + if clip_test: + if is_clipped(audio, clipping_threshold=clipping_threshold): + raise ValueError( + "Clipping detected in audiowrite()! " + + destpath + + " file not written to disk." + ) + + if norm: + audio = normalize(audio, target_level) + max_amp = max(abs(audio)) + if max_amp >= clipping_threshold: + audio = audio / max_amp * (clipping_threshold - EPS) + + destpath = os.path.abspath(destpath) + destdir = os.path.dirname(destpath) + + if not os.path.exists(destdir): + os.makedirs(destdir) + + sf.write(destpath, audio, sample_rate) + return + + +def add_reverb(sasxExe, input_wav, filter_file, output_wav): + """Function to add reverb""" + command_sasx_apply_reverb = f"{sasxExe} -r {input_wav} \ + -f {filter_file} -o {output_wav}" + + subprocess.call(command_sasx_apply_reverb) + return output_wav + + +def add_clipping(audio, max_thresh_perc=0.8): + """Function to add clipping""" + threshold = max(abs(audio)) * max_thresh_perc + audioclipped = np.clip(audio, -threshold, threshold) + return audioclipped + + +def adsp_filter(Adspvqe, nearEndInput, nearEndOutput, farEndInput): + command_adsp_clean = f"{Adspvqe} --breakOnErrors 0 --sampleRate 16000 --useEchoCancellation 0 \ + --operatingMode 2 --useDigitalAgcNearend 0 --useDigitalAgcFarend 0 \ + --useVirtualAGC 0 --useComfortNoiseGenerator 0 --useAnalogAutomaticGainControl 0 \ + --useNoiseReduction 0 --loopbackInputFile {farEndInput} --farEndInputFile {farEndInput} \ + --nearEndInputFile {nearEndInput} 
--nearEndOutputFile {nearEndOutput}" + subprocess.call(command_adsp_clean) + + +def snr_mixer( + params, clean, noise, snr, target_level=-25, clipping_threshold=0.99 +): + """Function to mix clean speech and noise at various SNR levels""" + # cfg = params['cfg'] + if len(clean) > len(noise): + noise = np.append(noise, np.zeros(len(clean) - len(noise))) + else: + clean = np.append(clean, np.zeros(len(noise) - len(clean))) + + # Normalizing to -25 dB FS + clean = clean / (max(abs(clean)) + EPS) + clean = normalize(clean, target_level) + rmsclean = (clean**2).mean() ** 0.5 + + noise = noise / (max(abs(noise)) + EPS) + noise = normalize(noise, target_level) + rmsnoise = (noise**2).mean() ** 0.5 + + # Set the noise level for a given SNR + noisescalar = rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS) + noisenewlevel = noise * noisescalar + + # Mix noise and clean speech + noisyspeech = clean + noisenewlevel + + # Randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value + # There is a chance of clipping that might happen with very less probability, which is not a major issue. + noisy_rms_level = np.random.randint( + params["target_level_lower"], params["target_level_upper"] + ) + rmsnoisy = (noisyspeech**2).mean() ** 0.5 + scalarnoisy = 10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS) + noisyspeech = noisyspeech * scalarnoisy + clean = clean * scalarnoisy + noisenewlevel = noisenewlevel * scalarnoisy + + # Final check to see if there are any amplitudes exceeding +/- 1. 
If so, normalize all the signals accordingly + if is_clipped(noisyspeech): + noisyspeech_maxamplevel = max(abs(noisyspeech)) / ( + clipping_threshold - EPS + ) + noisyspeech = noisyspeech / noisyspeech_maxamplevel + clean = clean / noisyspeech_maxamplevel + noisenewlevel = noisenewlevel / noisyspeech_maxamplevel + noisy_rms_level = int( + 20 + * np.log10(scalarnoisy / noisyspeech_maxamplevel * (rmsnoisy + EPS)) + ) + + return clean, noisenewlevel, noisyspeech, noisy_rms_level + + +def segmental_snr_mixer( + params, clean, noise, snr, target_level=-25, clipping_threshold=0.99 +): + """Function to mix clean speech and noise at various segmental SNR levels""" + # cfg = params['cfg'] + if len(clean) > len(noise): + noise = np.append(noise, np.zeros(len(clean) - len(noise))) + else: + clean = np.append(clean, np.zeros(len(noise) - len(clean))) + clean = clean / (max(abs(clean)) + EPS) + noise = noise / (max(abs(noise)) + EPS) + rmsclean, rmsnoise = active_rms(clean=clean, noise=noise) + clean = normalize_segmental_rms( + clean, rms=rmsclean, target_level=target_level + ) + noise = normalize_segmental_rms( + noise, rms=rmsnoise, target_level=target_level + ) + # Set the noise level for a given SNR + noisescalar = rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS) + noisenewlevel = noise * noisescalar + + # Mix noise and clean speech + noisyspeech = clean + noisenewlevel + # Randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value + # There is a chance of clipping that might happen with very less probability, which is not a major issue. + noisy_rms_level = np.random.randint( + params["target_level_lower"], params["target_level_upper"] + ) + rmsnoisy = (noisyspeech**2).mean() ** 0.5 + scalarnoisy = 10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS) + noisyspeech = noisyspeech * scalarnoisy + clean = clean * scalarnoisy + noisenewlevel = noisenewlevel * scalarnoisy + # Final check to see if there are any amplitudes exceeding +/- 1. 
If so, normalize all the signals accordingly + if is_clipped(noisyspeech): + noisyspeech_maxamplevel = max(abs(noisyspeech)) / ( + clipping_threshold - EPS + ) + noisyspeech = noisyspeech / noisyspeech_maxamplevel + clean = clean / noisyspeech_maxamplevel + noisenewlevel = noisenewlevel / noisyspeech_maxamplevel + noisy_rms_level = int( + 20 + * np.log10(scalarnoisy / noisyspeech_maxamplevel * (rmsnoisy + EPS)) + ) + + return clean, noisenewlevel, noisyspeech, noisy_rms_level + + +def active_rms(clean, noise, fs=16000, energy_thresh=-50): + """Returns the clean and noise RMS of the noise calculated only in the active portions""" + window_size = 100 # in ms + window_samples = int(fs * window_size / 1000) + sample_start = 0 + noise_active_segs = [] + clean_active_segs = [] + + while sample_start < len(noise): + sample_end = min(sample_start + window_samples, len(noise)) + noise_win = noise[sample_start:sample_end] + clean_win = clean[sample_start:sample_end] + noise_seg_rms = (noise_win**2).mean() ** 0.5 + # Considering frames with energy + if noise_seg_rms > energy_thresh: + noise_active_segs = np.append(noise_active_segs, noise_win) + clean_active_segs = np.append(clean_active_segs, clean_win) + sample_start += window_samples + + if len(noise_active_segs) != 0: + noise_rms = (noise_active_segs**2).mean() ** 0.5 + else: + noise_rms = EPS + + if len(clean_active_segs) != 0: + clean_rms = (clean_active_segs**2).mean() ** 0.5 + else: + clean_rms = EPS + + return clean_rms, noise_rms + + +def activitydetector(audio, fs=16000, energy_thresh=0.13, target_level=-25): + """Return the percentage of the time the audio signal is above an energy threshold""" + + audio = normalize(audio, target_level) + window_size = 50 # in ms + window_samples = int(fs * window_size / 1000) + sample_start = 0 + cnt = 0 + prev_energy_prob = 0 + active_frames = 0 + + a = -1 + b = 0.2 + alpha_rel = 0.05 + alpha_att = 0.8 + + while sample_start < len(audio): + sample_end = min(sample_start + 
window_samples, len(audio)) + audio_win = audio[sample_start:sample_end] + frame_rms = 20 * np.log10(sum(audio_win**2) + EPS) + frame_energy_prob = 1.0 / (1 + np.exp(-(a + b * frame_rms))) + + if frame_energy_prob > prev_energy_prob: + smoothed_energy_prob = ( + frame_energy_prob * alpha_att + + prev_energy_prob * (1 - alpha_att) + ) + else: + smoothed_energy_prob = ( + frame_energy_prob * alpha_rel + + prev_energy_prob * (1 - alpha_rel) + ) + + if smoothed_energy_prob > energy_thresh: + active_frames += 1 + prev_energy_prob = frame_energy_prob + sample_start += window_samples + cnt += 1 + + perc_active = active_frames / cnt + return perc_active + + +def resampler(input_dir, target_sr=16000, ext="*.wav"): + """Resamples the audio files in input_dir to target_sr""" + files = glob.glob(f"{input_dir}/" + ext) + for pathname in files: + print(pathname) + try: + audio, fs = audioread(pathname) + audio_resampled = librosa.core.resample(audio, fs, target_sr) + audiowrite(pathname, audio_resampled, target_sr) + except: # noqa + continue + + +def audio_segmenter(input_dir, dest_dir, segment_len=10, ext="*.wav"): + """Segments the audio clips in dir to segment_len in secs""" + files = glob.glob(f"{input_dir}/" + ext) + for i in range(len(files)): + audio, fs = audioread(files[i]) + + if ( + len(audio) > (segment_len * fs) + and len(audio) % (segment_len * fs) != 0 + ): + audio = np.append( + audio, + audio[0 : segment_len * fs - (len(audio) % (segment_len * fs))], + ) + if len(audio) < (segment_len * fs): + while len(audio) < (segment_len * fs): + audio = np.append(audio, audio) + audio = audio[: segment_len * fs] + + num_segments = int(len(audio) / (segment_len * fs)) + audio_segments = np.split(audio, num_segments) + + basefilename = os.path.basename(files[i]) + basename, ext = os.path.splitext(basefilename) + + for j in range(len(audio_segments)): + newname = basename + "_" + str(j) + ext + destpath = os.path.join(dest_dir, newname) + audiowrite(destpath, 
audio_segments[j], fs) diff --git a/recipes/DNS/noisyspeech_synthesizer/noisyspeech_synthesizer.yaml b/recipes/DNS/noisyspeech_synthesizer/noisyspeech_synthesizer.yaml new file mode 100644 index 0000000000..26e3073702 --- /dev/null +++ b/recipes/DNS/noisyspeech_synthesizer/noisyspeech_synthesizer.yaml @@ -0,0 +1,101 @@ +# yamllint disable +################################ +# Configuration for generating Noisy Speech Dataset +# - sampling_rate: Specify the sampling rate. Default is 16 kHz +# - audioformat: default is .wav +# - audio_length: Minimum Length of each audio clip (noisy and clean speech) +# in seconds that will be generated by augmenting utterances. +# - silence_length: Duration of silence introduced between clean speech +# utterances. +# - total_hours: Total number of hours of data required. Units are in hours. +# - snr_lower: Lower bound for SNR required (default: 0 dB) +# - snr_upper: Upper bound for SNR required (default: 40 dB) +# - target_level_lower: Lower bound for the target audio level +# before audiowrite (default: -35 dB) +# - target_level_upper: Upper bound for the target audio level +# before audiowrite (default: -15 dB) +# - total_snrlevels: Number of SNR levels required (default: 5, which means +# there are 5 levels between snr_lower and snr_upper) +# - clean_activity_threshold: Activity threshold for clean speech +# - noise_activity_threshold: Activity threshold for noise +# - fileindex_start: Starting file ID that will be used in filenames +# - fileindex_end: Last file ID that will be used in filenames +# - is_test_set: Set it to True if it is the test set, else False for the +# - log_dir: Specify path to the directory to store all the log files +# ################################ +# yamllint enable + + +# Data storage params +input_shards_dir: !PLACEHOLDER # ../DNS-shards +split_name: !PLACEHOLDER # read_speech, german_speech, italian_speech, french_speech etc +rirs: RIR_table_simple.csv + +# Noisy data synthesis params +sampling_rate: 
16000 # sampling rate of synthesized signal +audioformat: "*.wav" +audio_length: 4 +silence_length: 0.2 +total_hours: 100 +snr_lower: -5 +snr_upper: 15 +randomize_snr: True +target_level_lower: -35 +target_level_upper: -15 +total_snrlevels: 21 +clean_activity_threshold: 0.6 +noise_activity_threshold: 0.0 +fileindex_start: None +fileindex_end: None +is_test_set: False + +# Source dir +rir_table_csv: !ref + +# Directory path where Webdatasets of DNS clean and noise shards are located. +input_sampling_rate: 48000 # sampling rate of input signal +clean_meta: !ref /clean_fullband//meta.json +noise_meta: !ref /noise_fullband/meta.json +clean_fullband_shards: !ref /clean_fullband//shard-{000000..999999}.tar +noise_fullband_shards: !ref /noise_fullband/shard-{000000..999999}.tar + +# Configuration for synthesizing shards of clean-noisy pairs. +samples_per_shard: 5000 + +# Destination directory for storing shards of synthesized data. +synthesized_data_dir: !PLACEHOLDER # synthesized_data_shards +train_shard_destination: !ref /train_shards/ +valid_shard_destination: !ref /valid_shards/ + +# Set to a directory on a large disk if using Webdataset shards hosted on the web. +shard_cache_dir: + +# These can be skipped. (uncomment if you want to use them) +# clean_singing: !PLACEHOLDER # ../DNS-shards/clean_fullband/VocalSet_48kHz_mono/ +# clean_emotion: !PLACEHOLDER # ../DNS-shards/clean_fullband/emotional_speech/ +## Aishell data needs to be downloaded separately. 
+# clean_mandarin: !PLACEHOLDER # ../DNS-shards/clean_fullband/mandrin_speech/data_aishell + +log_dir: !ref _logs +noise_types_excluded: None + +## Config: add singing voice to clean speech +use_singing_data: 0 # 0 for no, 1 for yes +# 1 for only male, 2 for only female, 3 (default) for both male and female +singing_choice: 3 + +## Config: add emotional data to clean speech +# 0 for no, 1 for yes +use_emotion_data: 0 + +## Config: add Chinese (mandarin) data to clean speech +# 0 for no, 1 for yes +use_mandarin_data: 0 + +## Config: add reverb to clean speech +# 1 for only real rir, 2 for only synthetic rir, 3 (default) use both real and synthetic +rir_choice: 3 +# lower bound of t60 range in seconds +lower_t60: 0.3 +# upper bound of t60 range in seconds +upper_t60: 1.3 diff --git a/recipes/DNS/noisyspeech_synthesizer/noisyspeech_synthesizer_singleprocess.py b/recipes/DNS/noisyspeech_synthesizer/noisyspeech_synthesizer_singleprocess.py new file mode 100644 index 0000000000..65b3610da8 --- /dev/null +++ b/recipes/DNS/noisyspeech_synthesizer/noisyspeech_synthesizer_singleprocess.py @@ -0,0 +1,712 @@ +""" +Source: https://github.com/microsoft/DNS-Challenge +Ownership: Microsoft + +This script will attempt to use each clean and noise +webdataset shards to synthesize clean-noisy pairs of +audio. The output is again stored in webdataset shards. 
+ +* Author + chkarada + +* Further modified + Sangeet Sagar (2023) +""" + +# Note: This single process audio synthesizer will attempt to use each clean +# speech sourcefile once (from the webdataset shards), as it does not +# randomly sample from these files + +import json +import os +import random +import sys +import time +from collections import defaultdict +from functools import partial +from pathlib import Path +from typing import Dict + +import librosa +import numpy as np +import pandas as pd +import torch +import utils +import webdataset as wds +from audiolib import activitydetector, is_clipped, segmental_snr_mixer +from hyperpyyaml import load_hyperpyyaml +from scipy import signal +from scipy.io import wavfile + +import speechbrain as sb + +np.random.seed(5) +random.seed(5) + +MAXTRIES = 50 +MAXFILELEN = 100 + +start = time.time() + + +def add_pyreverb(clean_speech, rir): + """ + Add reverb to clean signal + """ + reverb_speech = signal.fftconvolve(clean_speech, rir, mode="full") + + # make reverb_speech same length as clean_speech + reverb_speech = reverb_speech[0 : clean_speech.shape[0]] + + return reverb_speech + + +def build_audio(is_clean, params, index, audio_samples_length=-1): + """Construct an audio signal from source files""" + + fs_output = params["fs"] + silence_length = params["silence_length"] + if audio_samples_length == -1: + audio_samples_length = int(params["audio_length"] * params["fs"]) + + output_audio = np.zeros(0) + remaining_length = audio_samples_length + files_used = [] + clipped_files = [] + + if is_clean: + data_iterator = iter(params["clean_data"]) + idx = index + else: + data_iterator = iter(params["noise_data"]) + idx = index + + # initialize silence + silence = np.zeros(int(fs_output * silence_length)) + + # iterate through multiple clips until we have a long enough signal + tries_left = MAXTRIES + while remaining_length > 0 and tries_left > 0: + # read next audio file and resample if necessary + fs_input = params["fs_input"] 
+ batch = next(data_iterator) + input_audio = batch["sig"].numpy() + + if input_audio is None: + sys.stderr.write( + "\nWARNING: Cannot read file: %s\n" % batch["__key__"] + ) + continue + if fs_input != fs_output: + input_audio = librosa.resample( + input_audio, orig_sr=fs_input, target_sr=fs_output + ) + + # if current file is longer than remaining desired length, and this is + # noise generation or this is training set, subsample it randomly + if len(input_audio) > remaining_length and ( + not is_clean or not params["is_test_set"] + ): + idx_seg = np.random.randint(0, len(input_audio) - remaining_length) + input_audio = input_audio[idx_seg : idx_seg + remaining_length] + + # check for clipping, and if found move onto next file + if is_clipped(input_audio): + clipped_files.append(batch["__key__"]) + tries_left -= 1 + continue + + # concatenate current input audio to output audio stream + files_used.append(batch["__key__"]) + output_audio = np.append(output_audio, input_audio) + remaining_length -= len(input_audio) + + # add some silence if we have not reached desired audio length + if remaining_length > 0: + silence_len = min(remaining_length, len(silence)) + output_audio = np.append(output_audio, silence[:silence_len]) + remaining_length -= silence_len + + if tries_left == 0 and not is_clean and "noise_data" in params.keys(): + print( + "There are not enough non-clipped files in the " + + "given noise directory to complete the audio build" + ) + return [], [], clipped_files, idx + + return output_audio, files_used, clipped_files, idx + + +def gen_audio(is_clean, params, index, audio_samples_length=-1): + """Calls build_audio() to get an audio signal, and verify that it meets the + activity threshold""" + + clipped_files = [] + low_activity_files = [] + if audio_samples_length == -1: + audio_samples_length = int(params["audio_length"] * params["fs"]) + if is_clean: + activity_threshold = params["clean_activity_threshold"] + else: + activity_threshold = 
params["noise_activity_threshold"] + + while True: + audio, source_files, new_clipped_files, index = build_audio( + is_clean, params, index, audio_samples_length + ) + + clipped_files += new_clipped_files + if len(audio) < audio_samples_length: + continue + + if activity_threshold == 0.0: + break + + percactive = activitydetector(audio=audio) + if percactive > activity_threshold: + break + else: + low_activity_files += source_files + + return audio, source_files, clipped_files, low_activity_files, index + + +def main_gen(params): + """Calls gen_audio() to generate the audio signals, verifies that they meet + the requirements, and writes the files to storage""" + + clean_source_files = [] + clean_clipped_files = [] + clean_low_activity_files = [] + noise_source_files = [] + noise_clipped_files = [] + noise_low_activity_files = [] + + clean_index = 0 + noise_index = 0 + + # write shards + train_shards_path = Path(params["train_shard_destination"]) + train_shards_path.mkdir(exist_ok=True, parents=True) + valid_shards_path = Path(params["valid_shard_destination"]) + valid_shards_path.mkdir(exist_ok=True, parents=True) + + all_keys = set() + train_pattern = str(train_shards_path / "shard") + "-%06d.tar" + valid_pattern = str(valid_shards_path / "shard") + "-%06d.tar" + samples_per_shard = params["samples_per_shard"] + + # track statistics on data + train_sample_keys = defaultdict(list) + valid_sample_keys = defaultdict(list) + + # Define the percentage of data to be used for validation + validation_percentage = 0.05 + + # Calculate the number of samples for training and validation + total_samples = params["fileindex_end"] - params["fileindex_start"] + 1 + num_validation_samples = int(total_samples * validation_percentage) + + # Define separate ShardWriters for training and validation data + train_writer = wds.ShardWriter(train_pattern, maxcount=samples_per_shard) + valid_writer = wds.ShardWriter(valid_pattern, maxcount=samples_per_shard) + + # Initialize counters and 
data lists for statistics + file_num = params["fileindex_start"] + train_data_tuples = [] + valid_data_tuples = [] + + while file_num <= params["fileindex_end"]: + print( + "\rFiles synthesized {:4d}/{:4d}".format( + file_num, params["fileindex_end"] + ), + end="", + ) + # CLEAN SPEECH GENERATION + clean, clean_sf, clean_cf, clean_laf, clean_index = gen_audio( + True, params, clean_index + ) + + # add reverb with selected RIR + rir_index = random.randint(0, len(params["myrir"]) - 1) + + my_rir = os.path.normpath(os.path.join(params["myrir"][rir_index])) + (fs_rir, samples_rir) = wavfile.read(my_rir) + + my_channel = int(params["mychannel"][rir_index]) + + if samples_rir.ndim == 1: + samples_rir_ch = np.array(samples_rir) + + elif my_channel > 1: + samples_rir_ch = samples_rir[:, my_channel - 1] + else: + samples_rir_ch = samples_rir[:, my_channel - 1] + # print(samples_rir.shape) + # print(my_channel) + + # REVERB MIXED TO THE CLEAN SPEECH + clean = add_pyreverb(clean, samples_rir_ch) + + # generate noise + noise, noise_sf, noise_cf, noise_laf, noise_index = gen_audio( + False, params, noise_index, len(clean) + ) + + clean_clipped_files += clean_cf + clean_low_activity_files += clean_laf + noise_clipped_files += noise_cf + noise_low_activity_files += noise_laf + + # mix clean speech and noise + # if specified, use specified SNR value + if not params["randomize_snr"]: + snr = params["snr"] + # use a randomly sampled SNR value between the specified bounds + else: + snr = np.random.randint(params["snr_lower"], params["snr_upper"]) + + # NOISE ADDED TO THE REVERBED SPEECH + clean_snr, noise_snr, noisy_snr, target_level = segmental_snr_mixer( + params=params, clean=clean, noise=noise, snr=snr + ) + # Uncomment the below lines if you need segmental SNR and comment the above lines using snr_mixer + # clean_snr, noise_snr, noisy_snr, target_level = segmental_snr_mixer(params=params, + # clean=clean, + # noise=noise, + # snr=snr) + # unexpected clipping + if ( + 
is_clipped(clean_snr) + or is_clipped(noise_snr) + or is_clipped(noisy_snr) + ): + print( + "\nWarning: File #" + + str(file_num) + + " has unexpected clipping, " + + "returning without writing audio to disk" + ) + continue + + clean_source_files += clean_sf + noise_source_files += noise_sf + + # write resultant audio streams to files + hyphen = "-" + clean_source_filenamesonly = [ + i[:-4].split(os.path.sep)[-1] for i in clean_sf + ] + clean_files_joined = hyphen.join(clean_source_filenamesonly)[ + :MAXFILELEN + ] + noise_source_filenamesonly = [ + i[:-4].split(os.path.sep)[-1] for i in noise_sf + ] + noise_files_joined = hyphen.join(noise_source_filenamesonly)[ + :MAXFILELEN + ] + + noisyfilename = ( + clean_files_joined + + "_" + + noise_files_joined + + "_snr" + + str(snr) + + "_tl" + + str(target_level) + + "_fileid_" + + str(file_num) + ) + + # Period is not allowed in a WebDataset key name + cleanfilename = "clean_fileid_" + str(file_num) + cleanfilename = cleanfilename.replace(".", "_") + noisefilename = "noise_fileid_" + str(file_num) + noisefilename = noisefilename.replace(".", "_") + + file_num += 1 + + # store statistics + key = noisyfilename + key = key.replace(".", "_") + lang = params["split_name"].split("_")[0] + t = (key, lang) + + # verify key is unique + assert cleanfilename not in all_keys + all_keys.add(cleanfilename) + + # Split the data between training and validation based on the file number + if file_num % total_samples <= num_validation_samples: + # Write to validation set + valid_sample_keys[lang].append(key) + valid_data_tuples.append(t) + sample = { + "__key__": key, + "noisy_file": key, + "clean_file": cleanfilename, + "noise_file": noisefilename, + "clean_audio.pth": torch.tensor(clean_snr).to(torch.float32), + "noise_audio.pth": torch.tensor(noise_snr).to(torch.float32), + "noisy_audio.pth": torch.tensor(noisy_snr).to(torch.float32), + } + valid_writer.write(sample) + else: + # Write to training set + 
train_sample_keys[lang].append(key) + train_data_tuples.append(t) + sample = { + "__key__": key, + "noisy_file": key, + "clean_file": cleanfilename, + "noise_file": noisefilename, + "clean_audio.pth": torch.tensor(clean_snr).to(torch.float32), + "noise_audio.pth": torch.tensor(noise_snr).to(torch.float32), + "noisy_audio.pth": torch.tensor(noisy_snr).to(torch.float32), + } + train_writer.write(sample) + + train_writer.close() + valid_writer.close() + + # Write meta.json files for both training and validation + train_meta_dict = { + "language_id": lang, + "sample_keys_per_language": train_sample_keys, + "num_data_samples": len(train_data_tuples), + } + valid_meta_dict = { + "language_id": lang, + "sample_keys_per_language": valid_sample_keys, + "num_data_samples": len(valid_data_tuples), + } + + with (train_shards_path / "meta.json").open("w", encoding="utf-8") as f: + json.dump(train_meta_dict, f, indent=4) + + with (valid_shards_path / "meta.json").open("w", encoding="utf-8") as f: + json.dump(valid_meta_dict, f, indent=4) + + return ( + clean_source_files, + clean_clipped_files, + clean_low_activity_files, + noise_source_files, + noise_clipped_files, + noise_low_activity_files, + ) + + +def main_body(): # noqa + """Main body of this file""" + + params = dict() + + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Data Directories and Settings + params["split_name"] = hparams["split_name"] + + # Audio Settings + params["fs"] = int(hparams["sampling_rate"]) + params["fs_input"] = int( + hparams["input_sampling_rate"] + ) # Sampling rate of input data + params["audioformat"] = hparams["audioformat"] + params["audio_length"] = float(hparams["audio_length"]) + params["silence_length"] = float(hparams["silence_length"]) + params["total_hours"] = float(hparams["total_hours"]) + + # Clean Data 
Categories + params["use_singing_data"] = int(hparams["use_singing_data"]) + if hasattr(hparams, "clean_singing"): + params["clean_singing"] = str(hparams["clean_singing"]) + params["singing_choice"] = int(hparams["singing_choice"]) + + params["use_emotion_data"] = int(hparams["use_emotion_data"]) + if hasattr(hparams, "clean_emotion"): + params["clean_emotion"] = str(hparams["clean_emotion"]) + + params["use_mandarin_data"] = int(hparams["use_mandarin_data"]) + if hasattr(hparams, "clean_mandarin"): + params["clean_mandarin"] = str(hparams["clean_mandarin"]) + + # Room Impulse Response (RIR) Settings + params["rir_choice"] = int(hparams["rir_choice"]) + params["lower_t60"] = float(hparams["lower_t60"]) + params["upper_t60"] = float(hparams["upper_t60"]) + params["rir_table_csv"] = str(hparams["rir_table_csv"]) + + # File Indexing + if ( + hparams["fileindex_start"] != "None" + and hparams["fileindex_end"] != "None" + ): + params["num_files"] = int(hparams["fileindex_end"]) - int( + params["fileindex_start"] + ) + params["fileindex_start"] = int(hparams["fileindex_start"]) + params["fileindex_end"] = int(hparams["fileindex_end"]) + else: + params["num_files"] = int( + (params["total_hours"] * 60 * 60) / params["audio_length"] + ) + params["fileindex_start"] = 0 + params["fileindex_end"] = params["num_files"] + + print("Number of files to be synthesized:", params["num_files"]) + + # Data Generation and Synthesis Settings + params["is_test_set"] = utils.str2bool(str(hparams["is_test_set"])) + params["clean_activity_threshold"] = float( + hparams["clean_activity_threshold"] + ) + params["noise_activity_threshold"] = float( + hparams["noise_activity_threshold"] + ) + params["snr_lower"] = int(hparams["snr_lower"]) + params["snr_upper"] = int(hparams["snr_upper"]) + params["randomize_snr"] = utils.str2bool(str(hparams["randomize_snr"])) + params["target_level_lower"] = int(hparams["target_level_lower"]) + params["target_level_upper"] = int(hparams["target_level_upper"]) 
+ + if hasattr(hparams, "snr"): + params["snr"] = int(hparams["snr"]) + else: + params["snr"] = int((params["snr_lower"] + params["snr_upper"]) / 2) + + # Synthesized Data Destination + params["samples_per_shard"] = hparams["samples_per_shard"] + params["train_shard_destination"] = hparams["train_shard_destination"] + params["valid_shard_destination"] = hparams["valid_shard_destination"] + + #### Shard data extraction ~~~ + # load the meta info json file + + with wds.gopen(hparams["clean_meta"], "rb") as f: + clean_meta = json.load(f) + with wds.gopen(hparams["noise_meta"], "rb") as f: + noise_meta = json.load(f) + + def audio_pipeline(sample_dict: Dict, random_chunk=True): + key = sample_dict["__key__"] + audio_tensor = sample_dict["audio.pth"] + + sig = audio_tensor.squeeze() + + return { + "sig": sig, + "id": key, + } + + clean_data = ( + wds.WebDataset( + hparams["clean_fullband_shards"], + cache_dir=hparams["shard_cache_dir"], + ) + .repeat() + .shuffle(1000) + .decode("pil") + .map(partial(audio_pipeline, random_chunk=True)) + ) + print(f"Clean data consist of {clean_meta['num_data_samples']} samples") + + noise_data = ( + wds.WebDataset( + hparams["noise_fullband_shards"], + cache_dir=hparams["shard_cache_dir"], + ) + .repeat() + .shuffle(1000) + .decode("pil") + .map(partial(audio_pipeline, random_chunk=True)) + ) + print(f"Noise data consist of {noise_meta['num_data_samples']} samples") + + params["clean_data"] = clean_data + params["noise_data"] = noise_data + + # add singing voice to clean speech + if params["use_singing_data"] == 1: + raise NotImplementedError("Add sining voice to clean speech") + else: + print("NOT using singing data for training!") + + # add emotion data to clean speech + if params["use_emotion_data"] == 1: + raise NotImplementedError("Add emotional data to clean speech") + else: + print("NOT using emotion data for training!") + + # add mandarin data to clean speech + if params["use_mandarin_data"] == 1: + raise 
NotImplementedError("Add Mandarin data to clean speech") + else: + print("NOT using non-english (Mandarin) data for training!") + + # rir + temp = pd.read_csv( + params["rir_table_csv"], + skiprows=[1], + sep=",", + header=None, + names=["wavfile", "channel", "T60_WB", "C50_WB", "isRealRIR"], + ) + temp.keys() + # temp.wavfile + + rir_wav = temp["wavfile"][1:] # 115413 + rir_channel = temp["channel"][1:] + rir_t60 = temp["T60_WB"][1:] + rir_isreal = temp["isRealRIR"][1:] + + rir_wav2 = [w.replace("\\", "/") for w in rir_wav] + rir_channel2 = [w for w in rir_channel] + rir_t60_2 = [w for w in rir_t60] + rir_isreal2 = [w for w in rir_isreal] + + myrir = [] + mychannel = [] + myt60 = [] + + lower_t60 = params["lower_t60"] + upper_t60 = params["upper_t60"] + + if params["rir_choice"] == 1: # real 3076 IRs + real_indices = [i for i, x in enumerate(rir_isreal2) if x == "1"] + + chosen_i = [] + for i in real_indices: + if (float(rir_t60_2[i]) >= lower_t60) and ( + float(rir_t60_2[i]) <= upper_t60 + ): + chosen_i.append(i) + + myrir = [rir_wav2[i] for i in chosen_i] + mychannel = [rir_channel2[i] for i in chosen_i] + myt60 = [rir_t60_2[i] for i in chosen_i] + + elif params["rir_choice"] == 2: # synthetic 112337 IRs + synthetic_indices = [i for i, x in enumerate(rir_isreal2) if x == "0"] + + chosen_i = [] + for i in synthetic_indices: + if (float(rir_t60_2[i]) >= lower_t60) and ( + float(rir_t60_2[i]) <= upper_t60 + ): + chosen_i.append(i) + + myrir = [rir_wav2[i] for i in chosen_i] + mychannel = [rir_channel2[i] for i in chosen_i] + myt60 = [rir_t60_2[i] for i in chosen_i] + + elif params["rir_choice"] == 3: # both real and synthetic + all_indices = [i for i, x in enumerate(rir_isreal2)] + + chosen_i = [] + for i in all_indices: + if (float(rir_t60_2[i]) >= lower_t60) and ( + float(rir_t60_2[i]) <= upper_t60 + ): + chosen_i.append(i) + + myrir = [rir_wav2[i] for i in chosen_i] + mychannel = [rir_channel2[i] for i in chosen_i] + myt60 = [rir_t60_2[i] for i in chosen_i] + + 
else: # default both real and synthetic + all_indices = [i for i, x in enumerate(rir_isreal2)] + + chosen_i = [] + for i in all_indices: + if (float(rir_t60_2[i]) >= lower_t60) and ( + float(rir_t60_2[i]) <= upper_t60 + ): + chosen_i.append(i) + + myrir = [rir_wav2[i] for i in chosen_i] + mychannel = [rir_channel2[i] for i in chosen_i] + myt60 = [rir_t60_2[i] for i in chosen_i] + + params["myrir"] = myrir + params["mychannel"] = mychannel + params["myt60"] = myt60 + + # Call main_gen() to generate audio + ( + clean_source_files, + clean_clipped_files, + clean_low_activity_files, + noise_source_files, + noise_clipped_files, + noise_low_activity_files, + ) = main_gen(params) + + # Create log directory if needed, and write log files of clipped and low activity files + log_dir = utils.get_dir(hparams, "log_dir", "Logs") + + utils.write_log_file( + log_dir, "source_files.csv", clean_source_files + noise_source_files + ) + utils.write_log_file( + log_dir, "clipped_files.csv", clean_clipped_files + noise_clipped_files + ) + utils.write_log_file( + log_dir, + "low_activity_files.csv", + clean_low_activity_files + noise_low_activity_files, + ) + + # Compute and print stats about percentage of clipped and low activity files + total_clean = ( + len(clean_source_files) + + len(clean_clipped_files) + + len(clean_low_activity_files) + ) + total_noise = ( + len(noise_source_files) + + len(noise_clipped_files) + + len(noise_low_activity_files) + ) + pct_clean_clipped = round(len(clean_clipped_files) / total_clean * 100, 1) + pct_noise_clipped = round(len(noise_clipped_files) / total_noise * 100, 1) + pct_clean_low_activity = round( + len(clean_low_activity_files) / total_clean * 100, 1 + ) + pct_noise_low_activity = round( + len(noise_low_activity_files) / total_noise * 100, 1 + ) + + print( + "\nOf the " + + str(total_clean) + + " clean speech files analyzed, " + + str(pct_clean_clipped) + + "% had clipping, and " + + str(pct_clean_low_activity) + + "% had low activity " + + 
"(below " + + str(params["clean_activity_threshold"] * 100) + + "% active percentage)" + ) + print( + "Of the " + + str(total_noise) + + " noise files analyzed, " + + str(pct_noise_clipped) + + "% had clipping, and " + + str(pct_noise_low_activity) + + "% had low activity " + + "(below " + + str(params["noise_activity_threshold"] * 100) + + "% active percentage)" + ) + + +if __name__ == "__main__": + main_body() diff --git a/recipes/DNS/noisyspeech_synthesizer/utils.py b/recipes/DNS/noisyspeech_synthesizer/utils.py new file mode 100644 index 0000000000..0b617c5eb2 --- /dev/null +++ b/recipes/DNS/noisyspeech_synthesizer/utils.py @@ -0,0 +1,56 @@ +""" +Source: https://github.com/microsoft/DNS-Challenge +Ownership: Microsoft + +* Author + rocheng +""" + +import csv +import glob +import os +from shutil import copyfile + + +def get_dir(cfg, param_name, new_dir_name): + """Helper function to retrieve directory name if it exists, + create it if it doesn't exist""" + + if param_name in cfg: + dir_name = cfg[param_name] + else: + dir_name = os.path.join(os.path.dirname(__file__), new_dir_name) + if not os.path.exists(dir_name): + os.makedirs(dir_name) + return dir_name + + +def write_log_file(log_dir, log_filename, data): + """Helper function to write log file""" + # data = zip(*data) + with open( + os.path.join(log_dir, log_filename), + mode="w", + newline="", + encoding="utf-8", + ) as csvfile: + csvwriter = csv.writer( + csvfile, delimiter=" ", quotechar="|", quoting=csv.QUOTE_MINIMAL + ) + for row in data: + csvwriter.writerow([row]) + + +def str2bool(string): + """Convert a string to a boolean value.""" + return string.lower() in ("yes", "true", "t", "1") + + +def rename_copyfile(src_path, dest_dir, prefix="", ext="*.wav"): + """Copy and rename files from a source directory to a destination directory.""" + srcfiles = glob.glob(f"{src_path}/" + ext) + for i in range(len(srcfiles)): + dest_path = os.path.join( + dest_dir, prefix + "_" + os.path.basename(srcfiles[i]) + ) 
+ copyfile(srcfiles[i], dest_path) diff --git a/recipes/DVoice/ASR/CTC/README.md b/recipes/DVoice/ASR/CTC/README.md index cc3f647d67..b3a0df8a11 100644 --- a/recipes/DVoice/ASR/CTC/README.md +++ b/recipes/DVoice/ASR/CTC/README.md @@ -2,7 +2,7 @@ This folder contains scripts necessary to run an ASR experiment with the DVoice dataset(Darija, Swahili): [Link](https://zenodo.org/record/6342622). The dataset used to train the Wolof, Fongbe and Amharic languages can be founded here: [Link](https://github.com/besacier/ALFFA_PUBLIC). # Data preparation -[DVoice](https://dvoice.ma) attempts to provide automatic voice processing solutions for African languages and dialects. We use preprocessing techniques including voice augmentation to fill the data gap for each language. +[DVoice](https://zenodo.org/record/5482551) attempts to provide automatic voice processing solutions for African languages and dialects. We use preprocessing techniques including voice augmentation to fill the data gap for each language. # How to run - Darija : Just download the DVoice dataset than run `python train_with_wav2vec2.py hparams/train_dar_with_wav2vec.yaml --data_folder=/localscratch/darija/` @@ -48,7 +48,7 @@ Here is a list of the different African languages and dialects that we tested: | Fongbe | v2.0 | train_fon_with_wav2vec.yaml | No | 4.16 | 9.19 | 3.98 | 9.00 | [Link](https://huggingface.co/speechbrain/asr-wav2vec2-dvoice-fongbe) | Amharic | v2.0 | train_amh_with_wav2vec.yaml | No | 6.71 | 25.50 | 6.57 | 24.92 | [Link](https://huggingface.co/speechbrain/asr-wav2vec2-dvoice-amharic) | -You can find our training results (models, logs, etc) [here](https://drive.google.com/drive/folders/1vNT7RjRuELs7pumBHmfYsrOp9m46D0ym?usp=sharing). +You can find our training results (models, logs, etc) [here](https://www.dropbox.com/sh/pyu40jq1ebv6hcc/AADQO_lAD-F9Q0vlVq8KoXHqa?dl=0). 
# Performances of DVoice Multilingual on each language | Dataset Link | Language | Test WER | @@ -63,7 +63,7 @@ You can find our training results (models, logs, etc) [here](https://drive.googl SpeechBrain provides a simple interface to transcribe audio files with pretrained models. All the necessary information can be found on the different HuggingFace repositories (see the results table above) corresponding to our different models for DVoice. # **About DVoice** -DVoice is a community initiative that aims to provide Africa low resources languages with data and models to facilitate their use of voice technologies. The lack of data on these languages makes it necessary to collect data using methods that are specific to each one. The DVoice platform([https://dvoice.ma](https://dvoice.ma) is based on Mozilla Common Voice, for collecting authentic recordings from the community, and transfer learning techniques for automatically labeling recordings that are retrived from social medias. The DVoice platform currently manages 7 languages including Darija(Moroccan Arabic dialect) whose dataset appears on this version, Wolof, Mandingo, Serere, Pular, Diola and Soninke. +DVoice is a community initiative that aims to provide Africa low resources languages with data and models to facilitate their use of voice technologies. The lack of data on these languages makes it necessary to collect data using methods that are specific to each one. The DVoice platform is based on Mozilla Common Voice, for collecting authentic recordings from the community, and transfer learning techniques for automatically labeling recordings that are retrieved from social media. The DVoice platform currently manages 7 languages including Darija(Moroccan Arabic dialect) whose dataset appears on this version, Wolof, Mandingo, Serere, Pular, Diola and Soninke. For this project, AIOX Labs the SI2M Laboratory are joining forces to build the future of technologies together. 
@@ -75,7 +75,6 @@ Based in Rabat, London and Paris, AIOX - Labs mobilizes artificial intelligence - Business ready data products with a solid algorithmic base and adaptability for the specific needs of each client. - A complementary team made up of doctors in AI and business experts with a solid scientific base and international publications. -Website: [https://www.aiox-labs.com/ ](https://www.aiox-labs.com/) # **About SI2M Laboratory** The Information Systems, Intelligent Systems and Mathematical Modeling Research Laboratory(SI2M) is an academic research laboratory of the National Institute of Statistics and Applied Economics(INSEA). The research areas of the laboratories are Information Systems, Intelligent Systems, Artificial Intelligence, Decision Support, Network and System Security, Mathematical Modelling. diff --git a/recipes/DVoice/ASR/CTC/extra_requirements.txt b/recipes/DVoice/ASR/CTC/extra_requirements.txt deleted file mode 100644 index 7bb0f523d7..0000000000 --- a/recipes/DVoice/ASR/CTC/extra_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -transformers==4.13 diff --git a/recipes/DVoice/ASR/CTC/hparams/train_amh_with_wav2vec.yaml b/recipes/DVoice/ASR/CTC/hparams/train_amh_with_wav2vec.yaml index 0212a85ebe..b3d92454ce 100644 --- a/recipes/DVoice/ASR/CTC/hparams/train_amh_with_wav2vec.yaml +++ b/recipes/DVoice/ASR/CTC/hparams/train_amh_with_wav2vec.yaml @@ -6,14 +6,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1249 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_AMHARIC/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # URL for the wav2vec2 xlsr wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: #!PLACEHOLDER # e.g, /PATH_TO/ALFFA_PUBLIC/ASR/AMHARIC/data @@ -31,12 
+32,12 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 15.0 -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min @@ -57,7 +58,7 @@ test_dataloader_options: token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False @@ -77,37 +78,68 @@ eos_index: 2 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn1: !name:speechbrain.nnet.normalization.BatchNorm1d activation: !new:torch.nn.LeakyReLU drop: !new:torch.nn.Dropout p: 0.15 linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn2: !name:speechbrain.nnet.normalization.BatchNorm1d activation2: !new:torch.nn.LeakyReLU drop2: !new:torch.nn.Dropout p: 0.15 linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/DVoice/ASR/CTC/hparams/train_dar_with_wav2vec.yaml b/recipes/DVoice/ASR/CTC/hparams/train_dar_with_wav2vec.yaml index 795b7879fe..577d86cfb8 100644 --- a/recipes/DVoice/ASR/CTC/hparams/train_dar_with_wav2vec.yaml +++ b/recipes/DVoice/ASR/CTC/hparams/train_dar_with_wav2vec.yaml @@ -6,14 +6,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: 
!!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_DAR/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # URL for the xlsr wav2vec2 wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: #!PLACEHOLDER # e.g, /dataset/ @@ -31,12 +32,12 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 15.0 -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min @@ -57,7 +58,7 @@ test_dataloader_options: token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False @@ -77,37 +78,69 @@ eos_index: 2 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn1: !name:speechbrain.nnet.normalization.BatchNorm1d activation: !new:torch.nn.LeakyReLU drop: !new:torch.nn.Dropout p: 0.15 linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn2: !name:speechbrain.nnet.normalization.BatchNorm1d activation2: !new:torch.nn.LeakyReLU drop2: !new:torch.nn.Dropout p: 0.15 linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/DVoice/ASR/CTC/hparams/train_fon_with_wav2vec.yaml b/recipes/DVoice/ASR/CTC/hparams/train_fon_with_wav2vec.yaml index b384dd0f8e..451286cbdd 100644 --- a/recipes/DVoice/ASR/CTC/hparams/train_fon_with_wav2vec.yaml +++ b/recipes/DVoice/ASR/CTC/hparams/train_fon_with_wav2vec.yaml @@ -6,14 +6,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1250 -__set_seed: 
!!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_FONGBE/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # Url for xlsr wav2vec2 wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: #!PLACEHOLDER # e.g, /PATH_TO/ALFFA_PUBLIC/ASR/FONGBE/data @@ -31,12 +32,12 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 15.0 -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min @@ -57,7 +58,7 @@ test_dataloader_options: token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False @@ -77,37 +78,69 @@ eos_index: 2 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn1: !name:speechbrain.nnet.normalization.BatchNorm1d activation: !new:torch.nn.LeakyReLU drop: !new:torch.nn.Dropout p: 0.15 linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn2: !name:speechbrain.nnet.normalization.BatchNorm1d activation2: !new:torch.nn.LeakyReLU drop2: !new:torch.nn.Dropout p: 0.15 linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/DVoice/ASR/CTC/hparams/train_multi_with_wav2vec.yaml b/recipes/DVoice/ASR/CTC/hparams/train_multi_with_wav2vec.yaml index cea01034c7..9c5366d039 100644 --- a/recipes/DVoice/ASR/CTC/hparams/train_multi_with_wav2vec.yaml +++ b/recipes/DVoice/ASR/CTC/hparams/train_multi_with_wav2vec.yaml @@ -6,13 +6,14 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1247 -__set_seed: 
!!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_MULTI/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: #!PLACEHOLDER # e.g, /dataset/ @@ -30,12 +31,12 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 15.0 -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min @@ -56,7 +57,7 @@ test_dataloader_options: token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False @@ -76,37 +77,69 @@ eos_index: 2 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn1: !name:speechbrain.nnet.normalization.BatchNorm1d activation: !new:torch.nn.LeakyReLU drop: !new:torch.nn.Dropout p: 0.15 linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn2: !name:speechbrain.nnet.normalization.BatchNorm1d activation2: !new:torch.nn.LeakyReLU drop2: !new:torch.nn.Dropout p: 0.15 linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/DVoice/ASR/CTC/hparams/train_sw_with_wav2vec.yaml b/recipes/DVoice/ASR/CTC/hparams/train_sw_with_wav2vec.yaml index e7f88a9cc1..75a8bb485b 100644 --- a/recipes/DVoice/ASR/CTC/hparams/train_sw_with_wav2vec.yaml +++ b/recipes/DVoice/ASR/CTC/hparams/train_sw_with_wav2vec.yaml @@ -6,14 +6,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: 
!!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_SW/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # URL for xlsr wav2vec2 wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: #!PLACEHOLDER # e.g, /dataset/ @@ -31,12 +32,12 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 15.0 -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min @@ -57,7 +58,7 @@ test_dataloader_options: token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False @@ -77,37 +78,69 @@ eos_index: 2 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn1: !name:speechbrain.nnet.normalization.BatchNorm1d activation: !new:torch.nn.LeakyReLU drop: !new:torch.nn.Dropout p: 0.15 linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn2: !name:speechbrain.nnet.normalization.BatchNorm1d activation2: !new:torch.nn.LeakyReLU drop2: !new:torch.nn.Dropout p: 0.15 linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/DVoice/ASR/CTC/hparams/train_wol_with_wav2vec.yaml b/recipes/DVoice/ASR/CTC/hparams/train_wol_with_wav2vec.yaml index 755c042234..53771c4e88 100644 --- a/recipes/DVoice/ASR/CTC/hparams/train_wol_with_wav2vec.yaml +++ b/recipes/DVoice/ASR/CTC/hparams/train_wol_with_wav2vec.yaml @@ -6,14 +6,15 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1249 -__set_seed: 
!!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/wav2vec2_ctc_WOLOF/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # URL for xlsr wav2vec2 wav2vec2_hub: facebook/wav2vec2-large-xlsr-53 +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files data_folder: #!PLACEHOLDER # e.g, /PATH_TO/ALFFA_PUBLIC/ASR/WOLOF/data @@ -31,12 +32,12 @@ skip_prep: False # Skip data preparation # longer sentences certainly correspond to "open microphones". avoid_if_longer_than: 15.0 -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 30 lr: 1.0 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 ckpt_interval_minutes: 30 # save checkpoint every N min @@ -57,7 +58,7 @@ test_dataloader_options: token_type: char # ["unigram", "bpe", "char"] character_coverage: 1.0 -# Model parameters +####################### Model Parameters ####################################### wav2vec_output_dim: 1024 dnn_neurons: 1024 freeze_wav2vec: False @@ -77,37 +78,68 @@ eos_index: 2 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn1: !name:speechbrain.nnet.normalization.BatchNorm1d activation: !new:torch.nn.LeakyReLU drop: !new:torch.nn.Dropout p: 0.15 linear2: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn2: !name:speechbrain.nnet.normalization.BatchNorm1d activation2: !new:torch.nn.LeakyReLU drop2: !new:torch.nn.Dropout p: 0.15 linear3: !name:speechbrain.nnet.linear.Linear - n_neurons: 1024 + n_neurons: !ref bias: True bn3: !name:speechbrain.nnet.normalization.BatchNorm1d activation3: !new:torch.nn.LeakyReLU -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead diff --git a/recipes/DVoice/ASR/CTC/train_with_wav2vec2.py b/recipes/DVoice/ASR/CTC/train_with_wav2vec2.py index 2eedd55ad7..2ee00cf25d 100644 --- a/recipes/DVoice/ASR/CTC/train_with_wav2vec2.py +++ b/recipes/DVoice/ASR/CTC/train_with_wav2vec2.py @@ -1,14 +1,4 @@ #!/usr/bin/env python3 -import sys -import torch -import logging -import speechbrain as sb -import torchaudio -from hyperpyyaml import load_hyperpyyaml -from 
speechbrain.tokenizers.SentencePiece import SentencePiece -from speechbrain.utils.data_utils import undo_padding -from speechbrain.utils.distributed import run_on_main - """Recipe for training a sequence-to-sequence ASR system with DVoice. The system employs a wav2vec2 encoder and a CTC decoder. Decoding is performed with greedy decoding (will be extended to beam search). @@ -17,7 +7,7 @@ > python train_with_wav2vec2.py hparams/train_sw_with_wav2vec.yaml --data_folder=/path_to_dataset/swahili With the default hyperparameters, the system employs a pretrained wav2vec2 encoder. -The wav2vec2 model is pretrained following the model given in the hprams file. +The wav2vec2 model is pretrained following the model given in the hparams file. It may be dependent on the language. The neural network is trained with CTC on sub-word units estimated with @@ -33,7 +23,20 @@ * Naira Abdou Mohamed 2022 """ -logger = logging.getLogger(__name__) +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -43,15 +46,14 @@ def compute_forward(self, batch, stage): batch = batch.to(self.device) wavs, wav_lens = batch.sig - tokens_bos, _ = batch.tokens_bos wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) # Forward pass - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) x = self.modules.enc(feats) logits = self.modules.ctc_lin(x) p_ctc = self.hparams.log_softmax(logits) @@ -64,9 +66,13 @@ def compute_objectives(self, predictions, batch, stage): p_ctc, wav_lens = predictions ids = batch.id - tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + tokens_lens = self.hparams.wav_augment.replicate_labels(tokens_lens) + loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) if stage != sb.Stage.TRAIN: @@ -86,53 +92,6 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - if self.auto_mix_prec: - - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - - with torch.cuda.amp.autocast(): - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - - self.scaler.scale(loss).backward() - if not self.hparams.wav2vec2.freeze: - self.scaler.unscale_(self.wav2vec_optimizer) - self.scaler.unscale_(self.model_optimizer) - - if self.check_gradients(loss): - if not self.hparams.wav2vec2.freeze: - self.scaler.step(self.wav2vec_optimizer) - self.scaler.step(self.model_optimizer) - - self.scaler.update() - else: - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - loss.backward() - - if self.check_gradients(loss): - if not self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.step() - self.model_optimizer.step() - - if not 
self.hparams.wav2vec2.freeze: - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -174,19 +133,27 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def init_optimizers(self): "Initializes the wav2vec2 optimizer and model optimizer" + self.model_optimizer = self.hparams.model_opt_class( + self.hparams.model.parameters() + ) + # If the wav2vec encoder is unfrozen, we create the optimizer if not self.hparams.wav2vec2.freeze: self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( @@ -196,25 +163,39 @@ def init_optimizers(self): self.checkpointer.add_recoverable( "wav2vec_opt", self.wav2vec_optimizer ) - - self.model_optimizer = self.hparams.model_opt_class( - self.hparams.model.parameters() - ) + self.optimizers_dict = { + "wav2vec_optimizer": self.wav2vec_optimizer, + "model_optimizer": self.model_optimizer, + } + else: + self.optimizers_dict = {"model_optimizer": self.model_optimizer} if self.checkpointer is not None: self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + def 
freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} + if not self.hparams.wav2vec2.freeze: + valid_optimizers["wav2vec_optimizer"] = optimizers[ + "wav2vec_optimizer" + ] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers + # Define custom data procedure def dataio_prepare(hparams, tokenizer): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # 1. Define datasets data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -244,13 +225,15 @@ def dataio_prepare(hparams, tokenizer): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["test_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, ) # We also sort the validation data so it is faster to validate @@ -262,10 +245,11 @@ def dataio_prepare(hparams, tokenizer): @sb.utils.data_pipeline.takes("wav") @sb.utils.data_pipeline.provides("sig") def audio_pipeline(wav): - info = torchaudio.info(wav) + info = audio_io.info(wav) sig = sb.dataio.dataio.read_audio(wav) resampled = torchaudio.transforms.Resample( - info.sample_rate, hparams["sample_rate"], + 
info.sample_rate, + hparams["sample_rate"], )(sig) return resampled @@ -290,19 +274,18 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], ) return train_data, valid_data, test_data if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -365,7 +348,6 @@ def text_pipeline(wrd): ) # Test - asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt" asr_brain.evaluate( test_data, min_key="WER", diff --git a/recipes/DVoice/dvoice_prepare.py b/recipes/DVoice/dvoice_prepare.py index 2cff2f2f69..6003e3a9cd 100644 --- a/recipes/DVoice/dvoice_prepare.py +++ b/recipes/DVoice/dvoice_prepare.py @@ -1,26 +1,28 @@ """ Data preparation. 
-Download: https://dvoice.ma/ +Download: https://zenodo.org/record/5482551 Author ------ Abdou Mohamed Naira 2022 """ -import os import csv +import glob +import os +import random import re -import logging -import torchaudio import unicodedata -from tqdm.contrib import tzip -import random + +import numpy as np import pandas as pd from tqdm import tqdm -import numpy as np -import glob +from tqdm.contrib import tzip + +from speechbrain.dataio.dataio import read_audio_info +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) def prepare_dvoice( @@ -33,7 +35,6 @@ def prepare_dvoice( language="fongbe", skip_prep=False, ): - if skip_prep: return @@ -107,14 +108,13 @@ def prepare_dvoice( dev.to_csv(f"{data_folder}/dev.csv", index=False, sep="\t") test.to_csv(f"{data_folder}/test.csv", index=False, sep="\t") - # Setting ouput files + # Setting output files save_csv_train = save_folder + "/train.csv" save_csv_dev = save_folder + "/dev.csv" save_csv_test = save_folder + "/test.csv" # If csv already exists, we skip the data preparation if skip(save_csv_train, save_csv_dev, save_csv_test): - msg = "%s already exists, skipping data preparation!" 
% (save_csv_train) logger.info(msg) @@ -131,7 +131,6 @@ def prepare_dvoice( # Creating csv file for training data if train_csv_file is not None: - create_csv( train_csv_file, save_csv_train, @@ -142,14 +141,16 @@ def prepare_dvoice( # Creating csv file for dev data if dev_csv_file is not None: - create_csv( - dev_csv_file, save_csv_dev, data_folder, accented_letters, language, + dev_csv_file, + save_csv_dev, + data_folder, + accented_letters, + language, ) # Creating csv file for test data if test_csv_file is not None: - create_csv( test_csv_file, save_csv_test, @@ -162,15 +163,15 @@ def prepare_dvoice( def alffa_public_prepare(language, data_folder): if language == "amharic": wavs = glob.glob(f"{data_folder}/*/*/*.wav") - f_train = open(f"{data_folder}/train/text", "r") - f_test = open(f"{data_folder}/test/text", "r") + f_train = open(f"{data_folder}/train/text", encoding="utf-8") + f_test = open(f"{data_folder}/test/text", encoding="utf-8") text = f_train.readlines() + f_test.readlines() random.shuffle(text) if language == "fongbe": wavs = glob.glob(f"{data_folder}/*/wav/*/*.wav") - f_train = open(f"{data_folder}/train/text", "r") - f_test = open(f"{data_folder}/test/text", "r") + f_train = open(f"{data_folder}/train/text", encoding="utf-8") + f_test = open(f"{data_folder}/test/text", encoding="utf-8") text = f_train.readlines() + f_test.readlines() random.shuffle(text) @@ -179,9 +180,9 @@ def alffa_public_prepare(language, data_folder): wavs_dev = glob.glob(f"{data_folder}/dev/wav/*/*.wav") wavs_test = glob.glob(f"{data_folder}/test/wav/*/*.wav") wavs = wavs_train + wavs_dev + wavs_test - f_train = open(f"{data_folder}/train/text", "r") - f_test = open(f"{data_folder}/test/text", "r") - f_dev = open(f"{data_folder}/dev/text", "r") + f_train = open(f"{data_folder}/train/text", encoding="utf-8") + f_test = open(f"{data_folder}/test/text", encoding="utf-8") + f_dev = open(f"{data_folder}/dev/text", encoding="utf-8") text = f_train.readlines() + f_dev.readlines() + 
f_test.readlines() random.shuffle(text) @@ -195,7 +196,7 @@ def alffa_public_prepare(language, data_folder): for j in range(len(wavs)): if wavs[j].split("/")[-1] == file_name + ".wav": wav = wavs[j] - info = torchaudio.info(wav) + info = read_audio_info(wav) duration = info.num_frames / info.sample_rate dic = { "wav": wavs[j].replace(data_folder + "/", ""), @@ -231,10 +232,12 @@ def swahili_prepare(data_folder): ) f_train_alffa = open( - f"{data_folder}/ALFFA_PUBLIC/ASR/SWAHILI/data/train/text", "r" + f"{data_folder}/ALFFA_PUBLIC/ASR/SWAHILI/data/train/text", + encoding="utf-8", ) f_test_alffa = open( - f"{data_folder}/ALFFA_PUBLIC/ASR/SWAHILI/data/test/text", "r" + f"{data_folder}/ALFFA_PUBLIC/ASR/SWAHILI/data/test/text", + encoding="utf-8", ) train_alffa = f_train_alffa.readlines() test_alffa = f_test_alffa.readlines() @@ -254,7 +257,7 @@ def swahili_prepare(data_folder): for j in range(len(wavs_alffa)): if wavs_alffa[j].split("/")[-1] == file_name + ".wav": wav = wavs_alffa[j] - info = torchaudio.info(wav) + info = read_audio_info(wav) duration = info.num_frames / info.sample_rate dic = { "wav": wavs_alffa[j].replace(data_folder + "/", ""), @@ -288,6 +291,16 @@ def skip(save_csv_train, save_csv_dev, save_csv_test): """ Detects if the DVoice data preparation has been already done. If the preparation has been done, we can skip it. + + Arguments + --------- + save_csv_train : str + Path to the train csv + save_csv_dev : str + Path to the dev csv + save_csv_test : str + Path to the test csv + Returns ------- bool @@ -317,18 +330,20 @@ def create_csv( ): """ Creates the csv file given a list of wav files. + Arguments --------- orig_csv_file : str Path to the DVoice csv file (standard file). + csv_file : str + Path to the new DVoice csv file. data_folder : str Path of the DVoice dataset. accented_letters : bool, optional Defines if accented letters will be kept as individual letters or transformed to the closest non-accented letters. 
- Returns - ------- - None + language : str + Language to prepare. """ # Check if the given files exists @@ -338,7 +353,7 @@ def create_csv( raise FileNotFoundError(msg) # We load and skip the header - loaded_csv = open(orig_csv_file, "r").readlines()[1:] + loaded_csv = open(orig_csv_file, encoding="utf-8").readlines()[1:] nb_samples = str(len(loaded_csv)) msg = "Preparing CSV files for %s samples ..." % (str(nb_samples)) logger.info(msg) @@ -371,12 +386,6 @@ def create_csv( spk_id = line.split("\t")[0].replace(".wav", "") snt_id = os.path.basename(file_name) - # Setting torchaudio backend to sox-io (needed to read mp3 files) - if torchaudio.get_audio_backend() != "sox_io": - logger.warning("This recipe needs the sox-io backend of torchaudio") - logger.warning("The torchaudio backend is changed to sox_io") - torchaudio.set_audio_backend("sox_io") - duration = float(line.split("\t")[2]) total_duration += duration @@ -395,7 +404,7 @@ def create_csv( ALEF_MADDA = "\u0622" ALEF_HAMZA_ABOVE = "\u0623" letters = ( - "ابتةثجحخدذرزسشصضطظعغفقكلمنهويءآأؤإئ" + "ابتةثجحخدذرزسشصضطظعغفقكلمنهويءآأؤإئ" # cspell:disable-line + HAMZA + ALEF_MADDA + ALEF_HAMZA_ABOVE @@ -450,9 +459,14 @@ def check_dvoice_folders(data_folder, language): """ Check if the data folder actually contains the DVoice dataset. If not, raises an error. - Returns - ------- - None + + Arguments + --------- + data_folder : str + Path to directory with data. + language : str + The language to check. 
+ Raises ------ FileNotFoundError @@ -471,7 +485,6 @@ def check_dvoice_folders(data_folder, language): # Checking clips if not os.path.exists(data_folder + files_str): - err_msg = ( "the folder %s does not exist (it is expected in " "the DVoice dataset)" % (data_folder + files_str) @@ -480,7 +493,6 @@ def check_dvoice_folders(data_folder, language): def unicode_normalisation(text): - try: text = unicode(text, "utf-8") except NameError: # unicode is a default on python 3 @@ -489,7 +501,6 @@ def unicode_normalisation(text): def strip_accents(text): - text = ( unicodedata.normalize("NFD", text) .encode("ascii", "ignore") diff --git a/recipes/ESC50/classification/README.md b/recipes/ESC50/classification/README.md new file mode 100644 index 0000000000..4de0ea48c6 --- /dev/null +++ b/recipes/ESC50/classification/README.md @@ -0,0 +1,166 @@ +# Sound Classification - ESC50 Dataset + +This recipe trains a classifier for the ESC50 multiclass sound classification dataset. + +The task involves classifying audio sounds into 50 different categories. These categories are divided into the following groups: + +- Animals +- Natural soundscapes and water sounds +- Human, non-speech sounds +- Interior/domestic sounds +- Exterior/urban noises + +The scripts offer the possibility to train both with log-spectra and log-mel audio features. + +## Dataset Download + +The ESC50 dataset will be automatically downloaded when running the recipe. If you prefer to download it manually, please visit: [https://github.com/karolpiczak/ESC-50](https://github.com/karolpiczak/ESC-50) + + +--------------------------------------------------------------------------------------------------------- + +## Installing Extra Dependencies + +Before proceeding, make sure you have installed the necessary additional dependencies. 
+ +To do this, simply run the following command in your terminal: + +```shell +pip install -r extra_requirements.txt +``` + +--------------------------------------------------------------------------------------------------------- + +## Supported Models + +### CNN14 + +This script trains a [CNN14 model](https://arxiv.org/abs/1912.10211) on the ESC50 dataset. To run this, you can use the command: + +```shell +python train.py hparams/cnn14.yaml --data_folder /yourpath/ESC50 +``` + +The dataset will be automatically download at the specified data folder. + +--------------------------------------------------------------------------------------------------------- + +### Conv2D + +This script trains a simple convolutional model on the ESC50 dataset. To run this, you can use the command: + +```shell +python train.py hparams/conv2d.yaml --data_folder /yourpath/ESC50 +```` + +--------------------------------------------------------------------------------------------------------- + +### FocalNet + +This script trains a FocalNet model on the ESC50 dataset. To run this, you can use the command: + +```shell +python train.py hparams/focalnet.yaml --data_folder /yourpath/ESC50 +``` + +--------------------------------------------------------------------------------------------------------- + +### ViT + +This script trains a ViT model on the ESC50 dataset. To run this, you can use the command: + +```shell +python train.py hparams/vit.yaml --data_folder /yourpath/ESC50 +``` + +--------------------------------------------------------------------------------------------------------- + +### To train with WHAM! noise + +In order to train the classifier with WHAM! noise, you need to download the WHAM! noise dataset from [here](http://wham.whisper.ai/). 
+Then, you can train your classifier with the following command: + +```shell +python train.py hparams/modelofchoice.yaml --data_folder /yourpath/ESC50 --add_wham_noise True --wham_folder /yourpath/wham_noise +``` + + +## Results + +| Hyperparams file | Accuracy (%) | Training time | HuggingFace link | Model link | GPUs | +|:----------------:|:------------:|:------------------:|:---------------------------------------------------------------:|:--------------------------------------------------------------------------------------------------------------------------:|:-----------:| +| cnn14.yaml | 82.0 | 11 seconds / epoch | [model](https://huggingface.co/speechbrain/cnn14-esc50) | [model](https://www.dropbox.com/sh/fbe7l14o3n8f5rw/AACABE1BQGBbX4j6A1dIhBcSa?dl=0) | RTX 3090 | +| conv2d.yaml | 75.0 | 15 seconds / epoch | [model](https://huggingface.co/speechbrain/PIQ-ESC50) | [model](https://www.dropbox.com/sh/tl2pbfkreov3z7e/AADwwhxBLw1sKvlSWzp6DMEia?dl=0) | RTX 3090 | +| focalnet.yaml | 77.4 | 60 seconds / epoch | [model](https://huggingface.co/speechbrain/focalnet-base-esc50) | [model](https://www.dropbox.com/scl/fo/zk101h5xypgi56d777yp5/AGVIfoe56OWInxWf6F57JyQ?rlkey=hmme5c8rnu2sok3jnwbanw7eq&dl=0) | 1xV100 32GB | +| vit.yaml | 73.6 | 56 seconds / epoch | [model](https://huggingface.co/speechbrain/vit-base-esc50) | [model](https://www.dropbox.com/scl/fo/af59l6mtm0ytqyhz3l7ib/ADGklBYXxil1DWKv5CSMDGk?rlkey=wk5tdh0h26f61e1tn3bh80vys&dl=0) | 1xV100 32GB | + +--------------------------------------------------------------------------------------------------------- + +## How to Run on Test Sets Only + +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: + +```shell +python train.py hparams/.yaml --data_folder /yourpath/ESC50 --test_only +``` + +--------------------------------------------------------------------------------------------------------- + +## Notes + +- The recipe automatically downloads the ESC50 dataset. 
You only need to specify the path to which you would like to download it. + +- All the necessary models are downloaded automatically for each training script. + +--------------------------------------------------------------------------------------------------------- + +## Citing + +If you find this recipe useful, please cite: + +```bibtex +@article{Wang_2022, + doi = {10.1109/lsp.2022.3229643}, + url = {https://doi.org/10.1109%2Flsp.2022.3229643}, + year = 2022, + publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, + volume = {29}, + pages = {2607--2611}, + author = {Zhepei Wang and Cem Subakan and Xilin Jiang and Junkai Wu and Efthymios Tzinis and Mirco Ravanelli and Paris Smaragdis}, + title = {Learning Representations for New Sound Classes With Continual Self-Supervised Learning}, + journal = {{IEEE} Signal Processing Letters} +} +``` + +```bibtex +@inproceedings{dellalibera2024focal, + title={Focal Modulation Networks for Interpretable Sound Classification}, + author={Luca Della Libera and Cem Subakan and Mirco Ravanelli}, + booktitle={IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) XAI-SA Workshop}, + year={2024}, +} +``` + +If you use **SpeechBrain**, please cite: + +```bibtex +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` + +--------------------------------------------------------------------------------------------------------- + +## About SpeechBrain + +- 
Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + +--------------------------------------------------------------------------------------------------------- diff --git a/recipes/ESC50/classification/confusion_matrix_fig.py b/recipes/ESC50/classification/confusion_matrix_fig.py new file mode 100644 index 0000000000..1a92c8b89d --- /dev/null +++ b/recipes/ESC50/classification/confusion_matrix_fig.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +"""Helper to create Confusion Matrix figure + +Authors + * David Whipps 2021 + * Ala Eddine Limame 2021 +""" + +import itertools + +import matplotlib.pyplot as plt +import numpy as np + + +def create_cm_fig(cm, display_labels): + """Creates confusion matrix plot. + + Arguments + --------- + cm : np.ndarray + Confusion matrix. + display_labels : list + Class labels to display. + + Returns + ------- + Confusion matrix figure : matplotlib.figure.Figure + """ + + fig = plt.figure(figsize=cm.shape, dpi=50, facecolor="w", edgecolor="k") + ax = fig.add_subplot(1, 1, 1) + + ax.imshow(cm, cmap="Oranges") # fits with the tensorboard colour scheme + + tick_marks = np.arange(cm.shape[0]) + + ax.set_xlabel("Predicted class", fontsize=18) + ax.set_xticks(tick_marks) + ax.set_xticklabels(display_labels, ha="center", fontsize=18, rotation=90) + ax.xaxis.set_label_position("bottom") + ax.xaxis.tick_bottom() + + ax.set_ylabel("True class", fontsize=18) + ax.set_yticks(tick_marks) + ax.set_yticklabels(display_labels, va="center", fontsize=18) + ax.yaxis.set_label_position("left") + ax.yaxis.tick_left() + + fmt = "d" # TODO use '.3f' if normalized + thresh = cm.max() / 2.0 + for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): + ax.text( + j, + i, + format(cm[i, j], fmt), + horizontalalignment="center", + verticalalignment="center", + color="white" if cm[i, j] > thresh else "black", + fontsize=18, + ) + + fig.set_tight_layout(True) + + 
return fig diff --git a/recipes/ESC50/classification/esc50_prepare.py b/recipes/ESC50/classification/esc50_prepare.py new file mode 120000 index 0000000000..c52175c3dc --- /dev/null +++ b/recipes/ESC50/classification/esc50_prepare.py @@ -0,0 +1 @@ +../esc50_prepare.py \ No newline at end of file diff --git a/recipes/UrbanSound8k/SoundClassification/extra_dependencies.txt b/recipes/ESC50/classification/extra_requirements.txt similarity index 50% rename from recipes/UrbanSound8k/SoundClassification/extra_dependencies.txt rename to recipes/ESC50/classification/extra_requirements.txt index 773f5ae6c8..bfc0717a91 100644 --- a/recipes/UrbanSound8k/SoundClassification/extra_dependencies.txt +++ b/recipes/ESC50/classification/extra_requirements.txt @@ -1,4 +1,6 @@ -scikit-learn matplotlib pandas -tensorboard \ No newline at end of file +scikit-learn +torchvision +transformers +wget diff --git a/recipes/ESC50/classification/hparams/cnn14.yaml b/recipes/ESC50/classification/hparams/cnn14.yaml new file mode 100644 index 0000000000..d84366aa42 --- /dev/null +++ b/recipes/ESC50/classification/hparams/cnn14.yaml @@ -0,0 +1,165 @@ +# ################################# +# Basic training parameters for sound classification using the ESC50 dataset. +# This recipe uses the ecapa-tdnn backbone for classification. 
+# +# Authors: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023, 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: !ref cnn14-esc50 +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +add_wham_noise: False +test_only: False + +wham_folder: null # Set it if add_wham_noise is True. +wham_audio_folder: !ref /tr + + +sample_rate: 16000 +signal_length_s: 5 + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 200 +batch_size: 32 +lr: 0.0002 +base_lr: 0.00000001 +max_lr: !ref +step_size: 65000 + + +# Feature parameters +n_mels: 80 +left_frames: 0 +right_frames: 0 +deltas: False + +use_melspectra: True +use_log1p_mel: True + +# Number of classes +out_n_neurons: 50 + +# Note that it's actually important to shuffle the data here +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +# Functions +compute_features: !new:speechbrain.lobes.features.Fbank + n_mels: !ref + left_frames: !ref + right_frames: !ref + deltas: !ref + sample_rate: !ref + n_fft: 1024 + win_length: 20 + hop_length: 10 + 
+embedding_model: !new:speechbrain.lobes.models.Cnn14.Cnn14 + mel_bins: !ref + emb_dim: 2048 + +classifier: !new:torch.nn.Linear + in_features: 2048 + out_features: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +# pre-processing +n_fft: 1024 +spec_mag_power: 0.5 +hop_length: 11.6099 +win_length: 23.2199 +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_fbank: !new:speechbrain.processing.features.Filterbank + n_mels: 80 + n_fft: !ref + sample_rate: !ref + log_mel: False + +modules: + compute_stft: !ref + compute_fbank: !ref + compute_features: !ref + embedding_model: !ref + classifier: !ref + +compute_cost: !new:speechbrain.nnet.losses.LogSoftmaxWrapper + loss_fn: !new:speechbrain.nnet.losses.AdditiveAngularMargin + margin: 0.2 + scale: 30 + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.CyclicLRScheduler + base_lr: !ref + max_lr: !ref + step_size: !ref + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_stats: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.classification_error + reduction: batch + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + embedding_model: !ref + classifier: !ref + counter: !ref + +use_pretrained: True +# If you do not want to use the pretrained encoder +# you can simply delete pretrained_encoder field, +# or set use_pretrained=False +embedding_model_path: speechbrain/cnn14-esc50/embedding_model.ckpt +pretrained_encoder: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + paths: + embedding_model: !ref diff --git a/recipes/ESC50/classification/hparams/conv2d.yaml b/recipes/ESC50/classification/hparams/conv2d.yaml new 
file mode 100644 index 0000000000..c410cdfc0f --- /dev/null +++ b/recipes/ESC50/classification/hparams/conv2d.yaml @@ -0,0 +1,155 @@ +# ################################# +# Basic training parameters for sound classification using the ESC50 dataset. +# This recipe uses a conv2d backbone for classification. +# +# Authors: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +wham_folder: null # Set it if add_wham_noise is True +wham_audio_folder: !ref /tr + +experiment_name: conv2dv2_classifier-16k +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +test_only: False + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 200 +batch_size: 32 +lr: 0.00002 +base_lr: 0.000002 +max_lr: !ref +step_size: 65000 +sample_rate: 16000 +signal_length_s: 5 + +add_wham_noise: False + + +# Feature parameters +n_mels: 80 + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +use_pretrained: True +use_melspectra: False +use_log1p_mel: False 
+embedding_model: !new:speechbrain.lobes.models.PIQ.Conv2dEncoder_v2 + dim: 256 + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 256 + out_neurons: !ref + lin_blocks: 1 + + #classifier: !new:torch.nn.Linear + #in_features: 256 + #out_features: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +mean_var_norm: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + std_norm: False + +# pre-processing +n_fft: 1024 +spec_mag_power: 0.5 +hop_length: 11.6099 +win_length: 23.2199 +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_fbank: !new:speechbrain.processing.features.Filterbank + n_mels: !ref + n_fft: !ref + sample_rate: !ref + +modules: + compute_stft: !ref + compute_fbank: !ref + embedding_model: !ref + classifier: !ref + mean_var_norm: !ref + +compute_cost: !new:speechbrain.nnet.losses.LogSoftmaxWrapper + loss_fn: !new:speechbrain.nnet.losses.AdditiveAngularMargin + margin: 0.2 + scale: 30 + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.CyclicLRScheduler + base_lr: !ref + max_lr: !ref + step_size: !ref + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_stats: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.classification_error + reduction: batch + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + embedding_model: !ref + classifier: !ref + normalizer: !ref + counter: !ref + + +# If you do not want to use the pretrained separator you can simply delete pretrained_separator field. 
+embedding_model_path: "speechbrain/PIQ-ESC50/embedding_model.ckpt" +pretrained_encoder: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + paths: + embedding_model: !ref diff --git a/recipes/ESC50/classification/hparams/focalnet.yaml b/recipes/ESC50/classification/hparams/focalnet.yaml new file mode 100644 index 0000000000..2cff8f8bd3 --- /dev/null +++ b/recipes/ESC50/classification/hparams/focalnet.yaml @@ -0,0 +1,152 @@ +# ################################# +# Basic training parameters for sound classification using the ESC50 dataset. +# This recipe uses a FocalNet backbone for classification. +# +# Authors: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# * Luca Della Libera 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: focalnet-base-esc50 +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +add_wham_noise: False +test_only: False + +wham_folder: null # Set it if add_wham_noise is True +wham_audio_folder: !ref /tr + +use_melspectra: False +use_log1p_mel: False + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every 
N min + +# Training parameters +number_of_epochs: 100 +batch_size: 16 +lr: 0.0002 +base_lr: 0.00000001 +max_lr: !ref +step_size: 65000 +sample_rate: 16000 + +signal_length_s: 5 + +# Number of classes +out_n_neurons: 50 + +# Note that it's actually important to shuffle the data here +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +# Augmentation +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 # Min frequency band dropout probability + drop_freq_high: 1 # Max frequency band dropout probability + drop_freq_count_low: 1 # Min number of frequency bands to drop + drop_freq_count_high: 3 # Max number of frequency bands to drop + drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1 # Min number of audio chunks to drop + drop_length_high: 5 # Max number of audio chunks to drop + drop_count_low: 1000 # Min length of audio chunks to drop + drop_count_high: 2000 # Max length of audio chunks to drop + +augmentation: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 2 + max_augmentations: 2 + augment_prob: 0.75 + augmentations: [!ref , !ref ] + +# Model +embedding_model: !apply:transformers.FocalNetBackbone.from_pretrained [microsoft/focalnet-base] + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 1024 + out_neurons: !ref + lin_blocks: 1 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +# Pre-processing +n_fft: 1024 +spec_mag_power: 0.5 +hop_length: 11.6099 +win_length: 23.2199 + +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +modules: + compute_stft: !ref + embedding_model: !ref + classifier: !ref + +compute_cost: !new:speechbrain.nnet.losses.LogSoftmaxWrapper + 
loss_fn: !new:speechbrain.nnet.losses.AdditiveAngularMargin + margin: 0.2 + scale: 30 + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.CyclicLRScheduler + base_lr: !ref + max_lr: !ref + step_size: !ref + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_stats: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.classification_error + reduction: batch + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + embedding_model: !ref + classifier: !ref + counter: !ref diff --git a/recipes/ESC50/classification/hparams/vit.yaml b/recipes/ESC50/classification/hparams/vit.yaml new file mode 100644 index 0000000000..5e079365af --- /dev/null +++ b/recipes/ESC50/classification/hparams/vit.yaml @@ -0,0 +1,151 @@ +# ################################# +# Basic training parameters for sound classification using the ESC50 dataset. +# This recipe uses a ViT backbone for classification. 
+# +# Authors: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# * Luca Della Libera 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: vit-base-esc50 +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +add_wham_noise: False +use_melspectra: False +use_log1p_mel: False +test_only: False + +wham_folder: null # Set it if add_wham_noise is True +wham_audio_folder: !ref /tr + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 100 +batch_size: 16 +lr: 0.0002 +base_lr: 0.00000001 +max_lr: !ref +step_size: 65000 + +sample_rate: 16000 +signal_length_s: 5 + +# Number of classes +out_n_neurons: 50 + +# Note that it's actually important to shuffle the data here +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +# Augmentation +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 # Min frequency band dropout probability + drop_freq_high: 1 # Max frequency band dropout probability + drop_freq_count_low: 1 # Min number of frequency bands to drop + drop_freq_count_high: 3 # Max 
number of frequency bands to drop + drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1 # Min number of audio chunks to drop + drop_length_high: 5 # Max number of audio chunks to drop + drop_count_low: 1000 # Min length of audio chunks to drop + drop_count_high: 2000 # Max length of audio chunks to drop + +augmentation: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 2 + max_augmentations: 2 + augment_prob: 0.75 + augmentations: [!ref , !ref ] + +# Model +embedding_model: !apply:transformers.ViTModel.from_pretrained [google/vit-base-patch16-224] + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 768 + out_neurons: !ref + lin_blocks: 1 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +# Pre-processing +n_fft: 1024 +spec_mag_power: 0.5 +hop_length: 11.6099 +win_length: 23.2199 + +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +modules: + compute_stft: !ref + embedding_model: !ref + classifier: !ref + +compute_cost: !new:speechbrain.nnet.losses.LogSoftmaxWrapper + loss_fn: !new:speechbrain.nnet.losses.AdditiveAngularMargin + margin: 0.2 + scale: 30 + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.CyclicLRScheduler + base_lr: !ref + max_lr: !ref + step_size: !ref + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_stats: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.classification_error + reduction: batch + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + embedding_model: !ref + 
classifier: !ref + counter: !ref diff --git a/recipes/ESC50/classification/train.py b/recipes/ESC50/classification/train.py new file mode 100755 index 0000000000..0866a3ea80 --- /dev/null +++ b/recipes/ESC50/classification/train.py @@ -0,0 +1,470 @@ +#!/usr/bin/python3 + +"""Recipe to train a classifier on ESC50 data. + +To run this recipe, use the following command: +> python train.py hparams/.yaml --data_folder yourpath/ESC-50-master + +Authors + * Cem Subakan 2022, 2023 + * Francesco Paissan 2022, 2023 + * Luca Della Libera 2024 + +Based on the Urban8k recipe by + * David Whipps 2021 + * Ala Eddine Limame 2021 +""" + +import os +import sys + +import numpy as np +import torch +import torch.nn.functional as F +import torchaudio +import torchvision +from confusion_matrix_fig import create_cm_fig +from esc50_prepare import prepare_esc50 +from hyperpyyaml import load_hyperpyyaml +from sklearn.metrics import confusion_matrix +from wham_prepare import combine_batches, prepare_wham + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.distributed import run_on_main + + +class ESC50Brain(sb.core.Brain): + """Class for classifier training.""" + + def compute_forward(self, batch, stage): + """Computation pipeline based on an encoder + sound classifier.""" + batch = batch.to(self.device) + wavs, lens = batch.sig + + # Augment if specified + if hasattr(self.hparams, "augmentation") and stage == sb.Stage.TRAIN: + wavs, lens = self.hparams.augmentation(wavs, lens) + + # augment batch with WHAM! 
+ if hasattr(self.hparams, "add_wham_noise"): + if self.hparams.add_wham_noise: + wavs = combine_batches(wavs, iter(self.hparams.wham_dataset)) + + X_stft = self.modules.compute_stft(wavs) + net_input = sb.processing.features.spectral_magnitude( + X_stft, power=self.hparams.spec_mag_power + ) + if ( + hasattr(self.hparams, "use_melspectra") + and self.hparams.use_melspectra + ): + net_input = self.modules.compute_fbank(net_input) + + if (not self.hparams.use_melspectra) or self.hparams.use_log1p_mel: + net_input = torch.log1p(net_input) + + # Embeddings + sound classifier + if hasattr(self.modules.embedding_model, "config"): + # Hugging Face model + config = self.modules.embedding_model.config + # Resize to match expected resolution + net_input = torchvision.transforms.functional.resize( + net_input, (config.image_size, config.image_size) + ) + # Expand to have 3 channels + net_input = net_input[:, None, ...].expand(-1, 3, -1, -1) + if config.model_type == "focalnet": + embeddings = self.modules.embedding_model( + net_input + ).feature_maps[-1] + embeddings = embeddings.mean(dim=(-1, -2)) + elif config.model_type == "vit": + embeddings = self.modules.embedding_model( + net_input + ).last_hidden_state.movedim(-1, -2) + embeddings = embeddings.mean(dim=-1) + else: + raise NotImplementedError + else: + # SpeechBrain model + embeddings = self.modules.embedding_model(net_input) + if isinstance(embeddings, tuple): + embeddings, _ = embeddings + + if embeddings.ndim == 4: + embeddings = embeddings.mean((-1, -2)) + + # run through classifier + outputs = self.modules.classifier(embeddings) + + if outputs.ndim == 2: + outputs = outputs.unsqueeze(1) + + return outputs, lens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss using class-id as label.""" + predictions, lens = predictions + uttid = batch.id + classid, _ = batch.class_string_encoded + + # Target augmentation + N_augments = int(predictions.shape[0] / classid.shape[0]) + classid = 
torch.cat(N_augments * [classid], dim=0) + + # loss = self.hparams.compute_cost(predictions.squeeze(1), classid, lens) + target = F.one_hot( + classid.squeeze(), num_classes=self.hparams.out_n_neurons + ) + loss = ( + -(F.log_softmax(predictions.squeeze(1), 1) * target).sum(1).mean() + ) + + if stage != sb.Stage.TEST: + if hasattr(self.hparams.lr_annealing, "on_batch_end"): + self.hparams.lr_annealing.on_batch_end(self.optimizer) + + # Append this batch of losses to the loss metric + self.loss_metric.append( + uttid, predictions, classid, lens, reduction="batch" + ) + + # Confusion matrices + if stage != sb.Stage.TRAIN: + y_true = classid.cpu().detach().numpy().squeeze(-1) + y_pred = predictions.cpu().detach().numpy().argmax(-1).squeeze(-1) + + if stage == sb.Stage.VALID: + confusion_matix = confusion_matrix( + y_true, + y_pred, + labels=sorted(self.hparams.label_encoder.ind2lab.keys()), + ) + self.valid_confusion_matrix += confusion_matix + if stage == sb.Stage.TEST: + confusion_matix = confusion_matrix( + y_true, + y_pred, + labels=sorted(self.hparams.label_encoder.ind2lab.keys()), + ) + self.test_confusion_matrix += confusion_matix + + # Compute accuracy using MetricStats + self.acc_metric.append( + uttid, predict=predictions, target=classid, lengths=lens + ) + + if stage != sb.Stage.TRAIN: + self.error_metrics.append(uttid, predictions, classid, lens) + + return loss + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch. + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. 
+ """ + # Set up statistics trackers for this stage + self.loss_metric = sb.utils.metric_stats.MetricStats( + metric=sb.nnet.losses.nll_loss + ) + + # Compute accuracy using MetricStats + # Define function taking (prediction, target, length) for eval + def accuracy_value(predict, target, lengths): + """Computes accuracy.""" + nbr_correct, nbr_total = sb.utils.Accuracy.Accuracy( + predict, target, lengths + ) + acc = torch.tensor([nbr_correct / nbr_total]) + return acc + + self.acc_metric = sb.utils.metric_stats.MetricStats( + metric=accuracy_value, n_jobs=1 + ) + + # Confusion matrices + if stage == sb.Stage.VALID: + self.valid_confusion_matrix = np.zeros( + shape=(self.hparams.out_n_neurons, self.hparams.out_n_neurons), + dtype=int, + ) + if stage == sb.Stage.TEST: + self.test_confusion_matrix = np.zeros( + shape=(self.hparams.out_n_neurons, self.hparams.out_n_neurons), + dtype=int, + ) + + # Set up evaluation-only statistics trackers + if stage != sb.Stage.TRAIN: + self.error_metrics = self.hparams.error_stats() + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of an epoch. + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. 
+ """ + # Compute/store important stats + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + self.train_stats = { + "loss": self.train_loss, + "acc": self.acc_metric.summarize("average"), + } + # Summarize Valid statistics from the stage for record-keeping + elif stage == sb.Stage.VALID: + valid_stats = { + "loss": stage_loss, + "acc": self.acc_metric.summarize("average"), + "error": self.error_metrics.summarize("average"), + } + # Summarize Test statistics from the stage for record-keeping + else: + test_stats = { + "loss": stage_loss, + "acc": self.acc_metric.summarize("average"), + "error": self.error_metrics.summarize("average"), + } + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + old_lr, new_lr = self.hparams.lr_annealing(epoch) + sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + + # Tensorboard logging + if self.hparams.use_tensorboard: + self.hparams.tensorboard_train_logger.log_stats( + stats_meta={"Epoch": epoch}, + train_stats=self.train_stats, + valid_stats=valid_stats, + ) + # Log confusion matrix fig to tensorboard + cm_fig = create_cm_fig( + self.valid_confusion_matrix, + display_labels=list( + self.hparams.label_encoder.ind2lab.values() + ), + ) + self.hparams.tensorboard_train_logger.writer.add_figure( + "Validation Confusion Matrix", cm_fig, epoch + ) + + # The train_logger writes a summary to stdout and to the log file + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch, "lr": old_lr}, + train_stats=self.train_stats, + valid_stats=valid_stats, + ) + # Save the current checkpoint and delete previous checkpoints, + self.checkpointer.save_and_keep_only( + meta=valid_stats, min_keys=["error"] + ) + + # We also write statistics about test data to stdout and to the log file + if stage == sb.Stage.TEST: + # Per class accuracy from Test confusion matrix + per_class_acc_arr = np.diag(self.test_confusion_matrix) / np.sum( + self.test_confusion_matrix, axis=1 + ) + 
per_class_acc_arr_str = "\n" + "\n".join( + f"{class_id}: {class_acc:.3f}" + for class_id, class_acc in enumerate(per_class_acc_arr) + ) + + self.hparams.train_logger.log_stats( + { + "Epoch loaded": self.hparams.epoch_counter.current, + "\n Per Class Accuracy": per_class_acc_arr_str, + "\n Confusion Matrix": f"\n{self.test_confusion_matrix}\n", + }, + test_stats=test_stats, + ) + + +def dataio_prep(hparams): + """Creates the datasets and their data processing pipelines.""" + data_audio_folder = hparams["audio_data_folder"] + config_sample_rate = hparams["sample_rate"] + label_encoder = sb.dataio.encoder.CategoricalEncoder() + hparams["resampler"] = torchaudio.transforms.Resample( + new_freq=config_sample_rate + ) + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Load the signal, and pass it and its length to the corruption class. + This is done on the CPU in the `collate_fn`.""" + + wave_file = data_audio_folder + f"/{wav}" + + sig, read_sr = audio_io.load(wave_file) + + # If multi-channels, downmix it to a mono channel + sig = torch.squeeze(sig) + if len(sig.shape) > 1: + sig = torch.mean(sig, dim=0) + + # Convert sample rate to required config_sample_rate + if read_sr != config_sample_rate: + # Re-initialize sampler if source file sample rate changed compared to last file + if read_sr != hparams["resampler"].orig_freq: + hparams["resampler"] = torchaudio.transforms.Resample( + orig_freq=read_sr, new_freq=config_sample_rate + ) + # Resample audio + sig = hparams["resampler"].forward(sig) + + sig = sig.float() + sig = sig / sig.max() + return sig + + # 3. 
Define label pipeline: + @sb.utils.data_pipeline.takes("class_string") + @sb.utils.data_pipeline.provides("class_string", "class_string_encoded") + def label_pipeline(class_string): + """The label pipeline.""" + yield class_string + class_string_encoded = label_encoder.encode_label_torch(class_string) + yield class_string_encoded + + # Define datasets. We also connect the dataset with the data processing + # functions defined above. + datasets = {} + data_info = { + "train": hparams["train_annotation"], + "valid": hparams["valid_annotation"], + "test": hparams["test_annotation"], + } + for dataset in data_info: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline, label_pipeline], + output_keys=["id", "sig", "class_string_encoded"], + ) + + # Load or compute the label encoder (with multi-GPU DDP support) + # Please, take a look into the lab_enc_file to see the label to index + # mapping. 
+ lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") + label_encoder.load_or_create( + path=lab_enc_file, + from_didatasets=[datasets["train"]], + output_key="class_string", + ) + + return datasets, label_encoder + + +if __name__ == "__main__": + # This flag enables the built-in cuDNN auto-tuner + # torch.backends.cudnn.benchmark = True + + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Load hyperparameters file with command-line overrides + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Tensorboard logging + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs_folder"] + ) + + run_on_main( + prepare_esc50, + kwargs={ + "data_folder": hparams["data_folder"], + "audio_data_folder": hparams["audio_data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "train_fold_nums": hparams["train_fold_nums"], + "valid_fold_nums": hparams["valid_fold_nums"], + "test_fold_nums": hparams["test_fold_nums"], + "skip_manifest_creation": hparams["skip_manifest_creation"], + }, + ) + + # Dataset IO prep: creating Dataset objects and proper encodings for phones + datasets, label_encoder = dataio_prep(hparams) + hparams["label_encoder"] = label_encoder + + if "wham_folder" in hparams: + hparams["wham_dataset"] = prepare_wham( + hparams["wham_folder"], + hparams["add_wham_noise"], + hparams["sample_rate"], + hparams["signal_length_s"], + 
hparams["wham_audio_folder"], + ) + + if hparams["wham_dataset"] is not None: + assert hparams["signal_length_s"] == 5, "Fix wham sig length!" + + class_labels = list(label_encoder.ind2lab.values()) + print("Class Labels:", class_labels) + + ESC50_brain = ESC50Brain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Load pretrained encoder if it exists in the yaml file + if not hasattr(ESC50_brain.modules, "embedding_model"): + ESC50_brain.hparams.embedding_model.to(ESC50_brain.device) + + if "pretrained_encoder" in hparams and hparams["use_pretrained"]: + run_on_main(hparams["pretrained_encoder"].collect_files) + hparams["pretrained_encoder"].load_collected() + + if not hparams["test_only"]: + ESC50_brain.fit( + epoch_counter=ESC50_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["dataloader_options"], + ) + + # Load the best checkpoint for evaluation + test_stats = ESC50_brain.evaluate( + test_set=datasets["test"], + min_key="error", + progressbar=True, + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/ESC50/classification/wham_prepare.py b/recipes/ESC50/classification/wham_prepare.py new file mode 120000 index 0000000000..96bd210cce --- /dev/null +++ b/recipes/ESC50/classification/wham_prepare.py @@ -0,0 +1 @@ +../wham_prepare.py \ No newline at end of file diff --git a/recipes/ESC50/esc50_prepare.py b/recipes/ESC50/esc50_prepare.py new file mode 100644 index 0000000000..f66307ab44 --- /dev/null +++ b/recipes/ESC50/esc50_prepare.py @@ -0,0 +1,478 @@ +""" +Creates data manifest files for ESC50 +If the data does not exist in the specified --data_folder, we download the data automatically. 
+ +https://github.com/karolpiczak/ESC-50/ + +Authors: + * Cem Subakan 2022, 2023 + * Francesco Paissan 2022, 2023 + + Adapted from the Urbansound8k recipe. +""" + +import json +import os +import shutil + +import torch +import torchaudio + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_data_csv, read_audio +from speechbrain.utils.fetching import LocalStrategy, fetch +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + +ESC50_DOWNLOAD_URL = "https://github.com/karolpiczak/ESC-50/archive/master.zip" +MODIFIED_METADATA_FILE_NAME = "esc50_speechbrain.csv" + +ACCEPTABLE_FOLD_NUMS = [1, 2, 3, 4, 5] + + +def download_esc50(data_path): + """ + This function automatically downloads the ESC50 dataset to the specified data path in the data_path variable + + Arguments + --------- + data_path: str or Path + Directory used to save the dataset. + """ + if not os.path.exists(os.path.join(data_path, "meta")): + print( + f"ESC50 is missing. We are now downloading it. Be patient it's a 600M file. 
You can check {data_path}/temp_download to see the download progression" + ) + temp_path = os.path.join(data_path, "temp_download") + + # download the data + archive_path = fetch( + "master.zip", + "https://github.com/karolpiczak/ESC-50/archive/", # noqa ignore-url-check + savedir=temp_path, + # URL, so will be fetched directly in the savedir anyway + local_strategy=LocalStrategy.COPY_SKIP_CACHE, + ) + + # unpack the .zip file + shutil.unpack_archive(archive_path, data_path) + + # move the files up to the datapath + files = os.listdir(os.path.join(data_path, "ESC-50-master")) + for fl in files: + shutil.move(os.path.join(data_path, "ESC-50-master", fl), data_path) + + # remove the unused datapath + shutil.rmtree(os.path.join(data_path, "temp_download")) + shutil.rmtree(os.path.join(data_path, "ESC-50-master")) + + print(f"ESC50 is downloaded in {data_path}") + + +def prepare_esc50( + data_folder, + audio_data_folder, + save_json_train, + save_json_valid, + save_json_test, + train_fold_nums=[1, 2, 3], + valid_fold_nums=[4], + test_fold_nums=[5], + skip_manifest_creation=False, +): + """ + Prepares the json files for the ESC50 dataset. + Prompts to download the dataset if it is not found in the `data_folder`. + + Arguments + --------- + data_folder : str + Path to the folder where the ESC50 dataset (including the metadata) is stored. + audio_data_folder: str + Path to the folder where the ESC50 dataset audio files are stored. + save_json_train : str + Path where the train data specification file will be saved. + save_json_valid : str + Path where the validation data specification file will be saved. + save_json_test : str + Path where the test data specification file will be saved. + train_fold_nums : list or int (integers [1,5]) + A list of integers defining which pre-defined "folds" to use for training. Must be + exclusive of valid_folds and test_folds. 
+ valid_fold_nums : list or int (integers [1,5]) + A list of integers defining which pre-defined "folds" to use for validation. Must be + exclusive of train_folds and test_folds. + test_fold_nums : list or int (integers [1,5]) + A list of integers defining which pre-defined "folds" to use for test. Must be + exclusive of train_folds and valid_folds. + skip_manifest_creation : bool + Whether to skip over the manifest creation step. + + Returns + ------- + None + + Example + ------- + >>> data_folder = "/path/to/ESC-50-master" + >>> prepare_urban_sound_8k( + ... data_folder, + ... "train.json", + ... "valid.json", + ... "test.json", + ... [1, 2, 3], + ... [4], + ... [5], + ... ) + """ + download_esc50(data_folder) + + # Tease params to correct type if necessary + if type(train_fold_nums) is int: + train_fold_nums = [train_fold_nums] + if type(valid_fold_nums) is int: + valid_fold_nums = [valid_fold_nums] + if type(test_fold_nums) is int: + test_fold_nums = [test_fold_nums] + + # Validate passed fold params + for fold_num in train_fold_nums: + if fold_num not in ACCEPTABLE_FOLD_NUMS: + print( + f"Train fold numbers {train_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}" + ) + logger.info( + f"Train fold numbers {train_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}" + ) + return + for fold_num in valid_fold_nums: + if fold_num not in ACCEPTABLE_FOLD_NUMS: + print( + f"Validation fold numbers {valid_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}" + ) + logger.info( + f"Validation fold numbers {valid_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}" + ) + return + for fold_num in test_fold_nums: + if fold_num not in ACCEPTABLE_FOLD_NUMS: + print( + f"Test fold numbers {test_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}" + ) + logger.info( + f"Test fold numbers {test_fold_nums}, contains an invalid value. 
Must be in {ACCEPTABLE_FOLD_NUMS}" + ) + return + + # Check if train, and valid and train and test folds are exclusive + if folds_overlap(train_fold_nums, valid_fold_nums): + print( + f"Train {train_fold_nums}, and Valid {valid_fold_nums} folds must be mutually exclusive!" + ) + logger.info( + f"Train {train_fold_nums}, and Valid {valid_fold_nums} folds must be mutually exclusive!" + ) + return + if folds_overlap(train_fold_nums, test_fold_nums): + print( + f"Train {train_fold_nums} and Test {test_fold_nums} folds must be mutually exclusive!" + ) + logger.info( + f"Train {train_fold_nums} and Test {test_fold_nums} folds must be mutually exclusive!" + ) + return + + # If the dataset doesn't exist yet, prompt the user to set or download it + + # Don't need to do this every single time + if skip_manifest_creation is True: + return + + # If our modified metadata file does not exist, create it + esc50_speechbrain_metadata_csv_path = os.path.join( + os.path.abspath(data_folder), "metadata/", MODIFIED_METADATA_FILE_NAME + ) + if not os.path.exists(esc50_speechbrain_metadata_csv_path): + esc50_speechbrain_metadata_csv_path = create_metadata_speechbrain_file( + data_folder + ) + + # Read the metadata into a dictionary + # Every key of this dictionary is now one of the sound filenames, without the ".wav" suffix + metadata = load_data_csv(esc50_speechbrain_metadata_csv_path) + + # List files and create manifest from list + logger.info( + f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}" + ) + + # Creating json files + create_json(metadata, audio_data_folder, train_fold_nums, save_json_train) + create_json(metadata, audio_data_folder, valid_fold_nums, save_json_valid) + create_json(metadata, audio_data_folder, test_fold_nums, save_json_test) + + +def create_json(metadata, audio_data_folder, folds_list, json_file): + """ + Creates the json file given a list of wav files. 
+ + Arguments + --------- + metadata: dict + A dictionary containing the ESC50 metadata file modified for the + SpeechBrain, such that keys are IDs (which are the .wav file names without the file extension). + audio_data_folder : str or Path + Data folder that stores ESC50 samples. + folds_list : list of int + The list of folds [1,5] to include in this batch + json_file : str + The path of the output json file + """ + # Processing all the wav files in the list + json_dict = {} + + for ID, sample_metadata in metadata.items(): + fold_num = int(sample_metadata["fold"]) + if fold_num in folds_list: + # Reading the signal (to retrieve duration in seconds) + wav_file = os.path.join( + os.path.abspath(audio_data_folder), + # "fold" + str(fold_num) + "/", + sample_metadata["filename"], + ) + try: + signal = read_audio(wav_file) + file_info = audio_io.info(wav_file) + + # If we're using sox/soundfile backend, file_info will have the old type + if isinstance(file_info, torchaudio.AudioMetaData): + duration = signal.shape[0] / file_info.sample_rate + else: + duration = signal.shape[0] / file_info[0].rate + + # Create entry for this sample ONLY if we have successfully read-in the file using SpeechBrain/torchaudio + json_dict[ID] = { + "wav": sample_metadata["filename"], + "classID": int(sample_metadata["target"]), + "class_string": sample_metadata["class_string"], + # "salience": int(sample_metadata["salience"]), + "fold": sample_metadata["fold"], + "duration": duration, + } + except Exception: + print( + f"There was a problem reading the file:{wav_file}. Skipping duration field for it." + ) + logger.exception( + f"There was a problem reading the file:{wav_file}. Skipping it." 
+ ) + + # Writing the dictionary to the json file + # Need to make sure sub folder "manifest" exists, if not create it + parent_dir = os.path.dirname(json_file) + if not os.path.exists(parent_dir): + os.mkdir(parent_dir) + + with open(json_file, mode="w", encoding="utf-8") as json_f: + json.dump(json_dict, json_f, indent=2) + + logger.info(f"{json_file} successfully created!") + + +def folds_overlap(list1, list2): + """Returns True if any passed lists has incorrect type OR has items in common. + + Arguments + --------- + list1 : list + First list for comparison. + list2 : list + Second list for comparison. + + Returns + ------- + overlap : bool + Whether lists overlap. + """ + if not isinstance(list1, list) or not isinstance(list2, list): + return True + if any(item in list1 for item in list2): + return True + return False + + +def check_folders(*folders): + """Returns False if any passed folder does not exist. + + Arguments + --------- + *folders: list + Folders to check. + + Returns + ------- + pass: bool + """ + for folder in folders: + if not os.path.exists(folder): + return False + return True + + +def full_path_to_audio_file(data_folder, slice_file_name, fold_num): + """Get path to file given slice file name and fold number + + Arguments + --------- + data_folder : str + Folder that contains the dataset. + slice_file_name : str + Filename. + fold_num : int + Fold number. + + Returns + ------- + string containing absolute path to corresponding file + """ + return os.path.join( + os.path.abspath(data_folder), + "audio/", + "fold" + str(fold_num) + "/", + slice_file_name, + ) + + +def create_metadata_speechbrain_file(data_folder): + """Get path to file given slice file name and fold number + + Arguments + --------- + data_folder : str + ESC50 data folder. 
+ + Returns + ------- + string containing absolute path to metadata csv file modified for SpeechBrain or None if source file not found + """ + import pandas as pd + + esc50_metadata_csv_path = os.path.join( + os.path.abspath(data_folder), "meta/esc50.csv" + ) + if not os.path.exists(esc50_metadata_csv_path): + return None + + esc50_metadata_df = pd.read_csv(esc50_metadata_csv_path) + # SpeechBrain wants an ID column + esc50_metadata_df["ID"] = esc50_metadata_df.apply( + lambda row: removesuffix(row["filename"], ".wav"), axis=1 + ) + esc50_metadata_df = esc50_metadata_df.rename( + columns={"category": "class_string"} + ) + + esc50_speechbrain_metadata_csv_path = os.path.join( + os.path.abspath(data_folder), "meta/", MODIFIED_METADATA_FILE_NAME + ) + esc50_metadata_df.to_csv(esc50_speechbrain_metadata_csv_path, index=False) + return esc50_speechbrain_metadata_csv_path + + +def removesuffix(some_string, suffix): + """Removed a suffix from a string + + Arguments + --------- + some_string : str + Any string. + suffix : str + Suffix to be removed from some_string. + + Returns + ------- + string resulting from suffix removed from some_string, if found, unchanged otherwise + """ + if some_string.endswith(suffix): + return some_string[: -1 * len(suffix)] + else: + return some_string + + +def dataio_prep(hparams): + "Creates the datasets and their data processing pipelines." + + data_audio_folder = hparams["audio_data_folder"] + config_sample_rate = hparams["sample_rate"] + label_encoder = sb.dataio.encoder.CategoricalEncoder() + hparams["resampler"] = torchaudio.transforms.Resample( + new_freq=config_sample_rate + ) + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Load the signal, and pass it and its length to the corruption class. 
+ This is done on the CPU in the `collate_fn`.""" + + wave_file = data_audio_folder + f"/{wav}" + + sig, read_sr = audio_io.load(wave_file) + + # If multi-channels, downmix it to a mono channel + sig = torch.squeeze(sig) + if len(sig.shape) > 1: + sig = torch.mean(sig, dim=0) + + # Convert sample rate to required config_sample_rate + if read_sr != config_sample_rate: + # Re-initialize sampler if source file sample rate changed compared to last file + if read_sr != hparams["resampler"].orig_freq: + hparams["resampler"] = torchaudio.transforms.Resample( + orig_freq=read_sr, new_freq=config_sample_rate + ) + # Resample audio + sig = hparams["resampler"].forward(sig) + + sig = sig.float() + sig = sig / sig.max() + return sig + + # 3. Define label pipeline: + @sb.utils.data_pipeline.takes("class_string") + @sb.utils.data_pipeline.provides("class_string", "class_string_encoded") + def label_pipeline(class_string): + yield class_string + class_string_encoded = label_encoder.encode_label_torch(class_string) + yield class_string_encoded + + # Define datasets. We also connect the dataset with the data processing + # functions defined above. + datasets = {} + data_info = { + "train": hparams["train_annotation"], + "valid": hparams["valid_annotation"], + "test": hparams["test_annotation"], + } + for dataset in data_info: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline, label_pipeline], + output_keys=["id", "sig", "class_string_encoded"], + ) + + # Load or compute the label encoder (with multi-GPU DDP support) + # Please, take a look into the lab_enc_file to see the label to index + # mappinng. 
+ lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") + label_encoder.load_or_create( + path=lab_enc_file, + from_didatasets=[datasets["train"]], + output_key="class_string", + ) + + return datasets, label_encoder diff --git a/recipes/ESC50/interpret/README.md b/recipes/ESC50/interpret/README.md new file mode 100644 index 0000000000..63c50c4b10 --- /dev/null +++ b/recipes/ESC50/interpret/README.md @@ -0,0 +1,274 @@ +# Interpretability - ESC50 Dataset + +![image](https://github.com/ycemsubakan/speechbrain-1/assets/16886998/8199f0fb-66ee-4f5a-87ee-349695f7e982) + +The objective of interpretability is to offer an explanation regarding the decision made by a classifier. + +**Post-hoc** interpretation methods aim to build an auxiliary module -- the **interpreter** -- that generates an additional signal in its output +helping the user to better understand why a specific prediction was made by a pre-trained classifier. +You can find some examples [here](https://piqinter.github.io). + +Conversely, **by-design** interpretation methods aim to build an interpretable classifier directly from the data. + +This recipe implements a number of interpretation techniques. + +They utilize pre-trained models obtained from `ESC50/classification`, some of which are readily available in +our HuggingFace repository (e.g., CNN14, Conv2D). + +You can train your own classifier by following the instructions provided in the reference readme under `ESC50/classification`. + +The recipe also makes use of the WHAM! noise dataset, which can be downloaded from [here](http://wham.whisper.ai/). + +--------------------------------------------------------------------------------------------------------- + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
+ +To do this, simply run the following command in your terminal: + +```shell +pip install -r extra_requirements.txt +``` + +--------------------------------------------------------------------------------------------------------- + +## Supported Methods + +Some results that are obtained with this recipe on the OOD evaluation are as follows: + +|Method | AI | AD | AG |FF |Fid-In | SPS | COMP | +|--- |--- |--- |--- |--- | ---- | -- | --- | +|L-MAC | 61.62 | 3.83 | 33.48 | 0.40 | 0.82 | 0.93 | 9.77 | +|L-MAC FT | 58.87 | 4.89 | 30.84 | 0.40 | 0.82 | 0.82 | 10.65 | +|L2I | 6.75 |25.93 |1.25 |0.26 | 0.01 | 0.58 | 11.38 | + +Please, refer to the [L-MAC paper](https://arxiv.org/abs/2403.13086) for more information about the evaluation metrics. + + +### Listenable Maps for Audio Classifiers (L-MAC) + +LMAC trains an interpreter on the classifier's representations to reconstruct interpretations based on an amortized inference loss. + +For more details, refer to our [L-MAC paper](https://arxiv.org/abs/2403.13086). You can also find samples on the [companion website](https://francescopaissan.it/lmac/). + +To train LMAC on a convolutional classifier using the ESC50 dataset, use the `train_lmac.py` script. Run the following command: + +```shell +python train_lmac.py hparams/lmac_cnn14.yaml --data_folder=/yourpath/ESC50 +``` + +Eventually, you can use WHAM! augmentation to boost the interpretations performance, using: +```shell +python train_lmac.py hparams/lmac_cnn14.yaml --data_folder=/yourpath/ESC50 --add_wham_noise True --wham_folder=/yourpath/wham_noise +``` +**Note**: The WHAM! noise dataset can be downloaded from [here](http://wham.whisper.ai/). 
+ +To run the finetuning stage of the interpreter, use +```shell +python train_lmac.py hparams/lmac_cnn14.yaml --data_folder=/yourpath/ESC50 \ + --add_wham_noise True --wham_folder=/yourpath/wham_noise \ + --finetuning True --pretrained_interpreter=/yourLMACcheckpointpath/psi_model.ckpt --g_w 4 +``` +where $g_w$ is the guidance weight for the interpreter. + +#### Specifying the pretrained classifier + +The pretrained classifier to be interpreted is specified with the variables `embedding_model_path`, and `classifier_model_path`. The default model is a model we trained on ESC50, however, if you would like to specify your own model just use paths that point to your own model. + +--------------------------------------------------------------------------------------------------------- + +### Posthoc Interpretation via Quantization (PIQ) + +PIQ utilizes vector quantization on the classifier's representations to reconstruct predictions. + +For more details, refer to our [PIQ paper](https://arxiv.org/abs/2303.12659). You can also find samples on the [companion website](https://piqinter.github.io). + +To train PIQ on a convolutional classifier using the ESC50 dataset, use the `train_piq.py` script. Run the following command: + +```shell +python train_piq.py hparams/piq.yaml --data_folder=/yourpath/ESC50 +``` + +Note that the command above runs the recipe for PIQ for a conv2d classifier used in the PIQ paper. Note that we also have yaml files for interpreting a ViT model and a focalnet using PIQ. (respectively, `piq_vit.yaml`, `piq.yaml`). + +--------------------------------------------------------------------------------------------------------- + +### Listen to Interpret (L2I) + +L2I employs Non-Negative Matrix Factorization to reconstruct the classifier's hidden representation and generate an interpretation audio signal for the classifier decision. + +Read more about L2I in the [L2I paper](https://arxiv.org/abs/2202.11479v2). 
+ +To train an NMF model on the ESC50 dataset, use the `train_nmf.py` script. Run the command below: + +```shell +python train_nmf.py hparams/nmf.yaml --data_folder /yourpath/ESC50 --save_period 30 +``` +Note that the variable `save_period` determines the period with which the reconstructions are saved for debugging purposes. + +Additionally, we provide an L2I interpretation method for a convolutional classifier. To train this method on the ESC50 dataset, use the following command: + +```shell +python train_l2i.py hparams/l2i_conv2d.yaml --data_folder /yourpath/ESC50 +``` +Note that the default l2i script uses the NMF dictionary specified in the hparams yaml file. + +Lastly, we offer the training script for the L2I interpretation method on CNN14. To run this, execute the following command: + +```shell +python train_l2i.py hparams/l2i_cnn14.yaml --data_folder /yourpath/ESC50 +``` + +--------------------------------------------------------------------------------------------------------- + +### Activation Map Thresholding (AMT) + +This method interprets the norm of the activation maps as a measure of importance of each input location to the prediction. + +We obtain an interpretation mask by thresholding these saliency maps at the q-th quantile. +Hence, the quality of the generated interpretation depends on how interpretable the activation maps are. + +Two neural network architectures are currently supported for this method: [FocalNet](https://arxiv.org/abs/2203.11926) and [ViT](https://arxiv.org/abs/2010.11929). +In particular, FocalNet offers a neural network architecture that is interpretable by design. + +For more details, refer to our [FocalNet paper](https://arxiv.org/abs/2402.02754). 
+ +To generate interpretations for the pre-trained FocalNet or ViT classifiers available on HuggingFace, use the following command: + +```shell +python interpret_amt.py hparams/amt_focalnet.yaml --data_folder /yourpath/ESC50 +python interpret_amt.py hparams/amt_vit.yaml --data_folder /yourpath/ESC50 +``` + +Alternatively, you can train your own FocalNet or ViT classifiers using the classification recipe under `ESC50/classification`, +and set the corresponding paths as command line arguments or directly in the configuration file. For example: + +```yaml +embedding_model_path: ../classification/results/focalnet-base-esc50/1234/save/CKPT+2024-02-08+18-59-37+00/embedding_model.ckpt +classifier_model_path: ../classification/results/focalnet-base-esc50/1234/save/CKPT+2024-02-08+18-59-37+00/classifier.ckpt +``` + +## Evaluation and Inference + +### Out of distribution (OOD) tests + +If you want to run tests on the OOD setting, you can use +```shell +python eval.py hparams/.yaml --data_folder /yourpath/esc50 --overlap_type --add_wham_noise False --pretrained_interpreter yourpath/psi_model.ckpt +``` + +Note that overlap type should be either `mixture` (for contaminating signal to be set as other signals from ESC50), `LJSpeech` (for contaminating signal to be set as speech), or `white_noise` (for contaminating signal to be set as white noise). Please refer to the L-MAC paper for the performance obtained in each setting. Note that `yourpath/psi_model.ckpt` should point to the path of the model checkpoint you would like to use. The typical path for `yourpath/psi_model.ckpt` would be similar to `results/LMAC_cnn14/1234/save/CKPT+2024-06-20+16-05-44+00/psi_model.ckpt`. + +Note also that `add_wham_noise` should be set to `False`. + +Another thing to note is that if you use `--overlap_type LJSpeech`, you would need to specify the path via the variable `ljspeech_path`. 
If the LJSpeech dataset is not already downloaded on the path you specify, the code will automatically download it, and use the downloaded data. + + +### In distribution (ID) tests + +If you want to run tests on the ID setting, you can use +```shell +python eval.py hparams/.yaml --data_folder /yourpath/esc50 --add_wham_noise True --wham_folder /yourpath/wham_noise --pretrained_interpreter yourpath/psi_model.ckpt + +``` + +This will evaluate the model using the test set contaminated with WHAM! noise samples. + + +### Single-sample inference + +If you want to run inference on a single sample, you can use the following command: +```shell +python eval.py hparams/.yaml --data_folder /yourpath/esc50 --add_wham_noise True --wham_folder /yourpath/wham_noise --pretrained_interpreter yourpath/psi_model.ckpt --single_sample $PATH_TO_WAV + +``` + +--------------------------------------------------------------------------------------------------------- + +## Notes + +- The recipe automatically downloads the ESC50 dataset. You only need to specify the path to which you would like to download it. + +- All the necessary models are downloaded automatically for each training script. 
+ +--------------------------------------------------------------------------------------------------------- + +## Training Logs + +| Method | Link | +| --- | --- | +| L-MAC | [Link](https://www.dropbox.com/scl/fo/k5r0zdrtkywamazrke2p1/AEP2D4Scu9mQ_McAxRYzWQQ?rlkey=qhwhe8729f2h2zbue88632f8n&st=vt316u20&dl=0) | +| L-MAC FT| [Link](https://www.dropbox.com/scl/fo/kma3iznhjcyoco9slfwck/AMBmOXJAhiDUFs_dllXLaCQ?rlkey=drh04466lj1mca8qfrd31e14g&st=umd9ygj6&dl=0) | +| L2I CNN14 | [Link](https://www.dropbox.com/sh/cli2gm8nb4bthow/AAAKnzU0c80s_Rm7wx4i_Orza?dl=0) | +| L2I Conv2d | [Link](https://www.dropbox.com/sh/gcpk9jye9ka08n0/AAB-m10r1YEH0rJdUMrCwizUa?dl=0) | +| AMT-FocalNet | [Link](https://www.dropbox.com/scl/fo/0hheboei1b35mlrhwj6mt/AOeCdNstN3h8UqFxv0abT7M?rlkey=kx0d1t5v5hqawqwr5ir9weihq&dl=0) | +| AMT-ViT | [Link](https://www.dropbox.com/scl/fo/vlluiqiirlprl3oa7h4sj/APrEFgcIiWjdQhDUEZuNook?rlkey=bhswfspzklypu7k8ndh8lm3st&dl=0) | +| NMF Training | [Link](https://www.dropbox.com/sh/01exv8dt3k6l1kk/AADuKmikAPwMw5wlulojd5Ira?dl=0) | +| PIQ | [Link](https://www.dropbox.com/sh/v1x5ks9t67ftysp/AABo494rDElHTiTpKR_6PP_ua?dl=0) | +| PIQ-FocalNet | [Link](https://www.dropbox.com/scl/fo/6mvxb32f0g1i8b4lkdjoq/AGD1xNF8Of2_IXeEsbpXtQE?rlkey=llefue4rxalqyqwxqtwrn8qii&dl=0) | +| PIQ-ViT | [Link](https://www.dropbox.com/scl/fo/nz4lqwumgz03nanmf9xai/AI21fGwSOzsVvyegTJUEtz4?rlkey=40yjchqgkhcrhbxsa30m3rr6w&dl=0) | + +## Citing + +Please cite our [L-MAC paper](https://arxiv.org/abs/2403.13086) if you use it in your research: + +```bibtex +@inproceedings{lmac, + author={Francesco Paissan and Mirco Ravanelli and Cem Subakan}, + title={{Listenable Maps for Audio Classifiers}}, + year={2024}, + booktitle={Proceedings of the International Conference on Machine Learning (ICML)}, +} +``` + + +Please cite our [PIQ paper](https://arxiv.org/abs/2303.12659) if you use it in your research: + +```bibtex +@misc{paissan2023posthoc, + title={Posthoc Interpretation via Quantization}, + author={Francesco 
Paissan and Cem Subakan and Mirco Ravanelli}, + year={2023}, + eprint={2303.12659}, + archivePrefix={arXiv}, + primaryClass={cs.AI} +} +``` + +Please cite our [FocalNet paper](https://arxiv.org/abs/2402.02754) if you use it in your research: + +```bibtex +@inproceedings{dellalibera2024focal, + title={Focal Modulation Networks for Interpretable Sound Classification}, + author={Luca Della Libera and Cem Subakan and Mirco Ravanelli}, + booktitle={IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) XAI-SA Workshop}, + year={2024}, +} +``` + +If you use **SpeechBrain**, please cite: + +```bibtex +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` + +--------------------------------------------------------------------------------------------------------- + +## About SpeechBrain + +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + +--------------------------------------------------------------------------------------------------------- + + diff --git a/recipes/ESC50/interpret/esc50_prepare.py b/recipes/ESC50/interpret/esc50_prepare.py new file mode 120000 index 0000000000..c52175c3dc --- /dev/null +++ b/recipes/ESC50/interpret/esc50_prepare.py @@ -0,0 +1 @@ +../esc50_prepare.py \ No newline at end of file diff --git a/recipes/ESC50/interpret/eval.py b/recipes/ESC50/interpret/eval.py new file mode 100644 index 
0000000000..4bc9be9828 --- /dev/null +++ b/recipes/ESC50/interpret/eval.py @@ -0,0 +1,311 @@ +#!/usr/bin/python3 +"""This file performs out-of-distribution (OOD) evaluation of interpreters. + +To run this recipe, use the following command: + python eval.py hparams/.yaml --data_folder /yourpath/esc50 --overlap_type --add_wham_noise False + Please refer to README.md for more details. + +Authors + * Francesco Paissan 2024 + * Cem Subakan 2024 +""" + +import os +import random +import sys + +import matplotlib.pyplot as plt +import torch +import torchaudio.datasets as dts +import torchaudio.transforms as T +from esc50_prepare import dataio_prep, prepare_esc50 +from hyperpyyaml import load_hyperpyyaml +from train_l2i import L2I +from train_lmac import LMAC +from wham_prepare import prepare_wham + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.distributed import run_on_main + +eps = 1e-10 + +random.seed(10) + + +class LJSPEECH_split(dts.LJSPEECH): + """Create a Dataset for *LJSpeech-1.1* [:footcite:`ljspeech17`]. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): The URL to download the dataset from. + (default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``) + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"wavs"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). 
+ """ + + def __init__(self, root, url, folder_in_archive, download, train=True): + # super(LJSPEECH_train, self).__init__() + super().__init__(root, url, folder_in_archive, download) + # path = os.path.join('LJSpeech-1.1', folder_in_archive) + # self._flist = glob.glob(path + '/*.wav') + if train: + self._flist = self._flist[:10000] + else: + self._flist = self._flist[-3000:] + print("dataset size = ", len(self._flist)) + + +class ESCContaminated(torch.utils.data.Dataset): + """ESC50 Contaminated dataset + + Arguments + --------- + esc50_ds : dataset + the ESC50 dataset as per training. + cont_d : dataset + the contamination dataset. + overlap_multiplier : int + number of overlaps + overlap_type : str + one of "mixtures" or "LJSpeech" or "white_noise" + """ + + def __init__( + self, esc50_ds, cont_d, overlap_multiplier=2, overlap_type="mixtures" + ): + super().__init__() + + self.esc50_ds = esc50_ds + self.cont_d = cont_d + self.overlap_multiplier = overlap_multiplier + self.overlap_type = overlap_type + + def generate_mixture(self, s1, s2): + s1 = s1 / torch.norm(s1) + s2 = s2 / torch.norm(s2) + + # create the mixture with s2 being the noise (lower gain) + mix = s1 * 0.8 + (s2 * 0.2) + mix = mix / mix.max() + return mix + + def __len__(self): + return len(self.esc50_ds) + + def __getitem__( + self, + idx_mix: int, + ): + sample = self.esc50_ds[idx_mix] + + pool = [i for i in range(len(self.cont_d))] + indices = random.sample(pool, self.overlap_multiplier) + + samples = [ + {k: v for k, v in sample.items()} + for _ in range(self.overlap_multiplier) + ] + for i, idx in enumerate(indices): + if self.overlap_type == "mixtures": + samples[i]["sig"] = self.generate_mixture( + sample["sig"], self.cont_d[idx]["sig"] + ) + + elif self.overlap_type == "LJSpeech": + noise = self.cont_d[idx][0][0] + tfm = T.Resample(22050, 16000) + noise = tfm(noise) + smpl = sample["sig"] + + if noise.shape[0] > smpl.shape[0]: + noise = noise[: smpl.shape[0]] + else: + noise = 
torch.nn.functional.pad( + noise, (0, smpl.shape[0] - noise.shape[0]) + ) + samples[i]["sig"] = self.generate_mixture(smpl, noise) + + elif self.overlap_type == "white_noise": + smp = sample["sig"] / sample["sig"].pow(2).sum().sqrt() + noise = torch.randn(sample["sig"].shape) + noise = noise / noise.pow(2).sum().sqrt() + samples[i]["sig"] = smp + 0.5 * noise + samples[i]["sig"] = samples[i]["sig"] / samples[i]["sig"].max() + + else: + raise ValueError("Overlap type not implemented.") + + return sb.dataio.batch.PaddedBatch(samples) + + +if __name__ == "__main__": + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + if hparams["add_wham_noise"]: + print( + "CAREFUL! You are running ID evaluation. If you want to run OOD, use add_wham_noise=False." + ) + ljspeech_tr = None + if hparams["ljspeech_path"] is not None: + os.makedirs(hparams["ljspeech_path"], exist_ok=True) + ljspeech_tr = LJSPEECH_split( + root=hparams["ljspeech_path"], + url="https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2", + folder_in_archive="wavs", + download=True, + train=True, + ) + + if hparams["overlap_type"] == "LJSpeech": + assert ljspeech_tr is not None, ( + "Specify a path if you want to generate OOD with LJSpeech." 
+ ) + + run_on_main( + prepare_esc50, + kwargs={ + "data_folder": hparams["data_folder"], + "audio_data_folder": hparams["audio_data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "train_fold_nums": hparams["train_fold_nums"], + "valid_fold_nums": hparams["valid_fold_nums"], + "test_fold_nums": hparams["test_fold_nums"], + "skip_manifest_creation": hparams["skip_manifest_creation"], + }, + ) + + # Dataset IO prep: creating Dataset objects and proper encodings for phones + datasets, label_encoder = dataio_prep(hparams) + hparams["label_encoder"] = label_encoder + + # create WHAM dataset according to hparams + if "wham_folder" in hparams: + hparams["wham_dataset"] = prepare_wham( + hparams["wham_folder"], + hparams["add_wham_noise"], + hparams["sample_rate"], + hparams["signal_length_s"], + hparams["wham_audio_folder"], + ) + assert hparams["signal_length_s"] == 5, "Fix wham sig length!" + assert hparams["out_n_neurons"] == 50, "Fix number of outputs classes!" + + assert hparams["use_pretrained"], "Load a model checkpoint during eval." 
+ if "pretrained_esc50" in hparams and hparams["use_pretrained"]: + print("Loading model...") + run_on_main(hparams["pretrained_esc50"].collect_files) + hparams["pretrained_esc50"].load_collected() + + hparams["embedding_model"].to(run_opts["device"]) + hparams["classifier"].to(run_opts["device"]) + hparams["embedding_model"].eval() + hparams["classifier"].eval() + + overlap_type = hparams["overlap_type"] + if overlap_type == "white_noise": + overlap_dataset = datasets["test"] + elif overlap_type == "mixtures": + overlap_dataset = datasets["test"] + elif overlap_type == "LJSpeech": + overlap_dataset = ljspeech_tr + else: + raise ValueError("Not a valid overlap type") + + ood_dataset = ESCContaminated( + datasets["valid"], overlap_dataset, overlap_type=overlap_type + ) + + # if add_wham_noise == True then ood_dataset is simply in domain + if hparams["add_wham_noise"]: + ood_dataset = datasets["valid"] + + assert hparams["pretrained_interpreter"] is not None, ( + "You need to specify a path for the pretrained_interpreter!" 
+ ) + hparams["psi_model"].load_state_dict( + torch.load(hparams["pretrained_interpreter"], map_location="cpu") + ) + + if hparams["int_method"] == "lmac": + Interpreter = LMAC( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + ) + elif hparams["int_method"] == "l2i": + # hparams["nmf_decoder"].load_state_dict( + # torch.load(hparams["nmf_decoder_path"], map_location="cpu") + # ) + hparams["nmf_decoder"].to(run_opts["device"]) + hparams["nmf_decoder"].eval() + + Interpreter = L2I( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + ) + + if hparams["single_sample"] is None: + Interpreter.evaluate( + test_set=ood_dataset, + min_key="loss", + progressbar=True, + test_loader_kwargs=( + {"collate_fn": lambda x: x[0], "batch_size": 1} + if not hparams["add_wham_noise"] + else {"batch_size": 2} + ), + ) + + else: + wav, sr = audio_io.load(hparams["single_sample"]) + wav = T.Resample(sr, hparams["sample_rate"])(wav).to(run_opts["device"]) + + with torch.no_grad(): + X_int, _, X_stft_phase, X_orig = ( + Interpreter.interpret_computation_steps(wav) + ) + + # make sure shapes are ok + X_int = X_int.transpose(1, 2) + X_orig = X_orig[:, : X_int.shape[1]] + X_stft_phase = X_stft_phase[:, : X_int.shape[1]] + + def plot_spec(X, suffix=""): + X = X.expm1() + X = X ** (1 / 3) + + plt.figure(figsize=(5, 5)) + plt.matshow( + X.cpu().numpy()[0].T, + aspect="auto", + origin="lower", + cmap="inferno", + ) + plt.axis("off") + plt.savefig( + ".".join(hparams["single_sample"].split(".")[:-1]) + + f"_{suffix}.pdf" + ) + + plot_spec(X_int, "int") + plot_spec(X_orig, "orig") + + X_int = X_int[..., None] + xhat_tm = Interpreter.invert_stft_with_phase(X_int, X_stft_phase).cpu() + + audio_io.save( + ".".join(hparams["single_sample"].split(".")[:-1]) + "_int.wav", + xhat_tm, + hparams["sample_rate"], + ) diff --git a/recipes/ESC50/interpret/extra_requirements.txt 
b/recipes/ESC50/interpret/extra_requirements.txt new file mode 100644 index 0000000000..0de9c3a457 --- /dev/null +++ b/recipes/ESC50/interpret/extra_requirements.txt @@ -0,0 +1,6 @@ +matplotlib +pandas +quantus==0.5.3 +scikit-learn +torchvision +transformers diff --git a/recipes/ESC50/interpret/hparams/amt_focalnet.yaml b/recipes/ESC50/interpret/hparams/amt_focalnet.yaml new file mode 100644 index 0000000000..8b664c6a00 --- /dev/null +++ b/recipes/ESC50/interpret/hparams/amt_focalnet.yaml @@ -0,0 +1,122 @@ +# ################################# +# Recipe to interpret a FocalNet audio classifier by-design via activation map thresholding (AMT) +# applied to its modulation maps. +# +# Author: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# * Luca Della Libera 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: amt_focalnet-base +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +save_interpretations: False +concat_sources: False + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 100 +batch_size: 6 +lr: 0.0002 +sample_rate: 16000 
+use_mask_output: True +mask_th: 0.35 + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + counter: !ref + +use_pretrained: True + +embedding_model: !apply:transformers.FocalNetBackbone.from_pretrained [microsoft/focalnet-base] + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 1024 + out_neurons: !ref + lin_blocks: 1 + +embedding_model_path: speechbrain/focalnet-base-esc50/embedding_model.ckpt +classifier_model_path: speechbrain/focalnet-base-esc50/classifier.ckpt + +pretrained_esc50: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + classifier: !ref + paths: + embedding_model: !ref + classifier: !ref + +# Interpretation hyperparams +quantile: 0.90 + +# Pre-processing +n_fft: 1024 +spec_mag_power: 0.5 +hop_length: 11.6099 +win_length: 23.2199 + +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_istft: !new:speechbrain.processing.features.ISTFT + sample_rate: !ref + hop_length: !ref + win_length: !ref + +modules: + compute_stft: !ref + compute_istft: !ref diff --git a/recipes/ESC50/interpret/hparams/amt_vit.yaml b/recipes/ESC50/interpret/hparams/amt_vit.yaml new file mode 100644 index 0000000000..d7326bf9e4 --- /dev/null +++ b/recipes/ESC50/interpret/hparams/amt_vit.yaml @@ -0,0 +1,125 @@ +# ################################# +# Recipe to interpret a ViT audio classifier by-design via activation map thresholding (AMT) +# applied to its attention 
maps. +# +# Author: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# * Luca Della Libera 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: amt_vit-base +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +save_interpretations: False +concat_sources: False + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 100 +batch_size: 6 +lr: 0.0002 +sample_rate: 16000 +use_mask_output: True +mask_th: 0.35 + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + counter: !ref + +use_pretrained: True + +embedding_model: !apply:transformers.ViTModel.from_pretrained + pretrained_model_name_or_path: 
google/vit-base-patch16-224 + attn_implementation: eager + + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 768 + out_neurons: !ref + lin_blocks: 1 + +embedding_model_path: speechbrain/vit-base-esc50/embedding_model.ckpt +classifier_model_path: speechbrain/vit-base-esc50/classifier.ckpt + +pretrained_esc50: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + classifier: !ref + paths: + embedding_model: !ref + classifier: !ref + +# Interpretation hyperparams +quantile: 0.90 + +# Pre-processing +n_fft: 1024 +spec_mag_power: 0.5 +hop_length: 11.6099 +win_length: 23.2199 + +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_istft: !new:speechbrain.processing.features.ISTFT + sample_rate: !ref + hop_length: !ref + win_length: !ref + +modules: + compute_stft: !ref + compute_istft: !ref diff --git a/recipes/ESC50/interpret/hparams/l2i_cnn14.yaml b/recipes/ESC50/interpret/hparams/l2i_cnn14.yaml new file mode 100644 index 0000000000..7913359f84 --- /dev/null +++ b/recipes/ESC50/interpret/hparams/l2i_cnn14.yaml @@ -0,0 +1,192 @@ +# ################################# +# The recipe for training an L2I interpretability method on the ESC50 dataset. 
+# +# Author: +# * Cem Subakan 2022, 2023, 2024 +# * Francesco Paissan 2022, 2023, 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +overlap_type: "mixtures" + +int_method: "l2i" + +# this is needed for ood evaluation with ljspeech, when using eval.py +ljspeech_path: null + +single_sample: null + +# this is needed when using eval.py to specify the path for the psi_model.ckpt +# the typical path would be similar to results/LMAC_cnn14/1234/save/CKPT+2024-06-20+16-05-44+00/psi_model.ckpt +pretrained_interpreter: null + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +add_wham_noise: False +signal_length_s: 5 +wham_folder: null +wham_audio_folder: !ref /tr + +experiment_name: L2I_cnn14 +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +save_interpretations: False +classifier_temp: 1 # classifier temperature for the auxiliary L2I classifier + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 200 +batch_size: 2 +lr: 0.0001 +sample_rate: 16000 +interpret_period: 1 +relevance_th: 0.2 + +# Feature parameters +n_mels: 80 +left_frames: 0 +right_frames: 0 +deltas: False + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +epoch_counter: 
!new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau + factor: 0.5 + patience: 3 + dont_halve_until_epoch: 50 + + +use_melspectra_log1p: True + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + theta: !ref + psi_model: !ref + counter: !ref + +compute_features: !new:speechbrain.lobes.features.Fbank + n_mels: !ref + left_frames: !ref + right_frames: !ref + deltas: !ref + sample_rate: !ref + n_fft: 1024 + win_length: 20 + hop_length: 10 + +embedding_model: !new:speechbrain.lobes.models.Cnn14.Cnn14 + mel_bins: !ref + emb_dim: 2048 + return_reps: True + l2i: False + +classifier: !new:torch.nn.Linear + in_features: 2048 + out_features: !ref + +# Interpretation hyperparams +K: 100 +n_freq: 513 + +# pre-processing +n_fft: 1024 +spec_mag_power: 0.5 +hop_length: 11.6099 +win_length: 23.2199 +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_fbank: !new:speechbrain.processing.features.Filterbank + n_mels: 80 + n_fft: !ref + sample_rate: !ref + log_mel: False + +compute_istft: !new:speechbrain.processing.features.ISTFT + sample_rate: !ref + hop_length: !ref + win_length: !ref + +psi_model: !new:speechbrain.lobes.models.L2I.CNN14PSI_stft_2d + dim: 2048 + K: !ref + +theta: !new:speechbrain.lobes.models.L2I.Theta + num_classes: !ref + +# NMF Decoder +nmf_decoder: !new:speechbrain.lobes.models.L2I.NMFDecoderAudio + n_comp: !ref + n_freq: !ref + +alpha: 10 # applied to NMF loss +beta: 0.8 # L1 regularization to time activations + +modules: + compute_stft: !ref + compute_fbank: !ref + compute_istft: !ref + compute_features: !ref + psi: !ref + theta: !ref + +embedding_model_path: 
'fpaissan/r/embedding_model.ckpt' +classifier_model_path: 'fpaissan/r/classifier.ckpt' + +nmf_decoder_path: 'speechbrain/PIQ-ESC50/nmf_decoder.ckpt' +use_pretrained: True +pretrained_esc50: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + classifier: !ref + nmf_decoder: !ref + paths: + embedding_model: !ref + classifier: !ref + nmf_decoder: !ref diff --git a/recipes/ESC50/interpret/hparams/l2i_conv2d.yaml b/recipes/ESC50/interpret/hparams/l2i_conv2d.yaml new file mode 100644 index 0000000000..5f12bee5e5 --- /dev/null +++ b/recipes/ESC50/interpret/hparams/l2i_conv2d.yaml @@ -0,0 +1,176 @@ +# ################################# +# The recipe for training an L2I interpretability method on the ESC50 dataset. +# +# Author: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023, 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +int_method: "l2i" +add_wham_noise: False +overlap_type: "mixtures" + +# this is needed for ood evaluation with ljspeech, when using eval.py +ljspeech_path: null + +single_sample: null + +# this is needed when using eval.py to specify the path for the psi_model.ckpt +# the typical path would be similar to results/LMAC_cnn14/1234/save/CKPT+2024-06-20+16-05-44+00/psi_model.ckpt +pretrained_interpreter: null + +experiment_name: L2I +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +wham_folder: null +wham_audio_folder: !ref /tr + +test_only: False +save_interpretations: False + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will 
be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 200 +batch_size: 16 +lr: 0.00002 +sample_rate: 16000 +interpret_period: 1 +relevance_th: 0.2 +signal_length_s: 5 +use_melspectra_log1p: False + + +# Feature parameters +n_mels: 80 + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.0001 + +lr_annealing: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau + factor: 0.5 + patience: 3 + dont_halve_until_epoch: 50 + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + theta: !ref + psi_model: !ref + counter: !ref + +embedding_model: !new:speechbrain.lobes.models.PIQ.Conv2dEncoder_v2 + dim: 256 + +classifier_temp: 0.01 # classifier temperature for the auxiliary L2I classifier +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 256 + out_neurons: !ref + lin_blocks: 1 + +# Interpretation hyperparams +K: 100 +n_freq: 513 + +# pre-processing +n_fft: 1024 +spec_mag_power: 0.5 +hop_length: 11.6099 +win_length: 23.2199 +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_fbank: !new:speechbrain.processing.features.Filterbank + n_mels: !ref + n_fft: !ref + sample_rate: !ref + +compute_istft: !new:speechbrain.processing.features.ISTFT + sample_rate: !ref + hop_length: !ref + win_length: !ref + 
+psi_model: !new:speechbrain.lobes.models.L2I.PsiOptimized + dim: 256 + K: !ref + use_adapter: False + adapter_reduce_dim: False + +theta: !new:speechbrain.lobes.models.L2I.Theta + n_comp: !ref + T: 417 + +# NMF Decoder +nmf_decoder: !new:speechbrain.lobes.models.L2I.NMFDecoderAudio + n_comp: !ref + n_freq: !ref + +alpha: 10 # applied to NMF loss +beta: 0.8 # L1 regularization to time activations + +modules: + compute_stft: !ref + compute_fbank: !ref + compute_istft: !ref + psi: !ref + theta: !ref + +embedding_model_path: "speechbrain/PIQ-ESC50/embedding_modelft.ckpt" +classifier_model_path: "speechbrain/PIQ-ESC50/classifier.ckpt" +nmf_decoder_path: "speechbrain/PIQ-ESC50/nmf_decoder.ckpt" +use_pretrained: True +pretrained_esc50: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + classifier: !ref + nmf_decoder: !ref + paths: + embedding_model: !ref + classifier: !ref + nmf_decoder: !ref diff --git a/recipes/ESC50/interpret/hparams/lmac_cnn14.yaml b/recipes/ESC50/interpret/hparams/lmac_cnn14.yaml new file mode 100644 index 0000000000..ce6347741a --- /dev/null +++ b/recipes/ESC50/interpret/hparams/lmac_cnn14.yaml @@ -0,0 +1,180 @@ +# ################################# +# The recipe for training LMAC on the ESC50 dataset. 
+# +# Author: +# * Francesco Paissan 2024 +# * Cem Subakan 2024 +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +# Dataset must already exist at `audio_data_folder` +data_folder: /mnt/data/ESC50 # e.g., /localscratch/UrbanSound8K +audio_data_folder: !ref /audio + +single_sample: null + +int_method: "lmac" +overlap_type: "mixtures" + +l_in_w: 4 +l_out_w: 0.2 +reg_w_tv: 0.0 +reg_w_l1: 0.4 + +g_w: 4 # regularization weight of oracles +crosscor_th: 0.6 +bin_th: 0.35 # needed to binarize guidance spectrograms +finetuning: False +guidelosstype: binary +crosscortype: 'dotp' + +experiment_name: LMAC_cnn14 +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +wham_folder: null +wham_audio_folder: !ref /tr + +test_only: False +save_interpretations: True +interpret_period: 10 + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + + +# this is needed for ood evaluation with ljspeech, when using eval.py +ljspeech_path: null + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 200 +batch_size: 16 +lr: 0.0002 +sample_rate: 16000 +use_mask_output: True +signal_length_s: 5 +add_wham_noise: False + + +# Feature parameters +n_mels: 80 + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + 
+epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau + factor: 0.5 + patience: 3 + dont_halve_until_epoch: 100 + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + psi_model: !ref + counter: !ref + + +return_reps: True +embedding_model: !new:speechbrain.lobes.models.Cnn14.Cnn14 + mel_bins: !ref + emb_dim: 2048 + return_reps: !ref + +classifier: !new:torch.nn.Linear + in_features: 2048 + out_features: !ref + + +# this is needed when using eval.py to specify the path for the psi_model.ckpt +# the typical path would be similar to results/LMAC_cnn14/1234/save/CKPT+2024-06-20+16-05-44+00/psi_model.ckpt +pretrained_interpreter: null + +load_pretrained: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + psi: !ref + paths: + psi: !ref + +use_pretrained: True +embedding_model_path: fpaissan/r/embedding_model.ckpt +classifier_model_path: fpaissan/r/classifier.ckpt + +pretrained_esc50: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + classifier: !ref + paths: + embedding_model: !ref + classifier: !ref + +# pre-processing +n_fft: 1024 +hop_length: 11.6099 +win_length: 23.2199 +use_melspectra_log1p: True +use_stft2mel: True + +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_fbank: !new:speechbrain.processing.features.Filterbank + n_mels: !ref + n_fft: !ref + sample_rate: !ref + log_mel: False + +compute_istft: !new:speechbrain.processing.features.ISTFT + sample_rate: !ref + hop_length: !ref + win_length: !ref + +psi_model: 
!new:speechbrain.lobes.models.Cnn14.CNN14PSI_stft + dim: 2048 + +modules: + compute_stft: !ref + compute_fbank: !ref + compute_istft: !ref + psi: !ref diff --git a/recipes/ESC50/interpret/hparams/nmf.yaml b/recipes/ESC50/interpret/hparams/nmf.yaml new file mode 100644 index 0000000000..1a28c6defc --- /dev/null +++ b/recipes/ESC50/interpret/hparams/nmf.yaml @@ -0,0 +1,100 @@ +# ################################# +# The hyperparameters to train an NMF model on ESC50 +# +# Author: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: train_nmf +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt +save_period: 10 + +# if true we save the nmf dictionary at the end of training +save_nmfdictionary: False +nmf_savepath: nmf_dictionary.pt + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 300 +batch_size: 2 +lr: 0.00005 +sample_rate: 44100 +signal_length_s: 5 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +## Data augmentation +compute_stft: 
!new:speechbrain.processing.features.STFT + n_fft: 1024 + hop_length: 11.6099 + win_length: 23.2199 + sample_rate: !ref + +compute_stft_mag: !name:speechbrain.processing.features.spectral_magnitude + power: 0.5 + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + + +# Interpretation hyperparams +K: 100 +N_FREQ: 513 + +# NMF Decoder +nmf_decoder: !new:speechbrain.lobes.models.L2I.NMFDecoderAudio + n_comp: !ref + n_freq: !ref + # init_file: !ref + +nmf_encoder: !new:speechbrain.lobes.models.L2I.NMFEncoder + n_comp: !ref + n_freq: !ref + +modules: + nmf_decoder: !ref + nmf_encoder: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + nmf_decoder: !ref + nmf_encoder: !ref + counter: !ref diff --git a/recipes/ESC50/interpret/hparams/piq.yaml b/recipes/ESC50/interpret/hparams/piq.yaml new file mode 100644 index 0000000000..1fc0de4b60 --- /dev/null +++ b/recipes/ESC50/interpret/hparams/piq.yaml @@ -0,0 +1,147 @@ +# ################################# +# The recipe for training PIQ on the ESC50 dataset. 
+# +# Author: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: piq +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +save_interpretations: False +interpret_period: 10 + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 200 +batch_size: 16 +lr: 0.0002 +sample_rate: 16000 +use_vq: True +rec_loss_coef: 1 +use_mask_output: True +mask_th: 0.35 +use_melspectra_log1p: False + + +# Feature parameters +n_mels: 80 + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau + factor: 0.5 + patience: 3 + dont_halve_until_epoch: 100 + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + 
checkpoints_dir: !ref + recoverables: + psi_model: !ref + counter: !ref + +use_pretrained: True + +# embedding_model: !new:custom_models.Conv2dEncoder_v2 +embedding_model: !new:speechbrain.lobes.models.PIQ.Conv2dEncoder_v2 + dim: 256 + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 256 + out_neurons: !ref + lin_blocks: 1 + +embedding_model_path: "speechbrain/PIQ-ESC50/embedding_modelft.ckpt" +classifier_model_path: "speechbrain/PIQ-ESC50/classifier.ckpt" +pretrained_esc50: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + classifier: !ref + paths: + embedding_model: !ref + classifier: !ref + +# Interpretation hyperparams +K: 1024 + +# pre-processing +n_fft: 1024 +hop_length: 11.6099 +win_length: 23.2199 +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_fbank: !new:speechbrain.processing.features.Filterbank + n_mels: !ref + n_fft: !ref + sample_rate: !ref + +compute_istft: !new:speechbrain.processing.features.ISTFT + sample_rate: !ref + hop_length: !ref + win_length: !ref + +psi_model: !new:speechbrain.lobes.models.PIQ.VectorQuantizedPSI_Audio + dim: 256 + K: !ref + shared_keys: 0 + activate_class_partitioning: True + use_adapter: True + adapter_reduce_dim: True + +modules: + compute_stft: !ref + compute_fbank: !ref + compute_istft: !ref + psi: !ref diff --git a/recipes/ESC50/interpret/hparams/piq_focalnet.yaml b/recipes/ESC50/interpret/hparams/piq_focalnet.yaml new file mode 100644 index 0000000000..becb80a85f --- /dev/null +++ b/recipes/ESC50/interpret/hparams/piq_focalnet.yaml @@ -0,0 +1,138 @@ +# ################################# +# This recipe trains PIQ to interpret a FocalNet audio classifier. 
+# +# Author: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# * Luca Della Libera 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: piq_focalnet-base +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +save_interpretations: False +interpret_period: 10 + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 100 +batch_size: 6 +lr: 0.0002 +sample_rate: 16000 +use_vq: True +rec_loss_coef: 1 +use_mask_output: True +mask_th: 0.35 + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau + factor: 0.5 + patience: 3 + dont_halve_until_epoch: 100 + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + 
recoverables: + psi_model: !ref + counter: !ref + +use_pretrained: True + +embedding_model: !apply:transformers.FocalNetBackbone.from_pretrained [microsoft/focalnet-base] + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 1024 + out_neurons: !ref + lin_blocks: 1 + +embedding_model_path: speechbrain/focalnet-base-esc50/embedding_model.ckpt +classifier_model_path: speechbrain/focalnet-base-esc50/classifier.ckpt + +pretrained_esc50: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + classifier: !ref + paths: + embedding_model: !ref + classifier: !ref + +# Interpretation hyperparams +K: 1024 + +# Pre-processing +n_fft: 1024 +hop_length: 11.6099 +win_length: 23.2199 +use_melspectra_log1p: False + +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_istft: !new:speechbrain.processing.features.ISTFT + sample_rate: !ref + hop_length: !ref + win_length: !ref + +psi_model: !new:speechbrain.lobes.models.PIQ.VectorQuantizedPSIFocalNet_Audio + dim: 1024 + K: !ref + shared_keys: 0 + activate_class_partitioning: True + use_adapter: True + adapter_reduce_dim: True + +modules: + compute_stft: !ref + compute_istft: !ref + psi: !ref diff --git a/recipes/ESC50/interpret/hparams/piq_vit.yaml b/recipes/ESC50/interpret/hparams/piq_vit.yaml new file mode 100644 index 0000000000..6bf67c7c42 --- /dev/null +++ b/recipes/ESC50/interpret/hparams/piq_vit.yaml @@ -0,0 +1,138 @@ +# ################################# +# This recipe trains PIQ to interpret a ViT audio classifier. 
+# +# Author: +# * Cem Subakan 2022, 2023 +# * Francesco Paissan 2022, 2023 +# * Luca Della Libera 2024 +# (based on the SpeechBrain UrbanSound8k recipe) +# ################################# + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Set up folders for reading from and writing to +data_folder: !PLACEHOLDER # e.g., /localscratch/ESC-50-master +audio_data_folder: !ref /audio + +experiment_name: piq_vit-base +output_folder: !ref ./results// +save_folder: !ref /save +train_log: !ref /train_log.txt + +save_interpretations: False +interpret_period: 10 + +# Tensorboard logs +use_tensorboard: False +tensorboard_logs_folder: !ref /tb_logs/ + +# Path where data manifest files will be stored +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json + +# To standardize results, UrbanSound8k has pre-separated samples into +# 10 folds for multi-fold validation +train_fold_nums: [1, 2, 3] +valid_fold_nums: [4] +test_fold_nums: [5] +skip_manifest_creation: False + +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 100 +batch_size: 6 +lr: 0.0002 +sample_rate: 16000 +use_vq: True +rec_loss_coef: 1 +use_mask_output: True +mask_th: 0.35 + +# Number of classes +out_n_neurons: 50 + +shuffle: True +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: 0 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau + factor: 0.5 + patience: 3 + dont_halve_until_epoch: 100 + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + 
recoverables: + psi_model: !ref + counter: !ref + +use_pretrained: True + +embedding_model: !apply:transformers.ViTModel.from_pretrained [google/vit-base-patch16-224] + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 768 + out_neurons: !ref + lin_blocks: 1 + +embedding_model_path: speechbrain/vit-base-esc50/embedding_model.ckpt +classifier_model_path: speechbrain/vit-base-esc50/classifier.ckpt + +pretrained_esc50: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + classifier: !ref + paths: + embedding_model: !ref + classifier: !ref + +# Interpretation hyperparams +K: 1024 + +# Pre-processing +n_fft: 1024 +hop_length: 11.6099 +win_length: 23.2199 +use_melspectra_log1p: False + +compute_stft: !new:speechbrain.processing.features.STFT + n_fft: !ref + hop_length: !ref + win_length: !ref + sample_rate: !ref + +compute_istft: !new:speechbrain.processing.features.ISTFT + sample_rate: !ref + hop_length: !ref + win_length: !ref + +psi_model: !new:speechbrain.lobes.models.PIQ.VectorQuantizedPSIViT_Audio + dim: 768 + K: !ref + shared_keys: 0 + activate_class_partitioning: True + use_adapter: True + adapter_reduce_dim: True + +modules: + compute_stft: !ref + compute_istft: !ref + psi: !ref diff --git a/recipes/ESC50/interpret/interpret_amt.py b/recipes/ESC50/interpret/interpret_amt.py new file mode 100644 index 0000000000..27fd7e6db8 --- /dev/null +++ b/recipes/ESC50/interpret/interpret_amt.py @@ -0,0 +1,731 @@ +#!/usr/bin/python3 + +"""Recipe to interpret an audio classifier by-design via activation maps thresholding (AMT). 
+ +To run this recipe, use the following command: +> python intepret_amt.py hparams/.yaml --data_folder /yourpath/ESC-50-master + +Authors + * Cem Subakan 2022, 2023 + * Francesco Paissan 2022, 2023 + * Luca Della Libera 2024 +""" + +import os +import sys + +import matplotlib.pyplot as plt +import torch +import torchaudio +import torchvision +from hyperpyyaml import load_hyperpyyaml +from torch.nn import functional as F + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.processing.NMF import spectral_phase +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.metric_stats import MetricStats + + +class InterpreterESC50Brain(sb.core.Brain): + """Class for interpreter training.""" + + def invert_stft_with_phase(self, X_int, X_stft_phase): + """Inverts STFT spectra given phase.""" + X_stft_phase_sb = torch.cat( + ( + torch.cos(X_stft_phase).unsqueeze(-1), + torch.sin(X_stft_phase).unsqueeze(-1), + ), + dim=-1, + ) + + X_stft_phase_sb = X_stft_phase_sb[:, : X_int.shape[1], :, :] + if X_int.ndim == 3: + X_int = X_int.unsqueeze(-1) + X_wpsb = X_int * X_stft_phase_sb + x_int_sb = self.modules.compute_istft(X_wpsb) + + return x_int_sb + + def preprocess(self, wavs): + """Pre-process wavs.""" + X_stft = self.modules.compute_stft(wavs) + X_stft_power = sb.processing.features.spectral_magnitude( + X_stft, power=self.hparams.spec_mag_power + ) + X_stft_logpower = torch.log1p(X_stft_power) + + return X_stft_logpower, X_stft, X_stft_power + + @torch.no_grad() + def classifier_forward(self, X_stft_logpower): + """The forward pass for the classifier.""" + config = self.hparams.embedding_model.config + # Resize to match expected resolution + net_input = torchvision.transforms.functional.resize( + X_stft_logpower, (config.image_size, config.image_size) + ) + # Expand to have 3 channels + net_input = net_input[:, None, ...].expand(-1, 3, -1, -1) + if config.model_type == "focalnet": + hcat = 
self.hparams.embedding_model(net_input).feature_maps[-1] + embeddings = hcat.mean(dim=(-1, -2)) + modulators = [ + encoder_stage.layers[-1].modulation.modulator + for encoder_stage in self.hparams.embedding_model.focalnet.encoder.stages + ] + modulators = [x.norm(dim=-3, p=2, keepdim=True) for x in modulators] + # Upsample spatial dimensions + modulators = [ + torchvision.transforms.functional.resize( + x, X_stft_logpower.shape[-2:] + ) + for x in modulators + ] + xhat = modulators[-1] + elif config.model_type == "vit": + model_output = self.hparams.embedding_model( + net_input, output_attentions=True + ) + + hcat = model_output.last_hidden_state.movedim(-1, -2) + embeddings = hcat.mean(dim=-1) + + # Take the representations from CLS token + num_heads = self.hparams.embedding_model.config.num_attention_heads + attentions = [x[:, :, 0, 1:] for x in model_output.attentions] + + # Reshape the attention scores to resemble mini patches + num_patches = ( + self.hparams.embedding_model.config.image_size + // self.hparams.embedding_model.config.patch_size + ) + + attentions = [ + x.reshape(-1, num_heads, num_patches, num_patches) + for x in attentions + ] + attentions = [x.mean(dim=-3, keepdim=True) for x in attentions] + # Upsample spatial dimensions + attentions = [ + torchvision.transforms.functional.resize( + x, X_stft_logpower.shape[-2:] + ) + for x in attentions + ] + xhat = attentions[-1] + else: + raise NotImplementedError + + predictions = self.hparams.classifier(embeddings).squeeze(1) + class_pred = predictions.argmax(1) + + threshold = xhat.reshape(len(xhat), -1).quantile( + self.hparams.quantile, dim=-1 + )[:, None, None, None] + xhat[xhat < threshold] = -float("inf") + xhat[xhat >= threshold] = float("inf") + + return xhat, predictions, class_pred + + def interpret_computation_steps(self, wavs, print_probability=False): + """Computation steps to get the interpretation spectrogram.""" + X_stft_logpower, X_stft, X_stft_power = self.preprocess(wavs) + 
X_stft_phase = spectral_phase(X_stft) + + xhat, predictions, class_pred = self.classifier_forward(X_stft_logpower) + if print_probability: + predictions = F.softmax(predictions, dim=1) + class_prob = predictions[0, class_pred].item() + print(f"classifier_prob: {class_prob}") + + xhat = xhat.squeeze(1) + + Tmax = xhat.shape[1] + if self.hparams.use_mask_output: + xhat = F.sigmoid(xhat) + X_int = xhat * X_stft_logpower[:, :Tmax, :] + else: + xhat = F.softplus(xhat) + th = xhat.max() * self.hparams.mask_th + X_int = (xhat > th) * X_stft_logpower[:, :Tmax, :] + + return X_int, X_stft_phase, class_pred, X_stft_logpower, xhat + + def interpret_sample(self, wavs, batch=None): + """Get the interpratation for a given wav file.""" + + # Get the interpretation spectrogram, phase, and the predicted class + X_int, X_stft_phase, pred_cl, _, _ = self.interpret_computation_steps( + wavs + ) + X_stft_phase = X_stft_phase[:, : X_int.shape[1], :] + if batch is not None: + x_int_sb = self.invert_stft_with_phase(X_int, X_stft_phase) + + # Save reconstructed and original spectrograms + os.makedirs( + os.path.join( + self.hparams.output_folder, + "audios_from_interpretation", + ), + exist_ok=True, + ) + + current_class_ind = batch.class_string_encoded.data[0].item() + current_class_name = self.hparams.label_encoder.ind2lab[ + current_class_ind + ] + predicted_class_name = self.hparams.label_encoder.ind2lab[ + pred_cl.item() + ] + audio_io.save( + os.path.join( + self.hparams.output_folder, + "audios_from_interpretation", + f"original_tc_{current_class_name}_pc_{predicted_class_name}.wav", + ), + wavs[0].unsqueeze(0).cpu(), + self.hparams.sample_rate, + ) + + audio_io.save( + os.path.join( + self.hparams.output_folder, + "audios_from_interpretation", + f"interpretation_tc_{current_class_name}_pc_{predicted_class_name}.wav", + ), + x_int_sb.cpu(), + self.hparams.sample_rate, + ) + + return X_int + + def overlap_test(self, batch): + """Interpration test with overlapped audio.""" + wavs, _ = 
batch.sig + wavs = wavs.to(self.device) + + if wavs.shape[0] <= 1: + return + + s1 = wavs[0] + s1 = s1 / s1.max() + s2 = wavs[1] + s2 = s2 / s2.max() + + # Create the mixture with s2 being the noise (lower gain) + if ( + hasattr(self.hparams, "concat_sources") + and self.hparams.concat_sources + ): + length = min(len(s1), len(s2)) + mid = length // 2 + s1[mid:] = 0.0 + s2[:mid] = 0.0 + mix = (s1 + s2).unsqueeze(0) + else: + mix = (s1 * 0.8 + (s2 * 0.2)).unsqueeze(0) + mix = mix / mix.max() + + # Get the interpretation spectrogram, phase, and the predicted class + ( + X_int, + X_stft_phase, + pred_cl, + X_mix, + mask, + ) = self.interpret_computation_steps(mix) + X_int = X_int[0, ...] + X_stft_phase = X_stft_phase[0, : X_int.shape[0], ...].unsqueeze(0) + pred_cl = pred_cl[0, ...] + mask = mask[0, ...] + + temp = torch.expm1(X_int).unsqueeze(0).unsqueeze(-1) + x_int_sb = self.invert_stft_with_phase(temp, X_stft_phase) + + # Save reconstructed and original spectrograms + current_class_ind = batch.class_string_encoded.data[0].item() + current_class_name = self.hparams.label_encoder.ind2lab[ + current_class_ind + ] + predicted_class_name = self.hparams.label_encoder.ind2lab[ + pred_cl.item() + ] + + noise_class_ind = batch.class_string_encoded.data[1].item() + noise_class_name = self.hparams.label_encoder.ind2lab[noise_class_ind] + + out_folder = os.path.join( + self.hparams.output_folder, + "overlap_test", + f"tc_{current_class_name}_nc_{noise_class_name}_pc_{predicted_class_name}", + ) + os.makedirs( + out_folder, + exist_ok=True, + ) + + audio_io.save( + os.path.join(out_folder, "mixture.wav"), + mix.data.cpu(), + self.hparams.sample_rate, + ) + + audio_io.save( + os.path.join(out_folder, "source.wav"), + s1.unsqueeze(0).data.cpu(), + self.hparams.sample_rate, + ) + + audio_io.save( + os.path.join(out_folder, "noise.wav"), + s2.unsqueeze(0).data.cpu(), + self.hparams.sample_rate, + ) + + audio_io.save( + os.path.join(out_folder, "interpretation.wav"), + 
x_int_sb.data.cpu(), + self.hparams.sample_rate, + ) + + plt.figure(figsize=(15, 5), dpi=100) + + plt.subplot(161) + ( + _, + _, + _, + X_s1, + _, + ) = self.interpret_computation_steps(s1.unsqueeze(0)) + X_target = X_s1[0].permute(1, 0)[:, : X_int.shape[1]].cpu() + plt.imshow(X_target, origin="lower") + current_class_ind = batch.class_string_encoded.data[0].item() + current_class_name = self.hparams.label_encoder.ind2lab[ + current_class_ind + ] + plt.title(current_class_name) + plt.colorbar(fraction=0.05) + + plt.subplot(162) + ( + _, + _, + _, + X_s2, + _, + ) = self.interpret_computation_steps(s2.unsqueeze(0)) + X_target = X_s2[0].permute(1, 0)[:, : X_int.shape[1]].cpu() + plt.imshow(X_target, origin="lower") + current_class_ind = batch.class_string_encoded.data[1].item() + current_class_name = self.hparams.label_encoder.ind2lab[ + current_class_ind + ] + plt.title(current_class_name) + plt.colorbar(fraction=0.05) + + plt.subplot(163) + X_target = X_mix[0].permute(1, 0)[:, : X_int.shape[1]].cpu() + plt.imshow(X_target, origin="lower") + predicted_class_name = self.hparams.label_encoder.ind2lab[ + pred_cl.item() + ] + plt.title(predicted_class_name) + plt.colorbar(fraction=0.05) + + plt.subplot(164) + plt.imshow(mask.data.cpu().permute(1, 0), origin="lower") + plt.title("estimated mask") + plt.colorbar(fraction=0.05) + + plt.subplot(165) + plt.imshow(X_int.data.cpu().permute(1, 0).data.cpu(), origin="lower") + plt.title("interpretation") + plt.colorbar(fraction=0.05) + + plt.subplots_adjust() + plt.tight_layout() + plt.savefig(os.path.join(out_folder, "specs.png"), bbox_inches="tight") + plt.close() + + def debug_files(self, X_stft, xhat, X_stft_logpower, batch, wavs): + """The helper function to create debugging images.""" + X_stft_phase = spectral_phase(X_stft) + temp = xhat[0].transpose(0, 1).unsqueeze(0).unsqueeze(-1) + Xspec_est = torch.expm1(temp.permute(0, 2, 1, 3)) + xhat_tm = self.invert_stft_with_phase(Xspec_est, X_stft_phase) + + Tmax = 
Xspec_est.shape[1] + if self.hparams.use_mask_output: + X_masked = xhat[0] * X_stft_logpower[0, :Tmax, :] + else: + th = xhat[0].max() * 0.15 + X_masked = (xhat[0] > th) * X_stft_logpower[0, :Tmax, :] + + X_est_masked = torch.expm1(X_masked).unsqueeze(0).unsqueeze(-1) + xhat_tm_masked = self.invert_stft_with_phase(X_est_masked, X_stft_phase) + + plt.figure(figsize=(10, 5), dpi=100) + + plt.subplot(141) + X_target = X_stft_logpower[0].permute(1, 0)[:, : xhat.shape[1]].cpu() + plt.imshow(X_target, origin="lower") + plt.title("input") + plt.colorbar(fraction=0.05) + + plt.subplot(142) + input_masked = X_target > ( + X_target.max(keepdim=True, dim=-1)[0].max(keepdim=True, dim=-2)[0] + * self.hparams.mask_th + ) + plt.imshow(input_masked, origin="lower") + plt.title("input masked") + plt.colorbar(fraction=0.05) + + plt.subplot(143) + if self.hparams.use_mask_output: + mask = xhat[0] + else: + mask = xhat[0] > th + X_masked = mask * X_stft_logpower[0, :Tmax, :] + plt.imshow(X_masked.permute(1, 0).data.cpu(), origin="lower") + plt.colorbar(fraction=0.05) + plt.title("interpretation") + + plt.subplot(144) + plt.imshow(mask.permute(1, 0).data.cpu(), origin="lower") + plt.colorbar(fraction=0.05) + plt.title("estimated mask") + + out_folder = os.path.join( + self.hparams.output_folder, + "reconstructions", + f"{batch.id[0]}", + ) + os.makedirs( + out_folder, + exist_ok=True, + ) + + plt.subplots_adjust() + plt.tight_layout() + plt.savefig( + os.path.join(out_folder, "reconstructions.png"), + bbox_inches="tight", + ) + plt.close() + + audio_io.save( + os.path.join(out_folder, "reconstruction.wav"), + xhat_tm.data.cpu(), + self.hparams.sample_rate, + ) + + audio_io.save( + os.path.join(out_folder, "reconstruction_masked.wav"), + xhat_tm_masked.data.cpu(), + self.hparams.sample_rate, + ) + + audio_io.save( + os.path.join(out_folder, "true.wav"), + wavs[0:1].data.cpu(), + self.hparams.sample_rate, + ) + + def compute_forward(self, batch, stage): + """Computation pipeline based on 
an encoder + sound classifier.""" + batch = batch.to(self.device) + wavs, lens = batch.sig + + X_stft_logpower, X_stft, X_stft_power = self.preprocess(wavs) + + # Embeddings + sound classifier + xhat, predictions, class_pred = self.classifier_forward(X_stft_logpower) + + xhat = xhat.squeeze(1) + + if self.hparams.use_mask_output: + xhat = F.sigmoid(xhat) + else: + xhat = F.softplus(xhat) + + # Save some samples + if self.hparams.save_interpretations: + wavs = wavs[0].unsqueeze(0) + self.interpret_sample(wavs, batch) + self.overlap_test(batch) + self.debug_files(X_stft, xhat, X_stft_logpower, batch, wavs) + + return predictions, xhat + + def compute_objectives(self, pred, batch, stage): + """Helper function to compute the objectives.""" + predictions, xhat = pred + + batch = batch.to(self.device) + wavs, lens = batch.sig + + uttid = batch.id + classid, _ = batch.class_string_encoded + + X_stft_logpower, X_stft, X_stft_power = self.preprocess(wavs) + + Tmax = xhat.shape[1] + + _, theta_out, _ = self.classifier_forward( + xhat * X_stft_logpower[:, :Tmax, :] + ) + + self.acc_metric.append( + uttid, predict=predictions, target=classid, length=lens + ) + + self.top_3_fidelity.append( + [batch.id] * theta_out.shape[0], theta_out, predictions + ) + + self.faithfulness.append(batch.id, wavs, predictions) + + return torch.as_tensor([0.0], device=self.device) + + def on_stage_start(self, stage, epoch=None): + """Steps taken before stage start.""" + + @torch.no_grad() + def accuracy_value(predict, target, length): + """Computes accuracy.""" + nbr_correct, nbr_total = sb.utils.Accuracy.Accuracy( + predict.unsqueeze(1), target, length + ) + acc = torch.tensor([nbr_correct / nbr_total]) + return acc + + @torch.no_grad() + def compute_fidelity(theta_out, predictions): + """Computes top-k fidelity of interpreter.""" + predictions = F.softmax(predictions, dim=1) + theta_out = F.softmax(theta_out, dim=1) + + pred_cl = torch.argmax(predictions, dim=1) + k_top = torch.argmax(theta_out, 
dim=1) + + # 1 element for each sample in batch, is 0 if pred_cl is in top k + temp = (k_top == pred_cl).float() + + return temp + + @torch.no_grad() + def compute_faithfulness(wavs, predictions): + """Computes the faithfulness metric.""" + X2 = self.interpret_computation_steps(wavs)[0] + + _, predictions_masked, _ = self.classifier_forward(X2) + + predictions = F.softmax(predictions, dim=1) + predictions_masked = F.softmax(predictions_masked, dim=1) + + # Get the prediction indices + pred_cl = predictions.argmax(dim=1, keepdim=True) + + # Get the corresponding output probabilities + predictions_selected = torch.gather( + predictions, dim=1, index=pred_cl + ) + predictions_masked_selected = torch.gather( + predictions_masked, dim=1, index=pred_cl + ) + + faithfulness = ( + predictions_selected - predictions_masked_selected + ).squeeze(1) + + return faithfulness + + self.top_3_fidelity = MetricStats(metric=compute_fidelity) + self.faithfulness = MetricStats(metric=compute_faithfulness) + self.acc_metric = sb.utils.metric_stats.MetricStats( + metric=accuracy_value, n_jobs=1 + ) + return super().on_stage_start(stage, epoch) + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of an epoch. 
+ Plots in subplots the values of `self.batch_to_plot` and saves the + plot to the experiment folder `self.hparams.output_folder`.""" + current_fid = self.top_3_fidelity.summarize("average") + test_stats = { + "acc": self.acc_metric.summarize("average"), + "input_fidelity": current_fid, + "faithfulness_median": torch.Tensor( + self.faithfulness.scores + ).median(), + "faithfulness_mean": torch.Tensor(self.faithfulness.scores).mean(), + } + + # The train_logger writes a summary to stdout and to the log file + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch}, test_stats=test_stats + ) + + +def dataio_prep(hparams): + """Creates the datasets and their data processing pipelines.""" + data_audio_folder = hparams["audio_data_folder"] + config_sample_rate = hparams["sample_rate"] + label_encoder = sb.dataio.encoder.CategoricalEncoder() + hparams["resampler"] = torchaudio.transforms.Resample( + new_freq=config_sample_rate + ) + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Load the signal, and pass it and its length to the corruption class. + This is done on the CPU in the `collate_fn`.""" + + wave_file = data_audio_folder + f"/{wav}" + + sig, read_sr = audio_io.load(wave_file) + + # If multi-channels, downmix it to a mono channel + sig = torch.squeeze(sig) + if len(sig.shape) > 1: + sig = torch.mean(sig, dim=0) + + # Convert sample rate to required config_sample_rate + if read_sr != config_sample_rate: + # Re-initialize sampler if source file sample rate changed compared to last file + if read_sr != hparams["resampler"].orig_freq: + hparams["resampler"] = torchaudio.transforms.Resample( + orig_freq=read_sr, new_freq=config_sample_rate + ) + # Resample audio + sig = hparams["resampler"].forward(sig) + + sig = sig.float() + sig = sig / sig.max() + return sig + + # 3. 
Define label pipeline: + @sb.utils.data_pipeline.takes("class_string") + @sb.utils.data_pipeline.provides("class_string", "class_string_encoded") + def label_pipeline(class_string): + """The label pipeline.""" + yield class_string + class_string_encoded = label_encoder.encode_label_torch(class_string) + yield class_string_encoded + + # Define datasets. We also connect the dataset with the data processing + # functions defined above. + datasets = {} + data_info = { + "train": hparams["train_annotation"], + "valid": hparams["valid_annotation"], + "test": hparams["test_annotation"], + } + for dataset in data_info: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline, label_pipeline], + output_keys=["id", "sig", "class_string_encoded"], + ) + + # Load or compute the label encoder (with multi-GPU DDP support) + # Please, take a look into the lab_enc_file to see the label to index + # mapping. 
+ lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") + label_encoder.load_or_create( + path=lab_enc_file, + from_didatasets=[datasets["train"]], + output_key="class_string", + ) + + return datasets, label_encoder + + +if __name__ == "__main__": + # This flag enables the built-in cuDNN auto-tuner + # torch.backends.cudnn.benchmark = True + + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Load hyperparameters file with command-line overrides + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # classifier is fixed here + hparams["embedding_model"].eval() + hparams["classifier"].eval() + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Tensorboard logging + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs_folder"] + ) + + from esc50_prepare import prepare_esc50 + + run_on_main( + prepare_esc50, + kwargs={ + "data_folder": hparams["data_folder"], + "audio_data_folder": hparams["audio_data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "train_fold_nums": hparams["train_fold_nums"], + "valid_fold_nums": hparams["valid_fold_nums"], + "test_fold_nums": hparams["test_fold_nums"], + "skip_manifest_creation": hparams["skip_manifest_creation"], + }, + ) + + # Dataset IO prep: creating Dataset objects and proper encodings for phones + datasets, label_encoder = dataio_prep(hparams) + hparams["label_encoder"] = label_encoder + + class_labels = list(label_encoder.ind2lab.values()) + 
print("Class Labels:", class_labels) + + Interpreter_brain = InterpreterESC50Brain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + if "pretrained_esc50" in hparams and hparams["use_pretrained"]: + print("Loading model...") + run_on_main(hparams["pretrained_esc50"].collect_files) + hparams["pretrained_esc50"].load_collected() + + hparams["embedding_model"].to(run_opts["device"]) + hparams["classifier"].to(run_opts["device"]) + + test_stats = Interpreter_brain.evaluate( + test_set=datasets["test"], + min_key="loss", + progressbar=True, + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/ESC50/interpret/interpreter_brain.py b/recipes/ESC50/interpret/interpreter_brain.py new file mode 100644 index 0000000000..5f5a2ba8e0 --- /dev/null +++ b/recipes/ESC50/interpret/interpreter_brain.py @@ -0,0 +1,438 @@ +"""This is a parent class for the interpretability recipes. + +Authors + * Francesco Paissan 2022, 2023, 2024 + * Cem Subakan 2022, 2023, 2024 + * Luca Della Libera 2024 +""" + +import os + +import matplotlib.pyplot as plt +import quantus +import torch +import torchvision +from torch.nn import functional as F + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.metric_stats import MetricStats + +eps = 1e-10 + + +class InterpreterBrain(sb.core.Brain): + """Class for interpreter training.""" + + def invert_stft_with_phase(self, X_int, X_stft_phase): + """Inverts STFT spectra given phase.""" + X_stft_phase_sb = torch.cat( + ( + torch.cos(X_stft_phase).unsqueeze(-1), + torch.sin(X_stft_phase).unsqueeze(-1), + ), + dim=-1, + ) + + X_stft_phase_sb = X_stft_phase_sb[:, : X_int.shape[1], :, :] + if X_int.ndim == 3: + X_int = X_int.unsqueeze(-1) + X_wpsb = X_int * X_stft_phase_sb + x_int_sb = self.modules.compute_istft(X_wpsb) + + return x_int_sb + + def preprocess(self, wavs): + """Pre-process wavs.""" + X_stft 
= self.modules.compute_stft(wavs) + X_stft_power = sb.processing.features.spectral_magnitude( + X_stft, power=0.5 + ) + + X_mel, X_mel_log1p = [None] * 2 + if self.hparams.use_melspectra_log1p: + X_mel = self.hparams.compute_fbank(X_stft_power) + X_mel_log1p = torch.log1p(X_mel) + + X_stft_logpower = torch.log1p(X_stft_power) + + return X_stft_logpower, X_mel_log1p, X_stft, X_stft_power + + def classifier_forward(self, X_stft_logpower): + """The forward pass for the classifier.""" + if hasattr(self.hparams.embedding_model, "config"): + # Hugging Face model + config = self.hparams.embedding_model.config + # Resize to match expected resolution + net_input = torchvision.transforms.functional.resize( + X_stft_logpower, (config.image_size, config.image_size) + ) + # Expand to have 3 channels + net_input = net_input[:, None, ...].expand(-1, 3, -1, -1) + if config.model_type == "focalnet": + hcat = self.hparams.embedding_model(net_input).feature_maps[-1] + embeddings = hcat.mean(dim=(-1, -2)) + # Upsample spatial dimensions by 2x to avoid OOM (otherwise the psi model is too large) + hcat = torchvision.transforms.functional.resize( + hcat, (2 * hcat.shape[-2], 2 * hcat.shape[-1]) + ) + elif config.model_type == "vit": + hcat = self.hparams.embedding_model( + net_input + ).last_hidden_state.movedim(-1, -2) + embeddings = hcat.mean(dim=-1) + # Reshape to have 2 spatial dimensions (remove CLS token) + num_patches = ( + self.hparams.embedding_model.config.image_size + // self.hparams.embedding_model.config.patch_size + ) + hcat = hcat[..., 1:].reshape( + len(hcat), -1, num_patches, num_patches + ) + else: + raise NotImplementedError + else: + if hasattr(self.hparams, "return_reps"): + embeddings, hs = self.hparams.embedding_model(X_stft_logpower) + hcat = hs + else: + hcat = self.hparams.embedding_model(X_stft_logpower) + embeddings = hcat.mean((-1, -2)) + + predictions = self.hparams.classifier(embeddings).squeeze(1) + class_pred = predictions.argmax(1) + + return hcat, 
embeddings, predictions, class_pred + + def interpret_computation_steps(self, wavs, print_probability=False): + """Computation steps to get the interpretation spectrogram.""" + + def extra_metrics(self): + return {} + + def viz_ints(self, X_stft, X_stft_logpower, batch, wavs): + """The helper function to create debugging images""" + X_int, _, X_stft_phase, _ = self.interpret_computation_steps(wavs) + + X_int = torch.expm1(X_int) + + X_int = X_int[..., None] + X_int = X_int.permute(0, 2, 1, 3) + + X_stft_phase = X_stft_phase[:, : X_int.shape[1], :] + + xhat_tm = self.invert_stft_with_phase(X_int, X_stft_phase) + + plt.figure(figsize=(10, 5), dpi=100) + + plt.subplot(121) + plt.imshow(X_stft_logpower[0].squeeze().cpu().t(), origin="lower") + plt.title("input") + plt.colorbar() + + plt.subplot(122) + plt.imshow(X_int[0].squeeze().cpu().t(), origin="lower") + plt.colorbar() + plt.title("interpretation") + + out_folder = os.path.join( + self.hparams.output_folder, + f"interpretations/{batch.id[0]}", + ) + os.makedirs( + out_folder, + exist_ok=True, + ) + + plt.savefig( + os.path.join(out_folder, "spectra.png"), + format="png", + ) + plt.close() + + audio_io.save( + os.path.join(out_folder, "interpretation.wav"), + xhat_tm.data.cpu(), + self.hparams.sample_rate, + ) + + audio_io.save( + os.path.join(out_folder, "original.wav"), + wavs.data.cpu(), + self.hparams.sample_rate, + ) + + def compute_forward(self, batch, stage): + """Interpreter training forward step.""" + + def compute_objectives(self, pred, batch, stage): + """Defines and computes the optimization objectives.""" + + def on_stage_start(self, stage, epoch=None): + """Steps taken before stage start.""" + + @torch.no_grad() + def compute_fidelity(theta_out, predictions): + """Computes top-`k` fidelity of interpreter.""" + pred_cl = torch.argmax(predictions, dim=1) + k_top = torch.topk(theta_out, k=1, dim=1)[1] + + # 1 element for each sample in batch, is 0 if pred_cl is in top k + temp = (k_top - 
pred_cl.unsqueeze(1) == 0).sum(1) + + return temp + + @torch.no_grad() + def compute_faithfulness(predictions, predictions_masked): + "This function implements the faithful metric (FF) used in the L-MAC paper." + # get the prediction indices + pred_cl = predictions.argmax(dim=1, keepdim=True) + + # get the corresponding output probabilities + predictions_selected = torch.gather( + predictions, dim=1, index=pred_cl + ) + predictions_masked_selected = torch.gather( + predictions_masked, dim=1, index=pred_cl + ) + + faithfulness = ( + predictions_selected - predictions_masked_selected + ).squeeze(dim=1) + + return faithfulness + + @torch.no_grad() + def compute_AD(theta_out, predictions): + """Computes top-`k` fidelity of interpreter.""" + predictions = F.softmax(predictions, dim=1) + theta_out = F.softmax(theta_out, dim=1) + + pc = torch.gather( + predictions, dim=1, index=predictions.argmax(1, keepdim=True) + ).squeeze() + oc = torch.gather( + theta_out, dim=1, index=predictions.argmax(1, keepdim=True) + ).squeeze(dim=1) + + # 1 element for each sample in batch, is 0 if pred_cl is in top k + temp = (F.relu(pc - oc) / (pc + eps)) * 100 + + return temp + + @torch.no_grad() + def compute_AI(theta_out, predictions): + """Computes top-`k` fidelity of interpreter.""" + pc = torch.gather( + predictions, dim=1, index=predictions.argmax(1, keepdim=True) + ).squeeze() + oc = torch.gather( + theta_out, dim=1, index=predictions.argmax(1, keepdim=True) + ).squeeze(dim=1) + + # 1 element for each sample in batch, is 0 if pred_cl is in top k + temp = (pc < oc).float() * 100 + + return temp + + @torch.no_grad() + def compute_AG(theta_out, predictions): + """Computes top-`k` fidelity of interpreter.""" + pc = torch.gather( + predictions, dim=1, index=predictions.argmax(1, keepdim=True) + ).squeeze() + oc = torch.gather( + theta_out, dim=1, index=predictions.argmax(1, keepdim=True) + ).squeeze(dim=1) + + # 1 element for each sample in batch, is 0 if pred_cl is in top k + temp = 
(F.relu(oc - pc) / (1 - pc + eps)) * 100 + + return temp + + @torch.no_grad() + def compute_sparseness(wavs, X, y): + """Computes the SPS metric used in the L-MAC paper.""" + self.sparseness = quantus.Sparseness( + return_aggregate=True, abs=True + ) + device = X.device + attr = ( + self.interpret_computation_steps(wavs)[1] + .transpose(1, 2) + .unsqueeze(1) + .clone() + .detach() + .cpu() + .numpy() + ) + if attr.sum() > 0: + X = X[:, : attr.shape[2], :] + X = X.unsqueeze(1) + quantus_inp = { + "model": None, + "x_batch": X.clone() + .detach() + .cpu() + .numpy(), # quantus expects the batch dim + "a_batch": attr, + "y_batch": y.squeeze(dim=1).clone().detach().cpu().numpy(), + "softmax": False, + "device": device, + } + return torch.Tensor([self.sparseness(**quantus_inp)[0]]).float() + else: + print("all zeros saliency map") + return torch.zeros([0]) + + @torch.no_grad() + def compute_complexity(wavs, X, y): + """Computes the COMP metric used in L-MAC paper""" + self.complexity = quantus.Complexity( + return_aggregate=True, abs=True + ) + device = X.device + attr = ( + self.interpret_computation_steps(wavs)[1] + .transpose(1, 2) + .unsqueeze(1) + .clone() + .detach() + .cpu() + .numpy() + ) + if attr.sum() > 0: + X = X[:, : attr.shape[2], :] + X = X.unsqueeze(1) + quantus_inp = { + "model": None, + "x_batch": X.clone() + .detach() + .cpu() + .numpy(), # quantus expects the batch dim + "a_batch": attr, + "y_batch": y.squeeze(dim=1).clone().detach().cpu().numpy(), + "softmax": False, + "device": device, + } + + return torch.Tensor([self.complexity(**quantus_inp)[0]]).float() + else: + print("all zeros saliency map") + return torch.zeros([0]) + + @torch.no_grad() + def accuracy_value(predict, target): + """Computes Accuracy""" + predict = predict.argmax(1) + + return (predict.unsqueeze(1) == target).float().squeeze(1) + + self.AD = MetricStats(metric=compute_AD) + self.AI = MetricStats(metric=compute_AI) + self.AG = MetricStats(metric=compute_AG) + self.sps = 
MetricStats(metric=compute_sparseness) + self.comp = MetricStats(metric=compute_complexity) + self.inp_fid = MetricStats(metric=compute_fidelity) + self.faithfulness = MetricStats(metric=compute_faithfulness) + self.acc_metric = MetricStats(metric=accuracy_value) + + for metric_name, metric_fn in self.extra_metrics().items(): + setattr( + self, + metric_name, + sb.utils.metric_stats.MetricStats(metric=metric_fn), + ) + + return super().on_stage_start(stage, epoch) + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of an epoch. + Plots in subplots the values of `self.batch_to_plot` and saves the + plot to the experiment folder `self.hparams.output_folder`.""" + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + self.train_stats = { + "loss": self.train_loss, + "acc": self.acc_metric.summarize("average"), + } + + extra_m = { + k: torch.Tensor(getattr(self, k).scores).mean() + for k in self.extra_metrics().keys() + } + + # this is needed to eliminate comp values which are nan + comp_tensor = torch.Tensor(self.comp.scores) + comp_tensor = comp_tensor[~torch.isnan(comp_tensor)] + tmp = { + "SPS": torch.Tensor(self.sps.scores).mean(), + "COMP": comp_tensor.mean(), + } + quantus_metrics = {} + for m in tmp: + if not tmp[m].isnan(): + quantus_metrics[m] = tmp[m] + + if stage == sb.Stage.VALID: + current_fid = torch.Tensor(self.inp_fid.scores).mean() + old_lr, new_lr = self.hparams.lr_annealing( + [self.optimizer], epoch, -current_fid + ) + sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + valid_stats = { + "loss": stage_loss, + "acc": self.acc_metric.summarize("average"), + "input_fidelity": current_fid, + "AI": torch.Tensor(self.AI.scores).mean(), + "AD": torch.Tensor(self.AD.scores).mean(), + "AG": torch.Tensor(self.AG.scores).mean(), + "faithfulness_mean": torch.Tensor( + self.faithfulness.scores + ).mean(), + } + valid_stats.update(extra_m) + valid_stats.update(quantus_metrics) + + # The train_logger writes 
a summary to stdout and to the log file + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch, "lr": old_lr}, + train_stats=self.train_stats, + valid_stats=valid_stats, + ) + + # Save the current checkpoint and delete previous checkpoints + self.checkpointer.save_and_keep_only( + meta=valid_stats, max_keys=["faithfulnesstop-3_fid"] + ) + + if stage == sb.Stage.TEST: + current_fid = torch.Tensor(self.inp_fid.scores).mean() + test_stats = { + "loss": stage_loss, + "acc": self.acc_metric.summarize("average"), + "input_fidelity": current_fid, + "AI": torch.Tensor(self.AI.scores).mean(), + "AD": torch.Tensor(self.AD.scores).mean(), + "AG": torch.Tensor(self.AG.scores).mean(), + "faithfulness_mean": torch.Tensor( + self.faithfulness.scores + ).mean(), + } + test_stats.update(extra_m) + test_stats.update(quantus_metrics) + + # The train_logger writes a summary to stdout and to the log file + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch}, test_stats=test_stats + ) + + test_stats = { + k: ( + test_stats[k].item() + if isinstance(test_stats[k], torch.Tensor) + else test_stats[k] + ) + for k in test_stats + } + print(test_stats) diff --git a/recipes/ESC50/interpret/train_l2i.py b/recipes/ESC50/interpret/train_l2i.py new file mode 100644 index 0000000000..51d5ff12f2 --- /dev/null +++ b/recipes/ESC50/interpret/train_l2i.py @@ -0,0 +1,448 @@ +#!/usr/bin/python3 +"""This recipe to train L2I (https://arxiv.org/abs/2202.11479) to interpret audio classifiers. 
+ +The command to run this recipe: + python train_l2i.py hparams/l2i_cnn14.yaml --data_folder /yourpath/ESC50 + +Authors + * Cem Subakan 2022, 2023 + * Francesco Paissan 2022, 2023, 2024 +""" + +import sys + +import torch +import torch.nn.functional as F +from esc50_prepare import dataio_prep, prepare_esc50 +from hyperpyyaml import load_hyperpyyaml +from interpreter_brain import InterpreterBrain +from wham_prepare import combine_batches, prepare_wham + +import speechbrain as sb +from speechbrain.processing.NMF import spectral_phase +from speechbrain.utils.distributed import run_on_main + +eps = 1e-10 + + +class L2I(InterpreterBrain): + """Class for sound class embedding training" """ + + def interpret_computation_steps(self, wavs_batch): + """computation steps to get the interpretation spectrogram""" + X_stft = self.modules.compute_stft(wavs_batch[0:1]) + ret_X_int = torch.empty( + wavs_batch.shape[0], X_stft.shape[2], X_stft.shape[1] + ).to(wavs_batch.device) + ret_mask = torch.empty( + wavs_batch.shape[0], X_stft.shape[2], X_stft.shape[1] + ).to(wavs_batch.device) + ret_X_stft_phase = torch.empty( + wavs_batch.shape[0], X_stft.shape[1], X_stft.shape[2] + ).to(wavs_batch.device) + for idx, wavs in enumerate(wavs_batch): + # compute stft and logmel, and phase + wavs = wavs[None] + X_stft_logpower, X_mel, X_stft, _ = self.preprocess(wavs) + + net_input = X_stft_logpower + if self.hparams.use_melspectra_log1p: + net_input = X_mel + + X_stft_phase = spectral_phase(X_stft) + + # get the classifier embeddings + temp = self.hparams.embedding_model(net_input) + + if isinstance( + temp, tuple + ): # if embeddings are not used for interpretation + embeddings, f_I = temp + else: + embeddings, f_I = temp, temp + + # get the nmf activations + psi_out = self.modules.psi(f_I) + + if isinstance(psi_out, tuple): + psi_out = psi_out[0] + psi_out = psi_out.squeeze(1).permute(0, 2, 1) + + # cut the length of psi in case necessary + psi_out = psi_out[:, :, : net_input.shape[1]] + + # 
get the classifier output + if embeddings.ndim == 4: + embeddings = embeddings.mean((-1, -2)) + + predictions = self.hparams.classifier(embeddings).squeeze(1) + pred_cl = torch.argmax(predictions, dim=1)[0].item() + + nmf_dictionary = self.hparams.nmf_decoder.return_W() + + # computes time activations per component + # FROM NOW ON WE FOLLOW THE PAPER'S NOTATION + psi_out = psi_out.squeeze() + z = self.modules.theta.hard_att(psi_out).squeeze() + theta_c_w = self.modules.theta.classifier[0].weight[pred_cl] + + # some might be negative, relevance of component + r_c_x = theta_c_w * z / torch.abs(theta_c_w * z).max() + + # define selected components by thresholding + L = ( + torch.arange(r_c_x.shape[0]) + .to(r_c_x.device)[r_c_x > self.hparams.relevance_th] + .tolist() + ) + + # get the log power spectra, this is needed as NMF is trained on log-power spectra + X_stft_logpower = X_stft_logpower.transpose(1, 2).squeeze(0) + + X_withselected = nmf_dictionary[:, L] @ psi_out[L, :] + Xhat = nmf_dictionary @ psi_out + + X_stft_power_log = X_stft_logpower[..., : Xhat.shape[1]] + + # need the eps for the denominator + mask = X_withselected / (Xhat + eps) + X_int = mask * X_stft_power_log + + pad_time = X_stft_logpower.shape[1] - Xhat.shape[1] + X_int = F.pad(X_int, (0, pad_time)) + mask = F.pad(mask, (0, pad_time)) + + ret_X_int[idx] = X_int + ret_mask[idx] = mask + ret_X_stft_phase[idx] = X_stft_phase + + return ret_X_int, ret_mask, ret_X_stft_phase, X_stft_logpower[None] + + def compute_forward(self, batch, stage): + """Computation pipeline based on a encoder + sound classifier. + Data augmentation and environmental corruption are applied to the + input sound. + """ + batch = batch.to(self.device) + wavs, lens = batch.sig + + if self.hparams.add_wham_noise: + # augment batch with WHAM! 
+ wavs = combine_batches(wavs, iter(self.hparams.wham_dataset)) + + net_input, X_mel, X_stft, _ = self.preprocess(wavs) + + if self.hparams.use_melspectra_log1p: + net_input = X_mel + + # Embeddings + sound classifier + temp = self.hparams.embedding_model(net_input) + if isinstance(temp, tuple): + embeddings, f_I = temp + else: + embeddings, f_I = temp, temp + + if embeddings.ndim == 4: + embeddings = embeddings.mean((-1, -2)) + + predictions = self.hparams.classifier(embeddings).squeeze(1) + + psi_out = self.modules.psi(f_I) # generate nmf activations + + if isinstance(psi_out, tuple): + psi_out = psi_out[0] + psi_out = psi_out.squeeze(1).permute(0, 2, 1) + + # cut the length of psi + psi_out = psi_out[:, :, : net_input.shape[1]] + + # generate log-mag spectrogram + reconstructed = self.hparams.nmf_decoder(psi_out).transpose(1, 2) + + # generate classifications from time activations + theta_out = self.modules.theta(psi_out) + + if stage == sb.Stage.VALID: + # save some samples + if ( + self.hparams.epoch_counter.current + % self.hparams.interpret_period + ) == 0 and self.hparams.save_interpretations: + self.viz_ints(X_stft, net_input, batch, wavs) + + if stage == sb.Stage.TEST and self.hparams.save_interpretations: + # During TEST save always, if required + self.viz_ints(X_stft, net_input, batch, wavs) + + return (reconstructed, psi_out), (predictions, theta_out), wavs + + def compute_objectives(self, pred, batch, stage): + """Computes the loss using class-id as label.""" + batch = batch.to(self.device) + wavs, lens = batch.sig + + ( + (reconstructions, time_activations), + (classification_out, theta_out), + # take augmented wavs + wavs, + ) = pred + + uttid = batch.id + classid, _ = batch.class_string_encoded + + X_stft = self.modules.compute_stft(wavs).to(self.device) + X_stft_power = sb.processing.features.spectral_magnitude( + X_stft, power=self.hparams.spec_mag_power + ) + X_stft_logpower = torch.log1p(X_stft_power) + + with torch.no_grad(): + tmp, _, _, _ = 
self.interpret_computation_steps( + wavs + ) # returns log1p + interpretations = torch.expm1(tmp).transpose(2, 1) + + if self.hparams.use_melspectra_log1p: + interpretations = self.hparams.compute_fbank(interpretations) + interpretations = torch.log1p(interpretations) + + # Embeddings + sound classifier + temp = self.hparams.embedding_model(interpretations) + if isinstance(temp, tuple): + embeddings, _ = temp + else: + embeddings, _ = temp, temp + + if embeddings.ndim == 4: + embeddings = embeddings.mean((-1, -2)) + + maskin_preds = ( + self.hparams.classifier(embeddings).squeeze(1).softmax(1) + ) + + X_stft_logpower = X_stft_logpower[:, : interpretations.shape[-2], :] + if self.hparams.use_melspectra_log1p: + xx_temp = torch.log1p(self.hparams.compute_fbank(X_stft_power)) + temp = self.hparams.embedding_model(xx_temp - interpretations) + else: + temp = self.hparams.embedding_model( + X_stft_logpower - interpretations + ) + + if isinstance(temp, tuple): + embeddings, _ = temp + else: + embeddings, _ = temp, temp + + if embeddings.ndim == 4: + embeddings = embeddings.mean((-1, -2)) + + maskout_preds = ( + self.hparams.classifier(embeddings).squeeze(1).softmax(1) + ) + self.l2i_fid.append(uttid, theta_out, classid) + self.inp_fid.append(uttid, maskin_preds, classid) + + self.acc_metric.append( + uttid, + predict=classification_out, + target=classid, + ) + + self.AD.append( + uttid, + maskin_preds, + classification_out.softmax(1), + ) + self.AI.append( + uttid, + maskin_preds, + classification_out.softmax(1), + ) + self.AG.append( + uttid, + maskin_preds, + classification_out.softmax(1), + ) + self.faithfulness.append( + uttid, + classification_out.softmax(1), + maskout_preds, + ) + + if stage == sb.Stage.VALID or stage == sb.Stage.TEST: + try: + self.sps.append(uttid, wavs, X_stft_logpower, classid) + except ValueError: + print("zero sps entry!") + + try: + self.comp.append(uttid, wavs, X_stft_logpower, classid) + except ValueError: + print("zero comp entry!") + + 
X_stft_logpower = X_stft_logpower[:, : reconstructions.shape[1], :] + + loss_nmf = ((reconstructions - X_stft_logpower) ** 2).mean() + self.recons_err.append(uttid, loss_nmf) + + loss_nmf = self.hparams.alpha * loss_nmf + prev = loss_nmf.clone().detach() + + loss_nmf += self.hparams.beta * (time_activations).abs().mean() + self.reg_loss.append(uttid, loss_nmf - prev) + + if stage != sb.Stage.TEST: + if hasattr(self.hparams.lr_annealing, "on_batch_end"): + self.hparams.lr_annealing.on_batch_end(self.optimizer) + + self.last_batch = batch + self.batch_to_plot = (reconstructions.clone(), X_stft_logpower.clone()) + + theta_out = -torch.log(theta_out) + loss_fdi = ( + F.softmax(classification_out / self.hparams.classifier_temp, dim=1) + * theta_out + ).mean() + + self.fid_loss.append(uttid, loss_fdi) + + return loss_nmf + loss_fdi + + def extra_metrics(self): + """This function defines the extra metrics required for the L2I method""" + + @torch.no_grad() + def l2i_fid(predict, target): + """Computes Accuracy""" + predict = predict.argmax(1) + return (predict.unsqueeze(1) == target).float().squeeze(1) + + @torch.no_grad() + def save(x): + return x[None] + + return { + "l2i_fid": l2i_fid, + "recons_err": save, + "reg_loss": save, + "fid_loss": save, + } + + def pretrained_interpreter(self): + """This function enables us to use hparams.pretrained_interpreter inside train.py""" + print( + f"pretrained_interpreter path {self.hparams.pretrained_interpreter}" + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Load hyperparameters file with command-line overrides + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + print("Eval only hparams:") + print("overlap_type=", hparams["overlap_type"]) + print("int_method=", hparams["int_method"]) + 
print("ljspeech_path=", hparams["ljspeech_path"]) + print("single_sample=", hparams["single_sample"]) + + print( + "Interpreter class is inheriting the train_logger", + hparams["train_logger"], + ) + + # classifier is fixed here + hparams["embedding_model"].eval() + hparams["classifier"].eval() + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Tensorboard logging + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs_folder"] + ) + + run_on_main( + prepare_esc50, + kwargs={ + "data_folder": hparams["data_folder"], + "audio_data_folder": hparams["audio_data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "train_fold_nums": hparams["train_fold_nums"], + "valid_fold_nums": hparams["valid_fold_nums"], + "test_fold_nums": hparams["test_fold_nums"], + "skip_manifest_creation": hparams["skip_manifest_creation"], + }, + ) + + # Dataset IO prep: creating Dataset objects and proper encodings for phones + datasets, label_encoder = dataio_prep(hparams) + hparams["label_encoder"] = label_encoder + + # create WHAM dataset according to hparams + if "wham_folder" in hparams: + hparams["wham_dataset"] = prepare_wham( + hparams["wham_folder"], + hparams["add_wham_noise"], + hparams["sample_rate"], + hparams["signal_length_s"], + hparams["wham_audio_folder"], + ) + + class_labels = list(label_encoder.ind2lab.values()) + print("Class Labels:", class_labels) + + assert hparams["signal_length_s"] == 5, "Fix wham sig length!" + # assert hparams["out_n_neurons"] == 50, "Fix number of outputs classes!" 
+ + Interpreter_brain = L2I( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + if "pretrained_esc50" in hparams and hparams["use_pretrained"]: + run_on_main(hparams["pretrained_esc50"].collect_files) + hparams["pretrained_esc50"].load_collected() + + # transfer the frozen parts of the model to the device + hparams["embedding_model"].to(run_opts["device"]) + hparams["classifier"].to(run_opts["device"]) + hparams["nmf_decoder"].to(run_opts["device"]) + hparams["embedding_model"].eval() + + Interpreter_brain.fit( + epoch_counter=Interpreter_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["dataloader_options"], + ) + + # Load the best checkpoint for evaluation + test_stats = Interpreter_brain.evaluate( + test_set=datasets["test"], + min_key="error", + progressbar=True, + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/ESC50/interpret/train_lmac.py b/recipes/ESC50/interpret/train_lmac.py new file mode 100644 index 0000000000..1109367da0 --- /dev/null +++ b/recipes/ESC50/interpret/train_lmac.py @@ -0,0 +1,429 @@ +#!/usr/bin/python3 +"""This recipe trains L-MAC to interpret audio classifiers. + +The command to run for this recipe, with WHAM augmentation (as used in the L-MAC paper) + python train_lmac.py hparams/lmac_cnn14.yaml --data_folder=/yourpath/ESC50 --add_wham_noise True --wham_folder=/yourpath/wham_noise + +For more details, please refer to the README file. 
+ + +Authors + * Francesco Paissan 2024 + * Cem Subakan 2024 +""" + +import sys + +import torch +import torch.nn.functional as F +from esc50_prepare import dataio_prep, prepare_esc50 +from hyperpyyaml import load_hyperpyyaml +from interpreter_brain import InterpreterBrain +from wham_prepare import combine_batches, prepare_wham + +import speechbrain as sb +from speechbrain.processing.NMF import spectral_phase +from speechbrain.utils.distributed import run_on_main + +eps = 1e-10 + + +def tv_loss(mask, tv_weight=1, power=2, border_penalty=0.3): + if tv_weight is None or tv_weight == 0: + return 0.0 + # https://github.com/chongyangma/cs231n/blob/master/assignments/assignment3/style_transfer_pytorch.py + # https://github.com/PiotrDabkowski/pytorch-saliency/blob/bfd501ec7888dbb3727494d06c71449df1530196/sal/utils/mask.py#L5 + w_variance = torch.sum(torch.pow(mask[:, :, :-1] - mask[:, :, 1:], power)) + h_variance = torch.sum(torch.pow(mask[:, :-1, :] - mask[:, 1:, :], power)) + + loss = tv_weight * (h_variance + w_variance) / float(power * mask.size(0)) + return loss + + +class LMAC(InterpreterBrain): + def crosscor(self, spectrogram, template): + """Compute the cross correlation metric defined in the L-MAC paper, used in finetuning""" + if self.hparams.crosscortype == "conv": + spectrogram = spectrogram - spectrogram.mean((-1, -2), keepdim=True) + template = template - template.mean((-1, -2), keepdim=True) + template = template.unsqueeze(1) + # 1 x BS x T x F + # BS x 1 x T x F + tmp = F.conv2d( + spectrogram[None], + template, + bias=None, + groups=spectrogram.shape[0], + ) + + normalization1 = F.conv2d( + spectrogram[None] ** 2, + torch.ones_like(template), + groups=spectrogram.shape[0], + ) + normalization2 = F.conv2d( + torch.ones_like(spectrogram[None]), + template**2, + groups=spectrogram.shape[0], + ) + + ncc = ( + tmp / torch.sqrt(normalization1 * normalization2 + 1e-8) + ).squeeze() + + return ncc + elif self.hparams.crosscortype == "dotp": + dotp = (spectrogram 
* template).mean((-1, -2)) + norms_specs = spectrogram.pow(2).mean((-1, -2)).sqrt() + norms_templates = template.pow(2).mean((-1, -2)).sqrt() + norm_dotp = dotp / (norms_specs * norms_templates) + return norm_dotp + else: + raise ValueError("unknown crosscor type!") + + def interpret_computation_steps(self, wavs, print_probability=False): + """Computation steps to get the interpretation spectrogram""" + X_stft_logpower, X_mel, X_stft, _ = self.preprocess(wavs) + X_stft_phase = spectral_phase(X_stft) + + hcat, _, predictions, class_pred = self.classifier_forward(X_mel) + if print_probability: + predictions = F.softmax(predictions, dim=1) + class_prob = predictions[0, class_pred].item() + print(f"classifier_prob: {class_prob}") + + xhat = self.modules.psi(hcat).squeeze(1) + + Tmax = xhat.shape[1] + if self.hparams.use_mask_output: + xhat = F.sigmoid(xhat) + X_int = xhat * X_stft_logpower[:, :Tmax, :] + + return ( + X_int.transpose(1, 2), + xhat.transpose(1, 2), + X_stft_phase, + X_stft_logpower, + ) + + def compute_forward(self, batch, stage): + """Forward computation defined to generate the saliency maps with L-MAC""" + batch = batch.to(self.device) + wavs, lens = batch.sig + + # augment batch with WHAM! 
+ if hasattr(self.hparams, "add_wham_noise"): + if self.hparams.add_wham_noise: + wavs = combine_batches(wavs, iter(self.hparams.wham_dataset)) + + X_stft_logpower, X_mel, X_stft, _ = self.preprocess(wavs) + + # Embeddings + sound classifier + hcat, _, predictions, class_pred = self.classifier_forward(X_mel) + + xhat = self.modules.psi(hcat).squeeze(1) + + if self.hparams.use_mask_output: + xhat = F.sigmoid(xhat) + + if stage == sb.Stage.VALID: + # save some samples + if ( + self.hparams.epoch_counter.current + % self.hparams.interpret_period + ) == 0 and self.hparams.save_interpretations: + self.viz_ints(X_stft, X_stft_logpower, batch, wavs) + + if stage == sb.Stage.TEST and self.hparams.save_interpretations: + # During TEST save always, if required + self.viz_ints(X_stft, X_stft_logpower, batch, wavs) + + return ((wavs, lens), predictions, xhat, hcat) + + def extra_metrics(self): + """This function defines the extra metrics required for L-MAC. + This is limited to the counter() function which is used to count the number of data items which passes the crosscorrelation threshold, during the finetuning stage of L-MAC. + """ + + def counter(c): + return c + + return {"in_masks": counter} + + def compute_objectives(self, pred, batch, stage): + """Helper function to compute the objectives""" + ( + batch_sig, + predictions, + xhat, + _, + ) = pred + + batch = batch.to(self.device) + wavs_clean, _ = batch.sig + + # taking them from forward because they are augmented there! 
+ wavs, _ = batch_sig + + uttid = batch.id + labels, _ = batch.class_string_encoded + + ( + X_stft_logpower_clean, + _, + _, + _, + ) = self.preprocess(wavs_clean) + X_stft_logpower, _, _, _ = self.preprocess(wavs) + + Tmax = xhat.shape[1] + + # map clean to same dimensionality + X_stft_logpower_clean = X_stft_logpower_clean[:, :Tmax, :] + + mask_in = xhat * X_stft_logpower[:, :Tmax, :] + mask_out = (1 - xhat) * X_stft_logpower[:, :Tmax, :] + + if self.hparams.use_stft2mel: + X_in = torch.expm1(mask_in) + mask_in_mel = self.hparams.compute_fbank(X_in) + mask_in_mel = torch.log1p(mask_in_mel) + + X_out = torch.expm1(mask_out) + mask_out_mel = self.hparams.compute_fbank(X_out) + mask_out_mel = torch.log1p(mask_out_mel) + + if self.hparams.finetuning: + crosscor = self.crosscor(X_stft_logpower_clean, mask_in) + crosscor_mask = (crosscor >= self.hparams.crosscor_th).float() + + max_batch = ( + X_stft_logpower_clean.view(X_stft_logpower_clean.shape[0], -1) + .max(1) + .values.view(-1, 1, 1) + ) + binarized_oracle = ( + X_stft_logpower_clean >= self.hparams.bin_th * max_batch + ).float() + + if self.hparams.guidelosstype == "binary": + rec_loss = ( + F.binary_cross_entropy( + xhat, binarized_oracle, reduce=False + ).mean((-1, -2)) + * self.hparams.g_w + * crosscor_mask + ).mean() + else: + temp = ( + ( + ( + xhat + * X_stft_logpower[ + :, : X_stft_logpower_clean.shape[1], : + ] + ) + - X_stft_logpower_clean + ) + .pow(2) + .mean((-1, -2)) + ) + rec_loss = (temp * crosscor_mask).mean() * self.hparams.g_w + + else: + rec_loss = 0 + crosscor_mask = torch.zeros(xhat.shape[0], device=self.device) + + mask_in_preds = self.classifier_forward(mask_in_mel)[2] + mask_out_preds = self.classifier_forward(mask_out_mel)[2] + + class_pred = predictions.argmax(1) + l_in = F.nll_loss(mask_in_preds.log_softmax(1), class_pred) + l_out = -F.nll_loss(mask_out_preds.log_softmax(1), class_pred) + ao_loss = l_in * self.hparams.l_in_w + self.hparams.l_out_w * l_out + + r_m = ( + 
xhat.abs().mean((-1, -2, -3)) + * self.hparams.reg_w_l1 + * torch.logical_not(crosscor_mask) + ).sum() + r_m += ( + tv_loss(xhat) + * self.hparams.reg_w_tv + * torch.logical_not(crosscor_mask) + ).sum() + + mask_in_preds = mask_in_preds.softmax(1) + mask_out_preds = mask_out_preds.softmax(1) + + if stage == sb.Stage.VALID or stage == sb.Stage.TEST: + self.inp_fid.append( + uttid, + mask_in_preds, + predictions.softmax(1), + ) + self.AD.append( + uttid, + mask_in_preds, + predictions.softmax(1), + ) + self.AI.append( + uttid, + mask_in_preds, + predictions.softmax(1), + ) + self.AG.append( + uttid, + mask_in_preds, + predictions.softmax(1), + ) + self.sps.append(uttid, wavs, X_stft_logpower, labels) + self.comp.append(uttid, wavs, X_stft_logpower, labels) + self.faithfulness.append( + uttid, + predictions.softmax(1), + mask_out_preds, + ) + + self.in_masks.append(uttid, c=crosscor_mask) + self.acc_metric.append( + uttid, + predict=predictions, + target=labels, + ) + + if stage != sb.Stage.TEST: + if hasattr(self.hparams.lr_annealing, "on_batch_end"): + self.hparams.lr_annealing.on_batch_end(self.optimizer) + + return ao_loss + r_m + rec_loss + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Load hyperparameters file with command-line overrides + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + print("Eval only hparams:") + print("overlap_type=", hparams["overlap_type"]) + print("int_method=", hparams["int_method"]) + print("ljspeech_path=", hparams["ljspeech_path"]) + print("single_sample=", hparams["single_sample"]) + + print("Inherited hparams:") + print("use_melspectra_log1p=", hparams["use_melspectra_log1p"]) + + print( + "Interpreter class is inheriting the train_logger", + hparams["train_logger"], + ) + + # classifier is fixed here + 
hparams["embedding_model"].eval() + hparams["classifier"].eval() + hparams["embedding_model"].requires_grad_(False) + hparams["classifier"].requires_grad_(False) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Tensorboard logging + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs_folder"] + ) + + run_on_main( + prepare_esc50, + kwargs={ + "data_folder": hparams["data_folder"], + "audio_data_folder": hparams["audio_data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "train_fold_nums": hparams["train_fold_nums"], + "valid_fold_nums": hparams["valid_fold_nums"], + "test_fold_nums": hparams["test_fold_nums"], + "skip_manifest_creation": hparams["skip_manifest_creation"], + }, + ) + + # Dataset IO prep: creating Dataset objects and proper encodings for phones + datasets, label_encoder = dataio_prep(hparams) + hparams["label_encoder"] = label_encoder + + # create WHAM dataset according to hparams + if "wham_folder" in hparams: + hparams["wham_dataset"] = prepare_wham( + hparams["wham_folder"], + hparams["add_wham_noise"], + hparams["sample_rate"], + hparams["signal_length_s"], + hparams["wham_audio_folder"], + ) + + assert hparams["signal_length_s"] == 5, "Fix wham sig length!" + # assert hparams["out_n_neurons"] == 50, "Fix number of outputs classes!" + + class_labels = list(label_encoder.ind2lab.values()) + print("Class Labels:", class_labels) + + if hparams["finetuning"]: + if hparams["pretrained_interpreter"] is None: + raise AssertionError( + "You should specify pretrained model for finetuning." 
+ ) + + Interpreter_brain = LMAC( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + if hparams["pretrained_interpreter"] is not None and hparams["finetuning"]: + print("Load pretrained_interpreter for interpreter finetuning...") + run_on_main(hparams["load_pretrained"].collect_files) + hparams["load_pretrained"].load_collected() + + if "pretrained_esc50" in hparams and hparams["use_pretrained"]: + print("Loading model...") + run_on_main(hparams["pretrained_esc50"].collect_files) + hparams["pretrained_esc50"].load_collected() + + hparams["embedding_model"].to(Interpreter_brain.device) + hparams["classifier"].to(Interpreter_brain.device) + hparams["embedding_model"].eval() + + if not hparams["test_only"]: + Interpreter_brain.fit( + epoch_counter=Interpreter_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["dataloader_options"], + ) + + Interpreter_brain.checkpointer.recover_if_possible( + min_key="loss", + ) + + test_stats = Interpreter_brain.evaluate( + test_set=datasets["test"], + min_key="loss", + progressbar=True, + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/ESC50/interpret/train_nmf.py b/recipes/ESC50/interpret/train_nmf.py new file mode 100644 index 0000000000..b4648f612a --- /dev/null +++ b/recipes/ESC50/interpret/train_nmf.py @@ -0,0 +1,167 @@ +#!/usr/bin/python3 +"""The recipe to train an NMF model with amortized inference on ESC50 data. 
+ +To run this recipe, use the following command: +> python train_nmf.py hparams/nmf.yaml --data_folder /yourpath/ESC-50-master + +Authors + * Cem Subakan 2022, 2023 + * Francesco Paissan 2022, 2023 +""" + +import os +import sys + +import matplotlib.pyplot as plt +import torch +from esc50_prepare import prepare_esc50 +from hyperpyyaml import load_hyperpyyaml +from train_l2i import dataio_prep + +import speechbrain as sb +from speechbrain.utils.distributed import run_on_main + + +class NMFBrain(sb.core.Brain): + """ + The SpeechBrain class to train Non-Negative Factorization with Amortized Inference + """ + + def compute_forward(self, batch, stage=sb.Stage.TRAIN): + """ + This function calculates the forward pass for NMF + """ + + batch = batch.to(self.device) + wavs, _ = batch.sig + + X_stft = self.hparams.compute_stft(wavs) + X_stft_power = self.hparams.compute_stft_mag(X_stft) + X_stft_tf = torch.log1p(X_stft_power) + z = self.hparams.nmf_encoder(X_stft_tf.permute(0, 2, 1)) + Xhat = self.hparams.nmf_decoder(z) + + # returning wavs because they are augmented + return Xhat, wavs + + def compute_objectives(self, predictions, batch, stage=sb.Stage.TRAIN): + """ + this function computes the l2-error to train the NMF model. 
+ """ + # extracting augmented wavs + predictions, wavs = predictions + + X_stft = self.hparams.compute_stft(wavs) + X_stft_power = self.hparams.compute_stft_mag(X_stft) + target = torch.log1p(X_stft_power).permute(0, 2, 1) + + loss = ((target.squeeze() - predictions) ** 2).mean() + + with torch.no_grad(): + if ( + self.hparams.epoch_counter.current % self.hparams.save_period + == 0 + and stage == sb.Stage.VALID + ): + os.makedirs("nmf_rec", exist_ok=True) + for idx in range(X_stft.shape[0]): + tmp = os.path.join("nmf_rec", f"{idx}.png") + plt.subplot(121) + plt.imshow(target[idx].cpu(), origin="lower") + + plt.subplot(122) + plt.imshow(predictions[idx].cpu(), origin="lower") + + plt.tight_layout() + plt.savefig(tmp) + + return loss + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of an epoch.""" + # Compute/store important stats + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + self.train_stats = { + "loss": self.train_loss, + } + # Summarize Valid statistics from the stage for record-keeping. + elif stage == sb.Stage.VALID: + valid_stats = { + "loss": stage_loss, + } + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + # The train_logger writes a summary to stdout and to the logfile. 
+ self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch}, + train_stats=self.train_stats, + valid_stats=valid_stats, + ) + # Save the current checkpoint and delete previous checkpoints, + self.checkpointer.save_and_keep_only( + meta=valid_stats, min_keys=["loss"] + ) + + +if __name__ == "__main__": + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Load hyperparameters file with command-line overrides + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + assert hparams["signal_length_s"] == 5, "Fix wham sig length!" + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + run_on_main( + prepare_esc50, + kwargs={ + "data_folder": hparams["data_folder"], + "audio_data_folder": hparams["audio_data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "train_fold_nums": hparams["train_fold_nums"], + "valid_fold_nums": hparams["valid_fold_nums"], + "test_fold_nums": hparams["test_fold_nums"], + "skip_manifest_creation": hparams["skip_manifest_creation"], + }, + ) + + datasets, _ = dataio_prep(hparams) + + nmfbrain = NMFBrain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + nmfbrain.fit( + epoch_counter=nmfbrain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["dataloader_options"], + ) + + test_stats = nmfbrain.evaluate( + test_set=datasets["test"], + min_key="loss", + progressbar=True, + 
test_loader_kwargs=hparams["dataloader_options"], + ) + + if hparams["save_nmfdictionary"]: + torch.save(hparams["nmf_decoder"].return_W(), hparams["nmf_savepath"]) diff --git a/recipes/ESC50/interpret/train_piq.py b/recipes/ESC50/interpret/train_piq.py new file mode 100644 index 0000000000..50741e305f --- /dev/null +++ b/recipes/ESC50/interpret/train_piq.py @@ -0,0 +1,293 @@ +#!/usr/bin/python3 + +"""This recipe trains PIQ to interpret an audio classifier. + +To run this recipe, use the following command: +> python train_piq.py hparams/.yaml --data_folder /yourpath/ESC-50-master + +Authors + * Cem Subakan 2022, 2023 + * Francesco Paissan 2022, 2023, 2024 + * Luca Della Libera 2024 +""" + +import sys + +import torch +from esc50_prepare import dataio_prep +from hyperpyyaml import load_hyperpyyaml +from interpreter_brain import InterpreterBrain +from torch.nn import functional as F + +import speechbrain as sb +from speechbrain.processing.NMF import spectral_phase +from speechbrain.utils.distributed import run_on_main + +eps = 1e-10 + + +class PIQ(InterpreterBrain): + """Class for interpreter training.""" + + def interpret_computation_steps(self, wavs, print_probability=False): + """Computation steps to get the interpretation spectrogram.""" + X_stft_logpower, X_mel, X_stft, X_stft_power = self.preprocess(wavs) + X_stft_phase = spectral_phase(X_stft) + + hcat, embeddings, predictions, class_pred = self.classifier_forward( + X_stft_logpower + ) + if print_probability: + predictions = F.softmax(predictions, dim=1) + class_prob = predictions[0, class_pred].item() + print(f"classifier_prob: {class_prob}") + + if self.hparams.use_vq: + xhat, hcat, _ = self.modules.psi(hcat, class_pred) + else: + xhat = self.modules.psi.decoder(hcat) + xhat = xhat.squeeze(1) + + Tmax = xhat.shape[1] + if self.hparams.use_mask_output: + xhat = F.sigmoid(xhat) + X_int = xhat * X_stft_logpower[:, :Tmax, :] + else: + xhat = F.softplus(xhat) + th = xhat.max() * self.hparams.mask_th + X_int = 
(xhat > th) * X_stft_logpower[:, :Tmax, :] + + return X_int.permute(0, 2, 1), xhat.permute(0, 2, 1), X_stft_phase + + def compute_forward(self, batch, stage): + """Computation pipeline based on an encoder + sound classifier.""" + batch = batch.to(self.device) + wavs, lens = batch.sig + + X_stft_logpower, X_mel, X_stft, X_stft_power = self.preprocess(wavs) + + # Embeddings + sound classifier + hcat, embeddings, predictions, class_pred = self.classifier_forward( + X_stft_logpower + ) + + if self.hparams.use_vq: + xhat, hcat, z_q_x = self.modules.psi(hcat, class_pred) + else: + xhat = self.modules.psi.decoder(hcat) + z_q_x = None + + xhat = xhat.squeeze(1) + + if self.hparams.use_mask_output: + xhat = F.sigmoid(xhat) + else: + xhat = F.softplus(xhat) + + garbage = 0 + + if stage == sb.Stage.VALID: + # Save some samples + if ( + self.hparams.epoch_counter.current + % self.hparams.interpret_period + ) == 0 and self.hparams.save_interpretations: + self.viz_ints(X_stft, X_stft_logpower, batch, wavs) + + return predictions, xhat, hcat, z_q_x, garbage + + def compute_objectives(self, pred, batch, stage): + """Helper function to compute the objectives.""" + predictions, xhat, hcat, z_q_x, garbage = pred + + batch = batch.to(self.device) + wavs, lens = batch.sig + + uttid = batch.id + classid, _ = batch.class_string_encoded + + X_stft_logpower, X_mel, X_stft, X_stft_power = self.preprocess(wavs) + + Tmax = xhat.shape[1] + + hcat_theta, embeddings, theta_out, _ = self.classifier_forward( + xhat * X_stft_logpower[:, :Tmax, :] + ) + mask_in_preds = theta_out + mask_out_preds = self.classifier_forward( + (1 - xhat) * X_stft_logpower[:, :Tmax, :] + )[2] + + # If there is a separator, we need to add sigmoid to the sum + loss_fid = 0 + + if self.hparams.use_mask_output: + eps = 1e-10 + target_spec = X_stft_logpower[:, : xhat.shape[1], :] + target_mask = target_spec > ( + target_spec.max(keepdim=True, dim=-1)[0].max( + keepdim=True, dim=-2 + )[0] + * self.hparams.mask_th + ) + 
target_mask = target_mask.float() + rec_loss = ( + -target_mask * torch.log(xhat + eps) + - (1 - target_mask) * torch.log(1 - xhat + eps) + ).mean() + else: + rec_loss = ( + (X_stft_logpower[:, : xhat.shape[1], :] - xhat).pow(2).mean() + ) + + if self.hparams.use_vq: + loss_vq = F.mse_loss(z_q_x, hcat.detach()) + loss_commit = F.mse_loss(hcat, z_q_x.detach()) + else: + loss_vq = 0 + loss_commit = 0 + + self.acc_metric.append(uttid, predict=predictions, target=classid) + if stage == sb.Stage.VALID or stage == sb.Stage.TEST: + self.inp_fid.append( + uttid, + mask_in_preds.softmax(1), + predictions.softmax(1), + ) + + self.AD.append( + uttid, + mask_in_preds.softmax(1), + predictions.softmax(1), + ) + self.AI.append( + uttid, + mask_in_preds.softmax(1), + predictions.softmax(1), + ) + self.AG.append( + uttid, + mask_in_preds.softmax(1), + predictions.softmax(1), + ) + self.sps.append(uttid, wavs, X_stft_logpower, classid) + self.comp.append(uttid, wavs, X_stft_logpower, classid) + self.faithfulness.append( + uttid, + predictions.softmax(1), + mask_out_preds.softmax(1), + ) + + if stage != sb.Stage.TEST: + if hasattr(self.hparams.lr_annealing, "on_batch_end"): + self.hparams.lr_annealing.on_batch_end(self.optimizer) + + return ( + self.hparams.rec_loss_coef * rec_loss + + loss_vq + + loss_commit + + loss_fid + ) + + +if __name__ == "__main__": + # This flag enables the built-in cuDNN auto-tuner + # torch.backends.cudnn.benchmark = True + + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Load hyperparameters file with command-line overrides + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + print("Inherited hparams:") + print("use_melspectra_log1p=", hparams["use_melspectra_log1p"]) + + print( + "Interpreter class is inheriting the train_logger", + hparams["train_logger"], + ) 
+ + # Classifier is fixed here + hparams["embedding_model"].eval() + hparams["classifier"].eval() + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Tensorboard logging + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs_folder"] + ) + + from esc50_prepare import prepare_esc50 + + run_on_main( + prepare_esc50, + kwargs={ + "data_folder": hparams["data_folder"], + "audio_data_folder": hparams["audio_data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "train_fold_nums": hparams["train_fold_nums"], + "valid_fold_nums": hparams["valid_fold_nums"], + "test_fold_nums": hparams["test_fold_nums"], + "skip_manifest_creation": hparams["skip_manifest_creation"], + }, + ) + + # Dataset IO prep: creating Dataset objects and proper encodings for phones + datasets, label_encoder = dataio_prep(hparams) + hparams["label_encoder"] = label_encoder + + class_labels = list(label_encoder.ind2lab.values()) + print("Class Labels:", class_labels) + + Interpreter_brain = PIQ( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + if "pretrained_esc50" in hparams and hparams["use_pretrained"]: + print("Loading model...") + run_on_main(hparams["pretrained_esc50"].collect_files) + hparams["pretrained_esc50"].load_collected() + + hparams["embedding_model"].to(run_opts["device"]) + hparams["classifier"].to(run_opts["device"]) + hparams["embedding_model"].eval() + + Interpreter_brain.fit( + epoch_counter=Interpreter_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + 
train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["dataloader_options"], + ) + + # Load the best checkpoint for evaluation + Interpreter_brain.checkpointer.recover_if_possible( + max_key="valid_top-3_fid", + ) + + test_stats = Interpreter_brain.evaluate( + test_set=datasets["test"], + min_key="loss", + progressbar=True, + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/ESC50/interpret/wham_prepare.py b/recipes/ESC50/interpret/wham_prepare.py new file mode 120000 index 0000000000..96bd210cce --- /dev/null +++ b/recipes/ESC50/interpret/wham_prepare.py @@ -0,0 +1 @@ +../wham_prepare.py \ No newline at end of file diff --git a/recipes/ESC50/wham_prepare.py b/recipes/ESC50/wham_prepare.py new file mode 100644 index 0000000000..5bd670fc5d --- /dev/null +++ b/recipes/ESC50/wham_prepare.py @@ -0,0 +1,192 @@ +import os +import shutil + +import numpy as np +import torch +import torch.nn.functional as F +import torchaudio +from torch.utils.data import IterableDataset + +from speechbrain.dataio import audio_io +from speechbrain.utils.fetching import fetch + +np.random.seed(1234) + + +class WHAMDataset(IterableDataset): + """Implements class for WHAM! dataset. + + Arguments + --------- + data_dir: str or Path + Directory where the dataset is stored. + target_length: int + Expected audio sample length. Used for padding and cropping. + sample_rate: int + Sample rate of the audio samples. 
+ """ + + def __init__(self, data_dir, target_length=4, sample_rate=22050): + self.data_dir = data_dir + self.target_length = target_length + self.sample_rate = sample_rate + + # Get a list of all WAV files in the WHAM data directory + self.file_list = [f for f in os.listdir(data_dir) if f.endswith(".wav")] + + def generate(self): + """Generates viable audio sample from the WHAM set.""" + while True: + idx = np.random.choice([i for i in range(len(self.file_list))]) + file_path = os.path.join(self.data_dir, self.file_list[idx]) + + waveform, sr = audio_io.load(file_path) + waveform = waveform.mean(0, keepdim=True) + + # Resample if needed + if self.sample_rate != sr: + waveform = torchaudio.transforms.Resample(sr, self.sample_rate)( + waveform + ) + + # Cut audio to the target length + if waveform.shape[1] > self.target_length * self.sample_rate: + start = 0 + end = int(self.target_length * self.sample_rate) + waveform = waveform[:, start:end] + + zeros = ( + int(self.target_length * self.sample_rate) - waveform.shape[1] + ) + waveform = F.pad(waveform, (0, zeros)) + + yield waveform + + def __iter__(self): + """Iterator constructor.""" + return iter(self.generate()) + + +def combine_batches(clean, noise_loader): + """Combines waveforms at 0dB. + + Arguments + --------- + clean: torch.Tensor + Original sample. + noise_loader: int + DataLoader for the contamination dataset. 
+ + Returns + ------- + Mixture : torch.Tensor + """ + batch_size = clean.shape[0] + + noise = [] + for _ in range(batch_size): + noise.append(next(noise_loader)) + noise = torch.stack(noise).to(clean.device) + + if noise.ndim == 3: + noise = noise.squeeze(1) + elif noise.ndim == 1: + noise = noise[None] + + clean_l2 = (clean**2).sum(-1) ** 0.5 + noise_l2 = (noise**2).sum(-1) ** 0.5 + + # Combine the batches at 0dB + combined_batch = clean / clean_l2[..., None] + noise / noise_l2[..., None] + combined_batch = ( + combined_batch / torch.max(combined_batch, dim=1, keepdim=True).values + ) + + return combined_batch + + +def download_wham(wham_path: str): + """ + This function automatically downloads the WHAM! dataset to the specified data path in the wham_path variable + + Arguments + --------- + wham_path: str or Path + Directory used to save the dataset. + + Returns + ------- + None + """ + if len(os.listdir(wham_path)) != 0: + return + + print("WHAM! is missing. Downloading WHAM!. This will take a while...") + os.makedirs(wham_path, exist_ok=True) + + temp_path = os.path.join(wham_path, "temp_download_wham") + + # download the data + fetch( + "wham_noise.zip", + "https://my-bucket-a8b4b49c25c811ee9a7e8bba05fa24c7.s3.amazonaws.com", + savedir=temp_path, + ) + + # unpack the .zip file + shutil.unpack_archive(os.path.join(temp_path, "wham_noise.zip"), wham_path) + + files = os.listdir(os.path.join(wham_path, "WHAM", "wham_noise")) + for fl in files: + shutil.move( + os.path.join(wham_path, "WHAM", "wham_noise", fl), wham_path + ) + + # remove the unused datapath + shutil.rmtree(temp_path) + shutil.rmtree(os.path.join(wham_path, "WHAM")) + + print(f"WHAM! is downloaded in {wham_path}") + + +def prepare_wham( + wham_folder, add_wham_noise, sample_rate, signal_length_s, wham_audio_folder +): + """Creates WHAM! dataset when needed. + + Arguments + --------- + wham_folder: str or Path + Directory where the dataset is stored. 
+ If empty, data will be automatically downloaded. + add_wham_noise: bool + True when wham contamination is required. When False, returns None. + sample_rate: int + Sample rate for the mixture. + signal_length_s: int + Seconds. Expected length of the audio sample. + wham_audio_folder: str or Path + Points to the wham split. E.g. wham_noise/tr + + Returns + ------- + WHAM Loader or None, depending on configuration. : WHAMDataset + """ + if wham_folder is None: + if add_wham_noise: + raise Exception("You should specify wham_folder to add noise.") + return None + + if add_wham_noise: + # download WHAM! in specified folder + download_wham(wham_folder) + + dataset = WHAMDataset( + data_dir=wham_audio_folder, + target_length=signal_length_s, + sample_rate=sample_rate, + ) + + return dataset + + return None diff --git a/recipes/Fisher-Callhome-Spanish/README.md b/recipes/Fisher-Callhome-Spanish/README.md index 42953a4a40..baf324e4b9 100644 --- a/recipes/Fisher-Callhome-Spanish/README.md +++ b/recipes/Fisher-Callhome-Spanish/README.md @@ -1,12 +1,15 @@ # Speech Translation on Fisher-Callhome Spanish recipe This folder contains recipes for tokenization and speech translation with [Fisher-Callhome Spanish](https://catalog.ldc.upenn.edu/LDC2014T23), a 160-hour Spanish-English ST dataset. -### How to run -0- Install extra dependencies +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + ``` pip install -r extra_requirements.txt ``` +### How to run 1- Train a tokenizer. The tokenizer takes in input the training translations and determines the subword units that will be used for the ST task, the auxiliary MT task. ``` @@ -26,10 +29,10 @@ Results are reported in terms of sacrebleu. 
| hyperparams file | dev | dev2 | test | ctc_weight | asr_weight | mt_weight | Model | GPUs | |:----------------:|:-----:| :-----:| :-----:| :--------: | :--------: | :-------: | :-------: | :----------------: | | transformer.yaml | 40.67 | 41.51 | 40.30 | 0 | 0 | 0 | Not Avail. | 2xRTX 2080 Ti 11GB | -| transformer.yaml | 47.50 | 48.33 | 47.31 | 1 | 0.3 | 0 | [Model](https://drive.google.com/drive/folders/1wd4iWuFimZBanBDeZSPFjxM1m4LovXdb?usp=sharing) | 2xRTX 2080 Ti 11GB | +| transformer.yaml | 47.50 | 48.33 | 47.31 | 1 | 0.3 | 0 | [Model](https://www.dropbox.com/sh/tmh7op8xwthdta0/AACuU9xHDHPs8ToxIIwoTLB0a?dl=0) | 2xRTX 2080 Ti 11GB | | transformer.yaml | 46.10 | 46.56 | 46.79 | 1 | 0.2 | 0.2 | Not Avail. | 2xRTX 2080 Ti 11GB | | conformer.yaml | 46.37 | 47.07 | 46.10 | 0 | 0 | 0 | Not Avail. | 2xRTX 2080 Ti 11GB | -| conformer.yaml | 48.09 | 48.19 | 48.04 | 1 | 0.3 | 0 | [Model](https://drive.google.com/drive/folders/1hlMOy1yutwkcXgKIW7tMa5WEe1ixhLaU?usp=sharing) | 1xTesla A100 (works with 2xRTX 2080 Ti) | +| conformer.yaml | 48.09 | 48.19 | 48.04 | 1 | 0.3 | 0 | [Model](https://www.dropbox.com/sh/qz33qjr10y351gk/AADApachs3WtDXx67pIz5fCZa?dl=0) | 1xTesla A100 (works with 2xRTX 2080 Ti) | # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -41,6 +44,15 @@ Results are reported in terms of sacrebleu. Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/conformer.yaml b/recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/conformer.yaml index bf099397b8..18313552a3 100644 --- a/recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/conformer.yaml +++ b/recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/conformer.yaml @@ -14,7 +14,7 @@ debug: False seed: 8886 num_workers: 8 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/conformer/ ckpt_interval_minutes: 15 # save checkpoint every N min bleu_file: !ref /bleu.txt @@ -26,17 +26,6 @@ data_folder: !PLACEHOLDER # Path to the 
folder generated by the preparation scri tokenizer_file: !PLACEHOLDER # Path to the file of the Tokenizer model (.model) -# Tokenier initialization -tokenizer: !new:sentencepiece.SentencePieceProcessor - -# Pretrain the tokenizer -pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer - collect_in: ./tokenizer - loadables: - tokenizer: !ref - paths: - tokenizer: !ref - # The train logger writes training statistics to a file, as well as stdout. train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref @@ -55,17 +44,21 @@ normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global update_until_epoch: 4 -speed_perturb: !new:speechbrain.processing.speech_augmentation.SpeedPerturb +# Speed perturbation +speed_changes: [90, 100, 110] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb orig_freq: !ref - speeds: [90, 100, 110] + speeds: !ref # Trainer settings number_of_epochs: 50 -valid_search_eopch: 10 +valid_search_epoch: 10 batch_size: 4 # this works for 2 GPUs with 11GB -gradient_accumulation: 16 +grad_accumulation_factor: 16 loss_reduction: batchmean sorting: random +avg_checkpoints: 5 # Number of checkpoints to average for evaluation # stages related parameters stage_one_epochs: 100 # not gonna changing optimizer in this recipe @@ -88,7 +81,7 @@ test_dataloader_opts: batch_size: !ref num_workers: !ref -####################### Model parameters ########################### +####################### Model Parameters ########################### # Transformer d_model: 256 nhead: 4 @@ -132,7 +125,7 @@ eos_index: 2 # Decoding parameters min_decode_ratio: 0.0 max_decode_ratio: 1.0 -valid_search_interval: !ref +valid_search_interval: !ref valid_beam_size: 10 test_beam_size: 10 @@ -202,29 +195,25 @@ SGD: !name:torch.optim.SGD momentum: 0.99 nesterov: True -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , null] 
+valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref using_eos_threshold: False - length_normalization: False + length_normalization: True -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , null] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref using_eos_threshold: True length_normalization: True - ctc_weight: 0 - lm_weight: 0 log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -242,6 +231,17 @@ noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler n_warmup_steps: 35000 model_size: !ref +# Tokenier initialization +tokenizer: !new:sentencepiece.SentencePieceProcessor + +# Pretrain the tokenizer +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref /tokenizer + loadables: + tokenizer: !ref + paths: + tokenizer: !ref + # Checkpoint setting checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref @@ -251,6 +251,5 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer normalizer: !ref counter: !ref -bleu_computer: !name:speechbrain.utils.bleu.BLEUStats - merge_words: False +bleu_computer: !name:speechbrain.integrations.nlp.bleu.BLEUStats acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats diff --git a/recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/transformer.yaml b/recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/transformer.yaml index e9ed5cc5db..b790dd5e06 100644 --- a/recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/transformer.yaml +++ b/recipes/Fisher-Callhome-Spanish/ST/transformer/hparams/transformer.yaml @@ -14,7 +14,7 @@ debug: False seed: 8886 num_workers: 8 -__set_seed: !apply:torch.manual_seed [!ref ] 
+__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/transformer/ ckpt_interval_minutes: 15 # save checkpoint every N min bleu_file: !ref /bleu.txt @@ -54,17 +54,21 @@ normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global update_until_epoch: 4 -speed_perturb: !new:speechbrain.processing.speech_augmentation.SpeedPerturb +# Speed perturbation +speed_changes: [90, 100, 110] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb orig_freq: !ref - speeds: [90, 100, 110] + speeds: !ref # Trainer settings number_of_epochs: 50 -valid_search_eopch: 10 +valid_search_epoch: 10 batch_size: 8 # this works for 2 GPUs with 11GB -gradient_accumulation: 8 +grad_accumulation_factor: 8 loss_reduction: batchmean sorting: random +avg_checkpoints: 5 # Number of checkpoints to average for evaluation # stages related parameters stage_one_epochs: 100 # not gonna changing optimizer in this recipe @@ -87,7 +91,7 @@ test_dataloader_opts: batch_size: !ref num_workers: !ref -####################### Model parameters ########################### +####################### Model Parameters ########################### # Transformer d_model: 256 nhead: 4 @@ -128,7 +132,7 @@ eos_index: 2 # Decoding parameters min_decode_ratio: 0.0 max_decode_ratio: 1.0 -valid_search_interval: !ref +valid_search_interval: !ref valid_beam_size: 10 test_beam_size: 10 @@ -196,29 +200,25 @@ SGD: !name:torch.optim.SGD momentum: 0.99 nesterov: True -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , null] +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref using_eos_threshold: False - length_normalization: False + length_normalization: True -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - 
modules: [!ref , !ref , null] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref using_eos_threshold: True length_normalization: True - ctc_weight: 0 - lm_weight: 0 log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -245,6 +245,5 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer normalizer: !ref counter: !ref -bleu_computer: !name:speechbrain.utils.bleu.BLEUStats - merge_words: False +bleu_computer: !name:speechbrain.integrations.nlp.bleu.BLEUStats acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats diff --git a/recipes/Fisher-Callhome-Spanish/ST/transformer/train.py b/recipes/Fisher-Callhome-Spanish/ST/transformer/train.py index 2c71cb6df6..4321a82e26 100644 --- a/recipes/Fisher-Callhome-Spanish/ST/transformer/train.py +++ b/recipes/Fisher-Callhome-Spanish/ST/transformer/train.py @@ -12,17 +12,16 @@ """ import sys + import torch -import logging +from hyperpyyaml import load_hyperpyyaml +from sacremoses import MosesDetokenizer import speechbrain as sb +from speechbrain.utils.logger import get_logger -from sacremoses import MosesDetokenizer -from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main - -logger = logging.getLogger(__name__) -en_detoeknizer = MosesDetokenizer(lang="en") +logger = get_logger(__name__) +en_detokenizer = MosesDetokenizer(lang="en") class ST(sb.core.Brain): @@ -82,23 +81,26 @@ def compute_forward(self, batch, stage): mt_pred = self.modules.seq_lin(mt_pred) mt_p_seq = self.hparams.log_softmax(mt_pred) - # compute outputs + # Compute outputs hyps = None - if stage == sb.Stage.TRAIN: - hyps = None - elif stage == sb.Stage.VALID: - hyps = None - current_epoch = self.hparams.epoch_counter.current - if current_epoch % self.hparams.valid_search_interval == 0: - hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens) - elif 
stage == sb.Stage.TEST: - hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + if is_valid_search: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + elif is_test_search: + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) return p_ctc, p_seq, asr_p_seq, mt_p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): """Computes the loss given predictions and targets.""" - (p_ctc, p_seq, asr_p_seq, mt_p_seq, wav_lens, hyps,) = predictions + (p_ctc, p_seq, asr_p_seq, mt_p_seq, wav_lens, hyps) = predictions ids = batch.id @@ -120,25 +122,25 @@ def compute_objectives(self, predictions, batch, stage): # st attention loss attention_loss = self.hparams.seq_cost( - p_seq, tokens_eos, length=tokens_eos_lens, + p_seq, tokens_eos, length=tokens_eos_lens ) # asr attention loss if self.hparams.ctc_weight < 1 and self.hparams.asr_weight > 0: asr_attention_loss = self.hparams.seq_cost( - asr_p_seq, transcription_eos, length=transcription_eos_lens, + asr_p_seq, transcription_eos, length=transcription_eos_lens ) # asr ctc loss if self.hparams.ctc_weight > 0 and self.hparams.asr_weight > 0: asr_ctc_loss = self.hparams.ctc_cost( - p_ctc, transcription_tokens, wav_lens, transcription_lens, + p_ctc, transcription_tokens, wav_lens, transcription_lens ) # mt attention loss if self.hparams.mt_weight > 0: mt_loss = self.hparams.seq_cost( - mt_p_seq, tokens_eos, length=tokens_eos_lens, + mt_p_seq, tokens_eos, length=tokens_eos_lens ) asr_loss = (self.hparams.ctc_weight * asr_ctc_loss) + ( @@ -158,7 +160,7 @@ def compute_objectives(self, predictions, batch, stage): if stage == sb.Stage.TEST: # 4 references bleu score predictions = [ - en_detoeknizer.detokenize( + en_detokenizer.detokenize( 
hparams["tokenizer"].decode_ids(utt_seq).split(" ") ) for utt_seq in hyps @@ -174,7 +176,7 @@ def compute_objectives(self, predictions, batch, stage): targets = [] for reference in four_references: detokenized_translation = [ - en_detoeknizer.detokenize(translation.split(" ")) + en_detokenizer.detokenize(translation.split(" ")) for translation in reference ] targets.append(detokenized_translation) @@ -185,14 +187,14 @@ def compute_objectives(self, predictions, batch, stage): and stage == sb.Stage.VALID ): predictions = [ - en_detoeknizer.detokenize( + en_detokenizer.detokenize( hparams["tokenizer"].decode_ids(utt_seq).split(" ") ) for utt_seq in hyps ] targets = [ - en_detoeknizer.detokenize(translation.split(" ")) + en_detokenizer.detokenize(translation.split(" ")) for translation in batch.translation_0 ] self.bleu_metric.append(ids, predictions, [targets]) @@ -202,29 +204,17 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" + def on_fit_batch_start(self, batch, should_step): + """Gets called at the beginning of each fit_batch.""" # check if we need to switch optimizer # if so change the optimizer from Adam to SGD self.check_and_reset_optimizer() - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - # normalize the loss by gradient_accumulation step - (loss / self.hparams.gradient_accumulation).backward() - - if self.step % self.hparams.gradient_accumulation == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.optimizer.step() - self.optimizer.zero_grad() - # anneal lr every update + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: self.hparams.noam_annealing(self.optimizer) - return loss.detach() - def on_stage_start(self, stage, epoch): 
"""Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -251,7 +241,7 @@ def on_stage_end(self, stage, stage_loss, epoch): stage_stats["BLEU"] = self.bleu_metric.summarize("BLEU") # log stats and save checkpoint at end-of-epoch - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): + if stage == sb.Stage.VALID: current_epoch = self.hparams.epoch_counter.current # report different epoch stages according current stage @@ -279,7 +269,7 @@ def on_stage_end(self, stage, stage_loss, epoch): self.checkpointer.save_and_keep_only( meta={"ACC": stage_stats["ACC"], "epoch": epoch}, max_keys=["ACC"], - num_to_keep=5, + num_to_keep=self.hparams.avg_checkpoints, ) elif stage == sb.Stage.TEST: @@ -332,25 +322,22 @@ def on_fit_start(self): # Load latest checkpoint to resume training if interrupted if self.checkpointer is not None: - # do not reload the weights if training is interrupted right before stage 2 group = current_optimizer.param_groups[0] if "momentum" not in group: return - self.checkpointer.recover_if_possible( - device=torch.device(self.device) - ) + self.checkpointer.recover_if_possible() def on_evaluate_start(self, max_key=None, min_key=None): - """perform checkpoint averge if needed""" + """perform checkpoint average if needed""" super().on_evaluate_start() ckpts = self.checkpointer.find_checkpoints( max_key=max_key, min_key=min_key ) ckpt = sb.utils.checkpoints.average_checkpoints( - ckpts, recoverable_name="model", device=self.device + ckpts, recoverable_name="model" ) self.hparams.model.load_state_dict(ckpt, strict=True) @@ -359,7 +346,8 @@ def on_evaluate_start(self, max_key=None, min_key=None): def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # Define audio pipeline. 
In this case, we simply read the path contained # in the variable wav with the audio reader. @@ -386,7 +374,7 @@ def sp_audio_pipeline(wav): # The tokens without BOS or EOS is for computing CTC loss. @sb.utils.data_pipeline.takes("translation_0") @sb.utils.data_pipeline.provides( - "translation_0", "tokens_list", "tokens_bos", "tokens_eos", "tokens", + "translation_0", "tokens_list", "tokens_bos", "tokens_eos", "tokens" ) def one_reference_text_pipeline(translation): """Processes the transcriptions to generate proper labels""" @@ -401,7 +389,7 @@ def one_reference_text_pipeline(translation): yield tokens @sb.utils.data_pipeline.takes( - "translation_0", "translation_1", "translation_2", "translation_3", + "translation_0", "translation_1", "translation_2", "translation_3" ) @sb.utils.data_pipeline.provides( "translation_0", @@ -571,7 +559,8 @@ def transcription_text_pipeline(transcription): sort_key="duration", ) datasets["valid"] = datasets["valid"].filtered_sorted( - key_min_value={"duration": 1}, key_max_value={"duration": 5}, + key_min_value={"duration": 1}, + key_max_value={"duration": 5}, ) hparams["train_dataloader_opts"]["shuffle"] = True @@ -591,7 +580,7 @@ def transcription_text_pipeline(transcription): sb.utils.distributed.ddp_init_group(run_opts) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -602,8 +591,8 @@ def transcription_text_pipeline(transcription): ) # transcription/translation tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # We can now directly create the datasets for training, valid, and test datasets = dataio_prepare(hparams) diff --git a/recipes/Fisher-Callhome-Spanish/Tokenizer/hparams/train_bpe_1k.yaml 
b/recipes/Fisher-Callhome-Spanish/Tokenizer/hparams/train_bpe_1k.yaml index c4440de134..63892c5d88 100644 --- a/recipes/Fisher-Callhome-Spanish/Tokenizer/hparams/train_bpe_1k.yaml +++ b/recipes/Fisher-Callhome-Spanish/Tokenizer/hparams/train_bpe_1k.yaml @@ -10,6 +10,8 @@ original_data_folder: !PLACEHOLDER # i.e., path to the original data contain LDC data_folder: !PLACEHOLDER # Path where to store the .json and prepared data output_folder: !PLACEHOLDER # Path where to store theTokenizer output (model, logs etc) device: "cuda:0" # for resample audio +skip_prep: False +train_annotation: !ref /train/data.json # Tokenizer parameters token_type: bpe # ["unigram", "bpe", "char"] @@ -23,10 +25,10 @@ annotation_read: "transcription_and_translation" # field to read tokenizer: !name:speechbrain.tokenizers.SentencePiece.SentencePiece model_dir: !ref vocab_size: !ref - annotation_train: !ref /train/data.json + annotation_train: !ref annotation_read: !ref model_type: !ref # ["unigram", "bpe", "char"] - annotation_list_to_check: [!ref /train/data.json] + annotation_list_to_check: [!ref ] annotation_format: json bos_id: 1 eos_id: 2 diff --git a/recipes/Fisher-Callhome-Spanish/Tokenizer/train.py b/recipes/Fisher-Callhome-Spanish/Tokenizer/train.py index 975ae728c4..6a0ddda43c 100644 --- a/recipes/Fisher-Callhome-Spanish/Tokenizer/train.py +++ b/recipes/Fisher-Callhome-Spanish/Tokenizer/train.py @@ -1,6 +1,6 @@ #!/usr/bin/env/python3 """Recipe for training a BPE tokenizer with librispeech. -The tokenizer coverts words into sub-word units that can +The tokenizer converts words into sub-word units that can be used to train a language (LM) or an acoustic model (AM). 
When doing a speech recognition experiment you have to make sure that the acoustic and language models are trained with @@ -15,15 +15,15 @@ import sys -import speechbrain as sb -from hyperpyyaml import load_hyperpyyaml from fisher_callhome_prepare import prepare_fisher_callhome_spanish +from hyperpyyaml import load_hyperpyyaml -if __name__ == "__main__": +import speechbrain as sb +if __name__ == "__main__": # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -34,11 +34,12 @@ ) # Data preparation, to be run on only one process. - prepare_fisher_callhome_spanish( - data_folder=hparams["original_data_folder"], - save_folder=hparams["data_folder"], - device=hparams["device"], - ) + if not hparams["skip_prep"]: + prepare_fisher_callhome_spanish( + data_folder=hparams["original_data_folder"], + save_folder=hparams["data_folder"], + device=hparams["device"], + ) # Train tokenizer hparams["tokenizer"]() diff --git a/recipes/Fisher-Callhome-Spanish/fisher_callhome_prepare.py b/recipes/Fisher-Callhome-Spanish/fisher_callhome_prepare.py index 660c7d75d5..e56242fa47 100644 --- a/recipes/Fisher-Callhome-Spanish/fisher_callhome_prepare.py +++ b/recipes/Fisher-Callhome-Spanish/fisher_callhome_prepare.py @@ -6,23 +6,22 @@ YAO-FEI, CHENG 2021 """ +import json import os import re -import json import string -import logging import subprocess - -from typing import List from dataclasses import dataclass, field +from typing import List import torch -import torchaudio - from tqdm import tqdm + +from speechbrain.augment.time_domain import Resample +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import get_all_files +from speechbrain.utils.logger import get_logger from speechbrain.utils.torch_audio_backend import 
check_torchaudio_backend -from speechbrain.processing.speech_augmentation import Resample try: from sacremoses import MosesPunctNormalizer, MosesTokenizer @@ -31,7 +30,7 @@ err_msg += "Install using `pip install sacremoses`.\n" raise ImportError(err_msg) -logger = logging.getLogger(__name__) +logger = get_logger(__name__) check_torchaudio_backend() es_normalizer = MosesPunctNormalizer(lang="es") @@ -52,7 +51,7 @@ class TDF: end: int end time of utterance transcript: str - transcript of utteranc + transcript of utterance """ channel: int @@ -78,19 +77,26 @@ class Data: def prepare_fisher_callhome_spanish( data_folder: str, save_folder: str, device: str = "cpu" ): - """ Prepares the json files for the Mini Fisher-Callhome-Spanish dataset. + Arguments --------- data_folder : str Path to the folder where the Fisher-Callhome-Spanish dataset is stored. - save_folder: str: + save_folder : str Path of train/valid/test specification file will be saved. + device : str + The device on which to perform computation, e.g. 
"cpu", "cuda" + + Returns + ------- + None + Example ------- - >>> data_folder = '/path/to/fisher-callhome' - >>> save_foler = 'data' + >>> data_folder = "/path/to/fisher-callhome" + >>> save_folder = "data" >>> prepare_fisher_callhome_spanish(data_folder, save_folder) """ @@ -264,7 +270,7 @@ def check_folders(*folders) -> bool: def get_data_list(path: str) -> str: - with open(path, "r", encoding="utf-8") as data_file: + with open(path, encoding="utf-8") as data_file: return data_file.readlines() @@ -272,7 +278,7 @@ def extract_transcription(transcription_path: str) -> List[TDF]: """Extract transcriptions from given file""" extracted_transcriptions = [] - with open(transcription_path) as transcription_file: + with open(transcription_path, encoding="utf-8") as transcription_file: # get rid of the first three useless headers transcriptions = transcription_file.readlines()[3:] @@ -307,7 +313,7 @@ def concate_transcriptions_by_mapping_file( ) -> List[Data]: """return concated transcriptions from the given mapping file""" - with open(mapping_file_path, "r", encoding="utf-8") as fisher_mapping_file: + with open(mapping_file_path, encoding="utf-8") as fisher_mapping_file: fisher_mapping = fisher_mapping_file.readlines() utterances = [] @@ -324,8 +330,9 @@ def concate_transcriptions_by_mapping_file( if len(need_to_be_concate_lines) > 1: # index shift one is because id is count from 1 in file however, list start from 0 concated_transcripts = selected_transcription[ - need_to_be_concate_lines[0] - - 1 : need_to_be_concate_lines[-1] + need_to_be_concate_lines[0] - 1 : need_to_be_concate_lines[ + -1 + ] ] concated_transcripts = list( map(lambda tdf: tdf.transcript, concated_transcripts) @@ -357,11 +364,11 @@ def concate_transcriptions_by_mapping_file( need_to_be_concate_lines[0] - 1 ].channel channel_symbol = "B" if channel == 1 else "A" - uttrance_id = f"{uid}-{channel_symbol}-{start:06d}-{end:06d}" + utterance_id = f"{uid}-{channel_symbol}-{start:06d}-{end:06d}" 
utterances.append( Data( - uid=uttrance_id, + uid=utterance_id, transcription=concated_transcripts, wav=f"{speech_folder}/{uid}.sph {channel} {start} {end}", duration=(end - start) / 100, @@ -386,7 +393,7 @@ def segment_audio( end = int(end / 100 * 8000) num_frames = end - start - data, _ = torchaudio.load( + data, _ = audio_io.load( audio_path, frame_offset=start, num_frames=num_frames ) @@ -395,7 +402,7 @@ def segment_audio( data = resampler(data) data = torch.unsqueeze(data[channel], 0) - torchaudio.save(save_path, src=data, sample_rate=sample_rate) + audio_io.save(save_path, src=data, sample_rate=sample_rate) def get_transcription_files_by_dataset( @@ -418,7 +425,7 @@ def get_transcription_files_by_dataset( def get_translations_from_path(translation_path: str) -> List[str]: - """"return translations from the given path""" + """ "return translations from the given path""" extracted_translations = [] with open(translation_path, "rb") as translations_file: original_translations = translations_file.readlines() @@ -472,7 +479,7 @@ def make_data_splits( for fisher_split in fisher_splits: split = set() with open( - f"{mapping_folder}/fisher_{fisher_split}", "r", encoding="utf-8" + f"{mapping_folder}/fisher_{fisher_split}", encoding="utf-8" ) as fisher_file, open( f"./splits/{fisher_split}", "a+", encoding="utf-8" ) as split_file: @@ -496,7 +503,7 @@ def remove_punctuation(text: str) -> str: text = text.replace("'", "apostrophe") # based on the definition of [[:punct]] - punctuation = r"[{}]".format(string.punctuation) + punctuation = rf"[{string.punctuation}]" text = re.sub(punctuation, "", text) text = text.replace("spacemark", "") @@ -513,7 +520,7 @@ def remove_punctuation(text: str) -> str: def normalize_punctuation(text: str) -> str: """remove punctuation from given string""" - # remove brachets and inside + # remove brackets and inside text = re.sub(r"\([^)]*\)", " ", text) text = re.sub(r"\[[^]]+\]", " ", text) @@ -550,7 +557,7 @@ def normalize_punctuation(text: 
str) -> str: text = re.sub(r"\", "", text) text = re.sub(r"\", "", text) text = re.sub( - r"\", + r"\", # cspell:ignore ideea "", text, ) @@ -646,7 +653,7 @@ def clean_transcription(transcription: str) -> str: transcription = transcription.replace("<", "larrow") transcription = transcription.replace(">", "rarrow") - punctuation = r"[{}]".format(string.punctuation) + punctuation = rf"[{string.punctuation}]" transcription = re.sub(punctuation, "", transcription) transcription = transcription.replace("larrow", "<") @@ -672,7 +679,7 @@ def clean_transcription(transcription: str) -> str: def clean_translation(translation: str) -> str: - """clean a given translation and returne a cleaned translation""" + """clean a given translation and return a cleaned translation""" translation = translation.strip() translation = translation.lower() @@ -710,7 +717,7 @@ def remove_labels(transcription: str): transcription = re.sub(r"", "", transcription) + transcription = re.sub(r"<[/]?foreign\s*\w*>", "", transcription) transcription = re.sub(r"", "", transcription) transcription = re.sub(r"foreign>", "", transcription) diff --git a/recipes/GigaSpeech/ASR/CTC/README.md b/recipes/GigaSpeech/ASR/CTC/README.md new file mode 100644 index 0000000000..488906ecb6 --- /dev/null +++ b/recipes/GigaSpeech/ASR/CTC/README.md @@ -0,0 +1,91 @@ +# Speech Recognition on GigaSpeech with pre-trained self-supervised models and CTC + +This folder contains the scripts to finetune any HuggingFace transformer model based +on transformers (wavlm, wav2vec 2, HuBERT...) with CTC for speech recognition on +GigaSpeech. Training can be done on any of the GigaSpeech subset (XL, L, S etc). + +## Data access and download + +**The XL set is fairly large, 2.2TB are necessary to store the compressed and uncompressed version of the data** + +SpeechBrain supports two ways of dealing with the GigaSpeech dataset: +1. [HuggingFace dataset](https://huggingface.co/datasets/speechcolab/gigaspeech/). 
For HuggingFace, note that **you must use** the HuggingFace client to log in first before running the recipe. +2. [Original Github](https://github.com/SpeechColab/GigaSpeech). + +You simply need to follow the instructions on either of the above links. **We strongly +recommend using HuggingFace as the download speed for people outside of China is +much quicker**. + +## Data preparation + +**This step can be very long depending on your internet connection and filesystem for the XL split of GigaSpeech. For DDP (multi GPU) the recipe must be run once without DDP otherwise it will timeout. You do not want to let X GPUs hang out doing nothing for hours anyway. Use the *data_prep_only* flag from the yaml to exit after data preparation** + +SpeechBrain will automatically download the dataset if you use HuggingFace. Note that if you use HuggingFace, the *data_folder* argument is used to store the **extracted** dataset. However, HuggingFace first needs to download the compressed data, and this is not stored in *data_folder* by default. Indeed, HuggingFace is a bit strict in the way it operates with datasets, and the data will be put into the folder specified by the environment variable *HF_HUB_CACHE* or, if not set, *HF_HOME* or, if not set, *XDG_CACHE_HOME*. Hence, we recommend setting the *HF_HUB_CACHE* to the place where you want to store the data first. For example, you can set it like this: + +```export HF_HUB_CACHE=/path/to/your/data/folder``` + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies.
To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +# How to run + +With a single GPU: +``` +python train_with_wavlm.py hparams/file.yaml +``` +With multiple GPUs: +``` +torchrun --nproc_per_node=8 train_with_wavlm.py hparams/file.yaml +``` + +# KenLM n-gram CTC rescoring +To enable n-gram rescoring during the decoding, you must download (or train yourself) the n-gram language model: + +``` +wget https://huggingface.co/wgb14/gigaspeech_lm/resolve/main/3gram_pruned_1e7.arpa.gz +wget https://huggingface.co/wgb14/gigaspeech_lm/resolve/main/4gram.arpa.gz +gunzip -c 3gram_pruned_1e7.arpa.gz > 3gram_pruned_1e7.arpa +gunzip -c 4gram.arpa.gz > 4gram.arpa +``` + +Then simply modify the *test_beam_search* in the yaml by adding *kenlm_model_path:* and your path as a parameter. + +# Rescoring with a Neural Language Model +This can be done by modifying the current recipe. We invite you to have a look at our LibriSpeech CTC recipe for many different examples. + +# Results + +| Release | Hyperparams file | Decoding method | Finetuning Split | Test WER | Dev WER | HuggingFace link | Full model link | Training GPUs | +|:-------------:|:---------------------------:| :----------:| :-----:| :-----:| :-----:| :-----:| :-----:| :-----:| +| 25-10-2024 | train_hf_wavlm.yaml | GreedySearch | XL | 11.88% | 11.86% | Unavailable\* | Unavailable\* | 8xRTX 3090 | + +\*: Unfortunately, we are unable to upload the checkpoints for the WavLM model at this time. We currently don't have plans to remedy this. + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/GigaSpeech/ASR/CTC/dataset.py b/recipes/GigaSpeech/ASR/CTC/dataset.py new file mode 120000 index 0000000000..f3bfeaf826 --- /dev/null +++ b/recipes/GigaSpeech/ASR/CTC/dataset.py @@ -0,0 +1 @@ +../../dataset.py \ No newline at end of file diff --git a/recipes/GigaSpeech/ASR/CTC/extra_requirements.txt b/recipes/GigaSpeech/ASR/CTC/extra_requirements.txt new file mode 100644 index 0000000000..a619ba044a --- /dev/null +++ b/recipes/GigaSpeech/ASR/CTC/extra_requirements.txt @@ -0,0 +1,5 @@ +datasets +kenlm +soundfile +speechcolab 
+transformers diff --git a/recipes/GigaSpeech/ASR/CTC/gigaspeech_prepare.py b/recipes/GigaSpeech/ASR/CTC/gigaspeech_prepare.py new file mode 120000 index 0000000000..5190685a8e --- /dev/null +++ b/recipes/GigaSpeech/ASR/CTC/gigaspeech_prepare.py @@ -0,0 +1 @@ +../../gigaspeech_prepare.py \ No newline at end of file diff --git a/recipes/GigaSpeech/ASR/CTC/hparams/train_hf_wavlm.yaml b/recipes/GigaSpeech/ASR/CTC/hparams/train_hf_wavlm.yaml new file mode 100644 index 0000000000..b90df96ae5 --- /dev/null +++ b/recipes/GigaSpeech/ASR/CTC/hparams/train_hf_wavlm.yaml @@ -0,0 +1,240 @@ +# ################################ +# Model: wavlm + DNN + CTC +# Decoding AM: Greedy for validation, and Beam search for testing +# Augmentation: SpecAugment +# Authors: Adel Moumen 2024, Titouan Parcollet 2024 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +experiment_name: train_wavlm_char +output_folder: !ref results// +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +wav2vec2_hub: microsoft/wavlm-large +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/GigaSpeech + +# see https://github.com/SpeechColab/GigaSpeech for more details on the dataset +# must be one of ["XS", "S", "M", "L", "XL"] +# and ["DEV", "TEST"] for the eval splits. +splits: ["XL", "DEV", "TEST"] +skip_prep: False +data_prep_only: False +download_with_HF: True +convert_opus_to_wav: True +keep_filler_words: False +keep_punctuation: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv +json_file: !ref /GigaSpeech.json + +# Training parameters + +# The training will either stop at number_of_epochs or optimizer_step_limit +# I.e. the first that is reached. 
+number_of_epochs: 10 +optimizer_step_limit: 300000 +warmup: 1000 # Not much is needed as models are pretrained +lr: 0.001 +lr_wav2vec: 0.0001 +sorting: ascending +num_workers: 4 +precision: fp16 # bf16, fp16 or fp32 +sample_rate: 16000 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 3 per GPU to fit 32GB of VRAM +batch_size: 8 +test_batch_size: 1 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + num_workers: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +# Using dynamic batching by default. This works with 4x24GB GPUs +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 50 +max_batch_length_val: 30 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# BPE parameters +token_type: char # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +# Model parameters +dnn_neurons: 1024 +dropout: 0.1 +freeze_wav2vec: False +freeze_wav2vec_extractor: False +wav2vec_output_dim: 1024 + +# Outputs +output_neurons: 29 # without punctuation +blank_index: 0 +bos_index: -1 # No bos/eos with CTC +eos_index: -1 + +# Decoding parameters +test_beam_search: + beam_size: 143 + topk: 1 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +# Speed perturbation 
+speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1 + drop_length_high: 5 + drop_count_low: 1000 + drop_count_high: 2000 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: True + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 2 + max_augmentations: 2 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + + +enc: !new:speechbrain.nnet.containers.Sequential + input_shape: [null, null, !ref ] + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: !name:speechbrain.nnet.normalization.BatchNorm1d + activation: !new:torch.nn.LeakyReLU + drop: !new:torch.nn.Dropout + p: !ref + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.BatchNorm1d + activation2: !new:torch.nn.LeakyReLU + drop2: !new:torch.nn.Dropout + p: !ref + linear3: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.BatchNorm1d + activation3: !new:torch.nn.LeakyReLU + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: False + freeze: !ref + freeze_feature_extractor: !ref + save_path: !ref + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , 
!ref ] + +model_opt_class: !name:torch.optim.AdamW + lr: !ref + +wav2vec_opt_class: !name:torch.optim.AdamW + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.WarmAndExpDecayLRSchedule + lr: !ref + n_warmup_steps: !ref + total_steps: !ref + decay_factor: 0.05 # Divided by twenty at the end. + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.WarmAndExpDecayLRSchedule + lr: !ref + n_warmup_steps: !ref + total_steps: !ref + decay_factor: 0.1 # Divided by ten at the end. + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/GigaSpeech/ASR/CTC/train_with_wavlm.py b/recipes/GigaSpeech/ASR/CTC/train_with_wavlm.py new file mode 100644 index 0000000000..9a18dd9877 --- /dev/null +++ b/recipes/GigaSpeech/ASR/CTC/train_with_wavlm.py @@ -0,0 +1,483 @@ +"""This recipe finetunes a pretrained wavlm model large +on GigaSpeech for speech recognition with CTC and at the character level. +The WavLM model can be swapped with any HuggingFace model if wanted. 
+ +To run this recipe, do the following: +> python train_with_wavlm.py hparams/train_hf_wavlm.yaml + +Authors + * Adel Moumen 2024 + * Titouan Parcollet 2024 +""" + +import logging +import os +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main + +logger = logging.getLogger(__name__) + + +# Define training procedure +class ASR(sb.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) + + # Downsample the inputs if specified + if hasattr(self.modules, "downsampler"): + wavs = self.modules.downsampler(wavs) + + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + + # Forward pass + + # Handling SpeechBrain vs HuggingFace pretrained models + if hasattr(self.modules, "extractor"): # SpeechBrain pretrained model + latents = self.modules.extractor(wavs) + feats = self.modules.encoder_wrapper(latents, wav_lens=wav_lens)[ + "embeddings" + ] + else: # HuggingFace pretrained model + feats = self.modules.wav2vec2(wavs, wav_lens) + + x = self.modules.enc(feats) + + # Compute outputs + logits = self.modules.ctc_lin(x) + + # Upsample the inputs if they have been highly downsampled + if hasattr(self.hparams, "upsampling") and self.hparams.upsampling: + logits = logits.view( + logits.shape[0], -1, self.hparams.output_neurons + ) + + p_ctc = self.hparams.log_softmax(logits) + + if stage == sb.Stage.VALID: + p_tokens = sb.decoders.ctc_greedy_decode( + p_ctc, wav_lens, blank_id=self.hparams.blank_index + ) + elif stage == sb.Stage.TEST: + p_tokens = test_searcher(p_ctc, wav_lens) + else: + p_tokens = None + + return p_ctc, wav_lens, p_tokens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + p_ctc, wav_lens, predicted_tokens = predictions + + ids = batch.id + tokens, tokens_lens = batch.tokens + + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + tokens_lens = self.hparams.wav_augment.replicate_labels(tokens_lens) + + loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) + + if stage == sb.Stage.VALID: + # Decode token terms to words + predicted_words = self.tokenizer( + predicted_tokens, task="decode_from_list" + ) + elif stage == sb.Stage.TEST: + predicted_words = [ + hyp[0].text.split(" ") for hyp in predicted_tokens + ] + + if stage != sb.Stage.TRAIN: + # Convert indices to words + target_words = 
undo_padding(tokens, tokens_lens) + target_words = self.tokenizer(target_words, task="decode_from_list") + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + if stage == sb.Stage.TEST: + if hasattr(self.hparams, "rescorer"): + self.hparams.rescorer.move_rescorers_to_device() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + new_lr_model = self.model_optimizer.param_groups[0]["lr"] + new_lr_wav2vec = self.wav2vec_optimizer.param_groups[0]["lr"] + + self.hparams.train_logger.log_stats( + stats_meta={ + "epoch": epoch, + "lr_model": new_lr_model, + "lr_wav2vec": new_lr_wav2vec, + }, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """Called after ``fit_batch()``. + + Arguments + --------- + batch : list of torch.Tensors + Batch of data to use for training. 
Default implementation assumes + this batch has two elements: inputs and targets. + outputs : list or dictionary of torch.Tensors + Returned value of compute_forward(). + loss : torch.Tensor + Returned value of compute_objectives(). + should_step : boolean + Whether optimizer.step() was called or not. + """ + + self.hparams.lr_annealing_model(self.model_optimizer) + self.hparams.lr_annealing_wav2vec(self.wav2vec_optimizer) + + def init_optimizers(self): + "Initializes the wav2vec2 optimizer and model optimizer" + # Handling SpeechBrain vs HuggingFace pretrained models + if hasattr(self.modules, "extractor"): # SpeechBrain pretrained model + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.encoder_wrapper.parameters() + ) + + else: # HuggingFace pretrained model + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.wav2vec2.parameters() + ) + + self.model_optimizer = self.hparams.model_opt_class( + self.hparams.model.parameters() + ) + + # save the optimizers in a dictionary + # the key will be used in `freeze_optimizers()` + self.optimizers_dict = { + "model_optimizer": self.model_optimizer, + } + if not self.hparams.freeze_wav2vec: + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "wav2vec_opt", self.wav2vec_optimizer + ) + self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + + +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. 
+ train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, + ) + + # We also sort the validation data so it is faster to validate + test_data = test_data.filtered_sorted(sort_key="duration") + + datasets = [train_data, valid_data, test_data] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("audio_path", "begin_time", "end_time") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(audio_path, begin_time, end_time): + if hparams["download_with_HF"]: + sig = sb.dataio.dataio.read_audio(audio_path) + else: + start_sample = int(float(begin_time) * hparams["sample_rate"]) + stop_sample = int(float(end_time) * hparams["sample_rate"]) + sig = sb.dataio.dataio.read_audio( + {"file": audio_path, "start": start_sample, "stop": stop_sample} + ) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("text") + @sb.utils.data_pipeline.provides( + "wrd", "char_list", "tokens_list", "tokens" + ) + def text_pipeline(wrd): + yield wrd + char_list = list(wrd) + yield char_list + tokens_list = tokenizer.sp.encode_as_ids(wrd) + yield tokens_list + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "text", "char_list", "tokens"], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_data, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + from gigaspeech_prepare import prepare_gigaspeech # noqa + + # We run on main for no reason as it is advised to not run this dataprep with + # DDP initialised. 
Indeed, it takes a lot of time and will most likely + # result in a timeout (internal DDP timeout). + run_on_main( + prepare_gigaspeech, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "splits": hparams["splits"], + "output_train": hparams["train_csv"], + "output_dev": hparams["valid_csv"], + "output_test": hparams["test_csv"], + "json_file": hparams["json_file"], + "convert_opus_to_wav": hparams["convert_opus_to_wav"], + "download_with_HF": hparams["download_with_HF"], + "punctuation": hparams["keep_punctuation"], + "skip_prep": hparams["skip_prep"], + "filler": hparams["keep_filler_words"], + }, + ) + + if hparams["data_prep_only"]: + logger.info( + "Data preparation finished. Restart the script with data_prep_only to False. " + ) + import sys + + sys.exit() + + # Defining tokenizer and loading it + tokenizer = SentencePiece( + model_dir=hparams["save_folder"], + vocab_size=hparams["output_neurons"], + annotation_train=hparams["train_csv"], + annotation_read="text", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], + ) + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_data, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams, tokenizer) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # We load the pretrained wav2vec2 model + if "pretrainer" in hparams.keys(): + run_on_main(hparams["pretrainer"].collect_files) + hparams["pretrainer"].load_collected() + + # We dynamically add the tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for the LM!! 
+ asr_brain.tokenizer = tokenizer + + # Manage dynamic batching + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + + vocab_list = [ + tokenizer.sp.id_to_piece(i) for i in range(tokenizer.sp.vocab_size()) + ] + + from speechbrain.decoders.ctc import CTCBeamSearcher + + test_searcher = CTCBeamSearcher( + **hparams["test_beam_search"], + vocab_list=vocab_list, + ) + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + # report WER on valid data + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "valid_wer.txt" + ) + asr_brain.evaluate( + valid_data, + min_key="WER", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) + + # report WER on test data + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "test_wer.txt" + ) + asr_brain.evaluate( + test_data, + min_key="WER", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/GigaSpeech/ASR/transducer/README.md b/recipes/GigaSpeech/ASR/transducer/README.md new file mode 100644 index 0000000000..43d79af082 --- /dev/null +++ 
b/recipes/GigaSpeech/ASR/transducer/README.md @@ -0,0 +1,127 @@ +# GigaSpeech streaming and non-streaming speech recognition with Transducer models. +This folder contains scripts necessary to run an ASR experiment with the GigaSpeech dataset. +Before running this recipe, make sure numba is installed (pip install numba) + +## Data access and download + +**The XL set is fairly large, 2.2TB are necessary to store the compressed and uncompressed version of the data** + +SpeechBrain supports two ways of dealing with the GigaSpeech dataset: +1. [HuggingFace dataset](https://huggingface.co/datasets/speechcolab/gigaspeech/). For HuggingFace, note that **you must use** the HuggingFace client to log in first before running the recipe. +2. [Original Github](https://github.com/SpeechColab/GigaSpeech). + +You simply need to follow the instructions on either of the above links. **We strongly +recommend using HuggingFace as the download speed for people outside of China is +much quicker**. + +## Data preparation + +**This step can be very long depending on your internet connection and filesystem for the XL split of GigaSpeech. For DDP (multi GPU) the recipe must be run once without DDP otherwise it will timeout. You do not want to let X GPUs sit idle for hours anyway. Use the *data_prep_only* flag from the yaml to exit after data preparation** + +SpeechBrain will automatically download the dataset if you use HuggingFace. Note that if you use HuggingFace, the *data_folder* argument is used to store the **extracted** dataset. However, HuggingFace first needs to download the compressed data, and this is not stored in *data_folder* by default. Indeed, HuggingFace is a bit strict in the way it operates with datasets, and the data will be put into the folder specified by the environment variable *HF_HUB_CACHE* or, if not set, *HF_HOME* or, if not set, *XDG_CACHE_HOME*. 
Hence, we recommend setting the *HF_HUB_CACHE* to the place where you want to store the data first. For example, you can set it like this: + +```export HF_HUB_CACHE=/path/to/your/data/folder``` + +# Extra-Dependencies +This recipe supports two implementations of the transducer loss, see `use_torchaudio` arg in the yaml file: +1. Transducer loss from torchaudio (this requires torchaudio version >= 0.10.0). +2. Speechbrain implementation using Numba. To use it, please set `use_torchaudio=False` in the yaml file. This version is implemented within SpeechBrain and allows you to directly access the python code of the transducer loss (and directly modify it if needed). + +The Numba implementation is currently enabled by default as the `use_torchaudio` option is incompatible with `bfloat16` training. + +Note: Before running this recipe, make sure numba is installed. Otherwise, run: +``` +pip install numba +``` + +# How to run it +```shell +python train.py hparams/conformer_transducer.yaml +``` + +## Precision Notes +If your GPU effectively supports fp16 (half-precision) computations, it is recommended to execute the training script with the `--precision=fp16` (or `--precision=bf16`) option. +Enabling half precision can significantly reduce the peak VRAM requirements. For example, in the case of the Conformer Transducer recipe trained with GigaSpeech, the peak VRAM decreases from 39GB to 12GB when using fp16. +According to our tests, the performance is not affected. + +## Streaming model + +# Results (non-streaming) + +Results are obtained with beam search and no LM (no-streaming i.e. full context). + + +| Release | LM | Val. CER | Val. 
WER | Test CER | Test WER | Model | GPUs | +|:-------------:| -----:| --------:| --------:| --------:| --------:| :---------:|:-----------:| +| 08-11-2024 | None | 6.09%\* | 11.75%\* | 6.14%\* | 11.97%\* | [Dropbox](https://www.dropbox.com/scl/fo/jg0vzm8l27o9qsixpqzjo/ABpKqmTMg24RVJKLY5Io1eU?rlkey=8z51y0gosme1fh4niahvi6b84&st=6smf7i5z&dl=0), [HuggingFace](https://huggingface.co/speechbrain/asr-streaming-conformer-gigaspeech) | 4x A100 80GB | + +\*: These results were obtained with our usual training scripts and are included for completeness, **but note that we have noticed an unexpected significant improvement to the error rate (see #2753) using the inference code path. Please refer to the table below for better and more accurate results.** + +### WER vs chunk size & left context + +The following matrix presents the Word Error Rate (WER%) achieved on GigaSpeech +`test` with various chunk sizes (in ms). + +The relative difference is not trivial to interpret, because we are not testing +against a continuous stream of speech, but rather against utterances of various +lengths. This tends to bias results in favor of larger chunk sizes. + +The chunk size might not accurately represent expected latency due to slight +padding differences in streaming contexts. + +The left chunk size is not representative of the receptive field of the model. +Because the model caches the streaming context at different layers, the model +may end up forming indirect dependencies to audio many seconds ago. 
+ +| | full | cs=32 (1280ms) | 24 (960ms) | 16 (640ms) | 12 (480ms) | 8 (320ms) | +|:-----:|:------:|:------:|:------:|:------:|:------:|:------:| +| full | 11.00% | - | - | - | - | - | +| 16 | - | - | - | 11.70% | 11.84% | 12.14% | +| 8 | - | - | 11.50% | 11.72% | 11.88% | 12.28% | +| 4 | - | 11.40% | 11.53% | 11.81% | 12.03% | 12.64% | +| 2 | - | 11.46% | 11.67% | 12.03% | 12.43% | 13.25% | +| 1\*\* | - | 11.59% | 11.85% | 12.39% | 12.93% | 14.13% | + +(\*\*: model was never explicitly trained with this setting) + +### Inference + +Once your model is trained, you need a few manual steps in order to use it with the high-level streaming interfaces (`speechbrain.inference.ASR.StreamingASR`): + +1. Create a new directory where you want to store the model. +2. Copy `results/conformer_transducer//lm.ckpt` (optional; currently, for streaming rescoring LMs might be unsupported) and `tokenizer.ckpt` to that directory. +3. Copy `results/conformer_transducer//save/CKPT+????/model.ckpt` and `normalizer.ckpt` to that directory. +4. Copy your hyperparameters file to that directory. Uncomment the streaming specific keys and remove any training-specific keys. Alternatively, grab the inference hyperparameters YAML for this model from HuggingFace and adapt it to any changes you may have done. +5. You can now instantiate a `StreamingASR` with your model using `StreamingASR.from_hparams("/path/to/model/")`. + +The contents of that directory may be uploaded as a HuggingFace model, in which case the model source path can just be specified as `youruser/yourmodel`. + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/GigaSpeech/ASR/transducer/dataset.py b/recipes/GigaSpeech/ASR/transducer/dataset.py new file mode 120000 index 0000000000..f3bfeaf826 --- /dev/null +++ b/recipes/GigaSpeech/ASR/transducer/dataset.py @@ -0,0 +1 @@ +../../dataset.py \ No newline at end of file diff --git a/recipes/GigaSpeech/ASR/transducer/extra_requirements.txt b/recipes/GigaSpeech/ASR/transducer/extra_requirements.txt new file mode 100644 index 0000000000..f582033930 --- /dev/null +++ b/recipes/GigaSpeech/ASR/transducer/extra_requirements.txt @@ -0,0 +1,8 @@ 
+datasets +# Numba is used if use_torchaudio=False +# Numba might be faster, but it is harder to install +# You might need to install numba with conda +# You might also need to install other packages such as cudatoolkit +numba +soundfile +speechcolab diff --git a/recipes/GigaSpeech/ASR/transducer/gigaspeech_prepare.py b/recipes/GigaSpeech/ASR/transducer/gigaspeech_prepare.py new file mode 120000 index 0000000000..5190685a8e --- /dev/null +++ b/recipes/GigaSpeech/ASR/transducer/gigaspeech_prepare.py @@ -0,0 +1 @@ +../../gigaspeech_prepare.py \ No newline at end of file diff --git a/recipes/GigaSpeech/ASR/transducer/hparams/conformer_transducer.yaml b/recipes/GigaSpeech/ASR/transducer/hparams/conformer_transducer.yaml new file mode 100644 index 0000000000..b44538eebd --- /dev/null +++ b/recipes/GigaSpeech/ASR/transducer/hparams/conformer_transducer.yaml @@ -0,0 +1,405 @@ +# ############################################################################ +# Model: E2E ASR with transformer and transducer +# Encoder: Conformer +# Decoder: LSTM + beamsearch + RNNLM +# Tokens: BPE with unigram +# losses: Transducer + CTC (optional) + CE (optional) +# Training: GigaSpeech +# Authors: Titouan Parcollet 2024 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +experiment_name: conformer_transducer +output_folder: !ref results// +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/GigaSpeech + +# see https://github.com/SpeechColab/GigaSpeech for more details on the dataset +# must be one of ["XS", "S", "M", "L", "XL"] +# and ["DEV", "TEST"] for the eval splits. 
+splits: ["XL", "DEV", "TEST"] +skip_prep: False +data_prep_only: False +download_with_HF: True +convert_opus_to_wav: True +keep_filler_words: False +keep_punctuation: False +ckpt_interval_minutes: 10 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv +json_file: !ref /GigaSpeech.json + +####################### Training Parameters #################################### + +# To make Transformers converge, the global bath size should be large enough. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. +number_of_epochs: 40 # limited by the step limit in practice +optimizer_step_limit: 500000 +warmup_steps: 30000 +num_workers: 4 +batch_size_valid: 4 +lr: 0.0008 +weight_decay: 0.01 +number_of_ctc_epochs: 2 +ctc_weight: 0.3 # Multitask with CTC for the encoder (0.0 = disabled) +ce_weight: 0.0 # Multitask with CE for the decoder (0.0 = disabled) +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +precision: fp16 # bf16, fp16 or fp32 +grad_accumulation_factor: 1 + +# The batch size is used if and only if dynamic batching is set to False +# Validation and testing are done with fixed batches and not dynamic batching. +batch_size: 8 + +sorting: random +avg_checkpoints: 1 # Number of checkpoints to average for evaluation + +# Feature parameters +sample_rate: 16000 +n_fft: 512 +n_mels: 80 +win_length: 32 + +# Streaming & dynamic chunk training options +# At least for the current architecture on LibriSpeech, we found out that +# non-streaming accuracy is very similar between `streaming: True` and +# `streaming: False`. +streaming: True # controls all Dynamic Chunk Training & chunk size & left context mechanisms + +# Configuration for Dynamic Chunk Training. +# In this model, a chunk is roughly equivalent to 40ms of audio. 
+dynchunktrain_config_sampler: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler # yamllint disable-line rule:line-length + chunkwise_prob: 0.6 # Probability during a batch to limit attention and sample a random chunk size in the following range + chunk_size_min: 8 # Minimum chunk size (if in a DynChunkTrain batch) + chunk_size_max: 32 # Maximum chunk size (if in a DynChunkTrain batch) + limited_left_context_prob: 0.75 # If in a DynChunkTrain batch, the probability during a batch to restrict left context to a random number of chunks + left_context_chunks_min: 2 # Minimum left context size (in # of chunks) + left_context_chunks_max: 32 # Maximum left context size (in # of chunks) + # If you specify a valid/test config, you can optionally have evaluation be + # done with a specific DynChunkTrain configuration. + # valid_config: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig + # chunk_size: 24 + # left_context_size: 16 + # test_config: ... + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + num_workers: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +# Using dynamic batching by default. This was tuned for A100 80GB. +# Or turn it off (but training speed will decrease) +# Play with grad_accum_factor such that the total batch is around 600 to 1500 s. +# You may have to adjust down the max_batch_length_train for GPUs with smaller +# VRAM. The grad_accumulation_factor is tuned for 4x A100 80GB. You may have to +# increase this factor if you are training on fewer GPUs or smaller batch sizes. +dynamic_batching: True +max_batch_length_train: 500 +max_batch_length_val: 50 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# BPE parameters +token_type: unigram # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 768 +joint_dim: 512 +nhead: 8 +num_encoder_layers: 12 +num_decoder_layers: 0 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 1024 +dec_dim: 512 +dec_emb_dropout: 0.2 +dec_dropout: 0.1 + +# Decoding parameters +blank_index: 0 +bos_index: 1 +eos_index: 2 +pad_index: 0 +beam_size: 10 +nbest: 1 +# by default {state,expand}_beam = 2.3 as mention in paper +# https://arxiv.org/abs/1904.02619 +state_beam: 2.3 +expand_beam: 2.3 + +# If True uses torchaudio loss. 
Otherwise, the numba one +use_torchaudio: False + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + win_length: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 1 + max_augmentations: 1 + augment_prob: 1.0 + augmentations: [!ref ] + + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 12 + drop_length_high: 20 + drop_count_low: 1 + drop_count_high: 1 + replace: "zeros" + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 20 + drop_length_high: 25 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 2 + max_augmentations: 2 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line 
rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +# We must call an encoder wrapper so the decoder isn't run (we don't have any) +enc: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper + transformer: !ref + +# For MTL CTC over the encoder +proj_ctc: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +# Define some projection layers to make sure that enc and dec +# output dim are the same before joining +proj_enc: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +proj_dec: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +# Uncomment for MTL with CTC +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +emb: !new:speechbrain.nnet.embedding.Embedding + num_embeddings: !ref + consider_as_one_hot: True + blank_id: !ref + +dec: !new:speechbrain.nnet.RNN.LSTM + input_shape: [null, null, !ref - 1] + hidden_size: !ref + num_layers: 1 + re_init: True + +# For MTL with LM over the decoder (need to uncomment to activate) +# dec_lin: !new:speechbrain.nnet.linear.Linear +# input_size: !ref +# n_neurons: !ref +# bias: False + +# For MTL +ce_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: 0.1 + +Tjoint: !new:speechbrain.nnet.transducer.transducer_joint.Transducer_joint + joint: sum # joint [sum | concat] + nonlinearity: !ref + +transducer_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +transducer_cost: !name:speechbrain.nnet.losses.transducer_loss + blank_index: !ref + use_torchaudio: !ref + +# for MTL +# update model if any HEAD module is added 
+modules: + CNN: !ref + enc: !ref + emb: !ref + dec: !ref + Tjoint: !ref + transducer_lin: !ref + normalize: !ref + proj_ctc: !ref + proj_dec: !ref + proj_enc: !ref + + +# update model if any HEAD module is added +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref , !ref , !ref , !ref , !ref ] + +############################## Decoding & optimiser ############################ + +Greedysearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher + decode_network_lst: [!ref , !ref , !ref ] + tjoint: !ref + classifier_network: [!ref ] + blank_id: !ref + beam_size: 1 + nbest: 1 + +Beamsearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher + decode_network_lst: [!ref , !ref , !ref ] + tjoint: !ref + classifier_network: [!ref ] + blank_id: !ref + beam_size: !ref + nbest: !ref + state_beam: !ref + expand_beam: !ref + +opt_class: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 1.e-8 + weight_decay: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: !ref + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + scheduler: !ref + normalizer: !ref + counter: !ref + + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +# for the inference hparams, you will need to include and uncomment something like this: + +# make_tokenizer_streaming_context: !name:speechbrain.tokenizers.SentencePiece.SentencePieceDecoderStreamingContext +# tokenizer_decode_streaming: !name:speechbrain.tokenizers.SentencePiece.spm_decode_preserve_leading_space + +# make_decoder_streaming_context: 
!name:speechbrain.decoders.transducer.TransducerGreedySearcherStreamingContext # default constructor +# decoding_function: !name:speechbrain.decoders.transducer.TransducerBeamSearcher.transducer_greedy_decode_streaming +# - !ref # self + +# fea_streaming_extractor: !new:speechbrain.lobes.features.StreamingFeatureWrapper +# module: !new:speechbrain.nnet.containers.LengthsCapableSequential +# - !ref +# - !ref +# - !ref +# # don't consider normalization as part of the input filter chain. +# # normalization will operate at chunk level, which mismatches training +# # somewhat, but does not appear to result in noticeable degradation. +# properties: !apply:speechbrain.utils.filter_analysis.stack_filter_properties +# - [!ref , !ref ] diff --git a/recipes/GigaSpeech/ASR/transducer/train.py b/recipes/GigaSpeech/ASR/transducer/train.py new file mode 100644 index 0000000000..09e9fdc234 --- /dev/null +++ b/recipes/GigaSpeech/ASR/transducer/train.py @@ -0,0 +1,542 @@ +#!/usr/bin/env/python3 +"""Recipe for training a Transducer ASR system with GigaSpeech. +The system employs an encoder, a decoder, and an joint network +between them. Decoding is performed with beamsearch coupled with a neural +language model. + +To run this recipe, do the following: +> python train.py hparams/conformer_transducer.yaml + +With the default hyperparameters, the system employs a conformer encoder. +The decoder is based on a standard LSTM. Beamsearch coupled with a RNN +language model is used on the top of decoder probabilities. + +The neural network is trained on both CTC and negative-log likelihood +targets and sub-word units estimated with Byte Pairwise Encoding (BPE) +are used as basic recognition tokens. + +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, tokens (e.g, characters instead of BPE), +training split, and many +other possible variations. 
+ + +Authors + * Sylvain de Langen 2024 + * Titouan Parcollet 2024 + * Abdel Heba 2020 + * Mirco Ravanelli 2020 + * Ju-Chieh Chou 2020 + * Peter Plantinga 2020 +""" + +import os +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +class ASR(sb.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_with_bos, token_with_bos_lens = batch.tokens_bos + + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN: + if hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_with_bos = self.hparams.wav_augment.replicate_labels( + tokens_with_bos + ) + + feats = self.hparams.compute_features(wavs) + + # Add feature augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_with_bos = self.hparams.fea_augment.replicate_labels( + tokens_with_bos + ) + + current_epoch = self.hparams.epoch_counter.current + + # Old models may not have the streaming hparam, we don't break them in + # any other way so just check for its presence + if hasattr(self.hparams, "streaming") and self.hparams.streaming: + dynchunktrain_config = self.hparams.dynchunktrain_config_sampler( + stage + ) + else: + dynchunktrain_config = None + + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + src = self.modules.CNN(feats) + x = self.modules.enc( + src, + wav_lens, + pad_idx=self.hparams.pad_index, + dynchunktrain_config=dynchunktrain_config, + ) + x = self.modules.proj_enc(x) + + e_in = self.modules.emb(tokens_with_bos) + e_in = torch.nn.functional.dropout( + e_in, + self.hparams.dec_emb_dropout, + training=(stage == sb.Stage.TRAIN), + ) + h, _ = self.modules.dec(e_in) + h = torch.nn.functional.dropout( + h, self.hparams.dec_dropout, training=(stage == sb.Stage.TRAIN) + ) + h = self.modules.proj_dec(h) + + # Joint network + # add labelseq_dim to the encoder tensor: [B,T,H_enc] => [B,T,1,H_enc] + # add timeseq_dim to the decoder tensor: [B,U,H_dec] => [B,1,U,H_dec] + joint = self.modules.Tjoint(x.unsqueeze(2), h.unsqueeze(1)) + + # Output layer for transducer log-probabilities + logits_transducer = self.modules.transducer_lin(joint) + + # Compute outputs + if stage == sb.Stage.TRAIN: + p_ctc = None + p_ce = None + + if ( + self.hparams.ctc_weight > 0.0 + and current_epoch <= self.hparams.number_of_ctc_epochs + ): + # Output layer for ctc log-probabilities + out_ctc = self.modules.proj_ctc(x) + p_ctc = self.hparams.log_softmax(out_ctc) + + if self.hparams.ce_weight > 0.0: + # Output layer for ctc log-probabilities + p_ce = self.modules.dec_lin(h) + p_ce = self.hparams.log_softmax(p_ce) + + return p_ctc, 
p_ce, logits_transducer, wav_lens + + elif stage == sb.Stage.VALID: + best_hyps, scores, _, _ = self.hparams.Greedysearcher(x) + return logits_transducer, wav_lens, best_hyps + else: + ( + best_hyps, + best_scores, + nbest_hyps, + nbest_scores, + ) = self.hparams.Beamsearcher(x) + return logits_transducer, wav_lens, best_hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (Transducer+(CTC+NLL)) given predictions and targets.""" + + ids = batch.id + tokens, token_lens = batch.tokens + tokens_eos, token_eos_lens = batch.tokens_eos + + # Train returns 4 elements vs 3 for val and test + if len(predictions) == 4: + p_ctc, p_ce, logits_transducer, wav_lens = predictions + else: + logits_transducer, wav_lens, predicted_tokens = predictions + + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if hasattr(self.hparams, "fea_augment"): + ( + tokens, + token_lens, + tokens_eos, + token_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, token_lens, tokens_eos, token_eos_lens + ) + + if stage == sb.Stage.TRAIN: + CTC_loss = 0.0 + CE_loss = 0.0 + if p_ctc is not None: + CTC_loss = self.hparams.ctc_cost( + p_ctc, tokens, wav_lens, token_lens + ) + if p_ce is not None: + CE_loss = self.hparams.ce_cost( + p_ce, tokens_eos, length=token_eos_lens + ) + loss_transducer = self.hparams.transducer_cost( + logits_transducer, tokens, wav_lens, token_lens + ) + loss = ( + self.hparams.ctc_weight * CTC_loss + + self.hparams.ce_weight * CE_loss + + (1 - (self.hparams.ctc_weight + self.hparams.ce_weight)) + * loss_transducer + ) + else: + loss = self.hparams.transducer_cost( + logits_transducer, tokens, wav_lens, token_lens + ) + + if stage != sb.Stage.TRAIN: + # Decode token terms to words + predicted_words = self.tokenizer( + predicted_tokens, task="decode_from_list" + ) + + # Convert indices to words + 
target_words = undo_padding(tokens, token_lens) + target_words = self.tokenizer(target_words, task="decode_from_list") + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. 
+ if stage == sb.Stage.VALID: + lr = self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } + + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"], "epoch": epoch}, + min_keys=["WER"], + num_to_keep=self.hparams.avg_checkpoints, + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + # save the averaged checkpoint at the end of the evaluation stage + # delete the rest of the intermediate checkpoints + # WER is set to -0.1 so checkpointer only keeps the averaged checkpoint + self.checkpointer.save_and_keep_only( + meta={"WER": -0.1, "epoch": epoch}, + min_keys=["WER"], + num_to_keep=1, + ) + + def on_evaluate_start(self, max_key=None, min_key=None): + """perform checkpoint average if needed""" + super().on_evaluate_start() + + ckpts = self.checkpointer.find_checkpoints( + max_key=max_key, + min_key=min_key, + ) + ckpt = sb.utils.checkpoints.average_checkpoints( + ckpts, recoverable_name="model" + ) + + self.hparams.model.load_state_dict(ckpt, strict=True) + self.hparams.model.eval() + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. 
+ """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, + ) + + # We also sort the validation data so it is faster to validate + test_data = test_data.filtered_sorted(sort_key="duration") + + datasets = [train_data, valid_data, test_data] + + # 2. 
Define audio pipeline: + @sb.utils.data_pipeline.takes("audio_path", "begin_time", "end_time") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(audio_path, begin_time, end_time): + if hparams["download_with_HF"]: + sig = sb.dataio.dataio.read_audio(audio_path) + else: + start_sample = int(float(begin_time) * hparams["sample_rate"]) + stop_sample = int(float(end_time) * hparams["sample_rate"]) + sig = sb.dataio.dataio.read_audio( + {"file": audio_path, "start": start_sample, "stop": stop_sample} + ) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("text") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + yield wrd + tokens_list = tokenizer.sp.encode_as_ids(wrd) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
+ train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_data, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Use torchaudio if the device is CPU + if run_opts.get("device") == "cpu": + if "use_torchaudio: True" in overrides: + overrides.replace("use_torchaudio: True", "use_torchaudio: False") + else: + overrides += "\nuse_torchaudio: True" + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + from gigaspeech_prepare import prepare_gigaspeech # noqa + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_gigaspeech, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "splits": hparams["splits"], + "output_train": hparams["train_csv"], + "output_dev": hparams["valid_csv"], + "output_test": hparams["test_csv"], + "json_file": hparams["json_file"], + "skip_prep": hparams["skip_prep"], + "convert_opus_to_wav": hparams["convert_opus_to_wav"], + "download_with_HF": hparams["download_with_HF"], + 
"punctuation": hparams["keep_punctuation"], + "filler": hparams["keep_filler_words"], + }, + ) + + if hparams["data_prep_only"]: + logger.info( + "Data preparation finished. Restart the script with data_prep_only to False. " + ) + import sys + + sys.exit() + + # Defining tokenizer and loading it + tokenizer = SentencePiece( + model_dir=hparams["save_folder"], + vocab_size=hparams["output_neurons"], + annotation_train=hparams["train_csv"], + annotation_read="text", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], + ) + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_data, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # We dynamically add the tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for the LM!! + asr_brain.tokenizer = tokenizer + + # We dynamically add the tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for the LM!! 
+ train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if valid_bsampler is not None: + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + # report WER on valid data + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "valid_wer.txt" + ) + asr_brain.evaluate( + valid_data, + min_key="WER", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) + + # report WER on test data + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "test_wer.txt" + ) + asr_brain.evaluate( + test_data, + min_key="WER", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/GigaSpeech/README.md b/recipes/GigaSpeech/README.md new file mode 100644 index 0000000000..a71fd9593d --- /dev/null +++ b/recipes/GigaSpeech/README.md @@ -0,0 +1,13 @@ +# Experimenting with the GigaSpeech dataset + +GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised and unsupervised training (this implementation contains only labelled data for now). However, the data access is gated, meaning, you need to request access to it. + +# Data access and download + +SpeechBrain supports two ways of dealing with the GigaSpeech dataset: +1. [HuggingFace dataset](https://huggingface.co/datasets/speechcolab/gigaspeech/). 
For HuggingFace note that **you must use** the HuggingFace client to log in first before running the recipe. +2. [Original Github](https://github.com/SpeechColab/GigaSpeech). + +You simply need to follow the instructions on either of the above links. **We strongly +recomment using HuggingFace as the download speed for people outside of China is +much quicker**. \ No newline at end of file diff --git a/recipes/GigaSpeech/dataset.py b/recipes/GigaSpeech/dataset.py new file mode 100644 index 0000000000..2a3fd76138 --- /dev/null +++ b/recipes/GigaSpeech/dataset.py @@ -0,0 +1,449 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MODIFIED BY: Adel Moumen 2024 +""" +GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality +labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised +and unsupervised training. Around 40,000 hours of transcribed audio is first collected from audiobooks, podcasts +and YouTube, covering both read and spontaneous speaking styles, and a variety of topics, such as arts, science, +sports, etc. A new forced alignment and segmentation pipeline is proposed to create sentence segments suitable +for speech recognition training, and to filter out segments with low-quality transcription. 
For system training, +GigaSpeech provides five subsets of different sizes, 10h, 250h, 1000h, 2500h, and 10000h. +For our 10,000-hour XL training subset, we cap the word error rate at 4% during the filtering/validation stage, +and for all our other smaller training subsets, we cap it at 0%. The DEV and TEST evaluation sets, on the other hand, +are re-processed by professional human transcribers to ensure high transcription quality. +""" + +import csv +import os + +import datasets + +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + +_CITATION = """\ +@article{DBLP:journals/corr/abs-2106-06909, + author = {Guoguo Chen and + Shuzhou Chai and + Guanbo Wang and + Jiayu Du and + Wei{-}Qiang Zhang and + Chao Weng and + Dan Su and + Daniel Povey and + Jan Trmal and + Junbo Zhang and + Mingjie Jin and + Sanjeev Khudanpur and + Shinji Watanabe and + Shuaijiang Zhao and + Wei Zou and + Xiangang Li and + Xuchen Yao and + Yongqing Wang and + Yujun Wang and + Zhao You and + Zhiyong Yan}, + title = {GigaSpeech: An Evolving, Multi-domain {ASR} Corpus with 10, 000 Hours + of Transcribed Audio}, + journal = {CoRR}, + volume = {abs/2106.06909}, + year = {2021}, + url = {https://arxiv.org/abs/2106.06909}, + eprinttype = {arXiv}, + eprint = {2106.06909}, + timestamp = {Wed, 29 Dec 2021 14:29:26 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-2106-06909.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +""" + +_DESCRIPTION = """\ +GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality +labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised +and unsupervised training. Around 40,000 hours of transcribed audio is first collected from audiobooks, podcasts +and YouTube, covering both read and spontaneous speaking styles, and a variety of topics, such as arts, science, +sports, etc. 
A new forced alignment and segmentation pipeline is proposed to create sentence segments suitable +for speech recognition training, and to filter out segments with low-quality transcription. For system training, +GigaSpeech provides five subsets of different sizes, 10h, 250h, 1000h, 2500h, and 10000h. +For our 10,000-hour XL training subset, we cap the word error rate at 4% during the filtering/validation stage, +and for all our other smaller training subsets, we cap it at 0%. The DEV and TEST evaluation sets, on the other hand, +are re-processed by professional human transcribers to ensure high transcription quality. +""" + +_HOMEPAGE = "https://github.com/SpeechColab/GigaSpeech" + +_LICENSE = "Apache License 2.0" + +_CATEGORIES = ( + "People and Blogs", + "Business", + "Nonprofits and Activism", + "Crime", + "History", + "Pets and Animals", + "News and Politics", + "Travel and Events", + "Kids and Family", + "Leisure", + "N/A", + "Comedy", + "News and Politics", + "Sports", + "Arts", + "Science and Technology", + "Autos and Vehicles", + "Science and Technology", + "People and Blogs", + "Music", + "Society and Culture", + "Education", + "Howto and Style", + "Film and Animation", + "Gaming", + "Entertainment", + "Travel and Events", + "Health and Fitness", + "audiobook", +) + +_SOURCES = ("audiobook", "podcast", "youtube") + +_SUBSETS = ("xs", "s", "m", "l", "xl") + +_BASE_DATA_URL = ( + "https://huggingface.co/datasets/speechcolab/gigaspeech/resolve/main/data/" +) + +_AUDIO_ARCHIVE_URL = ( + _BASE_DATA_URL + + "audio/{subset}_files{is_additional}/{subset}_chunks_{archive_id:04}.tar.gz" +) + +_META_URL = ( + _BASE_DATA_URL + + "metadata/{subset}_metadata{is_additional}/{subset}_chunks_{archive_id:04}_metadata.csv" +) + +_N_ARCHIVES_URL = _BASE_DATA_URL + "{subset}_n_archives{is_additional}.txt" + +logger = datasets.utils.logging.get_logger(__name__) + + +class GigaspeechConfig(datasets.BuilderConfig): + """BuilderConfig for Gigaspeech.""" + + def __init__(self, 
name, *args, **kwargs): + super().__init__(name=name, *args, **kwargs) + # larger subsets are supersets of smaller subsets, + # if we want to download "m", we need to download "xs" and "s" data too. + # so if name == "m", self.subsets_to_download will be ("xs", "s", "m") + if name not in {"dev", "test"}: + self.subsets_to_download = _SUBSETS[: _SUBSETS.index(name) + 1] + else: + self.subsets_to_download = (name,) + + +class Gigaspeech(datasets.GeneratorBasedBuilder): + """ + GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality + labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised + and unsupervised training (this implementation contains only labelled data for now). + Around 40,000 hours of transcribed audio is first collected from audiobooks, podcasts + and YouTube, covering both read and spontaneous speaking styles, and a variety of topics, such as arts, science, + sports, etc. A new forced alignment and segmentation pipeline is proposed to create sentence segments suitable + for speech recognition training, and to filter out segments with low-quality transcription. For system training, + GigaSpeech provides five subsets of different sizes, 10h, 250h, 1000h, 2500h, and 10000h. + For our 10,000-hour XL training subset, we cap the word error rate at 4% during the filtering/validation stage, + and for all our other smaller training subsets, we cap it at 0%. The DEV and TEST evaluation sets, on the other hand, + are re-processed by professional human transcribers to ensure high transcription quality. 
+ """ + + VERSION = datasets.Version("1.0.0") + + BUILDER_CONFIGS = [ + GigaspeechConfig(name=subset) for subset in _SUBSETS + ("dev", "test") + ] + + DEFAULT_WRITER_BATCH_SIZE = 128 + + def _info(self): + features = datasets.Features( + { + "segment_id": datasets.Value("string"), + "speaker": datasets.Value("string"), + "text": datasets.Value("string"), + "audio": datasets.Audio(sampling_rate=16_000, decode=False), + "begin_time": datasets.Value("float32"), + "end_time": datasets.Value("float32"), + "audio_id": datasets.Value("string"), + "title": datasets.Value("string"), + "url": datasets.Value("string"), + "source": datasets.ClassLabel(names=_SOURCES), + "category": datasets.ClassLabel(names=_CATEGORIES), + "original_full_path": datasets.Value( + "string" + ), # relative path to full audio in original data dirs + } + ) + return datasets.DatasetInfo( + description=_DESCRIPTION, + features=features, + homepage=_HOMEPAGE, + license=_LICENSE, + citation=_CITATION, + ) + + def _is_additional_data(self, name): + if name in {"s", "m", "l", "xl"}: + return "_additional" + return "" + + @property + def _splits_to_subsets(self): + return { + "train": self.config.subsets_to_download, + "dev": ["dev"], + "test": ["test"], + } + + def _read_n_archives(self, n_archives_path): + with open(n_archives_path, encoding="utf-8") as f: + return int(f.read().strip()) + + def _split_generators(self, dl_manager): + splits_to_subsets = self._splits_to_subsets + if self.config.name in {"dev", "test"}: + splits = (self.config.name,) + else: + splits = ("train", "dev", "test") + + # 1. get number of archives (shards) in each subset + n_archives_links = { + split: { + subset: _N_ARCHIVES_URL.format( + subset=subset, + is_additional=self._is_additional_data(subset), + ) + for subset in splits_to_subsets[split] + } + for split in splits + } + logger.info("Downloading the data. It may take a while.") + paths = dl_manager.download(n_archives_links) + logger.info("Extracting the data. 
It may take a while.") + n_archives_paths = dl_manager.extract(paths) + n_archives = { + # mapping from a subset to a single number - number of audio archives (shards) in a subset + split: { + subset: self._read_n_archives(n_archives_paths[split][subset]) + for subset in splits_to_subsets[split] + } + for split in splits + } + + # 2. prepare sharded archives with audio files + audio_archives_urls = { + split: { + subset: [ + _AUDIO_ARCHIVE_URL.format( + subset=subset, + is_additional=self._is_additional_data(subset), + archive_id=i, + ) + for i in range(n_archives[split][subset]) + ] + for subset in splits_to_subsets[split] + } + for split in splits + } + audio_archives_paths = dl_manager.download(audio_archives_urls) + # flatten archives paths from + # {"train": {"xs": [path1, path2,], "s": [path3], "m": [path5, path5]}, "dev": {"dev": [path6,...]}, "test": {"test": [...]}} + # to {"train": [path1, path2, path3, path4, path5], "dev": [path6, ...], "test": [...]} + audio_archives_paths = _flatten_nested_dict(audio_archives_paths) + local_audio_archives_paths = ( + dl_manager.extract(audio_archives_paths) + if not dl_manager.is_streaming + else None + ) + + # 3. 
prepare sharded metadata csv files + meta_urls = { + split: { + subset: [ + _META_URL.format( + subset=subset, + is_additional=self._is_additional_data(subset), + archive_id=i, + ) + for i in range(n_archives[split][subset]) + ] + for subset in splits_to_subsets[split] + } + for split in splits + } + meta_paths = dl_manager.download_and_extract(meta_urls) + meta_paths = _flatten_nested_dict(meta_paths) + + if self.config.name not in {"dev", "test"}: + return [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={ + "audio_archives_iterators": [ + dl_manager.iter_archive(archive_path) + for archive_path in audio_archives_paths["train"] + ], + "local_audio_archives_paths": ( + local_audio_archives_paths["train"] + if local_audio_archives_paths + else None + ), + "meta_paths": meta_paths["train"], + }, + ), + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + gen_kwargs={ + "audio_archives_iterators": [ + dl_manager.iter_archive(archive_path) + for archive_path in audio_archives_paths["dev"] + ], + "local_audio_archives_paths": ( + local_audio_archives_paths["dev"] + if local_audio_archives_paths + else None + ), + "meta_paths": meta_paths["dev"], + }, + ), + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={ + "audio_archives_iterators": [ + dl_manager.iter_archive(archive_path) + for archive_path in audio_archives_paths["test"] + ], + "local_audio_archives_paths": ( + local_audio_archives_paths["test"] + if local_audio_archives_paths + else None + ), + "meta_paths": meta_paths["test"], + }, + ), + ] + + if self.config.name == "dev": + return [ + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + gen_kwargs={ + "audio_archives_iterators": [ + dl_manager.iter_archive(archive_path) + for archive_path in audio_archives_paths["dev"] + ], + "local_audio_archives_paths": ( + local_audio_archives_paths["dev"] + if local_audio_archives_paths + else None + ), + "meta_paths": meta_paths["dev"], + }, + ), + ] + + if 
self.config.name == "test": + return [ + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={ + "audio_archives_iterators": [ + dl_manager.iter_archive(archive_path) + for archive_path in audio_archives_paths["test"] + ], + "local_audio_archives_paths": ( + local_audio_archives_paths["test"] + if local_audio_archives_paths + else None + ), + "meta_paths": meta_paths["test"], + }, + ), + ] + + def _generate_examples( + self, audio_archives_iterators, local_audio_archives_paths, meta_paths + ): + assert len(audio_archives_iterators) == len(meta_paths) + if local_audio_archives_paths: + assert len(audio_archives_iterators) == len( + local_audio_archives_paths + ) + + for i, (meta_path, audio_archive_iterator) in enumerate( + zip(meta_paths, audio_archives_iterators) + ): + meta_dict = dict() + with open(meta_path, encoding="utf-8") as csvfile: + meta_csv = csv.DictReader(csvfile) + for line in meta_csv: + meta_dict[line["sid"]] = line + + for audio_path_in_archive, audio_file in audio_archive_iterator: + # `audio_path_in_archive` is like "dev_chunks_0000/YOU1000000029_S0000095.wav" + audio_filename = os.path.split(audio_path_in_archive)[1] + audio_id = audio_filename.split(".wav")[0] + audio_meta = meta_dict[audio_id] + audio_meta["segment_id"] = audio_meta.pop("sid") + audio_meta["original_full_path"] = audio_meta.pop("path") + audio_meta["text"] = audio_meta.pop("text_tn") + audio_meta["audio_id"] = audio_meta.pop("aid") + if not audio_meta["category"]: + audio_meta["category"] = "N/A" + + path = ( + os.path.join( + local_audio_archives_paths[i], audio_path_in_archive + ) + if local_audio_archives_paths + else audio_path_in_archive + ) + + yield ( + audio_id, + { + "audio": {"path": path, "bytes": audio_file.read()}, + **{ + feature: value + for feature, value in audio_meta.items() + if feature in self.info.features + }, + }, + ) + + +def _flatten_nested_dict(nested_dict): + return { + key: [ + inner_list_element + for inner_list in 
value_to_lists.values() + for inner_list_element in inner_list + ] + for key, value_to_lists in nested_dict.items() + } diff --git a/recipes/GigaSpeech/gigaspeech_prepare.py b/recipes/GigaSpeech/gigaspeech_prepare.py new file mode 100644 index 0000000000..dcff868feb --- /dev/null +++ b/recipes/GigaSpeech/gigaspeech_prepare.py @@ -0,0 +1,697 @@ +""" +Data preparation script for the GigaSpeech dataset. + +Download instructions: + 1. https://github.com/SpeechColab/GigaSpeech + 2. https://huggingface.co/datasets/speechcolab/gigaspeech +Reference: https://arxiv.org/abs/2106.06909 + +Author +------- + * Adel Moumen, 2024 +""" + +import csv +import functools +import json +import logging +import os +from dataclasses import dataclass + +from speechbrain.dataio import audio_io +from speechbrain.utils.parallel import get_available_cpu_count, parallel_map + +logger = logging.getLogger(__name__) +FILLERS = [ + "UH", + "UHH", + "UM", + "EH", + "MM", + "HM", + "AH", + "HUH", + "HA", + "ER", + "OOF", + "HEE", + "ACH", + "EEE", + "EW", +] +GARBAGE_UTTERANCE_TAGS = ["", "", "", ""] +PUNCTUATION_TAGS = { + "": ",", + "": "!", + "": ".", + "": "?", +} +SPLITS = ["DEV", "TEST"] +TRAIN_SUBSET = ["XS", "S", "M", "L", "XL"] +SAMPLING_RATE = 16000 + + +@dataclass +class GigaSpeechRow: + """Dataclass for handling GigaSpeech rows. + + Attributes + ---------- + utt_id : str + The segment ID. + audio_id : str + The audio ID. + audio_path : str + The path to the audio file. + speaker : str + The speaker ID. + begin_time : float + The start time of the segment. + end_time : float + The end time of the segment. + duration : float + The duration of the segment. + text : str + The text of the segment. 
+ """ + + utt_id: str # segment[sid] + audio_id: str # audio[aid] + audio_path: str # by default this is opus files + speaker: str # audio["speaker"] + begin_time: float + end_time: float + duration: float + text: str + + +def prepare_gigaspeech( + data_folder: str, + save_folder: str, + splits: list, + output_train: str, + output_dev: str, + output_test: str, + json_file: str = "GigaSpeech.json", + skip_prep: bool = False, + convert_opus_to_wav: bool = True, + download_with_HF: bool = False, + punctuation: bool = False, + filler: bool = False, + hf_multiprocess_load: bool = True, +) -> None: + """Prepare the csv files for GigaSpeech dataset. + + Download instructions: https://github.com/SpeechColab/GigaSpeech + Reference: https://arxiv.org/abs/2106.06909 + + The `train.csv` file is created by following the train subset specified in the `splits` list. + It must be part of the `TRAIN_SUBSET` list. You cannot use multiple train subsets. + + The `dev.csv` and `test.csv` files are created based on the `DEV` and `TEST` splits + specified in the `splits` list. + + Parameters + ---------- + data_folder : str + The path to the GigaSpeech dataset. + save_folder : str + The path to the folder where the CSV files will be saved. + splits : list + The list of splits to be used for creating the CSV files. + output_train : str + The path in which the train CSV or shards will be saved. + output_dev : str + The path in which the dev CSV or shards will be saved. + output_test : str + The path in which the test CSV or shards will be saved. + json_file : str, optional + The name of the JSON file containing the metadata of the GigaSpeech dataset. + skip_prep : bool, optional + If True, the data preparation will be skipped, and the function will return immediately. + convert_opus_to_wav : bool, optional + If True, the opus files will be converted to wav files. + download_with_HF : bool, optional + If True, the dataset will be downloaded using the Hugging Face datasets library. 
+ We highly recommend using this option if you are based in the EU or US as it will + be faster and more reliable than the official host. Make sure to read the + instructions on how to get the dataset from Hugging Face here: + https://huggingface.co/datasets/speechcolab/gigaspeech + The dataset will be downloaded in the default folder specified in the + environment variable HF_HUB_CACHE. Please change it if necessary. + punctuation : bool, optional + Keeping the punctuation, or not. + filler : bool, optional + Keeping filler words (hum), or not. + hf_multiprocess_load: bool, optional + If True, all the CPU threads will be used for data prepration. If set to + False, only one will be. Note that the data prepration of the larger sets + on a single core car take more than 24 hours (from downloading to done). + + Returns + ------- + None + """ + logger.info(f"Preparing GigaSpeech dataset in {save_folder}...") + + if skip_prep: + logger.info("Skipping data preparation as `skip_prep` is set to `True`") + return + + # check that `splits` input is valid + for split in splits: + assert split in SPLITS + TRAIN_SUBSET, ( + f"Split {split} not recognized. Valid splits are {SPLITS + TRAIN_SUBSET}." + ) + + # check that we are not using multiple train subsets + if len(set(splits).intersection(TRAIN_SUBSET)) > 1: + raise ValueError( + "You cannot use multiple train subsets. Please select only one train subset." 
+ ) + + os.makedirs(save_folder, exist_ok=True) + + # Setting output paths + save_output = {} + split_map = {} + train_split = "" + for split in splits: + if split in TRAIN_SUBSET: + save_output["train"] = output_train + split_map["train"] = split + train_split = split + else: + if split == "DEV": + save_output["validation"] = output_dev + split_map["validation"] = split + elif split == "TEST": + save_output["test"] = output_test + split_map["test"] = split + + # check if the data is already prepared + if skip_csv(save_output): + logger.info("Skipping preparation, completed in previous run.") + return + else: + logger.info("Starting data preparation...") + + if download_with_HF: + from datasets import load_dataset + + if os.path.exists("dataset.py"): + logger.info("HuggingFace dataset.py found.") + else: + raise FileNotFoundError( + "HuggingFace dataset.py not found. Please run this recipe from the correct recipe folder or copy the dataset.py file." + ) + + if "HF_HUB_CACHE" in os.environ: + hf_caching_dir = os.environ["HF_HUB_CACHE"] + elif "HF_HOME" in os.environ: + hf_caching_dir = os.environ["HF_HOME"] + else: + hf_caching_dir = os.environ["XDG_CACHE_HOME"] + + logger.info( + "Downloading dataset from HuggingFace to: " + str(hf_caching_dir) + ) + logger.info( + "To change this directory modify the HF_HUB_CACHE env. variable." 
+ ) + + nproc = 1 + if hf_multiprocess_load: + nproc = get_available_cpu_count() + + hf_dataset = load_dataset( + "dataset.py", + train_split.lower(), + trust_remote_code=True, + data_dir=data_folder, + cache_dir=data_folder, + num_proc=nproc, + ) + for split, output in save_output.items(): + logger.info(f"Starting creating {output} using {split} split.") + HF_create_csv(output, hf_dataset[split], split, punctuation, filler) + else: + # check that the data folder contains the GigaSpeech dataset + check_gigaspeech_folders(data_folder, json_file) + + logger.info(f"Starting reading {json_file}.") + with open(json_file, encoding="utf-8") as f: + info = json.load(f) + logger.info(f"Reading {json_file} done.") + + for split, output in save_output.items(): + logger.info(f"Starting creating {output} using {split} split.") + create_csv( + output, + info, + data_folder, + split_map[split], + convert_opus_to_wav, + punctuation, + filler, + ) + logger.info("Data preparation completed!") + + +def process_line( + audio: json, + data_folder: str, + split: str, + convert_opus_to_wav: bool, + punctuation: bool, + stopwords: list, +) -> list: + """ + Process the audio line and return the utterances for the given split. + + Parameters + ---------- + audio : dict + The audio line to be processed. + data_folder : str + The path to the GigaSpeech dataset. + split : str + The split to be used for filtering the data. + convert_opus_to_wav : bool + If True, the opus files will be converted to wav files. + punctuation : bool + Keeping punctuation or not. Default is no. + stopwords: list + List of stopwords to remove from the text of the labels. + + Returns + ------- + list + The list of utterances for the given split. 
+ """ + if ("{" + split + "}") in audio["subsets"]: + audio_path = os.path.join(data_folder, audio["path"]) + assert os.path.isfile(audio_path), f"File not found: {audio_path}" + + if convert_opus_to_wav and audio_path.endswith(".opus"): + audio_path = convert_opus2wav(audio_path) + + # 2. iterate over the utterances + utterances = [] + for segment in audio["segments"]: + text = preprocess_text(segment["text_tn"], punctuation, stopwords) + if text: + begin_time = float(segment["begin_time"]) + end_time = float(segment["end_time"]) + duration = end_time - begin_time + utterance = GigaSpeechRow( + utt_id=segment["sid"], + audio_id=audio["aid"], + audio_path=str(audio_path), + speaker=audio["speaker"], + begin_time=begin_time, + end_time=end_time, + duration=duration, + text=text, + ) + utterances.append(utterance) + return utterances + + +def create_csv( + csv_file: str, + info: json, + data_folder: str, + split: str, + convert_opus_to_wav: bool, + punctuation: bool = False, + filler: bool = False, +) -> None: + """ + Create a CSV file based on the info in the GigaSpeech JSON file and filter the data based on the split. + + Parameters + ---------- + csv_file : str + The path to the CSV file to be created. + info : dict + The GigaSpeech JSON file content. + data_folder : str + The path to the GigaSpeech dataset. + split : str + The split to be used for filtering the data. + convert_opus_to_wav : bool + If True, the opus files will be converted to wav files. + punctuation : bool + Keeping punctuation or not. Default is no. + filler : bool + Keeping filler words or not (hum, er). Default is no. 
+ + Returns + ------- + None + """ + total_duration = 0.0 + nb_samples = 0 + + to_remove = GARBAGE_UTTERANCE_TAGS + if not filler: + to_remove += FILLERS + + line_processor = functools.partial( + process_line, + data_folder=data_folder, + split=split, + convert_opus_to_wav=convert_opus_to_wav, + stopwords=to_remove, + punctuation=punctuation, + ) + + csv_file_tmp = csv_file + ".tmp" + with open(csv_file_tmp, mode="w", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + header = [ + "ID", + "audio_id", + "audio_path", + "speaker", + "begin_time", + "end_time", + "duration", + "text", + ] + csv_writer.writerow(header) + for row in parallel_map(line_processor, info["audios"]): + if row is None: + continue + + for item in row: + csv_writer.writerow( + [ + item.utt_id, + item.audio_id, + item.audio_path, + item.speaker, + str(item.begin_time), + str(item.end_time), + str(item.duration), + item.text, + ] + ) + + total_duration += item.duration + nb_samples += 1 + + os.replace(csv_file_tmp, csv_file) + + logger.info(f"{csv_file} successfully created!") + logger.info(f"Number of samples in {split} split: {nb_samples}") + logger.info( + f"Total duration of {split} split: {round(total_duration / 3600, 2)} Hours" + ) + + +def HF_create_csv( + csv_file: str, + hf_dataset, + split: str, + punctuation: bool = False, + filler: bool = False, +) -> None: + """ + Create a CSV file based on a HuggingFace dataset. + + Parameters + ---------- + csv_file : str + The path to the CSV file to be created. + hf_dataset : huggingface dataset, + The huggingface dataset. + split : str + The split to be used for filtering the data. + punctuation : bool + Keeping punctuation or not. Default is no. + filler : bool + Keeping filler words or not (hum, er). Default is no. 
+ + + Returns + ------- + None + """ + total_duration = 0.0 + nb_samples = 0 + + to_remove = GARBAGE_UTTERANCE_TAGS + if not filler: + to_remove += FILLERS + + line_processor = functools.partial( + HF_process_line, + stopwords=to_remove, + punctuation=punctuation, + ) + + csv_file_tmp = csv_file + ".tmp" + with open(csv_file_tmp, mode="w", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + header = [ + "ID", + "audio_id", + "audio_path", + "speaker", + "begin_time", + "end_time", + "duration", + "text", + ] + csv_writer.writerow(header) + + for row in parallel_map(line_processor, hf_dataset, chunk_size=1024): + if row is None: + continue + + csv_writer.writerow( + [ + row.utt_id, + row.audio_id, + row.audio_path, + row.speaker, + str(row.begin_time), + str(row.end_time), + str(row.duration), + row.text, + ] + ) + + total_duration += row.duration + nb_samples += 1 + + os.replace(csv_file_tmp, csv_file) + + logger.info(f"{csv_file} successfully created!") + logger.info(f"Number of samples in {split} split: {nb_samples}") + logger.info( + f"Total duration of {split} split: {round(total_duration / 3600, 2)} Hours" + ) + + +def HF_process_line(row: dict, punctuation: bool, stopwords: list) -> list: + """ + Process the audio line and return the utterances for the given split. + + Parameters + ---------- + row: dict + The audio line to be processed. + punctuation : bool + Keeping punctuation or not. Default is no. + stopwords: list + List of stopwords to remove from the text of the labels. + + Returns + ------- + list + The list of utterances for the given split. 
+ """ + audio_path = os.path.join(row["audio"]["path"]) + + if not os.path.isfile(audio_path): + return None + + # check reading the audio file ; HF may have some corrupted files + try: + _ = audio_io.info(audio_path) + except Exception as e: + logger.error(f"Failed reading {audio_path}: {e}") + return None + + text = preprocess_text(row["text"], punctuation, stopwords) + + if text: + utt_id = row["segment_id"] + audio_id = row["audio_id"] + audio_path = row["audio"]["path"] + speaker = row["speaker"] + begin_time = float(row["begin_time"]) + end_time = float(row["end_time"]) + duration = end_time - begin_time + + row = GigaSpeechRow( + utt_id=utt_id, + audio_id=audio_id, + audio_path=audio_path, + speaker=speaker, + begin_time=begin_time, + end_time=end_time, + duration=duration, + text=text, + ) + + return row + else: + return None + + +def convert_opus2wav(audio_opus_path): + """Convert an opus file to a wav file. + + Parameters + ---------- + audio_opus_path : str + The path to the opus file to be converted. + + Returns + ------- + str + The path to the converted wav file. + + Raises + ------ + subprocess.CalledProcessError + If the conversion process fails. + """ + audio_wav_path = audio_opus_path.replace(".opus", ".wav") + os.system( + f"ffmpeg -y -i {audio_opus_path} -ac 1 -ar {SAMPLING_RATE} {audio_wav_path} > /dev/null 2>&1" + ) + return audio_wav_path + + +def preprocess_text(text: str, punctuation: bool, stopwords) -> str: + """ + Preprocesses the input text by removing garbage tags and removing punctuation + and filler words if specified. + + Parameters + ---------- + text : str + The input text to be preprocessed. + punctuation : bool + Keeping punctuation or not. Default is no. + stopwords : list + List of words to remove from the input test string. + + Returns + ------- + str + The preprocessed text with removed garbage tags and replaced punctuation tags. 
+ + Raises + ------ + AssertionError + If '<' or '>' tags are found in the text after preprocessing. + + Notes + ----- + The function iterates over predefined garbage utterance tags (GARBAGE_UTTERANCE_TAGS) + and removes them from the input text. It then iterates over predefined punctuation tags + (PUNCTUATION_TAGS) and replaces them with the corresponding punctuation. + + Examples + -------- + >>> text = " DOUGLAS MCGRAY IS GOING TO BE OUR GUIDE YOU WALK THROUGH THE DOOR YOU SEE THE RED CARPETING YOU SEE SOMEONE IN A SUIT THEY MAY BE GREETING YOU " + >>> preprocess_text( + ... text, punctuation=True, stopwords=GARBAGE_UTTERANCE_TAGS + ... ) + "DOUGLAS MCGRAY IS GOING TO BE OUR GUIDE YOU WALK THROUGH THE DOOR, YOU SEE THE RED CARPETING, YOU SEE SOMEONE IN A SUIT. THEY MAY BE GREETING YOU." + """ + + text = text.upper() + text = text.replace("-", " ") + + sentence = " ".join( + [word for word in text.split() if word not in stopwords] + ) + + if punctuation: + for tag, punctuation in PUNCTUATION_TAGS.items(): + sentence = sentence.replace(" " + tag, punctuation) + + return sentence + + +def skip_csv(save_csv_files: dict) -> bool: + """Check if the CSV files already exist. + + Parameters + ---------- + save_csv_files : dict + The dictionary containing the paths to the CSV files. + + Returns + ------- + bool + True if all the CSV files already exist, False otherwise. + """ + return all(os.path.isfile(path) for path in save_csv_files.values()) + + +def check_gigaspeech_folders( + data_folder: str, + json_file: str = "GigaSpeech.json", + audio_folder: str = "audio", +) -> None: + """Check if the data folder actually contains the GigaSpeech dataset. + + If it does not, an error is raised. + + Parameters + ---------- + data_folder : str + The path to the GigaSpeech dataset. + json_file : str, optional + The name of the JSON file containing the metadata of the GigaSpeech dataset. 
+ audio_folder : str, optional + The name of the folder containing the audio files of the GigaSpeech dataset. + + Returns + ------- + None + + Raises + ------ + OSError + If GigaSpeech is not found at the specified path. + """ + # Checking if "GigaSpeech.json" exist + if not os.path.exists(json_file): + err_msg = ( + "the opus file %s does not exist (it is expected in the " + "Gigaspeech dataset)" % json_file + ) + raise OSError(err_msg) + + # Check if audio folders exist + for folder_subset in ["audiobook", "podcast", "youtube"]: + audio_subset = os.path.join(data_folder, audio_folder, folder_subset) + if not os.path.exists(audio_subset): + err_msg = ( + "the file %s does not exist (it is expected in the " + "Gigaspeech dataset)" % audio_subset + ) + raise OSError(err_msg) diff --git a/recipes/Google-speech-commands/README.md b/recipes/Google-speech-commands/README.md index 36fdeb7f3f..5a5aa9428e 100644 --- a/recipes/Google-speech-commands/README.md +++ b/recipes/Google-speech-commands/README.md @@ -1,6 +1,9 @@ # Google Speech Command v0.02 Dataset This folder contains recipes for command recognition with [Google Speech Command Dataset](https://www.tensorflow.org/datasets/catalog/speech_commands), including a sample recipe for the recent [LEAF audio frontend](https://openreview.net/forum?id=jM76BCb6F9m). -The recipes supports 12 or 35 commands. To run it, please type: +The recipes supports 12 or 35 commands. 
+ +# How to run +To run it, please type: ``` python train.py hparams/xvect.yaml --data_folder=/path_to_/GSC (V12 task) @@ -26,8 +29,8 @@ You can find the pre-trained model with an easy-inference function on HuggingFac - https://huggingface.co/speechbrain/google_speech_command_xvector You can find the full experiment folder (i.e., checkpoints, logs, etc) here: -- xvector v12: https://drive.google.com/drive/folders/1yPcXVHtrnNM0RhA_IGo8iAdezYZfoViQ?usp=sharing -- xvector leaf v35: https://drive.google.com/drive/folders/18AaNWrFUtr5OggwZxV7X7ZXvv2aQ4iMh?usp=sharing +- xvector v12: https://www.dropbox.com/sh/9n9q42pugbx0g7a/AADihpfGKuWf6gkwQznEFINDa?dl=0 +- xvector leaf v35: https://www.dropbox.com/sh/r63w4gytft4s1x6/AAApP8-pp179QKGCZHV_OuD8a?dl=0 # **About SpeechBrain** @@ -40,6 +43,15 @@ You can find the full experiment folder (i.e., checkpoints, logs, etc) here: Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and 
Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Google-speech-commands/extra-requirements.txt b/recipes/Google-speech-commands/extra-requirements.txt deleted file mode 100644 index fb6c7ed7ec..0000000000 --- a/recipes/Google-speech-commands/extra-requirements.txt +++ /dev/null @@ -1 +0,0 @@ -pandas diff --git a/recipes/Google-speech-commands/hparams/xvect.yaml b/recipes/Google-speech-commands/hparams/xvect.yaml index a9d58fd720..c4a20ad718 100644 --- a/recipes/Google-speech-commands/hparams/xvect.yaml +++ b/recipes/Google-speech-commands/hparams/xvect.yaml @@ -6,7 +6,7 @@ # Basic parameters seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Use 12 for V2 12 task and 35 for V2 35 task number_of_commands: 12 @@ -14,14 +14,19 @@ output_folder: !ref results/xvect_v/ save_folder: !ref /save train_log: !ref /train_log.txt +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 + # Data files data_folder: !PLACEHOLDER # e.g. /path/to/GSC -train_annotation: !ref /train.csv -valid_annotation: !ref /valid.csv -test_annotation: !ref /test.csv - -# Folder to extract data augmentation files -rir_folder: !ref # Change it if needed +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. 
+train_annotation: !ref /train.csv +valid_annotation: !ref /valid.csv +test_annotation: !ref /test.csv +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv # Percentage of files used for validation and test validation_percentage: 10 @@ -32,12 +37,10 @@ testing_percentage: 10 percentage_unknown: 10 # Set this to 0 for the V2 35 task percentage_silence: 10 # Set this to 0 for the V2 35 task -# Whether to use data augmentation -apply_data_augmentation: True skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 100 batch_size: 32 lr: 0.001 @@ -56,10 +59,11 @@ deltas: False # Number of classes (i.e. different commands) out_n_neurons: !ref #includes core commands & auxiliary words +num_workers: 4 dataloader_options: batch_size: !ref shuffle: !ref - num_workers: 2 + num_workers: !ref # Functions compute_features: !new:speechbrain.lobes.features.Fbank @@ -91,54 +95,80 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augment_wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [100] - -augment_speed: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -add_rev: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 1.0 # seconds - reverb_prob: 1.0 - noise_prob: 0.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 1.0 # seconds - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_rev_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 1.0 # seconds - reverb_prob: 1.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - - -# 
Definition of the augmentation pipeline. -# If concat_augment = False, the augmentation techniques are applied -# in sequence. If concat_augment = True, all the augmented signals -# # are concatenated in a single big batch. -augment_pipeline: [ - !ref , - !ref , - !ref , - !ref , - !ref -] -concat_augment: True +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +snr_low: 0 # Min SNR for noise augmentation +snr_high: 15 # Max SNR for noise augmentation + +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: !ref + snr_high: !ref + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: True + concat_original: True + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] mean_var_norm: !new:speechbrain.processing.features.InputNormalization norm_type: sentence @@ -146,11 +176,6 @@ mean_var_norm: !new:speechbrain.processing.features.InputNormalization modules: compute_features: !ref - augment_wavedrop: !ref - augment_speed: !ref - add_rev: !ref - add_noise: !ref - add_rev_noise: !ref embedding_model: !ref classifier: !ref softmax: !ref diff --git a/recipes/Google-speech-commands/hparams/xvect_leaf.yaml b/recipes/Google-speech-commands/hparams/xvect_leaf.yaml index 1a9cf4e979..3e1e034729 100644 --- a/recipes/Google-speech-commands/hparams/xvect_leaf.yaml +++ b/recipes/Google-speech-commands/hparams/xvect_leaf.yaml @@ -7,7 +7,7 @@ # Basic parameters seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Use 12 for V2 12 task and 35 for V2 35 task number_of_commands: 12 @@ -15,14 +15,20 @@ output_folder: !ref results/xvect_leaf_legacy_complex_mvnorm_v/save train_log: !ref /train_log.txt +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: 
https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 + +# Data files # Data files data_folder: !PLACEHOLDER # e.g. /path/to/GSC -train_annotation: !ref /train.csv -valid_annotation: !ref /valid.csv -test_annotation: !ref /test.csv - -# Folder to extract data augmentation files -rir_folder: !ref # Change it if needed +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. +train_annotation: !ref /train.csv +valid_annotation: !ref /valid.csv +test_annotation: !ref /test.csv +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv # Percentage of files used for validation and test validation_percentage: 10 @@ -33,12 +39,10 @@ testing_percentage: 10 percentage_unknown: 10 # Set this to 0 for the V2 35 task percentage_silence: 10 # Set this to 0 for the V2 35 task -# Whether to use data augmentation -apply_data_augmentation: True skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 100 batch_size: 32 lr: 0.001 @@ -54,10 +58,11 @@ n_features: 24 # Number of classes (i.e. 
different commands) out_n_neurons: !ref #includes core commands & auxiliary words +num_workers: 4 dataloader_options: batch_size: !ref shuffle: !ref - num_workers: 2 + num_workers: !ref # Functions compute_features: !new:speechbrain.lobes.features.Leaf @@ -90,54 +95,80 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augment_wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [100] - -augment_speed: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -add_rev: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 1.0 # seconds - reverb_prob: 1.0 - noise_prob: 0.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 1.0 # seconds - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_rev_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 1.0 # seconds - reverb_prob: 1.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - - -# Definition of the augmentation pipeline. -# If concat_augment = False, the augmentation techniques are applied -# in sequence. If concat_augment = True, all the augmented signals -# # are concatenated in a single big batch. 
-augment_pipeline: [ - !ref , - !ref , - !ref , - !ref , - !ref -] -concat_augment: True +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +snr_low: 0 # Min SNR for noise augmentation +snr_high: 15 # Max SNR for noise augmentation + +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: !ref + snr_high: !ref + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: True + concat_original: True + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] mean_var_norm: !new:speechbrain.processing.features.InputNormalization norm_type: sentence @@ -145,11 +176,6 @@ mean_var_norm: !new:speechbrain.processing.features.InputNormalization modules: compute_features: !ref - augment_wavedrop: !ref - augment_speed: !ref - add_rev: !ref - add_noise: !ref - add_rev_noise: !ref embedding_model: !ref classifier: !ref softmax: !ref diff --git a/recipes/Google-speech-commands/prepare_GSC.py b/recipes/Google-speech-commands/prepare_GSC.py index 7beb5ba255..37371b6c9e 100644 --- a/recipes/Google-speech-commands/prepare_GSC.py +++ b/recipes/Google-speech-commands/prepare_GSC.py @@ -9,18 +9,20 @@ """ -import os -from os import walk +import copy import glob -import shutil -import logging -import torch -import re import hashlib -import copy +import os +import re +import shutil +from os import walk + import numpy as np -from speechbrain.utils.data_utils import download_file +import torch + from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.data_utils import download_file +from speechbrain.utils.logger import get_logger try: import pandas as pd @@ -31,7 +33,7 @@ err_msg += "Install using `pip install pandas`.\n" raise ImportError(err_msg) -logger = 
logging.getLogger(__name__) +logger = get_logger(__name__) GSC_URL = "http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz" @@ -109,16 +111,22 @@ def prepare_GSC( How much of the data set to use for validation. testing_percentage: int How much of the data set to use for testing. - percentage unknown: int. + percentage_unknown: int. How much data outside of the known (i.e wanted) words to preserve; relative to the total number of known words. - percentage silence: int + percentage_silence: int How many silence samples to generate; relative to the total number of known words. + words_wanted: list + The list of commands to use from the dataset. skip_prep: bool If True, skip data preparation. + Returns + ------- + None + Example ------- - >>> data_folder = '/path/to/GSC' + >>> data_folder = "/path/to/GSC" >>> prepare_GSC(data_folder) """ @@ -175,7 +183,7 @@ def prepare_GSC( # Read all files under a specific class (i.e. command) files = [] - for (dirpath, dirnames, filenames) in walk( + for dirpath, dirnames, filenames in walk( os.path.join(data_folder, command) ): files.extend(filenames) @@ -234,23 +242,23 @@ def prepare_GSC( new_df.to_csv(new_filename, index=False) -MAX_NUM_WAVS_PER_CLASS = 2 ** 27 - 1 # ~134M +MAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M def which_set(filename, validation_percentage, testing_percentage): """Determines which data partition the file should belong to. - We want to keep files in the same training, validation, or testing sets even - if new ones are added over time. This makes it less likely that testing - samples will accidentally be reused in training when long runs are restarted - for example. To keep this stability, a hash of the filename is taken and used - to determine which set it should belong to. This determination only depends on - the name and the set proportions, so it won't change as other files are added. 
+ We want to keep files in the same training, validation, or testing sets even + if new ones are added over time. This makes it less likely that testing + samples will accidentally be reused in training when long runs are restarted + for example. To keep this stability, a hash of the filename is taken and used + to determine which set it should belong to. This determination only depends on + the name and the set proportions, so it won't change as other files are added. - It's also useful to associate particular files as related (for example words - spoken by the same person), so anything after '_nohash_' in a filename is - ignored for set determination. This ensures that 'bobby_nohash_0.wav' and - 'bobby_nohash_1.wav' are always in the same set, for example. + It's also useful to associate particular files as related (for example words + spoken by the same person), so anything after '_nohash_' in a filename is + ignored for set determination. This ensures that 'bobby_nohash_0.wav' and + 'bobby_nohash_1.wav' are always in the same set, for example. Arguments --------- @@ -262,10 +270,10 @@ def which_set(filename, validation_percentage, testing_percentage): How much of the data set to use for testing. Returns - --------- + ------- result: str one of 'training', 'validation', or 'testing'. - """ + """ base_name = os.path.basename(filename) # We want to ignore anything after '_nohash_' in the file name when # deciding which set to put a wav in, so the data set creator has a way of @@ -306,7 +314,7 @@ def generate_silence_data( path to dataset. percentage_silence: int How many silence samples to generate; relative to the total number of known words. 
- """ + """ for split in splits: num_silence_samples = int( (percentage_silence / 100.0) * num_known_samples_per_split[split] diff --git a/recipes/Google-speech-commands/train.py b/recipes/Google-speech-commands/train.py index d4f8e8ede5..96856f2678 100644 --- a/recipes/Google-speech-commands/train.py +++ b/recipes/Google-speech-commands/train.py @@ -15,20 +15,21 @@ * Sarthak Yadav 2022 Script adapted by David Raby-Pepin 2021 """ + import os import sys + import torch -import torchaudio -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml +import speechbrain as sb import speechbrain.nnet.CNN +from speechbrain.dataio import audio_io from speechbrain.utils.distributed import run_on_main class SpeakerBrain(sb.core.Brain): - """Class for GSC training" - """ + """Class for GSC training" """ def compute_forward(self, batch, stage): """Computation pipeline based on a encoder + command classifier. @@ -38,33 +39,9 @@ def compute_forward(self, batch, stage): batch = batch.to(self.device) wavs, lens = batch.sig - if stage == sb.Stage.TRAIN and self.hparams.apply_data_augmentation: - - # Applying the augmentation pipeline - wavs_aug_tot = [] - wavs_aug_tot.append(wavs) - for count, augment in enumerate(self.hparams.augment_pipeline): - - # Apply augment - wavs_aug = augment(wavs, lens) - - # Managing speed change - if wavs_aug.shape[1] > wavs.shape[1]: - wavs_aug = wavs_aug[:, 0 : wavs.shape[1]] - else: - zero_sig = torch.zeros_like(wavs) - zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug - wavs_aug = zero_sig - - if self.hparams.concat_augment: - wavs_aug_tot.append(wavs_aug) - else: - wavs = wavs_aug - wavs_aug_tot[0] = wavs - - wavs = torch.cat(wavs_aug_tot, dim=0) - self.n_augment = len(wavs_aug_tot) - lens = torch.cat([lens] * self.n_augment) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, lens = self.hparams.wav_augment(wavs, lens) if isinstance( self.modules.compute_features, speechbrain.lobes.features.Leaf @@ -82,22 +59,21 @@ def compute_forward(self, batch, stage): embeddings = self.modules.embedding_model(feats) outputs = self.modules.classifier(embeddings) - # Ecapa model uses softmax outside of its classifer + # Ecapa model uses softmax outside of its classifier if "softmax" in self.modules.keys(): outputs = self.modules.softmax(outputs) return outputs, lens def compute_objectives(self, predictions, batch, stage): - """Computes the loss using command-id as label. - """ + """Computes the loss using command-id as label.""" predictions, lens = predictions uttid = batch.id command, _ = batch.command_encoded # Concatenate labels (due to data augmentation) - if stage == sb.Stage.TRAIN and self.hparams.apply_data_augmentation: - command = torch.cat([command] * self.n_augment, dim=0) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + command = self.hparams.wav_augment.replicate_labels(command) # compute the cost function loss = self.hparams.compute_cost(predictions, command, lens) @@ -179,9 +155,7 @@ def audio_pipeline(wav, start, stop, duration): start = int(start) stop = int(stop) num_frames = stop - start - sig, fs = torchaudio.load( - wav, num_frames=num_frames, frame_offset=start - ) + sig, fs = audio_io.load(wav, num_frames=num_frames, frame_offset=start) sig = sig.transpose(0, 1).squeeze(1) return sig @@ -201,7 +175,9 @@ def label_pipeline(command): # Load or compute the label encoder (with multi-GPU DDP support) lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") label_encoder.load_or_create( - path=lab_enc_file, from_didatasets=[train_data], output_key="command", + path=lab_enc_file, + from_didatasets=[train_data], + output_key="command", ) # 4. 
Set output: @@ -213,7 +189,6 @@ def label_pipeline(command): if __name__ == "__main__": - # This flag enables the inbuilt cudnn auto-tuner torch.backends.cudnn.benchmark = True @@ -224,7 +199,7 @@ def label_pipeline(command): sb.utils.distributed.ddp_init_group(run_opts) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -306,6 +281,8 @@ def label_pipeline(command): "skip_prep": hparams["skip_prep"], }, ) + sb.utils.distributed.run_on_main(hparams["prepare_noise_data"]) + sb.utils.distributed.run_on_main(hparams["prepare_rir_data"]) # Dataset IO prep: creating Dataset objects and proper encodings for phones train_data, valid_data, test_data, label_encoder = dataio_prep(hparams) diff --git a/recipes/IEMOCAP/README.md b/recipes/IEMOCAP/README.md index e2c276e1af..75d1544508 100644 --- a/recipes/IEMOCAP/README.md +++ b/recipes/IEMOCAP/README.md @@ -11,10 +11,10 @@ or with wav2vec2 model: The results reported here use random splits | Release | hyperparams file | Val. Acc. | Test Acc. 
| Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| :-----------:| -| 2021-07-04 | train.yaml | 65.3 | 65.7 | [model](https://drive.google.com/drive/folders/1U9SiO4KkCNBKfxilXzJqBZ_k-vHz4ltV?usp=sharing) | 1xV100 16GB | -| 2021-10-17 | train_with_wav2vec2.yaml (wav2vec2 base) | best 78.1 | best: 78.7 (avg 77.0) | [model](https://drive.google.com/drive/u/0/folders/11iZkcxvXYPnhf1yfYO_WVfRpGbN6HmNw) | 1xV100 32GB | -| 2021-10-17 | train_with_wav2vec2.yaml (voxpopuli base) | best 73.3 | best: 73.3 (avg 70.5) | [model](https://drive.google.com/drive/u/0/folders/1hCL2vCQe2WS5wv5LU7JYkh7QSHNH9m4d) | 1xV100 32GB | -| 2021-10-17 | train_with_wav2vec2.yaml (hubert base) | best 74.9 | best: 79.1 (avg 76,6) | [model](https://drive.google.com/drive/u/0/folders/1m8xggbhbsXHedMbF6dNVkNEW1bfGTjvi) | 1xV100 32GB | +| 2021-07-04 | train.yaml | 65.3 | 65.7 | [model](https://www.dropbox.com/sh/o72ex46i49qgdm0/AABxsuG0EEqTLgzWwrkYQzu_a?dl=0) | 1xV100 16GB | +| 2021-10-17 | train_with_wav2vec2.yaml (wav2vec2 base) | best 78.1 | best: 78.7 (avg 77.0) | [model](https://www.dropbox.com/sh/lmebg4li83sgkhg/AACooPKbNlwd-7n5qSJMbc7ya?dl=0) | 1xV100 32GB | +| 2021-10-17 | train_with_wav2vec2.yaml (voxpopuli base) | best 73.3 | best: 73.3 (avg 70.5) | [model](https://www.dropbox.com/sh/ikjwnwebekf2xx2/AADyaJKPiaR0_iO0nntucH5pa?dl=0) | 1xV100 32GB | +| 2021-10-17 | train_with_wav2vec2.yaml (hubert base) | best 74.9 | best: 79.1 (avg 76,6) | [model](https://www.dropbox.com/sh/ke4fxiry97z58m8/AACPEOM5bIyxo9HxG2mT9v_aa?dl=0) | 1xV100 32GB | # Training Time About 40 sec for each epoch with a TESLA V100 (with ECAPA-TDNN). @@ -24,9 +24,9 @@ About 3min 14 sec for each epoch with a TESLA V100 (with wav2vec2 BASE encoder). We here use only the audio part of the dataset. Our `iemocap_prepare.py` will: -1. Do labelling transformation to 4 emitions [neural, happy, sad, anger] -2. Prepare IEMOCAP data with random split if different_speakers is False. 
(Note for becnhmarking: you need to run 5 folds) -3. Prepare IEMOCAP data with speaker-independent split if different_speakers is True. (Note for becnhmarking: you need to run 10 folds with test_spk_id from 1 to 10) +1. Do labelling transformation to 4 emotions [neural, happy, sad, anger] +2. Prepare IEMOCAP data with random split if different_speakers is False. (Note for benchmarking: you need to run 5 folds) +3. Prepare IEMOCAP data with speaker-independent split if different_speakers is True. (Note for benchmarking: you need to run 10 folds with test_spk_id from 1 to 10) # PreTrained Model + Easy-Inference @@ -55,6 +55,15 @@ You can find the wav2vec2 pre-trained model with an easy-inference function on H Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and 
François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/IEMOCAP/emotion_recognition/extra_dependencies.txt b/recipes/IEMOCAP/emotion_recognition/extra_dependencies.txt deleted file mode 100644 index 3cc3686ab9..0000000000 --- a/recipes/IEMOCAP/emotion_recognition/extra_dependencies.txt +++ /dev/null @@ -1,4 +0,0 @@ -# For Self-supervised recipes (HuggingFace) -transformers -# For Self-supervised recipe (Fairsec) -#fairseq diff --git a/recipes/IEMOCAP/emotion_recognition/hparams/train.yaml b/recipes/IEMOCAP/emotion_recognition/hparams/train.yaml index 224b9dc606..3acf49db1c 100644 --- a/recipes/IEMOCAP/emotion_recognition/hparams/train.yaml +++ b/recipes/IEMOCAP/emotion_recognition/hparams/train.yaml @@ -9,7 +9,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1968 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Dataset will be downloaded to the `data_original` data_folder: !PLACEHOLDER # e.g., /path/to/IEMOCAP_full_release @@ -27,6 +27,8 @@ test_spk_id: 1 train_annotation: !ref /train.json valid_annotation: !ref /valid.json test_annotation: !ref /test.json +split_ratio: [80, 10, 10] +skip_prep: False # The train logger writes training statistics to a file, as well as stdout. 
train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger @@ -41,7 +43,7 @@ ckpt_interval_minutes: 15 # save checkpoint every N min # Training Parameters number_of_epochs: 30 batch_size: 16 -gradient_accumulation: 2 +grad_accumulation_factor: 2 lr: 0.0001 weight_decay: 0.00002 base_lr: 0.000001 diff --git a/recipes/IEMOCAP/emotion_recognition/hparams/train_with_wav2vec2.yaml b/recipes/IEMOCAP/emotion_recognition/hparams/train_with_wav2vec2.yaml index 8fc6780452..345ee4e67c 100644 --- a/recipes/IEMOCAP/emotion_recognition/hparams/train_with_wav2vec2.yaml +++ b/recipes/IEMOCAP/emotion_recognition/hparams/train_with_wav2vec2.yaml @@ -6,7 +6,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1993 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Dataset will be downloaded to the `data_original` data_folder: !PLACEHOLDER # e.g., /path/to/IEMOCAP_full_release @@ -14,10 +14,11 @@ output_folder: !ref results/train_with_wav2vec2/ save_folder: !ref /save train_log: !ref /train_log.txt -# URL for the wav2vec2 model, you can change to benchmark diffrenet models +# URL for the wav2vec2 model, you can change to benchmark different models # Important: we use wav2vec2 base and not the fine-tuned one with ASR task -# This allow you to have ~4% improvment -wav2vec2_hub: "facebook/wav2vec2-base" +# This allow you to have ~4% improvement +wav2vec2_hub: facebook/wav2vec2-base +wav2vec2_folder: !ref /wav2vec2_checkpoint # different speakers for train, valid and test sets different_speakers: False @@ -28,6 +29,8 @@ test_spk_id: 1 train_annotation: !ref /train.json valid_annotation: !ref /valid.json test_annotation: !ref /test.json +split_ratio: [80, 10, 10] +skip_prep: False # The train logger writes training statistics to a file, as well as stdout. 
train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger @@ -35,7 +38,7 @@ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger ckpt_interval_minutes: 15 # save checkpoint every N min -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 30 batch_size: 4 lr: 0.0001 @@ -47,7 +50,7 @@ freeze_wav2vec2: False # We see an improvement of 2% with freezing CNNs freeze_wav2vec2_conv: True -# Model parameters +####################### Model Parameters ####################################### encoder_dim: 768 # Number of emotions @@ -60,12 +63,12 @@ dataloader_options: drop_last: False # Wav2vec2 encoder -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref freeze_feature_extractor: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref avg_pool: !new:speechbrain.nnet.pooling.StatisticsPooling return_std: False diff --git a/recipes/IEMOCAP/emotion_recognition/iemocap_prepare.py b/recipes/IEMOCAP/emotion_recognition/iemocap_prepare.py index 48feed81f4..ba76a34fb5 100644 --- a/recipes/IEMOCAP/emotion_recognition/iemocap_prepare.py +++ b/recipes/IEMOCAP/emotion_recognition/iemocap_prepare.py @@ -9,15 +9,15 @@ * Yingzhi Wang, 2022 """ -import os -import sys -import re import json +import os import random -import logging +import re + from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) SAMPLERATE = 16000 NUMBER_UTT = 5531 @@ -45,12 +45,14 @@ def prepare_data( Path where the validation data specification file will be saved. save_json_test : str Path where the test data specification file will be saved. 
- split_ratio: list + split_ratio : list List composed of three integers that sets split ratios for train, - valid, and test sets, respecively. + valid, and test sets, respectively. For instance split_ratio=[80, 10, 10] will assign 80% of the sentences to training, 10% for validation, and 10% for test. - test_spk_id: int + different_speakers : bool + If True, splits data so speakers are NOT shared among splits. + test_spk_id : int Id of speaker used for test set, 10 speakers in total. Here a leave-two-speaker strategy is used for the split, if one test_spk_id is selected for test, the other spk_id in the same @@ -60,10 +62,14 @@ def prepare_data( seed : int Seed for reproducibility + Returns + ------- + None + Example ------- - >>> data_original = '/path/to/iemocap/IEMOCAP_full_release' - >>> prepare_data(data_original, 'train.json', 'valid.json', 'test.json') + >>> data_original = "/path/to/iemocap/IEMOCAP_full_release" + >>> prepare_data(data_original, "train.json", "valid.json", "test.json") """ data_original = data_original + "/Session" # setting seeds for reproducible code. @@ -77,10 +83,9 @@ def prepare_data( speaker_dict = transform_data(data_original) if sum([len(value) for value in speaker_dict.values()]) != NUMBER_UTT: - logger.error( + raise ValueError( "Error: Number of utterances is not 5531, please check your IEMOCAP folder" ) - sys.exit() # List files and create manifest from list logger.info( @@ -128,7 +133,7 @@ def create_json(wav_list, json_file): } # Writing the dictionary to the json file - with open(json_file, mode="w") as json_f: + with open(json_file, mode="w", encoding="utf-8") as json_f: json.dump(json_dict, json_f, indent=2) logger.info(f"{json_file} successfully created!") @@ -139,6 +144,11 @@ def skip(*filenames): Detects if the data preparation has been already done. If the preparation has been done, we can skip it. + Arguments + --------- + *filenames : tuple + A list of paths to check for existence. 
+ Returns ------- bool @@ -168,7 +178,7 @@ def split_different_speakers(speaker_dict, test_spk_id): Session1 contains speaker 1&2, Session2 contains speaker 3&4, ... Returns - ------ + ------- dictionary containing train, valid, and test splits. """ data_split = {k: [] for k in ["train", "valid", "test"]} @@ -195,21 +205,21 @@ def split_sets(speaker_dict, split_ratio): same proportion of samples (e.g, spk01 should have 80% of samples in training, 10% validation, 10% test, the same for speaker2 etc.). This is the approach followed in some recipes such as the Voxceleb one. For - simplicity, we here simply split the full list without necessarly + simplicity, we here simply split the full list without necessarily respecting the split ratio within each class. Arguments --------- speaker_dict : list a dictionary of speaker id and its corresponding audio information - split_ratio: list + split_ratio : list List composed of three integers that sets split ratios for train, valid, and test sets, respectively. For instance split_ratio=[80, 10, 10] will assign 80% of the sentences to training, 10% for validation, and 10% for test. Returns - ------ + ------- dictionary containing train, valid, and test splits. """ @@ -233,6 +243,7 @@ def split_sets(speaker_dict, split_ratio): return data_split +# cspell:ignore ahsn def transform_data(path_loadSession): """ Create a dictionary that maps speaker id and corresponding wavs @@ -242,10 +253,15 @@ def transform_data(path_loadSession): path_loadSession : str Path to the folder where the original IEMOCAP dataset is stored. + Returns + ------- + speaker_dict : dict + Map from speaker id to wav. 
+ Example ------- - >>> data_original = '/path/to/iemocap/IEMOCAP_full_release/Session' - >>> data_transformed = '/path/to/iemocap/IEMOCAP_ahsn_leave-two-speaker-out' + >>> data_original = "/path/to/iemocap/IEMOCAP_full_release/Session" + >>> data_transformed = "/path/to/iemocap/IEMOCAP_ahsn_leave-two-speaker-out" >>> transform_data(data_original, data_transformed) """ @@ -269,14 +285,14 @@ def load_utterInfo(inputFile): Load utterInfo from original IEMOCAP database """ - # this regx allow to create a list with: + # this regex allow to create a list with: # [START_TIME - END_TIME] TURN_NAME EMOTION [V, A, D] # [V, A, D] means [Valence, Arousal, Dominance] pattern = re.compile( - "[\[]*[0-9]*[.][0-9]*[ -]*[0-9]*[.][0-9]*[\]][\t][a-z0-9_]*[\t][a-z]{3}[\t][\[][0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[\]]", + r"[\[]*[0-9]*[.][0-9]*[ -]*[0-9]*[.][0-9]*[\]][\t][a-z0-9_]*[\t][a-z]{3}[\t][\[][0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[\]]", re.IGNORECASE, ) # noqa - with open(inputFile, "r") as myfile: + with open(inputFile, encoding="utf-8") as myfile: data = myfile.read().replace("\n", " ") result = pattern.findall(data) out = [] @@ -296,12 +312,12 @@ def load_session(pathSession): Arguments --------- - pathSession: str - Path folder of IEMOCAP session. + pathSession: str + Path folder of IEMOCAP session. Returns ------- - improvisedUtteranceList: list - List of improvised utterancefor IEMOCAP session. + improvisedUtteranceList: list + List of improvised utterancefor IEMOCAP session. 
""" pathEmo = pathSession + "/dialog/EmoEvaluation/" pathWavFolder = pathSession + "/sentences/wav/" diff --git a/recipes/IEMOCAP/emotion_recognition/train.py b/recipes/IEMOCAP/emotion_recognition/train.py index a6462752a3..5a25c1232c 100644 --- a/recipes/IEMOCAP/emotion_recognition/train.py +++ b/recipes/IEMOCAP/emotion_recognition/train.py @@ -11,21 +11,22 @@ * Pierre-Yves Yanni 2021 """ +import csv import os import sys -import csv -import speechbrain as sb -import torch -from torch.utils.data import DataLoader from enum import Enum, auto -from tqdm.contrib import tqdm + +import torch from hyperpyyaml import load_hyperpyyaml +from torch.utils.data import DataLoader +from tqdm import tqdm + +import speechbrain as sb class EmoIdBrain(sb.Brain): def compute_forward(self, batch, stage): - """Computation pipeline based on a encoder + emotion classifier. - """ + """Computation pipeline based on a encoder + emotion classifier.""" batch = batch.to(self.device) wavs, lens = batch.sig @@ -39,32 +40,13 @@ def compute_forward(self, batch, stage): return outputs - def fit_batch(self, batch): - """Trains the parameters given a single batch in input""" - - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - # normalize the loss by gradient_accumulation step - (loss / self.hparams.gradient_accumulation).backward() - - if self.step % self.hparams.gradient_accumulation == 0: - # gradient clipping & early stop if loss is not finite - self.check_gradients(loss) - self.optimizer.step() - self.optimizer.zero_grad() - - return loss.detach() - def compute_objectives(self, predictions, batch, stage): - """Computes the loss using speaker-id as label. 
- """ + """Computes the loss using speaker-id as label.""" _, lens = batch.sig emoid, _ = batch.emo_encoded # Concatenate labels (due to data augmentation) if stage == sb.Stage.TRAIN: - if hasattr(self.hparams.lr_annealing, "on_batch_end"): self.hparams.lr_annealing.on_batch_end(self.optimizer) @@ -123,7 +105,6 @@ def on_stage_end(self, stage, stage_loss, epoch=None): # At the end of validation... if stage == sb.Stage.VALID: - old_lr, new_lr = self.hparams.lr_annealing(epoch) sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) @@ -185,7 +166,7 @@ def output_predictions_test_set( save_file = os.path.join( self.hparams.output_folder, "predictions.csv" ) - with open(save_file, "w", newline="") as csvfile: + with open(save_file, "w", newline="", encoding="utf-8") as csvfile: outwriter = csv.writer(csvfile, delimiter=",") outwriter.writerow(["id", "prediction", "true_value"]) @@ -204,7 +185,9 @@ def output_predictions_test_set( torch.argmax(output, dim=-1).squeeze(dim=1).tolist() ) - with open(save_file, "a", newline="") as csvfile: + with open( + save_file, "a", newline="", encoding="utf-8" + ) as csvfile: outwriter = csv.writer(csvfile, delimiter=",") for emo_id, prediction, true_val in zip( emo_ids, predictions, true_vals @@ -254,7 +237,7 @@ def audio_pipeline(wav): sig = sb.dataio.dataio.read_audio(wav) return sig - # Initialization of the label encoder. The label encoder assignes to each + # Initialization of the label encoder. The label encoder assigns to each # of the observed label a unique index (e.g, 'spk01': 0, 'spk02': 1, ..) label_encoder = sb.dataio.encoder.CategoricalEncoder() @@ -283,7 +266,7 @@ def label_pipeline(emo): ) # Load or compute the label encoder (with multi-GPU DDP support) # Please, take a look into the lab_enc_file to see the label to index - # mappinng. + # mapping. lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") label_encoder.load_or_create( @@ -297,7 +280,6 @@ def label_pipeline(emo): # RECIPE BEGINS! 
if __name__ == "__main__": - # Reading command line arguments. hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) @@ -305,7 +287,7 @@ def label_pipeline(emo): sb.utils.distributed.ddp_init_group(run_opts) # Load hyperparameters file with command-line overrides. - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -318,19 +300,20 @@ def label_pipeline(emo): from iemocap_prepare import prepare_data # noqa E402 # Data preparation, to be run on only one process. - sb.utils.distributed.run_on_main( - prepare_data, - kwargs={ - "data_original": hparams["data_folder"], - "save_json_train": hparams["train_annotation"], - "save_json_valid": hparams["valid_annotation"], - "save_json_test": hparams["test_annotation"], - "split_ratio": [80, 10, 10], - "different_speakers": hparams["different_speakers"], - "test_spk_id": hparams["test_spk_id"], - "seed": hparams["seed"], - }, - ) + if not hparams["skip_prep"]: + sb.utils.distributed.run_on_main( + prepare_data, + kwargs={ + "data_original": hparams["data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "split_ratio": hparams["split_ratio"], + "different_speakers": hparams["different_speakers"], + "test_spk_id": hparams["test_spk_id"], + "seed": hparams["seed"], + }, + ) # Create dataset objects "train", "valid", and "test". 
datasets = dataio_prep(hparams) diff --git a/recipes/IEMOCAP/emotion_recognition/train_with_wav2vec2.py b/recipes/IEMOCAP/emotion_recognition/train_with_wav2vec2.py index 98596e546d..29fcd89475 100644 --- a/recipes/IEMOCAP/emotion_recognition/train_with_wav2vec2.py +++ b/recipes/IEMOCAP/emotion_recognition/train_with_wav2vec2.py @@ -13,20 +13,21 @@ import os import sys -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml +import speechbrain as sb + class EmoIdBrain(sb.Brain): def compute_forward(self, batch, stage): - """Computation pipeline based on a encoder + emotion classifier. - """ + """Computation pipeline based on a encoder + emotion classifier.""" batch = batch.to(self.device) wavs, lens = batch.sig - outputs = self.modules.wav2vec2(wavs) + outputs = self.modules.wav2vec2(wavs, lens) - # last dim will be used for AdaptativeAVG pool + # last dim will be used for AdaptiveAVG pool outputs = self.hparams.avg_pool(outputs, lens) outputs = outputs.view(outputs.shape[0], -1) @@ -35,8 +36,7 @@ def compute_forward(self, batch, stage): return outputs def compute_objectives(self, predictions, batch, stage): - """Computes the loss using speaker-id as label. - """ + """Computes the loss using speaker-id as label.""" emoid, _ = batch.emo_encoded """to meet the input form of nll loss""" @@ -47,21 +47,6 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Trains the parameters given a single batch in input""" - - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.wav2vec2_optimizer.step() - self.optimizer.step() - - self.wav2vec2_optimizer.zero_grad() - self.optimizer.zero_grad() - - return loss.detach() - def on_stage_start(self, stage, epoch=None): """Gets called at the beginning of each epoch. 
Arguments @@ -108,7 +93,6 @@ def on_stage_end(self, stage, stage_loss, epoch=None): # At the end of validation... if stage == sb.Stage.VALID: - old_lr, new_lr = self.hparams.lr_annealing(stats["error_rate"]) sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) @@ -152,6 +136,11 @@ def init_optimizers(self): ) self.checkpointer.add_recoverable("optimizer", self.optimizer) + self.optimizers_dict = { + "model_optimizer": self.optimizer, + "wav2vec2_optimizer": self.wav2vec2_optimizer, + } + def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. @@ -180,7 +169,7 @@ def audio_pipeline(wav): sig = sb.dataio.dataio.read_audio(wav) return sig - # Initialization of the label encoder. The label encoder assignes to each + # Initialization of the label encoder. The label encoder assigns to each # of the observed label a unique index (e.g, 'spk01': 0, 'spk02': 1, ..) label_encoder = sb.dataio.encoder.CategoricalEncoder() @@ -209,7 +198,7 @@ def label_pipeline(emo): ) # Load or compute the label encoder (with multi-GPU DDP support) # Please, take a look into the lab_enc_file to see the label to index - # mappinng. + # mapping. lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") label_encoder.load_or_create( @@ -223,7 +212,6 @@ def label_pipeline(emo): # RECIPE BEGINS! if __name__ == "__main__": - # Reading command line arguments. hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) @@ -231,7 +219,7 @@ def label_pipeline(emo): sb.utils.distributed.ddp_init_group(run_opts) # Load hyperparameters file with command-line overrides. - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -244,24 +232,25 @@ def label_pipeline(emo): from iemocap_prepare import prepare_data # noqa E402 # Data preparation, to be run on only one process. 
- sb.utils.distributed.run_on_main( - prepare_data, - kwargs={ - "data_original": hparams["data_folder"], - "save_json_train": hparams["train_annotation"], - "save_json_valid": hparams["valid_annotation"], - "save_json_test": hparams["test_annotation"], - "split_ratio": [80, 10, 10], - "different_speakers": hparams["different_speakers"], - "test_spk_id": hparams["test_spk_id"], - "seed": hparams["seed"], - }, - ) + if not hparams["skip_prep"]: + sb.utils.distributed.run_on_main( + prepare_data, + kwargs={ + "data_original": hparams["data_folder"], + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "save_json_test": hparams["test_annotation"], + "split_ratio": hparams["split_ratio"], + "different_speakers": hparams["different_speakers"], + "test_spk_id": hparams["test_spk_id"], + "seed": hparams["seed"], + }, + ) # Create dataset objects "train", "valid", and "test". datasets = dataio_prep(hparams) - hparams["wav2vec2"] = hparams["wav2vec2"].to("cuda:0") + hparams["wav2vec2"] = hparams["wav2vec2"].to(device=run_opts["device"]) # freeze the feature extractor part when unfreezing if not hparams["freeze_wav2vec2"] and hparams["freeze_wav2vec2_conv"]: hparams["wav2vec2"].model.feature_extractor._freeze_parameters() diff --git a/recipes/IEMOCAP/iemocap_prepare.py b/recipes/IEMOCAP/iemocap_prepare.py new file mode 100644 index 0000000000..ba76a34fb5 --- /dev/null +++ b/recipes/IEMOCAP/iemocap_prepare.py @@ -0,0 +1,359 @@ +""" +Downloads and creates data manifest files for IEMOCAP +(https://paperswithcode.com/dataset/iemocap). 
+ +Authors: + * Mirco Ravanelli, 2021 + * Modified by Pierre-Yves Yanni, 2021 + * Abdel Heba, 2021 + * Yingzhi Wang, 2022 +""" + +import json +import os +import random +import re + +from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +SAMPLERATE = 16000 +NUMBER_UTT = 5531 + + +def prepare_data( + data_original, + save_json_train, + save_json_valid, + save_json_test, + split_ratio=[80, 10, 10], + different_speakers=False, + test_spk_id=1, + seed=12, +): + """ + Prepares the json files for the IEMOCAP dataset. + + Arguments + --------- + data_original : str + Path to the folder where the original IEMOCAP dataset is stored. + save_json_train : str + Path where the train data specification file will be saved. + save_json_valid : str + Path where the validation data specification file will be saved. + save_json_test : str + Path where the test data specification file will be saved. + split_ratio : list + List composed of three integers that sets split ratios for train, + valid, and test sets, respectively. + For instance split_ratio=[80, 10, 10] will assign 80% of the sentences + to training, 10% for validation, and 10% for test. + different_speakers : bool + If True, splits data so speakers are NOT shared among splits. + test_spk_id : int + Id of speaker used for test set, 10 speakers in total. + Here a leave-two-speaker strategy is used for the split, + if one test_spk_id is selected for test, the other spk_id in the same + session is automatically used for validation. + To perform a 10-fold cross-validation, + 10 experiments with test_spk_id from 1 to 10 should be done. + seed : int + Seed for reproducibility + + Returns + ------- + None + + Example + ------- + >>> data_original = "/path/to/iemocap/IEMOCAP_full_release" + >>> prepare_data(data_original, "train.json", "valid.json", "test.json") + """ + data_original = data_original + "/Session" + # setting seeds for reproducible code. 
+ random.seed(seed) + + # Check if this phase is already done (if so, skip it) + if skip(save_json_train, save_json_valid, save_json_test): + logger.info("Preparation completed in previous run, skipping.") + return + + speaker_dict = transform_data(data_original) + + if sum([len(value) for value in speaker_dict.values()]) != NUMBER_UTT: + raise ValueError( + "Error: Number of utterances is not 5531, please check your IEMOCAP folder" + ) + + # List files and create manifest from list + logger.info( + f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}" + ) + + if different_speakers: + data_split = split_different_speakers(speaker_dict, test_spk_id) + else: + data_split = split_sets(speaker_dict, split_ratio) + + # Creating json files + create_json(data_split["train"], save_json_train) + create_json(data_split["valid"], save_json_valid) + create_json(data_split["test"], save_json_test) + + +def create_json(wav_list, json_file): + """ + Creates the json file given a list of wav information. + + Arguments + --------- + wav_list : list of list + The list of wav information (path, label, gender). + json_file : str + The path of the output json file + """ + + json_dict = {} + for obj in wav_list: + wav_file = obj[0] + emo = obj[1] + # Read the signal (to retrieve duration in seconds) + signal = read_audio(wav_file) + duration = signal.shape[0] / SAMPLERATE + + uttid = wav_file.split("/")[-1][:-4] + + # Create entry for this utterance + json_dict[uttid] = { + "wav": wav_file, + "length": duration, + "emo": emo, + } + + # Writing the dictionary to the json file + with open(json_file, mode="w", encoding="utf-8") as json_f: + json.dump(json_dict, json_f, indent=2) + + logger.info(f"{json_file} successfully created!") + + +def skip(*filenames): + """ + Detects if the data preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + *filenames : tuple + A list of paths to check for existence. 
+ + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + for filename in filenames: + if not os.path.isfile(filename): + return False + return True + + +def split_different_speakers(speaker_dict, test_spk_id): + """Constructs train, validation and test sets that do not share common + speakers. There are two different speakers in each session. Train set is + constituted of 4 sessions (8 speakers), while validation set and test set + contain each 1 speaker. If test_spk_id is 1, then speaker 2 is selected + automatically for validation set, and training set contains other 8 speakers. + If test_spk_id is 2, then speaker 1 is selected for validation set. + + Arguments + --------- + speaker_dict: dict + a dictionary of speaker id and its corresponding audio information + test_spk_id: int + Id of speaker used for test set, 10 speakers in total. + Session1 contains speaker 1&2, Session2 contains speaker 3&4, ... + + Returns + ------- + dictionary containing train, valid, and test splits. + """ + data_split = {k: [] for k in ["train", "valid", "test"]} + data_split["test"].extend(speaker_dict[str(test_spk_id)]) + + # use the speaker in the same session as validation set + if test_spk_id % 2 == 0: + valid_spk_num = test_spk_id - 1 + else: + valid_spk_num = test_spk_id + 1 + + data_split["valid"].extend(speaker_dict[str(valid_spk_num)]) + + for i in range(1, 11): + if i != valid_spk_num and i != test_spk_id: + data_split["train"].extend(speaker_dict[str(i)]) + + return data_split + + +def split_sets(speaker_dict, split_ratio): + """Randomly splits the wav list into training, validation, and test lists. + Note that a better approach is to make sure that all the classes have the + same proportion of samples (e.g, spk01 should have 80% of samples in + training, 10% validation, 10% test, the same for speaker2 etc.). This + is the approach followed in some recipes such as the Voxceleb one. 
For + simplicity, we here simply split the full list without necessarily + respecting the split ratio within each class. + + Arguments + --------- + speaker_dict : list + a dictionary of speaker id and its corresponding audio information + split_ratio : list + List composed of three integers that sets split ratios for train, + valid, and test sets, respectively. + For instance split_ratio=[80, 10, 10] will assign 80% of the sentences + to training, 10% for validation, and 10% for test. + + Returns + ------- + dictionary containing train, valid, and test splits. + """ + + wav_list = [] + for key in speaker_dict.keys(): + wav_list.extend(speaker_dict[key]) + + # Random shuffle of the list + random.shuffle(wav_list) + tot_split = sum(split_ratio) + tot_snts = len(wav_list) + data_split = {} + splits = ["train", "valid"] + + for i, split in enumerate(splits): + n_snts = int(tot_snts * split_ratio[i] / tot_split) + data_split[split] = wav_list[0:n_snts] + del wav_list[0:n_snts] + data_split["test"] = wav_list + + return data_split + + +# cspell:ignore ahsn +def transform_data(path_loadSession): + """ + Create a dictionary that maps speaker id and corresponding wavs + + Arguments + --------- + path_loadSession : str + Path to the folder where the original IEMOCAP dataset is stored. + + Returns + ------- + speaker_dict : dict + Map from speaker id to wav. 
+ + Example + ------- + >>> data_original = "/path/to/iemocap/IEMOCAP_full_release/Session" + >>> data_transformed = "/path/to/iemocap/IEMOCAP_ahsn_leave-two-speaker-out" + >>> transform_data(data_original, data_transformed) + """ + + speaker_dict = {str(i + 1): [] for i in range(10)} + + speaker_count = 0 + for k in range(5): + session = load_session("%s%s" % (path_loadSession, k + 1)) + for idx in range(len(session)): + if session[idx][2] == "F": + speaker_dict[str(speaker_count + 1)].append(session[idx]) + else: + speaker_dict[str(speaker_count + 2)].append(session[idx]) + speaker_count += 2 + + return speaker_dict + + +def load_utterInfo(inputFile): + """ + Load utterInfo from original IEMOCAP database + """ + + # this regex allow to create a list with: + # [START_TIME - END_TIME] TURN_NAME EMOTION [V, A, D] + # [V, A, D] means [Valence, Arousal, Dominance] + pattern = re.compile( + r"[\[]*[0-9]*[.][0-9]*[ -]*[0-9]*[.][0-9]*[\]][\t][a-z0-9_]*[\t][a-z]{3}[\t][\[][0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[\]]", + re.IGNORECASE, + ) # noqa + with open(inputFile, encoding="utf-8") as myfile: + data = myfile.read().replace("\n", " ") + result = pattern.findall(data) + out = [] + for i in result: + a = i.replace("[", "") + b = a.replace(" - ", "\t") + c = b.replace("]", "") + x = c.replace(", ", "\t") + out.append(x.split("\t")) + return out + + +def load_session(pathSession): + """Load wav file from IEMOCAP session + and keep only the following 4 emotions: + [neural, happy, sad, anger]. + + Arguments + --------- + pathSession: str + Path folder of IEMOCAP session. + Returns + ------- + improvisedUtteranceList: list + List of improvised utterancefor IEMOCAP session. 
+ """ + pathEmo = pathSession + "/dialog/EmoEvaluation/" + pathWavFolder = pathSession + "/sentences/wav/" + + improvisedUtteranceList = [] + for emoFile in [ + f + for f in os.listdir(pathEmo) + if os.path.isfile(os.path.join(pathEmo, f)) + ]: + for utterance in load_utterInfo(pathEmo + emoFile): + if ( + (utterance[3] == "neu") + or (utterance[3] == "hap") + or (utterance[3] == "sad") + or (utterance[3] == "ang") + or (utterance[3] == "exc") + ): + path = ( + pathWavFolder + + utterance[2][:-5] + + "/" + + utterance[2] + + ".wav" + ) + + label = utterance[3] + if label == "exc": + label = "hap" + + if emoFile[7] != "i" and utterance[2][7] == "s": + improvisedUtteranceList.append( + [path, label, utterance[2][18]] + ) + else: + improvisedUtteranceList.append( + [path, label, utterance[2][15]] + ) + return improvisedUtteranceList diff --git a/recipes/IWSLT22_lowresource/AST/transformer/README.md b/recipes/IWSLT22_lowresource/AST/transformer/README.md new file mode 100644 index 0000000000..6b2dc2828c --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/README.md @@ -0,0 +1,107 @@ +# IWSLT 2022 Low-resource Task: Tamasheq-French end-to-end Speech Translation + + +## Description + +This is the recipe for the best system from the IWSLT 2022 low-resource task, as described in the original paper. +The speech translation model comprises a wav2vec 2.0 encoder and a Transformer decoder. It is trained end-to-end without any auxiliary loss. The recipe allows for removing the last layers of the Transformer Encoder inside the wav2vec 2.0 in order to reduce the number of training parameters. + +This recipe also provides a flexible use of text-based sequence-to-sequence models, such as [mBART](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) or [NLLB](https://huggingface.co/facebook/nllb-200-1.3B) model, to initialize the decoder of the speech translation model. 
This practice has been proven more effective in a wide range of settings in comparison with the randomly initialized decoder. + +## Data Downloading + +For downloading the dataset used for this experiment, please run the following command. + +``` +git clone https://github.com/mzboito/IWSLT2022_Tamasheq_data.git +``` + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +## Training + +For training the model, please update the variables at hparams/train_w2v2_st.yaml. + +Note that in order to drop the last layers of the wav2vec 2.0 module, it is necessary to update the parameter "keep_n_layers". +For instance: Using ``keep_n_layers: 10'' means that only the first 10 layers inside the wav2vec 2.0 Transformer encoder will be used for training. The remaining layers are removed. + +For launching training: +``` +python train.py hparams/train_w2v2_st.yaml --root_data_folder=your/data/path # e.g., /workspace/speechbrain/recipes/IWSLT22_lowresource/IWSLT2022_Tamasheq_data/taq_fra_clean/ + +``` + +## Training with mBART/NLLB + +For training the model with the mBART/NLLB model, please refer to the hparams/train_w2v2_mbart_st.yaml or hparams/train_w2v2_nllb_st.yaml file. + +For launching training: +``` +python train_with_w2v_mbart.py hparams/train_w2v2_mbart_st.yaml --root_data_folder=your/data/path # e.g., /workspace/speechbrain/recipes/IWSLT22_lowresource/IWSLT2022_Tamasheq_data/taq_fra_clean +``` + +One should change hparams/train_w2v2_mbart_st.yaml to hparams/train_w2v2_nllb_st.yaml in the above training command for using NLLB model instead. 
+ +## Pre-training Semantically-Aligned Multimodal Utterance-level (SAMU) wav2vec + +Inspired by [SAMU-XLSR](https://arxiv.org/abs/2205.08180), a model that unifies speech and text modality for making the pre-trained speech foundation model more semantically aware, we introduce here a recipe for fine-tuning a pre-trained wav2vec 2.0 model in the same manner. Training data can be paired speech/text data of the kind used by ASR or AST. In this recipe, we use directly the IWSLT2022_Tamasheq_data AST data. + +For launching SAMU training: +``` +python train_samu.py hparams/train_samu.yaml --root_data_folder=your/data/path # e.g., /workspace/speechbrain/recipes/IWSLT22_lowresource/IWSLT2022_Tamasheq_data/taq_fra_clean +``` + +After the SAMU model is pre-trained, one can use it in the same manner as wav2vec 2.0 model. We found that using SAMU model as speech encoder coupled with a decoder from mBART or NLLB helps further improve BLEU scores on this challenging dataset. + +For launching AST training: +``` +train_with_samu_mbart.py hparams/train_samu_mbart_st.yaml --root_data_folder=your/data/path --pre_trained_samu=your/samu/ckpt +``` + +Examples of the two parameters: +--root_data_folder=/workspace/speechbrain/recipes/IWSLT22_lowresource/IWSLT2022_Tamasheq_data/taq_fra_clean +--pre_trained_samu=/workspace/speechbrain/recipes/IWSLT22_lowresource/results/samu_pretraining/7777/save/CKPT+checkpoint_epoch100/wav2vec2.ckpt + +One should change hparams/train_samu_mbart_st.yaml to hparams/train_samu_nllb_st.yaml in the above training command for using NLLB model instead. + +# Results + +| No. | hyperparams file | dev BLEU | test BLEU | Model Link | +| --- |:----------------:|:---------:|:--------:|:--------:| +| 1 | train_w2v2_st.yaml | 7.63 | 5.38 | Not avail. | Not avail. 
| +| 2 | train_w2v2_mbart_st.yaml | 9.62 | 7.73 | [DropBox](https://www.dropbox.com/sh/xjo0ou739oksnus/AAAgyrCwywmDRRuUiDnUva2za?dl=0) | +| 3 | train_w2v2_nllb_st.yaml | 11.09 | 8.70 | [DropBox](https://www.dropbox.com/sh/spp2ijgfdbzuz26/AABkJ97e72D7aKzNLTm1qmWEa?dl=0) | +| 4 | train_samu_mbart_st.yaml | 13.41 | 10.28 | [DropBox](https://www.dropbox.com/sh/98s1xyc3chreaw6/AABom3FnwY5SsIvg4en9tWC2a?dl=0) | +| 5 | train_samu_nllb_st.yaml | 13.89 | 11.32 | [DropBox](https://www.dropbox.com/sh/ekkpl9c3kxsgllj/AABa0q2LrJe_o7JF-TTbfxZ-a?dl=0) | + +## Citation +``` +@inproceedings{boito-etal-2022-trac, + title = "{ON}-{TRAC} Consortium Systems for the {IWSLT} 2022 Dialect and Low-resource Speech Translation Tasks", + author = {Boito, Marcely Zanon and + Ortega, John and + Riguidel, Hugo and + Laurent, Antoine and + Barrault, Lo{\"\i}c and + Bougares, Fethi and + Chaabani, Firas and + Nguyen, Ha and + Barbier, Florentin and + Gahbiche, Souhir and + Est{\`e}ve, Yannick}, + booktitle = "Proceedings of the 19th International Conference on Spoken Language Translation (IWSLT 2022)", + month = may, + year = "2022", + address = "Dublin, Ireland (in-person and online)", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2022.iwslt-1.28", + doi = "10.18653/v1/2022.iwslt-1.28", + pages = "308--318" +} +``` diff --git a/recipes/IWSLT22_lowresource/AST/transformer/extra_requirements.txt b/recipes/IWSLT22_lowresource/AST/transformer/extra_requirements.txt new file mode 100644 index 0000000000..506764464f --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/extra_requirements.txt @@ -0,0 +1,2 @@ +protobuf +sacremoses diff --git a/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu.yaml b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu.yaml new file mode 100644 index 0000000000..2a6e6e272f --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu.yaml @@ -0,0 +1,121 @@ +# 
############################################################################ +# Model: SAMU model +# losses: cosine similarity +# Training: Tamasheq-French corpus +# Author: Ha Nguyen, 2023 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 7777 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +debug: False +output_folder: !ref results/samu_pretraining/ +save_folder: !ref /save +train_log: !ref /train_log.txt +wer_file: !ref /wer.txt + +# root data folder points to 17h version inside the github folder (IWSLT2022_Tamasheq_data/taq_fra_clean/) +root_data_folder: !PLACEHOLDER # e.g., /users/hnguyen/IWSLT2022_Tamasheq_data/taq_fra_clean +# data folder is the place where the json files will be stored prior to training +data_folder: !ref /json_version/ +# Data files +train_set: !ref /train.json +valid_set: !ref /valid.json +test_set: !ref /test.json +skip_prep: False + +# URL for the HuggingFace model we want to load (BASE here) +wav2vec2_hub: LIA-AvignonUniversity/IWSLT2022-tamasheq-only + +# wav2vec 2.0 specific parameters +wav2vec2_frozen: False + +####################### Training Parameters #################################### +number_of_epochs: 100 +lr: 0.001 +lr_wav2vec: 0.00001 +lr_labse: 0.00001 +sorting: ascending +batch_size: 2 +test_batch_size: 1 +ckpt_interval_minutes: 15 # save checkpoint every N min + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +dataloader_options: + batch_size: !ref + num_workers: 4 + +test_dataloader_options: + batch_size: !ref + num_workers: 4 + +# Transformer +d_model: 768 +loss_scale: 50 + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: False + freeze: !ref + save_path: !ref /wav2vec2_checkpoint + +attn_pooling: !new:speechbrain.nnet.pooling.AttentionPooling + input_dim: !ref + +#LaBSE +labse_path: setu4993/LaBSE # 
cspell:disable +labse_frozen: True +LaBSE: !new:speechbrain.integrations.huggingface.labse.LaBSE + source: !ref + freeze: !ref + output_norm: True + save_path: !ref /labse_checkpoint + +modules: + wav2vec2: !ref + attn_pooling: !ref + LaBSE: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +adam_opt_class: !name:torch.optim.Adam + lr: !ref + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +labse_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_adam: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.5 + patient: 2 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + +lr_annealing_labse: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + wav2vec2: !ref + LaBSE: !ref + lr_annealing_adam: !ref + lr_annealing_wav2vec: !ref + lr_annealing_labse: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref diff --git a/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_mbart_st.yaml b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_mbart_st.yaml new file mode 100644 index 0000000000..352c2e5078 --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_mbart_st.yaml @@ -0,0 +1,196 @@ +# ############################################################################ +# Model: E2E ST with SAMU encoder and mBART decoder +# Encoder: SAMU +# Decoder: mBART decoder +# losses: NLL +# Training: Tamasheq-French corpus +# Author: Ha Nguyen, 2023 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with 
parameters are made +seed: 1337 #7777 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +debug: False +output_folder: !ref results/samu_mbart/ +save_folder: !ref /save +train_log: !ref /train_log.txt +wer_file: !ref /wer.txt +bleu_file: !ref /bleu.txt + +# root data folder points to 17h version inside the github folder (IWSLT2022_Tamasheq_data/taq_fra_clean/) +root_data_folder: !PLACEHOLDER # e.g., /users/hnguyen/IWSLT2022_Tamasheq_data/taq_fra_clean +# data folder is the place where the json files will be stored prior to training +data_folder: !ref /json_version/ +lang: "fr" #for the BLEU score detokenization +target_lang: "fr_XX" # for mbart initialization + +annotation_train: !ref /train.json +annotation_valid: !ref /valid.json +annotation_test: !ref /test.json +skip_prep: False + +# URL for the HuggingFace model we want to load (BASE here) +wav2vec2_hub: LIA-AvignonUniversity/IWSLT2022-tamasheq-only +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# wav2vec 2.0 specific parameters +wav2vec2_frozen: False + +####################### Training Parameters #################################### +number_of_epochs: 500 +lr: 0.001 +lr_wav2vec: 0.0001 +lr_mbart: 0.0001 +batch_size: 2 +test_batch_size: 1 +grad_accumulation_factor: 6 +valid_search_interval: 4 +loss_reduction: batchmean +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Data sorting parameters: sorting_debug_duration replaces sorting_min_duration in debug mode +sorting: ascending + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +dataloader_options: + batch_size: !ref + num_workers: 4 + +test_dataloader_options: + batch_size: !ref + num_workers: 4 + +# Feature parameters (W2V2 etc) +features_dim: 768 # base wav2vec output dimension, for large replace by 1024 + +#projection for w2v +enc_dnn_layers: 1 +enc_dnn_neurons: 1024 #256 + +# Transformer +activation: !name:torch.nn.GELU + +# Outputs +label_smoothing: 0.1 +pad_index: 1 # pad_index defined by mbart model 
+bos_index: 250008 # fr_XX bos_index defined by mbart model +eos_index: 2 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +min_decode_ratio: 0.0 +max_decode_ratio: 0.25 +valid_beam_size: 5 + +############################## models ################################ +#wav2vec model +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + +#linear projection +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, !ref ] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +#mBART +mbart_path: facebook/mbart-large-50-many-to-many-mmt +mbart_frozen: False +vocab_size: 250054 +mBART: !new:speechbrain.integrations.huggingface.mbart.mBART + source: !ref + freeze: !ref + save_path: !ref /mbart_checkpoint + target_lang: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +modules: + wav2vec2: !ref + enc: !ref + mBART: !ref + +model: !new:torch.nn.ModuleList + - [!ref ] + +adam_opt_class: !name:torch.optim.Adam + lr: !ref + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +mbart_opt_class: !name:torch.optim.Adam + lr: !ref + +seq_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: !ref + reduction: !ref + +lr_annealing_adam: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.5 + patient: 2 + +warmup: 8000 +hold: 32000 +cooldown: 40000 +optimizer_step_limit: 80000 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.TriStageLRSchedule + lr: !ref + warmup_steps: !ref + hold_steps: !ref + decay_steps: !ref + total_steps: !ref + +lr_annealing_mbart: !new:speechbrain.nnet.schedulers.TriStageLRSchedule + lr: !ref + warmup_steps: !ref + hold_steps: !ref + decay_steps: !ref + total_steps: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + 
 model: !ref + wav2vec2: !ref + mBART: !ref + lr_annealing_wav2vec: !ref + lr_annealing_mbart: !ref + counter: !ref + +valid_search: !new:speechbrain.decoders.S2SHFTextBasedBeamSearcher + modules: [!ref , null, null] + vocab_size: !ref + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: True + length_normalization: True + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +bleu_computer: !name:speechbrain.integrations.nlp.bleu.BLEUStats + +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# Path to the samu checkpoint +pre_trained_samu: !PLACEHOLDER # e.g., /users/hnguyen/output_samu_pretraining/7777/save/CKPT+checkpoint_epoch100/wav2vec2.ckpt +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + wav2vec: !ref + paths: + wav2vec: !ref diff --git a/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_nllb_st.yaml b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_nllb_st.yaml new file mode 100644 index 0000000000..d9529000d2 --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_samu_nllb_st.yaml @@ -0,0 +1,196 @@ +# ############################################################################ +# Model: E2E ST with SAMU encoder and NLLB decoder +# Encoder: SAMU +# Decoder: NLLB decoder +# losses: NLL +# Training: Tamasheq-French corpus +# Author: Ha Nguyen, 2023 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1337 #7777 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +debug: False +output_folder: !ref results/samu_nllb1.3B/ +save_folder: !ref /save +train_log: !ref /train_log.txt +wer_file: !ref /wer.txt +bleu_file: !ref /bleu.txt + +# root data folder points to 17h version inside the github folder 
(IWSLT2022_Tamasheq_data/taq_fra_clean/) +root_data_folder: !PLACEHOLDER # e.g., /users/hnguyen/IWSLT2022_Tamasheq_data/taq_fra_clean +# data folder is the place where the json files will be stored prior to training +data_folder: !ref /json_version/ +lang: "fr" #for the BLEU score detokenization +target_lang: "fra_Latn" # for nllb initialization + +annotation_train: !ref /train.json +annotation_valid: !ref /valid.json +annotation_test: !ref /test.json +skip_prep: False + +# URL for the HuggingFace model we want to load (BASE here) +wav2vec2_hub: LIA-AvignonUniversity/IWSLT2022-tamasheq-only +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# wav2vec 2.0 specific parameters +wav2vec2_frozen: False + +####################### Training Parameters #################################### +number_of_epochs: 500 +lr: 0.001 +lr_wav2vec: 0.0001 +lr_mbart: 0.0001 +batch_size: 2 +test_batch_size: 1 +grad_accumulation_factor: 6 +valid_search_interval: 4 +loss_reduction: batchmean +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Data sorting parameters: sorting_debug_duration replaces sorting_min_duration in debug mode +sorting: ascending + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +dataloader_options: + batch_size: !ref + num_workers: 4 + +test_dataloader_options: + batch_size: !ref + num_workers: 4 + +# Feature parameters (W2V2 etc) +features_dim: 768 # base wav2vec output dimension, for large replace by 1024 + +#projection for w2v +enc_dnn_layers: 1 +enc_dnn_neurons: 1024 #256 + +# Transformer +activation: !name:torch.nn.GELU + +# Outputs +label_smoothing: 0.1 +pad_index: 1 # pad_index defined by nllb model +bos_index: 256057 # fra_Latn bos_index defined by nllb model +eos_index: 2 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +min_decode_ratio: 0.0 +max_decode_ratio: 0.25 +valid_beam_size: 5 + +############################## models ################################ +#wav2vec model +wav2vec2: 
!new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + +#linear projection +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, !ref ] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +#mBART +mbart_path: facebook/nllb-200-1.3B +mbart_frozen: False +vocab_size: 256206 +mBART: !new:speechbrain.integrations.huggingface.nllb.NLLB + source: !ref + freeze: !ref + save_path: !ref /mbart_checkpoint + target_lang: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +modules: + wav2vec2: !ref + enc: !ref + mBART: !ref + +model: !new:torch.nn.ModuleList + - [!ref ] + +adam_opt_class: !name:torch.optim.Adam + lr: !ref + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +mbart_opt_class: !name:torch.optim.Adam + lr: !ref + +seq_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: !ref + reduction: !ref + +lr_annealing_adam: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.5 + patient: 2 + +warmup: 8000 +hold: 32000 +cooldown: 40000 +optimizer_step_limit: 80000 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.TriStageLRSchedule + lr: !ref + warmup_steps: !ref + hold_steps: !ref + decay_steps: !ref + total_steps: !ref + +lr_annealing_mbart: !new:speechbrain.nnet.schedulers.TriStageLRSchedule + lr: !ref + warmup_steps: !ref + hold_steps: !ref + decay_steps: !ref + total_steps: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + wav2vec2: !ref + mBART: !ref + lr_annealing_wav2vec: !ref + lr_annealing_mbart: !ref + counter: !ref + +valid_search: !new:speechbrain.decoders.S2SHFTextBasedBeamSearcher + modules: [!ref , null, null] + vocab_size: !ref + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + 
using_eos_threshold: True + length_normalization: True + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +bleu_computer: !name:speechbrain.integrations.nlp.bleu.BLEUStats + +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# Path to the samu checkpoint +pre_trained_samu: !PLACEHOLDER # e.g., /users/hnguyen/output_samu_pretraining/7777/save/CKPT+checkpoint_epoch100/wav2vec2.ckpt +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + wav2vec: !ref + paths: + wav2vec: !ref diff --git a/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_mbart_st.yaml b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_mbart_st.yaml new file mode 100644 index 0000000000..a70c59e9ab --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_mbart_st.yaml @@ -0,0 +1,187 @@ +# ############################################################################ +# Model: E2E ST with wav2vec 2.0 encoder and mBART decoder +# Encoder: wav2vec 2.0 +# Decoder: mBART decoder +# losses: NLL +# Training: Tamasheq-French corpus +# Author: Ha Nguyen, 2023 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1337 #7777 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +debug: False +output_folder: !ref results/w2v2_mbart/ +save_folder: !ref /save +train_log: !ref /train_log.txt +wer_file: !ref /wer.txt +bleu_file: !ref /bleu.txt + +# root data folder points to 17h version inside the github folder (IWSLT2022_Tamasheq_data/taq_fra_clean/) +root_data_folder: !PLACEHOLDER # e.g., /users/hnguyen/IWSLT2022_Tamasheq_data/taq_fra_clean +# data folder is the place where the json files will be stored prior to training +data_folder: !ref /json_version/ +lang: "fr" #for the BLEU score detokenization +target_lang: "fr_XX" # for mbart initialization + 
+annotation_train: !ref /train.json +annotation_valid: !ref /valid.json +annotation_test: !ref /test.json +skip_prep: False + +# URL for the HuggingFace model we want to load (BASE here) +wav2vec2_hub: LIA-AvignonUniversity/IWSLT2022-tamasheq-only +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# wav2vec 2.0 specific parameters +wav2vec2_frozen: False + +####################### Training Parameters #################################### +number_of_epochs: 500 +lr: 0.001 +lr_wav2vec: 0.0001 +lr_mbart: 0.0001 +batch_size: 2 +test_batch_size: 1 +grad_accumulation_factor: 6 +valid_search_interval: 4 +loss_reduction: batchmean +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Data sorting parameters: sorting_debug_duration replaces sorting_min_duration in debug mode +sorting: ascending + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +dataloader_options: + batch_size: !ref + num_workers: 4 + +test_dataloader_options: + batch_size: !ref + num_workers: 4 + +# Feature parameters (W2V2 etc) +features_dim: 768 # base wav2vec output dimension, for large replace by 1024 + +#projection for w2v +enc_dnn_layers: 1 +enc_dnn_neurons: 1024 #256 + +# Transformer +activation: !name:torch.nn.GELU + +# Outputs +label_smoothing: 0.1 +pad_index: 1 # pad_index defined by mbart model +bos_index: 250008 # fr_XX bos_index defined by mbart model +eos_index: 2 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +min_decode_ratio: 0.0 +max_decode_ratio: 0.25 +valid_beam_size: 5 + +############################## models ################################ +#wav2vec model +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + +#linear projection +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, !ref ] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +#mBART +mbart_path: 
facebook/mbart-large-50-many-to-many-mmt +mbart_frozen: False +vocab_size: 250054 +mBART: !new:speechbrain.integrations.huggingface.mbart.mBART + source: !ref + freeze: !ref + save_path: !ref /mbart_checkpoint + target_lang: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +modules: + wav2vec2: !ref + enc: !ref + mBART: !ref + +model: !new:torch.nn.ModuleList + - [!ref ] + +adam_opt_class: !name:torch.optim.Adam + lr: !ref + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +mbart_opt_class: !name:torch.optim.Adam + lr: !ref + +seq_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: !ref + reduction: !ref + +lr_annealing_adam: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.5 + patient: 2 + +warmup: 8000 +hold: 32000 +cooldown: 40000 +optimizer_step_limit: 80000 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.TriStageLRSchedule + lr: !ref + warmup_steps: !ref + hold_steps: !ref + decay_steps: !ref + total_steps: !ref + +lr_annealing_mbart: !new:speechbrain.nnet.schedulers.TriStageLRSchedule + lr: !ref + warmup_steps: !ref + hold_steps: !ref + decay_steps: !ref + total_steps: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + wav2vec2: !ref + mBART: !ref + lr_annealing_wav2vec: !ref + lr_annealing_mbart: !ref + counter: !ref + +valid_search: !new:speechbrain.decoders.S2SHFTextBasedBeamSearcher + modules: [!ref , null, null] + vocab_size: !ref + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: True + length_normalization: True + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +bleu_computer: !name:speechbrain.integrations.nlp.bleu.BLEUStats + +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats diff --git 
a/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_nllb_st.yaml b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_nllb_st.yaml new file mode 100644 index 0000000000..3e6050244d --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_nllb_st.yaml @@ -0,0 +1,187 @@ +# ############################################################################ +# Model: E2E ST with wav2vec 2.0 encoder and NLLB decoder +# Encoder: wav2vec 2.0 +# Decoder: NLLB decoder +# losses: NLL +# Training: Tamasheq-French corpus +# Author: Ha Nguyen, 2023 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1337 #7777 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +debug: False +output_folder: !ref results/w2v2_nllb1.3B/ +save_folder: !ref /save +train_log: !ref /train_log.txt +wer_file: !ref /wer.txt +bleu_file: !ref /bleu.txt + +# root data folder points to 17h version inside the github folder (IWSLT2022_Tamasheq_data/taq_fra_clean/) +root_data_folder: !PLACEHOLDER # e.g., /users/hnguyen/IWSLT2022_Tamasheq_data/taq_fra_clean +# data folder is the place where the json files will be stored prior to training +data_folder: !ref /json_version/ +lang: "fr" #for the BLEU score detokenization +target_lang: "fra_Latn" # for nllb initialization + +annotation_train: !ref /train.json +annotation_valid: !ref /valid.json +annotation_test: !ref /test.json +skip_prep: False + +# URL for the HuggingFace model we want to load (BASE here) +wav2vec2_hub: LIA-AvignonUniversity/IWSLT2022-tamasheq-only +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# wav2vec 2.0 specific parameters +wav2vec2_frozen: False + +####################### Training Parameters #################################### +number_of_epochs: 500 +lr: 0.001 +lr_wav2vec: 0.0001 +lr_mbart: 0.0001 +batch_size: 2 +test_batch_size: 1 +grad_accumulation_factor: 6 
+valid_search_interval: 4 +loss_reduction: batchmean +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Data sorting parameters: sorting_debug_duration replaces sorting_min_duration in debug mode +sorting: ascending + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +dataloader_options: + batch_size: !ref + num_workers: 4 + +test_dataloader_options: + batch_size: !ref + num_workers: 4 + +# Feature parameters (W2V2 etc) +features_dim: 768 # base wav2vec output dimension, for large replace by 1024 + +#projection for w2v +enc_dnn_layers: 1 +enc_dnn_neurons: 1024 #256 + +# Transformer +activation: !name:torch.nn.GELU + +# Outputs +label_smoothing: 0.1 +pad_index: 1 # pad_index defined by nllb model +bos_index: 256057 # fra_Latn bos_index defined by nllb model +eos_index: 2 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +min_decode_ratio: 0.0 +max_decode_ratio: 0.25 +valid_beam_size: 5 + +############################## models ################################ +#wav2vec model +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + +#linear projection +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, !ref ] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +#mBART +mbart_path: facebook/nllb-200-1.3B +mbart_frozen: False +vocab_size: 256206 +mBART: !new:speechbrain.integrations.huggingface.nllb.NLLB + source: !ref + freeze: !ref + save_path: !ref /mbart_checkpoint + target_lang: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +modules: + wav2vec2: !ref + enc: !ref + mBART: !ref + +model: !new:torch.nn.ModuleList + - [!ref ] + +adam_opt_class: !name:torch.optim.Adam + lr: !ref + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +mbart_opt_class: !name:torch.optim.Adam + lr: !ref + +seq_cost: 
!name:speechbrain.nnet.losses.nll_loss + label_smoothing: !ref + reduction: !ref + +lr_annealing_adam: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.5 + patient: 2 + +warmup: 8000 +hold: 32000 +cooldown: 40000 +optimizer_step_limit: 80000 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.TriStageLRSchedule + lr: !ref + warmup_steps: !ref + hold_steps: !ref + decay_steps: !ref + total_steps: !ref + +lr_annealing_mbart: !new:speechbrain.nnet.schedulers.TriStageLRSchedule + lr: !ref + warmup_steps: !ref + hold_steps: !ref + decay_steps: !ref + total_steps: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + wav2vec2: !ref + mBART: !ref + lr_annealing_wav2vec: !ref + lr_annealing_mbart: !ref + counter: !ref + +valid_search: !new:speechbrain.decoders.S2SHFTextBasedBeamSearcher + modules: [!ref , null, null] + vocab_size: !ref + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: True + length_normalization: True + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +bleu_computer: !name:speechbrain.integrations.nlp.bleu.BLEUStats + +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats diff --git a/recipes/IWSLT22_lowresource/hparams/train_w2v2_st.yaml b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_st.yaml similarity index 86% rename from recipes/IWSLT22_lowresource/hparams/train_w2v2_st.yaml rename to recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_st.yaml index 35702ee3d3..1363985af0 100644 --- a/recipes/IWSLT22_lowresource/hparams/train_w2v2_st.yaml +++ b/recipes/IWSLT22_lowresource/AST/transformer/hparams/train_w2v2_st.yaml @@ -10,9 +10,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 5988 -__set_seed: 
!!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] debug: False -output_folder: !ref output/ +output_folder: !ref results/w2v2_st/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -23,14 +23,20 @@ data_folder: !ref /json_version/ lang: "fr" #for the BLEU score detokenization vocab_size: 1000 #for SentencePiece tokenizer (unigram) +annotation_train: !ref /train.json +annotation_valid: !ref /valid.json +annotation_test: !ref /test.json +skip_prep: False + # URL for the HuggingFace model we want to load (BASE here) wav2vec2_hub: LIA-AvignonUniversity/IWSLT2022-tamasheq-only +wav2vec2_folder: !ref /wav2vec2_checkpoint # wav2vec 2.0 specific parameters wav2vec2_frozen: False keep_n_layers: 6 # keep first N layers from the Transformer Encoder stack inside the wav2vec 2.0 model -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 100 lr: 0.001 lr_wav2vec: 0.00001 @@ -57,7 +63,7 @@ test_dataloader_options: num_workers: 4 # Feature parameters (W2V2 etc) -features_dim: 768 #base wav2vec output dimension, for large replace by 1024 +features_dim: 768 # base wav2vec output dimension, for large replace by 1024 #projection for w2v enc_dnn_layers: 1 @@ -75,7 +81,6 @@ output_neurons: !ref # /!\ needs to be changed accordingly to the v attention_type: "regularMHA" # "RelPosMHAXL" or "regularMHA" # Outputs -blank_index: 0 label_smoothing: 0.1 pad_index: 0 bos_index: 1 @@ -90,11 +95,11 @@ test_beam_size: 5 ############################## models ################################ #wav2vec model -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref #linear projection @@ -168,30 +173,26 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer 
train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , null] +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref using_eos_threshold: False - length_normalization: False + length_normalization: True -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , null] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref using_eos_threshold: True length_normalization: True -bleu_computer: !name:speechbrain.utils.bleu.BLEUStats - merge_words: False - lang: !ref +bleu_computer: !name:speechbrain.integrations.nlp.bleu.BLEUStats acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats diff --git a/recipes/IWSLT22_lowresource/AST/transformer/prepare_iwslt22.py b/recipes/IWSLT22_lowresource/AST/transformer/prepare_iwslt22.py new file mode 120000 index 0000000000..93e733e489 --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/prepare_iwslt22.py @@ -0,0 +1 @@ +../../prepare_iwslt22.py \ No newline at end of file diff --git a/recipes/IWSLT22_lowresource/train.py b/recipes/IWSLT22_lowresource/AST/transformer/train.py similarity index 88% rename from recipes/IWSLT22_lowresource/train.py rename to recipes/IWSLT22_lowresource/AST/transformer/train.py index 9238c918bf..19f9882039 100644 --- a/recipes/IWSLT22_lowresource/train.py +++ b/recipes/IWSLT22_lowresource/AST/transformer/train.py @@ -6,13 +6,15 @@ """ import sys + import torch -import logging +from hyperpyyaml import load_hyperpyyaml +from sacremoses import MosesDetokenizer + import speechbrain as sb from speechbrain.tokenizers.SentencePiece import 
SentencePiece from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml -from sacremoses import MosesDetokenizer +from speechbrain.utils.logger import get_logger # Define training procedure @@ -25,7 +27,7 @@ def compute_forward(self, batch, stage): tokens_bos, _ = batch.tokens_bos # translation # wav2vec module - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) # dimensionality reduction src = self.modules.enc(feats) @@ -48,9 +50,10 @@ def compute_forward(self, batch, stage): hyps = None if stage == sb.Stage.VALID: # the output of the encoder (enc) is used for valid search - hyps, _ = self.hparams.valid_search(src.detach(), wav_lens) + hyps, _, _, _ = self.hparams.valid_search(src.detach(), wav_lens) + elif stage == sb.Stage.TEST: - hyps, _ = self.hparams.test_search(src.detach(), wav_lens) + hyps, _, _, _ = self.hparams.test_search(src.detach(), wav_lens) return p_seq, wav_lens, hyps @@ -88,38 +91,28 @@ def compute_objectives(self, predictions, batch, stage): return loss def init_optimizers(self): - # Initializes the wav2vec2 optimizer if the model is not wav2vec2_frozen - if not self.hparams.wav2vec2_frozen: - self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( - self.modules.wav2vec2.parameters() - ) self.adam_optimizer = self.hparams.adam_opt_class( self.hparams.model.parameters() ) - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - - if self.check_gradients(loss): - if not self.hparams.wav2vec2_frozen: # if wav2vec2 is not frozen - self.wav2vec_optimizer.step() - self.adam_optimizer.step() + self.optimizers_dict = {"model_optimizer": self.adam_optimizer} + # Initializes the wav2vec2 optimizer if the model is not wav2vec2_frozen if not self.hparams.wav2vec2_frozen: - self.wav2vec_optimizer.zero_grad() 
- self.adam_optimizer.zero_grad() - - return loss.detach().cpu() + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.wav2vec2.parameters() + ) + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() + def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} + if not self.hparams.wav2vec2_frozen: + valid_optimizers["wav2vec_optimizer"] = optimizers[ + "wav2vec_optimizer" + ] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers def on_stage_start(self, stage, epoch): """Gets called when a stage (either training, validation, test) starts.""" @@ -143,7 +136,7 @@ def on_stage_end(self, stage, stage_loss, epoch): current_epoch = self.hparams.epoch_counter.current # log stats and save checkpoint at end-of-epoch - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): + if stage == sb.Stage.VALID: current_epoch = self.hparams.epoch_counter.current old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam( stage_stats["BLEU"] @@ -176,7 +169,7 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) - # create checkpoing + # create checkpoint meta = {"BLEU": stage_stats["BLEU"], "epoch": current_epoch} name = "checkpoint_epoch" + str(current_epoch) @@ -194,7 +187,8 @@ def on_stage_end(self, stage, stage_loss, epoch): # Define custom data procedure def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. 
+ """ # Define audio pipeline. In this case, we simply read the path contained # in the variable wav with the audio reader. @@ -238,7 +232,7 @@ def reference_text_pipeline(translation): tokenizer = SentencePiece( model_dir=hparams["save_folder"], vocab_size=hparams["vocab_size"], - annotation_train=data_folder + "/train.json", + annotation_train=hparams["annotation_train"], annotation_read="trans", annotation_format="json", model_type="unigram", @@ -249,7 +243,7 @@ def reference_text_pipeline(translation): # 2. load data and tokenize with trained tokenizer datasets = {} for dataset in ["train", "valid"]: - json_path = f"{data_folder}/{dataset}.json" + json_path = hparams[f"annotation_{dataset}"] is_use_sp = dataset == "train" and "speed_perturb" in hparams audio_pipeline_func = sp_audio_pipeline if is_use_sp else audio_pipeline @@ -270,7 +264,7 @@ def reference_text_pipeline(translation): ) for dataset in ["valid", "test"]: - json_path = f"{data_folder}/{dataset}.json" + json_path = hparams[f"annotation_{dataset}"] datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( json_path=json_path, replacements={"data_root": data_folder}, @@ -362,16 +356,14 @@ def reference_text_pipeline(translation): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # creates a logger - logger = logging.getLogger(__name__) + logger = get_logger(__name__) - # If distributed_launch=True then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -393,20 +385,24 @@ def reference_text_pipeline(translation): # Data preparation import prepare_iwslt22 - run_on_main( - prepare_iwslt22.data_proc, - kwargs={ - "dataset_folder": hparams["root_data_folder"], - "output_folder": hparams["data_folder"], - }, - ) 
+ if not hparams["skip_prep"]: + run_on_main( + prepare_iwslt22.data_proc, + kwargs={ + "dataset_folder": hparams["root_data_folder"], + "output_folder": hparams["data_folder"], + }, + ) + # Load datasets for training, valid, and test, trains and applies tokenizer datasets, tokenizer = dataio_prepare(hparams) # Before training, we drop some of the wav2vec 2.0 Transformer Encoder layers - st_brain.modules.wav2vec2.model.encoder.layers = st_brain.modules.wav2vec2.model.encoder.layers[ - : hparams["keep_n_layers"] - ] + st_brain.modules.wav2vec2.model.encoder.layers = ( + st_brain.modules.wav2vec2.model.encoder.layers[ + : hparams["keep_n_layers"] + ] + ) # Training st_brain.fit( diff --git a/recipes/IWSLT22_lowresource/AST/transformer/train_samu.py b/recipes/IWSLT22_lowresource/AST/transformer/train_samu.py new file mode 100644 index 0000000000..663b495ff5 --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/train_samu.py @@ -0,0 +1,345 @@ +#!/usr/bin/env python3 +"""Recipe for fine-tuning a wav2vec model for semantically enriching: https://arxiv.org/abs/2205.08180. 
+ +Author + * Ha Nguyen, 2023 +""" + +import sys + +import torch +import torch.nn.functional as F +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ST(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + + batch = batch.to(self.device) + wavs, wav_lens = batch.sig # audio + + # wav2vec module + feats = self.modules.wav2vec2(wavs) + + # self-attention pooling + uttr_embeddings = self.modules.attn_pooling(feats) + + # norm + uttr_embeddings = F.normalize(uttr_embeddings, p=2) + + # LaBSE + text_embeddings = self.modules.LaBSE(batch.trans) + + return uttr_embeddings, text_embeddings + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given predictions and targets.""" + (uttr_embeddings, text_embeddings) = predictions + + B, S = uttr_embeddings.shape + loss = 0.0 + for b in range(B): + cosine_sim = torch.dot( + uttr_embeddings[b].float(), text_embeddings[b].float() + ) + loss += 1.0 - cosine_sim + loss *= self.hparams.loss_scale + return loss + + def init_optimizers(self): + self.adam_optimizer = self.hparams.adam_opt_class( + self.hparams.model.parameters() + ) + + self.optimizers_dict = {"model_optimizer": self.adam_optimizer} + + # Initializes the wav2vec2 optimizer if the model is not wav2vec2_frozen + if not self.hparams.wav2vec2_frozen: + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.wav2vec2.parameters() + ) + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + + # Initializes the labse optimizer if the model is not labse_frozen + if not self.hparams.labse_frozen: + self.labse_optimizer = self.hparams.labse_opt_class( + self.modules.LaBSE.parameters() + ) + self.optimizers_dict["labse_optimizer"] = 
self.labse_optimizer + + def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} + if not self.hparams.wav2vec2_frozen: + valid_optimizers["wav2vec_optimizer"] = optimizers[ + "wav2vec_optimizer" + ] + if not self.hparams.labse_frozen: + valid_optimizers["labse_optimizer"] = optimizers["labse_optimizer"] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers + + def on_stage_start(self, stage, epoch): + """Gets called when a stage (either training, validation, test) starts.""" + return + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + if stage == sb.Stage.TRAIN: + self.train_stats = stage_loss + + else: # valid or test + stage_stats = {"loss": stage_loss} + current_epoch = self.hparams.epoch_counter.current + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): + current_epoch = self.hparams.epoch_counter.current + old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate( + self.adam_optimizer, new_lr_adam + ) + + stats_meta = { + "epoch": current_epoch, + "lr_adam": old_lr_adam, + } + + if not self.hparams.wav2vec2_frozen: + ( + old_lr_wav2vec, + new_lr_wav2vec, + ) = self.hparams.lr_annealing_wav2vec(stage_stats["loss"]) + sb.nnet.schedulers.update_learning_rate( + self.wav2vec_optimizer, new_lr_wav2vec + ) + stats_meta["lr_wav2vec"] = old_lr_wav2vec + + if not self.hparams.labse_frozen: + (old_lr_labse, new_lr_labse) = self.hparams.lr_annealing_labse( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate( + self.labse_optimizer, new_lr_labse + ) + stats_meta["lr_labse"] = old_lr_labse + + self.hparams.train_logger.log_stats( + stats_meta=stats_meta, + train_stats={"loss": self.train_stats}, + 
valid_stats=stage_stats, + ) + + # create checkpoint + meta = {"loss": stage_stats["loss"], "epoch": current_epoch} + name = "checkpoint_epoch" + str(current_epoch) + + self.checkpointer.save_and_keep_only( + meta=meta, name=name, num_to_keep=10, min_keys=["loss"] + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + + +# Define custom data procedure +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + + # Define audio pipeline. In this case, we simply read the path contained + # in the variable wav with the audio reader. + @sb.utils.data_pipeline.takes("path") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Load the audio signal. This is done on the CPU in the `collate_fn`.""" + sig = sb.dataio.dataio.read_audio(wav) + return sig + + @sb.utils.data_pipeline.takes("path") + @sb.utils.data_pipeline.provides("sig") + def sp_audio_pipeline(wav): + """Load the audio signal. This is done on the CPU in the `collate_fn`.""" + sig = sb.dataio.dataio.read_audio(wav) + sig = sig.unsqueeze(0) + sig = hparams["speed_perturb"](sig) + sig = sig.squeeze(0) + return sig + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("trans") + @sb.utils.data_pipeline.provides("trans") + def reference_text_pipeline(wrd): + yield wrd + + datasets = {} + data_folder = hparams["data_folder"] + for dataset in ["train", "valid"]: + json_path = hparams[f"{dataset}_set"] + + is_use_sp = dataset == "train" and "speed_perturb" in hparams + audio_pipeline_func = sp_audio_pipeline if is_use_sp else audio_pipeline + + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=json_path, + replacements={"data_root": data_folder}, + dynamic_items=[audio_pipeline_func, reference_text_pipeline], + output_keys=["id", "sig", "duration", "trans"], + ) + + for dataset in ["test"]: + json_path = hparams[f"{dataset}_set"] + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=json_path, + replacements={"data_root": data_folder}, + dynamic_items=[audio_pipeline, reference_text_pipeline], + output_keys=["id", "sig", "duration", "trans"], + ) + + # Sorting training data with ascending order makes the code much + # faster because we minimize zero-padding. In most of the cases, this + # does not harm the performance. 
+ if hparams["sorting"] == "ascending": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 5}, + sort_key="duration", + reverse=True, + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 5}, + sort_key="duration", + reverse=True, + ) + else: + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="duration" + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + sort_key="duration" + ) + + hparams["dataloader_options"]["shuffle"] = False + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] == "descending": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 5}, + sort_key="duration", + reverse=True, + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 5}, + sort_key="duration", + reverse=True, + ) + else: + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="duration", reverse=True + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + sort_key="duration", reverse=True + ) + + hparams["dataloader_options"]["shuffle"] = False + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] == "random": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 3}, + key_max_value={"duration": 5}, + sort_key="duration", + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 5}, + ) + + hparams["dataloader_options"]["shuffle"] = True + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + 
return datasets + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Create main experiment class + st_brain = ST( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Data preparation + import prepare_iwslt22 + + if not hparams["skip_prep"]: + run_on_main( + prepare_iwslt22.data_proc, + kwargs={ + "dataset_folder": hparams["root_data_folder"], + "output_folder": hparams["data_folder"], + }, + ) + + # We can now directly create the datasets for training, valid, and test + datasets = dataio_prepare(hparams) + + # Training + st_brain.fit( + st_brain.hparams.epoch_counter, + datasets["train"], + datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["test_dataloader_options"], + ) + + # Test + for dataset in ["valid", "test"]: + st_brain.hparams.wer_file = ( + hparams["output_folder"] + "/wer_test" + ".txt" + ) + st_brain.evaluate( + datasets[dataset], + test_loader_kwargs=hparams["test_dataloader_options"], + ) diff --git a/recipes/IWSLT22_lowresource/AST/transformer/train_with_samu_mbart.py b/recipes/IWSLT22_lowresource/AST/transformer/train_with_samu_mbart.py new file mode 100644 index 0000000000..084399e055 --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/train_with_samu_mbart.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python3 +"""Recipe for fine-tuning a samu model and mBART/NLLB model for the ST task (no transcriptions). 
+ +Author + * Ha Nguyen, 2023 +""" + +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml +from sacremoses import MosesDetokenizer +from torch.nn.parallel import DistributedDataParallel + +import speechbrain as sb +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ST(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + + batch = batch.to(self.device) + wavs, wav_lens = batch.sig # audio + tokens_bos, _ = batch.tokens_bos # translation + + src = self.modules.wav2vec2(wavs, wav_lens) + + # dimensionality reduction + src = self.modules.enc(src) + + dec_out = self.modules.mBART( + src, tokens_bos, pad_idx=self.hparams.pad_index + ) + + # logits and softmax + p_seq = self.hparams.log_softmax(dec_out) + if hparams["mbart_frozen"] and not p_seq.requires_grad: + p_seq.requires_grad = True + + # compute outputs + hyps = None + if stage == sb.Stage.VALID and self.optimizer_step >= 1000: + # the output of the encoder (enc) is used for valid search + current_epoch = self.hparams.epoch_counter.current + if current_epoch % self.hparams.valid_search_interval == 0: + if isinstance(self.modules.mBART, DistributedDataParallel): + self.modules.mBART = self.modules.mBART.module + hyps, _, _, _ = self.hparams.valid_search( + src.detach(), wav_lens + ) + + elif stage == sb.Stage.TEST: + if isinstance(self.modules.mBART, DistributedDataParallel): + self.modules.mBART = self.modules.mBART.module + hyps, _, _, _ = self.hparams.valid_search(src.detach(), wav_lens) + + return p_seq, wav_lens, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given predictions and targets.""" + (p_seq, wav_lens, hyps) = predictions + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + + # st loss + tokens_eos = 
self.modules.mBART.custom_padding( + tokens_eos, + 0, + self.modules.mBART.model.model.decoder.config.pad_token_id, + ) + loss = self.hparams.seq_cost(p_seq, tokens_eos, length=tokens_eos_lens) + + fr_detokenizer = MosesDetokenizer(lang=self.hparams.lang) + + if stage != sb.Stage.TRAIN: + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + and self.optimizer_step >= 1000 + or (stage == sb.Stage.TEST) + ): + detokenized_translation = [ + fr_detokenizer.detokenize(translation.split(" ")) + for translation in batch.trans + ] + # it needs to be a list of list due to the extend on the bleu implementation + targets = [detokenized_translation] + + predictions = [ + fr_detokenizer.detokenize(hyp.split(" ")) + for hyp in self.modules.mBART.tokenizer.batch_decode( + hyps, skip_special_tokens=True + ) + ] + + self.bleu_metric.append(ids, predictions, targets) + + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) + + return loss + + def init_optimizers(self): + self.adam_optimizer = self.hparams.adam_opt_class( + self.hparams.model.parameters() + ) + + self.optimizers_dict = {"model_optimizer": self.adam_optimizer} + + # Initializes the wav2vec2 optimizer if the model is not wav2vec2_frozen + if not self.hparams.wav2vec2_frozen: + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.wav2vec2.parameters() + ) + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + + # Initializes the mbart optimizer if the model is not mbart_frozen + if not self.hparams.mbart_frozen: + self.mbart_optimizer = self.hparams.mbart_opt_class( + self.modules.mBART.parameters() + ) + self.optimizers_dict["mbart_optimizer"] = self.mbart_optimizer + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if 
should_step: + if not self.hparams.wav2vec2_frozen: + self.hparams.lr_annealing_wav2vec( + self.wav2vec_optimizer, self.optimizer_step + ) + if not self.hparams.mbart_frozen: + self.hparams.lr_annealing_mbart( + self.mbart_optimizer, self.optimizer_step + ) + + def on_stage_start(self, stage, epoch): + """Gets called when a stage (either training, validation, test) starts.""" + self.bleu_metric = self.hparams.bleu_computer() + + if stage != sb.Stage.TRAIN: + self.acc_metric = self.hparams.acc_computer() + self.bleu_metric = self.hparams.bleu_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + if stage == sb.Stage.TRAIN: + self.train_stats = stage_loss + + else: # valid or test + stage_stats = {"loss": stage_loss} + stage_stats["ACC"] = self.acc_metric.summarize() + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + and self.optimizer_step >= 1000 + or stage == sb.Stage.TEST + ): + stage_stats["BLEU"] = self.bleu_metric.summarize(field="BLEU") + stage_stats["BLEU_extensive"] = self.bleu_metric.summarize() + self.anneal_bleu = stage_stats["BLEU"] + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): + current_epoch = self.hparams.epoch_counter.current + old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam( + self.anneal_bleu # stage_stats["BLEU"] + ) + sb.nnet.schedulers.update_learning_rate( + self.adam_optimizer, new_lr_adam + ) + + stats_meta = { + "epoch": current_epoch, + "steps": self.optimizer_step, + "lr_adam": old_lr_adam, + } + + if not self.hparams.wav2vec2_frozen: + self.hparams.lr_annealing_wav2vec( + self.wav2vec_optimizer, self.optimizer_step + ) + stats_meta["lr_wav2vec"] = self.wav2vec_optimizer.param_groups[ + 0 + ]["lr"] + if not self.hparams.mbart_frozen: + 
self.hparams.lr_annealing_mbart( + self.mbart_optimizer, self.optimizer_step + ) + stats_meta["lr_mbart"] = self.mbart_optimizer.param_groups[0][ + "lr" + ] + self.hparams.train_logger.log_stats( + stats_meta=stats_meta, + train_stats={"loss": self.train_stats}, + valid_stats=stage_stats, + ) + + # create checkpoint + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + and self.optimizer_step >= 1000 + ): + meta = {"BLEU": stage_stats["BLEU"], "epoch": current_epoch} + name = "checkpoint_epoch" + str(current_epoch) + + self.checkpointer.save_and_keep_only( + meta=meta, name=name, num_to_keep=10, max_keys=["BLEU"] + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + + with open(self.hparams.bleu_file, "w", encoding="utf-8") as w: + self.bleu_metric.write_stats(w) + + +# Define custom data procedure +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + + # Define audio pipeline. In this case, we simply read the path contained + # in the variable wav with the audio reader. + @sb.utils.data_pipeline.takes("path") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Load the audio signal. This is done on the CPU in the `collate_fn`.""" + sig = sb.dataio.dataio.read_audio(wav) + return sig + + @sb.utils.data_pipeline.takes("path") + @sb.utils.data_pipeline.provides("sig") + def sp_audio_pipeline(wav): + """Load the audio signal. This is done on the CPU in the `collate_fn`.""" + sig = sb.dataio.dataio.read_audio(wav) + sig = sig.unsqueeze(0) + sig = hparams["speed_perturb"](sig) + sig = sig.squeeze(0) + return sig + + # Define text processing pipeline. We start from the raw text and then + # encode it using the tokenizer. 
The tokens with BOS are used for feeding + # decoder during training, the tokens with EOS for computing the cost function. + @sb.utils.data_pipeline.takes("trans") + @sb.utils.data_pipeline.provides( + "trans", "tokens_list", "tokens_bos", "tokens_eos" + ) + def reference_text_pipeline(translation): + """Processes the transcriptions to generate proper labels""" + yield translation + labels = tokenizer( + text_target=translation.replace("\n", ""), return_tensors="pt" + ) + tokens_list = labels["input_ids"].tolist()[-1] + yield tokens_list + tokens_bos = torch.LongTensor(tokens_list[0:-1]) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list[1:]) + yield tokens_eos + + datasets = {} + data_folder = hparams["data_folder"] + for dataset in ["train", "valid"]: + json_path = hparams[f"annotation_{dataset}"] + + is_use_sp = dataset == "train" and "speed_perturb" in hparams + audio_pipeline_func = sp_audio_pipeline if is_use_sp else audio_pipeline + + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=json_path, + replacements={"data_root": data_folder}, + dynamic_items=[audio_pipeline_func, reference_text_pipeline], + output_keys=[ + "id", + "sig", + "duration", + "trans", + "tokens_list", + "tokens_bos", + "tokens_eos", + ], + ) + + for dataset in ["test"]: + json_path = hparams[f"annotation_{dataset}"] + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=json_path, + replacements={"data_root": data_folder}, + dynamic_items=[audio_pipeline, reference_text_pipeline], + output_keys=[ + "id", + "sig", + "duration", + "trans", + "tokens_list", + "tokens_bos", + "tokens_eos", + ], + ) + + # Sorting training data with ascending order makes the code much + # faster because we minimize zero-padding. In most of the cases, this + # does not harm the performance. 
+ if hparams["sorting"] == "ascending": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + reverse=True, + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + reverse=True, + ) + else: + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="duration" + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + sort_key="duration" + ) + + hparams["dataloader_options"]["shuffle"] = False + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] == "descending": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + reverse=True, + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + reverse=True, + ) + else: + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="duration", reverse=True + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + sort_key="duration", reverse=True + ) + + hparams["dataloader_options"]["shuffle"] = False + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] == "random": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + ) + + hparams["dataloader_options"]["shuffle"] = True + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + 
return datasets + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # Create main experiment class + st_brain = ST( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + st_brain.anneal_bleu = 0 + + # Data preparation + import prepare_iwslt22 + + if not hparams["skip_prep"]: + run_on_main( + prepare_iwslt22.data_proc, + kwargs={ + "dataset_folder": hparams["root_data_folder"], + "output_folder": hparams["data_folder"], + }, + ) + + # We can now directly create the datasets for training, valid, and test + datasets = dataio_prepare(hparams, st_brain.modules.mBART.tokenizer) + + # Training + st_brain.fit( + st_brain.hparams.epoch_counter, + datasets["train"], + datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["test_dataloader_options"], + ) + + # Test + for dataset in ["valid", "test"]: + st_brain.hparams.wer_file = ( + hparams["output_folder"] + "/wer_test" + ".txt" + ) + st_brain.evaluate( + datasets[dataset], + test_loader_kwargs=hparams["test_dataloader_options"], + ) diff --git a/recipes/IWSLT22_lowresource/AST/transformer/train_with_w2v_mbart.py b/recipes/IWSLT22_lowresource/AST/transformer/train_with_w2v_mbart.py new file mode 100644 index 0000000000..c09a97e0f3 --- /dev/null +++ b/recipes/IWSLT22_lowresource/AST/transformer/train_with_w2v_mbart.py @@ -0,0 +1,447 @@ 
+#!/usr/bin/env python3 +"""Recipe for fine-tuning a wav2vec model and mBART/NLLB model for the ST task (no transcriptions). + +Author + * Ha Nguyen, 2023 +""" + +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml +from sacremoses import MosesDetokenizer +from torch.nn.parallel import DistributedDataParallel + +import speechbrain as sb +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ST(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + + batch = batch.to(self.device) + wavs, wav_lens = batch.sig # audio + tokens_bos, _ = batch.tokens_bos # translation + + src = self.modules.wav2vec2(wavs, wav_lens) + + # dimensionality reduction + src = self.modules.enc(src) + + dec_out = self.modules.mBART( + src, tokens_bos, pad_idx=self.hparams.pad_index + ) + + # logits and softmax + p_seq = self.hparams.log_softmax(dec_out) + if hparams["mbart_frozen"] and not p_seq.requires_grad: + p_seq.requires_grad = True + + # compute outputs + hyps = None + if stage == sb.Stage.VALID and self.optimizer_step >= 1000: + # the output of the encoder (enc) is used for valid search + current_epoch = self.hparams.epoch_counter.current + if current_epoch % self.hparams.valid_search_interval == 0: + if isinstance(self.modules.mBART, DistributedDataParallel): + self.modules.mBART = self.modules.mBART.module + hyps, _, _, _ = self.hparams.valid_search( + src.detach(), wav_lens + ) + + elif stage == sb.Stage.TEST: + if isinstance(self.modules.mBART, DistributedDataParallel): + self.modules.mBART = self.modules.mBART.module + hyps, _, _, _ = self.hparams.valid_search(src.detach(), wav_lens) + + return p_seq, wav_lens, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given predictions and targets.""" + (p_seq, wav_lens, hyps) 
= predictions + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + + # st loss + tokens_eos = self.modules.mBART.custom_padding( + tokens_eos, + 0, + self.modules.mBART.model.model.decoder.config.pad_token_id, + ) + loss = self.hparams.seq_cost(p_seq, tokens_eos, length=tokens_eos_lens) + + fr_detokenizer = MosesDetokenizer(lang=self.hparams.lang) + + if stage != sb.Stage.TRAIN: + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + and self.optimizer_step >= 1000 + or (stage == sb.Stage.TEST) + ): + detokenized_translation = [ + fr_detokenizer.detokenize(translation.split(" ")) + for translation in batch.trans + ] + # it needs to be a list of list due to the extend on the bleu implementation + targets = [detokenized_translation] + + predictions = [ + fr_detokenizer.detokenize(hyp.split(" ")) + for hyp in self.modules.mBART.tokenizer.batch_decode( + hyps, skip_special_tokens=True + ) + ] + + self.bleu_metric.append(ids, predictions, targets) + + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) + + return loss + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + if not self.hparams.wav2vec2_frozen: + self.hparams.lr_annealing_wav2vec( + self.wav2vec_optimizer, self.optimizer_step + ) + if not self.hparams.mbart_frozen: + self.hparams.lr_annealing_mbart( + self.mbart_optimizer, self.optimizer_step + ) + + def init_optimizers(self): + self.adam_optimizer = self.hparams.adam_opt_class( + self.hparams.model.parameters() + ) + + self.optimizers_dict = {"model_optimizer": self.adam_optimizer} + + # Initializes the wav2vec2 optimizer if the model is not wav2vec2_frozen + if not self.hparams.wav2vec2_frozen: + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + 
self.modules.wav2vec2.parameters() + ) + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + + # Initializes the mbart optimizer if the model is not mbart_frozen + if not self.hparams.mbart_frozen: + self.mbart_optimizer = self.hparams.mbart_opt_class( + self.modules.mBART.parameters() + ) + self.optimizers_dict["mbart_optimizer"] = self.mbart_optimizer + + def on_stage_start(self, stage, epoch): + """Gets called when a stage (either training, validation, test) starts.""" + self.bleu_metric = self.hparams.bleu_computer() + + if stage != sb.Stage.TRAIN: + self.acc_metric = self.hparams.acc_computer() + self.bleu_metric = self.hparams.bleu_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + if stage == sb.Stage.TRAIN: + self.train_stats = stage_loss + + else: # valid or test + stage_stats = {"loss": stage_loss} + stage_stats["ACC"] = self.acc_metric.summarize() + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + and self.optimizer_step >= 1000 + or stage == sb.Stage.TEST + ): + stage_stats["BLEU"] = self.bleu_metric.summarize(field="BLEU") + stage_stats["BLEU_extensive"] = self.bleu_metric.summarize() + self.anneal_bleu = stage_stats["BLEU"] + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): + current_epoch = self.hparams.epoch_counter.current + old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam( + self.anneal_bleu # stage_stats["BLEU"] + ) + sb.nnet.schedulers.update_learning_rate( + self.adam_optimizer, new_lr_adam + ) + + stats_meta = { + "epoch": current_epoch, + "steps": self.optimizer_step, + "lr_adam": old_lr_adam, + } + + if not self.hparams.wav2vec2_frozen: + self.hparams.lr_annealing_wav2vec( + self.wav2vec_optimizer, self.optimizer_step + ) + 
stats_meta["lr_wav2vec"] = self.wav2vec_optimizer.param_groups[ + 0 + ]["lr"] + if not self.hparams.mbart_frozen: + self.hparams.lr_annealing_mbart( + self.mbart_optimizer, self.optimizer_step + ) + stats_meta["lr_mbart"] = self.mbart_optimizer.param_groups[0][ + "lr" + ] + self.hparams.train_logger.log_stats( + stats_meta=stats_meta, + train_stats={"loss": self.train_stats}, + valid_stats=stage_stats, + ) + + # create checkpoint + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + and self.optimizer_step >= 1000 + ): + meta = {"BLEU": stage_stats["BLEU"], "epoch": current_epoch} + name = "checkpoint_epoch" + str(current_epoch) + + self.checkpointer.save_and_keep_only( + meta=meta, name=name, num_to_keep=10, max_keys=["BLEU"] + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + + with open(self.hparams.bleu_file, "w", encoding="utf-8") as w: + self.bleu_metric.write_stats(w) + + +# Define custom data procedure +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + + # Define audio pipeline. In this case, we simply read the path contained + # in the variable wav with the audio reader. + @sb.utils.data_pipeline.takes("path") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Load the audio signal. This is done on the CPU in the `collate_fn`.""" + sig = sb.dataio.dataio.read_audio(wav) + return sig + + @sb.utils.data_pipeline.takes("path") + @sb.utils.data_pipeline.provides("sig") + def sp_audio_pipeline(wav): + """Load the audio signal. 
This is done on the CPU in the `collate_fn`.""" + sig = sb.dataio.dataio.read_audio(wav) + sig = sig.unsqueeze(0) + sig = hparams["speed_perturb"](sig) + sig = sig.squeeze(0) + return sig + + # Define text processing pipeline. We start from the raw text and then + # encode it using the tokenizer. The tokens with BOS are used for feeding + # decoder during training, the tokens with EOS for computing the cost function. + @sb.utils.data_pipeline.takes("trans") + @sb.utils.data_pipeline.provides( + "trans", "tokens_list", "tokens_bos", "tokens_eos" + ) + def reference_text_pipeline(translation): + """Processes the transcriptions to generate proper labels""" + yield translation + labels = tokenizer( + text_target=translation.replace("\n", ""), return_tensors="pt" + ) + tokens_list = labels["input_ids"].tolist()[-1] + yield tokens_list + tokens_bos = torch.LongTensor(tokens_list[0:-1]) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list[1:]) + yield tokens_eos + + datasets = {} + data_folder = hparams["data_folder"] + for dataset in ["train", "valid"]: + json_path = hparams[f"annotation_{dataset}"] + + is_use_sp = dataset == "train" and "speed_perturb" in hparams + audio_pipeline_func = sp_audio_pipeline if is_use_sp else audio_pipeline + + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=json_path, + replacements={"data_root": data_folder}, + dynamic_items=[audio_pipeline_func, reference_text_pipeline], + output_keys=[ + "id", + "sig", + "duration", + "trans", + "tokens_list", + "tokens_bos", + "tokens_eos", + ], + ) + + for dataset in ["test"]: + json_path = hparams[f"annotation_{dataset}"] + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=json_path, + replacements={"data_root": data_folder}, + dynamic_items=[audio_pipeline, reference_text_pipeline], + output_keys=[ + "id", + "sig", + "duration", + "trans", + "tokens_list", + "tokens_bos", + "tokens_eos", + ], + ) + + # Sorting training data with 
ascending order makes the code much + # faster because we minimize zero-padding. In most of the cases, this + # does not harm the performance. + if hparams["sorting"] == "ascending": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + reverse=True, + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + reverse=True, + ) + else: + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="duration" + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + sort_key="duration" + ) + + hparams["dataloader_options"]["shuffle"] = False + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] == "descending": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + reverse=True, + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + reverse=True, + ) + else: + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="duration", reverse=True + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + sort_key="duration", reverse=True + ) + + hparams["dataloader_options"]["shuffle"] = False + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] == "random": + # use smaller dataset to debug the model + if hparams["debug"]: + datasets["train"] = datasets["train"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + sort_key="duration", + ) + datasets["valid"] = datasets["valid"].filtered_sorted( + key_min_value={"duration": 1}, + key_max_value={"duration": 3}, + ) + + 
hparams["dataloader_options"]["shuffle"] = True + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + return datasets + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Create main experiment class + st_brain = ST( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + st_brain.anneal_bleu = 0 + + # Data preparation + import prepare_iwslt22 + + if not hparams["skip_prep"]: + run_on_main( + prepare_iwslt22.data_proc, + kwargs={ + "dataset_folder": hparams["root_data_folder"], + "output_folder": hparams["data_folder"], + }, + ) + + # We can now directly create the datasets for training, valid, and test + datasets = dataio_prepare(hparams, st_brain.modules.mBART.tokenizer) + + # Training + st_brain.fit( + st_brain.hparams.epoch_counter, + datasets["train"], + datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["test_dataloader_options"], + ) + + # Test + for dataset in ["valid", "test"]: + st_brain.hparams.wer_file = ( + hparams["output_folder"] + "/wer_test" + ".txt" + ) + st_brain.evaluate( + datasets[dataset], + test_loader_kwargs=hparams["test_dataloader_options"], + ) diff --git a/recipes/IWSLT22_lowresource/README.md b/recipes/IWSLT22_lowresource/README.md deleted file mode 100644 index 44fe511a9e..0000000000 --- a/recipes/IWSLT22_lowresource/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# IWSLT 2022 Low-resource Task: 
Tamasheq-French end-to-end Speech Translation - - -## Description - -This is the recipe for the best system from the IWSLT 2022 low-resource task, as described in the original paper. -The speech translation model comprises a wav2vec 2.0 encoder and a Transformer decoder. It is trained end-to-end without any auxiliary loss. The recipe allows for removing the last layers of the Transformer Encoder inside the wav2vec 2.0 in order to reduce the number of training parameters. - -## Data Downloading - -For downloading the dataset used for this experiment, please run the following command. - -``` -git clone https://github.com/mzboito/IWSLT2022_Tamasheq_data.git -``` - -## Training - -For training the model, please update the variables at hparams/train_w2v2_st.yaml. - -Note that in order to drop the last layers of the wav2vec 2.0 module, it is necessary to update the parameter "keep_n_layers". -For instance: Using ``keep_n_layers: 10'' means that only the first 10 layers inside the wav2vec 2.0 Transformer encoder will be used for training. The remaining layers are removed. 
- -For launching training: -``` -python train.py hparams/train_w2v2_st.yaml - -``` - -## Citation -``` -@inproceedings{boito-etal-2022-trac, - title = "{ON}-{TRAC} Consortium Systems for the {IWSLT} 2022 Dialect and Low-resource Speech Translation Tasks", - author = {Boito, Marcely Zanon and - Ortega, John and - Riguidel, Hugo and - Laurent, Antoine and - Barrault, Lo{\"\i}c and - Bougares, Fethi and - Chaabani, Firas and - Nguyen, Ha and - Barbier, Florentin and - Gahbiche, Souhir and - Est{\`e}ve, Yannick}, - booktitle = "Proceedings of the 19th International Conference on Spoken Language Translation (IWSLT 2022)", - month = may, - year = "2022", - address = "Dublin, Ireland (in-person and online)", - publisher = "Association for Computational Linguistics", - url = "https://aclanthology.org/2022.iwslt-1.28", - doi = "10.18653/v1/2022.iwslt-1.28", - pages = "308--318" -} -``` - - diff --git a/recipes/IWSLT22_lowresource/prepare_iwslt22.py b/recipes/IWSLT22_lowresource/prepare_iwslt22.py index e06e0a43d3..0ca6626f9e 100644 --- a/recipes/IWSLT22_lowresource/prepare_iwslt22.py +++ b/recipes/IWSLT22_lowresource/prepare_iwslt22.py @@ -45,7 +45,7 @@ def generate_json(folder_path, split): def read_file(f_path): - return [line for line in open(f_path)] + return [line for line in open(f_path, encoding="utf-8")] def data_proc(dataset_folder, output_folder): diff --git a/recipes/KsponSpeech/ASR/transformer/README.md b/recipes/KsponSpeech/ASR/transformer/README.md deleted file mode 100644 index 9d52a0da0e..0000000000 --- a/recipes/KsponSpeech/ASR/transformer/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# KsponSpeech ASR with Transformers - -This folder contains the scripts to train a Transformer-based speech recognizer using KsponSpeech. - -You can download KsponSpeech at https://aihub.or.kr/aidata/105/download - -# How to run -Before start training, set pretrained lm and tokenizer path in the YAML file to the proper path (i.e. 
the directory where trained tokenizer and language model exist) - -This is set to huggingface repository as a default. Pretrained models will be downloaded from the repository. - -```YAML -pretrained_lm_tokenizer_path: /path/to/pretrained/models -``` - -Also, data_foler in the YAML file should point to the results of ksponspeech_prepare.py -```YAML -data_folder: /path/to/data/prep/results -``` -Run the following to start training -```bash -python train.py hparams/conformer_medium.yaml -``` - -# Results -| Release | hyperparams file | eval clean WER | eval other WER | eval clean CER | eval other CER | HuggingFace link | Model link | GPUs | Training time | -| :------: | :-------------------: | :------------: | :------------: | :------------: | :------------: | :----------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------: | :---------: | :-------------: | -| 09-05-21 | conformer_medium.yaml | 21.00% | 25.69% | 7.48% | 8.38% | [HuggingFace](https://huggingface.co/speechbrain/asr-conformer-transformerlm-ksponspeech) | [GoogleDrive](https://drive.google.com/drive/folders/1iPzuhaKIUeKtOunkBkhc_sGlk47Awe80?usp=sharing) | 6xA100 80GB | 2 days 13 hours | - -# PreTrained Model + Easy-Inference -You can find the pre-trained model with an easy-inference function on HuggingFace: [HuggingFace](https://huggingface.co/speechbrain/asr-conformer-transformerlm-ksponspeech) - -# About SpeechBrain -- Website: https://speechbrain.github.io/ -- Code: https://github.com/speechbrain/speechbrain/ -- HuggingFace: https://huggingface.co/speechbrain/ - - -# Citing SpeechBrain -Please, cite SpeechBrain if you use it for your research or business. 
- -```bibtex -@misc{speechbrain, - title={{SpeechBrain}: A General-Purpose Speech Toolkit}, - author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, - year={2021}, - eprint={2106.04624}, - archivePrefix={arXiv}, - primaryClass={eess.AS}, - note={arXiv:2106.04624} -} -``` diff --git a/recipes/KsponSpeech/ASR/transformer/ksponspeech_prepare.py b/recipes/KsponSpeech/ASR/transformer/ksponspeech_prepare.py deleted file mode 120000 index 2d3d8dadde..0000000000 --- a/recipes/KsponSpeech/ASR/transformer/ksponspeech_prepare.py +++ /dev/null @@ -1 +0,0 @@ -../../ksponspeech_prepare.py \ No newline at end of file diff --git a/recipes/KsponSpeech/LM/README.md b/recipes/KsponSpeech/LM/README.md deleted file mode 100644 index f44a5b5f3f..0000000000 --- a/recipes/KsponSpeech/LM/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Language Model with KsponSpeech - -This folder contains recipes for training language models for the KsponSpeech Dataset. It supports a Transformer-based LM. - -You can download KsponSpeech at https://aihub.or.kr/aidata/105/download - -# How to run -Set tokenizer_file in the yaml files to the directory where the trained tokenizer is located. This is set to huggingface repository as a default. - -Also, set data_folder in the yaml file to the result of ksponspeech_prepare.py. - -Run the following to start training the language model. 
- -```bash -python train.py hparams/transformer.yaml -``` -# Results - -| Release | hyperparams file | eval clean loss | eval other loss | Model link | GPUs |Training time| -|:----:|:----:|:----:|:----:|:----:|:----:|:----:| -|09-05-21|transformer.yaml|4.41|4.68|[GoogleDrive](https://drive.google.com/drive/folders/1NmpE7aThLogxVhPrpWqFcw1dzd-fqzVA?usp=sharing)|1xA100 80GB|5 hours 35 mins| - -# About SpeechBrain -- Website: https://speechbrain.github.io/ -- Code: https://github.com/speechbrain/speechbrain/ -- HuggingFace: https://huggingface.co/speechbrain/ - - -# Citing SpeechBrain -Please, cite SpeechBrain if you use it for your research or business. - -```bibtex -@misc{speechbrain, - title={{SpeechBrain}: A General-Purpose Speech Toolkit}, - author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, - year={2021}, - eprint={2106.04624}, - archivePrefix={arXiv}, - primaryClass={eess.AS}, - note={arXiv:2106.04624} -} -``` diff --git a/recipes/KsponSpeech/LM/ksponspeech_prepare.py b/recipes/KsponSpeech/LM/ksponspeech_prepare.py deleted file mode 120000 index e73769e720..0000000000 --- a/recipes/KsponSpeech/LM/ksponspeech_prepare.py +++ /dev/null @@ -1 +0,0 @@ -../ksponspeech_prepare.py \ No newline at end of file diff --git a/recipes/KsponSpeech/README.md b/recipes/KsponSpeech/README.md deleted file mode 100644 index 59d54d8276..0000000000 --- a/recipes/KsponSpeech/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# KsponSpeech - -## About the corpus - -KsponSpeech is a large-scale spontaneous speech corpus of Korean. This corpus contains 965.2 hours of general -open-domain dialog utterances with transcriptions. 
- -More information can be found -at https://www.mdpi.com/2076-3417/10/19/6936 - -Training and evaluation transcripts were generated the same way as in the above paper. - -## Prepare data - -You can download KsponSpeech at https://aihub.or.kr/aidata/105/download - -Run following script to unzip the downloaded file - -```bash -./unzip_ksponspeech.sh "PATH_TO_DOWNLOADED/한국어 음성" PATH_TO_UNZIPPED -``` - -pcm files need to be converted to wav for training. Run following script for the conversion. - -```bash -python conert_to_wav.py --dirpath PATH_TO_UNZIPPED --nj num_processes -``` - -## How to run an ASR experiment with KsponSpeech - -To train a full speech recognition system the pipeline is the following: - -1. **Train a tokenizer.** The tokenizer takes in input the training transcripts and determines the subword units that - will be used for both acoustic and language model training. **Training a tokenizer before the language and acoustic - model is necessary**. Indeed, both of them will reuse this tokenizer to map the output tokens. -2. **Train a Language Model (LM).** The language model takes in input long texts from available books. We have recipes - with RNN and transformer-based LMs. In both cases, the LM is used during beam search to assign different weights to - different hypotheses generated by the acoustic model. -3. **Train an acoustic model (AM).** The acoustic model maps the input speech into a set of sub-words units. The current - repository contains recipes for transformer-based systems. - -**The results obtained with the different models can be found in the corresponding sub-directories!** - -## How to simply use pretrained models to transcribe my audio file? - -SpeechBrain provides a simple interface to transcribe audio files with pretrained models. 
All the necessary information -can be found on the HuggingFace repositories corresponding to our models for KsponSpeech: - -- [Conformer + ctc + TransformerLM](https://huggingface.co/speechbrain/asr-conformer-transformerlm-ksponspeech) -- [Colab example](https://colab.research.google.com/drive/10N98aGoeLGfh6Hu6xOCH5BbjVTVYgCyB?usp=sharing) - -# Citing SpeechBrain -Please, cite SpeechBrain if you use it for your research or business. - -```bibtex -@misc{speechbrain, - title={{SpeechBrain}: A General-Purpose Speech Toolkit}, - author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, - year={2021}, - eprint={2106.04624}, - archivePrefix={arXiv}, - primaryClass={eess.AS}, - note={arXiv:2106.04624} -} -``` - -# Citing this recipe -```bibtex -@misc{returnzero, - title = {ReturnZero Conformer Korean ASR model}, - author = {Dongwon Kim and Dongwoo Kim and Roh Jeongkyu}, - year = {2021}, - howpublished = {\url{https://huggingface.co/ddwkim/asr-conformer-transformerlm-ksponspeech}}, -} -``` - -# Citing KsponSpeech dataset -```bibtex -@Article{app10196936, -AUTHOR = {Bang, Jeong-Uk and Yun, Seung and Kim, Seung-Hi and Choi, Mu-Yeol and Lee, Min-Kyu and Kim, Yeo-Jeong and Kim, Dong-Hyun and Park, Jun and Lee, Young-Jik and Kim, Sang-Hun}, -TITLE = {KsponSpeech: Korean Spontaneous Speech Corpus for Automatic Speech Recognition}, -JOURNAL = {Applied Sciences}, -VOLUME = {10}, -YEAR = {2020}, -NUMBER = {19}, -ARTICLE-NUMBER = {6936}, -URL = {https://www.mdpi.com/2076-3417/10/19/6936}, -ISSN = {2076-3417}, -DOI = {10.3390/app10196936} -} -``` diff --git a/recipes/KsponSpeech/Tokenizer/README.md b/recipes/KsponSpeech/Tokenizer/README.md deleted file 
mode 100644 index 5e376d5fd0..0000000000 --- a/recipes/KsponSpeech/Tokenizer/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Tokenizer -This folder contains the scripts to train a tokenizer using SentencePiece (https://github.com/google/sentencepiece). The tokenizer is trained on the top of the KsponSpeech training transcriptions. - -# How to run -``` -python train.py train/5K_unigram_subword_bpe.yaml -``` -# Model link -- 5K unigram model: [HuggingFace](https://huggingface.co/ddwkim/asr-conformer-transformerlm-ksponspeech/blob/main/tokenizer.ckpt) - -The output folder with the logs and the tokenizers is available [here](https://drive.google.com/drive/folders/1zNGKDvHlLjQdUPrqP66vpD5RN9IIX6RC?usp=sharing). - -# About SpeechBrain -- Website: https://speechbrain.github.io/ -- Code: https://github.com/speechbrain/speechbrain/ -- HuggingFace: https://huggingface.co/speechbrain/ - - -# Citing SpeechBrain -Please, cite SpeechBrain if you use it for your research or business. - -```bibtex -@misc{speechbrain, - title={{SpeechBrain}: A General-Purpose Speech Toolkit}, - author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, - year={2021}, - eprint={2106.04624}, - archivePrefix={arXiv}, - primaryClass={eess.AS}, - note={arXiv:2106.04624} -} -``` diff --git a/recipes/KsponSpeech/Tokenizer/ksponspeech_prepare.py b/recipes/KsponSpeech/Tokenizer/ksponspeech_prepare.py deleted file mode 120000 index e73769e720..0000000000 --- a/recipes/KsponSpeech/Tokenizer/ksponspeech_prepare.py +++ /dev/null @@ -1 +0,0 @@ -../ksponspeech_prepare.py \ No newline at end of file diff --git a/recipes/KsponSpeech/convert_to_wav.py b/recipes/KsponSpeech/convert_to_wav.py 
deleted file mode 100644 index 256204abb4..0000000000 --- a/recipes/KsponSpeech/convert_to_wav.py +++ /dev/null @@ -1,48 +0,0 @@ -import argparse -import multiprocessing as mp -import wave -from pathlib import Path - -from tqdm import tqdm - - -def convert_to_wav(filepath): - """ - This function converts pcm files to wav files - - Arguments - --------- - filepath : str - path to the pcm file - - Returns - ------- - None - """ - - with open(filepath, "rb") as r: - data = r.read() - with wave.open(str(filepath.with_suffix(".wav")), "wb") as w: - w.setparams((1, 2, 16000, 0, "NONE", "NONE")) - w.writeframes(data) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--dirpath", type=str) - parser.add_argument("--nj", type=int, default=32) - args = parser.parse_args() - - file_list = list(Path(args.dirpath).glob("**/*.pcm")) - - pool = mp.Pool(processes=args.nj) - with tqdm(total=len(file_list)) as pbar: - for _ in tqdm(pool.imap_unordered(convert_to_wav, file_list)): - pbar.update() - - pool.close() - pool.join() - - -if __name__ == "__main__": - main() diff --git a/recipes/KsponSpeech/ksponspeech_prepare.py b/recipes/KsponSpeech/ksponspeech_prepare.py deleted file mode 100644 index 882bf76894..0000000000 --- a/recipes/KsponSpeech/ksponspeech_prepare.py +++ /dev/null @@ -1,426 +0,0 @@ -""" -Data preparation. 
- -Download: https://aihub.or.kr/aidata/105/download - -Author ------- -Dongwon Kim, Dongwoo Kim 2021 -""" -import csv -import logging -import os -import re - -import torchaudio - -from speechbrain.dataio.dataio import load_pkl, merge_csvs, save_pkl -from speechbrain.utils.data_utils import get_all_files - -logger = logging.getLogger(__name__) -OPT_FILE = "opt_ksponspeech_prepare.pkl" -SAMPLERATE = 16000 - - -def prepare_ksponspeech( - data_folder, - save_folder, - tr_splits=[], - dev_splits=[], - te_splits=[], - select_n_sentences=None, - merge_lst=[], - merge_name=None, - skip_prep=False, -): - """ - This class prepares the csv files for the KsponSpeech dataset. - Download link: https://aihub.or.kr/aidata/105/download - - Arguments - --------- - data_folder : str - Path to the folder where the original KsponSpeech dataset is stored. - tr_splits : list - List of train splits to prepare from ['train', 'dev', 'eval_clean', - 'eval_other']. - dev_splits : list - List of dev splits to prepare from ['dev']. - te_splits : list - List of test splits to prepare from ['eval_clean','eval_other']. - save_folder : str - The directory where to store the csv files. - select_n_sentences : int - Default : None - If not None, only pick this many sentences. - merge_lst : list - List of KsponSpeech splits (e.g, eval_clean, eval_other) to - merge in a singe csv file. - merge_name: str - Name of the merged csv file. - skip_prep: bool - If True, data preparation is skipped. 
- - - Example - ------- - >>> data_folder = 'datasets/KsponSpeech' - >>> tr_splits = ['train'] - >>> dev_splits = ['dev'] - >>> te_splits = ['eval_clean'] - >>> save_folder = 'KsponSpeech_prepared' - >>> prepare_ksponspeech(data_folder, save_folder, tr_splits, dev_splits, \ - te_splits) - """ - - if skip_prep: - return - data_folder = data_folder - splits = tr_splits + dev_splits + te_splits - save_folder = save_folder - select_n_sentences = select_n_sentences - conf = { - "select_n_sentences": select_n_sentences, - } - - # Other variables - # Saving folder - if not os.path.exists(save_folder): - os.makedirs(save_folder) - - save_opt = os.path.join(save_folder, OPT_FILE) - - # Check if this phase is already done (if so, skip it) - if skip(splits, save_folder, conf): - logger.info("Skipping preparation, completed in previous run.") - return - else: - logger.info("Data_preparation...") - - # Additional checks to make sure the data folder contains ksponspeech - check_ksponspeech_folders(data_folder, splits) - - # parse trn file - all_texts = {} - for split_index in range(len(splits)): - - split = splits[split_index] - dirlist = split2dirs(split) - wav_lst = [] - for dir in dirlist: - wav_lst += get_all_files( - os.path.join(data_folder, dir), match_and=[".wav"] - ) - - trnpath = os.path.join(data_folder, split + ".trn") - text_dict = text_to_dict(trnpath) - all_texts.update(text_dict) - - if select_n_sentences is not None: - n_sentences = select_n_sentences[split_index] - else: - n_sentences = len(wav_lst) - - create_csv( - save_folder, wav_lst, text_dict, split, n_sentences, - ) - - # Merging csv file if needed - if merge_lst and merge_name is not None: - merge_files = [split_kspon + ".csv" for split_kspon in merge_lst] - merge_csvs( - data_folder=save_folder, csv_lst=merge_files, merged_csv=merge_name, - ) - - # saving options - save_pkl(conf, save_opt) - - -def create_csv( - save_folder, wav_lst, text_dict, split, select_n_sentences, -): - """ - Create the dataset 
csv file given a list of wav files. - - Arguments - --------- - save_folder : str - Location of the folder for storing the csv. - wav_lst : list - The list of wav files of a given data split. - text_dict : list - The dictionary containing the text of each sentence. - split : str - The name of the current data split. - select_n_sentences : int, optional - The number of sentences to select. - - Returns - ------- - None - """ - # Setting path for the csv file - csv_file = os.path.join(save_folder, split + ".csv") - - # Preliminary prints - msg = "Creating csv lists in %s..." % (csv_file) - logger.info(msg) - - csv_lines = [["ID", "duration", "wav", "spk_id", "wrd"]] - - snt_cnt = 0 - # Processing all the wav files in wav_lst - for wav_file in wav_lst: - - snt_id = wav_file.split("/")[-1].replace(".wav", "") - spk_id = snt_id.split("_")[-1] - wrds = text_dict[snt_id] - - duration = torchaudio.info(wav_file).num_frames / SAMPLERATE - - csv_line = [ - snt_id, - str(duration), - wav_file, - spk_id, - str(" ".join(wrds.split())), - ] - - # Appending current file to the csv_lines list - csv_lines.append(csv_line) - snt_cnt = snt_cnt + 1 - - if snt_cnt == select_n_sentences: - break - - # Writing the csv_lines - with open(csv_file, mode="w") as csv_f: - csv_writer = csv.writer( - csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL - ) - - for line in csv_lines: - csv_writer.writerow(line) - - # Final print - msg = "%s successfully created!" % (csv_file) - logger.info(msg) - - -def skip(splits, save_folder, conf): - """ - Detect when the ksponspeech data prep can be skipped. - - Arguments - --------- - splits : list - A list of the splits expected in the preparation. - save_folder : str - The location of the seave directory - conf : dict - The configuration options to ensure they haven't changed. - - Returns - ------- - bool - if True, the preparation phase can be skipped. - if False, it must be done. 
- """ - - # Checking csv files - skip = True - - for split in splits: - if not os.path.isfile(os.path.join(save_folder, split + ".csv")): - skip = False - - # Checking saved options - save_opt = os.path.join(save_folder, OPT_FILE) - if skip is True: - if os.path.isfile(save_opt): - opts_old = load_pkl(save_opt) - if opts_old == conf: - skip = True - else: - skip = False - else: - skip = False - - return skip - - -def text_to_dict(trnpath): - """ - This converts lines of text into a dictionary- - - Arguments - --------- - text_lst : str - Path to the file containing the ksponspeech text transcription. - - Returns - ------- - dict - The dictionary containing the text transcriptions for each sentence. - - """ - # Initialization of the text dictionary - text_dict = {} - # Reading all the transcription files is text_lst - with open(trnpath, "r") as f: - # Reading all line of the transcription file - for line in f: - filename, raw_script = line.split(" :: ") - file_id = filename.split("/")[-1].replace(".pcm", "") - script = normalize(raw_script) - text_dict[file_id] = script - return text_dict - - -def normalize(string): - """ - This function normalizes a given string according to - the normalization rule - The normalization rule removes "/" indicating filler words, - removes "+" indicating repeated words, - removes all punctuation marks, - removes non-speech symbols, - and extracts orthographic transcriptions. 
- - Arguments - --------- - string : str - The string to be normalized - - Returns - ------- - str - The string normalized according to the rules - - """ - # extracts orthographic transcription - string = re.sub(r"\(([^)]*)\)\/\(([^)]*)\)", r"\1", string) - # removes non-speech symbols - string = re.sub(r"n/|b/|o/|l/|u/", "", string) - # removes punctuation marks - string = re.sub(r"[+*/.?!,]", "", string) - # removes extra spaces - string = re.sub(r"\s+", " ", string) - string = string.strip() - - return string - - -def split2dirs(split): - """ - This gives directory names for a given data split - - Arguments - --------- - split : str - The split of ksponspeech data - - Returns - ------- - list - A list containing directories of the given data split - - """ - - if split not in ["eval_other", "eval_clean", "train", "dev"]: - raise ValueError("Unsupported data split") - - if "eval" in split: - dirs = ["test/" + split] - - elif split == "dev": - dirs = [ - "train/KsponSpeech_05/KsponSpeech_{0:>04d}".format(num) - for num in range(621, 624) - ] - - elif split == "train": - dirs = ( - [ - "train/KsponSpeech_01/KsponSpeech_{0:>04d}".format(num) - for num in range(1, 125) - ] - + [ - "train/KsponSpeech_02/KsponSpeech_{0:>04d}".format(num) - for num in range(125, 249) - ] - + [ - "train/KsponSpeech_03/KsponSpeech_{0:>04d}".format(num) - for num in range(249, 373) - ] - + [ - "train/KsponSpeech_04/KsponSpeech_{0:>04d}".format(num) - for num in range(373, 497) - ] - + [ - "train/KsponSpeech_05/KsponSpeech_{0:>04d}".format(num) - for num in range(497, 621) - ] - ) - - return dirs - - -def check_ksponspeech_folders(data_folder, splits): - """ - Check if the data folder actually contains the ksponspeech dataset. - - If it does not, an error is raised. - - Returns - ------- - None - - Raises - ------ - OSError - If ksponspeech is not found at the specified path. 
- """ - # Checking if all the splits exist - - for split in splits: - if split not in ["eval_other", "eval_clean", "train", "dev"]: - raise ValueError("Unsupported data split") - - if "eval" in split: - trn_folder = os.path.join(data_folder, split + ".trn") - if not os.path.exists(trn_folder): - err_msg = ( - "the file %s does not exist (it is expected in the " - "ksponspeech dataset)" % trn_folder - ) - raise OSError(err_msg) - - elif split == "dev": - trn_folder = os.path.join(data_folder, "train.trn") - if not os.path.exists(trn_folder): - err_msg = ( - "the file %s does not exist (it is expected in the " - "ksponspeech dataset)" % trn_folder - ) - raise OSError(err_msg) - - elif split == "train": - trn_folder = os.path.join(data_folder, "train.trn") - if not os.path.exists(trn_folder): - err_msg = ( - "the file %s does not exist (it is expected in the " - "ksponspeech dataset)" % trn_folder - ) - raise OSError(err_msg) - - dirs = split2dirs(split) - - for dir in dirs: - dir_folder = os.path.join(data_folder, dir) - if not os.path.exists(dir_folder): - err_msg = ( - "the file %s does not exist (it is expected in the " - "ksponspeech dataset)" % dir_folder - ) - raise OSError(err_msg) diff --git a/recipes/KsponSpeech/unzip_ksponspeech.sh b/recipes/KsponSpeech/unzip_ksponspeech.sh deleted file mode 100755 index 6296ed4925..0000000000 --- a/recipes/KsponSpeech/unzip_ksponspeech.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -if [ $# -ne 2 ] -then - echo "Usage: unzip_ksponspeech.sh " -fi - -KSPONPATH=$1 -DESTPATH=$2 - -mkdir -p $DESTPATH/train -mkdir -p $DESTPATH/test - -echo "expanding transcription" -unzip "$KSPONPATH/전시문_통합_스크립트/KsponSpeech_scripts.zip" -d $DESTPATH - -echo "expanding train data" -unzip "$KSPONPATH/한국어_음성_분야/KsponSpeech_01.zip" -d $DESTPATH/train -unzip "$KSPONPATH/한국어_음성_분야/KsponSpeech_02.zip" -d $DESTPATH/train -unzip "$KSPONPATH/한국어_음성_분야/KsponSpeech_03.zip" -d $DESTPATH/train -unzip "$KSPONPATH/한국어_음성_분야/KsponSpeech_04.zip" -d 
$DESTPATH/train -unzip "$KSPONPATH/한국어_음성_분야/KsponSpeech_05.zip" -d $DESTPATH/train - -echo "expanding eval data" -unzip "$KSPONPATH/평가용_데이터/KsponSpeech_eval.zip" -d $DESTPATH/test \ No newline at end of file diff --git a/recipes/LJSpeech/TTS/README.md b/recipes/LJSpeech/TTS/README.md index 7372d11a7e..f88415a2c3 100644 --- a/recipes/LJSpeech/TTS/README.md +++ b/recipes/LJSpeech/TTS/README.md @@ -1,10 +1,18 @@ # Text-to-Speech (with LJSpeech) -This folder contains the recipes for training TTS systems (including vocoders) wiith the popular LJSpeech dataset. +This folder contains the recipes for training TTS systems (including vocoders) with the popular LJSpeech dataset. # Dataset The dataset can be downloaded from here: https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2 +# Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + # Tacotron 2 The subfolder "tacotron2" contains the recipe for training the popular [tacotron2](https://arxiv.org/abs/1712.05884) TTS model. To run this recipe, go into the "tacotron2" folder and run: @@ -12,30 +20,135 @@ To run this recipe, go into the "tacotron2" folder and run: ``` python train.py --device=cuda:0 --max_grad_norm=1.0 --data_folder=/your_folder/LJSpeech-1.1 hparams/train.yaml ``` -Training takes about X hours on an nvidia RTX8000. -The training logs are available [here](https://drive.google.com/drive/folders/1CbkXPvtLFVrRBeeuMnmTmNCyagNKO6uX?usp=sharing). +The training logs are available [here](https://www.dropbox.com/sh/1npvo1g1ncafipf/AAC5DR1ErF2Q9V4bd1DHqX43a?dl=0). You can find the pre-trained model with an easy-inference function on [HuggingFace](https://huggingface.co/speechbrain/tts-tacotron2-ljspeech). -# HiFi GAN (Vocoder) -The subfolder "vocoder/hifi_gan/" contains the [HiFi GAN vocoder](https://arxiv.org/pdf/2010.05646.pdf). 
-The vocoder is a neural network that converts a spectrogram into a waveform (it can be used on top of Tacotroon 2). +# FastSpeech2 +The subfolder "fastspeech2" contains the recipes for training the non-autoregressive transformer based TTS model [FastSpeech2](https://arxiv.org/abs/2006.04558). + +### FastSpeech2 with pre-extracted durations from a forced aligner +Training FastSpeech2 requires pre-extracted phoneme alignments (durations). The LJSpeech phoneme alignments from Montreal Forced Aligner are automatically downloaded, decompressed and stored at this location: ```/your_folder/LJSpeech-1.1/TextGrid```. + +To run this recipe, please first install the extra-dependencies : + +``` +pip install -r extra_requirements.txt +```` + +Then go into the "fastspeech2" folder and run: + +``` +python train.py --data_folder=/your_folder/LJSpeech-1.1 hparams/train.yaml +``` +Training takes about 3 minutes/epoch on 1 * V100 32G. + +The training logs are available [here](https://www.dropbox.com/scl/fo/vtgbltqdrvw9r0vs7jz67/h?rlkey=cm2mwh5rce5ad9e90qaciypox&dl=0). + +You can find the pre-trained model with an easy-inference function on [HuggingFace](https://huggingface.co/speechbrain/tts-fastspeech2-ljspeech). + +### FastSpeech2 with internal alignment +This recipe allows training FastSpeech2 without forced aligner referring to [One TTS Alignment To Rule Them All](https://arxiv.org/pdf/2108.10447.pdf). The alignment can be learnt by an internal alignment network that is added to FastSpeech2. This recipe aims to simplify training when using custom data and provide better alignments for punctuations. 
+ +To run this recipe, please first install the extra-requirements: +``` +pip install -r extra_requirements.txt +``` +Then go into the "fastspeech2" folder and run: +``` +python train_internal_alignment.py hparams/train_internal_alignment.yaml --data_folder=/your_folder/LJSpeech-1.1 +``` +The data preparation includes a grapheme-to-phoneme process for the entire corpus which may take several hours. Training takes about 5 minutes/epoch on 1 * V100 32G. + +The training logs are available [here](https://www.dropbox.com/scl/fo/4ctkc6jjas3uij9dzcwta/h?rlkey=i0k086d77flcsdx40du1ppm2d&dl=0). + +You can find the pre-trained model with an easy-inference function on [HuggingFace](https://huggingface.co/speechbrain/tts-fastspeech2-internal-alignment-ljspeech). + +# HiFiGAN (Vocoder) +The subfolder "vocoder/hifigan/" contains the [HiFiGAN vocoder](https://arxiv.org/pdf/2010.05646.pdf). +The vocoder is a neural network that converts a spectrogram into a waveform (it can be used on top of Tacotron2/FastSpeech2). We suggest using `tensorboard_logger` by setting `use_tensorboard: True` in the yaml file, thus `Tensorboard` should be installed. -To run this recipe, go into the "vocoder/hifi_gan/" folder and run: +To run this recipe, go into the "vocoder/hifigan/" folder and run: ``` python train.py hparams/train.yaml --data_folder /path/to/LJspeech ``` -Training takes about X hours on an nvidia RTX8000. +Training takes about 10 minutes/epoch on an nvidia RTX8000. -The training logs are available [here](https://drive.google.com/drive/folders/19sLwV7nAsnUuLkoTu5vafURA9Fo2WZgG?usp=sharing) +The training logs are available [here](https://www.dropbox.com/sh/m2xrdssiroipn8g/AAD-TqPYLrSg6eNxUkcImeg4a?dl=0) You can find the pre-trained model with an easy-inference function on [HuggingFace](https://huggingface.co/speechbrain/tts-hifigan-ljspeech). +# DiffWave (Vocoder) +The subfolder "vocoder/diffwave/" contains the [Diffwave](https://arxiv.org/pdf/2009.09761.pdf) vocoder. 
+ +DiffWave is a versatile diffusion model for audio synthesis, which produces high-fidelity audio in different waveform generation tasks, including neural vocoding conditioned on mel spectrogram, class-conditional generation, and unconditional generation. + +Here it serves as a vocoder that generates waveforms given spectrograms as conditions (it can be used on top of Tacotron2/FastSpeech2). + +To run this recipe, go into the "vocoder/diffwave/" folder and run: + +``` +python train.py hparams/train.yaml --data_folder /path/to/LJspeech +``` + +The scripts will output a synthesized audio to `/samples` for a certain interval of training epoch. + +We suggest using tensorboard_logger by setting `use_tensorboard: True` in the yaml file, thus torch.Tensorboard should be installed. + +Training takes about 6 minutes/epoch on 1 * V100 32G. + +The training logs are available [here](https://www.dropbox.com/sh/tbhpn1xirtaix68/AACvYaVDiUGAKURf2o-fvgMoa?dl=0) + +For inference, by setting `fast_sampling: True` , a fast sampling can be realized by passing user-defined variance schedules. According to the paper, high-quality audios can be generated with only 6 steps. This is highly recommended. + +You can find the pre-trained model with an easy-inference function on [HuggingFace](https://huggingface.co/speechbrain/tts-diffwave-ljspeech). + + +# HiFiGAN Unit Vocoder +The subfolder "vocoder/hifigan_discrete/" contains the [HiFiGAN Unit vocoder](https://arxiv.org/abs/2406.10735). This vocoder is a neural network designed to transform discrete self-supervised representations into waveform data. +This is suitable for a wide range of generative tasks such as speech enhancement, separation, text-to-speech, voice cloning, etc. Please read [DASB - Discrete Audio and Speech Benchmark](https://arxiv.org/abs/2406.14294) for more information. 
+ +To run this recipe successfully, start by installing the necessary extra dependencies: + +```bash +pip install -r extra_requirements.txt +``` + +Before training the vocoder, you need to choose a speech encoder to extract representations that will be used as discrete audio input. We support k-means models using features from HuBERT, WavLM, or Wav2Vec2. Below are the available self-supervised speech encoders for which we provide pre-trained k-means checkpoints: + +| Encoder | HF model | +|----------|-----------------------------------------| +| HuBERT | facebook/hubert-large-ll60k | +| Wav2Vec2 | facebook/wav2vec2-large-960h-lv60-self | +| WavLM | microsoft/wavlm-large | + +Checkpoints are available in the HF [SSL_Quantization](https://huggingface.co/speechbrain/SSL_Quantization) repository. Alternatively, you can train your own k-means model by following instructions in the "LJSpeech/quantization" README. + +Next, configure the SSL model type, k-means model, and corresponding hub in your YAML configuration file. Follow these steps: + +1. Navigate to the "vocoder/hifigan_discrete/hparams" folder and open "train.yaml" file. +2. Modify the `encoder_type` field to specify one of the SSL models: "HuBERT", "WavLM", or "Wav2Vec2". +3. Update the `encoder_hub` field with the specific name of the SSL Hub associated with your chosen model type. + +If you have trained your own k-means model, follow these additional steps: + +4. Update the `kmeans_folder` field with the specific name of the SSL Hub containing your trained k-means model. Please follow the same file structure as the official one in [SSL_Quantization](https://huggingface.co/speechbrain/SSL_Quantization). +5. Update the `kmeans_dataset` field with the specific name of the dataset on which the k-means model was trained. +6. Update the `num_clusters` field according to the number of clusters of your k-means model. 
+ +Finally, navigate back to the "vocoder/hifigan_discrete/" folder and run the following command: + +```bash +python train.py hparams/train.yaml --data_folder=/path/to/LJspeech +``` + +Training typically takes around 4 minutes per epoch when using an NVIDIA A100 40G. + # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -47,6 +160,15 @@ You can find the pre-trained model with an easy-inference function on [HuggingFa Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriTTS/vocoder/hifigan/extra-dependencies.txt b/recipes/LJSpeech/TTS/extra_requirements.txt similarity index 65% rename from 
recipes/LibriTTS/vocoder/hifigan/extra-dependencies.txt rename to recipes/LJSpeech/TTS/extra_requirements.txt index 1064f7cd4e..78ddcc94f2 100644 --- a/recipes/LibriTTS/vocoder/hifigan/extra-dependencies.txt +++ b/recipes/LJSpeech/TTS/extra_requirements.txt @@ -1,4 +1,8 @@ +# Needed only for quantization +scikit-learn # Needed only with use_tensorboard=True -tensorboard # torchvision is needed to save spectrograms +tensorboard +tgt torchvision +unidecode diff --git a/recipes/LJSpeech/TTS/fastspeech2/hparams/train.yaml b/recipes/LJSpeech/TTS/fastspeech2/hparams/train.yaml new file mode 100644 index 0000000000..a7070f5aab --- /dev/null +++ b/recipes/LJSpeech/TTS/fastspeech2/hparams/train.yaml @@ -0,0 +1,278 @@ +############################################################################ +# Model: FastSpeech2 +# Tokens: Raw characters (English text) +# Training: LJSpeech +# Authors: Sathvik Udupa, Yingzhi Wang, Pradnya Kandarkar +# ############################################################################ + +################################### +# Experiment Parameters and setup # +################################### +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/fastspeech2/ +save_folder: !ref /save +train_log: !ref /train_log.txt +epochs: 500 +train_spn_predictor_epochs: 8 +progress_samples: True +progress_sample_path: !ref /samples +progress_samples_min_run: 10 +progress_samples_interval: 10 +progress_batch_sample_size: 4 + +################################# +# Data files and pre-processing # +################################# +data_folder: #!PLACEHOLDER # e.g., /data/Database/LJSpeech-1.1 + +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json + +splits: ["train", "valid"] +split_ratio: [90, 10] + +skip_prep: False + +################################ +# Audio Parameters # +################################ +sample_rate: 22050 +hop_length: 256 +win_length: null 
+n_mel_channels: 80 +n_fft: 1024 +mel_fmin: 0.0 +mel_fmax: 8000.0 +power: 1 +norm: "slaney" +mel_scale: "slaney" +dynamic_range_compression: True +mel_normalized: False +min_max_energy_norm: True +min_f0: 65 #(torchaudio pyin values) +max_f0: 2093 #(torchaudio pyin values) + +################################ +# Optimization Hyperparameters # +################################ +learning_rate: 0.0001 +weight_decay: 0.000001 +max_grad_norm: 1.0 +batch_size: 32 #minimum 2 +num_workers_train: 16 +num_workers_valid: 4 +betas: [0.9, 0.98] + +################################ +# Model Parameters and model # +################################ +# Input parameters +lexicon: + - AA + - AE + - AH + - AO + - AW + - AY + - B + - CH + - D + - DH + - EH + - ER + - EY + - F + - G + - HH + - IH + - IY + - JH + - K + - L + - M + - N + - NG + - OW + - OY + - P + - R + - S + - SH + - T + - TH + - UH + - UW + - V + - W + - Y + - Z + - ZH + - spn + +n_symbols: 42 #fixed depending on symbols in the lexicon +1 for a dummy symbol used for padding +padding_idx: 0 + +# Encoder parameters +enc_num_layers: 4 +enc_num_head: 2 +enc_d_model: 384 +enc_ffn_dim: 1024 +enc_k_dim: 384 +enc_v_dim: 384 +enc_dropout: 0.2 + +# Decoder parameters +dec_num_layers: 4 +dec_num_head: 2 +dec_d_model: 384 +dec_ffn_dim: 1024 +dec_k_dim: 384 +dec_v_dim: 384 +dec_dropout: 0.2 + +# Postnet parameters +postnet_embedding_dim: 512 +postnet_kernel_size: 5 +postnet_n_convolutions: 5 +postnet_dropout: 0.5 + +# common +normalize_before: True +ffn_type: 1dcnn #1dcnn or ffn +ffn_cnn_kernel_size_list: [9, 1] + +# variance predictor +dur_pred_kernel_size: 3 +pitch_pred_kernel_size: 3 +energy_pred_kernel_size: 3 +variance_predictor_dropout: 0.5 + +# silent phoneme token predictor +spn_predictor: !new:speechbrain.lobes.models.FastSpeech2.SPNPredictor + enc_num_layers: !ref + enc_num_head: !ref + enc_d_model: !ref + enc_ffn_dim: !ref + enc_k_dim: !ref + enc_v_dim: !ref + enc_dropout: !ref + normalize_before: !ref + ffn_type: !ref + 
ffn_cnn_kernel_size_list: !ref + n_char: !ref + padding_idx: !ref + +#model +model: !new:speechbrain.lobes.models.FastSpeech2.FastSpeech2 + enc_num_layers: !ref + enc_num_head: !ref + enc_d_model: !ref + enc_ffn_dim: !ref + enc_k_dim: !ref + enc_v_dim: !ref + enc_dropout: !ref + dec_num_layers: !ref + dec_num_head: !ref + dec_d_model: !ref + dec_ffn_dim: !ref + dec_k_dim: !ref + dec_v_dim: !ref + dec_dropout: !ref + normalize_before: !ref + ffn_type: !ref + ffn_cnn_kernel_size_list: !ref + n_char: !ref + n_mels: !ref + postnet_embedding_dim: !ref + postnet_kernel_size: !ref + postnet_n_convolutions: !ref + postnet_dropout: !ref + padding_idx: !ref + dur_pred_kernel_size: !ref + pitch_pred_kernel_size: !ref + energy_pred_kernel_size: !ref + variance_predictor_dropout: !ref + +mel_spectogram: !name:speechbrain.lobes.models.FastSpeech2.mel_spectogram + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_fft: !ref + n_mels: !ref + f_min: !ref + f_max: !ref + power: !ref + normalized: !ref + min_max_energy_norm: !ref + norm: !ref + mel_scale: !ref + compression: !ref + +criterion: !new:speechbrain.lobes.models.FastSpeech2.Loss + log_scale_durations: True + duration_loss_weight: 1.0 + pitch_loss_weight: 1.0 + energy_loss_weight: 1.0 + ssim_loss_weight: 1.0 + mel_loss_weight: 1.0 + postnet_mel_loss_weight: 1.0 + spn_loss_weight: 1.0 + spn_loss_max_epochs: !ref + +vocoder: "hifi-gan" +pretrained_vocoder: True +vocoder_source: speechbrain/tts-hifigan-ljspeech +vocoder_download_path: tmpdir_vocoder + +modules: + spn_predictor: !ref + model: !ref + +train_dataloader_opts: + batch_size: !ref + drop_last: False #True #False + num_workers: !ref + shuffle: True + collate_fn: !new:speechbrain.lobes.models.FastSpeech2.TextMelCollate + +valid_dataloader_opts: + batch_size: !ref + num_workers: !ref + shuffle: False + collate_fn: !new:speechbrain.lobes.models.FastSpeech2.TextMelCollate + +#optimizer +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: !ref + betas: 
!ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 4000 + +#epoch object +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +#checkpointer +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + spn_predictor: !ref + model: !ref + lr_annealing: !ref + counter: !ref + +input_encoder: !new:speechbrain.dataio.encoder.TextEncoder + +progress_sample_logger: !new:speechbrain.utils.train_logger.ProgressSampleLogger + output_path: !ref + batch_sample_size: !ref + formats: + raw_batch: raw diff --git a/recipes/LJSpeech/TTS/fastspeech2/hparams/train_internal_alignment.yaml b/recipes/LJSpeech/TTS/fastspeech2/hparams/train_internal_alignment.yaml new file mode 100644 index 0000000000..d0dddc8c27 --- /dev/null +++ b/recipes/LJSpeech/TTS/fastspeech2/hparams/train_internal_alignment.yaml @@ -0,0 +1,284 @@ +############################################################################ +# Model: FastSpeech2 with internal alignment +# Tokens: Phonemes (ARPABET) +# Dataset: LJSpeech +# Authors: Yingzhi Wang 2023 +# ############################################################################ + +################################### +# Experiment Parameters and setup # +################################### +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/fastspeech2_internal_alignment/ +save_folder: !ref /save +train_log: !ref /train_log.txt +epochs: 500 +progress_samples: True +progress_sample_path: !ref /samples +progress_samples_min_run: 10 +progress_samples_interval: 10 +progress_batch_sample_size: 4 + +################################# +# Data files and pre-processing # +################################# +data_folder: !PLACEHOLDER # e.g., /data/Database/LJSpeech-1.1 + +train_json: !ref /train.json +valid_json: !ref 
/valid.json +test_json: !ref /test.json + +splits: ["train", "valid"] +split_ratio: [90, 10] + +skip_prep: False + +################################ +# Audio Parameters # +################################ +sample_rate: 22050 +hop_length: 256 +win_length: null +n_mel_channels: 80 +n_fft: 1024 +mel_fmin: 0.0 +mel_fmax: 8000.0 +power: 1 +norm: "slaney" +mel_scale: "slaney" +dynamic_range_compression: True +mel_normalized: False +min_max_energy_norm: True +min_f0: 65 #(torchaudio pyin values) +max_f0: 2093 #(torchaudio pyin values) + +################################ +# Optimization Hyperparameters # +################################ +learning_rate: 0.0001 +weight_decay: 0.000001 +max_grad_norm: 1.0 +batch_size: 16 #minimum 2 +betas: [0.9, 0.998] +num_workers_train: 16 +num_workers_valid: 4 + +################################ +# Model Parameters and model # +################################ +# Input parameters +lexicon: + - "AA" + - "AE" + - "AH" + - "AO" + - "AW" + - "AY" + - "B" + - "CH" + - "D" + - "DH" + - "EH" + - "ER" + - "EY" + - "F" + - "G" + - "HH" + - "IH" + - "IY" + - "JH" + - "K" + - "L" + - "M" + - "N" + - "NG" + - "OW" + - "OY" + - "P" + - "R" + - "S" + - "SH" + - "T" + - "TH" + - "UH" + - "UW" + - "V" + - "W" + - "Y" + - "Z" + - "ZH" + - "-" + - "!" + - "'" + - "(" + - ")" + - "," + - "." + - ":" + - ";" + - "?" 
+ - " " + +n_symbols: 52 #fixed depending on symbols in the lexicon (+1 for a dummy symbol used for padding, +1 for unknown) +padding_idx: 0 + +hidden_channels: 512 +# Encoder parameters +enc_num_layers: 4 +enc_num_head: 2 +enc_d_model: !ref +enc_ffn_dim: 1024 +enc_k_dim: !ref +enc_v_dim: !ref +enc_dropout: 0.2 + +# Aligner parameters +in_query_channels: 80 +in_key_channels: !ref # 512 in the paper +attn_channels: 80 +temperature: 0.0005 + +# Decoder parameters +dec_num_layers: 4 +dec_num_head: 2 +dec_d_model: !ref +dec_ffn_dim: 1024 +dec_k_dim: !ref +dec_v_dim: !ref +dec_dropout: 0.2 + +# Postnet parameters +postnet_embedding_dim: 512 +postnet_kernel_size: 5 +postnet_n_convolutions: 5 +postnet_dropout: 0.2 + +# common +normalize_before: True +ffn_type: 1dcnn #1dcnn or ffn +ffn_cnn_kernel_size_list: [9, 1] + +# variance predictor +dur_pred_kernel_size: 3 +pitch_pred_kernel_size: 3 +energy_pred_kernel_size: 3 +variance_predictor_dropout: 0.5 + +#model +model: !new:speechbrain.lobes.models.FastSpeech2.FastSpeech2WithAlignment + enc_num_layers: !ref + enc_num_head: !ref + enc_d_model: !ref + enc_ffn_dim: !ref + enc_k_dim: !ref + enc_v_dim: !ref + enc_dropout: !ref + in_query_channels: !ref + in_key_channels: !ref + attn_channels: !ref + temperature: !ref + dec_num_layers: !ref + dec_num_head: !ref + dec_d_model: !ref + dec_ffn_dim: !ref + dec_k_dim: !ref + dec_v_dim: !ref + dec_dropout: !ref + normalize_before: !ref + ffn_type: !ref + ffn_cnn_kernel_size_list: !ref + n_char: !ref + n_mels: !ref + postnet_embedding_dim: !ref + postnet_kernel_size: !ref + postnet_n_convolutions: !ref + postnet_dropout: !ref + padding_idx: !ref + dur_pred_kernel_size: !ref + pitch_pred_kernel_size: !ref + energy_pred_kernel_size: !ref + variance_predictor_dropout: !ref + +mel_spectogram: !name:speechbrain.lobes.models.FastSpeech2.mel_spectogram + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_fft: !ref + n_mels: !ref + f_min: !ref + f_max: !ref + power: !ref + normalized: 
!ref + min_max_energy_norm: !ref + norm: !ref + mel_scale: !ref + compression: !ref + +criterion: !new:speechbrain.lobes.models.FastSpeech2.LossWithAlignment + log_scale_durations: True + duration_loss_weight: 1.0 + pitch_loss_weight: 1.0 + energy_loss_weight: 1.0 + ssim_loss_weight: 1.0 + mel_loss_weight: 1.0 + postnet_mel_loss_weight: 1.0 + aligner_loss_weight: 1.0 + binary_alignment_loss_weight: 0.2 + binary_alignment_loss_warmup_epochs: 1 + binary_alignment_loss_max_epochs: 80 + +vocoder: "hifi-gan" +pretrained_vocoder: True +vocoder_source: speechbrain/tts-hifigan-ljspeech +vocoder_download_path: tmpdir_vocoder + +modules: + model: !ref + +train_dataloader_opts: + batch_size: !ref + drop_last: False #True #False + num_workers: !ref + shuffle: True + collate_fn: !new:speechbrain.lobes.models.FastSpeech2.TextMelCollateWithAlignment + +valid_dataloader_opts: + batch_size: !ref + num_workers: !ref + shuffle: False + collate_fn: !new:speechbrain.lobes.models.FastSpeech2.TextMelCollateWithAlignment + +#optimizer +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: !ref + betas: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 4000 + + +#epoch object +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +#checkpointer +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + lr_annealing: !ref + counter: !ref + +input_encoder: !new:speechbrain.dataio.encoder.TextEncoder + +progress_sample_logger: !new:speechbrain.utils.train_logger.ProgressSampleLogger + output_path: !ref + batch_sample_size: !ref + formats: + raw_batch: raw diff --git a/recipes/LJSpeech/TTS/fastspeech2/ljspeech_prepare.py b/recipes/LJSpeech/TTS/fastspeech2/ljspeech_prepare.py new file mode 120000 index 0000000000..2f703273cb --- /dev/null +++ 
b/recipes/LJSpeech/TTS/fastspeech2/ljspeech_prepare.py @@ -0,0 +1 @@ +../../ljspeech_prepare.py \ No newline at end of file diff --git a/recipes/LJSpeech/TTS/fastspeech2/train.py b/recipes/LJSpeech/TTS/fastspeech2/train.py new file mode 100644 index 0000000000..604d49f0ac --- /dev/null +++ b/recipes/LJSpeech/TTS/fastspeech2/train.py @@ -0,0 +1,626 @@ +""" + Recipe for training the FastSpeech2 Text-To-Speech model, an end-to-end + neural text-to-speech (TTS) system introduced in 'FastSpeech 2: Fast and High-Quality End-to-End Text to Speech +synthesis' paper + (https://arxiv.org/abs/2006.04558) + To run this recipe, do the following: + # python train.py hparams/train.yaml + Authors + * Sathvik Udupa 2022 + * Yingzhi Wang 2022 + * Pradnya Kandarkar 2023 +""" + +import os +import sys +from pathlib import Path + +import numpy as np +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.inference.text import GraphemeToPhoneme +from speechbrain.inference.vocoders import HIFIGAN +from speechbrain.utils.data_utils import scalarize +from speechbrain.utils.logger import get_logger + +os.environ["TOKENIZERS_PARALLELISM"] = "false" +logger = get_logger(__name__) + + +class FastSpeech2Brain(sb.Brain): + def on_fit_start(self): + """Gets called at the beginning of ``fit()``, on multiple processes + if ``distributed_count > 0`` and backend is ddp and initializes statistics + """ + self.hparams.progress_sample_logger.reset() + self.last_epoch = 0 + self.last_batch = None + self.last_loss_stats = {} + self.g2p = GraphemeToPhoneme.from_hparams("speechbrain/soundchoice-g2p") + self.spn_token_encoded = ( + self.input_encoder.encode_sequence_torch(["spn"]).int().item() + ) + return super().on_fit_start() + + def compute_forward(self, batch, stage): + """Computes the forward pass + Arguments + --------- + batch: str + a single batch + stage: speechbrain.Stage + the training stage + Returns + ------- 
+ the model output + """ + inputs, _ = self.batch_to_device(batch) + + tokens, durations, pitch, energy, no_spn_seqs, last_phonemes = inputs + + # Forward pass for the silent token predictor module + if ( + self.hparams.epoch_counter.current + > self.hparams.train_spn_predictor_epochs + ): + self.hparams.modules["spn_predictor"].eval() + with torch.no_grad(): + spn_preds = self.hparams.modules["spn_predictor"]( + no_spn_seqs, last_phonemes + ) + else: + spn_preds = self.hparams.modules["spn_predictor"]( + no_spn_seqs, last_phonemes + ) + + # Forward pass for the FastSpeech2 module + ( + predict_mel_post, + predict_postnet_output, + predict_durations, + predict_pitch, + predict_avg_pitch, + predict_energy, + predict_avg_energy, + predict_mel_lens, + ) = self.hparams.model(tokens, durations, pitch, energy) + + return ( + predict_mel_post, + predict_postnet_output, + predict_durations, + predict_pitch, + predict_avg_pitch, + predict_energy, + predict_avg_energy, + predict_mel_lens, + spn_preds, + ) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted outputs. + Arguments + --------- + predictions : torch.Tensor + The model generated spectrograms and other metrics from `compute_forward`. + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient. 
+ """ + x, y, metadata = self.batch_to_device(batch, return_metadata=True) + self.last_batch = [x[0], y[-2], y[-3], predictions[0], *metadata] + self._remember_sample([x[0], *y, *metadata], predictions) + loss = self.hparams.criterion( + predictions, y, self.hparams.epoch_counter.current + ) + self.last_loss_stats[stage] = scalarize(loss) + return loss["total_loss"] + + def _remember_sample(self, batch, predictions): + """Remembers samples of spectrograms and the batch for logging purposes + Arguments + --------- + batch: tuple + a training batch + predictions: tuple + predictions (raw output of the FastSpeech2 + model) + """ + ( + tokens, + spectogram, + durations, + pitch, + energy, + mel_lengths, + input_lengths, + spn_labels, + labels, + wavs, + ) = batch + ( + mel_post, + postnet_mel_out, + predict_durations, + predict_pitch, + predict_avg_pitch, + predict_energy, + predict_avg_energy, + predict_mel_lens, + spn_preds, + ) = predictions + self.hparams.progress_sample_logger.remember( + target=self.process_mel(spectogram, mel_lengths), + output=self.process_mel(postnet_mel_out, mel_lengths), + raw_batch=self.hparams.progress_sample_logger.get_batch_sample( + { + "tokens": tokens, + "input_lengths": input_lengths, + "mel_target": spectogram, + "mel_out": postnet_mel_out, + "mel_lengths": predict_mel_lens, + "durations": durations, + "predict_durations": predict_durations, + "labels": labels, + "wavs": wavs, + } + ), + ) + + def process_mel(self, mel, len, index=0): + """Converts a mel spectrogram to one that can be saved as an image + sample = sqrt(exp(mel)) + Arguments + --------- + mel: torch.Tensor + the mel spectrogram (as used in the model) + len: int + length of the mel spectrogram + index: int + batch index + Returns + ------- + mel: torch.Tensor + the spectrogram, for image saving purposes + """ + assert mel.dim() == 3 + return torch.sqrt(torch.exp(mel[index][: len[index]])) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end 
of an epoch. + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + """ + # At the end of validation, we can write + if stage == sb.Stage.VALID: + # Update learning rate + self.last_epoch = epoch + lr = self.hparams.noam_annealing.current_lr + + # The train_logger writes a summary to stdout and to the logfile. + self.hparams.train_logger.log_stats( # 1#2# + stats_meta={"Epoch": epoch, "lr": lr}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=self.last_loss_stats[sb.Stage.VALID], + ) + output_progress_sample = ( + self.hparams.progress_samples + and epoch % self.hparams.progress_samples_interval == 0 + and epoch >= self.hparams.progress_samples_min_run + ) + + if output_progress_sample: + logger.info("Saving predicted samples") + ( + inference_mel, + mel_lens, + inf_mel_spn_pred, + mel_lens_spn_pred, + ) = self.run_inference() + self.hparams.progress_sample_logger.save(epoch) + self.run_vocoder( + inference_mel, mel_lens, sample_type="with_spn" + ) + self.run_vocoder( + inf_mel_spn_pred, mel_lens_spn_pred, sample_type="no_spn" + ) + # Save the current checkpoint and delete previous checkpoints. + # UNCOMMENT THIS + self.checkpointer.save_and_keep_only( + meta=self.last_loss_stats[stage], + min_keys=["total_loss"], + ) + # We also write statistics about test data spectogram to stdout and to the logfile. 
+ if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + + def run_inference(self): + """Produces a sample in inference mode with predicted durations.""" + if self.last_batch is None: + return + tokens, *_, labels, _ = self.last_batch + + # Generates inference samples without using the silent phoneme predictor + ( + _, + postnet_mel_out, + _, + _, + _, + _, + _, + predict_mel_lens, + ) = self.hparams.model(tokens) + + self.hparams.progress_sample_logger.remember( + infer_output=self.process_mel( + postnet_mel_out, [len(postnet_mel_out[0])] + ) + ) + + # Generates inference samples using the silent phoneme predictor + + # Preprocessing required at the inference time for the input text + # "label" below contains input text + # "phoneme_labels" contain the phoneme sequences corresponding to input text labels + # "last_phonemes_combined" is used to indicate whether the index position is for a last phoneme of a word + phoneme_labels = list() + last_phonemes_combined = list() + + for label in labels: + phoneme_label = list() + last_phonemes = list() + + words = label.split() + words = [word.strip() for word in words] + words_phonemes = self.g2p(words) + + for words_phonemes_seq in words_phonemes: + for phoneme in words_phonemes_seq: + if not phoneme.isspace(): + phoneme_label.append(phoneme) + last_phonemes.append(0) + last_phonemes[-1] = 1 + + phoneme_labels.append(phoneme_label) + last_phonemes_combined.append(last_phonemes) + + # Inserts silent phonemes in the input phoneme sequence + all_tokens_with_spn = list() + max_seq_len = -1 + for i in range(len(phoneme_labels)): + phoneme_label = phoneme_labels[i] + token_seq = ( + self.input_encoder.encode_sequence_torch(phoneme_label) + .int() + .to(self.device) + ) + last_phonemes = torch.LongTensor(last_phonemes_combined[i]).to( + self.device + ) + + # Runs the silent phoneme predictor + spn_preds = ( 
+ self.hparams.modules["spn_predictor"] + .infer(token_seq.unsqueeze(0), last_phonemes.unsqueeze(0)) + .int() + ) + + spn_to_add = torch.nonzero(spn_preds).reshape(-1).tolist() + + tokens_with_spn = list() + + for token_idx in range(token_seq.shape[0]): + tokens_with_spn.append(token_seq[token_idx].item()) + if token_idx in spn_to_add: + tokens_with_spn.append(self.spn_token_encoded) + + tokens_with_spn = torch.LongTensor(tokens_with_spn).to(self.device) + all_tokens_with_spn.append(tokens_with_spn) + if max_seq_len < tokens_with_spn.shape[-1]: + max_seq_len = tokens_with_spn.shape[-1] + + # "tokens_with_spn_tensor" holds the input phoneme sequence with silent phonemes + tokens_with_spn_tensor = torch.LongTensor( + tokens.shape[0], max_seq_len + ).to(self.device) + tokens_with_spn_tensor.zero_() + + for seq_idx, seq in enumerate(all_tokens_with_spn): + tokens_with_spn_tensor[seq_idx, : len(seq)] = seq + + ( + _, + postnet_mel_out_spn_pred, + _, + _, + _, + _, + _, + predict_mel_lens_spn_pred, + ) = self.hparams.model(tokens_with_spn_tensor) + + return ( + postnet_mel_out, + predict_mel_lens, + postnet_mel_out_spn_pred, + predict_mel_lens_spn_pred, + ) + + def run_vocoder(self, inference_mel, mel_lens, sample_type=""): + """Uses a pretrained vocoder to generate audio from predicted mel + spectogram. By default, uses speechbrain hifigan. 
+ + Arguments + --------- + inference_mel: torch.Tensor + predicted mel from fastspeech2 inference + mel_lens: torch.Tensor + predicted mel lengths from fastspeech2 inference + used to mask the noise from padding + sample_type: str + used for logging the type of the inference sample being generated + + Returns + ------- + None + """ + if self.last_batch is None: + return + *_, wavs = self.last_batch + + inference_mel = inference_mel[: self.hparams.progress_batch_sample_size] + mel_lens = mel_lens[0 : self.hparams.progress_batch_sample_size] + assert ( + self.hparams.vocoder == "hifi-gan" + and self.hparams.pretrained_vocoder is True + ), "Specified vocoder not supported yet" + logger.info( + f"Generating audio with pretrained {self.hparams.vocoder_source} vocoder" + ) + hifi_gan = HIFIGAN.from_hparams( + source=self.hparams.vocoder_source, + savedir=self.hparams.vocoder_download_path, + ) + waveforms = hifi_gan.decode_batch( + inference_mel.transpose(2, 1), mel_lens, self.hparams.hop_length + ) + for idx, wav in enumerate(waveforms): + path = os.path.join( + self.hparams.progress_sample_path, + str(self.last_epoch), + f"pred_{sample_type}_{Path(wavs[idx]).stem}.wav", + ) + audio_io.save(path, wav, self.hparams.sample_rate) + + def batch_to_device(self, batch, return_metadata=False): + """Transfers the batch to the target device + Arguments + --------- + batch: tuple + the batch to use + return_metadata: bool + indicates whether the metadata should be returned + Returns + ------- + batch: tuple + the batch on the correct device + """ + + ( + text_padded, + durations, + input_lengths, + mel_padded, + pitch_padded, + energy_padded, + output_lengths, + len_x, + labels, + wavs, + no_spn_seq_padded, + spn_labels_padded, + last_phonemes_padded, + ) = batch + + durations = durations.to(self.device, non_blocking=True).long() + phonemes = text_padded.to(self.device, non_blocking=True).long() + input_lengths = input_lengths.to(self.device, non_blocking=True).long() + 
spectogram = mel_padded.to(self.device, non_blocking=True).float() + pitch = pitch_padded.to(self.device, non_blocking=True).float() + energy = energy_padded.to(self.device, non_blocking=True).float() + mel_lengths = output_lengths.to(self.device, non_blocking=True).long() + no_spn_seqs = no_spn_seq_padded.to( + self.device, non_blocking=True + ).long() + spn_labels = spn_labels_padded.to(self.device, non_blocking=True).long() + last_phonemes = last_phonemes_padded.to( + self.device, non_blocking=True + ).long() + x = (phonemes, durations, pitch, energy, no_spn_seqs, last_phonemes) + y = ( + spectogram, + durations, + pitch, + energy, + mel_lengths, + input_lengths, + spn_labels, + ) + metadata = (labels, wavs) + if return_metadata: + return x, y, metadata + return x, y + + +def dataio_prepare(hparams): + # Load lexicon + lexicon = hparams["lexicon"] + input_encoder = hparams.get("input_encoder") + + # add a dummy symbol for idx 0 - used for padding. + lexicon = ["@@"] + lexicon + input_encoder.update_from_iterable(lexicon, sequence_input=False) + input_encoder.add_unk() + + # load audio, text and durations on the fly; encode audio and text. 
+ @sb.utils.data_pipeline.takes( + "wav", + "label_phoneme", + "durations", + "pitch", + "start", + "end", + "spn_labels", + "last_phoneme_flags", + ) + @sb.utils.data_pipeline.provides("mel_text_pair") + def audio_pipeline( + wav, + label_phoneme, + dur, + pitch, + start, + end, + spn_labels, + last_phoneme_flags, + ): + durs = np.load(dur) + durs_seq = torch.from_numpy(durs).int() + label_phoneme = label_phoneme.strip() + label_phoneme = label_phoneme.split() + text_seq = input_encoder.encode_sequence_torch(label_phoneme).int() + + assert len(text_seq) == len(durs), ( + f"{len(text_seq)}, {len(durs), len(label_phoneme)}, ({label_phoneme})" + ) # ensure every token has a duration + + no_spn_label, last_phonemes = list(), list() + for i in range(len(label_phoneme)): + if label_phoneme[i] != "spn": + no_spn_label.append(label_phoneme[i]) + last_phonemes.append(last_phoneme_flags[i]) + + no_spn_seq = input_encoder.encode_sequence_torch(no_spn_label).int() + + spn_labels = [ + spn_labels[i] + for i in range(len(label_phoneme)) + if label_phoneme[i] != "spn" + ] + + audio, fs = audio_io.load(wav) + + audio = audio.squeeze() + audio = audio[int(fs * start) : int(fs * end)] + + mel, energy = hparams["mel_spectogram"](audio=audio) + mel = mel[:, : sum(durs)] + energy = energy[: sum(durs)] + pitch = np.load(pitch) + pitch = torch.from_numpy(pitch) + pitch = pitch[: mel.shape[-1]] + return ( + text_seq, + durs_seq, + mel, + pitch, + energy, + len(text_seq), + last_phonemes, + no_spn_seq, + spn_labels, + ) + + # define splits and load it as sb dataset + datasets = {} + + for dataset in hparams["splits"]: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams[f"{dataset}_json"], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline], + output_keys=["mel_text_pair", "wav", "label", "durations", "pitch"], + ) + return datasets, input_encoder + + +def main(): + hparams_file, run_opts, overrides = 
sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + sb.utils.distributed.ddp_init_group(run_opts) + + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + from ljspeech_prepare import prepare_ljspeech + + sb.utils.distributed.run_on_main( + prepare_ljspeech, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "splits": hparams["splits"], + "split_ratio": hparams["split_ratio"], + "model_name": hparams["model"].__class__.__name__, + "seed": hparams["seed"], + "pitch_n_fft": hparams["n_fft"], + "pitch_hop_length": hparams["hop_length"], + "pitch_min_f0": hparams["min_f0"], + "pitch_max_f0": hparams["max_f0"], + "skip_prep": hparams["skip_prep"], + "use_custom_cleaner": True, + }, + ) + + datasets, input_encoder = dataio_prepare(hparams) + + # Brain class initialization + fastspeech2_brain = FastSpeech2Brain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + fastspeech2_brain.input_encoder = input_encoder + # Training + fastspeech2_brain.fit( + fastspeech2_brain.hparams.epoch_counter, + datasets["train"], + datasets["valid"], + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + +if __name__ == "__main__": + main() diff --git a/recipes/LJSpeech/TTS/fastspeech2/train_internal_alignment.py b/recipes/LJSpeech/TTS/fastspeech2/train_internal_alignment.py new file mode 100644 index 0000000000..bee5ae3d7c --- /dev/null +++ b/recipes/LJSpeech/TTS/fastspeech2/train_internal_alignment.py @@ -0,0 +1,417 @@ +""" +Recipe for training the FastSpeech2 Text-To-Speech model +Instead of using pre-extracted phoneme durations from MFA, +This recipe trains an internal alignment from scratch, as introduced in: 
+https://arxiv.org/pdf/2108.10447.pdf (One TTS Alignment To Rule Them All) +To run this recipe, do the following: +# python train_internal_alignment.py hparams/train_internal_alignment.yaml + +Authors +* Yingzhi Wang 2023 +""" + +import os +import sys +from pathlib import Path + +import numpy as np +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.inference.vocoders import HIFIGAN +from speechbrain.utils.data_utils import scalarize +from speechbrain.utils.logger import get_logger + +os.environ["TOKENIZERS_PARALLELISM"] = "false" +logger = get_logger(__name__) + + +class FastSpeech2Brain(sb.Brain): + def on_fit_start(self): + """Gets called at the beginning of ``fit()``, on multiple processes + if ``distributed_count > 0`` and backend is ddp and initializes statistics + """ + self.hparams.progress_sample_logger.reset() + self.last_epoch = 0 + self.last_batch = None + self.last_loss_stats = {} + return super().on_fit_start() + + def compute_forward(self, batch, stage): + """Computes the forward pass + Arguments + --------- + batch: str + a single batch + stage: speechbrain.Stage + the training stage + Returns + ------- + the model output + """ + inputs, _ = self.batch_to_device(batch) + return self.hparams.model(*inputs) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing and logging.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted outputs. + Arguments + --------- + predictions : torch.Tensor + The model generated spectrograms and other metrics from `compute_forward`. + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. 
+ Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient. + """ + x, y, metadata = self.batch_to_device(batch, return_metadata=True) + self.last_batch = [x[0], y[-1], y[-2], predictions[0], *metadata] + self._remember_sample([x[0], *y, *metadata], predictions) + loss = self.hparams.criterion( + predictions, y, self.hparams.epoch_counter.current + ) + self.last_loss_stats[stage] = scalarize(loss) + return loss["total_loss"] + + def _remember_sample(self, batch, predictions): + """Remembers samples of spectrograms and the batch for logging purposes + Arguments + --------- + batch: tuple + a training batch + predictions: tuple + predictions (raw output of the FastSpeech2 + model) + """ + ( + phoneme_padded, + mel_padded, + pitch, + energy, + output_lengths, + input_lengths, + labels, + wavs, + ) = batch + + ( + mel_post, + postnet_mel_out, + predict_durations, + predict_pitch, + average_pitch, + predict_energy, + average_energy, + predict_mel_lens, + alignment_durations, + alignment_soft, + alignment_logprob, + alignment_mas, + ) = predictions + self.hparams.progress_sample_logger.remember( + target=self.process_mel(mel_padded, output_lengths), + output=self.process_mel(postnet_mel_out, output_lengths), + raw_batch=self.hparams.progress_sample_logger.get_batch_sample( + { + "tokens": phoneme_padded, + "input_lengths": input_lengths, + "mel_target": mel_padded, + "mel_out": postnet_mel_out, + "mel_lengths": predict_mel_lens, + "durations": alignment_durations, + "predict_durations": predict_durations, + "labels": labels, + "wavs": wavs, + } + ), + ) + + def process_mel(self, mel, len, index=0): + """Converts a mel spectrogram to one that can be saved as an image + sample = sqrt(exp(mel)) + Arguments + --------- + mel: torch.Tensor + the mel spectrogram (as used in the model) + len: int + length of the mel spectrogram + index: int + batch index + Returns + ------- + mel: torch.Tensor + the spectrogram, for image saving 
purposes + """ + assert mel.dim() == 3 + return torch.sqrt(torch.exp(mel[index][: len[index]])) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch. + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + """ + # At the end of validation, we can write + if stage == sb.Stage.VALID: + # Update learning rate + self.last_epoch = epoch + lr = self.hparams.noam_annealing.current_lr + + # The train_logger writes a summary to stdout and to the logfile. + self.hparams.train_logger.log_stats( # 1#2# + stats_meta={"Epoch": epoch, "lr": lr}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=self.last_loss_stats[sb.Stage.VALID], + ) + output_progress_sample = ( + self.hparams.progress_samples + and epoch % self.hparams.progress_samples_interval == 0 + and epoch >= self.hparams.progress_samples_min_run + ) + + if output_progress_sample: + logger.info("Saving predicted samples") + inference_mel, mel_lens = self.run_inference() + self.hparams.progress_sample_logger.save(epoch) + self.run_vocoder(inference_mel, mel_lens) + # Save the current checkpoint and delete previous checkpoints. + # UNCOMMENT THIS + self.checkpointer.save_and_keep_only( + meta=self.last_loss_stats[stage], + min_keys=["total_loss"], + ) + # We also write statistics about test data spectogram to stdout and to the logfile. 
+ if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + + def run_inference(self): + """Produces a sample in inference mode with predicted durations.""" + if self.last_batch is None: + return + tokens, *_ = self.last_batch + + ( + _, + postnet_mel_out, + _, + _, + _, + _, + _, + predict_mel_lens, + _, + _, + _, + _, + ) = self.hparams.model(tokens) + self.hparams.progress_sample_logger.remember( + infer_output=self.process_mel( + postnet_mel_out, [len(postnet_mel_out[0])] + ) + ) + return postnet_mel_out, predict_mel_lens + + def run_vocoder(self, inference_mel, mel_lens): + """Uses a pretrained vocoder to generate audio from predicted mel + spectogram. By default, uses speechbrain hifigan. + + Arguments + --------- + inference_mel: torch.Tensor + predicted mel from fastspeech2 inference + mel_lens: torch.Tensor + predicted mel lengths from fastspeech2 inference + used to mask the noise from padding + + Returns + ------- + None + """ + if self.last_batch is None: + return + *_, wavs = self.last_batch + + inference_mel = inference_mel[: self.hparams.progress_batch_sample_size] + mel_lens = mel_lens[0 : self.hparams.progress_batch_sample_size] + assert ( + self.hparams.vocoder == "hifi-gan" + and self.hparams.pretrained_vocoder is True + ), "Specified vocoder not supported yet" + logger.info( + f"Generating audio with pretrained {self.hparams.vocoder_source} vocoder" + ) + hifi_gan = HIFIGAN.from_hparams( + source=self.hparams.vocoder_source, + savedir=self.hparams.vocoder_download_path, + ) + waveforms = hifi_gan.decode_batch( + inference_mel.transpose(2, 1), mel_lens, self.hparams.hop_length + ) + for idx, wav in enumerate(waveforms): + path = os.path.join( + self.hparams.progress_sample_path, + str(self.last_epoch), + f"pred_{Path(wavs[idx]).stem}.wav", + ) + audio_io.save(path, wav, self.hparams.sample_rate) + + def batch_to_device(self, 
batch, return_metadata=False): + """Transfers the batch to the target device + + Arguments + --------- + batch: tuple + the batch to use + return_metadata: bool + Whether to additionally return labels and wavs. + + Returns + ------- + x: tuple + phonemes, spectrogram, pitch, energy + y: tuple + spectrogram, pitch, energy, mel_lengths, input_lengths + metadata: tuple + labels, wavs + """ + + ( + phoneme_padded, + input_lengths, + mel_padded, + pitch_padded, + energy_padded, + output_lengths, + # len_x, + labels, + wavs, + ) = batch + + # durations = durations.to(self.device, non_blocking=True).long() + phonemes = phoneme_padded.to(self.device, non_blocking=True).long() + input_lengths = input_lengths.to(self.device, non_blocking=True).long() + spectogram = mel_padded.to(self.device, non_blocking=True).float() + pitch = pitch_padded.to(self.device, non_blocking=True).float() + energy = energy_padded.to(self.device, non_blocking=True).float() + mel_lengths = output_lengths.to(self.device, non_blocking=True).long() + x = (phonemes, spectogram, pitch, energy) + y = (spectogram, pitch, energy, mel_lengths, input_lengths) + metadata = (labels, wavs) + if return_metadata: + return x, y, metadata + return x, y + + +def dataio_prepare(hparams): + "Creates the datasets and their data processing pipelines." + # Load lexicon + lexicon = hparams["lexicon"] + input_encoder = hparams.get("input_encoder") + + # add a dummy symbol for idx 0 - used for padding. + lexicon = ["@@"] + lexicon + input_encoder.update_from_iterable(lexicon, sequence_input=False) + input_encoder.add_unk() + + # load audio, text and durations on the fly; encode audio and text. 
+ @sb.utils.data_pipeline.takes("wav", "phonemes", "pitch") + @sb.utils.data_pipeline.provides("mel_text_pair") + def audio_pipeline(wav, phonemes, pitch): + phoneme_seq = input_encoder.encode_sequence_torch(phonemes).int() + + audio, fs = audio_io.load(wav) + audio = audio.squeeze() + mel, energy = hparams["mel_spectogram"](audio=audio) + + pitch = np.load(pitch) + pitch = torch.from_numpy(pitch) + pitch = pitch[: mel.shape[-1]] + return phoneme_seq, mel, pitch, energy, len(phoneme_seq), len(mel) + + # define splits and load it as sb dataset + datasets = {} + + for dataset in hparams["splits"]: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams[f"{dataset}_json"], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline], + output_keys=["mel_text_pair", "wav", "label", "pitch"], + ) + return datasets + + +def main(): + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + sb.utils.distributed.ddp_init_group(run_opts) + + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + from ljspeech_prepare import prepare_ljspeech + + sb.utils.distributed.run_on_main( + prepare_ljspeech, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "splits": hparams["splits"], + "split_ratio": hparams["split_ratio"], + "model_name": hparams["model"].__class__.__name__, + "seed": hparams["seed"], + "pitch_n_fft": hparams["n_fft"], + "pitch_hop_length": hparams["hop_length"], + "pitch_min_f0": hparams["min_f0"], + "pitch_max_f0": hparams["max_f0"], + "skip_prep": hparams["skip_prep"], + "use_custom_cleaner": True, + "device": "cuda", + }, + ) + + datasets = dataio_prepare(hparams) + + # Brain class initialization + fastspeech2_brain = FastSpeech2Brain( + 
modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + # Training + fastspeech2_brain.fit( + fastspeech2_brain.hparams.epoch_counter, + datasets["train"], + datasets["valid"], + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + +if __name__ == "__main__": + main() diff --git a/recipes/LJSpeech/TTS/ljspeech_prepare.py b/recipes/LJSpeech/TTS/ljspeech_prepare.py deleted file mode 100644 index 9ad05b97a3..0000000000 --- a/recipes/LJSpeech/TTS/ljspeech_prepare.py +++ /dev/null @@ -1,258 +0,0 @@ -""" -LJspeech data preparation. -Download: https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2 - -Authors - * Yingzhi WANG 2022 -""" - -import os -import csv -import json -import logging -import random -from speechbrain.dataio.dataio import ( - load_pkl, - save_pkl, -) - -logger = logging.getLogger(__name__) -OPT_FILE = "opt_ljspeech_prepare.pkl" -METADATA_CSV = "metadata.csv" -TRAIN_JSON = "train.json" -VALID_JSON = "valid.json" -TEST_JSON = "test.json" -WAVS = "wavs" - - -def prepare_ljspeech( - data_folder, - save_folder, - splits=["train", "valid"], - split_ratio=[90, 10], - seed=1234, - skip_prep=False, -): - """ - Prepares the csv files for the LJspeech datasets. - - Arguments - --------- - data_folder : str - Path to the folder where the original LJspeech dataset is stored. - save_folder : str - The directory where to store the csv files. - splits : list - List of splits to prepare. - split_ratio : list - Proportion for train and validation splits. - skip_prep: Bool - If True, skip preparation. 
- seed : int - Random seed - - Example - ------- - >>> from recipes.LJSpeech.TTS.ljspeech_prepare import prepare_ljspeech - >>> data_folder = 'data/LJspeech/' - >>> save_folder = 'save/' - >>> splits = ['train', 'valid'] - >>> split_ratio = [90, 10] - >>> seed = 1234 - >>> prepare_ljspeech(data_folder, save_folder, splits, split_ratio, seed) - """ - # setting seeds for reproducible code. - random.seed(seed) - - if skip_prep: - return - # Create configuration for easily skipping data_preparation stage - conf = { - "data_folder": data_folder, - "splits": splits, - "split_ratio": split_ratio, - "save_folder": save_folder, - "seed": seed, - } - - if not os.path.exists(save_folder): - os.makedirs(save_folder) - - # Setting ouput files - meta_csv = os.path.join(data_folder, METADATA_CSV) - wavs_folder = os.path.join(data_folder, WAVS) - - save_opt = os.path.join(save_folder, OPT_FILE) - save_json_train = os.path.join(save_folder, TRAIN_JSON) - save_json_valid = os.path.join(save_folder, VALID_JSON) - save_json_test = os.path.join(save_folder, TEST_JSON) - - # Check if this phase is already done (if so, skip it) - if skip(splits, save_folder, conf): - logger.info("Skipping preparation, completed in previous run.") - return - - # Additional check to make sure metadata.csv and wavs folder exists - assert os.path.exists(meta_csv), "metadata.csv does not exist" - assert os.path.exists(wavs_folder), "wavs/ folder does not exist" - - msg = "\tCreating json file for ljspeech Dataset.." 
- logger.info(msg) - - data_split, meta_csv = split_sets(data_folder, splits, split_ratio) - - # Prepare csv - if "train" in splits: - prepare_json( - data_split["train"], save_json_train, wavs_folder, meta_csv - ) - if "valid" in splits: - prepare_json( - data_split["valid"], save_json_valid, wavs_folder, meta_csv - ) - if "test" in splits: - prepare_json(data_split["test"], save_json_test, wavs_folder, meta_csv) - - save_pkl(conf, save_opt) - - -def skip(splits, save_folder, conf): - """ - Detects if the ljspeech data_preparation has been already done. - If the preparation has been done, we can skip it. - - Returns - ------- - bool - if True, the preparation phase can be skipped. - if False, it must be done. - """ - # Checking json files - skip = True - - split_files = { - "train": TRAIN_JSON, - "valid": VALID_JSON, - "test": TEST_JSON, - } - - for split in splits: - if not os.path.isfile(os.path.join(save_folder, split_files[split])): - skip = False - - # Checking saved options - save_opt = os.path.join(save_folder, OPT_FILE) - if skip is True: - if os.path.isfile(save_opt): - opts_old = load_pkl(save_opt) - if opts_old == conf: - skip = True - else: - skip = False - else: - skip = False - return skip - - -def split_sets(data_folder, splits, split_ratio): - """Randomly splits the wav list into training, validation, and test lists. - Note that a better approach is to make sure that all the classes have the - same proportion of samples for each session. - - Arguments - --------- - wav_list : list - list of all the signals in the dataset - split_ratio: list - List composed of three integers that sets split ratios for train, - valid, and test sets, respectively. - For instance split_ratio=[80, 10, 10] will assign 80% of the sentences - to training, 10% for validation, and 10% for test. - - Returns - ------ - dictionary containing train, valid, and test splits. 
- """ - meta_csv = os.path.join(data_folder, METADATA_CSV) - csv_reader = csv.reader( - open(meta_csv), delimiter="|", quoting=csv.QUOTE_NONE - ) - - meta_csv = list(csv_reader) - - index_for_sessions = [] - session_id_start = "LJ001" - index_this_session = [] - for i in range(len(meta_csv)): - session_id = meta_csv[i][0].split("-")[0] - if session_id == session_id_start: - index_this_session.append(i) - if i == len(meta_csv) - 1: - index_for_sessions.append(index_this_session) - else: - index_for_sessions.append(index_this_session) - session_id_start = session_id - index_this_session = [i] - - session_len = [len(session) for session in index_for_sessions] - - data_split = {} - for i, split in enumerate(splits): - data_split[split] = [] - for j in range(len(index_for_sessions)): - if split == "train": - random.shuffle(index_for_sessions[j]) - n_snts = int(session_len[j] * split_ratio[i] / sum(split_ratio)) - data_split[split].extend(index_for_sessions[j][0:n_snts]) - del index_for_sessions[j][0:n_snts] - if split == "valid": - if "test" in splits: - random.shuffle(index_for_sessions[j]) - n_snts = int( - session_len[j] * split_ratio[i] / sum(split_ratio) - ) - data_split[split].extend(index_for_sessions[j][0:n_snts]) - del index_for_sessions[j][0:n_snts] - else: - data_split[split].extend(index_for_sessions[j]) - if split == "test": - data_split[split].extend(index_for_sessions[j]) - - return data_split, meta_csv - - -def prepare_json(seg_lst, json_file, wavs_folder, csv_reader): - """ - Creates json file given a list of indexes. - - Arguments - --------- - seg_list : list - The list of json indexes of a given data split. 
- json_file : str - Output json path - wavs_folder : str - LJspeech wavs folder - csv_reader : _csv.reader - LJspeech metadata - - Returns - ------- - None - """ - json_dict = {} - for index in seg_lst: - id = list(csv_reader)[index][0] - wav = os.path.join(wavs_folder, f"{id}.wav") - label = list(csv_reader)[index][2] - json_dict[id] = { - "wav": wav, - "label": label, - "segment": True if "train" in json_file else False, - } - - # Writing the dictionary to the json file - with open(json_file, mode="w") as json_f: - json.dump(json_dict, json_f, indent=2) - - logger.info(f"{json_file} successfully created!") diff --git a/recipes/LJSpeech/TTS/tacotron2/hparams/train.yaml b/recipes/LJSpeech/TTS/tacotron2/hparams/train.yaml index 65077c247f..bcf52553c4 100644 --- a/recipes/LJSpeech/TTS/tacotron2/hparams/train.yaml +++ b/recipes/LJSpeech/TTS/tacotron2/hparams/train.yaml @@ -3,7 +3,7 @@ # Tokens: Raw characters (English text) # losses: Transducer # Training: LJSpeech -# Authors: Georges Abous-Rjeili, Artem Ploujnikov, Yingzhi Wang +# Authors: Georges Abou-Rjeili, Artem Ploujnikov, Yingzhi Wang # ############################################################################ @@ -11,7 +11,7 @@ # Experiment Parameters and setup # ################################### seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref ./results/tacotron2/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -65,7 +65,7 @@ n_mel_channels: 80 n_fft: 1024 mel_fmin: 0.0 mel_fmax: 8000.0 -mel_normalized: null +mel_normalized: False power: 1 norm: "slaney" mel_scale: "slaney" @@ -77,6 +77,7 @@ dynamic_range_compression: True learning_rate: 0.001 weight_decay: 0.000006 batch_size: 64 #minimum 2 +num_workers: 8 mask_padding: True guided_attention_sigma: 0.2 guided_attention_weight: 50.0 @@ -87,17 +88,17 @@ gate_loss_weight: 1.0 train_dataloader_opts: batch_size: !ref drop_last: False #True #False - num_workers: 
8 + num_workers: !ref collate_fn: !new:speechbrain.lobes.models.Tacotron2.TextMelCollate valid_dataloader_opts: batch_size: !ref - num_workers: 8 + num_workers: !ref collate_fn: !new:speechbrain.lobes.models.Tacotron2.TextMelCollate test_dataloader_opts: batch_size: !ref - num_workers: 8 + num_workers: !ref collate_fn: !new:speechbrain.lobes.models.Tacotron2.TextMelCollate ################################ diff --git a/recipes/LJSpeech/TTS/tacotron2/ljspeech_prepare.py b/recipes/LJSpeech/TTS/tacotron2/ljspeech_prepare.py new file mode 120000 index 0000000000..2f703273cb --- /dev/null +++ b/recipes/LJSpeech/TTS/tacotron2/ljspeech_prepare.py @@ -0,0 +1 @@ +../../ljspeech_prepare.py \ No newline at end of file diff --git a/recipes/LJSpeech/TTS/tacotron2/train.py b/recipes/LJSpeech/TTS/tacotron2/train.py index c7166e41cf..50f7707342 100644 --- a/recipes/LJSpeech/TTS/tacotron2/train.py +++ b/recipes/LJSpeech/TTS/tacotron2/train.py @@ -1,31 +1,33 @@ -# -*- coding: utf-8 -*- """ - Recipe for training the Tacotron Text-To-Speech model, an end-to-end - neural text-to-speech (TTS) system +Recipe for training the Tacotron Text-To-Speech model, an end-to-end +neural text-to-speech (TTS) system - To run this recipe, do the following: - # python train.py --device=cuda:0 --max_grad_norm=1.0 --data_folder=/your_folder/LJSpeech-1.1 hparams/train.yaml +To run this recipe, do the following: +# python train.py --device=cuda:0 --max_grad_norm=1.0 --data_folder=/your_folder/LJSpeech-1.1 hparams/train.yaml - to infer simply load saved model and do - savemodel.infer(text_Sequence,len(textsequence)) +to infer simply load saved model and do +savemodel.infer(text_Sequence,len(textsequence)) - were text_Sequence is the ouput of the text_to_sequence function from - textToSequence.py (from textToSequence import text_to_sequence) +were text_Sequence is the output of the text_to_sequence function from +textToSequence.py (from textToSequence import text_to_sequence) - Authors - * Georges 
Abous-Rjeili 2021 - * Artem Ploujnikov 2021 - * Yingzhi Wang 2022 +Authors +* Georges Abou-Rjeili 2021 +* Artem Ploujnikov 2021 +* Yingzhi Wang 2022 """ -import torch -import speechbrain as sb + import sys -import logging + +import torch from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.text_to_sequence import text_to_sequence + +import speechbrain as sb from speechbrain.utils.data_utils import scalarize +from speechbrain.utils.logger import get_logger +from speechbrain.utils.text_to_sequence import text_to_sequence -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class Tacotron2Brain(sb.Brain): @@ -33,7 +35,8 @@ class Tacotron2Brain(sb.Brain): def on_fit_start(self): """Gets called at the beginning of ``fit()``, on multiple processes - if ``distributed_count > 0`` and backend is ddp and initializes statistics""" + if ``distributed_count > 0`` and backend is ddp and initializes statistics + """ self.hparams.progress_sample_logger.reset() self.last_epoch = 0 self.last_batch = None @@ -62,22 +65,10 @@ def compute_forward(self, batch, stage): max_input_length = input_lengths.max().item() return self.modules.model(inputs, alignments_dim=max_input_length) - def fit_batch(self, batch): - """Fits a single batch and applies annealing - - Arguments - --------- - batch: tuple - a training batch - - Returns - ------- - loss: torch.Tensor - detached loss - """ - result = super().fit_batch(batch) - self.hparams.lr_annealing(self.optimizer) - return result + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.lr_annealing(self.optimizer) def compute_objectives(self, predictions, batch, stage): """Computes the loss given the predicted and targeted outputs. @@ -96,7 +87,7 @@ def compute_objectives(self, predictions, batch, stage): """ effective_batch = self.batch_to_device(batch) # Hold on to the batch for the inference sample. 
This is needed because - # the infernece sample is run from on_stage_end only, where + # the inference sample is run from on_stage_end only, where # batch information is not available self.last_batch = effective_batch # Hold on to a sample (for logging) @@ -112,8 +103,10 @@ def _compute_loss(self, predictions, batch, stage): --------- predictions: tuple model predictions - targets: tuple - ground truth data + batch: PaddedBatch + Inputs for this training iteration. + stage: sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- @@ -182,7 +175,7 @@ def batch_to_device(self, batch): Returns ------- - batch: tiuple + batch: tuple the batch on the correct device """ ( @@ -263,14 +256,16 @@ def on_stage_end(self, stage, stage_loss, epoch): meta=epoch_metadata, min_keys=["loss"], ckpt_predicate=( - lambda ckpt: ( - ckpt.meta["epoch"] - % self.hparams.keep_checkpoint_interval - != 0 + ( + lambda ckpt: ( + ckpt.meta["epoch"] + % self.hparams.keep_checkpoint_interval + != 0 + ) ) - ) - if self.hparams.keep_checkpoint_interval is not None - else None, + if self.hparams.keep_checkpoint_interval is not None + else None + ), ) output_progress_sample = ( self.hparams.progress_samples @@ -339,14 +334,12 @@ def audio_pipeline(wav, label): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -357,7 +350,6 @@ def audio_pipeline(wav, label): overrides=overrides, ) - sys.path.append("../") from ljspeech_prepare import prepare_ljspeech sb.utils.distributed.run_on_main( diff --git a/recipes/LJSpeech/TTS/vocoder/diffwave/hparams/train.yaml b/recipes/LJSpeech/TTS/vocoder/diffwave/hparams/train.yaml 
new file mode 100644 index 0000000000..43f9e22fca --- /dev/null +++ b/recipes/LJSpeech/TTS/vocoder/diffwave/hparams/train.yaml @@ -0,0 +1,169 @@ +# ################################################ +# Basic training parameters for a diffwave vocoder +# +# Author: +# * Yingzhi Wang 2022 +# ################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +data_folder: !PLACEHOLDER +output_folder: !ref ./results/diffwave/ +save_folder: !ref /save +progress_sample_path: !ref /samples +train_log: !ref /train_log.txt +progress_samples_interval: 10 + +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json +splits: ["train", "valid"] +split_ratio: [90, 10] +skip_prep: False +# The train logger writes training statistics to a file, as well as stdout. +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +keep_checkpoint_interval: 100 + +# conditional training length +segment_size: 15872 + +# Training Parameters +sample_rate: 22050 +number_of_epochs: 500 +batch_size: 16 +num_workers: 8 + +lr: 0.0002 + +# diffusion parameters +train_timesteps: 50 +beta_start: 0.0001 +beta_end: 0.05 +fast_sampling: True +fast_sampling_noise_schedule: [0.0001, 0.001, 0.01, 0.05, 0.2, 0.5] + +loss_l2_steps: 0 + +adam_beta1: 0.95 +adam_beta2: 0.999 +adam_weight_decay: 0.000001 +adam_epsilon: 0.00000001 + +train_dataloader_opts: + batch_size: !ref + drop_last: False + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + num_workers: !ref + +test_dataloader_opts: + batch_size: 1 + num_workers: !ref + +use_tensorboard: False +tensorboard_logs: !ref /logs/ + +residual_layers: 30 +residual_channels: 64 +dilation_cycle_length: 10 + +unconditional: False + +# Spectrogram Parameters +spec_n_fft: 1024 +spec_f_min: 0 +spec_f_max: 8000 +mel_normalized: False +spec_n_mels: 80 +spec_power: 1 
+spec_hop_length: 256 +spec_win_length: 1024 +spec_norm: "slaney" +spec_mel_scale: "slaney" +dynamic_range_compression: True + +# Feature extraction +mel_spectogram: !name:speechbrain.lobes.models.HifiGAN.mel_spectogram + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_fft: !ref + n_mels: !ref + f_min: !ref + f_max: !ref + power: !ref + normalized: !ref + norm: !ref + mel_scale: !ref + compression: !ref + +compute_cost: !new:speechbrain.nnet.schedulers.ScheduledLoss + schedule: + - loss_fn: !name:speechbrain.nnet.losses.mse_loss + steps: !ref + - loss_fn: !name:speechbrain.nnet.losses.l1_loss + + +# To design a custom model, either just edit the simple CustomModel +# class that's listed here, or replace this `!new` call with a line +# pointing to a different file you've defined. +diffwave: !new:speechbrain.lobes.models.DiffWave.DiffWave + input_channels: !ref + residual_layers: !ref + residual_channels: !ref + dilation_cycle_length: !ref + total_steps: !ref + unconditional: !ref + +noise: !new:speechbrain.nnet.diffusion.GaussianNoise + +diffusion: !new:speechbrain.lobes.models.DiffWave.DiffWaveDiffusion + model: !ref + beta_start: !ref + beta_end: !ref + timesteps: !ref + noise: !ref + +# The first object passed to the Brain class is this "Epoch Counter" +# which is saved by the Checkpointer so that training can be resumed +# if it gets interrupted at any point. +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +# Objects in "modules" dict will have their parameters moved to the correct +# device, as well as having train()/eval() called on them by the Brain class. +modules: + diffwave: !ref + diffusion: !ref + +# This optimizer will be constructed by the Brain class after all parameters +# are moved to the correct device. Then it will be added to the checkpointer. 
+opt_class: !name:torch.optim.AdamW + lr: !ref + betas: !ref (, ) + weight_decay: !ref + eps: !ref + +# This function manages learning rate annealing over the epochs. +# We here use the simple lr annealing method that linearly decreases +# the lr from the initial value to the final one. +# lr_annealing: !new:speechbrain.nnet.schedulers.WarmCoolDecayLRSchedule +# lr: !ref +# warmup: !ref +# cooldown: !ref +# total_steps: !ref + +# This object is used for saving the state of training both so that it +# can be resumed if it gets interrupted, and also so that the best checkpoint +# can be later loaded for evaluation or inference. +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + diffwave: !ref + counter: !ref diff --git a/recipes/LJSpeech/TTS/vocoder/diffwave/ljspeech_prepare.py b/recipes/LJSpeech/TTS/vocoder/diffwave/ljspeech_prepare.py new file mode 120000 index 0000000000..069e475ec6 --- /dev/null +++ b/recipes/LJSpeech/TTS/vocoder/diffwave/ljspeech_prepare.py @@ -0,0 +1 @@ +../../../ljspeech_prepare.py \ No newline at end of file diff --git a/recipes/LJSpeech/TTS/vocoder/diffwave/train.py b/recipes/LJSpeech/TTS/vocoder/diffwave/train.py new file mode 100644 index 0000000000..ce978628a4 --- /dev/null +++ b/recipes/LJSpeech/TTS/vocoder/diffwave/train.py @@ -0,0 +1,361 @@ +#!/usr/bin/env python3 +"""script to train a diffwave vocoder +See https://arxiv.org/pdf/2009.09761.pdf for more details + +Authors + * Yingzhi Wang 2022 +""" + +import os +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +class DiffWaveBrain(sb.Brain): + """Class that manages the training loop. 
See speechbrain.core.Brain.""" + + def compute_forward(self, batch, stage): + """Runs all the computation that transforms the input into the + model predictions. + Arguments + --------- + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + Returns + ------- + predictions : tuple + The (pred, noise, noisy_sample, lens) outputs of the diffusion training step. + """ + + # We first move the batch to the appropriate device. + batch = batch.to(self.device) + + x, _ = batch.mel + y, _ = batch.sig + + pred, noise, noisy_sample = self.modules.diffusion.train_sample( + y, + timesteps=None, + condition=x, + ) + + return pred, noise, noisy_sample, None + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted outputs. + Arguments + --------- + predictions : tuple + The output tuple from `compute_forward`. + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient.
+ """ + batch = batch.to(self.device) + x, _ = batch.mel + y, _ = batch.sig + self.last_batch = (x, y) + self._remember_sample(self.last_batch, predictions) + + preds, noise, noisy_sample, lens = predictions + + loss = self.hparams.compute_cost( + preds.squeeze(1), noise.squeeze(1), length=lens + ) + + self.last_loss_stats[stage] = {"loss": loss} + return loss + + def on_fit_start(self): + """Gets called at the beginning of ``fit()``, on multiple processes + if ``distributed_count > 0`` and backend is ddp and initializes statistics + """ + self.last_batch = None + self.last_loss_stats = {} + return super().on_fit_start() + + def _remember_sample(self, batch, predictions): + """Remembers samples of spectrograms and the batch for logging purposes + Arguments + --------- + batch: tuple + a training batch + predictions: tuple + predictions (raw output of the Tacotron model) + """ + mel, sig = batch + pred, noise, noisy_sample, steps = predictions + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a stage (TRAIN, VALID, Or TEST)""" + if stage == sb.Stage.VALID: + lr = self.optimizer.param_groups[0]["lr"] + self.hparams.train_logger.log_stats( + stats_meta={"Epoch": epoch, "lr": lr}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=self.last_loss_stats[sb.Stage.VALID], + ) + # The tensorboard_logger writes a summary to stdout and to the logfile. + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + stats_meta={"Epoch": epoch, "lr": lr}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=self.last_loss_stats[sb.Stage.VALID], + ) + + # Save the current checkpoint and delete previous checkpoints. 
+ epoch_metadata = { + **{"epoch": epoch}, + **self.last_loss_stats[sb.Stage.VALID], + } + self.checkpointer.save_and_keep_only( + meta=epoch_metadata, + end_of_epoch=True, + min_keys=["loss"], + ckpt_predicate=( + ( + lambda ckpt: ( + ckpt.meta["epoch"] + % self.hparams.keep_checkpoint_interval + != 0 + ) + ) + if self.hparams.keep_checkpoint_interval is not None + else None + ), + ) + + if epoch % self.hparams.progress_samples_interval == 0: + self.run_inference_sample("Valid") + + # We also write statistics about test data to stdout and to the TensorboardLogger. + if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( # 1#2# + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + self.run_inference_sample("Test") + + def run_inference_sample(self, name): + """Produces a sample in inference mode. This is called when producing + samples. 
+ """ + with torch.no_grad(): + if self.last_batch is None: + return + x, y = self.last_batch + + sig_out = self.modules.diffusion.inference( + unconditional=self.hparams.unconditional, + scale=self.hparams.spec_hop_length, + condition=x, + fast_sampling=self.hparams.fast_sampling, + fast_sampling_noise_schedule=self.hparams.fast_sampling_noise_schedule, + ) + + spec_out = self.hparams.mel_spectogram( + audio=sig_out.squeeze(1).cpu() + ) + + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_audio( + f"{name}/audio_target", y.squeeze(0), self.hparams.sample_rate + ) + self.tensorboard_logger.log_audio( + f"{name}/audio_pred", + sig_out.squeeze(0), + self.hparams.sample_rate, + ) + self.tensorboard_logger.log_figure(f"{name}/mel_target", x) + self.tensorboard_logger.log_figure(f"{name}/mel_pred", spec_out) + else: + # folder name is the current epoch for validation and "test" for test + folder = ( + self.hparams.epoch_counter.current + if name == "Valid" + else "test" + ) + self.save_audio("target", y.squeeze(1), folder) + self.save_audio("synthesized", sig_out, folder) + + def save_audio(self, name, data, epoch): + """Saves a single wav + Arguments + --------- + name: str + the name of the saved audio + data: torch.Tensor + the wave data to save + epoch: int or str + the epoch number (used in file path calculations) + or "test" for test stage + """ + target_path = os.path.join( + self.hparams.progress_sample_path, str(epoch) + ) + if not os.path.exists(target_path): + os.makedirs(target_path) + file_name = f"{name}.wav" + effective_file_name = os.path.join(target_path, file_name) + audio_io.save(effective_file_name, data.cpu(), 22050) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. 
+ """ + segment_size = hparams["segment_size"] + + # Define audio pipeline: + @sb.utils.data_pipeline.takes("wav", "segment") + @sb.utils.data_pipeline.provides("mel", "sig") + def audio_pipeline(wav, segment): + audio = sb.dataio.dataio.read_audio(wav) + audio = torch.FloatTensor(audio) + audio = audio.unsqueeze(0) + if segment: + if audio.size(1) >= segment_size: + max_audio_start = audio.size(1) - segment_size + audio_start = torch.randint(0, max_audio_start, (1,)) + audio = audio[:, audio_start : audio_start + segment_size] + else: + audio = torch.nn.functional.pad( + audio, (0, segment_size - audio.size(1)), "constant" + ) + + mel = hparams["mel_spectogram"](audio=audio.squeeze(0)) + + # for diffwave the audio length needs to be hop_length * mel_length + audio_length = mel.shape[-1] * hparams["spec_hop_length"] + audio = torch.nn.functional.pad( + audio, (0, audio_length - audio.size(1)), "constant" + ) + return mel, audio + + datasets = {} + data_info = { + "train": hparams["train_json"], + "valid": hparams["valid_json"], + "test": hparams["test_json"], + } + for dataset in hparams["splits"]: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline], + output_keys=["id", "mel", "sig"], + ) + + return datasets + + +def check_tensorboard(hparams): + """Checks whether Tensorboard is enabled and initializes the logger if it is + + Arguments + --------- + hparams: dict + the hyperparameter dictionary + """ + if hparams["use_tensorboard"]: + try: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs"] + ) + except ImportError: + logger.warning( + "Could not enable torch.TensorBoard logging - torch.TensorBoard is not available" + ) + hparams["use_tensorboard"] = False + + +# Recipe begins! 
+if __name__ == "__main__": + # Reading command line arguments. + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # Initialize ddp (useful only for multi-GPU DDP training). + sb.utils.distributed.ddp_init_group(run_opts) + + # Load hyperparameters file with command-line overrides. + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Check whether Tensorboard is available and enabled + check_tensorboard(hparams) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Create dataset objects "train", "valid", and "test". + sys.path.append("../../") + from ljspeech_prepare import prepare_ljspeech + + sb.utils.distributed.run_on_main( + prepare_ljspeech, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "splits": hparams["splits"], + "split_ratio": hparams["split_ratio"], + "seed": hparams["seed"], + "skip_prep": hparams["skip_prep"], + }, + ) + + datasets = dataio_prepare(hparams) + + # Initialize the Brain object to prepare for mask training. + diffusion_brain = DiffWaveBrain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # The `fit()` method iterates the training loop, calling the methods + # necessary to update the parameters of the model. Since all objects + # with changing state are managed by the Checkpointer, training can be + # stopped at any point, and will be resumed on next call. 
+ diffusion_brain.fit( + epoch_counter=diffusion_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + # Load the best checkpoint for evaluation + if "test" in datasets: + test_stats = diffusion_brain.evaluate( + test_set=datasets["test"], + min_key="error", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/LJSpeech/TTS/vocoder/hifi_gan/hparams/train.yaml b/recipes/LJSpeech/TTS/vocoder/hifigan/hparams/train.yaml similarity index 96% rename from recipes/LJSpeech/TTS/vocoder/hifi_gan/hparams/train.yaml rename to recipes/LJSpeech/TTS/vocoder/hifigan/hparams/train.yaml index fce3ea7cf9..920a135819 100644 --- a/recipes/LJSpeech/TTS/vocoder/hifi_gan/hparams/train.yaml +++ b/recipes/LJSpeech/TTS/vocoder/hifigan/hparams/train.yaml @@ -2,7 +2,7 @@ # Experiment Parameters and setup # ################################### seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref ./results/hifi_gan/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -34,7 +34,7 @@ n_mel_channels: 80 n_fft: 1024 mel_fmin: 0.0 mel_fmax: 8000 -mel_normalized: null +mel_normalized: False power: 1 norm: "slaney" mel_scale: "slaney" @@ -48,19 +48,20 @@ weight_decay: 0.9999 adam_b1: 0.8 adam_b2: 0.99 batch_size: 32 #minimum 2 +num_workers: 8 train_dataloader_opts: batch_size: !ref drop_last: False - num_workers: 8 + num_workers: !ref valid_dataloader_opts: batch_size: 1 - num_workers: 8 + num_workers: !ref test_dataloader_opts: batch_size: 1 - num_workers: 8 + num_workers: !ref ################################ # Model Parameters and model # ################################ diff --git a/recipes/LJSpeech/TTS/vocoder/hifigan/ljspeech_prepare.py b/recipes/LJSpeech/TTS/vocoder/hifigan/ljspeech_prepare.py new file mode 120000 index 
0000000000..069e475ec6 --- /dev/null +++ b/recipes/LJSpeech/TTS/vocoder/hifigan/ljspeech_prepare.py @@ -0,0 +1 @@ +../../../ljspeech_prepare.py \ No newline at end of file diff --git a/recipes/LJSpeech/TTS/vocoder/hifi_gan/train.py b/recipes/LJSpeech/TTS/vocoder/hifigan/train.py similarity index 89% rename from recipes/LJSpeech/TTS/vocoder/hifi_gan/train.py rename to recipes/LJSpeech/TTS/vocoder/hifigan/train.py index 7e5e16d662..3d3bf2d89b 100644 --- a/recipes/LJSpeech/TTS/vocoder/hifi_gan/train.py +++ b/recipes/LJSpeech/TTS/vocoder/hifigan/train.py @@ -10,14 +10,16 @@ * Yingzhi WANG 2022 """ +import copy +import os import sys + import torch -import copy from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import scalarize -import torchaudio -import os class HifiGanBrain(sb.Brain): @@ -33,12 +35,19 @@ def compute_forward(self, batch, stage): stage: speechbrain.Stage the training stage + Returns + ------- + y_g_hat : torch.Tensor + scores_fake : torch.Tensor + feats_fake : torch.Tensor + scores_real : torch.Tensor + feats_real : torch.Tensor """ batch = batch.to(self.device) x, _ = batch.mel y, _ = batch.sig - # generate sythesized waveforms + # generate synthesized waveforms y_g_hat = self.modules.generator(x)[:, :, : y.size(2)] # get scores and features from discriminator for real and synthesized waveforms @@ -48,14 +57,13 @@ def compute_forward(self, batch, stage): return (y_g_hat, scores_fake, feats_fake, scores_real, feats_real) def compute_objectives(self, predictions, batch, stage): - """Computes and combines generator and discriminator losses - """ + """Computes and combines generator and discriminator losses""" batch = batch.to(self.device) x, _ = batch.mel y, _ = batch.sig # Hold on to the batch for the inference sample. 
This is needed because - # the infernece sample is run from on_stage_end only, where + # the inference sample is run from on_stage_end only, where # batch information is not available self.last_batch = (x, y) @@ -64,7 +72,7 @@ def compute_objectives(self, predictions, batch, stage): y_hat, scores_fake, feats_fake, scores_real, feats_real = predictions loss_g = self.hparams.generator_loss( - y_hat, y, scores_fake, feats_fake, feats_real + stage, y_hat, y, scores_fake, feats_fake, feats_real ) loss_d = self.hparams.discriminator_loss(scores_fake, scores_real) loss = {**loss_g, **loss_d} @@ -72,8 +80,7 @@ def compute_objectives(self, predictions, batch, stage): return loss def fit_batch(self, batch): - """Train discriminator and generator adversarially - """ + """Train discriminator and generator adversarially""" batch = batch.to(self.device) y, _ = batch.sig @@ -104,8 +111,7 @@ def fit_batch(self, batch): return loss_g.detach().cpu() def evaluate_batch(self, batch, stage): - """Evaluate one batch - """ + """Evaluate one batch""" out = self.compute_forward(batch, stage=stage) loss = self.compute_objectives(out, batch, stage=stage) loss_g = loss["G_loss"] @@ -153,6 +159,11 @@ def init_optimizers(self): "scheduler_d", self.scheduler_d ) + def zero_grad(self, set_to_none=False): + if self.opt_class is not None: + self.optimizer_g.zero_grad(set_to_none) + self.optimizer_d.zero_grad(set_to_none) + def _remember_sample(self, batch, predictions): """Remembers samples of spectrograms and the batch for logging purposes @@ -167,8 +178,7 @@ def _remember_sample(self, batch, predictions): y_hat, scores_fake, feats_fake, scores_real, feats_real = predictions def on_stage_end(self, stage, stage_loss, epoch): - """Gets called at the end of a stage (TRAIN, VALID, Or TEST) - """ + """Gets called at the end of a stage (TRAIN, VALID, Or TEST)""" if stage == sb.Stage.VALID: # Update learning rate self.scheduler_g.step() @@ -199,19 +209,21 @@ def on_stage_end(self, stage, stage_loss, 
epoch): end_of_epoch=True, min_keys=["loss"], ckpt_predicate=( - lambda ckpt: ( - ckpt.meta["epoch"] - % self.hparams.keep_checkpoint_interval - != 0 + ( + lambda ckpt: ( + ckpt.meta["epoch"] + % self.hparams.keep_checkpoint_interval + != 0 + ) ) - ) - if self.hparams.keep_checkpoint_interval is not None - else None, + if self.hparams.keep_checkpoint_interval is not None + else None + ), ) self.run_inference_sample("Valid") - # We also write statistics about test data to stdout and to the TensorboardLogger. + # We also write statistics about test data to stdout and to the torch.TensorboardLogger. if stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( # 1#2# {"Epoch loaded": self.hparams.epoch_counter.current}, @@ -281,7 +293,7 @@ def save_audio(self, name, data, epoch): os.makedirs(target_path) file_name = f"{name}.wav" effective_file_name = os.path.join(target_path, file_name) - torchaudio.save(effective_file_name, data.cpu(), 22050) + audio_io.save(effective_file_name, data.cpu(), 22050) def dataio_prepare(hparams): @@ -329,13 +341,15 @@ def audio_pipeline(wav, segment): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + # Create experiment directory sb.create_experiment_directory( experiment_directory=hparams["output_folder"], @@ -343,7 +357,6 @@ def audio_pipeline(wav, segment): overrides=overrides, ) - sys.path.append("../../") from ljspeech_prepare import prepare_ljspeech sb.utils.distributed.run_on_main( @@ -375,8 +388,10 @@ def audio_pipeline(wav, segment): ) if hparams["use_tensorboard"]: - hifi_gan_brain.tensorboard_logger = sb.utils.train_logger.TensorboardLogger( - save_dir=hparams["output_folder"] + 
"/tensorboard" + hifi_gan_brain.tensorboard_logger = ( + sb.utils.train_logger.TensorboardLogger( + save_dir=hparams["output_folder"] + "/tensorboard" + ) ) # Training diff --git a/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/extract_code.py b/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/extract_code.py new file mode 100644 index 0000000000..1717f3a33c --- /dev/null +++ b/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/extract_code.py @@ -0,0 +1,261 @@ +""" +Apply K-means clustering over acoustic features to extract speech units for HiFi-GAN training. + +Authors + * Jarod Duret 2023 +""" + +import json +import logging +import pathlib as pl + +import numpy as np +import torch +import torchaudio +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_pkl, save_pkl +from speechbrain.integrations.huggingface import hubert, wav2vec2, wavlm +from speechbrain.integrations.huggingface.discrete_ssl import DiscreteSSL +from speechbrain.utils.logger import get_logger + +OPT_FILE = "opt_ljspeech_extract_code.pkl" +TRAIN_JSON = "train.json" +VALID_JSON = "valid.json" +TEST_JSON = "test.json" + +ENCODER_CLASSES = { + "HuBERT": hubert.HuBERT, + "Wav2Vec2": wav2vec2.Wav2Vec2, + "WavLM": wavlm.WavLM, +} + + +def setup_logger(): + """Set up a logger with a log format and logging level.""" + log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" + logging.basicConfig(format=log_format, level=logging.INFO) + logger = get_logger(__name__) + return logger + + +def get_device(use_cuda): + """Determine and return the appropriate device for computation.""" + use_cuda = use_cuda and torch.cuda.is_available() + print("\n" + "=" * 30) + print(f"USE_CUDA SET TO: {use_cuda}") + print(f"CUDA AVAILABLE?: {torch.cuda.is_available()}") + print("=" * 30 + "\n") + return torch.device("cuda" if use_cuda else "cpu") + + +def np_array(tensor): + """Convert a Pytorch tensor to a Numpy array.""" + tensor = tensor.squeeze(0) 
+ tensor = tensor.detach().cpu() + return tensor.numpy() + + +def skip(splits, save_folder, conf): + """ + Detects if the ljspeech data_extraction has been already done. + If the extraction has been done, we can skip it. + + Arguments + --------- + splits : list + List of splits to check for existence. + save_folder : str + Path to folder containing prepared data. + conf : dict + Loaded configuration options. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + # Checking json files + skip = True + + split_files = { + "train": TRAIN_JSON, + "valid": VALID_JSON, + "test": TEST_JSON, + } + + for split in splits: + if not (save_folder / split_files[split]).exists(): + skip = False + + # Checking saved options + save_opt = save_folder / OPT_FILE + if skip is True: + if save_opt.is_file(): + opts_old = load_pkl(save_opt.as_posix()) + if opts_old == conf: + skip = True + else: + skip = False + else: + skip = False + return skip + + +def extract_ljspeech( + data_folder, + splits, + kmeans_folder, + kmeans_dataset, + num_clusters, + encoder_type, + encoder_source, + layer, + encoder_save_folder, + codes_save_folder, + sample_rate=16000, + skip_extract=False, +): + """ + Extract speech units for HiFi-GAN training on the LJspeech datasets. + + Arguments + --------- + data_folder : str + Path to the folder where the original LJspeech dataset is stored. + splits : list + List of splits to prepare. + kmeans_folder: str + Huggingface repository if that contains the pretrained kmean model. + kmeans_dataset : str + Name of the dataset that Kmeans model on HF repo is trained with. + num_clusters: (int) + determine the number of clusters of the targeted kmeans models to be downloaded. + encoder_type: str + Name of the model used as feature extractor. + encoder_source: str + Url to the model used as feature extractor. + layer: List[int] (default: [7]): + Determine which layers of SSL should be used to extract information. 
+ encoder_save_folder: str + Path to the folder where the ssl encoder is stored. + codes_save_folder: str + Path to the folder where the tokens are stored. + sample_rate: int + LjSpeech dataset sample rate + skip_extract: Bool + If True, skip extraction. + + Returns + ------- + None + + Example + ------- + >>> from recipes.LJSpeech.TTS.vocoder.hifi_gan_unit.extract_code import ( + ... extract_ljspeech, + ... ) + >>> data_folder = "data/LJspeech/" + >>> splits = ["train", "valid"] + >>> kmeans_folder = "speechbrain/SSL_Quantization" + >>> kmeans_dataset = "LibriSpeech-100-360-500" + >>> encoder_type = "HuBERT" + >>> encoder_source = "facebook/hubert-large-ll60k" + >>> layer = [7] + >>> encoder_save_folder = "ssl_encoder/" + >>> codes_save_folder = "codes/" + >>> extract_ljspeech( + ... data_folder, + ... splits, + ... kmeans_folder, + ... kmeans_dataset, + ... encoder_type, + ... encoder_source, + ... layer, + ... encoder_save_folder, + ... codes_save_folder, + ... ) + """ + logger = setup_logger() + + if skip_extract: + return + # Create configuration for easily skipping code extraction stage + conf = { + "data_folder": data_folder, + "splits": splits, + "save_folder": codes_save_folder, + "kmeans_folder": kmeans_folder, + "encoder_type": encoder_type, + "encoder_source": encoder_source, + "layer": layer, + } + + codes_save_folder = pl.Path(codes_save_folder) + # Check if this phase is already done (if so, skip it) + if skip(splits, codes_save_folder, conf): + logger.info("Skipping code extraction, completed in previous run.") + return + + # Fetch device + device = get_device(use_cuda=True) + + save_opt = codes_save_folder / OPT_FILE + data_folder = pl.Path(data_folder) + encoder_save_folder = pl.Path(encoder_save_folder) + codes_save_folder.mkdir(parents=True, exist_ok=True) + + logger.info(f"Loading encoder: {encoder_source} ...") + if encoder_type not in ENCODER_CLASSES: + raise TypeError("Not a supported Encoder") + + encoder_class =
ENCODER_CLASSES[encoder_type] + encoder = encoder_class( + source=encoder_source, + save_path=encoder_save_folder.as_posix(), + output_norm=False, + freeze=True, + freeze_feature_extractor=True, + apply_spec_augment=False, + output_all_hiddens=True, + ).to(device) + + discrete_encoder = DiscreteSSL( + save_path=encoder_save_folder.as_posix(), + ssl_model=encoder, + kmeans_dataset=kmeans_dataset, + kmeans_repo_id=kmeans_folder, + num_clusters=num_clusters, + ) + + for split in splits: + dataset_path = data_folder / f"{split}.json" + logger.info(f"Reading dataset from {dataset_path} ...") + meta_json = json.load(open(dataset_path, encoding="utf-8")) + for key in tqdm(meta_json.keys()): + item = meta_json[key] + wav = item["wav"] + with torch.no_grad(): + info = audio_io.info(wav) + audio = sb.dataio.dataio.read_audio(wav) + audio = torchaudio.transforms.Resample( + info.sample_rate, + sample_rate, + )(audio) + audio = audio.unsqueeze(0).to(device) + deduplicates = [False for _ in layer] + bpe_tokenizers = [None for _ in layer] + tokens, _, _ = discrete_encoder( + audio, + SSL_layers=layer, + deduplicates=deduplicates, + bpe_tokenizers=bpe_tokenizers, + ) + tokens = np_array(tokens.squeeze(0)) + np.save(codes_save_folder / f"{key}.npy", tokens) + + logger.info("Extraction completed.") + save_pkl(conf, save_opt) diff --git a/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/hparams/train.yaml b/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/hparams/train.yaml new file mode 100644 index 0000000000..614d0fdcdd --- /dev/null +++ b/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/hparams/train.yaml @@ -0,0 +1,233 @@ +############################################################################ +# Model: Unit HiFi-GAN +# Tokens: discrete speech units (K-means) +# Training: LJSpeech (English) +# Authors: Jarod Duret, Yingzhi Wang +# ############################################################################ + + +################################### +# Experiment Parameters and setup 
# +################################### +seed: 4321 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref ./results/hifi_gan/ +save_folder: !ref /save +train_log: !ref /train_log.txt +progress_sample_path: !ref /samples +epochs: 200 +keep_checkpoint_interval: 50 +use_tensorboard: False + +################################# +# Data files and pre-processing # +################################# +data_folder: !PLACEHOLDER # e.g, /datasets/ljspeech +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json + +splits: ["train", "valid", "test"] +split_ratio: [80, 10, 10] +skip_prep: False + +######################################################## +# Encoder | HF model # +#------------------------------------------------------# +# HuBERT | facebook/hubert-large-ll60k # +# Wav2Vec2 | facebook/wav2vec2-large-960h-lv60-self # +# WavLM | microsoft/wavlm-large # +######################################################## +kmeans_folder: speechbrain/SSL_Quantization +kmeans_dataset: LibriSpeech-100-360-500 +codes_save_folder: !ref /codes +encoder_type: HuBERT +encoder_hub: facebook/hubert-large-ll60k +encoder_save_folder: !ref /ssl_encoder +layer: [1, 3, 7, 12, 18, 23] +num_clusters: 1000 +skip_extract: False + +################################ +# Audio Parameters # +################################ + +segment_size: 8960 +code_hop_size: 320 +sample_rate: 16000 +layer_drop: True + +hop_length: 256 +win_length: 1024 +n_mel_channels: 80 +n_fft: 1024 +mel_fmin: 0.0 +mel_fmax: 8000 +mel_normalized: False +power: 1 +norm: "slaney" +mel_scale: "slaney" +dynamic_range_compression: True + +################################ +# Optimization Hyperparameters # +################################ +learning_rate: 0.0002 +weight_decay: 0.9999 +adam_b1: 0.8 +adam_b2: 0.99 +batch_size: 32 #minimum 32 + +train_dataloader_opts: + batch_size: !ref + drop_last: False + num_workers: 8 + +valid_dataloader_opts: + batch_size: 1 + num_workers: 8 + 
+test_dataloader_opts: + batch_size: 1 + num_workers: 8 + +################################ +# Model Parameters and model # +################################ +duration_predictor: False + +# embedding params +vocab_size: 6001 # K-means size * num layer + 1 for padding 1000x6+1 +embedding_dim: 128 + +# generator params +in_channels: 128 +out_channels: 1 + +var_pred_hidden_dim: 128 +var_pred_kernel_size: 3 +var_pred_dropout: 0.5 + +########################################################################################################################################################### +# version | resblock_type | upsample_kernel_sizes | upsample_factors | resblock_kernel_sizes | upsample_initial_channel | resblock_dilation_sizes +# 1 | "1" | [16,16,4,4] | [8, 8, 2, 2] | [3, 7, 11] | 512 | [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +# 2 | "1" | [16,16,4,4] | [8, 8, 2, 2] | [3, 7, 11] | 128 | [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +# 3 | "2" | [16,16,8] | [8,8,4] | [3,5,7] | 256 | [[1,2], [2,6], [3,12]] +########################################################################################################################################################### +resblock_type: "1" +resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +resblock_kernel_sizes: [3, 7, 11] +upsample_kernel_sizes: [11, 8, 8, 4, 4] +upsample_initial_channel: 512 +upsample_factors: [5, 4, 4, 2, 2] + +inference_padding: 5 +cond_channels: 0 +conv_post_bias: True + +mel_spectogram: !name:speechbrain.lobes.models.HifiGAN.mel_spectogram + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_fft: !ref + n_mels: !ref + f_min: !ref + f_max: !ref + power: !ref + normalized: !ref + norm: !ref + mel_scale: !ref + compression: !ref + +generator: !new:speechbrain.lobes.models.HifiGAN.UnitHifiganGenerator + in_channels: !ref + out_channels: !ref + resblock_type: !ref + resblock_dilation_sizes: !ref + resblock_kernel_sizes: !ref + upsample_kernel_sizes: !ref + upsample_initial_channel: !ref + upsample_factors: 
!ref + inference_padding: !ref + cond_channels: !ref + conv_post_bias: !ref + vocab_size: !ref + embedding_dim: !ref + duration_predictor: !ref + var_pred_hidden_dim: !ref + var_pred_kernel_size: !ref + var_pred_dropout: !ref + +discriminator: !new:speechbrain.lobes.models.HifiGAN.HifiganDiscriminator + +modules: + generator: !ref + discriminator: !ref + +#generator loss +stft_loss: null +mseg_loss: !new:speechbrain.lobes.models.HifiGAN.MSEGLoss +feat_match_loss: !new:speechbrain.lobes.models.HifiGAN.MelganFeatureLoss +l1_spec_loss: !new:speechbrain.lobes.models.HifiGAN.L1SpecLoss + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_mel_channels: !ref + n_fft: !ref + n_stft: !ref // 2 + 1 + mel_fmin: !ref + mel_fmax: null + mel_normalized: !ref + power: !ref + dynamic_range_compression: !ref +mseg_dur_loss: False + +generator_loss: !new:speechbrain.lobes.models.HifiGAN.GeneratorLoss + stft_loss: !ref + stft_loss_weight: 0 + mseg_loss: !ref + mseg_loss_weight: 1 + feat_match_loss: !ref + feat_match_loss_weight: 10 + l1_spec_loss: !ref + l1_spec_loss_weight: 45 + mseg_dur_loss: !ref + mseg_dur_loss_weight: 1 + +#discriminator loss +msed_loss: !new:speechbrain.lobes.models.HifiGAN.MSEDLoss + +discriminator_loss: !new:speechbrain.lobes.models.HifiGAN.DiscriminatorLoss + msed_loss: !ref + +#optimizer +opt_class_generator: !name:torch.optim.AdamW + lr: !ref + betas: [!ref , !ref ] + +opt_class_discriminator: !name:torch.optim.AdamW + lr: !ref + betas: [!ref , !ref ] + +sch_class_generator: !name:torch.optim.lr_scheduler.ExponentialLR + gamma: !ref + last_epoch: -1 + +sch_class_discriminator: !name:torch.optim.lr_scheduler.ExponentialLR + gamma: !ref + last_epoch: -1 + +#epoch object +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +#checkpointer +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: 
+ generator: !ref + discriminator: !ref + counter: !ref diff --git a/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/ljspeech_prepare.py b/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/ljspeech_prepare.py new file mode 120000 index 0000000000..069e475ec6 --- /dev/null +++ b/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/ljspeech_prepare.py @@ -0,0 +1 @@ +../../../ljspeech_prepare.py \ No newline at end of file diff --git a/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/train.py b/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/train.py new file mode 100644 index 0000000000..dba51b3518 --- /dev/null +++ b/recipes/LJSpeech/TTS/vocoder/hifigan_discrete/train.py @@ -0,0 +1,565 @@ +#!/usr/bin/env python3 +"""Recipe for training a hifi-gan vocoder on self-supervised representations. +For more details about hifi-gan: https://arxiv.org/pdf/2010.05646.pdf +For more details about speech synthesis using self-supervised representations: https://arxiv.org/pdf/2104.00355.pdf + +To run this recipe, do the following: +> python train.py hparams/train.yaml --data_folder=/path/to/LJspeech + +Authors + * Jarod Duret 2023 + * Yingzhi WANG 2022 +""" + +import copy +import pathlib as pl +import random +import sys + +import numpy as np +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.data_utils import scalarize + + +class HifiGanBrain(sb.Brain): + def compute_forward(self, batch, stage): + """The forward function, generates synthesized waveforms, + calculates the scores and the features of the discriminator + for synthesized waveforms and real waveforms. + + Arguments + --------- + batch : torch.Tensor or tensors + An element from the dataloader, including inputs for processing. 
+ stage : Stage + The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST + + Returns + ------- + Generator and Discriminator outputs + """ + batch = batch.to(self.device) + + x, _ = batch.code + y, _ = batch.sig + + # generate synthesized waveforms + y_g_hat, (log_dur_pred, log_dur) = self.modules.generator(x) + y_g_hat = y_g_hat[:, :, : y.size(2)] + + # get scores and features from discriminator for real and synthesized waveforms + scores_fake, feats_fake = self.modules.discriminator(y_g_hat.detach()) + scores_real, feats_real = self.modules.discriminator(y) + + return ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted outputs. + Arguments + --------- + predictions : torch.Tensor + The model generated spectrograms and other metrics from `compute_forward`. + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient. + """ + batch = batch.to(self.device) + + x, _ = batch.code + y, y_lens = batch.sig + + # Hold on to the batch for the inference sample. This is needed because + # the infernece sample is run from on_stage_end only, where + # batch information is not available + self.last_batch = (x, y) + + ( + y_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) = predictions + + loss_g = self.hparams.generator_loss( + stage, + y_hat, + y, + scores_fake, + feats_fake, + feats_real, + log_dur_pred, + log_dur, + ) + + loss_d = self.hparams.discriminator_loss(scores_fake, scores_real) + loss = {**loss_g, **loss_d} + self.last_loss_stats[stage] = scalarize(loss) + + return loss + + def fit_batch(self, batch): + """Fits a single batch. 
+ Arguments + --------- + batch: tuple + a training batch + Returns + ------- + loss: torch.Tensor + detached loss + """ + batch = batch.to(self.device) + y, _ = batch.sig + + outputs = self.compute_forward(batch, sb.core.Stage.TRAIN) + ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) = outputs + # calculate discriminator loss with the latest updated generator + loss_d = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[ + "D_loss" + ] + # First train the discriminator + self.optimizer_d.zero_grad() + loss_d.backward() + self.optimizer_d.step() + + # calculate generator loss with the latest updated discriminator + scores_fake, feats_fake = self.modules.discriminator(y_g_hat) + scores_real, feats_real = self.modules.discriminator(y) + outputs = ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) + loss_g = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[ + "G_loss" + ] + # Then train the generator + self.optimizer_g.zero_grad() + loss_g.backward() + self.optimizer_g.step() + + return loss_g.detach().cpu() + + def evaluate_batch(self, batch, stage): + """Evaluate one batch. + + Arguments + --------- + batch : list of torch.Tensors + Batch of data to use for evaluation. Default implementation assumes + this batch has two elements: inputs and targets. + stage : Stage + The stage of the experiment: Stage.VALID, Stage.TEST + + Returns + ------- + detached loss + """ + out = self.compute_forward(batch, stage=stage) + loss = self.compute_objectives(out, batch, stage=stage) + loss_g = loss["G_loss"] + return loss_g.detach().cpu() + + def on_fit_start(self): + """Gets called at the beginning of ``fit()``, on multiple processes + if ``distributed_count > 0`` and backend is ddp and initializes statistics. 
+ """ + self.last_epoch = 0 + self.last_batch = None + self.last_loss_stats = {} + return super().on_fit_start() + + def init_optimizers(self): + """Called during ``on_fit_start()``, initialize optimizers + after parameters are fully configured (e.g. DDP, jit). + """ + if self.opt_class is not None: + ( + opt_g_class, + opt_d_class, + sch_g_class, + sch_d_class, + ) = self.opt_class + + self.optimizer_g = opt_g_class(self.modules.generator.parameters()) + self.optimizer_d = opt_d_class( + self.modules.discriminator.parameters() + ) + self.optimizers_dict = { + "optimizer_g": self.optimizer_g, + "optimizer_d": self.optimizer_d, + } + + self.scheduler_g = sch_g_class(self.optimizer_g) + self.scheduler_d = sch_d_class(self.optimizer_d) + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "optimizer_g", self.optimizer_g + ) + self.checkpointer.add_recoverable( + "optimizer_d", self.optimizer_d + ) + self.checkpointer.add_recoverable( + "scheduler_g", self.scheduler_d + ) + self.checkpointer.add_recoverable( + "scheduler_d", self.scheduler_d + ) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch. + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + """ + if stage == sb.Stage.VALID: + # Update learning rate + self.scheduler_g.step() + self.scheduler_d.step() + lr_g = self.optimizer_g.param_groups[-1]["lr"] + lr_d = self.optimizer_d.param_groups[-1]["lr"] + + stats = { + **self.last_loss_stats[sb.Stage.VALID], + } + + self.hparams.train_logger.log_stats( # 1#2# + stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=stats, + ) + # The tensorboard_logger writes a summary to stdout and to the logfile. 
+ if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=stats, + ) + + # Save the current checkpoint and delete previous checkpoints. + epoch_metadata = { + **{"epoch": epoch}, + **self.last_loss_stats[sb.Stage.VALID], + } + if self.checkpointer is not None: + self.checkpointer.save_and_keep_only( + meta=epoch_metadata, + end_of_epoch=True, + min_keys=["loss"], + ckpt_predicate=( + ( + lambda ckpt: ( + ckpt.meta["epoch"] + % self.hparams.keep_checkpoint_interval + != 0 + ) + ) + if self.hparams.keep_checkpoint_interval is not None + else None + ), + ) + + self.run_inference_sample("Valid", epoch) + + # We also write statistics about test data to stdout and to the TensorboardLogger. + if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( # 1#2# + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + self.run_inference_sample("Test", epoch) + + def run_inference_sample(self, name, epoch): + """Produces a sample in inference mode. + This is called when producing samples. 
+ + Arguments + --------- + name: str + the name of the saved audio folder + epoch: int or str + the epoch number (used in file path calculations) + or "test" for test stage + + Returns + ------- + None + """ + with torch.no_grad(): + if self.last_batch is None: + return + x, y = self.last_batch + + # Preparing model for inference by removing weight norm + inference_generator = copy.deepcopy(self.hparams.generator) + inference_generator.remove_weight_norm() + if inference_generator.duration_predictor: + x = torch.unique_consecutive(x, dim=1) + sig_out = inference_generator.inference(x) + spec_out = self.hparams.mel_spectogram( + audio=sig_out.squeeze(0).cpu() + ) + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_audio( + f"{name}/audio_target", y.squeeze(0), self.hparams.sample_rate + ) + self.tensorboard_logger.log_audio( + f"{name}/audio_pred", + sig_out.squeeze(0), + self.hparams.sample_rate, + ) + self.tensorboard_logger.log_figure(f"{name}/mel_target", x) + self.tensorboard_logger.log_figure(f"{name}/mel_pred", spec_out) + else: + # folder name is the current epoch for validation and "test" for test + folder = ( + self.hparams.epoch_counter.current + if name == "Valid" + else "test" + ) + self.save_audio("target", y.squeeze(0), folder) + self.save_audio("synthesized", sig_out.squeeze(0), folder) + + def save_audio(self, name, data, epoch): + """Saves a single wav file. + + Arguments + --------- + name: str + the name of the saved audio + data: torch.Tensor + the wave data to save + epoch: int or str + the epoch number (used in file path calculations) + or "test" for test stage + """ + target_path = pl.Path(self.hparams.progress_sample_path) / str(epoch) + target_path.mkdir(parents=True, exist_ok=True) + file_name = str(target_path / f"{name}.wav") + audio_io.save(file_name, data.cpu(), 16000) + + +def sample_interval(seqs, segment_size): + "This function sample an interval of audio and code according to segment size." 
+ N = max([v.shape[-1] for v in seqs]) + seq_len = segment_size if segment_size > 0 else N + hops = [N // v.shape[-1] for v in seqs] + lcm = np.lcm.reduce(hops) + interval_start = 0 + interval_end = N // lcm - seq_len // lcm + start_step = random.randint(interval_start, interval_end) + + new_seqs = [] + for i, v in enumerate(seqs): + start = start_step * (lcm // hops[i]) + end = (start_step + seq_len // lcm) * (lcm // hops[i]) + new_seqs += [v[..., start:end]] + + return new_seqs + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + segment_size = hparams["segment_size"] + code_hop_size = hparams["code_hop_size"] + codes_folder = pl.Path(hparams["codes_save_folder"]) + + # Define audio pipeline: + @sb.utils.data_pipeline.takes("id", "wav", "segment") + @sb.utils.data_pipeline.provides("code", "sig") + def audio_pipeline(utt_id, wav, segment): + info = audio_io.info(wav) + audio = sb.dataio.dataio.read_audio(wav) + audio = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(audio) + + code = np.load(codes_folder / f"{utt_id}.npy") + + num_layer = len(hparams["layer"]) + offsets = np.arange(num_layer) * hparams["num_clusters"] + code = code + offsets + 1 + + if hparams["layer_drop"]: + num_layers_to_drop = np.random.randint(0, code.shape[1]) + if num_layers_to_drop > 0: + layers_to_drop = np.random.choice( + code.shape[1], size=num_layers_to_drop, replace=False + ) + code[:, layers_to_drop] = 0 + + code = torch.IntTensor(code) + + # Trim end of audio + code_length = min(audio.shape[0] // code_hop_size, code.shape[0]) + code = code[:code_length] + audio = audio[: code_length * code_hop_size] + + while audio.shape[0] < segment_size: + audio = torch.hstack([audio, audio]) + code = torch.hstack([code, code]) + audio = audio.unsqueeze(0) + + if segment: + code = code.swapdims(0, 1) + audio, code = 
sample_interval([audio, code], segment_size) + code = code.swapdims(0, 1) + + return code, audio + + datasets = {} + data_info = { + "train": hparams["train_json"], + "valid": hparams["valid_json"], + "test": hparams["test_json"], + } + for dataset in hparams["splits"]: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline], + output_keys=["id", "code", "sig"], + ) + + return datasets + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + from ljspeech_prepare import prepare_ljspeech + + sb.utils.distributed.run_on_main( + prepare_ljspeech, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "splits": hparams["splits"], + "split_ratio": hparams["split_ratio"], + "seed": hparams["seed"], + "skip_prep": hparams["skip_prep"], + }, + ) + + from extract_code import extract_ljspeech + + sb.utils.distributed.run_on_main( + extract_ljspeech, + kwargs={ + "data_folder": hparams["save_folder"], + "splits": hparams["splits"], + "kmeans_folder": hparams["kmeans_folder"], + "kmeans_dataset": hparams["kmeans_dataset"], + "num_clusters": hparams["num_clusters"], + "encoder_type": hparams["encoder_type"], + "encoder_source": hparams["encoder_hub"], + "layer": hparams["layer"], + "encoder_save_folder": hparams["encoder_save_folder"], + "codes_save_folder": hparams["codes_save_folder"], + 
"sample_rate": hparams["sample_rate"], + "skip_extract": hparams["skip_extract"], + }, + ) + + datasets = dataio_prepare(hparams) + + # Brain class initialization + hifi_gan_brain = HifiGanBrain( + modules=hparams["modules"], + opt_class=[ + hparams["opt_class_generator"], + hparams["opt_class_discriminator"], + hparams["sch_class_generator"], + hparams["sch_class_discriminator"], + ], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + if hparams["use_tensorboard"]: + hifi_gan_brain.tensorboard_logger = ( + sb.utils.train_logger.TensorboardLogger( + save_dir=hparams["output_folder"] + "/tensorboard" + ) + ) + + # Training + hifi_gan_brain.fit( + hifi_gan_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + # Test + if "test" in datasets: + hifi_gan_brain.evaluate( + datasets["test"], + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/LJSpeech/ljspeech_prepare.py b/recipes/LJSpeech/ljspeech_prepare.py new file mode 100644 index 0000000000..55c452bcdd --- /dev/null +++ b/recipes/LJSpeech/ljspeech_prepare.py @@ -0,0 +1,728 @@ +""" +LJspeech data preparation. 
+Download: https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2 + +Authors + * Yingzhi WANG 2022 + * Sathvik Udupa 2022 + * Pradnya Kandarkar 2023 +""" + +import csv +import json +import os +import random +import re + +import numpy as np +import tgt +import torch +import torchaudio +from tqdm import tqdm +from unidecode import unidecode + +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_pkl, save_pkl +from speechbrain.inference.text import GraphemeToPhoneme +from speechbrain.utils.data_utils import download_file +from speechbrain.utils.logger import get_logger +from speechbrain.utils.text_to_sequence import _g2p_keep_punctuations + +logger = get_logger(__name__) +OPT_FILE = "opt_ljspeech_prepare.pkl" +METADATA_CSV = "metadata.csv" +TRAIN_JSON = "train.json" +VALID_JSON = "valid.json" +TEST_JSON = "test.json" +WAVS = "wavs" +DURATIONS = "durations" + +logger = get_logger(__name__) +OPT_FILE = "opt_ljspeech_prepare.pkl" + + +def prepare_ljspeech( + data_folder, + save_folder, + splits=["train", "valid"], + split_ratio=[90, 10], + model_name=None, + seed=1234, + pitch_n_fft=1024, + pitch_hop_length=256, + pitch_min_f0=65, + pitch_max_f0=400, + skip_prep=False, + use_custom_cleaner=False, + device="cpu", +): + """ + Prepares the csv files for the LJspeech datasets. 
+ + Arguments + --------- + data_folder : str + Path to the folder where the original LJspeech dataset is stored + save_folder : str + The directory where to store the csv/json files + splits : list + List of dataset splits to prepare + split_ratio : list + Proportion for dataset splits + model_name : str + Model name (used to prepare additional model specific data) + seed : int + Random seed + pitch_n_fft : int + Number of fft points for pitch computation + pitch_hop_length : int + Hop length for pitch computation + pitch_min_f0 : int + Minimum f0 for pitch computation + pitch_max_f0 : int + Max f0 for pitch computation + skip_prep : bool + If True, skip preparation + use_custom_cleaner : bool + If True, uses custom cleaner defined for this recipe + device : str + Device for to be used for computation (used as required) + + Returns + ------- + None + + Example + ------- + >>> from recipes.LJSpeech.TTS.ljspeech_prepare import prepare_ljspeech + >>> data_folder = "data/LJspeech/" + >>> save_folder = "save/" + >>> splits = ["train", "valid"] + >>> split_ratio = [90, 10] + >>> seed = 1234 + >>> prepare_ljspeech(data_folder, save_folder, splits, split_ratio, seed) + """ + # Sets seeds for reproducible code + random.seed(seed) + + if skip_prep: + return + + # Creating configuration for easily skipping data_preparation stage + conf = { + "data_folder": data_folder, + "splits": splits, + "split_ratio": split_ratio, + "save_folder": save_folder, + "seed": seed, + } + if not os.path.exists(save_folder): + os.makedirs(save_folder) + + # Setting output files + meta_csv = os.path.join(data_folder, METADATA_CSV) + wavs_folder = os.path.join(data_folder, WAVS) + + save_opt = os.path.join(save_folder, OPT_FILE) + save_json_train = os.path.join(save_folder, TRAIN_JSON) + save_json_valid = os.path.join(save_folder, VALID_JSON) + save_json_test = os.path.join(save_folder, TEST_JSON) + + phoneme_alignments_folder = None + duration_folder = None + pitch_folder = None + # Setting up 
additional folders required for FastSpeech2 + if model_name is not None and "FastSpeech2" in model_name: + # This step requires phoneme alignments to be present in the data_folder + # We automatically download the alignments from https://www.dropbox.com/s/v28x5ldqqa288pu/LJSpeech.zip + # Download and unzip LJSpeech phoneme alignments from here: https://drive.google.com/drive/folders/1DBRkALpPd6FL9gjHMmMEdHODmkgNIIK4 + alignment_URL = ( + "https://www.dropbox.com/s/v28x5ldqqa288pu/LJSpeech.zip?dl=1" + ) + phoneme_alignments_folder = os.path.join( + data_folder, "TextGrid", "LJSpeech" + ) + download_file( + alignment_URL, data_folder + "/alignments.zip", unpack=True + ) + + duration_folder = os.path.join(data_folder, "durations") + if not os.path.exists(duration_folder): + os.makedirs(duration_folder) + + # extract pitch for both Fastspeech2 and FastSpeech2WithAligner models + pitch_folder = os.path.join(data_folder, "pitch") + if not os.path.exists(pitch_folder): + os.makedirs(pitch_folder) + + # Check if this phase is already done (if so, skip it) + if skip(splits, save_folder, conf): + logger.info("Skipping preparation, completed in previous run.") + return + + # Additional check to make sure metadata.csv and wavs folder exists + assert os.path.exists(meta_csv), "metadata.csv does not exist" + assert os.path.exists(wavs_folder), "wavs/ folder does not exist" + + # Prepare data splits + msg = "Creating json file for ljspeech Dataset.." 
+ logger.info(msg) + data_split, meta_csv = split_sets(data_folder, splits, split_ratio) + + if "train" in splits: + prepare_json( + model_name, + data_split["train"], + save_json_train, + wavs_folder, + meta_csv, + phoneme_alignments_folder, + duration_folder, + pitch_folder, + pitch_n_fft, + pitch_hop_length, + pitch_min_f0, + pitch_max_f0, + use_custom_cleaner, + device, + ) + if "valid" in splits: + prepare_json( + model_name, + data_split["valid"], + save_json_valid, + wavs_folder, + meta_csv, + phoneme_alignments_folder, + duration_folder, + pitch_folder, + pitch_n_fft, + pitch_hop_length, + pitch_min_f0, + pitch_max_f0, + use_custom_cleaner, + device, + ) + if "test" in splits: + prepare_json( + model_name, + data_split["test"], + save_json_test, + wavs_folder, + meta_csv, + phoneme_alignments_folder, + duration_folder, + pitch_folder, + pitch_n_fft, + pitch_hop_length, + pitch_min_f0, + pitch_max_f0, + use_custom_cleaner, + device, + ) + save_pkl(conf, save_opt) + + +def skip(splits, save_folder, conf): + """ + Detects if the ljspeech data_preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + splits : list + The portions of data to review. + save_folder : str + The path to the directory containing prepared files. + conf : dict + Configuration to match against saved config. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. 
+ """ + # Checking json files + skip = True + + split_files = { + "train": TRAIN_JSON, + "valid": VALID_JSON, + "test": TEST_JSON, + } + + for split in splits: + if not os.path.isfile(os.path.join(save_folder, split_files[split])): + skip = False + + # Checking saved options + save_opt = os.path.join(save_folder, OPT_FILE) + if skip is True: + if os.path.isfile(save_opt): + opts_old = load_pkl(save_opt) + if opts_old == conf: + skip = True + else: + skip = False + else: + skip = False + return skip + + +def split_sets(data_folder, splits, split_ratio): + """Randomly splits the wav list into training, validation, and test lists. + Note that a better approach is to make sure that all the classes have the + same proportion of samples for each session. + + Arguments + --------- + data_folder : str + The path to the directory containing the data. + splits : list + The list of the selected splits. + split_ratio : list + List composed of three integers that sets split ratios for train, + valid, and test sets, respectively. + For instance split_ratio=[80, 10, 10] will assign 80% of the sentences + to training, 10% for validation, and 10% for test. + + Returns + ------- + dictionary containing train, valid, and test splits. 
+ """ + meta_csv = os.path.join(data_folder, METADATA_CSV) + csv_reader = csv.reader( + open(meta_csv, newline="", encoding="utf-8"), + delimiter="|", + quoting=csv.QUOTE_NONE, + ) + + meta_csv = list(csv_reader) + + index_for_sessions = [] + session_id_start = "LJ001" + index_this_session = [] + for i in range(len(meta_csv)): + session_id = meta_csv[i][0].split("-")[0] + if session_id == session_id_start: + index_this_session.append(i) + if i == len(meta_csv) - 1: + index_for_sessions.append(index_this_session) + else: + index_for_sessions.append(index_this_session) + session_id_start = session_id + index_this_session = [i] + + session_len = [len(session) for session in index_for_sessions] + + data_split = {} + for i, split in enumerate(splits): + data_split[split] = [] + for j in range(len(index_for_sessions)): + if split == "train": + random.shuffle(index_for_sessions[j]) + n_snts = int(session_len[j] * split_ratio[i] / sum(split_ratio)) + data_split[split].extend(index_for_sessions[j][0:n_snts]) + del index_for_sessions[j][0:n_snts] + if split == "valid": + if "test" in splits: + random.shuffle(index_for_sessions[j]) + n_snts = int( + session_len[j] * split_ratio[i] / sum(split_ratio) + ) + data_split[split].extend(index_for_sessions[j][0:n_snts]) + del index_for_sessions[j][0:n_snts] + else: + data_split[split].extend(index_for_sessions[j]) + if split == "test": + data_split[split].extend(index_for_sessions[j]) + + return data_split, meta_csv + + +def prepare_json( + model_name, + seg_lst, + json_file, + wavs_folder, + csv_reader, + phoneme_alignments_folder, + durations_folder, + pitch_folder, + pitch_n_fft, + pitch_hop_length, + pitch_min_f0, + pitch_max_f0, + use_custom_cleaner=False, + device="cpu", +): + """ + Creates json file given a list of indexes. 
+ + Arguments + --------- + model_name : str + Model name (used to prepare additional model specific data) + seg_lst : list + The list of json indexes of a given data split + json_file : str + Output json path + wavs_folder : str + LJspeech wavs folder + csv_reader : _csv.reader + LJspeech metadata + phoneme_alignments_folder : path + Path where the phoneme alignments are stored + durations_folder : path + Folder where to store the duration values of each audio + pitch_folder : path + Folder where to store the pitch of each audio + pitch_n_fft : int + Number of fft points for pitch computation + pitch_hop_length : int + Hop length for pitch computation + pitch_min_f0 : int + Minimum f0 for pitch computation + pitch_max_f0 : int + Max f0 for pitch computation + use_custom_cleaner : bool + If True, uses custom cleaner defined for this recipe + device : str + Device for to be used for computation (used as required) + """ + + logger.info(f"preparing {json_file}.") + if model_name in ["Tacotron2", "FastSpeech2WithAlignment"]: + logger.info( + "Computing phonemes for LJSpeech labels using SpeechBrain G2P. This may take a while." + ) + g2p = GraphemeToPhoneme.from_hparams( + "speechbrain/soundchoice-g2p", run_opts={"device": device} + ) + if model_name is not None and "FastSpeech2" in model_name: + logger.info( + "Computing pitch as required for FastSpeech2. This may take a while." 
+ ) + + json_dict = {} + for index in tqdm(seg_lst): + # Common data preparation + id = list(csv_reader)[index][0] + wav = os.path.join(wavs_folder, f"{id}.wav") + label = list(csv_reader)[index][2] + if use_custom_cleaner: + label = custom_clean(label, model_name) + + # Compute duration + info = audio_io.info(wav) + duration = info.num_frames / info.sample_rate + + json_dict[id] = { + "uttid": id, + "wav": wav, + "label": label, + "segment": True if "train" in json_file else False, + "duration": duration, + } + + # FastSpeech2 specific data preparation + if model_name == "FastSpeech2": + audio, fs = audio_io.load(wav) + + # Parses phoneme alignments + textgrid_path = os.path.join( + phoneme_alignments_folder, f"{id}.TextGrid" + ) + textgrid = tgt.io.read_textgrid( + textgrid_path, include_empty_intervals=True + ) + + last_phoneme_flags = get_last_phoneme_info( + textgrid.get_tier_by_name("words"), + textgrid.get_tier_by_name("phones"), + ) + ( + phonemes, + duration, + start, + end, + trimmed_last_phoneme_flags, + ) = get_alignment( + textgrid.get_tier_by_name("phones"), + fs, + pitch_hop_length, + last_phoneme_flags, + ) + + # Gets label phonemes + label_phoneme = " ".join(phonemes) + spn_labels = [0] * len(phonemes) + for i in range(1, len(phonemes)): + if phonemes[i] == "spn": + spn_labels[i - 1] = 1 + if start >= end: + print(f"Skipping {id}") + continue + + # Saves durations + duration_file_path = os.path.join(durations_folder, f"{id}.npy") + np.save(duration_file_path, duration) + + # Computes pitch + audio = audio[:, int(fs * start) : int(fs * end)] + pitch_file = wav.replace(".wav", ".npy").replace( + wavs_folder, pitch_folder + ) + if not os.path.isfile(pitch_file): + pitch = torchaudio.functional.detect_pitch_frequency( + waveform=audio, + sample_rate=fs, + frame_time=(pitch_hop_length / fs), + win_length=3, + freq_low=pitch_min_f0, + freq_high=pitch_max_f0, + ).squeeze(0) + + # Concatenate last element to match duration. 
+ pitch = torch.cat([pitch, pitch[-1].unsqueeze(0)]) + + # Mean and Variance Normalization + mean = 256.1732939688805 + std = 328.319759158607 + + pitch = (pitch - mean) / std + + pitch = pitch[: sum(duration)] + np.save(pitch_file, pitch) + + # Updates data for the utterance + json_dict[id].update({"label_phoneme": label_phoneme}) + json_dict[id].update({"spn_labels": spn_labels}) + json_dict[id].update({"start": start}) + json_dict[id].update({"end": end}) + json_dict[id].update({"durations": duration_file_path}) + json_dict[id].update({"pitch": pitch_file}) + json_dict[id].update( + {"last_phoneme_flags": trimmed_last_phoneme_flags} + ) + + # FastSpeech2WithAlignment specific data preparation + if model_name == "FastSpeech2WithAlignment": + audio, fs = audio_io.load(wav) + # Computes pitch + pitch_file = wav.replace(".wav", ".npy").replace( + wavs_folder, pitch_folder + ) + if not os.path.isfile(pitch_file): + if torchaudio.__version__ < "2.1": + pitch = torchaudio.functional.compute_kaldi_pitch( + waveform=audio, + sample_rate=fs, + frame_length=(pitch_n_fft / fs * 1000), + frame_shift=(pitch_hop_length / fs * 1000), + min_f0=pitch_min_f0, + max_f0=pitch_max_f0, + )[0, :, 0] + else: + pitch = torchaudio.functional.detect_pitch_frequency( + waveform=audio, + sample_rate=fs, + frame_time=(pitch_hop_length / fs), + win_length=3, + freq_low=pitch_min_f0, + freq_high=pitch_max_f0, + ).squeeze(0) + + # Concatenate last element to match duration. 
+ pitch = torch.cat([pitch, pitch[-1].unsqueeze(0)]) + + # Mean and Variance Normalization + mean = 256.1732939688805 + std = 328.319759158607 + + pitch = (pitch - mean) / std + + np.save(pitch_file, pitch) + + phonemes = _g2p_keep_punctuations(g2p, label) + # Updates data for the utterance + json_dict[id].update({"phonemes": phonemes}) + json_dict[id].update({"pitch": pitch_file}) + + # Writing the dictionary to the json file + with open(json_file, mode="w", encoding="utf-8") as json_f: + json.dump(json_dict, json_f, indent=2) + + logger.info(f"{json_file} successfully created!") + + +def get_alignment(tier, sampling_rate, hop_length, last_phoneme_flags): + """ + Returns phonemes, phoneme durations (in frames), start time (in seconds), end time (in seconds). + This function is adopted from https://github.com/ming024/FastSpeech2/blob/master/preprocessor/preprocessor.py + + Arguments + --------- + tier : tgt.core.IntervalTier + For an utterance, contains Interval objects for phonemes and their start time and end time in seconds + sampling_rate : int + Sample rate if audio signal + hop_length : int + Hop length for duration computation + last_phoneme_flags : list + List of (phoneme, flag) tuples with flag=1 if the phoneme is the last phoneme else flag=0 + + + Returns + ------- + (phones, durations, start_time, end_time) : tuple + The phonemes, durations, start time, and end time for an utterance + """ + + sil_phones = ["sil", "sp", "spn", ""] + + phonemes = [] + durations = [] + start_time = 0 + end_time = 0 + end_idx = 0 + trimmed_last_phoneme_flags = [] + + flag_iter = iter(last_phoneme_flags) + + for t in tier._objects: + s, e, p = t.start_time, t.end_time, t.text + current_flag = next(flag_iter) + + # Trims leading silences + if phonemes == []: + if p in sil_phones: + continue + else: + start_time = s + + if p not in sil_phones: + # For ordinary phones + # Removes stress indicators + if p[-1].isdigit(): + phonemes.append(p[:-1]) + else: + phonemes.append(p) + 
trimmed_last_phoneme_flags.append(current_flag[1]) + end_time = e + end_idx = len(phonemes) + else: + # Uses a unique token for all silent phones + phonemes.append("spn") + trimmed_last_phoneme_flags.append(current_flag[1]) + + durations.append( + int( + np.round(e * sampling_rate / hop_length) + - np.round(s * sampling_rate / hop_length) + ) + ) + + # Trims tailing silences + phonemes = phonemes[:end_idx] + durations = durations[:end_idx] + + return phonemes, durations, start_time, end_time, trimmed_last_phoneme_flags + + +def get_last_phoneme_info(words_seq, phones_seq): + """This function takes word and phoneme tiers from a TextGrid file as input + and provides a list of tuples for the phoneme sequence indicating whether + each of the phonemes is the last phoneme of a word or not. + + Each tuple of the returned list has this format: (phoneme, flag) + + + Arguments + --------- + words_seq : tier + word tier from a TextGrid file + phones_seq : tier + phoneme tier from a TextGrid file + + Returns + ------- + last_phoneme_flags : list + each tuple of the returned list has this format: (phoneme, flag) + """ + + # Gets all phoneme objects for the entire sequence + phoneme_objects = phones_seq._objects + phoneme_iter = iter(phoneme_objects) + + # Stores flags to show if an element (phoneme) is a the last phoneme of a word + last_phoneme_flags = list() + + # Matches the end times of the phoneme and word objects to get the last phoneme information + for word_obj in words_seq._objects: + word_end_time = word_obj.end_time + + current_phoneme = next(phoneme_iter, None) + while current_phoneme: + phoneme_end_time = current_phoneme.end_time + if phoneme_end_time == word_end_time: + last_phoneme_flags.append((current_phoneme.text, 1)) + break + else: + last_phoneme_flags.append((current_phoneme.text, 0)) + current_phoneme = next(phoneme_iter, None) + + return last_phoneme_flags + + +def custom_clean(text, model_name): + """ + Uses custom criteria to clean text. 
+ + Arguments + --------- + text : str + Input text to be cleaned + model_name : str + whether to treat punctuations + + Returns + ------- + text : str + Cleaned text + """ + + _abbreviations = [ + (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) + for x in [ + ("mrs", "missus"), + ("mr", "mister"), + ("dr", "doctor"), + ("st", "saint"), + ("co", "company"), + ("jr", "junior"), + ("maj", "major"), + ("gen", "general"), + ("drs", "doctors"), + ("rev", "reverend"), + ("lt", "lieutenant"), + ("hon", "honorable"), + ("sgt", "sergeant"), + ("capt", "captain"), + ("esq", "esquire"), + ("ltd", "limited"), + ("col", "colonel"), + ("ft", "fort"), + ] + ] + text = unidecode(text.lower()) + if model_name != "FastSpeech2WithAlignment": + text = re.sub("[:;]", " - ", text) + text = re.sub(r'[)(\[\]"]', " ", text) + text = text.strip().strip().strip("-") + + text = re.sub(" +", " ", text) + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text diff --git a/recipes/LJSpeech/quantization/README.md b/recipes/LJSpeech/quantization/README.md new file mode 100644 index 0000000000..a58c04f913 --- /dev/null +++ b/recipes/LJSpeech/quantization/README.md @@ -0,0 +1,102 @@ +# Quantization + +This folder contains recipes for training K-means quantizers on the LJSpeech dataset. +The quantizer maps self-supervised representations from wav2vec 2.0, HuBERT, WavLM, etc. into discrete representations. +These discrete representations can then be used as input features for downstream tasks such as ASR, ASV, TTS, etc. + +You can download LJSpeech from https://keithito.com/LJ-Speech-Dataset. + +--------------------------------------------------------------------------------------------------------- + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
To do so, simply run the following command in your terminal:

```shell
pip install -r extra_requirements.txt
```

---------------------------------------------------------------------------------------------------------

## Running an Experiment

```shell
python train.py hparams/train_discrete_ssl.yaml --data_folder <path-to-LJSpeech>
```

Examples:

```shell
python train.py hparams/train_discrete_ssl.yaml \
--data_folder data/LJSpeech \
--ssl_hub facebook/wav2vec2-large \
--n_clusters 1000 \
--layer_id 7 \
--experiment_name wav2vec2_K1000_L7
```

```shell
python train.py hparams/train_discrete_ssl.yaml \
--data_folder data/LJSpeech \
--ssl_hub facebook/hubert-large-ll60k \
--n_clusters 1000 \
--layer_id 7 \
--experiment_name hubert_K1000_L7
```

```shell
python train.py hparams/train_discrete_ssl.yaml \
--data_folder data/LJSpeech \
--ssl_hub microsoft/wavlm-large \
--n_clusters 1000 \
--layer_id 7 \
--experiment_name wavlm_K1000_L7
```

---------------------------------------------------------------------------------------------------------

## Results

The output folders with checkpoints and logs can be found [here](https://www.dropbox.com/sh/bk5qz0u1ppx15jk/AAAj23FI3AVKtfRKGvyHJYHza?dl=0).

The checkpoints can be also found at [this](https://huggingface.co/speechbrain/SSL_Quantization) HuggingFace repository.

**NOTE**: these logs and checkpoints were created using an earlier version of the code.

---------------------------------------------------------------------------------------------------------

## About SpeechBrain

- Website: https://speechbrain.github.io/
- Code: https://github.com/speechbrain/speechbrain/
- HuggingFace: https://huggingface.co/speechbrain/

---------------------------------------------------------------------------------------------------------

## Citing SpeechBrain

Please, cite SpeechBrain if you use it for your research or business.
+ +```bibtex +@article{speechbrainV1, + author = {Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca {Della Libera} and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Ha Nguyen and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Ga{{\"e}}lle Laperri{{\`e}}re and Mickael Rouvier and Renato De Mori and Yannick Est{{\`e}}ve}, + title = {Open-Source Conversational {AI} with {SpeechBrain} 1.0}, + journal = {Journal of Machine Learning Research}, + year = {2024}, + volume = {25}, + number = {333}, + pages = {1--11}, + url = {http://jmlr.org/papers/v25/24-0991.html} +} +``` + +```bibtex +@article{ravanelli2021speechbrain, + author = {Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + title = {{SpeechBrain}: A General-Purpose Speech Toolkit}, + journal = {arXiv preprint arXiv:2106.04624}, + year = {2021}, + url = {https://arxiv.org/abs/2106.04624}, +} +``` diff --git a/recipes/LJSpeech/quantization/extra_requirements.txt b/recipes/LJSpeech/quantization/extra_requirements.txt new file mode 100644 index 0000000000..bd6f06aa66 --- /dev/null +++ b/recipes/LJSpeech/quantization/extra_requirements.txt @@ -0,0 +1,3 @@ +scikit-learn +tgt +unidecode diff --git a/recipes/LJSpeech/quantization/hparams/train_discrete_ssl.yaml b/recipes/LJSpeech/quantization/hparams/train_discrete_ssl.yaml new file 
mode 100644 index 0000000000..0901f19ccf --- /dev/null +++ b/recipes/LJSpeech/quantization/hparams/train_discrete_ssl.yaml @@ -0,0 +1,102 @@ +# ########################################################################################### +# Model: K-means applied to SSL model +# Authors: Luca Della Libera 2024 +# Adapted from: https://github.com/speechbrain/speechbrain/blob/v1.0.2/recipes/LJSpeech/quantization/hparams/train_discrete_ssl.yaml +# ########################################################################################### + +experiment_name: wav2vec2_K1000_L7 + +# Seed needs to be set at top of YAML +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Data preparation +data_folder: !PLACEHOLDER +splits: [train, valid, test] +split_ratio: [80, 10, 10] +skip_prep: False +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json + +# Output folders +output_folder: !ref results// +save_folder: !ref /save +cache_folder: !name:huggingface_hub.constants.HUGGINGFACE_HUB_CACHE + +# Preprocessing parameters +train_remove_if_longer: 60.0 # Seconds +valid_remove_if_longer: 60.0 # Seconds +test_remove_if_longer: 60.0 # Seconds +sorting: random + +# Training parameters +num_epochs: 1 +train_batch_size: 8 +valid_batch_size: 1 +test_batch_size: 1 +dataloader_workers: 4 +nonfinite_patience: 10 +precision: fp32 +ckpt_interval_steps: 4000 +keep_checkpoints: 2 + +# SSL model parameters +ssl_hub: facebook/wav2vec2-large +sample_rate: 16000 # NOTE: must match the SSL model sample rate +layer_id: 7 + +# Quantizer parameters +n_clusters: 1000 +init: k-means++ +max_iter: 100 +kmeans_batch_size: 10000 # Should be >= num_clusters +tol: 0.0 +max_no_improvement: 100 +n_init: 20 +reassignment_ratio: 0.0 + +# Modules +ssl_model: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: False + freeze: True + freeze_feature_extractor: True + output_all_hiddens: True + save_path: !ref + +quantizer: 
!new:speechbrain.integrations.audio_tokenizers.kmeans.MiniBatchKMeansSklearn + n_clusters: !ref + init: !ref + max_iter: !ref + batch_size: !ref + tol: !ref + max_no_improvement: !ref + n_init: !ref + reassignment_ratio: !ref + random_state: !ref + verbose: 1 + compute_labels: True + init_size: null + +modules: + ssl_model: !ref + quantizer: !ref + +# Counters, checkpointers, loggers, etc. +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + quantizer: !ref + counter: !ref + custom_load_hooks: + quantizer: !name:speechbrain.integrations.audio_tokenizers.kmeans.MiniBatchKMeansSklearn.load + custom_save_hooks: + quantizer: !name:speechbrain.integrations.audio_tokenizers.kmeans.MiniBatchKMeansSklearn.save + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref /train_log.txt + precision: 3 diff --git a/recipes/LJSpeech/quantization/ljspeech_prepare.py b/recipes/LJSpeech/quantization/ljspeech_prepare.py new file mode 120000 index 0000000000..2de5a21a8d --- /dev/null +++ b/recipes/LJSpeech/quantization/ljspeech_prepare.py @@ -0,0 +1 @@ +../ljspeech_prepare.py \ No newline at end of file diff --git a/recipes/LJSpeech/quantization/train.py b/recipes/LJSpeech/quantization/train.py new file mode 100644 index 0000000000..8280674bf6 --- /dev/null +++ b/recipes/LJSpeech/quantization/train.py @@ -0,0 +1,267 @@ +#!/usr/bin/env/python + +"""Recipe for training a K-means quantizer on features from an SSL model. 

To run this recipe:
> python train.py hparams/train_discrete_ssl.yaml

Authors
 * Luca Della Libera 2024
"""

# Adapted from:
# https://github.com/speechbrain/speechbrain/blob/v1.0.2/recipes/LJSpeech/quantization/train.py

import sys

import torch
import torchaudio
from hyperpyyaml import load_hyperpyyaml

import speechbrain as sb
from speechbrain.utils.distributed import if_main_process


class Quantization(sb.Brain):
    # Trains a MiniBatch K-means quantizer on hidden features extracted from
    # a frozen SSL model. There are no trainable torch parameters: an
    # "optimizer step" is a quantizer.partial_fit on an accumulated feature
    # batch, and the returned losses only exist to satisfy the Brain API.

    def compute_forward(self, batch, stage):
        """Forward pass: extract frozen SSL features for one batch."""
        batch = batch.to(self.device)
        sig, lens = batch.sig  # [B, T]

        # Extract features (model is frozen; eval() disables dropout etc.)
        with torch.no_grad():
            self.modules.ssl_model.eval()
            feats = self.modules.ssl_model(sig, lens)  # [K, B, N, H]
            # Keep only the hidden layer the quantizer is trained on
            feats = feats[self.hparams.layer_id]  # [B, N, H]

        return feats

    def compute_objectives(self, predictions, batch, stage):
        """Computes the objectives."""
        feats = predictions  # [B, N, H]

        if stage != sb.Stage.TRAIN:
            # For K-means the validation/test loss is the inertia
            # The lower the inertia, the better should be the clustering
            # It is useful to monitor progress across epochs
            # However, when saving checkpoints we always keep the last one (i.e. max_keys=["epoch"])
            # to keep backward compatibility
            loss = self.hparams.quantizer.inertia(feats)
            return loss

        # If training, accumulate features (batch size used for K-means training
        # should be much larger than batch size used for feature extraction)
        feats = feats.flatten(end_dim=-2)  # [BN, H]
        self.curr_feats.append(feats)
        self.curr_batch_size += len(feats)
        if self.curr_batch_size < self.hparams.kmeans_batch_size:
            # If not enough features, leave average loss unchanged and go to next batch
            # avg_loss is computed as: (avg_loss - avg_loss / self.step) + float(loss) / self.step
            # If we set loss = avg_loss, avg_loss stays unchanged
            loss = torch.tensor(self.avg_train_loss)
            # Keep compatibility with standard supervised training
            # (SpeechBrain expects a tensor with gradient)
            loss.requires_grad_()
            return loss
        self.curr_feats = torch.cat(self.curr_feats)
        feats = self.curr_feats[: self.hparams.kmeans_batch_size]

        # Keep remaining features for next iteration
        self.curr_feats = [self.curr_feats[self.hparams.kmeans_batch_size :]]
        self.curr_batch_size = len(self.curr_feats[0])

        # Retrieve current centroids
        # NOTE(review): assumes `cluster_centers` returns a snapshot/copy —
        # if it returned a live reference, old and new centers would alias
        # and the drift below would always be 0. Confirm in the quantizer.
        old_cluster_centers = self.hparams.quantizer.cluster_centers

        # Partial fit on current batch
        self.hparams.quantizer.partial_fit(feats)

        # For K-means the training loss is the drift between current centroids and old centroids
        # If close to 0, it means that the training has converged
        curr_cluster_centers = self.hparams.quantizer.cluster_centers
        loss = (curr_cluster_centers - old_cluster_centers).norm()

        # Keep compatibility with standard supervised training
        # (SpeechBrain expects a tensor with gradient)
        loss.requires_grad_()
        # One partial_fit == one "optimizer step"; keep the Brain counter in
        # sync with the quantizer's own step counter.
        self.optimizer_step += 1
        assert self.optimizer_step == self.modules.quantizer.n_steps, (
            f"optimizer_step: {self.optimizer_step}",
            f"quantizer.n_steps: {self.modules.quantizer.n_steps}",
        )

        return loss

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch."""
        if stage == sb.Stage.TRAIN:
            # Reset the feature accumulator
            # NOTE: not included in intra-epoch checkpoints
            self.curr_feats = []
            self.curr_batch_size = 0

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of each epoch."""
        # Compute/store important stats
        current_epoch = self.hparams.epoch_counter.current
        stage_stats = {"loss": stage_loss}

        if stage == sb.Stage.TRAIN:
            self.avg_train_loss = 0.0
            self.train_stats = stage_stats
            self.stats_meta = {"epoch": epoch, "steps": self.optimizer_step}
            # Only the main DDP process writes checkpoints
            if if_main_process():
                self.checkpointer.save_and_keep_only(
                    meta={"loss": stage_stats["loss"], "epoch": epoch},
                    max_keys=["epoch"],
                    num_to_keep=self.hparams.keep_checkpoints,
                )
            self.hparams.train_logger.log_stats(
                stats_meta=self.stats_meta,
                train_stats=self.train_stats,
            )

        # Perform end-of-iteration operations, like annealing, logging, etc.
        elif stage == sb.Stage.VALID:
            self.hparams.train_logger.log_stats(
                stats_meta=self.stats_meta,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )

        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": current_epoch},
                test_stats=stage_stats,
            )


def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.
+ + """ + train_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams["train_json"], + replacements={"data_root": hparams["data_folder"]}, + ) + # Sort training data to speed up training + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=hparams["sorting"] == "descending", + key_max_value={"duration": hparams["train_remove_if_longer"]}, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams["valid_json"], + replacements={"data_root": hparams["data_folder"]}, + ) + # Sort validation data to speed up validation + valid_data = valid_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["valid_remove_if_longer"]}, + ) + + test_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams["test_json"], + replacements={"data_root": hparams["data_folder"]}, + ) + # Sort the test data to speed up testing + test_data = test_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["test_remove_if_longer"]}, + ) + + datasets = [train_data, valid_data, test_data] + + # Define audio pipeline + takes = ["wav"] + provides = ["sig"] + + def audio_pipeline(wav): + original_sample_rate = sb.dataio.dataio.read_audio_info(wav).sample_rate + sig = sb.dataio.dataio.read_audio(wav) + sig = torchaudio.functional.resample( + sig, original_sample_rate, hparams["sample_rate"] + ) + yield sig + + sb.dataio.dataset.add_dynamic_item( + datasets, audio_pipeline, takes, provides + ) + + # Set output + sb.dataio.dataset.set_output_keys(datasets, ["id"] + provides) + + return datasets + + +if __name__ == "__main__": + # Command-line interface + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then create ddp_init_group with the right communication protocol + 
sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Prepare data + from ljspeech_prepare import prepare_ljspeech + + kwargs = { + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "splits": hparams["splits"], + "split_ratio": hparams["split_ratio"], + "seed": hparams["seed"], + "skip_prep": hparams["skip_prep"], + } + prepare_ljspeech(**kwargs) + + # Create the datasets objects + train_data, valid_data, test_data = dataio_prepare(hparams) + + # Trainer initialization + brain = Quantization( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Train + brain.fit( + brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=dict( + num_workers=hparams["dataloader_workers"], + batch_size=hparams["train_batch_size"], + shuffle=hparams["sorting"] == "random", + pin_memory=run_opts.get("device", "cpu") != "cpu", + ), + valid_loader_kwargs=dict( + num_workers=hparams["dataloader_workers"], + batch_size=hparams["valid_batch_size"], + pin_memory=run_opts.get("device", "cpu") != "cpu", + ), + ) + + # Test + brain.evaluate( + test_data, + max_key="epoch", + test_loader_kwargs=dict( + num_workers=hparams["dataloader_workers"], + batch_size=hparams["test_batch_size"], + pin_memory=run_opts.get("device", "cpu") != "cpu", + ), + ) diff --git a/recipes/Libri-Light/self-supervised-learning/BEST-RQ/README.md b/recipes/Libri-Light/self-supervised-learning/BEST-RQ/README.md new file mode 100644 index 0000000000..72e39c7c9f --- /dev/null +++ b/recipes/Libri-Light/self-supervised-learning/BEST-RQ/README.md @@ -0,0 +1,70 @@ +# SpeechBrain self-supervised-learning (BEST-RQ streaming and offline) with Libri-Light + +This folder contains the script for preparing the Libri-Light dataset, and the 
script of training a small BEST-RQ model (94M parameters) using Libri-Light. The data preparation requires a few steps that may take a bit of time due to the size and structure of the dataset.

## How to run

1- Download the Libri-Light data (small, or medium, or large split) from

    https://github.com/facebookresearch/libri-light/tree/main/data_preparation


2- Git clone the Libri-Light repo from

    https://github.com/facebookresearch/libri-light

Then, use the ```cut_by_vad.py``` script from the Libri-Light repo to do the VAD of each downloaded split.
For example, if you want to use the small split, and you want most clips after VAD to be 20 seconds, then

    python cut_by_vad.py \
    --input_dir path_to_Libri-Light/small \
    --output_dir Libri-Light_VAD/small_vad \
    --target_len_sec 20

If you also want to use the medium or the large split

    python cut_by_vad.py \
    --input_dir path_to_Libri-Light/medium \
    --output_dir Libri-Light_VAD/medium_vad \
    --target_len_sec 20

    python cut_by_vad.py \
    --input_dir path_to_Libri-Light/large \
    --output_dir Libri-Light_VAD/large_vad \
    --target_len_sec 20

**Note**
 If you want to use more than one split, it is important to save the VAD results of each split into the same folder.
 If you want to use the large split, steps 1 and 2 may take days.

3- Libri-Light does not have a dev split. Thus, please use the dev set of another dataset to monitor the training. E.g.,
LibriSpeech dev-clean. In practice, you can just put any wav files in a folder and use the path to this folder as a dev.
Ideally, for this recipe, LibriSpeech dev-clean is in-domain and is therefore a good validation set.
+ + +4- Now, you can do the Libri-Light data preparation and train a BEST-RQ model using + + python train.py hparams/BEST-RQ.yaml \ + --data_folder Libri-Light_VAD/ \ + --dev_folder /path/to/LibriSpeech/dev-clean \ + --vad_splits=["small_vad"] \ + +or, since this can be a fairly big dataset, one may want to use multiple GPUs + + torchrun --nproc_per_node=8 train.py hparams/BEST-RQ.yaml \ + --data_folder Libri-Light_VAD/ \ + --dev_folder /path/to/LibriSpeech/dev-clean \ + --vad_splits=["small_vad"] \ + +To use different amount of training data + +```--vad_splits=["small_vad"]``` -> around 600 hours + +```--vad_splits=["small_vad", "medium_vad"]``` -> around 6k hours + +```--vad_splits=["small_vad", "medium_vad", "large_vad"]``` -> around 60k hours + +# Finetuning after pretraining + +Please refer to the LibriSpeech [ASR / CTC](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/ASR/CTC) recipe for speech recognition finetuning. The produced checkpoint is a standard PyTorch checkpoint and this recipe gives you an example of how to load it. Then, you can +just plug it in any recipe that you are interested in by changing the YAML and train.py a bit! \ No newline at end of file diff --git a/recipes/Libri-Light/self-supervised-learning/BEST-RQ/hparams/BEST-RQ.yaml b/recipes/Libri-Light/self-supervised-learning/BEST-RQ/hparams/BEST-RQ.yaml new file mode 100644 index 0000000000..c9cf6dbf75 --- /dev/null +++ b/recipes/Libri-Light/self-supervised-learning/BEST-RQ/hparams/BEST-RQ.yaml @@ -0,0 +1,202 @@ +# ############################################################################ +# Model: Best-RQ +# Encoder: Conformer Encoder w/Random Projection Quantizer +# Training: Libri-Light 60k h +# NOTE! 
Before running the SSL, please apply VAD to the Libri-Light dataset +# Please refer to the README for details +# Authors: Ryan Whetten 2024 +# Shucong Zhang 2024 +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 1000 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +data_folder: !PLACEHOLDER # /path/to/Libri-Light_VAD/ +output_folder: !ref results/ +save_folder: !ref /save +# Logging file for every N optimizer steps (many lines) +train_steps_log: !ref /train_steps_log.txt +# Logging file per epoch +train_stage_log: !ref /train_stage_log.txt + +# Libri-Light does not have a dev split. Please use another dataset for dev. +# For example, /path/to/LibriSpeech/dev-clean +dev_folder: !PLACEHOLDER +# The data folder after the VAD. +# Please put "small_vad", "medium_vad", and "large_vad" under the path +# ["small_vad"]: 600 hours +# ["small_vad", "medium_vad"] 6k hours +# ["small_vad", "medium_vad", "large_vad"] 60k hours +vad_splits: !PLACEHOLDER +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +skip_prep: False + +avoid_if_longer_than: 60.0 +avoid_if_shorter_than: 2.0 +log_interval: 500 # Logging every N optimizer steps +max_grad_norm: 10 +precision: fp16 # bf16, fp16 or fp32 + +# The training will either stops at number_of_epochs or optimizer_step_limit +# I.e. the first that is reached. +number_of_epochs: 3000 +optimizer_step_limit: 300000 + +# This setup is for 8 V100. +seconds_per_batch: 400 +train_num_buckets: 150 +grad_accumulation_factor: 2 + +train_dataloader_options: + num_workers: 4 + +test_dataloader_options: + batch_size: 8 # DynamicBatching not used at testing time + num_workers: 4 + +lr: 0.0008 + +# Mel-Filterbank parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 +hop_length: 10 + +# BEST RQ quantiser has a special downsampling mechanism. 
+# convolutions reduction dim by 4 in the time domain +# so the input to quantizer also needs to reduce dim by 4 +pad_to_divisible_by: 4 + +# Streaming & dynamic chunk training options +# At least for the current architecture on LibriSpeech, we found out that +# non-streaming accuracy is very similar between `streaming: True` and +# `streaming: False`. +streaming: True # controls all Dynamic Chunk Training & chunk size & left context mechanisms + +# Configuration for Dynamic Chunk Training. +dynchunktrain_config_sampler: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler # yamllint disable-line rule:line-length + chunkwise_prob: 0.6 # Probability during a batch to limit attention and sample a random chunk size in the following range + chunk_size_min: 8 # Minimum chunk size (if in a DynChunkTrain batch) + chunk_size_max: 32 # Maximum chunk size (if in a DynChunkTrain batch) + limited_left_context_prob: 0.75 # If in a DynChunkTrain batch, the probability during a batch to restrict left context to a random number of chunks + left_context_chunks_min: 2 # Minimum left context size (in # of chunks) + left_context_chunks_max: 32 # Maximum left context size (in # of chunks) + valid_config: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig + chunk_size: 8 + left_context_size: 16 + +####################### Model parameters ########################### +# Transformer +d_model: 576 +nhead: 8 +num_encoder_layers: 12 +num_decoder_layers: 0 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5000 +encoder_layerdrop: 0.00 + +# Masking parameters +mask_length: 4 +mask_prob: 0.15 +noise_mean: 0 +noise_std: 0.1 + +# quantizer (codebook = cb) parameters +p_input: 320 +cb_dim: 16 +cb_vocab: 8192 + + +############################## Models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 
+ out_channels: (128, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + conformer_activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + layerdrop_prob: !ref + +# We must call an encoder wrapper so the decoder isn't run (we don't have any) +wrapper: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper + transformer: !ref + +Quantizer: !new:speechbrain.nnet.quantisers.RandomProjectionQuantizer + input_dim: !ref + cb_dim: !ref + cb_vocab: !ref + +linear: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + wrapper: !ref + Quantizer: !ref + normalize: !ref + linear: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +optimizer: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + weight_decay: 0.01 + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + +############################## running ################################ + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + + +train_steps_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +train_stage_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: 
This function prepares the csv files for the LibriLight dataset.
Please ensure that the corresponding
        'small_vad', 'medium_vad', or 'large_vad' folders exist under the data_folder
+ ) + logger.info("Please be patient and do not kill the process.") + + # Additional checks to make sure the data folder contains LibriLight + check_librilight_folders(data_folder, vad_splits) + + # create csv files for each split + for split_index in range(len(vad_splits)): + split = vad_splits[split_index] + + wav_lst = get_all_files( + os.path.join(data_folder, split), match_and=[".flac"] + ) + + n_sentences = len(wav_lst) + + create_csv(save_folder, wav_lst, split, n_sentences) + + # Merging csv file if needed + if merge_lst and merge_name is not None: + merge_files = [split_libri + ".csv" for split_libri in merge_lst] + merge_csvs( + data_folder=save_folder, csv_lst=merge_files, merged_csv=merge_name + ) + + # create a dev csv file from the dev_folder + dev_wav_lst = get_all_files(dev_folder, match_and=[".flac"]) + + dev_n_sentences = len(dev_wav_lst) + + create_csv(save_folder, dev_wav_lst, "dev", dev_n_sentences) + + +@dataclass +class LLRow: + """Dataclass for handling Libri-Light rows. + + Attributes + ---------- + snt_id : str + The segment ID. + duration : float + The duration of the segment. + file_path : str + The path to the audio file. + """ + + snt_id: str + duration: float + file_path: str + + +def process_line(wav_file) -> LLRow: + snt_id = "".join(wav_file.split("/")[-3:]).replace(".flac", "") + + info = read_audio_info(wav_file) + duration = info.num_frames / info.sample_rate + + return LLRow( + snt_id=snt_id, + duration=duration, + file_path=wav_file, + ) + + +def create_csv(save_folder, wav_lst, split, select_n_sentences): + """ + Create the dataset csv file given a list of wav files. + + Arguments + --------- + save_folder : str + Location of the folder for storing the csv. + wav_lst : list + The list of wav files of a given data split. + split : str + The name of the current data split. + select_n_sentences : int, optional + The number of sentences to select. 
+ + Returns + ------- + None + """ + # Setting path for the csv file + csv_file = os.path.join(save_folder, split + ".csv") + if os.path.exists(csv_file): + logger.info("Csv file %s already exists, not recreating." % csv_file) + return + + # Preliminary prints + msg = "Creating csv lists in %s..." % (csv_file) + logger.info(msg) + + csv_lines = [["ID", "duration", "wav"]] + + snt_cnt = 0 + line_processor = functools.partial(process_line) + # Processing all the wav files in wav_lst + # FLAC metadata reading is already fast, so we set a high chunk size + # to limit main thread CPU bottlenecks + for row in parallel_map(line_processor, wav_lst, chunk_size=8192): + csv_line = [row.snt_id, str(row.duration), row.file_path] + + # Appending current file to the csv_lines list + csv_lines.append(csv_line) + + snt_cnt = snt_cnt + 1 + + # parallel_map guarantees element ordering so we're OK + if snt_cnt == select_n_sentences: + break + + # Writing the csv_lines + with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + for line in csv_lines: + csv_writer.writerow(line) + + # Final print + msg = "%s successfully created!" % (csv_file) + logger.info(msg) + + +def skip(splits, save_folder): + """ + Detect when the LibriLight data prep can be skipped. + + Arguments + --------- + splits : list + A list of the splits expected in the preparation. + save_folder : str + The location of the save directory + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + + # Checking csv files + skip = True + + for split in splits: + if not os.path.isfile(os.path.join(save_folder, split + ".csv")): + skip = False + + return skip + + +def check_librilight_folders(data_folder, splits): + """ + Check if the data folder actually contains the Libri-Light dataset. + + If it does not, an error is raised. 
To run this recipe call python train.py hparams/BEST-RQ.yaml --find_unused_parameters
+ """ + # get batch and mask + wavs, wav_lens, mask = batch + wavs, wav_lens, mask = ( + wavs.to(self.device), + wav_lens.to(self.device), + mask.to(self.device), + ) + + if self.hparams.streaming: + dynchunktrain_config = self.hparams.dynchunktrain_config_sampler( + stage + ) + else: + dynchunktrain_config = None + + ### get fbanks and normalize + feats = self.hparams.compute_features(wavs) + current_epoch = self.hparams.epoch_counter.current + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + ### augment data if necessary + if stage == sb.Stage.TRAIN: + if hasattr(self.hparams, "augmentation"): + feats = self.hparams.augmentation(feats) + + divis_by = self.hparams.pad_to_divisible_by + feats = pad_feats(feats, divis_by) + + # get targets from quantizer and stack the frames! + B, T, C = feats.shape + targets = self.modules.Quantizer( + feats.view(B, feats.shape[1] // divis_by, -1) + ) + + # generate random noise + noise = torch.normal( + mean=self.hparams.noise_mean, + std=self.hparams.noise_std, + size=(B, mask.shape[0], C), + device=self.device, + ) + # replace with random noise + feats[:, mask, :] = noise + + #### convolutions + src = self.modules.CNN(feats) + + ##### transformer + enc_out = self.modules.wrapper( + src, wav_lens, dynchunktrain_config=dynchunktrain_config + ) # only use encoder + + ##### linear + logits = self.modules.linear(enc_out) + + ##### get masked region for loss computation only over these. 
+ mask_idx = mask[::divis_by] // divis_by + logits = logits[:, mask_idx, :] + targets = targets[:, mask_idx] + + B, T, C = logits.shape + return logits.view(B * T, C), targets.view(B * T) + + def compute_objectives(self, predictions, batch, stage): + pred, targets = predictions + + if stage != sb.Stage.TRAIN and sb.utils.distributed.if_main_process(): + predicted_classes = torch.argmax(pred, dim=-1) + correct_predictions = predicted_classes == targets + accuracy = correct_predictions.sum().item() / len( + correct_predictions + ) + self.acc_metric.append(accuracy) + + return F.cross_entropy(pred, targets) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """Called after fit_batch(), updates learning rate and does per-step logging.""" + + if should_step: + self.hparams.noam_annealing(self.optimizer) + + # Perform step-wise logging + if ( + hasattr(self.hparams, "log_interval") + and self.optimizer_step % self.hparams.log_interval == 0 + ): + # Create a dictionary and fill it with everything we + # want to log such as contrastive loss, diversity loss, + # learning rate etc. 
BEST-RQ quantizer stacks frames together. Hence, we need to pad the
Function to get the output length of the feature extractor. This is
        necessary to compute the masks of BestRQ.
+ """ + sr = hparams["sample_rate"] + hop_length = hparams["hop_length"] + + return (input_lengths // (sr * hop_length / 1000) + 1).to(torch.long) + + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + sb.dataio.dataset.set_output_keys(datasets, ["id", "sig"]) + + # We create the DynamicBatch Sampler + train_sampler = DynamicBatchSampler( + train_data, + hparams["seconds_per_batch"], + num_buckets=hparams["train_num_buckets"], + length_func=lambda x: x["duration"], + batch_ordering="random", + shuffle=True, + ) + + # We define the custom collation function that is necessary for best-rq to + # generate masks. + brq_mask_collate_fn_partial = partial( + brq_mask_collate_fn, + get_out_len_fn=get_output_lengths, + mask_prob=hparams["mask_prob"], + mask_length=hparams["mask_length"], + n_mels=hparams["n_mels"], + ) + + train_loader_kwargs = { + "batch_sampler": train_sampler, + "collate_fn": brq_mask_collate_fn_partial, + "num_workers": hparams["train_dataloader_options"]["num_workers"], + "pin_memory": True, + } + + valid_loader = SaveableDataLoader( + valid_data, + collate_fn=brq_mask_collate_fn_partial, + num_workers=hparams["test_dataloader_options"]["num_workers"], + batch_size=hparams["test_dataloader_options"]["batch_size"], + pin_memory=True, + ) + + return train_data, valid_loader, train_loader_kwargs + + +def main(): + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + hparams.update(run_opts) + + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + from librilight_prepare import prepare_librilight + + run_on_main( + 
prepare_librilight, + kwargs={ + "data_folder": hparams["data_folder"], + "dev_folder": hparams["dev_folder"], + "save_folder": hparams["output_folder"], + "vad_splits": hparams["vad_splits"], + "merge_lst": hparams["vad_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # Part that matters starts here. + train_dataset, valid_loader, train_loader_kwargs = dataio_prepare(hparams) + + brain = BestRQBrain( + modules=hparams["modules"], + opt_class=hparams["optimizer"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + # with torch.autograd.detect_anomaly(): + brain.fit( + brain.hparams.epoch_counter, + train_dataset, + valid_loader, + train_loader_kwargs=train_loader_kwargs, + progressbar=True, + ) + + +if __name__ == "__main__": + main() diff --git a/recipes/LibriMix/extra-dependencies.txt b/recipes/LibriMix/extra_requirements.txt similarity index 100% rename from recipes/LibriMix/extra-dependencies.txt rename to recipes/LibriMix/extra_requirements.txt diff --git a/recipes/LibriMix/meta/preprocess_dynamic_mixing.py b/recipes/LibriMix/meta/preprocess_dynamic_mixing.py index 7bc4ac55c9..7f07b6e185 100644 --- a/recipes/LibriMix/meta/preprocess_dynamic_mixing.py +++ b/recipes/LibriMix/meta/preprocess_dynamic_mixing.py @@ -8,18 +8,19 @@ Samuele Cornell, 2020 """ -import os import argparse +import glob +import os from pathlib import Path + +import numpy as np +import torch import tqdm -import torchaudio -import glob # from oct2py import octave from scipy import signal -import numpy as np -import torch +from speechbrain.dataio import audio_io parser = argparse.ArgumentParser( "utility for resampling all audio files in a folder recursively" @@ -45,7 +46,7 @@ def resample_folder(input_folder, output_folder, fs, regex): Path of the output folder with the resampled data. fs : int Target sampling frequency. - reg_exp: str + regex : str Regular expression for search. 
""" # filedir = os.path.dirname(os.path.realpath(__file__)) @@ -54,8 +55,7 @@ def resample_folder(input_folder, output_folder, fs, regex): files = glob.glob(os.path.join(input_folder, regex), recursive=True) for f in tqdm.tqdm(files): - - audio, fs_read = torchaudio.load(f) + audio, fs_read = audio_io.load(f) audio = audio[0].numpy() audio = signal.resample_poly(audio, fs, fs_read) @@ -68,8 +68,7 @@ def resample_folder(input_folder, output_folder, fs, regex): relative_path = os.path.join( Path(f).relative_to(Path(input_folder)).parent, - Path(f).relative_to(Path(input_folder)).stem - + "_peak_{}.wav".format(peak), + Path(f).relative_to(Path(input_folder)).stem + f"_peak_{peak}.wav", ) os.makedirs( @@ -81,7 +80,7 @@ def resample_folder(input_folder, output_folder, fs, regex): exist_ok=True, ) - torchaudio.save( + audio_io.save( os.path.join(output_folder, relative_path), audio.reshape(1, -1), fs, @@ -89,7 +88,6 @@ def resample_folder(input_folder, output_folder, fs, regex): if __name__ == "__main__": - args = parser.parse_args() resample_folder( args.input_folder, args.output_folder, int(args.fs), args.regex diff --git a/recipes/LibriMix/prepare_data.py b/recipes/LibriMix/prepare_data.py index bc3fab5bb2..97eb00b4d2 100644 --- a/recipes/LibriMix/prepare_data.py +++ b/recipes/LibriMix/prepare_data.py @@ -5,8 +5,8 @@ * Cem Subakan 2020 """ -import os import csv +import os def prepare_librimix( @@ -36,14 +36,14 @@ def prepare_librimix( if "Libri" in datapath: # Libri 2/3Mix datasets if n_spks == 2: - assert ( - "Libri2Mix" in datapath - ), "Inconsistent number of speakers and datapath" + assert "Libri2Mix" in datapath, ( + "Inconsistent number of speakers and datapath" + ) create_libri2mix_csv(datapath, savepath, addnoise=librimix_addnoise) elif n_spks == 3: - assert ( - "Libri3Mix" in datapath - ), "Inconsistent number of speakers and datapath" + assert "Libri3Mix" in datapath, ( + "Inconsistent number of speakers and datapath" + ) create_libri3mix_csv(datapath, 
savepath, addnoise=librimix_addnoise) else: raise ValueError("Unsupported Number of Speakers") @@ -96,13 +96,17 @@ def create_libri2mix_csv( "noise_wav_opts", ] - with open(savepath + "/libri2mix_" + set_type + ".csv", "w") as csvfile: + with open( + savepath + "/libri2mix_" + set_type + ".csv", + "w", + newline="", + encoding="utf-8", + ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for i, (mix_path, s1_path, s2_path, noise_path) in enumerate( zip(mix_fl_paths, s1_fl_paths, s2_fl_paths, noise_fl_paths) ): - row = { "ID": i, "duration": 1.0, @@ -172,7 +176,12 @@ def create_libri3mix_csv( "noise_wav_opts", ] - with open(savepath + "/libri3mix_" + set_type + ".csv", "w") as csvfile: + with open( + savepath + "/libri3mix_" + set_type + ".csv", + "w", + newline="", + encoding="utf-8", + ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for ( diff --git a/recipes/LibriMix/separation/README.md b/recipes/LibriMix/separation/README.md index 5139c8a383..b725c0acd9 100644 --- a/recipes/LibriMix/separation/README.md +++ b/recipes/LibriMix/separation/README.md @@ -3,21 +3,31 @@ This folder contains some popular recipes for the [LibriMix Dataset](https://arx * This recipe supports train with several source separation models on LibriMix, including [Sepformer](https://arxiv.org/abs/2010.13154), [DPRNN](https://arxiv.org/abs/1910.06379), [ConvTasnet](https://arxiv.org/abs/1809.07454), [DPTNet](https://arxiv.org/abs/2007.13975). -Additional dependencies: +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
To do this, simply run the following command in your terminal: + ``` -pip install mir_eval -pip install pyloudnorm +pip install -r ../extra_requirements.txt ``` +## How to run To run it: -``` +```shell python train.py hparams/sepformer-libri2mix.yaml --data_folder yourpath/Libri2Mix python train.py hparams/sepformer-libri3mix.yaml --data_folder yourpath/Libri3Mix ``` Note that during training we print the negative SI-SNR (as we treat this value as the loss). +## How to run on test sets only +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: + +```shell +python train.py hparams/sepformer-libri2mix.yaml --data_folder yourpath/Libri3Mix --test_only +python train.py hparams/sepformer-libri3mix.yaml --data_folder yourpath/Libri3Mix --test_only +``` # Libri2/3 Mix * The Dataset can be created using the scripts at `https://github.com/JorisCos/LibriMix`. @@ -54,16 +64,16 @@ Here are the SI - SNRi results (in dB) on the test set of LibriMix dataset with * Libri3Mix with dynamic mixing with WHAM! noise in the mixtures `python train.py hparams/sepformer-libri3mix.yaml --data_folder yourpath/Libri3Mix/ --base_folder_dm yourpath/LibriSpeech_processed --dynamic_mixing True --use_wham_noise True` -The output folder with the trained model and the logs can be found [here](https://drive.google.com/drive/folders/1DN49LtAs6cq1X0jZ8tRMlh2Pj6AecClz?usp=sharing) for 3-speaker mixtures and [here](https://drive.google.com/drive/folders/1NPTXw4i9Vmahhr5BSQQa-ZTTm45FwYJA?usp=sharing) for 2-speakers ones. +The output folder with the trained model and the logs can be found [here](https://www.dropbox.com/sh/kmyz7tts9tyg198/AACsDcRwKvelXxEB-k5q1OaIa?dl=0) for 3-speaker mixtures and [here](https://www.dropbox.com/sh/skkiozml92xtgdo/AAD0eJxgbCTK03kAaILytGtVa?dl=0) for 2-speakers ones. 
# Multi-GPU training You can run the following command to train the model using Distributed Data Parallel (DDP) with 2 GPUs: +```bash +torchrun --nproc_per_node=2 train.py hparams/sepformer-libri2mix.yaml --data_folder /yourdatapath ``` - python -m torch.distributed.launch --nproc_per_node=2 train.py hparams/sepformer-libri2mix.yaml --data_folder /yourdatapath --distributed_launch --distributed_backend='nccl' -``` -You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at this [tutorial](https://colab.research.google.com/drive/13pBUacPiotw1IvyffvGZ-HrtBr9T6l15?usp=sharing). +You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at [our documentation](https://speechbrain.readthedocs.io/en/latest/multigpu.html). # **About SpeechBrain** @@ -76,6 +86,15 @@ You can add the other runtime options as appropriate. For more complete informat Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriMix/separation/dynamic_mixing.py b/recipes/LibriMix/separation/dynamic_mixing.py index a6f69501c1..390bfc49cc 100644 --- a/recipes/LibriMix/separation/dynamic_mixing.py +++ b/recipes/LibriMix/separation/dynamic_mixing.py @@ -1,14 +1,16 @@ -import speechbrain as sb -import numpy as np -import torch -import torchaudio import glob import os -from speechbrain.dataio.batch import PaddedBatch -from tqdm import tqdm +import random import warnings + +import numpy as np import pyloudnorm -import random +import torch +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.batch import PaddedBatch """ The 
functions to implement Dynamic Mixing For SpeechSeparation @@ -31,11 +33,9 @@ def build_spk_hashtable_librimix(hparams): # just for one file check if the sample rate is correct assert ( - torchaudio.info(libri_utterances[0]).sample_rate - == hparams["sample_rate"] + audio_io.info(libri_utterances[0]).sample_rate == hparams["sample_rate"] ) for utt in tqdm(libri_utterances): - path = os.path.normpath(utt) path_list = path.split(os.sep) spk_id = path_list[-3] @@ -120,7 +120,7 @@ def audio_pipeline( if hparams["use_wham_noise"]: noise_file = np.random.choice(noise_files, 1, replace=False) - noise, fs_read = torchaudio.load(noise_file[0]) + noise, fs_read = audio_io.load(noise_file[0]) noise = noise.squeeze() # select two speakers randomly @@ -131,7 +131,7 @@ def audio_pipeline( ] minlen = min( - *[torchaudio.info(x).num_frames for x in spk_files], + *[audio_io.info(x).num_frames for x in spk_files], hparams["training_signal_len"], ) @@ -166,15 +166,17 @@ def normalize(signal, is_noise=False): for i, spk_file in enumerate(spk_files): # select random offset - length = torchaudio.info(spk_file).num_frames + length = audio_io.info(spk_file).num_frames start = 0 stop = length if length > minlen: # take a random window start = np.random.randint(0, length - minlen) stop = start + minlen - tmp, fs_read = torchaudio.load( - spk_file, frame_offset=start, num_frames=stop - start, + tmp, fs_read = audio_io.load( + spk_file, + frame_offset=start, + num_frames=stop - start, ) tmp = tmp[0].numpy() tmp = normalize(tmp) diff --git a/recipes/LibriMix/separation/hparams/sepformer-libri2mix.yaml b/recipes/LibriMix/separation/hparams/sepformer-libri2mix.yaml index 25c002e65d..11b8463c67 100644 --- a/recipes/LibriMix/separation/hparams/sepformer-libri2mix.yaml +++ b/recipes/LibriMix/separation/hparams/sepformer-libri2mix.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: 
!apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -31,14 +31,13 @@ skip_prep: False ckpt_interval_minutes: 60 # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 2 noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -61,18 +60,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -83,6 +102,7 @@ N_encoder_out: 256 out_channels: 256 kernel_size: 16 kernel_stride: 8 +d_ffn: 1024 # Dataloader options dataloader_opts: @@ -100,7 +120,7 @@ SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True @@ -109,7 +129,7 @@ SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True diff --git a/recipes/LibriMix/separation/hparams/sepformer-libri3mix.yaml b/recipes/LibriMix/separation/hparams/sepformer-libri3mix.yaml index 6620c50d9c..bc34a9d418 100644 --- a/recipes/LibriMix/separation/hparams/sepformer-libri3mix.yaml +++ b/recipes/LibriMix/separation/hparams/sepformer-libri3mix.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -31,14 +31,13 @@ skip_prep: False ckpt_interval_minutes: 60 # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 3 noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training 
Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -61,18 +60,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -83,6 +102,7 @@ N_encoder_out: 256 out_channels: 256 kernel_size: 16 kernel_stride: 8 +d_ffn: 1024 # Dataloader options dataloader_opts: @@ -100,7 +120,7 @@ SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True @@ -109,7 +129,7 @@ SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock num_layers: 8 d_model: !ref nhead: 8 - d_ffn: 1024 + d_ffn: !ref dropout: 0 use_positional_encoding: True norm_before: True diff --git a/recipes/LibriMix/separation/prepare_data.py b/recipes/LibriMix/separation/prepare_data.py new file mode 120000 index 0000000000..1a7125c969 --- /dev/null +++ b/recipes/LibriMix/separation/prepare_data.py @@ -0,0 +1 @@ +../prepare_data.py \ No newline at end of file diff --git a/recipes/LibriMix/separation/train.py b/recipes/LibriMix/separation/train.py index c4ef62145f..24c870e50d 100755 --- a/recipes/LibriMix/separation/train.py +++ b/recipes/LibriMix/separation/train.py @@ -21,20 +21,23 @@ * Jianyuan Zhong 2020 """ +import csv import os import sys + +import numpy as np import torch import torch.nn.functional as F -import torchaudio +from hyperpyyaml import load_hyperpyyaml +from tqdm import tqdm + import speechbrain as sb import speechbrain.nnet.schedulers as schedulers +from speechbrain.dataio import audio_io from speechbrain.utils.distributed import run_on_main -from torch.cuda.amp import autocast -from 
hyperpyyaml import load_hyperpyyaml -import numpy as np -from tqdm import tqdm -import csv -import logging +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -73,7 +76,8 @@ def compute_forward(self, mix, targets, stage, noise=None): targets = targets[:, :min_len, :] if self.hparams.use_wavedrop: - mix = self.hparams.wavedrop(mix, mix_lens) + mix = self.hparams.drop_chunk(mix, mix_lens) + mix = self.hparams.drop_freq(mix) if self.hparams.limit_training_signal_len: mix, targets = self.cut_signals(mix, targets) @@ -109,6 +113,7 @@ def compute_objectives(self, predictions, targets): def fit_batch(self, batch): """Trains one batch""" + # Unpacking batch list mixture = batch.mix_sig targets = [batch.s1_sig, batch.s2_sig] @@ -120,72 +125,37 @@ def fit_batch(self, batch): if self.hparams.num_spks == 3: targets.append(batch.s3_sig) - if self.auto_mix_prec: - with autocast(): - predictions, targets = self.compute_forward( - mixture, targets, sb.Stage.TRAIN, noise - ) - loss = self.compute_objectives(predictions, targets) - - # hard threshold the easy dataitems - if self.hparams.threshold_byloss: - th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() - else: - loss = loss.mean() - - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - self.scaler.scale(loss).backward() - if self.hparams.clip_grad_norm >= 0: - self.scaler.unscale_(self.optimizer) - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm, - ) - self.scaler.step(self.optimizer) - self.scaler.update() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! 
it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) - ) - loss.data = torch.tensor(0).to(self.device) - else: + with self.training_ctx: predictions, targets = self.compute_forward( mixture, targets, sb.Stage.TRAIN, noise ) loss = self.compute_objectives(predictions, targets) + # hard threshold the easy dataitems if self.hparams.threshold_byloss: th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() + loss = loss[loss > th] + if loss.nelement() > 0: + loss = loss.mean() else: loss = loss.mean() - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - loss.backward() - if self.hparams.clip_grad_norm >= 0: - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm - ) - self.optimizer.step() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) + if loss.nelement() > 0 and loss < self.hparams.loss_upper_lim: + self.scaler.scale(loss).backward() + if self.hparams.clip_grad_norm >= 0: + self.scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), + self.hparams.clip_grad_norm, ) - loss.data = torch.tensor(0).to(self.device) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + self.nonfinite_count += 1 + logger.info( + f"infinite loss or empty loss! it happened {self.nonfinite_count} times so far - skipping this batch" + ) + loss.data = torch.tensor(0.0).to(self.device) self.optimizer.zero_grad() return loss.detach().cpu() @@ -222,7 +192,6 @@ def on_stage_end(self, stage, stage_loss, epoch): # Perform end-of-iteration things, like annealing, logging, etc. 
if stage == sb.Stage.VALID: - # Learning rate annealing if isinstance( self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau @@ -241,7 +210,8 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"], + meta={"si-snr": stage_stats["si-snr"]}, + min_keys=["si-snr"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( @@ -261,9 +231,7 @@ def add_speed_perturb(self, targets, targ_lens): recombine = True for i in range(targets.shape[-1]): - new_target = self.hparams.speedperturb( - targets[:, :, i], targ_lens - ) + new_target = self.hparams.speed_perturb(targets[:, :, i]) new_targets.append(new_target) if i == 0: min_len = new_target.shape[-1] @@ -300,7 +268,7 @@ def add_speed_perturb(self, targets, targ_lens): return mix, targets def cut_signals(self, mixture, targets): - """This function selects a random segment of a given length withing the mixture. + """This function selects a random segment of a given length within the mixture. 
The corresponding targets are selected accordingly""" randstart = torch.randint( 0, @@ -344,14 +312,13 @@ def save_results(self, test_data): test_data, **self.hparams.dataloader_opts ) - with open(save_file, "w") as results_csv: + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: writer = csv.DictWriter(results_csv, fieldnames=csv_columns) writer.writeheader() # Loop over all test sentence with tqdm(test_loader, dynamic_ncols=True) as t: for i, batch in enumerate(t): - # Apply Separation mixture, mix_len = batch.mix_sig snt_id = batch.id @@ -415,28 +382,27 @@ def save_results(self, test_data): } writer.writerow(row) - logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean())) - logger.info("Mean SISNRi is {}".format(np.array(all_sisnrs_i).mean())) - logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean())) - logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean())) + logger.info(f"Mean SISNR is {np.array(all_sisnrs).mean()}") + logger.info(f"Mean SISNRi is {np.array(all_sisnrs_i).mean()}") + logger.info(f"Mean SDR is {np.array(all_sdrs).mean()}") + logger.info(f"Mean SDRi is {np.array(all_sdrs_i).mean()}") def save_audio(self, snt_id, mixture, targets, predictions): "saves the test audio (mixture, targets, and estimated sources) on disk" - # Create outout folder + # Create output folder save_path = os.path.join(self.hparams.save_folder, "audio_results") if not os.path.exists(save_path): os.mkdir(save_path) for ns in range(self.hparams.num_spks): - # Estimated source signal = predictions[0, :, ns] signal = signal / signal.abs().max() save_file = os.path.join( - save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1) + save_path, f"item{snt_id}_source{ns + 1}hat.wav" ) - torchaudio.save( + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) @@ -444,17 +410,17 @@ def save_audio(self, snt_id, mixture, targets, predictions): signal = targets[0, :, ns] signal = signal / signal.abs().max() 
save_file = os.path.join( - save_path, "item{}_source{}.wav".format(snt_id, ns + 1) + save_path, f"item{snt_id}_source{ns + 1}.wav" ) - torchaudio.save( + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) # Mixture signal = mixture[0][0, :] signal = signal / signal.abs().max() - save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id)) - torchaudio.save( + save_file = os.path.join(save_path, f"item{snt_id}_mix.wav") + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) @@ -548,18 +514,14 @@ def audio_pipeline_noise(noise_wav): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) sb.utils.distributed.ddp_init_group(run_opts) - # Logger info - logger = logging.getLogger(__name__) - # Create experiment directory sb.create_experiment_directory( experiment_directory=hparams["output_folder"], @@ -571,13 +533,16 @@ def audio_pipeline_noise(noise_wav): if hparams["dynamic_mixing"] and not os.path.exists( hparams["base_folder_dm"] ): - print( + raise ValueError( "Please, specify a valid base_folder_dm folder when using dynamic mixing" ) - sys.exit(1) + + # Update precision to bf16 if the device is CPU and precision is fp16 + if run_opts.get("device") == "cpu" and hparams.get("precision") == "fp16": + hparams["precision"] = "bf16" # Data preparation - from recipes.LibriMix.prepare_data import prepare_librimix + from prepare_data import prepare_librimix run_on_main( prepare_librimix, @@ -666,15 +631,14 @@ def audio_pipeline_noise(noise_wav): for module in separator.modules.values(): separator.reset_layer_recursively(module) - if not hparams["test_only"]: - # Training - separator.fit( - separator.hparams.epoch_counter, - 
train_data, - valid_data, - train_loader_kwargs=hparams["dataloader_opts"], - valid_loader_kwargs=hparams["dataloader_opts"], - ) + # Training + separator.fit( + separator.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_opts"], + valid_loader_kwargs=hparams["dataloader_opts"], + ) # Eval separator.evaluate(test_data, min_key="si-snr") diff --git a/recipes/LibriParty/VAD/README.md b/recipes/LibriParty/VAD/README.md index 90279699f2..39fce57285 100644 --- a/recipes/LibriParty/VAD/README.md +++ b/recipes/LibriParty/VAD/README.md @@ -1,8 +1,8 @@ # Voice Activity Detection (VAD) with LibriParty -This folder contains scripts for training a VAD with the [LibriParty dataset](https://drive.google.com/file/d/1--cAS5ePojMwNY5fewioXAv9YlYAWzIJ/view?usp=sharing). +This folder contains scripts for training a VAD with the [LibriParty dataset](https://www.dropbox.com/s/8zcn6zx4fnxvfyt/LibriParty.tar.gz?dl=0). LibriParty contains sequences of 1 minute compose of speech sentences (sampled from LibriSpeech) corrupted by noise and reverberation. -Data augmentation with open_rir, musan, CommonLanguge is used as well. Make sure you download all the datasets before staring the experiment: -- LibriParty: https://drive.google.com/file/d/1--cAS5ePojMwNY5fewioXAv9YlYAWzIJ/view?usp=sharing +Data augmentation with open_rir, musan, CommonLanguage is used as well. Make sure you download all the datasets before staring the experiment: +- LibriParty: https://www.dropbox.com/s/8zcn6zx4fnxvfyt/LibriParty.tar.gz?dl=0 - Musan: https://www.openslr.org/resources/17/musan.tar.gz - CommonLanguage: https://zenodo.org/record/5036977/files/CommonLanguage.tar.gz?download=1 @@ -16,7 +16,7 @@ Run the following command to train the model: # Results | Release | hyperparams file | Test Precision | Test Recall. 
| Test F-Score | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| -----------:| -----------:| -| 2021-09-09 | train.yaml | 0.9518 | 0.9437 | 0.9477 | [Model](https://drive.google.com/drive/folders/1YLYGuiyuTH0D7fXOOp6cMddfQoM74o-Y?usp=sharing) | 1xV100 16GB | +| 2021-09-09 | train.yaml | 0.9518 | 0.9437 | 0.9477 | [Model](https://www.dropbox.com/sh/6yguuzn4pybjasd/AABpUF8LAQ8d2TJyC8aK2OBga?dl=0) | 1xV100 16GB | # Training Time @@ -29,7 +29,7 @@ The pre-trained model + easy inference is available on HuggingFace: Basically, you can run inference with only a few lines of code: ```python -from speechbrain.pretrained import VAD +from speechbrain.inference import VAD VAD = VAD.from_hparams(source="speechbrain/vad-crdnn-libriparty", savedir="pretrained_models/vad-crdnn-libriparty") boundaries = VAD.get_speech_segments("speechbrain/vad-crdnn-libriparty/example_vad.wav") @@ -47,6 +47,15 @@ VAD.save_boundaries(boundaries) Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriParty/VAD/commonlanguage_prepare.py b/recipes/LibriParty/VAD/commonlanguage_prepare.py index ea5867d60d..a3566c2b7f 100644 --- a/recipes/LibriParty/VAD/commonlanguage_prepare.py +++ b/recipes/LibriParty/VAD/commonlanguage_prepare.py @@ -1,10 +1,11 @@ import os -import logging -import torchaudio + import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import get_all_files +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) COMMON_LANGUAGE_URL = ( "https://zenodo.org/record/5036977/files/CommonLanguage.tar.gz?download=1" @@ -33,13 +34,14 @@ def 
prepare_commonlanguage(folder, csv_file, max_noise_len=None): def _prepare_csv(folder, filelist, csv_file, max_length=None): """Iterate a set of wavs and write the corresponding csv file. + Arguments --------- folder : str The folder relative to which the files in the list are listed. filelist : str The location of a file listing the files to be used. - csvfile : str + csv_file : str The location to use for writing the csv file. max_length : float The maximum length in seconds. Waveforms longer @@ -47,18 +49,17 @@ def _prepare_csv(folder, filelist, csv_file, max_length=None): """ try: if sb.utils.distributed.if_main_process(): - with open(csv_file, "w") as w: + with open(csv_file, "w", encoding="utf-8") as w: w.write("ID,duration,wav,wav_format,wav_opts\n\n") for line in filelist: - # Read file for duration/channel info filename = os.path.join(folder, line.split()[-1]) - signal, rate = torchaudio.load(filename) + signal, rate = audio_io.load(filename) # Ensure only one channel if signal.shape[0] > 1: signal = signal[0].unsqueeze(0) - torchaudio.save(filename, signal, rate) + audio_io.save(filename, signal, rate) ID, ext = os.path.basename(filename).split(".") duration = signal.shape[1] / rate @@ -75,7 +76,7 @@ def _prepare_csv(folder, filelist, csv_file, max_length=None): new_filename = ( filename[: -len(f".{ext}")] + f"_{i}.{ext}" ) - torchaudio.save( + audio_io.save( new_filename, signal[:, start:stop], rate ) csv_row = ( diff --git a/recipes/LibriParty/VAD/data_augment.py b/recipes/LibriParty/VAD/data_augment.py index f4639195f2..261bc795ad 100644 --- a/recipes/LibriParty/VAD/data_augment.py +++ b/recipes/LibriParty/VAD/data_augment.py @@ -4,9 +4,10 @@ * Mirco Ravanelli 2020 """ +import random + import torch import torchaudio -import random # fade-in/fade-out definition fade_in = torchaudio.transforms.Fade(fade_in_len=1000, fade_out_len=0) @@ -33,16 +34,17 @@ def add_chunk( min_len-max_len range. The shift is controlled by the chunk_shift parameter. 
- Arguments --------- - wav : torch.tensor + wav: torch.Tensor The waveform to append. - wav_chunk : torch.tensor + wav_chunk: torch.Tensor The existing waveform where to append the new source. + target: torch.Tensor + Old target. sample_rate: int The sample rate of the input waveforms. - time_resolution: + time_resolution: float Time resolution of the targets (in seconds)- example_length: float Duration (in seconds) of the existing chunk. @@ -68,12 +70,12 @@ def add_chunk( Returns ------- - wav_chunk: torch.tensor + wav_chunk: torch.Tensor The new waveform with the added signal. - target: torch.tensor + target: torch.Tensor The new targets corresponding to the output signal. - lengths: torch.tensor - relative lenghts of each chunk. + lengths: torch.Tensor + relative lengths of each chunk. end_chunk: int The last sample of the appended sequence. It can be used later to add another source that do not overlap with the current one. @@ -134,11 +136,11 @@ def add_chunk( end_speech_target = int(end_chunk / (sample_rate * time_resolution)) target[:, beg_speech_target:end_speech_target] = 1 - # Lenth computation - lenghts = torch.ones( + # Length computation + lengths = torch.ones( wav_chunk.shape[0], wav_chunk.shape[-1], device=wav.device ) - return wav_chunk, target, lenghts, end_chunk + return wav_chunk, target, lengths, end_chunk def initialize_targets(wav, sample_rate, time_resolution): @@ -160,13 +162,13 @@ def get_samples_from_datasets(datasets, wav): List containing datasets. More precisely, we expect here the pointers to the object used in speechbrain for data augmentation (e.g, speechbrain.lobes.augment.EnvCorrupt). - wav : torch.tensor + wav : torch.Tensor The original waveform. The drawn samples will have the same dimensionality of the original waveform. Returns ------- - samples: torch.tensor + samples: torch.Tensor A batch of new samples drawn from the input list of datasets. 
""" # We want a sample of the same size of the original signal @@ -176,7 +178,6 @@ def get_samples_from_datasets(datasets, wav): # Let's sample a sequence from each dataset for i, dataset in enumerate(datasets): - # Initialize the signal with noise wav_sample = (torch.rand_like(wav) * 2) - 1 len_sample = torch.ones(wav.shape[0], device=wav.device) @@ -209,18 +210,17 @@ def create_chunks( """This method creates augmented data for training the VAD. It sums up two delayed sources + a noise background. - Arguments --------- - wav1 : torch.tensor + wav1 : torch.Tensor The waveform for source 1. - wav2 : torch.tensor + wav2 : torch.Tensor The waveform for source 2. - background : torch.tensor + background : torch.Tensor The waveform for background noise. sample_rate: int The sample rate of the input waveforms. - time_resolution: + time_resolution: float Time resolution of the targets (in seconds)- example_length: float Duration (in seconds) of the existing chunk. @@ -240,11 +240,11 @@ def create_chunks( Returns ------- - wavs: torch.tensor + wavs: torch.Tensor The generated speech signal. - target: torch.tensor + target: torch.Tensor The new targets corresponding to the generated signal. - lengths: torch.tensor + lengths: torch.Tensor relative lengths of each chunk. """ @@ -312,22 +312,22 @@ def augment_data(noise_datasets, speech_datasets, wavs, targets, lens_targ): List containing noise datasets. More precisely, we expect here the pointers to the object used in speechbrain for data augmentation (e.g, speechbrain.lobes.augment.EnvCorrupt). - wav: torch.tensor + wavs: torch.Tensor The original waveform. - targets: torch.tensor + targets: torch.Tensor The original targets. - lens_tar: torch.tensor - The lenght of the original targets. + lens_targ: torch.Tensor + The length of the original targets. 
Returns ------- - wavs: torch.tensor + wavs: torch.Tensor The output batch with the augmented signals - target: torch.tensor + target: torch.Tensor The new targets corresponding to the augmented signals. - lengths: torch.tensor - relative lenghts of each element in the batch. + lengths: torch.Tensor + relative lengths of each element in the batch. """ # Sample a noise sequence wav_samples_noise = get_samples_from_datasets(noise_datasets, wavs) diff --git a/recipes/LibriParty/VAD/extra-dependencies.txt b/recipes/LibriParty/VAD/extra-dependencies.txt deleted file mode 100644 index fb6c7ed7ec..0000000000 --- a/recipes/LibriParty/VAD/extra-dependencies.txt +++ /dev/null @@ -1 +0,0 @@ -pandas diff --git a/recipes/LibriParty/VAD/hparams/train.yaml b/recipes/LibriParty/VAD/hparams/train.yaml index cdee478a30..7cbe4e6c23 100644 --- a/recipes/LibriParty/VAD/hparams/train.yaml +++ b/recipes/LibriParty/VAD/hparams/train.yaml @@ -3,7 +3,7 @@ # This code heavily relis on on-the-fly data augmentation using external data. # Before running the code, please download the needed datasets: # -# - LibriParty: https://drive.google.com/file/d/1--cAS5ePojMwNY5fewioXAv9YlYAWzIJ/view?usp=sharing +# - LibriParty: https://www.dropbox.com/s/8zcn6zx4fnxvfyt/LibriParty.tar.gz?dl=0 # - Musan: https://www.openslr.org/resources/17/musan.tar.gz # - CommonLanguage: https://zenodo.org/record/5036977/files/CommonLanguage.tar.gz?download=1 # @@ -14,7 +14,7 @@ # Seed and output folders seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/VAD_CRDNN/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -22,8 +22,12 @@ train_log: !ref /train_log.txt # LibriParty (main data) data_folder: !PLACEHOLDER # e.g. /path/to/LibriParty +# Openrir Dataset for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. 
+NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_csv_openrir: !ref /noise_openrir.csv #The data manifest files are created by the data preparation script + # Additional data (for augmentation) -open_rir_folder: !ref # where to store noisy +ris from open_rir musan_folder: !PLACEHOLDER # e.g, /path/to/musan (download it from the web before) commonlanguage_folder: !PLACEHOLDER # e.g, /path/to/commonlang (download it from the web before) @@ -37,7 +41,7 @@ speech_csv: !ref /speech.csv multilang_speech_csv: !ref /multilang_speech.csv skip_prep: False # Skip data preparation -# Training parameters +####################### Training Parameters #################################### N_epochs: 100 lr: 1.0 lr_final: 0.1 @@ -45,18 +49,23 @@ batch_size: 2 example_length: 5 # in seconds sample_rate: 16000 time_resolution: 0.01 # in seconds + +num_workers: 4 train_dataloader_opts: batch_size: !ref + num_workers: !ref valid_dataloader_opts: batch_size: !ref + num_workers: !ref test_dataloader_opts: batch_size: !ref + num_workers: !ref # Feature parameters n_fft: 400 n_mels: 40 -# Model parameters +####################### Model Parameters ####################################### # activation: !name:torch.nn.LeakyReLU # dropout: 0.15 # cnn_blocks: 2 @@ -71,54 +80,52 @@ output_neurons: 1 # Data augmentation -add_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: -5 - noise_snr_high: -15 - -# noise_corruption: !new:speechbrain.lobes.augment.EnvCorrupt -# openrir_folder: !ref -# babble_prob: 0.0 -# reverb_prob: 0.0 -# noise_prob: 1.0 -# noise_snr_low: 5 -# noise_snr_high: 15 - -add_noise_musan: !new:speechbrain.lobes.augment.EnvCorrupt - noise_csv: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: -15 - noise_snr_high: -20 - -add_music_musan: !new:speechbrain.lobes.augment.EnvCorrupt - 
noise_csv: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: -15 - noise_snr_high: -20 - -add_speech_musan: !new:speechbrain.lobes.augment.EnvCorrupt - noise_csv: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: -15 - noise_snr_high: -20 - -# add_speech_multilang: !new:speechbrain.lobes.augment.EnvCorrupt -# noise_csv: !ref -# babble_prob: 0.0 -# reverb_prob: 0.0 -# noise_prob: 1.0 -# noise_snr_low: -15 -# noise_snr_high: -20 - +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: -5 + snr_high: 15 + noise_sample_rate: 16000 + clean_sample_rate: 16000 + num_workers: !ref + +add_noise_musan: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: -5 + snr_high: 15 + noise_sample_rate: 16000 + clean_sample_rate: 16000 + num_workers: !ref + +add_music_musan: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: -5 + snr_high: 15 + noise_sample_rate: 16000 + clean_sample_rate: 16000 + num_workers: !ref + +add_speech_musan: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: -5 + snr_high: 15 + noise_sample_rate: 16000 + clean_sample_rate: 16000 + num_workers: !ref + +#add_speech_multilang: !new:speechbrain.augment.time_domain.AddNoise +# csv_file: !ref +# snr_low: -5 +# snr_high: 15 +# noise_sample_rate: 16000 +# clean_sample_rate: 16000 +# num_workers: !ref # Models compute_features: !new:speechbrain.lobes.features.Fbank diff --git a/recipes/LibriParty/VAD/libriparty_prepare.py b/recipes/LibriParty/VAD/libriparty_prepare.py index bb62980838..a0685f847a 100644 --- a/recipes/LibriParty/VAD/libriparty_prepare.py +++ b/recipes/LibriParty/VAD/libriparty_prepare.py @@ -1,38 +1,39 @@ -""" This script prepares the 
data-manifest files (in JSON format) +"""This script prepares the data-manifest files (in JSON format) for training and testing a Voice Activity Detection system with the LibriParty dataset. -The dataset contains sequences of 1-minutes of LibiSpeech sentences +The dataset contains sequences of 1-minutes of LibriSpeech sentences corrupted by noise and reverberation. The dataset can be downloaded from here: -https://drive.google.com/file/d/1--cAS5ePojMwNY5fewioXAv9YlYAWzIJ/view?usp=sharing +https://www.dropbox.com/s/8zcn6zx4fnxvfyt/LibriParty.tar.gz?dl=0 Authors * Mohamed Kleit 2021 * Arjun V 2021 """ -import numpy as np -import pandas as pd import json -import logging from collections import OrderedDict +import numpy as np +import pandas as pd + +from speechbrain.utils.logger import get_logger """ Global variables""" -logger = logging.getLogger(__name__) +logger = get_logger(__name__) valid_json_dataset = {} def load_data_json(path): - with open(path) as f: + with open(path, encoding="utf-8") as f: json_file = json.load(f) return json_file def clean_dataframe(df): - # Drop unecessary columns + # Drop unnecessary columns df.drop( [ "utt_id", @@ -219,7 +220,7 @@ def create_json_dataset(dic, sample_rate, window_size): def save_dataset(json_save_path, json_dataset): """Saves a JSON file.""" - with open(json_save_path, "w+") as fp: + with open(json_save_path, "w+", encoding="utf-8") as fp: json.dump(json_dataset, fp, indent=4) @@ -237,21 +238,25 @@ def prepare_libriparty( --------- data_folder : str Path to the folder where the original LibriSpeech dataset is stored. - data_folder : str + save_json_folder : str The path where to store the training json file. - save_json_valid : str - The path where to store the valid json file. - save_json_test : str - The path where to store the test json file. + sample_rate : int + Sampling rate for the audio. + window_size : int + Size of window for creating splits. 
skip_prep: bool Default: False If True, the data preparation is skipped. + Returns + ------- + None + Example ------- >>> from recipes.LibriParty.libriparty_prepare import prepare_libriparty - >>> data_folder = 'datasets/LibriParty' - >>> prepare_libriparty(data_folder, 'train.json', 'valid.json', 'test.json') + >>> data_folder = "datasets/LibriParty" + >>> prepare_libriparty(data_folder, "train.json", "valid.json", "test.json") """ # Skip if needed diff --git a/recipes/LibriParty/VAD/musan_prepare.py b/recipes/LibriParty/VAD/musan_prepare.py index 0f51547855..daf5ada982 100644 --- a/recipes/LibriParty/VAD/musan_prepare.py +++ b/recipes/LibriParty/VAD/musan_prepare.py @@ -1,10 +1,11 @@ import os -import logging -import torchaudio + import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import get_all_files +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) def prepare_musan(folder, music_csv, noise_csv, speech_csv, max_noise_len=None): @@ -14,8 +15,12 @@ def prepare_musan(folder, music_csv, noise_csv, speech_csv, max_noise_len=None): --------- folder : str The location of the folder containing the dataset. + music_csv : str + Filename for storing the prepared music csv. noise_csv : str Filename for storing the prepared noise csv. + speech_csv : str + Filename for storing the prepared speech csv. max_noise_len : float The maximum noise length in seconds. Noises longer than this will be cut into pieces. @@ -35,13 +40,14 @@ def prepare_musan(folder, music_csv, noise_csv, speech_csv, max_noise_len=None): def _prepare_csv(folder, filelist, csv_file, max_length=None): """Iterate a set of wavs and write the corresponding csv file. + Arguments --------- folder : str The folder relative to which the files in the list are listed. filelist : str The location of a file listing the files to be used. 
- csvfile : str + csv_file : str The location to use for writing the csv file. max_length : float The maximum length in seconds. Waveforms longer @@ -49,18 +55,17 @@ def _prepare_csv(folder, filelist, csv_file, max_length=None): """ try: if sb.utils.distributed.if_main_process(): - with open(csv_file, "w") as w: + with open(csv_file, "w", encoding="utf-8") as w: w.write("ID,duration,wav,wav_format,wav_opts\n\n") for line in filelist: - # Read file for duration/channel info filename = os.path.join(folder, line.split()[-1]) - signal, rate = torchaudio.load(filename) + signal, rate = audio_io.load(filename) # Ensure only one channel if signal.shape[0] > 1: signal = signal[0].unsqueeze(0) - torchaudio.save(filename, signal, rate) + audio_io.save(filename, signal, rate) ID, ext = os.path.basename(filename).split(".") duration = signal.shape[1] / rate @@ -77,7 +82,7 @@ def _prepare_csv(folder, filelist, csv_file, max_length=None): new_filename = ( filename[: -len(f".{ext}")] + f"_{i}.{ext}" ) - torchaudio.save( + audio_io.save( new_filename, signal[:, start:stop], rate ) csv_row = ( diff --git a/recipes/LibriParty/VAD/train.py b/recipes/LibriParty/VAD/train.py index ea16333d1a..33f0fbf580 100644 --- a/recipes/LibriParty/VAD/train.py +++ b/recipes/LibriParty/VAD/train.py @@ -2,10 +2,10 @@ """ Recipe for training a Voice Activity Detection (VAD) model on LibriParty. This code heavily relis on data augmentation with external datasets. -(e.g, open_rir, musan, CommonLanguge is used as well). +(e.g, open_rir, musan, CommonLanguage is used as well). 
Make sure you download all the datasets before staring the experiment: -- LibriParty: https://drive.google.com/file/d/1--cAS5ePojMwNY5fewioXAv9YlYAWzIJ/view?usp=sharing +- LibriParty: https://www.dropbox.com/s/8zcn6zx4fnxvfyt/LibriParty.tar.gz?dl=0 - Musan: https://www.openslr.org/resources/17/musan.tar.gz - CommonLanguage: https://zenodo.org/record/5036977/files/CommonLanguage.tar.gz?download=1 @@ -23,15 +23,17 @@ """ import sys -import torch -import logging + import numpy as np -import speechbrain as sb +import torch +from data_augment import augment_data from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main -from data_augment import augment_data +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class VADBrain(sb.Brain): @@ -55,7 +57,7 @@ def compute_forward(self, batch, stage): self.lens = lens self.targets = targets - # From wav input to output binary prediciton + # From wav input to output binary prediction feats = self.hparams.compute_features(wavs) feats = self.modules.mean_var_norm(feats, lens) feats = feats.detach() @@ -124,7 +126,7 @@ def on_stage_end(self, stage, stage_loss, epoch=None): meta={"loss": stage_loss, "summary": summary}, num_to_keep=1, min_keys=["loss"], - name="epoch_{}".format(epoch), + name=f"epoch_{epoch}", ) elif stage == sb.Stage.TEST: @@ -201,12 +203,11 @@ def vad_targets(speech, hparams=hparams): # Begin Recipe! 
if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) @@ -233,30 +234,35 @@ def vad_targets(speech, hparams=hparams): }, ) + # Prepare openrir + run_on_main(hparams["prepare_noise_data"]) + # Prepare Musan from musan_prepare import prepare_musan - run_on_main( - prepare_musan, - kwargs={ - "folder": hparams["musan_folder"], - "music_csv": hparams["music_csv"], - "noise_csv": hparams["noise_csv"], - "speech_csv": hparams["speech_csv"], - "max_noise_len": hparams["example_length"], - }, - ) + if not hparams["skip_prep"]: + run_on_main( + prepare_musan, + kwargs={ + "folder": hparams["musan_folder"], + "music_csv": hparams["music_csv"], + "noise_csv": hparams["noise_csv"], + "speech_csv": hparams["speech_csv"], + "max_noise_len": hparams["example_length"], + }, + ) # Prepare common from commonlanguage_prepare import prepare_commonlanguage - run_on_main( - prepare_commonlanguage, - kwargs={ - "folder": hparams["commonlanguage_folder"], - "csv_file": hparams["multilang_speech_csv"], - }, - ) + if not hparams["skip_prep"]: + run_on_main( + prepare_commonlanguage, + kwargs={ + "folder": hparams["commonlanguage_folder"], + "csv_file": hparams["multilang_speech_csv"], + }, + ) # Dataset IO prep: creating Dataset objects train_data, valid_data, test_data = dataio_prep(hparams) diff --git a/recipes/LibriParty/generate_dataset/README.md b/recipes/LibriParty/generate_dataset/README.md index fbb7d997a3..f4a966a76f 100644 --- a/recipes/LibriParty/generate_dataset/README.md +++ b/recipes/LibriParty/generate_dataset/README.md @@ -38,7 +38,7 @@ It also requires background QUT-TIMIT noises. 
The metadata are downloaded from t You need to specify *metadata_folder*, *out_folder* and paths to downloaded source datasets: Librispeech, noises and impulse responses and QUT noise. -- step 3: run *get_dataset_from_metadata.py* +- step 3: run *python get_dataset_from_metadata.py dataset.yaml* #### Custom: Follow the next steps to create a novel LibriParty datasets. @@ -84,6 +84,15 @@ in Proceedings of Interspeech 2010, Makuhari Messe International Convention Comp Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriParty/generate_dataset/create_custom_dataset.py b/recipes/LibriParty/generate_dataset/create_custom_dataset.py index 
2ff70daf1d..6a475d4c9f 100644 --- a/recipes/LibriParty/generate_dataset/create_custom_dataset.py +++ b/recipes/LibriParty/generate_dataset/create_custom_dataset.py @@ -6,23 +6,24 @@ Samuele Cornell, 2020 """ - -import os -import sys import json +import os import random +import sys +from pathlib import Path + import numpy as np -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.data_utils import get_all_files -from local.create_mixtures_metadata import create_metadata from local.create_mixtures_from_metadata import create_mixture -from pathlib import Path +from local.create_mixtures_metadata import create_metadata from tqdm import tqdm +import speechbrain as sb +from speechbrain.utils.data_utils import get_all_files + # Load hyperparameters file with command-line overrides params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:]) -with open(params_file) as fin: +with open(params_file, encoding="utf-8") as fin: params = load_hyperpyyaml(fin, overrides) # setting seeds for reproducible code. 
@@ -36,10 +37,10 @@ def split_list(array, split_factors): np.random.shuffle(array) pivots = [int(len(array) * x) for x in split_factors] out = [] - indx = 0 + index = 0 for i in pivots: - out.append(array[indx : i + indx]) - indx = i + out.append(array[index : i + index]) + index = i return out @@ -54,7 +55,7 @@ def parse_libri_folder(libri_folders): # step 2: we then build an hashtable for words for each utterance words_dict = {} for trans in txt_files: - with open(trans, "r") as f: + with open(trans, encoding="utf-8") as f: for line in f: splitted = line.split(" ") utt_id = splitted[0] @@ -95,8 +96,8 @@ def parse_libri_folder(libri_folders): os.makedirs(os.path.join(params["out_folder"], "metadata"), exist_ok=True) # we generate metadata for each split -for indx, split in enumerate(["train", "dev", "eval"]): - print("Generating metadata for {} set".format(split)) +for index, split in enumerate(["train", "dev", "eval"]): + print(f"Generating metadata for {split} set") # we parse librispeech utterances for current split c_libri_folder = params["librispeech_folders"][split] c_utterances, c_words = parse_libri_folder(c_libri_folder) @@ -107,20 +108,21 @@ def parse_libri_folder(libri_folders): params, c_utterances, c_words, - rirs[indx], - noises[indx], - backgrounds[indx], + rirs[index], + noises[index], + backgrounds[index], ) # from metadata we generate the actual mixtures -for indx, split in enumerate(["train", "dev", "eval"]): +for index, split in enumerate(["train", "dev", "eval"]): # load metadata with open( - os.path.join(params["out_folder"], "metadata", split + ".json") + os.path.join(params["out_folder"], "metadata", split + ".json"), + encoding="utf-8", ) as f: c_meta = json.load(f) - print("Creating {} set".format(split)) + print(f"Creating {split} set") for sess in tqdm(c_meta.keys()): c_folder = os.path.join(params["out_folder"], split) os.makedirs(c_folder, exist_ok=True) diff --git a/recipes/LibriParty/generate_dataset/dataset.yaml 
b/recipes/LibriParty/generate_dataset/dataset.yaml index 39ec7cb1f0..3dcddb9bf9 100644 --- a/recipes/LibriParty/generate_dataset/dataset.yaml +++ b/recipes/LibriParty/generate_dataset/dataset.yaml @@ -17,7 +17,8 @@ save_dry_sources: False # Source datasets paths # ######################### -librispeech_root: /media/sam/bx500/LibriSpeech +librispeech_root: !PLACEHOLDER # e.g., /workspace/LibriParty/LibriSpeech/ +# /media/sam/bx500/LibriSpeech # root path to librispeech: download from https://openslr.org/12/ librispeech_folders: # folders one wants to use for the train dataset. @@ -28,13 +29,13 @@ librispeech_folders: # folders one wants to use for the train dataset. eval: - !ref /test-clean/ -rirs_noises_root: /media/sam/bx500/LibriParty/RIRS_NOISES/ +rirs_noises_root: !PLACEHOLDER #e.g., /workspace/LibriParty/RIRS_NOISES/ rirs_folders: - !ref /simulated_rirs/ - !ref /real_rirs_isotropic_noises noises_folders: - !ref /pointsource_noises/ -backgrounds_root: /media/sam/bx500/LibriParty/QUT_NOISE_16kHz/ +backgrounds_root: !PLACEHOLDER # e.g., /workspace/LibriParty/QUT_NOISE_16kHz/ # optional background noise from QUT (required for "official" dataset) # One can use also other background noises. 
diff --git a/recipes/LibriParty/generate_dataset/download_required_data.py b/recipes/LibriParty/generate_dataset/download_required_data.py index a85dea4b7c..a2a12d7b2c 100644 --- a/recipes/LibriParty/generate_dataset/download_required_data.py +++ b/recipes/LibriParty/generate_dataset/download_required_data.py @@ -8,9 +8,11 @@ import argparse import os -from speechbrain.utils.data_utils import download_file + from local.resample_folder import resample_folder +from speechbrain.utils.data_utils import download_file + LIBRISPEECH_URLS = [ "http://www.openslr.org/resources/12/test-clean.tar.gz", "http://www.openslr.org/resources/12/dev-clean.tar.gz", diff --git a/recipes/LibriParty/generate_dataset/get_dataset_from_metadata.py b/recipes/LibriParty/generate_dataset/get_dataset_from_metadata.py index 0c96631707..774996badd 100644 --- a/recipes/LibriParty/generate_dataset/get_dataset_from_metadata.py +++ b/recipes/LibriParty/generate_dataset/get_dataset_from_metadata.py @@ -6,23 +6,24 @@ Mirco Ravanelli, 2020 """ - +import json import os import sys -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.data_utils import download_file from local.create_mixtures_from_metadata import create_mixture -import json from tqdm import tqdm +import speechbrain as sb +from speechbrain.utils.data_utils import download_file + URL_METADATA = ( "https://www.dropbox.com/s/0u6x6ndyedb4rl7/LibriParty_metadata.zip?dl=1" ) # Load hyperparameters file with command-line overrides params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:]) -with open(params_file) as fin: +with open(params_file, encoding="utf-8") as fin: params = load_hyperpyyaml(fin, overrides) metadata_folder = params["metadata_folder"] @@ -38,9 +39,12 @@ ) for data_split in ["train", "dev", "eval"]: - with open(os.path.join(metadata_folder, data_split + ".json"), "r") as f: + with open( + os.path.join(metadata_folder, data_split + ".json"), + encoding="utf-8", + ) as f: 
metadata = json.load(f) - print("Creating data for {} set".format(data_split)) + print(f"Creating data for {data_split} set") c_folder = os.path.join(params["out_folder"], data_split) os.makedirs(c_folder, exist_ok=True) for sess in tqdm(metadata.keys()): diff --git a/recipes/LibriParty/generate_dataset/local/create_mixtures_from_metadata.py b/recipes/LibriParty/generate_dataset/local/create_mixtures_from_metadata.py index a8b9fa184f..2232543b0e 100644 --- a/recipes/LibriParty/generate_dataset/local/create_mixtures_from_metadata.py +++ b/recipes/LibriParty/generate_dataset/local/create_mixtures_from_metadata.py @@ -7,12 +7,13 @@ Samuele Cornell, 2020 """ - -import os -import torch import json +import os + import numpy as np -import torchaudio +import torch + +from speechbrain.dataio import audio_io from speechbrain.processing.signal_processing import rescale, reverberate @@ -37,7 +38,7 @@ def create_mixture(session_n, output_dir, params, metadata): wet = torch.zeros(tot_length) for utt in metadata[spk]: - c_audio, fs = torchaudio.load( + c_audio, fs = audio_io.load( os.path.join(params["librispeech_root"], utt["file"]) ) assert fs == params["samplerate"] @@ -57,7 +58,7 @@ def create_mixture(session_n, output_dir, params, metadata): if params["save_dry_sources"]: dry[dry_start:dry_stop] += c_audio # we add now reverb and put it in wet - c_rir, fs = torchaudio.load( + c_rir, fs = audio_io.load( os.path.join(params["rirs_noises_root"], utt["rir"]) ) assert fs == params["samplerate"] @@ -89,36 +90,37 @@ def create_mixture(session_n, output_dir, params, metadata): # save per speaker clean sources if params["save_dry_sources"]: - torchaudio.save( + audio_io.save( os.path.join( output_dir, session_n, - "session_{}_spk_{}_dry.wav".format(session_n, spk), + f"session_{session_n}_spk_{spk}_dry.wav", ), torch.clamp(dry, min=-1, max=1), params["samplerate"], ) if params["save_wet_sources"]: - torchaudio.save( + audio_io.save( os.path.join( output_dir, session_n, - 
"session_{}_spk_{}_wet.wav".format(session_n, spk), + f"session_{session_n}_spk_{spk}_wet.wav", ), torch.clamp(wet, min=-1, max=1), params["samplerate"], ) with open( - os.path.join(output_dir, session_n, "{}.json".format(session_n)), "w" + os.path.join(output_dir, session_n, f"{session_n}.json"), + "w", + encoding="utf-8", ) as f: json.dump(session_meta, f, indent=4) # add impulsive noises for noise_event in metadata["noises"]: - - c_audio, fs = torchaudio.load( + c_audio, fs = audio_io.load( os.path.join(params["rirs_noises_root"], noise_event["file"]) ) assert fs == params["samplerate"] @@ -137,7 +139,7 @@ def create_mixture(session_n, output_dir, params, metadata): dry_start = int(noise_event["start"] * params["samplerate"]) # dry_stop = dry_start + c_audio.shape[-1] # we add now reverb and put it in wet - c_rir, fs = torchaudio.load( + c_rir, fs = audio_io.load( os.path.join(params["rirs_noises_root"], noise_event["rir"]) ) assert fs == params["samplerate"] @@ -151,7 +153,7 @@ def create_mixture(session_n, output_dir, params, metadata): # add background if metadata["background"]["file"]: - c_audio, fs = torchaudio.load( + c_audio, fs = audio_io.load( os.path.join( params["backgrounds_root"], metadata["background"]["file"] ), @@ -183,8 +185,8 @@ def create_mixture(session_n, output_dir, params, metadata): # save total mixture mixture = torch.clamp(mixture, min=-1, max=1) - torchaudio.save( - os.path.join(output_dir, session_n, "{}_mixture.wav".format(session_n)), + audio_io.save( + os.path.join(output_dir, session_n, f"{session_n}_mixture.wav"), mixture.unsqueeze(0), params["samplerate"], ) diff --git a/recipes/LibriParty/generate_dataset/local/create_mixtures_metadata.py b/recipes/LibriParty/generate_dataset/local/create_mixtures_metadata.py index ee90313bd0..f9a92092e5 100644 --- a/recipes/LibriParty/generate_dataset/local/create_mixtures_metadata.py +++ b/recipes/LibriParty/generate_dataset/local/create_mixtures_metadata.py @@ -7,24 +7,24 @@ Samuele Cornell, 
2020 """ +import json +from pathlib import Path import numpy as np -from pathlib import Path -import json -import os from tqdm import tqdm -import torchaudio + +from speechbrain.dataio import audio_io def _read_metadata(file_path, configs): - meta = torchaudio.info(file_path) + meta = audio_io.info(file_path) if meta.num_channels > 1: channel = np.random.randint(0, meta.num_channels - 1) else: channel = 0 - assert ( - meta.sample_rate == configs["samplerate"] - ), "file samplerate is different from the one specified" + assert meta.sample_rate == configs["samplerate"], ( + "file samplerate is different from the one specified" + ) return meta, channel @@ -39,10 +39,8 @@ def create_metadata( impulsive_noises_list=None, background_noises_list=None, ): - dataset_metadata = {} for n_sess in tqdm(range(n_sessions)): - # we sample randomly n_speakers ids c_speakers = np.random.choice( list(utterances_dict.keys()), configs["n_speakers"], replace=False @@ -201,9 +199,7 @@ def create_metadata( "channel": None, } - dataset_metadata["session_{}".format(n_sess)] = activity + dataset_metadata[f"session_{n_sess}"] = activity - with open( - os.path.join(configs["out_folder"], output_filename + ".json"), "w" - ) as f: + with open(output_filename + ".json", "w", encoding="utf-8") as f: json.dump(dataset_metadata, f, indent=4) diff --git a/recipes/LibriParty/generate_dataset/local/resample_folder.py b/recipes/LibriParty/generate_dataset/local/resample_folder.py index 481ebf09e1..529037261a 100644 --- a/recipes/LibriParty/generate_dataset/local/resample_folder.py +++ b/recipes/LibriParty/generate_dataset/local/resample_folder.py @@ -9,12 +9,15 @@ Samuele Cornell, 2020 """ -import os import argparse +import os from pathlib import Path -import tqdm -import torchaudio + import torch +import torchaudio +import tqdm + +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import get_all_files parser = argparse.ArgumentParser( @@ -29,17 +32,12 @@ def 
resample_folder(input_folder, output_folder, fs, regex): - files = get_all_files(input_folder, match_and=[regex]) - torchaudio.initialize_sox() for f in tqdm.tqdm(files): - # we use sox because torchaudio.Resample uses too much RAM. - resample = torchaudio.sox_effects.SoxEffectsChain() - resample.append_effect_to_chain("rate", [fs]) - resample.set_input_file(f) - - audio, fs = resample.sox_build_flow_effects() + audio, fs = torchaudio.sox_effects.apply_effects_file( + f, [["rate", str(fs)]] + ) audio = ( audio / torch.max(torch.abs(audio), dim=-1, keepdim=True)[0] @@ -52,17 +50,15 @@ def resample_folder(input_folder, output_folder, fs, regex): ).parent, exist_ok=True, ) - torchaudio.save( + audio_io.save( os.path.join( output_folder, Path(f).relative_to(Path(input_folder)) ), audio, fs, ) - torchaudio.shutdown_sox() if __name__ == "__main__": - args = parser.parse_args() resample_folder(args.input_folder, args.output_folder, args.fs, args.regex) diff --git a/recipes/LibriSpeech/ASR/CTC/README.md b/recipes/LibriSpeech/ASR/CTC/README.md index 8e1e36b70e..22041fa8cc 100644 --- a/recipes/LibriSpeech/ASR/CTC/README.md +++ b/recipes/LibriSpeech/ASR/CTC/README.md @@ -1,34 +1,149 @@ -# LibriSpeech ASR with CTC and pre-trained wav2vec2 models. -This folder contains the scripts to finetune a wav2vec2 based system using LibriSpeech. +# LibriSpeech ASR with CTC only or pre-trained wav2vec2 or whisper models. +This folder contains the scripts to finetune a wav2vec2 or a whisper based system using LibriSpeech. You can download LibriSpeech at http://www.openslr.org/12. +The loss function is the CTC loss and it is implemented in two different ways: +- Using the [CTCLoss](https://pytorch.org/docs/stable/generated/torch.nn.CTCLoss.html) from PyTorch. +- Using the [CTC implementation](https://github.com/k2-fsa/k2/blob/master/k2/python/k2/ctc_loss.py) from K2 (WFST-based). For an example of such recipe, check the `train_with_wav2vec_k2.py` file. 
-**Supported pre-trained wav2vec2:** [SpeechBrain]() and [HuggingFace]() +**Supported pre-trained wav2vec2:** [SpeechBrain](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/self-supervised-learning/wav2vec2) and [HuggingFace](https://github.com/speechbrain/speechbrain/tree/develop/recipes/CommonVoice/self-supervised-learning/wav2vec2) + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` # How to run +``` +python train.py hparams/file.yaml python train_with_wav2vec.py hparams/file.yaml +python train_with_whisper.py hparams/file.yaml +``` +To run a fine-tuning of "WavLM" with signal downsampled inputs (for faster training and inferences) + +``` +python train_with_wav2vec.py hparams/downsampled/train_hf_wavlm_signal_downsampling.yaml --downsampling_factor 2 +``` +To train a model from scratch (without any pre-training), please firstly go to the Tokenizer folder to train a tokenizer: -**If using a HuggingFace pre-trained model, please make sure you have "transformers" -installed in your environment (see extra-requirements.txt)** +``` +cd ../../Tokenizer +python train.py hparams/128_bpe.yaml +``` +Then, go back to this directory. You can train a Branchformer CTC model with: + +``` +python train.py hparams/train_branchformer.yaml +``` +or a Conformer CTC model with: + +``` +python train.py hparams/train_conformer.yaml +``` +# WFST-based CTC loss +To fine-tune a wav2vec 2.0 model with the WFST-based CTC loss, you can use the `train_with_wav2vec_k2.py` script. This will create a `lang` directory inside your output folder, which will contain the files required to build a lexicon FST. The tokenization method used here is a very basic character-based tokenization (e.g. `hello -> h e l l o`). + +To use this script, you will first need to install `k2`. 
The integration has been tested with `k2==1.24.4` and `torch==2.0.1`, although it should also work with any `torch` version as long as `k2` supports it (compatibility list [here](https://k2-fsa.github.io/k2/installation/pre-compiled-cuda-wheels-linux/index.html)). You can install `k2` by following the instructions [here](https://k2-fsa.github.io/k2/installation/from_wheels.html#linux-cuda-example). + +Using a lexicon FST (L) while training can help guide the model to better predictions. When decoding, you can either use a simple HL decoding graph (where H is the ctc topology), or use an HLG graph (where G is usually a 3-gram language model) to further improve the results. In addition, whole lattice rescoring is also supported. This typically happens with a 4-gram language model. See `hparams/train_with_wav2vec_k2.yaml`` for more details. + +If you choose to use a 3-gram or a 4-gram language model, you can either supply pre-existing ARPA LMs for both cases, including the option to train your own, or you can specify the name in the YAML docstring for automatic downloading. Comprehensive instructions are provided in `train_hf_wav2vec_k2.yaml`. + +For those interested in training their own language model, please consult our recipe at LibriSpeech/LM/train_ngram.py. + +Example usage: +``` +python train_with_wav2vec_k2.py hparams/train_hf_wav2vec_k2.yaml --data_folder=/path/to/LibriSpeech +``` + +To use the HLG graph (instead of the default HL), pass `--compose_HL_with_G=True`. To use the 4-gram LM for rescoring, pass the `--decoding_method=whole-lattice-rescoring` argument. Note that this will require more memory, as the whole lattice will be kept in memory during the decoding. In this recipe, the `lm_scale` used by default is 0.4. This is the value that gave the best results in our HL-graph experiments after trying scales of `[0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4]`. When rescoring is used alongside the HLG graph, the 4-gram seems to not bring any improvement. 
The best lm scale in that case was 0.2 (the lowest value we tried). + +# KenLM n-gram CTC rescoring +To enable n-gram rescoring during the decoding, you can download the LibriSpeech official LM from [here](https://www.openslr.org/11/). Please make sure to install the extra dependencies first. Any KenLM language model may be used with this rescoring technique. The n-gram can either be a binary or an arpa file, but note that the binary format is faster to load. The following command shows how to use the official LibriSpeech 4-gram LM with SpeechBrain: +```bash +wget https://openslr.elda.org/resources/11/4-gram.arpa.gz +gzip -d 4-gram.arpa.gz +python train_with_wav2vec.py hparams/file.yaml --kenlm_model_path='4-gram.arpa' +``` + +# Rescoring with a Neural Language Model +Two yamls do support LM rescoring: `train_hf_wav2vec_rnn_rescoring.yaml` and `train_hf_wav2vec_transformer_rescoring.yaml`. The first one uses a RNN LM, while the second one uses a Transformer LM. Both LMs are already pretrained on LibriSpeech (see [RNNLM](https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech) and [TransformerLM](https://huggingface.co/speechbrain/asr-transformer-transformerlm-librispeech)). The acoustic model (wav2vec2) generates a list of hypotheses (called n-best), which are then rescored (aka re-ranked) by the LM. The LM rescores by computing the score of each hypothesis by summing the log-probabilities of each tokens with respect to the previous tokens. The LM score is then added to the acoustic model score to obtain the final score. Using this technique, will results in better WERs. For instance, we went from 1.95 to 1.57 of WER. However, note that the inference time will be slower. + +Two parameters need to be tuned: `topk` (and `beam_size` to have enough topk) and `lm_weight`. Increasing `topk` will increase the number of hypotheses to be rescored, and ultimately the inference time. 
Increasing `lm_weight` will increase the importance of the LM score in the final score. The following command shows how to use the RNN LM with SpeechBrain: +```bash +python train_with_wav2vec.py hparams/train_hf_wav2vec_rnn_rescoring.yaml --data_folder=/path/to/LibriSpeech/ --topk=50 --beam_size=50 --lm_weight=0.5 +``` +Note: by default, `topk` is set to 20 as it gives a good trade-off between WER and inference time. # Results -| Release | Hyperparams file | Finetuning Split | Test Clean WER | HuggingFace link | Full model link | GPUs | -|:-------------:|:---------------------------:| :-----:| :-----:| :-----:| :-----:| :--------:| -| 09-09-21 | train_hf_wav2vec.yaml | 960h | 1.90 | Not Avail. | [Link](https://drive.google.com/drive/folders/1pg0QzW-LqAISG8Viw_lUTGjXwOqh7gkl?usp=sharing) | 1xRTX8000 48GB | -| 22-09-22 | train_sb_wav2vec.yaml | 100h | 7.X | Not Avail. | Not Avail. | 1xTesla V100 32GB | +| Release | Hyperparams file | Decoding method | Finetuning Split | Test-clean WER | GPU- Test-clean Inference Time | Test-other WER | GPU- Test-other Inference Time | HuggingFace link | Full model link | Inference GPUs | Training GPUs | +|:-------------:|:---------------------------:| :----------:| :-----:| :-----:| :-----:| :-----:| :-----:| :-----:| :-----:| :--------:| :--------:| +| 05-08-23 | train_hf_wav2vec.yaml | GreedySearch | 960h | 2.12 | 1min30s | 4.31| 1min24s | [Link](https://huggingface.co/speechbrain/asr-wav2vec2-librispeech) | [Link](https://www.dropbox.com/sh/qj2ps85g8oiicrj/AAAxlkQw5Pfo0M9EyHMi8iAra?dl=0) | 1xRTX3090 24GB | 1xA100 40GB | +| 05-08-23 | train_hf_wav2vec.yaml | GreedySearch + test batch size = 1| 960h | 1.95 | 2min09s | 3.97| 2min21s | Not Avail. | [Link](https://www.dropbox.com/sh/8zqufkmegbgpsa8/AACB6MMJ_efbGDvTi5ZhB4pQa?dl=0) | 1xRTX3090 24GB | 1xA100 40GB | +| 05-08-23 | train_hf_wav2vec.yaml | CTCBeamSearch + test batch size = 1| 960h | 1.92 | 2min22s | 3.97 | 2min16s | Not Avail. 
| [Link](https://www.dropbox.com/sh/8zqufkmegbgpsa8/AACB6MMJ_efbGDvTi5ZhB4pQa?dl=0) | 1xRTX3090 24GB | 1xA100 40GB | +| 05-08-23 | train_hf_wav2vec.yaml | CTCPrefixBeamSearch + test batch size = 1| 960h | 1.92 | 2min45s | 3.97 | 2min21s | Not Avail. | [Link](https://www.dropbox.com/sh/8zqufkmegbgpsa8/AACB6MMJ_efbGDvTi5ZhB4pQa?dl=0) | 1xRTX3090 24GB | 1xA100 40GB | +| 05-08-23 | train_hf_wav2vec.yaml | CTCBeamSearch + 4-gram + test batch size = 1| 960h | 1.75 | 2min37s | 3.67 | 2min20s | Not Avail. | [Link](https://www.dropbox.com/sh/8zqufkmegbgpsa8/AACB6MMJ_efbGDvTi5ZhB4pQa?dl=0) | 1xRTX3090 24GB | 1xA100 40GB | +| 05-08-23 | train_hf_wav2vec.yaml | CTCPrefixBeamSearch + 4-gram + test batch size = 1| 960h | 1.80 | 2min38s | 3.78 | 2min25s |Not Avail. | [Link](https://www.dropbox.com/sh/8zqufkmegbgpsa8/AACB6MMJ_efbGDvTi5ZhB4pQa?dl=0) | 1xRTX3090 24GB | 1xA100 40GB | +| 22-09-22 | train_sb_wav2vec.yaml | GreedySearch | 960h | 4.2 | Not Avail. | Not Avail. | Not Avail. | Not Avail. | Not Avail. | Not Avail.| 2xTesla V100 32GB | +| 08-12-23 | train_hf_whisper.yaml (small) | CTCBeamSearch + test batch size = 1 | 960h | 4.72 | 3.08 | 12.66 |3.30 | Not Avail. | [Link](https://www.dropbox.com/sh/zmtp13huxn02fot/AADyKL5q0MwRhEG1-WbSXDWda?dl=0) | 1xRTX3090 24GB | 2xTesla V100 32GB | +| 08-12-23 | train_hf_whisper.yaml (small) | CTCPrefixBeamSearch + test batch size = 1 | 960h | 4.73 | 3.19 | 12.65 |3.39 | Not Avail. | [Link](https://www.dropbox.com/sh/zmtp13huxn02fot/AADyKL5q0MwRhEG1-WbSXDWda?dl=0) | 1xRTX3090 24GB | 2xTesla V100 32GB | +| 08-12-23 | train_hf_whisper.yaml (small) | CTCBeamSearch + 4-gram + test batch size = 1 | 960h | 4.37 | 3.16 | 11.76 | 3.43 | Not Avail. | [Link](https://www.dropbox.com/sh/zmtp13huxn02fot/AADyKL5q0MwRhEG1-WbSXDWda?dl=0) | 1xRTX3090 24GB | 2xTesla V100 32GB | +| 08-12-23 | train_hf_whisper.yaml (small) | CTCPrefixBeamSearch + 4-gram + test batch size = 1 | 960h | 4.44 | 3.30 | 11.89 | 3.47 | Not Avail. 
| [Link](https://www.dropbox.com/sh/zmtp13huxn02fot/AADyKL5q0MwRhEG1-WbSXDWda?dl=0) | 1xRTX3090 24GB | 2xTesla V100 32GB | +| 23-01-24 | train_hf_wav2vec_k2.yaml | k2CTC + HL graph + 1best decoding + test batch size = 1 | 960h | 1.83 | Not Avail. | 3.82 | Not Avail. | Not Avail. | [Link](https://www.dropbox.com/scl/fo/678rj1a44jt4zrxjwaetu/h?rlkey=x0xwz31nkl01qwr3k5ivtywvz&dl=0) | 1xRTX2080Ti 12GB | 1xRTX2080Ti 12GB | +| 23-01-24 | train_hf_wav2vec_k2.yaml | k2CTC + HLG graph + 1best decoding + test batch size = 1 | 960h | 1.69 | Not Avail. | 3.44 | Not Avail. | Not Avail. | [Link](https://www.dropbox.com/scl/fo/c91vqlr8ase90x0m7u3v3/h?rlkey=duh55n0qzlfnfhy4auu0a4f8g&dl=0) | 1xRTX2080Ti 12GB | 1xRTX2080Ti 12GB | +| 23-01-24 | train_hf_wav2vec_k2.yaml | k2CTC + HL graph + whole lattice rescoring + test batch size = 1 | 960h | 1.72 | Not Avail. | 3.51 | Not Avail. | Not Avail. | [Link](https://www.dropbox.com/scl/fo/mx6hd4zc0iyzqvixxre6q/h?rlkey=xxbpb949btmeiecw30be5qwhj&dl=0) | 1xRTX2080Ti 12GB | 1xRTX2080Ti 12GB | +| 23-01-24 | train_hf_wav2vec_k2.yaml | k2CTC + HLG graph + whole lattice rescoring + test batch size = 1 | 960h | 1.81 | Not Avail. | 3.57 | Not Avail. | Not Avail. | [Link](https://www.dropbox.com/scl/fo/kj2ujqj3votq7ue6ydh0l/h?rlkey=mibyoria19zasvuxs0iwx6plt&dl=0) | 1xRTX2080Ti 12GB | 1xRTX2080Ti 12GB | +| 08-12-23 | train_hf_wav2vec.yaml | CTCBeamSearch + RNNLM Rescorer + test batch size = 1 + topk = 100 | 960h | 1.69 | 26mins15 | 3.55 | 32min44s | Not Avail. | [Link](https://www.dropbox.com/sh/k4ixa211yp5b1tm/AAD85sgYw2CH7NKk_qKMO9Tja?dl=0) | 1x A100 40GB | 2xTesla V100 40GB | +| 08-12-23 | train_hf_wav2vec.yaml | CTCBeamSearch + TransformerLM Rescorer + test batch size = 1 + topk = 100 | 960h | 1.57 | 26mins56s | 3.37 | 32min46 | Not Avail. | [Link](https://www.dropbox.com/sh/ijqalvre7mm08ng/AAD_hsN-8dBneUMMkELsOOxga?dl=0) | 1x A100 40GB | 2xTesla V100 32GB | +| 06-12-23 | train_branchformer.yaml (25.9M) | 960h | 3.6 (no LM) | Not Avail. 
| Not Avail. | 8xA40 46G |
+| 06-12-23 | train_conformer.yaml (28.8M) | 960h | 3.7 (no LM) | Not Avail. | Not Avail. | 8xA40 46G |
+
+
+# Downsampling inputs for faster fine-tuning and inference using SSL Models
+This repository contains the code to reproduce part of the results obtained in the paper: "Fine-tuning Strategies for Faster Inference using Speech Self-Supervised Models: A Comparative Study"
+The reported experiments are the ones leading to the largest inference time reductions while keeping lower error rates, using a downsampling of the input sequences. You can download LibriSpeech at http://www.openslr.org/12.
+
+### Downsampling Results with Librispeech train-clean-100 split
+The inference times shown here are for running the whole test-clean LibriSpeech split, and are in seconds. MACs shown here are the mean MACs for a test batch.
+These results are obtained using WavLM Large finetuned only on the train-clean-100 split of LibriSpeech (100 hours of speech).
-# Training Time
-TO UPDATE. 
+| Name | Factor | WER | GPU- Inference Time | CPU - Inference Time | WER-LM | GPULM - Inference Time | CPULM - Inference Time | MACs (G) | +|-------|--------|-------|---------------------|----------------------|--------|------------------------|------------------------|----------| +| No SD | 1 | 4.09 | 134 | 1121 | 3.31 | 152 | 1128 | 386.538 | +| CL2 | 2 | 4.61 | 84 | 582 | 3.48 | 98 | 600 | 192.97 | +| CL3 | 3 | 5.47 | 69 | 414 | 4.12 | 91 | 436 | 134.864 | +| AV2 | 2 | 4.93 | 80 | 570 | 3.66 | 98 | 578 | 192.97 | +| AV3 | 3 | 6.01 | 64 | 406 | 4.27 | 90 | 422 | 134.864 | +| SD2 | 2 | 4.85 | 86 | 569 | 3.58 | 97 | 575 | 192.97 | +| SD3 | 3 | 5.83 | 72 | 427 | 4.08 | 89 | 458 | 134.864 | + +CL: Learned convolutional downsampling + +SD : Signal downsampling + +AV : Averaging window # **About SpeechBrain** - Website: https://speechbrain.github.io/ - Code: https://github.com/speechbrain/speechbrain/ - HuggingFace: https://huggingface.co/speechbrain/ -# **Citing SpeechBrain** +# **Citing** Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, @@ -39,3 +154,15 @@ Please, cite SpeechBrain if you use it for your research or business. 
note={arXiv:2106.04624} } ``` +If you use the downsampling approach, please cite : + +```bibtex +@article{zaiem2023fine, + title={Fine-tuning Strategies for Faster Inference using Speech Self-Supervised Models: A Comparative Study}, + author={Zaiem, Salah and Algayres, Robin and Parcollet, Titouan and Essid, Slim and Ravanelli, Mirco}, + journal={arXiv preprint arXiv:2303.06740}, + year={2023} +} +``` + + diff --git a/recipes/LibriSpeech/ASR/CTC/extra_requirements.txt b/recipes/LibriSpeech/ASR/CTC/extra_requirements.txt index 78949924f4..b2d2230d5b 100644 --- a/recipes/LibriSpeech/ASR/CTC/extra_requirements.txt +++ b/recipes/LibriSpeech/ASR/CTC/extra_requirements.txt @@ -1,2 +1,3 @@ -# For wav2vect recipe (HuggingFace) -transformers +https://github.com/kpu/kenlm/archive/master.zip +# k2 # It is better to install k2 with the procedure listed here: https://k2-fsa.github.io/k2/installation/from_wheels.html +kaldilm==1.15.1 diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/branchformer_large.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/branchformer_large.yaml new file mode 100644 index 0000000000..5fae5c9253 --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/branchformer_large.yaml @@ -0,0 +1,252 @@ +# ############################################################################ +# Model: E2E ASR with CTC +# Encoder: Branchformer Encoder +# Decoder: CTC beam searcher and greedy searcher +# Tokens: character +# Training: Librispeech 960h +# Authors: Titouan Parcollet, Shucong Zhang, Adel Moumen +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 3402 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/branchformer_ctc/ +wer_file: !ref /wer.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# If RIRS_NOISES dir exists in 
/localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["dev-clean", "test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /dev-clean.csv + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 500 +batch_size: 16 # This works for 2x GPUs with 32GB +grad_accumulation_factor: 2 +max_grad_norm: 5.0 +sorting: descending #random +num_workers: 8 +loss_reduction: batchmean +valid_search_interval: 1 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +lr_model: 0.001 +weight_decay: 0.0005 + +# Feature parameters +sample_rate: 16000 +n_fft: 512 +n_mels: 80 +win_length: 25 + +# Training parameters +# To make Transformers converge, the global bath size should be large enough. +# The global batch size is max_batch_len * n_gpus * gradient_accumulation. +# Empirically, we used 850 * 8 A40 45G GPUs * 2 or 1700 * 4 A100 80G * 2. +# Please, set your parameters accordingly. +dynamic_batching: True +max_batch_length_train: 850 +max_batch_len_val: 100 +num_bucket: 200 +shuffle: False # if true re-creates batches at each epoch shuffling examples. 
+max_batch_ex: 128 +batch_ordering: random + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_val: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model parameters ########################### + +# Transformer +attention_type: RelPosMHAXL +d_model: 256 +nhead: 4 +csgu_linear_units: 2400 +csgu_kernel_size: 31 +num_encoder_layers: 18 +num_decoder_layers: 0 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 31 + +# BPE parameters +token_type: char # ["unigram", "bpe", "char"] +character_coverage: 1.0 +blank_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +beam_size: 100 +beam_prune_logp: -12.0 +token_prune_min_logp: -1.2 +prune_history: False + +############################## models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + dropout: !ref + activation: !ref + encoder_module: branchformer + attention_type: !ref + normalize_before: True + causal: False + csgu_linear_units: !ref + kernel_size: !ref + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + 
+modules: + CNN: !ref + Transformer: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref ] + +####################### Decoding & optimiser ########################### + +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: !ref + beam_prune_logp: !ref + token_prune_min_logp: !ref + prune_history: !ref + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.LinearNoamScheduler + lr_initial: !ref + n_warmup_steps: 7500 + n_keep_steps: 36000 + +model_opt_class: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + weight_decay: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +############################## Augmentations ################################### + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: True + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref + ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + win_length: !ref + n_mels: !ref + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + 
normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True +wer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/conformer_large.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/conformer_large.yaml new file mode 100644 index 0000000000..1fd3aa0974 --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/conformer_large.yaml @@ -0,0 +1,250 @@ +# ############################################################################ +# Model: E2E ASR with CTC +# Encoder: Conformer Encoder +# Decoder: CTC beam searcher and greedy searcher +# Tokens: character +# Training: Librispeech 960h +# Authors: Titouan Parcollet, Shucong Zhang, Adel Moumen +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 3402 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_ctc/ +wer_file: !ref /wer.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["dev-clean", "test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /dev-clean.csv + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 
500 +batch_size: 16 # This works for 2x GPUs with 32GB +grad_accumulation_factor: 2 +max_grad_norm: 5.0 +sorting: descending #random +num_workers: 8 +loss_reduction: batchmean +valid_search_interval: 1 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +lr_model: 0.001 +weight_decay: 0.0005 + +# Feature parameters +sample_rate: 16000 +n_fft: 512 +n_mels: 80 +win_length: 25 + +# Training parameters +# To make Transformers converge, the global bath size should be large enough. +# The global batch size is max_batch_len * n_gpus * gradient_accumulation. +# Empirically, we used 850 * 8 A40 45G GPUs * 2 or 1700 * 4 A100 80G * 2. +# Please, set your parameters accordingly. +dynamic_batching: True +max_batch_length_train: 850 +max_batch_len_val: 100 +num_bucket: 200 +shuffle: False # if true re-creates batches at each epoch shuffling examples. +max_batch_ex: 128 +batch_ordering: random + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_val: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ####################################### + +# Transformer +attention_type: RelPosMHAXL +d_model: 256 +nhead: 4 +d_ffn: 1024 +num_encoder_layers: 18 +num_decoder_layers: 0 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 31 + +# Outputs +token_type: char # ["unigram", "bpe", "char"] +character_coverage: 1.0 +blank_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +beam_size: 100 +beam_prune_logp: -12.0 +token_prune_min_logp: -1.2 +prune_history: False + +############################## models ################################ + +CNN: 
!new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: conformer + attention_type: !ref + normalize_before: True + causal: False + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +modules: + CNN: !ref + Transformer: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref ] + +####################### Decoding & optimiser ########################### + +# Decoding parameters +test_beam_search: + blank_index: !ref + beam_size: !ref + beam_prune_logp: !ref + token_prune_min_logp: !ref + prune_history: !ref + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.LinearNoamScheduler + lr_initial: !ref + n_warmup_steps: 7500 + n_keep_steps: 36000 + +model_opt_class: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + weight_decay: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +############################## Augmentations ################################### + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +drop_chunk: 
!new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: True + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref + ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + win_length: !ref + n_mels: !ref + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True +wer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_average_downsampling.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_average_downsampling.yaml new file mode 100644 index 0000000000..dca58f9ae2 --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_average_downsampling.yaml @@ -0,0 +1,222 @@ +# ################################ +# Model: downsampling + wavlm + DNN + CTC +# Decoding AM: Greedy for validation, and Beam search for testing +# Augmentation: SpecAugment +# Authors: Sung-Lin Yeh 2021 +# Salah Zaiem 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] 
+output_folder: !ref results/train_wav2vec2_char/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english wav2vec2 model. +wav2vec2_hub: microsoft/wavlm-large +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +# noise/ris dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 1 +lr: 0.9 +lr_wav2vec: 0.0001 +sorting: ascending +precision: fp32 # bf16, fp16 or fp32 +sample_rate: 16000 + +#Downsampling parameters +downsampling_factor: 2 +downsampling_kernel_size: 21 +upsampling: False + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 3 per GPU to fit 32GB of VRAM +batch_size: 6 +test_batch_size: 8 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +####################### Model Parameters ####################################### + +activation: !name:torch.nn.LeakyReLU +dnn_layers: 2 +dnn_neurons: 1024 +freeze_wav2vec: True + +# Outputs +ctc_neurons: 29 +output_neurons: 29 # Characters size, index(blank/eos/bos) = 0 +blank_index: 0 + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, 1024] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +wav2vec2: 
!new:speechbrain.integrations.huggingface.wavlm.WavLM + source: !ref + output_norm: True + freeze_feature_extractor: True + freeze: !ref + save_path: !ref + +downsampler: !new:speechbrain.lobes.downsampling.PoolingDownsampler + downsampling_factor: !ref + kernel_size: !ref + +##### +# Uncomment this block if you prefer to use a Fairseq pretrained model instead +# of a HuggingFace one. Here, we provide an URL that is obtained from the +# Fairseq github for the multilingual XLSR. +# +#wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt +#wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 +# pretrained_path: !ref +# output_norm: True +# freeze: False +# save_path: !ref /wav2vec2_checkpoint/model.pt + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + downsampler: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref ] + +model_opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +############################## Decoding ######################################## + +test_beam_search: + beam_size: 200 + topk: 1 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -10.0 + token_prune_min_logp: -5 + prune_history: True + alpha: 0.5 + beta: 1.5 + # can be downloaded from here https://www.openslr.org/11/ or trained 
with kenLM + # It can either be a .bin or .arpa ; note: .arpa is much slower at loading + # If you don't want to use an LM, comment it out or set it to null + kenlm_model_path: null + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + tokenizer: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_conv_downsampling.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_conv_downsampling.yaml new file mode 100644 index 0000000000..8931005840 --- 
/dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_conv_downsampling.yaml @@ -0,0 +1,225 @@ +# ################################ +# Model: downsampling + wavlm + DNN + CTC +# Decoding AM: Greedy for validation, and Beam search for testing +# Augmentation: SpecAugment +# Authors: Sung-Lin Yeh 2021 +# Salah Zaiem 2023 +# ################################ + + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/train_wav2vec2_char/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english wav2vec2 model. +wav2vec2_hub: microsoft/wavlm-large +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +# noise/ris dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 1 +lr: 0.9 +lr_wav2vec: 0.0001 +sorting: ascending +precision: fp32 # bf16, fp16 or fp32 +sample_rate: 16000 + +#Downsampling parameters +downsampling_factor: 2 +downsampling_kernel_size: 81 +upsampling: False + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 3 per GPU to fit 32GB of VRAM +batch_size: 6 +test_batch_size: 8 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +####################### Model Parameters 
####################################### + +activation: !name:torch.nn.LeakyReLU +dnn_layers: 2 +dnn_neurons: 1024 +freeze_wav2vec: True + +# Outputs +ctc_neurons: 29 +output_neurons: 29 # Characters size, index(blank/eos/bos) = 0 +blank_index: 0 + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, 1024] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +wav2vec2: !new:speechbrain.integrations.huggingface.wavlm.WavLM + source: !ref + output_norm: True + freeze_feature_extractor: True + freeze: !ref + save_path: !ref + +downsampler: !new:speechbrain.lobes.downsampling.Conv1DDownsampler + downsampling_factor: !ref + kernel_size: !ref + + +##### +# Uncomment this block if you prefer to use a Fairseq pretrained model instead +# of a HuggingFace one. Here, we provide an URL that is obtained from the +# Fairseq github for the multilingual XLSR. 
+# +#wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt +#wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 +# pretrained_path: !ref +# output_norm: True +# freeze: False +# save_path: !ref /wav2vec2_checkpoint/model.pt + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + downsampler: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref ] + +model_opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +############################## Decoding ######################################## + +test_beam_search: + beam_size: 200 + topk: 1 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -10.0 + token_prune_min_logp: -5 + prune_history: True + alpha: 0.5 + beta: 1.5 + # can be downloaded from here https://www.openslr.org/11/ or trained with kenLM + # It can either be a .bin or .arpa ; note: .arpa is much slower at loading + # If you don't want to use an LM, comment it out or set it to null + kenlm_model_path: null + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency 
drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + tokenizer: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_signal_downsampling.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_signal_downsampling.yaml new file mode 100644 index 0000000000..2a80dde887 --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/downsampled/train_hf_wavlm_signal_downsampling.yaml @@ -0,0 +1,220 @@ +## ################################ +# Model: downsampling + wavlm + DNN + CTC +# Decoding AM: Greedy for validation, and Beam search for testing +# Augmentation: SpecAugment +# Authors: Sung-Lin Yeh 2021 +# Salah Zaiem 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 
1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/train_wav2vec2_char/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the HuggingFace WavLM model. +wav2vec2_hub: microsoft/wavlm-large +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# noise/rir dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 1 +lr: 0.9 +lr_wav2vec: 0.0001 +sorting: ascending +precision: fp32 # bf16, fp16 or fp32 +sample_rate: 16000 + +# Downsampling parameters +downsampling_factor: 3 +upsampling: True + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 3 per GPU to fit 32GB of VRAM +batch_size: 6 +test_batch_size: 8 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +####################### Model Parameters ####################################### + +activation: !name:torch.nn.LeakyReLU +dnn_layers: 2 +dnn_neurons: 1024 +freeze_wav2vec: True + +# Outputs +ctc_neurons: 58 # Twice the number of characters, needed for upsampling +output_neurons: 29 # Characters size, index(blank/eos/bos) = 0 +blank_index: 0 + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null,
1024] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +wav2vec2: !new:speechbrain.integrations.huggingface.wavlm.WavLM + source: !ref + output_norm: True + freeze_feature_extractor: True + freeze: !ref + save_path: !ref + +downsampler: !new:speechbrain.lobes.downsampling.SignalDownsampler + downsampling_factor: !ref + initial_sampling_rate: !ref + +##### +# Uncomment this block if you prefer to use a Fairseq pretrained model instead +# of a HuggingFace one. Here, we provide an URL that is obtained from the +# Fairseq github for the multilingual XLSR. +# +#wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt +#wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 +# pretrained_path: !ref +# output_norm: True +# freeze: False +# save_path: !ref /wav2vec2_checkpoint/model.pt + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + downsampler: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref ] + +model_opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +############################## Decoding ######################################## + +test_beam_search: + beam_size: 200 + topk: 1 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -10.0 + token_prune_min_logp: -5 + prune_history: True + alpha: 
0.5 + beta: 1.5 + # can be downloaded from here https://www.openslr.org/11/ or trained with kenLM + # It can either be a .bin or .arpa ; note: .arpa is much slower at loading + # If you don't want to use an LM, comment it out or set it to null + kenlm_model_path: null + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + tokenizer: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec.yaml index 3a2aa0cad5..06f4e5851f 100644 --- a/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec.yaml +++ 
b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec.yaml @@ -1,22 +1,24 @@ # ################################ # Model: wav2vec2 + DNN + CTC +# Decoding AM: Greedy for validation, and Beam search for testing # Augmentation: SpecAugment -# Authors: Sung-Lin Yeh 2021 +# Authors: Sung-Lin Yeh 2021, Adel Moumen 2023 # ################################ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/train_wav2vec2_char/ -wer_file: !ref /wer.txt +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt # URL for the biggest Fairseq english wav2vec2 model. wav2vec2_hub: facebook/wav2vec2-large-960h-lv60-self +wav2vec2_folder: !ref /wav2vec2_checkpoint # Data files -data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech # noise/ris dataset will automatically be downloaded # data_folder_rirs: !ref train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] @@ -30,15 +32,15 @@ test_csv: - !ref /test-clean.csv - !ref /test-other.csv -# Training parameters +####################### Training Parameters #################################### + number_of_epochs: 1 lr: 0.9 lr_wav2vec: 0.0001 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 - # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs # Must be 3 per GPU to fit 32GB of VRAM @@ -55,39 +57,36 @@ valid_dataloader_opts: test_dataloader_opts: batch_size: !ref -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dnn_layers: 2 dnn_neurons: 1024 freeze_wav2vec: True # Outputs -output_neurons: 31 # BPE size, index(blank/eos/bos) = 0 - -# Decoding parameters +output_neurons: 29 # BPE size, 
index(blank/eos/bos) = 0 blank_index: 0 # # Functions and classes # + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN input_shape: [null, null, 1024] activation: !ref dnn_blocks: !ref dnn_neurons: !ref -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref - save_path: !ref /wav2vec2_checkpoint + save_path: !ref ##### # Uncomment this block if you prefer to use a Fairseq pretrained model instead @@ -139,7 +138,58 @@ lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.9 patient: 0 -label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Decoding ######################################## + +# Decoding parameters +test_beam_search: + beam_size: 143 + topk: 1 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + alpha: 0.8 + beta: 1.2 + # can be downloaded from here https://www.openslr.org/11/ or trained with kenLM + # It can either be a .bin or .arpa ; note: .arpa is much slower at loading + # If you don't want to use an LM, comment it out or set it to null + kenlm_model_path: null + +############################## Logging and Pretrainer ########################## checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_k2.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_k2.yaml new file mode 100644 index 0000000000..5becdc125c --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_k2.yaml @@ -0,0 +1,255 @@ +# ################################ +# Model: wav2vec2 + DNN + CTC + LM (k2) +# Augmentation: SpecAugment +# +# This recipe trains a wav2vec2 model with a DNN and DWFST-based CTC loss. +# To use this recipe you need to have the following: +# - A folder with the LibriSpeech dataset (see `datafolder`) +# - A folder with a small, and (optionally) a big LM (see `lm_dir`) +# These can be downloaded in ARPA format from: http://www.openslr.org/resources/11/. 
+# - A working installation of k2 (and kaldilm if you want to use ARPA LMs). +# +# Authors: Zeyu Zhao 2023 +# Georgios Karakasidis 2023 +# Pierre Champion 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1111 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/train_wav2vec2_char_k2/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english wav2vec2 model. +wav2vec2_hub: facebook/wav2vec2-large-960h-lv60-self +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +# noise/ris dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean", "dev-other"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + - !ref /dev-clean.csv + - !ref /dev-other.csv + +# For k2 CTC training +lang_dir: !ref /lang +vocab_file: !ref /librispeech-vocab.txt +sil_prob: 0. +add_word_boundary: True +# For k2 decoding +test_search_beam: 32 +# Beam size (for decoding) +test_output_beam: 8 +test_min_active_state: 300 +test_max_active_state: 3000 +# Acoustic scale (multiplied by the log probs) +ac_scale: 1.5 +compose_HL_with_G: False +# 1best or whole-lattice-rescoring +# decoding_method: whole-lattice-rescoring +decoding_method: 1best +# LM scale to be used for rescoring. Only used if rescoring +rescoring_lm_scale: 0.4 +# This is where the 3gram and (optionally) 4gram LM are stored +# They can be in either ARPA or FST format. If the former, then +# the FST equivalent will be created in the same directory by +# using kaldilm. 
+lm_dir: !ref /lm +# The ARPA LM files are located under the lm_dir. +# - Use (recommended): +# - 3-gram_sb.arpa +# - 4-gram_sb.arpa +# To download SpeechBrain pretrained models (trained on train-960+librispeech-lm-norm.txt, 214k words) +# - Use: +# - 3-gram.arpa +# - 3-gram.pruned.1e-7.arpa +# - 3-gram.pruned.3e-7.arpa +# - 4-gram.arpa +# To download http://www.openslr.org/resources/11/ pretrained models (trained on librispeech-lm-norm.txt, 200k words) +# - Use another name for a model you trained yourself. +# If the arpa does not exist in the lm_dir, you'll need to train it yourself. +# Please see LibriSpeech/LM/README.md for instructions. +# Using one of the above names will automatically download the corresponding model. +# You can specify a different name, but you'll need to make sure the file exists in the lm_dir. +# Make sure to use enough RAM and CPUs as the conversion to FST can be quite demanding. +G_arpa: 3-gram_sb.arpa +G_rescoring_arpa: 4-gram_sb.arpa +# caching: False + +# Training parameters +number_of_epochs: 1 +lr: 0.9 +lr_wav2vec: 0.0001 +sorting: ascending # only ascending and descending are supported currently +precision: fp32 +sample_rate: 16000 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 3 per GPU to fit 32GB of VRAM +batch_size: 6 +test_batch_size: 1 +num_workers: 10 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + num_workers: !ref + +valid_dataloader_opts: + batch_size: !ref + num_workers: !ref + +test_dataloader_opts: + batch_size: !ref + num_workers: !ref + +# Model parameters +activation: !name:torch.nn.LeakyReLU +dnn_layers: 2 +dnn_neurons: 1024 +freeze_wav2vec: True + +# Outputs +output_neurons: 30 # BPE size, index(blank/eos/bos) = 0 + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100,
105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, 1024] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +wav2vec2: !new:speechbrain.integrations.huggingface.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + +##### +# Uncomment this block if you prefer to use a Fairseq pretrained model instead +# of a HuggingFace one. Here, we provide an URL that is obtained from the +# Fairseq github for the multilingual XLSR. 
+# +#wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt +#wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 +# pretrained_path: !ref +# output_norm: True +# freeze: False +# save_path: !ref /wav2vec2_checkpoint/model.pt + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.integrations.k2_fsa.losses.ctc_k2 + reduction: mean + beam_size: 10 + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +model_opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_rnn_rescoring.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_rnn_rescoring.yaml new file mode 100644 index 0000000000..51e3ff991d --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_rnn_rescoring.yaml @@ -0,0 +1,256 @@ +# ################################ +# Model: wav2vec2 + DNN + CTC +# Decoding AM: Greedy for validation, and 
Rescoring + Beam search for testing. +# Augmentation: SpecAugment +# Authors: Adel Moumen 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/train_wav2vec2_char_rnn_rescoring/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english wav2vec2 model. +wav2vec2_hub: facebook/wav2vec2-large-960h-lv60-self +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +# noise/ris dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 1 +lr: 0.9 +lr_wav2vec: 0.0001 +sorting: ascending +precision: fp32 # bf16, fp16 or fp32 +sample_rate: 16000 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 3 per GPU to fit 32GB of VRAM +batch_size: 6 +test_batch_size: 1 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +####################### Model Parameters ####################################### + +activation: !name:torch.nn.LeakyReLU +dnn_layers: 2 +dnn_neurons: 1024 +freeze_wav2vec: True + +# Outputs +output_neurons: 29 # BPE size, index(blank/eos/bos) = 0 + + +pretrained_lm_tokenizer_path: speechbrain/asr-crdnn-rnnlm-librispeech + + +# +# Functions and classes +# 
+epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, 1024] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + +##### +# Uncomment this block if you prefer to use a Fairseq pretrained model instead +# of a HuggingFace one. Here, we provide an URL that is obtained from the +# Fairseq github for the multilingual XLSR. +# +#wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt +#wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 +# pretrained_path: !ref +# output_norm: True +# freeze: False +# save_path: !ref /wav2vec2_checkpoint/model.pt + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +model_opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + +# This is the RNNLM that is used according to the Huggingface repository +# NB: It has to match the pre-trained RNNLM!! 
+lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM + output_neurons: 1000 + embedding_dim: 128 + activation: !name:torch.nn.LeakyReLU + dropout: 0.0 + rnn_layers: 2 + rnn_neurons: 2048 + dnn_blocks: 1 + dnn_neurons: 512 + return_hidden: True # For inference + + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +############################## Decoding ######################################## + +# Decoding parameters +lm_weight: 0.5 +blank_index: 0 + +# topk is the number of hypotheses that will be rescored in the rescorer +# lowering this value might decrease the wer, but will increase speed. +test_beam_search: + beam_size: 20 + topk: 20 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -12.0 + token_prune_min_logp: -12.0 + prune_history: False + alpha: 0.8 + beta: 1.2 + +rnnlm: !new:speechbrain.decoders.scorer.RNNLMRescorer + language_model: !ref + tokenizer: !ref + bos_index: 0 + eos_index: 0 + pad_index: 0 + +rescorer: !new:speechbrain.decoders.scorer.RescorerBuilder + rescorers: [!ref ] + weights: + rnnlm: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + tokenizer: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. 
+pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_transformer_rescoring.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_transformer_rescoring.yaml new file mode 100644 index 0000000000..7e93fbdc16 --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_wav2vec_transformer_rescoring.yaml @@ -0,0 +1,253 @@ +# ################################ +# Model: wav2vec2 + DNN + CTC +# Decoding AM: Greedy for validation, and Rescoring + Beam search for testing +# Augmentation: SpecAugment +# Authors: Adel Moumen 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/train_wav2vec2_char_transformer_rescoring/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english wav2vec2 model. 
+wav2vec2_hub: facebook/wav2vec2-large-960h-lv60-self +wav2vec2_folder: !ref /wav2vec2_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +# noise/ris dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 1 +lr: 0.9 +lr_wav2vec: 0.0001 +sorting: ascending +precision: fp32 # bf16, fp16 or fp32 +sample_rate: 16000 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 3 per GPU to fit 32GB of VRAM +batch_size: 6 +test_batch_size: 1 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +####################### Model Parameters ####################################### +activation: !name:torch.nn.LeakyReLU +dnn_layers: 2 +dnn_neurons: 1024 +freeze_wav2vec: True + +# Outputs +output_neurons: 29 # BPE size, index(blank/eos/bos) = 0 + + +pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech + +# This is the TransformerLM that is used according to the Huggingface repository +# Visit the HuggingFace model corresponding to the pretrained_lm_tokenizer_path +# For more details about the model! +# NB: It has to match the pre-trained TransformerLM!! 
+lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: 5000 + d_model: 768 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +# Decoding parameters +lm_weight: 0.5 +blank_index: 0 + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, 1024] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + +##### +# Uncomment this block if you prefer to use a Fairseq pretrained model instead +# of a HuggingFace one. Here, we provide an URL that is obtained from the +# Fairseq github for the multilingual XLSR. 
+# +#wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt +#wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 +# pretrained_path: !ref +# output_norm: True +# freeze: False +# save_path: !ref /wav2vec2_checkpoint/model.pt + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +model_opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + +############################## Decoding ######################################## + +# topk is the number of hypotheses that will be rescored in the rescorer +# lowering this value might decrease the wer, but will increase speed. +test_beam_search: + beam_size: 20 + topk: 20 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -12.0 + token_prune_min_logp: -12.0 + prune_history: False + alpha: 0.8 + beta: 1.2 + +transformerlm: !new:speechbrain.decoders.scorer.TransformerLMRescorer + language_model: !ref + tokenizer: !ref + pad_index: 0 + bos_index: 1 + eos_index: 2 + +rescorer: !new:speechbrain.decoders.scorer.RescorerBuilder + rescorers: [!ref ] + weights: + transformerlm: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + tokenizer: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. 
+pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_whisper_encoder.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_whisper_encoder.yaml new file mode 100644 index 0000000000..df4289d1d2 --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/train_hf_whisper_encoder.yaml @@ -0,0 +1,213 @@ +# ################################ +# Model: Whisper (Encoder only) + DNN + CTC +# Decoding AM: Greedy for validation, and Beam search for testing +# Augmentation: TimeDomainSpecAugment +# Authors: Titouan Parcollet 2022, Adel Moumen 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 # The secret perfect seed +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/train_whisper_char/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english whisper model. 
+whisper_hub: openai/whisper-base +whisper_folder: !ref /whisper_checkpoint + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech + +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 15 +warmup_steps: 1000 # We freeze whisper for 1000 steps to let the CTC adapt +lr: 0.0008 +lr_whisper: 0.0001 +sorting: random +precision: fp32 # bf16, fp16 or fp32 +sample_rate: 16000 + +# BPE parameters +token_type: char # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +batch_size: 6 +test_batch_size: 8 +num_workers: 4 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + num_workers: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +####################### Model Parameters ####################################### +dnn_neurons: 1024 +freeze_whisper: False +whisper_output_dim: 384 + + +# Outputs +output_neurons: 29 # BPE size, index(blank/eos/bos) = 0 +blank_index: 0 + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + + +enc: !new:speechbrain.nnet.containers.Sequential + input_shape: [null, null, !ref ] + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: !name:speechbrain.nnet.normalization.LayerNorm + activation: !new:torch.nn.LeakyReLU + drop: !new:torch.nn.Dropout + p: 0.15 + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.LayerNorm + activation2: 
!new:torch.nn.LeakyReLU + drop2: !new:torch.nn.Dropout + p: 0.15 + linear3: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.LayerNorm + activation3: !new:torch.nn.LeakyReLU + +whisper: !new:speechbrain.integrations.huggingface.whisper.Whisper + source: !ref + freeze: !ref + save_path: !ref + encoder_only: True + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + whisper: !ref + enc: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +model_opt_class: !name:torch.optim.AdamW + lr: !ref + +whisper_opt_class: !name:torch.optim.AdamW + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.5 + patient: 0 + +lr_annealing_whisper: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.75 + patient: 0 + +############################## Decoding ######################################## + +test_beam_search: + beam_size: 143 + topk: 1 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -12.0 + token_prune_min_logp: -1.2 + prune_history: True + alpha: 0.8 + beta: 1.2 + # can be downloaded from here https://www.openslr.org/11/ or trained with kenLM + # It can either be a .bin or .arpa ; note: .arpa is much slower at loading + # If you don't want to use an LM, comment it out or set it to null + kenlm_model_path: null + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops 
a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + whisper: !ref + model: !ref + scheduler_model: !ref + scheduler_whisper: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/train_sb_BEST-RQ.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/train_sb_BEST-RQ.yaml new file mode 100644 index 0000000000..142d04534c --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/hparams/train_sb_BEST-RQ.yaml @@ -0,0 +1,290 @@ +# ################################ +# Model: bestRQ + DNN + CTC +# Decoding: Greedy for validation, and Beam search for testing +# Augmentation: SpecAugment + speedperturb +# Authors: Ryan Whetten 2024 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1000 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/train_bestrq_libri_100/ +output_wer_folder: !ref / 
+save_folder: !ref /save +train_log: !ref /train_log.txt + +pt_model_path: !PLACEHOLDER +pt_model_output_dim: 576 + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +# noise/ris dataset will automatically be downloaded if uncommented +# data_folder_rirs: !ref +train_splits: ["train-clean-100"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 25 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +number_of_epochs: 30 +lr: 0.0003 +lr_bestrq: 0.00005 +precision: fp16 # bf16, fp16 or fp32 +sample_rate: 16000 +sorting: ascending +num_workers: 4 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +batch_size: 6 +test_batch_size: 8 + +train_dataloader_opts: + batch_size: !ref + num_workers: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +####################### Model Parameters ####################################### + +# Transformer +d_model: !ref +nhead: 8 # table 1 https://arxiv.org/pdf/2010.10504.pdf +num_encoder_layers: 12 # section 4.1.1 +num_decoder_layers: 0 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5000 +attention_type: RelPosMHAXL +encoder_module: conformer +dnn_activation: !new:torch.nn.LeakyReLU +dnn_neurons: 1280 +dnn_dropout: 0.15 +freeze_bestrq: False + +# Outputs +output_neurons_ctc: 29 # BPE size, index(blank/eos/bos) = 0 +blank_index: 0 + +# Feature parameters +n_fft: 400 +n_mels: 80 + +############################## models ########################################## + +back_end_ffn: !new:speechbrain.nnet.containers.Sequential + input_shape: [null, null, !ref ] + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: 
!name:speechbrain.nnet.normalization.LayerNorm + activation: !ref + drop: !new:torch.nn.Dropout + p: !ref + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.LayerNorm + activation2: !ref + drop2: !new:torch.nn.Dropout + p: !ref + linear3: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.LayerNorm + activation3: !ref + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 0 + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (128, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + conformer_activation: !ref + encoder_module: !ref + attention_type: !ref + normalize_before: True + causal: False + +enc: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper + transformer: !ref + +pt_model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + normalize: !ref + CNN: !ref + enc: !ref + pt_model: !ref + back_end_ffn: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +####################### Decoding & optimiser ################################### + +kenlm_model_path: null + +# Decoding 
parameters +test_beam_search: + beam_size: 200 + topk: 1 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -10.0 + token_prune_min_logp: -5.0 + prune_history: True + alpha: 0.8 + beta: 1.2 + # can be downloaded from here https://www.openslr.org/11/ or trained with kenLM + # It can either be a .bin or .arpa ; note: .arpa is much slower at loading + # If you don't want to use an LM, comment it out or set it to null + kenlm_model_path: !ref + +model_opt_class: !name:torch.optim.AdamW + lr: !ref + weight_decay: 0.001 + +bestrq_opt_class: !name:torch.optim.AdamW + lr: !ref + weight_decay: 0.001 + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.5 + patient: 0 + +lr_annealing_bestrq: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.7 + patient: 0 + +############################## Augmentations ################################### + +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: True + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + pt_model: !ref + normalize: !ref + scheduler_model: !ref + scheduler_bestrq: !ref + counter: !ref + +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + pt_model: !ref + normalize: !ref + + paths: + pt_model: !ref /model.ckpt + normalize: !ref /normalize.ckpt + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref diff --git a/recipes/LibriSpeech/ASR/CTC/hparams/train_sb_wav2vec.yaml b/recipes/LibriSpeech/ASR/CTC/hparams/train_sb_wav2vec.yaml index 75d7422d04..14fe1d7ebc 100644 --- a/recipes/LibriSpeech/ASR/CTC/hparams/train_sb_wav2vec.yaml +++ b/recipes/LibriSpeech/ASR/CTC/hparams/train_sb_wav2vec.yaml @@ -1,27 +1,28 @@ # ################################ # Model: wav2vec2 + DNN + CTC +# 
Decoding AM: Greedy for validation, and Beam search for testing # Augmentation: SpecAugment -# Authors: Sung-Lin Yeh 2021, Rudolf A. Braun 2022, Titouan Parcollet 2022 +# Authors: Sung-Lin Yeh 2021, Rudolf A. Braun 2022, Titouan Parcollet 2022, Adel Moumen 2023 # ################################ # Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/train_wav2vec2_libri_100/ -wer_file: !ref /wer.txt +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt # Path of the SpeechBrain checkpoints containing the pretrained wav2vec2 model # It can be a local path or a HuggingFace hub containing the model -wav2vec2_hub: !PLACEHOLDER +wav2vec2_hub: facebook/wav2vec2-large-960h-lv60-self wav2vec_output_dim: 768 # This corresponds to the embedding size of the w2v2 # Data files data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech # noise/ris dataset will automatically be downloaded if uncommented # data_folder_rirs: !ref -train_splits: ["train-clean-100"] # ["train-clean-360", "train-other-500"] +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] dev_splits: ["dev-clean"] test_splits: ["test-clean", "test-other"] skip_prep: False @@ -32,13 +33,14 @@ test_csv: - !ref /test-clean.csv - !ref /test-other.csv -# Training parameters -number_of_epochs: 45 -lr: 0.5 -lr_wav2vec: 0.0001 -auto_mix_prec: False +####################### Training Parameters #################################### +number_of_epochs: 30 +lr: 0.0003 +lr_wav2vec: 0.00005 +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 sorting: ascending +num_workers: 2 # With data_parallel batch_size is split into N jobs # With DDP batch_size is multiplied by N jobs @@ -48,6 +50,7 @@ test_batch_size: 8 train_dataloader_opts: batch_size: !ref + num_workers: !ref valid_dataloader_opts: batch_size: 
!ref @@ -55,15 +58,14 @@ valid_dataloader_opts: test_dataloader_opts: batch_size: !ref -# Model parameters +####################### Model Parameters ####################################### dnn_activation: !new:torch.nn.LeakyReLU -dnn_neurons: 1024 +dnn_neurons: 1280 dnn_dropout: 0.15 +freeze_wav2vec: False # Outputs -output_neurons: 31 # BPE size, index(blank/eos/bos) = 0 - -# Decoding parameters +output_neurons: 29 # BPE size, index(blank/eos/bos) = 0 blank_index: 0 # @@ -72,30 +74,27 @@ blank_index: 0 epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] linear1: !name:speechbrain.nnet.linear.Linear n_neurons: !ref bias: True - bn1: !name:speechbrain.nnet.normalization.BatchNorm1d + bn1: !name:speechbrain.nnet.normalization.LayerNorm activation: !ref drop: !new:torch.nn.Dropout p: !ref linear2: !name:speechbrain.nnet.linear.Linear n_neurons: !ref bias: True - bn2: !name:speechbrain.nnet.normalization.BatchNorm1d + bn2: !name:speechbrain.nnet.normalization.LayerNorm activation2: !ref drop2: !new:torch.nn.Dropout p: !ref linear3: !name:speechbrain.nnet.linear.Linear n_neurons: !ref bias: True - bn3: !name:speechbrain.nnet.normalization.BatchNorm1d + bn3: !name:speechbrain.nnet.normalization.LayerNorm activation3: !ref # enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN @@ -145,18 +144,18 @@ modules: model: !new:torch.nn.ModuleList - [!ref , !ref ] -model_opt_class: !name:torch.optim.Adadelta +model_opt_class: !name:torch.optim.AdamW lr: !ref - rho: 0.95 - eps: 1.e-8 + weight_decay: 0.001 -wav2vec_opt_class: !name:torch.optim.Adam +wav2vec_opt_class: !name:torch.optim.AdamW lr: !ref + weight_decay: 0.001 lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler initial_value: !ref improvement_threshold: 0.0025 - annealing_factor: 0.8 + 
annealing_factor: 0.5 patient: 0 lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler @@ -165,6 +164,58 @@ lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.7 patient: 0 +############################## Decoding ######################################## + +test_beam_search: + beam_size: 200 + topk: 1 + blank_index: !ref + space_token: ' ' # make sure this is the same as the one used in the tokenizer + beam_prune_logp: -10.0 + token_prune_min_logp: -5.0 + prune_history: True + alpha: 0.8 + beta: 1.2 + # can be downloaded from here https://www.openslr.org/11/ or trained with kenLM + # It can either be a .bin or .arpa ; note: .arpa is much slower at loading + # If you don't want to use an LM, comment it out or set it to null + kenlm_model_path: null + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: diff --git a/recipes/LibriSpeech/ASR/CTC/train.py b/recipes/LibriSpeech/ASR/CTC/train.py new file mode 100644 index 0000000000..6fb0b3d019 --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/train.py @@ -0,0 +1,414 @@ +#!/usr/bin/env python3 +"""Recipe for training a Transformer ASR system with librispeech. +The system employs an encoder and CTC greedy decoding. + +To run this recipe, do the following: +> python train_from_scratch.py hparams/train_conformer.yaml +or +> python train_from_scratch.py hparams/train_branchformer.yaml + +With the default hyperparameters, the system employs a convolutional frontend and a transformer. +Training is performed on the full LibriSpeech dataset (960 h). 
+ +Authors + * Titouan Parcollet 2021, 2022 + * Shucong Zhang 2023 + * Adel Moumen 2024 +""" + +import os +import sys +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + + # compute features + feats = self.hparams.compute_features(wavs) + current_epoch = self.hparams.epoch_counter.current + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + # forward modules + src = self.modules.CNN(feats) + + enc_out, pred = self.modules.Transformer( + src, tgt=None, wav_len=wav_lens + ) + + # output layer for ctc log-probabilities + logits = self.modules.ctc_lin(enc_out) + p_ctc = self.hparams.log_softmax(logits) + + p_tokens = None + if stage == sb.Stage.VALID: + p_tokens = sb.decoders.ctc_greedy_decode( + p_ctc, wav_lens, blank_id=self.hparams.blank_index + ) + elif stage == sb.Stage.TEST: + p_tokens = test_searcher(p_ctc, wav_lens) + + return p_ctc, wav_lens, p_tokens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC) given predictions and targets.""" + + p_ctc, wav_lens, predicted_tokens = predictions + + ids = batch.id + tokens, tokens_lens = batch.tokens + + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + tokens_lens 
= self.hparams.wav_augment.replicate_labels(tokens_lens) + + loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) + + if stage == sb.Stage.VALID: + # Decode token terms to words + predicted_words = self.tokenizer( + predicted_tokens, task="decode_from_list" + ) + elif stage == sb.Stage.TEST: + predicted_words = [ + hyp[0].text.split(" ") for hyp in predicted_tokens + ] + + if stage != sb.Stage.TRAIN: + target_words = [wrd.split(" ") for wrd in batch.wrd] + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_evaluate_start(self, max_key=None, min_key=None): + """perform checkpoint averge if needed""" + super().on_evaluate_start() + + ckpts = self.checkpointer.find_checkpoints( + max_key=max_key, min_key=min_key + ) + ckpt = sb.utils.checkpoints.average_checkpoints( + ckpts, + recoverable_name="model", + ) + + self.hparams.model.load_state_dict(ckpt, strict=True) + self.hparams.model.eval() + print("Loaded the average") + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.wer_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + or stage == sb.Stage.TEST + ): + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID: + lr = 
self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"], "epoch": epoch}, + min_keys=["WER"], + num_to_keep=self.hparams.avg_checkpoints, + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open(self.hparams.wer_file, "w", encoding="utf-8") as w: + self.wer_metric.write_stats(w) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + if should_step: + self.hparams.noam_annealing(self.optimizer) + + +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! 
otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + valtest_datasets = [valid_data] + [i for k, i in test_datasets.items()] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(valtest_datasets, audio_pipeline) + + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline_train(wav): + # Speed Perturb is done here so it is multi-threaded with the + # workers of the dataloader (faster). + if hparams["speed_perturb"]: + sig = sb.dataio.dataio.read_audio(wav) + + sig = hparams["speed_perturb"](sig.unsqueeze(0)).squeeze(0) + else: + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline_train) + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "char_list", "tokens_list", "tokens" + ) + def text_pipeline(wrd): + yield wrd + char_list = list(wrd) + yield char_list + tokens_list = tokenizer.sp.encode_as_ids(wrd) + yield tokens_list + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "wrd", "char_list", "tokens"], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_val = hparams["dynamic_batch_sampler_val"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_val, + ) + + return ( + train_data, + valid_data, + test_datasets, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # 1. 
# Dataset prep (parsing Librispeech) + from librispeech_prepare import prepare_librispeech # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # Defining tokenizer and loading it + tokenizer = SentencePiece( + model_dir=hparams["save_folder"], + vocab_size=hparams["output_neurons"], + annotation_train=hparams["train_csv"], + annotation_read="wrd", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], + ) + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_datasets, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams, tokenizer) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + opt_class=hparams["model_opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Adding objects to trainer. 
+ asr_brain.tokenizer = tokenizer + vocab_list = [ + tokenizer.sp.id_to_piece(i) for i in range(tokenizer.sp.vocab_size()) + ] + + from speechbrain.decoders.ctc import CTCBeamSearcher + + test_searcher = CTCBeamSearcher( + **hparams["test_beam_search"], + vocab_list=vocab_list, + ) + + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if valid_bsampler is not None: + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.wer_file = os.path.join( + hparams["output_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + min_key="WER", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/LibriSpeech/ASR/CTC/train_with_bestrq.py b/recipes/LibriSpeech/ASR/CTC/train_with_bestrq.py new file mode 100644 index 0000000000..3598557b65 --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/train_with_bestrq.py @@ -0,0 +1,367 @@ +#!/usr/bin/env/python3 +"""Recipe for training a bestrq ctc ASR system with librispeech. +The system employs bestrq as its encoder. Decoding is performed with +ctc beam search decoding (for the test) and a KenLM LM if specified. +To run this recipe, do the following: +> python train_with_bestrq.py hparams/train_sb_BEST-RQ.yaml --pt_model_path /path/to_ckpt --data_folder /path/to/LibriSpeech/ +The neural network is trained on CTC likelihood target and character units +are used as basic recognition tokens. 
+ +Authors + * Ryan Whetten 2023 +""" + +import os +import sys +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) + + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + + # Forward pass + + ### get fbanks and normalize + feats = self.hparams.compute_features(wavs) + feats = self.modules.normalize(feats, wav_lens) + + feats = self.modules.CNN(feats) + enc_out = self.modules.enc(feats, wav_lens) + + x = self.modules.back_end_ffn(enc_out) + + # Compute outputs + p_tokens = None + logits = self.modules.ctc_lin(x) + + p_ctc = self.hparams.log_softmax(logits) + + if stage == sb.Stage.VALID: + p_tokens = sb.decoders.ctc_greedy_decode( + p_ctc, wav_lens, blank_id=self.hparams.blank_index + ) + elif stage == sb.Stage.TEST: + p_tokens = test_searcher(p_ctc, wav_lens) + + return p_ctc, wav_lens, p_tokens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + p_ctc, wav_lens, predicted_tokens = predictions + + ids = batch.id + tokens, tokens_lens = batch.tokens + + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + tokens_lens = self.hparams.wav_augment.replicate_labels(tokens_lens) + + loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) + loss 
= loss_ctc + + if stage == sb.Stage.VALID: + # Decode token terms to words + predicted_words = [ + "".join(self.tokenizer.decode_ndim(utt_seq)).split(" ") + for utt_seq in predicted_tokens + ] + elif ( + stage == sb.Stage.TEST + ): # Language model decoding only used for test + predicted_words = [ + hyp[0].text.split(" ") for hyp in predicted_tokens + ] + + if stage != sb.Stage.TRAIN: + target_words = [wrd.split(" ") for wrd in batch.wrd] + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. 
+ if stage == sb.Stage.VALID: + old_lr_model, new_lr_model = self.hparams.lr_annealing_model( + stage_stats["loss"] + ) + old_lr_bestrq, new_lr_bestrq = self.hparams.lr_annealing_bestrq( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate( + self.model_optimizer, new_lr_model + ) + sb.nnet.schedulers.update_learning_rate( + self.bestrq_optimizer, new_lr_bestrq + ) + self.hparams.train_logger.log_stats( + stats_meta={ + "epoch": epoch, + "lr_model": old_lr_model, + "lr_bestrq": old_lr_bestrq, + }, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + def init_optimizers(self): + "Initializes the bestrq2 optimizer and model optimizer" + self.bestrq_optimizer = self.hparams.bestrq_opt_class( + self.modules.pt_model.parameters() + ) + + self.model_optimizer = self.hparams.model_opt_class( + self.hparams.model.parameters() + ) + + # save the optimizers in a dictionary + # the key will be used in `freeze_optimizers()` + self.optimizers_dict = { + "model_optimizer": self.model_optimizer, + } + if not self.hparams.freeze_bestrq: + self.optimizers_dict["bestrq_optimizer"] = self.bestrq_optimizer + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "bestrq_opt", self.bestrq_optimizer + ) + self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. 
+ """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + label_encoder = sb.dataio.encoder.CTCTextEncoder() + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "char_list", "tokens_list", "tokens" + ) + def text_pipeline(wrd): + yield wrd + char_list = list(wrd) + yield char_list + tokens_list = label_encoder.encode_sequence(char_list) + yield tokens_list + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") + special_labels = { + "blank_label": hparams["blank_index"], + } + label_encoder.load_or_create( + path=lab_enc_file, + from_didatasets=[train_data], + output_key="char_list", + special_labels=special_labels, + sequence_input=True, + ) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "wrd", "char_list", "tokens"], + ) + + return train_data, valid_data, test_datasets, label_encoder + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + from librispeech_prepare import prepare_librispeech # noqa + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # here we create the datasets objects as well as 
tokenization and encoding + train_data, valid_data, test_datasets, label_encoder = dataio_prepare( + hparams + ) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Load the pretrained model + if "pretrainer" in hparams.keys() and hparams["pt_model_path"] is not None: + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # We dynamically add the tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for the LM!! + asr_brain.tokenizer = label_encoder + + ind2lab = label_encoder.ind2lab + vocab_list = [ind2lab[x] for x in range(len(ind2lab))] + + from speechbrain.decoders.ctc import CTCBeamSearcher + + test_searcher = CTCBeamSearcher( + **hparams["test_beam_search"], + vocab_list=vocab_list, + ) + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="WER", + ) diff --git a/recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py b/recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py index 8d5f904c41..04cf0a908a 100644 --- a/recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py +++ b/recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py @@ -1,7 +1,10 @@ #!/usr/bin/env/python3 """Recipe for training a wav2vec-based ctc ASR system with librispeech. The system employs wav2vec as its encoder. Decoding is performed with -ctc greedy decoder. 
+ctc greedy decoder during validation and a beam search with an optional +language model during test. The test searcher can be chosen from the following +options: CTCBeamSearcher, CTCPrefixBeamSearcher, TorchAudioCTCPrefixBeamSearcher. + To run this recipe, do the following: > python train_with_wav2vec.py hparams/train_{hf,sb}_wav2vec.yaml The neural network is trained on CTC likelihood target and character units @@ -16,18 +19,21 @@ * Abdel Heba 2020 * Peter Plantinga 2020 * Samuele Cornell 2020 + * Adel Moumen 2023 """ import os import sys +from pathlib import Path + import torch -import logging -import speechbrain as sb -from speechbrain.utils.distributed import run_on_main from hyperpyyaml import load_hyperpyyaml -from pathlib import Path -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -38,37 +44,56 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) + # Downsample the inputs if specified + if hasattr(self.modules, "downsampler"): + wavs = self.modules.downsampler(wavs) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) # Forward pass - # Handling SpeechBrain vs HuggingFance pretrained models + # Handling SpeechBrain vs HuggingFace pretrained models if hasattr(self.modules, "extractor"): # SpeechBrain pretrained model latents = self.modules.extractor(wavs) feats = self.modules.encoder_wrapper(latents, wav_lens=wav_lens)[ "embeddings" ] else: # HuggingFace pretrained model - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) x = self.modules.enc(feats) # Compute outputs p_tokens = None logits = self.modules.ctc_lin(x) + + # Upsample the inputs if they have been highly downsampled + if hasattr(self.hparams, "upsampling") and self.hparams.upsampling: + logits = logits.view( + logits.shape[0], -1, self.hparams.output_neurons + ) + p_ctc = self.hparams.log_softmax(logits) - if stage != sb.Stage.TRAIN: + + if stage == sb.Stage.VALID: p_tokens = sb.decoders.ctc_greedy_decode( p_ctc, wav_lens, blank_id=self.hparams.blank_index ) + elif stage == sb.Stage.TEST: + p_tokens = test_searcher(p_ctc, wav_lens) + + candidates = [] + scores = [] + + for batch in p_tokens: + candidates.append([hyp.text for hyp in batch]) + scores.append([hyp.score for hyp in batch]) + + if hasattr(self.hparams, "rescorer"): + p_tokens, _ = self.hparams.rescorer.rescore(candidates, scores) + return p_ctc, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): @@ -79,64 +104,52 @@ def compute_objectives(self, predictions, batch, stage): ids = batch.id tokens, tokens_lens = batch.tokens - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens = torch.cat([tokens, tokens], dim=0) - tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if stage == sb.Stage.TRAIN and 
hasattr(self.hparams, "wav_augment"): + ( + tokens, + tokens_lens, + ) = self.hparams.wav_augment.replicate_multiple_labels( + tokens, tokens_lens + ) loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) loss = loss_ctc - if stage != sb.Stage.TRAIN: + if stage == sb.Stage.VALID: # Decode token terms to words predicted_words = [ "".join(self.tokenizer.decode_ndim(utt_seq)).split(" ") for utt_seq in predicted_tokens ] + elif stage == sb.Stage.TEST: + if hasattr(self.hparams, "rescorer"): + predicted_words = [ + hyp[0].split(" ") for hyp in predicted_tokens + ] + else: + predicted_words = [ + hyp[0].text.split(" ") for hyp in predicted_tokens + ] + + if stage != sb.Stage.TRAIN: target_words = [wrd.split(" ") for wrd in batch.wrd] self.wer_metric.append(ids, predicted_words, target_words) self.cer_metric.append(ids, predicted_words, target_words) return loss - def fit_batch(self, batch): - should_step = self.step % self.grad_accumulation_factor == 0 - - # Managing automatic mixed precision - if self.auto_mix_prec: - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - with torch.cuda.amp.autocast(): - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - self.scaler.scale(loss / self.grad_accumulation_factor).backward() - if should_step: - self.scaler.unscale_(self.wav2vec_optimizer) - self.scaler.unscale_(self.model_optimizer) - if self.check_gradients(loss): - self.scaler.step(self.wav2vec_optimizer) - self.scaler.step(self.model_optimizer) - self.scaler.update() - self.optimizer_step += 1 - else: - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - (loss / self.grad_accumulation_factor).backward() - if should_step: - if self.check_gradients(loss): - self.wav2vec_optimizer.step() - self.model_optimizer.step() - self.wav2vec_optimizer.zero_grad() - self.model_optimizer.zero_grad() - 
self.optimizer_step += 1 - - return loss.detach().cpu() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() + if stage == sb.Stage.TEST: + if hasattr(self.hparams, "rescorer"): + self.hparams.rescorer.move_rescorers_to_device() + def on_stage_end(self, stage, stage_loss, epoch): """Gets called at the end of an epoch.""" # Compute/store important stats @@ -171,22 +184,26 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def init_optimizers(self): "Initializes the wav2vec2 optimizer and model optimizer" - # Handling SpeechBrain vs HuggingFance pretrained models + # Handling SpeechBrain vs HuggingFace pretrained models if hasattr(self.modules, "extractor"): # SpeechBrain pretrained model self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( - self.modules.encoder_wrapper.latent_encoder.parameters() + self.modules.encoder_wrapper.parameters() ) else: # HuggingFace pretrained model @@ -198,6 +215,14 @@ def init_optimizers(self): self.hparams.model.parameters() ) + # save the optimizers in a dictionary + # the key will be used in `freeze_optimizers()` + self.optimizers_dict = { + "model_optimizer": self.model_optimizer, + } + if not self.hparams.freeze_wav2vec: + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + if self.checkpointer is not None: 
self.checkpointer.add_recoverable( "wav2vec_opt", self.wav2vec_optimizer @@ -207,11 +232,13 @@ def init_optimizers(self): def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -236,7 +263,8 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -293,22 +321,21 @@ def text_pipeline(wrd): # 4. 
Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "char_list", "tokens"], + datasets, + ["id", "sig", "wrd", "char_list", "tokens"], ) return train_data, valid_data, test_datasets, label_encoder if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - # If distributed_launch=True then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -351,13 +378,23 @@ def text_pipeline(wrd): # We load the pretrained wav2vec2 model if "pretrainer" in hparams.keys(): - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(asr_brain.device) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() - # We dynamicaly add the tokenizer to our brain class. + # We dynamically add the tokenizer to our brain class. # NB: This tokenizer corresponds to the one used for the LM!! 
asr_brain.tokenizer = label_encoder + ind2lab = label_encoder.ind2lab + vocab_list = [ind2lab[x] for x in range(len(ind2lab))] + + from speechbrain.decoders.ctc import CTCBeamSearcher + + test_searcher = CTCBeamSearcher( + **hparams["test_beam_search"], + vocab_list=vocab_list, + ) + # Training asr_brain.fit( asr_brain.hparams.epoch_counter, @@ -368,10 +405,15 @@ def text_pipeline(wrd): ) # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + for k in test_datasets.keys(): # keys are test_clean, test_other etc - asr_brain.hparams.wer_file = os.path.join( - hparams["output_folder"], "wer_{}.txt".format(k) + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" ) asr_brain.evaluate( - test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"] + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="WER", ) diff --git a/recipes/LibriSpeech/ASR/CTC/train_with_wav2vec_k2.py b/recipes/LibriSpeech/ASR/CTC/train_with_wav2vec_k2.py new file mode 100644 index 0000000000..60ba43974f --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/train_with_wav2vec_k2.py @@ -0,0 +1,499 @@ +#!/usr/bin/env/python3 +"""Recipe for training a wav2vec-based ctc ASR system with librispeech. +The system employs wav2vec as its encoder. Decoding is performed with +k2 through the use of a decoding graph and, optionally, a rescoring LM. +To run this recipe, do the following: +> python train_with_wav2vec.py hparams/train_{hf,sb}_wav2vec.yaml +The neural network is trained on CTC likelihood target and character units +are used as basic recognition tokens. 
+ +Authors + * Pierre Champion 2023 + * Zeyu Zhao 2023 + * Georgios Karakasidis 2023 + * Rudolf A Braun 2022 + * Titouan Parcollet 2022 + * Sung-Lin Yeh 2021 + * Ju-Chieh Chou 2020 + * Mirco Ravanelli 2020 + * Abdel Heba 2020 + * Peter Plantinga 2020 + * Samuele Cornell 2020 +""" + +import os +import sys +from collections import defaultdict +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +import speechbrain.integrations.k2_fsa as sbk2 +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) + + # Downsample the inputs if specified + if hasattr(self.modules, "downsampler"): + wavs = self.modules.downsampler(wavs) + + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + + # Forward pass + + # Handling SpeechBrain vs HuggingFace pretrained models + if hasattr(self.modules, "extractor"): # SpeechBrain pretrained model + latents = self.modules.extractor(wavs) + feats = self.modules.encoder_wrapper(latents, wav_lens=wav_lens)[ + "embeddings" + ] + else: # HuggingFace pretrained model + feats = self.modules.wav2vec2(wavs, wav_lens) + + x = self.modules.enc(feats) + + # Compute outputs + logits = self.modules.ctc_lin(x) + + # Upsample the inputs if they have been highly downsampled + if hasattr(self.hparams, "upsampling") and self.hparams.upsampling: + logits = logits.view( + logits.shape[0], -1, self.hparams.output_neurons + ) + + p_ctc = self.hparams.log_softmax(logits) + paths = None + if stage == sb.Stage.VALID or stage == sb.Stage.TEST: + # Decode token terms to words + lattice = sbk2.lattice_decoder.get_lattice( + p_ctc, + wav_lens, + self.decoder["decoding_graph"], + search_beam=self.hparams.test_search_beam, + output_beam=self.hparams.test_output_beam, + ac_scale=self.hparams.ac_scale, + max_active_states=self.hparams.test_max_active_state, + min_active_states=self.hparams.test_min_active_state, + ) + if stage == sb.Stage.VALID: + # 1best decoding for fast valid + paths = {"onebest": sbk2.lattice_decoder.one_best_decoding(lattice)} + elif stage == sb.Stage.TEST: + # user defined decoding for test + paths = self.decoder["decoding_method"](lattice) + + return p_ctc, wav_lens, paths + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + p_ctc, wav_lens, paths = predictions + + # Sort batch to be descending by length of wav files, which is required + # by `k2.intersect_dense` called in `k2.ctc_loss` + indices = torch.argsort(wav_lens, descending=True) + p_ctc = p_ctc[indices] + wav_lens = wav_lens[indices] + texts = 
[batch.wrd[i] for i in indices] + + is_training = stage == sb.Stage.TRAIN + loss = self.hparams.ctc_cost( + log_probs=p_ctc, + input_lens=wav_lens, + graph_compiler=self.graph_compiler, + texts=texts, + is_training=is_training, + ) + + if stage == sb.Stage.TEST or stage == sb.Stage.VALID: + for k, path in paths.items(): + predicted_texts = sbk2.utils.lattice_paths_to_text( + path, self.lexicon.word_table + ) + + predicted_words = [wrd.split(" ") for wrd in predicted_texts] + target_words = [wrd.split(" ") for wrd in batch.wrd] + self.wer_metrics[k].append( + batch.id, predicted_words, target_words + ) + self.cer_metrics[k].append( + batch.id, predicted_words, target_words + ) + # For TEST and VALID stages, the loss value is not exact. + # The words have a target length (e.g., number of phones or characters) of 1. + # As such, sentences with have a higher loss during CTC loss 'mean' reduction mode. + # It does not impact training. + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch. In this case, + it initializes the wer and cer metric watchers. If the decoding + method is whole-lattice-rescoring then a list of wer/cer metrics + will be initialized (for each lm scale). Otherwise, a single class + will be initialized for wer and cer, respectively. + """ + if stage == sb.Stage.VALID: + logger.info("Valid stage") + if stage == sb.Stage.TEST: + logger.info("Test stage") + self.cer_metrics = defaultdict(self.hparams.cer_computer) + self.wer_metrics = defaultdict(self.hparams.error_rate_computer) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch. During testing, its primary goal + is to summarize the WER/CER stats and save them in a file. 
+ """ + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + # Only report the fist config (first rescoring_lm_scale value) + stage_stats["CER"] = list(self.cer_metrics.values())[0].summarize( + "error_rate" + ) + stage_stats["WER"] = list(self.wer_metrics.values())[0].summarize( + "error_rate" + ) + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + old_lr_model, new_lr_model = self.hparams.lr_annealing_model( + stage_stats["loss"] + ) + old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate( + self.model_optimizer, new_lr_model + ) + sb.nnet.schedulers.update_learning_rate( + self.wav2vec_optimizer, new_lr_wav2vec + ) + self.hparams.train_logger.log_stats( + stats_meta={ + "epoch": epoch, + "lr_model": old_lr_model, + "lr_wav2vec": old_lr_wav2vec, + }, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + for k, stat in self.wer_metrics.items(): + with open( + self.hparams.wer_file + f"_{k}.txt", + "w", + encoding="utf-8", + ) as w: + stat.write_stats(w) + + def init_optimizers(self): + "Initializes the wav2vec2 optimizer and model optimizer" + # Handling SpeechBrain vs HuggingFace pretrained models + if hasattr(self.modules, "extractor"): # SpeechBrain pretrained model + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.encoder_wrapper.parameters() + ) + + else: # HuggingFace pretrained model + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.wav2vec2.parameters() + ) + + self.model_optimizer = 
self.hparams.model_opt_class( + self.hparams.model.parameters() + ) + + # save the optimizers in a dictionary + # the key will be used in `freeze_optimizers()` + self.optimizers_dict = { + "model_optimizer": self.model_optimizer, + } + if not self.hparams.freeze_wav2vec: + self.optimizers_dict["wav2vec_optimizer"] = self.wav2vec_optimizer + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "wav2vec_opt", self.wav2vec_optimizer + ) + self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! 
otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides("wrd", "char_list") + def text_pipeline(wrd): + yield wrd + char_list = list(wrd) + yield char_list + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. 
Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "wrd", "char_list"], + ) + + return train_data, valid_data, test_datasets + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # If distributed_launch=True then + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # env_corrupt is not supported with k2 yet + if hparams.get("env_corrupt", None): + raise NotImplementedError("env_corrupt is not supported with k2 yet") + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + import librispeech_prepare + + # multi-gpu (ddp) save data preparation + run_on_main( + librispeech_prepare.prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # Download the vocabulary file for librispeech + librispeech_prepare.download_librispeech_vocab_text( + destination=hparams["vocab_file"] + ) + + # here we create the datasets objects as well as tokenization and encoding + train_data, valid_data, test_datasets = dataio_prepare(hparams) + + # Create the lexicon.txt for k2 + run_on_main( + sbk2.lexicon.prepare_char_lexicon, + kwargs={ + "lang_dir": hparams["lang_dir"], + "vocab_files": [hparams["vocab_file"]], + "extra_csv_files": ( + [hparams["output_folder"] + "/train.csv"] + if not hparams["skip_prep"] + else [] + ), + "add_word_boundary": hparams["add_word_boundary"], + }, + ) + + caching = 
( + {"cache": False} + if "caching" in hparams and hparams["caching"] is False + else {} + ) + + # Create the lang directory for k2 + run_on_main( + sbk2.prepare_lang.prepare_lang, + kwargs={ + "lang_dir": hparams["lang_dir"], + "sil_prob": hparams["sil_prob"], + **caching, + }, + ) + + # OpenSLR ngram models + if ( + hparams["G_arpa"] + ".gz" + in librispeech_prepare.OPEN_SLR_11_NGRAM_MODELs + and hparams["G_rescoring_arpa"] + ".gz" + in librispeech_prepare.OPEN_SLR_11_NGRAM_MODELs + and ( + hparams["compose_HL_with_G"] + or hparams["decoding_method"] == "whole-lattice-rescoring" + ) + ): + librispeech_prepare.download_openslr_librispeech_lm( + destination=hparams["lm_dir"], + rescoring_lm=( + hparams["decoding_method"] == "whole-lattice-rescoring" + ), + ) + # SB ngram models + elif ( + "sb" in hparams["G_arpa"] + and "sb" in hparams["G_rescoring_arpa"] + and ( + hparams["compose_HL_with_G"] + or hparams["decoding_method"] == "whole-lattice-rescoring" + ) + ): + librispeech_prepare.download_sb_librispeech_lm( + destination=hparams["lm_dir"], + rescoring_lm=( + hparams["decoding_method"] == "whole-lattice-rescoring" + ), + ) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + lexicon = sbk2.lexicon.Lexicon(hparams["lang_dir"]) + graph_compiler = sbk2.graph_compiler.CtcGraphCompiler( + lexicon, + device=asr_brain.device, + ) + + decoding_params = {} + for param_name in ( + "compose_HL_with_G", + "lm_dir", + "decoding_method", + "caching", + "G_arpa", + "G_rescoring_arpa", + "lang_dir", + "output_folder", + "rescoring_lm_scale", + ): + if param_name in hparams: + decoding_params[param_name] = hparams[param_name] + + decoder = sbk2.lattice_decoder.get_decoding( + decoding_params, graph_compiler, device=asr_brain.device + ) + + # Add attributes to asr_brain + asr_brain.lexicon = lexicon + asr_brain.graph_compiler = graph_compiler + asr_brain.decoder = 
decoder + + # We load the pretrained wav2vec2 model + if "pretrainer" in hparams.keys(): + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected(asr_brain.device) + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + # Testing + for k in test_datasets.keys(): # keys are test_clean, test_other etc + wer_dir = os.path.join(hparams["output_wer_folder"], f"metric_{k}") + os.makedirs(wer_dir, exist_ok=True) + exp = "HLG" if hparams["compose_HL_with_G"] else "HL" + asr_brain.hparams.wer_file = os.path.join(wer_dir, f"wer_{exp}") + asr_brain.evaluate( + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="WER", + ) diff --git a/recipes/LibriSpeech/ASR/CTC/train_with_whisper.py b/recipes/LibriSpeech/ASR/CTC/train_with_whisper.py new file mode 100644 index 0000000000..9a3e8c7c9f --- /dev/null +++ b/recipes/LibriSpeech/ASR/CTC/train_with_whisper.py @@ -0,0 +1,386 @@ +#!/usr/bin/env/python3 +"""Recipe for training a whisper-based ctc ASR system with librispeech. +The system employs whisper from OpenAI (https://cdn.openai.com/papers/whisper.pdf). +This recipe take only the whisper encoder and add a DNN + CTC to fine-tune. 
+ +If you want to use the full whisper system, please refer to the recipe +speechbrain/recipes/LibriSpeech/ASR/transformer/train_with_whisper.py + +To run this recipe, do the following: +> python train_with_whisper.py hparams/train_hf_whisper_encoder.yaml + +Authors + * Titouan Parcollet 2022 + * Rudolf A Braun 2022 + * Sung-Lin Yeh 2021 + * Ju-Chieh Chou 2020 + * Mirco Ravanelli 2020 + * Abdel Heba 2020 + * Peter Plantinga 2020 + * Samuele Cornell 2020 +""" + +import sys +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) + + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + + # Forward pass + + # Encode with Whisper and then DNN + feats = self.modules.whisper(wavs) + x = self.modules.enc(feats) + + # Compute outputs + p_tokens = None + logits = self.modules.ctc_lin(x) + p_ctc = self.hparams.log_softmax(logits) + if stage == sb.Stage.VALID: + p_tokens = sb.decoders.ctc_greedy_decode( + p_ctc, wav_lens, blank_id=self.hparams.blank_index + ) + elif stage == sb.Stage.TEST: + p_tokens = test_searcher(p_ctc, wav_lens) + + return p_ctc, wav_lens, p_tokens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC) given predictions and targets.""" + + p_ctc, wav_lens, predicted_tokens = predictions + + ids = batch.id + tokens, tokens_lens = batch.tokens + + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + ( + tokens, + tokens_lens, + ) = self.hparams.wav_augment.replicate_multiple_labels( + tokens, tokens_lens + ) + + loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) + loss = loss_ctc + + if stage == sb.Stage.VALID: + # Decode token terms to words + predicted_words = self.tokenizer( + predicted_tokens, task="decode_from_list" + ) + + elif stage == sb.Stage.TEST: + predicted_words = [ + hyp[0].text.split(" ") for hyp in predicted_tokens + ] + + if stage != sb.Stage.TRAIN: + # Convert indices to words + target_words = undo_padding(tokens, tokens_lens) + target_words = self.tokenizer(target_words, task="decode_from_list") + + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric 
= self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + old_lr_model, new_lr_model = self.hparams.lr_annealing_model( + stage_stats["loss"] + ) + old_lr_whisper, new_lr_whisper = self.hparams.lr_annealing_whisper( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate( + self.model_optimizer, new_lr_model + ) + sb.nnet.schedulers.update_learning_rate( + self.whisper_optimizer, new_lr_whisper + ) + self.hparams.train_logger.log_stats( + stats_meta={ + "epoch": epoch, + "lr_model": old_lr_model, + "lr_whisper": old_lr_whisper, + }, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + def init_optimizers(self): + "Initializes the whisper optimizer and model optimizer" + self.whisper_optimizer = self.hparams.whisper_opt_class( + self.modules.whisper.parameters() + ) + + self.model_optimizer = self.hparams.model_opt_class( + self.hparams.model.parameters() + ) + + # save the optimizers in a dictionary + # the key will be used in `freeze_optimizers()` + self.optimizers_dict = { + "model_optimizer": self.model_optimizer, + "whisper_optimizer": self.whisper_optimizer, 
+ } + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "whisper_opt", self.whisper_optimizer + ) + self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + + def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} + if not self.hparams.freeze_whisper: + # Here we added a warmup to the CTC encoder to make sure that + # it does not break the whisper with too large gradients. + if self.optimizer_step > self.hparams.warmup_steps: + valid_optimizers["whisper_optimizer"] = optimizers[ + "whisper_optimizer" + ] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers + + +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! 
otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "char_list", "tokens_list", "tokens" + ) + def text_pipeline(wrd): + yield wrd + char_list = list(wrd) + yield char_list + tokens_list = tokenizer.sp.encode_as_ids(wrd) + yield tokens_list + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. 
Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "wrd", "char_list", "tokens"], + ) + + return train_data, valid_data, test_datasets + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + from librispeech_prepare import prepare_librispeech # noqa + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # Defining tokenizer and loading it + tokenizer = SentencePiece( + model_dir=hparams["save_folder"], + vocab_size=hparams["output_neurons"], + annotation_train=hparams["train_csv"], + annotation_read="wrd", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + ) + + # here we create the datasets objects as well as tokenization and encoding + train_data, valid_data, test_datasets = dataio_prepare(hparams, tokenizer) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # We load the pretrained whisper model + if "pretrainer" in hparams.keys(): + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected(asr_brain.device) + + # We dynamically add the 
tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for the LM!! + asr_brain.tokenizer = tokenizer + + vocab_list = [ + tokenizer.sp.id_to_piece(i) for i in range(tokenizer.sp.vocab_size()) + ] + + from speechbrain.decoders.ctc import CTCBeamSearcher + + test_searcher = CTCBeamSearcher( + **hparams["test_beam_search"], + vocab_list=vocab_list, + ) + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + import os + + # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="WER", + ) diff --git a/recipes/LibriSpeech/ASR/seq2seq/README.md b/recipes/LibriSpeech/ASR/seq2seq/README.md index 2c8b0278bd..cf74b6159f 100644 --- a/recipes/LibriSpeech/ASR/seq2seq/README.md +++ b/recipes/LibriSpeech/ASR/seq2seq/README.md @@ -3,17 +3,19 @@ This folder contains the scripts to train a seq2seq CNN-RNN-based system using L You can download LibriSpeech at http://www.openslr.org/12 # How to run +```shell python train.py hparams/file.yaml +``` # Results | Release | hyperparams file | Test Clean WER | HuggingFace link | Full model link | GPUs | |:-------------:|:---------------------------:| :-----:| :-----:| :-----:| :--------:| -| 01-03-21 | train_BPE_1000.yaml | 3.16 | [HuggingFace](https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech) | [Model](https://drive.google.com/drive/folders/19mAyMR1ITSb83Anhds4n694PLwKD47yf?usp=sharing)| 1xV100 32GB | -| 01-03-21 | train_BPE_5000.yaml | 2.89 | 
[HuggingFace](https://huggingface.co/speechbrain/asr-crdnn-transformerlm-librispeech) | [Model](https://drive.google.com/drive/folders/15uUZ21HYnw4KyOPW3tx8bLrS9RoBZfS7?usp=sharing) | 1xV100 32GB | +| 01-03-21 | train_BPE_1000.yaml | 3.16 | [HuggingFace](https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech) | [Model](https://www.dropbox.com/sh/1ycv07gyxdq8hdl/AABUDYzza4SLYtY45RcGf2_0a?dl=0)| 1xV100 32GB | +| 01-03-21 | train_BPE_5000.yaml | 2.89 | [HuggingFace](https://huggingface.co/speechbrain/asr-crdnn-transformerlm-librispeech) | [Model](https://www.dropbox.com/sh/a39wq3h60luv552/AABBnCM2Uf-CNax_cgMWdqDda?dl=0) | 1xV100 32GB | # Training Time -It takes about 5 hours for each epoch on a NVDIA V100 (32GB). +It takes about 5 hours for each epoch on a NVIDIA V100 (32GB). # PreTrained Model + Easy-Inference You can find the pre-trained model with an easy-inference function on HuggingFace: @@ -22,7 +24,7 @@ You can find the pre-trained model with an easy-inference function on HuggingFac - https://huggingface.co/speechbrain/asr-transformer-transformerlm-librispeech You can find the full experiment folder (i.e., checkpoints, logs, etc) here: -https://drive.google.com/drive/folders/15uUZ21HYnw4KyOPW3tx8bLrS9RoBZfS7?usp=sharing +https://www.dropbox.com/sh/a39wq3h60luv552/AABBnCM2Uf-CNax_cgMWdqDda?dl=0 # **About SpeechBrain** @@ -35,6 +37,15 @@ https://drive.google.com/drive/folders/15uUZ21HYnw4KyOPW3tx8bLrS9RoBZfS7?usp=sha Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_1000.yaml b/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_1000.yaml index 35a03a8e37..1a6e67a924 100644 --- a/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_1000.yaml +++ b/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_1000.yaml @@ -11,9 +11,9 @@ # Seed needs to be set at top of yaml, before objects with parameters seed: 2602 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_BPE_960h_LM/ -wer_file: !ref /wer.txt +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt @@ -27,21 +27,25 @@ 
pretrained_lm_tokenizer_path: speechbrain/asr-crdnn-rnnlm-librispeech # Data files data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech -# noise/ris dataset will automatically be downloaded -data_folder_rirs: !ref # where to store noisy data for augment (change it if needed) train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] dev_splits: ["dev-clean"] test_splits: ["test-clean", "test-other"] skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min -train_csv: !ref /train.csv -valid_csv: !ref /dev-clean.csv +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv test_csv: - - !ref /test-clean.csv - - !ref /test-other.csv + - !ref /test-clean.csv + - !ref /test-other.csv + +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + +####################### Training Parameters #################################### -# Training parameters number_of_epochs: 15 number_of_ctc_epochs: 5 batch_size: 8 @@ -49,14 +53,19 @@ lr: 1.0 ctc_weight: 0.5 sorting: ascending dynamic_batching: False +precision: fp32 # bf16, fp16 or fp32 # dynamic batching parameters, if used +feats_hop_size: 0.01 +max_batch_length: 20000 # in terms of frames +shuffle: True +batch_ordering: random +num_buckets: 20 dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 20000 # in terms of frames - shuffle_ex: True - batch_ordering: random - num_buckets: 20 + max_batch_length: !ref + shuffle: !ref + batch_ordering: !ref + num_buckets: !ref # Feature parameters sample_rate: 16000 @@ -69,16 +78,20 @@ opt_class: !name:torch.optim.Adadelta eps: 1.e-8 # Dataloader options +num_workers: 4 train_dataloader_opts: + num_workers: !ref batch_size: !ref 
valid_dataloader_opts: + num_workers: !ref batch_size: !ref test_dataloader_opts: - batch_size: !ref + batch_size: 1 + +####################### Model Parameters ####################################### -# Model parameters activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 2 @@ -107,12 +120,13 @@ test_beam_size: 80 eos_threshold: 1.5 using_max_attn_shift: True max_attn_shift: 240 -lm_weight: 0.50 -ctc_weight_decode: 0.0 -coverage_penalty: 1.5 temperature: 1.25 temperature_lm: 1.25 +# Scoring parameters +lm_weight: 0.5 +coverage_penalty: 1.5 + epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref @@ -124,17 +138,6 @@ compute_features: !new:speechbrain.lobes.features.Fbank n_fft: !ref n_mels: !ref -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] enc: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] @@ -214,49 +217,56 @@ modules: ctc_lin: !ref seq_lin: !ref normalize: !ref - env_corrupt: !ref lm_model: !ref model: !new:torch.nn.ModuleList - [!ref , !ref , !ref , !ref , !ref ] +############################## Decoding & optimiser ############################ + +coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer + vocab_size: !ref + +rnnlm_scorer: !new:speechbrain.decoders.scorer.RNNLMScorer + language_model: !ref + temperature: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , + !ref ] + weights: + rnnlm: !ref + coverage: !ref + +# Search valid_search: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref using_max_attn_shift: !ref 
max_attn_shift: !ref - coverage_penalty: !ref temperature: !ref -test_search: !new:speechbrain.decoders.S2SRNNBeamSearchLM +test_search: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref - language_model: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref using_max_attn_shift: !ref max_attn_shift: !ref - coverage_penalty: !ref - lm_weight: !ref - ctc_weight: !ref temperature: !ref - temperature_lm: !ref + scorer: !ref lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler initial_value: !ref @@ -264,6 +274,57 @@ lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.8 patient: 0 +############################## Augmentations ################################### + +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: diff --git a/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_1000_sligru.yaml b/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_1000_sligru.yaml new file mode 100644 index 0000000000..2db49a33d3 --- /dev/null +++ b/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_1000_sligru.yaml @@ -0,0 +1,352 @@ +# ############################################################################ +# Model: E2E ASR with attention-based ASR +# Encoder: CRDNN model +# Decoder: SLi-GRU + beamsearch + RNNLM +# Tokens: BPE with unigram +# losses: CTC + NLL +# Training: LibriSpeech 960h +# Authors: Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, Peter Plantinga, +# Samuele Cornell 2020, Adel Moumen 2023 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters +seed: 2602 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/CRDNN_BPE_960h_LM_SLiGRU/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. Here, we download everything from the +# speechbrain HuggingFace repository. 
However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g if you want to use your own LM / tokenizer. +pretrained_lm_tokenizer_path: speechbrain/asr-crdnn-rnnlm-librispeech + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech + +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +ckpt_interval_minutes: 15 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + +####################### Training Parameters #################################### + +number_of_epochs: 15 +number_of_ctc_epochs: 15 +batch_size: 24 +lr: 1.0 +ctc_weight: 0.5 +sorting: ascending +dynamic_batching: False +precision: fp32 # bf16, fp16 or fp32 + +# dynamic batching parameters, if used +feats_hop_size: 0.01 +max_batch_length: 20000 # in terms of frames +shuffle: True +batch_ordering: random +num_buckets: 20 +dynamic_batch_sampler: + max_batch_length: !ref + shuffle: !ref + batch_ordering: !ref + num_buckets: !ref + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 40 + +opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +# Dataloader options +num_workers: 4 +train_dataloader_opts: + num_workers: !ref + batch_size: !ref + +valid_dataloader_opts: + num_workers: !ref + batch_size: !ref + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ####################################### + 
+activation: !name:torch.nn.LeakyReLU +dropout: 0.15 +cnn_blocks: 2 +cnn_channels: (128, 256) +inter_layer_pooling_size: (2, 2) +cnn_kernelsize: (3, 3) +time_pooling_size: 4 +rnn_class: !name:speechbrain.nnet.RNN.SLiGRU +rnn_layers: 4 +rnn_neurons: 1024 +rnn_bidirectional: True +dnn_blocks: 2 +dnn_neurons: 512 +emb_size: 128 +dec_neurons: 1024 +output_neurons: 1000 # Number of tokens (same as LM) +blank_index: 0 +bos_index: 0 +eos_index: 0 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_beam_size: 80 +test_beam_size: 80 +eos_threshold: 1.5 +using_max_attn_shift: True +max_attn_shift: 240 +lm_weight: 0.50 +coverage_penalty: 1.5 +temperature: 1.25 +temperature_lm: 1.25 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +enc: !new:speechbrain.lobes.models.CRDNN.CRDNN + input_shape: [null, null, !ref ] + activation: !ref + dropout: !ref + cnn_blocks: !ref + cnn_channels: !ref + cnn_kernelsize: !ref + inter_layer_pooling_size: !ref + time_pooling: True + using_2d_pooling: False + time_pooling_size: !ref + rnn_class: !ref + rnn_layers: !ref + rnn_neurons: !ref + rnn_bidirectional: !ref + rnn_re_init: True + dnn_blocks: !ref + dnn_neurons: !ref + use_rnnp: False + +emb: !new:speechbrain.nnet.embedding.Embedding + num_embeddings: !ref + embedding_dim: !ref + +dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder + enc_dim: !ref + input_size: !ref + rnn_type: gru + attn_type: location + hidden_size: !ref + attn_dim: 1024 + num_layers: 1 + scaling: 1.0 + channels: 10 + kernel_size: 100 + re_init: True + dropout: !ref + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: 
!new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +seq_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: 0.1 + +# This is the RNNLM that is used according to the Huggingface repository +# NB: It has to match the pre-trained RNNLM!! +lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM + output_neurons: !ref + embedding_dim: !ref + activation: !name:torch.nn.LeakyReLU + dropout: 0.0 + rnn_layers: 2 + rnn_neurons: 2048 + dnn_blocks: 1 + dnn_neurons: 512 + return_hidden: True # For inference + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +# Models +modules: + enc: !ref + emb: !ref + dec: !ref + ctc_lin: !ref + seq_lin: !ref + normalize: !ref + lm_model: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref , !ref ] + +############################## Decoding & optimiser ############################ + +coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer + vocab_size: !ref + +rnnlm_scorer: !new:speechbrain.decoders.scorer.RNNLMScorer + language_model: !ref + temperature: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , + !ref ] + weights: + rnnlm: !ref + coverage: !ref + +# Search +valid_search: !new:speechbrain.decoders.S2SRNNBeamSearcher + embedding: !ref + decoder: !ref + linear: !ref + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + eos_threshold: !ref + using_max_attn_shift: !ref + max_attn_shift: !ref + temperature: !ref + +test_search: !new:speechbrain.decoders.S2SRNNBeamSearcher + embedding: !ref + decoder: !ref + linear: !ref + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + eos_threshold: !ref + using_max_attn_shift: !ref + max_attn_shift: !ref + temperature: !ref + scorer: !ref + +lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + 
improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +############################## Augmentations ################################### + +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + scheduler: !ref + normalizer: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the 
yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_5000.yaml b/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_5000.yaml index 74c34585c9..cc082220c5 100644 --- a/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_5000.yaml +++ b/recipes/LibriSpeech/ASR/seq2seq/hparams/train_BPE_5000.yaml @@ -12,9 +12,9 @@ # Seed needs to be set at top of yaml, before objects with parameters # are instantiated seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_BPE_960h_5k_LM/ -wer_file: !ref /wer.txt +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt @@ -28,20 +28,25 @@ pretrained_lm_tokenizer_path: speechbrain/asr-crdnn-transformerlm-librispeech # Data files data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech -# noise/ris dataset will automatically be downloaded -data_folder_rirs: !ref + train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] dev_splits: ["dev-clean"] test_splits: ["test-clean", "test-other"] skip_prep: False ckpt_interval_minutes: 25 # save checkpoint every N min -train_csv: !ref /train.csv -valid_csv: !ref /dev-clean.csv +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv test_csv: - - !ref /test-clean.csv - - !ref /test-other.csv + - !ref /test-clean.csv + - !ref /test-other.csv + +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. 
+NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + +####################### Training Parameters #################################### -# Training parameters number_of_epochs: 25 number_of_ctc_epochs: 25 batch_size: 8 @@ -49,14 +54,19 @@ lr: 1.0 ctc_weight: 0.5 sorting: ascending dynamic_batching: False +precision: fp32 # bf16, fp16 or fp32 # dynamic batching parameters, if used +feats_hop_size: 0.01 +max_batch_length: 20000 # in terms of frames +shuffle: True +batch_ordering: random +num_buckets: 20 dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 20000 # in terms of frames - shuffle_ex: True - batch_ordering: random - num_buckets: 20 + max_batch_length: !ref + shuffle: !ref + batch_ordering: !ref + num_buckets: !ref # Feature parameters sample_rate: 16000 @@ -69,16 +79,20 @@ opt_class: !name:torch.optim.Adadelta eps: 1.e-8 # Dataloader options +num_workers: 4 train_dataloader_opts: + num_workers: !ref batch_size: !ref valid_dataloader_opts: + num_workers: !ref batch_size: !ref test_dataloader_opts: batch_size: 1 -# Model parameters +####################### Model Parameters ####################################### + activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 2 @@ -104,15 +118,18 @@ min_decode_ratio: 0.0 max_decode_ratio: 1.0 valid_beam_size: 20 test_beam_size: 40 +using_eos_threshold: True eos_threshold: 1.5 using_max_attn_shift: True max_attn_shift: 300 -lm_weight: 0.80 -ctc_weight_decode: 0.40 +lm_weight: 0.8 +temperature: 1.0 ctc_window_size: 200 + +# Scoring parameters +ctc_weight_decode: 0.40 coverage_penalty: 1.5 -temperature: 1.0 -temperature_lm: 1.0 + epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref @@ -125,17 +142,6 @@ compute_features: !new:speechbrain.lobes.features.Fbank n_fft: !ref n_mels: !ref -env_corrupt: 
!new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] enc: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] @@ -216,50 +222,71 @@ modules: ctc_lin: !ref seq_lin: !ref normalize: !ref - env_corrupt: !ref lm_model: !ref model: !new:torch.nn.ModuleList - [!ref , !ref , !ref , !ref , !ref ] +############################## Decoding & optimiser ############################ + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + ctc_window_size: !ref + +coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer + vocab_size: !ref + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + +valid_scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + coverage: !ref + +test_scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [ + !ref , + !ref ] + partial_scorers: [!ref ] + weights: + transformerlm: !ref + coverage: !ref + ctc: !ref + +# Search valid_search: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref using_max_attn_shift: !ref max_attn_shift: !ref - coverage_penalty: !ref temperature: !ref + scorer: !ref -test_search: !new:speechbrain.decoders.S2SRNNBeamSearchTransformerLM +test_search: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref - language_model: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref using_max_attn_shift: !ref 
max_attn_shift: !ref - coverage_penalty: !ref - lm_weight: !ref - ctc_weight: !ref - ctc_window_size: !ref + using_eos_threshold: !ref temperature: !ref - temperature_lm: !ref + scorer: !ref lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler initial_value: !ref @@ -267,6 +294,57 @@ lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler annealing_factor: 0.8 patient: 0 +############################## Augmentations ################################### + +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Logging and Pretrainer ########################## + checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref recoverables: diff --git a/recipes/LibriSpeech/ASR/seq2seq/train.py b/recipes/LibriSpeech/ASR/seq2seq/train.py index ecc877a28b..0af9f1e528 100644 --- a/recipes/LibriSpeech/ASR/seq2seq/train.py +++ b/recipes/LibriSpeech/ASR/seq2seq/train.py @@ -3,37 +3,28 @@ The system employs an encoder, a decoder, and an attention mechanism between them. Decoding is performed with beamsearch coupled with a neural language model. - To run this recipe, do the following: > python train.py hparams/train_BPE1000.yaml - With the default hyperparameters, the system employs a CRDNN encoder. The decoder is based on a standard GRU. Beamsearch coupled with a RNN language model is used on the top of decoder probabilities. - The neural network is trained on both CTC and negative-log likelihood targets and sub-word units estimated with Byte Pairwise Encoding (BPE) are used as basic recognition tokens. Training is performed on the full LibriSpeech dataset (960 h). - The experiment file is flexible enough to support a large variety of different systems. By properly changing the parameter files, you can try different encoders, decoders, tokens (e.g, characters instead of BPE), training split (e.g, train-clean 100 rather than the full one), and many other possible variations. - This recipe assumes that the tokenizer and the LM are already trained. 
To avoid token mismatches, the tokenizer used for the acoustic model is the same use for the LM. The recipe downloads the pre-trained tokenizer and LM. - If you would like to train a full system from scratch do the following: 1- Train a tokenizer (see ../../Tokenizer) 2- Train a language model (see ../../LM) 3- Train the acoustic model (with this code). - - - Authors * Ju-Chieh Chou 2020 * Mirco Ravanelli 2020 @@ -43,16 +34,17 @@ * Andreas Nautsch 2021 """ -import os import sys +from pathlib import Path + import torch -import logging -import speechbrain as sb -from speechbrain.utils.distributed import run_on_main from hyperpyyaml import load_hyperpyyaml -from pathlib import Path -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -64,16 +56,10 @@ def compute_forward(self, batch, stage): tokens_bos, _ = batch.tokens_bos wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) - - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) # Forward pass feats = self.hparams.compute_features(wavs) @@ -87,45 +73,43 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs + p_ctc, p_tokens = None, None if stage == sb.Stage.TRAIN: current_epoch = self.hparams.epoch_counter.current if current_epoch <= self.hparams.number_of_ctc_epochs: # Output layer for ctc log-probabilities logits = self.modules.ctc_lin(x) p_ctc = self.hparams.log_softmax(logits) - return p_ctc, p_seq, wav_lens - else: - return p_seq, wav_lens else: if stage == sb.Stage.VALID: - p_tokens, scores = self.hparams.valid_search(x, wav_lens) + # Get token strings from index prediction + p_tokens, _, _, _ = self.hparams.valid_search(x, wav_lens) else: - p_tokens, scores = self.hparams.test_search(x, wav_lens) - return p_seq, wav_lens, p_tokens + p_tokens, _, _, _ = self.hparams.test_search(x, wav_lens) + + return p_ctc, p_seq, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" current_epoch = self.hparams.epoch_counter.current - if stage == sb.Stage.TRAIN: - if current_epoch <= self.hparams.number_of_ctc_epochs: - p_ctc, p_seq, wav_lens = predictions - else: - p_seq, wav_lens = predictions - else: - p_seq, wav_lens, predicted_tokens = predictions + p_ctc, p_seq, wav_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input 
(increasing the time dimension) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.wav_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens ) - tokens = torch.cat([tokens, tokens], dim=0) - tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens @@ -156,23 +140,6 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: @@ -199,24 +166,30 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. 
- It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -241,7 +214,8 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -291,36 +265,29 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], ) train_batch_sampler = None valid_batch_sampler = None if hparams["dynamic_batching"]: - from speechbrain.dataio.sampler import DynamicBatchSampler # noqa - from speechbrain.dataio.dataloader import SaveableDataLoader # noqa from speechbrain.dataio.batch import PaddedBatch # noqa + from speechbrain.dataio.dataloader import SaveableDataLoader # noqa + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa dynamic_hparams = hparams["dynamic_batch_sampler"] - hop_size = dynamic_hparams["feats_hop_size"] - - num_buckets = dynamic_hparams["num_buckets"] + hop_size = hparams["feats_hop_size"] train_batch_sampler = DynamicBatchSampler( train_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, length_func=lambda x: x["duration"] * (1 / hop_size), - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + **dynamic_hparams, ) valid_batch_sampler = DynamicBatchSampler( valid_data, - 
dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, length_func=lambda x: x["duration"] * (1 / hop_size), - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + **dynamic_hparams, ) return ( @@ -333,15 +300,13 @@ def text_pipeline(wrd): if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -368,6 +333,7 @@ def text_pipeline(wrd): "skip_prep": hparams["skip_prep"], }, ) + run_on_main(hparams["prepare_noise_data"]) # here we create the datasets objects as well as tokenization and encoding ( @@ -380,8 +346,8 @@ def text_pipeline(wrd): # We download the pretrained LM from HuggingFace (or elsewhere depending on # the path given in the YAML file). The tokenizer is loaded at the same time. - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Trainer initialization asr_brain = ASR( @@ -392,7 +358,7 @@ def text_pipeline(wrd): checkpointer=hparams["checkpointer"], ) - # We dynamicaly add the tokenizer to our brain class. + # We dynamically add the tokenizer to our brain class. # NB: This tokenizer corresponds to the one used for the LM!! 
asr_brain.tokenizer = hparams["tokenizer"] train_dataloader_opts = hparams["train_dataloader_opts"] @@ -412,11 +378,18 @@ def text_pipeline(wrd): valid_loader_kwargs=valid_dataloader_opts, ) + import os + # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + for k in test_datasets.keys(): # keys are test_clean, test_other etc - asr_brain.hparams.wer_file = os.path.join( - hparams["output_folder"], "wer_{}.txt".format(k) + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" ) asr_brain.evaluate( - test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"] + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="WER", ) diff --git a/recipes/LibriSpeech/ASR/transducer/README.md b/recipes/LibriSpeech/ASR/transducer/README.md index 0fd5617b21..f39b623101 100644 --- a/recipes/LibriSpeech/ASR/transducer/README.md +++ b/recipes/LibriSpeech/ASR/transducer/README.md @@ -5,25 +5,76 @@ You can download LibriSpeech at http://www.openslr.org/12 # Extra-Dependencies This recipe supports two implementations of the transducer loss, see `use_torchaudio` arg in the yaml file: -1. Transducer loss from torchaudio (this requires torchaudio version >= 0.10.0) (Default). +1. Transducer loss from torchaudio (this requires torchaudio version >= 0.10.0). 2. Speechbrain implementation using Numba. To use it, please set `use_torchaudio=False` in the yaml file. This version is implemented within SpeechBrain and allows you to directly access the python code of the transducer loss (and directly modify it if needed). +The Numba implementation is currently enabled by default as the `use_torchaudio` option is incompatible with `bfloat16` training. + Note: Before running this recipe, make sure numba is installed. 
Otherwise, run: ``` pip install numba ``` # How to run it -python train.py train/train.yaml +```shell +python train.py hparams/conformer_transducer.yaml +``` + +## Precision Notes +If your GPU effectively supports fp16 (half-precision) computations, it is recommended to execute the training script with the `--precision=fp16` (or `--precision=bf16`) option. +Enabling half precision can significantly reduce the peak VRAM requirements. For example, in the case of the Conformer Transducer recipe trained with Librispeech, the peak VRAM decreases from 39GB to 12GB when using fp16. +According to our tests, the performance is not affected. + +# Librispeech Results + +Dev. clean is evaluated with Greedy Decoding while the test sets are using Greedy Decoding OR a RNNLM + Beam Search. +Evaluation is performed in fp32. However, we found that during inference, fp16 or bf16 autocast has very little incidence on the WER. + +| Release | Hyperparams file | Train precision | Dev-clean Greedy | Test-clean Greedy | Test-other Greedy | Test-clean BS+RNNLM | Test-other BS+RNNLM | Model link | GPUs | +|:-------------:|:---------------------------:|:-:| :------:| :-----------:| :------------------:| :------------------:| :------------------:| :--------:| :-----------:| +| 2023-12-12 | conformer_transducer.yaml `streaming: True` | bf16 | 2.56% | 2.72% | 6.47% | \* | \* | [DropBox](https://www.dropbox.com/scl/fo/kl1eikmoauygwqcx8ok4r/AMkreKLzHtxPtqnoXzUerko?rlkey=juk374k210b76lbnblh7or95d&st=1ugwe9e3&dl=0) | [4x A100SXM4 40GB](https://docs.alliancecan.ca/wiki/Narval/en) | + +\*: not evaluated due to performance issues, see [issue #2301](https://github.com/speechbrain/speechbrain/issues/2301) + +## Streaming model -# Librispeech 100H Results +### WER vs chunk size & left context -| Release | hyperparams file | Val. CER | Val. 
WER | Test WER (test clean) | Model link | GPUs | -|:-------------:|:---------------------------:| ------:| :-----------:| :------------------:| --------:| :-----------:| -| 2020-10-22 | train.yaml | 5.2 | GS: 11.45 | BS (beam=4): 11.03 | Not Available | 1xRTX-8000 48GB | +The following matrix presents the Word Error Rate (WER%) achieved on LibriSpeech +`test-clean` with various chunk sizes (in ms) and left context sizes (in # of +chunks). -The output folder with the checkpoints and training logs is available [here](https://drive.google.com/drive/folders/17kEW0crU3tyP-8-u5TeoFom4ton_B-j2?usp=sharing). +The relative difference is not trivial to interpret, because we are not testing +against a continuous stream of speech, but rather against utterances of various +lengths. This tends to bias results in favor of larger chunk sizes. +The chunk size might not accurately represent expected latency due to slight +padding differences in streaming contexts. + +The left chunk size is not representative of the receptive field of the model. +Because the model caches the streaming context at different layers, the model +may end up forming indirect dependencies to audio many seconds ago. + +| | full | cs=32 (1280ms) | 24 (960ms) | 16 (640ms) | 12 (480ms) | 8 (320ms) | +|:-----:|:----:|:-----:|:-----:|:-----:|:-----:|:-----:| +| full | 2.72%| - | - | - | - | - | +| lc=32 | - | 3.09% | 3.07% | 3.26% | 3.31% | 3.44% | +| 16 | - | 3.10% | 3.07% | 3.27% | 3.32% | 3.50% | +| 8 | - | 3.10% | 3.11% | 3.31% | 3.39% | 3.62% | +| 4 | - | 3.12% | 3.13% | 3.37% | 3.51% | 3.80% | +| 2 | - | 3.19% | 3.24% | 3.50% | 3.79% | 4.38% | + +### Inference + +Once your model is trained, you need a few manual steps in order to use it with the high-level streaming interfaces (`speechbrain.inference.ASR.StreamingASR`): + +1. Create a new directory where you want to store the model. +2. 
Copy `results/conformer_transducer//lm.ckpt` (optional; currently, for streaming rescoring LMs might be unsupported) and `tokenizer.ckpt` to that directory. +3. Copy `results/conformer_transducer//save/CKPT+????/model.ckpt` and `normalizer.ckpt` to that directory. +4. Copy your hyperparameters file to that directory. Uncomment the streaming specific keys and remove any training-specific keys. Alternatively, grab the inference hyperparameters YAML for this model from HuggingFace and adapt it to any changes you may have done. +5. You can now instantiate a `StreamingASR` with your model using `StreamingASR.from_hparams("/path/to/model/")`. + +The contents of that directory may be uploaded as a HuggingFace model, in which case the model source path can just be specified as `youruser/yourmodel`. # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -35,6 +86,15 @@ The output folder with the checkpoints and training logs is available [here](htt Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriSpeech/ASR/transducer/extra_requirements.txt b/recipes/LibriSpeech/ASR/transducer/extra_requirements.txt new file mode 100644 index 0000000000..47bf394ff1 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transducer/extra_requirements.txt @@ -0,0 +1,5 @@ +# Numba is used if use_torchaudio=False +# Numba might be faster, but it is harder to install +# You might need to install numba with conda +# You might also need to install other packages such as cudatoolkit +numba diff --git a/recipes/LibriSpeech/ASR/transducer/hparams/conformer_transducer.yaml b/recipes/LibriSpeech/ASR/transducer/hparams/conformer_transducer.yaml new file mode 100644 index 
0000000000..6fe8515bf3 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transducer/hparams/conformer_transducer.yaml @@ -0,0 +1,422 @@ +# ############################################################################ +# Model: E2E ASR with transformer and transducer +# Encoder: Conformer +# Decoder: LSTM + beamsearch + RNNLM +# Tokens: BPE with unigram +# losses: Transducer + CTC (optional) + CE (optional) +# Training: Librispeech 960h +# Authors: Titouan Parcollet 2023, Abdel HEBA, Mirco Ravanelli, Sung-Lin Yeh 2020 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_transducer_large/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. Here, we download everything from the +# speechbrain HuggingFace repository. However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g. if you want to use your own LM / tokenizer. 
+pretrained_lm_tokenizer_path: speechbrain/asr-crdnn-rnnlm-librispeech + +# Data files +data_folder: !PLACEHOLDER # e.g., /localscratch/LibriSpeech +# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv +skip_prep: False +ckpt_interval_minutes: 5 # save checkpoint every N min + +####################### Training Parameters #################################### + +# To make Transformers converge, the global batch size should be large enough. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. +number_of_epochs: 100 +warmup_steps: 25000 +num_workers: 4 +batch_size_valid: 4 +lr: 0.0008 +weight_decay: 0.01 +number_of_ctc_epochs: 60 +ctc_weight: 0.3 # Multitask with CTC for the encoder (0.0 = disabled) +ce_weight: 0.0 # Multitask with CE for the decoder (0.0 = disabled) +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +precision: fp16 # bf16, fp16 or fp32 + +# The batch size is used if and only if dynamic batching is set to False +# Validation and testing are done with fixed batches and not dynamic batching. +batch_size: 8 +grad_accumulation_factor: 4 +sorting: random +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +# Feature parameters +sample_rate: 16000 +n_fft: 512 +n_mels: 80 +win_length: 32 + +# Streaming & dynamic chunk training options +# At least for the current architecture on LibriSpeech, we found out that +# non-streaming accuracy is very similar between `streaming: True` and +# `streaming: False`. 
+streaming: True # controls all Dynamic Chunk Training & chunk size & left context mechanisms + +# Configuration for Dynamic Chunk Training. +# In this model, a chunk is roughly equivalent to 40ms of audio. +dynchunktrain_config_sampler: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler # yamllint disable-line rule:line-length + chunkwise_prob: 0.6 # Probability during a batch to limit attention and sample a random chunk size in the following range + chunk_size_min: 8 # Minimum chunk size (if in a DynChunkTrain batch) + chunk_size_max: 32 # Maximum chunk size (if in a DynChunkTrain batch) + limited_left_context_prob: 0.75 # If in a DynChunkTrain batch, the probability during a batch to restrict left context to a random number of chunks + left_context_chunks_min: 2 # Minimum left context size (in # of chunks) + left_context_chunks_max: 32 # Maximum left context size (in # of chunks) + # If you specify a valid/test config, you can optionally have evaluation be + # done with a specific DynChunkTrain configuration. + # valid_config: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig + # chunk_size: 24 + # left_context_size: 16 + # test_config: ... + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + num_workers: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +# This setup works well for 3090 24GB GPU, adapt it to your needs. +# Adjust grad_accumulation_factor depending on the DDP node count (here 3) +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_len: 150 +max_batch_len_val: 50 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 + +dynamic_batch_sampler: + max_batch_len: !ref + max_batch_len_val: !ref + num_buckets: !ref + shuffle_ex: True # if true re-creates batches at each epoch shuffling examples. 
+ batch_ordering: random + max_batch_ex: 256 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 512 +joint_dim: 640 +nhead: 8 +num_encoder_layers: 12 +num_decoder_layers: 0 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 1000 +dec_dim: 512 +dec_emb_dropout: 0.2 +dec_dropout: 0.1 +attention_type: RoPEMHA + +# Decoding parameters +blank_index: 0 +bos_index: 0 +eos_index: 0 +pad_index: 0 +beam_size: 10 +nbest: 1 +# by default {state,expand}_beam = 2.3 as mentioned in the paper +# https://arxiv.org/abs/1904.02619 +state_beam: 2.3 +expand_beam: 2.3 +lm_weight: 0.50 + +# If True uses torchaudio loss. Otherwise, the numba one +use_torchaudio: False + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + win_length: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 1 + max_augmentations: 1 + augment_prob: 1.0 + augmentations: [!ref ] + + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 12 + drop_length_high: 20 + drop_count_low: 5 + drop_count_high: 5 + replace: "zeros" + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 20 + drop_length_high: 25 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: 
!new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: conformer + attention_type: !ref + normalize_before: True + causal: False + +# We must call an encoder wrapper so the decoder isn't run (we don't have any) +enc: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper + transformer: !ref + +# For MTL CTC over the encoder +proj_ctc: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +# Define some projection layers to make sure that enc and dec +# output dim are the same before joining +proj_enc: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +proj_dec: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +# Uncomment for MTL with CTC +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +emb: !new:speechbrain.nnet.embedding.Embedding + num_embeddings: !ref + consider_as_one_hot: True + blank_id: !ref + +dec: !new:speechbrain.nnet.RNN.LSTM + input_shape: [null, null, !ref - 1] + hidden_size: !ref + num_layers: 1 + re_init: True + +# For MTL with LM over the decoder (need to uncomment to 
activate) +# dec_lin: !new:speechbrain.nnet.linear.Linear +# input_size: !ref +# n_neurons: !ref +# bias: False + +# For MTL +ce_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: 0.1 + +Tjoint: !new:speechbrain.nnet.transducer.transducer_joint.Transducer_joint + joint: sum # joint [sum | concat] + nonlinearity: !ref + +transducer_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +transducer_cost: !name:speechbrain.nnet.losses.transducer_loss + blank_index: !ref + use_torchaudio: !ref + +# This is the RNNLM that is used according to the Huggingface repository +# NB: It has to match the pre-trained RNNLM!! +lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM + output_neurons: !ref + embedding_dim: 128 + activation: !name:torch.nn.LeakyReLU + dropout: 0.0 + rnn_layers: 2 + rnn_neurons: 2048 + dnn_blocks: 1 + dnn_neurons: 512 + return_hidden: True # For inference + +# for MTL +# update model if any HEAD module is added +modules: + CNN: !ref + enc: !ref + emb: !ref + dec: !ref + Tjoint: !ref + transducer_lin: !ref + normalize: !ref + lm_model: !ref + proj_ctc: !ref + proj_dec: !ref + proj_enc: !ref +# dec_lin: !ref + +# for MTL +# update model if any HEAD module is added +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref , !ref , !ref , !ref , !ref ] + +############################## Decoding & optimiser ############################ + +# Tokenizer initialization +tokenizer: !new:sentencepiece.SentencePieceProcessor + +Greedysearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher + decode_network_lst: [!ref , !ref , !ref ] + tjoint: !ref + classifier_network: [!ref ] + blank_id: !ref + beam_size: 1 + nbest: 1 + +Beamsearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher + decode_network_lst: [!ref , !ref , !ref ] + tjoint: !ref + classifier_network: [!ref ] + blank_id: !ref + beam_size: !ref + 
nbest: !ref + lm_module: !ref + lm_weight: !ref + state_beam: !ref + expand_beam: !ref + +opt_class: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 1.e-8 + weight_decay: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: !ref + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + scheduler: !ref + normalizer: !ref + counter: !ref + +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt + + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +# for the inference hparams, you will need to include and uncomment something like this: + +# make_tokenizer_streaming_context: !name:speechbrain.tokenizers.SentencePiece.SentencePieceDecoderStreamingContext +# tokenizer_decode_streaming: !name:speechbrain.tokenizers.SentencePiece.spm_decode_preserve_leading_space + +# make_decoder_streaming_context: !name:speechbrain.decoders.transducer.TransducerGreedySearcherStreamingContext # default constructor +# decoding_function: !name:speechbrain.decoders.transducer.TransducerBeamSearcher.transducer_greedy_decode_streaming +# - !ref # self + +# fea_streaming_extractor: !new:speechbrain.lobes.features.StreamingFeatureWrapper +# module: !new:speechbrain.nnet.containers.LengthsCapableSequential +# - !ref +# - !ref +# - !ref +# # don't consider normalization as part of the input filter chain. +# # normalization will operate at chunk level, which mismatches training +# # somewhat, but does not appear to result in noticeable degradation. 
+# properties: !apply:speechbrain.utils.filter_analysis.stack_filter_properties +# - [!ref , !ref ] diff --git a/recipes/LibriSpeech/ASR/transducer/hparams/train.yaml b/recipes/LibriSpeech/ASR/transducer/hparams/train.yaml deleted file mode 100644 index 986089c770..0000000000 --- a/recipes/LibriSpeech/ASR/transducer/hparams/train.yaml +++ /dev/null @@ -1,281 +0,0 @@ -# ############################################################################ -# Model: E2E ASR with attention-based ASR -# Encoder: CRDNN model -# Decoder: GRU + beamsearch + RNNLM -# Tokens: BPE with unigram -# losses: Transducer -# Training: Librispeech 100h -# Authors: Abdel HEBA, Mirco Ravanelli, Sung-Lin Yeh 2020 -# ############################################################################ - -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] -output_folder: !ref results/CRDNN_BPE_RNNT_LM_100H/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Language model (LM) pretraining -# NB: To avoid mismatch, the speech recognizer must be trained with the same -# tokenizer used for LM training. Here, we download everything from the -# speechbrain HuggingFace repository. However, a local path pointing to a -# directory containing the lm.ckpt and tokenizer.ckpt may also be specified -# instead. E.g if you want to use your own LM / tokenizer. 
-pretrained_lm_tokenizer_path: speechbrain/asr-crdnn-rnnlm-librispeech - -# Data files -data_folder: !PLACEHOLDER # e.g, /localscratch/LibriSpeech -# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES -# then data_folder_rirs should be /localscratch/xxx_corpus -# otherwise the dataset will automatically be downloaded -data_folder_rirs: !ref -#train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] -train_splits: ["train-clean-100"] -dev_splits: ["dev-clean"] -test_splits: ["test-clean", "test-other"] -train_csv: !ref /train.csv -valid_csv: !ref /dev-clean.csv -test_csv: - - !ref /test-clean.csv - - !ref /test-other.csv -skip_prep: False -ckpt_interval_minutes: 15 # save checkpoint every N min - -# Training parameters -number_of_epochs: 30 -batch_size: 8 -batch_size_valid: 4 -lr: 1.0 -sorting: ascending -# MTL for encoder with CTC (uncomment enc_lin layer) -#number_of_ctc_epochs: 2 -#ctc_weight: 0.33 -# MTL for decoder with CE (uncomment dec_lin layer) -#number_of_ce_epochs: 2 -#ce_weight: 0.33 - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -opt_class: !name:torch.optim.Adadelta - lr: !ref - rho: 0.95 - eps: 1.e-8 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.15 -cnn_blocks: 2 -cnn_channels: (128, 256) -inter_layer_pooling_size: (2, 2) -cnn_kernelsize: (3, 3) -time_pooling_size: 4 -rnn_class: !name:speechbrain.nnet.RNN.LSTM -rnn_layers: 4 -rnn_neurons: 1024 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 1024 -dec_neurons: 1024 -output_neurons: 1000 # index(blank/eos/bos) = 0 -joint_dim: 1024 -blank_index: 0 - -# Decoding parameters -beam_size: 4 -nbest: 1 -# by default {state,expand}_beam = 2.3 as mention in paper -# https://arxiv.org/abs/1904.02619 -state_beam: 2.3 -expand_beam: 2.3 -lm_weight: 0.50 - - -epoch_counter: 
!new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - inter_layer_pooling_size: !ref - time_pooling: True - using_2d_pooling: False - time_pooling_size: !ref - rnn_class: !ref - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - rnn_re_init: True - dnn_blocks: !ref - dnn_neurons: !ref - -# For MTL CTC over the encoder -enc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - -# Uncomment for MTL with CTC -#ctc_cost: !name:speechbrain.nnet.losses.ctc_loss -# blank_index: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - consider_as_one_hot: True - blank_id: !ref - -dec: !new:speechbrain.nnet.RNN.GRU - input_shape: [null, null, !ref - 1] - hidden_size: !ref - num_layers: 1 - re_init: True - -# For MTL with LM over the decoder -dec_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - bias: False - -# For MTL -#ce_cost: !name:speechbrain.nnet.losses.nll_loss -# label_smoothing: 0.1 - -Tjoint: !new:speechbrain.nnet.transducer.transducer_joint.Transducer_joint - joint: sum # joint [sum | concat] - nonlinearity: !ref - -transducer_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref - bias: False - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - 
-transducer_cost: !name:speechbrain.nnet.losses.transducer_loss - blank_index: !ref - use_torchaudio: True - -# This is the RNNLM that is used according to the Huggingface repository -# NB: It has to match the pre-trained RNNLM!! -lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM - output_neurons: !ref - embedding_dim: 128 - activation: !name:torch.nn.LeakyReLU - dropout: 0.0 - rnn_layers: 2 - rnn_neurons: 2048 - dnn_blocks: 1 - dnn_neurons: 512 - return_hidden: True # For inference - -# for MTL -# update model if any HEAD module is added -modules: - enc: !ref - emb: !ref - dec: !ref - Tjoint: !ref - transducer_lin: !ref - normalize: !ref - env_corrupt: !ref - augmentation: !ref - lm_model: !ref - enc_lin: !ref - dec_lin: !ref - -# for MTL -# update model if any HEAD module is added -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref ] - -# Tokenizer initialization -tokenizer: !new:sentencepiece.SentencePieceProcessor - -Greedysearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher - decode_network_lst: [!ref , !ref ] - tjoint: !ref - classifier_network: [!ref ] - blank_id: !ref - beam_size: 1 - nbest: 1 - -Beamsearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher - decode_network_lst: [!ref , !ref ] - tjoint: !ref - classifier_network: [!ref ] - blank_id: !ref - beam_size: !ref - nbest: !ref - lm_module: !ref - lm_weight: !ref - state_beam: !ref - expand_beam: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - scheduler: !ref - normalizer: !ref - counter: !ref - -pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer - collect_in: !ref - loadables: - lm: !ref - tokenizer: !ref - paths: - lm: !ref /lm.ckpt - tokenizer: !ref /tokenizer.ckpt - - -train_logger: 
!new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats - -cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats - split_tokens: True diff --git a/recipes/LibriSpeech/ASR/transducer/train.py b/recipes/LibriSpeech/ASR/transducer/train.py index 10a328ffd7..bde7f637ea 100644 --- a/recipes/LibriSpeech/ASR/transducer/train.py +++ b/recipes/LibriSpeech/ASR/transducer/train.py @@ -5,10 +5,10 @@ language model. To run this recipe, do the following: -> python train.py hparams/train.yaml +> python train.py hparams/conformer_transducer.yaml -With the default hyperparameters, the system employs a CRDNN encoder. -The decoder is based on a standard GRU. Beamsearch coupled with a RNN +With the default hyperparameters, the system employs a conformer encoder. +The decoder is based on a standard LSTM. Beamsearch coupled with a RNN language model is used on the top of decoder probabilities. The neural network is trained on both CTC and negative-log likelihood @@ -24,6 +24,8 @@ Authors + * Sylvain de Langen 2024 + * Titouan Parcollet 2024 * Abdel Heba 2020 * Mirco Ravanelli 2020 * Ju-Chieh Chou 2020 @@ -32,14 +34,16 @@ import os import sys +from pathlib import Path + import torch -import logging -import speechbrain as sb -from speechbrain.utils.distributed import run_on_main from hyperpyyaml import load_hyperpyyaml -from pathlib import Path -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -50,31 +54,58 @@ def compute_forward(self, batch, stage): batch = batch.to(self.device) wavs, wav_lens = batch.sig tokens_with_bos, token_with_bos_lens = batch.tokens_bos - # wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) - # Add augmentation if specified + # Add waveform augmentation if 
specified. if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - batch.sig = wavs, wav_lens - tokens_with_bos = torch.cat( - [tokens_with_bos, tokens_with_bos], dim=0 - ) - token_with_bos_lens = torch.cat( - [token_with_bos_lens, token_with_bos_lens] + if hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_with_bos = self.hparams.wav_augment.replicate_labels( + tokens_with_bos ) - batch.tokens_bos = tokens_with_bos, token_with_bos_lens - if hasattr(self.modules, "augmentation"): - wavs = self.modules.augmentation(wavs, wav_lens) - # Forward pass feats = self.hparams.compute_features(wavs) - feats = self.modules.normalize(feats, wav_lens) - x = self.modules.enc(feats.detach()) + + # Add feature augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_with_bos = self.hparams.fea_augment.replicate_labels( + tokens_with_bos + ) + + current_epoch = self.hparams.epoch_counter.current + + # Old models may not have the streaming hparam, we don't break them in + # any other way so just check for its presence + if hasattr(self.hparams, "streaming") and self.hparams.streaming: + dynchunktrain_config = self.hparams.dynchunktrain_config_sampler( + stage + ) + else: + dynchunktrain_config = None + + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + src = self.modules.CNN(feats) + x = self.modules.enc( + src, + wav_lens, + pad_idx=self.hparams.pad_index, + dynchunktrain_config=dynchunktrain_config, + ) + x = self.modules.proj_enc(x) + e_in = self.modules.emb(tokens_with_bos) + e_in = torch.nn.functional.dropout( + e_in, + self.hparams.dec_emb_dropout, + training=(stage == sb.Stage.TRAIN), + ) h, _ = self.modules.dec(e_in) + h = 
torch.nn.functional.dropout( + h, self.hparams.dec_dropout, training=(stage == sb.Stage.TRAIN) + ) + h = self.modules.proj_dec(h) + # Joint network # add labelseq_dim to the encoder tensor: [B,T,H_enc] => [B,T,1,H_enc] # add timeseq_dim to the decoder tensor: [B,U,H_dec] => [B,1,U,H_dec] @@ -85,33 +116,23 @@ def compute_forward(self, batch, stage): # Compute outputs if stage == sb.Stage.TRAIN: - return_CTC = False - return_CE = False - current_epoch = self.hparams.epoch_counter.current + p_ctc = None + p_ce = None + if ( - hasattr(self.hparams, "ctc_cost") + self.hparams.ctc_weight > 0.0 and current_epoch <= self.hparams.number_of_ctc_epochs ): - return_CTC = True # Output layer for ctc log-probabilities - out_ctc = self.modules.enc_lin(x) + out_ctc = self.modules.proj_ctc(x) p_ctc = self.hparams.log_softmax(out_ctc) - if ( - hasattr(self.hparams, "ce_cost") - and current_epoch <= self.hparams.number_of_ce_epochs - ): - return_CE = True + + if self.hparams.ce_weight > 0.0: # Output layer for ctc log-probabilities p_ce = self.modules.dec_lin(h) p_ce = self.hparams.log_softmax(p_ce) - if return_CE and return_CTC: - return p_ctc, p_ce, logits_transducer, wav_lens - elif return_CTC: - return p_ctc, logits_transducer, wav_lens - elif return_CE: - return p_ce, logits_transducer, wav_lens - else: - return logits_transducer, wav_lens + + return p_ctc, p_ce, logits_transducer, wav_lens elif stage == sb.Stage.VALID: best_hyps, scores, _, _ = self.hparams.Greedysearcher(x) @@ -129,68 +150,49 @@ def compute_objectives(self, predictions, batch, stage): """Computes the loss (Transducer+(CTC+NLL)) given predictions and targets.""" ids = batch.id - current_epoch = self.hparams.epoch_counter.current tokens, token_lens = batch.tokens tokens_eos, token_eos_lens = batch.tokens_eos - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - token_eos_lens = torch.cat([token_eos_lens, token_eos_lens], dim=0) - tokens 
= torch.cat([tokens, tokens], dim=0) - token_lens = torch.cat([token_lens, token_lens], dim=0) + + # Train returns 4 elements vs 3 for val and test + if len(predictions) == 4: + p_ctc, p_ce, logits_transducer, wav_lens = predictions + else: + logits_transducer, wav_lens, predicted_tokens = predictions + + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if hasattr(self.hparams, "fea_augment"): + ( + tokens, + token_lens, + tokens_eos, + token_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, token_lens, tokens_eos, token_eos_lens + ) if stage == sb.Stage.TRAIN: - if len(predictions) == 4: - p_ctc, p_ce, logits_transducer, wav_lens = predictions + CTC_loss = 0.0 + CE_loss = 0.0 + if p_ctc is not None: CTC_loss = self.hparams.ctc_cost( p_ctc, tokens, wav_lens, token_lens ) + if p_ce is not None: CE_loss = self.hparams.ce_cost( p_ce, tokens_eos, length=token_eos_lens ) - loss_transducer = self.hparams.transducer_cost( - logits_transducer, tokens, wav_lens, token_lens - ) - loss = ( - self.hparams.ctc_weight * CTC_loss - + self.hparams.ce_weight * CE_loss - + (1 - (self.hparams.ctc_weight + self.hparams.ce_weight)) - * loss_transducer - ) - elif len(predictions) == 3: - # one of the 2 heads (CTC or CE) is still computed - # CTC alive - if current_epoch <= self.hparams.number_of_ctc_epochs: - p_ctc, logits_transducer, wav_lens = predictions - CTC_loss = self.hparams.ctc_cost( - p_ctc, tokens, wav_lens, token_lens - ) - loss_transducer = self.hparams.transducer_cost( - logits_transducer, tokens, wav_lens, token_lens - ) - loss = ( - self.hparams.ctc_weight * CTC_loss - + (1 - self.hparams.ctc_weight) * loss_transducer - ) - # CE for decoder alive - else: - p_ce, logits_transducer, wav_lens = predictions - CE_loss = self.hparams.ce_cost( - p_ce, tokens_eos, length=token_eos_lens - ) - loss_transducer = 
self.hparams.transducer_cost( - logits_transducer, tokens, wav_lens, token_lens - ) - loss = ( - self.hparams.ce_weight * CE_loss - + (1 - self.hparams.ctc_weight) * loss_transducer - ) - else: - logits_transducer, wav_lens = predictions - loss = self.hparams.transducer_cost( - logits_transducer, tokens, wav_lens, token_lens - ) + loss_transducer = self.hparams.transducer_cost( + logits_transducer, tokens, wav_lens, token_lens + ) + loss = ( + self.hparams.ctc_weight * CTC_loss + + self.hparams.ce_weight * CE_loss + + (1 - (self.hparams.ctc_weight + self.hparams.ce_weight)) + * loss_transducer + ) else: - logits_transducer, wav_lens, predicted_tokens = predictions loss = self.hparams.transducer_cost( logits_transducer, tokens, wav_lens, token_lens ) @@ -207,22 +209,10 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - with torch.no_grad(): - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" @@ -242,33 +232,73 @@ def on_stage_end(self, stage, stage_loss, epoch): # Perform end-of-iteration things, like annealing, logging, etc. 
if stage == sb.Stage.VALID: - old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"]) - sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + lr = self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } self.hparams.train_logger.log_stats( - stats_meta={"epoch": epoch, "lr": old_lr}, + stats_meta=epoch_stats, train_stats=self.train_stats, valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"], "epoch": epoch}, + min_keys=["WER"], + num_to_keep=self.hparams.avg_checkpoints, ) + elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + # save the averaged checkpoint at the end of the evaluation stage + # delete the rest of the intermediate checkpoints + # WER is set to -0.1 so checkpointer only keeps the averaged checkpoint + self.checkpointer.save_and_keep_only( + meta={"WER": -0.1, "epoch": epoch}, + min_keys=["WER"], + num_to_keep=1, + ) + + def on_evaluate_start(self, max_key=None, min_key=None): + """perform checkpoint average if needed""" + super().on_evaluate_start() + + ckpts = self.checkpointer.find_checkpoints( + max_key=max_key, + min_key=min_key, + ) + ckpt = sb.utils.checkpoints.average_checkpoints( + ckpts, recoverable_name="model" + ) + + self.hparams.model.load_state_dict(ckpt, strict=True) + self.hparams.model.eval() def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. 
- It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -293,7 +323,8 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -332,9 +363,9 @@ def text_pipeline(wrd): yield wrd tokens_list = tokenizer.encode_as_ids(wrd) yield tokens_list - tokens_bos = torch.LongTensor([hparams["blank_index"]] + (tokens_list)) + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) yield tokens_bos - tokens_eos = torch.LongTensor(tokens_list + [hparams["blank_index"]]) + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) yield tokens_eos tokens = torch.LongTensor(tokens_list) yield tokens @@ -343,21 +374,62 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], ) - return train_data, valid_data, test_datasets + # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
+ train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams = hparams["dynamic_batch_sampler"] + num_buckets = dynamic_hparams["num_buckets"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + dynamic_hparams["max_batch_len"], + num_buckets=num_buckets, + length_func=lambda x: x["duration"], + shuffle=dynamic_hparams["shuffle_ex"], + batch_ordering=dynamic_hparams["batch_ordering"], + ) + + valid_batch_sampler = DynamicBatchSampler( + valid_data, + dynamic_hparams["max_batch_len_val"], + num_buckets=num_buckets, + length_func=lambda x: x["duration"], + shuffle=dynamic_hparams["shuffle_ex"], + batch_ordering=dynamic_hparams["batch_ordering"], + ) + + return ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_batch_sampler, + valid_batch_sampler, + ) -if __name__ == "__main__": +if __name__ == "__main__": # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - # If --distributed_launch then + # Use torchaudio if the device is CPU + if run_opts.get("device") == "cpu": + if "use_torchaudio: True" in overrides: + overrides.replace("use_torchaudio: True", "use_torchaudio: False") + else: + overrides += "\nuse_torchaudio: True" + # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -378,21 +450,28 @@ def text_pipeline(wrd): "tr_splits": hparams["train_splits"], "dev_splits": hparams["dev_splits"], "te_splits": hparams["test_splits"], - "save_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], "merge_lst": hparams["train_splits"], - "merge_name": hparams["train_csv"], + "merge_name": "train.csv", "skip_prep": hparams["skip_prep"], }, ) # here we create the datasets objects 
as well as tokenization and encoding - train_data, valid_data, test_datasets = dataio_prepare(hparams) + ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams) # We download the pretrained LM and the tokenizer from HuggingFace (or elsewhere # depending on the path given in the YAML file). The tokenizer is loaded at # the same time. - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Trainer initialization asr_brain = ASR( @@ -403,24 +482,39 @@ def text_pipeline(wrd): checkpointer=hparams["checkpointer"], ) - # We dynamicaly add the tokenizer to our brain class. + # We dynamically add the tokenizer to our brain class. # NB: This tokenizer corresponds to the one used for the LM!! asr_brain.tokenizer = hparams["tokenizer"] + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if valid_bsampler is not None: + valid_dataloader_opts = {"batch_sampler": valid_bsampler} # Training asr_brain.fit( asr_brain.hparams.epoch_counter, train_data, valid_data, - train_loader_kwargs=hparams["train_dataloader_opts"], - valid_loader_kwargs=hparams["valid_dataloader_opts"], + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, ) # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + for k in test_datasets.keys(): # keys are test_clean, test_other etc - asr_brain.hparams.wer_file = os.path.join( - hparams["output_folder"], "wer_{}.txt".format(k) + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" ) asr_brain.evaluate( - test_datasets[k], 
test_loader_kwargs=hparams["test_dataloader_opts"] + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="WER", ) diff --git a/recipes/LibriSpeech/ASR/transformer/README.md b/recipes/LibriSpeech/ASR/transformer/README.md index ebc45aefdb..32b07253d6 100644 --- a/recipes/LibriSpeech/ASR/transformer/README.md +++ b/recipes/LibriSpeech/ASR/transformer/README.md @@ -1,31 +1,67 @@ -# LibriSpeech ASR with Transformers. -This folder contains the scripts to train a Transformer-based speech recognizer -using LibriSpeech. +# LibriSpeech ASR with Transformers or Whisper models. +This folder contains the scripts to train a Transformer-based speech recognizer or the scripts to fine-tune the Whisper encoder-decoder model. You can download LibriSpeech at http://www.openslr.org/12 - # How to run -python train.py train/train.yaml +```shell +python train_with_whisper.py hparams/train_hf_whisper.yaml +python train.py hparams/transformer.yaml +``` + +# How to run on test sets only +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: + +```shell +python train_with_whisper.py hparams/train_hf_whisper.yaml --test_only +python train.py hparams/transformer.yaml --test_only +``` + +**If using a HuggingFace pre-trained model, please make sure you have "transformers" +installed in your environment (see extra-requirements.txt)** # Results -| Release | hyperparams file | Test Clean WER | HuggingFace link | Model link | GPUs | -|:-------------:|:---------------------------:| :-----:| :-----:| :-----:| :--------:| -| 24-03-22 | transformer.yaml | 2.26 | [HuggingFace](https://huggingface.co/speechbrain/asr-transformer-transformerlm-librispeech) | [GoogleDrive](https://drive.google.com/drive/folders/1sM3_PksmGQZMxXPibp7W7mQfPXFdHqc5?usp=sharing) | 1xA100 40GB | +## SpeechLLM with SSL features + +Two SpeechLLM modes are supported: +- SpeechLLM with SSL features +- SpeechLLM with E2E features -# Training Time -It 
takes about 45 minutes for each epoch on 1 NVDIA A100 (40GB). +In the first mode, the speech features are extracted from the audio waveforms using a pre-trained SSL model, and then projected to the LLM embedding space using a linear layer projection, where everything is trained jointly. +In the second mode, the speech features are already being extracted offline (see: `extract_ssl_feats.py` script). The LLM is then trained on the frozen SSL representations. This mode is more efficient and faster to train, but at the cost of flexibility on the frozen SSL model. -# PreTrained Model + Easy-Inference -You can find the pre-trained model with an easy-inference function on HuggingFace: -- https://huggingface.co/speechbrain/asr-crdnn-rnnlm-librispeech -- https://huggingface.co/speechbrain/asr-crdnn-transformerlm-librispeech -- https://huggingface.co/speechbrain/asr-transformer-transformerlm-librispeech +| Release | Model | hyperparams file | Dev Clean WER | Dev Other WER | Test Clean WER | Test Other WER | HuggingFace link | Model link | GPUs | +|:-------------:|:-------------:|:-------------:|:---------------------------:| :-----:| :-----:| :-----:| :-----:| :--------:| +| 29-01-26 | WavLM Large + LLama 3.2 1B + LoRA | speechllm_e2e.yaml | 2.79 | 5.03 | 2.72 | 5.34 | [HuggingFace](https://huggingface.co/speechbrain/asr-wavlm-large-llama3.2-1b-lora-librispeech) | - | 1xA100 80GB | + +## Whisper Finetuning Result: + +Following table contains whisper-finetuning results for 1 epoch using Whisper model, freezing encoder and finetuning decoder. 
+| Release | Model | commit hash | hyperparams file | LM | Dev Clean WER | Test Clean WER | Test Other WER | HuggingFace link | Model link | GPUs | +| ------------- |:-------------:| -----:|-----:|:---------------------------:| -----:| -----:| -----:| :-----------: |:-----------:| :-----------:| +| 2024-03-28 | large-v3 | [e4e2e13](https://github.com/speechbrain/speechbrain/pull/2450/commits/e4e2e135e9edafc6a26fc9aa4df9a94eaf86de41) | train_hf_whisper.yaml | No | 2.00% | 1.96% | 4.30% | Not Avail. | [DropBox](https://www.dropbox.com/scl/fo/d3gmgf6q79byuhzozdwz8/AGFQwMWJ5hqB466GXTnL72M?rlkey=gmi157oa36vvo9c9o1z4oys0e&dl=0) | 2xV100S 32GB | +| 2024-03-28 | medium.en | [e4e2e13](https://github.com/speechbrain/speechbrain/pull/2450/commits/e4e2e135e9edafc6a26fc9aa4df9a94eaf86de41) | train_hf_whisper.yaml | No | 2.35% | 2.40% | 5.59% | Not Avail. | [DropBox](https://www.dropbox.com/scl/fo/a233v5q1gjpy4nyfh2gq0/ALCbTe3UwAjfia7XI2GLx7A?rlkey=lnoxdpiyxm6lg461ptbdrifcj&dl=0160) | 2xV100S 32GB | +| 2024-07-20 | small.en | [9864011](https://github.com/speechbrain/speechbrain/pull/2563/commits/98640110123afe8b9d10c5cac14818ed7654477a) | train_whisper_lora.yaml | No | 2.81% | 2.90% | 6.57% | Not Avail. | [DropBox]() | 1x1080Ti 12GB | + + +## Transformers + +| Release | hyperparams file | Dev Clean WER (No LM, small beam) | Test Clean WER (Transformer LM) | Test Other WER (Transformer LM) | HuggingFace link | Model link | GPUs | +|:-------------:|:-------------:|:-------------:|:---------------------------:| :-----:| :-----:| :-----:| :--------:| +| 30-09-24 | conformer_large.yaml (new RoPE version) |1.85 with LM | 1.96 | 4.50 | Not Avail. | Not Avail. | 4xA40 46GB | +| 23-05-23 | branchformer_large.yaml | 2.72 (1.9 with LM) | 2.04 | 4.13 | Not Avail. 
| [DropBox](https://www.dropbox.com/scl/fo/qhtds5rrdvhhhjywa7ovw/AMiIL5YvQENw5JKVpzXlP5o?rlkey=hz8vlpy3qf9kcyfx0cox089e6&st=ufckv6tb&dl=0) | 4xA100 80GB | +| 10-02-25 | conformer_large.yaml | 1.85 with LM | 1.97 | 4.50 | N/A | N/A | 4xA100 80GB | +| 23-05-23 | conformer_large.yaml | 2.62 (1.9 with LM) | 2.01 | 4.52 | [HuggingFace](https://huggingface.co/speechbrain/asr-conformer-transformerlm-librispeech) | [DropBox](https://www.dropbox.com/scl/fo/9we244tgdf47ay20hrdoz/AKnoqQ13nLwSv1ITeJEQ3wY?rlkey=05o5jiszr8rhj6dlprw87t2x4&st=u2odesyk&dl=0) | 4xA100 80GB | +| 24-03-22 | transformer.yaml | 3.32 | 2.27 | 5.53 | [HuggingFace](https://huggingface.co/speechbrain/asr-transformer-transformerlm-librispeech) | [DropBox](https://www.dropbox.com/sh/653kq8h2k87md4p/AAByAaAryXtQKpRzYtzV9ih5a?dl=0) | 4xV100 32GB | +| 24-03-22 | conformer_small.yaml | 4.05 | 2.49 | 6.1 (**only 13.3M parameters**) | [HuggingFace](https://huggingface.co/speechbrain/asr-conformersmall-transformerlm-librispeech) | [DropBox](https://www.dropbox.com/sh/s0x6ni124858b8i/AAALaCH6sGTMRUVTjh8Tm8Jwa?dl=0) | 1xV100 32GB | +| 27-03-23 | hyperconformer_8M.yaml | 4.69 | 2.55 | 6.61 (**only 7.9M parameters**) | Not Avail. | [DropBox](https://www.dropbox.com/sh/8jc96avmivr8fke/AABrFEhtWy_3-Q7BHhkh0enwa?dl=0) | 1xP40 24GB +| 27-03-23 | hyperconformer_22M.yaml | 3.19 | 2.23 | 5.54 (**only 21.7M parameters**) | Not Avail. | [DropBox](https://www.dropbox.com/sh/30xsmqj13jexzoh/AACvZNtX1Fsr0Wa1Z3C9rHLXa?dl=0) | 1xP40 24GB +| 03-09-23 | hyperbranchformer_13M.yaml | NA | 2.54 | 6.58 | Not Avail. | Not Avail. | 1xP40 24GB +| 03-09-23 | hyperbranchformer_25M.yaml | NA | 2.36 | 5.89 | Not Avail. | Not Avail. | 1xP40 24GB +| 05-01-24 | bayesspeech.yaml | 4.28 | 2.84 | 6.27 | Not Avail. 
| [DropBox](https://www.dropbox.com/scl/fo/cdken4jqfj96ev1v84jxm/h?rlkey=25eu1ytgm5ac51zqj8p65zwxd&dl=0) | 1xV100 32GB | -You can find the full experiment folder (i.e., checkpoints, logs, etc) here: -https://drive.google.com/drive/folders/15uUZ21HYnw4KyOPW3tx8bLrS9RoBZfS7?usp=sharing # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -37,6 +73,15 @@ https://drive.google.com/drive/folders/15uUZ21HYnw4KyOPW3tx8bLrS9RoBZfS7?usp=sha Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriSpeech/ASR/transformer/extra_requirements.txt b/recipes/LibriSpeech/ASR/transformer/extra_requirements.txt new file mode 100644 index 
0000000000..cd14b4fe53 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/extra_requirements.txt @@ -0,0 +1 @@ +bayestorch>=0.0.3 # For Bayes ASR recipe diff --git a/recipes/LibriSpeech/ASR/transformer/extract_ssl_feats.py b/recipes/LibriSpeech/ASR/transformer/extract_ssl_feats.py new file mode 100644 index 0000000000..651b41a2c9 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/extract_ssl_feats.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python3 +"""Script to extract SSL features from the audio waveforms. + +The script uses the `speechbrain.integrations.hdf5.cached_item` module to cache the features. +The cached features are used in the `train_speechllm.py` script to train the SpeechLLM ASR system. + +Since we do the extractions within the pipeline in the dataloader, we must place +our hparams elements directly on device, and use a default bsize of 1. + +Example +------- +python extract_ssl_feats.py hparams/extract_ssl_feats.yaml + --data_folder path/to/LibriSpeech \ + --output_folder path/to/feats_cache \ + --ssl_hub path/to/wavlm-large \ + --feats_cache_dir path/to/feats_cache + ...other_hparams... + +Authors +------- + * Adel Moumen, 2025 +""" + +import sys +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.integrations.hdf5.cached_item import CachedHDF5DynamicItem +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + # 2. 
Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + normalizer = hparams["normalize"].to(hparams["device"]).eval() + ssl_encoder = hparams["ssl"].to(hparams["device"]).eval() + + # Base compute function used by all cached wrappers (no file bound yet) + @CachedHDF5DynamicItem.cache(hparams["feats_cache_dir"], compression="gzip") + @sb.utils.data_pipeline.takes("id", "sig") + @sb.utils.data_pipeline.provides("feats") + def compute_feats(uid, sig): + sig = sig.to(hparams["device"]).unsqueeze(0) + length = torch.ones(1, device=hparams["device"]) + with torch.no_grad(), torch.amp.autocast( + hparams["device"].type, dtype=hparams["dtype"] + ): + feats = normalizer(sig, length) + feats = ssl_encoder(feats, length) + return feats.squeeze(0).cpu() + + dynamic_items = [audio_pipeline, compute_feats] + output_keys = ["id", "sig", "feats"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + dynamic_items=dynamic_items, + output_keys=output_keys, + ) + + # Build valid dataset with its own cached wrapper + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + dynamic_items=dynamic_items, + output_keys=output_keys, + ) + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, + replacements={"data_root": data_folder}, + dynamic_items=dynamic_items, + output_keys=output_keys, + ) + + datasets = {"train": train_data, "valid": valid_data} | { + k: v for k, v in test_datasets.items() + } + + for stage, dataset in datasets.items(): + logger.info(f"Iterating {stage} dataset to warm the cache.") + 
dataset.iterate_once(output_keys=["feats"]) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # 1. # Dataset prep (parsing Librispeech) + from librispeech_prepare import prepare_librispeech # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + logger.info("Preparing data...") + dataio_prepare(hparams) + logger.info("Done preparing data") diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/bayesspeech.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/bayesspeech.yaml new file mode 100644 index 0000000000..22f4473425 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/bayesspeech.yaml @@ -0,0 +1,355 @@ +# ############################################################################ +# Model: E2E ASR with Bayesian Transformer (https://arxiv.org/abs/2301.11276) +# Encoder: Bayesian Transformer Encoder +# Decoder: Bayesian Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Training: Librispeech 960h +# Authors: Jianyuan Zhong, Titouan Parcollet, Samuele Cornell, Luca Della Libera +# ############################################################################ +# Seed needs 
to be set at top of yaml, before objects with parameters are made + +seed: 74443 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/bayesspeech/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. Here, we download everything from the +# speechbrain HuggingFace repository. However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g if you want to use your own LM / tokenizer. +pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +ckpt_interval_minutes: 30 # save checkpoint every N min + +####################### Training Parameters #################################### + +# To make Transformers converge, the global bath size should be large enough. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. 
+number_of_epochs: 30 +batch_size: 32 # This works for 1x GPU with 40GB with no dynamic batching +ctc_weight: 0.3 +grad_accumulation_factor: 1 +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +precision: fp32 # bf16, fp16 or fp32 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +# index +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# This setup works well for V100 32GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 600 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random +max_batch_ex: 128 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# stages related parameters +lr_adam: 0.001 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: !ref + +valid_dataloader_opts: + batch_size: 1 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: !ref + +test_dataloader_opts: + batch_size: 1 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: !ref + +####################### Model Parameters ####################################### +# Transformer +d_model: 512 +nhead: 4 +num_encoder_layers: 12 +num_decoder_layers: 6 +d_ffn: 2048 +transformer_dropout: 0.0 +activation: !name:torch.nn.GELU +output_neurons: 5000 + +# Bayesian inference parameters +normal_prior_log_scale: -1.0 +normal_posterior_softplus_inv_scale: -5.0 
+kl_div_weight: 0.000001 # Set based on the number of model parameters +num_eval_mc_samples: 10 + +# Outputs +blank_index: 0 +label_smoothing: 0.0 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 10 +test_beam_size: 66 + +# Scoring parameters +lm_weight: 0.60 +ctc_weight_decode: 0.40 + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 3 + num_layers_per_block: 1 + out_channels: (64, 64, 64) + kernel_sizes: (5, 5, 1) + strides: (2, 2, 1) + residuals: (False, False, True) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 1280 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: transformer + attention_type: regularMHA + normalize_before: True + causal: False + +# This is the TransformerLM that is used according to the Huggingface repository +# Visit the HuggingFace model corresponding to the pretrained_lm_tokenizer_path +# For more details about the model! +# NB: It has to match the pre-trained TransformerLM!! 
+lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: 768 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +# define two optimizers here for two-stage training +Adam: !name:torch.optim.Adam + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + + +############################## Decoding & optimiser ############################ + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: False + length_normalization: True + scorer: !ref + +log_softmax: 
!new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +############################## Logging and Pretrainer ########################## + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping 
between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/branchformer_large.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/branchformer_large.yaml new file mode 100644 index 0000000000..2f8f1e08d5 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/branchformer_large.yaml @@ -0,0 +1,343 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: Branchformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Training: Librispeech 960h +# Authors: Titouan Parcollet +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/branchformer_large/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. Here, we download everything from the +# speechbrain HuggingFace repository. However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g if you want to use your own LM / tokenizer. 
+pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["dev-clean", "test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /dev-clean.csv + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +# To make Transformers converge, the global bath size should be large enough. +# The global batch size is computed as batch_size * n_gpus * +# grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. +number_of_epochs: 120 +batch_size: 16 # This works for 2x GPUs with 32GB +ctc_weight: 0.3 +grad_accumulation_factor: 1 +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +precision: fp32 # bf16, fp16 or fp32 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +# stages related parameters +# stage_one_epochs: 90 +lr_adam: 0.0008 +weight_decay: 0.01 + +# Feature parameters +sample_rate: 16000 +n_fft: 512 +n_mels: 80 +win_length: 32 + +# This setup works well for A100 80GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 500 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+max_batch_ex: 128 +batch_ordering: random +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 512 +nhead: 8 +num_encoder_layers: 18 +num_decoder_layers: 6 +csgu_linear_units: 3072 +csgu_kernel_size: 31 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5000 + +# Outputs +blank_index: 0 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 10 +test_beam_size: 66 +lm_weight: 0.60 +ctc_weight_decode: 0.40 + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + dropout: !ref + activation: !ref + branchformer_activation: !ref + encoder_module: branchformer + csgu_linear_units: !ref + kernel_size: !ref + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +# This is the TransformerLM that is used according to the Huggingface repository +# Visit the HuggingFace model corresponding to the 
pretrained_lm_tokenizer_path +# For more details about the model! +# NB: It has to match the pre-trained TransformerLM!! +lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: 768 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +Adam: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + weight_decay: !ref + +####################### Decoding & optimiser ################################### + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + 
eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: False + length_normalization: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 30000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +####################### Augmentations ########################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + win_length: !ref + n_mels: !ref + +############################## Logging and Pretrainer ########################## + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: 
!name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/conformer.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/conformer_large.yaml similarity index 60% rename from recipes/LibriSpeech/ASR/transformer/hparams/conformer.yaml rename to recipes/LibriSpeech/ASR/transformer/hparams/conformer_large.yaml index a7de883fae..922b32b3e6 100644 --- a/recipes/LibriSpeech/ASR/transformer/hparams/conformer.yaml +++ b/recipes/LibriSpeech/ASR/transformer/hparams/conformer_large.yaml @@ -1,18 +1,18 @@ # ############################################################################ -# Model: E2E ASR with Conformer +# Model: E2E ASR with Transformer # Encoder: Conformer Encoder # Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM # Tokens: unigram # losses: CTC + KLdiv (Label Smoothing loss) # Training: Librispeech 960h -# Authors: Titouan Parcollet, Samuele Cornell +# Authors: Jianyuan Zhong, Titouan Parcollet, Samuele Cornell # ############################################################################ # Seed needs to be set at top of yaml, before objects with parameters are made -seed: 74448 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/conformer/ -wer_file: !ref /wer.txt +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_large/ +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt @@ -25,59 +25,79 @@ train_log: !ref /train_log.txt pretrained_lm_tokenizer_path: 
speechbrain/asr-transformer-transformerlm-librispeech # Data files -data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech # If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES # then data_folder_rirs should be /localscratch/xxx_corpus # otherwise the dataset will automatically be downloaded # data_folder_rirs: !ref train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] dev_splits: ["dev-clean"] -test_splits: ["test-clean", "test-other"] +test_splits: ["dev-clean", "test-clean", "test-other"] skip_prep: False -train_csv: !ref /train.csv -valid_csv: !ref /dev-clean.csv +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv test_csv: - - !ref /test-clean.csv - - !ref /test-other.csv + - !ref /dev-clean.csv + - !ref /test-clean.csv + - !ref /test-other.csv -ckpt_interval_minutes: 30 # save checkpoint every N min +####################### Training Parameters #################################### -# Training parameters # To make Transformers converge, the global bath size should be large enough. -# The global batch size is computed as batch_size * n_gpus * gradient_accumulation. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. # Empirically, we found that this value should be >= 128. # Please, set your parameters accordingly. -number_of_epochs: 60 -batch_size: 24 # This works for 1x GPU with 40GB +number_of_epochs: 120 +batch_size: 16 # This works for 2x GPUs with 32GB ctc_weight: 0.3 -grad_accumulation_factor: 2 +grad_accumulation_factor: 1 max_grad_norm: 5.0 loss_reduction: 'batchmean' -sorting: descending - -dynamic_batching: False - -dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 100000 # in terms of frames - num_buckets: 200 - shuffle_ex: False # if true re-creates batches at each epoch shuffling examples. 
- batch_ordering: descending - max_batch_ex: -1 +sorting: random +num_workers: 4 +precision: fp16 # bf16, fp16 or fp32 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation # stages related parameters -lr_adam: 0.001 +lr_adam: 0.0008 +warmup: 50000 +augment_warmup: 8000 # Feature parameters sample_rate: 16000 -n_fft: 400 +n_fft: 512 n_mels: 80 +win_length: 32 + +# This setup works well for V100 32GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 150 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref # Dataloader options train_dataloader_opts: batch_size: !ref shuffle: True - num_workers: 12 + num_workers: !ref valid_dataloader_opts: batch_size: 1 @@ -85,11 +105,12 @@ valid_dataloader_opts: test_dataloader_opts: batch_size: 1 -####################### Model parameters ########################### +####################### Model Parameters ####################################### + # Transformer d_model: 512 -nhead: 4 -num_encoder_layers: 8 +nhead: 8 +num_encoder_layers: 12 num_decoder_layers: 6 d_ffn: 2048 transformer_dropout: 0.1 @@ -98,7 +119,7 @@ output_neurons: 5000 # Outputs blank_index: 0 -label_smoothing: 0.0 +label_smoothing: 0.1 pad_index: 0 bos_index: 1 eos_index: 2 @@ -112,19 +133,19 @@ test_beam_size: 66 lm_weight: 0.60 ctc_weight_decode: 0.40 -############################## models ################################ +############################## Models ########################################## CNN: 
!new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd input_shape: (8, 10, 80) - num_blocks: 3 + num_blocks: 2 num_layers_per_block: 1 - out_channels: (64, 64, 64) - kernel_sizes: (5, 5, 1) - strides: (2, 2, 1) - residuals: (False, False, True) + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length - input_size: 1280 + input_size: 640 tgt_vocab: !ref d_model: !ref nhead: !ref @@ -134,7 +155,7 @@ Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.Transforme dropout: !ref activation: !ref encoder_module: conformer - attention_type: RelPosMHAXL + attention_type: RoPEMHA normalize_before: True causal: False @@ -163,6 +184,10 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + modules: CNN: !ref Transformer: !ref @@ -170,44 +195,60 @@ modules: ctc_lin: !ref normalize: !ref -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref ] - # define two optimizers here for two-stage training -Adam: !name:torch.optim.Adam +Adam: !name:torch.optim.AdamW lr: !ref betas: (0.9, 0.98) eps: 0.000000001 +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +####################### Decoding & optimiser ########################### -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] - bos_index: !ref +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer eos_index: !ref blank_index: !ref + ctc_fc: !ref + + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +scorer_valid_search: 
!new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False - length_normalization: False - + length_normalization: True + scorer: !ref -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref - lm_weight: !ref - lm_modules: !ref temperature: 1.15 - temperature_lm: 1.15 using_eos_threshold: False length_normalization: True + scorer: !ref log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -222,8 +263,7 @@ seq_cost: !name:speechbrain.nnet.losses.kldiv_loss noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler lr_initial: !ref - n_warmup_steps: 25000 - #model_size: !ref + n_warmup_steps: !ref checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref @@ -236,28 +276,49 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - update_until_epoch: 4 - -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: False - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 4 - time_mask: True - n_time_mask: 4 - replace_with_zero: False - freq_mask_width: 15 - time_mask_width: 20 - -speed_perturb: True +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 
105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] compute_features: !new:speechbrain.lobes.features.Fbank sample_rate: !ref n_fft: !ref n_mels: !ref + win_length: !ref + +############################## Logging and Pretrainer ########################## train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/conformer_small.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/conformer_small.yaml index 22c31e4d92..1376ef332c 100644 --- a/recipes/LibriSpeech/ASR/transformer/hparams/conformer_small.yaml +++ b/recipes/LibriSpeech/ASR/transformer/hparams/conformer_small.yaml @@ -10,9 +10,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 7775 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/conformer_small/ -wer_file: !ref /wer.txt +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt @@ -34,24 +34,28 @@ train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] dev_splits: ["dev-clean"] test_splits: ["test-clean", "test-other"] skip_prep: False -train_csv: !ref /train.csv -valid_csv: !ref /dev-clean.csv +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv test_csv: - - !ref /test-clean.csv - - !ref /test-other.csv + - !ref /test-clean.csv + - !ref 
/test-other.csv + +####################### Training Parameters #################################### -# Training parameters # To make Transformers converge, the global bath size should be large enough. -# The global batch size is computed as batch_size * n_gpus * gradient_accumulation. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. # Empirically, we found that this value should be >= 128. # Please, set your parameters accordingly. number_of_epochs: 110 batch_size: 16 # This works for 2x GPUs with 32GB ctc_weight: 0.3 -grad_accumulation_factor: 4 +grad_accumulation_factor: 1 max_grad_norm: 5.0 loss_reduction: 'batchmean' -sorting: ascending +sorting: random +num_workers: 4 +precision: fp32 # bf16, fp16 or fp32 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation # stages related parameters # stage_one_epochs: 90 @@ -63,20 +67,35 @@ sample_rate: 16000 n_fft: 400 n_mels: 80 -dynamic_batching: False - -dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 100000 # in terms of frames - num_buckets: 200 - shuffle_ex: False # if true re-creates batches at each epoch shuffling examples. - batch_ordering: descending - max_batch_ex: -1 +# This setup works well for V100 32GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 900 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+batch_ordering: random +max_batch_ex: 128 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref # Dataloader options train_dataloader_opts: batch_size: !ref shuffle: True + num_workers: !ref valid_dataloader_opts: batch_size: 1 @@ -84,14 +103,15 @@ valid_dataloader_opts: test_dataloader_opts: batch_size: 1 -####################### Model parameters ########################### +####################### Model Parameters ####################################### + # Transformer d_model: 144 nhead: 4 num_encoder_layers: 12 num_decoder_layers: 4 d_ffn: 1024 -transformer_dropout: 0.0 +transformer_dropout: 0.1 activation: !name:torch.nn.GELU output_neurons: 5000 @@ -111,7 +131,7 @@ test_beam_size: 66 lm_weight: 0.60 ctc_weight_decode: 0.40 -############################## models ################################ +############################## Models ########################################## CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd input_shape: (8, 10, 80) @@ -182,39 +202,52 @@ Adam: !name:torch.optim.Adam betas: (0.9, 0.98) eps: 0.000000001 -#SGD: !name:torch.optim.SGD -# lr: !ref -# momentum: 0.99 -# nesterov: True +############################## Decoding & optimiser ############################ -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] - bos_index: !ref +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer eos_index: !ref blank_index: !ref + ctc_fc: !ref + + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +scorer_valid_search: 
!new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False - length_normalization: False - + length_normalization: True + scorer: !ref -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref - lm_weight: !ref - lm_modules: !ref temperature: 1.15 - temperature_lm: 1.15 using_eos_threshold: False length_normalization: True + scorer: !ref + log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -230,7 +263,6 @@ seq_cost: !name:speechbrain.nnet.losses.kldiv_loss noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler lr_initial: !ref n_warmup_steps: 25000 -# model_size: !ref checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref @@ -243,27 +275,49 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: True - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 2 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 - -speed_perturb: !new:speechbrain.processing.speech_augmentation.SpeedPerturb +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb orig_freq: !ref speeds: [95, 100, 105] +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + 
drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + compute_features: !new:speechbrain.lobes.features.Fbank sample_rate: !ref n_fft: !ref n_mels: !ref +############################## Logging and Pretrainer ########################## + train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/extract_ssl_feats.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/extract_ssl_feats.yaml new file mode 100644 index 0000000000..db3953e9d2 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/extract_ssl_feats.yaml @@ -0,0 +1,42 @@ +# ############################################################################ +# Task : Extraction of self-supervised (SSL) speech features from LibriSpeech +# Usage: Precompute and cache SSL representations for downstream SpeechLLM ASR +# Authors: +# * Adel Moumen, 2025 +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +experiment_name: ssl_feats_extraction +output_folder: !ref results// +save_folder: !ref /save +feats_cache_dir: !ref /feats_cache + +# Data files +data_folder: !PLACEHOLDER +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref 
/dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv +dtype: !name:torch.bfloat16 +device: cuda + +####################### Training Parameters #################################### +ssl_hub: !PLACEHOLDER +ssl_folder: !ref /ssl_checkpoint +ssl_frozen: True + +####################### Model Components #################################### +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence +ssl: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + device_map: !ref diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_13M.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_13M.yaml new file mode 100644 index 0000000000..e1b29f1b2a --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_13M.yaml @@ -0,0 +1,342 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: HyperConformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Training: Librispeech 960h +# Authors: Juan Pablo Zuluaga, Florian Mai, Titouan Parcollet +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 7775 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/hyperbranchformer_13M/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. Here, we download everything from the +# speechbrain HuggingFace repository. 
However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g if you want to use your own LM / tokenizer. +pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +############################## Training Parameters ############################# + +# To make Transformers converge, the global bath size should be large enough. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. +number_of_epochs: 110 +batch_size: 16 # This works for 2x GPUs with 32GB +ctc_weight: 0.3 +grad_accumulation_factor: 3 +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +# stages related parameters +lr_adam: 0.001 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for a P40 24GB GPU, adapt it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 600 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+max_batch_ex: 128 +batch_ordering: random + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 144 +nhead: 8 +num_encoder_layers: 10 +num_decoder_layers: 4 +csgu_linear_units: 3072 +csgu_kernel_size: 31 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5000 +# specify 'hypermixing' for usage of multi-head HyperMixer instead of MultiHeadAttention +# You can also specify RelPosMHAXL for conformer +attention_type: hypermixing + +# option 1) 'conformer' for HyperConformer; option 2) 'transformer' for vanilla HyperMixer +encoder_module: branchformer + +# Outputs +blank_index: 0 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 30 +valid_beam_size: 10 +test_beam_size: 66 +lm_weight: 0.60 +ctc_weight_decode: 0.40 + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + dropout: !ref + activation: !ref + branchformer_activation: 
!ref + encoder_module: !ref + csgu_linear_units: !ref + kernel_size: !ref + attention_type: !ref + normalize_before: True + causal: False + +# This is the TransformerLM that is used according to the Huggingface repository +# Visit the HuggingFace model corresponding to the pretrained_lm_tokenizer_path +# For more details about the model! +# NB: It has to match the pre-trained TransformerLM!! +lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: 768 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +# define two optimizers here for two-stage training +Adam: !name:torch.optim.Adam + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + +############################## Decoding & optimiser ############################ + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: 
[!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: False + length_normalization: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: 
!ref + n_fft: !ref + n_mels: !ref + +############################## Logging and Pretrainer ########################## + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_25M.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_25M.yaml new file mode 100644 index 0000000000..c88c9a776c --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/hyperbranchformer_25M.yaml @@ -0,0 +1,342 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: HyperConformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Training: Librispeech 960h +# Authors: Juan Pablo Zuluaga, Florian Mai, Titouan Parcollet +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 7775 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/hyperbranchformer_25M/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. 
Here, we download everything from the +# speechbrain HuggingFace repository. However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g. if you want to use your own LM / tokenizer. +pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +############################## Training Parameters ############################# + +# To make Transformers converge, the global batch size should be large enough. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. +number_of_epochs: 110 +batch_size: 16 # This works for 2x GPUs with 32GB +ctc_weight: 0.3 +grad_accumulation_factor: 3 +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +# stages related parameters +lr_adam: 0.001 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for a P40 24GB GPU, adapt it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 600 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples.
+max_batch_ex: 128 +batch_ordering: random + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ####################################### +# Transformer +d_model: 256 +nhead: 8 +num_encoder_layers: 10 +num_decoder_layers: 4 +csgu_linear_units: 3072 +csgu_kernel_size: 31 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5000 +# specify 'hypermixing' for usage of multi-head HyperMixer instead of MultiHeadAttention +# You can also specify RelPosMHAXL for conformer +attention_type: hypermixing + +# option 1) 'conformer' for HyperConformer; option 2) 'transformer' for vanilla HyperMixer +encoder_module: branchformer + +# Outputs +blank_index: 0 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 30 +valid_beam_size: 10 +test_beam_size: 66 +lm_weight: 0.60 +ctc_weight_decode: 0.40 + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + dropout: !ref + activation: !ref + branchformer_activation: 
!ref + encoder_module: !ref + csgu_linear_units: !ref + kernel_size: !ref + attention_type: !ref + normalize_before: True + causal: False + +# This is the TransformerLM that is used according to the Huggingface repository +# Visit the HuggingFace model corresponding to the pretrained_lm_tokenizer_path +# For more details about the model! +# NB: It has to match the pre-trained TransformerLM!! +lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: 768 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +# define two optimizers here for two-stage training +Adam: !name:torch.optim.Adam + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + +############################## Decoding & optimiser ############################ + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: 
[!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: False + length_normalization: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: 
!ref + n_fft: !ref + n_mels: !ref + +############################## Logging and Pretrainer ########################## + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_22M.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_22M.yaml new file mode 100644 index 0000000000..bd6fca49a6 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_22M.yaml @@ -0,0 +1,340 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: HyperConformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Training: Librispeech 960h +# Authors: Juan Pablo Zuluaga, Florian Mai, Titouan Parcollet +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 7775 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/hyperconformer_22M/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. 
Here, we download everything from the +# speechbrain HuggingFace repository. However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g. if you want to use your own LM / tokenizer. +pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +# To make Transformers converge, the global batch size should be large enough. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. +number_of_epochs: 110 +batch_size: 16 # This works for 2x GPUs with 32GB +ctc_weight: 0.3 +grad_accumulation_factor: 1 +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +# stages related parameters +lr_adam: 0.001 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for a P40 24GB GPU, adapt it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 600 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples.
+max_batch_ex: 128 +batch_ordering: random + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 256 +nhead: 8 +num_encoder_layers: 10 +num_decoder_layers: 4 +d_ffn: 1024 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5000 +# specify 'hypermixing' for usage of multi-head HyperMixer instead of MultiHeadAttention +# You can also specify RelPosMHAXL for conformer +attention_type: hypermixing + +# option 1) 'conformer' for HyperConformer; option 2) 'transformer' for vanilla HyperMixer +encoder_module: conformer + +# Outputs +blank_index: 0 +label_smoothing: 0.0 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 10 +test_beam_size: 66 +lm_weight: 0.60 +ctc_weight_decode: 0.40 + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: !ref + attention_type: !ref + 
normalize_before: True + causal: False + +# This is the TransformerLM that is used according to the Huggingface repository +# Visit the HuggingFace model corresponding to the pretrained_lm_tokenizer_path +# For more details about the model! +# NB: It has to match the pre-trained TransformerLM!! +lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: 768 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +# define two optimizers here for two-stage training +Adam: !name:torch.optim.Adam + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + +####################### Decoding & optimiser ################################### + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + 
beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: False + length_normalization: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +############################## Logging and Pretrainer 
########################## + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_8M.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_8M.yaml new file mode 100644 index 0000000000..6ac279b812 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/hyperconformer_8M.yaml @@ -0,0 +1,340 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: HyperConformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Training: Librispeech 960h +# Authors: Juan Pablo Zuluaga, Florian Mai, Titouan Parcollet +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 7775 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/hyperconformer_8M/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. Here, we download everything from the +# speechbrain HuggingFace repository. 
However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g. if you want to use your own LM / tokenizer. +pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech + +# Data files +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +# If RIRS_NOISES dir exists in /localscratch/xxx_corpus/RIRS_NOISES +# then data_folder_rirs should be /localscratch/xxx_corpus +# otherwise the dataset will automatically be downloaded +# data_folder_rirs: !ref +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Training Parameters #################################### + +# To make Transformers converge, the global batch size should be large enough. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. +number_of_epochs: 110 +batch_size: 16 # This works for 2x GPUs with 32GB +ctc_weight: 0.3 +grad_accumulation_factor: 1 +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation + +# stages related parameters +lr_adam: 0.001 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for a P40 24GB GPU, adapt it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 600 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples.
+max_batch_ex: 128 +batch_ordering: random + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 144 +nhead: 8 +num_encoder_layers: 10 +num_decoder_layers: 4 +d_ffn: 576 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5000 +# specify 'hypermixing' for usage of multi-head HyperMixer instead of MultiHeadAttention +# You can also specify RelPosMHAXL for conformer +attention_type: hypermixing + +# option 1) 'conformer' for HyperConformer; option 2) 'transformer' for vanilla HyperMixer +encoder_module: conformer + +# Outputs +blank_index: 0 +label_smoothing: 0.0 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 10 +test_beam_size: 66 +lm_weight: 0.60 +ctc_weight_decode: 0.40 + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: !ref + attention_type: !ref + 
normalize_before: True + causal: False + +# This is the TransformerLM that is used according to the Huggingface repository +# Visit the HuggingFace model corresponding to the pretrained_lm_tokenizer_path +# For more details about the model! +# NB: It has to match the pre-trained TransformerLM!! +lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: 768 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +# define two optimizers here for two-stage training +Adam: !name:torch.optim.Adam + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + +####################### Decoding & optimiser ########################### + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + 
beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: False + length_normalization: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +############################## Logging and Pretrainer 
########################## + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/speechllm_e2e.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/speechllm_e2e.yaml new file mode 100644 index 0000000000..2ca028a2a7 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/speechllm_e2e.yaml @@ -0,0 +1,250 @@ +# ############################################################################ +# Model: End-to-end SpeechLLM-based ASR with on-the-fly SSL feature extraction +# Task : Large-scale ASR on LibriSpeech 960h with an LLM decoder +# Authors: +# * Adel Moumen, 2025 +# ############################################################################ + +# Seed must be set at the top before objects with parameters are created +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +####################### Experiment Configuration ########################### + +experiment_name: speechllm_e2e +output_folder: !ref results// +output_wer_folder: !ref /wer_results +save_folder: !ref /save +train_log: !ref /train_log.txt +feats_cache_dir: null +ckpt_interval_minutes: 15 + +####################### Data Configuration ################################# + +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: 
["test-clean", "test-other", "dev-other"] +skip_prep: False +csv_folder: !ref +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + - !ref /dev-clean.csv + - !ref /dev-other.csv + +####################### Model Paths & Configuration ######################## + +# SSL Model Configuration +ssl_hub: !PLACEHOLDER +ssl_folder: !ref /ssl_checkpoint +ssl_frozen: True +ssl_device: cuda +use_feats: False +# LLM Configuration +llm_path: !PLACEHOLDER +llm_emb_size: 2048 + +####################### Training Hyperparameters ########################### + +number_of_epochs: 1 +batch_size: 32 # Only used if dynamic batching is off +grad_accumulation_factor: 5 +sorting: random +num_workers: 0 +precision: bf16 # Options: bf16, fp16, fp32 +eval_precision: bf16 +max_grad_norm: 1.0 + +####################### Learning Rate & Optimization ###################### + +initial_lr: 0.0005 +final_lr: 0.00001 +lr_ssl: 0.00002 +weight_decay: 0.0 + +####################### Feature & Audio Parameters ######################### + +downsampling_factor: 5 # Used to downsample frames before LLM projection + +####################### Dynamic Batching Configuration ###################### + +# This setup works well for A100 80GB GPU; adapt to your needs +# Turn off dynamic batching if needed (training speed will decrease) +dynamic_batching: True +max_batch_length_train: 400 +max_batch_length_val: 100 # Reduced for validation due to wider beam (VRAM) +num_bucket: 200 +shuffle: True # Re-creates batches at each epoch by shuffling examples +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +####################### DataLoader Configuration ########################## +ignore_index: 
-100 +test_batch_size: 1 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + per_key_padding_kwargs: + sig: + value: 0 + tokens_eos: + value: !ref + +valid_dataloader_opts: + batch_size: !ref + num_workers: 0 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + per_key_padding_kwargs: + sig: + value: 0 + tokens_eos: + value: !ref + +test_dataloader_opts: + batch_size: !ref + num_workers: 0 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + per_key_padding_kwargs: + sig: + value: 0 + tokens_eos: + value: !ref + +####################### Model Architecture Parameters ####################### + +# Output & Decoding Parameters +bos_index: !PLACEHOLDER # 0 +eos_index: !PLACEHOLDER # 0 +pad_token: !PLACEHOLDER # 128256 +prompt: "Transcribe speech to text." + +####################### Model Components #################################### + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + +ssl: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref + device_map: !ref + +activation: !name:speechbrain.nnet.activations.ReLU +dnn_layers: 2 +dnn_neurons: !ref +proj: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, 5120] # 1024 * downsampling_factor + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + + +backbone_llm: !new:speechbrain.integrations.huggingface.llama.LLaMA + source: !ref + save_path: !ref + freeze: True + attn_implementation: sdpa + device: cuda + torch_dtype: !name:torch.bfloat16 + additional_special_tokens: + - "<|start_of_audio|>" + - "<|end_of_audio|>" + output_hidden_states: False + +lora_rank: 16 +llm: !new:speechbrain.nnet.adapters.AdaptedModel + model_to_adapt: !ref + adapter_class: !name:speechbrain.nnet.adapters.LoRA + all_linear: True + adapter_kwargs: + rank: !ref + +feat_downsampler: 
!new:speechbrain.lobes.downsampling.ConcatDownsampler + downsampling_factor: !ref + +searcher: !new:speechbrain.decoders.seq2seq.S2SHuggingFaceLLMGreedySearcher + llm_model: !ref + temperature: 0.0 + min_decode_ratio: 0.0 + max_decode_ratio: 1.0 + bos_index: !ref + eos_index: !ref + +modules: + ssl: !ref + feat_downsampler: !ref + llm: !ref + proj: !ref + normalize: !ref + searcher: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +####################### Optimizers & Schedulers ############################ + +opt: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +opt_ssl: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +scheduler: !new:speechbrain.nnet.schedulers.LinearScheduler + initial_value: !ref + final_value: !ref + epoch_count: !ref + +lr_annealing_ssl: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 1 + +####################### Checkpointing & Logging ############################# + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + ssl: !ref + llm: !ref + proj: !ref + lr_annealing_ssl: !ref + counter: !ref + normalize: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +####################### Metrics ############################################# + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/speechllm_ssl_feats.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/speechllm_ssl_feats.yaml new file mode 100644 index 0000000000..b4b6ba7309 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/speechllm_ssl_feats.yaml @@ -0,0 +1,220 @@ +# 
############################################################################ +# Model: SpeechLLM-based E2E ASR using pre-extracted SSL features +# Task : Large-scale ASR on LibriSpeech 960h with an LLM decoder +# Authors: +# * Adel Moumen, 2025 +# ############################################################################ + +# Seed must be set at the top before objects with parameters are created +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +####################### Experiment Configuration ########################### + +experiment_name: speechllm_ssl_feats +output_folder: !ref results// +output_wer_folder: !ref /wer_results +save_folder: !ref /save +train_log: !ref /train_log.txt +feats_cache_dir: !PLACEHOLDER +use_feats: True # Use pre-extracted SSL features from `feats_cache_dir` +ckpt_interval_minutes: 15 + +####################### Data Configuration ################################# + +data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean", "test-other"] +skip_prep: False +csv_folder: !ref +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /test-clean.csv + - !ref /test-other.csv + +####################### Model Paths & Configuration ######################## + +# LLM Configuration +llm_path: !PLACEHOLDER # e.g., /path/to/meta-llama/Llama-3.2-1B +llm_emb_size: 2048 + +####################### Training Hyperparameters ########################### + +number_of_epochs: 1 +batch_size: 32 # Only used if dynamic batching is off +grad_accumulation_factor: 5 +sorting: random +num_workers: 0 +precision: bf16 # Options: bf16, fp16, fp32 +eval_precision: bf16 +max_grad_norm: 1.0 + +####################### Learning Rate & Optimization ###################### + +initial_lr: 0.0005 +final_lr: 0.00001 +weight_decay: 0.0 + +####################### Feature & Audio Parameters 
######################### + +downsampling_factor: 5 # Used to downsample frames before LLM projection + +####################### Dynamic Batching Configuration ###################### + +# This setup works well for A100 80GB GPU; adapt to your needs +# Turn off dynamic batching if needed (training speed will decrease) +dynamic_batching: True +max_batch_length_train: 400 +max_batch_length_val: 100 # Reduced for validation due to wider beam (VRAM) +num_bucket: 200 +shuffle: True # Re-creates batches at each epoch by shuffling examples +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +####################### DataLoader Configuration ########################## +ignore_index: -100 +test_batch_size: 1 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + per_key_padding_kwargs: + sig: + value: 0 + tokens_eos: + value: !ref + +valid_dataloader_opts: + batch_size: !ref + num_workers: 0 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + per_key_padding_kwargs: + sig: + value: 0 + tokens_eos: + value: !ref + +test_dataloader_opts: + batch_size: !ref + num_workers: 0 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + per_key_padding_kwargs: + sig: + value: 0 + tokens_eos: + value: !ref + +####################### Model Architecture Parameters ####################### +lora_rank: 16 + +# Output & Decoding Parameters +# The user needs to set the bos_index, eos_index and pad_token. +# If a value is None, we won't use the corresponding token. 
+# Current template is: +# <|start_of_audio|> audio_feats <|end_of_audio|> txt tokens +bos_index: !PLACEHOLDER # 0 +eos_index: !PLACEHOLDER # 0 +pad_token: !PLACEHOLDER # 128256 +prompt: "Transcribe speech to text." + +####################### Model Components #################################### + +activation: !name:speechbrain.nnet.activations.ReLU +dnn_layers: 2 +dnn_neurons: !ref +proj: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, 5120] # 1024 * downsampling_factor + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +backbone_llm: !new:speechbrain.integrations.huggingface.llama.LLaMA + source: !ref + save_path: !ref + freeze: True + attn_implementation: flash_attention_2 + device: cuda + torch_dtype: !name:torch.bfloat16 + additional_special_tokens: + - "<|start_of_audio|>" + - "<|end_of_audio|>" + output_hidden_states: False + +llm: !new:speechbrain.nnet.adapters.AdaptedModel + model_to_adapt: !ref + adapter_class: !name:speechbrain.nnet.adapters.LoRA + all_linear: True + adapter_kwargs: + rank: !ref + +feat_downsampler: !new:speechbrain.lobes.downsampling.ConcatDownsampler + downsampling_factor: !ref + +searcher: !new:speechbrain.decoders.seq2seq.S2SHuggingFaceLLMGreedySearcher + llm_model: !ref + temperature: 0.0 # 0.0 => true greedy (deterministic). 
>0 => stochastic sampling + min_decode_ratio: 0.0 + max_decode_ratio: 1.0 + bos_index: !ref + eos_index: !ref + +modules: + feat_downsampler: !ref + llm: !ref + proj: !ref + searcher: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +####################### Optimizers & Schedulers ############################ + +opt: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +scheduler: !new:speechbrain.nnet.schedulers.LinearScheduler + initial_value: !ref + final_value: !ref + epoch_count: !ref + +####################### Checkpointing & Logging ############################# + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + llm: !ref + proj: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +####################### Metrics ############################################# + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/train_hf_whisper.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/train_hf_whisper.yaml new file mode 100644 index 0000000000..0006f8d78b --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/train_hf_whisper.yaml @@ -0,0 +1,164 @@ +# ################################ +# Model: Whisper (Encoder-Decoder) + NLL +# Augmentation: TimeDomainSpecAugment +# Authors: Adel Moumen 2022 & 2024, Titouan Parcollet 2022 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/whisper/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english whisper model. 
+whisper_hub: openai/whisper-medium.en +whisper_folder: !ref /whisper_checkpoint + +# Normalize the english inputs with +# the same normalization done in the paper +normalized_transcripts: True + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["dev-clean", "test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /dev-clean.csv + - !ref /test-clean.csv + - !ref /test-other.csv + +ckpt_interval_minutes: 10 # save checkpoint every N min + +############################## Training Parameters ############################# +freeze_encoder: True +number_of_epochs: 1 +weight_decay: 0.01 +lr_whisper: 1e-5 +warmup_steps: 500 +max_grad_norm: 2.0 +sorting: ascending +precision: fp16 # bf16, fp16 or fp32 +eval_precision: fp16 +sampling_rate: 16_000 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# This setup works well with 1x 32GB GPU +batch_size: 16 +test_batch_size: 16 +grad_accumulation_factor: 1 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +test_beam_size: 8 + +####################### Model Parameters ####################################### + +train_loader_kwargs: + batch_size: !ref + +valid_loader_kwargs: + batch_size: !ref + +test_loader_kwargs: + batch_size: !ref + + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# UNCOMMENT THIS SECTION TO ADD AUGMENTATIONS +# speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb +# orig_freq: !ref +# speeds: [95, 100, 105] + +# # Frequency drop: randomly drops a number of frequency bands to zero. 
+# drop_freq: !new:speechbrain.augment.time_domain.DropFreq +# drop_freq_low: 0 # Min frequency band dropout probability +# drop_freq_high: 1 # Max frequency band dropout probability +# drop_freq_count_low: 1 # Min number of frequency bands to drop +# drop_freq_count_high: 3 # Max number of frequency bands to drop +# drop_freq_width: 0.05 # Width of frequency bands to drop + +# # Time drop: randomly drops a number of temporal chunks. +# drop_chunk: !new:speechbrain.augment.time_domain.DropChunk +# drop_length_low: 1 +# drop_length_high: 5 +# drop_count_low: 1000 +# drop_count_high: 2000 + +# # Augmenter: Combines previously defined augmentations to perform data augmentation +# wav_augment: !new:speechbrain.augment.augmenter.Augmenter +# concat_original: True +# min_augmentations: 3 +# max_augmentations: 3 +# augment_prob: 1.0 +# augmentations: [ +# !ref , +# !ref , +# !ref ] + +############################## Models ########################################## + +whisper: !new:speechbrain.integrations.huggingface.whisper.Whisper + source: !ref + freeze_encoder: !ref + save_path: !ref + language: "english" + task: "transcribe" + sampling_rate: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +nll_loss: !name:speechbrain.nnet.losses.nll_loss + +modules: + whisper: !ref + +############################## Decoding & optimiser ############################ + +whisper_opt_class: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +valid_search: !new:speechbrain.decoders.seq2seq.S2SWhisperGreedySearcher + model: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + +test_search: !new:speechbrain.decoders.seq2seq.S2SWhisperBeamSearcher + module: [!ref ] + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + +lr_annealing_whisper: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: !ref + +############################## Logging and Pretrainer ########################## + +checkpointer: 
!new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + whisper: !ref + scheduler_whisper: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/train_whisper_lora.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/train_whisper_lora.yaml new file mode 100644 index 0000000000..e95b36ccd4 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/hparams/train_whisper_lora.yaml @@ -0,0 +1,174 @@ +# ################################ +# Model: Whisper (Encoder-Decoder) + NLL + LoRA +# Augmentation: TimeDomainSpecAugment +# Authors: Peter Plantinga 2024, Adel Moumen 2022 & 2024, Titouan Parcollet 2022 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/whisper/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the whisper model +whisper_hub: openai/whisper-small.en +whisper_folder: !ref /whisper_checkpoint + +# Normalize the english inputs with +# the same normalization done in the paper +normalized_transcripts: True + +# Data files +data_folder: !PLACEHOLDER # e,g./path/to/LibriSpeech +train_splits: ["train-clean-100"] #, "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["dev-clean", "test-clean", "test-other"] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: + - !ref /dev-clean.csv + - !ref /test-clean.csv + - !ref /test-other.csv + +ckpt_interval_minutes: 1 # save checkpoint every N min + +############################## Training Parameters ############################# 
+freeze_encoder: True +number_of_epochs: 1 +weight_decay: 0.01 +lr_whisper: 1e-5 +warmup_steps: 500 +max_grad_norm: 2.0 +sorting: ascending +precision: fp16 # bf16, fp16 or fp32 +eval_precision: fp16 +sampling_rate: 16_000 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# This setup works well with 1x 32GB GPU +batch_size: 2 +test_batch_size: 2 +grad_accumulation_factor: 8 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +test_beam_size: 8 + +# Lora configuration +lora_rank: 16 + +####################### Model Parameters ####################################### + +train_loader_kwargs: + batch_size: !ref + +valid_loader_kwargs: + batch_size: !ref + +test_loader_kwargs: + batch_size: !ref + + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# UNCOMMENT THIS SECTION TO ADD AUGMENTATIONS +# speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb +# orig_freq: !ref +# speeds: [95, 100, 105] + +# # Frequency drop: randomly drops a number of frequency bands to zero. +# drop_freq: !new:speechbrain.augment.time_domain.DropFreq +# drop_freq_low: 0 # Min frequency band dropout probability +# drop_freq_high: 1 # Max frequency band dropout probability +# drop_freq_count_low: 1 # Min number of frequency bands to drop +# drop_freq_count_high: 3 # Max number of frequency bands to drop +# drop_freq_width: 0.05 # Width of frequency bands to drop + +# # Time drop: randomly drops a number of temporal chunks. 
+# drop_chunk: !new:speechbrain.augment.time_domain.DropChunk +# drop_length_low: 1 +# drop_length_high: 5 +# drop_count_low: 1000 +# drop_count_high: 2000 + +# # Augmenter: Combines previously defined augmentations to perform data augmentation +# wav_augment: !new:speechbrain.augment.augmenter.Augmenter +# concat_original: True +# min_augmentations: 3 +# max_augmentations: 3 +# augment_prob: 1.0 +# augmentations: [ +# !ref , +# !ref , +# !ref ] + +############################## Models ########################################## + +whisper_pretrained: !new:speechbrain.integrations.huggingface.whisper.Whisper + source: !ref + freeze_encoder: !ref + save_path: !ref + language: "english" + task: "transcribe" + sampling_rate: !ref + +whisper: !new:speechbrain.nnet.adapters.AdaptedModel + model_to_adapt: !ref + adapter_class: !name:speechbrain.nnet.adapters.LoRA + all_linear: True + adapter_kwargs: + rank: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +nll_loss: !name:speechbrain.nnet.losses.nll_loss + +modules: + whisper: !ref + +############################## Decoding & optimiser ############################ + +whisper_opt_class: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +valid_search: !new:speechbrain.decoders.seq2seq.S2SWhisperGreedySearcher + model: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + +test_search: !new:speechbrain.decoders.seq2seq.S2SWhisperBeamSearcher + module: [!ref ] + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + +lr_annealing_whisper: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: !ref + +############################## Logging and Pretrainer ########################## + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + whisper: !ref + scheduler_whisper: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + 
+error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/LibriSpeech/ASR/transformer/hparams/transformer.yaml b/recipes/LibriSpeech/ASR/transformer/hparams/transformer.yaml index 4522282f51..abd536b7e5 100644 --- a/recipes/LibriSpeech/ASR/transformer/hparams/transformer.yaml +++ b/recipes/LibriSpeech/ASR/transformer/hparams/transformer.yaml @@ -10,9 +10,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 74443 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/transformer/ -wer_file: !ref /wer.txt +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt @@ -34,36 +34,59 @@ train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] dev_splits: ["dev-clean"] test_splits: ["test-clean", "test-other"] skip_prep: False -train_csv: !ref /train.csv -valid_csv: !ref /dev-clean.csv +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv test_csv: - - !ref /test-clean.csv - - !ref /test-other.csv + - !ref /test-clean.csv + - !ref /test-other.csv ckpt_interval_minutes: 30 # save checkpoint every N min -# Training parameters +####################### Training Parameters #################################### + # To make Transformers converge, the global bath size should be large enough. -# The global batch size is computed as batch_size * n_gpus * gradient_accumulation. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. # Empirically, we found that this value should be >= 128. # Please, set your parameters accordingly. 
-number_of_epochs: 60 -batch_size: 32 # This works for 1x GPU with 40GB +number_of_epochs: 100 +batch_size: 32 # This works for 1x GPU with 40GB with no dynamic batching ctc_weight: 0.3 -grad_accumulation_factor: 2 +grad_accumulation_factor: 1 max_grad_norm: 5.0 loss_reduction: 'batchmean' sorting: random +num_workers: 4 +precision: fp32 # bf16, fp16 or fp32 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation -dynamic_batching: False +# index +pad_index: 0 +bos_index: 1 +eos_index: 2 -dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 100000 # in terms of frames - num_buckets: 200 - shuffle_ex: False # if true re-creates batches at each epoch shuffling examples. - batch_ordering: descending - max_batch_ex: -1 +# This setup works well for V100 32GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 600 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+batch_ordering: random +max_batch_ex: 128 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref # stages related parameters lr_adam: 0.001 @@ -77,15 +100,25 @@ n_mels: 80 train_dataloader_opts: batch_size: !ref shuffle: True - num_workers: 12 + num_workers: !ref + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: !ref valid_dataloader_opts: batch_size: 1 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: !ref test_dataloader_opts: batch_size: 1 + collate_fn: !name:speechbrain.dataio.batch.PaddedBatch + padding_kwargs: + value: !ref + +####################### Model Parameters ####################################### -####################### Model parameters ########################### # Transformer d_model: 512 nhead: 4 @@ -99,9 +132,6 @@ output_neurons: 5000 # Outputs blank_index: 0 label_smoothing: 0.0 -pad_index: 0 -bos_index: 1 -eos_index: 2 # Decoding parameters min_decode_ratio: 0.0 @@ -109,10 +139,12 @@ max_decode_ratio: 1.0 valid_search_interval: 10 valid_beam_size: 10 test_beam_size: 66 + +# Scoring parameters lm_weight: 0.60 ctc_weight_decode: 0.40 -############################## models ################################ +############################## Models ########################################## CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd input_shape: (8, 10, 80) @@ -180,34 +212,50 @@ Adam: !name:torch.optim.Adam eps: 0.000000001 -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] - bos_index: !ref +####################### Decoding & optimiser ################################### + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer eos_index: !ref blank_index: !ref + ctc_fc: !ref + 
+transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False - length_normalization: False - + length_normalization: True + scorer: !ref -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref - lm_weight: !ref - lm_modules: !ref temperature: 1.15 - temperature_lm: 1.15 using_eos_threshold: False length_normalization: True + scorer: !ref log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -223,7 +271,6 @@ seq_cost: !name:speechbrain.nnet.losses.kldiv_loss noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler lr_initial: !ref n_warmup_steps: 25000 - #model_size: !ref checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref @@ -240,25 +287,49 @@ normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global update_until_epoch: 4 -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: False - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 4 - time_mask: True - n_time_mask: 4 - replace_with_zero: False - freq_mask_width: 15 - time_mask_width: 20 - -speed_perturb: True +############################## Augmentations ################################### + +# 
Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] compute_features: !new:speechbrain.lobes.features.Fbank sample_rate: !ref n_fft: !ref n_mels: !ref +############################## Logging and Pretrainer ########################## + train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref diff --git a/recipes/LibriSpeech/ASR/transformer/train.py b/recipes/LibriSpeech/ASR/transformer/train.py index 682983ae77..375def1307 100644 --- a/recipes/LibriSpeech/ASR/transformer/train.py +++ b/recipes/LibriSpeech/ASR/transformer/train.py @@ -36,14 +36,16 @@ import os import sys -import torch -import logging from pathlib import Path -import speechbrain as sb + +import torch from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -54,28 +56,27 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, _ = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = 
self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) - # compute features feats = self.hparams.compute_features(wavs) current_epoch = self.hparams.epoch_counter.current feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - feats = self.hparams.augmentation(feats) + # Add feature augmentation if specified. + augment_warmup = 0 + if hasattr(self.hparams, "augment_warmup"): + augment_warmup = self.hparams.augment_warmup + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + if self.optimizer_step > augment_warmup: + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels( + tokens_bos + ) # forward modules src = self.modules.CNN(feats) enc_out, pred = self.modules.Transformer( - src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index, + src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index ) # output layer for ctc log-probabilities @@ -88,43 +89,61 @@ def compute_forward(self, batch, stage): # Compute outputs hyps = None - if stage == sb.Stage.TRAIN: - hyps = None - elif stage == sb.Stage.VALID: - hyps = None - current_epoch = self.hparams.epoch_counter.current - if current_epoch % self.hparams.valid_search_interval == 0: - # for the sake of efficiency, we only perform beamsearch with limited capacity - # and no LM to give user some idea of how the AM is doing - hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens) - elif stage == sb.Stage.TEST: - hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if any([is_valid_search, 
is_test_search]): + # Note: For valid_search, for the sake of efficiency, we only perform beamsearch with + # limited capacity and no LM to give user some idea of how the AM is doing + + # Decide searcher for inference: valid or test search + if stage == sb.Stage.VALID: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + else: + hyps, _, _, _ = self.hparams.test_search( + enc_out.detach(), wav_lens + ) return p_ctc, p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" - (p_ctc, p_seq, wav_lens, hyps,) = predictions + (p_ctc, p_seq, wav_lens, hyps) = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 - ) - tokens = torch.cat([tokens, tokens], dim=0) - tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + augment_warmup = 0 + if hasattr(self.hparams, "augment_warmup"): + augment_warmup = self.hparams.augment_warmup + if ( + hasattr(self.hparams, "fea_augment") + and self.optimizer_step > augment_warmup + ): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens + ) loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens ).sum() - # now as training progresses we use real prediction from the prev step instead of teacher forcing - loss_ctc = self.hparams.ctc_cost( p_ctc, tokens, wav_lens, tokens_lens ).sum() @@ -151,46 +170,20 @@ def compute_objectives(self, predictions, batch, 
stage): self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) return loss - def fit_batch(self, batch): - - should_step = self.step % self.grad_accumulation_factor == 0 - # Managing automatic mixed precision - if self.auto_mix_prec: - self.optimizer.zero_grad() - with torch.cuda.amp.autocast(): - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - self.scaler.scale(loss / self.grad_accumulation_factor).backward() - if should_step: - self.scaler.unscale_(self.optimizer) - if self.check_gradients(loss): - self.scaler.step(self.optimizer) - self.scaler.update() - self.optimizer_step += 1 - - # anneal lr every update - self.hparams.noam_annealing(self.optimizer) - else: - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - (loss / self.grad_accumulation_factor).backward() - if should_step: - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.optimizer_step += 1 - - # anneal lr every update - self.hparams.noam_annealing(self.optimizer) - - return loss.detach().cpu() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - with torch.no_grad(): - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() + def on_evaluate_start(self, max_key=None, min_key=None): + """perform checkpoint average if needed""" + super().on_evaluate_start() + + ckpts = self.checkpointer.find_checkpoints( + max_key=max_key, min_key=min_key + ) + ckpt = sb.utils.checkpoints.average_checkpoints( + ckpts, recoverable_name="model" + ) + + self.hparams.model.load_state_dict(ckpt, strict=True) + self.hparams.model.eval() + print("Loaded the average") def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" @@ -215,8 +208,7 @@ def on_stage_end(self, stage, 
stage_loss, epoch): stage_stats["WER"] = self.wer_metric.summarize("error_rate") # log stats and save checkpoint at end-of-epoch - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): - + if stage == sb.Stage.VALID: lr = self.hparams.noam_annealing.current_lr steps = self.optimizer_step optimizer = self.optimizer.__class__.__name__ @@ -235,7 +227,7 @@ def on_stage_end(self, stage, stage_loss, epoch): self.checkpointer.save_and_keep_only( meta={"ACC": stage_stats["ACC"], "epoch": epoch}, max_keys=["ACC"], - num_to_keep=5, + num_to_keep=self.hparams.avg_checkpoints, ) elif stage == sb.Stage.TEST: @@ -243,8 +235,11 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) # save the averaged checkpoint at the end of the evaluation stage # delete the rest of the intermediate checkpoints @@ -255,28 +250,21 @@ def on_stage_end(self, stage, stage_loss, epoch): num_to_keep=1, ) - def on_evaluate_start(self, max_key=None, min_key=None): - """perform checkpoint averge if needed""" - super().on_evaluate_start() - - ckpts = self.checkpointer.find_checkpoints( - max_key=max_key, min_key=min_key - ) - ckpt = sb.utils.checkpoints.average_checkpoints( - ckpts, recoverable_name="model", device=self.device - ) - - self.hparams.model.load_state_dict(ckpt, strict=True) - self.hparams.model.eval() + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. 
- It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -300,7 +288,8 @@ def dataio_prepare(hparams): "sorting must be random, ascending or descending" ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -336,14 +325,10 @@ def audio_pipeline(wav): def audio_pipeline_train(wav): # Speed Perturb is done here so it is multi-threaded with the # workers of the dataloader (faster). - if hparams["speed_perturb"]: + if "speed_perturb" in hparams: sig = sb.dataio.dataio.read_audio(wav) - # factor = np.random.uniform(0.95, 1.05) - # sig = resample(sig.numpy(), 16000, int(16000*factor)) - speed = sb.processing.speech_augmentation.SpeedPerturb( - 16000, [x for x in range(95, 105)] - ) - sig = speed(sig.unsqueeze(0)).squeeze(0) # torch.from_numpy(sig) + + sig = hparams["speed_perturb"](sig.unsqueeze(0)).squeeze(0) else: sig = sb.dataio.dataio.read_audio(wav) return sig @@ -370,7 +355,8 @@ def text_pipeline(wrd): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], ) # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
@@ -379,25 +365,18 @@ def text_pipeline(wrd): if hparams["dynamic_batching"]: from speechbrain.dataio.sampler import DynamicBatchSampler # noqa - dynamic_hparams = hparams["dynamic_batch_sampler"] - num_buckets = dynamic_hparams["num_buckets"] + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] train_batch_sampler = DynamicBatchSampler( train_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + **dynamic_hparams_train, ) - valid_batch_sampler = DynamicBatchSampler( valid_data, - dynamic_hparams["max_batch_len"], - num_buckets=num_buckets, length_func=lambda x: x["duration"], - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], + **dynamic_hparams_valid, ) return ( @@ -413,10 +392,9 @@ def text_pipeline(wrd): if __name__ == "__main__": # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -438,9 +416,9 @@ def text_pipeline(wrd): "tr_splits": hparams["train_splits"], "dev_splits": hparams["dev_splits"], "te_splits": hparams["test_splits"], - "save_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], "merge_lst": hparams["train_splits"], - "merge_name": hparams["train_csv"], + "merge_name": "train.csv", "skip_prep": hparams["skip_prep"], }, ) @@ -457,8 +435,8 @@ def text_pipeline(wrd): # We download the pretrained LM from HuggingFace (or elsewhere depending on # the path given in the YAML file). The tokenizer is loaded at the same time. 
- run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Trainer initialization asr_brain = ASR( @@ -475,13 +453,28 @@ def text_pipeline(wrd): valid_dataloader_opts = hparams["valid_dataloader_opts"] if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + train_dataloader_opts = { "batch_sampler": train_bsampler, "num_workers": hparams["num_workers"], } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + # Training asr_brain.fit( asr_brain.hparams.epoch_counter, @@ -492,9 +485,12 @@ def text_pipeline(wrd): ) # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + for k in test_datasets.keys(): # keys are test_clean, test_other etc - asr_brain.hparams.wer_file = os.path.join( - hparams["output_folder"], "wer_{}.txt".format(k) + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" ) asr_brain.evaluate( test_datasets[k], diff --git a/recipes/LibriSpeech/ASR/transformer/train_bayesspeech.py b/recipes/LibriSpeech/ASR/transformer/train_bayesspeech.py new file mode 100644 index 0000000000..c0ff44849a --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/train_bayesspeech.py @@ -0,0 +1,564 @@ +#!/usr/bin/env python3 +"""Recipe for training a Bayesian Transformer ASR system (https://arxiv.org/abs/2301.11276) +with LibriSpeech via Bayes by Backprop (https://arxiv.org/abs/1505.05424). 
+The system employs an encoder, a decoder, and an attention mechanism between them. +Decoding is performed with (CTC/Att joint) beamsearch coupled with a neural language model. + +To run this recipe, do the following: +> python train_bayesspeech.py hparams/transformer_bayesspeech.yaml + +With the default hyperparameters, the system employs a convolutional frontend and a transformer. +The decoder is based on a Transformer decoder. Beamsearch coupled with a Transformer +language model is used on the top of decoder probabilities. + +Linear layers are turned into Bayesian linear layers by placing a normal prior and a normal +variational posterior upon their weights and biases. The Bayesian neural network is trained +to minimize the evidence lower bound (ELBO), which is a trade-off between the simplicity +of the prior (complexity loss) and the complexity of the data (likelihood loss). +The likelihood loss is the standard loss function used in non-Bayesian ASR transformers +(CTC + negative-log likelihood), the complexity loss is the Kullback-Leibler divergence between +variational posterior and prior. Sub-word units estimated with Byte Pair Encoding (BPE) are +used as basic recognition tokens. Training is performed on the full LibriSpeech dataset (960 h). + +The best model is the average of the checkpoints from the last 5 epochs. + +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, tokens (e.g., characters instead of BPE), +training split (e.g., train-clean 100 rather than the full one), and many +other possible variations. 
+ + +Authors + * Jianyuan Zhong 2020 + * Mirco Ravanelli 2020 + * Peter Plantinga 2020 + * Samuele Cornell 2020, 2021, 2022 + * Titouan Parcollet 2021, 2022 + * Luca Della Libera 2023 +""" + +import os +import sys +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_bos, _ = batch.tokens_bos + + # compute features + feats = self.hparams.compute_features(wavs) + current_epoch = self.hparams.epoch_counter.current + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + # Add feature augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels(tokens_bos) + + # forward modules + src = self.modules.CNN(feats) + + enc_out, pred = self.modules.Transformer( + src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index + ) + + # output layer for ctc log-probabilities + logits = self.modules.ctc_lin(enc_out) + p_ctc = self.hparams.log_softmax(logits) + + # output layer for seq2seq log-probabilities + pred = self.modules.seq_lin(pred) + p_seq = self.hparams.log_softmax(pred) + + # Compute outputs + hyps = None + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if any([is_valid_search, is_test_search]): + # Note: For valid_search, for the sake of efficiency, we only perform beamsearch 
with + # limited capacity and no LM to give user some idea of how the AM is doing + + # Decide searcher for inference: valid or test search + if stage == sb.Stage.VALID: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + else: + hyps, _, _, _ = self.hparams.test_search( + enc_out.detach(), wav_lens + ) + + return p_ctc, p_seq, wav_lens, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + (p_ctc, p_seq, wav_lens, hyps) = predictions + + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + tokens, tokens_lens = batch.tokens + + if stage == sb.Stage.TRAIN: + if hasattr(self.hparams, "fea_augment"): + tokens = self.hparams.fea_augment.replicate_labels(tokens) + tokens_lens = self.hparams.fea_augment.replicate_labels( + tokens_lens + ) + tokens_eos = self.hparams.fea_augment.replicate_labels( + tokens_eos + ) + tokens_eos_lens = self.hparams.fea_augment.replicate_labels( + tokens_eos_lens + ) + + loss_seq = self.hparams.seq_cost( + p_seq, tokens_eos, length=tokens_eos_lens + ).sum() + + loss_ctc = self.hparams.ctc_cost( + p_ctc, tokens, wav_lens, tokens_lens + ).sum() + + loss = ( + self.hparams.ctc_weight * loss_ctc + + (1 - self.hparams.ctc_weight) * loss_seq + + self.hparams.kl_div_weight * self.modules.Transformer.kl_div + ) + + if stage != sb.Stage.TRAIN: + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if current_epoch % valid_search_interval == 0 or ( + stage == sb.Stage.TEST + ): + # Decode token terms to words + predicted_words = [ + tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps + ] + target_words = [wrd.split(" ") for wrd in batch.wrd] + self.wer_metric.append(ids, predicted_words, target_words) + + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) + return loss + + def 
on_evaluate_start(self, max_key=None, min_key=None): + """perform checkpoint average if needed""" + super().on_evaluate_start() + + ckpts = self.checkpointer.find_checkpoints( + max_key=max_key, min_key=min_key + ) + ckpt = sb.utils.checkpoints.average_checkpoints( + ckpts, recoverable_name="model" + ) + + self.hparams.model.load_state_dict(ckpt, strict=True) + self.hparams.model.eval() + print("Loaded the average") + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.acc_metric = self.hparams.acc_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["ACC"] = self.acc_metric.summarize() + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + or stage == sb.Stage.TEST + ): + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID: + lr = self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"ACC": stage_stats["ACC"], "epoch": epoch}, + max_keys=["ACC"], + num_to_keep=self.hparams.avg_checkpoints, + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + 
self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + # save the averaged checkpoint at the end of the evaluation stage + # delete the rest of the intermediate checkpoints + # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint + self.checkpointer.save_and_keep_only( + meta={"ACC": 1.1, "epoch": epoch}, + max_keys=["ACC"], + num_to_keep=1, + ) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! 
otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + valtest_datasets = [valid_data] + [i for k, i in test_datasets.items()] + + # We get the tokenizer as we need it to encode the labels when creating + # mini-batches. + tokenizer = hparams["tokenizer"] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(valtest_datasets, audio_pipeline) + + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline_train(wav): + # Speed Perturb is done here so it is multi-threaded with the + # workers of the dataloader (faster). + if "speed_perturb" in hparams: + sig = sb.dataio.dataio.read_audio(wav) + + sig = hparams["speed_perturb"](sig.unsqueeze(0)).squeeze(0) + else: + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline_train) + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + yield wrd + tokens_list = tokenizer.encode_as_ids(wrd) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # 1. 
# Dataset prep (parsing Librispeech) + from librispeech_prepare import prepare_librispeech # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams) + + # We download the pretrained LM from HuggingFace (or elsewhere depending on + # the path given in the YAML file). The tokenizer is loaded at the same time. + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # ################################################################### + # Define Bayesian modules + # ################################################################### + from speechbrain.nnet.attention import PositionalwiseFeedForward + + try: + from bayestorch.distributions import ( + get_log_scale_normal, + get_softplus_inv_scale_normal, + ) + from bayestorch.nn import VariationalPosteriorModule + except ImportError: + raise ImportError( + "Please install BayesTorch to use BayesSpeech (e.g. 
`pip install bayestorch>=0.0.3`)" + ) + + # Minimize number of modifications to existing training/evaluation loops + # NOTE: differently from https://arxiv.org/abs/2301.11276, we employ the standard + # reparameterization trick instead of the local reparameterization trick + class BBBModule(VariationalPosteriorModule): + def forward(self, *args, **kwargs): + if self.training: + output, self.kl_div = super().forward( + *args, num_mc_samples=1, return_kl_div=True, **kwargs + ) + return output + output, self.kl_div = ( + super().forward( + *args, + num_mc_samples=hparams["num_eval_mc_samples"], + **kwargs, + ), + 0.0, + ) + return output + + parameters = [] + for module in hparams["modules"]["Transformer"].modules(): + if isinstance(module, PositionalwiseFeedForward): + parameters += list(module.parameters()) + prior_builder, prior_kwargs = get_log_scale_normal( + parameters, + log_scale=hparams["normal_prior_log_scale"], + ) + posterior_builder, posterior_kwargs = get_softplus_inv_scale_normal( + parameters, + softplus_inv_scale=hparams["normal_posterior_softplus_inv_scale"], + requires_grad=True, + ) + hparams["Transformer"] = hparams["modules"]["Transformer"] = BBBModule( + hparams["modules"]["Transformer"], + prior_builder, + prior_kwargs, + posterior_builder, + posterior_kwargs, + parameters, + ) + hparams["model"] = torch.nn.ModuleList( + [hparams["CNN"], hparams["seq_lin"], hparams["ctc_lin"]] + ) + hparams["ctc_scorer"].ctc_fc = hparams["ctc_lin"] + hparams["test_search"].modules = hparams["valid_search"].modules = [ + hparams["Transformer"], + hparams["seq_lin"], + ] + hparams["checkpointer"].recoverables["model"] = hparams["model"] + hparams["checkpointer"].add_recoverable( + "Transformer", + hparams["Transformer"], + ) + # ################################################################### + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + opt_class=hparams["Adam"], + hparams=hparams, + run_opts=run_opts, + 
checkpointer=hparams["checkpointer"], + ) + + # adding objects to trainer: + asr_brain.tokenizer = hparams["tokenizer"] + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + max_key="ACC", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/LibriSpeech/ASR/transformer/train_speechllm.py b/recipes/LibriSpeech/ASR/transformer/train_speechllm.py new file mode 100644 index 0000000000..c82bf4c852 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/train_speechllm.py @@ -0,0 +1,726 @@ +#!/usr/bin/env python3 +"""Recipe for training a SpeechLLM ASR system with LibriSpeech. + +The system employs a speech SSL encoder, and a pre-trained LLM decoder. +The speech features are projected to the LLM embedding space using a linear layer projection. 
+The LLM is trained using the cross-entropy loss on the text tokens excluding the prompt. + +An input sequence is typically constructed like this: + <|start_of_audio|> audio features <|end_of_audio|> + +This script supports both offline and online SSL/cached features mode. +To extract the features offline, run the `extract_ssl_feats.py` script, and use +the correct yaml file for this script. + +python extract_ssl_feats.py hparams/extract_ssl_feats.yaml + --data_folder path/to/LibriSpeech \ + --output_folder path/to/feats_cache \ + --ssl_hub path/to/wavlm-large \ + --feats_cache_dir path/to/feats_cache + ...other_hparams... + +python train_speechllm.py hparams/speechllm_ssl_feats.yaml + --feats_cache_dir path/to/feats_cache \ + ...other_hparams... + +Authors +------- + * Adel Moumen, 2025 +""" + +import os +import sys +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.integrations.hdf5.cached_item import CachedHDF5DynamicItem +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +def get_multimodal_attention_mask(wav, wav_lens, txt, txt_lens, device): + """Create attention mask for multimodal sequence. + + Arguments + --------- + wav : torch.Tensor + Audio features tensor of shape (batch_size, L_audio, ...) + wav_lens : torch.Tensor + Relative lengths of audio features, shape (batch_size,) + txt : torch.Tensor + Text embeddings tensor of shape (batch_size, txt_len, ...) + This is txt_embds which includes: [start_of_audio, end_of_audio, prompt, bos, text] + txt_lens : torch.Tensor + Relative lengths of text tokens, shape (batch_size,) + device : torch.device + Device to create the mask on + + Returns + ------- + attention_mask : torch.Tensor + Boolean attention mask of shape (batch_size, L_audio + txt_len). 
+ + Important + --------- + The actual multimodal embedding order in this recipe is: + + [start_of_audio] + [audio_feats] + [end_of_audio + prompt + bos + text] + + i.e., the first text token (<|start_of_audio|>) is placed *before* audio. + Therefore, we must build the mask with the same layout: + position 0 -> <|start_of_audio|> + positions [1 : 1+L_audio] -> audio feats + positions [1+L_audio : ] -> remaining text tokens (txt[:, 1:]) + """ + batch_size = wav.size(0) + wav_len = wav.size(1) + txt_len = txt.size(1) + # Total length matches multimodal_embds: 1 (start token) + L_audio + (txt_len - 1) + total_len = wav_len + txt_len + attention_mask = torch.zeros( + batch_size, total_len, dtype=torch.bool, device=device + ) + for i in range(batch_size): + # Match SpeechBrain convention (see S2SGreedySearcher): round relative lengths. + actual_wav_len = int(torch.round(wav_lens[i] * wav_len).item()) + actual_txt_len = int(torch.round(txt_lens[i] * txt_len).item()) + + # (1) start_of_audio token (always valid) + attention_mask[i, 0] = True + + # (2) audio features + attention_mask[i, 1 : 1 + actual_wav_len] = True + + # (3) remaining text tokens (exclude the start token already handled above) + remaining_txt = max(actual_txt_len - 1, 0) + attention_mask[i, 1 + wav_len : 1 + wav_len + remaining_txt] = True + return attention_mask + + +# Define training procedure +class ASR(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities. + + The forward pass processes either cached SSL features or raw audio waveforms, + projects them to the LLM embedding space, and concatenates with text embeddings + to form a multimodal sequence. 
+ + Sequence structure: + [start_of_audio] + [audio_features] + [end_of_audio + prompt + bos + text] + + Arguments + --------- + batch : PaddedBatch + Batch containing audio/features, tokens, and metadata + stage : sb.Stage + Current stage (TRAIN, VALID, or TEST) + + Returns + ------- + logits : torch.Tensor + Model output logits of shape (batch_size, seq_len, vocab_size) + hyps : list or None + Decoded hypotheses (only during validation/test, None during training) + """ + batch = batch.to(self.device) + tokens_bos, tokens_bos_lens = batch.tokens_bos + prompt_len = batch.prompt_len + + use_feats = bool(getattr(self.hparams, "use_feats", False)) + if use_feats: + if getattr(batch, "feats", None) is None: + raise ValueError( + "`use_feats=True` but the batch does not provide `feats`. " + "Check `feats_cache_dir` and the data pipeline." + ) + audio_feats, audio_feats_lens = batch.feats + else: + wavs, wav_lens = batch.sig + wavs = self.hparams.normalize(wavs, wav_lens) + audio_feats = self.modules.ssl(wavs, wav_lens) + audio_feats_lens = wav_lens + # R^L*D -> R^(L/R)*(D*R) + audio_down_feats = self.modules.feat_downsampler(audio_feats) + # R^D' -> R^llm_emb_size + projected_audio_feats = self.modules.proj(audio_down_feats) + txt_embds = self.txt_embedding(tokens_bos) + multimodal_embds = torch.cat( + [ + txt_embds[:, 0].unsqueeze(1), # B, D -> B, 1, D + projected_audio_feats, + txt_embds[:, 1:], + ], + dim=1, + ) + # attention_mask should be all the true audio features + all the true text features + attention_mask = get_multimodal_attention_mask( + projected_audio_feats, + audio_feats_lens, + txt_embds, + tokens_bos_lens, + self.device, + ) + logits = self.modules.llm( + inputs_embeds=multimodal_embds, attention_mask=attention_mask + ).logits + + hyps = None + if stage != sb.Stage.TRAIN: + audio_and_prompt_len = projected_audio_feats.shape[1] + int( + prompt_len[0].item() + ) + inputs_embeds = multimodal_embds[:, :audio_and_prompt_len] + hyps = self.modules.searcher( 
+ inputs_embeds, + audio_feats_lens, + attention_mask[:, :audio_and_prompt_len], + ) + return logits, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the cross-entropy loss given predictions and targets. + + The loss is computed only on text tokens, with audio feature positions + masked out using ignore_index. During validation/test, also computes + CER and WER metrics. + + Arguments + --------- + predictions : tuple + (logits, hyps) from compute_forward + batch : PaddedBatch + Batch containing target tokens and metadata + stage : sb.Stage + Current stage (TRAIN, VALID, or TEST) + + Returns + ------- + loss : torch.Tensor + Cross-entropy loss value + """ + logits, hyps = predictions + tokens_eos, _ = batch.tokens_eos + ids = batch.id + + num_audio_feats = logits.shape[1] - tokens_eos.shape[1] + # We prepend `ignore_index` to the tokens_eos to ignore them in the loss. + # This corresponds to the audio features. + target_tokens = torch.cat( + [ + torch.full( + (tokens_eos.shape[0], num_audio_feats), + self.hparams.ignore_index, + device=self.device, + ), + tokens_eos, + ], + dim=1, + ).long() + # compute the cross entropy loss + loss = torch.nn.functional.cross_entropy( + logits.view(-1, logits.shape[-1]), + target_tokens.view(-1), + ignore_index=self.hparams.ignore_index, + ) + if stage != sb.Stage.TRAIN: + # replace ignore_index with pad token + target_tokens = target_tokens.masked_fill( + target_tokens == self.hparams.ignore_index, + self.tokenizer.pad_token_id, + ) + preds = self.tokenizer.batch_decode( + hyps[0], skip_special_tokens=True + ) + preds_words = [pred.split(" ") for pred in preds] + targets = self.tokenizer.batch_decode( + target_tokens, skip_special_tokens=True + ) + targets_words = [target.split(" ") for target in targets] + self.cer_metric.append(ids, preds_words, targets_words) + self.wer_metric.append(ids, preds_words, targets_words) + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the 
beginning of each epoch. + + Initializes metrics for validation and test stages. + + Arguments + --------- + stage : sb.Stage + Current stage (TRAIN, VALID, or TEST) + epoch : int + Current epoch number + """ + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch. + + Logs statistics, updates learning rate, and saves checkpoints. + + Arguments + --------- + stage : sb.Stage + Current stage (TRAIN, VALID, or TEST) + stage_loss : float + Average loss for this stage + epoch : int + Current epoch number + """ + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID: + old_lr, new_lr = self.hparams.scheduler(epoch) + sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + + # Optional: SSL fine-tuning LR scheduling (only when SSL is unfrozen). 
+ if hasattr(self, "ssl_optimizer") and hasattr( + self.hparams, "lr_annealing_ssl" + ): + old_lr_ssl, new_lr_ssl = self.hparams.lr_annealing_ssl( + stage_stats["WER"] + ) + sb.nnet.schedulers.update_learning_rate( + self.ssl_optimizer, new_lr_ssl + ) + + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": old_lr, + "steps": steps, + "optimizer": optimizer, + } + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"], "epoch": epoch}, + min_keys=["WER"], + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + def init_optimizers(self): + """Initialize optimizers for the model. + + Creates separate optimizers for the main model and optionally for the SSL encoder + if it's not frozen. Registers optimizers with the checkpointer for resuming training. + """ + self.optimizer = self.hparams.opt(self.hparams.model.parameters()) + self.optimizers_dict = {"model_optimizer": self.optimizer} + + ssl_frozen = getattr(self.hparams, "ssl_frozen", True) + if not ssl_frozen: + self.ssl_optimizer = self.hparams.opt_ssl( + self.modules.ssl.parameters() + ) + self.optimizers_dict["ssl_optimizer"] = self.ssl_optimizer + + if self.checkpointer is not None: + self.checkpointer.add_recoverable("model_optimizer", self.optimizer) + if not ssl_frozen: + self.checkpointer.add_recoverable( + "ssl_optimizer", self.ssl_optimizer + ) + + +def dataio_prepare(hparams, tokenizer): + """Prepares the datasets and dynamic pipelines for the brain class. + + This function sets up the data pipelines for both training and evaluation. 
+ It handles two modes: + 1. Standard audio mode: loads raw audio files and processes them on-the-fly + 2. Cached features mode: loads pre-extracted SSL features from HDF5 cache + + Arguments + --------- + hparams : dict + Hyperparameters dictionary containing data paths, token indices, etc. + tokenizer : transformers.PreTrainedTokenizer + Tokenizer for encoding text tokens + + Returns + ------- + train_data : DynamicItemDataset + Training dataset + valid_data : DynamicItemDataset + Validation dataset + test_datasets : dict + Dictionary of test datasets (keyed by split name) + tokenizer : transformers.PreTrainedTokenizer + The tokenizer (returned for convenience) + train_batch_sampler : DynamicBatchSampler or None + Batch sampler for training if dynamic batching is enabled + valid_batch_sampler : DynamicBatchSampler or None + Batch sampler for validation if dynamic batching is enabled + """ + data_folder = hparams["data_folder"] + # Cached-feats mode should be enabled ONLY via the explicit `use_feats` flag. + # Do not use `hparams["ssl"]` as a boolean (it's a model object). + use_feats = bool(hparams.get("use_feats", False)) + + if use_feats: + feats_cache_dir = hparams.get("feats_cache_dir", None) + if not feats_cache_dir: + raise ValueError( + "`use_feats=True` requires `feats_cache_dir` to be set " + "(directory produced by `extract_ssl_feats.py`)." + ) + else: + # On-the-fly SSL feature extraction requires an SSL encoder module. + modules = hparams.get("modules", {}) + if not (isinstance(modules, dict) and "ssl" in modules): + raise ValueError( + "`use_feats=False` requires an SSL encoder under `modules.ssl` " + "to extract features on-the-fly. Either set `use_feats=True` " + "and provide `feats_cache_dir`, or add `ssl` to `modules`." 
+ ) + + logger.info("use_feats=%s", use_feats) + # Token indices and prompt setup + bos_index = hparams["bos_index"] + eos_index = hparams["eos_index"] + pad_index = hparams["pad_token"] + + # Convert special tokens to IDs with error handling + start_of_audio_token = "<|start_of_audio|>" + end_of_audio_token = "<|end_of_audio|>" + + start_of_audio_index = tokenizer.convert_tokens_to_ids(start_of_audio_token) + end_of_audio_index = tokenizer.convert_tokens_to_ids(end_of_audio_token) + + logger.info( + f"Token indices - BOS: {bos_index}, EOS: {eos_index}, PAD: {pad_index}, " + f"start_of_audio: {start_of_audio_index}, end_of_audio: {end_of_audio_index}" + ) + logger.info(f"Prompt: '{hparams['prompt']}'") + + prompt_ids = ( + tokenizer( + hparams["prompt"], return_tensors="pt", add_special_tokens=False + ) + .input_ids.view(-1) + .tolist() + ) + + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens", "prompt_len" + ) + def text_pipeline(wrd): + """Process text through tokenization pipeline. 
+
+        Creates the following sequence structure:
+            tokens_bos: [<|start_of_audio|>, <|end_of_audio|>, prompt_tokens, bos, text_tokens]
+            tokens_eos: [text_tokens, eos]
+
+        Arguments
+        ---------
+        wrd : str
+            Word/transcription text
+
+        Yields
+        ------
+        wrd : str
+            Original word (unchanged)
+        tokens_list : list
+            List of token IDs for the text (without special tokens)
+        tokens_bos : torch.LongTensor
+            Token sequence with start_of_audio, end_of_audio, prompt, bos, and text
+        tokens_eos : torch.LongTensor
+            Token sequence with text and eos
+        tokens : torch.LongTensor
+            Token IDs for text only (same as tokens_list but as tensor)
+        prompt_len : int
+            Length of prompt tokens (start_of_audio + end_of_audio + prompt)
+        """
+        yield wrd
+        tokens_list = tokenizer(wrd, add_special_tokens=False).input_ids
+        yield tokens_list
+        tokens_bos = torch.LongTensor(
+            [start_of_audio_index]
+            + [end_of_audio_index]
+            + prompt_ids
+            + [bos_index]
+            + tokens_list
+        )
+        yield tokens_bos
+        tokens_eos = torch.LongTensor(tokens_list + [eos_index])
+        yield tokens_eos
+        tokens = torch.LongTensor(tokens_list)
+        yield tokens
+        prompt_len = len(
+            [start_of_audio_index] + [end_of_audio_index] + prompt_ids
+        )
+        yield prompt_len
+
+    # Define dynamic items based on mode
+    # Note: build_dynamic_items is defined outside the if/else to avoid scope issues
+    def build_dynamic_items():
+        """Build dynamic items list based on whether we're using cached features or raw audio.
+
+        Returns
+        -------
+        list
+            List of dynamic item pipelines
+        """
+        if use_feats:
+            feats_pipeline = CachedHDF5DynamicItem(
+                hparams["feats_cache_dir"],
+                file_mode="r",
+                takes=["id"],
+                provides=["feats"],
+                compression="gzip",
+            )
+            return [text_pipeline, feats_pipeline]
+        else:
+
+            @sb.utils.data_pipeline.takes("wav")
+            @sb.utils.data_pipeline.provides("sig")
+            def audio_pipeline(wav):
+                """Load audio from file path.
+ + Arguments + --------- + wav : str + Path to audio file + + Returns + ------- + sig : torch.Tensor + Audio waveform + """ + sig = sb.dataio.dataio.read_audio(wav) + return sig + + return [text_pipeline, audio_pipeline] + + # Set output keys based on mode + if use_feats: + output_keys = [ + "id", + "wrd", + "tokens_bos", + "tokens_eos", + "tokens", + "prompt_len", + "feats", + ] + else: + output_keys = [ + "id", + "sig", + "wrd", + "tokens_bos", + "tokens_eos", + "tokens", + "prompt_len", + ] + + def _create_dataset(csv_path, sorting="ascending"): + """Create a dataset from CSV file with optional sorting. + + Arguments + --------- + csv_path : str + Path to CSV file containing dataset metadata + sorting : str + Sorting strategy: "ascending", "descending", or "random" + + Returns + ------- + dataset : DynamicItemDataset + Configured dataset with dynamic pipelines applied + """ + assert sorting in ["ascending", "descending", "random"], ( + f"sorting must be one of ['ascending', 'descending', 'random'], got '{sorting}'" + ) + + dataset = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_path, + replacements={"data_root": data_folder}, + dynamic_items=build_dynamic_items(), + output_keys=output_keys, + ) + if sorting == "ascending": + dataset = dataset.filtered_sorted(sort_key="duration") + hparams["train_dataloader_opts"]["shuffle"] = False + elif sorting == "descending": + dataset = dataset.filtered_sorted(sort_key="duration", reverse=True) + hparams["train_dataloader_opts"]["shuffle"] = False + elif sorting == "random": + pass + return dataset + + # Create training dataset with sorting logic + train_data = _create_dataset( + hparams["train_csv"], sorting=hparams["sorting"] + ) + valid_data = _create_dataset(hparams["valid_csv"], sorting="ascending") + + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = _create_dataset(csv_file, sorting="ascending") + + # Dynamic batch sampling + 
train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **hparams["dynamic_batch_sampler_train"], + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **hparams["dynamic_batch_sampler_valid"], + ) + + return ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # 1. # Dataset prep (parsing Librispeech) + from librispeech_prepare import prepare_librispeech # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # here we create the datasets objects as well as tokenization and encoding + tokenizer = hparams["llm"].tokenizer + + ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams, tokenizer) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + 
) + asr_brain.tokenizer = tokenizer + asr_brain.txt_embedding = ( + asr_brain.raw_modules.llm.model.get_input_embeddings() + ) + # adding objects to trainer: + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + min_key="WER", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/LibriSpeech/ASR/transformer/train_with_whisper.py b/recipes/LibriSpeech/ASR/transformer/train_with_whisper.py new file mode 100644 index 0000000000..71266fc648 --- /dev/null +++ b/recipes/LibriSpeech/ASR/transformer/train_with_whisper.py @@ -0,0 +1,344 @@ +#!/usr/bin/env python3 +"""Recipe for training a whisper-based ASR system with librispeech. +The system employs whisper from OpenAI (https://cdn.openai.com/papers/whisper.pdf). +This recipe take the whisper encoder-decoder to fine-tune on the NLL. 
+
+If you want to only use the whisper encoder system, please refer to the recipe
+speechbrain/recipes/LibriSpeech/ASR/CTC/train_with_whisper.py
+
+To run this recipe, do the following:
+> python train_with_whisper.py hparams/train_hf_whisper.yaml
+
+To add adapters and train only a fraction of the parameters, do:
+> python train_with_whisper.py hparams/train_whisper_lora.yaml
+
+Authors
+ * Peter Plantinga 2024
+ * Adel Moumen 2022, 2024
+ * Titouan Parcollet 2022
+"""
+
+import os
+import sys
+from pathlib import Path
+
+import torch
+from hyperpyyaml import load_hyperpyyaml
+
+import speechbrain as sb
+from speechbrain.utils.data_utils import undo_padding
+from speechbrain.utils.distributed import if_main_process, run_on_main
+from speechbrain.utils.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+# Define training procedure
+class ASR(sb.Brain):
+    def compute_forward(self, batch, stage):
+        """Forward computations from the waveform batches to the output probabilities."""
+        batch = batch.to(self.device)
+        wavs, wav_lens = batch.sig
+        bos_tokens, bos_tokens_lens = batch.tokens_bos
+
+        # Add waveform augmentation if specified.
+        if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"):
+            wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens)
+            bos_tokens = self.hparams.wav_augment.replicate_labels(bos_tokens)
+            bos_tokens_lens = self.hparams.wav_augment.replicate_labels(
+                bos_tokens_lens
+            )
+
+        # We compute the padding mask and replace the values with the pad_token_id
+        # that the Whisper decoder expects to see.
+ abs_tokens_lens = torch.round( + bos_tokens_lens * bos_tokens.shape[1] + ).long() + pad_mask = ( + torch.arange(abs_tokens_lens.max(), device=self.device)[None, :] + < abs_tokens_lens[:, None] + ) + bos_tokens[~pad_mask] = self.tokenizer.pad_token_id + + # Forward encoder + decoder + enc_out, logits, _ = self.modules.whisper(wavs, bos_tokens) + log_probs = self.hparams.log_softmax(logits) + + hyps = None + if stage == sb.Stage.VALID: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + elif stage == sb.Stage.TEST: + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + + return log_probs, hyps, wav_lens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss NLL given predictions and targets.""" + + (log_probs, hyps, wav_lens) = predictions + batch = batch.to(self.device) + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens + ) + + loss = self.hparams.nll_loss( + log_probs, tokens_eos, length=tokens_eos_lens + ) + + if stage != sb.Stage.TRAIN: + tokens, tokens_lens = batch.tokens + + # Decode token terms to words + predicted_words = [ + self.tokenizer.decode(t, skip_special_tokens=True).strip() + for t in hyps + ] + + # Convert indices to words + target_words = undo_padding(tokens, tokens_lens) + target_words = self.tokenizer.batch_decode( + target_words, skip_special_tokens=True + ) + if hasattr(self.hparams, "normalized_transcripts"): + if hasattr(self.tokenizer, "normalize"): + normalized_fn = self.tokenizer.normalize + else: + normalized_fn = self.tokenizer._normalize + + predicted_words = [ + normalized_fn(text).split(" ") for text in predicted_words + ] + + target_words = [ + normalized_fn(text).split(" ") for text in target_words + 
] + else: + predicted_words = [text.split(" ") for text in predicted_words] + target_words = [text.split(" ") for text in target_words] + + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + lr = self.hparams.lr_annealing_whisper.current_lr + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch, "lr": lr}, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. 
+ """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_loader_kwargs"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_loader_kwargs"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + if ( + "normalized_transcripts" in hparams + and hparams["normalized_transcripts"] + ): + wrd = tokenizer.normalize(wrd) + yield wrd + tokens_list = tokenizer.encode(wrd, add_special_tokens=False) + yield tokens_list + tokens_list = tokenizer.build_inputs_with_special_tokens(tokens_list) + tokens_bos = torch.LongTensor(tokens_list[:-1]) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list[1:]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "tokens_list", "tokens_bos", "tokens_eos", "tokens"], + ) + + return train_data, valid_data, test_datasets + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + from librispeech_prepare import prepare_librispeech # noqa + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # Defining tokenizer and loading it + tokenizer = 
hparams["whisper"].tokenizer + + # here we create the datasets objects as well as tokenization and encoding + train_data, valid_data, test_datasets = dataio_prepare(hparams, tokenizer) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + opt_class=hparams["whisper_opt_class"], + ) + + # We load the pretrained whisper model + if "pretrainer" in hparams.keys(): + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected(asr_brain.device) + + # We dynamically add the tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for Whisper. + asr_brain.tokenizer = tokenizer + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["train_loader_kwargs"], + valid_loader_kwargs=hparams["valid_loader_kwargs"], + ) + + # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + test_loader_kwargs=hparams["test_loader_kwargs"], + min_key="WER", + ) diff --git a/recipes/LibriSpeech/G2P/README.md b/recipes/LibriSpeech/G2P/README.md index 642ddbaa97..3aaf2a78c4 100644 --- a/recipes/LibriSpeech/G2P/README.md +++ b/recipes/LibriSpeech/G2P/README.md @@ -14,10 +14,23 @@ The datasets are derived from the LibriSpeech-Alignments dataset (https://zenodo Decoding is performed with a beamsearch, optionally enhanced with language models. + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +## How to run To run this recipe, do the following: -> python train.py +> python train.py <hyperparameter file> + Example: -> python train.py hparams/hparams_g2p_transformer.yaml +```shell +python train.py hparams/hparams_g2p_transformer.yaml +``` RNN Model --------- @@ -55,7 +68,7 @@ To train a language model, use the `train_lm.py` script provided. For an RNN-based language model: > python train_lm.py hparams/hparams_lm_rnn.yaml -For a transformer-based language modle: +For a transformer-based language model: > python train_lm.py hparams/hparams_lm_transformer.yaml To use a language model during training or inference @@ -69,11 +82,11 @@ Hyperparameter Optimization This recipe supports hyperparameter optimization via Oríon or other similar tools. For details on how to set up hyperparameter optimization, refer to the "Hyperparameter Optimization" tutorial in the Advanced Tutorials section -on the SpeechBrian website: +on the SpeechBrain website: https://speechbrain.github.io/tutorial_advanced.html -A supplemental hyperparameter file is provided for hyperparameter optimiszation, +A supplemental hyperparameter file is provided for hyperparameter optimization, which will turn off checkpointing and limit the number of epochs: hparams/hpopt.yaml @@ -84,8 +97,8 @@ Pretrained Models ----------------- | Release | hyperparams file | Sentence Test PER | Homograph % | Model link | |:-------------:|:--------------------------:| --------:| --------------------------------------------------------------------------------------------------:| -| 0.5.12 | train_g2p_rnn.yaml | 2.72 | 94% | https://drive.google.com/drive/folders/1jpVDz6Kqtl4qp3_dsuK767mjNlqkIxTH?usp=sharing | -| 0.5.12 | train_g2p_transformer.yaml | 2.89 | 92% | https://drive.google.com/drive/folders/1lbSjCKUit8H3FCzaDJmfBDJOkcDRH3XI?usp=sharing | +| 0.5.12 | train_g2p_rnn.yaml | 2.72 | 94% | 
https://www.dropbox.com/sh/qmcl1obp8pxqaap/AAC3yXvjkfJ3mL-RKyAUxPdNa?dl=0 | +| 0.5.12 | train_g2p_transformer.yaml | 2.89 | 92% | https://www.dropbox.com/sh/zhrxg7anuhje7e8/AADTeJtdsja_wClkE2DsF9Ewa?dl=0 | NOTE: Sentence PER is reported as achieved at the end of the sentence training step. Nominal PER on librispeech data may increase post fine-tuning due to a distribution shift in labeling, if reevaluated. @@ -93,8 +106,12 @@ To replicate the result exactly, train with --homograph_epochs=0. Pretrained language models can be found at the following URLs: -* **RNN**: https://drive.google.com/drive/folders/1Zv8SNYIXzboFatSRpmoNgRyVXl_6ucir?usp=sharing -* **Transformer**: https://drive.google.com/drive/folders/1MPceslDRVKW7sk1Q6W6nSaWETEAqp5t5?usp=sharing +* **RNN**: https://www.dropbox.com/sh/pig0uk80xxii7cg/AACQ1rrRLYthvpNZ5FadPLtRa?dl=0 +* **Transformer**: https://www.dropbox.com/sh/tkf6di10edpz4i6/AAArnGAkE0bEEOvOGfc6KWuma?dl=0 + + +The best model is available on HuggingFace: +https://huggingface.co/speechbrain/soundchoice-g2p Training Time ------------- @@ -113,7 +130,7 @@ evaluation, use `--beam_search_beam_size 1`. # Pretrained Models Pretrained models can be found on the following Google drive: -https://drive.google.com/drive/folders/1nk9ms8cQ5N07wOG4oTi9h5a1dmiPmvnv?usp=sharing +https://www.dropbox.com/sh/3m4u7xda4xsh2ob/AAAYpOJHRhYbUHmuQtybgzrea?dl=0 # **About SpeechBrain** @@ -126,6 +143,15 @@ https://drive.google.com/drive/folders/1nk9ms8cQ5N07wOG4oTi9h5a1dmiPmvnv?usp=sha Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, @@ -135,4 +161,4 @@ Please, cite SpeechBrain if you use it for your research or business. primaryClass={eess.AS}, note={arXiv:2106.04624} } -``` \ No newline at end of file +``` diff --git a/recipes/LibriSpeech/G2P/evaluate.py b/recipes/LibriSpeech/G2P/evaluate.py index d482ec1a55..cd3b5a1ab2 100644 --- a/recipes/LibriSpeech/G2P/evaluate.py +++ b/recipes/LibriSpeech/G2P/evaluate.py @@ -1,25 +1,31 @@ """Recipe for evaluating a grapheme-to-phoneme system with librispeech lexicon. + The script may be use in isolation or in combination with Orion to fit hyperparameters that do not require model retraining (e.g. 
Beam Search) + +Authors + * Mirco Ravanelli 2022 + * Artem Ploujnikov 2022 """ +import itertools +import math +import sys +from types import SimpleNamespace +import torch from hyperpyyaml import load_hyperpyyaml +from tqdm.auto import tqdm +from train import dataio_prep, load_dependencies + +import speechbrain as sb from speechbrain.dataio.batch import PaddedBatch +from speechbrain.integrations.huggingface.wordemb.util import expand_to_chars from speechbrain.lobes.models.g2p.dataio import get_sequence_key from speechbrain.utils import hpopt as hp -from speechbrain.wordemb.util import expand_to_chars -from train import dataio_prep, load_dependencies -from types import SimpleNamespace -from tqdm.auto import tqdm -import math -import itertools -import speechbrain as sb -import torch -import sys -import logging +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class G2PEvaluator: @@ -120,7 +126,7 @@ def evaluate_batch(self, batch): phns, phn_lens = batch.phn_encoded self.per_metrics.append( - ids, hyps, phns, None, phn_lens, self.hparams.out_phoneme_decoder, + ids, hyps, phns, None, phn_lens, self.hparams.out_phoneme_decoder ) def _get_phonemes(self, grapheme_encoded, phn_encoded=None, char=None): @@ -131,10 +137,8 @@ def _get_phonemes(self, grapheme_encoded, phn_encoded=None, char=None): --------- grapheme_encoded: speechbrain.dataio.batch.PaddedData An encoded grapheme sequence - - phn_encoded_bos: speechbrain.dataio.batch.PaddedData + phn_encoded: speechbrain.dataio.batch.PaddedData An encoded phoneme sequence (optional) - char: str Raw character input (needed for word embeddings) @@ -147,9 +151,9 @@ def _get_phonemes(self, grapheme_encoded, phn_encoded=None, char=None): """ _, char_word_emb = None, None if self._grapheme_word_separator_idx is None: - self._grapheme_word_separator_idx = self.hparams.grapheme_encoder.lab2ind[ - " " - ] + self._grapheme_word_separator_idx = ( + 
self.hparams.grapheme_encoder.lab2ind[" "] + ) if not phn_encoded: grapheme_encoded_data, grapheme_lens = grapheme_encoded phn_encoded = ( @@ -228,7 +232,7 @@ def _flatten_results_jumbled(self, results): Returns ------- result: list - the concatenated reuslt + the concatenated result """ return [token for item_result in results for token in item_result] @@ -244,7 +248,7 @@ def _flatten_results_separated(self, results): Returns ------- result: list - the concatenated reuslt + the concatenated result """ result = [] for item_result in results: @@ -266,7 +270,7 @@ def _flatten_scores(self, hyps, scores): scores: list the scores corresponding to the hypotheses - Results + Returns ------- scores: list the scores corresponding to the hypotheses, @@ -296,9 +300,11 @@ def _split_words_seq(self, graphemes, length): --------- graphemes: torch.Tensor an encoded sequence of phonemes + length: torch.Tensor + The length of the corresponding inputs. - Returns - ------- + Yields + ------ graphemes: generator a generator representing a sequence of words """ @@ -323,6 +329,11 @@ def _add_delimiters(self, word): --------- word: torch.Tensor a tensor representing a word + + Returns + ------- + word: torch.Tensor + word with delimiters added. """ if self.grapheme_sequence_mode == "bos": word = torch.cat([self._bos, word]) @@ -338,6 +349,8 @@ def evaluate_epoch(self, dataset, dataloader_opts=None): --------- dataset: DynamicItemDataset a G2P dataset (same as the ones used for training) + dataloader_opts: dict + Additional options to pass to dataloader. 
Returns ------- @@ -372,7 +385,7 @@ def evaluate_epoch(self, dataset, dataloader_opts=None): return self.per_metrics.summarize() def _output_wer_file(self): - with open(self.hparams.eval_wer_file, "w") as w: + with open(self.hparams.eval_wer_file, "w", encoding="utf-8") as w: w.write("\nPER stats:\n") self.per_metrics.write_stats(w) print( @@ -389,7 +402,7 @@ def _output_wer_file(self): search_hparam_file = sys.argv[0] hparams_file, run_opts, overrides = hp_ctx.parse_arguments(sys.argv[1:]) device = run_opts.get("device", "cpu") - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Load dependencies diff --git a/recipes/LibriSpeech/G2P/hparams/hparams_g2p_rnn.yaml b/recipes/LibriSpeech/G2P/hparams/hparams_g2p_rnn.yaml index 7a8b482fd9..bcf282a3cc 100644 --- a/recipes/LibriSpeech/G2P/hparams/hparams_g2p_rnn.yaml +++ b/recipes/LibriSpeech/G2P/hparams/hparams_g2p_rnn.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Hyperparameter optimization (disabled by default) hpopt: False @@ -16,7 +16,7 @@ trial_id: null # Data paths output_folder: !ref results/RNN/ -data_folder: null # e.g. /localscratch/librig2p +data_folder: !PLACEHOLDER # e.g. 
/localscratch/librig2p save_folder: !ref /save train_log: !ref /train_log.txt use_tensorboard: True @@ -95,7 +95,7 @@ homograph_loss_weight: 2.0 lr: 0.002 save_for_pretrained: True -# Model parameters +####################### Model Parameters ####################################### output_neurons: !apply:speechbrain.utils.hparams.choice value: !ref choices: @@ -304,7 +304,7 @@ word_emb_enc: !new:speechbrain.lobes.models.g2p.model.WordEmbeddingEncoder norm_type: !ref word_emb: !apply:speechbrain.lobes.models.g2p.dataio.lazy_init - init: !name:speechbrain.wordemb.transformer.TransformerWordEmbeddings + init: !name:speechbrain.integrations.huggingface.wordemb.transformer.TransformerWordEmbeddings model: !ref log_softmax: !new:speechbrain.nnet.activations.Softmax @@ -342,60 +342,79 @@ lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM opt_class: !name:torch.optim.Adam lr: !ref +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: !ref + +# Scorer +coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer + vocab_size: !ref + + +scorer_lm: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + coverage: !ref + + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + coverage: !ref + beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref using_max_attn_shift: !ref max_attn_shift: !ref - coverage_penalty: !ref - ctc_weight: !ref + temperature: !ref + scorer: !ref beam_searcher_valid: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref 
linear: !ref - ctc_linear: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref using_max_attn_shift: !ref max_attn_shift: !ref - coverage_penalty: !ref - ctc_weight: !ref + temperature: !ref + scorer: !ref -beam_searcher_lm: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearchLM +beam_searcher_lm: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref - language_model: !ref bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref using_max_attn_shift: !ref max_attn_shift: !ref - coverage_penalty: !ref - ctc_weight: !ref - lm_weight: !ref temperature: !ref - temperature_lm: !ref + scorer: !ref lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler diff --git a/recipes/LibriSpeech/G2P/hparams/hparams_g2p_transformer.yaml b/recipes/LibriSpeech/G2P/hparams/hparams_g2p_transformer.yaml index 67c43fd907..064463945c 100644 --- a/recipes/LibriSpeech/G2P/hparams/hparams_g2p_transformer.yaml +++ b/recipes/LibriSpeech/G2P/hparams/hparams_g2p_transformer.yaml @@ -5,7 +5,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Hyperparameter optimization (disabled by default) hpopt: False @@ -93,11 +93,9 @@ lr: 0.0001 lr_min: 0.00001 lr_dont_halve_until_epoch: 1 lr_patience: 1 -adam_beta1: 0.99 -adam_beta2: 0.998 save_for_pretrained: True -# Model parameters +####################### Model Parameters ####################################### output_neurons: !apply:speechbrain.utils.hparams.choice value: !ref choices: @@ -311,7 +309,7 @@ word_emb_enc: !new:speechbrain.lobes.models.g2p.model.WordEmbeddingEncoder norm_type: !ref word_emb: !apply:speechbrain.lobes.models.g2p.dataio.lazy_init - init: 
!name:speechbrain.wordemb.transformer.TransformerWordEmbeddings + init: !name:speechbrain.integrations.huggingface.wordemb.transformer.TransformerWordEmbeddings model: !ref log_softmax: !new:speechbrain.nnet.activations.Softmax @@ -367,71 +365,75 @@ lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM opt_class: !name:torch.optim.Adam lr: !ref - betas: !ref (, ) + betas: (0.99, 0.998) -beam_searcher: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: - - !ref - - !ref - - !ref - bos_index: !ref +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer eos_index: !ref blank_index: !ref + ctc_fc: !ref + +coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer + vocab_size: !ref + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: !ref + +scorer_lm: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + coverage: !ref + + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + coverage: !ref + +beam_searcher: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref - using_max_attn_shift: !ref + using_max_attn_shift: False + length_normalization: True max_attn_shift: !ref - coverage_penalty: !ref - ctc_weight: !ref - using_eos_threshold: False - length_normalization: False - - -beam_searcher_lm: !new:speechbrain.decoders.seq2seq.S2STransformerBeamSearch - modules: - - !ref - - !ref - - !ref - lm_modules: - - !ref + temperature: !ref + scorer: !ref + +beam_searcher_lm: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref - 
using_max_attn_shift: !ref + using_max_attn_shift: False + length_normalization: True max_attn_shift: !ref - coverage_penalty: !ref - ctc_weight: !ref - lm_weight: !ref temperature: !ref - temperature_lm: !ref - using_eos_threshold: False - length_normalization: False + scorer: !ref - -beam_searcher_valid: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: - - !ref - - !ref - - !ref +beam_searcher_valid: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref eos_threshold: !ref - using_max_attn_shift: !ref + using_max_attn_shift: False + length_normalization: True max_attn_shift: !ref - coverage_penalty: !ref - ctc_weight: !ref - using_eos_threshold: False - length_normalization: False + temperature: !ref + scorer: !ref lr_annealing: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau diff --git a/recipes/LibriSpeech/G2P/hparams/hparams_lm_rnn.yaml b/recipes/LibriSpeech/G2P/hparams/hparams_lm_rnn.yaml index 077ed019b9..670547220f 100644 --- a/recipes/LibriSpeech/G2P/hparams/hparams_lm_rnn.yaml +++ b/recipes/LibriSpeech/G2P/hparams/hparams_lm_rnn.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 2602 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/RNNLM/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -50,11 +50,11 @@ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger tokenizer_file: /save/phoneme_tokenizer.model -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 batch_size: 80 lr: 0.001 -accu_steps: 1 # Gradient accumulation to simulate large batch training +grad_accumulation_factor: 1 # Gradient accumulation to simulate large batch training ckpt_interval_minutes: 15 # save 
checkpoint every N min # Dataloader options @@ -68,7 +68,7 @@ valid_dataloader_opts: test_dataloader_opts: batch_size: 1 -# Model parameters +####################### Model Parameters ####################################### model_dim: !apply:speechbrain.utils.hparams.choice value: !ref choices: diff --git a/recipes/LibriSpeech/G2P/hparams/hparams_lm_transformer.yaml b/recipes/LibriSpeech/G2P/hparams/hparams_lm_transformer.yaml index d6b9da3696..c5eb95913d 100644 --- a/recipes/LibriSpeech/G2P/hparams/hparams_lm_transformer.yaml +++ b/recipes/LibriSpeech/G2P/hparams/hparams_lm_transformer.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 2602 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/TRANSFORMERLM/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -39,11 +39,11 @@ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger # Tokenizer model (you must use the same tokenizer for LM and ASR training) tokenizer_file: /save/phoneme_tokenizer.model -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 batch_size: 80 lr: 0.001 -accu_steps: 1 # Gradient accumulation to simulate large batch training +grad_accumulation_factor: 1 # Gradient accumulation to simulate large batch training ckpt_interval_minutes: 15 # save checkpoint every N min # Dataloader options @@ -57,7 +57,7 @@ valid_dataloader_opts: test_dataloader_opts: batch_size: 1 -# Model parameters +####################### Model Parameters ####################################### emb_dim: 256 # dimension of the embeddings transformer_num_heads: 4 diff --git a/recipes/LibriSpeech/G2P/tokenizer_prepare.py b/recipes/LibriSpeech/G2P/tokenizer_prepare.py index 2cce66d63e..b7c64067f7 100644 --- a/recipes/LibriSpeech/G2P/tokenizer_prepare.py +++ b/recipes/LibriSpeech/G2P/tokenizer_prepare.py @@ 
-1,9 +1,9 @@ -"""A script to prepare annotations for tokenizers +"""A script to prepare annotations for tokenizers""" -""" import json import os import re + import datasets from speechbrain.lobes.models.g2p.dataio import build_token_char_map @@ -18,7 +18,7 @@ def phn2txt(phn, phoneme_map): --------- phn: list a list of original phonemes (ARPABET) - phoneme_map + phoneme_map: dict the phoneme-to-character map Returns @@ -51,7 +51,7 @@ def prepare_annotation(src, destination_file_name, phonemes): } for item in src } - with open(destination_file_name, "w") as dst_file: + with open(destination_file_name, "w", encoding="utf-8") as dst_file: json.dump(annotation, dst_file, indent=2) @@ -63,14 +63,14 @@ def prepare_tokenizer(data_folder, save_folder, phonemes, dataset_name): Arguments --------- - dataset_name: str - the name of the HuggingFace dataset data_folder: str the path to the dataset save_folder: str the path to the folder where annotations will be saved phonemes: list the list of phonemes + dataset_name: str + the name of the HuggingFace dataset """ dataset = datasets.load_dataset(dataset_name) if dataset_name else None for data_split in DATA_SPLITS: diff --git a/recipes/LibriSpeech/G2P/train.py b/recipes/LibriSpeech/G2P/train.py index ba8cc5e33c..ed428be861 100644 --- a/recipes/LibriSpeech/G2P/train.py +++ b/recipes/LibriSpeech/G2P/train.py @@ -8,41 +8,43 @@ * Mirco Ravanelli 2020 * Artem Ploujnikov 2021 """ -from speechbrain.dataio.dataset import ( - FilteredSortedDynamicItemDataset, - DynamicItemDataset, -) -from speechbrain.dataio.sampler import BalancingDataSampler -from speechbrain.utils.data_utils import undo_padding -import datasets -import logging + import os import random -import torch -import speechbrain as sb import sys -from enum import Enum from collections import namedtuple -from hyperpyyaml import load_hyperpyyaml +from enum import Enum from functools import partial -from speechbrain.utils.distributed import run_on_main -from 
speechbrain.pretrained.training import save_for_pretrained +from io import StringIO + +import datasets +import numpy as np +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio.dataset import ( + DynamicItemDataset, + FilteredSortedDynamicItemDataset, +) +from speechbrain.dataio.sampler import BalancingDataSampler +from speechbrain.dataio.wer import print_alignments +from speechbrain.integrations.huggingface.wordemb.util import expand_to_chars from speechbrain.lobes.models.g2p.dataio import ( + add_bos_eos, enable_eos_bos, + get_sequence_key, grapheme_pipeline, phoneme_pipeline, - tokenizer_encode_pipeline, - add_bos_eos, - get_sequence_key, phonemes_to_label, + tokenizer_encode_pipeline, ) -from speechbrain.dataio.wer import print_alignments -from speechbrain.wordemb.util import expand_to_chars -from io import StringIO from speechbrain.utils import hpopt as hp -import numpy as np +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger +from speechbrain.utils.pretrained import save_for_pretrained -logger = logging.getLogger(__name__) +logger = get_logger(__name__) G2PPredictions = namedtuple( "G2PPredictions", @@ -52,7 +54,7 @@ class TrainMode(Enum): - """An enumeration that represents the trainining mode + """An enumeration that represents the training mode NORMAL: trains the sequence-to-sequence model HOMOGRAPH: fine-tunes a trained model on homographs""" @@ -63,13 +65,19 @@ class TrainMode(Enum): # Define training procedure class G2PBrain(sb.Brain): - def __init__(self, train_step_name, *args, **kwargs): - """Class constructor + """Class constructor - Arguments - --------- - train_step_name: the name of the training step, for curriculum learning - """ + Arguments + --------- + train_step_name: str + the name of the training step, for curriculum learning + *args: tuple + Arguments to forward to ``Brain`` + **kwargs: dict 
+ Arguments to forward to ``Brain`` + """ + + def __init__(self, train_step_name, *args, **kwargs): super().__init__(*args, **kwargs) self.train_step_name = train_step_name self.train_step = next( @@ -110,7 +118,7 @@ def on_fit_start(self): """ # Run this *after* starting all processes since jit modules cannot be # pickled. - self._compile_jit() + self._compile() # Wrap modules with parallel backend after jit self._wrap_distributed() @@ -143,32 +151,29 @@ def _recover_checkpoint(self, min_key=None, max_key=None): Arguments --------- - max_key : str - Key to use for finding best checkpoint (higher is better). - By default, passed to ``self.checkpointer.recover_if_possible()``. min_key : str Key to use for finding best checkpoint (lower is better). By default, passed to ``self.checkpointer.recover_if_possible()``. + max_key : str + Key to use for finding best checkpoint (higher is better). + By default, passed to ``self.checkpointer.recover_if_possible()``. """ if self.checkpointer is not None: step = self.train_step["name"] logger.info(f"Attempting to restore checkpoint for step {step}") result = self.checkpointer.recover_if_possible( - device=torch.device(self.device), min_key=min_key, max_key=max_key, ckpt_predicate=(lambda ckpt: ckpt.meta.get("step") == step), ) if result is None: logger.info( - "No checkpoint fount for step %s, " + "No checkpoint found for step %s, " "attempting to recover any checkpoint", step, ) result = self.checkpointer.recover_if_possible( - device=torch.device(self.device), - min_key=min_key, - max_key=max_key, + min_key=min_key, max_key=max_key ) if result: logger.info( @@ -221,14 +226,14 @@ def compute_forward(self, batch, stage): if stage == sb.Stage.VALID else self.beam_searcher ) - hyps, scores = beam_searcher(encoder_out, char_lens) + + hyps, _, _, _ = beam_searcher(encoder_out, char_lens) return G2PPredictions(p_seq, char_lens, hyps, ctc_logprobs, attn) def compute_objectives(self, predictions, batch, stage): """Computes the loss 
(CTC+NLL) given predictions and targets. - Arguments --------- predictions: G2PPredictions @@ -237,6 +242,11 @@ def compute_objectives(self, predictions, batch, stage): a raw G2P data batch stage: speechbrain.Stage the training stage + + Returns + ------- + loss : torch.Tensor + Computed loss for this batch. """ phns_eos, phn_lens_eos = batch.phn_encoded_eos phns, phn_lens = batch.phn_encoded @@ -380,30 +390,17 @@ def is_ctc_active(self, stage): --------- stage: speechbrain.Stage the training stage + + Returns + ------- + active : bool + Whether CTC is active for this epoch """ if stage != sb.Stage.TRAIN: return False current_epoch = self.epoch_counter.current return current_epoch <= self.train_step["ctc_epochs"] - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" self.seq_metrics = self.hparams.seq_stats() @@ -428,9 +425,9 @@ def on_stage_start(self, stage, epoch): if self.mode == TrainMode.HOMOGRAPH: self._set_word_separator() - self.grapheme_word_separator_idx = self.hparams.grapheme_encoder.lab2ind[ - " " - ] + self.grapheme_word_separator_idx = ( + self.hparams.grapheme_encoder.lab2ind[" "] + ) if self.hparams.use_word_emb: self.modules.word_emb = self.hparams.word_emb().to(self.device) @@ -440,9 +437,9 @@ def _set_word_separator(self): word_separator_idx = self.hparams.token_space_index word_separator_base_idx = 
self.phoneme_encoder.lab2ind[" "] else: - word_separator_base_idx = ( - word_separator_idx - ) = self.phoneme_encoder.lab2ind[" "] + word_separator_base_idx = word_separator_idx = ( + self.phoneme_encoder.lab2ind[" "] + ) self.hparams.homograph_cost.word_separator = word_separator_idx self.hparams.homograph_cost.word_separator_base = ( @@ -470,7 +467,8 @@ def on_stage_end(self, stage, stage_loss, epoch): ): old_lr, new_lr = self.hparams.lr_annealing(per) elif isinstance( - self.hparams.lr_annealing, sb.nnet.schedulers.ReduceLROnPlateau, + self.hparams.lr_annealing, + sb.nnet.schedulers.ReduceLROnPlateau, ): old_lr, new_lr = self.hparams.lr_annealing( optim_list=[self.optimizer], @@ -533,10 +531,7 @@ def on_stage_end(self, stage, stage_loss, epoch): if self.hparams.enable_metrics: self._write_reports(epoch, final=False) - if self.epoch_counter.should_stop( - current=epoch, current_metric=per, - ): - self.epoch_counter.current = self.epoch_counter.limit + self.epoch_counter.update_metric(per) if stage == sb.Stage.TEST: test_stats = {"loss": stage_loss} @@ -573,6 +568,11 @@ def _get_interim_report_path(self, epoch, file_path): the epoch number file_path: str the raw report path + + Returns + ------- + file_path: str + Path to interim report """ output_path = os.path.join( self.hparams.output_folder, @@ -599,8 +599,8 @@ def _get_report_path(self, epoch, key, final): whether or not this si the final report. If final is false, an epoch number will be inserted into the path - Arguments - --------- + Returns + ------- file_name: str the report file name """ @@ -616,15 +616,9 @@ def _write_reports(self, epoch, final=True): --------- epoch: int the epoch number - final: bool whether or not the reports are final (i.e. 
after the final epoch) - - Returns - ------- - file_name: str - the report file name """ wer_file_name = self._get_report_path(epoch, "wer_file", final) self._write_wer_file(wer_file_name) @@ -642,7 +636,7 @@ def _write_wer_file(self, file_name): file_name: str the report file name """ - with open(file_name, "w") as w: + with open(file_name, "w", encoding="utf-8") as w: w.write("\nseq2seq loss stats:\n") self.seq_metrics.write_stats(w) w.write("\nPER stats:\n") @@ -659,7 +653,7 @@ def _write_homograph_file(self, file_name): file_name: str the report file name """ - with open(file_name, "w") as w: + with open(file_name, "w", encoding="utf-8") as w: self.classification_metrics_homograph.write_stats(w) def _add_stats_prefix(self, stats): @@ -673,7 +667,7 @@ def _add_stats_prefix(self, stats): a statistics dictionary Returns - --------- + ------- stats: dict a prefixed statistics dictionary """ @@ -692,19 +686,19 @@ def tb_writer(self): @property def tb_global_step(self): - """Returns the global step number in the Tensorboard writer""" + """Returns the global step number in the TensorBoard writer""" global_step = self.hparams.tensorboard_train_logger.global_step prefix = self.train_step["name"] return global_step["valid"][f"{prefix}_loss"] def save_samples(self): - """Saves attention alignment and text samples to the Tensorboard + """Saves attention alignment and text samples to the TensorBoard writer""" self._save_attention_alignment() self._save_text_alignments() def _save_text_alignments(self): - """Saves text predictions aligned with lables (a sample, for progress + """Saves text predictions aligned with labels (a sample, for progress tracking)""" if not self.hparams.enable_metrics: return @@ -762,7 +756,7 @@ def _save_text_alignment(self, tag, metrics_sample): Arguments --------- tag: str - the tag - for Tensorboard + the tag - for TensorBoard metrics_sample: list List of wer details by utterance, see ``speechbrain.utils.edit_distance.wer_details_by_utterance`` @@ 
-836,7 +830,7 @@ def filter_origins(data, hparams): hparams: dict the hyperparameters data - Results + Returns ------- data: speechbrain.dataio.dataset.DynamicItemDataset the filtered data @@ -855,10 +849,10 @@ def filter_homograph_positions(dataset): Arguments --------- - data: speechbrain.dataio.dataset.DynamicItemDataset + dataset: speechbrain.dataio.dataset.DynamicItemDataset the data to be filtered - Results + Returns ------- data: speechbrain.dataio.dataset.DynamicItemDataset the filtered data @@ -1024,7 +1018,8 @@ def dataio_prep(hparams, train_step=None): ) else: phoneme_pipeline_item = partial( - phoneme_pipeline, phoneme_encoder=phoneme_encoder, + phoneme_pipeline, + phoneme_encoder=phoneme_encoder, ) phn_bos_eos_pipeline_item = partial(add_bos_eos, encoder=phoneme_encoder) @@ -1040,7 +1035,7 @@ def dataio_prep(hparams, train_step=None): "takes": ["char"], "provides": [ "grapheme_list", - "grpaheme_encoded_list", + "grapheme_encoded_list", "grapheme_encoded", ], }, @@ -1075,7 +1070,8 @@ def dataio_prep(hparams, train_step=None): # A raw tokenizer is needed to determine the correct # word boundaries from data phoneme_raw_pipeline = partial( - phoneme_pipeline, phoneme_encoder=phoneme_encoder, + phoneme_pipeline, + phoneme_encoder=phoneme_encoder, ) dynamic_items.append( { @@ -1117,7 +1113,8 @@ def dataio_prep(hparams, train_step=None): output_keys.append("phn_raw_encoded") sb.dataio.dataset.set_output_keys( - datasets, output_keys, + datasets, + output_keys, ) if "origins" in hparams: datasets = [filter_origins(dataset, hparams) for dataset in datasets] @@ -1142,7 +1139,7 @@ def load_dependencies(hparams, run_opts): deps_pretrainer = hparams.get("deps_pretrainer") if deps_pretrainer: run_on_main(deps_pretrainer.collect_files) - deps_pretrainer.load_collected(device=run_opts["device"]) + deps_pretrainer.load_collected() def check_tensorboard(hparams): @@ -1155,9 +1152,9 @@ def check_tensorboard(hparams): """ if hparams["use_tensorboard"]: try: - from 
speechbrain.utils.train_logger import TensorboardLogger + from speechbrain.utils.train_logger import TensorBoardLogger - hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_train_logger"] = TensorBoardLogger( hparams["tensorboard_logs"] ) except ImportError: @@ -1176,12 +1173,15 @@ def check_tensorboard(hparams): hparams_file, run_opts, overrides = hp_ctx.parse_arguments(sys.argv[1:]) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Validate hyperparameters if not validate_hparams(hparams): - sys.exit(1) + raise ValueError( + "Non-wordwise tokenization is not supported with " + "homograph disambiguation training" + ) # Initialize ddp (useful only for multi-GPU DDP training) sb.utils.distributed.ddp_init_group(run_opts) @@ -1239,7 +1239,7 @@ def check_tensorboard(hparams): g2p_brain.phoneme_encoder = phoneme_encoder # NOTE: This gets modified after the first run and causes a double - # agument issue + # argument issue dataloader_opts = train_step.get( "dataloader_opts", hparams.get("dataloader_opts", {}) ) diff --git a/recipes/LibriSpeech/G2P/train_lm.py b/recipes/LibriSpeech/G2P/train_lm.py index 2bc0acd999..1cab4fe8dc 100644 --- a/recipes/LibriSpeech/G2P/train_lm.py +++ b/recipes/LibriSpeech/G2P/train_lm.py @@ -13,14 +13,17 @@ * Mirco Ravanelli 2021 * Artem Ploujnikov 2021 """ -import sys -import logging + import os -import speechbrain as sb +import sys + from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main from train import dataio_prep +import speechbrain as sb +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + # The following hyperparameters are used in dataio_prep, shared with the # main G2P training script: # - hparams["phoneme_encoder"] @@ -43,7 +46,7 @@ # - hparams["blank_index"] -logger = 
logging.getLogger(__name__) +logger = get_logger(__name__) # Brain class for language model training @@ -92,37 +95,9 @@ def compute_objectives(self, predictions, batch, stage): ) return loss - def fit_batch(self, batch): - """Runs all the steps needed to train the model on a single batch. - - Arguments - --------- - batch : PaddedBatch - This batch object contains all the relevant tensors for computation. - - Returns - ------- - Loss : torch.Tensor - A tensor containing the loss (single real number). - """ - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - # Loss backpropagation (gradient computation) - (loss / self.hparams.accu_steps).backward() - - # Manage gradient accumulation - if self.step % self.hparams.accu_steps == 0: - - # Gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - # Update the parameters - self.optimizer.step() - - # Reset the gradient - self.optimizer.zero_grad() - + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: if isinstance( self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler ) or isinstance( @@ -131,8 +106,6 @@ def fit_batch(self, batch): ): self.hparams.lr_annealing(self.optimizer) - return loss - def on_stage_end(self, stage, stage_loss, epoch): """Gets called at the end of an epoch. @@ -159,7 +132,6 @@ def on_stage_end(self, stage, stage_loss, epoch): # At the end of validation, we can wrote if stage == sb.Stage.VALID: - # Update learning rate old_lr, new_lr = self.hparams.lr_annealing(stage_loss) sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) @@ -184,7 +156,6 @@ def on_stage_end(self, stage, stage_loss, epoch): # Recipe begins! 
if __name__ == "__main__": - # Reading command line arguments hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) @@ -194,7 +165,7 @@ def on_stage_end(self, stage, stage_loss, epoch): from tokenizer_prepare import prepare_tokenizer # noqa # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) if hparams.get("phn_tokenize"): diff --git a/recipes/LibriSpeech/LM/README.md b/recipes/LibriSpeech/LM/README.md index f73eefe768..25bd927869 100644 --- a/recipes/LibriSpeech/LM/README.md +++ b/recipes/LibriSpeech/LM/README.md @@ -1,25 +1,53 @@ # Language Model with LibriSpeech This folder contains recipes for training language models for the LibriSpeech Dataset. -It supports both an RNN-based LM and a Transformer-based LM. -The scripts rely on the HuggingFace dataset, which manages data reading and loading from +It supports n-gram LM, RNN-based LM, and Transformer-based LM. +The scripts is relying on the HuggingFace dataset for RNN/Transformer based LM, which manages data reading and loading from large text corpora. You can download LibriSpeech at http://www.openslr.org/12 -# Extra Dependency: -Make sure you have the HuggingFace dataset installed. If not, type: -pip install datasets +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +If you want to train an n-gram, in this recipe we are using the popular KenLM library. Let's start by installing the Ubuntu library prerequisites. 
For a complete guide on how to install required dependencies, please refer to [this](https://kheafield.com/code/kenlm/dependencies/) link: + ``` + sudo apt install build-essential cmake libboost-system-dev libboost-thread-dev libboost-program-options-dev libboost-test-dev libeigen3-dev zlib1g-dev libbz2-dev liblzma-dev + ``` + + Next, we need to start downloading and unpacking the KenLM repo. + ``` + wget -O - https://kheafield.com/code/kenlm.tar.gz | tar xz + ``` + +KenLM is written in C++, so we'll make use of cmake to build the binaries. + ``` +mkdir kenlm/build && cd kenlm/build && cmake .. && make -j2 + ``` + +Now, make sure that the executables are added to your .bashrc file. To do it, +- Open the ~/.bashrc file in a text editor. +- Scroll to the end of the file and add the following line: ```export PATH=$PATH:/your/path/to/kenlm/build/bin ``` +- Save it and type: `source ~/.bashrc ` # How to run: +```shell python train.py hparams/RNNLM.yaml python train.py hparams/transformer.yaml +python train_ngram.py hparams/train_ngram.yaml --data_folder=your/data/folder +``` | Release | hyperparams file | Test PP | Model link | GPUs | | :--- | :---: | :---: | :---: | :---: | -| 20-05-22 | RNNLM.yaml (1k BPE) | --.-- | [link](https://drive.google.com/drive/folders/1CCsGfq0mbHTvOVL7cJRl6hwmXDQB2Xcy?usp=sharing) | 1xV100 32GB | -| 20-05-22 | RNNLM.yaml (5k BPE) | --.-- | [link](https://drive.google.com/drive/folders/17Qa2-3Q9KF-8huxxH_oZGdEwz4igCJ4o?usp=sharing) | 1xV100 32GB | -| 20-05-22 | transformer.yaml | --.-- | [link](https://drive.google.com/drive/folders/1oCEAjYUyummzcQSkhCbl_3Vf2ozy0BXp?usp=sharing) | 1xV100 32GB | - +| 20-05-22 | RNNLM.yaml (1k BPE) | --.-- | [link](https://www.dropbox.com/sh/8xpybezuv70ibcg/AAByv2NuNv_ZFXuDdG89-MVPa?dl=0) | 1xV100 32GB | +| 20-05-22 | RNNLM.yaml (5k BPE) | --.-- | [link](https://www.dropbox.com/sh/8462ef441wvava2/AABNfHr07J_0SsdaM1yO5qkxa?dl=0) | 1xV100 32GB | +| 20-05-22 | transformer.yaml | --.-- | 
[link](https://www.dropbox.com/sh/6uwqlw2tvv3kiy6/AACgvTR5jihyMrugBrpZPFNha?dl=0) | 1xV100 32GB | +| 22-01-24 | 4-gram - train_ngram.yaml | --.-- | [link](https://www.dropbox.com/scl/fi/kkd5jrwthpahn4t7e7sgk/4gram_lm.arpa?rlkey=mc820i9bugpi3oxtwwd6ulz0b&dl=0) | --.-- | +| 22-01-24 | 3-gram - train_ngram.yaml | --.-- | [link](https://www.dropbox.com/scl/fi/juryiq2e50bsbdy1qx540/3gram_lm.arpa?rlkey=3ntfnkn6zxda9memm5zh1mmt9&dl=0) | --.-- | # Training time Training a LM takes a lot of time. In our case, it take 3/4 weeks on 4 TESLA V100. Use the pre-trained model to avoid training it from scratch @@ -35,6 +63,15 @@ Training a LM takes a lot of time. In our case, it take 3/4 weeks on 4 TESLA V10 Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin 
and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, @@ -44,4 +81,4 @@ Please, cite SpeechBrain if you use it for your research or business. primaryClass={eess.AS}, note={arXiv:2106.04624} } -``` \ No newline at end of file +``` diff --git a/recipes/LibriSpeech/LM/dataset.py b/recipes/LibriSpeech/LM/dataset.py index 908a7c2443..a46672ea05 100644 --- a/recipes/LibriSpeech/LM/dataset.py +++ b/recipes/LibriSpeech/LM/dataset.py @@ -1,5 +1,4 @@ -# coding=utf-8 -# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. +# Copyright 2020 The torch.TensorFlow Datasets Authors and the HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,17 +15,14 @@ # Lint as: python3 """ Librispeech language modeling dataset. - this is an extented from huggingface's official implementation to allow the use of train-960 trainscript and lm_corpus for LM training + this is extended from huggingface's official implementation to allow the use of train-960 trainscript and lm_corpus for LM training Authors * Jianyuan Zhong 2021 """ -from __future__ import absolute_import, division, print_function - -import datasets import re -from typing import Optional +import datasets _CITATION = """\ @inproceedings{panayotov2015librispeech, @@ -49,10 +45,13 @@ class LibrispeechLmConfig(datasets.BuilderConfig): - """builder config for LibriSpeech LM - """ + """builder config for LibriSpeech LM""" - lm_corpus_path: Optional[str] = None + def __init__(self, **kwargs): + self.lm_corpus_path = kwargs.pop("lm_corpus_path", None) + super().__init__( + **kwargs, + ) def __post_init__(self): if self.lm_corpus_path is None: @@ -81,7 +80,7 @@ def _split_generators(self, dl_manager): for split_name, files in self.config.data_files.items(): if ( split_name == "train" - ): # concatination lm_copus and train transcripts + ): # concatenation lm_corpus 
and train transcripts path_dic[split_name] = dl_manager.download_and_extract( [self.config.lm_corpus_path] + files ) @@ -97,9 +96,10 @@ def _split_generators(self, dl_manager): def _generate_examples(self, archive_path): """Yields examples.""" + key = 0 for p in archive_path: - with open(p, "r", encoding="utf-8") as f: - for key, line in enumerate(f): + with open(p, encoding="utf-8") as f: + for line in f: line = re.sub( r"\d+-\d+-\d+\s", "", line ) # remove ids in transcripts @@ -109,3 +109,4 @@ def _generate_examples(self, archive_path): # very long sentences (>1000 char) are removed to prevent OOM if text and len(text) < 1000: yield key, {"text": text} + key += 1 diff --git a/recipes/LibriSpeech/LM/extra_requirements.txt b/recipes/LibriSpeech/LM/extra_requirements.txt index 0fc6b89465..a6a7d36f44 100644 --- a/recipes/LibriSpeech/LM/extra_requirements.txt +++ b/recipes/LibriSpeech/LM/extra_requirements.txt @@ -1,2 +1,2 @@ # huggingface dataset -datasets==1.6.2 +datasets diff --git a/recipes/LibriSpeech/LM/hparams/RNNLM.yaml b/recipes/LibriSpeech/LM/hparams/RNNLM.yaml index a04951639f..89c8957853 100644 --- a/recipes/LibriSpeech/LM/hparams/RNNLM.yaml +++ b/recipes/LibriSpeech/LM/hparams/RNNLM.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 2223 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/RNN/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -18,19 +18,22 @@ train_log: !ref /train_log.txt # transcriptions of LibriSpeech as well. 
data_folder: !PLACEHOLDER # e.g, /localscratch/LibriSpeech -# path to the lm_corpus +# path to the lm_corpus, assumed to be from a trusted source # if set to null, it will automatically download from the internet # in the case when there is no internet access, set this to your local file lm_corpus_path: null +train_transcripts_pattern: "train*/**/*.trans.txt" +dev_transcripts_pattern: "dev*/**/*.trans.txt" +test_transcripts_pattern: "test*/**/*.trans.txt" # Tokenizer model tokenizer_file: https://www.dropbox.com/s/o7gnouwdoqchotj/1000_unigram.model?dl=1 -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 batch_size: 80 lr: 0.001 -accu_steps: 1 # Gradient accumulation to simulate large batch training +grad_accumulation_factor: 1 # Gradient accumulation to simulate large batch training ckpt_interval_minutes: 15 # save checkpoint every N min # Dataloader options @@ -44,7 +47,7 @@ valid_dataloader_opts: test_dataloader_opts: batch_size: 1 -# Model parameters +####################### Model Parameters ####################################### emb_size: 128 activation: !name:torch.nn.LeakyReLU dropout: 0.0 diff --git a/recipes/LibriSpeech/LM/hparams/train_ngram.yaml b/recipes/LibriSpeech/LM/hparams/train_ngram.yaml new file mode 100644 index 0000000000..e83d79c52c --- /dev/null +++ b/recipes/LibriSpeech/LM/hparams/train_ngram.yaml @@ -0,0 +1,24 @@ +######### +# Recipe for Training kenLM on LibriSpeech Data. +# It is used to boost any CTC or CTC/joint attention models. 
+# +# Author: +# - Adel Moumen 2024 +################################ +# Seed needs to be set at top of yaml, before objects with parameters are made +output_folder: !ref results/n_gram_lm/ +# Data files +data_folder: !PLACEHOLDER # e.g, /localscratch/LibriSpeech +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: [] +test_splits: [] +train_csv: !ref /train.csv +lang_dir: !ref /lang +vocab_file: !ref /librispeech-vocab.txt +sil_prob: 0. +add_word_boundary: True +caching: False +skip_prep: False +arpa_order: 3 +prune_level: [0, 1, 2] +output_arpa: !ref /-gram.arpa diff --git a/recipes/LibriSpeech/LM/hparams/transformer.yaml b/recipes/LibriSpeech/LM/hparams/transformer.yaml index ba9bf72426..7c2b8ac153 100644 --- a/recipes/LibriSpeech/LM/hparams/transformer.yaml +++ b/recipes/LibriSpeech/LM/hparams/transformer.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 2223 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/Transformer/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -18,19 +18,22 @@ train_log: !ref /train_log.txt # transcriptions of LibriSpeech as well. 
data_folder: !PLACEHOLDER # e.g, /localscratch/LibriSpeech -# path to the lm_corpus +# path to the lm_corpus, assumed to be from a trusted source # if set to null, it will automatically download from the internet # in the case when there is no internet access, set this to your local file lm_corpus_path: null +train_transcripts_pattern: "train*/**/*.trans.txt" +dev_transcripts_pattern: "dev*/**/*.trans.txt" +test_transcripts_pattern: "test*/**/*.trans.txt" # Tokenizer model tokenizer_file: speechbrain/asr-transformer-transformerlm-librispeech/tokenizer.ckpt -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 batch_size: 16 lr: 10 -accu_steps: 8 # Gradient accumulation to simulate large batch training +grad_accumulation_factor: 8 # Gradient accumulation to simulate large batch training ckpt_interval_minutes: 15 # save checkpoint every N min # Dataloader options diff --git a/recipes/LibriSpeech/LM/train.py b/recipes/LibriSpeech/LM/train.py index 127319b4f1..1a03fa0154 100644 --- a/recipes/LibriSpeech/LM/train.py +++ b/recipes/LibriSpeech/LM/train.py @@ -9,18 +9,19 @@ * Jianyuan Zhong 2021 * Ju-Chieh Chou 2020 """ + +import glob import os import sys -import logging -import glob + import torch from datasets import load_dataset from hyperpyyaml import load_hyperpyyaml -import speechbrain as sb -from speechbrain.utils.distributed import run_on_main +import speechbrain as sb +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -42,20 +43,9 @@ def compute_objectives(self, predictions, batch, stage): ) return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - (loss / self.hparams.accu_steps).backward() - - if self.step % 
self.hparams.accu_steps == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.optimizer.step() - self.optimizer.zero_grad() - + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: if isinstance( self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler ) or isinstance( @@ -64,15 +54,13 @@ def fit_batch(self, batch): ): self.hparams.lr_annealing(self.optimizer) - return loss - def on_stage_end(self, stage, stage_loss, epoch): """Gets called at the end of a epoch.""" stage_stats = {"loss": stage_loss} if stage == sb.Stage.TRAIN: self.train_stats = stage_stats - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): + if stage == sb.Stage.VALID: if not ( isinstance( self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler @@ -93,22 +81,26 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta=stage_stats, min_keys=["loss"], + meta=stage_stats, + min_keys=["loss"], ) def dataio_prepare(hparams): - """grap all the .txt files for transcripts""" - logging.info("generating datasets...") + """grab all the .txt files for transcripts""" + logger.info("generating datasets...") data_folder = hparams["data_folder"] train_transcripts = glob.glob( - os.path.join(data_folder, "train*/**/*.trans.txt"), recursive=True + os.path.join(data_folder, hparams["train_transcripts_pattern"]), + recursive=True, ) dev_transcripts = glob.glob( - os.path.join(data_folder, "dev*/**/*.trans.txt"), recursive=True + os.path.join(data_folder, hparams["dev_transcripts_pattern"]), + recursive=True, ) test_transcripts = glob.glob( - os.path.join(data_folder, "test*/**/*.trans.txt"), recursive=True + os.path.join(data_folder, hparams["test_transcripts_pattern"]), + recursive=True, ) """prepare data and generate datasets""" @@ -120,6 +112,7 @@ def dataio_prepare(hparams): "dev": 
dev_transcripts, "test": test_transcripts, }, + trust_remote_code=True, ) train_data, valid_data, test_data = ( @@ -144,6 +137,7 @@ def dataio_prepare(hparams): tokenizer = hparams["tokenizer"] """Define text pipeline""" + # TODO: implement text augmentations pipelines @sb.utils.data_pipeline.takes("text") @sb.utils.data_pipeline.provides("text", "tokens_bos", "tokens_eos") @@ -159,7 +153,8 @@ def text_pipeline(text): # 4. Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "text", "tokens_bos", "tokens_eos"], + datasets, + ["id", "text", "tokens_bos", "tokens_eos"], ) return train_data, valid_data, test_data @@ -167,10 +162,9 @@ def text_pipeline(text): if __name__ == "__main__": # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -186,8 +180,8 @@ def text_pipeline(text): # We download the tokenizer from HuggingFace (or elsewhere depending on # the path given in the YAML file). - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() lm_brain = LM( modules=hparams["modules"], diff --git a/recipes/LibriSpeech/LM/train_ngram.py b/recipes/LibriSpeech/LM/train_ngram.py new file mode 100644 index 0000000000..d29bb44b33 --- /dev/null +++ b/recipes/LibriSpeech/LM/train_ngram.py @@ -0,0 +1,177 @@ +""" +Recipe to train kenlm ngram model. 
+ +To run this recipe, do the following: +> python train.py hparams/train.yaml --data_folder=/path/to/LibriSpeech + +Authors + * Adel Moumen 2024 + * Pierre Champion 2023 +""" + +import os +import sys + +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +import speechbrain.integrations.k2_fsa as sbk2 +from speechbrain.utils.data_utils import download_file, get_list_from_csv +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +OPEN_SLR_11_LINK = "http://www.openslr.org/resources/11/" + + +def download_librispeech_lm_training_text(destination): + """Download librispeech lm training and unpack it. + + Arguments + --------- + destination : str + Place to put dataset. + """ + f = "librispeech-lm-norm.txt.gz" + download_file( + OPEN_SLR_11_LINK + f, os.path.join(destination, f), unpack=True + ) + + +def dataprep_lm_training( + lm_dir, + output_arpa, + csv_files, + external_lm_corpus, + vocab_file, + arpa_order=3, + prune_level=[0, 1, 2], +): + """Prepare lm txt corpus file for lm training with kenlm (https://github.com/kpu/kenlm) + Does nothing if output_arpa exists. + Else display to the user how to use kenlm in command line, then exit + (return code 1), the user has to run the command manually. + Instruction on how to compile kenlm (lmplz binary) is available in the + above link. + + Arguments + --------- + lm_dir : str + Path to where to store txt corpus + output_arpa : str + File to write arpa lm + csv_files : List[str] + CSV files to use to increase lm txt corpus + external_lm_corpus : List[str] + (Big) text dataset corpus + vocab_file : str + N-grams that contain vocabulary items not in this file be pruned. + arpa_order : int + Order of the arpa lm + prune_level : List[int] + The numbers must be non-decreasing and the last number will be extended to any higher order. 
+ For example, --prune 0 disables pruning (the default) while --prune 0 0 1 prunes singletons for orders three and higher. + Please refer to https://kheafield.com/code/kenlm/estimation/ for more details. + """ + download_librispeech_lm_training_text(lm_dir) + column_text_key = "wrd" # defined in librispeech_prepare.py + lm_corpus = os.path.join(lm_dir, "libri_lm_corpus.txt") + line_seen = set() + with open(lm_corpus, "w", encoding="utf-8") as corpus: + for file in csv_files: + for line in get_list_from_csv(file, column_text_key): + corpus.write(line + "\n") + line_seen.add(line + "\n") + for file in external_lm_corpus: + with open(file, encoding="utf-8") as f: + for line in f: + if line not in line_seen: + corpus.write(line) + prune_level = " ".join(map(str, prune_level)) + cmd = f"lmplz -o {arpa_order} --prune {prune_level} --limit_vocab_file {vocab_file} < {lm_corpus} | sed '1,20s///1' > {output_arpa}" + logger.critical( + "RUN the following kenlm command to build a 3-gram arpa LM (https://github.com/kpu/kenlm):" + ) + logger.critical(f"$ {cmd}") + sys.exit(0) + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset prep (parsing Librispeech) + import librispeech_prepare + + # multi-gpu (ddp) save data preparation + run_on_main( + librispeech_prepare.prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": 
hparams["skip_prep"], + }, + ) + + # Download the vocabulary file for librispeech + librispeech_prepare.download_librispeech_vocab_text( + destination=hparams["vocab_file"] + ) + + # Create the lexicon.txt for k2 + run_on_main( + sbk2.lexicon.prepare_char_lexicon, + kwargs={ + "lang_dir": hparams["lang_dir"], + "vocab_files": [hparams["vocab_file"]], + "extra_csv_files": ( + [hparams["output_folder"] + "/train.csv"] + if not hparams["skip_prep"] + else [] + ), + "add_word_boundary": hparams["add_word_boundary"], + }, + ) + + caching = ( + {"cache": False} + if "caching" in hparams and hparams["caching"] is False + else {} + ) + + # Create the lang directory for k2 + run_on_main( + sbk2.prepare_lang.prepare_lang, + kwargs={ + "lang_dir": hparams["lang_dir"], + "sil_prob": hparams["sil_prob"], + **caching, + }, + ) + + dataprep_lm_training( + lm_dir=hparams["output_folder"], + output_arpa=hparams["output_arpa"], + csv_files=[hparams["train_csv"]], + external_lm_corpus=[ + os.path.join(hparams["output_folder"], "librispeech-lm-norm.txt") + ], + vocab_file=os.path.join(hparams["lang_dir"], "words.txt"), + arpa_order=hparams["arpa_order"], + prune_level=hparams["prune_level"], + ) diff --git a/recipes/LibriSpeech/README.md b/recipes/LibriSpeech/README.md index 24d3c9d76c..7dc6c3fdce 100644 --- a/recipes/LibriSpeech/README.md +++ b/recipes/LibriSpeech/README.md @@ -27,6 +27,15 @@ SpeechBrain provides a simple interface to transcribe audio files with pretraine Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriSpeech/Tokenizer/README.md b/recipes/LibriSpeech/Tokenizer/README.md index c31b9617c4..9dea848f24 100644 --- a/recipes/LibriSpeech/Tokenizer/README.md +++ b/recipes/LibriSpeech/Tokenizer/README.md @@ -6,10 +6,12 @@ You can download LibriSpeech at http://www.openslr.org/12 # How to run -python train.py train/1K_unigram_subword_bpe.yaml -python train.py train/5K_unigram_subword_bpe.yaml +```shell +python train.py hparams/1K_unigram_subword_bpe.yaml +python train.py hparams/5K_unigram_subword_bpe.yaml +``` -The pretrained tokenizers are available [here](https://drive.google.com/drive/folders/1NcsYx5ER-Zlv7bRxtwBrefuYxaEO4nY3?usp=sharing). 
+The pretrained tokenizers are available [here](https://www.dropbox.com/sh/xyifwhyq2o7g8u8/AACVHHgXUsRUZIfrzHOccLP7a?dl=0). # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -21,6 +23,15 @@ The pretrained tokenizers are available [here](https://drive.google.com/drive/fo Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriSpeech/Tokenizer/hparams/1K_unigram_subword_bpe.yaml b/recipes/LibriSpeech/Tokenizer/hparams/1K_unigram_subword_bpe.yaml index 5aeec2237d..9dda21f827 100644 --- a/recipes/LibriSpeech/Tokenizer/hparams/1K_unigram_subword_bpe.yaml +++ b/recipes/LibriSpeech/Tokenizer/hparams/1K_unigram_subword_bpe.yaml @@ 
-12,17 +12,21 @@ data_folder: !PLACEHOLDER # e.g, /path/to/LibriSpeech train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] dev_splits: ["dev-clean"] test_splits: ["test-clean", "test-other"] +skip_prep: False train_csv: !ref /train.csv valid_csv: !ref /dev-clean.csv -# Training parameters +####################### Training Parameters #################################### token_type: unigram # ["unigram", "bpe", "char"] token_output: 1000 # index(blank/eos/bos/unk) = 0 character_coverage: 1.0 csv_read: wrd - +bos_id: 1 +eos_id: 2 tokenizer: !name:speechbrain.tokenizers.SentencePiece.SentencePiece + bos_id: !ref + eos_id: !ref model_dir: !ref vocab_size: !ref annotation_train: !ref diff --git a/recipes/LibriSpeech/Tokenizer/hparams/5K_unigram_subword_bpe.yaml b/recipes/LibriSpeech/Tokenizer/hparams/5K_unigram_subword_bpe.yaml index c20dbb0f53..1f328c6f16 100644 --- a/recipes/LibriSpeech/Tokenizer/hparams/5K_unigram_subword_bpe.yaml +++ b/recipes/LibriSpeech/Tokenizer/hparams/5K_unigram_subword_bpe.yaml @@ -12,10 +12,11 @@ data_folder: !PLACEHOLDER # e.g., /path/to/LibriSpeech train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] dev_splits: ["dev-clean"] test_splits: ["test-clean", "test-other"] +skip_prep: False train_csv: !ref /train.csv valid_csv: !ref /dev-clean.csv -# Training parameters +####################### Training Parameters #################################### token_type: unigram # ["unigram", "bpe", "char"] token_output: 5000 # index(blank/eos/bos/unk) = 0 character_coverage: 1.0 diff --git a/recipes/LibriSpeech/Tokenizer/train.py b/recipes/LibriSpeech/Tokenizer/train.py index fc9c5dc90a..766dfa7f2d 100644 --- a/recipes/LibriSpeech/Tokenizer/train.py +++ b/recipes/LibriSpeech/Tokenizer/train.py @@ -16,18 +16,18 @@ """ import sys -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main if __name__ == "__main__": - # CLI: 
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -52,6 +52,7 @@ "save_folder": hparams["output_folder"], "merge_lst": hparams["train_splits"], "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], }, ) diff --git a/recipes/LibriSpeech/librispeech_prepare.py b/recipes/LibriSpeech/librispeech_prepare.py index 12669e651e..55c9823750 100644 --- a/recipes/LibriSpeech/librispeech_prepare.py +++ b/recipes/LibriSpeech/librispeech_prepare.py @@ -5,25 +5,40 @@ Author ------ -Mirco Ravanelli, Ju-Chieh Chou, Loren Lugosch 2020 + * Mirco Ravanelli, 2020 + * Ju-Chieh Chou, 2020 + * Loren Lugosch, 2020 + * Pierre Champion, 2023 + * Adel Moumen, 2024 """ -import os import csv +import functools +import os import random from collections import Counter -import logging -import torchaudio -from speechbrain.utils.data_utils import download_file, get_all_files +from dataclasses import dataclass + from speechbrain.dataio.dataio import ( load_pkl, - save_pkl, merge_csvs, + read_audio_info, + save_pkl, ) +from speechbrain.utils.data_utils import download_file, get_all_files +from speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map -logger = logging.getLogger(__name__) +logger = get_logger(__name__) OPT_FILE = "opt_librispeech_prepare.pkl" SAMPLERATE = 16000 +OPEN_SLR_11_LINK = "http://www.openslr.org/resources/11/" +OPEN_SLR_11_NGRAM_MODELs = [ + "3-gram.arpa.gz", + "3-gram.pruned.1e-7.arpa.gz", + "3-gram.pruned.3e-7.arpa.gz", + "4-gram.arpa.gz", +] def prepare_librispeech( @@ -46,6 +61,8 @@ def prepare_librispeech( --------- data_folder : str Path to the folder where the original LibriSpeech dataset is stored. 
+ save_folder : str + The directory where to store the csv files. tr_splits : list List of train splits to prepare from ['test-others','train-clean-100', 'train-clean-360','train-other-500']. @@ -53,14 +70,12 @@ def prepare_librispeech( List of dev splits to prepare from ['dev-clean','dev-others']. te_splits : list List of test splits to prepare from ['test-clean','test-others']. - save_folder : str - The directory where to store the csv files. select_n_sentences : int Default : None If not None, only pick this many sentences. merge_lst : list List of librispeech splits (e.g, train-clean, train-clean-360,..) to - merge in a singe csv file. + merge in a single csv file. merge_name: str Name of the merged csv file. create_lexicon: bool @@ -69,20 +84,24 @@ def prepare_librispeech( skip_prep: bool If True, data preparation is skipped. + Returns + ------- + None Example ------- - >>> data_folder = 'datasets/LibriSpeech' - >>> tr_splits = ['train-clean-100'] - >>> dev_splits = ['dev-clean'] - >>> te_splits = ['test-clean'] - >>> save_folder = 'librispeech_prepared' - >>> prepare_librispeech(data_folder, save_folder, tr_splits, dev_splits, te_splits) + >>> data_folder = "datasets/LibriSpeech" + >>> tr_splits = ["train-clean-100"] + >>> dev_splits = ["dev-clean"] + >>> te_splits = ["test-clean"] + >>> save_folder = "librispeech_prepared" + >>> prepare_librispeech( + ... data_folder, save_folder, tr_splits, dev_splits, te_splits + ... 
) """ if skip_prep: return - data_folder = data_folder splits = tr_splits + dev_splits + te_splits save_folder = save_folder select_n_sentences = select_n_sentences @@ -110,7 +129,6 @@ def prepare_librispeech( # create csv files for each split all_texts = {} for split_index in range(len(splits)): - split = splits[split_index] wav_lst = get_all_files( @@ -130,40 +148,35 @@ def prepare_librispeech( n_sentences = len(wav_lst) create_csv( - save_folder, wav_lst, text_dict, split, n_sentences, + data_folder, save_folder, wav_lst, text_dict, split, n_sentences ) # Merging csv file if needed if merge_lst and merge_name is not None: merge_files = [split_libri + ".csv" for split_libri in merge_lst] merge_csvs( - data_folder=save_folder, csv_lst=merge_files, merged_csv=merge_name, + data_folder=save_folder, csv_lst=merge_files, merged_csv=merge_name ) # Create lexicon.csv and oov.csv if create_lexicon: - create_lexicon_and_oov_csv(all_texts, data_folder, save_folder) + create_lexicon_and_oov_csv(all_texts, save_folder) # saving options save_pkl(conf, save_opt) -def create_lexicon_and_oov_csv(all_texts, data_folder, save_folder): +def create_lexicon_and_oov_csv(all_texts, save_folder): """ Creates lexicon csv files useful for training and testing a grapheme-to-phoneme (G2P) model. Arguments --------- - all_text : dict + all_texts : dict Dictionary containing text from the librispeech transcriptions - data_folder : str - Path to the folder where the original LibriSpeech dataset is stored. save_folder : str The directory where to store the csv files. 
- Returns - ------- - None """ # If the lexicon file does not exist, download it lexicon_url = "http://www.openslr.org/resources/11/librispeech-lexicon.txt" @@ -183,7 +196,7 @@ def create_lexicon_and_oov_csv(all_texts, data_folder, save_folder): # Get list of all words in the lexicon lexicon_words = [] lexicon_pronunciations = [] - with open(lexicon_path, "r") as f: + with open(lexicon_path, encoding="utf-8") as f: lines = f.readlines() for line in lines: word = line.split()[0] @@ -194,7 +207,7 @@ def create_lexicon_and_oov_csv(all_texts, data_folder, save_folder): # Create lexicon.csv header = "ID,duration,char,phn\n" lexicon_csv_path = os.path.join(save_folder, "lexicon.csv") - with open(lexicon_csv_path, "w") as f: + with open(lexicon_csv_path, "w", newline="", encoding="utf-8") as f: f.write(header) for idx in range(len(lexicon_words)): separated_graphemes = [c for c in lexicon_words[idx]] @@ -226,14 +239,10 @@ def split_lexicon(data_folder, split_ratio): List containing the training, validation, and test split ratio. Set it to [80, 10, 10] for having 80% of material for training, 10% for valid, and 10 for test. 
- - Returns - ------- - None """ # Reading lexicon.csv lexicon_csv_path = os.path.join(data_folder, "lexicon.csv") - with open(lexicon_csv_path, "r") as f: + with open(lexicon_csv_path, newline="", encoding="utf-8") as f: lexicon_lines = f.readlines() # Remove header lexicon_lines = lexicon_lines[1:] @@ -251,16 +260,58 @@ def split_lexicon(data_folder, split_ratio): test_lines = [header] + lexicon_lines[tr_snts + valid_snts :] # Saving files - with open(os.path.join(data_folder, "lexicon_tr.csv"), "w") as f: + with open( + os.path.join(data_folder, "lexicon_tr.csv"), + "w", + newline="", + encoding="utf-8", + ) as f: f.writelines(train_lines) - with open(os.path.join(data_folder, "lexicon_dev.csv"), "w") as f: + with open( + os.path.join(data_folder, "lexicon_dev.csv"), + "w", + newline="", + encoding="utf-8", + ) as f: f.writelines(valid_lines) - with open(os.path.join(data_folder, "lexicon_test.csv"), "w") as f: + with open( + os.path.join(data_folder, "lexicon_test.csv"), + "w", + newline="", + encoding="utf-8", + ) as f: f.writelines(test_lines) +@dataclass +class LSRow: + snt_id: str + spk_id: str + duration: float + file_path: str + words: str + + +def process_line(wav_file, text_dict) -> LSRow: + snt_id = wav_file.split("/")[-1].replace(".flac", "") + spk_id = "-".join(snt_id.split("-")[0:2]) + wrds = text_dict[snt_id] + wrds = " ".join(wrds.split("_")) + + info = read_audio_info(wav_file) + duration = info.num_frames / info.sample_rate + + return LSRow( + snt_id=snt_id, + spk_id=spk_id, + duration=duration, + file_path=wav_file, + words=wrds, + ) + + def create_csv( - save_folder, wav_lst, text_dict, split, select_n_sentences, + data_folder, save_folder, wav_lst, text_dict, split, select_n_sentences ): """ Create the dataset csv file given a list of wav files. 
@@ -295,34 +346,30 @@ def create_csv( csv_lines = [["ID", "duration", "wav", "spk_id", "wrd"]] snt_cnt = 0 + line_processor = functools.partial(process_line, text_dict=text_dict) # Processing all the wav files in wav_lst - for wav_file in wav_lst: - - snt_id = wav_file.split("/")[-1].replace(".flac", "") - spk_id = "-".join(snt_id.split("-")[0:2]) - wrds = text_dict[snt_id] - - signal, fs = torchaudio.load(wav_file) - signal = signal.squeeze(0) - duration = signal.shape[0] / SAMPLERATE - + # FLAC metadata reading is already fast, so we set a high chunk size + # to limit main thread CPU bottlenecks + for row in parallel_map(line_processor, wav_lst, chunk_size=8192): csv_line = [ - snt_id, - str(duration), - wav_file, - spk_id, - str(" ".join(wrds.split("_"))), + row.snt_id, + str(row.duration), + # Replace data_folder with $data_root/ placeholder + row.file_path.replace(data_folder, "$data_root/"), + row.spk_id, + row.words, ] - - # Appending current file to the csv_lines list + # Appending current file to the csv_lines list csv_lines.append(csv_line) + snt_cnt = snt_cnt + 1 + # parallel_map guarantees element ordering so we're OK if snt_cnt == select_n_sentences: break # Writing the csv_lines - with open(csv_file, mode="w") as csv_f: + with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f: csv_writer = csv.writer( csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL ) @@ -344,7 +391,7 @@ def skip(splits, save_folder, conf): splits : list A list of the splits expected in the preparation. save_folder : str - The location of the seave directory + The location of the save directory conf : dict The configuration options to ensure they haven't changed. 
@@ -396,7 +443,7 @@ def text_to_dict(text_lst): text_dict = {} # Reading all the transcription files is text_lst for file in text_lst: - with open(file, "r") as f: + with open(file, encoding="utf-8") as f: # Reading all line of the transcription file for line in f: line_lst = line.strip().split(" ") @@ -410,9 +457,12 @@ def check_librispeech_folders(data_folder, splits): If it does not, an error is raised. - Returns - ------- - None + Arguments + --------- + data_folder : str + The path to the directory with the data. + splits : list + The portions of the data to check. Raises ------ @@ -428,3 +478,55 @@ def check_librispeech_folders(data_folder, splits): "Librispeech dataset)" % split_folder ) raise OSError(err_msg) + + +def download_librispeech_vocab_text(destination): + """Download librispeech vocab file and unpack it. + + Arguments + --------- + destination : str + Place to put vocab file. + """ + f = "librispeech-vocab.txt" + download_file(OPEN_SLR_11_LINK + f, destination) + + +def download_openslr_librispeech_lm(destination, rescoring_lm=True): + """Download openslr librispeech lm and unpack it. + + Arguments + --------- + destination : str + Place to put lm. + rescoring_lm : bool + Also download bigger 4grams model + """ + os.makedirs(destination, exist_ok=True) + for f in OPEN_SLR_11_NGRAM_MODELs: + if f.startswith("4") and not rescoring_lm: + continue + d = os.path.join(destination, f) + download_file(OPEN_SLR_11_LINK + f, d, unpack=True) + + +def download_sb_librispeech_lm(destination, rescoring_lm=True): + """Download sb librispeech lm and unpack it. + + Arguments + --------- + destination : str + Place to put lm. 
+ rescoring_lm : bool + Also download bigger 4grams model + """ + os.makedirs(destination, exist_ok=True) + download_file( + "https://www.dropbox.com/scl/fi/3fkkdlliavhveb5n3nsow/3gram_lm.arpa?rlkey=jgdrluppfut1pjminf3l3y106&dl=1", + os.path.join(destination, "3-gram_sb.arpa"), + ) + if rescoring_lm: + download_file( + "https://www.dropbox.com/scl/fi/roz46ee0ah2lvy5csno4z/4gram_lm.arpa?rlkey=2wt8ozb1mqgde9h9n9rp2yppz&dl=1", + os.path.join(destination, "4-gram_sb.arpa"), + ) diff --git a/recipes/LibriSpeech/quantization/README.md b/recipes/LibriSpeech/quantization/README.md new file mode 100644 index 0000000000..1e27b7253d --- /dev/null +++ b/recipes/LibriSpeech/quantization/README.md @@ -0,0 +1,102 @@ +# Quantization + +This folder contains recipes for training K-means quantizers on the LibriSpeech dataset. +The quantizer maps self-supervised representations from wav2vec 2.0, HuBERT, WavLM, etc. into discrete representations. +These discrete representations can then be used as input features for downstream tasks such as ASR, ASV, TTS, etc. + +You can download LibriSpeech from http://www.openslr.org/12. + +--------------------------------------------------------------------------------------------------------- + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
+To do so, simply run the following command in your terminal: + +```shell +pip install -r extra_requirements.txt +``` + +--------------------------------------------------------------------------------------------------------- + +## Running an Experiment + +```shell +python train.py hparams/train_discrete_ssl.yaml --data_folder +``` + +Examples: + +```shell +python train.py hparams/train_discrete_ssl.yaml \ +--data_folder data/LibriSpeech \ +--ssl_hub facebook/wav2vec2-large \ +--n_clusters 1000 \ +--layer_id 7 \ +--experiment_name wav2vec2_K1000_L7 +``` + +```shell +python train.py hparams/train_discrete_ssl.yaml \ +--data_folder data/LibriSpeech \ +--ssl_hub facebook/hubert-large-ll60k \ +--n_clusters 1000 \ +--layer_id 7 \ +--experiment_name hubert_K1000_L7 +``` + +```shell +python train.py hparams/train_discrete_ssl.yaml \ +--data_folder data/LibriSpeech \ +--ssl_hub microsoft/wavlm-large \ +--n_clusters 1000 \ +--layer_id 7 \ +--experiment_name wavlm_K1000_L7 +``` + +--------------------------------------------------------------------------------------------------------- + +## Results + +The output folders with checkpoints and logs can be found [here](https://www.dropbox.com/sh/bk5qz0u1ppx15jk/AAAj23FI3AVKtfRKGvyHJYHza?dl=0). + +The checkpoints can be also found at [this](https://huggingface.co/speechbrain/SSL_Quantization) HuggingFace repository. + +**NOTE**: these logs and checkpoints were created using an earlier version of the code. + +--------------------------------------------------------------------------------------------------------- + +## About SpeechBrain + +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + +--------------------------------------------------------------------------------------------------------- + +## Citing SpeechBrain + +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@article{speechbrainV1, + author = {Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca {Della Libera} and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Ha Nguyen and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Ga{{\"e}}lle Laperri{{\`e}}re and Mickael Rouvier and Renato De Mori and Yannick Est{{\`e}}ve}, + title = {Open-Source Conversational {AI} with {SpeechBrain} 1.0}, + journal = {Journal of Machine Learning Research}, + year = {2024}, + volume = {25}, + number = {333}, + pages = {1--11}, + url = {http://jmlr.org/papers/v25/24-0991.html} +} +``` + +```bibtex +@article{ravanelli2021speechbrain, + author = {Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + title = {{SpeechBrain}: A General-Purpose Speech Toolkit}, + journal = {arXiv preprint arXiv:2106.04624}, + year = {2021}, + url = {https://arxiv.org/abs/2106.04624}, +} +``` diff --git a/recipes/LibriSpeech/quantization/extra_requirements.txt b/recipes/LibriSpeech/quantization/extra_requirements.txt new file mode 100644 index 0000000000..d5e06028d8 --- /dev/null +++ b/recipes/LibriSpeech/quantization/extra_requirements.txt @@ -0,0 +1 @@ +scikit-learn diff --git a/recipes/LibriSpeech/quantization/hparams/train_discrete_ssl.yaml b/recipes/LibriSpeech/quantization/hparams/train_discrete_ssl.yaml new file 
mode 100644 index 0000000000..ea6ffc771a --- /dev/null +++ b/recipes/LibriSpeech/quantization/hparams/train_discrete_ssl.yaml @@ -0,0 +1,103 @@ +# ########################################################################################### +# Model: K-means applied to SSL model +# Authors: Luca Della Libera 2024 +# Adapted from: https://github.com/speechbrain/speechbrain/blob/v1.0.2/recipes/LJSpeech/quantization/hparams/train_discrete_ssl.yaml +# ########################################################################################### + +experiment_name: wav2vec2_K1000_L7 + +# Seed needs to be set at top of YAML +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Data preparation +data_folder: !PLACEHOLDER +train_splits: [train-clean-100] +dev_splits: [dev-clean] +test_splits: [test-clean] +skip_prep: False +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +test_csv: !ref /test-clean.csv + +# Output folders +output_folder: !ref results// +save_folder: !ref /save +cache_folder: !name:huggingface_hub.constants.HUGGINGFACE_HUB_CACHE + +# Preprocessing parameters +train_remove_if_longer: 60.0 # Seconds +valid_remove_if_longer: 60.0 # Seconds +test_remove_if_longer: 60.0 # Seconds +sorting: random + +# Training parameters +num_epochs: 1 +train_batch_size: 8 +valid_batch_size: 1 +test_batch_size: 1 +dataloader_workers: 4 +nonfinite_patience: 10 +precision: fp32 +ckpt_interval_steps: 4000 +keep_checkpoints: 2 + +# SSL model parameters +ssl_hub: facebook/wav2vec2-large +sample_rate: 16000 # NOTE: must match the SSL model sample rate +layer_id: 7 + +# Quantizer parameters +n_clusters: 1000 +init: k-means++ +max_iter: 100 +kmeans_batch_size: 10000 # Should be >= num_clusters +tol: 0.0 +max_no_improvement: 100 +n_init: 20 +reassignment_ratio: 0.0 + +# Modules +ssl_model: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: False + freeze: True + freeze_feature_extractor: True + output_all_hiddens: True 
+ save_path: !ref + +quantizer: !new:speechbrain.integrations.audio_tokenizers.kmeans.MiniBatchKMeansSklearn + n_clusters: !ref + init: !ref + max_iter: !ref + batch_size: !ref + tol: !ref + max_no_improvement: !ref + n_init: !ref + reassignment_ratio: !ref + random_state: !ref + verbose: 1 + compute_labels: True + init_size: null + +modules: + ssl_model: !ref + quantizer: !ref + +# Counters, checkpointers, loggers, etc. +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + quantizer: !ref + counter: !ref + custom_load_hooks: + quantizer: !name:speechbrain.integrations.audio_tokenizers.kmeans.MiniBatchKMeansSklearn.load + custom_save_hooks: + quantizer: !name:speechbrain.integrations.audio_tokenizers.kmeans.MiniBatchKMeansSklearn.save + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref /train_log.txt + precision: 3 diff --git a/recipes/LibriSpeech/quantization/librispeech_prepare.py b/recipes/LibriSpeech/quantization/librispeech_prepare.py new file mode 120000 index 0000000000..a3126ec94a --- /dev/null +++ b/recipes/LibriSpeech/quantization/librispeech_prepare.py @@ -0,0 +1 @@ +../librispeech_prepare.py \ No newline at end of file diff --git a/recipes/LibriSpeech/quantization/train.py b/recipes/LibriSpeech/quantization/train.py new file mode 100644 index 0000000000..6ab20971dc --- /dev/null +++ b/recipes/LibriSpeech/quantization/train.py @@ -0,0 +1,269 @@ +#!/usr/bin/env/python + +"""Recipe for training a K-means quantizer on features from an SSL model. 
+ +To run this recipe: +> python train.py hparams/train_discrete_ssl.yaml + +Authors + * Luca Della Libera 2024 +""" + +# Adapted from: +# https://github.com/speechbrain/speechbrain/blob/v1.0.2/recipes/LJSpeech/quantization/train.py + +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process + + +class Quantization(sb.Brain): + def compute_forward(self, batch, stage): + """Forward pass.""" + batch = batch.to(self.device) + sig, lens = batch.sig # [B, T] + + # Extract features + with torch.no_grad(): + self.modules.ssl_model.eval() + feats = self.modules.ssl_model(sig, lens) # [K, B, N, H] + feats = feats[self.hparams.layer_id] # [B, N, H] + + return feats + + def compute_objectives(self, predictions, batch, stage): + """Computes the objectives.""" + feats = predictions # [B, N, H] + + if stage != sb.Stage.TRAIN: + # For K-means the validation/test loss is the inertia + # The lower the inertia, the better should be the clustering + # It is useful to monitor progress across epochs + # However, when saving checkpoints we always keep the last one (i.e. 
max_keys=["epoch"]) + # to keep backward compatibility + loss = self.hparams.quantizer.inertia(feats) + return loss + + # If training, accumulate features (batch size used for K-means training + # should be much larger than batch size used for feature extraction) + feats = feats.flatten(end_dim=-2) # [BN, H] + self.curr_feats.append(feats) + self.curr_batch_size += len(feats) + if self.curr_batch_size < self.hparams.kmeans_batch_size: + # If not enough features, leave average loss unchanged and go to next batch + # avg_loss is computed as: (avg_loss - avg_loss / self.step) + float(loss) / self.step + # If we set loss = avg_loss, avg_loss stays unchanged + loss = torch.tensor(self.avg_train_loss) + # Keep compatibility with standard supervised training + # (SpeechBrain expects a tensor with gradient) + loss.requires_grad_() + return loss + self.curr_feats = torch.cat(self.curr_feats) + feats = self.curr_feats[: self.hparams.kmeans_batch_size] + + # Keep remaining features for next iteration + self.curr_feats = [self.curr_feats[self.hparams.kmeans_batch_size :]] + self.curr_batch_size = len(self.curr_feats[0]) + + # Retrieve current centroids + old_cluster_centers = self.hparams.quantizer.cluster_centers + + # Partial fit on current batch + self.hparams.quantizer.partial_fit(feats) + + # For K-means the training loss is the drift between current centroids and old centroids + # If close to 0, it means that the training has converged + curr_cluster_centers = self.hparams.quantizer.cluster_centers + loss = (curr_cluster_centers - old_cluster_centers).norm() + + # Keep compatibility with standard supervised training + # (SpeechBrain expects a tensor with gradient) + loss.requires_grad_() + self.optimizer_step += 1 + assert self.optimizer_step == self.modules.quantizer.n_steps, ( + f"optimizer_step: {self.optimizer_step}", + f"quantizer.n_steps: {self.modules.quantizer.n_steps}", + ) + + return loss + + def on_stage_start(self, stage, epoch=None): + """Gets called at the 
beginning of each epoch.""" + if stage == sb.Stage.TRAIN: + # NOTE: not included in intra-epoch checkpoints + self.curr_feats = [] + self.curr_batch_size = 0 + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of each epoch.""" + # Compute/store important stats + current_epoch = self.hparams.epoch_counter.current + stage_stats = {"loss": stage_loss} + + if stage == sb.Stage.TRAIN: + self.avg_train_loss = 0.0 + self.train_stats = stage_stats + self.stats_meta = {"epoch": epoch, "steps": self.optimizer_step} + if if_main_process(): + self.checkpointer.save_and_keep_only( + meta={"loss": stage_stats["loss"], "epoch": epoch}, + max_keys=["epoch"], + num_to_keep=self.hparams.keep_checkpoints, + ) + self.hparams.train_logger.log_stats( + stats_meta=self.stats_meta, + train_stats=self.train_stats, + ) + + # Perform end-of-iteration operations, like annealing, logging, etc. + elif stage == sb.Stage.VALID: + self.hparams.train_logger.log_stats( + stats_meta=self.stats_meta, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": current_epoch}, + test_stats=stage_stats, + ) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. 
+ + """ + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"DATA_ROOT": hparams["data_folder"]}, + ) + # Sort training data to speed up training + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=hparams["sorting"] == "descending", + key_max_value={"duration": hparams["train_remove_if_longer"]}, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"DATA_ROOT": hparams["data_folder"]}, + ) + # Sort validation data to speed up validation + valid_data = valid_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["valid_remove_if_longer"]}, + ) + + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_csv"], + replacements={"DATA_ROOT": hparams["data_folder"]}, + ) + # Sort the test data to speed up testing + test_data = test_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["test_remove_if_longer"]}, + ) + + datasets = [train_data, valid_data, test_data] + + # Define audio pipeline + takes = ["wav"] + provides = ["sig"] + + def audio_pipeline(wav): + original_sample_rate = sb.dataio.dataio.read_audio_info(wav).sample_rate + sig = sb.dataio.dataio.read_audio(wav) + sig = torchaudio.functional.resample( + sig, original_sample_rate, hparams["sample_rate"] + ) + yield sig + + sb.dataio.dataset.add_dynamic_item( + datasets, audio_pipeline, takes, provides + ) + + # Set output + sb.dataio.dataset.set_output_keys(datasets, ["id"] + provides) + + return datasets + + +if __name__ == "__main__": + # Command-line interface + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then create ddp_init_group with the right communication protocol + 
sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Prepare data + from librispeech_prepare import prepare_librispeech + + kwargs = { + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + } + prepare_librispeech(**kwargs) + + # Create the datasets objects + train_data, valid_data, test_data = dataio_prepare(hparams) + + # Trainer initialization + brain = Quantization( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Train + brain.fit( + brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=dict( + num_workers=hparams["dataloader_workers"], + batch_size=hparams["train_batch_size"], + shuffle=hparams["sorting"] == "random", + pin_memory=run_opts.get("device", "cpu") != "cpu", + ), + valid_loader_kwargs=dict( + num_workers=hparams["dataloader_workers"], + batch_size=hparams["valid_batch_size"], + pin_memory=run_opts.get("device", "cpu") != "cpu", + ), + ) + + # Test + brain.evaluate( + test_data, + max_key="epoch", + test_loader_kwargs=dict( + num_workers=hparams["dataloader_workers"], + batch_size=hparams["test_batch_size"], + pin_memory=run_opts.get("device", "cpu") != "cpu", + ), + ) diff --git a/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/README.md b/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/README.md new file mode 100644 index 0000000000..a8c02ed282 --- /dev/null +++ b/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/README.md @@ -0,0 +1,27 @@ +# BEST-RQ streaming and offline pretraining with SpeechBrain + +This 
folder contains the scripts to train a BEST-RQ model using LibriSpeech. It can be adapted to any dataset as long as you provide the csv or json files. No other adaptation will be required apart from controlling the sequence length and Dynamic Batching arguments to avoid out of memory issues. + +More information on the architecture can be found in [the original paper](https://arxiv.org/pdf/2202.01855). + +# Go ! +Simply type: +```shell +# single GPU example +python train.py hparams/BEST-RQ.yaml --data_folder /path/to/LibriSpeech/ --streaming True + +# single node multi GPU example +torchrun --rdzv-backend=c10d --rdzv-endpoint=localhost:0 --nnodes=1 --nproc-per-node=2 train.py hparams/BEST-RQ.yaml --data_folder /path/to/LibriSpeech/ --streaming True +``` + +Do not forget to replace the `!PLACEHOLDER` variables in the yaml corresponding to your local configuration. + +# Tips and tricks +We found that the following parameters can greatly affect downstream performance: +- Batch size (the bigger the better depending on the dataset of interest) +- learning rate (`lr`, depending on the batch size) +- mask probability (`mask_prob` which may need to be adapted to the audio source) + +# Finetuning after pretraining +For speech recognition finetuning, simply head to the [ASR / CTC](https://github.com/speechbrain/speechbrain/tree/develop/recipes/LibriSpeech/ASR/CTC) recipe and use train_with_bestrq.py! Numbers should be equivalent to the paper. 
+ diff --git a/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/hparams/BEST-RQ.yaml b/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/hparams/BEST-RQ.yaml new file mode 100644 index 0000000000..33aae88add --- /dev/null +++ b/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/hparams/BEST-RQ.yaml @@ -0,0 +1,195 @@ +# ############################################################################ +# Model: Best-RQ +# Encoder: Conformer Encoder w/Random Projection Quantizer +# Training: Librispeech 960h +# Authors: Ryan Whetten 2024 +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 1000 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +data_folder: !PLACEHOLDER # /path/to/LibriSpeech/ +output_folder: !ref results/ +save_folder: !ref /save +# Logging file for every N optimizer steps (many lines) +train_steps_log: !ref /train_steps_log.txt +# Logging file per epoch +train_stage_log: !ref /train_stage_log.txt + +train_splits: ["train-clean-100", "train-clean-360", "train-other-500"] +dev_splits: ["dev-clean"] +test_splits: ["test-clean"] +train_csv: !ref /train.csv +valid_csv: !ref /dev-clean.csv +skip_prep: False + +avoid_if_longer_than: 60.0 +avoid_if_shorter_than: 2.0 +log_interval: 500 # Logging every N optimizer steps +max_grad_norm: 10 +precision: bf16 # bf16, fp16 or fp32 + +# The training will either stops at number_of_epochs or optimizer_step_limit +# I.e. the first that is reached. +number_of_epochs: 3000 +optimizer_step_limit: 300000 + +# Ideally you want the total batch to be around 2 hours. So you must do +# seconds_per_batch * nb_gpus * grad_accumulation factor = ~7200 seconds. +# If your dataset is bigger, we recommend increasing this batch size to 3 to 4 +# hours. 
+max_batch_len: 400 +train_num_buckets: 200 +grad_accumulation_factor: 2 + +train_dataloader_options: + num_workers: 4 + +test_dataloader_options: + batch_size: 8 # DynamicBatching not used at testing time + num_workers: 4 + +lr: 0.0008 + +# Mel-Filterbank parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 +hop_length: 10 + +# BEST RQ quantiser has a special downsampling mechanism. +# convolutions reduction dim by 4 in the time domain +# so the input to quantizer also needs to reduce dim by 4 +pad_to_divisible_by: 4 + +# Streaming & dynamic chunk training options +# At least for the current architecture on LibriSpeech, we found out that +# non-streaming accuracy is very similar between `streaming: True` and +# `streaming: False`. +streaming: True # controls all Dynamic Chunk Training & chunk size & left context mechanisms + +# Configuration for Dynamic Chunk Training. +dynchunktrain_config_sampler: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler # yamllint disable-line rule:line-length + chunkwise_prob: 0.6 # Probability during a batch to limit attention and sample a random chunk size in the following range + chunk_size_min: 8 # Minimum chunk size (if in a DynChunkTrain batch) + chunk_size_max: 32 # Maximum chunk size (if in a DynChunkTrain batch) + limited_left_context_prob: 0.75 # If in a DynChunkTrain batch, the probability during a batch to restrict left context to a random number of chunks + left_context_chunks_min: 2 # Minimum left context size (in # of chunks) + left_context_chunks_max: 32 # Maximum left context size (in # of chunks) + valid_config: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig + chunk_size: 8 + left_context_size: 16 +####################### Model parameters ########################### +# Transformer +d_model: 576 +nhead: 8 +num_encoder_layers: 12 +num_decoder_layers: 0 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 2048 +encoder_layerdrop: 0.0 + 
+# Masking parameters +mask_length: 4 +mask_prob: 0.15 +noise_mean: 0 +noise_std: 0.1 + +# quantizer (codebook = cb) parameters +p_input: 320 +cb_dim: 16 +cb_vocab: 8192 + + +############################## Models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (128, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + conformer_activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + layerdrop_prob: !ref + +# We must call an encoder wrapper so the decoder isn't run (we don't have any) +wrapper: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper + transformer: !ref + +Quantizer: !new:speechbrain.nnet.quantisers.RandomProjectionQuantizer + input_dim: !ref + cb_dim: !ref + cb_vocab: !ref + +linear: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + wrapper: !ref + Quantizer: !ref + normalize: !ref + linear: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +optimizer: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + weight_decay: 0.01 + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + +############################## running ################################ + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + + +train_steps_logger: 
!new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +train_stage_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalize: !ref + counter: !ref + quantizer: !ref + linear: !ref diff --git a/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/librispeech_prepare.py b/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/librispeech_prepare.py new file mode 120000 index 0000000000..cf4adfd790 --- /dev/null +++ b/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/librispeech_prepare.py @@ -0,0 +1 @@ +../../librispeech_prepare.py \ No newline at end of file diff --git a/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/train.py b/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/train.py new file mode 100644 index 0000000000..a1f642317d --- /dev/null +++ b/recipes/LibriSpeech/self-supervised-learning/BEST-RQ/train.py @@ -0,0 +1,351 @@ +#!/usr/bin/env python3 +"""Recipe for pretraining Best-RQ (https://arxiv.org/pdf/2405.04296) + +To run this recipe call python train.py BEST-RQ.yaml --find_unused_parameters + +Authors: + * Ryan Whetten, 2023 + * Jarod Duret, 2024 +""" + +import sys +import time +from functools import partial + +import torch +import torch.nn.functional as F +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio.dataloader import SaveableDataLoader +from speechbrain.dataio.sampler import DynamicBatchSampler +from speechbrain.lobes.models.BESTRQ import brq_mask_collate_fn +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +class BestRQBrain(sb.core.Brain): + def compute_forward(self, batch, stage): + """Computes forward 
pass through BestRQ model and returns encoded and + target embeddings as well as other metrics of interest. + """ + + if self.hparams.streaming: + dynchunktrain_config = self.hparams.dynchunktrain_config_sampler( + stage + ) + else: + dynchunktrain_config = None + + # get batch and mask + wavs, wav_lens, mask = batch + wavs, wav_lens, mask = ( + wavs.to(self.device), + wav_lens.to(self.device), + mask.to(self.device), + ) + + ### get fbanks and normalize + feats = self.hparams.compute_features(wavs) + current_epoch = self.hparams.epoch_counter.current + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + ### augment data if necessary + if stage == sb.Stage.TRAIN: + if hasattr(self.hparams, "augmentation"): + feats = self.hparams.augmentation(feats) + + divis_by = self.hparams.pad_to_divisible_by + feats = pad_feats(feats, divis_by) + + # get targets from quantizer and stack the frames! + mask_idx = mask[::4] // 4 + B, T, C = feats.shape + targets = self.modules.Quantizer( + feats.view(B, feats.shape[1] // divis_by, -1)[:, mask_idx, :] + ) + + # generate random noise + noise = torch.normal( + mean=self.hparams.noise_mean, + std=self.hparams.noise_std, + size=(B, mask.shape[0], C), + device=self.device, + ) + # replace with random noise + feats[:, mask, :] = noise + + #### convolutions + src = self.modules.CNN(feats) + + ##### transformer + enc_out = self.modules.wrapper( + src, wav_lens, dynchunktrain_config=dynchunktrain_config + ) # only use encoder + + ##### linear + logits = self.modules.linear(enc_out) + + ##### get masked region for loss computation only over these. 
+        logits = logits[:, mask_idx, :]
+
+        B, T, C = logits.shape
+        return logits.view(B * T, C), targets.view(B * T)
+
+    def compute_objectives(self, predictions, batch, stage):
+        pred, targets = predictions
+
+        if stage != sb.Stage.TRAIN and sb.utils.distributed.if_main_process():
+            predicted_classes = torch.argmax(pred, dim=-1)
+            correct_predictions = predicted_classes == targets
+            accuracy = correct_predictions.sum().item() / len(
+                correct_predictions
+            )
+            self.acc_metric.append(accuracy)
+
+        return F.cross_entropy(pred, targets)
+
+    def on_fit_batch_end(self, batch, outputs, loss, should_step):
+        """Called after fit_batch(), updates learning rate and does per-step logging."""
+
+        if should_step:
+            self.hparams.noam_annealing(self.optimizer)
+
+        # Perform step-wise logging
+        if (
+            hasattr(self.hparams, "log_interval")
+            and self.optimizer_step % self.hparams.log_interval == 0
+        ):
+            # Create a dictionary and fill it with everything we
+            # want to log, such as the training loss,
+            # learning rate etc.
+            log_dct = {}
+
+            current_lr = self.optimizer.param_groups[0]["lr"]
+            log_dct["steps"] = self.optimizer_step
+            log_dct["lr"] = current_lr
+            log_dct["avg_loss"] = self.avg_train_loss
+
+            if hasattr(self, "time_last_log"):
+                run_time_since_last_log = time.time() - self.time_last_log
+                log_dct["run_time"] = run_time_since_last_log
+            self.time_last_log = time.time()
+
+            if sb.utils.distributed.if_main_process():
+                self.hparams.train_steps_logger.log_stats(
+                    stats_meta=log_dct,
+                )
+
+    def on_stage_start(self, stage, epoch):
+        """Gets called at the beginning of each epoch"""
+        if stage != sb.Stage.TRAIN:
+            self.acc_metric = []
+
+    def on_stage_end(self, stage, stage_loss, epoch=None):
+        stage_stats = {"loss": stage_loss}
+        if stage == sb.Stage.TRAIN:
+            self.train_stats = stage_stats
+
+        if stage == sb.Stage.VALID:
+            if self.acc_metric:
+                stage_stats["accuracy"] = sum(self.acc_metric) / len(
+                    self.acc_metric
+                )
+
+            self.hparams.train_stage_logger.log_stats(
+                stats_meta={
+                    "epoch": epoch,
+                    "steps": self.optimizer_step,
+                    "lr": self.optimizer.param_groups[0]["lr"],
+                },
+                train_stats=self.train_stats,
+                valid_stats=stage_stats,
+            )
+
+            self.checkpointer.save_and_keep_only(
+                end_of_epoch=True,
+                num_to_keep=3,
+                meta={
+                    "valid_loss": stage_loss,
+                    "epoch": epoch,
+                    "steps": self.optimizer_step,
+                    **stage_stats,
+                },
+            )
+
+
+def pad_feats(feats, divis_by):
+    """BEST-RQ quantizer stacks frames together. Hence, we need to pad the
+    incoming features such that the time dimension is divisible by divis_by.
+
+    Arguments
+    ---------
+    feats: torch.Tensor
+        The feature tensor.
+    divis_by: int
+        The stacking factor. The time dimension of feats will become divisible
+        by this value.
+
+    Returns
+    -------
+    Padded features
+    """
+
+    B, T, C = feats.shape
+
+    #### pad features to enable a reduction by pad_to_divisible_by for the
+    # quantiser of BEST-RQ
+    current_dim_size = T
+    dim_to_pad = 1  # Pad along the second dimension (i.e.
time) + + # Calculate the amount of padding needed to make the tensor divisible + # by divis_by + current_dim_size = feats.shape[dim_to_pad] + # Ensure positive padding + padding_needed = (divis_by - (current_dim_size % divis_by)) % divis_by + + # Define the padding + # Initialize padding for all dimensions, have a look at the documentation of + # torch.nn.functional.pad because the padding argument is quite special. + padding = [0, 0, 0, 0, 0, 0] + padding[dim_to_pad * 2] = ( + padding_needed # Set padding for the chosen dimension + ) + + # add in padding to features and mask + return torch.nn.functional.pad(feats, padding) + + +def dataio_prepare(hparams): + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + # We remove longer and shorter files from the train. + train_data = train_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + key_min_value={"duration": hparams["avoid_if_shorter_than"]}, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + + datasets = [train_data, valid_data] + + def get_output_lengths(input_lengths): + """Function to get the output length of the feature extractor this is + necessary to compute the masks of BestRQ. 
+ """ + sr = hparams["sample_rate"] + hop_length = hparams["hop_length"] + + return (input_lengths // (sr * hop_length / 1000) + 1).to(torch.long) + + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + sb.dataio.dataset.set_output_keys(datasets, ["id", "sig"]) + + # We create the DynamicBatch Sampler + train_sampler = DynamicBatchSampler( + train_data, + hparams["max_batch_len"], + num_buckets=hparams["train_num_buckets"], + length_func=lambda x: x["duration"], + batch_ordering="random", + shuffle=True, + ) + + # We define the custom collation function that is necessary for best-rq to + # generate masks. + brq_mask_collate_fn_partial = partial( + brq_mask_collate_fn, + get_out_len_fn=get_output_lengths, + mask_prob=hparams["mask_prob"], + mask_length=hparams["mask_length"], + n_mels=hparams["n_mels"], + ) + + train_loader_kwargs = { + "batch_sampler": train_sampler, + "collate_fn": brq_mask_collate_fn_partial, + "num_workers": hparams["train_dataloader_options"]["num_workers"], + "pin_memory": True, + } + + valid_loader = SaveableDataLoader( + valid_data, + collate_fn=brq_mask_collate_fn_partial, + num_workers=hparams["test_dataloader_options"]["num_workers"], + batch_size=hparams["test_dataloader_options"]["batch_size"], + pin_memory=True, + ) + + return train_data, valid_loader, train_loader_kwargs + + +def main(): + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + hparams.update(run_opts) + + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + from librispeech_prepare import prepare_librispeech + + run_on_main( + 
prepare_librispeech, + kwargs={ + "data_folder": hparams["data_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "save_folder": hparams["output_folder"], + "merge_lst": hparams["train_splits"], + "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], + }, + ) + + # Part that matters starts here. + train_dataset, valid_loader, train_loader_kwargs = dataio_prepare(hparams) + + brain = BestRQBrain( + modules=hparams["modules"], + opt_class=hparams["optimizer"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + # with torch.autograd.detect_anomaly(): + brain.fit( + brain.hparams.epoch_counter, + train_dataset, + valid_loader, + train_loader_kwargs=train_loader_kwargs, + progressbar=True, + ) + + +if __name__ == "__main__": + main() diff --git a/recipes/LibriSpeech/self-supervised-learning/wav2vec2/README.md b/recipes/LibriSpeech/self-supervised-learning/wav2vec2/README.md index 0c488a9bbb..7df70416fc 100644 --- a/recipes/LibriSpeech/self-supervised-learning/wav2vec2/README.md +++ b/recipes/LibriSpeech/self-supervised-learning/wav2vec2/README.md @@ -6,28 +6,30 @@ This folder contains the scripts to train a wav2vec2 based system using LibriSpe As usual, our goal at SpeechBrain remains to offer as much flexibility to the user as possible. Hence, wav2vec2 pretraining can be achieved in two different ways: fully with SpeechBrain, or following our HuggingFace interface. Both approaches give similar results. Indeed we tested both with a BASE model pretrained on LibriSpeech and fine-tuned on LibriSpeech for ASR, IEMOCAP for emotion recognition and VoxCeleb 1 for speaker identification. Therefore, it is up to the user to decide what training scheme he/she wish to follow. A full SpeechBrain training offers a unique flexibility for further research (e.g. 
changing the loss, changing the architecture, modifying absolutely everything with wav2vec2), while the HuggingFace pretraining offers a good interfacing with the transformers library. -**On LibriSpeech, we officialy provide only a fully SpeechBrain recipe. If you wish to use the HuggingFace pretraining, please go to our [CommonVoice recipe](https://github.com/speechbrain/speechbrain/tree/develop/recipes/CommonVoice/self-supervised-learning/wav2vec2)** +**On LibriSpeech, we officially provide only a fully SpeechBrain recipe. If you wish to use the HuggingFace pretraining, please go to our [CommonVoice recipe](https://github.com/speechbrain/speechbrain/tree/develop/recipes/CommonVoice/self-supervised-learning/wav2vec2)** # Go ! Simply type: -`python train_sb_wav2vec2.py hparams/wav2vec2_base.yaml` +```shell +python train_sb_wav2vec2.py hparams/wav2vec2_base.yaml +``` Do not forget to replace the `!PLACEHOLDER` variables in the yaml corresponding to your local configuration. # Use a pretrained model for fine-tuning with SpeechBrain -The checkpoint generated by this pretraining is a standard PyTorch checkpoint. If you wish to use it as any pretrained model, simply follow our [fine-tuning exemple using our dedicated Pretrainer](#). +The checkpoint generated by this pretraining is a standard PyTorch checkpoint. If you wish to use it as any pretrained model, simply follow our [fine-tuning example using our dedicated Pretrainer](#). 
# Results | Release | Hyperparams file | Pre-Training Dataset | Finetuning Dataset | WER | HuggingFace link | Full model link | GPUs | |:-------------:|:---------------------------:|:---------------------------:| :-----:| :-----:| :-----:| :-----:| :--------:| -| 22-09-22 | wav2vec2_base.yaml | LibriSpeech 960h | LibriSpeech 100h | 7.X (LibriSpeech test-clean) | [Link](https://huggingface.co/speechbrain/ssl-wav2vec2-base-librispeech) | [Link](https://drive.google.com/drive/folders/1eXA6HQtiKfgrPejvvoKvRRfTEvOI3BQt?usp=sharing) | 16xTesla V100 32GB | +| 22-09-22 | wav2vec2_base.yaml | LibriSpeech 960h | LibriSpeech 100h | 7.X (LibriSpeech test-clean) | [Link](https://huggingface.co/speechbrain/ssl-wav2vec2-base-librispeech) | [Link](https://www.dropbox.com/sh/y88z33qtgbl49k4/AAAcVxaBjTh5W_HH99D5UKmka?dl=0) | 16xTesla V100 32GB | # Advices Training wav2vec 2.0 models is crazy w.r.t compute resources. For instance, this recipe only trains a BASE wav2vec 2.0 architecture, and it already requires 16 Tesla V100 for 7 days. Of course, you can scale this to your needs (e.g., you can work with 2 GPUs only), but it will take ages! Welcome to the wav2vec 2.0 world! Here is a list of the most important advices: -- To train w2v2 models, it is **extremely** important to have an effective batch size as high as possible. For instance, the original BASE model is trained with batches containing 1.6H of speech. This means that (duration_per_minibatch * nb_gpu * gradient_accumulation) must be at least equal to 1.6H. +- To train w2v2 models, it is **extremely** important to have an effective batch size as high as possible. For instance, the original BASE model is trained with batches containing 1.6H of speech. This means that (duration_per_minibatch * nb_gpu * grad_accumulation_factor) must be at least equal to 1.6H. - Do not train on sequences longer than 20s, this will blow your VRAM up and is useless for now. Indeed training with shorter sentences (10s) may work just as well. 
- Set the `n_warmup_steps` steps in such a way that it corresponds to 10% of the total training steps. The number of steps correspond to the actual number of call to .backward w.r.t the batch size. diff --git a/recipes/LibriSpeech/self-supervised-learning/wav2vec2/hparams/wav2vec2_base.yaml b/recipes/LibriSpeech/self-supervised-learning/wav2vec2/hparams/wav2vec2_base.yaml index 0fa622217f..13ce0d2203 100644 --- a/recipes/LibriSpeech/self-supervised-learning/wav2vec2/hparams/wav2vec2_base.yaml +++ b/recipes/LibriSpeech/self-supervised-learning/wav2vec2/hparams/wav2vec2_base.yaml @@ -21,7 +21,7 @@ skip_prep: False avoid_if_longer_than: 30.0 avoid_if_shorter_than: 1.5 log_interval: 1000 # Logging every N optimizer steps -auto_mix_prec: True +precision: fp16 # bf16, fp16 or fp32 max_grad_norm: 100. # The training will either stops at number_of_epochs or optimizer_step_limit @@ -30,8 +30,16 @@ number_of_epochs: 3000 optimizer_step_limit: 400000 # Dynamic Batching parameters -train_num_buckets: 70 -seconds_per_batch: 200 # Fits in a 32GB GPUs (V100) +max_batch_length: 200 # Fits in a 32GB GPUs (V100) +num_buckets: 70 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+batch_ordering: random + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref train_dataloader_options: num_workers: 4 @@ -40,7 +48,7 @@ test_dataloader_options: batch_size: 8 # DynamicBatching not used at testing time num_workers: 4 -# Training parameters +####################### Training Parameters #################################### lr: 0.0005 warmup: 30000 # This is equivalent to optimizer_step_limit - warmup @@ -55,7 +63,7 @@ mask_prob: 0.65 mask_length: 10 num_negatives: 100 -# Model parameters +####################### Model Parameters ####################################### embedding_dim: 768 extractor_dim: 512 final_dim: 256 diff --git a/recipes/LibriSpeech/self-supervised-learning/wav2vec2/train_sb_wav2vec2.py b/recipes/LibriSpeech/self-supervised-learning/wav2vec2/train_sb_wav2vec2.py index d4f97de8e7..bec70ccdea 100644 --- a/recipes/LibriSpeech/self-supervised-learning/wav2vec2/train_sb_wav2vec2.py +++ b/recipes/LibriSpeech/self-supervised-learning/wav2vec2/train_sb_wav2vec2.py @@ -4,7 +4,7 @@ See the readme of the recipe for advices on the pretraining that may appear a bit challenging depending on your available resources. 
-To run this recipe call python train.py hparams/train_wav2vec.yaml --find_unused_parameters --max_grad_norm 0.0 +To run this recipe call python train_sb_wav2vec2.py hparams/wav2vec2_base.yaml --find_unused_parameters --max_grad_norm 0.0 Authors * Rudolf Braun 2022 @@ -12,25 +12,27 @@ * Titouan Parcollet 2022 """ -import logging import sys import time from functools import partial -import speechbrain as sb import torch import torch.nn.functional as F -from torch.nn.parallel import DistributedDataParallel from hyperpyyaml import load_hyperpyyaml +from torch.nn.parallel import DistributedDataParallel +import speechbrain as sb from speechbrain import Stage -from speechbrain.utils.distributed import run_on_main from speechbrain.dataio.dataloader import SaveableDataLoader from speechbrain.dataio.sampler import DynamicBatchSampler -from speechbrain.lobes.models.wav2vec import w2v_mask_collate_fn -from speechbrain.lobes.models.wav2vec import sample_negatives +from speechbrain.lobes.models.wav2vec import ( + sample_negatives, + w2v_mask_collate_fn, +) +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class W2V2Brain(sb.core.Brain): @@ -46,13 +48,15 @@ def compute_forward(self, batch, stage): ) batch_size = wavs.size(0) - # Mormalisation already done in dataloader + # Normalisation already done in dataloader # 1. Go through features extractor latents = self.modules.latent_extractor(wavs, normalize_signal=False) # 2. Go through latent (Transformer). results = self.modules.latent_encoder( - latents, mask=mask, wav_lens=wav_lens, + latents, + mask=mask, + wav_lens=wav_lens, ) embeddings = results["embeddings"] @@ -73,8 +77,7 @@ def compute_forward(self, batch, stage): return results def compute_objectives(self, forward_outputs, batch, stage): - """Samples negatives, computes contrastive loss and accuracy. 
- """ + """Samples negatives, computes contrastive loss and accuracy.""" embeddings = forward_outputs["embeddings"] targets = forward_outputs["targets"] @@ -84,7 +87,7 @@ def compute_objectives(self, forward_outputs, batch, stage): loss, accuracy = self.hparams.loss(embeddings, targets, negs) # This is only used for logging purpose - if stage != sb.Stage.TRAIN and sb.utils.distributed.if_main_process(): + if stage != sb.Stage.TRAIN: self.acc_metric.append(accuracy) objectives = { @@ -120,53 +123,30 @@ def compute_objectives(self, forward_outputs, batch, stage): return objectives def fit_batch(self, batch): - should_step = self.step % self.grad_accumulation_factor == 0 + should_step = (self.step % self.grad_accumulation_factor) == 0 + # Managing automatic mixed precision - if self.auto_mix_prec: - with self.no_sync(not should_step): - with torch.cuda.amp.autocast(): - outputs = self.compute_forward(batch, Stage.TRAIN) - objectives = self.compute_objectives( - outputs, batch, Stage.TRAIN - ) - - self.scaler.scale( - objectives["backprop_loss"] / self.grad_accumulation_factor - ).backward() - - objectives["total_loss"] = objectives["backprop_loss"].detach() - if should_step: - self.scaler.unscale_(self.optimizer) - if self.check_gradients(objectives["backprop_loss"]): - self.scaler.step(self.optimizer) - self.optimizer.zero_grad() - self.optimizer_step += 1 - self.scaler.update() - else: - with self.no_sync(not should_step): + with self.no_sync(not should_step): + with self.training_ctx: outputs = self.compute_forward(batch, Stage.TRAIN) objectives = self.compute_objectives( outputs, batch, Stage.TRAIN ) - ( - objectives["backprop_loss"] / self.grad_accumulation_factor - ).backward() - objectives["total_loss"] = objectives["backprop_loss"].detach() + self.scaler.scale( + objectives["backprop_loss"] / self.grad_accumulation_factor + ).backward() - if should_step: - if self.check_gradients(objectives["backprop_loss"]): - self.optimizer.step() - 
self.optimizer.zero_grad() - self.optimizer_step += 1 + objectives["total_loss"] = objectives["backprop_loss"].detach() if should_step: + self.optimizers_step() self.on_fit_batch_end(objectives) return objectives["backprop_loss"].detach() def on_fit_batch_end(self, objectives): - """ Called after fit_batch(), updates learning rate and does per-step logging. """ + """Called after fit_batch(), updates learning rate and does per-step logging.""" if isinstance(self.modules.target_quantiser, DistributedDataParallel): w2v_model = self.modules.target_quantiser.module else: @@ -181,7 +161,6 @@ def on_fit_batch_end(self, objectives): hasattr(self.hparams, "log_interval") and self.optimizer_step % self.hparams.log_interval == 0 ): - # Create a dictionary and fill it with everything we # want to log such as contrastive loss, diversity loss, # learning rate etc. @@ -200,10 +179,12 @@ def on_fit_batch_end(self, objectives): self.time_last_log = time.time() if sb.utils.distributed.if_main_process(): - self.hparams.train_steps_logger.log_stats(stats_meta=log_dct,) + self.hparams.train_steps_logger.log_stats( + stats_meta=log_dct, + ) def evaluate_batch(self, batch, stage): - """ Returns accuracy on contrastive objective. 
""" + """Returns accuracy on contrastive objective.""" out = self.compute_forward(batch, stage=stage) objectives = self.compute_objectives(out, batch, stage=stage) return objectives["backprop_loss"].detach().cpu() @@ -214,13 +195,11 @@ def on_stage_start(self, stage, epoch): self.acc_metric = [] def on_stage_end(self, stage, stage_loss, epoch=None): - stage_stats = {"loss": stage_loss} if stage == sb.Stage.TRAIN: self.train_stats = stage_stats if stage == sb.Stage.VALID: - print(self.acc_metric) stage_stats["accuracy"] = sum(self.acc_metric) / len( self.acc_metric ) @@ -246,7 +225,8 @@ def dataio_prepare(hparams): data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) # We remove longer and shorter files from the train. @@ -257,14 +237,15 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) datasets = [train_data, valid_data] def get_output_lengths(input_lengths): - """ Function to get the output length of the feature extractor this is - necessery to compute the masks of wav2vec2. + """Function to get the output length of the feature extractor this is + necessary to compute the masks of wav2vec2. 
""" def _conv_out_length(input_length, kernel_size, stride): @@ -292,13 +273,12 @@ def audio_pipeline(wav): sb.dataio.dataset.set_output_keys(datasets, ["id", "sig"]) # We create the DynamicBatch Sampler + dynamic_hparams = hparams["dynamic_batch_sampler_train"] + train_sampler = DynamicBatchSampler( train_data, - hparams["seconds_per_batch"], - num_buckets=hparams["train_num_buckets"], + **dynamic_hparams, length_func=lambda x: x["duration"], - batch_ordering="random", - shuffle=True, ) # We define the custom collation function that is necessary for w2v2 to @@ -329,12 +309,11 @@ def audio_pipeline(wav): def main(): - logger.setLevel(logging.INFO) hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) sb.utils.distributed.ddp_init_group(run_opts) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) hparams.update(run_opts) @@ -344,6 +323,10 @@ def main(): overrides=overrides, ) + # Update precision to bf16 if the device is CPU and precision is fp16 + if run_opts.get("device") == "cpu" and hparams.get("precision") == "fp16": + hparams["precision"] = "bf16" + from librispeech_prepare import prepare_librispeech run_on_main( @@ -376,7 +359,6 @@ def main(): train_dataset, valid_loader, train_loader_kwargs=train_loader_kwargs, - progressbar=False, ) diff --git a/recipes/LibriTTS/README.md b/recipes/LibriTTS/README.md index 41b876aa83..4fd7667e24 100644 --- a/recipes/LibriTTS/README.md +++ b/recipes/LibriTTS/README.md @@ -6,21 +6,44 @@ The LibriTTS dataset is available here: https://www.openslr.org/60/, https://www The `libritts_prepare.py` file automatically downloads the dataset if not present and has facilities to provide the names of the subsets to be downloaded. +# Zero-Shot Multi-Speaker Tacotron2 +The subfolder "TTS/mstacotron2" contains the recipe for training a zero-shot multi-speaker version of the [Tacotron2](https://arxiv.org/abs/1712.05884) model. 
+To run this recipe, go into the `"TTS/mstacotron2"` folder and run:
+
+```bash
+python train.py hparams/train.yaml --data_folder=/path/to/libritts_data --device=cuda:0 --max_grad_norm=1.0
+```
+
+Please ensure that you use absolute paths when specifying the data folder.
+
+Training time required on NVIDIA A100 GPU using LibriTTS train-clean-100 and train-clean-360 subsets: ~ 2 hours 54 minutes per epoch
+
+The training logs are available [here](https://www.dropbox.com/sh/ti2vk7sce8f9fgd/AABcDGWCrBvLX_ZQs76mlJRYa?dl=0).
+
+For now, enhancements are needed for training the model from scratch when train-clean-360 is included. Inference can be performed with the `clone_voice_char_input` function in the MSTacotron2 interface.
+
+The pre-trained model (a model fine-tuned from LJSpeech tacotron2) with an easy-inference interface is available on [HuggingFace](https://huggingface.co/speechbrain/tts-mstacotron2-libritts).
+
+**Please Note**: The current model effectively captures speaker identities. Nevertheless, the synthesized speech quality exhibits some metallic characteristics and may include artifacts like overly long pauses.
+We are actively working on enhancing the model and will release updates as soon as improvements are achieved. We warmly welcome contributions from the community to collaboratively make the model even better!
+
 # HiFi GAN (Vocoder)
-The subfolder "vocoder/hifi_gan/" contains the [HiFi GAN vocoder](https://arxiv.org/pdf/2010.05646.pdf).
+The subfolder "vocoder/hifigan/" contains the [HiFi GAN vocoder](https://arxiv.org/pdf/2010.05646.pdf).
 The vocoder is a neural network that converts a spectrogram into a waveform (it can be used on top of Tacotron2).
 We suggest using `tensorboard_logger` by setting `use_tensorboard: True` in the yaml file. Thus, `Tensorboard` should be installed.
To run this recipe, go into the `"vocoder/hifigan/"` folder and run: -``` +```bash python train.py hparams/train.yaml --data_folder=/path/to/LibriTTS ``` -The recipe will automatically download the librispeech dataset and resamples it as specified. +The recipe will automatically download the LibriTTS dataset and resamples it as specified. + +Training time required on NVIDIA A100 GPU using LibriTTS train-clean-100 and train-clean-360 subsets: ~ 1 hour 50 minutes per epoch -The training logs and checkpoints are available [here](https://drive.google.com/drive/folders/1cImFzEonNYhetS9tmH9R_d0EFXXN0zpn?usp=sharing). +The training logs and checkpoints are available [here](https://www.dropbox.com/sh/gjs1kslxkxz819q/AABPriN4dOoD1qL7NoIyVk0Oa?dl=0). To change the sample rate for model training go to the `"vocoder/hifigan/hparams/train.yaml"` file and change the value for `sample_rate` as required. @@ -28,6 +51,58 @@ On HuggingFace, you can find the following pretrained models (with easy-inferenc - https://huggingface.co/speechbrain/tts-hifigan-libritts-22050Hz - https://huggingface.co/speechbrain/tts-hifigan-libritts-16kHz +# HiFiGAN Unit Vocoder +The subfolder "vocoder/hifigan_discrete/" contains the [HiFiGAN Unit vocoder](https://arxiv.org/abs/2406.10735). This vocoder is a neural network designed to transform discrete self-supervised representations into waveform data. +This is suitable for a wide range of generative tasks such as speech enhancement, separation, text-to-speech, voice cloning, etc. Please read [DASB - Discrete Audio and Speech Benchmark](https://arxiv.org/abs/2406.14294) for more information. + +To run this recipe successfully, start by installing the necessary extra dependencies: + +```bash +pip install -r extra_requirements.txt +``` + +Before training the vocoder, you need to choose a speech encoder to extract representations that will be used as discrete audio input. We support k-means models using features from HuBERT, WavLM, or Wav2Vec2. 
Below are the available self-supervised speech encoders for which we provide pre-trained k-means checkpoints: + +| Encoder | HF model | +|----------|-----------------------------------------| +| HuBERT | facebook/hubert-large-ll60k | +| Wav2Vec2 | facebook/wav2vec2-large-960h-lv60-self | +| WavLM | microsoft/wavlm-large | + +Checkpoints are available in the HF [SSL_Quantization](https://huggingface.co/speechbrain/SSL_Quantization) repository. Alternatively, you can train your own k-means model by following instructions in the "LJSpeech/quantization" README. + +Next, configure the SSL model type, k-means model, and corresponding hub in your YAML configuration file. Follow these steps: + +1. Navigate to the "vocoder/hifigan_discrete/hparams" folder and open "train.yaml" file. +2. Modify the `encoder_type` field to specify one of the SSL models: "HuBERT", "WavLM", or "Wav2Vec2". +3. Update the `encoder_hub` field with the specific name of the SSL Hub associated with your chosen model type. + +If you have trained your own k-means model, follow these additional steps: + +4. Update the `kmeans_folder` field with the specific name of the SSL Hub containing your trained k-means model. Please follow the same file structure as the official one in [SSL_Quantization](https://huggingface.co/speechbrain/SSL_Quantization). +5. Update the `kmeans_dataset` field with the specific name of the dataset on which the k-means model was trained. +6. Update the `num_clusters` field according to the number of clusters of your k-means model. + +Finally, navigate back to the "vocoder/hifigan_discrete/" folder and run the following command: + +```bash +python train.py hparams/train.yaml --data_folder=/path/to/LibriTTS +``` + +Additionally, we provide support for external speaker embeddings along with discrete tokens. By default, the speaker model used is ECAPA-TDNN trained on the VoxCeleb dataset. 
For more information, you can find the pretrained model on [HuggingFace](https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb-mel-spec). +To run it, use the following command: + +```bash +python train.py hparams/train_spk.yaml --data_folder=/path/to/LibriTTS +``` + +Training typically takes around 15 minutes per epoch when using an NVIDIA A100 40G. + +On HuggingFace, you can find the following pretrained models (with easy-inference interface): +- https://huggingface.co/speechbrain/hifigan-hubert-l1-3-7-12-18-23-k1000-LibriTTS +- https://huggingface.co/speechbrain/hifigan-wav2vec-l1-3-7-12-18-23-k1000-LibriTTS +- https://huggingface.co/speechbrain/hifigan-wavlm-l1-3-7-12-18-23-k1000-LibriTTS + # **About SpeechBrain** - Website: https://speechbrain.github.io/ - Code: https://github.com/speechbrain/speechbrain/ @@ -38,6 +113,15 @@ On HuggingFace, you can find the following pretrained models (with easy-inferenc Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and 
Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/LibriTTS/TTS/mstacotron2/compute_speaker_embeddings.py b/recipes/LibriTTS/TTS/mstacotron2/compute_speaker_embeddings.py new file mode 100644 index 0000000000..195e03a4ad --- /dev/null +++ b/recipes/LibriTTS/TTS/mstacotron2/compute_speaker_embeddings.py @@ -0,0 +1,130 @@ +import json +import os +import pickle + +import torchaudio +from tqdm import tqdm + +from speechbrain.dataio import audio_io +from speechbrain.inference.classifiers import EncoderClassifier +from speechbrain.inference.encoders import MelSpectrogramEncoder +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +def compute_speaker_embeddings( + input_filepaths, + output_file_paths, + data_folder, + spk_emb_encoder_path, + spk_emb_sr, + mel_spec_params, + device, +): + """This function processes a JSON file to compute the speaker embeddings + + Arguments + --------- + input_filepaths : list + A list of paths to the JSON files to be processed + output_file_paths : list + A list of paths to the output pickle files corresponding to the input JSON files + data_folder : str + Path to the folder where LibriTTS data is stored + spk_emb_encoder_path : str + Path for the speaker encoder + spk_emb_sr : int + Sample rate used by the speaker embedding encoder + mel_spec_params: dict + Information about mel-spectrogram computation + device : str + Device to be used for computation + + Returns + ------- + None + """ + + # Checks if this phase is already done (if so, skips it) + if skip(output_file_paths): + logger.info("Preparation completed in previous run, skipping.") + return + + # Initializes the speaker encoder + spk_emb_encoder = None + if
mel_spec_params["custom_mel_spec_encoder"]: + # To use the custom mel-spectrogram based encoder - for compatibility with future speaker consistency loss work + spk_emb_encoder = MelSpectrogramEncoder.from_hparams( + source=spk_emb_encoder_path, run_opts={"device": device} + ) + else: + # To use the speaker encoders available with SpeechBrain + spk_emb_encoder = EncoderClassifier.from_hparams( + source=spk_emb_encoder_path, run_opts={"device": device} + ) + + # Processes data manifests files to create corresponding speaker embedding files + for i in range(len(input_filepaths)): + logger.info(f"Creating {output_file_paths[i]}.") + + speaker_embeddings = dict() # Holds speaker embeddings + + json_file = open(input_filepaths[i], encoding="utf-8") + json_data = json.load(json_file) + + # Processes all utterances in the data manifest file + for utt_id, utt_data in tqdm(json_data.items()): + utt_wav_path = utt_data["wav"] + utt_wav_path = utt_wav_path.replace("{data_root}", data_folder) + + # Loads and resamples waveforms if required + signal, sig_sr = audio_io.load(utt_wav_path) + if sig_sr != spk_emb_sr: + signal = torchaudio.functional.resample( + signal, sig_sr, spk_emb_sr + ) + signal = signal.to(device) + + # Computes the speaker embedding + if mel_spec_params["custom_mel_spec_encoder"]: + spk_emb = spk_emb_encoder.encode_waveform(signal) + else: + spk_emb = spk_emb_encoder.encode_batch(signal) + + spk_emb = spk_emb.squeeze() + spk_emb = spk_emb.detach() + + speaker_embeddings[utt_id] = spk_emb.cpu() + + # Stores the speaker embeddings at the destination + with open(output_file_paths[i], "wb") as output_file: + pickle.dump( + speaker_embeddings, + output_file, + protocol=pickle.HIGHEST_PROTOCOL, + ) + + logger.info(f"Created {output_file_paths[i]}.") + + +def skip(filepaths): + """ + Detects if the data preparation has been already done. + If the preparation has been done, we can skip it. 
+ + Arguments + --------- + filepaths : list + List of paths to check for existence. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + for filepath in filepaths: + if not os.path.isfile(filepath): + return False + return True diff --git a/recipes/LibriTTS/TTS/mstacotron2/hparams/train.yaml b/recipes/LibriTTS/TTS/mstacotron2/hparams/train.yaml new file mode 100644 index 0000000000..68377c480c --- /dev/null +++ b/recipes/LibriTTS/TTS/mstacotron2/hparams/train.yaml @@ -0,0 +1,283 @@ +############################################################################ +# Model: Zero-Shot Multi-Speaker Tacotron2 +# Tokens: ARPAbet Phonemes +# Training: LibriTTS +# Authors: Georges Abou-Rjeili, Artem Ploujnikov, Yingzhi Wang, Pradnya Kandarkar +# ############################################################################ + + +################################### +# Experiment Parameters and setup # +################################### +seed: 1234 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref ./results/tacotron2/ +save_folder: !ref /save +train_log: !ref /train_log.txt +epochs: 700 +keep_checkpoint_interval: 50 +use_tensorboard: False + +# Vocoder is used to convert the intermediate mel-spectrogram into the final waveform +log_audio_samples: True +vocoder: speechbrain/tts-hifigan-libritts-16kHz +vocoder_savedir: tmpdir_vocoder_16k + +################################### +# Progress Samples # +################################### +# Progress samples are used to monitor the progress +# of an ongoing training session by outputting samples +# of spectrogram, alignment, etc. at regular intervals + +# Whether to enable progress samples +progress_samples: True + +# The path where the samples will be stored +progress_sample_path: !ref /samples + +# The interval, in epochs. 
For instance, if it is set to 5, +# progress samples will be output every 5 epochs +progress_samples_interval: 10 + +# The sample size for raw batch samples saved in batch.pth +# (useful mostly for model debugging) +progress_batch_sample_size: 3 + +################################# +# Data files and pre-processing # +################################# +data_folder: !PLACEHOLDER # e.g, /localscratch/LibriTTS/ + +# Files to hold the manifest data +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json + +# Files to hold the speaker embeddings - corresponding to the data manifest files +train_speaker_embeddings_pickle: !ref /train_speaker_embeddings.pickle +valid_speaker_embeddings_pickle: !ref /valid_speaker_embeddings.pickle +test_speaker_embeddings_pickle: !ref /test_speaker_embeddings.pickle + +# Data splits +skip_prep: False +splits: ["train", "valid", "test"] + +# train_split: ["train-clean-100", "train-clean-360"] +train_split: ["train-clean-100"] +valid_split: ["dev-clean"] +test_split: ["test-clean"] + +# Use the original preprocessing from nvidia +# The cleaners to be used (applicable to nvidia only) +text_cleaners: ['english_cleaners'] + +# Avoid audios longer than x seconds +avoid_if_longer_than: 10.0 +################################ +# Audio Parameters # +################################ +sample_rate: 16000 +hop_length: 256 +win_length: 1024 +n_mel_channels: 80 +n_fft: 1024 +mel_fmin: 0.0 +mel_fmax: 8000.0 +mel_normalized: False +power: 1 +norm: "slaney" +mel_scale: "slaney" +dynamic_range_compression: True + +################################ +# Speaker Embedding Parameters # +################################ +spk_emb_size: 192 +spk_emb_sample_rate: 16000 +custom_mel_spec_encoder: False +spk_emb_encoder: speechbrain/spkrec-ecapa-voxceleb + +# To use the custom mel-spectrogram based encoder - for compatibility with future speaker consistency loss work +# 1. Change "custom_mel_spec_encoder" to True +# 2. 
Change the path for "spk_emb_encoder". +# The ECAPA-TDNN model used for the Zero-Shot Multi-Speaker Tacotron2 experiments is available here: speechbrain/spkrec-ecapa-voxceleb-mel-spec + +################################ +# Optimization Hyperparameters # +################################ +learning_rate: 0.001 +weight_decay: 0.000006 +batch_size: 32 #minimum 2 +mask_padding: True +guided_attention_sigma: 0.2 +guided_attention_weight: 25.0 +guided_attention_weight_half_life: 25. +guided_attention_hard_stop: 50 +gate_loss_weight: 1.0 +spk_emb_loss_weight: 1.0 + +train_dataloader_opts: + batch_size: !ref + drop_last: True #True #False + num_workers: 8 + collate_fn: !new:speechbrain.lobes.models.MSTacotron2.TextMelCollate + speaker_embeddings_pickle: !ref + +valid_dataloader_opts: + batch_size: !ref + drop_last: True + num_workers: 8 + collate_fn: !new:speechbrain.lobes.models.MSTacotron2.TextMelCollate + speaker_embeddings_pickle: !ref + +test_dataloader_opts: + batch_size: !ref + drop_last: True + num_workers: 8 + collate_fn: !new:speechbrain.lobes.models.MSTacotron2.TextMelCollate + speaker_embeddings_pickle: !ref + +############################### +# Model Parameters and model # +############################### +n_symbols: 148 #fixed depending on symbols in textToSequence +symbols_embedding_dim: 1024 + +# Encoder parameters +encoder_kernel_size: 5 +encoder_n_convolutions: 6 +encoder_embedding_dim: 1024 + +# Decoder parameters +# The number of frames in the target per encoder step +n_frames_per_step: 1 +decoder_rnn_dim: 2048 +prenet_dim: 512 +max_decoder_steps: 1500 +gate_threshold: 0.5 +p_attention_dropout: 0.1 +p_decoder_dropout: 0.1 +decoder_no_early_stopping: False + +# Attention parameters +attention_rnn_dim: 2048 +attention_dim: 256 + +# Location Layer parameters +attention_location_n_filters: 32 +attention_location_kernel_size: 31 + +# Mel-post processing network parameters +postnet_embedding_dim: 1024 +postnet_kernel_size: 5 +postnet_n_convolutions: 10 + +# To 
compute the mel-spectrogram for an audio +mel_spectogram: !name:speechbrain.lobes.models.Tacotron2.mel_spectogram + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_fft: !ref + n_mels: !ref + f_min: !ref + f_max: !ref + power: !ref + normalized: !ref + norm: !ref + mel_scale: !ref + compression: !ref + +# Zero-Shot Multi-Speaker Tacotron2 model +model: !new:speechbrain.lobes.models.MSTacotron2.Tacotron2 + mask_padding: !ref + n_mel_channels: !ref + # Symbols + n_symbols: !ref + symbols_embedding_dim: !ref + # Encoder + encoder_kernel_size: !ref + encoder_n_convolutions: !ref + encoder_embedding_dim: !ref + # Attention + attention_rnn_dim: !ref + attention_dim: !ref + # Attention location + attention_location_n_filters: !ref + attention_location_kernel_size: !ref + # Decoder + n_frames_per_step: !ref + decoder_rnn_dim: !ref + prenet_dim: !ref + max_decoder_steps: !ref + gate_threshold: !ref + p_attention_dropout: !ref + p_decoder_dropout: !ref + # Postnet + postnet_embedding_dim: !ref + postnet_kernel_size: !ref + postnet_n_convolutions: !ref + decoder_no_early_stopping: !ref + # Speaker embeddings + spk_emb_size: !ref + +# Scheduler for guided attention +guided_attention_scheduler: !new:speechbrain.nnet.schedulers.StepScheduler + initial_value: !ref + half_life: !ref + +# Loss function +criterion: !new:speechbrain.lobes.models.MSTacotron2.Loss + gate_loss_weight: !ref + guided_attention_weight: !ref + guided_attention_sigma: !ref + guided_attention_scheduler: !ref + guided_attention_hard_stop: !ref + spk_emb_loss_weight: !ref + +# Overall modules used +modules: + model: !ref + +# Optimizer +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: !ref + +# To keep track of the epochs +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +# To log training information +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +# # Learning rate annealing function +lr_annealing: 
!new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 4000 + +# Checkpointer +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + counter: !ref + scheduler: !ref + +# Progress sample logger +progress_sample_logger: !new:speechbrain.utils.train_logger.ProgressSampleLogger + output_path: !ref + batch_sample_size: !ref + formats: + raw_batch: raw + + +# Pretrained separator - Use when fine-tuning - REMOVE IF NOT REQUIRED +# tacotron2_model_path: !PLACEHOLDER +# pretrained_separator: !new:speechbrain.utils.parameter_transfer.Pretrainer +# collect_in: !ref +# loadables: +# model: !ref +# paths: +# model: !ref /model.ckpt diff --git a/recipes/LibriTTS/TTS/mstacotron2/libritts_prepare.py b/recipes/LibriTTS/TTS/mstacotron2/libritts_prepare.py new file mode 120000 index 0000000000..489ab40118 --- /dev/null +++ b/recipes/LibriTTS/TTS/mstacotron2/libritts_prepare.py @@ -0,0 +1 @@ +../../libritts_prepare.py \ No newline at end of file diff --git a/recipes/LibriTTS/TTS/mstacotron2/train.py b/recipes/LibriTTS/TTS/mstacotron2/train.py new file mode 100644 index 0000000000..90e09467ac --- /dev/null +++ b/recipes/LibriTTS/TTS/mstacotron2/train.py @@ -0,0 +1,670 @@ +""" +Recipe for training the Zero-Shot Multi-Speaker Tacotron Text-To-Speech model, an end-to-end +neural text-to-speech (TTS) system + +To run this recipe, do the following: +# python train.py --device=cuda:0 --max_grad_norm=1.0 --data_folder=/path_to_data_folder hparams/train.yaml + +Authors +* Georges Abou-Rjeili 2021 +* Artem Ploujnikov 2021 +* Yingzhi Wang 2022 +* Pradnya Kandarkar 2023 +""" + +import os +import sys + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.inference.vocoders import HIFIGAN +from speechbrain.utils.data_utils import scalarize +from speechbrain.utils.logger import get_logger +from 
speechbrain.utils.text_to_sequence import text_to_sequence + +os.environ["TOKENIZERS_PARALLELISM"] = "false" +logger = get_logger(__name__) + + +class Tacotron2Brain(sb.Brain): + """The Brain implementation for Tacotron2""" + + def on_fit_start(self): + """Gets called at the beginning of ``fit()``, on multiple processes + if ``distributed_count > 0`` and backend is ddp and initializes statistics + """ + self.hparams.progress_sample_logger.reset() + self.last_epoch = 0 + self.last_batch = None + self.last_preds = None + + # Instantiate a vocoder if audio samples should be logged + if self.hparams.log_audio_samples: + self.vocoder = HIFIGAN.from_hparams( + source=self.hparams.vocoder, + savedir=self.hparams.vocoder_savedir, + run_opts={"device": self.device}, + freeze_params=True, + ) + + self.last_loss_stats = {} + return super().on_fit_start() + + def compute_forward(self, batch, stage): + """Computes the forward pass + + Arguments + --------- + batch: str + a single batch + stage: speechbrain.Stage + the training stage + + Returns + ------- + the model output + """ + effective_batch = self.batch_to_device(batch) + inputs, y, num_items, _, _, spk_embs, spk_ids = effective_batch + + _, input_lengths, _, _, _ = inputs + + max_input_length = input_lengths.max().item() + + return self.modules.model( + inputs, spk_embs, alignments_dim=max_input_length + ) + + def fit_batch(self, batch): + """Fits a single batch and applies annealing + + Arguments + --------- + batch: tuple + a training batch + + Returns + ------- + loss: torch.Tensor + detached loss + """ + result = super().fit_batch(batch) + self.hparams.lr_annealing(self.optimizer) + return result + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted outputs + + Arguments + --------- + predictions : torch.Tensor + The model generated mel-spectrograms and other metrics from `compute_forward` + batch : PaddedBatch + This batch object contains all the 
relevant tensors for computation + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST + + Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient + """ + effective_batch = self.batch_to_device(batch) + # Hold on to the batch for the inference sample. + # This is needed because the inference sample is run from on_stage_end only, + # where batch information is not available + self.last_batch = effective_batch + self.last_preds = predictions + # Hold on to a sample (for logging) + self._remember_sample(effective_batch, predictions) + # Compute the loss + loss = self._compute_loss(predictions, effective_batch, stage) + return loss + + def _compute_loss(self, predictions, batch, stage): + """Computes the value of the loss function and updates stats + + Arguments + --------- + predictions: tuple + model predictions + batch : PaddedBatch + This batch object contains all the relevant tensors for computation + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST + + Returns + ------- + loss: torch.Tensor + the loss value + """ + inputs, targets, num_items, labels, wavs, spk_embs, spk_ids = batch + text_padded, input_lengths, _, max_len, output_lengths = inputs + + # Speaker embedding input to compute speaker consistency loss - WIP + spk_emb_input = None + + loss_stats = self.hparams.criterion( + predictions, + targets, + input_lengths, + output_lengths, + spk_emb_input, + self.last_epoch, + ) + self.last_loss_stats[stage] = scalarize(loss_stats) + return loss_stats.loss + + def _remember_sample(self, batch, predictions): + """Remembers samples of spectrograms and the batch for logging purposes + + Arguments + --------- + batch: tuple + a training batch + predictions: tuple + predictions (raw output of the Tacotron model) + """ + inputs, targets, num_items, labels, wavs, spk_embs, spk_ids = batch + text_padded, input_lengths, _, max_len, output_lengths = inputs + mel_target, _ = 
targets + ( + mel_out, + mel_out_postnet, + gate_out, + alignments, + pred_mel_lengths, + ) = predictions + alignments_max = ( + alignments[0] + .max(dim=-1) + .values.max(dim=-1) + .values.unsqueeze(-1) + .unsqueeze(-1) + ) + alignments_output = alignments[0].T.flip(dims=(1,)) / alignments_max + self.hparams.progress_sample_logger.remember( + target=self._get_spectrogram_sample(mel_target), + output=self._get_spectrogram_sample(mel_out), + output_postnet=self._get_spectrogram_sample(mel_out_postnet), + alignments=alignments_output, + raw_batch=self.hparams.progress_sample_logger.get_batch_sample( + { + "text_padded": text_padded, + "input_lengths": input_lengths, + "mel_target": mel_target, + "mel_out": mel_out, + "mel_out_postnet": mel_out_postnet, + "max_len": max_len, + "output_lengths": output_lengths, + "gate_out": gate_out, + "alignments": alignments, + "labels": labels, + "wavs": wavs, + "spk_embs": spk_embs, + "spk_ids": spk_ids, + } + ), + ) + + def batch_to_device(self, batch): + """Transfers the batch to the target device + + Arguments + --------- + batch: tuple + the batch to use + + Returns + ------- + batch: tuple + the batch on the correct device + """ + ( + text_padded, + input_lengths, + mel_padded, + gate_padded, + output_lengths, + len_x, + labels, + wavs, + spk_embs, + spk_ids, + ) = batch + text_padded = text_padded.to(self.device, non_blocking=True).long() + input_lengths = input_lengths.to(self.device, non_blocking=True).long() + max_len = torch.max(input_lengths.data).item() + mel_padded = mel_padded.to(self.device, non_blocking=True).float() + gate_padded = gate_padded.to(self.device, non_blocking=True).float() + + output_lengths = output_lengths.to( + self.device, non_blocking=True + ).long() + x = (text_padded, input_lengths, mel_padded, max_len, output_lengths) + y = (mel_padded, gate_padded) + len_x = torch.sum(output_lengths) + spk_embs = spk_embs.to(self.device, non_blocking=True).float() + return (x, y, len_x, labels, wavs, 
spk_embs, spk_ids) + + def _get_spectrogram_sample(self, raw): + """Converts a raw spectrogram to one that can be saved as an image + sample = sqrt(exp(raw)) + + Arguments + --------- + raw: torch.Tensor + the raw spectrogram (as used in the model) + + Returns + ------- + sample: torch.Tensor + the spectrogram, for image saving purposes + """ + sample = raw[0] + return torch.sqrt(torch.exp(sample)) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + + Returns + ------- + None + """ + + # Logs training samples every 10 epochs + if stage == sb.Stage.TRAIN and ( + self.hparams.epoch_counter.current % 10 == 0 + ): + if self.last_batch is None: + return + + train_sample_path = os.path.join( + self.hparams.progress_sample_path, + str(self.hparams.epoch_counter.current), + ) + if not os.path.exists(train_sample_path): + os.makedirs(train_sample_path) + + _, targets, _, labels, wavs, spk_embs, spk_ids = self.last_batch + + train_sample_text = os.path.join( + self.hparams.progress_sample_path, + str(self.hparams.epoch_counter.current), + "train_input_text.txt", + ) + with open(train_sample_text, "w", encoding="utf-8") as f: + f.write(labels[0]) + + train_input_audio = os.path.join( + self.hparams.progress_sample_path, + str(self.hparams.epoch_counter.current), + "train_input_audio.wav", + ) + audio_io.save( + train_input_audio, + sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0), + self.hparams.sample_rate, + ) + + _, mel_out_postnet, _, _, pred_mel_lengths = self.last_preds + + if self.hparams.log_audio_samples: + waveform_ss = self.vocoder.decode_batch(mel_out_postnet[0]) + train_sample_audio = os.path.join( + self.hparams.progress_sample_path, + 
str(self.hparams.epoch_counter.current), + "train_output_audio.wav", + ) + audio_io.save( + train_sample_audio, + waveform_ss.squeeze(1).cpu(), + self.hparams.sample_rate, + ) + + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_audio( + f"{stage}/train_audio_target", + sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0), + self.hparams.sample_rate, + ) + if self.hparams.log_audio_samples: + self.tensorboard_logger.log_audio( + f"{stage}/train_audio_pred", + waveform_ss.squeeze(1), + self.hparams.sample_rate, + ) + try: + self.tensorboard_logger.log_figure( + f"{stage}/train_mel_target", targets[0][0] + ) + self.tensorboard_logger.log_figure( + f"{stage}/train_mel_pred", mel_out_postnet[0] + ) + except Exception: + # This is to avoid the code from crashing in case of a mel-spectrogram with one frame + pass + + # At the end of validation, we can write + if stage == sb.Stage.VALID: + # Update learning rate + lr = self.optimizer.param_groups[-1]["lr"] + self.last_epoch = epoch + + # The train_logger writes a summary to stdout and to the logfile. + self.hparams.train_logger.log_stats( # 1#2# + stats_meta={"Epoch": epoch, "lr": lr}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=self.last_loss_stats[sb.Stage.VALID], + ) + + # The tensorboard_logger writes a summary to stdout and to the logfile. + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + stats_meta={"Epoch": epoch, "lr": lr}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=self.last_loss_stats[sb.Stage.VALID], + ) + + # Save the current checkpoint and delete previous checkpoints. 
+ epoch_metadata = { + **{"epoch": epoch}, + **self.last_loss_stats[sb.Stage.VALID], + } + self.checkpointer.save_and_keep_only( + meta=epoch_metadata, + min_keys=["loss"], + ckpt_predicate=( + ( + lambda ckpt: ( + ckpt.meta["epoch"] + % self.hparams.keep_checkpoint_interval + != 0 + ) + ) + if self.hparams.keep_checkpoint_interval is not None + else None + ), + ) + output_progress_sample = ( + self.hparams.progress_samples + and epoch % self.hparams.progress_samples_interval == 0 + ) + if output_progress_sample: + self.run_inference_sample(sb.Stage.VALID) + self.hparams.progress_sample_logger.save(epoch) + + # We also write statistics about test data to stdout and to the logfile. + if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + if self.hparams.progress_samples: + self.run_inference_sample(sb.Stage.TEST) + self.hparams.progress_sample_logger.save("test") + + def run_inference_sample(self, stage): + """Produces a sample in inference mode. 
This is called when producing + samples and can be useful because""" + + if self.last_batch is None: + return + inputs, targets, _, labels, wavs, spk_embs, spk_ids = self.last_batch + text_padded, input_lengths, _, _, _ = inputs + + mel_out, _, _ = self.hparams.model.infer( + text_padded[:1], spk_embs[:1], input_lengths[:1] + ) + self.hparams.progress_sample_logger.remember( + inference_mel_out=self._get_spectrogram_sample(mel_out) + ) + + if stage == sb.Stage.VALID: + inf_sample_path = os.path.join( + self.hparams.progress_sample_path, + str(self.hparams.epoch_counter.current), + ) + + if not os.path.exists(inf_sample_path): + os.makedirs(inf_sample_path) + + inf_sample_text = os.path.join( + self.hparams.progress_sample_path, + str(self.hparams.epoch_counter.current), + "inf_input_text.txt", + ) + with open(inf_sample_text, "w", encoding="utf-8") as f: + f.write(labels[0]) + + inf_input_audio = os.path.join( + self.hparams.progress_sample_path, + str(self.hparams.epoch_counter.current), + "inf_input_audio.wav", + ) + audio_io.save( + inf_input_audio, + sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0), + self.hparams.sample_rate, + ) + + if self.hparams.log_audio_samples: + waveform_ss = self.vocoder.decode_batch(mel_out) + inf_sample_audio = os.path.join( + self.hparams.progress_sample_path, + str(self.hparams.epoch_counter.current), + "inf_output_audio.wav", + ) + audio_io.save( + inf_sample_audio, + waveform_ss.squeeze(1).cpu(), + self.hparams.sample_rate, + ) + + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_audio( + f"{stage}/inf_audio_target", + sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0), + self.hparams.sample_rate, + ) + if self.hparams.log_audio_samples: + self.tensorboard_logger.log_audio( + f"{stage}/inf_audio_pred", + waveform_ss.squeeze(1), + self.hparams.sample_rate, + ) + try: + self.tensorboard_logger.log_figure( + f"{stage}/inf_mel_target", targets[0][0] + ) + self.tensorboard_logger.log_figure( + f"{stage}/inf_mel_pred", 
mel_out + ) + except Exception: + # This is to avoid the code from crashing in case of a mel-spectrogram with one frame + pass + + +def dataio_prepare(hparams): + # Define audio pipeline: + + @sb.utils.data_pipeline.takes("wav", "label") + @sb.utils.data_pipeline.provides("mel_text_pair") + def audio_pipeline(wav, label): + text_seq = torch.IntTensor( + text_to_sequence(label, hparams["text_cleaners"]) + ) + + audio, sig_sr = audio_io.load(wav) + if sig_sr != hparams["sample_rate"]: + audio = torchaudio.functional.resample( + audio, sig_sr, hparams["sample_rate"] + ) + + mel = hparams["mel_spectogram"](audio=audio.squeeze()) + + len_text = len(text_seq) + + return text_seq, mel, len_text + + datasets = {} + data_info = { + "train": hparams["train_json"], + "valid": hparams["valid_json"], + "test": hparams["test_json"], + } + for dataset in hparams["splits"]: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline], + output_keys=["mel_text_pair", "wav", "label", "uttid"], + ) + + datasets[dataset] = datasets[dataset].filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + + return datasets + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Prepare data + if not hparams["skip_prep"]: + sys.path.append("../../") + from libritts_prepare import prepare_libritts 
+ + sb.utils.distributed.run_on_main( + prepare_libritts, + kwargs={ + "data_folder": hparams["data_folder"], + "save_json_train": hparams["train_json"], + "save_json_valid": hparams["valid_json"], + "save_json_test": hparams["test_json"], + "sample_rate": hparams["sample_rate"], + "train_split": hparams["train_split"], + "valid_split": hparams["valid_split"], + "test_split": hparams["test_split"], + "seed": hparams["seed"], + "model_name": hparams["model"].__class__.__name__, + }, + ) + + from compute_speaker_embeddings import compute_speaker_embeddings + + sb.utils.distributed.run_on_main( + compute_speaker_embeddings, + kwargs={ + "input_filepaths": [ + hparams["train_json"], + hparams["valid_json"], + hparams["test_json"], + ], + "output_file_paths": [ + hparams["train_speaker_embeddings_pickle"], + hparams["valid_speaker_embeddings_pickle"], + hparams["test_speaker_embeddings_pickle"], + ], + "data_folder": hparams["data_folder"], + "spk_emb_encoder_path": hparams["spk_emb_encoder"], + "spk_emb_sr": hparams["spk_emb_sample_rate"], + "mel_spec_params": { + "custom_mel_spec_encoder": hparams["custom_mel_spec_encoder"], + "sample_rate": hparams["spk_emb_sample_rate"], + "hop_length": hparams["hop_length"], + "win_length": hparams["win_length"], + "n_mel_channels": hparams["n_mel_channels"], + "n_fft": hparams["n_fft"], + "mel_fmin": hparams["mel_fmin"], + "mel_fmax": hparams["mel_fmax"], + "mel_normalized": hparams["mel_normalized"], + "power": hparams["power"], + "norm": hparams["norm"], + "mel_scale": hparams["mel_scale"], + "dynamic_range_compression": hparams[ + "dynamic_range_compression" + ], + }, + "device": run_opts["device"], + }, + ) + + datasets = dataio_prepare(hparams) + + # Brain class initialization + tacotron2_brain = Tacotron2Brain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Load pretrained model if pretrained_separator is present in the 
yaml + if "pretrained_separator" in hparams: + sb.utils.distributed.run_on_main( + hparams["pretrained_separator"].collect_files + ) + hparams["pretrained_separator"].load_collected( + device=run_opts["device"] + ) + + if hparams["use_tensorboard"]: + tacotron2_brain.tensorboard_logger = ( + sb.utils.train_logger.TensorboardLogger( + save_dir=hparams["output_folder"] + "/tensorboard" + ) + ) + + # Training + tacotron2_brain.fit( + tacotron2_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + # Test + if "test" in datasets: + tacotron2_brain.evaluate( + datasets["test"], + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/LibriTTS/focalcodec/README.md b/recipes/LibriTTS/focalcodec/README.md new file mode 100644 index 0000000000..2bdcee458a --- /dev/null +++ b/recipes/LibriTTS/focalcodec/README.md @@ -0,0 +1,144 @@ +# FocalCodec: Low-Bitrate Speech Coding via Focal Modulation Networks + +**Project Page**: https://lucadellalib.github.io/focalcodec-web/ + +This folder contains recipes for training FocalCodec on LibriTTS. You can download LibriTTS from https://www.openslr.org/60/. +FocalCodec is a low-bitrate single-codebook speech codec based on [focal modulation](https://arxiv.org/abs/2203.11926). + +For more information, check our papers: + +- [FocalCodec: Low-Bitrate Speech Coding via Focal Modulation Networks](https://arxiv.org/abs/2502.04465) + +- [FocalCodec-Stream: Streaming Low-Bitrate Speech Coding via Causal Distillation](https://arxiv.org/abs/2509.16195) + + + +--------------------------------------------------------------------------------------------------------- + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
+To do so, simply run the following command in your terminal:
+
+```bash
+pip install -r extra_requirements.txt
+```
+
+---------------------------------------------------------------------------------------------------------
+
+## Running an Experiment
+
+Training FocalCodec is a two-stage process:
+
+1. **Train the decoder** to reconstruct waveforms from continuous speech representations (WavLM6 in our case).
+2. **Train the quantization pipeline** (compressor, quantizer, decompressor) using the same representations.
+
+---------------------------------------------------------------------------------------------------------
+
+### 1. Train the Decoder
+
+```bash
+python train_decoder.py hparams/vocos.yaml --data_folder <path-to-data-folder>
+```
+
+This step trains a decoder to map encoder features back into high-quality audio.
+UTMOS, dWER, and speaker similarity are computed on the test set to assess the resynthesis performance.
+
+---------------------------------------------------------------------------------------------------------
+
+### 2. Train the Quantization Pipeline
+
+```bash
+python train_quantizer.py hparams/bsq.yaml --data_folder <path-to-data-folder>
+```
+
+This stage trains the compressor, quantizer, and decompressor.
+Note that it can be run in parallel with decoder training, since both stages operate on the same continuous encoder representations.
+
+To monitor the end-to-end resynthesis performance during training, you can provide the previously trained decoder checkpoint:
+
+```bash
+python train_quantizer.py hparams/bsq.yaml --data_folder <path-to-data-folder> --decoder_checkpoint <path-to-decoder-checkpoint>
+```
+
+---------------------------------------------------------------------------------------------------------
+
+## Results
+
+Note that this is a SpeechBrain adaptation of the original training code.
+Some implementation details may differ, which can lead to slightly different results compared to the original implementation.
+
+For reference, we include the resynthesis results from the paper, obtained on **LibriSpeech test-clean**:
+
+| Checkpoint | Train Data | Sample<br>Rate (kHz) | Token<br>Rate (Hz) | Codebooks | Bitrate<br>(kbps) | UTMOS | dWER (%) | Sim |
+| :-------------------------------------------------------------------------------------: | :----------: |:---------------------:|:-------------------:| :-------: |:------------------:| :---: | :------: |:----:|
+| [lucadellalib/focalcodec_50hz](https://huggingface.co/lucadellalib/focalcodec_50hz) | LibriTTS-960 | 16 | 50.0 | 1x8192 | 0.65 | 4.05 | 2.18 | 97.4 |
+| [lucadellalib/focalcodec_25hz](https://huggingface.co/lucadellalib/focalcodec_25hz) | LibriTTS-960 | 16 | 25.0 | 1x8192 | 0.33 | 4.14 | 3.30 | 96.3 |
+| [lucadellalib/focalcodec_12_5hz](https://huggingface.co/lucadellalib/focalcodec_12_5hz) | LibriTTS-960 | 16 | 12.5 | 1x8192 | 0.16 | 4.22 | 7.94 | 93.9 |
+
+The original training logs can be found at: [https://www.dropbox.com/scl/fo/o652m0qow1hs428ppocx3/ABiZp8xIK4d6iTcl-JXbn0s?rlkey=6cka0iabo2kzjg44if2kdgsvu&st=yqwv7x0w&dl=0](https://www.dropbox.com/scl/fo/o652m0qow1hs428ppocx3/ABiZp8xIK4d6iTcl-JXbn0s?rlkey=6cka0iabo2kzjg44if2kdgsvu&st=yqwv7x0w&dl=0).
+
+The original checkpoints can be found at: [https://huggingface.co/collections/lucadellalib/focalcodec](https://huggingface.co/collections/lucadellalib/focalcodec).
+
+The inference code can be found at: [https://github.com/lucadellalib/focalcodec](https://github.com/lucadellalib/focalcodec).
+
+---------------------------------------------------------------------------------------------------------
+
+## About SpeechBrain
+
+- Website: https://speechbrain.github.io/
+- Code: https://github.com/speechbrain/speechbrain/
+- HuggingFace: https://huggingface.co/speechbrain/
+
+---------------------------------------------------------------------------------------------------------
+
+## Citing FocalCodec
+
+Please cite FocalCodec if you use it for your research or business.
+ +```bibtex +@inproceedings{dellalibera2025focalcodec, + title = {{FocalCodec}: Low-Bitrate Speech Coding via Focal Modulation Networks}, + author = {Luca {Della Libera} and Francesco Paissan and Cem Subakan and Mirco Ravanelli}, + booktitle = {Advances in Neural Information Processing Systems}, + year = {2025}, +} +``` + +```bibtex +@article{dellalibera2025focalcodecstream, + title = {{FocalCodec-Stream}: Streaming Low-Bitrate Speech Coding via Causal Distillation}, + author = {Luca {Della Libera} and Cem Subakan and Mirco Ravanelli}, + journal = {arXiv preprint arXiv:2509.16195}, + year = {2025}, +} +``` + +--------------------------------------------------------------------------------------------------------- + +## Citing SpeechBrain + +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@article{speechbrainV1, + author = {Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca {Della Libera} and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Ha Nguyen and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Ga{{\"e}}lle Laperri{{\`e}}re and Mickael Rouvier and Renato De Mori and Yannick Est{{\`e}}ve}, + title = {Open-Source Conversational {AI} with {SpeechBrain} 1.0}, + journal = {Journal of Machine Learning Research}, + year = {2024}, + volume = {25}, + number = {333}, + pages = {1--11}, + url = {http://jmlr.org/papers/v25/24-0991.html} +} +``` + +```bibtex +@article{ravanelli2021speechbrain, + author = {Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and 
Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + title = {{SpeechBrain}: A General-Purpose Speech Toolkit}, + journal = {arXiv preprint arXiv:2106.04624}, + year = {2021}, + url = {https://arxiv.org/abs/2106.04624}, +} +``` diff --git a/recipes/LibriTTS/focalcodec/extra_requirements.txt b/recipes/LibriTTS/focalcodec/extra_requirements.txt new file mode 100644 index 0000000000..04ad88679c --- /dev/null +++ b/recipes/LibriTTS/focalcodec/extra_requirements.txt @@ -0,0 +1,2 @@ +focalcodec@git+https://github.com/lucadellalib/focalcodec.git@main#egg=focalcodec +transformers diff --git a/recipes/LibriTTS/focalcodec/hparams/bsq.yaml b/recipes/LibriTTS/focalcodec/hparams/bsq.yaml new file mode 100644 index 0000000000..41a378ac7b --- /dev/null +++ b/recipes/LibriTTS/focalcodec/hparams/bsq.yaml @@ -0,0 +1,309 @@ +# ########################################################################################### +# Model: BSQ +# Authors: Luca Della Libera 2025 +# ########################################################################################### + +experiment_name: bsq + +# Seed needs to be set at top of YAML +seed: 0 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Data preparation +data_folder: !PLACEHOLDER +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json +train_split: + - train-clean-100 + - train-clean-360 + - train-other-500 +valid_split: [dev-clean] +test_split: [test-clean] +skip_prep: False + +# Output folders +output_folder: !ref results// +save_folder: !ref /save +cache_folder: !name:huggingface_hub.constants.HUGGINGFACE_HUB_CACHE + +# Save options +compute_metrics: True +save_audios: True + +# Preprocessing parameters +sample_rate: 16000 +audio_backend: soundfile +train_remove_if_longer: 60.0 # Seconds +valid_remove_if_longer: 60.0 # Seconds 
+test_remove_if_longer: 60.0 # Seconds +sorting: random + +# Training parameters +num_epochs: 100 +grad_accumulation_factor: 1 +dynamic_batching: False +train_batch_size: 16 +valid_batch_size: 1 +test_batch_size: 1 +train_max_batch_length: 20.0 # Seconds +valid_max_batch_length: 20.0 # Seconds +test_max_batch_length: 20.0 # Seconds +num_buckets: 100 +max_batch_size: 128 +dataloader_workers: 4 +nonfinite_patience: 10 +max_grad_norm: 5.0 +precision: fp32 +ckpt_interval_steps: 10000 +keep_checkpoints: 1 +augment: False +augment_prob: 0.75 +segment_size: null +segment_pad: False +valid_freq: 1 + +# Optimizer parameters +lr: 0.0005 +betas: [0.9, 0.98] +weight_decay: 0.01 +improvement_threshold: 0.0025 +annealing_factor: 0.9 +patient: 0 + +# Encoder parameters +encoder_hidden_dims: [512, 512, 512, 512, 512, 512, 512] +encoder_kernel_sizes: [10, 3, 3, 3, 3, 2, 2] +encoder_strides: [5, 2, 2, 2, 2, 2, 2] +encoder_num_layers: 6 +encoder_dim: 1024 +encoder_ffn_dim: 4096 +encoder_num_heads: 16 +encoder_num_buckets: 320 +encoder_max_distance: 800 +encoder_max_cached_steps: 2048 +encoder_dropout: 0.0 +encoder_conv_pos: 128 +encoder_conv_pos_groups: 16 +encoder_causal: False +encoder_window_size: 512 +encoder_lookahead_size: 3 +encoder_use_flex_attention: False +encoder_checkpoint: !apply:utils.download_wavlm6 [!ref ] + +# Compressor parameters +compressor_input_dim: !ref +compressor_output_dim: !ref +compressor_hidden_dims: [1024, 512, 256] +compressor_downscale_factors: [1, 1, 1] +compressor_focal_window: 7 +compressor_focal_level: 2 +compressor_focal_factor: 2 +compressor_dropout: 0.0 +compressor_use_post_norm: False +compressor_use_layerscale: False +compressor_layerscale_init: 0.0001 +compressor_tanhscale_init: 0.5 +compressor_normalize_modulator: False +compressor_causal: False +compressor_window_size: 512 + +# Quantizer parameters +quantizer_code_dim: 13 # codebook_size = 2 ** 13 +quantizer_entropy_loss_weight: 0.1 +quantizer_diversity_gamma: 1.0 + +# Decompressor 
parameters +decompressor_input_dim: !ref +decompressor_output_dim: !ref +decompressor_hidden_dims: [256, 512, 1024] +decompressor_upscale_factors: [1, 1, 1] +decompressor_focal_window: 7 +decompressor_focal_level: 2 +decompressor_focal_factor: 2 +decompressor_dropout: 0.0 +decompressor_use_post_norm: False +decompressor_use_layerscale: False +decompressor_layerscale_init: 0.0001 +decompressor_tanhscale_init: 0.5 +decompressor_normalize_modulator: False +decompressor_causal: False +decompressor_window_size: 512 +decompressor_last_window_size: 512 +decompressor_lookahead_size: 3 + +# Decoder parameters +decoder_input_dim: !ref +decoder_num_layers: 8 +decoder_dim: 512 +decoder_ffn_dim: 1536 +decoder_kernel_size: 7 +decoder_hop_length: 320 +decoder_layerscale_init: null +decoder_n_fft: 1024 +decoder_causal: False +decoder_checkpoint: null + +# Augmentation +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 # Min frequency band dropout probability + drop_freq_high: 1 # Max frequency band dropout probability + drop_freq_count_low: 1 # Min number of frequency bands to drop + drop_freq_count_high: 3 # Max number of frequency bands to drop + drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1 # Min number of audio chunks to drop + drop_length_high: 5 # Max number of audio chunks to drop + drop_count_low: 1000 # Min length of audio chunks to drop + drop_count_high: 2000 # Max length of audio chunks to drop + +augmentation: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 2 + max_augmentations: 2 + augment_prob: !ref + augmentations: [!ref , !ref ] + +# Modules +encoder: !new:focalcodec.wavlm.WavLM + hidden_dims: !ref + kernel_sizes: !ref + strides: !ref + num_layers: !ref + dim: !ref + ffn_dim: !ref + num_heads: !ref + num_buckets: !ref + 
max_distance: !ref + max_cached_steps: !ref + dropout: !ref + conv_pos: !ref + conv_pos_groups: !ref + causal: !ref + window_size: !ref + lookahead_size: !ref + use_flex_attention: !ref + +compressor: !new:focalcodec.focalnet.FocalEncoder + input_dim: !ref + output_dim: !ref + hidden_dims: !ref + downscale_factors: !ref + focal_window: !ref + focal_level: !ref + focal_factor: !ref + dropout: !ref + use_post_norm: !ref + use_layerscale: !ref + layerscale_init: !ref + tanhscale_init: !ref + normalize_modulator: !ref + causal: !ref + window_size: !ref + +quantizer: !new:speechbrain.lobes.models.bsq.BinarySphericalQuantizer + code_dim: !ref + entropy_loss_weight: !ref + diversity_gamma: !ref + +decompressor: !new:focalcodec.focalnet.FocalDecoder + input_dim: !ref + output_dim: !ref + hidden_dims: !ref + upscale_factors: !ref + focal_window: !ref + focal_level: !ref + focal_factor: !ref + dropout: !ref + use_post_norm: !ref + use_layerscale: !ref + layerscale_init: !ref + tanhscale_init: !ref + normalize_modulator: !ref + causal: !ref + window_size: !ref + last_window_size: !ref + lookahead_size: !ref + +decoder: !new:focalcodec.vocos.Vocos + input_dim: !ref + num_layers: !ref + dim: !ref + ffn_dim: !ref + kernel_size: !ref + hop_length: !ref + layerscale_init: !ref + n_fft: !ref + causal: !ref + +modules: + compressor: !ref + quantizer: !ref + decompressor: !ref + +# Loss functions +rec_loss: !name:speechbrain.nnet.losses.mse_loss + allowed_len_diff: 0 + reduction: mean + +# Optimizers +opt_class: !name:torch.optim.AdamW + lr: !ref + betas: !ref + eps: 1.e-8 + weight_decay: !ref + +# Schedulers +scheduler: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: !ref + annealing_factor: !ref + patient: !ref + +# Performance metrics +utmos_computer: !name:metrics.utmos.UTMOS + sample_rate: !ref + +dwer_computer: !name:metrics.dwer.DWER + model_hub: openai/whisper-small + save_path: !ref + sample_rate: !ref + +wavlm_sim_computer: 
!name:metrics.speaker_similarity.SpkSimWavLM + model_hub: microsoft/wavlm-base-sv + save_path: !ref + sample_rate: !ref + +# Pretrainer +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + encoder: !ref + decoder: !ref + paths: + encoder: !ref + decoder: !ref + conditions: + encoder: !ref + decoder: !ref + +# Counters, checkpointers, loggers, etc. +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + compressor: !ref + quantizer: !ref + decompressor: !ref + scheduler: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref /train_log.txt + precision: 5 diff --git a/recipes/LibriTTS/focalcodec/hparams/vocos.yaml b/recipes/LibriTTS/focalcodec/hparams/vocos.yaml new file mode 100644 index 0000000000..e5556a941f --- /dev/null +++ b/recipes/LibriTTS/focalcodec/hparams/vocos.yaml @@ -0,0 +1,252 @@ +# ########################################################################################### +# Model: Vocos +# Authors: Luca Della Libera 2024 +# ########################################################################################### + +experiment_name: vocos + +# Seed needs to be set at top of YAML +seed: 0 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] + +# Data preparation +data_folder: !PLACEHOLDER +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json +train_split: + - train-clean-100 + # - train-clean-360 + # - train-other-500 +valid_split: [dev-clean] +test_split: [test-clean] +skip_prep: False + +# Output folders +output_folder: !ref results// +save_folder: !ref /save +cache_folder: !name:huggingface_hub.constants.HUGGINGFACE_HUB_CACHE + +# Save options +compute_metrics: True +save_audios: True + +# Preprocessing parameters +sample_rate: 16000 +audio_backend: soundfile 
+train_remove_if_longer: 60.0 # Seconds +valid_remove_if_longer: 60.0 # Seconds +test_remove_if_longer: 60.0 # Seconds +sorting: random + +# Training parameters +num_epochs: 100 +grad_accumulation_factor: 1 +dynamic_batching: False +train_batch_size: 16 +valid_batch_size: 1 +test_batch_size: 1 +train_max_batch_length: 20.0 # Seconds +valid_max_batch_length: 20.0 # Seconds +test_max_batch_length: 20.0 # Seconds +num_buckets: 100 +max_batch_size: 128 +dataloader_workers: 4 +nonfinite_patience: 10 +max_grad_norm: 5.0 +precision: fp32 +ckpt_interval_steps: 10000 +keep_checkpoints: 1 +augment: False +augment_prob: 0.75 +segment_size_feats: !ref 7040 // # Segment AFTER feature extraction +segment_size: null +segment_pad: False +valid_freq: 1 + +# Optimizer parameters +lr: 0.0002 +betas: [0.8, 0.99] +weight_decay: 0.01 +decay_factor: 0.999 + +# Encoder parameters +encoder_hidden_dims: [512, 512, 512, 512, 512, 512, 512] +encoder_kernel_sizes: [10, 3, 3, 3, 3, 2, 2] +encoder_strides: [5, 2, 2, 2, 2, 2, 2] +encoder_num_layers: 6 +encoder_dim: 1024 +encoder_ffn_dim: 4096 +encoder_num_heads: 16 +encoder_num_buckets: 320 +encoder_max_distance: 800 +encoder_max_cached_steps: 2048 +encoder_dropout: 0.0 +encoder_conv_pos: 128 +encoder_conv_pos_groups: 16 +encoder_causal: False +encoder_window_size: 512 +encoder_lookahead_size: 3 +encoder_use_flex_attention: False +encoder_checkpoint: !apply:utils.download_wavlm6 [!ref ] + +# Generator parameters +generator_input_dim: !ref +generator_num_layers: 8 +generator_dim: 512 +generator_ffn_dim: 1536 +generator_kernel_size: 7 +generator_hop_length: 320 +generator_layerscale_init: null +generator_n_fft: 1024 +generator_causal: False + +# Loss parameters +win_length: 1024 +n_mel_channels: 80 +mel_fmin: 0 +mel_fmax: 8000 +mel_normalized: False +power: 1.0 +dynamic_range_compression: True +hingeg_loss_weight: 1.0 +feat_match_loss_weight: 54.0 +l1_spec_loss_weight: 45.0 + +# Augmentation +drop_freq: !new:speechbrain.augment.time_domain.DropFreq 
+ drop_freq_low: 0 # Min frequency band dropout probability + drop_freq_high: 1 # Max frequency band dropout probability + drop_freq_count_low: 1 # Min number of frequency bands to drop + drop_freq_count_high: 3 # Max number of frequency bands to drop + drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1 # Min number of audio chunks to drop + drop_length_high: 5 # Max number of audio chunks to drop + drop_count_low: 1000 # Min length of audio chunks to drop + drop_count_high: 2000 # Max length of audio chunks to drop + +augmentation: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 2 + max_augmentations: 2 + augment_prob: !ref + augmentations: [!ref , !ref ] + +# Modules +encoder: !new:focalcodec.wavlm.WavLM + hidden_dims: !ref + kernel_sizes: !ref + strides: !ref + num_layers: !ref + dim: !ref + ffn_dim: !ref + num_heads: !ref + num_buckets: !ref + max_distance: !ref + max_cached_steps: !ref + dropout: !ref + conv_pos: !ref + conv_pos_groups: !ref + causal: !ref + window_size: !ref + lookahead_size: !ref + use_flex_attention: !ref + +generator: !new:focalcodec.vocos.Vocos + input_dim: !ref + num_layers: !ref + dim: !ref + ffn_dim: !ref + kernel_size: !ref + hop_length: !ref + layerscale_init: !ref + n_fft: !ref + causal: !ref + +discriminator: !new:speechbrain.lobes.models.HifiGAN.HifiganDiscriminator + +modules: + generator: !ref + discriminator: !ref + +# Loss functions +generator_loss: !new:speechbrain.lobes.models.HifiGAN.GeneratorLoss + stft_loss: null + stft_loss_weight: 0 + mseg_loss: !new:speechbrain.lobes.models.HifiGAN.HingeGLoss + mseg_loss_weight: !ref + feat_match_loss: !new:speechbrain.lobes.models.HifiGAN.MelganFeatureLoss + feat_match_loss_weight: !ref + l1_spec_loss: !new:speechbrain.lobes.models.HifiGAN.L1SpecLoss + sample_rate: !ref + 
hop_length: !ref + win_length: !ref + n_mel_channels: !ref + n_fft: !ref + n_stft: !ref // 2 + 1 + mel_fmin: !ref + mel_fmax: !ref + mel_normalized: !ref + power: !ref + dynamic_range_compression: !ref + l1_spec_loss_weight: !ref + +# Discriminator loss +discriminator_loss: !new:speechbrain.lobes.models.HifiGAN.DiscriminatorLoss + msed_loss: !new:speechbrain.lobes.models.HifiGAN.HingeDLoss + +# Optimizers +opt_class: !name:torch.optim.AdamW + lr: !ref + betas: !ref + eps: 1.e-8 + weight_decay: !ref + +# Schedulers +scheduler: !new:speechbrain.nnet.schedulers.StepScheduler + initial_value: !ref + decay_factor: !ref + +# Performance metrics +utmos_computer: !name:metrics.utmos.UTMOS + sample_rate: !ref + +dwer_computer: !name:metrics.dwer.DWER + model_hub: openai/whisper-small + save_path: !ref + sample_rate: !ref + +wavlm_sim_computer: !name:metrics.speaker_similarity.SpkSimWavLM + model_hub: microsoft/wavlm-base-sv + save_path: !ref + sample_rate: !ref + +# Pretrainer +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + encoder: !ref + paths: + encoder: !ref + conditions: + encoder: !ref + +# Counters, checkpointers, loggers, etc. 
+epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + generator: !ref + discriminator: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref /train_log.txt + precision: 5 diff --git a/recipes/LibriTTS/focalcodec/libritts_prepare.py b/recipes/LibriTTS/focalcodec/libritts_prepare.py new file mode 120000 index 0000000000..39f1a78c23 --- /dev/null +++ b/recipes/LibriTTS/focalcodec/libritts_prepare.py @@ -0,0 +1 @@ +../libritts_prepare.py \ No newline at end of file diff --git a/recipes/LibriTTS/focalcodec/metrics/dwer.py b/recipes/LibriTTS/focalcodec/metrics/dwer.py new file mode 100644 index 0000000000..4907a6bc40 --- /dev/null +++ b/recipes/LibriTTS/focalcodec/metrics/dwer.py @@ -0,0 +1,125 @@ +"""Differential WER (dWER) (see https://arxiv.org/abs/1911.07953). + +Authors + * Luca Della Libera 2025 +""" + +import torch +import torchaudio +from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE + +from speechbrain.decoders.seq2seq import S2SWhisperGreedySearcher +from speechbrain.integrations.huggingface import Whisper +from speechbrain.utils.metric_stats import ErrorRateStats, MetricStats + +__all__ = ["DWER"] + + +SAMPLE_RATE = 16000 + + +class DWER(MetricStats): + """Differentiable Word Error Rate (dWER) metric. + + Arguments + --------- + model_hub : str + Name of the HuggingFace Whisper checkpoint to load. + sample_rate : int + Sampling rate. + save_path : str, optional + Model cache directory. + model : Any, optional + Pre-initialized model. 
+ + Example + ------- + > import torch + > device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + > sample_rate = 24000 + > ids = ["A", "B"] + > hyp_sig = torch.randn(2, 2 * sample_rate, device=device) + > ref_sig = torch.randn(2, 2 * sample_rate, device=device) + > dwer = DWER("openai/whisper-small", sample_rate) + > dwer.append(ids, hyp_sig, ref_sig) + > print(dwer.summarize("error_rate")) + > print(dwer.summarize("WER")) + > print(dwer.summarize("error_rate_char")) + > print(dwer.summarize("CER")) + + """ + + def __init__( + self, + model_hub, + sample_rate, + save_path=HUGGINGFACE_HUB_CACHE, + model=None, + ): + self.sample_rate = sample_rate + self.model = model + if model is None: + self.model = Whisper( + model_hub, + save_path, + SAMPLE_RATE, + freeze=True, + freeze_encoder=True, + ).cpu() + self.searcher = S2SWhisperGreedySearcher( + self.model, + min_decode_ratio=0.0, + max_decode_ratio=1.0, + ) + self.model.tokenizer.set_prefix_tokens("english", "transcribe", False) + self.wer_computer = ErrorRateStats() + self.cer_computer = ErrorRateStats(split_tokens=True) + + def clear(self): + self.wer_computer.clear() + self.cer_computer.clear() + + @torch.no_grad() + def append(self, ids, hyp_audio, ref_audio, lens=None): + assert hyp_audio.shape == ref_audio.shape + assert hyp_audio.ndim == 2 + + # Concatenate + audio = torch.cat([hyp_audio, ref_audio]) + if lens is not None: + lens = torch.cat([lens, lens]) + else: + lens = torch.ones(audio.shape[0], device=audio.device) + + # Resample + audio = torchaudio.functional.resample( + audio, self.sample_rate, SAMPLE_RATE + ) + + self.model.to(hyp_audio.device) + self.model.eval() + + # Forward + enc_out = self.model.forward_encoder(self.model._get_mel(audio)) + text, _, _, _ = self.searcher(enc_out, lens) + text = self.model.tokenizer.batch_decode(text, skip_special_tokens=True) + text = [self.model.tokenizer._normalize(x).split(" ") for x in text] + hyp_text = text[: hyp_audio.shape[0]] + ref_text = 
text[hyp_audio.shape[0] :] + + # Compute WER + self.wer_computer.append(ids, hyp_text, ref_text) + self.cer_computer.append(ids, hyp_text, ref_text) + + def summarize(self, field=None): + wer_summary = self.wer_computer.summarize() + cer_summary = self.cer_computer.summarize() + wer_summary["CER"] = wer_summary["error_rate_char"] = cer_summary[ + "error_rate" + ] + if field is None: + return wer_summary + return wer_summary[field] + + def write_stats(self, filestream, verbose=False): + self.wer_computer.write_stats(filestream) diff --git a/recipes/LibriTTS/focalcodec/metrics/speaker_similarity.py b/recipes/LibriTTS/focalcodec/metrics/speaker_similarity.py new file mode 100644 index 0000000000..17dd6592f6 --- /dev/null +++ b/recipes/LibriTTS/focalcodec/metrics/speaker_similarity.py @@ -0,0 +1,104 @@ +"""Cosine similarity between speaker embeddings. + +Authors + * Luca Della Libera 2025 +""" + +import torch +import torchaudio +from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE +from transformers import AutoModelForAudioXVector + +from speechbrain.dataio.dataio import length_to_mask +from speechbrain.utils.metric_stats import MetricStats + +__all__ = ["SpkSimWavLM"] + + +SAMPLE_RATE = 16000 + + +class SpkSimWavLM(MetricStats): + """WavLM speaker similarity metric. + + Arguments + --------- + model_hub : str + Name of the HuggingFace WavLM checkpoint to load. + sample_rate : int + Sampling rate. + save_path : str, optional + Model cache directory. + model : Any, optional + Pre-initialized model. 
+ + Example + ------- + > import torch + > sample_rate = 24000 + > ids = ["A", "B"] + > hyp_sig = torch.randn(2, 2 * sample_rate) + > ref_sig = torch.randn(2, 2 * sample_rate) + > spk_sim = SpkSimWavLM("microsoft/wavlm-base-sv", sample_rate) + > spk_sim.append(ids, hyp_sig, ref_sig) + > print(spk_sim.summarize("average")) + + """ + + def __init__( + self, + model_hub, + sample_rate, + save_path=HUGGINGFACE_HUB_CACHE, + model=None, + ): + self.sample_rate = sample_rate + self.model = model + if model is None: + self.model = AutoModelForAudioXVector.from_pretrained( + model_hub, cache_dir=save_path + ) + self.clear() + + @torch.no_grad() + def append(self, ids, hyp_sig, ref_sig, lens=None): + assert hyp_sig.shape == ref_sig.shape + assert hyp_sig.ndim == 2 + + # Concatenate + sig = torch.cat([hyp_sig, ref_sig]) + if lens is not None: + lens = torch.cat([lens, lens]) + + # Resample + sig = torchaudio.functional.resample(sig, self.sample_rate, SAMPLE_RATE) + if sig.shape[-1] < 4880: + sig = torch.nn.functional.pad( + sig, [0, 4880 - sig.shape[-1]], mode="replicate" + ) + + self.model.to(hyp_sig.device) + self.model.eval() + + # Attention mask + attention_mask = None + if lens is not None: + abs_length = lens * sig.shape[-1] + attention_mask = length_to_mask( + abs_length.int() + ).long() # 0 for masked tokens + + # Forward + embs = self.model( + input_values=sig, + attention_mask=attention_mask, + output_attentions=False, + ).embeddings + + hyp_embs, ref_embs = embs.split([len(hyp_sig), len(ref_sig)]) + scores = torch.nn.functional.cosine_similarity( + hyp_embs, ref_embs, dim=-1 + ) + + self.ids += ids + self.scores += scores.cpu().tolist() diff --git a/recipes/LibriTTS/focalcodec/metrics/utmos.py b/recipes/LibriTTS/focalcodec/metrics/utmos.py new file mode 100644 index 0000000000..a3495beee0 --- /dev/null +++ b/recipes/LibriTTS/focalcodec/metrics/utmos.py @@ -0,0 +1,65 @@ +"""UTokyo-SaruLab System for VoiceMOS Challenge 2022 (UTMOS) (see 
+#!/usr/bin/env python
+> python train_decoder.py hparams/<config>.yaml
batch.to(self.device) + sig, lens = batch.sig + + # Augment if specified + if stage == sb.Stage.TRAIN and self.hparams.augment: + sig, lens = self.hparams.augmentation(sig, lens) + + # Extract features + with torch.no_grad(): + self.hparams.encoder.to(self.device).eval() + feats, *encoder_state_ = self.hparams.encoder(sig, length=lens) + + # Extract segments + if ( + stage == sb.Stage.TRAIN + and self.hparams.segment_size_feats is not None + ): + segment_size_feats = self.hparams.segment_size_feats + abs_lens = ( + (feats.shape[1] * lens) + .ceil() + .clamp(min=segment_size_feats, max=feats.shape[1]) + .long() + ) + max_starts = abs_lens - segment_size_feats # [B] + starts = ( + torch.rand(feats.shape[0], device=self.device) + * (max_starts + 1).float() + ).to(torch.long) + offsets = torch.arange( + segment_size_feats, device=self.device + ) # [L] + idx = starts[:, None] + offsets[None, :] # [B, L] + idx_expanded = idx[:, :, None].expand(-1, -1, feats.shape[-1]) + feats = feats.gather(1, idx_expanded) # [B, L, H] + + segment_size_sig = ( + segment_size_feats * self.hparams.generator_hop_length + ) + starts = starts * self.hparams.generator_hop_length # [B] + offsets = torch.arange( + segment_size_sig, device=self.device + ) # [L_sig] + idx = starts[:, None] + offsets[None, :] # [B, L_sig] + idx = idx.clamp(max=sig.shape[1] - 1).long() + sig = sig.gather(1, idx) + lens = torch.ones_like(lens) + + batch.sig = sig, lens + batch.feats = feats, lens + + def compute_forward_generator(self, batch, stage): + """Generator forward pass.""" + sig, lens = batch.sig + + # Forward generator + feats, _ = batch.feats + hyp_sig, *generator_state_ = self.modules.generator(feats) # [B, T] + hyp_sig = hyp_sig[:, None] # [B, 1, T] + + # Adjust length if not matching + sig = sig[:, None] + if sig.shape[-1] > hyp_sig.shape[-1]: + pad = [0, sig.shape[-1] - hyp_sig.shape[-1]] + hyp_sig = torch.nn.functional.pad(hyp_sig, pad, mode="replicate") + elif sig.shape[-1] < hyp_sig.shape[-1]: + 
hyp_sig = hyp_sig.narrow(-1, 0, sig.shape[-1]) + + batch.sig = sig, lens + batch.hyp_sig = hyp_sig, lens # With gradient + + def compute_forward_discriminator( + self, batch, stage, return_discriminator=True + ): + """Discriminator forward pass.""" + sig, lens = batch.sig + hyp_sig, _ = batch.hyp_sig # With gradient + + if return_discriminator: + # Return predictions to compute discriminator loss + scores_fake, _ = self.modules.discriminator(hyp_sig.detach()) + scores_real, _ = self.modules.discriminator(sig) + return scores_fake, scores_real + + # Return predictions to compute generator loss + self.modules.discriminator.requires_grad_(False) + scores_fake, feats_fake = self.modules.discriminator(hyp_sig) + scores_real, feats_real = self.modules.discriminator(sig) + self.modules.discriminator.requires_grad_() + + return hyp_sig, sig, scores_fake, feats_fake, feats_real + + def compute_objectives_generator(self, predictions, batch, stage): + """Compute generator loss.""" + loss = self.hparams.generator_loss( + stage, + y_hat=predictions[0], + y=predictions[1], + scores_fake=predictions[2], + feats_fake=predictions[3], + feats_real=predictions[4], + ) + return loss["G_loss"] + + def compute_objectives_discriminator(self, predictions, batch, stage): + """Compute discriminator loss.""" + loss = self.hparams.discriminator_loss( + scores_fake=predictions[0], + scores_real=predictions[1], + ) + return loss["D_loss"] + + def _fit_valid(self, valid_set, epoch, enable): + """Validation stage.""" + if epoch % self.hparams.valid_freq == 0: + return super()._fit_valid(valid_set, epoch, enable) + + @torch.no_grad() + def evaluate_batch(self, batch, stage): + """Evaluate one batch.""" + assert stage in (sb.Stage.VALID, sb.Stage.TEST) + self.extract_feats(batch, stage) + self.compute_forward_generator(batch, stage) + outputs = self.compute_forward_discriminator( + batch, stage, return_discriminator=False + ) + loss = self.compute_objectives_generator(outputs, batch, stage) + + IDs 
= batch.id + _, lens = batch.sig + hyp_sig, sig, *_ = outputs + hyp_sig = hyp_sig[:, 0] + sig = sig[:, 0] + + if ( + self.hparams.save_audios + and self.saved_audios < 10 + and if_main_process() + ): + save_folder = os.path.join( + self.hparams.output_folder, + "audios", + f"epoch{str(self.hparams.epoch_counter.current).zfill(4)}", + ) + os.makedirs(save_folder, exist_ok=True) + for i in range(len(IDs)): + write_audio( + os.path.join(save_folder, f"{IDs[i]}_hyp.wav"), + hyp_sig[i].cpu(), + self.hparams.sample_rate, + ) + write_audio( + os.path.join(save_folder, f"{IDs[i]}_ref.wav"), + sig[i].cpu(), + self.hparams.sample_rate, + ) + self.saved_audios += 1 + + if stage == sb.Stage.TEST and self.hparams.compute_metrics: + self.utmos_metric.append(IDs, hyp_sig, lens) + self.ref_utmos_metric.append(IDs, sig, lens) + self.dwer_metric.append(IDs, hyp_sig, sig, lens) + self.wavlm_sim_metric.append(IDs, hyp_sig, sig, lens) + + # Cleanup + batch.sig = batch.hyp_sig = None + + return loss.detach().cpu() + + def init_optimizers(self): + """Called during ``on_fit_start().``""" + self.optimizer_generator = self.opt_class( + self.modules.generator.parameters() + ) + self.optimizer_discriminator = self.opt_class( + self.modules.discriminator.parameters() + ) + self.optimizers_dict = { + "optimizer_generator": self.optimizer_generator, + "optimizer_discriminator": self.optimizer_discriminator, + } + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "optimizer_generator", self.optimizer_generator + ) + self.checkpointer.add_recoverable( + "optimizer_discriminator", self.optimizer_discriminator + ) + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch.""" + super().on_stage_start(stage, epoch) + torch.backends.cudnn.benchmark = ( + stage == sb.Stage.TRAIN + and self.hparams.segment_size is not None + and self.hparams.segment_pad + ) + if stage != sb.Stage.TRAIN: + self.saved_audios = 0 + if stage == sb.Stage.TEST and 
self.hparams.compute_metrics: + self.utmos_metric = self.hparams.utmos_computer() + self.ref_utmos_metric = self.hparams.utmos_computer( + model=self.utmos_metric.model + ) + self.dwer_metric = self.hparams.dwer_computer() + self.wavlm_sim_metric = self.hparams.wavlm_sim_computer() + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of each epoch.""" + # Compute/store important stats + current_epoch = self.hparams.epoch_counter.current + stage_stats = {"loss": stage_loss} + + # Save checkpoint and anneal learning rate at the end of each epoch + if stage == sb.Stage.TRAIN: + self.avg_train_loss = 0.0 + self.train_stats = stage_stats + _, lr = self.hparams.scheduler(epoch) + sb.nnet.schedulers.update_learning_rate( + self.optimizer_generator, lr + ) + sb.nnet.schedulers.update_learning_rate( + self.optimizer_discriminator, lr + ) + self.stats_meta = { + "epoch": epoch, + "steps": self.optimizer_step, + "lr": lr, + } + if if_main_process(): + self.checkpointer.save_and_keep_only( + meta={"loss": stage_stats["loss"], "epoch": epoch}, + max_keys=["epoch"], + num_to_keep=self.hparams.keep_checkpoints, + ) + if epoch % self.hparams.valid_freq != 0: + self.hparams.train_logger.log_stats( + stats_meta=self.stats_meta, + train_stats=self.train_stats, + ) + + # Perform end-of-validation operations + elif stage == sb.Stage.VALID: + self.hparams.train_logger.log_stats( + stats_meta=self.stats_meta, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + + elif stage == sb.Stage.TEST: + if self.hparams.compute_metrics: + stage_stats["UTMOS"] = self.utmos_metric.summarize("average") + stage_stats["RefUTMOS"] = self.ref_utmos_metric.summarize( + "average" + ) + stage_stats["dWER"] = self.dwer_metric.summarize("error_rate") + stage_stats["dCER"] = self.dwer_metric.summarize( + "error_rate_char" + ) + stage_stats["WavLMSim"] = self.wavlm_sim_metric.summarize( + "average" + ) + if if_main_process(): + # Save dWER + with 
+    segment_size : int, optional
+        If provided, randomly crop each audio sample to this length (in samples)
+        during training. Useful for training models on fixed-length segments.
+ audio_backend : str, optional + Backend to use for audio loading (e.g., "soundfile"). + + Returns + ------- + tuple + Train data, valid data, test data. + + """ + train_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=train_json, + replacements={"data_root": data_folder}, + ) + # Sort training data to speed up training + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=sorting == "descending", + key_max_value={"duration": train_remove_if_longer}, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=valid_json, + replacements={"data_root": data_folder}, + ) + # Sort validation data to speed up validation + valid_data = valid_data.filtered_sorted( + sort_key="duration", + reverse=not debug, + key_max_value={"duration": valid_remove_if_longer}, + ) + + test_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=test_json, + replacements={"data_root": data_folder}, + ) + # Sort the test data to speed up testing + test_data = test_data.filtered_sorted( + sort_key="duration", + reverse=not debug, + key_max_value={"duration": test_remove_if_longer}, + ) + + datasets = [train_data, valid_data, test_data] + + # Define audio pipeline + takes = ["wav"] + provides = ["sig"] + + def audio_pipeline_train(wav): + """Load waveform, resample, and optionally extract a random segment.""" + original_sample_rate = sb.dataio.dataio.read_audio_info(wav).sample_rate + sig = sb.dataio.dataio.read_audio(wav, backend=audio_backend) + sig = torchaudio.functional.resample( + sig, original_sample_rate, sample_rate + ) + + if segment_size is not None: + delta_length = segment_size - len(sig) + if delta_length > 0 and segment_pad: + sig = torch.nn.functional.pad(sig, [0, delta_length]) + elif delta_length < 0: + start = random.randint(0, -delta_length) + sig = sig[start : start + segment_size] + + yield sig + + def audio_pipeline_eval(wav): + """Load waveform and resample.""" + original_sample_rate = 
+        Updated hparams, train data, valid data, test data.
hparams.get("dataloader_workers", 0) + } + if hparams.get("dynamic_batching", False) or hparams.get( + "train_dynamic_batching", False + ): + train_dataloader_kwargs["batch_sampler"] = DynamicBatchSampler( + train_data, + hparams["train_max_batch_length"], + num_buckets=hparams.get("num_buckets"), + length_func=lambda x: x["duration"], + shuffle=False, + batch_ordering=hparams.get("sorting", "batch_ordering"), + max_batch_ex=hparams.get("max_batch_size"), + bucket_boundaries=hparams.get("bucket_boundaries", []), + lengths_list=hparams.get("lengths_list"), + ) + else: + train_dataloader_kwargs["batch_size"] = hparams["train_batch_size"] + train_dataloader_kwargs["shuffle"] = hparams["sorting"] == "random" + train_dataloader_kwargs["pin_memory"] = run_opts["device"] != "cpu" + train_dataloader_kwargs["drop_last"] = hparams.get( + "segment_size", None + ) is not None and hparams.get("segment_pad", False) + hparams["train_dataloader_kwargs"] = train_dataloader_kwargs + + valid_dataloader_kwargs = { + "num_workers": hparams.get("dataloader_workers", 0) + } + if hparams.get("dynamic_batching", False) or hparams.get( + "valid_dynamic_batching", False + ): + valid_dataloader_kwargs["batch_sampler"] = DynamicBatchSampler( + valid_data, + hparams["valid_max_batch_length"], + num_buckets=hparams.get("num_buckets"), + length_func=lambda x: x["duration"], + shuffle=False, + batch_ordering="descending", + max_batch_ex=hparams.get("max_batch_size"), + bucket_boundaries=hparams.get("bucket_boundaries", []), + lengths_list=hparams.get("lengths_list"), + ) + else: + valid_dataloader_kwargs["batch_size"] = hparams["valid_batch_size"] + valid_dataloader_kwargs["pin_memory"] = run_opts["device"] != "cpu" + hparams["valid_dataloader_kwargs"] = valid_dataloader_kwargs + + test_dataloader_kwargs = { + "num_workers": hparams.get("dataloader_workers", 0) + } + if hparams.get("dynamic_batching", False) or hparams.get( + "test_dynamic_batching", False + ): + 
test_dataloader_kwargs["batch_sampler"] = DynamicBatchSampler( + test_data, + hparams["test_max_batch_length"], + num_buckets=hparams.get("num_buckets"), + length_func=lambda x: x["duration"], + shuffle=False, + batch_ordering="descending", + max_batch_ex=hparams.get("max_batch_size"), + bucket_boundaries=hparams.get("bucket_boundaries", []), + lengths_list=hparams.get("lengths_list"), + ) + else: + test_dataloader_kwargs["batch_size"] = hparams["test_batch_size"] + test_dataloader_kwargs["pin_memory"] = run_opts["device"] != "cpu" + hparams["test_dataloader_kwargs"] = test_dataloader_kwargs + + # Pretrain the specified modules + if "pretrainer" in hparams: + run_on_main(hparams["pretrainer"].collect_files) + run_on_main(hparams["pretrainer"].load_collected) + + return hparams, train_data, valid_data, test_data + + +if __name__ == "__main__": + # Command-line interface + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file) as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then create ddp_init_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Prepare recipe + hparams, train_data, valid_data, test_data = prepare_recipe( + hparams, run_opts + ) + + # Trainer initialization + brain = Resynthesis( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Train + brain.fit( + brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["train_dataloader_kwargs"], + valid_loader_kwargs=hparams["valid_dataloader_kwargs"], + ) + + # Test + brain.hparams.dwer_file = os.path.join(hparams["output_folder"], "dwer.txt") + brain.evaluate( + test_data, + 
+#!/usr/bin/env python
+
+"""Recipe for training a quantizer on continuous audio representations.
+
+To run this recipe:
+> python train_quantizer.py hparams/<config>.yaml
enable) + + @torch.no_grad() + def evaluate_batch(self, batch, stage): + """Evaluate one batch.""" + assert stage in (sb.Stage.VALID, sb.Stage.TEST) + outputs = self.compute_forward(batch, stage=stage) + loss = self.compute_objectives(outputs, batch, stage=stage) + hyp_feats, feats, _ = outputs + + IDs = batch.id + sig, lens = batch.sig + + self.hparams.decoder.to(self.device).eval() + hyp_sig, *decoder_state_ = self.hparams.decoder(hyp_feats) + rec_sig, *decoder_state_ = self.hparams.decoder(feats) + + # Adjust length if not matching + if sig.shape[-1] > hyp_sig.shape[-1]: + pad = [0, sig.shape[-1] - hyp_sig.shape[-1]] + hyp_sig = torch.nn.functional.pad(hyp_sig, pad, mode="replicate") + rec_sig = torch.nn.functional.pad(rec_sig, pad, mode="replicate") + elif sig.shape[-1] < hyp_sig.shape[-1]: + hyp_sig = hyp_sig.narrow(-1, 0, sig.shape[-1]) + rec_sig = rec_sig.narrow(-1, 0, sig.shape[-1]) + + if ( + self.hparams.save_audios + and self.saved_audios < 10 + and if_main_process() + ): + save_folder = os.path.join( + self.hparams.output_folder, + "audios", + f"epoch{str(self.hparams.epoch_counter.current).zfill(4)}", + ) + os.makedirs(save_folder, exist_ok=True) + for i in range(len(IDs)): + write_audio( + os.path.join(save_folder, f"{IDs[i]}_hyp.wav"), + hyp_sig[i].cpu(), + self.hparams.sample_rate, + ) + write_audio( + os.path.join(save_folder, f"{IDs[i]}_rec.wav"), + rec_sig[i].cpu(), + self.hparams.sample_rate, + ) + write_audio( + os.path.join(save_folder, f"{IDs[i]}_ref.wav"), + sig[i].cpu(), + self.hparams.sample_rate, + ) + self.saved_audios += 1 + + if stage == sb.Stage.TEST and self.hparams.compute_metrics: + self.utmos_metric.append(IDs, hyp_sig, lens) + self.ref_utmos_metric.append(IDs, sig, lens) + self.dwer_metric.append(IDs, hyp_sig, sig, lens) + self.wavlm_sim_metric.append(IDs, hyp_sig, sig, lens) + + return loss.detach().cpu() + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch.""" + 
super().on_stage_start(stage, epoch) + torch.backends.cudnn.benchmark = ( + stage == sb.Stage.TRAIN + and self.hparams.segment_size is not None + and self.hparams.segment_pad + ) + if stage != sb.Stage.TRAIN: + self.saved_audios = 0 + if stage == sb.Stage.TEST and self.hparams.compute_metrics: + self.utmos_metric = self.hparams.utmos_computer() + self.ref_utmos_metric = self.hparams.utmos_computer( + model=self.utmos_metric.model + ) + self.dwer_metric = self.hparams.dwer_computer() + self.wavlm_sim_metric = self.hparams.wavlm_sim_computer() + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of each epoch.""" + current_epoch = self.hparams.epoch_counter.current + stage_stats = {"loss": stage_loss} + + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + self.stats_meta = {"epoch": epoch, "steps": self.optimizer_step} + if epoch % self.hparams.valid_freq != 0: + self.hparams.train_logger.log_stats( + stats_meta=self.stats_meta, + train_stats=self.train_stats, + ) + + # Perform end-of-iteration operations, like annealing, logging, etc. 
+ elif stage == sb.Stage.VALID: + _, lr = self.hparams.scheduler(stage_stats["loss"]) + sb.nnet.schedulers.update_learning_rate(self.optimizer, lr) + self.stats_meta["lr"] = lr + self.hparams.train_logger.log_stats( + stats_meta=self.stats_meta, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + if if_main_process(): + self.checkpointer.save_and_keep_only( + meta={"loss": stage_stats["loss"]}, + min_keys=["loss"], + num_to_keep=self.hparams.keep_checkpoints, + ) + + elif stage == sb.Stage.TEST: + if self.hparams.compute_metrics: + stage_stats["UTMOS"] = self.utmos_metric.summarize("average") + stage_stats["RefUTMOS"] = self.ref_utmos_metric.summarize( + "average" + ) + stage_stats["dWER"] = self.dwer_metric.summarize("error_rate") + stage_stats["dCER"] = self.dwer_metric.summarize( + "error_rate_char" + ) + stage_stats["WavLMSim"] = self.wavlm_sim_metric.summarize( + "average" + ) + if if_main_process(): + # Save dWER + with open(self.hparams.dwer_file, "w") as w: + self.dwer_metric.write_stats(w) + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": current_epoch}, + test_stats=stage_stats, + ) + + +def dataio_prepare( + data_folder, + train_json, + valid_json, + test_json, + sample_rate=16000, + train_remove_if_longer=60.0, + valid_remove_if_longer=60.0, + test_remove_if_longer=60.0, + sorting="ascending", + debug=False, + segment_size=None, + segment_pad=False, + audio_backend="soundfile", +): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + + Arguments + --------- + data_folder : str + Root directory containing audio files referenced by the JSON manifests. + train_json : str + Path to the training manifest JSON. + valid_json : str + Path to the validation manifest JSON. + test_json : str + Path to the test manifest JSON. + sample_rate : int, optional + Target sampling rate for loaded audio. 
+    segment_size : int, optional
+        If provided, randomly crop each audio sample to this length (in samples)
+        during training. Useful for training models on fixed-length segments.
+ + """ + train_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=train_json, + replacements={"data_root": data_folder}, + ) + # Sort training data to speed up training + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=sorting == "descending", + key_max_value={"duration": train_remove_if_longer}, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=valid_json, + replacements={"data_root": data_folder}, + ) + # Sort validation data to speed up validation + valid_data = valid_data.filtered_sorted( + sort_key="duration", + reverse=not debug, + key_max_value={"duration": valid_remove_if_longer}, + ) + + test_data = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=test_json, + replacements={"data_root": data_folder}, + ) + # Sort the test data to speed up testing + test_data = test_data.filtered_sorted( + sort_key="duration", + reverse=not debug, + key_max_value={"duration": test_remove_if_longer}, + ) + + datasets = [train_data, valid_data, test_data] + + # Define audio pipeline + takes = ["wav"] + provides = ["sig"] + + def audio_pipeline_train(wav): + """Load waveform, resample, and optionally extract a random segment.""" + original_sample_rate = sb.dataio.dataio.read_audio_info(wav).sample_rate + sig = sb.dataio.dataio.read_audio(wav, backend=audio_backend) + sig = torchaudio.functional.resample( + sig, original_sample_rate, sample_rate + ) + + if segment_size is not None: + delta_length = segment_size - len(sig) + if delta_length > 0 and segment_pad: + sig = torch.nn.functional.pad(sig, [0, delta_length]) + elif delta_length < 0: + start = random.randint(0, -delta_length) + sig = sig[start : start + segment_size] + + yield sig + + def audio_pipeline_eval(wav): + """Load waveform and resample.""" + original_sample_rate = sb.dataio.dataio.read_audio_info(wav).sample_rate + sig = sb.dataio.dataio.read_audio(wav, backend=audio_backend) + sig = torchaudio.functional.resample( + sig, 
+        Updated hparams, train data, valid data, test data.
train_dataloader_kwargs["batch_sampler"] = DynamicBatchSampler( + train_data, + hparams["train_max_batch_length"], + num_buckets=hparams.get("num_buckets"), + length_func=lambda x: x["duration"], + shuffle=False, + batch_ordering=hparams.get("sorting", "batch_ordering"), + max_batch_ex=hparams.get("max_batch_size"), + bucket_boundaries=hparams.get("bucket_boundaries", []), + lengths_list=hparams.get("lengths_list"), + ) + else: + train_dataloader_kwargs["batch_size"] = hparams["train_batch_size"] + train_dataloader_kwargs["shuffle"] = hparams["sorting"] == "random" + train_dataloader_kwargs["pin_memory"] = run_opts["device"] != "cpu" + train_dataloader_kwargs["drop_last"] = hparams.get( + "segment_size", None + ) is not None and hparams.get("segment_pad", False) + hparams["train_dataloader_kwargs"] = train_dataloader_kwargs + + valid_dataloader_kwargs = { + "num_workers": hparams.get("dataloader_workers", 0) + } + if hparams.get("dynamic_batching", False) or hparams.get( + "valid_dynamic_batching", False + ): + valid_dataloader_kwargs["batch_sampler"] = DynamicBatchSampler( + valid_data, + hparams["valid_max_batch_length"], + num_buckets=hparams.get("num_buckets"), + length_func=lambda x: x["duration"], + shuffle=False, + batch_ordering="descending", + max_batch_ex=hparams.get("max_batch_size"), + bucket_boundaries=hparams.get("bucket_boundaries", []), + lengths_list=hparams.get("lengths_list"), + ) + else: + valid_dataloader_kwargs["batch_size"] = hparams["valid_batch_size"] + valid_dataloader_kwargs["pin_memory"] = run_opts["device"] != "cpu" + hparams["valid_dataloader_kwargs"] = valid_dataloader_kwargs + + test_dataloader_kwargs = { + "num_workers": hparams.get("dataloader_workers", 0) + } + if hparams.get("dynamic_batching", False) or hparams.get( + "test_dynamic_batching", False + ): + test_dataloader_kwargs["batch_sampler"] = DynamicBatchSampler( + test_data, + hparams["test_max_batch_length"], + num_buckets=hparams.get("num_buckets"), + length_func=lambda 
x: x["duration"], + shuffle=False, + batch_ordering="descending", + max_batch_ex=hparams.get("max_batch_size"), + bucket_boundaries=hparams.get("bucket_boundaries", []), + lengths_list=hparams.get("lengths_list"), + ) + else: + test_dataloader_kwargs["batch_size"] = hparams["test_batch_size"] + test_dataloader_kwargs["pin_memory"] = run_opts["device"] != "cpu" + hparams["test_dataloader_kwargs"] = test_dataloader_kwargs + + # Pretrain the specified modules + if "pretrainer" in hparams: + run_on_main(hparams["pretrainer"].collect_files) + run_on_main(hparams["pretrainer"].load_collected) + + return hparams, train_data, valid_data, test_data + + +if __name__ == "__main__": + # Command-line interface + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file) as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then create ddp_init_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Prepare recipe + hparams, train_data, valid_data, test_data = prepare_recipe( + hparams, run_opts + ) + + # Trainer initialization + brain = Quantization( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Train + brain.fit( + brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["train_dataloader_kwargs"], + valid_loader_kwargs=hparams["valid_dataloader_kwargs"], + ) + + # Test + brain.hparams.dwer_file = os.path.join(hparams["output_folder"], "dwer.txt") + brain.evaluate( + test_data, + min_key="loss", + test_loader_kwargs=hparams["test_dataloader_kwargs"], + ) diff --git a/recipes/LibriTTS/focalcodec/utils.py b/recipes/LibriTTS/focalcodec/utils.py new file mode 
100644 index 0000000000..6944563fd0 --- /dev/null +++ b/recipes/LibriTTS/focalcodec/utils.py @@ -0,0 +1,44 @@ +"""Common utilities. + +Authors + * Luca Della Libera 2025 +""" + +import os + +import torch + +__all__ = ["download_wavlm6"] + + +def download_wavlm6(cache_dir: "str") -> "str": + """Download WavLM6 checkpoint to cache and return the path. + + Arguments + --------- + cache_dir: + Cache directory where the checkpoint will be saved. + + Returns + ------- + Path to the saved checkpoint. + + """ + os.makedirs(cache_dir, exist_ok=True) + checkpoint_path = os.path.join(cache_dir, "wavlm6.pt") + + # If already cached, return immediately + if os.path.exists(checkpoint_path): + return checkpoint_path + + # Load FocalCodec model + codec = torch.hub.load( + repo_or_dir="lucadellalib/focalcodec", + model="focalcodec", + config="lucadellalib/focalcodec_50hz", + ) + + # Save WavLM6 checkpoint + torch.save(codec.encoder.state_dict(), checkpoint_path) + + return checkpoint_path diff --git a/recipes/LibriTTS/libritts_prepare.py b/recipes/LibriTTS/libritts_prepare.py index ea78dfdd82..0ac76530c7 100644 --- a/recipes/LibriTTS/libritts_prepare.py +++ b/recipes/LibriTTS/libritts_prepare.py @@ -1,15 +1,29 @@ -from speechbrain.utils.data_utils import get_all_files, download_file -from speechbrain.processing.speech_augmentation import Resample +""" +LibriTTS data preparation + +Authors + * Pradnya Kandarkar 2022 +""" + import json import os -import shutil import random -import logging + +import torch import torchaudio +from tqdm import tqdm + +from speechbrain.dataio import audio_io +from speechbrain.inference.text import GraphemeToPhoneme +from speechbrain.utils.data_utils import get_all_files +from speechbrain.utils.logger import get_logger +from speechbrain.utils.text_to_sequence import _g2p_keep_punctuations -logger = logging.getLogger(__name__) +logger = get_logger(__name__) LIBRITTS_URL_PREFIX = "https://www.openslr.org/resources/60/" +DEVICE = torch.device("cuda" if 
torch.cuda.is_available() else "cpu") + def prepare_libritts( data_folder, @@ -18,11 +32,18 @@ def prepare_libritts( save_json_test, sample_rate, split_ratio=[80, 10, 10], - libritts_subsets=["train-clean-100"], + libritts_subsets=None, + train_split=None, + valid_split=None, + test_split=None, + seed=1234, + model_name=None, + skip_prep=False, ): """ Prepares the json files for the LibriTTS dataset. Downloads the dataset if it is not found in the `data_folder` as expected. + Arguments --------- data_folder : str @@ -33,37 +54,109 @@ def prepare_libritts( Path where the validation data specification file will be saved. save_json_test : str Path where the test data specification file will be saved. + sample_rate : int + The sample rate to be used for the dataset split_ratio : list List composed of three integers that sets split ratios for train, valid, and test sets, respectively. For instance split_ratio=[80, 10, 10] will assign 80% of the sentences to training, 10% for validation, and 10% for test. - sample_rate : int - The sample rate to be used for the dataset libritts_subsets: list - List of librispeech subsets to use (e.g., dev-clean, train-clean-100, ...). - Example + List of librispeech subsets to use (e.g., dev-clean, train-clean-100, ...) for the experiment. + This parameter will be ignored if explicit data splits are provided. + Explicit data splits parameters: "train_split", "valid_split", "test_split" + train_split : list + List of librispeech subsets to use (e.g.,train-clean-100, train-clean-360) for the experiment training stage. + valid_split : list + List of librispeech subsets to use (e.g., dev-clean) for the experiment validation stage. + test_split : list + List of librispeech subsets to use (e.g., test-clean) for the experiment testing stage. + seed : int + Seed value + model_name : str + Model name (used to prepare additional model specific data) + skip_prep: Bool + If True, skip preparation. 
+ + Returns ------- - >>> data_folder = '/path/to/LibriTTS' - >>> prepare_libritts(data_folder, 'train.json', 'valid.json', 'test.json', 2050) + None """ + if skip_prep: + return + + # Setting the seed value + random.seed(seed) + # Checks if this phase is already done (if so, skips it) if skip(save_json_train, save_json_valid, save_json_test): logger.info("Preparation completed in previous run, skipping.") return + logger.info( + f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}" + ) + + # If specific splits are provided, creates data manifest files accordingly + if train_split: + wav_list = prepare_split(data_folder, train_split) + create_json(wav_list, save_json_train, sample_rate, model_name) + if valid_split: + wav_list = prepare_split(data_folder, valid_split) + # TODO add better way to speedup evaluation + wav_list = random.sample(wav_list, 500) + create_json(wav_list, save_json_valid, sample_rate, model_name) + if test_split: + wav_list = prepare_split(data_folder, test_split) + create_json(wav_list, save_json_test, sample_rate, model_name) + + if skip(save_json_train, save_json_valid, save_json_test): + logger.info("Preparation completed.") + return + + # If specific splits are not provided, and a list of subsets if provided, creates train, valid, test splits + # Creates data manifest files according to the data splits + if libritts_subsets: + wav_list = prepare_split(data_folder, libritts_subsets) + # Random split the signal list into train, valid, and test sets. 
+ data_split = split_sets(wav_list, split_ratio) + # Creating json files + create_json( + data_split["train"], save_json_train, sample_rate, model_name + ) + create_json( + data_split["valid"], save_json_valid, sample_rate, model_name + ) + create_json(data_split["test"], save_json_test, sample_rate, model_name) + + +def prepare_split(data_folder, split_list): + """ + Processes the provided list of LibriTTS subsets and creates a list of all the .wav files present in the subsets. + Downloads the LibriTTS subsets as required. + + Arguments + --------- + data_folder : str + Path to the folder where the LibriTTS dataset is stored + split_list : list + List of librispeech subsets to process (e.g., dev-clean, train-clean-100, ...) + + Returns + ------- + wav_list : list + List of all .wav files to be processed + """ extension = [".wav"] # The expected extension for audio files wav_list = list() # Stores all audio file paths for the dataset - # For every subset of the dataset, if it doesn't exist, downloads it and sets flag to resample the subset - for subset_name in libritts_subsets: - + # For every subset of the dataset, if it doesn't exist, downloads it + for subset_name in split_list: subset_folder = os.path.join(data_folder, subset_name) subset_archive = os.path.join(subset_folder, subset_name + ".tar.gz") - subset_data = os.path.join(subset_folder, "LibriTTS") - if not check_folders(subset_data): + if not check_folders(subset_folder): logger.info( f"No data found for {subset_name}. Checking for an archive file." ) @@ -71,32 +164,14 @@ def prepare_libritts( logger.info( f"No archive file found for {subset_name}. Downloading and unpacking." ) - subset_url = LIBRITTS_URL_PREFIX + subset_name + ".tar.gz" - download_file(subset_url, subset_archive) - logger.info(f"Downloaded data for subset {subset_name}.") - else: - logger.info( - f"Found an archive file for {subset_name}. Unpacking." 
- ) - - shutil.unpack_archive(subset_archive, subset_folder) - + quit() # Collects all files matching the provided extension wav_list.extend(get_all_files(subset_folder, match_and=extension)) - logger.info( - f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}" - ) - - # Random split the signal list into train, valid, and test sets. - data_split = split_sets(wav_list, split_ratio) - # Creating json files - create_json(data_split["train"], save_json_train, sample_rate) - create_json(data_split["valid"], save_json_valid, sample_rate) - create_json(data_split["test"], save_json_test, sample_rate) + return wav_list -def create_json(wav_list, json_file, sample_rate): +def create_json(wav_list, json_file, sample_rate, model_name=None): """ Creates the json file given a list of wav files. Arguments @@ -107,55 +182,80 @@ def create_json(wav_list, json_file, sample_rate): The path of the output json file sample_rate : int The sample rate to be used for the dataset + model_name : str + Model name (used to prepare additional model specific data) """ + # Downloads and initializes the G2P model to compute the phonemes if data is being prepared for Tacotron2 experiments + if model_name == "Tacotron2": + logger.info( + "Computing phonemes for labels using SpeechBrain G2P. This may take a while." 
+ ) + g2p = GraphemeToPhoneme.from_hparams( + "speechbrain/soundchoice-g2p", run_opts={"device": DEVICE} + ) + json_dict = {} - # Creates a resampler object with orig_freq set to LibriTTS sample rate (24KHz) and new_freq set to SAMPLERATE - resampler = Resample(orig_freq=24000, new_freq=sample_rate) # Processes all the wav files in the list - for wav_file in wav_list: - + for wav_file in tqdm(wav_list): # Reads the signal - signal, sig_sr = torchaudio.load(wav_file) - signal = signal.squeeze(0) + signal, sig_sr = audio_io.load(wav_file) + duration = signal.shape[1] / sig_sr + + # TODO add better way to filter short utterances + if duration < 1.0: + continue # Manipulates path to get relative path and uttid path_parts = wav_file.split(os.path.sep) uttid, _ = os.path.splitext(path_parts[-1]) - relative_path = os.path.join("{data_root}", *path_parts[-6:]) + # relative_path = os.path.join("{data_root}", *path_parts[-4:]) - # Gets the path for the text files and extracts the input text - original_text_path = os.path.join( - "/", *path_parts[:-1], uttid + ".original.txt" + # Gets the path for the text files and extracts the input text + normalized_text_path = os.path.join( + "/", *path_parts[:-1], uttid + ".normalized.txt" ) - with open(original_text_path) as f: - original_text = f.read() - if original_text.__contains__("{"): - original_text = original_text.replace("{", "") - if original_text.__contains__("}"): - original_text = original_text.replace("}", "") + try: + with open(normalized_text_path, encoding="utf-8") as f: + normalized_text = f.read() + if normalized_text.__contains__("{"): + normalized_text = normalized_text.replace("{", "") + if normalized_text.__contains__("}"): + normalized_text = normalized_text.replace("}", "") + except FileNotFoundError: + print(f"Warning: The file {normalized_text_path} does not exist.") + continue # Resamples the audio file if required if sig_sr != sample_rate: - signal = signal.unsqueeze(0) - resampled_signal = 
resampler(signal) + resampled_signal = torchaudio.functional.resample( + signal, sig_sr, sample_rate + ) os.unlink(wav_file) - torchaudio.save(wav_file, resampled_signal, sample_rate=sample_rate) + audio_io.save(wav_file, resampled_signal, sample_rate=sample_rate) # Gets the speaker-id from the utterance-id spk_id = uttid.split("_")[0] # Creates an entry for the utterance json_dict[uttid] = { - "wav": relative_path, + "uttid": uttid, + "wav": wav_file, + "duration": duration, "spk_id": spk_id, - "label": original_text, + "label": normalized_text, "segment": True if "train" in json_file else False, } + # Characters are used for Tacotron2, phonemes may be needed for other models + if model_name not in ["Tacotron2", "HiFi-GAN"]: + # Computes phoneme labels using SpeechBrain G2P and keeps the punctuations + phonemes = _g2p_keep_punctuations(g2p, normalized_text) + json_dict[uttid].update({"label_phoneme": phonemes}) + # Writes the dictionary to the json file - with open(json_file, mode="w") as json_f: + with open(json_file, mode="w", encoding="utf-8") as json_f: json.dump(json_dict, json_f, indent=2) logger.info(f"{json_file} successfully created!") @@ -165,6 +265,12 @@ def skip(*filenames): """ Detects if the data preparation has been already done. If the preparation has been done, we can skip it. + + Arguments + --------- + *filenames : tuple + Set of filenames to check for existence. + Returns ------- bool @@ -189,8 +295,9 @@ def split_sets(wav_list, split_ratio): and test sets, respectively. For instance split_ratio=[80, 10, 10] will assign 80% of the sentences to training, 10% for validation, and 10% for test. + Returns - ------ + ------- dictionary containing train, valid, and test splits. 
""" # Random shuffles the list @@ -215,9 +322,3 @@ def check_folders(*folders): if not os.path.exists(folder): return False return True - - -if __name__ == "__main__": - prepare_libritts( - "libritts_data", "train.json", "valid.json", "test.json", 16000 - ) diff --git a/recipes/LJSpeech/TTS/vocoder/hifi_gan/extra-dependencies.txt b/recipes/LibriTTS/vocoder/hifigan/extra_requirements.txt similarity index 100% rename from recipes/LJSpeech/TTS/vocoder/hifi_gan/extra-dependencies.txt rename to recipes/LibriTTS/vocoder/hifigan/extra_requirements.txt diff --git a/recipes/LibriTTS/vocoder/hifigan/hparams/train.yaml b/recipes/LibriTTS/vocoder/hifigan/hparams/train.yaml index a08bc71b81..73b9cb4c83 100644 --- a/recipes/LibriTTS/vocoder/hifigan/hparams/train.yaml +++ b/recipes/LibriTTS/vocoder/hifigan/hparams/train.yaml @@ -2,14 +2,14 @@ # Experiment Parameters and setup # ################################### seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref ./results/hifi_gan/ save_folder: !ref /save train_log: !ref /train_log.txt progress_sample_path: !ref /samples epochs: 100 keep_checkpoint_interval: 50 -use_tensorboard: True +use_tensorboard: False ################################# # Data files and pre-processing # @@ -21,6 +21,7 @@ libritts_subsets: ["train-clean-100", "train-clean-360"] train_json: !ref /train.json valid_json: !ref /valid.json test_json: !ref /test.json +skip_prep: False splits: ["train", "valid"] split_ratio: [90, 10] @@ -36,7 +37,7 @@ n_mel_channels: 80 n_fft: 1024 mel_fmin: 0.0 mel_fmax: 8000 -mel_normalized: null +mel_normalized: False power: 1 norm: "slaney" mel_scale: "slaney" diff --git a/recipes/LibriTTS/vocoder/hifigan/libritts_prepare.py b/recipes/LibriTTS/vocoder/hifigan/libritts_prepare.py new file mode 120000 index 0000000000..489ab40118 --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan/libritts_prepare.py @@ -0,0 +1 @@ +../../libritts_prepare.py \ 
No newline at end of file diff --git a/recipes/LibriTTS/vocoder/hifigan/train.py b/recipes/LibriTTS/vocoder/hifigan/train.py index 24d56e0c83..fc8d7734ac 100644 --- a/recipes/LibriTTS/vocoder/hifigan/train.py +++ b/recipes/LibriTTS/vocoder/hifigan/train.py @@ -11,13 +11,15 @@ * Pradnya Kandarkar 2022 """ +import os import sys + import torch from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import scalarize -import torchaudio -import os class HifiGanBrain(sb.Brain): @@ -33,12 +35,19 @@ def compute_forward(self, batch, stage): stage: speechbrain.Stage the training stage + Returns + ------- + y_g_hat : torch.Tensor + scores_fake : torch.Tensor + feats_fake : torch.Tensor + scores_real : torch.Tensor + feats_real : torch.Tensor """ batch = batch.to(self.device) x, _ = batch.mel y, _ = batch.sig - # generate sythesized waveforms + # generate synthesized waveforms y_g_hat = self.modules.generator(x)[:, :, : y.size(2)] # get scores and features from discriminator for real and synthesized waveforms @@ -48,14 +57,13 @@ def compute_forward(self, batch, stage): return (y_g_hat, scores_fake, feats_fake, scores_real, feats_real) def compute_objectives(self, predictions, batch, stage): - """Computes and combines generator and discriminator losses - """ + """Computes and combines generator and discriminator losses""" batch = batch.to(self.device) x, _ = batch.mel y, _ = batch.sig # Hold on to the batch for the inference sample. 
This is needed because - # the infernece sample is run from on_stage_end only, where + # the inference sample is run from on_stage_end only, where # batch information is not available self.last_batch = (x, y) @@ -64,7 +72,7 @@ def compute_objectives(self, predictions, batch, stage): y_hat, scores_fake, feats_fake, scores_real, feats_real = predictions loss_g = self.hparams.generator_loss( - y_hat, y, scores_fake, feats_fake, feats_real + stage, y_hat, y, scores_fake, feats_fake, feats_real ) loss_d = self.hparams.discriminator_loss(scores_fake, scores_real) loss = {**loss_g, **loss_d} @@ -72,8 +80,7 @@ def compute_objectives(self, predictions, batch, stage): return loss def fit_batch(self, batch): - """Train discriminator and generator adversarially - """ + """Train discriminator and generator adversarially""" batch = batch.to(self.device) y, _ = batch.sig @@ -104,8 +111,7 @@ def fit_batch(self, batch): return loss_g.detach().cpu() def evaluate_batch(self, batch, stage): - """Evaluate one batch - """ + """Evaluate one batch""" out = self.compute_forward(batch, stage=stage) loss = self.compute_objectives(out, batch, stage=stage) loss_g = loss["G_loss"] @@ -153,6 +159,11 @@ def init_optimizers(self): "scheduler_d", self.scheduler_d ) + self.optimizers_dict = { + "optimizer_g": self.optimizer_g, + "optimizer_d": self.optimizer_d, + } + def _remember_sample(self, batch, predictions): """Remembers samples of spectrograms and the batch for logging purposes @@ -167,8 +178,7 @@ def _remember_sample(self, batch, predictions): y_hat, scores_fake, feats_fake, scores_real, feats_real = predictions def on_stage_end(self, stage, stage_loss, epoch): - """Gets called at the end of a stage (TRAIN, VALID, Or TEST) - """ + """Gets called at the end of a stage (TRAIN, VALID, Or TEST)""" if stage == sb.Stage.VALID: # Update learning rate self.scheduler_g.step() @@ -199,19 +209,21 @@ def on_stage_end(self, stage, stage_loss, epoch): end_of_epoch=True, min_keys=["loss"], ckpt_predicate=( 
- lambda ckpt: ( - ckpt.meta["epoch"] - % self.hparams.keep_checkpoint_interval - != 0 + ( + lambda ckpt: ( + ckpt.meta["epoch"] + % self.hparams.keep_checkpoint_interval + != 0 + ) ) - ) - if self.hparams.keep_checkpoint_interval is not None - else None, + if self.hparams.keep_checkpoint_interval is not None + else None + ), ) self.run_inference_sample("Valid") - # We also write statistics about test data to stdout and to the TensorboardLogger. + # We also write statistics about test data to stdout and to the torch.TensorboardLogger. if stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( # 1#2# {"Epoch loaded": self.hparams.epoch_counter.current}, @@ -247,9 +259,7 @@ def run_inference_sample(self, name): inference_padding=self.hparams.inference_padding, cond_channels=self.hparams.cond_channels, conv_post_bias=self.hparams.conv_post_bias, - ).to( - self.device - ) # Gets a new instance + ).to(self.device) # Gets a new instance inference_generator.load_state_dict( self.hparams.generator.state_dict() ) # Copies weights @@ -299,9 +309,7 @@ def save_audio(self, name, data, epoch): os.makedirs(target_path) file_name = f"{name}.wav" effective_file_name = os.path.join(target_path, file_name) - torchaudio.save( - effective_file_name, data.cpu(), self.hparams.sample_rate - ) + audio_io.save(effective_file_name, data.cpu(), self.hparams.sample_rate) def dataio_prepare(hparams): @@ -349,11 +357,10 @@ def audio_pipeline(wav, segment): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -363,21 +370,22 @@ def audio_pipeline(wav, segment): overrides=overrides, ) - sys.path.append("../../") - from libritts_prepare import prepare_libritts - - sb.utils.distributed.run_on_main( - prepare_libritts, - kwargs={ - 
"data_folder": hparams["data_folder"], - "save_json_train": hparams["train_json"], - "save_json_valid": hparams["valid_json"], - "save_json_test": hparams["test_json"], - "sample_rate": hparams["sample_rate"], - "split_ratio": hparams["split_ratio"], - "libritts_subsets": hparams["libritts_subsets"], - }, - ) + if not hparams["skip_prep"]: + from libritts_prepare import prepare_libritts + + sb.utils.distributed.run_on_main( + prepare_libritts, + kwargs={ + "data_folder": hparams["data_folder"], + "save_json_train": hparams["train_json"], + "save_json_valid": hparams["valid_json"], + "save_json_test": hparams["test_json"], + "sample_rate": hparams["sample_rate"], + "split_ratio": hparams["split_ratio"], + "libritts_subsets": hparams["libritts_subsets"], + "model_name": "HiFi-GAN", + }, + ) datasets = dataio_prepare(hparams) @@ -396,8 +404,10 @@ def audio_pipeline(wav, segment): ) if hparams["use_tensorboard"]: - hifi_gan_brain.tensorboard_logger = sb.utils.train_logger.TensorboardLogger( - save_dir=hparams["output_folder"] + "/tensorboard" + hifi_gan_brain.tensorboard_logger = ( + sb.utils.train_logger.TensorboardLogger( + save_dir=hparams["output_folder"] + "/tensorboard" + ) ) # Training diff --git a/recipes/LibriTTS/vocoder/hifigan_discrete/extra_requirements.txt b/recipes/LibriTTS/vocoder/hifigan_discrete/extra_requirements.txt new file mode 100644 index 0000000000..09489d9186 --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan_discrete/extra_requirements.txt @@ -0,0 +1,3 @@ +# Needed for quantization +scikit-learn + diff --git a/recipes/LibriTTS/vocoder/hifigan_discrete/extract_code.py b/recipes/LibriTTS/vocoder/hifigan_discrete/extract_code.py new file mode 100644 index 0000000000..b250e33c64 --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan_discrete/extract_code.py @@ -0,0 +1,258 @@ +""" +Apply K-means clustering over acoustic features to extract speech units for HiFi-GAN training. 
+ +Authors + * Jarod Duret 2023 +""" + +import json +import logging +import pathlib as pl + +import numpy as np +import torch +import torchaudio +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_pkl, save_pkl +from speechbrain.integrations.huggingface import hubert, wav2vec2, wavlm +from speechbrain.integrations.huggingface.discrete_ssl import DiscreteSSL +from speechbrain.utils.logger import get_logger + +OPT_FILE = "opt_libritts_extract_code.pkl" +TRAIN_JSON = "train.json" +VALID_JSON = "valid.json" +TEST_JSON = "test.json" + +ENCODER_CLASSES = { + "HuBERT": hubert.HuBERT, + "Wav2Vec2": wav2vec2.Wav2Vec2, + "WavLM": wavlm.WavLM, +} + + +def setup_logger(): + """Set up a logger with a log format and logging level.""" + log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" + logging.basicConfig(format=log_format, level=logging.INFO) + logger = get_logger(__name__) + return logger + + +def get_device(use_cuda): + """Determine and return the appropriate device for computation.""" + use_cuda = use_cuda and torch.cuda.is_available() + print("\n" + "=" * 30) + print(f"USE_CUDA SET TO: {use_cuda}") + print(f"CUDA AVAILABLE?: {torch.cuda.is_available()}") + print("=" * 30 + "\n") + return torch.device("cuda" if use_cuda else "cpu") + + +def np_array(tensor): + """Convert a Pytorch tensor to a Numpy array.""" + tensor = tensor.squeeze(0) + tensor = tensor.detach().cpu() + return tensor.numpy() + + +def skip(splits, save_folder, conf): + """ + Detects if the ljspeech data_extraction has been already done. + If the extraction has been done, we can skip it. + + Arguments + --------- + splits : list + List of splits to check for existence. + save_folder : str + Folder containing prepared data. + conf : dict + The loaded configuration options. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. 
+ """ + # Checking json files + skip = True + + split_files = { + "train": TRAIN_JSON, + "valid": VALID_JSON, + "test": TEST_JSON, + } + + for split in splits: + if not (save_folder / split_files[split]).exists(): + skip = False + + # Checking saved options + save_opt = save_folder / OPT_FILE + if skip is True: + if save_opt.is_file(): + opts_old = load_pkl(save_opt.as_posix()) + if opts_old == conf: + skip = True + else: + skip = False + else: + skip = False + return skip + + +def extract_libritts( + data_folder, + splits, + kmeans_folder, + kmeans_dataset, + num_clusters, + encoder_type, + encoder_source, + layer, + save_folder, + sample_rate=16000, + skip_extract=False, +): + """ + Extract speech units for HiFi-GAN training on the LibriTTS datasets. + + Arguments + --------- + data_folder : str + Path to the folder where the original LibriTTS dataset is stored. + splits : list + List of splits to prepare. + kmeans_folder: str + Huggingface repository if that contains the pretrained kmean model. + kmeans_dataset : str + Name of the dataset that Kmeans model on HF repo is trained with. + num_clusters: (int) + determine the number of clusters of the targeted kmeans models to be downloaded. + encoder_type: str + Name of the model used as feature extractor. + encoder_source: str + Url to the model used as feature extractor. + layer: List[int] (default: [7]): + Determine which layers of SSL should be used to extract information. + save_folder: str + Path to the folder where the speech units are stored. + sample_rate: int + LibriTTS dataset sample rate + skip_extract: Bool + If True, skip extraction. + + Returns + ------- + None + + Example + ------- + >>> from recipes.LibriTTS.TTS.vocoder.hifigan_unit.extract_code import ( + ... extract_libritts, + ... 
) + >>> data_folder = "data/LibriTTS/" + >>> splits = ["train", "valid"] + >>> kmeans_folder = "speechbrain/SSL_Quantization" + >>> kmeans_dataset = LibriSpeech - 100 - 360 - 500 + >>> encoder_type = "HuBERT" + >>> encoder_source = facebook / hubert - large - ll60k + >>> layer = [7] + >>> save_folder = "save/" + >>> extract_libritts( + ... data_folder, + ... splits, + ... kmeans_folder, + ... kmeans_filename, + ... encoder_type, + ... encoder_source, + ... layer, + ... save_folder, + ... ) + """ + logger = setup_logger() + + if skip_extract: + return + # Create configuration for easily skipping code extraction stage + conf = { + "data_folder": data_folder, + "splits": splits, + "save_folder": save_folder, + "kmeans_folder": kmeans_folder, + "encoder_type": encoder_type, + "encoder_source": encoder_source, + "layer": layer, + } + + save_folder = pl.Path(save_folder) + # Check if this phase is already done (if so, skip it) + if skip(splits, save_folder, conf): + logger.info("Skipping code extraction, completed in previous run.") + return + + # Fetch device + device = get_device(use_cuda=True) + + save_opt = save_folder / OPT_FILE + data_folder = pl.Path(data_folder) + save_path = save_folder / "savedir" + code_folder = save_folder / "codes" + code_folder.mkdir(parents=True, exist_ok=True) + + logger.info(f"Loading encoder: {encoder_source} ...") + if encoder_type not in ENCODER_CLASSES: + raise TypeError("Not a supported Encoder") + + encoder_class = ENCODER_CLASSES[encoder_type] + encoder = encoder_class( + source=encoder_source, + save_path=save_path.as_posix(), + output_norm=False, + freeze=True, + freeze_feature_extractor=True, + apply_spec_augment=False, + output_all_hiddens=True, + ).to(device) + + discrete_encoder = DiscreteSSL( + save_path=save_path.as_posix(), + ssl_model=encoder, + kmeans_dataset=kmeans_dataset, + kmeans_repo_id=kmeans_folder, + num_clusters=num_clusters, + # layers_num=layer, + ) + + for split in splits: + dataset_path = data_folder / 
f"{split}.json" + logger.info(f"Reading dataset from {dataset_path} ...") + meta_json = json.load(open(dataset_path, encoding="utf-8")) + for key in tqdm(meta_json.keys()): + item = meta_json[key] + wav = item["wav"] + with torch.no_grad(): + info = audio_io.info(wav) + audio = sb.dataio.dataio.read_audio(wav) + audio = torchaudio.transforms.Resample( + info.sample_rate, + sample_rate, + )(audio) + audio = audio.unsqueeze(0).to(device) + deduplicates = [False for _ in layer] + bpe_tokenizers = [None for _ in layer] + tokens, _, _ = discrete_encoder( + audio, + SSL_layers=layer, + deduplicates=deduplicates, + bpe_tokenizers=bpe_tokenizers, + ) + tokens = np_array(tokens.squeeze(0)) + np.save(code_folder / f"{key}.npy", tokens) + + logger.info("Extraction completed.") + save_pkl(conf, save_opt) diff --git a/recipes/LibriTTS/vocoder/hifigan_discrete/extract_speaker_embeddings.py b/recipes/LibriTTS/vocoder/hifigan_discrete/extract_speaker_embeddings.py new file mode 100644 index 0000000000..8d1b12632f --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan_discrete/extract_speaker_embeddings.py @@ -0,0 +1,203 @@ +""" +Apply speaker recognition model to extract speaker embeddings for HiFi-GAN training. 
+ +Authors + * Jarod Duret 2023 +""" + +import json +import logging +import pathlib as pl + +import numpy as np +import torch +import torchaudio +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_pkl, save_pkl +from speechbrain.inference.encoders import MelSpectrogramEncoder +from speechbrain.utils.logger import get_logger + +OPT_FILE = "opt_libritts_extract_speaker.pkl" +TRAIN_JSON = "train.json" +VALID_JSON = "valid.json" +TEST_JSON = "test.json" + + +def setup_logger(): + """Set up a logger with a log format and logging level.""" + log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" + logging.basicConfig(format=log_format, level=logging.INFO) + logger = get_logger(__name__) + return logger + + +def get_device(use_cuda): + """Determine and return the appropriate device for computation.""" + use_cuda = use_cuda and torch.cuda.is_available() + print("\n" + "=" * 30) + print(f"USE_CUDA SET TO: {use_cuda}") + print(f"CUDA AVAILABLE?: {torch.cuda.is_available()}") + print("=" * 30 + "\n") + return torch.device("cuda" if use_cuda else "cpu") + + +def np_array(tensor): + """Convert a Pytorch tensor to a Numpy array.""" + tensor = tensor.squeeze(0) + tensor = tensor.detach().cpu() + return tensor.numpy() + + +def skip(splits, save_folder, conf): + """ + Detects if the libritts data_extraction has been already done. + If the extraction has been done, we can skip it. + + Arguments + --------- + splits : list + List of splits to check. + save_folder : str + Path to the folder where the speech units are stored. + conf : dict + Loaded configuration options + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. 
+ """ + # Checking json files + skip = True + + split_files = { + "train": TRAIN_JSON, + "valid": VALID_JSON, + "test": TEST_JSON, + } + + for split in splits: + if ( + split in split_files + and not (save_folder / split_files[split]).exists() + ): + skip = False + + # Checking saved options + save_opt = save_folder / OPT_FILE + if skip is True: + if save_opt.is_file(): + opts_old = load_pkl(save_opt.as_posix()) + if opts_old == conf: + skip = True + else: + skip = False + else: + skip = False + return skip + + +def extract_libritts_embeddings( + data_folder, + splits, + encoder_source, + save_folder, + sample_rate=16000, + skip_extract=False, +): + """ + Extract speaker embeddings for HiFi-GAN training on the LibriTTS datasets. + + Arguments + --------- + data_folder : str + Path to the folder where the original LibriTTS dataset is stored. + splits : list + List of splits to prepare. + encoder_source: str + Url to the model used as embedding extractor. + save_folder: str + Path to the folder where the speech units are stored. + sample_rate: int + LibriTTS dataset sample rate + skip_extract: Bool + If True, skip extraction. + + Returns + ------- + None + + Example + ------- + >>> from recipes.LibriTTS.vocoder.hifigan_unit.extract_speaker_embeddings import ( + ... extract_libritts_embeddings, + ... ) + >>> data_folder = "data/libritts/" + >>> splits = ["train", "valid"] + >>> encoder_source = facebook / hubert - base - ls960 + >>> save_folder = "save/" + >>> extract_libritts_embeddings( + ... data_folder, splits, encoder_source, save_folder + ... 
) + """ + logger = setup_logger() + + if skip_extract: + return + # Create configuration for easily skipping extraction stage + conf = { + "data_folder": data_folder, + "splits": splits, + "save_folder": save_folder, + "encoder_source": encoder_source, + } + + save_folder = pl.Path(save_folder) + # Check if this phase is already done (if so, skip it) + if skip(splits, save_folder, conf): + logger.info( + "Skipping speaker embeddings extraction, completed in previous run." + ) + return + + # Fetch device + device = get_device(use_cuda=True) + + save_opt = save_folder / OPT_FILE + data_folder = pl.Path(data_folder) + save_path = save_folder / "savedir/melspec_encoder" + speaker_folder = save_folder / "speaker_embeddings" + speaker_folder.mkdir(parents=True, exist_ok=True) + + logger.info(f"Loading encoder: {encoder_source} ...") + encoder = MelSpectrogramEncoder.from_hparams( + source=encoder_source, + run_opts={"device": str(device)}, + savedir=save_path, + ) + + for split in splits: + dataset_path = save_folder / f"{split}.json" + logger.info(f"Reading dataset from {dataset_path} ...") + meta_json = json.load(open(dataset_path, encoding="utf-8")) + for key in tqdm(meta_json.keys()): + item = meta_json[key] + wav = item["wav"] + with torch.no_grad(): + info = audio_io.info(wav) + audio = sb.dataio.dataio.read_audio(wav) + audio = torchaudio.transforms.Resample( + info.sample_rate, + sample_rate, + )(audio) + audio = audio.to(device) + feats = encoder.encode_waveform(audio) + feats = np_array(feats.squeeze(0)) + np.save(speaker_folder / f"{key}.npy", feats) + + logger.info("Extraction completed.") + save_pkl(conf, save_opt) diff --git a/recipes/LibriTTS/vocoder/hifigan_discrete/hparams/train.yaml b/recipes/LibriTTS/vocoder/hifigan_discrete/hparams/train.yaml new file mode 100644 index 0000000000..b879bb261e --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan_discrete/hparams/train.yaml @@ -0,0 +1,238 @@ 
+############################################################################ +# Model: Unit HiFi-GAN +# Tokens: discrete speech units (K-means) +# Training: LibriTTS (English) +# Authors: Jarod Duret +# ############################################################################ + + +################################### +# Experiment Parameters and setup # +################################### +seed: 4321 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref ./results/hifigan_hubert/ +save_folder: !ref /save +train_log: !ref /train_log.txt +progress_sample_path: !ref /samples +epochs: 150 +keep_checkpoint_interval: 50 +use_tensorboard: False + +################################# +# Data files and pre-processing # +################################# +data_folder: !PLACEHOLDER # e.g, /datasets/LibriTTS +# Data will be automatically downloaded in the data folder +# (and resampled to the specified sampling rate) +train_split: [train-clean-100] +valid_split: [dev-clean] +test_split: [test-clean] +train_json: !ref /train.json +valid_json: !ref /valid.json +test_json: !ref /test.json +libritts_subsets: null +splits: ["train", "valid", "test"] +split_ratio: null +skip_prep: False + +######################################################## +# Encoder | HF model # +#------------------------------------------------------# +# HuBERT | facebook/hubert-large-ll60k # +# Wav2Vec2 | facebook/wav2vec2-large-960h-lv60-self # +# WavLM | microsoft/wavlm-large # +######################################################## +kmeans_folder: speechbrain/SSL_Quantization +kmeans_dataset: LibriSpeech-100-360-500 +num_clusters: 1000 +codes_folder: !ref /codes +encoder_type: HuBERT +encoder_hub: facebook/hubert-large-ll60k +layer: [1, 3, 7, 12, 18, 23] +skip_extract: False + +################################ +# Audio Parameters # +################################ +segment_size: 8960 +code_hop_size: 320 +sample_rate: 16000 +layer_drop: True + +hop_length: 256 +win_length: 
1024 +n_mel_channels: 80 +n_fft: 1024 +mel_fmin: 0.0 +mel_fmax: 8000 +mel_normalized: False +power: 1 +norm: "slaney" +mel_scale: "slaney" +dynamic_range_compression: True + +################################ +# Optimization Hyperparameters # +################################ +learning_rate: 0.0002 +weight_decay: 0.9999 +adam_b1: 0.8 +adam_b2: 0.99 +batch_size: 32 + +train_dataloader_opts: + batch_size: !ref + drop_last: False + num_workers: 8 + +valid_dataloader_opts: + batch_size: 1 + num_workers: 8 + +test_dataloader_opts: + batch_size: 1 + num_workers: 8 + +################################ +# Model Parameters and model # +################################ +duration_predictor: False +multi_speaker: False + +# embedding params +vocab_size: 6001 # K-means size + 1 for padding 1000x6 +embedding_dim: 1024 + +# generator params +in_channels: 1024 +out_channels: 1 + +var_pred_hidden_dim: 128 +var_pred_kernel_size: 3 +var_pred_dropout: 0.5 + +########################################################################################################################################################### +# version | resblock_type | upsample_kernel_sizes | upsample_factors | resblock_kernel_sizes | upsample_initial_channel | resblock_dilation_sizes +# 1 | "1" | [16,16,4,4] | [8, 8, 2, 2] | [3, 7, 11] | 512 | [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +# 2 | "1" | [16,16,4,4] | [8, 8, 2, 2] | [3, 7, 11] | 128 | [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +# 3 | "2" | [16,16,8] | [8,8,4] | [3,5,7] | 256 | [[1,2], [2,6], [3,12]] +########################################################################################################################################################### +resblock_type: "1" +resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +resblock_kernel_sizes: [3, 7, 11] +upsample_kernel_sizes: [11, 8, 8, 4, 4] +upsample_initial_channel: 512 +upsample_factors: [5, 4, 4, 2, 2] + +inference_padding: 5 +cond_channels: 0 +conv_post_bias: True + +mel_spectogram: 
!name:speechbrain.lobes.models.HifiGAN.mel_spectogram + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_fft: !ref + n_mels: !ref + f_min: !ref + f_max: !ref + power: !ref + normalized: !ref + norm: !ref + mel_scale: !ref + compression: !ref + +generator: !new:speechbrain.lobes.models.HifiGAN.UnitHifiganGenerator + in_channels: !ref + out_channels: !ref + resblock_type: !ref + resblock_dilation_sizes: !ref + resblock_kernel_sizes: !ref + upsample_kernel_sizes: !ref + upsample_initial_channel: !ref + upsample_factors: !ref + inference_padding: !ref + cond_channels: !ref + conv_post_bias: !ref + vocab_size: !ref + embedding_dim: !ref + duration_predictor: !ref + var_pred_hidden_dim: !ref + var_pred_kernel_size: !ref + var_pred_dropout: !ref + multi_speaker: !ref + +discriminator: !new:speechbrain.lobes.models.HifiGAN.HifiganDiscriminator + +modules: + generator: !ref + discriminator: !ref + +#generator loss +stft_loss: null +mseg_loss: !new:speechbrain.lobes.models.HifiGAN.MSEGLoss +feat_match_loss: !new:speechbrain.lobes.models.HifiGAN.MelganFeatureLoss +l1_spec_loss: !new:speechbrain.lobes.models.HifiGAN.L1SpecLoss + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_mel_channels: !ref + n_fft: !ref + n_stft: !ref // 2 + 1 + mel_fmin: !ref + mel_fmax: null + mel_normalized: !ref + power: !ref + dynamic_range_compression: !ref +mseg_dur_loss: False + +generator_loss: !new:speechbrain.lobes.models.HifiGAN.GeneratorLoss + stft_loss: !ref + stft_loss_weight: 0 + mseg_loss: !ref + mseg_loss_weight: 1 + feat_match_loss: !ref + feat_match_loss_weight: 10 + l1_spec_loss: !ref + l1_spec_loss_weight: 45 + mseg_dur_loss: !ref + mseg_dur_loss_weight: 1 + +#discriminator loss +msed_loss: !new:speechbrain.lobes.models.HifiGAN.MSEDLoss + +discriminator_loss: !new:speechbrain.lobes.models.HifiGAN.DiscriminatorLoss + msed_loss: !ref + +#optimizer +opt_class_generator: !name:torch.optim.AdamW + lr: !ref + betas: [!ref , !ref ] + +opt_class_discriminator: 
!name:torch.optim.AdamW + lr: !ref + betas: [!ref , !ref ] + +sch_class_generator: !name:torch.optim.lr_scheduler.ExponentialLR + gamma: !ref + last_epoch: -1 + +sch_class_discriminator: !name:torch.optim.lr_scheduler.ExponentialLR + gamma: !ref + last_epoch: -1 + +#epoch object +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +#checkpointer +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + generator: !ref + discriminator: !ref + counter: !ref diff --git a/recipes/LibriTTS/vocoder/hifigan_discrete/hparams/train_spk.yaml b/recipes/LibriTTS/vocoder/hifigan_discrete/hparams/train_spk.yaml new file mode 100644 index 0000000000..ed223caf72 --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan_discrete/hparams/train_spk.yaml @@ -0,0 +1,243 @@ +############################################################################ +# Model: Unit HiFi-GAN +# Tokens: discrete speech units (K-means) +# Training: LibriTTS (English) +# Authors: Jarod Duret +# ############################################################################ + + +################################### +# Experiment Parameters and setup # +################################### +seed: 4321 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref ./results/hifigan_spk/ +save_folder: !ref /save +train_log: !ref /train_log.txt +progress_sample_path: !ref /samples +epochs: 150 +keep_checkpoint_interval: 50 +use_tensorboard: False + +################################# +# Data files and pre-processing # +################################# +data_folder: !PLACEHOLDER # e.g, /datasets/LibriTTS +# Data will be automatically downloaded in the data folder +# (and resampled to the specified sampling rate) +train_split: [train-clean-100] +valid_split: [dev-clean] +test_split: [test-clean] +train_json: !ref /train.json +valid_json: !ref 
/valid.json +test_json: !ref /test.json +libritts_subsets: null +splits: ["train", "valid", "test"] +split_ratio: null +skip_prep: False + +######################################################## +# Encoder | HF model # +#------------------------------------------------------# +# HuBERT | facebook/hubert-large-ll60k # +# Wav2Vec2 | facebook/wav2vec2-large-960h-lv60-self # +# WavLM | microsoft/wavlm-large # +######################################################## +kmeans_folder: speechbrain/SSL_Quantization +kmeans_dataset: LibriSpeech-100-360-500 +num_clusters: 1000 +codes_folder: !ref /codes +encoder_type: HuBERT +encoder_hub: facebook/hubert-large-ll60k +layer: [1, 3, 7, 12, 18, 23] +skip_extract: False + +speaker_embeddings_folder: !ref /speaker_embeddings +speaker_encoder_hub: speechbrain/spkrec-ecapa-voxceleb-mel-spec + +################################ +# Audio Parameters # +################################ +segment_size: 8960 +code_hop_size: 320 +sample_rate: 16000 +layer_drop: True + +hop_length: 256 +win_length: 1024 +n_mel_channels: 80 +n_fft: 1024 +mel_fmin: 0.0 +mel_fmax: 8000 +mel_normalized: False +power: 1 +norm: "slaney" +mel_scale: "slaney" +dynamic_range_compression: True + +################################ +# Optimization Hyperparameters # +################################ +learning_rate: 0.0002 +weight_decay: 0.9999 +adam_b1: 0.8 +adam_b2: 0.99 +batch_size: 32 + +train_dataloader_opts: + batch_size: !ref + drop_last: False + num_workers: 8 + +valid_dataloader_opts: + batch_size: 1 + num_workers: 8 + +test_dataloader_opts: + batch_size: 1 + num_workers: 8 + +################################ +# Model Parameters and model # +################################ +duration_predictor: False +multi_speaker: True +normalize_speaker_embeddings: False + +# embedding params +vocab_size: 6001 # K-means size + 1 for padding 1000x6 +embedding_dim: 1024 + +# generator params +in_channels: 1216 +out_channels: 1 + +var_pred_hidden_dim: 128 +var_pred_kernel_size: 3 
+var_pred_dropout: 0.5 + +########################################################################################################################################################### +# version | resblock_type | upsample_kernel_sizes | upsample_factors | resblock_kernel_sizes | upsample_initial_channel | resblock_dilation_sizes +# 1 | "1" | [16,16,4,4] | [8, 8, 2, 2] | [3, 7, 11] | 512 | [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +# 2 | "1" | [16,16,4,4] | [8, 8, 2, 2] | [3, 7, 11] | 128 | [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +# 3 | "2" | [16,16,8] | [8,8,4] | [3,5,7] | 256 | [[1,2], [2,6], [3,12]] +########################################################################################################################################################### +resblock_type: "1" +resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] +resblock_kernel_sizes: [3, 7, 11] +upsample_kernel_sizes: [11, 8, 8, 4, 4] +upsample_initial_channel: 512 +upsample_factors: [5, 4, 4, 2, 2] + +inference_padding: 5 +cond_channels: 0 +conv_post_bias: True + +mel_spectogram: !name:speechbrain.lobes.models.HifiGAN.mel_spectogram + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_fft: !ref + n_mels: !ref + f_min: !ref + f_max: !ref + power: !ref + normalized: !ref + norm: !ref + mel_scale: !ref + compression: !ref + +generator: !new:speechbrain.lobes.models.HifiGAN.UnitHifiganGenerator + in_channels: !ref + out_channels: !ref + resblock_type: !ref + resblock_dilation_sizes: !ref + resblock_kernel_sizes: !ref + upsample_kernel_sizes: !ref + upsample_initial_channel: !ref + upsample_factors: !ref + inference_padding: !ref + cond_channels: !ref + conv_post_bias: !ref + vocab_size: !ref + embedding_dim: !ref + duration_predictor: !ref + var_pred_hidden_dim: !ref + var_pred_kernel_size: !ref + var_pred_dropout: !ref + multi_speaker: !ref + normalize_speaker_embeddings: !ref + +discriminator: !new:speechbrain.lobes.models.HifiGAN.HifiganDiscriminator + +modules: + generator: !ref + discriminator: 
!ref + +#generator loss +stft_loss: null +mseg_loss: !new:speechbrain.lobes.models.HifiGAN.MSEGLoss +feat_match_loss: !new:speechbrain.lobes.models.HifiGAN.MelganFeatureLoss +l1_spec_loss: !new:speechbrain.lobes.models.HifiGAN.L1SpecLoss + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_mel_channels: !ref + n_fft: !ref + n_stft: !ref // 2 + 1 + mel_fmin: !ref + mel_fmax: null + mel_normalized: !ref + power: !ref + dynamic_range_compression: !ref +mseg_dur_loss: False + +generator_loss: !new:speechbrain.lobes.models.HifiGAN.GeneratorLoss + stft_loss: !ref + stft_loss_weight: 0 + mseg_loss: !ref + mseg_loss_weight: 1 + feat_match_loss: !ref + feat_match_loss_weight: 10 + l1_spec_loss: !ref + l1_spec_loss_weight: 45 + mseg_dur_loss: !ref + mseg_dur_loss_weight: 1 + +#discriminator loss +msed_loss: !new:speechbrain.lobes.models.HifiGAN.MSEDLoss + +discriminator_loss: !new:speechbrain.lobes.models.HifiGAN.DiscriminatorLoss + msed_loss: !ref + +#optimizer +opt_class_generator: !name:torch.optim.AdamW + lr: !ref + betas: [!ref , !ref ] + +opt_class_discriminator: !name:torch.optim.AdamW + lr: !ref + betas: [!ref , !ref ] + +sch_class_generator: !name:torch.optim.lr_scheduler.ExponentialLR + gamma: !ref + last_epoch: -1 + +sch_class_discriminator: !name:torch.optim.lr_scheduler.ExponentialLR + gamma: !ref + last_epoch: -1 + +#epoch object +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +#checkpointer +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + generator: !ref + discriminator: !ref + counter: !ref diff --git a/recipes/LibriTTS/vocoder/hifigan_discrete/libritts_prepare.py b/recipes/LibriTTS/vocoder/hifigan_discrete/libritts_prepare.py new file mode 120000 index 0000000000..489ab40118 --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan_discrete/libritts_prepare.py @@ -0,0 +1 @@ 
+../../libritts_prepare.py \ No newline at end of file diff --git a/recipes/LibriTTS/vocoder/hifigan_discrete/train.py b/recipes/LibriTTS/vocoder/hifigan_discrete/train.py new file mode 100644 index 0000000000..bbe2e1da7d --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan_discrete/train.py @@ -0,0 +1,570 @@ +#!/usr/bin/env python3 +"""Recipe for training a hifi-gan vocoder on self-supervised representations. +For more details about hifi-gan: https://arxiv.org/pdf/2010.05646.pdf +For more details about speech synthesis using self-supervised representations: https://arxiv.org/pdf/2104.00355.pdf + +To run this recipe, do the following: +> python train.py hparams/train.yaml --data_folder=/path/to/LibriTTS + +Authors + * Jarod Duret 2023 + * Yingzhi WANG 2022 +""" + +import copy +import pathlib as pl +import random +import sys + +import numpy as np +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.data_utils import scalarize + + +class HifiGanBrain(sb.Brain): + def compute_forward(self, batch, stage): + """The forward function, generates synthesized waveforms, + calculates the scores and the features of the discriminator + for synthesized waveforms and real waveforms. + + Arguments + --------- + batch : torch.Tensor or tensors + An element from the dataloader, including inputs for processing. + stage : Stage + The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST + + Returns + ------- + Generator and Discriminator outputs. 
+ """ + batch = batch.to(self.device) + + x, _ = batch.code + y, _ = batch.sig + + # generate synthesized waveforms + y_g_hat, (log_dur_pred, log_dur) = self.modules.generator(x) + y_g_hat = y_g_hat[:, :, : y.size(2)] + + # get scores and features from discriminator for real and synthesized waveforms + scores_fake, feats_fake = self.modules.discriminator(y_g_hat.detach()) + scores_real, feats_real = self.modules.discriminator(y) + + return ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted outputs. + Arguments + --------- + predictions : torch.Tensor + The model generated spectrograms and other metrics from `compute_forward`. + batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient. + """ + batch = batch.to(self.device) + + x, _ = batch.code + y, _ = batch.sig + + # Hold on to the batch for the inference sample. This is needed because + # the infernece sample is run from on_stage_end only, where + # batch information is not available + self.last_batch = (x, y) + + ( + y_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) = predictions + + loss_g = self.hparams.generator_loss( + stage, + y_hat, + y, + scores_fake, + feats_fake, + feats_real, + log_dur_pred, + log_dur, + ) + + loss_d = self.hparams.discriminator_loss(scores_fake, scores_real) + loss = {**loss_g, **loss_d} + self.last_loss_stats[stage] = scalarize(loss) + + return loss + + def fit_batch(self, batch): + """Fits a single batch. 
+ Arguments + --------- + batch: tuple + a training batch + Returns + ------- + loss: torch.Tensor + detached loss + """ + batch = batch.to(self.device) + y, _ = batch.sig + + outputs = self.compute_forward(batch, sb.core.Stage.TRAIN) + ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) = outputs + # calculate discriminator loss with the latest updated generator + loss_d = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[ + "D_loss" + ] + # First train the discriminator + self.optimizer_d.zero_grad() + loss_d.backward() + self.optimizer_d.step() + + # calculate generator loss with the latest updated discriminator + scores_fake, feats_fake = self.modules.discriminator(y_g_hat) + scores_real, feats_real = self.modules.discriminator(y) + outputs = ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) + loss_g = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[ + "G_loss" + ] + # Then train the generator + self.optimizer_g.zero_grad() + loss_g.backward() + self.optimizer_g.step() + + return loss_g.detach().cpu() + + def evaluate_batch(self, batch, stage): + """Evaluate one batch. + + Arguments + --------- + batch : list of torch.Tensors + Batch of data to use for evaluation. Default implementation assumes + this batch has two elements: inputs and targets. + stage : Stage + The stage of the experiment: Stage.VALID, Stage.TEST + + Returns + ------- + detached loss + """ + out = self.compute_forward(batch, stage=stage) + loss = self.compute_objectives(out, batch, stage=stage) + loss_g = loss["G_loss"] + return loss_g.detach().cpu() + + def on_fit_start(self): + """Gets called at the beginning of ``fit()``, on multiple processes + if ``distributed_count > 0`` and backend is ddp and initializes statistics. 
+ """ + self.last_epoch = 0 + self.last_batch = None + self.last_loss_stats = {} + return super().on_fit_start() + + def init_optimizers(self): + """Called during ``on_fit_start()``, initialize optimizers + after parameters are fully configured (e.g. DDP, jit). + """ + if self.opt_class is not None: + ( + opt_g_class, + opt_d_class, + sch_g_class, + sch_d_class, + ) = self.opt_class + + self.optimizer_g = opt_g_class(self.modules.generator.parameters()) + self.optimizer_d = opt_d_class( + self.modules.discriminator.parameters() + ) + self.optimizers_dict = { + "optimizer_g": self.optimizer_g, + "optimizer_d": self.optimizer_d, + } + + self.scheduler_g = sch_g_class(self.optimizer_g) + self.scheduler_d = sch_d_class(self.optimizer_d) + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "optimizer_g", self.optimizer_g + ) + self.checkpointer.add_recoverable( + "optimizer_d", self.optimizer_d + ) + self.checkpointer.add_recoverable( + "scheduler_g", self.scheduler_d + ) + self.checkpointer.add_recoverable( + "scheduler_d", self.scheduler_d + ) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch. + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + """ + if stage == sb.Stage.VALID: + # Update learning rate + self.scheduler_g.step() + self.scheduler_d.step() + lr_g = self.optimizer_g.param_groups[-1]["lr"] + lr_d = self.optimizer_d.param_groups[-1]["lr"] + + stats = { + **self.last_loss_stats[sb.Stage.VALID], + } + + self.hparams.train_logger.log_stats( # 1#2# + stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=stats, + ) + # The tensorboard_logger writes a summary to stdout and to the logfile. 
+ if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=stats, + ) + + # Save the current checkpoint and delete previous checkpoints. + epoch_metadata = { + **{"epoch": epoch}, + **self.last_loss_stats[sb.Stage.VALID], + } + if self.checkpointer is not None: + self.checkpointer.save_and_keep_only( + meta=epoch_metadata, + end_of_epoch=True, + min_keys=["loss"], + ckpt_predicate=( + ( + lambda ckpt: ( + ckpt.meta["epoch"] + % self.hparams.keep_checkpoint_interval + != 0 + ) + ) + if self.hparams.keep_checkpoint_interval is not None + else None + ), + ) + + self.run_inference_sample("Valid", epoch) + + # We also write statistics about test data to stdout and to the TensorboardLogger. + if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( # 1#2# + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + self.run_inference_sample("Test", epoch) + + def run_inference_sample(self, name, epoch): + """Produces a sample in inference mode. + This is called when producing samples. 
+ + Arguments + --------- + name: str + the name of the saved audio folder + epoch: int or str + the epoch number (used in file path calculations) + or "test" for test stage + + Returns + ------- + None + """ + with torch.no_grad(): + if self.last_batch is None: + return + x, y = self.last_batch + + # Preparing model for inference by removing weight norm + inference_generator = copy.deepcopy(self.hparams.generator) + inference_generator.remove_weight_norm() + if inference_generator.duration_predictor: + x = torch.unique_consecutive(x, dim=1) + sig_out = inference_generator.inference(x) + spec_out = self.hparams.mel_spectogram( + audio=sig_out.squeeze(0).cpu() + ) + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_audio( + f"{name}/audio_target", y.squeeze(0), self.hparams.sample_rate + ) + self.tensorboard_logger.log_audio( + f"{name}/audio_pred", + sig_out.squeeze(0), + self.hparams.sample_rate, + ) + self.tensorboard_logger.log_figure(f"{name}/mel_target", x) + self.tensorboard_logger.log_figure(f"{name}/mel_pred", spec_out) + else: + # folder name is the current epoch for validation and "test" for test + folder = ( + self.hparams.epoch_counter.current + if name == "Valid" + else "test" + ) + self.save_audio("target", y.squeeze(0), folder) + self.save_audio("synthesized", sig_out.squeeze(0), folder) + + def save_audio(self, name, data, epoch): + """Saves a single wav file. + + Arguments + --------- + name: str + the name of the saved audio + data: torch.Tensor + the wave data to save + epoch: int or str + the epoch number (used in file path calculations) + or "test" for test stage + """ + target_path = pl.Path(self.hparams.progress_sample_path) / str(epoch) + target_path.mkdir(parents=True, exist_ok=True) + file_name = target_path / f"{name}.wav" + audio_io.save(file_name.as_posix(), data.cpu(), 16000) + + +def sample_interval(seqs, segment_size): + "This function sample an interval of audio and code according to segment size." 
+ N = max([v.shape[-1] for v in seqs]) + seq_len = segment_size if segment_size > 0 else N + hops = [N // v.shape[-1] for v in seqs] + lcm = np.lcm.reduce(hops) + interval_start = 0 + interval_end = N // lcm - seq_len // lcm + start_step = random.randint(interval_start, interval_end) + + new_seqs = [] + for i, v in enumerate(seqs): + start = start_step * (lcm // hops[i]) + end = (start_step + seq_len // lcm) * (lcm // hops[i]) + new_seqs += [v[..., start:end]] + + return new_seqs + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + segment_size = hparams["segment_size"] + code_hop_size = hparams["code_hop_size"] + code_folder = pl.Path(hparams["codes_folder"]) + + # Define audio pipeline: + @sb.utils.data_pipeline.takes("id", "wav", "segment") + @sb.utils.data_pipeline.provides("code", "sig") + def audio_pipeline(utt_id, wav, segment): + info = audio_io.info(wav) + audio = sb.dataio.dataio.read_audio(wav) + audio = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(audio) + + code = np.load(code_folder / f"{utt_id}.npy") + + num_layer = len(hparams["layer"]) + offsets = np.arange(num_layer) * hparams["num_clusters"] + code = code + offsets + 1 + + if hparams["layer_drop"]: + num_layers_to_drop = np.random.randint(0, code.shape[1]) + if num_layers_to_drop > 0: + layers_to_drop = np.random.choice( + code.shape[1], size=num_layers_to_drop, replace=False + ) + code[:, layers_to_drop] = 0 + + code = torch.IntTensor(code) + + # Trim end of audio + code_length = min(audio.shape[0] // code_hop_size, code.shape[0]) + code = code[:code_length] + audio = audio[: code_length * code_hop_size] + + while audio.shape[0] < segment_size: + audio = torch.hstack([audio, audio]) + code = torch.hstack([code, code]) + audio = audio.unsqueeze(0) + + if segment: + code = code.swapdims(0, 1) + audio, code = 
sample_interval([audio, code], segment_size) + code = code.swapdims(0, 1) + + return code, audio + + datasets = {} + data_info = { + "train": hparams["train_json"], + "valid": hparams["valid_json"], + "test": hparams["test_json"], + } + for dataset in hparams["splits"]: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline], + output_keys=["id", "code", "sig"], + ) + + return datasets + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + from libritts_prepare import prepare_libritts + + sb.utils.distributed.run_on_main( + prepare_libritts, + kwargs={ + "data_folder": hparams["data_folder"], + "save_json_train": hparams["train_json"], + "save_json_valid": hparams["valid_json"], + "save_json_test": hparams["test_json"], + "sample_rate": hparams["sample_rate"], + "split_ratio": hparams["split_ratio"], + "libritts_subsets": hparams["libritts_subsets"], + "train_split": hparams["train_split"], + "valid_split": hparams["valid_split"], + "test_split": hparams["test_split"], + "model_name": "HiFi-GAN", + "skip_prep": hparams["skip_prep"], + }, + ) + + from extract_code import extract_libritts + + sb.utils.distributed.run_on_main( + extract_libritts, + kwargs={ + "data_folder": hparams["save_folder"], + "splits": hparams["splits"], + "kmeans_folder": hparams["kmeans_folder"], + "kmeans_dataset": 
hparams["kmeans_dataset"], + "num_clusters": hparams["num_clusters"], + "encoder_type": hparams["encoder_type"], + "encoder_source": hparams["encoder_hub"], + "layer": hparams["layer"], + "save_folder": hparams["save_folder"], + "sample_rate": hparams["sample_rate"], + "skip_extract": hparams["skip_extract"], + }, + ) + + datasets = dataio_prepare(hparams) + + # Brain class initialization + hifi_gan_brain = HifiGanBrain( + modules=hparams["modules"], + opt_class=[ + hparams["opt_class_generator"], + hparams["opt_class_discriminator"], + hparams["sch_class_generator"], + hparams["sch_class_discriminator"], + ], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + if hparams["use_tensorboard"]: + hifi_gan_brain.tensorboard_logger = ( + sb.utils.train_logger.TensorboardLogger( + save_dir=hparams["output_folder"] + "/tensorboard" + ) + ) + + # Training + hifi_gan_brain.fit( + hifi_gan_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + # Test + if "test" in datasets: + hifi_gan_brain.evaluate( + datasets["test"], + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/LibriTTS/vocoder/hifigan_discrete/train_spk.py b/recipes/LibriTTS/vocoder/hifigan_discrete/train_spk.py new file mode 100644 index 0000000000..fe9e440b56 --- /dev/null +++ b/recipes/LibriTTS/vocoder/hifigan_discrete/train_spk.py @@ -0,0 +1,593 @@ +#!/usr/bin/env python3 +"""Recipe for training a hifi-gan vocoder on self-supervised representations and speaker embedding. 
+For more details about hifi-gan: https://arxiv.org/pdf/2010.05646.pdf +For more details about speech synthesis using self-supervised representations: https://arxiv.org/pdf/2104.00355.pdf + +To run this recipe, do the following: +> python train.py hparams/train.yaml --data_folder=/path/to/LibriTTS + +Authors + * Jarod Duret 2023 + * Yingzhi WANG 2022 +""" + +import copy +import pathlib as pl +import random +import sys + +import numpy as np +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.data_utils import scalarize + + +class HifiGanBrain(sb.Brain): + def compute_forward(self, batch, stage): + """The forward function, generates synthesized waveforms, + calculates the scores and the features of the discriminator + for synthesized waveforms and real waveforms. + + Arguments + --------- + batch : torch.Tensor or tensors + An element from the dataloader, including inputs for processing. + stage : Stage + The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST + + Returns + ------- + Generator and Discriminator outputs + """ + batch = batch.to(self.device) + + x, _ = batch.code + y, _ = batch.sig + spk, _ = batch.spk_emb + + # generate synthesized waveforms + y_g_hat, (log_dur_pred, log_dur) = self.modules.generator(x, spk=spk) + y_g_hat = y_g_hat[:, :, : y.size(2)] + + # get scores and features from discriminator for real and synthesized waveforms + scores_fake, feats_fake = self.modules.discriminator(y_g_hat.detach()) + scores_real, feats_real = self.modules.discriminator(y) + + return ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss given the predicted and targeted outputs. + Arguments + --------- + predictions : torch.Tensor + The model generated spectrograms and other metrics from `compute_forward`. 
+ batch : PaddedBatch + This batch object contains all the relevant tensors for computation. + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + Returns + ------- + loss : torch.Tensor + A one-element tensor used for backpropagating the gradient. + """ + batch = batch.to(self.device) + + x, _ = batch.code + y, _ = batch.sig + spk, _ = batch.spk_emb + + # Hold on to the batch for the inference sample. This is needed because + # the infernece sample is run from on_stage_end only, where + # batch information is not available + self.last_batch = (x, y, spk) + + ( + y_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) = predictions + + loss_g = self.hparams.generator_loss( + stage, + y_hat, + y, + scores_fake, + feats_fake, + feats_real, + log_dur_pred, + log_dur, + ) + + loss_d = self.hparams.discriminator_loss(scores_fake, scores_real) + loss = {**loss_g, **loss_d} + self.last_loss_stats[stage] = scalarize(loss) + + return loss + + def fit_batch(self, batch): + """Fits a single batch. 
+ Arguments + --------- + batch: tuple + a training batch + Returns + ------- + loss: torch.Tensor + detached loss + """ + batch = batch.to(self.device) + y, _ = batch.sig + + outputs = self.compute_forward(batch, sb.core.Stage.TRAIN) + ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) = outputs + # calculate discriminator loss with the latest updated generator + loss_d = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[ + "D_loss" + ] + # First train the discriminator + self.optimizer_d.zero_grad() + loss_d.backward() + self.optimizer_d.step() + + # calculate generator loss with the latest updated discriminator + scores_fake, feats_fake = self.modules.discriminator(y_g_hat) + scores_real, feats_real = self.modules.discriminator(y) + outputs = ( + y_g_hat, + scores_fake, + feats_fake, + scores_real, + feats_real, + log_dur_pred, + log_dur, + ) + loss_g = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[ + "G_loss" + ] + # Then train the generator + self.optimizer_g.zero_grad() + loss_g.backward() + self.optimizer_g.step() + + return loss_g.detach().cpu() + + def evaluate_batch(self, batch, stage): + """Evaluate one batch. + + Arguments + --------- + batch : list of torch.Tensors + Batch of data to use for evaluation. Default implementation assumes + this batch has two elements: inputs and targets. + stage : Stage + The stage of the experiment: Stage.VALID, Stage.TEST + + Returns + ------- + detached loss + """ + out = self.compute_forward(batch, stage=stage) + loss = self.compute_objectives(out, batch, stage=stage) + loss_g = loss["G_loss"] + return loss_g.detach().cpu() + + def on_fit_start(self): + """Gets called at the beginning of ``fit()``, on multiple processes + if ``distributed_count > 0`` and backend is ddp and initializes statistics. 
+ """ + self.last_epoch = 0 + self.last_batch = None + self.last_loss_stats = {} + return super().on_fit_start() + + def init_optimizers(self): + """Called during ``on_fit_start()``, initialize optimizers + after parameters are fully configured (e.g. DDP, jit). + """ + if self.opt_class is not None: + ( + opt_g_class, + opt_d_class, + sch_g_class, + sch_d_class, + ) = self.opt_class + + self.optimizer_g = opt_g_class(self.modules.generator.parameters()) + self.optimizer_d = opt_d_class( + self.modules.discriminator.parameters() + ) + self.optimizers_dict = { + "optimizer_g": self.optimizer_g, + "optimizer_d": self.optimizer_d, + } + + self.scheduler_g = sch_g_class(self.optimizer_g) + self.scheduler_d = sch_d_class(self.optimizer_d) + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "optimizer_g", self.optimizer_g + ) + self.checkpointer.add_recoverable( + "optimizer_d", self.optimizer_d + ) + self.checkpointer.add_recoverable( + "scheduler_g", self.scheduler_d + ) + self.checkpointer.add_recoverable( + "scheduler_d", self.scheduler_d + ) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch. + + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + """ + if stage == sb.Stage.VALID: + # Update learning rate + self.scheduler_g.step() + self.scheduler_d.step() + lr_g = self.optimizer_g.param_groups[-1]["lr"] + lr_d = self.optimizer_d.param_groups[-1]["lr"] + + stats = { + **self.last_loss_stats[sb.Stage.VALID], + } + + self.hparams.train_logger.log_stats( # 1#2# + stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=stats, + ) + # The tensorboard_logger writes a summary to stdout and to the logfile. 
+ if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d}, + train_stats=self.last_loss_stats[sb.Stage.TRAIN], + valid_stats=stats, + ) + + # Save the current checkpoint and delete previous checkpoints. + epoch_metadata = { + **{"epoch": epoch}, + **self.last_loss_stats[sb.Stage.VALID], + } + if self.checkpointer is not None: + self.checkpointer.save_and_keep_only( + meta=epoch_metadata, + end_of_epoch=True, + min_keys=["loss"], + ckpt_predicate=( + ( + lambda ckpt: ( + ckpt.meta["epoch"] + % self.hparams.keep_checkpoint_interval + != 0 + ) + ) + if self.hparams.keep_checkpoint_interval is not None + else None + ), + ) + + self.run_inference_sample("Valid", epoch) + + # We also write statistics about test data to stdout and to the TensorboardLogger. + if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( # 1#2# + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=self.last_loss_stats[sb.Stage.TEST], + ) + self.run_inference_sample("Test", epoch) + + def run_inference_sample(self, name, epoch): + """Produces a sample in inference mode. + This is called when producing samples. 
+ + Arguments + --------- + name: str + the name of the saved audio folder + epoch: int or str + the epoch number (used in file path calculations) + or "test" for test stage + + Returns + ------- + None + """ + with torch.no_grad(): + if self.last_batch is None: + return + x, y, spk = self.last_batch + + # Preparing model for inference by removing weight norm + inference_generator = copy.deepcopy(self.hparams.generator) + inference_generator.remove_weight_norm() + if inference_generator.duration_predictor: + x = torch.unique_consecutive(x, dim=1) + sig_out = inference_generator.inference(x, spk=spk) + spec_out = self.hparams.mel_spectogram( + audio=sig_out.squeeze(0).cpu() + ) + if self.hparams.use_tensorboard: + self.tensorboard_logger.log_audio( + f"{name}/audio_target", y.squeeze(0), self.hparams.sample_rate + ) + self.tensorboard_logger.log_audio( + f"{name}/audio_pred", + sig_out.squeeze(0), + self.hparams.sample_rate, + ) + self.tensorboard_logger.log_figure(f"{name}/mel_target", x) + self.tensorboard_logger.log_figure(f"{name}/mel_pred", spec_out) + else: + # folder name is the current epoch for validation and "test" for test + folder = ( + self.hparams.epoch_counter.current + if name == "Valid" + else "test" + ) + self.save_audio("target", y.squeeze(0), folder) + self.save_audio("synthesized", sig_out.squeeze(0), folder) + + def save_audio(self, name, data, epoch): + """Saves a single wav file. + + Arguments + --------- + name: str + the name of the saved audio + data: torch.Tensor + the wave data to save + epoch: int or str + the epoch number (used in file path calculations) + or "test" for test stage + """ + target_path = pl.Path(self.hparams.progress_sample_path) / str(epoch) + target_path.mkdir(parents=True, exist_ok=True) + file_name = target_path / f"{name}.wav" + audio_io.save(file_name.as_posix(), data.cpu(), 16000) + + +def sample_interval(seqs, segment_size): + "This function sample an interval of audio and code according to segment size." 
+ N = max([v.shape[-1] for v in seqs]) + seq_len = segment_size if segment_size > 0 else N + hops = [N // v.shape[-1] for v in seqs] + lcm = np.lcm.reduce(hops) + interval_start = 0 + interval_end = N // lcm - seq_len // lcm + start_step = random.randint(interval_start, interval_end) + + new_seqs = [] + for i, v in enumerate(seqs): + start = start_step * (lcm // hops[i]) + end = (start_step + seq_len // lcm) * (lcm // hops[i]) + new_seqs += [v[..., start:end]] + + return new_seqs + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + segment_size = hparams["segment_size"] + code_hop_size = hparams["code_hop_size"] + code_folder = pl.Path(hparams["codes_folder"]) + speaker_folder = pl.Path(hparams["speaker_embeddings_folder"]) + + # Define audio pipeline: + @sb.utils.data_pipeline.takes("id", "wav", "segment") + @sb.utils.data_pipeline.provides("code", "sig") + def audio_pipeline(utt_id, wav, segment): + info = audio_io.info(wav) + audio = sb.dataio.dataio.read_audio(wav) + audio = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(audio) + + code = np.load(code_folder / f"{utt_id}.npy") + + num_layer = len(hparams["layer"]) + offsets = np.arange(num_layer) * hparams["num_clusters"] + code = code + offsets + 1 + + if hparams["layer_drop"]: + num_layers_to_drop = np.random.randint(0, code.shape[1]) + if num_layers_to_drop > 0: + layers_to_drop = np.random.choice( + code.shape[1], size=num_layers_to_drop, replace=False + ) + code[:, layers_to_drop] = 0 + + code = torch.IntTensor(code) + + # Trim end of audio + code_length = min(audio.shape[0] // code_hop_size, code.shape[0]) + code = code[:code_length] + audio = audio[: code_length * code_hop_size] + + while audio.shape[0] < segment_size: + audio = torch.hstack([audio, audio]) + code = torch.hstack([code, code]) + audio = audio.unsqueeze(0) + + if 
segment: + code = code.swapdims(0, 1) + audio, code = sample_interval([audio, code], segment_size) + code = code.swapdims(0, 1) + + return code, audio + + @sb.utils.data_pipeline.takes("id") + @sb.utils.data_pipeline.provides("spk_emb") + def spk_pipeline(utt_id): + spk_emb = np.load(speaker_folder / f"{utt_id}.npy") + yield torch.FloatTensor(spk_emb) + + datasets = {} + data_info = { + "train": hparams["train_json"], + "valid": hparams["valid_json"], + "test": hparams["test_json"], + } + for dataset in hparams["splits"]: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline, spk_pipeline], + output_keys=["id", "code", "sig", "spk_emb"], + ) + + return datasets + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # If --distributed_launch then + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + from libritts_prepare import prepare_libritts + + sb.utils.distributed.run_on_main( + prepare_libritts, + kwargs={ + "data_folder": hparams["data_folder"], + "save_json_train": hparams["train_json"], + "save_json_valid": hparams["valid_json"], + "save_json_test": hparams["test_json"], + "sample_rate": hparams["sample_rate"], + "split_ratio": hparams["split_ratio"], + "libritts_subsets": hparams["libritts_subsets"], + "train_split": hparams["train_split"], + "valid_split": hparams["valid_split"], + "test_split": hparams["test_split"], + "model_name": "HiFi-GAN", + "skip_prep": hparams["skip_prep"], + }, 
+ ) + + from extract_code import extract_libritts + + sb.utils.distributed.run_on_main( + extract_libritts, + kwargs={ + "data_folder": hparams["save_folder"], + "splits": hparams["splits"], + "kmeans_folder": hparams["kmeans_folder"], + "kmeans_dataset": hparams["kmeans_dataset"], + "num_clusters": hparams["num_clusters"], + "encoder_type": hparams["encoder_type"], + "encoder_source": hparams["encoder_hub"], + "layer": hparams["layer"], + "save_folder": hparams["save_folder"], + "sample_rate": hparams["sample_rate"], + "skip_extract": hparams["skip_extract"], + }, + ) + + from extract_speaker_embeddings import extract_libritts_embeddings + + sb.utils.distributed.run_on_main( + extract_libritts_embeddings, + kwargs={ + "data_folder": hparams["data_folder"], + "splits": hparams["splits"], + "encoder_source": hparams["speaker_encoder_hub"], + "save_folder": hparams["save_folder"], + "sample_rate": hparams["sample_rate"], + "skip_extract": hparams["skip_extract"], + }, + ) + + datasets = dataio_prepare(hparams) + + # Brain class initialization + hifi_gan_brain = HifiGanBrain( + modules=hparams["modules"], + opt_class=[ + hparams["opt_class_generator"], + hparams["opt_class_discriminator"], + hparams["sch_class_generator"], + hparams["sch_class_discriminator"], + ], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + if hparams["use_tensorboard"]: + hifi_gan_brain.tensorboard_logger = ( + sb.utils.train_logger.TensorboardLogger( + save_dir=hparams["output_folder"] + "/tensorboard" + ) + ) + + # Training + hifi_gan_brain.fit( + hifi_gan_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + # Test + if "test" in datasets: + hifi_gan_brain.evaluate( + datasets["test"], + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/Libriheavy/ASR/transformer/README.md 
b/recipes/Libriheavy/ASR/transformer/README.md new file mode 100644 index 0000000000..9abbc04021 --- /dev/null +++ b/recipes/Libriheavy/ASR/transformer/README.md @@ -0,0 +1,56 @@ +# Libriheavy Dataset +This folder contains the scripts to train a Transformer-based speech recognizer. + +1. Please download Libri-Light at https://github.com/facebookresearch/libri-light/tree/main/data_preparation +After this step, please make sure you have all the splits (small, medium, and large) in one folder. +Please note if you want to use the large split, the large.tar file is 3.05TB. Also, the download can take quite a while. + +2. Please git clone the repo https://github.com/k2-fsa/libriheavy, and follow the repo's instruction to prepare Libriheavy manifests. +After this step, please make sure you have all the "jsonl.gz" Libriheavy manifest files in one folder. + +**Note 1:** This recipe relies on the `soundfile` backend for fast audio processing. Libriheavy comes with long audio files, and we need to read them in chunks. In our experiments, we found that `soundfile` was the only audio backend fast enough to read these long audio files. You can dynamically change the backend through the `--audio_backend` parameter in the YAML file. + +**Note 2:** If you don't have the `large` folder but want to run this recipe with the `small` and/or `medium` splits, you need to download the official `dev` and `test` splits from the LibriSpeech dataset. This is necessary because the `dev` and `test` splits for Libriheavy are located in the `large` folder. You can download LibriSpeech at http://www.openslr.org/12 and run the `librispeech_prepare.py` script from the `recipes/LibriSpeech/` folder. Then, specify the `dev_splits` and `test_splits` parameters in the YAML file. 
+ +# How to run +```shell +python train.py hparams/transformer.yaml --data_folder=/path/to/Libri-Light --manifest_folder=/path/to/Libriheavy +``` + +# LibriSpeech Dev/Test Results +Results of trained with the Libriheavy large split and tested with LibriSpeech dev/test sets. + +| Release | hyperparams file | Dev Clean WER (Transformer LM) | Test Clean WER (Transformer LM) | Test Other WER (Transformer LM) | HuggingFace link | Model link | GPUs | +|:-------------:|:-------------:|:-------------:|:---------------------------:| :-----:| :-----:| :-----:| :--------:| +| 24-12-09 | conformer_large.yaml | 1.58 | 1.74 | 3.92 | Not Avail. | Not Avail. | 8xA100 80GB | + + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem 
Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/KsponSpeech/ASR/transformer/hparams/conformer_medium.yaml b/recipes/Libriheavy/ASR/transformer/hparams/conformer_large.yaml similarity index 51% rename from recipes/KsponSpeech/ASR/transformer/hparams/conformer_medium.yaml rename to recipes/Libriheavy/ASR/transformer/hparams/conformer_large.yaml index c19d87f989..b2e07225be 100644 --- a/recipes/KsponSpeech/ASR/transformer/hparams/conformer_medium.yaml +++ b/recipes/Libriheavy/ASR/transformer/hparams/conformer_large.yaml @@ -4,60 +4,102 @@ # Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM # Tokens: unigram # losses: CTC + KLdiv (Label Smoothing loss) -# Training: KsponSpeech 965.2h -# Authors: Jianyuan Zhong, Titouan Parcollet, Samuele Cornell, Dongwon Kim, Dongwoo Kim +# Training: Libriheavy 50k +# Authors: Titouan Parcollet, Shucong Zhang # ############################################################################ # Seed needs to be set at top of yaml, before objects with parameters are made -seed: 7774 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/transformer/ -wer_file: !ref /wer.txt + +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_large/ +output_wer_folder: !ref / save_folder: !ref /save train_log: !ref /train_log.txt # Language model (LM) pretraining # NB: To avoid mismatch, the speech recognizer must be trained with the same -# tokenizer used for LM training. -pretrained_lm_tokenizer_path: ddwkim/asr-conformer-transformerlm-ksponspeech +# tokenizer used for LM training. 
Here, we download everything from the +# speechbrain HuggingFace repository. However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g if you want to use your own LM / tokenizer. +pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech # Data files -data_folder: ../../Tokenizer/results/5K_subword_unigram_LM -train_splits: ["train"] +data_folder: !PLACEHOLDER # e.g., /path/to/Libri-Light +manifest_folder: !PLACEHOLDER # e.g., /path/to/Libriheavy +# I/O +audio_backend: soundfile +data_placeholder: data_root +train_splits: ["large"] # ["small"], ["medium"], or ["large"] dev_splits: ["dev"] -test_splits: ["eval_clean", "eval_other"] +test_splits: ["test_clean", "test_other"] # you can also add "test_clean_large" and/or "test_other_large" skip_prep: False -train_csv: !ref /train.csv -valid_csv: !ref /dev.csv +train_csv: !ref /large.csv # or small.csv or medium.csv +valid_csv: !ref /dev.csv test_csv: - - !ref /eval_clean.csv - - !ref /eval_other.csv + - !ref /dev.csv + - !ref /test_clean.csv + - !ref /test_other.csv # you can also add "test_clean_large.csv" and/or "test_other_large.csv" + + +####################### Training Parameters #################################### -# Training parameters # To make Transformers converge, the global bath size should be large enough. -# The global batch size is computed as batch_size * n_gpus * gradient_accumulation. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. # Empirically, we found that this value should be >= 128. # Please, set your parameters accordingly. -number_of_epochs: 100 -batch_size: 32 # This works for GPUs with 80GB +# 6 epochs are enough for the model to converge with the large split. +# If you are using the small or medium split, please adjust this. 
+number_of_epochs: 6 +batch_size: 16 # This works for 2x GPUs with 32GB ctc_weight: 0.3 -gradient_accumulation: 4 +grad_accumulation_factor: 1 +max_grad_norm: 5.0 loss_reduction: 'batchmean' sorting: random +num_workers: 4 +precision: bf16 # bf16, fp16 or fp32 +avg_checkpoints: 10 # Number of checkpoints to average for evaluation +ckpt_interval_minutes: 15 # stages related parameters -stage_one_epochs: 80 -lr_adam: 0.001 -lr_sgd: 0.000025 +lr_adam: 0.0005 # Feature parameters sample_rate: 16000 -n_fft: 400 +n_fft: 512 n_mels: 80 +win_length: 32 + +# This setup works well for A100 80GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 250 # we use 250 bz * 8 GPUs +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref # Dataloader options train_dataloader_opts: batch_size: !ref shuffle: True + num_workers: !ref valid_dataloader_opts: batch_size: 1 @@ -65,11 +107,12 @@ valid_dataloader_opts: test_dataloader_opts: batch_size: 1 -####################### Model parameters ########################### +####################### Model Parameters ####################################### + # Transformer -d_model: 256 -nhead: 4 -num_encoder_layers: 12 +d_model: 640 +nhead: 8 +num_encoder_layers: 14 num_decoder_layers: 6 d_ffn: 2048 transformer_dropout: 0.1 @@ -88,15 +131,11 @@ min_decode_ratio: 0.0 max_decode_ratio: 1.0 valid_search_interval: 10 valid_beam_size: 10 -test_beam_size: 60 -lm_weight: 0.20 +test_beam_size: 66 +lm_weight: 0.60 ctc_weight_decode: 0.40 
-############################## models ################################ - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - update_until_epoch: 4 +############################## Models ########################################## CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd input_shape: (8, 10, 80) @@ -122,6 +161,9 @@ Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.Transforme normalize_before: True causal: False +# This is the TransformerLM that is used according to the Huggingface repository +# Visit the HuggingFace model corresponding to the pretrained_lm_tokenizer_path +# For more details about the model! # NB: It has to match the pre-trained TransformerLM!! lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length vocab: !ref @@ -144,55 +186,71 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + modules: - normalize: !ref CNN: !ref Transformer: !ref seq_lin: !ref ctc_lin: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] + normalize: !ref # define two optimizers here for two-stage training -Adam: !name:torch.optim.Adam +Adam: !name:torch.optim.AdamW lr: !ref betas: (0.9, 0.98) eps: 0.000000001 -SGD: !name:torch.optim.SGD - lr: !ref - momentum: 0.99 - nesterov: True +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +####################### Decoding & optimiser ########################### -valid_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] - bos_index: !ref +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer eos_index: !ref blank_index: !ref + ctc_fc: !ref + + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: 1.15 + 
+scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + ctc: !ref + transformerlm: !ref + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref using_eos_threshold: False - length_normalization: False - + length_normalization: True + scorer: !ref -test_search: !new:speechbrain.decoders.S2STransformerBeamSearch - modules: [!ref , !ref , !ref ] +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] bos_index: !ref eos_index: !ref - blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref - ctc_weight: !ref - lm_weight: !ref - lm_modules: !ref - temperature: 1.25 - temperature_lm: 1.25 + temperature: 1.15 using_eos_threshold: False length_normalization: True + scorer: !ref log_softmax: !new:torch.nn.LogSoftmax dim: -1 @@ -207,8 +265,7 @@ seq_cost: !name:speechbrain.nnet.losses.kldiv_loss noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler lr_initial: !ref - n_warmup_steps: 3000 -# model_size: !ref + n_warmup_steps: 50000 checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref @@ -221,26 +278,44 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.SpecAugment - time_warp: False - time_warp_window: 5 - time_warp_mode: bicubic - freq_mask: True - n_freq_mask: 2 - time_mask: True - n_time_mask: 4 - replace_with_zero: False - freq_mask_width: 30 - time_mask_width: 40 - -# speed_perturb: !new:speechbrain.processing.speech_augmentation.SpeedPerturb -# orig_freq: !ref -# speeds: [95, 100, 105] 
+############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 4 + drop_count_high: 4 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] compute_features: !new:speechbrain.lobes.features.Fbank sample_rate: !ref n_fft: !ref n_mels: !ref + win_length: !ref + +############################## Logging and Pretrainer ########################## train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref @@ -249,7 +324,9 @@ error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats # The pretrainer allows a mapping between pretrained files and instances that -# are declared in the yaml. +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. 
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer collect_in: !ref loadables: diff --git a/recipes/Libriheavy/ASR/transformer/libriheavy_prepare.py b/recipes/Libriheavy/ASR/transformer/libriheavy_prepare.py new file mode 120000 index 0000000000..9238d76b00 --- /dev/null +++ b/recipes/Libriheavy/ASR/transformer/libriheavy_prepare.py @@ -0,0 +1 @@ +../../libriheavy_prepare.py \ No newline at end of file diff --git a/recipes/Libriheavy/ASR/transformer/train.py b/recipes/Libriheavy/ASR/transformer/train.py new file mode 100644 index 0000000000..6f42121646 --- /dev/null +++ b/recipes/Libriheavy/ASR/transformer/train.py @@ -0,0 +1,505 @@ +#!/usr/bin/env python3 +"""Recipe for training a Transformer ASR system with Libriheavy. +The system employs an encoder, a decoder, and an attention mechanism +between them. Decoding is performed with (CTC/Att joint) beamsearch coupled with a neural +language model. + +To run this recipe, do the following: +> python train.py hparams/conformer_large.yaml + +With the default hyperparameters, the system employs a convolutional frontend and a transformer. +The decoder is based on a Transformer decoder. Beamsearch coupled with a Transformer +language model is used on the top of decoder probabilities. + +The neural network is trained on both CTC and negative-log likelihood +targets and sub-word units estimated with Byte Pair Encoding (BPE) +are used as basic recognition tokens. + +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, tokens (e.g., characters instead of BPE), +training split (e.g., small, medium, or large), and many +other possible variations. + +Note: This recipe relies on the `soundfile` backend for fast audio processing. +Libriheavy comes with long audio files, and we need to read them in chunks. 
+In our experiments, we found that `soundfile` was the only audio backend fast enough to read these long audio files. +You can dynamically change the backend through the `audio_backend` parameter in the YAML file. + +Authors +------- + * Titouan Parcollet 2024 + * Shucong Zhang 2024 +""" + +import os +import sys +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + +SAMPLING_RATE = 16000 + + +# Define training procedure +class ASR(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_bos, _ = batch.tokens_bos + + # compute features + feats = self.hparams.compute_features(wavs) + current_epoch = self.hparams.epoch_counter.current + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + # Add feature augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels(tokens_bos) + + # forward modules + src = self.modules.CNN(feats) + + enc_out, pred = self.modules.Transformer( + src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index + ) + + # output layer for ctc log-probabilities + logits = self.modules.ctc_lin(enc_out) + p_ctc = self.hparams.log_softmax(logits) + + # output layer for seq2seq log-probabilities + pred = self.modules.seq_lin(pred) + p_seq = self.hparams.log_softmax(pred) + + # Compute outputs + hyps = None + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if any([is_valid_search, is_test_search]): + # Note: For valid_search, for the sake of efficiency, we only perform beamsearch with + # limited capacity and no LM to give user some idea of how the AM is doing + + # Decide searcher for inference: valid or test search + if stage == sb.Stage.VALID: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + else: + hyps, _, _, _ = self.hparams.test_search( + enc_out.detach(), wav_lens + ) + + return p_ctc, p_seq, wav_lens, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + (p_ctc, p_seq, wav_lens, hyps) = predictions + + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + tokens, tokens_lens = batch.tokens + + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if hasattr(self.hparams, "fea_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + 
tokens, tokens_lens, tokens_eos, tokens_eos_lens + ) + + loss_seq = self.hparams.seq_cost( + p_seq, tokens_eos, length=tokens_eos_lens + ).sum() + + loss_ctc = self.hparams.ctc_cost( + p_ctc, tokens, wav_lens, tokens_lens + ).sum() + + loss = ( + self.hparams.ctc_weight * loss_ctc + + (1 - self.hparams.ctc_weight) * loss_seq + ) + + if stage != sb.Stage.TRAIN: + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if current_epoch % valid_search_interval == 0 or ( + stage == sb.Stage.TEST + ): + # Decode token terms to words + predicted_words = [ + tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps + ] + target_words = [wrd.split(" ") for wrd in batch.wrd] + self.wer_metric.append(ids, predicted_words, target_words) + + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) + return loss + + def on_evaluate_start(self, max_key=None, min_key=None): + """perform checkpoint average if needed""" + super().on_evaluate_start() + + ckpts = self.checkpointer.find_checkpoints( + max_key=max_key, min_key=min_key + ) + ckpt = sb.utils.checkpoints.average_checkpoints( + ckpts, recoverable_name="model" + ) + + self.hparams.model.load_state_dict(ckpt, strict=True) + self.hparams.model.eval() + print("Loaded the average") + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.acc_metric = self.hparams.acc_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["ACC"] = self.acc_metric.summarize() + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if 
( + current_epoch % valid_search_interval == 0 + or stage == sb.Stage.TEST + ): + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID: + lr = self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"ACC": stage_stats["ACC"], "epoch": epoch}, + max_keys=["ACC"], + num_to_keep=self.hparams.avg_checkpoints, + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + # save the averaged checkpoint at the end of the evaluation stage + # delete the rest of the intermediate checkpoints + # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint + self.checkpointer.save_and_keep_only( + meta={"ACC": 1.1, "epoch": epoch}, + max_keys=["ACC"], + num_to_keep=1, + ) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. 
+ """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_placeholder": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_placeholder": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_placeholder": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + valtest_datasets = [valid_data] + [i for k, i in test_datasets.items()] + + # We get the tokenizer as we need it to encode the labels when creating + # mini-batches. + tokenizer = hparams["tokenizer"] + + # 2. 
Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(valtest_datasets, audio_pipeline) + + @sb.utils.data_pipeline.takes("wav", "duration", "start") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline_train(wav, duration, start): + duration = float(duration) + start = float(start) + duration = int(duration * SAMPLING_RATE) + start = int(start * SAMPLING_RATE) + + sig = sb.dataio.dataio.read_audio( + {"file": wav, "start": start, "stop": start + duration}, + backend=hparams["audio_backend"], + ) + return sig + + sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline_train) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("text") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + yield wrd + tokens_list = tokenizer.encode_as_ids(wrd) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline_libri(wrd): + yield wrd + tokens_list = tokenizer.encode_as_ids(wrd) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(valtest_datasets, text_pipeline_libri) + sb.dataio.dataset.add_dynamic_item([train_data], text_pipeline) + + # 4. 
Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # 1. 
# Dataset prep + from libriheavy_prepare import prepare_libriheavy + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # multi-gpu (ddp) save data preparation + if not hparams["skip_prep"]: + run_on_main( + prepare_libriheavy, + kwargs={ + "data_folder": hparams["data_folder"], + "manifest_folder": hparams["manifest_folder"], + "save_folder": hparams["output_folder"], + "tr_splits": hparams["train_splits"], + "dev_splits": hparams["dev_splits"], + "te_splits": hparams["test_splits"], + "skip_prep": hparams["skip_prep"], + "data_placeholder": hparams["data_placeholder"], + }, + ) + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams) + + # We download the pretrained LM from HuggingFace (or elsewhere depending on + # the path given in the YAML file). The tokenizer is loaded at the same time. 
+ hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + opt_class=hparams["Adam"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # adding objects to trainer: + asr_brain.tokenizer = hparams["tokenizer"] + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + max_key="ACC", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/Libriheavy/README.md b/recipes/Libriheavy/README.md new file mode 100644 index 0000000000..35869327d4 --- /dev/null +++ b/recipes/Libriheavy/README.md @@ -0,0 +1,43 @@ +# Libriheavy ASR with Transformers. +This folder contains the scripts to train a Transformer-based speech recognizer. + +1. 
Please download Libri-Light at https://github.com/facebookresearch/libri-light/tree/main/data_preparation +After this step, please make sure you have all the splits (small, medium, and large) in one folder. +Please note if you want to use the large split, the large.tar file is 3.05TB. Also, the download can take quite a while. + +2. Please git clone the repo https://github.com/k2-fsa/libriheavy, and follow the repo's instruction to prepare Libriheavy manifests. +After this step, please make sure you have all the "jsonl.gz" Libriheavy manifest files in one folder. + +**Note 1:** This recipe relies on the `soundfile` backend for fast audio processing. Libriheavy comes with long audio files, and we need to read them in chunks. In our experiments, we found that `soundfile` was the only audio backend fast enough to read these long audio files. You can dynamically change the backend through the `--audio_backend` parameter in the YAML file. + +**Note 2:** If you don't have the `large` folder but want to run this recipe with the `small` and/or `medium` splits, you need to download the official `dev` and `test` splits from the LibriSpeech dataset. This is necessary because the `dev` and `test` splits for Libriheavy are located in the `large` folder. You can download LibriSpeech at http://www.openslr.org/12 and run the `librispeech_prepare.py` script from the `recipes/LibriSpeech/` folder. Then, specify the `dev_splits` and `test_splits` parameters in the YAML file. + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/Libriheavy/libriheavy_prepare.py b/recipes/Libriheavy/libriheavy_prepare.py new file mode 100644 index 0000000000..d90731746e --- /dev/null +++ b/recipes/Libriheavy/libriheavy_prepare.py @@ -0,0 +1,433 @@ +""" +This script prepares the Libriheavy dataset for ASR. 
+ +Authors +------- +* Titouan Parcollet 2024 +* Shucong Zhang 2024 +""" + +import csv +import functools +import gzip +import json +import os +import re +from dataclasses import dataclass + +from speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map + +logger = get_logger(__name__) + +SAMPLING_RATE = 16000 +LOWER_DURATION_THRESHOLD_IN_S = 1.0 # Should not happen in that dataset +UPPER_DURATION_THRESHOLD_IN_S = 100 # Should not happen in that dataset +LOWER_WORDS_THRESHOLD = 3 +JSON_SAMPLE_PROGRESS = 1000000 + + +@dataclass +class LibriheavyRow: + """Represents one row of data from the LibriHeavy dataset. + + Arguments + --------- + ID : str + Unique identifier for the audio segment. + duration : float + Duration of the audio segment in seconds. + start : float + Start time of the segment within the original audio file in seconds. + wav : str + Path to the audio file. + spk_id : str + Speaker identifier. + text : str + Transcription text corresponding to the audio segment. + """ + + ID: str + duration: float + start: float + wav: str + spk_id: str + text: str + + +def prepare_libriheavy( + data_folder, + manifest_folder, + save_folder, + tr_splits=[], + dev_splits=[], + te_splits=[], + skip_prep=False, + data_placeholder="data_root", +): + """ + Prepares the csv files for the Libriheavy dataset. + 1. Please download the Libri-Light dataset. + Download: https://github.com/facebookresearch/libri-light/tree/main/data_preparation + + 2. Please download the manifests. + Download: https://github.com/k2-fsa/libriheavy + + Arguments + --------- + data_folder : str + Path to the folder where the original Libri-Light dataset is stored. + e.g. /my_path/to/libri-light + manifest_folder : str + Path to the folder where the Libriheavy manifest (jsonl.gz files) is stored. + e.g. /my_path/to/libriheavy + save_folder : str + The directory where to store the csv files. + tr_splits : list + Train split to prepare from. 
+ ['small'] -> 0.5k hours data + ['medium'] -> 5k hours data + ['large'] -> 50k hours data + dev_splits : list + Dev split to prepare from. + te_splits : list + List of test splits to prepare from ['test_clean','test_others', + 'test_clean_large','test_others_large']. + skip_prep: bool + If True, data preparation is skipped. + data_placeholder: str + This variable is used to replace the audio path by the data_placeholder + in the csv file. + + Returns + ------- + None + + """ + + if skip_prep: + return + + splits = tr_splits + dev_splits + te_splits + + # Setting the save folder + os.makedirs(save_folder, exist_ok=True) + + for split_index in range(len(splits)): + split = splits[split_index] + save_csv = save_folder + f"/{split}.csv" + + if os.path.isfile(save_csv): + msg = "%s already exists, skipping data preparation!" % (save_csv) + logger.info(msg) + continue + + csv_corpus = extract_transcripts( + manifest_folder + f"/libriheavy_cuts_{split}.jsonl.gz" + ) + + if "dev" in split or "test" in split: + data_folder_for_split = data_folder + "/large" + else: + data_folder_for_split = data_folder + f"/{split}" + + create_csv( + csv_corpus, + save_csv, + data_folder_for_split, + data_placeholder, + ) + + +def extract_transcripts(jsonl_gz_file_path): + """Extract the json file into a list. + + Arguments + --------- + jsonl_gz_file_path : str + Path to the jsonl_gz file to extract. + + Returns + ------- + list + A list containing SpeechBrain ready lines of data. + """ + + logger.info( + f"Extracting transcriptions from {jsonl_gz_file_path} to a list. This step can be fairly long." 
+ ) + + csv_corpus = [] + + # Open the gzipped JSONL file and the CSV file + with gzip.open(jsonl_gz_file_path, "rt") as jsonl_file: + # Write the header to the CSV file + header = "ID,wav,start,duration,text,spk_id" + csv_corpus.append(header) + + # Initialize the progress bar + for cpt, line in enumerate(jsonl_file): + if (cpt + 1) % JSON_SAMPLE_PROGRESS == 0: + logger.info(f"{cpt} samples have been loaded!") + data = json.loads(line) + snt_id = data["id"] + wav = data["recording"]["id"] + start = str(data["start"]) + duration = str(data["duration"]) + texts = data["supervisions"][0]["custom"]["texts"] + spk_id = str(data["supervisions"][0]["speaker"]) + # Extract transcriptions + text = texts[1] + # Write the row to the CSV file + csv_corpus.append( + snt_id + + "," + + wav + + "," + + start + + "," + + duration + + "," + + text + + "," + + spk_id + ) + + return csv_corpus + + +def process_line(line, data_folder, data_placeholder): + """Process a line of the Libriheavy csv list. + + Arguments + --------- + line : str + A line of the Libriheavy csv list. + data_folder : str + Path to the Libri-Light dataset. + data_placeholder : str + This variable is used to replace the audio path by the data_placeholder + in the csv file. + + Returns + ------- + LibriheavyRow + A dataclass containing the information about the line. + """ + + if len(line.split(",")) != 6: + return None + + snt_id, wav, start, duration, text, spk_id = line.split(",") + + start = float(start) + duration = float(duration) + + # Remove the large / small denomination as already given by user. + wav = os.path.join(*wav.split("/")[1:]) + + # Unicode Normalization + words = unicode_normalisation(text) + + # !! Language specific cleaning !! 
+ words = english_specific_preprocess(words) + + if words is None or len(words) < LOWER_WORDS_THRESHOLD: + return None + + audio_path = os.path.join(data_folder, wav) + ".flac" + + # Reading the signal (to retrieve duration in seconds) + if not os.path.isfile(audio_path): + msg = "\tError loading: %s" % (str(audio_path)) + logger.info(msg) + return None + + audio_path = audio_path.replace(data_placeholder, data_folder) + + if duration < LOWER_DURATION_THRESHOLD_IN_S: + return None + elif duration > UPPER_DURATION_THRESHOLD_IN_S: + return None + + # Composition of the csv_line + return LibriheavyRow(snt_id, duration, start, audio_path, spk_id, words) + + +def create_csv( + filtered_csv_corpus, + csv_file, + data_folder, + data_placeholder, +): + """ + Creates the csv file given a list of wav files. + + Arguments + --------- + filtered_csv_corpus : list + Pre filtered list containing each sample. Obtained with functions + extract_transcripts() + csv_file : str + New csv file. + data_folder : str + Path of the Libri-Light dataset. + data_placeholder : str + This variable is used to replace the audio path by the data_placeholder + in the csv file. + + """ + + # We load and skip the header + csv_lines = filtered_csv_corpus + csv_data_lines = csv_lines[1:] + nb_samples = len(csv_data_lines) + + msg = "Preparing CSV files for %s samples ..." % (str(nb_samples)) + logger.info(msg) + + # Adding some Prints + msg = "Creating csv lists in %s ..." % (csv_file) + logger.info(msg) + + # Process and write lines + total_duration = 0.0 + + line_processor = functools.partial( + process_line, + data_folder=data_folder, + data_placeholder=data_placeholder, + ) + + # Stream into a .tmp file, and rename it to the real path at the end. 
+ csv_file_tmp = csv_file + ".tmp" + + with open(csv_file_tmp, mode="w", newline="", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + csv_writer.writerow( + ["ID", "duration", "start", "wav", "spk_id", "text"] + ) + + for row in parallel_map(line_processor, csv_data_lines): + if row is None: + continue + + total_duration += row.duration + csv_writer.writerow( + [ + row.ID, + str(row.duration), + str(row.start), + row.wav, + row.spk_id, + row.text, + ] + ) + + os.replace(csv_file_tmp, csv_file) + + # Final prints + msg = "%s successfully created!" % (csv_file) + logger.info(msg) + msg = "Number of samples: %s " % (str(len(csv_data_lines))) + logger.info(msg) + msg = "Total duration: %s Hours" % (str(round(total_duration / 3600, 2))) + logger.info(msg) + + +def unicode_normalisation(text): + return str(text) + + +def english_specific_preprocess(sentence): + """ + Preprocess English text from the CommonVoice dataset into space-separated + words. + This removes various punctuation and treats it as word boundaries. + It normalises and retains various apostrophes (’‘´) between letters, but not + other ones, which are probably quotation marks. + It capitalises all text. + It removes non-English characters and those with accents, on the basis that + each of them indicates text that is likely to be pronounced differently by + different native speakers of English. + Sometimes this is because the word with the accent is foreign, but often the + other words in the Sentence are also hard to pronounce. + An extreme example is ö,ß, and ü, which are often whole German sentences. + CommonVoice regularly has data added to it. + This function may error out if new characters show up in the training, dev, + or test sets. + If this happens, add the case to test_common_voice_prepare.py, and fix this + function in a backward-compatible manner. 
+ """ + + # These characters mean we should discard the sentence, because the + # pronunciation will be too uncertain. + stop_characters = ( + "[" + "áÁàăâåäÄãÃāảạæćčČçÇðéÉèÈêěëęēəğíîÎïīịıłṃńňñóÓòôőõøØōŌœŒřšŠşșȘúÚûūụýžþ" + # Suggests the sentence is not English but German. + "öÖßüÜ" + # All sorts of languages: Greek, Arabic... + "\u0370-\u1aaf" + # Chinese/Japanese/Korean. + "\u4e00-\u9fff" + # Technical symbols. + "\u2190-\u23ff" + # Symbols that could be pronounced in various ways. + "\\[\\]€→=~%§_#" + "]" + ) + + # These characters mark word boundaries. + split_character_regex = '[ ",:;!?¡\\.…()\\-—–‑_“”„/«»]' + + # These could all be used as apostrophes in the middle of words. + # If at the start or end of a word, they will be removed. + apostrophes_or_quotes = "['`´ʻ‘’]" + + sentence_level_mapping = {"&": " and ", "+": " plus ", "fl": "fl"} + + final_characters = set(" ABCDEFGHIJKLMNOPQRSTUVWXYZ'") + + if re.search(stop_characters, sentence) is not None: + return None + + sentence_mapped = sentence + if any((source in sentence) for source in sentence_level_mapping): + for source, target in sentence_level_mapping.items(): + sentence_mapped = sentence_mapped.replace(source, target) + + # Some punctuation that indicates a word boundary. + words_split = re.split(split_character_regex, sentence_mapped) + words_quotes = [ + # Use ' as apostrophe. + # Remove apostrophes at the start and end of words (probably quotes). + # Word-internal apostrophes, even where rotated, are retained. + re.sub(apostrophes_or_quotes, "'", word).strip("'") + for word in words_split + ] + + # Processing that does not change the length. + words_upper = [word.upper() for word in words_quotes] + + words_mapped = [ + # word.translate(character_mapping) + word + for word in words_upper + # Previous processing may have reduced words to nothing. + # Remove them. 
+ if word != "" + ] + + result = " ".join(words_mapped) + character_set = set(result) + assert character_set <= final_characters, ( + "Unprocessed characters", + sentence, + result, + character_set - final_characters, + ) + return result diff --git a/recipes/Loquacious/ASR/transformer/README.md b/recipes/Loquacious/ASR/transformer/README.md new file mode 100644 index 0000000000..29a52856ec --- /dev/null +++ b/recipes/Loquacious/ASR/transformer/README.md @@ -0,0 +1,114 @@ +# Loquacious Set ASR with CTC + Attention Transformers + +This repository provides the scripts needed to run an Automatic Speech Recognition (ASR) experiment using the [Loquacious Set](https://huggingface.co/datasets/speechbrain/LoquaciousSet). The experiment leverages a combination of Connectionist Temporal Classification (CTC) and attention-based transformer models. + +## Table of Contents + +- [How to Run](#how-to-run) +- [Results](#results) +- [Loquacious Set](#loquacious-set) + - [Description](#description) + - [Downloading the Data](#downloading-the-data) + - [Obtaining the CSV Files for Tokenization](#obtaining-the-csv-files-for-tokenization) +- [About SpeechBrain](#about-speechbrain) +- [Citing SpeechBrain](#citing-speechbrain) + +## How to Run + +Execute the training script using the command below. Be sure to replace the placeholder values (e.g., `[number_of_gpus]`, `{hparam_file}`, and file paths) with your specific configuration: + +```bash +torchrun --nproc_per_node=[number_of_gpus] train.py hparams/{hparam_file}.py \ + --hf_caching_dir=/path/to/hf/cache/dir \ + --tls_subset=[large|medium|small] \ + --train_csv=/path/to/downloaded/train.csv +``` + +**Note:** +The `hf_caching_dir` typically corresponds to the `$HF_HUB_CACHE` environment variable. If this variable isn’t set, locate your HuggingFace cache directory (commonly in the `.cache` folder) and provide its path. 
+ +## Results + +Below is a summary of experimental results: + +| Hyperparameters File | # Parameters | Split | Validation WER | Test WER | GPUs | HuggingFace Link | +| ----------------------- | ------------ | ----------------- | -------------- | -------- | ------------- | ---------------------------------------------------------------------------- | +| `conformer_base.yaml` | 100M | Small (250h) | 22.3 | 22.7 | 4xV100 32GB | N/A | +| `conformer_large.yaml` | 250M | Medium (2,500h) | 10.7 | 11.9 | 4xV100 32GB | N/A | +| `conformer_large.yaml` | 250M | Large (25,000h) | 7.9 | 8.8 | 8xV100 32GB | N/A | +| `conformer_xlarge.yaml` | 480M | Large (25,000h) | 6.8 | 7.5 | 8xV100 32GB | [Model](https://huggingface.co/speechbrain/asr-conformer-loquacious) | + +## Loquacious Set + +### Description + +The Loquacious set is a curated blend of five permissively licensed datasets. The table below summarizes its composition: + +| Dataset | Amount Taken (large/medium/small/dev/test) | License | +| ---------------------------- | ------------------------------------------ | --------- | +| VoxPopuli | 550 / 500 / 50 / 5 / 7 | CC0 | +| LibriHeavy | 11,000 / 500 / 50 / 0 / 0 | CC BY 4.0 | +| Librispeech (dev-/test-other) | 0 / 0 / 0 / 5 / 7 | CC BY 4.0 | +| YODAS | 6,100 / 500 / 50 / 1.5 / 1.5 | CC BY 3.0 | +| People's Speech | 5,900 / 500 / 50 / 1.5 / 1.5 | CC-BY 4.0 | +| CommonVoice 18.0 | 1,660 / 500 / 50 / 5 / 7 | CC0 | + +For the **development** and **test** splits, only data from the respective `dev` and `test` sets of each dataset are used (i.e., no data is extracted from the training set, except for YODAS). For YODAS, data is drawn from the `en003` split and manually verified for audio and transcription quality to form the `dev`/`test` partitions. + +### Downloading the Data + +**Note:** Downloading and extracting the large subset of the Loquacious dataset requires approximately **4 TB** of storage. 
+ +As with other SpeechBrain projects, this recipe does not include a data download script. Please refer to the [HuggingFace webpage](https://huggingface.co/datasets/speechbrain/LoquaciousSet) for instructions on downloading the dataset using the `datasets` library or `huggingface-cli`. + +### Obtaining the CSV Files for Tokenization + +The tokenizer requires CSV files (available [in the repository](https://huggingface.co/datasets/speechbrain/LoquaciousSet/tree/main)) for training. You have two options: + +1. **Clone the entire repository:** + + ```bash + git clone https://huggingface.co/datasets/speechbrain/LoquaciousSet + ``` + +2. **Download specific CSV files:** + + ```bash + wget https://huggingface.co/datasets/speechbrain/LoquaciousSet/resolve/main/loquacious_large_train.csv?download=true + wget https://huggingface.co/datasets/speechbrain/LoquaciousSet/resolve/main/loquacious_medium_train.csv?download=true + wget https://huggingface.co/datasets/speechbrain/LoquaciousSet/resolve/main/loquacious_small_train.csv?download=true + ``` + +Alternatively, download only the CSV files using `huggingface-cli`: + +```bash +huggingface-cli download speechbrain/LoquaciousSet --include="*.csv" --repo-type dataset +``` + +## About SpeechBrain + +- Website: [https://speechbrain.github.io/](https://speechbrain.github.io/) +- Code: [https://github.com/speechbrain/speechbrain/](https://github.com/speechbrain/speechbrain/) +- HuggingFace: [https://huggingface.co/speechbrain/](https://huggingface.co/speechbrain/) + +## Citing SpeechBrain + +If you use SpeechBrain for your research or business, please cite it: + +```bibtex +@inproceedings{Loquacious, + title = {Loquacious Set: 25,000 Hours of Transcribed and Diverse English Speech Recognition Data for Research and Commercial Use}, + author = {Titouan Parcollet and Yuan Tseng and Shucong Zhang and Rogier van Dalen}, + year = {2025}, + booktitle = {Interspeech 2025}, +} +@misc{speechbrainV1, + title={Open-Source Conversational AI with 
SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +``` \ No newline at end of file diff --git a/recipes/Loquacious/ASR/transformer/extra_requirements.txt b/recipes/Loquacious/ASR/transformer/extra_requirements.txt new file mode 100644 index 0000000000..dce87c6dd6 --- /dev/null +++ b/recipes/Loquacious/ASR/transformer/extra_requirements.txt @@ -0,0 +1 @@ +datasets >= 3.1 diff --git a/recipes/Loquacious/ASR/transformer/hparams/conformer_base.yaml b/recipes/Loquacious/ASR/transformer/hparams/conformer_base.yaml new file mode 100644 index 0000000000..fe0320c392 --- /dev/null +++ b/recipes/Loquacious/ASR/transformer/hparams/conformer_base.yaml @@ -0,0 +1,265 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: Transformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Authors: Titouan Parcollet and Jianyuan Zhong +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_en_med/ 
+output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +# Whether to use HuggingFace or SpeechBrain data loader. This depends on how your LoquaciousSet has been prepared. +tls_subset: !PLACEHOLDER +hf_hub: speechbrain/LoquaciousSet # path to the dataset +hf_caching_dir: !PLACEHOLDER # path to where the dataset will be extracted by HF. + +# Necessary for the BPE tokenization +train_csv: !PLACEHOLDER + +ckpt_interval_minutes: 15 # save checkpoint every N min + +####################### Training Parameters #################################### +number_of_epochs: 100 +optimizer_step_limit: 500000 +ctc_weight: 0.3 +grad_accumulation_factor: 1 +loss_reduction: 'batchmean' +sorting: random +num_workers: 6 +precision: fp16 # bf16, fp16 or fp32 + +# stages related parameters +lr_adam: 0.0008 +weight_decay: 0.003 +warmup_steps: 40000 +augment_warmup: 30000 + +# BPE parameters +token_type: bpe # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for A100 80GB GPU, adapts it to your needs. +max_batch_length_train: 300 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +train_dataloader_opts: + num_workers: !ref + +test_dataloader_opts: + batch_size: 2 + +####################### Model Parameters ########################### +# Transformer +d_model: 512 +nhead: 8 +num_encoder_layers: 12 +num_decoder_layers: 6 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5120 + +# Outputs +blank_index: 3 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 1 # We do greedy here so it's faster to decode ... +test_beam_size: 80 +ctc_weight_decode: 0.3 +scorer_beam_scale: 0.3 + +############################## models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + conformer_activation: !ref + activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + 
- [!ref , !ref , !ref , !ref ] + +# We define two optimizers as we have two stages (training + finetuning) +Adam: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + scorer_beam_scale: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.WarmAndExpDecayLRSchedule + lr: !ref + n_warmup_steps: !ref + total_steps: !ref + decay_factor: 0.05 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 1 + +# Frequency Drop 
+freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 1 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/Loquacious/ASR/transformer/hparams/conformer_large.yaml b/recipes/Loquacious/ASR/transformer/hparams/conformer_large.yaml new file mode 100644 index 0000000000..4eb9f34b0a --- /dev/null +++ b/recipes/Loquacious/ASR/transformer/hparams/conformer_large.yaml @@ -0,0 +1,265 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: Transformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Authors: Titouan Parcollet and Jianyuan Zhong +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_en/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +# Whether to use HuggingFace or SpeechBrain data loader. This depends on how your LoquaciousSet has been prepared. 
+tls_subset: !PLACEHOLDER +hf_hub: speechbrain/LoquaciousSet # path to the dataset +hf_caching_dir: !PLACEHOLDER # path to where the dataset will be extracted by HF. + +# Necessary for the BPE tokenization +train_csv: !PLACEHOLDER + +ckpt_interval_minutes: 15 # save checkpoint every N min + +####################### Training Parameters #################################### +number_of_epochs: 100 +optimizer_step_limit: 500000 +ctc_weight: 0.3 +grad_accumulation_factor: 1 +loss_reduction: 'batchmean' +sorting: random +num_workers: 6 +precision: fp16 # bf16, fp16 or fp32 + +# stages related parameters +lr_adam: 0.0008 +weight_decay: 0.003 +warmup_steps: 40000 +augment_warmup: 30000 + +# BPE parameters +token_type: bpe # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for A100 80GB GPU, adapts it to your needs. +max_batch_length_train: 300 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +train_dataloader_opts: + num_workers: !ref + +test_dataloader_opts: + batch_size: 2 + +####################### Model Parameters ########################### +# Transformer +d_model: 768 +nhead: 12 +num_encoder_layers: 14 +num_decoder_layers: 6 +d_ffn: 3072 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5120 + +# Outputs +blank_index: 3 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 1 # We do greedy here so it's faster to decode ... +test_beam_size: 80 +ctc_weight_decode: 0.3 +scorer_beam_scale: 0.3 + +############################## models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + conformer_activation: !ref + activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList 
+ - [!ref , !ref , !ref , !ref ] + +# We define two optimizers as we have two stages (training + finetuning) +Adam: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + scorer_beam_scale: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.WarmAndExpDecayLRSchedule + lr: !ref + n_warmup_steps: !ref + total_steps: !ref + decay_factor: 0.05 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 1 + +# Frequency 
Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 1 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/Loquacious/ASR/transformer/hparams/conformer_small.yaml b/recipes/Loquacious/ASR/transformer/hparams/conformer_small.yaml new file mode 100644 index 0000000000..ac59dc0088 --- /dev/null +++ b/recipes/Loquacious/ASR/transformer/hparams/conformer_small.yaml @@ -0,0 +1,285 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: Transformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Authors: Titouan Parcollet and Jianyuan Zhong +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_en/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +# Whether to use HuggingFace or SpeechBrain data loader. This depends on how your The Loquacious Set has been prepared. 
+use_huggingface: False +tls_subset: large +hf_hub: speechbrain/LoquaciousSet +hf_caching_dir: !PLACEHOLDER # path to where the dataset will be extracted by HF. +train_csv: !PLACEHOLDER + +# We remove utterances longer than 100s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". +avoid_if_longer_than: 100.0 + +# THIS IS TERRIBLE BUT WE HAVE NO CHOICE. +# Some version of the CV dataset may contain one or two files of more than +# 2 min in the validation and or test. This is an error by design of the dataset +# as these files contain 90% of silence. We exclude them. +avoid_if_longer_than_val_test: 100.0 + +ckpt_interval_minutes: 15 # save checkpoint every N min + +####################### Training Parameters #################################### +number_of_epochs: 10 +optimizer_step_limit: 500000 +batch_size: 32 # This works with a 32GB GPU ! (bs * nb_gpu * accum) > 128 ! +ctc_weight: 0.3 +grad_accumulation_factor: 1 +loss_reduction: 'batchmean' +sorting: random +num_workers: 6 +precision: fp16 # bf16, fp16 or fp32 + +# stages related parameters +lr_adam: 0.0008 +weight_decay: 0.003 +warmup_steps: 40000 +augment_warmup: 500000 + +# BPE parameters +token_type: bpe # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for A100 80GB GPU, adapt it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 300 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples.
+batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 8 + +test_dataloader_opts: + batch_size: 8 + + +####################### Model Parameters ########################### +# Transformer +d_model: 256 +nhead: 4 +num_encoder_layers: 12 +num_decoder_layers: 6 +d_ffn: 1024 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 1024 + +# Outputs +blank_index: 3 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 1 # We do greedy here so it's faster to decode ... 
+test_beam_size: 80 +ctc_weight_decode: 0.3 +scorer_beam_scale: 0.3 + +############################## models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + conformer_activation: !ref + activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +# We define two optimizers as we have two stages (training + finetuning) +Adam: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + scorer_beam_scale: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + # scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 
+ using_eos_threshold: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.WarmAndExpDecayLRSchedule + lr: !ref + n_warmup_steps: !ref + total_steps: !ref + decay_factor: 0.05 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 3 + drop_count_high: 3 + replace: "zeros" + dim: 1 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +# fea_augment: !new:speechbrain.augment.augmenter.Augmenter +# min_augmentations: 3 +# max_augmentations: 3 +# augment_prob: 1.0 +# augmentations: [ +# !ref , +# !ref , +# !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git 
a/recipes/Loquacious/ASR/transformer/hparams/conformer_xlarge.yaml b/recipes/Loquacious/ASR/transformer/hparams/conformer_xlarge.yaml new file mode 100644 index 0000000000..e402f8fb2e --- /dev/null +++ b/recipes/Loquacious/ASR/transformer/hparams/conformer_xlarge.yaml @@ -0,0 +1,268 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: Transformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Authors: Titouan Parcollet and Jianyuan Zhong +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_en/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +# Whether to use HuggingFace or SpeechBrain data loader. This depends on how your LoquaciousSet has been prepared. +tls_subset: !PLACEHOLDER +hf_hub: speechbrain/LoquaciousSet # path to the dataset +hf_caching_dir: !PLACEHOLDER # path to where the dataset will be extracted by HF. 
+ +# Necessary for the BPE tokenization +train_csv: !PLACEHOLDER + + +ckpt_interval_minutes: 15 # save checkpoint every N min + +####################### Training Parameters #################################### +number_of_epochs: 100 +optimizer_step_limit: 500000 +ctc_weight: 0.3 +grad_accumulation_factor: 1 +loss_reduction: 'batchmean' +sorting: random +num_workers: 6 +precision: fp16 # bf16, fp16 or fp32 + +# stages related parameters +lr_adam: 0.0007 +weight_decay: 0.001 +warmup_steps: 60000 +augment_warmup: 80000 + +# BPE parameters +token_type: bpe # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# This setup works well for A100 80GB GPU, adapts it to your needs. +max_batch_length_train: 300 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random +max_batch_ex: 256 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +train_dataloader_opts: + num_workers: !ref + +test_dataloader_opts: + batch_size: 2 + + +####################### Model Parameters ########################### +# Transformer +d_model: 1024 +nhead: 16 +num_encoder_layers: 18 +num_decoder_layers: 6 +d_ffn: 3072 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5120 + +# Outputs +blank_index: 3 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 1 # We do greedy here so it's faster to decode ... 
+test_beam_size: 80 +ctc_weight_decode: 0.3 +scorer_beam_scale: 0.3 + +############################## models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + conformer_activation: !ref + activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +# We define two optimizers as we have two stages (training + finetuning) +Adam: !name:torch.optim.AdamW + lr: !ref + weight_decay: !ref + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + scorer_beam_scale: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + # scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 
+ using_eos_threshold: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.WarmAndExpDecayLRSchedule + lr: !ref + n_warmup_steps: !ref + total_steps: !ref + decay_factor: 0.05 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 1 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 1 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git 
a/recipes/Loquacious/ASR/transformer/loquacious_set_prepare.py b/recipes/Loquacious/ASR/transformer/loquacious_set_prepare.py new file mode 120000 index 0000000000..e6bede8a1f --- /dev/null +++ b/recipes/Loquacious/ASR/transformer/loquacious_set_prepare.py @@ -0,0 +1 @@ +../../loquacious_set_prepare.py \ No newline at end of file diff --git a/recipes/Loquacious/ASR/transformer/train.py b/recipes/Loquacious/ASR/transformer/train.py new file mode 100644 index 0000000000..2a72195d80 --- /dev/null +++ b/recipes/Loquacious/ASR/transformer/train.py @@ -0,0 +1,416 @@ +#!/usr/bin/env python3 +"""Recipe for training a Transformer ASR system with The Loquacious Set. + +Authors +------- + * Titouan Parcollet 2024 +""" + +import os +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio.dataio import read_audio +from speechbrain.dataio.sampler import DynamicBatchSampler # noqa +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_bos, _ = batch.tokens_bos + # Add waveform augmentation if specified. 
+ if ( + stage == sb.Stage.TRAIN + and hasattr(self.hparams, "wav_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + + # compute features + feats = self.hparams.compute_features(wavs) + current_epoch = self.hparams.epoch_counter.current + feats = self.hparams.normalize(feats, wav_lens, epoch=current_epoch) + + # Add feature augmentation if specified. + if ( + stage == sb.Stage.TRAIN + and hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels(tokens_bos) + + # forward modules + src = self.modules.CNN(feats) + enc_out, pred = self.modules.Transformer( + src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index + ) + + # output layer for ctc log-probabilities + logits = self.modules.ctc_lin(enc_out) + p_ctc = self.hparams.log_softmax(logits) + + # output layer for seq2seq log-probabilities + pred = self.modules.seq_lin(pred) + p_seq = self.hparams.log_softmax(pred) + + # Compute outputs + hyps = None + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if is_valid_search: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + + elif is_test_search: + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + + return p_ctc, p_seq, wav_lens, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + (p_ctc, p_seq, wav_lens, predicted_tokens) = predictions + + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + tokens, tokens_lens = batch.tokens + + # Augment Labels + if stage 
== sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if ( + hasattr(self.hparams, "wav_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.wav_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens + ) + if ( + hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup + ): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, tokens_lens, tokens_eos, tokens_eos_lens + ) + + loss_seq = self.hparams.seq_cost( + p_seq, tokens_eos, length=tokens_eos_lens + ) + loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) + loss = ( + self.hparams.ctc_weight * loss_ctc + + (1 - self.hparams.ctc_weight) * loss_seq + ) + + if stage != sb.Stage.TRAIN: + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if current_epoch % valid_search_interval == 0 or ( + stage == sb.Stage.TEST + ): + # Decode token terms to words + predicted_words = self.tokenizer( + predicted_tokens, task="decode_from_list" + ) + + # Convert indices to words + target_words = undo_padding(tokens, tokens_lens) + target_words = self.tokenizer( + target_words, task="decode_from_list" + ) + if not isinstance(ids, list): + ids = ids.tolist() + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) + return loss + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + 
def on_stage_start(self, stage, epoch):
+        """Gets called at the beginning of each epoch"""
+        if stage != sb.Stage.TRAIN:
+            self.acc_metric = self.hparams.acc_computer()
+            self.cer_metric = self.hparams.cer_computer()
+            self.wer_metric = self.hparams.error_rate_computer()
+
+    def on_stage_end(self, stage, stage_loss, epoch):
+        """Gets called at the end of an epoch."""
+        # Compute/store important stats
+        stage_stats = {"loss": stage_loss}
+        if stage == sb.Stage.TRAIN:
+            self.train_stats = stage_stats
+        else:
+            stage_stats["ACC"] = self.acc_metric.summarize()
+            current_epoch = self.hparams.epoch_counter.current
+            valid_search_interval = self.hparams.valid_search_interval
+            if (
+                current_epoch % valid_search_interval == 0
+                or stage == sb.Stage.TEST
+            ):
+                stage_stats["WER"] = self.wer_metric.summarize("error_rate")
+                stage_stats["CER"] = self.cer_metric.summarize("error_rate")
+
+        # log stats and save checkpoint at end-of-epoch
+        if stage == sb.Stage.VALID:
+            # report different epoch stages according to the current stage
+            current_epoch = self.hparams.epoch_counter.current
+            lr = self.hparams.noam_annealing.current_lr
+            steps = self.hparams.noam_annealing.current_step
+
+            epoch_stats = {
+                "epoch": epoch,
+                "lr": lr,
+                "steps": steps,
+            }
+            self.hparams.train_logger.log_stats(
+                stats_meta=epoch_stats,
+                train_stats=self.train_stats,
+                valid_stats=stage_stats,
+            )
+            self.checkpointer.save_and_keep_only(
+                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
+                max_keys=["ACC"],
+            )
+
+        elif stage == sb.Stage.TEST:
+            self.hparams.train_logger.log_stats(
+                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
+                test_stats=stage_stats,
+            )
+            if if_main_process():
+                with open(
+                    self.hparams.test_wer_file, "w", encoding="utf-8"
+                ) as w:
+                    self.wer_metric.write_stats(w)
+
+
+# Define custom data procedure
+def dataio_prepare_hf(hparams, tokenizer):
+    """This function prepares the datasets to be used in the brain class.
+    It also defines the data processing pipeline through user-defined functions.
+    This is valid for The Loquacious Set prepared with HuggingFace.
+    """
+    from loquacious_set_prepare import load_datasets

+    hf_data_dict = load_datasets(
+        hparams["tls_subset"],
+        hparams["hf_hub"],
+        hparams["hf_caching_dir"],
+    )

+    # We must rename the 'id' column because SpeechBrain sampling uses this
+    # name for the sampler already; also it's not an id, but an audio_path.
+    train_data = hf_data_dict["train"].rename_column("ID", "audio_id")
+    valid_data = hf_data_dict["dev"].rename_column("ID", "audio_id")
+    test_data = hf_data_dict["test"].rename_column("ID", "audio_id")

+    # We need to get the full list of durations of all samples to enable
+    # bucketing from the dynamic batch sampler. We do it that way instead
+    # of the usual iterable because the HF dataset ALWAYS opens the file
+    # when called, which means that the dynamic sampling needs to read the
+    # 1.5M audio samples from disk. Using a list instead is much faster.
+    train_len_list = list(train_data.select_columns("duration")["duration"])
+    val_len_list = list(valid_data.select_columns("duration")["duration"])

+    train_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
+        train_data,
+    )

+    valid_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
+        valid_data,
+    )

+    test_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
+        test_data,
+    )

+    # we sort testing/val data to speed up decoding and get better results.
+    valid_data = valid_data.filtered_sorted(
+        sort_key="duration",
+    )
+    test_data = test_data.filtered_sorted(
+        sort_key="duration",
+    )

+    datasets = [train_data, valid_data, test_data]

+    # 2. Define audio pipeline:
+    @sb.utils.data_pipeline.takes("wav")
+    @sb.utils.data_pipeline.provides("sig")
+    def audio_pipeline(wav):
+        sig = read_audio(wav["bytes"])
+        return sig

+    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

+    # 3.
Define text pipeline: + @sb.utils.data_pipeline.takes("text") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + yield wrd + tokens_list = tokenizer.sp.encode_as_ids(wrd) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + ) + + # 5. we instantiate the needed samplers with dynamic batching + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + lengths_list=train_len_list, + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + lengths_list=val_len_list, + **dynamic_hparams_valid, + ) + + train_loader_kwargs = { + "batch_sampler": train_batch_sampler, + "num_workers": hparams["num_workers"], + } + valid_loader_kwargs = {"batch_sampler": valid_batch_sampler} + + return ( + train_data, + valid_data, + test_data, + train_loader_kwargs, + valid_loader_kwargs, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # 
Defining tokenizer and loading it + tokenizer = SentencePiece( + model_dir=hparams["save_folder"], + vocab_size=hparams["output_neurons"], + annotation_train=hparams["train_csv"], + annotation_read="text", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], + ) + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_data, + train_loader_kwargs, + valid_loader_kwargs, + ) = dataio_prepare_hf(hparams, tokenizer) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + opt_class=hparams["Adam"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # adding objects to trainer: + asr_brain.tokenizer = tokenizer + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_loader_kwargs, + valid_loader_kwargs=valid_loader_kwargs, + ) + + # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "wer_valid.txt" + ) + asr_brain.evaluate( + valid_data, + max_key="ACC", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) + + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "wer_test.txt" + ) + asr_brain.evaluate( + test_data, + max_key="ACC", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/Loquacious/loquacious_set_prepare.py b/recipes/Loquacious/loquacious_set_prepare.py new file mode 100644 index 0000000000..a356b5ffa2 --- /dev/null +++ b/recipes/Loquacious/loquacious_set_prepare.py @@ -0,0 +1,57 @@ +"""Simple utilities to load the mysterious Loquacious dataset from HuggingFace. +This does not actually prepare the Loquacious dataset. For this, please refer to the dataset_preparation folder. 
+This only loads the prepared dataset to be used in a SpeechBrain recipe.

+Authors
+-------
+ * Titouan Parcollet, 2024
+"""

+import os

+from speechbrain.utils.logger import get_logger
+from speechbrain.utils.parallel import get_available_cpu_count

+logger = get_logger(__name__)


+def load_datasets(subset, hf_download_folder, hf_caching_dir):
+    """Load and create the HuggingFace dataset for the Loquacious. It must
+    have been downloaded manually into hf_download_folder first. This function
+    operates in an "offline" mode and will not try to download the dataset.

+    Parameters
+    ----------
+    subset: str
+        Name of the subset of interest: one of [large, medium, small, clean]
+    hf_download_folder : str
+        The path where HF stored the dataset.
+    hf_caching_dir : str
+        The path where HF will extract (or not if already done) the dataset.

+    Returns
+    -------
+    Dictionary of HuggingFace datasets. ["train", "dev", "test"]
+    """

+    try:
+        import datasets
+        from datasets import load_dataset
+    except ImportError as error:
+        raise ImportError(error)

+    # Managing the download dir as HF can be capricious with this.
+    logger.info("Loading dataset from: " + str(hf_download_folder))

+    nproc = get_available_cpu_count()
+    os.environ["HF_DATASETS_OFFLINE"] = "1"
+    datasets.disable_progress_bars()
+    hf_data = load_dataset(
+        hf_download_folder,
+        name=subset,
+        num_proc=nproc,
+        cache_dir=hf_caching_dir,
+    )
+    os.environ["HF_DATASETS_OFFLINE"] = "0"

+    return hf_data
diff --git a/recipes/MEDIA/ASR/CTC/README.md b/recipes/MEDIA/ASR/CTC/README.md
new file mode 100644
index 0000000000..e442039671
--- /dev/null
+++ b/recipes/MEDIA/ASR/CTC/README.md
@@ -0,0 +1,49 @@
+# Media ASR with CTC + Wav2Vec 2.0.
+This folder contains scripts necessary to run an ASR experiment with the Media French dataset: [Media ASR (ELRA-S0272)](https://catalogue.elra.info/en-us/repository/browse/ELRA-S0272/), [Media SLU (ELRA-E0024)](https://catalogue.elra.info/en-us/repository/browse/ELRA-E0024/) both needed for the task. Please also download the 2 csv files given [here](https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0) and place them in the `../../MEDIA` directory. + +This recipe has been implemented following the paper of G. Laperrière, V. Pelloin, A. Caubriere, S. Mdhaffar, N. Camelin, S. Ghannay, B. Jabaian, Y. Estève, [The Spoken Language Understanding MEDIA Benchmark Dataset in the Era of Deep Learning: data updates, training and evaluation tools](https://aclanthology.org/2022.lrec-1.171). + +# How to run +Do not forget to process the dataset and change the `!PLACEHOLDER` in the yaml file. + +```bash +python train_hf_wav2vec.py hparams/train_hf_wav2vec.yaml +``` + +# Data preparation +It is important to note that Media initially offers audio files at 8kHz. Hence, audio files are upsampled on the fly within the preparation script to 16kHz. + +# Results + +| Media Release | hyperparams | Test ChER | Wav2Vec | Training time | HuggingFace link | Model link | +|:------:|:------:|:------:|:------:|:------:|:------:|:------:| +| 2008-03-27 | train_with_wav2vec.yaml | 4.78 | [LeBenchmark wav2vec2-FR-3K-large](https://huggingface.co/LeBenchmark/wav2vec2-FR-3K-large) | 12m30s per epoch | [here](https://huggingface.co/speechbrain/asr-wav2vec2-ctc-MEDIA) | Not Avail. | + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/MEDIA/ASR/CTC/hparams/train_hf_wav2vec.yaml b/recipes/MEDIA/ASR/CTC/hparams/train_hf_wav2vec.yaml new file mode 100644 index 0000000000..f682c95de0 --- /dev/null +++ b/recipes/MEDIA/ASR/CTC/hparams/train_hf_wav2vec.yaml @@ -0,0 +1,163 @@ +# ################################ +# Model: Wav2Vec + DNN + CTC + Softmax +# Authors: +# Gaëlle Laperrière 2023 +# ################################ + +# ------ Paths and parameters + +# Seed needs to be set at top of yaml, before objects with parameters are made. 
+seed: 4242 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/media_ASR_wav2vec/ +cer_file_test: !ref /cer_test.txt +ctc_file_test: !ref /ctc_test.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +data_folder: !PLACEHOLDER # Path of folders S0272 and E0024, to process ELRA original xml datasets. +channels_path: !PLACEHOLDER # Path of the channels.csv file downloaded via https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 +concepts_path: !PLACEHOLDER # Path of the concepts_full_relax.csv file downloaded via https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 +skip_wav: False # Skip the wav files storing if already done before. +method: Null # Remove or keep specifiers in concepts. Method used by default. +task: asr # Parse SLU or ASR data. +skip_prep: False # Skip data preparation to csv because already done. +process_test2: False # Process the test2 corpus + +# See https://github.com/pytorch/fairseq/blob/main/examples/wav2vec/README.md +# for Wav2Vec models and https://huggingface.co/LeBenchmark for French ones. +wav2vec_url: LeBenchmark/wav2vec2-FR-3K-large + +# Data files: +csv_train: !ref /csv/train.csv +csv_valid: !ref /csv/dev.csv +csv_test: !ref /csv/test.csv # If the test2 was processed, you can change the file to test2.csv + +# Data parameters: +# With data_parallel batch_size is split into N jobs. +# With DDP batch_size is multiplied by N jobs. +batch_size: 4 +test_batch_size: 2 +# We remove utterances longer than 90s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". 
+avoid_if_longer_than: 90.0 +avoid_if_smaller_than: 0.0 +num_workers: 3 +dataloader_options: + batch_size: !ref + num_workers: !ref + shuffle: True +test_dataloader_options: + batch_size: !ref + num_workers: !ref + +# Feature parameters: +sample_rate: 16000 +feats_dim: 1024 + +####################### Training Parameters ####################################: +number_of_epochs: 30 +lr: 1 +lr_wav2vec: 0.0001 +annealing_factor: 0.8 +annealing_factor_wav2vec: 0.9 +improvement_threshold: 0.0025 +improvement_threshold_wav2vec: 0.0025 +patient: 0 +patient_wav2vec: 0 +sorting: ascending + +####################### Model Parameters ####################################### +activation: !name:torch.nn.LeakyReLU +dnn_blocks: 3 +dnn_neurons: 512 + +# Wav2Vec parameters: +freeze: False + +# Decoding parameters: +blank_index: 0 + +# Outputs: +output_neurons: 67 + +# ------ Functions and classes + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref /wav2vec2_checkpoint + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, !ref ] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +output_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: True + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + output_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +model_wav2vec2: !new:torch.nn.ModuleList + - [!ref ] + +opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +opt_class_wav2vec: !name:torch.optim.Adam + lr: !ref + +lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: !ref + annealing_factor: !ref + patient: !ref + 
+lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: !ref + annealing_factor: !ref + patient: !ref + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + wav2vec2: !ref + lr_annealing: !ref + lr_annealing_wav2vec: !ref + counter: !ref + tokenizer: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +ctc_computer: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: batch + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/MEDIA/ASR/CTC/media_prepare.py b/recipes/MEDIA/ASR/CTC/media_prepare.py new file mode 120000 index 0000000000..30d4994a1c --- /dev/null +++ b/recipes/MEDIA/ASR/CTC/media_prepare.py @@ -0,0 +1 @@ +../../media_prepare.py \ No newline at end of file diff --git a/recipes/MEDIA/ASR/CTC/train_hf_wav2vec.py b/recipes/MEDIA/ASR/CTC/train_hf_wav2vec.py new file mode 100644 index 0000000000..ce3966f7e3 --- /dev/null +++ b/recipes/MEDIA/ASR/CTC/train_hf_wav2vec.py @@ -0,0 +1,350 @@ +#!/usr/bin/env python3 + +""" +Recipe for training a CTC based ASR system with Media. +The system employs a wav2vec2 model and a decoder. + +To run this recipe, do the following: +> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml + +With the default hyperparameters, the system employs a VanillaNN encoder. + +The neural network is trained on greedy CTC. + +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, training tasks (Media , PortMedia), +and many other possible variations. 
+ +Authors + * Gaëlle Laperrière 2023 +""" + +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml +from media_prepare import prepare_media + +import speechbrain as sb +from speechbrain.dataio.batch import PaddedBatch +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure. +class ASR(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from waveform to output probabilities.""" + + # Get data. + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + + # Forward pass. + feats = self.modules.wav2vec2(wavs, wav_lens) + + x = self.modules.enc(feats) + + # Output layer for seq2seq log-probabilities. + logits = self.modules.output_lin(x) + p_ctc = self.hparams.log_softmax(logits) + + return p_ctc, wav_lens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC) given predictions and targets.""" + + # Get data. + batch = batch.to(self.device) + chars, char_lens = batch.char_encoded + ids = batch.id + + # Get predictions & loss. + p_ctc, wav_lens = predictions + loss = self.hparams.ctc_cost(p_ctc, chars, wav_lens, char_lens) + + # Get metrics. + if stage != sb.Stage.TRAIN: + # Generate sequences with CTC greedy decoder. + sequence = sb.decoders.ctc_greedy_decode( + p_ctc, wav_lens, self.hparams.blank_index + ) + # Update metrics. + self.cer_metric.append( + ids=ids, + predict=sequence, + target=chars, + target_len=char_lens, + ind2lab=self.tokenizer.decode_ndim, + ) + self.ctc_metric.append(ids, p_ctc, chars, wav_lens, char_lens) + + return loss + + def init_optimizers(self): + """Initializes the wav2vec2 optimizer and model optimizer""" + + # Join optimizers. 
+ self.optimizer_wav2vec = self.hparams.opt_class_wav2vec( + self.hparams.model_wav2vec2.parameters() + ) + self.optimizer = self.hparams.opt_class(self.hparams.model.parameters()) + + # Add optimizers to checkpoint recoverables. + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "optimizer_wav2vec", self.optimizer_wav2vec + ) + self.checkpointer.add_recoverable("optimizer", self.optimizer) + + self.optimizers_dict = { + "wav2vec_optimizer": self.optimizer_wav2vec, + "model_optimizer": self.optimizer, + } + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + + # Re-initialize metrics. + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.ctc_metric = self.hparams.ctc_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + + # Save loss and metrics. + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"]) + old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + sb.nnet.schedulers.update_learning_rate( + self.optimizer_wav2vec, new_lr_wav2vec + ) + self.hparams.train_logger.log_stats( + stats_meta={ + "epoch": epoch, + "lr": old_lr, + "lr_wav2vec": old_lr_wav2vec, + }, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"CER": stage_stats["CER"]}, + min_keys=["CER"], + ) + + # Same plus write results in txt files. 
+        elif stage == sb.Stage.TEST:
+            self.hparams.train_logger.log_stats(
+                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
+                test_stats=stage_stats,
+            )
+            with open(hparams["cer_file_test"], "w", encoding="utf-8") as w:
+                self.cer_metric.write_stats(w)
+            with open(hparams["ctc_file_test"], "w", encoding="utf-8") as w:
+                self.ctc_metric.write_stats(w)


+# Define custom data procedure.
+def dataio_prepare(hparams):
+    """This function prepares the datasets to be used in the brain class.
+    It also defines the data processing pipeline through user-defined functions.
+    """

+    # 1. Define datasets:
+    csv_folder = hparams["save_folder"] + "/csv"

+    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
+        csv_path=hparams["csv_train"],
+        replacements={"data_root": csv_folder},
+    )

+    # We sort training data to speed up training and get better results.
+    # When sorting do not shuffle in dataloader! Otherwise it is pointless.
+    if hparams["sorting"] == "ascending":
+        train_data = train_data.filtered_sorted(
+            sort_key="duration",
+            key_max_value={"duration": hparams["avoid_if_longer_than"]},
+            key_min_value={"duration": hparams["avoid_if_smaller_than"]},
+        )
+        hparams["dataloader_options"]["shuffle"] = False
+    elif hparams["sorting"] == "descending":
+        train_data = train_data.filtered_sorted(
+            sort_key="duration",
+            reverse=True,
+            key_max_value={"duration": hparams["avoid_if_longer_than"]},
+            key_min_value={"duration": hparams["avoid_if_smaller_than"]},
+        )
+        hparams["dataloader_options"]["shuffle"] = False
+    elif hparams["sorting"] == "random":
+        train_data = train_data.filtered_sorted(
+            key_max_value={"duration": hparams["avoid_if_longer_than"]},
+            key_min_value={"duration": hparams["avoid_if_smaller_than"]},
+        )

+    else:
+        raise NotImplementedError(
+            "sorting must be random, ascending or descending"
+        )

+    # We also sort the validation data so it is faster to validate.
+ valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["csv_valid"], replacements={"data_root": csv_folder} + ) + valid_data = valid_data.filtered_sorted(sort_key="duration", reverse=True) + + # We also sort the test data so it is faster to validate. + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["csv_test"], replacements={"data_root": csv_folder} + ) + test_data = test_data.filtered_sorted(sort_key="duration", reverse=True) + + datasets = [train_data, valid_data, test_data] + + label_encoder = sb.dataio.encoder.CTCTextEncoder() + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav", "start", "stop") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav, start, stop): + start = int(float(start) * hparams["sample_rate"]) + stop = int(float(stop) * hparams["sample_rate"]) + speech_segment = {"file": wav, "start": start, "stop": stop} + sig = sb.dataio.dataio.read_audio(speech_segment) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("char") + @sb.utils.data_pipeline.provides("char_list", "char_encoded") + def text_pipeline(char): + char_list = char.strip().split() + yield char_list + char_encoded = label_encoder.encode_sequence_torch(char_list) + yield char_encoded + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Create a label encoder instead of a tokenizer for our tag list: + lab_enc_file = hparams["save_folder"] + "/labelencoder.txt" + label_encoder.load_or_create( + path=lab_enc_file, + from_didatasets=[train_data], + output_key="char_list", + special_labels={"blank_label": hparams["blank_index"]}, + sequence_input=True, + ) + + # 5. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "char_encoded"], + ) + + # 6. 
Make DataLoaders and shuffle if needed: + dataloader_train = torch.utils.data.DataLoader( + train_data, + batch_size=hparams["batch_size"], + num_workers=hparams["dataloader_options"]["num_workers"], + collate_fn=PaddedBatch, + shuffle=hparams["dataloader_options"]["shuffle"], + ) + dataloader_valid = torch.utils.data.DataLoader( + valid_data, + batch_size=hparams["batch_size"], + num_workers=hparams["dataloader_options"]["num_workers"], + collate_fn=PaddedBatch, + shuffle=hparams["dataloader_options"]["shuffle"], + ) + dataloader_test = torch.utils.data.DataLoader( + test_data, + batch_size=hparams["test_batch_size"], + num_workers=hparams["test_dataloader_options"]["num_workers"], + collate_fn=PaddedBatch, + ) + + return dataloader_train, dataloader_valid, dataloader_test, label_encoder + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides. + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol. + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory. + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Due to DDP, we do the preparation ONLY on the main python process + run_on_main( + prepare_media, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "channels_path": hparams["channels_path"], + "concepts_path": hparams["concepts_path"], + "skip_wav": hparams["skip_wav"], + "method": hparams["method"], + "task": hparams["task"], + "skip_prep": hparams["skip_prep"], + "process_test2": hparams["process_test2"], + }, + ) + + # Create the datasets objects as well as tokenization and encoding. + train_data, valid_data, test_data, label_encoder = dataio_prepare(hparams) + + # Trainer initialization. 
+ asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Adding objects to trainer. + asr_brain.tokenizer = label_encoder + asr_brain.tokenizer.add_unk() # handle unknown SLU labels + + # Check for stopped training. + asr_brain.checkpointer.recover_if_possible() + + # Train. + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + progressbar=True, + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["test_dataloader_options"], + ) + + # Test. + asr_brain.evaluate( + test_data, + min_key="CER", + progressbar=True, + test_loader_kwargs=hparams["test_dataloader_options"], + ) diff --git a/recipes/MEDIA/README.md b/recipes/MEDIA/README.md new file mode 100644 index 0000000000..ed4f858fe1 --- /dev/null +++ b/recipes/MEDIA/README.md @@ -0,0 +1,35 @@ +# Media data preparation. +The `media_prepare.py` script allows to prepare the Media French dataset for experiments. You need both [Media ASR (ELRA-S0272)](https://catalogue.elra.info/en-us/repository/browse/ELRA-S0272/) and [Media SLU (ELRA-E0024)](https://catalogue.elra.info/en-us/repository/browse/ELRA-E0024/) to run the script. Please also download the 2 csv files given [here](https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0) and place them in the `MEDIA` directory. + +The recipes have been implemented following the paper of G. Laperrière, V. Pelloin, A. Caubriere, S. Mdhaffar, N. Camelin, S. Ghannay, B. Jabaian, Y. Estève, [The Spoken Language Understanding MEDIA Benchmark Dataset in the Era of Deep Learning: data updates, training and evaluation tools](https://aclanthology.org/2022.lrec-1.171). 
+ +**The results obtained with the different models can be found in the corresponding sub-directories!** + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/MEDIA/SLU/CTC/README.md b/recipes/MEDIA/SLU/CTC/README.md new file mode 100644 index 0000000000..435416627d --- /dev/null +++ 
b/recipes/MEDIA/SLU/CTC/README.md @@ -0,0 +1,52 @@ +# Media SLU with CTC + Wav2Vec 2.0. +This folder contains scripts necessary to run an SLU experiment with the Media French dataset: [Media ASR (ELRA-S0272)](https://catalogue.elra.info/en-us/repository/browse/ELRA-S0272/), [Media SLU (ELRA-E0024)](https://catalogue.elra.info/en-us/repository/browse/ELRA-E0024/) both needed for the task. Please also download the 2 csv files given [here](https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0) and place them in the `../../MEDIA` directory. + +This recipe has been implemented following the paper of G. Laperrière, V. Pelloin, A. Caubriere, S. Mdhaffar, N. Camelin, S. Ghannay, B. Jabaian, Y. Estève, [The Spoken Language Understanding MEDIA Benchmark Dataset in the Era of Deep Learning: data updates, training and evaluation tools](https://aclanthology.org/2022.lrec-1.171). + +# How to run +Do not forget to process the dataset and change the `!PLACEHOLDER` in the yaml file. + +```bash +python train_hf_wav2vec.py hparams/{hparam_file}.yaml +``` + +# Data preparation +It is important to note that Media initially offers audio files at 8kHz. Hence, audio files are upsampled on the fly within the preparation script to 16kHz. + +# Results + +| Media Release | hyperparams | Test ChER | Test CER | Test CVER | Wav2Vec | Training time | HuggingFace link | Model link | +|:------:|:------:|:------:|:------:|:------:|:------:|:------:|:------:|:------:| +| 2008-03-27 | train_with_wav2vec_relax.yaml | 7.46 | 20.10 | 31.41 | [LeBenchmark wav2vec2-FR-3K-large](https://huggingface.co/LeBenchmark/wav2vec2-FR-3K-large) | 12m30s per epoch | [here](https://huggingface.co/speechbrain/slu-wav2vec2-ctc-MEDIA-relax) | Not Avail. 
| +| 2008-03-27 | train_with_wav2vec_full.yaml | 7.78 | 24.88 | 35.77 | [LeBenchmark wav2vec2-FR-3K-large](https://huggingface.co/LeBenchmark/wav2vec2-FR-3K-large) | 12m30s per epoch | [here](https://huggingface.co/speechbrain/slu-wav2vec2-ctc-MEDIA-full) | Not Avail. | + +The CVER is the one implemented in SpeechBrain for this recipe. It is strict (yield an error for a single false character), without the human rules added generally for MEDIA. Find more in the article linked above, as it corresponds to u-CVER. + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena 
Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_full.yaml b/recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_full.yaml new file mode 100644 index 0000000000..1d1e696557 --- /dev/null +++ b/recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_full.yaml @@ -0,0 +1,177 @@ +# ################################ +# Model: Wav2Vec + DNN + CTC + Softmax +# Authors: +# Gaëlle Laperrière 2023 +# ################################ + +# ------ Paths and parameters + +# Seed needs to be set at top of yaml, before objects with parameters are made. +seed: 4242 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/media_SLU_wav2vec_full/ +cer_file_test: !ref /cer_test.txt +ctc_file_test: !ref /ctc_test.txt +coer_file_test: !ref /coer_test.txt +cver_file_test: !ref /cver_test.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +data_folder: !PLACEHOLDER # Path of folders S0272 and E0024, to process ELRA original xml datasets. +channels_path: !PLACEHOLDER # Path of the channels.csv file downloaded via https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 +concepts_path: !PLACEHOLDER # Path of the concepts_full_relax.csv file downloaded via https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 +skip_wav: False # Skip the wav files storing if already done before. +method: full # Remove or keep specifiers in concepts. Method used by default. +task: slu # Parse SLU or ASR data. +skip_prep: False # Skip data preparation to csv because already done. +process_test2: False # Process the test2 corpus + +# See https://github.com/pytorch/fairseq/blob/main/examples/wav2vec/README.md +# for Wav2Vec models and https://huggingface.co/LeBenchmark for French ones. 
+wav2vec_url: LeBenchmark/wav2vec2-FR-3K-large + +# Data files: +csv_train: !ref /csv/train.csv +csv_valid: !ref /csv/dev.csv +csv_test: !ref /csv/test.csv # If the test2 was processed, you can change the file to test2.csv + +# Data parameters: +# With data_parallel batch_size is split into N jobs. +# With DDP batch_size is multiplied by N jobs. +batch_size: 4 +test_batch_size: 2 +# We remove utterances longer than 90s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". +avoid_if_longer_than: 90.0 +avoid_if_smaller_than: 0.0 +num_workers: 3 +dataloader_options: + batch_size: !ref + num_workers: !ref + shuffle: True +test_dataloader_options: + batch_size: !ref + num_workers: !ref + +# Feature parameters: +sample_rate: 16000 +feats_dim: 1024 + +####################### Training Parameters ####################################: +number_of_epochs: 30 +lr: 1 +lr_wav2vec: 0.0001 +annealing_factor: 0.8 +annealing_factor_wav2vec: 0.9 +improvement_threshold: 0.0025 +improvement_threshold_wav2vec: 0.0025 +patient: 0 +patient_wav2vec: 0 +sorting: ascending + +####################### Model Parameters ####################################### +activation: !name:torch.nn.LeakyReLU +dnn_blocks: 3 +dnn_neurons: 512 + +# Wav2Vec parameters: +freeze: False + +# Decoding parameters: +blank_index: 0 + +# Outputs: +output_neurons: 212 + +# ------ Functions and classes + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref /wav2vec2_checkpoint + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, !ref ] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +output_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: True + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: 
!name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + output_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +model_wav2vec2: !new:torch.nn.ModuleList + - [!ref ] + +opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +opt_class_wav2vec: !name:torch.optim.Adam + lr: !ref + +lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: !ref + annealing_factor: !ref + patient: !ref + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: !ref + annealing_factor: !ref + patient: !ref + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + wav2vec2: !ref + lr_annealing: !ref + lr_annealing_wav2vec: !ref + counter: !ref + tokenizer: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +ctc_computer: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: batch + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +coer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + extract_concepts_values: True + keep_values: False + tag_in: '<' + tag_out: '>' + +cver_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + extract_concepts_values: True + keep_values: True + tag_in: '<' + tag_out: '>' diff --git a/recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_relax.yaml b/recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_relax.yaml new file mode 100644 index 0000000000..4dcc127dfc --- /dev/null +++ b/recipes/MEDIA/SLU/CTC/hparams/train_hf_wav2vec_relax.yaml @@ -0,0 +1,177 @@ +# ################################ +# Model: Wav2Vec + DNN + CTC + Softmax +# Authors: +# Gaëlle Laperrière 2023 +# 
################################ + +# ------ Paths and parameters + +# Seed needs to be set at top of yaml, before objects with parameters are made. +seed: 4242 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/media_SLU_wav2vec_relax/ +cer_file_test: !ref /cer_test.txt +ctc_file_test: !ref /ctc_test.txt +coer_file_test: !ref /coer_test.txt +cver_file_test: !ref /cver_test.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +data_folder: !PLACEHOLDER # Path of folders S0272 and E0024, to process ELRA original xml datasets. +channels_path: !PLACEHOLDER # Path of the channels.csv file downloaded via https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 +concepts_path: !PLACEHOLDER # Path of the concepts_full_relax.csv file downloaded via https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 +skip_wav: False # Skip the wav files storing if already done before. +method: relax # Remove or keep specifiers in concepts. Method used by default. +task: slu # Parse SLU or ASR data. +skip_prep: False # Skip data preparation to csv because already done. +process_test2: False # Process the test2 corpus + +# See https://github.com/pytorch/fairseq/blob/main/examples/wav2vec/README.md +# for Wav2Vec models and https://huggingface.co/LeBenchmark for French ones. +wav2vec_url: LeBenchmark/wav2vec2-FR-3K-large + +# Data files: +csv_train: !ref /csv/train.csv +csv_valid: !ref /csv/dev.csv +csv_test: !ref /csv/test.csv # If the test2 was processed, you can change the file to test2.csv + +# Data parameters: +# With data_parallel batch_size is split into N jobs. +# With DDP batch_size is multiplied by N jobs. +batch_size: 4 +test_batch_size: 2 +# We remove utterances longer than 90s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". 
+avoid_if_longer_than: 90.0 +avoid_if_smaller_than: 0.0 +num_workers: 3 +dataloader_options: + batch_size: !ref + num_workers: !ref + shuffle: True +test_dataloader_options: + batch_size: !ref + num_workers: !ref + +# Feature parameters: +sample_rate: 16000 +feats_dim: 1024 + +####################### Training Parameters ####################################: +number_of_epochs: 30 +lr: 1 +lr_wav2vec: 0.0001 +annealing_factor: 0.8 +annealing_factor_wav2vec: 0.9 +improvement_threshold: 0.0025 +improvement_threshold_wav2vec: 0.0025 +patient: 0 +patient_wav2vec: 0 +sorting: ascending + +####################### Model Parameters ####################################### +activation: !name:torch.nn.LeakyReLU +dnn_blocks: 3 +dnn_neurons: 512 + +# Wav2Vec parameters: +freeze: False + +# Decoding parameters: +blank_index: 0 + +# Outputs: +output_neurons: 141 + +# ------ Functions and classes + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref /wav2vec2_checkpoint + +enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN + input_shape: [null, null, !ref ] + activation: !ref + dnn_blocks: !ref + dnn_neurons: !ref + +output_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: True + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + output_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] + +model_wav2vec2: !new:torch.nn.ModuleList + - [!ref ] + +opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +opt_class_wav2vec: !name:torch.optim.Adam + lr: !ref + +lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: !ref + annealing_factor: !ref + patient: !ref + 
+lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: !ref + annealing_factor: !ref + patient: !ref + +label_encoder: !new:speechbrain.dataio.encoder.CTCTextEncoder + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + wav2vec2: !ref + lr_annealing: !ref + lr_annealing_wav2vec: !ref + counter: !ref + tokenizer: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +ctc_computer: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: batch + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +coer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + extract_concepts_values: True + keep_values: False + tag_in: '<' + tag_out: '>' + +cver_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + extract_concepts_values: True + keep_values: True + tag_in: '<' + tag_out: '>' diff --git a/recipes/MEDIA/SLU/CTC/media_prepare.py b/recipes/MEDIA/SLU/CTC/media_prepare.py new file mode 120000 index 0000000000..30d4994a1c --- /dev/null +++ b/recipes/MEDIA/SLU/CTC/media_prepare.py @@ -0,0 +1 @@ +../../media_prepare.py \ No newline at end of file diff --git a/recipes/MEDIA/SLU/CTC/train_hf_wav2vec.py b/recipes/MEDIA/SLU/CTC/train_hf_wav2vec.py new file mode 100644 index 0000000000..861bac1c76 --- /dev/null +++ b/recipes/MEDIA/SLU/CTC/train_hf_wav2vec.py @@ -0,0 +1,372 @@ +#!/usr/bin/env python3 + +""" +Recipe for training a CTC based SLU system with Media. +The system employs a wav2vec2 model and a decoder. + +To run this recipe, do the following: +> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml + +With the default hyperparameters, the system employs a VanillaNN encoder. + +The neural network is trained on greedy CTC. 
+ +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, training tasks (Media , PortMedia), +and many other possible variations. + +Authors + * Gaëlle Laperrière 2023 +""" + +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml +from media_prepare import prepare_media + +import speechbrain as sb +from speechbrain.dataio.batch import PaddedBatch +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure. +class SLU(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from waveform to output probabilities.""" + + # Get data. + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + + # Forward pass. + feats = self.modules.wav2vec2(wavs, wav_lens) + + x = self.modules.enc(feats) + + # Output layer for seq2seq log-probabilities. + logits = self.modules.output_lin(x) + p_ctc = self.hparams.log_softmax(logits) + + return p_ctc, wav_lens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC) given predictions and targets.""" + + # Get data. + batch = batch.to(self.device) + chars, char_lens = batch.char_encoded + ids = batch.id + + # Get predictions & loss. + p_ctc, wav_lens = predictions + loss = self.hparams.ctc_cost(p_ctc, chars, wav_lens, char_lens) + + # Get metrics. + if stage != sb.Stage.TRAIN: + # Generate sequences with CTC greedy decoder. + sequence = sb.decoders.ctc_greedy_decode( + p_ctc, wav_lens, self.hparams.blank_index + ) + # Update metrics. 
+ self.cer_metric.append( + ids=ids, + predict=sequence, + target=chars, + target_len=char_lens, + ind2lab=self.tokenizer.decode_ndim, + ) + self.coer_metric.append( + ids=ids, + predict=sequence, + target=chars, + target_len=char_lens, + ind2lab=self.tokenizer.decode_ndim, + ) + self.cver_metric.append( + ids=ids, + predict=sequence, + target=chars, + target_len=char_lens, + ind2lab=self.tokenizer.decode_ndim, + ) + self.ctc_metric.append(ids, p_ctc, chars, wav_lens, char_lens) + + return loss + + def init_optimizers(self): + """Initializes the wav2vec2 optimizer and model optimizer""" + + # Join optimizers. + self.optimizer_wav2vec = self.hparams.opt_class_wav2vec( + self.hparams.model_wav2vec2.parameters() + ) + self.optimizer = self.hparams.opt_class(self.hparams.model.parameters()) + + # Add optimizers to checkpoint recoverables. + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "optimizer_wav2vec", self.optimizer_wav2vec + ) + self.checkpointer.add_recoverable("optimizer", self.optimizer) + + self.optimizers_dict = { + "wav2vec_optimizer": self.optimizer_wav2vec, + "model_optimizer": self.optimizer, + } + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + + # Re-initialize metrics. + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.ctc_metric = self.hparams.ctc_computer() + self.coer_metric = self.hparams.coer_computer() + self.cver_metric = self.hparams.cver_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + + # Save loss and metrics. + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["COER"] = self.coer_metric.summarize("error_rate") + stage_stats["CVER"] = self.cver_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. 
+ if stage == sb.Stage.VALID: + old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"]) + old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + sb.nnet.schedulers.update_learning_rate( + self.optimizer_wav2vec, new_lr_wav2vec + ) + self.hparams.train_logger.log_stats( + stats_meta={ + "epoch": epoch, + "lr": old_lr, + "lr_wav2vec": old_lr_wav2vec, + }, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"CER": stage_stats["CER"]}, + min_keys=["CER"], + ) + + # Same plus write results in txt files. + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + with open(hparams["cer_file_test"], "w", encoding="utf-8") as w: + self.cer_metric.write_stats(w) + with open(hparams["ctc_file_test"], "w", encoding="utf-8") as w: + self.ctc_metric.write_stats(w) + with open(hparams["coer_file_test"], "w", encoding="utf-8") as w: + self.coer_metric.write_stats(w) + with open(hparams["cver_file_test"], "w", encoding="utf-8") as w: + self.cver_metric.write_stats(w) + + +# Define custom data procedure. +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + + # 1. Define datasets: + csv_folder = hparams["save_folder"] + "/csv" + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["csv_train"], + replacements={"data_root": csv_folder}, + ) + + # We sort training data to speed up training and get better results. + # When sorting do not shuffle in dataloader ! otherwise is pointless. 
+ if hparams["sorting"] == "ascending": + train_data = train_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + key_min_value={"duration": hparams["avoid_if_smaller_than"]}, + ) + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + key_min_value={"duration": hparams["avoid_if_smaller_than"]}, + ) + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] == "random": + train_data = train_data.filtered_sorted( + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + key_min_value={"duration": hparams["avoid_if_smaller_than"]}, + ) + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + # We also sort the validation data so it is faster to validate. + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["csv_valid"], replacements={"data_root": csv_folder} + ) + valid_data = valid_data.filtered_sorted(sort_key="duration", reverse=True) + + # We also sort the test data so it is faster to validate. + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["csv_test"], replacements={"data_root": csv_folder} + ) + test_data = test_data.filtered_sorted(sort_key="duration", reverse=True) + + datasets = [train_data, valid_data, test_data] + + label_encoder = sb.dataio.encoder.CTCTextEncoder() + + # 2. 
Define audio pipeline: + @sb.utils.data_pipeline.takes("wav", "start", "stop") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav, start, stop): + start = int(float(start) * hparams["sample_rate"]) + stop = int(float(stop) * hparams["sample_rate"]) + speech_segment = {"file": wav, "start": start, "stop": stop} + sig = sb.dataio.dataio.read_audio(speech_segment) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("char") + @sb.utils.data_pipeline.provides("char_list", "char_encoded") + def text_pipeline(char): + char_list = char.strip().split() + yield char_list + char_encoded = label_encoder.encode_sequence_torch(char_list) + yield char_encoded + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Create a label encoder instead of a tokenizer for our tag list: + lab_enc_file = hparams["save_folder"] + "/labelencoder.txt" + label_encoder.load_or_create( + path=lab_enc_file, + from_didatasets=[train_data], + output_key="char_list", + special_labels={"blank_label": hparams["blank_index"]}, + sequence_input=True, + ) + + # 5. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "char_encoded"], + ) + + # 6. 
Make DataLoaders and shuffle if needed: + dataloader_train = torch.utils.data.DataLoader( + train_data, + batch_size=hparams["batch_size"], + num_workers=hparams["dataloader_options"]["num_workers"], + collate_fn=PaddedBatch, + shuffle=hparams["dataloader_options"]["shuffle"], + ) + dataloader_valid = torch.utils.data.DataLoader( + valid_data, + batch_size=hparams["batch_size"], + num_workers=hparams["dataloader_options"]["num_workers"], + collate_fn=PaddedBatch, + shuffle=hparams["dataloader_options"]["shuffle"], + ) + dataloader_test = torch.utils.data.DataLoader( + test_data, + batch_size=hparams["test_batch_size"], + num_workers=hparams["test_dataloader_options"]["num_workers"], + collate_fn=PaddedBatch, + ) + + return dataloader_train, dataloader_valid, dataloader_test, label_encoder + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides. + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol. + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory. + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Due to DDP, we do the preparation ONLY on the main python process + run_on_main( + prepare_media, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["save_folder"], + "channels_path": hparams["channels_path"], + "concepts_path": hparams["concepts_path"], + "skip_wav": hparams["skip_wav"], + "method": hparams["method"], + "task": hparams["task"], + "skip_prep": hparams["skip_prep"], + "process_test2": hparams["process_test2"], + }, + ) + + # Create the datasets objects as well as tokenization and encoding. + train_data, valid_data, test_data, label_encoder = dataio_prepare(hparams) + + # Trainer initialization. 
+ slu_brain = SLU( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Adding objects to trainer. + slu_brain.tokenizer = label_encoder + slu_brain.tokenizer.add_unk() # handle unknown SLU labels + + # Check for stopped training. + slu_brain.checkpointer.recover_if_possible() + + # Train. + slu_brain.fit( + slu_brain.hparams.epoch_counter, + train_data, + valid_data, + progressbar=True, + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["test_dataloader_options"], + ) + + # Test. + slu_brain.evaluate( + test_data, + min_key="CER", + progressbar=True, + test_loader_kwargs=hparams["test_dataloader_options"], + ) diff --git a/recipes/MEDIA/media_prepare.py b/recipes/MEDIA/media_prepare.py new file mode 100644 index 0000000000..d2447f1d9c --- /dev/null +++ b/recipes/MEDIA/media_prepare.py @@ -0,0 +1,1248 @@ +""" +Data preparation. +Download: +https://catalogue.elra.info/en-us/repository/browse/ELRA-S0272/ +https://catalogue.elra.info/en-us/repository/browse/ELRA-E0024/ +https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 +See README.md for more info. + +Author +------ +Gaëlle Laperrière 2023 +""" + +import csv +import glob +import os +import re +import subprocess +import xml.dom.minidom as DOM + +from tqdm import tqdm + +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +def prepare_media( + data_folder, + save_folder, + channels_path, + concepts_path, + skip_wav=True, + method="slu", + task="full", + skip_prep=False, + process_test2=False, +): + """ + Prepares the csv files for the MEDIA dataset. + Both following repositories are necessary for transcriptions + and annotations (S0272) and audio (E0024). 
+ https://catalogue.elra.info/en-us/repository/browse/ELRA-S0272/ + https://catalogue.elra.info/en-us/repository/browse/ELRA-E0024/ + + Arguments + --------- + data_folder: str + Path where folders S0272 and E0024 are stored. + save_folder: str + Path where the csvs and preprocessed wavs will be stored. + channels_path: str + Path of the channels.csv file downloaded via https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 + concepts_path: str + Path of the concepts_full_relax.csv file downloaded via https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 + skip_wav: bool, optional + Skip the wav files storing if already done before. + method: str, optional + Used only for 'slu' task. + Either 'full' or 'relax'. + 'full' Keep specifiers in concepts. + 'relax' Remove specifiers from concepts. + task: str, optional + Either 'slu' or 'asr'. + 'slu' Parse SLU data. + 'asr' Parse ASR data. + skip_prep: bool, optional + If True, skip data preparation. + process_test2: bool, optional + If True, process test2 corpus + + Returns + ------- + None + """ + + if skip_prep: + return + + if not os.path.exists(save_folder): + os.makedirs(save_folder) + if not os.path.exists(save_folder + "/wav"): + os.makedirs(save_folder + "/wav") + if not os.path.exists(save_folder + "/csv"): + os.makedirs(save_folder + "/csv") + + if skip( + save_folder + "/csv/train.csv", + save_folder + "/csv/dev.csv", + save_folder + "/csv/test.csv", + ): + logger.info("Csv files already exist, skipping data preparation!") + return + + if task == "slu": + if method == "relax" or method == "full": + logger.info("Processing SLU " + method + " Media Dataset") + else: + raise ValueError("Parameter method must be 'full' or 'relax'") + elif task == "asr": + logger.info("Processing ASR Media Dataset") + else: + raise ValueError("Parameter task must be 'asr' or 'slu'") + + xmls = { + "media_lot1.xml": "train", + "media_lot2.xml": "train", + "media_lot3.xml": "train", + 
"media_lot4.xml": "train", + "media_testHC.xml": "test", + "media_testHC_a_blanc.xml": "dev", + } + + train_data = [] + dev_data = [] + test_data = [] + test2_data = [] + + wav_paths = glob.glob(data_folder + "/S0272/**/*.wav", recursive=True) + channels, filenames = get_channels(channels_path) + + # Wavs. + if not skip_wav: + logger.info("Processing wavs") + for wav_path in tqdm(wav_paths): + filename = wav_path.split("/")[-1][:-4] + channel = get_channel(filename, channels, filenames) + split_audio_channels(wav_path, filename, channel, save_folder) + + # Train, Dev, Test. + for xml in xmls: + logger.info( + "Processing xml file " + + str(list(xmls.keys()).index(xml) + 1) + + "/" + + str(len(xmls)) + ) + root = get_root( + data_folder + "/E0024/MEDIA1FR_00/MEDIA1FR/DATA/" + xml, + 0, + ) + data = parse( + root, channels, filenames, save_folder, method, task, xmls[xml] + ) + if xmls[xml] == "train": + train_data.extend(data) + elif xmls[xml] == "dev": + dev_data.extend(data) + elif xmls[xml] == "test": + test_data.extend(data) + + # Test2. + if process_test2: + unused_dialogs = get_unused_dialogs(data_folder) + concepts_full, concepts_relax = get_concepts_full_relax(concepts_path) + logger.info("Processing xml files for test2") + for filename in unused_dialogs: + root = get_root( + data_folder + + "/E0024/MEDIA1FR_00/MEDIA1FR/DATA/semantizer_files/" + + filename + + "_HC.xml", + 1, + ) + test2_data.extend( + parse_test2( + root, + channels, + filenames, + save_folder, + method, + task, + filename, + concepts_full, + concepts_relax, + ) + ) + + append_data(save_folder, train_data, "train") + append_data(save_folder, dev_data, "dev") + append_data(save_folder, test_data, "test") + if process_test2: + append_data(save_folder, test2_data, "test2") + + +def skip(save_csv_train, save_csv_dev, save_csv_test): + """ + Detects if the MEDIA data preparation has been already done. + If the preparation has been done, we can skip it. 
+ + Arguments + --------- + save_csv_train : str + Path to train csv + save_csv_dev : str + Path to dev csv + save_csv_test : str + Path to test csv + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + + # Checking folders and save options + skip = False + + if ( + os.path.isfile(save_csv_train) + and os.path.isfile(save_csv_dev) + and os.path.isfile(save_csv_test) + ): + skip = True + + return skip + + +def parse(root, channels, filenames, save_folder, method, task, corpus): + """ + Parse data for the train, dev and test csv files of the Media dataset. + Files are stored in MEDIA1FR_00/MEDIA1FR/DATA/. + They are the original xml files used by the community for train, dev and test. + + Arguments + --------- + root: Document + Object representing the content of the Media xml document being processed. + channels: list of str + Channels (Right / Left) of the stereo recording to keep. + filenames: list of str + Linked IDs of the recordings, for the channels to keep. + save_folder: str + Path where the csvs and preprocessed wavs will be stored. + method: str + Either 'full' or 'relax'. + task: str + Either 'asr' or 'slu'. + corpus: str + 'train', 'dev' or 'test'. + + Returns + ------- + list + all information needed to append the data in SpeechBrain csv files. 
+ """ + + data = [] + + for dialogue in root.getElementsByTagName("dialogue"): + speaker_name = get_speaker(dialogue) + filename = dialogue.getAttribute("id") + channel = get_channel(filename, channels, filenames) + + for turn in dialogue.getElementsByTagName("turn"): + if turn.getAttribute("speaker") == "spk": + time_beg = turn.getAttribute("startTime") + time_end = turn.getAttribute("endTime") + + sentences = parse_sentences( + turn, time_beg, time_end, method, task + ) + + data.append([channel, filename, speaker_name, sentences]) + + return data + + +def parse_test2( + root, + channels, + filenames, + save_folder, + method, + task, + filename, + concepts_full, + concepts_relax, +): + """ + This function prepares the data for the test2 csv files of the Media dataset. + "Laperrière et al. The Spoken Language Understanding MEDIA Benchmark Dataset in the Era of Deep Learning: data updates, training and evaluation tools, LREC 2022" (https://aclanthology.org/2022.lrec-1.171) made the decision to make a new corpus named "test2". + These xml files are structured differently from the original ones, explaining special functions for the test2. + They are xml files made after the first dataset release, and have never been used before this recipe. + This new corpus can be used as a second inference corpus, as the original test. + Files are stored in /E0024/MEDIA1FR_00/MEDIA1FR/DATA/semantizer_files/. + + Arguments + --------- + root: Document + Object representing the content of the Media xml document being processed. + channels: list of str + Channels (Right / Left) of the stereo recording to keep. + filenames: list of str + Linked IDs of the recordings, for the channels to keep. + save_folder: str + Path where the csvs and preprocessed wavs will be stored. + method: str + Either 'full' or 'relax'. + task: str + Either 'asr' or 'slu'. + filename: str + Name of the Media recording. + concepts_full: list of str + Concepts in method full. 
+ concepts_relax: list of str + Concepts equivalent in method relax. + + Returns + ------- + data : list + """ + + speaker_id, speaker_name = get_speaker_test2(root) + channel = get_channel(filename, channels, filenames) + + data = [] + + for turn in root.getElementsByTagName("Turn"): + if turn.getAttribute("speaker") == speaker_id: + time_beg = turn.getAttribute("startTime") + time_end = turn.getAttribute("endTime") + + sentences = parse_sentences_test2( + turn, + time_beg, + time_end, + method, + concepts_full, + concepts_relax, + task, + ) + + if ( + filename == "70" + and sentences[len(sentences) - 1][3] == "344.408" + ): + sentences[len(sentences) - 1][3] = "321.000" + + data.append([channel, filename, speaker_name, sentences]) + + return data + + +def append_data(save_folder, data, corpus): + """ + Make the csv corpora using data retrieved previously for one Media file. + + Arguments + --------- + save_folder: str + Path where the csvs and preprocessed wavs will be stored. + data: list + channel, filename, speaker_name, sentences + corpus: str + Either 'train', 'dev', 'test', or 'test2'. 
+ """ + + logger.info("Preparing " + corpus + ".csv") + + to_append = [] + + for line in tqdm(data): + channel, filename, speaker_name, sentences = line + + # Retrieve other necessary information + out = subprocess.Popen( + ["soxi", "-D", save_folder + "/wav/" + channel + filename + ".wav"], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + stdout, stderr = out.communicate() + wav_duration = str("%.2f" % float(stdout)) + wav = save_folder + "/wav/" + channel + filename + ".wav" + IDs = get_IDs(speaker_name, sentences, channel, filename) + + # Fill to_append list + for n in range(len(sentences)): + f1 = float(sentences[n][3]) + f2 = float(sentences[n][2]) + duration = str("%.2f" % (f1 - f2)) + if ( + float(wav_duration) >= f1 + and float(duration) != 0.0 + and sentences[n][0] != "" + ): + to_append.append( + [ + IDs[n], + duration, + sentences[n][2], + sentences[n][3], + wav, + "wav", + speaker_name, + "string", + sentences[n][0], + "string", + sentences[n][1], + "string", + ] + ) + + # Write to_append + if to_append is not None: + write_first_row(save_folder, corpus) + path = save_folder + "/csv/" + corpus + ".csv" + SB_file = open(path, "a", encoding="utf-8") + writer = csv.writer(SB_file, delimiter=",") + writer.writerows(to_append) + SB_file.close() + + +def parse_sentences(turn, time_beg, time_end, method, task): + """ + Get the sentences spoken by the speaker (not the "Compère" aka Woz). + + Arguments + --------- + turn: list of Document + The current turn node. + time_beg: str + Time (s) at the beginning of the turn. + time_end: str + Time (s) at the end of the turn. + method: str + Either 'full' or 'relax'. + task: str + Either 'asr' or 'slu'. 
+ + Returns + ------- + dictionary of str + """ + + has_speech = False + sentences = [["", "", time_beg, time_end]] + concept_open = False + sync_waiting = False + time = None + n = 0 # Number of segments in the turn + + # For each semAnnotation in the Turn + for semAnnotation in turn.getElementsByTagName("semAnnotation"): + # We only process HC + if semAnnotation.getAttribute("withContext") == "false": + # For each sem + for sem in semAnnotation.getElementsByTagName("sem"): + # Check concept + concept = sem.getAttribute("concept") + specif = sem.getAttribute("specif") + if method == "full" and specif != "null": + concept += specif + + # For each transcription in the Turn + for transcription in sem.getElementsByTagName("transcription"): + # Check for sync or text + for node in transcription.childNodes: + # Check transcription + if ( + node.nodeType == node.TEXT_NODE + and node.data.replace("\n", "").replace(" ", "") + != "" + ): + ( + sentences, + has_speech, + sync_waiting, + concept_open, + ) = process_text_node( + node, + sentences, + sync_waiting, + has_speech, + concept, + concept_open, + task, + n, + time_end, + ) + + # Check Sync times + if node.nodeName == "Sync": + ( + sentences, + has_speech, + sync_waiting, + time, + n, + ) = process_sync_node( + node, + sentences, + sync_waiting, + has_speech, + concept_open, + task, + n, + time, + time_end, + ) + + if task == "slu": + ( + sentences, + concept, + concept_open, + has_speech, + sync_waiting, + n, + ) = process_semfin_node( + sentences, + sync_waiting, + has_speech, + concept, + concept_open, + n, + time, + time_end, + ) + + sentences = clean_last_char(sentences) + return sentences + + +def parse_sentences_test2( + turn, time_beg, time_end, method, concepts_full, concepts_relax, task +): + """ + Get the sentences spoken by the speaker (not the "Compère" aka Woz). + + Arguments + --------- + turn: list of Document + All the xml following nodes present in the turn. 
+ time_beg: str + Time (s) at the beginning of the turn. + time_end: str + Time (s) at the end of the turn. + method: str + Either 'full' or 'relax'. + concepts_full: list of str + Concepts in method full. + concepts_relax: list of str + Concepts equivalent in method relax. + task: str + Either 'asr' or 'slu'. + + Returns + ------- + dictionary of str + """ + + sentences = [["", "", time_beg, time_end]] + n = 0 # Number of segments in the turn + concept = "null" + has_speech = False + concept_open = False + sync_waiting = False + time = None + + # For each node in the Turn + for node in turn.childNodes: + # Check concept + if task == "slu" and node.nodeName == "SemDebut": + concept = node.getAttribute("concept") + if method == "relax": + concept = get_concept_relax( + concept, concepts_full, concepts_relax + ) + + # Check transcription + if ( + node.nodeType == node.TEXT_NODE + and node.data.replace("\n", "") != "" + ): + ( + sentences, + has_speech, + sync_waiting, + concept_open, + ) = process_text_node( + node, + sentences, + sync_waiting, + has_speech, + concept, + concept_open, + task, + n, + time_end, + ) + + # Save audio segment + if task == "slu" and node.nodeName == "SemFin": + ( + sentences, + concept, + concept_open, + has_speech, + sync_waiting, + n, + ) = process_semfin_node( + sentences, + sync_waiting, + has_speech, + concept, + concept_open, + n, + time, + time_end, + ) + + if node.nodeName == "Sync": + sentences, has_speech, sync_waiting, time, n = process_sync_node( + node, + sentences, + sync_waiting, + has_speech, + concept_open, + task, + n, + time, + time_end, + ) + + sentences = clean_last_char(sentences) + + return sentences + + +def process_text_node( + node, + sentences, + sync_waiting, + has_speech, + concept, + concept_open, + task, + n, + time_end, +): + """ + Parse text nodes from the xml files of MEDIA. + + Arguments + --------- + node: Node + Node of the xml file. 
+ sentences: dictionary of str + All sentences being extracted from the turn. + sync_waiting: bool + Used to keep track of sync nodes, to cut blank audio signal. + True if a sync node has been processed without text in it. + False if no sync nodes have been processed, or text has been parsed after one. + has_speech: bool + Used to keep track of the existence of speech in the turn's sentence. + True if speech is present in the turn. + False if no speech is present yet in the turn. + concept: str + Concept of the node being processed. + Will be "null" if no concept is linked to this node. + concept_open: bool + Used to know if a concept has been used but not its closing tag ">". + True if closing tag not seen yet and concept has been used. + False if closing tag put or concept not used. + task: str, optional + Either 'slu' or 'asr'. + 'slu' Parse SLU data. + 'asr' Parse ASR data. + n: int + Used to keep track of the number of sentences in the turn. + time_end: str + Last time given by the turn, after last speech. + + Returns + ------- + dictionary of str, bool, bool, bool + """ + + # Add a new concept, when speech following + if task == "slu" and concept != "null" and not concept_open: + sentences[n][0] += "<" + concept + "> " + sentences[n][1] += "<" + concept + "> _ " + concept_open = True + sentence = normalize_sentence(node.data) + sentences[n][0] += sentence + " " + sentences[n][1] += " ".join(list(sentence.replace(" ", "_"))) + " _ " + sentences[n][3] = time_end + has_speech = True + sync_waiting = False + return sentences, has_speech, sync_waiting, concept_open + + +def process_sync_node( + node, + sentences, + sync_waiting, + has_speech, + concept_open, + task, + n, + time, + time_end, +): + """ + Parse sync nodes from the xml files of MEDIA. + + Arguments + --------- + node: Node + Node of the xml file. + sentences: dictionary of str + All sentences being extracted from the turn. 
+ sync_waiting: bool + Used to keep track of sync nodes, to cut blank audio signal. + True if a sync node has been processed without text in it. + False if no sync nodes have been processed, or text has been parsed after one. + has_speech: bool + Used to keep track of the existence of speech in the turn's sentence. + True if speech is present in the turn. + False if no speech is present yet in the turn. + concept_open: bool + Used to know if a concept has been used but not its closing tag ">". + True if closing tag not seen yet and concept has been used. + False if closing tag put or concept not used. + task: str, optional + Either 'slu' or 'asr'. + 'slu' Parse SLU data. + 'asr' Parse ASR data. + n: int + Used to keep track of the number of sentences in the turn. + time: str + Current time given by the sync node. + time_end: str + Last time given by the turn, after last speech. + + Returns + ------- + dictionary of str, bool, bool, str, int + """ + + # If the segment has no speech yet + if not has_speech: + # Change time_beg for the last segment + sentences[n][2] = node.getAttribute("time") + # If the segment has speech and sync doesn't cut a concept + elif task == "asr" or (task == "slu" and not concept_open): + # Change time_end for the last segment + sentences[n][3] = node.getAttribute("time") + sentences.append(["", "", sentences[n][3], time_end]) + has_speech = False + n += 1 + else: + sync_waiting = True + time = node.getAttribute("time") + return sentences, has_speech, sync_waiting, time, n + + +def process_semfin_node( + sentences, + sync_waiting, + has_speech, + concept, + concept_open, + n, + time, + time_end, +): + """ + Parse SemFin nodes from the xml files of MEDIA. + + Arguments + --------- + sentences: dictionary of str + All sentences being extracted from the turn. + sync_waiting: bool + Used to keep track of sync nodes, to cut blank audio signal. + True if a sync node has been processed without text in it. 
+ False if no sync nodes have been processed, or text has been parsed after one. + has_speech: bool + Used to keep track of the existence of speech in the turn's sentence. + True if speech is present in the turn. + False if no speech is present yet in the turn. + concept: str + Concept of the node being processed. + Will be "null" if no concept is linked to this node. + concept_open: bool + Used to know if a concept has been used but not its closing tag ">". + True if closing tag not seen yet and concept has been used. + False if closing tag put or concept not used. + n: int + Used to keep track of the number of sentences in the turn. + time: str + Current time given by the sync node. + time_end: str + Last time given by the turn, after last speech. + + Returns + ------- + dictionary of str, str, bool, bool, bool, int + """ + + # Prevent adding a closing concept + # If Sync followed by SemFin generate a new segment without speech yet + if concept_open: + sentences[n][0] += "> " + sentences[n][1] += "> _ " + concept = "null" # Indicate there is no currently open concept + concept_open = False + if sync_waiting: + sentences[n][3] = time + sentences.append(["", "", time, time_end]) + has_speech = False + sync_waiting = False + n += 1 + return sentences, concept, concept_open, has_speech, sync_waiting, n + + +def clean_last_char(sentences): + """ + Clean the sentences by deleting their last characters. + + Arguments + --------- + sentences: dictionary of str + All sentences being extracted from the turn. + + Returns + ------- + dictionary of str + """ + + for n in range(len(sentences)): + if sentences[n][0] != "": + sentences[n][0] = sentences[n][0][:-1] # Remove last ' ' + sentences[n][1] = sentences[n][1][:-3] # Remove last ' _ ' + else: + del sentences[n] # Useful for last appended segment + return sentences + + +def normalize_sentence(sentence): + """ + Normalize and correct a sentence of the turn. 
+ + Arguments + --------- + sentence: str + A sentence being extracted from the turn. + + Returns + ------- + str + """ + + # cspell:disable + # Apostrophes + sentence = sentence.replace(" '", "'") # Join apostrophe to previous word + sentence = sentence.replace("'", "' ") # Detach apostrophe to next word + # Specific errors + sentence = sentence.replace("gc'est", "c'est") + sentence = sentence.replace("a-t- il", "a-t-il") + sentence = sentence.replace("' un parking", "un parking") + sentence = sentence.replace("bleu marine", "bleu-marine") + sentence = sentence.replace("Saint-jacques", "Saint-Jacques") + sentence = sentence.replace("Mont-de-Marsan", "Mont-De-Marsan") + sentence = sentence.replace("Mont de Marsan", "Mont-De-Marsan") + # Particular characters + sentence = re.sub(r"^'", "", sentence) + sentence = re.sub(r"\(.*?\)", "*", sentence) # Replace (...) with * + sentence = re.sub(r"[^\w\s'-><_]", "", sentence) # Punct. except '-><_ + # Numbers correction + sentence = sentence.replace("dix-", "dix ") + sentence = sentence.replace("vingt-", "vingt ") + sentence = sentence.replace("trente-", "trente ") + sentence = sentence.replace("quarante-", "quarante ") + sentence = sentence.replace("cinquante-", "cinquante ") + sentence = sentence.replace("soixante-", "soixante ") + sentence = sentence.replace("quatre-", "quatre ") + # Spaces + sentence = re.sub(r"\s+", " ", sentence) + sentence = re.sub(r"^\s+", "", sentence) + sentence = re.sub(r"\s+$", "", sentence) + # Specific + sentence = sentence.replace("c' est", "c'est") # Re-join this word + return sentence + # cspell:enable + + +def write_first_row(save_folder, corpus): + """ + Write the first row of the csv files. + + Arguments + --------- + save_folder: str + Path where the csvs and preprocessed wavs will be stored. + corpus : str + Either 'train', 'dev', 'test', or 'test2'. 
+ """ + + SB_file = open( + save_folder + "/csv/" + corpus + ".csv", + "w", + newline="", + encoding="utf-8", + ) + writer = csv.writer(SB_file, delimiter=",") + writer.writerow( + [ + "ID", + "duration", + "start", + "stop", + "wav", + "wav_format", + "spk_id", + "spk_id_format", + "wrd", + "wrd_format", + "char", + "char_format", + ] + ) + SB_file.close() + + +def split_audio_channels(path, filename, channel, save_folder): + """ + Split the stereo wav Media files from the downloaded dataset. + Keep only the speaker channel. + + Arguments: + ------- + path: str + Path of the original Media file without the extension ".wav" nor ".trs". + filename: str + Name of the Media recording. + channel: str + "R" or "L" following the channel of the speaker in the stereo wav file. + save_folder: str + Path where the csvs and preprocessed wavs will be stored. + """ + + channel_int = "1" + if channel == "R": + channel_int = "2" + path = path.replace("1 ", "'1 ") + path = path.replace("2 ", "'2 ") + path = path.replace(" 2", " 2'") + os.system( + "sox " + + path + + " " + + save_folder + + "/wav/" + + channel + + filename + + "_8khz.wav remix " + + channel_int + ) + os.system( + "sox -G " + + save_folder + + "/wav/" + + channel + + filename + + "_8khz.wav -r 16000 " + + save_folder + + "/wav/" + + channel + + filename + + ".wav 2>/dev/null" + ) + os.system("rm " + save_folder + "/wav/" + channel + filename + "_8khz.wav") + + +def get_root(path, id): + """ + Get the root of an xml file. + + Arguments + --------- + path: str + The path of the xml file. + id: int + id of the node to extract, different considering the xml format. + + Returns + ------- + Node + """ + + with open(path, "rb") as f: + text = f.read() + text2 = text.decode("ISO-8859-1") + tree = DOM.parseString(text2) + root = tree.childNodes[id] + return root + + +def get_speaker(dialogue): + """ + Get the name of the speaker of a dialogue. 
+ + Arguments + --------- + dialogue: Node + The node where the speaker information is stored. + + Returns + ------- + str + """ + + speaker = dialogue.getAttribute("nameSpk") + speaker = normalize_speaker(speaker) + return speaker + + +def get_speaker_test2(root): + """ + Get the name of the speaker of a whole xml file, for the test2 xml structure. + + Arguments + --------- + root: Node + The node where the speaker information is stored. + + Returns + ------- + str, str + """ + + for speaker in root.getElementsByTagName("Speaker"): + if speaker.getAttribute("name")[0] == "s": + speaker_id = speaker.getAttribute("id") + speaker_name = speaker.getAttribute("name") + speaker_name = normalize_speaker(speaker_name) + return speaker_id, speaker_name + + +def normalize_speaker(speaker): + """ + Normalize and correct the speaker name. + + Arguments + --------- + speaker: str + Initial name of the speaker as given by the xml file. + + Returns + ------- + str + """ + + speaker = speaker.replace("-", "_") + speaker = speaker.replace("#", "_") + speaker = speaker.replace("__", "_1_") + speaker = speaker.replace("speaker1", "speaker") + speaker = speaker.replace("108730", "1_08730") + speaker = speaker.replace("087301123", "08730_1123") + speaker = speaker.replace("087301457", "08730_1457") + speaker = speaker.replace(".", "") + speaker = speaker.replace("speaker_08730_1394", "speaker_1_08730_1394") + speaker = speaker.replace("speaker_08730_1399", "speaker_1_08730_1399") + speaker = speaker.replace("speaker_08730_37", "speaker_1_08730_37") + speaker = speaker.replace("speaker_08730_400", "speaker_1_08730_400") + speaker = speaker.replace("_8730", "_08730") + speaker = speaker.replace("_0873", "_08730") + speaker = speaker.replace("_08737", "_08730") + speaker = speaker.replace("21_08730", "1_08730") + speaker = speaker.replace("058730", "08730") + speaker = speaker.replace("2_08730", "1_08730") + speaker = speaker.replace("speaker_08730_846", "speaker_1_08730_846") + speaker 
= speaker.replace("speaker_8730_270", "speaker_1_08730_270") + return speaker + + +def get_channels(path): + """ + Get the channels (Right / Left) from the stereo audio files where the speaker (not the WoZ) speak. + + Arguments + --------- + path: str + Path of the channels csv file given with this recipe. + Can be downloaded from https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 + + Returns + ------- + list of str, list of str + """ + + channels = [] + filenames = [] + with open(path, encoding="utf-8") as file: + reader = csv.reader(file, delimiter=",") + for row in reader: + channels.append(row[0]) + filenames.append(row[1]) + return channels, filenames + + +def get_channel(filename, channels, filenames): + """ + Get the channel (Right / Left) of a transcription linked audio. + + Arguments + --------- + filename: str + Name of the Media recording. + channels: list of str + Channels (Right / Left) of the stereo recording to keep. + filenames: list of str + Linked IDs of the recordings, for the channels to keep. + + Returns + ------- + str + """ + + channel = channels[filenames.index(filename)] + return channel + + +def get_concepts_full_relax(path): + """ + Put the corresponding MEDIA relax concepts from their full version in lists from the concepts csv file. + + Arguments + --------- + path: str + Path of the channels csv file given with this recipe. + Can be downloaded from https://www.dropbox.com/sh/y7ab0lktbylz647/AADMsowYHmNYwaoL_hQt7NMha?dl=0 + + Returns + ------- + list of str, list of str + """ + + concepts_full = [] + concepts_relax = [] + with open(path, encoding="utf-8") as file: + reader = csv.reader(file, delimiter=",") + for row in reader: + concepts_full.append(row[0]) + concepts_relax.append(row[1]) + return concepts_full, concepts_relax + + +def get_concept_relax(concept, concepts_full, concepts_relax): + """ + Get the corresponding MEDIA relax concept from its full version. 
+ + Arguments + --------- + concept: str + Concept of the node being processed. + concepts_full: list of str + Concepts in method full. + concepts_relax: list of str + Concepts equivalent in method relax. + + Returns + ------- + str + """ + + for c in concepts_full: + if (c[-1] == "*" and concept[: len(c) - 1] == c[:-1]) or concept == c: + return concepts_relax[concepts_full.index(c)] + return concept + + +def get_unused_dialogs(data_folder): + """ + Get the dialogs to be process for the test2 new corpus. + + Arguments + --------- + data_folder: str + Path where folders S0272 and E0024 are stored. + + Returns + ------- + list of str + """ + + # Used dialogs + proc = subprocess.Popen( + "egrep -a '] +output_folder: !ref results/conformer_transducer_large/ +output_wer_folder: !ref / +save_folder: !ref /save +train_csv: !ref /train.csv +train_log: !ref /train_log.txt + +# Data files +hf_download_folder: !PLACEHOLDER +subsets: ["clean", "dirty"] # _sa sets are removed. +ckpt_interval_minutes: 5 # save checkpoint every N min + + +####################### Training Parameters #################################### + +number_of_epochs: 30 +optimizer_step_limit: 400000 +ctc_weight: 0.3 +grad_accumulation_factor: 1 +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 +precision: fp16 # bf16, fp16 or fp32 +skip_prep: False + +# stages related parameters +lr_adam: 0.0008 + +# Feature parameters +sample_rate: 16000 +n_fft: 512 +n_mels: 80 +win_length: 32 + +# This setup works well for A100 80GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +max_batch_length_train: 500 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True # if true re-creates batches at each epoch shuffling examples. 
+batch_ordering: random +max_batch_ex: 256 + +# BPE parameters +token_type: bpe # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +dynamic_batch_sampler: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +# Dataloader options +train_dataloader_opts: + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + num_workers: !ref + +test_dataloader_opts: + batch_size: 4 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 640 +nhead: 8 +num_encoder_layers: 14 +num_decoder_layers: 6 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:speechbrain.nnet.activations.Swish +output_neurons: 5120 + +# Outputs +label_smoothing: 0.1 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 5 +test_beam_size: 10 +ctc_weight_decode: 0.3 + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + conformer_activation: !ref + encoder_module: conformer + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + 
+normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 2 + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +# define two optimizers here for two-stage training +Adam: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + weight_decay: 0.001 + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +####################### Decoding & optimiser ########################### + +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: 0 + ctc_fc: !ref + + +scorer_test_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +scorer_valid_search: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: False + length_normalization: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: 0 + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 40000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: 
!new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 1 + drop_count_high: 2 + replace: "mean" + +# Freq Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 10 + drop_length_high: 20 + drop_count_low: 1 + drop_count_high: 2 + replace: "mean" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +# We remove augmentations as we are using the full 28k hour set. +# Uncomment to bring augmentations back! +# fea_augment: !new:speechbrain.augment.augmenter.Augmenter +# min_augmentations: 2 +# max_augmentations: 2 +# augment_prob: 1.0 +# augmentations: [ +# !ref , +# !ref , +# !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + win_length: !ref + +############################## Logging and Pretrainer ########################## + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats diff --git a/recipes/PeoplesSpeech/ASR/transformer/peoples_speech_prepare.py b/recipes/PeoplesSpeech/ASR/transformer/peoples_speech_prepare.py new file mode 120000 index 0000000000..ef6e8b79bf --- /dev/null +++ b/recipes/PeoplesSpeech/ASR/transformer/peoples_speech_prepare.py @@ -0,0 +1 @@ +../../peoples_speech_prepare.py \ No newline at end of file diff --git a/recipes/PeoplesSpeech/ASR/transformer/train.py b/recipes/PeoplesSpeech/ASR/transformer/train.py new file mode 100644 index 0000000000..a4fdd48e9a --- /dev/null +++ 
b/recipes/PeoplesSpeech/ASR/transformer/train.py @@ -0,0 +1,459 @@ +#!/usr/bin/env python3 +"""Recipe for training a Conformer ASR system with People's Speech. +The system employs an encoder, a decoder, and an attention mechanism +between them. Decoding is performed with (CTC/Att joint) beamsearch. + +To run this recipe, do the following: +> python train.py hparams/conformer_large.yaml + +With the default hyperparameters, the system employs a convolutional frontend and a conformer. +The decoder is based on a Transformer decoder. + +The neural network is trained on both CTC and negative log-likelihood +targets and sub-word units estimated with Byte Pair Encoding (BPE) +are used as basic recognition tokens. Training is performed on the full +People's Speech dataset (28,000 hours). + +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, tokens (e.g., characters instead of BPE), +training split, and many other possible variations. 
+ + +Authors + * Titouan Parcollet 2024 +""" + +import os +import sys + +import numpy as np +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio.sampler import DynamicBatchSampler +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_bos, _ = batch.tokens_bos + + # compute features + feats = self.hparams.compute_features(wavs) + current_epoch = self.hparams.epoch_counter.current + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + # Add feature augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels(tokens_bos) + + # forward modules + src = self.modules.CNN(feats) + + enc_out, pred = self.modules.Transformer( + src, tokens_bos, wav_lens, pad_idx=0 + ) + + # output layer for ctc log-probabilities + logits = self.modules.ctc_lin(enc_out) + p_ctc = self.hparams.log_softmax(logits) + + # output layer for seq2seq log-probabilities + pred = self.modules.seq_lin(pred) + p_seq = self.hparams.log_softmax(pred) + + # Compute outputs + hyps = None + current_epoch = self.hparams.epoch_counter.current + is_valid_search = ( + stage == sb.Stage.VALID + and current_epoch % self.hparams.valid_search_interval == 0 + ) + is_test_search = stage == sb.Stage.TEST + + if any([is_valid_search, is_test_search]): + # Note: For valid_search, for the sake of efficiency, we only perform beamsearch with + # limited capacity and no LM to give user some idea of how the AM is doing + + # Decide searcher for inference: valid or test search + if stage == sb.Stage.VALID: + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + else: + hyps, _, _, _ = self.hparams.test_search( + enc_out.detach(), wav_lens + ) + + return p_ctc, p_seq, wav_lens, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + (p_ctc, p_seq, wav_lens, hyps) = predictions + + ids = batch.audio_id + tokens_eos, tokens_eos_lens = batch.tokens_eos + tokens, tokens_lens = batch.tokens + + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if hasattr(self.hparams, "fea_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, tokens_lens, 
tokens_eos, tokens_eos_lens + ) + + loss_seq = self.hparams.seq_cost( + p_seq, tokens_eos, length=tokens_eos_lens + ).sum() + + loss_ctc = self.hparams.ctc_cost( + p_ctc, tokens, wav_lens, tokens_lens + ).sum() + + loss = ( + self.hparams.ctc_weight * loss_ctc + + (1 - self.hparams.ctc_weight) * loss_seq + ) + + if stage != sb.Stage.TRAIN: + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if current_epoch % valid_search_interval == 0 or ( + stage == sb.Stage.TEST + ): + # Decode token terms to words + predicted_words = self.tokenizer(hyps, task="decode_from_list") + + # Convert indices to words + target_words = undo_padding(tokens, tokens_lens) + target_words = self.tokenizer( + target_words, task="decode_from_list" + ) + + self.wer_metric.append(ids, predicted_words, target_words) + + # compute the accuracy of the one-step-forward prediction + self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.acc_metric = self.hparams.acc_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["ACC"] = self.acc_metric.summarize() + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + or stage == sb.Stage.TEST + ): + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID: + lr = self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = 
{ + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"ACC": stage_stats["ACC"], "epoch": epoch}, + max_keys=["ACC"], + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: + self.hparams.noam_annealing(self.optimizer) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + + # We load the dataset + ds = load_and_concatenate_datasets( + hparams["subsets"], + hparams["hf_download_folder"], + ) + + # We must rename the 'id' column because SpeechBrain sampling uses this + # name for the sampler already, also it's not an id, but an audio_path. + train_data = ds[0].rename_column("id", "audio_id") + valid_data = ds[1].rename_column("id", "audio_id") + test_data = ds[2].rename_column("id", "audio_id") + + # We need to get the full list of durations of all samples to enable + # bucketing from the dynamic batch sampler. We do it that way instead + # of the usual iterable because the HF dataset ALWAYS opens the file + # when called, which means that the dynamic sampling needs to read the + # 1.5M audio samples from disk.... using a list instead is much faster. 
+ train_len_list = list( + train_data.select_columns("duration_ms")["duration_ms"] + ) + val_len_list = list(valid_data.select_columns("duration_ms")["duration_ms"]) + + train_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset( + train_data, + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset( + valid_data, + ) + + test_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset( + test_data, + ) + + datasets = [train_data, valid_data, test_data] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("audio") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(audio): + if audio["sampling_rate"] != 16000: + sig = torchaudio.transforms.Resample( + audio["sampling_rate"], + 16000, + )(audio["array"]) + else: + sig = audio["array"].astype(np.single) + yield sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("text") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + wrd = english_specific_preprocess(wrd) + yield wrd + tokens_list = tokenizer.sp.encode_as_ids(wrd) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "audio_id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. 
+ train_batch_sampler = None + valid_batch_sampler = None + + dynamic_hparams = hparams["dynamic_batch_sampler"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + num_buckets = dynamic_hparams["num_buckets"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + dynamic_hparams["max_batch_length"] + * 1000, # duration is in ms so back to s. + num_buckets=num_buckets, + lengths_list=train_len_list, + shuffle=dynamic_hparams["shuffle"], + batch_ordering=dynamic_hparams["batch_ordering"], + ) + + valid_batch_sampler = DynamicBatchSampler( + valid_data, + dynamic_hparams_valid["max_batch_length"] + * 1000, # duration is in ms so back to s. + num_buckets=num_buckets, + lengths_list=val_len_list, + shuffle=dynamic_hparams_valid["shuffle"], + batch_ordering=dynamic_hparams_valid["batch_ordering"], + ) + + return ( + train_data, + valid_data, + test_data, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Dataset download and preparation + from peoples_speech_prepare import ( + english_specific_preprocess, + load_and_concatenate_datasets, + prepare_peoples_speech, + ) + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_peoples_speech, + kwargs={ + "hf_download_folder": hparams["hf_download_folder"], + "subsets": hparams["subsets"], + "save_folder": hparams["save_folder"], + "skip_prep": hparams["skip_prep"], + }, + ) + + # Defining tokenizer and loading it + tokenizer = SentencePiece( + model_dir=hparams["save_folder"], + 
vocab_size=hparams["output_neurons"], + annotation_train=hparams["train_csv"], + annotation_read="text", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + bos_id=hparams["bos_index"], + eos_id=hparams["eos_index"], + ) + + # here we create the datasets objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_data, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + opt_class=hparams["Adam"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # adding objects to trainer: + asr_brain.tokenizer = tokenizer + + # Setup dynamic batching specifics + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + # report WER on valid data + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "valid_wer.txt" + ) + asr_brain.evaluate( + valid_data, + min_key="WER", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) + + # report WER on test data + 
asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], "test_wer.txt" + ) + asr_brain.evaluate( + test_data, + min_key="WER", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/PeoplesSpeech/peoples_speech_prepare.py b/recipes/PeoplesSpeech/peoples_speech_prepare.py new file mode 100644 index 0000000000..a98b80c0bf --- /dev/null +++ b/recipes/PeoplesSpeech/peoples_speech_prepare.py @@ -0,0 +1,419 @@ +""" +This is a preparation script for the People Speech dataset. + +Data preparation for people speech is slightly different than usual for +SpeechBrain as it relies exclusively on HuggingFace Datasets. This means that +audio files will NOT be extracted but instead read from shard directly. Instead +we propose to still generate .csv files to have transcriptions and durations +readable to the user. This means that the csv file generation part can totally +be skipped and the training recipe would still work. + +TL;DR: .csv files are only generated for debugging/monitoring purpose and are +not necessary to start the recipe. + +Download instructions: + 1. https://huggingface.co/datasets/MLCommons/peoples_speech +Reference: https://arxiv.org/abs/2111.09344 + + +Author +------- + * Titouan Parcollet, 2024 +""" + +import csv +import functools +import logging +import os +import re +from dataclasses import dataclass + +from speechbrain.utils.parallel import get_available_cpu_count, parallel_map + +logger = logging.getLogger(__name__) + +HF_HUB = "MLCommons/peoples_speech" + + +@dataclass +class PeoplesSpeechRow: + """Dataclass for handling People's Speech rows. + + Attributes + ---------- + audio_id : str + The audio ID. + duration : float + The duration in seconds. + text : str + The text of the segment. 
+ """ + + audio_id: str # audio[aid] + duration: float + text: str + + +def prepare_peoples_speech( + hf_download_folder: str, + save_folder: str, + subsets: list, + skip_prep: bool = False, +) -> None: + """Download the dataset and csv for Peoples Speech. + + Data preparation for People's Speech is slightly different than usual for + SpeechBrain as it relies exclusively on HuggingFace Datasets. This means that + audio files will NOT be extracted but instead read from shard directly. Instead + we propose to still generate .csv files to have transcriptions and durations + readable to the user. + + Download: https://huggingface.co/datasets/MLCommons/peoples_speech + Reference: https://arxiv.org/abs/2111.09344 + + The `train.csv` file is created by combining the sets given in the `subsets` + variable. + + The `dev.csv` and `test.csv` files are created based on the `DEV` and `TEST` splits + specified in the `splits` list. + + Parameters + ---------- + hf_download_folder : str + The path where HF stored the dataset. Important, you must set the global + env variable HF_HUB_CACHE to the same path as HuggingFace is primarily + using this to know where to store datasets. + save_folder : str + The path to the folder where the CSV files will be saved. + subsets : list + Target subset. People's speech contains multiple subsets, which must be + loaded individually and then concatenated. E.g. 'clean', 'clean_sa', + 'dirty' or 'dirty_sa'. E.g. to combine ['clean', 'dirty']. + skip_prep : bool, optional + If True, the data preparation will be skipped, and the function will return immediately. + + Returns + ------- + None + """ + + if not os.path.isdir(hf_download_folder): + msg = "You must download the dataset with HuggingFace before starting " + msg += ( + "this recipe. Please check the HuggingFace hub of people's speech." 
+ ) + raise ValueError(msg) + + if skip_prep: + logger.info("Skipping data preparation as `skip_prep` is set to `True`") + return + + if len(subsets) == 0: + raise ImportError( + "At least one People's speech subset must be specified." + ) + + # Setting output paths + save_output = {} + splits = ["train", "validation", "test"] + for split in splits: + save_output[split] = os.path.join(save_folder, str(split) + ".csv") + + # check if the data is already prepared + if skip_csv(save_output): + logger.info("Skipping preparation, completed in previous run.") + return + else: + logger.info("Starting data preparation...") + + hf_dataset = load_and_concatenate_datasets(subsets, hf_download_folder) + + logger.info( + f"Preparing CSV of the Peoples Speech dataset in {save_folder}..." + ) + + os.makedirs(save_folder, exist_ok=True) + + for i, (split, output) in enumerate(save_output.items()): + logger.info(f"Starting creating {output} using {split} split.") + HF_create_csv(output, hf_dataset[i], split) + + logger.info("Data preparation completed!") + + +def load_and_concatenate_datasets(subsets, hf_download_folder): + """Load/download and concatenate all the specified subsets from People's + speech. The People's Speech dataset has 4 subsets "clean", "clean_sa", + "dirty" and "dirty_sa". Multiple subsets cannot be loaded all at once with + HuggingFace so this function makes it possible. + + Parameters + ---------- + subsets : list + Target subset. People's speech contains multiple subsets, which must be + loaded individually and then concatenated. E.g. 'clean', 'clean_sa', + 'dirty' or 'dirty_sa'. E.g. to combine ['clean', 'dirty']. + hf_download_folder : str + The path where HF stored the dataset. Important, you must set the global + env variable HF_HUB_CACHE to the same path as HuggingFace is primarily + using this to know where to store datasets. + + Returns + ------- + List of HuggingFace datasets. 
+ """ + + try: + import datasets + from datasets import concatenate_datasets, load_dataset + except ImportError as error: + raise ImportError( + f"{str(error)}\nHuggingFace datasets must be installed." + ) + + # Managing the download dir as HF can be capricious with this. + if "HF_HUB_CACHE" in os.environ: + hf_caching_dir = os.environ["HF_HUB_CACHE"] + elif "HF_HOME" in os.environ: + hf_caching_dir = os.environ["HF_HOME"] + else: + hf_caching_dir = os.environ["XDG_CACHE_HOME"] + + if hf_caching_dir != hf_download_folder: + msg = "HuggingFace HF_HUB_CACHE or HF_HOME is not equal to the given" + msg += " hf_download_folder. Make sure to set these variables properly." + raise Exception(msg) + + logger.info("Loading dataset from: " + str(hf_caching_dir)) + + nproc = get_available_cpu_count() + + # Setting no download mode for HuggingFace. Only cache. + # We remove progress bars as they repeat for each DDP process. + os.environ["HF_DATASETS_OFFLINE"] = "1" + datasets.disable_progress_bars() + datasets_list = [] + for subset in subsets: + hf_data = load_dataset( + HF_HUB, + name=subset, + split=["train"], + num_proc=nproc, + cache_dir=hf_caching_dir, + ) + datasets_list.append(hf_data[0]) + + os.environ["HF_DATASETS_OFFLINE"] = "0" + + # Datasets need to be concatenated back. + final_dataset = [] + if len(datasets_list) > 1: + final_dataset.append(concatenate_datasets(datasets_list, split="train")) + else: + final_dataset.append(datasets_list[0]) + + # Now get validation and test + # Setting no download mode for HuggingFace. Only cache. 
+ os.environ["HF_DATASETS_OFFLINE"] = "1" + hf_data = load_dataset( + HF_HUB, + name=subset, + split=["validation", "test"], + num_proc=nproc, + cache_dir=hf_caching_dir, + ) + os.environ["HF_DATASETS_OFFLINE"] = "0" + datasets.enable_progress_bars() + + final_dataset.append(hf_data[0]) + final_dataset.append(hf_data[1]) + + return final_dataset + + +def HF_create_csv( + csv_file: str, + hf_dataset, + split: str, +) -> None: + """ + Create a CSV file based on a HuggingFace dataset. + + Parameters + ---------- + csv_file : str + The path to the CSV file to be created. + hf_dataset : huggingface dataset, + The huggingface dataset. + split : str + The split to be used for filtering the data. + + Returns + ------- + None + """ + + # We don't need to open the audio file. This will speed up drastically. + hf_dataset = hf_dataset.select_columns(["id", "duration_ms", "text"]) + + total_duration = 0.0 + nb_samples = 0 + + line_processor = functools.partial(HF_process_line) + + csv_file_tmp = csv_file + ".tmp" + with open(csv_file_tmp, mode="w", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + header = [ + "audio_id", + "duration", + "text", + ] + csv_writer.writerow(header) + + for row in parallel_map(line_processor, hf_dataset): + if row is None: + continue + + csv_writer.writerow( + [ + row.audio_id, + str(row.duration), + row.text, + ] + ) + + total_duration += row.duration + nb_samples += 1 + + os.replace(csv_file_tmp, csv_file) + + logger.info(f"{csv_file} successfully created!") + logger.info(f"Number of samples in {split} split: {nb_samples}") + logger.info( + f"Total duration of {split} split: {round(total_duration / 3600, 2)} Hours" + ) + + +def HF_process_line(row: dict) -> list: + """ + Process the audio line and return the utterances for the given split. + + Parameters + ---------- + row: dict + The audio line to be processed. 
+ + Returns + ------- + list + The list of utterances for the given split. + """ + text = english_specific_preprocess(row["text"]) + + if text: + audio_id = row["id"] + duration = row["duration_ms"] / 1000 # HF dataset column is in ms. + + row = PeoplesSpeechRow( + audio_id=audio_id, + duration=duration, + text=text, + ) + + return row + else: + return None + + +def skip_csv(save_csv_files: dict) -> bool: + """Check if the CSV files already exist. + + Parameters + ---------- + save_csv_files : dict + The dictionary containing the paths to the CSV files. + + Returns + ------- + bool + True if all the CSV files already exist, False otherwise. + """ + return all(os.path.isfile(path) for path in save_csv_files.values()) + + +def english_specific_preprocess(sentence): + """ + Preprocess English text from the People's Speech dataset into space-separated + words. This removes various punctuation and treats it as word boundaries. + It normalises and retains various apostrophes (’‘´) between letters, but not + other ones, which are probably quotation marks. It capitalises all text. + This function may error out if new characters show up in the training, dev, + or test sets. + + Parameters + ---------- + sentence : str + The string to modify. + + Returns + ------- + str + The normalised sentence. + """ + + # These characters mark word boundaries. + split_character_regex = '[ ",:;!?¡\\.…()\\-—–‑_“”„/«»]' + + # These could all be used as apostrophes in the middle of words. + # If at the start or end of a word, they will be removed. + apostrophes_or_quotes = "['`´ʻ‘’]" + + sentence_level_mapping = {"&": " and ", "+": " plus ", "fl": "fl"} + + # If it contains anything numerical, we remove it as it is only on val and + # test. Unfortunately, we can't make sure of what is actually being uttered. 
+ # Hence, we must throw it away from the evaluation (roughly 1 hours each) + # if bool(re.search(r'\d', sentence)): + # return None + + final_characters = set(" ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'") + + sentence_mapped = sentence + if any((source in sentence) for source in sentence_level_mapping): + for source, target in sentence_level_mapping.items(): + sentence_mapped = sentence_mapped.replace(source, target) + + # Some punctuation that indicates a word boundary. + words_split = re.split(split_character_regex, sentence_mapped) + words_quotes = [ + # Use ' as apostrophe. + # Remove apostrophes at the start and end of words (probably quotes). + # Word-internal apostrophes, even where rotated, are retained. + re.sub(apostrophes_or_quotes, "'", word).strip("'") + for word in words_split + ] + + # Processing that does not change the length. + words_upper = [word.upper() for word in words_quotes] + + words_mapped = [ + # word.translate(character_mapping) + word + for word in words_upper + # Previous processing may have reduced words to nothing. + # Remove them. + if word != "" + ] + + result = " ".join(words_mapped) + character_set = set(result) + assert character_set <= final_characters, ( + "Unprocessed characters", + sentence, + result, + character_set - final_characters, + ) + return result diff --git a/recipes/REAL-M/sisnr-estimation/README.md b/recipes/REAL-M/sisnr-estimation/README.md index 661b05867e..3d98a896ed 100644 --- a/recipes/REAL-M/sisnr-estimation/README.md +++ b/recipes/REAL-M/sisnr-estimation/README.md @@ -8,21 +8,29 @@ * The paper for the REAL-M dataset can be found on [this arxiv link](https://arxiv.org/pdf/2110.10812.pdf). -* The model is trained with the LibriMix and WHAMR! datasets. You can download LibriMix by following the instructions [here](https://github.com/JorisCos/LibriMix). Instructions on WHAMR! can be found [here](https://wham.whisper.ai/) +* The model is trained with the LibriMix and WHAMR! datasets. 
You can download LibriMix by following the instructions [here](https://github.com/JorisCos/LibriMix). Instructions on WHAMR! can be found [here](http://wham.whisper.ai/) # How to Run * To train with dynamic mixing: -```python train.py hparams/pool_sisnrestimator.yaml --data_folder /yourLibri2Mixpath --base_folder_dm /yourLibriSpeechpath --rir_path /yourpathforwhamrRIRs --dynamic_mixing True --use_whamr_train True --whamr_data_folder /yourpath/whamr --base_folder_dm_whamr /yourpath/wsj0-processed/si_tr_s``` +```shell +python train.py hparams/pool_sisnrestimator.yaml --data_folder /yourLibri2Mixpath --base_folder_dm /yourLibriSpeechpath --rir_path /yourpathforwhamrRIRs --dynamic_mixing True --use_whamr_train True --whamr_data_folder /yourpath/whamr --base_folder_dm_whamr /yourpath/wsj0-processed/si_tr_s +``` + +# How to run on test sets only +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: +```shell +python train.py hparams/pool_sisnrestimator.yaml --data_folder /yourLibri2Mixpath --base_folder_dm /yourLibriSpeechpath --rir_path /yourpathforwhamrRIRs --dynamic_mixing True --use_whamr_train True --whamr_data_folder /yourpath/whamr --base_folder_dm_whamr /yourpath/wsj0-processed/si_tr_s --test_only +``` # Results | Release | hyperparams file | L1-Error (DB) | HuggingFace link | Full model link | GPUs | |:-------------:|:---------------------------:| :-----:| :-----:| :-----:| :--------:| | 18-10-21 | pool_sisnrestimator.yaml | 1.71 | [HuggingFace](https://huggingface.co/speechbrain/REAL-M-sisnr-estimator) | Not Available| RTX8000 48GB | -You can find the output folders with the training logs [here](https://drive.google.com/drive/folders/1NGncbjvLeGfbUqmVi6ej-NH9YQn5vBmI?usp=sharing). +You can find the output folders with the training logs [here](https://www.dropbox.com/sh/n55lm8i5z51pbm1/AABHfByOEy__UP_bmT4GJvSba?dl=0). 
This [repository](https://huggingface.co/speechbrain/REAL-M-sisnr-estimator-training) provides the Separator models needed to train a blind SI-SNR estimator. # Training Time @@ -42,12 +50,12 @@ Please, cite our paper for the REAL-M dataset, if you use it for your research o ```bibtex @misc{subakan2021realm, - title={REAL-M: Towards Speech Separation on Real Mixtures}, - author={Cem Subakan and Mirco Ravanelli and Samuele Cornell and François Grondin}, - year={2021}, - eprint={2110.10812}, - archivePrefix={arXiv}, - primaryClass={eess.AS} + title={REAL-M: Towards Speech Separation on Real Mixtures}, + author={Cem Subakan and Mirco Ravanelli and Samuele Cornell and François Grondin}, + year={2021}, + eprint={2110.10812}, + archivePrefix={arXiv}, + primaryClass={eess.AS} } ``` @@ -55,6 +63,15 @@ Please, cite our paper for the REAL-M dataset, if you use it for your research o Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem 
Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/REAL-M/sisnr-estimation/create_whamr_rirs.py b/recipes/REAL-M/sisnr-estimation/create_whamr_rirs.py new file mode 120000 index 0000000000..e9fb19ca54 --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/create_whamr_rirs.py @@ -0,0 +1 @@ +../../WHAMandWHAMR/meta/create_whamr_rirs.py \ No newline at end of file diff --git a/recipes/REAL-M/sisnr-estimation/dynamic_mixing_librimix.py b/recipes/REAL-M/sisnr-estimation/dynamic_mixing_librimix.py new file mode 120000 index 0000000000..9503882208 --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/dynamic_mixing_librimix.py @@ -0,0 +1 @@ +../../LibriMix/separation/dynamic_mixing.py \ No newline at end of file diff --git a/recipes/REAL-M/sisnr-estimation/dynamic_mixing_wham.py b/recipes/REAL-M/sisnr-estimation/dynamic_mixing_wham.py new file mode 120000 index 0000000000..bb135c9e75 --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/dynamic_mixing_wham.py @@ -0,0 +1 @@ +../../WHAMandWHAMR/separation/dynamic_mixing.py \ No newline at end of file diff --git a/recipes/REAL-M/sisnr-estimation/extra_requirements.txt b/recipes/REAL-M/sisnr-estimation/extra_requirements.txt new file mode 100644 index 0000000000..73fe73d2cc --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/extra_requirements.txt @@ -0,0 +1 @@ +pyroomacoustics==0.1.4 diff --git a/recipes/REAL-M/sisnr-estimation/hparams/pool_sisnrestimator.yaml b/recipes/REAL-M/sisnr-estimation/hparams/pool_sisnrestimator.yaml index 3ce586e490..29a161a821 100644 --- a/recipes/REAL-M/sisnr-estimation/hparams/pool_sisnrestimator.yaml +++ b/recipes/REAL-M/sisnr-estimation/hparams/pool_sisnrestimator.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # 
seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -60,15 +60,14 @@ skip_prep: False ckpt_interval_minutes: 60 # Experiment params -auto_mix_prec: False # Set this to True for mixed precision -test_only: False # if True, we only do Testing (no training) +precision: fp32 # bf16, fp16 or fp32 # Set this to True for mixed precision # for the currently supported datasets (Libri2Mix, WHAMR!), this should be set 2 num_spks: 2 noprogressbar: False sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.0001 @@ -92,18 +91,39 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + # Dataloader options dataloader_opts: diff --git a/recipes/REAL-M/sisnr-estimation/prepare_data_librimix.py b/recipes/REAL-M/sisnr-estimation/prepare_data_librimix.py new file mode 120000 index 0000000000..b77e802c5d --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/prepare_data_librimix.py @@ -0,0 +1 @@ +../../LibriMix/prepare_data.py \ No newline at end of file diff --git a/recipes/REAL-M/sisnr-estimation/prepare_data_wham.py b/recipes/REAL-M/sisnr-estimation/prepare_data_wham.py new file mode 120000 index 0000000000..cedb5e689c --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/prepare_data_wham.py @@ -0,0 +1 @@ +../../WHAMandWHAMR/prepare_data.py \ No newline at end of file diff --git a/recipes/REAL-M/sisnr-estimation/preprocess_dynamic_mixing_librimix.py b/recipes/REAL-M/sisnr-estimation/preprocess_dynamic_mixing_librimix.py new file mode 120000 index 0000000000..58304b6adf --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/preprocess_dynamic_mixing_librimix.py @@ -0,0 +1 @@ +../../LibriMix/meta/preprocess_dynamic_mixing.py \ No newline at end of file diff --git a/recipes/REAL-M/sisnr-estimation/preprocess_dynamic_mixing_wham.py b/recipes/REAL-M/sisnr-estimation/preprocess_dynamic_mixing_wham.py new file mode 120000 index 0000000000..80db586e3c --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/preprocess_dynamic_mixing_wham.py @@ -0,0 +1 @@ +../../WHAMandWHAMR/meta/preprocess_dynamic_mixing.py \ No newline at end of file diff --git a/recipes/REAL-M/sisnr-estimation/train.py b/recipes/REAL-M/sisnr-estimation/train.py index 28156ef02d..b589283ab0 
100644 --- a/recipes/REAL-M/sisnr-estimation/train.py +++ b/recipes/REAL-M/sisnr-estimation/train.py @@ -8,36 +8,37 @@ * Samuele Cornell 2021 """ +import csv +import itertools as it import os import sys + +import numpy as np import torch +from hyperpyyaml import load_hyperpyyaml +from tqdm import tqdm + import speechbrain as sb import speechbrain.nnet.schedulers as schedulers from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml -from torch.cuda.amp import autocast -import itertools as it -from tqdm import tqdm -import numpy as np -import logging -import csv +from speechbrain.utils.logger import get_logger # Define training procedure class Separation(sb.Brain): def compress_snrrange(self, inp): """Convert from true snr range to 0-1 range""" - rnge = self.hparams.snrmax - self.hparams.snrmin + range = self.hparams.snrmax - self.hparams.snrmin inp = torch.clip(inp, min=self.hparams.snrmin, max=self.hparams.snrmax) inp = inp - self.hparams.snrmin - inp = inp / rnge + inp = inp / range return inp def gettrue_snrrange(self, inp): """Convert from 0-1 range to true snr range""" - rnge = self.hparams.snrmax - self.hparams.snrmin - inp = inp * rnge + range = self.hparams.snrmax - self.hparams.snrmin + inp = inp * range inp = inp + self.hparams.snrmin return inp @@ -62,7 +63,7 @@ def compute_forward(self, mix, targets, stage, noise=None): if self.hparams.use_reverb_augment: targets_rev = [ - self.hparams.reverb(targets[:, :, i], None) + self.hparams.reverb(targets[:, :, i]) for i in range(self.hparams.num_spks) ] targets_rev = torch.stack(targets_rev, dim=-1) @@ -83,7 +84,8 @@ def compute_forward(self, mix, targets, stage, noise=None): targets = targets[:, :min_len, :] if self.hparams.use_wavedrop: - mix = self.hparams.wavedrop(mix, mix_lens) + mix = self.hparams.drop_chunk(mix, mix_lens) + mix = self.hparams.drop_freq(mix) if self.hparams.limit_training_signal_len: mix, targets = self.cut_signals(mix, targets) @@ -174,39 +176,7 @@ 
def fit_batch(self, batch): if self.hparams.num_spks == 3: targets.append(batch.s3_sig) - if self.auto_mix_prec: - with autocast(): - predictions, snrhat, snr, snr_compressed = self.compute_forward( - mixture, targets, sb.Stage.TRAIN, noise - ) - - snr = snr.reshape(-1) - loss = ((snr_compressed - snrhat).abs()).mean() - - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - - self.scaler.scale(loss).backward() - if self.hparams.clip_grad_norm >= 0: - self.scaler.unscale_(self.optimizer) - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), - self.hparams.clip_grad_norm, - ) - self.scaler.step(self.optimizer) - self.scaler.update() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) - ) - loss.data = torch.tensor(0).to(self.device) - - else: - # get the oracle snrs, estimated snrs, and the source estimates + with self.training_ctx: predictions, snrhat, snr, snr_compressed = self.compute_forward( mixture, targets, sb.Stage.TRAIN, noise ) @@ -214,23 +184,22 @@ def fit_batch(self, batch): snr = snr.reshape(-1) loss = ((snr_compressed - snrhat).abs()).mean() - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - loss.backward() - if self.hparams.clip_grad_norm >= 0: - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm - ) - self.optimizer.step() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! 
it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) + if loss.nelement() > 0 and loss < self.hparams.loss_upper_lim: + self.scaler.scale(loss).backward() + if self.hparams.clip_grad_norm >= 0: + self.scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), + self.hparams.clip_grad_norm, ) - loss.data = torch.tensor(0).to(self.device) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + self.nonfinite_count += 1 + logger.info( + f"infinite loss or empty loss! it happened {self.nonfinite_count} times so far - skipping this batch" + ) + loss.data = torch.tensor(0.0).to(self.device) self.optimizer.zero_grad() @@ -270,7 +239,6 @@ def on_stage_end(self, stage, stage_loss, epoch): # Perform end-of-iteration things, like annealing, logging, etc. if stage == sb.Stage.VALID: - # Learning rate annealing if isinstance( self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau @@ -289,7 +257,8 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"error": stage_stats["error"]}, min_keys=["error"], + meta={"error": stage_stats["error"]}, + min_keys=["error"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( @@ -309,9 +278,7 @@ def add_speed_perturb(self, targets, targ_lens): recombine = True for i in range(targets.shape[-1]): - new_target = self.hparams.speedperturb( - targets[:, :, i], targ_lens - ) + new_target = self.hparams.speed_perturb(targets[:, :, i]) new_targets.append(new_target) if i == 0: min_len = new_target.shape[-1] @@ -348,7 +315,7 @@ def add_speed_perturb(self, targets, targ_lens): return mix, targets def cut_signals(self, mixture, targets): - """This function selects a random segment of a given length withing the mixture. + """This function selects a random segment of a given length within the mixture. 
The corresponding targets are selected accordingly""" randstart = torch.randint( 0, @@ -393,14 +360,13 @@ def save_results(self, test_data): test_data, **self.hparams.dataloader_opts ) - with open(save_file, "w") as results_csv: + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: writer = csv.DictWriter(results_csv, fieldnames=csv_columns) writer.writeheader() # Loop over all test sentence with tqdm(test_loader, dynamic_ncols=True) as t: for i, batch in enumerate(t): - # Apply Separation mixture = batch.mix_sig snt_id = batch.id @@ -452,20 +418,16 @@ def save_results(self, test_data): writer.writerow(row) logger.info( - "Mean SISNR for source 1 is {}".format(np.array(all_sisnr1s).mean()) + f"Mean SISNR for source 1 is {np.array(all_sisnr1s).mean()}" ) logger.info( - "Mean SISNR hat for source 1 is {}".format( - np.array(all_sisnr1_hats).mean() - ) + f"Mean SISNR hat for source 1 is {np.array(all_sisnr1_hats).mean()}" ) logger.info( - "Mean SISNR for source 2 is {}".format(np.array(all_sisnr2s).mean()) + f"Mean SISNR for source 2 is {np.array(all_sisnr2s).mean()}" ) logger.info( - "Mean SISNR hat for source 2 is {}".format( - np.array(all_sisnr2_hats).mean() - ) + f"Mean SISNR hat for source 2 is {np.array(all_sisnr2_hats).mean()}" ) @@ -558,17 +520,16 @@ def audio_pipeline_noise(noise_wav): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) sb.utils.distributed.ddp_init_group(run_opts) # Logger info - logger = logging.getLogger(__name__) + logger = get_logger(__name__) # Create experiment directory sb.create_experiment_directory( @@ -581,13 +542,12 @@ def audio_pipeline_noise(noise_wav): if hparams["dynamic_mixing"] and not os.path.exists( 
hparams["base_folder_dm"] ): - print( + raise ValueError( "Please, specify a valid base_folder_dm folder when using dynamic mixing" ) - sys.exit(1) # Data preparation for LibriMix - from recipes.LibriMix.prepare_data import prepare_librimix as prepare_libri + from prepare_data_librimix import prepare_librimix as prepare_libri # create the csv files run_on_main( @@ -603,26 +563,27 @@ def audio_pipeline_noise(noise_wav): ) # Data preparation for WHAMR - from recipes.WHAMandWHAMR.prepare_data import create_wham_whamr_csv - from recipes.WHAMandWHAMR.separation.train import ( - dataio_prep as dataio_prep_whamr, - ) - - create_wham_whamr_csv( - datapath=hparams["whamr_data_folder"], - savepath=hparams["save_folder"], - fs=hparams["sample_rate"], - add_reverb=True, - savename="whamr_", - set_types=["tr", "cv", "tt"], - ) + from prepare_data_wham import create_wham_whamr_csv + from train_wham import dataio_prep as dataio_prep_whamr + + # add another skip_prep to distinguish between LibriSpeech & WHAM/R prep + skip_prep = hparams["skip_prep"] + if not skip_prep: + create_wham_whamr_csv( + datapath=hparams["whamr_data_folder"], + savepath=hparams["save_folder"], + fs=hparams["sample_rate"], + add_reverb=True, + savename="whamr_", + set_types=["tr", "cv", "tt"], + ) train_data_whamr, valid_data, test_data = dataio_prep_whamr(hparams) # if whamr, and we do speedaugment we need to prepare the csv file if hparams["use_reverb_augment"]: - from recipes.WHAMandWHAMR.prepare_data import create_whamr_rir_csv - from recipes.WHAMandWHAMR.meta.create_whamr_rirs import create_rirs + from create_whamr_rirs import create_rirs + from prepare_data_wham import create_whamr_rir_csv # If the Room Impulse Responses do not exist, we create them if not os.path.exists(hparams["rir_path"]): @@ -643,20 +604,19 @@ def audio_pipeline_noise(noise_wav): }, ) - hparams["reverb"] = sb.processing.speech_augmentation.AddReverb( + hparams["reverb"] = sb.augment.time_domain.AddReverb( 
os.path.join(hparams["save_folder"], "whamr_rirs.csv") ) if hparams["dynamic_mixing"]: - from recipes.LibriMix.separation.dynamic_mixing import ( + from dynamic_mixing_librimix import ( dynamic_mix_data_prep_librimix as dynamic_mix_data_prep, ) - from recipes.WHAMandWHAMR.separation.dynamic_mixing import ( + from dynamic_mixing_wham import ( dynamic_mix_data_prep as dynamic_mix_data_prep_whamr, ) if hparams["use_whamr_train"]: - if "processed" not in hparams["base_folder_dm_whamr"]: # if the processed folder does not exist for whamr dynamic mixing, we do the necessary preprocessing @@ -664,9 +624,7 @@ def audio_pipeline_noise(noise_wav): os.path.normpath(hparams["base_folder_dm_whamr"]) + "_processed" ): - from recipes.WHAMandWHAMR.meta.preprocess_dynamic_mixing import ( - resample_folder, - ) + from preprocess_dynamic_mixing_wham import resample_folder print("Resampling the base folder") run_on_main( @@ -713,9 +671,7 @@ def audio_pipeline_noise(noise_wav): if not os.path.exists( os.path.normpath(hparams["base_folder_dm"]) + "_processed" ): - from recipes.LibriMix.meta.preprocess_dynamic_mixing import ( - resample_folder, - ) + from preprocess_dynamic_mixing_librimix import resample_folder print("Resampling the base folder") run_on_main( @@ -744,6 +700,7 @@ def audio_pipeline_noise(noise_wav): train_data = dynamic_mix_data_prep(hparams) else: + hparams["use_whamr_train"] = False train_data, valid_data, test_data = dataio_prep(hparams) # Brain class initialization @@ -755,58 +712,61 @@ def audio_pipeline_noise(noise_wav): checkpointer=hparams["checkpointer"], ) - from speechbrain.pretrained import SepformerSeparation as separator - from speechbrain.pretrained.interfaces import fetch + from speechbrain.inference.separation import ( + SepformerSeparation as separator, + ) + from speechbrain.utils.fetching import fetch all_separators = [] for separator_model in hparams["separators_to_use"]: + savedir = hparams["output_folder"] + "/" + separator_model + fetch( - 
separator_model + "_encoder.ckpt", + filename=separator_model + "_encoder.ckpt", source=hparams["separator_repo"], - savedir=separator_model, + savedir=savedir, save_filename="encoder.ckpt", ) fetch( - separator_model + "_decoder.ckpt", + filename=separator_model + "_decoder.ckpt", source=hparams["separator_repo"], - savedir=separator_model, + savedir=savedir, save_filename="decoder.ckpt", ) fetch( - separator_model + "_masknet.ckpt", + filename=separator_model + "_masknet.ckpt", source=hparams["separator_repo"], - savedir=separator_model, + savedir=savedir, save_filename="masknet.ckpt", ) fetch( - separator_model + "_hyperparams.yaml", + filename=separator_model + "_hyperparams.yaml", source=hparams["separator_repo"], - savedir=separator_model, + savedir=savedir, save_filename="hyperparams.yaml", ) separator_loaded = separator.from_hparams( - source=separator_model, - run_opts={"device": "cuda"}, - savedir=separator_model, + source=savedir, + run_opts={"device": run_opts["device"]}, + savedir=savedir, ) all_separators.append(separator_loaded) snrestimator.all_separators = all_separators - if not hparams["test_only"]: - # Training - snrestimator.fit( - snrestimator.hparams.epoch_counter, - train_data, - valid_data, - train_loader_kwargs=hparams["dataloader_opts"], - valid_loader_kwargs=hparams["dataloader_opts"], - ) + # Training + snrestimator.fit( + snrestimator.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_opts"], + valid_loader_kwargs=hparams["dataloader_opts"], + ) # Eval snrestimator.evaluate(test_data, min_key="error") diff --git a/recipes/REAL-M/sisnr-estimation/train_wham.py b/recipes/REAL-M/sisnr-estimation/train_wham.py new file mode 120000 index 0000000000..493d4a7ec8 --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/train_wham.py @@ -0,0 +1 @@ +../../WHAMandWHAMR/separation/train.py \ No newline at end of file diff --git a/recipes/REAL-M/sisnr-estimation/wham_room.py 
b/recipes/REAL-M/sisnr-estimation/wham_room.py new file mode 120000 index 0000000000..6676fa1ce5 --- /dev/null +++ b/recipes/REAL-M/sisnr-estimation/wham_room.py @@ -0,0 +1 @@ +../../WHAMandWHAMR/meta/wham_room.py \ No newline at end of file diff --git a/recipes/RescueSpeech/ASR/noise-robust/hparams/robust_asr_16k.yaml b/recipes/RescueSpeech/ASR/noise-robust/hparams/robust_asr_16k.yaml new file mode 100644 index 0000000000..475cffce26 --- /dev/null +++ b/recipes/RescueSpeech/ASR/noise-robust/hparams/robust_asr_16k.yaml @@ -0,0 +1,182 @@ +# Model: wav2vec2 + DNN + CTC +# Augmentation: SpecAugment +# Authors: Sangeet Sagar 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 8200 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/robust_asr/ +test_wer_file: !ref /wer_test.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest whisper model. +whisper_hub: !ref openai/whisper-large-v2 +whisper_folder: !ref /whisper_checkpoint +language: german + + +# Path to pre-trained models +pretrained_whisper_path: speechbrain/whisper_rescuespeech +pretrained_enhance_path: speechbrain/sepformer_rescuespeech + +epochs_before_lr_drop: 2 +unfreeze_epoch: !ref + 1 +frozen_models: [encoder, decoder, masknet, whisper] +unfrozen_models: [masknet, whisper] + +# Dataset prep parameters +data_folder: !PLACEHOLDER +train_tsv_file: !ref /train.tsv +dev_tsv_file: !ref /dev.tsv +test_tsv_file: !ref /test.tsv +accented_letters: True +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv +skip_prep: False + +# We remove utterance slonger than 10s in the train/dev/test sets as +# longer sentences certainly correspond to "open microphones". 
+avoid_if_longer_than: 10.0 + +## Model Parameters- Enhance model +dereverberate: False +save_audio: True +sample_rate: 16000 +enhance_sample_rate: 16000 +limit_training_signal_len: False +training_signal_len: 64000 +use_speedperturb: True +use_freq_domain: False +use_rand_shift: False +min_shift: -8000 +max_shift: 8000 + +######################## Training Parameters ####################################- ASR +number_of_epochs: 10 +lr_whisper: 0.00003 +sorting: ascending +precision: fp32 # bf16, fp16 or fp32 +asr_sample_rate: 16000 +ckpt_interval_minutes: 30 # save checkpoint every N min +checkpoint_avg: 5 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 6 per GPU to fit 16GB of VRAM +batch_size: 2 +test_batch_size: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +test_beam_size: 8 + +# Whisper model parameters +freeze_whisper: False +freeze_encoder_only: False +freeze_encoder: True + +train_loader_kwargs: + batch_size: !ref + +valid_loader_kwargs: + batch_size: !ref + +test_loader_kwargs: + batch_size: !ref + +# Loss weights +sepformer_weight: 0.1 +asr_weight: 1 + +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +enhance_model: !include:../models/sepformer.yaml + + +whisper: !new:speechbrain.integrations.huggingface.whisper.Whisper + source: !ref + freeze: !ref + save_path: !ref + encoder_only: !ref + freeze_encoder: !ref + language: !ref + task: transcribe + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +nll_loss: !name:speechbrain.nnet.losses.nll_loss + +whisper_opt_class: !name:torch.optim.AdamW + lr: !ref + weight_decay: 0.01 + +valid_greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SWhisperGreedySearcher + model: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + +test_beam_searcher: 
!new:speechbrain.decoders.seq2seq.S2SWhisperBeamSearcher + module: [!ref ] + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + +lr_annealing_whisper: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +# Enhance loss +enhance_loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper + +# Change the path to use a local model instead of the remote one +asr_pretrained: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + encoder: !ref + masknet: !ref + decoder: !ref + whisper: !ref + paths: + encoder: !ref /encoder.ckpt + decoder: !ref /decoder.ckpt + masknet: !ref /masknet.ckpt + whisper: !ref /whisper.ckpt + +modules: + encoder: !ref + masknet: !ref + decoder: !ref + whisper: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + encoder: !ref + decoder: !ref + masknet: !ref + whisper: !ref + scheduler_whisper: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/RescueSpeech/ASR/noise-robust/models/sepformer.yaml b/recipes/RescueSpeech/ASR/noise-robust/models/sepformer.yaml new file mode 100644 index 0000000000..dcb5174b09 --- /dev/null +++ b/recipes/RescueSpeech/ASR/noise-robust/models/sepformer.yaml @@ -0,0 +1,54 @@ +# ################################ +# Model: SepFormer for source separation +# Authors: Sangeet Sagar (2022) +# ################################ + +num_spks: 1 + +# Encoder parameters +N_encoder_out: 256 +out_channels: 256 +kernel_size: 16 +kernel_stride: 8 + +# Specifying the network +Encoder: 
!new:speechbrain.lobes.models.dual_path.Encoder + kernel_size: !ref + out_channels: !ref + +SBtfintra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock + num_layers: 8 + d_model: !ref + nhead: 8 + d_ffn: 1024 + dropout: 0 + use_positional_encoding: True + norm_before: True + +SBtfinter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock + num_layers: 8 + d_model: !ref + nhead: 8 + d_ffn: 1024 + dropout: 0 + use_positional_encoding: True + norm_before: True + +MaskNet: !new:speechbrain.lobes.models.dual_path.Dual_Path_Model + num_spks: !ref + in_channels: !ref + out_channels: !ref + num_layers: 2 + K: 250 + intra_model: !ref + inter_model: !ref + norm: ln + linear_layer_after_inter_intra: False + skip_around_intra: True + +Decoder: !new:speechbrain.lobes.models.dual_path.Decoder + in_channels: !ref + out_channels: 1 + kernel_size: !ref + stride: !ref + bias: False diff --git a/recipes/RescueSpeech/ASR/noise-robust/rescuespeech_prepare.py b/recipes/RescueSpeech/ASR/noise-robust/rescuespeech_prepare.py new file mode 120000 index 0000000000..fb96f6b168 --- /dev/null +++ b/recipes/RescueSpeech/ASR/noise-robust/rescuespeech_prepare.py @@ -0,0 +1 @@ +../../rescuespeech_prepare.py \ No newline at end of file diff --git a/recipes/RescueSpeech/ASR/noise-robust/train.py b/recipes/RescueSpeech/ASR/noise-robust/train.py new file mode 100644 index 0000000000..c2a65ab40b --- /dev/null +++ b/recipes/RescueSpeech/ASR/noise-robust/train.py @@ -0,0 +1,856 @@ +#!/usr/bin/env python3 +"""Recipe for noise robust speech recognition. It provides a simple +combination of a unfrozen speech enhancement model (SepFormer already +fine-tuned on noisy RescueSpeech) and a speech recognition model +(fine-tuned on clean RescueSpeech). The ASR employs Whisper encoder +-decoder to fine-tune on the NLL. + +The training is performed jointly allowing both enhancement and +ASR model to update its weight. + +This is an adaption from LibriSpeech Whisper recipe. 
+ +To run this recipe, do the following: +> python train.py hparams/robust_asr_16k.yaml + +Authors + * Sangeet Sagar 2023 + * Adel Moumen 2022, 2024 + * Titouan Parcollet 2022 +""" + +import csv +import os +import sys + +import numpy as np +import torch +import torch.nn.functional as F +import torchaudio +from hyperpyyaml import load_hyperpyyaml +from pesq import pesq +from pystoi import stoi +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger +from speechbrain.utils.metric_stats import MetricStats + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.core.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + + batch = batch.to(self.device) + wavs, wav_lens = batch.clean_sig + bos_tokens, bos_tokens_lens = batch.tokens_bos + + predictions, clean = self.compute_forward_enhance(batch, stage) + + # Enhanced signal is to be fed into ASR + wavs = predictions[0] + + # We compute the padding mask and replace the values with the pad_token_id + # that the Whisper decoder expect to see. 
+ abs_tokens_lens = (bos_tokens_lens * bos_tokens.shape[1]).long() + pad_mask = ( + torch.arange(abs_tokens_lens.max(), device=self.device)[None, :] + < abs_tokens_lens[:, None] + ) + bos_tokens[~pad_mask] = self.tokenizer.pad_token_id + + # Forward encoder + decoder + enc_out, logits, _ = self.modules.whisper(wavs, bos_tokens) + + log_probs = self.hparams.log_softmax(logits) + + hyps = None + if stage == sb.Stage.VALID: + hyps, _, _, _ = self.hparams.valid_greedy_searcher( + enc_out, wav_lens + ) + elif stage == sb.Stage.TEST: + hyps, _, _, _ = self.hparams.test_beam_searcher(enc_out, wav_lens) + + return predictions, clean, [log_probs, hyps, wav_lens] + + def compute_forward_enhance(self, batch, stage): + """Forward computations from the noisy to the separated signals.""" + noisy = batch.noisy_sig + clean = batch.clean_sig + noise = batch.noise_sig[0] + + # Unpack lists and put tensors in the right device + noisy, noisy_lens = noisy + noisy, noisy_lens = noisy.to(self.device), noisy_lens.to(self.device) + # Convert clean to tensor + clean = clean[0].unsqueeze(-1).to(self.device) + + # Add speech distortions + if stage == sb.Stage.TRAIN: + with torch.no_grad(): + if self.hparams.use_speedperturb or self.hparams.use_rand_shift: + noisy, clean = self.add_speed_perturb(clean, noisy_lens) + + # Reverb already added, not adding any reverb + clean_rev = clean + noisy = clean.sum(-1) + # if we reverberate, we set the clean to be reverberant + if not self.hparams.dereverberate: + clean = clean_rev + + noise = noise.to(self.device) + len_noise = noise.shape[1] + len_noisy = noisy.shape[1] + min_len = min(len_noise, len_noisy) + + # add the noise + noisy = noisy[:, :min_len] + noise[:, :min_len] + + # fix the length of clean also + clean = clean[:, :min_len, :] + + if self.hparams.limit_training_signal_len: + noisy, clean = self.cut_signals(noisy, clean) + + # Separation + if self.use_freq_domain: + noisy_w = self.compute_feats(noisy) + est_mask = 
self.modules.masknet(noisy_w) + + sep_h = noisy_w * est_mask + est_source = self.hparams.resynth(torch.expm1(sep_h), noisy) + else: + noisy_w = self.hparams.enhance_model["Encoder"](noisy) + est_mask = self.modules.masknet(noisy_w) + + sep_h = noisy_w * est_mask + est_source = self.hparams.enhance_model["Decoder"](sep_h[0]) + + # T changed after conv1d in encoder, fix it here + T_origin = noisy.size(1) + T_est = est_source.size(1) + est_source = est_source.squeeze(-1) + if T_origin > T_est: + est_source = F.pad(est_source, (0, T_origin - T_est)) + else: + est_source = est_source[:, :T_origin] + + return [est_source, sep_h], clean.squeeze(-1) + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss NLL given predictions and targets.""" + + (log_probs, hyps, wav_lens) = predictions + batch = batch.to(self.device) + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + + loss = self.hparams.nll_loss( + log_probs, tokens_eos, length=tokens_eos_lens + ) + + if stage != sb.Stage.TRAIN: + tokens, tokens_lens = batch.tokens + + # Decode token terms to words + predicted_words = [ + self.tokenizer.decode(t, skip_special_tokens=True).strip() + for t in hyps + ] + + # Convert indices to words + target_words = undo_padding(tokens, tokens_lens) + target_words = self.tokenizer.batch_decode( + target_words, skip_special_tokens=True + ) + + if hasattr(self.hparams, "normalized_transcripts"): + predicted_words = [ + self.tokenizer.normalize(text).split(" ") + for text in predicted_words + ] + + target_words = [ + self.tokenizer.normalize(text).split(" ") + for text in target_words + ] + else: + predicted_words = [text.split(" ") for text in predicted_words] + target_words = [text.split(" ") for text in target_words] + + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def compute_objectives_enhance(self, predictions, clean): + """Computes the si-snr 
loss""" + predicted_wavs, predicted_specs = predictions + + if self.use_freq_domain: + target_specs = self.compute_feats(clean) + loss = self.hparams.enhance_loss(target_specs, predicted_specs) + else: + loss = self.hparams.enhance_loss( + clean.unsqueeze(-1), predicted_wavs.unsqueeze(-1) + ) + return loss.mean() + + def fit_batch(self, batch): + """Train the parameters given a single batch in input""" + + predictions, clean, outputs = self.compute_forward( + batch, sb.Stage.TRAIN + ) + enhance_loss = ( + self.compute_objectives_enhance(predictions, clean) + * self.hparams.sepformer_weight + ) + loss = ( + self.compute_objectives(outputs, batch, sb.Stage.TRAIN) + * self.hparams.asr_weight + ) + loss = torch.add(enhance_loss, loss) + + if loss.requires_grad: + loss.backward() + + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.optimizer.step() + self.optimizer.zero_grad() + + return loss.detach() + + def evaluate_batch(self, batch, stage): + """Computations needed for validation/test batches""" + predictions, clean, outputs = self.compute_forward(batch, stage=stage) + + with torch.no_grad(): + enhance_loss = ( + self.compute_objectives_enhance(predictions, clean) + * self.hparams.sepformer_weight + ) + loss = ( + self.compute_objectives(outputs, batch, stage=stage) + * self.hparams.asr_weight + ) + loss = torch.add(enhance_loss, loss) + + if stage != sb.Stage.TRAIN: + self.pesq_metric.append( + ids=batch.id, predict=predictions[0].cpu(), target=clean.cpu() + ) + return loss.detach() + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + # Define function taking (prediction, target) for parallel eval + def pesq_eval(pred_wav, target_wav): + """Computes the PESQ evaluation metric""" + psq_mode = ( + "wb" if self.hparams.enhance_sample_rate == 16000 else "nb" + ) + try: + return pesq( + fs=self.hparams.enhance_sample_rate, + ref=target_wav.numpy(), + 
deg=pred_wav.numpy(), + mode=psq_mode, + ) + except Exception: + print("pesq encountered an error for this data item") + return 0 + + self.pesq_metric = MetricStats( + metric=pesq_eval, n_jobs=1, batch_eval=False + ) + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + # Freeze models before training + else: + for model in self.hparams.frozen_models: + if ( + hasattr(self.hparams, "unfreeze_epoch") + and epoch >= self.hparams.unfreeze_epoch + and ( + not hasattr(self.hparams, "unfrozen_models") + or model in self.hparams.unfrozen_models + ) + ): + self.modules[model].train() + for p in self.modules[model].parameters(): + p.requires_grad = True # Model's weight will be updated + else: + self.modules[model].eval() + for p in self.modules[model].parameters(): + p.requires_grad = False # Model is frozen + + def on_evaluate_start(self, max_key=None, min_key=None): + self.checkpointer.recover_if_possible(max_key=max_key, min_key=min_key) + checkpoints = self.checkpointer.find_checkpoints( + min_key=min_key, + max_key=max_key, + max_num_checkpoints=self.hparams.checkpoint_avg, + ) + for model in self.modules: + if ( + model not in self.hparams.frozen_models + or hasattr(self.hparams, "unfrozen_models") + and model in self.hparams.unfrozen_models + ): + model_state_dict = sb.utils.checkpoints.average_checkpoints( + checkpoints, model + ) + self.modules[model].load_state_dict(model_state_dict) + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["pesq"] = self.pesq_metric.summarize("average") + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. 
+ if stage == sb.Stage.VALID: + old_lr_whisper, new_lr_whisper = self.hparams.lr_annealing_whisper( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate( + self.optimizer, new_lr_whisper + ) + + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch, "lr_whisper": old_lr_whisper}, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + with open(self.hparams.test_wer_file, "w", encoding="utf-8") as w: + self.wer_metric.write_stats(w) + + def add_speed_perturb(self, clean, targ_lens): + """ + Adds speed perturbation and random_shift to the input signals + (Only for enhance Model) + """ + + min_len = -1 + recombine = False + + if self.hparams.use_speedperturb: + # Performing speed change (independently on each source) + new_clean = [] + recombine = True + + for i in range(clean.shape[-1]): + new_target = self.hparams.speed_perturb(clean[:, :, i]) + new_clean.append(new_target) + if i == 0: + min_len = new_target.shape[-1] + else: + if new_target.shape[-1] < min_len: + min_len = new_target.shape[-1] + + if self.hparams.use_rand_shift: + # Performing random_shift (independently on each source) + recombine = True + for i in range(clean.shape[-1]): + rand_shift = torch.randint( + self.hparams.min_shift, self.hparams.max_shift, (1,) + ) + new_clean[i] = new_clean[i].to(self.device) + new_clean[i] = torch.roll( + new_clean[i], shifts=(rand_shift[0],), dims=1 + ) + + # Re-combination + if recombine: + if self.hparams.use_speedperturb: + clean = torch.zeros( + clean.shape[0], + min_len, + clean.shape[-1], + device=clean.device, + dtype=torch.float, + ) + for i, new_target in enumerate(new_clean): + clean[:, :, i] = new_clean[i][:, 0:min_len] + + noisy = clean.sum(-1) + return 
noisy, clean + + def cut_signals(self, noisy, clean): + """ + This function selects a random segment of a given length within the noisy. + The corresponding clean are selected accordingly + (Only for enhance Model) + """ + randstart = torch.randint( + 0, + 1 + max(0, noisy.shape[1] - self.hparams.training_signal_len), + (1,), + ).item() + clean = clean[ + :, randstart : randstart + self.hparams.training_signal_len, : + ] + noisy = noisy[ + :, randstart : randstart + self.hparams.training_signal_len + ] + return noisy, clean + + def fix_sample_rate(self, wavs): + """ + Fix sample rate of all samples in a batch + """ + resampled_wavs = [] + for wav in wavs: + wav = wav.cpu() + resampled_wavs.append( + torchaudio.transforms.Resample( + self.hparams.enhance_sample_rate, + self.hparams.asr_sample_rate, + )(wav).to(self.device) + ) + return torch.stack(resampled_wavs, dim=-2) + + def save_audio(self, snt_id, noisy, clean, predictions, batch): + """ + saves the test audio (noisy, clean, and estimated sources) on disk + (Only for enhance Model) + """ + # Create output folder + f_name = batch.noisy_wav[0].split("/")[-1].replace(".wav", "") + save_path = os.path.join( + self.hparams.output_folder, "enhanced_wavs", f_name + ) + os.makedirs(save_path, exist_ok=True) + + # Estimated source + signal = predictions[0, :] + signal = signal / signal.abs().max() + save_file = os.path.join(save_path, "enhanced.wav") + audio_io.save( + save_file, + signal.unsqueeze(0).cpu(), + self.hparams.enhance_sample_rate, + ) + + # Original source + signal = clean[0, :] + signal = signal / signal.abs().max() + save_file = os.path.join(save_path, "clean.wav") + audio_io.save( + save_file, + signal.unsqueeze(0).cpu(), + self.hparams.enhance_sample_rate, + ) + + # noisy + signal = noisy[0][0, :] + signal = signal / signal.abs().max() + save_file = os.path.join(save_path, "noisy.wav") + audio_io.save( + save_file, + signal.unsqueeze(0).cpu(), + self.hparams.enhance_sample_rate, + ) + + def 
save_results(self, test_data): + """This script computes the SDR and SI-SNR metrics and saves + them into a csv file""" + # This package is required for SDR computation + from mir_eval.separation import bss_eval_sources + + # Create folders where to store audio + save_file = os.path.join(self.hparams.output_folder, "test_results.csv") + + count = 0 + # Variable init + all_sisnrs = [] + all_sisnrs_i = [] + all_pesqs = [] + all_stois = [] + all_sdrs = [] + all_sdrs_i = [] + csv_columns = [ + "snt_id", + "snt", + "sdr", + "sdr_i", + "si-snr", + "si-snr_i", + "pesq", + "stoi", + "csig", + "cbak", + "covl", + ] + + test_loader = sb.dataio.dataloader.make_dataloader(test_data) + + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: + writer = csv.DictWriter(results_csv, fieldnames=csv_columns) + writer.writeheader() + + # Loop over all test sentence + with tqdm(test_loader, dynamic_ncols=True) as t: + for i, batch in enumerate(t): + # Apply Enhancement + noisy, noisy_len = batch.noisy_sig + snt_id = batch.id + clean = batch.clean_sig + noisy_wav = ( + batch.noisy_wav[0].split("/")[-1].replace(".wav", "") + ) + + with torch.no_grad(): + predictions, clean = self.compute_forward_enhance( + batch, sb.Stage.TEST + ) + + # Write enhanced wavs for sanity check + if self.hparams.save_audio: + self.save_audio( + snt_id[0], + batch.noisy_sig, + clean, + predictions[0], + batch, + ) + + psq_mode = ( + "wb" + if self.hparams.enhance_sample_rate == 16000 + else "nb" + ) + + try: + # Compute SI-SNR + sisnr = self.compute_objectives_enhance( + predictions, clean + ) + + # Compute SI-SNR improvement + noisy_signal = noisy + + noisy_signal = noisy_signal.to(clean.device) + sisnr_baseline = self.compute_objectives_enhance( + [noisy_signal.squeeze(-1), None], clean + ) + sisnr_i = sisnr - sisnr_baseline + + # Compute SDR + sdr, _, _, _ = bss_eval_sources( + clean[0].t().cpu().numpy(), + predictions[0][0].t().detach().cpu().numpy(), + ) + + sdr_baseline, _, _, _ = 
bss_eval_sources( + clean[0].t().cpu().numpy(), + noisy_signal[0].t().detach().cpu().numpy(), + ) + + sdr_i = sdr.mean() - sdr_baseline.mean() + + # Compute PESQ + psq = pesq( + self.hparams.enhance_sample_rate, + clean.squeeze().cpu().numpy(), + predictions[0].squeeze().cpu().numpy(), + mode=psq_mode, + ) + # Compute STOI + stoi_score = stoi( + clean.squeeze().cpu().numpy(), + predictions[0].squeeze().cpu().numpy(), + fs_sig=self.hparams.enhance_sample_rate, + extended=False, + ) + + except Exception: + # this handles all sorts of error that may + # occur when evaluating a enhanced file. + count += 1 + continue + + # Saving on a csv file + row = { + "snt_id": snt_id[0], + "snt": noisy_wav, + "si-snr": -sisnr.item(), + "si-snr_i": -sisnr_i.item(), + "pesq": psq, + "stoi": stoi_score, + "sdr": sdr.mean(), + "sdr_i": sdr_i, + } + writer.writerow(row) + + # Metric Accumulation + all_sisnrs.append(-sisnr.item()) + all_sisnrs_i.append(-sisnr_i.item()) + all_pesqs.append(psq) + all_stois.append(stoi_score) + all_sdrs.append(sdr.mean()) + all_sdrs_i.append(sdr_i.mean()) + + row = { + "snt_id": "avg", + "snt": "avg", + "si-snr": np.array(all_sisnrs).mean(), + "si-snr_i": np.array(all_sisnrs_i).mean(), + "pesq": np.array(all_pesqs).mean(), + "stoi": np.array(all_stois).mean(), + "sdr": np.array(all_sdrs).mean(), + "sdr_i": np.array(all_sdrs_i).mean(), + } + writer.writerow(row) + + logger.info(f"Mean SISNR is {np.array(all_sisnrs).mean()}") + logger.info(f"Mean SISNRi is {np.array(all_sisnrs_i).mean()}") + logger.info(f"Mean PESQ {np.array(all_pesqs).mean()}") + logger.info(f"Mean STOI {np.array(all_stois).mean()}") + logger.info(f"Mean SDR is {np.array(all_sdrs).mean()}") + logger.info(f"Mean SDRi is {np.array(all_sdrs_i).mean()}") + logger.info(f"Total discarded files {count}") + + +# Define custom data procedure +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. 
+ It also defines the data processing pipeline through user-defined functions. + """ + + # 1. Define datasets + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_loader_kwargs"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_loader_kwargs"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + # We also sort the validation data so it is faster to validate + valid_data = valid_data.filtered_sorted(sort_key="duration") + + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, + ) + + # We also sort the validation data so it is faster to validate + test_data = test_data.filtered_sorted(sort_key="duration") + + datasets = [train_data, valid_data, test_data] + + # 2. 
Define audio pipeline: + @sb.utils.data_pipeline.takes("clean_wav") + @sb.utils.data_pipeline.provides("clean_sig") + def audio_pipeline_clean(wav): + info = audio_io.info(wav) + clean_sig = sb.dataio.dataio.read_audio(wav) + clean_sig = torchaudio.transforms.Resample( + info.sample_rate, + hparams["enhance_sample_rate"], + )(clean_sig) + return clean_sig + + @sb.utils.data_pipeline.takes("noise_wav") + @sb.utils.data_pipeline.provides("noise_sig") + def audio_pipeline_noise(wav): + info = audio_io.info(wav) + noise_sig = sb.dataio.dataio.read_audio(wav) + noise_sig = torchaudio.transforms.Resample( + info.sample_rate, + hparams["enhance_sample_rate"], + )(noise_sig) + return noise_sig + + @sb.utils.data_pipeline.takes("noisy_wav") + @sb.utils.data_pipeline.provides("noisy_wav", "noisy_sig") + def audio_pipeline_noisy(wav): + info = audio_io.info(wav) + noisy_sig = sb.dataio.dataio.read_audio(wav) + noisy_sig = torchaudio.transforms.Resample( + info.sample_rate, + hparams["enhance_sample_rate"], + )(noisy_sig) + return wav, noisy_sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_clean) + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_noise) + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_noisy) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("wrd") + @sb.utils.data_pipeline.provides( + "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + if hasattr(hparams, "normalized_transcripts"): + wrd = tokenizer.normalize(wrd) + yield wrd + tokens_list = tokenizer.encode(wrd, add_special_tokens=False) + yield tokens_list + tokens_list = tokenizer.build_inputs_with_special_tokens(tokens_list) + tokens_bos = torch.LongTensor(tokens_list[:-1]) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list[1:]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. 
Set output: + sb.dataio.dataset.set_output_keys( + datasets, + [ + "id", + "clean_sig", + "noise_sig", + "noisy_sig", + "tokens_list", + "tokens_bos", + "tokens_eos", + "tokens", + "noisy_wav", + ], + ) + + return train_data, valid_data, test_data + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # 1. # Dataset prep (parsing RescueSpeech dataset) + from rescuespeech_prepare import prepare_RescueSpeech # noqa + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_RescueSpeech, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "train_tsv_file": hparams["train_tsv_file"], + "dev_tsv_file": hparams["dev_tsv_file"], + "test_tsv_file": hparams["test_tsv_file"], + "accented_letters": hparams["accented_letters"], + "skip_prep": hparams["skip_prep"], + }, + ) + + # Defining tokenizer and loading it + tokenizer = hparams["whisper"].tokenizer + + # here we create the datasets objects as well as tokenization and encoding + train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer) + + # Load pre-trained models + pretrained = "asr_pretrained" + if pretrained in hparams: + run_on_main(hparams[pretrained].collect_files) + hparams[pretrained].load_collected() + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + opt_class=hparams["whisper_opt_class"], + ) + + # Adding objects to trainer. 
+ asr_brain.tokenizer = tokenizer + + # determine if frequency domain enhancement or not + use_freq_domain = hparams.get("use_freq_domain", False) + asr_brain.use_freq_domain = use_freq_domain + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["train_loader_kwargs"], + valid_loader_kwargs=hparams["valid_loader_kwargs"], + ) + + # Testing + asr_brain.evaluate( + test_data, + min_key="WER", + test_loader_kwargs=hparams["test_loader_kwargs"], + ) + + # Eval + asr_brain.save_results(test_data) diff --git a/recipes/RescueSpeech/README.md b/recipes/RescueSpeech/README.md new file mode 100755 index 0000000000..57d1a4c5e9 --- /dev/null +++ b/recipes/RescueSpeech/README.md @@ -0,0 +1,116 @@ +# Noise robust speech recognition on RescueSpeech dataset +[RescuSpeech](https://doi.org/10.5281/zenodo.8077622) is a dataset specifically designed for performing noise robust speech recognition in the Search and Rescue domain. In this repository, we provide training recipes and pre-trained models for the best setup that have been developed and evaluated using RescuSpeech data. This aims to enhance the performance of speech recognizers in challenging and noisy environments. + +Our [paper](https://arxiv.org/abs/2306.04054) compares ASR models (CRDNN, Wav2vec2, WavLM, Whisper) and speech-enhancement systems (SepFormer). This recipe contains the best-performing model, which is based on a simple combination of a speech enhancement model (**SepFormer**) and an ASR (**Whisper**) model. The models are trained jointly and then combined to tackle noise interference. + +- Link to dataset: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8077622.svg)](https://doi.org/10.5281/zenodo.8077622) +- Language: German (DE) + + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +## How to run +``` +cd RescueSpeech/ASR/noise-robust +python train.py hparams/robust_asr_16k.yaml --data_folder= +``` +Here the data path should be the path to **uncompressed `Task_ASR.tar.gz`** downloaded from link above. + +# How to run on test sets only +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: + +```shell +cd RescueSpeech/ASR/noise-robust +python train.py hparams/robust_asr_16k.yaml --data_folder= --test_only +``` +## Computing power +Please note that running this recipe can be computationally demanding due to the Whisper ASR (`whisper-large-v2`) model with 906.5M parameters (compared to 1.5B parameters in the original model but feature encoder is frozen in our case). When fine-tuning both the Whisper and SepFormer models together, we used an Nvidia A100-80 GB GPU, which took approximately 15 minutes per epoch. + +## Results +During training, both speech enhancement and ASR is kept unfrozen- i.e. both ASR and enhance loss are backpropagated and weights are updated. + +| Model | SISNRi | SDRi | PESQ | STOI | *WER* | +|------ |--------|-------|-------|-------|---- | +| Whisper (`large-v2`)| 7.482 | 8.011 | 2.083 | 0.854 | **45.29** | + + +## Fine-tuned models +1. Firstly, the SepFormer enhancement model is trained on the Microsoft-DNS dataset. Subsequently, it undergoes fine-tuning with our RescueSpeech *enhancement* dataset (first row in the table below). +2. The Whisper ASR is fine-tuned on the RescueSpeech *ASR* dataset (second row in the table below). +3. Finally, the fine-tuned SepFormer and Whisper ASR models are jointly fine-tuned using our RescueSpeech *ASR* dataset. This represents the best model reported in the table above, with its pretrained models and logs accessible in the third row of the table below. + +|S. No. 
| Model | HuggingFace link | Full Model link | +|---|----------------|------------------------------------------------|------------------------------------------------| +| 1. | Whisper ASR | [HuggingFace](https://huggingface.co/speechbrain/whisper_rescuespeech) | [Dropbox](https://www.dropbox.com/sh/dgmgi0b3bfxlfo4/AAAo3EYPXUEMZRTdRDzhw4lea?dl=) | +| 2. | Sepformer Enhancement | [HuggingFace](https://huggingface.co/speechbrain/sepformer_rescuespeech) | [Dropbox](https://www.dropbox.com/sh/edrna82oarivkzl/AACsiGQXnbAYa_bfTJzjY23qa?dl=0) | +| 3. | Sepformer + Whisper ASR (fine-tuned) | [HuggingFace](https://huggingface.co/sangeet2020/noisy-whisper-resucespeech) | [Dropbox](https://www.dropbox.com/sh/kqs2ld14fm20cxl/AACiobSLdNtXhm-4Y3IIbTeia?dl=0) | + + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` + + +**Citing RescueSpeech** +- Dataset +```bibtex +@misc{sagar_sangeet_2023_8077622, + author = {Sagar, Sangeet and + Kiefer, Bernd and + Kruijff Korbayova, Ivana}, + title = {{RescueSpeech: A German Corpus for Speech + Recognition in Search and Rescue Domain}}, + month = jun, + year = 2023, + note = {{Our work was supported under the project "A-DRZ: + Setting up the German Rescue Robotics Center" and + funded by the German Ministry of Education and + Research (BMBF), grant No. 
I3N14856.}}, + publisher = {Zenodo}, + doi = {10.5281/zenodo.8077622}, + url = {https://doi.org/10.5281/zenodo.8077622} +} +``` +- Paper +```bibtex +@misc{sagar2023rescuespeech, + title={RescueSpeech: A German Corpus for Speech Recognition in Search and Rescue Domain}, + author={Sangeet Sagar and Mirco Ravanelli and Bernd Kiefer and Ivana Kruijff Korbayova and Josef van Genabith}, + year={2023}, + eprint={2306.04054}, + archivePrefix={arXiv}, + primaryClass={eess.AS} +} +``` diff --git a/recipes/RescueSpeech/dataset.md b/recipes/RescueSpeech/dataset.md new file mode 100644 index 0000000000..d46a35beb1 --- /dev/null +++ b/recipes/RescueSpeech/dataset.md @@ -0,0 +1,113 @@ +Dear User, + +We are thrilled to introduce our latest release - the **RescueSpeech** audio dataset, comprising authentic German speech recordings obtained from simulated search and rescue (SAR) exercises. The dataset contains manually annotated recordings from native German speakers, which were initially captured at 44.1 kHz and later down-sampled to 16 kHz to obtain a set of mono-speaker-single channel audio recordings. In order to protect the identity of the speakers, their names have been anonymized. + +The RescueSpeech dataset is divided into two sets, each designed for different tasks: Automatic Speech Recognition (ASR) and Speech Enhancement. + +1. `Task_ASR.tar.gz`: For the ASR task, the dataset spans a duration of 1 hour and 36 minutes. It comprises a collection of clean-noisy pairs, where the noisy utterances are created by introducing contaminations from five different noise types sourced from the AudioSet dataset. These noise types include emergency vehicle siren, breathing, engine, chopper, and static radio noise. To match the 2412 clean utterances in the dataset, we have synthesized an equal number of corresponding noisy utterances. 
Additionally, we have provided the noise waveform files used to create the noisy utterances, ensuring transparency and reproducibility in the research community. + +2. `Task_enhancement.tar.gz`: The Speech Enhancement task dataset is larger in size compared to the ASR dataset. The primary objective of this dataset is to facilitate the fine-tuning of speech enhancement models, particularly for the five SAR noise types mentioned earlier: emergency vehicle siren, breathing, engine, chopper, and static radio noise. Given the limited duration of clean audio available (1 hour and 36 minutes), we have synthesized multiple noisy utterances with varying noise types and signal-to-noise ratio (SNR) levels, all derived from a single clean utterance. This augmentation approach allows us to generate a more extensive dataset for speech enhancement purposes while preserving the original speaker distribution. + +By providing these diverse datasets, we aim to support advancements in ASR and Speech Enhancement research, enabling the development and evaluation of robust systems that can handle real-world scenarios encountered during search and rescue operations. + +## Main contact person +------------------ +For any inquiries related to the dataset, please reach out to +Bernd Kiefer: bernd.kiefer@dfki.de + +Other contact people +-------------------- +- Ivana Kruijff‑Korbayová: ivana.kruijff@rettungsrobotik.de +- Sangeet Sagar: sangeetsagar2020@gmail.com + +## Task: ASR- Dataset details +--------------- +- Total number of recordings: 2412 +- Total duration: 1:36:10 +- Number of speakers: 26 +- Number of recordings where speaker is undetermined (indicated with ?): 38 +- Average length of dataset: 2.39 sec +- Longest duration: 15 sec +- Shortest duration: 0.28 sec + +To obtain a train/test/dev set, we perform a stratified sampling technique that ensures that the valid/test set contains a representative sample of speakers from the overall population. 
We first identify a set of unique speakers in the dataset and then randomly sample a subset of those speakers to be included in the test/dev set. The remaining speakers are assigned to the train set. + +Train Split +----------- +- Total number of files: 1591 +- Total duration: 61.86 mins +- Total number of speakers: 17 +- Speakers involved: spk01, spk02, spk05, spk07, spk08, spk09, spk10, spk11, spk12, spk13, spk16, spk19, spk20, spk21, spk22, spk23, spk25 + +Test Split +----------- +- Total number of files: 576 +- Total duration: 24.68 mins +- Total number of speakers: 5 +- Speakers involved- spk03, spk06, spk15, spk24, ? + +Dev Split +----------- +- Total number of files: 245 +- Total duration: 9.61 mins +- Total number of speakers: 4 +- Speakers involved- spk04, spk14, spk17, spk18 + + +This table represents the number of recordings in each of the three sets (train, test, and dev) for each speaker ID. The speaker IDs are listed in the first column, while the number of recordings for each speaker in each set is listed in the corresponding column. + + +| Speaker ID | train.tsv | test.tsv | dev.tsv | **Total** | +|:-----------|:----------|:---------|:--------|:------| +| ? | 0 | 38 | 0 | 38 | +| spk01 | 211 | 0 | 0 | 211 | +| spk02 | 502 | 0 | 0 | 502 | +| spk03 | 0 | 344 | 0 | 344 | +| spk04 | 0 | 0 | 204 | 204 | +| spk05 | 266 | 0 | 0 | 266 | +| spk06 | 0 | 164 | 0 | 164 | +| spk07 | 257 | 0 | 0 | 257 | +| spk08 | 25 | 0 | 0 | 25 | +| spk09 | 48 | 0 | 0 | 48 | +| spk10 | 24 | 0 | 0 | 24 | +| spk11 | 27 | 0 | 0 | 27 | +| spk12 | 7 | 0 | 0 | 7 | +| spk13 | 7 | 0 | 0 | 7 | +| spk14 | 0 | 0 | 12 | 12 | +| spk15 | 0 | 15 | 0 | 15 | +| spk16 | 8 | 0 | 0 | 8 | +| spk17 | 0 | 0 | 4 | 4 | +| spk18 | 0 | 0 | 25 | 25 | +| spk19 | 7 | 0 | 0 | 7 | +| spk20 | 37 | 0 | 0 | 37 | +| spk21 | 102 | 0 | 0 | 102 | +| spk22 | 13 | 0 | 0 | 13 | +| spk23 | 49 | 0 | 0 | 49 | +| spk24 | 0 | 15 | 0 | 15 | +| spk25 | 1 | 0 | 0 | 1 | +| **Totals** | 1591 | 576 | 245 | 2167 | + +** ? 
indicates undetermined speakers. + +## Task: Speech enhancement- Dataset details +--------------- +- Noises used: + - Static and radio noise + - Emergency vehicle and siren noise + - Engine + - Chopper + - Breathing + +| Set | # wav files | Length | +|:----------|:------------------|:--------------| +|Train | 4501 | 7.2 HRS | +|Valid | 1351 | 130 mins | +|Test | 1351 | 130 mins | + + +Thank You + + +## Acknowledgment +--------------- +This work was supported under the project A-DRZ: Setting up the German Rescue Robotics Center and funded by the German Ministry of Education and Research (BMBF), grant No. I3N14856. diff --git a/recipes/RescueSpeech/extra_requirements.txt b/recipes/RescueSpeech/extra_requirements.txt new file mode 100644 index 0000000000..c152ac2cb2 --- /dev/null +++ b/recipes/RescueSpeech/extra_requirements.txt @@ -0,0 +1,4 @@ +mir_eval +pesq +pystoi + diff --git a/recipes/RescueSpeech/rescuespeech_prepare.py b/recipes/RescueSpeech/rescuespeech_prepare.py new file mode 100755 index 0000000000..0eb44a602b --- /dev/null +++ b/recipes/RescueSpeech/rescuespeech_prepare.py @@ -0,0 +1,629 @@ +""" +Data preparation script for RescueSpeech dataset. This +script prepares CSV files for ASR and Speech Enhancement. +In the generated CSV files the column- + +`clean_noisy_mix` : alternates between the paths to the clean and +noisy speech recordings in the same order as they appear in the dataset. + +By using this script, you can easily prepare the necessary CSV files +for training and evaluating ASR models on the RescueSpeech dataset. 
def prepare_RescueSpeech(
    data_folder,
    save_folder,
    train_tsv_file=None,
    dev_tsv_file=None,
    test_tsv_file=None,
    accented_letters=False,
    skip_prep=False,
    sample_rate=16000,
    task="asr",
):
    """
    Prepares the csv files for RescueSpeech audio data.

    Arguments
    ---------
    data_folder : str
        Path to the folder where the original dataset is stored.
    save_folder : str
        The directory where to store the csv files.
    train_tsv_file : str, optional
        Path to the Train RescueSpeech .tsv file. Defaults to
        ``<data_folder>/train.tsv``.
    dev_tsv_file : str, optional
        Path to the Dev RescueSpeech .tsv file. Defaults to
        ``<data_folder>/dev.tsv``.
    test_tsv_file : str, optional
        Path to the Test RescueSpeech .tsv file. Defaults to
        ``<data_folder>/test.tsv``.
    accented_letters : bool, optional
        Defines if accented letters will be kept as individual letters or
        transformed to the closest non-accented letters.
    skip_prep : bool
        If True, skip data preparation.
    sample_rate : int, optional
        Sample rate of the wav files.
    task : str, optional
        States the task for which data preparation is being done.
        It can either be 'asr' or 'enhance'.

    Returns
    -------
    None
    """
    if skip_prep:
        return

    # Fall back to the standard tsv locations inside the data folder.
    # (The original used redundant ``else: x = x`` branches and raw
    # string concatenation for the paths.)
    if train_tsv_file is None:
        train_tsv_file = os.path.join(data_folder, "train.tsv")
    if dev_tsv_file is None:
        dev_tsv_file = os.path.join(data_folder, "dev.tsv")
    if test_tsv_file is None:
        test_tsv_file = os.path.join(data_folder, "test.tsv")

    # Setting the save folder
    os.makedirs(save_folder, exist_ok=True)

    # Setting output files
    save_csv_train = os.path.join(save_folder, "train.csv")
    save_csv_dev = os.path.join(save_folder, "dev.csv")
    save_csv_test = os.path.join(save_folder, "test.csv")

    # If the csv files already exist, the data preparation is skipped.
    if skip(save_csv_train, save_csv_dev, save_csv_test):
        for csv_path in (save_csv_train, save_csv_dev, save_csv_test):
            logger.info(
                "%s already exists, skipping data preparation!", csv_path
            )
        return

    # Additional checks to make sure the data folder contains RescueSpeech data.
    check_RescueSpeech_data_folders(data_folder)

    # Creating csv files for {train, dev, test} data
    file_pairs = zip(
        [train_tsv_file, dev_tsv_file, test_tsv_file],
        [save_csv_train, save_csv_dev, save_csv_test],
    )
    if task == "asr":
        for tsv_file, save_csv in file_pairs:
            # Prepare CSV files
            create_asr_csv(tsv_file, save_csv, data_folder, accented_letters)
    elif task == "enhance":
        create_enhance_csv(data_folder, save_csv_train, "train", sample_rate)
        create_enhance_csv(data_folder, save_csv_dev, "valid", sample_rate)
        create_enhance_csv(data_folder, save_csv_test, "test", sample_rate)
def skip(save_csv_train, save_csv_dev, save_csv_test):
    """
    Detects if the RescueSpeech data preparation has already been done.

    If the preparation has been done, we can skip it.

    Arguments
    ---------
    save_csv_train : str
        Path to train csv.
    save_csv_dev : str
        Path to dev csv.
    save_csv_test : str
        Path to test csv.

    Returns
    -------
    bool
        If True, the preparation phase can be skipped.
        If False, it must be done.
    """
    return all(
        os.path.isfile(f)
        for f in (save_csv_train, save_csv_dev, save_csv_test)
    )


def create_asr_csv(
    orig_tsv_file, csv_file, data_folder, accented_letters=False
):
    """
    Creates the csv file given a list of wav files.

    Arguments
    ---------
    orig_tsv_file : str
        Path to the RescueSpeech tsv file (standard file).
    csv_file : str
        Path to csv file that will be saved.
    data_folder : str
        Path of the RescueSpeech domain dataset (clean, noisy, noise).
    accented_letters : bool, optional
        Defines if accented letters will be kept as individual letters or
        transformed to the closest non-accented letters.

    Raises
    ------
    FileNotFoundError
        If ``orig_tsv_file`` does not exist.
    """
    # Check if the given file exists
    if not os.path.isfile(orig_tsv_file):
        msg = "\t%s doesn't exist, verify your dataset!" % (orig_tsv_file)
        logger.info(msg)
        raise FileNotFoundError(msg)

    # Load and skip the header. A context manager avoids leaking the
    # file handle (the original left the file open).
    with open(orig_tsv_file, encoding="utf-8") as tsv_in:
        loaded_csv = tsv_in.readlines()[1:]
    nb_samples = str(len(loaded_csv))

    msg = "Preparing CSV files for %s samples ..." % (str(nb_samples))
    logger.info(msg)

    # Adding some Prints
    msg = "Creating csv lists in %s ..." % (csv_file)
    logger.info(msg)

    csv_lines = [
        [
            "ID",
            "duration",
            "clean_wav",
            "noisy_wav",
            "clean_noisy_mix",
            "noise_wav",
            "noise_type",
            "snr_level",
            "spk_id",
            "wrd",
        ]
    ]

    # Noise types that may appear in the noisy file names
    noise_types = [
        "Breathing-noise",
        "Emergency-vehicle-and-siren-noise",
        "Engine-noise",
        "Chopper-noise",
        "Static-radio-noise",
    ]

    # Loop-invariant directories, hoisted out of the per-line loop.
    clean_data_fp = os.path.join(data_folder, "audio_files/clean")
    noisy_data_fp = os.path.join(data_folder, "audio_files/noisy")
    noise_data_fp = os.path.join(data_folder, "audio_files/noise")

    idx = 0
    total_duration = 0.0
    # tzip is used only to display a progress bar over the tsv lines.
    for line in tzip(loaded_csv):
        line = line[0]

        clean_fp = os.path.join(clean_data_fp, line.split("\t")[1])
        # The original `".".join(clean_fp.split("."))` was a no-op;
        # os.path.basename expresses the intent directly.
        file_name = os.path.basename(clean_fp)
        spk_id = line.split("\t")[0]
        snt_id = file_name

        # Retrieve the corresponding noisy file from the noisy data path
        clean_wav_bname = os.path.splitext(file_name)[0] + "_"
        noisy_candidates = [
            filename
            for filename in os.listdir(noisy_data_fp)
            if filename.startswith(clean_wav_bname)
        ]
        if not noisy_candidates:
            # Skip the utterance instead of crashing with an IndexError.
            logger.info("\tNo noisy file found for: %s", file_name)
            idx += 1
            continue
        noisy_file = noisy_candidates[0]
        noisy_fp = os.path.join(noisy_data_fp, noisy_file)

        # alternate between clean and noisy wav
        idx += 1
        if idx % 2 == 0:
            clean_noisy_mix = clean_fp
        else:
            clean_noisy_mix = noisy_fp

        # Get corresponding noise file
        fields = os.path.splitext(noisy_file)[0].split("_")
        fileid = fields[fields.index("fileid") + 1]
        noise_file = "noise_fileid_" + str(fileid) + ".wav"
        noise_fp = os.path.join(noise_data_fp, noise_file)

        # Get noise type. The default prevents an unbound variable (or a
        # stale value from the previous iteration) when nothing matches.
        noise_type = ""
        for item in noise_types:
            if item in noisy_file:
                noise_type = item
                break

        # Get SNR level (same default rationale as noise_type)
        snr_level = ""
        for item in fields:
            if "snr" in item:
                snr_level = item.replace("snr", "")
                break

        # Reading the signal (to retrieve duration in seconds)
        if os.path.isfile(clean_fp):
            info = audio_io.info(clean_fp)
            info_noisy = audio_io.info(noisy_fp)
        else:
            # BUGFIX: the original logged len(file_name), not the name.
            msg = "\tError loading: %s" % (str(file_name))
            logger.info(msg)
            idx += 1
            continue

        duration = info.num_frames / info.sample_rate

        # Sanity check: durations of clean and noisy must be the same
        duration_noisy = info_noisy.num_frames / info_noisy.sample_rate
        if round(duration, 3) != round(duration_noisy, 3):
            # Use the module logger instead of a bare print.
            logger.warning("Length mismatch detected for %s", file_name)

        total_duration += duration

        # Getting transcript
        words = line.split("\t")[2]

        # Unicode Normalization
        words = unicode_normalisation(words)

        # Perform data cleaning
        words = data_cleaning(words)

        # Remove accents if specified
        if not accented_letters:
            words = strip_accents(words)
            words = words.replace("'", " ")
            words = words.replace("’", " ")

        # Remove multiple spaces
        words = re.sub(" +", " ", words)

        # Remove spaces at the beginning and the end of the sentence
        words = words.lstrip().rstrip()

        # Remove too short sentences (or empty):
        if len(words.split(" ")) < 3:
            idx += 1
            continue

        # Composition of the csv_line
        csv_lines.append(
            [
                snt_id,
                str(duration),
                clean_fp,
                noisy_fp,
                clean_noisy_mix,
                noise_fp,
                noise_type,
                str(snr_level),
                spk_id,
                str(words),
            ]
        )

    # Writing the csv lines
    with open(csv_file, mode="w", encoding="utf-8") as csv_f:
        csv_writer = csv.writer(
            csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
        )
        for line in csv_lines:
            csv_writer.writerow(line)

    # Final prints
    msg = "%s successfully created!" % (csv_file)
    logger.info(msg)
    msg = "Number of samples: %s " % (str(nb_samples))
    logger.info(msg)
    msg = "Total duration: %s Hours" % (str(round(total_duration / 3600, 2)))
    logger.info(msg)
def create_enhance_csv(data_folder, csv_file, split, fs=16000):
    """
    Create the CSV file for one split of the enhancement data.

    Arguments
    ---------
    data_folder : str
        Path to synthesized RescueSpeech data for the enhancement task.
    csv_file : str
        Destination path of the prepared CSV file.
    split : str
        Which split to prepare: "train", "valid" or "test".
    fs : int
        Sampling rate. Defaults to 16000.
    """
    # All recordings of this recipe are German.
    lang = "de"
    split_dir = os.path.join(data_folder, split)

    clean_fullpaths = extract_files(split_dir, type="clean")
    noise_fullpaths = extract_files(split_dir, type="noise")
    noisy_fullpaths = extract_files(split_dir, type="noisy")
    # One language tag per clean file.
    language = [lang] * len(clean_fullpaths)

    msg = "Writing " + split + " csv files"
    logger.info(msg)
    write2csv(
        language,
        clean_fullpaths,
        noise_fullpaths,
        noisy_fullpaths,
        csv_file,
        fs,
    )


def write2csv(
    language,
    clean_fullpaths,
    noise_fullpaths,
    noisy_fullpaths,
    csv_file,
    fs=16000,
):
    """
    Write data to a CSV file in an appropriate format.

    Arguments
    ---------
    language : list
        Language tag of each audio file.
    clean_fullpaths : list
        Paths to clean audio files of the split.
    noise_fullpaths : list
        Paths to noise audio files of the split.
    noisy_fullpaths : list
        Paths to noisy audio files of the split.
    csv_file : str
        Destination path of the prepared CSV file.
    fs : int
        Sampling rate. Defaults to 16000.
    """
    csv_columns = [
        "ID",
        "language",
        "duration",
        "clean_wav",
        "clean_wav_format",
        "clean_wav_opts",
        "noise_wav",
        "noise_wav_format",
        "noise_wav_opts",
        "noisy_wav",
        "noisy_wav_format",
        "noisy_wav_opts",
    ]

    total_duration = 0
    entries = zip(language, clean_fullpaths, noise_fullpaths, noisy_fullpaths)
    with open(csv_file, "w", encoding="utf-8") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
        writer.writeheader()

        for i, (lang, clean_fp, noise_fp, noisy_fp) in enumerate(
            tqdm(entries)
        ):
            # Duration (in seconds) is read from the clean signal.
            signal = read_audio(clean_fp)
            duration = signal.shape[0] / fs
            total_duration += duration

            writer.writerow(
                {
                    "ID": i,
                    "language": lang,
                    "duration": duration,
                    "clean_wav": clean_fp,
                    "clean_wav_format": "wav",
                    "clean_wav_opts": None,
                    "noise_wav": noise_fp,
                    "noise_wav_format": "wav",
                    "noise_wav_opts": None,
                    "noisy_wav": noisy_fp,
                    "noisy_wav_format": "wav",
                    "noisy_wav_opts": None,
                }
            )

    # Final prints
    msg = "%s successfully created!" % (csv_file)
    logger.info(msg)
    msg = "Number of samples: %s " % (str(len(clean_fullpaths)))
    logger.info(msg)
    msg = "Total duration: %s Hours" % (str(round(total_duration / 3600, 2)))
    logger.info(msg)


def check_RescueSpeech_data_folders(data_folder):
    """
    Check that the data folder actually exists.

    Arguments
    ---------
    data_folder : str
        Path to folder containing data.

    Raises
    ------
    FileNotFoundError
        If the data folder does not exist.
    """
    if os.path.exists(data_folder):
        return
    err_msg = (
        "the folder %s does not exist (it is expected in "
        "the RescueSpeech dataset)" % (data_folder)
    )
    raise FileNotFoundError(err_msg)
+ """ + try: + text = unicode(text, "utf-8") + except NameError: # unicode is a default on python 3 + pass + return str(text) + + +def data_cleaning(words): + """ + Perform data cleaning + + Arguments + --------- + words : str + Text that needs to be cleaned + + Returns + ------- + str + Cleaned data + + """ + + # this replacement helps preserve the case of ß + # (and helps retain solitary occurrences of SS) + # since python's upper() converts ß to SS. + words = words.replace("ß", "0000ß0000") + words = re.sub("[^’'A-Za-z0-9öÖäÄüÜß]+", " ", words).upper() + words = words.replace("'", " ") + words = words.replace("’", " ") + words = words.replace( + "0000SS0000", "ß" + ) # replace 0000SS0000 back to ß as its initial presence in the corpus + return words + + +def strip_accents(text): + """ + Strips accents from a given text string. + + Arguments + --------- + text : str + The text from which accents are to be stripped. + + Returns + ------- + str + The text with accents stripped. + """ + + text = ( + unicodedata.normalize("NFD", text) + .encode("ascii", "ignore") + .decode("utf-8") + ) + + return str(text) + + +def extract_files(datapath, type=None): + """ + Given a dir-path, it extracts full path of all wav files + and sorts them. + + Arguments + --------- + datapath : str + Path to synthesized SAR data + type : str + Type of split: clean, noisy, noise. + + Returns + ------- + list + Sorted list of all wav files found in the given path. 
+ """ + if type: + path = os.path.join(datapath, type) + files = glob.glob(path + "/*.wav") + + # Sort all files based on the suffixed file_id (ascending order) + files.sort(key=lambda f: int(f.split("fileid_")[-1].strip(".wav"))) + else: + # Sort all files by name + files = sorted(glob.glob(datapath + "/*.wav")) + + return files diff --git a/recipes/SEP-28k/stuttering-detection/README.md b/recipes/SEP-28k/stuttering-detection/README.md new file mode 100644 index 0000000000..b23bb8f508 --- /dev/null +++ b/recipes/SEP-28k/stuttering-detection/README.md @@ -0,0 +1,52 @@ +# Stuttering Event Detection experiments with SEP-28k (speech only) +This folder contains scripts for running stuttering event detection experiments with the SEP-28k dataset (https://github.com/apple/ml-stuttering-events-dataset). The partitioning follows the suggestion of SEP-28k-E (https://github.com/th-nuernberg/ml-stuttering-events-dataset-extended). + +# Training +Run the following command to train the model: +`python train.py hparams/train.yaml` + +Note that this is a minimal working example. The model and training parameters should be modified accordingly. + +# Note on Data Preparation + +Our `sep28k_prepare.py` will: +1. Download the dataset (including deleted podcasts impossible to download from the original script). +2. Prepare train/valid/test with partitioning suggested by https://github.com/th-nuernberg/ml-stuttering-events-dataset-extended. By default, it follows the "SEP-28k-E" partitioning. + +# **About SEP-28k and SEP-28k-E** + +```bibtex +@misc{lea:2021, + author = {Colin Lea AND Vikramjit Mitra AND Aparna Joshi AND Sachin Kajarekar AND Jeffrey P. 
Bigham}, + title = {{SEP-28k}: A Dataset for Stuttering Event Detection from Podcasts with People Who Stutter}, + howpublished = {ICASSP 2021}, +} +``` +```bibtex +@incollection{bayerl_sep28k_E_2022, + title = {The {Influence} of {Dataset-Partitioning} on {Dysfluency} {Detection} {Systems}}, + booktitle = {Text, {Speech}, and {Dialogue}}, + author = {Bayerl, Sebastian P. and Wagner, Dominik and Bocklet, Tobias and Riedhammer, Korbinian}, + year = {2022}, +} +``` + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` + diff --git a/recipes/SEP-28k/stuttering-detection/extra_requirements.txt b/recipes/SEP-28k/stuttering-detection/extra_requirements.txt new file mode 100644 index 0000000000..4c3ace6692 --- /dev/null +++ b/recipes/SEP-28k/stuttering-detection/extra_requirements.txt @@ -0,0 +1,4 @@ +# Needed only for quantization +scikit-learn +# Needed only with use_tensorboard=True +tensorboard diff --git a/recipes/SEP-28k/stuttering-detection/hparams/train.yaml b/recipes/SEP-28k/stuttering-detection/hparams/train.yaml new file mode 100644 index 0000000000..3ee097c39d --- /dev/null +++ b/recipes/SEP-28k/stuttering-detection/hparams/train.yaml @@ -0,0 +1,108 @@ +seed: 0 +#---------------------------------- 
Misc -------------------------------------- +__set_seed: !apply:torch.manual_seed [!ref ] +output_folder: !ref results/ + +counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +modules: + model: !ref + +save_folder: !ref /save +train_log: !ref /train_log.txt +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref +use_tensorboard: False +skip_prep: False + +data_folder: !PLACEHOLDER +manifest_folder: !PLACEHOLDER +split_type: SEP28k-E +train_csv: !ref /_train.csv +valid_csv: !ref /_valid.csv +test_csv: !ref /_test.csv + +hpopt_mode: orion +hpopt: hpopt.yaml +ckpt_enable: True +batch_size: 256 +number_of_epochs: 5 +#---------------------------------- Classes ----------------------------------- +# Choose the classes to consider in the training. The minimal example handles +# only a binary classification, even if multiple classes are selected. +# TODO: Handle multi-class classification +Prolongation: True +Block: True +SoundRep: True +WordRep: True +Interjection: True +#----------------------------------- Feats ------------------------------------ +# Here, it is planned to fill with elements related to the dataset, +#"remove_unsure", will remove samples that don't match the pre-requisite of +# having at least a value of "annot_value" in the 5 stuttering class (+fluent) +annot_value: 2 +remove_unsure: False +#----------------------------------- Loss ------------------------------------- +#The parameter positive is used as a weight for the bce loss. The value should +#be equal to "number of negative examples/number of positive examples". +# This depends on the distribution of the class. +positive: 1 +#----------------------------------- Model ------------------------------------ +# The proposed model here is a simple example based on Whisper and a +# classification layer. 
+dropout: 0.2 +size_i: 1024 +size_h: 256 +backbone: !new:speechbrain.lobes.models.huggingface_transformers.whisper.Whisper + source: openai/whisper-base.en + encoder_only: True + freeze: False + freeze_encoder: True + save_path: !ref + +layer1: !new:torch.nn.Linear + in_features: !ref + out_features: !ref + +layer2: !new:torch.nn.Linear + in_features: !ref + out_features: 1 + +bn1: !new:speechbrain.nnet.normalization.BatchNorm1d + input_size: !ref + +bn2: !new:speechbrain.nnet.normalization.BatchNorm1d + input_size: !ref + +do: !new:torch.nn.Dropout + p: !ref + +model: !new:torch.nn.Sequential + - !ref + - !new:speechbrain.nnet.pooling.StatisticsPooling + - !new:torch.nn.Flatten + - !ref + - !ref + - !ref + - !new:torch.nn.LeakyReLU + - !ref + - !ref + - !ref + + +#-------------------------------- Scheduling ---------------------------------- +dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: 2 + +learning_rate: 0.00004 +opt_class: !name:torch.optim.AdamW + lr: !ref diff --git a/recipes/SEP-28k/stuttering-detection/sep28k_prepare.py b/recipes/SEP-28k/stuttering-detection/sep28k_prepare.py new file mode 100644 index 0000000000..2faeddfe4c --- /dev/null +++ b/recipes/SEP-28k/stuttering-detection/sep28k_prepare.py @@ -0,0 +1,111 @@ +""" +Creates data manifest files for SEP-28k +If the data does not exist, we download the data automatically. + +Authors: + * Ilias Maoudj 2024 + +Adapted from the ESC50 and Urbansound8k recipe. 
+""" + +import logging +import os +import shutil + +import pandas as pd +import requests +from tqdm import tqdm + +logger = logging.getLogger(__name__) + + +def download_dropbox(url, podcast): + headers = {"user-agent": "Wget/1.16 (linux-gnu)"} + r = requests.get(url, stream=True, headers=headers) + with open(podcast, "wb") as f: + for chunk in tqdm(r.iter_content(chunk_size=1024)): + if chunk: + f.write(chunk) + + +def download_sep28k(data_path): + """ + This function automatically downloads the SEP-28k dataset to the specified data path in the data_path variable + + Arguments + --------- + data_path: str or Path + Directory used to save the dataset. + """ + temp_path = os.path.join(data_path, "temp_download") + if not os.path.exists(temp_path): + os.mkdir(temp_path) + if not os.path.exists(os.path.join(data_path, "SEP28k-data.zip")): + logger.info( + "SEP-28k is missing. We are now downloading it. Be patient, the total size is 1.9GB. Takes 1,995,736 iterations." + ) + logger.info("**** NOW DOWNLOADING zip file *******") + download_dropbox( + "https://www.dropbox.com/scl/fi/rpavffri0odb2g25bxy58/sep28k_clips.zip?rlkey=zfxpdrek642pxu0rj64qid7gh&st=xum8yxm1&dl=0", + f"{temp_path}/SEP28k-data.zip", + ) + if not os.path.exists( + os.path.join(data_path, "SEP-28k-Extended_clips.csv") + ): + logger.info("**** NOW DOWNLOADING csv file *******") + download_dropbox( + "https://www.dropbox.com/scl/fi/amzp62bpj8zqo21kpmoqy/SEP-28k-Extended_ \ + clips.csv?rlkey=5ehd8wv1q2gyz32m2pyynlcn2&st=cb76g1r4&dl=0", + f"{temp_path}/SEP-28k-Extended_clips.csv", + ) + files = os.listdir(temp_path) + for fl in files: + shutil.move(os.path.join(temp_path, fl), data_path) + shutil.rmtree(os.path.join(temp_path)) + if not os.path.exists(os.path.join(data_path, "SEP28k-data")): + shutil.unpack_archive( + os.path.join(data_path, "SEP28k-data.zip"), data_path + ) + logger.info(f"SEP-28k is downloaded in {data_path}") + + +def prepare_sep28k( + data_folder, manifest_folder, split_type="SEP28k-E", 
skip_prep=False +): + """ + Prepares the csv files for SEP-28k audio data. + + Arguments + --------- + data_folder: str + Where to save the dataset + manifest_folder: str + Where to save the manifest files + split_type: str + Which partitioning to use (can be either SEP12k, SEP28k-E [default], SEP28k-T, SEP28k-D) + skip_prep: bool + Whether to build the train/valid/test files or not + + Returns + ------- + None + """ + if skip_prep: + return + if not os.path.exists(data_folder): + os.mkdir(data_folder) + if not os.path.exists(manifest_folder): + os.mkdir(manifest_folder) + download_sep28k(data_folder) + + df = pd.read_csv(f"{data_folder}/SEP-28k-Extended_clips.csv") + df["ID"] = df.index + df_train = df[df[split_type] == "train"] + df_valid = df[df[split_type] == "dev"] + df_test = df[df[split_type] == "test"] + + df_train.to_csv(f"{manifest_folder}/{split_type}_train.csv") + df_valid.to_csv(f"{manifest_folder}/{split_type}_valid.csv") + df_test.to_csv(f"{manifest_folder}/{split_type}_test.csv") + df_all = pd.concat([df_train, df_valid, df_test]) + df_all.to_csv(f"{manifest_folder}/{split_type}_clean.csv") diff --git a/recipes/SEP-28k/stuttering-detection/train.py b/recipes/SEP-28k/stuttering-detection/train.py new file mode 100644 index 0000000000..05b2587a8d --- /dev/null +++ b/recipes/SEP-28k/stuttering-detection/train.py @@ -0,0 +1,258 @@ +import sys +from functools import partial + +import numpy as np +import torch +from hyperpyyaml import load_hyperpyyaml +from sep28k_prepare import prepare_sep28k +from sklearn.metrics import confusion_matrix +from torch.utils.tensorboard import SummaryWriter + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils import hpopt as hp +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.metric_stats import BinaryMetricStats + + +class SEP28kBrain(sb.Brain): + def compute_feats(self, wavs, lens, stage): + """Verify wavs length (although padding and lens handles 
it)""" + # All clips should be 16Khz and 3 seconds long thus size 48000 + if wavs.shape[1] > 48000: + wavs = wavs[:, :48000] + elif wavs.shape[1] < 48000: + pad = torch.zeros([wavs.shape[0], 48000 - wavs.shape[1]]).to( + self.device + ) + wavs = torch.cat([wavs, pad], dim=1) + return wavs + + def compute_forward(self, batch, stage): + """Input waveform into the model and outputs binary classification""" + batch = batch.to(self.device) + waveforms, lens = batch.waveform + waveforms = self.compute_feats(waveforms, lens, stage) + bin_out = self.modules.model(waveforms) + return {"bin_pred": bin_out} + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (Binary Cross Entropy) given predictions and targets.""" + labels = batch.label.data + loss = sb.nnet.losses.bce_loss( + predictions["bin_pred"].squeeze(1).float(), + labels.squeeze(1).float(), + pos_weight=torch.Tensor([self.hparams.positive]).to(self.device), + ) + binary_preds = torch.round( + torch.sigmoid(predictions["bin_pred"]) + ) # torch.argmax(, axis=1) + self.metrics.append(batch.id, binary_preds, labels) + return loss + + def on_stage_start(self, stage, epoch): + "Gets called when a stage (either training, validation, test) starts." 
+ self.metrics = BinaryMetricStats() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a stage.""" + self.compute_metrics(epoch, stage, stage_loss) + if stage != sb.Stage.TEST and self.hparams.use_tensorboard: + writer.add_scalar( + f"Loss/{stage.name.split('.')[-1].lower()}", stage_loss, epoch + ) + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + else: + stage_stats = {"loss": stage_loss} + stage_stats["f1-score"] = self.fscore * 100 + if stage == sb.Stage.VALID: + self.stage_loss = stage_loss + if self.hparams.ckpt_enable: + self.checkpointer.save_and_keep_only( + meta=stage_stats, + min_keys=["loss"], + keep_recent=False, + name=f"ckpt_{epoch}", + ) + if stage_loss < self.best_loss: + self.best_loss = stage_loss + self.best_fscore = self.fscore + self.best_epoch = epoch + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch}, + train_stats={"loss": self.train_loss}, + valid_stats=stage_stats, + ) + elif stage == sb.Stage.TEST: + self.results = stage_stats + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.counter.current}, + test_stats=stage_stats, + ) + self.test_fscore = self.fscore + + def compute_metrics(self, epoch, stage, stage_loss): + summarized = self.metrics.summarize(threshold=0.5) + self.fscore = summarized["F-score"] + self.cf_matrix = confusion_matrix( + self.metrics.labels.cpu().detach().numpy(), + self.metrics.scores.cpu().detach().numpy(), + ) + self.hparams.train_logger.log_stats( + stats_meta={"\nbin fscore": np.round(self.fscore, 4)} + ) + self.hparams.train_logger.log_stats( + stats_meta={"confusion matrix": self.cf_matrix} + ) + + +def dataio_prep(hparams): + @sb.utils.data_pipeline.takes("Show", "EpId", "ClipId") + @sb.utils.data_pipeline.provides("id", "waveform") + def audio_pipeline(Show, EpId, ClipId): + EpId = int(EpId) + file = f"{hparams['data_folder']}/sep28k_clips/{Show}/{EpId}/{Show}_{EpId}_{ClipId}.wav" + waveform, _ = audio_io.load(file, 
def dataio_prep(hparams):
    """Create the train/valid/test DynamicItemDatasets.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide ``data_folder``,
        ``{train,valid,test}_csv``, ``remove_unsure`` and ``train_logger``.

    Returns
    -------
    dict
        Mapping from split name ("train"/"valid"/"test") to its dataset.
    """

    @sb.utils.data_pipeline.takes("Show", "EpId", "ClipId")
    @sb.utils.data_pipeline.provides("id", "waveform")
    def audio_pipeline(Show, EpId, ClipId):
        # Clips live at <data>/sep28k_clips/<Show>/<EpId>/<Show>_<EpId>_<ClipId>.wav
        EpId = int(EpId)
        file = f"{hparams['data_folder']}/sep28k_clips/{Show}/{EpId}/{Show}_{EpId}_{ClipId}.wav"
        waveform, _ = audio_io.load(file, normalize=True)
        # The item id is the (episode, clip) pair.
        return (EpId, int(ClipId)), waveform.squeeze()

    @sb.utils.data_pipeline.takes(
        "Prolongation",
        "Block",
        "SoundRep",
        "WordRep",
        "Interjection",
        "NoStutteredWords",
    )
    @sb.utils.data_pipeline.provides("label", "unsure")
    def get_label(p, b, sr, wr, inter, f):
        label, unsure = get_labels(p, b, sr, wr, inter, f)
        return label, unsure

    datasets = {}
    for dataset in ["train", "valid", "test"]:
        hparams["train_logger"].log_stats(stats_meta={"Processing": dataset})
        datasets[f"{dataset}"] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=hparams[f"{dataset}_csv"],
            dynamic_items=[audio_pipeline, get_label],
            output_keys=["id", "waveform", "label", "unsure"],
        )

        if hparams["remove_unsure"]:
            # Count the "unsure" samples, then keep only the rest.
            # NOTE(review): reverse=True sorts descending on `unsure`,
            # which would place unsure==1 samples FIRST before select_n —
            # verify against DynamicItemDataset.filtered_sorted that this
            # actually drops the unsure samples rather than keeping them.
            counter_u = 0
            for i in range(len(datasets[dataset])):
                if datasets[dataset][i]["unsure"] == 1:
                    counter_u += 1
            d = datasets[dataset].filtered_sorted(
                sort_key="unsure",
                reverse=True,
                select_n=len(datasets[dataset]) - counter_u,
            )
            datasets[dataset] = d
        hparams["train_logger"].log_stats(
            stats_meta={f"{dataset} samples": len(datasets[dataset])}
        )
    return datasets


def get_labels(p, b, sr, wr, inter, f):
    """Map per-annotator stuttering counts to a binary label.

    A sample is positive when any *enabled* class (see the hparams
    booleans) has at least ``annot_value`` annotations; a sample that is
    neither fluent nor stuttered is flagged "unsure".

    NOTE(review): reads the module-global ``hparams`` created in the
    ``__main__`` block; calling this before the hparams are loaded fails.

    Returns
    -------
    tuple
        (label, unsure), both 1-element int tensors.
    """
    annots = torch.tensor([int(p), int(b), int(sr), int(wr), int(inter)])
    classes = torch.tensor(
        [
            hparams["Prolongation"],
            hparams["Block"],
            hparams["SoundRep"],
            hparams["WordRep"],
            hparams["Interjection"],
        ]
    )
    # Zero-out the classes that are not taken into account
    annots = annots * classes
    # Positive if any enabled class reaches the annotation threshold
    label = torch.any(annots >= hparams["annot_value"])
    # if sample is not fluent nor has stutter, consider sample unsure
    if int(f) < hparams["annot_value"] and not label:
        unsure = 1  # handle "unsure" samples
    else:
        unsure = 0
    return torch.tensor([int(label)]), torch.tensor([unsure])


if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    with hp.hyperparameter_optimization(
        objective_key="loss"
    ) as hp_ctx:  # <-- Initialize the context
        hparams_file, run_opts, overrides = hp_ctx.parse_arguments(
            sys.argv[1:]
        )  # <-- Replace sb with hp_ctx
        with open(hparams_file, encoding="utf-8") as fin:
            hparams = load_hyperpyyaml(fin, overrides)
        if hp_ctx.reporter is not None:
            # Give each hpopt trial its own output folder.
            hparams["output_folder"] = (
                hparams["output_folder"] + hp.get_trial_id()
            )
            if hparams["use_tensorboard"]:
                # BUGFIX: was SummaryWriter("/tensorboard"), which wrote
                # to the filesystem root instead of the trial's folder.
                writer = SummaryWriter(
                    hparams["output_folder"] + "/tensorboard"
                )
        else:
            if hparams["use_tensorboard"]:
                writer = SummaryWriter(
                    hparams["output_folder"] + "/tensorboard"
                )

        # multi-gpu (ddp) save data preparation
        run_on_main(
            prepare_sep28k,
            kwargs={
                "data_folder": hparams["data_folder"],
                "manifest_folder": hparams["manifest_folder"],
                "split_type": hparams["split_type"],
                "skip_prep": hparams["skip_prep"],
            },
        )

        # Create experiment directory
        sb.create_experiment_directory(
            experiment_directory=hparams["output_folder"],
            hyperparams_to_save=hparams_file,
            overrides=overrides,
        )
        datasets = dataio_prep(hparams)
        # Initialize trainer. lr is re-cast to float so hpopt string
        # overrides do not break the optimizer.
        opt_class = partial(
            hparams["opt_class"].func,
            lr=float(hparams["opt_class"].keywords["lr"]),
        )
        detect_brain = SEP28kBrain(
            modules=hparams["modules"],
            opt_class=opt_class,
            run_opts=run_opts,
            hparams=hparams,
            checkpointer=hparams["checkpointer"],
        )
        # Sentinel "worst" loss; on_stage_end compares against it.
        detect_brain.best_loss = 100000
        # Fit dataset
        detect_brain.fit(
            epoch_counter=hparams["counter"],
            train_set=datasets["train"],
            valid_set=datasets["valid"],
            train_loader_kwargs=hparams["dataloader_opts"],
            valid_loader_kwargs=hparams["dataloader_opts"],
        )

        detect_brain.evaluate(
            datasets["test"],
            test_loader_kwargs=hparams["dataloader_opts"],
        )

        hp.report_result(detect_brain.results)
        if hparams["use_tensorboard"]:
            # NOTE(review): add_hparams expects a dict of hparams;
            # `overrides` comes from parse_arguments — confirm its type.
            writer.add_hparams(
                overrides,
                {
                    "score/F1-macro": detect_brain.test_fscore,
                },
            )
            writer.flush()
            writer.close()
        detect_brain.checkpointer.delete_checkpoints(num_to_keep=0)
a/recipes/SLURP/NLU/hparams/train.yaml b/recipes/SLURP/NLU/hparams/train.yaml index c9d6ebf032..627ca449f7 100644 --- a/recipes/SLURP/NLU/hparams/train.yaml +++ b/recipes/SLURP/NLU/hparams/train.yaml @@ -10,10 +10,11 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/better_tokenizer/ save_folder: !ref /save train_log: !ref /train_log.txt +test_wer_file: !ref /wer_test_real.txt # Data files # The SLURP dataset will be automatically downloaded in the specified data_folder @@ -27,14 +28,14 @@ asr_tokenizer_file: https://www.dropbox.com/s/o7gnouwdoqchotj/1000_unigram.model slu_tokenizer_file: https://www.dropbox.com/s/tmwq12r5vgcsif9/58_unigram.model?dl=1 skip_prep: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 batch_size: 16 lr: 0.0003 # token_type: unigram # ["unigram", "bpe", "char"] sorting: random -# Model parameters +####################### Model Parameters ####################################### # sample_rate: 1600 emb_size: 128 dec_neurons: 512 @@ -127,9 +128,8 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher beam_size: !ref eos_threshold: !ref temperature: !ref - using_max_attn_shift: False max_attn_shift: 30 - coverage_penalty: 0. 
+ using_max_attn_shift: False opt_class: !name:torch.optim.Adam lr: !ref @@ -147,10 +147,6 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer scheduler: !ref counter: !ref -# augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment -# sample_rate: !ref -# speeds: [95, 100, 105] - log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True diff --git a/recipes/SLURP/NLU/train.py b/recipes/SLURP/NLU/train.py index 76ca5efd8c..8607638a6e 100644 --- a/recipes/SLURP/NLU/train.py +++ b/recipes/SLURP/NLU/train.py @@ -9,14 +9,16 @@ """ +import ast import sys -import torch -import speechbrain as sb -from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main + import jsonlines -import ast import pandas as pd +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main # Define training procedure @@ -40,24 +42,20 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): + p_tokens = None + if stage == sb.Stage.TRAIN and self.step % show_results_every != 0: return p_seq, transcript_tokens_lens else: - p_tokens, scores = self.hparams.beam_searcher( + p_tokens, _, _, _ = self.hparams.beam_searcher( encoder_out, transcript_tokens_lens ) + return p_seq, transcript_tokens_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (NLL) given predictions and targets.""" - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): + if stage == sb.Stage.TRAIN and self.step % show_results_every != 0: p_seq, transcript_tokens_lens = predictions else: p_seq, transcript_tokens_lens, predicted_tokens = predictions @@ -76,9 +74,7 @@ def compute_objectives(self, predictions, batch, stage): # (No ctc loss) loss = loss_seq - if (stage != 
sb.Stage.TRAIN) or ( - self.batch_count % show_results_every == 0 - ): + if (stage != sb.Stage.TRAIN) or (self.step % show_results_every == 0): # Decode token terms to words predicted_semantics = [ slu_tokenizer.decode_ids(utt_seq).split(" ") @@ -109,7 +105,8 @@ def compute_objectives(self, predictions, batch, stage): "|", "," ) ) - except SyntaxError: # need this if the output is not a valid dictionary + # need this if the output is not a valid dictionary + except SyntaxError: dict = { "scenario": "none", "action": "none", @@ -121,35 +118,16 @@ def compute_objectives(self, predictions, batch, stage): return loss def log_outputs(self, predicted_semantics, target_semantics): - """ TODO: log these to a file instead of stdout """ + """TODO: log these to a file instead of stdout""" for i in range(len(target_semantics)): print(" ".join(predicted_semantics[i]).replace("|", ",")) print(" ".join(target_semantics[i]).replace("|", ",")) print("") - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" - self.batch_count = 0 if stage != sb.Stage.TRAIN: - self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() @@ -173,25 +151,31 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], 
+ meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -216,12 +200,14 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_valid"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_valid"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_test"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_test"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -284,15 +270,13 @@ def semantics_pipeline(semantics): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If 
--distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -328,8 +312,8 @@ def semantics_pipeline(semantics): ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Brain class initialization slu_brain = SLU( @@ -359,5 +343,4 @@ def semantics_pipeline(semantics): for i in range(len(df)): id_to_file[str(df.ID[i])] = df.wav[i].split("/")[-1] - slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt" slu_brain.evaluate(test_set, test_loader_kwargs=hparams["dataloader_opts"]) diff --git a/recipes/SLURP/README.md b/recipes/SLURP/README.md index 2d80bb8a5a..25d95f796b 100644 --- a/recipes/SLURP/README.md +++ b/recipes/SLURP/README.md @@ -1,6 +1,14 @@ # SLU recipes for SLURP This folder contains recipes for spoken language understanding (SLU) with [SLURP](https://zenodo.org/record/4274930#.YEFCYHVKg5k). +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + ### Direct recipe The "direct" maps the input speech directly to semantics using a seq2seq model. 
@@ -40,8 +48,8 @@ The following results were obtained on a 48 GB RTX 8000 (the recipe has also bee | Model | scenario (accuracy) | action (accuracy) | intent (accuracy) | Word-F1 | Char-F1 | SLU-F1 | Training time | Model link | |:------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:| -| Direct | 81.73 | 77.11 | 75.05 | 61.24 | 65.42 | 63.26 | 1 hour per epoch | https://drive.google.com/drive/folders/103t4_zqBZNqa_gGlIfteIs8_mdKhn3Rd?usp=sharing | -| Direct (HuBert) | 91.24 | 88.47 | 87.54 | 72.93 | 77.40 | 75.10 | 4 hours per epoch | https://drive.google.com/drive/folders/1LpcuFldRo_Va1OCGp1bLNdiaC7AQNJOb?usp=sharing | +| Direct | 81.73 | 77.11 | 75.05 | 61.24 | 65.42 | 63.26 | 1 hour per epoch | https://www.dropbox.com/scl/fo/c0rm2ja8oxus8q27om8ve/h?rlkey=irxzl1ea8g7e6ipk0vuc288zh&dl=0 | +| Direct (HuBert) | 91.24 | 88.47 | 87.54 | 72.93 | 77.40 | 75.10 | 4 hours per epoch | https://www.dropbox.com/scl/fo/c0rm2ja8oxus8q27om8ve/h?rlkey=irxzl1ea8g7e6ipk0vuc288zh&dl=0 | | Model | scenario (accuracy) | action (accuracy) | intent (accuracy) | Training time | |:---:|:-----:|:-----:|:-----:|:-----:| @@ -62,6 +70,15 @@ https://huggingface.co/speechbrain/SLU-direct-SLURP-hubert-enc Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/SLURP/Tokenizer/hparams/tokenizer_bpe58.yaml b/recipes/SLURP/Tokenizer/hparams/tokenizer_bpe58.yaml index 51f805b078..bf935024a7 100644 --- a/recipes/SLURP/Tokenizer/hparams/tokenizer_bpe58.yaml +++ b/recipes/SLURP/Tokenizer/hparams/tokenizer_bpe58.yaml @@ -14,7 +14,7 @@ train_csv: !ref /train-type=direct.csv valid_csv: !ref /devel-type=direct.csv skip_prep: False -# Training parameters +####################### Training Parameters #################################### token_type: unigram # ["unigram", "bpe", "char"] token_output: 58 # index(blank/eos/bos/unk) = 0 character_coverage: 1.0 diff --git a/recipes/SLURP/Tokenizer/train.py 
b/recipes/SLURP/Tokenizer/train.py index 2479f47569..b9b0671014 100644 --- a/recipes/SLURP/Tokenizer/train.py +++ b/recipes/SLURP/Tokenizer/train.py @@ -1,6 +1,6 @@ #!/usr/bin/env/python3 """Recipe for training a BPE tokenizer with SLURP. -The tokenizer coverts semantics into sub-word units that can +The tokenizer converts semantics into sub-word units that can be used to train a language (LM) or an acoustic model (AM). To run this recipe, do the following: @@ -14,18 +14,18 @@ """ import sys -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) diff --git a/recipes/SLURP/direct/hparams/train.yaml b/recipes/SLURP/direct/hparams/train.yaml index 06503f2ab0..707c84b699 100644 --- a/recipes/SLURP/direct/hparams/train.yaml +++ b/recipes/SLURP/direct/hparams/train.yaml @@ -10,23 +10,31 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/better_tokenizer/ save_folder: !ref /save train_log: !ref /train_log.txt +test_wer_file: !ref /wer_test_real.txt + +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 # Data files # The SLURP dataset will be automatically downloaded in the specified data_folder data_folder: !PLACEHOLDER # e.g, 
/localscratch/SLURP -data_folder_rirs: !ref +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. train_splits: ["train_synthetic", "train_real"] -csv_train: !ref /train-type=direct.csv -csv_valid: !ref /devel-type=direct.csv -csv_test: !ref /test-type=direct.csv +csv_train: !ref /train-type=direct.csv +csv_valid: !ref /devel-type=direct.csv +csv_test: !ref /test-type=direct.csv +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv tokenizer_file: https://www.dropbox.com/s/tmwq12r5vgcsif9/58_unigram.model?dl=1 skip_prep: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 batch_size: 16 lr: 0.0003 @@ -34,7 +42,7 @@ lr: 0.0003 sorting: random ckpt_interval_minutes: 15 # save checkpoint every N min -# Model parameters +####################### Model Parameters ####################################### sample_rate: 16000 emb_size: 128 dec_neurons: 512 @@ -51,17 +59,86 @@ slu_beam_size: 80 eos_threshold: 1.5 temperature: 1.25 +num_workers: 4 dataloader_opts: + num_workers: !ref batch_size: !ref shuffle: True epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Models -asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams - source: speechbrain/asr-crdnn-rnnlm-librispeech - run_opts: {"device":"cuda:0"} +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input 
signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Add noise to input signal +snr_low: 0 # Min SNR for noise augmentation +snr_high: 15 # Max SNR for noise augmentation +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: !ref + snr_high: !ref + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [90, 95, 105, 110] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 3 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + shuffle_augmentations: True + min_augmentations: 1 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +asr_model_source: speechbrain/asr-crdnn-rnnlm-librispeech slu_enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] @@ -93,20 +170,11 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - modules: slu_enc: !ref 
output_emb: !ref dec: !ref seq_lin: !ref - env_corrupt: !ref model: !new:torch.nn.ModuleList - [!ref , !ref , @@ -132,9 +200,8 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher beam_size: !ref eos_threshold: !ref temperature: !ref - using_max_attn_shift: False max_attn_shift: 30 - coverage_penalty: 0. + using_max_attn_shift: False opt_class: !name:torch.optim.Adam lr: !ref @@ -152,9 +219,6 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer scheduler: !ref counter: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True diff --git a/recipes/SLURP/direct/hparams/train_with_wav2vec2.yaml b/recipes/SLURP/direct/hparams/train_with_wav2vec2.yaml index f1e2be1a57..90f40f5b8b 100644 --- a/recipes/SLURP/direct/hparams/train_with_wav2vec2.yaml +++ b/recipes/SLURP/direct/hparams/train_with_wav2vec2.yaml @@ -11,10 +11,11 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/ save_folder: !ref /save train_log: !ref /train_log.txt +test_wer_file: !ref /wer_test_real.txt # Data files # The SLURP dataset will be automatically downloaded in the specified data_folder @@ -28,10 +29,10 @@ tokenizer_file: speechbrain/SLU-direct-SLURP-hubert-enc skip_prep: False -# URL for the wav2vec2 model, you can change to benchmark diffrenet models +# URL for the wav2vec2 model, you can change to benchmark different models wav2vec2_hub: "facebook/hubert-base-ls960" -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 35 batch_size: 6 lr: 0.0003 @@ -46,7 +47,7 @@ freeze_wav2vec2: False #set to true to freeze the CONV part of the wav2vec2 model freeze_wav2vec2_conv: True -# Model parameters 
+####################### Model Parameters ####################################### sample_rate: 16000 emb_size: 128 dec_neurons: 512 @@ -70,7 +71,7 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref # Models -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.hubert.HuBERT source: !ref output_norm: True freeze: !ref @@ -95,10 +96,39 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + modules: wav2vec2: !ref output_emb: !ref @@ -131,7 +161,6 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher temperature: !ref using_max_attn_shift: False max_attn_shift: 30 - coverage_penalty: 0. 
opt_class: !name:torch.optim.Adam lr: !ref diff --git a/recipes/SLURP/direct/train.py b/recipes/SLURP/direct/train.py index 83178c273c..288da8620f 100644 --- a/recipes/SLURP/direct/train.py +++ b/recipes/SLURP/direct/train.py @@ -15,14 +15,16 @@ * Mirco Ravanelli 2020 """ +import ast import sys -import torch -import speechbrain as sb -from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main + import jsonlines -import ast import pandas as pd +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main # Define training procedure @@ -33,16 +35,13 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, tokens_bos_lens = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "env_corrupt"): - wavs_noise = self.hparams.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) - tokens_bos_lens = torch.cat([tokens_bos_lens, tokens_bos_lens]) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + tokens_bos_lens = self.hparams.wav_augment.replicate_labels( + tokens_bos_lens + ) # ASR encoder forward pass with torch.no_grad(): @@ -60,34 +59,28 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - return p_seq, wav_lens + if stage == sb.Stage.TRAIN and self.step % show_results_every != 0: + return p_seq, wav_lens, None else: - p_tokens, scores = self.hparams.beam_searcher(encoder_out, wav_lens) + p_tokens, _, _, _ = self.hparams.beam_searcher( + encoder_out, wav_lens + ) + return p_seq, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (NLL) given predictions and targets.""" - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - p_seq, wav_lens = predictions - else: - p_seq, wav_lens, predicted_tokens = predictions + p_seq, wav_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens - if hasattr(self.hparams, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens ) loss_seq = self.hparams.seq_cost( @@ -97,9 +90,7 @@ def compute_objectives(self, predictions, batch, stage): # (No ctc loss) loss = loss_seq - if (stage != sb.Stage.TRAIN) or ( - self.batch_count % show_results_every == 0 - ): + if (stage != sb.Stage.TRAIN) or (self.step % 
show_results_every == 0): # Decode token terms to words predicted_semantics = [ tokenizer.decode_ids(utt_seq).split(" ") @@ -136,7 +127,14 @@ def compute_objectives(self, predictions, batch, stage): "action": "none", "entities": [], } - except SyntaxError: # need this if the output is not a valid dictionary + # need this if the output is not a valid dictionary + except SyntaxError: + _dict = { + "scenario": "none", + "action": "none", + "entities": [], + } + except ValueError: _dict = { "scenario": "none", "action": "none", @@ -148,35 +146,16 @@ def compute_objectives(self, predictions, batch, stage): return loss def log_outputs(self, predicted_semantics, target_semantics): - """ TODO: log these to a file instead of stdout """ + """TODO: log these to a file instead of stdout""" for i in range(len(target_semantics)): print(" ".join(predicted_semantics[i]).replace("|", ",")) print(" ".join(target_semantics[i]).replace("|", ",")) print("") - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" - self.batch_count = 0 if stage != sb.Stage.TRAIN: - self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() @@ -200,25 +179,31 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], 
+ meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -243,12 +228,14 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_valid"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_valid"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_test"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_test"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -292,15 +279,13 @@ def text_pipeline(semantics): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If --distributed_launch 
then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -325,13 +310,28 @@ def text_pipeline(semantics): "skip_prep": hparams["skip_prep"], }, ) + run_on_main(hparams["prepare_noise_data"]) + run_on_main(hparams["prepare_rir_data"]) # here we create the datasets objects as well as tokenization and encoding - (train_set, valid_set, test_set, tokenizer,) = dataio_prepare(hparams) + ( + train_set, + valid_set, + test_set, + tokenizer, + ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # Download pretrained ASR model + from speechbrain.inference.ASR import EncoderDecoderASR + + hparams["asr_model"] = EncoderDecoderASR.from_hparams( + source=hparams["asr_model_source"], + run_opts={"device": run_opts["device"]}, + ) # Brain class initialization slu_brain = SLU( @@ -361,5 +361,4 @@ def text_pipeline(semantics): for i in range(len(df)): id_to_file[str(df.ID[i])] = df.wav[i].split("/")[-1] - slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt" slu_brain.evaluate(test_set, test_loader_kwargs=hparams["dataloader_opts"]) diff --git a/recipes/SLURP/direct/train_with_wav2vec2.py b/recipes/SLURP/direct/train_with_wav2vec2.py index e3d4bad09b..9e639f36f5 100644 --- a/recipes/SLURP/direct/train_with_wav2vec2.py +++ b/recipes/SLURP/direct/train_with_wav2vec2.py @@ -15,14 +15,16 @@ For more wav2vec2/HuBERT results, please see https://arxiv.org/pdf/2111.02735.pdf """ +import ast import sys -import torch -import speechbrain as sb -from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main + import jsonlines -import ast import pandas as pd +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from 
speechbrain.utils.distributed import if_main_process, run_on_main class SLU(sb.Brain): @@ -32,13 +34,13 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, tokens_bos_lens = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) # encoder forward pass - wav2vec2_out = self.modules.wav2vec2(wavs) + wav2vec2_out = self.modules.wav2vec2(wavs, wav_lens) # SLU forward pass e_in = self.hparams.output_emb(tokens_bos) @@ -49,24 +51,19 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): + if stage == sb.Stage.TRAIN and self.step % show_results_every != 0: return p_seq, wav_lens else: - p_tokens, scores = self.hparams.beam_searcher( - wav2vec2_out, wav_lens + hyps, _, _, _ = self.hparams.beam_searcher( + wav2vec2_out.detach(), wav_lens ) - return p_seq, wav_lens, p_tokens + + return p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): """Computes the loss (NLL) given predictions and targets.""" - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): + if stage == sb.Stage.TRAIN and self.step % show_results_every != 0: p_seq, wav_lens = predictions else: p_seq, wav_lens, predicted_tokens = predictions @@ -74,15 +71,20 @@ def compute_objectives(self, predictions, batch, stage): ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens_eos = 
self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens + ) + loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens ) loss = loss_seq - if (stage != sb.Stage.TRAIN) or ( - self.batch_count % show_results_every == 0 - ): + if (stage != sb.Stage.TRAIN) or (self.step % show_results_every == 0): # Decode token terms to words predicted_semantics = [ tokenizer.decode_ids(utt_seq).split(" ") @@ -119,7 +121,8 @@ def compute_objectives(self, predictions, batch, stage): "action": "none", "entities": [], } - except SyntaxError: # need this if the output is not a valid dictionary + # need this if the output is not a valid dictionary + except SyntaxError: _dict = { "scenario": "none", "action": "none", @@ -131,37 +134,16 @@ def compute_objectives(self, predictions, batch, stage): return loss def log_outputs(self, predicted_semantics, target_semantics): - """ TODO: log these to a file instead of stdout """ + """TODO: log these to a file instead of stdout""" for i in range(len(target_semantics)): print(" ".join(predicted_semantics[i]).replace("|", ",")) print(" ".join(target_semantics[i]).replace("|", ",")) print("") - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.wav2vec2_optimizer.step() - self.optimizer.step() - self.wav2vec2_optimizer.zero_grad() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" - 
self.batch_count = 0 if stage != sb.Stage.TRAIN: - self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() @@ -196,15 +178,19 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def init_optimizers(self): "Initializes the wav2vec2 optimizer and model optimizer" @@ -219,15 +205,22 @@ def init_optimizers(self): ) self.checkpointer.add_recoverable("optimizer", self.optimizer) + self.optimizers_dict = { + "wav2vec_optimizer": self.wav2vec2_optimizer, + "model_optimizer": self.optimizer, + } + def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. 
+ """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -252,12 +245,14 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_valid"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_valid"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_test"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_test"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -301,15 +296,13 @@ def text_pipeline(semantics): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -336,11 +329,16 @@ def text_pipeline(semantics): ) # here we create the datasets objects as well as tokenization and encoding - (train_set, valid_set, test_set, tokenizer,) = dataio_prepare(hparams) + ( + train_set, + valid_set, + test_set, + tokenizer, + ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Move the wav2vec2 hparams["wav2vec2"] = 
hparams["wav2vec2"].to(run_opts["device"]) @@ -377,5 +375,4 @@ def text_pipeline(semantics): for i in range(len(df)): id_to_file[str(df.ID[i])] = df.wav[i].split("/")[-1] - slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt" slu_brain.evaluate(test_set, test_loader_kwargs=hparams["dataloader_opts"]) diff --git a/recipes/SLURP/extra_requirements.txt b/recipes/SLURP/extra_requirements.txt new file mode 100644 index 0000000000..c9ad36fe96 --- /dev/null +++ b/recipes/SLURP/extra_requirements.txt @@ -0,0 +1 @@ +jsonlines diff --git a/recipes/SLURP/prepare.py b/recipes/SLURP/prepare.py index c70f187f90..e0c05a6119 100644 --- a/recipes/SLURP/prepare.py +++ b/recipes/SLURP/prepare.py @@ -1,8 +1,10 @@ import os +import shutil + import jsonlines -from speechbrain.dataio.dataio import read_audio, merge_csvs + +from speechbrain.dataio.dataio import merge_csvs, read_audio from speechbrain.utils.data_utils import download_file -import shutil try: import pandas as pd @@ -147,7 +149,9 @@ def prepare_SLURP( "action": action, "entities": entities, } - semantics_ = str(semantics_dict).replace( + semantics_ = str( + semantics_dict + ).replace( ",", "|" ) # Commas in dict will make using csv files tricky; replace with pipe. semantics.append(semantics_) diff --git a/recipes/Switchboard/ASR/CTC/README.md b/recipes/Switchboard/ASR/CTC/README.md new file mode 100644 index 0000000000..3a871c217f --- /dev/null +++ b/recipes/Switchboard/ASR/CTC/README.md @@ -0,0 +1,53 @@ +# Switchboard ASR with CTC models + +This folder contains the scripts to train a wav2vec2-based system on the Switchboard dataset. + +You can download the Switchboard data at https://catalog.ldc.upenn.edu/LDC97S62. 
+ +The eval2000/Hub5 English test set can be found at: +- Speech data: https://catalog.ldc.upenn.edu/LDC2002S09 +- Transcripts: https://catalog.ldc.upenn.edu/LDC2002T43 + +Part 1 and part 2 of the Fisher corpus are available at: +- https://catalog.ldc.upenn.edu/LDC2004T19 + +# How to run +`python train_with_wav2vec.py hparams/.yaml` + +Make sure you have the "transformers" package installed in your environment (see `extra-requirements.txt`). + +# Results + +| Release | hyperparams file | Swbd WER | Callhome WER | Eval2000 WER | HuggingFace link | Full model link | GPUs | +|:-------------:|:---------------------------:| :-----:| :--------:| :--------:| :-----:| :-----:| :--------:| +| 17-09-22 | train_with_wav2vec.yaml | 8.76 | 14.67 | 11.78 | [HuggingFace](https://huggingface.co/speechbrain/asr-wav2vec2-switchboard) | n.a. | 4xA100 40GB | + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/Switchboard/ASR/CTC/hparams/train_with_wav2vec.yaml b/recipes/Switchboard/ASR/CTC/hparams/train_with_wav2vec.yaml new file mode 100644 index 0000000000..a7e531c3fe --- /dev/null +++ b/recipes/Switchboard/ASR/CTC/hparams/train_with_wav2vec.yaml @@ -0,0 +1,240 @@ +# ################################ +# Model: wav2vec2 + DNN + CTC +# Augmentation: SpecAugment +# Authors: Titouan Parcollet 2021, Dominik Wagner 2022 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made 
+seed: 1312 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/wav2vec2_large_ctc/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# URL for the biggest Fairseq english wav2vec2 model. +wav2vec2_hub: facebook/wav2vec2-large-lv60 + +# Data files +# Set the local path to the Switchboard dataset (e.g. /nfs/data/swbd) here +data_folder: !PLACEHOLDER + +# We remove utterances longer than x seconds to save some memory +# When set to 15 seconds, a tiny portion (31 of 192k) utterances +# will be removed from the train set. +# This allows for training with batch_size=4 on 40GB VRAM. +# Set to anything > 28 seconds to include all utterances. +# The longest utterance in the test set is 15.51 seconds. +# The longest utterance in the train set is 27.97 seconds. +avoid_if_longer_than: 15.52 + +splits: [train, dev] +split_ratio: [99, 1] +skip_prep: False +# The Fisher corpus is only used for Tokenizer training here +add_fisher_corpus: True +# Remove optional/deletable parts of the transcript +normalize_words: True +train_tokenizer_csv: !ref /train_lm.csv +# Maximum number of times the same utterance is allowed to appear +# in the training data +max_utt: 300 +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +# The test data is split into the full test set (test.csv), +# the Switchboard portion of the data (test_swbd.csv), +# and the Callhome portion of the data (test_callhome.csv). 
+test_csv: + - !ref /test_swbd.csv + - !ref /test_callhome.csv + - !ref /test.csv + +####################### Training Parameters #################################### +number_of_epochs: 30 +lr: 1.0 +lr_wav2vec: 0.0001 +sorting: ascending +precision: fp32 # bf16, fp16 or fp32 +sample_rate: 16000 +ckpt_interval_minutes: 30 # save checkpoint every N min + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +batch_size: 4 +test_batch_size: 2 + +dataloader_options: + batch_size: !ref + num_workers: 6 +test_dataloader_options: + batch_size: !ref + num_workers: 6 + +# BPE parameters +token_type: unigram # ["unigram", "bpe", "char"] +character_coverage: 1.0 + +####################### Model Parameters ####################################### +wav2vec_output_dim: 1024 +dnn_neurons: 1024 +freeze_wav2vec: False +dropout: 0.15 + +# Outputs +output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0 + +# Decoding parameters +# Be sure that the bos and eos index match with the BPEs ones +blank_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +test_searcher: !name:speechbrain.decoders.CTCBeamSearcher +beam_size: 143 +beam_prune_logp: -12.0 +token_prune_min_logp: -1.2 +prune_history: True +topk: 1 +alpha: 0.8 +beta: 1.2 +# can be downloaded from here https://www.openslr.org/11/ or trained with kenLM +# It can either be a .bin or .arpa ; note: .arpa is much slower at loading +# If you don't want to use an LM, comment it out or set it to null +kenlm_model_path: null + +# +# Functions and classes +# +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +enc: !new:speechbrain.nnet.containers.Sequential + input_shape: [null, null, !ref ] + linear1: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn1: !name:speechbrain.nnet.normalization.BatchNorm1d + activation: !new:torch.nn.LeakyReLU + drop: !new:torch.nn.Dropout + p: !ref + linear2: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn2: !name:speechbrain.nnet.normalization.BatchNorm1d + activation2: !new:torch.nn.LeakyReLU + drop2: !new:torch.nn.Dropout + p: !ref + linear3: !name:speechbrain.nnet.linear.Linear + n_neurons: !ref + bias: True + bn3: !name:speechbrain.nnet.normalization.BatchNorm1d + activation3: !new:torch.nn.LeakyReLU + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 + source: !ref + output_norm: True + freeze: !ref + save_path: !ref /wav2vec2_checkpoint + +##### +# Uncomment this block if you prefer to use a Fairseq pretrained model instead +# of a HuggingFace one. Here, we provide an URL that is obtained from the +# Fairseq github for the multilingual XLSR. 
+# +# wav2vec2_url: https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr_53_56k.pt +# wav2vec2: !new:speechbrain.lobes.models.fairseq_wav2vec.FairseqWav2Vec2 +# pretrained_path: !ref +# output_norm: True +# freeze: False +# save_path: !ref /wav2vec2_checkpoint/model.pt +##### + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +modules: + wav2vec2: !ref + enc: !ref + ctc_lin: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref ] +model_opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +wav2vec_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing_model: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/Switchboard/ASR/CTC/normalize_util.py b/recipes/Switchboard/ASR/CTC/normalize_util.py new file mode 120000 index 0000000000..cf3ffea7bc --- /dev/null +++ b/recipes/Switchboard/ASR/CTC/normalize_util.py @@ -0,0 +1 @@ +../normalize_util.py \ No newline at end of file diff --git a/recipes/Switchboard/ASR/CTC/switchboard_prepare.py b/recipes/Switchboard/ASR/CTC/switchboard_prepare.py new file mode 120000 index 0000000000..f98ba6cfb9 --- 
/dev/null +++ b/recipes/Switchboard/ASR/CTC/switchboard_prepare.py @@ -0,0 +1 @@ +../../switchboard_prepare.py \ No newline at end of file diff --git a/recipes/Switchboard/ASR/CTC/train_with_wav2vec.py b/recipes/Switchboard/ASR/CTC/train_with_wav2vec.py new file mode 100644 index 0000000000..b942d9d9eb --- /dev/null +++ b/recipes/Switchboard/ASR/CTC/train_with_wav2vec.py @@ -0,0 +1,444 @@ +#!/usr/bin/env python3 +"""Recipe for training a sequence-to-sequence ASR system with Switchboard. +The system employs a wav2vec2 encoder and a CTC decoder. +Decoding is performed with greedy decoding. + +To run this recipe, do the following: +> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml + +With the default hyperparameters, the system employs a pretrained wav2vec2 encoder. +The wav2vec2 model is pretrained following the model given in the hparams file. + +The neural network is trained with CTC on sub-word units (based on e.g. Byte Pairwise Encoding or a unigram language +model). + +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, tokens (e.g, characters instead of BPE), and many +other possible variations. 
+ +Authors + * Titouan Parcollet 2021 + * Dominik Wagner 2022 +""" + +import functools +import os +import sys +from pathlib import Path + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.tokenizers.SentencePiece import SentencePiece +from speechbrain.utils.data_utils import undo_padding +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.core.Brain): + def __init__( + self, + modules=None, + opt_class=None, + hparams=None, + run_opts=None, + checkpointer=None, + normalize_fn=None, + ): + self.normalize_fn = normalize_fn + + super().__init__( + modules=modules, + opt_class=opt_class, + hparams=hparams, + run_opts=run_opts, + checkpointer=checkpointer, + ) + + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) + + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + + # Forward pass + feats = self.modules.wav2vec2(wavs, wav_lens) + x = self.modules.enc(feats) + logits = self.modules.ctc_lin(x) + p_ctc = self.hparams.log_softmax(logits) + + return p_ctc, wav_lens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC) given predictions and targets.""" + + p_ctc, wav_lens = predictions + + ids = batch.id + tokens, tokens_lens = batch.tokens + + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + tokens_lens = self.hparams.wav_augment.replicate_labels(tokens_lens) + + loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) + + if stage == sb.Stage.VALID: + # Decode token terms to words + sequence = sb.decoders.ctc_greedy_decode( + p_ctc, wav_lens, blank_id=self.hparams.blank_index + ) + + predicted_words = self.tokenizer(sequence, task="decode_from_list") + + elif stage == sb.Stage.TEST: + # Decode token terms to words + sequence = test_searcher(p_ctc, wav_lens) + predicted_words = [hyp[0].text.split(" ") for hyp in sequence] + + if stage != sb.Stage.TRAIN: + # Convert indices to words + target_words = undo_padding(tokens, tokens_lens) + target_words = self.tokenizer(target_words, task="decode_from_list") + + # Check for possible word alternatives and exclusions + if stage == sb.Stage.TEST and self.normalize_fn is not None: + target_words, predicted_words = self.normalize_fn( + target_words, predicted_words + ) + + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = 
self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of an epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. + if stage == sb.Stage.VALID: + old_lr_model, new_lr_model = self.hparams.lr_annealing_model( + stage_stats["loss"] + ) + old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec( + stage_stats["loss"] + ) + sb.nnet.schedulers.update_learning_rate( + self.model_optimizer, new_lr_model + ) + if not self.hparams.wav2vec2.freeze: + sb.nnet.schedulers.update_learning_rate( + self.wav2vec_optimizer, new_lr_wav2vec + ) + self.hparams.train_logger.log_stats( + stats_meta={ + "epoch": epoch, + "lr_model": old_lr_model, + "lr_wav2vec": old_lr_wav2vec, + }, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + def init_optimizers(self): + "Initializes the wav2vec2 optimizer and model optimizer" + + self.optimizers_dict = {} + + # If the wav2vec encoder is unfrozen, we create the optimizer + if not self.hparams.wav2vec2.freeze: + self.wav2vec_optimizer = self.hparams.wav2vec_opt_class( + self.modules.wav2vec2.parameters() + ) + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "wav2vec_opt", self.wav2vec_optimizer + ) + + self.optimizers_dict["wav2vec_optimizer"] = 
self.wav2vec_optimizer + + self.model_optimizer = self.hparams.model_opt_class( + self.hparams.model.parameters() + ) + + self.optimizers_dict["model_optimizer"] = self.model_optimizer + + if self.checkpointer is not None: + self.checkpointer.add_recoverable("modelopt", self.model_optimizer) + + +def freeze_optimizers(self, optimizers): + """Freezes the wav2vec2 optimizer according to the warmup steps""" + valid_optimizers = {} + if not self.hparams.wav2vec2.freeze: + valid_optimizers["wav2vec_optimizer"] = optimizers["wav2vec_optimizer"] + valid_optimizers["model_optimizer"] = optimizers["model_optimizer"] + return valid_optimizers + + +# Define custom data procedure +def dataio_prepare(hparams, tokenizer): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + + # 1. Define datasets + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + # train_data = train_data.filtered_sorted(sort_key="duration",) + + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted( + sort_key="duration", + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["dataloader_options"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + # train_data = train_data.filtered_sorted( + # sort_key="duration", reverse=True, + # ) + train_data = train_data.filtered_sorted( + sort_key="duration", + reverse=True, + key_max_value={"duration": hparams["avoid_if_longer_than"]}, + ) + # when sorting do not shuffle in dataloader ! 
otherwise is pointless + hparams["dataloader_options"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + # We also sort the validation data so it is faster to validate + valid_data = valid_data.filtered_sorted(sort_key="duration") + + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + datasets = [train_data, valid_data] + [i for _, i in test_datasets.items()] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav", "channel", "start", "stop") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav, channel, start, stop): + # Select a speech segment from the sph file + # start and end times are already frames. + # This is done in data preparation stage. + start = int(start) + stop = int(stop) + num_frames = stop - start + sig, fs = audio_io.load(wav, num_frames=num_frames, frame_offset=start) + info = audio_io.info(wav) + + resampled = sig + # Maybe resample to 16kHz + if int(info.sample_rate) != int(hparams["sample_rate"]): + resampled = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(sig) + + resampled = resampled.transpose(0, 1).squeeze(1) + + if info.num_channels > 1: + # Select the proper audio channel of the segment + if channel == "A": + resampled = resampled[:, 0] + else: + resampled = resampled[:, 1] + return resampled + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("words") + @sb.utils.data_pipeline.provides( + "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(wrd): + tokens_list = tokenizer.sp.encode_as_ids(wrd) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "tokens_bos", "tokens_eos", "tokens"], + ) + return train_data, valid_data, test_datasets + + +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # Dataset preparation (parsing Switchboard) + from normalize_util import normalize_words, read_glm_csv # noqa + from switchboard_prepare import prepare_switchboard # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Due to DDP, we do the preparation ONLY on the main python process + run_on_main( + prepare_switchboard, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "splits": hparams["splits"], + "split_ratio": hparams["split_ratio"], + "skip_prep": hparams["skip_prep"], + "add_fisher_corpus": hparams["add_fisher_corpus"], + "max_utt": hparams["max_utt"], + }, + ) + + # Defining tokenizer and loading it + tokenizer = SentencePiece( + model_dir=hparams["save_folder"], + 
vocab_size=hparams["output_neurons"], + annotation_train=hparams["train_tokenizer_csv"], + annotation_read="words", + model_type=hparams["token_type"], + character_coverage=hparams["character_coverage"], + ) + + # Create the datasets objects as well as tokenization and encoding + train_data, valid_data, test_datasets = dataio_prepare(hparams, tokenizer) + + # Helper function that removes optional/deletable parts of the transcript + # for cleaner performance metrics + normalize_fn = None + if hparams["normalize_words"]: + normalize_fn = functools.partial( + normalize_words, + glm_alternatives=read_glm_csv(hparams["output_folder"]), + ) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + normalize_fn=normalize_fn, + ) + + asr_brain.tokenizer = tokenizer + vocab_list = [ + tokenizer.sp.id_to_piece(i) for i in range(tokenizer.sp.vocab_size()) + ] + test_searcher = hparams["test_searcher"]( + blank_index=hparams["blank_index"], + vocab_list=vocab_list, + alpha=hparams["alpha"], + beta=hparams["beta"], + beam_size=hparams["beam_size"], + beam_prune_logp=hparams["beam_prune_logp"], + token_prune_min_logp=hparams["token_prune_min_logp"], + prune_history=hparams["prune_history"], + topk=hparams["topk"], + kenlm_model_path=hparams.get("kenlm_model_path"), + ) + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["test_dataloader_options"], + ) + + # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_options"], + min_key="WER", + ) diff 
"""
This script provides some utility functions that can be used
during inference of ASR models.

The intended use is to import `normalize_words` after decoding and tokenization.
Note that this will only work, when UPPERCASE letters are used throughout the recipe.

Author
------
Dominik Wagner 2022
"""

import csv
import os
import re
import string
from collections import defaultdict

# Translation table stripping all ASCII punctuation; built once at import
# time instead of once per word.
_PUNCT_TABLE = str.maketrans("", "", string.punctuation)

# Hypothesis words we never score; the glm file would map most of these
# into hesitations anyway.
_EXCLUDED_WORDS = frozenset(
    [
        "",
        "UH",
        "UM",
        "EH",
        "MM",
        "HM",
        "AH",
        "HUH",
        "HA",
        "ER",
        "OOF",
        "HEE",
        "ACH",
        "EEE",
        "EW",
    ]
)

# cspell:disable
# Whole-word contraction rules, applied in order.  Anchoring with \b fixes
# the unanchored originals, which corrupted longer words (e.g. "THESE" ->
# "THE ISE" via "hes", "HIM" -> "HI AM" via "im") and let the prefix rule
# "didn" shadow "didnt" ("didnt" -> "DID NOTt").  A duplicate, unreachable
# rule ("can't" -> "CANNOT", placed after "can't" -> "CAN NOT") was removed.
_WORD_RULES = [
    (re.compile(p, re.IGNORECASE), r)
    for p, r in [
        (r"\bwon\'t\b", "WILL NOT"),
        (r"\bcan\'t\b", "CAN NOT"),
        (r"\blet\'s\b", "LET US"),
        (r"\bain\'t\b", "AM NOT"),
        (r"\by\'all\b", "YOU ALL"),
        (r"\bcan not\b", "CANNOT"),
        (r"\'cause\b", "BECAUSE"),
        (r"\bthats\b", "THAT IS"),
        (r"\bdont\b", "DO NOT"),
        (r"\bhes\b", "HE IS"),
        (r"\bshes\b", "SHE IS"),
        (r"\bwanna\b", "WANT TO"),
        (r"\btheyd\b", "THEY WOULD"),
        (r"\btheyre\b", "THEY ARE"),
        (r"\bhed\b", "HE WOULD"),
        (r"\bshed\b", "SHE WOULD"),
        (r"\bwouldve\b", "WOULD HAVE"),
        (r"\bcouldve\b", "COULD HAVE"),
        (r"\bcouldnt\b", "COULD NOT"),
        (r"\bcant\b", "CAN NOT"),
        (r"\bshouldve\b", "SHOULD HAVE"),
        (r"\boclock\b", "O CLOCK"),
        (r"\bo'clock\b", "O CLOCK"),
        (r"\bdidn\b", "DID NOT"),
        (r"\bdidnt\b", "DID NOT"),
        (r"\bim\b", "I AM"),
        (r"\bive\b", "I HAVE"),
        (r"\byoure\b", "YOU ARE"),
    ]
]

# Generic suffix rules (require an apostrophe, so they stay unanchored on
# the left to apply to any stem, as in the original).
_SUFFIX_RULES = [
    (re.compile(p, re.IGNORECASE), r)
    for p, r in [
        (r"n\'t", " NOT"),
        (r"\'re", " ARE"),
        (r"\'s", " IS"),
        (r"\'d", " WOULD"),
        (r"\'ll", " WILL"),
        (r"\'t", " NOT"),
        (r"\'ve", " HAVE"),
        (r"\'m", " AM"),
    ]
]
# cspell:enable


def read_glm_csv(save_folder):
    """Load the ARPA Hub4-E and Hub5-E alternate spellings and contractions map.

    Arguments
    ---------
    save_folder : str
        Directory containing the ``glm.csv`` file produced during data prep.

    Returns
    -------
    collections.defaultdict
        Maps a word to a list of valid alternative spellings.
    """
    alternatives_dict = defaultdict(list)
    glm_path = os.path.join(save_folder, "glm.csv")
    with open(glm_path, encoding="utf-8") as csv_file:
        for row in csv.reader(csv_file, delimiter=","):
            # Column 0 is the word; column 1 holds "|"-separated alternatives.
            alternatives_dict[row[0]] += row[1].split("|")
    return alternatives_dict


def expand_contractions(text) -> list:
    """Expand common contractions and split linked words.

    Arguments
    ---------
    text : str
        A single token (word) to process.

    Returns
    -------
    list
        The word(s) resulting from the expansion.
    """
    for pattern, replacement in _WORD_RULES:
        text = pattern.sub(replacement, text)
    for pattern, replacement in _SUFFIX_RULES:
        text = pattern.sub(replacement, text)

    # Split linked words (e.g. "pseudo-rebel" -> "pseudo rebel"), but keep
    # markers containing VOCALIZED (e.g. [VOCALIZED-NOISE]) intact.
    if "VOCALIZED" not in text:
        text = text.replace("-", " ")
    # str.split() with no argument also collapses repeated whitespace.
    return text.split()


def expand_contractions_batch(text_batch):
    """Apply ``expand_contractions`` to a batch of token lists.

    Arguments
    ---------
    text_batch : list
        List of utterances, each a list of word tokens
        (predicted or target words).

    Returns
    -------
    list
        Same structure, with incomplete words (starting with "-") removed
        and contractions expanded (one token may expand to several words).
    """
    parsed_batch = []
    for utterance in text_batch:
        expanded = []
        for token in utterance:
            # Remove incomplete words such as "-TION".
            if token.startswith("-"):
                continue
            expanded.extend(expand_contractions(token))
        parsed_batch.append(expanded)
    return parsed_batch


def normalize_words(
    target_words_batch, predicted_words_batch, glm_alternatives=None
):
    """
    Remove some references and hypotheses we don't want to score.
    We remove incomplete words (i.e. words that start with "-"),
    expand common contractions (e.g. I'v -> I have),
    and split linked words (e.g. pseudo-rebel -> pseudo rebel).
    Then we check if some of the predicted words have mapping rules according
    to the glm (alternatives) file.
    Finally, we check if a predicted word is on the exclusion list.
    The exclusion list contains stuff like "MM", "HM", "AH", "HUH", which would
    get mapped into hesitations by the glm file anyway.
    The goal is to remove all the things that appear in the reference as
    optional/deletable (i.e. inside parentheses).
    If we delete these tokens, there is no loss,
    and if we recognize them correctly, there is no gain.

    The procedure is adapted from Kaldi's local/score.sh script.

    Arguments
    ---------
    target_words_batch : list
        List of lists of target words, one inner list per utterance.
    predicted_words_batch : list of list
        List of lists of predicted words, one inner list per utterance.
    glm_alternatives : dict
        Dictionary containing valid word alternatives.

    Returns
    -------
    tuple
        The expanded target words and the filtered predicted words.
    """
    target_words_batch = expand_contractions_batch(target_words_batch)
    predicted_words_batch = expand_contractions_batch(predicted_words_batch)

    # Find all possible alternatives for each word in the target utterance.
    alternative2tgt_word_batch = []
    for tgt_utterance in target_words_batch:
        alternative2tgt_word = defaultdict(str)
        if glm_alternatives is not None:
            for tgt_wrd in tgt_utterance:
                # .get avoids KeyError when a plain dict is passed and keeps
                # a defaultdict from growing with empty entries.
                for alt in glm_alternatives.get(tgt_wrd, []):
                    if alt != tgt_wrd and len(alt) > 0:
                        alternative2tgt_word[alt] = tgt_wrd
        alternative2tgt_word_batch.append(alternative2tgt_word)

    # See if a predicted word is on the exclusion list,
    # and if it matches one of the valid alternatives.
    # Also do some cleaning.
    checked_predicted_words_batch = []
    for i, pred_utterance in enumerate(predicted_words_batch):
        alternative2tgt_word = alternative2tgt_word_batch[i]
        checked_predicted_words = []
        for pred_wrd in pred_utterance:
            # Remove stuff like [LAUGHTER].
            pred_wrd = re.sub(r"\[.*?\]", "", pred_wrd)
            # Remove any remaining punctuation.
            pred_wrd = pred_wrd.translate(_PUNCT_TABLE)
            # Sometimes things like LAUGHTER get appended to existing
            # words, e.g. THOUGHLAUGHTER.
            if pred_wrd != "LAUGHTER" and pred_wrd.endswith("LAUGHTER"):
                pred_wrd = pred_wrd.replace("LAUGHTER", "")
            if pred_wrd != "NOISE" and pred_wrd.endswith("NOISE"):
                pred_wrd = pred_wrd.replace("NOISE", "")
            if pred_wrd.endswith("VOCALIZED"):
                pred_wrd = pred_wrd.replace("VOCALIZED", "")
            # Check word exclusion list.
            if pred_wrd in _EXCLUDED_WORDS:
                continue
            # Finally, check word alternatives.
            tgt_wrd = alternative2tgt_word.get(pred_wrd, "")
            if len(tgt_wrd) > 0:
                pred_wrd = tgt_wrd
            if len(pred_wrd) > 0:
                checked_predicted_words.append(pred_wrd)
        checked_predicted_words_batch.append(checked_predicted_words)
    return target_words_batch, checked_predicted_words_batch
| 1xA100 40GB | + + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/Switchboard/ASR/seq2seq/hparams/train_BPE_2000.yaml b/recipes/Switchboard/ASR/seq2seq/hparams/train_BPE_2000.yaml new file mode 100644 index 0000000000..403e49178d --- /dev/null +++ b/recipes/Switchboard/ASR/seq2seq/hparams/train_BPE_2000.yaml 
@@ -0,0 +1,363 @@ +# ############################################################################ +# Model: E2E ASR with attention-based ASR +# Encoder: CRDNN model +# Decoder: GRU + beamsearch +# Tokens: BPE with unigram +# Losses: CTC + NLL +# Training: Switchboard +# Authors: Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, Peter Plantinga, +# Samuele Cornell 2020, Dominik Wagner 2022 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters +seed: 1312 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/CRDNN_BPE_NO_LM/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Pretrained tokenizer +# You need to specify a local path pointing to a +# directory containing the tokenizer.ckpt. +# NB: You need to provide a full local path, when +# the tokenizer is not loaded from HuggingFace. +pretrained_tokenizer_path: !PLACEHOLDER +tokenizer_file: !ref /tokenizer.ckpt + +# Set the local path to the Switchboard dataset (e.g. /nfs/data/swbd) here. +data_folder: !PLACEHOLDER + +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. 
+NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + +# Note that the test set will be created separately using the +# Hub5/eval2000 dataset +splits: ["train", "dev"] +split_ratio: [99, 1] +skip_prep: False +# We don't use the Fisher corpus for training the AM +# (it is only used for Tokenizer and LM training) +add_fisher_corpus: False +# Remove optional/deletable parts of the transcript +normalize_words: True +# Maximum number of times the same utterance is +# allowed to appear in the training data +max_utt: 300 +ckpt_interval_minutes: 15 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +# The test data is split into the full test set (test.csv), +# the Switchboard portion of the data (test_swbd.csv), +# and the Callhome portion of the data (test_callhome.csv). +test_csv: + - !ref /test_swbd.csv + - !ref /test_callhome.csv + - !ref /test.csv + +####################### Training Parameters #################################### +number_of_epochs: 20 +number_of_ctc_epochs: 5 +batch_size: 10 +lr: 1.0 +ctc_weight: 0.5 +sorting: ascending +dynamic_batching: False + +# dynamic batching parameters, if used +feats_hop_size: 0.01 +max_batch_length: 20000 # in terms of frames +shuffle: True +batch_ordering: random +num_buckets: 20 + +dynamic_batch_sampler: + max_batch_length: !ref + shuffle: !ref + batch_ordering: !ref + num_buckets: !ref + +# Feature parameters +sample_rate: 8000 +n_fft: 400 +n_mels: 40 + +opt_class: !name:torch.optim.Adadelta + lr: !ref + rho: 0.95 + eps: 1.e-8 + +# Dataloader options +num_workers: 4 +train_dataloader_opts: + num_workers: !ref + batch_size: !ref + +valid_dataloader_opts: + num_workers: !ref + batch_size: !ref + +test_dataloader_opts: + num_workers: !ref + batch_size: !ref + +####################### Model Parameters 
####################################### +activation: !name:torch.nn.LeakyReLU +dropout: 0.15 +cnn_blocks: 2 +cnn_channels: (128, 256) +inter_layer_pooling_size: (2, 2) +cnn_kernelsize: (3, 3) +time_pooling_size: 4 +rnn_class: !name:speechbrain.nnet.RNN.LSTM +rnn_layers: 4 +rnn_neurons: 1024 +rnn_bidirectional: True +dnn_blocks: 2 +dnn_neurons: 512 +emb_size: 128 +dec_neurons: 1024 +output_neurons: 2000 # Number of tokens used for tokenizer +blank_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_beam_size: 60 +test_beam_size: 80 +eos_threshold: 1.5 +using_max_attn_shift: True +max_attn_shift: 240 +ctc_weight_decode: 0.3 +coverage_penalty: 1.8 +temperature: 1.25 +scorer_beam_scale: 0.1 + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +############################## Augmentations ################################### + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +############################## Models ########################################## + +enc: !new:speechbrain.lobes.models.CRDNN.CRDNN + input_shape: [null, null, !ref ] + activation: !ref + dropout: !ref + cnn_blocks: !ref + cnn_channels: !ref + cnn_kernelsize: !ref + inter_layer_pooling_size: !ref + time_pooling: True + using_2d_pooling: False + time_pooling_size: !ref + rnn_class: !ref + rnn_layers: !ref + rnn_neurons: !ref + rnn_bidirectional: !ref + rnn_re_init: True + dnn_blocks: !ref + dnn_neurons: !ref + use_rnnp: False + +emb: !new:speechbrain.nnet.embedding.Embedding + num_embeddings: !ref + embedding_dim: !ref + +dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder + enc_dim: !ref + input_size: !ref + rnn_type: gru + attn_type: location + hidden_size: !ref + attn_dim: 1024 + num_layers: 1 + scaling: 1.0 + channels: 10 + kernel_size: 100 + re_init: True + dropout: !ref + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + +seq_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: 
0.1 + +# This is the RNNLM that is used according to the Huggingface repository +# NB: It has to match the pre-trained RNNLM!! +lm_model: !new:speechbrain.lobes.models.RNNLM.RNNLM + output_neurons: !ref + embedding_dim: !ref + activation: !name:torch.nn.LeakyReLU + dropout: 0.0 + rnn_layers: 2 + rnn_neurons: 2048 + dnn_blocks: 1 + dnn_neurons: 512 + return_hidden: True # For inference + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +# Models +modules: + enc: !ref + emb: !ref + dec: !ref + ctc_lin: !ref + seq_lin: !ref + normalize: !ref + lm_model: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref , !ref ] + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer + vocab_size: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + coverage: !ref + ctc: !ref + scorer_beam_scale: !ref + +test_search: !new:speechbrain.decoders.S2SRNNBeamSearcher + embedding: !ref + decoder: !ref + linear: !ref + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + eos_threshold: !ref + using_max_attn_shift: !ref + max_attn_shift: !ref + scorer: !ref + temperature: !ref + +valid_search: !new:speechbrain.decoders.S2SRNNBeamSearcher + embedding: !ref + decoder: !ref + linear: !ref + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + eos_threshold: !ref + using_max_attn_shift: !ref + max_attn_shift: !ref + scorer: !ref + temperature: !ref + +lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + scheduler: !ref + normalizer: !ref + counter: !ref + +train_logger: 
!new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + tokenizer: !ref + paths: + tokenizer: !ref diff --git a/recipes/Switchboard/ASR/seq2seq/normalize_util.py b/recipes/Switchboard/ASR/seq2seq/normalize_util.py new file mode 120000 index 0000000000..cf3ffea7bc --- /dev/null +++ b/recipes/Switchboard/ASR/seq2seq/normalize_util.py @@ -0,0 +1 @@ +../normalize_util.py \ No newline at end of file diff --git a/recipes/Switchboard/ASR/seq2seq/switchboard_prepare.py b/recipes/Switchboard/ASR/seq2seq/switchboard_prepare.py new file mode 120000 index 0000000000..f98ba6cfb9 --- /dev/null +++ b/recipes/Switchboard/ASR/seq2seq/switchboard_prepare.py @@ -0,0 +1 @@ +../../switchboard_prepare.py \ No newline at end of file diff --git a/recipes/Switchboard/ASR/seq2seq/train.py b/recipes/Switchboard/ASR/seq2seq/train.py new file mode 100644 index 0000000000..36de63c318 --- /dev/null +++ b/recipes/Switchboard/ASR/seq2seq/train.py @@ -0,0 +1,460 @@ +#!/usr/bin/env/python3 +"""Recipe for training a sequence-to-sequence ASR system with Switchboard. +The system employs an encoder, a decoder, and an attention mechanism +between them. Decoding is performed with beamsearch. + +To run this recipe, do the following: +> python train.py hparams/train_BPE1000.yaml + +With the default hyperparameters, the system employs a CRDNN encoder. +The decoder is based on a standard GRU. 
+ +The neural network is trained on both CTC and negative-log likelihood +targets and sub-word units estimated with Byte Pairwise Encoding (BPE) +are used as basic recognition tokens. + +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, tokens (e.g, characters instead of BPE), +training split, and many other possible variations. + +This recipe assumes that the tokenizer is already trained. + +Authors + * Ju-Chieh Chou 2020 + * Mirco Ravanelli 2020 + * Abdel Heba 2020 + * Peter Plantinga 2020 + * Samuele Cornell 2020 + * Andreas Nautsch 2021 + * Dominik Wagner 2022 +""" + +import functools +import os +import sys +from pathlib import Path + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.Brain): + def __init__( + self, + modules=None, + opt_class=None, + hparams=None, + run_opts=None, + checkpointer=None, + normalize_fn=None, + ): + self.normalize_fn = normalize_fn + + super().__init__( + modules=modules, + opt_class=opt_class, + hparams=hparams, + run_opts=run_opts, + checkpointer=checkpointer, + ) + + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_bos, _ = batch.tokens_bos + wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device) + + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + + # Forward pass + feats = self.hparams.compute_features(wavs) + feats = self.modules.normalize(feats, wav_lens) + x = self.modules.enc(feats.detach()) + e_in = self.modules.emb(tokens_bos) # y_in bos + tokens + h, _ = self.modules.dec(e_in, x, wav_lens) + + # Output layer for seq2seq log-probabilities + logits = self.modules.seq_lin(h) + p_seq = self.hparams.log_softmax(logits) + + # Compute outputs + if stage == sb.Stage.TRAIN: + current_epoch = self.hparams.epoch_counter.current + if current_epoch <= self.hparams.number_of_ctc_epochs: + # Output layer for ctc log-probabilities + logits = self.modules.ctc_lin(x) + p_ctc = self.hparams.log_softmax(logits) + return p_ctc, p_seq, wav_lens + else: + return p_seq, wav_lens + else: + if stage == sb.Stage.VALID: + p_tokens, _, _, _ = self.hparams.valid_search(x, wav_lens) + else: + p_tokens, _, _, _ = self.hparams.test_search(x, wav_lens) + + return p_seq, wav_lens, p_tokens + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + current_epoch = self.hparams.epoch_counter.current + if stage == sb.Stage.TRAIN: + if current_epoch <= self.hparams.number_of_ctc_epochs: + p_ctc, p_seq, wav_lens = predictions + else: + p_seq, wav_lens = predictions + else: + p_seq, wav_lens, predicted_tokens = predictions + + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + tokens, tokens_lens = batch.tokens + + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.wav_augment.replicate_multiple_labels( + tokens, 
tokens_lens, tokens_eos, tokens_eos_lens + ) + + loss_seq = self.hparams.seq_cost( + p_seq, tokens_eos, length=tokens_eos_lens + ) + + # Add ctc loss if necessary + if ( + stage == sb.Stage.TRAIN + and current_epoch <= self.hparams.number_of_ctc_epochs + ): + loss_ctc = self.hparams.ctc_cost( + p_ctc, tokens, wav_lens, tokens_lens + ) + loss = self.hparams.ctc_weight * loss_ctc + loss += (1 - self.hparams.ctc_weight) * loss_seq + else: + loss = loss_seq + + if stage != sb.Stage.TRAIN: + # Decode token terms to words + predicted_words = [ + self.tokenizer.decode_ids(utt_seq).split() + for utt_seq in predicted_tokens + ] + target_words = [wrd.split() for wrd in batch.words] + + # Check for possible word alternatives and exclusions + if stage == sb.Stage.TEST and self.normalize_fn is not None: + target_words, predicted_words = self.normalize_fn( + target_words, predicted_words + ) + + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. 
+ if stage == sb.Stage.VALID: + old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"]) + sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + self.hparams.train_logger.log_stats( + stats_meta={"epoch": epoch, "lr": old_lr}, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"]}, + min_keys=["WER"], + ) + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! 
otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for _, i in test_datasets.items()] + + # We get the tokenizer as we need it to encode the labels when creating + # mini-batches. + tokenizer = hparams["tokenizer"] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav", "channel", "start", "stop") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav, channel, start, stop): + # Select a speech segment from the sph file + # start and end times are already frames. + # This is done in data preparation stage. + start = int(start) + stop = int(stop) + num_frames = stop - start + sig, fs = audio_io.load(wav, num_frames=num_frames, frame_offset=start) + info = audio_io.info(wav) + + resampled = sig + # Maybe resample to 16kHz + if int(info.sample_rate) != int(hparams["sample_rate"]): + resampled = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(sig) + + resampled = resampled.transpose(0, 1).squeeze(1) + if info.num_channels > 1: + # Select the proper audio channel of the segment + if channel == "A": + resampled = resampled[:, 0] + else: + resampled = resampled[:, 1] + return resampled + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. 
Define text pipeline: + @sb.utils.data_pipeline.takes("words") + @sb.utils.data_pipeline.provides( + "words", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(words): + yield words + tokens_list = tokenizer.encode_as_ids(words) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "words", "tokens_bos", "tokens_eos", "tokens"], + ) + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.batch import PaddedBatch # noqa + from speechbrain.dataio.dataloader import SaveableDataLoader # noqa + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams = hparams["dynamic_batch_sampler"] + hop_size = hparams["feats_hop_size"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + **dynamic_hparams, + length_func=lambda x: int(float(x["duration"]) * (1 / hop_size)), + ) + + valid_batch_sampler = DynamicBatchSampler( + valid_data, + **dynamic_hparams, + length_func=lambda x: int(float(x["duration"]) * (1 / hop_size)), + ) + + return ( + train_data, + valid_data, + test_datasets, + train_batch_sampler, + valid_batch_sampler, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + 
overrides=overrides, + ) + + # Dataset prep (parsing Switchboard) + from normalize_util import normalize_words, read_glm_csv # noqa + from switchboard_prepare import prepare_switchboard # noqa + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_switchboard, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "splits": hparams["splits"], + "split_ratio": hparams["split_ratio"], + "skip_prep": hparams["skip_prep"], + "add_fisher_corpus": hparams["add_fisher_corpus"], + "max_utt": hparams["max_utt"], + }, + ) + run_on_main(hparams["prepare_noise_data"]) + + # create the dataset objects as well as tokenization and encoding + ( + train_data, + valid_data, + test_datasets, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams) + + # Depending on the path given in the hparams YAML file, + # we download the pretrained LM and Tokenizer + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # Helper function that removes optional/deletable parts of the transcript + # for cleaner performance metrics + normalize_fn = None + if hparams["normalize_words"]: + normalize_fn = functools.partial( + normalize_words, + glm_alternatives=read_glm_csv(hparams["output_folder"]), + ) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + normalize_fn=normalize_fn, + ) + + # We dynamically add the tokenizer to our brain class. + # NB: This tokenizer corresponds to the one used for the LM! 
+ asr_brain.tokenizer = hparams["tokenizer"] + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + train_dataloader_opts = {"batch_sampler": train_bsampler} + if valid_bsampler is not None: + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + + # Testing + for k in test_datasets.keys(): # keys are test_swbd and test_callhome + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="WER", + ) diff --git a/recipes/Switchboard/ASR/transformer/README.md b/recipes/Switchboard/ASR/transformer/README.md new file mode 100644 index 0000000000..2e67ec13fe --- /dev/null +++ b/recipes/Switchboard/ASR/transformer/README.md @@ -0,0 +1,58 @@ +# Switchboard ASR with Transformers + +This folder contains the scripts to train a transformer-based speech recognizer on the Switchboard dataset. + +You can download the Switchboard data at https://catalog.ldc.upenn.edu/LDC97S62. 
+ +The eval2000/Hub5 English test set can be found at: +- Speech data: https://catalog.ldc.upenn.edu/LDC2002S09 +- Transcripts: https://catalog.ldc.upenn.edu/LDC2002T43 + +Part 1 and part 2 of the Fisher corpus are available at: +- https://catalog.ldc.upenn.edu/LDC2004T19 +- https://catalog.ldc.upenn.edu/LDC2005T19 + +# How to run +`python train.py hparams/.yaml` + +# Results + +| Release | hyperparams file | Swbd WER | Callhome WER | Eval2000 WER | HuggingFace link | Full model link | GPUs | Comment +|:-------------:|:---------------------------:| :-----:| :-----:| :-----:| :-----:| :-----:| :--------:|:--------:| +| 17-09-22 | transformer.yaml | 9.80 | 17.89 | 13.94 | [HuggingFace](https://huggingface.co/speechbrain/asr-transformer-switchboard) | n.a. | 1xA100 40GB | This model uses an LM trained on Swbd+Fisher data (see ../../LM/hparams/transformer.yaml)| +| 17-09-22 | transformer_finetuned_LM.yaml| 9.99 | 18.98 | 14.58 | n.a. | n.a. | 1xA100 40GB | This model uses the LibriSpeech LM but finetuned on Swbd+Fisher data (see ../../LM/hparams/transformer_finetune.yaml)| + + +# Training Time +It takes about 45 minutes for each epoch on 1 NVIDIA A100 (40GB). + + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/Switchboard/ASR/transformer/hparams/transformer.yaml b/recipes/Switchboard/ASR/transformer/hparams/transformer.yaml new file mode 100644 index 0000000000..2dce433127 --- /dev/null +++ b/recipes/Switchboard/ASR/transformer/hparams/transformer.yaml @@ -0,0 +1,332 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: Transformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM +# Tokens: unigram +# losses: CTC + KLdiv (Label 
Smoothing loss)
+# Training: Switchboard
+# Authors: Jianyuan Zhong, Titouan Parcollet, Samuele Cornell, Dominik Wagner
+# ############################################################################
+# Seed needs to be set at top of yaml, before objects with parameters are made
+
+seed: 1312
+__set_seed: !apply:speechbrain.utils.seed_everything [!ref ]
+output_folder: !ref results/transformer/
+output_wer_folder: !ref /
+save_folder: !ref /save
+train_log: !ref /train_log.txt
+
+# Language model (LM) pretraining
+# NB: To avoid mismatch, the speech recognizer must be trained with the same
+# tokenizer used for LM training. You can download everything from the
+# Speechbrain HuggingFace repository, or you can provide a local
+# path pointing to a directory containing the lm.ckpt and tokenizer.ckpt.
+pretrained_lm_tokenizer_path: !PLACEHOLDER
+tokenizer_file: !ref /tokenizer.ckpt
+lm_file: !ref /lm.ckpt
+
+# Data files
+# Set the local path to the Switchboard dataset (e.g. /nfs/data/swbd) here
+data_folder: !PLACEHOLDER
+splits: [train, dev]
+split_ratio: [99, 1]
+skip_prep: False
+# We don't need the Fisher corpus here, since
+# it is only used for Tokenizer and LM training
+add_fisher_corpus: False
+# Remove optional/deletable parts of the transcript
+normalize_words: True
+# Maximum number of times the same utterance is allowed to appear
+# in the training data.
+max_utt: 300
+train_csv: !ref /train.csv
+valid_csv: !ref /dev.csv
+# The test data is split into the full test set (test.csv),
+# the Switchboard portion of the data (test_swbd.csv),
+# and the Callhome portion of the data (test_callhome.csv).
+test_csv:
+    - !ref /test_swbd.csv
+    - !ref /test_callhome.csv
+    - !ref /test.csv
+
+ckpt_interval_minutes: 30 # save checkpoint every N min
+
+####################### Training Parameters ####################################
+# To make Transformers converge, the global batch size should be large enough.
+# The global batch size is computed as: +# batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. +number_of_epochs: 100 +batch_size: 48 # This works for 1x GPU with 40GB +ctc_weight: 0.3 +grad_accumulation_factor: 2 +max_grad_norm: 5.0 +loss_reduction: batchmean +sorting: random +avg_checkpoints: 5 + +#dynamic_batching: False +# +#dynamic_batch_sampler: +# feats_hop_size: 0.01 +# max_batch_len: 100000 # in terms of frames +# num_buckets: 200 +# shuffle_ex: False # re-creates batches at each epoch shuffling examples. +# batch_ordering: descending +# max_batch_ex: -1 + +# stages related parameters +lr_adam: 0.006 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: 12 + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ########################### +# Transformer +transformer_input_size: 1280 +d_model: 256 +nhead: 4 +num_encoder_layers: 12 +num_decoder_layers: 6 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 2000 + +# Outputs +blank_index: 0 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 +# unk_index: 0 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 10 +lm_weight: 0.30 +test_beam_size: 60 +ctc_weight_decode: 0.30 +temperature: 1.0 +temperature_lm: 1.0 +using_eos_threshold: False +eos_threshold: 1.5 +length_normalization: True +using_max_attn_shift: False +max_attn_shift: 30 +scorer_beam_scale: 0.3 + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 3 + num_layers_per_block: 1 + out_channels: (64, 64, 64) + kernel_sizes: (5, 5, 1) + strides: (2, 2, 1) + residuals: (False, False, True) + +Transformer: 
!new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: !ref + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: transformer + attention_type: regularMHA + normalize_before: True + causal: False + +lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: 264 + d_embedding: 128 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 1024 + dropout: 0.1 + activation: !name:torch.nn.ReLU + normalize_before: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] +Adam: !name:torch.optim.Adam + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: !ref + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + transformerlm: !ref + ctc: !ref + scorer_beam_scale: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: !ref + length_normalization: !ref + using_max_attn_shift: !ref + max_attn_shift: !ref + scorer: !ref + temperature: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: 
[!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: !ref + using_max_attn_shift: !ref + max_attn_shift: !ref + eos_threshold: !ref + temperature: !ref + length_normalization: !ref + scorer: !ref + + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + # model_size: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +do_speed_perturb: True + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +train_logger: 
!new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref + tokenizer: !ref diff --git a/recipes/Switchboard/ASR/transformer/hparams/transformer_finetuned_LM.yaml b/recipes/Switchboard/ASR/transformer/hparams/transformer_finetuned_LM.yaml new file mode 100644 index 0000000000..df947a59c6 --- /dev/null +++ b/recipes/Switchboard/ASR/transformer/hparams/transformer_finetuned_LM.yaml @@ -0,0 +1,319 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: Transformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM +# Tokens: unigram +# losses: CTC + KLdiv (Label Smoothing loss) +# Training: Switchboard +# Authors: Jianyuan Zhong, Titouan Parcollet, Samuele Cornell, Dominik Wagner +# ############################################################################ +# Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 1312 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/transformer_finetuned_lm/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. 
You can download everything from the
+# Speechbrain HuggingFace repository, or you can provide a local
+# path pointing to a directory containing the lm.ckpt and tokenizer.ckpt.
+pretrained_lm_tokenizer_path: !PLACEHOLDER
+tokenizer_file: !ref /tokenizer.ckpt
+lm_file: !ref /lm.ckpt
+
+# Data files
+# Set the local path to the Switchboard dataset (e.g. /nfs/data/swbd) here
+data_folder: !PLACEHOLDER
+splits: [train, dev]
+split_ratio: [99, 1]
+skip_prep: False
+# We don't need the Fisher corpus here, since
+# it is only used for Tokenizer and LM training
+add_fisher_corpus: False
+# Remove optional/deletable parts of the transcript
+normalize_words: True
+# Maximum number of times the same utterance is allowed to appear
+# in the training data
+max_utt: 300
+train_csv: !ref /train.csv
+valid_csv: !ref /dev.csv
+# The test data is split into the full test set (test.csv),
+# the Switchboard portion of the data (test_swbd.csv),
+# and the Callhome portion of the data (test_callhome.csv).
+test_csv:
+    - !ref /test_swbd.csv
+    - !ref /test_callhome.csv
+    - !ref /test.csv
+
+ckpt_interval_minutes: 30 # save checkpoint every N min
+
+####################### Training Parameters ####################################
+# To make Transformers converge, the global batch size should be large enough.
+# The global batch size is computed as:
+# batch_size * n_gpus * grad_accumulation_factor.
+# Empirically, we found that this value should be >= 128.
+# Please, set your parameters accordingly.
+number_of_epochs: 60
+batch_size: 32 # This works for 1x GPU with 40GB
+ctc_weight: 0.3
+grad_accumulation_factor: 2
+max_grad_norm: 5.0
+loss_reduction: batchmean
+sorting: random
+avg_checkpoints: 5
+
+#dynamic_batching: False
+#
+#dynamic_batch_sampler:
+#    feats_hop_size: 0.01
+#    max_batch_len: 100000 # in terms of frames
+#    num_buckets: 200
+#    shuffle_ex: False # re-creates batches at each epoch shuffling examples.
+# batch_ordering: descending +# max_batch_ex: -1 + +# stages related parameters +lr_adam: 0.001 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: 12 + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ########################### +# Transformer +d_model: 512 +nhead: 4 +num_encoder_layers: 12 +num_decoder_layers: 6 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 5000 +# vocab_size: 5000 + +# Outputs +blank_index: 0 +label_smoothing: 0.0 +pad_index: 0 +bos_index: 1 +eos_index: 2 +# unk_index: 0 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 10 +test_beam_size: 66 +lm_weight: 0.60 +ctc_weight_decode: 0.40 +temperature: 1.15 +temperature_lm: 1.15 +############################## Models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 3 + num_layers_per_block: 1 + out_channels: (64, 64, 64) + kernel_sizes: (5, 5, 1) + strides: (2, 2, 1) + residuals: (False, False, True) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 1280 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: transformer + attention_type: regularMHA + normalize_before: True + causal: False + +lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: 768 + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +tokenizer: 
!new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] +Adam: !name:torch.optim.Adam + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + + +transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer + language_model: !ref + temperature: !ref + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , !ref ] + weights: + transformerlm: !ref + ctc: !ref + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + temperature: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 25000 + # model_size: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: 
!new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +do_speed_perturb: True + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. 
+pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + lm: !ref + tokenizer: !ref + paths: + lm: !ref + tokenizer: !ref diff --git a/recipes/Switchboard/ASR/transformer/normalize_util.py b/recipes/Switchboard/ASR/transformer/normalize_util.py new file mode 120000 index 0000000000..cf3ffea7bc --- /dev/null +++ b/recipes/Switchboard/ASR/transformer/normalize_util.py @@ -0,0 +1 @@ +../normalize_util.py \ No newline at end of file diff --git a/recipes/Switchboard/ASR/transformer/switchboard_prepare.py b/recipes/Switchboard/ASR/transformer/switchboard_prepare.py new file mode 120000 index 0000000000..f98ba6cfb9 --- /dev/null +++ b/recipes/Switchboard/ASR/transformer/switchboard_prepare.py @@ -0,0 +1 @@ +../../switchboard_prepare.py \ No newline at end of file diff --git a/recipes/Switchboard/ASR/transformer/train.py b/recipes/Switchboard/ASR/transformer/train.py new file mode 100644 index 0000000000..1ebe03834f --- /dev/null +++ b/recipes/Switchboard/ASR/transformer/train.py @@ -0,0 +1,526 @@ +#!/usr/bin/env python3 +"""Recipe for training a Transformer ASR system with Switchboard. +The system employs an encoder, a decoder, and an attention mechanism +between them. Decoding is performed with (CTC/Att joint) beamsearch coupled with a neural +language model. + +To run this recipe, do the following: +> python train.py hparams/transformer.yaml + +With the default hyperparameters, the system employs a convolutional frontend and a transformer. +The decoder is based on a Transformer decoder. Beamsearch coupled with a Transformer +language model is used on the top of decoder probabilities. + +The neural network is trained on both CTC and negative-log likelihood +targets and sub-word units estimated with Byte Pairwise Encoding (BPE) +are used as basic recognition tokens. Training is performed on the full +Switchboard dataset (~300 h). + +The best model is the average of the checkpoints from last 5 epochs. 
+ +The experiment file is flexible enough to support a large variety of +different systems. By properly changing the parameter files, you can try +different encoders, decoders, tokens (e.g, characters instead of BPE), and many +other possible variations. + + +Authors + * Jianyuan Zhong 2020 + * Mirco Ravanelli 2020 + * Peter Plantinga 2020 + * Samuele Cornell 2020, 2021, 2022 + * Titouan Parcollet 2021, 2022 + * Dominik Wagner 2022 +""" + +import functools +import os +import sys +from pathlib import Path + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +# Define training procedure +class ASR(sb.core.Brain): + def __init__( + self, + modules=None, + opt_class=None, + hparams=None, + run_opts=None, + checkpointer=None, + normalize_fn=None, + ): + self.normalize_fn = normalize_fn + + super().__init__( + modules=modules, + opt_class=opt_class, + hparams=hparams, + run_opts=run_opts, + checkpointer=checkpointer, + ) + + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_bos, _ = batch.tokens_bos + + # compute features + feats = self.hparams.compute_features(wavs) + current_epoch = self.hparams.epoch_counter.current + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + # Add feature augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels(tokens_bos) + # forward modules + src = self.modules.CNN(feats) + + enc_out, pred = self.modules.Transformer( + src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index + ) + + # output layer for ctc log-probabilities + logits = self.modules.ctc_lin(enc_out) + p_ctc = self.hparams.log_softmax(logits) + + # output layer for seq2seq log-probabilities + pred = self.modules.seq_lin(pred) + p_seq = self.hparams.log_softmax(pred) + + # Compute outputs + hyps = None + if stage == sb.Stage.TRAIN: + hyps = None + elif stage == sb.Stage.VALID: + hyps = None + current_epoch = self.hparams.epoch_counter.current + if current_epoch % self.hparams.valid_search_interval == 0: + # for the sake of efficiency, we only perform beamsearch with limited capacity + # and no LM to give user some idea of how the AM is doing + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) + elif stage == sb.Stage.TEST: + # for the sake of efficiency, we only perform beamsearch with limited capacity + # and no LM to give user some idea of how the AM is doing + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + + return p_ctc, p_seq, wav_lens, hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (CTC+NLL) given predictions and targets.""" + + (p_ctc, p_seq, wav_lens, hyps) = predictions + + ids = batch.id + tokens_eos, tokens_eos_lens = batch.tokens_eos + tokens, tokens_lens = batch.tokens + + if stage == sb.Stage.TRAIN: + # Labels must be extended if parallel augmentation or concatenated + # augmentation was performed on the input (increasing the time dimension) + if hasattr(self.hparams, "fea_augment"): + ( + tokens, + tokens_lens, + tokens_eos, + tokens_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, 
tokens_lens, tokens_eos, tokens_eos_lens
+                )
+
+        loss_seq = self.hparams.seq_cost(
+            p_seq, tokens_eos, length=tokens_eos_lens
+        ).sum()
+
+        # now as training progresses we use real prediction from the prev step instead of teacher forcing
+
+        loss_ctc = self.hparams.ctc_cost(
+            p_ctc, tokens, wav_lens, tokens_lens
+        ).sum()
+
+        loss = (
+            self.hparams.ctc_weight * loss_ctc
+            + (1 - self.hparams.ctc_weight) * loss_seq
+        )
+
+        if stage != sb.Stage.TRAIN:
+            current_epoch = self.hparams.epoch_counter.current
+            valid_search_interval = self.hparams.valid_search_interval
+            if current_epoch % valid_search_interval == 0 or (
+                stage == sb.Stage.TEST
+            ):
+                # Decode token terms to words
+                # NOTE: the tokenizer is attached to the Brain instance in
+                # main() (asr_brain.tokenizer = hparams["tokenizer"]); a bare
+                # `tokenizer` name is not defined in this scope.
+                predicted_words = [
+                    self.tokenizer.decode_ids(utt_seq).split(" ")
+                    for utt_seq in hyps
+                ]
+                target_words = [wrd.split(" ") for wrd in batch.words]
+
+                # Check for possible word alternatives and exclusions
+                if stage == sb.Stage.TEST and self.normalize_fn is not None:
+                    target_words, predicted_words = self.normalize_fn(
+                        target_words, predicted_words
+                    )
+
+                self.wer_metric.append(ids, predicted_words, target_words)
+
+            # compute the accuracy of the one-step-forward prediction
+            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
+        return loss
+
+    def on_fit_batch_end(self, batch, outputs, loss, should_step):
+        """At the end of the optimizer step, apply noam annealing."""
+        if should_step:
+            self.hparams.noam_annealing(self.optimizer)
+
+    def evaluate_batch(self, batch, stage):
+        """Computations needed for validation/test batches"""
+        with torch.no_grad():
+            predictions = self.compute_forward(batch, stage=stage)
+            loss = self.compute_objectives(predictions, batch, stage=stage)
+        return loss.detach()
+
+    def on_stage_start(self, stage, epoch):
+        """Gets called at the beginning of each epoch"""
+        if stage != sb.Stage.TRAIN:
+            self.acc_metric = self.hparams.acc_computer()
+            self.wer_metric = self.hparams.error_rate_computer()
+
+    def on_stage_end(self, stage, stage_loss, epoch):
+        """Gets
called at the end of a epoch.""" + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["ACC"] = self.acc_metric.summarize() + current_epoch = self.hparams.epoch_counter.current + valid_search_interval = self.hparams.valid_search_interval + if ( + current_epoch % valid_search_interval == 0 + or stage == sb.Stage.TEST + ): + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # log stats and save checkpoint at end-of-epoch + if stage == sb.Stage.VALID: + lr = self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"ACC": stage_stats["ACC"], "epoch": epoch}, + max_keys=["ACC"], + num_to_keep=self.hparams.avg_checkpoints, + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + # save the averaged checkpoint at the end of the evaluation stage + # delete the rest of the intermediate checkpoints + # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint + self.checkpointer.save_and_keep_only( + meta={"ACC": 1.1, "epoch": epoch}, + max_keys=["ACC"], + num_to_keep=1, + ) + + def on_evaluate_start(self, max_key=None, min_key=None): + """perform checkpoint average if needed""" + super().on_evaluate_start() + + ckpts = self.checkpointer.find_checkpoints( + max_key=max_key, min_key=min_key + ) + ckpt = sb.utils.checkpoints.average_checkpoints( + ckpts, + 
recoverable_name="model", + ) + + self.hparams.model.load_state_dict(ckpt, strict=True) + self.hparams.model.eval() + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + valtest_datasets = [valid_data] + [i for k, i in test_datasets.items()] + + # We get the tokenizer as we need it to encode the labels when creating + # mini-batches. 
+ tokenizer = hparams["tokenizer"] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav", "channel", "start", "stop") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav, channel, start, stop): + # Select a speech segment from the sph file + # start and end times are already frames. + # This is done in data preparation stage. + start = int(start) + stop = int(stop) + num_frames = stop - start + sig, fs = audio_io.load(wav, num_frames=num_frames, frame_offset=start) + info = audio_io.info(wav) + + resampled = sig + # Maybe resample to 16kHz + if int(info.sample_rate) != int(hparams["sample_rate"]): + resampled = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(sig) + + resampled = resampled.transpose(0, 1).squeeze(1) + if info.num_channels > 1: + # Select the proper audio channel of the segment + if channel == "A": + resampled = resampled[:, 0] + else: + resampled = resampled[:, 1] + return resampled + + sb.dataio.dataset.add_dynamic_item(valtest_datasets, audio_pipeline) + + @sb.utils.data_pipeline.takes("wav", "channel", "start", "stop") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline_train(wav, channel, start, stop): + # Speed Perturb is done here so it is multi-threaded with the + # workers of the dataloader (faster). + + # Select a speech segment from the sph file + # start and end times are already frames. + # This is done in data preparation stage. 
+ start = int(start) + stop = int(stop) + num_frames = stop - start + sig, fs = audio_io.load(wav, num_frames=num_frames, frame_offset=start) + info = audio_io.info(wav) + + resampled = sig + # Maybe resample to 16kHz + if int(info.sample_rate) != int(hparams["sample_rate"]): + resampled = torchaudio.transforms.Resample( + info.sample_rate, + hparams["sample_rate"], + )(sig) + + resampled = resampled.transpose(0, 1).squeeze(1) + if info.num_channels > 1: + # Select the proper audio channel of the segment + if channel == "A": + resampled = resampled[:, 0] + else: + resampled = resampled[:, 1] + + # Speed Perturb is done here so it is multi-threaded with the + # workers of the dataloader (faster). + if hparams["do_speed_perturb"]: + resampled = hparams["speed_perturb"]( + resampled.unsqueeze(0) + ).squeeze(0) + + return resampled + + sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline_train) + + # 3. Define text pipeline: + @sb.utils.data_pipeline.takes("words") + @sb.utils.data_pipeline.provides( + "words", "tokens_list", "tokens_bos", "tokens_eos", "tokens" + ) + def text_pipeline(words): + yield words + tokens_list = tokenizer.encode_as_ids(words) + yield tokens_list + tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) + yield tokens_bos + tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) + yield tokens_eos + tokens = torch.LongTensor(tokens_list) + yield tokens + + sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) + + # 4. 
Set output: + sb.dataio.dataset.set_output_keys( + datasets, + ["id", "sig", "words", "tokens_bos", "tokens_eos", "tokens"], + ) + + return ( + train_data, + valid_data, + test_datasets, + tokenizer, + ) + + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + from normalize_util import normalize_words, read_glm_csv # noqa + from switchboard_prepare import prepare_switchboard # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_switchboard, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "splits": hparams["splits"], + "split_ratio": hparams["split_ratio"], + "skip_prep": hparams["skip_prep"], + "add_fisher_corpus": hparams["add_fisher_corpus"], + "max_utt": hparams["max_utt"], + }, + ) + + # here we create the datasets objects as well as tokenization and encoding + train_data, valid_data, test_datasets, tokenizer = dataio_prepare(hparams) + + # We download the pretrained LM from HuggingFace (or elsewhere depending on + # the path given in the YAML file). The tokenizer is loaded at the same time. 
+ hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # Helper function that removes optional/deletable parts of the transcript + # for cleaner performance metrics + normalize_fn = None + if hparams["normalize_words"]: + normalize_fn = functools.partial( + normalize_words, + glm_alternatives=read_glm_csv(hparams["output_folder"]), + ) + + # Trainer initialization + asr_brain = ASR( + modules=hparams["modules"], + opt_class=hparams["Adam"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + normalize_fn=normalize_fn, + ) + + # adding objects to trainer: + asr_brain.tokenizer = hparams["tokenizer"] + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + # Testing + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + + # Testing + for k in test_datasets.keys(): # keys are test_swbd and test_callhome + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + max_key="ACC", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/Switchboard/LM/README.md b/recipes/Switchboard/LM/README.md new file mode 100644 index 0000000000..dbef5b52cb --- /dev/null +++ b/recipes/Switchboard/LM/README.md @@ -0,0 +1,58 @@ +# Language Model with Switchboard +This folder contains recipes for finetuning language models for the Switchboard dataset. +It supports both an RNN-based LM and a Transformer-based LM. + +You can download the Switchboard data at https://catalog.ldc.upenn.edu/LDC97S62. 
+ +The eval2000/Hub5 English test set can be found at: +- Speech data: https://catalog.ldc.upenn.edu/LDC2002S09 +- Transcripts: https://catalog.ldc.upenn.edu/LDC2002T43 + +Part 1 and part 2 of the Fisher corpus are available at: +- https://catalog.ldc.upenn.edu/LDC2004T19 +- https://catalog.ldc.upenn.edu/LDC2005T19 + +As in Kaldi's [swbd/s5c](https://github.com/kaldi-asr/kaldi/tree/master/egs/swbd/s5c) recipe, +the Fisher transcripts can be used as an additional resource for training Tokenizer and LM. + +# How to run: +``` +python train.py hparams/transformer.yaml +python train.py hparams/transformer_finetune.yaml +``` + +| Release | hyperparams file | Test PP | Model link | GPUs | +| :--- | :---: | :---: | :---: | :---: | +| 01-07-22 | transformer.yaml | --.-- | n.a. | 1xA100 40GB | +| 01-07-22 | transformer_finetune.yaml | --.-- | n.a. | 1xA100 40GB | + + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` \ No newline at end of file diff --git a/recipes/KsponSpeech/LM/hparams/transformer.yaml b/recipes/Switchboard/LM/hparams/transformer.yaml similarity index 59% rename from recipes/KsponSpeech/LM/hparams/transformer.yaml rename to recipes/Switchboard/LM/hparams/transformer.yaml index 4635df9aba..a7fab68d33 100644 --- a/recipes/KsponSpeech/LM/hparams/transformer.yaml +++ b/recipes/Switchboard/LM/hparams/transformer.yaml @@ -1,38 +1,46 @@ # ############################################################################ -# Model: Transformer LM of E2E ASR +# 
Model: Transformer LM for E2E ASR # Tokens: unigram # losses: NLL -# Training: KsponSpeech train transcript -# Authors: Dongwon Kim, Dongwoo Kim 2021 +# Training: Switchboard corpus transcripts + Fisher corpus transcripts +# Authors: Jianyuan Zhong 2021, Dominik Wagner 2022 # ############################################################################ # Seed needs to be set at top of yaml, before objects with parameters are made -seed: 2222 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/Transformer/ +seed: 1312 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/transformer/ save_folder: !ref /save train_log: !ref /train_log.txt # Data files -data_folder: ../Tokenizer/results/5K_subword_unigram_LM -# train_splits: ["train"] -# dev_splits: ["dev"] -# test_splits: ["eval_clean", "eval_other"] -# skip_prep: False -train_csv: !ref /train.csv -valid_csv: !ref /dev.csv -test_csv: - - !ref /eval_clean.csv - - !ref /eval_other.csv +# Set the local path to the Switchboard dataset (e.g. /nfs/data/swbd) here. +data_folder: !PLACEHOLDER +splits: ["train", "dev"] +split_ratio: [99, 1] +add_fisher_corpus: True +# Maximum number of times the same utterance is allowed to appear +# in the training data. +# Note that this only filters the swbd1 data but not the Fisher data. +max_utt: 300 +skip_prep: False +# train_lm.csv is is created, when the Fisher +# corpus is included in the data preparation +# procedure via add_fisher_corpus +train_csv: !ref /train_lm.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv # Tokenizer model -tokenizer_file: ddwkim/ksponspeech-conformer-medium/tokenizer.ckpt - -# Training parameters -number_of_epochs: 30 -batch_size: 256 -lr: 0.1 -accu_steps: 4 # Gradient accumulation to simulate large batch training +# Location of your trained Sentencepiece tokenizer +# (e.g. 
/path/to/2000_unigram.model) +tokenizer_file: !PLACEHOLDER + +####################### Training Parameters #################################### +number_of_epochs: 100 +batch_size: 164 +lr: 1 +grad_accumulation_factor: 2 # Gradient accumulation to simulate large batch training ckpt_interval_minutes: 15 # save checkpoint every N min # Dataloader options @@ -48,26 +56,27 @@ test_dataloader_opts: batch_size: 1 # Outputs -output_neurons: 5000 +output_neurons: 2000 # blank_index: 0 bos_index: 1 eos_index: 2 +# unk_index: 0 # pad_index: 0 # model params -d_model: 768 - +d_model: 264 # Functions model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length vocab: !ref d_model: !ref + d_embedding: 128 nhead: 12 num_encoder_layers: 12 num_decoder_layers: 0 - d_ffn: 3072 - dropout: 0.15 - activation: !name:torch.nn.GELU + d_ffn: 1024 + dropout: 0.1 + activation: !name:torch.nn.ReLU normalize_before: False modules: @@ -90,7 +99,7 @@ optimizer: !name:torch.optim.Adam lr_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler lr_initial: !ref - n_warmup_steps: 1250 + n_warmup_steps: 25000 model_size: !ref epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter diff --git a/recipes/Switchboard/LM/hparams/transformer_finetune.yaml b/recipes/Switchboard/LM/hparams/transformer_finetune.yaml new file mode 100644 index 0000000000..5edaf29cfb --- /dev/null +++ b/recipes/Switchboard/LM/hparams/transformer_finetune.yaml @@ -0,0 +1,126 @@ +# ############################################################################ +# Model: Transformer LM of E2E ASR +# Tokens: unigram +# losses: NLL +# Training: Librispeech 960h transcripts + Librispeech LM corpus +# + Swbd transcripts + Fisher transcripts +# Authors: Jianyuan Zhong 2021, Dominik Wagner 2022 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 1312 
+__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/transformer_finetune/ +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +# Set the local path to the Switchboard dataset (e.g. /nfs/data/swbd) here. +data_folder: !PLACEHOLDER +splits: ["train", "dev"] +split_ratio: [99, 1] +add_fisher_corpus: True +# Maximum number of times the same utterance is allowed to appear +# in the training data +max_utt: 300 +skip_prep: False +# train_lm.csv is is created, when the Fisher +# corpus is included in the data preparation +# procedure via add_fisher_corpus +train_csv: !ref /train_lm.csv +valid_csv: !ref /dev.csv +test_csv: !ref /test.csv + +# Language model (LM) pretraining +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. Here, we download everything from the +# speechbrain HuggingFace repository. However, a local path pointing to a +# directory containing the lm.ckpt and tokenizer.ckpt may also be specified +# instead. E.g if you want to use your own LM / tokenizer. 
+pretrained_lm_tokenizer_path: speechbrain/asr-transformer-transformerlm-librispeech + +####################### Training Parameters #################################### +number_of_epochs: 5 +batch_size: 128 +lr: 2 +grad_accumulation_factor: 2 +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + pin_memory: True + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +# Outputs +output_neurons: 5000 +# blank_index: 0 +bos_index: 1 +eos_index: 2 +# unk_index: 0 +# pad_index: 0 + +# model params +d_model: 768 + +# Functions +model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length + vocab: !ref + d_model: !ref + nhead: 12 + num_encoder_layers: 12 + num_decoder_layers: 0 + d_ffn: 3072 + dropout: 0.0 + activation: !name:torch.nn.GELU + normalize_before: False + +modules: + model: !ref + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + scheduler: !ref + counter: !ref + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +optimizer: !name:torch.optim.Adam + lr: 0 + betas: (0.9, 0.98) + eps: 0.000000001 + +lr_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 50000 + model_size: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +compute_cost: !name:speechbrain.nnet.losses.nll_loss + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +# This object is used to load a pretrained language model and tokenizer +# (defined above). 
+pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + model: !ref + tokenizer: !ref + paths: + model: !ref /lm.ckpt + tokenizer: !ref /tokenizer.ckpt diff --git a/recipes/Switchboard/LM/switchboard_prepare.py b/recipes/Switchboard/LM/switchboard_prepare.py new file mode 120000 index 0000000000..0ce87cb348 --- /dev/null +++ b/recipes/Switchboard/LM/switchboard_prepare.py @@ -0,0 +1 @@ +../switchboard_prepare.py \ No newline at end of file diff --git a/recipes/KsponSpeech/LM/train.py b/recipes/Switchboard/LM/train.py similarity index 54% rename from recipes/KsponSpeech/LM/train.py rename to recipes/Switchboard/LM/train.py index 366b1f29b4..e21e7691df 100644 --- a/recipes/KsponSpeech/LM/train.py +++ b/recipes/Switchboard/LM/train.py @@ -1,34 +1,32 @@ #!/usr/bin/env python3 -"""Recipe for training a Language Model with ksponspeech train-965.2 -transcript and lm_corpus. +"""Recipe for training a Language Model on Switchboard and Fisher corpus. To run this recipe, do the following: > pip install datasets -> python train.py hparams/.yaml \ - --data_folder +> python train.py hparams/.yaml Authors * Jianyuan Zhong 2021 * Ju-Chieh Chou 2020 - * Dongwon Kim, Dongwoo Kim 2021 + * Dominik Wagner 2022 """ + import sys -import logging -from pathlib import Path + import torch from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger - -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure class LM(sb.core.Brain): def compute_forward(self, batch, stage): - """Forward computations from the sentence batches - to the output probabilities.""" + """Forward computations from the sentence batches to the output probabilities.""" batch = batch.to(self.device) tokens_bos, _ = batch.tokens_bos logits = self.hparams.model(tokens_bos) @@ -44,20 +42,9 @@ def compute_objectives(self, predictions, 
batch, stage): ) return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - (loss / self.hparams.accu_steps).backward() - - if self.step % self.hparams.accu_steps == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.optimizer.step() - self.optimizer.zero_grad() - + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: if isinstance( self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler ) or isinstance( @@ -66,22 +53,13 @@ def fit_batch(self, batch): ): self.hparams.lr_annealing(self.optimizer) - if isinstance( - self.hparams.train_logger, sb.utils.train_logger.TensorboardLogger - ): - self.hparams.train_logger.log_stats( - stats_meta={"step": self.step}, train_stats={"loss": loss}, - ) - - return loss - def on_stage_end(self, stage, stage_loss, epoch): """Gets called at the end of a epoch.""" stage_stats = {"loss": stage_loss} if stage == sb.Stage.TRAIN: self.train_stats = stage_stats - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): + if stage == sb.Stage.VALID: if not ( isinstance( self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler @@ -102,51 +80,48 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta=stage_stats, min_keys=["loss"], - ) - - elif stage == sb.Stage.TEST: - self.hparams.train_logger.log_stats( - stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, - test_stats=stage_stats, + meta=stage_stats, + min_keys=["loss"], ) def dataio_prepare(hparams): - """This function prepares the datasets to be used in the brain class. + """ + This function prepares the datasets to be used in the brain class. 
It also defines the data processing pipeline through user-defined - functions.""" - data_folder = hparams["data_folder"] + functions. + """ + + data_folder = hparams["save_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, ) - # test is separate - test_datasets = {} - for csv_file in hparams["test_csv"]: - name = Path(csv_file).stem - test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=csv_file, replacements={"data_root": data_folder} - ) + test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["test_csv"], + replacements={"data_root": data_folder}, + ) - datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + datasets = [train_data, valid_data, test_data] # We get the tokenizer as we need it to encode the labels when creating # mini-batches. tokenizer = hparams["tokenizer"] """Define text pipeline""" - # TODO: implement text augmentations pipelines - @sb.utils.data_pipeline.takes("wrd") - @sb.utils.data_pipeline.provides("wrd", "tokens_bos", "tokens_eos") - def text_pipeline(wrd): - yield wrd - tokens_list = tokenizer.encode_as_ids(wrd) + + @sb.utils.data_pipeline.takes("words") + @sb.utils.data_pipeline.provides("words", "tokens_bos", "tokens_eos") + def text_pipeline(words): + yield words + tokens_list = tokenizer.encode_as_ids(words) tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) yield tokens_bos tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) @@ -156,18 +131,18 @@ def text_pipeline(wrd): # 4. 
Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "wrd", "tokens_bos", "tokens_eos"], + datasets, + ["id", "words", "tokens_bos", "tokens_eos"], ) - return train_data, valid_data, test_datasets + return train_data, valid_data, test_data if __name__ == "__main__": # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -178,14 +153,30 @@ def text_pipeline(wrd): overrides=overrides, ) - # here we create the dataloader objects as well as - # tokenization and encoding - train_data, valid_data, test_datasets = dataio_prepare(hparams) + # 1. # Dataset prep (parsing Switchboard (and Fisher) data) + from switchboard_prepare import prepare_switchboard # noqa + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_switchboard, + kwargs={ + "data_folder": hparams["data_folder"], + "splits": hparams["splits"], + "save_folder": hparams["save_folder"], + "skip_prep": hparams["skip_prep"], + "add_fisher_corpus": hparams["add_fisher_corpus"], + "split_ratio": hparams["split_ratio"], + "max_utt": hparams["max_utt"], + }, + ) + + # here we create the dataloader objects as well as tokenization and encoding + train_data, valid_data, test_data = dataio_prepare(hparams) - # We download the tokenizer from HuggingFace (or elsewhere depending on - # the path given in the YAML file). - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + # We download the tokenizer and pretrained LM from HuggingFace (or elsewhere depending on + # the path given in tokenizer_file of the hparams YAML file). 
+ hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() lm_brain = LM( modules=hparams["modules"], @@ -204,9 +195,10 @@ def text_pipeline(wrd): ) # evaluation - for k in test_datasets.keys(): # keys are eval_clean, eval_other etc - lm_brain.evaluate( - test_datasets[k], - min_key="loss", - test_loader_kwargs=hparams["test_dataloader_opts"], - ) + test_stats = lm_brain.evaluate( + test_data, + min_key="loss", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) + + lm_brain.checkpointer.save_checkpoint(name="latest") diff --git a/recipes/Switchboard/README.md b/recipes/Switchboard/README.md new file mode 100644 index 0000000000..6780c7df08 --- /dev/null +++ b/recipes/Switchboard/README.md @@ -0,0 +1,35 @@ +## How to run an ASR experiment with Switchboard +To train a full ASR system, the pipeline is as follows: +1. **Train a tokenizer.** The tokenizer receives as input the training transcripts and determines the subword units that will be used for both acoustic and language model training. **Training a tokenizer before the language and acoustic model is necessary**. Indeed, both of them will reuse this tokenizer to map the output tokens. +2. **Train a Language Model (LM).** The language model takes in input long texts from available books. We have recipes with RNN and transformer-based LMs. In both cases, the LM is used during beam search to assign different weights to different hypotheses generated by the acoustic model. +3. **Train an acoustic model (AM).** The acoustic model maps the input speech into a set of sub-words units. The current repository contains recipes for seq2seq (ctc+attention), transducers, and transformer-based systems. Since training an LM can take several days, by default the recipes downloads a pre-trained LM. 
+ +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` \ No newline at end of file diff --git a/recipes/Switchboard/Tokenizer/README.md b/recipes/Switchboard/Tokenizer/README.md new file mode 100644 index 0000000000..a865da7eb7 --- /dev/null +++ b/recipes/Switchboard/Tokenizer/README.md @@ -0,0 +1,50 @@ +# Tokenizer +This folder 
contains the scripts to train a tokenizer using SentencePiece (https://github.com/google/sentencepiece). +The tokenizer is trained using the Switchboard training transcriptions and optionally the Fisher corpus. + +You can download the Switchboard data at https://catalog.ldc.upenn.edu/LDC97S62. + +The eval2000/Hub5 English test set can be found at: +- Speech data: https://catalog.ldc.upenn.edu/LDC2002S09 +- Transcripts: https://catalog.ldc.upenn.edu/LDC2002T43 + +Part 1 and part 2 of the Fisher corpus are available at: +- https://catalog.ldc.upenn.edu/LDC2004T19 +- https://catalog.ldc.upenn.edu/LDC2005T19 + +As in Kaldi's [swbd/s5c](https://github.com/kaldi-asr/kaldi/tree/master/egs/swbd/s5c) recipe, +the Fisher transcripts can be used as an additional resource for training Tokenizer and LM. + +# How to run +`python train.py hparams/2K_unigram_subword_bpe.yaml` + + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/Switchboard/Tokenizer/hparams/2K_unigram_subword_bpe.yaml b/recipes/Switchboard/Tokenizer/hparams/2K_unigram_subword_bpe.yaml new file mode 100644 index 0000000000..d07d83e707 --- /dev/null +++ b/recipes/Switchboard/Tokenizer/hparams/2K_unigram_subword_bpe.yaml @@ -0,0 +1,40 @@ +# ############################################################################ +# Tokenizer: subword BPE with unigram 2K +# Training: Switchboard and Fisher (optional) +# Authors: Dominik Wagner 2022 +# 
############################################################################ + +output_folder: !ref results/2K_subword_unigram + +# Data files +# Set the local path to the Switchboard dataset (e.g. /nfs/data/swbd) here. +data_folder: !PLACEHOLDER +splits: ["train", "dev"] +split_ratio: [99, 1] +add_fisher_corpus: True +# Maximum number of times the same utterance is allowed +# to appear in the training data +# Note that this only filters the swbd1 data but not the Fisher data. +max_utt: 300 +train_csv: !ref /train_lm.csv +valid_csv: !ref /dev.csv +skip_prep: False + +####################### Training Parameters #################################### +token_type: unigram # ["unigram", "bpe", "char"] +token_output: 2000 # index(blank/eos/bos/unk) = 0 +character_coverage: 1.0 +csv_read: words +bos_index: 1 +eos_index: 2 + +tokenizer: !name:speechbrain.tokenizers.SentencePiece.SentencePiece + model_dir: !ref + vocab_size: !ref + annotation_train: !ref + annotation_read: !ref + model_type: !ref + character_coverage: !ref + bos_id: !ref # Define bos_id/eos_id if different from blank_id + eos_id: !ref + annotation_list_to_check: [!ref , !ref ] diff --git a/recipes/Switchboard/Tokenizer/switchboard_prepare.py b/recipes/Switchboard/Tokenizer/switchboard_prepare.py new file mode 120000 index 0000000000..0ce87cb348 --- /dev/null +++ b/recipes/Switchboard/Tokenizer/switchboard_prepare.py @@ -0,0 +1 @@ +../switchboard_prepare.py \ No newline at end of file diff --git a/recipes/Switchboard/Tokenizer/train.py b/recipes/Switchboard/Tokenizer/train.py new file mode 100644 index 0000000000..b15118ef1e --- /dev/null +++ b/recipes/Switchboard/Tokenizer/train.py @@ -0,0 +1,59 @@ +#!/usr/bin/env/python3 +"""Recipe for training a BPE tokenizer with Switchboard. +The tokenizer converts words into sub-word units that can +be used to train a language (LM) or an acoustic model (AM). 
+
When doing a speech recognition experiment you have to make
+sure that the acoustic and language models are trained with
+the same tokenizer. Otherwise, a token mismatch is introduced
+and beamsearch will produce bad results when combining AM and LM.
+
+To run this recipe, do the following:
+> python train.py hparams/2K_unigram_subword_bpe.yaml
+
+
+Authors
+ * Abdel Heba 2021
+"""
+
+import sys
+
+from hyperpyyaml import load_hyperpyyaml
+
+import speechbrain as sb
+from speechbrain.utils.distributed import run_on_main
+
+if __name__ == "__main__":
+    # CLI:
+    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
+    with open(hparams_file, encoding="utf-8") as fin:
+        hparams = load_hyperpyyaml(fin, overrides)
+
+    # create ddp_group with the right communication protocol
+    sb.utils.distributed.ddp_init_group(run_opts)
+
+    # 1. # Dataset prep (parsing Switchboard (and Fisher) data)
+    from switchboard_prepare import prepare_switchboard  # noqa
+
+    # Create experiment directory
+    sb.create_experiment_directory(
+        experiment_directory=hparams["output_folder"],
+        hyperparams_to_save=hparams_file,
+        overrides=overrides,
+    )
+
+    # multi-gpu (ddp) save data preparation
+    run_on_main(
+        prepare_switchboard,
+        kwargs={
+            "data_folder": hparams["data_folder"],
+            "splits": hparams["splits"],
+            "save_folder": hparams["output_folder"],
+            "skip_prep": hparams["skip_prep"],
+            "add_fisher_corpus": hparams["add_fisher_corpus"],
+            "split_ratio": hparams["split_ratio"],
+            "max_utt": hparams["max_utt"],
+        },
+    )
+
+    # Train tokenizer
+    hparams["tokenizer"]()
diff --git a/recipes/Switchboard/switchboard_prepare.py b/recipes/Switchboard/switchboard_prepare.py
new file mode 100644
index 0000000000..fd013ad530
--- /dev/null
+++ b/recipes/Switchboard/switchboard_prepare.py
@@ -0,0 +1,1269 @@
+"""
+This script prepares the data of the switchboard-1 release 2 corpus (LDC97S62).
+Optionally, the Fisher corpus transcripts (LDC2004T19 and LDC2005T19) can be added to +the CSVs for Tokenizer and LM training. +The test set is based on the eval2000/Hub 5 data (LDC2002S09/LDC2002T43). + +The datasets can be obtained from: +- Switchboard: https://catalog.ldc.upenn.edu/LDC97S62 +- Fisher part 1: https://catalog.ldc.upenn.edu/LDC2004T19 +- Fisher part 2: https://catalog.ldc.upenn.edu/LDC2005T19 + +The test data is available at: +- Speech data: https://catalog.ldc.upenn.edu/LDC2002S09 +- Transcripts: https://catalog.ldc.upenn.edu/LDC2002T43 + +Author +------ +Dominik Wagner 2022 +""" + +import csv +import os +import re +from collections import defaultdict + +from speechbrain.dataio.dataio import merge_csvs +from speechbrain.utils.data_utils import download_file, get_all_files +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +SAMPLERATE = 8000 + + +def prepare_switchboard( + data_folder, + save_folder, + splits=None, + split_ratio=None, + merge_lst=None, + merge_name=None, + skip_prep=False, + add_fisher_corpus=False, + max_utt=300, +): + """ + Main function for Switchboard data preparation. + + Arguments + --------- + data_folder : str + Path to the folder where the Switchboard (and Fisher) datasets are stored. + Note that the Fisher data must be stored (or at least symlinked) + to the same location. + save_folder : str + The directory to store the outputs generated by this script. + splits : list + A list of data splits you want to obtain from the Switchboard dataset. + This would be usually ["train", "dev"] since the "test" set is generated + separately using the Hub5/eval2000 portion of the Switchboard corpus. + The default split is into a ["train", "dev"] portion based on + the split_ratio argument. + split_ratio : list + List containing the portions you want to allocate to + each of your data splits e.g. [90, 10]. The default is [90, 10]. 
+ merge_lst : list + This allows you to merge some (or all) of the data splits you specified + (e.g. ["train", "dev"]) into a single file. The default is [], i.e. no merging. + merge_name : str + Name of the merged csv file. + skip_prep : bool + If True, data preparation is skipped. + add_fisher_corpus : bool + If True, a separate csv file called "train_lm.csv" will be created containing + the Switchboard training data and the Fisher corpus transcripts. + The "train_lm.csv" file can be used instead of the regular "train.csv" file + for LM and Tokenizer training. + Note that this requires the Fisher corpus (part 1 and part 2) + to be downloaded in your data_folder location. + max_utt : int + Remove excess utterances once they appear more than a specified + number of times with the same transcription, in a data set. + This is useful for removing utterances like "uh-huh" from training. + + Returns + ------- + None + + Example + ------- + >>> data_folder = "/nfs/data/ldc" + >>> save_folder = "swbd_data" + >>> splits = ["train", "dev"] + >>> split_ratio = [90, 10] + >>> prepare_switchboard( + ... data_folder, + ... save_folder, + ... splits, + ... split_ratio, + ... add_fisher_corpus=True, + ... 
) + """ + if merge_lst is None: + merge_lst = [] + if split_ratio is None: + split_ratio = [90, 10] + if splits is None: + splits = ["train", "dev"] + if skip_prep: + logger.info("Data preparation skipped manually via hparams") + return + + filenames = [] + for split in splits: + filenames.append(os.path.join(save_folder, str(split + ".csv"))) + if add_fisher_corpus: + filenames.append(os.path.join(save_folder, "train_lm.csv")) + filenames.append(os.path.join(save_folder, "test.csv")) + + if skip(*filenames): + logger.info("Preparation completed in previous run, skipping.") + return + + swbd_train_lines = swbd1_data_prep( + data_folder, + save_folder, + splits, + split_ratio, + add_fisher_corpus=add_fisher_corpus, + max_utt=max_utt, + ) + + # Merging csv file if needed + maybe_merge_files(merge_name, merge_lst) + + # Prepare eval2000 testset + eval2000_data_prep(data_folder, save_folder) + + if add_fisher_corpus: + fisher_lines = fisher_data_prep(data_folder, save_folder) + # fisher_lines already contains a header, so we don't need to add one here + combined_lines = fisher_lines + swbd_train_lines + + csv_file = os.path.join(save_folder, "train_lm.csv") + # We set max_utt to a large number, so all utterances will be included in train_lm.csv + # Note that the Kaldi recipe also doesn't care about a maximum utterance number in the + # LM training corpus. + write_csv(csv_file, combined_lines, utt_id_idx=1, max_utt=999999999) + + logger.info("Switchboard data preparation finished.") + + +def write_csv(csv_file, csv_lines, utt_id_idx=0, max_utt=300): + """ + Write utterances to a .csv file. + + Arguments + --------- + csv_file : str + Full path of the file to save + csv_lines : list + A list of lists containing the data to write to the .csv file. + utt_id_idx : int + Element in the list representing a line that marks the utterance id. + This is necessary to keep track of duplicate utterances. + max_utt : int + Maximum number of duplicate utterances to be written. 
+ Once max_utt is exceeded, any lines containing the same + utterance will not be written to the .csv file + """ + + # Keep track of the number of times each utterance appears + utt2count = defaultdict(int) + + with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + for line in csv_lines: + current_utt = line[utt_id_idx] + # Avoid that the same utterance becomes part of the dataset too often + if utt2count[current_utt] < max_utt: + csv_writer.writerow(line) + + utt2count[current_utt] += 1 + + +def maybe_merge_files(merge_name, merge_lst: list): + """ + + Merge multiple .csv files and store the combined data in a new file. + + Arguments + --------- + merge_name : str + New name to save the combined files under. + merge_lst : list + List of data splits to be merged. + + """ + if len(merge_lst) > 1: + if merge_name is not None and len(merge_name) > 0: + merge_files = [data_split + ".csv" for data_split in merge_lst] + merge_csvs( + data_folder=save_folder, + csv_lst=merge_files, + merged_csv=merge_name, + ) + else: + logger.warning( + "No name for merged .csv supplied. " + "You can pass a name for the merged .csv files " + "via the merge_name parameter. Not combining any .csv files!" + ) + + +def check_data_folder(root_folder): + """ + Check if all directories exist to prepare the Switchboard dataset. + + Arguments + --------- + root_folder : str + Root directory, where the Switchboard data is located. 
+ Expects the following subdirectories to exist: + "docs", "swb1_d1", "swb1_d2", "swb1_d3", "swb1_d4" + """ + for sub_folder in ["docs", "swb1_d1", "swb1_d2", "swb1_d3", "swb1_d4"]: + swbd_folder = os.path.join(root_folder, sub_folder) + if not os.path.exists(swbd_folder): + err_msg = f"The folder {swbd_folder} does not exist (it is expected in the Switchboard dataset)" + raise OSError(err_msg) + + +def download_transcripts(target_folder): + """ + Download and unpack Switchboard transcripts from OpenSLR. + + Arguments + --------- + target_folder : str + Desired location to store the transcripts. + """ + transcription_dir = os.path.join(target_folder, "swb_ms98_transcriptions") + + if not os.path.exists(transcription_dir): + logger.info( + f"Download transcriptions and store them in {target_folder}" + ) + + download_source = "http://www.openslr.org/resources/5/switchboard_word_alignments.tar.gz" + download_target = os.path.join( + target_folder, "switchboard_word_alignments.tar.gz" + ) + download_file(download_source, download_target, unpack=True) + else: + logger.info( + f"Skipping download of transcriptions because {target_folder} already exists." + ) + + +def skip(*filenames): + """ + Detects if the Switchboard data preparation has already been done. + + Arguments + --------- + *filenames : tuple + List of paths to check for existence. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, preparation must be done. + """ + for filename in filenames: + if not os.path.isfile(filename): + return False + return True + + +def filter_text( + transcription: str, dataset="train", acronyms=None, acronyms_noi=None +): + """ + This function takes a string representing a sentence in one + of the datasets and cleans it using various regular expressions. + The types of regular expressions applied depend on the dataset. 
+ + Arguments + --------- + transcription : str + A transcribed sentence + dataset : str + Either "train", "eval2000", or "fisher" depending on the type + of data you want to clean. + acronyms : dict + Dictionary mapping acronyms to Fisher convention (only relevant for swbd1 training data) + acronyms_noi : dict + Dictionary mapping acronyms to Fisher convention without I (only relevant for swbd1 training data) + + Returns + ------- + A string containing the cleaned sentence. + + """ + dataset = dataset.strip().lower() + + if dataset == "train": + # This is similar to what swbd1_data_prep.sh and swbd1_map_words.pl does. + transcription = re.sub( + r"\[SILENCE\]", "", transcription, flags=re.IGNORECASE + ) + transcription = re.sub(r"<.*?>", "", transcription) + transcription = match_swbd1(transcription.strip()) + + transcription = re.sub(r"\s\s+", " ", transcription) + + # Convert acronyms to Fisher convention + if len(transcription) > 0: + transcription = map_acronyms(acronyms, acronyms_noi, transcription) + + # Split acronyms written as u._c._l._a._ into single characters (e.g. u c l a) + transcription = remove_acronym_symbols(transcription) + transcription = transcription.upper().strip() + + elif dataset in ["eval2000", "hub5", "test"]: + # This is similar to what eval2000_data_prep.sh does. + transcription = match_eval2000(transcription.strip()) + elif dataset == "fisher": + # This is similar to what fisher_data_prep.sh does. + transcription = match_fisher(transcription.strip()) + else: + raise NameError(f"Invalid dataset descriptor '{dataset}' supplied.") + + # Remove redundant whitespaces + transcription = re.sub(r"\s\s+", " ", transcription) + return transcription.strip() + + +# cspell:ignore WOLMANIZED +def match_swbd1(text): + """ + Clean transcripts in the Switchboard-1 training data. + The transformations we do are: + - remove laughter markings, e.g. [LAUGHTER-STORY] -> STORY + - Remove partial-words, e.g. 
-[40]1K becomes -1K and -[AN]Y IY becomes -Y + Also, curly braces, which appear to be used for "nonstandard" + words or non-words, are removed, e.g. {WOLMANIZED} -> WOLMANIZED + + This is similar to Kaldi's swbd1_map_words.pl. + + Arguments + --------- + text : str + Input text from the Switchboard-1 training data. + + Returns + ------- + A string containing the cleaned sentence. + """ + tokens = text.split() + parsed_tokens = [] + # cspell:disable + for token in tokens: + # e.g. [LAUGHTER-STORY] -> STORY; elem 1 and 3 relate to preserving trailing "-" + m = re.match(r"(|-)^\[LAUGHTER-(.+)\](|-)$", token, flags=re.IGNORECASE) + token = "".join(m.group(1, 2, 3)) if m else token + + # e.g. [IT'N/ISN'T] -> IT'N + # Note: 1st part may include partial-word stuff, which we process further below, + # e.g. [LEM[GUINI]-/LINGUINI] + m = re.match(r"^\[(.+)/.+\](|-)$", token) + token = "".join(m.group(1, 2)) if m else token + + # e.g. -[AN]Y -> -Y + m = re.match(r"^(|-)\[[^][]+\](.+)$", token) + token = "-" + m.group(2) if m else token + + # e.g. AB[SOLUTE]- -> AB-; + m = re.match(r"^(.+)\[[^][]+\](|-)$", token) + token = "".join(m.group(1, 2)) if m else token + + # e.g. EX[SPECIALLY]-/ESPECIALLY] -> EX + m = re.match(r"([^][]+)\[.+\]$", token) + token = m.group(1) if m else token + + # e.g. {YUPPIEDOM} -> YUPPIEDOM + m = re.match(r"^\{(.+)\}$", token) + token = m.group(1) if m else token + + # e.g. AMMU[N]IT- -> AMMU-IT + m = re.match(r"(\w+)\[([^][])+\](\w+)", token) + token = m.group(1) + "-" + m.group(3) if m else token + + # e.g. 
THEM_1 -> THEM
+        token = re.sub(r"_\d+$", "", token)
+        parsed_tokens.append(token)
+    return " ".join(parsed_tokens)
+    # cspell:enable
+
+
+def match_eval2000(text):
+    """
+    Clean transcripts in the 2000 Hub5 english evaluation test (LDC2002S09 LDC2002T43)
+    See:
+    http://www.ldc.upenn.edu/Catalog/catalogEntry.jsp?catalogId=LDC2002S09
+    http://www.ldc.upenn.edu/Catalog/CatalogEntry.jsp?catalogId=LDC2002T43
+
+    This is similar to eval2000_data_prep.sh
+
+    Arguments
+    ---------
+    text : str
+        Input text from the eval2000 test data.
+
+    Returns
+    -------
+    A string containing the cleaned sentence.
+    """
+    cleaned_text = ""
+
+    # Remove utterance when it's just optional nonwords
+    text = text.strip().upper()
+    for nw in ["UM-HUM", "UMM", "UH-HUH", "MHM", "UH-OH"]:
+        text = text.replace(nw, "")
+
+    if "IGNORE_TIME_SEGMENT_" not in text:
+        # Remove <B_ASIDE> and <E_ASIDE>.
+        cleaned_text = re.sub(r"<.*?>", "", text)
+        # Remove everything that is declared optional e.g. (%HESITATION) or (WE-)
+        cleaned_text = re.sub(r"[\(\[].*?[\)\]]", "", cleaned_text)
+    else:
+        logger.debug(f"Ignoring eval2000 segment: {text}")
+
+    return cleaned_text
+
+
+def match_fisher(text):
+    """
+    Clean transcripts in the Fisher corpus.
+
+    This is similar to fisher_data_prep.sh
+
+    Arguments
+    ---------
+    text : str
+        Input text from the Fisher data.
+
+    Returns
+    -------
+    A string containing the cleaned sentence.
+ """ + + cleaned_text = "" + + # Remove utterance when it's just optional nonwords + text = text.strip().upper() + for nw in ["UM-HUM", "UMM", "UH-HUH", "MHM", "UH-OH"]: + text = text.replace(nw, "") + + if "((" not in text: + cleaned_text = re.sub( + r"\[laugh\]", "[laughter]", text, flags=re.IGNORECASE + ) + cleaned_text = re.sub( + r"\[sigh\]", "[noise]", cleaned_text, flags=re.IGNORECASE + ) + cleaned_text = re.sub( + r"\[cough\]", "[noise]", cleaned_text, flags=re.IGNORECASE + ) + cleaned_text = re.sub( + r"\[sigh\]", "[noise]", cleaned_text, flags=re.IGNORECASE + ) + cleaned_text = re.sub( + r"\[mn\]", "[noise]", cleaned_text, flags=re.IGNORECASE + ) + cleaned_text = re.sub( + r"\[breath\]", "[noise]", cleaned_text, flags=re.IGNORECASE + ) + cleaned_text = re.sub( + r"\[lipsmack\]", "[noise]", cleaned_text, flags=re.IGNORECASE + ) + return cleaned_text + + +def remove_acronym_symbols(text): + """ + Remove symbols according to the Fisher acronym convention. + This splits acronyms written as u._c._l._a._ into single characters (e.g. u c l a) + + Arguments + --------- + text : str + Input text + + Returns + ------- + A string containing the cleaned text. + + """ + cleaned_text = re.sub(r"\._", " ", text) + cleaned_text = re.sub(r"\.", "", cleaned_text) + cleaned_text = re.sub(r"them_1", "them", cleaned_text, flags=re.IGNORECASE) + return cleaned_text + + +def prepare_lexicon(lexicon_file, output_file): + """ + Prepare the swbd1 lexicon for further processing. + The lexicon is used to find acronyms and to convert them into Fisher convention. 
+ + Arguments + --------- + lexicon_file : str + Path to the sw-ms98-dict.text file in the Switchboard corpus + output_file : str + Path to store the cleaned lexicon at + + Returns + ------- + A list containing the cleaned lexicon entries + + """ + lexicon = [] + lex_out = open(output_file, "w", encoding="utf-8") + with open(lexicon_file, encoding="utf-8") as lf: + # Skip first row + next(lf) + for line in lf: + # Skip header + if line.startswith("#"): + continue + parsed_line = match_swbd1(line.strip()) + if len(parsed_line) > 0: + lexicon.append(parsed_line) + lex_out.write(f"{parsed_line}\n") + return lexicon + + +def make_acronym_map(save_folder, lexicon_file, acronym_map_file): + """ + Create mappings that can be used to convert acronyms in the Switchboard corpus + into acronyms using the Fisher corpus convention. + + Examples we want to convert: + IBM to i._b._m. + BBC to b._b._c. + BBCs to b._b._c.s + + This is what Kaldi's format_acronyms_dict.py does. + + Arguments + --------- + save_folder : str + Folder to store the acronym map on disk + lexicon_file : str + Path to the sw-ms98-dict.text file + acronym_map_file : str + File to store the acronym map in + + Returns + ------- + Two dictionaries mapping from swbd acronyms to acronyms according to the Fisher corpus convention. + The first dict contains all entries, the other has the letter I removed. 
+ """ + + # Taken from https://github.com/kaldi-asr/kaldi/blob/master/egs/swbd/s5c/local/MSU_single_letter.txt + msu_single_letter = [ + "A ey", + "B b iy", + "C s iy", + "D d iy", + "E iy", + "F eh f", + "G jh iy", + "H ey ch", + "I ay", + "J jh ey", + "K k ey", + "L eh l", + "M eh m", + "N eh n", + "O ow", + "P p iy", + "Q k y uw", + "R aa r", + "S eh s", + "T t iy", + "U y uw", + "V v iy", + "W d ah b ax l y uw", + "X eh k s", + "Y w ay", + "Z z iy", + ] + + fin_lex = ( + prepare_lexicon(lexicon_file, os.path.join(save_folder, "lexicon.txt")) + + msu_single_letter + ) + logger.info( + f"Prepared Swbd1 + MSU single letter lexicon with {len(fin_lex)} entries" + ) + fout_map = open(acronym_map_file, "w", encoding="utf-8") + + # Initialise single letter dictionary + dict_letter = {} + for single_letter_lex in msu_single_letter: + items = single_letter_lex.split() + dict_letter[items[0]] = single_letter_lex[len(items[0]) + 1 :].strip() + + for lex in fin_lex: + items = lex.split() + word = items[0] + lexicon = lex[len(items[0]) + 1 :].strip() + # find acronyms from words with only letters and ' + pre_match = re.match(r"^[A-Za-z]+$|^[A-Za-z]+\'s$|^[A-Za-z]+s$", word) + if pre_match: + # find if words in the form of xxx's is acronym + if word[-2:] == "'s" and (lexicon[-1] == "s" or lexicon[-1] == "z"): + actual_word = word[:-2] + actual_lexicon = lexicon[:-2] + acronym_lexicon = "" + for w in actual_word: + acronym_lexicon = ( + acronym_lexicon + dict_letter[w.upper()] + " " + ) + if acronym_lexicon.strip() == actual_lexicon: + acronym_mapped = "" + acronym_mapped_back = "" + for w in actual_word[:-1]: + acronym_mapped = acronym_mapped + w.lower() + "._" + acronym_mapped_back = ( + acronym_mapped_back + w.lower() + " " + ) + acronym_mapped = ( + acronym_mapped + actual_word[-1].lower() + ".'s" + ) + acronym_mapped_back = ( + acronym_mapped_back + actual_word[-1].lower() + "'s" + ) + fout_map.write( + word + + "\t" + + acronym_mapped + + "\t" + + acronym_mapped_back + + 
"\n" + ) + + # find if words in the form of xxxs is acronym # cspell:ignore xxxs + elif word[-1] == "s" and (lexicon[-1] == "s" or lexicon[-1] == "z"): + actual_word = word[:-1] + actual_lexicon = lexicon[:-2] + acronym_lexicon = "" + for w in actual_word: + acronym_lexicon = ( + acronym_lexicon + dict_letter[w.upper()] + " " + ) + if acronym_lexicon.strip() == actual_lexicon: + acronym_mapped = "" + acronym_mapped_back = "" + for w in actual_word[:-1]: + acronym_mapped = acronym_mapped + w.lower() + "._" + acronym_mapped_back = ( + acronym_mapped_back + w.lower() + " " + ) + acronym_mapped = ( + acronym_mapped + actual_word[-1].lower() + ".s" + ) + acronym_mapped_back = ( + acronym_mapped_back + actual_word[-1].lower() + "'s" + ) + fout_map.write( + word + + "\t" + + acronym_mapped + + "\t" + + acronym_mapped_back + + "\n" + ) + + # find if words in the form of xxx (not ended with 's or s) is acronym + elif word.find("'") == -1 and word[-1] != "s": + acronym_lexicon = "" + for w in word: + acronym_lexicon = ( + acronym_lexicon + dict_letter[w.upper()] + " " + ) + if acronym_lexicon.strip() == lexicon: + acronym_mapped = "" + acronym_mapped_back = "" + for w in word[:-1]: + acronym_mapped = acronym_mapped + w.lower() + "._" + acronym_mapped_back = ( + acronym_mapped_back + w.lower() + " " + ) + acronym_mapped = acronym_mapped + word[-1].lower() + "." 
+ acronym_mapped_back = acronym_mapped_back + word[-1].lower() + fout_map.write( + word + + "\t" + + acronym_mapped + + "\t" + + acronym_mapped_back + + "\n" + ) + + fout_map.close() + + # Load acronym map for further processing + fin_map = open(acronym_map_file, encoding="utf-8") + dict_acronym = {} + dict_acronym_noi = {} # Mapping of acronyms without I, i + for pair in fin_map: + items = pair.split("\t") + dict_acronym[items[0]] = items[1] + dict_acronym_noi[items[0]] = items[1] + fin_map.close() + del dict_acronym_noi["I"] + del dict_acronym_noi["i"] + + return dict_acronym, dict_acronym_noi + + +def map_acronyms(dict_acronym, dict_acronym_noi, transcription): + """ + Transform acronyms in Switchboard transcripts into Fisher corpus convention. + + Examples we want to convert: + IBM to i._b._m. + BBC to b._b._c. + BBCs to b._b._c.s + + This is what Kaldi's map_acronyms_transcripts.py does. + + Arguments + --------- + dict_acronym : dict + Mapping from swbd acronyms to acronyms according to the Fisher corpus convention + dict_acronym_noi : dict + Mapping from swbd acronyms to acronyms according to the Fisher corpus convention with the letter I removed + transcription : str + A sentence in the Switchboard transcripts + Returns + ------- + The original sentence but with acronyms according to the Fisher convention + """ + + items = transcription.split() + utt_length = len(items) + # First pass mapping to map I as part of acronym + for i in range(utt_length): + if items[i] == "I": + x = 0 + while i - 1 - x >= 0 and re.match(r"^[A-Z]$", items[i - 1 - x]): + x += 1 + + y = 0 + while i + 1 + y < utt_length and re.match( + r"^[A-Z]$", items[i + 1 + y] + ): + y += 1 + + if x + y > 0: + for bias in range(-x, y + 1): + items[i + bias] = dict_acronym[items[i + bias]] + + # Second pass mapping (not mapping 'i' and 'I') + for i in range(len(items)): + if items[i] in dict_acronym_noi.keys(): + items[i] = dict_acronym_noi[items[i]] + sentence = " ".join(items[1:]) + + return 
items[0] + " " + sentence + + +def make_name_to_disk_dict(mapping_table: str): + """ + The Switchboard data is spread across 4 DVDs + represented by directories ("swb1_d1", "swb1_d2" and so on). + This function creates a lookup dictionary to map a given filename to the + disk it was stored on. + This information is useful to assemble the absolute path to the sph audio + files. + + Arguments + --------- + mapping_table : str + String representing the path to the mapping table file "swb1_all.dvd.tbl" + provided along with the rest of the Switchboard data. + + Returns + ------- + name2disk : dict + A dictionary that maps from sph filename (key) to disk-id (value) + """ + name2disk = {} + with open(mapping_table, encoding="utf-8") as mt: + for line in mt: + split = line.split() + name = split[1].strip() + name2disk[name] = split[0].strip() + return name2disk + + +def swbd1_data_prep( + data_folder, + save_folder, + splits, + split_ratio, + add_fisher_corpus=False, + max_utt=9999999999, +): + """ + Prepare the Switchboard Phase 1 training data (LDC97S62). + + Arguments + --------- + data_folder : str + Path to the data. Expects the LDC97S62 directory to be located there. + save_folder : str + Path where the file output will be stored + splits : list + A list of data splits you want to obtain from the Switchboard dataset (usually ["train", "dev"]) + split_ratio : list + List containing the portions you want to allocate to each of your data splits e.g. [90, 10] + add_fisher_corpus : bool + If True, a separate csv file called "train_lm.csv" will be created which contains + the Switchboard training data and the Fisher corpus transcripts. 
+ max_utt : int + Exclude utterances once they appear more than a specified number of times + + Returns + ------- + A list containing the prepared data for further processing + """ + + logger.info("Starting data preparation for main Switchboard corpus") + + train_data_folder = os.path.join(data_folder, "LDC97S62") + check_data_folder(train_data_folder) + + if not os.path.exists(save_folder): + os.makedirs(save_folder) + + download_transcripts(save_folder) + + # Make a mapping from Switchboard acronyms to Fisher convention + logger.info("Preparing acronym mappings") + lexicon_input_file = os.path.join( + save_folder, "swb_ms98_transcriptions", "sw-ms98-dict.text" + ) + acronym_map_output_file = os.path.join(save_folder, "acronyms.map") + dict_acronym, dict_acronym_noi = make_acronym_map( + save_folder, lexicon_input_file, acronym_map_output_file + ) + + assert len(splits) == len(split_ratio) + if sum(split_ratio) != 100 and sum(split_ratio) != 1: + error_msg = ( + "Implausible split ratios! Make sure they equal to 1 (or 100)." + ) + raise ValueError(error_msg) + if sum(split_ratio) == 100: + split_ratio = [i / 100 for i in split_ratio] + + # collect all files containing transcriptions + transcript_files = get_all_files( + os.path.join(save_folder, "swb_ms98_transcriptions"), + match_and=["trans.text"], + ) + split_lens = [int(i * len(transcript_files)) for i in split_ratio] + + name2disk = make_name_to_disk_dict( + os.path.join(train_data_folder, "docs/swb1_all.dvd.tbl") + ) + logger.info( + f"Made name2disk mapping dict containing {len(name2disk)} conversations." + ) + + start = 0 + stop = 0 + # We save all lines from the swbd train split, in case we want to combine them + # with the Fisher corpus for LM and Tokenizer training later + swbd_train_lines = [] + for i, split in enumerate(splits): + stop += split_lens[i] + transcript_files_split = transcript_files[start:stop] + logger.info( + f"Preparing data for {split} split. 
" + f"Split will contain {len(transcript_files_split)} " + f"conversations separated by channel." + ) + + start += split_lens[i] + + csv_lines = [ + [ + "ID", + "duration", + "start", + "stop", + "channel", + "wav", + "words", + "spk_id", + ] + ] + # Open each transcription file and extract information + for filename in transcript_files_split: + with open(filename, encoding="utf-8") as file: + for line in file: + str_split = line.split() + id = str_split[0].strip() + channel = id.split("-")[0][-1] + wav_name = id.split("-")[0][:6] + ".sph" + spk_id = wav_name.replace(".sph", channel) + wav_name = wav_name.replace(wav_name[0:2], "sw0") + disk = name2disk[wav_name] + + wav_path = os.path.join( + train_data_folder, disk, "data", wav_name + ) + # We want the segment start and end times in samples, + # so we can slice the segment from the tensor + seg_start = int(float(str_split[1].strip()) * SAMPLERATE) + seg_end = int(float(str_split[2].strip()) * SAMPLERATE) + audio_duration = (seg_end - seg_start) / SAMPLERATE + + transcription = " ".join(str_split[3:]) + cleaned_transcription = filter_text( + transcription, + dataset="train", + acronyms=dict_acronym, + acronyms_noi=dict_acronym_noi, + ) + + # Skip empty transcriptions + if len(cleaned_transcription) > 0: + csv_lines.append( + [ + id, + audio_duration, + seg_start, + seg_end, + channel, + wav_path, + cleaned_transcription, + spk_id, + ] + ) + + # We store the lines from the first split + # (assuming this is the training data) in a separate list + # so we can easily merge it with the Fisher data later + if add_fisher_corpus and i == 0: + swbd_train_lines.append([id, cleaned_transcription]) + # Setting path for the csv file + csv_file = os.path.join(save_folder, str(split + ".csv")) + logger.info(f"Creating csv file {csv_file}") + + write_csv(csv_file, csv_lines, utt_id_idx=6, max_utt=max_utt) + return swbd_train_lines + + +def eval2000_data_prep(data_folder: str, save_folder: str): + """ + Prepare the eval2000/Hub5 
English data (LDC2002S09 and LDC2002T43). + The data serves as the test set and is separated into + the full dataset (test.csv), the Switchboard portion + of the dataset (test_swbd.csv) and the Callhome portion + of the dataset (test_callhome.csv). + + Arguments + --------- + data_folder : str + Path to the folder where the eval2000/Hub5 English data is located. + save_folder : str + The directory to store the csv files at. + """ + + logger.info( + "Begin preparing the eval2000 Hub5 English test set and transcripts (LDC2002S09 and LDC2002T43)" + ) + + audio_folder = os.path.join(data_folder, "LDC2002S09/hub5e_00/english") + transcription_file = os.path.join( + data_folder, + "LDC2002T43/2000_hub5_eng_eval_tr/reference/hub5e00.english.000405.stm", + ) + + for d in [audio_folder, transcription_file]: + if not os.path.exists(d): + err_msg = f"The folder {d} does not exist (it is expected to prepare the eval2000/hub5 test set)" + raise OSError(err_msg) + + csv_lines_callhome = [ + ["ID", "duration", "start", "stop", "channel", "wav", "words", "spk_id"] + ] + csv_lines_swbd = [ + ["ID", "duration", "start", "stop", "channel", "wav", "words", "spk_id"] + ] + + with open(transcription_file, encoding="utf-8") as file: + utt_count = 0 + for line in file: + # Skip header + if line.startswith(";;"): + continue + + str_split = line.split() + # Sometimes the end time of a segment is shifted to the right + # So we remove all empty strings from the split + str_split = [i for i in str_split if len(i) > 0] + + # Make ID unique + id = str_split[2].strip() + "_" + str(utt_count) + channel = str_split[1].strip() + + wav_name = str_split[0].strip() + ".sph" + wav_path = os.path.join(audio_folder, wav_name) + + spk_id = str_split[2].strip() + + # The label "en" stands for "Callhome conversations" + # The label "sw" stands for "Switchboard conversations" + is_swbd = str_split[0].strip().startswith("sw_") + + # We want the segment start and end times in samples, + # so we can slice the 
segment from the tensor + try: + seg_start = int(float(str_split[3].strip()) * SAMPLERATE) + seg_end = int(float(str_split[4].strip()) * SAMPLERATE) + except ValueError: + logger.error( + f"Unable to determine start and end time of segment. " + f"This should not happen! Split in " + f"question was: {str_split}" + ) + + audio_duration = (seg_end - seg_start) / SAMPLERATE + + transcription = " ".join(str_split[6:]) + cleaned_transcription = filter_text( + transcription, dataset="eval2000" + ) + + # Skip empty transcriptions + if len(cleaned_transcription) > 0: + utt_line = [ + id, + audio_duration, + seg_start, + seg_end, + channel, + wav_path, + cleaned_transcription, + spk_id, + ] + if is_swbd: + csv_lines_swbd.append(utt_line) + else: + csv_lines_callhome.append(utt_line) + utt_count += 1 + + merge_files = [] + for name, lines in [ + ("swbd", csv_lines_swbd), + ("callhome", csv_lines_callhome), + ]: + filename = f"test_{name}.csv" + csv_file = os.path.join(save_folder, filename) + logger.info(f"Creating csv file {csv_file}") + + with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + for line in lines: + csv_writer.writerow(line) + + merge_files.append(filename) + merge_csvs( + data_folder=save_folder, csv_lst=merge_files, merged_csv="test.csv" + ) + + glm_dir = os.path.join( + data_folder, + "LDC2002T43/2000_hub5_eng_eval_tr/reference", + ) + logger.info("Start parsing mapping rules in en20000405_hub5.glm") + parse_glm_file(glm_dir, save_folder) + + +def parse_glm_file(glm_dir, save_folder): + """ + Parse the file called en20000405_hub5.glm. + This file contains the transcript filtering rules for the + Hub4-E and Hub5-E Evaluations. + + These filtering rules are needed during inference to find valid word alternatives. 
+ + Arguments + --------- + glm_dir : str + Location of the en20000405_hub5.glm file in the eval2000 test set + save_folder : str + Directory to store the parsed GLM file + """ + results = defaultdict(list) + with open( + os.path.join(glm_dir, "en20000405_hub5.glm"), encoding="utf-8" + ) as file: + is_contraction = False + for line in file: + # Skip comments + if "CONTRACTIONIZER" in line: + is_contraction = True + if line.startswith(";;") or line.startswith("*"): + continue + line_split = line.split("=>") + if len(line_split) > 1: + wrd = line_split[0].replace("[", "").replace("]", "").strip() + # Split alternative at comment + if not is_contraction: + alternative = line_split[1] + alternative = alternative.split(";;")[0].strip() + # Split alternative again add additional information + alternative = ( + alternative.split("/")[0] + .replace("[", "") + .replace("]", "") + .strip() + ) + results[wrd] += [alternative] + else: + # Now we parse contraction rules (last 1000 rows or so) + alternative = ( + line_split[1] + .replace("/ [ ] __ [ ]", "") + .replace("[{", "") + .replace("}]", "") + ) + alternatives = alternative.split("/") + alternatives = [ + i.replace("[", "").replace("]", "").strip() + for i in alternatives + ] + results[wrd] += alternatives + + csv_file = os.path.join(save_folder, "glm.csv") + logger.info("Writing GLM csv file") + + with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + for wrd, alternatives in results.items(): + line = [wrd, "|".join(alternatives)] + csv_writer.writerow(line) + + +def fisher_data_prep(data_folder, save_folder): + """ + Prepare Fisher data for Tokenizer and LM Training. + The Fisher transcripts are located at + LDC2004T19/fe_03_p1_tran and LDC2005T19/fe_03_p2_tran. + + Arguments + --------- + data_folder : str + Path to the folder where the Fisher data is located. 
+ save_folder : str + Path to the folder where you want to store the prepared data. + + Returns + ------- + A list containing the prepared data for further processing + """ + + logger.info( + "Begin preparing the Fisher corpus transcripts (LDC2002S09 and LDC2002T43)" + ) + + fisher_dirs = [ + "LDC2004T19/fe_03_p1_tran/data/trans", + "LDC2005T19/fe_03_p2_tran/data/trans", + ] + + for fisher_dir in fisher_dirs: + joined_path = os.path.join(data_folder, fisher_dir) + if not os.path.exists(joined_path): + err_msg = f"The folder {joined_path} does not exist (it is expected to prepare the Fisher corpus)" + raise OSError(err_msg) + + csv_lines = [["ID", "words"]] + num_files_processed = 0 + num_dirs_processed = 0 + utt_count = 0 + + for fisher_dir in fisher_dirs: + joined_path = os.path.join(data_folder, fisher_dir) + transcript_files = get_all_files(joined_path, match_and=[".txt"]) + + for transcript_file in transcript_files: + with open(transcript_file, encoding="utf-8") as file: + for line in file: + # skip header and empty lines + if line.startswith("#") or len(line.strip()) < 1: + continue + + # Create unique id + id = "fisher-" + str(utt_count) + transcription = line.split()[3:] + transcription = " ".join(transcription) + transcription_clean = filter_text( + transcription, dataset="fisher" + ) + + # Split acronyms written as u._c._l._a._ into single characters (e.g. u c l a) + transcription_clean = remove_acronym_symbols( + transcription_clean + ) + transcription_clean = transcription_clean.upper().strip() + + # Skip empty transcriptions + if len(transcription_clean) > 0: + csv_lines.append([id, transcription_clean]) + utt_count += 1 + # This is just for accounting + num_files_processed += 1 + num_dirs_processed += 1 + + logger.info( + f"Fisher corpus: Processed {num_files_processed} files in " + f"{num_dirs_processed} directories." 
+ ) + + csv_file = os.path.join(save_folder, "fisher.csv") + logger.info(f"Creating csv file {csv_file}") + + with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + for line in csv_lines: + csv_writer.writerow(line) + return csv_lines + + +if __name__ == "__main__": + data_folder = "/nfs/data/ldc" + save_folder = "/mnt/md0/user/wagnerdo/speechbrain/recipes/Switchboard/test" + + prepare_switchboard( + data_folder, + save_folder, + splits=["train", "dev"], + split_ratio=[99, 1], + merge_lst=[], + skip_prep=False, + add_fisher_corpus=True, + ) diff --git a/recipes/TIMIT/ASR/CTC/README.md b/recipes/TIMIT/ASR/CTC/README.md index 7ce3d3f5e9..c7f55bdf03 100644 --- a/recipes/TIMIT/ASR/CTC/README.md +++ b/recipes/TIMIT/ASR/CTC/README.md @@ -2,15 +2,24 @@ This folder contains the scripts to train a CTC system using TIMIT. TIMIT is a speech dataset available from LDC: https://catalog.ldc.upenn.edu/LDC93S1 -# How to run -python train.py hparams/train.yaml +# Running the Code + +To execute the code, use the following command: + +``` +python train.py hparams/train.yaml --data_folder=your_data_folder/TIMIT --jit +``` + +**Note on Compilation**: +Enabling the just-in-time (JIT) compiler significantly improves code performance, resulting in a 50-60% speed boost. We highly recommend utilizing the JIT compiler for optimal results. +This speed improvement is observed specifically when using the CRDNN model. # Results | Release | hyperparams file | Val. 
PER | Test PER | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| :-----------:| -| 20-05-22 | train.yaml | 12.80 | 14.78 | https://drive.google.com/drive/folders/1OhBOTfC34PaOuiLIUjEBP1JmmlBTxJ8D?usp=sharing | 1xV100 16GB | +| 20-05-22 | train.yaml | 12.80 | 14.78 | https://www.dropbox.com/sh/xjh9qrat7v8ssuq/AAA4inQHBzRcbAeB_I8GSXi_a?dl=0 | 1xV100 16GB | -The output folders with checkpoints and logs for TIMIT recipes can be found [here](https://drive.google.com/drive/folders/1ZcME-Wf4stlzW3j_iJ3zGDCkSy1V_Wjs?usp=sharing). +The output folders with checkpoints and logs for TIMIT recipes can be found [here](https://www.dropbox.com/sh/059jnwdass8v45u/AADTjh5DYdYKuZsgH9HXGx0Sa?dl=0). # Training Time About 1 min and 30 sec for each epoch with a TESLA V100. @@ -26,6 +35,15 @@ About 1 min and 30 sec for each epoch with a TESLA V100. Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem 
Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/TIMIT/ASR/CTC/hparams/train.yaml b/recipes/TIMIT/ASR/CTC/hparams/train.yaml index 78d7d28a37..86236dedd8 100644 --- a/recipes/TIMIT/ASR/CTC/hparams/train.yaml +++ b/recipes/TIMIT/ASR/CTC/hparams/train.yaml @@ -6,22 +6,26 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/augment_noise_CRDNN/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # Data files data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -open_rir_folder: !ref # where to store noisy data for augment (change it if needed) -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json +train_annotation: !ref /train.json +valid_annotation: !ref /dev.json +test_annotation: !ref /test.json skip_prep: False # Skip data preparation uppercase: False # Must be True when the TIMIT dataset is in the upper-case version -# Training parameters +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. 
+NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + +####################### Training Parameters #################################### number_of_epochs: 50 batch_size: 8 lr: 1.0 @@ -32,7 +36,7 @@ sample_rate: 16000 n_fft: 400 n_mels: 40 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 2 @@ -49,30 +53,73 @@ output_neurons: 40 blank_index: 0 # Dataloader options +num_workers: 4 train_dataloader_opts: batch_size: !ref + num_workers: !ref valid_dataloader_opts: batch_size: !ref + num_workers: !ref test_dataloader_opts: batch_size: !ref + num_workers: !ref normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] -# Can be removed to improve speed -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Models ########################################## epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref @@ -119,7 +166,6 @@ modules: model: !ref output: !ref normalize: !ref - env_corrupt: !ref jit_module_keys: [model] diff --git a/recipes/TIMIT/ASR/CTC/train.py b/recipes/TIMIT/ASR/CTC/train.py index 75f7207c1f..1f2e13faf5 100644 --- a/recipes/TIMIT/ASR/CTC/train.py +++ b/recipes/TIMIT/ASR/CTC/train.py @@ -5,7 +5,11 @@ is used at test time to improve the system performance. To run this recipe, do the following: -> python train.py hparams/train.yaml --data_folder /path/to/TIMIT +> python train.py hparams/train.yaml --data_folder /path/to/TIMIT --jit + +Note on Compilation: +Enabling the just-in-time (JIT) compiler with --jit significantly improves code performance, +resulting in a 50-60% speed boost. We highly recommend utilizing the JIT compiler for optimal results. 
Authors * Mirco Ravanelli 2020 @@ -14,13 +18,14 @@ import os import sys -import torch -import logging -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -29,14 +34,10 @@ def compute_forward(self, batch, stage): "Given an input batch it computes the phoneme probabilities." batch = batch.to(self.device) wavs, wav_lens = batch.sig - # Adding optional augmentation when specified: - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "env_corrupt"): - wavs_noise = self.hparams.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) feats = self.hparams.compute_features(wavs) feats = self.modules.normalize(feats, wav_lens) @@ -51,9 +52,9 @@ def compute_objectives(self, predictions, batch, stage): pout, pout_lens = predictions phns, phn_lens = batch.phn_encoded - if stage == sb.Stage.TRAIN and hasattr(self.hparams, "env_corrupt"): - phns = torch.cat([phns, phns], dim=0) - phn_lens = torch.cat([phn_lens, phn_lens], dim=0) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + phns = self.hparams.wav_augment.replicate_labels(phns) + phn_lens = self.hparams.wav_augment.replicate_labels(phn_lens) loss = self.hparams.compute_cost(pout, phns, pout_lens, phn_lens) self.ctc_metrics.append(batch.id, pout, phns, pout_lens, phn_lens) @@ -95,7 +96,8 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats={"loss": stage_loss, "PER": per}, ) self.checkpointer.save_and_keep_only( - meta={"PER": per}, min_keys=["PER"], + meta={"PER": per}, + min_keys=["PER"], ) elif stage == sb.Stage.TEST: @@ -103,12 +105,18 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats={"loss": stage_loss, "PER": per}, ) - with open(self.hparams.wer_file, "w") as w: - w.write("CTC loss stats:\n") - self.ctc_metrics.write_stats(w) - w.write("\nPER stats:\n") - self.per_metrics.write_stats(w) - print("CTC and PER stats written to ", self.hparams.wer_file) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + w.write("CTC loss stats:\n") + self.ctc_metrics.write_stats(w) + w.write("\nPER stats:\n") + self.per_metrics.write_stats(w) + print( + "CTC and PER stats written to ", + self.hparams.test_wer_file, + ) def dataio_prep(hparams): @@ -197,12 +205,11 @@ def text_pipeline(phn): # Begin Recipe! 
if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Dataset prep (parsing TIMIT and annotation into csv files) @@ -230,6 +237,7 @@ def text_pipeline(phn): "uppercase": hparams["uppercase"], }, ) + run_on_main(hparams["prepare_noise_data"]) # Dataset IO prep: creating Dataset objects and proper encodings for phones train_data, valid_data, test_data, label_encoder = dataio_prep(hparams) diff --git a/recipes/TIMIT/ASR/seq2seq/README.md b/recipes/TIMIT/ASR/seq2seq/README.md index dc2ed0fde0..009ae68b4d 100644 --- a/recipes/TIMIT/ASR/seq2seq/README.md +++ b/recipes/TIMIT/ASR/seq2seq/README.md @@ -2,17 +2,26 @@ This folder contains the scripts to train a seq2seq RNNN-based system using TIMIT. TIMIT is a speech dataset available from LDC: https://catalog.ldc.upenn.edu/LDC93S1 -# How to run -python train.py hparams/train.yaml +# Running the Code + +To execute the code, use the following command: + +``` +python train.py hparams/train.yaml --data_folder=your_data_folder/TIMIT --jit +``` + +**Important Note on Compilation**: +Enabling the just-in-time (JIT) compiler with --jit significantly improves code performance, resulting in a 50-60% speed boost. We highly recommend utilizing the JIT compiler for optimal results. +This speed improvement is observed specifically when using the CRDNN model. # Results | Release | hyperparams file | Val. 
PER | Test PER | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| :-----------:| -| 20-05-22 | train.yaml | 12.50 | 14.07 | https://drive.google.com/drive/folders/1OOieZsNJiLSUSjxidmXg0ywYDJCw0dfm?usp=sharing | 1xV100 16GB | -| 21-04-08 | train_with_wav2vec2.yaml | 7.11 | 8.04 | https://drive.google.com/drive/folders/1-IbO7hldwrRh4rwz9xAYzKeeMe57YIiq?usp=sharing | 1xV100 32GB | +| 20-05-22 | train.yaml | 12.50 | 14.07 | https://www.dropbox.com/sh/cran9y7da18ehb1/AADQ7Nu2eNuNF6V_vyqVAlA_a?dl=0 | 1xV100 16GB | +| 21-04-08 | train_with_wav2vec2.yaml | 7.11 | 8.04 | https://www.dropbox.com/sh/ablljzwv5rl7007/AAAKlTlFw3TZ_lZFZYwNpd8la?dl=0 | 1xV100 32GB | -The output folders with checkpoints and logs for TIMIT recipes can be found [here](https://drive.google.com/drive/folders/1ZcME-Wf4stlzW3j_iJ3zGDCkSy1V_Wjs?usp=sharing). +The output folders with checkpoints and logs for TIMIT recipes can be found [here](https://www.dropbox.com/sh/059jnwdass8v45u/AADTjh5DYdYKuZsgH9HXGx0Sa?dl=0). # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -24,6 +33,15 @@ The output folders with checkpoints and logs for TIMIT recipes can be found [her Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/TIMIT/ASR/seq2seq/extra_dependencies.txt b/recipes/TIMIT/ASR/seq2seq/extra_dependencies.txt deleted file mode 100644 index fd6bacd868..0000000000 --- a/recipes/TIMIT/ASR/seq2seq/extra_dependencies.txt +++ /dev/null @@ -1,4 +0,0 @@ -# For wav2vect recipe (HuggingFace) -transformers==4.4.0 -# For wav2vect recipe (Fairsec) -#fairseq diff --git a/recipes/TIMIT/ASR/seq2seq/hparams/train.yaml b/recipes/TIMIT/ASR/seq2seq/hparams/train.yaml index 95f0ab4ee4..5b30bf48f1 100644 --- a/recipes/TIMIT/ASR/seq2seq/hparams/train.yaml +++ b/recipes/TIMIT/ASR/seq2seq/hparams/train.yaml @@ -8,21 +8,21 @@ # Seed needs to be set at top of yaml, before objects with parameters are 
made seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_seq2seq/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # Data files data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json +train_annotation: !ref /train.json +valid_annotation: !ref /dev.json +test_annotation: !ref /test.json skip_prep: False # Skip data preparation uppercase: False # Must be True when the TIMIT dataset is in the upper-case version -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 50 batch_size: 8 # Used if dynamic_batching is False lr: 0.0003 @@ -34,7 +34,7 @@ sample_rate: 16000 n_fft: 400 n_mels: 40 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 2 @@ -76,18 +76,53 @@ test_dataloader_opts: # For more info, see speechbrain.dataio.sampler.DynamicBatchSampler dynamic_batching: False +feats_hop_size: 0.01 +max_batch_length: 5000 # in terms of frames +num_buckets: 20 +shuffle: False # if true re-creates batches at each epoch shuffling examples. +batch_ordering: random + dynamic_batch_sampler: - feats_hop_size: 0.01 - max_batch_len: 5000 # in terms of frames - num_buckets: 20 - shuffle_ex: False # if true re-creates batches at each epoch shuffling examples. 
- batch_ordering: random + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref +############################## Augmentations ################################### -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + + normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global @@ -149,7 +184,8 @@ ctc_cost: !name:speechbrain.nnet.losses.ctc_loss seq_cost: !name:speechbrain.nnet.losses.nll_loss label_smoothing: 0.1 -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher + +valid_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher embedding: !ref decoder: !ref linear: !ref @@ -158,14 +194,12 @@ greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher min_decode_ratio: !ref max_decode_ratio: !ref -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher +test_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref bos_index: !ref eos_index: !ref - 
blank_index: !ref min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref diff --git a/recipes/TIMIT/ASR/seq2seq/hparams/train_with_wav2vec2.yaml b/recipes/TIMIT/ASR/seq2seq/hparams/train_with_wav2vec2.yaml index 1c8018e877..de71cd0262 100644 --- a/recipes/TIMIT/ASR/seq2seq/hparams/train_with_wav2vec2.yaml +++ b/recipes/TIMIT/ASR/seq2seq/hparams/train_with_wav2vec2.yaml @@ -6,9 +6,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/CRDNN_wav2vec_seq2seq/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -17,23 +17,23 @@ wav2vec2_hub: "facebook/wav2vec2-large-lv60" # Data files data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json +train_annotation: !ref /train.json +valid_annotation: !ref /dev.json +test_annotation: !ref /test.json skip_prep: False # Skip data preparation uppercase: False # Must be True when the TIMIT dataset is in the upper-case version -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 batch_size: 8 lr: 0.0003 lr_wav2vec: 0.0001 ctc_weight: 0.2 sorting: ascending -auto_mix_prec: False +precision: fp32 # bf16, fp16 or fp32 sample_rate: 16000 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dnn_layers: 2 dnn_neurons: 1024 @@ -66,10 +66,40 @@ test_dataloader_opts: batch_size: !ref num_workers: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: 
!new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref @@ -79,7 +109,7 @@ enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN dnn_blocks: !ref dnn_neurons: !ref -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref @@ -133,7 +163,7 @@ ctc_cost: !name:speechbrain.nnet.losses.ctc_loss seq_cost: !name:speechbrain.nnet.losses.nll_loss label_smoothing: 0.1 -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher +valid_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher embedding: !ref decoder: !ref linear: !ref @@ -142,14 +172,12 @@ greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher min_decode_ratio: !ref max_decode_ratio: !ref -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher +test_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - ctc_linear: !ref bos_index: !ref eos_index: !ref - blank_index: !ref 
min_decode_ratio: !ref max_decode_ratio: !ref beam_size: !ref diff --git a/recipes/TIMIT/ASR/seq2seq/train.py b/recipes/TIMIT/ASR/seq2seq/train.py index 98978f6fb4..8f6943da3b 100644 --- a/recipes/TIMIT/ASR/seq2seq/train.py +++ b/recipes/TIMIT/ASR/seq2seq/train.py @@ -6,7 +6,11 @@ improve the system performance. To run this recipe, do the following: -> python train.py hparams/train.yaml --data_folder /path/to/TIMIT +> python train.py hparams/train.yaml --data_folder /path/to/TIMIT --jit + +Note on Compilation: +Enabling the just-in-time (JIT) compiler with --jit significantly improves code performance, +resulting in a 50-60% speed boost. We highly recommend utilizing the JIT compiler for optimal results. Authors * Mirco Ravanelli 2020 @@ -17,17 +21,18 @@ import os import sys + import torch -import logging -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main + +import speechbrain as sb +from speechbrain.dataio.batch import PaddedBatch from speechbrain.dataio.dataloader import SaveableDataLoader from speechbrain.dataio.sampler import DynamicBatchSampler -from speechbrain.dataio.batch import PaddedBatch +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger - -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -38,14 +43,10 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig phns_bos, _ = batch.phn_encoded_bos - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "env_corrupt"): - wavs_noise = self.hparams.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - phns_bos = torch.cat([phns_bos, phns_bos]) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + phns_bos = self.hparams.wav_augment.replicate_labels(phns_bos) feats = self.hparams.compute_features(wavs) feats = self.modules.normalize(feats, wav_lens) @@ -62,32 +63,30 @@ def compute_forward(self, batch, stage): logits = self.modules.seq_lin(h) p_seq = self.hparams.log_softmax(logits) + hyps = None if stage == sb.Stage.VALID: - hyps, scores = self.hparams.greedy_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens, hyps + hyps, _, _, _ = self.hparams.valid_searcher(x, wav_lens) elif stage == sb.Stage.TEST: - hyps, scores = self.hparams.beam_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens, hyps + hyps, _, _, _ = self.hparams.test_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens + return p_ctc, p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): "Given the network predictions and targets computed the NLL loss." - if stage == sb.Stage.TRAIN: - p_ctc, p_seq, wav_lens = predictions - else: - p_ctc, p_seq, wav_lens, hyps = predictions + p_ctc, p_seq, wav_lens, hyps = predictions ids = batch.id phns_eos, phn_lens_eos = batch.phn_encoded_eos phns, phn_lens = batch.phn_encoded - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - phns = torch.cat([phns, phns], dim=0) - phn_lens = torch.cat([phn_lens, phn_lens], dim=0) - phns_eos = torch.cat([phns_eos, phns_eos], dim=0) - phn_lens_eos = torch.cat([phn_lens_eos, phn_lens_eos], dim=0) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + phns = self.hparams.wav_augment.replicate_labels(phns) + phn_lens = self.hparams.wav_augment.replicate_labels(phn_lens) + phns_eos = self.hparams.wav_augment.replicate_labels(phns_eos) + phn_lens_eos = self.hparams.wav_augment.replicate_labels( + phn_lens_eos + ) loss_ctc = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens) loss_seq = self.hparams.seq_cost(p_seq, phns_eos, 
phn_lens_eos) @@ -99,27 +98,11 @@ def compute_objectives(self, predictions, batch, stage): self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens) self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens_eos) self.per_metrics.append( - ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim, + ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim ) return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): "Gets called when a stage (either training, validation, test) starts." 
self.ctc_metrics = self.hparams.ctc_stats() @@ -158,22 +141,26 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats={"loss": stage_loss, "PER": per}, ) - with open(self.hparams.wer_file, "w") as w: - w.write("CTC loss stats:\n") - self.ctc_metrics.write_stats(w) - w.write("\nseq2seq loss stats:\n") - self.seq_metrics.write_stats(w) - w.write("\nPER stats:\n") - self.per_metrics.write_stats(w) - print( - "CTC, seq2seq, and PER stats written to file", - self.hparams.wer_file, - ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + w.write("CTC loss stats:\n") + self.ctc_metrics.write_stats(w) + w.write("\nseq2seq loss stats:\n") + self.seq_metrics.write_stats(w) + w.write("\nPER stats:\n") + self.per_metrics.write_stats(w) + print( + "CTC, seq2seq, and PER stats written to file", + self.hparams.test_wer_file, + ) def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] # 1. 
Declarations: train_data = sb.dataio.dataset.DynamicItemDataset.from_json( @@ -277,15 +264,12 @@ def text_pipeline(phn): # Support for dynamic batching if hparams["dynamic_batching"]: dynamic_hparams = hparams["dynamic_batch_sampler"] - hop_size = dynamic_hparams["feats_hop_size"] + hop_size = hparams["feats_hop_size"] batch_sampler = DynamicBatchSampler( train_data, - dynamic_hparams["max_batch_len"], - num_buckets=dynamic_hparams["num_buckets"], + **dynamic_hparams, length_func=lambda x: x["duration"] * (1 / hop_size), - shuffle=dynamic_hparams["shuffle_ex"], - batch_ordering=dynamic_hparams["batch_ordering"], ) train_data = SaveableDataLoader( @@ -300,7 +284,7 @@ def text_pipeline(phn): hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Dataset prep (parsing TIMIT and annotation into csv files) diff --git a/recipes/TIMIT/ASR/seq2seq/train_with_wav2vec2.py b/recipes/TIMIT/ASR/seq2seq/train_with_wav2vec2.py index 60d9d8b694..51f2628bc1 100644 --- a/recipes/TIMIT/ASR/seq2seq/train_with_wav2vec2.py +++ b/recipes/TIMIT/ASR/seq2seq/train_with_wav2vec2.py @@ -16,13 +16,15 @@ import os import sys + import torch -import logging -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -33,11 +35,12 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig phns_bos, _ = batch.phn_encoded_bos - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if 
specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + phns_bos = self.hparams.wav_augment.replicate_labels(phns_bos) - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) x = self.modules.enc(feats) # output layer for ctc log-probabilities @@ -51,27 +54,30 @@ def compute_forward(self, batch, stage): logits = self.modules.seq_lin(h) p_seq = self.hparams.log_softmax(logits) + hyps = None if stage == sb.Stage.VALID: - hyps, scores = self.hparams.greedy_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens, hyps - + hyps, _, _, _ = self.hparams.valid_searcher(x, wav_lens) elif stage == sb.Stage.TEST: - hyps, scores = self.hparams.beam_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens, hyps + hyps, _, _, _ = self.hparams.test_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens + return p_ctc, p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): "Given the network predictions and targets computed the NLL loss." 
- if stage == sb.Stage.TRAIN: - p_ctc, p_seq, wav_lens = predictions - else: - p_ctc, p_seq, wav_lens, hyps = predictions + p_ctc, p_seq, wav_lens, hyps = predictions ids = batch.id phns_eos, phn_lens_eos = batch.phn_encoded_eos phns, phn_lens = batch.phn_encoded + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + phns = self.hparams.wav_augment.replicate_labels(phns) + phn_lens = self.hparams.wav_augment.replicate_labels(phn_lens) + phns_eos = self.hparams.wav_augment.replicate_labels(phns_eos) + phn_lens_eos = self.hparams.wav_augment.replicate_labels( + phn_lens_eos + ) + loss_ctc = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens) loss_seq = self.hparams.seq_cost(p_seq, phns_eos, phn_lens_eos) loss = self.hparams.ctc_weight * loss_ctc @@ -82,17 +88,11 @@ def compute_objectives(self, predictions, batch, stage): self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens) self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens_eos) self.per_metrics.append( - ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim, + ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim ) return loss - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): "Gets called when a stage (either training, validation, test) starts." 
self.ctc_metrics = self.hparams.ctc_stats() @@ -143,72 +143,20 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats={"loss": stage_loss, "PER": per}, ) - with open(self.hparams.wer_file, "w") as w: - w.write("CTC loss stats:\n") - self.ctc_metrics.write_stats(w) - w.write("\nseq2seq loss stats:\n") - self.seq_metrics.write_stats(w) - w.write("\nPER stats:\n") - self.per_metrics.write_stats(w) - print( - "CTC, seq2seq, and PER stats written to file", - self.hparams.wer_file, - ) - - def fit_batch(self, batch): - """Fit one batch, override to do multiple updates. - - The default implementation depends on a few methods being defined - with a particular behavior: - - * ``compute_forward()`` - * ``compute_objectives()`` - - Also depends on having optimizers passed at initialization. - - Arguments - --------- - batch : list of torch.Tensors - Batch of data to use for training. Default implementation assumes - this batch has two elements: inputs and targets. 
- - Returns - ------- - detached loss - """ - # Managing automatic mixed precision - if self.auto_mix_prec: - - self.wav2vec_optimizer.zero_grad() - self.adam_optimizer.zero_grad() - - with torch.cuda.amp.autocast(): - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - - self.scaler.scale(loss).backward() - self.scaler.unscale_(self.wav2vec_optimizer) - self.scaler.unscale_(self.adam_optimizer) - - if self.check_gradients(loss): - self.scaler.step(self.wav2vec_optimizer) - self.scaler.step(self.adam_optimizer) - - self.scaler.update() - else: - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - loss.backward() - - if self.check_gradients(loss): - self.wav2vec_optimizer.step() - self.adam_optimizer.step() - - self.wav2vec_optimizer.zero_grad() - self.adam_optimizer.zero_grad() - - return loss.detach().cpu() + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + w.write("CTC loss stats:\n") + self.ctc_metrics.write_stats(w) + w.write("\nseq2seq loss stats:\n") + self.seq_metrics.write_stats(w) + w.write("\nPER stats:\n") + self.per_metrics.write_stats(w) + print( + "CTC, seq2seq, and PER stats written to file", + self.hparams.test_wer_file, + ) def init_optimizers(self): "Initializes the wav2vec2 optimizer and model optimizer" @@ -225,10 +173,16 @@ def init_optimizers(self): ) self.checkpointer.add_recoverable("adam_opt", self.adam_optimizer) + self.optimizers_dict = { + "wav2vec_opt": self.wav2vec_optimizer, + "adam_opt": self.adam_optimizer, + } + def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] # 1. 
Declarations: train_data = sb.dataio.dataset.DynamicItemDataset.from_json( @@ -337,7 +291,7 @@ def text_pipeline(phn): hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Dataset prep (parsing TIMIT and annotation into csv files) diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/README.md b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/README.md deleted file mode 100644 index 4f8cbb2f1c..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/README.md +++ /dev/null @@ -1,93 +0,0 @@ -## Multi-teacher Knowledge Distillation for CTC/Att models -This is the implementation of multi-teacher distillation methods to -joint ctc-attention end-to-end ASR systems. The proposed approaches integrate -the error rate metric to the teacher selection rather than solely focusing on the observed losses. -This way, we directly distillate and optimize the student toward the relevant metric for speech recognition. -For details please refer to: https://arxiv.org/abs/2005.09310 - -### Results with this recipe - -| Distillation Strategy | Valid PER | Test PER | Model link | GPUs | -|:---------------------------:| :-----:| :-----:| :-----:| :--------:| -| Weighted | 11.87 | 13.11 | [model](https://drive.google.com/drive/folders/1MHR2AZvCYZr88yUQZTmORCvKJqTsYZAQ?usp=sharing) | 1xV100 16GB | -| Best | 11.93 | 13.15 | [model](https://drive.google.com/drive/folders/1D-3GNh-XzjoU-_6egT3Ns6maCvF-fAJH?usp=sharing) | 1xV100 16GB | - -The output folders with checkpoints and logs for TIMIT recipes can be found [here](https://drive.google.com/drive/folders/1ZcME-Wf4stlzW3j_iJ3zGDCkSy1V_Wjs?usp=sharing). - -### Extra-Dependencies -Before running this recipe, make sure h5py is installed. 
Otherwise, run: -pip install h5py - -### Training steps -To speed up student distillation from multiple teachers, we separate the whole procedure into three parts: teacher model training, inference running on teacher models, student distillation. - -#### 1. Teacher model training -Before doing distillation, we require finishing N teacher models training. Here, we propose to set N=10 as in the referenced paper. - -Models training can be done in parallel using `train_teacher.py`. - -Example: -``` -python train_teacher.py hparams/teachers/tea0.yaml --data_folder /path-to/data_folder -``` - -#### 2. Run inference on all teacher models -This part run inference on all teacher models and store them on disk using `save_teachers.py`. It is only required that you setup the `tea_models_dir` variable corresponding to the path to a txt file. The latter txt file needs to contain -a list of paths pointing to each teacher model.ckpt. We decided to work with a file so it can easily scale to hundreds of teachers. Hence, an example of this -file is: - -``` -results/tea0/1234/save/CKPT+2021-01-21+14-50-32+00/model.ckpt -results/tea1/1234/save/CKPT+2021-01-21+13-55-56+00/model.ckpt -results/tea2/1234/save/CKPT+2021-01-21+14-25-21+00/model.ckpt -results/tea3/1234/save/CKPT+2021-01-21+15-02-32+00/model.ckpt -results/tea4/1234/save/CKPT+2021-01-21+15-47-09+00/model.ckpt -results/tea5/1234/save/CKPT+2021-01-21+16-02-38+00/model.ckpt -results/tea6/1234/save/CKPT+2021-01-21+16-05-33+00/model.ckpt -results/tea7/1234/save/CKPT+2021-01-21+16-03-20+00/model.ckpt -results/tea8/1234/save/CKPT+2021-01-21+16-25-17+00/model.ckpt -results/tea9/1234/save/CKPT+2021-01-21+15-48-42+00/model.ckpt -``` - -Example: -``` -python save_teachers.py hparams/save_teachers.yaml --data_folder /path-to/data_folder --tea_models_dir /path-to/tea_model_paths.txt -``` - -#### 3. Student distillation -This is the main part for distillation using `train_kd.py`. 
Here, the variable `pretrain` might be used to use a pre-trained teacher as the student. Note that if set to `True`, a path to the corresponding `model.ckpt` must be given in `pretrain_st_dir`. Also, `tea_infer_dir` is required, linking to the directory of teacher model inference results. Finally, note that the distillation must be trained on with the exact same input CSV files that are generated by `save_teachers.py`. This ensure that the distillation is perfectly linked to the -generated teacher predictions! Diverging input CSV files might generate incompatible shape errors! - -Example: -``` -python train_kd.py hparams/train_kd.yaml --data_folder /path-to/data_folder --pretrain_st_dir /path-to/model_directory --tea_infer_dir /path-to/tea_infer_directory -``` - -### Distillation strategies -There are three strategies in the current version that can be switched with the option `strategy` in `hparams/train_kd.yaml`. - -- **average**: average losses of teachers when doing distillation. -- **best**: choosing the best teacher based on WER. -- **weighted**: assigning weights to teachers based on WER. - - -# **About SpeechBrain** -- Website: https://speechbrain.github.io/ -- Code: https://github.com/speechbrain/speechbrain/ -- HuggingFace: https://huggingface.co/speechbrain/ - - -# **Citing SpeechBrain** -Please, cite SpeechBrain if you use it for your research or business. 
- -```bibtex -@misc{speechbrain, - title={{SpeechBrain}: A General-Purpose Speech Toolkit}, - author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, - year={2021}, - eprint={2106.04624}, - archivePrefix={arXiv}, - primaryClass={eess.AS}, - note={arXiv:2106.04624} -} -``` diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/save_teachers.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/save_teachers.yaml deleted file mode 100644 index 6c27f01292..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/save_teachers.yaml +++ /dev/null @@ -1,485 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/teachers_save/ - -# Data files -data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -# Training parameters -# number_of_epochs: 1 -batch_size: 8 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -emb_size: 128 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -# min_decode_ratio: 0.0 -# max_decode_ratio: 1.0 -# beam_size: 16 -# eos_threshold: 1.5F - -# teacher models -num_tea: 10 - -# .txt file containing paths for saved teacher models. -# e.g. 
each line is /path/to/model.ckpt -tea_models_dir: !PLACEHOLDER - -# distillation parameters -# Temperature -temperature: 1 -# distillation weight alpha -# alpha: 1 -# different stages in dataset -stage: ["train", "valid", "test"] - -# tea0 -tea0_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.15 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 4 - rnn_neurons: 512 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 512 - -tea0_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea0_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 512 - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea0_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 512 - n_neurons: !ref # 39 phonemes + 1 blank - -tea0_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea1 -tea1_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.3 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 4 - rnn_neurons: 512 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 512 - -tea1_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea1_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 512 - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea1_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 512 - n_neurons: !ref # 39 phonemes + 1 blank - -tea1_seq_lin: 
!new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea2 -tea2_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.3 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 4 - rnn_neurons: 512 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 512 - -tea2_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea2_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 512 - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea2_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 512 - n_neurons: !ref # 39 phonemes + 1 blank - -tea2_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea3 -tea3_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.2 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 5 - rnn_neurons: 512 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 512 - -tea3_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea3_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 512 - input_size: !ref - rnn_type: lstm - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea3_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 512 - n_neurons: !ref # 39 phonemes + 1 blank - -tea3_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea4 -tea4_enc: 
!new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.3 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 4 - rnn_neurons: 512 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 512 - -tea4_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea4_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 512 - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea4_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 512 - n_neurons: !ref # 39 phonemes + 1 blank - -tea4_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea5 -tea5_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.3 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 4 - rnn_neurons: 320 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 320 - -tea5_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea5_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 320 - input_size: !ref - rnn_type: lstm - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea5_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 320 - n_neurons: !ref # 39 phonemes + 1 blank - -tea5_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea6 -tea6_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.3 - cnn_blocks: 1 - 
cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 4 - rnn_neurons: 320 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 320 - -tea6_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea6_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 320 - input_size: !ref - rnn_type: lstm - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea6_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 320 - n_neurons: !ref # 39 phonemes + 1 blank - -tea6_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea7 -tea7_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.15 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 4 - rnn_neurons: 640 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 512 - -tea7_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea7_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 512 - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea7_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 512 - n_neurons: !ref # 39 phonemes + 1 blank - -tea7_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea8 -tea8_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.3 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 5 - rnn_neurons: 512 - rnn_bidirectional: True - dnn_blocks: 2 
- dnn_neurons: 512 - -tea8_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea8_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 512 - input_size: !ref - rnn_type: lstm - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea8_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 512 - n_neurons: !ref # 39 phonemes + 1 blank - -tea8_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - -# tea9 -tea9_enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !name:torch.nn.LeakyReLU - dropout: 0.15 - cnn_blocks: 2 - cnn_channels: (128, 256) - cnn_kernelsize: (3, 3) - time_pooling: True - rnn_layers: 4 - rnn_neurons: 512 - rnn_bidirectional: True - dnn_blocks: 2 - dnn_neurons: 512 - -tea9_emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -tea9_dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: 512 - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: 256 - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -tea9_ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: 512 - n_neurons: !ref # 39 phonemes + 1 blank - -tea9_seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: 256 - n_neurons: !ref # 39 phonemes + 1 eos - - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -# epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter -# limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea0.yaml 
b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea0.yaml deleted file mode 100644 index bb27a93d46..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea0.yaml +++ /dev/null @@ -1,191 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea0/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.15 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 512 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - 
cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - 
lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea1.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea1.yaml deleted file mode 100644 index eefbbffc7c..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea1.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea1/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. 
/path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 16 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.3 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 512 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: 
!new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea2.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea2.yaml 
deleted file mode 100644 index 8a8e6c1483..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea2.yaml +++ /dev/null @@ -1,191 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea2/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 16 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.3 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 512 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - 
rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: 
!new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea3.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea3.yaml deleted file mode 100644 index cbe1abc9f3..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea3.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea3/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. 
/path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.2 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 5 -rnn_neurons: 512 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: lstm - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: 
!new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea4.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea4.yaml 
deleted file mode 100644 index 60daa3bddf..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea4.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea4/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.3 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 512 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: 
!new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: 
!name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea5.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea5.yaml deleted file mode 100644 index 5850dd22c3..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea5.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea5/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.3 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 320 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 320 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: 
!new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: lstm - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - 
annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea6.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea6.yaml deleted file mode 100644 index ceda59e00d..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea6.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea6/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. 
/path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.3 -cnn_blocks: 1 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 320 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 320 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: lstm - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: 
!new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea7.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea7.yaml 
deleted file mode 100644 index 00a793a703..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea7.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea7/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.15 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 640 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: 
!new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: 
!name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea8.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea8.yaml deleted file mode 100644 index 89873a6a42..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea8.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea8/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.3 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 5 -rnn_neurons: 512 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: 
!new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: lstm - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - 
annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea9.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea9.yaml deleted file mode 100644 index 7caeba6f73..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/teachers/tea9.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/tea9/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. 
/path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0003 -ctc_weight: 0.2 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.15 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 512 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: 
!new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -jit_module_keys: [enc] - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - lr_annealing: !ref - counter: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/train_kd.yaml b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/train_kd.yaml deleted file 
mode 100644 index 3b072bb3b5..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/hparams/train_kd.yaml +++ /dev/null @@ -1,220 +0,0 @@ -# Seed needs to be set at top of yaml, before objects with parameters are made -seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] -output_folder: !ref results/augment_CRDNN/ -wer_file: !ref /wer.txt -save_folder: !ref /save -train_log: !ref /train_log.txt - -# Data files -data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json - -# Path containing the stored inferences of the different teachers -tea_infer_dir: !PLACEHOLDER - -# Training parameters -number_of_epochs: 50 -batch_size: 8 -lr: 0.0005 -ctc_weight: 0.1 -sorting: ascending - -# Feature parameters -sample_rate: 16000 -n_fft: 400 -n_mels: 40 - -# teacher models -num_tea: 10 - -# distillation parameters -pretrain: True - -# Path to the student model to load the weights from -pretrain_st_dir: !PLACEHOLDER -strategy: best # [average, best, weighted] - -# Temperature : smooth the distribution of output probability -temperature: 1 -# distillation weight alpha -alpha: 1 - -# variable name when loading teacher inference -tea_keys: ["p_ctc_tea", "p_seq_tea", "wer_ctc_tea", "wer_tea"] - -# Model parameters -activation: !name:torch.nn.LeakyReLU -dropout: 0.25 -cnn_blocks: 2 -cnn_channels: (128, 256) -cnn_kernelsize: (3, 3) -rnn_layers: 4 -rnn_neurons: 512 -rnn_bidirectional: True -dnn_blocks: 2 -dnn_neurons: 512 -emb_size: 128 -dec_neurons: 256 - -# Outputs -output_neurons: 40 -blank_index: !ref - 1 -bos_index: !ref - 1 -eos_index: !ref - 1 - -# Decoding parameters -min_decode_ratio: 0.0 -max_decode_ratio: 1.0 -beam_size: 16 -# eos_threshold: 1.5 - -# Dataloader options -train_dataloader_opts: - batch_size: !ref - -valid_dataloader_opts: - batch_size: !ref - -test_dataloader_opts: - batch_size: !ref - -# Functions -enc: !new:speechbrain.lobes.models.CRDNN.CRDNN - 
input_shape: [null, null, !ref ] - activation: !ref - dropout: !ref - cnn_blocks: !ref - cnn_channels: !ref - cnn_kernelsize: !ref - time_pooling: True - rnn_layers: !ref - rnn_neurons: !ref - rnn_bidirectional: !ref - dnn_blocks: !ref - dnn_neurons: !ref - -emb: !new:speechbrain.nnet.embedding.Embedding - num_embeddings: !ref - embedding_dim: !ref - -dec: !new:speechbrain.nnet.RNN.AttentionalRNNDecoder - enc_dim: !ref - input_size: !ref - rnn_type: gru - attn_type: location - hidden_size: !ref - attn_dim: 256 - num_layers: 1 - scaling: 1.0 - channels: 10 - kernel_size: 100 - re_init: True - dropout: 0.5 - -ctc_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 blank - -seq_lin: !new:speechbrain.nnet.linear.Linear - input_size: !ref - n_neurons: !ref # 39 phonemes + 1 eos - -model: !new:torch.nn.ModuleList - - [!ref , !ref , !ref , !ref , !ref ] - -log_softmax: !new:speechbrain.nnet.activations.Softmax - apply_log: True - -ctc_cost: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - -seq_cost: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - -greedy_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNGreedySearcher - embedding: !ref - decoder: !ref - linear: !ref - bos_index: !ref - eos_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearcher - embedding: !ref - decoder: !ref - linear: !ref - ctc_linear: !ref - bos_index: !ref - eos_index: !ref - blank_index: !ref - min_decode_ratio: !ref - max_decode_ratio: !ref - beam_size: !ref - -opt_class: !name:torch.optim.Adam - lr: !ref - -lr_annealing: !new:speechbrain.nnet.schedulers.NewBobScheduler - initial_value: !ref - improvement_threshold: 0.0025 - annealing_factor: 0.8 - patient: 0 - -# Modules to have train/eval/optimizer called on -modules: - enc: !ref - emb: !ref - dec: !ref - ctc_lin: !ref - seq_lin: !ref - normalize: !ref - -# Names of modules to be compiled with 
torch.jit.script -jit_module_keys: [enc] - -epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter - limit: !ref - -normalize: !new:speechbrain.processing.features.InputNormalization - norm_type: global - -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -compute_features: !new:speechbrain.lobes.features.Fbank - sample_rate: !ref - n_fft: !ref - n_mels: !ref - -checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer - checkpoints_dir: !ref - recoverables: - model: !ref - normalize: !ref - counter: !ref - lr_annealing: !ref - -train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger - save_file: !ref - -ctc_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.ctc_loss - blank_index: !ref - reduction: batch - -seq_stats: !name:speechbrain.utils.metric_stats.MetricStats - metric: !name:speechbrain.nnet.losses.nll_loss - label_smoothing: 0.1 - reduction: batch - -per_stats: !name:speechbrain.utils.metric_stats.ErrorRateStats - -ctc_cost_kd: !name:speechbrain.nnet.losses.ctc_loss_kd - blank_index: !ref - -seq_cost_kd: !name:speechbrain.nnet.losses.nll_loss_kd diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/save_teachers.py b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/save_teachers.py deleted file mode 100644 index 00a7f9c5be..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/save_teachers.py +++ /dev/null @@ -1,394 +0,0 @@ -#!/usr/bin/env python3 - -"""Recipe for doing ASR with phoneme targets and joint seq2seq -and CTC loss on the TIMIT dataset following a knowledge distillation scheme as -reported in " Distilling Knowledge from Ensembles of Acoustic Models for Joint -CTC-Attention End-to-End Speech Recognition", Yan Gao et al. 
- -To run this recipe, do the following: -> python experiment.py hyperparams.yaml --data_folder /path/to/TIMIT - -Authors - * Yan Gao 2021 - * Titouan Parcollet 2021 -""" - -import sys -import torch -import speechbrain as sb -from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml - -from tqdm.contrib import tqdm -import h5py -import numpy as np - - -# Define training procedure -class ASR(sb.Brain): - def __init__(self, tea_modules_list=None, hparams=None, run_opts=None): - super(ASR, self).__init__( - modules=None, - opt_class=None, - hparams=hparams, - run_opts=run_opts, - checkpointer=None, - ) - - # Initialize teacher parameters - tea_modules_list_ = [] - for tea_modules in tea_modules_list: - tea_modules_ = torch.nn.ModuleList(tea_modules) - tea_modules_ = tea_modules_.to(self.device) - tea_modules_list_.append(tea_modules_) - self.tea_modules_list = tea_modules_list_ - - def compute_forward_tea(self, batch): - batch = batch.to(self.device) - wavs, wav_lens = batch.sig - phns_bos, _ = batch.phn_encoded_bos - phns, phn_lens = batch.phn_encoded - - feats = self.hparams.compute_features(wavs) - feats = self.hparams.normalize(feats, wav_lens) - apply_softmax = torch.nn.Softmax(dim=-1) - - # run inference to each teacher model - tea_dict_list = [] - for num in range(self.hparams.num_tea): - tea_dict = {} - self.tea_modules_list[num].eval() - with torch.no_grad(): - x_tea = tea_enc_list[num](feats) - ctc_logits_tea = tea_ctc_lin_list[num](x_tea) - - # output layer for ctc log-probabilities - p_ctc_tea = self.hparams.log_softmax( - ctc_logits_tea / self.hparams.temperature - ) - - e_in_tea = tea_emb_list[num](phns_bos) - h_tea, _ = tea_dec_list[num](e_in_tea, x_tea, wav_lens) - - # output layer for seq2seq log-probabilities - seq_logits_tea = tea_seq_lin_list[num](h_tea) - p_seq_tea = apply_softmax( - seq_logits_tea / self.hparams.temperature - ) - - # WER from output layer of CTC - sequence_ctc = sb.decoders.ctc_greedy_decode( 
- p_ctc_tea, wav_lens, blank_id=self.hparams.blank_index - ) - - phns_decode = sb.utils.data_utils.undo_padding(phns, phn_lens) - phns_decode = self.label_encoder.decode_ndim(phns_decode) - sequence_decode = self.label_encoder.decode_ndim(sequence_ctc) - - per_stats_ctc = sb.utils.edit_distance.wer_details_for_batch( - batch.id, - phns_decode, - sequence_decode, - compute_alignments=False, - ) - - wer_ctc_tea = [] - for item in per_stats_ctc: - wer_ctc_tea.append(item["WER"]) - - wer_ctc_tea = exclude_wer(wer_ctc_tea) - wer_ctc_tea = np.expand_dims(wer_ctc_tea, axis=0) - - # WER from output layer of CE - _, predictions = p_seq_tea.max(dim=-1) - hyps = sb.decoders.seq2seq.batch_filter_seq2seq_output( - predictions, eos_id=self.hparams.eos_index - ) - sequence_ce = self.label_encoder.decode_ndim(hyps) - per_stats_ce = sb.utils.edit_distance.wer_details_for_batch( - batch.id, phns_decode, sequence_ce, compute_alignments=False - ) - - wer_tea = [] - for item in per_stats_ce: - wer_tea.append(item["WER"]) - - wer_tea = exclude_wer(wer_tea) - wer_tea = np.expand_dims(wer_tea, axis=0) - - # save the variables into dict - tea_dict["p_ctc_tea"] = p_ctc_tea.cpu().numpy() - tea_dict["p_seq_tea"] = p_seq_tea.cpu().numpy() - tea_dict["wer_ctc_tea"] = wer_ctc_tea - tea_dict["wer_tea"] = wer_tea - tea_dict_list.append(tea_dict) - - return tea_dict_list - - def def_tea_name(self): - # define teacher variable name - tea_name = [] - for tea_num in range(self.hparams.num_tea): - tea = "t{}".format(tea_num) - tea_name.append(tea) - return tea_name - - def fit_save(self, train_set, valid_set=None, test_set=None): - data_sets = [train_set, valid_set, test_set] - stage = self.hparams.stage - tea_name = self.def_tea_name() - - # define output file name - f_name = "/tea_infer_{}batch.hdf5".format(self.hparams.batch_size) - f = h5py.File(self.hparams.output_folder + f_name, "w") - for num in range(len(stage)): - # create group for each set (train, valid, test). 
- g_sets = f.create_group(stage[num]) - - with tqdm( - data_sets[num], initial=self.step, dynamic_ncols=True, - ) as t: - for batch in t: - self.step += 1 - # create group for each batch - g_batch = g_sets.create_group(str(self.step)) - - # run inference to each teacher - tea_dict_list = self.compute_forward_tea(batch) - - for tea_num in range(self.hparams.num_tea): - # create group for each teacher - g_tea = g_batch.create_group(tea_name[tea_num]) - g_tea.create_dataset( - "p_ctc_tea", - data=tea_dict_list[tea_num]["p_ctc_tea"], - ) - g_tea.create_dataset( - "p_seq_tea", - data=tea_dict_list[tea_num]["p_seq_tea"], - ) - g_tea.create_dataset( - "wer_ctc_tea", - data=tea_dict_list[tea_num]["wer_ctc_tea"][0], - ) - g_tea.create_dataset( - "wer_tea", data=tea_dict_list[tea_num]["wer_tea"][0] - ) - self.step = 0 - f.close() - - -def exclude_wer(wer): - """ - This function is used to exclude the - wer values which is more than 100. - """ - wer_list = [] - for item in wer: - if item > 100: - item = 100 - wer_list.append(item) - return np.array(wer_list) - - -def data_io_prep(hparams): - "Creates the datasets and their data processing pipelines." - data_folder = hparams["data_folder"] - # 1. Declarations: - train_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["train_annotation"], - replacements={"data_root": data_folder}, - ) - if hparams["sorting"] == "ascending": - # we sort training data to speed up training and get better results. - train_data = train_data.filtered_sorted(sort_key="duration") - # when sorting do not shuffle in dataloader ! otherwise is pointless - hparams["train_dataloader_opts"]["shuffle"] = False - - elif hparams["sorting"] == "descending": - train_data = train_data.filtered_sorted( - sort_key="duration", reverse=True - ) - # when sorting do not shuffle in dataloader ! 
otherwise is pointless - hparams["train_dataloader_opts"]["shuffle"] = False - - elif hparams["sorting"] == "random": - pass - - else: - raise NotImplementedError( - "sorting must be random, ascending or descending" - ) - - valid_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["valid_annotation"], - replacements={"data_root": data_folder}, - ) - valid_data = valid_data.filtered_sorted(sort_key="duration") - - test_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["test_annotation"], - replacements={"data_root": data_folder}, - ) - test_data = test_data.filtered_sorted(sort_key="duration") - - datasets = [train_data, valid_data, test_data] - label_encoder = sb.dataio.encoder.CTCTextEncoder() - - # 2. Define audio pipeline: - @sb.utils.data_pipeline.takes("wav") - @sb.utils.data_pipeline.provides("sig") - def audio_pipeline(wav): - sig = sb.dataio.dataio.read_audio(wav) - return sig - - sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) - - # 3. Define text pipeline: - @sb.utils.data_pipeline.takes("phn") - @sb.utils.data_pipeline.provides( - "phn_list", - "phn_encoded_list", - "phn_encoded", - "phn_encoded_eos", - "phn_encoded_bos", - ) - def text_pipeline(phn): - phn_list = phn.strip().split() - yield phn_list - phn_encoded_list = label_encoder.encode_sequence(phn_list) - yield phn_encoded_list - phn_encoded = torch.LongTensor(phn_encoded_list) - yield phn_encoded - phn_encoded_eos = torch.LongTensor( - label_encoder.append_eos_index(phn_encoded_list) - ) - yield phn_encoded_eos - phn_encoded_bos = torch.LongTensor( - label_encoder.prepend_bos_index(phn_encoded_list) - ) - yield phn_encoded_bos - - sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) - - # 3. 
Fit encoder: - # NOTE: In this minimal example, also update from valid data - - label_encoder.update_from_didataset(train_data, output_key="phn_list") - if ( - hparams["blank_index"] != hparams["bos_index"] - or hparams["blank_index"] != hparams["eos_index"] - ): - label_encoder.insert_blank(index=hparams["blank_index"]) - - if hparams["bos_index"] == hparams["eos_index"]: - label_encoder.insert_bos_eos( - bos_label="", - eos_label="", - bos_index=hparams["bos_index"], - ) - else: - label_encoder.insert_bos_eos( - bos_label="", - eos_label="", - bos_index=hparams["bos_index"], - eos_index=hparams["eos_index"], - ) - - # 4. Set output: - sb.dataio.dataset.set_output_keys( - datasets, - ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"], - ) - - return train_data, valid_data, test_data, label_encoder - - -if __name__ == "__main__": - # CLI: - hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - - # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: - hparams = load_hyperpyyaml(fin, overrides) - - # Dataset prep (parsing TIMIT and annotation into csv files) - from timit_prepare import prepare_timit # noqa - - # Initialize ddp (useful only for multi-GPU DDP training) - sb.utils.distributed.ddp_init_group(run_opts) - - # multi-gpu (ddp) save data preparation - run_on_main( - prepare_timit, - kwargs={ - "data_folder": hparams["data_folder"], - "splits": ["train", "dev", "test"], - "save_folder": hparams["data_folder"], - }, - ) - - # Dataset IO prep: creating Dataset objects and proper encodings for phones - train_data, valid_data, test_data, label_encoder = data_io_prep(hparams) - - # Create experiment directory - sb.create_experiment_directory( - experiment_directory=hparams["output_folder"], - hyperparams_to_save=hparams_file, - overrides=overrides, - ) - - # initialise teacher model variables - tea_enc_list = [] - tea_emb_list = [] - tea_dec_list = [] - tea_ctc_lin_list = [] - tea_seq_lin_list = 
[] - for i in range(hparams["num_tea"]): - exec("tea_enc_list.append(hparams['tea{}_enc'])".format(i)) - exec("tea_emb_list.append(hparams['tea{}_emb'])".format(i)) - exec("tea_dec_list.append(hparams['tea{}_dec'])".format(i)) - exec("tea_ctc_lin_list.append(hparams['tea{}_ctc_lin'])".format(i)) - exec("tea_seq_lin_list.append(hparams['tea{}_seq_lin'])".format(i)) - - # create ModuleList - for i in range(hparams["num_tea"]): - exec( - "tea{}_modules = torch.nn.ModuleList([tea_enc_list[i], tea_emb_list[i], tea_dec_list[i], tea_ctc_lin_list[i], tea_seq_lin_list[i]])".format( - i - ) - ) # i denotes the index of teacher models - - tea_modules_list = [] - for i in range(hparams["num_tea"]): - exec("tea_modules_list.append(tea{}_modules)".format(i)) - - # Trainer initialization - asr_brain = ASR( - tea_modules_list=tea_modules_list, hparams=hparams, run_opts=run_opts - ) - asr_brain.label_encoder = label_encoder - - # load pre-trained weights of teacher models - with open(hparams["tea_models_dir"], "r") as f: - enter_token = "\n" - for i, path in enumerate(f.readlines()): - exec( - "tea{}_modules.load_state_dict(torch.load(path.strip(enter_token)))".format( - i - ) - ) - - # make dataloaders - train_set = sb.dataio.dataloader.make_dataloader( - train_data, **hparams["train_dataloader_opts"] - ) - valid_set = sb.dataio.dataloader.make_dataloader( - valid_data, **hparams["valid_dataloader_opts"] - ) - test_set = sb.dataio.dataloader.make_dataloader( - test_data, **hparams["test_dataloader_opts"] - ) - - # run inference and save results - asr_brain.fit_save(train_set, valid_set, test_set) diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/timit_prepare.py b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/timit_prepare.py deleted file mode 120000 index 9b0f68bc85..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/timit_prepare.py +++ /dev/null @@ -1 +0,0 @@ -../../timit_prepare.py \ No newline at end of file diff --git 
a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/train_kd.py b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/train_kd.py deleted file mode 100644 index 71d285700d..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/train_kd.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/env python3 - -"""Recipe for doing ASR with phoneme targets and joint seq2seq -and CTC loss on the TIMIT dataset following a knowledge distillation scheme as -reported in " Distilling Knowledge from Ensembles of Acoustic Models for Joint -CTC-Attention End-to-End Speech Recognition", Yan Gao et al. - -To run this recipe, do the following: -> python experiment.py hyperparams.yaml --data_folder /path/to/TIMIT - -Authors - * Yan Gao 2021 - * Titouan Parcollet 2021 -""" - -import sys -import torch -import h5py -import speechbrain as sb -from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml - - -# Define training procedure -class ASR(sb.Brain): - def compute_forward(self, batch, stage): - batch = batch.to(self.device) - wavs, wav_lens = batch.sig - phns_bos, _ = batch.phn_encoded_bos - - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "env_corrupt"): - wavs_noise = self.hparams.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - phns_bos = torch.cat([phns_bos, phns_bos]) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) - - feats = self.hparams.compute_features(wavs) - feats = self.modules.normalize(feats, wav_lens) - x = self.modules.enc(feats) - - # output layer for ctc log-probabilities - logits = self.modules.ctc_lin(x) - p_ctc = self.hparams.log_softmax(logits) - - e_in = self.modules.emb(phns_bos) - h, _ = self.modules.dec(e_in, x, wav_lens) - - # output layer for seq2seq log-probabilities - logits = self.modules.seq_lin(h) - p_seq = self.hparams.log_softmax(logits) - - if stage == sb.Stage.VALID: - hyps, scores = 
self.hparams.greedy_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens, hyps - - elif stage == sb.Stage.TEST: - hyps, scores = self.hparams.beam_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens, hyps - - return p_ctc, p_seq, wav_lens - - def def_tea_name(self): - # define teacher variable name - tea_name = [] - for tea_num in range(self.hparams.num_tea): - tea = "t{}".format(tea_num) - tea_name.append(tea) - return tea_name - - def re_format(self, data_dict): - item_tea_list = [None, None, None, None] - tea_name = self.def_tea_name() - for tea_num in range(self.hparams.num_tea): - for i in range(4): - item_tea = data_dict[str(self.step)][tea_name[tea_num]][ - self.hparams.tea_keys[i] - ][()] - - if self.hparams.tea_keys[i].startswith("wer"): - item_tea = torch.tensor(item_tea) - else: - item_tea = torch.from_numpy(item_tea) - - item_tea = item_tea.to(self.device) - item_tea = torch.unsqueeze(item_tea, 0) - if tea_num == 0: - item_tea_list[i] = item_tea - else: - item_tea_list[i] = torch.cat( - [item_tea_list[i], item_tea], 0 - ) - return item_tea_list - - def compute_objectives(self, predictions, batch, stage): - if stage == sb.Stage.TRAIN: - p_ctc, p_seq, wav_lens = predictions - else: - p_ctc, p_seq, wav_lens, hyps = predictions - - ids = batch.id - phns_eos, phn_lens_eos = batch.phn_encoded_eos - phns, phn_lens = batch.phn_encoded - - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - phns_eos = torch.cat([phns_eos, phns_eos], dim=0) - phn_lens_eos = torch.cat([phn_lens_eos, phn_lens_eos], dim=0) - - # normal supervised training - loss_ctc_nor = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens) - loss_seq_nor = self.hparams.seq_cost(p_seq, phns_eos, phn_lens_eos) - - # load teacher inference results - data_dict = ( - self.train_dict - if stage == sb.Stage.TRAIN - else self.valid_dict - if stage == sb.Stage.VALID - else self.test_dict - ) - - item_tea_list = self.re_format(data_dict) - p_ctc_tea, p_seq_tea, wer_ctc_tea, wer_tea = 
[ - item for item in item_tea_list - ] - - # Strategy "average": average losses of teachers when doing distillation. - # Strategy "best": choosing the best teacher based on WER. - # Strategy "weighted": assigning weights to teachers based on WER. - if self.hparams.strategy == "best": - # tea_ce for kd - wer_scores, indx = torch.min(wer_tea, dim=0) - indx = list(indx.cpu().numpy()) - - # select the best teacher for each sentence - tea_seq2seq_pout = None - for stn_indx, tea_indx in enumerate(indx): - s2s_one = p_seq_tea[tea_indx][stn_indx] - s2s_one = torch.unsqueeze(s2s_one, 0) - if stn_indx == 0: - tea_seq2seq_pout = s2s_one - else: - tea_seq2seq_pout = torch.cat([tea_seq2seq_pout, s2s_one], 0) - - apply_softmax = torch.nn.Softmax(dim=0) - - if ( - self.hparams.strategy == "best" - or self.hparams.strategy == "weighted" - ): - # mean wer for ctc - tea_wer_ctc_mean = wer_ctc_tea.mean(1) - tea_acc_main = 100 - tea_wer_ctc_mean - - # normalise weights via Softmax function - tea_acc_softmax = apply_softmax(tea_acc_main) - - if self.hparams.strategy == "weighted": - # mean wer for ce - tea_wer_mean = wer_tea.mean(1) - tea_acc_ce_main = 100 - tea_wer_mean - - # normalise weights via Softmax function - tea_acc_ce_softmax = apply_softmax(tea_acc_ce_main) - - # kd loss - ctc_loss_list = None - ce_loss_list = None - for tea_num in range(self.hparams.num_tea): - # ctc - p_ctc_tea_one = p_ctc_tea[tea_num] - # calculate CTC distillation loss of one teacher - loss_ctc_one = self.hparams.ctc_cost_kd( - p_ctc, p_ctc_tea_one, wav_lens, device=self.device - ) - loss_ctc_one = torch.unsqueeze(loss_ctc_one, 0) - if tea_num == 0: - ctc_loss_list = loss_ctc_one - else: - ctc_loss_list = torch.cat([ctc_loss_list, loss_ctc_one]) - - # ce - p_seq_tea_one = p_seq_tea[tea_num] - # calculate CE distillation loss of one teacher - loss_seq_one = self.hparams.seq_cost_kd( - p_seq, p_seq_tea_one, phn_lens_eos - ) - loss_seq_one = torch.unsqueeze(loss_seq_one, 0) - if tea_num == 0: - ce_loss_list 
= loss_seq_one - else: - ce_loss_list = torch.cat([ce_loss_list, loss_seq_one]) - - # kd loss - if self.hparams.strategy == "average": - # get average value of losses from all teachers (CTC and CE loss) - ctc_loss_kd = ctc_loss_list.mean(0) - seq2seq_loss_kd = ce_loss_list.mean(0) - else: - # assign weights to different teachers (CTC loss) - ctc_loss_kd = (tea_acc_softmax * ctc_loss_list).sum(0) - if self.hparams.strategy == "best": - # only use the best teacher to compute CE loss - seq2seq_loss_kd = self.hparams.seq_cost_kd( - p_seq, tea_seq2seq_pout, phn_lens_eos - ) - if self.hparams.strategy == "weighted": - # assign weights to different teachers (CE loss) - seq2seq_loss_kd = (tea_acc_ce_softmax * ce_loss_list).sum(0) - - # total loss - # combine normal supervised training - loss_ctc = ( - self.hparams.temperature - * self.hparams.temperature - * self.hparams.alpha - * ctc_loss_kd - + (1 - self.hparams.alpha) * loss_ctc_nor - ) - loss_seq = ( - self.hparams.temperature - * self.hparams.temperature - * self.hparams.alpha - * seq2seq_loss_kd - + (1 - self.hparams.alpha) * loss_seq_nor - ) - - loss = ( - self.hparams.ctc_weight * loss_ctc - + (1 - self.hparams.ctc_weight) * loss_seq - ) - - # Record losses for posterity - if stage != sb.Stage.TRAIN: - self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens) - self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens_eos) - self.per_metrics.append( - ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim, - ) - - return loss - - def fit_batch(self, batch): - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - return loss.detach() - - def evaluate_batch(self, batch, stage): - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - - def 
on_stage_start(self, stage, epoch): - self.ctc_metrics = self.hparams.ctc_stats() - self.seq_metrics = self.hparams.seq_stats() - - if stage != sb.Stage.TRAIN: - self.per_metrics = self.hparams.per_stats() - - def on_stage_end(self, stage, stage_loss, epoch): - if stage == sb.Stage.TRAIN: - self.train_loss = stage_loss - else: - per = self.per_metrics.summarize("error_rate") - - if stage == sb.Stage.VALID: - old_lr, new_lr = self.hparams.lr_annealing(per) - sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) - - self.hparams.train_logger.log_stats( - stats_meta={"epoch": epoch, "lr": old_lr}, - train_stats={"loss": self.train_loss}, - valid_stats={ - "loss": stage_loss, - "ctc_loss": self.ctc_metrics.summarize("average"), - "seq_loss": self.seq_metrics.summarize("average"), - "PER": per, - }, - ) - self.checkpointer.save_and_keep_only( - meta={"PER": per}, min_keys=["PER"] - ) - - if stage == sb.Stage.TEST: - self.hparams.train_logger.log_stats( - stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, - test_stats={"loss": stage_loss, "PER": per}, - ) - with open(self.hparams.wer_file, "w") as w: - w.write("CTC loss stats:\n") - self.ctc_metrics.write_stats(w) - w.write("\nseq2seq loss stats:\n") - self.seq_metrics.write_stats(w) - w.write("\nPER stats:\n") - self.per_metrics.write_stats(w) - print( - "CTC, seq2seq, and PER stats written to file", - self.hparams.wer_file, - ) - - -def data_io_prep(hparams): - "Creates the datasets and their data processing pipelines." - data_folder = hparams["data_folder"] - # 1. Declarations: - train_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["train_annotation"], - replacements={"data_root": data_folder}, - ) - if hparams["sorting"] == "ascending": - # we sort training data to speed up training and get better results. - train_data = train_data.filtered_sorted(sort_key="duration") - # when sorting do not shuffle in dataloader ! 
otherwise is pointless - hparams["train_dataloader_opts"]["shuffle"] = False - - elif hparams["sorting"] == "descending": - train_data = train_data.filtered_sorted( - sort_key="duration", reverse=True - ) - # when sorting do not shuffle in dataloader ! otherwise is pointless - hparams["train_dataloader_opts"]["shuffle"] = False - - elif hparams["sorting"] == "random": - pass - - else: - raise NotImplementedError( - "sorting must be random, ascending or descending" - ) - - valid_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["valid_annotation"], - replacements={"data_root": data_folder}, - ) - valid_data = valid_data.filtered_sorted(sort_key="duration") - - test_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["test_annotation"], - replacements={"data_root": data_folder}, - ) - test_data = test_data.filtered_sorted(sort_key="duration") - - datasets = [train_data, valid_data, test_data] - label_encoder = sb.dataio.encoder.CTCTextEncoder() - - # 2. Define audio pipeline: - @sb.utils.data_pipeline.takes("wav") - @sb.utils.data_pipeline.provides("sig") - def audio_pipeline(wav): - sig = sb.dataio.dataio.read_audio(wav) - return sig - - sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) - - # 3. 
Define text pipeline: - @sb.utils.data_pipeline.takes("phn") - @sb.utils.data_pipeline.provides( - "phn_list", - "phn_encoded_list", - "phn_encoded", - "phn_encoded_eos", - "phn_encoded_bos", - ) - def text_pipeline(phn): - phn_list = phn.strip().split() - yield phn_list - phn_encoded_list = label_encoder.encode_sequence(phn_list) - yield phn_encoded_list - phn_encoded = torch.LongTensor(phn_encoded_list) - yield phn_encoded - phn_encoded_eos = torch.LongTensor( - label_encoder.append_eos_index(phn_encoded_list) - ) - yield phn_encoded_eos - phn_encoded_bos = torch.LongTensor( - label_encoder.prepend_bos_index(phn_encoded_list) - ) - yield phn_encoded_bos - - sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) - - # 3. Fit encoder: - # NOTE: In this minimal example, also update from valid data - - label_encoder.update_from_didataset(train_data, output_key="phn_list") - if ( - hparams["blank_index"] != hparams["bos_index"] - or hparams["blank_index"] != hparams["eos_index"] - ): - label_encoder.insert_blank(index=hparams["blank_index"]) - - if hparams["bos_index"] == hparams["eos_index"]: - label_encoder.insert_bos_eos( - bos_label="", - eos_label="", - bos_index=hparams["bos_index"], - ) - else: - label_encoder.insert_bos_eos( - bos_label="", - eos_label="", - bos_index=hparams["bos_index"], - eos_index=hparams["eos_index"], - ) - - # 4. Set output: - sb.dataio.dataset.set_output_keys( - datasets, - ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"], - ) - - return train_data, valid_data, test_data, label_encoder - - -def load_teachers(hparams): - """ - Load results of inference of teacher models stored on disk. - Note: Run experiment_save_teachers.py beforehand to generate .hdf5 files. 
- """ - path = hparams["tea_infer_dir"] + "/tea_infer_{}batch.hdf5".format( - hparams["batch_size"] - ) - f = h5py.File(path, "r") - train_dict = f["train"] - valid_dict = f["valid"] - test_dict = f["test"] - - return train_dict, valid_dict, test_dict - - -def st_load(hparams, asr_brain): - """ - load pre-trained student model and remove decoder layer. - """ - print("loading pre-trained student model...") - chpt_path = hparams["pretrain_st_dir"] + "/model.ckpt" - weight_dict = torch.load(chpt_path) - # del the decoder layer - key_list = [] - for k in weight_dict.keys(): - key_list.append(k) - for k in key_list: - if not k.startswith("0"): - del weight_dict[k] - - # loading weights - asr_brain.hparams.model.load_state_dict(weight_dict, strict=False) - - -if __name__ == "__main__": - # CLI: - hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - - # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: - hparams = load_hyperpyyaml(fin, overrides) - - # Dataset prep (parsing TIMIT and annotation into csv files) - from timit_prepare import prepare_timit # noqa - - # Initialize ddp (useful only for multi-GPU DDP training) - sb.utils.distributed.ddp_init_group(run_opts) - - # multi-gpu (ddp) save data preparation - run_on_main( - prepare_timit, - kwargs={ - "data_folder": hparams["data_folder"], - "splits": ["train", "dev", "test"], - "save_folder": hparams["data_folder"], - }, - ) - - # Dataset IO prep: creating Dataset objects and proper encodings for phones - train_data, valid_data, test_data, label_encoder = data_io_prep(hparams) - - # Create experiment directory - sb.create_experiment_directory( - experiment_directory=hparams["output_folder"], - hyperparams_to_save=hparams_file, - overrides=overrides, - ) - - # Trainer initialization - asr_brain = ASR( - modules=hparams["modules"], - opt_class=hparams["opt_class"], - hparams=hparams, - run_opts=run_opts, - checkpointer=hparams["checkpointer"], - ) - 
asr_brain.label_encoder = label_encoder - - # load teacher models - train_dict, valid_dict, test_dict = load_teachers(hparams) - asr_brain.train_dict = train_dict - asr_brain.valid_dict = valid_dict - asr_brain.test_dict = test_dict - - if hparams["pretrain"]: - # load pre-trained student model except last layer - if hparams["epoch_counter"].current == 0: - st_load(hparams, asr_brain) - - # Training/validation loop - asr_brain.fit( - asr_brain.hparams.epoch_counter, - train_data, - valid_data, - train_loader_kwargs=hparams["train_dataloader_opts"], - valid_loader_kwargs=hparams["valid_dataloader_opts"], - ) - - # Test - asr_brain.evaluate( - test_data, - min_key="PER", - test_loader_kwargs=hparams["test_dataloader_opts"], - ) diff --git a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/train_teacher.py b/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/train_teacher.py deleted file mode 100644 index 9e0c030c3f..0000000000 --- a/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/train_teacher.py +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/env python3 -"""Recipe for doing ASR with phoneme targets and joint seq2seq -and CTC loss on the TIMIT dataset following a knowledge distillation scheme as -reported in " Distilling Knowledge from Ensembles of Acoustic Models for Joint -CTC-Attention End-to-End Speech Recognition", Yan Gao et al. 
- -To run this recipe, do the following: -> python experiment.py hyperparams.yaml --data_folder /path/to/TIMIT - -Authors - * Yan Gao 2021 - * Titouan Parcollet 2021 -""" -import os -import sys -import torch -import speechbrain as sb -from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml - - -# Define training procedure -class ASR(sb.Brain): - def compute_forward(self, batch, stage): - batch = batch.to(self.device) - wavs, wav_lens = batch.sig - phns_bos, _ = batch.phn_encoded_bos - - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "env_corrupt"): - wavs_noise = self.hparams.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - phns_bos = torch.cat([phns_bos, phns_bos]) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) - - feats = self.hparams.compute_features(wavs) - feats = self.modules.normalize(feats, wav_lens) - x = self.modules.enc(feats) - - # output layer for ctc log-probabilities - logits = self.modules.ctc_lin(x) - p_ctc = self.hparams.log_softmax(logits) - - e_in = self.modules.emb(phns_bos) - h, _ = self.modules.dec(e_in, x, wav_lens) - - # output layer for seq2seq log-probabilities - logits = self.modules.seq_lin(h) - p_seq = self.hparams.log_softmax(logits) - - if stage == sb.Stage.VALID: - hyps, scores = self.hparams.greedy_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens, hyps - - elif stage == sb.Stage.TEST: - hyps, scores = self.hparams.beam_searcher(x, wav_lens) - return p_ctc, p_seq, wav_lens, hyps - - return p_ctc, p_seq, wav_lens - - def compute_objectives(self, predictions, batch, stage): - if stage == sb.Stage.TRAIN: - p_ctc, p_seq, wav_lens = predictions - else: - p_ctc, p_seq, wav_lens, hyps = predictions - - ids = batch.id - phns_eos, phn_lens_eos = batch.phn_encoded_eos - phns, phn_lens = batch.phn_encoded - - if hasattr(self.modules, "env_corrupt") and stage == 
sb.Stage.TRAIN: - phns_eos = torch.cat([phns_eos, phns_eos], dim=0) - phn_lens_eos = torch.cat([phn_lens_eos, phn_lens_eos], dim=0) - - loss_ctc = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens) - loss_seq = self.hparams.seq_cost(p_seq, phns_eos, phn_lens_eos) - loss = self.hparams.ctc_weight * loss_ctc - loss += (1 - self.hparams.ctc_weight) * loss_seq - - # Record losses for posterity - if stage != sb.Stage.TRAIN: - self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens) - self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens) - self.per_metrics.append( - ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim, - ) - - return loss - - def fit_batch(self, batch): - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - return loss.detach() - - def evaluate_batch(self, batch, stage): - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - - def on_stage_start(self, stage, epoch): - self.ctc_metrics = self.hparams.ctc_stats() - self.seq_metrics = self.hparams.seq_stats() - - if stage != sb.Stage.TRAIN: - self.per_metrics = self.hparams.per_stats() - - def on_stage_end(self, stage, stage_loss, epoch): - if stage == sb.Stage.TRAIN: - self.train_loss = stage_loss - else: - per = self.per_metrics.summarize("error_rate") - - if stage == sb.Stage.VALID: - old_lr, new_lr = self.hparams.lr_annealing(per) - sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) - - self.hparams.train_logger.log_stats( - stats_meta={"epoch": epoch, "lr": old_lr}, - train_stats={"loss": self.train_loss}, - valid_stats={ - "loss": stage_loss, - "ctc_loss": self.ctc_metrics.summarize("average"), - "seq_loss": self.seq_metrics.summarize("average"), - "PER": per, - }, - ) - 
self.checkpointer.save_and_keep_only( - meta={"PER": per}, min_keys=["PER"] - ) - - if stage == sb.Stage.TEST: - self.hparams.train_logger.log_stats( - stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, - test_stats={"loss": stage_loss, "PER": per}, - ) - with open(self.hparams.wer_file, "w") as w: - w.write("CTC loss stats:\n") - self.ctc_metrics.write_stats(w) - w.write("\nseq2seq loss stats:\n") - self.seq_metrics.write_stats(w) - w.write("\nPER stats:\n") - self.per_metrics.write_stats(w) - print( - "CTC, seq2seq, and PER stats written to file", - self.hparams.wer_file, - ) - - -def data_io_prep(hparams): - "Creates the datasets and their data processing pipelines." - data_folder = hparams["data_folder"] - # 1. Declarations: - train_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["train_annotation"], - replacements={"data_root": data_folder}, - ) - if hparams["sorting"] == "ascending": - # we sort training data to speed up training and get better results. - train_data = train_data.filtered_sorted(sort_key="duration") - # when sorting do not shuffle in dataloader ! otherwise is pointless - hparams["train_dataloader_opts"]["shuffle"] = False - - elif hparams["sorting"] == "descending": - train_data = train_data.filtered_sorted( - sort_key="duration", reverse=True - ) - # when sorting do not shuffle in dataloader ! 
otherwise is pointless - hparams["train_dataloader_opts"]["shuffle"] = False - - elif hparams["sorting"] == "random": - pass - - else: - raise NotImplementedError( - "sorting must be random, ascending or descending" - ) - - valid_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["valid_annotation"], - replacements={"data_root": data_folder}, - ) - valid_data = valid_data.filtered_sorted(sort_key="duration") - - test_data = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["test_annotation"], - replacements={"data_root": data_folder}, - ) - test_data = test_data.filtered_sorted(sort_key="duration") - - datasets = [train_data, valid_data, test_data] - label_encoder = sb.dataio.encoder.CTCTextEncoder() - - # 2. Define audio pipeline: - @sb.utils.data_pipeline.takes("wav") - @sb.utils.data_pipeline.provides("sig") - def audio_pipeline(wav): - sig = sb.dataio.dataio.read_audio(wav) - return sig - - sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) - - # 3. Define text pipeline: - @sb.utils.data_pipeline.takes("phn") - @sb.utils.data_pipeline.provides( - "phn_list", - "phn_encoded_list", - "phn_encoded", - "phn_encoded_eos", - "phn_encoded_bos", - ) - def text_pipeline(phn): - phn_list = phn.strip().split() - yield phn_list - phn_encoded_list = label_encoder.encode_sequence(phn_list) - yield phn_encoded_list - phn_encoded = torch.LongTensor(phn_encoded_list) - yield phn_encoded - phn_encoded_eos = torch.LongTensor( - label_encoder.append_eos_index(phn_encoded_list) - ) - yield phn_encoded_eos - phn_encoded_bos = torch.LongTensor( - label_encoder.prepend_bos_index(phn_encoded_list) - ) - yield phn_encoded_bos - - sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) - - # 3. 
Fit encoder: - # Load or compute the label encoder - label_encoder_file = os.path.join( - hparams["save_folder"], "label_encoder.txt" - ) - if os.path.exists(label_encoder_file): - label_encoder.load(label_encoder_file) - else: - label_encoder.update_from_didataset(train_data, output_key="phn_list") - if ( - hparams["blank_index"] != hparams["bos_index"] - or hparams["blank_index"] != hparams["eos_index"] - ): - label_encoder.insert_blank(index=hparams["blank_index"]) - - if hparams["bos_index"] == hparams["eos_index"]: - label_encoder.insert_bos_eos( - bos_label="", - eos_label="", - bos_index=hparams["bos_index"], - ) - else: - label_encoder.insert_bos_eos( - bos_label="", - eos_label="", - bos_index=hparams["bos_index"], - eos_index=hparams["eos_index"], - ) - label_encoder.save( - os.path.join(hparams["save_folder"], "label_encoder.txt") - ) - - # 4. Set output: - sb.dataio.dataset.set_output_keys( - datasets, - ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"], - ) - - return train_data, valid_data, test_data, label_encoder - - -if __name__ == "__main__": - # CLI: - hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - - # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: - hparams = load_hyperpyyaml(fin, overrides) - - # Dataset prep (parsing TIMIT and annotation into csv files) - from timit_prepare import prepare_timit # noqa - - # Initialize ddp (useful only for multi-GPU DDP training) - sb.utils.distributed.ddp_init_group(run_opts) - - # multi-gpu (ddp) save data preparation - run_on_main( - prepare_timit, - kwargs={ - "data_folder": hparams["data_folder"], - "save_json_train": hparams["train_annotation"], - "save_json_valid": hparams["valid_annotation"], - "save_json_test": hparams["test_annotation"], - }, - ) - - # Dataset IO prep: creating Dataset objects and proper encodings for phones - train_data, valid_data, test_data, label_encoder = data_io_prep(hparams) - - # Create 
experiment directory - sb.create_experiment_directory( - experiment_directory=hparams["output_folder"], - hyperparams_to_save=hparams_file, - overrides=overrides, - ) - - # Trainer initialization - asr_brain = ASR( - modules=hparams["modules"], - opt_class=hparams["opt_class"], - hparams=hparams, - run_opts=run_opts, - checkpointer=hparams["checkpointer"], - ) - asr_brain.label_encoder = label_encoder - - # Training/validation loop - asr_brain.fit( - asr_brain.hparams.epoch_counter, - train_data, - valid_data, - train_loader_kwargs=hparams["train_dataloader_opts"], - valid_loader_kwargs=hparams["valid_dataloader_opts"], - ) - - # Test - asr_brain.evaluate( - test_data, - min_key="PER", - test_loader_kwargs=hparams["test_dataloader_opts"], - ) diff --git a/recipes/TIMIT/ASR/transducer/README.md b/recipes/TIMIT/ASR/transducer/README.md index 72a27032d5..8f4375ca1e 100644 --- a/recipes/TIMIT/ASR/transducer/README.md +++ b/recipes/TIMIT/ASR/transducer/README.md @@ -15,17 +15,21 @@ pip install numba # How to run Update the path to the dataset in the yaml config file and run the following. ``` -python train.py hparams/train.yaml +python train.py hparams/train.yaml --data_folder=your/data/folder/TIMIT --jit ``` +**Note on Compilation**: +Enabling the just-in-time (JIT) compiler with --jit significantly improves code performance, resulting in a 50-60% speed boost. We highly recommend utilizing the JIT compiler for optimal results. +This speed improvement is observed specifically when using the CRDNN model. + # Results | Release | hyperparams file | Val. 
PER | Test PER | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| :-----------:| -| 2021-02-06 | train.yaml | 13.11 | 14.12 | https://drive.google.com/drive/folders/1g3T6zK2o9XTEa_GTw0aoAkRqhg1_BVQ3?usp=sharing | 1xRTX6000 24GB | -| 21-04-16 | train_wav2vec2.yaml | 7.97 | 8.91 | https://drive.google.com/drive/folders/1z8Ox3q2ntnnnh3PPk_eOcKhGeFgVeRcD?usp=sharing | 1xRTX6000 24Gb | +| 2021-02-06 | train.yaml | 13.11 | 14.12 | https://www.dropbox.com/sh/ufktmvk38ulxca3/AAD9_o_ZtNJlHbpeYW1ldvSoa?dl=0 | 1xRTX6000 24GB | +| 21-04-16 | train_wav2vec2.yaml | 7.97 | 8.91 | https://www.dropbox.com/sh/31o2j2ylpavunae/AADhJazz5mGaEbiCQ-cv7IgEa?dl=0 | 1xRTX6000 24Gb | -The output folders with checkpoints and logs for TIMIT recipes can be found [here](https://drive.google.com/drive/folders/1ZcME-Wf4stlzW3j_iJ3zGDCkSy1V_Wjs?usp=sharing). +The output folders with checkpoints and logs for TIMIT recipes can be found [here](https://www.dropbox.com/sh/059jnwdass8v45u/AADTjh5DYdYKuZsgH9HXGx0Sa?dl=0). # Training Time About 2 min and 40 sec for each epoch with a RTX 6000. @@ -40,6 +44,15 @@ About 2 min and 40 sec for each epoch with a RTX 6000. Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/TIMIT/ASR/transducer/extra_requirements.txt b/recipes/TIMIT/ASR/transducer/extra_requirements.txt index 44c65201fe..667d8dbed1 100644 --- a/recipes/TIMIT/ASR/transducer/extra_requirements.txt +++ b/recipes/TIMIT/ASR/transducer/extra_requirements.txt @@ -1,6 +1,2 @@ # For transducer loss numba -# For wav2vect recipe (HuggingFace) -transformers==4.4.0 -# For wav2vect recipe (Fairsec) -#fairseq diff --git a/recipes/TIMIT/ASR/transducer/hparams/train.yaml b/recipes/TIMIT/ASR/transducer/hparams/train.yaml index 10022bd4cd..fc4e6ddb6f 100644 --- a/recipes/TIMIT/ASR/transducer/hparams/train.yaml +++ b/recipes/TIMIT/ASR/transducer/hparams/train.yaml @@ -9,22 +9,26 @@ 
# Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/augment_noise_CRDNN/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt # Data files data_folder: !PLACEHOLDER # e.g. /path/to/TIMIT -openrir_folder: !ref # where storing the noisy data for augment -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json +train_annotation: !ref /train.json +valid_annotation: !ref /dev.json +test_annotation: !ref /test.json skip_prep: False # Skip data preparation uppercase: False # Must be True when the TIMIT dataset is in the upper-case version -# Training parameters +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + +####################### Training Parameters #################################### number_of_epochs: 50 batch_size: 8 lr: 1.0 @@ -36,7 +40,7 @@ n_fft: 400 n_mels: 40 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 2 @@ -51,7 +55,7 @@ dnn_neurons: 512 dec_neurons: 128 # Outputs -output_neurons: 40 +output_neurons: 42 joint_dim: 128 blank_index: 0 @@ -64,14 +68,17 @@ state_beam: 1.0 expand_beam: 1.0 # Dataloader options +num_workers: 4 train_dataloader_opts: batch_size: !ref - + num_workers: !ref valid_dataloader_opts: batch_size: !ref + num_workers: !ref test_dataloader_opts: batch_size: !ref + num_workers: !ref epoch_counter: 
!new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref @@ -85,17 +92,57 @@ compute_features: !new:speechbrain.lobes.features.Fbank normalize: !new:speechbrain.processing.features.InputNormalization norm_type: global -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Models ########################################## enc: !new:speechbrain.lobes.models.CRDNN.CRDNN @@ -146,9 +193,6 @@ output: !new:speechbrain.nnet.linear.Linear n_neurons: !ref # 42 phonemes + 1 blank bias: False -# log_softmax: !new:speechbrain.nnet.activations.Softmax -# apply_log: True - compute_cost: !name:speechbrain.nnet.losses.transducer_loss use_torchaudio: True blank_index: !ref @@ -200,8 +244,6 @@ modules: Tjoint: !ref output: !ref normalize: !ref - env_corrupt: !ref - augmentation: !ref checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref diff --git a/recipes/TIMIT/ASR/transducer/hparams/train_wav2vec.yaml b/recipes/TIMIT/ASR/transducer/hparams/train_wav2vec.yaml index 57a472821f..a9437bdf68 100644 --- a/recipes/TIMIT/ASR/transducer/hparams/train_wav2vec.yaml +++ b/recipes/TIMIT/ASR/transducer/hparams/train_wav2vec.yaml @@ -9,9 +9,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/augment_wav2vec/ -wer_file: !ref /wer.txt +test_wer_file: !ref /wer_test.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -22,36 +22,35 @@ freeze_wav2vec: False # Data files data_folder: !PLACEHOLDER # e.g. 
/path/to/TIMIT -train_annotation: !ref /train.json -valid_annotation: !ref /dev.json -test_annotation: !ref /test.json +train_annotation: !ref /train.json +valid_annotation: !ref /dev.json +test_annotation: !ref /test.json skip_prep: False # Skip data preparation uppercase: False # Must be True when the TIMIT dataset is in the upper-case version -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 batch_size: 8 lr: 0.0003 lr_wav2vec: 0.0001 sorting: ascending # choose between ascending, descending and random -auto_mix_prec: True +precision: fp16 # bf16, fp16 or fp32 # Feature parameters sample_rate: 16000 # n_fft: 400 # n_mels: 40 - -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU # dropout: 0.15 dnn_blocks: 1 -dnn_neurons: 40 +dnn_neurons: 43 dec_neurons: 128 # Outputs -output_neurons: 40 -joint_dim: 40 +output_neurons: 43 +joint_dim: 43 blank_index: 0 # Decoding parameters @@ -75,16 +74,41 @@ test_dataloader_opts: epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# compute_features: !new:speechbrain.lobes.features.Fbank -# sample_rate: !ref -# n_fft: !ref -# n_mels: !ref +############################## Augmentations ################################### -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +wav2vec2: !new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref @@ -96,8 +120,6 @@ enc: !new:speechbrain.lobes.models.VanillaNN.VanillaNN dnn_blocks: !ref dnn_neurons: !ref -jit_module_keys: [enc] - enc_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref @@ -129,9 +151,6 @@ output: !new:speechbrain.nnet.linear.Linear n_neurons: !ref # 42 phonemes + 1 blank bias: False -#log_softmax: !new:speechbrain.nnet.activations.Softmax -# apply_log: True - compute_cost: !name:speechbrain.nnet.losses.transducer_loss use_torchaudio: True blank_index: !ref @@ -189,7 +208,6 @@ modules: dec_lin: !ref Tjoint: !ref output: !ref - augmentation: !ref checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer checkpoints_dir: !ref diff --git a/recipes/TIMIT/ASR/transducer/train.py b/recipes/TIMIT/ASR/transducer/train.py index b8db3349c1..1a7d090c4a 100644 --- a/recipes/TIMIT/ASR/transducer/train.py +++ b/recipes/TIMIT/ASR/transducer/train.py @@ -3,7 +3,11 @@ Transducer loss on the TIMIT dataset. To run this recipe, do the following: -> python train.py hparams/train.yaml --data_folder /path/to/TIMIT +> python train.py hparams/train.yaml --data_folder /path/to/TIMIT --jit + +Note on Compilation: +Enabling the just-in-time (JIT) compiler with --jit significantly improves code performance, +resulting in a 50-60% speed boost. We highly recommend utilizing the JIT compiler for optimal results. 
Authors @@ -11,15 +15,17 @@ * Mirco Ravanelli 2020 * Ju-Chieh Chou 2020 """ + import os import sys -import torch -import logging -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -30,18 +36,10 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig phns, phn_lens = batch.phn_encoded - # Adding optional augmentation when specified: - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "env_corrupt"): - wavs_noise = self.hparams.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - batch.sig = wavs, wav_lens - phns = torch.cat([phns, phns], dim=0) - phn_lens = torch.cat([phn_lens, phn_lens]) - batch.phn_encoded = phns, phn_lens - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + phns = self.hparams.wav_augment.replicate_labels(phns) # Model computations feats = self.hparams.compute_features(wavs) @@ -66,8 +64,8 @@ def compute_forward(self, batch, stage): logits = self.modules.output(joint) if stage == sb.Stage.VALID: - hyps, scores, _, _ = self.hparams.Greedysearcher(x) - return logits, hyps + hyps, _, _, _ = self.hparams.Greedysearcher(x) + return logits, wav_lens, hyps elif stage == sb.Stage.TEST: ( @@ -76,16 +74,22 @@ def compute_forward(self, batch, stage): nbest_hyps, nbest_scores, ) = self.hparams.Beamsearcher(x) - return logits, best_hyps - return logits + return logits, wav_lens, best_hyps + return logits, wav_lens def compute_objectives(self, predictions, batch, stage): "Given the network predictions and targets computed the loss." ids = batch.id - _, wav_lens = batch.sig phns, phn_lens = batch.phn_encoded - if stage != sb.Stage.TRAIN: - predictions, hyps = predictions + + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + phns = self.hparams.wav_augment.replicate_labels(phns) + phn_lens = self.hparams.wav_augment.replicate_labels(phn_lens) + + if stage == sb.Stage.TRAIN: + predictions, wav_lens = predictions + else: + predictions, wav_lens, hyps = predictions # Transducer loss use logits from RNN-T model. 
loss = self.hparams.compute_cost(predictions, phns, wav_lens, phn_lens) @@ -132,20 +136,32 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats={"loss": stage_loss, "PER": per}, ) - with open(self.hparams.wer_file, "w") as w: - w.write("Transducer loss stats:\n") - self.transducer_metrics.write_stats(w) - w.write("\nPER stats:\n") - self.per_metrics.write_stats(w) - print( - "Transducer and PER stats written to file", - self.hparams.wer_file, - ) + run_on_main( + save_metrics_to_file, + args=[ + self.hparams.test_wer_file, + self.transducer_metrics, + self.per_metrics, + ], + ) + + +def save_metrics_to_file(wer_file, transducer_metrics, per_metrics): + with open(wer_file, "w", encoding="utf-8") as w: + w.write("Transducer loss stats:\n") + transducer_metrics.write_stats(w) + w.write("\nPER stats:\n") + per_metrics.write_stats(w) + print( + "Transducer and PER stats written to file", + hparams["test_wer_file"], + ) def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] @@ -230,12 +246,11 @@ def text_pipeline(phn): # Begin Recipe! 
if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Dataset prep (parsing TIMIT and annotation into csv files) @@ -263,6 +278,7 @@ def text_pipeline(phn): "uppercase": hparams["uppercase"], }, ) + run_on_main(hparams["prepare_noise_data"]) # Dataset IO prep: creating Dataset objects and proper encodings for phones train_data, valid_data, test_data, label_encoder = dataio_prep(hparams) diff --git a/recipes/TIMIT/ASR/transducer/train_wav2vec.py b/recipes/TIMIT/ASR/transducer/train_wav2vec.py index 0222452919..dfad5ccebc 100644 --- a/recipes/TIMIT/ASR/transducer/train_wav2vec.py +++ b/recipes/TIMIT/ASR/transducer/train_wav2vec.py @@ -11,15 +11,17 @@ * Mirco Ravanelli 2020 * Ju-Chieh Chou 2020 """ + import os import sys -import torch -import logging -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -30,13 +32,13 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig phns, phn_lens = batch.phn_encoded - # Adding optional augmentation when specified: - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + phns = self.hparams.wav_augment.replicate_labels(phns) # Model computations - feats = self.modules.wav2vec2(wavs) + feats = self.modules.wav2vec2(wavs, wav_lens) x = self.modules.enc(feats) x = self.modules.enc_lin(x) @@ -57,8 +59,8 @@ def compute_forward(self, batch, stage): logits = self.modules.output(joint) if stage == sb.Stage.VALID: - hyps, scores, _, _ = self.hparams.Greedysearcher(x) - return logits, hyps + hyps, _, _, _ = self.hparams.Greedysearcher(x) + return logits, wav_lens, hyps elif stage == sb.Stage.TEST: ( @@ -67,16 +69,22 @@ def compute_forward(self, batch, stage): nbest_hyps, nbest_scores, ) = self.hparams.Beamsearcher(x) - return logits, best_hyps - return logits + return logits, wav_lens, best_hyps + return logits, wav_lens def compute_objectives(self, predictions, batch, stage): "Given the network predictions and targets computed the loss." ids = batch.id - _, wav_lens = batch.sig phns, phn_lens = batch.phn_encoded - if stage != sb.Stage.TRAIN: - predictions, hyps = predictions + + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + phns = self.hparams.wav_augment.replicate_labels(phns) + phn_lens = self.hparams.wav_augment.replicate_labels(phn_lens) + + if stage == sb.Stage.TRAIN: + predictions, wav_lens = predictions + else: + predictions, wav_lens, hyps = predictions # Transducer loss use logits from RNN-T model. 
loss = self.hparams.compute_cost(predictions, phns, wav_lens, phn_lens) @@ -135,70 +143,18 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats={"loss": stage_loss, "PER": per}, ) - with open(self.hparams.wer_file, "w") as w: - w.write("Transducer loss stats:\n") - self.transducer_metrics.write_stats(w) - w.write("\nPER stats:\n") - self.per_metrics.write_stats(w) - print( - "Transducer and PER stats written to file", - self.hparams.wer_file, - ) - - def fit_batch(self, batch): - """Fit one batch, override to do multiple updates. - - The default implementation depends on a few methods being defined - with a particular behavior: - - * ``compute_forward()`` - * ``compute_objectives()`` - - Also depends on having optimizers passed at initialization. - - Arguments - --------- - batch : list of torch.Tensors - Batch of data to use for training. Default implementation assumes - this batch has two elements: inputs and targets. 
- - Returns - ------- - detached loss - """ - # Managing automatic mixed precision - if self.auto_mix_prec: - - self.wav2vec_optimizer.zero_grad() - self.adam_optimizer.zero_grad() - - with torch.cuda.amp.autocast(): - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - - self.scaler.scale(loss).backward() - self.scaler.unscale_(self.wav2vec_optimizer) - self.scaler.unscale_(self.adam_optimizer) - - if self.check_gradients(loss): - self.scaler.step(self.wav2vec_optimizer) - self.scaler.step(self.adam_optimizer) - - self.scaler.update() - else: - outputs = self.compute_forward(batch, sb.Stage.TRAIN) - - loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN) - loss.backward() - - if self.check_gradients(loss): - self.wav2vec_optimizer.step() - self.adam_optimizer.step() - - self.wav2vec_optimizer.zero_grad() - self.adam_optimizer.zero_grad() - - return loss.detach() + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + w.write("Transducer loss stats:\n") + self.transducer_metrics.write_stats(w) + w.write("\nPER stats:\n") + self.per_metrics.write_stats(w) + print( + "Transducer and PER stats written to file", + self.hparams.test_wer_file, + ) def init_optimizers(self): "Initializes the wav2vec2 optimizer and model optimizer" @@ -215,10 +171,16 @@ def init_optimizers(self): ) self.checkpointer.add_recoverable("adam_opt", self.adam_optimizer) + self.optimizers_dict = { + "wav2vec": self.wav2vec_optimizer, + "adam": self.adam_optimizer, + } + def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] @@ -303,12 +265,11 @@ def text_pipeline(phn): # Begin Recipe! 
if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Dataset prep (parsing TIMIT and annotation into csv files) diff --git a/recipes/TIMIT/Alignment/README.md b/recipes/TIMIT/Alignment/README.md index db448230b1..4271bb3d97 100644 --- a/recipes/TIMIT/Alignment/README.md +++ b/recipes/TIMIT/Alignment/README.md @@ -10,7 +10,7 @@ python train.py train/train.yaml | Release | hyperparams file | Test Accuracy | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| -| 20-05-22 | train.yaml | 79.55 | [model](https://drive.google.com/drive/folders/1fXu7JAVUYxZLosH05iBTEPrJyVSCjNRi?usp=sharing) | 1xV100 32GB | +| 20-05-22 | train.yaml | 79.55 | [model](https://www.dropbox.com/sh/dcicuz1r6v7iitt/AAB1BpaMjfhUDBsEsxjAuaHVa?dl=0) | 1xV100 32GB | # Training Time @@ -27,6 +27,15 @@ About 2 minutes for each epoch with a TESLA V100. Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/TIMIT/Alignment/hparams/train.yaml b/recipes/TIMIT/Alignment/hparams/train.yaml index 03fcc86df3..937e0e0644 100644 --- a/recipes/TIMIT/Alignment/hparams/train.yaml +++ b/recipes/TIMIT/Alignment/hparams/train.yaml @@ -8,9 +8,8 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/augment_noise_CRDNN/ -# wer_file: !ref /wer.txt save_folder: !ref /save train_log: !ref /train_log.txt @@ -21,7 +20,7 @@ valid_annotation: !ref /dev.json test_annotation: !ref /test.json skip_prep: False # Skip 
data prep -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 10 batch_size: 256 lr: 0.0003 @@ -41,7 +40,7 @@ phn_set: 60 # {60, 48, 39} output_neurons: 183 blank_index: 182 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dnn_blocks: 1 dnn_neurons: 2000 @@ -56,9 +55,52 @@ dataloader_options: epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] compute_features: !new:speechbrain.lobes.features.Fbank context: True diff --git a/recipes/TIMIT/Alignment/train.py b/recipes/TIMIT/Alignment/train.py index 7cd1a5d1b7..544d92abf8 100644 --- a/recipes/TIMIT/Alignment/train.py +++ b/recipes/TIMIT/Alignment/train.py @@ -10,11 +10,14 @@ * Mirco Ravanelli 2020 * Peter Plantinga 2020 """ + import os import sys + import torch -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main # Define training procedure @@ -26,14 +29,9 @@ def compute_forward(self, batch, stage): batch = batch.to(self.device) wavs, wav_lens = batch.sig - # Adding augmentation when specified: - if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) feats = self.hparams.compute_features(wavs) if hasattr(self.hparams, "normalize"): @@ -51,9 +49,9 @@ def compute_objectives(self, predictions, batch, stage): phns, phn_lens = batch.phn_encoded phn_ends, _ = batch.phn_ends - if stage == sb.Stage.TRAIN and hasattr(self.modules, "env_corrupt"): - phns = torch.cat([phns, phns], dim=0) - phn_lens = torch.cat([phn_lens, phn_lens], dim=0) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + phns = self.hparams.wav_augment.replicate_labels(phns) + phn_lens = self.hparams.wav_augment.replicate_labels(phn_lens) phns, phn_lens = phns.to(self.device), phn_lens.to(self.device) phns_orig = sb.utils.data_utils.undo_padding(phns, phn_lens) @@ -127,7 +125,8 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats={"loss": stage_loss, "accuracy": acc}, ) self.checkpointer.save_and_keep_only( - meta={"accuracy": acc}, max_keys=["accuracy"], + meta={"accuracy": acc}, + max_keys=["accuracy"], ) elif stage == sb.Stage.TEST: @@ -139,7 +138,8 @@ def on_stage_end(self, stage, stage_loss, epoch): def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] @@ -239,12 +239,11 @@ def phn_ends_pipeline(ground_truth_phn_ends): # Begin Recipe! 
if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Dataset prep (parsing TIMIT and annotation into csv files) diff --git a/recipes/TIMIT/timit_prepare.py b/recipes/TIMIT/timit_prepare.py index 86811717d3..fab20a8e26 100644 --- a/recipes/TIMIT/timit_prepare.py +++ b/recipes/TIMIT/timit_prepare.py @@ -8,13 +8,14 @@ * Elena Rastorgueva 2020 """ -import os import json -import logging -from speechbrain.utils.data_utils import get_all_files +import os + from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.data_utils import get_all_files +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) SAMPLERATE = 16000 @@ -28,7 +29,7 @@ def prepare_timit( skip_prep=False, ): """ - repares the json files for the TIMIT dataset. + Prepares the json files for the TIMIT dataset. Arguments --------- @@ -52,11 +53,15 @@ def prepare_timit( Default: False If True, the data preparation is skipped. 
+ Returns + ------- + None + Example ------- >>> from recipes.TIMIT.timit_prepare import prepare_timit - >>> data_folder = 'datasets/TIMIT' - >>> prepare_timit(data_folder, 'train.json', 'valid.json', 'test.json') + >>> data_folder = "datasets/TIMIT" + >>> prepare_timit(data_folder, "train.json", "valid.json", "test.json") """ # Skip if needed @@ -113,8 +118,7 @@ def prepare_timit( def _get_phonemes(): - - # This dictionary is used to conver the 60 phoneme set + # This dictionary is used to convert the 60 phoneme set # into the 48 one from_60_to_48_phn = {} from_60_to_48_phn["sil"] = "sil" @@ -180,7 +184,7 @@ def _get_phonemes(): from_60_to_48_phn["z"] = "z" from_60_to_48_phn["zh"] = "zh" - # This dictionary is used to conver the 60 phoneme set + # This dictionary is used to convert the 60 phoneme set from_60_to_39_phn = {} from_60_to_39_phn["sil"] = "sil" from_60_to_39_phn["aa"] = "aa" @@ -249,8 +253,8 @@ def _get_phonemes(): def _get_speaker(): - # List of test speakers + # cspell:disable test_spk = [ "fdhc0", "felc0", @@ -332,6 +336,8 @@ def _get_speaker(): "mwjg0", ] + # cspell:enable + return dev_spk, test_spk @@ -340,6 +346,11 @@ def skip(annotations): Detects if the timit data_preparation has been already done. If the preparation has been done, we can skip it. + Arguments + --------- + annotations : list + List of paths to check for existence. + Returns ------- bool @@ -356,9 +367,7 @@ def skip(annotations): return skip -def create_json( - wav_lst, json_file, uppercase, phn_set, -): +def create_json(wav_lst, json_file, uppercase, phn_set): """ Creates the json file given a list of wav files. @@ -381,7 +390,6 @@ def create_json( json_dict = {} for wav_file in wav_lst: - # Getting sentence and speaker ids spk_id = wav_file.split("/")[-2] snt_id = wav_file.split("/")[-1].replace(".wav", "") @@ -401,7 +409,10 @@ def create_json( err_msg = "the wrd file %s does not exists!" 
% (wrd_file) raise FileNotFoundError(err_msg) - words = [line.rstrip("\n").split(" ")[2] for line in open(wrd_file)] + words = [ + line.rstrip("\n").split(" ")[2] + for line in open(wrd_file, encoding="utf-8") + ] words = " ".join(words) # Retrieving phonemes @@ -427,7 +438,7 @@ def create_json( } # Writing the dictionary to the json file - with open(json_file, mode="w") as json_f: + with open(json_file, mode="w", encoding="utf-8") as json_f: json.dump(json_dict, json_f, indent=2) logger.info(f"{json_file} successfully created!") @@ -441,7 +452,7 @@ def get_phoneme_lists(phn_file, phn_set): phonemes = [] ends = [] - for line in open(phn_file): + for line in open(phn_file, encoding="utf-8"): end, phoneme = line.rstrip("\n").replace("h#", "sil").split(" ")[1:] # Getting dictionaries for phoneme conversion @@ -505,9 +516,12 @@ def _check_timit_folders(uppercase, data_folder): If not, raises an error. - Returns - ------- - None + Arguments + --------- + uppercase : bool + Whether the files and folders are uppercase. + data_folder : str + Path to the directory containing the data. Raises ------ @@ -533,7 +547,6 @@ def _check_timit_folders(uppercase, data_folder): # Checking train/dr1 if not os.path.exists(data_folder + train_str): - err_msg = ( "the folder %s does not exist (it is expected in " "the TIMIT dataset)" % (data_folder + train_str) diff --git a/recipes/Tedlium2/ASR/transformer/README.md b/recipes/Tedlium2/ASR/transformer/README.md new file mode 100644 index 0000000000..f582610500 --- /dev/null +++ b/recipes/Tedlium2/ASR/transformer/README.md @@ -0,0 +1,67 @@ +# Tedlium2 ASR with Transformers +This folder contains the scripts to train a Transformer-based speech recognizer. + +You can download Tedlium2 at https://lium.univ-lemans.fr/ted-lium2/ + +# How to Run: + +1. 
Begin by training the tokenizer: + +```shell +cd ../../Tokenizer +python train.py hparams/tedlium2_500_bpe.yaml --data_folder /path/to/tedlium2 --clipped_utt_folder /path/to/clipped_folder +``` + +Please, read ../../Tokenizer/README.md before proceeding. +This training script will handle data preparation and tokenizer training. Note that this script prepares the data in a format suitable for training the ASR model. +Specifically, it segments the entire TED recording into individual utterance-level recordings, resulting in approximately 46 gigabytes of data. +The CSV files generated for training, development, and testing are also utilized in ASR training. + +**IMPORTANT:** Ensure you complete this step before proceeding to train the ASR Model. + +2. Proceed to train the ASR model: + +```shell +python train.py hparams/branchformer_large.yaml --pretrained_tokenizer_file /path/to/tokenizer --data_folder /path/to/tedlium2 --clipped_utt_folder /path/to/clipped_folder +``` + +This script relies on the data manifest files prepared in step 1. + + +# Results + +| Release | hyperparams file | Test WER (No LM) | HuggingFace link | Model link | GPUs | +|:-------------:|:-------------:|:-------------:|:---------------------------:| :-----:| :-----:| +| 24-10-23 | branchformer_large.yaml | 8.11 | [HuggingFace](https://huggingface.co/speechbrain/asr-branchformer-large-tedlium2) | [DropBox](https://www.dropbox.com/sh/el523uofs96czfi/AADgTd838pKo2aR8fhqVOh-Oa?dl=0) | 1xA100 80GB | + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + +# Training Time + +It takes about 15 minutes per epoch for the branchformer large model. + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} diff --git a/recipes/Tedlium2/ASR/transformer/hparams/branchformer_large.yaml b/recipes/Tedlium2/ASR/transformer/hparams/branchformer_large.yaml new file mode 100644 index 0000000000..efe3efcf39 --- /dev/null +++ b/recipes/Tedlium2/ASR/transformer/hparams/branchformer_large.yaml @@ -0,0 +1,326 @@ +# ############################################################################ +# Model: E2E ASR with Transformer +# Encoder: Branchformer Encoder +# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch +# Tokens: unigram +# losses: CTC + KLdiv (Label 
 Smoothing loss) # Training: Tedlium2 # Authors: Titouan Parcollet, Shucong Zhang # ############################################################################ # Seed needs to be set at top of yaml, before objects with parameters are made + +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/branchformer_large/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +# IMPORTANT: before running this script, you need to train the tokenizer (refer to ../../Tokenizer/README.md for details). +# Or use the pretrained tokenizer provided in the DropBox folder. +# +# The tokenizer is stored in ../../Tokenizer/results/tokenizer/tokenizer.ckpt + +# Please ensure that the tokenizer has been trained before (refer to ../../Tokenizer/README.md for details). +pretrained_tokenizer_file: !PLACEHOLDER +clipped_utt_folder: !PLACEHOLDER # folder where to store the clipped utterance-level recordings +data_folder: !PLACEHOLDER # e.g, /path/to/TEDLIUM_release2 +skip_prep: False +avoid_if_shorter_than: 1.0 + +train_csv: !ref /train/train.csv +valid_csv: !ref /dev/dev.csv +test_csv: + - !ref /test/test.csv + +####################### Training Parameters #################################### +# To make Transformers converge, the global batch size should be large enough. +# The global batch size is computed as batch_size * n_gpus * grad_accumulation_factor. +# Empirically, we found that this value should be >= 128. +# Please, set your parameters accordingly. 
+precision: fp32 # bf16, fp16 or fp32 +number_of_epochs: 120 +batch_size: 16 # This works for 2x GPUs with 32GB +ctc_weight: 0.3 +grad_accumulation_factor: 2 +max_grad_norm: 5.0 +loss_reduction: 'batchmean' +sorting: random +num_workers: 4 + +# stages related parameters +# stage_one_epochs: 90 +lr_adam: 0.0005 +weight_decay: 0.05 + +# Feature parameters +sample_rate: 16000 +n_fft: 400 +n_mels: 80 +win_length: 25 + +# This setup works well for A100 80GB GPU, adapts it to your needs. +# Or turn it off (but training speed will decrease) +dynamic_batching: True +max_batch_length_train: 800 +max_batch_length_val: 100 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 +shuffle: True +batch_ordering: random +max_batch_ex: 128 + +dynamic_batch_sampler_train: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + +dynamic_batch_sampler_valid: + max_batch_length: !ref + num_buckets: !ref + shuffle: !ref + batch_ordering: !ref + max_batch_ex: !ref + + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + shuffle: True + num_workers: !ref + +valid_dataloader_opts: + batch_size: 1 + +test_dataloader_opts: + batch_size: 1 + +####################### Model Parameters ########################### +# Transformer +d_model: 512 +nhead: 8 +num_encoder_layers: 18 +num_decoder_layers: 6 +csgu_linear_units: 3072 +csgu_kernel_size: 31 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 500 + +# Outputs +blank_index: 0 +label_smoothing: 0.1 +pad_index: 0 +bos_index: 1 +eos_index: 2 + +# Decoding parameters +min_decode_ratio: 0.0 +max_decode_ratio: 1.0 +valid_search_interval: 10 +valid_beam_size: 20 +test_beam_size: 20 +ctc_weight_decode: 0.3 + +############################## models ################################ + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: 
(3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + dropout: !ref + activation: !ref + branchformer_activation: !ref + encoder_module: branchformer + csgu_linear_units: !ref + kernel_size: !ref + attention_type: RelPosMHAXL + normalize_before: True + causal: False + +tokenizer: !new:sentencepiece.SentencePieceProcessor + +ctc_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +seq_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +modules: + CNN: !ref + Transformer: !ref + seq_lin: !ref + ctc_lin: !ref + normalize: !ref + +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref ] + +Adam: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 0.000000001 + weight_decay: !ref + +# Scorer +ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer + eos_index: !ref + blank_index: !ref + ctc_fc: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref ] + weights: + ctc: !ref + + +valid_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + using_eos_threshold: False + length_normalization: True + scorer: !ref + +test_search: !new:speechbrain.decoders.S2STransformerBeamSearcher + modules: [!ref , !ref , !ref ] + bos_index: !ref + eos_index: !ref + min_decode_ratio: !ref + max_decode_ratio: !ref + beam_size: !ref + temperature: 1.15 + using_eos_threshold: False + length_normalization: True + scorer: !ref + +log_softmax: !new:torch.nn.LogSoftmax + dim: -1 + +ctc_cost: 
!name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +seq_cost: !name:speechbrain.nnet.losses.kldiv_loss + label_smoothing: !ref + reduction: !ref + +noam_annealing: !new:speechbrain.nnet.schedulers.NoamScheduler + lr_initial: !ref + n_warmup_steps: 30000 + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + noam_scheduler: !ref + normalizer: !ref + counter: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Time Drop +time_drop_length_low: 20 # Min length for temporal chunk to drop in spectrogram +time_drop_length_high: 25 # Max length for temporal chunk to drop in spectrogram +time_drop_count_low: 7 # Min number of chunks to drop in time in the spectrogram +time_drop_count_high: 7 # Max number of chunks to drop in time in the spectrogram +time_drop_replace: "mean" # Method of dropping chunks + +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + replace: !ref + dim: 1 + +# Frequency Drop +freq_drop_length_low: 25 # Min length for chunks to drop in frequency in the spectrogram +freq_drop_length_high: 30 # Max length for chunks to drop in frequency in the spectrogram +freq_drop_count_low: 2 # Min number of chunks to drop in frequency in the spectrogram +freq_drop_count_high: 2 # Max number of chunks to drop in frequency in the spectrogram +freq_drop_replace: "mean" # Method of dropping chunks + +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + replace: !ref + dim: 2 + +# Time warp +time_warp_window: 5 # Length of time 
warping window +time_warp_mode: "bicubic" # Time warping method + +time_warp: !new:speechbrain.augment.freq_domain.Warping + warp_window: !ref + warp_mode: !ref + dim: 1 + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + win_length: !ref + n_mels: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats +acc_computer: !name:speechbrain.utils.Accuracy.AccuracyStats + +# The pretrainer allows a mapping between pretrained files and instances that +# are declared in the yaml. E.g here, we will download the file lm.ckpt +# and it will be loaded into "lm" which is pointing to the defined +# before. +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + tokenizer: !ref + paths: + tokenizer: !ref diff --git a/recipes/Tedlium2/ASR/transformer/tedlium2_prepare.py b/recipes/Tedlium2/ASR/transformer/tedlium2_prepare.py new file mode 100644 index 0000000000..de4b6b2ee2 --- /dev/null +++ b/recipes/Tedlium2/ASR/transformer/tedlium2_prepare.py @@ -0,0 +1,238 @@ +""" +Download link: https://lium.univ-lemans.fr/ted-lium2/ + +Authors + * Shucong Zhang 2023 + * Adel Moumen 2023 +""" + +import csv +import functools +import os + +from speechbrain.dataio import audio_io +from speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map + +logger = get_logger(__name__) + + +def make_splits(sph_file, stm_file, utt_save_folder, avoid_if_shorter_than): + """ + This function splits the .sph Ted-talk recording into utterances based on the .stm annotation. 
+ + Arguments + --------- + sph_file : str + Path to the sph file containing Ted-talk recording. + stm_file : str + Path to the stm file containing Ted-talk annotation. + utt_save_folder: str + The folder stores the clipped individual utterances. + avoid_if_shorter_than: int + Any utterance shorter than this will be discarded. + + Returns + ------- + entry : list + Contents of annotation files. + """ + # the annotation for JillSobuleMANHATTANINJANUARY_2006.sph is not useful + if "JillSobuleMANHATTANINJANUARY_2006" in sph_file: + logger.info("JillSobuleMANHATTANINJANUARY_2006.sph is skipped") + return + + # load the annotation of the entire speech recording + annotation_file = open(stm_file, encoding="utf-8") + annotations = annotation_file.readlines() + + # load the original speech recording + original_speech, sample_rate = audio_io.load(sph_file) + + entry = [] + + # process the annotation utterance by utterance + for i, line in enumerate(annotations): + line = line.strip("\n") + line = line.split(" ") + # parse the annotation + talk_id = line[0] + spk_id = line[2] + + # start and end point of the utterances in the recording + start = float(line[3]) + end = float(line[4]) + duration = -start + end + # we skip short utterances in case of CNN padding issues + if duration < avoid_if_shorter_than: + continue + + # transcriptions + wrd_list = line[6:] + if wrd_list[-1] == "": + wrd_list = wrd_list[:-1] + transcript = " ".join(wrd_list) + if not transcript[-1].isalpha(): + transcript = transcript[:-1] + transcript = transcript.replace(" 've", "'ve") + transcript = transcript.replace(" 't", "'t") + transcript = transcript.replace(" 'll", "'ll") + transcript = transcript.replace(" 'd", "'d") + transcript = transcript.replace(" 'm", "'m") + transcript = transcript.replace(" 're", "'re") + transcript = transcript.replace(" 's", "'s") + # skip invalid transcriptions + if len(wrd_list) <= 1 or transcript == "ignore_time_segment_in_scoring": + continue + + # clip and save the 
current utterance + clipped_save_path = os.path.join( + utt_save_folder, talk_id + "-" + str(i) + ".wav" + ) + + # we avoid duplicated clip and save + if not os.path.exists(clipped_save_path): + start = float(line[3]) * sample_rate + end = float(line[4]) * sample_rate + curr_utt = original_speech[:, int(start) : int(end)] + audio_io.save(clipped_save_path, curr_utt, sample_rate) + # append to the csv entry list + csv_line = [ + f"{talk_id}-{str(i)}", + str(duration), + clipped_save_path, + spk_id, + transcript, + ] + entry.append(csv_line) + + return entry + + +def process_line( + talk_sph, avoid_if_shorter_than, utt_save_folder_split, data_folder, split +): + """This function processes a single Ted-talk recording. + + Arguments + --------- + talk_sph : str + The name of the Ted-talk recording. + avoid_if_shorter_than: int + Any utterance shorter than this will be discarded. + utt_save_folder_split: str + The folder stores the clipped individual utterances. + data_folder: str + The folder stores the original Ted-talk recordings. + split: str + The split of the dataset, e.g., train, dev, test. + + Returns + ------- + See ``make_splits`` + """ + talk_name = talk_sph[:-4] + talk_sph_path = os.path.join(data_folder, split, "sph", talk_sph) + talk_stm_path = os.path.join(data_folder, split, "stm", talk_name + ".stm") + + return make_splits( + talk_sph_path, + talk_stm_path, + utt_save_folder_split, + avoid_if_shorter_than, + ) + + +def prepare_tedlium2( + data_folder, + utt_save_folder, + csv_save_folder, + skip_prep=False, + avoid_if_shorter_than=1, +): + """This function prepares the Tedlium2 dataset. + Download link: https://lium.univ-lemans.fr/ted-lium2/ + + Arguments + --------- + data_folder : str + Path to the folder where the original Tedlium2 dataset is stored. + utt_save_folder : list + Path where to save the clipped utterance-level recordings. + csv_save_folder: str + Path where to save the generated .csv files. 
+ skip_prep: bool + If True, data preparation is skipped. + avoid_if_shorter_than: int + Any utterance shorter than this will be discarded. + + Returns + ------- + None + + Example + ------- + >>> data_folder = "datasets/TEDLIUM_release2" + >>> utt_save_folder = "datasets/TEDLIUM_release2_processed" + >>> csv_save_folder = "TEDLIUM2" + >>> prepare_tedlium2(data_folder, utt_save_folder, csv_save_folder) + """ + if skip_prep: + return + + splits = [ + "train", + "dev", + "test", + ] + + for split in splits: + utt_save_folder_split = os.path.join(utt_save_folder, split) + csv_save_folder_split = os.path.join(csv_save_folder, split) + os.makedirs(utt_save_folder_split, exist_ok=True) + os.makedirs(csv_save_folder_split, exist_ok=True) + new_filename = os.path.join(csv_save_folder_split, split + ".csv") + if os.path.exists(new_filename): + continue + logger.info("Preparing %s..." % new_filename) + data_folder_split = os.path.join(data_folder, split) + talk_sphs = os.listdir(os.path.join(data_folder_split, "sph")) + + line_processor = functools.partial( + process_line, + avoid_if_shorter_than=avoid_if_shorter_than, + utt_save_folder_split=utt_save_folder_split, + data_folder=data_folder, + split=split, + ) + + tmp_csv = os.path.join(csv_save_folder_split, split + ".tmp") + final_csv = os.path.join(csv_save_folder_split, split + ".csv") + total_line = 0 + total_duration = 0 + with open(tmp_csv, mode="w", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + csv_writer.writerow(["ID", "duration", "wav", "spk_id", "wrd"]) + for row in parallel_map(line_processor, talk_sphs): + if row is None: + continue + + for line in row: + csv_writer.writerow(line) + total_duration += float(line[1]) + total_line += len(row) + + os.replace(tmp_csv, final_csv) + + msg = "\t%s successfully created!" 
% (new_filename) + logger.info(msg) + + msg = f"Number of samples: {total_line} " + logger.info(msg) + msg = "Total duration: %s Hours" % ( + str(round(total_duration / 3600, 2)) + ) + logger.info(msg) diff --git a/recipes/KsponSpeech/ASR/transformer/train.py b/recipes/Tedlium2/ASR/transformer/train.py similarity index 55% rename from recipes/KsponSpeech/ASR/transformer/train.py rename to recipes/Tedlium2/ASR/transformer/train.py index 71f9640a44..ae4ba3ac62 100644 --- a/recipes/KsponSpeech/ASR/transformer/train.py +++ b/recipes/Tedlium2/ASR/transformer/train.py @@ -1,81 +1,71 @@ #!/usr/bin/env python3 -"""Recipe for training a Transformer ASR system with KsponSpeech. +"""Recipe for training a Transformer ASR system with Tedlium2. The system employs an encoder, a decoder, and an attention mechanism -between them. Decoding is performed with (CTC/Att joint) beamsearch -coupled with a neural language model. +between them. Decoding is performed with (CTC/Att joint) beamsearch. To run this recipe, do the following: -> python train.py hparams/transformer.yaml -> python train.py hparams/conformer.yaml +> python train.py hparams/branchformer.yaml -With the default hyperparameters, the system employs -a convolutional frontend and a transformer. +With the default hyperparameters, the system employs a convolutional frontend and a Branchformer. The decoder is based on a Transformer decoder. -Beamsearch coupled with a Transformer language model is used -on the top of decoder probabilities. The neural network is trained on both CTC and negative-log likelihood targets and sub-word units estimated with Byte Pairwise Encoding (BPE) -are used as basic recognition tokens. Training is performed on the full -KsponSpeech dataset (965.2 h). +are used as basic recognition tokens. Training is performed on the Tedlium2 +training dataset. -The best model is the average of the checkpoints from last 5 epochs. +The best model is the average of the checkpoints from last 10 epochs. 
The experiment file is flexible enough to support a large variety of different systems. By properly changing the parameter files, you can try different encoders, decoders, tokens (e.g, characters instead of BPE), -training split (e.g, train-clean 100 rather than the full one), and many -other possible variations. +and many other possible variations. Authors * Jianyuan Zhong 2020 * Mirco Ravanelli 2020 * Peter Plantinga 2020 - * Samuele Cornell 2020 - * Titouan Parcollet 2021 - * Dongwon Kim, Dongwoo Kim 2021 + * Samuele Cornell 2020, 2021, 2022 + * Titouan Parcollet 2021, 2022 + * Shucong Zhang 2023 """ + import os import sys -import torch -import logging from pathlib import Path -import speechbrain as sb + +import torch from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure class ASR(sb.core.Brain): def compute_forward(self, batch, stage): - """Forward computations from the waveform batches - to the output probabilities.""" + """Forward computations from the waveform batches to the output probabilities.""" batch = batch.to(self.device) wavs, wav_lens = batch.sig tokens_bos, _ = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.modules, "env_corrupt"): - wavs_noise = self.modules.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) - # compute features feats = self.hparams.compute_features(wavs) current_epoch = self.hparams.epoch_counter.current feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - feats = 
self.hparams.augmentation(feats) + # Add feature augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "fea_augment"): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_bos = self.hparams.fea_augment.replicate_labels(tokens_bos) # forward modules src = self.modules.CNN(feats) + enc_out, pred = self.modules.Transformer( src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index ) @@ -96,40 +86,51 @@ def compute_forward(self, batch, stage): hyps = None current_epoch = self.hparams.epoch_counter.current if current_epoch % self.hparams.valid_search_interval == 0: - # for the sake of efficiency, we only perform beamsearch with - # limited capacity and no LM to give user some idea of - # how the AM is doing - hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens) + # for the sake of efficiency, we only perform beamsearch with limited capacity + # and no LM to give user some idea of how the AM is doing + hyps, _, _, _ = self.hparams.valid_search( + enc_out.detach(), wav_lens + ) elif stage == sb.Stage.TEST: - hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens) + hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens) return p_ctc, p_seq, wav_lens, hyps def compute_objectives(self, predictions, batch, stage): """Computes the loss (CTC+NLL) given predictions and targets.""" - (p_ctc, p_seq, wav_lens, hyps,) = predictions + (p_ctc, p_seq, wav_lens, hyps) = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens - if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 - ) - tokens = torch.cat([tokens, tokens], dim=0) - tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) + if stage == sb.Stage.TRAIN: + if hasattr(self.hparams, "fea_augment"): + tokens = 
self.hparams.fea_augment.replicate_labels(tokens) + tokens_lens = self.hparams.fea_augment.replicate_labels( + tokens_lens + ) + tokens_eos = self.hparams.fea_augment.replicate_labels( + tokens_eos + ) + tokens_eos_lens = self.hparams.fea_augment.replicate_labels( + tokens_eos_lens + ) loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens - ) - loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) + ).sum() + + loss_ctc = self.hparams.ctc_cost( + p_ctc, tokens, wav_lens, tokens_lens + ).sum() + loss = ( self.hparams.ctc_weight * loss_ctc + (1 - self.hparams.ctc_weight) * loss_seq ) + if stage != sb.Stage.TRAIN: current_epoch = self.hparams.epoch_counter.current valid_search_interval = self.hparams.valid_search_interval @@ -141,62 +142,37 @@ def compute_objectives(self, predictions, batch, stage): tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps ] target_words = [wrd.split(" ") for wrd in batch.wrd] - predicted_chars = [ - list("".join(utt_seq)) for utt_seq in predicted_words - ] - target_chars = [list("".join(wrd.split())) for wrd in batch.wrd] self.wer_metric.append(ids, predicted_words, target_words) - self.cer_metric.append(ids, predicted_chars, target_chars) # compute the accuracy of the one-step-forward prediction self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - # check if we need to switch optimizer - # if so change the optimizer from Adam to SGD - self.check_and_reset_optimizer() - - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - # normalize the loss by gradient_accumulation step - (loss / self.hparams.gradient_accumulation).backward() - - if self.step % self.hparams.gradient_accumulation == 0: - # gradient clipping & early stop if loss is not fini - self.check_gradients(loss) - - self.optimizer.step() - 
self.optimizer.zero_grad() - - # anneal lr every update + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply noam annealing.""" + if should_step: self.hparams.noam_annealing(self.optimizer) - if isinstance( - self.hparams.train_logger, - sb.utils.train_logger.TensorboardLogger, - ): - self.hparams.train_logger.log_stats( - stats_meta={"step": self.step}, train_stats={"loss": loss}, - ) + def on_evaluate_start(self, max_key=None, min_key=None): + """perform checkpoint average if needed""" + super().on_evaluate_start() - return loss.detach() + ckpts = self.checkpointer.find_checkpoints( + max_key=max_key, min_key=min_key + ) + ckpt = sb.utils.checkpoints.average_checkpoints( + ckpts, recoverable_name="model" + ) - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - with torch.no_grad(): - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() + self.hparams.model.load_state_dict(ckpt, strict=True) + self.hparams.model.eval() + logger.info("Loaded the average") def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" if stage != sb.Stage.TRAIN: self.acc_metric = self.hparams.acc_computer() self.wer_metric = self.hparams.error_rate_computer() - self.cer_metric = self.hparams.error_rate_computer() def on_stage_end(self, stage, stage_loss, epoch): """Gets called at the end of a epoch.""" @@ -213,21 +189,19 @@ def on_stage_end(self, stage, stage_loss, epoch): or stage == sb.Stage.TEST ): stage_stats["WER"] = self.wer_metric.summarize("error_rate") - stage_stats["CER"] = self.cer_metric.summarize("error_rate") # log stats and save checkpoint at end-of-epoch - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): - - # report different epoch stages according current stage - current_epoch = self.hparams.epoch_counter.current - if 
current_epoch <= self.hparams.stage_one_epochs: - lr = self.hparams.noam_annealing.current_lr - steps = self.hparams.noam_annealing.n_steps - else: - lr = self.hparams.lr_sgd - steps = -1 - - epoch_stats = {"epoch": epoch, "lr": lr, "steps": steps} + if stage == sb.Stage.VALID: + lr = self.hparams.noam_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } self.hparams.train_logger.log_stats( stats_meta=epoch_stats, train_stats=self.train_stats, @@ -236,7 +210,7 @@ def on_stage_end(self, stage, stage_loss, epoch): self.checkpointer.save_and_keep_only( meta={"ACC": stage_stats["ACC"], "epoch": epoch}, max_keys=["ACC"], - num_to_keep=5, + num_to_keep=10, ) elif stage == sb.Stage.TEST: @@ -244,86 +218,29 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) - self.cer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) # save the averaged checkpoint at the end of the evaluation stage # delete the rest of the intermediate checkpoints - # ACC is set to 1.1 so checkpointer - # only keeps the averaged checkpoint + # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint self.checkpointer.save_and_keep_only( meta={"ACC": 1.1, "epoch": epoch}, max_keys=["ACC"], num_to_keep=1, ) - def check_and_reset_optimizer(self): - """reset the optimizer if training enters stage 2""" - current_epoch = self.hparams.epoch_counter.current - if not hasattr(self, "switched"): - self.switched = False - if isinstance(self.optimizer, torch.optim.SGD): - self.switched = True - - if self.switched is True: - return - - if current_epoch > self.hparams.stage_one_epochs: - 
self.optimizer = self.hparams.SGD(self.modules.parameters()) - - if self.checkpointer is not None: - self.checkpointer.add_recoverable("optimizer", self.optimizer) - - self.switched = True - - def on_fit_start(self): - """Initialize the right optimizer on the training start""" - super().on_fit_start() - - # if the model is resumed from stage two, reinitialize the optimizer - current_epoch = self.hparams.epoch_counter.current - current_optimizer = self.optimizer - if current_epoch > self.hparams.stage_one_epochs: - del self.optimizer - self.optimizer = self.hparams.SGD(self.modules.parameters()) - - # Load latest checkpoint to resume training if interrupted - if self.checkpointer is not None: - - # do not reload the weights if training is interrupted - # right before stage 2 - group = current_optimizer.param_groups[0] - if "momentum" not in group: - return - - self.checkpointer.recover_if_possible( - device=torch.device(self.device) - ) - - def on_evaluate_start(self, max_key=None, min_key=None): - """perform checkpoint averge if needed""" - super().on_evaluate_start() - - ckpts = self.checkpointer.find_checkpoints( - max_key=max_key, min_key=min_key - ) - ckpt = sb.utils.checkpoints.average_checkpoints( - ckpts, recoverable_name="model", device=self.device - ) - - self.hparams.model.load_state_dict(ckpt, strict=True) - self.hparams.model.eval() - def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined - functions.""" - data_folder = hparams["data_folder"] + It also defines the data processing pipeline through user-defined functions. 
+ """ train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["train_csv"] ) if hparams["sorting"] == "ascending": @@ -347,7 +264,7 @@ def dataio_prepare(hparams): "sorting must be random, ascending or descending" ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, + csv_path=hparams["valid_csv"] ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -356,13 +273,14 @@ def dataio_prepare(hparams): for csv_file in hparams["test_csv"]: name = Path(csv_file).stem test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=csv_file, replacements={"data_root": data_folder} + csv_path=csv_file ) test_datasets[name] = test_datasets[name].filtered_sorted( sort_key="duration" ) datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + valtest_datasets = [valid_data] + [i for k, i in test_datasets.items()] # We get the tokenizer as we need it to encode the labels when creating # mini-batches. @@ -375,7 +293,22 @@ def audio_pipeline(wav): sig = sb.dataio.dataio.read_audio(wav) return sig - sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + sb.dataio.dataset.add_dynamic_item(valtest_datasets, audio_pipeline) + + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline_train(wav): + # Speed Perturb is done here so it is multi-threaded with the + # workers of the dataloader (faster). + if "speed_perturb" in hparams: + sig = sb.dataio.dataio.read_audio(wav) + + sig = hparams["speed_perturb"](sig.unsqueeze(0)).squeeze(0) + else: + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline_train) # 3. Define text pipeline: @sb.utils.data_pipeline.takes("wrd") @@ -397,24 +330,50 @@ def text_pipeline(wrd): # 4. 
Set output: sb.dataio.dataset.set_output_keys( - datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"], + ) + + # 5. If Dynamic Batching is used, we instantiate the needed samplers. + train_batch_sampler = None + valid_batch_sampler = None + if hparams["dynamic_batching"]: + from speechbrain.dataio.sampler import DynamicBatchSampler # noqa + + dynamic_hparams_train = hparams["dynamic_batch_sampler_train"] + dynamic_hparams_valid = hparams["dynamic_batch_sampler_valid"] + + train_batch_sampler = DynamicBatchSampler( + train_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_train, + ) + valid_batch_sampler = DynamicBatchSampler( + valid_data, + length_func=lambda x: x["duration"], + **dynamic_hparams_valid, + ) + + return ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_batch_sampler, + valid_batch_sampler, ) - return train_data, valid_data, test_datasets, tokenizer if __name__ == "__main__": # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) - # 1. # Dataset prep (parsing KsponSpeech) - from ksponspeech_prepare import prepare_ksponspeech # noqa - # Create experiment directory sb.create_experiment_directory( experiment_directory=hparams["output_folder"], @@ -422,26 +381,35 @@ def text_pipeline(wrd): overrides=overrides, ) + # 1. 
# Dataset prep (parsing Tedlium2) + from tedlium2_prepare import prepare_tedlium2 # noqa + # multi-gpu (ddp) save data preparation run_on_main( - prepare_ksponspeech, + prepare_tedlium2, kwargs={ "data_folder": hparams["data_folder"], - "tr_splits": hparams["train_splits"], - "dev_splits": hparams["dev_splits"], - "te_splits": hparams["test_splits"], - "save_folder": hparams["data_folder"], - "merge_lst": hparams["train_splits"], - "merge_name": hparams["train_csv"], + "utt_save_folder": hparams["clipped_utt_folder"], + "csv_save_folder": hparams["output_folder"], "skip_prep": hparams["skip_prep"], + "avoid_if_shorter_than": hparams["avoid_if_shorter_than"], }, ) # here we create the datasets objects as well as tokenization and encoding - train_data, valid_data, test_datasets, tokenizer = dataio_prepare(hparams) + ( + train_data, + valid_data, + test_datasets, + tokenizer, + train_bsampler, + valid_bsampler, + ) = dataio_prepare(hparams) - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + # We download the pretrained LM from HuggingFace (or elsewhere depending on + # the path given in the YAML file). The tokenizer is loaded at the same time. 
+ hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Trainer initialization asr_brain = ASR( @@ -454,20 +422,48 @@ def text_pipeline(wrd): # adding objects to trainer: asr_brain.tokenizer = hparams["tokenizer"] + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + collate_fn = None + if "collate_fn" in train_dataloader_opts: + collate_fn = train_dataloader_opts["collate_fn"] + + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if collate_fn is not None: + train_dataloader_opts["collate_fn"] = collate_fn + + if valid_bsampler is not None: + collate_fn = None + if "collate_fn" in valid_dataloader_opts: + collate_fn = valid_dataloader_opts["collate_fn"] + + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + if collate_fn is not None: + valid_dataloader_opts["collate_fn"] = collate_fn # Training asr_brain.fit( asr_brain.hparams.epoch_counter, train_data, valid_data, - train_loader_kwargs=hparams["train_dataloader_opts"], - valid_loader_kwargs=hparams["valid_dataloader_opts"], + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, ) # Testing - for k in test_datasets.keys(): # keys are eval_clean, eval_other etc - asr_brain.hparams.wer_file = os.path.join( - hparams["output_folder"], "wer_{}.txt".format(k) + if not os.path.exists(hparams["output_wer_folder"]): + os.makedirs(hparams["output_wer_folder"]) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" ) asr_brain.evaluate( test_datasets[k], diff --git a/recipes/Tedlium2/Tokenizer/README.md b/recipes/Tedlium2/Tokenizer/README.md new file mode 100644 index 0000000000..007b719638 --- /dev/null +++ b/recipes/Tedlium2/Tokenizer/README.md @@ -0,0 +1,53 @@ +# Tokenizer. 
+This folder contains the scripts to train a tokenizer using SentencePiece (https://github.com/google/sentencepiece). +The tokenizer is trained on the top of the Tedlium2 training transcriptions. + +You can download Tedlium2 at https://lium.univ-lemans.fr/ted-lium2/ + + +# How to Run + +To run the training script, follow these steps: + +1. Run the following command, replacing `--data_folder` with the path to your downloaded and unpacked Tedlium2 dataset: + +```python +python train.py hparams/tedlium2_500_bpe.yaml --data_folder=/path/to/TEDLIUM --clipped_utt_folder=/path/where/to/store/clipped/TEDLIUM +``` + +**IMPORTANT**: Please utilize **absolute paths** for both the `data_folder` and the `clipped_utt_folder` because the generated CSV files will be employed in training the ASR model. + + +2. The script will automatically process the dataset and store a modified version of it in the directory specified by `--clipped_utt_folder`. This modified dataset contains recordings split into individual utterances, making it suitable for Automatic Speech Recognition (ASR) training. You can now use this processed dataset for ASR training as described in the `../ASR/README.md` file. + +Make sure to adjust the paths and filenames as needed to match your specific setup and dataset location. + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/Tedlium2/Tokenizer/hparams/tedlium2_500_bpe.yaml b/recipes/Tedlium2/Tokenizer/hparams/tedlium2_500_bpe.yaml new file mode 100644 index 0000000000..945972ae23 --- /dev/null +++ b/recipes/Tedlium2/Tokenizer/hparams/tedlium2_500_bpe.yaml @@ -0,0 +1,31 @@ +# ############################################################################ +# Tokenizer: subword BPE with unigram 500 +# Training: Tedlium2 +# Authors: Abdel Heba 2021 +# Shucong Zhang 2023 +# ############################################################################ + 
+output_folder: results/tokenizer # folder where to store the BPE ckpt and csv files +clipped_utt_folder: !PLACEHOLDER # folder where to store the clipped utterance-level recordings + +# Data files +data_folder: !PLACEHOLDER # e.g, /path/to/TEDLIUM_release2 +skip_prep: False +train_csv: !ref /train/train.csv +valid_csv: !ref /dev/dev.csv + +####################### Training Parameters #################################### +token_type: bpe # ["unigram", "bpe", "char"] +token_output: 500 # index(blank/eos/bos/unk) = 0 +character_coverage: 1.0 +csv_read: wrd +avoid_if_shorter_than: 1.0 + +tokenizer: !name:speechbrain.tokenizers.SentencePiece.SentencePiece + model_dir: !ref + vocab_size: !ref + annotation_train: !ref + annotation_read: !ref + model_type: !ref # ["unigram", "bpe", "char"] + character_coverage: !ref + annotation_list_to_check: [!ref , !ref ] diff --git a/recipes/Tedlium2/Tokenizer/tedlium2_prepare.py b/recipes/Tedlium2/Tokenizer/tedlium2_prepare.py new file mode 120000 index 0000000000..53047f4d83 --- /dev/null +++ b/recipes/Tedlium2/Tokenizer/tedlium2_prepare.py @@ -0,0 +1 @@ +../tedlium2_prepare.py \ No newline at end of file diff --git a/recipes/Tedlium2/Tokenizer/train.py b/recipes/Tedlium2/Tokenizer/train.py new file mode 100644 index 0000000000..d423910a8a --- /dev/null +++ b/recipes/Tedlium2/Tokenizer/train.py @@ -0,0 +1,66 @@ +#!/usr/bin/env/python3 +"""Recipe for training a BPE tokenizer with Tedlium2. +The tokenizer converts words into sub-word units that can +be used to train a language (LM) or an acoustic model (AM). +When doing a speech recognition experiment you have to make +sure that the acoustic and language models are trained with +the same tokenizer. Otherwise, a token mismatch is introduced +and beamsearch will produce bad results when combining AM and LM. 
+ +To run this recipe, do the following: +> python train.py hyperparams/tedlium2_500_bpe.yaml + +Authors + * Shucong Zhang 2023 +""" + +import shutil +import sys + +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import run_on_main + +if __name__ == "__main__": + # CLI: + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # create ddp_group with the right communication protocol + sb.utils.distributed.ddp_init_group(run_opts) + + # 1. # Dataset prep (parsing Tedlium2) + from tedlium2_prepare import prepare_tedlium2 # noqa + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # multi-gpu (ddp) save data preparation + run_on_main( + prepare_tedlium2, + kwargs={ + "data_folder": hparams["data_folder"], + "utt_save_folder": hparams["clipped_utt_folder"], + "csv_save_folder": hparams["output_folder"], + "skip_prep": hparams["skip_prep"], + "avoid_if_shorter_than": hparams["avoid_if_shorter_than"], + }, + ) + + # Train tokenizer + hparams["tokenizer"]() + + output_path = hparams["output_folder"] + + token_output = hparams["token_output"] + token_type = hparams["token_type"] + bpe_model = f"{output_path}/{token_output}_{token_type}.model" + + tokenizer_ckpt = f"{output_path}/tokenizer.ckpt" + shutil.copyfile(bpe_model, tokenizer_ckpt) diff --git a/recipes/Tedlium2/tedlium2_prepare.py b/recipes/Tedlium2/tedlium2_prepare.py new file mode 100644 index 0000000000..c79f1f7e94 --- /dev/null +++ b/recipes/Tedlium2/tedlium2_prepare.py @@ -0,0 +1,238 @@ +""" +Download link: https://lium.univ-lemans.fr/ted-lium2/ + +Authors + * Shucong Zhang 2023 + * Adel Moumen 2023 +""" + +import csv +import functools +import os + +from speechbrain.dataio import audio_io +from 
speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map + +logger = get_logger(__name__) + + +def make_splits(sph_file, stm_file, utt_save_folder, avoid_if_shorter_than): + """ + This function splits the .sph Ted-talk recording into utterances based on the .stm annotation. + + Arguments + --------- + sph_file : str + Path to the sph file containing Ted-talk recording. + stm_file : str + Path to the stm file containing Ted-talk annotation. + utt_save_folder: str + The folder stores the clipped individual utterances. + avoid_if_shorter_than: int + Any utterance shorter than this will be discarded. + + Returns + ------- + entry : list + Loaded entries from file. + """ + # the annotation for JillSobuleMANHATTANINJANUARY_2006.sph is not useful + if "JillSobuleMANHATTANINJANUARY_2006" in sph_file: + logger.info("JillSobuleMANHATTANINJANUARY_2006.sph is skipped") + return + + # load the annotation of the entire speech recording + annotation_file = open(stm_file, encoding="utf-8") + annotations = annotation_file.readlines() + + # load the original speech recording + original_speech, sample_rate = audio_io.load(sph_file) + + entry = [] + + # process the annotation utterance by utterance + for i, line in enumerate(annotations): + line = line.strip("\n") + line = line.split(" ") + # parse the annotation + talk_id = line[0] + spk_id = line[2] + + # start and end point of the utterances in the recording + start = float(line[3]) + end = float(line[4]) + duration = -start + end + # we skip short utterances in case of CNN padding issues + if duration < avoid_if_shorter_than: + continue + + # transcriptions + wrd_list = line[6:] + if wrd_list[-1] == "": + wrd_list = wrd_list[:-1] + transcript = " ".join(wrd_list) + if not transcript[-1].isalpha(): + transcript = transcript[:-1] + transcript = transcript.replace(" 've", "'ve") + transcript = transcript.replace(" 't", "'t") + transcript = transcript.replace(" 'll", "'ll") + transcript = 
transcript.replace(" 'd", "'d") + transcript = transcript.replace(" 'm", "'m") + transcript = transcript.replace(" 're", "'re") + transcript = transcript.replace(" 's", "'s") + # skip invalid transcriptions + if len(wrd_list) <= 1 or transcript == "ignore_time_segment_in_scoring": + continue + + # clip and save the current utterance + clipped_save_path = os.path.join( + utt_save_folder, talk_id + "-" + str(i) + ".wav" + ) + + # we avoid duplicated clip and save + if not os.path.exists(clipped_save_path): + start = float(line[3]) * sample_rate + end = float(line[4]) * sample_rate + curr_utt = original_speech[:, int(start) : int(end)] + audio_io.save(clipped_save_path, curr_utt, sample_rate) + # append to the csv entry list + csv_line = [ + f"{talk_id}-{str(i)}", + str(duration), + clipped_save_path, + spk_id, + transcript, + ] + entry.append(csv_line) + + return entry + + +def process_line( + talk_sph, avoid_if_shorter_than, utt_save_folder_split, data_folder, split +): + """This function processes a single Ted-talk recording. + + Arguments + --------- + talk_sph : str + The name of the Ted-talk recording. + avoid_if_shorter_than: int + Any utterance shorter than this will be discarded. + utt_save_folder_split: str + The folder stores the clipped individual utterances. + data_folder: str + The folder stores the original Ted-talk recordings. + split: str + The split of the dataset, e.g., train, dev, test. + + Returns + ------- + See ``make_splits`` + """ + talk_name = talk_sph[:-4] + talk_sph_path = os.path.join(data_folder, split, "sph", talk_sph) + talk_stm_path = os.path.join(data_folder, split, "stm", talk_name + ".stm") + + return make_splits( + talk_sph_path, + talk_stm_path, + utt_save_folder_split, + avoid_if_shorter_than, + ) + + +def prepare_tedlium2( + data_folder, + utt_save_folder, + csv_save_folder, + skip_prep=False, + avoid_if_shorter_than=1, +): + """This function prepares the Tedlium2 dataset. 
+ Download link: https://lium.univ-lemans.fr/ted-lium2/ + + Arguments + --------- + data_folder : str + Path to the folder where the original Tedlium2 dataset is stored. + utt_save_folder : str + Path where to save the clipped utterance-level recordings. + csv_save_folder: str + Path where to save the generated .csv files. + skip_prep: bool + If True, data preparation is skipped. + avoid_if_shorter_than: int + Any utterance shorter than this will be discarded. + + Returns + ------- + None + + Example + ------- + >>> data_folder = "datasets/TEDLIUM_release2" + >>> utt_save_folder = "datasets/TEDLIUM_release2_processed" + >>> csv_save_folder = "TEDLIUM2" + >>> prepare_tedlium2(data_folder, utt_save_folder, csv_save_folder) + """ + if skip_prep: + return + + splits = [ + "train", + "dev", + "test", + ] + + for split in splits: + utt_save_folder_split = os.path.join(utt_save_folder, split) + csv_save_folder_split = os.path.join(csv_save_folder, split) + os.makedirs(utt_save_folder_split, exist_ok=True) + os.makedirs(csv_save_folder_split, exist_ok=True) + new_filename = os.path.join(csv_save_folder_split, split + ".csv") + if os.path.exists(new_filename): + continue + logger.info("Preparing %s..." 
% new_filename) + data_folder_split = os.path.join(data_folder, split) + talk_sphs = os.listdir(os.path.join(data_folder_split, "sph")) + + line_processor = functools.partial( + process_line, + avoid_if_shorter_than=avoid_if_shorter_than, + utt_save_folder_split=utt_save_folder_split, + data_folder=data_folder, + split=split, + ) + + tmp_csv = os.path.join(csv_save_folder_split, split + ".tmp") + final_csv = os.path.join(csv_save_folder_split, split + ".csv") + total_line = 0 + total_duration = 0 + with open(tmp_csv, mode="w", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + csv_writer.writerow(["ID", "duration", "wav", "spk_id", "wrd"]) + for row in parallel_map(line_processor, talk_sphs): + if row is None: + continue + + for line in row: + csv_writer.writerow(line) + total_duration += float(line[1]) + total_line += len(row) + + os.replace(tmp_csv, final_csv) + + msg = "\t%s successfully created!" % (new_filename) + logger.info(msg) + + msg = f"Number of samples: {total_line} " + logger.info(msg) + msg = "Total duration: %s Hours" % ( + str(round(total_duration / 3600, 2)) + ) + logger.info(msg) diff --git a/recipes/UrbanSound8k/README.md b/recipes/UrbanSound8k/README.md index bc7b0e291f..1e40744148 100644 --- a/recipes/UrbanSound8k/README.md +++ b/recipes/UrbanSound8k/README.md @@ -19,6 +19,15 @@ UrbanSound8k is divided into 10 classes, one of which (engine_idling) receives s 9 = car_horn ``` +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + + # Multiclass Classification Run the following command to train using the ECAPA-TDNN network architecture: @@ -83,12 +92,12 @@ Per Class Accuracy: [ 3 1 4 2 0 1 3 6 79 1] [ 1 0 0 0 0 0 0 0 1 31]] -Please, take a look [here](https://drive.google.com/drive/folders/1sItfg_WNuGX6h2dCs8JTGq2v2QoNTaUg?usp=sharing) for the full experiment folder (with pre-trained models). +Please, take a look [here](https://www.dropbox.com/sh/f61325e3w8h5yy2/AADm3E3PXFi1NYA7-QW3H-Ata?dl=0) for the full experiment folder (with pre-trained models). Classification performance and f-scores are output to the console and log file for each epoch using a passed validation set, and after training using the passed test set. -The default hyperparameter settings will output Tensorboard logs to `/tb_logs/` and can be viewed simply using: +The default hyperparameter settings will output torch.Tensorboard logs to `/tb_logs/` and can be viewed simply using: `tensorboard --logdir=/tb_logs/` @@ -97,7 +106,7 @@ The default hyperparameter settings will output Tensorboard logs to `] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Set up folders for reading from and writing to # Dataset must already exist at `audio_data_folder` data_folder: !PLACEHOLDER # e.g., /localscratch/UrbanSound8K -open_rir_folder: /RIRS # Change if needed +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. 
audio_data_folder: !ref /audio -# TODO the follwing folder will contain the resampled audio files (mono channel and config SR) to train on -#reasmpled_audio_data_folder: !ref /audio_mono16kHz +# TODO the following folder will contain the resampled audio files (mono channel and config SR) to train on +# resampled_audio_data_folder: !ref /audio_mono16kHz output_folder: !ref ./results/urban_sound/ save_folder: !ref /save train_log: !ref /train_log.txt -# Tensorboard logs +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 + + +# torch.Tensorboard logs use_tensorboard: True tensorboard_logs_folder: !ref /tb_logs/ # Path where data manifest files will be stored -train_annotation: !ref /manifest/train.json -valid_annotation: !ref /manifest/valid.json -test_annotation: !ref /manifest/test.json +train_annotation: !ref /manifest/train.json +valid_annotation: !ref /manifest/valid.json +test_annotation: !ref /manifest/test.json +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv # To standardize results, UrbanSound8k has pre-separated samples into # 10 folds for multi-fold validation @@ -40,7 +48,7 @@ skip_manifest_creation: False ckpt_interval_minutes: 15 # save checkpoint every N min -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 25 batch_size: 32 lr: 0.001 @@ -66,10 +74,11 @@ out_n_neurons: 10 # because this does not mix samples from folds in train to valid/test, only # within train or valid, or test shuffle: True +num_workers: 4 dataloader_options: batch_size: !ref shuffle: !ref - num_workers: 0 + num_workers: !ref # Functions compute_features: !new:speechbrain.lobes.features.Fbank @@ -94,55 +103,88 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref 
-augment_wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [100] - -augment_speed: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -add_rev: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 1.0 - noise_prob: 0.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_rev_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 1.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - - -# Definition of the augmentation pipeline. -# If concat_augment = False, the augmentation techniques are applied -# in sequence. If concat_augment = True, all the augmented signals -# # are concatenated in a single big batch. 
- -augment_pipeline: [ - #!ref , - #!ref , - #!ref , - #!ref , - #!ref -] -concat_augment: True +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Add noise to input signal +snr_low: 0 # Min SNR for noise augmentation +snr_high: 15 # Max SNR for noise augmentation + +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: !ref + snr_high: !ref + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: True + concat_original: True + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref , + !ref ] mean_var_norm: !new:speechbrain.processing.features.InputNormalization norm_type: sentence @@ -150,11 +192,6 @@ mean_var_norm: !new:speechbrain.processing.features.InputNormalization modules: compute_features: !ref - augment_wavedrop: !ref - augment_speed: !ref - add_rev: !ref - add_noise: !ref - add_rev_noise: !ref embedding_model: !ref classifier: !ref mean_var_norm: !ref diff --git a/recipes/UrbanSound8k/SoundClassification/train.py b/recipes/UrbanSound8k/SoundClassification/train.py index 806e667d8c..06bc5f0b0f 100755 --- a/recipes/UrbanSound8k/SoundClassification/train.py +++ b/recipes/UrbanSound8k/SoundClassification/train.py @@ -18,22 +18,25 @@ * Hwidong Na 2020 * Nauman Dawalatabad 2020 """ + import os import sys + +import numpy as np import torch import torchaudio -import speechbrain as sb +from confusion_matrix_fig import create_cm_fig from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main -from urbansound8k_prepare import prepare_urban_sound_8k from sklearn.metrics import confusion_matrix -import numpy as np -from confusion_matrix_fig import create_cm_fig +from urbansound8k_prepare import prepare_urban_sound_8k + +import speechbrain as 
sb +from speechbrain.dataio import audio_io +from speechbrain.utils.distributed import run_on_main class UrbanSound8kBrain(sb.core.Brain): - """Class for sound class embedding training" - """ + """Class for sound class embedding training""" def compute_forward(self, batch, stage): """Computation pipeline based on a encoder + sound classifier. @@ -43,33 +46,9 @@ def compute_forward(self, batch, stage): batch = batch.to(self.device) wavs, lens = batch.sig - if stage == sb.Stage.TRAIN: - - # Applying the augmentation pipeline - wavs_aug_tot = [] - wavs_aug_tot.append(wavs) - for count, augment in enumerate(self.hparams.augment_pipeline): - - # Apply augment - wavs_aug = augment(wavs, lens) - - # Managing speed change - if wavs_aug.shape[1] > wavs.shape[1]: - wavs_aug = wavs_aug[:, 0 : wavs.shape[1]] - else: - zero_sig = torch.zeros_like(wavs) - zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug - wavs_aug = zero_sig - - if self.hparams.concat_augment: - wavs_aug_tot.append(wavs_aug) - else: - wavs = wavs_aug - wavs_aug_tot[0] = wavs - - wavs = torch.cat(wavs_aug_tot, dim=0) - self.n_augment = len(wavs_aug_tot) - lens = torch.cat([lens] * self.n_augment) + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, lens = self.hparams.wav_augment(wavs, lens) # Feature extraction and normalization feats = self.modules.compute_features(wavs) @@ -91,15 +70,15 @@ def compute_forward(self, batch, stage): return outputs, lens def compute_objectives(self, predictions, batch, stage): - """Computes the loss using class-id as label. 
- """ + """Computes the loss using class-id as label.""" predictions, lens = predictions uttid = batch.id classid, _ = batch.class_string_encoded # Concatenate labels (due to data augmentation) - if stage == sb.Stage.TRAIN: - classid = torch.cat([classid] * self.n_augment, dim=0) + # Concatenate labels (due to data augmentation) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + classid = self.hparams.wav_augment.replicate_labels(classid) loss = self.hparams.compute_cost(predictions, classid, lens) @@ -117,19 +96,19 @@ def compute_objectives(self, predictions, batch, stage): y_pred = predictions.cpu().detach().numpy().argmax(-1).squeeze(-1) if stage == sb.Stage.VALID: - confusion_matix = confusion_matrix( + my_confusion_matrix = confusion_matrix( y_true, y_pred, labels=sorted(self.hparams.label_encoder.ind2lab.keys()), ) - self.valid_confusion_matrix += confusion_matix + self.valid_confusion_matrix += my_confusion_matrix if stage == sb.Stage.TEST: - confusion_matix = confusion_matrix( + my_confusion_matrix = confusion_matrix( y_true, y_pred, labels=sorted(self.hparams.label_encoder.ind2lab.keys()), ) - self.test_confusion_matrix += confusion_matix + self.test_confusion_matrix += my_confusion_matrix # Compute Accuracy using MetricStats self.acc_metric.append( @@ -233,7 +212,7 @@ def on_stage_end(self, stage, stage_loss, epoch=None): old_lr, new_lr = self.hparams.lr_annealing(epoch) sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) - # Tensorboard logging + # torch.Tensorboard logging if self.hparams.use_tensorboard: self.hparams.tensorboard_train_logger.log_stats( stats_meta={"Epoch": epoch}, @@ -256,9 +235,7 @@ def on_stage_end(self, stage, stage_loss, epoch=None): self.valid_confusion_matrix, axis=1 ) per_class_acc_arr_str = "\n" + "\n".join( - "{:}: {:.3f}".format( - self.hparams.label_encoder.decode_ndim(class_id), class_acc - ) + f"{self.hparams.label_encoder.decode_ndim(class_id)}: {class_acc:.3f}" for class_id, class_acc in 
enumerate(per_class_acc_arr) ) @@ -280,7 +257,7 @@ def on_stage_end(self, stage, stage_loss, epoch=None): self.test_confusion_matrix, axis=1 ) per_class_acc_arr_str = "\n" + "\n".join( - "{:}: {:.3f}".format(class_id, class_acc) + f"{class_id}: {class_acc:.3f}" for class_id, class_acc in enumerate(per_class_acc_arr) ) @@ -288,9 +265,7 @@ def on_stage_end(self, stage, stage_loss, epoch=None): { "Epoch loaded": self.hparams.epoch_counter.current, "\n Per Class Accuracy": per_class_acc_arr_str, - "\n Confusion Matrix": "\n{:}\n".format( - self.test_confusion_matrix - ), + "\n Confusion Matrix": f"\n{self.test_confusion_matrix}\n", }, test_stats=test_stats, ) @@ -315,9 +290,9 @@ def audio_pipeline(wav, fold): """Load the signal, and pass it and its length to the corruption class. This is done on the CPU in the `collate_fn`.""" - wave_file = data_audio_folder + "/fold{:}/{:}".format(fold, wav) + wave_file = data_audio_folder + f"/fold{fold}/{wav}" - sig, read_sr = torchaudio.load(wave_file) + sig, read_sr = audio_io.load(wave_file) # If multi-channels, downmix it to a mono channel sig = torch.squeeze(sig) @@ -362,7 +337,7 @@ def label_pipeline(class_string): # Load or compute the label encoder (with multi-GPU DDP support) # Please, take a look into the lab_enc_file to see the label to index - # mappinng. + # mapping. 
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") label_encoder.load_or_create( path=lab_enc_file, @@ -374,7 +349,6 @@ def label_pipeline(class_string): if __name__ == "__main__": - # This flag enables the inbuilt cudnn auto-tuner torch.backends.cudnn.benchmark = True @@ -385,7 +359,7 @@ def label_pipeline(class_string): sb.utils.distributed.ddp_init_group(run_opts) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -395,7 +369,7 @@ def label_pipeline(class_string): overrides=overrides, ) - # Tensorboard logging + # TensorBoard logging if hparams["use_tensorboard"]: from speechbrain.utils.train_logger import TensorboardLogger @@ -417,6 +391,8 @@ def label_pipeline(class_string): "skip_manifest_creation": hparams["skip_manifest_creation"], }, ) + sb.utils.distributed.run_on_main(hparams["prepare_noise_data"]) + sb.utils.distributed.run_on_main(hparams["prepare_rir_data"]) # Dataset IO prep: creating Dataset objects and proper encodings for phones datasets, label_encoder = dataio_prep(hparams) diff --git a/recipes/UrbanSound8k/urbansound8k_prepare.py b/recipes/UrbanSound8k/urbansound8k_prepare.py index cd86981669..41fa325eb3 100644 --- a/recipes/UrbanSound8k/urbansound8k_prepare.py +++ b/recipes/UrbanSound8k/urbansound8k_prepare.py @@ -38,15 +38,17 @@ * David Whipps, 2021 """ -import os import json -import logging import ntpath +import os + import torchaudio -from speechbrain.dataio.dataio import read_audio -from speechbrain.dataio.dataio import load_data_csv -logger = logging.getLogger(__name__) +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_data_csv, read_audio +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) URBAN_SOUND_8K_DOWNLOAD_FORM_URL = ( 
"https://urbansounddataset.weebly.com/download-urbansound8k.html" @@ -70,6 +72,7 @@ def prepare_urban_sound_8k( """ Prepares the json files for the UrbanSound8k dataset. Prompts to download the dataset if it is not found in the `data_folder`. + Arguments --------- data_folder : str @@ -82,19 +85,34 @@ def prepare_urban_sound_8k( Path where the validation data specification file will be saved. save_json_test : str Path where the test data specification file will be saved. - train_folds: list or int (integers [1,10]) + train_fold_nums : list or int (integers [1,10]) A list of integers defining which pre-defined "folds" to use for training. Must be exclusive of valid_folds and test_folds. - valid_folds: list or int (integers [1,10]) + valid_fold_nums : list or int (integers [1,10]) A list of integers defining which pre-defined "folds" to use for validation. Must be exclusive of train_folds and test_folds. - test_folds: list or int (integers [1,10]) + test_fold_nums : list or int (integers [1,10]) A list of integers defining which pre-defined "folds" to use for test. Must be exclusive of train_folds and valid_folds. + skip_manifest_creation : bool + Whether to skip over creation of the manifest files. + + Returns + ------- + None + Example ------- - >>> data_folder = '/path/to/UrbanSound8k' - >>> prepare_urban_sound_8k(data_folder, 'train.json', 'valid.json', 'test.json', [1,2,3,4,5,6,7,8], [9], [10]) + >>> data_folder = "/path/to/UrbanSound8k" + >>> prepare_urban_sound_8k( + ... data_folder, + ... "train.json", + ... "valid.json", + ... "test.json", + ... [1, 2, 3, 4, 5, 6, 7, 8], + ... [9], + ... [10], + ... 
) """ # Tease params to correct type if necessary @@ -166,8 +184,8 @@ def prepare_urban_sound_8k( os.path.abspath(data_folder), "metadata/", MODIFIED_METADATA_FILE_NAME ) if not os.path.exists(urban_sound_8k_speechbrain_metadata_csv_path): - urban_sound_8k_speechbrain_metadata_csv_path = create_metadata_speechbrain_file( - data_folder + urban_sound_8k_speechbrain_metadata_csv_path = ( + create_metadata_speechbrain_file(data_folder) ) # TODO: If it does not exist, we create it, but next step will certainly fail? @@ -189,11 +207,14 @@ def prepare_urban_sound_8k( def create_json(metadata, audio_data_folder, folds_list, json_file): """ Creates the json file given a list of wav files. + Arguments --------- - metadata: dict + metadata : dict A dictionary containing the UrbanSound8k metadata file modified for the SpeechBrain, such that keys are IDs (which are the .wav file names without the file extension). + audio_data_folder : str + Path to the folder containing audio data folds_list : list of int The list of folds [1,10] to include in this batch json_file : str @@ -212,9 +233,8 @@ def create_json(metadata, audio_data_folder, folds_list, json_file): sample_metadata["slice_file_name"], ) try: - signal = read_audio(wav_file) - file_info = torchaudio.info(wav_file) + file_info = audio_io.info(wav_file) # If we're using sox/soundfile backend, file_info will have the old type if isinstance( @@ -247,7 +267,7 @@ def create_json(metadata, audio_data_folder, folds_list, json_file): if not os.path.exists(parent_dir): os.mkdir(parent_dir) - with open(json_file, mode="w") as json_f: + with open(json_file, mode="w", encoding="utf-8") as json_f: json.dump(json_dict, json_f, indent=2) logger.info(f"{json_file} successfully created!") @@ -255,7 +275,7 @@ def create_json(metadata, audio_data_folder, folds_list, json_file): def folds_overlap(list1, list2): """Returns True if any passed lists has incorrect type OR has items in common.""" - if (type(list1) != list) or (type(list2) != 
list): + if not isinstance(list1, list) or not isinstance(list2, list): return True if any(item in list1 for item in list2): return True @@ -272,14 +292,18 @@ def check_folders(*folders): def full_path_to_audio_file(data_folder, slice_file_name, fold_num): """Get path to file given slice file name and fold number + Arguments --------- + data_folder : str + Path to folder containing data. slice_file_name : str Filename. fold_num : int Fold number. + Returns - ------ + ------- string containing absolute path to corresponding file """ return os.path.join( @@ -292,12 +316,14 @@ def full_path_to_audio_file(data_folder, slice_file_name, fold_num): def create_metadata_speechbrain_file(data_folder): """Get path to file given slice file name and fold number + Arguments --------- data_folder : str UrbanSound8k data folder. + Returns - ------ + ------- string containing absolute path to metadata csv file modified for SpeechBrain or None if source file not found """ import pandas as pd @@ -333,14 +359,16 @@ def path_leaf(path): def removesuffix(somestring, suffix): """Removed a suffix from a string + Arguments --------- somestring : str Any string. suffix : str Suffix to be removed from somestring. + Returns - ------ + ------- string resulting from suffix removed from somestring, if found, unchanged otherwise """ if somestring.endswith(suffix): @@ -351,15 +379,14 @@ def removesuffix(somestring, suffix): def prompt_download_urban_sound_8k(destination): """Prompt to download dataset + Arguments --------- destination : str Place to put dataset. 
""" print( - "UrbanSound8k data is missing from {}!\nRequest it from here: {}".format( - destination, URBAN_SOUND_8K_DOWNLOAD_FORM_URL - ) + f"UrbanSound8k data is missing from {destination}!\nRequest it from here: {URBAN_SOUND_8K_DOWNLOAD_FORM_URL}" ) diff --git a/recipes/Voicebank/ASR/CTC/README.md b/recipes/Voicebank/ASR/CTC/README.md index eb4157f43b..14c70fd600 100644 --- a/recipes/Voicebank/ASR/CTC/README.md +++ b/recipes/Voicebank/ASR/CTC/README.md @@ -8,16 +8,20 @@ download and resample the dataset. ## How to run ```bash -python train.py hparams/train.yaml +python train.py hparams/train.yaml --data_folder=your/data/folder --jit ``` +**Note on Compilation**: +Enabling the just-in-time (JIT) compiler significantly improves code performance, resulting in a 50-60% speed boost. We highly recommend utilizing the JIT compiler for optimal results. +This speed improvement is observed specifically when using the CRDNN model. + ## Results | Release | hyperparams file | input type | Test PER | Model link | GPUs | |:--------:|:----------------:|:-----------:|:--------:|:-------------:|:-----------:| | 21-02-09 | train.yaml | `clean_wav` | 10.12 | Not Available | 1xV100 32GB | -You can find the output folders with the training logs and checkpoints [here](https://drive.google.com/drive/folders/1diFVwth-MKKeNPJFwRdU9ItiFrupddKk?usp=sharing) +You can find the output folders with the training logs and checkpoints [here](https://www.dropbox.com/sh/w4j0auezgmmo005/AAAjKcoJMdLDp0Pqe3m7CLVaa?dl=0) ## Training Time @@ -33,6 +37,15 @@ About 4 mins for each epoch with a TESLA V100. Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/ASR/CTC/hparams/train.yaml b/recipes/Voicebank/ASR/CTC/hparams/train.yaml index c76c1d7453..330b7167bd 100644 --- a/recipes/Voicebank/ASR/CTC/hparams/train.yaml +++ b/recipes/Voicebank/ASR/CTC/hparams/train.yaml @@ -6,7 +6,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1236 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] input_type: clean_wav output_folder: !ref results//phn/ per_file: !ref /per.txt @@ -20,13 +20,14 @@ valid_annotation: !ref /valid.json test_annotation: !ref /test.json skip_prep: False # Skip data preparation -# 
Training parameters +####################### Training Parameters #################################### number_of_epochs: 50 batch_size: 8 sorting: ascending dataloader_options: batch_size: !ref -lr: 1.0 +lr: 0.5 +max_grad_norm: 5.0 # Set this to the path of a pretrained model to load before training # pretrained: model_clean_ep3.ckpt @@ -36,7 +37,7 @@ sample_rate: 16000 n_fft: 400 n_mels: 40 -# Model parameters +####################### Model Parameters ####################################### activation: !name:torch.nn.LeakyReLU dropout: 0.15 cnn_blocks: 2 @@ -60,10 +61,41 @@ compute_features: !new:speechbrain.lobes.features.Fbank n_fft: !ref n_mels: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +############################## Models ########################################## + model: !new:speechbrain.lobes.models.CRDNN.CRDNN input_shape: [null, null, !ref ] activation: !ref @@ -104,7 +136,6 @@ modules: model: !ref output: !ref normalize: !ref - augmentation: !ref jit_module_keys: [model] diff --git a/recipes/Voicebank/ASR/CTC/train.py b/recipes/Voicebank/ASR/CTC/train.py index 1d361ea126..3c774190f5 100644 --- a/recipes/Voicebank/ASR/CTC/train.py +++ b/recipes/Voicebank/ASR/CTC/train.py @@ -12,12 +12,15 @@ Authors * Peter Plantinga 2020 """ + import os import sys + import torch +from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb from speechbrain.utils.distributed import run_on_main -from hyperpyyaml import load_hyperpyyaml # Define training procedure @@ -26,7 +29,9 @@ def compute_forward(self, batch, stage): "Given an input batch it computes the phoneme probabilities." batch = batch.to(self.device) wavs, wav_lens = batch.sig - wavs = self.modules.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) feats = self.hparams.compute_features(wavs) feats = self.modules.normalize(feats, wav_lens) out = self.modules.model(feats) @@ -39,6 +44,10 @@ def compute_objectives(self, predictions, batch, stage): "Given the network predictions and targets computed the CTC loss." 
pout, pout_lens = predictions phns, phn_lens = batch.phn_encoded + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + phns = self.hparams.wav_augment.replicate_labels(phns) + phn_lens = self.hparams.wav_augment.replicate_labels(phn_lens) loss = self.hparams.compute_cost(pout, phns, pout_lens, phn_lens) self.ctc_metrics.append(batch.id, pout, phns, pout_lens, phn_lens) @@ -79,7 +88,7 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats={"loss": stage_loss, "PER": per}, ) self.checkpointer.save_and_keep_only( - meta={"PER": per}, min_keys=["PER"], + meta={"PER": per}, min_keys=["PER"] ) elif stage == sb.Stage.TEST: @@ -87,7 +96,7 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats={"loss": stage_loss, "PER": per}, ) - with open(self.hparams.per_file, "w") as w: + with open(self.hparams.per_file, "w", encoding="utf-8") as w: w.write("CTC loss stats:\n") self.ctc_metrics.write_stats(w) w.write("\nPER stats:\n") @@ -134,7 +143,7 @@ def text_pipeline(phones): # Sort train dataset and ensure it doesn't get un-sorted if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending": data["train"] = data["train"].filtered_sorted( - sort_key="length", reverse=hparams["sorting"] == "descending", + sort_key="length", reverse=hparams["sorting"] == "descending" ) hparams["dataloader_options"]["shuffle"] = False elif hparams["sorting"] != "random": @@ -158,10 +167,9 @@ def text_pipeline(phones): # Begin Recipe! 
if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) diff --git a/recipes/Voicebank/MTL/ASR_enhance/README.md b/recipes/Voicebank/MTL/ASR_enhance/README.md index b4bbc8a0e5..e670fb95e8 100644 --- a/recipes/Voicebank/MTL/ASR_enhance/README.md +++ b/recipes/Voicebank/MTL/ASR_enhance/README.md @@ -18,6 +18,15 @@ but maintains the advantages of interpretability and independence, since each model can be used for other data or tasks without requiring the co-trained model. +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +## How to run To train these models from scratch, you can run these three steps using the following commands: @@ -71,7 +80,7 @@ You can find the pre-trained model with an easy-inference function on HuggingFac https://huggingface.co/speechbrain/mtl-mimic-voicebank You can find the full experiment folder (i.e., checkpoints, logs, etc) here: -https://drive.google.com/drive/folders/1vSpQ5UREiBbTxNUjJjEpSYO8rLTPvQW_?usp=sharing +https://www.dropbox.com/sh/azvcbvu8g5hpgm1/AACDc6QxtNMGZ3IoZLrDiU0Va?dl=0 ## References @@ -93,6 +102,15 @@ https://drive.google.com/drive/folders/1vSpQ5UREiBbTxNUjJjEpSYO8rLTPvQW_?usp=sha Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/MTL/ASR_enhance/composite_eval.py b/recipes/Voicebank/MTL/ASR_enhance/composite_eval.py index 721c660ed8..03d29c8834 100644 --- a/recipes/Voicebank/MTL/ASR_enhance/composite_eval.py +++ b/recipes/Voicebank/MTL/ASR_enhance/composite_eval.py @@ -5,14 +5,16 @@ Authors * adiyoss (https://github.com/adiyoss) """ -from scipy.linalg import toeplitz -from tqdm import tqdm -from pesq import pesq -import librosa -import numpy as np + import os import sys +import librosa +import numpy as np +from pesq import pesq +from scipy.linalg import toeplitz +from tqdm import tqdm + def eval_composite(ref_wav, deg_wav): ref_wav = ref_wav.reshape(-1) @@ -95,6 +97,7 @@ def 
lpcoeff(speech_frame, model_order): # -------------------------------------------------------------------------- # + # ---------------------- Speech Quality Metric ----------------------------- # def PESQ(ref_wav, deg_wav): rate = 16000 @@ -102,9 +105,9 @@ def PESQ(ref_wav, deg_wav): def SSNR(ref_wav, deg_wav, srate=16000, eps=1e-10): - """ Segmental Signal-to-Noise Ratio Objective Speech Quality Measure - This function implements the segmental signal-to-noise ratio - as defined in [1, p. 45] (see Equation 2.12). + """Segmental Signal-to-Noise Ratio Objective Speech Quality Measure + This function implements the segmental signal-to-noise ratio + as defined in [1, p. 45] (see Equation 2.12). """ clean_speech = ref_wav processed_speech = deg_wav @@ -119,9 +122,7 @@ def SSNR(ref_wav, deg_wav, srate=16000, eps=1e-10): # Signal-to-Noise Ratio dif = ref_wav - deg_wav - overall_snr = 10 * np.log10( - np.sum(ref_wav ** 2) / (np.sum(dif ** 2) + 10e-20) - ) + overall_snr = 10 * np.log10(np.sum(ref_wav**2) / (np.sum(dif**2) + 10e-20)) # global variables winlength = int(np.round(30 * srate / 1000)) # 30 msecs skiprate = winlength // 4 @@ -144,7 +145,7 @@ def SSNR(ref_wav, deg_wav, srate=16000, eps=1e-10): processed_frame = processed_frame * window # (2) Compute Segmental SNR - signal_energy = np.sum(clean_frame ** 2) + signal_energy = np.sum(clean_frame**2) noise_energy = np.sum((clean_frame - processed_frame) ** 2) segmental_snr.append( 10 * np.log10(signal_energy / (noise_energy + eps) + eps) @@ -257,14 +258,14 @@ def wss(ref_wav, deg_wav, srate): distortion = [] for frame_count in range(num_frames): - # (1) Get the Frames for the test and reference speeech. + # (1) Get the Frames for the test and reference speech. # Multiply by Hanning window. 
clean_frame = clean_speech[start : start + winlength] processed_frame = processed_speech[start : start + winlength] clean_frame = clean_frame * window processed_frame = processed_frame * window - # (2) Compuet Power Spectrum of clean and processed + # (2) Compute Power Spectrum of clean and processed clean_spec = np.abs(np.fft.fft(clean_frame, n_fft)) ** 2 processed_spec = np.abs(np.fft.fft(processed_frame, n_fft)) ** 2 clean_energy = [None] * num_crit @@ -320,8 +321,8 @@ def wss(ref_wav, deg_wav, srate): n -= 1 processed_loc_peak.append(processed_energy[n + 1]) - # (6) Compuet the WSS Measure for this frame. This includes - # determination of the weighting functino + # (6) Compute the WSS Measure for this frame. This includes + # determination of the weighting function dBMax_clean = max(clean_energy) dBMax_processed = max(processed_energy) @@ -359,7 +360,7 @@ def wss(ref_wav, deg_wav, srate): ) # this normalization is not part of Klatt's paper, but helps - # to normalize the meaasure. Here we scale the measure by the sum of the + # to normalize the measure. Here we scale the measure by the sum of the # weights distortion[frame_count] = distortion[frame_count] / np.sum(W) start += int(skiprate) @@ -389,7 +390,7 @@ def llr(ref_wav, deg_wav, srate): distortion = [] for frame_count in range(num_frames): - # (1) Get the Frames for the test and reference speeech. + # (1) Get the Frames for the test and reference speech. # Multiply by Hanning window. 
clean_frame = clean_speech[start : start + winlength] processed_frame = processed_speech[start : start + winlength] @@ -436,4 +437,4 @@ def llr(ref_wav, deg_wav, srate): cbak += res["cbak"] covl += res["covl"] count += 1 - print(f"CSIG: {csig/count}, CBAK: {cbak/count}, COVL: {covl/count}") + print(f"CSIG: {csig / count}, CBAK: {cbak / count}, COVL: {covl / count}") diff --git a/recipes/Voicebank/MTL/ASR_enhance/hparams/enhance_mimic.yaml b/recipes/Voicebank/MTL/ASR_enhance/hparams/enhance_mimic.yaml index 2a96e25db7..36840ed572 100644 --- a/recipes/Voicebank/MTL/ASR_enhance/hparams/enhance_mimic.yaml +++ b/recipes/Voicebank/MTL/ASR_enhance/hparams/enhance_mimic.yaml @@ -5,7 +5,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1260 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/enhance_model/ stats_file: !ref /stats.txt save_folder: !ref /save @@ -18,7 +18,7 @@ valid_annotation: !ref /valid.json test_annotation: !ref /test.json skip_prep: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 50 batch_size: 8 lr: 0.0001 diff --git a/recipes/Voicebank/MTL/ASR_enhance/hparams/pretrain_perceptual.yaml b/recipes/Voicebank/MTL/ASR_enhance/hparams/pretrain_perceptual.yaml index a86fbc4ccf..3f8d16e87c 100644 --- a/recipes/Voicebank/MTL/ASR_enhance/hparams/pretrain_perceptual.yaml +++ b/recipes/Voicebank/MTL/ASR_enhance/hparams/pretrain_perceptual.yaml @@ -5,7 +5,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1288 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/perceptual_model/ stats_file: !ref /stats.txt save_folder: !ref /save @@ -18,7 +18,7 @@ valid_annotation: !ref /valid.json test_annotation: !ref /test.json 
skip_prep: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 20 ctc_epochs: 4 batch_size: 8 diff --git a/recipes/Voicebank/MTL/ASR_enhance/hparams/robust_asr.yaml b/recipes/Voicebank/MTL/ASR_enhance/hparams/robust_asr.yaml index 8eec433834..035f9f48f6 100644 --- a/recipes/Voicebank/MTL/ASR_enhance/hparams/robust_asr.yaml +++ b/recipes/Voicebank/MTL/ASR_enhance/hparams/robust_asr.yaml @@ -6,21 +6,25 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1428 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/robust_asr/ stats_file: !ref /stats.txt save_folder: !ref /save train_log: !ref /train_log.txt +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 + # Data files data_folder: !PLACEHOLDER # e.g. /path/to/Voicebank -data_folder_rirs: !ref +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. 
train_annotation: !ref /train.json valid_annotation: !ref /valid.json test_annotation: !ref /test.json +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script skip_prep: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 30 ctc_epochs: 0 batch_size: 8 @@ -31,13 +35,18 @@ checkpoint_avg: 5 # average this many checkpoints for eval sorting: ascending eval_max_key: null eval_min_key: null + +num_workers: 4 train_loader_options: batch_size: !ref + num_workers: !ref valid_loader_options: batch_size: !ref + num_workers: !ref shuffle: False test_loader_options: batch_size: !ref + num_workers: !ref shuffle: False epochs_before_lr_drop: 3 @@ -132,24 +141,78 @@ compute_stft: !new:speechbrain.processing.features.STFT spectral_magnitude: !name:speechbrain.processing.features.spectral_magnitude power: 0.5 -env_corr: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] -augment: !new:speechbrain.lobes.augment.TimeDomainSpecAugment fbank: !new:speechbrain.lobes.features.Fbank n_mels: !ref sample_rate: !ref -beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearchLM +coverage_scorer: !new:speechbrain.decoders.scorer.CoverageScorer + vocab_size: !ref + +rnnlm_scorer: !new:speechbrain.decoders.scorer.RNNLMScorer + language_model: !ref + temperature: !ref + +scorer: !new:speechbrain.decoders.scorer.ScorerBuilder + full_scorers: [!ref , + !ref ] + weights: + rnnlm: !ref + coverage: !ref + +beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher embedding: !ref decoder: !ref linear: !ref - language_model: !ref bos_index: !ref eos_index: !ref min_decode_ratio: !ref @@ -158,10 +221,8 @@ beam_searcher: !new:speechbrain.decoders.seq2seq.S2SRNNBeamSearchLM eos_threshold: !ref using_max_attn_shift: !ref max_attn_shift: !ref - coverage_penalty: !ref - lm_weight: !ref temperature: !ref - temperature_lm: !ref + scorer: !ref opt_class: !name:torch.optim.AdamW lr: !ref diff --git a/recipes/Voicebank/MTL/ASR_enhance/train.py b/recipes/Voicebank/MTL/ASR_enhance/train.py index 8b9703ee8d..ce64a6f868 100644 --- a/recipes/Voicebank/MTL/ASR_enhance/train.py +++ b/recipes/Voicebank/MTL/ASR_enhance/train.py @@ -16,22 +16,28 @@ Authors * Peter Plantinga 2020, 2021 """ + import os 
import sys + import torch -import torchaudio -import speechbrain as sb -from pesq import pesq -from pystoi import stoi from composite_eval import eval_composite from hyperpyyaml import load_hyperpyyaml +from pesq import pesq +from pystoi import stoi + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import undo_padding -from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) def pesq_eval(pred_wav, target_wav): return pesq( - fs=16000, ref=target_wav.numpy(), deg=pred_wav.numpy(), mode="wb", + fs=16000, ref=target_wav.numpy(), deg=pred_wav.numpy(), mode="wb" ) @@ -74,7 +80,7 @@ def compute_forward(self, batch, stage): predictions = {} if self.hparams.enhance_type is not None: - noisy_wavs, lens = self.prepare_wavs(batch.noisy_sig) + noisy_wavs, lens = self.prepare_wavs(batch.noisy_sig, stage) # Mask with "signal approximation (SA)" if self.hparams.enhance_type == "masking": @@ -89,14 +95,13 @@ def compute_forward(self, batch, stage): # Generate clean features for ASR pre-training if self.hparams.ctc_type == "clean" or self.hparams.seq_type == "clean": - clean_wavs, lens = self.prepare_wavs(batch.clean_sig) + clean_wavs, lens = self.prepare_wavs(batch.clean_sig, stage) clean_feats = self.prepare_feats(clean_wavs) # Compute seq outputs if self.hparams.seq_type is not None: - # Prepare target inputs - tokens, token_lens = self.prepare_targets(batch.tokens_bos) + tokens, token_lens = self.prepare_targets(batch.tokens_bos, stage) tokens = self.modules.tgt_embedding(tokens) if self.hparams.seq_type == "clean": @@ -105,8 +110,6 @@ def compute_forward(self, batch, stage): embed = self.modules.src_embedding(clean_feats) if self.hparams.seq_type == "joint": asr_feats = predictions["wavs"] - if stage == sb.Stage.TRAIN: - asr_feats = self.hparams.augment(asr_feats, lens) 
asr_feats = self.hparams.fbank(asr_feats) asr_feats = self.hparams.normalizer(asr_feats, lens) embed = self.modules.src_embedding(asr_feats) @@ -119,9 +122,10 @@ def compute_forward(self, batch, stage): predictions["ctc_pout"] = torch.log_softmax(out, dim=-1) if stage != sb.Stage.TRAIN: - predictions["hyps"], _ = self.hparams.beam_searcher( - embed.detach(), lens - ) + hyps, _, _, _ = self.hparams.beam_searcher(embed.detach(), lens) + + # Convert best hypothesis to list + predictions["hyps"] = hyps elif self.hparams.ctc_type is not None: if self.hparams.ctc_type == "clean": @@ -137,18 +141,12 @@ def compute_forward(self, batch, stage): return predictions - def prepare_wavs(self, signal, augment=True): + def prepare_wavs(self, signal, stage): """Prepare possibly enhanced waveforms""" wavs, wav_lens = signal - - if self.stage == sb.Stage.TRAIN and hasattr(self.hparams, "env_corr"): - if augment: - wavs_noise = self.hparams.env_corr(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - else: - wavs = torch.cat([wavs, wavs], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) return wavs, wav_lens def prepare_feats(self, wavs): @@ -158,13 +156,13 @@ def prepare_feats(self, wavs): feats = torch.log1p(feats) return feats - def prepare_targets(self, tokens): + def prepare_targets(self, tokens, stage): """Prepare target by concatenating self if "env_corr" is used""" tokens, token_lens = tokens - if self.stage == sb.Stage.TRAIN and hasattr(self.hparams, "env_corr"): - tokens = torch.cat([tokens, tokens], dim=0) - token_lens = torch.cat([token_lens, token_lens]) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens = self.hparams.wav_augment.replicate_labels(tokens) + token_lens = self.hparams.wav_augment.replicate_labels(token_lens) return tokens, token_lens @@ -172,7 +170,7 @@ def compute_objectives(self, predictions, batch, stage): """Compute possibly several loss terms: enhance, mimic, ctc, seq""" # Do not augment targets - clean_wavs, lens = self.prepare_wavs(batch.clean_sig, augment=False) + clean_wavs, lens = self.prepare_wavs(batch.clean_sig, stage) loss = 0 # Compute enhancement loss @@ -214,7 +212,7 @@ def compute_objectives(self, predictions, batch, stage): length = int(abs_lens[i]) wav = predictions["wavs"][i, :length].unsqueeze(0) path = os.path.join(self.hparams.enh_dir, uid + ".wav") - torchaudio.save(path, wav.cpu(), sample_rate=16000) + audio_io.save(path, wav.cpu(), sample_rate=16000) # Compute mimic loss if self.hparams.mimic_weight > 0: @@ -237,7 +235,7 @@ def compute_objectives(self, predictions, batch, stage): not hasattr(self.hparams, "ctc_epochs") or self.hparams.epoch_counter.current < self.hparams.ctc_epochs ): - tokens, token_lens = self.prepare_targets(batch.tokens) + tokens, token_lens = self.prepare_targets(batch.tokens, stage) ctc_loss = sb.nnet.losses.ctc_loss( predictions["ctc_pout"], tokens, @@ -263,8 +261,7 @@ def compute_objectives(self, predictions, 
batch, stage): # Compute nll loss for seq2seq model if self.hparams.seq_weight > 0: - - tokens, token_lens = self.prepare_targets(batch.tokens_eos) + tokens, token_lens = self.prepare_targets(batch.tokens_eos, stage) seq_loss = self.hparams.seq_loss( predictions["seq_pout"], tokens, token_lens ) @@ -297,7 +294,6 @@ def compute_objectives(self, predictions, batch, stage): def on_stage_start(self, stage, epoch): if stage != sb.Stage.TRAIN: - if self.hparams.enhance_weight > 0: self.enh_metrics = self.hparams.enhance_stats() self.stoi_metrics = self.hparams.estoi_stats() @@ -388,20 +384,23 @@ def on_stage_end(self, stage, stage_loss, epoch): stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.stats_file + ".txt", "w") as w: - if self.hparams.enhance_weight > 0: - w.write("\nstoi stats:\n") - self.stoi_metrics.write_stats(w) - w.write("\npesq stats:\n") - self.pesq_metrics.write_stats(w) - w.write("\ncomposite stats:\n") - self.composite_metrics.write_stats(w) - if self.hparams.mimic_weight > 0: - w.write("\nmimic stats:\n") - self.mimic_metrics.write_stats(w) - if self.hparams.seq_weight > 0: - self.err_rate_metrics.write_stats(w) - print("stats written to ", self.hparams.stats_file) + if if_main_process(): + with open( + self.hparams.stats_file + ".txt", "w", encoding="utf-8" + ) as w: + if self.hparams.enhance_weight > 0: + w.write("\nstoi stats:\n") + self.stoi_metrics.write_stats(w) + w.write("\npesq stats:\n") + self.pesq_metrics.write_stats(w) + w.write("\ncomposite stats:\n") + self.composite_metrics.write_stats(w) + if self.hparams.mimic_weight > 0: + w.write("\nmimic stats:\n") + self.mimic_metrics.write_stats(w) + if self.hparams.seq_weight > 0: + self.err_rate_metrics.write_stats(w) + print("stats written to ", self.hparams.stats_file) def on_evaluate_start(self, max_key=None, min_key=None): self.checkpointer.recover_if_possible(max_key=max_key, min_key=min_key) @@ -410,6 +409,7 @@ def 
on_evaluate_start(self, max_key=None, min_key=None): min_key=min_key, max_num_checkpoints=self.hparams.checkpoint_avg, ) + logger.info(f"Averaging {len(checkpoints)} Checkpoints...") for model in self.modules: if ( model not in self.hparams.frozen_models @@ -471,7 +471,7 @@ def target_pipeline(target): # Sort train dataset and ensure it doesn't get un-sorted if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending": data["train"] = data["train"].filtered_sorted( - sort_key="length", reverse=hparams["sorting"] == "descending", + sort_key="length", reverse=hparams["sorting"] == "descending" ) hparams["train_loader_options"]["shuffle"] = False elif hparams["sorting"] != "random": @@ -491,10 +491,9 @@ def target_pipeline(target): # Begin Recipe! if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory @@ -515,6 +514,8 @@ def target_pipeline(target): "skip_prep": hparams["skip_prep"], }, ) + if "prepare_noise_data" in hparams: + run_on_main(hparams["prepare_noise_data"]) # Load pretrained models for model in ["asr", "enhance", "perceptual"]: diff --git a/recipes/Voicebank/dereverb/MetricGAN-U/README.md b/recipes/Voicebank/dereverb/MetricGAN-U/README.md index 292a0cd27c..c8eeac54ba 100644 --- a/recipes/Voicebank/dereverb/MetricGAN-U/README.md +++ b/recipes/Voicebank/dereverb/MetricGAN-U/README.md @@ -4,12 +4,20 @@ This recipe implements MetricGAN-U recipe for dereverberation as described in th [MetricGAN-U: Unsupervised speech enhancement/ dereverberation based only on noisy/ reverberated speech](https://arxiv.org/abs/2110.05866) Notes: -1- By default we use srmr as a default target metric. 
This requires you to install SRMRpy (see extra-dependecies.txt) +1- By default we use srmr as a default target metric. This requires you to install SRMRpy (see extra_requirements.txt) 2- To use dnsmos as a target metric, you have to ask the key from the DNS organizer first: dns_challenge@microsoft.com # Dataset Please "Manually" Download VoiceBank-SLR dataset from [here](https://bio-asplab.citi.sinica.edu.tw/Opensource.html#VB-SLR): +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + # How to run To run an experiment, execute the following command in the current folder: @@ -23,7 +31,7 @@ Experiment Date | Hyperparams file | PESQ | SRMR | -|-|-|-| 2021-10-31 | train_dereverb.yaml | 2.07 | 8.265 | -You can find the full experiment folder (i.e., checkpoints, logs, etc) [here](https://drive.google.com/drive/folders/1CFHE3lFYyIUWAxW8Ccx3hReACQ70qdE1?usp=sharing). +You can find the full experiment folder (i.e., checkpoints, logs, etc) [here](https://www.dropbox.com/sh/r94qn1f5lq9r3p7/AAAZfisBhhkS8cwpzy1O5ADUa?dl=0). ## Citation @@ -55,6 +63,15 @@ If you find the code useful in your research, please cite: Please, cite SpeechBrain if you use it for your research or business.
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/dereverb/MetricGAN-U/extra-requirements.txt b/recipes/Voicebank/dereverb/MetricGAN-U/extra_requirements.txt similarity index 100% rename from recipes/Voicebank/dereverb/MetricGAN-U/extra-requirements.txt rename to recipes/Voicebank/dereverb/MetricGAN-U/extra_requirements.txt diff --git a/recipes/Voicebank/dereverb/MetricGAN-U/hparams/train_dereverb.yaml b/recipes/Voicebank/dereverb/MetricGAN-U/hparams/train_dereverb.yaml index da235eb51b..f2f722463f 100644 --- a/recipes/Voicebank/dereverb/MetricGAN-U/hparams/train_dereverb.yaml +++ b/recipes/Voicebank/dereverb/MetricGAN-U/hparams/train_dereverb.yaml @@ -9,7 +9,7 @@ # Seed needs to be set at top of 
yaml, before objects with parameters are made seed: 12234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] data_folder: !PLACEHOLDER # e.g, /data/member1/user_jasonfu/noisy-vctk-16k @@ -24,7 +24,7 @@ historical_file: !ref /historical.txt use_tensorboard: False tensorboard_logs: !ref /logs/ -# FFT paremeters +# FFT parameters Sample_rate: 16000 Win_length: 32 Hop_length: 16 @@ -39,7 +39,7 @@ skip_prep: False # The target metrics that you want to optimize. # Right now we only support 'dnsmos', and 'srmr'. -# (Of course, it can be any arbitary metric.) +# (Of course, it can be any arbitrary metric.) target_metric: srmr calculate_dnsmos_on_validation_set: False target_score: 1 @@ -114,6 +114,6 @@ resynth: !name:speechbrain.processing.signal_processing.resynthesize train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref -# Tensorboard logger (optional) +# TensorBoard logger (optional) tensorboard_train_logger: !new:speechbrain.utils.train_logger.TensorboardLogger save_dir: !ref diff --git a/recipes/Voicebank/dereverb/MetricGAN-U/train.py b/recipes/Voicebank/dereverb/MetricGAN-U/train.py index 867ee10139..4396602464 100644 --- a/recipes/Voicebank/dereverb/MetricGAN-U/train.py +++ b/recipes/Voicebank/dereverb/MetricGAN-U/train.py @@ -9,32 +9,33 @@ * Szu-Wei Fu 2021/09 """ -import os -import sys -import shutil -import torch -import torchaudio -import speechbrain as sb -import numpy as np import json +import os import pickle -import requests +import shutil +import sys import time - -from urllib.parse import urlparse, urljoin -from srmrpy import srmr -from pesq import pesq from enum import Enum, auto +from urllib.parse import urljoin, urlparse + +import numpy as np +import requests +import torch from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import MetricStats -from speechbrain.processing.features import spectral_magnitude from pesq import
pesq +from srmrpy import srmr + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler from speechbrain.nnet.loss.stoi_loss import stoi_loss +from speechbrain.processing.features import spectral_magnitude from speechbrain.utils.distributed import run_on_main -from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler +from speechbrain.utils.metric_stats import MetricStats ### For DNSMSOS # URL for the web service -SCORING_URI = "https://dnsmos-4.azurewebsites.net/score" +SCORING_URI = "https://github.com/microsoft/DNS-Challenge" # If the service is authenticated, set the key or token AUTH_KEY = "" if AUTH_KEY == "": @@ -44,7 +45,7 @@ # Set the content type headers = {"Content-Type": "application/json"} # If authentication is enabled, set the authorization header -headers["Authorization"] = f"Basic {AUTH_KEY }" +headers["Authorization"] = f"Basic {AUTH_KEY}" def sigmoid(x): @@ -60,8 +61,8 @@ def pesq_eval(predict, target): def srmrpy_eval(predict, target): - """ Note target_wav is not used in the srmr function !!! - Normalize the score to 0~1 for training. + """Note target_wav is not used in the srmr function !!! + Normalize the score to 0~1 for training. """ return float( sigmoid( @@ -81,8 +82,8 @@ def srmrpy_eval(predict, target): def srmrpy_eval_valid(predict, target): - """ Note target_wav is not used in the srmr function !!! - Show the unnormalized score for valid and test set. + """Note target_wav is not used in the srmr function !!! + Show the unnormalized score for valid and test set. """ return float( srmr( @@ -99,8 +100,8 @@ def srmrpy_eval_valid(predict, target): def dnsmos_eval(predict, target): - """ Note target_wav is not used in the dnsmos function !!! - Normalize the score to 0~1 for training. + """Note target_wav is not used in the dnsmos function !!! + Normalize the score to 0~1 for training. 
""" pred_wav = predict @@ -118,19 +119,19 @@ def dnsmos_eval(predict, target): headers=headers, ) score_dict = resp.json() - score = float( - sigmoid(score_dict["mos"]) - ) # normalize the score to 0~1 + # normalize the score to 0~1 + score = float(sigmoid(score_dict["mos"])) break - except Exception as e: # sometimes, access the dnsmos server too ofen may disable the service. + # sometimes, access the dnsmos server too often may disable the service. + except Exception as e: print(e) time.sleep(10) # wait for 10 secs return score def dnsmos_eval_valid(predict, target): - """ Note target_wav is not used in the dnsmos function !!! - Show the unnormalized score for valid and test set. + """Note target_wav is not used in the dnsmos function !!! + Show the unnormalized score for valid and test set. """ pred_wav = predict @@ -149,7 +150,8 @@ def dnsmos_eval_valid(predict, target): score_dict = resp.json() score = float(score_dict["mos"]) break - except Exception as e: # sometimes, access the dnsmos server too ofen may disable the service. + # sometimes, access the dnsmos server too often may disable the service. 
+ except Exception as e: print(e) time.sleep(10) # wait for 10 secs return score @@ -285,7 +287,7 @@ def compute_objectives(self, predictions, batch, stage, optim_name=""): for name, pred_wav, length in zip(batch.id, predict_wav, lens): name += ".wav" enhance_path = os.path.join(self.hparams.enhanced_folder, name) - torchaudio.save( + audio_io.save( enhance_path, torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), 16000, @@ -314,8 +316,12 @@ def score(self, batch_id, deg_wav, ref_wav, lens): The degraded waveform to score ref_wav : torch.Tensor The reference waveform to use for scoring - length : torch.Tensor + lens : torch.Tensor The relative lengths of the utterances + + Returns + ------- + final_score : torch.Tensor """ new_ids = [ i @@ -335,7 +341,8 @@ def score(self, batch_id, deg_wav, ref_wav, lens): lengths=lens[new_ids], ) score = torch.tensor( - [[s] for s in self.target_metric.scores], device=self.device, + [[s] for s in self.target_metric.scores], + device=self.device, ) else: raise ValueError("Expected 'srmr' or 'dnsmos' for target_metric") @@ -362,8 +369,10 @@ def est_score(self, deg_spec): --------- deg_spec : torch.Tensor The spectral features of the degraded utterance - ref_spec : torch.Tensor - The spectral features of the reference utterance + + Returns + ------- + est_score : torch.Tensor """ """ @@ -392,7 +401,7 @@ def write_wavs(self, batch_id, wavs, score, lens): for i, (name, pred_wav, length) in enumerate(zip(batch_id, wavs, lens)): path = os.path.join(self.hparams.MetricGAN_folder, name + ".wav") data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0) - torchaudio.save(path, data, self.hparams.Sample_rate) + audio_io.save(path, data.detach(), self.hparams.Sample_rate) # Make record of path and score for historical training score = float(score[i][0]) @@ -418,8 +427,10 @@ def fit_batch(self, batch): ) self.d_optimizer.zero_grad() loss.backward() - if self.check_gradients(loss): - self.d_optimizer.step() + torch.nn.utils.clip_grad_norm_( + 
self.modules.parameters(), self.max_grad_norm + ) + self.d_optimizer.step() loss_tracker += loss.detach() / 3 elif self.sub_stage == SubStage.HISTORICAL: loss = self.compute_objectives( @@ -427,8 +438,10 @@ def fit_batch(self, batch): ) self.d_optimizer.zero_grad() loss.backward() - if self.check_gradients(loss): - self.d_optimizer.step() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.d_optimizer.step() loss_tracker += loss.detach() elif self.sub_stage == SubStage.GENERATOR: for name, param in self.modules.generator.named_parameters(): @@ -442,8 +455,10 @@ def fit_batch(self, batch): ) self.g_optimizer.zero_grad() loss.backward() - if self.check_gradients(loss): - self.g_optimizer.step() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.g_optimizer.step() loss_tracker += loss.detach() return loss_tracker @@ -601,7 +616,6 @@ def make_dataloader( ): "Override dataloader to insert custom sampler/dataset" if stage == sb.Stage.TRAIN: - # Create a new dataset each time, this set grows if self.sub_stage == SubStage.HISTORICAL: dataset = sb.dataio.dataset.DynamicItemDataset( @@ -610,6 +624,7 @@ def make_dataloader( output_keys=["id", "enh_sig", "score"], ) samples = round(len(dataset) * self.hparams.history_portion) + samples = max(samples, 1) # Ensure there's at least 1 sample else: samples = self.hparams.number_of_samples @@ -619,8 +634,12 @@ def make_dataloader( # Equal weights for all samples, we use "Weighted" so we can do # both "replacement=False" and a set number of samples, reproducibly weights = torch.ones(len(dataset)) + replacement = samples > len(dataset) sampler = ReproducibleWeightedRandomSampler( - weights, epoch=epoch, replacement=False, num_samples=samples + weights, + epoch=epoch, + replacement=replacement, + num_samples=samples, ) loader_kwargs["sampler"] = sampler @@ -650,15 +669,19 @@ def init_optimizers(self): self.checkpointer.add_recoverable("g_opt", 
self.g_optimizer) self.checkpointer.add_recoverable("d_opt", self.d_optimizer) + def zero_grad(self, set_to_none=False): + self.g_optimizer.zero_grad(set_to_none) + self.d_optimizer.zero_grad(set_to_none) -# Define audio piplines for training set + +# Define audio pipelines for training set @sb.utils.data_pipeline.takes("noisy_wav") @sb.utils.data_pipeline.provides("noisy_sig") def audio_pipeline_train(noisy_wav): yield sb.dataio.dataio.read_audio(noisy_wav) -# Define audio piplines for validation/test set +# Define audio pipelines for validation/test set @sb.utils.data_pipeline.takes("noisy_wav", "clean_wav") @sb.utils.data_pipeline.provides("noisy_sig", "clean_sig") def audio_pipeline_valid(noisy_wav, clean_wav): @@ -707,10 +730,9 @@ def create_folder(folder): # Recipe begins! if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) diff --git a/recipes/Voicebank/dereverb/MetricGAN-U/voicebank_revb_prepare.py b/recipes/Voicebank/dereverb/MetricGAN-U/voicebank_revb_prepare.py index 0ce7cf71f9..71c3f71c49 100644 --- a/recipes/Voicebank/dereverb/MetricGAN-U/voicebank_revb_prepare.py +++ b/recipes/Voicebank/dereverb/MetricGAN-U/voicebank_revb_prepare.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Data preparation. 
@@ -12,14 +11,15 @@ * Peter Plantinga, 2020 """ -import os import json +import os import string -import logging -from speechbrain.utils.data_utils import get_all_files + from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.data_utils import get_all_files +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) LEXICON_URL = "http://www.openslr.org/resources/11/librispeech-lexicon.txt" TRAIN_JSON = "train_revb.json" TEST_JSON = "test_revb.json" @@ -169,17 +169,21 @@ def prepare_voicebank( skip_prep: bool If True, skip data preparation. + Returns + ------- + None + Example ------- - >>> data_folder = '/path/to/datasets/Voicebank' - >>> save_folder = 'exp/Voicebank_exp' + >>> data_folder = "/path/to/datasets/Voicebank" + >>> save_folder = "exp/Voicebank_exp" >>> prepare_voicebank(data_folder, save_folder) """ if skip_prep: return - # Setting ouput files + # Setting output files save_json_train = os.path.join(save_folder, TRAIN_JSON) save_json_valid = os.path.join(save_folder, VALID_JSON) save_json_test = os.path.join(save_folder, TEST_JSON) @@ -220,10 +224,14 @@ def prepare_voicebank( extension = [".wav"] valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count] wav_lst_train = get_all_files( - train_noisy_folder, match_and=extension, exclude_or=valid_speakers, + train_noisy_folder, + match_and=extension, + exclude_or=valid_speakers, ) wav_lst_valid = get_all_files( - train_noisy_folder, match_and=extension, match_or=valid_speakers, + train_noisy_folder, + match_and=extension, + match_or=valid_speakers, ) wav_lst_test = get_all_files(test_noisy_folder, match_and=extension) @@ -238,6 +246,11 @@ def skip(*filenames): Detects if the Voicebank data_preparation has been already done. If the preparation has been done, we can skip it. + Arguments + --------- + *filenames : tuple + List of paths to check for existence. 
+ Returns ------- bool @@ -273,7 +286,6 @@ def create_json(wav_lst, json_file, clean_folder): # Processing all the wav files in the list json_dict = {} for wav_file in wav_lst: # ex:p203_122.wav - # Example wav_file: p232_001.wav noisy_path, filename = os.path.split(wav_file) _, noisy_dir = os.path.split(noisy_path) @@ -295,7 +307,7 @@ def create_json(wav_lst, json_file, clean_folder): } # Writing the json lines - with open(json_file, mode="w") as json_f: + with open(json_file, mode="w", encoding="utf-8") as json_f: json.dump(json_dict, json_f, indent=2) logger.info(f"{json_file} successfully created!") diff --git a/recipes/Voicebank/dereverb/spectral_mask/README.md b/recipes/Voicebank/dereverb/spectral_mask/README.md index ebe1e4a776..99e24be024 100644 --- a/recipes/Voicebank/dereverb/spectral_mask/README.md +++ b/recipes/Voicebank/dereverb/spectral_mask/README.md @@ -4,7 +4,7 @@ This recipe implements the Spectral-Mask baseline for dereverberation as describ [MetricGAN-U: Unsupervised speech enhancement/ dereverberation based only on noisy/ reverberated speech](https://arxiv.org/abs/2110.05866) Notes: -1- By default we use srmr as a default target metric. This requires you to install SRMRpy (see extra-dependecies.txt) +1- By default we use srmr as a default target metric. This requires you to install SRMRpy (see extra-dependencies.txt) 2- To use dnsmos as a target metric, you have to ask the key from the DNS organizer first: dns_challenge@microsoft.com # Dataset @@ -23,7 +23,7 @@ Experiment Date | Hyperparams file | PESQ | STOI | -|-|-|-| 2021-10-31 | train.yaml | 2.35 | 0.886 | -You can find the full experiment folder (i.e., checkpoints, logs, etc) [here](https://drive.google.com/drive/folders/1Bf-SL4gRpBdazBFuae3aFe0_EwL8v7jh?usp=sharing). +You can find the full experiment folder (i.e., checkpoints, logs, etc) [here](https://www.dropbox.com/sh/pw8aer8gcsrdbx7/AADknh7plHF5GBeTRK9VkIKga?dl=0). 
## Citation If you find the code useful in your research, please cite: @@ -53,6 +53,15 @@ If you find the code useful in your research, please cite: Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/dereverb/spectral_mask/hparams/train.yaml b/recipes/Voicebank/dereverb/spectral_mask/hparams/train.yaml index 291a96f930..d7bb5ee76a 100644 --- a/recipes/Voicebank/dereverb/spectral_mask/hparams/train.yaml +++ b/recipes/Voicebank/dereverb/spectral_mask/hparams/train.yaml @@ -9,7 +9,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 17234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] 
+__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] data_folder: !PLACEHOLDER # e.g, /data/member1/user_jasonfu/noisy-vctk-16k @@ -92,6 +92,6 @@ resynth: !name:speechbrain.processing.signal_processing.resynthesize train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref -# Tensorboard logger (optional) +# Tensorboard logger (optional) tensorboard_train_logger: !new:speechbrain.utils.train_logger.TensorboardLogger save_dir: !ref diff --git a/recipes/Voicebank/dereverb/spectral_mask/train.py b/recipes/Voicebank/dereverb/spectral_mask/train.py index 0aa16e6d2d..341e94f6ce 100644 --- a/recipes/Voicebank/dereverb/spectral_mask/train.py +++ b/recipes/Voicebank/dereverb/spectral_mask/train.py @@ -7,17 +7,20 @@ Authors * Szu-Wei Fu 2020 """ + import os import sys + import torch -import torchaudio -import speechbrain as sb -from pesq import pesq from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import MetricStats -from speechbrain.processing.features import spectral_magnitude +from pesq import pesq + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.nnet.loss.stoi_loss import stoi_loss +from speechbrain.processing.features import spectral_magnitude from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.metric_stats import MetricStats # Brain class for speech enhancement training @@ -65,7 +68,6 @@ def compute_objectives(self, predictions, batch, stage): ) if stage != sb.Stage.TRAIN: - # Evaluate speech quality/intelligibility self.stoi_metric.append( batch.id, predict_wav, clean_wavs, lens, reduction="batch" ) @@ -82,7 +84,7 @@ def compute_objectives(self, predictions, batch, stage): enhance_path = os.path.join( self.hparams.enhanced_folder, name ) - torchaudio.save( + audio_io.save( enhance_path, torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), 16000, ) @@ -148,11 +150,15 @@ def init_optimizers(self): self.optimizer = self.hparams.g_opt_class( 
self.modules.generator.parameters() ) + self.optimizers_dict = { + "optimizer": self.optimizer, + } def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # Define audio pipelines @sb.utils.data_pipeline.takes("noisy_wav") @@ -201,10 +207,9 @@ def create_folder(folder): # Recipe begins! if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) diff --git a/recipes/Voicebank/dereverb/spectral_mask/voicebank_revb_prepare.py b/recipes/Voicebank/dereverb/spectral_mask/voicebank_revb_prepare.py index 35a519a4e1..d3b91fb43e 100644 --- a/recipes/Voicebank/dereverb/spectral_mask/voicebank_revb_prepare.py +++ b/recipes/Voicebank/dereverb/spectral_mask/voicebank_revb_prepare.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Data preparation. @@ -12,14 +11,15 @@ * Peter Plantinga, 2020 """ -import os import json +import os import string -import logging -from speechbrain.utils.data_utils import get_all_files + from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.data_utils import get_all_files +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) LEXICON_URL = "http://www.openslr.org/resources/11/librispeech-lexicon.txt" TRAIN_JSON = "train_revb.json" TEST_JSON = "test_revb.json" @@ -169,16 +169,20 @@ def prepare_voicebank( skip_prep: bool If True, skip data preparation. 
+ Returns + ------- + None + Example ------- - >>> data_folder = '/path/to/datasets/Voicebank' - >>> save_folder = 'exp/Voicebank_exp' + >>> data_folder = "/path/to/datasets/Voicebank" + >>> save_folder = "exp/Voicebank_exp" >>> prepare_voicebank(data_folder, save_folder) """ if skip_prep: return - # Setting ouput files + # Setting output files save_json_train = os.path.join(save_folder, TRAIN_JSON) save_json_valid = os.path.join(save_folder, VALID_JSON) save_json_test = os.path.join(save_folder, TEST_JSON) @@ -219,10 +223,14 @@ def prepare_voicebank( extension = [".wav"] valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count] wav_lst_train = get_all_files( - train_noisy_folder, match_and=extension, exclude_or=valid_speakers, + train_noisy_folder, + match_and=extension, + exclude_or=valid_speakers, ) wav_lst_valid = get_all_files( - train_noisy_folder, match_and=extension, match_or=valid_speakers, + train_noisy_folder, + match_and=extension, + match_or=valid_speakers, ) wav_lst_test = get_all_files(test_noisy_folder, match_and=extension) @@ -237,6 +245,11 @@ def skip(*filenames): Detects if the Voicebank data_preparation has been already done. If the preparation has been done, we can skip it. + Arguments + --------- + *filenames : tuple + List of paths to check for existence. 
+ Returns ------- bool @@ -272,7 +285,6 @@ def create_json(wav_lst, json_file, clean_folder): # Processing all the wav files in the list json_dict = {} for wav_file in wav_lst: # ex:p203_122.wav - # Example wav_file: p232_001.wav noisy_path, filename = os.path.split(wav_file) _, noisy_dir = os.path.split(noisy_path) @@ -294,7 +306,7 @@ def create_json(wav_lst, json_file, clean_folder): } # Writing the json lines - with open(json_file, mode="w") as json_f: + with open(json_file, mode="w", encoding="utf-8") as json_f: json.dump(json_dict, json_f, indent=2) logger.info(f"{json_file} successfully created!") diff --git a/recipes/Voicebank/enhance/MetricGAN-U/README.md b/recipes/Voicebank/enhance/MetricGAN-U/README.md index 53f757664d..f303a06d58 100644 --- a/recipes/Voicebank/enhance/MetricGAN-U/README.md +++ b/recipes/Voicebank/enhance/MetricGAN-U/README.md @@ -6,7 +6,18 @@ This recipe implements MetricGAN-U recipe for enhancement as described in the pa !!! Note: To access DNSMOS, you have to ask the key from the DNS organizer first: dns_challenge@microsoft.com !!! Use the `download_vctk` function in `voicebank_prepare.py` to download the dataset -and resample it to 16000 Hz. To run an experiment, execute the following command in +and resample it to 16000 Hz. + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +## How to run +To run an experiment, execute the following command in the current folder: @@ -23,7 +34,7 @@ Experiment Date | DNSMOS You can find the full experiment folder (i.e., checkpoints, logs, etc) here: -https://drive.google.com/drive/folders/14KpZnUhnCAhoeRDwbIBu_zTq26y7BZdt?usp=sharing. +https://www.dropbox.com/sh/h9akxmyel17sc8y/AAAP3Oz5MbXDfMlEXVjOBWV0a?dl=0. 
@@ -56,6 +67,15 @@ If you find the code useful in your research, please cite: Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/enhance/MetricGAN-U/extra-requirements.txt b/recipes/Voicebank/enhance/MetricGAN-U/extra_requirements.txt similarity index 100% rename from recipes/Voicebank/enhance/MetricGAN-U/extra-requirements.txt rename to recipes/Voicebank/enhance/MetricGAN-U/extra_requirements.txt diff --git a/recipes/Voicebank/enhance/MetricGAN-U/hparams/train_dnsmos.yaml b/recipes/Voicebank/enhance/MetricGAN-U/hparams/train_dnsmos.yaml index d81d445037..f441432044 100644 --- 
a/recipes/Voicebank/enhance/MetricGAN-U/hparams/train_dnsmos.yaml +++ b/recipes/Voicebank/enhance/MetricGAN-U/hparams/train_dnsmos.yaml @@ -9,7 +9,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 12234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] data_folder: !PLACEHOLDER # e.g, /data/member1/user_jasonfu/noisy-vctk-16k @@ -24,7 +24,7 @@ historical_file: !ref /historical.txt use_tensorboard: True tensorboard_logs: !ref /logs/ -# FFT paremeters +# FFT parameters Sample_rate: 16000 Win_length: 32 Hop_length: 16 @@ -39,7 +39,7 @@ skip_prep: False # The target metrics that you want to optimize. # Right now we only support 'dnsmos', and 'srmr'. -# (Of course, it can be any arbitary metric.) +# (Of course, it can be any arbitrary metric.) target_metric: dnsmos calculate_dnsmos_on_validation_set: True target_score: 1 @@ -115,6 +115,6 @@ resynth: !name:speechbrain.processing.signal_processing.resynthesize train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref -# Tensorboard logger (optional) +# Tensorboard logger (optional) tensorboard_train_logger: !new:speechbrain.utils.train_logger.TensorboardLogger save_dir: !ref diff --git a/recipes/Voicebank/enhance/MetricGAN-U/train.py b/recipes/Voicebank/enhance/MetricGAN-U/train.py index b7fe88cbc4..c66ad5b4ba 100644 --- a/recipes/Voicebank/enhance/MetricGAN-U/train.py +++ b/recipes/Voicebank/enhance/MetricGAN-U/train.py @@ -1,774 +1,795 @@ -#!/usr/bin/env/python3 -""" -Recipe for training MetricGAN-U (Unsupervised) with the Voicebank dataset. 
- -To run this recipe, do the following: -> python train.py hparams/{hyperparam_file}.yaml - -Authors - * Szu-Wei Fu 2021/09 -""" - -import os -import sys -import shutil -import torch -import torchaudio -import speechbrain as sb -import numpy as np -import json -import pickle -import requests -import time - -from urllib.parse import urlparse, urljoin -from srmrpy import srmr -from pesq import pesq -from enum import Enum, auto -from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import MetricStats -from speechbrain.processing.features import spectral_magnitude -from speechbrain.nnet.loss.stoi_loss import stoi_loss -from speechbrain.utils.distributed import run_on_main -from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler - -### For DNSMSOS -# URL for the web service -SCORING_URI = "https://dnsmos-4.azurewebsites.net/score" -# If the service is authenticated, set the key or token -AUTH_KEY = "" -if AUTH_KEY == "": - print( - "To access DNSMOS, you have to ask the key from the DNS organizer: dns_challenge@microsoft.com" - ) -# Set the content type -headers = {"Content-Type": "application/json"} -# If authentication is enabled, set the authorization header -headers["Authorization"] = f"Basic {AUTH_KEY }" - - -def sigmoid(x): - s = 1 / (1 + np.exp(-x)) - return s - - -def pesq_eval(predict, target): - """Normalized PESQ (to 0-1)""" - return ( - pesq(fs=16000, ref=target.numpy(), deg=predict.numpy(), mode="wb") + 0.5 - ) / 5 - - -def srmrpy_eval(predict, target): - """ Note target is not used in the srmr function !!! - Normalize the score to 0~1 for training. - """ - return float( - sigmoid( - 0.1 - * srmr( - predict.numpy(), - fs=16000, - n_cochlear_filters=23, - low_freq=125, - min_cf=4, - max_cf=128, - fast=True, - norm=False, - )[0] - ) - ) - - -def srmrpy_eval_valid(predict, target): - """ Note target is not used in the srmr function !!! - Show the unnormalized score for valid and test set. 
- """ - return float( - srmr( - predict.numpy(), - fs=16000, - n_cochlear_filters=23, - low_freq=125, - min_cf=4, - max_cf=128, - fast=True, - norm=False, - )[0] - ) - - -def dnsmos_eval(predict, target): - """ Note target is not used in the dnsmos function !!! - Normalize the score to 0~1 for training. - """ - pred_wav = predict - - pred_wav = pred_wav.numpy() - pred_wav = pred_wav / max(abs(pred_wav)) - data = {"data": pred_wav.tolist()} - - input_data = json.dumps(data) - while True: - try: - u = urlparse(SCORING_URI) - resp = requests.post( - urljoin("https://" + u.netloc, "score"), - data=input_data, - headers=headers, - ) - score_dict = resp.json() - score = float( - sigmoid(score_dict["mos"]) - ) # normalize the score to 0~1 - break - except Exception as e: # sometimes, access the dnsmos server too ofen may disable the service. - print(e) - time.sleep(10) # wait for 10 secs - return score - - -def dnsmos_eval_valid(predict, target): - """ Note target is not used in the dnsmos function !!! - Show the unnormalized score for valid and test set. - """ - pred_wav = predict - - pred_wav = pred_wav.numpy() - pred_wav = pred_wav / max(abs(pred_wav)) - data = {"data": pred_wav.tolist()} - input_data = json.dumps(data) - while True: - try: - u = urlparse(SCORING_URI) - resp = requests.post( - urljoin("https://" + u.netloc, "score"), - data=input_data, - headers=headers, - ) - score_dict = resp.json() - score = float(score_dict["mos"]) - break - except Exception as e: # sometimes, access the dnsmos server too ofen may disable the service. 
- print(e) - time.sleep(10) # wait for 10 secs - return score - - -class SubStage(Enum): - """For keeping track of training stage progress""" - - GENERATOR = auto() - CURRENT = auto() - HISTORICAL = auto() - - -class MetricGanBrain(sb.Brain): - def load_history(self): - if os.path.isfile(self.hparams.historical_file): - with open(self.hparams.historical_file, "rb") as fp: # Unpickling - self.historical_set = pickle.load(fp) - - def compute_feats(self, wavs): - """Feature computation pipeline""" - feats = self.hparams.compute_STFT(wavs) - spec = spectral_magnitude(feats, power=0.5) - return spec - - def compute_forward(self, batch, stage): - "Given an input batch computes the enhanced signal" - batch = batch.to(self.device) - - if self.sub_stage == SubStage.HISTORICAL: - predict_wav, lens = batch.enh_sig - return predict_wav - else: - noisy_wav, lens = batch.noisy_sig - noisy_spec = self.compute_feats(noisy_wav) - - mask = self.modules.generator(noisy_spec, lengths=lens) - mask = mask.clamp(min=self.hparams.min_mask).squeeze(2) - predict_spec = torch.mul(mask, noisy_spec) - - # Also return predicted wav - predict_wav = self.hparams.resynth(predict_spec, noisy_wav) - - return predict_wav, mask - - def compute_objectives(self, predictions, batch, stage, optim_name=""): - "Given the network predictions and targets compute the total loss" - if self.sub_stage == SubStage.HISTORICAL: - predict_wav = predictions - else: - predict_wav, mask = predictions - predict_spec = self.compute_feats(predict_wav) - - ids = self.compute_ids(batch.id, optim_name) - if self.sub_stage != SubStage.HISTORICAL: - noisy_wav, lens = batch.noisy_sig - - if optim_name == "generator": - est_score = self.est_score(predict_spec) - target_score = self.hparams.target_score * torch.ones( - self.batch_size, 1, device=self.device - ) - - noisy_wav, lens = batch.noisy_sig - noisy_spec = self.compute_feats(noisy_wav) - mse_cost = self.hparams.compute_cost(predict_spec, noisy_spec, lens) - - # D Learns to 
estimate the scores of enhanced speech - elif optim_name == "D_enh" and self.sub_stage == SubStage.CURRENT: - target_score = self.score( - ids, predict_wav, predict_wav, lens - ) # no clean_wav is needed - est_score = self.est_score(predict_spec) - - # Write enhanced wavs during discriminator training, because we - # compute the actual score here and we can save it - self.write_wavs(ids, predict_wav, target_score, lens) - - # D Relearns to estimate the scores of previous epochs - elif optim_name == "D_enh" and self.sub_stage == SubStage.HISTORICAL: - target_score = batch.score.unsqueeze(1).float() - est_score = self.est_score(predict_spec) - - # D Learns to estimate the scores of noisy speech - elif optim_name == "D_noisy": - noisy_spec = self.compute_feats(noisy_wav) - target_score = self.score( - ids, noisy_wav, noisy_wav, lens - ) # no clean_wav is needed - est_score = self.est_score(noisy_spec) - # Save scores of noisy wavs - self.save_noisy_scores(ids, target_score) - - if stage == sb.Stage.TRAIN: - # Compute the cost - cost = self.hparams.compute_cost(est_score, target_score) - if optim_name == "generator": - cost += self.hparams.mse_weight * mse_cost - self.metrics["G"].append(cost.detach()) - else: - self.metrics["D"].append(cost.detach()) - - # Compute scores on validation data - if stage != sb.Stage.TRAIN: - clean_wav, lens = batch.clean_sig - - cost = self.hparams.compute_si_snr(predict_wav, clean_wav, lens) - # Evaluate speech quality/intelligibility - self.stoi_metric.append( - batch.id, predict_wav, clean_wav, lens, reduction="batch" - ) - self.pesq_metric.append( - batch.id, predict=predict_wav, target=clean_wav, lengths=lens - ) - if ( - self.hparams.calculate_dnsmos_on_validation_set - ): # Note: very time consuming........ 
- self.dnsmos_metric.append( - batch.id, - predict=predict_wav, - target=predict_wav, - lengths=lens, # no clean_wav is needed - ) - - # Write wavs to file, for evaluation - lens = lens * clean_wav.shape[1] - for name, pred_wav, length in zip(batch.id, predict_wav, lens): - name += ".wav" - enhance_path = os.path.join(self.hparams.enhanced_folder, name) - torchaudio.save( - enhance_path, - torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), - 16000, - ) - - return cost - - def compute_ids(self, batch_id, optim_name): - """Returns the list of ids, edited via optimizer name.""" - if optim_name == "D_enh": - return [f"{uid}@{self.epoch}" for uid in batch_id] - return batch_id - - def save_noisy_scores(self, batch_id, scores): - for i, score in zip(batch_id, scores): - self.noisy_scores[i] = score - - def score(self, batch_id, deg_wav, ref_wav, lens): - """Returns actual metric score, either pesq or stoi - - Arguments - --------- - batch_id : list of str - A list of the utterance ids for the batch - deg_wav : torch.Tensor - The degraded waveform to score - ref_wav : torch.Tensor - The reference waveform to use for scoring - length : torch.Tensor - The relative lengths of the utterances - """ - new_ids = [ - i - for i, d in enumerate(batch_id) - if d not in self.historical_set and d not in self.noisy_scores - ] - - if len(new_ids) == 0: - pass - elif self.hparams.target_metric == "srmr" or "dnsmos": - self.target_metric.append( - ids=[batch_id[i] for i in new_ids], - predict=deg_wav[new_ids].detach(), - target=ref_wav[ - new_ids - ].detach(), # target is not used in the function !!! 
- lengths=lens[new_ids], - ) - score = torch.tensor( - [[s] for s in self.target_metric.scores], device=self.device, - ) - else: - raise ValueError("Expected 'srmr' or 'dnsmos' for target_metric") - - # Clear metric scores to prepare for next batch - self.target_metric.clear() - - # Combine old scores and new - final_score = [] - for i, d in enumerate(batch_id): - if d in self.historical_set: - final_score.append([self.historical_set[d]["score"]]) - elif d in self.noisy_scores: - final_score.append([self.noisy_scores[d]]) - else: - final_score.append([score[new_ids.index(i)]]) - - return torch.tensor(final_score, device=self.device) - - def est_score(self, deg_spec): - """Returns score as estimated by discriminator - - Arguments - --------- - deg_spec : torch.Tensor - The spectral features of the degraded utterance - ref_spec : torch.Tensor - The spectral features of the reference utterance - """ - - """ - combined_spec = torch.cat( - [deg_spec.unsqueeze(1), ref_spec.unsqueeze(1)], 1 - ) - """ - return self.modules.discriminator(deg_spec.unsqueeze(1)) - - def write_wavs(self, batch_id, wavs, score, lens): - """Write wavs to files, for historical discriminator training - - Arguments - --------- - batch_id : list of str - A list of the utterance ids for the batch - wavs : torch.Tensor - The wavs to write to files - score : torch.Tensor - The actual scores for the corresponding utterances - lens : torch.Tensor - The relative lengths of each utterance - """ - lens = lens * wavs.shape[1] - record = {} - for i, (name, pred_wav, length) in enumerate(zip(batch_id, wavs, lens)): - path = os.path.join(self.hparams.MetricGAN_folder, name + ".wav") - data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0) - torchaudio.save(path, data, self.hparams.Sample_rate) - - # Make record of path and score for historical training - score = float(score[i][0]) - record[name] = { - "enh_wav": path, - "score": score, - } - - # Update records for historical training - 
self.historical_set.update(record) - - with open(self.hparams.historical_file, "wb") as fp: # Pickling - pickle.dump(self.historical_set, fp) - - def fit_batch(self, batch): - "Compute gradients and update either D or G based on sub-stage." - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss_tracker = 0 - if self.sub_stage == SubStage.CURRENT: - for mode in ["enh", "noisy"]: - loss = self.compute_objectives( - predictions, batch, sb.Stage.TRAIN, f"D_{mode}" - ) - self.d_optimizer.zero_grad() - loss.backward() - if self.check_gradients(loss): - self.d_optimizer.step() - loss_tracker += loss.detach() / 3 - elif self.sub_stage == SubStage.HISTORICAL: - loss = self.compute_objectives( - predictions, batch, sb.Stage.TRAIN, "D_enh" - ) - self.d_optimizer.zero_grad() - loss.backward() - if self.check_gradients(loss): - self.d_optimizer.step() - loss_tracker += loss.detach() - elif self.sub_stage == SubStage.GENERATOR: - for name, param in self.modules.generator.named_parameters(): - if "Learnable_sigmoid" in name: - param.data = torch.clamp( - param, max=3.5 - ) # to prevent gradient goes to infinity - - loss = self.compute_objectives( - predictions, batch, sb.Stage.TRAIN, "generator" - ) - self.g_optimizer.zero_grad() - loss.backward() - if self.check_gradients(loss): - self.g_optimizer.step() - loss_tracker += loss.detach() - - return loss_tracker - - def on_stage_start(self, stage, epoch=None): - """Gets called at the beginning of each epoch - - This method calls ``fit()`` again to train the discriminator - before proceeding with generator training. 
- """ - - self.metrics = {"G": [], "D": []} - - if stage == sb.Stage.TRAIN: - if self.hparams.target_metric == "srmr": - self.target_metric = MetricStats( - metric=srmrpy_eval, - n_jobs=hparams["n_jobs"], - batch_eval=False, - ) - elif self.hparams.target_metric == "dnsmos": - self.target_metric = MetricStats( - metric=dnsmos_eval, - n_jobs=hparams["n_jobs"], - batch_eval=False, - ) - else: - raise NotImplementedError( - "Right now we only support 'srmr' and 'dnsmos'" - ) - - # Train discriminator before we start generator training - if self.sub_stage == SubStage.GENERATOR: - self.epoch = epoch - self.train_discriminator() - self.sub_stage = SubStage.GENERATOR - print("Generator training by current data...") - - if stage != sb.Stage.TRAIN: - self.pesq_metric = MetricStats( - metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False - ) - self.stoi_metric = MetricStats(metric=stoi_loss) - self.srmr_metric = MetricStats( - metric=srmrpy_eval_valid, - n_jobs=hparams["n_jobs"], - batch_eval=False, - ) - self.dnsmos_metric = MetricStats( - metric=dnsmos_eval_valid, - n_jobs=hparams["n_jobs"], - batch_eval=False, - ) - - def train_discriminator(self): - """A total of 3 data passes to update discriminator.""" - # First, iterate train subset w/ updates for enh, noisy - print("Discriminator training by current data...") - self.sub_stage = SubStage.CURRENT - self.fit( - range(1), - self.train_set, - train_loader_kwargs=self.hparams.dataloader_options, - ) - - # Next, iterate historical subset w/ updates for enh - if self.historical_set: - print("Discriminator training by historical data...") - self.sub_stage = SubStage.HISTORICAL - self.fit( - range(1), - self.historical_set, - train_loader_kwargs=self.hparams.dataloader_options, - ) - - # Finally, iterate train set again. 
Should iterate same - # samples as before, due to ReproducibleRandomSampler - print("Discriminator training by current data again...") - self.sub_stage = SubStage.CURRENT - self.fit( - range(1), - self.train_set, - train_loader_kwargs=self.hparams.dataloader_options, - ) - - def on_stage_end(self, stage, stage_loss, epoch=None): - "Called at the end of each stage to summarize progress" - if self.sub_stage != SubStage.GENERATOR: - return - - if stage == sb.Stage.TRAIN: - self.train_loss = stage_loss - g_loss = torch.tensor(self.metrics["G"]) # batch_size - d_loss = torch.tensor(self.metrics["D"]) # batch_size - print("Avg G loss: %.3f" % torch.mean(g_loss)) - print("Avg D loss: %.3f" % torch.mean(d_loss)) - else: - if self.hparams.calculate_dnsmos_on_validation_set: - stats = { - "SI-SNR": -stage_loss, - "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, - "stoi": -self.stoi_metric.summarize("average"), - "dnsmos": self.dnsmos_metric.summarize("average"), - } - else: - stats = { - "SI-SNR": -stage_loss, - "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, - "stoi": -self.stoi_metric.summarize("average"), - } - - if stage == sb.Stage.VALID: - old_lr, new_lr = self.hparams.lr_annealing(5.0 - stats["pesq"]) - sb.nnet.schedulers.update_learning_rate(self.g_optimizer, new_lr) - - if self.hparams.use_tensorboard: - if ( - self.hparams.calculate_dnsmos_on_validation_set - ): # Note: very time consuming........ 
- valid_stats = { - "SI-SNR": -stage_loss, - "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, - "stoi": -self.stoi_metric.summarize("average"), - "dnsmos": self.dnsmos_metric.summarize("average"), - } - else: - valid_stats = { - "SI-SNR": -stage_loss, - "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, - "stoi": -self.stoi_metric.summarize("average"), - } - - self.hparams.tensorboard_train_logger.log_stats( - {"lr": old_lr}, valid_stats - ) - self.hparams.train_logger.log_stats( - {"Epoch": epoch, "lr": old_lr}, - train_stats={"loss": self.train_loss}, - valid_stats=stats, - ) - self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"]) - - if stage == sb.Stage.TEST: - self.hparams.train_logger.log_stats( - {"Epoch loaded": self.hparams.epoch_counter.current}, - test_stats=stats, - ) - - def make_dataloader( - self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs - ): - "Override dataloader to insert custom sampler/dataset" - if stage == sb.Stage.TRAIN: - - # Create a new dataset each time, this set grows - if self.sub_stage == SubStage.HISTORICAL: - dataset = sb.dataio.dataset.DynamicItemDataset( - data=dataset, - dynamic_items=[enh_pipeline], - output_keys=["id", "enh_sig", "score"], - ) - samples = round(len(dataset) * self.hparams.history_portion) - else: - samples = self.hparams.number_of_samples - - # This sampler should give the same samples for D and G - epoch = self.hparams.epoch_counter.current - - # Equal weights for all samples, we use "Weighted" so we can do - # both "replacement=False" and a set number of samples, reproducibly - weights = torch.ones(len(dataset)) - sampler = ReproducibleWeightedRandomSampler( - weights, epoch=epoch, replacement=False, num_samples=samples - ) - loader_kwargs["sampler"] = sampler - - if self.sub_stage == SubStage.GENERATOR: - self.train_sampler = sampler - - # Make the dataloader as normal - return super().make_dataloader( - dataset, stage, ckpt_prefix, **loader_kwargs - ) - - def 
on_fit_start(self): - "Override to prevent this from running for D training" - if self.sub_stage == SubStage.GENERATOR: - super().on_fit_start() - - def init_optimizers(self): - "Initializes the generator and discriminator optimizers" - self.g_optimizer = self.hparams.g_opt_class( - self.modules.generator.parameters() - ) - self.d_optimizer = self.hparams.d_opt_class( - self.modules.discriminator.parameters() - ) - - if self.checkpointer is not None: - self.checkpointer.add_recoverable("g_opt", self.g_optimizer) - self.checkpointer.add_recoverable("d_opt", self.d_optimizer) - - -# Define audio piplines for training set -@sb.utils.data_pipeline.takes("noisy_wav") -@sb.utils.data_pipeline.provides("noisy_sig") -def audio_pipeline_train(noisy_wav): - yield sb.dataio.dataio.read_audio(noisy_wav) - - -# Define audio piplines for validation/test set -@sb.utils.data_pipeline.takes("noisy_wav", "clean_wav") -@sb.utils.data_pipeline.provides("noisy_sig", "clean_sig") -def audio_pipeline_valid(noisy_wav, clean_wav): - yield sb.dataio.dataio.read_audio(noisy_wav) - yield sb.dataio.dataio.read_audio(clean_wav) - - -# For historical data -@sb.utils.data_pipeline.takes("enh_wav") -@sb.utils.data_pipeline.provides("enh_sig") -def enh_pipeline(enh_wav): - yield sb.dataio.dataio.read_audio(enh_wav) - - -def dataio_prep(hparams): - """This function prepares the datasets to be used in the brain class.""" - - # Define datasets - datasets = {} - - datasets["train"] = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["train_annotation"], - replacements={"data_root": hparams["data_folder"]}, - dynamic_items=[audio_pipeline_train], - output_keys=["id", "noisy_sig"], - ) - datasets["valid"] = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["valid_annotation"], - replacements={"data_root": hparams["data_folder"]}, - dynamic_items=[audio_pipeline_valid], - output_keys=["id", "noisy_sig", "clean_sig"], - ) - - datasets["test"] = 
sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=hparams["test_annotation"], - replacements={"data_root": hparams["data_folder"]}, - dynamic_items=[audio_pipeline_valid], - output_keys=["id", "noisy_sig", "clean_sig"], - ) - - return datasets - - -def create_folder(folder): - if not os.path.isdir(folder): - os.makedirs(folder) - - -# Recipe begins! -if __name__ == "__main__": - - # Load hyperparameters file with command-line overrides - hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: - hparams = load_hyperpyyaml(fin, overrides) - - # Initialize ddp (useful only for multi-GPU DDP training) - sb.utils.distributed.ddp_init_group(run_opts) - - # Data preparation - from voicebank_prepare import prepare_voicebank # noqa - - run_on_main( - prepare_voicebank, - kwargs={ - "data_folder": hparams["data_folder"], - "save_folder": hparams["data_folder"], - "skip_prep": hparams["skip_prep"], - }, - ) - - # Create dataset objects - datasets = dataio_prep(hparams) - - # Create experiment directory - sb.create_experiment_directory( - experiment_directory=hparams["output_folder"], - hyperparams_to_save=hparams_file, - overrides=overrides, - ) - - if hparams["use_tensorboard"]: - from speechbrain.utils.train_logger import TensorboardLogger - - hparams["tensorboard_train_logger"] = TensorboardLogger( - hparams["tensorboard_logs"] - ) - - # Create the folder to save enhanced files (+ support for DDP) - run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]}) - - se_brain = MetricGanBrain( - modules=hparams["modules"], - hparams=hparams, - run_opts=run_opts, - checkpointer=hparams["checkpointer"], - ) - se_brain.train_set = datasets["train"] - se_brain.historical_set = {} - se_brain.noisy_scores = {} - se_brain.batch_size = hparams["dataloader_options"]["batch_size"] - se_brain.sub_stage = SubStage.GENERATOR - - if not os.path.isfile(hparams["historical_file"]): - 
shutil.rmtree(hparams["MetricGAN_folder"]) - run_on_main(create_folder, kwargs={"folder": hparams["MetricGAN_folder"]}) - - se_brain.load_history() - # Load latest checkpoint to resume training - se_brain.fit( - epoch_counter=se_brain.hparams.epoch_counter, - train_set=datasets["train"], - valid_set=datasets["valid"], - train_loader_kwargs=hparams["dataloader_options"], - valid_loader_kwargs=hparams["valid_dataloader_options"], - ) - - # Load best checkpoint for evaluation - test_stats = se_brain.evaluate( - test_set=datasets["test"], - max_key="pesq", - test_loader_kwargs=hparams["dataloader_options"], - ) +#!/usr/bin/env/python3 +""" +Recipe for training MetricGAN-U (Unsupervised) with the Voicebank dataset. + +To run this recipe, do the following: +> python train.py hparams/{hyperparam_file}.yaml + +Authors + * Szu-Wei Fu 2021/09 +""" + +import json +import os +import pickle +import shutil +import sys +import time +from enum import Enum, auto +from urllib.parse import urljoin, urlparse + +import numpy as np +import requests +import torch +from hyperpyyaml import load_hyperpyyaml +from pesq import pesq +from srmrpy import srmr + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler +from speechbrain.nnet.loss.stoi_loss import stoi_loss +from speechbrain.processing.features import spectral_magnitude +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.metric_stats import MetricStats + +### For DNSMSOS +# URL for the web service +SCORING_URI = "https://github.com/microsoft/DNS-Challenge" +# If the service is authenticated, set the key or token +AUTH_KEY = "" +if AUTH_KEY == "": + print( + "To access DNSMOS, you have to ask the key from the DNS organizer: dns_challenge@microsoft.com" + ) +# Set the content type +headers = {"Content-Type": "application/json"} +# If authentication is enabled, set the authorization header +headers["Authorization"] = f"Basic 
{AUTH_KEY}" + + +def sigmoid(x): + s = 1 / (1 + np.exp(-x)) + return s + + +def pesq_eval(predict, target): + """Normalized PESQ (to 0-1)""" + return ( + pesq(fs=16000, ref=target.numpy(), deg=predict.numpy(), mode="wb") + 0.5 + ) / 5 + + +def srmrpy_eval(predict, target): + """Note target is not used in the srmr function !!! + Normalize the score to 0~1 for training. + """ + return float( + sigmoid( + 0.1 + * srmr( + predict.numpy(), + fs=16000, + n_cochlear_filters=23, + low_freq=125, + min_cf=4, + max_cf=128, + fast=True, + norm=False, + )[0] + ) + ) + + +def srmrpy_eval_valid(predict, target): + """Note target is not used in the srmr function !!! + Show the unnormalized score for valid and test set. + """ + return float( + srmr( + predict.numpy(), + fs=16000, + n_cochlear_filters=23, + low_freq=125, + min_cf=4, + max_cf=128, + fast=True, + norm=False, + )[0] + ) + + +def dnsmos_eval(predict, target): + """Note target is not used in the dnsmos function !!! + Normalize the score to 0~1 for training. + """ + pred_wav = predict + + pred_wav = pred_wav.numpy() + pred_wav = pred_wav / max(abs(pred_wav)) + data = {"data": pred_wav.tolist()} + + input_data = json.dumps(data) + while True: + try: + u = urlparse(SCORING_URI) + resp = requests.post( + urljoin("https://" + u.netloc, "score"), + data=input_data, + headers=headers, + ) + score_dict = resp.json() + # normalize the score to 0~1 + score = float(sigmoid(score_dict["mos"])) + break + # sometimes, access the dnsmos server too often may disable the service. + except Exception as e: + print(e) + time.sleep(10) # wait for 10 secs + return score + + +def dnsmos_eval_valid(predict, target): + """Note target is not used in the dnsmos function !!! + Show the unnormalized score for valid and test set. 
+ """ + pred_wav = predict + + pred_wav = pred_wav.numpy() + pred_wav = pred_wav / max(abs(pred_wav)) + data = {"data": pred_wav.tolist()} + input_data = json.dumps(data) + while True: + try: + u = urlparse(SCORING_URI) + resp = requests.post( + urljoin("https://" + u.netloc, "score"), + data=input_data, + headers=headers, + ) + score_dict = resp.json() + score = float(score_dict["mos"]) + break + # sometimes, access the dnsmos server too often may disable the service. + except Exception as e: + print(e) + time.sleep(10) # wait for 10 secs + return score + + +class SubStage(Enum): + """For keeping track of training stage progress""" + + GENERATOR = auto() + CURRENT = auto() + HISTORICAL = auto() + + +class MetricGanBrain(sb.Brain): + def load_history(self): + if os.path.isfile(self.hparams.historical_file): + with open(self.hparams.historical_file, "rb") as fp: # Unpickling + self.historical_set = pickle.load(fp) + + def compute_feats(self, wavs): + """Feature computation pipeline""" + feats = self.hparams.compute_STFT(wavs) + spec = spectral_magnitude(feats, power=0.5) + return spec + + def compute_forward(self, batch, stage): + "Given an input batch computes the enhanced signal" + batch = batch.to(self.device) + + if self.sub_stage == SubStage.HISTORICAL: + predict_wav, lens = batch.enh_sig + return predict_wav + else: + noisy_wav, lens = batch.noisy_sig + noisy_spec = self.compute_feats(noisy_wav) + + mask = self.modules.generator(noisy_spec, lengths=lens) + mask = mask.clamp(min=self.hparams.min_mask).squeeze(2) + predict_spec = torch.mul(mask, noisy_spec) + + # Also return predicted wav + predict_wav = self.hparams.resynth(predict_spec, noisy_wav) + + return predict_wav, mask + + def compute_objectives(self, predictions, batch, stage, optim_name=""): + "Given the network predictions and targets compute the total loss" + if self.sub_stage == SubStage.HISTORICAL: + predict_wav = predictions + else: + predict_wav, mask = predictions + predict_spec = 
self.compute_feats(predict_wav) + + ids = self.compute_ids(batch.id, optim_name) + if self.sub_stage != SubStage.HISTORICAL: + noisy_wav, lens = batch.noisy_sig + + if optim_name == "generator": + est_score = self.est_score(predict_spec) + target_score = self.hparams.target_score * torch.ones( + self.batch_size, 1, device=self.device + ) + + noisy_wav, lens = batch.noisy_sig + noisy_spec = self.compute_feats(noisy_wav) + mse_cost = self.hparams.compute_cost(predict_spec, noisy_spec, lens) + + # D Learns to estimate the scores of enhanced speech + elif optim_name == "D_enh" and self.sub_stage == SubStage.CURRENT: + target_score = self.score( + ids, predict_wav, predict_wav, lens + ) # no clean_wav is needed + est_score = self.est_score(predict_spec) + + # Write enhanced wavs during discriminator training, because we + # compute the actual score here and we can save it + self.write_wavs(ids, predict_wav, target_score, lens) + + # D Relearns to estimate the scores of previous epochs + elif optim_name == "D_enh" and self.sub_stage == SubStage.HISTORICAL: + target_score = batch.score.unsqueeze(1).float() + est_score = self.est_score(predict_spec) + + # D Learns to estimate the scores of noisy speech + elif optim_name == "D_noisy": + noisy_spec = self.compute_feats(noisy_wav) + target_score = self.score( + ids, noisy_wav, noisy_wav, lens + ) # no clean_wav is needed + est_score = self.est_score(noisy_spec) + # Save scores of noisy wavs + self.save_noisy_scores(ids, target_score) + + if stage == sb.Stage.TRAIN: + # Compute the cost + cost = self.hparams.compute_cost(est_score, target_score) + if optim_name == "generator": + cost += self.hparams.mse_weight * mse_cost + self.metrics["G"].append(cost.detach()) + else: + self.metrics["D"].append(cost.detach()) + + # Compute scores on validation data + if stage != sb.Stage.TRAIN: + clean_wav, lens = batch.clean_sig + + cost = self.hparams.compute_si_snr(predict_wav, clean_wav, lens) + # Evaluate speech quality/intelligibility 
+ self.stoi_metric.append( + batch.id, predict_wav, clean_wav, lens, reduction="batch" + ) + self.pesq_metric.append( + batch.id, predict=predict_wav, target=clean_wav, lengths=lens + ) + if ( + self.hparams.calculate_dnsmos_on_validation_set + ): # Note: very time consuming........ + self.dnsmos_metric.append( + batch.id, + predict=predict_wav, + target=predict_wav, + lengths=lens, # no clean_wav is needed + ) + + # Write wavs to file, for evaluation + lens = lens * clean_wav.shape[1] + for name, pred_wav, length in zip(batch.id, predict_wav, lens): + name += ".wav" + enhance_path = os.path.join(self.hparams.enhanced_folder, name) + audio_io.save( + enhance_path, + torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), + 16000, + ) + + return cost + + def compute_ids(self, batch_id, optim_name): + """Returns the list of ids, edited via optimizer name.""" + if optim_name == "D_enh": + return [f"{uid}@{self.epoch}" for uid in batch_id] + return batch_id + + def save_noisy_scores(self, batch_id, scores): + for i, score in zip(batch_id, scores): + self.noisy_scores[i] = score + + def score(self, batch_id, deg_wav, ref_wav, lens): + """Returns actual metric score, either pesq or stoi + + Arguments + --------- + batch_id : list of str + A list of the utterance ids for the batch + deg_wav : torch.Tensor + The degraded waveform to score + ref_wav : torch.Tensor + The reference waveform to use for scoring + lens : torch.Tensor + The relative lengths of the utterances + + Returns + ------- + final_score : torch.Tensor + """ + new_ids = [ + i + for i, d in enumerate(batch_id) + if d not in self.historical_set and d not in self.noisy_scores + ] + + if len(new_ids) == 0: + pass + elif self.hparams.target_metric == "srmr" or "dnsmos": + self.target_metric.append( + ids=[batch_id[i] for i in new_ids], + predict=deg_wav[new_ids].detach(), + target=ref_wav[ + new_ids + ].detach(), # target is not used in the function !!! 
+ lengths=lens[new_ids], + ) + score = torch.tensor( + [[s] for s in self.target_metric.scores], device=self.device + ) + else: + raise ValueError("Expected 'srmr' or 'dnsmos' for target_metric") + + # Clear metric scores to prepare for next batch + self.target_metric.clear() + + # Combine old scores and new + final_score = [] + for i, d in enumerate(batch_id): + if d in self.historical_set: + final_score.append([self.historical_set[d]["score"]]) + elif d in self.noisy_scores: + final_score.append([self.noisy_scores[d]]) + else: + final_score.append([score[new_ids.index(i)]]) + + return torch.tensor(final_score, device=self.device) + + def est_score(self, deg_spec): + """Returns score as estimated by discriminator + + Arguments + --------- + deg_spec : torch.Tensor + The spectral features of the degraded utterance + + Returns + ------- + est_score : torch.Tensor + """ + + """ + combined_spec = torch.cat( + [deg_spec.unsqueeze(1), ref_spec.unsqueeze(1)], 1 + ) + """ + return self.modules.discriminator(deg_spec.unsqueeze(1)) + + def write_wavs(self, batch_id, wavs, score, lens): + """Write wavs to files, for historical discriminator training + + Arguments + --------- + batch_id : list of str + A list of the utterance ids for the batch + wavs : torch.Tensor + The wavs to write to files + score : torch.Tensor + The actual scores for the corresponding utterances + lens : torch.Tensor + The relative lengths of each utterance + """ + lens = lens * wavs.shape[1] + record = {} + for i, (name, pred_wav, length) in enumerate(zip(batch_id, wavs, lens)): + path = os.path.join(self.hparams.MetricGAN_folder, name + ".wav") + data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0) + audio_io.save(path, data.detach(), self.hparams.Sample_rate) + + # Make record of path and score for historical training + score = float(score[i][0]) + record[name] = { + "enh_wav": path, + "score": score, + } + + # Update records for historical training + self.historical_set.update(record) + + with 
open(self.hparams.historical_file, "wb") as fp: # Pickling + pickle.dump(self.historical_set, fp) + + def fit_batch(self, batch): + "Compute gradients and update either D or G based on sub-stage." + predictions = self.compute_forward(batch, sb.Stage.TRAIN) + loss_tracker = 0 + if self.sub_stage == SubStage.CURRENT: + for mode in ["enh", "noisy"]: + loss = self.compute_objectives( + predictions, batch, sb.Stage.TRAIN, f"D_{mode}" + ) + self.d_optimizer.zero_grad() + loss.backward() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.d_optimizer.step() + loss_tracker += loss.detach() / 3 + elif self.sub_stage == SubStage.HISTORICAL: + loss = self.compute_objectives( + predictions, batch, sb.Stage.TRAIN, "D_enh" + ) + self.d_optimizer.zero_grad() + loss.backward() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.d_optimizer.step() + loss_tracker += loss.detach() + elif self.sub_stage == SubStage.GENERATOR: + for name, param in self.modules.generator.named_parameters(): + if "Learnable_sigmoid" in name: + param.data = torch.clamp( + param, max=3.5 + ) # to prevent gradient goes to infinity + + loss = self.compute_objectives( + predictions, batch, sb.Stage.TRAIN, "generator" + ) + self.g_optimizer.zero_grad() + loss.backward() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.g_optimizer.step() + loss_tracker += loss.detach() + + return loss_tracker + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch + + This method calls ``fit()`` again to train the discriminator + before proceeding with generator training. 
+ """ + + self.metrics = {"G": [], "D": []} + + if stage == sb.Stage.TRAIN: + if self.hparams.target_metric == "srmr": + self.target_metric = MetricStats( + metric=srmrpy_eval, + n_jobs=hparams["n_jobs"], + batch_eval=False, + ) + elif self.hparams.target_metric == "dnsmos": + self.target_metric = MetricStats( + metric=dnsmos_eval, + n_jobs=hparams["n_jobs"], + batch_eval=False, + ) + else: + raise NotImplementedError( + "Right now we only support 'srmr' and 'dnsmos'" + ) + + # Train discriminator before we start generator training + if self.sub_stage == SubStage.GENERATOR: + self.epoch = epoch + self.train_discriminator() + self.sub_stage = SubStage.GENERATOR + print("Generator training by current data...") + + if stage != sb.Stage.TRAIN: + self.pesq_metric = MetricStats( + metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False + ) + self.stoi_metric = MetricStats(metric=stoi_loss) + self.srmr_metric = MetricStats( + metric=srmrpy_eval_valid, + n_jobs=hparams["n_jobs"], + batch_eval=False, + ) + self.dnsmos_metric = MetricStats( + metric=dnsmos_eval_valid, + n_jobs=hparams["n_jobs"], + batch_eval=False, + ) + + def train_discriminator(self): + """A total of 3 data passes to update discriminator.""" + # First, iterate train subset w/ updates for enh, noisy + print("Discriminator training by current data...") + self.sub_stage = SubStage.CURRENT + self.fit( + range(1), + self.train_set, + train_loader_kwargs=self.hparams.dataloader_options, + ) + + # Next, iterate historical subset w/ updates for enh + if self.historical_set: + print("Discriminator training by historical data...") + self.sub_stage = SubStage.HISTORICAL + self.fit( + range(1), + self.historical_set, + train_loader_kwargs=self.hparams.dataloader_options, + ) + + # Finally, iterate train set again. 
Should iterate same + # samples as before, due to ReproducibleRandomSampler + print("Discriminator training by current data again...") + self.sub_stage = SubStage.CURRENT + self.fit( + range(1), + self.train_set, + train_loader_kwargs=self.hparams.dataloader_options, + ) + + def on_stage_end(self, stage, stage_loss, epoch=None): + "Called at the end of each stage to summarize progress" + if self.sub_stage != SubStage.GENERATOR: + return + + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + g_loss = torch.tensor(self.metrics["G"]) # batch_size + d_loss = torch.tensor(self.metrics["D"]) # batch_size + print("Avg G loss: %.3f" % torch.mean(g_loss)) + print("Avg D loss: %.3f" % torch.mean(d_loss)) + else: + if self.hparams.calculate_dnsmos_on_validation_set: + stats = { + "SI-SNR": -stage_loss, + "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, + "stoi": -self.stoi_metric.summarize("average"), + "dnsmos": self.dnsmos_metric.summarize("average"), + } + else: + stats = { + "SI-SNR": -stage_loss, + "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, + "stoi": -self.stoi_metric.summarize("average"), + } + + if stage == sb.Stage.VALID: + old_lr, new_lr = self.hparams.lr_annealing(5.0 - stats["pesq"]) + sb.nnet.schedulers.update_learning_rate(self.g_optimizer, new_lr) + + if self.hparams.use_tensorboard: + if ( + self.hparams.calculate_dnsmos_on_validation_set + ): # Note: very time consuming........ 
+ valid_stats = { + "SI-SNR": -stage_loss, + "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, + "stoi": -self.stoi_metric.summarize("average"), + "dnsmos": self.dnsmos_metric.summarize("average"), + } + else: + valid_stats = { + "SI-SNR": -stage_loss, + "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, + "stoi": -self.stoi_metric.summarize("average"), + } + + self.hparams.tensorboard_train_logger.log_stats( + {"lr": old_lr}, valid_stats + ) + self.hparams.train_logger.log_stats( + {"Epoch": epoch, "lr": old_lr}, + train_stats={"loss": self.train_loss}, + valid_stats=stats, + ) + self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"]) + + if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stats, + ) + + def make_dataloader( + self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs + ): + "Override dataloader to insert custom sampler/dataset" + if stage == sb.Stage.TRAIN: + # Create a new dataset each time, this set grows + if self.sub_stage == SubStage.HISTORICAL: + dataset = sb.dataio.dataset.DynamicItemDataset( + data=dataset, + dynamic_items=[enh_pipeline], + output_keys=["id", "enh_sig", "score"], + ) + samples = round(len(dataset) * self.hparams.history_portion) + samples = max(samples, 1) # Ensure there's at least one sample + else: + samples = self.hparams.number_of_samples + + # This sampler should give the same samples for D and G + epoch = self.hparams.epoch_counter.current + + # Equal weights for all samples, we use "Weighted" so we can do + # both "replacement=False" and a set number of samples, reproducibly + weights = torch.ones(len(dataset)) + replacement = samples > len(dataset) + sampler = ReproducibleWeightedRandomSampler( + weights, + epoch=epoch, + replacement=replacement, + num_samples=samples, + ) + loader_kwargs["sampler"] = sampler + + if self.sub_stage == SubStage.GENERATOR: + self.train_sampler = sampler + + # Make the 
dataloader as normal + return super().make_dataloader( + dataset, stage, ckpt_prefix, **loader_kwargs + ) + + def on_fit_start(self): + "Override to prevent this from running for D training" + if self.sub_stage == SubStage.GENERATOR: + super().on_fit_start() + + def init_optimizers(self): + "Initializes the generator and discriminator optimizers" + self.g_optimizer = self.hparams.g_opt_class( + self.modules.generator.parameters() + ) + self.d_optimizer = self.hparams.d_opt_class( + self.modules.discriminator.parameters() + ) + + if self.checkpointer is not None: + self.checkpointer.add_recoverable("g_opt", self.g_optimizer) + self.checkpointer.add_recoverable("d_opt", self.d_optimizer) + + def zero_grad(self, set_to_none=False): + self.g_optimizer.zero_grad(set_to_none) + self.d_optimizer.zero_grad(set_to_none) + + +# Define audio pipelines for training set +@sb.utils.data_pipeline.takes("noisy_wav") +@sb.utils.data_pipeline.provides("noisy_sig") +def audio_pipeline_train(noisy_wav): + yield sb.dataio.dataio.read_audio(noisy_wav) + + +# Define audio pipelines for validation/test set +@sb.utils.data_pipeline.takes("noisy_wav", "clean_wav") +@sb.utils.data_pipeline.provides("noisy_sig", "clean_sig") +def audio_pipeline_valid(noisy_wav, clean_wav): + yield sb.dataio.dataio.read_audio(noisy_wav) + yield sb.dataio.dataio.read_audio(clean_wav) + + +# For historical data +@sb.utils.data_pipeline.takes("enh_wav") +@sb.utils.data_pipeline.provides("enh_sig") +def enh_pipeline(enh_wav): + yield sb.dataio.dataio.read_audio(enh_wav) + + +def dataio_prep(hparams): + """This function prepares the datasets to be used in the brain class.""" + + # Define datasets + datasets = {} + + datasets["train"] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams["train_annotation"], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline_train], + output_keys=["id", "noisy_sig"], + ) + datasets["valid"] = 
sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams["valid_annotation"], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline_valid], + output_keys=["id", "noisy_sig", "clean_sig"], + ) + + datasets["test"] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=hparams["test_annotation"], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline_valid], + output_keys=["id", "noisy_sig", "clean_sig"], + ) + + return datasets + + +def create_folder(folder): + if not os.path.isdir(folder): + os.makedirs(folder) + + +# Recipe begins! +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Data preparation + from voicebank_prepare import prepare_voicebank # noqa + + run_on_main( + prepare_voicebank, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["data_folder"], + "skip_prep": hparams["skip_prep"], + }, + ) + + # Create dataset objects + datasets = dataio_prep(hparams) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs"] + ) + + # Create the folder to save enhanced files (+ support for DDP) + run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]}) + + se_brain = MetricGanBrain( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + se_brain.train_set = 
datasets["train"] + se_brain.historical_set = {} + se_brain.noisy_scores = {} + se_brain.batch_size = hparams["dataloader_options"]["batch_size"] + se_brain.sub_stage = SubStage.GENERATOR + + if not os.path.isfile(hparams["historical_file"]): + shutil.rmtree(hparams["MetricGAN_folder"]) + run_on_main(create_folder, kwargs={"folder": hparams["MetricGAN_folder"]}) + + se_brain.load_history() + # Load latest checkpoint to resume training + se_brain.fit( + epoch_counter=se_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["valid_dataloader_options"], + ) + + # Load best checkpoint for evaluation + test_stats = se_brain.evaluate( + test_set=datasets["test"], + max_key="pesq", + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/Voicebank/enhance/MetricGAN-U/voicebank_prepare.py b/recipes/Voicebank/enhance/MetricGAN-U/voicebank_prepare.py index d2b489cc6d..9bf617a654 100644 --- a/recipes/Voicebank/enhance/MetricGAN-U/voicebank_prepare.py +++ b/recipes/Voicebank/enhance/MetricGAN-U/voicebank_prepare.py @@ -1,476 +1,498 @@ -# -*- coding: utf-8 -*- -""" -Data preparation. - -Download and resample, use ``download_vctk`` below. 
-https://datashare.is.ed.ac.uk/handle/10283/2791 - -Authors: - * Szu-Wei Fu, 2020 - * Peter Plantinga, 2020 -""" - -import os -import json -import string -import urllib -import shutil -import logging -import tempfile -import torchaudio -from torchaudio.transforms import Resample -from speechbrain.utils.data_utils import get_all_files, download_file -from speechbrain.dataio.dataio import read_audio - -logger = logging.getLogger(__name__) -LEXICON_URL = "http://www.openslr.org/resources/11/librispeech-lexicon.txt" -TRAIN_JSON = "train.json" -TEST_JSON = "test.json" -VALID_JSON = "valid.json" -SAMPLERATE = 16000 -TRAIN_SPEAKERS = [ - "p226", - "p287", - "p227", - "p228", - "p230", - "p231", - "p233", - "p236", - "p239", - "p243", - "p244", - "p250", - "p254", - "p256", - "p258", - "p259", - "p267", - "p268", - "p269", - "p270", - "p273", - "p274", - "p276", - "p277", - "p278", - "p279", - "p282", - "p286", -] -# Lexicon missing entries -MISSING_LEXICON = { - "CRUCIALLY": "K R UW SH AH L IY", - "PAEDOPHILES": "P EH D OW F AY L S", - "MR": "M IH S T ER", - "BBC": "B IY B IY S IY", - "EUPHORIC": "Y UW F AO R IH K", - "RACISM": "R EY S IH S M", - "MP": "EH M P IY", - "RESTRUCTURING": "R IY S T R AH K CH ER IH NG", - "OSAMA": "OW S AH M AH", - "GUITARIST": "G IH T AA R IH S T", - "BLUESHE": "B L UW SH IY", - "FLANKER": "F L AY N K ER", - "SADDAM": "S AA D AA M", - "COVERUP": "K UH V ER UH P", - "FBI": "EH F B IY AY", - "PREEMPTIVE": "P R IY EH M P T IH V", - "FOURYEAR": "F AO R Y IY R", - "XRAY": "EH K S R AY", - "TALIBAN": "T AE L IH B AA N", - "SUPERIMPOSITION": "S UW P ER IH M P OW S IH SH AH N", - "GUIDELINES": "G AY D L AY N S", - "FINALISED": "F AY N AH L AY Z D", - "HALFTIME": "H AE F T AY M", - "WINGERS": "W IH NG ER Z", - "GM": "J IY EH M", - "MCGREGOR": "M AH K G R EH G AO R", - "TWODAY": "T UW D EY", - "DATABASE": "D EY T AH B EY S", - "TELECOM": "T EH L AH K AO M", - "SHORTTERM": "SH AO R T ER M", - "SHORTFALL": "SH AO R T F AH L", - "MCCALL": "M AH K AH L", - 
"HEADTEACHER": "H EH D T IY CH ER", - "TAKEOVER": "T EY K OW V ER", - "ONETHIRD": "W AH N TH ER D", - "TV": "T IY V IY", - "SCREENPLAY": "S K R IY N P L EY", - "YUGOSLAV": "Y UW G OW S L AA V", - "HIBS": "HH IH B Z", - "DISPOSALS": "D IH S P OW S AH L Z", - "MODERNISATION": "M AA D ER N AH Z EY SH AH N", - "REALLIFE": "R IY L AY F", - "ONEYEAR": "W AH N Y IY R", - "GRASSROOTS": "G R AE S R UW T S", - "ARNIE": "AH R N IY", - "PARTTIME": "P AH R T AY M", - "SHORTLIST": "SH AO R T L IH S T", - "OUTPERFORMED": "OW T P ER F AO R M D", - "LONGTERM": "L AO NG T ER M", - "DAYTODAY": "D EY T UW D EY", - "MCPHERSON": "M AH K F ER S AH N", - "OUTSOURCING": "OW T S AO R S IH NG", - "FULLSCALE": "F UH L S K EY L", - "SERGIO": "S ER J IY OW", - "HENMAN": "HH EH N M AA N", - "MCLEOD": "M AH K L IY AO D", - "TIMESCALE": "T AY M S K EY L", - "REFURBISHMENT": "R IY F UH R B IH SH M AH N T", - "LINEUP": "L AY N UH P", - "DOWNBEAT": "D OW N B IY T", - "MANDELA": "M AE N D EH L AH", - "UNDERAGE": "UH N D ER EY J", - "MCNAUGHTON": "M AH K N AW T AH N", - "MICKELSON": "M IH K L S AH N", - "THREEQUARTERS": "TH R IY K AO R T ER Z", - "WEBSITE": "W EH B S AY T", - "BLUEITS": "B L UW IH T S", - "CEASEFIRE": "S IY S F AY R", - "FULLTIME": "F UH L T AY M", - "DOCHERTY": "D AH K ER T IY", - "RUNNERUP": "R UH N ER AH P", - "DOWNTURN": "D OW N T ER N", - "EUROS": "Y ER OW S", - "FOOTANDMOUTH": "F UH T AE N D M OW TH", - "HIGHLIGHTED": "HH AY L AY T AH D", - "MIDFIELD": "M IH D F IY L D", - "MCKENZIE": "M AH K EH N Z IY", - "BENCHMARK": "B EH N CH M AA R K", - "MCCONNELL": "M AH K AW N EH L", - "UPGRADING": "UH P G R EY D IH NG", - "BLUNKETT": "B L UH N K AH T", - "RETHINK": "R IY TH IH N K", - "UPBEAT": "AH P B IY T", - "TELECOMS": "T EH L AH K AO M Z", - "APARTHEID": "AH P AH R T HH AY D", - "AIRDRIE": "EY R D R IY", - "RETHINK": "R IY TH IH N K", - "HELPLINE": "HH EH L P L AY N", - "CLEARCUT": "K L IY R K UH T", -} - - -def prepare_voicebank( - data_folder, save_folder, valid_speaker_count=2, 
skip_prep=False -): - """ - Prepares the json files for the Voicebank dataset. - - Expects the data folder to be the same format as the output of - ``download_vctk()`` below. - - Arguments - --------- - data_folder : str - Path to the folder where the original Voicebank dataset is stored. - save_folder : str - The directory where to store the json files. - valid_speaker_count : int - The number of validation speakers to use (out of 28 in train set). - skip_prep: bool - If True, skip data preparation. - - Example - ------- - >>> data_folder = '/path/to/datasets/Voicebank' - >>> save_folder = 'exp/Voicebank_exp' - >>> prepare_voicebank(data_folder, save_folder) - """ - - if skip_prep: - return - - # Setting ouput files - save_json_train = os.path.join(save_folder, TRAIN_JSON) - save_json_valid = os.path.join(save_folder, VALID_JSON) - save_json_test = os.path.join(save_folder, TEST_JSON) - - # Check if this phase is already done (if so, skip it) - if skip(save_json_train, save_json_test, save_json_valid): - logger.info("Preparation completed in previous run, skipping.") - return - - train_clean_folder = os.path.join( - data_folder, "clean_trainset_28spk_wav_16k" - ) - train_noisy_folder = os.path.join( - data_folder, "noisy_trainset_28spk_wav_16k" - ) - train_txts = os.path.join(data_folder, "trainset_28spk_txt") - test_clean_folder = os.path.join(data_folder, "clean_testset_wav_16k") - test_noisy_folder = os.path.join(data_folder, "noisy_testset_wav_16k") - test_txts = os.path.join(data_folder, "testset_txt") - - # Setting the save folder - if not os.path.exists(save_folder): - os.makedirs(save_folder) - - # Additional checks to make sure the data folder contains Voicebank - check_voicebank_folders( - train_clean_folder, - train_noisy_folder, - train_txts, - test_clean_folder, - test_noisy_folder, - test_txts, - ) - - logger.debug("Creating lexicon...") - lexicon = create_lexicon(os.path.join(data_folder, "lexicon.txt")) - logger.info("Creating json files for noisy 
VoiceBank...") - - logger.debug("Collecting files...") - extension = [".wav"] - valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count] - wav_lst_train = get_all_files( - train_noisy_folder, match_and=extension, exclude_or=valid_speakers, - ) - wav_lst_valid = get_all_files( - train_noisy_folder, match_and=extension, match_or=valid_speakers, - ) - wav_lst_test = get_all_files(test_noisy_folder, match_and=extension) - - logger.debug("Creating json files for noisy VoiceBank...") - create_json( - wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon - ) - create_json( - wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon - ) - create_json( - wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon - ) - - -def skip(*filenames): - """ - Detects if the Voicebank data_preparation has been already done. - If the preparation has been done, we can skip it. - - Returns - ------- - bool - if True, the preparation phase can be skipped. - if False, it must be done. - """ - for filename in filenames: - if not os.path.isfile(filename): - return False - return True - - -def remove_punctuation(a_string): - """Remove all punctuation from string""" - return a_string.translate(str.maketrans("", "", string.punctuation)) - - -def create_lexicon(lexicon_save_filepath): - """ - Creates the lexicon object, downloading if it hasn't been done yet. - - Arguments - --------- - lexicon_save_filepath : str - Path to save the lexicon when downloading - """ - if not os.path.isfile(lexicon_save_filepath): - download_file(LEXICON_URL, lexicon_save_filepath) - - # Iterate lexicon file and add the first pronunciation in the file for - # each word to our lexicon dictionary - lexicon = MISSING_LEXICON - delayed_words = {} - for line in open(lexicon_save_filepath): - line = line.split() - phns = " ".join(p.strip("012") for p in line[1:]) - - # Don't add words with punctuation until we can be sure they won't - # overwrite words without punctuation. 
- clean_word = remove_punctuation(line[0]) - if clean_word != line[0] and clean_word not in delayed_words: - delayed_words[clean_word] = phns - elif clean_word == line[0] and clean_word not in lexicon: - lexicon[clean_word] = phns - - # Add words with punctuation if they won't overwrite non-punctuated words - for word, phns in delayed_words.items(): - if word not in lexicon: - lexicon[word] = phns - - return lexicon - - -def create_json(wav_lst, json_file, clean_folder, txt_folder, lexicon): - """ - Creates the json file given a list of wav files. - - Arguments - --------- - wav_lst : list - The list of wav files. - json_file : str - The path of the output json file - clean_folder : str - The location of parallel clean samples. - txt_folder : str - The location of the transcript files. - """ - logger.debug(f"Creating json lists in {json_file}") - - # Processing all the wav files in the list - json_dict = {} - for wav_file in wav_lst: # ex:p203_122.wav - - # Example wav_file: p232_001.wav - noisy_path, filename = os.path.split(wav_file) - _, noisy_dir = os.path.split(noisy_path) - _, clean_dir = os.path.split(clean_folder) - noisy_rel_path = os.path.join("{data_root}", noisy_dir, filename) - clean_rel_path = os.path.join("{data_root}", clean_dir, filename) - - # Reading the signal (to retrieve duration in seconds) - signal = read_audio(wav_file) - duration = signal.shape[0] / SAMPLERATE - - # Read text - snt_id = filename.replace(".wav", "") - with open(os.path.join(txt_folder, snt_id + ".txt")) as f: - word_string = f.read() - word_string = remove_punctuation(word_string).strip().upper() - phones = [ - phn for word in word_string.split() for phn in lexicon[word].split() - ] - - # Remove duplicate phones - phones = [i for i, j in zip(phones, phones[1:] + [None]) if i != j] - phone_string = " ".join(phones) - - json_dict[snt_id] = { - "noisy_wav": noisy_rel_path, - "clean_wav": clean_rel_path, - "length": duration, - "words": word_string, - "phones": phone_string, - 
} - - # Writing the json lines - with open(json_file, mode="w") as json_f: - json.dump(json_dict, json_f, indent=2) - - logger.info(f"{json_file} successfully created!") - - -def check_voicebank_folders(*folders): - """Raises FileNotFoundError if any passed folder does not exist.""" - for folder in folders: - if not os.path.exists(folder): - raise FileNotFoundError( - f"the folder {folder} does not exist (it is expected in " - "the Voicebank dataset)" - ) - - -def download_vctk(destination, tmp_dir=None, device="cpu"): - """Download dataset and perform resample to 16000 Hz. - - Arguments - --------- - destination : str - Place to put final zipped dataset. - tmp_dir : str - Location to store temporary files. Will use `tempfile` if not provided. - device : str - Passed directly to pytorch's ``.to()`` method. Used for resampling. - """ - dataset_name = "noisy-vctk-16k" - if tmp_dir is None: - tmp_dir = tempfile.gettempdir() - final_dir = os.path.join(tmp_dir, dataset_name) - - if not os.path.isdir(tmp_dir): - os.mkdir(tmp_dir) - - if not os.path.isdir(final_dir): - os.mkdir(final_dir) - - prefix = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/2791/" - noisy_vctk_urls = [ - prefix + "clean_testset_wav.zip", - prefix + "noisy_testset_wav.zip", - prefix + "testset_txt.zip", - prefix + "clean_trainset_28spk_wav.zip", - prefix + "noisy_trainset_28spk_wav.zip", - prefix + "trainset_28spk_txt.zip", - ] - - zip_files = [] - for url in noisy_vctk_urls: - filename = os.path.join(tmp_dir, url.split("/")[-1]) - zip_files.append(filename) - if not os.path.isfile(filename): - logger.info("Downloading " + url) - with urllib.request.urlopen(url) as response: - with open(filename, "wb") as tmp_file: - logger.info("... 
to " + tmp_file.name) - shutil.copyfileobj(response, tmp_file) - - # Unzip - for zip_file in zip_files: - logger.info("Unzipping " + zip_file) - shutil.unpack_archive(zip_file, tmp_dir, "zip") - os.remove(zip_file) - - # Move transcripts to final dir - shutil.move(os.path.join(tmp_dir, "testset_txt"), final_dir) - shutil.move(os.path.join(tmp_dir, "trainset_28spk_txt"), final_dir) - - # Downsample - dirs = [ - "noisy_testset_wav", - "clean_testset_wav", - "noisy_trainset_28spk_wav", - "clean_trainset_28spk_wav", - ] - - downsampler = Resample(orig_freq=48000, new_freq=16000) - - for directory in dirs: - logger.info("Resampling " + directory) - dirname = os.path.join(tmp_dir, directory) - - # Make directory to store downsampled files - dirname_16k = os.path.join(final_dir, directory + "_16k") - if not os.path.isdir(dirname_16k): - os.mkdir(dirname_16k) - - # Load files and downsample - for filename in get_all_files(dirname, match_and=[".wav"]): - signal, rate = torchaudio.load(filename) - downsampled_signal = downsampler(signal.view(1, -1).to(device)) - - # Save downsampled file - torchaudio.save( - os.path.join(dirname_16k, filename[-12:]), - downsampled_signal[0].cpu(), - sample_rate=16000, - channels_first=False, - ) - - # Remove old file - os.remove(filename) - - # Remove old directory - os.rmdir(dirname) - - logger.info("Zipping " + final_dir) - final_zip = shutil.make_archive( - base_name=final_dir, - format="zip", - root_dir=os.path.dirname(final_dir), - base_dir=os.path.basename(final_dir), - ) - - logger.info(f"Moving {final_zip} to {destination}") - shutil.move(final_zip, os.path.join(destination, dataset_name + ".zip")) +""" +Data preparation. + +Download and resample, use ``download_vctk`` below. 
+https://datashare.is.ed.ac.uk/handle/10283/2791 + +Authors: + * Szu-Wei Fu, 2020 + * Peter Plantinga, 2020 +""" + +import json +import os +import shutil +import string +import tempfile +import urllib + +from torchaudio.transforms import Resample + +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.data_utils import download_file, get_all_files +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +LEXICON_URL = "http://www.openslr.org/resources/11/librispeech-lexicon.txt" +TRAIN_JSON = "train.json" +TEST_JSON = "test.json" +VALID_JSON = "valid.json" +SAMPLERATE = 16000 +TRAIN_SPEAKERS = [ + "p226", + "p287", + "p227", + "p228", + "p230", + "p231", + "p233", + "p236", + "p239", + "p243", + "p244", + "p250", + "p254", + "p256", + "p258", + "p259", + "p267", + "p268", + "p269", + "p270", + "p273", + "p274", + "p276", + "p277", + "p278", + "p279", + "p282", + "p286", +] +# Lexicon missing entries +MISSING_LEXICON = { + "CRUCIALLY": "K R UW SH AH L IY", + "PAEDOPHILES": "P EH D OW F AY L S", + "MR": "M IH S T ER", + "BBC": "B IY B IY S IY", + "EUPHORIC": "Y UW F AO R IH K", + "RACISM": "R EY S IH S M", + "MP": "EH M P IY", + "RESTRUCTURING": "R IY S T R AH K CH ER IH NG", + "OSAMA": "OW S AH M AH", + "GUITARIST": "G IH T AA R IH S T", + "BLUESHE": "B L UW SH IY", + "FLANKER": "F L AY N K ER", + "SADDAM": "S AA D AA M", + "COVERUP": "K UH V ER UH P", + "FBI": "EH F B IY AY", + "PREEMPTIVE": "P R IY EH M P T IH V", + "FOURYEAR": "F AO R Y IY R", + "XRAY": "EH K S R AY", + "TALIBAN": "T AE L IH B AA N", + "SUPERIMPOSITION": "S UW P ER IH M P OW S IH SH AH N", + "GUIDELINES": "G AY D L AY N S", + "FINALISED": "F AY N AH L AY Z D", + "HALFTIME": "H AE F T AY M", + "WINGERS": "W IH NG ER Z", + "GM": "J IY EH M", + "MCGREGOR": "M AH K G R EH G AO R", + "TWODAY": "T UW D EY", + "DATABASE": "D EY T AH B EY S", + "TELECOM": "T EH L AH K AO M", + "SHORTTERM": "SH AO R T ER M", + 
"SHORTFALL": "SH AO R T F AH L", + "MCCALL": "M AH K AH L", + "HEADTEACHER": "H EH D T IY CH ER", + "TAKEOVER": "T EY K OW V ER", + "ONETHIRD": "W AH N TH ER D", + "TV": "T IY V IY", + "SCREENPLAY": "S K R IY N P L EY", + "YUGOSLAV": "Y UW G OW S L AA V", + "HIBS": "HH IH B Z", + "DISPOSALS": "D IH S P OW S AH L Z", + "MODERNISATION": "M AA D ER N AH Z EY SH AH N", + "REALLIFE": "R IY L AY F", + "ONEYEAR": "W AH N Y IY R", + "GRASSROOTS": "G R AE S R UW T S", + "ARNIE": "AH R N IY", + "PARTTIME": "P AH R T AY M", + "SHORTLIST": "SH AO R T L IH S T", + "OUTPERFORMED": "OW T P ER F AO R M D", + "LONGTERM": "L AO NG T ER M", + "DAYTODAY": "D EY T UW D EY", + "MCPHERSON": "M AH K F ER S AH N", + "OUTSOURCING": "OW T S AO R S IH NG", + "FULLSCALE": "F UH L S K EY L", + "SERGIO": "S ER J IY OW", + "HENMAN": "HH EH N M AA N", + "MCLEOD": "M AH K L IY AO D", + "TIMESCALE": "T AY M S K EY L", + "REFURBISHMENT": "R IY F UH R B IH SH M AH N T", + "LINEUP": "L AY N UH P", + "DOWNBEAT": "D OW N B IY T", + "MANDELA": "M AE N D EH L AH", + "UNDERAGE": "UH N D ER EY J", + "MCNAUGHTON": "M AH K N AW T AH N", + "MICKELSON": "M IH K L S AH N", + "THREEQUARTERS": "TH R IY K AO R T ER Z", + "WEBSITE": "W EH B S AY T", + "BLUEITS": "B L UW IH T S", + "CEASEFIRE": "S IY S F AY R", + "FULLTIME": "F UH L T AY M", + "DOCHERTY": "D AH K ER T IY", + "RUNNERUP": "R UH N ER AH P", + "DOWNTURN": "D OW N T ER N", + "EUROS": "Y ER OW S", + "FOOTANDMOUTH": "F UH T AE N D M OW TH", + "HIGHLIGHTED": "HH AY L AY T AH D", + "MIDFIELD": "M IH D F IY L D", + "MCKENZIE": "M AH K EH N Z IY", + "BENCHMARK": "B EH N CH M AA R K", + "MCCONNELL": "M AH K AW N EH L", + "UPGRADING": "UH P G R EY D IH NG", + "BLUNKETT": "B L UH N K AH T", + "RETHINK": "R IY TH IH N K", + "UPBEAT": "AH P B IY T", + "TELECOMS": "T EH L AH K AO M Z", + "APARTHEID": "AH P AH R T HH AY D", + "AIRDRIE": "EY R D R IY", + "RETHINK": "R IY TH IH N K", + "HELPLINE": "HH EH L P L AY N", + "CLEARCUT": "K L IY R K UH T", +} + + +def 
prepare_voicebank( + data_folder, save_folder, valid_speaker_count=2, skip_prep=False +): + """ + Prepares the json files for the Voicebank dataset. + + Expects the data folder to be the same format as the output of + ``download_vctk()`` below. + + Arguments + --------- + data_folder : str + Path to the folder where the original Voicebank dataset is stored. + save_folder : str + The directory where to store the json files. + valid_speaker_count : int + The number of validation speakers to use (out of 28 in train set). + skip_prep: bool + If True, skip data preparation. + + Returns + ------- + None + + Example + ------- + >>> data_folder = "/path/to/datasets/Voicebank" + >>> save_folder = "exp/Voicebank_exp" + >>> prepare_voicebank(data_folder, save_folder) + """ + + if skip_prep: + return + + # Setting output files + save_json_train = os.path.join(save_folder, TRAIN_JSON) + save_json_valid = os.path.join(save_folder, VALID_JSON) + save_json_test = os.path.join(save_folder, TEST_JSON) + + # Check if this phase is already done (if so, skip it) + if skip(save_json_train, save_json_test, save_json_valid): + logger.info("Preparation completed in previous run, skipping.") + return + + train_clean_folder = os.path.join( + data_folder, "clean_trainset_28spk_wav_16k" + ) + train_noisy_folder = os.path.join( + data_folder, "noisy_trainset_28spk_wav_16k" + ) + train_txts = os.path.join(data_folder, "trainset_28spk_txt") + test_clean_folder = os.path.join(data_folder, "clean_testset_wav_16k") + test_noisy_folder = os.path.join(data_folder, "noisy_testset_wav_16k") + test_txts = os.path.join(data_folder, "testset_txt") + + # Setting the save folder + if not os.path.exists(save_folder): + os.makedirs(save_folder) + + # Additional checks to make sure the data folder contains Voicebank + check_voicebank_folders( + train_clean_folder, + train_noisy_folder, + train_txts, + test_clean_folder, + test_noisy_folder, + test_txts, + ) + + logger.debug("Creating lexicon...") + lexicon = 
create_lexicon(os.path.join(data_folder, "lexicon.txt")) + logger.info("Creating json files for noisy VoiceBank...") + + logger.debug("Collecting files...") + extension = [".wav"] + valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count] + wav_lst_train = get_all_files( + train_noisy_folder, + match_and=extension, + exclude_or=valid_speakers, + ) + wav_lst_valid = get_all_files( + train_noisy_folder, + match_and=extension, + match_or=valid_speakers, + ) + wav_lst_test = get_all_files(test_noisy_folder, match_and=extension) + + logger.debug("Creating json files for noisy VoiceBank...") + create_json( + wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon + ) + create_json( + wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon + ) + create_json( + wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon + ) + + +def skip(*filenames): + """ + Detects if the Voicebank data_preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + *filenames : dict + List of paths to check for existence. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + for filename in filenames: + if not os.path.isfile(filename): + return False + return True + + +def remove_punctuation(a_string): + """Remove all punctuation from string""" + return a_string.translate(str.maketrans("", "", string.punctuation)) + + +def create_lexicon(lexicon_save_filepath): + """ + Creates the lexicon object, downloading if it hasn't been done yet. + + Arguments + --------- + lexicon_save_filepath : str + Path to save the lexicon when downloading + + Returns + ------- + lexicon : dict + Mapping from word strings to list of phonemes. 
def create_lexicon(lexicon_save_filepath):
    """
    Creates the lexicon object, downloading if it hasn't been done yet.

    Arguments
    ---------
    lexicon_save_filepath : str
        Path to save the lexicon when downloading.

    Returns
    -------
    lexicon : dict
        Mapping from word strings to a space-separated string of phonemes.
    """
    if not os.path.isfile(lexicon_save_filepath):
        download_file(LEXICON_URL, lexicon_save_filepath)

    # Iterate lexicon file and add the first pronunciation in the file for
    # each word to our lexicon dictionary.
    # NOTE: start from a *copy* of MISSING_LEXICON. The original assigned
    # `lexicon = MISSING_LEXICON`, which aliases the module-level constant;
    # the loop below then mutated the shared dict, so repeated calls (or any
    # other reader of MISSING_LEXICON) observed downloaded entries leaking in.
    lexicon = dict(MISSING_LEXICON)
    delayed_words = {}
    for line in open(lexicon_save_filepath, encoding="utf-8"):
        line = line.split()
        # Strip stress digits (0/1/2) from ARPAbet phones, e.g. "AH0" -> "AH"
        phns = " ".join(p.strip("012") for p in line[1:])

        # Don't add words with punctuation until we can be sure they won't
        # overwrite words without punctuation.
        clean_word = remove_punctuation(line[0])
        if clean_word != line[0] and clean_word not in delayed_words:
            delayed_words[clean_word] = phns
        elif clean_word == line[0] and clean_word not in lexicon:
            lexicon[clean_word] = phns

    # Add words with punctuation if they won't overwrite non-punctuated words
    for word, phns in delayed_words.items():
        if word not in lexicon:
            lexicon[word] = phns

    return lexicon
+ """ + logger.debug(f"Creating json lists in {json_file}") + + # Processing all the wav files in the list + json_dict = {} + for wav_file in wav_lst: # ex:p203_122.wav + # Example wav_file: p232_001.wav + noisy_path, filename = os.path.split(wav_file) + _, noisy_dir = os.path.split(noisy_path) + _, clean_dir = os.path.split(clean_folder) + noisy_rel_path = os.path.join("{data_root}", noisy_dir, filename) + clean_rel_path = os.path.join("{data_root}", clean_dir, filename) + + # Reading the signal (to retrieve duration in seconds) + signal = read_audio(wav_file) + duration = signal.shape[0] / SAMPLERATE + + # Read text + snt_id = filename.replace(".wav", "") + with open( + os.path.join(txt_folder, snt_id + ".txt"), encoding="utf-8" + ) as f: + word_string = f.read() + word_string = remove_punctuation(word_string).strip().upper() + phones = [ + phn for word in word_string.split() for phn in lexicon[word].split() + ] + + # Remove duplicate phones + phones = [i for i, j in zip(phones, phones[1:] + [None]) if i != j] + phone_string = " ".join(phones) + + json_dict[snt_id] = { + "noisy_wav": noisy_rel_path, + "clean_wav": clean_rel_path, + "length": duration, + "words": word_string, + "phones": phone_string, + } + + # Writing the json lines + with open(json_file, mode="w", encoding="utf-8") as json_f: + json.dump(json_dict, json_f, indent=2) + + logger.info(f"{json_file} successfully created!") + + +def check_voicebank_folders(*folders): + """Raises FileNotFoundError if any passed folder does not exist.""" + for folder in folders: + if not os.path.exists(folder): + raise FileNotFoundError( + f"the folder {folder} does not exist (it is expected in " + "the Voicebank dataset)" + ) + + +def download_vctk(destination, tmp_dir=None, device="cpu"): + """Download dataset and perform resample to 16000 Hz. + + Arguments + --------- + destination : str + Place to put final zipped dataset. + tmp_dir : str + Location to store temporary files. Will use `tempfile` if not provided. 
+ device : str + Passed directly to pytorch's ``.to()`` method. Used for resampling. + """ + dataset_name = "noisy-vctk-16k" + if tmp_dir is None: + tmp_dir = tempfile.gettempdir() + final_dir = os.path.join(tmp_dir, dataset_name) + + if not os.path.isdir(tmp_dir): + os.mkdir(tmp_dir) + + if not os.path.isdir(final_dir): + os.mkdir(final_dir) + + prefix = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/2791/" + noisy_vctk_urls = [ + prefix + "clean_testset_wav.zip", + prefix + "noisy_testset_wav.zip", + prefix + "testset_txt.zip", + prefix + "clean_trainset_28spk_wav.zip", + prefix + "noisy_trainset_28spk_wav.zip", + prefix + "trainset_28spk_txt.zip", + ] + + zip_files = [] + for url in noisy_vctk_urls: + filename = os.path.join(tmp_dir, url.split("/")[-1]) + zip_files.append(filename) + if not os.path.isfile(filename): + logger.info("Downloading " + url) + with urllib.request.urlopen(url) as response: + with open(filename, "wb") as tmp_file: + logger.info("... to " + tmp_file.name) + shutil.copyfileobj(response, tmp_file) + + # Unzip + for zip_file in zip_files: + logger.info("Unzipping " + zip_file) + shutil.unpack_archive(zip_file, tmp_dir, "zip") + os.remove(zip_file) + + # Move transcripts to final dir + shutil.move(os.path.join(tmp_dir, "testset_txt"), final_dir) + shutil.move(os.path.join(tmp_dir, "trainset_28spk_txt"), final_dir) + + # Downsample + dirs = [ + "noisy_testset_wav", + "clean_testset_wav", + "noisy_trainset_28spk_wav", + "clean_trainset_28spk_wav", + ] + + downsampler = Resample(orig_freq=48000, new_freq=16000) + + for directory in dirs: + logger.info("Resampling " + directory) + dirname = os.path.join(tmp_dir, directory) + + # Make directory to store downsampled files + dirname_16k = os.path.join(final_dir, directory + "_16k") + if not os.path.isdir(dirname_16k): + os.mkdir(dirname_16k) + + # Load files and downsample + for filename in get_all_files(dirname, match_and=[".wav"]): + signal, rate = audio_io.load(filename) + 
downsampled_signal = downsampler(signal.view(1, -1).to(device)) + + # Save downsampled file + audio_io.save( + os.path.join(dirname_16k, filename[-12:]), + downsampled_signal[0].cpu(), + sample_rate=16000, + channels_first=False, + ) + + # Remove old file + os.remove(filename) + + # Remove old directory + os.rmdir(dirname) + + logger.info("Zipping " + final_dir) + final_zip = shutil.make_archive( + base_name=final_dir, + format="zip", + root_dir=os.path.dirname(final_dir), + base_dir=os.path.basename(final_dir), + ) + + logger.info(f"Moving {final_zip} to {destination}") + shutil.move(final_zip, os.path.join(destination, dataset_name + ".zip")) diff --git a/recipes/Voicebank/enhance/MetricGAN/README.md b/recipes/Voicebank/enhance/MetricGAN/README.md index 1faed677fd..6385e7c4aa 100644 --- a/recipes/Voicebank/enhance/MetricGAN/README.md +++ b/recipes/Voicebank/enhance/MetricGAN/README.md @@ -6,7 +6,18 @@ This recipe implements MetricGAN+ recipe for enhancement as described in the pap **Web Demo** Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See demo Speech Enhancement: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/Speechbrain-Speech-enhancement) Use the `download_vctk` function in `voicebank_prepare.py` to download the dataset -and resample it to 16000 Hz. To run an experiment, execute the following command in +and resample it to 16000 Hz. + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. 
To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + +# How to run: +To run an experiment, execute the following command in the current folder: ```bash @@ -25,7 +36,7 @@ You can find the pre-trained model with an easy-inference function on HuggingFac - https://huggingface.co/speechbrain/metricgan-plus-voicebank You can find the full experiment folder (i.e., checkpoints, logs, etc) here: -https://drive.google.com/drive/folders/1IV3ohFracK0zLH-ZGb3LTas-l3ZDFDPW?usp=sharing +https://www.dropbox.com/sh/n5q9vjn0yn1qvk6/AAB-S7i2-XzVm6ux0MrXCvqya?dl=0 @@ -33,19 +44,21 @@ https://drive.google.com/drive/folders/1IV3ohFracK0zLH-ZGb3LTas-l3ZDFDPW?usp=sha If you find the code useful in your research, please cite: - @article{fu2021metricgan+, - title={MetricGAN+: An Improved Version of MetricGAN for Speech Enhancement}, - author={Fu, Szu-Wei and Yu, Cheng and Hsieh, Tsun-An and Plantinga, Peter and Ravanelli, Mirco and Lu, Xugang and Tsao, Yu}, - journal={arXiv preprint arXiv:2104.03538}, - year={2021} - } +```bibtex +@article{fu2021metricgan+, + title={MetricGAN+: An Improved Version of MetricGAN for Speech Enhancement}, + author={Fu, Szu-Wei and Yu, Cheng and Hsieh, Tsun-An and Plantinga, Peter and Ravanelli, Mirco and Lu, Xugang and Tsao, Yu}, + journal={arXiv preprint arXiv:2104.03538}, + year={2021} +} - @inproceedings{fu2019metricGAN, - title = {MetricGAN: Generative Adversarial Networks based Black-box Metric Scores Optimization for Speech Enhancement}, - author = {Fu, Szu-Wei and Liao, Chien-Feng and Tsao, Yu and Lin, Shou-De}, - booktitle = {International Conference on Machine Learning (ICML)}, - year = {2019} - } +@inproceedings{fu2019metricGAN, + title = {MetricGAN: Generative Adversarial Networks based Black-box Metric Scores Optimization for Speech Enhancement}, + author = {Fu, Szu-Wei and Liao, Chien-Feng and Tsao, Yu and Lin, Shou-De}, + booktitle = {International Conference on Machine Learning 
(ICML)}, + year = {2019} +} +``` # **About SpeechBrain** @@ -58,6 +71,15 @@ If you find the code useful in your research, please cite: Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/enhance/MetricGAN/extra_requirements.txt b/recipes/Voicebank/enhance/MetricGAN/extra_requirements.txt index 3f1af440a3..1988374b6d 100644 --- a/recipes/Voicebank/enhance/MetricGAN/extra_requirements.txt +++ b/recipes/Voicebank/enhance/MetricGAN/extra_requirements.txt @@ -1,2 +1 @@ pesq - diff --git a/recipes/Voicebank/enhance/MetricGAN/hparams/train.yaml b/recipes/Voicebank/enhance/MetricGAN/hparams/train.yaml index b9dd9a2237..65946a07e0 100644 --- 
a/recipes/Voicebank/enhance/MetricGAN/hparams/train.yaml +++ b/recipes/Voicebank/enhance/MetricGAN/hparams/train.yaml @@ -9,10 +9,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 4234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] data_folder: !PLACEHOLDER # e.g, /data/member1/user_jasonfu/noisy-vctk-16k -train_clean_folder: !ref /clean_trainset_28spk_wav_16k/ MetricGAN_folder: !ref /enhanced_wavs output_folder: !ref ./results/MetricGAN/ @@ -107,6 +106,6 @@ resynth: !name:speechbrain.processing.signal_processing.resynthesize train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref -# Tensorboard logger (optional) +# torch.Tensorboard logger (optional) tensorboard_train_logger: !new:speechbrain.utils.train_logger.TensorboardLogger save_dir: !ref diff --git a/recipes/Voicebank/enhance/MetricGAN/train.py b/recipes/Voicebank/enhance/MetricGAN/train.py index c660b8139d..b3de005579 100644 --- a/recipes/Voicebank/enhance/MetricGAN/train.py +++ b/recipes/Voicebank/enhance/MetricGAN/train.py @@ -1,609 +1,639 @@ -#!/usr/bin/env/python3 -""" -Recipe for training a speech enhancement system with the Voicebank dataset. 
- -To run this recipe, do the following: -> python train.py hparams/{hyperparam_file}.yaml - -Authors - * Szu-Wei Fu 2020 - * Peter Plantinga 2021 -""" - -import os -import sys -import shutil -import pickle -import torch -import torchaudio -import speechbrain as sb -from pesq import pesq -from enum import Enum, auto -from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import MetricStats -from speechbrain.processing.features import spectral_magnitude -from speechbrain.nnet.loss.stoi_loss import stoi_loss -from speechbrain.utils.distributed import run_on_main -from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler - - -def pesq_eval(pred_wav, target_wav): - """Normalized PESQ (to 0-1)""" - return ( - pesq(fs=16000, ref=target_wav.numpy(), deg=pred_wav.numpy(), mode="wb") - + 0.5 - ) / 5 - - -class SubStage(Enum): - """For keeping track of training stage progress""" - - GENERATOR = auto() - CURRENT = auto() - HISTORICAL = auto() - - -class MetricGanBrain(sb.Brain): - def load_history(self): - if os.path.isfile(self.hparams.historical_file): - with open(self.hparams.historical_file, "rb") as fp: # Unpickling - self.historical_set = pickle.load(fp) - - def compute_feats(self, wavs): - """Feature computation pipeline""" - feats = self.hparams.compute_STFT(wavs) - feats = spectral_magnitude(feats, power=0.5) - feats = torch.log1p(feats) - return feats - - def compute_forward(self, batch, stage): - "Given an input batch computes the enhanced signal" - batch = batch.to(self.device) - - if self.sub_stage == SubStage.HISTORICAL: - predict_wav, lens = batch.enh_sig - else: - noisy_wav, lens = batch.noisy_sig - noisy_spec = self.compute_feats(noisy_wav) - - # mask with "signal approximation (SA)" - mask = self.modules.generator(noisy_spec, lengths=lens) - mask = mask.clamp(min=self.hparams.min_mask).squeeze(2) - predict_spec = torch.mul(mask, noisy_spec) - - # Also return predicted wav - predict_wav = self.hparams.resynth( - 
torch.expm1(predict_spec), noisy_wav - ) - - return predict_wav - - def compute_objectives(self, predictions, batch, stage, optim_name=""): - "Given the network predictions and targets compute the total loss" - predict_wav = predictions - predict_spec = self.compute_feats(predict_wav) - - clean_wav, lens = batch.clean_sig - clean_spec = self.compute_feats(clean_wav) - - ids = self.compute_ids(batch.id, optim_name) - - # One is real, zero is fake - if optim_name == "generator": - target_score = torch.ones(self.batch_size, 1, device=self.device) - est_score = self.est_score(predict_spec, clean_spec) - self.mse_metric.append( - ids, predict_spec, clean_spec, lens, reduction="batch" - ) - mse_cost = self.hparams.compute_cost(predict_spec, clean_spec, lens) - - # D Learns to estimate the scores of clean speech - elif optim_name == "D_clean": - target_score = torch.ones(self.batch_size, 1, device=self.device) - est_score = self.est_score(clean_spec, clean_spec) - - # D Learns to estimate the scores of enhanced speech - elif optim_name == "D_enh" and self.sub_stage == SubStage.CURRENT: - target_score = self.score(ids, predict_wav, clean_wav, lens) - est_score = self.est_score(predict_spec, clean_spec) - - # Write enhanced wavs during discriminator training, because we - # compute the actual score here and we can save it - self.write_wavs(batch.id, ids, predict_wav, target_score, lens) - - # D Relearns to estimate the scores of previous epochs - elif optim_name == "D_enh" and self.sub_stage == SubStage.HISTORICAL: - target_score = batch.score.unsqueeze(1).float() - est_score = self.est_score(predict_spec, clean_spec) - - # D Learns to estimate the scores of noisy speech - elif optim_name == "D_noisy": - noisy_wav, _ = batch.noisy_sig - noisy_spec = self.compute_feats(noisy_wav) - target_score = self.score(ids, noisy_wav, clean_wav, lens) - est_score = self.est_score(noisy_spec, clean_spec) - - # Save scores of noisy wavs - self.save_noisy_scores(ids, target_score) - - if 
stage == sb.Stage.TRAIN: - # Compute the cost - cost = self.hparams.compute_cost(est_score, target_score) - if optim_name == "generator": - cost += self.hparams.mse_weight * mse_cost - self.metrics["G"].append(cost.detach()) - else: - self.metrics["D"].append(cost.detach()) - - # On validation data compute scores - if stage != sb.Stage.TRAIN: - cost = self.hparams.compute_si_snr(predict_wav, clean_wav, lens) - # Evaluate speech quality/intelligibility - self.stoi_metric.append( - batch.id, predict_wav, clean_wav, lens, reduction="batch" - ) - self.pesq_metric.append( - batch.id, predict=predict_wav, target=clean_wav, lengths=lens - ) - - # Write wavs to file, for evaluation - lens = lens * clean_wav.shape[1] - for name, pred_wav, length in zip(batch.id, predict_wav, lens): - name += ".wav" - enhance_path = os.path.join(self.hparams.enhanced_folder, name) - torchaudio.save( - enhance_path, - torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), - 16000, - ) - - return cost - - def compute_ids(self, batch_id, optim_name): - """Returns the list of ids, edited via optimizer name.""" - if optim_name == "D_enh": - return [f"{uid}@{self.epoch}" for uid in batch_id] - return batch_id - - def save_noisy_scores(self, batch_id, scores): - for i, score in zip(batch_id, scores): - self.noisy_scores[i] = score - - def score(self, batch_id, deg_wav, ref_wav, lens): - """Returns actual metric score, either pesq or stoi - - Arguments - --------- - batch_id : list of str - A list of the utterance ids for the batch - deg_wav : torch.Tensor - The degraded waveform to score - ref_wav : torch.Tensor - The reference waveform to use for scoring - length : torch.Tensor - The relative lengths of the utterances - """ - new_ids = [ - i - for i, d in enumerate(batch_id) - if d not in self.historical_set and d not in self.noisy_scores - ] - - if len(new_ids) == 0: - pass - elif self.hparams.target_metric == "pesq": - self.target_metric.append( - ids=[batch_id[i] for i in new_ids], - 
predict=deg_wav[new_ids].detach(), - target=ref_wav[new_ids].detach(), - lengths=lens[new_ids], - ) - score = torch.tensor( - [[s] for s in self.target_metric.scores], device=self.device, - ) - elif self.hparams.target_metric == "stoi": - self.target_metric.append( - [batch_id[i] for i in new_ids], - deg_wav[new_ids], - ref_wav[new_ids], - lens[new_ids], - reduction="batch", - ) - score = torch.tensor( - [[-s] for s in self.target_metric.scores], device=self.device, - ) - else: - raise ValueError("Expected 'pesq' or 'stoi' for target_metric") - - # Clear metric scores to prepare for next batch - self.target_metric.clear() - - # Combine old scores and new - final_score = [] - for i, d in enumerate(batch_id): - if d in self.historical_set: - final_score.append([self.historical_set[d]["score"]]) - elif d in self.noisy_scores: - final_score.append([self.noisy_scores[d]]) - else: - final_score.append([score[new_ids.index(i)]]) - - return torch.tensor(final_score, device=self.device) - - def est_score(self, deg_spec, ref_spec): - """Returns score as estimated by discriminator - - Arguments - --------- - deg_spec : torch.Tensor - The spectral features of the degraded utterance - ref_spec : torch.Tensor - The spectral features of the reference utterance - """ - combined_spec = torch.cat( - [deg_spec.unsqueeze(1), ref_spec.unsqueeze(1)], 1 - ) - return self.modules.discriminator(combined_spec) - - def write_wavs(self, clean_id, batch_id, wavs, scores, lens): - """Write wavs to files, for historical discriminator training - - Arguments - --------- - batch_id : list of str - A list of the utterance ids for the batch - wavs : torch.Tensor - The wavs to write to files - scores : torch.Tensor - The actual scores for the corresponding utterances - lens : torch.Tensor - The relative lengths of each utterance - """ - lens = lens * wavs.shape[1] - record = {} - for i, (cleanid, name, pred_wav, length) in enumerate( - zip(clean_id, batch_id, wavs, lens) - ): - path = 
os.path.join(self.hparams.MetricGAN_folder, name + ".wav") - data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0) - torchaudio.save(path, data, self.hparams.Sample_rate) - - # Make record of path and score for historical training - score = float(scores[i][0]) - clean_path = os.path.join( - self.hparams.train_clean_folder, cleanid + ".wav" - ) - record[name] = { - "enh_wav": path, - "score": score, - "clean_wav": clean_path, - } - - # Update records for historical training - self.historical_set.update(record) - - with open(self.hparams.historical_file, "wb") as fp: # Pickling - pickle.dump(self.historical_set, fp) - - def fit_batch(self, batch): - "Compute gradients and update either D or G based on sub-stage." - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss_tracker = 0 - if self.sub_stage == SubStage.CURRENT: - for mode in ["clean", "enh", "noisy"]: - loss = self.compute_objectives( - predictions, batch, sb.Stage.TRAIN, f"D_{mode}" - ) - self.d_optimizer.zero_grad() - loss.backward() - if self.check_gradients(loss): - self.d_optimizer.step() - loss_tracker += loss.detach() / 3 - elif self.sub_stage == SubStage.HISTORICAL: - loss = self.compute_objectives( - predictions, batch, sb.Stage.TRAIN, "D_enh" - ) - self.d_optimizer.zero_grad() - loss.backward() - if self.check_gradients(loss): - self.d_optimizer.step() - loss_tracker += loss.detach() - elif self.sub_stage == SubStage.GENERATOR: - for name, param in self.modules.generator.named_parameters(): - if "Learnable_sigmoid" in name: - param.data = torch.clamp( - param, max=3.5 - ) # to prevent gradient goes to infinity - param.data[param != param] = 3.5 # set 'nan' to 3.5 - - loss = self.compute_objectives( - predictions, batch, sb.Stage.TRAIN, "generator" - ) - - self.g_optimizer.zero_grad() - loss.backward() - if self.check_gradients(loss): - self.g_optimizer.step() - loss_tracker += loss.detach() - - return loss_tracker - - def on_stage_start(self, stage, epoch=None): - """Gets called at 
the beginning of each epoch - - This method calls ``fit()`` again to train the discriminator - before proceeding with generator training. - """ - - self.mse_metric = MetricStats(metric=self.hparams.compute_cost) - self.metrics = {"G": [], "D": []} - - if stage == sb.Stage.TRAIN: - if self.hparams.target_metric == "pesq": - self.target_metric = MetricStats( - metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False - ) - elif self.hparams.target_metric == "stoi": - self.target_metric = MetricStats(metric=stoi_loss) - else: - raise NotImplementedError( - "Right now we only support 'pesq' and 'stoi'" - ) - - # Train discriminator before we start generator training - if self.sub_stage == SubStage.GENERATOR: - self.epoch = epoch - self.train_discriminator() - self.sub_stage = SubStage.GENERATOR - print("Generator training by current data...") - - if stage != sb.Stage.TRAIN: - self.pesq_metric = MetricStats( - metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False - ) - self.stoi_metric = MetricStats(metric=stoi_loss) - - def train_discriminator(self): - """A total of 3 data passes to update discriminator.""" - # First, iterate train subset w/ updates for clean, enh, noisy - print("Discriminator training by current data...") - self.sub_stage = SubStage.CURRENT - self.fit( - range(1), - self.train_set, - train_loader_kwargs=self.hparams.dataloader_options, - ) - - # Next, iterate historical subset w/ updates for enh - if self.historical_set: - print("Discriminator training by historical data...") - self.sub_stage = SubStage.HISTORICAL - self.fit( - range(1), - self.historical_set, - train_loader_kwargs=self.hparams.dataloader_options, - ) - - # Finally, iterate train set again. 
Should iterate same - # samples as before, due to ReproducibleRandomSampler - print("Discriminator training by current data again...") - self.sub_stage = SubStage.CURRENT - self.fit( - range(1), - self.train_set, - train_loader_kwargs=self.hparams.dataloader_options, - ) - - def on_stage_end(self, stage, stage_loss, epoch=None): - "Called at the end of each stage to summarize progress" - if self.sub_stage != SubStage.GENERATOR: - return - - if stage == sb.Stage.TRAIN: - self.train_loss = stage_loss - g_loss = torch.tensor(self.metrics["G"]) # batch_size - d_loss = torch.tensor(self.metrics["D"]) # batch_size - print("Avg G loss: %.3f" % torch.mean(g_loss)) - print("Avg D loss: %.3f" % torch.mean(d_loss)) - else: - stats = { - "SI-SNR": -stage_loss, - "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, - "stoi": -self.stoi_metric.summarize("average"), - } - - if stage == sb.Stage.VALID: - if self.hparams.use_tensorboard: - valid_stats = { - "SI-SNR": -stage_loss, - "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, - "stoi": -self.stoi_metric.summarize("average"), - } - self.hparams.tensorboard_train_logger.log_stats(valid_stats) - self.hparams.train_logger.log_stats( - {"Epoch": epoch}, - train_stats={"loss": self.train_loss}, - valid_stats=stats, - ) - self.checkpointer.save_and_keep_only( - meta=stats, max_keys=[self.hparams.target_metric] - ) - - if stage == sb.Stage.TEST: - self.hparams.train_logger.log_stats( - {"Epoch loaded": self.hparams.epoch_counter.current}, - test_stats=stats, - ) - - def make_dataloader( - self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs - ): - "Override dataloader to insert custom sampler/dataset" - if stage == sb.Stage.TRAIN: - - # Create a new dataset each time, this set grows - if self.sub_stage == SubStage.HISTORICAL: - dataset = sb.dataio.dataset.DynamicItemDataset( - data=dataset, - dynamic_items=[enh_pipeline], - output_keys=["id", "enh_sig", "clean_sig", "score"], - ) - samples = round(len(dataset) * 
self.hparams.history_portion) - else: - samples = self.hparams.number_of_samples - - # This sampler should give the same samples for D and G - epoch = self.hparams.epoch_counter.current - - # Equal weights for all samples, we use "Weighted" so we can do - # both "replacement=False" and a set number of samples, reproducibly - weights = torch.ones(len(dataset)) - sampler = ReproducibleWeightedRandomSampler( - weights, epoch=epoch, replacement=False, num_samples=samples - ) - loader_kwargs["sampler"] = sampler - - if self.sub_stage == SubStage.GENERATOR: - self.train_sampler = sampler - - # Make the dataloader as normal - return super().make_dataloader( - dataset, stage, ckpt_prefix, **loader_kwargs - ) - - def on_fit_start(self): - "Override to prevent this from running for D training" - if self.sub_stage == SubStage.GENERATOR: - super().on_fit_start() - - def init_optimizers(self): - "Initializes the generator and discriminator optimizers" - self.g_optimizer = self.hparams.g_opt_class( - self.modules.generator.parameters() - ) - self.d_optimizer = self.hparams.d_opt_class( - self.modules.discriminator.parameters() - ) - - if self.checkpointer is not None: - self.checkpointer.add_recoverable("g_opt", self.g_optimizer) - self.checkpointer.add_recoverable("d_opt", self.d_optimizer) - - -# Define audio piplines -@sb.utils.data_pipeline.takes("noisy_wav", "clean_wav") -@sb.utils.data_pipeline.provides("noisy_sig", "clean_sig") -def audio_pipeline(noisy_wav, clean_wav): - yield sb.dataio.dataio.read_audio(noisy_wav) - yield sb.dataio.dataio.read_audio(clean_wav) - - -# For historical data -@sb.utils.data_pipeline.takes("enh_wav", "clean_wav") -@sb.utils.data_pipeline.provides("enh_sig", "clean_sig") -def enh_pipeline(enh_wav, clean_wav): - yield sb.dataio.dataio.read_audio(enh_wav) - yield sb.dataio.dataio.read_audio(clean_wav) - - -def dataio_prep(hparams): - """This function prepares the datasets to be used in the brain class.""" - - # Define datasets - datasets = {} - 
data_info = { - "train": hparams["train_annotation"], - "valid": hparams["valid_annotation"], - "test": hparams["test_annotation"], - } - for dataset in data_info: - datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=data_info[dataset], - replacements={"data_root": hparams["data_folder"]}, - dynamic_items=[audio_pipeline], - output_keys=["id", "noisy_sig", "clean_sig"], - ) - - return datasets - - -def create_folder(folder): - if not os.path.isdir(folder): - os.makedirs(folder) - - -# Recipe begins! -if __name__ == "__main__": - - # Load hyperparameters file with command-line overrides - hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: - hparams = load_hyperpyyaml(fin, overrides) - - # Initialize ddp (useful only for multi-GPU DDP training) - sb.utils.distributed.ddp_init_group(run_opts) - - # Data preparation - from voicebank_prepare import prepare_voicebank # noqa - - run_on_main( - prepare_voicebank, - kwargs={ - "data_folder": hparams["data_folder"], - "save_folder": hparams["data_folder"], - "skip_prep": hparams["skip_prep"], - }, - ) - - # Create dataset objects - datasets = dataio_prep(hparams) - - # Create experiment directory - sb.create_experiment_directory( - experiment_directory=hparams["output_folder"], - hyperparams_to_save=hparams_file, - overrides=overrides, - ) - - if hparams["use_tensorboard"]: - from speechbrain.utils.train_logger import TensorboardLogger - - hparams["tensorboard_train_logger"] = TensorboardLogger( - hparams["tensorboard_logs"] - ) - - # Create the folder to save enhanced files (+ support for DDP) - run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]}) - - se_brain = MetricGanBrain( - modules=hparams["modules"], - hparams=hparams, - run_opts=run_opts, - checkpointer=hparams["checkpointer"], - ) - se_brain.train_set = datasets["train"] - se_brain.historical_set = {} - se_brain.noisy_scores = {} - se_brain.batch_size = 
hparams["dataloader_options"]["batch_size"] - se_brain.sub_stage = SubStage.GENERATOR - - if not os.path.isfile(hparams["historical_file"]): - shutil.rmtree(hparams["MetricGAN_folder"]) - run_on_main(create_folder, kwargs={"folder": hparams["MetricGAN_folder"]}) - - se_brain.load_history() - # Load latest checkpoint to resume training - se_brain.fit( - epoch_counter=se_brain.hparams.epoch_counter, - train_set=datasets["train"], - valid_set=datasets["valid"], - train_loader_kwargs=hparams["dataloader_options"], - valid_loader_kwargs=hparams["valid_dataloader_options"], - ) - - # Load best checkpoint for evaluation - test_stats = se_brain.evaluate( - test_set=datasets["test"], - max_key=hparams["target_metric"], - test_loader_kwargs=hparams["dataloader_options"], - ) +#!/usr/bin/env/python3 +""" +Recipe for training a speech enhancement system with the Voicebank dataset. + +To run this recipe, do the following: +> python train.py hparams/{hyperparam_file}.yaml + +Authors + * Szu-Wei Fu 2020 + * Peter Plantinga 2021 +""" + +import os +import pickle +import shutil +import sys +from enum import Enum, auto + +import torch +from hyperpyyaml import load_hyperpyyaml +from pesq import pesq + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler +from speechbrain.nnet.loss.stoi_loss import stoi_loss +from speechbrain.processing.features import spectral_magnitude +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.metric_stats import MetricStats + + +def pesq_eval(pred_wav, target_wav): + """Normalized PESQ (to 0-1)""" + return ( + pesq(fs=16000, ref=target_wav.numpy(), deg=pred_wav.numpy(), mode="wb") + + 0.5 + ) / 5 + + +class SubStage(Enum): + """For keeping track of training stage progress""" + + GENERATOR = auto() + CURRENT = auto() + HISTORICAL = auto() + + +class MetricGanBrain(sb.Brain): + def load_history(self): + if 
os.path.isfile(self.hparams.historical_file): + with open(self.hparams.historical_file, "rb") as fp: # Unpickling + self.historical_set = pickle.load(fp) + + def compute_feats(self, wavs): + """Feature computation pipeline""" + feats = self.hparams.compute_STFT(wavs) + feats = spectral_magnitude(feats, power=0.5) + feats = torch.log1p(feats) + return feats + + def compute_forward(self, batch, stage): + "Given an input batch computes the enhanced signal" + batch = batch.to(self.device) + + if self.sub_stage == SubStage.HISTORICAL: + predict_wav, lens = batch.enh_sig + else: + noisy_wav, lens = batch.noisy_sig + noisy_spec = self.compute_feats(noisy_wav) + + # mask with "signal approximation (SA)" + mask = self.modules.generator(noisy_spec, lengths=lens) + mask = mask.clamp(min=self.hparams.min_mask).squeeze(2) + predict_spec = torch.mul(mask, noisy_spec) + + # Also return predicted wav + predict_wav = self.hparams.resynth( + torch.expm1(predict_spec), noisy_wav + ) + + return predict_wav + + def compute_objectives(self, predictions, batch, stage, optim_name=""): + "Given the network predictions and targets compute the total loss" + predict_wav = predictions + predict_spec = self.compute_feats(predict_wav) + + clean_wav, lens = batch.clean_sig + clean_spec = self.compute_feats(clean_wav) + clean_paths = batch.clean_wav + + ids = self.compute_ids(batch.id, optim_name) + + # One is real, zero is fake + if optim_name == "generator": + target_score = torch.ones(self.batch_size, 1, device=self.device) + est_score = self.est_score(predict_spec, clean_spec) + self.mse_metric.append( + ids, predict_spec, clean_spec, lens, reduction="batch" + ) + mse_cost = self.hparams.compute_cost(predict_spec, clean_spec, lens) + + # D Learns to estimate the scores of clean speech + elif optim_name == "D_clean": + target_score = torch.ones(self.batch_size, 1, device=self.device) + est_score = self.est_score(clean_spec, clean_spec) + + # D Learns to estimate the scores of enhanced speech + 
elif optim_name == "D_enh" and self.sub_stage == SubStage.CURRENT: + target_score = self.score(ids, predict_wav, clean_wav, lens) + est_score = self.est_score(predict_spec, clean_spec) + + # Write enhanced wavs during discriminator training, because we + # compute the actual score here and we can save it + self.write_wavs(ids, predict_wav, clean_paths, target_score, lens) + + # D Relearns to estimate the scores of previous epochs + elif optim_name == "D_enh" and self.sub_stage == SubStage.HISTORICAL: + target_score = batch.score.unsqueeze(1).float() + est_score = self.est_score(predict_spec, clean_spec) + + # D Learns to estimate the scores of noisy speech + elif optim_name == "D_noisy": + noisy_wav, _ = batch.noisy_sig + noisy_spec = self.compute_feats(noisy_wav) + target_score = self.score(ids, noisy_wav, clean_wav, lens) + est_score = self.est_score(noisy_spec, clean_spec) + + # Save scores of noisy wavs + self.save_noisy_scores(ids, target_score) + + if stage == sb.Stage.TRAIN: + # Compute the cost + cost = self.hparams.compute_cost(est_score, target_score) + if optim_name == "generator": + cost += self.hparams.mse_weight * mse_cost + self.metrics["G"].append(cost.detach()) + else: + self.metrics["D"].append(cost.detach()) + + # On validation data compute scores + if stage != sb.Stage.TRAIN: + cost = self.hparams.compute_si_snr(predict_wav, clean_wav, lens) + # Evaluate speech quality/intelligibility + self.stoi_metric.append( + batch.id, predict_wav, clean_wav, lens, reduction="batch" + ) + self.pesq_metric.append( + batch.id, predict=predict_wav, target=clean_wav, lengths=lens + ) + + # Write wavs to file, for evaluation + lens = lens * clean_wav.shape[1] + for name, pred_wav, length in zip(batch.id, predict_wav, lens): + name += ".wav" + enhance_path = os.path.join(self.hparams.enhanced_folder, name) + audio_io.save( + enhance_path, + torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), + 16000, + ) + + return cost + + def compute_ids(self, batch_id, 
optim_name): + """Returns the list of ids, edited via optimizer name.""" + if optim_name == "D_enh": + return [f"{uid}@{self.epoch}" for uid in batch_id] + return batch_id + + def save_noisy_scores(self, batch_id, scores): + for i, score in zip(batch_id, scores): + self.noisy_scores[i] = score + + def score(self, batch_id, deg_wav, ref_wav, lens): + """Returns actual metric score, either pesq or stoi + + Arguments + --------- + batch_id : list of str + A list of the utterance ids for the batch + deg_wav : torch.Tensor + The degraded waveform to score + ref_wav : torch.Tensor + The reference waveform to use for scoring + lens : torch.Tensor + The relative lengths of the utterances + + Returns + ------- + score : torch.Tensor + """ + new_ids = [ + i + for i, d in enumerate(batch_id) + if d not in self.historical_set and d not in self.noisy_scores + ] + + if len(new_ids) == 0: + pass + elif self.hparams.target_metric == "pesq": + self.target_metric.append( + ids=[batch_id[i] for i in new_ids], + predict=deg_wav[new_ids].detach(), + target=ref_wav[new_ids].detach(), + lengths=lens[new_ids], + ) + score = torch.tensor( + [[s] for s in self.target_metric.scores], device=self.device + ) + elif self.hparams.target_metric == "stoi": + self.target_metric.append( + [batch_id[i] for i in new_ids], + deg_wav[new_ids], + ref_wav[new_ids], + lens[new_ids], + reduction="batch", + ) + score = torch.tensor( + [[-s] for s in self.target_metric.scores], + device=self.device, + ) + else: + raise ValueError("Expected 'pesq' or 'stoi' for target_metric") + + # Clear metric scores to prepare for next batch + self.target_metric.clear() + + # Combine old scores and new + final_score = [] + for i, d in enumerate(batch_id): + if d in self.historical_set: + final_score.append([self.historical_set[d]["score"]]) + elif d in self.noisy_scores: + final_score.append([self.noisy_scores[d]]) + else: + final_score.append([score[new_ids.index(i)]]) + + return torch.tensor(final_score, 
device=self.device) + + def est_score(self, deg_spec, ref_spec): + """Returns score as estimated by discriminator + + Arguments + --------- + deg_spec : torch.Tensor + The spectral features of the degraded utterance + ref_spec : torch.Tensor + The spectral features of the reference utterance + + Returns + ------- + est_score : torch.Tensor + """ + combined_spec = torch.cat( + [deg_spec.unsqueeze(1), ref_spec.unsqueeze(1)], 1 + ) + return self.modules.discriminator(combined_spec) + + def write_wavs(self, batch_id, wavs, clean_paths, scores, lens): + """Write wavs to files, for historical discriminator training + + Arguments + --------- + batch_id : list of str + A list of the utterance ids for the batch + wavs : torch.Tensor + The wavs to write to files + clean_paths : list of str + The paths to the clean wavs + scores : torch.Tensor + The actual scores for the corresponding utterances + lens : torch.Tensor + The relative lengths of each utterance + """ + lens = lens * wavs.shape[1] + record = {} + for i, (name, pred_wav, clean_path, length) in enumerate( + zip(batch_id, wavs, clean_paths, lens) + ): + path = os.path.join(self.hparams.MetricGAN_folder, name + ".wav") + data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0) + audio_io.save(path, data.detach(), self.hparams.Sample_rate) + + # Make record of path and score for historical training + score = float(scores[i][0]) + record[name] = { + "enh_wav": path, + "score": score, + "clean_wav": clean_path, + } + + # Update records for historical training + self.historical_set.update(record) + + with open(self.hparams.historical_file, "wb") as fp: # Pickling + pickle.dump(self.historical_set, fp) + + def fit_batch(self, batch): + "Compute gradients and update either D or G based on sub-stage." 
+ predictions = self.compute_forward(batch, sb.Stage.TRAIN) + loss_tracker = 0 + if self.sub_stage == SubStage.CURRENT: + for mode in ["clean", "enh", "noisy"]: + loss = self.compute_objectives( + predictions, batch, sb.Stage.TRAIN, f"D_{mode}" + ) + self.d_optimizer.zero_grad() + loss.backward() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.d_optimizer.step() + loss_tracker += loss.detach() / 3 + elif self.sub_stage == SubStage.HISTORICAL: + loss = self.compute_objectives( + predictions, batch, sb.Stage.TRAIN, "D_enh" + ) + self.d_optimizer.zero_grad() + loss.backward() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.d_optimizer.step() + loss_tracker += loss.detach() + elif self.sub_stage == SubStage.GENERATOR: + for name, param in self.modules.generator.named_parameters(): + if "Learnable_sigmoid" in name: + param.data = torch.clamp( + param, max=3.5 + ) # to prevent gradient goes to infinity + param.data[param != param] = 3.5 # set 'nan' to 3.5 + + loss = self.compute_objectives( + predictions, batch, sb.Stage.TRAIN, "generator" + ) + + self.g_optimizer.zero_grad() + loss.backward() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.g_optimizer.step() + loss_tracker += loss.detach() + + return loss_tracker + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch + + This method calls ``fit()`` again to train the discriminator + before proceeding with generator training. 
+ """ + + self.mse_metric = MetricStats(metric=self.hparams.compute_cost) + self.metrics = {"G": [], "D": []} + + if stage == sb.Stage.TRAIN: + if self.hparams.target_metric == "pesq": + self.target_metric = MetricStats( + metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False + ) + elif self.hparams.target_metric == "stoi": + self.target_metric = MetricStats(metric=stoi_loss) + else: + raise NotImplementedError( + "Right now we only support 'pesq' and 'stoi'" + ) + + # Train discriminator before we start generator training + if self.sub_stage == SubStage.GENERATOR: + self.epoch = epoch + self.train_discriminator() + self.sub_stage = SubStage.GENERATOR + print("Generator training by current data...") + + if stage != sb.Stage.TRAIN: + self.pesq_metric = MetricStats( + metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False + ) + self.stoi_metric = MetricStats(metric=stoi_loss) + + def train_discriminator(self): + """A total of 3 data passes to update discriminator.""" + # First, iterate train subset w/ updates for clean, enh, noisy + print("Discriminator training by current data...") + self.sub_stage = SubStage.CURRENT + self.fit( + range(1), + self.train_set, + train_loader_kwargs=self.hparams.dataloader_options, + ) + + # Next, iterate historical subset w/ updates for enh + if self.historical_set: + print("Discriminator training by historical data...") + self.sub_stage = SubStage.HISTORICAL + self.fit( + range(1), + self.historical_set, + train_loader_kwargs=self.hparams.dataloader_options, + ) + + # Finally, iterate train set again. 
Should iterate same + # samples as before, due to ReproducibleRandomSampler + print("Discriminator training by current data again...") + self.sub_stage = SubStage.CURRENT + self.fit( + range(1), + self.train_set, + train_loader_kwargs=self.hparams.dataloader_options, + ) + + def on_stage_end(self, stage, stage_loss, epoch=None): + "Called at the end of each stage to summarize progress" + if self.sub_stage != SubStage.GENERATOR: + return + + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + g_loss = torch.tensor(self.metrics["G"]) # batch_size + d_loss = torch.tensor(self.metrics["D"]) # batch_size + print("Avg G loss: %.3f" % torch.mean(g_loss)) + print("Avg D loss: %.3f" % torch.mean(d_loss)) + else: + stats = { + "SI-SNR": -stage_loss, + "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, + "stoi": -self.stoi_metric.summarize("average"), + } + + if stage == sb.Stage.VALID: + if self.hparams.use_tensorboard: + valid_stats = { + "SI-SNR": -stage_loss, + "pesq": 5 * self.pesq_metric.summarize("average") - 0.5, + "stoi": -self.stoi_metric.summarize("average"), + } + self.hparams.tensorboard_train_logger.log_stats(valid_stats) + self.hparams.train_logger.log_stats( + {"Epoch": epoch}, + train_stats={"loss": self.train_loss}, + valid_stats=stats, + ) + self.checkpointer.save_and_keep_only( + meta=stats, max_keys=[self.hparams.target_metric] + ) + + if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stats, + ) + + def make_dataloader( + self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs + ): + "Override dataloader to insert custom sampler/dataset" + if stage == sb.Stage.TRAIN: + # Create a new dataset each time, this set grows + if self.sub_stage == SubStage.HISTORICAL: + dataset = sb.dataio.dataset.DynamicItemDataset( + data=dataset, + dynamic_items=[enh_pipeline], + output_keys=[ + "id", + "enh_sig", + "clean_sig", + "score", + "clean_wav", + ], + ) + 
samples = round(len(dataset) * self.hparams.history_portion) + samples = max(samples, 1) # Ensure there's at least one sample + else: + samples = self.hparams.number_of_samples + + # This sampler should give the same samples for D and G + epoch = self.hparams.epoch_counter.current + + # Equal weights for all samples, we use "Weighted" so we can do + # both "replacement=False" and a set number of samples, reproducibly + weights = torch.ones(len(dataset)) + replacement = samples > len(dataset) + sampler = ReproducibleWeightedRandomSampler( + weights, + epoch=epoch, + replacement=replacement, + num_samples=samples, + ) + loader_kwargs["sampler"] = sampler + + if self.sub_stage == SubStage.GENERATOR: + self.train_sampler = sampler + + # Make the dataloader as normal + return super().make_dataloader( + dataset, stage, ckpt_prefix, **loader_kwargs + ) + + def on_fit_start(self): + "Override to prevent this from running for D training" + if self.sub_stage == SubStage.GENERATOR: + super().on_fit_start() + + def init_optimizers(self): + "Initializes the generator and discriminator optimizers" + self.g_optimizer = self.hparams.g_opt_class( + self.modules.generator.parameters() + ) + self.d_optimizer = self.hparams.d_opt_class( + self.modules.discriminator.parameters() + ) + + if self.checkpointer is not None: + self.checkpointer.add_recoverable("g_opt", self.g_optimizer) + self.checkpointer.add_recoverable("d_opt", self.d_optimizer) + + def zero_grad(self, set_to_none=False): + self.g_optimizer.zero_grad(set_to_none) + self.d_optimizer.zero_grad(set_to_none) + + +# Define audio pipelines +@sb.utils.data_pipeline.takes("noisy_wav", "clean_wav") +@sb.utils.data_pipeline.provides("noisy_sig", "clean_sig") +def audio_pipeline(noisy_wav, clean_wav): + yield sb.dataio.dataio.read_audio(noisy_wav) + yield sb.dataio.dataio.read_audio(clean_wav) + + +# For historical data +@sb.utils.data_pipeline.takes("enh_wav", "clean_wav") +@sb.utils.data_pipeline.provides("enh_sig", "clean_sig") 
+def enh_pipeline(enh_wav, clean_wav): + yield sb.dataio.dataio.read_audio(enh_wav) + yield sb.dataio.dataio.read_audio(clean_wav) + + +def dataio_prep(hparams): + """This function prepares the datasets to be used in the brain class.""" + + # Define datasets + datasets = {} + data_info = { + "train": hparams["train_annotation"], + "valid": hparams["valid_annotation"], + "test": hparams["test_annotation"], + } + for dataset in data_info: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[audio_pipeline], + output_keys=["id", "noisy_sig", "clean_sig", "clean_wav"], + ) + + return datasets + + +def create_folder(folder): + if not os.path.isdir(folder): + os.makedirs(folder) + + +# Recipe begins! +if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Data preparation + from voicebank_prepare import prepare_voicebank # noqa + + run_on_main( + prepare_voicebank, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["data_folder"], + "skip_prep": hparams["skip_prep"], + }, + ) + + # Create dataset objects + datasets = dataio_prep(hparams) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs"] + ) + + # Create the folder to save enhanced files (+ support for DDP) + run_on_main(create_folder, kwargs={"folder": 
hparams["enhanced_folder"]}) + + se_brain = MetricGanBrain( + modules=hparams["modules"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + se_brain.train_set = datasets["train"] + se_brain.historical_set = {} + se_brain.noisy_scores = {} + se_brain.batch_size = hparams["dataloader_options"]["batch_size"] + se_brain.sub_stage = SubStage.GENERATOR + + if not os.path.isfile(hparams["historical_file"]): + shutil.rmtree(hparams["MetricGAN_folder"]) + run_on_main(create_folder, kwargs={"folder": hparams["MetricGAN_folder"]}) + + se_brain.load_history() + # Load latest checkpoint to resume training + se_brain.fit( + epoch_counter=se_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["valid_dataloader_options"], + ) + + # Load best checkpoint for evaluation + test_stats = se_brain.evaluate( + test_set=datasets["test"], + max_key=hparams["target_metric"], + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/Voicebank/enhance/SEGAN/README.md b/recipes/Voicebank/enhance/SEGAN/README.md index 7fc39bcc27..86385e4de6 100644 --- a/recipes/Voicebank/enhance/SEGAN/README.md +++ b/recipes/Voicebank/enhance/SEGAN/README.md @@ -10,7 +10,7 @@ python train.py hparams/train.yaml # Results | Release | hyperparams file | Test PESQ | Test STOI | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| :-----------:| -| 2021-07-10 | train.yaml | 2.38 | 0.923 | https://drive.google.com/drive/folders/1gLxbH59LpMJFhvGHLPsVlX_MP2lcwVC8?usp=sharing | 1xV100 16GB | +| 2021-07-10 | train.yaml | 2.38 | 0.923 | https://www.dropbox.com/sh/ez0folswdbqiad4/AADDasepeoCkneyiczjCcvaOa?dl=0 | 1xV100 16GB | # Training Time About 2 min and 30 sec for each epoch with a TESLA V100. @@ -41,6 +41,15 @@ About 2 min and 30 sec for each epoch with a TESLA V100. 
Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/enhance/SEGAN/hparams/train.yaml b/recipes/Voicebank/enhance/SEGAN/hparams/train.yaml index a6bf16839b..ccb915f3d9 100644 --- a/recipes/Voicebank/enhance/SEGAN/hparams/train.yaml +++ b/recipes/Voicebank/enhance/SEGAN/hparams/train.yaml @@ -12,9 +12,9 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] -data_folder: !PLACEHOLDED #e.g., /localscratch/noisy-vctk-16k +data_folder: !PLACEHOLDER #e.g., /localscratch/noisy-vctk-16k # 
test_clean_folder: !ref /clean_testset_wav_16k/ output_folder: !ref results/SEGAN/ @@ -22,7 +22,7 @@ save_folder: !ref /save train_log: !ref /train_log.txt enhanced_folder: !ref /enhanced -# Tensorboar (optional) +# torch.Tensorboar (optional) use_tensorboard: False tensorboard_logs: !ref /logs/ diff --git a/recipes/Voicebank/enhance/SEGAN/train.py b/recipes/Voicebank/enhance/SEGAN/train.py index 25c671ef59..0195f0fddb 100644 --- a/recipes/Voicebank/enhance/SEGAN/train.py +++ b/recipes/Voicebank/enhance/SEGAN/train.py @@ -12,14 +12,16 @@ import os import sys + import torch -import torchaudio -import speechbrain as sb -from pesq import pesq from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import MetricStats +from pesq import pesq + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.nnet.loss.stoi_loss import stoi_loss from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.metric_stats import MetricStats # Brain class for speech enhancement training @@ -33,12 +35,12 @@ def compute_forward_g(self, noisy_wavs): def compute_forward_d(self, noisy_wavs, clean_wavs): """Forward computations from discriminator. 
Input denoised-noisy pair, - output whether denoising was properly acheived""" + output whether denoising was properly achieved""" noisy_wavs = noisy_wavs.to(self.device) clean_wavs = clean_wavs.to(self.device) - inpt = torch.cat((noisy_wavs, clean_wavs), -1) - out = self.modules["model_d"](inpt) + input = torch.cat((noisy_wavs, clean_wavs), -1) + out = self.modules["model_d"](input) return out def compute_objectives_d1(self, d_outs, batch): @@ -94,7 +96,6 @@ def compute_objectives_g3( ) if stage != sb.Stage.TRAIN: - # Evaluate speech quality/intelligibility predict_wavs = predict_wavs.reshape(self.batch_current, -1) clean_wavs = clean_wavs.reshape(self.batch_current, -1) @@ -120,7 +121,7 @@ def compute_objectives_g3( print(enhance_path) pred_wav = pred_wav / torch.max(torch.abs(pred_wav)) * 0.99 - torchaudio.save( + audio_io.save( enhance_path, pred_wav[: int(length)].cpu().unsqueeze(0), hparams["sample_rate"], @@ -167,8 +168,10 @@ def fit_batch(self, batch): out_d1 = self.compute_forward_d(noisy_wavs, clean_wavs) loss_d1 = self.compute_objectives_d1(out_d1, batch) loss_d1.backward() - if self.check_gradients(loss_d1): - self.optimizer_d.step() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.optimizer_d.step() self.optimizer_d.zero_grad() # second training step @@ -181,8 +184,10 @@ def fit_batch(self, batch): out_d2 = self.compute_forward_d(out_g2, clean_wavs) loss_d2 = self.compute_objectives_d2(out_d2, batch) loss_d2.backward(retain_graph=True) - if self.check_gradients(loss_d2): - self.optimizer_d.step() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + self.optimizer_d.step() self.optimizer_d.zero_grad() # third (last) training step @@ -198,8 +203,10 @@ def fit_batch(self, batch): z_logvar=z_logvar, ) loss_g3.backward() - if self.check_gradients(loss_g3): - self.optimizer_g.step() + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), self.max_grad_norm + ) + 
self.optimizer_g.step() self.optimizer_g.zero_grad() self.optimizer_d.zero_grad() @@ -310,6 +317,10 @@ class being passed at initialization that takes only a list "optimizer_d", self.optimizer_d ) + def zero_grad(self, set_to_none=False): + self.optimizer_d.zero_grad(set_to_none) + self.optimizer_g.zero_grad(set_to_none) + def on_stage_start(self, stage, epoch=None): """Gets called at the beginning of each epoch""" self.loss_metric_d1 = MetricStats( @@ -392,9 +403,10 @@ def create_chunks(x, chunk_size=16384, chunk_stride=16384): def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ - # Define audio piplines + # Define audio pipelines @sb.utils.data_pipeline.takes("noisy_wav") @sb.utils.data_pipeline.provides("noisy_sig") def noisy_pipeline(noisy_wav): @@ -444,10 +456,9 @@ def create_folder(folder): # Recipe begins! if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) diff --git a/recipes/Voicebank/enhance/SGMSE/README.md b/recipes/Voicebank/enhance/SGMSE/README.md new file mode 100644 index 0000000000..0f68ae7c45 --- /dev/null +++ b/recipes/Voicebank/enhance/SGMSE/README.md @@ -0,0 +1,90 @@ +# VoiceBank Speech Enhancement with SGMSE +This recipe implements a speech enhancement system based on the SGMSE architecture using the VoiceBank dataset (based on the paper: [https://arxiv.org/abs/2208.05830](https://arxiv.org/abs/2208.05830)). 
+ +## Results + +Experiment Date | PESQ | SI-SDR | STOI +-|-|-|- +2025-07-24 | 2.78 | 17.8 | 95.7 + +You can find the full experiment folder (i.e., checkpoints, logs, etc) here: +https://www.dropbox.com/scl/fo/bi8sln2de6ep8nrv38jt5/ACWQAOAIsYSMyjhcu2ZSavc?rlkey=xtqlon9xjcy43ghncnlbtruii&st=sql8s5r8&dl=0 + +## How to Run +### Training + +To train the SGMSE speech enhancement model, execute: + +```bash +python recipes/Voicebank/enhance/SGMSE/train.py recipes/Voicebank/enhance/SGMSE/hparams.yaml +``` + +This will: + +* Prepare the VoiceBank dataset automatically (if not already prepared). +* Train the model based on hyperparameters defined in `hparams.yaml`. +* Create a `run_name`, unique to each run. +* Store checkpoints, logs, and validation / testing samples in `output_dir/run_name` (specified within the `hparams.yaml` file). + +### Resume Training from a previous run + +Point --resume to the existing run directory (the folder that contains hyperparams.yaml and checkpoints): + +```bash +python recipes/Voicebank/enhance/SGMSE/train.py --resume path/to/results/run_YYYY-MM-DD_HH-MM-SS +``` + +When --resume is provided: + +* The script loads hyperparams.yaml from the given run directory and uses that saved configuration. +* Training continues from the latest checkpoint in that directory (if present), keeping the same run_name. +* CLI overrides still work, but a new run_name is not generated. 
+ + +### Inference (Speech Enhancement) +You can enhance single audio files or entire directories using a trained model: + +* **Single-file enhancement:** + +```bash +python recipes/Voicebank/enhance/SGMSE/enhancement.py --run_dir /path/to/trained_model noisy_audio.wav +``` + +* **Batch enhancement (whole directory):** + +```bash +python recipes/Voicebank/enhance/SGMSE/enhancement.py --run_dir /path/to/trained_model /path/to/noisy_directory +``` + +Enhanced audio files will be stored in a newly created subdirectory specified in `inference_dir` within the `hparams.yaml` file, preserving the original filenames. + +## Results and Outputs +During training, all results and model checkpoints are saved in: + +``` +// +``` + +During inference, enhanced audio outputs are saved in: + +``` +/// +``` + +## About SpeechBrain +* Website: [https://speechbrain.github.io/](https://speechbrain.github.io/) +* Code: [https://github.com/speechbrain/speechbrain/](https://github.com/speechbrain/speechbrain/) +* HuggingFace: [https://huggingface.co/speechbrain/](https://huggingface.co/speechbrain/) + +## Citing SGMSE +```bibtex +@article{richter2023speech, + title={Speech enhancement and dereverberation with diffusion-based generative models}, + author={Richter, Julius and Welker, Simon and Lemercier, Jean-Marie and Lay, Bunlong and Gerkmann, Timo}, + journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing}, + volume={31}, + pages={2351--2364}, + year={2023}, + publisher={IEEE} +} +``` diff --git a/recipes/Voicebank/enhance/SGMSE/enhance.py b/recipes/Voicebank/enhance/SGMSE/enhance.py new file mode 100644 index 0000000000..c3913def76 --- /dev/null +++ b/recipes/Voicebank/enhance/SGMSE/enhance.py @@ -0,0 +1,118 @@ +""" +Single-file or batch speech enhancement with SGMSE. 
+Single file: +python enhance.py --run_dir /path/to/run noisy.wav + +Whole directory: +python enhance.py --run_dir /path/to/run /path/to/noisy_dir +""" + +import argparse +import sys +from pathlib import Path + +import torch +import torchaudio +from hyperpyyaml import load_hyperpyyaml +from train import SGMSEBrain + +from speechbrain.utils.checkpoints import Checkpointer + + +# Helpers +def is_audio_file(path): + return path.suffix.lower() in {".wav", ".flac", ".ogg"} + + +def collect_audio_files(src): + return [p for p in src.iterdir() if p.is_file() and is_audio_file(p)] + + +def main(): + parser = argparse.ArgumentParser( + description="Run SGMSE enhancement (torchaudio I/O)" + ) + parser.add_argument( + "--run_dir", + "-r", + type=Path, + required=True, + help="Path to the trained run directory (the folder that " + "contains hyperparams.yaml and checkpoints/).", + ) + parser.add_argument( + "input", + type=Path, + help="Path to a noisy audio file OR a directory of audio files.", + ) + args = parser.parse_args() + + run_dir = args.run_dir.expanduser().resolve() + if not run_dir.exists(): + sys.exit(f"--run_dir '{run_dir}' does not exist.") + + hparams_file = run_dir / "hyperparams.yaml" + checkpoints_dir = run_dir / "checkpoints" + + with open(hparams_file, encoding="utf-8") as f: + hparams = load_hyperpyyaml(f) + + target_sr = hparams["sample_rate"] + inference_dir = Path(run_dir / "enhanced_inference") + inference_dir.mkdir(parents=True, exist_ok=True) + + modules = hparams["modules"] + brain = SGMSEBrain( + modules=modules, + hparams=hparams, + run_opts={"device": "cuda" if torch.cuda.is_available() else "cpu"}, + checkpointer=Checkpointer( + checkpoints_dir=checkpoints_dir, + recoverables={"score_model": modules["score_model"]}, + ), + ) + brain.setup_inference() # loads latest checkpoint, ema ... 
+ + # Enhancement routine + def enhance_file(noisy_path, dst_dir): + wav, sr = torchaudio.load(noisy_path) + if sr != target_sr: + wav = torchaudio.functional.resample(wav, sr, target_sr) + + if wav.shape[0] > 1: + wav = wav.mean(0, keepdim=True) + + with torch.no_grad(): + wav = wav.to(brain.device) + enhanced = brain.enhance(wav).cpu() + + out_path = dst_dir / f"{noisy_path.stem}_enhanced{noisy_path.suffix}" + torchaudio.save(out_path.as_posix(), enhanced, target_sr, format="wav") + return out_path + + src = args.input.expanduser().resolve() + + if src.is_file(): + if not is_audio_file(src): + sys.exit(f"{src} is not a supported audio file.") + out_path = enhance_file(src, inference_dir) + print(f"Enhanced file written to {out_path}") + + elif src.is_dir(): + files = collect_audio_files(src) + if not files: + sys.exit(f"{src} contains no enhanceable audio files.") + + batch_out_dir = inference_dir / f"{src.name}_enhanced" + batch_out_dir.mkdir(parents=True, exist_ok=True) + + print(f"Enhancing {len(files)} file(s) > {batch_out_dir}") + for idx, fpath in enumerate(files, 1): + out_path = enhance_file(fpath, batch_out_dir) + print(f"[{idx}/{len(files)}] > {out_path}") + else: + sys.exit(f"{src} is neither a file nor a directory.") + + +if __name__ == "__main__": + main() diff --git a/recipes/Voicebank/enhance/SGMSE/extra_requirements.txt b/recipes/Voicebank/enhance/SGMSE/extra_requirements.txt new file mode 100644 index 0000000000..028676fc8a --- /dev/null +++ b/recipes/Voicebank/enhance/SGMSE/extra_requirements.txt @@ -0,0 +1,29 @@ +gdown +h5py +hyperpyyaml +ipympl +librosa +ninja +numpy<2.0 +pandas +pesq +pillow +protobuf +pyarrow +pyroomacoustics +pystoi +scipy +sdeint +seaborn +setuptools +git+https://github.com/sp-uhh/sgmse.git@main#egg=sgmse +tensorboard +torch +torch-ema +torch-pesq +torchaudio +torchinfo +torchmetrics +torchsde +torchvision +tqdm diff --git a/recipes/Voicebank/enhance/SGMSE/hparams.yaml b/recipes/Voicebank/enhance/SGMSE/hparams.yaml new 
file mode 100644 index 0000000000..c41546388b --- /dev/null +++ b/recipes/Voicebank/enhance/SGMSE/hparams.yaml @@ -0,0 +1,88 @@ +output_folder: results # Main directory to store experiment results +run_name: "RUN_NAME" # Will be updated with a unique name at runtime + +save_dir: !ref //checkpoints # Directory to save checkpoints +enhanced_dir: !ref //enhanced_training # Directory to store waveforms at validation during training + +data_dir: !PLACEHOLDER # Root dir for the dataset +train_annotation: !ref /train.json # JSON file listing training samples +valid_annotation: !ref /valid.json # JSON file listing validation samples +test_annotation: !ref /test.json # JSON file listing test samples + +skip_prep: False # If True, skip data preparation steps +segment_frames: 256 # Number of STFT frames fed into the model. Has to align with what the model ‘wants’ to see due to u net architecture +random_crop: True # Whether to crop segments randomly from longer waveforms in training +random_crop_valid: False # Whether to crop segments randomly from longer waveforms in validation +random_crop_test: False # Whether to crop segments randomly from longer waveforms in testing + +normalize: noisy # Waveforms are normalized with respect to ... 
(noisy / clean / not) +sample_rate: 16000 # Sampling rate (in Hz) for audio data +batch_size: 8 # Batch size for the training set +number_of_epochs: 160 # Total epochs to train +num_to_keep: 2 # Numbers of checkpoints to keep +lr: 0.0001 # Learning rate +sorting: ascending # Sorting strategy for data loading (e.g., ascending, descending) + +n_fft: 510 # FFT size for STFT +hop_length: 128 # Hop length (stride) for STFT +window_type: hann # Type of window function for STFT + +transform_type: exponent # Type of spectral transform (log, exponent, none) +spec_factor: 0.15 # Factor to scale the transformed spectrogram +spec_abs_exponent: 0.5 # Exponent to apply to spectrogram magnitude if needed + +train_dataloader_opts: + batch_size: !ref + shuffle: True # Shuffle training data each epoch + +valid_dataloader_opts: + batch_size: 1 # Validation batch size + +test_dataloader_opts: + batch_size: 1 # Test batch size + +sampling: + sampler_type: pc + predictor: reverse_diffusion + corrector: ald + N: 30 + corrector_steps: 1 + snr: 0.5 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref # Sets the upper bound on training epochs + +modules: + score_model: !new:speechbrain.integrations.models.sgmse_plus.ScoreModel + backbone: ncsnpp_v2 # Name of the backbone neural network architecture + sde: ouve # Which SDE to use (Ornstein-Uhlenbeck VE SDE) + theta: 1.5 # Stiffness parameter for the OU SDE + sigma_min: 0.05 # Minimum sigma value for OU SDE + sigma_max: 0.5 # Maximum sigma value for OU SDE + lr: !ref # Learning rate for the model + ema_decay: 0.999 # Decay factor for EMA of model parameters + t_eps: 0.03 # Min time-step to avoid zero in continuous diffusion + num_eval_files: 5 # Number of files to process for evaluation + loss_type: score_matching # Which loss approach to use (score matching, etc.) 
+ loss_weighting: sigma^2 # Weighting in the loss function + network_scaling: 1/t # Scaling strategy (if any) for network outputs + c_in: "1" # Input scaling scheme + c_out: "1" # Output scaling scheme + c_skip: "0" # Skip connection scaling scheme + sigma_data: 0.1 # Data STD for EDM-based parameterizations + l1_weight: 0.001 # Weight factor for L1 (time-domain) loss + pesq_weight: 0.0 # Weight factor for PESQ-based loss (0 = disabled) + N: !ref # Sampler steps + corrector_steps: !ref # Corrector updates per step + sampler_type: !ref # SDE sampler type + snr: !ref # SNR for sampler + sr: !ref # Sample rate for model references + +opt_class: !name:torch.optim.Adam + lr: !ref # LR used in the Adam optimizer + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref # Directory to store checkpoint files + recoverables: + score_model: !ref # Model parameters to be saved + counter: !ref # Epoch counter to be saved diff --git a/recipes/Voicebank/enhance/SGMSE/train.py b/recipes/Voicebank/enhance/SGMSE/train.py new file mode 100644 index 0000000000..9a98cf3a13 --- /dev/null +++ b/recipes/Voicebank/enhance/SGMSE/train.py @@ -0,0 +1,868 @@ +import argparse +import os +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.nn.functional as F +from hyperpyyaml import load_hyperpyyaml +from pesq import pesq +from sgmse.util.other import pad_spec +from torch.utils.tensorboard import SummaryWriter +from torchmetrics.functional.audio import ( + scale_invariant_signal_distortion_ratio as si_sdr, + short_time_objective_intelligibility as stoi_tm, +) + +import speechbrain as sb +from speechbrain.dataio.dataio import write_audio +from speechbrain.utils.metric_stats import MetricStats + + +class SGMSEBrain(sb.Brain): + """ + A Brain class to train an SGMSE-based diffusion model. + """ + + def on_fit_start(self): + """ + Called once in the beginning of training. 
+ """ + super().on_fit_start() + + self.writer = SummaryWriter(log_dir=self.hparams.save_dir) + + ema = self.modules["score_model"].ema + self.checkpointer.add_recoverable( + name="ema", + obj=ema, + custom_save_hook=lambda obj, path: torch.save( + obj.state_dict(), path + ), + custom_load_hook=lambda obj, path, end: obj.load_state_dict( + torch.load(path) + ), + ) + + # STFT + n_fft = self.hparams.n_fft + hop_length = self.hparams.hop_length + window_type = self.hparams.window_type + self.window = self.get_window(window_type, n_fft).to(self.device) + self.stft_kwargs = { + "n_fft": n_fft, + "hop_length": hop_length, + "center": True, + "return_complex": True, + } + + def setup_inference(self): + """ + Called once from inference script. + Loads the checkpoint, restores the EMA shadow weights, + swaps them into the DNN, and prepares STFT objects. + """ + ema_obj = self.modules["score_model"].ema + if "ema" not in self.checkpointer.recoverables: + self.checkpointer.add_recoverable( + name="ema", + obj=ema_obj, + custom_save_hook=lambda o, p: torch.save(o.state_dict(), p), + custom_load_hook=lambda o, p, end: o.load_state_dict( + torch.load(p) + ), + ) + + # Load checkpoint + self.checkpointer.recover_if_possible() + + # Store EMA + self.modules["score_model"].store_ema() + + # STFT + n_fft = self.hparams.n_fft + hop = self.hparams.hop_length + win_t = self.hparams.window_type + self.window = self.get_window(win_t, n_fft).to(self.device) + self.stft_kwargs = dict( + n_fft=n_fft, hop_length=hop, center=True, return_complex=True + ) + + def _step(self, x, y, model): + """ + Perform a single diffusion step for the score-based model. + + This function samples a random time-step for each input in the batch, computes the corresponding + marginal probability of the clean signal using the SDE, adds noise to generate a noisy version + of the input (x_t), and obtains the model's prediction from this noisy input. 
+ + Arguments + --------- + x: torch.Tensor + Clean input signal spectrogram, of shape (B, 1, F, T). + y: torch.Tensor + Conditioning or auxiliary input spectrogram, of shape (B, 1, F, T). + model: nn.Module + Score-based generative model that contains the SDE and the forward method. + + Returns + ------- + forward_out: torch.Tensor + Model's prediction computed from the noisy input x_t, of shape (B, 1, F, T). + x_t: torch.Tensor + Noisy version of the clean input generated using the SDE, of shape (B, 1, F, T). + z: torch.Tensor + Noise tensor sampled from a standard normal distribution, of shape (B, 1, F, T). + t: torch.Tensor + Randomly sampled time-step for each sample in the batch, of shape (B,). + mean: torch.Tensor + Mean of the marginal probability distribution for the clean input, of shape (B, 1, F, T). + x: torch.Tensor + Clean input signal spectrogram, of shape (B, 1, F, T). + """ + t = ( + torch.rand(x.shape[0], device=x.device) + * (model.sde.T - model.t_eps) + + model.t_eps + ) # (B,) + mean, std = model.sde.marginal_prob( + x, y, t + ) # (B,1,F,T) and (B,) respectively + z = torch.randn_like( + x + ) # (B,1,F,T), i.i.d. normal distributed with var=0.5 + sigma = std[:, None, None, None] # (B,1,1,1) + x_t = mean + sigma * z # (B,1,F,T) + forward_out = model(x_t, y, t) + + return { + "forward_out": forward_out, + "x_t": x_t, + "z": z, + "t": t, + "mean": mean, + "x": x, + } + + def compute_forward(self, batch, stage): + """ + Compute forward pass for a given batch. + + This method obtains waveforms from the batch, applies STFT and any spectral + transformations, and calls `_step` to perform a single diffusion step. + During validation or test stages, it may also perform a "full enhancement" + process on a subset of files. + + Arguments + --------- + batch: speechbrain.dataio.batch.PaddedBatch + A batch of data containing + clean and noisy signals, among other possible fields. + stage: sb.Stage + The current stage (TRAIN, VALID, TEST). 
+ + Returns + ------- + outs: dict + A dictionary containing the forward pass outputs (including + the model prediction and any enhanced waveforms if generated). + """ + # Model and batch preparation + model = self.modules["score_model"] + batch = batch.to(self.device) + + # Extract waveforms + x_wav = batch.clean_sig.data # (B,S) + y_wav = batch.noisy_sig.data # (B,S) + + # STFT, Spec transformations, adding channel dim + x = self.spec_fwd(self.stft(x_wav)).unsqueeze(1) # (B,1,F,T) + y = self.spec_fwd(self.stft(y_wav)).unsqueeze(1) # (B,1,F,T) + + outs = self._step(x, y, model) + + # TRAIN: never run enhancement + if stage == sb.Stage.TRAIN: + return outs + + # VALID: only enhance up to eval_files_left + if stage == sb.Stage.VALID: + if self.eval_files_left <= 0: + # nothing left to do in VALID + return outs + + # How many files from current batch shall we process? + B = y_wav.size(0) + take = min(B, self.eval_files_left) + self.eval_files_left -= take + + # Slice to that number + x_wav = x_wav[:take] # (num_eval_files,S) + y_wav = y_wav[:take] # (num_eval_files,S) + uttids = batch.id[:take] + + # TEST: enhance everything + if stage == sb.Stage.TEST: + uttids = batch.id + + # Save original length in time dimension + T_orig_wav = y_wav.size(1) + + # Enhancement + x_hat = model.enhance( + y, + sampler_type=self.hparams.sampling["sampler_type"], + predictor=self.hparams.sampling["predictor"], + corrector=self.hparams.sampling["corrector"], + N=self.hparams.sampling["N"], + corrector_steps=self.hparams.sampling["corrector_steps"], + snr=self.hparams.sampling["snr"], + ) # (num_files, 1, F, T) + + # Unsqueeze channel dim + x_hat = x_hat.squeeze(1) + x = x.squeeze(1) + + # Reverse spech transformations + x_hat = self.spec_back(x_hat) # (num_files, F, T) + x = self.spec_back(x) # (num_files, F, T) + + # iSTFT + x_hat_wav = self.istft(x_hat, T_orig_wav) # (num_files, S) + x_wav = self.istft(x, T_orig_wav) # (num_files, S) + + outs.update( + { + "x_hat_wav": x_hat_wav, # 
enhanced + "x_wav": x_wav, # clean + "y_wav": y_wav, # noisy + "uttids": uttids, # so compute_objectives can see them + } + ) + + return outs + + def compute_objectives(self, predictions, batch, stage): + """ + Computes the diffusion loss and optionally processes enhanced waveforms. + + This method takes the outputs of `compute_forward` (which include + the model's prediction and possibly enhanced waveforms), computes + the loss for training or collects metrics for validation/testing. + + Arguments + --------- + predictions: dict + Dictionary containing forward pass outputs, + e.g. from `_step`. + batch: speechbrain.dataio.batch.PaddedBatch + The current batch, which + can be used for retrieving IDs or additional data if needed. + stage: sb.Stage + The current stage (TRAIN, VALID, TEST). + + Returns + ------- + loss: torch.Tensor + The computed diffusion loss for this batch. + """ + model = self.modules["score_model"] + + # Extract items from predictions + forward_out = predictions["forward_out"] # (B,1,F,T) + x_t = predictions["x_t"] # (B,1,F,T) + z = predictions["z"] # (B,1,F,T) + t = predictions["t"] # (B,) + mean = predictions["mean"] # (B,1,F,T) + x = predictions["x"] # (B,1,F,T) + + # Pass the necessary inputs to the model loss + loss = model.compute_loss( + forward_out, x_t, z, t, mean, x, to_audio_func=self.to_audio + ) + self.loss_metric.append(batch.id, forward_out, x_t, z, t, mean, x) + + # Only process enhanced wavs in VALID and TEST + if stage != sb.Stage.TRAIN: + x_wav = predictions.get("x_wav", None) + x_hat_wav = predictions.get("x_hat_wav", None) + y_wav = predictions.get("y_wav", None) + uttids = predictions.get("uttids", None) + + if x_wav is not None: + # STOI + self.stoi_metric.append(batch.id, x_hat_wav, x_wav) + + # SISDR + self.sisdr_metric.append(batch.id, x_hat_wav, x_wav) + + # PESQ + x_wav_cpu = x_wav.cpu() + x_hat_wav_cpu = x_hat_wav.cpu() + y_wav_cpu = y_wav.cpu() + self.pesq_metric.append( + batch.id, predict=x_hat_wav_cpu, 
target=x_wav_cpu + ) + + sr = self.hparams.sample_rate + save_dir = self.hparams.enhanced_dir + os.makedirs(save_dir, exist_ok=True) + + epoch_tag = ( + f"ep{self.hparams.epoch_counter.current}" + if stage == sb.Stage.VALID + else "test" + ) + for i, uid in enumerate(uttids): + clean_path = os.path.join( + save_dir, f"{epoch_tag}_{uid}_clean.wav" + ) + enh_path = os.path.join( + save_dir, f"{epoch_tag}_{uid}_enhanced.wav" + ) + noisy_path = os.path.join( + save_dir, f"{epoch_tag}_{uid}_noisy.wav" + ) + + write_audio(clean_path, x_wav_cpu[i], sr) + write_audio(enh_path, x_hat_wav_cpu[i], sr) + write_audio(noisy_path, y_wav_cpu[i], sr) + return loss + + def fit_batch(self, batch): + """ + Overridden method to train on a single batch. + + This performs the typical Brain forward-backward-update + steps, and can include updates for EMA. + + Arguments + --------- + batch: speechbrain.dataio.batch.PaddedBatch + The batch of data used for training. + + Returns + ------- + loss: torch.Tensor + The computed training loss for the batch. + """ + # Standard "forward" + "objectives" + loss = super().fit_batch(batch) + + # Update EMA for the diffusion model + self.modules["score_model"].update_ema() + + return loss + + def enhance(self, y): + """ + Run enhancement on a noisy signal. + + Arguments + --------- + y: torch.Tensor + Noisy input signal, of shape (1, T). + + Returns + ------- + x_hat_wav: torch.Tensor + Enhanced signal, of shape (1, T). 
+ """ + model = self.modules["score_model"] + + norm = y.abs().max() + y = y / norm + T_orig = y.size(1) # keep for iSTFT + y = self.spec_fwd(self.stft(y)).unsqueeze(1) # (B,1,F,T) + F_orig, T_spec_orig = y.shape[-2:] + + y = pad_spec( + y, mode="reflection" + ) # pad for U-Net down-/up-sampling constraints + + # SGMSE + x_hat = model.enhance( + y, + sampler_type=self.hparams.sampling["sampler_type"], + predictor=self.hparams.sampling["predictor"], + corrector=self.hparams.sampling["corrector"], + N=self.hparams.sampling["N"], + corrector_steps=self.hparams.sampling["corrector_steps"], + snr=self.hparams.sampling["snr"], + ) # (B,1,F,T) + + # revert to waveform + x_hat = x_hat[:, :, :F_orig, :T_spec_orig].squeeze(1) # drop ch-dim + x_hat = self.spec_back(x_hat) + x_hat_wav = self.istft(x_hat, length=T_orig) # trim padding + x_hat_wav = x_hat_wav * norm # restore scale + return x_hat_wav + + def on_stage_start(self, stage, epoch=None): + """ + Called at the beginning of each stage (TRAIN, VALID, TEST). + + This method initializes or resets metrics for that stage. + It can also be used to set flags or other stage-specific fields. + + Arguments + --------- + stage: sb.Stage + The current stage (TRAIN, VALID, TEST). + epoch: int, optional + The current epoch number, if applicable. 
+ + Returns + ------- + None + """ + self.loss_metric = MetricStats( + metric=lambda forward_out, x_t, z, t, mean, x: self.modules[ + "score_model" + ].compute_loss(forward_out, x_t, z, t, mean, x, reduction="none") + ) + + if stage == sb.Stage.TRAIN: + return # Nothing else to prepare for TRAIN + + if stage == sb.Stage.VALID: + self.modules[ + "score_model" + ].store_ema() # Only for VALID, because TEST is wrapped in on_evaluate_start() + self.eval_files_left = self.hparams.modules[ + "score_model" + ].num_eval_files + self.save_counter = 0 + + # Build MetricStats objects + self.stoi_metric = MetricStats( + metric=lambda pred, tgt: stoi_tm( + pred, tgt, fs=self.hparams.sample_rate, extended=False + ) + ) + + self.sisdr_metric = MetricStats( + metric=lambda pred, tgt: si_sdr(pred, tgt) + ) + + self.pesq_metric = MetricStats( + metric=lambda pred_wav, target_wav: pesq( + fs=self.hparams.sample_rate, + ref=target_wav.numpy().squeeze(), + deg=pred_wav.numpy().squeeze(), + mode="wb", + ), + batch_eval=False, + n_jobs=1, + ) + + def on_stage_end(self, stage, stage_loss, epoch=None): + """ + Called at the end of each stage (TRAIN, VALID, TEST). + + Summarizes and prints the loss metrics, and for non-training stages, + prints additional evaluation metrics (e.g., PESQ, STOI). + + Arguments + --------- + stage: sb.Stage + The current stage (TRAIN, VALID, TEST). + stage_loss: torch.Tensor + The aggregated loss over the stage. + epoch: int, optional + The current epoch number, if applicable. 
+ + Returns + ------- + None + """ + # Get a human-readable name for the stage: + stage_name = stage.name if hasattr(stage, "name") else str(stage) + + # Summarize the loss metric (average loss over the stage) + avg_loss = self.loss_metric.summarize("average") + + # Print to console + if epoch is not None: + print(f"Epoch {epoch} | Avg {stage_name} Loss: {avg_loss:.4f}") + else: + print(f"Avg {stage_name} Loss: {avg_loss:.4f}") + + # Log training loss + self.writer.add_scalar(f"Loss_{stage_name}", avg_loss, epoch) + + if stage == sb.Stage.TRAIN: + self.writer.flush() # Manually write to disk to ensure real time updates + return # Nothing else to wrap up for TRAIN + + # Summarize metrics + avg_pesq = self.pesq_metric.summarize("average") + avg_stoi = self.stoi_metric.summarize("average") + avg_sisdr = self.sisdr_metric.summarize("average") + + # Print summaries + print(f"Avg PESQ: {avg_pesq:.4f}") + print(f"Avg STOI: {avg_stoi:.4f}") + print(f"Avg SI-SDR: {avg_sisdr:.4f}") + + # Write summaries to log + self.writer.add_scalar(f"PESQ_{stage_name}", avg_pesq, epoch) + self.writer.add_scalar(f"STOI_{stage_name}", avg_stoi, epoch) + self.writer.add_scalar(f"SI-SDR_{stage_name}", avg_sisdr, epoch) + + if stage == sb.Stage.VALID: + self.modules[ + "score_model" + ].restore_ema() # Only for VALID, because TEST is wrapped in on_evaluate_end() + self.checkpointer.save_and_keep_only( + meta={f"{stage_name}_loss": avg_loss}, + min_keys=[f"{stage_name}_loss"], + num_to_keep=self.hparams.num_to_keep, + ) + + # Manually write to disk to ensure real time updates + self.writer.flush() + + def on_evaluate_start(self, max_key=None, min_key=None): + """ + Prepares evaluation. + + Arguments + --------- + max_key: str, optional + Key used to track maximum metric value. + min_key: str, optional + Key used to track minimum metric value. 
+ """ + # Swap in the EMA weights for evaluation + self.modules["score_model"].store_ema() + super().on_evaluate_start(max_key=max_key, min_key=min_key) + + def on_evaluate_end(self): + """ + Restore original weights after evaluation. + """ + # Restore original weights + self.modules["score_model"].restore_ema() + super().on_evaluate_end() + + def to_audio(brain, spec, length=None): + """ + Convert a complex spectrogram into time-domain audio. + + This method applies `spec_back` to invert the spectral transform + (log or exponent, if used), followed by iSTFT to return time-domain + waveforms. + + Arguments + --------- + spec: torch.Tensor + Complex spectrogram of shape (B, F, T) + length: int, optional + The target number of samples in the output signal. + + Returns + ------- + audio: torch.Tensor + Time-domain waveform, shape (B, S) + """ + return brain.istft(brain.spec_back(spec), length=length) + + def stft(self, sig): + """ + Compute the short-time Fourier transform (STFT) of the given signal. + + Arguments + --------- + sig: torch.Tensor + Time-domain signal of shape (B, S). + + Returns + ------- + spec: torch.Tensor + Complex STFT, shape (B, F, T). + """ + return torch.stft( + sig, + **{**self.stft_kwargs, "window": self.window}, + ) + + def istft(self, spec, length=None): + """ + Compute the inverse short-time Fourier transform (iSTFT). + + This method reverts the STFT computed by `stft`, using the same + parameters but without the `return_complex` key. + + Arguments + --------- + spec: torch.Tensor + Complex STFT of shape (B, F, T). + length: int, optional + The desired number of samples in the output. + + Returns + ------- + waveform: torch.Tensor + Time-domain signal of shape (B, S). 
+ """ + stft_args = dict(self.stft_kwargs) + stft_args.pop("return_complex", None) + stft_args["window"] = self.window + stft_args["length"] = length + + return torch.istft(spec, **stft_args) + + def spec_fwd(self, spec_cplx): + """ + Forward spectral transform (e.g., log or exponent) on the complex spectrogram. + + Depending on `transform_type`, applies scaling or a log-based transform to + the magnitude, preserving phase. Also multiplies by a factor if specified. + + Arguments + --------- + spec_cplx: torch.Tensor + Complex spectrogram of shape (B, F, T). + + Returns + ------- + spec_trans: torch.Tensor + Transformed complex spectrogram of the same shape. + """ + transform_type = self.hparams.transform_type + factor = self.hparams.spec_factor + e = getattr(self.hparams, "spec_abs_exponent", 1.0) + + if transform_type == "exponent": + if e != 1.0: + mag = spec_cplx.abs() ** e + phase = spec_cplx.angle() + spec_cplx = mag * torch.exp(1j * phase) + spec_cplx *= factor + + elif transform_type == "log": + mag = torch.log1p(spec_cplx.abs()) + phase = spec_cplx.angle() + spec_cplx = mag * torch.exp(1j * phase) + spec_cplx *= factor + + elif transform_type == "none": + pass + + return spec_cplx + + def spec_back(self, spec_cplx): + """ + Inverse spectral transform to revert log or exponent scaling. + + This method divides by the scale factor and reverts the transform + (log or exponent) on the magnitude. The phase remains unchanged. + + Arguments + --------- + spec_cplx: torch.Tensor + Complex spectrogram of shape (B, F, T). + + Returns + ------- + spec_orig: torch.Tensor + Original-like complex spectrogram of the same shape. 
+ """ + transform_type = self.hparams.transform_type + factor = self.hparams.spec_factor + e = getattr(self.hparams, "spec_abs_exponent", 1.0) + + if transform_type == "exponent": + spec_cplx = spec_cplx / factor + if e != 1.0: + mag = spec_cplx.abs() ** (1.0 / e) + phase = spec_cplx.angle() + spec_cplx = mag * torch.exp(1j * phase) + + elif transform_type == "log": + spec_cplx = spec_cplx / factor + mag = torch.expm1(spec_cplx.abs()) + phase = spec_cplx.angle() + spec_cplx = mag * torch.exp(1j * phase) + + elif transform_type == "none": + pass + + return spec_cplx + + def get_window(self, window_type, window_length): + """ + Build a window tensor for STFT based on the specified window type. + + Arguments + --------- + window_type: str + Type of window function to use (e.g., 'hann', 'sqrthann'). + window_length: int + The length of the window (e.g., n_fft). + + Returns + ------- + window: torch.Tensor + The generated window tensor of shape (window_length,). + """ + if window_type == "sqrthann": + return torch.sqrt(torch.hann_window(window_length, periodic=True)) + elif window_type == "hann": + return torch.hann_window(window_length, periodic=True) + else: + raise NotImplementedError( + f"Window type {window_type} not implemented!" + ) + + +def dataio_prep(hparams): + """ + Prepare the datasets, launch training and evaluate the trained model. 
+ """ + seg_frames = hparams["segment_frames"] + hop_length = hparams["hop_length"] + target_len = (seg_frames - 1) * hop_length + normalize = hparams.get("normalize", "noisy") + data_dir = hparams["data_dir"] + + random_crop_train = hparams.get("random_crop_train", True) + random_crop_valid = hparams.get("random_crop_valid", False) + random_crop_test = hparams.get("random_crop_test", False) + + def build_pipeline(random_crop): + @sb.utils.data_pipeline.takes("noisy_wav", "clean_wav") + @sb.utils.data_pipeline.provides("noisy_sig", "clean_sig") + def wav_pairs(noisy_wav, clean_wav): + # Load waveforms + sig_noisy = sb.dataio.dataio.read_audio(noisy_wav) + sig_clean = sb.dataio.dataio.read_audio(clean_wav) + + orig_len = sig_clean.shape[-1] + # Pad if too short + if orig_len < target_len: + needed = target_len - orig_len + left_pad = needed // 2 + right_pad = needed - left_pad + sig_noisy = F.pad( + sig_noisy, (left_pad, right_pad), mode="constant" + ) + sig_clean = F.pad( + sig_clean, (left_pad, right_pad), mode="constant" + ) + # Crop if too long + elif orig_len > target_len: + if random_crop: + start = np.random.randint(0, orig_len - target_len) + else: + start = (orig_len - target_len) // 2 + sig_noisy = sig_noisy[..., start : start + target_len] + sig_clean = sig_clean[..., start : start + target_len] + + # 5) normalize + if normalize == "noisy": + fac = sig_noisy.abs().max() + elif normalize == "clean": + fac = sig_clean.abs().max() + else: + fac = 1.0 + + return sig_noisy / fac, sig_clean / fac + + return [wav_pairs] + + # create datasets + datasets = {} + for split, rc in zip( + ["train", "valid", "test"], + [random_crop_train, random_crop_valid, random_crop_test], + ): + pipelines = build_pipeline(rc) + json_path = hparams[f"{split}_annotation"] + datasets[split] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=json_path, + replacements={"data_root": data_dir}, + dynamic_items=pipelines, + output_keys=["id", "noisy_sig", "clean_sig"], + ) + + # 
optional length sorting + if hparams["sorting"] in ("ascending", "descending"): + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="length", reverse=hparams["sorting"] == "descending" + ) + hparams["train_dataloader_opts"]["shuffle"] = False + + return datasets + + +if __name__ == "__main__": + cli = argparse.ArgumentParser(add_help=False) + cli.add_argument( + "--resume", + type=str, + default="", + help="Path to an existing run directory to resume.", + ) + resume_args, remaining = cli.parse_known_args() + + hparams_file, run_opts, overrides = sb.parse_arguments(remaining) + + if resume_args.resume: # Resume + run_dir = Path(resume_args.resume).resolve() + hparams_file = run_dir / "hyperparams.yaml" + overrides = overrides or "" + else: # New + run_name = f"run_{datetime.now():%Y-%m-%d_%H-%M-%S}" + overrides = (overrides or "") + f"\nrun_name: '{run_name}'" + + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + from voicebank_prepare import prepare_voicebank + + sb.utils.distributed.run_on_main( + prepare_voicebank, + kwargs={ + "data_folder": hparams["data_dir"], + "save_folder": hparams["data_dir"], + "skip_prep": hparams["skip_prep"], + }, + ) + + # Create datasets + datasets = dataio_prep(hparams) + + sb.create_experiment_directory( + experiment_directory=os.path.join( + hparams["output_folder"], hparams["run_name"] + ), + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + sgmse_brain = SGMSEBrain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Train + sgmse_brain.fit( + epoch_counter=sgmse_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["train_dataloader_opts"], + valid_loader_kwargs=hparams["valid_dataloader_opts"], + ) + + # Evaluate + sgmse_brain.evaluate( + test_set=datasets["test"], + 
max_key="valid_loss", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/Voicebank/enhance/SGMSE/voicebank_prepare.py b/recipes/Voicebank/enhance/SGMSE/voicebank_prepare.py new file mode 120000 index 0000000000..66cb2e6cc9 --- /dev/null +++ b/recipes/Voicebank/enhance/SGMSE/voicebank_prepare.py @@ -0,0 +1 @@ +../../voicebank_prepare.py \ No newline at end of file diff --git a/recipes/Voicebank/enhance/spectral_mask/README.md b/recipes/Voicebank/enhance/spectral_mask/README.md index 69cc6593d4..7c86dd5444 100644 --- a/recipes/Voicebank/enhance/spectral_mask/README.md +++ b/recipes/Voicebank/enhance/spectral_mask/README.md @@ -11,13 +11,13 @@ python train.py train/train.yaml | Release | hyperparams file | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| -| 20-05-22 | train.yaml | [model](https://drive.google.com/drive/folders/1IV3ohFracK0zLH-ZGb3LTas-l3ZDFDPW?usp=sharing) | 1xV100 32GB | +| 20-05-22 | train.yaml | [model](https://www.dropbox.com/sh/n5q9vjn0yn1qvk6/AAB-S7i2-XzVm6ux0MrXCvqya?dl=0) | 1xV100 32GB | # PreTrained Model You can find the full experiment folder (i.e., checkpoints, logs, etc) here: -https://drive.google.com/drive/folders/1IV3ohFracK0zLH-ZGb3LTas-l3ZDFDPW?usp=sharing +https://www.dropbox.com/sh/n5q9vjn0yn1qvk6/AAB-S7i2-XzVm6ux0MrXCvqya?dl=0 # Training Time @@ -34,6 +34,15 @@ About 2 minutes for each epoch with a TESLA V100. Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/enhance/spectral_mask/hparams/train.yaml b/recipes/Voicebank/enhance/spectral_mask/hparams/train.yaml index 3320a3d6d0..838e43ef6b 100644 --- a/recipes/Voicebank/enhance/spectral_mask/hparams/train.yaml +++ b/recipes/Voicebank/enhance/spectral_mask/hparams/train.yaml @@ -9,7 +9,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 4234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] data_folder: /localscratch/noisy-vctk-16k # test_clean_folder: !ref /clean_testset_wav_16k/ @@ -92,6 +92,6 @@ resynth: 
!name:speechbrain.processing.signal_processing.resynthesize train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref -# Tensorboard logger (optional) +# torch.Tensorboard logger (optional) tensorboard_train_logger: !new:speechbrain.utils.train_logger.TensorboardLogger save_dir: !ref diff --git a/recipes/Voicebank/enhance/spectral_mask/train.py b/recipes/Voicebank/enhance/spectral_mask/train.py index 6098f6bf26..1825a26b8a 100644 --- a/recipes/Voicebank/enhance/spectral_mask/train.py +++ b/recipes/Voicebank/enhance/spectral_mask/train.py @@ -7,17 +7,20 @@ Authors * Szu-Wei Fu 2020 """ + import os import sys + import torch -import torchaudio -import speechbrain as sb -from pesq import pesq from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import MetricStats -from speechbrain.processing.features import spectral_magnitude +from pesq import pesq + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.nnet.loss.stoi_loss import stoi_loss +from speechbrain.processing.features import spectral_magnitude from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.metric_stats import MetricStats # Brain class for speech enhancement training @@ -65,7 +68,6 @@ def compute_objectives(self, predictions, batch, stage): ) if stage != sb.Stage.TRAIN: - # Evaluate speech quality/intelligibility self.stoi_metric.append( batch.id, predict_wav, clean_wavs, lens, reduction="batch" @@ -82,7 +84,7 @@ def compute_objectives(self, predictions, batch, stage): enhance_path = os.path.join( self.hparams.enhanced_folder, name ) - torchaudio.save( + audio_io.save( enhance_path, torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), 16000, @@ -148,7 +150,8 @@ def on_stage_end(self, stage, stage_loss, epoch=None): def dataio_prep(hparams): """This function prepares the datasets to be used in the brain class. 
- It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ # Define audio pipelines @sb.utils.data_pipeline.takes("noisy_wav") @@ -197,10 +200,9 @@ def create_folder(folder): # Recipe begins! if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) diff --git a/recipes/Voicebank/enhance/waveform_map/README.md b/recipes/Voicebank/enhance/waveform_map/README.md index d825eeec8a..ee619a206a 100644 --- a/recipes/Voicebank/enhance/waveform_map/README.md +++ b/recipes/Voicebank/enhance/waveform_map/README.md @@ -9,7 +9,7 @@ python train.py train/train.yaml # PreTrained Model You can find the full experiment folder (i.e., checkpoints, logs, etc) here: -https://drive.google.com/drive/folders/1IV3ohFracK0zLH-ZGb3LTas-l3ZDFDPW?usp=sharing +https://www.dropbox.com/sh/n5q9vjn0yn1qvk6/AAB-S7i2-XzVm6ux0MrXCvqya?dl=0 @@ -23,6 +23,15 @@ https://drive.google.com/drive/folders/1IV3ohFracK0zLH-ZGb3LTas-l3ZDFDPW?usp=sha Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/Voicebank/enhance/waveform_map/hparams/train.yaml b/recipes/Voicebank/enhance/waveform_map/hparams/train.yaml index 4c4cf00f28..86fc5c1468 100644 --- a/recipes/Voicebank/enhance/waveform_map/hparams/train.yaml +++ b/recipes/Voicebank/enhance/waveform_map/hparams/train.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 3234 -__set_seed: !!python/object/apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] data_folder: /localscratch/noisy-vctk-16k # test_clean_folder: !ref /clean_testset_wav_16k/ @@ -81,6 +81,6 @@ compute_cost: !name:speechbrain.nnet.losses.mse_loss 
train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger save_file: !ref -# Tensorboard logger (optional) +# torch.Tensorboard logger (optional) tensorboard_train_logger: !new:speechbrain.utils.train_logger.TensorboardLogger save_dir: !ref diff --git a/recipes/Voicebank/enhance/waveform_map/train.py b/recipes/Voicebank/enhance/waveform_map/train.py index 5b8984089e..d743af7cb9 100644 --- a/recipes/Voicebank/enhance/waveform_map/train.py +++ b/recipes/Voicebank/enhance/waveform_map/train.py @@ -1,240 +1,242 @@ -#!/usr/bin/env/python3 -"""Recipe for training a waveform-based speech enhancement -system with the Voicebank dataset. - -To run this recipe, do the following: -> python train.py hparams/{hyperparam_file}.yaml - -Authors - * Szu-Wei Fu 2020 -""" -import os -import sys -import torch -import torchaudio -import speechbrain as sb -from pesq import pesq -from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import MetricStats -from speechbrain.nnet.loss.stoi_loss import stoi_loss -from speechbrain.utils.distributed import run_on_main - - -# Brain class for speech enhancement training -class SEBrain(sb.Brain): - def compute_forward(self, batch, stage): - """Forward computations from the waveform batches to the enhanced output""" - batch = batch.to(self.device) - noisy_wavs, lens = batch.noisy_sig - noisy_wavs = torch.unsqueeze(noisy_wavs, -1) - predict_wavs = self.modules.model(noisy_wavs)[:, :, 0] - - return predict_wavs - - def compute_objectives(self, predict_wavs, batch, stage): - """Computes the loss given the predicted and targeted outputs""" - clean_wavs, lens = batch.clean_sig - - loss = self.hparams.compute_cost(predict_wavs, clean_wavs, lens) - self.loss_metric.append( - batch.id, predict_wavs, clean_wavs, lens, reduction="batch" - ) - - if stage != sb.Stage.TRAIN: - - # Evaluate speech quality/intelligibility - self.stoi_metric.append( - batch.id, predict_wavs, clean_wavs, lens, reduction="batch" - ) - 
self.pesq_metric.append( - batch.id, predict=predict_wavs, target=clean_wavs, lengths=lens - ) - - # Write wavs to file - if stage == sb.Stage.TEST: - lens = lens * clean_wavs.shape[1] - for name, pred_wav, length in zip(batch.id, predict_wavs, lens): - name += ".wav" - enhance_path = os.path.join( - self.hparams.enhanced_folder, name - ) - pred_wav = pred_wav / torch.max(torch.abs(pred_wav)) * 0.99 - torchaudio.save( - enhance_path, - torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), - 16000, - ) - - return loss - - def on_stage_start(self, stage, epoch=None): - """Gets called at the beginning of each epoch""" - self.loss_metric = MetricStats(metric=self.hparams.compute_cost) - self.stoi_metric = MetricStats(metric=stoi_loss) - - # Define function taking (prediction, target) for parallel eval - def pesq_eval(pred_wav, target_wav): - """Computes the PESQ evaluation metric""" - return pesq( - fs=16000, - ref=target_wav.numpy(), - deg=pred_wav.numpy(), - mode="wb", - ) - - if stage != sb.Stage.TRAIN: - self.pesq_metric = MetricStats( - metric=pesq_eval, n_jobs=1, batch_eval=False - ) - - def on_stage_end(self, stage, stage_loss, epoch=None): - """Gets called at the end of an epoch.""" - if stage == sb.Stage.TRAIN: - self.train_loss = stage_loss - self.train_stats = {"loss": self.loss_metric.scores} - else: - stats = { - "loss": stage_loss, - "pesq": self.pesq_metric.summarize("average"), - "stoi": -self.stoi_metric.summarize("average"), - } - - if stage == sb.Stage.VALID: - if self.hparams.use_tensorboard: - valid_stats = { - "loss": self.loss_metric.scores, - "stoi": self.stoi_metric.scores, - "pesq": self.pesq_metric.scores, - } - self.hparams.tensorboard_train_logger.log_stats( - {"Epoch": epoch}, self.train_stats, valid_stats - ) - self.hparams.train_logger.log_stats( - {"Epoch": epoch}, - train_stats={"loss": self.train_loss}, - valid_stats=stats, - ) - self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"]) - - if stage == sb.Stage.TEST: - 
self.hparams.train_logger.log_stats( - {"Epoch loaded": self.hparams.epoch_counter.current}, - test_stats=stats, - ) - - -def dataio_prep(hparams): - """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" - - # Define audio pipelines - @sb.utils.data_pipeline.takes("noisy_wav") - @sb.utils.data_pipeline.provides("noisy_sig") - def noisy_pipeline(noisy_wav): - return sb.dataio.dataio.read_audio(noisy_wav) - - @sb.utils.data_pipeline.takes("clean_wav") - @sb.utils.data_pipeline.provides("clean_sig") - def clean_pipeline(clean_wav): - return sb.dataio.dataio.read_audio(clean_wav) - - # Define datasets - datasets = {} - data_info = { - "train": hparams["train_annotation"], - "valid": hparams["valid_annotation"], - "test": hparams["test_annotation"], - } - for dataset in data_info: - datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( - json_path=data_info[dataset], - replacements={"data_root": hparams["data_folder"]}, - dynamic_items=[noisy_pipeline, clean_pipeline], - output_keys=["id", "noisy_sig", "clean_sig"], - ) - - # Sort train dataset - if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending": - datasets["train"] = datasets["train"].filtered_sorted( - sort_key="length", reverse=hparams["sorting"] == "descending" - ) - hparams["dataloader_options"]["shuffle"] = False - elif hparams["sorting"] != "random": - raise NotImplementedError( - "Sorting must be random, ascending, or descending" - ) - - return datasets - - -def create_folder(folder): - if not os.path.isdir(folder): - os.makedirs(folder) - - -# Recipe begins! 
-if __name__ == "__main__": - - # Load hyperparameters file with command-line overrides - hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: - hparams = load_hyperpyyaml(fin, overrides) - - # Initialize ddp (useful only for multi-GPU DDP training) - sb.utils.distributed.ddp_init_group(run_opts) - - # Data preparation - from voicebank_prepare import prepare_voicebank # noqa - - run_on_main( - prepare_voicebank, - kwargs={ - "data_folder": hparams["data_folder"], - "save_folder": hparams["output_folder"], - "skip_prep": hparams["skip_prep"], - }, - ) - - # Create dataset objects - datasets = dataio_prep(hparams) - - # Create experiment directory - sb.create_experiment_directory( - experiment_directory=hparams["output_folder"], - hyperparams_to_save=hparams_file, - overrides=overrides, - ) - - if hparams["use_tensorboard"]: - from speechbrain.utils.train_logger import TensorboardLogger - - hparams["tensorboard_train_logger"] = TensorboardLogger( - hparams["tensorboard_logs"] - ) - - # Create the folder to save enhanced files (+ support for DDP) - run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]}) - - se_brain = SEBrain( - modules=hparams["modules"], - opt_class=hparams["opt_class"], - hparams=hparams, - run_opts=run_opts, - checkpointer=hparams["checkpointer"], - ) - - # Load latest checkpoint to resume training - se_brain.fit( - epoch_counter=se_brain.hparams.epoch_counter, - train_set=datasets["train"], - valid_set=datasets["valid"], - train_loader_kwargs=hparams["dataloader_options"], - valid_loader_kwargs=hparams["dataloader_options"], - ) - - # Load best checkpoint for evaluation - test_stats = se_brain.evaluate( - test_set=datasets["test"], - max_key="pesq", - test_loader_kwargs=hparams["dataloader_options"], - ) +#!/usr/bin/env/python3 +"""Recipe for training a waveform-based speech enhancement +system with the Voicebank dataset. 
+ +To run this recipe, do the following: +> python train.py hparams/{hyperparam_file}.yaml + +Authors + * Szu-Wei Fu 2020 +""" + +import os +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml +from pesq import pesq + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.nnet.loss.stoi_loss import stoi_loss +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.metric_stats import MetricStats + + +# Brain class for speech enhancement training +class SEBrain(sb.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the enhanced output""" + batch = batch.to(self.device) + noisy_wavs, lens = batch.noisy_sig + noisy_wavs = torch.unsqueeze(noisy_wavs, -1) + predict_wavs = self.modules.model(noisy_wavs)[:, :, 0] + + return predict_wavs + + def compute_objectives(self, predict_wavs, batch, stage): + """Computes the loss given the predicted and targeted outputs""" + clean_wavs, lens = batch.clean_sig + + loss = self.hparams.compute_cost(predict_wavs, clean_wavs, lens) + self.loss_metric.append( + batch.id, predict_wavs, clean_wavs, lens, reduction="batch" + ) + + if stage != sb.Stage.TRAIN: + # Evaluate speech quality/intelligibility + self.stoi_metric.append( + batch.id, predict_wavs, clean_wavs, lens, reduction="batch" + ) + self.pesq_metric.append( + batch.id, predict=predict_wavs, target=clean_wavs, lengths=lens + ) + + # Write wavs to file + if stage == sb.Stage.TEST: + lens = lens * clean_wavs.shape[1] + for name, pred_wav, length in zip(batch.id, predict_wavs, lens): + name += ".wav" + enhance_path = os.path.join( + self.hparams.enhanced_folder, name + ) + pred_wav = pred_wav / torch.max(torch.abs(pred_wav)) * 0.99 + audio_io.save( + enhance_path, + torch.unsqueeze(pred_wav[: int(length)].cpu(), 0), + 16000, + ) + + return loss + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch""" + self.loss_metric 
= MetricStats(metric=self.hparams.compute_cost) + self.stoi_metric = MetricStats(metric=stoi_loss) + + # Define function taking (prediction, target) for parallel eval + def pesq_eval(pred_wav, target_wav): + """Computes the PESQ evaluation metric""" + return pesq( + fs=16000, + ref=target_wav.numpy(), + deg=pred_wav.numpy(), + mode="wb", + ) + + if stage != sb.Stage.TRAIN: + self.pesq_metric = MetricStats( + metric=pesq_eval, n_jobs=1, batch_eval=False + ) + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of an epoch.""" + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + self.train_stats = {"loss": self.loss_metric.scores} + else: + stats = { + "loss": stage_loss, + "pesq": self.pesq_metric.summarize("average"), + "stoi": -self.stoi_metric.summarize("average"), + } + + if stage == sb.Stage.VALID: + if self.hparams.use_tensorboard: + valid_stats = { + "loss": self.loss_metric.scores, + "stoi": self.stoi_metric.scores, + "pesq": self.pesq_metric.scores, + } + self.hparams.tensorboard_train_logger.log_stats( + {"Epoch": epoch}, self.train_stats, valid_stats + ) + self.hparams.train_logger.log_stats( + {"Epoch": epoch}, + train_stats={"loss": self.train_loss}, + valid_stats=stats, + ) + self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"]) + + if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stats, + ) + + +def dataio_prep(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. 
+ """ + + # Define audio pipelines + @sb.utils.data_pipeline.takes("noisy_wav") + @sb.utils.data_pipeline.provides("noisy_sig") + def noisy_pipeline(noisy_wav): + return sb.dataio.dataio.read_audio(noisy_wav) + + @sb.utils.data_pipeline.takes("clean_wav") + @sb.utils.data_pipeline.provides("clean_sig") + def clean_pipeline(clean_wav): + return sb.dataio.dataio.read_audio(clean_wav) + + # Define datasets + datasets = {} + data_info = { + "train": hparams["train_annotation"], + "valid": hparams["valid_annotation"], + "test": hparams["test_annotation"], + } + for dataset in data_info: + datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json( + json_path=data_info[dataset], + replacements={"data_root": hparams["data_folder"]}, + dynamic_items=[noisy_pipeline, clean_pipeline], + output_keys=["id", "noisy_sig", "clean_sig"], + ) + + # Sort train dataset + if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending": + datasets["train"] = datasets["train"].filtered_sorted( + sort_key="length", reverse=hparams["sorting"] == "descending" + ) + hparams["dataloader_options"]["shuffle"] = False + elif hparams["sorting"] != "random": + raise NotImplementedError( + "Sorting must be random, ascending, or descending" + ) + + return datasets + + +def create_folder(folder): + if not os.path.isdir(folder): + os.makedirs(folder) + + +# Recipe begins! 
+if __name__ == "__main__": + # Load hyperparameters file with command-line overrides + hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) + with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Initialize ddp (useful only for multi-GPU DDP training) + sb.utils.distributed.ddp_init_group(run_opts) + + # Data preparation + from voicebank_prepare import prepare_voicebank # noqa + + run_on_main( + prepare_voicebank, + kwargs={ + "data_folder": hparams["data_folder"], + "save_folder": hparams["output_folder"], + "skip_prep": hparams["skip_prep"], + }, + ) + + # Create dataset objects + datasets = dataio_prep(hparams) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + if hparams["use_tensorboard"]: + from speechbrain.utils.train_logger import TensorboardLogger + + hparams["tensorboard_train_logger"] = TensorboardLogger( + hparams["tensorboard_logs"] + ) + + # Create the folder to save enhanced files (+ support for DDP) + run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]}) + + se_brain = SEBrain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # Load latest checkpoint to resume training + se_brain.fit( + epoch_counter=se_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["dataloader_options"], + ) + + # Load best checkpoint for evaluation + test_stats = se_brain.evaluate( + test_set=datasets["test"], + max_key="pesq", + test_loader_kwargs=hparams["dataloader_options"], + ) diff --git a/recipes/Voicebank/voicebank_prepare.py b/recipes/Voicebank/voicebank_prepare.py index 89ccadc47a..0ee1276dbd 100644 --- 
a/recipes/Voicebank/voicebank_prepare.py +++ b/recipes/Voicebank/voicebank_prepare.py @@ -1,473 +1,493 @@ -# -*- coding: utf-8 -*- -""" -Data preparation. - -Download and resample, use ``download_vctk`` below. -https://datashare.is.ed.ac.uk/handle/10283/2791 - -Authors: - * Szu-Wei Fu, 2020 - * Peter Plantinga, 2020 -""" - -import os -import json -import string -import urllib -import shutil -import logging -import tempfile -import torchaudio -from torchaudio.transforms import Resample -from speechbrain.utils.data_utils import get_all_files, download_file -from speechbrain.dataio.dataio import read_audio - -logger = logging.getLogger(__name__) -LEXICON_URL = "http://www.openslr.org/resources/11/librispeech-lexicon.txt" -TRAIN_JSON = "train.json" -TEST_JSON = "test.json" -VALID_JSON = "valid.json" -SAMPLERATE = 16000 -TRAIN_SPEAKERS = [ - "p226", - "p287", - "p227", - "p228", - "p230", - "p231", - "p233", - "p236", - "p239", - "p243", - "p244", - "p250", - "p254", - "p256", - "p258", - "p259", - "p267", - "p268", - "p269", - "p270", - "p273", - "p274", - "p276", - "p277", - "p278", - "p279", - "p282", - "p286", -] -# Lexicon missing entries -MISSING_LEXICON = { - "CRUCIALLY": "K R UW SH AH L IY", - "PAEDOPHILES": "P EH D OW F AY L S", - "MR": "M IH S T ER", - "BBC": "B IY B IY S IY", - "EUPHORIC": "Y UW F AO R IH K", - "RACISM": "R EY S IH S M", - "MP": "EH M P IY", - "RESTRUCTURING": "R IY S T R AH K CH ER IH NG", - "OSAMA": "OW S AH M AH", - "GUITARIST": "G IH T AA R IH S T", - "BLUESHE": "B L UW SH IY", - "FLANKER": "F L AY N K ER", - "SADDAM": "S AA D AA M", - "COVERUP": "K UH V ER UH P", - "FBI": "EH F B IY AY", - "PREEMPTIVE": "P R IY EH M P T IH V", - "FOURYEAR": "F AO R Y IY R", - "XRAY": "EH K S R AY", - "TALIBAN": "T AE L IH B AA N", - "SUPERIMPOSITION": "S UW P ER IH M P OW S IH SH AH N", - "GUIDELINES": "G AY D L AY N S", - "FINALISED": "F AY N AH L AY Z D", - "HALFTIME": "H AE F T AY M", - "WINGERS": "W IH NG ER Z", - "GM": "J IY EH M", - "MCGREGOR": "M 
AH K G R EH G AO R", - "TWODAY": "T UW D EY", - "DATABASE": "D EY T AH B EY S", - "TELECOM": "T EH L AH K AO M", - "SHORTTERM": "SH AO R T ER M", - "SHORTFALL": "SH AO R T F AH L", - "MCCALL": "M AH K AH L", - "HEADTEACHER": "H EH D T IY CH ER", - "TAKEOVER": "T EY K OW V ER", - "ONETHIRD": "W AH N TH ER D", - "TV": "T IY V IY", - "SCREENPLAY": "S K R IY N P L EY", - "YUGOSLAV": "Y UW G OW S L AA V", - "HIBS": "HH IH B Z", - "DISPOSALS": "D IH S P OW S AH L Z", - "MODERNISATION": "M AA D ER N AH Z EY SH AH N", - "REALLIFE": "R IY L AY F", - "ONEYEAR": "W AH N Y IY R", - "GRASSROOTS": "G R AE S R UW T S", - "ARNIE": "AH R N IY", - "PARTTIME": "P AH R T AY M", - "SHORTLIST": "SH AO R T L IH S T", - "OUTPERFORMED": "OW T P ER F AO R M D", - "LONGTERM": "L AO NG T ER M", - "DAYTODAY": "D EY T UW D EY", - "MCPHERSON": "M AH K F ER S AH N", - "OUTSOURCING": "OW T S AO R S IH NG", - "FULLSCALE": "F UH L S K EY L", - "SERGIO": "S ER J IY OW", - "HENMAN": "HH EH N M AA N", - "MCLEOD": "M AH K L IY AO D", - "TIMESCALE": "T AY M S K EY L", - "REFURBISHMENT": "R IY F UH R B IH SH M AH N T", - "LINEUP": "L AY N UH P", - "DOWNBEAT": "D OW N B IY T", - "MANDELA": "M AE N D EH L AH", - "UNDERAGE": "UH N D ER EY J", - "MCNAUGHTON": "M AH K N AW T AH N", - "MICKELSON": "M IH K L S AH N", - "THREEQUARTERS": "TH R IY K AO R T ER Z", - "WEBSITE": "W EH B S AY T", - "BLUEITS": "B L UW IH T S", - "CEASEFIRE": "S IY S F AY R", - "FULLTIME": "F UH L T AY M", - "DOCHERTY": "D AH K ER T IY", - "RUNNERUP": "R UH N ER AH P", - "DOWNTURN": "D OW N T ER N", - "EUROS": "Y ER OW S", - "FOOTANDMOUTH": "F UH T AE N D M OW TH", - "HIGHLIGHTED": "HH AY L AY T AH D", - "MIDFIELD": "M IH D F IY L D", - "MCKENZIE": "M AH K EH N Z IY", - "BENCHMARK": "B EH N CH M AA R K", - "MCCONNELL": "M AH K AW N EH L", - "UPGRADING": "UH P G R EY D IH NG", - "BLUNKETT": "B L UH N K AH T", - "RETHINK": "R IY TH IH N K", - "UPBEAT": "AH P B IY T", - "TELECOMS": "T EH L AH K AO M Z", - "APARTHEID": "AH P AH R T HH AY D", 
- "AIRDRIE": "EY R D R IY", - "RETHINK": "R IY TH IH N K", - "HELPLINE": "HH EH L P L AY N", - "CLEARCUT": "K L IY R K UH T", -} - - -def prepare_voicebank( - data_folder, save_folder, valid_speaker_count=2, skip_prep=False -): - """ - Prepares the json files for the Voicebank dataset. - - Expects the data folder to be the same format as the output of - ``download_vctk()`` below. - - Arguments - --------- - data_folder : str - Path to the folder where the original Voicebank dataset is stored. - save_folder : str - The directory where to store the json files. - valid_speaker_count : int - The number of validation speakers to use (out of 28 in train set). - skip_prep: bool - If True, skip data preparation. - - Example - ------- - >>> data_folder = '/path/to/datasets/Voicebank' - >>> save_folder = 'exp/Voicebank_exp' - >>> prepare_voicebank(data_folder, save_folder) - """ - if skip_prep: - return - # Setting ouput files - save_json_train = os.path.join(save_folder, TRAIN_JSON) - save_json_valid = os.path.join(save_folder, VALID_JSON) - save_json_test = os.path.join(save_folder, TEST_JSON) - - # Check if this phase is already done (if so, skip it) - if skip(save_json_train, save_json_test, save_json_valid): - logger.info("Preparation completed in previous run, skipping.") - return - - train_clean_folder = os.path.join( - data_folder, "clean_trainset_28spk_wav_16k" - ) - train_noisy_folder = os.path.join( - data_folder, "noisy_trainset_28spk_wav_16k" - ) - train_txts = os.path.join(data_folder, "trainset_28spk_txt") - test_clean_folder = os.path.join(data_folder, "clean_testset_wav_16k") - test_noisy_folder = os.path.join(data_folder, "noisy_testset_wav_16k") - test_txts = os.path.join(data_folder, "testset_txt") - - # Setting the save folder - if not os.path.exists(save_folder): - os.makedirs(save_folder) - - # Additional checks to make sure the data folder contains Voicebank - check_voicebank_folders( - train_clean_folder, - train_noisy_folder, - train_txts, - 
test_clean_folder, - test_noisy_folder, - test_txts, - ) - - logger.debug("Creating lexicon...") - lexicon = create_lexicon(os.path.join(data_folder, "lexicon.txt")) - logger.info("Creating json files for noisy VoiceBank...") - - logger.debug("Collecting files...") - extension = [".wav"] - valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count] - wav_lst_train = get_all_files( - train_noisy_folder, match_and=extension, exclude_or=valid_speakers - ) - wav_lst_valid = get_all_files( - train_noisy_folder, match_and=extension, match_or=valid_speakers - ) - wav_lst_test = get_all_files(test_noisy_folder, match_and=extension) - - logger.debug("Creating json files for noisy VoiceBank...") - create_json( - wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon - ) - create_json( - wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon - ) - create_json( - wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon - ) - - -def skip(*filenames): - """ - Detects if the Voicebank data_preparation has been already done. - If the preparation has been done, we can skip it. - - Returns - ------- - bool - if True, the preparation phase can be skipped. - if False, it must be done. - """ - for filename in filenames: - if not os.path.isfile(filename): - return False - return True - - -def remove_punctuation(a_string): - """Remove all punctuation from string""" - return a_string.translate(str.maketrans("", "", string.punctuation)) - - -def create_lexicon(lexicon_save_filepath): - """ - Creates the lexicon object, downloading if it hasn't been done yet. 
- - Arguments - --------- - lexicon_save_filepath : str - Path to save the lexicon when downloading - """ - if not os.path.isfile(lexicon_save_filepath): - download_file(LEXICON_URL, lexicon_save_filepath) - - # Iterate lexicon file and add the first pronunciation in the file for - # each word to our lexicon dictionary - lexicon = MISSING_LEXICON - delayed_words = {} - for line in open(lexicon_save_filepath): - line = line.split() - phns = " ".join(p.strip("012") for p in line[1:]) - - # Don't add words with punctuation until we can be sure they won't - # overwrite words without punctuation. - clean_word = remove_punctuation(line[0]) - if clean_word != line[0] and clean_word not in delayed_words: - delayed_words[clean_word] = phns - elif clean_word == line[0] and clean_word not in lexicon: - lexicon[clean_word] = phns - - # Add words with punctuation if they won't overwrite non-punctuated words - for word, phns in delayed_words.items(): - if word not in lexicon: - lexicon[word] = phns - - return lexicon - - -def create_json(wav_lst, json_file, clean_folder, txt_folder, lexicon): - """ - Creates the json file given a list of wav files. - - Arguments - --------- - wav_lst : list - The list of wav files. - json_file : str - The path of the output json file - clean_folder : str - The location of parallel clean samples. - txt_folder : str - The location of the transcript files. 
- """ - logger.debug(f"Creating json lists in {json_file}") - - # Processing all the wav files in the list - json_dict = {} - for wav_file in wav_lst: # ex:p203_122.wav - - # Example wav_file: p232_001.wav - noisy_path, filename = os.path.split(wav_file) - _, noisy_dir = os.path.split(noisy_path) - _, clean_dir = os.path.split(clean_folder) - noisy_rel_path = os.path.join("{data_root}", noisy_dir, filename) - clean_rel_path = os.path.join("{data_root}", clean_dir, filename) - - # Reading the signal (to retrieve duration in seconds) - signal = read_audio(wav_file) - duration = signal.shape[0] / SAMPLERATE - - # Read text - snt_id = filename.replace(".wav", "") - with open(os.path.join(txt_folder, snt_id + ".txt")) as f: - word_string = f.read() - word_string = remove_punctuation(word_string).strip().upper() - phones = [ - phn for word in word_string.split() for phn in lexicon[word].split() - ] - - # Remove duplicate phones - phones = [i for i, j in zip(phones, phones[1:] + [None]) if i != j] - phone_string = " ".join(phones) - - json_dict[snt_id] = { - "noisy_wav": noisy_rel_path, - "clean_wav": clean_rel_path, - "length": duration, - "words": word_string, - "phones": phone_string, - } - - # Writing the json lines - with open(json_file, mode="w") as json_f: - json.dump(json_dict, json_f, indent=2) - - logger.info(f"{json_file} successfully created!") - - -def check_voicebank_folders(*folders): - """Raises FileNotFoundError if any passed folder does not exist.""" - for folder in folders: - if not os.path.exists(folder): - raise FileNotFoundError( - f"the folder {folder} does not exist (it is expected in " - "the Voicebank dataset)" - ) - - -def download_vctk(destination, tmp_dir=None, device="cpu"): - """Download dataset and perform resample to 16000 Hz. - - Arguments - --------- - destination : str - Place to put final zipped dataset. - tmp_dir : str - Location to store temporary files. Will use `tempfile` if not provided. 
- device : str - Passed directly to pytorch's ``.to()`` method. Used for resampling. - """ - dataset_name = "noisy-vctk-16k" - if tmp_dir is None: - tmp_dir = tempfile.gettempdir() - final_dir = os.path.join(tmp_dir, dataset_name) - - if not os.path.isdir(tmp_dir): - os.mkdir(tmp_dir) - - if not os.path.isdir(final_dir): - os.mkdir(final_dir) - - prefix = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/2791/" - noisy_vctk_urls = [ - prefix + "clean_testset_wav.zip", - prefix + "noisy_testset_wav.zip", - prefix + "testset_txt.zip", - prefix + "clean_trainset_28spk_wav.zip", - prefix + "noisy_trainset_28spk_wav.zip", - prefix + "trainset_28spk_txt.zip", - ] - - zip_files = [] - for url in noisy_vctk_urls: - filename = os.path.join(tmp_dir, url.split("/")[-1]) - zip_files.append(filename) - if not os.path.isfile(filename): - print("Downloading " + url) - with urllib.request.urlopen(url) as response: - with open(filename, "wb") as tmp_file: - logger.info("... to " + tmp_file.name) - shutil.copyfileobj(response, tmp_file) - - # Unzip - for zip_file in zip_files: - logger.info("Unzipping " + zip_file) - shutil.unpack_archive(zip_file, tmp_dir, "zip") - os.remove(zip_file) - - # Move transcripts to final dir - shutil.move(os.path.join(tmp_dir, "testset_txt"), final_dir) - shutil.move(os.path.join(tmp_dir, "trainset_28spk_txt"), final_dir) - - # Downsample - dirs = [ - "noisy_testset_wav", - "clean_testset_wav", - "noisy_trainset_28spk_wav", - "clean_trainset_28spk_wav", - ] - - downsampler = Resample(orig_freq=48000, new_freq=16000) - - for directory in dirs: - logger.info("Resampling " + directory) - dirname = os.path.join(tmp_dir, directory) - - # Make directory to store downsampled files - dirname_16k = os.path.join(final_dir, directory + "_16k") - if not os.path.isdir(dirname_16k): - os.mkdir(dirname_16k) - - # Load files and downsample - for filename in get_all_files(dirname, match_and=[".wav"]): - signal, rate = torchaudio.load(filename) - downsampled_signal = 
downsampler(signal.view(1, -1).to(device)) - - # Save downsampled file - torchaudio.save( - os.path.join(dirname_16k, filename[-12:]), - downsampled_signal.cpu(), - sample_rate=16000, - ) - - # Remove old file - os.remove(filename) - - # Remove old directory - os.rmdir(dirname) - - logger.info("Zipping " + final_dir) - final_zip = shutil.make_archive( - base_name=final_dir, - format="zip", - root_dir=os.path.dirname(final_dir), - base_dir=os.path.basename(final_dir), - ) - - logger.info(f"Moving {final_zip} to {destination}") - shutil.move(final_zip, os.path.join(destination, dataset_name + ".zip")) +""" +Data preparation. + +Download and resample, use ``download_vctk`` below. +https://datashare.ed.ac.uk/handle/10283/2791 + +Authors: + * Szu-Wei Fu, 2020 + * Peter Plantinga, 2020 +""" + +import json +import os +import shutil +import string +import tempfile +import urllib + +from torchaudio.transforms import Resample + +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.data_utils import download_file, get_all_files +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +LEXICON_URL = "http://www.openslr.org/resources/11/librispeech-lexicon.txt" +TRAIN_JSON = "train.json" +TEST_JSON = "test.json" +VALID_JSON = "valid.json" +SAMPLERATE = 16000 +TRAIN_SPEAKERS = [ + "p226", + "p287", + "p227", + "p228", + "p230", + "p231", + "p233", + "p236", + "p239", + "p243", + "p244", + "p250", + "p254", + "p256", + "p258", + "p259", + "p267", + "p268", + "p269", + "p270", + "p273", + "p274", + "p276", + "p277", + "p278", + "p279", + "p282", + "p286", +] +# Lexicon missing entries +# cspell:disable +MISSING_LEXICON = { + "CRUCIALLY": "K R UW SH AH L IY", + "PAEDOPHILES": "P EH D OW F AY L S", + "MR": "M IH S T ER", + "BBC": "B IY B IY S IY", + "EUPHORIC": "Y UW F AO R IH K", + "RACISM": "R EY S IH S M", + "MP": "EH M P IY", + "RESTRUCTURING": "R IY S T R AH K CH ER IH NG", + "OSAMA": "OW S AH 
M AH", + "GUITARIST": "G IH T AA R IH S T", + "BLUESHE": "B L UW SH IY", + "FLANKER": "F L AY N K ER", + "SADDAM": "S AA D AA M", + "COVERUP": "K UH V ER UH P", + "FBI": "EH F B IY AY", + "PREEMPTIVE": "P R IY EH M P T IH V", + "FOURYEAR": "F AO R Y IY R", + "XRAY": "EH K S R AY", + "TALIBAN": "T AE L IH B AA N", + "SUPERIMPOSITION": "S UW P ER IH M P OW S IH SH AH N", + "GUIDELINES": "G AY D L AY N S", + "FINALISED": "F AY N AH L AY Z D", + "HALFTIME": "H AE F T AY M", + "WINGERS": "W IH NG ER Z", + "GM": "J IY EH M", + "MCGREGOR": "M AH K G R EH G AO R", + "TWODAY": "T UW D EY", + "DATABASE": "D EY T AH B EY S", + "TELECOM": "T EH L AH K AO M", + "SHORTTERM": "SH AO R T ER M", + "SHORTFALL": "SH AO R T F AH L", + "MCCALL": "M AH K AH L", + "HEADTEACHER": "H EH D T IY CH ER", + "TAKEOVER": "T EY K OW V ER", + "ONETHIRD": "W AH N TH ER D", + "TV": "T IY V IY", + "SCREENPLAY": "S K R IY N P L EY", + "YUGOSLAV": "Y UW G OW S L AA V", + "HIBS": "HH IH B Z", + "DISPOSALS": "D IH S P OW S AH L Z", + "MODERNISATION": "M AA D ER N AH Z EY SH AH N", + "REALLIFE": "R IY L AY F", + "ONEYEAR": "W AH N Y IY R", + "GRASSROOTS": "G R AE S R UW T S", + "ARNIE": "AH R N IY", + "PARTTIME": "P AH R T AY M", + "SHORTLIST": "SH AO R T L IH S T", + "OUTPERFORMED": "OW T P ER F AO R M D", + "LONGTERM": "L AO NG T ER M", + "DAYTODAY": "D EY T UW D EY", + "MCPHERSON": "M AH K F ER S AH N", + "OUTSOURCING": "OW T S AO R S IH NG", + "FULLSCALE": "F UH L S K EY L", + "SERGIO": "S ER J IY OW", + "HENMAN": "HH EH N M AA N", + "MCLEOD": "M AH K L IY AO D", + "TIMESCALE": "T AY M S K EY L", + "REFURBISHMENT": "R IY F UH R B IH SH M AH N T", + "LINEUP": "L AY N UH P", + "DOWNBEAT": "D OW N B IY T", + "MANDELA": "M AE N D EH L AH", + "UNDERAGE": "UH N D ER EY J", + "MCNAUGHTON": "M AH K N AW T AH N", + "MICKELSON": "M IH K L S AH N", + "THREEQUARTERS": "TH R IY K AO R T ER Z", + "WEBSITE": "W EH B S AY T", + "BLUEITS": "B L UW IH T S", + "CEASEFIRE": "S IY S F AY R", + "FULLTIME": "F UH L T AY M", 
+ "DOCHERTY": "D AH K ER T IY", + "RUNNERUP": "R UH N ER AH P", + "DOWNTURN": "D OW N T ER N", + "EUROS": "Y ER OW S", + "FOOTANDMOUTH": "F UH T AE N D M OW TH", + "HIGHLIGHTED": "HH AY L AY T AH D", + "MIDFIELD": "M IH D F IY L D", + "MCKENZIE": "M AH K EH N Z IY", + "BENCHMARK": "B EH N CH M AA R K", + "MCCONNELL": "M AH K AW N EH L", + "UPGRADING": "UH P G R EY D IH NG", + "BLUNKETT": "B L UH N K AH T", + "RETHINK": "R IY TH IH N K", + "UPBEAT": "AH P B IY T", + "TELECOMS": "T EH L AH K AO M Z", + "APARTHEID": "AH P AH R T HH AY D", + "AIRDRIE": "EY R D R IY", + "RETHINK": "R IY TH IH N K", + "HELPLINE": "HH EH L P L AY N", + "CLEARCUT": "K L IY R K UH T", +} +# cspell:enable + + +def prepare_voicebank( + data_folder, save_folder, valid_speaker_count=2, skip_prep=False +): + """ + Prepares the json files for the Voicebank dataset. + + Expects the data folder to be the same format as the output of + ``download_vctk()`` below. + + Arguments + --------- + data_folder : str + Path to the folder where the original Voicebank dataset is stored. + save_folder : str + The directory where to store the json files. + valid_speaker_count : int + The number of validation speakers to use (out of 28 in train set). + skip_prep: bool + If True, skip data preparation. 
+ + Returns + ------- + None + + Example + ------- + >>> data_folder = "/path/to/datasets/Voicebank" + >>> save_folder = "exp/Voicebank_exp" + >>> prepare_voicebank(data_folder, save_folder) + """ + if skip_prep: + return + # Setting output files + save_json_train = os.path.join(save_folder, TRAIN_JSON) + save_json_valid = os.path.join(save_folder, VALID_JSON) + save_json_test = os.path.join(save_folder, TEST_JSON) + + # Check if this phase is already done (if so, skip it) + if skip(save_json_train, save_json_test, save_json_valid): + logger.info("Preparation completed in previous run, skipping.") + return + + train_clean_folder = os.path.join( + data_folder, "clean_trainset_28spk_wav_16k" + ) + train_noisy_folder = os.path.join( + data_folder, "noisy_trainset_28spk_wav_16k" + ) + train_txts = os.path.join(data_folder, "trainset_28spk_txt") + test_clean_folder = os.path.join(data_folder, "clean_testset_wav_16k") + test_noisy_folder = os.path.join(data_folder, "noisy_testset_wav_16k") + test_txts = os.path.join(data_folder, "testset_txt") + + # Setting the save folder + if not os.path.exists(save_folder): + os.makedirs(save_folder) + + # Additional checks to make sure the data folder contains Voicebank + check_voicebank_folders( + train_clean_folder, + train_noisy_folder, + train_txts, + test_clean_folder, + test_noisy_folder, + test_txts, + ) + + logger.debug("Creating lexicon...") + lexicon = create_lexicon(os.path.join(data_folder, "lexicon.txt")) + logger.info("Creating json files for noisy VoiceBank...") + + logger.debug("Collecting files...") + extension = [".wav"] + valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count] + wav_lst_train = get_all_files( + train_noisy_folder, match_and=extension, exclude_or=valid_speakers + ) + wav_lst_valid = get_all_files( + train_noisy_folder, match_and=extension, match_or=valid_speakers + ) + wav_lst_test = get_all_files(test_noisy_folder, match_and=extension) + + logger.debug("Creating json files for noisy VoiceBank...") + 
create_json( + wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon + ) + create_json( + wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon + ) + create_json( + wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon + ) + + +def skip(*filenames): + """ + Detects if the Voicebank data_preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + *filenames : tuple + List of paths to check for existence. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + for filename in filenames: + if not os.path.isfile(filename): + return False + return True + + +def remove_punctuation(a_string): + """Remove all punctuation from string""" + return a_string.translate(str.maketrans("", "", string.punctuation)) + + +def create_lexicon(lexicon_save_filepath): + """ + Creates the lexicon object, downloading if it hasn't been done yet. + + Arguments + --------- + lexicon_save_filepath : str + Path to save the lexicon when downloading + + Returns + ------- + lexicon : dict + Mapping from word string to list of phonemes. + """ + if not os.path.isfile(lexicon_save_filepath): + download_file(LEXICON_URL, lexicon_save_filepath) + + # Iterate lexicon file and add the first pronunciation in the file for + # each word to our lexicon dictionary + lexicon = MISSING_LEXICON + delayed_words = {} + for line in open(lexicon_save_filepath, encoding="utf-8"): + line = line.split() + phns = " ".join(p.strip("012") for p in line[1:]) + + # Don't add words with punctuation until we can be sure they won't + # overwrite words without punctuation. 
+ clean_word = remove_punctuation(line[0]) + if clean_word != line[0] and clean_word not in delayed_words: + delayed_words[clean_word] = phns + elif clean_word == line[0] and clean_word not in lexicon: + lexicon[clean_word] = phns + + # Add words with punctuation if they won't overwrite non-punctuated words + for word, phns in delayed_words.items(): + if word not in lexicon: + lexicon[word] = phns + + return lexicon + + +def create_json(wav_lst, json_file, clean_folder, txt_folder, lexicon): + """ + Creates the json file given a list of wav files. + + Arguments + --------- + wav_lst : list + The list of wav files. + json_file : str + The path of the output json file + clean_folder : str + The location of parallel clean samples. + txt_folder : str + The location of the transcript files. + lexicon : dict + Mapping from word string to list of phonemes. + """ + logger.debug(f"Creating json lists in {json_file}") + + # Processing all the wav files in the list + json_dict = {} + for wav_file in wav_lst: # ex:p203_122.wav + # Example wav_file: p232_001.wav + noisy_path, filename = os.path.split(wav_file) + _, noisy_dir = os.path.split(noisy_path) + _, clean_dir = os.path.split(clean_folder) + noisy_rel_path = os.path.join("{data_root}", noisy_dir, filename) + clean_rel_path = os.path.join("{data_root}", clean_dir, filename) + + # Reading the signal (to retrieve duration in seconds) + signal = read_audio(wav_file) + duration = signal.shape[0] / SAMPLERATE + + # Read text + snt_id = filename.replace(".wav", "") + with open( + os.path.join(txt_folder, snt_id + ".txt"), encoding="utf-8" + ) as f: + word_string = f.read() + word_string = remove_punctuation(word_string).strip().upper() + phones = [ + phn for word in word_string.split() for phn in lexicon[word].split() + ] + + # Remove duplicate phones + phones = [i for i, j in zip(phones, phones[1:] + [None]) if i != j] + phone_string = " ".join(phones) + + json_dict[snt_id] = { + "noisy_wav": noisy_rel_path, + "clean_wav": 
clean_rel_path, + "length": duration, + "words": word_string, + "phones": phone_string, + } + + # Writing the json lines + with open(json_file, mode="w", encoding="utf-8") as json_f: + json.dump(json_dict, json_f, indent=2) + + logger.info(f"{json_file} successfully created!") + + +def check_voicebank_folders(*folders): + """Raises FileNotFoundError if any passed folder does not exist.""" + for folder in folders: + if not os.path.exists(folder): + raise FileNotFoundError( + f"the folder {folder} does not exist (it is expected in " + "the Voicebank dataset)" + ) + + +def download_vctk(destination, tmp_dir=None, device="cpu"): + """Download dataset and perform resample to 16000 Hz. + + Arguments + --------- + destination : str + Place to put final zipped dataset. + tmp_dir : str + Location to store temporary files. Will use `tempfile` if not provided. + device : str + Passed directly to pytorch's ``.to()`` method. Used for resampling. + """ + dataset_name = "noisy-vctk-16k" + if tmp_dir is None: + tmp_dir = tempfile.gettempdir() + final_dir = os.path.join(tmp_dir, dataset_name) + + if not os.path.isdir(tmp_dir): + os.mkdir(tmp_dir) + + if not os.path.isdir(final_dir): + os.mkdir(final_dir) + + prefix = "https://datashare.ed.ac.uk/bitstream/handle/10283/2791/" + noisy_vctk_urls = [ + prefix + "clean_testset_wav.zip", + prefix + "noisy_testset_wav.zip", + prefix + "testset_txt.zip", + prefix + "clean_trainset_28spk_wav.zip", + prefix + "noisy_trainset_28spk_wav.zip", + prefix + "trainset_28spk_txt.zip", + ] + + zip_files = [] + for url in noisy_vctk_urls: + filename = os.path.join(tmp_dir, url.split("/")[-1]) + zip_files.append(filename) + if not os.path.isfile(filename): + print("Downloading " + url) + with urllib.request.urlopen(url) as response: + with open(filename, "wb") as tmp_file: + logger.info("... 
to " + tmp_file.name) + shutil.copyfileobj(response, tmp_file) + + # Unzip + for zip_file in zip_files: + logger.info("Unzipping " + zip_file) + shutil.unpack_archive(zip_file, tmp_dir, "zip") + os.remove(zip_file) + + # Move transcripts to final dir + shutil.move(os.path.join(tmp_dir, "testset_txt"), final_dir) + shutil.move(os.path.join(tmp_dir, "trainset_28spk_txt"), final_dir) + + # Downsample + dirs = [ + "noisy_testset_wav", + "clean_testset_wav", + "noisy_trainset_28spk_wav", + "clean_trainset_28spk_wav", + ] + + downsampler = Resample(orig_freq=48000, new_freq=16000).to(device) + + for directory in dirs: + logger.info("Resampling " + directory) + dirname = os.path.join(tmp_dir, directory) + + # Make directory to store downsampled files + dirname_16k = os.path.join(final_dir, directory + "_16k") + if not os.path.isdir(dirname_16k): + os.mkdir(dirname_16k) + + # Load files and downsample + for filename in get_all_files(dirname, match_and=[".wav"]): + signal, rate = audio_io.load(filename) + downsampled_signal = downsampler(signal.view(1, -1).to(device)) + + # Save downsampled file + audio_io.save( + os.path.join(dirname_16k, filename[-12:]), + downsampled_signal.cpu(), + sample_rate=16000, + ) + + # Remove old file + os.remove(filename) + + # Remove old directory + os.rmdir(dirname) + + logger.info("Zipping " + final_dir) + final_zip = shutil.make_archive( + base_name=final_dir, + format="zip", + root_dir=os.path.dirname(final_dir), + base_dir=os.path.basename(final_dir), + ) + + logger.info(f"Moving {final_zip} to {destination}") + shutil.move(final_zip, os.path.join(destination, dataset_name + ".zip")) diff --git a/recipes/VoxCeleb/SpeakerRec/README.md b/recipes/VoxCeleb/SpeakerRec/README.md index 14b0a11744..f420195460 100644 --- a/recipes/VoxCeleb/SpeakerRec/README.md +++ b/recipes/VoxCeleb/SpeakerRec/README.md @@ -1,44 +1,11 @@ -# Speaker recognition experiments with VoxCeleb. 
+# Speaker recognition experiments with VoxCeleb This folder contains scripts for running speaker identification and verification experiments with the VoxCeleb dataset(http://www.robots.ox.ac.uk/~vgg/data/voxceleb/). -# Training Xvectors -Run the following command to train xvectors: - -`python train_speaker_embeddings.py hparams/train_x_vectors.yaml` - -You can use the same script for voxceleb1, voxceleb2, and voxceleb1+2. Just change the datafolder and the corresponding number of speakers (1211 vox1, 5994 vox2, 7205 vox1+2). -For voxceleb1 + voxceleb2, see preparation instructions below). - -The system trains a TDNN for speaker embeddings coupled with a speaker-id classifier. The speaker-id accuracy should be around 97-98% for both voxceleb1 and voceleb2. - -# Speaker verification with PLDA -After training the speaker embeddings, it is possible to perform speaker verification using PLDA. You can run it with the following command: - -`python speaker_verification_plda.py hparams/verification_plda_xvector.yaml` - -If you didn't train the speaker embedding before, we automatically download the xvector model from the web. -This system achieves an EER = 3.23% on voxceleb1 + voxceleb2. -These results are all obtained with the official verification split of voxceleb1 (veri\_test2_.txt) - - -# Speaker verification using ECAPA-TDNN embeddings -Run the following command to train speaker embeddings using [ECAPA-TDNN](https://arxiv.org/abs/2005.07143): - -`python train_speaker_embeddings.py hparams/train_ecapa_tdnn.yaml` - -The speaker-id accuracy should be around 98-99% for both voxceleb1 and voceleb2. - -After training the speaker embeddings, it is possible to perform speaker verification using cosine similarity. 
You can run it with the following command: - -`python speaker_verification_cosine.py hparams/verification_ecapa.yaml` - -This system achieves: -- EER = 0.80% (voxceleb1 + voxceleb2) with s-norm -- EER = 0.90% (voxceleb1 + voxceleb2) without s-norm - -These results are all obtained with the official verification split of voxceleb1 (veri\_test2.txt) - -Below you can find the results from model trained on VoxCeleb 2 dev set and tested on VoxSRC derivatives. Note that however, the models are trained under a very limited condition (single GPU so batch_size=2) and no score normalization at test time. +## Installing Extra Dependencies +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: +``` +pip install -r extra_requirements.txt +``` # VoxCeleb2 preparation Voxceleb2 audio files are released in m4a format. All the files must be converted in wav files before @@ -52,7 +19,9 @@ Note that for the speaker verification experiments with Voxceleb2 the official s Voxceleb2 stores files with the m4a audio format. To use them within SpeechBrain you have to convert all the m4a files into wav files. You can do the conversion using ffmpeg(https://gist.github.com/seungwonpark/4f273739beef2691cd53b5c39629d830). This operation might take several hours and should be only once. + 2. Put all the wav files in a folder called wav. You should have something like `voxceleb2/wav/id*/*.wav` (e.g, `voxceleb2/wav/id00012/21Uxsk56VDQ/00001.wav`) + 3. copy the `voxceleb1/vox1_test_wav.zip` file into the voxceleb2 folder. @@ -69,22 +38,76 @@ Go to the voxceleb2 folder and run `unzip vox1_test_wav.zip`. Note: To prepare the voxceleb1 + voxceleb2 dataset you have to copy and unpack vox1_dev_wav.zip for the voxceleb1 dataset. 
-# Performance summary -[Speaker verification results with Voxceleb 1 + Voxceleb 2] +## Training Xvectors +Run the following command to train xvectors: +``` +python train_speaker_embeddings.py hparams/train_x_vectors.yaml +``` +You can use the same script for voxceleb1, voxceleb2, and voxceleb1+2. Just change the datafolder and the corresponding number of speakers (1211 vox1, 5994 vox2, 7205 vox1+2). For voxceleb1 + voxceleb2, see preparation instructions above. + +The system trains a TDNN for speaker embeddings coupled with a speaker-id classifier. The speaker-id accuracy should be around 97-98% for both voxceleb1 and voxceleb2. The backbone for TDNN can vary from: +* [X-Vector, proposed in early 2018](https://danielpovey.com/files/2018_icassp_xvectors.pdf) +* ResNet X-Vector +* [ECAPA-TDNN](https://arxiv.org/abs/2005.07143) + +Below we show the example of doing speaker verification using ECAPA-TDNN. + +## Speaker verification using ECAPA-TDNN embeddings +Run the following command to train speaker embeddings using ECAPA-TDNN + +`python train_speaker_embeddings.py hparams/train_ecapa_tdnn.yaml` + +The speaker-id accuracy should be around 98-99% for both voxceleb1 and voxceleb2. + +After training the speaker embeddings, it is possible to perform speaker verification using cosine similarity. You can run it with the following command: + +`python speaker_verification_cosine.py hparams/verification_ecapa.yaml` + +This system achieves: +- EER = 0.80% (voxceleb1 + voxceleb2) with s-norm +- EER = 0.90% (voxceleb1 + voxceleb2) without s-norm + +These results are all obtained with the official verification split of voxceleb1 (veri\_test2.txt) + +Below you can find the results from model trained on VoxCeleb 2 dev set and tested on VoxSRC derivatives. Note, however, that the models are trained under a very limited condition (single GPU so batch_size=2) and no score normalization at test time.
+ + +## Speaker verification with PLDA +After training the speaker embeddings, it is possible to perform speaker verification using PLDA. You can run it with the following command. If you didn't train the speaker embedding before, we automatically download the xvector model from the web. +``` +python speaker_verification_plda.py hparams/verification_plda_xvector.yaml +``` + +## Performance summary +Below results are all obtained with the official verification split of voxceleb1 (veri\_test2_.txt). Note that if the model is trained with VoxCeleb1 training data, it cannot be evaluated on VoxCeleb1-{E,H} because these two evaluation sets are part of the foremost. + +[Speaker verification results (in EER) on VoxCeleb1-O, with score normalization] | System | Dataset | EER | Model/Log Link | |-----------------|------------|------| -----| -| Xvector + PLDA | VoxCeleb 1,2 | 3.23% | https://drive.google.com/drive/folders/1TLKByLRkgkUiDV2coMrIh-OMHANrnOl-?usp=sharing | -| ECAPA-TDNN | Voxceleb 1,2 | 0.80% | https://drive.google.com/file/d/1EziERcHD_gyE6qc8DbxPKU1isVf7pbNl/view?usp=sharing | +| Xvector + PLDA | VoxCeleb 1,2 | 3.23% | https://www.dropbox.com/sh/ab1ma1lnmskedo8/AADsmgOLPdEjSF6wV3KyhNG1a?dl=0 | +| ECAPA-TDNN | VoxCeleb 1,2 | 0.80% | https://www.dropbox.com/sh/ab1ma1lnmskedo8/AADsmgOLPdEjSF6wV3KyhNG1a?dl=0 | +| ResNet TDNN | VoxCeleb 1,2 | 0.95% | https://www.dropbox.com/sh/ab1ma1lnmskedo8/AADsmgOLPdEjSF6wV3KyhNG1a?dl=0 | -[Speaker verification results with Voxceleb 2 development set, no score normalization ] +[Speaker verification results (in EER), no score normalization] | System | Dataset | VoxCeleb1-O | VoxCeleb1-E | VoxCeleb1-H | Model/Log Link | -| ECAPA-TDNN | VoxCeleb 2 | 1.30% | 1.98% | 3.62% | (to be updated) | +|-----------------|------------|------|------|------| -----| +| ECAPA-TDNN | VoxCeleb 1,2 | 0.90% | - | - | https://www.dropbox.com/sh/ab1ma1lnmskedo8/AADsmgOLPdEjSF6wV3KyhNG1a?dl=0 | +| ECAPA-TDNN | VoxCeleb 2 | 1.30% | 1.98% | 3.62% | (to be 
updated) | +| ResNet TDNN | VoxCeleb 1,2 | 1.05% | - | - | https://www.dropbox.com/sh/ab1ma1lnmskedo8/AADsmgOLPdEjSF6wV3KyhNG1a?dl=0 | + + +## PreTrained Model + Easy-Inference +You can perform the easy-inference of various models provided on [HuggingFace](https://huggingface.co) via the links below. They are specified in the hyperparameter yaml files as well. +**NOTE: If you would like to store the embeddings for future use, please check `extract_speaker_embeddings.py` for the gist.** + +| System | Hugging Face model link | +|-----------------|-------------------------| +| Xvector | https://huggingface.co/speechbrain/spkrec-xvect-voxceleb | +| ECAPA-TDNN | https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb | +| ResNet TDNN | https://huggingface.co/speechbrain/spkrec-resnet-voxceleb | -# PreTrained Model + Easy-Inference -You can find the pre-trained ECAPA-TDNN model with an easy-inference function on [HuggingFace](https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb). -You can find the pre-trained xvector models as well on [HuggingFace](https://huggingface.co/speechbrain/spkrec-xvect-voxceleb) # **About SpeechBrain** - Website: https://speechbrain.github.io/ @@ -96,6 +119,15 @@ You can find the pre-trained xvector models as well on [HuggingFace](https://hug Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/VoxCeleb/SpeakerRec/extract_speaker_embeddings.py b/recipes/VoxCeleb/SpeakerRec/extract_speaker_embeddings.py new file mode 100644 index 0000000000..98d3d91e4c --- /dev/null +++ b/recipes/VoxCeleb/SpeakerRec/extract_speaker_embeddings.py @@ -0,0 +1,139 @@ +#!/usr/bin/python3 +"""Recipe for extracting speaker embeddings for other purpose. This +is more like a script that copes with modern usage of speaker embed- +ding vectors. + +The input of this script is a training list like below +(we recommend having full absolute path for wav paths) +---------- +utt1 $wav1_path +... 
+uttN $wavN_path + +The extracted embeddings are stored as numpy files in the output +folder. The name of each numpy file is its utterance name. +NOTE: This may result in a large number of files in a single folder. + +To run this recipe, use the following command: +> python extract_speaker_embeddings.py {input_training_list} {output_folder} {hyperparameter_file} + +Using your own hyperparameter file or one of the following: + hparams/verification_ecapa.yaml (for the ecapa+tdnn system) + hparams/verification_resnet.yaml (for the resnet tdnn system) + hparams/verification_plda_xvector.yaml (for the xvector system) + +Author + * Mirco Ravanelli 2020 + * Hwidong Na 2020 + * Nauman Dawalatabad 2020 + * Xuechen Liu 2023 +""" + +import os +import sys + +import numpy as np +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.utils.data_utils import download_file +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger + + +def compute_embeddings_single(wavs, wav_lens, params): + """Compute speaker embeddings. + + Arguments + --------- + wavs : Torch.Tensor + torch.Tensor containing the speech waveform (batch, time). + Make sure the sample rate is fs=16000 Hz. + wav_lens: Torch.Tensor + torch.Tensor containing the relative length for each sentence + in the length (e.g., [0.8 0.6 1.0]) + params: dict + The parameter files storing info about model, data, etc + + Returns + ------- + Embeddings + """ + with torch.no_grad(): + feats = params["compute_features"](wavs) + feats = params["mean_var_norm"](feats, wav_lens) + embeddings = params["embedding_model"](feats, wav_lens) + return embeddings.squeeze(1) + + +def compute_embeddings(params, wav_scp, outdir): + """Compute speaker embeddings. 
+ + Arguments + --------- + params: dict + The parameter files storing info about model, data, etc + wav_scp : str + The wav.scp file in Kaldi, in the form of "$utt $wav_path" + outdir: str + The output directory where we store the embeddings in per- + numpy manner. + """ + with torch.no_grad(): + with open(wav_scp, encoding="utf-8") as wavscp: + for line in wavscp: + utt, wav_path = line.split() + out_file = f"{outdir}/{utt}.npy" + wav, _ = audio_io.load(wav_path) + data = wav.transpose(0, 1).squeeze(1).unsqueeze(0) + lens = torch.Tensor([data.shape[1]]) + data, lens = ( + data.to(run_opts["device"]), + lens.to(run_opts["device"]), + ) + embedding = compute_embeddings_single( + data, lens, params + ).squeeze() + + out_embedding = embedding.detach().cpu().numpy() + np.save(out_file, out_embedding) + del out_embedding, wav, data + + +if __name__ == "__main__": + in_list = sys.argv[1] + out_dir = sys.argv[2] + os.makedirs(out_dir, exist_ok=True) + + # Logger setup + logger = get_logger(__name__) + current_dir = os.path.dirname(os.path.abspath(__file__)) + sys.path.append(os.path.dirname(current_dir)) + + # Load hyperparameters file with command-line overrides + params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[3:]) + if "data_folder:" not in overrides: + # By default it is a PLACEHOLDER (we need to replace it with a dummy path) + overrides += "\ndata_folder: ." 
+ if "output_folder:" not in overrides: + # Ensure to put the saved model in the output folder + overrides += f"\noutput_folder: {out_dir}" + + with open(params_file, encoding="utf-8") as fin: + params = load_hyperpyyaml(fin, overrides) + run_on_main(params["pretrainer"].collect_files) + params["pretrainer"].load_collected(run_opts["device"]) + params["embedding_model"].eval() + params["embedding_model"].to(run_opts["device"]) + + # Download verification list (to exclude verification sentences from train) + veri_file_path = os.path.join( + params["save_folder"], os.path.basename(params["verification_file"]) + ) + download_file(params["verification_file"], veri_file_path) + + print("Begin embedding extraction......") + compute_embeddings(params, in_list, out_dir) + print(f"The embeddings have been extracted and stored at {out_dir}") diff --git a/recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn.yaml b/recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn.yaml index 8a783ab96c..eb4eeb66a9 100644 --- a/recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn.yaml +++ b/recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn.yaml @@ -5,18 +5,23 @@ # Basic parameters seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/ecapa_augment/ save_folder: !ref /save train_log: !ref /train_log.txt +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 + # Data files data_folder: !PLACEHOLDER # e.g. /path/to/Voxceleb +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. 
train_annotation: !ref /train.csv valid_annotation: !ref /dev.csv - -# Folder to extract data augmentation files -rir_folder: !ref # Change it if needed +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv # Use the following links for the official voxceleb splits: # VoxCeleb1 (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt @@ -26,6 +31,7 @@ rir_folder: !ref # Change it if needed # Therefore you cannot use any files in VoxCeleb1 for training if you are using these lists for testing. verification_file: https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt +split_ratio: [90, 10] skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min @@ -50,10 +56,11 @@ deltas: False # Number of speakers out_n_neurons: 7205 #1211 for vox1 # 5994 for vox2, 7205 for vox1+vox2 +num_workers: 4 dataloader_options: batch_size: !ref shuffle: !ref - num_workers: 2 + num_workers: !ref # Functions compute_features: !new:speechbrain.lobes.features.Fbank @@ -78,56 +85,66 @@ classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref - -augment_wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [100] - -augment_speed: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -add_rev: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 1.0 - noise_prob: 0.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_rev_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 1.0 - noise_prob: 1.0 - 
noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - - -# Definition of the augmentation pipeline. -# If concat_augment = False, the augmentation techniques are applied -# in sequence. If concat_augment = True, all the augmented signals -# # are concatenated in a single big batch. - -augment_pipeline: [ - !ref , - !ref , - !ref , - !ref , - !ref -] -concat_augment: True +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: True + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] mean_var_norm: !new:speechbrain.processing.features.InputNormalization norm_type: sentence @@ -135,11 +152,6 @@ mean_var_norm: !new:speechbrain.processing.features.InputNormalization modules: compute_features: !ref - augment_wavedrop: !ref - augment_speed: !ref - add_rev: !ref - add_noise: !ref - add_rev_noise: !ref embedding_model: !ref classifier: !ref mean_var_norm: !ref diff --git a/recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn_mel_spec.yaml b/recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn_mel_spec.yaml new file mode 100644 index 0000000000..ab2bc74c08 --- /dev/null +++ b/recipes/VoxCeleb/SpeakerRec/hparams/train_ecapa_tdnn_mel_spec.yaml @@ -0,0 +1,208 @@ +# ################################ +# Model: Speaker identification with ECAPA +# Authors: Hwidong Na & Mirco Ravanelli +# ################################ + +# Basic parameters +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/ecapa_augment/ +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data files +data_folder: !PLACEHOLDER # e.g. 
/path/to/Voxceleb +train_annotation: !ref /train.csv +valid_annotation: !ref /dev.csv +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv +data_folder_noise: !ref /noise +data_folder_rir: !ref /rir +# Folder to extract data augmentation files +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 + + +# Use the following links for the official voxceleb splits: +# VoxCeleb1 (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt +# VoxCeleb1-H (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/list_test_hard2.txt +# VoxCeleb1-E (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/list_test_all2.txt. +# VoxCeleb1-E and VoxCeleb1-H lists are drawn from the VoxCeleb1 training set. +# Therefore you cannot use any files in VoxCeleb1 for training if you are using these lists for testing. 
+verification_file: https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt + +split_ratio: [90, 10] +skip_prep: False +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 10 +batch_size: 32 +lr: 0.001 +base_lr: 0.00000001 +max_lr: !ref +step_size: 65000 +sample_rate: 16000 +sentence_len: 3.0 # seconds +shuffle: True +random_chunk: True + +# Feature parameters +hop_length: 256 +win_length: 1024 +n_mel_channels: 80 +n_fft: 1024 +mel_fmin: 0.0 +mel_fmax: 8000.0 +mel_normalized: False +power: 1 +norm: "slaney" +mel_scale: "slaney" +dynamic_range_compression: True + +# Number of speakers +out_n_neurons: 7205 #1211 for vox1 # 5994 for vox2, 7205 for vox1+vox2 + +num_workers: 4 +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: !ref + +# Functions +use_tacotron2_mel_spec: True + +compute_features: !name:speechbrain.lobes.models.Tacotron2.mel_spectogram + sample_rate: !ref + hop_length: !ref + win_length: !ref + n_fft: !ref + n_mels: !ref + f_min: !ref + f_max: !ref + power: !ref + normalized: !ref + norm: !ref + mel_scale: !ref + compression: !ref + +# Modules +embedding_model: !new:speechbrain.lobes.models.ECAPA_TDNN.ECAPA_TDNN + input_size: !ref + channels: [1024, 1024, 1024, 1024, 3072] + kernel_sizes: [5, 3, 3, 3, 1] + dilations: [1, 2, 3, 4, 1] + groups: [1, 1, 1, 1, 1] + attention_channels: 128 + lin_neurons: 192 + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 192 + out_neurons: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + 
csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: True + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +mean_var_norm: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + std_norm: False + +modules: + embedding_model: !ref + classifier: !ref + mean_var_norm: !ref + +compute_cost: !new:speechbrain.nnet.losses.LogSoftmaxWrapper + loss_fn: !new:speechbrain.nnet.losses.AdditiveAngularMargin + margin: 0.2 + scale: 30 + +# compute_error: !name:speechbrain.nnet.losses.classification_error + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.CyclicLRScheduler + base_lr: !ref + max_lr: !ref + step_size: !ref + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + 
save_file: !ref + +error_stats: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.classification_error + reduction: batch + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + embedding_model: !ref + classifier: !ref + normalizer: !ref + counter: !ref + lr_annealing: !ref diff --git a/recipes/VoxCeleb/SpeakerRec/hparams/train_resnet.yaml b/recipes/VoxCeleb/SpeakerRec/hparams/train_resnet.yaml new file mode 100644 index 0000000000..3bb593c633 --- /dev/null +++ b/recipes/VoxCeleb/SpeakerRec/hparams/train_resnet.yaml @@ -0,0 +1,190 @@ +# ################################ +# Model: Speaker verification with ResNet +# Authors: Mickael Rouvier 2022 +# Xuechen Liu 2023 +# ################################ + +# Basic parameters +seed: 1986 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/resnet/ +save_folder: !ref /save +train_log: !ref /train_log.txt + +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 + +# Data files +data_folder: !PLACEHOLDER # e.g. /path/to/Voxceleb +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. 
+train_annotation: !ref /train.csv +valid_annotation: !ref /dev.csv +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv + +# Use the following links for the official voxceleb splits: +# VoxCeleb1 (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt +# VoxCeleb1-H (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/list_test_hard2.txt +# VoxCeleb1-E (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/list_test_all2.txt. +# VoxCeleb1-E and VoxCeleb1-H lists are drawn from the VoxCeleb1 training set. +# Therefore you cannot use any files in VoxCeleb1 for training if you are using these lists for testing. +verification_file: https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt + +split_ratio: [90, 10] +skip_prep: False +ckpt_interval_minutes: 15 # save checkpoint every N min + +# Training parameters +number_of_epochs: 10 +batch_size: 32 +lr: 0.001 +base_lr: 0.00000001 +max_lr: !ref +step_size: 65000 +sample_rate: 16000 +sentence_len: 3.0 # seconds +shuffle: True +random_chunk: True + +# Feature parameters +n_mels: 80 +left_frames: 0 +right_frames: 0 +deltas: False + +# Number of speakers +# 1211 for vox1, 5994 for vox2, 7205 for vox1+vox2 +out_n_neurons: 7205 + +num_workers: 4 +dataloader_options: + batch_size: !ref + shuffle: !ref + num_workers: !ref + +# Functions +compute_features: !new:speechbrain.lobes.features.Fbank + n_mels: !ref + left_frames: !ref + right_frames: !ref + deltas: !ref + +embedding_model: !new:speechbrain.lobes.models.ResNet.ResNet + input_size: !ref + channels: [128, 128, 256, 256] + strides: [1, 2, 2, 2] + block_sizes: [3, 4, 6, 3] + lin_neurons: 256 + +classifier: !new:speechbrain.lobes.models.ECAPA_TDNN.Classifier + input_size: 256 + out_neurons: !ref + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences 
for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: True + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +mean_var_norm: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + std_norm: False + +modules: + compute_features: !ref + embedding_model: !ref + classifier: !ref + mean_var_norm: !ref + +compute_cost: !new:speechbrain.nnet.losses.LogSoftmaxWrapper + loss_fn: !new:speechbrain.nnet.losses.AdditiveAngularMargin + margin: 0.2 + scale: 30 + +# compute_error: !name:speechbrain.nnet.losses.classification_error + +opt_class: !name:torch.optim.Adam + lr: !ref + weight_decay: 0.000002 + +lr_annealing: !new:speechbrain.nnet.schedulers.CyclicLRScheduler + base_lr: !ref + max_lr: !ref + step_size: !ref + +# Logging + checkpoints +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_stats: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.classification_error + reduction: batch + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + embedding_model: !ref + classifier: !ref + normalizer: !ref + counter: !ref + lr_annealing: !ref diff --git a/recipes/VoxCeleb/SpeakerRec/hparams/train_x_vectors.yaml b/recipes/VoxCeleb/SpeakerRec/hparams/train_x_vectors.yaml index 49e33d05ea..7b8f7f2f65 100644 --- a/recipes/VoxCeleb/SpeakerRec/hparams/train_x_vectors.yaml +++ b/recipes/VoxCeleb/SpeakerRec/hparams/train_x_vectors.yaml @@ -5,18 +5,23 @@ # Basic parameters seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything 
[!ref ] output_folder: !ref results/xvect_augment/ save_folder: !ref /save train_log: !ref /train_log.txt +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 + # Data files data_folder: !PLACEHOLDER # e.g. /path/to/Voxceleb +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. train_annotation: !ref /train.csv valid_annotation: !ref /dev.csv - -# Folder to extract data augmentation files -rir_folder: !ref # Change it if needed +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv # Use the following links for the official voxceleb splits: # VoxCeleb1 (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt @@ -26,6 +31,7 @@ rir_folder: !ref # Change it if needed # Therefore you cannot use any files in VoxCeleb1 for training if you are using these lists for testing. 
verification_file: https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt +split_ratio: [90, 10] skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min @@ -48,11 +54,13 @@ deltas: False # Number of speakers out_n_neurons: 7205 #1211 for vox1 # 5994 for vox2, 7205 for vox1+vox2 +emb_dim: 512 +num_workers: 4 dataloader_options: batch_size: !ref shuffle: !ref - num_workers: 0 + num_workers: !ref # Functions compute_features: !new:speechbrain.lobes.features.Fbank @@ -68,67 +76,78 @@ embedding_model: !new:speechbrain.lobes.models.Xvector.Xvector tdnn_channels: [512, 512, 512, 512, 1500] tdnn_kernel_sizes: [5, 3, 3, 1, 1] tdnn_dilations: [1, 2, 3, 1, 1] - lin_neurons: 512 + lin_neurons: !ref classifier: !new:speechbrain.lobes.models.Xvector.Classifier - input_shape: [null, null, 512] + input_shape: [null, null, !ref ] activation: !name:torch.nn.LeakyReLU lin_blocks: 1 - lin_neurons: 512 + lin_neurons: !ref out_neurons: !ref epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref - -augment_wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [100] - -augment_speed: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -add_rev: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 1.0 - noise_prob: 0.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_rev_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 1.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - - -# Definition of the augmentation pipeline. 
-# If concat_augment = False, the augmentation techniques are applied -# in sequence. If concat_augment = True, all the augmented signals -# are concatenated in a single big batch. -augment_pipeline: [ - !ref , - !ref , - !ref , - !ref , - !ref -] -concat_augment: True +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: True + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] mean_var_norm: !new:speechbrain.processing.features.InputNormalization norm_type: sentence @@ -136,11 +155,6 @@ mean_var_norm: !new:speechbrain.processing.features.InputNormalization modules: compute_features: !ref - augment_wavedrop: !ref - augment_speed: !ref - add_rev: !ref - add_noise: !ref - add_rev_noise: !ref embedding_model: !ref classifier: !ref mean_var_norm: !ref diff --git a/recipes/VoxCeleb/SpeakerRec/hparams/verification_ecapa.yaml b/recipes/VoxCeleb/SpeakerRec/hparams/verification_ecapa.yaml index c7baeaeaf0..85d343f51b 100755 --- a/recipes/VoxCeleb/SpeakerRec/hparams/verification_ecapa.yaml +++ b/recipes/VoxCeleb/SpeakerRec/hparams/verification_ecapa.yaml @@ -5,13 +5,12 @@ # ################################ seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Folders and train_log file data_folder: !PLACEHOLDER #e.g., /localscratch/voxceleb1 -output_folder: results/voxceleb1_2/speaker_verification_ecapa_big_vox2only +output_folder: results/voxceleb1_2/speaker_verification_ecapa/ save_folder: !ref /save/ -device: 'cuda:0' # Use the following links for the official voxceleb splits: # VoxCeleb1 (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt @@ -31,6 +30,8 @@ pretrain_path: speechbrain/spkrec-ecapa-voxceleb train_data: !ref /train.csv enrol_data: !ref /enrol.csv test_data: !ref /test.csv +split_ratio: [90, 10] +skip_prep: False batch_size: 8 score_norm: s-norm # z-norm t-norm s-norm (uncomment to 
enable it) diff --git a/recipes/VoxCeleb/SpeakerRec/hparams/verification_plda_xvector.yaml b/recipes/VoxCeleb/SpeakerRec/hparams/verification_plda_xvector.yaml index 21a403cd74..a4ad3d3fa4 100755 --- a/recipes/VoxCeleb/SpeakerRec/hparams/verification_plda_xvector.yaml +++ b/recipes/VoxCeleb/SpeakerRec/hparams/verification_plda_xvector.yaml @@ -4,13 +4,12 @@ # ################################ seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Folders and train_log file data_folder: !PLACEHOLDER output_folder: results/voxceleb1_2/speaker_verification_plda_xvectors save_folder: !ref /save/ -device: 'cuda:0' # Use the following links for the official voxceleb splits: # VoxCeleb1 (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt @@ -30,6 +29,9 @@ pretrain_path: speechbrain/spkrec-xvect-voxceleb train_data: !ref /train.csv enrol_data: !ref /enrol.csv test_data: !ref /test.csv +split_ratio: [90, 10] +seg_dur: 3 +skip_prep: False batch_size: 128 n_train_snts: 300000 # used for normalization stats @@ -37,6 +39,7 @@ n_train_snts: 300000 # used for normalization stats # Feature parameters n_mels: 24 emb_dim: 512 +rank_f: 100 # Dataloader options train_dataloader_opts: @@ -66,7 +69,7 @@ embedding_model: !new:speechbrain.lobes.models.Xvector.Xvector lin_neurons: !ref compute_plda: !new:speechbrain.processing.PLDA_LDA.PLDA - rank_f: 100 + rank_f: !ref nb_iter: 10 scaling_factor: 0.05 diff --git a/recipes/VoxCeleb/SpeakerRec/hparams/verification_resnet.yaml b/recipes/VoxCeleb/SpeakerRec/hparams/verification_resnet.yaml new file mode 100755 index 0000000000..e9af8cae78 --- /dev/null +++ b/recipes/VoxCeleb/SpeakerRec/hparams/verification_resnet.yaml @@ -0,0 +1,77 @@ +# ################################ +# Model: Speaker Verification Baseline +# Authors: Hwidong Na 2020 +# Mirco Ravanelli 2020 +# Xuechen Liu 2023 +# ################################ + +seed: 1984 +__set_seed: 
!apply:speechbrain.utils.seed_everything [!ref ] + +# Folders and train_log file +data_folder: !PLACEHOLDER #e.g., /localscratch/voxceleb1 +output_folder: results/resnet/ +save_folder: !ref /save/ + +# Use the following links for the official voxceleb splits: +# VoxCeleb1 (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt +# VoxCeleb1-H (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/list_test_hard2.txt +# VoxCeleb1-E (cleaned): https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/list_test_all2.txt. +# VoxCeleb1-E and VoxCeleb1-H lists are drawn from the VoxCeleb1 training set. +# Therefore you cannot use any files in VoxCeleb1 for training if you are using these lists for testing. +verification_file: https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt + +# Here, the pretrained embedding model trained with train_speaker_embeddings.py hparams/train_ecapa_tdnn.yaml +# is downloaded from the speechbrain HuggingFace repository. +# However, a local path pointing to a directory containing your checkpoints may also be specified +# instead (see pretrainer below) +pretrain_path: underdogliu1005/spkrec-resnet-voxceleb + +# csv files +train_data: !ref /train.csv +enrol_data: !ref /enrol.csv +test_data: !ref /test.csv +split_ratio: [90, 10] +skip_prep: False + +batch_size: 8 +#score_norm: s-norm # z-norm t-norm s-norm (uncomment to enable it) +cohort_size: 20000 # amount of imposter utterances in normalization cohort +n_train_snts: 400000 # used for normalization stats + +# Feature parameters +n_mels: 80 +# left_frames: 0 +# right_frames: 0 +# deltas: False + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + +enrol_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +compute_features: !new:speechbrain.lobes.features.Fbank + n_mels: !ref + +mean_var_norm: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + std_norm: False + +embedding_model: 
!new:speechbrain.lobes.models.ResNet.ResNet + input_size: !ref + channels: [128, 128, 256, 256] + strides: [1, 2, 2, 2] + block_sizes: [3, 4, 6, 3] + lin_neurons: 256 + +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + embedding_model: !ref + paths: + embedding_model: !ref /embedding_model.ckpt diff --git a/recipes/VoxCeleb/SpeakerRec/speaker_verification_cosine.py b/recipes/VoxCeleb/SpeakerRec/speaker_verification_cosine.py index 4eaaa72da9..c974245a30 100755 --- a/recipes/VoxCeleb/SpeakerRec/speaker_verification_cosine.py +++ b/recipes/VoxCeleb/SpeakerRec/speaker_verification_cosine.py @@ -10,18 +10,22 @@ Authors * Hwidong Na 2020 * Mirco Ravanelli 2020 + * Xuechen Liu 2023 """ + import os import sys + import torch -import logging -import torchaudio -import speechbrain as sb -from tqdm.contrib import tqdm from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import EER, minDCF +from tqdm import tqdm + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import download_file from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger +from speechbrain.utils.metric_stats import EER, minDCF # Compute embeddings from the waveforms @@ -30,12 +34,16 @@ def compute_embedding(wavs, wav_lens): Arguments --------- - wavs : Torch.Tensor - Tensor containing the speech waveform (batch, time). + wavs : torch.Tensor + torch.Tensor containing the speech waveform (batch, time). Make sure the sample rate is fs=16000 Hz. 
- wav_lens: Torch.Tensor - Tensor containing the relative length for each sentence + wav_lens : torch.Tensor + torch.Tensor containing the relative length for each sentence in the length (e.g., [0.8 0.6 1.0]) + + Returns + ------- + embeddings : torch.Tensor """ with torch.no_grad(): feats = params["compute_features"](wavs) @@ -52,7 +60,7 @@ def compute_embedding_loop(data_loader): with torch.no_grad(): for batch in tqdm(data_loader, dynamic_ncols=True): - batch = batch.to(params["device"]) + batch = batch.to(run_opts["device"]) seg_ids = batch.id wavs, lens = batch.sig @@ -62,7 +70,10 @@ def compute_embedding_loop(data_loader): found = True if not found: continue - wavs, lens = wavs.to(params["device"]), lens.to(params["device"]) + wavs, lens = ( + wavs.to(run_opts["device"]), + lens.to(run_opts["device"]), + ) emb = compute_embedding(wavs, lens).unsqueeze(1) for i, seg_id in enumerate(seg_ids): embedding_dict[seg_id] = emb[i].detach().clone() @@ -76,7 +87,7 @@ def get_verification_scores(veri_test): negative_scores = [] save_file = os.path.join(params["output_folder"], "scores.txt") - s_file = open(save_file, "w") + s_file = open(save_file, "w", encoding="utf-8") # Cosine similarity initialization similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6) @@ -86,7 +97,6 @@ def get_verification_scores(veri_test): train_cohort = torch.stack(list(train_dict.values())) for i, line in enumerate(veri_test): - # Reading verification file (enrol_file test_file label) lab_pair = int(line.split(" ")[0].rstrip().split(".")[0].strip()) enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip() @@ -153,7 +163,8 @@ def dataio_prep(params): # Train data (used for normalization) train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=params["train_data"], replacements={"data_root": data_folder}, + csv_path=params["train_data"], + replacements={"data_root": data_folder}, ) train_data = train_data.filtered_sorted( sort_key="duration", select_n=params["n_train_snts"] 
@@ -161,13 +172,15 @@ def dataio_prep(params): # Enrol data enrol_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=params["enrol_data"], replacements={"data_root": data_folder}, + csv_path=params["enrol_data"], + replacements={"data_root": data_folder}, ) enrol_data = enrol_data.filtered_sorted(sort_key="duration") # Test data test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=params["test_data"], replacements={"data_root": data_folder}, + csv_path=params["test_data"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -180,9 +193,7 @@ def audio_pipeline(wav, start, stop): start = int(start) stop = int(stop) num_frames = stop - start - sig, fs = torchaudio.load( - wav, num_frames=num_frames, frame_offset=start - ) + sig, fs = audio_io.load(wav, num_frames=num_frames, frame_offset=start) sig = sig.transpose(0, 1).squeeze(1) return sig @@ -207,16 +218,16 @@ def audio_pipeline(wav, start, stop): if __name__ == "__main__": # Logger setup - logger = logging.getLogger(__name__) + logger = get_logger(__name__) current_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.dirname(current_dir)) # Load hyperparameters file with command-line overrides params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:]) - with open(params_file) as fin: + with open(params_file, encoding="utf-8") as fin: params = load_hyperpyyaml(fin, overrides) - # Download verification list (to exlude verification sentences from train) + # Download verification list (to exclude verification sentences from train) veri_file_path = os.path.join( params["save_folder"], os.path.basename(params["verification_file"]) ) @@ -237,11 +248,12 @@ def audio_pipeline(wav, start, stop): save_folder=params["save_folder"], verification_pairs_file=veri_file_path, splits=["train", "dev", "test"], - split_ratio=[90, 10], + split_ratio=params["split_ratio"], seg_dur=3.0, - 
source=params["voxceleb_source"] - if "voxceleb_source" in params - else None, + skip_prep=params["skip_prep"], + source=( + params["voxceleb_source"] if "voxceleb_source" in params else None + ), ) # here we create the datasets objects as well as tokenization and encoding @@ -250,9 +262,9 @@ def audio_pipeline(wav, start, stop): # We download the pretrained LM from HuggingFace (or elsewhere depending on # the path given in the YAML file). The tokenizer is loaded at the same time. run_on_main(params["pretrainer"].collect_files) - params["pretrainer"].load_collected(params["device"]) + params["pretrainer"].load_collected() params["embedding_model"].eval() - params["embedding_model"].to(params["device"]) + params["embedding_model"].to(run_opts["device"]) # Computing enrollment and test embeddings logger.info("Computing enroll/test embeddings...") @@ -267,7 +279,7 @@ def audio_pipeline(wav, start, stop): # Compute the EER logger.info("Computing EER..") # Reading standard verification split - with open(veri_file_path) as f: + with open(veri_file_path, encoding="utf-8") as f: veri_test = [line.rstrip() for line in f] positive_scores, negative_scores = get_verification_scores(veri_test) diff --git a/recipes/VoxCeleb/SpeakerRec/speaker_verification_plda.py b/recipes/VoxCeleb/SpeakerRec/speaker_verification_plda.py index d78b4c0430..1382293f5a 100755 --- a/recipes/VoxCeleb/SpeakerRec/speaker_verification_plda.py +++ b/recipes/VoxCeleb/SpeakerRec/speaker_verification_plda.py @@ -12,21 +12,25 @@ """ import os +import pickle import sys -import torch -import torchaudio -import logging -import speechbrain as sb + import numpy -import pickle -from tqdm.contrib import tqdm +import torch from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.metric_stats import EER, minDCF -from speechbrain.processing.PLDA_LDA import StatObject_SB -from speechbrain.processing.PLDA_LDA import Ndx -from speechbrain.processing.PLDA_LDA import fast_PLDA_scoring +from tqdm import tqdm + 
+import speechbrain as sb +from speechbrain.dataio import audio_io +from speechbrain.processing.PLDA_LDA import ( + Ndx, + StatObject_SB, + fast_PLDA_scoring, +) from speechbrain.utils.data_utils import download_file from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger +from speechbrain.utils.metric_stats import EER, minDCF # Compute embeddings from the waveforms @@ -35,15 +39,19 @@ def compute_embeddings(wavs, wav_lens): Arguments --------- - wavs : Torch.Tensor - Tensor containing the speech waveform (batch, time). + wavs : torch.Tensor + torch.Tensor containing the speech waveform (batch, time). Make sure the sample rate is fs=16000 Hz. - wav_lens: Torch.Tensor - Tensor containing the relative length for each sentence + wav_lens : torch.Tensor + torch.Tensor containing the relative length for each sentence in the length (e.g., [0.8 0.6 1.0]) + + Returns + ------- + embeddings : torch.Tensor """ - wavs = wavs.to(params["device"]) - wav_lens = wav_lens.to(params["device"]) + wavs = wavs.to(run_opts["device"]) + wav_lens = wav_lens.to(run_opts["device"]) with torch.no_grad(): feats = params["compute_features"](wavs) feats = params["mean_var_norm"](feats, wav_lens) @@ -77,7 +85,7 @@ def emb_computation_loop(split, set_loader, stat_file): modelset = numpy.array(modelset, dtype="|O") segset = numpy.array(segset, dtype="|O") - # Intialize variables for start, stop and stat0 + # Initialize variables for start, stop and stat0 s = numpy.array([None] * embeddings.shape[0]) b = numpy.array([[1.0]] * embeddings.shape[0]) @@ -111,7 +119,7 @@ def verification_performance(scores_plda): labels = [] positive_scores = [] negative_scores = [] - for line in open(veri_file_path): + for line in open(veri_file_path, encoding="utf-8"): lab = int(line.split(" ")[0].rstrip().split(".")[0].strip()) enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip() test_id = line.split(" ")[2].rstrip().split(".")[0].strip() @@ -156,7 +164,8 @@ def 
dataio_prep(params): # Train data (used for normalization) train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=params["train_data"], replacements={"data_root": data_folder}, + csv_path=params["train_data"], + replacements={"data_root": data_folder}, ) train_data = train_data.filtered_sorted( sort_key="duration", select_n=params["n_train_snts"] @@ -164,13 +173,15 @@ def dataio_prep(params): # Enrol data enrol_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=params["enrol_data"], replacements={"data_root": data_folder}, + csv_path=params["enrol_data"], + replacements={"data_root": data_folder}, ) enrol_data = enrol_data.filtered_sorted(sort_key="duration") # Test data test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=params["test_data"], replacements={"data_root": data_folder}, + csv_path=params["test_data"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -183,9 +194,7 @@ def audio_pipeline(wav, start, stop): start = int(start) stop = int(stop) num_frames = stop - start - sig, fs = torchaudio.load( - wav, num_frames=num_frames, frame_offset=start - ) + sig, fs = audio_io.load(wav, num_frames=num_frames, frame_offset=start) sig = sig.transpose(0, 1).squeeze(1) return sig @@ -209,18 +218,17 @@ def audio_pipeline(wav, start, stop): if __name__ == "__main__": - # Logger setup - logger = logging.getLogger(__name__) + logger = get_logger(__name__) current_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.dirname(current_dir)) # Load hyperparameters file with command-line overrides params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:]) - with open(params_file) as fin: + with open(params_file, encoding="utf-8") as fin: params = load_hyperpyyaml(fin, overrides) - # Download verification list (to exlude verification sentences from train) + # Download verification list (to exclude verification sentences from train) 
veri_file_path = os.path.join( params["save_folder"], os.path.basename(params["verification_file"]) ) @@ -242,8 +250,9 @@ def audio_pipeline(wav, start, stop): save_folder=params["save_folder"], verification_pairs_file=veri_file_path, splits=["train", "test"], - split_ratio=[90, 10], - seg_dur=3, + split_ratio=params["split_ratio"], + seg_dur=params["seg_dur"], + skip_prep=params["skip_prep"], ) # here we create the datasets objects as well as tokenization and encoding @@ -264,7 +273,7 @@ def audio_pipeline(wav, start, stop): params["pretrainer"].load_collected() params["embedding_model"].eval() - params["embedding_model"].to(params["device"]) + params["embedding_model"].to(run_opts["device"]) # Computing training embeddings (skip it of if already extracted) if not os.path.exists(xv_file): @@ -290,7 +299,7 @@ def audio_pipeline(wav, start, stop): modelset = numpy.array(modelset, dtype="|O") segset = numpy.array(segset, dtype="|O") - # Intialize variables for start, stop and stat0 + # Initialize variables for start, stop and stat0 s = numpy.array([None] * embeddings.shape[0]) b = numpy.array([[1.0]] * embeddings.shape[0]) diff --git a/recipes/VoxCeleb/SpeakerRec/train_speaker_embeddings.py b/recipes/VoxCeleb/SpeakerRec/train_speaker_embeddings.py index 83ff0ab439..47523f6d55 100755 --- a/recipes/VoxCeleb/SpeakerRec/train_speaker_embeddings.py +++ b/recipes/VoxCeleb/SpeakerRec/train_speaker_embeddings.py @@ -14,20 +14,22 @@ * Hwidong Na 2020 * Nauman Dawalatabad 2020 """ + import os -import sys import random +import sys + import torch -import torchaudio +from hyperpyyaml import load_hyperpyyaml + import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.utils.data_utils import download_file -from hyperpyyaml import load_hyperpyyaml from speechbrain.utils.distributed import run_on_main class SpeakerBrain(sb.core.Brain): - """Class for speaker embedding training" - """ + """Class for speaker embedding training""" def compute_forward(self, 
batch, stage): """Computation pipeline based on a encoder + speaker classifier. @@ -37,36 +39,19 @@ def compute_forward(self, batch, stage): batch = batch.to(self.device) wavs, lens = batch.sig - if stage == sb.Stage.TRAIN: - - # Applying the augmentation pipeline - wavs_aug_tot = [] - wavs_aug_tot.append(wavs) - for count, augment in enumerate(self.hparams.augment_pipeline): - - # Apply augment - wavs_aug = augment(wavs, lens) - - # Managing speed change - if wavs_aug.shape[1] > wavs.shape[1]: - wavs_aug = wavs_aug[:, 0 : wavs.shape[1]] - else: - zero_sig = torch.zeros_like(wavs) - zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug - wavs_aug = zero_sig - - if self.hparams.concat_augment: - wavs_aug_tot.append(wavs_aug) - else: - wavs = wavs_aug - wavs_aug_tot[0] = wavs - - wavs = torch.cat(wavs_aug_tot, dim=0) - self.n_augment = len(wavs_aug_tot) - lens = torch.cat([lens] * self.n_augment) + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, lens = self.hparams.wav_augment(wavs, lens) # Feature extraction and normalization - feats = self.modules.compute_features(wavs) + if ( + hasattr(self.hparams, "use_tacotron2_mel_spec") + and self.hparams.use_tacotron2_mel_spec + ): + feats = self.hparams.compute_features(audio=wavs) + feats = torch.transpose(feats, 1, 2) + else: + feats = self.modules.compute_features(wavs) feats = self.modules.mean_var_norm(feats, lens) # Embeddings + speaker classifier @@ -76,15 +61,14 @@ def compute_forward(self, batch, stage): return outputs, lens def compute_objectives(self, predictions, batch, stage): - """Computes the loss using speaker-id as label. 
- """ + """Computes the loss using speaker-id as label.""" predictions, lens = predictions uttid = batch.id spkid, _ = batch.spk_id_encoded # Concatenate labels (due to data augmentation) - if stage == sb.Stage.TRAIN: - spkid = torch.cat([spkid] * self.n_augment, dim=0) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + spkid = self.hparams.wav_augment.replicate_labels(spkid) loss = self.hparams.compute_cost(predictions, spkid, lens) @@ -161,9 +145,7 @@ def audio_pipeline(wav, start, stop, duration): start = int(start) stop = int(stop) num_frames = stop - start - sig, fs = torchaudio.load( - wav, num_frames=num_frames, frame_offset=start - ) + sig, fs = audio_io.load(wav, num_frames=num_frames, frame_offset=start) sig = sig.transpose(0, 1).squeeze(1) return sig @@ -183,7 +165,9 @@ def label_pipeline(spk_id): # Load or compute the label encoder (with multi-GPU DDP support) lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") label_encoder.load_or_create( - path=lab_enc_file, from_didatasets=[train_data], output_key="spk_id", + path=lab_enc_file, + from_didatasets=[train_data], + output_key="spk_id", ) # 4. 
Set output: @@ -193,7 +177,6 @@ def label_pipeline(spk_id): if __name__ == "__main__": - # This flag enables the inbuilt cudnn auto-tuner torch.backends.cudnn.benchmark = True @@ -204,10 +187,10 @@ def label_pipeline(spk_id): sb.utils.distributed.ddp_init_group(run_opts) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # Download verification list (to exlude verification sentences from train) + # Download verification list (to exclude verification sentences from train) veri_file_path = os.path.join( hparams["save_folder"], os.path.basename(hparams["verification_file"]) ) @@ -223,11 +206,13 @@ def label_pipeline(spk_id): "save_folder": hparams["save_folder"], "verification_pairs_file": veri_file_path, "splits": ["train", "dev"], - "split_ratio": [90, 10], + "split_ratio": hparams["split_ratio"], "seg_dur": hparams["sentence_len"], "skip_prep": hparams["skip_prep"], }, ) + sb.utils.distributed.run_on_main(hparams["prepare_noise_data"]) + sb.utils.distributed.run_on_main(hparams["prepare_rir_data"]) # Dataset IO prep: creating Dataset objects and proper encodings for phones train_data, valid_data, label_encoder = dataio_prep(hparams) diff --git a/recipes/VoxCeleb/voxceleb_prepare.py b/recipes/VoxCeleb/voxceleb_prepare.py index 1fef734f10..e86521c079 100644 --- a/recipes/VoxCeleb/voxceleb_prepare.py +++ b/recipes/VoxCeleb/voxceleb_prepare.py @@ -4,23 +4,22 @@ Download: http://www.robots.ox.ac.uk/~vgg/data/voxceleb/ """ -import os import csv -import logging import glob +import os import random import shutil import sys # noqa F401 + import numpy as np import torch -import torchaudio -from tqdm.contrib import tqdm -from speechbrain.dataio.dataio import ( - load_pkl, - save_pkl, -) - -logger = logging.getLogger(__name__) +from tqdm import tqdm + +from speechbrain.dataio import audio_io +from speechbrain.dataio.dataio import load_pkl, 
save_pkl +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) OPT_FILE = "opt_voxceleb_prepare.pkl" TRAIN_CSV = "train.csv" DEV_CSV = "dev.csv" @@ -75,15 +74,19 @@ def prepare_voxceleb( Speaker-wise split random_segment : bool Train random segments - skip_prep: Bool + skip_prep : bool If True, skip preparation. + Returns + ------- + None + Example ------- >>> from recipes.VoxCeleb.voxceleb1_prepare import prepare_voxceleb - >>> data_folder = 'data/VoxCeleb1/' - >>> save_folder = 'VoxData/' - >>> splits = ['train', 'dev'] + >>> data_folder = "data/VoxCeleb1/" + >>> save_folder = "VoxData/" + >>> splits = ["train", "dev"] >>> split_ratio = [90, 10] >>> prepare_voxceleb(data_folder, save_folder, splits, split_ratio) """ @@ -103,7 +106,7 @@ def prepare_voxceleb( if not os.path.exists(save_folder): os.makedirs(save_folder) - # Setting ouput files + # Setting output files save_opt = os.path.join(save_folder, OPT_FILE) save_csv_train = os.path.join(save_folder, TRAIN_CSV) save_csv_dev = os.path.join(save_folder, DEV_CSV) @@ -164,6 +167,12 @@ def skip(splits, save_folder, conf): Detects if the voxceleb data_preparation has been already done. If the preparation has been done, we can skip it. + Arguments + --------- + splits : list + save_folder : str + conf : str + Returns ------- bool @@ -203,16 +212,18 @@ def _check_voxceleb_folders(data_folders, splits): If it does not, raise an error. 
- Returns - ------- - None + Arguments + --------- + data_folders : list + List of data folder paths to check + splits : list + List of splits, "train" and/or "test" Raises ------ FileNotFoundError """ for data_folder in data_folders: - if "train" in splits: folder_vox1 = os.path.join(data_folder, "wav", "id10001") folder_vox2 = os.path.join(data_folder, "wav", "id00012") @@ -256,10 +267,9 @@ def _get_utt_split_lists( print("Getting file list...") for data_folder in data_folders: - test_lst = [ line.rstrip("\n").split(" ")[1] - for line in open(verification_pairs_file) + for line in open(verification_pairs_file, encoding="utf-8") ] test_lst = set(sorted(test_lst)) @@ -325,6 +335,8 @@ def prepare_csv(seg_dur, wav_lst, csv_file, random_segment=False, amp_th=0): Arguments --------- + seg_dur : int + Segment duration of a chunk in seconds (e.g., 3.0 seconds). wav_lst : list The list of wav files of a given data split. csv_file : str @@ -334,10 +346,6 @@ def prepare_csv(seg_dur, wav_lst, csv_file, random_segment=False, amp_th=0): amp_th: float Threshold on the average amplitude on the chunk. If under this threshold, the chunk is discarded. 
- - Returns - ------- - None """ msg = '\t"Creating csv lists in %s..."' % (csv_file) @@ -359,7 +367,7 @@ def prepare_csv(seg_dur, wav_lst, csv_file, random_segment=False, amp_th=0): audio_id = my_sep.join([spk_id, sess_id, utt_id.split(".")[0]]) # Reading the signal (to retrieve duration in seconds) - signal, fs = torchaudio.load(wav_file) + signal, fs = audio_io.load(wav_file) signal = signal.squeeze(0) if random_segment: @@ -405,7 +413,7 @@ def prepare_csv(seg_dur, wav_lst, csv_file, random_segment=False, amp_th=0): csv_output = csv_output + entry # Writing the csv lines - with open(csv_file, mode="w") as csv_f: + with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f: csv_writer = csv.writer( csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL ) @@ -423,31 +431,26 @@ def prepare_csv_enrol_test(data_folders, save_folder, verification_pairs_file): Arguments --------- - data_folder : str + data_folders : str Path of the data folders save_folder : str The directory where to store the csv files. - - Returns - ------- - None + verification_pairs_file : str + Path to the file with verification pairs. 
""" # msg = '\t"Creating csv lists in %s..."' % (csv_file) # logger.debug(msg) - csv_output_head = [ - ["ID", "duration", "wav", "start", "stop", "spk_id"] - ] # noqa E231 + csv_output_head = [["ID", "duration", "wav", "start", "stop", "spk_id"]] # noqa E231 for data_folder in data_folders: - test_lst_file = verification_pairs_file enrol_ids, test_ids = [], [] # Get unique ids (enrol and test utterances) - for line in open(test_lst_file): + for line in open(test_lst_file, encoding="utf-8"): e_id = line.split(" ")[1].rstrip().split(".")[0].strip() t_id = line.split(" ")[2].rstrip().split(".")[0].strip() enrol_ids.append(e_id) @@ -463,7 +466,7 @@ def prepare_csv_enrol_test(data_folders, save_folder, verification_pairs_file): wav = data_folder + "/wav/" + id + ".wav" # Reading the signal (to retrieve duration in seconds) - signal, fs = torchaudio.load(wav) + signal, fs = audio_io.load(wav) signal = signal.squeeze(0) audio_duration = signal.shape[0] / SAMPLERATE start_sample = 0 @@ -485,7 +488,7 @@ def prepare_csv_enrol_test(data_folders, save_folder, verification_pairs_file): csv_file = os.path.join(save_folder, ENROL_CSV) # Writing the csv lines - with open(csv_file, mode="w") as csv_f: + with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f: csv_writer = csv.writer( csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL ) @@ -499,7 +502,7 @@ def prepare_csv_enrol_test(data_folders, save_folder, verification_pairs_file): wav = data_folder + "/wav/" + id + ".wav" # Reading the signal (to retrieve duration in seconds) - signal, fs = torchaudio.load(wav) + signal, fs = audio_io.load(wav) signal = signal.squeeze(0) audio_duration = signal.shape[0] / SAMPLERATE start_sample = 0 @@ -521,7 +524,7 @@ def prepare_csv_enrol_test(data_folders, save_folder, verification_pairs_file): csv_file = os.path.join(save_folder, TEST_CSV) # Writing the csv lines - with open(csv_file, mode="w") as csv_f: + with open(csv_file, mode="w", newline="", encoding="utf-8") 
as csv_f: csv_writer = csv.writer( csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL ) diff --git a/recipes/VoxLingua107/README.md b/recipes/VoxLingua107/README.md index c2cf728ef2..9acef4bef4 100644 --- a/recipes/VoxLingua107/README.md +++ b/recipes/VoxLingua107/README.md @@ -4,7 +4,7 @@ VoxLingua107 is a speech dataset for training spoken language identification mod The dataset consists of short speech segments automatically extracted from YouTube videos and labeled according the language of the video title and description, with some post-processing steps to filter out false positives. VoxLingua107 contains data for 107 languages. The total amount of speech in the training set is 6628 hours. -The average amount of data per language is 62 hours. However, the real amount per language varies a lot. There is also a seperate development set containing 1609 speech segments from 33 languages, validated by at least two volunteers to really contain the given language. +The average amount of data per language is 62 hours. However, the real amount per language varies a lot. There is also a separate development set containing 1609 speech segments from 33 languages, validated by at least two volunteers to really contain the given language. For more information, see the paper [Jörgen Valk, Tanel Alumäe. _VoxLingua107: a Dataset for Spoken Language Recognition_. Proc. SLT 2021]. diff --git a/recipes/VoxLingua107/lang_id/README.md b/recipes/VoxLingua107/lang_id/README.md index df7efb6090..75d8b2532c 100644 --- a/recipes/VoxLingua107/lang_id/README.md +++ b/recipes/VoxLingua107/lang_id/README.md @@ -15,15 +15,18 @@ the tar files are opened in random order, the audio files in the shards are shuf and fed to the training process. This reduces the disk load during training by large margin. This is all handled by the WebDataset library. +Warning: In the metadata of this dataset, the used ISO language code for Hebrew is obsolete (should be `he` instead of `iw`). 
The ISO language code for Javanese is incorrect (should be `jv` instead of `jw`). See [issue #2396](https://github.com/speechbrain/speechbrain/issues/2396). + ## Downloading the data You have two options how to download and prepare the VoxLingua107 dataset for training the model: - - Download the VoxLingua107 language-specific zips from http://bark.phon.ioc.ee/voxlingua107 and convert them + - Download the VoxLingua107 language-specific zips from https://cs.taltech.ee/staff/tanel.alumae/data/voxlingua107/ and convert them to WebDataset format. This is the most flexible option, as it allows selecting a subset of VoxLingua107 languages, or adding new languages. It will require around 2.2 TB disk space. - - Download the pre-compiled WebDataset shards from http://bark.phon.ioc.ee/voxlingua107. It will require around 1.4T of disk space. + - Download the pre-compiled WebDataset shards from https://cs.taltech.ee/staff/tanel.alumae/data/voxlingua107/ + It would require around 1.4T of disk space but is unfortunately not available as of 2025-04-07. ### 1st option: download the VoxLingua107 zips and create the Webdataset shards @@ -35,10 +38,9 @@ Download the zips: cd /data/ mkdir voxlingua107 cd voxlingua107 -wget http://bark.phon.ioc.ee/voxlingua107/zip_urls.txt +wget https://cs.taltech.ee/staff/tanel.alumae/data/voxlingua107/zip_urls.txt cat zip_urls.txt | xargs wget --continue -wget bark.phon.ioc.ee/voxlingua107/dev.zip - +wget https://cs.taltech.ee/staff/tanel.alumae/data/voxlingua107/dev.zip ``` Create WebDataset shards: @@ -50,6 +52,12 @@ python create_wds_shards.py /data/voxlingua107/dev/ /data/voxlingua107_shards/de ### 2nd option: download the pre-compiled WebDataset shards +> [!IMPORTANT] +> As of 2024-09-19, according to the +> [official website](https://cs.taltech.ee/staff/tanel.alumae/data/voxlingua107/), the pre-compiled +> WebDataset shards are currently unavailable. As a result, this method is +> likely broken. If you get a 503 error, it is because of that. 
+ Download the shards: ``` @@ -57,7 +65,15 @@ Download the shards: cd /data/ mkdir voxlingua107_shards cd voxlingua107_shards -wget -r -nH --cut-dirs=4 --no-parent --reject="index.html*" http://bark.phon.ioc.ee/lw/korpused/voxlingua107/shards/ +wget -r -nH --cut-dirs=4 --no-parent --reject="index.html*" http://bark.phon.ioc.ee/lw/korpused/voxlingua107/shards/ # ignore-url-check +``` + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt ``` @@ -73,7 +89,7 @@ Training is run for 40 epochs. One epoch takes one hour and 40 minutes on a NVid # Performance | Release | hyperparams file | Dev error rate | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| :-----------:| -| 21-08-24 | train_ecapa.yaml | 6.7 |https://drive.google.com/drive/folders/151QTW9oHVElLIkuzXjkuHpOCLNZF0Ufd?usp=sharing | 1xA100 40GB | +| 21-08-24 | train_ecapa.yaml | 6.7 |https://www.dropbox.com/sh/72gpuic5m4x8ztz/AAB5R-RVIEsXJtRH8SGkb_oCa?dl=0 | 1xA100 40GB | @@ -85,9 +101,9 @@ You can run inference with only few lines of code: ```python import torchaudio -from speechbrain.pretrained import EncoderClassifier +from speechbrain.inference import EncoderClassifier language_id = EncoderClassifier.from_hparams(source="speechbrain/lang-id-voxlingua107-ecapa", savedir="tmp") -# Download Thai language sample from Omniglot and cvert to suitable form +# Download Thai language sample from Omniglot and convert to suitable form signal = language_id.load_audio("https://omniglot.com/soundfiles/udhr/udhr_th.mp3") prediction = language_id.classify_batch(signal) print(prediction) @@ -139,6 +155,15 @@ print(emb.shape) Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/VoxLingua107/lang_id/create_wds_shards.py b/recipes/VoxLingua107/lang_id/create_wds_shards.py index e822ae2011..7da6677c5d 100644 --- a/recipes/VoxLingua107/lang_id/create_wds_shards.py +++ b/recipes/VoxLingua107/lang_id/create_wds_shards.py @@ -6,17 +6,18 @@ # Author(s): Tanel Alumäe, Nik Vaessen ################################################################################ +import argparse import json import pathlib -import argparse import random import re from collections import defaultdict import torch -import torchaudio import webdataset as wds +from speechbrain.dataio import audio_io + 
################################################################################ # methods for writing the shards @@ -24,7 +25,7 @@ def load_audio(audio_file_path: pathlib.Path) -> torch.Tensor: - t, sr = torchaudio.load(audio_file_path) + t, sr = audio_io.load(audio_file_path) if sr != 16000: raise ValueError("expected sampling rate of 16 kHz") @@ -40,12 +41,17 @@ def write_shards( min_dur: float, ): """ - Parameters - ---------- - voxlingua_folder_path: folder where extracted voxceleb data is located - shards_path: folder to write shards of data to - seed: random seed used to initially shuffle data into shards - samples_per_shard: number of data samples to store in each shards. + Arguments + --------- + voxlingua_folder_path: pathlib.Path + folder where extracted voxceleb data is located + shards_path: pathlib.Path + folder to write shards of data to + seed: int + random seed used to initially shuffle data into shards + samples_per_shard: int + number of data samples to store in each shard.
+ min_dur: float """ # make sure output folder exist shards_path.mkdir(parents=True, exist_ok=True) @@ -95,7 +101,7 @@ def write_shards( "num_data_samples": len(data_tuples), } - with (shards_path / "meta.json").open("w") as f: + with (shards_path / "meta.json").open("w", encoding="utf-8") as f: json.dump(meta_dict, f) # shuffle the tuples so that each shard has a large variety in languages @@ -109,7 +115,6 @@ def write_shards( with wds.ShardWriter(pattern, maxcount=samples_per_shard) as sink: for key, language_id, f, duration in data_tuples: - # load the audio tensor tensor = load_audio(f) diff --git a/recipes/VoxLingua107/lang_id/extra-dependencies.txt b/recipes/VoxLingua107/lang_id/extra_requirements.txt similarity index 100% rename from recipes/VoxLingua107/lang_id/extra-dependencies.txt rename to recipes/VoxLingua107/lang_id/extra_requirements.txt diff --git a/recipes/VoxLingua107/lang_id/hparams/train_ecapa.yaml b/recipes/VoxLingua107/lang_id/hparams/train_ecapa.yaml index adf59213e3..b9ae355b21 100644 --- a/recipes/VoxLingua107/lang_id/hparams/train_ecapa.yaml +++ b/recipes/VoxLingua107/lang_id/hparams/train_ecapa.yaml @@ -5,12 +5,11 @@ # Basic parameters seed: 1988 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/epaca/ save_folder: !ref /save train_log: !ref /train_log.txt -data_folder: ./ -rir_folder: !ref +data_folder: !PLACEHOLDER shards_url: /data/voxlingua107_shards train_meta: !ref /train/meta.json @@ -18,6 +17,15 @@ val_meta: !ref /dev/meta.json train_shards: !ref /train/shard-{000000..000507}.tar val_shards: !ref /dev/shard-000000.tar +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 +data_folder_noise: !ref /noise # The noisy sequences for data 
augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv + + # Set to directory on a large disk if you are training on Webdataset shards hosted on the web shard_cache_dir: @@ -39,15 +47,65 @@ deltas: False # Number of languages out_n_neurons: 107 +num_workers: 4 +batch_size: 128 +batch_size_val: 32 train_dataloader_options: - num_workers: 4 - batch_size: 128 + num_workers: !ref + batch_size: !ref val_dataloader_options: num_workers: 1 - batch_size: 32 - -# Functions + batch_size: !ref + +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + shuffle_augmentations: True + min_augmentations: 1 + max_augmentations: 3 + augmentations: [ + !ref , + !ref , + !ref ] + + # 
Functions compute_features: !new:speechbrain.lobes.features.Fbank n_mels: !ref left_frames: !ref @@ -73,39 +131,12 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -augment_speed: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [90, 100, 110] - - -add_rev_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 0.5 - noise_prob: 0.8 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -# Definition of the augmentation pipeline. -# If concat_augment = False, the augmentation techniques are applied -# in sequence. If concat_augment = True, all the augmented signals -# # are concatenated in a single big batch. -augment_pipeline: [ - !ref , - !ref ] - -concat_augment: False - mean_var_norm: !new:speechbrain.processing.features.InputNormalization norm_type: sentence std_norm: False modules: compute_features: !ref - augment_speed: !ref - add_rev_noise: !ref embedding_model: !ref classifier: !ref mean_var_norm: !ref diff --git a/recipes/VoxLingua107/lang_id/train.py b/recipes/VoxLingua107/lang_id/train.py index c15c3f548e..5486fa273c 100644 --- a/recipes/VoxLingua107/lang_id/train.py +++ b/recipes/VoxLingua107/lang_id/train.py @@ -17,26 +17,27 @@ * Tanel Alumäe 2021 * @nikvaessen """ + +import json import os -import sys import random -from typing import Dict -import json +import sys from functools import partial -import webdataset as wds -import logging +from typing import Dict import torch -import speechbrain as sb +import webdataset as wds from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.dataio.batch import PaddedBatch +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class LanguageBrain(sb.core.Brain): - """Class for language ID training" - """ + """Class for language ID training.""" def compute_forward(self, batch,
stage): """Computation pipeline based on a encoder + speaker classifier. @@ -46,33 +47,9 @@ def compute_forward(self, batch, stage): batch = batch.to(self.device) wavs, lens = batch.sig - if stage == sb.Stage.TRAIN: - - # Applying the augmentation pipeline - wavs_aug_tot = [] - wavs_aug_tot.append(wavs) - for count, augment in enumerate(self.hparams.augment_pipeline): - - # Apply augment - wavs_aug = augment(wavs, lens) - - # Managing speed change - if wavs_aug.shape[1] > wavs.shape[1]: - wavs_aug = wavs_aug[:, 0 : wavs.shape[1]] - else: - zero_sig = torch.zeros_like(wavs) - zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug - wavs_aug = zero_sig - - if self.hparams.concat_augment: - wavs_aug_tot.append(wavs_aug) - else: - wavs = wavs_aug - wavs_aug_tot[0] = wavs - - wavs = torch.cat(wavs_aug_tot, dim=0) - self.n_augment = len(wavs_aug_tot) - lens = torch.cat([lens] * self.n_augment) + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, lens = self.hparams.wav_augment(wavs, lens) # Feature extraction and normalization feats = self.modules.compute_features(wavs) @@ -85,15 +62,14 @@ def compute_forward(self, batch, stage): return outputs, lens def compute_objectives(self, predictions, batch, stage): - """Computes the loss using speaker-id as label. 
- """ + """Computes the loss using speaker-id as label.""" predictions, lens = predictions uttid = batch.id langid = batch.lang_id_encoded # Concatenate labels (due to data augmentation) - if stage == sb.Stage.TRAIN: - langid = torch.cat([langid] * self.n_augment, dim=0) + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + langid = self.hparams.wav_augment.replicate_labels(langid) # breakpoint() loss = self.hparams.compute_cost(predictions, langid.unsqueeze(1), lens) @@ -139,11 +115,10 @@ def on_stage_end(self, stage, stage_loss, epoch=None): def dataio_prep_shards(hparams): - # load the meta info json file - with wds.gopen.gopen(hparams["train_meta"], "rb") as f: + with wds.gopen(hparams["train_meta"], "rb") as f: train_meta = json.load(f) - with wds.gopen.gopen(hparams["val_meta"], "rb") as f: + with wds.gopen(hparams["val_meta"], "rb") as f: val_meta = json.load(f) # define the mapping functions in the data pipeline @@ -191,7 +166,7 @@ def audio_pipeline(sample_dict: Dict, random_chunk=True): train_data = ( wds.WebDataset( - hparams["train_shards"], cache_dir=hparams["shard_cache_dir"], + hparams["train_shards"], cache_dir=hparams["shard_cache_dir"] ) .repeat() .shuffle(1000) @@ -204,7 +179,7 @@ def audio_pipeline(sample_dict: Dict, random_chunk=True): valid_data = ( wds.WebDataset( - hparams["val_shards"], cache_dir=hparams["shard_cache_dir"], + hparams["val_shards"], cache_dir=hparams["shard_cache_dir"] ) .decode("pil") .map(partial(audio_pipeline, random_chunk=False)) @@ -222,7 +197,6 @@ def audio_pipeline(sample_dict: Dict, random_chunk=True): if __name__ == "__main__": - logger.info("Starting training...") # This flag enables the inbuilt cudnn auto-tuner torch.backends.cudnn.benchmark = True @@ -234,9 +208,13 @@ def audio_pipeline(sample_dict: Dict, random_chunk=True): sb.utils.distributed.ddp_init_group(run_opts) # Load hyperparameters file with command-line overrides - with open(hparams_file) as fin: + with open(hparams_file, 
encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) + # Data preparation for augmentation + sb.utils.distributed.run_on_main(hparams["prepare_noise_data"]) + sb.utils.distributed.run_on_main(hparams["prepare_rir_data"]) + ( train_data, valid_data, diff --git a/recipes/VoxPopuli/ASR/transducer/README.md b/recipes/VoxPopuli/ASR/transducer/README.md new file mode 100644 index 0000000000..5ae32d5f12 --- /dev/null +++ b/recipes/VoxPopuli/ASR/transducer/README.md @@ -0,0 +1,83 @@ +# VoxPopuli ASR with Transducers +This folder contains scripts necessary to run an ASR experiment with the VoxPopuli dataset; +Before running this recipe, make sure numba is installed (pip install numba) for faster training! +You can download VoxPopuli at: https://github.com/facebookresearch/voxpopuli + +**We only report results for english but you simply need to download a different set to train with a different language!** + +# Extra-Dependencies +This recipe supports two implementations of the transducer loss, see `use_torchaudio` arg in the yaml file: +1. Transducer loss from torchaudio (this requires torchaudio version >= 0.10.0). +2. Speechbrain implementation using Numba. To use it, please set `use_torchaudio=False` in the yaml file. This version is implemented within SpeechBrain and allows you to directly access the python code of the transducer loss (and directly modify it if needed). + +The Numba implementation is currently enabled by default as the `use_torchaudio` option is incompatible with `bfloat16` training. + +Note: Before running this recipe, make sure numba is installed. Otherwise, run: +``` +pip install numba +``` + +# How to run it +```shell +python train.py hparams/conformer_transducer.yaml +``` + +## Precision Notes +If your GPU effectively supports fp16 (half-precision) computations, it is recommended to execute the training script with the `--precision=fp16` (or `--precision=bf16`) option. 
+Enabling half precision can significantly reduce the peak VRAM requirements. For example, in the case of the Conformer Transducer recipe trained with Librispeech, the peak VRAM decreases from 39GB to 12GB when using fp16. +According to our tests, the performance is not affected. + +# VoxPopuli non-streaming results + +Results are reported with beam search but without any language model. Models are +trained with dynamic chunk training but decoding is offline. + + +| Language | Hyperparams file | Train precision | Dev-clean Greedy | Test-clean Greedy | Model link | GPUs | +|:-------------:|:---------------------------:|:-:| :------:| :-----------:| :------------------:| :------------------:| +| English | conformer_transducer.yaml `streaming: True` | fp16 | 9.80 | 10.18 | [Model link](https://www.dropbox.com/scl/fo/y2if76ut4xur5rg9sszj3/h?rlkey=y8wmip8bd06cb82vm2cvmfaz3&dl=0) |6x A40| + + +# VoxPopuli streaming results + +### WER vs chunk size & left context + +The following matrix presents the Word Error Rate (WER%) achieved on the test set with various chunk sizes (in ms). + +This is with greedy decoding only. + + +| | full | cs=32 (1280ms) | 16 (640ms) | 8 (320ms) | +|:-----:|:----:|:-----:|:-----:|:-----:| +| full | 10.18| - | - | - | +| lc=32 | - | 10.88 | 11.39 | 12.37 | + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. 
+ +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/VoxPopuli/ASR/transducer/hparams/conformer_transducer.yaml b/recipes/VoxPopuli/ASR/transducer/hparams/conformer_transducer.yaml new file mode 100644 index 0000000000..2e2b36be90 --- /dev/null +++ b/recipes/VoxPopuli/ASR/transducer/hparams/conformer_transducer.yaml @@ -0,0 +1,363 @@ +# ############################################################################ +# Model: E2E ASR with transformer and transducer +# Encoder: Conformer +# Decoder: LSTM + beamsearch +# Tokens: BPE with unigram +# losses: Transducer + CTC (optional) + CE 
(optional) +# Training: VoxPopuli +# Authors: Titouan Parcollet 2024 +# ############################################################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 3407 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/conformer_transducer_large/ +output_wer_folder: !ref / +save_folder: !ref /save +train_log: !ref /train_log.txt + +# NB: To avoid mismatch, the speech recognizer must be trained with the same +# tokenizer used for LM training. Please give the path of your SpeechBrain +# Tokenizer +pretrained_tokenizer_path: !PLACEHOLDER + +# Data files +data_folder: !PLACEHOLDER #Path of VoxPopuli (uncompressed) +skip_prep: False +ckpt_interval_minutes: 10 # save checkpoint every N min +train_csv: !ref /train.csv +valid_csv: !ref /dev.csv +test_csv: + - !ref /dev.csv + - !ref /test.csv + +remove_if_longer_than: 100 # any segment longer than 100s is removed. + +####################### Training Parameters #################################### + +number_of_epochs: 100 # Will never be reached due to next param. +optimizer_step_limit: 70000 # In practice 70k steps are sufficient to converge. +warmup_steps: 10000 +augment_warmup_steps: 5000 # Number of steps before starting data augmentation. +num_workers: 4 +batch_size_valid: 4 +batch_size_test: 1 +lr: 0.0008 +weight_decay: 0.01 +number_of_ctc_epochs: 10 +ctc_weight: 0.4 # Multitask with CTC for the encoder (0.0 = disabled) +ce_weight: 0.0 # Multitask with CE for the decoder (0.0 = disabled) +max_grad_norm: 10.0 +loss_reduction: 'batchmean' +precision: fp32 # bf16, fp16 or fp32 + +# The batch size is used if and only if dynamic batching is set to False +# Validation and testing are done with fixed batches and not dynamic batching. 
+batch_size: 16 +sorting: random +avg_checkpoints: 5 # Number of checkpoints to average for evaluation + +# Feature parameters +sample_rate: 16000 +n_fft: 512 +n_mels: 80 +win_length: 32 + +# Streaming & dynamic chunk training options +# At least for the current architecture on LibriSpeech, we found out that +# non-streaming accuracy is very similar between `streaming: True` and +# `streaming: False`. +streaming: True # controls all Dynamic Chunk Training & chunk size & left context mechanisms + +# Configuration for Dynamic Chunk Training. +# In this model, a chunk is roughly equivalent to 40ms of audio. +dynchunktrain_config_sampler: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfigRandomSampler # yamllint disable-line rule:line-length + chunkwise_prob: 0.6 # Probability during a batch to limit attention and sample a random chunk size in the following range + chunk_size_min: 8 # Minimum chunk size (if in a DynChunkTrain batch) + chunk_size_max: 32 # Maximum chunk size (if in a DynChunkTrain batch) + limited_left_context_prob: 0.75 # If in a DynChunkTrain batch, the probability during a batch to restrict left context to a random number of chunks + left_context_chunks_min: 2 # Minimum left context size (in # of chunks) + left_context_chunks_max: 32 # Maximum left context size (in # of chunks) + # If you specify a valid/test config, you can optionally have evaluation be + # done with a specific DynChunkTrain configuration. + # valid_config: !new:speechbrain.utils.dynamic_chunk_training.DynChunkTrainConfig + # chunk_size: 24 + # left_context_size: 16 + # test_config: ... + +# Dataloader options +train_dataloader_opts: + batch_size: !ref + num_workers: !ref + +valid_dataloader_opts: + batch_size: !ref + +test_dataloader_opts: + batch_size: !ref + +# We recommend training with a real batch size of around 800-1000s. For instance +# three GPU containing 150s and a gradient accumulation of 2. 
+dynamic_batching: True +max_batch_len: 150 +grad_accumulation_factor: 2 +max_batch_len_val: 50 # we reduce it as the beam is much wider (VRAM) +num_bucket: 200 + +dynamic_batch_sampler: + max_batch_len: !ref + max_batch_len_val: !ref + num_buckets: !ref + shuffle_ex: True # if true re-creates batches at each epoch shuffling examples. + batch_ordering: random + max_batch_ex: 256 + +####################### Model Parameters ####################################### + +# Transformer +d_model: 512 +joint_dim: 512 +nhead: 8 +num_encoder_layers: 12 +num_decoder_layers: 0 +d_ffn: 2048 +transformer_dropout: 0.1 +activation: !name:torch.nn.GELU +output_neurons: 512 +dec_dim: 512 +dec_emb_dropout: 0.1 +dec_dropout: 0.1 +attention_type: RelPosMHAXL + +# Decoding parameters +blank_index: 0 +bos_index: 1 +eos_index: 2 +pad_index: 0 +beam_size: 10 +nbest: 1 + +# by default {state,expand}_beam = 2.3 as mention in paper +# https://arxiv.org/abs/1904.02619 +state_beam: 2.3 +expand_beam: 2.3 + +# If True uses torchaudio loss. 
Otherwise, the numba one +use_torchaudio: False + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + +normalize: !new:speechbrain.processing.features.InputNormalization + norm_type: global + update_until_epoch: 4 + +compute_features: !new:speechbrain.lobes.features.Fbank + sample_rate: !ref + n_fft: !ref + n_mels: !ref + win_length: !ref + +############################## Models ########################################## + +CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd + input_shape: (8, 10, 80) + num_blocks: 2 + num_layers_per_block: 1 + out_channels: (64, 32) + kernel_sizes: (3, 3) + strides: (2, 2) + residuals: (False, False) + +Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length + input_size: 640 + tgt_vocab: !ref + d_model: !ref + nhead: !ref + num_encoder_layers: !ref + num_decoder_layers: !ref + d_ffn: !ref + dropout: !ref + activation: !ref + encoder_module: conformer + attention_type: !ref + normalize_before: True + causal: False + max_length: 6000 # For absolute positional encoding + +# We must call an encoder wrapper so the decoder isn't run (we don't have any) +enc: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper + transformer: !ref + +# For MTL CTC over the encoder +proj_ctc: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + +# Define some projection layers to make sure that enc and dec +# output dim are the same before joining +proj_enc: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +proj_dec: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +# Uncomment for MTL with CTC +ctc_cost: !name:speechbrain.nnet.losses.ctc_loss + blank_index: !ref + reduction: !ref + +emb: !new:speechbrain.nnet.embedding.Embedding + num_embeddings: !ref + consider_as_one_hot: True + blank_id: !ref + +dec: 
!new:speechbrain.nnet.RNN.LSTM + input_shape: [null, null, !ref - 1] + hidden_size: !ref + num_layers: 1 + re_init: True + +# For MTL with LM over the decoder (need to uncomment to activate) +# dec_lin: !new:speechbrain.nnet.linear.Linear +# input_size: !ref +# n_neurons: !ref +# bias: False + +# For MTL +ce_cost: !name:speechbrain.nnet.losses.nll_loss + label_smoothing: 0.1 + +Tjoint: !new:speechbrain.nnet.transducer.transducer_joint.Transducer_joint + joint: sum # joint [sum | concat] + nonlinearity: !ref + +transducer_lin: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +transducer_cost: !name:speechbrain.nnet.losses.transducer_loss + blank_index: !ref + use_torchaudio: !ref + +# for MTL +# update model if any HEAD module is added +modules: + CNN: !ref + enc: !ref + emb: !ref + dec: !ref + Tjoint: !ref + transducer_lin: !ref + normalize: !ref + proj_ctc: !ref + proj_dec: !ref + proj_enc: !ref +# dec_lin: !ref + +# for MTL +# update model if any HEAD module is added +model: !new:torch.nn.ModuleList + - [!ref , !ref , !ref , !ref , !ref , !ref , !ref , !ref ] + +# Tokenizer initialization +tokenizer: !new:sentencepiece.SentencePieceProcessor + +############################## Decoding & optimiser ############################ + +Greedysearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher + decode_network_lst: [!ref , !ref , !ref ] + tjoint: !ref + classifier_network: [!ref ] + blank_id: !ref + beam_size: 1 + nbest: 1 + +Beamsearcher: !new:speechbrain.decoders.transducer.TransducerBeamSearcher + decode_network_lst: [!ref , !ref , !ref ] + tjoint: !ref + classifier_network: [!ref ] + blank_id: !ref + beam_size: !ref + nbest: !ref + state_beam: !ref + expand_beam: !ref + +opt_class: !name:torch.optim.AdamW + lr: !ref + betas: (0.9, 0.98) + eps: 1.e-8 + weight_decay: !ref + +lr_annealing: 
!new:speechbrain.nnet.schedulers.WarmAndExpDecayLRSchedule + lr: !ref + n_warmup_steps: !ref + total_steps: !ref + decay_factor: 0.05 + +############################## Augmentations ################################### + +# Time Drop +time_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 15 + drop_length_high: 25 + drop_count_low: 5 + drop_count_high: 5 + replace: "zeros" + +# Frequency Drop +freq_drop: !new:speechbrain.augment.freq_domain.SpectrogramDrop + drop_length_low: 25 + drop_length_high: 35 + drop_count_low: 2 + drop_count_high: 2 + replace: "zeros" + dim: 2 + +# Time warp +time_warp: !new:speechbrain.augment.freq_domain.Warping + +fea_augment: !new:speechbrain.augment.augmenter.Augmenter + parallel_augment: False + concat_original: False + repeat_augment: 1 + shuffle_augmentations: False + min_augmentations: 3 + max_augmentations: 3 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + model: !ref + scheduler: !ref + normalizer: !ref + counter: !ref + +pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer + collect_in: !ref + loadables: + tokenizer: !ref + paths: + tokenizer: !ref + + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + +cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats + split_tokens: True diff --git a/recipes/VoxPopuli/ASR/transducer/train.py b/recipes/VoxPopuli/ASR/transducer/train.py new file mode 100644 index 0000000000..e41f9c1556 --- /dev/null +++ b/recipes/VoxPopuli/ASR/transducer/train.py @@ -0,0 +1,482 @@ +#!/usr/bin/env/python3 +"""Recipe for training a streaming Transducer ASR system with VoxPopuli. +The system employs an encoder, a decoder, and an joint network +between them. Decoding is performed with Beamsearch. 
+ +To run this recipe, do the following: +> python train.py hparams/conformer_transducer.yaml + +With the default hyperparameters, the system employs a conformer encoder. +The decoder is based on a standard LSTM. + +The neural network is trained on both CTC and negative-log likelihood +targets and sub-word units estimated with Byte Pairwise Encoding (BPE) +are used as basic recognition tokens. + +Authors + * Titouan Parcollet 2024 +""" + +import sys +from pathlib import Path + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +class ASR(sb.Brain): + def compute_forward(self, batch, stage): + """Forward computations from the waveform batches to the output probabilities.""" + batch = batch.to(self.device) + wavs, wav_lens = batch.sig + tokens_with_bos, token_with_bos_lens = batch.tokens_bos + + feats = self.hparams.compute_features(wavs) + + # Add feature augmentation if specified. 
+ if ( + stage == sb.Stage.TRAIN + and hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup_steps + ): + feats, fea_lens = self.hparams.fea_augment(feats, wav_lens) + tokens_with_bos = self.hparams.fea_augment.replicate_labels( + tokens_with_bos + ) + + current_epoch = self.hparams.epoch_counter.current + + # Old models may not have the streaming hparam, we don't break them in + # any other way so just check for its presence + if hasattr(self.hparams, "streaming") and self.hparams.streaming: + dynchunktrain_config = self.hparams.dynchunktrain_config_sampler( + stage + ) + else: + dynchunktrain_config = None + + feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) + + src = self.modules.CNN(feats) + x = self.modules.enc( + src, + wav_lens, + pad_idx=self.hparams.pad_index, + dynchunktrain_config=dynchunktrain_config, + ) + x = self.modules.proj_enc(x) + + e_in = self.modules.emb(tokens_with_bos) + e_in = torch.nn.functional.dropout( + e_in, + self.hparams.dec_emb_dropout, + training=(stage == sb.Stage.TRAIN), + ) + h, _ = self.modules.dec(e_in) + h = torch.nn.functional.dropout( + h, self.hparams.dec_dropout, training=(stage == sb.Stage.TRAIN) + ) + h = self.modules.proj_dec(h) + + # Joint network + # add labelseq_dim to the encoder tensor: [B,T,H_enc] => [B,T,1,H_enc] + # add timeseq_dim to the decoder tensor: [B,U,H_dec] => [B,1,U,H_dec] + joint = self.modules.Tjoint(x.unsqueeze(2), h.unsqueeze(1)) + + # Output layer for transducer log-probabilities + logits_transducer = self.modules.transducer_lin(joint) + + # Compute outputs + if stage == sb.Stage.TRAIN: + p_ctc = None + p_ce = None + + if ( + self.hparams.ctc_weight > 0.0 + and current_epoch <= self.hparams.number_of_ctc_epochs + ): + # Output layer for ctc log-probabilities + out_ctc = self.modules.proj_ctc(x) + p_ctc = self.hparams.log_softmax(out_ctc) + + if self.hparams.ce_weight > 0.0: + # Output layer for ctc log-probabilities + p_ce = 
self.modules.dec_lin(h) + p_ce = self.hparams.log_softmax(p_ce) + + return p_ctc, p_ce, logits_transducer, wav_lens + + elif stage == sb.Stage.VALID: + best_hyps, scores, _, _ = self.hparams.Greedysearcher(x) + return logits_transducer, wav_lens, best_hyps + else: + ( + best_hyps, + best_scores, + nbest_hyps, + nbest_scores, + ) = self.hparams.Beamsearcher(x) + return logits_transducer, wav_lens, best_hyps + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss (Transducer+(CTC+NLL)) given predictions and targets.""" + + ids = batch.id + tokens, token_lens = batch.tokens + tokens_eos, token_eos_lens = batch.tokens_eos + + # Train returns 4 elements vs 3 for val and test + if len(predictions) == 4: + p_ctc, p_ce, logits_transducer, wav_lens = predictions + else: + logits_transducer, wav_lens, predicted_tokens = predictions + + if stage == sb.Stage.TRAIN: + if ( + hasattr(self.hparams, "fea_augment") + and self.optimizer_step > self.hparams.augment_warmup_steps + ): + ( + tokens, + token_lens, + tokens_eos, + token_eos_lens, + ) = self.hparams.fea_augment.replicate_multiple_labels( + tokens, token_lens, tokens_eos, token_eos_lens + ) + + if stage == sb.Stage.TRAIN: + CTC_loss = 0.0 + CE_loss = 0.0 + if p_ctc is not None: + CTC_loss = self.hparams.ctc_cost( + p_ctc, tokens, wav_lens, token_lens + ) + if p_ce is not None: + CE_loss = self.hparams.ce_cost( + p_ce, tokens_eos, length=token_eos_lens + ) + loss_transducer = self.hparams.transducer_cost( + logits_transducer, tokens, wav_lens, token_lens + ) + loss = ( + self.hparams.ctc_weight * CTC_loss + + self.hparams.ce_weight * CE_loss + + (1 - (self.hparams.ctc_weight + self.hparams.ce_weight)) + * loss_transducer + ) + else: + loss = self.hparams.transducer_cost( + logits_transducer, tokens, wav_lens, token_lens + ) + + if stage != sb.Stage.TRAIN: + # Decode token terms to words + predicted_words = [ + self.tokenizer.decode_ids(utt_seq).split(" ") + for utt_seq in predicted_tokens + ] + 
target_words = [wrd.split(" ") for wrd in batch.wrd] + self.wer_metric.append(ids, predicted_words, target_words) + self.cer_metric.append(ids, predicted_words, target_words) + + return loss + + def on_fit_batch_end(self, batch, outputs, loss, should_step): + """At the end of the optimizer step, apply annealing.""" + if should_step: + self.hparams.lr_annealing(self.optimizer) + + def on_stage_start(self, stage, epoch): + """Gets called at the beginning of each epoch""" + if stage != sb.Stage.TRAIN: + self.cer_metric = self.hparams.cer_computer() + self.wer_metric = self.hparams.error_rate_computer() + + def on_stage_end(self, stage, stage_loss, epoch): + """Gets called at the end of a epoch.""" + + # Compute/store important stats + stage_stats = {"loss": stage_loss} + if stage == sb.Stage.TRAIN: + self.train_stats = stage_stats + else: + stage_stats["CER"] = self.cer_metric.summarize("error_rate") + stage_stats["WER"] = self.wer_metric.summarize("error_rate") + + # Perform end-of-iteration things, like annealing, logging, etc. 
+ if stage == sb.Stage.VALID: + lr = self.hparams.lr_annealing.current_lr + steps = self.optimizer_step + optimizer = self.optimizer.__class__.__name__ + + epoch_stats = { + "epoch": epoch, + "lr": lr, + "steps": steps, + "optimizer": optimizer, + } + + self.hparams.train_logger.log_stats( + stats_meta=epoch_stats, + train_stats=self.train_stats, + valid_stats=stage_stats, + ) + self.checkpointer.save_and_keep_only( + meta={"WER": stage_stats["WER"], "epoch": epoch}, + min_keys=["WER"], + num_to_keep=self.hparams.avg_checkpoints, + ) + + elif stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats=stage_stats, + ) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) + + # save the averaged checkpoint at the end of the evaluation stage + # delete the rest of the intermediate checkpoints + # WER is set to -0.1 so checkpointer only keeps the averaged checkpoint + self.checkpointer.save_and_keep_only( + meta={"WER": -0.1, "epoch": epoch}, + min_keys=["WER"], + num_to_keep=1, + ) + + +def dataio_prepare(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined functions. + """ + data_folder = hparams["data_folder"] + + train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["train_csv"], + replacements={"data_root": data_folder}, + ) + + if hparams["sorting"] == "ascending": + # we sort training data to speed up training and get better results. + train_data = train_data.filtered_sorted(sort_key="duration") + # when sorting do not shuffle in dataloader ! 
otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "descending": + train_data = train_data.filtered_sorted( + sort_key="duration", reverse=True + ) + # when sorting do not shuffle in dataloader ! otherwise is pointless + hparams["train_dataloader_opts"]["shuffle"] = False + + elif hparams["sorting"] == "random": + pass + + else: + raise NotImplementedError( + "sorting must be random, ascending or descending" + ) + + valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=hparams["valid_csv"], + replacements={"data_root": data_folder}, + ) + valid_data = valid_data.filtered_sorted(sort_key="duration") + + # test is separate + test_datasets = {} + for csv_file in hparams["test_csv"]: + name = Path(csv_file).stem + test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( + csv_path=csv_file, replacements={"data_root": data_folder} + ) + test_datasets[name] = test_datasets[name].filtered_sorted( + sort_key="duration" + ) + + datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] + + # Defining tokenizer and loading it + # To avoid mismatch, we have to use the same tokenizer used for LM training + tokenizer = hparams["tokenizer"] + + # 2. Define audio pipeline: + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + sig = sb.dataio.dataio.read_audio(wav) + return sig + + sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) + + # 3. 
Define text pipeline:
+    @sb.utils.data_pipeline.takes("wrd")
+    @sb.utils.data_pipeline.provides(
+        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
+    )
+    def text_pipeline(wrd):
+        yield wrd
+        tokens_list = tokenizer.encode_as_ids(wrd)
+        yield tokens_list
+        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
+        yield tokens_bos
+        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
+        yield tokens_eos
+        tokens = torch.LongTensor(tokens_list)
+        yield tokens
+
+    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
+
+    # 4. Set output:
+    sb.dataio.dataset.set_output_keys(
+        datasets,
+        ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"],
+    )
+
+    # 5. If Dynamic Batching is used, we instantiate the needed samplers.
+    train_batch_sampler = None
+    valid_batch_sampler = None
+    if hparams["dynamic_batching"]:
+        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa
+
+        dynamic_hparams = hparams["dynamic_batch_sampler"]
+        num_buckets = dynamic_hparams["num_buckets"]
+
+        train_batch_sampler = DynamicBatchSampler(
+            train_data,
+            dynamic_hparams["max_batch_len"],
+            num_buckets=num_buckets,
+            length_func=lambda x: x["duration"],
+            shuffle=dynamic_hparams["shuffle_ex"],
+            batch_ordering=dynamic_hparams["batch_ordering"],
+        )
+
+        valid_batch_sampler = DynamicBatchSampler(
+            valid_data,
+            dynamic_hparams["max_batch_len_val"],
+            num_buckets=num_buckets,
+            length_func=lambda x: x["duration"],
+            shuffle=dynamic_hparams["shuffle_ex"],
+            batch_ordering=dynamic_hparams["batch_ordering"],
+        )
+
+    return (
+        train_data,
+        valid_data,
+        test_datasets,
+        tokenizer,
+        train_batch_sampler,
+        valid_batch_sampler,
+    )
+
+
+if __name__ == "__main__":
+    # CLI:
+    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
+
+    # The numba transducer loss requires CUDA, so force the torchaudio
+    # implementation whenever the device is CPU. NB: str.replace returns a
+    # new string, so the result must be reassigned.
+    if run_opts.get("device") == "cpu":
+        if "use_torchaudio: False" in overrides:
+            overrides = overrides.replace(
+                "use_torchaudio: False", "use_torchaudio: True"
+            )
+        elif "use_torchaudio: True" not in overrides:
+
            overrides += "\nuse_torchaudio: True"
+
+    # create ddp_group with the right communication protocol
+    sb.utils.distributed.ddp_init_group(run_opts)
+
+    with open(hparams_file, encoding="utf-8") as fin:
+        hparams = load_hyperpyyaml(fin, overrides)
+
+    # Create experiment directory
+    sb.create_experiment_directory(
+        experiment_directory=hparams["output_folder"],
+        hyperparams_to_save=hparams_file,
+        overrides=overrides,
+    )
+
+    # 1. # Dataset prep (parsing VoxPopuli)
+    from voxpopuli_prepare import prepare_voxpopuli  # noqa
+
+    # multi-gpu (ddp) save data preparation
+    run_on_main(
+        prepare_voxpopuli,
+        kwargs={
+            "data_folder": hparams["data_folder"],
+            "save_folder": hparams["output_folder"],
+            "skip_prep": hparams["skip_prep"],
+            "remove_if_longer_than": hparams["remove_if_longer_than"],
+        },
+    )
+
+    # here we create the datasets objects as well as tokenization and encoding
+    (
+        train_data,
+        valid_data,
+        test_datasets,
+        tokenizer,
+        train_bsampler,
+        valid_bsampler,
+    ) = dataio_prepare(hparams)
+
+    # We download the pretrained LM and the tokenizer from HuggingFace (or elsewhere
+    # depending on the path given in the YAML file). The tokenizer is loaded at
+    # the same time.
+    hparams["pretrainer"].collect_files()
+    hparams["pretrainer"].load_collected()
+
+    # Trainer initialization
+    asr_brain = ASR(
+        modules=hparams["modules"],
+        opt_class=hparams["opt_class"],
+        hparams=hparams,
+        run_opts=run_opts,
+        checkpointer=hparams["checkpointer"],
+    )
+
+    # We dynamically add the tokenizer to our brain class.
+    # NB: This tokenizer corresponds to the one used for the LM!!
+ asr_brain.tokenizer = hparams["tokenizer"] + train_dataloader_opts = hparams["train_dataloader_opts"] + valid_dataloader_opts = hparams["valid_dataloader_opts"] + + if train_bsampler is not None: + train_dataloader_opts = { + "batch_sampler": train_bsampler, + "num_workers": hparams["num_workers"], + } + + if valid_bsampler is not None: + valid_dataloader_opts = {"batch_sampler": valid_bsampler} + + # Training + asr_brain.fit( + asr_brain.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=train_dataloader_opts, + valid_loader_kwargs=valid_dataloader_opts, + ) + + import os + + os.makedirs(hparams["output_wer_folder"], exist_ok=True) + + for k in test_datasets.keys(): # keys are test_clean, test_other etc + asr_brain.hparams.test_wer_file = os.path.join( + hparams["output_wer_folder"], f"wer_{k}.txt" + ) + asr_brain.evaluate( + test_datasets[k], + test_loader_kwargs=hparams["test_dataloader_opts"], + min_key="WER", + ) diff --git a/recipes/VoxPopuli/ASR/transducer/voxpopuli_prepare.py b/recipes/VoxPopuli/ASR/transducer/voxpopuli_prepare.py new file mode 120000 index 0000000000..7b73cf209e --- /dev/null +++ b/recipes/VoxPopuli/ASR/transducer/voxpopuli_prepare.py @@ -0,0 +1 @@ +../../voxpopuli_prepare.py \ No newline at end of file diff --git a/recipes/VoxPopuli/Tokenizer/README.md b/recipes/VoxPopuli/Tokenizer/README.md new file mode 100644 index 0000000000..ae55b68b8f --- /dev/null +++ b/recipes/VoxPopuli/Tokenizer/README.md @@ -0,0 +1,41 @@ +# Tokenizer. +This folder contains the scripts to train a tokenizer using SentencePiece (https://github.com/google/sentencepiece). +The tokenizer is trained on the top of the provided VoxPopuli training transcriptions. 
+ +Download: https://github.com/facebookresearch/voxpopuli + + +# How to run +```shell +python train.py hparams/1K_unigram_subword_bpe.yaml +``` + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ +- HuggingFace: https://huggingface.co/speechbrain/ + + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git a/recipes/KsponSpeech/Tokenizer/hparams/5K_unigram_subword_bpe.yaml 
b/recipes/VoxPopuli/Tokenizer/hparams/unigram_subword_bpe.yaml similarity index 59% rename from recipes/KsponSpeech/Tokenizer/hparams/5K_unigram_subword_bpe.yaml rename to recipes/VoxPopuli/Tokenizer/hparams/unigram_subword_bpe.yaml index cf725cac6c..0d3880ccb7 100644 --- a/recipes/KsponSpeech/Tokenizer/hparams/5K_unigram_subword_bpe.yaml +++ b/recipes/VoxPopuli/Tokenizer/hparams/unigram_subword_bpe.yaml @@ -1,36 +1,33 @@ # ############################################################################ -# Tokenizer: subword BPE with unigram 5K -# Training: KsponSpeech train transcript -# Authors: Dongwon Kim, Dongwoo Kim 2021 +# Tokenizer: subword BPE with unigram 1K +# Training: VoxPopuli +# Authors: Titouan Parcollet 2024 # ############################################################################ -output_folder: !ref results/5K_subword_unigram_LM/ +output_folder: !ref results/512_subword_unigram/ # train_log: !ref /train_log.txt # Data files -data_folder: !PLACEHOLDER # e.g., /path/to/KsponSpeech -train_splits: ["train"] -dev_splits: ["dev"] -test_splits: ["eval_clean", "eval_other"] +data_folder: !PLACEHOLDER # e.g, /path/to/LibriSpeech +skip_prep: False train_csv: !ref /train.csv -valid_csv: !ref /dev.csv +valid_csv: !ref /valid.csv # Training parameters token_type: unigram # ["unigram", "bpe", "char"] -token_output: 5000 # index(blank/eos/bos/unk) = 0 +token_output: 512 character_coverage: 1.0 csv_read: wrd bos_index: 1 eos_index: 2 - tokenizer: !name:speechbrain.tokenizers.SentencePiece.SentencePiece model_dir: !ref vocab_size: !ref annotation_train: !ref annotation_read: !ref - model_type: !ref + model_type: !ref # ["unigram", "bpe", "char"] character_coverage: !ref - bos_id: !ref # Define bos_id/eos_id if different from blank_id - eos_id: !ref annotation_list_to_check: [!ref , !ref ] + bos_id: !ref + eos_id: !ref diff --git a/recipes/KsponSpeech/Tokenizer/train.py b/recipes/VoxPopuli/Tokenizer/train.py similarity index 65% rename from 
recipes/KsponSpeech/Tokenizer/train.py rename to recipes/VoxPopuli/Tokenizer/train.py index 0380bd6873..67a624d64d 100644 --- a/recipes/KsponSpeech/Tokenizer/train.py +++ b/recipes/VoxPopuli/Tokenizer/train.py @@ -1,5 +1,5 @@ #!/usr/bin/env/python3 -"""Recipe for training a BPE tokenizer with ksponspeech. +"""Recipe for training a BPE tokenizer with VoxPopuli The tokenizer converts words into sub-word units that can be used to train a language (LM) or an acoustic model (AM). When doing a speech recognition experiment you have to make @@ -7,33 +7,28 @@ the same tokenizer. Otherwise, a token mismatch is introduced and beamsearch will produce bad results when combining AM and LM. -To run this recipe, do the following: -> python train.py hyperparams/5K_unigram_subword_bpe.yaml - - Authors - * Abdel Heba 2021 - * Dongwon Kim, Dongwoo Kim 2021 + * Titouan Parcollet 2024 """ import sys -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) - # 1. # Dataset prep (parsing KsponSpeech) - from ksponspeech_prepare import prepare_ksponspeech # noqa + # 1. 
# Dataset prep (parsing Librispeech) + from voxpopuli_prepare import prepare_voxpopuli # noqa # Create experiment directory sb.create_experiment_directory( @@ -44,15 +39,11 @@ # multi-gpu (ddp) save data preparation run_on_main( - prepare_ksponspeech, + prepare_voxpopuli, kwargs={ "data_folder": hparams["data_folder"], - "tr_splits": hparams["train_splits"], - "dev_splits": hparams["dev_splits"], - "te_splits": hparams["test_splits"], "save_folder": hparams["output_folder"], - "merge_lst": hparams["train_splits"], - "merge_name": "train.csv", + "skip_prep": hparams["skip_prep"], }, ) diff --git a/recipes/VoxPopuli/Tokenizer/voxpopuli_prepare.py b/recipes/VoxPopuli/Tokenizer/voxpopuli_prepare.py new file mode 120000 index 0000000000..0a7f247dee --- /dev/null +++ b/recipes/VoxPopuli/Tokenizer/voxpopuli_prepare.py @@ -0,0 +1 @@ +../voxpopuli_prepare.py \ No newline at end of file diff --git a/recipes/VoxPopuli/voxpopuli_prepare.py b/recipes/VoxPopuli/voxpopuli_prepare.py new file mode 100644 index 0000000000..b6633f9073 --- /dev/null +++ b/recipes/VoxPopuli/voxpopuli_prepare.py @@ -0,0 +1,431 @@ +""" +Data preparation for ASR with VoxPopuli. +Download: https://github.com/facebookresearch/voxpopuli +Author +------ +Titouan Parcollet 2024 +""" + +import csv +import functools +import os +import re +from dataclasses import dataclass + +from speechbrain.dataio.dataio import read_audio_info +from speechbrain.utils.logger import get_logger +from speechbrain.utils.parallel import parallel_map + +logger = get_logger(__name__) + + +def prepare_voxpopuli( + data_folder, + save_folder, + train_tsv_file=None, + dev_tsv_file=None, + test_tsv_file=None, + skip_prep=False, + language="en", + remove_if_longer_than=100, +): + """ + Prepares the csv files for the Vox Populi dataset. + Download: https://github.com/facebookresearch/voxpopuli + + Arguments + --------- + data_folder : str + Path to the folder where the original Vox Populi dataset is stored. 
+ This path should include the transcribed_data folder. + save_folder : str + The directory where to store the csv files. + train_tsv_file : str, optional + Path to the Train Vox Populi .tsv file (cs) + dev_tsv_file : str, optional + Path to the Dev Vox Populi .tsv file (cs) + test_tsv_file : str, optional + Path to the Test Vox Populi .tsv file (cs) + skip_prep: bool, optional + If True, skip data preparation. + language: str, optional + The language of the voxpopuli dataset. This is used to apply language + specific text normalisation. + remove_if_longer_than: int, optional + Some audio files in VoxPopuli can be very long (200+ seconds). This option + removes them from the train set. + + Returns + ------- + None + + Example + ------- + >>> from recipes.VoxPopuli.ASR.voxpopuli_prepare import prepare_voxpopuli + >>> data_folder = '/datasets/voxpopuli/en' + >>> save_folder = 'exp/voxpopuli_exp' + >>> train_tsv_file = '/datasets/voxpopuli/data/transcribed_data/en/asr_train.tsv' + >>> dev_tsv_file = '/datasets/voxpopuli/data/transcribed_data/en/asr_dev.tsv' + >>> test_tsv_file = '/datasets/voxpopuli/data/transcribed_data/en/test.tsv' + >>> prepare_voxpopuli( \ + data_folder, \ + save_folder, \ + ) + """ + + if skip_prep: + return + + # If not specified point toward standard location w.r.t VoxPopuli tree + if train_tsv_file is None: + train_tsv_file = data_folder + "/asr_train.tsv" + else: + train_tsv_file = train_tsv_file + + if dev_tsv_file is None: + dev_tsv_file = data_folder + "/asr_dev.tsv" + else: + dev_tsv_file = dev_tsv_file + + if test_tsv_file is None: + test_tsv_file = data_folder + "/asr_test.tsv" + else: + test_tsv_file = test_tsv_file + + # Setting the save folder + os.makedirs(save_folder, exist_ok=True) + + # Setting output files + save_csv_train = save_folder + "/train.csv" + save_csv_dev = save_folder + "/dev.csv" + save_csv_test = save_folder + "/test.csv" + + # If csv already exists, we skip the data preparation + if skip(save_csv_train, 
save_csv_dev, save_csv_test): + msg = "%s already exists, skipping data preparation!" % (save_csv_train) + logger.info(msg) + + msg = "%s already exists, skipping data preparation!" % (save_csv_dev) + logger.info(msg) + + msg = "%s already exists, skipping data preparation!" % (save_csv_test) + logger.info(msg) + + return + + # Additional checks to make sure the data folder contains Common Voice + check_voxpopuli_folders(data_folder) + # Creating csv files for {train, dev, test} data + file_pairs = zip( + [train_tsv_file, dev_tsv_file, test_tsv_file], + [save_csv_train, save_csv_dev, save_csv_test], + ) + for tsv_file, save_csv in file_pairs: + create_csv( + tsv_file, save_csv, data_folder, language, remove_if_longer_than + ) + + +def skip(save_csv_train, save_csv_dev, save_csv_test): + """ + Detects if the VoxPopuli data preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + save_csv_train : str + Path to train manifest file + save_csv_dev : str + Path to dev manifest file + save_csv_test : str + Path to test manifest file + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + + # Checking folders and save options + skip = False + + if ( + os.path.isfile(save_csv_train) + and os.path.isfile(save_csv_dev) + and os.path.isfile(save_csv_test) + ): + skip = True + + return skip + + +@dataclass +class VPRow: + snt_id: str + duration: float + ogg_path: str + spk_id: str + words: str + + +def process_line(line, data_folder, language): + """ + Processes each line of the CSV (most likely happening with multiple threads) + + Arguments + --------- + line : str + Line of the csv file. + data_folder : str + Path of the Vox Populi dataset. + language: str, optional + The language of the voxpopuli dataset. This is used to apply language + specific text normalisation. 
+ + Returns + ------- + VPRow + """ + year_path = os.path.join(line[0:4], line.split("\t")[0]) + ogg_path = os.path.join(data_folder, year_path) + ".ogg" + file_name = line.split("\t")[0] + spk_id = line.split("\t")[3] + snt_id = file_name + + # Reading the signal (to retrieve duration in seconds) + if os.path.isfile(ogg_path): + info = read_audio_info(ogg_path) + else: + msg = "\tError loading: %s" % (ogg_path) + logger.info(msg) + return None + + duration = info.num_frames / info.sample_rate + + # Getting transcript + words = line.split("\t")[2] + + # Unicode Normalization + words = unicode_normalisation(words) + + words = language_specific_preprocess(language, words) + + # Remove multiple spaces + words = re.sub(" +", " ", words) + + # Remove spaces at the beginning and the end of the sentence + words = words.lstrip().rstrip() + + if len(words.split(" ")) < 3: + return None + + # Composition of the csv_line + return VPRow(snt_id, duration, ogg_path, spk_id, words) + + +def create_csv( + orig_tsv_file, csv_file, data_folder, language, remove_if_longer_than +): + """ + Creates the csv file given a list of ogg files. + + Arguments + --------- + orig_tsv_file : str + Path to the Vox Populi tsv file (standard file). + csv_file : str + Path to the csv file where data will be dumped. + data_folder : str + Path of the Vox Populi dataset. + language: str, optional + The language of the voxpopuli dataset. This is used to apply language + specific text normalisation. + remove_if_longer_than: int, optional + Some audio files in VoxPopuli can be very long (200+ seconds). This option + removes them from the train set. Information about the discarded data is given. + """ + + # Check if the given files exists + if not os.path.isfile(orig_tsv_file): + msg = "\t%s doesn't exist, verify your dataset!" 
% (orig_tsv_file) + logger.info(msg) + raise FileNotFoundError(msg) + + # We load and skip the header + loaded_csv = open(orig_tsv_file, encoding="utf-8").readlines()[1:] + nb_samples = len(loaded_csv) + + msg = "Preparing CSV files for %s samples ..." % (str(nb_samples)) + logger.info(msg) + + # Adding some Prints + msg = "Creating csv lists in %s ..." % (csv_file) + logger.info(msg) + + # Process and write lines + total_duration = 0.0 + skipped_duration = 0.0 + + line_processor = functools.partial( + process_line, language=language, data_folder=data_folder + ) + + # Stream into a .tmp file, and rename it to the real path at the end. + csv_file_tmp = csv_file + ".tmp" + + with open(csv_file_tmp, mode="w", encoding="utf-8") as csv_f: + csv_writer = csv.writer( + csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + csv_writer.writerow(["ID", "duration", "wav", "spk_id", "wrd"]) + + for row in parallel_map(line_processor, loaded_csv): + if row is None: + continue + + if row.duration < remove_if_longer_than: + total_duration += row.duration + else: + skipped_duration += row.duration + continue + + csv_writer.writerow( + [ + row.snt_id, + str(row.duration), + row.ogg_path, + row.spk_id, + row.words, + ] + ) + + os.replace(csv_file_tmp, csv_file) + + # Final prints + msg = "%s successfully created!" % (csv_file) + logger.info(msg) + msg = "Number of samples: %s " % (str(len(loaded_csv))) + logger.info(msg) + msg = "Total duration: %s Hours" % (str(round(total_duration / 3600, 2))) + logger.info(msg) + msg = "Total skipped duration (too long segments): %s Hours" % ( + str(round(skipped_duration / 3600, 2)) + ) + logger.info(msg) + + +def check_voxpopuli_folders(data_folder): + """ + Check if the data folder actually contains the voxpopuli dataset. + If not, raises an error. + + Arguments + --------- + data_folder : str + Path to data folder to check + + Raises + ------ + FileNotFoundError + If data folder doesn't contain VoxPopuli dataset. 
+ """ + files_str = "/2020" + # Checking clips + if not os.path.exists(data_folder + files_str): + err_msg = ( + "the folder %s does not exist (it is expected in " + "the Common Voice dataset)" % (data_folder + files_str) + ) + raise FileNotFoundError(err_msg) + + +def unicode_normalisation(text): + return str(text) + + +def language_specific_preprocess(language, words): + """ + Format the input string according to some rules depending on the language. + + Arguments + --------- + language : str + Corresponds to the two letters for language-specific sets + words : str + The string to be cleaned. + + Returns + ------- + str + """ + + # !! Language specific cleaning !! + # Important: feel free to specify the text normalization + # corresponding to your alphabet. + + if language in ["en", "fr", "it"]: + words = re.sub("[^’'A-Za-z0-9À-ÖØ-öø-ÿЀ-ӿéæœâçèàûî]+", " ", words) + + if language == "de": + # this replacement helps preserve the case of ß + # (and helps retain solitary occurrences of SS) + # since python's upper() converts ß to SS. 
+ words = words.replace("ß", "0000ß0000") + words = re.sub("[^’'A-Za-z0-9öÖäÄüÜß]+", " ", words) + words = words.replace("'", " ") + words = words.replace("’", " ") + words = words.replace( + "0000SS0000", "ß" + ) # replace 0000SS0000 back to ß as its initial presence in the corpus + + elif language == "fr": # SM + words = re.sub("[^’'A-Za-z0-9À-ÖØ-öø-ÿЀ-ӿéæœâçèàûî]+", " ", words) + words = words.replace("’", "'") + words = words.replace("é", "é") + words = words.replace("æ", "ae") + words = words.replace("œ", "oe") + words = words.replace("â", "â") + words = words.replace("ç", "ç") + words = words.replace("è", "è") + words = words.replace("à", "à") + words = words.replace("û", "û") + words = words.replace("î", "î") + words = words + + # Case of apostrophe collés + words = words.replace("L'", "L' ") + words = words.replace("L' ", "L' ") + words = words.replace("S'", "S' ") + words = words.replace("S' ", "S' ") + words = words.replace("D'", "D' ") + words = words.replace("D' ", "D' ") + words = words.replace("J'", "J' ") + words = words.replace("J' ", "J' ") + words = words.replace("N'", "N' ") + words = words.replace("N' ", "N' ") + words = words.replace("C'", "C' ") + words = words.replace("C' ", "C' ") + words = words.replace("QU'", "QU' ") + words = words.replace("QU' ", "QU' ") + words = words.replace("M'", "M' ") + words = words.replace("M' ", "M' ") + + # Case of apostrophe qui encadre quelques mots + words = words.replace(" '", " ") + words = words.replace("A'", "A") + words = words.replace("B'", "B") + words = words.replace("E'", "E") + words = words.replace("F'", "F") + words = words.replace("G'", "G") + words = words.replace("K'", "K") + words = words.replace("Q'", "Q") + words = words.replace("V'", "V") + words = words.replace("W'", "W") + words = words.replace("Z'", "Z") + words = words.replace("O'", "O") + words = words.replace("X'", "X") + words = words.replace("AUJOURD' HUI", "AUJOURD'HUI") + + return words diff --git 
a/recipes/WHAMandWHAMR/enhancement/README.md b/recipes/WHAMandWHAMR/enhancement/README.md index 9855248657..372f289a87 100644 --- a/recipes/WHAMandWHAMR/enhancement/README.md +++ b/recipes/WHAMandWHAMR/enhancement/README.md @@ -3,22 +3,31 @@ This folder contains speech enhancement recipes for the WHAM! and WHAMR! dataset * This recipe supports training several models on WHAM! and WHAMR! datasets, including [Sepformer](https://arxiv.org/abs/2010.13154), [DPRNN](https://arxiv.org/abs/1910.06379), [ConvTasnet](https://arxiv.org/abs/1809.07454), [DPTNet](https://arxiv.org/abs/2007.13975). -Additional dependency: -``` -pip install mir_eval -pip install pyroomacoustics==0.3.1 +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: ``` -For `pyroomacoustics`, you need to use the version 0.3.1. +pip install -r ../extra_requirements.txt +``` +## How to run: To run it: -``` +```shell python train.py hparams/sepformer-wham.yaml --data_folder yourpath/wham_original python train.py hparams/sepformer-whamr.yaml --data_folder yourpath/whamr ``` Note that during training we print the negative SI-SNR (as we treat this value as the loss). +# How to run on test sets only +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: + +```shell +python train.py hparams/sepformer-wham.yaml --data_folder yourpath/wham_original --test_only +python train.py hparams/sepformer-whamr.yaml --data_folder yourpath/whamr --test_only +``` + # WHAM! and WHAMR! dataset: * This recipe supports the noisy and reverberant [versions](http://wham.whisper.ai/) of WSJ0 - 2/3 Mix datasets. For WHAM!, simply use `--data_folder /yourpath/wham_original`, and for WHAMR! use `--data_folder /yourpath/whamr`. 
The script will automatically adjust itself to WHAM and WHAMR, but you must rename the top folder (the folder that contains the `wav8k` subfolder should be named respectively `wham_original` and `whamr`, as the script decides which dataset to use based on the `--data_folder` variable. @@ -47,8 +56,8 @@ Here are the SI - SNR (in dB) and PESQ on the test set of WHAM!, WHAMR! datasets |DynamicMixing | 10.6 | 2.84 | -The output folder with the model checkpoints and logs for WHAMR! is available [here](https://drive.google.com/drive/folders/1V0KwkEfWwomZ0Vjox0BTnQ694_uxgu8G?usp=sharing). -The output folder with the model checkpoints and logs for WHAM! is available [here](https://drive.google.com/drive/folders/1bbQvaiN-R79M697NnekA7Rr0jIYtO6e3?usp=sharing). +The output folder with the model checkpoints and logs for WHAMR! is available [here](https://www.dropbox.com/sh/kb0xrvi5k168ou2/AAAPB2U6HyyUT1gMoUH8gxQCa?dl=0). +The output folder with the model checkpoints and logs for WHAM! is available [here](https://www.dropbox.com/sh/pxz2xbj76ijd5ci/AAD3c3dHyszk4oHJaa26K1_ha?dl=0). # Training time It takes about 2h 30 min for WHAMR! (DynamicMixing) and WHAM! on a NVIDIA V100 (32GB). @@ -59,7 +68,7 @@ Pretrained models for SepFormer on WHAM!, WHAMR! datasets can be found through h * https://huggingface.co/speechbrain/sepformer-wham-enhancement * https://huggingface.co/speechbrain/sepformer-whamr-enhancement * https://huggingface.co/speechbrain/sepformer-whamr16k -* Pretrained models with the training logs can be found on `https://drive.google.com/drive/u/0/folders/1ZVuROxR711Xib2MsJbcPla4PWqbK1Ddw` also. +* Pretrained models with the training logs can be found on `https://www.dropbox.com/sh/e4bth1bylk7c6h8/AADFq3cWzBBKxuDv09qjvUMta?dl=0` also. # Example calls for running the training scripts @@ -82,6 +91,15 @@ Pretrained models for SepFormer on WHAM!, WHAMR! datasets can be found through h Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, @@ -97,9 +115,9 @@ Please, cite SpeechBrain if you use it for your research or business. 
**Citing SepFormer** ```bibtex @inproceedings{subakan2021attention, - title={Attention is All You Need in Speech Separation}, - author={Cem Subakan and Mirco Ravanelli and Samuele Cornell and Mirko Bronzi and Jianyuan Zhong}, - year={2021}, - booktitle={ICASSP 2021} + title={Attention is All You Need in Speech Separation}, + author={Cem Subakan and Mirco Ravanelli and Samuele Cornell and Mirko Bronzi and Jianyuan Zhong}, + year={2021}, + booktitle={ICASSP 2021} } ``` diff --git a/recipes/WHAMandWHAMR/enhancement/create_whamr_rirs.py b/recipes/WHAMandWHAMR/enhancement/create_whamr_rirs.py new file mode 120000 index 0000000000..66094be97c --- /dev/null +++ b/recipes/WHAMandWHAMR/enhancement/create_whamr_rirs.py @@ -0,0 +1 @@ +../meta/create_whamr_rirs.py \ No newline at end of file diff --git a/recipes/WHAMandWHAMR/enhancement/dynamic_mixing.py b/recipes/WHAMandWHAMR/enhancement/dynamic_mixing.py index dba48b7014..bc0e4dda4f 100644 --- a/recipes/WHAMandWHAMR/enhancement/dynamic_mixing.py +++ b/recipes/WHAMandWHAMR/enhancement/dynamic_mixing.py @@ -1,13 +1,15 @@ -import speechbrain as sb -import numpy as np -import torch -import torchaudio import glob import os -from pathlib import Path import random -from speechbrain.processing.signal_processing import rescale +from pathlib import Path + +import numpy as np +import torch + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.dataio.batch import PaddedBatch +from speechbrain.processing.signal_processing import rescale """ The functions to implement Dynamic Mixing For SpeechSeparation @@ -33,9 +35,8 @@ def build_spk_hashtable(base_folder_dm, sample_rate): spk_hashtable = {} for utt in wsj0_utterances: - spk_id = Path(utt).stem[:3] - assert torchaudio.info(utt).sample_rate == sample_rate + assert audio_io.info(utt).sample_rate == sample_rate # e.g. 
2speakers/wav8k/min/tr/mix/019o031a_0.27588_01vo030q_-0.27588.wav # id of speaker 1 is 019 utterance id is o031a @@ -97,7 +98,7 @@ def dynamic_mix_data_prep( # 1. Define datasets train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=tr_csv, replacements={"data_root": data_root_folder}, + csv_path=tr_csv, replacements={"data_root": data_root_folder} ) # we build an dictionary where keys are speakers id and entries are list @@ -130,7 +131,7 @@ def audio_pipeline( if "wham" in Path(data_root_folder).stem: noise_file = np.random.choice(noise_files, 1, replace=False) - noise, fs_read = torchaudio.load(noise_file[0]) + noise, fs_read = audio_io.load(noise_file[0]) noise = noise.squeeze() # select two speakers randomly @@ -143,22 +144,21 @@ def audio_pipeline( ] minlen = min( - *[torchaudio.info(x).num_frames for x in spk_files], + *[audio_io.info(x).num_frames for x in spk_files], max_training_signal_len, ) for i, spk_file in enumerate(spk_files): - # select random offset - length = torchaudio.info(spk_file).num_frames + length = audio_io.info(spk_file).num_frames start = 0 stop = length if length > minlen: # take a random window start = np.random.randint(0, length - minlen) stop = start + minlen - tmp, fs_read = torchaudio.load( - spk_file, frame_offset=start, num_frames=stop - start, + tmp, fs_read = audio_io.load( + spk_file, frame_offset=start, num_frames=stop - start ) tmp = tmp[0] # * peak # remove channel dim and normalize diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/cnntransformer-wham-DM.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/cnntransformer-wham-DM.yaml index 826da0bbfd..cb62fc83ee 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/cnntransformer-wham-DM.yaml +++ b/recipes/WHAMandWHAMR/enhancement/hparams/cnntransformer-wham-DM.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: 
!apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -38,15 +38,14 @@ test_data: !ref /whamorg_tt.csv skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 8000 n_audio_to_save: 20 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 8 lr: 0.0001 @@ -73,18 +72,39 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/cnntransformer-whamr-DM.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/cnntransformer-whamr-DM.yaml index 4e377a5448..c91ba904af 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/cnntransformer-whamr-DM.yaml +++ b/recipes/WHAMandWHAMR/enhancement/hparams/cnntransformer-whamr-DM.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -38,15 +38,14 @@ test_data: !ref /whamr_tt.csv skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 8000 n_audio_to_save: 20 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 8 lr: 0.0001 @@ -74,18 +73,39 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] 
+# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/convtasnet-whamr-DM.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/convtasnet-whamr-DM.yaml index fb23e75705..ce0b41a2f3 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/convtasnet-whamr-DM.yaml +++ b/recipes/WHAMandWHAMR/enhancement/hparams/convtasnet-whamr-DM.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: 
!apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,15 +37,14 @@ test_data: !ref /whamr_tt.csv skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 8000 n_audio_to_save: 20 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 10 lr: 0.00015 @@ -73,18 +72,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -101,6 +120,10 @@ dataloader_opts: batch_size: !ref num_workers: 3 +dataloader_opts_valid: + batch_size: !ref + num_workers: 3 + # Specifying the network Encoder: !new:speechbrain.lobes.models.dual_path.Encoder kernel_size: !ref diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/dprnn-whamr-DM.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/dprnn-whamr-DM.yaml index b73b5c58cb..0ef256c681 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/dprnn-whamr-DM.yaml +++ b/recipes/WHAMandWHAMR/enhancement/hparams/dprnn-whamr-DM.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,15 +37,14 @@ test_data: !ref /whamr_tt.csv skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 8000 n_audio_to_save: 20 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -73,18 +72,39 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: 
[95, 100, 105] +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -101,6 +121,10 @@ dataloader_opts: batch_size: !ref num_workers: 3 +dataloader_opts_valid: + batch_size: !ref + num_workers: 3 + # Specifying the network Encoder: !new:speechbrain.lobes.models.dual_path.Encoder kernel_size: !ref diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-wham.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-wham.yaml index b598b5e26c..54a9abda01 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-wham.yaml +++ 
b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-wham.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -38,14 +38,13 @@ skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -67,18 +66,39 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-16k-DM.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-16k-DM.yaml index 945776f84a..ffb8b24520 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-16k-DM.yaml +++ b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-16k-DM.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -39,15 +39,14 @@ test_data: !ref /whamr_tt.csv skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 16000 n_audio_to_save: 20 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -75,18 +74,39 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: 
!ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-16k.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-16k.yaml index ae6542210b..f310049b9c 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-16k.yaml +++ b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-16k.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,15 +37,14 @@ test_data: !ref /whamr_tt.csv skip_prep: False # Experiment params -auto_mix_prec: True # Set it 
to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 16000 n_audio_to_save: 20 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -73,18 +72,39 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-DM.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-DM.yaml index 3ce2f60d76..0ceb93b976 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-DM.yaml +++ b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr-DM.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,15 +37,14 @@ test_data: !ref /whamr_tt.csv skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 8000 n_audio_to_save: 20 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -73,18 +72,39 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed 
perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref + # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr.yaml b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr.yaml index aa8ca5bb62..ad577099cb 100644 --- a/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr.yaml +++ b/recipes/WHAMandWHAMR/enhancement/hparams/sepformer-whamr.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -37,15 +37,14 @@ test_data: !ref /whamr_tt.csv skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision 
-test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 1 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 8000 n_audio_to_save: 20 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -73,18 +72,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/enhancement/prepare_data.py b/recipes/WHAMandWHAMR/enhancement/prepare_data.py new file mode 120000 index 0000000000..1a7125c969 --- /dev/null +++ b/recipes/WHAMandWHAMR/enhancement/prepare_data.py @@ -0,0 +1 @@ +../prepare_data.py \ No newline at end of file diff --git a/recipes/WHAMandWHAMR/enhancement/preprocess_dynamic_mixing.py b/recipes/WHAMandWHAMR/enhancement/preprocess_dynamic_mixing.py new file mode 120000 index 0000000000..4521bd013b --- /dev/null +++ b/recipes/WHAMandWHAMR/enhancement/preprocess_dynamic_mixing.py @@ -0,0 +1 @@ +../meta/preprocess_dynamic_mixing.py \ No newline at end of file diff --git a/recipes/WHAMandWHAMR/enhancement/train.py b/recipes/WHAMandWHAMR/enhancement/train.py index f4c2ee9e66..181c3c8f71 100755 --- a/recipes/WHAMandWHAMR/enhancement/train.py +++ b/recipes/WHAMandWHAMR/enhancement/train.py @@ -18,23 +18,24 @@ * Jianyuan Zhong 2020 """ +import csv import os import sys + +import numpy as np import torch import torch.nn.functional as F -import torchaudio -import speechbrain as sb -import speechbrain.nnet.schedulers as schedulers -from speechbrain.utils.distributed import run_on_main -from torch.cuda.amp import autocast from hyperpyyaml import load_hyperpyyaml -import numpy as np +from pesq import pesq from tqdm import tqdm -import csv -import logging + +import speechbrain as sb +import speechbrain.nnet.schedulers as schedulers +from speechbrain.dataio import audio_io from speechbrain.processing.features 
import spectral_magnitude +from speechbrain.utils.distributed import run_on_main +from speechbrain.utils.logger import get_logger from speechbrain.utils.metric_stats import MetricStats -from pesq import pesq # Define training procedure @@ -92,7 +93,8 @@ def compute_forward(self, mix, targets, stage, noise=None): targets = targets[:, :min_len, :] if self.hparams.use_wavedrop: - mix = self.hparams.wavedrop(mix, mix_lens) + mix = self.hparams.drop_chunk(mix, mix_lens) + mix = self.hparams.drop_freq(mix) if self.hparams.limit_training_signal_len: mix, targets = self.cut_signals(mix, targets) @@ -150,77 +152,43 @@ def compute_objectives(self, predictions, targets): def fit_batch(self, batch): """Trains one batch""" + # Unpacking batch list mixture = batch.mix_sig targets = [batch.s1_sig, batch.s2_sig] noise = batch.noise_sig[0] - if self.auto_mix_prec: - with autocast(): - predictions, targets = self.compute_forward( - mixture, targets, sb.Stage.TRAIN, noise - ) - loss = self.compute_objectives(predictions, targets) - - # hard threshold the easy dataitems - if self.hparams.threshold_byloss: - th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() - else: - loss = loss.mean() - - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - self.scaler.scale(loss).backward() - if self.hparams.clip_grad_norm >= 0: - self.scaler.unscale_(self.optimizer) - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm, - ) - self.scaler.step(self.optimizer) - self.scaler.update() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! 
it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) - ) - loss.data = torch.tensor(0).to(self.device) - else: + with self.training_ctx: predictions, targets = self.compute_forward( mixture, targets, sb.Stage.TRAIN, noise ) loss = self.compute_objectives(predictions, targets) + # hard threshold the easy dataitems if self.hparams.threshold_byloss: th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() + loss = loss[loss > th] + if loss.nelement() > 0: + loss = loss.mean() else: loss = loss.mean() - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - loss.backward() - if self.hparams.clip_grad_norm >= 0: - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm - ) - self.optimizer.step() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) + if loss.nelement() > 0 and loss < self.hparams.loss_upper_lim: + self.scaler.scale(loss).backward() + if self.hparams.clip_grad_norm >= 0: + self.scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), + self.hparams.clip_grad_norm, ) - loss.data = torch.tensor(0).to(self.device) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + self.nonfinite_count += 1 + logger.info( + f"infinite loss or empty loss! 
it happened {self.nonfinite_count} times so far - skipping this batch" + ) + loss.data = torch.tensor(0.0).to(self.device) self.optimizer.zero_grad() return loss.detach().cpu() @@ -233,7 +201,7 @@ def evaluate_batch(self, batch, stage): with torch.no_grad(): predictions, targets = self.compute_forward(mixture, targets, stage) - loss = self.compute_objectives(predictions, targets) + loss = self.compute_objectives(predictions, targets).mean() if stage != sb.Stage.TRAIN: self.pesq_metric.append( @@ -287,7 +255,6 @@ def on_stage_end(self, stage, stage_loss, epoch): # Perform end-of-iteration things, like annealing, logging, etc. if stage == sb.Stage.VALID: - # Learning rate annealing if isinstance( self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau @@ -312,7 +279,7 @@ def on_stage_end(self, stage, stage_loss, epoch): self.checkpointer.save_checkpoint(meta={"pesq": stats["pesq"]}) else: self.checkpointer.save_and_keep_only( - meta={"pesq": stats["pesq"]}, max_keys=["pesq"], + meta={"pesq": stats["pesq"]}, max_keys=["pesq"] ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( @@ -332,9 +299,7 @@ def add_speed_perturb(self, targets, targ_lens): recombine = True for i in range(targets.shape[-1]): - new_target = self.hparams.speedperturb( - targets[:, :, i], targ_lens - ) + new_target = self.hparams.speed_perturb(targets[:, :, i]) new_targets.append(new_target) if i == 0: min_len = new_target.shape[-1] @@ -371,7 +336,7 @@ def add_speed_perturb(self, targets, targ_lens): return mix, targets def cut_signals(self, mixture, targets): - """This function selects a random segment of a given length withing the mixture. + """This function selects a random segment of a given length within the mixture. 
The corresponding targets are selected accordingly""" randstart = torch.randint( 0, @@ -416,14 +381,13 @@ def save_results(self, test_data): test_data, **self.hparams.dataloader_opts ) - with open(save_file, "w") as results_csv: + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: writer = csv.DictWriter(results_csv, fieldnames=csv_columns) writer.writeheader() # Loop over all test sentence with tqdm(test_loader, dynamic_ncols=True) as t: for i, batch in enumerate(t): - # Apply Separation mixture, mix_len = batch.mix_sig snt_id = batch.id @@ -444,8 +408,9 @@ def save_results(self, test_data): [mixture] * self.hparams.num_spks, dim=-1 ) mixture_signal = mixture_signal.to(targets.device) + mix_w = self.compute_feats(mixture_signal.squeeze(-1)) sisnr_baseline = self.compute_objectives( - [mixture_signal.squeeze(-1), None], targets + [mixture_signal.squeeze(-1), mix_w], targets ) sisnr_i = sisnr - sisnr_baseline @@ -501,16 +466,16 @@ def save_results(self, test_data): } writer.writerow(row) - logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean())) - logger.info("Mean SISNRi is {}".format(np.array(all_sisnrs_i).mean())) - logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean())) - logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean())) - logger.info("Mean PESQ {}".format(np.array(all_pesqs).mean())) + logger.info(f"Mean SISNR is {np.array(all_sisnrs).mean()}") + logger.info(f"Mean SISNRi is {np.array(all_sisnrs_i).mean()}") + logger.info(f"Mean SDR is {np.array(all_sdrs).mean()}") + logger.info(f"Mean SDRi is {np.array(all_sdrs_i).mean()}") + logger.info(f"Mean PESQ {np.array(all_pesqs).mean()}") def save_audio(self, snt_id, mixture, targets, predictions): "saves the test audio (mixture, targets, and estimated sources) on disk" - # Create outout folder + # Create output folder save_path = os.path.join(self.hparams.save_folder, "audio_results") if not os.path.exists(save_path): os.mkdir(save_path) @@ -518,26 +483,24 @@ 
def save_audio(self, snt_id, mixture, targets, predictions): # Estimated source signal = predictions[0, :] signal = signal / signal.abs().max() - save_file = os.path.join( - save_path, "item{}_sourcehat.wav".format(snt_id) - ) - torchaudio.save( + save_file = os.path.join(save_path, f"item{snt_id}_sourcehat.wav") + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) # Original source signal = targets[0, :] signal = signal / signal.abs().max() - save_file = os.path.join(save_path, "item{}_source.wav".format(snt_id)) - torchaudio.save( + save_file = os.path.join(save_path, f"item{snt_id}_source.wav") + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) # Mixture signal = mixture[0][0, :] signal = signal / signal.abs().max() - save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id)) - torchaudio.save( + save_file = os.path.join(save_path, f"item{snt_id}_mix.wav") + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) @@ -604,17 +567,16 @@ def audio_pipeline_noise(noise_wav): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) sb.utils.distributed.ddp_init_group(run_opts) # Logger info - logger = logging.getLogger(__name__) + logger = get_logger(__name__) # Create experiment directory sb.create_experiment_directory( @@ -623,17 +585,20 @@ def audio_pipeline_noise(noise_wav): overrides=overrides, ) + # Update precision to bf16 if the device is CPU and precision is fp16 + if run_opts.get("device") == "cpu" and hparams.get("precision") == "fp16": + hparams["precision"] = "bf16" + # Check if wsj0_tr is set with dynamic mixing if hparams["dynamic_mixing"] and not os.path.exists( 
hparams["base_folder_dm"] ): - print( + raise ValueError( "Please, specify a valid base_folder_dm folder when using dynamic mixing" ) - sys.exit(1) # Data preparation - from recipes.WHAMandWHAMR.prepare_data import prepare_wham_whamr_csv + from prepare_data import prepare_wham_whamr_csv run_on_main( prepare_wham_whamr_csv, @@ -648,12 +613,12 @@ def audio_pipeline_noise(noise_wav): # if whamr, and we do speedaugment we need to prepare the csv file if "whamr" in hparams["data_folder"] and hparams["use_speedperturb"]: - from recipes.WHAMandWHAMR.prepare_data import create_whamr_rir_csv - from recipes.WHAMandWHAMR.meta.create_whamr_rirs import create_rirs + from create_whamr_rirs import create_rirs + from prepare_data import create_whamr_rir_csv # If the Room Impulse Responses do not exist, we create them if not os.path.exists(hparams["rir_path"]): - print("Createing Room Impulse Responses...") + print("Creating Room Impulse Responses...") run_on_main( create_rirs, kwargs={ @@ -689,9 +654,7 @@ def audio_pipeline_noise(noise_wav): if not os.path.exists( os.path.normpath(hparams["base_folder_dm"]) + "_" + dm_suffix ): - from recipes.WHAMandWHAMR.meta.preprocess_dynamic_mixing import ( - resample_folder, - ) + from preprocess_dynamic_mixing import resample_folder print("Resampling the base folder") run_on_main( @@ -761,15 +724,14 @@ def audio_pipeline_noise(noise_wav): use_freq_domain = hparams.get("use_freq_domain", False) separator.use_freq_domain = use_freq_domain - if not hparams["test_only"]: - # Training - separator.fit( - separator.hparams.epoch_counter, - train_data, - valid_data, - train_loader_kwargs=hparams["dataloader_opts"], - valid_loader_kwargs=hparams["dataloader_opts_valid"], - ) + # Training + separator.fit( + separator.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_opts"], + valid_loader_kwargs=hparams["dataloader_opts_valid"], + ) # Eval separator.evaluate(test_data, max_key="pesq") diff --git 
a/recipes/WHAMandWHAMR/enhancement/wham_room.py b/recipes/WHAMandWHAMR/enhancement/wham_room.py new file mode 120000 index 0000000000..68ebdf470e --- /dev/null +++ b/recipes/WHAMandWHAMR/enhancement/wham_room.py @@ -0,0 +1 @@ +../meta/wham_room.py \ No newline at end of file diff --git a/recipes/WHAMandWHAMR/extra-dependencies.txt b/recipes/WHAMandWHAMR/extra-dependencies.txt deleted file mode 100644 index 0024eed7f7..0000000000 --- a/recipes/WHAMandWHAMR/extra-dependencies.txt +++ /dev/null @@ -1,3 +0,0 @@ -mir-eval==0.6 -pyroomacoustics==0.3.1 - diff --git a/recipes/WHAMandWHAMR/extra_requirements.txt b/recipes/WHAMandWHAMR/extra_requirements.txt new file mode 100644 index 0000000000..ed28f44c44 --- /dev/null +++ b/recipes/WHAMandWHAMR/extra_requirements.txt @@ -0,0 +1,2 @@ +mir-eval==0.6 +pyroomacoustics>=0.7.3 diff --git a/recipes/WHAMandWHAMR/meta/create_whamr_rirs.py b/recipes/WHAMandWHAMR/meta/create_whamr_rirs.py index 7954014a7c..0ddb14f860 100644 --- a/recipes/WHAMandWHAMR/meta/create_whamr_rirs.py +++ b/recipes/WHAMandWHAMR/meta/create_whamr_rirs.py @@ -4,17 +4,18 @@ Authors * Cem Subakan 2021 """ -import os -import pandas as pd + import argparse -import torchaudio +import os -from recipes.WHAMandWHAMR.meta.wham_room import WhamRoom -from scipy.signal import resample_poly +import pandas as pd import torch -from speechbrain.pretrained.fetching import fetch +from scipy.signal import resample_poly from tqdm import tqdm -import pyroomacoustics +from wham_room import WhamRoom + +from speechbrain.dataio import audio_io +from speechbrain.utils.fetching import fetch def create_rirs(output_dir, sr=8000): @@ -29,10 +30,6 @@ def create_rirs(output_dir, sr=8000): """ - assert ( - pyroomacoustics.__version__ == "0.3.1" - ), "The pyroomacoustics version needs to be 0.3.1" - os.makedirs(output_dir) metafilesdir = os.path.dirname(os.path.realpath(__file__)) @@ -65,12 +62,11 @@ def create_rirs(output_dir, sr=8000): metafilesdir, "data", "reverb_params_{}.csv" ) - for 
splt in SPLITS: - - wsjmix_path = FILELIST_STUB.format(splt) + for split in SPLITS: + wsjmix_path = FILELIST_STUB.format(split) wsjmix_df = pd.read_csv(wsjmix_path) - reverb_param_path = reverb_param_stub.format(splt) + reverb_param_path = reverb_param_stub.format(split) reverb_param_df = pd.read_csv(reverb_param_path) utt_ids = wsjmix_df.output_filename.values @@ -118,10 +114,8 @@ def create_rirs(output_dir, sr=8000): h = resample_poly(source, sr, 16000) h_torch = torch.from_numpy(h).float().unsqueeze(0) - torchaudio.save( - os.path.join( - output_dir, "{}_{}_".format(i, j) + output_name, - ), + audio_io.save( + os.path.join(output_dir, f"{i}_{j}_" + output_name), h_torch, sr, ) diff --git a/recipes/WHAMandWHAMR/meta/preprocess_dynamic_mixing.py b/recipes/WHAMandWHAMR/meta/preprocess_dynamic_mixing.py index 7bc4ac55c9..7f07b6e185 100644 --- a/recipes/WHAMandWHAMR/meta/preprocess_dynamic_mixing.py +++ b/recipes/WHAMandWHAMR/meta/preprocess_dynamic_mixing.py @@ -8,18 +8,19 @@ Samuele Cornell, 2020 """ -import os import argparse +import glob +import os from pathlib import Path + +import numpy as np +import torch import tqdm -import torchaudio -import glob # from oct2py import octave from scipy import signal -import numpy as np -import torch +from speechbrain.dataio import audio_io parser = argparse.ArgumentParser( "utility for resampling all audio files in a folder recursively" @@ -45,7 +46,7 @@ def resample_folder(input_folder, output_folder, fs, regex): Path of the output folder with the resampled data. fs : int Target sampling frequency. - reg_exp: str + regex : str Regular expression for search. 
""" # filedir = os.path.dirname(os.path.realpath(__file__)) @@ -54,8 +55,7 @@ def resample_folder(input_folder, output_folder, fs, regex): files = glob.glob(os.path.join(input_folder, regex), recursive=True) for f in tqdm.tqdm(files): - - audio, fs_read = torchaudio.load(f) + audio, fs_read = audio_io.load(f) audio = audio[0].numpy() audio = signal.resample_poly(audio, fs, fs_read) @@ -68,8 +68,7 @@ def resample_folder(input_folder, output_folder, fs, regex): relative_path = os.path.join( Path(f).relative_to(Path(input_folder)).parent, - Path(f).relative_to(Path(input_folder)).stem - + "_peak_{}.wav".format(peak), + Path(f).relative_to(Path(input_folder)).stem + f"_peak_{peak}.wav", ) os.makedirs( @@ -81,7 +80,7 @@ def resample_folder(input_folder, output_folder, fs, regex): exist_ok=True, ) - torchaudio.save( + audio_io.save( os.path.join(output_folder, relative_path), audio.reshape(1, -1), fs, @@ -89,7 +88,6 @@ def resample_folder(input_folder, output_folder, fs, regex): if __name__ == "__main__": - args = parser.parse_args() resample_folder( args.input_folder, args.output_folder, int(args.fs), args.regex diff --git a/recipes/WHAMandWHAMR/meta/wham_room.py b/recipes/WHAMandWHAMR/meta/wham_room.py index d42a140c06..678baf0a5c 100644 --- a/recipes/WHAMandWHAMR/meta/wham_room.py +++ b/recipes/WHAMandWHAMR/meta/wham_room.py @@ -16,7 +16,6 @@ class WhamRoom(pra.room.ShoeBox): def __init__( self, p, mics, s1, s2, T60, fs=16000, t0=0.0, sigma2_awgn=None ): - self.T60 = T60 self.max_rir_len = np.ceil(T60 * fs).astype(int) @@ -53,7 +52,6 @@ def add_audio(self, s1, s2): self.sources[1].add_signal(s2) def compute_rir(self): - self.rir = [] self.visibility = None @@ -70,7 +68,6 @@ def compute_rir(self): self.rir.append(h) def generate_rirs(self): - original_max_order = self.max_order self.max_order = 0 @@ -85,7 +82,6 @@ def generate_rirs(self): self.rir_reverberant = self.rir def generate_audio(self, anechoic=False, fs=16000): - if not self.rir: self.generate_rirs() if 
anechoic: diff --git a/recipes/WHAMandWHAMR/prepare_data.py b/recipes/WHAMandWHAMR/prepare_data.py index f8320963a0..aed0aebaa0 100644 --- a/recipes/WHAMandWHAMR/prepare_data.py +++ b/recipes/WHAMandWHAMR/prepare_data.py @@ -2,11 +2,11 @@ Author * Cem Subakan 2020 -The .csv preperation functions for WSJ0-Mix. +The .csv preparation functions for WSJ0-Mix. """ -import os import csv +import os def prepare_wham_whamr_csv( @@ -92,16 +92,16 @@ def create_wham_whamr_csv( s2 = "s2/" mix_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, mix_both, + datapath, f"wav{sample_rate}", version, set_type, mix_both ) s1_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, s1, + datapath, f"wav{sample_rate}", version, set_type, s1 ) s2_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, s2, + datapath, f"wav{sample_rate}", version, set_type, s2 ) noise_path = os.path.join( - datapath, "wav{}".format(sample_rate), version, set_type, "noise/" + datapath, f"wav{sample_rate}", version, set_type, "noise/" ) # rir_path = os.path.join( # datapath, "wav{}".format(sample_rate), version, set_type, "rirs/" @@ -136,11 +136,13 @@ def create_wham_whamr_csv( ] with open( - os.path.join(savepath, savename + set_type + ".csv"), "w" + os.path.join(savepath, savename + set_type + ".csv"), + "w", + encoding="utf-8", ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() - for (i, (mix_path, s1_path, s2_path, noise_path),) in enumerate( + for i, (mix_path, s1_path, s2_path, noise_path) in enumerate( zip( mix_fl_paths, s1_fl_paths, @@ -149,7 +151,6 @@ def create_wham_whamr_csv( # rir_fl_paths, ) ): - row = { "ID": i, "duration": 1.0, @@ -186,11 +187,12 @@ def create_whamr_rir_csv(datapath, savepath): files = os.listdir(datapath) all_paths = [os.path.join(datapath, fl) for fl in files] - with open(savepath + "/whamr_rirs.csv", "w") as csvfile: + with open( + savepath + 
"/whamr_rirs.csv", "w", newline="", encoding="utf-8" + ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for i, wav_path in enumerate(all_paths): - row = { "ID": i, "duration": 2.0, diff --git a/recipes/WHAMandWHAMR/separation/README.md b/recipes/WHAMandWHAMR/separation/README.md index facbf4daaa..a103e2724a 100644 --- a/recipes/WHAMandWHAMR/separation/README.md +++ b/recipes/WHAMandWHAMR/separation/README.md @@ -3,22 +3,32 @@ This folder contains some popular recipes for the WHAM! and WHAMR! datasets. * This recipe supports train with several source separation models on WHAM! and WHAMR! datasets, including [Sepformer](https://arxiv.org/abs/2010.13154), [DPRNN](https://arxiv.org/abs/1910.06379), [ConvTasnet](https://arxiv.org/abs/1809.07454), [DPTNet](https://arxiv.org/abs/2007.13975). -Additional dependency: -``` -pip install mir_eval -pip install pyroomacoustics==0.3.1 +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: ``` -For `pyroomacoustics`, you need to use the version 0.3.1. +pip install -r ../extra_requirements.txt +``` + +## How to run: To run it: -``` +```shell python train.py hparams/sepformer-wham.yaml --data_folder yourpath/wham_original python train.py hparams/sepformer-whamr.yaml --data_folder yourpath/whamr ``` Note that during training we print the negative SI-SNR (as we treat this value as the loss). +# How to run on test sets only +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: + +```shell +python train.py hparams/sepformer-wham.yaml --data_folder yourpath/wham_original --test_only +python train.py hparams/sepformer-whamr.yaml --data_folder yourpath/whamr --test_only +``` + # WHAM! and WHAMR! dataset: * This recipe supports the noisy and reverberant [versions](http://wham.whisper.ai/) of WSJ0 - 2/3 Mix datasets. 
For WHAM!, simply use `--data_folder /yourpath/wham_original`, and for WHAMR! use `--data_folder /yourpath/whamr`. The script will automatically adjust itself to WHAM and WHAMR, but you must rename the top folder (the folder that contains the `wav8k` subfolder should be named respectively `wham_original` and `whamr`, as the script decides which dataset to use based on the `--data_folder` variable. @@ -52,8 +62,8 @@ Here are the SI - SNRi results (in dB) on the test set of WHAM!, WHAMR! datasets # Training time It takes about 2h 30 min for WHAMR! (DynamicMixing) and WHAM! on a NVIDIA V100 (32GB). -The output folder with the logs for WHAMR! can be found [here](https://drive.google.com/drive/folders/1m1xfx2ojf7qgOyscJVVCQFRY0VRl0rdi?usp=sharing). -The output folder with the logs for WHAM! can be found [here](https://drive.google.com/drive/folders/1dIAT8hZxvdJPZNUb8Zkk3BuN7GZ9-mZb?usp=sharing). +The output folder with the logs for WHAMR! can be found [here](https://www.dropbox.com/sh/1sia32z01xbfgvu/AADditsqaTyfN3N6tzfEFPica?dl=0). +The output folder with the logs for WHAM! can be found [here](https://www.dropbox.com/sh/sfrgb3xivri432e/AACQodNmiDIKrB9vCeCFUDWUa?dl=0). # Pretrained Models: @@ -61,7 +71,7 @@ Pretrained models for SepFormer on WHAM!, WHAMR! datasets can be found through h * https://huggingface.co/speechbrain/sepformer-wham * https://huggingface.co/speechbrain/sepformer-whamr -* Pretrained models with the training logs can be found on `https://drive.google.com/drive/u/0/folders/1ZVuROxR711Xib2MsJbcPla4PWqbK1Ddw` also. +* Pretrained models with the training logs can be found on `https://www.dropbox.com/sh/e4bth1bylk7c6h8/AADFq3cWzBBKxuDv09qjvUMta?dl=0` also. You can find the pre-trained model with an easy-inference function on [HuggingFace](https://huggingface.co/speechbrain/sepformer-whamr). The 16kHz version of the sepformer can be found [here](https://huggingface.co/speechbrain/sepformer-whamr16k). 
@@ -81,10 +91,10 @@ The 16kHz version of the sepformer can be found [here](https://huggingface.co/sp You can run the following command to train the model using Distributed Data Parallel (DDP) with 2 GPUs: +```bash +torchrun --nproc_per_node=2 train.py hparams/sepformer-whamr.yaml --data_folder /yourdatapath ``` - python -m torch.distributed.launch --nproc_per_node=2 train.py hparams/sepformer-whamr.yaml --data_folder /yourdatapath --distributed_launch --distributed_backend='nccl' -``` -You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at this [tutorial](https://colab.research.google.com/drive/13pBUacPiotw1IvyffvGZ-HrtBr9T6l15?usp=sharing). +You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at [our documentation](https://speechbrain.readthedocs.io/en/latest/multigpu.html). # **About SpeechBrain** @@ -97,6 +107,15 @@ You can add the other runtime options as appropriate. For more complete informat Please, cite SpeechBrain if you use it for your research or business. 
```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/WHAMandWHAMR/separation/create_whamr_rirs.py b/recipes/WHAMandWHAMR/separation/create_whamr_rirs.py new file mode 120000 index 0000000000..66094be97c --- /dev/null +++ b/recipes/WHAMandWHAMR/separation/create_whamr_rirs.py @@ -0,0 +1 @@ +../meta/create_whamr_rirs.py \ No newline at end of file diff --git a/recipes/WHAMandWHAMR/separation/dynamic_mixing.py b/recipes/WHAMandWHAMR/separation/dynamic_mixing.py index c5d5f15a32..1d957b03ac 100644 --- a/recipes/WHAMandWHAMR/separation/dynamic_mixing.py +++ b/recipes/WHAMandWHAMR/separation/dynamic_mixing.py @@ -1,13 +1,15 @@ -import speechbrain as sb -import numpy as np -import torch -import torchaudio import glob 
import os -from pathlib import Path import random -from speechbrain.processing.signal_processing import rescale +from pathlib import Path + +import numpy as np +import torch + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.dataio.batch import PaddedBatch +from speechbrain.processing.signal_processing import rescale """ The functions to implement Dynamic Mixing For SpeechSeparation @@ -33,9 +35,8 @@ def build_spk_hashtable(base_folder_dm, sample_rate): spk_hashtable = {} for utt in wsj0_utterances: - spk_id = Path(utt).stem[:3] - assert torchaudio.info(utt).sample_rate == sample_rate + assert audio_io.info(utt).sample_rate == sample_rate # e.g. 2speakers/wav8k/min/tr/mix/019o031a_0.27588_01vo030q_-0.27588.wav # id of speaker 1 is 019 utterance id is o031a @@ -97,7 +98,7 @@ def dynamic_mix_data_prep( # 1. Define datasets train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=tr_csv, replacements={"data_root": data_root_folder}, + csv_path=tr_csv, replacements={"data_root": data_root_folder} ) # we build an dictionary where keys are speakers id and entries are list @@ -130,7 +131,7 @@ def audio_pipeline( if "wham" in Path(data_root_folder).stem: noise_file = np.random.choice(noise_files, 1, replace=False) - noise, fs_read = torchaudio.load(noise_file[0]) + noise, fs_read = audio_io.load(noise_file[0]) noise = noise.squeeze() # select two speakers randomly @@ -143,22 +144,21 @@ def audio_pipeline( ] minlen = min( - *[torchaudio.info(x).num_frames for x in spk_files], + *[audio_io.info(x).num_frames for x in spk_files], max_training_signal_len, ) for i, spk_file in enumerate(spk_files): - # select random offset - length = torchaudio.info(spk_file).num_frames + length = audio_io.info(spk_file).num_frames start = 0 stop = length if length > minlen: # take a random window start = np.random.randint(0, length - minlen) stop = start + minlen - tmp, fs_read = torchaudio.load( - spk_file, frame_offset=start, num_frames=stop - 
start, + tmp, fs_read = audio_io.load( + spk_file, frame_offset=start, num_frames=stop - start ) tmp = tmp[0] # * peak # remove channel dim and normalize diff --git a/recipes/WHAMandWHAMR/separation/hparams/sepformer-wham.yaml b/recipes/WHAMandWHAMR/separation/hparams/sepformer-wham.yaml index 3fe8658b12..757d9c1212 100644 --- a/recipes/WHAMandWHAMR/separation/hparams/sepformer-wham.yaml +++ b/recipes/WHAMandWHAMR/separation/hparams/sepformer-wham.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -36,14 +36,13 @@ skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -65,18 +64,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero.
+drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/separation/hparams/sepformer-whamr.yaml b/recipes/WHAMandWHAMR/separation/hparams/sepformer-whamr.yaml index c08bcffe5d..2c4fd6ae96 100644 --- a/recipes/WHAMandWHAMR/separation/hparams/sepformer-whamr.yaml +++ b/recipes/WHAMandWHAMR/separation/hparams/sepformer-whamr.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -35,13 +35,12 @@ test_data: !ref /whamr_tt.csv skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters
#################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -69,18 +68,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WHAMandWHAMR/separation/prepare_data.py b/recipes/WHAMandWHAMR/separation/prepare_data.py new file mode 120000 index 0000000000..1a7125c969 --- /dev/null +++ b/recipes/WHAMandWHAMR/separation/prepare_data.py @@ -0,0 +1 @@ +../prepare_data.py \ No newline at end of file diff --git a/recipes/WHAMandWHAMR/separation/train.py b/recipes/WHAMandWHAMR/separation/train.py index 96f477de9a..4596cdcb4f 100755 --- a/recipes/WHAMandWHAMR/separation/train.py +++ b/recipes/WHAMandWHAMR/separation/train.py @@ -18,20 +18,21 @@ * Jianyuan Zhong 2020 """ +import csv import os import sys + +import numpy as np import torch import torch.nn.functional as F -import torchaudio +from hyperpyyaml import load_hyperpyyaml +from tqdm import tqdm + import speechbrain as sb import speechbrain.nnet.schedulers as schedulers +from speechbrain.dataio import audio_io from speechbrain.utils.distributed import run_on_main -from torch.cuda.amp import autocast -from hyperpyyaml import load_hyperpyyaml -import numpy as np -from tqdm import tqdm -import csv -import logging +from speechbrain.utils.logger import get_logger # Define training procedure @@ -77,7 +78,8 @@ def compute_forward(self, mix, targets, stage, noise=None): targets = targets[:, :min_len, :] if self.hparams.use_wavedrop: - mix = self.hparams.wavedrop(mix, mix_lens) + mix = self.hparams.drop_chunk(mix, mix_lens) + mix = self.hparams.drop_freq(mix) if self.hparams.limit_training_signal_len: mix, targets = self.cut_signals(mix, 
targets) @@ -113,77 +115,43 @@ def compute_objectives(self, predictions, targets): def fit_batch(self, batch): """Trains one batch""" + # Unpacking batch list mixture = batch.mix_sig targets = [batch.s1_sig, batch.s2_sig] noise = batch.noise_sig[0] - if self.auto_mix_prec: - with autocast(): - predictions, targets = self.compute_forward( - mixture, targets, sb.Stage.TRAIN, noise - ) - loss = self.compute_objectives(predictions, targets) - - # hard threshold the easy dataitems - if self.hparams.threshold_byloss: - th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() - else: - loss = loss.mean() - - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - self.scaler.scale(loss).backward() - if self.hparams.clip_grad_norm >= 0: - self.scaler.unscale_(self.optimizer) - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm, - ) - self.scaler.step(self.optimizer) - self.scaler.update() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! 
it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) - ) - loss.data = torch.tensor(0).to(self.device) - else: + with self.training_ctx: predictions, targets = self.compute_forward( mixture, targets, sb.Stage.TRAIN, noise ) loss = self.compute_objectives(predictions, targets) + # hard threshold the easy dataitems if self.hparams.threshold_byloss: th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() + loss = loss[loss > th] + if loss.nelement() > 0: + loss = loss.mean() else: loss = loss.mean() - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - loss.backward() - if self.hparams.clip_grad_norm >= 0: - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm - ) - self.optimizer.step() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) + if loss.nelement() > 0 and loss < self.hparams.loss_upper_lim: + self.scaler.scale(loss).backward() + if self.hparams.clip_grad_norm >= 0: + self.scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), + self.hparams.clip_grad_norm, ) - loss.data = torch.tensor(0).to(self.device) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + self.nonfinite_count += 1 + logger.info( + f"infinite loss or empty loss! it happened {self.nonfinite_count} times so far - skipping this batch" + ) + loss.data = torch.tensor(0.0).to(self.device) self.optimizer.zero_grad() return loss.detach().cpu() @@ -218,7 +186,6 @@ def on_stage_end(self, stage, stage_loss, epoch): # Perform end-of-iteration things, like annealing, logging, etc. 
if stage == sb.Stage.VALID: - # Learning rate annealing if isinstance( self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau @@ -245,7 +212,7 @@ def on_stage_end(self, stage, stage_loss, epoch): ) else: self.checkpointer.save_and_keep_only( - meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"], + meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"] ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( @@ -265,9 +232,7 @@ def add_speed_perturb(self, targets, targ_lens): recombine = True for i in range(targets.shape[-1]): - new_target = self.hparams.speedperturb( - targets[:, :, i], targ_lens - ) + new_target = self.hparams.speed_perturb(targets[:, :, i]) new_targets.append(new_target) if i == 0: min_len = new_target.shape[-1] @@ -304,7 +269,7 @@ def add_speed_perturb(self, targets, targ_lens): return mix, targets def cut_signals(self, mixture, targets): - """This function selects a random segment of a given length withing the mixture. + """This function selects a random segment of a given length within the mixture. 
The corresponding targets are selected accordingly""" randstart = torch.randint( 0, @@ -348,14 +313,13 @@ def save_results(self, test_data): test_data, **self.hparams.dataloader_opts ) - with open(save_file, "w") as results_csv: + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: writer = csv.DictWriter(results_csv, fieldnames=csv_columns) writer.writeheader() # Loop over all test sentence with tqdm(test_loader, dynamic_ncols=True) as t: for i, batch in enumerate(t): - # Apply Separation mixture, mix_len = batch.mix_sig snt_id = batch.id @@ -419,28 +383,27 @@ def save_results(self, test_data): } writer.writerow(row) - logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean())) - logger.info("Mean SISNRi is {}".format(np.array(all_sisnrs_i).mean())) - logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean())) - logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean())) + logger.info(f"Mean SISNR is {np.array(all_sisnrs).mean()}") + logger.info(f"Mean SISNRi is {np.array(all_sisnrs_i).mean()}") + logger.info(f"Mean SDR is {np.array(all_sdrs).mean()}") + logger.info(f"Mean SDRi is {np.array(all_sdrs_i).mean()}") def save_audio(self, snt_id, mixture, targets, predictions): "saves the test audio (mixture, targets, and estimated sources) on disk" - # Create outout folder + # Create output folder save_path = os.path.join(self.hparams.save_folder, "audio_results") if not os.path.exists(save_path): os.mkdir(save_path) for ns in range(self.hparams.num_spks): - # Estimated source signal = predictions[0, :, ns] signal = signal / signal.abs().max() save_file = os.path.join( - save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1) + save_path, f"item{snt_id}_source{ns + 1}hat.wav" ) - torchaudio.save( + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) @@ -448,17 +411,17 @@ def save_audio(self, snt_id, mixture, targets, predictions): signal = targets[0, :, ns] signal = signal / signal.abs().max() 
save_file = os.path.join( - save_path, "item{}_source{}.wav".format(snt_id, ns + 1) + save_path, f"item{snt_id}_source{ns + 1}.wav" ) - torchaudio.save( + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) # Mixture signal = mixture[0][0, :] signal = signal / signal.abs().max() - save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id)) - torchaudio.save( + save_file = os.path.join(save_path, f"item{snt_id}_mix.wav") + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) @@ -525,17 +488,16 @@ def audio_pipeline_noise(noise_wav): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) sb.utils.distributed.ddp_init_group(run_opts) # Logger info - logger = logging.getLogger(__name__) + logger = get_logger(__name__) # Create experiment directory sb.create_experiment_directory( @@ -543,18 +505,20 @@ def audio_pipeline_noise(noise_wav): hyperparams_to_save=hparams_file, overrides=overrides, ) + # Update precision to bf16 if the device is CPU and precision is fp16 + if run_opts.get("device") == "cpu" and hparams.get("precision") == "fp16": + hparams["precision"] = "bf16" # Check if wsj0_tr is set with dynamic mixing if hparams["dynamic_mixing"] and not os.path.exists( hparams["base_folder_dm"] ): - print( + raise ValueError( "Please, specify a valid base_folder_dm folder when using dynamic mixing" ) - sys.exit(1) # Data preparation - from recipes.WHAMandWHAMR.prepare_data import prepare_wham_whamr_csv + from prepare_data import prepare_wham_whamr_csv run_on_main( prepare_wham_whamr_csv, @@ -568,8 +534,8 @@ def audio_pipeline_noise(noise_wav): # if whamr, and we do
speedaugment we need to prepare the csv file if "whamr" in hparams["data_folder"] and hparams["use_speedperturb"]: - from recipes.WHAMandWHAMR.prepare_data import create_whamr_rir_csv - from recipes.WHAMandWHAMR.meta.create_whamr_rirs import create_rirs + from create_whamr_rirs import create_rirs + from prepare_data import create_whamr_rir_csv # If the Room Impulse Responses do not exist, we create them if not os.path.exists(hparams["rir_path"]): @@ -667,15 +633,14 @@ def audio_pipeline_noise(noise_wav): for module in separator.modules.values(): separator.reset_layer_recursively(module) - if not hparams["test_only"]: - # Training - separator.fit( - separator.hparams.epoch_counter, - train_data, - valid_data, - train_loader_kwargs=hparams["dataloader_opts"], - valid_loader_kwargs=hparams["dataloader_opts"], - ) + # Training + separator.fit( + separator.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_opts"], + valid_loader_kwargs=hparams["dataloader_opts"], + ) # Eval separator.evaluate(test_data, min_key="si-snr") diff --git a/recipes/WHAMandWHAMR/separation/wham_room.py b/recipes/WHAMandWHAMR/separation/wham_room.py new file mode 120000 index 0000000000..68ebdf470e --- /dev/null +++ b/recipes/WHAMandWHAMR/separation/wham_room.py @@ -0,0 +1 @@ +../meta/wham_room.py \ No newline at end of file diff --git a/recipes/WSJ0Mix/extra-dependencies.txt b/recipes/WSJ0Mix/extra_requirements.txt similarity index 100% rename from recipes/WSJ0Mix/extra-dependencies.txt rename to recipes/WSJ0Mix/extra_requirements.txt diff --git a/recipes/WSJ0Mix/meta/preprocess_dynamic_mixing.py b/recipes/WSJ0Mix/meta/preprocess_dynamic_mixing.py index 7bc4ac55c9..7f07b6e185 100644 --- a/recipes/WSJ0Mix/meta/preprocess_dynamic_mixing.py +++ b/recipes/WSJ0Mix/meta/preprocess_dynamic_mixing.py @@ -8,18 +8,19 @@ Samuele Cornell, 2020 """ -import os import argparse +import glob +import os from pathlib import Path + +import numpy as np +import torch import 
tqdm -import torchaudio -import glob # from oct2py import octave from scipy import signal -import numpy as np -import torch +from speechbrain.dataio import audio_io parser = argparse.ArgumentParser( "utility for resampling all audio files in a folder recursively" @@ -45,7 +46,7 @@ def resample_folder(input_folder, output_folder, fs, regex): Path of the output folder with the resampled data. fs : int Target sampling frequency. - reg_exp: str + regex : str Regular expression for search. """ # filedir = os.path.dirname(os.path.realpath(__file__)) @@ -54,8 +55,7 @@ def resample_folder(input_folder, output_folder, fs, regex): files = glob.glob(os.path.join(input_folder, regex), recursive=True) for f in tqdm.tqdm(files): - - audio, fs_read = torchaudio.load(f) + audio, fs_read = audio_io.load(f) audio = audio[0].numpy() audio = signal.resample_poly(audio, fs, fs_read) @@ -68,8 +68,7 @@ def resample_folder(input_folder, output_folder, fs, regex): relative_path = os.path.join( Path(f).relative_to(Path(input_folder)).parent, - Path(f).relative_to(Path(input_folder)).stem - + "_peak_{}.wav".format(peak), + Path(f).relative_to(Path(input_folder)).stem + f"_peak_{peak}.wav", ) os.makedirs( @@ -81,7 +80,7 @@ def resample_folder(input_folder, output_folder, fs, regex): exist_ok=True, ) - torchaudio.save( + audio_io.save( os.path.join(output_folder, relative_path), audio.reshape(1, -1), fs, @@ -89,7 +88,6 @@ def resample_folder(input_folder, output_folder, fs, regex): if __name__ == "__main__": - args = parser.parse_args() resample_folder( args.input_folder, args.output_folder, int(args.fs), args.regex diff --git a/recipes/WSJ0Mix/prepare_data.py b/recipes/WSJ0Mix/prepare_data.py index c30f151f82..9a5ed03ff3 100644 --- a/recipes/WSJ0Mix/prepare_data.py +++ b/recipes/WSJ0Mix/prepare_data.py @@ -1,13 +1,13 @@ """ -The .csv preperation functions for WSJ0-Mix. +The .csv preparation functions for WSJ0-Mix. 
Author * Cem Subakan 2020 - """ +""" -import os import csv +import os def prepare_wsjmix( @@ -34,16 +34,15 @@ def prepare_wsjmix( return if "wsj" in datapath: - if n_spks == 2: - assert ( - "2speakers" in datapath - ), "Inconsistent number of speakers and datapath" + assert "2speakers" in datapath, ( + "Inconsistent number of speakers and datapath" + ) create_wsj_csv(datapath, savepath) elif n_spks == 3: - assert ( - "3speakers" in datapath - ), "Inconsistent number of speakers and datapath" + assert "3speakers" in datapath, ( + "Inconsistent number of speakers and datapath" + ) create_wsj_csv_3spks(datapath, savepath) else: raise ValueError("Unsupported Number of Speakers") @@ -96,14 +95,15 @@ def create_custom_dataset( ] with open( - os.path.join(savepath, dataset_name + "_" + set_type + ".csv"), "w" + os.path.join(savepath, dataset_name + "_" + set_type + ".csv"), + "w", + encoding="utf-8", ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for i, (mix_path, s1_path, s2_path) in enumerate( zip(mix_fl_paths, s1_fl_paths, s2_fl_paths) ): - row = { "ID": i, "duration": 1.0, @@ -153,13 +153,17 @@ def create_wsj_csv(datapath, savepath): "s2_wav_opts", ] - with open(savepath + "/wsj_" + set_type + ".csv", "w") as csvfile: + with open( + savepath + "/wsj_" + set_type + ".csv", + "w", + newline="", + encoding="utf-8", + ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for i, (mix_path, s1_path, s2_path) in enumerate( zip(mix_fl_paths, s1_fl_paths, s2_fl_paths) ): - row = { "ID": i, "duration": 1.0, @@ -214,13 +218,17 @@ def create_wsj_csv_3spks(datapath, savepath): "s3_wav_opts", ] - with open(savepath + "/wsj_" + set_type + ".csv", "w") as csvfile: + with open( + savepath + "/wsj_" + set_type + ".csv", + "w", + newline="", + encoding="utf-8", + ) as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for i, (mix_path, s1_path, s2_path, s3_path) in 
enumerate( zip(mix_fl_paths, s1_fl_paths, s2_fl_paths, s3_fl_paths) ): - row = { "ID": i, "duration": 1.0, diff --git a/recipes/WSJ0Mix/separation/README.md b/recipes/WSJ0Mix/separation/README.md index 152f8592c1..5f69f23e9d 100644 --- a/recipes/WSJ0Mix/separation/README.md +++ b/recipes/WSJ0Mix/separation/README.md @@ -3,21 +3,30 @@ This folder contains some popular recipes for the WSJ0-Mix task (2/3 sources). * This recipe supports train with several source separation models on WSJ0-2Mix, including [Sepformer](https://arxiv.org/abs/2010.13154), [RE-SepFormer](https://arxiv.org/abs/2206.09507), [DPRNN](https://arxiv.org/abs/1910.06379), [ConvTasnet](https://arxiv.org/abs/1809.07454), [DPTNet](https://arxiv.org/abs/2007.13975). -**Web Demo** Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See demo Speech Seperation: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/speechbrain-speech-seperation) +**Web Demo** Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See demo Speech Separation: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/speechbrain-speech-seperation) + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: -Additional dependency: ``` -pip install mir_eval +pip install -r ../extra_requirements.txt ``` +## How to run To run it: -``` +```shell python train.py hyperparams/sepformer.yaml --data_folder yourpath/wsj0-mix/2speakers ``` Note that during training we print the negative SI-SNR (as we treat this value as the loss). 
+# How to run on test sets only +If you want to run it on the test sets only, you can add the flag `--test_only` to the following command: +```shell +python train.py hyperparams/sepformer.yaml --data_folder yourpath/wsj0-mix/2speakers --test_only +``` # WSJ0-2mix and WSJ0-3mix dataset creation * The best way to create the datasets is using the original matlab script. This script and the associated meta data can be obtained through the following [link](https://www.dropbox.com/s/gg524noqvfm1t7e/create_mixtures_wsj023mix.zip?dl=1). * The dataset creation script assumes that the original WSJ0 files in the sphere format are already converted to .wav . @@ -62,12 +71,12 @@ Pretrained models for SepFormer on WSJ0-2Mix, WSJ0-3Mix, and WHAM! datasets can * https://huggingface.co/speechbrain/sepformer-wsj03mix * https://huggingface.co/speechbrain/resepformer-wsj02mix -* The output folder (with logs and checkpoints) for SepFormer (hparams/sepformer.yaml) can be found [here](https://drive.google.com/drive/folders/11ulM8NqLYle6vNNZb3NvPRPHR5Rrl-FF?usp=sharing). -* The output folder (with logs and checkpoints) for RE-SepFormer (hparams/resepformer.yaml) can be found [here](https://drive.google.com/drive/folders/1rXOyPQ7OZZMUzg7wrP1Zsa_fjFKMqaeu?usp=sharing). -* The output folder (with logs and checkpoints) for convtasnet (hparams/convtasnet.yaml) can be found [here](https://drive.google.com/drive/folders/12_Df4zsRW18YvD4hPAJAT9y_mVWnNyBm?usp=sharing). -* The output folder (with logs and checkpoints) for dual-path RNN (hparams/dprnn.yaml) can be found [here](https://drive.google.com/drive/folders/1Olq2077mXKqtqHluxECn1lMKIbo7xPFu?usp=sharing). -* The output folder (with logs and checkpoints) for SkiM (hparams/skim.yaml) can be found [here](https://drive.google.com/drive/folders/12HqVPpMXY-OOMsZ3xTAtkN7kk5TZ2YaL?usp=sharing). 
-* The output folder (with logs and checkpoints) for Sepformer with conformer block as intra model (hparams/sepformer-conformerintra.yaml) can be found [here](https://drive.google.com/drive/folders/1NcB7pKj7qWzDaI3ScDOyQwJLvRdW9rfl). +* The output folder (with logs and checkpoints) for SepFormer (hparams/sepformer.yaml) can be found [here](https://www.dropbox.com/sh/9klsqadkhin6fw1/AADEqGdT98rcqxVgFlfki7Gva?dl=0). +* The output folder (with logs and checkpoints) for RE-SepFormer (hparams/resepformer.yaml) can be found [here](https://www.dropbox.com/sh/obnu87zhubn1iia/AAAbn_jzqzIfeqaE9YQ7ujyQa?dl=0). +* The output folder (with logs and checkpoints) for convtasnet (hparams/convtasnet.yaml) can be found [here](https://www.dropbox.com/sh/hdpxj47signsay7/AABbDjGoyQesnFxjg0APxl7qa?dl=0). +* The output folder (with logs and checkpoints) for dual-path RNN (hparams/dprnn.yaml) can be found [here](https://www.dropbox.com/sh/o8fohu5s07h4bnw/AADPNyR1E3Q4aRobg3FtXTwVa?dl=0). +* The output folder (with logs and checkpoints) for SkiM (hparams/skim.yaml) can be found [here](https://www.dropbox.com/sh/zy0l5rc8abxdfp3/AAA2ngB74fugqpWXmjZo5v3wa?dl=0). +* The output folder (with logs and checkpoints) for Sepformer with conformer block as intra model (hparams/sepformer-conformerintra.yaml) can be found [here](https://www.dropbox.com/sh/w27rbdfnrtntrc9/AABCMFFvnxxYkKTInYXtsow3a?dl=0). @@ -88,10 +97,10 @@ Pretrained models for SepFormer on WSJ0-2Mix, WSJ0-3Mix, and WHAM! datasets can You can run the following command to train the model using Distributed Data Parallel (DDP) with 2 GPUs: +```bash +torchrun --nproc_per_node=2 train.py hparams/sepformer.yaml --data_folder /yourdatapath ``` - python -m torch.distributed.launch --nproc_per_node=2 train.py hparams/sepformer.yaml --data_folder /yourdatapath --distributed_launch --distributed_backend='nccl' -``` -You can add the other runtime options as appropriate. 
For more complete information on multi-GPU usage, take a look at this [tutorial](https://colab.research.google.com/drive/13pBUacPiotw1IvyffvGZ-HrtBr9T6l15?usp=sharing). +You can add the other runtime options as appropriate. For more complete information on multi-GPU usage, take a look at [our documentation](https://speechbrain.readthedocs.io/en/latest/multigpu.html). @@ -100,6 +109,15 @@ You can add the other runtime options as appropriate. For more complete informat Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/WSJ0Mix/separation/dynamic_mixing.py b/recipes/WSJ0Mix/separation/dynamic_mixing.py index 45ed609a9c..6da27c09ee 100644 --- 
a/recipes/WSJ0Mix/separation/dynamic_mixing.py +++ b/recipes/WSJ0Mix/separation/dynamic_mixing.py @@ -1,13 +1,15 @@ -import speechbrain as sb -import numpy as np -import torch -import torchaudio import glob import os -from pathlib import Path import random -from speechbrain.processing.signal_processing import rescale +from pathlib import Path + +import numpy as np +import torch + +import speechbrain as sb +from speechbrain.dataio import audio_io from speechbrain.dataio.batch import PaddedBatch +from speechbrain.processing.signal_processing import rescale """ The functions to implement Dynamic Mixing For SpeechSeparation @@ -28,9 +30,8 @@ def build_spk_hashtable(hparams): spk_hashtable = {} for utt in wsj0_utterances: - spk_id = Path(utt).stem[:3] - assert torchaudio.info(utt).sample_rate == hparams["sample_rate"] + assert audio_io.info(utt).sample_rate == hparams["sample_rate"] # e.g. 2speakers/wav8k/min/tr/mix/019o031a_0.27588_01vo030q_-0.27588.wav # id of speaker 1 is 019 utterance id is o031a @@ -111,7 +112,7 @@ def audio_pipeline( if "wham" in Path(hparams["data_folder"]).stem: noise_file = np.random.choice(noise_files, 1, replace=False) - noise, fs_read = torchaudio.load(noise_file[0]) + noise, fs_read = audio_io.load(noise_file[0]) noise = noise.squeeze() # gain = np.clip(random.normalvariate(1, 10), -4, 15) # noise = rescale(noise, torch.tensor(len(noise)), gain, scale="dB").squeeze() @@ -126,22 +127,21 @@ def audio_pipeline( ] minlen = min( - *[torchaudio.info(x).num_frames for x in spk_files], + *[audio_io.info(x).num_frames for x in spk_files], hparams["training_signal_len"], ) for i, spk_file in enumerate(spk_files): - # select random offset - length = torchaudio.info(spk_file).num_frames + length = audio_io.info(spk_file).num_frames start = 0 stop = length if length > minlen: # take a random window start = np.random.randint(0, length - minlen) stop = start + minlen - tmp, fs_read = torchaudio.load( - spk_file, frame_offset=start, num_frames=stop - 
start, + tmp, fs_read = audio_io.load( + spk_file, frame_offset=start, num_frames=stop - start ) # peak = float(Path(spk_file).stem.split("_peak_")[-1]) diff --git a/recipes/WSJ0Mix/separation/hparams/convtasnet.yaml b/recipes/WSJ0Mix/separation/hparams/convtasnet.yaml index d2da520084..b64106bcf1 100644 --- a/recipes/WSJ0Mix/separation/hparams/convtasnet.yaml +++ b/recipes/WSJ0Mix/separation/hparams/convtasnet.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -30,14 +30,13 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix noprogressbar: False save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -59,18 +58,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. 
+drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -116,7 +135,7 @@ MaskNet: !new:speechbrain.lobes.models.conv_tasnet.MaskNet R: 4 C: !ref norm_type: 'gLN' - causal: False + causal: True mask_nonlinear: 'relu' Decoder: !new:speechbrain.lobes.models.dual_path.Decoder diff --git a/recipes/WSJ0Mix/separation/hparams/dprnn.yaml b/recipes/WSJ0Mix/separation/hparams/dprnn.yaml index 9817afe3b8..f7a011915c 100644 --- a/recipes/WSJ0Mix/separation/hparams/dprnn.yaml +++ b/recipes/WSJ0Mix/separation/hparams/dprnn.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -30,14 +30,13 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix noprogressbar: False save_audio: False # Save 
estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -59,18 +58,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WSJ0Mix/separation/hparams/resepformer.yaml b/recipes/WSJ0Mix/separation/hparams/resepformer.yaml index 85497bc957..15128dba67 100644 --- a/recipes/WSJ0Mix/separation/hparams/resepformer.yaml +++ b/recipes/WSJ0Mix/separation/hparams/resepformer.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -32,13 +32,12 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -60,18 +59,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + 
orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WSJ0Mix/separation/hparams/sepformer-conformerintra.yaml b/recipes/WSJ0Mix/separation/hparams/sepformer-conformerintra.yaml index 4061196858..439eedb893 100644 --- a/recipes/WSJ0Mix/separation/hparams/sepformer-conformerintra.yaml +++ b/recipes/WSJ0Mix/separation/hparams/sepformer-conformerintra.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -32,13 +32,12 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix save_audio: False # Save estimated sources on disk sample_rate: 8000 -# Training 
parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -60,18 +59,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True @@ -83,6 +102,10 @@ out_channels: 256 kernel_size: 16 kernel_stride: 8 +# Other architectural parameters +num_layers_intra: 8 +num_layers_inter: 8 + # Dataloader options # Set num_workers: 0 on MacOS due to behavior of the multiprocessing library dataloader_opts: @@ -98,7 +121,7 @@ Encoder: !new:speechbrain.lobes.models.dual_path.Encoder #longformer_intra: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock #intra: !new:speechbrain.lobes.models.efficient_separators.LongformerBlock intra: !new:speechbrain.lobes.models.dual_path.SBConformerEncoderBlock - num_layers: 8 + num_layers: !ref d_model: !ref nhead: 8 d_ffn: 1024 @@ -107,7 +130,7 @@ intra: !new:speechbrain.lobes.models.dual_path.SBConformerEncoderBlock inter: !new:speechbrain.lobes.models.dual_path.SBTransformerBlock - num_layers: 8 + num_layers: !ref d_model: !ref nhead: 8 d_ffn: 1024 diff --git a/recipes/WSJ0Mix/separation/hparams/sepformer-customdataset.yaml b/recipes/WSJ0Mix/separation/hparams/sepformer-customdataset.yaml index 306915faef..2901b2d199 100644 --- a/recipes/WSJ0Mix/separation/hparams/sepformer-customdataset.yaml +++ b/recipes/WSJ0Mix/separation/hparams/sepformer-customdataset.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -33,14 +33,13 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to 
True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 num_spks: 2 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk sample_rate: 16000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -62,18 +61,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WSJ0Mix/separation/hparams/sepformer.yaml b/recipes/WSJ0Mix/separation/hparams/sepformer.yaml index 22c3f65ade..eac4c735e9 100644 --- a/recipes/WSJ0Mix/separation/hparams/sepformer.yaml +++ b/recipes/WSJ0Mix/separation/hparams/sepformer.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -33,15 +33,14 @@ skip_prep: False # Experiment params -auto_mix_prec: True # Set it to True for mixed precision -test_only: False +precision: fp16 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 2 # set to 3 for wsj0-3mix noprogressbar: False save_audio: True # Save estimated sources on disk n_audio_to_save: 20 sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -63,18 +62,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + 
+speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WSJ0Mix/separation/hparams/skim.yaml b/recipes/WSJ0Mix/separation/hparams/skim.yaml index b8e2caefa8..f85e20396d 100644 --- a/recipes/WSJ0Mix/separation/hparams/skim.yaml +++ b/recipes/WSJ0Mix/separation/hparams/skim.yaml @@ -7,7 +7,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made # seed: 1234 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] # Data params @@ -32,13 +32,12 @@ skip_prep: False # Experiment params -auto_mix_prec: False # Set it to True for mixed precision -test_only: False +precision: fp32 # bf16, fp16 or fp32 # Set it to True for mixed precision num_spks: 2 # set to 3 for wsj0-3mix save_audio: False # Save estimated sources on disk 
sample_rate: 8000 -# Training parameters +####################### Training Parameters #################################### N_epochs: 200 batch_size: 1 lr: 0.00015 @@ -60,18 +59,38 @@ use_rand_shift: False min_shift: -8000 max_shift: 8000 -speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 1.0 - drop_freq_prob: 0.0 - drop_chunk_prob: 0.0 - sample_rate: !ref - speeds: [95, 100, 105] - -wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - perturb_prob: 0.0 - drop_freq_prob: 1.0 - drop_chunk_prob: 1.0 - sample_rate: !ref +# Speed perturbation +speed_changes: [95, 100, 105] # List of speed changes for time-stretching + +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq_low: 0 # Min frequency band dropout probability +drop_freq_high: 1 # Max frequency band dropout probability +drop_freq_count_low: 1 # Min number of frequency bands to drop +drop_freq_count_high: 3 # Max number of frequency bands to drop +drop_freq_width: 0.05 # Width of frequency bands to drop + +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: !ref + drop_freq_high: !ref + drop_freq_count_low: !ref + drop_freq_count_high: !ref + drop_freq_width: !ref + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk_count_low: 1 # Min number of audio chunks to drop +drop_chunk_count_high: 5 # Max number of audio chunks to drop +drop_chunk_length_low: 1000 # Min length of audio chunks to drop +drop_chunk_length_high: 2000 # Max length of audio chunks to drop + +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: !ref + drop_length_high: !ref + drop_count_low: !ref + drop_count_high: !ref # loss thresholding -- this thresholds the training loss threshold_byloss: True diff --git a/recipes/WSJ0Mix/separation/prepare_data.py b/recipes/WSJ0Mix/separation/prepare_data.py new file mode 120000 index 0000000000..1a7125c969 --- /dev/null +++ b/recipes/WSJ0Mix/separation/prepare_data.py @@ -0,0 +1 @@ +../prepare_data.py \ No newline at end of file diff --git a/recipes/WSJ0Mix/separation/preprocess_dynamic_mixing.py b/recipes/WSJ0Mix/separation/preprocess_dynamic_mixing.py new file mode 120000 index 0000000000..4521bd013b --- /dev/null +++ b/recipes/WSJ0Mix/separation/preprocess_dynamic_mixing.py @@ -0,0 +1 @@ +../meta/preprocess_dynamic_mixing.py \ No newline at end of file diff --git a/recipes/WSJ0Mix/separation/train.py b/recipes/WSJ0Mix/separation/train.py index 4929382f6e..6a5ae06986 100755 --- a/recipes/WSJ0Mix/separation/train.py +++ b/recipes/WSJ0Mix/separation/train.py @@ -1,5 +1,5 @@ #!/usr/bin/env/python3 -"""Recipe for training a neural speech separation system on wsjmix the +"""Recipe for training a neural speech separation system on the wsjmix dataset. The system employs an encoder, a decoder, and a masking network. 
To run this recipe, do the following: @@ -21,20 +21,21 @@ * Jianyuan Zhong 2020 """ +import csv import os import sys + +import numpy as np import torch import torch.nn.functional as F -import torchaudio +from hyperpyyaml import load_hyperpyyaml +from tqdm import tqdm + import speechbrain as sb import speechbrain.nnet.schedulers as schedulers +from speechbrain.dataio import audio_io from speechbrain.utils.distributed import run_on_main -from torch.cuda.amp import autocast -from hyperpyyaml import load_hyperpyyaml -import numpy as np -from tqdm import tqdm -import csv -import logging +from speechbrain.utils.logger import get_logger # Define training procedure @@ -55,13 +56,14 @@ def compute_forward(self, mix, targets, stage, noise=None): # Add speech distortions if stage == sb.Stage.TRAIN: with torch.no_grad(): - if self.hparams.use_speedperturb or self.hparams.use_rand_shift: + if self.hparams.use_speedperturb: mix, targets = self.add_speed_perturb(targets, mix_lens) mix = targets.sum(-1) if self.hparams.use_wavedrop: - mix = self.hparams.wavedrop(mix, mix_lens) + mix = self.hparams.drop_chunk(mix, mix_lens) + mix = self.hparams.drop_freq(mix) if self.hparams.limit_training_signal_len: mix, targets = self.cut_signals(mix, targets) @@ -97,6 +99,7 @@ def compute_objectives(self, predictions, targets): def fit_batch(self, batch): """Trains one batch""" + # Unpacking batch list mixture = batch.mix_sig targets = [batch.s1_sig, batch.s2_sig] @@ -104,72 +107,37 @@ def fit_batch(self, batch): if self.hparams.num_spks == 3: targets.append(batch.s3_sig) - if self.auto_mix_prec: - with autocast(): - predictions, targets = self.compute_forward( - mixture, targets, sb.Stage.TRAIN - ) - loss = self.compute_objectives(predictions, targets) - - # hard threshold the easy dataitems - if self.hparams.threshold_byloss: - th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() - else: - loss = loss.mean() - - if ( - 
loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - self.scaler.scale(loss).backward() - if self.hparams.clip_grad_norm >= 0: - self.scaler.unscale_(self.optimizer) - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm, - ) - self.scaler.step(self.optimizer) - self.scaler.update() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) - ) - loss.data = torch.tensor(0).to(self.device) - else: + with self.training_ctx: predictions, targets = self.compute_forward( mixture, targets, sb.Stage.TRAIN ) loss = self.compute_objectives(predictions, targets) + # hard threshold the easy dataitems if self.hparams.threshold_byloss: th = self.hparams.threshold - loss_to_keep = loss[loss > th] - if loss_to_keep.nelement() > 0: - loss = loss_to_keep.mean() + loss = loss[loss > th] + if loss.nelement() > 0: + loss = loss.mean() else: loss = loss.mean() - if ( - loss < self.hparams.loss_upper_lim and loss.nelement() > 0 - ): # the fix for computational problems - loss.backward() - if self.hparams.clip_grad_norm >= 0: - torch.nn.utils.clip_grad_norm_( - self.modules.parameters(), self.hparams.clip_grad_norm - ) - self.optimizer.step() - else: - self.nonfinite_count += 1 - logger.info( - "infinite loss or empty loss! it happened {} times so far - skipping this batch".format( - self.nonfinite_count - ) + if loss.nelement() > 0 and loss < self.hparams.loss_upper_lim: + self.scaler.scale(loss).backward() + if self.hparams.clip_grad_norm >= 0: + self.scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_( + self.modules.parameters(), + self.hparams.clip_grad_norm, ) - loss.data = torch.tensor(0).to(self.device) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + self.nonfinite_count += 1 + logger.info( + f"infinite loss or empty loss! 
it happened {self.nonfinite_count} times so far - skipping this batch" + ) + loss.data = torch.tensor(0.0).to(self.device) self.optimizer.zero_grad() return loss.detach().cpu() @@ -206,7 +174,6 @@ def on_stage_end(self, stage, stage_loss, epoch): # Perform end-of-iteration things, like annealing, logging, etc. if stage == sb.Stage.VALID: - # Learning rate annealing if isinstance( self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau @@ -225,7 +192,7 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"], + meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"] ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( @@ -239,15 +206,13 @@ def add_speed_perturb(self, targets, targ_lens): min_len = -1 recombine = False - if self.hparams.use_speedperturb: + if self.hparams.use_speedperturb or self.hparams.use_rand_shift: # Performing speed change (independently on each source) new_targets = [] recombine = True for i in range(targets.shape[-1]): - new_target = self.hparams.speedperturb( - targets[:, :, i], targ_lens - ) + new_target = self.hparams.speed_perturb(targets[:, :, i]) new_targets.append(new_target) if i == 0: min_len = new_target.shape[-1] @@ -328,14 +293,13 @@ def save_results(self, test_data): test_data, **self.hparams.dataloader_opts ) - with open(save_file, "w") as results_csv: + with open(save_file, "w", newline="", encoding="utf-8") as results_csv: writer = csv.DictWriter(results_csv, fieldnames=csv_columns) writer.writeheader() # Loop over all test sentence with tqdm(test_loader, dynamic_ncols=True) as t: for i, batch in enumerate(t): - # Apply Separation mixture, mix_len = batch.mix_sig snt_id = batch.id @@ -399,28 +363,27 @@ def save_results(self, test_data): } writer.writerow(row) - logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean())) - logger.info("Mean SISNRi is 
{}".format(np.array(all_sisnrs_i).mean())) - logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean())) - logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean())) + logger.info(f"Mean SISNR is {np.array(all_sisnrs).mean()}") + logger.info(f"Mean SISNRi is {np.array(all_sisnrs_i).mean()}") + logger.info(f"Mean SDR is {np.array(all_sdrs).mean()}") + logger.info(f"Mean SDRi is {np.array(all_sdrs_i).mean()}") def save_audio(self, snt_id, mixture, targets, predictions): "saves the test audio (mixture, targets, and estimated sources) on disk" - # Create outout folder + # Create output folder save_path = os.path.join(self.hparams.save_folder, "audio_results") if not os.path.exists(save_path): os.mkdir(save_path) for ns in range(self.hparams.num_spks): - # Estimated source signal = predictions[0, :, ns] signal = signal / signal.abs().max() save_file = os.path.join( - save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1) + save_path, f"item{snt_id}_source{ns + 1}hat.wav" ) - torchaudio.save( + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) @@ -428,17 +391,17 @@ def save_audio(self, snt_id, mixture, targets, predictions): signal = targets[0, :, ns] signal = signal / signal.abs().max() save_file = os.path.join( - save_path, "item{}_source{}.wav".format(snt_id, ns + 1) + save_path, f"item{snt_id}_source{ns + 1}.wav" ) - torchaudio.save( + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) # Mixture signal = mixture[0][0, :] signal = signal / signal.abs().max() - save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id)) - torchaudio.save( + save_file = os.path.join(save_path, f"item{snt_id}_mix.wav") + audio_io.save( save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate ) @@ -509,17 +472,16 @@ def audio_pipeline_s3(s3_wav): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - 
with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) # Initialize ddp (useful only for multi-GPU DDP training) sb.utils.distributed.ddp_init_group(run_opts) # Logger info - logger = logging.getLogger(__name__) + logger = get_logger(__name__) # Create experiment directory sb.create_experiment_directory( @@ -528,17 +490,20 @@ def audio_pipeline_s3(s3_wav): overrides=overrides, ) + # Update precision to bf16 if the device is CPU and precision is fp16 + if run_opts.get("device") == "cpu" and hparams.get("precision") == "fp16": + hparams["precision"] = "bf16" + # Check if wsj0_tr is set with dynamic mixing if hparams["dynamic_mixing"] and not os.path.exists( hparams["base_folder_dm"] ): - print( + raise ValueError( "Please, specify a valid base_folder_dm folder when using dynamic mixing" ) - sys.exit(1) # Data preparation - from recipes.WSJ0Mix.prepare_data import prepare_wsjmix # noqa + from prepare_data import prepare_wsjmix # noqa run_on_main( prepare_wsjmix, @@ -561,9 +526,7 @@ def audio_pipeline_s3(s3_wav): if not os.path.exists( os.path.normpath(hparams["base_folder_dm"]) + "_processed" ): - from recipes.WSJ0Mix.meta.preprocess_dynamic_mixing import ( - resample_folder, - ) + from preprocess_dynamic_mixing import resample_folder print("Resampling the base folder") run_on_main( @@ -590,7 +553,7 @@ def audio_pipeline_s3(s3_wav): os.path.normpath(hparams["base_folder_dm"]) + "_processed" ) - # Colleting the hparams for dynamic batching + # Collecting the hparams for dynamic batching dm_hparams = { "train_data": hparams["train_data"], "data_folder": hparams["data_folder"], @@ -624,15 +587,14 @@ def audio_pipeline_s3(s3_wav): for module in separator.modules.values(): separator.reset_layer_recursively(module) - if not hparams["test_only"]: - # Training - separator.fit( - separator.hparams.epoch_counter, - train_data, - valid_data, - train_loader_kwargs=hparams["dataloader_opts"], - 
valid_loader_kwargs=hparams["dataloader_opts"], - ) + # Training + separator.fit( + separator.hparams.epoch_counter, + train_data, + valid_data, + train_loader_kwargs=hparams["dataloader_opts"], + valid_loader_kwargs=hparams["dataloader_opts"], + ) # Eval separator.evaluate(test_data, min_key="si-snr") diff --git a/recipes/ZaionEmotionDataset/README.md b/recipes/ZaionEmotionDataset/README.md new file mode 100644 index 0000000000..2eb8eb7ce4 --- /dev/null +++ b/recipes/ZaionEmotionDataset/README.md @@ -0,0 +1,117 @@ +# Speech Emotion Diarization (SED) + +[Speech Emotion Diarization](https://arxiv.org/pdf/2306.12991.pdf) is a technique that focuses on predicting emotions and their corresponding time boundaries within a speech recording. The model, described in the research paper titled "Speech Emotion Diarization" ([available here](https://arxiv.org/pdf/2306.12991.pdf)), has been trained using audio samples that include neutral and a non-neutral emotional event. The model's output takes the form of a dictionary comprising emotion components (*neutral*, *happy*, *angry*, and *sad*) along with their respective start and end boundaries, as exemplified below: + +```python +{ + 'example.wav': [ + {'start': 0.0, 'end': 1.94, 'emotion': 'n'}, # 'n' denotes neutral + {'start': 1.94, 'end': 4.48, 'emotion': 'h'} # 'h' denotes happy + ] +} +``` + +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r emotion_diarization/extra_requirements.txt +``` + +## Datasets + +### Test Set +The test set is **Zaion Emotion Dataset (ZED)**, which can be downloaded via this dropbox [link](https://www.dropbox.com/scl/fi/2s3ro8tmgt1lir77z3hj5/ZED.zip?rlkey=qkizx7t3ozo02xs7k1tlexb1e&st=9l466c1c&dl=0). + +### Training Set +1. 
[RAVDESS](https://zenodo.org/record/1188976) + + A fast download can be done by `wget https://dl.dropboxusercontent.com/s/aancfsluvcyrxou/RAVDESS.zip` + + + +2. [ESD](https://github.com/HLTSingapore/Emotional-Speech-Data) + + A fast download can be done by `wget https://dl.dropboxusercontent.com/s/e05ul8myqb5hkbj/ESD.zip` + + +3. [IEMOCAP](https://sail.usc.edu/iemocap/iemocap_release.htm) + + +4. [JL-CORPUS](https://www.kaggle.com/datasets/tli725/jl-corpus?resource=download) + + A fast download can be done by `wget https://dl.dropboxusercontent.com/s/4t3vlq5cv5e8wv6/JL_corpus.zip` + +5. [EmoV-DB](https://openslr.org/115/) + + A fast download can be done by `wget https://dl.dropboxusercontent.com/s/drvn10ph8q6aw8t/EmoV-DB.zip`, where only `Amused, Neutral, Angry` emotions are kept. + + +## Run the code + +First download the train/test datasets and unzip them. + +To run the code, do: + +`cd emotion_diarization/` + +`python train.py hparams/train.yaml --zed_folder /path/to/ZED --emovdb_folder /path/to/EmoV-DB --esd_folder /path/to/ESD --iemocap_folder /path/to/IEMOCAP --jlcorpus_folder /path/to/JL_corpus --ravdess_folder /path/to/RAVDESS`. + +The frame-wise classification result for each utterance can be found in `results/eder.txt`. + + +## Results + +The EDER (Emotion Diarization Error Rate) reported here was averaged over 5 different seeds; results of other models (wav2vec2.0, HuBERT) can be found in the paper. You can find our training results (model, logs, etc) [here](https://www.dropbox.com/sh/woudm1v31a7vyp5/AADAMxpQOXaxf8E_1hX202GJa?dl=0). + +| model | EDER | +|:-------------:|:---------------------------:| +| WavLM-large | 30.2 ± 1.60 | + +It takes about 40 mins/epoch with 1xRTX8000(40G), reduce the batch size if OOM. + +## Inference + +The pretrained models and an easy-inference interface can be found on [HuggingFace](https://huggingface.co/speechbrain/emotion-diarization-wavlm-large).
+ + + +# **About Speech Emotion Diarization/Zaion Emotion Dataset** + +```bibtex +@article{wang2023speech, + title={Speech Emotion Diarization: Which Emotion Appears When?}, + author={Wang, Yingzhi and Ravanelli, Mirco and Nfissi, Alaa and Yacoubi, Alya}, + journal={arXiv preprint arXiv:2306.12991}, + year={2023} +} +``` + +# **About SpeechBrain** +- Website: https://speechbrain.github.io/ +- Code: https://github.com/speechbrain/speechbrain/ + +# **Citing SpeechBrain** +Please, cite SpeechBrain if you use it for your research or business. + +```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} +@misc{speechbrain, + title={{SpeechBrain}: A General-Purpose Speech Toolkit}, + author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, + year={2021}, + eprint={2106.04624}, + archivePrefix={arXiv}, + primaryClass={eess.AS}, + note={arXiv:2106.04624} +} +``` diff --git 
a/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_EMOVDB.py b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_EMOVDB.py new file mode 100644 index 0000000000..2ce09038a0 --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_EMOVDB.py @@ -0,0 +1,332 @@ +""" +Data preparation for Emov-DB dataset. + +Dataset link: https://openslr.org/115/ + +extra dependencies: pathlib, pydub, webrtcvad + +Author +------ +Yingzhi Wang 2023 +""" + +import json +import os +import random +from pathlib import Path + +import numpy as np +from datasets.vad import vad_for_folder +from pydub import AudioSegment + +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +repos = [ + "bea_Amused", + "bea_Angry", + "bea_Neutral", + "jenie_Amused", + "jenie_Angry", + "jenie_Neutral", + "josh_Amused", + "josh_Neutral", + "sam_Amused", + "sam_Angry", + "sam_Neutral", +] +combinations = ["neu_emo", "emo_neu", "neu_emo_neu", "emo_emo"] +probabilities = np.array([0.25, 0.25, 0.25, 0.25]) + + +def prepare_emovdb(data_folder, save_json, seed=12): + """ + Prepares the json files for the EmoV-DB dataset. + + Arguments + --------- + data_folder : str + Path to the folder where the original EmoV-DB dataset is stored. + save_json : str + Path where the data specification file will be saved. 
+ seed : int + Seed for reproducibility + + Returns + ------- + data_json : str + """ + random.seed(seed) + + # Check if this phase is already done (if so, skip it) + if skip(save_json): + logger.info("Preparation completed in previous run, skipping.") + return + + # wavs of EmoV-DB are saved in double instead of int16 + logger.info("Converting format from double to int16 ...") + all_paths = Path(data_folder).rglob("*.wav") + # paths = copy.deepcopy(all_paths) + + for repo in repos: + if not os.path.exists(os.path.join(data_folder, "converted", repo)): + os.makedirs(os.path.join(data_folder, "converted", repo)) + + for path in all_paths: + convert_path = os.path.join( + data_folder, "converted", str(path).split("EmoV-DB/")[-1] + ) + if "converted" not in str(path): + os.system( + f"sox -v 0.99 {str(path)} -b 16 -e signed-integer {convert_path}" + ) + logger.info("Converting Finished") + + logger.info("Applying VAD and resampling ...") + for repo in repos: + source_folder = os.path.join(data_folder, "converted", repo) + destin_folder = os.path.join( + data_folder, "processed", repo.split("_")[0] + ) + if not os.path.exists(destin_folder): + os.makedirs(destin_folder) + + # webrtcvad does not deal with 44100Hz, so resampling before VAD + resampling_for_folder(source_folder, destin_folder) + vad_for_folder(destin_folder, destin_folder) + + logger.info("vad and resampling finished") + logger.info("Start EmoV-DB concatenation ...") + data_json = concat_wavs(data_folder, save_json) + logger.info("EmoV-DB concatenation finished ...") + return data_json + + +def resampling_for_folder(in_folder, out_folder): + """ + Resamples all the audios from a certain folder to 16kHz + """ + files = os.listdir(in_folder) + for file_name in files: + try: + sound = AudioSegment.from_file( + os.path.join(in_folder, file_name), format="wav" + ) + sound = sound.set_frame_rate(16000) + sound.export(os.path.join(out_folder, file_name), format="wav") + except Exception as e: + logger.info(e) + 
+ +def get_emotion(wav_path): + """ + Get the emotion of an audio from its filepath + """ + if "anger" in wav_path: + return "angry" + elif "amused" in wav_path: + return "happy" + + +def concat_wavs(data_folder, save_json): + """ + Concatenate audios from the same speaker + The concatenation is produced with a probability for each modality + The amplitude of the sub-sentences are made equal during concatenation + """ + repos_processed = [ + "bea", + "jenie", + "josh", + "sam", + ] + + data_json = {} + for repo in repos_processed: + emotion_wavs = [] + neutral_wavs = [] + + paths = Path(os.path.join(data_folder, "processed", repo)) + angry_files = paths.rglob("anger*.wav") + happy_files = paths.rglob("amused*.wav") + neutral_files = paths.rglob("neutral*.wav") + + for file in angry_files: + emotion_wavs.append(str(file)) + for file in happy_files: + emotion_wavs.append(str(file)) + for file in neutral_files: + neutral_wavs.append(str(file)) + + random.shuffle(emotion_wavs) + random.shuffle(neutral_wavs) + neutral_wavs = neutral_wavs * 10 + + combine_path = os.path.join(data_folder, "combined", repo) + if not os.path.exists(combine_path): + os.makedirs(combine_path) + + while len(emotion_wavs) > 0: + combination = np.random.choice( + combinations, p=probabilities.ravel() + ) + if combination == "neu_emo": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input.dBFS - emotion_input.dBFS + combined_input = neutral_input + emotion_input + + out_name = os.path.join( + combine_path, + neutral_sample.split("/")[-1][:-4] + + "_" + + emo_sample.split("/")[-1], + ) + + combined_input.export(out_name, format="wav") + + id = repo + "_" + out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": 
len(neutral_input) / 1000, + "end": len(combined_input) / 1000, + } + ], + } + + neutral_wavs = neutral_wavs[1:] + emotion_wavs = emotion_wavs[1:] + + elif combination == "emo_neu": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + neutral_input += emotion_input.dBFS - neutral_input.dBFS + combined_input = emotion_input + neutral_input + + out_name = os.path.join( + combine_path, + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = repo + "_" + out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": 0, + "end": len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[1:] + + elif combination == "neu_emo_neu": + neutral_sample_1 = neutral_wavs[0] + neutral_sample_2 = neutral_wavs[1] + emo_sample = emotion_wavs[0] + + neutral_input_1 = AudioSegment.from_wav(neutral_sample_1) + neutral_input_2 = AudioSegment.from_wav(neutral_sample_2) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input_1.dBFS - emotion_input.dBFS + neutral_input_2 += neutral_input_1.dBFS - neutral_input_2.dBFS + combined_input = ( + neutral_input_1 + emotion_input + neutral_input_2 + ) + + out_name = os.path.join( + combine_path, + neutral_sample_1.split("/")[-1][:-4] + + "_" + + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample_2.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = repo + "_" + out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": len(neutral_input_1) / 1000, + "end": len(neutral_input_1) / 1000 + + len(emotion_input) / 1000, 
+ } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[2:] + + else: + emo_sample_1 = emotion_wavs[0] + + emotion_input_1 = AudioSegment.from_wav(emo_sample_1) + + out_name = os.path.join( + combine_path, emo_sample_1.split("/")[-1] + ) + emotion_input_1.export(out_name, format="wav") + + id = repo + "_" + out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(emotion_input_1) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample_1), + "start": 0, + "end": len(emotion_input_1) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + + with open(save_json, "w", encoding="utf-8") as outfile: + json.dump(data_json, outfile) + return data_json + + +def skip(save_json): + """ + Detects if the data preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + save_json : str + Path to check for existence. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + if not os.path.isfile(save_json): + return False + return True diff --git a/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_ESD.py b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_ESD.py new file mode 100644 index 0000000000..95efd7ebdc --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_ESD.py @@ -0,0 +1,327 @@ +""" +Data preparation for Emotion Speech Dataset (ESD). 
+ +Dataset link: https://github.com/HLTSingapore/Emotional-Speech-Data + +extra dependencies: pydub, webrtcvad + +Author +------ +Yingzhi Wang 2023 +""" + +import json +import os +import random + +import numpy as np +from datasets.vad import vad_for_folder +from pydub import AudioSegment + +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +# we choose here only english utterances +repos = [ + "0011", + "0012", + "0013", + "0014", + "0015", + "0016", + "0017", + "0018", + "0019", + "0020", +] +sub_folders = ["Angry", "Happy", "Neutral", "Sad"] +sub_sub_folders = ["train", "evaluation", "test"] +combinations = ["neu_emo", "emo_neu", "neu_emo_neu", "emo_emo"] +probabilities = np.array([0.25, 0.25, 0.25, 0.25]) + + +def prepare_esd(data_folder, save_json, seed=12): + """ + Prepares the json files for the ESD dataset. + + Arguments + --------- + data_folder : str + Path to the folder where the original ESD dataset is stored. + save_json : str + Path where the data specification file will be saved. 
+ seed : int + Seed for reproducibility + + Returns + ------- + data_json : str + """ + random.seed(seed) + + # Check if this phase is already done (if so, skip it) + if skip(save_json): + logger.info("Preparation completed in previous run, skipping.") + return + + logger.info("Applying VAD and resampling ...") + for repo in repos: + for sub_folder in sub_folders: + for sub_sub_folder in sub_sub_folders: + source_folder = os.path.join( + data_folder, repo, sub_folder, sub_sub_folder + ) + destin_folder = os.path.join( + data_folder, "processed", repo, sub_folder + ) + if not os.path.exists(destin_folder): + os.makedirs(destin_folder) + vad_for_folder(source_folder, destin_folder) + logger.info("vad and resampling finished") + logger.info("Start ESD concatenation ...") + data_json = concat_wavs(data_folder, save_json) + logger.info("ESD concatenation finished ...") + return data_json + + +def resampling_for_folder(in_folder, out_folder): + """ + Resamples all the audios from a certain folder to 16kHz + """ + files = os.listdir(in_folder) + for file_name in files: + try: + sound = AudioSegment.from_file( + os.path.join(in_folder, file_name), format="wav" + ) + sound = sound.set_frame_rate(16000) + sound.export(os.path.join(out_folder, file_name), format="wav") + except Exception as e: + logger.info(e) + + +def get_emotion(wav_path): + """ + Get the emotion of an audio from its filepath + """ + num = wav_path.split("_")[-1][:-4] + num = int(num) - 1 + if num // 350 == 1: + return "angry" + elif num // 350 == 2: + return "happy" + elif num // 350 == 3: + return "sad" + + +def concat_wavs(data_folder, save_json): + """ + Concatenate audios from the same speaker + The concatenation is produced with a probability for each modality + The amplitude of the sub-sentences are made equal during concatenation + """ + data_json = {} + for repo in repos: + emotion_wavs = [] + neutral_wavs = [] + + angry_files = os.listdir( + os.path.join(data_folder, "processed", repo, "Angry") + 
) + happy_files = os.listdir( + os.path.join(data_folder, "processed", repo, "Happy") + ) + sad_files = os.listdir( + os.path.join(data_folder, "processed", repo, "Sad") + ) + neutral_files = os.listdir( + os.path.join(data_folder, "processed", repo, "Neutral") + ) + + for file in angry_files: + emotion_wavs.append( + os.path.join(data_folder, "processed", repo, "Angry", file) + ) + for file in happy_files: + emotion_wavs.append( + os.path.join(data_folder, "processed", repo, "Happy", file) + ) + for file in sad_files: + emotion_wavs.append( + os.path.join(data_folder, "processed", repo, "Sad", file) + ) + for file in neutral_files: + neutral_wavs.append( + os.path.join(data_folder, "processed", repo, "Neutral", file) + ) + + random.shuffle(emotion_wavs) + random.shuffle(neutral_wavs) + neutral_wavs = neutral_wavs * 10 + + combine_path = os.path.join(data_folder, "combined", repo) + if not os.path.exists(combine_path): + os.makedirs(combine_path) + + while len(emotion_wavs) > 0: + combination = np.random.choice( + combinations, p=probabilities.ravel() + ) + if combination == "neu_emo": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input.dBFS - emotion_input.dBFS + combined_input = neutral_input + emotion_input + + out_name = os.path.join( + combine_path, + neutral_sample.split("/")[-1][:-4] + + "_" + + emo_sample.split("_")[-1], + ) + combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": len(neutral_input) / 1000, + "end": len(combined_input) / 1000, + } + ], + } + + neutral_wavs = neutral_wavs[1:] + emotion_wavs = emotion_wavs[1:] + + elif combination == "emo_neu": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + 
neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + neutral_input += emotion_input.dBFS - neutral_input.dBFS + combined_input = emotion_input + neutral_input + + out_name = os.path.join( + combine_path, + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample.split("_")[-1], + ) + combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": 0, + "end": len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[1:] + + elif combination == "neu_emo_neu": + neutral_sample_1 = neutral_wavs[0] + neutral_sample_2 = neutral_wavs[1] + emo_sample = emotion_wavs[0] + + neutral_input_1 = AudioSegment.from_wav(neutral_sample_1) + neutral_input_2 = AudioSegment.from_wav(neutral_sample_2) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input_1.dBFS - emotion_input.dBFS + neutral_input_2 += neutral_input_1.dBFS - neutral_input_2.dBFS + combined_input = ( + neutral_input_1 + emotion_input + neutral_input_2 + ) + + out_name = os.path.join( + combine_path, + neutral_sample_1.split("/")[-1][:-4] + + emo_sample.split("/")[-1][4:-4] + + "_" + + neutral_sample_2.split("_")[-1], + ) + combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": len(neutral_input_1) / 1000, + "end": len(neutral_input_1) / 1000 + + len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[2:] + + else: + emo_sample_1 = emotion_wavs[0] + + emotion_input_1 = AudioSegment.from_wav(emo_sample_1) + + out_name = os.path.join( + combine_path, emo_sample_1.split("/")[-1] + ) + 
emotion_input_1.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(emotion_input_1) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample_1), + "start": 0, + "end": len(emotion_input_1) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + + with open(save_json, "w", encoding="utf-8") as outfile: + json.dump(data_json, outfile) + return data_json + + +def skip(save_json): + """ + Detects if the data preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + save_json : str + Path to check for existence. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + if not os.path.isfile(save_json): + return False + return True diff --git a/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_IEMOCAP.py b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_IEMOCAP.py new file mode 100644 index 0000000000..ffff26721e --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_IEMOCAP.py @@ -0,0 +1,385 @@ +""" +Data preparation for IEMOCAP. + +Dataset link: https://sail.usc.edu/iemocap/iemocap_release.htm + +extra dependencies: pathlib, pydub, webrtcvad + +Author +------ +Yingzhi Wang 2023 +""" + +import json +import os +import random +import re +from pathlib import Path + +import numpy as np +from datasets.vad import write_audio +from pydub import AudioSegment + +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +combinations = ["neu_emo", "emo_neu", "neu_emo_neu", "emo_emo"] +probabilities = np.array([0.25, 0.25, 0.25, 0.25]) + + +def prepare_iemocap(data_folder, save_json, seed=12): + """ + Prepares the json files for the IEMOCAP dataset. + + Arguments + --------- + data_folder : str + Path to the folder where the original IEMOCAP dataset is stored. 
save_json : str + Path where the data specification file will be saved. + seed : int + Seed for reproducibility + + Returns + ------- + data_json : str + """ + random.seed(seed) + + # Check if this phase is already done (if so, skip it) + if skip(save_json): + logger.info("Preparation completed in previous run, skipping.") + return + + logger.info("Applying VAD ...") + dict = {} + emotion_wavs = [] + neutral_wavs = [] + annotations = Path(data_folder).rglob("*/dialog/EmoEvaluation/*.txt") + for i in annotations: + lines = load_utterInfo(i) + for line in lines: + id = line[2] + if line[3] == "hap": + dict[id] = "happy" + append_path_after_vad(data_folder, id, emotion_wavs) + + if line[3] == "exc": + dict[id] = "happy" + append_path_after_vad(data_folder, id, emotion_wavs) + + if line[3] == "sad": + dict[id] = "sad" + append_path_after_vad(data_folder, id, emotion_wavs) + + if line[3] == "ang": + dict[id] = "angry" + append_path_after_vad(data_folder, id, emotion_wavs) + + if line[3] == "neu": + dict[id] = "neutral" + append_path_after_vad(data_folder, id, neutral_wavs) + + logger.info("VAD finished") + logger.info("Start IEMOCAP concatenation ...") + data_json = concat_wavs( + data_folder, save_json, emotion_wavs, neutral_wavs, dict + ) + logger.info("IEMOCAP concatenation finished ...") + return data_json + + +def append_path_after_vad(data_folder, id, list): + """Apply VAD to an utterance and append the processed path to the list + + Args: + data_folder (str): the path to IEMOCAP + id (str): utterance id + list (list): the list to append the processed wav path to + + Returns: + None: the path is appended to ``list`` in place + """ + file = get_path(data_folder, id) + destin_folder = os.path.join(data_folder, "processed", id[:5] + id[-4]) + if not os.path.exists(destin_folder): + os.makedirs(destin_folder) + if not os.path.exists(os.path.join(destin_folder, f"{id}.wav")): + write_audio(file, os.path.join(destin_folder, f"{id}.wav")) + if os.path.exists(os.path.join(destin_folder, f"{id}.wav")): +
list.append(os.path.join(destin_folder, f"{id}.wav")) + + +def load_utterInfo(inputFile): + """ + Load utterInfo from original IEMOCAP database + """ + # this regex allow to create a list with: + # [START_TIME - END_TIME] TURN_NAME EMOTION [V, A, D] + # [V, A, D] means [Valence, Arousal, Dominance] + pattern = re.compile( + r"[\[]*[0-9]*[.][0-9]*[ -]*[0-9]*[.][0-9]*[\]][\t][a-z0-9_]*[\t][a-z]{3}[\t][\[][0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[\]]", + re.IGNORECASE, + ) # noqa + with open(inputFile, encoding="utf-8") as myfile: + data = myfile.read().replace("\n", " ") + result = pattern.findall(data) + out = [] + for i in result: + a = i.replace("[", "") + b = a.replace(" - ", "\t") + c = b.replace("]", "") + x = c.replace(", ", "\t") + out.append(x.split("\t")) + return out + + +def resampling_for_folder(in_folder, out_folder): + """ + Resamples all the audios from a certain folder to 16kHz + """ + files = os.listdir(in_folder) + for file_name in files: + try: + sound = AudioSegment.from_file( + os.path.join(in_folder, file_name), format="wav" + ) + sound = sound.set_frame_rate(16000) + sound.export(os.path.join(out_folder, file_name), format="wav") + except Exception as e: + logger.info(e) + + +def get_path(datafolder, id): + """ + Get the filepath with ID + """ + if "Ses01" in id: + return os.path.join( + datafolder, "Session1/sentences/wav", id[:-5], id + ".wav" + ) + if "Ses02" in id: + return os.path.join( + datafolder, "Session2/sentences/wav", id[:-5], id + ".wav" + ) + if "Ses03" in id: + return os.path.join( + datafolder, "Session3/sentences/wav", id[:-5], id + ".wav" + ) + if "Ses04" in id: + return os.path.join( + datafolder, "Session4/sentences/wav", id[:-5], id + ".wav" + ) + if "Ses05" in id: + return os.path.join( + datafolder, "Session5/sentences/wav", id[:-5], id + ".wav" + ) + + +def get_emotion(wav_path, annotations): + """ + Get the emotion of an audio from its filepath + """ + id = wav_path.split("/")[-1][:-4] + return 
annotations[id] + + +def concat_wavs(data_folder, save_json, emo_wavs, neu_wavs, annotations): + """ + Concatenate audios from the same speaker + The concatenation is produced with a probability for each modality + The amplitude of the sub-sentences are made equal during concatenation + """ + repos = [ + "Ses01F", + "Ses01M", + "Ses02F", + "Ses02M", + "Ses03F", + "Ses03M", + "Ses04F", + "Ses04M", + "Ses05F", + "Ses05M", + ] + data_json = {} + for repo in repos: + emotion_wavs = [ + i for i in emo_wavs if repo in i and f"_{repo[-1]}" in i + ] + neutral_wavs = [ + i for i in neu_wavs if repo in i and f"_{repo[-1]}" in i + ] + + random.shuffle(emotion_wavs) + random.shuffle(neutral_wavs) + neutral_wavs = neutral_wavs * 10 + + combine_path = os.path.join(data_folder, "combined", repo) + if not os.path.exists(combine_path): + os.makedirs(combine_path) + + while len(emotion_wavs) > 0: + combination = np.random.choice( + combinations, p=probabilities.ravel() + ) + + if combination == "neu_emo": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input.dBFS - emotion_input.dBFS + combined_input = neutral_input + emotion_input + + out_name = os.path.join( + combine_path, + neutral_sample.split("/")[-1][:-4] + + "_" + + emo_sample.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample, annotations), + "start": len(neutral_input) / 1000, + "end": len(combined_input) / 1000, + } + ], + } + + neutral_wavs = neutral_wavs[1:] + emotion_wavs = emotion_wavs[1:] + + elif combination == "emo_neu": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = 
AudioSegment.from_wav(emo_sample) + + neutral_input += emotion_input.dBFS - neutral_input.dBFS + combined_input = emotion_input + neutral_input + + out_name = os.path.join( + combine_path, + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample, annotations), + "start": 0, + "end": len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[1:] + + elif combination == "neu_emo_neu": + neutral_sample_1 = neutral_wavs[0] + neutral_sample_2 = neutral_wavs[1] + emo_sample = emotion_wavs[0] + + neutral_input_1 = AudioSegment.from_wav(neutral_sample_1) + neutral_input_2 = AudioSegment.from_wav(neutral_sample_2) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input_1.dBFS - emotion_input.dBFS + neutral_input_2 += neutral_input_1.dBFS - neutral_input_2.dBFS + combined_input = ( + neutral_input_1 + emotion_input + neutral_input_2 + ) + + out_name = os.path.join( + combine_path, + neutral_sample_1.split("/")[-1][:-4] + + "_" + + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample_2.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample, annotations), + "start": len(neutral_input_1) / 1000, + "end": len(neutral_input_1) / 1000 + + len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[2:] + + else: + emo_sample_1 = emotion_wavs[0] + + emotion_input_1 = AudioSegment.from_wav(emo_sample_1) + + out_name = os.path.join( + combine_path, emo_sample_1.split("/")[-1] + ) + emotion_input_1.export(out_name, format="wav") + + id = 
out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(emotion_input_1) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample_1, annotations), + "start": 0, + "end": len(emotion_input_1) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + + with open(save_json, "w", encoding="utf-8") as outfile: + json.dump(data_json, outfile) + return data_json + + +def skip(save_json): + """ + Detects if the data preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + save_json : str + Path to check for existence. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + if not os.path.isfile(save_json): + return False + return True diff --git a/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_JLCORPUS.py b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_JLCORPUS.py new file mode 100644 index 0000000000..9b888aaf08 --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/prepare_JLCORPUS.py @@ -0,0 +1,310 @@ +""" +Data preparation for JL-Corpus. + +Dataset link: https://www.kaggle.com/datasets/tli725/jl-corpus?resource=download + +extra dependencies: pathlib, pydub, webrtcvad + +Author +------ +Yingzhi Wang 2023 +""" + +import json +import os +import random +import shutil +from pathlib import Path + +import numpy as np +from datasets.vad import vad_for_folder +from pydub import AudioSegment + +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +repos = [ + "female1", + "female2", + "male1", + "male2", +] +combinations = ["neu_emo", "emo_neu", "neu_emo_neu", "emo_emo"] +probabilities = np.array([0.25, 0.25, 0.25, 0.25]) + + +def prepare_jlcorpus(data_folder, save_json, seed=12): + """ + Prepares the json files for the JL-CORPUS dataset. 
def resampling_for_folder(in_folder, out_folder):
    """Resample every audio file of a folder to 16 kHz.

    Arguments
    ---------
    in_folder : str
        Folder containing the source wav files.
    out_folder : str
        Folder where the resampled files are written (may equal in_folder).
    """
    for file_name in os.listdir(in_folder):
        try:
            sound = AudioSegment.from_file(
                os.path.join(in_folder, file_name), format="wav"
            )
            sound = sound.set_frame_rate(16000)
            sound.export(os.path.join(out_folder, file_name), format="wav")
        except Exception as e:
            # A failed conversion is an anomaly, not routine progress:
            # log it at warning level instead of info.
            logger.warning(e)


def get_emotion(wav_path):
    """Infer the emotion label of a JL-Corpus audio from its file path.

    Arguments
    ---------
    wav_path : str
        Path whose basename contains an emotion keyword
        (e.g. "female1_angry_1a_1.wav").

    Returns
    -------
    str or None
        "angry", "happy" (also used for "excited"), or "sad";
        None when no keyword matches (e.g. neutral files).
    """
    if "angry" in wav_path:
        return "angry"
    if "happy" in wav_path or "excited" in wav_path:
        return "happy"
    if "sad" in wav_path:
        return "sad"
    # Explicit fall-through: the original returned None implicitly.
    return None
probability for each modality + The amplitude of the sub-sentences are made equal during concatenation + """ + data_json = {} + for repo in repos: + emotion_wavs = [] + neutral_wavs = [] + + paths = Path(os.path.join(data_folder, "processed", repo)) + angry_files = paths.rglob("*angry*.wav") + happy_files = paths.rglob("*happy*.wav") + excited_files = paths.rglob("*excited*.wav") + sad_files = paths.rglob("*sad*.wav") + neutral_files = paths.rglob("*neutral*.wav") + + for file in angry_files: + emotion_wavs.append(str(file)) + for file in happy_files: + emotion_wavs.append(str(file)) + for file in excited_files: + emotion_wavs.append(str(file)) + for file in sad_files: + emotion_wavs.append(str(file)) + for file in neutral_files: + neutral_wavs.append(str(file)) + + random.shuffle(emotion_wavs) + random.shuffle(neutral_wavs) + neutral_wavs = neutral_wavs * 10 + + combine_path = os.path.join(data_folder, "combined", repo) + if not os.path.exists(combine_path): + os.makedirs(combine_path) + + while len(emotion_wavs) > 0: + combination = np.random.choice( + combinations, p=probabilities.ravel() + ) + if combination == "neu_emo": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input.dBFS - emotion_input.dBFS + combined_input = neutral_input + emotion_input + + out_name = os.path.join( + combine_path, + neutral_sample.split("/")[-1][:-4] + + "_" + + emo_sample.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = repo + "_" + out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": len(neutral_input) / 1000, + "end": len(combined_input) / 1000, + } + ], + } + + neutral_wavs = neutral_wavs[1:] + emotion_wavs = emotion_wavs[1:] + + elif combination == "emo_neu": + neutral_sample = 
neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + neutral_input += emotion_input.dBFS - neutral_input.dBFS + combined_input = emotion_input + neutral_input + + out_name = os.path.join( + combine_path, + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = repo + "_" + out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": 0, + "end": len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[1:] + + elif combination == "neu_emo_neu": + neutral_sample_1 = neutral_wavs[0] + neutral_sample_2 = neutral_wavs[1] + emo_sample = emotion_wavs[0] + + neutral_input_1 = AudioSegment.from_wav(neutral_sample_1) + neutral_input_2 = AudioSegment.from_wav(neutral_sample_2) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input_1.dBFS - emotion_input.dBFS + neutral_input_2 += neutral_input_1.dBFS - neutral_input_2.dBFS + combined_input = ( + neutral_input_1 + emotion_input + neutral_input_2 + ) + + out_name = os.path.join( + combine_path, + neutral_sample_1.split("/")[-1][:-4] + + "_" + + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample_2.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = repo + "_" + out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": len(neutral_input_1) / 1000, + "end": len(neutral_input_1) / 1000 + + len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[2:] + + else: + emo_sample_1 = emotion_wavs[0] + + emotion_input_1 = AudioSegment.from_wav(emo_sample_1) + + out_name = 
def skip(save_json):
    """
    Detects if the data preparation has been already done.
    If the preparation has been done, we can skip it.

    Arguments
    ---------
    save_json : str
        Path to check for existence.

    Returns
    -------
    bool
        if True, the preparation phase can be skipped.
        if False, it must be done.
    """
    # The manifest is only written at the very end of preparation, so its
    # existence implies a completed previous run.
    return os.path.isfile(save_json)
+ +Dataset link: https://zenodo.org/record/1188976 + +extra dependencies: pathlib, pydub, webrtcvad + +Author +------ +Yingzhi Wang 2023 +""" + +import json +import os +import random +from pathlib import Path + +import numpy as np +from datasets.vad import vad_for_folder +from pydub import AudioSegment + +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) +repos = [ + "Actor_01", + "Actor_02", + "Actor_03", + "Actor_04", + "Actor_05", + "Actor_06", + "Actor_07", + "Actor_08", + "Actor_09", + "Actor_10", + "Actor_11", + "Actor_12", + "Actor_13", + "Actor_14", + "Actor_15", + "Actor_16", + "Actor_17", + "Actor_18", + "Actor_19", + "Actor_20", + "Actor_21", + "Actor_22", + "Actor_23", + "Actor_24", +] +combinations = ["neu_emo", "emo_neu", "neu_emo_neu", "emo_emo"] +probabilities = np.array([0.25, 0.25, 0.25, 0.25]) + + +def prepare_ravdess(data_folder, save_json, seed=12): + """ + Prepares the json files for the RAVDESS dataset. + + Arguments + --------- + data_folder : str + Path to the folder where the original RAVDESS dataset is stored. + save_json : str + Path where the data specification file will be saved. 
def resampling_for_folder(in_folder, out_folder):
    """Resample every audio file of a folder to 16 kHz.

    Arguments
    ---------
    in_folder : str
        Folder containing the source wav files.
    out_folder : str
        Folder where the resampled files are written.
    """
    for file_name in os.listdir(in_folder):
        try:
            sound = AudioSegment.from_file(
                os.path.join(in_folder, file_name), format="wav"
            )
            sound = sound.set_frame_rate(16000)
            sound.export(os.path.join(out_folder, file_name), format="wav")
        except Exception as e:
            # Failures deserve more than info level.
            logger.warning(e)


# RAVDESS filenames are dash-separated fields; the third field encodes
# the emotion (03 = happy, 04 = sad, 05 = angry).
_EMOTION_CODES = {"05": "angry", "03": "happy", "04": "sad"}


def get_emotion(wav_path):
    """Map a RAVDESS file path to its emotion label.

    Arguments
    ---------
    wav_path : str
        Path to a RAVDESS wav, e.g. ".../03-01-05-01-01-01-01.wav".

    Returns
    -------
    str or None
        "angry", "happy", or "sad"; None for any other code
        (e.g. "01" neutral), matching the original implicit behavior.
    """
    file_name = wav_path.split("/")[-1][:-4]
    num = file_name.split("-")[2]
    return _EMOTION_CODES.get(num)
paths.rglob("*-*-03-*-*-*-*.wav") + sad_files = paths.rglob("*-*-04-*-*-*-*.wav") + neutral_files = paths.rglob("*-*-01-*-*-*-*.wav") + + for file in angry_files: + emotion_wavs.append(str(file)) + for file in happy_files: + emotion_wavs.append(str(file)) + for file in sad_files: + emotion_wavs.append(str(file)) + for file in neutral_files: + neutral_wavs.append(str(file)) + + random.shuffle(emotion_wavs) + random.shuffle(neutral_wavs) + neutral_wavs = neutral_wavs * 10 + + combine_path = os.path.join(data_folder, "combined", repo) + if not os.path.exists(combine_path): + os.makedirs(combine_path) + + while len(emotion_wavs) > 0: + combination = np.random.choice( + combinations, p=probabilities.ravel() + ) + if combination == "neu_emo": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input.dBFS - emotion_input.dBFS + combined_input = neutral_input + emotion_input + + out_name = os.path.join( + combine_path, + neutral_sample.split("/")[-1][:-4] + + "_" + + emo_sample.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": len(neutral_input) / 1000, + "end": len(combined_input) / 1000, + } + ], + } + + neutral_wavs = neutral_wavs[1:] + emotion_wavs = emotion_wavs[1:] + + elif combination == "emo_neu": + neutral_sample = neutral_wavs[0] + emo_sample = emotion_wavs[0] + + neutral_input = AudioSegment.from_wav(neutral_sample) + emotion_input = AudioSegment.from_wav(emo_sample) + + neutral_input += emotion_input.dBFS - neutral_input.dBFS + combined_input = emotion_input + neutral_input + + out_name = os.path.join( + combine_path, + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample.split("/")[-1], + ) + 
combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": 0, + "end": len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[1:] + + elif combination == "neu_emo_neu": + neutral_sample_1 = neutral_wavs[0] + neutral_sample_2 = neutral_wavs[1] + emo_sample = emotion_wavs[0] + + neutral_input_1 = AudioSegment.from_wav(neutral_sample_1) + neutral_input_2 = AudioSegment.from_wav(neutral_sample_2) + emotion_input = AudioSegment.from_wav(emo_sample) + + emotion_input += neutral_input_1.dBFS - emotion_input.dBFS + neutral_input_2 += neutral_input_1.dBFS - neutral_input_2.dBFS + combined_input = ( + neutral_input_1 + emotion_input + neutral_input_2 + ) + + out_name = os.path.join( + combine_path, + neutral_sample_1.split("/")[-1][:-4] + + "_" + + emo_sample.split("/")[-1][:-4] + + "_" + + neutral_sample_2.split("/")[-1], + ) + combined_input.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(combined_input) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample), + "start": len(neutral_input_1) / 1000, + "end": len(neutral_input_1) / 1000 + + len(emotion_input) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + neutral_wavs = neutral_wavs[2:] + + else: + emo_sample_1 = emotion_wavs[0] + + emotion_input_1 = AudioSegment.from_wav(emo_sample_1) + + out_name = os.path.join( + combine_path, emo_sample_1.split("/")[-1] + ) + emotion_input_1.export(out_name, format="wav") + + id = out_name.split("/")[-1][:-4] + data_json[id] = { + "wav": out_name, + "duration": len(emotion_input_1) / 1000, + "emotion": [ + { + "emo": get_emotion(emo_sample_1), + "start": 0, + "end": len(emotion_input_1) / 1000, + } + ], + } + + emotion_wavs = emotion_wavs[1:] + + with open(save_json, "w", 
encoding="utf-8") as outfile: + json.dump(data_json, outfile) + return data_json + + +def skip(save_json): + """ + Detects if the data preparation has been already done. + If the preparation has been done, we can skip it. + + Arguments + --------- + save_json : str + Path to json file to check for existence. + + Returns + ------- + bool + if True, the preparation phase can be skipped. + if False, it must be done. + """ + if not os.path.isfile(save_json): + return False + return True diff --git a/recipes/ZaionEmotionDataset/emotion_diarization/datasets/vad.py b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/vad.py new file mode 100644 index 0000000000..cb1ac4d543 --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/datasets/vad.py @@ -0,0 +1,170 @@ +""" +Vad for files/folders with webrtcvad. + +Credit: https://github.com/wiseman/py-webrtcvad/blob/master/example.py + +Author +------ +Yingzhi Wang 2023 +""" + +import collections +import contextlib +import os +import wave + +import webrtcvad + + +def read_wave(path): + """Reads a .wav file. + Takes the path, and returns (PCM audio data, sample rate). + """ + with contextlib.closing(wave.open(path, "rb")) as wf: + num_channels = wf.getnchannels() + assert num_channels == 1 + sample_width = wf.getsampwidth() + assert sample_width == 2 + sample_rate = wf.getframerate() + assert sample_rate in (8000, 16000, 32000, 48000) + pcm_data = wf.readframes(wf.getnframes()) + return pcm_data, sample_rate + + +def write_wave(path, audio, sample_rate): + """Writes a .wav file. + Takes path, PCM audio data, and sample rate. 
+ """ + with contextlib.closing(wave.open(path, "wb")) as wf: + wf.setnchannels(1) + wf.setsampwidth(2) + wf.setframerate(sample_rate) + wf.writeframes(audio) + + +class Frame: + """Represents a "frame" of audio data.""" + + def __init__(self, bytes, timestamp, duration): + self.bytes = bytes + self.timestamp = timestamp + self.duration = duration + + +def frame_generator(frame_duration_ms, audio, sample_rate): + """Generates audio frames from PCM audio data. + Takes the desired frame duration in milliseconds, the PCM data, and + the sample rate. + Yields Frames of the requested duration. + """ + n = int(sample_rate * (frame_duration_ms / 1000.0) * 2) + offset = 0 + timestamp = 0.0 + duration = (float(n) / sample_rate) / 2.0 + while offset + n < len(audio): + yield Frame(audio[offset : offset + n], timestamp, duration) + timestamp += duration + offset += n + + +def vad_collector( + sample_rate, frame_duration_ms, padding_duration_ms, vad, frames +): + """generate vad segments""" + num_padding_frames = int(padding_duration_ms / frame_duration_ms) + # We use a deque for our sliding window/ring buffer. + ring_buffer = collections.deque(maxlen=num_padding_frames) + # We have two states: TRIGGERED and NOTTRIGGERED. We start in the + # NOTTRIGGERED state. + triggered = False + + voiced_frames = [] + for frame in frames: + is_speech = vad.is_speech(frame.bytes, sample_rate) + + # sys.stdout.write('1' if is_speech else '0') + if not triggered: + ring_buffer.append((frame, is_speech)) + num_voiced = len([f for f, speech in ring_buffer if speech]) + # If we're NOTTRIGGERED and more than 90% of the frames in + # the ring buffer are voiced frames, then enter the + # TRIGGERED state. + if num_voiced > 0.9 * ring_buffer.maxlen: + triggered = True + # sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,)) + # We want to yield all the audio we see from now until + # we are NOTTRIGGERED, but we have to start with the + # audio that's already in the ring buffer. 
def vad_for_folder(input_path, out_path):
    """Apply webrtcvad to every file of a folder, keeping only voiced audio.

    Arguments
    ---------
    input_path : str
        Folder with 16 kHz mono 16-bit wav files.
    out_path : str
        Folder where the voiced-only files are written.
    """
    for file in os.listdir(input_path):
        try:
            audio, sample_rate = read_wave(os.path.join(input_path, file))
        except Exception as e:
            print(e)
            # Bug fix: without this `continue`, a failed read either raised
            # a NameError (first file) or silently reprocessed the previous
            # file's audio under the current file's name.
            continue

        vad = webrtcvad.Vad(3)  # aggressiveness 3 = most aggressive filtering
        frames = list(frame_generator(30, audio, sample_rate))
        segments = vad_collector(sample_rate, 30, 30, vad, frames)

        total_segment = b"".join(segments)

        # abandon short results (< 0.2s): 0.2 s * 16000 Hz * 2 bytes
        if len(total_segment) > 6400:
            write_wave(os.path.join(out_path, file), total_segment, sample_rate)
frames) + + total_segment = b"" + + for i, segment in enumerate(segments): + total_segment += segment + + # abandon short emotions (< 0.2s) + if len(total_segment) > 6400: # 0.2 * 16000 * 2 + write_wave(out_path, total_segment, sample_rate) diff --git a/recipes/ZaionEmotionDataset/emotion_diarization/extra_requirements.txt b/recipes/ZaionEmotionDataset/emotion_diarization/extra_requirements.txt new file mode 100644 index 0000000000..e7c5685753 --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/extra_requirements.txt @@ -0,0 +1,3 @@ +pathlib>=1.0.1 +pydub>=0.25.1 +webrtcvad>=2.0.10 diff --git a/recipes/ZaionEmotionDataset/emotion_diarization/hparams/train.yaml b/recipes/ZaionEmotionDataset/emotion_diarization/hparams/train.yaml new file mode 100644 index 0000000000..11da9e9fbd --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/hparams/train.yaml @@ -0,0 +1,149 @@ +# ####################################### +# Model: wavlm for Emotion Diarization +# Authors: +# * Yingzhi Wang 2023 +# ################################ + +# Seed needs to be set at top of yaml, before objects with parameters are made +seed: 78 +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] +output_folder: !ref results/zed_wavlm_large/ +eder_file: !ref /eder.txt +save_folder: !ref /save +train_log: !ref /train_log.txt + +wav2vec2_hub: "microsoft/wavlm-large" # "facebook/wav2vec2-large" "facebook/hubert-large-ll60k" +wav2vec2_folder: !ref /wav2vec2_checkpoint +# Data files +zed_folder: !PLACEHOLDER # e,g./path/to/ZED + +emovdb_folder: !PLACEHOLDER # e,g./path/to/EmoV-DB +esd_folder: !PLACEHOLDER # e,g./path/to/ESD +iemocap_folder: !PLACEHOLDER # e,g./path/to/IEMOCAP_full_release +jlcorpus_folder: !PLACEHOLDER # e,g./path/to/JL_corpus +ravdess_folder: !PLACEHOLDER # e,g./path/to/RAVDESS + +split_ratio: [90, 10] +skip_prep: False + +train_annotation: !ref /train.json +valid_annotation: !ref /valid.json +test_annotation: !ref /test.json + 
+####################### Training Parameters #################################### +number_of_epochs: 15 +lr: 0.0001 +lr_wav2vec: 0.00001 +# precision: fp32 # bf16, fp16 or fp32 +# do_resample: False +# sample_rate: 16000 + +# With data_parallel batch_size is split into N jobs +# With DDP batch_size is multiplied by N jobs +# Must be 3 per GPU to fit 32GB of VRAM +batch_size: 2 +test_batch_size: 1 + +#freeze all wav2vec2 +freeze_wav2vec2: False +freeze_wav2vec2_conv: True + +window_length: 1 # win_len = 0.02 * 1 = 0.02s +stride: 1 # stride = 0.02 * 1 = 0.02s + +encoder_dim: 1024 +# Outputs +out_n_neurons: 4 # BPE size, index(blank/eos/bos) = 0 + +use_threshold: False +threshold: -0.05 + +# Dataloader options +dataloader_options: + batch_size: !ref + shuffle: True + num_workers: 2 # 2 on linux but 0 works on windows + drop_last: False + +test_dataloader_opts: + batch_size: !ref + +# # DER evaluation parameters +# ignore_overlap: True +# forgiveness_collar: 0.25 + +epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter + limit: !ref + + +input_norm: !new:speechbrain.processing.features.InputNormalization + norm_type: sentence + std_norm: False + +wav2vec2: !new:speechbrain.integrations.huggingface.wavlm.WavLM + source: !ref + output_norm: True + freeze: !ref + freeze_feature_extractor: !ref + save_path: !ref + # output_all_hiddens: False + +avg_pool: !new:speechbrain.nnet.pooling.Pooling1d + pool_type: "avg" + kernel_size: !ref + stride: !ref + ceil_mode: True + +output_mlp: !new:speechbrain.nnet.linear.Linear + input_size: !ref + n_neurons: !ref + bias: False + +log_softmax: !new:speechbrain.nnet.activations.Softmax + apply_log: True + +compute_cost: !name:speechbrain.nnet.losses.nll_loss + +modules: + input_norm: !ref + wav2vec2: !ref + output_mlp: !ref + +model: !new:torch.nn.ModuleList + - [!ref ] + +opt_class: !name:torch.optim.Adam + lr: !ref + +wav2vec2_opt_class: !name:torch.optim.Adam + lr: !ref + +lr_annealing: 
!new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 + +lr_annealing_wav2vec2: !new:speechbrain.nnet.schedulers.NewBobScheduler + initial_value: !ref + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 + + +checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer + checkpoints_dir: !ref + recoverables: + input_norm: !ref + wav2vec2: !ref + model: !ref + scheduler_model: !ref + scheduler_wav2vec: !ref + counter: !ref + +train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger + save_file: !ref + +error_stats: !name:speechbrain.utils.metric_stats.MetricStats + metric: !name:speechbrain.nnet.losses.classification_error + reduction: batch diff --git a/recipes/ZaionEmotionDataset/emotion_diarization/train.py b/recipes/ZaionEmotionDataset/emotion_diarization/train.py new file mode 100644 index 0000000000..abee38089a --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/train.py @@ -0,0 +1,351 @@ +#!/usr/bin/env python3 +""" +Authors + * Yingzhi WANG 2023 +""" + +import itertools +import json +import os +import sys + +import torch +from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb +from speechbrain.utils.EDER import EDER + + +class EmoDiaBrain(sb.Brain): + def compute_forward(self, batch, stage): + """Computation pipeline based on a encoder + emotion classifier.""" + batch = batch.to(self.device) + + self.modules = self.modules.to(self.device) + + wavs, lens = batch.sig + wavs = self.hparams.input_norm(wavs, lens) + outputs = self.modules.wav2vec2(wavs) + averaged_out = self.hparams.avg_pool(outputs) + + outputs = self.modules.output_mlp(averaged_out) + + outputs = self.hparams.log_softmax(outputs) + + return outputs + + def compute_objectives(self, predictions, batch, stage): + """Computes the loss using speaker-id as label.""" + emoid, _ = batch.emo_encoded + + if stage == sb.Stage.TEST: + if self.hparams.use_threshold: + 
preds = threshold_tuning(predictions, self.hparams.threshold) + else: + preds = torch.argmax(predictions, dim=2) + + emoid_decoded = label_encoder.decode_ndim(emoid) + preds_decoded = label_encoder.decode_ndim(preds) + + self.load_ZED() + with open(self.hparams.eder_file, "a", encoding="utf-8") as w: + for i in range(len(batch.id)): + if len(preds_decoded[i]) < len(emoid_decoded[i]): + preds_decoded[i].append(preds_decoded[i][-1]) + eder = EDER( + prediction=preds_decoded[i], + id=batch.id[i], + duration=self.ZED[batch.id[i]]["duration"], + emotion=self.ZED[batch.id[i]]["emotion"], + window_length=self.hparams.window_length * 0.02, + stride=self.hparams.stride * 0.02, + ) + + w.write(" wav_id : " + batch.id[i] + "\n") + w.write(" reference : " + "".join(emoid_decoded[i]) + "\n") + w.write("prediction : " + "".join(preds_decoded[i]) + "\n") + w.write( + " ctc_label : " + + "".join(del_adjacent(emoid_decoded[i])) + + "\n" + ) + w.write( + " ctc_pred : " + + "".join(del_adjacent(preds_decoded[i])) + + "\n" + ) + w.write(" EDER : " + str(eder) + "\n") + w.write("\n") + + self.eder.append(eder) + + loss = self.hparams.compute_cost(predictions, emoid) + if stage != sb.Stage.TRAIN: + self.error_metrics.append(batch.id, predictions, emoid) + + return loss + + def on_stage_start(self, stage, epoch=None): + """Gets called at the beginning of each epoch. + Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + """ + + # Set up statistics trackers for this stage + self.loss_metric = sb.utils.metric_stats.MetricStats( + metric=sb.nnet.losses.nll_loss + ) + self.eder = [] + # Set up evaluation-only statistics trackers + if stage != sb.Stage.TRAIN: + self.error_metrics = self.hparams.error_stats() + + def on_stage_end(self, stage, stage_loss, epoch=None): + """Gets called at the end of an epoch. 
+ Arguments + --------- + stage : sb.Stage + One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST + stage_loss : float + The average loss for all of the data processed in this stage. + epoch : int + The currently-starting epoch. This is passed + `None` during the test stage. + """ + + # Store the train loss until the validation stage. + if stage == sb.Stage.TRAIN: + self.train_loss = stage_loss + + # Summarize the statistics from the stage for record-keeping. + else: + stats = { + "loss": stage_loss, + "error_rate": self.error_metrics.summarize("average"), + } + + # At the end of validation... + if stage == sb.Stage.VALID: + old_lr, new_lr = self.hparams.lr_annealing(stats["error_rate"]) + sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr) + + ( + old_lr_wav2vec2, + new_lr_wav2vec2, + ) = self.hparams.lr_annealing_wav2vec2(stats["error_rate"]) + sb.nnet.schedulers.update_learning_rate( + self.wav2vec2_optimizer, new_lr_wav2vec2 + ) + + # The train_logger writes a summary to stdout and to the logfile. + self.hparams.train_logger.log_stats( + {"Epoch": epoch, "lr": old_lr, "wave2vec_lr": old_lr_wav2vec2}, + train_stats={"loss": self.train_loss}, + valid_stats=stats, + ) + + # Save the current checkpoint and delete previous checkpoints, + self.checkpointer.save_and_keep_only( + meta=stats, min_keys=["error_rate"] + ) + + # We also write statistics about test data to stdout and to logfile. 
+ if stage == sb.Stage.TEST: + self.hparams.train_logger.log_stats( + {"Epoch loaded": self.hparams.epoch_counter.current}, + test_stats={ + "loss": stats["loss"], + "error_rate": stats["error_rate"], + "EDER": sum(self.eder) / len(self.eder), + }, + ) + # with open(self.hparams.cer_file, "a", encoding="utf-8") as w: + # self.error_metrics.write_stats(w) + + def load_ZED(self): + with open(self.hparams.test_annotation, encoding="utf-8") as f: + ZED_data = json.load(f) + self.ZED = ZED_data + + def init_optimizers(self): + "Initializes the wav2vec2 optimizer and model optimizer" + self.wav2vec2_optimizer = self.hparams.wav2vec2_opt_class( + self.modules.wav2vec2.parameters() + ) + self.optimizer = self.hparams.opt_class(self.hparams.model.parameters()) + + if self.checkpointer is not None: + self.checkpointer.add_recoverable( + "wav2vec2_opt", self.wav2vec2_optimizer + ) + self.checkpointer.add_recoverable("optimizer", self.optimizer) + + self.optimizers_dict = { + "wav2vec2": self.wav2vec2_optimizer, + "model": self.optimizer, + } + + +def dataio_prep(hparams): + """This function prepares the datasets to be used in the brain class. + It also defines the data processing pipeline through user-defined + functions. We expect `prepare_mini_librispeech` to have been called before + this, so that the `train.json`, `valid.json`, and `valid.json` manifest + files are available. + Arguments + --------- + hparams : dict + This dictionary is loaded from the `train.yaml` file, and it includes + all the hyperparameters needed for dataset construction and loading. + Returns + ------- + datasets : dict + Contains two keys, "train" and "valid" that correspond + to the appropriate DynamicItemDataset object. + """ + + # Define audio pipeline + @sb.utils.data_pipeline.takes("wav") + @sb.utils.data_pipeline.provides("sig") + def audio_pipeline(wav): + """Load the signal, and pass it and its length to the corruption class. 
def threshold_tuning(batch_predictions, threshold):
    """Post-process predictions with a confidence threshold.

    Frames whose best log-probability does not exceed ``threshold`` are
    mapped to class 0; the other frames keep their argmax class.

    Arguments
    ---------
    batch_predictions : torch.Tensor
        Log-probabilities of shape (batch, time, 4).
    threshold : float
        Minimum log-probability for a frame to keep its argmax label.

    Returns
    -------
    torch.Tensor
        Thresholded class indices of shape (batch, time).
    """
    argmax_preds = torch.argmax(batch_predictions, dim=2)
    # `max_scores` renamed from `max`, which shadowed the builtin.
    max_scores, _ = torch.max(batch_predictions, dim=2)
    keep = torch.gt(max_scores, threshold)
    return torch.mul(keep, argmax_preds)


def del_adjacent(seq):
    """Collapse runs of identical adjacent elements (CTC-style squeeze).

    Arguments
    ---------
    seq : list
        Sequence of labels.

    Returns
    -------
    list
        ``seq`` with each run of consecutive duplicates reduced to one
        element.
    """
    # `seq` renamed from `list`, which shadowed the builtin; all call
    # sites pass the argument positionally.
    return [k for k, _ in itertools.groupby(seq)]
+ with open(hparams_file, encoding="utf-8") as fin: + hparams = load_hyperpyyaml(fin, overrides) + + # Create experiment directory + sb.create_experiment_directory( + experiment_directory=hparams["output_folder"], + hyperparams_to_save=hparams_file, + overrides=overrides, + ) + + # Data preparation, to be run on only one process. + if not hparams["skip_prep"]: + from zed_prepare import prepare_test, prepare_train + + sb.utils.distributed.run_on_main( + prepare_train, + kwargs={ + "save_json_train": hparams["train_annotation"], + "save_json_valid": hparams["valid_annotation"], + "split_ratio": hparams["split_ratio"], + "win_len": hparams["window_length"] * 0.02, + "stride": hparams["stride"] * 0.02, + "seed": hparams["seed"], + "emovdb_folder": hparams["emovdb_folder"], + "esd_folder": hparams["esd_folder"], + "iemocap_folder": hparams["iemocap_folder"], + "jlcorpus_folder": hparams["jlcorpus_folder"], + "ravdess_folder": hparams["ravdess_folder"], + }, + ) + sb.utils.distributed.run_on_main( + prepare_test, + kwargs={ + "ZED_folder": hparams["zed_folder"], + "save_json_test": hparams["test_annotation"], + "win_len": hparams["window_length"] * 0.02, + "stride": hparams["stride"] * 0.02, + }, + ) + # Create dataset objects "train", "valid", and "test". + datasets, label_encoder = dataio_prep(hparams) + + hparams["wav2vec2"] = hparams["wav2vec2"].to(run_opts["device"]) + + # freeze the feature extractor part when unfreezing + if not hparams["freeze_wav2vec2"] and hparams["freeze_wav2vec2_conv"]: + hparams["wav2vec2"].model.feature_extractor._freeze_parameters() + + # Initialize the Brain object to prepare for mask training. + emo_id_brain = EmoDiaBrain( + modules=hparams["modules"], + opt_class=hparams["opt_class"], + hparams=hparams, + run_opts=run_opts, + checkpointer=hparams["checkpointer"], + ) + + # The `fit()` method iterates the training loop, calling the methods + # necessary to update the parameters of the model. 
Since all objects + # with changing state are managed by the Checkpointer, training can be + # stopped at any point, and will be resumed on next call. + emo_id_brain.fit( + epoch_counter=emo_id_brain.hparams.epoch_counter, + train_set=datasets["train"], + valid_set=datasets["valid"], + train_loader_kwargs=hparams["dataloader_options"], + valid_loader_kwargs=hparams["dataloader_options"], + ) + + # Load the best checkpoint for evaluation + test_stats = emo_id_brain.evaluate( + test_set=datasets["test"], + min_key="error_rate", + test_loader_kwargs=hparams["test_dataloader_opts"], + ) diff --git a/recipes/ZaionEmotionDataset/emotion_diarization/zed_prepare.py b/recipes/ZaionEmotionDataset/emotion_diarization/zed_prepare.py new file mode 100644 index 0000000000..3d83ed9781 --- /dev/null +++ b/recipes/ZaionEmotionDataset/emotion_diarization/zed_prepare.py @@ -0,0 +1,332 @@ +""" +Data preparation for emotion diarization. + +Training set involves Emov-DB, ESD, IEMOCAP, JL-Corpus, RAVDESS +Test set used is Zaion Emotion Dataset + +Author +------ +Yingzhi Wang 2023 +""" + +import json +import os +import random + +from datasets.prepare_EMOVDB import prepare_emovdb +from datasets.prepare_ESD import prepare_esd +from datasets.prepare_IEMOCAP import prepare_iemocap +from datasets.prepare_JLCORPUS import prepare_jlcorpus +from datasets.prepare_RAVDESS import prepare_ravdess + +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) + + +def getOverlap(a, b): + """get the overlap length of two intervals + + Arguments + --------- + a : list + b : list + + Returns + ------- + overlap : float + """ + return max(0, min(a[1], b[1]) - max(a[0], b[0])) + + +def get_labels(data, win_len=0.02, stride=0.02): + """make labels for training/test + + Arguments + --------- + data : dict + a dictionary that contains: + { + 'wav': path, + 'duration': dur, + 'emotion': [{'emo': emo, 'start': s, 'end': e}], + 'transcription': trans, + } + win_len : float + the frame length 
used for frame-wise prediction + stride : float + the frame length used for frame-wise prediction + + Returns + ------- + intervals : list + labels : list + frame_labels : list + """ + emo_list = data["emotion"] + assert len(emo_list) == 1 + + duration = data["duration"] + emotion = data["emotion"][0]["emo"] + emo_start = data["emotion"][0]["start"] + emo_end = data["emotion"][0]["end"] + + number_frames = int(duration / stride) + 1 + + intervals = [] + labels = [] + if emo_start != 0: + intervals.append([0.0, emo_start]) + labels.append("n") + intervals.append([emo_start, emo_end]) + labels.append(emotion[0]) + if emo_end != duration: + intervals.append([emo_end, duration]) + labels.append("n") + + start = 0.0 + frame_labels = [] + for i in range(number_frames): + win_start = start + i * stride + win_end = win_start + win_len + + # make sure that every sample exists in a window + if win_end >= duration: + win_end = duration + win_start = max(duration - win_len, 0) + + for j in range(len(intervals)): + if getOverlap([win_start, win_end], intervals[j]) >= 0.5 * ( + win_end - win_start + ): + emo_frame = labels[j] + break + frame_labels.append(emo_frame) + if win_end >= duration: + break + return intervals, labels, frame_labels + + +def prepare_train( + save_json_train, + save_json_valid, + save_json_test=None, + split_ratio=[80, 20], + win_len=0.02, + stride=0.02, + seed=12, + emovdb_folder=None, + esd_folder=None, + iemocap_folder=None, + jlcorpus_folder=None, + ravdess_folder=None, +): + """training sets preparation + + Args: + save_json_train (str): train json path + save_json_valid (str): valid json path + save_json_test (str, None): Defaults to None for the current recipe. + split_ratio (list, optional): train/valid split. Defaults to [80, 20]. + win_len (float, optional): + window length for generating emotion labels. Defaults to 0.02. + stride (float, optional): + stride for generating emotion labels. Defaults to 0.02. 
seed (int, optional): random seed for the train/valid split. Defaults to 12.
+ ) + bad_keys.append(key) + continue + for key in bad_keys: + del all_dict[key] + + data_split = split_sets(all_dict, split_ratio) + + train_ids = data_split["train"] + train_split = {} + for id in train_ids: + train_split[id] = all_dict[id] + + valid_ids = data_split["valid"] + valid_split = {} + for id in valid_ids: + valid_split[id] = all_dict[id] + + create_json(train_split, save_json_train) + create_json(valid_split, save_json_valid) + if save_json_test is not None: + test_ids = data_split["test"] + test_split = {} + for id in test_ids: + test_split[id] = all_dict[id] + create_json(test_split, save_json_test) + + +def prepare_test(ZED_folder, save_json_test, win_len, stride): + """test(ZED) set preparation + + Args: + ZED_folder (str): path to ZED folder + save_json_test (str): test json path + win_len (float): + window length for generating emotion labels. Defaults to 0.02. + stride (float): + stride for generating emotion labels. Defaults to 0.02. + """ + if os.path.exists(save_json_test): + logger.info("test json exists, skipping preparation.") + return + + try: + zed_json_path = os.path.join(ZED_folder, "ZED.json") + with open(zed_json_path, encoding="utf-8") as f: + all_dict = json.load(f) + except OSError: + logger.info(f"ZED.json can't be found under {ZED_folder}") + return + + bad_keys = [] + for key in all_dict.keys(): + try: + all_dict[key]["wav"] = all_dict[key]["wav"].replace( + "datafolder", ZED_folder + ) + intervals, ctc_label, frame_label = get_labels( + all_dict[key], win_len, stride + ) + all_dict[key]["frame_label"] = frame_label + all_dict[key]["ctc_label"] = ctc_label + except ValueError: + logger.info( + f"Impossible to get labels for id {key} because the window is too large." 
+ ) + bad_keys.append(key) + continue + for key in bad_keys: + del all_dict[key] + + create_json(all_dict, save_json_test) + + +def split_sets(data_dict, split_ratio, splits=["train", "valid"]): + """Randomly splits the wav list into training, validation, and test lists. + + Arguments + --------- + data_dict : list + a dictionary of id and its corresponding audio information + split_ratio: list + List composed of three integers that sets split ratios for train, + valid, and test sets, respectively. + For instance split_ratio=[80, 10, 10] will assign 80% of the sentences + to training, 10% for validation, and 10% for test. + splits : list + List of splits. + + Returns + ------- + dictionary containing train, valid, and test splits. + """ + assert len(splits) == len(split_ratio) + id_list = list(data_dict.keys()) + # Random shuffle of the list + random.shuffle(id_list) + tot_split = sum(split_ratio) + tot_snts = len(id_list) + data_split = {} + splits = ["train", "valid"] + + for i, split in enumerate(splits): + n_snts = int(tot_snts * split_ratio[i] / tot_split) + data_split[split] = id_list[0:n_snts] + del id_list[0:n_snts] + if len(split_ratio) == 3: + data_split["test"] = id_list + return data_split + + +def create_json(data, json_file): + """ + Creates the json file given a list of wav information. + Arguments + --------- + data : dict + The dict of wav information (path, label, emotion). 
+ json_file : str + The path of the output json file + """ + # Writing the dictionary to the json file + with open(json_file, mode="w", encoding="utf-8") as json_f: + json.dump(data, json_f, indent=2) + logger.info(f"{json_file} successfully created!") + + +def check_and_prepare_dataset( + data_folder, data_name, prepare_function, dictionary, seed +): + """check if the preparation is done, do it if not + + Args: + data_folder (str): path to dataset + data_name (str): name of the dataset + prepare_function (function): the preparation function + dictionary (dict): the overall dictionary to be updated + seed (int): the random seed for reproduction + """ + if data_folder is not None: + if not os.path.exists(os.path.join(data_folder, f"{data_name}.json")): + data = prepare_function( + data_folder, + os.path.join(data_folder, f"{data_name}.json"), + seed, + ) + else: + json_path = os.path.join(data_folder, data_name + ".json") + logger.info( + f"{json_path} exists, skipping f{data_name} preparation." + ) + with open(json_path, encoding="utf-8") as f: + data = json.load(f) + dictionary.update(data.items()) + else: + logger.info(f"{data_name} is not used in this exp.") diff --git a/recipes/fluent-speech-commands/README.md b/recipes/fluent-speech-commands/README.md index bf84b1f91e..74de6e0f4c 100644 --- a/recipes/fluent-speech-commands/README.md +++ b/recipes/fluent-speech-commands/README.md @@ -24,7 +24,7 @@ python train.py hparams/train.yaml | Release | hyperparams file | Test Acc | Model link | GPUs | |:-------------:|:---------------------------:| -----:| -----:| --------:| -| 21-06-03 | train.yaml | 99.60% | https://drive.google.com/drive/folders/13t2PYdedrPQoNYo_QSf6s04WXu2_vAb-?usp=sharing | 1xV100 32GB | +| 21-06-03 | train.yaml | 99.60% | https://www.dropbox.com/sh/wal9ap0go9f66qw/AADBVlGs_E2pEU4vYJgEe3Fba?dl=0 | 1xV100 32GB | # PreTrained Model + Easy-Inference @@ -45,6 +45,15 @@ About 15 minutes for each epoch with a TESLA V100. 
Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/fluent-speech-commands/Tokenizer/hparams/tokenizer_bpe51.yaml b/recipes/fluent-speech-commands/Tokenizer/hparams/tokenizer_bpe51.yaml index db7c1ddb72..eff38c7bf4 100644 --- a/recipes/fluent-speech-commands/Tokenizer/hparams/tokenizer_bpe51.yaml +++ b/recipes/fluent-speech-commands/Tokenizer/hparams/tokenizer_bpe51.yaml @@ -13,7 +13,7 @@ train_csv: !ref /train.csv valid_csv: !ref /valid.csv skip_prep: False -# Training parameters +####################### Training Parameters #################################### token_type: unigram # ["unigram", "bpe", "char"] token_output: 51 # 
index(blank/eos/bos/unk) = 0 character_coverage: 1.0 diff --git a/recipes/fluent-speech-commands/Tokenizer/train.py b/recipes/fluent-speech-commands/Tokenizer/train.py index 4b8b72e69f..95bd4dba04 100644 --- a/recipes/fluent-speech-commands/Tokenizer/train.py +++ b/recipes/fluent-speech-commands/Tokenizer/train.py @@ -1,6 +1,6 @@ #!/usr/bin/env/python3 """Recipe for training a BPE tokenizer for Fluent Speech Commands. -The tokenizer coverts semantics into sub-word units that can +The tokenizer converts semantics into sub-word units that can be used to train a language (LM) or an acoustic model (AM). To run this recipe, do the following: @@ -14,18 +14,18 @@ """ import sys -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) diff --git a/recipes/fluent-speech-commands/direct/hparams/train.yaml b/recipes/fluent-speech-commands/direct/hparams/train.yaml index 81f93ae239..354b2d4626 100644 --- a/recipes/fluent-speech-commands/direct/hparams/train.yaml +++ b/recipes/fluent-speech-commands/direct/hparams/train.yaml @@ -10,27 +10,36 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/BPE51/ save_folder: !ref /save train_log: !ref /train_log.txt +test_wer_file: !ref /wer_test.txt + +# Data for augmentation +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 
+RIR_DATASET_URL: https://www.dropbox.com/scl/fi/linhy77c36mu10965a836/RIRs.zip?rlkey=pg9cu8vrpn2u173vhiqyu743u&dl=1 # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/fluent_speech_commands_dataset -rir_folder: !ref # Change it if needed -csv_train: !ref /train.csv -csv_valid: !ref /valid.csv -csv_test: !ref /test.csv +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +data_folder_rir: !ref /rir # The impulse responses used for data augmentation will automatically be downloaded here. +csv_train: !ref /train.csv +csv_valid: !ref /valid.csv +csv_test: !ref /test.csv +noise_annotation: !ref /noise.csv +rir_annotation: !ref /rir.csv + tokenizer_file: https://www.dropbox.com/s/hvf2huofnq0sjbn/51_unigram.model?dl=1 skip_prep: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 6 batch_size: 16 lr: 0.0003 # token_type: unigram # ["unigram", "bpe", "char"] sorting: random -# Model parameters +####################### Model Parameters ####################################### sample_rate: 16000 emb_size: 128 dec_neurons: 512 @@ -47,17 +56,78 @@ slu_beam_size: 80 eos_threshold: 1.5 temperature: 1.25 +num_workers: 4 dataloader_opts: + num_workers: !ref batch_size: !ref shuffle: True epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Models -asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams - source: speechbrain/asr-crdnn-rnnlm-librispeech - run_opts: {"device":"cuda:0"} +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Download and prepare the dataset of room impulse responses for augmentation +prepare_rir_data: 
!name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add reverberation to input signal +add_reverb: !new:speechbrain.augment.time_domain.AddReverb + csv_file: !ref + reverb_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 9 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 2 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + shuffle_augmentations: True + min_augmentations: 1 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +asr_model_source: speechbrain/asr-crdnn-rnnlm-librispeech slu_enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] @@ -89,57 +159,8 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -augment_wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [100] - -augment_speed: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - -add_rev: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 1.0 - noise_prob: 0.0 
- noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - -add_rev_noise: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - openrir_max_noise_len: 3.0 # seconds - reverb_prob: 1.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - rir_scale_factor: 1.0 - - -augment_pipeline: [ - !ref , - !ref , - !ref , - !ref , - !ref -] - modules: - augment_wavedrop: !ref - augment_speed: !ref - add_rev: !ref - add_noise: !ref - add_rev_noise: !ref slu_enc: !ref output_emb: !ref dec: !ref @@ -171,7 +192,6 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher temperature: !ref using_max_attn_shift: False max_attn_shift: 30 - coverage_penalty: 0. opt_class: !name:torch.optim.Adam lr: !ref diff --git a/recipes/fluent-speech-commands/direct/train.py b/recipes/fluent-speech-commands/direct/train.py index 34819e846d..534335f6e0 100644 --- a/recipes/fluent-speech-commands/direct/train.py +++ b/recipes/fluent-speech-commands/direct/train.py @@ -16,13 +16,15 @@ """ import sys + import torch -import speechbrain as sb -import logging from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -34,30 +36,13 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, tokens_bos_lens = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - # Applying the augmentation pipeline - wavs_aug_tot = [] - wavs_aug_tot.append(wavs) - for count, augment in enumerate(self.hparams.augment_pipeline): - - # Apply augment 
- wavs_aug = augment(wavs, wav_lens) - - # Managing speed change - if wavs_aug.shape[1] > wavs.shape[1]: - wavs_aug = wavs_aug[:, 0 : wavs.shape[1]] - else: - zero_sig = torch.zeros_like(wavs) - zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug - wavs_aug = zero_sig - - wavs_aug_tot.append(wavs_aug) - - wavs = torch.cat(wavs_aug_tot, dim=0) - self.n_augment = len(wavs_aug_tot) - wav_lens = torch.cat([wav_lens] * self.n_augment) - tokens_bos = torch.cat([tokens_bos] * self.n_augment) + # Add waveform augmentation if specified. + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + tokens_bos_lens = self.hparams.wav_augment.replicate_labels( + tokens_bos_lens + ) # ASR encoder forward pass with torch.no_grad(): @@ -75,40 +60,27 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - return p_seq, wav_lens - else: - p_tokens, scores = self.hparams.beam_searcher(encoder_out, wav_lens) - return p_seq, wav_lens, p_tokens + p_tokens = None + if stage != sb.Stage.TRAIN: + p_tokens, _, _, _ = self.hparams.beam_searcher( + encoder_out, wav_lens + ) + + return p_seq, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (NLL) given predictions and targets.""" - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - p_seq, wav_lens = predictions - else: - p_seq, wav_lens, predicted_tokens = predictions + p_seq, wav_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens - if hasattr(self.hparams, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, 
tokens_eos_lens], dim=0 - ) - - if stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos] * self.n_augment, dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens] * self.n_augment, dim=0 + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens ) loss_seq = self.hparams.seq_cost( @@ -118,9 +90,7 @@ def compute_objectives(self, predictions, batch, stage): # (No ctc loss) loss = loss_seq - if (stage != sb.Stage.TRAIN) or ( - self.batch_count % show_results_every == 0 - ): + if (stage != sb.Stage.TRAIN) or (self.step % show_results_every == 0): # Decode token terms to words predicted_semantics = [ tokenizer.decode_ids(utt_seq).split(" ") @@ -144,29 +114,10 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" - self.batch_count = 0 if stage != sb.Stage.TRAIN: - self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() @@ -190,25 +141,31 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"WER": stage_stats["WER"]}, min_keys=["WER"], + meta={"WER": stage_stats["WER"]}, + 
min_keys=["WER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -233,12 +190,14 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_valid"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_valid"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_test"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_test"], + replacements={"data_root": data_folder}, ) test_data = test_data.filtered_sorted(sort_key="duration") @@ -282,15 +241,13 @@ def text_pipeline(semantics): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If --distributed_launch then # create ddp_group with the right 
communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -313,13 +270,22 @@ def text_pipeline(semantics): "skip_prep": hparams["skip_prep"], }, ) - + run_on_main(hparams["prepare_noise_data"]) + run_on_main(hparams["prepare_rir_data"]) # here we create the datasets objects as well as tokenization and encoding - (train_set, valid_set, test_set, tokenizer,) = dataio_prepare(hparams) + (train_set, valid_set, test_set, tokenizer) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # Download pretrained ASR model + from speechbrain.inference.ASR import EncoderDecoderASR + + hparams["asr_model"] = EncoderDecoderASR.from_hparams( + source=hparams["asr_model_source"], + run_opts={"device": run_opts["device"]}, + ) # Brain class initialization slu_brain = SLU( @@ -343,5 +309,4 @@ def text_pipeline(semantics): ) # Test - slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt" slu_brain.evaluate(test_set, test_loader_kwargs=hparams["dataloader_opts"]) diff --git a/recipes/fluent-speech-commands/extra_requirements.txt b/recipes/fluent-speech-commands/extra_requirements.txt deleted file mode 100644 index fb6c7ed7ec..0000000000 --- a/recipes/fluent-speech-commands/extra_requirements.txt +++ /dev/null @@ -1 +0,0 @@ -pandas diff --git a/recipes/fluent-speech-commands/prepare.py b/recipes/fluent-speech-commands/prepare.py index 250df45255..3fb241055e 100644 --- a/recipes/fluent-speech-commands/prepare.py +++ b/recipes/fluent-speech-commands/prepare.py @@ -1,6 +1,7 @@ import os -import logging + from speechbrain.dataio.dataio import read_audio +from speechbrain.utils.logger import get_logger try: import pandas as pd @@ -11,7 +12,7 @@ err_msg += "Install using `pip install pandas`.\n" raise ImportError(err_msg) -logger = 
logging.getLogger(__name__) +logger = get_logger(__name__) def prepare_FSC(data_folder, save_folder, skip_prep=False): diff --git a/recipes/timers-and-such/LM/hparams/train.yaml b/recipes/timers-and-such/LM/hparams/train.yaml index 485dd54265..27d1bf1ae1 100644 --- a/recipes/timers-and-such/LM/hparams/train.yaml +++ b/recipes/timers-and-such/LM/hparams/train.yaml @@ -8,7 +8,7 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1992 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] output_folder: !ref results/RNNLM_1024/ save_folder: !ref /save train_log: !ref /train_log.txt @@ -23,7 +23,7 @@ csv_test_synth: !ref /test-synth-type=decoupled.csv csv_test_real: !ref /test-real-type=decoupled.csv skip_prep: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 10 batch_size: 128 lr: 0.0003 diff --git a/recipes/timers-and-such/LM/train.py b/recipes/timers-and-such/LM/train.py index d88c995300..d07f4dc6b4 100644 --- a/recipes/timers-and-such/LM/train.py +++ b/recipes/timers-and-such/LM/train.py @@ -10,9 +10,11 @@ """ import sys + import torch -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main @@ -38,27 +40,6 @@ def compute_objectives(self, predictions, batch, stage): loss = self.hparams.seq_cost(p_seq, tokens_eos, length=tokens_eos_lens) return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" 
- predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - - def on_stage_start(self, stage, epoch): - """Gets called at the beginning of each epoch""" - self.batch_count = 0 - def on_stage_end(self, stage, stage_loss, epoch): """Gets called at the end of a epoch.""" # Compute/store important stats @@ -76,7 +57,8 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"loss": stage_stats["loss"]}, min_keys=["loss"], + meta={"loss": stage_stats["loss"]}, + min_keys=["loss"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( @@ -87,12 +69,14 @@ def on_stage_end(self, stage, stage_loss, epoch): def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. 
+ """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -117,7 +101,8 @@ def dataio_prepare(hparams): ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_valid"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_valid"], + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -155,21 +140,20 @@ def text_pipeline(transcript): sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) sb.dataio.dataset.set_output_keys( - datasets, ["id", "transcript", "tokens_bos", "tokens_eos", "tokens"], + datasets, + ["id", "transcript", "tokens_bos", "tokens_eos", "tokens"], ) return train_data, valid_data, test_real_data, test_synth_data, tokenizer if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -189,8 +173,8 @@ def text_pipeline(transcript): ) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Create experiment directory sb.create_experiment_directory( diff --git a/recipes/timers-and-such/README.md b/recipes/timers-and-such/README.md index f5140e0d42..bea7852ebc 100644 --- a/recipes/timers-and-such/README.md +++ 
b/recipes/timers-and-such/README.md @@ -1,6 +1,14 @@ # SLU recipes for Timers and Such v1.0 This folder contains recipes for spoken language understanding (SLU) with [Timers and Such v1.0](https://zenodo.org/record/4623772#.YGeMMHVKg5k), an SLU dataset with a (train/dev/test) set of synthetic speech and a (train/dev/test) set of real speech. +## Installing Extra Dependencies + +Before proceeding, ensure you have installed the necessary additional dependencies. To do this, simply run the following command in your terminal: + +``` +pip install -r extra_requirements.txt +``` + ### LM recipe This recipe trains a language model (LM) on Timers and Such transcripts. (It is not necessary to run this before running the other recipes, as they download a trained checkpoint.) @@ -61,7 +69,7 @@ The third uses the "960 Hr" variant of wav2vec 2.0, which is finetuned on LibriS | wav2vec 2.0 "Base" | 92.7% ± 1.0% | none | | wav2vec 2.0 "960 Hr" | 94.0% ± 1.2% | none | -You can find the output folders (model, logs, etc) [here](https://drive.google.com/drive/folders/1x2crmemZj2uxdzyOM_nlfuHxlTCP-9_-?usp=sharing) +You can find the output folders (model, logs, etc) [here](https://www.dropbox.com/sh/gmmum179ig9wz0x/AAAOSOi11yVymGXHp9LzYNrqa?dl=0) # PreTrained Model + Easy-Inference You can find the pre-trained model with an easy-inference function on [HuggingFace](https://huggingface.co/speechbrain/slu-timers-and-such-direct-librispeech-asr). @@ -75,12 +83,12 @@ The dataset and baseline models are described in the paper below. 
If you found t ``` @misc{lugosch2021timers, - title={Timers and Such: A Practical Benchmark for Spoken Language Understanding with Numbers}, - author={Lugosch, Loren and Papreja, Piyush and Ravanelli, Mirco and Heba, Abdelwahab and Parcollet, Titouan}, - year={2021}, - eprint={2104.01604}, - archivePrefix={arXiv}, - primaryClass={cs.CL} + title={Timers and Such: A Practical Benchmark for Spoken Language Understanding with Numbers}, + author={Lugosch, Loren and Papreja, Piyush and Ravanelli, Mirco and Heba, Abdelwahab and Parcollet, Titouan}, + year={2021}, + eprint={2104.01604}, + archivePrefix={arXiv}, + primaryClass={cs.CL} } ``` @@ -94,6 +102,15 @@ The dataset and baseline models are described in the paper below. If you found t Please, cite SpeechBrain if you use it for your research or business. ```bibtex +@misc{speechbrainV1, + title={Open-Source Conversational AI with SpeechBrain 1.0}, + author={Mirco Ravanelli and Titouan Parcollet and Adel Moumen and Sylvain de Langen and Cem Subakan and Peter Plantinga and Yingzhi Wang and Pooneh Mousavi and Luca Della Libera and Artem Ploujnikov and Francesco Paissan and Davide Borra and Salah Zaiem and Zeyu Zhao and Shucong Zhang and Georgios Karakasidis and Sung-Lin Yeh and Pierre Champion and Aku Rouhe and Rudolf Braun and Florian Mai and Juan Zuluaga-Gomez and Seyed Mahed Mousavi and Andreas Nautsch and Xuechen Liu and Sangeet Sagar and Jarod Duret and Salima Mdhaffar and Gaelle Laperriere and Mickael Rouvier and Renato De Mori and Yannick Esteve}, + year={2024}, + eprint={2407.00463}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2407.00463}, +} @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and 
Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, diff --git a/recipes/timers-and-such/Tokenizer/hparams/tokenizer_bpe51.yaml b/recipes/timers-and-such/Tokenizer/hparams/tokenizer_bpe51.yaml index a4ee9a839d..2a9f39161d 100644 --- a/recipes/timers-and-such/Tokenizer/hparams/tokenizer_bpe51.yaml +++ b/recipes/timers-and-such/Tokenizer/hparams/tokenizer_bpe51.yaml @@ -10,11 +10,12 @@ output_folder: !ref results/tokenizer_bpe51/ # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/timers-and-such train_splits: ["train-synth", "train-real"] +skip_prep: False train_csv: !ref /train-type=direct.csv valid_csv: !ref /dev-real-type=direct.csv -# Training parameters +####################### Training Parameters #################################### token_type: unigram # ["unigram", "bpe", "char"] token_output: 51 # index(blank/eos/bos/unk) = 0 character_coverage: 1.0 diff --git a/recipes/timers-and-such/Tokenizer/train.py b/recipes/timers-and-such/Tokenizer/train.py index d8dacb776b..af77cdb3ea 100644 --- a/recipes/timers-and-such/Tokenizer/train.py +++ b/recipes/timers-and-such/Tokenizer/train.py @@ -1,6 +1,6 @@ #!/usr/bin/env/python3 """Recipe for training a BPE tokenizer with timers-and-such. -The tokenizer coverts semantics into sub-word units that can +The tokenizer converts semantics into sub-word units that can be used to train a language (LM) or an acoustic model (AM). 
To run this recipe, do the following: @@ -14,18 +14,18 @@ """ import sys -import speechbrain as sb + from hyperpyyaml import load_hyperpyyaml + +import speechbrain as sb from speechbrain.utils.distributed import run_on_main if __name__ == "__main__": - # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -47,6 +47,7 @@ "save_folder": hparams["output_folder"], "train_splits": hparams["train_splits"], "type": "direct", + "skip_prep": hparams["skip_prep"], }, ) diff --git a/recipes/timers-and-such/decoupled/hparams/train_LS_LM.yaml b/recipes/timers-and-such/decoupled/hparams/train_LS_LM.yaml index cc7ee9e2f3..7254aafc96 100644 --- a/recipes/timers-and-such/decoupled/hparams/train_LS_LM.yaml +++ b/recipes/timers-and-such/decoupled/hparams/train_LS_LM.yaml @@ -10,11 +10,14 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] experiment: train-real-and-synth output_folder: !ref results/LS_LM// save_folder: !ref /save train_log: !ref /train_log.txt +all_real_wer_file: !ref /all_real_wer.txt +test_real_wer_file: !ref /test_real_wer.txt +test_synth_wer_file: !ref /test_synth_wer.txt # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/timers-and-such @@ -31,7 +34,7 @@ skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min test_on_all_real: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 1 batch_size: 16 lr: 0.0003 @@ -64,7 +67,7 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref # Models -asr_model: 
!apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams +asr_model: !apply:speechbrain.inference.ASR.EncoderDecoderASR.from_hparams source: speechbrain/asr-crdnn-rnnlm-librispeech run_opts: {"device":"cuda:0"} overrides: {"beam_size": !ref } @@ -135,8 +138,6 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher eos_threshold: !ref temperature: !ref using_max_attn_shift: False - max_attn_shift: 30 - coverage_penalty: 0. opt_class: !name:torch.optim.Adam lr: !ref diff --git a/recipes/timers-and-such/decoupled/hparams/train_TAS_LM.yaml b/recipes/timers-and-such/decoupled/hparams/train_TAS_LM.yaml index 6e7c994f68..0497159c84 100644 --- a/recipes/timers-and-such/decoupled/hparams/train_TAS_LM.yaml +++ b/recipes/timers-and-such/decoupled/hparams/train_TAS_LM.yaml @@ -10,11 +10,14 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] experiment: train-real-and-synth output_folder: !ref results/TAS_LM// save_folder: !ref /save train_log: !ref /train_log.txt +all_real_wer_file: !ref /all_real_wer.txt +test_real_wer_file: !ref /test_real_wer.txt +test_synth_wer_file: !ref /test_synth_wer.txt # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/timers-and-such @@ -31,7 +34,7 @@ skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min test_on_all_real: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 1 batch_size: 16 lr: 0.0003 @@ -65,9 +68,10 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref # Models -asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams +asr_model: !apply:speechbrain.inference.ASR.EncoderDecoderASR.from_hparams source: speechbrain/asr-crdnn-rnnlm-librispeech run_opts: {"device":"cuda:0"} + savedir: !ref /pretrained_models overrides: beam_size: !ref 
lm_model: @@ -75,9 +79,9 @@ asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams embedding_dim: 128 dropout: 0. rnn_layers: 2 - rnn_neurons: 2048 + rnn_neurons: 1024 dnn_blocks: 1 - dnn_neurons: 512 + dnn_neurons: 1024 return_hidden: True pretrainer: paths: @@ -150,8 +154,6 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher eos_threshold: !ref temperature: !ref using_max_attn_shift: False - max_attn_shift: 30 - coverage_penalty: 0. opt_class: !name:torch.optim.Adam lr: !ref diff --git a/recipes/timers-and-such/decoupled/train.py b/recipes/timers-and-such/decoupled/train.py index ffdd3aecfa..ad7b55ece4 100644 --- a/recipes/timers-and-such/decoupled/train.py +++ b/recipes/timers-and-such/decoupled/train.py @@ -13,10 +13,12 @@ """ import sys + import torch -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main # Define training procedure @@ -68,27 +70,18 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - return p_seq, asr_tokens_lens - else: - p_tokens, scores = self.hparams.beam_searcher( + p_tokens = None + if stage != sb.Stage.TRAIN: + p_tokens, _, _, _ = self.hparams.beam_searcher( encoder_out, asr_tokens_lens ) - return p_seq, asr_tokens_lens, p_tokens + + return p_seq, asr_tokens_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (NLL) given predictions and targets.""" - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - p_seq, asr_tokens_lens = predictions - else: - p_seq, asr_tokens_lens, predicted_tokens = predictions + p_seq, asr_tokens_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos @@ -101,9 +94,7 
@@ def compute_objectives(self, predictions, batch, stage): # (No ctc loss) loss = loss_seq - if (stage != sb.Stage.TRAIN) or ( - self.batch_count % show_results_every == 0 - ): + if (stage != sb.Stage.TRAIN) or (self.step % show_results_every == 0): # Decode token terms to words predicted_semantics = [ tokenizer.decode_ids(utt_seq).split(" ") @@ -127,29 +118,10 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" - self.batch_count = 0 if stage != sb.Stage.TRAIN: - self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() @@ -174,25 +146,31 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"SER": stage_stats["SER"]}, min_keys=["SER"], + meta={"SER": stage_stats["SER"]}, + min_keys=["SER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def data_io_prepare(hparams): """This function prepares the datasets to be used in the 
brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -223,7 +201,8 @@ def data_io_prepare(hparams): else: valid_path = hparams["csv_dev_real"] valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=valid_path, replacements={"data_root": data_folder}, + csv_path=valid_path, + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -306,15 +285,13 @@ def text_pipeline(semantics): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -351,8 +328,8 @@ def text_pipeline(semantics): ) = data_io_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Brain class initialization slu_brain = SLU( @@ -377,9 +354,7 @@ def text_pipeline(semantics): # Test (ALL real data) if slu_brain.hparams.test_on_all_real: - slu_brain.hparams.wer_file = ( - hparams["output_folder"] + "/wer_all_real.txt" - ) + slu_brain.hparams.test_wer_file = hparams["all_real_wer_file"] slu_brain.evaluate( 
all_real_set, test_loader_kwargs=hparams["dataloader_opts"], @@ -387,7 +362,7 @@ def text_pipeline(semantics): ) # Test (real data) - slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt" + slu_brain.hparams.test_wer_file = hparams["test_real_wer_file"] slu_brain.evaluate( test_real_set, test_loader_kwargs=hparams["dataloader_opts"], @@ -395,9 +370,7 @@ def text_pipeline(semantics): ) # Test (synth data) - slu_brain.hparams.wer_file = ( - hparams["output_folder"] + "/wer_test_synth.txt" - ) + slu_brain.hparams.test_wer_file = hparams["test_synth_wer_file"] slu_brain.evaluate( test_synth_set, test_loader_kwargs=hparams["dataloader_opts"], diff --git a/recipes/timers-and-such/direct/hparams/train.yaml b/recipes/timers-and-such/direct/hparams/train.yaml index 9eec9ebc9b..173c355d27 100644 --- a/recipes/timers-and-such/direct/hparams/train.yaml +++ b/recipes/timers-and-such/direct/hparams/train.yaml @@ -10,35 +10,42 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] experiment: train-real-and-synth output_folder: !ref results// save_folder: !ref /save train_log: !ref /train_log.txt +all_real_wer_file: !ref /all_real_wer.txt +test_real_wer_file: !ref /test_real_wer.txt +test_synth_wer_file: !ref /test_synth_wer.txt # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/timers-and-such -data_folder_rirs: !ref train_splits: ["train-synth", "train-real"] -csv_train: !ref /train-type=direct.csv -csv_dev_real: !ref /dev-real-type=direct.csv -csv_dev_synth: !ref /dev-synth-type=direct.csv -csv_test_real: !ref /test-real-type=direct.csv -csv_test_synth: !ref /test-synth-type=direct.csv -csv_all_real: !ref /all-real-type=direct.csv +csv_train: !ref /train-type=direct.csv +csv_dev_real: !ref /dev-real-type=direct.csv +csv_dev_synth: !ref /dev-synth-type=direct.csv +csv_test_real: !ref 
/test-real-type=direct.csv +csv_test_synth: !ref /test-synth-type=direct.csv +csv_all_real: !ref /all-real-type=direct.csv tokenizer_file: https://huggingface.co/speechbrain/slu-timers-and-such-direct-librispeech-asr/resolve/main/tokenizer.ckpt skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min test_on_all_real: False -# Training parameters +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + +####################### Training Parameters #################################### number_of_epochs: 1 batch_size: 16 lr: 0.0003 # token_type: unigram # ["unigram", "bpe", "char"] sorting: random -# Model parameters +####################### Model Parameters ####################################### sample_rate: 16000 emb_size: 128 dec_neurons: 512 @@ -55,17 +62,68 @@ slu_beam_size: 80 eos_threshold: 1.5 temperature: 1.25 +num_workers: 4 dataloader_opts: + num_workers: !ref batch_size: !ref shuffle: True epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Models -asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams - source: speechbrain/asr-crdnn-rnnlm-librispeech - run_opts: {"device":"cuda:0"} +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed 
perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. +drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +asr_model_source: speechbrain/asr-crdnn-rnnlm-librispeech slu_enc: !new:speechbrain.nnet.containers.Sequential input_shape: [null, null, !ref ] @@ -97,20 +155,12 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 modules: slu_enc: !ref output_emb: !ref dec: !ref seq_lin: !ref - env_corrupt: !ref model: !new:torch.nn.ModuleList - [!ref , !ref , @@ -137,8 +187,6 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher eos_threshold: !ref temperature: !ref using_max_attn_shift: False - max_attn_shift: 30 - coverage_penalty: 0. 
opt_class: !name:torch.optim.Adam lr: !ref @@ -156,9 +204,6 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer scheduler: !ref counter: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True diff --git a/recipes/timers-and-such/direct/hparams/train_with_wav2vec2.yaml b/recipes/timers-and-such/direct/hparams/train_with_wav2vec2.yaml index 0be937a3cb..df1b9b6e7f 100644 --- a/recipes/timers-and-such/direct/hparams/train_with_wav2vec2.yaml +++ b/recipes/timers-and-such/direct/hparams/train_with_wav2vec2.yaml @@ -10,11 +10,14 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] experiment: wav2vec2-train-real output_folder: !ref results// save_folder: !ref /save train_log: !ref /train_log.txt +all_real_wer_file: !ref /all_real_wer.txt +test_real_wer_file: !ref /test_real_wer.txt +test_synth_wer_file: !ref /test_synth_wer.txt # URL for the wav2vec 2.0 model, you can change to benchmark different models wav2vec2_hub: "facebook/wav2vec2-base-960h" @@ -34,7 +37,7 @@ ckpt_interval_minutes: 15 # save checkpoint every N min test_on_all_real: False -# Training parameters +####################### Training Parameters #################################### number_of_epochs: 50 batch_size: 8 lr: 0.0004 @@ -46,7 +49,7 @@ freeze_wav2vec: False # token_type: unigram # ["unigram", "bpe", "char"] sorting: ascending -# Model parameters +####################### Model Parameters ####################################### sample_rate: 16000 emb_size: 128 dec_neurons: 512 @@ -70,7 +73,7 @@ dataloader_opts: epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -wav2vec2: !new:speechbrain.lobes.models.huggingface_wav2vec.HuggingFaceWav2Vec2 +wav2vec2: 
!new:speechbrain.integrations.huggingface.wav2vec2.Wav2Vec2 source: !ref output_norm: True freeze: !ref @@ -140,7 +143,6 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher temperature: !ref using_max_attn_shift: False max_attn_shift: 30 - coverage_penalty: 0. opt_class: !name:torch.optim.Adam lr: !ref @@ -168,10 +170,39 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer lr_annealing_wav2vec2: !ref counter: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref +############################## Augmentations ################################### + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref speeds: [95, 100, 105] +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref ] + log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True diff --git a/recipes/timers-and-such/direct/train.py b/recipes/timers-and-such/direct/train.py index 9fc88dda8d..10dbe3d39f 100644 --- a/recipes/timers-and-such/direct/train.py +++ b/recipes/timers-and-such/direct/train.py @@ -16,13 +16,15 @@ """ import sys + import torch -import speechbrain as sb -import logging from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main -logger = logging.getLogger(__name__) +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger + +logger = get_logger(__name__) # Define training procedure @@ -34,16 +36,13 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, tokens_bos_lens = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "env_corrupt"): - wavs_noise = self.hparams.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) - tokens_bos_lens = torch.cat([tokens_bos_lens, tokens_bos_lens]) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + tokens_bos_lens = self.hparams.wav_augment.replicate_labels( + tokens_bos_lens + ) # ASR encoder forward pass with torch.no_grad(): @@ -61,34 +60,26 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - return p_seq, wav_lens - else: - p_tokens, scores = self.hparams.beam_searcher(encoder_out, wav_lens) - return p_seq, wav_lens, p_tokens + p_tokens = None + if stage != sb.Stage.TRAIN: + p_tokens, _, _, _ = self.hparams.beam_searcher( + encoder_out, wav_lens + ) + + return p_seq, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (NLL) given predictions and targets.""" - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - p_seq, wav_lens = predictions - else: - p_seq, wav_lens, predicted_tokens = predictions + p_seq, wav_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos - tokens, tokens_lens = batch.tokens - if hasattr(self.hparams, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens ) loss_seq = self.hparams.seq_cost( @@ -98,9 +89,7 @@ def compute_objectives(self, predictions, batch, stage): # (No ctc loss) loss = loss_seq - if (stage != sb.Stage.TRAIN) or ( - self.batch_count % show_results_every == 0 - ): + if (stage != sb.Stage.TRAIN) or (self.step % 
show_results_every == 0): # Decode token terms to words predicted_semantics = [ tokenizer.decode_ids(utt_seq).split(" ") @@ -124,29 +113,10 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" - self.batch_count = 0 if stage != sb.Stage.TRAIN: - self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() @@ -171,25 +141,31 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"SER": stage_stats["SER"]}, min_keys=["SER"], + meta={"SER": stage_stats["SER"]}, + min_keys=["SER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. 
+ """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -220,7 +196,8 @@ def dataio_prepare(hparams): else: valid_path = hparams["csv_dev_real"] valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=valid_path, replacements={"data_root": data_folder}, + csv_path=valid_path, + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -295,15 +272,13 @@ def text_pipeline(semantics): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -328,6 +303,7 @@ def text_pipeline(semantics): "skip_prep": hparams["skip_prep"], }, ) + run_on_main(hparams["prepare_noise_data"]) # here we create the datasets objects as well as tokenization and encoding ( @@ -340,8 +316,16 @@ def text_pipeline(semantics): ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() + + # Download pretrained ASR model + from speechbrain.inference.ASR import EncoderDecoderASR + + hparams["asr_model"] = EncoderDecoderASR.from_hparams( + source=hparams["asr_model_source"], + run_opts={"device": run_opts["device"]}, + ) # Brain class initialization slu_brain = SLU( @@ -366,9 +350,7 
@@ def text_pipeline(semantics): # Test (ALL real data) if slu_brain.hparams.test_on_all_real: - slu_brain.hparams.wer_file = ( - hparams["output_folder"] + "/wer_all_real.txt" - ) + slu_brain.hparams.test_wer_file = hparams["all_real_wer_file"] slu_brain.evaluate( all_real_set, test_loader_kwargs=hparams["dataloader_opts"], @@ -376,7 +358,7 @@ def text_pipeline(semantics): ) # Test (real data) - slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt" + slu_brain.hparams.test_wer_file = hparams["test_real_wer_file"] slu_brain.evaluate( test_real_set, test_loader_kwargs=hparams["dataloader_opts"], @@ -384,9 +366,7 @@ def text_pipeline(semantics): ) # Test (synth data) - slu_brain.hparams.wer_file = ( - hparams["output_folder"] + "/wer_test_synth.txt" - ) + slu_brain.hparams.test_wer_file = hparams["test_synth_wer_file"] slu_brain.evaluate( test_synth_set, test_loader_kwargs=hparams["dataloader_opts"], diff --git a/recipes/timers-and-such/direct/train_with_wav2vec2.py b/recipes/timers-and-such/direct/train_with_wav2vec2.py index 698ac8906f..84c53569a0 100644 --- a/recipes/timers-and-such/direct/train_with_wav2vec2.py +++ b/recipes/timers-and-such/direct/train_with_wav2vec2.py @@ -17,14 +17,15 @@ """ import sys + import torch -import speechbrain as sb -import logging from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main +from speechbrain.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Define training procedure @@ -36,12 +37,13 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, tokens_bos_lens = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + # wav2vec forward pass - wav2vec2_out = self.modules.wav2vec2(wavs) + wav2vec2_out = self.modules.wav2vec2(wavs, wav_lens) # SLU forward pass encoder_out = self.hparams.slu_enc(wav2vec2_out) e_in = self.hparams.output_emb(tokens_bos) @@ -51,22 +53,18 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): + if stage == sb.Stage.TRAIN and self.step % show_results_every != 0: return p_seq, wav_lens else: - p_tokens, scores = self.hparams.beam_searcher(encoder_out, wav_lens) + p_tokens, _, _, _ = self.hparams.beam_searcher( + encoder_out, wav_lens + ) return p_seq, wav_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (NLL) given predictions and targets.""" - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): + if stage == sb.Stage.TRAIN and self.step % show_results_every != 0: p_seq, wav_lens = predictions else: p_seq, wav_lens, predicted_tokens = predictions @@ -75,15 +73,20 @@ def compute_objectives(self, predictions, batch, stage): tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens + # Label Augmentation + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens + ) + loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens ) loss = loss_seq - if (stage != sb.Stage.TRAIN) or ( - self.batch_count % show_results_every == 0 - ): + if (stage != sb.Stage.TRAIN) or (self.step % show_results_every == 0): # Decode token terms to words predicted_semantics = [ 
tokenizer.decode_ids(utt_seq).split(" ") @@ -107,34 +110,10 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - - loss.backward() - if self.check_gradients(loss): - self.wav2vec2_optimizer.step() - self.optimizer.step() - - self.wav2vec2_optimizer.zero_grad() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" - self.batch_count = 0 if stage != sb.Stage.TRAIN: - self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() @@ -172,15 +151,19 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"SER": stage_stats["SER"]}, min_keys=["SER"], + meta={"SER": stage_stats["SER"]}, + min_keys=["SER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def init_optimizers(self): "Initializes the wav2vec2 optimizer and model optimizer" @@ -195,15 +178,22 @@ def init_optimizers(self): ) self.checkpointer.add_recoverable("optimizer", self.optimizer) + self.optimizers_dict = { + "wav2vec2_optimizer": self.wav2vec2_optimizer, + "model_optimizer": self.optimizer, + } 
+ def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. + """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -234,7 +224,8 @@ def dataio_prepare(hparams): else: valid_path = hparams["csv_dev_real"] valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=valid_path, replacements={"data_root": data_folder}, + csv_path=valid_path, + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -309,15 +300,13 @@ def text_pipeline(semantics): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -354,8 +343,8 @@ def text_pipeline(semantics): ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() hparams["wav2vec2"] = hparams["wav2vec2"].to(run_opts["device"]) @@ -382,9 +371,7 @@ def text_pipeline(semantics): # Test (ALL real data) if slu_brain.hparams.test_on_all_real: - slu_brain.hparams.wer_file = ( - hparams["output_folder"] + 
"/wer_all_real.txt" - ) + slu_brain.hparams.test_wer_file = hparams["all_real_wer_file"] slu_brain.evaluate( all_real_set, test_loader_kwargs=hparams["dataloader_opts"], @@ -392,7 +379,7 @@ def text_pipeline(semantics): ) # Test (real data) - slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt" + slu_brain.hparams.test_wer_file = hparams["test_real_wer_file"] slu_brain.evaluate( test_real_set, test_loader_kwargs=hparams["dataloader_opts"], @@ -400,9 +387,7 @@ def text_pipeline(semantics): ) # Test (synth data) - slu_brain.hparams.wer_file = ( - hparams["output_folder"] + "/wer_test_synth.txt" - ) + slu_brain.hparams.test_wer_file = hparams["test_synth_wer_file"] slu_brain.evaluate( test_synth_set, test_loader_kwargs=hparams["dataloader_opts"], diff --git a/recipes/timers-and-such/extra_requirements.txt b/recipes/timers-and-such/extra_requirements.txt index baf9366ee8..0fd75fab3e 100644 --- a/recipes/timers-and-such/extra_requirements.txt +++ b/recipes/timers-and-such/extra_requirements.txt @@ -1,2 +1 @@ inflect -pandas diff --git a/recipes/timers-and-such/multistage/hparams/train_LS_LM.yaml b/recipes/timers-and-such/multistage/hparams/train_LS_LM.yaml index e8c6e96a45..bd58daa7ef 100644 --- a/recipes/timers-and-such/multistage/hparams/train_LS_LM.yaml +++ b/recipes/timers-and-such/multistage/hparams/train_LS_LM.yaml @@ -10,15 +10,17 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] experiment: train-real-and-synth output_folder: !ref results/LS_LM// save_folder: !ref /save train_log: !ref /train_log.txt +all_real_wer_file: !ref /all_real_wer.txt +test_real_wer_file: !ref /test_real_wer.txt +test_synth_wer_file: !ref /test_synth_wer.txt # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/timers-and-such -data_folder_rirs: !ref train_splits: ["train-synth", "train-real"] csv_train: !ref 
/train-type=multistage.csv csv_dev_real: !ref /dev-real-type=multistage.csv @@ -31,7 +33,13 @@ skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min test_on_all_real: False -# Training parameters +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. +NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + + +####################### Training Parameters #################################### number_of_epochs: 1 batch_size: 16 lr: 0.0003 @@ -56,15 +64,68 @@ slu_beam_size: 80 eos_threshold: 1.5 temperature: 1.25 +num_workers: 4 dataloader_opts: + num_workers: !ref batch_size: !ref shuffle: True epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref -# Models -asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Models ########################################## + +asr_model: !apply:speechbrain.inference.ASR.EncoderDecoderASR.from_hparams source: speechbrain/asr-crdnn-rnnlm-librispeech run_opts: {"device":"cuda:0"} overrides: {"beam_size": !ref } @@ -103,13 +164,6 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 modules: slu_enc: !ref @@ -117,7 +171,6 @@ modules: output_emb: !ref dec: !ref seq_lin: !ref - env_corrupt: !ref model: !new:torch.nn.ModuleList @@ -146,8 +199,6 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher eos_threshold: !ref temperature: !ref using_max_attn_shift: False - max_attn_shift: 30 - coverage_penalty: 0. 
opt_class: !name:torch.optim.Adam lr: !ref @@ -165,10 +216,6 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer scheduler: !ref counter: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True diff --git a/recipes/timers-and-such/multistage/hparams/train_TAS_LM.yaml b/recipes/timers-and-such/multistage/hparams/train_TAS_LM.yaml index c243060c77..0075ce0aa7 100644 --- a/recipes/timers-and-such/multistage/hparams/train_TAS_LM.yaml +++ b/recipes/timers-and-such/multistage/hparams/train_TAS_LM.yaml @@ -10,15 +10,17 @@ # Seed needs to be set at top of yaml, before objects with parameters are made seed: 1986 -__set_seed: !apply:torch.manual_seed [!ref ] +__set_seed: !apply:speechbrain.utils.seed_everything [!ref ] experiment: train-real-and-synth output_folder: !ref results/TAS_LM// save_folder: !ref /save train_log: !ref /train_log.txt +all_real_wer_file: !ref /all_real_wer.txt +test_real_wer_file: !ref /test_real_wer.txt +test_synth_wer_file: !ref /test_synth_wer.txt # Data files data_folder: !PLACEHOLDER # e.g, /localscratch/timers-and-such -data_folder_rirs: !ref train_splits: ["train-synth", "train-real"] csv_train: !ref /train-type=multistage.csv csv_dev_real: !ref /dev-real-type=multistage.csv @@ -31,7 +33,13 @@ skip_prep: False ckpt_interval_minutes: 15 # save checkpoint every N min test_on_all_real: False -# Training parameters +# Data for augmentation +data_folder_noise: !ref /noise # The noisy sequences for data augmentation will automatically be downloaded here. 
+NOISE_DATASET_URL: https://www.dropbox.com/scl/fi/a09pj97s5ifan81dqhi4n/noises.zip?rlkey=j8b0n9kdjdr32o1f06t0cw5b7&dl=1 +noise_annotation: !ref /noise.csv #The data manifest files are created by the data preparation script + + +####################### Training Parameters #################################### number_of_epochs: 1 batch_size: 16 lr: 0.0003 @@ -56,17 +64,72 @@ slu_beam_size: 80 eos_threshold: 1.5 temperature: 1.25 +num_workers: 4 dataloader_opts: + num_workers: !ref batch_size: !ref shuffle: True epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter limit: !ref +############################## Augmentations ################################### + +# Download and prepare the dataset of noisy sequences for augmentation +prepare_noise_data: !name:speechbrain.augment.preparation.prepare_dataset_from_URL + URL: !ref + dest_folder: !ref + ext: wav + csv_file: !ref + +# Add noise to input signal +add_noise: !new:speechbrain.augment.time_domain.AddNoise + csv_file: !ref + snr_low: 0 + snr_high: 15 + noise_sample_rate: !ref + clean_sample_rate: !ref + num_workers: !ref + +# Speed perturbation +speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb + orig_freq: !ref + speeds: [95, 100, 105] + +# Frequency drop: randomly drops a number of frequency bands to zero. +drop_freq: !new:speechbrain.augment.time_domain.DropFreq + drop_freq_low: 0 + drop_freq_high: 1 + drop_freq_count_low: 1 + drop_freq_count_high: 3 + drop_freq_width: 0.05 + +# Time drop: randomly drops a number of temporal chunks. 
+drop_chunk: !new:speechbrain.augment.time_domain.DropChunk + drop_length_low: 1000 + drop_length_high: 2000 + drop_count_low: 1 + drop_count_high: 5 + +# Augmenter: Combines previously defined augmentations to perform data augmentation +wav_augment: !new:speechbrain.augment.augmenter.Augmenter + concat_original: True + min_augmentations: 4 + max_augmentations: 4 + augment_prob: 1.0 + augmentations: [ + !ref , + !ref , + !ref , + !ref ] + +############################## Models ########################################## + # Models -asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams +asr_model: !apply:speechbrain.inference.ASR.EncoderDecoderASR.from_hparams source: speechbrain/asr-crdnn-rnnlm-librispeech run_opts: {"device":"cuda:0"} + savedir: !ref /pretrained_models overrides: beam_size: !ref lm_model: @@ -74,9 +137,9 @@ asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams embedding_dim: 128 dropout: 0. rnn_layers: 2 - rnn_neurons: 2048 + rnn_neurons: 1024 dnn_blocks: 1 - dnn_neurons: 512 + dnn_neurons: 1024 return_hidden: True pretrainer: paths: @@ -116,21 +179,12 @@ seq_lin: !new:speechbrain.nnet.linear.Linear input_size: !ref n_neurons: !ref -env_corrupt: !new:speechbrain.lobes.augment.EnvCorrupt - openrir_folder: !ref - babble_prob: 0.0 - reverb_prob: 0.0 - noise_prob: 1.0 - noise_snr_low: 0 - noise_snr_high: 15 - modules: slu_enc: !ref input_emb: !ref output_emb: !ref dec: !ref seq_lin: !ref - env_corrupt: !ref model: !new:torch.nn.ModuleList @@ -158,8 +212,6 @@ beam_searcher: !new:speechbrain.decoders.S2SRNNBeamSearcher eos_threshold: !ref temperature: !ref using_max_attn_shift: False - max_attn_shift: 30 - coverage_penalty: 0. 
opt_class: !name:torch.optim.Adam lr: !ref @@ -177,10 +229,6 @@ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer scheduler: !ref counter: !ref -augmentation: !new:speechbrain.lobes.augment.TimeDomainSpecAugment - sample_rate: !ref - speeds: [95, 100, 105] - log_softmax: !new:speechbrain.nnet.activations.Softmax apply_log: True diff --git a/recipes/timers-and-such/multistage/train.py b/recipes/timers-and-such/multistage/train.py index 85a2ac0526..9da6546e00 100644 --- a/recipes/timers-and-such/multistage/train.py +++ b/recipes/timers-and-such/multistage/train.py @@ -19,10 +19,12 @@ """ import sys + import torch -import speechbrain as sb from hyperpyyaml import load_hyperpyyaml -from speechbrain.utils.distributed import run_on_main + +import speechbrain as sb +from speechbrain.utils.distributed import if_main_process, run_on_main # Define training procedure @@ -33,16 +35,13 @@ def compute_forward(self, batch, stage): wavs, wav_lens = batch.sig tokens_bos, tokens_bos_lens = batch.tokens_bos - # Add augmentation if specified - if stage == sb.Stage.TRAIN: - if hasattr(self.hparams, "env_corrupt"): - wavs_noise = self.hparams.env_corrupt(wavs, wav_lens) - wavs = torch.cat([wavs, wavs_noise], dim=0) - wav_lens = torch.cat([wav_lens, wav_lens]) - tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) - tokens_bos_lens = torch.cat([tokens_bos_lens, tokens_bos_lens]) - if hasattr(self.hparams, "augmentation"): - wavs = self.hparams.augmentation(wavs, wav_lens) + # Add waveform augmentation if specified. 
+ if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + wavs, wav_lens = self.hparams.wav_augment(wavs, wav_lens) + tokens_bos = self.hparams.wav_augment.replicate_labels(tokens_bos) + tokens_bos_lens = self.hparams.wav_augment.replicate_labels( + tokens_bos_lens + ) # ASR forward pass words, asr_tokens = self.hparams.asr_model.transcribe_batch( @@ -75,36 +74,26 @@ def compute_forward(self, batch, stage): p_seq = self.hparams.log_softmax(logits) # Compute outputs - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - return p_seq, asr_tokens_lens - else: - p_tokens, scores = self.hparams.beam_searcher( + p_tokens = None + if stage != sb.Stage.TRAIN: + p_tokens, _, _, _ = self.hparams.beam_searcher( encoder_out, asr_tokens_lens ) - return p_seq, asr_tokens_lens, p_tokens + + return p_seq, asr_tokens_lens, p_tokens def compute_objectives(self, predictions, batch, stage): """Computes the loss (NLL) given predictions and targets.""" - if ( - stage == sb.Stage.TRAIN - and self.batch_count % show_results_every != 0 - ): - p_seq, asr_tokens_lens = predictions - else: - p_seq, asr_tokens_lens, predicted_tokens = predictions + p_seq, asr_tokens_lens, predicted_tokens = predictions ids = batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos - tokens, tokens_lens = batch.tokens - if hasattr(self.hparams, "env_corrupt") and stage == sb.Stage.TRAIN: - tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0) - tokens_eos_lens = torch.cat( - [tokens_eos_lens, tokens_eos_lens], dim=0 + if stage == sb.Stage.TRAIN and hasattr(self.hparams, "wav_augment"): + tokens_eos = self.hparams.wav_augment.replicate_labels(tokens_eos) + tokens_eos_lens = self.hparams.wav_augment.replicate_labels( + tokens_eos_lens ) loss_seq = self.hparams.seq_cost( @@ -114,9 +103,7 @@ def compute_objectives(self, predictions, batch, stage): # (No ctc loss) loss = loss_seq - if (stage != sb.Stage.TRAIN) or ( - self.batch_count % show_results_every == 0 - ): + 
if (stage != sb.Stage.TRAIN) or (self.step % show_results_every == 0): # Decode token terms to words predicted_semantics = [ tokenizer.decode_ids(utt_seq).split(" ") @@ -140,29 +127,10 @@ def compute_objectives(self, predictions, batch, stage): return loss - def fit_batch(self, batch): - """Train the parameters given a single batch in input""" - predictions = self.compute_forward(batch, sb.Stage.TRAIN) - loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) - loss.backward() - if self.check_gradients(loss): - self.optimizer.step() - self.optimizer.zero_grad() - self.batch_count += 1 - return loss.detach() - - def evaluate_batch(self, batch, stage): - """Computations needed for validation/test batches""" - predictions = self.compute_forward(batch, stage=stage) - loss = self.compute_objectives(predictions, batch, stage=stage) - return loss.detach() - def on_stage_start(self, stage, epoch): """Gets called at the beginning of each epoch""" - self.batch_count = 0 if stage != sb.Stage.TRAIN: - self.cer_metric = self.hparams.cer_computer() self.wer_metric = self.hparams.error_rate_computer() @@ -187,25 +155,31 @@ def on_stage_end(self, stage, stage_loss, epoch): valid_stats=stage_stats, ) self.checkpointer.save_and_keep_only( - meta={"SER": stage_stats["SER"]}, min_keys=["SER"], + meta={"SER": stage_stats["SER"]}, + min_keys=["SER"], ) elif stage == sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={"Epoch loaded": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) - with open(self.hparams.wer_file, "w") as w: - self.wer_metric.write_stats(w) + if if_main_process(): + with open( + self.hparams.test_wer_file, "w", encoding="utf-8" + ) as w: + self.wer_metric.write_stats(w) def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. - It also defines the data processing pipeline through user-defined functions.""" + It also defines the data processing pipeline through user-defined functions. 
+ """ data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=hparams["csv_train"], replacements={"data_root": data_folder}, + csv_path=hparams["csv_train"], + replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": @@ -236,7 +210,8 @@ def dataio_prepare(hparams): else: valid_path = hparams["csv_dev_real"] valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( - csv_path=valid_path, replacements={"data_root": data_folder}, + csv_path=valid_path, + replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") @@ -311,15 +286,13 @@ def text_pipeline(semantics): if __name__ == "__main__": - # Load hyperparameters file with command-line overrides hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) - with open(hparams_file) as fin: + with open(hparams_file, encoding="utf-8") as fin: hparams = load_hyperpyyaml(fin, overrides) show_results_every = 100 # plots results every N iterations - # If --distributed_launch then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) @@ -344,6 +317,7 @@ def text_pipeline(semantics): "skip_prep": hparams["skip_prep"], }, ) + run_on_main(hparams["prepare_noise_data"]) # here we create the datasets objects as well as tokenization and encoding ( @@ -356,8 +330,8 @@ def text_pipeline(semantics): ) = dataio_prepare(hparams) # We download and pretrain the tokenizer - run_on_main(hparams["pretrainer"].collect_files) - hparams["pretrainer"].load_collected(device=run_opts["device"]) + hparams["pretrainer"].collect_files() + hparams["pretrainer"].load_collected() # Brain class initialization slu_brain = SLU( @@ -382,9 +356,7 @@ def text_pipeline(semantics): # Test (ALL real data) if slu_brain.hparams.test_on_all_real: - slu_brain.hparams.wer_file = ( - hparams["output_folder"] + "/wer_all_real.txt" - ) + slu_brain.hparams.test_wer_file = 
hparams["all_real_wer_file"] slu_brain.evaluate( all_real_set, test_loader_kwargs=hparams["dataloader_opts"], @@ -392,7 +364,7 @@ def text_pipeline(semantics): ) # Test (real data) - slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt" + slu_brain.hparams.test_wer_file = hparams["test_real_wer_file"] slu_brain.evaluate( test_real_set, test_loader_kwargs=hparams["dataloader_opts"], @@ -400,9 +372,7 @@ def text_pipeline(semantics): ) # Test (synth data) - slu_brain.hparams.wer_file = ( - hparams["output_folder"] + "/wer_test_synth.txt" - ) + slu_brain.hparams.test_wer_file = hparams["test_synth_wer_file"] slu_brain.evaluate( test_synth_set, test_loader_kwargs=hparams["dataloader_opts"], diff --git a/recipes/timers-and-such/prepare.py b/recipes/timers-and-such/prepare.py index f2e0cba2ec..5095eacb14 100644 --- a/recipes/timers-and-such/prepare.py +++ b/recipes/timers-and-such/prepare.py @@ -1,8 +1,9 @@ import os import shutil -import logging -from speechbrain.dataio.dataio import read_audio, merge_csvs + +from speechbrain.dataio.dataio import merge_csvs, read_audio from speechbrain.utils.data_utils import download_file +from speechbrain.utils.logger import get_logger try: import pandas as pd @@ -13,7 +14,7 @@ err_msg += "Install using `pip install pandas`.\n" raise ImportError(err_msg) -logger = logging.getLogger(__name__) +logger = get_logger(__name__) def prepare_TAS(data_folder, save_folder, type, train_splits, skip_prep=False): diff --git a/requirements.txt b/requirements.txt index 095c7d0332..f7dcb86c9d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,16 @@ -r lint-requirements.txt -huggingface_hub>=0.7.0 +huggingface_hub>=0.8.0 hyperpyyaml>=0.0.1 joblib>=0.14.1 numpy>=1.17.0 packaging +pandas>=1.0.1 pre-commit>=2.3.0 -scipy>=1.4.1, <1.9 +requests>=2.20.0 +scipy>=1.4.1 sentencepiece>=0.1.91 -SoundFile; sys_platform == 'win32' -torch>=1.9.0 -torchaudio>=0.9.0 +soundfile>=0.12.1 +torch>=2.1.0 +torchaudio>=2.1.0 tqdm>=4.42.0 
+transformers>=4.30.0 diff --git a/setup.py b/setup.py deleted file mode 100644 index 92d2cd2b40..0000000000 --- a/setup.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -import os -import sys -import site -import setuptools -from distutils.core import setup - - -# Editable install in user site directory can be allowed with this hack: -# https://github.com/pypa/pip/issues/7953. -site.ENABLE_USER_SITE = "--user" in sys.argv[1:] - -with open("README.md") as f: - long_description = f.read() - -with open(os.path.join("speechbrain", "version.txt")) as f: - version = f.read().strip() - -setup( - name="speechbrain", - version=version, - description="All-in-one speech toolkit in pure Python and Pytorch", - long_description=long_description, - long_description_content_type="text/markdown", - author="Mirco Ravanelli & Others", - author_email="speechbrain@gmail.com", - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - ], - packages=setuptools.find_packages(), - package_data={"speechbrain": ["version.txt", "log-config.yaml"]}, - install_requires=[ - "hyperpyyaml", - "joblib", - "numpy", - "packaging", - "scipy", - "sentencepiece", - "torch>=1.9", - "torchaudio", - "tqdm", - "huggingface_hub", - ], - python_requires=">=3.7", - url="https://speechbrain.github.io/", -) diff --git a/speechbrain/__init__.py b/speechbrain/__init__.py index 4a149b0423..483df895e6 100644 --- a/speechbrain/__init__.py +++ b/speechbrain/__init__.py @@ -1,20 +1,22 @@ -""" Comprehensive speech processing toolkit -""" +"""Comprehensive speech processing toolkit""" + import os -from .core import Stage, Brain, create_experiment_directory, parse_arguments -from . import alignment # noqa -from . import dataio # noqa -from . import decoders # noqa -from . import lobes # noqa -from . import lm # noqa -from . import nnet # noqa -from . import processing # noqa -from . import tokenizers # noqa -from . 
import utils # noqa - -with open(os.path.join(os.path.dirname(__file__), "version.txt")) as f: + +# For redirect of HF transformers +import speechbrain.lobes.models # noqa: F401 + +from .core import Brain, Stage, create_experiment_directory +from .utils.importutils import deprecated_redirect, lazy_export_all +from .utils.run_opts import RunOptions + +with open( + os.path.join(os.path.dirname(__file__), "version.txt"), encoding="utf-8" +) as f: version = f.read().strip() +# Create an alias to the refactored function +parse_arguments = RunOptions.from_command_line_args + __all__ = [ "Stage", "Brain", @@ -23,3 +25,47 @@ ] __version__ = version + + +deprecations = { + "speechbrain.k2_integration": "speechbrain.integrations.k2_fsa", + "speechbrain.wordemb": "speechbrain.integrations.huggingface.wordemb", + "speechbrain.lobes.models.huggingface_transformers": "speechbrain.integrations.huggingface", + "speechbrain.lobes.models.spacy": "speechbrain.integrations.nlp", + "speechbrain.lobes.models.flair": "speechbrain.integrations.nlp", +} + + +def make_deprecated_redirections(): + sb1_0_redirect_str = ( + "This is a change from SpeechBrain 1.0. " + "See: https://github.com/speechbrain/speechbrain/releases/tag/v1.0.0" + ) + + deprecated_redirect( + "speechbrain.pretrained", + "speechbrain.inference", + extra_reason=sb1_0_redirect_str, + also_lazy_export=True, + ) + + for old_path, new_path in deprecations.items(): + deprecated_redirect(old_path, new_path, also_lazy_export=True) + + # speechbrain.nnet.loss is not yet loaded at this point, so we cannot use + # also_lazy_export (it would try to access sys.modules['speechbrain.nnet.loss']). + # The sys.modules redirect alone is sufficient for import compatibility. + deprecated_redirect( + "speechbrain.nnet.loss.transducer_loss", + "speechbrain.integrations.numba.transducer_loss", + extra_reason=( + "This module depends on the optional 'numba' package. 
" + "If you encounter an ImportError here, please install numba, " + "for example with: pip install numba" + ), + ) + + +make_deprecated_redirections() + +lazy_export_all(__file__, __name__, export_subpackages=True) diff --git a/speechbrain/alignment/__init__.py b/speechbrain/alignment/__init__.py index 1687db7408..e44e4c84a6 100644 --- a/speechbrain/alignment/__init__.py +++ b/speechbrain/alignment/__init__.py @@ -1,2 +1 @@ -"""Tools for aligning transcripts and speech signals -""" +"""Tools for aligning transcripts and speech signals""" diff --git a/speechbrain/alignment/aligner.py b/speechbrain/alignment/aligner.py index 669fca8e40..1287c507dd 100644 --- a/speechbrain/alignment/aligner.py +++ b/speechbrain/alignment/aligner.py @@ -5,11 +5,16 @@ * Elena Rastorgueva 2020 * Loren Lugosch 2020 """ -import torch + import random -from speechbrain.utils.checkpoints import register_checkpoint_hooks -from speechbrain.utils.checkpoints import mark_as_saver -from speechbrain.utils.checkpoints import mark_as_loader + +import torch + +from speechbrain.utils.checkpoints import ( + mark_as_loader, + mark_as_saver, + register_checkpoint_hooks, +) from speechbrain.utils.data_utils import undo_padding @@ -47,25 +52,31 @@ class HMMAligner(torch.nn.Module): Example ------- - >>> log_posteriors = torch.tensor([[[ -1., -10., -10.], - ... [-10., -1., -10.], - ... [-10., -10., -1.]], - ... - ... [[ -1., -10., -10.], - ... [-10., -1., -10.], - ... [-10., -10., -10.]]]) - >>> lens = torch.tensor([1., 0.66]) - >>> phns = torch.tensor([[0, 1, 2], - ... [0, 1, 0]]) - >>> phn_lens = torch.tensor([1., 0.66]) + >>> log_posteriors = torch.tensor( + ... [ + ... [ + ... [-1.0, -10.0, -10.0], + ... [-10.0, -1.0, -10.0], + ... [-10.0, -10.0, -1.0], + ... ], + ... [ + ... [-1.0, -10.0, -10.0], + ... [-10.0, -1.0, -10.0], + ... [-10.0, -10.0, -10.0], + ... ], + ... ] + ... 
) + >>> lens = torch.tensor([1.0, 0.66]) + >>> phns = torch.tensor([[0, 1, 2], [0, 1, 0]]) + >>> phn_lens = torch.tensor([1.0, 0.66]) >>> aligner = HMMAligner() >>> forward_scores = aligner( - ... log_posteriors, lens, phns, phn_lens, 'forward' + ... log_posteriors, lens, phns, phn_lens, "forward" ... ) >>> forward_scores.shape torch.Size([2]) >>> viterbi_scores, alignments = aligner( - ... log_posteriors, lens, phns, phn_lens, 'viterbi' + ... log_posteriors, lens, phns, phn_lens, "viterbi" ... ) >>> alignments [[0, 1, 2], [0, 1]] @@ -96,7 +107,7 @@ def __init__( self.lexicon_path = lexicon_path if self.lexicon_path is not None: - with open(self.lexicon_path, "r") as f: + with open(self.lexicon_path, encoding="utf-8") as f: lines = f.readlines() for i, line in enumerate(lines): @@ -166,9 +177,7 @@ def _use_lexicon(self, words, interword_sils, sample_pron): """ number_of_states = 0 - words_prime = ( - [] - ) # This will contain one "word" for each optional silence and pronunciation. + words_prime = [] # This will contain one "word" for each optional silence and pronunciation. # structure of each "word_prime": # [word index, [[state sequence 1], [state sequence 2]], ] word_index = 0 @@ -327,20 +336,16 @@ def use_lexicon(self, words, interword_sils=True, sample_pron=False): Example ------- >>> aligner = HMMAligner() - >>> aligner.lexicon = { - ... "a": {0: "a"}, - ... "b": {0: "b", 1: "c"} - ... } + >>> aligner.lexicon = {"a": {0: "a"}, "b": {0: "b", 1: "c"}} >>> words = [["a", "b"]] >>> aligner.lex_lab2ind = { - ... "sil": 0, - ... "a": 1, - ... "b": 2, - ... "c": 3, - ... } - >>> poss_phns, poss_phn_lens, trans_prob, pi_prob, final_states = aligner.use_lexicon( - ... words, - ... interword_sils = True + ... "sil": 0, + ... "a": 1, + ... "b": 2, + ... "c": 3, + ... } + >>> poss_phns, poss_phn_lens, trans_prob, pi_prob, final_states = ( + ... aligner.use_lexicon(words, interword_sils=True) ... 
) >>> poss_phns tensor([[0, 1, 0, 2, 3, 0]]) @@ -365,9 +370,8 @@ def use_lexicon(self, words, interword_sils=True, sample_pron=False): >>> final_states [[3, 4, 5]] >>> # With no optional silences between words - >>> poss_phns_, _, trans_prob_, pi_prob_, final_states_ = aligner.use_lexicon( - ... words, - ... interword_sils = False + >>> poss_phns_, _, trans_prob_, pi_prob_, final_states_ = ( + ... aligner.use_lexicon(words, interword_sils=False) ... ) >>> poss_phns_ tensor([[0, 1, 2, 3, 0]]) @@ -384,9 +388,8 @@ def use_lexicon(self, words, interword_sils=True, sample_pron=False): >>> # With sampling of a single possible pronunciation >>> import random >>> random.seed(0) - >>> poss_phns_, _, trans_prob_, pi_prob_, final_states_ = aligner.use_lexicon( - ... words, - ... sample_pron = True + >>> poss_phns_, _, trans_prob_, pi_prob_, final_states_ = ( + ... aligner.use_lexicon(words, sample_pron=True) ... ) >>> poss_phns_ tensor([[0, 1, 0, 2, 0]]) @@ -693,6 +696,8 @@ def _dp_viterbi( The absolute length of each phoneme sequence in the batch. phns : torch.Tensor (batch, phoneme in phn sequence) The phonemes that are known/thought to be in each utterance. + final_states : list + List of final states Returns ------- @@ -732,7 +737,7 @@ def _dp_viterbi( ) v_matrix[:, :, t] = x + emiss_pred_useful[:, :, t] - backpointers[:, :, t] = argmax.type(torch.FloatTensor) + backpointers[:, :, t] = argmax.type(dtype=torch.float32) z_stars = [] z_stars_loc = [] @@ -887,7 +892,7 @@ def forward( trans_prob = prob_matrices["trans_prob"] final_states = prob_matrices["final_states"] else: - ValueError( + raise ValueError( """`prob_matrices` must contain the keys `pi_prob`, `trans_prob` and `final_states`""" ) @@ -952,12 +957,11 @@ def expand_phns_by_states_per_phoneme(self, phns, phn_lens): Example ------- - >>> phns = torch.tensor([[0., 3., 5., 0.], - ... 
[0., 2., 0., 0.]]) - >>> phn_lens = torch.tensor([1., 0.75]) - >>> aligner = HMMAligner(states_per_phoneme = 3) + >>> phns = torch.tensor([[0.0, 3.0, 5.0, 0.0], [0.0, 2.0, 0.0, 0.0]]) + >>> phn_lens = torch.tensor([1.0, 0.75]) + >>> aligner = HMMAligner(states_per_phoneme=3) >>> expanded_phns = aligner.expand_phns_by_states_per_phoneme( - ... phns, phn_lens + ... phns, phn_lens ... ) >>> expanded_phns tensor([[ 0., 1., 2., 9., 10., 11., 15., 16., 17., 0., 1., 2.], @@ -997,12 +1001,12 @@ def store_alignments(self, ids, alignments): Example ------- >>> aligner = HMMAligner() - >>> ids = ['id1', 'id2'] + >>> ids = ["id1", "id2"] >>> alignments = [[0, 2, 4], [1, 2, 3, 4]] >>> aligner.store_alignments(ids, alignments) >>> aligner.align_dict.keys() dict_keys(['id1', 'id2']) - >>> aligner.align_dict['id1'] + >>> aligner.align_dict["id1"] tensor([0, 2, 4], dtype=torch.int16) """ @@ -1132,21 +1136,27 @@ def get_prev_alignments(self, ids, emission_pred, lens, phns, phn_lens): Example ------- - >>> ids = ['id1', 'id2'] - >>> emission_pred = torch.tensor([[[ -1., -10., -10.], - ... [-10., -1., -10.], - ... [-10., -10., -1.]], - ... - ... [[ -1., -10., -10.], - ... [-10., -1., -10.], - ... [-10., -10., -10.]]]) - >>> lens = torch.tensor([1., 0.66]) - >>> phns = torch.tensor([[0, 1, 2], - ... [0, 1, 0]]) - >>> phn_lens = torch.tensor([1., 0.66]) + >>> ids = ["id1", "id2"] + >>> emission_pred = torch.tensor( + ... [ + ... [ + ... [-1.0, -10.0, -10.0], + ... [-10.0, -1.0, -10.0], + ... [-10.0, -10.0, -1.0], + ... ], + ... [ + ... [-1.0, -10.0, -10.0], + ... [-10.0, -1.0, -10.0], + ... [-10.0, -10.0, -10.0], + ... ], + ... ] + ... ) + >>> lens = torch.tensor([1.0, 0.66]) + >>> phns = torch.tensor([[0, 1, 2], [0, 1, 0]]) + >>> phn_lens = torch.tensor([1.0, 0.66]) >>> aligner = HMMAligner() >>> alignment_batch = aligner.get_prev_alignments( - ... ids, emission_pred, lens, phns, phn_lens + ... ids, emission_pred, lens, phns, phn_lens ... 
) >>> alignment_batch tensor([[0, 1, 2], @@ -1243,8 +1253,8 @@ def calc_accuracy(self, alignments, ends, phns, ind2labs=None): Example ------- >>> aligner = HMMAligner() - >>> alignments = [[0., 0., 0., 1.]] - >>> phns = [[0., 1.]] + >>> alignments = [[0.0, 0.0, 0.0, 1.0]] + >>> phns = [[0.0, 1.0]] >>> ends = [[2, 4]] >>> mean_acc = aligner.calc_accuracy(alignments, ends, phns) >>> mean_acc.item() @@ -1288,7 +1298,7 @@ def collapse_alignments(self, alignments): Example ------- - >>> aligner = HMMAligner(states_per_phoneme = 3) + >>> aligner = HMMAligner(states_per_phoneme=3) >>> alignments = [0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2] >>> sequence = aligner.collapse_alignments(alignments) >>> sequence @@ -1315,9 +1325,8 @@ def _save(self, path): torch.save(self.align_dict, path) @mark_as_loader - def _load(self, path, end_of_epoch=False, device=None): + def _load(self, path, end_of_epoch=False): del end_of_epoch # Not used here. - del device self.align_dict = torch.load(path) @@ -1351,13 +1360,13 @@ def map_inds_to_intersect(lists1, lists2, ind2labs): >>> lists1 = [[0, 1]] >>> lists2 = [[0, 1]] >>> ind2lab1 = { - ... 0: "a", - ... 1: "b", - ... } + ... 0: "a", + ... 1: "b", + ... } >>> ind2lab2 = { - ... 0: "a", - ... 1: "c", - ... } + ... 0: "a", + ... 1: "c", + ... } >>> ind2labs = (ind2lab1, ind2lab2) >>> out1, out2 = map_inds_to_intersect(lists1, lists2, ind2labs) >>> out1 @@ -1407,23 +1416,33 @@ def batch_log_matvecmul(A, b): b : torch.Tensor (batch, dim1) Tensor. - Outputs + Returns ------- x : torch.Tensor (batch, dim1) Example ------- - >>> A = torch.tensor([[[ 0., 0.], - ... [ -1e5, 0.]]]) - >>> b = torch.tensor([[0., 0.,]]) + >>> A = torch.tensor([[[0.0, 0.0], [-1e5, 0.0]]]) + >>> b = torch.tensor( + ... [ + ... [ + ... 0.0, + ... 0.0, + ... ] + ... ] + ... ) >>> x = batch_log_matvecmul(A, b) >>> x tensor([[0.6931, 0.0000]]) >>> >>> # non-log domain equivalent without batching functionality - >>> A_ = torch.tensor([[1., 1.], - ... 
[0., 1.]]) - >>> b_ = torch.tensor([1., 1.,]) + >>> A_ = torch.tensor([[1.0, 1.0], [0.0, 1.0]]) + >>> b_ = torch.tensor( + ... [ + ... 1.0, + ... 1.0, + ... ] + ... ) >>> x_ = torch.matmul(A_, b_) >>> x_ tensor([2., 1.]) @@ -1445,7 +1464,7 @@ def batch_log_maxvecmul(A, b): b : torch.Tensor (batch, dim1) Tensor - Outputs + Returns ------- x : torch.Tensor (batch, dim1) Tensor. @@ -1454,9 +1473,15 @@ def batch_log_maxvecmul(A, b): Example ------- - >>> A = torch.tensor([[[ 0., -1.], - ... [ -1e5, 0.]]]) - >>> b = torch.tensor([[0., 0.,]]) + >>> A = torch.tensor([[[0.0, -1.0], [-1e5, 0.0]]]) + >>> b = torch.tensor( + ... [ + ... [ + ... 0.0, + ... 0.0, + ... ] + ... ] + ... ) >>> x, argmax = batch_log_maxvecmul(A, b) >>> x tensor([[0., 0.]]) diff --git a/speechbrain/alignment/ctc_segmentation.py b/speechbrain/alignment/ctc_segmentation.py index e1a7efc412..72888467e9 100644 --- a/speechbrain/alignment/ctc_segmentation.py +++ b/speechbrain/alignment/ctc_segmentation.py @@ -1,664 +1,11 @@ -#!/usr/bin/env python3 -# 2021, Technische Universität München, Ludwig Kürzinger -"""Perform CTC segmentation to align utterances within audio files. +"""This file ensures old links to ctc_segmentation continue to work while providing a deprecation warning"""
-Install it with pip or see the installing instructions in -https://github.com/lumaku/ctc-segmentation -""" +import warnings -import logging -from pathlib import Path -from types import SimpleNamespace -from typing import Optional -from typing import Union +from speechbrain.integrations.alignment.ctc_seg import * # noqa: F401, F403 -import numpy as np -import torch -from typing import List - -# speechbrain interface -from speechbrain.pretrained.interfaces import EncoderASR, EncoderDecoderASR - -# imports for CTC segmentation -try: - from ctc_segmentation import ctc_segmentation - from ctc_segmentation import CtcSegmentationParameters - from ctc_segmentation import determine_utterance_segments - from ctc_segmentation import prepare_text - from ctc_segmentation import prepare_token_list -except ImportError: - print( - "ImportError: " - "Is the ctc_segmentation module installed " - "and in your PYTHONPATH?" - ) - raise ImportError("The ctc_segmentation module is missing.") - -logger = logging.getLogger(__name__) - - -class CTCSegmentationTask(SimpleNamespace): - """Task object for CTC segmentation. - - This object is automatically generated and acts as - a container for results of a CTCSegmentation object. - - When formatted with str(·), this object returns - results in a kaldi-style segments file formatting. - The human-readable output can be configured with - the printing options. - - Properties - --------- - text : list - Utterance texts, separated by line. But without the utterance - name at the beginning of the line (as in kaldi-style text). - ground_truth_mat : array - Ground truth matrix (CTC segmentation). - utt_begin_indices : np.ndarray - Utterance separator for the Ground truth matrix. - timings : np.ndarray - Time marks of the corresponding chars. - state_list : list - Estimated alignment of chars/tokens. - segments : list - Calculated segments as: (start, end, confidence score). - config : CtcSegmentationParameters - CTC Segmentation configuration object. 
- name : str - Name of aligned audio file (Optional). If given, name is - considered when generating the text. - Default: "utt". - utt_ids : list - The list of utterance names (Optional). This list should - have the same length as the number of utterances. - lpz : np.ndarray - CTC posterior log probabilities (Optional). - - Properties for printing - ---------------------- - print_confidence_score : bool - Include the confidence score. - Default: True. - print_utterance_text : bool - Include utterance text. - Default: True. - - """ - - text = None - ground_truth_mat = None - utt_begin_indices = None - timings = None - char_probs = None - state_list = None - segments = None - config = None - done = False - # Optional - name = "utt" - utt_ids = None - lpz = None - # Printing - print_confidence_score = True - print_utterance_text = True - - def set(self, **kwargs): - """Update object attributes.""" - self.__dict__.update(kwargs) - - def __str__(self): - """Return a kaldi-style ``segments`` file (string).""" - output = "" - num_utts = len(self.segments) - if self.utt_ids is None: - utt_names = [f"{self.name}_{i:04}" for i in range(num_utts)] - else: - # ensure correct mapping of segments to utterance ids - assert num_utts == len(self.utt_ids) - utt_names = self.utt_ids - for i, boundary in enumerate(self.segments): - # utterance name and file name - utt_entry = f"{utt_names[i]} {self.name} " - # segment start and end - utt_entry += f"{boundary[0]:.2f} {boundary[1]:.2f}" - # confidence score - if self.print_confidence_score: - utt_entry += f" {boundary[2]:3.4f}" - # utterance ground truth - if self.print_utterance_text: - utt_entry += f" {self.text[i]}" - output += utt_entry + "\n" - return output - - -class CTCSegmentation: - """Align text to audio using CTC segmentation. - - Usage - ----- - Initialize with given ASR model and parameters. - If needed, parameters for CTC segmentation can be set with ``set_config(·)``. 
- Then call the instance as function to align text within an audio file. - - Arguments - --------- - asr_model : EncoderDecoderASR - Speechbrain ASR interface. This requires a model that has a - trained CTC layer for inference. It is better to use a model with - single-character tokens to get a better time resolution. - Please note that the inference complexity with Transformer models - usually increases quadratically with audio length. - It is therefore recommended to use RNN-based models, if available. - kaldi_style_text : bool - A kaldi-style text file includes the name of the - utterance at the start of the line. If True, the utterance name - is expected as first word at each line. If False, utterance - names are automatically generated. Set this option according to - your input data. Default: True. - text_converter : str - How CTC segmentation handles text. - "tokenize": Use the ASR model tokenizer to tokenize the text. - "classic": The text is preprocessed as text pieces which takes - token length into account. If the ASR model has longer tokens, - this option may yield better results. Default: "tokenize". - time_stamps : str - Choose the method how the time stamps are - calculated. While "fixed" and "auto" use both the sample rate, - the ratio of samples to one frame is either automatically - determined for each inference or fixed at a certain ratio that - is initially determined by the module, but can be changed via - the parameter ``samples_to_frames_ratio``. Recommended for - longer audio files: "auto". - **ctc_segmentation_args - Parameters for CTC segmentation. - The full list of parameters is found in ``set_config``. 
- - Example - ------- - >>> # using example file included in the SpeechBrain repository - >>> from speechbrain.pretrained import EncoderDecoderASR - >>> from speechbrain.alignment.ctc_segmentation import CTCSegmentation - >>> # load an ASR model - >>> pre_trained = "speechbrain/asr-transformer-transformerlm-librispeech" - >>> asr_model = EncoderDecoderASR.from_hparams(source=pre_trained) - >>> aligner = CTCSegmentation(asr_model, kaldi_style_text=False) - >>> # load data - >>> audio_path = "tests/samples/single-mic/example1.wav" - >>> text = ["THE BIRCH CANOE", "SLID ON THE", "SMOOTH PLANKS"] - >>> segments = aligner(audio_path, text, name="example1") - - On multiprocessing - ------------------ - To parallelize the computation with multiprocessing, these three steps - can be separated: - (1) ``get_lpz``: obtain the lpz, - (2) ``prepare_segmentation_task``: prepare the task, and - (3) ``get_segments``: perform CTC segmentation. - Note that the function `get_segments` is a staticmethod and therefore - independent of an already initialized CTCSegmentation obj́ect. 
- - References - ---------- - CTC-Segmentation of Large Corpora for German End-to-end Speech Recognition - 2020, Kürzinger, Winkelbauer, Li, Watzel, Rigoll - https://arxiv.org/abs/2007.09127 - - More parameters are described in https://github.com/lumaku/ctc-segmentation - - """ - - fs = 16000 - kaldi_style_text = True - samples_to_frames_ratio = None - time_stamps = "auto" - choices_time_stamps = ["auto", "fixed"] - text_converter = "tokenize" - choices_text_converter = ["tokenize", "classic"] - warned_about_misconfiguration = False - config = CtcSegmentationParameters() - - def __init__( - self, - asr_model: Union[EncoderASR, EncoderDecoderASR], - kaldi_style_text: bool = True, - text_converter: str = "tokenize", - time_stamps: str = "auto", - **ctc_segmentation_args, - ): - """Initialize the CTCSegmentation module.""" - # Prepare ASR model - if ( - isinstance(asr_model, EncoderDecoderASR) - and not ( - hasattr(asr_model, "mods") - and hasattr(asr_model.mods, "decoder") - and hasattr(asr_model.mods.decoder, "ctc_weight") - ) - ) or ( - isinstance(asr_model, EncoderASR) - and not ( - hasattr(asr_model, "mods") - and hasattr(asr_model.mods, "encoder") - and hasattr(asr_model.mods.encoder, "ctc_lin") - ) - ): - raise AttributeError("The given asr_model has no CTC module!") - if not hasattr(asr_model, "tokenizer"): - raise AttributeError( - "The given asr_model has no tokenizer in asr_model.tokenizer!" 
- ) - self.asr_model = asr_model - self._encode = self.asr_model.encode_batch - if isinstance(asr_model, EncoderDecoderASR): - # Assumption: log-softmax is already included in ctc_forward_step - self._ctc = self.asr_model.mods.decoder.ctc_forward_step - else: - # Apply log-softmax to encoder output - self._ctc = self.asr_model.hparams.log_softmax - self._tokenizer = self.asr_model.tokenizer - - # Apply configuration - self.set_config( - fs=self.asr_model.hparams.sample_rate, - time_stamps=time_stamps, - kaldi_style_text=kaldi_style_text, - text_converter=text_converter, - **ctc_segmentation_args, - ) - - # determine token or character list - char_list = [ - asr_model.tokenizer.id_to_piece(i) - for i in range(asr_model.tokenizer.vocab_size()) - ] - self.config.char_list = char_list - - # Warn about possible misconfigurations - max_char_len = max([len(c) for c in char_list]) - if len(char_list) > 500 and max_char_len >= 8: - logger.warning( - f"The dictionary has {len(char_list)} tokens with " - f"a max length of {max_char_len}. This may lead " - f"to low alignment performance and low accuracy." - ) - - def set_config( - self, - time_stamps: Optional[str] = None, - fs: Optional[int] = None, - samples_to_frames_ratio: Optional[float] = None, - set_blank: Optional[int] = None, - replace_spaces_with_blanks: Optional[bool] = None, - kaldi_style_text: Optional[bool] = None, - text_converter: Optional[str] = None, - gratis_blank: Optional[bool] = None, - min_window_size: Optional[int] = None, - max_window_size: Optional[int] = None, - scoring_length: Optional[int] = None, - ): - """Set CTC segmentation parameters. - - Parameters for timing - --------------------- - time_stamps : str - Select method how CTC index duration is estimated, and - thus how the time stamps are calculated. - fs : int - Sample rate. Usually derived from ASR model; use this parameter - to overwrite the setting. 
- samples_to_frames_ratio : float - If you want to directly determine the - ratio of samples to CTC frames, set this parameter, and - set ``time_stamps`` to "fixed". - Note: If you want to calculate the time stamps from a model - with fixed subsampling, set this parameter to: - ``subsampling_factor * frame_duration / 1000``. - - Parameters for text preparation - ------------------------------- - set_blank : int - Index of blank in token list. Default: 0. - replace_spaces_with_blanks : bool - Inserts blanks between words, which is - useful for handling long pauses between words. Only used in - ``text_converter="classic"`` preprocessing mode. Default: False. - kaldi_style_text : bool - Determines whether the utterance name is expected - as fist word of the utterance. Set at module initialization. - text_converter : str - How CTC segmentation handles text. - Set at module initialization. - - Parameters for alignment - ------------------------ - min_window_size : int - Minimum number of frames considered for a single - utterance. The current default value of 8000 corresponds to - roughly 4 minutes (depending on ASR model) and should be OK in - most cases. If your utterances are further apart, increase - this value, or decrease it for smaller audio files. - max_window_size : int - Maximum window size. It should not be necessary - to change this value. - gratis_blank : bool - If True, the transition cost of blank is set to zero. - Useful for long preambles or if there are large unrelated segments - between utterances. Default: False. - - Parameters for calculation of confidence score - ---------------------------------------------- - scoring_length : int - Block length to calculate confidence score. The - default value of 30 should be OK in most cases. - 30 corresponds to roughly 1-2s of audio. 
- """ - # Parameters for timing - if time_stamps is not None: - if time_stamps not in self.choices_time_stamps: - raise NotImplementedError( - f"Parameter ´time_stamps´ has to be one of " - f"{list(self.choices_time_stamps)}", - ) - self.time_stamps = time_stamps - if fs is not None: - self.fs = float(fs) - if samples_to_frames_ratio is not None: - self.samples_to_frames_ratio = float(samples_to_frames_ratio) - # Parameters for text preparation - if set_blank is not None: - self.config.blank = int(set_blank) - if replace_spaces_with_blanks is not None: - self.config.replace_spaces_with_blanks = bool( - replace_spaces_with_blanks - ) - if kaldi_style_text is not None: - self.kaldi_style_text = bool(kaldi_style_text) - if text_converter is not None: - if text_converter not in self.choices_text_converter: - raise NotImplementedError( - f"Parameter ´text_converter´ has to be one of " - f"{list(self.choices_text_converter)}", - ) - self.text_converter = text_converter - # Parameters for alignment - if min_window_size is not None: - self.config.min_window_size = int(min_window_size) - if max_window_size is not None: - self.config.max_window_size = int(max_window_size) - if gratis_blank is not None: - self.config.blank_transition_cost_zero = bool(gratis_blank) - if ( - self.config.blank_transition_cost_zero - and self.config.replace_spaces_with_blanks - and not self.warned_about_misconfiguration - ): - logger.error( - "Blanks are inserted between words, and also the transition cost of" - " blank is zero. This configuration may lead to misalignments!" 
- ) - self.warned_about_misconfiguration = True - # Parameter for calculation of confidence score - if scoring_length is not None: - self.config.score_min_mean_over_L = int(scoring_length) - - def get_timing_config(self, speech_len=None, lpz_len=None): - """Obtain parameters to determine time stamps.""" - timing_cfg = { - "index_duration": self.config.index_duration, - } - # As the parameter ctc_index_duration vetoes the other - if self.time_stamps == "fixed": - # Initialize the value, if not yet available - if self.samples_to_frames_ratio is None: - ratio = self.estimate_samples_to_frames_ratio() - self.samples_to_frames_ratio = ratio - index_duration = self.samples_to_frames_ratio / self.fs - else: - assert self.time_stamps == "auto" - samples_to_frames_ratio = speech_len / lpz_len - index_duration = samples_to_frames_ratio / self.fs - timing_cfg["index_duration"] = index_duration - return timing_cfg - - def estimate_samples_to_frames_ratio(self, speech_len=215040): - """Determine the ratio of encoded frames to sample points. - - This method helps to determine the time a single encoded frame occupies. - As the sample rate already gave the number of samples, only the ratio - of samples per encoded CTC frame are needed. This function estimates them by - doing one inference, which is only needed once. - - Args - ---- - speech_len : int - Length of randomly generated speech vector for single - inference. Default: 215040. - - Returns - ------- - int - Estimated ratio. - """ - random_input = torch.rand(speech_len) - lpz = self.get_lpz(random_input) - lpz_len = lpz.shape[0] - # CAVEAT assumption: Frontend does not discard trailing data! - samples_to_frames_ratio = speech_len / lpz_len - return samples_to_frames_ratio - - @torch.no_grad() - def get_lpz(self, speech: Union[torch.Tensor, np.ndarray]): - """Obtain CTC posterior log probabilities for given speech data. - - Args - ---- - speech : Union[torch.Tensor, np.ndarray] - Speech audio input. 
- - Returns - ------- - np.ndarray - Numpy vector with CTC log posterior probabilities. - """ - if isinstance(speech, np.ndarray): - speech = torch.tensor(speech) - # Batch data: (Nsamples,) -> (1, Nsamples) - speech = speech.unsqueeze(0).to(self.asr_model.device) - wav_lens = torch.tensor([1.0]).to(self.asr_model.device) - enc = self._encode(speech, wav_lens) - # Apply ctc layer to obtain log character probabilities - lpz = self._ctc(enc).detach() - # Shape should be (